==> pax_global_header <==
comment=a337b99b77170ee97fee11c98f8448e3ebf445e9

==> backport-iwlwifi-dkms-7906/.blacklist.map <==
# Update this map when a driver gets renamed or
# symbols from old drivers get moved to a newer
# driver. If you have the driver on the right
# hand side it will be blacklisted upon installation
# only if you actually installed the driver on the
# left.
#
# new-driver	old-driver
iwlwifi		iwlagn
iwl4965		iwlagn
videodev	v4l2-compat-ioctl32

==> backport-iwlwifi-dkms-7906/.gitignore <==
Kconfig.kernel
Kconfig.versions
.kernel_config_md5
.config
.config.old
*~
*.o
*.d
.tmp_*
*.ko
*.cmd
*.tmp
*.ver
modules.order
backport-include/backport/autoconf.h
modules
kconf/mconf
kconf/conf
.tmp_versions
Module.symvers
*.mod.c
.cache.mk
*.pyc
__pycache__

# cscope files
cscope.*
ncscope.*

==> backport-iwlwifi-dkms-7906/Android-make-iwlwifi.mk <==

# Run this build only if the variant defines the needed configuration,
# e.g. enabling iwlwifi for XMM6321:
# BOARD_USING_INTEL_IWL := true - this will enable iwlwifi building
# INTEL_IWL_BOARD_CONFIG := xmm6321 - the configuration, i.e. defconfig-xmm6321
# INTEL_IWL_USE_COMPAT_INSTALL := y - this will use kernel modules installation
# INTEL_IWL_COMPAT_INSTALL_DIR := updates - the folder that the modules will be installed in
# INTEL_IWL_COMPAT_INSTALL_PATH ?= $(ANDROID_BUILD_TOP)/$(TARGET_OUT) - the install path for the modules
# (A sample BoardConfig.mk sketch follows this makefile.)

.PHONY: iwlwifi

INTEL_IWL_SRC_DIR := $(call my-dir)
INTEL_IWL_OUT_DIR := $(abspath $(PRODUCT_OUT)/iwlwifi)
INTEL_IWL_COMPAT_INSTALL_PATH ?= $(abspath $(TARGET_OUT))

ifeq ($(CROSS_COMPILE),)
ifneq ($(TARGET_TOOLS_PREFIX),)
CROSS_COMPILE := CROSS_COMPILE=$(notdir $(TARGET_TOOLS_PREFIX))
endif
endif

ifeq ($(CROSS_COMPILE),)
ifeq ($(TARGET_ARCH),arm)
$(warning You are building for an ARM platform, but no CROSS_COMPILE is set. This is likely an error.)
else
$(warning CROSS_COMPILE variant is not set; defaulting to host gcc.)
endif
endif

ifeq ($(INTEL_IWL_USE_COMPAT_INSTALL),y)
INTEL_IWL_COMPAT_INSTALL := iwlwifi_install
INTEL_IWL_KERNEL_DEPEND := $(INSTALLED_KERNEL_TARGET)
else
# use system install
copy_modules_to_root: iwlwifi
ALL_KERNEL_MODULES += $(INTEL_IWL_OUT_DIR)
INTEL_IWL_KERNEL_DEPEND := build_bzImage
endif

ifeq ($(INTEL_IWL_USE_RM_MAC_CFG),y)
copy_modules_to_root: iwlwifi
INTEL_IWL_COMPAT_INSTALL_PATH := $(abspath $(KERNEL_OUT_MODINSTALL))
INTEL_IWL_KERNEL_DEPEND := modules_install
INTEL_IWL_RM_MAC_CFG_DEPEND := iwlwifi_rm_mac_cfg
INTEL_IWL_INSTALL_MOD_STRIP := INSTALL_MOD_STRIP=1
endif

# IWLWIFI_CONFIGURE is a rule to use a defconfig for iwlwifi
IWLWIFI_CONFIGURE := $(INTEL_IWL_OUT_DIR)/.config
KERNEL_OUT_ABS_DIR := $(abspath $(KERNEL_OUT_DIR))

iwlwifi: iwlwifi_build $(INTEL_IWL_COMPAT_INSTALL) $(INTEL_IWL_MOD_DEP)

INTEL_IWL_SRC_FILES := $(shell find $(INTEL_IWL_SRC_DIR) -path $(INTEL_IWL_SRC_DIR)/.git -prune -o -print)

$(INTEL_IWL_OUT_DIR): $(INTEL_IWL_SRC_FILES)
	@echo Syncing directory $(INTEL_IWL_SRC_DIR) to $(INTEL_IWL_OUT_DIR)
	@rsync -rt --del $(INTEL_IWL_SRC_DIR)/ $(INTEL_IWL_OUT_DIR)

$(IWLWIFI_CONFIGURE): $(INTEL_IWL_KERNEL_DEPEND) $(INTEL_IWL_OUT_DIR)
	@echo Configuring kernel module iwlwifi with defconfig-$(INTEL_IWL_BOARD_CONFIG)
	@$(MAKE) -C $(INTEL_IWL_OUT_DIR)/ ARCH=$(TARGET_ARCH) $(CROSS_COMPILE) KLIB_BUILD=$(KERNEL_OUT_ABS_DIR) defconfig-$(INTEL_IWL_BOARD_CONFIG)

iwlwifi_build: $(IWLWIFI_CONFIGURE)
	@$(info Building kernel module iwlwifi in $(INTEL_IWL_OUT_DIR))
	@$(MAKE) -C $(INTEL_IWL_OUT_DIR)/ ARCH=$(TARGET_ARCH) $(CROSS_COMPILE) KLIB_BUILD=$(KERNEL_OUT_ABS_DIR)

iwlwifi_install: iwlwifi_build $(INTEL_IWL_RM_MAC_CFG_DEPEND)
	@$(info Installing kernel modules in $(INTEL_IWL_COMPAT_INSTALL_PATH))
	@$(MAKE) -C $(KERNEL_OUT_ABS_DIR) ARCH=$(TARGET_ARCH) M=$(INTEL_IWL_OUT_DIR)/ INSTALL_MOD_DIR=$(INTEL_IWL_COMPAT_INSTALL_DIR) INSTALL_MOD_PATH=$(INTEL_IWL_COMPAT_INSTALL_PATH) $(INTEL_IWL_INSTALL_MOD_STRIP) modules_install

iwlwifi_rm_mac_cfg: iwlwifi_build
	$(info Remove kernel cfg80211.ko and mac80211.ko)
	@find $(KERNEL_OUT_MODINSTALL)/lib/modules/ -name "mac80211.ko" | xargs rm -f
	@find $(KERNEL_OUT_MODINSTALL)/lib/modules/ -name "cfg80211.ko" | xargs rm -f

.PHONY: iwlwifi_clean
iwlwifi_clean:
	rm -rf $(INTEL_IWL_OUT_DIR)
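A minimal sketch of how a device's BoardConfig.mk might enable this build, using only the variables documented in the header comment of the makefile above; the xmm6321 board name and the "updates" install directory are the examples that comment itself gives:

# BoardConfig.mk (hypothetical device config) - enable the iwlwifi backport build
BOARD_USING_INTEL_IWL := true            # turn the iwlwifi build on
INTEL_IWL_BOARD_CONFIG := xmm6321        # selects defconfig-xmm6321
INTEL_IWL_USE_COMPAT_INSTALL := y        # install via the kernel's modules_install
INTEL_IWL_COMPAT_INSTALL_DIR := updates  # modules land under .../lib/modules/<ver>/updates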
==> backport-iwlwifi-dkms-7906/Android-upper-folder.mk <==
LOCAL_PATH := $(call my-dir)

ifneq ($(INTEL_IWL_MODULE_SUB_FOLDER),)
include $(LOCAL_PATH)/$(INTEL_IWL_MODULE_SUB_FOLDER)/Android.mk
endif

==> backport-iwlwifi-dkms-7906/Android.mk <==
# Include the iwlwifi Android mk only if it was declared.
ifeq ($(BOARD_USING_INTEL_IWL),true)

# Run this build only if the variant defines the needed configuration,
# e.g. enabling iwlwifi for XMM6321:
# BOARD_USING_INTEL_IWL := true - this will enable iwlwifi building
# INTEL_IWL_BOARD_CONFIG := xmm6321 - the configuration, i.e. defconfig-xmm6321
# INTEL_IWL_USE_COMPAT_INSTALL := y - this will use kernel modules installation
# INTEL_IWL_COMPAT_INSTALL_DIR := updates - the folder that the modules will be installed in
# INTEL_IWL_COMPAT_INSTALL_PATH ?= $(ANDROID_BUILD_TOP)/$(TARGET_OUT) - the install path for the modules

.PHONY: iwlwifi

INTEL_IWL_SRC_DIR := $(call my-dir)
INTEL_IWL_OUT_DIR := $(abspath $(PRODUCT_OUT)/iwlwifi)
INTEL_IWL_COMPAT_INSTALL_PATH ?= $(abspath $(TARGET_OUT))

ifeq ($(CROSS_COMPILE),)
ifneq ($(TARGET_TOOLS_PREFIX),)
CROSS_COMPILE := CROSS_COMPILE=$(notdir $(TARGET_TOOLS_PREFIX))
endif
endif

ifeq ($(CROSS_COMPILE),)
ifeq ($(TARGET_ARCH),arm)
$(warning You are building for an ARM platform, but no CROSS_COMPILE is set. This is likely an error.)
else
$(warning CROSS_COMPILE variant is not set; defaulting to host gcc.)
endif
endif

ifeq ($(INTEL_IWL_USE_COMPAT_INSTALL),y)
INTEL_IWL_COMPAT_INSTALL := iwlwifi_install
INTEL_IWL_KERNEL_DEPEND := $(INSTALLED_KERNEL_TARGET)
else
# use system install
copy_modules_to_root: iwlwifi
ALL_KERNEL_MODULES += $(INTEL_IWL_OUT_DIR)
INTEL_IWL_KERNEL_DEPEND := build_bzImage
endif

ifeq ($(INTEL_IWL_USE_RM_MAC_CFG),y)
copy_modules_to_root: iwlwifi
INTEL_IWL_COMPAT_INSTALL_PATH := $(abspath $(KERNEL_OUT_MODINSTALL))
INTEL_IWL_KERNEL_DEPEND := modules_install
INTEL_IWL_RM_MAC_CFG_DEPEND := iwlwifi_rm_mac_cfg
INTEL_IWL_INSTALL_MOD_STRIP := INSTALL_MOD_STRIP=1
endif

# IWLWIFI_CONFIGURE is a rule to use a defconfig for iwlwifi
IWLWIFI_CONFIGURE := $(INTEL_IWL_OUT_DIR)/.config
KERNEL_OUT_ABS_DIR := $(abspath $(KERNEL_OUT_DIR))

iwlwifi: iwlwifi_build $(INTEL_IWL_COMPAT_INSTALL) $(INTEL_IWL_MOD_DEP)

INTEL_IWL_SRC_FILES := $(shell find $(INTEL_IWL_SRC_DIR) -path $(INTEL_IWL_SRC_DIR)/.git -prune -o -print)

$(INTEL_IWL_OUT_DIR): $(INTEL_IWL_SRC_FILES)
	@echo Syncing directory $(INTEL_IWL_SRC_DIR) to $(INTEL_IWL_OUT_DIR)
	@rsync -rt --del $(INTEL_IWL_SRC_DIR)/ $(INTEL_IWL_OUT_DIR)

$(IWLWIFI_CONFIGURE): $(INTEL_IWL_KERNEL_DEPEND) $(INTEL_IWL_OUT_DIR)
	@echo Configuring kernel module iwlwifi with defconfig-$(INTEL_IWL_BOARD_CONFIG)
	@$(MAKE) -C $(INTEL_IWL_OUT_DIR)/ ARCH=$(TARGET_ARCH) $(CROSS_COMPILE) KLIB_BUILD=$(KERNEL_OUT_ABS_DIR) defconfig-$(INTEL_IWL_BOARD_CONFIG)

iwlwifi_build: $(IWLWIFI_CONFIGURE)
	@$(info Building kernel module iwlwifi in $(INTEL_IWL_OUT_DIR))
	@$(MAKE) -C $(INTEL_IWL_OUT_DIR)/ ARCH=$(TARGET_ARCH) $(CROSS_COMPILE) KLIB_BUILD=$(KERNEL_OUT_ABS_DIR)

iwlwifi_install: iwlwifi_build $(INTEL_IWL_RM_MAC_CFG_DEPEND)
	@$(info Installing kernel modules in $(INTEL_IWL_COMPAT_INSTALL_PATH))
	@$(MAKE) -C $(KERNEL_OUT_ABS_DIR) ARCH=$(TARGET_ARCH) M=$(INTEL_IWL_OUT_DIR)/ INSTALL_MOD_DIR=$(INTEL_IWL_COMPAT_INSTALL_DIR) INSTALL_MOD_PATH=$(INTEL_IWL_COMPAT_INSTALL_PATH) $(INTEL_IWL_INSTALL_MOD_STRIP) modules_install

iwlwifi_rm_mac_cfg: iwlwifi_build
	$(info Remove kernel cfg80211.ko and mac80211.ko)
	@find $(KERNEL_OUT_MODINSTALL)/lib/modules/ -name "mac80211.ko" | xargs rm -f
	@find $(KERNEL_OUT_MODINSTALL)/lib/modules/ -name "cfg80211.ko" | xargs rm -f

.PHONY: iwlwifi_clean
iwlwifi_clean:
	rm -rf $(INTEL_IWL_OUT_DIR)

endif
==> backport-iwlwifi-dkms-7906/COPYING <==
The Linux Kernel is provided under:

	SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note

Being under the terms of the GNU General Public License version 2 only,
according with:

	LICENSES/preferred/GPL-2.0

With an explicit syscall exception, as stated at:

	LICENSES/exceptions/Linux-syscall-note

In addition, other licenses may also apply. Please see:

	Documentation/process/license-rules.rst

for more details.

==> backport-iwlwifi-dkms-7906/Kconfig <==
mainmenu "Backports from $BACKPORTED_KERNEL_NAME $BACKPORTED_KERNEL_VERSION (backports $BACKPORTS_VERSION)"

config BACKPORT_DIR
	string
	option env="BACKPORT_DIR"

config BACKPORTS_VERSION
	string
	option env="BACKPORTS_VERSION"

config BACKPORTED_KERNEL_VERSION
	string
	option env="BACKPORTED_KERNEL_VERSION"

config BACKPORTED_KERNEL_NAME
	string
	option env="BACKPORTED_KERNEL_NAME"

# Packaging hacks
source "$BACKPORT_DIR/Kconfig.package.hacks"

# Code we backport
source "$BACKPORT_DIR/Kconfig.sources"

# these will be generated
source "$BACKPORT_DIR/Kconfig.kernel"
source "$BACKPORT_DIR/Kconfig.versions"
source "$BACKPORT_DIR/Kconfig.local"

==> backport-iwlwifi-dkms-7906/Kconfig.local <==
config BACKPORTED_BACKPORT_DIR
	tristate
	default BACKPORT_DIR

config BACKPORTED_BACKPORTS_VERSION
	tristate
	default BACKPORTS_VERSION

config BACKPORTED_BACKPORTED_KERNEL_VERSION
	tristate
	default BACKPORTED_KERNEL_VERSION

config BACKPORTED_BACKPORTED_KERNEL_NAME
	tristate
	default BACKPORTED_KERNEL_NAME

config BACKPORTED_WIRELESS
	tristate
	default WIRELESS

config BACKPORTED_NET_CORE
	tristate
	default NET_CORE

config BACKPORTED_EXPERT
	tristate
	default EXPERT

config BACKPORTED_BP_MODULES
	tristate
	default BP_MODULES

config BACKPORTED_BPAUTO_BUILD_CORDIC
	tristate
	default BPAUTO_BUILD_CORDIC

config BACKPORTED_BPAUTO_CORDIC
	tristate
	default BPAUTO_CORDIC

config BACKPORTED_BPAUTO_MII
	tristate
	default BPAUTO_MII

config BACKPORTED_BPAUTO_BUILD_LEDS
	tristate
	default BPAUTO_BUILD_LEDS

config BACKPORTED_BPAUTO_NEW_LEDS
	tristate
	default BPAUTO_NEW_LEDS

config BACKPORTED_BPAUTO_LEDS_CLASS
	tristate
	default BPAUTO_LEDS_CLASS

config BACKPORTED_BPAUTO_LEDS_TRIGGERS
	tristate
	default BPAUTO_LEDS_TRIGGERS

config BACKPORTED_BPAUTO_USERSEL_BUILD_ALL
	tristate
	default BPAUTO_USERSEL_BUILD_ALL

config BACKPORTED_BPAUTO_WANT_DEV_COREDUMP
	tristate
	default BPAUTO_WANT_DEV_COREDUMP

config BACKPORTED_BPAUTO_BUILD_WANT_DEV_COREDUMP
	tristate
	default BPAUTO_BUILD_WANT_DEV_COREDUMP

config BACKPORTED_BPAUTO_RHASHTABLE
	tristate
	default BPAUTO_RHASHTABLE

config BACKPORTED_BPAUTO_BUCKET_LOCKS
	tristate
	default BPAUTO_BUCKET_LOCKS

config BACKPORTED_BPAUTO_REFCOUNT
	tristate
	default BPAUTO_REFCOUNT

config BACKPORTED_BPAUTO_SYSTEM_DATA_VERIFICATION
	tristate
	default BPAUTO_SYSTEM_DATA_VERIFICATION

config BACKPORTED_BPAUTO_BUILD_SYSTEM_DATA_VERIFICATION
	tristate
	default BPAUTO_BUILD_SYSTEM_DATA_VERIFICATION

config BACKPORTED_BPAUTO_PUBLIC_KEY
	tristate
	default BPAUTO_PUBLIC_KEY

config BACKPORTED_BPAUTO_ASN1_DECODER
	tristate
	default BPAUTO_ASN1_DECODER

config BACKPORTED_BPAUTO_PKCS7
	tristate
	default BPAUTO_PKCS7

config BACKPORTED_CFG80211
	tristate
	default CFG80211

config BACKPORTED_NL80211_TESTMODE
	tristate
	default NL80211_TESTMODE

config BACKPORTED_CFG80211_DEVELOPER_WARNINGS
	tristate
	default CFG80211_DEVELOPER_WARNINGS

config BACKPORTED_CFG80211_CERTIFICATION_ONUS
	tristate
	default CFG80211_CERTIFICATION_ONUS

config BACKPORTED_CFG80211_REQUIRE_SIGNED_REGDB
	tristate
	default CFG80211_REQUIRE_SIGNED_REGDB

config BACKPORTED_CFG80211_USE_KERNEL_REGDB_KEYS
	tristate
	default CFG80211_USE_KERNEL_REGDB_KEYS

config BACKPORTED_CFG80211_EXTRA_REGDB_KEYDIR
	tristate
	default CFG80211_EXTRA_REGDB_KEYDIR
config BACKPORTED_CFG80211_REG_CELLULAR_HINTS
	tristate
	default CFG80211_REG_CELLULAR_HINTS

config BACKPORTED_CFG80211_REG_RELAX_NO_IR
	tristate
	default CFG80211_REG_RELAX_NO_IR

config BACKPORTED_CFG80211_DEFAULT_PS
	tristate
	default CFG80211_DEFAULT_PS

config BACKPORTED_CFG80211_DEBUGFS
	tristate
	default CFG80211_DEBUGFS

config BACKPORTED_CFG80211_CRDA_SUPPORT
	tristate
	default CFG80211_CRDA_SUPPORT

config BACKPORTED_CFG80211_WEXT
	tristate
	default CFG80211_WEXT

config BACKPORTED_CFG80211_WEXT_EXPORT
	tristate
	default CFG80211_WEXT_EXPORT

config BACKPORTED_LIB80211
	tristate
	default LIB80211

config BACKPORTED_LIB80211_CRYPT_WEP
	tristate
	default LIB80211_CRYPT_WEP

config BACKPORTED_LIB80211_CRYPT_CCMP
	tristate
	default LIB80211_CRYPT_CCMP

config BACKPORTED_LIB80211_CRYPT_TKIP
	tristate
	default LIB80211_CRYPT_TKIP

config BACKPORTED_LIB80211_DEBUG
	tristate
	default LIB80211_DEBUG

config BACKPORTED_MAC80211
	tristate
	default MAC80211

config BACKPORTED_MAC80211_HAS_RC
	tristate
	default MAC80211_HAS_RC

config BACKPORTED_MAC80211_RC_MINSTREL
	tristate
	default MAC80211_RC_MINSTREL

config BACKPORTED_MAC80211_RC_DEFAULT_MINSTREL
	tristate
	default MAC80211_RC_DEFAULT_MINSTREL

config BACKPORTED_MAC80211_RC_DEFAULT
	tristate
	default MAC80211_RC_DEFAULT

config BACKPORTED_MAC80211_MESH
	tristate
	default MAC80211_MESH

config BACKPORTED_MAC80211_LEDS
	tristate
	default MAC80211_LEDS

config BACKPORTED_MAC80211_DEBUGFS
	tristate
	default MAC80211_DEBUGFS

config BACKPORTED_MAC80211_MESSAGE_TRACING
	tristate
	default MAC80211_MESSAGE_TRACING

config BACKPORTED_MAC80211_DEBUG_MENU
	tristate
	default MAC80211_DEBUG_MENU

config BACKPORTED_MAC80211_NOINLINE
	tristate
	default MAC80211_NOINLINE

config BACKPORTED_MAC80211_VERBOSE_DEBUG
	tristate
	default MAC80211_VERBOSE_DEBUG

config BACKPORTED_MAC80211_MLME_DEBUG
	tristate
	default MAC80211_MLME_DEBUG

config BACKPORTED_MAC80211_STA_DEBUG
	tristate
	default MAC80211_STA_DEBUG

config BACKPORTED_MAC80211_HT_DEBUG
	tristate
	default MAC80211_HT_DEBUG

config BACKPORTED_MAC80211_OCB_DEBUG
	tristate
	default MAC80211_OCB_DEBUG

config BACKPORTED_MAC80211_IBSS_DEBUG
	tristate
	default MAC80211_IBSS_DEBUG

config BACKPORTED_MAC80211_PS_DEBUG
	tristate
	default MAC80211_PS_DEBUG

config BACKPORTED_MAC80211_MPL_DEBUG
	tristate
	default MAC80211_MPL_DEBUG

config BACKPORTED_MAC80211_MPATH_DEBUG
	tristate
	default MAC80211_MPATH_DEBUG

config BACKPORTED_MAC80211_MHWMP_DEBUG
	tristate
	default MAC80211_MHWMP_DEBUG

config BACKPORTED_MAC80211_MESH_SYNC_DEBUG
	tristate
	default MAC80211_MESH_SYNC_DEBUG

config BACKPORTED_MAC80211_MESH_CSA_DEBUG
	tristate
	default MAC80211_MESH_CSA_DEBUG

config BACKPORTED_MAC80211_MESH_PS_DEBUG
	tristate
	default MAC80211_MESH_PS_DEBUG

config BACKPORTED_MAC80211_TDLS_DEBUG
	tristate
	default MAC80211_TDLS_DEBUG

config BACKPORTED_MAC80211_DEBUG_COUNTERS
	tristate
	default MAC80211_DEBUG_COUNTERS

config BACKPORTED_MAC80211_STA_HASH_MAX_SIZE
	tristate
	default MAC80211_STA_HASH_MAX_SIZE

config BACKPORTED_WLAN
	tristate
	default WLAN

config BACKPORTED_WIRELESS_WDS
	tristate
	default WIRELESS_WDS

config BACKPORTED_PCMCIA_RAYCS
	tristate
	default PCMCIA_RAYCS

config BACKPORTED_PCMCIA_WL3501
	tristate
	default PCMCIA_WL3501

config BACKPORTED_MAC80211_HWSIM
	tristate
	default MAC80211_HWSIM

config BACKPORTED_USB_NET_RNDIS_WLAN
	tristate
	default USB_NET_RNDIS_WLAN

config BACKPORTED_VIRT_WIFI
	tristate
	default VIRT_WIFI

config BACKPORTED_WLAN_VENDOR_INTEL
	tristate
	default WLAN_VENDOR_INTEL

config BACKPORTED_IWLWIFI
	tristate
	default IWLWIFI

config BACKPORTED_IWLWIFI_LEDS
	tristate
	default IWLWIFI_LEDS
config BACKPORTED_IWLDVM
	tristate
	default IWLDVM

config BACKPORTED_IWLMVM
	tristate
	default IWLMVM

config BACKPORTED_IWLWIFI_OPMODE_MODULAR
	tristate
	default IWLWIFI_OPMODE_MODULAR

config BACKPORTED_IWLWIFI_BCAST_FILTERING
	tristate
	default IWLWIFI_BCAST_FILTERING

config BACKPORTED_IWLWIFI_DEBUG
	tristate
	default IWLWIFI_DEBUG

config BACKPORTED_IWLWIFI_DEBUGFS
	tristate
	default IWLWIFI_DEBUGFS

config BACKPORTED_IWLWIFI_DEVICE_TRACING
	tristate
	default IWLWIFI_DEVICE_TRACING

==> backport-iwlwifi-dkms-7906/Kconfig.package.hacks <==
# some hacks for when we use backports to generate a package
# to build modules out of tree.
config WIRELESS
	def_bool y

config NET_CORE
	def_bool y

config EXPERT
	def_bool y

==> backport-iwlwifi-dkms-7906/Kconfig.sources <==
# this has the configuration for the backport code
source "$BACKPORT_DIR/compat/Kconfig"

# these are copied from the kernel
source "$BACKPORT_DIR/net/wireless/Kconfig"
source "$BACKPORT_DIR/net/mac80211/Kconfig"
source "$BACKPORT_DIR/drivers/net/wireless/Kconfig"
#source "$BACKPORT_DIR/drivers/net/usb/Kconfig"
#source "$BACKPORT_DIR/drivers/ssb/Kconfig"
#source "$BACKPORT_DIR/drivers/bcma/Kconfig"
#source "$BACKPORT_DIR/drivers/usb/class/Kconfig"
#source "$BACKPORT_DIR/drivers/staging/Kconfig"

==> backport-iwlwifi-dkms-7906/MAINTAINERS <==

	List of maintainers and how to submit kernel changes

Please try to follow the guidelines below.  This will make things
easier on the maintainers.  Not all of these guidelines matter for every
trivial patch so apply some common sense.

1.	Always _test_ your changes, however small, on at least 4 or
	5 people, preferably many more.

2.	Try to release a few ALPHA test versions to the net. Announce
	them onto the kernel channel and await results. This is especially
	important for device drivers, because often that's the only way
	you will find things like the fact version 3 firmware needs
	a magic fix you didn't know about, or some clown changed the
	chips on a board and not its name.  (Don't laugh!  Look at the
	SMC etherpower for that.)

3.	Make sure your changes compile correctly in multiple
	configurations. In particular check that changes work both as a
	module and built into the kernel.

4.	When you are happy with a change make it generally available for
	testing and await feedback.

5.	Make a patch available to the relevant maintainer in the list. Use
	'diff -u' to make the patch easy to merge. Be prepared to get your
	changes sent back with seemingly silly requests about formatting
	and variable names.  These aren't as silly as they seem. One
	job the maintainers (and especially Linus) do is to keep things
	looking the same. Sometimes this means that the clever hack in
	your driver to get around a problem actually needs to become a
	generalized kernel feature ready for next time.

	PLEASE check your patch with the automated style checker
	(scripts/checkpatch.pl) to catch trivial style violations.
	See Documentation/process/coding-style.rst for guidance here.

	PLEASE CC: the maintainers and mailing lists that are generated
	by scripts/get_maintainer.pl.  The results returned by the
	script will be best if you have git installed and are making
	your changes in a branch derived from Linus' latest git tree.
	See Documentation/process/submitting-patches.rst for details.
	(Example invocations of both scripts follow this list.)

	PLEASE try to include any credit lines you want added with the
	patch.
	It avoids people being missed off by mistake and makes it
	easier to know who wants adding and who doesn't.

	PLEASE document known bugs. If it doesn't work for everything
	or does something very odd once a month document it.

	PLEASE remember that submissions must be made under the terms
	of the Linux Foundation certificate of contribution and should
	include a Signed-off-by: line.  The current version of this
	"Developer's Certificate of Origin" (DCO) is listed in the file
	Documentation/process/submitting-patches.rst.

6.	Make sure you have the right to send any changes you make. If you
	do changes at work you may find your employer owns the patch
	not you.

7.	When sending security related changes or reports to a maintainer
	please Cc: security@kernel.org, especially if the maintainer
	does not respond. Please keep in mind that the security team is
	a small set of people who can be efficient only when working on
	verified bugs. Please only Cc: this list when you have identified
	that the bug would present a short-term risk to other users if it
	were publicly disclosed. For example, reports of address leaks do
	not represent an immediate threat and are better handled publicly,
	and ideally, should come with a patch proposal. Please do not send
	automated reports to this list either. Such bugs will be handled
	better and faster in the usual public places.

8.	Happy hacking.
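A typical pre-submission pass with the two scripts mentioned in point 5
above might look like this (the patch file name is illustrative):

	./scripts/checkpatch.pl 0001-my-change.patch
	./scripts/get_maintainer.pl 0001-my-change.patch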
Descriptions of section entries:

	P: Person (obsolete)
	M: Mail patches to: FullName
	R: Designated reviewer: FullName
	   These reviewers should be CCed on patches.
	L: Mailing list that is relevant to this area
	W: Web-page with status/info
	B: URI for where to file bugs. A web-page with detailed bug
	   filing info, a direct bug tracker link, or a mailto: URI.
	C: URI for chat protocol, server and channel where developers
	   usually hang out, for example irc://server/channel.
	Q: Patchwork web based patch tracking system site
	T: SCM tree type and location.
	   Type is one of: git, hg, quilt, stgit, topgit
	S: Status, one of the following:
	   Supported:	Someone is actually paid to look after this.
	   Maintained:	Someone actually looks after it.
	   Odd Fixes:	It has a maintainer but they don't have time to do
			much other than throw the odd patch in. See below.
	   Orphan:	No current maintainer [but maybe you could take the
			role as you write your new code].
	   Obsolete:	Old code. Something tagged obsolete generally means
			it has been replaced by a better system and you
			should be using that.
	F: Files and directories with wildcard patterns.
	   A trailing slash includes all files and subdirectory files.
	   F:	drivers/net/	all files in and below drivers/net
	   F:	drivers/net/*	all files in drivers/net, but not below
	   F:	*/net/*		all files in "any top level directory"/net
	   One pattern per line.  Multiple F: lines acceptable.
	N: Files and directories with regex patterns.
	   N:	[^a-z]tegra	all files whose path contains the word tegra
	   One pattern per line.  Multiple N: lines acceptable.
	   scripts/get_maintainer.pl has different behavior for files that
	   match F: pattern and matches of N: patterns.  By default,
	   get_maintainer will not look at git log history when an F: pattern
	   match occurs.  When an N: match occurs, git log history is used
	   to also notify the people that have git commit signatures.
	X: Files and directories that are NOT maintained, same rules as F:
	   Files exclusions are tested before file matches.
	   Can be useful for excluding a specific subdirectory, for instance:
	   F:	net/
	   X:	net/ipv6/
	   matches all files in and below net excluding net/ipv6/
	K: Keyword perl extended regex pattern to match content in a
	   patch or file.  For instance:
	   K: of_get_profile
	      matches patches or files that contain "of_get_profile"
	   K: \b(printk|pr_(info|err))\b
	      matches patches or files that contain one or more of the words
	      printk, pr_info or pr_err
	   One regex pattern per line.  Multiple K: lines acceptable.
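Put together, a complete entry using these fields might look like the
following sketch (the driver name, maintainer, list, and paths are all
hypothetical):

	EXAMPLE WIDGET DRIVER
	M:	Firstname Lastname
	L:	linux-example@vger.kernel.org
	S:	Maintained
	F:	drivers/misc/example-widget.c
	K:	example_widget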
Note: For the hard of thinking, this list is meant to remain in alphabetical
order. If you could add yourselves to it in alphabetical order that would be
so much easier [Ed]

Maintainers List (try to look for most precise areas first)

		-----------------------------------

3C59X NETWORK DRIVER
M:	Steffen Klassert
L:	netdev@vger.kernel.org
S:	Odd Fixes
F:	Documentation/networking/device_drivers/3com/vortex.txt
F:	drivers/net/ethernet/3com/3c59x.c

3CR990 NETWORK DRIVER
M:	David Dillow
L:	netdev@vger.kernel.org
S:	Maintained
F:	drivers/net/ethernet/3com/typhoon*

3WARE SAS/SATA-RAID SCSI DRIVERS (3W-XXXX, 3W-9XXX, 3W-SAS)
M:	Adam Radford
L:	linux-scsi@vger.kernel.org
W:	http://www.lsi.com
S:	Supported
F:	drivers/scsi/3w-*

53C700 AND 53C700-66 SCSI DRIVER
M:	"James E.J. Bottomley"
L:	linux-scsi@vger.kernel.org
S:	Maintained
F:	drivers/scsi/53c700*

6LOWPAN GENERIC (BTLE/IEEE 802.15.4)
M:	Alexander Aring
M:	Jukka Rissanen
L:	linux-bluetooth@vger.kernel.org
L:	linux-wpan@vger.kernel.org
S:	Maintained
F:	net/6lowpan/
F:	include/net/6lowpan.h
F:	Documentation/networking/6lowpan.txt

6PACK NETWORK DRIVER FOR AX.25
M:	Andreas Koensgen
L:	linux-hams@vger.kernel.org
S:	Maintained
F:	drivers/net/hamradio/6pack.c

8169 10/100/1000 GIGABIT ETHERNET DRIVER
M:	Realtek linux nic maintainers
M:	Heiner Kallweit
L:	netdev@vger.kernel.org
S:	Maintained
F:	drivers/net/ethernet/realtek/r8169.c

8250/16?50 (AND CLONE UARTS) SERIAL DRIVER
M:	Greg Kroah-Hartman
L:	linux-serial@vger.kernel.org
S:	Maintained
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/tty.git
F:	drivers/tty/serial/8250*
F:	include/linux/serial_8250.h

8390 NETWORK DRIVERS [WD80x3/SMC-ELITE, SMC-ULTRA, NE2000, 3C503, etc.]
L:	netdev@vger.kernel.org
S:	Orphan / Obsolete
F:	drivers/net/ethernet/8390/

9P FILE SYSTEM
M:	Eric Van Hensbergen
M:	Latchesar Ionkov
M:	Dominique Martinet
L:	v9fs-developer@lists.sourceforge.net
W:	http://swik.net/v9fs
Q:	http://patchwork.kernel.org/project/v9fs-devel/list/
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/ericvh/v9fs.git
T:	git git://github.com/martinetd/linux.git
S:	Maintained
F:	Documentation/filesystems/9p.txt
F:	fs/9p/
F:	net/9p/
F:	include/net/9p/
F:	include/uapi/linux/virtio_9p.h
F:	include/trace/events/9p.h

A8293 MEDIA DRIVER
M:	Antti Palosaari
L:	linux-media@vger.kernel.org
W:	https://linuxtv.org
W:	http://palosaari.fi/linux/
Q:	http://patchwork.linuxtv.org/project/linux-media/list/
T:	git git://linuxtv.org/anttip/media_tree.git
S:	Maintained
F:	drivers/media/dvb-frontends/a8293*

AACRAID SCSI RAID DRIVER
M:	Adaptec OEM Raid Solutions
L:	linux-scsi@vger.kernel.org
W:	http://www.adaptec.com/
S:	Supported
F:	Documentation/scsi/aacraid.txt
F:	drivers/scsi/aacraid/

ABI/API
L:	linux-api@vger.kernel.org
F:	include/linux/syscalls.h
F:	kernel/sys_ni.c

ABIT UGURU 1,2 HARDWARE MONITOR DRIVER
M:	Hans de Goede
L:	linux-hwmon@vger.kernel.org
S:	Maintained
F:	drivers/hwmon/abituguru.c

ABIT UGURU 3 HARDWARE MONITOR DRIVER
M:	Alistair John Strachan
L:	linux-hwmon@vger.kernel.org
S:	Maintained
F:	drivers/hwmon/abituguru3.c

ACCES 104-DIO-48E GPIO DRIVER
M:	William Breathitt Gray
L:	linux-gpio@vger.kernel.org
S:	Maintained
F:	drivers/gpio/gpio-104-dio-48e.c

ACCES 104-IDI-48 GPIO DRIVER
M:	"William Breathitt Gray"
L:	linux-gpio@vger.kernel.org
S:	Maintained
F:	drivers/gpio/gpio-104-idi-48.c

ACCES 104-IDIO-16 GPIO DRIVER
M:	"William Breathitt Gray"
L:	linux-gpio@vger.kernel.org
S:	Maintained
F:	drivers/gpio/gpio-104-idio-16.c

ACCES 104-QUAD-8 IIO DRIVER
M:	William Breathitt Gray
L:	linux-iio@vger.kernel.org
S:	Maintained
F:	Documentation/ABI/testing/sysfs-bus-iio-counter-104-quad-8
F:	drivers/iio/counter/104-quad-8.c

ACCES PCI-IDIO-16 GPIO DRIVER
M:	William Breathitt Gray
L:	linux-gpio@vger.kernel.org
S:	Maintained
F:	drivers/gpio/gpio-pci-idio-16.c

ACCES PCIe-IDIO-24 GPIO DRIVER
M:	William Breathitt Gray
L:	linux-gpio@vger.kernel.org
S:	Maintained
F:	drivers/gpio/gpio-pcie-idio-24.c

ACENIC DRIVER
M:	Jes Sorensen
L:	linux-acenic@sunsite.dk
S:	Maintained
F:	drivers/net/ethernet/alteon/acenic*

ACER ASPIRE ONE TEMPERATURE AND FAN DRIVER
M:	Peter Feuerer
L:	platform-driver-x86@vger.kernel.org
W:	http://piie.net/?section=acerhdf
S:	Maintained
F:	drivers/platform/x86/acerhdf.c

ACER WMI LAPTOP EXTRAS
M:	"Lee, Chun-Yi"
L:	platform-driver-x86@vger.kernel.org
S:	Maintained
F:	drivers/platform/x86/acer-wmi.c

ACPI
M:	"Rafael J. Wysocki"
M:	Len Brown
L:	linux-acpi@vger.kernel.org
W:	https://01.org/linux-acpi
Q:	https://patchwork.kernel.org/project/linux-acpi/list/
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
B:	https://bugzilla.kernel.org
S:	Supported
F:	drivers/acpi/
F:	drivers/pnp/pnpacpi/
F:	include/linux/acpi.h
F:	include/linux/fwnode.h
F:	include/acpi/
F:	Documentation/acpi/
F:	Documentation/ABI/testing/sysfs-bus-acpi
F:	Documentation/ABI/testing/configfs-acpi
F:	drivers/pci/*acpi*
F:	drivers/pci/*/*acpi*
F:	tools/power/acpi/

ACPI APEI
M:	"Rafael J. Wysocki"
M:	Len Brown
L:	linux-acpi@vger.kernel.org
R:	Tony Luck
R:	Borislav Petkov
F:	drivers/acpi/apei/

ACPI COMPONENT ARCHITECTURE (ACPICA)
M:	Robert Moore
M:	Erik Schmauss
M:	"Rafael J. Wysocki"
L:	linux-acpi@vger.kernel.org
L:	devel@acpica.org
W:	https://acpica.org/
W:	https://github.com/acpica/acpica/
Q:	https://patchwork.kernel.org/project/linux-acpi/list/
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
B:	https://bugzilla.kernel.org
B:	https://bugs.acpica.org
S:	Supported
F:	drivers/acpi/acpica/
F:	include/acpi/
F:	tools/power/acpi/

ACPI FAN DRIVER
M:	Zhang Rui
L:	linux-acpi@vger.kernel.org
W:	https://01.org/linux-acpi
B:	https://bugzilla.kernel.org
S:	Supported
F:	drivers/acpi/fan.c

ACPI FOR ARM64 (ACPI/arm64)
M:	Lorenzo Pieralisi
M:	Hanjun Guo
M:	Sudeep Holla
L:	linux-acpi@vger.kernel.org
S:	Maintained
F:	drivers/acpi/arm64

ACPI I2C MULTI INSTANTIATE DRIVER
M:	Hans de Goede
L:	platform-driver-x86@vger.kernel.org
S:	Maintained
F:	drivers/platform/x86/i2c-multi-instantiate.c

ACPI PMIC DRIVERS
M:	"Rafael J. Wysocki"
M:	Len Brown
R:	Andy Shevchenko
R:	Mika Westerberg
L:	linux-acpi@vger.kernel.org
Q:	https://patchwork.kernel.org/project/linux-acpi/list/
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
B:	https://bugzilla.kernel.org
S:	Supported
F:	drivers/acpi/pmic/

ACPI THERMAL DRIVER
M:	Zhang Rui
L:	linux-acpi@vger.kernel.org
W:	https://01.org/linux-acpi
B:	https://bugzilla.kernel.org
S:	Supported
F:	drivers/acpi/*thermal*

ACPI VIDEO DRIVER
M:	Zhang Rui
L:	linux-acpi@vger.kernel.org
W:	https://01.org/linux-acpi
B:	https://bugzilla.kernel.org
S:	Supported
F:	drivers/acpi/acpi_video.c

ACPI WMI DRIVER
L:	platform-driver-x86@vger.kernel.org
S:	Orphan
F:	drivers/platform/x86/wmi.c
F:	include/uapi/linux/wmi.h

AD1889 ALSA SOUND DRIVER
M:	Thibaut Varene
W:	http://wiki.parisc-linux.org/AD1889
L:	linux-parisc@vger.kernel.org
S:	Maintained
F:	sound/pci/ad1889.*

AD525X ANALOG DEVICES DIGITAL POTENTIOMETERS DRIVER
M:	Michael Hennerich
W:	http://wiki.analog.com/AD5254
W:	http://ez.analog.com/community/linux-device-drivers
S:	Supported
F:	drivers/misc/ad525x_dpot.c

AD5398 CURRENT REGULATOR DRIVER (AD5398/AD5821)
M:	Michael Hennerich
W:	http://wiki.analog.com/AD5398
W:	http://ez.analog.com/community/linux-device-drivers
S:	Supported
F:	drivers/regulator/ad5398.c

AD714X CAPACITANCE TOUCH SENSOR DRIVER (AD7142/3/7/8/7A)
M:	Michael Hennerich
W:	http://wiki.analog.com/AD7142
W:	http://ez.analog.com/community/linux-device-drivers
S:	Supported
F:	drivers/input/misc/ad714x.c

AD7877 TOUCHSCREEN DRIVER
M:	Michael Hennerich
W:	http://wiki.analog.com/AD7877
W:	http://ez.analog.com/community/linux-device-drivers
S:	Supported
F:	drivers/input/touchscreen/ad7877.c

AD7879 TOUCHSCREEN DRIVER (AD7879/AD7889)
M:	Michael Hennerich
W:	http://wiki.analog.com/AD7879
W:	http://ez.analog.com/community/linux-device-drivers
S:	Supported
F:	drivers/input/touchscreen/ad7879.c

ADDRESS SPACE LAYOUT RANDOMIZATION (ASLR)
M:	Jiri Kosina
S:	Maintained

ADF7242 IEEE 802.15.4 RADIO DRIVER
M:	Michael Hennerich
W:	https://wiki.analog.com/ADF7242
W:	http://ez.analog.com/community/linux-device-drivers
L:	linux-wpan@vger.kernel.org
S:	Supported
F:	drivers/net/ieee802154/adf7242.c
F:	Documentation/devicetree/bindings/net/ieee802154/adf7242.txt

ADM1025 HARDWARE MONITOR DRIVER
M:	Jean Delvare
L:	linux-hwmon@vger.kernel.org
S:	Maintained
F:	Documentation/hwmon/adm1025
F:	drivers/hwmon/adm1025.c

ADM1029 HARDWARE MONITOR DRIVER
M:	Corentin Labbe
L:	linux-hwmon@vger.kernel.org
S:	Maintained
F:	drivers/hwmon/adm1029.c

ADM8211 WIRELESS DRIVER
L:	linux-wireless@vger.kernel.org
W:	http://wireless.kernel.org/
S:	Orphan
F:	drivers/net/wireless/admtek/adm8211.*

ADP1653 FLASH CONTROLLER DRIVER
M:	Sakari Ailus
L:	linux-media@vger.kernel.org
S:	Maintained
F:	drivers/media/i2c/adp1653.c
F:	include/media/i2c/adp1653.h

ADP5520 BACKLIGHT DRIVER WITH IO EXPANDER (ADP5520/ADP5501)
M:	Michael Hennerich
W:	http://wiki.analog.com/ADP5520
W:	http://ez.analog.com/community/linux-device-drivers
S:	Supported
F:	drivers/mfd/adp5520.c
F:	drivers/video/backlight/adp5520_bl.c
F:	drivers/leds/leds-adp5520.c
F:	drivers/gpio/gpio-adp5520.c
F:	drivers/input/keyboard/adp5520-keys.c

ADP5588 QWERTY KEYPAD AND IO EXPANDER DRIVER (ADP5588/ADP5587)
M:	Michael Hennerich
W:	http://wiki.analog.com/ADP5588
W:	http://ez.analog.com/community/linux-device-drivers
S:	Supported
F:	drivers/input/keyboard/adp5588-keys.c
F:	drivers/gpio/gpio-adp5588.c

ADP8860 BACKLIGHT DRIVER (ADP8860/ADP8861/ADP8863)
M:	Michael Hennerich
W:	http://wiki.analog.com/ADP8860
W:	http://ez.analog.com/community/linux-device-drivers
S:	Supported
F:	drivers/video/backlight/adp8860_bl.c

ADS1015 HARDWARE MONITOR DRIVER
M:	Dirk Eibach
L:	linux-hwmon@vger.kernel.org
S:	Maintained
F:	Documentation/hwmon/ads1015
F:	drivers/hwmon/ads1015.c
F:	include/linux/platform_data/ads1015.h

ADT746X FAN DRIVER
M:	Colin Leroy
S:	Maintained
F:	drivers/macintosh/therm_adt746x.c

ADT7475 HARDWARE MONITOR DRIVER
M:	Jean Delvare
L:	linux-hwmon@vger.kernel.org
S:	Maintained
F:	Documentation/hwmon/adt7475
F:	drivers/hwmon/adt7475.c

ADVANSYS SCSI DRIVER
M:	Matthew Wilcox
M:	Hannes Reinecke
L:	linux-scsi@vger.kernel.org
S:	Maintained
F:	Documentation/scsi/advansys.txt
F:	drivers/scsi/advansys.c

ADXL34X THREE-AXIS DIGITAL ACCELEROMETER DRIVER (ADXL345/ADXL346)
M:	Michael Hennerich
W:	http://wiki.analog.com/ADXL345
W:	http://ez.analog.com/community/linux-device-drivers
S:	Supported
F:	drivers/input/misc/adxl34x.c

ADXL372 THREE-AXIS DIGITAL ACCELEROMETER DRIVER
M:	Stefan Popa
W:	http://ez.analog.com/community/linux-device-drivers
S:	Supported
F:	drivers/iio/accel/adxl372.c
F:	drivers/iio/accel/adxl372_spi.c
F:	drivers/iio/accel/adxl372_i2c.c
F:	Documentation/devicetree/bindings/iio/accel/adxl372.txt

AF9013 MEDIA DRIVER
M:	Antti Palosaari
L:	linux-media@vger.kernel.org
W:	https://linuxtv.org
W:	http://palosaari.fi/linux/
Q:	http://patchwork.linuxtv.org/project/linux-media/list/
T:	git git://linuxtv.org/anttip/media_tree.git
S:	Maintained
F:	drivers/media/dvb-frontends/af9013*

AF9033 MEDIA DRIVER
M:	Antti Palosaari
L:	linux-media@vger.kernel.org
W:	https://linuxtv.org
W:	http://palosaari.fi/linux/
Q:	http://patchwork.linuxtv.org/project/linux-media/list/
T:	git git://linuxtv.org/anttip/media_tree.git
S:	Maintained
F:	drivers/media/dvb-frontends/af9033*

AFFS FILE SYSTEM
M:	David Sterba
L:	linux-fsdevel@vger.kernel.org
S:	Odd Fixes
F:	Documentation/filesystems/affs.txt
F:	fs/affs/

AFS FILESYSTEM
M:	David Howells
L:	linux-afs@lists.infradead.org
S:	Supported
F:	fs/afs/
F:	include/trace/events/afs.h
F:	Documentation/filesystems/afs.txt
W:	https://www.infradead.org/~dhowells/kafs/

AGPGART DRIVER
M:	David Airlie
T:	git git://anongit.freedesktop.org/drm/drm
S:	Maintained
F:	drivers/char/agp/
F:	include/linux/agp*
F:	include/uapi/linux/agp*
Fischer" L: linux-scsi@vger.kernel.org S: Maintained F: drivers/scsi/aha152x* F: drivers/scsi/pcmcia/aha152x* AIC7XXX / AIC79XX SCSI DRIVER M: Hannes Reinecke L: linux-scsi@vger.kernel.org S: Maintained F: drivers/scsi/aic7xxx/ AIMSLAB FM RADIO RECEIVER DRIVER M: Hans Verkuil L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git W: https://linuxtv.org S: Maintained F: drivers/media/radio/radio-aimslab* AIO M: Benjamin LaHaise L: linux-aio@kvack.org S: Supported F: fs/aio.c F: include/linux/*aio*.h AIRSPY MEDIA DRIVER M: Antti Palosaari L: linux-media@vger.kernel.org W: https://linuxtv.org W: http://palosaari.fi/linux/ Q: http://patchwork.linuxtv.org/project/linux-media/list/ T: git git://linuxtv.org/anttip/media_tree.git S: Maintained F: drivers/media/usb/airspy/ ALACRITECH GIGABIT ETHERNET DRIVER M: Lino Sanfilippo S: Maintained F: drivers/net/ethernet/alacritech/* ALCATEL SPEEDTOUCH USB DRIVER M: Duncan Sands L: linux-usb@vger.kernel.org W: http://www.linux-usb.org/SpeedTouch/ S: Maintained F: drivers/usb/atm/speedtch.c F: drivers/usb/atm/usbatm.c ALCHEMY AU1XX0 MMC DRIVER M: Manuel Lauss S: Maintained F: drivers/mmc/host/au1xmmc.c ALI1563 I2C DRIVER M: Rudolf Marek L: linux-i2c@vger.kernel.org S: Maintained F: Documentation/i2c/busses/i2c-ali1563 F: drivers/i2c/busses/i2c-ali1563.c ALLWINNER SECURITY SYSTEM M: Corentin Labbe L: linux-crypto@vger.kernel.org S: Maintained F: drivers/crypto/sunxi-ss/ ALLWINNER VPU DRIVER M: Maxime Ripard M: Paul Kocialkowski L: linux-media@vger.kernel.org S: Maintained F: drivers/staging/media/sunxi/cedrus/ ALPHA PORT M: Richard Henderson M: Ivan Kokshaysky M: Matt Turner S: Odd Fixes L: linux-alpha@vger.kernel.org F: arch/alpha/ ALPS PS/2 TOUCHPAD DRIVER R: Pali Rohár F: drivers/input/mouse/alps.* ALTERA I2C CONTROLLER DRIVER M: Thor Thayer S: Maintained F: drivers/i2c/busses/i2c-altera.c ALTERA MAILBOX DRIVER M: Ley Foon Tan L: nios2-dev@lists.rocketboards.org (moderated for non-subscribers) S: Maintained F: drivers/mailbox/mailbox-altera.c ALTERA PIO DRIVER M: Tien Hock Loh L: linux-gpio@vger.kernel.org S: Maintained F: drivers/gpio/gpio-altera.c ALTERA SYSTEM RESOURCE DRIVER FOR ARRIA10 DEVKIT M: Thor Thayer S: Maintained F: drivers/gpio/gpio-altera-a10sr.c F: drivers/mfd/altera-a10sr.c F: drivers/reset/reset-a10sr.c F: include/linux/mfd/altera-a10sr.h F: include/dt-bindings/reset/altr,rst-mgr-a10sr.h ALTERA TRIPLE SPEED ETHERNET DRIVER M: Thor Thayer L: netdev@vger.kernel.org L: nios2-dev@lists.rocketboards.org (moderated for non-subscribers) S: Maintained F: drivers/net/ethernet/altera/ ALTERA UART/JTAG UART SERIAL DRIVERS M: Tobias Klauser L: linux-serial@vger.kernel.org L: nios2-dev@lists.rocketboards.org (moderated for non-subscribers) S: Maintained F: drivers/tty/serial/altera_uart.c F: drivers/tty/serial/altera_jtaguart.c F: include/linux/altera_uart.h F: include/linux/altera_jtaguart.h AMAZON ETHERNET DRIVERS M: Netanel Belgazal R: Saeed Bishara R: Zorik Machulsky L: netdev@vger.kernel.org S: Supported F: Documentation/networking/device_drivers/amazon/ena.txt F: drivers/net/ethernet/amazon/ AMD CRYPTOGRAPHIC COPROCESSOR (CCP) DRIVER M: Tom Lendacky M: Gary Hook L: linux-crypto@vger.kernel.org S: Supported F: drivers/crypto/ccp/ F: include/linux/ccp.h AMD DISPLAY CORE M: Harry Wentland M: Leo Li L: amd-gfx@lists.freedesktop.org T: git git://people.freedesktop.org/~agd5f/linux S: Supported F: drivers/gpu/drm/amd/display/ AMD FAM15H PROCESSOR POWER MONITORING DRIVER M: Huang Rui L: linux-hwmon@vger.kernel.org S: Supported F: 
Documentation/hwmon/fam15h_power F: drivers/hwmon/fam15h_power.c AMD GEODE CS5536 USB DEVICE CONTROLLER DRIVER L: linux-geode@lists.infradead.org (moderated for non-subscribers) S: Orphan F: drivers/usb/gadget/udc/amd5536udc.* AMD GEODE PROCESSOR/CHIPSET SUPPORT P: Andres Salomon L: linux-geode@lists.infradead.org (moderated for non-subscribers) W: http://www.amd.com/us-en/ConnectivitySolutions/TechnicalResources/0,,50_2334_2452_11363,00.html S: Supported F: drivers/char/hw_random/geode-rng.c F: drivers/crypto/geode* F: drivers/video/fbdev/geode/ F: arch/x86/include/asm/geode.h AMD IOMMU (AMD-VI) M: Joerg Roedel L: iommu@lists.linux-foundation.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git S: Maintained F: drivers/iommu/amd_iommu*.[ch] F: include/linux/amd-iommu.h AMD KFD M: Oded Gabbay L: dri-devel@lists.freedesktop.org T: git git://people.freedesktop.org/~gabbayo/linux.git S: Supported F: drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c F: drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h F: drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c F: drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c F: drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c F: drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c F: drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c F: drivers/gpu/drm/amd/amdkfd/ F: drivers/gpu/drm/amd/include/cik_structs.h F: drivers/gpu/drm/amd/include/kgd_kfd_interface.h F: drivers/gpu/drm/amd/include/vi_structs.h F: drivers/gpu/drm/amd/include/v9_structs.h F: include/uapi/linux/kfd_ioctl.h AMD POWERPLAY M: Rex Zhu M: Evan Quan L: amd-gfx@lists.freedesktop.org S: Supported F: drivers/gpu/drm/amd/powerplay/ T: git git://people.freedesktop.org/~agd5f/linux AMD SEATTLE DEVICE TREE SUPPORT M: Brijesh Singh M: Suravee Suthikulpanit M: Tom Lendacky S: Supported F: arch/arm64/boot/dts/amd/ AMD XGBE DRIVER M: Tom Lendacky L: netdev@vger.kernel.org S: Supported F: drivers/net/ethernet/amd/xgbe/ F: arch/arm64/boot/dts/amd/amd-seattle-xgbe*.dtsi ANALOG DEVICES INC AD5686 DRIVER M: Stefan Popa L: linux-pm@vger.kernel.org W: http://ez.analog.com/community/linux-device-drivers S: Supported F: drivers/iio/dac/ad5686* F: drivers/iio/dac/ad5696* ANALOG DEVICES INC AD5758 DRIVER M: Stefan Popa L: linux-iio@vger.kernel.org W: http://ez.analog.com/community/linux-device-drivers S: Supported F: drivers/iio/dac/ad5758.c F: Documentation/devicetree/bindings/iio/dac/ad5758.txt ANALOG DEVICES INC AD7124 DRIVER M: Stefan Popa L: linux-iio@vger.kernel.org W: http://ez.analog.com/community/linux-device-drivers S: Supported F: drivers/iio/adc/ad7124.c F: Documentation/devicetree/bindings/iio/adc/adi,ad7124.txt ANALOG DEVICES INC AD9389B DRIVER M: Hans Verkuil L: linux-media@vger.kernel.org S: Maintained F: drivers/media/i2c/ad9389b* ANALOG DEVICES INC ADGS1408 DRIVER M: Mircea Caprioru S: Supported F: drivers/mux/adgs1408.c F: Documentation/devicetree/bindings/mux/adi,adgs1408.txt ANALOG DEVICES INC ADP5061 DRIVER M: Stefan Popa L: linux-pm@vger.kernel.org W: http://ez.analog.com/community/linux-device-drivers S: Supported F: drivers/power/supply/adp5061.c ANALOG DEVICES INC ADV7180 DRIVER M: Lars-Peter Clausen L: linux-media@vger.kernel.org W: http://ez.analog.com/community/linux-device-drivers S: Supported F: drivers/media/i2c/adv7180.c ANALOG DEVICES INC ADV748X DRIVER M: Kieran Bingham L: linux-media@vger.kernel.org S: Maintained F: drivers/media/i2c/adv748x/* ANALOG DEVICES INC ADV7511 DRIVER M: Hans Verkuil L: linux-media@vger.kernel.org S: Maintained F: drivers/media/i2c/adv7511* ANALOG DEVICES 
INC ADV7604 DRIVER M: Hans Verkuil L: linux-media@vger.kernel.org S: Maintained F: drivers/media/i2c/adv7604* ANALOG DEVICES INC ADV7842 DRIVER M: Hans Verkuil L: linux-media@vger.kernel.org S: Maintained F: drivers/media/i2c/adv7842* ANALOG DEVICES INC ASOC CODEC DRIVERS M: Lars-Peter Clausen L: alsa-devel@alsa-project.org (moderated for non-subscribers) W: http://wiki.analog.com/ W: http://ez.analog.com/community/linux-device-drivers S: Supported F: sound/soc/codecs/adau* F: sound/soc/codecs/adav* F: sound/soc/codecs/ad1* F: sound/soc/codecs/ad7* F: sound/soc/codecs/ssm* F: sound/soc/codecs/sigmadsp.* ANALOG DEVICES INC DMA DRIVERS M: Lars-Peter Clausen W: http://ez.analog.com/community/linux-device-drivers S: Supported F: drivers/dma/dma-axi-dmac.c ANALOG DEVICES INC IIO DRIVERS M: Lars-Peter Clausen M: Michael Hennerich W: http://wiki.analog.com/ W: http://ez.analog.com/community/linux-device-drivers S: Supported F: Documentation/ABI/testing/sysfs-bus-iio-frequency-ad9523 F: Documentation/ABI/testing/sysfs-bus-iio-frequency-adf4350 F: drivers/iio/*/ad* F: drivers/iio/adc/ltc2497* X: drivers/iio/*/adjd* F: drivers/staging/iio/*/ad* ANDES ARCHITECTURE M: Greentime Hu M: Vincent Chen T: git https://github.com/andestech/linux.git S: Supported F: arch/nds32/ F: Documentation/devicetree/bindings/interrupt-controller/andestech,ativic32.txt F: Documentation/devicetree/bindings/nds32/ K: nds32 N: nds32 ANDROID CONFIG FRAGMENTS M: Rob Herring S: Supported F: kernel/configs/android* ANDROID DRIVERS M: Greg Kroah-Hartman M: Arve Hjønnevåg M: Todd Kjos M: Martijn Coenen M: Joel Fernandes M: Christian Brauner T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging.git L: devel@driverdev.osuosl.org S: Supported F: drivers/android/ F: drivers/staging/android/ ANDROID GOLDFISH PIC DRIVER M: Miodrag Dinic S: Supported F: Documentation/devicetree/bindings/interrupt-controller/google,goldfish-pic.txt F: drivers/irqchip/irq-goldfish-pic.c ANDROID GOLDFISH RTC DRIVER M: Miodrag Dinic S: Supported F: Documentation/devicetree/bindings/rtc/google,goldfish-rtc.txt F: drivers/rtc/rtc-goldfish.c ANDROID ION DRIVER M: Laura Abbott M: Sumit Semwal L: devel@driverdev.osuosl.org L: dri-devel@lists.freedesktop.org L: linaro-mm-sig@lists.linaro.org (moderated for non-subscribers) S: Supported F: drivers/staging/android/ion F: drivers/staging/android/uapi/ion.h AOA (Apple Onboard Audio) ALSA DRIVER M: Johannes Berg L: linuxppc-dev@lists.ozlabs.org L: alsa-devel@alsa-project.org (moderated for non-subscribers) S: Maintained F: sound/aoa/ APEX EMBEDDED SYSTEMS STX104 IIO DRIVER M: William Breathitt Gray L: linux-iio@vger.kernel.org S: Maintained F: drivers/iio/adc/stx104.c APM DRIVER M: Jiri Kosina S: Odd fixes T: git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/apm.git F: arch/x86/kernel/apm_32.c F: include/linux/apm_bios.h F: include/uapi/linux/apm_bios.h F: drivers/char/apm-emulation.c APPARMOR SECURITY MODULE M: John Johansen L: apparmor@lists.ubuntu.com (subscribers-only, general discussion) W: wiki.apparmor.net T: git git://git.kernel.org/pub/scm/linux/kernel/git/jj/linux-apparmor S: Supported F: security/apparmor/ F: Documentation/admin-guide/LSM/apparmor.rst APPLE BCM5974 MULTITOUCH DRIVER M: Henrik Rydberg L: linux-input@vger.kernel.org S: Odd fixes F: drivers/input/mouse/bcm5974.c APPLE SMC DRIVER M: Henrik Rydberg L: linux-hwmon@vger.kernel.org S: Odd fixes F: drivers/hwmon/applesmc.c APPLETALK NETWORK LAYER L: netdev@vger.kernel.org S: Odd fixes F: drivers/net/appletalk/ F: net/appletalk/ 
APPLIED MICRO (APM) X-GENE DEVICE TREE SUPPORT
M:	Duc Dang
S:	Supported
F:	arch/arm64/boot/dts/apm/

APPLIED MICRO (APM) X-GENE SOC EDAC
M:	Loc Ho
S:	Supported
F:	drivers/edac/xgene_edac.c
F:	Documentation/devicetree/bindings/edac/apm-xgene-edac.txt

APPLIED MICRO (APM) X-GENE SOC ETHERNET (V2) DRIVER
M:	Iyappan Subramanian
M:	Keyur Chudgar
S:	Supported
F:	drivers/net/ethernet/apm/xgene-v2/

APPLIED MICRO (APM) X-GENE SOC ETHERNET DRIVER
M:	Iyappan Subramanian
M:	Keyur Chudgar
M:	Quan Nguyen
S:	Supported
F:	drivers/net/ethernet/apm/xgene/
F:	drivers/net/phy/mdio-xgene.c
F:	Documentation/devicetree/bindings/net/apm-xgene-enet.txt
F:	Documentation/devicetree/bindings/net/apm-xgene-mdio.txt

APPLIED MICRO (APM) X-GENE SOC PMU
M:	Tai Nguyen
S:	Supported
F:	drivers/perf/xgene_pmu.c
F:	Documentation/perf/xgene-pmu.txt
F:	Documentation/devicetree/bindings/perf/apm-xgene-pmu.txt

APTINA CAMERA SENSOR PLL
M:	Laurent Pinchart
L:	linux-media@vger.kernel.org
S:	Maintained
F:	drivers/media/i2c/aptina-pll.*

ARC FRAMEBUFFER DRIVER
M:	Jaya Kumar
S:	Maintained
F:	drivers/video/fbdev/arcfb.c
F:	drivers/video/fbdev/core/fb_defio.c

ARC PGU DRM DRIVER
M:	Alexey Brodkin
S:	Supported
F:	drivers/gpu/drm/arc/
F:	Documentation/devicetree/bindings/display/snps,arcpgu.txt

ARCNET NETWORK LAYER
M:	Michael Grzeschik
L:	netdev@vger.kernel.org
S:	Maintained
F:	drivers/net/arcnet/
F:	include/uapi/linux/if_arcnet.h

ARM ARCHITECTED TIMER DRIVER
M:	Mark Rutland
M:	Marc Zyngier
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S:	Maintained
F:	arch/arm/include/asm/arch_timer.h
F:	arch/arm64/include/asm/arch_timer.h
F:	drivers/clocksource/arm_arch_timer.c

ARM INTEGRATOR, VERSATILE AND REALVIEW SUPPORT
M:	Linus Walleij
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S:	Maintained
F:	Documentation/devicetree/bindings/arm/arm-boards
F:	Documentation/devicetree/bindings/auxdisplay/arm-charlcd.txt
F:	Documentation/devicetree/bindings/clock/arm-integrator.txt
F:	Documentation/devicetree/bindings/interrupt-controller/arm,versatile-fpga-irq.txt
F:	Documentation/devicetree/bindings/mtd/arm-versatile.txt
F:	arch/arm/mach-integrator/
F:	arch/arm/mach-realview/
F:	arch/arm/mach-versatile/
F:	arch/arm/plat-versatile/
F:	arch/arm/boot/dts/arm-realview-*
F:	arch/arm/boot/dts/integrator*
F:	arch/arm/boot/dts/versatile*
F:	drivers/clk/versatile/
F:	drivers/i2c/busses/i2c-versatile.c
F:	drivers/irqchip/irq-versatile-fpga.c
F:	drivers/mtd/maps/physmap_of_versatile.c
F:	drivers/power/reset/arm-versatile-reboot.c
F:	drivers/soc/versatile/

ARM HDLCD DRM DRIVER
M:	Liviu Dudau
S:	Supported
F:	drivers/gpu/drm/arm/hdlcd_*
F:	Documentation/devicetree/bindings/display/arm,hdlcd.txt

ARM MALI-DP DRM DRIVER
M:	Liviu Dudau
M:	Brian Starkey
M:	Mali DP Maintainers
S:	Supported
F:	drivers/gpu/drm/arm/
F:	Documentation/devicetree/bindings/display/arm,malidp.txt

ARM MFM AND FLOPPY DRIVERS
M:	Ian Molton
S:	Maintained
F:	arch/arm/lib/floppydma.S
F:	arch/arm/include/asm/floppy.h

ARM PMU PROFILING AND DEBUGGING
M:	Will Deacon
M:	Mark Rutland
S:	Maintained
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
F:	arch/arm*/kernel/perf_*
F:	arch/arm/oprofile/common.c
F:	arch/arm*/kernel/hw_breakpoint.c
F:	arch/arm*/include/asm/hw_breakpoint.h
F:	arch/arm*/include/asm/perf_event.h
F:	drivers/perf/*
F:	include/linux/perf/arm_pmu.h
F:	Documentation/devicetree/bindings/arm/pmu.txt
F:	Documentation/devicetree/bindings/perf/

ARM PORT
M:	Russell King
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
W:	http://www.armlinux.org.uk/
S:	Odd Fixes
T:	git git://git.armlinux.org.uk/~rmk/linux-arm.git
F:	arch/arm/
X:	arch/arm/boot/dts/

ARM PRIMECELL AACI PL041 DRIVER
M:	Russell King
S:	Odd Fixes
F:	sound/arm/aaci.*

ARM PRIMECELL BUS SUPPORT
M:	Russell King
S:	Odd Fixes
F:	drivers/amba/
F:	include/linux/amba/bus.h

ARM PRIMECELL CLCD PL110 DRIVER
M:	Russell King
S:	Odd Fixes
F:	drivers/video/fbdev/amba-clcd.*

ARM PRIMECELL KMI PL050 DRIVER
M:	Russell King
S:	Odd Fixes
F:	drivers/input/serio/ambakmi.*
F:	include/linux/amba/kmi.h

ARM PRIMECELL MMCI PL180/1 DRIVER
M:	Russell King
S:	Odd Fixes
F:	drivers/mmc/host/mmci.*
F:	include/linux/amba/mmci.h

ARM PRIMECELL SSP PL022 SPI DRIVER
M:	Linus Walleij
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S:	Maintained
F:	Documentation/devicetree/bindings/spi/spi_pl022.txt
F:	drivers/spi/spi-pl022.c

ARM PRIMECELL UART PL010 AND PL011 DRIVERS
M:	Russell King
S:	Odd Fixes
F:	drivers/tty/serial/amba-pl01*.c
F:	include/linux/amba/serial.h

ARM PRIMECELL VIC PL190/PL192 DRIVER
M:	Linus Walleij
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S:	Maintained
F:	Documentation/devicetree/bindings/interrupt-controller/arm,vic.txt
F:	drivers/irqchip/irq-vic.c

ARM SMMU DRIVERS
M:	Will Deacon
R:	Robin Murphy
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S:	Maintained
F:	drivers/iommu/arm-smmu.c
F:	drivers/iommu/arm-smmu-v3.c
F:	drivers/iommu/io-pgtable-arm.c
F:	drivers/iommu/io-pgtable-arm-v7s.c

ARM SUB-ARCHITECTURES
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S:	Maintained
F:	arch/arm/mach-*/
F:	arch/arm/plat-*/
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc.git

ARM/ACTIONS SEMI ARCHITECTURE
M:	Andreas Färber
R:	Manivannan Sadhasivam
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S:	Maintained
N:	owl
F:	arch/arm/mach-actions/
F:	arch/arm/boot/dts/owl-*
F:	arch/arm64/boot/dts/actions/
F:	drivers/clk/actions/
F:	drivers/clocksource/timer-owl*
F:	drivers/dma/owl-dma.c
F:	drivers/i2c/busses/i2c-owl.c
F:	drivers/pinctrl/actions/*
F:	drivers/soc/actions/
F:	include/dt-bindings/power/owl-*
F:	include/linux/soc/actions/
F:	Documentation/devicetree/bindings/arm/actions.txt
F:	Documentation/devicetree/bindings/clock/actions,owl-cmu.txt
F:	Documentation/devicetree/bindings/dma/owl-dma.txt
F:	Documentation/devicetree/bindings/i2c/i2c-owl.txt
F:	Documentation/devicetree/bindings/pinctrl/actions,s900-pinctrl.txt
F:	Documentation/devicetree/bindings/power/actions,owl-sps.txt
F:	Documentation/devicetree/bindings/timer/actions,owl-timer.txt

ARM/ADS SPHERE MACHINE SUPPORT
M:	Lennert Buytenhek
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S:	Maintained

ARM/AFEB9260 MACHINE SUPPORT
M:	Sergey Lapin
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S:	Maintained

ARM/AJECO 1ARM MACHINE SUPPORT
M:	Lennert Buytenhek
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S:	Maintained

ARM/Allwinner SoC Clock Support
M:	Emilio López
S:	Maintained
F:	drivers/clk/sunxi/

ARM/Allwinner sunXi SoC support
M:	Maxime Ripard
M:	Chen-Yu Tsai
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S:	Maintained
N:	sun[x456789]i
N:	sun50i
F:	arch/arm/mach-sunxi/
F:	arch/arm64/boot/dts/allwinner/
F:	drivers/clk/sunxi-ng/
F:	drivers/pinctrl/sunxi/
F:	drivers/soc/sunxi/
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/sunxi/linux.git

ARM/Amlogic Meson SoC CLOCK FRAMEWORK
M:	Neil Armstrong
M:	Jerome Brunet
L:	linux-amlogic@lists.infradead.org
S:	Maintained
F:	drivers/clk/meson/
F:	include/dt-bindings/clock/meson*
F:	include/dt-bindings/clock/gxbb*
F:	Documentation/devicetree/bindings/clock/amlogic*

ARM/Amlogic Meson SoC support
M:	Kevin Hilman
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L:	linux-amlogic@lists.infradead.org
W:	http://linux-meson.com/
S:	Maintained
F:	arch/arm/mach-meson/
F:	arch/arm/boot/dts/meson*
F:	arch/arm64/boot/dts/amlogic/
F:	drivers/pinctrl/meson/
F:	drivers/mmc/host/meson*
F:	drivers/soc/amlogic/
N:	meson

ARM/Amlogic Meson SoC Sound Drivers
M:	Jerome Brunet
L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
S:	Maintained
F:	sound/soc/meson/
F:	Documentation/devicetree/bindings/sound/amlogic*

ARM/Annapurna Labs ALPINE ARCHITECTURE
M:	Tsahee Zidenberg
M:	Antoine Tenart
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S:	Maintained
F:	arch/arm/mach-alpine/
F:	arch/arm/boot/dts/alpine*
F:	arch/arm64/boot/dts/al/
F:	drivers/*/*alpine*

ARM/ARTPEC MACHINE SUPPORT
M:	Jesper Nilsson
M:	Lars Persson
S:	Maintained
L:	linux-arm-kernel@axis.com
F:	arch/arm/mach-artpec
F:	arch/arm/boot/dts/artpec6*
F:	drivers/clk/axis
F:	drivers/crypto/axis
F:	drivers/pinctrl/pinctrl-artpec*
F:	Documentation/devicetree/bindings/pinctrl/axis,artpec6-pinctrl.txt

ARM/ASPEED I2C DRIVER
M:	Brendan Higgins
R:	Benjamin Herrenschmidt
R:	Joel Stanley
L:	linux-i2c@vger.kernel.org
L:	openbmc@lists.ozlabs.org (moderated for non-subscribers)
S:	Maintained
F:	drivers/irqchip/irq-aspeed-i2c-ic.c
F:	drivers/i2c/busses/i2c-aspeed.c
F:	Documentation/devicetree/bindings/interrupt-controller/aspeed,ast2400-i2c-ic.txt
F:	Documentation/devicetree/bindings/i2c/i2c-aspeed.txt

ARM/ASPEED MACHINE SUPPORT
M:	Joel Stanley
R:	Andrew Jeffery
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L:	linux-aspeed@lists.ozlabs.org (moderated for non-subscribers)
Q:	https://patchwork.ozlabs.org/project/linux-aspeed/list/
S:	Supported
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/joel/aspeed.git
F:	arch/arm/mach-aspeed/
F:	arch/arm/boot/dts/aspeed-*
N:	aspeed

ARM/CALXEDA HIGHBANK ARCHITECTURE
M:	Rob Herring
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S:	Maintained
F:	arch/arm/mach-highbank/
F:	arch/arm/boot/dts/highbank.dts
F:	arch/arm/boot/dts/ecx-*.dts*

ARM/CAVIUM NETWORKS CNS3XXX MACHINE SUPPORT
M:	Krzysztof Halasa
S:	Maintained
F:	arch/arm/mach-cns3xxx/

ARM/CAVIUM THUNDER NETWORK DRIVER
M:	Sunil Goutham
M:	Robert Richter
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S:	Supported
F:	drivers/net/ethernet/cavium/thunder/

ARM/CIRRUS LOGIC BK3 MACHINE SUPPORT
M:	Lukasz Majewski
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S:	Maintained
F:	arch/arm/mach-ep93xx/ts72xx.c

ARM/CIRRUS LOGIC CLPS711X ARM ARCHITECTURE
M:	Alexander Shiyan
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S:	Odd Fixes
N:	clps711x

ARM/CIRRUS LOGIC EDB9315A MACHINE SUPPORT
M:	Lennert Buytenhek
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S:	Maintained

ARM/CIRRUS LOGIC EP93XX ARM ARCHITECTURE
M:	Hartley Sweeten
M:	Alexander Sverdlin
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S:	Maintained
F:	arch/arm/mach-ep93xx/
F:	arch/arm/mach-ep93xx/include/mach/

ARM/CLKDEV SUPPORT
M:	Russell King
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S:	Maintained
T:	git git://git.armlinux.org.uk/~rmk/linux-arm.git clkdev
F:	drivers/clk/clkdev.c

ARM/COMPULAB CM-X270/EM-X270 and CM-X300 MACHINE SUPPORT
M:	Mike Rapoport
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S:	Maintained

ARM/CONEXANT DIGICOLOR MACHINE SUPPORT
M:	Baruch Siach
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S:	Maintained
F:	arch/arm/boot/dts/cx92755*
N:	digicolor

ARM/CONTEC MICRO9 MACHINE SUPPORT
M:	Hubert Feurstein
S:	Maintained
F:	arch/arm/mach-ep93xx/micro9.c

ARM/CORESIGHT FRAMEWORK AND DRIVERS
M:	Mathieu Poirier
R:	Suzuki K Poulose
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S:	Maintained
F:	drivers/hwtracing/coresight/*
F:	Documentation/trace/coresight.txt
F:	Documentation/trace/coresight-cpu-debug.txt
F:	Documentation/devicetree/bindings/arm/coresight.txt
F:	Documentation/devicetree/bindings/arm/coresight-cpu-debug.txt
F:	Documentation/ABI/testing/sysfs-bus-coresight-devices-*
F:	tools/perf/arch/arm/util/pmu.c
F:	tools/perf/arch/arm/util/auxtrace.c
F:	tools/perf/arch/arm/util/cs-etm.c
F:	tools/perf/arch/arm/util/cs-etm.h
F:	tools/perf/util/cs-etm.*
F:	tools/perf/util/cs-etm-decoder/*

ARM/CORGI MACHINE SUPPORT
M:	Richard Purdie
S:	Maintained

ARM/CORTINA SYSTEMS GEMINI ARM ARCHITECTURE
M:	Hans Ulli Kroll
M:	Linus Walleij
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
T:	git git://github.com/ulli-kroll/linux.git
S:	Maintained
F:	Documentation/devicetree/bindings/arm/gemini.txt
F:	Documentation/devicetree/bindings/pinctrl/cortina,gemini-pinctrl.txt
F:	Documentation/devicetree/bindings/net/cortina,gemini-ethernet.txt
F:	Documentation/devicetree/bindings/rtc/faraday,ftrtc010.txt
F:	arch/arm/mach-gemini/
F:	drivers/net/ethernet/cortina/
F:	drivers/pinctrl/pinctrl-gemini.c
F:	drivers/rtc/rtc-ftrtc010.c

ARM/CSR SIRFPRIMA2 MACHINE SUPPORT
M:	Barry Song
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/baohua/linux.git
S:	Maintained
F:	arch/arm/boot/dts/prima2*
F:	arch/arm/mach-prima2/
F:	drivers/clk/sirf/
F:	drivers/clocksource/timer-prima2.c
F:	drivers/clocksource/timer-atlas7.c
N:	[^a-z]sirf
X:	drivers/gnss

ARM/EBSA110 MACHINE SUPPORT
M:	Russell King
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
W:	http://www.armlinux.org.uk/
S:	Maintained
F:	arch/arm/mach-ebsa110/
F:	drivers/net/ethernet/amd/am79c961a.*

ARM/ENERGY MICRO (SILICON LABS) EFM32 SUPPORT
M:	Uwe Kleine-König
R:	Pengutronix Kernel Team
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S:	Maintained
N:	efm32

ARM/EZX SMARTPHONES (A780, A910, A1200, E680, ROKR E2 and ROKR E6)
M:	Robert Jarzmik
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S:	Maintained
F:	arch/arm/mach-pxa/ezx.c

ARM/FARADAY FA526 PORT
M:	Hans Ulli Kroll
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S:	Maintained
T:	git git://git.berlios.de/gemini-board
F:	arch/arm/mm/*-fa*

ARM/FOOTBRIDGE ARCHITECTURE
M:	Russell King
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
W:	http://www.armlinux.org.uk/
S:	Maintained
F:	arch/arm/include/asm/hardware/dec21285.h
F:	arch/arm/mach-footbridge/

ARM/FREESCALE IMX / MXC ARM ARCHITECTURE
M:	Shawn Guo
M:	Sascha Hauer
R:	Pengutronix Kernel Team
R:	Fabio Estevam
R:	NXP Linux Team
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S:	Maintained
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo/linux.git
F:	arch/arm/mach-imx/
F:	arch/arm/mach-mxs/
F:	arch/arm/boot/dts/imx*
F:	arch/arm/configs/imx*_defconfig
F:	arch/arm64/boot/dts/freescale/imx*
F:	drivers/clk/imx/
F:	drivers/firmware/imx/
F:	drivers/soc/imx/
F:	include/linux/firmware/imx/
F:	include/soc/imx/

ARM/FREESCALE VYBRID ARM ARCHITECTURE
M:	Shawn Guo
M:	Sascha Hauer
R:	Pengutronix Kernel Team
R:	Stefan Agner
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S:	Maintained
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo/linux.git
F:	arch/arm/mach-imx/*vf610*
F:	arch/arm/boot/dts/vf*

ARM/FREESCALE LAYERSCAPE ARM ARCHITECTURE
M:	Shawn Guo
M:	Li Yang
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S:	Maintained
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo/linux.git
F:	arch/arm/boot/dts/ls1021a*
F:	arch/arm64/boot/dts/freescale/fsl-*
F:	arch/arm64/boot/dts/freescale/qoriq-*

ARM/GLOMATION GESBC9312SX MACHINE SUPPORT
M:	Lennert Buytenhek
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S:	Maintained

ARM/GUMSTIX MACHINE SUPPORT
M:	Steve Sakoman
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S:	Maintained

ARM/H4700 (HP IPAQ HX4700) MACHINE SUPPORT
M:	Philipp Zabel
M:	Paul Parsons
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S:	Maintained
F:	arch/arm/mach-pxa/hx4700.c
F:	arch/arm/mach-pxa/include/mach/hx4700.h
F:	sound/soc/pxa/hx4700.c

ARM/HISILICON SOC SUPPORT
M:	Wei Xu
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
W:	http://www.hisilicon.com
S:	Supported
T:	git git://github.com/hisilicon/linux-hisi.git
F:	arch/arm/mach-hisi/
F:	arch/arm/boot/dts/hi3*
F:	arch/arm/boot/dts/hip*
F:	arch/arm/boot/dts/hisi*
F:	arch/arm64/boot/dts/hisilicon/

ARM/HP JORNADA 7XX MACHINE SUPPORT
M:	Kristoffer Ericson
W:	www.jlime.com
S:	Maintained
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/kristoffer/linux-hpc.git
F:	arch/arm/mach-sa1100/jornada720.c
F:	arch/arm/mach-sa1100/include/mach/jornada720.h

ARM/IGEP MACHINE SUPPORT
M:	Enric Balletbo i Serra
M:	Javier Martinez Canillas
L:	linux-omap@vger.kernel.org
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S:	Maintained
F:	arch/arm/boot/dts/omap3-igep*

ARM/INCOME PXA270 SUPPORT
M:	Marek Vasut
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S:	Maintained
F:	arch/arm/mach-pxa/colibri-pxa270-income.c

ARM/INTEL IOP13XX ARM ARCHITECTURE
M:	Lennert Buytenhek
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S:	Maintained

ARM/INTEL IOP32X ARM ARCHITECTURE
M:	Lennert Buytenhek
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S:	Maintained

ARM/INTEL IOP33X ARM ARCHITECTURE
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S:	Orphan

ARM/INTEL IQ81342EX MACHINE SUPPORT
M:	Lennert Buytenhek
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S:	Maintained

ARM/INTEL IXDP2850 MACHINE SUPPORT
M:	Lennert Buytenhek
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S:	Maintained

ARM/INTEL IXP4XX ARM ARCHITECTURE
M:	Imre Kaloz
M:	Krzysztof Halasa
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S:	Maintained
F:	arch/arm/mach-ixp4xx/

ARM/INTEL RESEARCH IMOTE/STARGATE 2 MACHINE SUPPORT
M:	Jonathan Cameron
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S:	Maintained
F:	arch/arm/mach-pxa/stargate2.c
F:	drivers/pcmcia/pxa2xx_stargate2.c

ARM/INTEL XSC3 (MANZANO) ARM CORE
M:	Lennert Buytenhek
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S:	Maintained

ARM/IP FABRICS DOUBLE ESPRESSO MACHINE SUPPORT
M:	Lennert Buytenhek
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S:	Maintained

ARM/LG1K ARCHITECTURE
M:	Chanho Min
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S:	Maintained
F:	arch/arm64/boot/dts/lg/

ARM/LOGICPD PXA270 MACHINE SUPPORT
M:	Lennert Buytenhek
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S:	Maintained

ARM/LPC18XX ARCHITECTURE
M:	Vladimir Zapolskiy
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S:	Maintained
F:	arch/arm/boot/dts/lpc43*
F:	drivers/i2c/busses/i2c-lpc2k.c
F:	drivers/memory/pl172.c
F:	drivers/mtd/spi-nor/nxp-spifi.c
F:	drivers/rtc/rtc-lpc24xx.c
N:	lpc18xx

ARM/LPC32XX SOC SUPPORT
M:	Vladimir Zapolskiy
M:	Sylvain Lemieux
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
T:	git git://github.com/vzapolskiy/linux-lpc32xx.git
S:	Maintained
F:	arch/arm/boot/dts/lpc32*
F:	arch/arm/mach-lpc32xx/
F:	drivers/i2c/busses/i2c-pnx.c
F:	drivers/net/ethernet/nxp/lpc_eth.c
F:	drivers/usb/host/ohci-nxp.c
F:	drivers/watchdog/pnx4008_wdt.c
N:	lpc32xx

ARM/MAGICIAN MACHINE SUPPORT
M:	Philipp Zabel
S:	Maintained

ARM/Marvell Dove/MV78xx0/Orion SOC support
M:	Jason Cooper
M:	Andrew Lunn
M:	Sebastian Hesselbarth
M:	Gregory Clement
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S:	Maintained
F:	Documentation/devicetree/bindings/soc/dove/
F:	arch/arm/mach-dove/
F:	arch/arm/mach-mv78xx0/
F:	arch/arm/mach-orion5x/
F:	arch/arm/plat-orion/
F:	arch/arm/boot/dts/dove*
F:	arch/arm/boot/dts/orion5x*

ARM/Marvell Kirkwood and Armada 370, 375, 38x, 39x, XP, 3700, 7K/8K SOC support
M:	Jason Cooper
M:	Andrew Lunn
M:	Gregory Clement
M:	Sebastian Hesselbarth
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S:	Maintained
F:	arch/arm/boot/dts/armada*
F:	arch/arm/boot/dts/kirkwood*
F:	arch/arm/configs/mvebu_*_defconfig
F:	arch/arm/mach-mvebu/
F:	arch/arm64/boot/dts/marvell/armada*
F:	drivers/cpufreq/armada-37xx-cpufreq.c
F:	drivers/cpufreq/mvebu-cpufreq.c
F:	drivers/irqchip/irq-armada-370-xp.c
F:	drivers/irqchip/irq-mvebu-*
F:	drivers/pinctrl/mvebu/
F:	drivers/rtc/rtc-armada38x.c

ARM/Mediatek RTC DRIVER
M:	Eddie Huang
M:	Sean Wang
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L:	linux-mediatek@lists.infradead.org (moderated for non-subscribers)
S:	Maintained
F:	Documentation/devicetree/bindings/rtc/rtc-mt7622.txt
F:	drivers/rtc/rtc-mt6397.c
F:	drivers/rtc/rtc-mt7622.c

ARM/Mediatek SoC support
M:	Matthias Brugger
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L:	linux-mediatek@lists.infradead.org (moderated for non-subscribers)
W:	https://mtk.bcnfs.org/
C:	irc://chat.freenode.net/linux-mediatek
S:	Maintained
F:	arch/arm/boot/dts/mt6*
F:	arch/arm/boot/dts/mt7*
F:	arch/arm/boot/dts/mt8*
F:	arch/arm/mach-mediatek/
F:	arch/arm64/boot/dts/mediatek/
F:	drivers/soc/mediatek/
N:	mtk
N:	mt[678]
K:	mediatek

ARM/Mediatek USB3 PHY DRIVER
M:	Chunfeng Yun
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L:	linux-mediatek@lists.infradead.org (moderated for non-subscribers)
S:	Maintained
F:	drivers/phy/mediatek/
F:	Documentation/devicetree/bindings/phy/phy-mtk-*

ARM/MICREL KS8695 ARCHITECTURE
M:	Greg Ungerer
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
F:	arch/arm/mach-ks8695/
S:	Odd Fixes
Fixes ARM/Microchip (AT91) SoC support M: Nicolas Ferre M: Alexandre Belloni M: Ludovic Desroches L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) W: http://www.linux4sam.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/at91/linux.git S: Supported N: at91 N: atmel F: arch/arm/mach-at91/ F: include/soc/at91/ F: arch/arm/boot/dts/at91*.dts F: arch/arm/boot/dts/at91*.dtsi F: arch/arm/boot/dts/sama*.dts F: arch/arm/boot/dts/sama*.dtsi F: arch/arm/include/debug/at91.S F: drivers/memory/atmel* F: drivers/watchdog/sama5d4_wdt.c X: drivers/input/touchscreen/atmel_mxt_ts.c X: drivers/net/wireless/atmel/ ARM/MIOA701 MACHINE SUPPORT M: Robert Jarzmik L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) F: arch/arm/mach-pxa/mioa701.c S: Maintained ARM/NEC MOBILEPRO 900/c MACHINE SUPPORT M: Michael Petchkovsky S: Maintained ARM/NOMADIK/U300/Ux500 ARCHITECTURES M: Linus Walleij L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: arch/arm/mach-nomadik/ F: arch/arm/mach-u300/ F: arch/arm/mach-ux500/ F: arch/arm/boot/dts/ste-* F: drivers/clk/clk-nomadik.c F: drivers/clk/clk-u300.c F: drivers/clocksource/clksrc-dbx500-prcmu.c F: drivers/clocksource/timer-u300.c F: drivers/dma/coh901318* F: drivers/dma/ste_dma40* F: drivers/hwspinlock/u8500_hsem.c F: drivers/i2c/busses/i2c-nomadik.c F: drivers/i2c/busses/i2c-stu300.c F: drivers/mfd/ab3100* F: drivers/mfd/ab8500* F: drivers/mfd/abx500* F: drivers/mfd/dbx500* F: drivers/mfd/db8500* F: drivers/pinctrl/nomadik/ F: drivers/pinctrl/pinctrl-coh901* F: drivers/pinctrl/pinctrl-u300.c F: drivers/rtc/rtc-ab3100.c F: drivers/rtc/rtc-ab8500.c F: drivers/rtc/rtc-coh901331.c F: drivers/rtc/rtc-pl031.c F: drivers/watchdog/coh901327_wdt.c F: Documentation/devicetree/bindings/arm/ste-* F: Documentation/devicetree/bindings/arm/ux500/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-nomadik.git ARM/NUVOTON NPCM ARCHITECTURE M: Avi Fishman M: Tomer Maimon R: Patrick Venture R: Nancy Yuen R: Brendan Higgins L: openbmc@lists.ozlabs.org (moderated for non-subscribers) S: Supported F: arch/arm/mach-npcm/ F: arch/arm/boot/dts/nuvoton-npcm* F: include/dt-bindings/clock/nuvoton,npcm7xx-clks.h F: drivers/*/*npcm* F: Documentation/devicetree/bindings/*/*npcm* F: Documentation/devicetree/bindings/*/*/*npcm* ARM/NUVOTON W90X900 ARM ARCHITECTURE M: Wan ZongShun L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) W: http://www.mcuos.com S: Maintained F: arch/arm/mach-w90x900/ F: drivers/input/keyboard/w90p910_keypad.c F: drivers/input/touchscreen/w90p910_ts.c F: drivers/watchdog/nuc900_wdt.c F: drivers/net/ethernet/nuvoton/w90p910_ether.c F: drivers/mtd/nand/raw/nuc900_nand.c F: drivers/rtc/rtc-nuc900.c F: drivers/spi/spi-nuc900.c F: drivers/usb/host/ehci-w90x900.c F: drivers/video/fbdev/nuc900fb.c ARM/OPENMOKO NEO FREERUNNER (GTA02) MACHINE SUPPORT M: Nelson Castillo L: openmoko-kernel@lists.openmoko.org (subscribers-only) W: http://wiki.openmoko.org/wiki/Neo_FreeRunner S: Supported ARM/Orion SoC/Technologic Systems TS-78xx platform support M: Alexander Clouter L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) W: http://www.digriz.org.uk/ts78xx/kernel S: Maintained F: arch/arm/mach-orion5x/ts78xx-* ARM/OXNAS platform support M: Neil Armstrong L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) L: linux-oxnas@groups.io (moderated for non-subscribers) S: Maintained F: arch/arm/mach-oxnas/ F: arch/arm/boot/dts/ox8*.dts* N: oxnas 
ARM/PALM TREO SUPPORT
M: Tomas Cech
L: linux-arm-kernel@lists.infradead.org
W: http://hackndev.com
S: Maintained
F: arch/arm/mach-pxa/palmtreo.*

ARM/PALMTX,PALMT5,PALMLD,PALMTE2,PALMTC SUPPORT
M: Marek Vasut
L: linux-arm-kernel@lists.infradead.org
W: http://hackndev.com
S: Maintained
F: arch/arm/mach-pxa/include/mach/palmtx.h
F: arch/arm/mach-pxa/palmtx.c
F: arch/arm/mach-pxa/palmt5.*
F: arch/arm/mach-pxa/include/mach/palmld.h
F: arch/arm/mach-pxa/palmld.c
F: arch/arm/mach-pxa/palmte2.*
F: arch/arm/mach-pxa/include/mach/palmtc.h
F: arch/arm/mach-pxa/palmtc.c

ARM/PALMZ72 SUPPORT
M: Sergey Lapin
L: linux-arm-kernel@lists.infradead.org
W: http://hackndev.com
S: Maintained
F: arch/arm/mach-pxa/palmz72.*

ARM/PLEB SUPPORT
M: Peter Chubb
W: http://www.disy.cse.unsw.edu.au/Hardware/PLEB
S: Maintained

ARM/PT DIGITAL BOARD PORT
M: Stefan Eletzhofer
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
W: http://www.armlinux.org.uk/
S: Maintained

ARM/QUALCOMM SUPPORT
M: Andy Gross
M: David Brown
L: linux-arm-msm@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/soc/qcom/
F: arch/arm/boot/dts/qcom-*.dts
F: arch/arm/boot/dts/qcom-*.dtsi
F: arch/arm/mach-qcom/
F: arch/arm64/boot/dts/qcom/*
F: drivers/i2c/busses/i2c-qup.c
F: drivers/clk/qcom/
F: drivers/dma/qcom/
F: drivers/soc/qcom/
F: drivers/spi/spi-qup.c
F: drivers/tty/serial/msm_serial.c
F: drivers/*/pm8???-*
F: drivers/mfd/ssbi.c
F: drivers/firmware/qcom_scm*
T: git git://git.kernel.org/pub/scm/linux/kernel/git/agross/linux.git

ARM/RADISYS ENP2611 MACHINE SUPPORT
M: Lennert Buytenhek
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained

ARM/RDA MICRO ARCHITECTURE
M: Manivannan Sadhasivam
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: linux-unisoc@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: arch/arm/boot/dts/rda8810pl-*
F: drivers/clocksource/timer-rda.c
F: drivers/irqchip/irq-rda-intc.c
F: drivers/tty/serial/rda-uart.c
F: Documentation/devicetree/bindings/arm/rda.txt
F: Documentation/devicetree/bindings/interrupt-controller/rda,8810pl-intc.txt
F: Documentation/devicetree/bindings/serial/rda,8810pl-uart.txt
F: Documentation/devicetree/bindings/timer/rda,8810pl-timer.txt

ARM/REALTEK ARCHITECTURE
M: Andreas Färber
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: arch/arm64/boot/dts/realtek/
F: Documentation/devicetree/bindings/arm/realtek.txt

ARM/RENESAS ARM64 ARCHITECTURE
M: Simon Horman
M: Magnus Damm
L: linux-renesas-soc@vger.kernel.org
Q: http://patchwork.kernel.org/project/linux-renesas-soc/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/horms/renesas.git next
S: Supported
F: arch/arm64/boot/dts/renesas/
F: Documentation/devicetree/bindings/arm/shmobile.txt
F: drivers/soc/renesas/
F: include/linux/soc/renesas/

ARM/RISCPC ARCHITECTURE
M: Russell King
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
W: http://www.armlinux.org.uk/
S: Maintained
F: arch/arm/include/asm/hardware/entry-macro-iomd.S
F: arch/arm/include/asm/hardware/ioc.h
F: arch/arm/include/asm/hardware/iomd.h
F: arch/arm/include/asm/hardware/memc.h
F: arch/arm/mach-rpc/
F: drivers/net/ethernet/8390/etherh.c
F: drivers/net/ethernet/i825xx/ether1*
F: drivers/net/ethernet/seeq/ether3*
F: drivers/scsi/arm/

ARM/Rockchip SoC support
M: Heiko Stuebner
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: linux-rockchip@lists.infradead.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/mmind/linux-rockchip.git
S: Maintained
F: arch/arm/boot/dts/rk3*
F: arch/arm/boot/dts/rv1108*
F: arch/arm/mach-rockchip/
F: drivers/clk/rockchip/
F: drivers/i2c/busses/i2c-rk3x.c
F: drivers/*/*rockchip*
F: drivers/*/*/*rockchip*
F: sound/soc/rockchip/
N: rockchip

ARM/SAMSUNG EXYNOS ARM ARCHITECTURES
M: Kukjin Kim
M: Krzysztof Kozlowski
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
Q: https://patchwork.kernel.org/project/linux-samsung-soc/list/
S: Maintained
F: arch/arm/boot/dts/s3c*
F: arch/arm/boot/dts/s5p*
F: arch/arm/boot/dts/exynos*
F: arch/arm64/boot/dts/exynos/
F: arch/arm/plat-samsung/
F: arch/arm/mach-s3c24*/
F: arch/arm/mach-s3c64xx/
F: arch/arm/mach-s5p*/
F: arch/arm/mach-exynos*/
F: drivers/*/*s3c24*
F: drivers/*/*/*s3c24*
F: drivers/*/*s3c64xx*
F: drivers/*/*s5pv210*
F: drivers/memory/samsung/*
F: drivers/soc/samsung/*
F: Documentation/arm/Samsung/
F: Documentation/devicetree/bindings/arm/samsung/
F: Documentation/devicetree/bindings/sram/samsung-sram.txt
F: Documentation/devicetree/bindings/power/pd-samsung.txt
N: exynos

ARM/SAMSUNG MOBILE MACHINE SUPPORT
M: Kyungmin Park
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: arch/arm/mach-s5pv210/

ARM/SAMSUNG S5P SERIES 2D GRAPHICS ACCELERATION (G2D) SUPPORT
M: Kyungmin Park
M: Kamil Debski
M: Andrzej Hajda
L: linux-arm-kernel@lists.infradead.org
L: linux-media@vger.kernel.org
S: Maintained
F: drivers/media/platform/s5p-g2d/

ARM/SAMSUNG S5P SERIES HDMI CEC SUBSYSTEM SUPPORT
M: Marek Szyprowski
L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
L: linux-media@vger.kernel.org
S: Maintained
F: drivers/media/platform/s5p-cec/
F: Documentation/devicetree/bindings/media/s5p-cec.txt

ARM/SAMSUNG S5P SERIES JPEG CODEC SUPPORT
M: Andrzej Pietrasiewicz
M: Jacek Anaszewski
L: linux-arm-kernel@lists.infradead.org
L: linux-media@vger.kernel.org
S: Maintained
F: drivers/media/platform/s5p-jpeg/

ARM/SAMSUNG S5P SERIES Multi Format Codec (MFC) SUPPORT
M: Kyungmin Park
M: Kamil Debski
M: Jeongtae Park
M: Andrzej Hajda
L: linux-arm-kernel@lists.infradead.org
L: linux-media@vger.kernel.org
S: Maintained
F: drivers/media/platform/s5p-mfc/

ARM/SHMOBILE ARM ARCHITECTURE
M: Simon Horman
M: Magnus Damm
L: linux-renesas-soc@vger.kernel.org
Q: http://patchwork.kernel.org/project/linux-renesas-soc/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/horms/renesas.git next
S: Supported
F: arch/arm/boot/dts/emev2*
F: arch/arm/boot/dts/r7s*
F: arch/arm/boot/dts/r8a*
F: arch/arm/boot/dts/r9a*
F: arch/arm/boot/dts/sh*
F: arch/arm/configs/shmobile_defconfig
F: arch/arm/include/debug/renesas-scif.S
F: arch/arm/mach-shmobile/
F: Documentation/devicetree/bindings/arm/shmobile.txt
F: drivers/soc/renesas/
F: include/linux/soc/renesas/

ARM/SOCFPGA ARCHITECTURE
M: Dinh Nguyen
S: Maintained
F: arch/arm/mach-socfpga/
F: arch/arm/boot/dts/socfpga*
F: arch/arm/configs/socfpga_defconfig
F: arch/arm64/boot/dts/altera/
W: http://www.rocketboards.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/dinguyen/linux.git

ARM/SOCFPGA CLOCK FRAMEWORK SUPPORT
M: Dinh Nguyen
S: Maintained
F: drivers/clk/socfpga/

ARM/SOCFPGA EDAC SUPPORT
M: Thor Thayer
S: Maintained
F: drivers/edac/altera_edac.
ARM/SPREADTRUM SoC SUPPORT M: Orson Zhai M: Baolin Wang M: Chunyan Zhang S: Maintained F: arch/arm64/boot/dts/sprd N: sprd ARM/STI ARCHITECTURE M: Patrice Chotard L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) W: http://www.stlinux.com S: Maintained F: arch/arm/mach-sti/ F: arch/arm/boot/dts/sti* F: drivers/char/hw_random/st-rng.c F: drivers/clocksource/arm_global_timer.c F: drivers/clocksource/clksrc_st_lpc.c F: drivers/cpufreq/sti-cpufreq.c F: drivers/dma/st_fdma* F: drivers/i2c/busses/i2c-st.c F: drivers/media/rc/st_rc.c F: drivers/media/platform/sti/c8sectpfe/ F: drivers/mmc/host/sdhci-st.c F: drivers/phy/st/phy-miphy28lp.c F: drivers/phy/st/phy-stih407-usb.c F: drivers/pinctrl/pinctrl-st.c F: drivers/remoteproc/st_remoteproc.c F: drivers/remoteproc/st_slim_rproc.c F: drivers/reset/sti/ F: drivers/rtc/rtc-st-lpc.c F: drivers/tty/serial/st-asc.c F: drivers/usb/dwc3/dwc3-st.c F: drivers/usb/host/ehci-st.c F: drivers/usb/host/ohci-st.c F: drivers/watchdog/st_lpc_wdt.c F: drivers/ata/ahci_st.c F: include/linux/remoteproc/st_slim_rproc.h ARM/STM32 ARCHITECTURE M: Maxime Coquelin M: Alexandre Torgue L: linux-stm32@st-md-mailman.stormreply.com (moderated for non-subscribers) L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/atorgue/stm32.git stm32-next N: stm32 N: stm F: arch/arm/boot/dts/stm32* F: arch/arm/mach-stm32/ F: drivers/clocksource/armv7m_systick.c ARM/Synaptics SoC support M: Jisheng Zhang M: Sebastian Hesselbarth L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: arch/arm/mach-berlin/ F: arch/arm/boot/dts/berlin* F: arch/arm64/boot/dts/synaptics/ ARM/TANGO ARCHITECTURE M: Marc Gonzalez M: Mans Rullgard L: linux-arm-kernel@lists.infradead.org S: Odd Fixes N: tango ARM/TECHNOLOGIC SYSTEMS TS7250 MACHINE SUPPORT M: Lennert Buytenhek L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained ARM/TEGRA HDMI CEC SUBSYSTEM SUPPORT M: Hans Verkuil L: linux-tegra@vger.kernel.org L: linux-media@vger.kernel.org S: Maintained F: drivers/media/platform/tegra-cec/ F: Documentation/devicetree/bindings/media/tegra-cec.txt ARM/TETON BGA MACHINE SUPPORT M: "Mark F. 
Brown" L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained ARM/TEXAS INSTRUMENT AEMIF/EMIF DRIVERS M: Santosh Shilimkar L: linux-kernel@vger.kernel.org S: Maintained F: drivers/memory/*emif* ARM/TEXAS INSTRUMENTS K3 ARCHITECTURE M: Tero Kristo M: Nishanth Menon L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Supported F: Documentation/devicetree/bindings/arm/ti/k3.txt F: arch/arm64/boot/dts/ti/Makefile F: arch/arm64/boot/dts/ti/k3-* F: include/dt-bindings/pinctrl/k3.h ARM/TEXAS INSTRUMENT KEYSTONE ARCHITECTURE M: Santosh Shilimkar L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: arch/arm/mach-keystone/ F: arch/arm/boot/dts/keystone-* T: git git://git.kernel.org/pub/scm/linux/kernel/git/ssantosh/linux-keystone.git ARM/TEXAS INSTRUMENT KEYSTONE CLOCK FRAMEWORK M: Santosh Shilimkar L: linux-kernel@vger.kernel.org S: Maintained F: drivers/clk/keystone/ ARM/TEXAS INSTRUMENT KEYSTONE ClOCKSOURCE M: Santosh Shilimkar L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) L: linux-kernel@vger.kernel.org S: Maintained F: drivers/clocksource/timer-keystone.c ARM/TEXAS INSTRUMENT KEYSTONE RESET DRIVER M: Santosh Shilimkar L: linux-kernel@vger.kernel.org S: Maintained F: drivers/power/reset/keystone-reset.c ARM/THECUS N2100 MACHINE SUPPORT M: Lennert Buytenhek L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained ARM/TOSA MACHINE SUPPORT M: Dmitry Eremin-Solenikov M: Dirk Opfer S: Maintained ARM/UNIPHIER ARCHITECTURE M: Masahiro Yamada L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) T: git git://git.kernel.org/pub/scm/linux/kernel/git/masahiroy/linux-uniphier.git S: Maintained F: Documentation/devicetree/bindings/arm/socionext/uniphier.txt F: Documentation/devicetree/bindings/gpio/gpio-uniphier.txt F: Documentation/devicetree/bindings/pinctrl/socionext,uniphier-pinctrl.txt F: arch/arm/boot/dts/uniphier* F: arch/arm/include/asm/hardware/cache-uniphier.h F: arch/arm/mach-uniphier/ F: arch/arm/mm/cache-uniphier.c F: arch/arm64/boot/dts/socionext/uniphier* F: drivers/bus/uniphier-system-bus.c F: drivers/clk/uniphier/ F: drivers/dmaengine/uniphier-mdmac.c F: drivers/gpio/gpio-uniphier.c F: drivers/i2c/busses/i2c-uniphier* F: drivers/irqchip/irq-uniphier-aidet.c F: drivers/mmc/host/uniphier-sd.c F: drivers/pinctrl/uniphier/ F: drivers/reset/reset-uniphier.c F: drivers/tty/serial/8250/8250_uniphier.c N: uniphier ARM/Ux500 CLOCK FRAMEWORK SUPPORT M: Ulf Hansson L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) T: git git://git.linaro.org/people/ulfh/clk.git S: Maintained F: drivers/clk/ux500/ ARM/VERSATILE EXPRESS PLATFORM M: Liviu Dudau M: Sudeep Holla M: Lorenzo Pieralisi L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: arch/arm/boot/dts/vexpress* F: arch/arm64/boot/dts/arm/ F: arch/arm/mach-vexpress/ F: */*/vexpress* F: */*/*/vexpress* F: drivers/clk/versatile/clk-vexpress-osc.c F: drivers/clocksource/timer-versatile.c N: mps2 ARM/VFP SUPPORT M: Russell King L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) W: http://www.armlinux.org.uk/ S: Maintained F: arch/arm/vfp/ ARM/VOIPAC PXA270 SUPPORT M: Marek Vasut L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: arch/arm/mach-pxa/vpac270.c F: arch/arm/mach-pxa/include/mach/vpac270.h ARM/VT8500 ARM ARCHITECTURE M: Tony Prisk L: linux-arm-kernel@lists.infradead.org 
(moderated for non-subscribers) S: Maintained F: arch/arm/mach-vt8500/ F: drivers/clocksource/timer-vt8500.c F: drivers/i2c/busses/i2c-wmt.c F: drivers/mmc/host/wmt-sdmmc.c F: drivers/pwm/pwm-vt8500.c F: drivers/rtc/rtc-vt8500.c F: drivers/tty/serial/vt8500_serial.c F: drivers/usb/host/ehci-platform.c F: drivers/usb/host/uhci-platform.c F: drivers/video/fbdev/vt8500lcdfb.* F: drivers/video/fbdev/wm8505fb* F: drivers/video/fbdev/wmt_ge_rops.* ARM/ZIPIT Z2 SUPPORT M: Marek Vasut L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: arch/arm/mach-pxa/z2.c F: arch/arm/mach-pxa/include/mach/z2.h ARM/ZTE ARCHITECTURE M: Jun Nie M: Shawn Guo L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: arch/arm/boot/dts/zx2967* F: arch/arm/mach-zx/ F: arch/arm64/boot/dts/zte/ F: drivers/clk/zte/ F: drivers/dma/zx_dma.c F: drivers/gpio/gpio-zx.c F: drivers/i2c/busses/i2c-zx2967.c F: drivers/mmc/host/dw_mmc-zx.* F: drivers/pinctrl/zte/ F: drivers/soc/zte/ F: drivers/thermal/zx2967_thermal.c F: drivers/watchdog/zx2967_wdt.c F: Documentation/devicetree/bindings/arm/zte.yaml F: Documentation/devicetree/bindings/clock/zx2967*.txt F: Documentation/devicetree/bindings/dma/zxdma.txt F: Documentation/devicetree/bindings/gpio/zx296702-gpio.txt F: Documentation/devicetree/bindings/i2c/i2c-zx2967.txt F: Documentation/devicetree/bindings/mmc/zx-dw-mshc.txt F: Documentation/devicetree/bindings/pinctrl/pinctrl-zx.txt F: Documentation/devicetree/bindings/reset/zte,zx2967-reset.txt F: Documentation/devicetree/bindings/soc/zte/ F: Documentation/devicetree/bindings/sound/zte,*.txt F: Documentation/devicetree/bindings/thermal/zx2967-thermal.txt F: Documentation/devicetree/bindings/watchdog/zte,zx2967-wdt.txt F: include/dt-bindings/clock/zx2967*.h F: include/dt-bindings/soc/zte,*.h F: sound/soc/codecs/zx_aud96p22.c F: sound/soc/zte/ ARM/ZYNQ ARCHITECTURE M: Michal Simek L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) W: http://wiki.xilinx.com T: git https://github.com/Xilinx/linux-xlnx.git S: Supported F: arch/arm/mach-zynq/ F: drivers/cpuidle/cpuidle-zynq.c F: drivers/block/xsysace.c N: zynq N: xilinx F: drivers/clocksource/timer-cadence-ttc.c F: drivers/i2c/busses/i2c-cadence.c F: drivers/mmc/host/sdhci-of-arasan.c F: drivers/edac/synopsys_edac.c F: drivers/i2c/busses/i2c-xiic.c ARM64 PORT (AARCH64 ARCHITECTURE) M: Catalin Marinas M: Will Deacon L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) T: git git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux.git S: Maintained F: arch/arm64/ X: arch/arm64/boot/dts/ F: Documentation/arm64/ AS3645A LED FLASH CONTROLLER DRIVER M: Sakari Ailus L: linux-leds@vger.kernel.org S: Maintained F: drivers/leds/leds-as3645a.c ASAHI KASEI AK7375 LENS VOICE COIL DRIVER M: Tianshu Qiu L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git S: Maintained F: drivers/media/i2c/ak7375.c F: Documentation/devicetree/bindings/media/i2c/ak7375.txt ASAHI KASEI AK8974 DRIVER M: Linus Walleij L: linux-iio@vger.kernel.org W: http://www.akm.com/ S: Supported F: drivers/iio/magnetometer/ak8974.c ASC7621 HARDWARE MONITOR DRIVER M: George Joseph L: linux-hwmon@vger.kernel.org S: Maintained F: Documentation/hwmon/asc7621 F: drivers/hwmon/asc7621.c ASPEED VIDEO ENGINE DRIVER M: Eddie James L: linux-media@vger.kernel.org L: openbmc@lists.ozlabs.org (moderated for non-subscribers) S: Maintained F: drivers/media/platform/aspeed-video.c F: 
Documentation/devicetree/bindings/media/aspeed-video.txt ASUS NOTEBOOKS AND EEEPC ACPI/WMI EXTRAS DRIVERS M: Corentin Chary L: acpi4asus-user@lists.sourceforge.net L: platform-driver-x86@vger.kernel.org W: http://acpi4asus.sf.net S: Maintained F: drivers/platform/x86/asus*.c F: drivers/platform/x86/eeepc*.c ASUS WIRELESS RADIO CONTROL DRIVER M: João Paulo Rechi Vita L: platform-driver-x86@vger.kernel.org S: Maintained F: drivers/platform/x86/asus-wireless.c ASYMMETRIC KEYS M: David Howells L: keyrings@vger.kernel.org S: Maintained F: Documentation/crypto/asymmetric-keys.txt F: include/linux/verification.h F: include/crypto/public_key.h F: include/crypto/pkcs7.h F: crypto/asymmetric_keys/ ASYNCHRONOUS TRANSFERS/TRANSFORMS (IOAT) API R: Dan Williams W: http://sourceforge.net/projects/xscaleiop S: Odd fixes F: Documentation/crypto/async-tx-api.txt F: crypto/async_tx/ F: drivers/dma/ F: include/linux/dmaengine.h F: include/linux/async_tx.h AT24 EEPROM DRIVER M: Bartosz Golaszewski L: linux-i2c@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/brgl/linux.git S: Maintained F: Documentation/devicetree/bindings/eeprom/at24.txt F: drivers/misc/eeprom/at24.c F: include/linux/platform_data/at24.h ATA OVER ETHERNET (AOE) DRIVER M: "Ed L. Cashin" W: http://www.openaoe.org/ S: Supported F: Documentation/aoe/ F: drivers/block/aoe/ ATHEROS 71XX/9XXX GPIO DRIVER M: Alban Bedel W: https://github.com/AlbanBedel/linux T: git git://github.com/AlbanBedel/linux S: Maintained F: drivers/gpio/gpio-ath79.c F: Documentation/devicetree/bindings/gpio/gpio-ath79.txt ATHEROS 71XX/9XXX USB PHY DRIVER M: Alban Bedel W: https://github.com/AlbanBedel/linux T: git git://github.com/AlbanBedel/linux S: Maintained F: drivers/phy/qualcomm/phy-ath79-usb.c F: Documentation/devicetree/bindings/phy/phy-ath79-usb.txt ATHEROS ATH GENERIC UTILITIES M: Kalle Valo L: linux-wireless@vger.kernel.org S: Supported F: drivers/net/wireless/ath/* ATHEROS ATH5K WIRELESS DRIVER M: Jiri Slaby M: Nick Kossifidis M: Luis Chamberlain L: linux-wireless@vger.kernel.org W: http://wireless.kernel.org/en/users/Drivers/ath5k S: Maintained F: drivers/net/wireless/ath/ath5k/ ATHEROS ATH6KL WIRELESS DRIVER M: Kalle Valo L: linux-wireless@vger.kernel.org W: http://wireless.kernel.org/en/users/Drivers/ath6kl T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git S: Supported F: drivers/net/wireless/ath/ath6kl/ ATI_REMOTE2 DRIVER M: Ville Syrjala S: Maintained F: drivers/input/misc/ati_remote2.c ATK0110 HWMON DRIVER M: Luca Tettamanti L: linux-hwmon@vger.kernel.org S: Maintained F: drivers/hwmon/asus_atk0110.c ATLX ETHERNET DRIVERS M: Jay Cliburn M: Chris Snook L: netdev@vger.kernel.org W: http://sourceforge.net/projects/atl1 W: http://atl1.sourceforge.net S: Maintained F: drivers/net/ethernet/atheros/ ATM M: Chas Williams <3chas3@gmail.com> L: linux-atm-general@lists.sourceforge.net (moderated for non-subscribers) L: netdev@vger.kernel.org W: http://linux-atm.sourceforge.net S: Maintained F: drivers/atm/ F: include/linux/atm* F: include/uapi/linux/atm* ATMEL MACB ETHERNET DRIVER M: Nicolas Ferre S: Supported F: drivers/net/ethernet/cadence/ ATMEL MAXTOUCH DRIVER M: Nick Dyer T: git git://github.com/ndyer/linux.git S: Maintained F: Documentation/devicetree/bindings/input/atmel,maxtouch.txt F: drivers/input/touchscreen/atmel_mxt_ts.c ATMEL WIRELESS DRIVER M: Simon Kelley L: linux-wireless@vger.kernel.org W: http://www.thekelleys.org.uk/atmel W: http://atmelwlandriver.sourceforge.net/ S: Maintained F: 
drivers/net/wireless/atmel/atmel* ATOMIC INFRASTRUCTURE M: Will Deacon M: Peter Zijlstra R: Boqun Feng L: linux-kernel@vger.kernel.org S: Maintained F: arch/*/include/asm/atomic*.h F: include/*/atomic*.h ATTO EXPRESSSAS SAS/SATA RAID SCSI DRIVER M: Bradley Grove L: linux-scsi@vger.kernel.org W: http://www.attotech.com S: Supported F: drivers/scsi/esas2r ATUSB IEEE 802.15.4 RADIO DRIVER M: Stefan Schmidt L: linux-wpan@vger.kernel.org S: Maintained F: drivers/net/ieee802154/atusb.c F: drivers/net/ieee802154/atusb.h F: drivers/net/ieee802154/at86rf230.h AUDIT SUBSYSTEM M: Paul Moore M: Eric Paris L: linux-audit@redhat.com (moderated for non-subscribers) W: https://github.com/linux-audit T: git git://git.kernel.org/pub/scm/linux/kernel/git/pcmoore/audit.git S: Supported F: include/linux/audit.h F: include/uapi/linux/audit.h F: kernel/audit* AUXILIARY DISPLAY DRIVERS M: Miguel Ojeda Sandonis S: Maintained F: drivers/auxdisplay/ F: include/linux/cfag12864b.h AVIA HX711 ANALOG DIGITAL CONVERTER IIO DRIVER M: Andreas Klinger L: linux-iio@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/iio/adc/avia-hx711.txt F: drivers/iio/adc/hx711.c AX.25 NETWORK LAYER M: Ralf Baechle L: linux-hams@vger.kernel.org W: http://www.linux-ax25.org/ S: Maintained F: include/uapi/linux/ax25.h F: include/net/ax25.h F: net/ax25/ AXENTIA ARM DEVICES M: Peter Rosin L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: Documentation/devicetree/bindings/arm/axentia.txt F: arch/arm/boot/dts/at91-linea.dtsi F: arch/arm/boot/dts/at91-natte.dtsi F: arch/arm/boot/dts/at91-nattis-2-natte-2.dts F: arch/arm/boot/dts/at91-tse850-3.dts AXENTIA ASOC DRIVERS M: Peter Rosin L: alsa-devel@alsa-project.org (moderated for non-subscribers) S: Maintained F: Documentation/devicetree/bindings/sound/axentia,* F: sound/soc/atmel/tse850-pcm5142.c AXXIA I2C CONTROLLER M: Krzysztof Adamski L: linux-i2c@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/i2c/i2c-axxia.txt F: drivers/i2c/busses/i2c-axxia.c AZ6007 DVB DRIVER M: Mauro Carvalho Chehab L: linux-media@vger.kernel.org W: https://linuxtv.org T: git git://linuxtv.org/media_tree.git S: Maintained F: drivers/media/usb/dvb-usb-v2/az6007.c AZTECH FM RADIO RECEIVER DRIVER M: Hans Verkuil L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git W: https://linuxtv.org S: Maintained F: drivers/media/radio/radio-aztech* B43 WIRELESS DRIVER L: linux-wireless@vger.kernel.org L: b43-dev@lists.infradead.org W: http://wireless.kernel.org/en/users/Drivers/b43 S: Odd Fixes F: drivers/net/wireless/broadcom/b43/ B43LEGACY WIRELESS DRIVER M: Larry Finger L: linux-wireless@vger.kernel.org L: b43-dev@lists.infradead.org W: http://wireless.kernel.org/en/users/Drivers/b43 S: Maintained F: drivers/net/wireless/broadcom/b43legacy/ BACKLIGHT CLASS/SUBSYSTEM M: Lee Jones M: Daniel Thompson M: Jingoo Han L: dri-devel@lists.freedesktop.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/lee/backlight.git S: Maintained F: drivers/video/backlight/ F: include/linux/backlight.h F: include/linux/pwm_backlight.h F: Documentation/devicetree/bindings/leds/backlight BATMAN ADVANCED M: Marek Lindner M: Simon Wunderlich M: Antonio Quartulli L: b.a.t.m.a.n@lists.open-mesh.org (moderated for non-subscribers) W: https://www.open-mesh.org/ Q: https://patchwork.open-mesh.org/project/batman/list/ S: Maintained F: Documentation/ABI/testing/sysfs-class-net-batman-adv F: Documentation/ABI/testing/sysfs-class-net-mesh F: 
Documentation/networking/batman-adv.rst F: include/uapi/linux/batadv_packet.h F: include/uapi/linux/batman_adv.h F: net/batman-adv/ BAYCOM/HDLCDRV DRIVERS FOR AX.25 M: Thomas Sailer L: linux-hams@vger.kernel.org W: http://www.baycom.org/~tom/ham/ham.html S: Maintained F: drivers/net/hamradio/baycom* BCACHE (BLOCK LAYER CACHE) M: Coly Li M: Kent Overstreet L: linux-bcache@vger.kernel.org W: http://bcache.evilpiepirate.org C: irc://irc.oftc.net/bcache S: Maintained F: drivers/md/bcache/ BDISP ST MEDIA DRIVER M: Fabien Dessenne L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git W: https://linuxtv.org S: Supported F: drivers/media/platform/sti/bdisp BECKHOFF CX5020 ETHERCAT MASTER DRIVER M: Dariusz Marcinkiewicz L: netdev@vger.kernel.org S: Maintained F: drivers/net/ethernet/ec_bhf.c BEFS FILE SYSTEM M: Luis de Bethencourt M: Salah Triki S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/luisbg/linux-befs.git F: Documentation/filesystems/befs.txt F: fs/befs/ BFQ I/O SCHEDULER M: Paolo Valente M: Jens Axboe L: linux-block@vger.kernel.org S: Maintained F: block/bfq-* F: Documentation/block/bfq-iosched.txt BFS FILE SYSTEM M: "Tigran A. Aivazian" S: Maintained F: Documentation/filesystems/bfs.txt F: fs/bfs/ F: include/uapi/linux/bfs_fs.h BLINKM RGB LED DRIVER M: Jan-Simon Moeller S: Maintained F: drivers/leds/leds-blinkm.c BLOCK LAYER M: Jens Axboe L: linux-block@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git S: Maintained F: block/ F: drivers/block/ F: kernel/trace/blktrace.c F: lib/sbitmap.c BLOCK2MTD DRIVER M: Joern Engel L: linux-mtd@lists.infradead.org S: Maintained F: drivers/mtd/devices/block2mtd.c BLUETOOTH DRIVERS M: Marcel Holtmann M: Johan Hedberg L: linux-bluetooth@vger.kernel.org W: http://www.bluez.org/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth.git T: git git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth-next.git S: Maintained F: drivers/bluetooth/ BLUETOOTH SUBSYSTEM M: Marcel Holtmann M: Johan Hedberg L: linux-bluetooth@vger.kernel.org W: http://www.bluez.org/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth.git T: git git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth-next.git S: Maintained F: net/bluetooth/ F: include/net/bluetooth/ BONDING DRIVER M: Jay Vosburgh M: Veaceslav Falico M: Andy Gospodarek L: netdev@vger.kernel.org W: http://sourceforge.net/projects/bonding/ S: Supported F: drivers/net/bonding/ F: include/uapi/linux/if_bonding.h BPF (Safe dynamic programs and tools) M: Alexei Starovoitov M: Daniel Borkmann L: netdev@vger.kernel.org L: linux-kernel@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf.git T: git git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next.git Q: https://patchwork.ozlabs.org/project/netdev/list/?delegate=77147 S: Supported F: arch/*/net/* F: Documentation/networking/filter.txt F: Documentation/bpf/ F: include/linux/bpf* F: include/linux/filter.h F: include/trace/events/xdp.h F: include/uapi/linux/bpf* F: include/uapi/linux/filter.h F: kernel/bpf/ F: kernel/trace/bpf_trace.c F: lib/test_bpf.c F: net/bpf/ F: net/core/filter.c F: net/sched/act_bpf.c F: net/sched/cls_bpf.c F: samples/bpf/ F: tools/bpf/ F: tools/lib/bpf/ F: tools/testing/selftests/bpf/ BPF JIT for ARM M: Shubham Bansal L: netdev@vger.kernel.org S: Maintained F: arch/arm/net/ BPF JIT for ARM64 M: Daniel Borkmann M: Alexei Starovoitov M: Zi Shen Lim L: netdev@vger.kernel.org S: Supported F: 
arch/arm64/net/ BPF JIT for MIPS (32-BIT AND 64-BIT) M: Paul Burton L: netdev@vger.kernel.org S: Maintained F: arch/mips/net/ BPF JIT for NFP NICs M: Jakub Kicinski L: netdev@vger.kernel.org S: Supported F: drivers/net/ethernet/netronome/nfp/bpf/ BPF JIT for POWERPC (32-BIT AND 64-BIT) M: Naveen N. Rao M: Sandipan Das L: netdev@vger.kernel.org S: Maintained F: arch/powerpc/net/ BPF JIT for S390 M: Martin Schwidefsky M: Heiko Carstens L: netdev@vger.kernel.org S: Maintained F: arch/s390/net/ X: arch/s390/net/pnet.c BPF JIT for SPARC (32-BIT AND 64-BIT) M: David S. Miller L: netdev@vger.kernel.org S: Maintained F: arch/sparc/net/ BPF JIT for X86 32-BIT M: Wang YanQing L: netdev@vger.kernel.org S: Maintained F: arch/x86/net/bpf_jit_comp32.c BPF JIT for X86 64-BIT M: Alexei Starovoitov M: Daniel Borkmann L: netdev@vger.kernel.org S: Supported F: arch/x86/net/ X: arch/x86/net/bpf_jit_comp32.c BROADCOM B44 10/100 ETHERNET DRIVER M: Michael Chan L: netdev@vger.kernel.org S: Supported F: drivers/net/ethernet/broadcom/b44.* BROADCOM B53 ETHERNET SWITCH DRIVER M: Florian Fainelli L: netdev@vger.kernel.org L: openwrt-devel@lists.openwrt.org (subscribers-only) S: Supported F: drivers/net/dsa/b53/* F: include/linux/platform_data/b53.h BROADCOM BCM281XX/BCM11XXX/BCM216XX ARM ARCHITECTURE M: Florian Fainelli M: Ray Jui M: Scott Branden M: bcm-kernel-feedback-list@broadcom.com T: git git://github.com/broadcom/mach-bcm S: Maintained N: bcm281* N: bcm113* N: bcm216* N: kona F: arch/arm/mach-bcm/ BROADCOM BCM2835 ARM ARCHITECTURE M: Eric Anholt M: Stefan Wahren L: linux-rpi-kernel@lists.infradead.org (moderated for non-subscribers) L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) T: git git://github.com/anholt/linux S: Maintained N: bcm2835 F: drivers/staging/vc04_services BROADCOM BCM47XX MIPS ARCHITECTURE M: Hauke Mehrtens M: Rafał Miłecki L: linux-mips@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/mips/brcm/ F: arch/mips/bcm47xx/* F: arch/mips/include/asm/mach-bcm47xx/* BROADCOM BCM5301X ARM ARCHITECTURE M: Hauke Mehrtens M: Rafał Miłecki M: bcm-kernel-feedback-list@broadcom.com L: linux-arm-kernel@lists.infradead.org S: Maintained F: arch/arm/mach-bcm/bcm_5301x.c F: arch/arm/boot/dts/bcm5301x*.dtsi F: arch/arm/boot/dts/bcm470* F: arch/arm/boot/dts/bcm953012* BROADCOM BCM53573 ARM ARCHITECTURE M: Rafał Miłecki L: linux-arm-kernel@lists.infradead.org S: Maintained F: arch/arm/boot/dts/bcm53573* F: arch/arm/boot/dts/bcm47189* BROADCOM BCM63XX ARM ARCHITECTURE M: Florian Fainelli M: bcm-kernel-feedback-list@broadcom.com L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) T: git git://github.com/broadcom/stblinux.git S: Maintained N: bcm63xx BROADCOM BCM63XX/BCM33XX UDC DRIVER M: Kevin Cernekee L: linux-usb@vger.kernel.org S: Maintained F: drivers/usb/gadget/udc/bcm63xx_udc.* BROADCOM BCM7XXX ARM ARCHITECTURE M: Brian Norris M: Gregory Fong M: Florian Fainelli M: bcm-kernel-feedback-list@broadcom.com L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) T: git git://github.com/broadcom/stblinux.git S: Maintained F: arch/arm/mach-bcm/*brcmstb* F: arch/arm/boot/dts/bcm7*.dts* F: drivers/bus/brcmstb_gisb.c F: arch/arm/mm/cache-b15-rac.c F: arch/arm/include/asm/hardware/cache-b15-rac.h N: brcmstb BROADCOM BMIPS CPUFREQ DRIVER M: Markus Mayer M: bcm-kernel-feedback-list@broadcom.com L: linux-pm@vger.kernel.org S: Maintained F: drivers/cpufreq/bmips-cpufreq.c BROADCOM BMIPS MIPS ARCHITECTURE M: Kevin Cernekee M: Florian Fainelli L: 
linux-mips@vger.kernel.org T: git git://github.com/broadcom/stblinux.git S: Maintained F: arch/mips/bmips/* F: arch/mips/include/asm/mach-bmips/* F: arch/mips/kernel/*bmips* F: arch/mips/boot/dts/brcm/bcm*.dts* F: drivers/irqchip/irq-bcm63* F: drivers/irqchip/irq-bcm7* F: drivers/irqchip/irq-brcmstb* F: include/linux/bcm963xx_nvram.h F: include/linux/bcm963xx_tag.h BROADCOM BNX2 GIGABIT ETHERNET DRIVER M: Rasesh Mody M: Dept-GELinuxNICDev@cavium.com L: netdev@vger.kernel.org S: Supported F: drivers/net/ethernet/broadcom/bnx2.* F: drivers/net/ethernet/broadcom/bnx2_* BROADCOM BNX2FC 10 GIGABIT FCOE DRIVER M: QLogic-Storage-Upstream@qlogic.com L: linux-scsi@vger.kernel.org S: Supported F: drivers/scsi/bnx2fc/ BROADCOM BNX2I 1/10 GIGABIT iSCSI DRIVER M: QLogic-Storage-Upstream@qlogic.com L: linux-scsi@vger.kernel.org S: Supported F: drivers/scsi/bnx2i/ BROADCOM BNX2X 10 GIGABIT ETHERNET DRIVER M: Ariel Elior M: Sudarsana Kalluru M: everest-linux-l2@cavium.com L: netdev@vger.kernel.org S: Supported F: drivers/net/ethernet/broadcom/bnx2x/ BROADCOM BNXT_EN 50 GIGABIT ETHERNET DRIVER M: Michael Chan L: netdev@vger.kernel.org S: Supported F: drivers/net/ethernet/broadcom/bnxt/ BROADCOM BRCM80211 IEEE802.11n WIRELESS DRIVER M: Arend van Spriel M: Franky Lin M: Hante Meuleman M: Chi-Hsien Lin M: Wright Feng L: linux-wireless@vger.kernel.org L: brcm80211-dev-list.pdl@broadcom.com L: brcm80211-dev-list@cypress.com S: Supported F: drivers/net/wireless/broadcom/brcm80211/ BROADCOM BRCMSTB GPIO DRIVER M: Gregory Fong L: bcm-kernel-feedback-list@broadcom.com S: Supported F: drivers/gpio/gpio-brcmstb.c F: Documentation/devicetree/bindings/gpio/brcm,brcmstb-gpio.txt BROADCOM BRCMSTB I2C DRIVER M: Kamal Dasu L: linux-i2c@vger.kernel.org L: bcm-kernel-feedback-list@broadcom.com S: Supported F: drivers/i2c/busses/i2c-brcmstb.c F: Documentation/devicetree/bindings/i2c/i2c-brcmstb.txt BROADCOM BRCMSTB USB2 and USB3 PHY DRIVER M: Al Cooper L: linux-kernel@vger.kernel.org L: bcm-kernel-feedback-list@broadcom.com S: Maintained F: drivers/phy/broadcom/phy-brcm-usb* BROADCOM GENET ETHERNET DRIVER M: Doug Berger M: Florian Fainelli L: netdev@vger.kernel.org S: Supported F: drivers/net/ethernet/broadcom/genet/ BROADCOM IPROC ARM ARCHITECTURE M: Ray Jui M: Scott Branden M: bcm-kernel-feedback-list@broadcom.com L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) T: git git://github.com/broadcom/cygnus-linux.git S: Maintained N: iproc N: cygnus N: bcm[-_]nsp N: bcm9113* N: bcm9583* N: bcm9585* N: bcm9586* N: bcm988312 N: bcm113* N: bcm583* N: bcm585* N: bcm586* N: bcm88312 N: hr2 N: stingray F: arch/arm64/boot/dts/broadcom/northstar2/* F: arch/arm64/boot/dts/broadcom/stingray/* F: drivers/clk/bcm/clk-ns* F: drivers/clk/bcm/clk-sr* F: drivers/pinctrl/bcm/pinctrl-ns* F: include/dt-bindings/clock/bcm-sr* BROADCOM KONA GPIO DRIVER M: Ray Jui L: bcm-kernel-feedback-list@broadcom.com S: Supported F: drivers/gpio/gpio-bcm-kona.c F: Documentation/devicetree/bindings/gpio/brcm,kona-gpio.txt BROADCOM NETXTREME-E ROCE DRIVER M: Selvin Xavier M: Devesh Sharma M: Somnath Kotur M: Sriharsha Basavapatna L: linux-rdma@vger.kernel.org W: http://www.broadcom.com S: Supported F: drivers/infiniband/hw/bnxt_re/ F: include/uapi/rdma/bnxt_re-abi.h BROADCOM NVRAM DRIVER M: Rafał Miłecki L: linux-mips@vger.kernel.org S: Maintained F: drivers/firmware/broadcom/* BROADCOM SPECIFIC AMBA DRIVER (BCMA) M: Rafał Miłecki L: linux-wireless@vger.kernel.org S: Maintained F: drivers/bcma/ F: include/linux/bcma/ BROADCOM STB AVS CPUFREQ 
DRIVER M: Markus Mayer M: bcm-kernel-feedback-list@broadcom.com L: linux-pm@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/cpufreq/brcm,stb-avs-cpu-freq.txt F: drivers/cpufreq/brcmstb* BROADCOM STB AVS TMON DRIVER M: Markus Mayer M: bcm-kernel-feedback-list@broadcom.com L: linux-pm@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/thermal/brcm,avs-tmon.txt F: drivers/thermal/broadcom/brcmstb* BROADCOM STB NAND FLASH DRIVER M: Brian Norris M: Kamal Dasu L: linux-mtd@lists.infradead.org L: bcm-kernel-feedback-list@broadcom.com S: Maintained F: drivers/mtd/nand/raw/brcmnand/ BROADCOM STB DPFE DRIVER M: Markus Mayer M: bcm-kernel-feedback-list@broadcom.com L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: Documentation/devicetree/bindings/memory-controllers/brcm,dpfe-cpu.txt F: drivers/memory/brcmstb_dpfe.c BROADCOM SPI DRIVER M: Kamal Dasu M: bcm-kernel-feedback-list@broadcom.com S: Maintained F: Documentation/devicetree/bindings/spi/brcm,spi-bcm-qspi.txt F: drivers/spi/spi-bcm-qspi.* F: drivers/spi/spi-brcmstb-qspi.c F: drivers/spi/spi-iproc-qspi.c BROADCOM SYSTEMPORT ETHERNET DRIVER M: Florian Fainelli L: netdev@vger.kernel.org S: Supported F: drivers/net/ethernet/broadcom/bcmsysport.* BROADCOM TG3 GIGABIT ETHERNET DRIVER M: Siva Reddy Kallam M: Prashant Sreedharan M: Michael Chan L: netdev@vger.kernel.org S: Supported F: drivers/net/ethernet/broadcom/tg3.* BROCADE BFA FC SCSI DRIVER M: Anil Gurumurthy M: Sudarsana Kalluru L: linux-scsi@vger.kernel.org S: Supported F: drivers/scsi/bfa/ BROCADE BNA 10 GIGABIT ETHERNET DRIVER M: Rasesh Mody M: Sudarsana Kalluru M: Dept-GELinuxNICDev@cavium.com L: netdev@vger.kernel.org S: Supported F: drivers/net/ethernet/brocade/bna/ BSG (block layer generic sg v4 driver) M: FUJITA Tomonori L: linux-scsi@vger.kernel.org S: Supported F: block/bsg.c F: include/linux/bsg.h F: include/uapi/linux/bsg.h BT87X AUDIO DRIVER M: Clemens Ladisch L: alsa-devel@alsa-project.org (moderated for non-subscribers) T: git git://git.alsa-project.org/alsa-kernel.git S: Maintained F: Documentation/sound/cards/bt87x.rst F: sound/pci/bt87x.c BT8XXGPIO DRIVER M: Michael Buesch W: http://bu3sch.de/btgpio.php S: Maintained F: drivers/gpio/gpio-bt8xx.c BTRFS FILE SYSTEM M: Chris Mason M: Josef Bacik M: David Sterba L: linux-btrfs@vger.kernel.org W: http://btrfs.wiki.kernel.org/ Q: http://patchwork.kernel.org/project/linux-btrfs/list/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs.git S: Maintained F: Documentation/filesystems/btrfs.txt F: fs/btrfs/ F: include/linux/btrfs* F: include/uapi/linux/btrfs* BTTV VIDEO4LINUX DRIVER M: Mauro Carvalho Chehab L: linux-media@vger.kernel.org W: https://linuxtv.org T: git git://linuxtv.org/media_tree.git S: Odd fixes F: Documentation/media/v4l-drivers/bttv* F: drivers/media/pci/bt8xx/bttv* BUS FREQUENCY DRIVER FOR SAMSUNG EXYNOS M: Chanwoo Choi L: linux-pm@vger.kernel.org L: linux-samsung-soc@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/mzx/devfreq.git S: Maintained F: drivers/devfreq/exynos-bus.c F: Documentation/devicetree/bindings/devfreq/exynos-bus.txt BUSLOGIC SCSI DRIVER M: Khalid Aziz L: linux-scsi@vger.kernel.org S: Maintained F: drivers/scsi/BusLogic.* F: drivers/scsi/FlashPoint.* C-MEDIA CMI8788 DRIVER M: Clemens Ladisch L: alsa-devel@alsa-project.org (moderated for non-subscribers) T: git git://git.alsa-project.org/alsa-kernel.git S: Maintained F: sound/pci/oxygen/ C-SKY ARCHITECTURE M: Guo Ren T: git 
https://github.com/c-sky/csky-linux.git S: Supported F: arch/csky/ F: Documentation/devicetree/bindings/csky/ F: drivers/irqchip/irq-csky-* F: Documentation/devicetree/bindings/interrupt-controller/csky,* F: drivers/clocksource/timer-gx6605s.c F: drivers/clocksource/timer-mp-csky.c F: Documentation/devicetree/bindings/timer/csky,* K: csky N: csky C6X ARCHITECTURE M: Mark Salter M: Aurelien Jacquiot L: linux-c6x-dev@linux-c6x.org W: http://www.linux-c6x.org/wiki/index.php/Main_Page S: Maintained F: arch/c6x/ CA8210 IEEE-802.15.4 RADIO DRIVER M: Harry Morris L: linux-wpan@vger.kernel.org W: https://github.com/Cascoda/ca8210-linux.git S: Maintained F: drivers/net/ieee802154/ca8210.c F: Documentation/devicetree/bindings/net/ieee802154/ca8210.txt CACHEFILES: FS-CACHE BACKEND FOR CACHING ON MOUNTED FILESYSTEMS M: David Howells L: linux-cachefs@redhat.com (moderated for non-subscribers) S: Supported F: Documentation/filesystems/caching/cachefiles.txt F: fs/cachefiles/ CADENCE MIPI-CSI2 BRIDGES M: Maxime Ripard L: linux-media@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/media/cdns,*.txt F: drivers/media/platform/cadence/cdns-csi2* CADET FM/AM RADIO RECEIVER DRIVER M: Hans Verkuil L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git W: https://linuxtv.org S: Maintained F: drivers/media/radio/radio-cadet* CAFE CMOS INTEGRATED CAMERA CONTROLLER DRIVER M: Jonathan Corbet L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git S: Maintained F: Documentation/media/v4l-drivers/cafe_ccic* F: drivers/media/platform/marvell-ccic/ CAIF NETWORK LAYER M: Dmitry Tarnyagin L: netdev@vger.kernel.org S: Supported F: Documentation/networking/caif/ F: drivers/net/caif/ F: include/uapi/linux/caif/ F: include/net/caif/ F: net/caif/ CAKE QDISC M: Toke Høiland-Jørgensen L: cake@lists.bufferbloat.net (moderated for non-subscribers) S: Maintained F: net/sched/sch_cake.c CALGARY x86-64 IOMMU M: Muli Ben-Yehuda M: Jon Mason L: iommu@lists.linux-foundation.org S: Maintained F: arch/x86/kernel/pci-calgary_64.c F: arch/x86/kernel/tce_64.c F: arch/x86/include/asm/calgary.h F: arch/x86/include/asm/tce.h CAN NETWORK DRIVERS M: Wolfgang Grandegger M: Marc Kleine-Budde L: linux-can@vger.kernel.org W: https://github.com/linux-can T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can.git T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can-next.git S: Maintained F: Documentation/devicetree/bindings/net/can/ F: drivers/net/can/ F: include/linux/can/dev.h F: include/linux/can/platform/ F: include/uapi/linux/can/error.h F: include/uapi/linux/can/netlink.h CAN NETWORK LAYER M: Oliver Hartkopp M: Marc Kleine-Budde L: linux-can@vger.kernel.org W: https://github.com/linux-can T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can.git T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can-next.git S: Maintained F: Documentation/networking/can.rst F: net/can/ F: include/linux/can/core.h F: include/uapi/linux/can.h F: include/uapi/linux/can/bcm.h F: include/uapi/linux/can/raw.h F: include/uapi/linux/can/gw.h CAPABILITIES M: Serge Hallyn L: linux-security-module@vger.kernel.org S: Supported F: include/linux/capability.h F: include/uapi/linux/capability.h F: security/commoncap.c F: kernel/capability.c CAPELLA MICROSYSTEMS LIGHT SENSOR DRIVER M: Kevin Tsai S: Maintained F: drivers/iio/light/cm* CARL9170 LINUX COMMUNITY WIRELESS DRIVER M: Christian Lamparter L: linux-wireless@vger.kernel.org W: 
http://wireless.kernel.org/en/users/Drivers/carl9170 S: Maintained F: drivers/net/wireless/ath/carl9170/ CAVIUM I2C DRIVER M: Jan Glauber M: David Daney W: http://www.cavium.com S: Supported F: drivers/i2c/busses/i2c-octeon* F: drivers/i2c/busses/i2c-thunderx* CAVIUM LIQUIDIO NETWORK DRIVER M: Derek Chickles M: Satanand Burla M: Felix Manlunas M: Raghu Vatsavayi L: netdev@vger.kernel.org W: http://www.cavium.com S: Supported F: drivers/net/ethernet/cavium/liquidio/ CAVIUM MMC DRIVER M: Jan Glauber M: David Daney M: Steven J. Hill W: http://www.cavium.com S: Supported F: drivers/mmc/host/cavium* CAVIUM OCTEON-TX CRYPTO DRIVER M: George Cherian L: linux-crypto@vger.kernel.org W: http://www.cavium.com S: Supported F: drivers/crypto/cavium/cpt/ CAVIUM THUNDERX2 ARM64 SOC M: Robert Richter M: Jayachandran C L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: arch/arm64/boot/dts/cavium/thunder2-99xx* F: Documentation/devicetree/bindings/arm/cavium-thunder2.txt CC2520 IEEE-802.15.4 RADIO DRIVER M: Varka Bhadram L: linux-wpan@vger.kernel.org S: Maintained F: drivers/net/ieee802154/cc2520.c F: include/linux/spi/cc2520.h F: Documentation/devicetree/bindings/net/ieee802154/cc2520.txt CCREE ARM TRUSTZONE CRYPTOCELL REE DRIVER M: Yael Chemla M: Gilad Ben-Yossef L: linux-crypto@vger.kernel.org S: Supported F: drivers/crypto/ccree/ W: https://developer.arm.com/products/system-ip/trustzone-cryptocell/cryptocell-700-family CEC FRAMEWORK M: Hans Verkuil L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git W: http://linuxtv.org S: Supported F: Documentation/media/kapi/cec-core.rst F: Documentation/media/uapi/cec F: drivers/media/cec/ F: drivers/media/rc/keymaps/rc-cec.c F: include/media/cec.h F: include/media/cec-notifier.h F: include/uapi/linux/cec.h F: include/uapi/linux/cec-funcs.h F: Documentation/devicetree/bindings/media/cec.txt F: Documentation/ABI/testing/debugfs-cec-error-inj CEC GPIO DRIVER M: Hans Verkuil L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git W: http://linuxtv.org S: Supported F: drivers/media/platform/cec-gpio/ F: Documentation/devicetree/bindings/media/cec-gpio.txt CELL BROADBAND ENGINE ARCHITECTURE M: Arnd Bergmann L: linuxppc-dev@lists.ozlabs.org W: http://www.ibm.com/developerworks/power/cell/ S: Supported F: arch/powerpc/include/asm/cell*.h F: arch/powerpc/include/asm/spu*.h F: arch/powerpc/include/uapi/asm/spu*.h F: arch/powerpc/oprofile/*cell* F: arch/powerpc/platforms/cell/ CEPH COMMON CODE (LIBCEPH) M: Ilya Dryomov M: "Yan, Zheng" M: Sage Weil L: ceph-devel@vger.kernel.org W: http://ceph.com/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client.git T: git git://github.com/ceph/ceph-client.git S: Supported F: net/ceph/ F: include/linux/ceph/ F: include/linux/crush/ CEPH DISTRIBUTED FILE SYSTEM CLIENT (CEPH) M: "Yan, Zheng" M: Sage Weil M: Ilya Dryomov L: ceph-devel@vger.kernel.org W: http://ceph.com/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client.git T: git git://github.com/ceph/ceph-client.git S: Supported F: Documentation/filesystems/ceph.txt F: fs/ceph/ CERTIFICATE HANDLING: M: David Howells M: David Woodhouse L: keyrings@vger.kernel.org S: Maintained F: Documentation/admin-guide/module-signing.rst F: certs/ F: scripts/sign-file.c F: scripts/extract-cert.c CERTIFIED WIRELESS USB (WUSB) SUBSYSTEM: L: linux-usb@vger.kernel.org S: Orphan F: Documentation/usb/WUSB-Design-overview.txt F: Documentation/usb/wusb-cbaf F: drivers/usb/host/hwa-hc.c F: 
drivers/usb/host/whci/ F: drivers/usb/wusbcore/ F: include/linux/usb/wusb* CFAG12864B LCD DRIVER M: Miguel Ojeda Sandonis S: Maintained F: drivers/auxdisplay/cfag12864b.c F: include/linux/cfag12864b.h CFAG12864BFB LCD FRAMEBUFFER DRIVER M: Miguel Ojeda Sandonis S: Maintained F: drivers/auxdisplay/cfag12864bfb.c F: include/linux/cfag12864b.h 802.11 (including CFG80211/NL80211) M: Johannes Berg L: linux-wireless@vger.kernel.org W: http://wireless.kernel.org/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211.git T: git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211-next.git S: Maintained F: net/wireless/ F: include/uapi/linux/nl80211.h F: include/linux/ieee80211.h F: include/net/wext.h F: include/net/cfg80211.h F: include/net/iw_handler.h F: include/net/ieee80211_radiotap.h F: Documentation/driver-api/80211/cfg80211.rst F: Documentation/networking/regulatory.txt CHAR and MISC DRIVERS M: Arnd Bergmann M: Greg Kroah-Hartman T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc.git S: Supported F: drivers/char/ F: drivers/misc/ F: include/linux/miscdevice.h CHECKPATCH M: Andy Whitcroft M: Joe Perches S: Maintained F: scripts/checkpatch.pl CHINESE DOCUMENTATION M: Harry Wei L: xiyoulinuxkernelgroup@googlegroups.com (subscribers-only) L: linux-kernel@zh-kernel.org (moderated for non-subscribers) S: Maintained F: Documentation/translations/zh_CN/ CHIPIDEA USB HIGH SPEED DUAL ROLE CONTROLLER M: Peter Chen T: git git://git.kernel.org/pub/scm/linux/kernel/git/peter.chen/usb.git L: linux-usb@vger.kernel.org S: Maintained F: drivers/usb/chipidea/ CHIPONE ICN8318 I2C TOUCHSCREEN DRIVER M: Hans de Goede L: linux-input@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/input/touchscreen/chipone_icn8318.txt F: drivers/input/touchscreen/chipone_icn8318.c CHIPONE ICN8505 I2C TOUCHSCREEN DRIVER M: Hans de Goede L: linux-input@vger.kernel.org S: Maintained F: drivers/input/touchscreen/chipone_icn8505.c CHROME HARDWARE PLATFORM SUPPORT M: Benson Leung M: Enric Balletbo i Serra S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/bleung/chrome-platform.git F: drivers/platform/chrome/ CHROMEOS EC SUBDRIVERS M: Benson Leung M: Enric Balletbo i Serra R: Guenter Roeck S: Maintained N: cros_ec N: cros-ec F: drivers/power/supply/cros_usbpd-charger.c CIRRUS LOGIC AUDIO CODEC DRIVERS M: Brian Austin M: Paul Handrigan L: alsa-devel@alsa-project.org (moderated for non-subscribers) S: Maintained F: sound/soc/codecs/cs* CIRRUS LOGIC EP93XX ETHERNET DRIVER M: Hartley Sweeten L: netdev@vger.kernel.org S: Maintained F: drivers/net/ethernet/cirrus/ep93xx_eth.c CISCO FCOE HBA DRIVER M: Satish Kharat M: Sesidhar Baddela M: Karan Tilak Kumar L: linux-scsi@vger.kernel.org S: Supported F: drivers/scsi/fnic/ CISCO SCSI HBA DRIVER M: Karan Tilak Kumar M: Sesidhar Baddela L: linux-scsi@vger.kernel.org S: Supported F: drivers/scsi/snic/ CISCO VIC ETHERNET NIC DRIVER M: Christian Benvenuti M: Govindarajulu Varadarajan <_govind@gmx.com> M: Parvi Kaustubhi S: Supported F: drivers/net/ethernet/cisco/enic/ CISCO VIC LOW LATENCY NIC DRIVER M: Christian Benvenuti M: Nelson Escobar M: Parvi Kaustubhi S: Supported F: drivers/infiniband/hw/usnic/ CIRRUS LOGIC MADERA CODEC DRIVERS M: Charles Keepax M: Richard Fitzgerald L: alsa-devel@alsa-project.org (moderated for non-subscribers) L: patches@opensource.cirrus.com T: git https://github.com/CirrusLogic/linux-drivers.git W: https://github.com/CirrusLogic/linux-drivers/wiki S: Supported F: 
F: Documentation/devicetree/bindings/mfd/madera.txt
F: Documentation/devicetree/bindings/pinctrl/cirrus,madera-pinctrl.txt
F: include/linux/irqchip/irq-madera*
F: include/linux/mfd/madera/*
F: drivers/gpio/gpio-madera*
F: drivers/irqchip/irq-madera*
F: drivers/mfd/madera*
F: drivers/mfd/cs47l*
F: drivers/pinctrl/cirrus/*

CLANG-FORMAT FILE
M: Miguel Ojeda
S: Maintained
F: .clang-format

CLEANCACHE API
M: Konrad Rzeszutek Wilk
L: linux-kernel@vger.kernel.org
S: Maintained
F: mm/cleancache.c
F: include/linux/cleancache.h

CLK API
M: Russell King
L: linux-clk@vger.kernel.org
S: Maintained
F: include/linux/clk.h

CLOCKSOURCE, CLOCKEVENT DRIVERS
M: Daniel Lezcano
M: Thomas Gleixner
L: linux-kernel@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core
S: Supported
F: drivers/clocksource/
F: Documentation/devicetree/bindings/timer/

CMPC ACPI DRIVER
M: Thadeu Lima de Souza Cascardo
M: Daniel Oliveira Nascimento
L: platform-driver-x86@vger.kernel.org
S: Supported
F: drivers/platform/x86/classmate-laptop.c

COBALT MEDIA DRIVER
M: Hans Verkuil
L: linux-media@vger.kernel.org
T: git git://linuxtv.org/media_tree.git
W: https://linuxtv.org
S: Supported
F: drivers/media/pci/cobalt/

COCCINELLE/Semantic Patches (SmPL)
M: Julia Lawall
M: Gilles Muller
M: Nicolas Palix
M: Michal Marek
L: cocci@systeme.lip6.fr (moderated for non-subscribers)
T: git git://git.kernel.org/pub/scm/linux/kernel/git/mmarek/kbuild.git misc
W: http://coccinelle.lip6.fr/
S: Supported
F: Documentation/dev-tools/coccinelle.rst
F: scripts/coccinelle/
F: scripts/coccicheck

CODA FILE SYSTEM
M: Jan Harkes
M: coda@cs.cmu.edu
L: codalist@coda.cs.cmu.edu
W: http://www.coda.cs.cmu.edu/
S: Maintained
F: Documentation/filesystems/coda.txt
F: fs/coda/
F: include/linux/coda*.h
F: include/uapi/linux/coda*.h

CODA V4L2 MEM2MEM DRIVER
M: Philipp Zabel
L: linux-media@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/media/coda.txt
F: drivers/media/platform/coda/

CODE OF CONDUCT
M: Greg Kroah-Hartman
S: Supported
F: Documentation/process/code-of-conduct.rst
F: Documentation/process/code-of-conduct-interpretation.rst

COMMON CLK FRAMEWORK
M: Michael Turquette
M: Stephen Boyd
L: linux-clk@vger.kernel.org
Q: http://patchwork.kernel.org/project/linux-clk/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/clk/linux.git
S: Maintained
F: Documentation/devicetree/bindings/clock/
F: drivers/clk/
X: drivers/clk/clkdev.c
F: include/linux/clk-pr*
F: include/linux/clk/
F: include/linux/of_clk.h

COMMON INTERNET FILE SYSTEM (CIFS)
M: Steve French
L: linux-cifs@vger.kernel.org
L: samba-technical@lists.samba.org (moderated for non-subscribers)
W: http://linux-cifs.samba.org/
T: git git://git.samba.org/sfrench/cifs-2.6.git
S: Supported
F: Documentation/filesystems/cifs/
F: fs/cifs/

COMPACTPCI HOTPLUG CORE
M: Scott Murray
L: linux-pci@vger.kernel.org
S: Maintained
F: drivers/pci/hotplug/cpci_hotplug*

COMPACTPCI HOTPLUG GENERIC DRIVER
M: Scott Murray
L: linux-pci@vger.kernel.org
S: Maintained
F: drivers/pci/hotplug/cpcihp_generic.c

COMPACTPCI HOTPLUG ZIATECH ZT5550 DRIVER
M: Scott Murray
L: linux-pci@vger.kernel.org
S: Maintained
F: drivers/pci/hotplug/cpcihp_zt5550.*

COMPAL LAPTOP SUPPORT
M: Cezary Jackiewicz
L: platform-driver-x86@vger.kernel.org
S: Maintained
F: drivers/platform/x86/compal-laptop.c

COMPILER ATTRIBUTES
M: Miguel Ojeda
S: Maintained
F: include/linux/compiler_attributes.h

CONEXANT ACCESSRUNNER USB DRIVER
L: accessrunner-general@lists.sourceforge.net
W: http://accessrunner.sourceforge.net/
S: Orphan
F: drivers/usb/atm/cxacru.c

CONFIGFS
M: Joel Becker
M: Christoph Hellwig
T: git git://git.infradead.org/users/hch/configfs.git
S: Supported
F: fs/configfs/
F: include/linux/configfs.h

CONNECTOR
M: Evgeniy Polyakov
L: netdev@vger.kernel.org
S: Maintained
F: drivers/connector/

CONTROL GROUP (CGROUP)
M: Tejun Heo
M: Li Zefan
M: Johannes Weiner
L: cgroups@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup.git
S: Maintained
F: Documentation/cgroup*
F: include/linux/cgroup*
F: kernel/cgroup*

CONTROL GROUP - CPUSET
M: Li Zefan
L: cgroups@vger.kernel.org
W: http://www.bullopensource.org/cpuset/
W: http://oss.sgi.com/projects/cpusets/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup.git
S: Maintained
F: Documentation/cgroup-v1/cpusets.txt
F: include/linux/cpuset.h
F: kernel/cgroup/cpuset.c

CONTROL GROUP - MEMORY RESOURCE CONTROLLER (MEMCG)
M: Johannes Weiner
M: Michal Hocko
M: Vladimir Davydov
L: cgroups@vger.kernel.org
L: linux-mm@kvack.org
S: Maintained
F: mm/memcontrol.c
F: mm/swap_cgroup.c

CORETEMP HARDWARE MONITORING DRIVER
M: Fenghua Yu
L: linux-hwmon@vger.kernel.org
S: Maintained
F: Documentation/hwmon/coretemp
F: drivers/hwmon/coretemp.c

COSA/SRP SYNC SERIAL DRIVER
M: Jan "Yenya" Kasprzak
W: http://www.fi.muni.cz/~kas/cosa/
S: Maintained
F: drivers/net/wan/cosa*

CPMAC ETHERNET DRIVER
M: Florian Fainelli
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/ti/cpmac.c

CPU FREQUENCY DRIVERS
M: "Rafael J. Wysocki"
M: Viresh Kumar
L: linux-pm@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
T: git git://git.linaro.org/people/vireshk/linux.git (For ARM Updates)
B: https://bugzilla.kernel.org
F: Documentation/cpu-freq/
F: Documentation/devicetree/bindings/cpufreq/
F: drivers/cpufreq/
F: include/linux/cpufreq.h
F: tools/testing/selftests/cpufreq/

CPU FREQUENCY DRIVERS - ARM BIG LITTLE
M: Viresh Kumar
M: Sudeep Holla
L: linux-pm@vger.kernel.org
W: http://www.arm.com/products/processors/technologies/biglittleprocessing.php
S: Maintained
F: drivers/cpufreq/arm_big_little.h
F: drivers/cpufreq/arm_big_little.c

CPU POWER MONITORING SUBSYSTEM
M: Thomas Renninger
M: Shuah Khan
L: linux-pm@vger.kernel.org
S: Maintained
F: tools/power/cpupower/

CPUID/MSR DRIVER
M: "H. Peter Anvin"
S: Maintained
F: arch/x86/kernel/cpuid.c
F: arch/x86/kernel/msr.c

CPUIDLE DRIVER - ARM BIG LITTLE
M: Lorenzo Pieralisi
M: Daniel Lezcano
L: linux-pm@vger.kernel.org
L: linux-arm-kernel@lists.infradead.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
S: Maintained
F: drivers/cpuidle/cpuidle-big_little.c

CPUIDLE DRIVER - ARM EXYNOS
M: Bartlomiej Zolnierkiewicz
M: Daniel Lezcano
M: Kukjin Kim
L: linux-pm@vger.kernel.org
L: linux-samsung-soc@vger.kernel.org
S: Supported
F: drivers/cpuidle/cpuidle-exynos.c
F: arch/arm/mach-exynos/pm.c

CPUIDLE DRIVERS
M: "Rafael J. Wysocki"
M: Daniel Lezcano
L: linux-pm@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
B: https://bugzilla.kernel.org
F: drivers/cpuidle/*
F: include/linux/cpuidle.h

CRAMFS FILESYSTEM
M: Nicolas Pitre
S: Maintained
F: Documentation/filesystems/cramfs.txt
F: fs/cramfs/

CRYPTO API
M: Herbert Xu
Miller" L: linux-crypto@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/herbert/cryptodev-2.6.git T: git git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6.git S: Maintained F: Documentation/crypto/ F: Documentation/devicetree/bindings/crypto/ F: arch/*/crypto/ F: crypto/ F: drivers/crypto/ F: include/crypto/ F: include/linux/crypto* CRYPTOGRAPHIC RANDOM NUMBER GENERATOR M: Neil Horman L: linux-crypto@vger.kernel.org S: Maintained F: crypto/ansi_cprng.c F: crypto/rng.c CS3308 MEDIA DRIVER M: Hans Verkuil L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git W: http://linuxtv.org S: Odd Fixes F: drivers/media/i2c/cs3308.c CS5535 Audio ALSA driver M: Jaya Kumar S: Maintained F: sound/pci/cs5535audio/ CSI DRIVERS FOR ALLWINNER V3s M: Yong Deng L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git S: Maintained F: drivers/media/platform/sunxi/sun6i-csi/ F: Documentation/devicetree/bindings/media/sun6i-csi.txt CW1200 WLAN driver M: Solomon Peachy S: Maintained F: drivers/net/wireless/st/cw1200/ CX18 VIDEO4LINUX DRIVER M: Andy Walls L: ivtv-devel@ivtvdriver.org (subscribers-only) L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git W: https://linuxtv.org W: http://www.ivtvdriver.org/index.php/Cx18 S: Maintained F: Documentation/media/v4l-drivers/cx18* F: drivers/media/pci/cx18/ F: include/uapi/linux/ivtv* CX2341X MPEG ENCODER HELPER MODULE M: Hans Verkuil L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git W: https://linuxtv.org S: Maintained F: drivers/media/common/cx2341x* F: include/media/drv-intf/cx2341x.h CX24120 MEDIA DRIVER M: Jemma Denson M: Patrick Boettcher L: linux-media@vger.kernel.org W: https://linuxtv.org Q: http://patchwork.linuxtv.org/project/linux-media/list/ S: Maintained F: drivers/media/dvb-frontends/cx24120* CX88 VIDEO4LINUX DRIVER M: Mauro Carvalho Chehab L: linux-media@vger.kernel.org W: https://linuxtv.org T: git git://linuxtv.org/media_tree.git S: Odd fixes F: Documentation/media/v4l-drivers/cx88* F: drivers/media/pci/cx88/ CXD2820R MEDIA DRIVER M: Antti Palosaari L: linux-media@vger.kernel.org W: https://linuxtv.org W: http://palosaari.fi/linux/ Q: http://patchwork.linuxtv.org/project/linux-media/list/ T: git git://linuxtv.org/anttip/media_tree.git S: Maintained F: drivers/media/dvb-frontends/cxd2820r* CXGB3 ETHERNET DRIVER (CXGB3) M: Arjun Vynipadath L: netdev@vger.kernel.org W: http://www.chelsio.com S: Supported F: drivers/net/ethernet/chelsio/cxgb3/ CXGB3 ISCSI DRIVER (CXGB3I) M: Karen Xie L: linux-scsi@vger.kernel.org W: http://www.chelsio.com S: Supported F: drivers/scsi/cxgbi/cxgb3i CXGB3 IWARP RNIC DRIVER (IW_CXGB3) M: Steve Wise L: linux-rdma@vger.kernel.org W: http://www.openfabrics.org S: Supported F: drivers/infiniband/hw/cxgb3/ F: include/uapi/rdma/cxgb3-abi.h CXGB4 CRYPTO DRIVER (chcr) M: Harsh Jain L: linux-crypto@vger.kernel.org W: http://www.chelsio.com S: Supported F: drivers/crypto/chelsio CXGB4 ETHERNET DRIVER (CXGB4) M: Arjun Vynipadath L: netdev@vger.kernel.org W: http://www.chelsio.com S: Supported F: drivers/net/ethernet/chelsio/cxgb4/ CXGB4 ISCSI DRIVER (CXGB4I) M: Karen Xie L: linux-scsi@vger.kernel.org W: http://www.chelsio.com S: Supported F: drivers/scsi/cxgbi/cxgb4i CXGB4 IWARP RNIC DRIVER (IW_CXGB4) M: Steve Wise L: linux-rdma@vger.kernel.org W: http://www.openfabrics.org S: Supported F: drivers/infiniband/hw/cxgb4/ F: include/uapi/rdma/cxgb4-abi.h CXGB4VF ETHERNET DRIVER (CXGB4VF) M: Casey Leedom L: netdev@vger.kernel.org W: 
W: http://www.chelsio.com
S: Supported
F: drivers/net/ethernet/chelsio/cxgb4vf/

CXL (IBM Coherent Accelerator Processor Interface CAPI) DRIVER
M: Frederic Barrat
M: Andrew Donnellan
L: linuxppc-dev@lists.ozlabs.org
S: Supported
F: arch/powerpc/platforms/powernv/pci-cxl.c
F: drivers/misc/cxl/
F: include/misc/cxl*
F: include/uapi/misc/cxl.h
F: Documentation/powerpc/cxl.txt
F: Documentation/ABI/testing/sysfs-class-cxl

CXLFLASH (IBM Coherent Accelerator Processor Interface CAPI Flash) SCSI DRIVER
M: Manoj N. Kumar
M: Matthew R. Ochs
M: Uma Krishnan
L: linux-scsi@vger.kernel.org
S: Supported
F: drivers/scsi/cxlflash/
F: include/uapi/scsi/cxlflash_ioctl.h
F: Documentation/powerpc/cxlflash.txt

CYBERPRO FB DRIVER
M: Russell King
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
W: http://www.armlinux.org.uk/
S: Maintained
F: drivers/video/fbdev/cyber2000fb.*

CYCLADES ASYNC MUX DRIVER
W: http://www.cyclades.com/
S: Orphan
F: drivers/tty/cyclades.c
F: include/linux/cyclades.h
F: include/uapi/linux/cyclades.h

CYCLADES PC300 DRIVER
W: http://www.cyclades.com/
S: Orphan
F: drivers/net/wan/pc300*

CYPRESS_FIRMWARE MEDIA DRIVER
M: Antti Palosaari
L: linux-media@vger.kernel.org
W: https://linuxtv.org
W: http://palosaari.fi/linux/
Q: http://patchwork.linuxtv.org/project/linux-media/list/
T: git git://linuxtv.org/anttip/media_tree.git
S: Maintained
F: drivers/media/common/cypress_firmware*

CYTTSP TOUCHSCREEN DRIVER
M: Ferruh Yigit
L: linux-input@vger.kernel.org
S: Supported
F: drivers/input/touchscreen/cyttsp*
F: include/linux/input/cyttsp.h

D-LINK DIR-685 TOUCHKEYS DRIVER
M: Linus Walleij
L: linux-input@vger.kernel.org
S: Supported
F: drivers/input/keyboard/dlink-dir685-touchkeys.c

DALLAS/MAXIM DS1685-FAMILY REAL TIME CLOCK
M: Joshua Kinard
S: Maintained
F: drivers/rtc/rtc-ds1685.c
F: include/linux/rtc/ds1685.h

DAMA SLAVE for AX.25
M: Joerg Reuter
W: http://yaina.de/jreuter/
W: http://www.qsl.net/dl1bke/
L: linux-hams@vger.kernel.org
S: Maintained
F: net/ax25/af_ax25.c
F: net/ax25/ax25_dev.c
F: net/ax25/ax25_ds_*
F: net/ax25/ax25_in.c
F: net/ax25/ax25_out.c
F: net/ax25/ax25_timer.c
F: net/ax25/sysctl_net_ax25.c

DAVICOM FAST ETHERNET (DMFE) NETWORK DRIVER
L: netdev@vger.kernel.org
S: Orphan
F: Documentation/networking/device_drivers/dec/dmfe.txt
F: drivers/net/ethernet/dec/tulip/dmfe.c

DC390/AM53C974 SCSI driver
M: Hannes Reinecke
L: linux-scsi@vger.kernel.org
S: Maintained
F: drivers/scsi/am53c974.c

DC395x SCSI driver
M: Oliver Neukum
M: Ali Akcaagac
M: Jamie Lenehan
L: dc395x@twibble.org
W: http://twibble.org/dist/dc395x/
W: http://lists.twibble.org/mailman/listinfo/dc395x/
S: Maintained
F: Documentation/scsi/dc395x.txt
F: drivers/scsi/dc395x.*

DCCP PROTOCOL
M: Gerrit Renker
L: dccp@vger.kernel.org
W: http://www.linuxfoundation.org/collaborate/workgroups/networking/dccp
S: Maintained
F: include/linux/dccp.h
F: include/uapi/linux/dccp.h
F: include/linux/tfrc.h
F: net/dccp/

DECnet NETWORK LAYER
W: http://linux-decnet.sourceforge.net
L: linux-decnet-user@lists.sourceforge.net
S: Orphan
F: Documentation/networking/decnet.txt
F: net/decnet/

DECSTATION PLATFORM SUPPORT
M: "Maciej W. Rozycki"
L: linux-mips@vger.kernel.org
W: http://www.linux-mips.org/wiki/DECstation
S: Maintained
F: arch/mips/dec/
F: arch/mips/include/asm/dec/
F: arch/mips/include/asm/mach-dec/

DEFXX FDDI NETWORK DRIVER
Rozycki" S: Maintained F: drivers/net/fddi/defxx.* DELL SMBIOS DRIVER M: Pali Rohár M: Mario Limonciello L: platform-driver-x86@vger.kernel.org S: Maintained F: drivers/platform/x86/dell-smbios.* DELL SMBIOS SMM DRIVER M: Mario Limonciello L: platform-driver-x86@vger.kernel.org S: Maintained F: drivers/platform/x86/dell-smbios-smm.c DELL SMBIOS WMI DRIVER M: Mario Limonciello L: platform-driver-x86@vger.kernel.org S: Maintained F: drivers/platform/x86/dell-smbios-wmi.c F: tools/wmi/dell-smbios-example.c DEFZA FDDI NETWORK DRIVER M: "Maciej W. Rozycki" S: Maintained F: drivers/net/fddi/defza.* DELL LAPTOP DRIVER M: Matthew Garrett M: Pali Rohár L: platform-driver-x86@vger.kernel.org S: Maintained F: drivers/platform/x86/dell-laptop.c DELL LAPTOP FREEFALL DRIVER M: Pali Rohár S: Maintained F: drivers/platform/x86/dell-smo8800.c DELL LAPTOP RBTN DRIVER M: Pali Rohár S: Maintained F: drivers/platform/x86/dell-rbtn.* DELL REMOTE BIOS UPDATE DRIVER M: Stuart Hayes L: platform-driver-x86@vger.kernel.org S: Maintained F: drivers/platform/x86/dell_rbu.c DELL LAPTOP SMM DRIVER M: Pali Rohár S: Maintained F: drivers/hwmon/dell-smm-hwmon.c F: include/uapi/linux/i8k.h DELL SYSTEMS MANAGEMENT BASE DRIVER (dcdbas) M: Stuart Hayes L: platform-driver-x86@vger.kernel.org S: Maintained F: Documentation/dcdbas.txt F: drivers/platform/x86/dcdbas.* DELL WMI NOTIFICATIONS DRIVER M: Matthew Garrett M: Pali Rohár S: Maintained F: drivers/platform/x86/dell-wmi.c DELL WMI DESCRIPTOR DRIVER M: Mario Limonciello S: Maintained F: drivers/platform/x86/dell-wmi-descriptor.c DELTA ST MEDIA DRIVER M: Hugues Fruchet L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git W: https://linuxtv.org S: Supported F: drivers/media/platform/sti/delta DENALI NAND DRIVER M: Masahiro Yamada L: linux-mtd@lists.infradead.org S: Supported F: drivers/mtd/nand/raw/denali* DESIGNWARE USB2 DRD IP DRIVER M: Minas Harutyunyan L: linux-usb@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git S: Maintained F: drivers/usb/dwc2/ DESIGNWARE USB3 DRD IP DRIVER M: Felipe Balbi L: linux-usb@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git S: Maintained F: drivers/usb/dwc3/ DEVANTECH SRF ULTRASONIC RANGER IIO DRIVER M: Andreas Klinger L: linux-iio@vger.kernel.org S: Maintained F: Documentation/ABI/testing/sysfs-bus-iio-distance-srf08 F: drivers/iio/proximity/srf*.c DEVICE COREDUMP (DEV_COREDUMP) M: Johannes Berg L: linux-kernel@vger.kernel.org S: Maintained F: drivers/base/devcoredump.c F: include/linux/devcoredump.h DEVICE FREQUENCY (DEVFREQ) M: MyungJoo Ham M: Kyungmin Park R: Chanwoo Choi L: linux-pm@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/mzx/devfreq.git S: Maintained F: drivers/devfreq/ F: include/linux/devfreq.h F: Documentation/devicetree/bindings/devfreq/ DEVICE FREQUENCY EVENT (DEVFREQ-EVENT) M: Chanwoo Choi L: linux-pm@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/mzx/devfreq.git S: Supported F: drivers/devfreq/event/ F: drivers/devfreq/devfreq-event.c F: include/linux/devfreq-event.h F: Documentation/devicetree/bindings/devfreq/event/ DEVICE NUMBER REGISTRY M: Torben Mathiasen W: http://lanana.org/docs/device-list/index.html S: Maintained DEVICE-MAPPER (LVM) M: Alasdair Kergon M: Mike Snitzer M: dm-devel@redhat.com L: dm-devel@redhat.com W: http://sources.redhat.com/dm Q: http://patchwork.kernel.org/project/dm-devel/list/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm.git T: quilt 
T: quilt http://people.redhat.com/agk/patches/linux/editing/
S: Maintained
F: Documentation/device-mapper/
F: drivers/md/Makefile
F: drivers/md/Kconfig
F: drivers/md/dm*
F: drivers/md/persistent-data/
F: include/linux/device-mapper.h
F: include/linux/dm-*.h
F: include/uapi/linux/dm-*.h

DEVLINK
M: Jiri Pirko
L: netdev@vger.kernel.org
S: Supported
F: net/core/devlink.c
F: include/net/devlink.h
F: include/uapi/linux/devlink.h

DIALOG SEMICONDUCTOR DRIVERS
M: Support Opensource
W: http://www.dialog-semiconductor.com/products
S: Supported
F: Documentation/hwmon/da90??
F: Documentation/devicetree/bindings/mfd/da90*.txt
F: Documentation/devicetree/bindings/input/da90??-onkey.txt
F: Documentation/devicetree/bindings/thermal/da90??-thermal.txt
F: Documentation/devicetree/bindings/regulator/da92*.txt
F: Documentation/devicetree/bindings/watchdog/da90??-wdt.txt
F: Documentation/devicetree/bindings/sound/da[79]*.txt
F: drivers/gpio/gpio-da90??.c
F: drivers/hwmon/da90??-hwmon.c
F: drivers/iio/adc/da91??-*.c
F: drivers/input/misc/da90??_onkey.c
F: drivers/input/touchscreen/da9052_tsi.c
F: drivers/leds/leds-da90??.c
F: drivers/mfd/da903x.c
F: drivers/mfd/da90??-*.c
F: drivers/mfd/da91??-*.c
F: drivers/power/supply/da9052-battery.c
F: drivers/power/supply/da91??-*.c
F: drivers/regulator/da903x.c
F: drivers/regulator/da9???-regulator.[ch]
F: drivers/thermal/da90??-thermal.c
F: drivers/rtc/rtc-da90??.c
F: drivers/video/backlight/da90??_bl.c
F: drivers/watchdog/da90??_wdt.c
F: include/linux/mfd/da903x.h
F: include/linux/mfd/da9052/
F: include/linux/mfd/da9055/
F: include/linux/mfd/da9062/
F: include/linux/mfd/da9063/
F: include/linux/mfd/da9150/
F: include/linux/regulator/da9211.h
F: include/sound/da[79]*.h
F: sound/soc/codecs/da[79]*.[ch]

DIAMOND SYSTEMS GPIO-MM GPIO DRIVER
M: William Breathitt Gray
L: linux-gpio@vger.kernel.org
S: Maintained
F: drivers/gpio/gpio-gpio-mm.c

DIOLAN U2C-12 I2C DRIVER
M: Guenter Roeck
L: linux-i2c@vger.kernel.org
S: Maintained
F: drivers/i2c/busses/i2c-diolan-u2c.c

FILESYSTEM DIRECT ACCESS (DAX)
M: Matthew Wilcox
M: Ross Zwisler
M: Jan Kara
L: linux-fsdevel@vger.kernel.org
S: Supported
F: fs/dax.c
F: include/linux/dax.h
F: include/trace/events/fs_dax.h

DEVICE DIRECT ACCESS (DAX)
M: Dan Williams
M: Dave Jiang
M: Ross Zwisler
M: Vishal Verma
L: linux-nvdimm@lists.01.org
S: Supported
F: drivers/dax/

DIRECTORY NOTIFICATION (DNOTIFY)
M: Jan Kara
R: Amir Goldstein
L: linux-fsdevel@vger.kernel.org
S: Maintained
F: Documentation/filesystems/dnotify.txt
F: fs/notify/dnotify/
F: include/linux/dnotify.h

DISK GEOMETRY AND PARTITION HANDLING
M: Andries Brouwer
W: http://www.win.tue.nl/~aeb/linux/Large-Disk.html
W: http://www.win.tue.nl/~aeb/linux/zip/zip-1.html
W: http://www.win.tue.nl/~aeb/partitions/partition_types-1.html
S: Maintained

DISKQUOTA
M: Jan Kara
S: Maintained
F: Documentation/filesystems/quota.txt
F: fs/quota/
F: include/linux/quota*.h
F: include/uapi/linux/quota*.h

DISPLAYLINK USB 2.0 FRAMEBUFFER DRIVER (UDLFB)
M: Bernie Thompson
L: linux-fbdev@vger.kernel.org
S: Maintained
W: http://plugable.com/category/projects/udlfb/
F: drivers/video/fbdev/udlfb.c
F: include/video/udlfb.h
F: Documentation/fb/udlfb.txt

DISTRIBUTED LOCK MANAGER (DLM)
M: Christine Caulfield
M: David Teigland
L: cluster-devel@redhat.com
W: http://sources.redhat.com/cluster/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/teigland/linux-dlm.git
S: Supported
F: fs/dlm/

DMA BUFFER SHARING FRAMEWORK
M: Sumit Semwal
S: Maintained
L: linux-media@vger.kernel.org
L: dri-devel@lists.freedesktop.org
L: linaro-mm-sig@lists.linaro.org (moderated for non-subscribers)
F: drivers/dma-buf/
F: include/linux/dma-buf*
F: include/linux/reservation.h
F: include/linux/*fence.h
F: Documentation/driver-api/dma-buf.rst
T: git git://anongit.freedesktop.org/drm/drm-misc

DMA GENERIC OFFLOAD ENGINE SUBSYSTEM
M: Vinod Koul
L: dmaengine@vger.kernel.org
Q: https://patchwork.kernel.org/project/linux-dmaengine/list/
S: Maintained
F: drivers/dma/
F: include/linux/dmaengine.h
F: include/linux/of_dma.h
F: Documentation/devicetree/bindings/dma/
F: Documentation/driver-api/dmaengine/
T: git git://git.infradead.org/users/vkoul/slave-dma.git

DMA MAPPING HELPERS
M: Christoph Hellwig
M: Marek Szyprowski
R: Robin Murphy
L: iommu@lists.linux-foundation.org
T: git git://git.infradead.org/users/hch/dma-mapping.git
W: http://git.infradead.org/users/hch/dma-mapping.git
S: Supported
F: kernel/dma/
F: include/asm-generic/dma-mapping.h
F: include/linux/dma-direct.h
F: include/linux/dma-mapping.h
F: include/linux/dma-noncoherent.h

DME1737 HARDWARE MONITOR DRIVER
M: Juerg Haefliger
L: linux-hwmon@vger.kernel.org
S: Maintained
F: Documentation/hwmon/dme1737
F: drivers/hwmon/dme1737.c

DMI/SMBIOS SUPPORT
M: Jean Delvare
S: Maintained
T: quilt http://jdelvare.nerim.net/devel/linux/jdelvare-dmi/
F: Documentation/ABI/testing/sysfs-firmware-dmi-tables
F: drivers/firmware/dmi-id.c
F: drivers/firmware/dmi_scan.c
F: include/linux/dmi.h

DOCUMENTATION
M: Jonathan Corbet
L: linux-doc@vger.kernel.org
S: Maintained
F: Documentation/
F: scripts/kernel-doc
X: Documentation/ABI/
X: Documentation/acpi/
X: Documentation/devicetree/
X: Documentation/i2c/
X: Documentation/media/
X: Documentation/power/
X: Documentation/spi/
T: git git://git.lwn.net/linux.git docs-next

DOCUMENTATION/ITALIAN
M: Federico Vaga
L: linux-doc@vger.kernel.org
S: Maintained
F: Documentation/translations/it_IT

DONGWOON DW9714 LENS VOICE COIL DRIVER
M: Sakari Ailus
L: linux-media@vger.kernel.org
T: git git://linuxtv.org/media_tree.git
S: Maintained
F: drivers/media/i2c/dw9714.c
F: Documentation/devicetree/bindings/media/i2c/dongwoon,dw9714.txt

DONGWOON DW9807 LENS VOICE COIL DRIVER
M: Sakari Ailus
L: linux-media@vger.kernel.org
T: git git://linuxtv.org/media_tree.git
S: Maintained
F: drivers/media/i2c/dw9807-vcm.c
F: Documentation/devicetree/bindings/media/i2c/dongwoon,dw9807-vcm.txt

DOUBLETALK DRIVER
Van Zandt" L: blinux-list@redhat.com S: Maintained F: drivers/char/dtlk.c F: include/linux/dtlk.h DPAA2 DATAPATH I/O (DPIO) DRIVER M: Roy Pledge L: linux-kernel@vger.kernel.org S: Maintained F: drivers/soc/fsl/dpio DPAA2 ETHERNET DRIVER M: Ioana Radulescu L: netdev@vger.kernel.org S: Maintained F: drivers/net/ethernet/freescale/dpaa2/dpaa2-eth* F: drivers/net/ethernet/freescale/dpaa2/dpni* F: drivers/net/ethernet/freescale/dpaa2/dpkg.h F: drivers/net/ethernet/freescale/dpaa2/Makefile F: drivers/net/ethernet/freescale/dpaa2/Kconfig DPAA2 ETHERNET SWITCH DRIVER M: Ioana Radulescu M: Ioana Ciornei L: linux-kernel@vger.kernel.org S: Maintained F: drivers/staging/fsl-dpaa2/ethsw DPAA2 PTP CLOCK DRIVER M: Yangbo Lu L: netdev@vger.kernel.org S: Maintained F: drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp* F: drivers/net/ethernet/freescale/dpaa2/dprtc* DPT_I2O SCSI RAID DRIVER M: Adaptec OEM Raid Solutions L: linux-scsi@vger.kernel.org W: http://www.adaptec.com/ S: Maintained F: drivers/scsi/dpt* F: drivers/scsi/dpt/ DRBD DRIVER M: Philipp Reisner M: Lars Ellenberg L: drbd-dev@lists.linbit.com W: http://www.drbd.org T: git git://git.linbit.com/linux-drbd.git T: git git://git.linbit.com/drbd-8.4.git S: Supported F: drivers/block/drbd/ F: lib/lru_cache.c F: Documentation/blockdev/drbd/ DRIVER CORE, KOBJECTS, DEBUGFS AND SYSFS M: Greg Kroah-Hartman R: "Rafael J. Wysocki" T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core.git S: Supported F: Documentation/kobject.txt F: drivers/base/ F: fs/debugfs/ F: fs/sysfs/ F: include/linux/debugfs.h F: include/linux/kobj* F: lib/kobj* DRIVERS FOR ADAPTIVE VOLTAGE SCALING (AVS) M: Kevin Hilman M: Nishanth Menon S: Maintained F: drivers/power/avs/ F: include/linux/power/smartreflex.h L: linux-pm@vger.kernel.org DRM DRIVER FOR ARM PL111 CLCD M: Eric Anholt T: git git://anongit.freedesktop.org/drm/drm-misc S: Supported F: drivers/gpu/drm/pl111/ DRM DRIVER FOR ARM VERSATILE TFT PANELS M: Linus Walleij T: git git://anongit.freedesktop.org/drm/drm-misc S: Maintained F: drivers/gpu/drm/panel/panel-arm-versatile.c F: Documentation/devicetree/bindings/display/panel/arm,versatile-tft-panel.txt DRM DRIVER FOR AST SERVER GRAPHICS CHIPS M: Dave Airlie S: Odd Fixes F: drivers/gpu/drm/ast/ DRM DRIVER FOR BOCHS VIRTUAL GPU M: Gerd Hoffmann L: virtualization@lists.linux-foundation.org T: git git://anongit.freedesktop.org/drm/drm-misc S: Maintained F: drivers/gpu/drm/bochs/ DRM DRIVER FOR FARADAY TVE200 TV ENCODER M: Linus Walleij T: git git://anongit.freedesktop.org/drm/drm-misc S: Maintained F: drivers/gpu/drm/tve200/ DRM DRIVER FOR ILITEK ILI9225 PANELS M: David Lechner S: Maintained F: drivers/gpu/drm/tinydrm/ili9225.c F: Documentation/devicetree/bindings/display/ilitek,ili9225.txt DRM DRIVER FOR HX8357D PANELS M: Eric Anholt T: git git://anongit.freedesktop.org/drm/drm-misc S: Maintained F: drivers/gpu/drm/tinydrm/hx8357d.c F: Documentation/devicetree/bindings/display/himax,hx8357d.txt DRM DRIVER FOR INTEL I810 VIDEO CARDS S: Orphan / Obsolete F: drivers/gpu/drm/i810/ F: include/uapi/drm/i810_drm.h DRM DRIVER FOR MATROX G200/G400 GRAPHICS CARDS S: Orphan / Obsolete F: drivers/gpu/drm/mga/ F: include/uapi/drm/mga_drm.h DRM DRIVER FOR MGA G200 SERVER GRAPHICS CHIPS M: Dave Airlie S: Odd Fixes F: drivers/gpu/drm/mgag200/ DRM DRIVER FOR MI0283QT M: Noralf Trønnes S: Maintained F: drivers/gpu/drm/tinydrm/mi0283qt.c F: Documentation/devicetree/bindings/display/multi-inno,mi0283qt.txt DRM DRIVER FOR MSM ADRENO GPU M: Rob Clark L: linux-arm-msm@vger.kernel.org L: 
L: dri-devel@lists.freedesktop.org
L: freedreno@lists.freedesktop.org
T: git git://people.freedesktop.org/~robclark/linux
S: Maintained
F: drivers/gpu/drm/msm/
F: include/uapi/drm/msm_drm.h
F: Documentation/devicetree/bindings/display/msm/

DRM DRIVER FOR NVIDIA GEFORCE/QUADRO GPUS
M: Ben Skeggs
L: dri-devel@lists.freedesktop.org
L: nouveau@lists.freedesktop.org
T: git git://github.com/skeggsb/linux
S: Supported
F: drivers/gpu/drm/nouveau/
F: include/uapi/drm/nouveau_drm.h

DRM DRIVER FOR OLIMEX LCD-OLINUXINO PANELS
M: Stefan Mavrodiev
S: Maintained
F: drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c
F: Documentation/devicetree/bindings/display/panel/olimex,lcd-olinuxino.txt

DRM DRIVER FOR PERVASIVE DISPLAYS REPAPER PANELS
M: Noralf Trønnes
S: Maintained
F: drivers/gpu/drm/tinydrm/repaper.c
F: Documentation/devicetree/bindings/display/repaper.txt

DRM DRIVER FOR QEMU'S CIRRUS DEVICE
M: Dave Airlie
M: Gerd Hoffmann
L: virtualization@lists.linux-foundation.org
T: git git://anongit.freedesktop.org/drm/drm-misc
S: Obsolete
W: https://www.kraxel.org/blog/2014/10/qemu-using-cirrus-considered-harmful/
F: drivers/gpu/drm/cirrus/

DRM DRIVER FOR QXL VIRTUAL GPU
M: Dave Airlie
M: Gerd Hoffmann
L: virtualization@lists.linux-foundation.org
T: git git://anongit.freedesktop.org/drm/drm-misc
S: Maintained
F: drivers/gpu/drm/qxl/
F: include/uapi/drm/qxl_drm.h

DRM DRIVER FOR RAGE 128 VIDEO CARDS
S: Orphan / Obsolete
F: drivers/gpu/drm/r128/
F: include/uapi/drm/r128_drm.h

DRM DRIVER FOR SAVAGE VIDEO CARDS
S: Orphan / Obsolete
F: drivers/gpu/drm/savage/
F: include/uapi/drm/savage_drm.h

DRM DRIVER FOR SIS VIDEO CARDS
S: Orphan / Obsolete
F: drivers/gpu/drm/sis/
F: include/uapi/drm/sis_drm.h

DRM DRIVER FOR SITRONIX ST7586 PANELS
M: David Lechner
S: Maintained
F: drivers/gpu/drm/tinydrm/st7586.c
F: Documentation/devicetree/bindings/display/sitronix,st7586.txt

DRM DRIVER FOR SITRONIX ST7735R PANELS
M: David Lechner
S: Maintained
F: drivers/gpu/drm/tinydrm/st7735r.c
F: Documentation/devicetree/bindings/display/sitronix,st7735r.txt

DRM DRIVER FOR TDFX VIDEO CARDS
S: Orphan / Obsolete
F: drivers/gpu/drm/tdfx/

DRM DRIVER FOR USB DISPLAYLINK VIDEO ADAPTERS
M: Dave Airlie
R: Sean Paul
L: dri-devel@lists.freedesktop.org
S: Odd Fixes
F: drivers/gpu/drm/udl/
T: git git://anongit.freedesktop.org/drm/drm-misc

DRM DRIVER FOR VMWARE VIRTUAL GPU
M: "VMware Graphics"
M: Thomas Hellstrom
L: dri-devel@lists.freedesktop.org
T: git git://people.freedesktop.org/~thomash/linux
S: Supported
F: drivers/gpu/drm/vmwgfx/
F: include/uapi/drm/vmwgfx_drm.h

DRM DRIVERS
M: David Airlie
M: Daniel Vetter
L: dri-devel@lists.freedesktop.org
T: git git://anongit.freedesktop.org/drm/drm
B: https://bugs.freedesktop.org/
C: irc://chat.freenode.net/dri-devel
S: Maintained
F: drivers/gpu/drm/
F: drivers/gpu/vga/
F: Documentation/devicetree/bindings/display/
F: Documentation/devicetree/bindings/gpu/
F: Documentation/gpu/
F: include/drm/
F: include/uapi/drm/
F: include/linux/vga*

DRM DRIVERS AND MISC GPU PATCHES
M: Maarten Lankhorst
M: Maxime Ripard
M: Sean Paul
W: https://01.org/linuxgraphics/gfx-docs/maintainer-tools/drm-misc.html
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
F: Documentation/gpu/
F: drivers/gpu/vga/
F: drivers/gpu/drm/*
F: include/drm/drm*
F: include/uapi/drm/drm*
F: include/linux/vga*

DRM DRIVERS FOR ALLWINNER A10
M: Maxime Ripard
L: dri-devel@lists.freedesktop.org
S: Supported
F: drivers/gpu/drm/sun4i/
F: Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
T: git git://anongit.freedesktop.org/drm/drm-misc
DRM DRIVERS FOR AMLOGIC SOCS
M: Neil Armstrong
L: dri-devel@lists.freedesktop.org
L: linux-amlogic@lists.infradead.org
W: http://linux-meson.com/
S: Supported
F: drivers/gpu/drm/meson/
F: Documentation/devicetree/bindings/display/amlogic,meson-vpu.txt
F: Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.txt
F: Documentation/gpu/meson.rst
T: git git://anongit.freedesktop.org/drm/drm-misc

DRM DRIVERS FOR ATMEL HLCDC
M: Boris Brezillon
L: dri-devel@lists.freedesktop.org
S: Supported
F: drivers/gpu/drm/atmel-hlcdc/
F: Documentation/devicetree/bindings/display/atmel/
T: git git://anongit.freedesktop.org/drm/drm-misc

DRM DRIVERS FOR BRIDGE CHIPS
M: Archit Taneja
M: Andrzej Hajda
R: Laurent Pinchart
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
F: drivers/gpu/drm/bridge/

DRM DRIVERS FOR EXYNOS
M: Inki Dae
M: Joonyoung Shim
M: Seung-Woo Kim
M: Kyungmin Park
L: dri-devel@lists.freedesktop.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/daeinki/drm-exynos.git
S: Supported
F: drivers/gpu/drm/exynos/
F: include/uapi/drm/exynos_drm.h
F: Documentation/devicetree/bindings/display/exynos/

DRM DRIVERS FOR FREESCALE DCU
M: Stefan Agner
M: Alison Wang
L: dri-devel@lists.freedesktop.org
S: Supported
F: drivers/gpu/drm/fsl-dcu/
F: Documentation/devicetree/bindings/display/fsl,dcu.txt
F: Documentation/devicetree/bindings/display/fsl,tcon.txt
F: Documentation/devicetree/bindings/display/panel/nec,nl4827hc19-05b.txt
T: git git://anongit.freedesktop.org/drm/drm-misc

DRM DRIVERS FOR FREESCALE IMX
M: Philipp Zabel
L: dri-devel@lists.freedesktop.org
S: Maintained
F: drivers/gpu/drm/imx/
F: drivers/gpu/ipu-v3/
F: Documentation/devicetree/bindings/display/imx/

DRM DRIVERS FOR GMA500 (Poulsbo, Moorestown and derivative chipsets)
M: Patrik Jakobsson
L: dri-devel@lists.freedesktop.org
T: git git://github.com/patjak/drm-gma500
S: Maintained
F: drivers/gpu/drm/gma500/

DRM DRIVERS FOR HISILICON
M: Xinliang Liu
M: Rongrong Zou
R: Xinwei Kong
R: Chen Feng
L: dri-devel@lists.freedesktop.org
T: git git://github.com/xin3liang/linux.git
S: Maintained
F: drivers/gpu/drm/hisilicon/
F: Documentation/devicetree/bindings/display/hisilicon/

DRM DRIVERS FOR MEDIATEK
M: CK Hu
M: Philipp Zabel
L: dri-devel@lists.freedesktop.org
S: Supported
F: drivers/gpu/drm/mediatek/
F: Documentation/devicetree/bindings/display/mediatek/

DRM DRIVERS FOR NVIDIA TEGRA
M: Thierry Reding
L: dri-devel@lists.freedesktop.org
L: linux-tegra@vger.kernel.org
T: git git://anongit.freedesktop.org/tegra/linux.git
S: Supported
F: drivers/gpu/drm/tegra/
F: drivers/gpu/host1x/
F: include/linux/host1x.h
F: include/uapi/drm/tegra_drm.h
F: Documentation/devicetree/bindings/display/tegra/nvidia,tegra20-host1x.txt

DRM DRIVERS FOR RENESAS
M: Laurent Pinchart
M: Kieran Bingham
L: dri-devel@lists.freedesktop.org
L: linux-renesas-soc@vger.kernel.org
T: git git://linuxtv.org/pinchartl/media drm/du/next
S: Supported
F: drivers/gpu/drm/rcar-du/
F: drivers/gpu/drm/shmobile/
F: include/linux/platform_data/shmob_drm.h
F: Documentation/devicetree/bindings/display/bridge/renesas,dw-hdmi.txt
F: Documentation/devicetree/bindings/display/bridge/renesas,lvds.txt
F: Documentation/devicetree/bindings/display/renesas,du.txt

DRM DRIVERS FOR ROCKCHIP
M: Sandy Huang
M: Heiko Stübner
L: dri-devel@lists.freedesktop.org
S: Maintained
F: drivers/gpu/drm/rockchip/
F: Documentation/devicetree/bindings/display/rockchip/
T: git git://anongit.freedesktop.org/drm/drm-misc

DRM DRIVERS FOR STI
M: Benjamin Gaignard
M: Vincent Abriou
L: dri-devel@lists.freedesktop.org
T: git git://anongit.freedesktop.org/drm/drm-misc
S: Maintained
F: drivers/gpu/drm/sti
F: Documentation/devicetree/bindings/display/st,stih4xx.txt

DRM DRIVERS FOR STM
M: Yannick Fertre
M: Philippe Cornu
M: Benjamin Gaignard
M: Vincent Abriou
L: dri-devel@lists.freedesktop.org
T: git git://anongit.freedesktop.org/drm/drm-misc
S: Maintained
F: drivers/gpu/drm/stm
F: Documentation/devicetree/bindings/display/st,stm32-ltdc.txt

DRM DRIVERS FOR TI LCDC
M: Jyri Sarha
R: Tomi Valkeinen
L: dri-devel@lists.freedesktop.org
S: Maintained
F: drivers/gpu/drm/tilcdc/
F: Documentation/devicetree/bindings/display/tilcdc/

DRM DRIVERS FOR TI OMAP
M: Tomi Valkeinen
L: dri-devel@lists.freedesktop.org
S: Maintained
F: drivers/gpu/drm/omapdrm/
F: Documentation/devicetree/bindings/display/ti/

DRM DRIVERS FOR V3D
M: Eric Anholt
S: Supported
F: drivers/gpu/drm/v3d/
F: include/uapi/drm/v3d_drm.h
F: Documentation/devicetree/bindings/gpu/brcm,bcm-v3d.txt
T: git git://anongit.freedesktop.org/drm/drm-misc

DRM DRIVERS FOR VC4
M: Eric Anholt
T: git git://github.com/anholt/linux
S: Supported
F: drivers/gpu/drm/vc4/
F: include/uapi/drm/vc4_drm.h
F: Documentation/devicetree/bindings/display/brcm,bcm-vc4.txt
T: git git://anongit.freedesktop.org/drm/drm-misc

DRM DRIVERS FOR VIVANTE GPU IP
M: Lucas Stach
R: Russell King
R: Christian Gmeiner
L: etnaviv@lists.freedesktop.org
L: dri-devel@lists.freedesktop.org
S: Maintained
F: drivers/gpu/drm/etnaviv/
F: include/uapi/drm/etnaviv_drm.h
F: Documentation/devicetree/bindings/display/etnaviv/

DRM DRIVERS FOR ZTE ZX
M: Shawn Guo
L: dri-devel@lists.freedesktop.org
S: Maintained
F: drivers/gpu/drm/zte/
F: Documentation/devicetree/bindings/display/zte,vou.txt
T: git git://anongit.freedesktop.org/drm/drm-misc

DRM PANEL DRIVERS
M: Thierry Reding
L: dri-devel@lists.freedesktop.org
T: git git://anongit.freedesktop.org/drm/drm-misc
S: Maintained
F: drivers/gpu/drm/drm_panel.c
F: drivers/gpu/drm/panel/
F: include/drm/drm_panel.h
F: Documentation/devicetree/bindings/display/panel/

DRM TINYDRM DRIVERS
M: Noralf Trønnes
W: https://github.com/notro/tinydrm/wiki/Development
T: git git://anongit.freedesktop.org/drm/drm-misc
S: Maintained
F: drivers/gpu/drm/tinydrm/
F: include/drm/tinydrm/

DRM DRIVERS FOR XEN
M: Oleksandr Andrushchenko
T: git git://anongit.freedesktop.org/drm/drm-misc
L: dri-devel@lists.freedesktop.org
L: xen-devel@lists.xen.org
S: Supported
F: drivers/gpu/drm/xen/
F: Documentation/gpu/xen-front.rst

DRM TTM SUBSYSTEM
M: Christian Koenig
M: Huang Rui
M: Junwei Zhang
T: git git://people.freedesktop.org/~agd5f/linux
S: Maintained
L: dri-devel@lists.freedesktop.org
F: include/drm/ttm/
F: drivers/gpu/drm/ttm/

DSBR100 USB FM RADIO DRIVER
M: Alexey Klimov
L: linux-media@vger.kernel.org
T: git git://linuxtv.org/media_tree.git
S: Maintained
F: drivers/media/radio/dsbr100.c

DSCC4 DRIVER
M: Francois Romieu
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/wan/dscc4.c

DT3155 MEDIA DRIVER
M: Hans Verkuil
L: linux-media@vger.kernel.org
T: git git://linuxtv.org/media_tree.git
W: https://linuxtv.org
S: Odd Fixes
F: drivers/media/pci/dt3155/

DVB_USB_AF9015 MEDIA DRIVER
M: Antti Palosaari
L: linux-media@vger.kernel.org
W: https://linuxtv.org
W: http://palosaari.fi/linux/
Q: http://patchwork.linuxtv.org/project/linux-media/list/
T: git git://linuxtv.org/anttip/media_tree.git
S: Maintained
F: drivers/media/usb/dvb-usb-v2/af9015*

DVB_USB_AF9035 MEDIA DRIVER
M: Antti Palosaari
L: linux-media@vger.kernel.org
W: https://linuxtv.org
W: http://palosaari.fi/linux/
Q: http://patchwork.linuxtv.org/project/linux-media/list/
T: git git://linuxtv.org/anttip/media_tree.git
S: Maintained
F: drivers/media/usb/dvb-usb-v2/af9035*

DVB_USB_ANYSEE MEDIA DRIVER
M: Antti Palosaari
L: linux-media@vger.kernel.org
W: https://linuxtv.org
W: http://palosaari.fi/linux/
Q: http://patchwork.linuxtv.org/project/linux-media/list/
T: git git://linuxtv.org/anttip/media_tree.git
S: Maintained
F: drivers/media/usb/dvb-usb-v2/anysee*

DVB_USB_AU6610 MEDIA DRIVER
M: Antti Palosaari
L: linux-media@vger.kernel.org
W: https://linuxtv.org
W: http://palosaari.fi/linux/
Q: http://patchwork.linuxtv.org/project/linux-media/list/
T: git git://linuxtv.org/anttip/media_tree.git
S: Maintained
F: drivers/media/usb/dvb-usb-v2/au6610*

DVB_USB_CE6230 MEDIA DRIVER
M: Antti Palosaari
L: linux-media@vger.kernel.org
W: https://linuxtv.org
W: http://palosaari.fi/linux/
Q: http://patchwork.linuxtv.org/project/linux-media/list/
T: git git://linuxtv.org/anttip/media_tree.git
S: Maintained
F: drivers/media/usb/dvb-usb-v2/ce6230*

DVB_USB_CXUSB MEDIA DRIVER
M: Michael Krufky
L: linux-media@vger.kernel.org
W: https://linuxtv.org
W: http://github.com/mkrufky
Q: http://patchwork.linuxtv.org/project/linux-media/list/
T: git git://linuxtv.org/media_tree.git
S: Maintained
F: drivers/media/usb/dvb-usb/cxusb*

DVB_USB_EC168 MEDIA DRIVER
M: Antti Palosaari
L: linux-media@vger.kernel.org
W: https://linuxtv.org
W: http://palosaari.fi/linux/
Q: http://patchwork.linuxtv.org/project/linux-media/list/
T: git git://linuxtv.org/anttip/media_tree.git
S: Maintained
F: drivers/media/usb/dvb-usb-v2/ec168*

DVB_USB_GL861 MEDIA DRIVER
M: Antti Palosaari
L: linux-media@vger.kernel.org
W: https://linuxtv.org
Q: http://patchwork.linuxtv.org/project/linux-media/list/
T: git git://linuxtv.org/anttip/media_tree.git
S: Maintained
F: drivers/media/usb/dvb-usb-v2/gl861*

DVB_USB_MXL111SF MEDIA DRIVER
M: Michael Krufky
L: linux-media@vger.kernel.org
W: https://linuxtv.org
W: http://github.com/mkrufky
Q: http://patchwork.linuxtv.org/project/linux-media/list/
T: git git://linuxtv.org/mkrufky/mxl111sf.git
S: Maintained
F: drivers/media/usb/dvb-usb-v2/mxl111sf*

DVB_USB_RTL28XXU MEDIA DRIVER
M: Antti Palosaari
L: linux-media@vger.kernel.org
W: https://linuxtv.org
W: http://palosaari.fi/linux/
Q: http://patchwork.linuxtv.org/project/linux-media/list/
T: git git://linuxtv.org/anttip/media_tree.git
S: Maintained
F: drivers/media/usb/dvb-usb-v2/rtl28xxu*

DVB_USB_V2 MEDIA DRIVER
M: Antti Palosaari
L: linux-media@vger.kernel.org
W: https://linuxtv.org
W: http://palosaari.fi/linux/
Q: http://patchwork.linuxtv.org/project/linux-media/list/
T: git git://linuxtv.org/anttip/media_tree.git
S: Maintained
F: drivers/media/usb/dvb-usb-v2/dvb_usb*
F: drivers/media/usb/dvb-usb-v2/usb_urb.c

DYNAMIC DEBUG
M: Jason Baron
S: Maintained
F: lib/dynamic_debug.c
F: include/linux/dynamic_debug.h

DYNAMIC INTERRUPT MODERATION
M: Tal Gilboa
S: Maintained
F: include/linux/net_dim.h

DZ DECSTATION DZ11 SERIAL DRIVER
Rozycki" S: Maintained F: drivers/tty/serial/dz.* E3X0 POWER BUTTON DRIVER M: Moritz Fischer L: usrp-users@lists.ettus.com W: http://www.ettus.com S: Supported F: drivers/input/misc/e3x0-button.c F: Documentation/devicetree/bindings/input/e3x0-button.txt E4000 MEDIA DRIVER M: Antti Palosaari L: linux-media@vger.kernel.org W: https://linuxtv.org W: http://palosaari.fi/linux/ Q: http://patchwork.linuxtv.org/project/linux-media/list/ T: git git://linuxtv.org/anttip/media_tree.git S: Maintained F: drivers/media/tuners/e4000* EARTH_PT1 MEDIA DRIVER M: Akihiro Tsukada L: linux-media@vger.kernel.org S: Odd Fixes F: drivers/media/pci/pt1/ EARTH_PT3 MEDIA DRIVER M: Akihiro Tsukada L: linux-media@vger.kernel.org S: Odd Fixes F: drivers/media/pci/pt3/ EC100 MEDIA DRIVER M: Antti Palosaari L: linux-media@vger.kernel.org W: https://linuxtv.org W: http://palosaari.fi/linux/ Q: http://patchwork.linuxtv.org/project/linux-media/list/ T: git git://linuxtv.org/anttip/media_tree.git S: Maintained F: drivers/media/dvb-frontends/ec100* ECRYPT FILE SYSTEM M: Tyler Hicks L: ecryptfs@vger.kernel.org W: http://ecryptfs.org W: https://launchpad.net/ecryptfs T: git git://git.kernel.org/pub/scm/linux/kernel/git/tyhicks/ecryptfs.git S: Supported F: Documentation/filesystems/ecryptfs.txt F: fs/ecryptfs/ EDAC-AMD64 M: Borislav Petkov L: linux-edac@vger.kernel.org S: Maintained F: drivers/edac/amd64_edac* EDAC-CALXEDA M: Robert Richter L: linux-edac@vger.kernel.org S: Maintained F: drivers/edac/highbank* EDAC-CAVIUM OCTEON M: Ralf Baechle M: David Daney L: linux-edac@vger.kernel.org L: linux-mips@vger.kernel.org S: Supported F: drivers/edac/octeon_edac* EDAC-CAVIUM THUNDERX M: David Daney M: Jan Glauber L: linux-edac@vger.kernel.org S: Supported F: drivers/edac/thunderx_edac* EDAC-CORE M: Borislav Petkov M: Mauro Carvalho Chehab L: linux-edac@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/bp/bp.git for-next T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-edac.git linux_next S: Supported F: Documentation/admin-guide/ras.rst F: Documentation/driver-api/edac.rst F: drivers/edac/ F: include/linux/edac.h EDAC-E752X M: Mark Gross L: linux-edac@vger.kernel.org S: Maintained F: drivers/edac/e752x_edac.c EDAC-E7XXX L: linux-edac@vger.kernel.org S: Maintained F: drivers/edac/e7xxx_edac.c EDAC-FSL_DDR M: York Sun L: linux-edac@vger.kernel.org S: Maintained F: drivers/edac/fsl_ddr_edac.* EDAC-GHES M: Mauro Carvalho Chehab L: linux-edac@vger.kernel.org S: Maintained F: drivers/edac/ghes_edac.c EDAC-I3000 L: linux-edac@vger.kernel.org S: Orphan F: drivers/edac/i3000_edac.c EDAC-I5000 L: linux-edac@vger.kernel.org S: Maintained F: drivers/edac/i5000_edac.c EDAC-I5400 M: Mauro Carvalho Chehab L: linux-edac@vger.kernel.org S: Maintained F: drivers/edac/i5400_edac.c EDAC-I7300 M: Mauro Carvalho Chehab L: linux-edac@vger.kernel.org S: Maintained F: drivers/edac/i7300_edac.c EDAC-I7CORE M: Mauro Carvalho Chehab L: linux-edac@vger.kernel.org S: Maintained F: drivers/edac/i7core_edac.c EDAC-I82443BXGX M: Tim Small L: linux-edac@vger.kernel.org S: Maintained F: drivers/edac/i82443bxgx_edac.c EDAC-I82975X M: "Arvind R." 
L: linux-edac@vger.kernel.org
S: Maintained
F: drivers/edac/i82975x_edac.c

EDAC-IE31200
M: Jason Baron
L: linux-edac@vger.kernel.org
S: Maintained
F: drivers/edac/ie31200_edac.c

EDAC-MPC85XX
M: Johannes Thumshirn
L: linux-edac@vger.kernel.org
S: Maintained
F: drivers/edac/mpc85xx_edac.[ch]

EDAC-PASEMI
M: Egor Martovetsky
L: linux-edac@vger.kernel.org
S: Maintained
F: drivers/edac/pasemi_edac.c

EDAC-PND2
M: Tony Luck
L: linux-edac@vger.kernel.org
S: Maintained
F: drivers/edac/pnd2_edac.[ch]

EDAC-R82600
M: Tim Small
L: linux-edac@vger.kernel.org
S: Maintained
F: drivers/edac/r82600_edac.c

EDAC-SBRIDGE
M: Tony Luck
R: Qiuxu Zhuo
L: linux-edac@vger.kernel.org
S: Maintained
F: drivers/edac/sb_edac.c

EDAC-SKYLAKE
M: Tony Luck
L: linux-edac@vger.kernel.org
S: Maintained
F: drivers/edac/skx_edac.c

EDAC-TI
M: Tero Kristo
L: linux-edac@vger.kernel.org
S: Maintained
F: drivers/edac/ti_edac.c

EDAC-QCOM
M: Channagoud Kadabi
M: Venkata Narendra Kumar Gutta
L: linux-arm-msm@vger.kernel.org
L: linux-edac@vger.kernel.org
S: Maintained
F: drivers/edac/qcom_edac.c

EDIROL UA-101/UA-1000 DRIVER
M: Clemens Ladisch
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
T: git git://git.alsa-project.org/alsa-kernel.git
S: Maintained
F: sound/usb/misc/ua101.c

EFI TEST DRIVER
L: linux-efi@vger.kernel.org
M: Ivan Hu
M: Ard Biesheuvel
S: Maintained
F: drivers/firmware/efi/test/

EFI VARIABLE FILESYSTEM
M: Matthew Garrett
M: Jeremy Kerr
M: Ard Biesheuvel
T: git git://git.kernel.org/pub/scm/linux/kernel/git/efi/efi.git
L: linux-efi@vger.kernel.org
S: Maintained
F: fs/efivarfs/

EFIFB FRAMEBUFFER DRIVER
L: linux-fbdev@vger.kernel.org
M: Peter Jones
S: Maintained
F: drivers/video/fbdev/efifb.c

EFS FILESYSTEM
W: http://aeschi.ch.eu.org/efs/
S: Orphan
F: fs/efs/

EHEA (IBM pSeries eHEA 10Gb ethernet adapter) DRIVER
M: Douglas Miller
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/ibm/ehea/

EM28XX VIDEO4LINUX DRIVER
M: Mauro Carvalho Chehab
L: linux-media@vger.kernel.org
W: https://linuxtv.org
T: git git://linuxtv.org/media_tree.git
S: Maintained
F: drivers/media/usb/em28xx/
F: Documentation/media/v4l-drivers/em28xx*

EMBEDDED LINUX
M: Paul Gortmaker
M: Matt Mackall
M: David Woodhouse
L: linux-embedded@vger.kernel.org
S: Maintained

Emulex 10Gbps iSCSI - OneConnect DRIVER
M: Subbu Seetharaman
M: Ketan Mukadam
M: Jitendra Bhivare
L: linux-scsi@vger.kernel.org
W: http://www.broadcom.com
S: Supported
F: drivers/scsi/be2iscsi/

Emulex 10Gbps NIC BE2, BE3-R, Lancer, Skyhawk-R DRIVER (be2net)
M: Sathya Perla
M: Ajit Khaparde
M: Sriharsha Basavapatna
M: Somnath Kotur
L: netdev@vger.kernel.org
W: http://www.emulex.com
S: Supported
F: drivers/net/ethernet/emulex/benet/

EMULEX ONECONNECT ROCE DRIVER
M: Selvin Xavier
M: Devesh Sharma
L: linux-rdma@vger.kernel.org
W: http://www.broadcom.com
S: Odd Fixes
F: drivers/infiniband/hw/ocrdma/
F: include/uapi/rdma/ocrdma-abi.h

EMULEX/BROADCOM LPFC FC/FCOE SCSI DRIVER
M: James Smart
M: Dick Kennedy
L: linux-scsi@vger.kernel.org
W: http://www.broadcom.com
S: Supported
F: drivers/scsi/lpfc/

ENE CB710 FLASH CARD READER DRIVER
M: Michał Mirosław
S: Maintained
F: drivers/misc/cb710/
F: drivers/mmc/host/cb710-mmc.*
F: include/linux/cb710.h

ENE KB2426 (ENE0100/ENE020XX) INFRARED RECEIVER
M: Maxim Levitsky
S: Maintained
F: drivers/media/rc/ene_ir.*

EPSON S1D13XXX FRAMEBUFFER DRIVER
M: Kristoffer Ericson
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kristoffer/linux-hpc.git
F: drivers/video/fbdev/s1d13xxxfb.c
F: include/video/s1d13xxxfb.h
ERRSEQ ERROR TRACKING INFRASTRUCTURE
M: Jeff Layton
S: Maintained
F: lib/errseq.c
F: include/linux/errseq.h

ET131X NETWORK DRIVER
M: Mark Einon
S: Odd Fixes
F: drivers/net/ethernet/agere/

ETHERNET BRIDGE
M: Roopa Prabhu
M: Nikolay Aleksandrov
L: bridge@lists.linux-foundation.org (moderated for non-subscribers)
L: netdev@vger.kernel.org
W: http://www.linuxfoundation.org/en/Net:Bridge
S: Maintained
F: include/linux/netfilter_bridge/
F: net/bridge/

ETHERNET PHY LIBRARY
M: Andrew Lunn
M: Florian Fainelli
M: Heiner Kallweit
L: netdev@vger.kernel.org
S: Maintained
F: Documentation/ABI/testing/sysfs-bus-mdio
F: Documentation/devicetree/bindings/net/mdio*
F: Documentation/networking/phy.txt
F: drivers/net/phy/
F: drivers/of/of_mdio.c
F: drivers/of/of_net.c
F: include/linux/*mdio*.h
F: include/linux/of_net.h
F: include/linux/phy.h
F: include/linux/phy_fixed.h
F: include/linux/platform_data/mdio-bcm-unimac.h
F: include/linux/platform_data/mdio-gpio.h
F: include/trace/events/mdio.h
F: include/uapi/linux/mdio.h
F: include/uapi/linux/mii.h

EXT2 FILE SYSTEM
M: Jan Kara
L: linux-ext4@vger.kernel.org
S: Maintained
F: Documentation/filesystems/ext2.txt
F: fs/ext2/
F: include/linux/ext2*

EXT4 FILE SYSTEM
M: "Theodore Ts'o"
M: Andreas Dilger
L: linux-ext4@vger.kernel.org
W: http://ext4.wiki.kernel.org
Q: http://patchwork.ozlabs.org/project/linux-ext4/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4.git
S: Maintained
F: Documentation/filesystems/ext4/
F: fs/ext4/

Extended Verification Module (EVM)
M: Mimi Zohar
L: linux-integrity@vger.kernel.org
S: Supported
F: security/integrity/evm/

EXTENSIBLE FIRMWARE INTERFACE (EFI)
M: Ard Biesheuvel
L: linux-efi@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/efi/efi.git
S: Maintained
F: Documentation/efi-stub.txt
F: arch/*/kernel/efi.c
F: arch/x86/boot/compressed/eboot.[ch]
F: arch/*/include/asm/efi.h
F: arch/x86/platform/efi/
F: drivers/firmware/efi/
F: include/linux/efi*.h
F: arch/arm/boot/compressed/efi-header.S
F: arch/arm64/kernel/efi-entry.S

EXTERNAL CONNECTOR SUBSYSTEM (EXTCON)
M: MyungJoo Ham
M: Chanwoo Choi
L: linux-kernel@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/chanwoo/extcon.git
S: Maintained
F: drivers/extcon/
F: include/linux/extcon/
F: include/linux/extcon.h
F: Documentation/extcon/
F: Documentation/devicetree/bindings/extcon/

EXYNOS DP DRIVER
M: Jingoo Han
L: dri-devel@lists.freedesktop.org
S: Maintained
F: drivers/gpu/drm/exynos/exynos_dp*

EXYNOS SYSMMU (IOMMU) driver
M: Marek Szyprowski
L: iommu@lists.linux-foundation.org
S: Maintained
F: drivers/iommu/exynos-iommu.c

EZchip NPS platform support
M: Vineet Gupta
M: Ofer Levi
S: Supported
F: arch/arc/plat-eznps
F: arch/arc/boot/dts/eznps.dts

F2FS FILE SYSTEM
M: Jaegeuk Kim
M: Chao Yu
L: linux-f2fs-devel@lists.sourceforge.net
W: https://f2fs.wiki.kernel.org/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs.git
S: Maintained
F: Documentation/filesystems/f2fs.txt
F: Documentation/ABI/testing/sysfs-fs-f2fs
F: fs/f2fs/
F: include/linux/f2fs_fs.h
F: include/trace/events/f2fs.h

F71805F HARDWARE MONITORING DRIVER
M: Jean Delvare
L: linux-hwmon@vger.kernel.org
S: Maintained
F: Documentation/hwmon/f71805f
F: drivers/hwmon/f71805f.c

FADDR2LINE
M: Josh Poimboeuf
S: Maintained
F: scripts/faddr2line

FAILOVER MODULE
M: Sridhar Samudrala
L: netdev@vger.kernel.org
S: Supported
F: net/core/failover.c
F: include/net/failover.h
F: Documentation/networking/failover.rst

FANOTIFY
M: Jan Kara
R: Amir Goldstein
L: linux-fsdevel@vger.kernel.org
S: Maintained
F: fs/notify/fanotify/
F: include/linux/fanotify.h
F: include/uapi/linux/fanotify.h

FARSYNC SYNCHRONOUS DRIVER
M: Kevin Curtis
W: http://www.farsite.co.uk/
S: Supported
F: drivers/net/wan/farsync.*

FAULT INJECTION SUPPORT
M: Akinobu Mita
S: Supported
F: Documentation/fault-injection/
F: lib/fault-inject.c

FBTFT Framebuffer drivers
S: Orphan
L: dri-devel@lists.freedesktop.org
L: linux-fbdev@vger.kernel.org
F: drivers/staging/fbtft/

FC0011 TUNER DRIVER
M: Michael Buesch
L: linux-media@vger.kernel.org
S: Maintained
F: drivers/media/tuners/fc0011.h
F: drivers/media/tuners/fc0011.c

FC2580 MEDIA DRIVER
M: Antti Palosaari
L: linux-media@vger.kernel.org
W: https://linuxtv.org
W: http://palosaari.fi/linux/
Q: http://patchwork.linuxtv.org/project/linux-media/list/
T: git git://linuxtv.org/anttip/media_tree.git
S: Maintained
F: drivers/media/tuners/fc2580*

FCOE SUBSYSTEM (libfc, libfcoe, fcoe)
M: Johannes Thumshirn
L: linux-scsi@vger.kernel.org
W: www.Open-FCoE.org
S: Supported
F: drivers/scsi/libfc/
F: drivers/scsi/fcoe/
F: include/scsi/fc/
F: include/scsi/libfc.h
F: include/scsi/libfcoe.h
F: include/uapi/scsi/fc/

FILE LOCKING (flock() and fcntl()/lockf())
M: Jeff Layton
M: "J. Bruce Fields"
L: linux-fsdevel@vger.kernel.org
S: Maintained
F: include/linux/fcntl.h
F: include/uapi/linux/fcntl.h
F: fs/fcntl.c
F: fs/locks.c

FILESYSTEMS (VFS and infrastructure)
M: Alexander Viro
L: linux-fsdevel@vger.kernel.org
S: Maintained
F: fs/*
F: include/linux/fs.h
F: include/uapi/linux/fs.h

FINTEK F75375S HARDWARE MONITOR AND FAN CONTROLLER DRIVER
M: Riku Voipio
L: linux-hwmon@vger.kernel.org
S: Maintained
F: drivers/hwmon/f75375s.c
F: include/linux/f75375s.h

FIREWIRE AUDIO DRIVERS
M: Clemens Ladisch
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
T: git git://git.alsa-project.org/alsa-kernel.git
S: Maintained
F: sound/firewire/

FIREWIRE MEDIA DRIVERS (firedtv)
M: Stefan Richter
L: linux-media@vger.kernel.org
L: linux1394-devel@lists.sourceforge.net
T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-media.git
S: Maintained
F: drivers/media/firewire/

FIREWIRE SBP-2 TARGET
M: Chris Boot
L: linux-scsi@vger.kernel.org
L: target-devel@vger.kernel.org
L: linux1394-devel@lists.sourceforge.net
T: git git://git.kernel.org/pub/scm/linux/kernel/git/nab/lio-core-2.6.git master
S: Maintained
F: drivers/target/sbp/

FIREWIRE SUBSYSTEM
M: Stefan Richter
L: linux1394-devel@lists.sourceforge.net
W: http://ieee1394.wiki.kernel.org/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394.git
S: Maintained
F: drivers/firewire/
F: include/linux/firewire.h
F: include/uapi/linux/firewire*.h
F: tools/firewire/

FIRMWARE LOADER (request_firmware)
M: Luis Chamberlain
L: linux-kernel@vger.kernel.org
S: Maintained
F: Documentation/firmware_class/
F: drivers/base/firmware_loader/
F: include/linux/firmware.h

FLASH ADAPTER DRIVER (IBM Flash Adapter 900GB Full Height PCI Flash Card)
M: Joshua Morris
M: Philip Kelleher
S: Maintained
F: drivers/block/rsxx/

FLOPPY DRIVER
M: Jiri Kosina
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/floppy.git
S: Odd fixes
F: drivers/block/floppy.c

FMC SUBSYSTEM
M: Alessandro Rubini
W: http://www.ohwr.org/projects/fmc-bus
S: Supported
F: drivers/fmc/
F: include/linux/fmc*.h
F: include/linux/ipmi-fru.h
K: fmc_d.*register

FPGA MANAGER FRAMEWORK
M: Alan Tull
M: Moritz Fischer
L: linux-fpga@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/atull/linux-fpga.git
Q: http://patchwork.kernel.org/project/linux-fpga/list/
F: Documentation/fpga/
F: Documentation/driver-api/fpga/
F: Documentation/devicetree/bindings/fpga/
F: drivers/fpga/
F: include/linux/fpga/
W: http://www.rocketboards.org

FPGA DFL DRIVERS
M: Wu Hao
L: linux-fpga@vger.kernel.org
S: Maintained
F: Documentation/fpga/dfl.txt
F: include/uapi/linux/fpga-dfl.h
F: drivers/fpga/dfl*

FPU EMULATOR
M: Bill Metzenthen
W: http://floatingpoint.sourceforge.net/emulator/index.html
S: Maintained
F: arch/x86/math-emu/

FRAME RELAY DLCI/FRAD (Sangoma drivers too)
L: netdev@vger.kernel.org
S: Orphan
F: drivers/net/wan/dlci.c
F: drivers/net/wan/sdla.c

FRAMEBUFFER LAYER
M: Bartlomiej Zolnierkiewicz
L: dri-devel@lists.freedesktop.org
L: linux-fbdev@vger.kernel.org
T: git git://github.com/bzolnier/linux.git
Q: http://patchwork.kernel.org/project/linux-fbdev/list/
S: Maintained
F: Documentation/fb/
F: drivers/video/
F: include/video/
F: include/linux/fb.h
F: include/uapi/video/
F: include/uapi/linux/fb.h

FREESCALE CAAM (Cryptographic Acceleration and Assurance Module) DRIVER
M: Horia Geantă
M: Aymen Sghaier
L: linux-crypto@vger.kernel.org
S: Maintained
F: drivers/crypto/caam/
F: Documentation/devicetree/bindings/crypto/fsl-sec4.txt

FREESCALE DIU FRAMEBUFFER DRIVER
M: Timur Tabi
L: linux-fbdev@vger.kernel.org
S: Maintained
F: drivers/video/fbdev/fsl-diu-fb.*

FREESCALE DMA DRIVER
M: Li Yang
M: Zhang Wei
L: linuxppc-dev@lists.ozlabs.org
S: Maintained
F: drivers/dma/fsldma.*

FREESCALE eTSEC ETHERNET DRIVER (GIANFAR)
M: Claudiu Manoil
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/freescale/gianfar*
F: Documentation/devicetree/bindings/net/fsl-tsec-phy.txt

FREESCALE GPMI NAND DRIVER
M: Han Xu
L: linux-mtd@lists.infradead.org
S: Maintained
F: drivers/mtd/nand/raw/gpmi-nand/*

FREESCALE I2C CPM DRIVER
M: Jochen Friedrich
L: linuxppc-dev@lists.ozlabs.org
L: linux-i2c@vger.kernel.org
S: Maintained
F: drivers/i2c/busses/i2c-cpm.c

FREESCALE IMX LPI2C DRIVER
M: Dong Aisheng
L: linux-i2c@vger.kernel.org
L: linux-imx@nxp.com
S: Maintained
F: drivers/i2c/busses/i2c-imx-lpi2c.c
F: Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.txt

FREESCALE IMX / MXC FEC DRIVER
M: Fugang Duan
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/freescale/fec_main.c
F: drivers/net/ethernet/freescale/fec_ptp.c
F: drivers/net/ethernet/freescale/fec.h
F: Documentation/devicetree/bindings/net/fsl-fec.txt

FREESCALE IMX / MXC FRAMEBUFFER DRIVER
M: Sascha Hauer
R: Pengutronix Kernel Team
L: linux-fbdev@vger.kernel.org
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: include/linux/platform_data/video-imxfb.h
F: drivers/video/fbdev/imxfb.c

FREESCALE QORIQ DPAA ETHERNET DRIVER
M: Madalin Bucur
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/freescale/dpaa

FREESCALE QORIQ DPAA FMAN DRIVER
M: Madalin Bucur
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/freescale/fman
F: Documentation/devicetree/bindings/net/fsl-fman.txt

FREESCALE QORIQ PTP CLOCK DRIVER
M: Yangbo Lu
L: netdev@vger.kernel.org
S: Maintained
F: drivers/ptp/ptp_qoriq.c
F: include/linux/fsl/ptp_qoriq.h
F: Documentation/devicetree/bindings/ptp/ptp-qoriq.txt

FREESCALE QUAD SPI DRIVER
M: Han Xu
L: linux-mtd@lists.infradead.org
S: Maintained
F: drivers/mtd/spi-nor/fsl-quadspi.c

FREESCALE QUICC ENGINE LIBRARY
M: Qiang Zhao
L: linuxppc-dev@lists.ozlabs.org
S: Maintained
F: drivers/soc/fsl/qe/
F: include/soc/fsl/*qe*.h
F: include/soc/fsl/*ucc*.h

FREESCALE QUICC ENGINE UCC ETHERNET DRIVER
M: Li Yang
L: netdev@vger.kernel.org
L: linuxppc-dev@lists.ozlabs.org
S: Maintained
F: drivers/net/ethernet/freescale/ucc_geth*

FREESCALE QUICC ENGINE UCC HDLC DRIVER
M: Zhao Qiang
L: netdev@vger.kernel.org
L: linuxppc-dev@lists.ozlabs.org
S: Maintained
F: drivers/net/wan/fsl_ucc_hdlc*

FREESCALE QUICC ENGINE UCC UART DRIVER
M: Timur Tabi
L: linuxppc-dev@lists.ozlabs.org
S: Maintained
F: drivers/tty/serial/ucc_uart.c

FREESCALE SOC DRIVERS
M: Li Yang
L: linuxppc-dev@lists.ozlabs.org
L: linux-arm-kernel@lists.infradead.org
S: Maintained
F: Documentation/devicetree/bindings/soc/fsl/
F: drivers/soc/fsl/
F: include/linux/fsl/

FREESCALE SOC FS_ENET DRIVER
M: Pantelis Antoniou
L: linuxppc-dev@lists.ozlabs.org
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/freescale/fs_enet/
F: include/linux/fs_enet_pd.h

FREESCALE SOC SOUND DRIVERS
M: Timur Tabi
M: Nicolin Chen
M: Xiubo Li
R: Fabio Estevam
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
L: linuxppc-dev@lists.ozlabs.org
S: Maintained
F: sound/soc/fsl/fsl*
F: sound/soc/fsl/imx*
F: sound/soc/fsl/mpc8610_hpcd.c

FREESCALE USB PERIPHERAL DRIVERS
M: Li Yang
L: linux-usb@vger.kernel.org
L: linuxppc-dev@lists.ozlabs.org
S: Maintained
F: drivers/usb/gadget/udc/fsl*

FREEVXFS FILESYSTEM
M: Christoph Hellwig
W: ftp://ftp.openlinux.org/pub/people/hch/vxfs
S: Maintained
F: fs/freevxfs/

FREEZER
M: "Rafael J. Wysocki"
M: Pavel Machek
L: linux-pm@vger.kernel.org
S: Supported
F: Documentation/power/freezing-of-tasks.txt
F: include/linux/freezer.h
F: kernel/freezer.c

FRONTSWAP API
M: Konrad Rzeszutek Wilk
L: linux-kernel@vger.kernel.org
S: Maintained
F: mm/frontswap.c
F: include/linux/frontswap.h

FS-CACHE: LOCAL CACHING FOR NETWORK FILESYSTEMS
M: David Howells
L: linux-cachefs@redhat.com (moderated for non-subscribers)
S: Supported
F: Documentation/filesystems/caching/
F: fs/fscache/
F: include/linux/fscache*.h

FSCRYPT: FILE SYSTEM LEVEL ENCRYPTION SUPPORT
M: Theodore Y. Ts'o
M: Jaegeuk Kim
L: linux-fscrypt@vger.kernel.org
Q: https://patchwork.kernel.org/project/linux-fscrypt/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tytso/fscrypt.git
S: Supported
F: fs/crypto/
F: include/linux/fscrypt*.h
F: Documentation/filesystems/fscrypt.rst

FSI-ATTACHED I2C DRIVER
M: Eddie James
L: linux-i2c@vger.kernel.org
L: openbmc@lists.ozlabs.org (moderated for non-subscribers)
S: Maintained
F: drivers/i2c/busses/i2c-fsi.c
F: Documentation/devicetree/bindings/i2c/i2c-fsi.txt

FSNOTIFY: FILESYSTEM NOTIFICATION INFRASTRUCTURE
M: Jan Kara
R: Amir Goldstein
L: linux-fsdevel@vger.kernel.org
S: Maintained
F: fs/notify/
F: include/linux/fsnotify*.h

FUJITSU LAPTOP EXTRAS
M: Jonathan Woithe
L: platform-driver-x86@vger.kernel.org
S: Maintained
F: drivers/platform/x86/fujitsu-laptop.c

FUJITSU M-5MO LS CAMERA ISP DRIVER
M: Kyungmin Park
M: Heungjun Kim
L: linux-media@vger.kernel.org
S: Maintained
F: drivers/media/i2c/m5mols/
F: include/media/i2c/m5mols.h

FUJITSU TABLET EXTRAS
M: Robert Gerlach
L: platform-driver-x86@vger.kernel.org
S: Maintained
F: drivers/platform/x86/fujitsu-tablet.c

FUSE: FILESYSTEM IN USERSPACE
M: Miklos Szeredi
L: linux-fsdevel@vger.kernel.org
W: http://fuse.sourceforge.net/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/mszeredi/fuse.git
S: Maintained
F: fs/fuse/
F: include/uapi/linux/fuse.h
F: Documentation/filesystems/fuse.txt

FUTEX SUBSYSTEM
M: Thomas Gleixner
M: Ingo Molnar
R: Peter Zijlstra
R: Darren Hart
L: linux-kernel@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git locking/core
S: Maintained
F: kernel/futex.c
F: kernel/futex_compat.c
F: include/asm-generic/futex.h
F: include/linux/futex.h
F: include/uapi/linux/futex.h
F: tools/testing/selftests/futex/
F: tools/perf/bench/futex*
F: Documentation/*futex*

GCC PLUGINS
M: Kees Cook
R: Emese Revfy
L: kernel-hardening@lists.openwall.com
S: Maintained
F: scripts/gcc-plugins/
F: scripts/gcc-plugin.sh
F: scripts/Makefile.gcc-plugins
F: Documentation/gcc-plugins.txt

GASKET DRIVER FRAMEWORK
M: Rob Springer
M: Todd Poynor
M: Ben Chan
S: Maintained
F: drivers/staging/gasket/

GCOV BASED KERNEL PROFILING
M: Peter Oberparleiter
S: Maintained
F: kernel/gcov/
F: Documentation/dev-tools/gcov.rst

GDB KERNEL DEBUGGING HELPER SCRIPTS
M: Jan Kiszka
M: Kieran Bingham
S: Supported
F: scripts/gdb/

GDT SCSI DISK ARRAY CONTROLLER DRIVER
M: Achim Leubner
L: linux-scsi@vger.kernel.org
W: http://www.icp-vortex.com/
S: Supported
F: drivers/scsi/gdt*

GEMTEK FM RADIO RECEIVER DRIVER
M: Hans Verkuil
L: linux-media@vger.kernel.org
T: git git://linuxtv.org/media_tree.git
W: https://linuxtv.org
S: Maintained
F: drivers/media/radio/radio-gemtek*

GENERIC GPIO I2C DRIVER
M: Haavard Skinnemoen
S: Supported
F: drivers/i2c/busses/i2c-gpio.c
F: include/linux/platform_data/i2c-gpio.h

GENERIC GPIO I2C MULTIPLEXER DRIVER
M: Peter Korsgaard
L: linux-i2c@vger.kernel.org
S: Supported
F: drivers/i2c/muxes/i2c-mux-gpio.c
F: include/linux/platform_data/i2c-mux-gpio.h
F: Documentation/i2c/muxes/i2c-mux-gpio

GENERIC HDLC (WAN) DRIVERS
M: Krzysztof Halasa
W: http://www.kernel.org/pub/linux/utils/net/hdlc/
S: Maintained
F: drivers/net/wan/c101.c
F: drivers/net/wan/hd6457*
F: drivers/net/wan/hdlc*
F: drivers/net/wan/n2.c
F: drivers/net/wan/pc300too.c
F: drivers/net/wan/pci200syn.c
F: drivers/net/wan/wanxl*

GENERIC INCLUDE/ASM HEADER FILES
M: Arnd Bergmann
L: linux-arch@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/arnd/asm-generic.git
S: Maintained
F: include/asm-generic/
F: include/uapi/asm-generic/
GENERIC PHY FRAMEWORK
M: Kishon Vijay Abraham I
L: linux-kernel@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kishon/linux-phy.git
S: Supported
F: drivers/phy/
F: include/linux/phy/
F: Documentation/devicetree/bindings/phy/

GENERIC PINCTRL I2C DEMULTIPLEXER DRIVER
M: Wolfram Sang
S: Supported
F: drivers/i2c/muxes/i2c-demux-pinctrl.c

GENERIC PM DOMAINS
M: "Rafael J. Wysocki"
M: Kevin Hilman
M: Ulf Hansson
L: linux-pm@vger.kernel.org
S: Supported
F: drivers/base/power/domain*.c
F: include/linux/pm_domain.h
F: Documentation/devicetree/bindings/power/power_domain.txt

GENERIC RESISTIVE TOUCHSCREEN ADC DRIVER
M: Eugen Hristev
L: linux-input@vger.kernel.org
S: Maintained
F: drivers/input/touchscreen/resistive-adc-touch.c

GENERIC UIO DRIVER FOR PCI DEVICES
M: "Michael S. Tsirkin"
L: kvm@vger.kernel.org
S: Supported
F: drivers/uio/uio_pci_generic.c

GENWQE (IBM Generic Workqueue Card)
M: Frank Haverkamp
S: Supported
F: drivers/misc/genwqe/

GET_MAINTAINER SCRIPT
M: Joe Perches
S: Maintained
F: scripts/get_maintainer.pl

GFS2 FILE SYSTEM
M: Bob Peterson
M: Andreas Gruenbacher
L: cluster-devel@redhat.com
W: http://sources.redhat.com/cluster/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2.git
S: Supported
F: Documentation/filesystems/gfs2*.txt
F: fs/gfs2/
F: include/uapi/linux/gfs2_ondisk.h

GIGASET ISDN DRIVERS
M: Paul Bolle
L: gigaset307x-common@lists.sourceforge.net
W: http://gigaset307x.sourceforge.net/
S: Odd Fixes
F: Documentation/isdn/README.gigaset
F: drivers/isdn/gigaset/
F: include/uapi/linux/gigaset_dev.h

GNSS SUBSYSTEM
M: Johan Hovold
T: git git://git.kernel.org/pub/scm/linux/kernel/git/johan/gnss.git
S: Maintained
F: Documentation/ABI/testing/sysfs-class-gnss
F: Documentation/devicetree/bindings/gnss/
F: drivers/gnss/
F: include/linux/gnss.h

GO7007 MPEG CODEC
M: Hans Verkuil
L: linux-media@vger.kernel.org
S: Maintained
F: drivers/media/usb/go7007/

GOODIX TOUCHSCREEN
M: Bastien Nocera
L: linux-input@vger.kernel.org
S: Maintained
F: drivers/input/touchscreen/goodix.c

GPD POCKET FAN DRIVER
M: Hans de Goede
L: platform-driver-x86@vger.kernel.org
S: Maintained
F: drivers/platform/x86/gpd-pocket-fan.c

GPIO ACPI SUPPORT
M: Mika Westerberg
M: Andy Shevchenko
L: linux-gpio@vger.kernel.org
L: linux-acpi@vger.kernel.org
S: Maintained
F: Documentation/acpi/gpio-properties.txt
F: drivers/gpio/gpiolib-acpi.c

GPIO IR Transmitter
M: Sean Young
L: linux-media@vger.kernel.org
S: Maintained
F: drivers/media/rc/gpio-ir-tx.c

GPIO MOCKUP DRIVER
M: Bamvor Jian Zhang
L: linux-gpio@vger.kernel.org
S: Maintained
F: drivers/gpio/gpio-mockup.c
F: tools/testing/selftests/gpio/

GPIO SUBSYSTEM
M: Linus Walleij
M: Bartosz Golaszewski
L: linux-gpio@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-gpio.git
S: Maintained
F: Documentation/devicetree/bindings/gpio/
F: Documentation/driver-api/gpio/
F: Documentation/gpio/
F: Documentation/ABI/testing/gpio-cdev
F: Documentation/ABI/obsolete/sysfs-gpio
F: drivers/gpio/
F: include/linux/gpio/
F: include/linux/gpio.h
F: include/linux/of_gpio.h
F: include/asm-generic/gpio.h
F: include/uapi/linux/gpio.h
F: tools/gpio/

GRE DEMULTIPLEXER DRIVER
M: Dmitry Kozlov
L: netdev@vger.kernel.org
S: Maintained
F: net/ipv4/gre_demux.c
F: net/ipv4/gre_offload.c
F: include/net/gre.h

GRETH 10/100/1G Ethernet MAC device driver
M: Andreas Larsson
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/aeroflex/

GREYBUS AUDIO PROTOCOLS DRIVERS
M: Vaibhav Agarwal
M: Mark Greer
S: Maintained
F: drivers/staging/greybus/audio_apbridgea.c
F: drivers/staging/greybus/audio_apbridgea.h
F: drivers/staging/greybus/audio_codec.c
F: drivers/staging/greybus/audio_codec.h
F: drivers/staging/greybus/audio_gb.c
F: drivers/staging/greybus/audio_manager.c
F: drivers/staging/greybus/audio_manager.h
F: drivers/staging/greybus/audio_manager_module.c
F: drivers/staging/greybus/audio_manager_private.h
F: drivers/staging/greybus/audio_manager_sysfs.c
F: drivers/staging/greybus/audio_module.c
F: drivers/staging/greybus/audio_topology.c

GREYBUS FW/HID/SPI PROTOCOLS DRIVERS
M: Viresh Kumar
S: Maintained
F: drivers/staging/greybus/authentication.c
F: drivers/staging/greybus/bootrom.c
F: drivers/staging/greybus/firmware.h
F: drivers/staging/greybus/fw-core.c
F: drivers/staging/greybus/fw-download.c
F: drivers/staging/greybus/fw-management.c
F: drivers/staging/greybus/greybus_authentication.h
F: drivers/staging/greybus/greybus_firmware.h
F: drivers/staging/greybus/hid.c
F: drivers/staging/greybus/i2c.c
F: drivers/staging/greybus/spi.c
F: drivers/staging/greybus/spilib.c
F: drivers/staging/greybus/spilib.h

GREYBUS LOOPBACK DRIVER
M: Bryan O'Donoghue
S: Maintained
F: drivers/staging/greybus/loopback.c

GREYBUS PLATFORM DRIVERS
M: Vaibhav Hiremath
S: Maintained
F: drivers/staging/greybus/arche-platform.c
F: drivers/staging/greybus/arche-apb-ctrl.c
F: drivers/staging/greybus/arche_platform.h

GREYBUS SDIO/GPIO/SPI PROTOCOLS DRIVERS
M: Rui Miguel Silva
S: Maintained
F: drivers/staging/greybus/sdio.c
F: drivers/staging/greybus/light.c
F: drivers/staging/greybus/gpio.c
F: drivers/staging/greybus/power_supply.c
F: drivers/staging/greybus/spi.c
F: drivers/staging/greybus/spilib.c

GREYBUS SUBSYSTEM
M: Johan Hovold
M: Alex Elder
M: Greg Kroah-Hartman
S: Maintained
F: drivers/staging/greybus/
L: greybus-dev@lists.linaro.org (moderated for non-subscribers)

GREYBUS UART PROTOCOLS DRIVERS
M: David Lin
S: Maintained
F: drivers/staging/greybus/uart.c
F: drivers/staging/greybus/log.c

GS1662 VIDEO SERIALIZER
M: Charles-Antoine Couret
L: linux-media@vger.kernel.org
T: git git://linuxtv.org/media_tree.git
S: Maintained
F: drivers/media/spi/gs1662.c

GSPCA FINEPIX SUBDRIVER
M: Frank Zago
L: linux-media@vger.kernel.org
T: git git://linuxtv.org/media_tree.git
S: Maintained
F: drivers/media/usb/gspca/finepix.c

GSPCA GL860 SUBDRIVER
M: Olivier Lorin
L: linux-media@vger.kernel.org
T: git git://linuxtv.org/media_tree.git
S: Maintained
F: drivers/media/usb/gspca/gl860/

GSPCA M5602 SUBDRIVER
M: Erik Andren
L: linux-media@vger.kernel.org
T: git git://linuxtv.org/media_tree.git
S: Maintained
F: drivers/media/usb/gspca/m5602/

GSPCA PAC207 SONIXB SUBDRIVER
M: Hans Verkuil
L: linux-media@vger.kernel.org
T: git git://linuxtv.org/media_tree.git
S: Odd Fixes
F: drivers/media/usb/gspca/pac207.c

GSPCA SN9C20X SUBDRIVER
M: Brian Johnson
L: linux-media@vger.kernel.org
T: git git://linuxtv.org/media_tree.git
S: Maintained
F: drivers/media/usb/gspca/sn9c20x.c

GSPCA T613 SUBDRIVER
M: Leandro Costantino
L: linux-media@vger.kernel.org
T: git git://linuxtv.org/media_tree.git
S: Maintained
F: drivers/media/usb/gspca/t613.c

GSPCA USB WEBCAM DRIVER
M: Hans Verkuil
L: linux-media@vger.kernel.org
T: git git://linuxtv.org/media_tree.git
S: Odd Fixes
F: drivers/media/usb/gspca/

GTP (GPRS Tunneling Protocol)
M: Pablo Neira Ayuso
M: Harald Welte
L: osmocom-net-gprs@lists.osmocom.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/pablo/gtp.git
S: Maintained
F: drivers/net/gtp.c

GUID PARTITION TABLE (GPT)
M: Davidlohr Bueso
L: linux-efi@vger.kernel.org
S: Maintained
F: block/partitions/efi.*

H8/300 ARCHITECTURE
M: Yoshinori Sato
L: uclinux-h8-devel@lists.sourceforge.jp (moderated for non-subscribers)
W: http://uclinux-h8.sourceforge.jp
T: git git://git.sourceforge.jp/gitroot/uclinux-h8/linux.git
S: Maintained
F: arch/h8300/
F: drivers/clocksource/h8300_*.c
F: drivers/clk/h8300/
F: drivers/irqchip/irq-renesas-h8*.c

HACKRF MEDIA DRIVER
M: Antti Palosaari
L: linux-media@vger.kernel.org
W: https://linuxtv.org
W: http://palosaari.fi/linux/
Q: http://patchwork.linuxtv.org/project/linux-media/list/
T: git git://linuxtv.org/anttip/media_tree.git
S: Maintained
F: drivers/media/usb/hackrf/

HARD DRIVE ACTIVE PROTECTION SYSTEM (HDAPS) DRIVER
M: Frank Seidel
L: platform-driver-x86@vger.kernel.org
W: http://www.kernel.org/pub/linux/kernel/people/fseidel/hdaps/
S: Maintained
F: drivers/platform/x86/hdaps.c

HARDWARE MONITORING
M: Jean Delvare
M: Guenter Roeck
L: linux-hwmon@vger.kernel.org
W: http://hwmon.wiki.kernel.org/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/groeck/linux-staging.git
S: Maintained
F: Documentation/devicetree/bindings/hwmon/
F: Documentation/hwmon/
F: drivers/hwmon/
F: include/linux/hwmon*.h
F: include/trace/events/hwmon*.h

HARDWARE RANDOM NUMBER GENERATOR CORE
M: Matt Mackall
M: Herbert Xu
L: linux-crypto@vger.kernel.org
S: Odd fixes
F: Documentation/devicetree/bindings/rng/
F: Documentation/hw_random.txt
F: drivers/char/hw_random/
F: include/linux/hw_random.h

HARDWARE TRACING FACILITIES
M: Alexander Shishkin
S: Maintained
F: drivers/hwtracing/

HARDWARE SPINLOCK CORE
M: Ohad Ben-Cohen
M: Bjorn Andersson
L: linux-remoteproc@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/ohad/hwspinlock.git
F: Documentation/devicetree/bindings/hwlock/
F: Documentation/hwspinlock.txt
F: drivers/hwspinlock/
F: include/linux/hwspinlock.h

HARMONY SOUND DRIVER
L: linux-parisc@vger.kernel.org
S: Maintained
F: sound/parisc/harmony.*

HDPVR USB VIDEO ENCODER DRIVER
M: Hans Verkuil
L: linux-media@vger.kernel.org
T: git git://linuxtv.org/media_tree.git
W: https://linuxtv.org
S: Odd Fixes
F: drivers/media/usb/hdpvr/

HEWLETT PACKARD ENTERPRISE ILO NMI WATCHDOG DRIVER
M: Jerry Hoemann
S: Supported
F: Documentation/watchdog/hpwdt.txt
F: drivers/watchdog/hpwdt.c

HEWLETT-PACKARD SMART ARRAY RAID DRIVER (hpsa)
M: Don Brace
L: esc.storagedev@microsemi.com
L: linux-scsi@vger.kernel.org
S: Supported
F: Documentation/scsi/hpsa.txt
F: drivers/scsi/hpsa*.[ch]
F: include/linux/cciss*.h
F: include/uapi/linux/cciss*.h

HFI1 DRIVER
M: Mike Marciniszyn
M: Dennis Dalessandro
L: linux-rdma@vger.kernel.org
S: Supported
F: drivers/infiniband/hw/hfi1

HFS FILESYSTEM
L: linux-fsdevel@vger.kernel.org
S: Orphan
F: Documentation/filesystems/hfs.txt
F: fs/hfs/

HFSPLUS FILESYSTEM
L: linux-fsdevel@vger.kernel.org
S: Orphan
F: Documentation/filesystems/hfsplus.txt
F: fs/hfsplus/

HGA FRAMEBUFFER DRIVER
M: Ferenc Bakonyi
L: linux-nvidia@lists.surfsouth.com
W: http://drama.obuda.kando.hu/~fero/cgi-bin/hgafb.shtml
S: Maintained
F: drivers/video/fbdev/hgafb.c

HIBERNATION (aka Software Suspend, aka swsusp)
M: "Rafael J. Wysocki"
Wysocki" M: Pavel Machek L: linux-pm@vger.kernel.org B: https://bugzilla.kernel.org S: Supported F: arch/x86/power/ F: drivers/base/power/ F: kernel/power/ F: include/linux/suspend.h F: include/linux/freezer.h F: include/linux/pm.h F: arch/*/include/asm/suspend*.h HID CORE LAYER M: Jiri Kosina M: Benjamin Tissoires L: linux-input@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/hid/hid.git S: Maintained F: drivers/hid/ F: include/linux/hid* F: include/uapi/linux/hid* HID SENSOR HUB DRIVERS M: Jiri Kosina M: Jonathan Cameron M: Srinivas Pandruvada L: linux-input@vger.kernel.org L: linux-iio@vger.kernel.org S: Maintained F: Documentation/hid/hid-sensor* F: drivers/hid/hid-sensor-* F: drivers/iio/*/hid-* F: include/linux/hid-sensor-* HIGH-RESOLUTION TIMERS, CLOCKEVENTS M: Thomas Gleixner L: linux-kernel@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core S: Maintained F: Documentation/timers/ F: kernel/time/hrtimer.c F: kernel/time/clockevents.c F: kernel/time/timer_*.c F: include/linux/clockchips.h F: include/linux/hrtimer.h HIGH-SPEED SCC DRIVER FOR AX.25 L: linux-hams@vger.kernel.org S: Orphan F: drivers/net/hamradio/dmascc.c F: drivers/net/hamradio/scc.c HIGHPOINT ROCKETRAID 3xxx RAID DRIVER M: HighPoint Linux Team W: http://www.highpoint-tech.com S: Supported F: Documentation/scsi/hptiop.txt F: drivers/scsi/hptiop.c HIPPI M: Jes Sorensen L: linux-hippi@sunsite.dk S: Maintained F: include/linux/hippidevice.h F: include/uapi/linux/if_hippi.h F: net/802/hippi.c F: drivers/net/hippi/ HISILICON NETWORK SUBSYSTEM 3 DRIVER (HNS3) M: Yisen Zhuang M: Salil Mehta L: netdev@vger.kernel.org W: http://www.hisilicon.com S: Maintained F: drivers/net/ethernet/hisilicon/hns3/ HISILICON LPC BUS DRIVER M: john.garry@huawei.com W: http://www.hisilicon.com S: Maintained F: drivers/bus/hisi_lpc.c F: Documentation/devicetree/bindings/arm/hisilicon/hisilicon-low-pin-count.txt HISILICON NETWORK SUBSYSTEM DRIVER M: Yisen Zhuang M: Salil Mehta L: netdev@vger.kernel.org W: http://www.hisilicon.com S: Maintained F: drivers/net/ethernet/hisilicon/ F: Documentation/devicetree/bindings/net/hisilicon*.txt HISILICON PMU DRIVER M: Shaokun Zhang W: http://www.hisilicon.com S: Supported F: drivers/perf/hisilicon F: Documentation/perf/hisi-pmu.txt HISILICON ROCE DRIVER M: Lijun Ou M: Wei Hu(Xavier) L: linux-rdma@vger.kernel.org S: Maintained F: drivers/infiniband/hw/hns/ F: Documentation/devicetree/bindings/infiniband/hisilicon-hns-roce.txt HISILICON SAS Controller M: John Garry W: http://www.hisilicon.com S: Supported F: drivers/scsi/hisi_sas/ F: Documentation/devicetree/bindings/scsi/hisilicon-sas.txt HMM - Heterogeneous Memory Management M: Jérôme Glisse L: linux-mm@kvack.org S: Maintained F: mm/hmm* F: include/linux/hmm* F: Documentation/vm/hmm.rst HOST AP DRIVER M: Jouni Malinen L: linux-wireless@vger.kernel.org W: http://w1.fi/hostap-driver.html S: Obsolete F: drivers/net/wireless/intersil/hostap/ HP COMPAQ TC1100 TABLET WMI EXTRAS DRIVER L: platform-driver-x86@vger.kernel.org S: Orphan F: drivers/platform/x86/tc1100-wmi.c HP100: Driver for HP 10/100 Mbit/s Voice Grade Network Adapter Series M: Jaroslav Kysela S: Maintained F: drivers/net/ethernet/hp/hp100.* HPET: High Precision Event Timers driver M: Clemens Ladisch S: Maintained F: Documentation/timers/hpet.txt F: drivers/char/hpet.c F: include/linux/hpet.h F: include/uapi/linux/hpet.h HPET: x86 S: Orphan F: arch/x86/kernel/hpet.c F: arch/x86/include/asm/hpet.h HPFS FILESYSTEM M: Mikulas Patocka W: 
S: Maintained
F: fs/hpfs/

HSI SUBSYSTEM
M: Sebastian Reichel
T: git git://git.kernel.org/pub/scm/linux/kernel/git/sre/linux-hsi.git
S: Maintained
F: Documentation/ABI/testing/sysfs-bus-hsi
F: Documentation/driver-api/hsi.rst
F: drivers/hsi/
F: include/linux/hsi/
F: include/uapi/linux/hsi/

HSO 3G MODEM DRIVER
L: linux-usb@vger.kernel.org
S: Orphan
F: drivers/net/usb/hso.c

HSR NETWORK PROTOCOL
M: Arvid Brodin
L: netdev@vger.kernel.org
S: Maintained
F: net/hsr/

HT16K33 LED CONTROLLER DRIVER
M: Robin van der Gracht
S: Maintained
F: drivers/auxdisplay/ht16k33.c
F: Documentation/devicetree/bindings/display/ht16k33.txt

HTCPEN TOUCHSCREEN DRIVER
M: Pau Oliva Fora
L: linux-input@vger.kernel.org
S: Maintained
F: drivers/input/touchscreen/htcpen.c

HTS221 TEMPERATURE-HUMIDITY IIO DRIVER
M: Lorenzo Bianconi
L: linux-iio@vger.kernel.org
W: http://www.st.com/
S: Maintained
F: drivers/iio/humidity/hts221*
F: Documentation/devicetree/bindings/iio/humidity/hts221.txt

HUAWEI ETHERNET DRIVER
M: Aviad Krawczyk
L: netdev@vger.kernel.org
S: Supported
F: Documentation/networking/hinic.txt
F: drivers/net/ethernet/huawei/hinic/

HUGETLB FILESYSTEM
M: Mike Kravetz
L: linux-mm@kvack.org
S: Maintained
F: fs/hugetlbfs/
F: mm/hugetlb.c
F: include/linux/hugetlb.h
F: Documentation/admin-guide/mm/hugetlbpage.rst
F: Documentation/vm/hugetlbfs_reserv.rst
F: Documentation/ABI/testing/sysfs-kernel-mm-hugepages

HVA ST MEDIA DRIVER
M: Jean-Christophe Trotin
L: linux-media@vger.kernel.org
T: git git://linuxtv.org/media_tree.git
W: https://linuxtv.org
S: Supported
F: drivers/media/platform/sti/hva

HWPOISON MEMORY FAILURE HANDLING
M: Naoya Horiguchi
L: linux-mm@kvack.org
S: Maintained
F: mm/memory-failure.c
F: mm/hwpoison-inject.c

HYGON PROCESSOR SUPPORT
M: Pu Wen
L: linux-kernel@vger.kernel.org
S: Maintained
F: arch/x86/kernel/cpu/hygon.c

Hyper-V CORE AND DRIVERS
M: "K. Y. Srinivasan"
Srinivasan" M: Haiyang Zhang M: Stephen Hemminger M: Sasha Levin T: git git://git.kernel.org/pub/scm/linux/kernel/git/hyperv/linux.git L: devel@linuxdriverproject.org S: Supported F: Documentation/networking/device_drivers/microsoft/netvsc.txt F: arch/x86/include/asm/mshyperv.h F: arch/x86/include/asm/trace/hyperv.h F: arch/x86/include/asm/hyperv-tlfs.h F: arch/x86/kernel/cpu/mshyperv.c F: arch/x86/hyperv F: drivers/hid/hid-hyperv.c F: drivers/hv/ F: drivers/input/serio/hyperv-keyboard.c F: drivers/pci/controller/pci-hyperv.c F: drivers/net/hyperv/ F: drivers/scsi/storvsc_drv.c F: drivers/uio/uio_hv_generic.c F: drivers/video/fbdev/hyperv_fb.c F: net/vmw_vsock/hyperv_transport.c F: include/linux/hyperv.h F: include/uapi/linux/hyperv.h F: tools/hv/ F: Documentation/ABI/stable/sysfs-bus-vmbus HYPERVISOR VIRTUAL CONSOLE DRIVER L: linuxppc-dev@lists.ozlabs.org S: Odd Fixes F: drivers/tty/hvc/ I2C ACPI SUPPORT M: Mika Westerberg L: linux-i2c@vger.kernel.org L: linux-acpi@vger.kernel.org S: Maintained F: drivers/i2c/i2c-core-acpi.c I2C CONTROLLER DRIVER FOR NVIDIA GPU M: Ajay Gupta L: linux-i2c@vger.kernel.org S: Maintained F: Documentation/i2c/busses/i2c-nvidia-gpu F: drivers/i2c/busses/i2c-nvidia-gpu.c I2C MUXES M: Peter Rosin L: linux-i2c@vger.kernel.org S: Maintained F: Documentation/i2c/i2c-topology F: Documentation/i2c/muxes/ F: Documentation/devicetree/bindings/i2c/i2c-mux* F: Documentation/devicetree/bindings/i2c/i2c-arb* F: Documentation/devicetree/bindings/i2c/i2c-gate* F: drivers/i2c/i2c-mux.c F: drivers/i2c/muxes/ F: include/linux/i2c-mux.h I2C MV64XXX MARVELL AND ALLWINNER DRIVER M: Gregory CLEMENT L: linux-i2c@vger.kernel.org S: Maintained F: drivers/i2c/busses/i2c-mv64xxx.c I2C OVER PARALLEL PORT M: Jean Delvare L: linux-i2c@vger.kernel.org S: Maintained F: Documentation/i2c/busses/i2c-parport F: Documentation/i2c/busses/i2c-parport-light F: drivers/i2c/busses/i2c-parport.c F: drivers/i2c/busses/i2c-parport-light.c I2C SUBSYSTEM M: Wolfram Sang L: linux-i2c@vger.kernel.org W: https://i2c.wiki.kernel.org/ Q: https://patchwork.ozlabs.org/project/linux-i2c/list/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/wsa/linux.git S: Maintained F: Documentation/devicetree/bindings/i2c/i2c.txt F: Documentation/i2c/ F: drivers/i2c/* F: include/linux/i2c.h F: include/linux/i2c-dev.h F: include/linux/i2c-smbus.h F: include/uapi/linux/i2c.h F: include/uapi/linux/i2c-*.h I2C SUBSYSTEM HOST DRIVERS L: linux-i2c@vger.kernel.org W: https://i2c.wiki.kernel.org/ Q: https://patchwork.ozlabs.org/project/linux-i2c/list/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/wsa/linux.git S: Odd Fixes F: Documentation/devicetree/bindings/i2c/ F: drivers/i2c/algos/ F: drivers/i2c/busses/ I2C-TAOS-EVM DRIVER M: Jean Delvare L: linux-i2c@vger.kernel.org S: Maintained F: Documentation/i2c/busses/i2c-taos-evm F: drivers/i2c/busses/i2c-taos-evm.c I2C-TINY-USB DRIVER M: Till Harbaum L: linux-i2c@vger.kernel.org W: http://www.harbaum.org/till/i2c_tiny_usb S: Maintained F: drivers/i2c/busses/i2c-tiny-usb.c I2C/SMBUS CONTROLLER DRIVERS FOR PC M: Jean Delvare L: linux-i2c@vger.kernel.org S: Maintained F: Documentation/i2c/busses/i2c-ali1535 F: Documentation/i2c/busses/i2c-ali1563 F: Documentation/i2c/busses/i2c-ali15x3 F: Documentation/i2c/busses/i2c-amd756 F: Documentation/i2c/busses/i2c-amd8111 F: Documentation/i2c/busses/i2c-i801 F: Documentation/i2c/busses/i2c-nforce2 F: Documentation/i2c/busses/i2c-piix4 F: Documentation/i2c/busses/i2c-sis5595 F: Documentation/i2c/busses/i2c-sis630 F: 
F: Documentation/i2c/busses/i2c-sis96x
F: Documentation/i2c/busses/i2c-via
F: Documentation/i2c/busses/i2c-viapro
F: drivers/i2c/busses/i2c-ali1535.c
F: drivers/i2c/busses/i2c-ali1563.c
F: drivers/i2c/busses/i2c-ali15x3.c
F: drivers/i2c/busses/i2c-amd756.c
F: drivers/i2c/busses/i2c-amd756-s4882.c
F: drivers/i2c/busses/i2c-amd8111.c
F: drivers/i2c/busses/i2c-i801.c
F: drivers/i2c/busses/i2c-isch.c
F: drivers/i2c/busses/i2c-nforce2.c
F: drivers/i2c/busses/i2c-nforce2-s4985.c
F: drivers/i2c/busses/i2c-piix4.c
F: drivers/i2c/busses/i2c-sis5595.c
F: drivers/i2c/busses/i2c-sis630.c
F: drivers/i2c/busses/i2c-sis96x.c
F: drivers/i2c/busses/i2c-via.c
F: drivers/i2c/busses/i2c-viapro.c

I2C/SMBUS INTEL CHT WHISKEY COVE PMIC DRIVER
M: Hans de Goede
L: linux-i2c@vger.kernel.org
S: Maintained
F: drivers/i2c/busses/i2c-cht-wc.c

I2C/SMBUS ISMT DRIVER
M: Seth Heasley
M: Neil Horman
L: linux-i2c@vger.kernel.org
F: drivers/i2c/busses/i2c-ismt.c
F: Documentation/i2c/busses/i2c-ismt

I2C/SMBUS STUB DRIVER
M: Jean Delvare
L: linux-i2c@vger.kernel.org
S: Maintained
F: drivers/i2c/i2c-stub.c

I3C SUBSYSTEM
M: Boris Brezillon
L: linux-i3c@lists.infradead.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/i3c/linux.git
S: Maintained
F: Documentation/ABI/testing/sysfs-bus-i3c
F: Documentation/devicetree/bindings/i3c/
F: Documentation/driver-api/i3c
F: drivers/i3c/
F: include/linux/i3c/
F: include/dt-bindings/i3c/

I3C DRIVER FOR SYNOPSYS DESIGNWARE
M: Vitor Soares
S: Maintained
F: Documentation/devicetree/bindings/i3c/snps,dw-i3c-master.txt
F: drivers/i3c/master/dw*

IA64 (Itanium) PLATFORM
M: Tony Luck
M: Fenghua Yu
L: linux-ia64@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux.git
S: Maintained
F: arch/ia64/

IBM Power 842 compression accelerator
M: Haren Myneni
S: Supported
F: drivers/crypto/nx/Makefile
F: drivers/crypto/nx/Kconfig
F: drivers/crypto/nx/nx-842*
F: include/linux/sw842.h
F: crypto/842.c
F: lib/842/

IBM Power in-Nest Crypto Acceleration
M: Breno Leitão
M: Nayna Jain
M: Paulo Flabiano Smorigo
L: linux-crypto@vger.kernel.org
S: Supported
F: drivers/crypto/nx/Makefile
F: drivers/crypto/nx/Kconfig
F: drivers/crypto/nx/nx-aes*
F: drivers/crypto/nx/nx-sha*
F: drivers/crypto/nx/nx.*
F: drivers/crypto/nx/nx_csbcpb.h
F: drivers/crypto/nx/nx_debugfs.h

IBM Power Linux RAID adapter
M: Brian King
S: Supported
F: drivers/scsi/ipr.*

IBM Power SRIOV Virtual NIC Device Driver
M: Thomas Falcon
M: John Allen
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/ibm/ibmvnic.*

IBM Power Virtual Accelerator Switchboard
M: Sukadev Bhattiprolu
L: linuxppc-dev@lists.ozlabs.org
S: Supported
F: arch/powerpc/platforms/powernv/vas*
F: arch/powerpc/platforms/powernv/copy-paste.h
F: arch/powerpc/include/asm/vas.h
F: arch/powerpc/include/uapi/asm/vas.h

IBM Power Virtual Ethernet Device Driver
M: Thomas Falcon
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/ibm/ibmveth.*

IBM Power Virtual FC Device Drivers
M: Tyrel Datwyler
L: linux-scsi@vger.kernel.org
S: Supported
F: drivers/scsi/ibmvscsi/ibmvfc*

IBM Power Virtual Management Channel Driver
M: Steven Royer
S: Supported
F: drivers/misc/ibmvmc.*

IBM Power Virtual SCSI Device Drivers
M: Tyrel Datwyler
L: linux-scsi@vger.kernel.org
S: Supported
F: drivers/scsi/ibmvscsi/ibmvscsi*
F: include/scsi/viosrp.h

IBM Power Virtual SCSI Device Target Driver
M: Michael Cyr
L: linux-scsi@vger.kernel.org
L: target-devel@vger.kernel.org
S: Supported
F: drivers/scsi/ibmvscsi_tgt/

IBM Power VMX Cryptographic instructions
M: Breno Leitão
M: Nayna Jain
M: Paulo Flabiano Smorigo
L: linux-crypto@vger.kernel.org
S: Supported
F: drivers/crypto/vmx/Makefile
F: drivers/crypto/vmx/Kconfig
F: drivers/crypto/vmx/vmx.c
F: drivers/crypto/vmx/aes*
F: drivers/crypto/vmx/ghash*
F: drivers/crypto/vmx/ppc-xlate.pl

IBM Power PCI Hotplug Driver for RPA-compliant PPC64 platform
M: Tyrel Datwyler
L: linux-pci@vger.kernel.org
L: linuxppc-dev@lists.ozlabs.org
S: Supported
F: drivers/pci/hotplug/rpaphp*

IBM Power IO DLPAR Driver for RPA-compliant PPC64 platform
M: Tyrel Datwyler
L: linux-pci@vger.kernel.org
L: linuxppc-dev@lists.ozlabs.org
S: Supported
F: drivers/pci/hotplug/rpadlpar*

IBM ServeRAID RAID DRIVER
S: Orphan
F: drivers/scsi/ips.*

ICH LPC AND GPIO DRIVER
M: Peter Tyser
S: Maintained
F: drivers/mfd/lpc_ich.c
F: drivers/gpio/gpio-ich.c

IDE SUBSYSTEM
M: "David S. Miller"
L: linux-ide@vger.kernel.org
Q: http://patchwork.ozlabs.org/project/linux-ide/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/ide.git
S: Maintained
F: Documentation/ide/
F: drivers/ide/
F: include/linux/ide.h

IDE/ATAPI DRIVERS
M: Borislav Petkov
L: linux-ide@vger.kernel.org
S: Maintained
F: Documentation/cdrom/ide-cd
F: drivers/ide/ide-cd*

IDEAPAD LAPTOP EXTRAS DRIVER
M: Ike Panhc
L: platform-driver-x86@vger.kernel.org
W: http://launchpad.net/ideapad-laptop
S: Maintained
F: drivers/platform/x86/ideapad-laptop.c

IDEAPAD LAPTOP SLIDEBAR DRIVER
M: Andrey Moiseev
L: linux-input@vger.kernel.org
W: https://github.com/o2genum/ideapad-slidebar
S: Maintained
F: drivers/input/misc/ideapad_slidebar.c

IDT VersaClock 5 CLOCK DRIVER
M: Marek Vasut
S: Maintained
F: drivers/clk/clk-versaclock5.c

IEEE 802.15.4 SUBSYSTEM
M: Alexander Aring
M: Stefan Schmidt
L: linux-wpan@vger.kernel.org
W: http://wpan.cakelab.org/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/sschmidt/wpan.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/sschmidt/wpan-next.git
S: Maintained
F: net/ieee802154/
F: net/mac802154/
F: drivers/net/ieee802154/
F: include/linux/nl802154.h
F: include/linux/ieee802154.h
F: include/net/nl802154.h
F: include/net/mac802154.h
F: include/net/af_ieee802154.h
F: include/net/cfg802154.h
F: include/net/ieee802154_netdev.h
F: Documentation/networking/ieee802154.txt

IFE PROTOCOL
M: Yotam Gigi
M: Jamal Hadi Salim
F: net/ife
F: include/net/ife.h
F: include/uapi/linux/ife.h

IGORPLUG-USB IR RECEIVER
M: Sean Young
L: linux-media@vger.kernel.org
S: Maintained
F: drivers/media/rc/igorplugusb.c

IGUANAWORKS USB IR TRANSCEIVER
M: Sean Young
L: linux-media@vger.kernel.org
S: Maintained
F: drivers/media/rc/iguanair.c

IIO DIGITAL POTENTIOMETER DAC
M: Peter Rosin
L: linux-iio@vger.kernel.org
S: Maintained
F: Documentation/ABI/testing/sysfs-bus-iio-dac-dpot-dac
F: Documentation/devicetree/bindings/iio/dac/dpot-dac.txt
F: drivers/iio/dac/dpot-dac.c

IIO ENVELOPE DETECTOR
M: Peter Rosin
L: linux-iio@vger.kernel.org
S: Maintained
F: Documentation/ABI/testing/sysfs-bus-iio-adc-envelope-detector
F: Documentation/devicetree/bindings/iio/adc/envelope-detector.txt
F: drivers/iio/adc/envelope-detector.c

IIO MULTIPLEXER
M: Peter Rosin
L: linux-iio@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/iio/multiplexer/io-channel-mux.txt
F: drivers/iio/multiplexer/iio-mux.c

IIO SUBSYSTEM AND DRIVERS
M: Jonathan Cameron
R: Hartmut Knaack
R: Lars-Peter Clausen
R: Peter Meerwald-Stadler
L: linux-iio@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jic23/iio.git
S: Maintained
F: Documentation/ABI/testing/configfs-iio*
F: Documentation/ABI/testing/sysfs-bus-iio*
F: Documentation/devicetree/bindings/iio/
F: drivers/iio/
F: drivers/staging/iio/
F: include/linux/iio/
F: tools/iio/

IIO UNIT CONVERTER
M: Peter Rosin
L: linux-iio@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/iio/afe/current-sense-amplifier.txt
F: Documentation/devicetree/bindings/iio/afe/current-sense-shunt.txt
F: Documentation/devicetree/bindings/iio/afe/voltage-divider.txt
F: drivers/iio/afe/iio-rescale.c

IKANOS/ADI EAGLE ADSL USB DRIVER
M: Matthieu Castet
M: Stanislaw Gruszka
S: Maintained
F: drivers/usb/atm/ueagle-atm.c

IMGTEC ASCII LCD DRIVER
M: Paul Burton
S: Maintained
F: Documentation/devicetree/bindings/auxdisplay/img-ascii-lcd.txt
F: drivers/auxdisplay/img-ascii-lcd.c

IMGTEC IR DECODER DRIVER
M: James Hogan
S: Maintained
F: drivers/media/rc/img-ir/

IMON SOUNDGRAPH USB IR RECEIVER
M: Sean Young
L: linux-media@vger.kernel.org
S: Maintained
F: drivers/media/rc/imon_raw.c
F: drivers/media/rc/imon.c

IMS TWINTURBO FRAMEBUFFER DRIVER
L: linux-fbdev@vger.kernel.org
S: Orphan
F: drivers/video/fbdev/imsttfb.c

INA209 HARDWARE MONITOR DRIVER
M: Guenter Roeck
L: linux-hwmon@vger.kernel.org
S: Maintained
F: Documentation/hwmon/ina209
F: Documentation/devicetree/bindings/hwmon/ina2xx.txt
F: drivers/hwmon/ina209.c

INA2XX HARDWARE MONITOR DRIVER
M: Guenter Roeck
L: linux-hwmon@vger.kernel.org
S: Maintained
F: Documentation/hwmon/ina2xx
F: drivers/hwmon/ina2xx.c
F: include/linux/platform_data/ina2xx.h

INDUSTRY PACK SUBSYSTEM (IPACK)
M: Samuel Iglesias Gonsalvez
M: Jens Taprogge
M: Greg Kroah-Hartman
L: industrypack-devel@lists.sourceforge.net
W: http://industrypack.sourceforge.net
S: Maintained
F: drivers/ipack/

INFINIBAND SUBSYSTEM
M: Doug Ledford
M: Jason Gunthorpe
L: linux-rdma@vger.kernel.org
W: https://github.com/linux-rdma/rdma-core
Q: http://patchwork.kernel.org/project/linux-rdma/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma.git
S: Supported
F: Documentation/devicetree/bindings/infiniband/
F: Documentation/infiniband/
F: drivers/infiniband/
F: include/uapi/linux/if_infiniband.h
F: include/uapi/rdma/
F: include/rdma/

INGENIC JZ4780 DMA Driver
M: Zubair Lutfullah Kakakhel
S: Maintained
F: drivers/dma/dma-jz4780.c

INGENIC JZ4780 NAND DRIVER
M: Harvey Hunt
L: linux-mtd@lists.infradead.org
S: Maintained
F: drivers/mtd/nand/raw/jz4780_*

INOTIFY
M: Jan Kara
R: Amir Goldstein
L: linux-fsdevel@vger.kernel.org
S: Maintained
F: Documentation/filesystems/inotify.txt
F: fs/notify/inotify/
F: include/linux/inotify.h
F: include/uapi/linux/inotify.h

INPUT (KEYBOARD, MOUSE, JOYSTICK, TOUCHSCREEN) DRIVERS
M: Dmitry Torokhov
L: linux-input@vger.kernel.org
Q: http://patchwork.kernel.org/project/linux-input/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/dtor/input.git
S: Maintained
F: drivers/input/
F: include/linux/input.h
F: include/uapi/linux/input.h
F: include/uapi/linux/input-event-codes.h
F: include/linux/input/
F: Documentation/devicetree/bindings/input/
F: Documentation/devicetree/bindings/serio/
F: Documentation/input/

INPUT MULTITOUCH (MT) PROTOCOL
M: Henrik Rydberg
L: linux-input@vger.kernel.org
S: Odd fixes
F: Documentation/input/multi-touch-protocol.rst
F: drivers/input/input-mt.c
K: \b(ABS|SYN)_MT_

INSIDE SECURE CRYPTO DRIVER
M: Antoine Tenart
F: drivers/crypto/inside-secure/
S: Maintained
L: linux-crypto@vger.kernel.org

INTEGRITY MEASUREMENT ARCHITECTURE (IMA)
M: Mimi Zohar
M: Dmitry Kasatkin
L: linux-integrity@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/zohar/linux-integrity.git
S: Supported
F: security/integrity/ima/
INTEL 810/815 FRAMEBUFFER DRIVER
M: Antonino Daplas
L: linux-fbdev@vger.kernel.org
S: Maintained
F: drivers/video/fbdev/i810/

INTEL ASoC DRIVERS
M: Pierre-Louis Bossart
M: Liam Girdwood
M: Jie Yang
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
S: Supported
F: sound/soc/intel/

INTEL ATOMISP2 DUMMY / POWER-MANAGEMENT DRIVER
M: Hans de Goede
L: platform-driver-x86@vger.kernel.org
S: Maintained
F: drivers/platform/x86/intel_atomisp2_pm.c

INTEL C600 SERIES SAS CONTROLLER DRIVER
M: Intel SCU Linux support
M: Artur Paszkiewicz
L: linux-scsi@vger.kernel.org
T: git git://git.code.sf.net/p/intel-sas/isci
S: Supported
F: drivers/scsi/isci/

INTEL DRM DRIVERS (excluding Poulsbo, Moorestown and derivative chipsets)
M: Jani Nikula
M: Joonas Lahtinen
M: Rodrigo Vivi
L: intel-gfx@lists.freedesktop.org
W: https://01.org/linuxgraphics/
B: https://01.org/linuxgraphics/documentation/how-report-bugs
C: irc://chat.freenode.net/intel-gfx
Q: http://patchwork.freedesktop.org/project/intel-gfx/
T: git git://anongit.freedesktop.org/drm-intel
S: Supported
F: drivers/gpu/drm/i915/
F: include/drm/i915*
F: include/uapi/drm/i915_drm.h
F: Documentation/gpu/i915.rst

INTEL ETHERNET DRIVERS
M: Jeff Kirsher
L: intel-wired-lan@lists.osuosl.org (moderated for non-subscribers)
W: http://www.intel.com/support/feedback.htm
W: http://e1000.sourceforge.net/
Q: http://patchwork.ozlabs.org/project/intel-wired-lan/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-queue.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue.git
S: Supported
F: Documentation/networking/device_drivers/intel/e100.rst
F: Documentation/networking/device_drivers/intel/e1000.rst
F: Documentation/networking/device_drivers/intel/e1000e.rst
F: Documentation/networking/device_drivers/intel/fm10k.rst
F: Documentation/networking/device_drivers/intel/igb.rst
F: Documentation/networking/device_drivers/intel/igbvf.rst
F: Documentation/networking/device_drivers/intel/ixgb.rst
F: Documentation/networking/device_drivers/intel/ixgbe.rst
F: Documentation/networking/device_drivers/intel/ixgbevf.rst
F: Documentation/networking/device_drivers/intel/i40e.rst
F: Documentation/networking/device_drivers/intel/iavf.rst
F: Documentation/networking/device_drivers/intel/ice.rst
F: drivers/net/ethernet/intel/
F: drivers/net/ethernet/intel/*/
F: include/linux/avf/virtchnl.h

INTEL FRAMEBUFFER DRIVER (excluding 810 and 815)
M: Maik Broemme
L: linux-fbdev@vger.kernel.org
S: Maintained
F: Documentation/fb/intelfb.txt
F: drivers/video/fbdev/intelfb/

INTEL GPIO DRIVERS
M: Andy Shevchenko
L: linux-gpio@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/andy/linux-gpio-intel.git
F: drivers/gpio/gpio-ich.c
F: drivers/gpio/gpio-intel-mid.c
F: drivers/gpio/gpio-lynxpoint.c
F: drivers/gpio/gpio-merrifield.c
F: drivers/gpio/gpio-ml-ioh.c
F: drivers/gpio/gpio-pch.c
F: drivers/gpio/gpio-sch.c
F: drivers/gpio/gpio-sodaville.c

INTEL GVT-g DRIVERS (Intel GPU Virtualization)
M: Zhenyu Wang
M: Zhi Wang
L: intel-gvt-dev@lists.freedesktop.org
L: intel-gfx@lists.freedesktop.org
W: https://01.org/igvt-g
T: git https://github.com/intel/gvt-linux.git
S: Supported
F: drivers/gpu/drm/i915/gvt/

INTEL HID EVENT DRIVER
M: Alex Hung
L: platform-driver-x86@vger.kernel.org
S: Maintained
F: drivers/platform/x86/intel-hid.c

INTEL I/OAT DMA DRIVER
M: Dave Jiang
R: Dan Williams
L: dmaengine@vger.kernel.org
Q: https://patchwork.kernel.org/project/linux-dmaengine/list/
S: Supported
F: drivers/dma/ioat*

INTEL IDLE DRIVER
M: Jacob Pan
M: Len Brown
L: linux-pm@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux.git
B: https://bugzilla.kernel.org
S: Supported
F: drivers/idle/intel_idle.c

INTEL INTEGRATED SENSOR HUB DRIVER
M: Srinivas Pandruvada
M: Jiri Kosina
L: linux-input@vger.kernel.org
S: Maintained
F: drivers/hid/intel-ish-hid/

INTEL IOMMU (VT-d)
M: David Woodhouse
L: iommu@lists.linux-foundation.org
T: git git://git.infradead.org/iommu-2.6.git
S: Supported
F: drivers/iommu/intel-iommu.c
F: include/linux/intel-iommu.h

INTEL IOP-ADMA DMA DRIVER
R: Dan Williams
S: Odd fixes
F: drivers/dma/iop-adma.c

INTEL IPU3 CSI-2 CIO2 DRIVER
M: Yong Zhi
M: Sakari Ailus
M: Bingbu Cao
R: Tian Shu Qiu
R: Jian Xu Zheng
L: linux-media@vger.kernel.org
S: Maintained
F: drivers/media/pci/intel/ipu3/
F: Documentation/media/uapi/v4l/pixfmt-srggb10-ipu3.rst

INTEL IPU3 CSI-2 IMGU DRIVER
M: Sakari Ailus
L: linux-media@vger.kernel.org
S: Maintained
F: drivers/staging/media/ipu3/
F: Documentation/media/uapi/v4l/pixfmt-meta-intel-ipu3.rst
F: Documentation/media/v4l-drivers/ipu3.rst

INTEL IXP4XX QMGR, NPE, ETHERNET and HSS SUPPORT
M: Krzysztof Halasa
S: Maintained
F: arch/arm/mach-ixp4xx/include/mach/qmgr.h
F: arch/arm/mach-ixp4xx/include/mach/npe.h
F: arch/arm/mach-ixp4xx/ixp4xx_qmgr.c
F: arch/arm/mach-ixp4xx/ixp4xx_npe.c
F: drivers/net/ethernet/xscale/ixp4xx_eth.c
F: drivers/net/wan/ixp4xx_hss.c

INTEL IXP4XX RANDOM NUMBER GENERATOR SUPPORT
M: Deepak Saxena
S: Maintained
F: drivers/char/hw_random/ixp4xx-rng.c

INTEL MANAGEMENT ENGINE (mei)
M: Tomas Winkler
L: linux-kernel@vger.kernel.org
S: Supported
F: include/uapi/linux/mei.h
F: include/linux/mei_cl_bus.h
F: drivers/misc/mei/*
F: drivers/watchdog/mei_wdt.c
F: Documentation/misc-devices/mei/*
F: samples/mei/*

INTEL MENLOW THERMAL DRIVER
M: Sujith Thomas
L: platform-driver-x86@vger.kernel.org
W: https://01.org/linux-acpi
S: Supported
F: drivers/platform/x86/intel_menlow.c

INTEL MIC DRIVERS (mic)
M: Sudeep Dutt
M: Ashutosh Dixit
S: Supported
W: https://github.com/sudeepdutt/mic
W: http://software.intel.com/en-us/mic-developer
F: include/linux/mic_bus.h
F: include/linux/scif.h
F: include/uapi/linux/mic_common.h
F: include/uapi/linux/mic_ioctl.h
F: include/uapi/linux/scif_ioctl.h
F: drivers/misc/mic/
F: drivers/dma/mic_x100_dma.c
F: drivers/dma/mic_x100_dma.h
F: Documentation/mic/

INTEL PMC CORE DRIVER
M: Rajneesh Bhardwaj
M: Vishwanath Somayaji
L: platform-driver-x86@vger.kernel.org
S: Maintained
F: drivers/platform/x86/intel_pmc_core*

INTEL PMC/P-Unit IPC DRIVER
M: Zha Qipeng
L: platform-driver-x86@vger.kernel.org
S: Maintained
F: drivers/platform/x86/intel_pmc_ipc.c
F: drivers/platform/x86/intel_punit_ipc.c
F: arch/x86/include/asm/intel_pmc_ipc.h
F: arch/x86/include/asm/intel_punit_ipc.h

INTEL PMIC GPIO DRIVERS
M: Andy Shevchenko
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/andy/linux-gpio-intel.git
F: drivers/gpio/gpio-*cove.c
F: drivers/gpio/gpio-msic.c

INTEL MULTIFUNCTION PMIC DEVICE DRIVERS
R: Andy Shevchenko
S: Maintained
F: drivers/mfd/intel_msic.c
F: drivers/mfd/intel_soc_pmic*
F: include/linux/mfd/intel_msic.h
F: include/linux/mfd/intel_soc_pmic*

INTEL PRO/WIRELESS 2100, 2200BG, 2915ABG NETWORK CONNECTION SUPPORT
M: Stanislav Yakovlev
L: linux-wireless@vger.kernel.org
S: Maintained
F: Documentation/networking/device_drivers/intel/ipw2100.txt
F: Documentation/networking/device_drivers/intel/ipw2200.txt
F: drivers/net/wireless/intel/ipw2x00/

INTEL PSTATE DRIVER
M: Srinivas Pandruvada
M: Len Brown
L: linux-pm@vger.kernel.org
S: Supported
F: drivers/cpufreq/intel_pstate.c

INTEL RDMA RNIC DRIVER
M: Faisal Latif
M: Shiraz Saleem
L: linux-rdma@vger.kernel.org
S: Supported
F: drivers/infiniband/hw/i40iw/
F: include/uapi/rdma/i40iw-abi.h

INTEL TELEMETRY DRIVER
M: Rajneesh Bhardwaj
M: "David E. Box"
L: platform-driver-x86@vger.kernel.org
S: Maintained
F: arch/x86/include/asm/intel_telemetry.h
F: drivers/platform/x86/intel_telemetry*

INTEL VIRTUAL BUTTON DRIVER
M: AceLan Kao
L: platform-driver-x86@vger.kernel.org
S: Maintained
F: drivers/platform/x86/intel-vbtn.c

INTEL WIRELESS 3945ABG/BG, 4965AGN (iwlegacy)
M: Stanislaw Gruszka
L: linux-wireless@vger.kernel.org
S: Supported
F: drivers/net/wireless/intel/iwlegacy/

INTEL WIRELESS WIFI LINK (iwlwifi)
M: Johannes Berg
M: Emmanuel Grumbach
M: Luca Coelho
M: Intel Linux Wireless
L: linux-wireless@vger.kernel.org
W: http://intellinuxwireless.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi.git
S: Supported
F: drivers/net/wireless/intel/iwlwifi/

INTEL WIRELESS WIMAX CONNECTION 2400
M: Inaky Perez-Gonzalez
M: linux-wimax@intel.com
L: wimax@linuxwimax.org (subscribers-only)
S: Supported
W: http://linuxwimax.org
F: Documentation/wimax/README.i2400m
F: drivers/net/wimax/i2400m/
F: include/uapi/linux/wimax/i2400m.h

INTEL WMI THUNDERBOLT FORCE POWER DRIVER
M: Mario Limonciello
S: Maintained
F: drivers/platform/x86/intel-wmi-thunderbolt.c

INTEL(R) TRACE HUB
M: Alexander Shishkin
S: Supported
F: Documentation/trace/intel_th.rst
F: drivers/hwtracing/intel_th/

INTEL(R) TRUSTED EXECUTION TECHNOLOGY (TXT)
M: Ning Sun
L: tboot-devel@lists.sourceforge.net
W: http://tboot.sourceforge.net
T: hg http://tboot.hg.sourceforge.net:8000/hgroot/tboot/tboot
S: Supported
F: Documentation/intel_txt.txt
F: include/linux/tboot.h
F: arch/x86/kernel/tboot.c

INTEL-MID GPIO DRIVER
M: David Cohen
L: linux-gpio@vger.kernel.org
S: Maintained
F: drivers/gpio/gpio-intel-mid.c

INVENSENSE MPU-3050 GYROSCOPE DRIVER
M: Linus Walleij
L: linux-iio@vger.kernel.org
S: Maintained
F: drivers/iio/gyro/mpu3050*
F: Documentation/devicetree/bindings/iio/gyroscope/invensense,mpu3050.txt

IOC3 ETHERNET DRIVER
M: Ralf Baechle
L: linux-mips@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/sgi/ioc3-eth.c

IOC3 SERIAL DRIVER
M: Pat Gefre
L: linux-serial@vger.kernel.org
S: Maintained
F: drivers/tty/serial/ioc3_serial.c

IOMAP FILESYSTEM LIBRARY
M: Christoph Hellwig
M: Darrick J. Wong
M: linux-xfs@vger.kernel.org
M: linux-fsdevel@vger.kernel.org
L: linux-xfs@vger.kernel.org
L: linux-fsdevel@vger.kernel.org
T: git git://git.kernel.org/pub/scm/fs/xfs/xfs-linux.git
S: Supported
F: fs/iomap.c
F: include/linux/iomap.h

IOMMU DRIVERS
M: Joerg Roedel
L: iommu@lists.linux-foundation.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git
S: Maintained
F: Documentation/devicetree/bindings/iommu/
F: drivers/iommu/
F: include/linux/iommu.h
F: include/linux/of_iommu.h
F: include/linux/iova.h

IP MASQUERADING
M: Juanjo Ciarlante
S: Maintained
F: net/ipv4/netfilter/ipt_MASQUERADE.c

IPMI SUBSYSTEM
M: Corey Minyard
L: openipmi-developer@lists.sourceforge.net (moderated for non-subscribers)
W: http://openipmi.sourceforge.net/
S: Supported
F: Documentation/devicetree/bindings/ipmi/
F: Documentation/IPMI.txt
F: drivers/char/ipmi/
F: include/linux/ipmi*
F: include/uapi/linux/ipmi*

IPS SCSI RAID DRIVER
M: Adaptec OEM Raid Solutions
L: linux-scsi@vger.kernel.org
W: http://www.adaptec.com/
S: Maintained
F: drivers/scsi/ips*

IPVS
M: Wensong Zhang
M: Simon Horman
M: Julian Anastasov
L: netdev@vger.kernel.org
L: lvs-devel@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/horms/ipvs-next.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/horms/ipvs.git
F: Documentation/networking/ipvs-sysctl.txt
F: include/net/ip_vs.h
F: include/uapi/linux/ip_vs.h
F: net/netfilter/ipvs/

IPWIRELESS DRIVER
M: Jiri Kosina
M: David Sterba
S: Odd Fixes
F: drivers/tty/ipwireless/

IPX NETWORK LAYER
L: netdev@vger.kernel.org
S: Obsolete
F: include/uapi/linux/ipx.h

IRQ DOMAINS (IRQ NUMBER MAPPING LIBRARY)
M: Marc Zyngier
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
F: Documentation/IRQ-domain.txt
F: include/linux/irqdomain.h
F: kernel/irq/irqdomain.c
F: kernel/irq/msi.c

IRQ SUBSYSTEM
M: Thomas Gleixner
L: linux-kernel@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
F: kernel/irq/

IRQCHIP DRIVERS
M: Thomas Gleixner
M: Jason Cooper
M: Marc Zyngier
L: linux-kernel@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
F: Documentation/devicetree/bindings/interrupt-controller/
F: drivers/irqchip/

ISA
M: William Breathitt Gray
S: Maintained
F: Documentation/isa.txt
F: drivers/base/isa.c
F: include/linux/isa.h

ISA RADIO MODULE
M: Hans Verkuil
L: linux-media@vger.kernel.org
T: git git://linuxtv.org/media_tree.git
W: https://linuxtv.org
S: Maintained
F: drivers/media/radio/radio-isa*

ISAPNP
M: Jaroslav Kysela
S: Maintained
F: Documentation/isapnp.txt
F: drivers/pnp/isapnp/
F: include/linux/isapnp.h

ISCSI
M: Lee Duncan
M: Chris Leech
L: open-iscsi@googlegroups.com
W: www.open-iscsi.com
S: Maintained
F: drivers/scsi/*iscsi*
F: include/scsi/*iscsi*

iSCSI BOOT FIRMWARE TABLE (iBFT) DRIVER
M: Peter Jones
M: Konrad Rzeszutek Wilk
S: Maintained
F: drivers/firmware/iscsi_ibft*

ISCSI EXTENSIONS FOR RDMA (ISER) INITIATOR
M: Sagi Grimberg
M: Max Gurtovoy
L: linux-rdma@vger.kernel.org
S: Supported
W: http://www.openfabrics.org
W: www.open-iscsi.org
Q: http://patchwork.kernel.org/project/linux-rdma/list/
F: drivers/infiniband/ulp/iser/

ISCSI EXTENSIONS FOR RDMA (ISER) TARGET
M: Sagi Grimberg
T: git git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending.git master
L: linux-rdma@vger.kernel.org
L: target-devel@vger.kernel.org
S: Supported
W: http://www.linux-iscsi.org
F: drivers/infiniband/ulp/isert

ISDN SUBSYSTEM
M: Karsten Keil
L: isdn4linux@listserv.isdn4linux.de (subscribers-only)
L: netdev@vger.kernel.org
W: http://www.isdn4linux.de
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kkeil/isdn-2.6.git
S: Maintained
F: Documentation/isdn/
F: drivers/isdn/
F: include/linux/isdn.h
F: include/linux/isdn/
F: include/uapi/linux/isdn.h
F: include/uapi/linux/isdn/

IT87 HARDWARE MONITORING DRIVER
M: Jean Delvare
L: linux-hwmon@vger.kernel.org
S: Maintained
F: Documentation/hwmon/it87
F: drivers/hwmon/it87.c

IT913X MEDIA DRIVER
M: Antti Palosaari
L: linux-media@vger.kernel.org
W: https://linuxtv.org
W: http://palosaari.fi/linux/
Q: http://patchwork.linuxtv.org/project/linux-media/list/
T: git git://linuxtv.org/anttip/media_tree.git
S: Maintained
F: drivers/media/tuners/it913x*

IVTV VIDEO4LINUX DRIVER
M: Andy Walls
L: ivtv-devel@ivtvdriver.org (subscribers-only)
L: linux-media@vger.kernel.org
T: git git://linuxtv.org/media_tree.git
W: http://www.ivtvdriver.org
S: Maintained
F: Documentation/media/v4l-drivers/ivtv*
F: drivers/media/pci/ivtv/
F: include/uapi/linux/ivtv*

IX2505V MEDIA DRIVER
M: Malcolm Priestley
L: linux-media@vger.kernel.org
W: https://linuxtv.org
Q: http://patchwork.linuxtv.org/project/linux-media/list/
S: Maintained
F: drivers/media/dvb-frontends/ix2505v*

JAILHOUSE HYPERVISOR INTERFACE
M: Jan Kiszka
L: jailhouse-dev@googlegroups.com
S: Maintained
F: arch/x86/kernel/jailhouse.c
F: arch/x86/include/asm/jailhouse_para.h

JC42.4 TEMPERATURE SENSOR DRIVER
M: Guenter Roeck
L: linux-hwmon@vger.kernel.org
S: Maintained
F: drivers/hwmon/jc42.c
F: Documentation/hwmon/jc42

JFS FILESYSTEM
M: Dave Kleikamp
L: jfs-discussion@lists.sourceforge.net
W: http://jfs.sourceforge.net/
T: git git://github.com/kleikamp/linux-shaggy.git
S: Maintained
F: Documentation/filesystems/jfs.txt
F: fs/jfs/

JME NETWORK DRIVER
M: Guo-Fu Tseng
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/jme.*

JOURNALLING FLASH FILE SYSTEM V2 (JFFS2)
M: David Woodhouse
L: linux-mtd@lists.infradead.org
W: http://www.linux-mtd.infradead.org/doc/jffs2.html
S: Maintained
F: fs/jffs2/
F: include/uapi/linux/jffs2.h

JOURNALLING LAYER FOR BLOCK DEVICES (JBD2)
M: "Theodore Ts'o"
M: Jan Kara
L: linux-ext4@vger.kernel.org
S: Maintained
F: fs/jbd2/
F: include/linux/jbd2.h

JPU V4L2 MEM2MEM DRIVER FOR RENESAS
M: Mikhail Ulyanov
L: linux-media@vger.kernel.org
S: Maintained
F: drivers/media/platform/rcar_jpu.c

JSM Neo PCI based serial card
L: linux-serial@vger.kernel.org
S: Orphan
F: drivers/tty/serial/jsm/

K10TEMP HARDWARE MONITORING DRIVER
M: Clemens Ladisch
L: linux-hwmon@vger.kernel.org
S: Maintained
F: Documentation/hwmon/k10temp
F: drivers/hwmon/k10temp.c

K8TEMP HARDWARE MONITORING DRIVER
M: Rudolf Marek
L: linux-hwmon@vger.kernel.org
S: Maintained
F: Documentation/hwmon/k8temp
F: drivers/hwmon/k8temp.c

KASAN
M: Andrey Ryabinin
R: Alexander Potapenko
R: Dmitry Vyukov
L: kasan-dev@googlegroups.com
S: Maintained
F: arch/*/include/asm/kasan.h
F: arch/*/mm/kasan_init*
F: Documentation/dev-tools/kasan.rst
F: include/linux/kasan*.h
F: lib/test_kasan.c
F: mm/kasan/
F: scripts/Makefile.kasan

KCONFIG
M: Masahiro Yamada
T: git git://git.kernel.org/pub/scm/linux/kernel/git/masahiroy/linux-kbuild.git kconfig
L: linux-kbuild@vger.kernel.org
S: Maintained
F: Documentation/kbuild/kconfig*
F: scripts/kconfig/
F: scripts/Kconfig.include

KDUMP
M: Dave Young
M: Baoquan He
R: Vivek Goyal
L: kexec@lists.infradead.org
W: http://lse.sourceforge.net/kdump/
S: Maintained
F: Documentation/kdump/

KEENE FM RADIO TRANSMITTER DRIVER
M: Hans Verkuil
L: linux-media@vger.kernel.org
T: git git://linuxtv.org/media_tree.git
W: https://linuxtv.org
S: Maintained
F: drivers/media/radio/radio-keene*

KERNEL AUTOMOUNTER
M: Ian Kent
L: autofs@vger.kernel.org
S: Maintained
F: fs/autofs/

KERNEL BUILD + files below scripts/ (unless maintained elsewhere)
M: Masahiro Yamada
M: Michal Marek
T: git git://git.kernel.org/pub/scm/linux/kernel/git/masahiroy/linux-kbuild.git
L: linux-kbuild@vger.kernel.org
S: Maintained
F: Documentation/kbuild/
F: Makefile
F: scripts/Kbuild*
F: scripts/Makefile*
F: scripts/basic/
F: scripts/mk*
F: scripts/mod/
F: scripts/package/

KERNEL JANITORS
L: kernel-janitors@vger.kernel.org
W: http://kernelnewbies.org/KernelJanitors
S: Odd Fixes

KERNEL NFSD, SUNRPC, AND LOCKD SERVERS
M: "J. Bruce Fields"
M: Jeff Layton
L: linux-nfs@vger.kernel.org
W: http://nfs.sourceforge.net/
T: git git://linux-nfs.org/~bfields/linux.git
S: Supported
F: fs/nfsd/
F: include/uapi/linux/nfsd/
F: fs/lockd/
F: fs/nfs_common/
F: net/sunrpc/
F: include/linux/lockd/
F: include/linux/sunrpc/
F: include/uapi/linux/sunrpc/

KERNEL SELFTEST FRAMEWORK
M: Shuah Khan
L: linux-kselftest@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/shuah/linux-kselftest.git
Q: https://patchwork.kernel.org/project/linux-kselftest/list/
S: Maintained
F: tools/testing/selftests/
F: Documentation/dev-tools/kselftest*

KERNEL USERMODE HELPER
M: Luis Chamberlain
L: linux-kernel@vger.kernel.org
S: Maintained
F: kernel/umh.c
F: include/linux/umh.h

KERNEL VIRTUAL MACHINE (KVM)
M: Paolo Bonzini
M: Radim Krčmář
L: kvm@vger.kernel.org
W: http://www.linux-kvm.org
T: git git://git.kernel.org/pub/scm/virt/kvm/kvm.git
S: Supported
F: Documentation/virtual/kvm/
F: include/trace/events/kvm.h
F: include/uapi/asm-generic/kvm*
F: include/uapi/linux/kvm*
F: include/asm-generic/kvm*
F: include/linux/kvm*
F: include/kvm/iodev.h
F: virt/kvm/*
F: tools/kvm/

KERNEL VIRTUAL MACHINE FOR AMD-V (KVM/amd)
M: Joerg Roedel
L: kvm@vger.kernel.org
W: http://www.linux-kvm.org/
S: Maintained
F: arch/x86/include/asm/svm.h
F: arch/x86/kvm/svm.c

KERNEL VIRTUAL MACHINE FOR ARM (KVM/arm)
M: Christoffer Dall
M: Marc Zyngier
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: kvmarm@lists.cs.columbia.edu
W: http://systems.cs.columbia.edu/projects/kvm-arm
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm.git
S: Supported
F: arch/arm/include/uapi/asm/kvm*
F: arch/arm/include/asm/kvm*
F: arch/arm/kvm/
F: virt/kvm/arm/
F: include/kvm/arm_*

KERNEL VIRTUAL MACHINE FOR ARM64 (KVM/arm64)
M: Christoffer Dall
M: Marc Zyngier
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: kvmarm@lists.cs.columbia.edu
S: Maintained
F: arch/arm64/include/uapi/asm/kvm*
F: arch/arm64/include/asm/kvm*
F: arch/arm64/kvm/

KERNEL VIRTUAL MACHINE FOR MIPS (KVM/mips)
M: James Hogan
L: linux-mips@vger.kernel.org
S: Supported
F: arch/mips/include/uapi/asm/kvm*
F: arch/mips/include/asm/kvm*
F: arch/mips/kvm/

KERNEL VIRTUAL MACHINE FOR POWERPC (KVM/powerpc)
M: Paul Mackerras
L: kvm-ppc@vger.kernel.org
W: http://www.linux-kvm.org/
T: git git://github.com/agraf/linux-2.6.git
S: Supported
F: arch/powerpc/include/uapi/asm/kvm*
F: arch/powerpc/include/asm/kvm*
F: arch/powerpc/kvm/
F: arch/powerpc/kernel/kvm*

KERNEL VIRTUAL MACHINE for s390 (KVM/s390)
M: Christian Borntraeger
M: Janosch Frank
R: David Hildenbrand
R: Cornelia Huck
L: linux-s390@vger.kernel.org
W: http://www.ibm.com/developerworks/linux/linux390/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux.git
S: Supported
F: arch/s390/include/uapi/asm/kvm*
F: arch/s390/include/asm/gmap.h
F: arch/s390/include/asm/kvm*
F: arch/s390/kvm/
F: arch/s390/mm/gmap.c

KERNEL VIRTUAL MACHINE FOR X86 (KVM/x86)
M: Paolo Bonzini
M: Radim Krčmář
L: kvm@vger.kernel.org
W: http://www.linux-kvm.org
T: git git://git.kernel.org/pub/scm/virt/kvm/kvm.git
S: Supported
F: arch/x86/kvm/
F: arch/x86/kvm/*/
F: arch/x86/include/uapi/asm/kvm*
F: arch/x86/include/asm/kvm*
F: arch/x86/include/asm/pvclock-abi.h
F: arch/x86/kernel/kvm.c
F: arch/x86/kernel/kvmclock.c

KERNFS
M: Greg Kroah-Hartman
M: Tejun Heo
T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core.git
S: Supported
F: include/linux/kernfs.h
F: fs/kernfs/

KEXEC
M: Eric Biederman
W: http://kernel.org/pub/linux/utils/kernel/kexec/
L: kexec@lists.infradead.org
S: Maintained
F: include/linux/kexec.h
F: include/uapi/linux/kexec.h
F: kernel/kexec*

KEYS-ENCRYPTED
M: Mimi Zohar
L: linux-integrity@vger.kernel.org
L: keyrings@vger.kernel.org
S: Supported
F: Documentation/security/keys/trusted-encrypted.rst
F: include/keys/encrypted-type.h
F: security/keys/encrypted-keys/

KEYS-TRUSTED
M: James Bottomley
M: Jarkko Sakkinen
M: Mimi Zohar
L: linux-integrity@vger.kernel.org
L: keyrings@vger.kernel.org
S: Supported
F: Documentation/security/keys/trusted-encrypted.rst
F: include/keys/trusted-type.h
F: security/keys/trusted.c
F: security/keys/trusted.h

KEYS/KEYRINGS:
M: David Howells
L: keyrings@vger.kernel.org
S: Maintained
F: Documentation/security/keys/core.rst
F: include/linux/key.h
F: include/linux/key-type.h
F: include/linux/keyctl.h
F: include/uapi/linux/keyctl.h
F: include/keys/
F: security/keys/

KGDB / KDB /debug_core
M: Jason Wessel
M: Daniel Thompson
W: http://kgdb.wiki.kernel.org/
L: kgdb-bugreport@lists.sourceforge.net
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jwessel/kgdb.git
S: Maintained
F: Documentation/dev-tools/kgdb.rst
F: drivers/misc/kgdbts.c
F: drivers/tty/serial/kgdboc.c
F: include/linux/kdb.h
F: include/linux/kgdb.h
F: kernel/debug/

KMEMLEAK
M: Catalin Marinas
S: Maintained
F: Documentation/dev-tools/kmemleak.rst
F: include/linux/kmemleak.h
F: mm/kmemleak.c
F: mm/kmemleak-test.c

KMOD KERNEL MODULE LOADER - USERMODE HELPER
M: Luis Chamberlain
L: linux-kernel@vger.kernel.org
S: Maintained
F: kernel/kmod.c
F: include/linux/kmod.h
F: lib/test_kmod.c
F: tools/testing/selftests/kmod/

KPROBES
M: Naveen N. Rao
M: Anil S Keshavamurthy
M: "David S. Miller"
M: Masami Hiramatsu
S: Maintained
F: Documentation/kprobes.txt
F: include/linux/kprobes.h
F: include/asm-generic/kprobes.h
F: kernel/kprobes.c

KS0108 LCD CONTROLLER DRIVER
M: Miguel Ojeda Sandonis
S: Maintained
F: Documentation/auxdisplay/ks0108
F: drivers/auxdisplay/ks0108.c
F: include/linux/ks0108.h

L3MDEV
M: David Ahern
L: netdev@vger.kernel.org
S: Maintained
F: net/l3mdev
F: include/net/l3mdev.h

L7 BPF FRAMEWORK
M: John Fastabend
M: Daniel Borkmann
L: netdev@vger.kernel.org
S: Maintained
F: include/linux/skmsg.h
F: net/core/skmsg.c
F: net/core/sock_map.c
F: net/ipv4/tcp_bpf.c

LANTIQ / INTEL Ethernet drivers
M: Hauke Mehrtens
L: netdev@vger.kernel.org
S: Maintained
F: net/dsa/tag_gswip.c
F: drivers/net/ethernet/lantiq_xrx200.c
F: drivers/net/dsa/lantiq_pce.h
F: drivers/net/dsa/lantiq_gswip.c

LANTIQ MIPS ARCHITECTURE
M: John Crispin
L: linux-mips@vger.kernel.org
S: Maintained
F: arch/mips/lantiq
F: drivers/soc/lantiq

LAPB module
L: linux-x25@vger.kernel.org
S: Orphan
F: Documentation/networking/lapb-module.txt
F: include/*/lapb.h
F: net/lapb/

LASI 53c700 driver for PARISC
M: "James E.J. Bottomley"
Bottomley" L: linux-scsi@vger.kernel.org S: Maintained F: Documentation/scsi/53c700.txt F: drivers/scsi/53c700* LEAKING_ADDRESSES M: Tobin C. Harding M: Tycho Andersen L: kernel-hardening@lists.openwall.com S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/tobin/leaks.git F: scripts/leaking_addresses.pl LED SUBSYSTEM M: Jacek Anaszewski M: Pavel Machek L: linux-leds@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/j.anaszewski/linux-leds.git S: Maintained F: Documentation/devicetree/bindings/leds/ F: drivers/leds/ F: include/linux/leds.h LEGACY EEPROM DRIVER M: Jean Delvare S: Maintained F: Documentation/misc-devices/eeprom F: drivers/misc/eeprom/eeprom.c LEGO MINDSTORMS EV3 R: David Lechner S: Maintained F: arch/arm/boot/dts/da850-lego-ev3.dts F: Documentation/devicetree/bindings/power/supply/lego_ev3_battery.txt F: drivers/power/supply/lego_ev3_battery.c LEGO USB Tower driver M: Juergen Stuber L: legousb-devel@lists.sourceforge.net W: http://legousb.sourceforge.net/ S: Maintained F: drivers/usb/misc/legousbtower.c LG LAPTOP EXTRAS M: Matan Ziv-Av L: platform-driver-x86@vger.kernel.org S: Maintained F: Documentation/ABI/testing/sysfs-platform-lg-laptop F: Documentation/laptops/lg-laptop.rst F: drivers/platform/x86/lg-laptop.c LG2160 MEDIA DRIVER M: Michael Krufky L: linux-media@vger.kernel.org W: https://linuxtv.org W: http://github.com/mkrufky Q: http://patchwork.linuxtv.org/project/linux-media/list/ T: git git://linuxtv.org/mkrufky/tuners.git S: Maintained F: drivers/media/dvb-frontends/lg2160.* LGDT3305 MEDIA DRIVER M: Michael Krufky L: linux-media@vger.kernel.org W: https://linuxtv.org W: http://github.com/mkrufky Q: http://patchwork.linuxtv.org/project/linux-media/list/ T: git git://linuxtv.org/mkrufky/tuners.git S: Maintained F: drivers/media/dvb-frontends/lgdt3305.* LIBATA PATA ARASAN COMPACT FLASH CONTROLLER M: Viresh Kumar L: linux-ide@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git S: Maintained F: include/linux/pata_arasan_cf_data.h F: drivers/ata/pata_arasan_cf.c LIBATA PATA DRIVERS M: Bartlomiej Zolnierkiewicz M: Jens Axboe L: linux-ide@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git S: Maintained F: drivers/ata/pata_*.c F: drivers/ata/ata_generic.c LIBATA PATA FARADAY FTIDE010 AND GEMINI SATA BRIDGE DRIVERS M: Linus Walleij L: linux-ide@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git S: Maintained F: drivers/ata/pata_ftide010.c F: drivers/ata/sata_gemini.c F: drivers/ata/sata_gemini.h LIBATA SATA AHCI PLATFORM devices support M: Hans de Goede M: Jens Axboe L: linux-ide@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git S: Maintained F: drivers/ata/ahci_platform.c F: drivers/ata/libahci_platform.c F: include/linux/ahci_platform.h LIBATA SATA PROMISE TX2/TX4 CONTROLLER DRIVER M: Mikael Pettersson L: linux-ide@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git S: Maintained F: drivers/ata/sata_promise.* LIBATA SUBSYSTEM (Serial and Parallel ATA drivers) M: Jens Axboe L: linux-ide@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git S: Maintained F: drivers/ata/ F: include/linux/ata.h F: include/linux/libata.h F: Documentation/devicetree/bindings/ata/ LIBLOCKDEP M: Sasha Levin S: Maintained F: tools/lib/lockdep/ LIBNVDIMM BLK: MMIO-APERTURE DRIVER M: Ross Zwisler M: Dan Williams M: Vishal Verma M: 
Dave Jiang L: linux-nvdimm@lists.01.org Q: https://patchwork.kernel.org/project/linux-nvdimm/list/ S: Supported F: drivers/nvdimm/blk.c F: drivers/nvdimm/region_devs.c LIBNVDIMM BTT: BLOCK TRANSLATION TABLE M: Vishal Verma M: Dan Williams M: Ross Zwisler M: Dave Jiang L: linux-nvdimm@lists.01.org Q: https://patchwork.kernel.org/project/linux-nvdimm/list/ S: Supported F: drivers/nvdimm/btt* LIBNVDIMM PMEM: PERSISTENT MEMORY DRIVER M: Ross Zwisler M: Dan Williams M: Vishal Verma M: Dave Jiang L: linux-nvdimm@lists.01.org Q: https://patchwork.kernel.org/project/linux-nvdimm/list/ S: Supported F: drivers/nvdimm/pmem* LIBNVDIMM: DEVICETREE BINDINGS M: Oliver O'Halloran L: linux-nvdimm@lists.01.org Q: https://patchwork.kernel.org/project/linux-nvdimm/list/ S: Supported F: drivers/nvdimm/of_pmem.c F: Documentation/devicetree/bindings/pmem/pmem-region.txt LIBNVDIMM: NON-VOLATILE MEMORY DEVICE SUBSYSTEM M: Dan Williams M: Ross Zwisler M: Vishal Verma M: Dave Jiang L: linux-nvdimm@lists.01.org Q: https://patchwork.kernel.org/project/linux-nvdimm/list/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm.git S: Supported F: drivers/nvdimm/* F: drivers/acpi/nfit/* F: include/linux/nd.h F: include/linux/libnvdimm.h F: include/uapi/linux/ndctl.h LIGHTNVM PLATFORM SUPPORT M: Matias Bjorling W: http://github/OpenChannelSSD L: linux-block@vger.kernel.org S: Maintained F: drivers/lightnvm/ F: include/linux/lightnvm.h F: include/uapi/linux/lightnvm.h LINUX FOR POWER MACINTOSH M: Benjamin Herrenschmidt W: http://www.penguinppc.org/ L: linuxppc-dev@lists.ozlabs.org S: Maintained F: arch/powerpc/platforms/powermac/ F: drivers/macintosh/ LINUX FOR POWERPC (32-BIT AND 64-BIT) M: Benjamin Herrenschmidt M: Paul Mackerras M: Michael Ellerman W: https://github.com/linuxppc/linux/wiki L: linuxppc-dev@lists.ozlabs.org Q: http://patchwork.ozlabs.org/project/linuxppc-dev/list/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux.git S: Supported F: Documentation/ABI/stable/sysfs-firmware-opal-* F: Documentation/devicetree/bindings/powerpc/ F: Documentation/devicetree/bindings/rtc/rtc-opal.txt F: Documentation/devicetree/bindings/i2c/i2c-opal.txt F: Documentation/powerpc/ F: arch/powerpc/ F: drivers/char/tpm/tpm_ibmvtpm* F: drivers/crypto/nx/ F: drivers/crypto/vmx/ F: drivers/i2c/busses/i2c-opal.c F: drivers/net/ethernet/ibm/ibmveth.* F: drivers/net/ethernet/ibm/ibmvnic.* F: drivers/pci/hotplug/pnv_php.c F: drivers/pci/hotplug/rpa* F: drivers/rtc/rtc-opal.c F: drivers/scsi/ibmvscsi/ F: drivers/tty/hvc/hvc_opal.c F: drivers/watchdog/wdrtas.c F: tools/testing/selftests/powerpc N: /pmac N: powermac N: powernv N: [^a-z0-9]ps3 N: pseries LINUX FOR POWERPC EMBEDDED MPC5XXX M: Anatolij Gustschin L: linuxppc-dev@lists.ozlabs.org T: git git://git.denx.de/linux-denx-agust.git S: Maintained F: arch/powerpc/platforms/512x/ F: arch/powerpc/platforms/52xx/ LINUX FOR POWERPC EMBEDDED PPC4XX M: Alistair Popple M: Matt Porter W: http://www.penguinppc.org/ L: linuxppc-dev@lists.ozlabs.org S: Maintained F: arch/powerpc/platforms/40x/ F: arch/powerpc/platforms/44x/ LINUX FOR POWERPC EMBEDDED PPC83XX AND PPC85XX M: Scott Wood M: Kumar Gala W: http://www.penguinppc.org/ L: linuxppc-dev@lists.ozlabs.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/scottwood/linux.git S: Maintained F: arch/powerpc/platforms/83xx/ F: arch/powerpc/platforms/85xx/ F: Documentation/devicetree/bindings/powerpc/fsl/ LINUX FOR POWERPC EMBEDDED PPC8XX M: Vitaly Bordug W: http://www.penguinppc.org/ L: linuxppc-dev@lists.ozlabs.org S: 
Maintained F: arch/powerpc/platforms/8xx/ LINUX FOR POWERPC EMBEDDED XILINX VIRTEX L: linuxppc-dev@lists.ozlabs.org S: Orphan F: arch/powerpc/*/*virtex* F: arch/powerpc/*/*/*virtex* LINUX FOR POWERPC PA SEMI PWRFICIENT L: linuxppc-dev@lists.ozlabs.org S: Orphan F: arch/powerpc/platforms/pasemi/ F: drivers/*/*pasemi* F: drivers/*/*/*pasemi* LINUX KERNEL DUMP TEST MODULE (LKDTM) M: Kees Cook S: Maintained F: drivers/misc/lkdtm/* LINUX KERNEL MEMORY CONSISTENCY MODEL (LKMM) M: Alan Stern M: Andrea Parri M: Will Deacon M: Peter Zijlstra M: Boqun Feng M: Nicholas Piggin M: David Howells M: Jade Alglave M: Luc Maranget M: "Paul E. McKenney" R: Akira Yokosawa R: Daniel Lustig L: linux-kernel@vger.kernel.org L: linux-arch@vger.kernel.org S: Supported T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git F: tools/memory-model/ F: Documentation/atomic_bitops.txt F: Documentation/atomic_t.txt F: Documentation/core-api/atomic_ops.rst F: Documentation/core-api/refcount-vs-atomic.rst F: Documentation/memory-barriers.txt LIS3LV02D ACCELEROMETER DRIVER M: Eric Piel S: Maintained F: Documentation/misc-devices/lis3lv02d F: drivers/misc/lis3lv02d/ F: drivers/platform/x86/hp_accel.c LIVE PATCHING M: Josh Poimboeuf M: Jessica Yu M: Jiri Kosina M: Miroslav Benes R: Petr Mladek S: Maintained F: kernel/livepatch/ F: include/linux/livepatch.h F: arch/x86/include/asm/livepatch.h F: arch/x86/kernel/livepatch.c F: Documentation/livepatch/ F: Documentation/ABI/testing/sysfs-kernel-livepatch F: samples/livepatch/ L: live-patching@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/livepatching.git LLC (802.2) L: netdev@vger.kernel.org S: Odd fixes F: include/linux/llc.h F: include/uapi/linux/llc.h F: include/net/llc* F: net/llc/ LM73 HARDWARE MONITOR DRIVER M: Guillaume Ligneul L: linux-hwmon@vger.kernel.org S: Maintained F: drivers/hwmon/lm73.c LM78 HARDWARE MONITOR DRIVER M: Jean Delvare L: linux-hwmon@vger.kernel.org S: Maintained F: Documentation/hwmon/lm78 F: drivers/hwmon/lm78.c LM83 HARDWARE MONITOR DRIVER M: Jean Delvare L: linux-hwmon@vger.kernel.org S: Maintained F: Documentation/hwmon/lm83 F: drivers/hwmon/lm83.c LM90 HARDWARE MONITOR DRIVER M: Jean Delvare L: linux-hwmon@vger.kernel.org S: Maintained F: Documentation/hwmon/lm90 F: Documentation/devicetree/bindings/hwmon/lm90.txt F: drivers/hwmon/lm90.c F: include/dt-bindings/thermal/lm90.h LM95234 HARDWARE MONITOR DRIVER M: Guenter Roeck L: linux-hwmon@vger.kernel.org S: Maintained F: Documentation/hwmon/lm95234 F: drivers/hwmon/lm95234.c LME2510 MEDIA DRIVER M: Malcolm Priestley L: linux-media@vger.kernel.org W: https://linuxtv.org Q: http://patchwork.linuxtv.org/project/linux-media/list/ S: Maintained F: drivers/media/usb/dvb-usb-v2/lmedm04* LOADPIN SECURITY MODULE M: Kees Cook T: git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git lsm/loadpin S: Supported F: security/loadpin/ F: Documentation/admin-guide/LSM/LoadPin.rst LOCKING PRIMITIVES M: Peter Zijlstra M: Ingo Molnar M: Will Deacon L: linux-kernel@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git locking/core S: Maintained F: Documentation/locking/ F: include/linux/lockdep.h F: include/linux/spinlock*.h F: arch/*/include/asm/spinlock*.h F: include/linux/rwlock*.h F: include/linux/mutex*.h F: include/linux/rwsem*.h F: arch/*/include/asm/rwsem.h F: include/linux/seqlock.h F: lib/locking*.[ch] F: kernel/locking/ X: kernel/locking/locktorture.c LOGICAL DISK MANAGER SUPPORT (LDM, Windows 2000/XP/Vista Dynamic Disks) M: 
"Richard Russon (FlatCap)" L: linux-ntfs-dev@lists.sourceforge.net W: http://www.linux-ntfs.org/content/view/19/37/ S: Maintained F: Documentation/ldm.txt F: block/partitions/ldm.* LSILOGIC MPT FUSION DRIVERS (FC/SAS/SPI) M: Sathya Prakash M: Chaitra P B M: Suganath Prabu Subramani L: MPT-FusionLinux.pdl@broadcom.com L: linux-scsi@vger.kernel.org W: http://www.avagotech.com/support/ S: Supported F: drivers/message/fusion/ F: drivers/scsi/mpt3sas/ LSILOGIC/SYMBIOS/NCR 53C8XX and 53C1010 PCI-SCSI drivers M: Matthew Wilcox L: linux-scsi@vger.kernel.org S: Maintained F: drivers/scsi/sym53c8xx_2/ LTC1660 DAC DRIVER M: Marcus Folkesson L: linux-iio@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/iio/dac/ltc1660.txt F: drivers/iio/dac/ltc1660.c LTC4261 HARDWARE MONITOR DRIVER M: Guenter Roeck L: linux-hwmon@vger.kernel.org S: Maintained F: Documentation/hwmon/ltc4261 F: drivers/hwmon/ltc4261.c LTC4306 I2C MULTIPLEXER DRIVER M: Michael Hennerich W: http://ez.analog.com/community/linux-device-drivers L: linux-i2c@vger.kernel.org S: Supported F: drivers/i2c/muxes/i2c-mux-ltc4306.c F: Documentation/devicetree/bindings/i2c/i2c-mux-ltc4306.txt LTP (Linux Test Project) M: Mike Frysinger M: Cyril Hrubis M: Wanlong Gao M: Jan Stancek M: Stanislav Kholmanskikh M: Alexey Kodanev L: ltp@lists.linux.it (subscribers-only) W: http://linux-test-project.github.io/ T: git git://github.com/linux-test-project/ltp.git S: Maintained M68K ARCHITECTURE M: Geert Uytterhoeven L: linux-m68k@lists.linux-m68k.org W: http://www.linux-m68k.org/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/geert/linux-m68k.git S: Maintained F: arch/m68k/ F: drivers/zorro/ M68K ON APPLE MACINTOSH M: Joshua Thompson W: http://www.mac.linux-m68k.org/ L: linux-m68k@lists.linux-m68k.org S: Maintained F: arch/m68k/mac/ M68K ON HP9000/300 M: Philip Blundell W: http://www.tazenda.demon.co.uk/phil/linux-hp S: Maintained F: arch/m68k/hp300/ M88DS3103 MEDIA DRIVER M: Antti Palosaari L: linux-media@vger.kernel.org W: https://linuxtv.org W: http://palosaari.fi/linux/ Q: http://patchwork.linuxtv.org/project/linux-media/list/ T: git git://linuxtv.org/anttip/media_tree.git S: Maintained F: drivers/media/dvb-frontends/m88ds3103* M88RS2000 MEDIA DRIVER M: Malcolm Priestley L: linux-media@vger.kernel.org W: https://linuxtv.org Q: http://patchwork.linuxtv.org/project/linux-media/list/ S: Maintained F: drivers/media/dvb-frontends/m88rs2000* MA901 MASTERKIT USB FM RADIO DRIVER M: Alexey Klimov L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git S: Maintained F: drivers/media/radio/radio-ma901.c MAC80211 M: Johannes Berg L: linux-wireless@vger.kernel.org W: http://wireless.kernel.org/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211.git T: git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211-next.git S: Maintained F: Documentation/networking/mac80211-injection.txt F: include/net/mac80211.h F: net/mac80211/ F: drivers/net/wireless/mac80211_hwsim.[ch] F: Documentation/networking/mac80211_hwsim/README MAILBOX API M: Jassi Brar L: linux-kernel@vger.kernel.org S: Maintained F: drivers/mailbox/ F: include/linux/mailbox_client.h F: include/linux/mailbox_controller.h MAN-PAGES: MANUAL PAGES FOR LINUX -- Sections 2, 3, 4, 5, and 7 M: Michael Kerrisk W: http://www.kernel.org/doc/man-pages L: linux-man@vger.kernel.org S: Maintained MARDUK (CREATOR CI40) DEVICE TREE SUPPORT M: Rahul Bedarkar L: linux-mips@vger.kernel.org S: Maintained F: arch/mips/boot/dts/img/pistachio_marduk.dts MARVELL 88E6XXX 
ETHERNET SWITCH FABRIC DRIVER M: Andrew Lunn M: Vivien Didelot L: netdev@vger.kernel.org S: Maintained F: drivers/net/dsa/mv88e6xxx/ F: include/linux/platform_data/mv88e6xxx.h F: Documentation/devicetree/bindings/net/dsa/marvell.txt MARVELL ARMADA DRM SUPPORT M: Russell King S: Maintained T: git git://git.armlinux.org.uk/~rmk/linux-arm.git drm-armada-devel T: git git://git.armlinux.org.uk/~rmk/linux-arm.git drm-armada-fixes F: drivers/gpu/drm/armada/ F: include/uapi/drm/armada_drm.h F: Documentation/devicetree/bindings/display/armada/ MARVELL CRYPTO DRIVER M: Boris Brezillon M: Arnaud Ebalard F: drivers/crypto/marvell/ S: Maintained L: linux-crypto@vger.kernel.org MARVELL GIGABIT ETHERNET DRIVERS (skge/sky2) M: Mirko Lindner M: Stephen Hemminger L: netdev@vger.kernel.org S: Maintained F: drivers/net/ethernet/marvell/sk* MARVELL LIBERTAS WIRELESS DRIVER L: libertas-dev@lists.infradead.org S: Orphan F: drivers/net/wireless/marvell/libertas/ MARVELL MACCHIATOBIN SUPPORT M: Russell King L: linux-arm-kernel@lists.infradead.org S: Maintained F: arch/arm64/boot/dts/marvell/armada-8040-mcbin.dts MARVELL MV643XX ETHERNET DRIVER M: Sebastian Hesselbarth L: netdev@vger.kernel.org S: Maintained F: drivers/net/ethernet/marvell/mv643xx_eth.* F: include/linux/mv643xx.h MARVELL MV88X3310 PHY DRIVER M: Russell King L: netdev@vger.kernel.org S: Maintained F: drivers/net/phy/marvell10g.c MARVELL MVEBU THERMAL DRIVER M: Miquel Raynal S: Maintained F: drivers/thermal/armada_thermal.c MARVELL MVNETA ETHERNET DRIVER M: Thomas Petazzoni L: netdev@vger.kernel.org S: Maintained F: drivers/net/ethernet/marvell/mvneta.* MARVELL MWIFIEX WIRELESS DRIVER M: Amitkumar Karwar M: Nishant Sarmukadam M: Ganapathi Bhat M: Xinming Hu L: linux-wireless@vger.kernel.org S: Maintained F: drivers/net/wireless/marvell/mwifiex/ MARVELL MWL8K WIRELESS DRIVER M: Lennert Buytenhek L: linux-wireless@vger.kernel.org S: Odd Fixes F: drivers/net/wireless/marvell/mwl8k.c MARVELL NAND CONTROLLER DRIVER M: Miquel Raynal L: linux-mtd@lists.infradead.org S: Maintained F: drivers/mtd/nand/raw/marvell_nand.c F: Documentation/devicetree/bindings/mtd/marvell-nand.txt MARVELL SOC MMC/SD/SDIO CONTROLLER DRIVER M: Nicolas Pitre S: Odd Fixes F: drivers/mmc/host/mvsdio.* MARVELL XENON MMC/SD/SDIO HOST CONTROLLER DRIVER M: Hu Ziji L: linux-mmc@vger.kernel.org S: Supported F: drivers/mmc/host/sdhci-xenon* F: Documentation/devicetree/bindings/mmc/marvell,xenon-sdhci.txt MARVELL OCTEONTX2 RVU ADMIN FUNCTION DRIVER M: Sunil Goutham M: Linu Cherian M: Geetha sowjanya M: Jerin Jacob L: netdev@vger.kernel.org S: Supported F: drivers/net/ethernet/marvell/octeontx2/af/ MATROX FRAMEBUFFER DRIVER L: linux-fbdev@vger.kernel.org S: Orphan F: drivers/video/fbdev/matrox/matroxfb_* F: include/uapi/linux/matroxfb.h MAX16065 HARDWARE MONITOR DRIVER M: Guenter Roeck L: linux-hwmon@vger.kernel.org S: Maintained F: Documentation/hwmon/max16065 F: drivers/hwmon/max16065.c MAX2175 SDR TUNER DRIVER M: Ramesh Shanmugasundaram L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git S: Maintained F: Documentation/devicetree/bindings/media/i2c/max2175.txt F: Documentation/media/v4l-drivers/max2175.rst F: drivers/media/i2c/max2175* F: include/uapi/linux/max2175.h MAX6650 HARDWARE MONITOR AND FAN CONTROLLER DRIVER L: linux-hwmon@vger.kernel.org S: Orphan F: Documentation/hwmon/max6650 F: drivers/hwmon/max6650.c MAX6697 HARDWARE MONITOR DRIVER M: Guenter Roeck L: linux-hwmon@vger.kernel.org S: Maintained F: Documentation/hwmon/max6697 F: 
Documentation/devicetree/bindings/hwmon/max6697.txt F: drivers/hwmon/max6697.c F: include/linux/platform_data/max6697.h MAX9860 MONO AUDIO VOICE CODEC DRIVER M: Peter Rosin L: alsa-devel@alsa-project.org (moderated for non-subscribers) S: Maintained F: Documentation/devicetree/bindings/sound/max9860.txt F: sound/soc/codecs/max9860.* MAXIM MAX77802 PMIC REGULATOR DEVICE DRIVER M: Javier Martinez Canillas L: linux-kernel@vger.kernel.org S: Supported F: drivers/regulator/max77802-regulator.c F: Documentation/devicetree/bindings/*/*max77802.txt F: include/dt-bindings/*/*max77802.h MAXIM MUIC CHARGER DRIVERS FOR EXYNOS BASED BOARDS M: Krzysztof Kozlowski M: Bartlomiej Zolnierkiewicz L: linux-pm@vger.kernel.org S: Supported F: drivers/power/supply/max14577_charger.c F: drivers/power/supply/max77693_charger.c MAXIM PMIC AND MUIC DRIVERS FOR EXYNOS BASED BOARDS M: Chanwoo Choi M: Krzysztof Kozlowski M: Bartlomiej Zolnierkiewicz L: linux-kernel@vger.kernel.org S: Supported F: drivers/*/max14577*.c F: drivers/*/max77686*.c F: drivers/*/max77693*.c F: drivers/extcon/extcon-max14577.c F: drivers/extcon/extcon-max77693.c F: drivers/rtc/rtc-max77686.c F: drivers/clk/clk-max77686.c F: Documentation/devicetree/bindings/mfd/max14577.txt F: Documentation/devicetree/bindings/*/max77686.txt F: Documentation/devicetree/bindings/mfd/max77693.txt F: Documentation/devicetree/bindings/clock/maxim,max77686.txt F: include/linux/mfd/max14577*.h F: include/linux/mfd/max77686*.h F: include/linux/mfd/max77693*.h MAXIRADIO FM RADIO RECEIVER DRIVER M: Hans Verkuil L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git W: https://linuxtv.org S: Maintained F: drivers/media/radio/radio-maxiradio* MCP4018 AND MCP4531 MICROCHIP DIGITAL POTENTIOMETER DRIVERS M: Peter Rosin L: linux-iio@vger.kernel.org S: Maintained F: Documentation/ABI/testing/sysfs-bus-iio-potentiometer-mcp4531 F: drivers/iio/potentiometer/mcp4018.c F: drivers/iio/potentiometer/mcp4531.c MCR20A IEEE-802.15.4 RADIO DRIVER M: Xue Liu L: linux-wpan@vger.kernel.org W: https://github.com/xueliu/mcr20a-linux S: Maintained F: drivers/net/ieee802154/mcr20a.c F: drivers/net/ieee802154/mcr20a.h F: Documentation/devicetree/bindings/net/ieee802154/mcr20a.txt MEASUREMENT COMPUTING CIO-DAC IIO DRIVER M: William Breathitt Gray L: linux-iio@vger.kernel.org S: Maintained F: drivers/iio/dac/cio-dac.c MEDIA DRIVERS FOR ASCOT2E M: Sergey Kozlov M: Abylay Ospan L: linux-media@vger.kernel.org W: https://linuxtv.org W: http://netup.tv/ T: git git://linuxtv.org/media_tree.git S: Supported F: drivers/media/dvb-frontends/ascot2e* MEDIA DRIVERS FOR CXD2099AR CI CONTROLLERS M: Jasmin Jessich L: linux-media@vger.kernel.org W: https://linuxtv.org T: git git://linuxtv.org/media_tree.git S: Maintained F: drivers/media/dvb-frontends/cxd2099* MEDIA DRIVERS FOR CXD2841ER M: Sergey Kozlov M: Abylay Ospan L: linux-media@vger.kernel.org W: https://linuxtv.org W: http://netup.tv/ T: git git://linuxtv.org/media_tree.git S: Supported F: drivers/media/dvb-frontends/cxd2841er* MEDIA DRIVERS FOR CXD2880 M: Yasunari Takiguchi L: linux-media@vger.kernel.org W: http://linuxtv.org/ T: git git://linuxtv.org/media_tree.git S: Supported F: drivers/media/dvb-frontends/cxd2880/* F: drivers/media/spi/cxd2880* MEDIA DRIVERS FOR DIGITAL DEVICES PCIE DEVICES L: linux-media@vger.kernel.org W: https://linuxtv.org T: git git://linuxtv.org/media_tree.git S: Orphan F: drivers/media/pci/ddbridge/* MEDIA DRIVERS FOR FREESCALE IMX M: Steve Longerbeam M: Philipp Zabel L: linux-media@vger.kernel.org T: git 
git://linuxtv.org/media_tree.git S: Maintained F: Documentation/devicetree/bindings/media/imx.txt F: Documentation/media/v4l-drivers/imx.rst F: drivers/staging/media/imx/ F: include/linux/imx-media.h F: include/media/imx.h MEDIA DRIVER FOR FREESCALE IMX PXP M: Philipp Zabel L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git S: Maintained F: drivers/media/platform/imx-pxp.[ch] MEDIA DRIVERS FOR HELENE M: Abylay Ospan L: linux-media@vger.kernel.org W: https://linuxtv.org W: http://netup.tv/ T: git git://linuxtv.org/media_tree.git S: Supported F: drivers/media/dvb-frontends/helene* MEDIA DRIVERS FOR HORUS3A M: Sergey Kozlov M: Abylay Ospan L: linux-media@vger.kernel.org W: https://linuxtv.org W: http://netup.tv/ T: git git://linuxtv.org/media_tree.git S: Supported F: drivers/media/dvb-frontends/horus3a* MEDIA DRIVERS FOR LNBH25 M: Sergey Kozlov M: Abylay Ospan L: linux-media@vger.kernel.org W: https://linuxtv.org W: http://netup.tv/ T: git git://linuxtv.org/media_tree.git S: Supported F: drivers/media/dvb-frontends/lnbh25* MEDIA DRIVERS FOR MXL5XX TUNER DEMODULATORS L: linux-media@vger.kernel.org W: https://linuxtv.org T: git git://linuxtv.org/media_tree.git S: Orphan F: drivers/media/dvb-frontends/mxl5xx* MEDIA DRIVERS FOR NETUP PCI UNIVERSAL DVB devices M: Sergey Kozlov M: Abylay Ospan L: linux-media@vger.kernel.org W: https://linuxtv.org W: http://netup.tv/ T: git git://linuxtv.org/media_tree.git S: Supported F: drivers/media/pci/netup_unidvb/* MEDIA DRIVERS FOR RENESAS - CEU M: Jacopo Mondi L: linux-media@vger.kernel.org L: linux-renesas-soc@vger.kernel.org T: git git://linuxtv.org/media_tree.git S: Supported F: Documentation/devicetree/bindings/media/renesas,ceu.txt F: drivers/media/platform/renesas-ceu.c F: include/media/drv-intf/renesas-ceu.h MEDIA DRIVERS FOR RENESAS - DRIF M: Ramesh Shanmugasundaram L: linux-media@vger.kernel.org L: linux-renesas-soc@vger.kernel.org T: git git://linuxtv.org/media_tree.git S: Supported F: Documentation/devicetree/bindings/media/renesas,drif.txt F: drivers/media/platform/rcar_drif.c MEDIA DRIVERS FOR RENESAS - FCP M: Laurent Pinchart L: linux-media@vger.kernel.org L: linux-renesas-soc@vger.kernel.org T: git git://linuxtv.org/media_tree.git S: Supported F: Documentation/devicetree/bindings/media/renesas,fcp.txt F: drivers/media/platform/rcar-fcp.c F: include/media/rcar-fcp.h MEDIA DRIVERS FOR RENESAS - FDP1 M: Kieran Bingham L: linux-media@vger.kernel.org L: linux-renesas-soc@vger.kernel.org T: git git://linuxtv.org/media_tree.git S: Supported F: Documentation/devicetree/bindings/media/renesas,fdp1.txt F: drivers/media/platform/rcar_fdp1.c MEDIA DRIVERS FOR RENESAS - VIN M: Niklas Söderlund L: linux-media@vger.kernel.org L: linux-renesas-soc@vger.kernel.org T: git git://linuxtv.org/media_tree.git S: Supported F: Documentation/devicetree/bindings/media/renesas,rcar-csi2.txt F: Documentation/devicetree/bindings/media/rcar_vin.txt F: drivers/media/platform/rcar-vin/ MEDIA DRIVERS FOR RENESAS - VSP1 M: Laurent Pinchart M: Kieran Bingham L: linux-media@vger.kernel.org L: linux-renesas-soc@vger.kernel.org T: git git://linuxtv.org/media_tree.git S: Supported F: Documentation/devicetree/bindings/media/renesas,vsp1.txt F: drivers/media/platform/vsp1/ MEDIA DRIVERS FOR ST STV0910 DEMODULATOR ICs L: linux-media@vger.kernel.org W: https://linuxtv.org T: git git://linuxtv.org/media_tree.git S: Orphan F: drivers/media/dvb-frontends/stv0910* MEDIA DRIVERS FOR ST STV6111 TUNER ICs L: linux-media@vger.kernel.org W: https://linuxtv.org T: git 
git://linuxtv.org/media_tree.git S: Orphan F: drivers/media/dvb-frontends/stv6111* MEDIA DRIVERS FOR STM32 - DCMI M: Hugues Fruchet L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git S: Supported F: Documentation/devicetree/bindings/media/st,stm32-dcmi.txt F: drivers/media/platform/stm32/stm32-dcmi.c MEDIA DRIVERS FOR NVIDIA TEGRA - VDE M: Dmitry Osipenko L: linux-media@vger.kernel.org L: linux-tegra@vger.kernel.org T: git git://linuxtv.org/media_tree.git S: Maintained F: Documentation/devicetree/bindings/media/nvidia,tegra-vde.txt F: drivers/staging/media/tegra-vde/ MEDIA INPUT INFRASTRUCTURE (V4L/DVB) M: Mauro Carvalho Chehab P: LinuxTV.org Project L: linux-media@vger.kernel.org W: https://linuxtv.org Q: http://patchwork.kernel.org/project/linux-media/list/ T: git git://linuxtv.org/media_tree.git S: Maintained F: Documentation/devicetree/bindings/media/ F: Documentation/media/ F: drivers/media/ F: drivers/staging/media/ F: include/linux/platform_data/media/ F: include/media/ F: include/uapi/linux/dvb/ F: include/uapi/linux/videodev2.h F: include/uapi/linux/media.h F: include/uapi/linux/v4l2-* F: include/uapi/linux/meye.h F: include/uapi/linux/ivtv* F: include/uapi/linux/uvcvideo.h MEDIATEK BLUETOOTH DRIVER M: Sean Wang L: linux-bluetooth@vger.kernel.org L: linux-mediatek@lists.infradead.org (moderated for non-subscribers) S: Maintained F: Documentation/devicetree/bindings/net/mediatek-bluetooth.txt F: drivers/bluetooth/btmtkuart.c MEDIATEK CIR DRIVER M: Sean Wang S: Maintained F: drivers/media/rc/mtk-cir.c MEDIATEK DMA DRIVER M: Sean Wang L: dmaengine@vger.kernel.org L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) L: linux-mediatek@lists.infradead.org (moderated for non-subscribers) S: Maintained F: Documentation/devicetree/bindings/dma/mtk-* F: drivers/dma/mediatek/ MEDIATEK PMIC LED DRIVER M: Sean Wang S: Maintained F: drivers/leds/leds-mt6323.c F: Documentation/devicetree/bindings/leds/leds-mt6323.txt MEDIATEK ETHERNET DRIVER M: Felix Fietkau M: John Crispin M: Sean Wang M: Nelson Chang L: netdev@vger.kernel.org S: Maintained F: drivers/net/ethernet/mediatek/ MEDIATEK SWITCH DRIVER M: Sean Wang L: netdev@vger.kernel.org S: Maintained F: drivers/net/dsa/mt7530.* F: net/dsa/tag_mtk.c MEDIATEK JPEG DRIVER M: Rick Chang M: Bin Liu S: Supported F: drivers/media/platform/mtk-jpeg/ F: Documentation/devicetree/bindings/media/mediatek-jpeg-decoder.txt MEDIATEK MDP DRIVER M: Minghsiu Tsai M: Houlong Wei M: Andrew-CT Chen S: Supported F: drivers/media/platform/mtk-mdp/ F: drivers/media/platform/mtk-vpu/ F: Documentation/devicetree/bindings/media/mediatek-mdp.txt MEDIATEK MEDIA DRIVER M: Tiffany Lin M: Andrew-CT Chen S: Supported F: drivers/media/platform/mtk-vcodec/ F: drivers/media/platform/mtk-vpu/ F: Documentation/devicetree/bindings/media/mediatek-vcodec.txt F: Documentation/devicetree/bindings/media/mediatek-vpu.txt MEDIATEK MT76 WIRELESS LAN DRIVER M: Felix Fietkau M: Lorenzo Bianconi L: linux-wireless@vger.kernel.org S: Maintained F: drivers/net/wireless/mediatek/mt76/ MEDIATEK MT7601U WIRELESS LAN DRIVER M: Jakub Kicinski L: linux-wireless@vger.kernel.org S: Maintained F: drivers/net/wireless/mediatek/mt7601u/ MEDIATEK NAND CONTROLLER DRIVER M: Xiaolei Li L: linux-mtd@lists.infradead.org S: Maintained F: drivers/mtd/nand/raw/mtk_* F: Documentation/devicetree/bindings/mtd/mtk-nand.txt MEDIATEK RANDOM NUMBER GENERATOR SUPPORT M: Sean Wang S: Maintained F: drivers/char/hw_random/mtk-rng.c MEDIATEK USB3 DRD IP DRIVER M: Chunfeng Yun L: 
linux-usb@vger.kernel.org (moderated for non-subscribers) L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) L: linux-mediatek@lists.infradead.org (moderated for non-subscribers) S: Maintained F: drivers/usb/mtu3/ MEGACHIPS STDPXXXX-GE-B850V3-FW LVDS/DP++ BRIDGES M: Peter Senna Tschudin M: Martin Donnelly M: Martyn Welch S: Maintained F: drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c F: Documentation/devicetree/bindings/display/bridge/megachips-stdpxxxx-ge-b850v3-fw.txt MEGARAID SCSI/SAS DRIVERS M: Kashyap Desai M: Sumit Saxena M: Shivasharan S L: megaraidlinux.pdl@broadcom.com L: linux-scsi@vger.kernel.org W: http://www.avagotech.com/support/ S: Maintained F: Documentation/scsi/megaraid.txt F: drivers/scsi/megaraid.* F: drivers/scsi/megaraid/ MELEXIS MLX90614 DRIVER M: Crt Mori L: linux-iio@vger.kernel.org W: http://www.melexis.com S: Supported F: drivers/iio/temperature/mlx90614.c MELEXIS MLX90632 DRIVER M: Crt Mori L: linux-iio@vger.kernel.org W: http://www.melexis.com S: Supported F: drivers/iio/temperature/mlx90632.c MELFAS MIP4 TOUCHSCREEN DRIVER M: Sangwon Jee W: http://www.melfas.com S: Supported F: drivers/input/touchscreen/melfas_mip4.c F: Documentation/devicetree/bindings/input/touchscreen/melfas_mip4.txt MELLANOX ETHERNET DRIVER (mlx4_en) M: Tariq Toukan L: netdev@vger.kernel.org S: Supported W: http://www.mellanox.com Q: http://patchwork.ozlabs.org/project/netdev/list/ F: drivers/net/ethernet/mellanox/mlx4/en_* MELLANOX ETHERNET DRIVER (mlx5e) M: Saeed Mahameed L: netdev@vger.kernel.org S: Supported W: http://www.mellanox.com Q: http://patchwork.ozlabs.org/project/netdev/list/ F: drivers/net/ethernet/mellanox/mlx5/core/en_* MELLANOX ETHERNET INNOVA DRIVERS R: Boris Pismenny L: netdev@vger.kernel.org S: Supported W: http://www.mellanox.com Q: http://patchwork.ozlabs.org/project/netdev/list/ F: drivers/net/ethernet/mellanox/mlx5/core/en_accel/* F: drivers/net/ethernet/mellanox/mlx5/core/accel/* F: drivers/net/ethernet/mellanox/mlx5/core/fpga/* F: include/linux/mlx5/mlx5_ifc_fpga.h MELLANOX ETHERNET INNOVA IPSEC DRIVER R: Boris Pismenny L: netdev@vger.kernel.org S: Supported W: http://www.mellanox.com Q: http://patchwork.ozlabs.org/project/netdev/list/ F: drivers/net/ethernet/mellanox/mlx5/core/en_ipsec/* F: drivers/net/ethernet/mellanox/mlx5/core/ipsec* MELLANOX ETHERNET SWITCH DRIVERS M: Jiri Pirko M: Ido Schimmel L: netdev@vger.kernel.org S: Supported W: http://www.mellanox.com Q: http://patchwork.ozlabs.org/project/netdev/list/ F: drivers/net/ethernet/mellanox/mlxsw/ F: tools/testing/selftests/drivers/net/mlxsw/ MELLANOX FIRMWARE FLASH LIBRARY (mlxfw) M: mlxsw@mellanox.com L: netdev@vger.kernel.org S: Supported W: http://www.mellanox.com Q: http://patchwork.ozlabs.org/project/netdev/list/ F: drivers/net/ethernet/mellanox/mlxfw/ MELLANOX HARDWARE PLATFORM SUPPORT M: Andy Shevchenko M: Darren Hart M: Vadim Pasternak L: platform-driver-x86@vger.kernel.org S: Supported F: drivers/platform/mellanox/ MELLANOX MLX4 core VPI driver M: Tariq Toukan L: netdev@vger.kernel.org L: linux-rdma@vger.kernel.org W: http://www.mellanox.com Q: http://patchwork.ozlabs.org/project/netdev/list/ S: Supported F: drivers/net/ethernet/mellanox/mlx4/ F: include/linux/mlx4/ MELLANOX MLX4 IB driver M: Yishai Hadas L: linux-rdma@vger.kernel.org W: http://www.mellanox.com Q: http://patchwork.kernel.org/project/linux-rdma/list/ S: Supported F: drivers/infiniband/hw/mlx4/ F: include/linux/mlx4/ F: include/uapi/rdma/mlx4-abi.h MELLANOX MLX5 core VPI driver M: Saeed Mahameed M: 
Leon Romanovsky L: netdev@vger.kernel.org L: linux-rdma@vger.kernel.org W: http://www.mellanox.com Q: http://patchwork.ozlabs.org/project/netdev/list/ S: Supported F: drivers/net/ethernet/mellanox/mlx5/core/ F: include/linux/mlx5/ MELLANOX MLX5 IB driver M: Leon Romanovsky L: linux-rdma@vger.kernel.org W: http://www.mellanox.com Q: http://patchwork.kernel.org/project/linux-rdma/list/ S: Supported F: drivers/infiniband/hw/mlx5/ F: include/linux/mlx5/ F: include/uapi/rdma/mlx5-abi.h MELLANOX MLXCPLD I2C AND MUX DRIVER M: Vadim Pasternak M: Michael Shych L: linux-i2c@vger.kernel.org S: Supported F: drivers/i2c/busses/i2c-mlxcpld.c F: drivers/i2c/muxes/i2c-mux-mlxcpld.c F: Documentation/i2c/busses/i2c-mlxcpld MELLANOX MLXCPLD LED DRIVER M: Vadim Pasternak L: linux-leds@vger.kernel.org S: Supported F: drivers/leds/leds-mlxcpld.c F: drivers/leds/leds-mlxreg.c F: Documentation/leds/leds-mlxcpld.txt MELLANOX PLATFORM DRIVER M: Vadim Pasternak L: platform-driver-x86@vger.kernel.org S: Supported F: drivers/platform/x86/mlx-platform.c MEMBARRIER SUPPORT M: Mathieu Desnoyers M: "Paul E. McKenney" L: linux-kernel@vger.kernel.org S: Supported F: kernel/sched/membarrier.c F: include/uapi/linux/membarrier.h F: arch/powerpc/include/asm/membarrier.h MEMORY MANAGEMENT L: linux-mm@kvack.org W: http://www.linux-mm.org S: Maintained F: include/linux/mm.h F: include/linux/gfp.h F: include/linux/mmzone.h F: include/linux/memory_hotplug.h F: include/linux/vmalloc.h F: mm/ MEMORY TECHNOLOGY DEVICES (MTD) M: David Woodhouse M: Brian Norris M: Boris Brezillon M: Marek Vasut M: Richard Weinberger L: linux-mtd@lists.infradead.org W: http://www.linux-mtd.infradead.org/ Q: http://patchwork.ozlabs.org/project/linux-mtd/list/ T: git git://git.infradead.org/linux-mtd.git master T: git git://git.infradead.org/linux-mtd.git mtd/next S: Maintained F: Documentation/devicetree/bindings/mtd/ F: drivers/mtd/ F: include/linux/mtd/ F: include/uapi/mtd/ MEN A21 WATCHDOG DRIVER M: Johannes Thumshirn L: linux-watchdog@vger.kernel.org S: Maintained F: drivers/watchdog/mena21_wdt.c MEN CHAMELEON BUS (mcb) M: Johannes Thumshirn S: Maintained F: drivers/mcb/ F: include/linux/mcb.h F: Documentation/men-chameleon-bus.txt MEN F21BMC (Board Management Controller) M: Andreas Werner S: Supported F: drivers/mfd/menf21bmc.c F: drivers/watchdog/menf21bmc_wdt.c F: drivers/leds/leds-menf21bmc.c F: drivers/hwmon/menf21bmc_hwmon.c F: Documentation/hwmon/menf21bmc MEN Z069 WATCHDOG DRIVER M: Johannes Thumshirn L: linux-watchdog@vger.kernel.org S: Maintained F: drivers/watchdog/menz69_wdt.c MESON AO CEC DRIVER FOR AMLOGIC SOCS M: Neil Armstrong L: linux-media@lists.freedesktop.org L: linux-amlogic@lists.infradead.org W: http://linux-meson.com/ S: Supported F: drivers/media/platform/meson/ao-cec.c F: Documentation/devicetree/bindings/media/meson-ao-cec.txt T: git git://linuxtv.org/media_tree.git MICROBLAZE ARCHITECTURE M: Michal Simek W: http://www.monstr.eu/fdt/ T: git git://git.monstr.eu/linux-2.6-microblaze.git S: Supported F: arch/microblaze/ MICROCHIP AT91 SERIAL DRIVER M: Richard Genoud S: Maintained F: drivers/tty/serial/atmel_serial.c F: drivers/tty/serial/atmel_serial.h F: Documentation/devicetree/bindings/mfd/atmel-usart.txt MICROCHIP AUDIO ASOC DRIVERS M: Codrin Ciubotariu L: alsa-devel@alsa-project.org (moderated for non-subscribers) S: Supported F: sound/soc/atmel MICROCHIP DMA DRIVER M: Ludovic Desroches L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) L: dmaengine@vger.kernel.org S: Supported F: drivers/dma/at_hdmac.c 
F: drivers/dma/at_hdmac_regs.h F: include/linux/platform_data/dma-atmel.h F: Documentation/devicetree/bindings/dma/atmel-dma.txt F: include/dt-bindings/dma/at91.h MICROCHIP ECC DRIVER M: Tudor Ambarus L: linux-crypto@vger.kernel.org S: Maintained F: drivers/crypto/atmel-ecc.* MICROCHIP I2C DRIVER M: Ludovic Desroches L: linux-i2c@vger.kernel.org S: Supported F: drivers/i2c/busses/i2c-at91.c MICROCHIP ISC DRIVER M: Eugen Hristev L: linux-media@vger.kernel.org S: Supported F: drivers/media/platform/atmel/atmel-isc.c F: drivers/media/platform/atmel/atmel-isc-regs.h F: Documentation/devicetree/bindings/media/atmel-isc.txt MICROCHIP ISI DRIVER M: Eugen Hristev L: linux-media@vger.kernel.org S: Supported F: drivers/media/platform/atmel/atmel-isi.c F: drivers/media/platform/atmel/atmel-isi.h MICROCHIP AT91 USART MFD DRIVER M: Radu Pirea L: linux-kernel@vger.kernel.org S: Supported F: drivers/mfd/at91-usart.c F: include/dt-bindings/mfd/at91-usart.h F: Documentation/devicetree/bindings/mfd/atmel-usart.txt MICROCHIP AT91 USART SPI DRIVER M: Radu Pirea L: linux-spi@vger.kernel.org S: Supported F: drivers/spi/spi-at91-usart.c F: Documentation/devicetree/bindings/mfd/atmel-usart.txt MICROCHIP KSZ SERIES ETHERNET SWITCH DRIVER M: Woojung Huh M: Microchip Linux Driver Support L: netdev@vger.kernel.org S: Maintained F: net/dsa/tag_ksz.c F: drivers/net/dsa/microchip/* F: include/linux/platform_data/microchip-ksz.h F: Documentation/devicetree/bindings/net/dsa/ksz.txt MICROCHIP LAN743X ETHERNET DRIVER M: Bryan Whitehead M: Microchip Linux Driver Support L: netdev@vger.kernel.org S: Maintained F: drivers/net/ethernet/microchip/lan743x_* MICROCHIP LCDFB DRIVER M: Nicolas Ferre L: linux-fbdev@vger.kernel.org S: Maintained F: drivers/video/fbdev/atmel_lcdfb.c F: include/video/atmel_lcdc.h MICROCHIP MMC/SD/SDIO MCI DRIVER M: Ludovic Desroches S: Maintained F: drivers/mmc/host/atmel-mci.c MICROCHIP MCP16502 PMIC DRIVER M: Andrei Stefanescu L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: Documentation/devicetree/bindings/regulator/mcp16502-regulator.txt F: drivers/regulator/mcp16502.c MICROCHIP MCP3911 ADC DRIVER M: Marcus Folkesson M: Kent Gustavsson L: linux-iio@vger.kernel.org S: Supported F: drivers/iio/adc/mcp3911.c F: Documentation/devicetree/bindings/iio/adc/mcp3911.txt MICROCHIP NAND DRIVER M: Tudor Ambarus L: linux-mtd@lists.infradead.org S: Supported F: drivers/mtd/nand/raw/atmel/* F: Documentation/devicetree/bindings/mtd/atmel-nand.txt MICROCHIP PWM DRIVER M: Claudiu Beznea L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) L: linux-pwm@vger.kernel.org S: Supported F: drivers/pwm/pwm-atmel.c F: Documentation/devicetree/bindings/pwm/atmel-pwm.txt MICROCHIP SAMA5D2-COMPATIBLE ADC DRIVER M: Ludovic Desroches M: Eugen Hristev L: linux-iio@vger.kernel.org S: Supported F: drivers/iio/adc/at91-sama5d2_adc.c F: Documentation/devicetree/bindings/iio/adc/at91-sama5d2_adc.txt F: include/dt-bindings/iio/adc/at91-sama5d2_adc.h MICROCHIP SAMA5D2-COMPATIBLE SHUTDOWN CONTROLLER M: Nicolas Ferre S: Supported F: drivers/power/reset/at91-sama5d2_shdwc.c MICROCHIP SAMA5D2-COMPATIBLE PIOBU GPIO M: Andrei Stefanescu L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) L: linux-gpio@vger.kernel.org F: drivers/gpio/gpio-sama5d2-piobu.c MICROCHIP SPI DRIVER M: Nicolas Ferre S: Supported F: drivers/spi/spi-atmel.* MICROCHIP SSC DRIVER M: Nicolas Ferre L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Supported F: 
drivers/misc/atmel-ssc.c F: include/linux/atmel-ssc.h MICROCHIP TIMER COUNTER (TC) AND CLOCKSOURCE DRIVERS M: Nicolas Ferre L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Supported F: drivers/misc/atmel_tclib.c F: drivers/clocksource/tcb_clksrc.c MICROCHIP USBA UDC DRIVER M: Cristian Birsan L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Supported F: drivers/usb/gadget/udc/atmel_usba_udc.* MICROCHIP USB251XB DRIVER M: Richard Leitner L: linux-usb@vger.kernel.org S: Maintained F: drivers/usb/misc/usb251xb.c F: Documentation/devicetree/bindings/usb/usb251xb.txt MICROCHIP XDMA DRIVER M: Ludovic Desroches L: linux-arm-kernel@lists.infradead.org L: dmaengine@vger.kernel.org S: Supported F: drivers/dma/at_xdmac.c MICROSEMI MIPS SOCS M: Alexandre Belloni M: Microchip Linux Driver Support L: linux-mips@vger.kernel.org S: Supported F: arch/mips/generic/board-ocelot.c F: arch/mips/configs/generic/board-ocelot.config F: arch/mips/boot/dts/mscc/ F: Documentation/devicetree/bindings/mips/mscc.txt MICROSEMI SMART ARRAY SMARTPQI DRIVER (smartpqi) M: Don Brace L: esc.storagedev@microsemi.com L: linux-scsi@vger.kernel.org S: Supported F: drivers/scsi/smartpqi/smartpqi*.[ch] F: drivers/scsi/smartpqi/Kconfig F: drivers/scsi/smartpqi/Makefile F: include/linux/cciss*.h F: include/uapi/linux/cciss*.h F: Documentation/scsi/smartpqi.txt MICROSEMI ETHERNET SWITCH DRIVER M: Alexandre Belloni M: Microchip Linux Driver Support L: netdev@vger.kernel.org S: Supported F: drivers/net/ethernet/mscc/ MICROSOFT SURFACE PRO 3 BUTTON DRIVER M: Chen Yu L: platform-driver-x86@vger.kernel.org S: Supported F: drivers/platform/x86/surfacepro3_button.c MICROTEK X6 SCANNER M: Oliver Neukum S: Maintained F: drivers/usb/image/microtek.* MIPS M: Ralf Baechle M: Paul Burton M: James Hogan L: linux-mips@vger.kernel.org W: http://www.linux-mips.org/ T: git git://git.linux-mips.org/pub/scm/ralf/linux.git T: git git://git.kernel.org/pub/scm/linux/kernel/git/mips/linux.git Q: http://patchwork.linux-mips.org/project/linux-mips/list/ S: Supported F: Documentation/devicetree/bindings/mips/ F: Documentation/mips/ F: arch/mips/ F: drivers/platform/mips/ MIPS BOSTON DEVELOPMENT BOARD M: Paul Burton L: linux-mips@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/clock/img,boston-clock.txt F: arch/mips/boot/dts/img/boston.dts F: arch/mips/configs/generic/board-boston.config F: drivers/clk/imgtec/clk-boston.c F: include/dt-bindings/clock/boston-clock.h MIPS GENERIC PLATFORM M: Paul Burton L: linux-mips@vger.kernel.org S: Supported F: Documentation/devicetree/bindings/power/mti,mips-cpc.txt F: arch/mips/generic/ F: arch/mips/tools/generic-board-config.sh MIPS/LOONGSON1 ARCHITECTURE M: Keguang Zhang L: linux-mips@vger.kernel.org S: Maintained F: arch/mips/loongson32/ F: arch/mips/include/asm/mach-loongson32/ F: drivers/*/*loongson1* F: drivers/*/*/*loongson1* MIPS/LOONGSON2 ARCHITECTURE M: Jiaxun Yang L: linux-mips@vger.kernel.org S: Maintained F: arch/mips/loongson64/fuloong-2e/ F: arch/mips/loongson64/lemote-2f/ F: arch/mips/include/asm/mach-loongson64/ F: drivers/*/*loongson2* F: drivers/*/*/*loongson2* MIPS/LOONGSON3 ARCHITECTURE M: Huacai Chen L: linux-mips@vger.kernel.org S: Maintained F: arch/mips/loongson64/ F: arch/mips/include/asm/mach-loongson64/ F: drivers/platform/mips/cpu_hwmon.c F: drivers/*/*loongson3* F: drivers/*/*/*loongson3* MIPS RINT INSTRUCTION EMULATION M: Aleksandar Markovic L: linux-mips@vger.kernel.org S: Supported F: arch/mips/math-emu/sp_rint.c F: 
arch/mips/math-emu/dp_rint.c MIROSOUND PCM20 FM RADIO RECEIVER DRIVER M: Hans Verkuil L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git W: https://linuxtv.org S: Odd Fixes F: drivers/media/radio/radio-miropcm20* MMP SUPPORT R: Lubomir Rintel L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Odd Fixes F: arch/arm/boot/dts/mmp* F: arch/arm/mach-mmp/ MMU GATHER AND TLB INVALIDATION M: Will Deacon M: "Aneesh Kumar K.V" M: Andrew Morton M: Nick Piggin M: Peter Zijlstra L: linux-arch@vger.kernel.org L: linux-mm@kvack.org S: Maintained F: arch/*/include/asm/tlb.h F: include/asm-generic/tlb.h F: mm/mmu_gather.c MN88472 MEDIA DRIVER M: Antti Palosaari L: linux-media@vger.kernel.org W: https://linuxtv.org W: http://palosaari.fi/linux/ Q: http://patchwork.linuxtv.org/project/linux-media/list/ S: Maintained F: drivers/media/dvb-frontends/mn88472* MN88473 MEDIA DRIVER M: Antti Palosaari L: linux-media@vger.kernel.org W: https://linuxtv.org W: http://palosaari.fi/linux/ Q: http://patchwork.linuxtv.org/project/linux-media/list/ S: Maintained F: drivers/media/dvb-frontends/mn88473* MODULE SUPPORT M: Jessica Yu T: git git://git.kernel.org/pub/scm/linux/kernel/git/jeyu/linux.git modules-next S: Maintained F: include/linux/module.h F: kernel/module.c MOTION EYE VAIO PICTUREBOOK CAMERA DRIVER W: http://popies.net/meye/ S: Orphan F: Documentation/media/v4l-drivers/meye* F: drivers/media/pci/meye/ F: include/uapi/linux/meye.h MOXA SMARTIO/INDUSTIO/INTELLIO SERIAL CARD M: Jiri Slaby S: Maintained F: Documentation/serial/moxa-smartio F: drivers/tty/mxser.* MR800 AVERMEDIA USB FM RADIO DRIVER M: Alexey Klimov L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git S: Maintained F: drivers/media/radio/radio-mr800.c MRF24J40 IEEE 802.15.4 RADIO DRIVER M: Alan Ott L: linux-wpan@vger.kernel.org S: Maintained F: drivers/net/ieee802154/mrf24j40.c F: Documentation/devicetree/bindings/net/ieee802154/mrf24j40.txt MSI LAPTOP SUPPORT M: "Lee, Chun-Yi" L: platform-driver-x86@vger.kernel.org S: Maintained F: drivers/platform/x86/msi-laptop.c MSI WMI SUPPORT L: platform-driver-x86@vger.kernel.org S: Orphan F: drivers/platform/x86/msi-wmi.c MSI001 MEDIA DRIVER M: Antti Palosaari L: linux-media@vger.kernel.org W: https://linuxtv.org W: http://palosaari.fi/linux/ Q: http://patchwork.linuxtv.org/project/linux-media/list/ T: git git://linuxtv.org/anttip/media_tree.git S: Maintained F: drivers/media/tuners/msi001* MSI2500 MEDIA DRIVER M: Antti Palosaari L: linux-media@vger.kernel.org W: https://linuxtv.org W: http://palosaari.fi/linux/ Q: http://patchwork.linuxtv.org/project/linux-media/list/ T: git git://linuxtv.org/anttip/media_tree.git S: Maintained F: drivers/media/usb/msi2500/ MSYSTEMS DISKONCHIP G3 MTD DRIVER M: Robert Jarzmik L: linux-mtd@lists.infradead.org S: Maintained F: drivers/mtd/devices/docg3* MT9M032 APTINA SENSOR DRIVER M: Laurent Pinchart L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git S: Maintained F: drivers/media/i2c/mt9m032.c F: include/media/i2c/mt9m032.h MT9P031 APTINA CAMERA SENSOR M: Laurent Pinchart L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git S: Maintained F: drivers/media/i2c/mt9p031.c F: include/media/i2c/mt9p031.h MT9T001 APTINA CAMERA SENSOR M: Laurent Pinchart L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git S: Maintained F: drivers/media/i2c/mt9t001.c F: include/media/i2c/mt9t001.h MT9T112 APTINA CAMERA SENSOR M: Jacopo Mondi L: linux-media@vger.kernel.org T: git 
git://linuxtv.org/media_tree.git S: Odd Fixes F: drivers/media/i2c/mt9t112.c F: include/media/i2c/mt9t112.h MT9V032 APTINA CAMERA SENSOR M: Laurent Pinchart L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git S: Maintained F: Documentation/devicetree/bindings/media/i2c/mt9v032.txt F: drivers/media/i2c/mt9v032.c F: include/media/i2c/mt9v032.h MT9V111 APTINA CAMERA SENSOR M: Jacopo Mondi L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git S: Maintained F: Documentation/devicetree/bindings/media/i2c/aptina,mt9v111.txt F: drivers/media/i2c/mt9v111.c MULTIFUNCTION DEVICES (MFD) M: Lee Jones T: git git://git.kernel.org/pub/scm/linux/kernel/git/lee/mfd.git S: Supported F: Documentation/devicetree/bindings/mfd/ F: drivers/mfd/ F: include/linux/mfd/ F: include/dt-bindings/mfd/ MULTIMEDIA CARD (MMC) ETC. OVER SPI S: Orphan F: drivers/mmc/host/mmc_spi.c F: include/linux/spi/mmc_spi.h MULTIMEDIA CARD (MMC), SECURE DIGITAL (SD) AND SDIO SUBSYSTEM M: Ulf Hansson L: linux-mmc@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/mmc.git S: Maintained F: Documentation/devicetree/bindings/mmc/ F: drivers/mmc/ F: include/linux/mmc/ F: include/uapi/linux/mmc/ MULTIPLEXER SUBSYSTEM M: Peter Rosin S: Maintained F: Documentation/ABI/testing/sysfs-class-mux* F: Documentation/devicetree/bindings/mux/ F: include/dt-bindings/mux/ F: include/linux/mux/ F: drivers/mux/ MULTITECH MULTIPORT CARD (ISICOM) S: Orphan F: drivers/tty/isicom.c F: include/linux/isicom.h MUSB MULTIPOINT HIGH SPEED DUAL-ROLE CONTROLLER M: Bin Liu L: linux-usb@vger.kernel.org S: Maintained F: drivers/usb/musb/ MXL301RF MEDIA DRIVER M: Akihiro Tsukada L: linux-media@vger.kernel.org S: Odd Fixes F: drivers/media/tuners/mxl301rf* MXL5007T MEDIA DRIVER M: Michael Krufky L: linux-media@vger.kernel.org W: https://linuxtv.org W: http://github.com/mkrufky Q: http://patchwork.linuxtv.org/project/linux-media/list/ T: git git://linuxtv.org/mkrufky/tuners.git S: Maintained F: drivers/media/tuners/mxl5007t.* MXSFB DRM DRIVER M: Marek Vasut M: Stefan Agner L: dri-devel@lists.freedesktop.org S: Supported F: drivers/gpu/drm/mxsfb/ F: Documentation/devicetree/bindings/display/mxsfb.txt T: git git://anongit.freedesktop.org/drm/drm-misc MYLEX DAC960 PCI RAID Controller M: Hannes Reinecke L: linux-scsi@vger.kernel.org S: Supported F: drivers/scsi/myrb.* F: drivers/scsi/myrs.* MYRICOM MYRI-10G 10GbE DRIVER (MYRI10GE) M: Chris Lee L: netdev@vger.kernel.org W: https://www.cspi.com/ethernet-products/support/downloads/ S: Supported F: drivers/net/ethernet/myricom/myri10ge/ NAND FLASH SUBSYSTEM M: Boris Brezillon M: Miquel Raynal R: Richard Weinberger L: linux-mtd@lists.infradead.org W: http://www.linux-mtd.infradead.org/ Q: http://patchwork.ozlabs.org/project/linux-mtd/list/ T: git git://git.infradead.org/linux-mtd.git nand/fixes T: git git://git.infradead.org/linux-mtd.git nand/next S: Maintained F: drivers/mtd/nand/ F: include/linux/mtd/*nand*.h NATIVE INSTRUMENTS USB SOUND INTERFACE DRIVER M: Daniel Mack S: Maintained L: alsa-devel@alsa-project.org (moderated for non-subscribers) W: http://www.native-instruments.com F: sound/usb/caiaq/ NATSEMI ETHERNET DRIVER (DP8381x) S: Orphan F: drivers/net/ethernet/natsemi/natsemi.c NCR 5380 SCSI DRIVERS M: Finn Thain M: Michael Schmitz L: linux-scsi@vger.kernel.org S: Maintained F: Documentation/scsi/g_NCR5380.txt F: drivers/scsi/NCR5380.* F: drivers/scsi/arm/cumana_1.c F: drivers/scsi/arm/oak.c F: drivers/scsi/atari_scsi.* F: drivers/scsi/dmx3191d.c F: 
drivers/scsi/g_NCR5380.* F: drivers/scsi/mac_scsi.* F: drivers/scsi/sun3_scsi.* F: drivers/scsi/sun3_scsi_vme.c NCSI LIBRARY: M: Samuel Mendoza-Jonas S: Maintained F: net/ncsi/ NCT6775 HARDWARE MONITOR DRIVER M: Guenter Roeck L: linux-hwmon@vger.kernel.org S: Maintained F: Documentation/hwmon/nct6775 F: drivers/hwmon/nct6775.c NET_FAILOVER MODULE M: Sridhar Samudrala L: netdev@vger.kernel.org S: Supported F: driver/net/net_failover.c F: include/net/net_failover.h F: Documentation/networking/net_failover.rst NETEFFECT IWARP RNIC DRIVER (IW_NES) M: Faisal Latif L: linux-rdma@vger.kernel.org W: http://www.intel.com/Products/Server/Adapters/Server-Cluster/Server-Cluster-overview.htm S: Supported F: drivers/infiniband/hw/nes/ F: include/uapi/rdma/nes-abi.h NETEM NETWORK EMULATOR M: Stephen Hemminger L: netem@lists.linux-foundation.org (moderated for non-subscribers) S: Maintained F: net/sched/sch_netem.c NETERION 10GbE DRIVERS (s2io/vxge) M: Jon Mason L: netdev@vger.kernel.org S: Supported F: Documentation/networking/device_drivers/neterion/s2io.txt F: Documentation/networking/device_drivers/neterion/vxge.txt F: drivers/net/ethernet/neterion/ NETFILTER M: Pablo Neira Ayuso M: Jozsef Kadlecsik M: Florian Westphal L: netfilter-devel@vger.kernel.org L: coreteam@netfilter.org W: http://www.netfilter.org/ W: http://www.iptables.org/ W: http://www.nftables.org/ Q: http://patchwork.ozlabs.org/project/netfilter-devel/list/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf.git T: git git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf-next.git S: Maintained F: include/linux/netfilter* F: include/linux/netfilter/ F: include/net/netfilter/ F: include/uapi/linux/netfilter* F: include/uapi/linux/netfilter/ F: net/*/netfilter.c F: net/*/netfilter/ F: net/netfilter/ F: net/bridge/br_netfilter*.c NETROM NETWORK LAYER M: Ralf Baechle L: linux-hams@vger.kernel.org W: http://www.linux-ax25.org/ S: Maintained F: include/net/netrom.h F: include/uapi/linux/netrom.h F: net/netrom/ NETRONOME ETHERNET DRIVERS M: Jakub Kicinski L: oss-drivers@netronome.com S: Maintained F: drivers/net/ethernet/netronome/ NETWORK BLOCK DEVICE (NBD) M: Josef Bacik S: Maintained L: linux-block@vger.kernel.org L: nbd@other.debian.org F: Documentation/blockdev/nbd.txt F: drivers/block/nbd.c F: include/uapi/linux/nbd.h NETWORK DROP MONITOR M: Neil Horman L: netdev@vger.kernel.org S: Maintained W: https://fedorahosted.org/dropwatch/ F: net/core/drop_monitor.c NETWORKING DRIVERS M: "David S. 
Miller" L: netdev@vger.kernel.org W: http://www.linuxfoundation.org/en/Net Q: http://patchwork.ozlabs.org/project/netdev/list/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git S: Odd Fixes F: Documentation/devicetree/bindings/net/ F: drivers/net/ F: include/linux/if_* F: include/linux/netdevice.h F: include/linux/etherdevice.h F: include/linux/fcdevice.h F: include/linux/fddidevice.h F: include/linux/hippidevice.h F: include/linux/inetdevice.h F: include/uapi/linux/if_* F: include/uapi/linux/netdevice.h NETWORKING DRIVERS (WIRELESS) M: Kalle Valo L: linux-wireless@vger.kernel.org Q: http://patchwork.kernel.org/project/linux-wireless/list/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers.git T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers-next.git S: Maintained F: Documentation/devicetree/bindings/net/wireless/ F: drivers/net/wireless/ NETWORKING [DSA] M: Andrew Lunn M: Vivien Didelot M: Florian Fainelli S: Maintained F: Documentation/devicetree/bindings/net/dsa/ F: net/dsa/ F: include/net/dsa.h F: include/linux/dsa/ F: drivers/net/dsa/ NETWORKING [GENERAL] M: "David S. Miller" L: netdev@vger.kernel.org W: http://www.linuxfoundation.org/en/Net Q: http://patchwork.ozlabs.org/project/netdev/list/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git B: mailto:netdev@vger.kernel.org S: Maintained F: net/ F: include/net/ F: include/linux/in.h F: include/linux/net.h F: include/linux/netdevice.h F: include/uapi/linux/in.h F: include/uapi/linux/net.h F: include/uapi/linux/netdevice.h F: include/uapi/linux/net_namespace.h F: tools/testing/selftests/net/ F: lib/net_utils.c F: lib/random32.c F: Documentation/networking/ NETWORKING [IPSEC] M: Steffen Klassert M: Herbert Xu M: "David S. Miller" L: netdev@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec.git T: git git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec-next.git S: Maintained F: net/xfrm/ F: net/key/ F: net/ipv4/xfrm* F: net/ipv4/esp4* F: net/ipv4/ah4.c F: net/ipv4/ipcomp.c F: net/ipv4/ip_vti.c F: net/ipv6/xfrm* F: net/ipv6/esp6* F: net/ipv6/ah6.c F: net/ipv6/ipcomp6.c F: net/ipv6/ip6_vti.c F: include/uapi/linux/xfrm.h F: include/net/xfrm.h NETWORKING [IPv4/IPv6] M: "David S. 
Miller" M: Alexey Kuznetsov M: Hideaki YOSHIFUJI L: netdev@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git S: Maintained F: net/ipv4/ F: net/ipv6/ F: include/net/ip* F: arch/x86/net/* NETWORKING [LABELED] (NetLabel, Labeled IPsec, SECMARK) M: Paul Moore W: https://github.com/netlabel L: netdev@vger.kernel.org L: linux-security-module@vger.kernel.org S: Maintained F: Documentation/netlabel/ F: include/net/calipso.h F: include/net/cipso_ipv4.h F: include/net/netlabel.h F: include/uapi/linux/netfilter/xt_SECMARK.h F: include/uapi/linux/netfilter/xt_CONNSECMARK.h F: net/netlabel/ F: net/ipv4/cipso_ipv4.c F: net/ipv6/calipso.c F: net/netfilter/xt_CONNSECMARK.c F: net/netfilter/xt_SECMARK.c NETWORKING [TCP] M: Eric Dumazet L: netdev@vger.kernel.org S: Maintained F: net/ipv4/tcp*.c F: net/ipv4/syncookies.c F: net/ipv6/tcp*.c F: net/ipv6/syncookies.c F: include/uapi/linux/tcp.h F: include/net/tcp.h F: include/linux/tcp.h F: include/trace/events/tcp.h NETWORKING [TLS] M: Boris Pismenny M: Aviad Yehezkel M: Dave Watson M: John Fastabend M: Daniel Borkmann L: netdev@vger.kernel.org S: Maintained F: net/tls/* F: include/uapi/linux/tls.h F: include/net/tls.h NETWORKING [WIRELESS] L: linux-wireless@vger.kernel.org Q: http://patchwork.kernel.org/project/linux-wireless/list/ NETDEVSIM M: Jakub Kicinski S: Maintained F: drivers/net/netdevsim/* NETXEN (1/10) GbE SUPPORT M: Manish Chopra M: Rahul Verma M: Dept-GELinuxNICDev@cavium.com L: netdev@vger.kernel.org S: Supported F: drivers/net/ethernet/qlogic/netxen/ NFC SUBSYSTEM M: Samuel Ortiz L: linux-wireless@vger.kernel.org L: linux-nfc@lists.01.org (subscribers-only) S: Supported F: net/nfc/ F: include/net/nfc/ F: include/uapi/linux/nfc.h F: drivers/nfc/ F: include/linux/platform_data/nfcmrvl.h F: include/linux/platform_data/nxp-nci.h F: Documentation/devicetree/bindings/net/nfc/ NFS, SUNRPC, AND LOCKD CLIENTS M: Trond Myklebust M: Anna Schumaker L: linux-nfs@vger.kernel.org W: http://client.linux-nfs.org T: git git://git.linux-nfs.org/projects/trondmy/linux-nfs.git S: Maintained F: fs/lockd/ F: fs/nfs/ F: fs/nfs_common/ F: net/sunrpc/ F: include/linux/lockd/ F: include/linux/nfs* F: include/linux/sunrpc/ F: include/uapi/linux/nfs* F: include/uapi/linux/sunrpc/ NILFS2 FILESYSTEM M: Ryusuke Konishi L: linux-nilfs@vger.kernel.org W: https://nilfs.sourceforge.io/ W: https://nilfs.osdn.jp/ T: git git://github.com/konis/nilfs2.git S: Supported F: Documentation/filesystems/nilfs2.txt F: fs/nilfs2/ F: include/trace/events/nilfs2.h F: include/uapi/linux/nilfs2_api.h F: include/uapi/linux/nilfs2_ondisk.h NINJA SCSI-3 / NINJA SCSI-32Bi (16bit/CardBus) PCMCIA SCSI HOST ADAPTER DRIVER M: YOKOTA Hiroshi W: http://www.netlab.is.tsukuba.ac.jp/~yokota/izumi/ninja/ S: Maintained F: Documentation/scsi/NinjaSCSI.txt F: drivers/scsi/pcmcia/nsp_* NINJA SCSI-32Bi/UDE PCI/CARDBUS SCSI HOST ADAPTER DRIVER M: GOTO Masanori M: YOKOTA Hiroshi W: http://www.netlab.is.tsukuba.ac.jp/~yokota/izumi/ninja/ S: Maintained F: Documentation/scsi/NinjaSCSI.txt F: drivers/scsi/nsp32* NIOS2 ARCHITECTURE M: Ley Foon Tan L: nios2-dev@lists.rocketboards.org (moderated for non-subscribers) T: git git://git.kernel.org/pub/scm/linux/kernel/git/lftan/nios2.git S: Maintained F: arch/nios2/ NOHZ, DYNTICKS SUPPORT M: Frederic Weisbecker M: Thomas Gleixner M: Ingo Molnar L: linux-kernel@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/nohz S: Maintained F: kernel/time/tick*.* F: include/linux/tick.h F: include/linux/sched/nohz.h NOKIA 
NOKIA N900 CAMERA SUPPORT (ET8EK8 SENSOR, AD5820 FOCUS)
M:	Pavel Machek
M:	Sakari Ailus
L:	linux-media@vger.kernel.org
S:	Maintained
F:	drivers/media/i2c/et8ek8
F:	drivers/media/i2c/ad5820.c

NOKIA N900 POWER SUPPLY DRIVERS
R:	Pali Rohár
F:	include/linux/power/bq2415x_charger.h
F:	include/linux/power/bq27xxx_battery.h
F:	include/linux/power/isp1704_charger.h
F:	drivers/power/supply/bq2415x_charger.c
F:	drivers/power/supply/bq27xxx_battery.c
F:	drivers/power/supply/bq27xxx_battery_i2c.c
F:	drivers/power/supply/isp1704_charger.c
F:	drivers/power/supply/rx51_battery.c

NTB AMD DRIVER
M:	Shyam Sundar S K
L:	linux-ntb@googlegroups.com
S:	Supported
F:	drivers/ntb/hw/amd/

NTB DRIVER CORE
M:	Jon Mason
M:	Dave Jiang
M:	Allen Hubbe
L:	linux-ntb@googlegroups.com
S:	Supported
W:	https://github.com/jonmason/ntb/wiki
T:	git git://github.com/jonmason/ntb.git
F:	drivers/ntb/
F:	drivers/net/ntb_netdev.c
F:	include/linux/ntb.h
F:	include/linux/ntb_transport.h
F:	tools/testing/selftests/ntb/

NTB IDT DRIVER
M:	Serge Semin
L:	linux-ntb@googlegroups.com
S:	Supported
F:	drivers/ntb/hw/idt/

NTB INTEL DRIVER
M:	Dave Jiang
L:	linux-ntb@googlegroups.com
S:	Supported
W:	https://github.com/davejiang/linux/wiki
T:	git https://github.com/davejiang/linux.git
F:	drivers/ntb/hw/intel/

NTFS FILESYSTEM
M:	Anton Altaparmakov
L:	linux-ntfs-dev@lists.sourceforge.net
W:	http://www.tuxera.com/
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/aia21/ntfs.git
S:	Supported
F:	Documentation/filesystems/ntfs.txt
F:	fs/ntfs/

NUBUS SUBSYSTEM
M:	Finn Thain
L:	linux-m68k@lists.linux-m68k.org
S:	Maintained
F:	arch/*/include/asm/nubus.h
F:	drivers/nubus/
F:	include/linux/nubus.h
F:	include/uapi/linux/nubus.h

NVIDIA (rivafb and nvidiafb) FRAMEBUFFER DRIVER
M:	Antonino Daplas
L:	linux-fbdev@vger.kernel.org
S:	Maintained
F:	drivers/video/fbdev/riva/
F:	drivers/video/fbdev/nvidia/

NVM EXPRESS DRIVER
M:	Keith Busch
M:	Jens Axboe
M:	Christoph Hellwig
M:	Sagi Grimberg
L:	linux-nvme@lists.infradead.org
T:	git://git.infradead.org/nvme.git
W:	http://git.infradead.org/nvme.git
S:	Supported
F:	drivers/nvme/host/
F:	include/linux/nvme.h
F:	include/uapi/linux/nvme_ioctl.h

NVM EXPRESS FC TRANSPORT DRIVERS
M:	James Smart
L:	linux-nvme@lists.infradead.org
S:	Supported
F:	include/linux/nvme-fc.h
F:	include/linux/nvme-fc-driver.h
F:	drivers/nvme/host/fc.c
F:	drivers/nvme/target/fc.c
F:	drivers/nvme/target/fcloop.c

NVM EXPRESS TARGET DRIVER
M:	Christoph Hellwig
M:	Sagi Grimberg
L:	linux-nvme@lists.infradead.org
T:	git://git.infradead.org/nvme.git
W:	http://git.infradead.org/nvme.git
S:	Supported
F:	drivers/nvme/target/

NVMEM FRAMEWORK
M:	Srinivas Kandagatla
S:	Maintained
F:	drivers/nvmem/
F:	Documentation/devicetree/bindings/nvmem/
F:	Documentation/ABI/stable/sysfs-bus-nvmem
F:	include/linux/nvmem-consumer.h
F:	include/linux/nvmem-provider.h

NXP SGTL5000 DRIVER
M:	Fabio Estevam
L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
S:	Maintained
F:	Documentation/devicetree/bindings/sound/sgtl5000.txt
F:	sound/soc/codecs/sgtl5000*

NXP TDA998X DRM DRIVER
M:	Russell King
S:	Maintained
T:	git git://git.armlinux.org.uk/~rmk/linux-arm.git drm-tda998x-devel
T:	git git://git.armlinux.org.uk/~rmk/linux-arm.git drm-tda998x-fixes
F:	drivers/gpu/drm/i2c/tda998x_drv.c
F:	include/drm/i2c/tda998x.h
F:	include/dt-bindings/display/tda998x.h
K:	"nxp,tda998x"

NXP TFA9879 DRIVER
M:	Peter Rosin
L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
S:	Maintained
F:	Documentation/devicetree/bindings/sound/tfa9879.txt
F:	sound/soc/codecs/tfa9879*

NXP-NCI NFC DRIVER
M:	Clément Perrochaud
R:	Charles Gorand
L:	linux-nfc@lists.01.org (moderated for non-subscribers)
S:	Supported
F:	drivers/nfc/nxp-nci

OBJAGG
M:	Jiri Pirko
L:	netdev@vger.kernel.org
S:	Supported
F:	lib/objagg.c
F:	lib/test_objagg.c
F:	include/linux/objagg.h

OBJTOOL
M:	Josh Poimboeuf
M:	Peter Zijlstra
S:	Supported
F:	tools/objtool/

OCXL (Open Coherent Accelerator Processor Interface OpenCAPI) DRIVER
M:	Frederic Barrat
M:	Andrew Donnellan
L:	linuxppc-dev@lists.ozlabs.org
S:	Supported
F:	arch/powerpc/platforms/powernv/ocxl.c
F:	arch/powerpc/include/asm/pnv-ocxl.h
F:	drivers/misc/ocxl/
F:	include/misc/ocxl*
F:	include/uapi/misc/ocxl.h
F:	Documentation/accelerators/ocxl.rst

OMAP AUDIO SUPPORT
M:	Peter Ujfalusi
M:	Jarkko Nikula
L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
L:	linux-omap@vger.kernel.org
S:	Maintained
F:	sound/soc/ti/omap*
F:	sound/soc/ti/rx51.c
F:	sound/soc/ti/n810.c
F:	sound/soc/ti/sdma-pcm.*

OMAP CLOCK FRAMEWORK SUPPORT
M:	Paul Walmsley
L:	linux-omap@vger.kernel.org
S:	Maintained
F:	arch/arm/*omap*/*clock*

OMAP DEVICE TREE SUPPORT
M:	Benoît Cousson
M:	Tony Lindgren
L:	linux-omap@vger.kernel.org
L:	devicetree@vger.kernel.org
S:	Maintained
F:	arch/arm/boot/dts/*omap*
F:	arch/arm/boot/dts/*am3*
F:	arch/arm/boot/dts/*am4*
F:	arch/arm/boot/dts/*am5*
F:	arch/arm/boot/dts/*dra7*

OMAP DISPLAY SUBSYSTEM and FRAMEBUFFER SUPPORT (DSS2)
L:	linux-omap@vger.kernel.org
L:	linux-fbdev@vger.kernel.org
S:	Orphan
F:	drivers/video/fbdev/omap2/
F:	Documentation/arm/OMAP/DSS

OMAP FRAMEBUFFER SUPPORT
L:	linux-fbdev@vger.kernel.org
L:	linux-omap@vger.kernel.org
S:	Orphan
F:	drivers/video/fbdev/omap/

OMAP GENERAL PURPOSE MEMORY CONTROLLER SUPPORT
M:	Roger Quadros
M:	Tony Lindgren
L:	linux-omap@vger.kernel.org
S:	Maintained
F:	drivers/memory/omap-gpmc.c
F:	arch/arm/mach-omap2/*gpmc*

OMAP GPIO DRIVER
M:	Grygorii Strashko
M:	Santosh Shilimkar
M:	Kevin Hilman
L:	linux-omap@vger.kernel.org
S:	Maintained
F:	Documentation/devicetree/bindings/gpio/gpio-omap.txt
F:	drivers/gpio/gpio-omap.c

OMAP HARDWARE SPINLOCK SUPPORT
M:	Ohad Ben-Cohen
L:	linux-omap@vger.kernel.org
S:	Maintained
F:	drivers/hwspinlock/omap_hwspinlock.c

OMAP HS MMC SUPPORT
L:	linux-mmc@vger.kernel.org
L:	linux-omap@vger.kernel.org
S:	Orphan
F:	drivers/mmc/host/omap_hsmmc.c

OMAP HWMOD DATA
M:	Paul Walmsley
L:	linux-omap@vger.kernel.org
S:	Maintained
F:	arch/arm/mach-omap2/omap_hwmod*data*

OMAP HWMOD DATA FOR OMAP4-BASED DEVICES
M:	Benoît Cousson
L:	linux-omap@vger.kernel.org
S:	Maintained
F:	arch/arm/mach-omap2/omap_hwmod_44xx_data.c

OMAP HWMOD SUPPORT
M:	Benoît Cousson
M:	Paul Walmsley
L:	linux-omap@vger.kernel.org
S:	Maintained
F:	arch/arm/mach-omap2/omap_hwmod.*

OMAP I2C DRIVER
M:	Vignesh R
L:	linux-omap@vger.kernel.org
L:	linux-i2c@vger.kernel.org
S:	Maintained
F:	Documentation/devicetree/bindings/i2c/i2c-omap.txt
F:	drivers/i2c/busses/i2c-omap.c

OMAP IMAGING SUBSYSTEM (OMAP3 ISP and OMAP4 ISS)
M:	Laurent Pinchart
L:	linux-media@vger.kernel.org
S:	Maintained
F:	Documentation/devicetree/bindings/media/ti,omap3isp.txt
F:	drivers/media/platform/omap3isp/
F:	drivers/staging/media/omap4iss/

OMAP MMC SUPPORT
M:	Aaro Koskinen
L:	linux-omap@vger.kernel.org
S:	Odd Fixes
F:	drivers/mmc/host/omap.c

OMAP POWER MANAGEMENT SUPPORT
M:	Kevin Hilman
L:	linux-omap@vger.kernel.org
S:	Maintained
F:	arch/arm/*omap*/*pm*
F:	drivers/cpufreq/omap-cpufreq.c

OMAP POWERDOMAIN SOC ADAPTATION LAYER SUPPORT
M:	Rajendra Nayak
M:	Paul Walmsley
L:	linux-omap@vger.kernel.org
S:	Maintained
F:	arch/arm/mach-omap2/prm*

OMAP RANDOM NUMBER GENERATOR SUPPORT
M:	Deepak Saxena
S:	Maintained
F:	drivers/char/hw_random/omap-rng.c

OMAP USB SUPPORT
L:	linux-usb@vger.kernel.org
L:	linux-omap@vger.kernel.org
S:	Orphan
F:	drivers/usb/*/*omap*
F:	arch/arm/*omap*/usb*

OMAP/NEWFLOW NANOBONE MACHINE SUPPORT
M:	Mark Jackson
L:	linux-omap@vger.kernel.org
S:	Maintained
F:	arch/arm/boot/dts/am335x-nano.dts

OMAP1 SUPPORT
M:	Aaro Koskinen
M:	Tony Lindgren
L:	linux-omap@vger.kernel.org
Q:	http://patchwork.kernel.org/project/linux-omap/list/
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tmlind/linux-omap.git
S:	Maintained
F:	arch/arm/mach-omap1/
F:	arch/arm/plat-omap/
F:	arch/arm/configs/omap1_defconfig
F:	drivers/i2c/busses/i2c-omap.c
F:	include/linux/platform_data/i2c-omap.h
F:	include/linux/platform_data/ams-delta-fiq.h

OMAP2+ SUPPORT
M:	Tony Lindgren
L:	linux-omap@vger.kernel.org
W:	http://www.muru.com/linux/omap/
W:	http://linux.omap.com/
Q:	http://patchwork.kernel.org/project/linux-omap/list/
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tmlind/linux-omap.git
S:	Maintained
F:	arch/arm/mach-omap2/
F:	arch/arm/plat-omap/
F:	arch/arm/configs/omap2plus_defconfig
F:	drivers/i2c/busses/i2c-omap.c
F:	drivers/irqchip/irq-omap-intc.c
F:	drivers/mfd/*omap*.c
F:	drivers/mfd/menelaus.c
F:	drivers/mfd/palmas.c
F:	drivers/mfd/tps65217.c
F:	drivers/mfd/tps65218.c
F:	drivers/mfd/tps65910.c
F:	drivers/mfd/twl-core.[ch]
F:	drivers/mfd/twl4030*.c
F:	drivers/mfd/twl6030*.c
F:	drivers/mfd/twl6040*.c
F:	drivers/regulator/palmas-regulator*.c
F:	drivers/regulator/pbias-regulator.c
F:	drivers/regulator/tps65217-regulator.c
F:	drivers/regulator/tps65218-regulator.c
F:	drivers/regulator/tps65910-regulator.c
F:	drivers/regulator/twl-regulator.c
F:	drivers/regulator/twl6030-regulator.c
F:	include/linux/platform_data/i2c-omap.h

ONION OMEGA2+ BOARD
M:	Harvey Hunt
L:	linux-mips@vger.kernel.org
S:	Maintained
F:	arch/mips/boot/dts/ralink/omega2p.dts

OMFS FILESYSTEM
M:	Bob Copeland
L:	linux-karma-devel@lists.sourceforge.net
S:	Maintained
F:	Documentation/filesystems/omfs.txt
F:	fs/omfs/

OMNIKEY CARDMAN 4000 DRIVER
M:	Harald Welte
S:	Maintained
F:	drivers/char/pcmcia/cm4000_cs.c
F:	include/linux/cm4000_cs.h
F:	include/uapi/linux/cm4000_cs.h

OMNIKEY CARDMAN 4040 DRIVER
M:	Harald Welte
S:	Maintained
F:	drivers/char/pcmcia/cm4040_cs.*

OMNIVISION OV13858 SENSOR DRIVER
M:	Sakari Ailus
L:	linux-media@vger.kernel.org
T:	git git://linuxtv.org/media_tree.git
S:	Maintained
F:	drivers/media/i2c/ov13858.c

OMNIVISION OV2680 SENSOR DRIVER
M:	Rui Miguel Silva
L:	linux-media@vger.kernel.org
T:	git git://linuxtv.org/media_tree.git
S:	Maintained
F:	drivers/media/i2c/ov2680.c
F:	Documentation/devicetree/bindings/media/i2c/ov2680.txt

OMNIVISION OV2685 SENSOR DRIVER
M:	Shunqian Zheng
L:	linux-media@vger.kernel.org
T:	git git://linuxtv.org/media_tree.git
S:	Maintained
F:	drivers/media/i2c/ov2685.c

OMNIVISION OV5640 SENSOR DRIVER
M:	Steve Longerbeam
L:	linux-media@vger.kernel.org
T:	git git://linuxtv.org/media_tree.git
S:	Maintained
F:	drivers/media/i2c/ov5640.c

OMNIVISION OV5647 SENSOR DRIVER
M:	Luis Oliveira
L:	linux-media@vger.kernel.org
T:	git git://linuxtv.org/media_tree.git
S:	Maintained
F:	drivers/media/i2c/ov5647.c

OMNIVISION OV5695 SENSOR DRIVER
M:	Shunqian Zheng
L:	linux-media@vger.kernel.org
T:	git git://linuxtv.org/media_tree.git
S:	Maintained
F:	drivers/media/i2c/ov5695.c

OMNIVISION OV7670 SENSOR DRIVER
M:	Jonathan Corbet
L:	linux-media@vger.kernel.org
T:	git git://linuxtv.org/media_tree.git
S:	Maintained
F:	drivers/media/i2c/ov7670.c
F:	Documentation/devicetree/bindings/media/i2c/ov7670.txt
OMNIVISION OV772x SENSOR DRIVER
M:	Jacopo Mondi
L:	linux-media@vger.kernel.org
T:	git git://linuxtv.org/media_tree.git
S:	Odd fixes
F:	drivers/media/i2c/ov772x.c
F:	include/media/i2c/ov772x.h
F:	Documentation/devicetree/bindings/media/i2c/ov772x.txt

OMNIVISION OV7740 SENSOR DRIVER
M:	Wenyou Yang
L:	linux-media@vger.kernel.org
T:	git git://linuxtv.org/media_tree.git
S:	Maintained
F:	drivers/media/i2c/ov7740.c
F:	Documentation/devicetree/bindings/media/i2c/ov7740.txt

OMNIVISION OV9650 SENSOR DRIVER
M:	Sakari Ailus
R:	Akinobu Mita
R:	Sylwester Nawrocki
L:	linux-media@vger.kernel.org
T:	git git://linuxtv.org/media_tree.git
S:	Maintained
F:	drivers/media/i2c/ov9650.c
F:	Documentation/devicetree/bindings/media/i2c/ov9650.txt

ONENAND FLASH DRIVER
M:	Kyungmin Park
L:	linux-mtd@lists.infradead.org
S:	Maintained
F:	drivers/mtd/nand/onenand/
F:	include/linux/mtd/onenand*.h

ONSTREAM SCSI TAPE DRIVER
M:	Willem Riede
L:	osst-users@lists.sourceforge.net
L:	linux-scsi@vger.kernel.org
S:	Maintained
F:	Documentation/scsi/osst.txt
F:	drivers/scsi/osst.*
F:	drivers/scsi/osst_*.h
F:	drivers/scsi/st.h

OP-TEE DRIVER
M:	Jens Wiklander
S:	Maintained
F:	drivers/tee/optee/

OPA-VNIC DRIVER
M:	Dennis Dalessandro
M:	Niranjana Vishwanathapura
L:	linux-rdma@vger.kernel.org
S:	Supported
F:	drivers/infiniband/ulp/opa_vnic

OPEN FIRMWARE AND DEVICE TREE OVERLAYS
M:	Pantelis Antoniou
M:	Frank Rowand
L:	devicetree@vger.kernel.org
S:	Maintained
F:	Documentation/devicetree/dynamic-resolution-notes.txt
F:	Documentation/devicetree/overlay-notes.txt
F:	drivers/of/overlay.c
F:	drivers/of/resolver.c
K:	of_overlay_notifier_

OPEN FIRMWARE AND FLATTENED DEVICE TREE
M:	Rob Herring
M:	Frank Rowand
L:	devicetree@vger.kernel.org
W:	http://www.devicetree.org/
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/robh/linux.git
S:	Maintained
F:	drivers/of/
F:	include/linux/of*.h
F:	scripts/dtc/
F:	Documentation/ABI/testing/sysfs-firmware-ofw

OPEN FIRMWARE AND FLATTENED DEVICE TREE BINDINGS
M:	Rob Herring
M:	Mark Rutland
L:	devicetree@vger.kernel.org
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/robh/linux.git
Q:	http://patchwork.ozlabs.org/project/devicetree-bindings/list/
S:	Maintained
F:	Documentation/devicetree/
F:	arch/*/boot/dts/
F:	include/dt-bindings/

OPENCORES I2C BUS DRIVER
M:	Peter Korsgaard
L:	linux-i2c@vger.kernel.org
S:	Maintained
F:	Documentation/i2c/busses/i2c-ocores
F:	drivers/i2c/busses/i2c-ocores.c

OPENRISC ARCHITECTURE
M:	Jonas Bonn
M:	Stefan Kristiansson
M:	Stafford Horne
T:	git git://github.com/openrisc/linux.git
L:	openrisc@lists.librecores.org
W:	http://openrisc.io
S:	Maintained
F:	Documentation/devicetree/bindings/openrisc/
F:	Documentation/openrisc/
F:	arch/openrisc/
F:	drivers/irqchip/irq-ompic.c
F:	drivers/irqchip/irq-or1k-*

OPENVSWITCH
M:	Pravin B Shelar
L:	netdev@vger.kernel.org
L:	dev@openvswitch.org
W:	http://openvswitch.org
S:	Maintained
F:	net/openvswitch/
F:	include/uapi/linux/openvswitch.h

OPERATING PERFORMANCE POINTS (OPP)
M:	Viresh Kumar
M:	Nishanth Menon
M:	Stephen Boyd
L:	linux-pm@vger.kernel.org
S:	Maintained
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/vireshk/pm.git
F:	drivers/opp/
F:	include/linux/pm_opp.h
F:	Documentation/power/opp.txt
F:	Documentation/devicetree/bindings/opp/

OPL4 DRIVER
M:	Clemens Ladisch
L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
T:	git git://git.alsa-project.org/alsa-kernel.git
S:	Maintained
F:	sound/drivers/opl4/

OPROFILE
M:	Robert Richter
L:	oprofile-list@lists.sf.net
S:	Maintained
F:	arch/*/include/asm/oprofile*.h
F:	arch/*/oprofile/
F:	drivers/oprofile/
F:	include/linux/oprofile.h

ORACLE CLUSTER FILESYSTEM 2 (OCFS2)
M:	Mark Fasheh
M:	Joel Becker
L:	ocfs2-devel@oss.oracle.com (moderated for non-subscribers)
W:	http://ocfs2.wiki.kernel.org
S:	Supported
F:	Documentation/filesystems/ocfs2.txt
F:	Documentation/filesystems/dlmfs.txt
F:	fs/ocfs2/

ORANGEFS FILESYSTEM
M:	Mike Marshall
R:	Martin Brandenburg
L:	devel@lists.orangefs.org
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/hubcap/linux.git
S:	Supported
F:	fs/orangefs/
F:	Documentation/filesystems/orangefs.txt

ORINOCO DRIVER
L:	linux-wireless@vger.kernel.org
W:	http://wireless.kernel.org/en/users/Drivers/orinoco
W:	http://www.nongnu.org/orinoco/
S:	Orphan
F:	drivers/net/wireless/intersil/orinoco/

OSD LIBRARY and FILESYSTEM
M:	Boaz Harrosh
S:	Maintained
F:	drivers/scsi/osd/
F:	include/scsi/osd_*
F:	fs/exofs/

OV2659 OMNIVISION SENSOR DRIVER
M:	"Lad, Prabhakar"
L:	linux-media@vger.kernel.org
W:	https://linuxtv.org
Q:	http://patchwork.linuxtv.org/project/linux-media/list/
T:	git git://linuxtv.org/mhadli/v4l-dvb-davinci_devices.git
S:	Maintained
F:	drivers/media/i2c/ov2659.c
F:	include/media/i2c/ov2659.h

OVERLAY FILESYSTEM
M:	Miklos Szeredi
L:	linux-unionfs@vger.kernel.org
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/mszeredi/vfs.git
S:	Supported
F:	fs/overlayfs/
F:	Documentation/filesystems/overlayfs.txt

P54 WIRELESS DRIVER
M:	Christian Lamparter
L:	linux-wireless@vger.kernel.org
W:	http://wireless.kernel.org/en/users/Drivers/p54
S:	Maintained
F:	drivers/net/wireless/intersil/p54/

PA SEMI ETHERNET DRIVER
L:	netdev@vger.kernel.org
S:	Orphan
F:	drivers/net/ethernet/pasemi/*

PA SEMI SMBUS DRIVER
L:	linux-i2c@vger.kernel.org
S:	Orphan
F:	drivers/i2c/busses/i2c-pasemi.c

PADATA PARALLEL EXECUTION MECHANISM
M:	Steffen Klassert
L:	linux-crypto@vger.kernel.org
S:	Maintained
F:	kernel/padata.c
F:	include/linux/padata.h
F:	Documentation/padata.txt

PANASONIC LAPTOP ACPI EXTRAS DRIVER
M:	Harald Welte
L:	platform-driver-x86@vger.kernel.org
S:	Maintained
F:	drivers/platform/x86/panasonic-laptop.c

PARALLEL LCD/KEYPAD PANEL DRIVER
M:	Willy Tarreau
M:	Ksenija Stanojevic
S:	Odd Fixes
F:	Documentation/auxdisplay/lcd-panel-cgram.txt
F:	drivers/auxdisplay/panel.c

PARALLEL PORT SUBSYSTEM
M:	Sudip Mukherjee
M:	Sudip Mukherjee
L:	linux-parport@lists.infradead.org (subscribers-only)
S:	Maintained
F:	drivers/parport/
F:	include/linux/parport*.h
F:	drivers/char/ppdev.c
F:	include/uapi/linux/ppdev.h
F:	Documentation/parport*.txt

PARAVIRT_OPS INTERFACE
M:	Juergen Gross
M:	Alok Kataria
L:	virtualization@lists.linux-foundation.org
S:	Supported
F:	Documentation/virtual/paravirt_ops.txt
F:	arch/*/kernel/paravirt*
F:	arch/*/include/asm/paravirt*.h
F:	include/linux/hypervisor.h

PARIDE DRIVERS FOR PARALLEL PORT IDE DEVICES
M:	Tim Waugh
L:	linux-parport@lists.infradead.org (subscribers-only)
S:	Maintained
F:	Documentation/blockdev/paride.txt
F:	drivers/block/paride/

PARISC ARCHITECTURE
Bottomley" M: Helge Deller L: linux-parisc@vger.kernel.org W: http://www.parisc-linux.org/ Q: http://patchwork.kernel.org/project/linux-parisc/list/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/jejb/parisc-2.6.git T: git git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux.git S: Maintained F: arch/parisc/ F: Documentation/parisc/ F: drivers/parisc/ F: drivers/char/agp/parisc-agp.c F: drivers/input/serio/gscps2.c F: drivers/parport/parport_gsc.* F: drivers/tty/serial/8250/8250_gsc.c F: drivers/video/fbdev/sti* F: drivers/video/console/sti* F: drivers/video/logo/logo_parisc* PARMAN M: Jiri Pirko L: netdev@vger.kernel.org S: Supported F: lib/parman.c F: lib/test_parman.c F: include/linux/parman.h PC87360 HARDWARE MONITORING DRIVER M: Jim Cromie L: linux-hwmon@vger.kernel.org S: Maintained F: Documentation/hwmon/pc87360 F: drivers/hwmon/pc87360.c PC8736x GPIO DRIVER M: Jim Cromie S: Maintained F: drivers/char/pc8736x_gpio.c PC87427 HARDWARE MONITORING DRIVER M: Jean Delvare L: linux-hwmon@vger.kernel.org S: Maintained F: Documentation/hwmon/pc87427 F: drivers/hwmon/pc87427.c PCA9532 LED DRIVER M: Riku Voipio S: Maintained F: drivers/leds/leds-pca9532.c F: include/linux/leds-pca9532.h PCA9541 I2C BUS MASTER SELECTOR DRIVER M: Guenter Roeck L: linux-i2c@vger.kernel.org S: Maintained F: drivers/i2c/muxes/i2c-mux-pca9541.c PCDP - PRIMARY CONSOLE AND DEBUG PORT M: Khalid Aziz S: Maintained F: drivers/firmware/pcdp.* PCI DRIVER FOR AARDVARK (Marvell Armada 3700) M: Thomas Petazzoni L: linux-pci@vger.kernel.org L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: Documentation/devicetree/bindings/pci/aardvark-pci.txt F: drivers/pci/controller/pci-aardvark.c PCI DRIVER FOR ALTERA PCIE IP M: Ley Foon Tan L: rfi@lists.rocketboards.org (moderated for non-subscribers) L: linux-pci@vger.kernel.org S: Supported F: Documentation/devicetree/bindings/pci/altera-pcie.txt F: drivers/pci/controller/pcie-altera.c PCI DRIVER FOR APPLIEDMICRO XGENE M: Tanmay Inamdar L: linux-pci@vger.kernel.org L: linux-arm-kernel@lists.infradead.org S: Maintained F: Documentation/devicetree/bindings/pci/xgene-pci.txt F: drivers/pci/controller/pci-xgene.c PCI DRIVER FOR ARM VERSATILE PLATFORM M: Rob Herring L: linux-pci@vger.kernel.org L: linux-arm-kernel@lists.infradead.org S: Maintained F: Documentation/devicetree/bindings/pci/versatile.txt F: drivers/pci/controller/pci-versatile.c PCI DRIVER FOR ARMADA 8K M: Thomas Petazzoni L: linux-pci@vger.kernel.org L: linux-arm-kernel@lists.infradead.org S: Maintained F: Documentation/devicetree/bindings/pci/pci-armada8k.txt F: drivers/pci/controller/dwc/pcie-armada8k.c PCI DRIVER FOR CADENCE PCIE IP M: Alan Douglas L: linux-pci@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/pci/cdns,*.txt F: drivers/pci/controller/pcie-cadence* PCI DRIVER FOR FREESCALE LAYERSCAPE M: Minghuan Lian M: Mingkai Hu M: Roy Zang L: linuxppc-dev@lists.ozlabs.org L: linux-pci@vger.kernel.org L: linux-arm-kernel@lists.infradead.org S: Maintained F: drivers/pci/controller/dwc/*layerscape* PCI DRIVER FOR GENERIC OF HOSTS M: Will Deacon L: linux-pci@vger.kernel.org L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: Documentation/devicetree/bindings/pci/host-generic-pci.txt F: drivers/pci/controller/pci-host-common.c F: drivers/pci/controller/pci-host-generic.c PCI DRIVER FOR IMX6 M: Richard Zhu M: Lucas Stach L: linux-pci@vger.kernel.org L: linux-arm-kernel@lists.infradead.org (moderated for 
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S:	Maintained
F:	Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt
F:	drivers/pci/controller/dwc/*imx6*

PCI DRIVER FOR INTEL VOLUME MANAGEMENT DEVICE (VMD)
M:	Keith Busch
M:	Jonathan Derrick
L:	linux-pci@vger.kernel.org
S:	Supported
F:	drivers/pci/controller/vmd.c

PCI DRIVER FOR MICROSEMI SWITCHTEC
M:	Kurt Schwemmer
M:	Logan Gunthorpe
L:	linux-pci@vger.kernel.org
S:	Maintained
F:	Documentation/switchtec.txt
F:	Documentation/ABI/testing/sysfs-class-switchtec
F:	drivers/pci/switch/switchtec*
F:	include/uapi/linux/switchtec_ioctl.h
F:	include/linux/switchtec.h
F:	drivers/ntb/hw/mscc/

PCI DRIVER FOR MOBIVEIL PCIE IP
M:	Subrahmanya Lingappa
L:	linux-pci@vger.kernel.org
S:	Supported
F:	Documentation/devicetree/bindings/pci/mobiveil-pcie.txt
F:	drivers/pci/controller/pcie-mobiveil.c

PCI DRIVER FOR MVEBU (Marvell Armada 370 and Armada XP SOC support)
M:	Thomas Petazzoni
M:	Jason Cooper
L:	linux-pci@vger.kernel.org
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S:	Maintained
F:	drivers/pci/controller/*mvebu*

PCI DRIVER FOR NVIDIA TEGRA
M:	Thierry Reding
L:	linux-tegra@vger.kernel.org
L:	linux-pci@vger.kernel.org
S:	Supported
F:	Documentation/devicetree/bindings/pci/nvidia,tegra20-pcie.txt
F:	drivers/pci/controller/pci-tegra.c

PCI DRIVER FOR RENESAS R-CAR
M:	Simon Horman
L:	linux-pci@vger.kernel.org
L:	linux-renesas-soc@vger.kernel.org
S:	Maintained
F:	drivers/pci/controller/*rcar*

PCI DRIVER FOR SAMSUNG EXYNOS
M:	Jingoo Han
L:	linux-pci@vger.kernel.org
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L:	linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
S:	Maintained
F:	drivers/pci/controller/dwc/pci-exynos.c

PCI DRIVER FOR SYNOPSYS DESIGNWARE
M:	Jingoo Han
M:	Gustavo Pimentel
L:	linux-pci@vger.kernel.org
S:	Maintained
F:	Documentation/devicetree/bindings/pci/designware-pcie.txt
F:	drivers/pci/controller/dwc/*designware*

PCI DRIVER FOR TI DRA7XX
M:	Kishon Vijay Abraham I
L:	linux-omap@vger.kernel.org
L:	linux-pci@vger.kernel.org
S:	Supported
F:	Documentation/devicetree/bindings/pci/ti-pci.txt
F:	drivers/pci/controller/dwc/pci-dra7xx.c

PCI DRIVER FOR TI KEYSTONE
M:	Murali Karicheri
L:	linux-pci@vger.kernel.org
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S:	Maintained
F:	drivers/pci/controller/dwc/pci-keystone.c

PCI ENDPOINT SUBSYSTEM
M:	Kishon Vijay Abraham I
M:	Lorenzo Pieralisi
L:	linux-pci@vger.kernel.org
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/kishon/pci-endpoint.git
S:	Supported
F:	drivers/pci/endpoint/
F:	drivers/misc/pci_endpoint_test.c
F:	tools/pci/

PCI ENHANCED ERROR HANDLING (EEH) FOR POWERPC
M:	Russell Currey
M:	Sam Bobroff
M:	Oliver O'Halloran
L:	linuxppc-dev@lists.ozlabs.org
S:	Supported
F:	Documentation/PCI/pci-error-recovery.txt
F:	drivers/pci/pcie/aer.c
F:	drivers/pci/pcie/dpc.c
F:	drivers/pci/pcie/err.c
F:	Documentation/powerpc/eeh-pci-error-recovery.txt
F:	arch/powerpc/kernel/eeh*.c
F:	arch/powerpc/platforms/*/eeh*.c
F:	arch/powerpc/include/*/eeh*.h

PCI ERROR RECOVERY
M:	Linas Vepstas
L:	linux-pci@vger.kernel.org
S:	Supported
F:	Documentation/PCI/pci-error-recovery.txt

PCI MSI DRIVER FOR ALTERA MSI IP
M:	Ley Foon Tan
L:	rfi@lists.rocketboards.org (moderated for non-subscribers)
L:	linux-pci@vger.kernel.org
S:	Supported
F:	Documentation/devicetree/bindings/pci/altera-pcie-msi.txt
F:	drivers/pci/controller/pcie-altera-msi.c

PCI MSI DRIVER FOR APPLIEDMICRO XGENE
M:	Duc Dang
L:	linux-pci@vger.kernel.org
L:	linux-arm-kernel@lists.infradead.org
S:	Maintained
F:	Documentation/devicetree/bindings/pci/xgene-pci-msi.txt
F:	drivers/pci/controller/pci-xgene-msi.c

PCI SUBSYSTEM
M:	Bjorn Helgaas
L:	linux-pci@vger.kernel.org
Q:	http://patchwork.ozlabs.org/project/linux-pci/list/
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/helgaas/pci.git
S:	Supported
F:	Documentation/devicetree/bindings/pci/
F:	Documentation/PCI/
F:	drivers/acpi/pci*
F:	drivers/pci/
F:	include/asm-generic/pci*
F:	include/linux/pci*
F:	include/linux/of_pci.h
F:	include/uapi/linux/pci*
F:	lib/pci*
F:	arch/x86/pci/
F:	arch/x86/kernel/quirks.c
F:	arch/x86/kernel/early-quirks.c

PCI NATIVE HOST BRIDGE AND ENDPOINT DRIVERS
M:	Lorenzo Pieralisi
L:	linux-pci@vger.kernel.org
Q:	http://patchwork.ozlabs.org/project/linux-pci/list/
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/lpieralisi/pci.git/
S:	Supported
F:	drivers/pci/controller/

PCIE DRIVER FOR AMLOGIC MESON
M:	Yue Wang
L:	linux-pci@vger.kernel.org
L:	linux-amlogic@lists.infradead.org
S:	Maintained
F:	drivers/pci/controller/dwc/pci-meson.c

PCIE DRIVER FOR AXIS ARTPEC
M:	Jesper Nilsson
L:	linux-arm-kernel@axis.com
L:	linux-pci@vger.kernel.org
S:	Maintained
F:	Documentation/devicetree/bindings/pci/axis,artpec*
F:	drivers/pci/controller/dwc/*artpec*

PCIE DRIVER FOR CAVIUM THUNDERX
M:	David Daney
L:	linux-pci@vger.kernel.org
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S:	Supported
F:	Documentation/devicetree/bindings/pci/pci-thunder-*
F:	drivers/pci/controller/pci-thunder-*

PCIE DRIVER FOR HISILICON
M:	Zhou Wang
L:	linux-pci@vger.kernel.org
S:	Maintained
F:	Documentation/devicetree/bindings/pci/hisilicon-pcie.txt
F:	drivers/pci/controller/dwc/pcie-hisi.c

PCIE DRIVER FOR HISILICON KIRIN
M:	Xiaowei Song
M:	Binghui Wang
L:	linux-pci@vger.kernel.org
S:	Maintained
F:	Documentation/devicetree/bindings/pci/kirin-pcie.txt
F:	drivers/pci/controller/dwc/pcie-kirin.c

PCIE DRIVER FOR HISILICON STB
M:	Shawn Guo
L:	linux-pci@vger.kernel.org
S:	Maintained
F:	Documentation/devicetree/bindings/pci/hisilicon-histb-pcie.txt
F:	drivers/pci/controller/dwc/pcie-histb.c

PCIE DRIVER FOR MEDIATEK
M:	Ryder Lee
L:	linux-pci@vger.kernel.org
L:	linux-mediatek@lists.infradead.org
S:	Supported
F:	Documentation/devicetree/bindings/pci/mediatek*
F:	drivers/pci/controller/*mediatek*

PCIE DRIVER FOR QUALCOMM MSM
M:	Stanimir Varbanov
L:	linux-pci@vger.kernel.org
L:	linux-arm-msm@vger.kernel.org
S:	Maintained
F:	drivers/pci/controller/dwc/*qcom*

PCIE DRIVER FOR ROCKCHIP
M:	Shawn Lin
L:	linux-pci@vger.kernel.org
L:	linux-rockchip@lists.infradead.org
S:	Maintained
F:	Documentation/devicetree/bindings/pci/rockchip-pcie*
F:	drivers/pci/controller/pcie-rockchip*

PCI DRIVER FOR V3 SEMICONDUCTOR V360EPC
M:	Linus Walleij
L:	linux-pci@vger.kernel.org
S:	Maintained
F:	Documentation/devicetree/bindings/pci/v3-v360epc-pci.txt
F:	drivers/pci/controller/pci-v3-semi.c

PCIE DRIVER FOR SOCIONEXT UNIPHIER
M:	Kunihiko Hayashi
L:	linux-pci@vger.kernel.org
S:	Maintained
F:	Documentation/devicetree/bindings/pci/uniphier-pcie.txt
F:	drivers/pci/controller/dwc/pcie-uniphier.c

PCIE DRIVER FOR ST SPEAR13XX
M:	Pratyush Anand
L:	linux-pci@vger.kernel.org
S:	Maintained
F:	drivers/pci/controller/dwc/*spear*

PCMCIA SUBSYSTEM
M:	Dominik Brodowski
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/brodo/pcmcia.git
S:	Odd Fixes
F:	Documentation/pcmcia/
F:	tools/pcmcia/
F:	drivers/pcmcia/
F:	include/pcmcia/

PCNET32 NETWORK DRIVER
M:	Don Fry
L:	netdev@vger.kernel.org
S:	Maintained
F:	drivers/net/ethernet/amd/pcnet32.c

PCRYPT PARALLEL CRYPTO ENGINE
M:	Steffen Klassert
L:	linux-crypto@vger.kernel.org
S:	Maintained
F:	crypto/pcrypt.c
F:	include/crypto/pcrypt.h

PEAQ WMI HOTKEYS DRIVER
M:	Hans de Goede
L:	platform-driver-x86@vger.kernel.org
S:	Maintained
F:	drivers/platform/x86/peaq-wmi.c

PER-CPU MEMORY ALLOCATOR
M:	Dennis Zhou
M:	Tejun Heo
M:	Christoph Lameter
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/dennis/percpu.git
S:	Maintained
F:	include/linux/percpu*.h
F:	mm/percpu*.c
F:	arch/*/include/asm/percpu.h

PER-TASK DELAY ACCOUNTING
M:	Balbir Singh
S:	Maintained
F:	include/linux/delayacct.h
F:	kernel/delayacct.c

PERFORMANCE EVENTS SUBSYSTEM
M:	Peter Zijlstra
M:	Ingo Molnar
M:	Arnaldo Carvalho de Melo
R:	Alexander Shishkin
R:	Jiri Olsa
R:	Namhyung Kim
L:	linux-kernel@vger.kernel.org
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git perf/core
S:	Supported
F:	kernel/events/*
F:	include/linux/perf_event.h
F:	include/uapi/linux/perf_event.h
F:	arch/*/kernel/perf_event*.c
F:	arch/*/kernel/*/perf_event*.c
F:	arch/*/kernel/*/*/perf_event*.c
F:	arch/*/include/asm/perf_event.h
F:	arch/*/kernel/perf_callchain.c
F:	arch/*/events/*
F:	tools/perf/

PERSONALITY HANDLING
M:	Christoph Hellwig
L:	linux-abi-devel@lists.sourceforge.net
S:	Maintained
F:	include/linux/personality.h
F:	include/uapi/linux/personality.h

PHOENIX RC FLIGHT CONTROLLER ADAPTER
M:	Marcus Folkesson
L:	linux-input@vger.kernel.org
S:	Maintained
F:	Documentation/input/devices/pxrc.rst
F:	drivers/input/joystick/pxrc.c

PHONET PROTOCOL
M:	Remi Denis-Courmont
S:	Supported
F:	Documentation/networking/phonet.txt
F:	include/linux/phonet.h
F:	include/net/phonet/
F:	include/uapi/linux/phonet.h
F:	net/phonet/

PHRAM MTD DRIVER
M:	Joern Engel
L:	linux-mtd@lists.infradead.org
S:	Maintained
F:	drivers/mtd/devices/phram.c

PICOLCD HID DRIVER
M:	Bruno Prémont
L:	linux-input@vger.kernel.org
S:	Maintained
F:	drivers/hid/hid-picolcd*

PICOXCELL SUPPORT
M:	Jamie Iles
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
T:	git git://github.com/jamieiles/linux-2.6-ji.git
S:	Supported
F:	arch/arm/boot/dts/picoxcell*
F:	arch/arm/mach-picoxcell/
F:	drivers/crypto/picoxcell*

PIN CONTROL SUBSYSTEM
M:	Linus Walleij
L:	linux-gpio@vger.kernel.org
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-pinctrl.git
S:	Maintained
F:	Documentation/devicetree/bindings/pinctrl/
F:	Documentation/driver-api/pinctl.rst
F:	drivers/pinctrl/
F:	include/linux/pinctrl/

PIN CONTROLLER - MICROCHIP AT91
M:	Ludovic Desroches
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L:	linux-gpio@vger.kernel.org
S:	Supported
F:	drivers/pinctrl/pinctrl-at91*

PIN CONTROLLER - FREESCALE
M:	Dong Aisheng
M:	Fabio Estevam
M:	Shawn Guo
M:	Stefan Agner
R:	Pengutronix Kernel Team
L:	linux-gpio@vger.kernel.org
S:	Maintained
F:	drivers/pinctrl/freescale/
F:	Documentation/devicetree/bindings/pinctrl/fsl,*

PIN CONTROLLER - INTEL
M:	Mika Westerberg
M:	Andy Shevchenko
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/pinctrl/intel.git
S:	Maintained
F:	drivers/pinctrl/intel/

PIN CONTROLLER - MEDIATEK
M:	Sean Wang
L:	linux-mediatek@lists.infradead.org (moderated for non-subscribers)
S:	Maintained
F:	Documentation/devicetree/bindings/pinctrl/pinctrl-mt65xx.txt
F:	Documentation/devicetree/bindings/pinctrl/pinctrl-mt7622.txt
F:	drivers/pinctrl/mediatek/

PIN CONTROLLER - QUALCOMM
M:	Bjorn Andersson
S:	Maintained
L:	linux-arm-msm@vger.kernel.org
F:	Documentation/devicetree/bindings/pinctrl/qcom,*.txt
F:	drivers/pinctrl/qcom/

PIN CONTROLLER - RENESAS
M:	Geert Uytterhoeven
L:	linux-renesas-soc@vger.kernel.org
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/geert/renesas-drivers.git sh-pfc
S:	Maintained
F:	drivers/pinctrl/pinctrl-rz*
F:	drivers/pinctrl/sh-pfc/

PIN CONTROLLER - SAMSUNG
M:	Tomasz Figa
M:	Krzysztof Kozlowski
M:	Sylwester Nawrocki
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L:	linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
Q:	https://patchwork.kernel.org/project/linux-samsung-soc/list/
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/pinctrl/samsung.git
S:	Maintained
F:	drivers/pinctrl/samsung/
F:	include/dt-bindings/pinctrl/samsung.h
F:	Documentation/devicetree/bindings/pinctrl/samsung-pinctrl.txt

PIN CONTROLLER - SINGLE
M:	Tony Lindgren
M:	Haojian Zhuang
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L:	linux-omap@vger.kernel.org
S:	Maintained
F:	drivers/pinctrl/pinctrl-single.c

PIN CONTROLLER - ST SPEAR
M:	Viresh Kumar
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
W:	http://www.st.com/spear
S:	Maintained
F:	drivers/pinctrl/spear/

PISTACHIO SOC SUPPORT
M:	James Hartley
L:	linux-mips@vger.kernel.org
S:	Odd Fixes
F:	arch/mips/pistachio/
F:	arch/mips/include/asm/mach-pistachio/
F:	arch/mips/boot/dts/img/pistachio*
F:	arch/mips/configs/pistachio*_defconfig

PKTCDVD DRIVER
S:	Orphan
M:	linux-block@vger.kernel.org
F:	drivers/block/pktcdvd.c
F:	include/linux/pktcdvd.h
F:	include/uapi/linux/pktcdvd.h

PKUNITY SOC DRIVERS
M:	Guan Xuetao
W:	http://mprc.pku.edu.cn/~guanxuetao/linux
S:	Maintained
T:	git git://github.com/gxt/linux.git
F:	drivers/input/serio/i8042-unicore32io.h
F:	drivers/i2c/busses/i2c-puv3.c
F:	drivers/video/fbdev/fb-puv3.c
F:	drivers/rtc/rtc-puv3.c

PMBUS HARDWARE MONITORING DRIVERS
M:	Guenter Roeck
L:	linux-hwmon@vger.kernel.org
W:	http://hwmon.wiki.kernel.org/
W:	http://www.roeck-us.net/linux/drivers/
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/groeck/linux-staging.git
S:	Maintained
F:	Documentation/devicetree/bindings/hwmon/ibm,cffps1.txt
F:	Documentation/devicetree/bindings/hwmon/max31785.txt
F:	Documentation/devicetree/bindings/hwmon/ltc2978.txt
F:	Documentation/hwmon/adm1275
F:	Documentation/hwmon/ibm-cffps
F:	Documentation/hwmon/ir35221
F:	Documentation/hwmon/lm25066
F:	Documentation/hwmon/ltc2978
F:	Documentation/hwmon/ltc3815
F:	Documentation/hwmon/max16064
F:	Documentation/hwmon/max20751
F:	Documentation/hwmon/max31785
F:	Documentation/hwmon/max34440
F:	Documentation/hwmon/max8688
F:	Documentation/hwmon/pmbus
F:	Documentation/hwmon/pmbus-core
F:	Documentation/hwmon/tps40422
F:	Documentation/hwmon/ucd9000
F:	Documentation/hwmon/ucd9200
F:	Documentation/hwmon/zl6100
F:	drivers/hwmon/pmbus/
F:	include/linux/pmbus.h

PMC SIERRA MaxRAID DRIVER
L:	linux-scsi@vger.kernel.org
W:	http://www.pmc-sierra.com/
S:	Orphan
F:	drivers/scsi/pmcraid.*

PMC SIERRA PM8001 DRIVER
M:	Jack Wang
M:	lindar_liu@usish.com
L:	linux-scsi@vger.kernel.org
S:	Supported
F:	drivers/scsi/pm8001/

PNP SUPPORT
M:	"Rafael J. Wysocki"
S:	Maintained
F:	drivers/pnp/

PNI RM3100 IIO DRIVER
M:	Song Qiang
L:	linux-iio@vger.kernel.org
S:	Maintained
F:	drivers/iio/magnetometer/rm3100*
F:	Documentation/devicetree/bindings/iio/magnetometer/pni,rm3100.txt

POSIX CLOCKS and TIMERS
M:	Thomas Gleixner
L:	linux-kernel@vger.kernel.org
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core
S:	Maintained
F:	fs/timerfd.c
F:	include/linux/timer*
F:	kernel/time/*timer*

POWER MANAGEMENT CORE
Wysocki" L: linux-pm@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm B: https://bugzilla.kernel.org S: Supported F: drivers/base/power/ F: include/linux/pm.h F: include/linux/pm_* F: include/linux/powercap.h F: drivers/powercap/ F: kernel/configs/nopm.config POWER STATE COORDINATION INTERFACE (PSCI) M: Mark Rutland M: Lorenzo Pieralisi L: linux-arm-kernel@lists.infradead.org S: Maintained F: drivers/firmware/psci*.c F: include/linux/psci.h F: include/uapi/linux/psci.h POWER SUPPLY CLASS/SUBSYSTEM and DRIVERS M: Sebastian Reichel L: linux-pm@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/sre/linux-power-supply.git S: Maintained F: Documentation/ABI/testing/sysfs-class-power F: Documentation/devicetree/bindings/power/supply/ F: include/linux/power_supply.h F: drivers/power/supply/ POWERNV OPERATOR PANEL LCD DISPLAY DRIVER M: Suraj Jitindar Singh L: linuxppc-dev@lists.ozlabs.org S: Maintained F: drivers/char/powernv-op-panel.c PPP OVER ATM (RFC 2364) M: Mitchell Blank Jr S: Maintained F: net/atm/pppoatm.c F: include/uapi/linux/atmppp.h PPP OVER ETHERNET M: Michal Ostrowski S: Maintained F: drivers/net/ppp/pppoe.c F: drivers/net/ppp/pppox.c PPP OVER L2TP M: James Chapman S: Maintained F: net/l2tp/l2tp_ppp.c F: include/linux/if_pppol2tp.h F: include/uapi/linux/if_pppol2tp.h PPP PROTOCOL DRIVERS AND COMPRESSORS M: Paul Mackerras L: linux-ppp@vger.kernel.org S: Maintained F: drivers/net/ppp/ppp_* PPS SUPPORT M: Rodolfo Giometti W: http://wiki.enneenne.com/index.php/LinuxPPS_support L: linuxpps@ml.enneenne.com (subscribers-only) S: Maintained F: Documentation/pps/ F: Documentation/devicetree/bindings/pps/pps-gpio.txt F: Documentation/ABI/testing/sysfs-pps F: drivers/pps/ F: include/linux/pps*.h F: include/uapi/linux/pps.h PPTP DRIVER M: Dmitry Kozlov L: netdev@vger.kernel.org S: Maintained F: drivers/net/ppp/pptp.c W: http://sourceforge.net/projects/accel-pptp PREEMPTIBLE KERNEL M: Robert Love L: kpreempt-tech@lists.sourceforge.net W: https://www.kernel.org/pub/linux/kernel/people/rml/preempt-kernel S: Supported F: Documentation/preempt-locking.txt F: include/linux/preempt.h PRINTK M: Petr Mladek M: Sergey Senozhatsky R: Steven Rostedt S: Maintained F: kernel/printk/ F: include/linux/printk.h PRISM54 WIRELESS DRIVER M: Luis Chamberlain L: linux-wireless@vger.kernel.org W: http://wireless.kernel.org/en/users/Drivers/p54 S: Obsolete F: drivers/net/wireless/intersil/prism54/ PROC FILESYSTEM R: Alexey Dobriyan L: linux-kernel@vger.kernel.org L: linux-fsdevel@vger.kernel.org S: Maintained F: fs/proc/ F: include/linux/proc_fs.h F: tools/testing/selftests/proc/ F: Documentation/filesystems/proc.txt PROC SYSCTL M: Luis Chamberlain M: Kees Cook L: linux-kernel@vger.kernel.org L: linux-fsdevel@vger.kernel.org S: Maintained F: fs/proc/proc_sysctl.c F: include/linux/sysctl.h F: kernel/sysctl.c F: tools/testing/selftests/sysctl/ PS3 NETWORK SUPPORT M: Geoff Levand L: netdev@vger.kernel.org L: linuxppc-dev@lists.ozlabs.org S: Maintained F: drivers/net/ethernet/toshiba/ps3_gelic_net.* PS3 PLATFORM SUPPORT M: Geoff Levand L: linuxppc-dev@lists.ozlabs.org S: Maintained F: arch/powerpc/boot/ps3* F: arch/powerpc/include/asm/lv1call.h F: arch/powerpc/include/asm/ps3*.h F: arch/powerpc/platforms/ps3/ F: drivers/*/ps3* F: drivers/ps3/ F: drivers/rtc/rtc-ps3.c F: drivers/usb/host/*ps3.c F: sound/ppc/snd_ps3* PS3VRAM DRIVER M: Jim Paris M: Geoff Levand L: linuxppc-dev@lists.ozlabs.org S: Maintained F: drivers/block/ps3vram.c PSAMPLE PACKET SAMPLING SUPPORT: M: 
M:	Yotam Gigi
S:	Maintained
F:	net/psample
F:	include/net/psample.h
F:	include/uapi/linux/psample.h

PSTORE FILESYSTEM
M:	Kees Cook
M:	Anton Vorontsov
M:	Colin Cross
M:	Tony Luck
S:	Maintained
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/pstore
F:	fs/pstore/
F:	include/linux/pstore*
F:	drivers/firmware/efi/efi-pstore.c
F:	drivers/acpi/apei/erst.c
F:	Documentation/admin-guide/ramoops.rst
F:	Documentation/devicetree/bindings/reserved-memory/ramoops.txt
K:	\b(pstore|ramoops)

PTP HARDWARE CLOCK SUPPORT
M:	Richard Cochran
L:	netdev@vger.kernel.org
S:	Maintained
W:	http://linuxptp.sourceforge.net/
F:	Documentation/ABI/testing/sysfs-ptp
F:	Documentation/ptp/*
F:	drivers/net/phy/dp83640*
F:	drivers/ptp/*
F:	include/linux/ptp_cl*

PTRACE SUPPORT
M:	Oleg Nesterov
S:	Maintained
F:	include/asm-generic/syscall.h
F:	include/linux/ptrace.h
F:	include/linux/regset.h
F:	include/linux/tracehook.h
F:	include/uapi/linux/ptrace.h
F:	include/asm-generic/ptrace.h
F:	kernel/ptrace.c
F:	arch/*/ptrace*.c
F:	arch/*/*/ptrace*.c
F:	arch/*/include/asm/ptrace*.h

PULSE8-CEC DRIVER
M:	Hans Verkuil
L:	linux-media@vger.kernel.org
T:	git git://linuxtv.org/media_tree.git
S:	Maintained
F:	drivers/media/usb/pulse8-cec/*
F:	Documentation/media/cec-drivers/pulse8-cec.rst

PVRUSB2 VIDEO4LINUX DRIVER
M:	Mike Isely
L:	pvrusb2@isely.net (subscribers-only)
L:	linux-media@vger.kernel.org
W:	http://www.isely.net/pvrusb2/
T:	git git://linuxtv.org/media_tree.git
S:	Maintained
F:	Documentation/media/v4l-drivers/pvrusb2*
F:	drivers/media/usb/pvrusb2/

PWC WEBCAM DRIVER
M:	Hans Verkuil
L:	linux-media@vger.kernel.org
T:	git git://linuxtv.org/media_tree.git
S:	Odd Fixes
F:	drivers/media/usb/pwc/*

PWM FAN DRIVER
M:	Kamil Debski
M:	Bartlomiej Zolnierkiewicz
L:	linux-hwmon@vger.kernel.org
S:	Supported
F:	Documentation/devicetree/bindings/hwmon/pwm-fan.txt
F:	Documentation/hwmon/pwm-fan
F:	drivers/hwmon/pwm-fan.c

PWM IR Transmitter
M:	Sean Young
L:	linux-media@vger.kernel.org
S:	Maintained
F:	drivers/media/rc/pwm-ir-tx.c

PWM SUBSYSTEM
M:	Thierry Reding
L:	linux-pwm@vger.kernel.org
S:	Maintained
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/thierry.reding/linux-pwm.git
F:	Documentation/pwm.txt
F:	Documentation/devicetree/bindings/pwm/
F:	include/linux/pwm.h
F:	drivers/pwm/
F:	drivers/video/backlight/pwm_bl.c
F:	include/linux/pwm_backlight.h
F:	drivers/gpio/gpio-mvebu.c
F:	Documentation/devicetree/bindings/gpio/gpio-mvebu.txt

PXA GPIO DRIVER
M:	Robert Jarzmik
L:	linux-gpio@vger.kernel.org
S:	Maintained
F:	drivers/gpio/gpio-pxa.c

PXA MMCI DRIVER
S:	Orphan

PXA RTC DRIVER
M:	Robert Jarzmik
L:	linux-rtc@vger.kernel.org
S:	Maintained

PXA2xx/PXA3xx SUPPORT
M:	Daniel Mack
M:	Haojian Zhuang
M:	Robert Jarzmik
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
T:	git git://github.com/hzhuang1/linux.git
T:	git git://github.com/rjarzmik/linux.git
S:	Maintained
F:	arch/arm/boot/dts/pxa*
F:	arch/arm/mach-pxa/
F:	drivers/dma/pxa*
F:	drivers/pcmcia/pxa2xx*
F:	drivers/pinctrl/pxa/
F:	drivers/spi/spi-pxa2xx*
F:	drivers/usb/gadget/udc/pxa2*
F:	include/sound/pxa2xx-lib.h
F:	sound/arm/pxa*
F:	sound/soc/pxa/

QAT DRIVER
M:	Giovanni Cabiddu
L:	qat-linux@intel.com
S:	Supported
F:	drivers/crypto/qat/

QCOM AUDIO (ASoC) DRIVERS
M:	Patrick Lai
M:	Banajit Goswami
L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
S:	Supported
F:	sound/soc/qcom/

QEMU MACHINE EMULATOR AND VIRTUALIZER SUPPORT
M:	Gabriel Somlo
Tsirkin" L: qemu-devel@nongnu.org S: Maintained F: drivers/firmware/qemu_fw_cfg.c F: include/uapi/linux/qemu_fw_cfg.h QIB DRIVER M: Dennis Dalessandro M: Mike Marciniszyn L: linux-rdma@vger.kernel.org S: Supported F: drivers/infiniband/hw/qib/ QLOGIC QL41xxx FCOE DRIVER M: QLogic-Storage-Upstream@cavium.com L: linux-scsi@vger.kernel.org S: Supported F: drivers/scsi/qedf/ QLOGIC QL41xxx ISCSI DRIVER M: QLogic-Storage-Upstream@cavium.com L: linux-scsi@vger.kernel.org S: Supported F: drivers/scsi/qedi/ QLOGIC QL4xxx ETHERNET DRIVER M: Ariel Elior M: everest-linux-l2@cavium.com L: netdev@vger.kernel.org S: Supported F: drivers/net/ethernet/qlogic/qed/ F: include/linux/qed/ F: drivers/net/ethernet/qlogic/qede/ QLOGIC QL4xxx RDMA DRIVER M: Michal Kalderon M: Ariel Elior L: linux-rdma@vger.kernel.org S: Supported F: drivers/infiniband/hw/qedr/ F: include/uapi/rdma/qedr-abi.h QLOGIC QLA1280 SCSI DRIVER M: Michael Reed L: linux-scsi@vger.kernel.org S: Maintained F: drivers/scsi/qla1280.[ch] QLOGIC QLA2XXX FC-SCSI DRIVER M: qla2xxx-upstream@qlogic.com L: linux-scsi@vger.kernel.org S: Supported F: Documentation/scsi/LICENSE.qla2xxx F: drivers/scsi/qla2xxx/ QLOGIC QLA3XXX NETWORK DRIVER M: Dept-GELinuxNICDev@cavium.com L: netdev@vger.kernel.org S: Supported F: Documentation/networking/device_drivers/qlogic/LICENSE.qla3xxx F: drivers/net/ethernet/qlogic/qla3xxx.* QLOGIC QLA4XXX iSCSI DRIVER M: QLogic-Storage-Upstream@qlogic.com L: linux-scsi@vger.kernel.org S: Supported F: Documentation/scsi/LICENSE.qla4xxx F: drivers/scsi/qla4xxx/ QLOGIC QLCNIC (1/10)Gb ETHERNET DRIVER M: Shahed Shaikh M: Manish Chopra M: Dept-GELinuxNICDev@cavium.com L: netdev@vger.kernel.org S: Supported F: drivers/net/ethernet/qlogic/qlcnic/ QLOGIC QLGE 10Gb ETHERNET DRIVER M: Manish Chopra M: Dept-GELinuxNICDev@cavium.com L: netdev@vger.kernel.org S: Supported F: drivers/net/ethernet/qlogic/qlge/ QM1D1B0004 MEDIA DRIVER M: Akihiro Tsukada L: linux-media@vger.kernel.org S: Odd Fixes F: drivers/media/tuners/qm1d1b0004* QM1D1C0042 MEDIA DRIVER M: Akihiro Tsukada L: linux-media@vger.kernel.org S: Odd Fixes F: drivers/media/tuners/qm1d1c0042* QNX4 FILESYSTEM M: Anders Larsen W: http://www.alarsen.net/linux/qnx4fs/ S: Maintained F: fs/qnx4/ F: include/uapi/linux/qnx4_fs.h F: include/uapi/linux/qnxtypes.h QORIQ DPAA2 FSL-MC BUS DRIVER M: Stuart Yoder M: Laurentiu Tudor L: linux-kernel@vger.kernel.org S: Maintained F: drivers/bus/fsl-mc/ F: Documentation/devicetree/bindings/misc/fsl,qoriq-mc.txt F: Documentation/networking/device_drivers/freescale/dpaa2/overview.rst QT1010 MEDIA DRIVER M: Antti Palosaari L: linux-media@vger.kernel.org W: https://linuxtv.org W: http://palosaari.fi/linux/ Q: http://patchwork.linuxtv.org/project/linux-media/list/ T: git git://linuxtv.org/anttip/media_tree.git S: Maintained F: drivers/media/tuners/qt1010* QUALCOMM ATHEROS ATH10K WIRELESS DRIVER M: Kalle Valo L: ath10k@lists.infradead.org W: http://wireless.kernel.org/en/users/Drivers/ath10k T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git S: Supported F: drivers/net/wireless/ath/ath10k/ QUALCOMM ATHEROS ATH9K WIRELESS DRIVER M: QCA ath9k Development L: linux-wireless@vger.kernel.org W: http://wireless.kernel.org/en/users/Drivers/ath9k S: Supported F: drivers/net/wireless/ath/ath9k/ QUALCOMM CAMERA SUBSYSTEM DRIVER M: Todor Tomov L: linux-media@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/media/qcom,camss.txt F: Documentation/media/v4l-drivers/qcom_camss.rst F: drivers/media/platform/qcom/camss/ QUALCOMM CPUFREQ DRIVER 
QUALCOMM CPUFREQ DRIVER MSM8996/APQ8096
M:	Ilia Lin
L:	linux-pm@vger.kernel.org
S:	Maintained
F:	Documentation/devicetree/bindings/opp/kryo-cpufreq.txt
F:	drivers/cpufreq/qcom-cpufreq-kryo.c

QUALCOMM EMAC GIGABIT ETHERNET DRIVER
M:	Timur Tabi
L:	netdev@vger.kernel.org
S:	Maintained
F:	drivers/net/ethernet/qualcomm/emac/

QUALCOMM GENERIC INTERFACE I2C DRIVER
M:	Alok Chauhan
M:	Karthikeyan Ramasubramanian
L:	linux-i2c@vger.kernel.org
L:	linux-arm-msm@vger.kernel.org
S:	Supported
F:	drivers/i2c/busses/i2c-qcom-geni.c

QUALCOMM HEXAGON ARCHITECTURE
M:	Richard Kuo
L:	linux-hexagon@vger.kernel.org
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/rkuo/linux-hexagon-kernel.git
S:	Supported
F:	arch/hexagon/

QUALCOMM HIDMA DRIVER
M:	Sinan Kaya
L:	linux-arm-kernel@lists.infradead.org
L:	linux-arm-msm@vger.kernel.org
L:	dmaengine@vger.kernel.org
S:	Supported
F:	drivers/dma/qcom/hidma*

QUALCOMM IOMMU
M:	Rob Clark
L:	iommu@lists.linux-foundation.org
L:	linux-arm-msm@vger.kernel.org
S:	Maintained
F:	drivers/iommu/qcom_iommu.c

QUALCOMM TSENS THERMAL DRIVER
M:	Amit Kucheria
L:	linux-pm@vger.kernel.org
L:	linux-arm-msm@vger.kernel.org
S:	Maintained
F:	drivers/thermal/qcom/

QUALCOMM VENUS VIDEO ACCELERATOR DRIVER
M:	Stanimir Varbanov
L:	linux-media@vger.kernel.org
L:	linux-arm-msm@vger.kernel.org
T:	git git://linuxtv.org/media_tree.git
S:	Maintained
F:	drivers/media/platform/qcom/venus/

QUALCOMM WCN36XX WIRELESS DRIVER
M:	Kalle Valo
L:	wcn36xx@lists.infradead.org
W:	http://wireless.kernel.org/en/users/Drivers/wcn36xx
T:	git git://github.com/KrasnikovEugene/wcn36xx.git
S:	Supported
F:	drivers/net/wireless/ath/wcn36xx/

QUANTENNA QTNFMAC WIRELESS DRIVER
M:	Igor Mitsyanko
M:	Avinash Patil
M:	Sergey Matyukevich
L:	linux-wireless@vger.kernel.org
S:	Maintained
F:	drivers/net/wireless/quantenna

RADEON and AMDGPU DRM DRIVERS
M:	Alex Deucher
M:	Christian König
M:	David (ChunMing) Zhou
L:	amd-gfx@lists.freedesktop.org
T:	git git://people.freedesktop.org/~agd5f/linux
S:	Supported
F:	drivers/gpu/drm/radeon/
F:	include/uapi/drm/radeon_drm.h
F:	drivers/gpu/drm/amd/
F:	include/uapi/drm/amdgpu_drm.h

RADEON FRAMEBUFFER DISPLAY DRIVER
M:	Benjamin Herrenschmidt
L:	linux-fbdev@vger.kernel.org
S:	Maintained
F:	drivers/video/fbdev/aty/radeon*
F:	include/uapi/linux/radeonfb.h

RADIOSHARK RADIO DRIVER
M:	Hans Verkuil
L:	linux-media@vger.kernel.org
T:	git git://linuxtv.org/media_tree.git
S:	Maintained
F:	drivers/media/radio/radio-shark.c

RADIOSHARK2 RADIO DRIVER
M:	Hans Verkuil
L:	linux-media@vger.kernel.org
T:	git git://linuxtv.org/media_tree.git
S:	Maintained
F:	drivers/media/radio/radio-shark2.c
F:	drivers/media/radio/radio-tea5777.c

RADOS BLOCK DEVICE (RBD)
M:	Ilya Dryomov
M:	Sage Weil
M:	Alex Elder
L:	ceph-devel@vger.kernel.org
W:	http://ceph.com/
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client.git
T:	git git://github.com/ceph/ceph-client.git
S:	Supported
F:	Documentation/ABI/testing/sysfs-bus-rbd
F:	drivers/block/rbd.c
F:	drivers/block/rbd_types.h

RAGE128 FRAMEBUFFER DISPLAY DRIVER
M:	Paul Mackerras
L:	linux-fbdev@vger.kernel.org
S:	Maintained
F:	drivers/video/fbdev/aty/aty128fb.c

RAINSHADOW-CEC DRIVER
M:	Hans Verkuil
L:	linux-media@vger.kernel.org
T:	git git://linuxtv.org/media_tree.git
S:	Maintained
F:	drivers/media/usb/rainshadow-cec/*

RALINK MIPS ARCHITECTURE
M:	John Crispin
L:	linux-mips@vger.kernel.org
S:	Maintained
F:	arch/mips/ralink

RALINK RT2X00 WIRELESS LAN DRIVER
P:	rt2x00 project
M:	Stanislaw Gruszka
M:	Helmut Schaa
L:	linux-wireless@vger.kernel.org
S:	Maintained
F:	drivers/net/wireless/ralink/rt2x00/
RAMDISK RAM BLOCK DEVICE DRIVER
M:	Jens Axboe
S:	Maintained
F:	Documentation/blockdev/ramdisk.txt
F:	drivers/block/brd.c

RANCHU VIRTUAL BOARD FOR MIPS
M:	Miodrag Dinic
L:	linux-mips@vger.kernel.org
S:	Supported
F:	arch/mips/generic/board-ranchu.c
F:	arch/mips/configs/generic/board-ranchu.config

RANDOM NUMBER DRIVER
M:	"Theodore Ts'o"
S:	Maintained
F:	drivers/char/random.c

RAPIDIO SUBSYSTEM
M:	Matt Porter
M:	Alexandre Bounine
S:	Maintained
F:	drivers/rapidio/

RAYLINK/WEBGEAR 802.11 WIRELESS LAN DRIVER
L:	linux-wireless@vger.kernel.org
S:	Orphan
F:	drivers/net/wireless/ray*

RCUTORTURE TEST FRAMEWORK
M:	"Paul E. McKenney"
M:	Josh Triplett
R:	Steven Rostedt
R:	Mathieu Desnoyers
R:	Lai Jiangshan
L:	linux-kernel@vger.kernel.org
S:	Supported
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
F:	tools/testing/selftests/rcutorture

RDC R-321X SoC
M:	Florian Fainelli
S:	Maintained

RDC R6040 FAST ETHERNET DRIVER
M:	Florian Fainelli
L:	netdev@vger.kernel.org
S:	Maintained
F:	drivers/net/ethernet/rdc/r6040.c

RDMAVT - RDMA verbs software
M:	Dennis Dalessandro
M:	Mike Marciniszyn
L:	linux-rdma@vger.kernel.org
S:	Supported
F:	drivers/infiniband/sw/rdmavt

RDS - RELIABLE DATAGRAM SOCKETS
M:	Santosh Shilimkar
L:	netdev@vger.kernel.org
L:	linux-rdma@vger.kernel.org
L:	rds-devel@oss.oracle.com (moderated for non-subscribers)
W:	https://oss.oracle.com/projects/rds/
S:	Supported
F:	net/rds/
F:	Documentation/networking/rds.txt

RDT - RESOURCE ALLOCATION
M:	Fenghua Yu
M:	Reinette Chatre
L:	linux-kernel@vger.kernel.org
S:	Supported
F:	arch/x86/kernel/cpu/resctrl/
F:	arch/x86/include/asm/resctrl_sched.h
F:	Documentation/x86/resctrl*

READ-COPY UPDATE (RCU)
M:	"Paul E. McKenney"
M:	Josh Triplett
R:	Steven Rostedt
R:	Mathieu Desnoyers
R:	Lai Jiangshan
R:	Joel Fernandes
L:	linux-kernel@vger.kernel.org
W:	http://www.rdrop.com/users/paulmck/RCU/
S:	Supported
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
F:	Documentation/RCU/
X:	Documentation/RCU/torture.txt
F:	include/linux/rcu*
X:	include/linux/srcu*.h
F:	kernel/rcu/
X:	kernel/rcu/srcu*.c

REAL TIME CLOCK (RTC) SUBSYSTEM
M:	Alessandro Zummo
M:	Alexandre Belloni
L:	linux-rtc@vger.kernel.org
Q:	http://patchwork.ozlabs.org/project/rtc-linux/list/
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/abelloni/linux.git
S:	Maintained
F:	Documentation/devicetree/bindings/rtc/
F:	Documentation/rtc.txt
F:	drivers/rtc/
F:	include/linux/rtc.h
F:	include/uapi/linux/rtc.h
F:	include/linux/rtc/
F:	include/linux/platform_data/rtc-*
F:	tools/testing/selftests/rtc/

REALTEK AUDIO CODECS
M:	Bard Liao
M:	Oder Chiou
S:	Maintained
F:	sound/soc/codecs/rt*
F:	include/sound/rt*.h

REALTEK RTL83xx SMI DSA ROUTER CHIPS
M:	Linus Walleij
S:	Maintained
F:	Documentation/devicetree/bindings/net/dsa/realtek-smi.txt
F:	drivers/net/dsa/realtek-smi*
F:	drivers/net/dsa/rtl83*

REGISTER MAP ABSTRACTION
M:	Mark Brown
L:	linux-kernel@vger.kernel.org
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/regmap.git
S:	Supported
F:	Documentation/devicetree/bindings/regmap/
F:	drivers/base/regmap/
F:	include/linux/regmap.h

REISERFS FILE SYSTEM
L:	reiserfs-devel@vger.kernel.org
S:	Supported
F:	fs/reiserfs/

REMOTE PROCESSOR (REMOTEPROC) SUBSYSTEM
M:	Ohad Ben-Cohen
M:	Bjorn Andersson
L:	linux-remoteproc@vger.kernel.org
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/ohad/remoteproc.git
S:	Maintained
F:	Documentation/devicetree/bindings/remoteproc/
F:	Documentation/remoteproc.txt
F:	drivers/remoteproc/
F:	include/linux/remoteproc.h
REMOTE PROCESSOR MESSAGING (RPMSG) SUBSYSTEM
M:	Ohad Ben-Cohen
M:	Bjorn Andersson
L:	linux-remoteproc@vger.kernel.org
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/ohad/rpmsg.git
S:	Maintained
F:	drivers/rpmsg/
F:	Documentation/rpmsg.txt
F:	include/linux/rpmsg.h
F:	include/linux/rpmsg/

RENESAS CLOCK DRIVERS
M:	Geert Uytterhoeven
L:	linux-renesas-soc@vger.kernel.org
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/geert/renesas-drivers.git clk-renesas
S:	Supported
F:	drivers/clk/renesas/

RENESAS EMEV2 I2C DRIVER
M:	Wolfram Sang
S:	Supported
F:	drivers/i2c/busses/i2c-emev2.c

RENESAS ETHERNET DRIVERS
R:	Sergei Shtylyov
L:	netdev@vger.kernel.org
L:	linux-renesas-soc@vger.kernel.org
F:	Documentation/devicetree/bindings/net/renesas,*.txt
F:	Documentation/devicetree/bindings/net/sh_eth.txt
F:	drivers/net/ethernet/renesas/
F:	include/linux/sh_eth.h

RENESAS R-CAR GYROADC DRIVER
M:	Marek Vasut
L:	linux-iio@vger.kernel.org
S:	Supported
F:	Documentation/devicetree/bindings/iio/adc/renesas,gyroadc.txt
F:	drivers/iio/adc/rcar-gyroadc.c

RENESAS R-CAR I2C DRIVERS
M:	Wolfram Sang
S:	Supported
F:	drivers/i2c/busses/i2c-rcar.c
F:	drivers/i2c/busses/i2c-sh_mobile.c

RENESAS RIIC DRIVER
M:	Chris Brandt
S:	Supported
F:	Documentation/devicetree/bindings/i2c/i2c-riic.txt
F:	drivers/i2c/busses/i2c-riic.c

RENESAS USB PHY DRIVER
M:	Yoshihiro Shimoda
L:	linux-renesas-soc@vger.kernel.org
S:	Maintained
F:	drivers/phy/renesas/phy-rcar-gen3-usb*.c

RESET CONTROLLER FRAMEWORK
M:	Philipp Zabel
T:	git git://git.pengutronix.de/git/pza/linux
S:	Maintained
F:	drivers/reset/
F:	Documentation/devicetree/bindings/reset/
F:	include/dt-bindings/reset/
F:	include/linux/reset.h
F:	include/linux/reset-controller.h

RESTARTABLE SEQUENCES SUPPORT
M:	Mathieu Desnoyers
M:	Peter Zijlstra
M:	"Paul E. McKenney"
M:	Boqun Feng
L:	linux-kernel@vger.kernel.org
S:	Supported
F:	kernel/rseq.c
F:	include/uapi/linux/rseq.h
F:	include/trace/events/rseq.h
F:	tools/testing/selftests/rseq/

RFKILL
M:	Johannes Berg
L:	linux-wireless@vger.kernel.org
W:	http://wireless.kernel.org/
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211.git
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211-next.git
S:	Maintained
F:	Documentation/rfkill.txt
F:	Documentation/ABI/stable/sysfs-class-rfkill
F:	net/rfkill/
F:	include/linux/rfkill.h
F:	include/uapi/linux/rfkill.h

RHASHTABLE
M:	Thomas Graf
M:	Herbert Xu
L:	netdev@vger.kernel.org
S:	Maintained
F:	lib/rhashtable.c
F:	lib/test_rhashtable.c
F:	include/linux/rhashtable.h
F:	include/linux/rhashtable-types.h

RICOH R5C592 MEMORYSTICK DRIVER
M:	Maxim Levitsky
S:	Maintained
F:	drivers/memstick/host/r592.*

RICOH SMARTMEDIA/XD DRIVER
M:	Maxim Levitsky
S:	Maintained
F:	drivers/mtd/nand/raw/r852.c
F:	drivers/mtd/nand/raw/r852.h

RISC-V ARCHITECTURE
M:	Palmer Dabbelt
M:	Albert Ou
L:	linux-riscv@lists.infradead.org
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/palmer/riscv-linux.git
S:	Supported
F:	arch/riscv/
K:	riscv
N:	riscv

ROCCAT DRIVERS
M:	Stefan Achatz
W:	http://sourceforge.net/projects/roccat/
S:	Maintained
F:	drivers/hid/hid-roccat*
F:	include/linux/hid-roccat*
F:	Documentation/ABI/*/sysfs-driver-hid-roccat*

ROCKCHIP RASTER 2D GRAPHIC ACCELERATION UNIT DRIVER
M:	Jacob chen
L:	linux-media@vger.kernel.org
S:	Maintained
F:	drivers/media/platform/rockchip/rga/
F:	Documentation/devicetree/bindings/media/rockchip-rga.txt

ROCKCHIP VPU CODEC DRIVER
M:	Ezequiel Garcia
L:	linux-media@vger.kernel.org
S:	Maintained
F:	drivers/staging/media/platform/rockchip/vpu/
F:	Documentation/devicetree/bindings/media/rockchip-vpu.txt
ROCKER DRIVER
M:	Jiri Pirko
L:	netdev@vger.kernel.org
S:	Supported
F:	drivers/net/ethernet/rocker/

ROCKETPORT DRIVER
P:	Comtrol Corp.
W:	http://www.comtrol.com
S:	Maintained
F:	Documentation/serial/rocket.txt
F:	drivers/tty/rocket*

ROCKETPORT EXPRESS/INFINITY DRIVER
M:	Kevin Cernekee
L:	linux-serial@vger.kernel.org
S:	Odd Fixes
F:	drivers/tty/serial/rp2.*

ROHM MULTIFUNCTION BD9571MWV-M PMIC DEVICE DRIVERS
M:	Marek Vasut
L:	linux-kernel@vger.kernel.org
L:	linux-renesas-soc@vger.kernel.org
S:	Supported
F:	drivers/mfd/bd9571mwv.c
F:	drivers/regulator/bd9571mwv-regulator.c
F:	drivers/gpio/gpio-bd9571mwv.c
F:	include/linux/mfd/bd9571mwv.h
F:	Documentation/devicetree/bindings/mfd/bd9571mwv.txt

ROSE NETWORK LAYER
M:	Ralf Baechle
L:	linux-hams@vger.kernel.org
W:	http://www.linux-ax25.org/
S:	Maintained
F:	include/net/rose.h
F:	include/uapi/linux/rose.h
F:	net/rose/

RTL2830 MEDIA DRIVER
M:	Antti Palosaari
L:	linux-media@vger.kernel.org
W:	https://linuxtv.org
W:	http://palosaari.fi/linux/
Q:	http://patchwork.linuxtv.org/project/linux-media/list/
T:	git git://linuxtv.org/anttip/media_tree.git
S:	Maintained
F:	drivers/media/dvb-frontends/rtl2830*

RTL2832 MEDIA DRIVER
M:	Antti Palosaari
L:	linux-media@vger.kernel.org
W:	https://linuxtv.org
W:	http://palosaari.fi/linux/
Q:	http://patchwork.linuxtv.org/project/linux-media/list/
T:	git git://linuxtv.org/anttip/media_tree.git
S:	Maintained
F:	drivers/media/dvb-frontends/rtl2832*

RTL2832_SDR MEDIA DRIVER
M:	Antti Palosaari
L:	linux-media@vger.kernel.org
W:	https://linuxtv.org
W:	http://palosaari.fi/linux/
Q:	http://patchwork.linuxtv.org/project/linux-media/list/
T:	git git://linuxtv.org/anttip/media_tree.git
S:	Maintained
F:	drivers/media/dvb-frontends/rtl2832_sdr*

RTL8180 WIRELESS DRIVER
L:	linux-wireless@vger.kernel.org
W:	http://wireless.kernel.org/
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-testing.git
S:	Orphan
F:	drivers/net/wireless/realtek/rtl818x/rtl8180/

RTL8187 WIRELESS DRIVER
M:	Herton Ronaldo Krzesinski
M:	Hin-Tak Leung
M:	Larry Finger
L:	linux-wireless@vger.kernel.org
W:	http://wireless.kernel.org/
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-testing.git
S:	Maintained
F:	drivers/net/wireless/realtek/rtl818x/rtl8187/

REALTEK WIRELESS DRIVER (rtlwifi family)
M:	Ping-Ke Shih
L:	linux-wireless@vger.kernel.org
W:	http://wireless.kernel.org/
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-testing.git
S:	Maintained
F:	drivers/net/wireless/realtek/rtlwifi/

RTL8XXXU WIRELESS DRIVER (rtl8xxxu)
M:	Jes Sorensen
L:	linux-wireless@vger.kernel.org
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jes/linux.git rtl8xxxu-devel
S:	Maintained
F:	drivers/net/wireless/realtek/rtl8xxxu/

RXRPC SOCKETS (AF_RXRPC)
M:	David Howells
L:	linux-afs@lists.infradead.org
S:	Supported
F:	net/rxrpc/
F:	include/keys/rxrpc-type.h
F:	include/net/af_rxrpc.h
F:	include/trace/events/rxrpc.h
F:	include/uapi/linux/rxrpc.h
F:	Documentation/networking/rxrpc.txt
W:	https://www.infradead.org/~dhowells/kafs/

S3 SAVAGE FRAMEBUFFER DRIVER
M:	Antonino Daplas
L:	linux-fbdev@vger.kernel.org
S:	Maintained
F:	drivers/video/fbdev/savage/

S390
M:	Martin Schwidefsky
M:	Heiko Carstens
L:	linux-s390@vger.kernel.org
W:	http://www.ibm.com/developerworks/linux/linux390/
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux.git
S:	Supported
F:	arch/s390/
F:	drivers/s390/
F:	Documentation/s390/
F:	Documentation/driver-api/s390-drivers.rst

S390 COMMON I/O LAYER
M:	Sebastian Ott
M:	Peter Oberparleiter
linux-s390@vger.kernel.org W: http://www.ibm.com/developerworks/linux/linux390/ S: Supported F: drivers/s390/cio/ S390 DASD DRIVER M: Stefan Haberland M: Jan Hoeppner L: linux-s390@vger.kernel.org W: http://www.ibm.com/developerworks/linux/linux390/ S: Supported F: drivers/s390/block/dasd* F: block/partitions/ibm.c S390 IOMMU (PCI) M: Gerald Schaefer L: linux-s390@vger.kernel.org W: http://www.ibm.com/developerworks/linux/linux390/ S: Supported F: drivers/iommu/s390-iommu.c S390 IUCV NETWORK LAYER M: Julian Wiedmann M: Ursula Braun L: linux-s390@vger.kernel.org W: http://www.ibm.com/developerworks/linux/linux390/ S: Supported F: drivers/s390/net/*iucv* F: include/net/iucv/ F: net/iucv/ S390 NETWORK DRIVERS M: Julian Wiedmann M: Ursula Braun L: linux-s390@vger.kernel.org W: http://www.ibm.com/developerworks/linux/linux390/ S: Supported F: drivers/s390/net/ S390 PCI SUBSYSTEM M: Sebastian Ott M: Gerald Schaefer L: linux-s390@vger.kernel.org W: http://www.ibm.com/developerworks/linux/linux390/ S: Supported F: arch/s390/pci/ F: drivers/pci/hotplug/s390_pci_hpc.c S390 VFIO-CCW DRIVER M: Cornelia Huck M: Farhan Ali M: Eric Farman R: Halil Pasic L: linux-s390@vger.kernel.org L: kvm@vger.kernel.org S: Supported F: drivers/s390/cio/vfio_ccw* F: Documentation/s390/vfio-ccw.txt F: include/uapi/linux/vfio_ccw.h S390 ZCRYPT DRIVER M: Harald Freudenberger L: linux-s390@vger.kernel.org W: http://www.ibm.com/developerworks/linux/linux390/ S: Supported F: drivers/s390/crypto/ S390 VFIO AP DRIVER M: Tony Krowiak M: Pierre Morel M: Halil Pasic L: linux-s390@vger.kernel.org W: http://www.ibm.com/developerworks/linux/linux390/ S: Supported F: drivers/s390/crypto/vfio_ap_drv.c F: drivers/s390/crypto/vfio_ap_private.h F: drivers/s390/crypto/vfio_ap_ops.c F: Documentation/s390/vfio-ap.txt S390 ZFCP DRIVER M: Steffen Maier M: Benjamin Block L: linux-s390@vger.kernel.org W: http://www.ibm.com/developerworks/linux/linux390/ S: Supported F: drivers/s390/scsi/zfcp_* S3C24XX SD/MMC Driver M: Ben Dooks L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Supported F: drivers/mmc/host/s3cmci.* SAA6588 RDS RECEIVER DRIVER M: Hans Verkuil L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git W: https://linuxtv.org S: Odd Fixes F: drivers/media/i2c/saa6588* SAA7134 VIDEO4LINUX DRIVER M: Mauro Carvalho Chehab L: linux-media@vger.kernel.org W: https://linuxtv.org T: git git://linuxtv.org/media_tree.git S: Odd fixes F: Documentation/media/v4l-drivers/saa7134* F: drivers/media/pci/saa7134/ SAA7146 VIDEO4LINUX-2 DRIVER M: Hans Verkuil L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git S: Maintained F: drivers/media/common/saa7146/ F: drivers/media/pci/saa7146/ F: include/media/drv-intf/saa7146* SAMSUNG AUDIO (ASoC) DRIVERS M: Krzysztof Kozlowski M: Sangbeom Kim M: Sylwester Nawrocki L: alsa-devel@alsa-project.org (moderated for non-subscribers) S: Supported F: sound/soc/samsung/ F: Documentation/devicetree/bindings/sound/samsung* SAMSUNG EXYNOS PSEUDO RANDOM NUMBER GENERATOR (RNG) DRIVER M: Krzysztof Kozlowski L: linux-crypto@vger.kernel.org L: linux-samsung-soc@vger.kernel.org S: Maintained F: drivers/crypto/exynos-rng.c F: Documentation/devicetree/bindings/rng/samsung,exynos4-rng.txt SAMSUNG EXYNOS TRUE RANDOM NUMBER GENERATOR (TRNG) DRIVER M: Łukasz Stelmach L: linux-samsung-soc@vger.kernel.org S: Maintained F: drivers/char/hw_random/exynos-trng.c F: Documentation/devicetree/bindings/rng/samsung,exynos5250-trng.txt SAMSUNG FRAMEBUFFER DRIVER M: Jingoo Han L: 
linux-fbdev@vger.kernel.org S: Maintained F: drivers/video/fbdev/s3c-fb.c SAMSUNG LAPTOP DRIVER M: Corentin Chary L: platform-driver-x86@vger.kernel.org S: Maintained F: drivers/platform/x86/samsung-laptop.c SAMSUNG MULTIFUNCTION PMIC DEVICE DRIVERS M: Sangbeom Kim M: Krzysztof Kozlowski M: Bartlomiej Zolnierkiewicz L: linux-kernel@vger.kernel.org L: linux-samsung-soc@vger.kernel.org S: Supported F: drivers/mfd/sec*.c F: drivers/regulator/s2m*.c F: drivers/regulator/s5m*.c F: drivers/clk/clk-s2mps11.c F: drivers/rtc/rtc-s5m.c F: include/linux/mfd/samsung/ F: Documentation/devicetree/bindings/mfd/samsung,sec-core.txt F: Documentation/devicetree/bindings/regulator/samsung,s2m*.txt F: Documentation/devicetree/bindings/regulator/samsung,s5m*.txt F: Documentation/devicetree/bindings/clock/samsung,s2mps11.txt SAMSUNG S3C24XX/S3C64XX SOC SERIES CAMIF DRIVER M: Sylwester Nawrocki L: linux-media@vger.kernel.org L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers) S: Maintained F: drivers/media/platform/s3c-camif/ F: include/media/drv-intf/s3c_camif.h SAMSUNG S3FWRN5 NFC DRIVER M: Robert Baldyga M: Krzysztof Opasiak L: linux-nfc@lists.01.org (moderated for non-subscribers) S: Supported F: drivers/nfc/s3fwrn5 SAMSUNG S5C73M3 CAMERA DRIVER M: Kyungmin Park M: Andrzej Hajda L: linux-media@vger.kernel.org S: Supported F: drivers/media/i2c/s5c73m3/* SAMSUNG S5K5BAF CAMERA DRIVER M: Kyungmin Park M: Andrzej Hajda L: linux-media@vger.kernel.org S: Supported F: drivers/media/i2c/s5k5baf.c SAMSUNG S5P Security SubSystem (SSS) DRIVER M: Krzysztof Kozlowski M: Vladimir Zapolskiy M: Kamil Konieczny L: linux-crypto@vger.kernel.org L: linux-samsung-soc@vger.kernel.org S: Maintained F: drivers/crypto/s5p-sss.c SAMSUNG S5P/EXYNOS4 SOC SERIES CAMERA SUBSYSTEM DRIVERS M: Kyungmin Park M: Sylwester Nawrocki L: linux-media@vger.kernel.org Q: https://patchwork.linuxtv.org/project/linux-media/list/ S: Supported F: drivers/media/platform/exynos4-is/ SAMSUNG SOC CLOCK DRIVERS M: Sylwester Nawrocki M: Tomasz Figa M: Chanwoo Choi S: Supported L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers) T: git git://git.kernel.org/pub/scm/linux/kernel/git/snawrocki/clk.git F: drivers/clk/samsung/ F: include/dt-bindings/clock/exynos*.h F: Documentation/devicetree/bindings/clock/exynos*.txt SAMSUNG SPI DRIVERS M: Kukjin Kim M: Krzysztof Kozlowski M: Andi Shyti L: linux-spi@vger.kernel.org L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers) S: Maintained F: Documentation/devicetree/bindings/spi/spi-samsung.txt F: drivers/spi/spi-s3c* F: include/linux/platform_data/spi-s3c64xx.h SAMSUNG SXGBE DRIVERS M: Byungho An M: Girish K S M: Vipul Pandya S: Supported L: netdev@vger.kernel.org F: drivers/net/ethernet/samsung/sxgbe/ SAMSUNG THERMAL DRIVER M: Bartlomiej Zolnierkiewicz L: linux-pm@vger.kernel.org L: linux-samsung-soc@vger.kernel.org S: Supported T: git https://github.com/lmajewski/linux-samsung-thermal.git F: drivers/thermal/samsung/ SAMSUNG USB2 PHY DRIVER M: Kamil Debski M: Sylwester Nawrocki L: linux-kernel@vger.kernel.org S: Supported F: Documentation/devicetree/bindings/phy/samsung-phy.txt F: Documentation/phy/samsung-usb2.txt F: drivers/phy/samsung/phy-exynos4210-usb2.c F: drivers/phy/samsung/phy-exynos4x12-usb2.c F: drivers/phy/samsung/phy-exynos5250-usb2.c F: drivers/phy/samsung/phy-s5pv210-usb2.c F: drivers/phy/samsung/phy-samsung-usb2.c F: drivers/phy/samsung/phy-samsung-usb2.h SC1200 WDT DRIVER M: Zwane Mwaikambo S: Maintained F: drivers/watchdog/sc1200wdt.c SCHEDULER M: Ingo 
Molnar M: Peter Zijlstra L: linux-kernel@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git sched/core S: Maintained F: kernel/sched/ F: include/linux/sched.h F: include/uapi/linux/sched.h F: include/linux/wait.h SCR24X CHIP CARD INTERFACE DRIVER M: Lubomir Rintel S: Supported F: drivers/char/pcmcia/scr24x_cs.c SCSI CDROM DRIVER M: Jens Axboe L: linux-scsi@vger.kernel.org W: http://www.kernel.dk S: Maintained F: drivers/scsi/sr* SCSI RDMA PROTOCOL (SRP) INITIATOR M: Bart Van Assche L: linux-rdma@vger.kernel.org S: Supported Q: http://patchwork.kernel.org/project/linux-rdma/list/ F: drivers/infiniband/ulp/srp/ F: include/scsi/srp.h SCSI RDMA PROTOCOL (SRP) TARGET M: Bart Van Assche L: linux-rdma@vger.kernel.org L: target-devel@vger.kernel.org S: Supported Q: http://patchwork.kernel.org/project/linux-rdma/list/ F: drivers/infiniband/ulp/srpt/ SCSI SG DRIVER M: Doug Gilbert L: linux-scsi@vger.kernel.org W: http://sg.danny.cz/sg S: Maintained F: Documentation/scsi/scsi-generic.txt F: drivers/scsi/sg.c F: include/scsi/sg.h SCSI SUBSYSTEM M: "James E.J. Bottomley" T: git git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi.git M: "Martin K. Petersen" T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkp/scsi.git L: linux-scsi@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/scsi/ F: drivers/scsi/ F: include/scsi/ SCSI TAPE DRIVER M: Kai Mäkisara L: linux-scsi@vger.kernel.org S: Maintained F: Documentation/scsi/st.txt F: drivers/scsi/st.* F: drivers/scsi/st_*.h SCTP PROTOCOL M: Vlad Yasevich M: Neil Horman M: Marcelo Ricardo Leitner L: linux-sctp@vger.kernel.org W: http://lksctp.sourceforge.net S: Maintained F: Documentation/networking/sctp.txt F: include/linux/sctp.h F: include/uapi/linux/sctp.h F: include/net/sctp/ F: net/sctp/ SCx200 CPU SUPPORT M: Jim Cromie S: Odd Fixes F: Documentation/i2c/busses/scx200_acb F: arch/x86/platform/scx200/ F: drivers/watchdog/scx200_wdt.c F: drivers/i2c/busses/scx200* F: drivers/mtd/maps/scx200_docflash.c F: include/linux/scx200.h SCx200 GPIO DRIVER M: Jim Cromie S: Maintained F: drivers/char/scx200_gpio.c F: include/linux/scx200_gpio.h SCx200 HRT CLOCKSOURCE DRIVER M: Jim Cromie S: Maintained F: drivers/clocksource/scx200_hrt.c SDRICOH_CS MMC/SD HOST CONTROLLER INTERFACE DRIVER M: Sascha Sommer L: sdricohcs-devel@lists.sourceforge.net (subscribers-only) S: Maintained F: drivers/mmc/host/sdricoh_cs.c SECO BOARDS CEC DRIVER M: Ettore Chimenti S: Maintained F: drivers/media/platform/seco-cec/seco-cec.c F: drivers/media/platform/seco-cec/seco-cec.h SECURE COMPUTING M: Kees Cook R: Andy Lutomirski R: Will Drewry T: git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git seccomp S: Supported F: kernel/seccomp.c F: include/uapi/linux/seccomp.h F: include/linux/seccomp.h F: tools/testing/selftests/seccomp/* F: tools/testing/selftests/kselftest_harness.h F: Documentation/userspace-api/seccomp_filter.rst K: \bsecure_computing K: \bTIF_SECCOMP\b SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) Broadcom BRCMSTB DRIVER M: Al Cooper L: linux-mmc@vger.kernel.org L: bcm-kernel-feedback-list@broadcom.com S: Maintained F: drivers/mmc/host/sdhci-brcmstb* SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) DRIVER M: Adrian Hunter L: linux-mmc@vger.kernel.org T: git git://git.infradead.org/users/ahunter/linux-sdhci.git S: Maintained F: drivers/mmc/host/sdhci* F: include/linux/mmc/sdhci* SYNOPSYS SDHCI COMPLIANT DWC MSHC DRIVER M: Prabu Thangamuthu M: Manjunath M B L: linux-mmc@vger.kernel.org S: Maintained F: 
drivers/mmc/host/sdhci-pci-dwc-mshc.c SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) MICROCHIP DRIVER M: Ludovic Desroches L: linux-mmc@vger.kernel.org S: Supported F: drivers/mmc/host/sdhci-of-at91.c SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) SAMSUNG DRIVER M: Ben Dooks M: Jaehoon Chung L: linux-mmc@vger.kernel.org S: Maintained F: drivers/mmc/host/sdhci-s3c* SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) ST SPEAR DRIVER M: Viresh Kumar L: linux-mmc@vger.kernel.org S: Maintained F: drivers/mmc/host/sdhci-spear.c SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) TI OMAP DRIVER M: Kishon Vijay Abraham I L: linux-mmc@vger.kernel.org S: Maintained F: drivers/mmc/host/sdhci-omap.c SECURE ENCRYPTING DEVICE (SED) OPAL DRIVER M: Scott Bauer M: Jonathan Derrick L: linux-block@vger.kernel.org S: Supported F: block/sed* F: block/opal_proto.h F: include/linux/sed* F: include/uapi/linux/sed* SECURITY CONTACT M: Security Officers S: Supported SECURITY SUBSYSTEM M: James Morris M: "Serge E. Hallyn" L: linux-security-module@vger.kernel.org (suggested Cc:) T: git git://git.kernel.org/pub/scm/linux/kernel/git/jmorris/linux-security.git W: http://kernsec.org/ S: Supported F: security/ X: security/selinux/ SELINUX SECURITY MODULE M: Paul Moore M: Stephen Smalley M: Eric Paris L: selinux@vger.kernel.org W: https://selinuxproject.org W: https://github.com/SELinuxProject T: git git://git.kernel.org/pub/scm/linux/kernel/git/pcmoore/selinux.git S: Supported F: include/linux/selinux* F: security/selinux/ F: scripts/selinux/ F: Documentation/admin-guide/LSM/SELinux.rst SENSABLE PHANTOM M: Jiri Slaby S: Maintained F: drivers/misc/phantom.c F: include/uapi/linux/phantom.h SERIAL DEVICE BUS M: Rob Herring L: linux-serial@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/serial/slave-device.txt F: drivers/tty/serdev/ F: include/linux/serdev.h SERIAL DRIVERS M: Greg Kroah-Hartman L: linux-serial@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/serial/ F: drivers/tty/serial/ SERIAL IR RECEIVER M: Sean Young L: linux-media@vger.kernel.org S: Maintained F: drivers/media/rc/serial_ir.c SFC NETWORK DRIVER M: Solarflare linux maintainers M: Edward Cree M: Bert Kenward L: netdev@vger.kernel.org S: Supported F: drivers/net/ethernet/sfc/ SGI GRU DRIVER M: Dimitri Sivanich S: Maintained F: drivers/misc/sgi-gru/ SGI SN-IA64 (Altix) SERIAL CONSOLE DRIVER M: Pat Gefre L: linux-ia64@vger.kernel.org S: Supported F: Documentation/ia64/serial.txt F: drivers/tty/serial/ioc?_serial.c F: include/linux/ioc?.h SGI XP/XPC/XPNET DRIVER M: Cliff Whickman M: Robin Holt S: Maintained F: drivers/misc/sgi-xp/ SHARED MEMORY COMMUNICATIONS (SMC) SOCKETS M: Ursula Braun L: linux-s390@vger.kernel.org W: http://www.ibm.com/developerworks/linux/linux390/ S: Supported F: net/smc/ SHARP RJ54N1CB0C SENSOR DRIVER M: Jacopo Mondi L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git S: Odd fixes F: drivers/media/i2c/rj54n1cb0c.c F: include/media/i2c/rj54n1cb0c.h SH_VEU V4L2 MEM2MEM DRIVER L: linux-media@vger.kernel.org S: Orphan F: drivers/media/platform/sh_veu.c SH_VOU V4L2 OUTPUT DRIVER L: linux-media@vger.kernel.org S: Orphan F: drivers/media/platform/sh_vou.c F: include/media/drv-intf/sh_vou.h SI2157 MEDIA DRIVER M: Antti Palosaari L: linux-media@vger.kernel.org W: https://linuxtv.org W: http://palosaari.fi/linux/ Q: http://patchwork.linuxtv.org/project/linux-media/list/ T: git git://linuxtv.org/anttip/media_tree.git S: Maintained F: drivers/media/tuners/si2157* SI2165 MEDIA DRIVER M: Matthias 
Schwarzott L: linux-media@vger.kernel.org W: https://linuxtv.org Q: http://patchwork.linuxtv.org/project/linux-media/list/ S: Maintained F: drivers/media/dvb-frontends/si2165* SI2168 MEDIA DRIVER M: Antti Palosaari L: linux-media@vger.kernel.org W: https://linuxtv.org W: http://palosaari.fi/linux/ Q: http://patchwork.linuxtv.org/project/linux-media/list/ T: git git://linuxtv.org/anttip/media_tree.git S: Maintained F: drivers/media/dvb-frontends/si2168* SI470X FM RADIO RECEIVER I2C DRIVER M: Hans Verkuil L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git W: https://linuxtv.org S: Odd Fixes F: drivers/media/radio/si470x/radio-si470x-i2c.c SI470X FM RADIO RECEIVER USB DRIVER M: Hans Verkuil L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git W: https://linuxtv.org S: Maintained F: drivers/media/radio/si470x/radio-si470x-common.c F: drivers/media/radio/si470x/radio-si470x.h F: drivers/media/radio/si470x/radio-si470x-usb.c SI4713 FM RADIO TRANSMITTER I2C DRIVER M: Eduardo Valentin L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git W: https://linuxtv.org S: Odd Fixes F: drivers/media/radio/si4713/si4713.? SI4713 FM RADIO TRANSMITTER PLATFORM DRIVER M: Eduardo Valentin L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git W: https://linuxtv.org S: Odd Fixes F: drivers/media/radio/si4713/radio-platform-si4713.c SI4713 FM RADIO TRANSMITTER USB DRIVER M: Hans Verkuil L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git W: https://linuxtv.org S: Maintained F: drivers/media/radio/si4713/radio-usb-si4713.c SIANO DVB DRIVER M: Mauro Carvalho Chehab L: linux-media@vger.kernel.org W: https://linuxtv.org T: git git://linuxtv.org/media_tree.git S: Odd fixes F: drivers/media/common/siano/ F: drivers/media/usb/siano/ F: drivers/media/usb/siano/ F: drivers/media/mmc/siano/ SIFIVE DRIVERS M: Palmer Dabbelt L: linux-riscv@lists.infradead.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/palmer/riscv-linux.git S: Supported K: sifive N: sifive SILEAD TOUCHSCREEN DRIVER M: Hans de Goede L: linux-input@vger.kernel.org L: platform-driver-x86@vger.kernel.org S: Maintained F: drivers/input/touchscreen/silead.c F: drivers/platform/x86/touchscreen_dmi.c SILICON MOTION SM712 FRAME BUFFER DRIVER M: Sudip Mukherjee M: Teddy Wang M: Sudip Mukherjee L: linux-fbdev@vger.kernel.org S: Maintained F: drivers/video/fbdev/sm712* F: Documentation/fb/sm712fb.txt SIMPLE FIRMWARE INTERFACE (SFI) M: Len Brown L: sfi-devel@simplefirmware.org W: http://simplefirmware.org/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux-sfi-2.6.git S: Supported F: arch/x86/platform/sfi/ F: drivers/sfi/ F: include/linux/sfi*.h SIMPLEFB FB DRIVER M: Hans de Goede L: linux-fbdev@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/display/simple-framebuffer.txt F: drivers/video/fbdev/simplefb.c F: include/linux/platform_data/simplefb.h SIMTEC EB110ATX (Chalice CATS) P: Ben Dooks P: Vincent Sanders M: Simtec Linux Team W: http://www.simtec.co.uk/products/EB110ATX/ S: Supported SIMTEC EB2410ITX (BAST) P: Ben Dooks P: Vincent Sanders M: Simtec Linux Team W: http://www.simtec.co.uk/products/EB2410ITX/ S: Supported F: arch/arm/mach-s3c24xx/mach-bast.c F: arch/arm/mach-s3c24xx/bast-ide.c F: arch/arm/mach-s3c24xx/bast-irq.c SIPHASH PRF ROUTINES M: Jason A. 
Donenfeld S: Maintained F: lib/siphash.c F: lib/test_siphash.c F: include/linux/siphash.h SIOX M: Gavin Schenk M: Uwe Kleine-König R: Pengutronix Kernel Team S: Supported F: drivers/siox/* F: drivers/gpio/gpio-siox.c F: include/trace/events/siox.h SIS 190 ETHERNET DRIVER M: Francois Romieu L: netdev@vger.kernel.org S: Maintained F: drivers/net/ethernet/sis/sis190.c SIS 900/7016 FAST ETHERNET DRIVER M: Daniele Venzano W: http://www.brownhat.org/sis900.html L: netdev@vger.kernel.org S: Maintained F: drivers/net/ethernet/sis/sis900.* SIS FRAMEBUFFER DRIVER M: Thomas Winischhofer W: http://www.winischhofer.net/linuxsisvga.shtml S: Maintained F: Documentation/fb/sisfb.txt F: drivers/video/fbdev/sis/ F: include/video/sisfb.h SIS USB2VGA DRIVER M: Thomas Winischhofer W: http://www.winischhofer.at/linuxsisusbvga.shtml S: Maintained F: drivers/usb/misc/sisusbvga/ SLAB ALLOCATOR M: Christoph Lameter M: Pekka Enberg M: David Rientjes M: Joonsoo Kim M: Andrew Morton L: linux-mm@kvack.org S: Maintained F: include/linux/sl?b*.h F: mm/sl?b* SLEEPABLE READ-COPY UPDATE (SRCU) M: Lai Jiangshan M: "Paul E. McKenney" M: Josh Triplett R: Steven Rostedt R: Mathieu Desnoyers L: linux-kernel@vger.kernel.org W: http://www.rdrop.com/users/paulmck/RCU/ S: Supported T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git F: include/linux/srcu*.h F: kernel/rcu/srcu*.c SERIAL LOW-POWER INTER-CHIP MEDIA BUS (SLIMbus) M: Srinivas Kandagatla L: alsa-devel@alsa-project.org (moderated for non-subscribers) S: Maintained F: drivers/slimbus/ F: Documentation/devicetree/bindings/slimbus/ F: include/linux/slimbus.h SMACK SECURITY MODULE M: Casey Schaufler L: linux-security-module@vger.kernel.org W: http://schaufler-ca.com T: git git://github.com/cschaufler/smack-next S: Maintained F: Documentation/admin-guide/LSM/Smack.rst F: security/smack/ SMC91x ETHERNET DRIVER M: Nicolas Pitre S: Odd Fixes F: drivers/net/ethernet/smsc/smc91x.* SMIA AND SMIA++ IMAGE SENSOR DRIVER M: Sakari Ailus L: linux-media@vger.kernel.org S: Maintained F: drivers/media/i2c/smiapp/ F: include/media/i2c/smiapp.h F: drivers/media/i2c/smiapp-pll.c F: drivers/media/i2c/smiapp-pll.h F: include/uapi/linux/smiapp.h F: Documentation/devicetree/bindings/media/i2c/nokia,smia.txt SMM665 HARDWARE MONITOR DRIVER M: Guenter Roeck L: linux-hwmon@vger.kernel.org S: Maintained F: Documentation/hwmon/smm665 F: drivers/hwmon/smm665.c SMSC EMC2103 HARDWARE MONITOR DRIVER M: Steve Glendinning L: linux-hwmon@vger.kernel.org S: Maintained F: Documentation/hwmon/emc2103 F: drivers/hwmon/emc2103.c SMSC SCH5627 HARDWARE MONITOR DRIVER M: Hans de Goede L: linux-hwmon@vger.kernel.org S: Supported F: Documentation/hwmon/sch5627 F: drivers/hwmon/sch5627.c SMSC UFX6000 and UFX7000 USB to VGA DRIVER M: Steve Glendinning L: linux-fbdev@vger.kernel.org S: Maintained F: drivers/video/fbdev/smscufx.c SMSC47B397 HARDWARE MONITOR DRIVER M: Jean Delvare L: linux-hwmon@vger.kernel.org S: Maintained F: Documentation/hwmon/smsc47b397 F: drivers/hwmon/smsc47b397.c SMSC911x ETHERNET DRIVER M: Steve Glendinning L: netdev@vger.kernel.org S: Maintained F: include/linux/smsc911x.h F: drivers/net/ethernet/smsc/smsc911x.* SMSC9420 PCI ETHERNET DRIVER M: Steve Glendinning L: netdev@vger.kernel.org S: Maintained F: drivers/net/ethernet/smsc/smsc9420.* SOC-CAMERA V4L2 SUBSYSTEM L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git S: Orphan F: include/media/soc* F: drivers/media/i2c/soc_camera/ F: drivers/media/platform/soc_camera/ SOCIONEXT SYNQUACER I2C DRIVER M: Ard 
Biesheuvel L: linux-i2c@vger.kernel.org S: Maintained F: drivers/i2c/busses/i2c-synquacer.c F: Documentation/devicetree/bindings/i2c/i2c-synquacer.txt SOCIONEXT UNIPHIER SOUND DRIVER L: alsa-devel@alsa-project.org (moderated for non-subscribers) S: Orphan F: sound/soc/uniphier/ SOEKRIS NET48XX LED SUPPORT M: Chris Boot S: Maintained F: drivers/leds/leds-net48xx.c SOFT-ROCE DRIVER (rxe) M: Moni Shoua L: linux-rdma@vger.kernel.org S: Supported W: https://github.com/SoftRoCE/rxe-dev/wiki/rxe-dev:-Home Q: http://patchwork.kernel.org/project/linux-rdma/list/ F: drivers/infiniband/sw/rxe/ F: include/uapi/rdma/rdma_user_rxe.h SOFTLOGIC 6x10 MPEG CODEC M: Bluecherry Maintainers M: Anton Sviridenko M: Andrey Utkin M: Andrey Utkin M: Ismael Luceno L: linux-media@vger.kernel.org S: Supported F: drivers/media/pci/solo6x10/ SOFTWARE DELEGATED EXCEPTION INTERFACE (SDEI) M: James Morse L: linux-arm-kernel@lists.infradead.org S: Maintained F: Documentation/devicetree/bindings/arm/firmware/sdei.txt F: drivers/firmware/arm_sdei.c F: include/linux/arm_sdei.h F: include/uapi/linux/arm_sdei.h SOFTWARE RAID (Multiple Disks) SUPPORT M: Shaohua Li L: linux-raid@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/shli/md.git S: Supported F: drivers/md/Makefile F: drivers/md/Kconfig F: drivers/md/md* F: drivers/md/raid* F: include/linux/raid/ F: include/uapi/linux/raid/ SOCIONEXT (SNI) AVE NETWORK DRIVER M: Kunihiko Hayashi L: netdev@vger.kernel.org S: Maintained F: drivers/net/ethernet/socionext/sni_ave.c F: Documentation/devicetree/bindings/net/socionext,uniphier-ave4.txt SOCIONEXT (SNI) NETSEC NETWORK DRIVER M: Jassi Brar L: netdev@vger.kernel.org S: Maintained F: drivers/net/ethernet/socionext/netsec.c F: Documentation/devicetree/bindings/net/socionext-netsec.txt SOLIDRUN CLEARFOG SUPPORT M: Russell King S: Maintained F: arch/arm/boot/dts/armada-388-clearfog* F: arch/arm/boot/dts/armada-38x-solidrun-* SOLIDRUN CUBOX-I/HUMMINGBOARD SUPPORT M: Russell King S: Maintained F: arch/arm/boot/dts/imx6*-cubox-i* F: arch/arm/boot/dts/imx6*-hummingboard* F: arch/arm/boot/dts/imx6*-sr-* SONIC NETWORK DRIVER M: Thomas Bogendoerfer L: netdev@vger.kernel.org S: Maintained F: drivers/net/ethernet/natsemi/sonic.* SONICS SILICON BACKPLANE DRIVER (SSB) M: Michael Buesch L: linux-wireless@vger.kernel.org S: Maintained F: drivers/ssb/ F: include/linux/ssb/ SONY IMX214 SENSOR DRIVER M: Ricardo Ribalda L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git S: Maintained F: drivers/media/i2c/imx214.c F: Documentation/devicetree/bindings/media/i2c/sony,imx214.txt SONY IMX258 SENSOR DRIVER M: Sakari Ailus L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git S: Maintained F: drivers/media/i2c/imx258.c SONY IMX274 SENSOR DRIVER M: Leon Luo L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git S: Maintained F: drivers/media/i2c/imx274.c F: Documentation/devicetree/bindings/media/i2c/imx274.txt SONY IMX319 SENSOR DRIVER M: Bingbu Cao L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git S: Maintained F: drivers/media/i2c/imx319.c SONY IMX355 SENSOR DRIVER M: Tianshu Qiu L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git S: Maintained F: drivers/media/i2c/imx355.c SONY MEMORYSTICK CARD SUPPORT M: Alex Dubov W: http://tifmxx.berlios.de/ S: Maintained F: drivers/memstick/host/tifm_ms.c SONY MEMORYSTICK STANDARD SUPPORT M: Maxim Levitsky S: Maintained F: drivers/memstick/core/ms_block.* SONY VAIO CONTROL DEVICE DRIVER M: Mattia Dongili L: 
platform-driver-x86@vger.kernel.org W: http://www.linux.it/~malattia/wiki/index.php/Sony_drivers S: Maintained F: Documentation/laptops/sony-laptop.txt F: drivers/char/sonypi.c F: drivers/platform/x86/sony-laptop.c F: include/linux/sony-laptop.h SOUND M: Jaroslav Kysela M: Takashi Iwai L: alsa-devel@alsa-project.org (moderated for non-subscribers) W: http://www.alsa-project.org/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound.git T: git git://git.alsa-project.org/alsa-kernel.git Q: http://patchwork.kernel.org/project/alsa-devel/list/ S: Maintained F: Documentation/sound/ F: include/sound/ F: include/uapi/sound/ F: sound/ SOUND - COMPRESSED AUDIO M: Vinod Koul L: alsa-devel@alsa-project.org (moderated for non-subscribers) T: git git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound.git S: Supported F: Documentation/sound/designs/compress-offload.rst F: include/sound/compress_driver.h F: include/uapi/sound/compress_* F: sound/core/compress_offload.c F: sound/soc/soc-compress.c SOUND - DMAENGINE HELPERS M: Lars-Peter Clausen S: Supported F: include/sound/dmaengine_pcm.h F: sound/core/pcm_dmaengine.c F: sound/soc/soc-generic-dmaengine-pcm.c SOUND - SOC LAYER / DYNAMIC AUDIO POWER MANAGEMENT (ASoC) M: Liam Girdwood M: Mark Brown T: git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound.git L: alsa-devel@alsa-project.org (moderated for non-subscribers) W: http://alsa-project.org/main/index.php/ASoC S: Supported F: Documentation/devicetree/bindings/sound/ F: Documentation/sound/soc/ F: sound/soc/ F: include/dt-bindings/sound/ F: include/sound/soc* SOUNDWIRE SUBSYSTEM M: Vinod Koul M: Sanyog Kale R: Pierre-Louis Bossart L: alsa-devel@alsa-project.org (moderated for non-subscribers) S: Supported F: Documentation/driver-api/soundwire/ F: drivers/soundwire/ F: include/linux/soundwire/ SP2 MEDIA DRIVER M: Olli Salonen L: linux-media@vger.kernel.org W: https://linuxtv.org Q: http://patchwork.linuxtv.org/project/linux-media/list/ S: Maintained F: drivers/media/dvb-frontends/sp2* SPARC + UltraSPARC (sparc/sparc64) M: "David S. Miller" L: sparclinux@vger.kernel.org Q: http://patchwork.ozlabs.org/project/sparclinux/list/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc.git T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-next.git S: Maintained F: arch/sparc/ F: drivers/sbus/ SPARC SERIAL DRIVERS M: "David S. 
Miller" L: sparclinux@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc.git T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-next.git S: Maintained F: include/linux/sunserialcore.h F: drivers/tty/serial/suncore.c F: drivers/tty/serial/sunhv.c F: drivers/tty/serial/sunsab.c F: drivers/tty/serial/sunsab.h F: drivers/tty/serial/sunsu.c F: drivers/tty/serial/sunzilog.c F: drivers/tty/serial/sunzilog.h F: drivers/tty/vcc.c SPARSE CHECKER M: "Luc Van Oostenryck" L: linux-sparse@vger.kernel.org W: https://sparse.wiki.kernel.org/ T: git git://git.kernel.org/pub/scm/devel/sparse/sparse.git S: Maintained F: include/linux/compiler.h SPEAR CLOCK FRAMEWORK SUPPORT M: Viresh Kumar L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) W: http://www.st.com/spear S: Maintained F: drivers/clk/spear/ SPEAR PLATFORM SUPPORT M: Viresh Kumar M: Shiraz Hashim L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) W: http://www.st.com/spear S: Maintained F: arch/arm/boot/dts/spear* F: arch/arm/mach-spear/ SPI NOR SUBSYSTEM M: Marek Vasut L: linux-mtd@lists.infradead.org W: http://www.linux-mtd.infradead.org/ Q: http://patchwork.ozlabs.org/project/linux-mtd/list/ T: git git://git.infradead.org/linux-mtd.git spi-nor/fixes T: git git://git.infradead.org/linux-mtd.git spi-nor/next S: Maintained F: drivers/mtd/spi-nor/ F: include/linux/mtd/spi-nor.h SPI SUBSYSTEM M: Mark Brown L: linux-spi@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/spi.git Q: http://patchwork.kernel.org/project/spi-devel-general/list/ S: Maintained F: Documentation/devicetree/bindings/spi/ F: Documentation/spi/ F: drivers/spi/ F: include/linux/spi/ F: include/uapi/linux/spi/ F: tools/spi/ SPIDERNET NETWORK DRIVER for CELL M: Ishizaki Kou L: netdev@vger.kernel.org S: Supported F: Documentation/networking/device_drivers/toshiba/spider_net.txt F: drivers/net/ethernet/toshiba/spider_net* SPMI SUBSYSTEM R: Stephen Boyd L: linux-arm-msm@vger.kernel.org F: Documentation/devicetree/bindings/spmi/ F: drivers/spmi/ F: include/dt-bindings/spmi/spmi.h F: include/linux/spmi.h F: include/trace/events/spmi.h SPU FILE SYSTEM M: Jeremy Kerr L: linuxppc-dev@lists.ozlabs.org W: http://www.ibm.com/developerworks/power/cell/ S: Supported F: Documentation/filesystems/spufs.txt F: arch/powerpc/platforms/cell/spufs/ SQUASHFS FILE SYSTEM M: Phillip Lougher L: squashfs-devel@lists.sourceforge.net (subscribers-only) W: http://squashfs.org.uk T: git git://git.kernel.org/pub/scm/linux/kernel/git/pkl/squashfs-next.git S: Maintained F: Documentation/filesystems/squashfs.txt F: fs/squashfs/ SRM (Alpha) environment access M: Jan-Benedict Glaw S: Maintained F: arch/alpha/kernel/srm_env.c ST LSM6DSx IMU IIO DRIVER M: Lorenzo Bianconi L: linux-iio@vger.kernel.org W: http://www.st.com/ S: Maintained F: drivers/iio/imu/st_lsm6dsx/ F: Documentation/devicetree/bindings/iio/imu/st_lsm6dsx.txt ST STM32 I2C/SMBUS DRIVER M: Pierre-Yves MORDRET L: linux-i2c@vger.kernel.org S: Maintained F: drivers/i2c/busses/i2c-stm32* ST VL53L0X ToF RANGER(I2C) IIO DRIVER M: Song Qiang L: linux-iio@vger.kernel.org S: Maintained F: drivers/iio/proximity/vl53l0x-i2c.c F: Documentation/devicetree/bindings/iio/proximity/vl53l0x.txt STABLE BRANCH M: Greg Kroah-Hartman M: Sasha Levin L: stable@vger.kernel.org S: Supported F: Documentation/process/stable-kernel-rules.rst STAGING - COMEDI M: Ian Abbott M: H Hartley Sweeten S: Odd Fixes F: drivers/staging/comedi/ STAGING - EROFS FILE SYSTEM M: Gao Xiang M: 
Chao Yu L: linux-erofs@lists.ozlabs.org S: Maintained F: drivers/staging/erofs/ STAGING - INDUSTRIAL IO M: Jonathan Cameron L: linux-iio@vger.kernel.org S: Odd Fixes F: Documentation/devicetree/bindings/staging/iio/ F: drivers/staging/iio/ STAGING - NVIDIA COMPLIANT EMBEDDED CONTROLLER INTERFACE (nvec) M: Marc Dietrich L: ac100@lists.launchpad.net (moderated for non-subscribers) L: linux-tegra@vger.kernel.org S: Maintained F: drivers/staging/nvec/ STAGING - OLPC SECONDARY DISPLAY CONTROLLER (DCON) M: Jens Frederich M: Daniel Drake M: Jon Nettleton W: http://wiki.laptop.org/go/DCON S: Maintained F: drivers/staging/olpc_dcon/ STAGING - REALTEK RTL8712U DRIVERS M: Larry Finger M: Florian Schilhabel . S: Odd Fixes F: drivers/staging/rtl8712/ STAGING - SILICON MOTION SM750 FRAME BUFFER DRIVER M: Sudip Mukherjee M: Teddy Wang M: Sudip Mukherjee L: linux-fbdev@vger.kernel.org S: Maintained F: drivers/staging/sm750fb/ STAGING - SPEAKUP CONSOLE SPEECH DRIVER M: William Hubbs M: Chris Brannon M: Kirk Reiser M: Samuel Thibault L: speakup@linux-speakup.org W: http://www.linux-speakup.org/ S: Odd Fixes F: drivers/staging/speakup/ STAGING - VIA VT665X DRIVERS M: Forest Bond S: Odd Fixes F: drivers/staging/vt665?/ STAGING - WILC1000 WIFI DRIVER M: Adham Abozaeid M: Ajay Singh L: linux-wireless@vger.kernel.org S: Supported F: drivers/staging/wilc1000/ STAGING - XGI Z7,Z9,Z11 PCI DISPLAY DRIVER M: Arnaud Patard S: Odd Fixes F: drivers/staging/xgifb/ STAGING SUBSYSTEM M: Greg Kroah-Hartman T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging.git L: devel@driverdev.osuosl.org S: Supported F: drivers/staging/ STARFIRE/DURALAN NETWORK DRIVER M: Ion Badulescu S: Odd Fixes F: drivers/net/ethernet/adaptec/starfire* STEC S1220 SKD DRIVER M: Bart Van Assche L: linux-block@vger.kernel.org S: Maintained F: drivers/block/skd*[ch] STI AUDIO (ASoC) DRIVERS M: Arnaud Pouliquen L: alsa-devel@alsa-project.org (moderated for non-subscribers) S: Maintained F: Documentation/devicetree/bindings/sound/st,sti-asoc-card.txt F: sound/soc/sti/ STI CEC DRIVER M: Benjamin Gaignard S: Maintained F: drivers/media/platform/sti/cec/ F: Documentation/devicetree/bindings/media/stih-cec.txt STK1160 USB VIDEO CAPTURE DRIVER M: Ezequiel Garcia L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git S: Maintained F: drivers/media/usb/stk1160/ STM32 AUDIO (ASoC) DRIVERS M: Olivier Moysan M: Arnaud Pouliquen L: alsa-devel@alsa-project.org (moderated for non-subscribers) S: Maintained F: Documentation/devicetree/bindings/sound/st,stm32-*.txt F: sound/soc/stm/ STM32 TIMER/LPTIMER DRIVERS M: Fabrice Gasnier S: Maintained F: drivers/*/stm32-*timer* F: drivers/pwm/pwm-stm32* F: include/linux/*/stm32-*tim* F: Documentation/ABI/testing/*timer-stm32 F: Documentation/devicetree/bindings/*/stm32-*timer* F: Documentation/devicetree/bindings/pwm/pwm-stm32* STMMAC ETHERNET DRIVER M: Giuseppe Cavallaro M: Alexandre Torgue M: Jose Abreu L: netdev@vger.kernel.org W: http://www.stlinux.com S: Supported F: drivers/net/ethernet/stmicro/stmmac/ SUN3/3X M: Sam Creasey W: http://sammy.net/sun3/ S: Maintained F: arch/m68k/kernel/*sun3* F: arch/m68k/sun3*/ F: arch/m68k/include/asm/sun3* F: drivers/net/ethernet/i825xx/sun3* SUN4I LOW RES ADC ATTACHED TABLET KEYS DRIVER M: Hans de Goede L: linux-input@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/input/sun4i-lradc-keys.txt F: drivers/input/keyboard/sun4i-lradc-keys.c SUNDANCE NETWORK DRIVER M: Denis Kirjanov L: netdev@vger.kernel.org S: Maintained F: 
drivers/net/ethernet/dlink/sundance.c SUPERH M: Yoshinori Sato M: Rich Felker L: linux-sh@vger.kernel.org Q: http://patchwork.kernel.org/project/linux-sh/list/ S: Maintained F: Documentation/sh/ F: arch/sh/ F: drivers/sh/ SUSPEND TO RAM M: "Rafael J. Wysocki" M: Len Brown M: Pavel Machek L: linux-pm@vger.kernel.org B: https://bugzilla.kernel.org S: Supported F: Documentation/power/ F: arch/x86/kernel/acpi/ F: drivers/base/power/ F: kernel/power/ F: include/linux/suspend.h F: include/linux/freezer.h F: include/linux/pm.h SVGA HANDLING M: Martin Mares L: linux-video@atrey.karlin.mff.cuni.cz S: Maintained F: Documentation/svga.txt F: arch/x86/boot/video* SWIOTLB SUBSYSTEM M: Konrad Rzeszutek Wilk L: iommu@lists.linux-foundation.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/konrad/swiotlb.git S: Supported F: kernel/dma/swiotlb.c F: arch/*/kernel/pci-swiotlb.c F: include/linux/swiotlb.h SWITCHDEV M: Jiri Pirko M: Ivan Vecera L: netdev@vger.kernel.org S: Supported F: net/switchdev/ F: include/net/switchdev.h SY8106A REGULATOR DRIVER M: Icenowy Zheng S: Maintained F: drivers/regulator/sy8106a-regulator.c F: Documentation/devicetree/bindings/regulator/sy8106a-regulator.txt SYNC FILE FRAMEWORK M: Sumit Semwal R: Gustavo Padovan S: Maintained L: linux-media@vger.kernel.org L: dri-devel@lists.freedesktop.org F: drivers/dma-buf/sync_* F: drivers/dma-buf/dma-fence* F: drivers/dma-buf/sw_sync.c F: include/linux/sync_file.h F: include/uapi/linux/sync_file.h F: Documentation/sync_file.txt T: git git://anongit.freedesktop.org/drm/drm-misc SYNOPSYS ARC ARCHITECTURE M: Vineet Gupta L: linux-snps-arc@lists.infradead.org S: Supported F: arch/arc/ F: Documentation/devicetree/bindings/arc/* F: Documentation/devicetree/bindings/interrupt-controller/snps,arc* F: drivers/clocksource/arc_timer.c F: drivers/tty/serial/arc_uart.c T: git git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc.git SYNOPSYS ARC HSDK SDP pll clock driver M: Eugeniy Paltsev S: Supported F: drivers/clk/clk-hsdk-pll.c F: Documentation/devicetree/bindings/clock/snps,hsdk-pll-clock.txt SYNOPSYS ARC SDP clock driver M: Eugeniy Paltsev S: Supported F: drivers/clk/axs10x/* F: Documentation/devicetree/bindings/clock/snps,pll-clock.txt SYNOPSYS ARC SDP platform support M: Alexey Brodkin S: Supported F: arch/arc/plat-axs10x F: arch/arc/boot/dts/ax* F: Documentation/devicetree/bindings/arc/axs10* SYNOPSYS AXS10x RESET CONTROLLER DRIVER M: Eugeniy Paltsev S: Supported F: drivers/reset/reset-axs10x.c F: Documentation/devicetree/bindings/reset/snps,axs10x-reset.txt SYNOPSYS CREG GPIO DRIVER M: Eugeniy Paltsev S: Maintained F: drivers/gpio/gpio-creg-snps.c F: Documentation/devicetree/bindings/gpio/snps,creg-gpio.txt SYNOPSYS DESIGNWARE 8250 UART DRIVER R: Andy Shevchenko S: Maintained F: drivers/tty/serial/8250/8250_dw.c SYNOPSYS DESIGNWARE APB GPIO DRIVER M: Hoan Tran L: linux-gpio@vger.kernel.org S: Maintained F: drivers/gpio/gpio-dwapb.c F: Documentation/devicetree/bindings/gpio/snps-dwapb-gpio.txt SYNOPSYS DESIGNWARE AXI DMAC DRIVER M: Eugeniy Paltsev S: Maintained F: drivers/dma/dwi-axi-dmac/ F: Documentation/devicetree/bindings/dma/snps,dw-axi-dmac.txt SYNOPSYS DESIGNWARE DMAC DRIVER M: Viresh Kumar R: Andy Shevchenko S: Maintained F: Documentation/devicetree/bindings/dma/snps-dma.txt F: drivers/dma/dw/ F: include/dt-bindings/dma/dw-dmac.h F: include/linux/dma/dw.h F: include/linux/platform_data/dma-dw.h SYNOPSYS DESIGNWARE ENTERPRISE ETHERNET DRIVER M: Jose Abreu L: netdev@vger.kernel.org S: Supported F: drivers/net/ethernet/synopsys/ 
SYNOPSYS DESIGNWARE I2C DRIVER M: Jarkko Nikula R: Andy Shevchenko R: Mika Westerberg L: linux-i2c@vger.kernel.org S: Maintained F: drivers/i2c/busses/i2c-designware-* F: include/linux/platform_data/i2c-designware.h SYNOPSYS DESIGNWARE MMC/SD/SDIO DRIVER M: Jaehoon Chung L: linux-mmc@vger.kernel.org S: Maintained F: drivers/mmc/host/dw_mmc* SYNOPSYS HSDK RESET CONTROLLER DRIVER M: Eugeniy Paltsev S: Supported F: drivers/reset/reset-hsdk.c F: include/dt-bindings/reset/snps,hsdk-reset.h F: Documentation/devicetree/bindings/reset/snps,hsdk-reset.txt SYSTEM CONFIGURATION (SYSCON) M: Lee Jones M: Arnd Bergmann T: git git://git.kernel.org/pub/scm/linux/kernel/git/lee/mfd.git S: Supported F: drivers/mfd/syscon.c SYSTEM CONTROL & POWER/MANAGEMENT INTERFACE (SCPI/SCMI) Message Protocol drivers M: Sudeep Holla L: linux-arm-kernel@lists.infradead.org S: Maintained F: Documentation/devicetree/bindings/arm/arm,sc[mp]i.txt F: drivers/clk/clk-sc[mp]i.c F: drivers/cpufreq/sc[mp]i-cpufreq.c F: drivers/firmware/arm_scpi.c F: drivers/firmware/arm_scmi/ F: include/linux/sc[mp]i_protocol.h SYSTEM RESET/SHUTDOWN DRIVERS M: Sebastian Reichel L: linux-pm@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/sre/linux-power-supply.git S: Maintained F: Documentation/devicetree/bindings/power/reset/ F: drivers/power/reset/ SYSTEM TRACE MODULE CLASS M: Alexander Shishkin S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/ash/stm.git F: Documentation/trace/stm.rst F: drivers/hwtracing/stm/ F: include/linux/stm.h F: include/uapi/linux/stm.h SYSV FILESYSTEM M: Christoph Hellwig S: Maintained F: Documentation/filesystems/sysv-fs.txt F: fs/sysv/ F: include/linux/sysv_fs.h TARGET SUBSYSTEM M: "Nicholas A. Bellinger" L: linux-scsi@vger.kernel.org L: target-devel@vger.kernel.org W: http://www.linux-iscsi.org W: http://groups.google.com/group/linux-iscsi-target-dev T: git git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending.git master S: Supported F: drivers/target/ F: include/target/ F: Documentation/target/ TASKSTATS STATISTICS INTERFACE M: Balbir Singh S: Maintained F: Documentation/accounting/taskstats* F: include/linux/taskstats* F: kernel/taskstats.c TC subsystem M: Jamal Hadi Salim M: Cong Wang M: Jiri Pirko L: netdev@vger.kernel.org S: Maintained F: include/net/pkt_cls.h F: include/net/pkt_sched.h F: include/net/tc_act/ F: include/uapi/linux/pkt_cls.h F: include/uapi/linux/pkt_sched.h F: include/uapi/linux/tc_act/ F: include/uapi/linux/tc_ematch/ F: net/sched/ TC90522 MEDIA DRIVER M: Akihiro Tsukada L: linux-media@vger.kernel.org S: Odd Fixes F: drivers/media/dvb-frontends/tc90522* TCP LOW PRIORITY MODULE M: "Wong Hoi Sing, Edison" M: "Hung Hing Lun, Mike" W: http://tcp-lp-mod.sourceforge.net/ S: Maintained F: net/ipv4/tcp_lp.c TDA10071 MEDIA DRIVER M: Antti Palosaari L: linux-media@vger.kernel.org W: https://linuxtv.org W: http://palosaari.fi/linux/ Q: http://patchwork.linuxtv.org/project/linux-media/list/ T: git git://linuxtv.org/anttip/media_tree.git S: Maintained F: drivers/media/dvb-frontends/tda10071* TDA18212 MEDIA DRIVER M: Antti Palosaari L: linux-media@vger.kernel.org W: https://linuxtv.org W: http://palosaari.fi/linux/ Q: http://patchwork.linuxtv.org/project/linux-media/list/ T: git git://linuxtv.org/anttip/media_tree.git S: Maintained F: drivers/media/tuners/tda18212* TDA18218 MEDIA DRIVER M: Antti Palosaari L: linux-media@vger.kernel.org W: https://linuxtv.org W: http://palosaari.fi/linux/ Q: http://patchwork.linuxtv.org/project/linux-media/list/ T: git 
git://linuxtv.org/anttip/media_tree.git S: Maintained F: drivers/media/tuners/tda18218* TDA18250 MEDIA DRIVER M: Olli Salonen L: linux-media@vger.kernel.org W: https://linuxtv.org Q: http://patchwork.linuxtv.org/project/linux-media/list/ T: git git://linuxtv.org/media_tree.git S: Maintained F: drivers/media/tuners/tda18250* TDA18271 MEDIA DRIVER M: Michael Krufky L: linux-media@vger.kernel.org W: https://linuxtv.org W: http://github.com/mkrufky Q: http://patchwork.linuxtv.org/project/linux-media/list/ T: git git://linuxtv.org/mkrufky/tuners.git S: Maintained F: drivers/media/tuners/tda18271* TDA1997x MEDIA DRIVER M: Tim Harvey L: linux-media@vger.kernel.org W: https://linuxtv.org Q: http://patchwork.linuxtv.org/project/linux-media/list/ S: Maintained F: drivers/media/i2c/tda1997x.* TDA827x MEDIA DRIVER M: Michael Krufky L: linux-media@vger.kernel.org W: https://linuxtv.org W: http://github.com/mkrufky Q: http://patchwork.linuxtv.org/project/linux-media/list/ T: git git://linuxtv.org/mkrufky/tuners.git S: Maintained F: drivers/media/tuners/tda8290.* TDA8290 MEDIA DRIVER M: Michael Krufky L: linux-media@vger.kernel.org W: https://linuxtv.org W: http://github.com/mkrufky Q: http://patchwork.linuxtv.org/project/linux-media/list/ T: git git://linuxtv.org/mkrufky/tuners.git S: Maintained F: drivers/media/tuners/tda8290.* TDA9840 MEDIA DRIVER M: Hans Verkuil L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git W: https://linuxtv.org S: Maintained F: drivers/media/i2c/tda9840* TEA5761 TUNER DRIVER M: Mauro Carvalho Chehab L: linux-media@vger.kernel.org W: https://linuxtv.org T: git git://linuxtv.org/media_tree.git S: Odd fixes F: drivers/media/tuners/tea5761.* TEA5767 TUNER DRIVER M: Mauro Carvalho Chehab L: linux-media@vger.kernel.org W: https://linuxtv.org T: git git://linuxtv.org/media_tree.git S: Maintained F: drivers/media/tuners/tea5767.* TEA6415C MEDIA DRIVER M: Hans Verkuil L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git W: https://linuxtv.org S: Maintained F: drivers/media/i2c/tea6415c* TEA6420 MEDIA DRIVER M: Hans Verkuil L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git W: https://linuxtv.org S: Maintained F: drivers/media/i2c/tea6420* TEAM DRIVER M: Jiri Pirko L: netdev@vger.kernel.org S: Supported F: drivers/net/team/ F: include/linux/if_team.h F: include/uapi/linux/if_team.h TECHNOLOGIC SYSTEMS TS-5500 PLATFORM SUPPORT M: "Savoir-faire Linux Inc." 
S: Maintained F: arch/x86/platform/ts5500/ TECHNOTREND USB IR RECEIVER M: Sean Young L: linux-media@vger.kernel.org S: Maintained F: drivers/media/rc/ttusbir.c TECHWELL TW9910 VIDEO DECODER L: linux-media@vger.kernel.org S: Orphan F: drivers/media/i2c/tw9910.c F: include/media/i2c/tw9910.h TEE SUBSYSTEM M: Jens Wiklander S: Maintained F: include/linux/tee_drv.h F: include/uapi/linux/tee.h F: drivers/tee/ F: Documentation/tee.txt TEGRA ARCHITECTURE SUPPORT M: Thierry Reding M: Jonathan Hunter L: linux-tegra@vger.kernel.org Q: http://patchwork.ozlabs.org/project/linux-tegra/list/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/tegra/linux.git S: Supported N: [^a-z]tegra TEGRA CLOCK DRIVER M: Peter De Schrijver M: Prashant Gaikwad S: Supported F: drivers/clk/tegra/ TEGRA DMA DRIVERS M: Laxman Dewangan M: Jon Hunter S: Supported F: drivers/dma/tegra* TEGRA I2C DRIVER M: Laxman Dewangan S: Supported F: drivers/i2c/busses/i2c-tegra.c TEGRA IOMMU DRIVERS M: Thierry Reding L: linux-tegra@vger.kernel.org S: Supported F: drivers/iommu/tegra* TEGRA KBC DRIVER M: Laxman Dewangan S: Supported F: drivers/input/keyboard/tegra-kbc.c TEGRA NAND DRIVER M: Stefan Agner M: Lucas Stach S: Maintained F: Documentation/devicetree/bindings/mtd/nvidia-tegra20-nand.txt F: drivers/mtd/nand/raw/tegra_nand.c TEGRA PWM DRIVER M: Thierry Reding S: Supported F: drivers/pwm/pwm-tegra.c TEGRA SERIAL DRIVER M: Laxman Dewangan S: Supported F: drivers/tty/serial/serial-tegra.c TEGRA SPI DRIVER M: Laxman Dewangan S: Supported F: drivers/spi/spi-tegra* TEHUTI ETHERNET DRIVER M: Andy Gospodarek L: netdev@vger.kernel.org S: Supported F: drivers/net/ethernet/tehuti/* Telecom Clock Driver for MCPL0010 M: Mark Gross S: Supported F: drivers/char/tlclk.c TENSILICA XTENSA PORT (xtensa) M: Chris Zankel M: Max Filippov L: linux-xtensa@linux-xtensa.org T: git git://github.com/czankel/xtensa-linux.git S: Maintained F: arch/xtensa/ F: drivers/irqchip/irq-xtensa-* Texas Instruments' System Control Interface (TISCI) Protocol Driver M: Nishanth Menon M: Tero Kristo M: Santosh Shilimkar L: linux-arm-kernel@lists.infradead.org S: Maintained F: Documentation/devicetree/bindings/arm/keystone/ti,sci.txt F: drivers/firmware/ti_sci* F: include/linux/soc/ti/ti_sci_protocol.h F: Documentation/devicetree/bindings/soc/ti/sci-pm-domain.txt F: drivers/soc/ti/ti_sci_pm_domains.c F: Documentation/devicetree/bindings/reset/ti,sci-reset.txt F: Documentation/devicetree/bindings/clock/ti,sci-clk.txt F: drivers/clk/keystone/sci-clk.c F: drivers/reset/reset-ti-sci.c Texas Instruments ASoC drivers M: Peter Ujfalusi L: alsa-devel@alsa-project.org (moderated for non-subscribers) S: Maintained F: sound/soc/ti/ THANKO'S RAREMONO AM/FM/SW RADIO RECEIVER USB DRIVER M: Hans Verkuil L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git W: https://linuxtv.org S: Maintained F: drivers/media/radio/radio-raremono.c THERMAL M: Zhang Rui M: Eduardo Valentin R: Daniel Lezcano L: linux-pm@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/rzhang/linux.git T: git git://git.kernel.org/pub/scm/linux/kernel/git/evalenti/linux-soc-thermal.git Q: https://patchwork.kernel.org/project/linux-pm/list/ S: Supported F: drivers/thermal/ F: include/linux/thermal.h F: include/uapi/linux/thermal.h F: include/linux/cpu_cooling.h F: Documentation/devicetree/bindings/thermal/ THERMAL/CPU_COOLING M: Amit Daniel Kachhap M: Viresh Kumar M: Javi Merino L: linux-pm@vger.kernel.org S: Supported F: Documentation/thermal/cpu-cooling-api.txt F: 
drivers/thermal/cpu_cooling.c F: include/linux/cpu_cooling.h THINKPAD ACPI EXTRAS DRIVER M: Henrique de Moraes Holschuh L: ibm-acpi-devel@lists.sourceforge.net L: platform-driver-x86@vger.kernel.org W: http://ibm-acpi.sourceforge.net W: http://thinkwiki.org/wiki/Ibm-acpi T: git git://repo.or.cz/linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git S: Maintained F: drivers/platform/x86/thinkpad_acpi.c THUNDERBOLT DRIVER M: Andreas Noever M: Michael Jamet M: Mika Westerberg M: Yehezkel Bernat T: git git://git.kernel.org/pub/scm/linux/kernel/git/westeri/thunderbolt.git S: Maintained F: Documentation/admin-guide/thunderbolt.rst F: drivers/thunderbolt/ F: include/linux/thunderbolt.h THUNDERBOLT NETWORK DRIVER M: Michael Jamet M: Mika Westerberg M: Yehezkel Bernat L: netdev@vger.kernel.org S: Maintained F: drivers/net/thunderbolt.c THUNDERX GPIO DRIVER M: David Daney S: Maintained F: drivers/gpio/gpio-thunderx.c TI AM437X VPFE DRIVER M: "Lad, Prabhakar" L: linux-media@vger.kernel.org W: https://linuxtv.org Q: http://patchwork.linuxtv.org/project/linux-media/list/ T: git git://linuxtv.org/mhadli/v4l-dvb-davinci_devices.git S: Maintained F: drivers/media/platform/am437x/ TI BANDGAP AND THERMAL DRIVER M: Eduardo Valentin M: Keerthy L: linux-pm@vger.kernel.org L: linux-omap@vger.kernel.org S: Maintained F: drivers/thermal/ti-soc-thermal/ TI BQ27XXX POWER SUPPLY DRIVER R: Andrew F. Davis F: include/linux/power/bq27xxx_battery.h F: drivers/power/supply/bq27xxx_battery.c F: drivers/power/supply/bq27xxx_battery_i2c.c TI CDCE706 CLOCK DRIVER M: Max Filippov S: Maintained F: drivers/clk/clk-cdce706.c TI CLOCK DRIVER M: Tero Kristo L: linux-omap@vger.kernel.org S: Maintained F: drivers/clk/ti/ F: include/linux/clk/ti.h TI DAVINCI MACHINE SUPPORT M: Sekhar Nori M: Kevin Hilman L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) T: git git://git.kernel.org/pub/scm/linux/kernel/git/nsekhar/linux-davinci.git S: Supported F: arch/arm/mach-davinci/ F: drivers/i2c/busses/i2c-davinci.c F: arch/arm/boot/dts/da850* TI DAVINCI SERIES CLOCK DRIVER M: David Lechner R: Sekhar Nori S: Maintained F: Documentation/devicetree/bindings/clock/ti/davinci/ F: drivers/clk/davinci/ TI DAVINCI SERIES GPIO DRIVER M: Keerthy L: linux-gpio@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/gpio/gpio-davinci.txt F: drivers/gpio/gpio-davinci.c TI DAVINCI SERIES MEDIA DRIVER M: "Lad, Prabhakar" L: linux-media@vger.kernel.org W: https://linuxtv.org Q: http://patchwork.linuxtv.org/project/linux-media/list/ T: git git://linuxtv.org/mhadli/v4l-dvb-davinci_devices.git S: Maintained F: drivers/media/platform/davinci/ F: include/media/davinci/ TI ETHERNET SWITCH DRIVER (CPSW) R: Grygorii Strashko L: linux-omap@vger.kernel.org L: netdev@vger.kernel.org S: Maintained F: drivers/net/ethernet/ti/cpsw* F: drivers/net/ethernet/ti/davinci* TI FLASH MEDIA INTERFACE DRIVER M: Alex Dubov S: Maintained F: drivers/misc/tifm* F: drivers/mmc/host/tifm_sd.c F: include/linux/tifm.h TI KEYSTONE MULTICORE NAVIGATOR DRIVERS M: Santosh Shilimkar L: linux-kernel@vger.kernel.org L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: drivers/soc/ti/* T: git git://git.kernel.org/pub/scm/linux/kernel/git/ssantosh/linux-keystone.git TI LM49xxx FAMILY ASoC CODEC DRIVERS M: M R Swami Reddy M: Vishwas A Deshpande L: alsa-devel@alsa-project.org (moderated for non-subscribers) S: Maintained F: sound/soc/codecs/lm49453* F: sound/soc/codecs/isabelle* TI LP855x BACKLIGHT DRIVER M: Milo Kim S: Maintained F: 
Documentation/backlight/lp855x-driver.txt F: drivers/video/backlight/lp855x_bl.c F: include/linux/platform_data/lp855x.h TI LP8727 CHARGER DRIVER M: Milo Kim S: Maintained F: drivers/power/supply/lp8727_charger.c F: include/linux/platform_data/lp8727.h TI LP8788 MFD DRIVER M: Milo Kim S: Maintained F: drivers/iio/adc/lp8788_adc.c F: drivers/leds/leds-lp8788.c F: drivers/mfd/lp8788*.c F: drivers/power/supply/lp8788-charger.c F: drivers/regulator/lp8788-*.c F: include/linux/mfd/lp8788*.h TI NETCP ETHERNET DRIVER M: Wingman Kwok M: Murali Karicheri L: netdev@vger.kernel.org S: Maintained F: drivers/net/ethernet/ti/netcp* TI PCM3060 ASoC CODEC DRIVER M: Kirill Marinushkin L: alsa-devel@alsa-project.org (moderated for non-subscribers) S: Maintained F: Documentation/devicetree/bindings/sound/pcm3060.txt F: sound/soc/codecs/pcm3060* TI TAS571X FAMILY ASoC CODEC DRIVER M: Kevin Cernekee L: alsa-devel@alsa-project.org (moderated for non-subscribers) S: Odd Fixes F: sound/soc/codecs/tas571x* TI TRF7970A NFC DRIVER M: Mark Greer L: linux-wireless@vger.kernel.org L: linux-nfc@lists.01.org (moderated for non-subscribers) S: Supported F: drivers/nfc/trf7970a.c F: Documentation/devicetree/bindings/net/nfc/trf7970a.txt TI TWL4030 SERIES SOC CODEC DRIVER M: Peter Ujfalusi L: alsa-devel@alsa-project.org (moderated for non-subscribers) S: Maintained F: sound/soc/codecs/twl4030* TI VPE/CAL DRIVERS M: Benoit Parrot L: linux-media@vger.kernel.org W: http://linuxtv.org/ Q: http://patchwork.linuxtv.org/project/linux-media/list/ S: Maintained F: drivers/media/platform/ti-vpe/ TI WILINK WIRELESS DRIVERS L: linux-wireless@vger.kernel.org W: http://wireless.kernel.org/en/users/Drivers/wl12xx W: http://wireless.kernel.org/en/users/Drivers/wl1251 T: git git://git.kernel.org/pub/scm/linux/kernel/git/luca/wl12xx.git S: Orphan F: drivers/net/wireless/ti/ F: include/linux/wl12xx.h TIMEKEEPING, CLOCKSOURCE CORE, NTP, ALARMTIMER M: John Stultz M: Thomas Gleixner R: Stephen Boyd L: linux-kernel@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core S: Supported F: include/linux/clocksource.h F: include/linux/time.h F: include/linux/timex.h F: include/uapi/linux/time.h F: include/uapi/linux/timex.h F: kernel/time/clocksource.c F: kernel/time/time*.c F: kernel/time/alarmtimer.c F: kernel/time/ntp.c F: tools/testing/selftests/timers/ TIPC NETWORK LAYER M: Jon Maloy M: Ying Xue L: netdev@vger.kernel.org (core kernel code) L: tipc-discussion@lists.sourceforge.net (user apps, general discussion) W: http://tipc.sourceforge.net/ S: Maintained F: include/uapi/linux/tipc*.h F: net/tipc/ TLAN NETWORK DRIVER M: Samuel Chessman L: tlan-devel@lists.sourceforge.net (subscribers-only) W: http://sourceforge.net/projects/tlan/ S: Maintained F: Documentation/networking/device_drivers/ti/tlan.txt F: drivers/net/ethernet/ti/tlan.* TM6000 VIDEO4LINUX DRIVER M: Mauro Carvalho Chehab L: linux-media@vger.kernel.org W: https://linuxtv.org T: git git://linuxtv.org/media_tree.git S: Odd fixes F: drivers/media/usb/tm6000/ F: Documentation/media/v4l-drivers/tm6000* TMIO/SDHI MMC DRIVER M: Wolfram Sang L: linux-mmc@vger.kernel.org S: Supported F: drivers/mmc/host/tmio_mmc* F: drivers/mmc/host/renesas_sdhi* F: include/linux/mfd/tmio.h TMP401 HARDWARE MONITOR DRIVER M: Guenter Roeck L: linux-hwmon@vger.kernel.org S: Maintained F: Documentation/hwmon/tmp401 F: drivers/hwmon/tmp401.c TMPFS (SHMEM FILESYSTEM) M: Hugh Dickins L: linux-mm@kvack.org S: Maintained F: include/linux/shmem_fs.h F: mm/shmem.c TOMOYO SECURITY MODULE 
M: Kentaro Takeda M: Tetsuo Handa L: tomoyo-dev-en@lists.sourceforge.jp (subscribers-only, for developers in English) L: tomoyo-users-en@lists.sourceforge.jp (subscribers-only, for users in English) L: tomoyo-dev@lists.sourceforge.jp (subscribers-only, for developers in Japanese) L: tomoyo-users@lists.sourceforge.jp (subscribers-only, for users in Japanese) W: http://tomoyo.sourceforge.jp/ T: quilt http://svn.sourceforge.jp/svnroot/tomoyo/trunk/2.5.x/tomoyo-lsm/patches/ S: Maintained F: security/tomoyo/ TOPSTAR LAPTOP EXTRAS DRIVER M: Herton Ronaldo Krzesinski L: platform-driver-x86@vger.kernel.org S: Maintained F: drivers/platform/x86/topstar-laptop.c TORTURE-TEST MODULES M: Davidlohr Bueso M: "Paul E. McKenney" M: Josh Triplett L: linux-kernel@vger.kernel.org S: Supported T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git F: Documentation/RCU/torture.txt F: kernel/torture.c F: kernel/rcu/rcutorture.c F: kernel/rcu/rcuperf.c F: kernel/locking/locktorture.c TOSHIBA ACPI EXTRAS DRIVER M: Azael Avalos L: platform-driver-x86@vger.kernel.org S: Maintained F: drivers/platform/x86/toshiba_acpi.c TOSHIBA BLUETOOTH DRIVER M: Azael Avalos L: platform-driver-x86@vger.kernel.org S: Maintained F: drivers/platform/x86/toshiba_bluetooth.c TOSHIBA HDD ACTIVE PROTECTION SENSOR DRIVER M: Azael Avalos L: platform-driver-x86@vger.kernel.org S: Maintained F: drivers/platform/x86/toshiba_haps.c TOSHIBA SMM DRIVER M: Jonathan Buzzard W: http://www.buzzard.org.uk/toshiba/ S: Maintained F: drivers/char/toshiba.c F: include/linux/toshiba.h F: include/uapi/linux/toshiba.h TOSHIBA TC358743 DRIVER M: Mats Randgaard L: linux-media@vger.kernel.org S: Maintained F: drivers/media/i2c/tc358743* F: include/media/i2c/tc358743.h TOSHIBA WMI HOTKEYS DRIVER M: Azael Avalos L: platform-driver-x86@vger.kernel.org S: Maintained F: drivers/platform/x86/toshiba-wmi.c TPM DEVICE DRIVER M: Peter Huewe M: Jarkko Sakkinen R: Jason Gunthorpe L: linux-integrity@vger.kernel.org Q: https://patchwork.kernel.org/project/linux-integrity/list/ W: https://kernsec.org/wiki/index.php/Linux_Kernel_Integrity T: git git://git.infradead.org/users/jjs/linux-tpmdd.git S: Maintained F: drivers/char/tpm/ TRACING M: Steven Rostedt M: Ingo Molnar T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git perf/core S: Maintained F: Documentation/trace/ftrace.rst F: arch/*/*/*/ftrace.h F: arch/*/kernel/ftrace.c F: include/*/ftrace.h F: include/linux/trace*.h F: include/trace/ F: kernel/trace/ F: tools/testing/selftests/ftrace/ TRACING MMIO ACCESSES (MMIOTRACE) M: Steven Rostedt M: Ingo Molnar R: Karol Herbst R: Pekka Paalanen S: Maintained L: linux-kernel@vger.kernel.org L: nouveau@lists.freedesktop.org F: kernel/trace/trace_mmiotrace.c F: include/linux/mmiotrace.h F: arch/x86/mm/kmmio.c F: arch/x86/mm/mmio-mod.c F: arch/x86/mm/testmmiotrace.c TRIVIAL PATCHES M: Jiri Kosina T: git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/trivial.git S: Maintained K: ^Subject:.*(?i)trivial TEMPO SEMICONDUCTOR DRIVERS M: Steven Eckhoff S: Maintained F: sound/soc/codecs/tscs*.c F: sound/soc/codecs/tscs*.h F: Documentation/devicetree/bindings/sound/tscs*.txt TTY LAYER M: Greg Kroah-Hartman M: Jiri Slaby S: Supported T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/tty.git F: Documentation/serial/ F: drivers/tty/ F: drivers/tty/serial/serial_core.c F: include/linux/serial_core.h F: include/linux/serial.h F: include/linux/tty.h F: include/uapi/linux/serial_core.h F: include/uapi/linux/serial.h F: include/uapi/linux/tty.h TUA9001 
MEDIA DRIVER M: Antti Palosaari L: linux-media@vger.kernel.org W: https://linuxtv.org W: http://palosaari.fi/linux/ Q: http://patchwork.linuxtv.org/project/linux-media/list/ T: git git://linuxtv.org/anttip/media_tree.git S: Maintained F: drivers/media/tuners/tua9001* TULIP NETWORK DRIVERS L: netdev@vger.kernel.org L: linux-parisc@vger.kernel.org S: Orphan F: drivers/net/ethernet/dec/tulip/ TUN/TAP driver M: Maxim Krasnyansky W: http://vtun.sourceforge.net/tun S: Maintained F: Documentation/networking/tuntap.txt F: arch/um/os-Linux/drivers/ TURBOCHANNEL SUBSYSTEM M: "Maciej W. Rozycki" M: Ralf Baechle L: linux-mips@vger.kernel.org Q: http://patchwork.linux-mips.org/project/linux-mips/list/ S: Maintained F: drivers/tc/ F: include/linux/tc.h TURBOSTAT UTILITY M: "Len Brown" L: linux-pm@vger.kernel.org B: https://bugzilla.kernel.org Q: https://patchwork.kernel.org/project/linux-pm/list/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux.git turbostat S: Supported F: tools/power/x86/turbostat/ TW5864 VIDEO4LINUX DRIVER M: Bluecherry Maintainers M: Anton Sviridenko M: Andrey Utkin M: Andrey Utkin L: linux-media@vger.kernel.org S: Supported F: drivers/media/pci/tw5864/ TW68 VIDEO4LINUX DRIVER M: Hans Verkuil L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git W: https://linuxtv.org S: Odd Fixes F: drivers/media/pci/tw68/ TW686X VIDEO4LINUX DRIVER M: Ezequiel Garcia L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git W: http://linuxtv.org S: Maintained F: drivers/media/pci/tw686x/ UBI FILE SYSTEM (UBIFS) M: Richard Weinberger M: Artem Bityutskiy M: Adrian Hunter L: linux-mtd@lists.infradead.org T: git git://git.infradead.org/ubifs-2.6.git W: http://www.linux-mtd.infradead.org/doc/ubifs.html S: Supported F: Documentation/filesystems/ubifs.txt F: fs/ubifs/ UCLINUX (M68KNOMMU AND COLDFIRE) M: Greg Ungerer W: http://www.linux-m68k.org/ W: http://www.uclinux.org/ L: linux-m68k@lists.linux-m68k.org L: uclinux-dev@uclinux.org (subscribers-only) T: git git://git.kernel.org/pub/scm/linux/kernel/git/gerg/m68knommu.git S: Maintained F: arch/m68k/coldfire/ F: arch/m68k/68*/ F: arch/m68k/*/*_no.* F: arch/m68k/include/asm/*_no.* UDF FILESYSTEM M: Jan Kara S: Maintained F: Documentation/filesystems/udf.txt F: fs/udf/ UDRAW TABLET M: Bastien Nocera L: linux-input@vger.kernel.org S: Maintained F: drivers/hid/hid-udraw-ps3.c UFS FILESYSTEM M: Evgeniy Dushistov S: Maintained F: Documentation/filesystems/ufs.txt F: fs/ufs/ UHID USERSPACE HID IO DRIVER: M: David Herrmann L: linux-input@vger.kernel.org S: Maintained F: drivers/hid/uhid.c F: include/uapi/linux/uhid.h ULPI BUS M: Heikki Krogerus L: linux-usb@vger.kernel.org S: Maintained F: drivers/usb/common/ulpi.c F: include/linux/ulpi/ ULTRA-WIDEBAND (UWB) SUBSYSTEM: L: linux-usb@vger.kernel.org S: Orphan F: drivers/uwb/ F: include/linux/uwb.h F: include/linux/uwb/ UNICORE32 ARCHITECTURE: M: Guan Xuetao W: http://mprc.pku.edu.cn/~guanxuetao/linux S: Maintained T: git git://github.com/gxt/linux.git F: arch/unicore32/ UNIFDEF M: Tony Finch W: http://dotat.at/prog/unifdef S: Maintained F: scripts/unifdef.c UNIFORM CDROM DRIVER M: Jens Axboe W: http://www.kernel.dk S: Maintained F: Documentation/cdrom/ F: drivers/cdrom/cdrom.c F: include/linux/cdrom.h F: include/uapi/linux/cdrom.h UNISYS S-PAR DRIVERS M: David Kershner L: sparmaintainer@unisys.com (Unisys internal) S: Supported F: include/linux/visorbus.h F: drivers/visorbus/ F: drivers/staging/unisys/ UNIVERSAL FLASH STORAGE HOST CONTROLLER DRIVER M: Vinayak Holikatti L: 
linux-scsi@vger.kernel.org S: Supported F: Documentation/scsi/ufs.txt F: drivers/scsi/ufs/ UNIVERSAL FLASH STORAGE HOST CONTROLLER DRIVER DWC HOOKS M: Joao Pinto L: linux-scsi@vger.kernel.org S: Supported F: drivers/scsi/ufs/*dwc* UNSORTED BLOCK IMAGES (UBI) M: Artem Bityutskiy M: Richard Weinberger W: http://www.linux-mtd.infradead.org/ L: linux-mtd@lists.infradead.org T: git git://git.infradead.org/ubifs-2.6.git S: Supported F: drivers/mtd/ubi/ F: include/linux/mtd/ubi.h F: include/uapi/mtd/ubi-user.h USB "USBNET" DRIVER FRAMEWORK M: Oliver Neukum L: netdev@vger.kernel.org W: http://www.linux-usb.org/usbnet S: Maintained F: drivers/net/usb/usbnet.c F: include/linux/usb/usbnet.h USB ACM DRIVER M: Oliver Neukum L: linux-usb@vger.kernel.org S: Maintained F: Documentation/usb/acm.txt F: drivers/usb/class/cdc-acm.* USB AR5523 WIRELESS DRIVER M: Pontus Fuchs L: linux-wireless@vger.kernel.org S: Maintained F: drivers/net/wireless/ath/ar5523/ USB ATTACHED SCSI M: Oliver Neukum L: linux-usb@vger.kernel.org L: linux-scsi@vger.kernel.org S: Maintained F: drivers/usb/storage/uas.c USB CDC ETHERNET DRIVER M: Oliver Neukum L: linux-usb@vger.kernel.org S: Maintained F: drivers/net/usb/cdc_*.c F: include/uapi/linux/usb/cdc.h USB CHAOSKEY DRIVER M: Keith Packard L: linux-usb@vger.kernel.org S: Maintained F: drivers/usb/misc/chaoskey.c USB CYPRESS C67X00 DRIVER M: Peter Korsgaard L: linux-usb@vger.kernel.org S: Maintained F: drivers/usb/c67x00/ USB DAVICOM DM9601 DRIVER M: Peter Korsgaard L: netdev@vger.kernel.org W: http://www.linux-usb.org/usbnet S: Maintained F: drivers/net/usb/dm9601.c USB DIAMOND RIO500 DRIVER M: Cesar Miquel L: rio500-users@lists.sourceforge.net W: http://rio500.sourceforge.net S: Maintained F: drivers/usb/misc/rio500* USB EHCI DRIVER M: Alan Stern L: linux-usb@vger.kernel.org S: Maintained F: Documentation/usb/ehci.txt F: drivers/usb/host/ehci* USB GADGET/PERIPHERAL SUBSYSTEM M: Felipe Balbi L: linux-usb@vger.kernel.org W: http://www.linux-usb.org/gadget T: git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git S: Maintained F: drivers/usb/gadget/ F: include/linux/usb/gadget* USB HID/HIDBP DRIVERS (USB KEYBOARDS, MICE, REMOTE CONTROLS, ...) 
M: Jiri Kosina M: Benjamin Tissoires L: linux-usb@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/hid/hid.git S: Maintained F: Documentation/hid/hiddev.txt F: drivers/hid/usbhid/ USB INTEL XHCI ROLE MUX DRIVER M: Hans de Goede L: linux-usb@vger.kernel.org S: Maintained F: drivers/usb/roles/intel-xhci-usb-role-switch.c USB ISP116X DRIVER M: Olav Kongas L: linux-usb@vger.kernel.org S: Maintained F: drivers/usb/host/isp116x* F: include/linux/usb/isp116x.h USB LAN78XX ETHERNET DRIVER M: Woojung Huh M: Microchip Linux Driver Support L: netdev@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/net/microchip,lan78xx.txt F: drivers/net/usb/lan78xx.* F: include/dt-bindings/net/microchip-lan78xx.h USB MASS STORAGE DRIVER M: Alan Stern L: linux-usb@vger.kernel.org L: usb-storage@lists.one-eyed-alien.net S: Maintained W: http://www.one-eyed-alien.net/~mdharm/linux-usb/ F: drivers/usb/storage/ USB MIDI DRIVER M: Clemens Ladisch L: alsa-devel@alsa-project.org (moderated for non-subscribers) T: git git://git.alsa-project.org/alsa-kernel.git S: Maintained F: sound/usb/midi.* USB NETWORKING DRIVERS L: linux-usb@vger.kernel.org S: Odd Fixes F: drivers/net/usb/ USB OHCI DRIVER M: Alan Stern L: linux-usb@vger.kernel.org S: Maintained F: Documentation/usb/ohci.txt F: drivers/usb/host/ohci* USB OTG FSM (Finite State Machine) M: Peter Chen T: git git://git.kernel.org/pub/scm/linux/kernel/git/peter.chen/usb.git L: linux-usb@vger.kernel.org S: Maintained F: drivers/usb/common/usb-otg-fsm.c USB OVER IP DRIVER M: Valentina Manea M: Shuah Khan L: linux-usb@vger.kernel.org S: Maintained F: Documentation/usb/usbip_protocol.txt F: drivers/usb/usbip/ F: tools/usb/usbip/ F: tools/testing/selftests/drivers/usb/usbip/ USB PEGASUS DRIVER M: Petko Manolov L: linux-usb@vger.kernel.org L: netdev@vger.kernel.org T: git git://github.com/petkan/pegasus.git W: https://github.com/petkan/pegasus S: Maintained F: drivers/net/usb/pegasus.* USB PHY LAYER M: Felipe Balbi L: linux-usb@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git S: Maintained F: drivers/usb/phy/ USB PRINTER DRIVER (usblp) M: Pete Zaitcev L: linux-usb@vger.kernel.org S: Supported F: drivers/usb/class/usblp.c USB QMI WWAN NETWORK DRIVER M: Bjørn Mork L: netdev@vger.kernel.org S: Maintained F: Documentation/ABI/testing/sysfs-class-net-qmi F: drivers/net/usb/qmi_wwan.c USB RTL8150 DRIVER M: Petko Manolov L: linux-usb@vger.kernel.org L: netdev@vger.kernel.org T: git git://github.com/petkan/rtl8150.git W: https://github.com/petkan/rtl8150 S: Maintained F: drivers/net/usb/rtl8150.c USB SERIAL SUBSYSTEM M: Johan Hovold L: linux-usb@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/johan/usb-serial.git S: Maintained F: Documentation/usb/usb-serial.txt F: drivers/usb/serial/ F: include/linux/usb/serial.h USB SMSC75XX ETHERNET DRIVER M: Steve Glendinning L: netdev@vger.kernel.org S: Maintained F: drivers/net/usb/smsc75xx.* USB SMSC95XX ETHERNET DRIVER M: Steve Glendinning M: Microchip Linux Driver Support L: netdev@vger.kernel.org S: Maintained F: drivers/net/usb/smsc95xx.* USB SUBSYSTEM M: Greg Kroah-Hartman L: linux-usb@vger.kernel.org W: http://www.linux-usb.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb.git S: Supported F: Documentation/devicetree/bindings/usb/ F: Documentation/usb/ F: drivers/usb/ F: include/linux/usb.h F: include/linux/usb/ USB TYPEC PI3USB30532 MUX DRIVER M: Hans de Goede L: linux-usb@vger.kernel.org S: Maintained F: 
drivers/usb/typec/mux/pi3usb30532.c USB TYPEC CLASS M: Heikki Krogerus L: linux-usb@vger.kernel.org S: Maintained F: Documentation/ABI/testing/sysfs-class-typec F: Documentation/driver-api/usb/typec.rst F: drivers/usb/typec/ F: include/linux/usb/typec.h USB TYPEC BUS FOR ALTERNATE MODES M: Heikki Krogerus L: linux-usb@vger.kernel.org S: Maintained F: Documentation/ABI/testing/sysfs-bus-typec F: Documentation/driver-api/usb/typec_bus.rst F: drivers/usb/typec/altmodes/ F: include/linux/usb/typec_altmode.h USB TYPEC PORT CONTROLLER DRIVERS M: Guenter Roeck L: linux-usb@vger.kernel.org S: Maintained F: drivers/usb/typec/tcpm/ USB UHCI DRIVER M: Alan Stern L: linux-usb@vger.kernel.org S: Maintained F: drivers/usb/host/uhci* USB VIDEO CLASS M: Laurent Pinchart L: linux-uvc-devel@lists.sourceforge.net (subscribers-only) L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git W: http://www.ideasonboard.org/uvc/ S: Maintained F: drivers/media/usb/uvc/ F: include/uapi/linux/uvcvideo.h USB VISION DRIVER M: Hans Verkuil L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git W: https://linuxtv.org S: Odd Fixes F: drivers/media/usb/usbvision/ USB WEBCAM GADGET M: Laurent Pinchart L: linux-usb@vger.kernel.org S: Maintained F: drivers/usb/gadget/function/*uvc* F: drivers/usb/gadget/legacy/webcam.c F: include/uapi/linux/usb/g_uvc.h USB WIRELESS RNDIS DRIVER (rndis_wlan) M: Jussi Kivilinna L: linux-wireless@vger.kernel.org S: Maintained F: drivers/net/wireless/rndis_wlan.c USB XHCI DRIVER M: Mathias Nyman L: linux-usb@vger.kernel.org S: Supported F: drivers/usb/host/xhci* F: drivers/usb/host/pci-quirks* USB ZD1201 DRIVER L: linux-wireless@vger.kernel.org W: http://linux-lc100020.sourceforge.net S: Orphan F: drivers/net/wireless/zydas/zd1201.* USB ZR364XX DRIVER M: Antoine Jacquet L: linux-usb@vger.kernel.org L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git W: http://royale.zerezo.com/zr364xx/ S: Maintained F: Documentation/media/v4l-drivers/zr364xx* F: drivers/media/usb/zr364xx/ USER-MODE LINUX (UML) M: Jeff Dike M: Richard Weinberger M: Anton Ivanov L: linux-um@lists.infradead.org W: http://user-mode-linux.sourceforge.net Q: https://patchwork.ozlabs.org/project/linux-um/list/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/rw/uml.git S: Maintained F: Documentation/virtual/uml/ F: arch/um/ F: arch/x86/um/ F: fs/hostfs/ USERSPACE COPYIN/COPYOUT (UIOVEC) M: Alexander Viro S: Maintained F: lib/iov_iter.c F: include/linux/uio.h USERSPACE DMA BUFFER DRIVER M: Gerd Hoffmann S: Maintained L: dri-devel@lists.freedesktop.org F: drivers/dma-buf/udmabuf.c F: include/uapi/linux/udmabuf.h T: git git://anongit.freedesktop.org/drm/drm-misc USERSPACE I/O (UIO) M: Greg Kroah-Hartman S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc.git F: Documentation/driver-api/uio-howto.rst F: drivers/uio/ F: include/linux/uio_driver.h UTIL-LINUX PACKAGE M: Karel Zak L: util-linux@vger.kernel.org W: http://en.wikipedia.org/wiki/Util-linux T: git git://git.kernel.org/pub/scm/utils/util-linux/util-linux.git S: Maintained UUID HELPERS M: Christoph Hellwig R: Andy Shevchenko L: linux-kernel@vger.kernel.org T: git git://git.infradead.org/users/hch/uuid.git F: lib/uuid.c F: lib/test_uuid.c F: include/linux/uuid.h F: include/uapi/linux/uuid.h S: Maintained UVESAFB DRIVER M: Michal Januszewski L: linux-fbdev@vger.kernel.org W: https://github.com/mjanusz/v86d S: Maintained F: Documentation/fb/uvesafb.txt F: drivers/video/fbdev/uvesafb.* VF610 NAND DRIVER 
M: Stefan Agner L: linux-mtd@lists.infradead.org S: Supported F: drivers/mtd/nand/raw/vf610_nfc.c VFAT/FAT/MSDOS FILESYSTEM M: OGAWA Hirofumi S: Maintained F: Documentation/filesystems/vfat.txt F: fs/fat/ VFIO DRIVER M: Alex Williamson L: kvm@vger.kernel.org T: git git://github.com/awilliam/linux-vfio.git S: Maintained F: Documentation/vfio.txt F: drivers/vfio/ F: include/linux/vfio.h F: include/uapi/linux/vfio.h VFIO MEDIATED DEVICE DRIVERS M: Kirti Wankhede L: kvm@vger.kernel.org S: Maintained F: Documentation/vfio-mediated-device.txt F: drivers/vfio/mdev/ F: include/linux/mdev.h F: samples/vfio-mdev/ VFIO PLATFORM DRIVER M: Eric Auger L: kvm@vger.kernel.org S: Maintained F: drivers/vfio/platform/ VGA_SWITCHEROO R: Lukas Wunner S: Maintained F: Documentation/gpu/vga-switcheroo.rst F: drivers/gpu/vga/vga_switcheroo.c F: include/linux/vga_switcheroo.h T: git git://anongit.freedesktop.org/drm/drm-misc VIA RHINE NETWORK DRIVER S: Orphan F: drivers/net/ethernet/via/via-rhine.c VIA SD/MMC CARD CONTROLLER DRIVER M: Bruce Chang M: Harald Welte S: Maintained F: drivers/mmc/host/via-sdmmc.c VIA UNICHROME(PRO)/CHROME9 FRAMEBUFFER DRIVER M: Florian Tobias Schandinat L: linux-fbdev@vger.kernel.org S: Maintained F: include/linux/via-core.h F: include/linux/via-gpio.h F: include/linux/via_i2c.h F: drivers/video/fbdev/via/ VIA VELOCITY NETWORK DRIVER M: Francois Romieu L: netdev@vger.kernel.org S: Maintained F: drivers/net/ethernet/via/via-velocity.* VICODEC VIRTUAL CODEC DRIVER M: Hans Verkuil L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git W: https://linuxtv.org S: Maintained F: drivers/media/platform/vicodec/* VIDEO MULTIPLEXER DRIVER M: Philipp Zabel L: linux-media@vger.kernel.org S: Maintained F: drivers/media/platform/video-mux.c VIDEO I2C POLLING DRIVER M: Matt Ranostay L: linux-media@vger.kernel.org S: Maintained F: drivers/media/i2c/video-i2c.c VIDEOBUF2 FRAMEWORK M: Pawel Osciak M: Marek Szyprowski M: Kyungmin Park L: linux-media@vger.kernel.org S: Maintained F: drivers/media/common/videobuf2/* F: include/media/videobuf2-* VIMC VIRTUAL MEDIA CONTROLLER DRIVER M: Helen Koike L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git W: https://linuxtv.org S: Maintained F: drivers/media/platform/vimc/* VIRT LIB M: Alex Williamson M: Paolo Bonzini L: kvm@vger.kernel.org S: Supported F: virt/lib/ VIRTIO AND VHOST VSOCK DRIVER M: Stefan Hajnoczi L: kvm@vger.kernel.org L: virtualization@lists.linux-foundation.org L: netdev@vger.kernel.org S: Maintained F: include/linux/virtio_vsock.h F: include/uapi/linux/virtio_vsock.h F: include/uapi/linux/vsockmon.h F: include/uapi/linux/vm_sockets_diag.h F: net/vmw_vsock/diag.c F: net/vmw_vsock/af_vsock_tap.c F: net/vmw_vsock/virtio_transport_common.c F: net/vmw_vsock/virtio_transport.c F: drivers/net/vsockmon.c F: drivers/vhost/vsock.c F: tools/testing/vsock/ VIRTIO CONSOLE DRIVER M: Amit Shah L: virtualization@lists.linux-foundation.org S: Maintained F: drivers/char/virtio_console.c F: include/linux/virtio_console.h F: include/uapi/linux/virtio_console.h VIRTIO CORE, NET AND BLOCK DRIVERS M: "Michael S. 
Tsirkin" M: Jason Wang L: virtualization@lists.linux-foundation.org S: Maintained F: Documentation/devicetree/bindings/virtio/ F: drivers/virtio/ F: tools/virtio/ F: drivers/net/virtio_net.c F: drivers/block/virtio_blk.c F: include/linux/virtio*.h F: include/uapi/linux/virtio_*.h F: drivers/crypto/virtio/ F: mm/balloon_compaction.c VIRTIO CRYPTO DRIVER M: Gonglei L: virtualization@lists.linux-foundation.org L: linux-crypto@vger.kernel.org S: Maintained F: drivers/crypto/virtio/ F: include/uapi/linux/virtio_crypto.h VIRTIO DRIVERS FOR S390 M: Cornelia Huck M: Halil Pasic L: linux-s390@vger.kernel.org L: virtualization@lists.linux-foundation.org L: kvm@vger.kernel.org S: Supported F: drivers/s390/virtio/ F: arch/s390/include/uapi/asm/virtio-ccw.h VIRTIO GPU DRIVER M: David Airlie M: Gerd Hoffmann L: dri-devel@lists.freedesktop.org L: virtualization@lists.linux-foundation.org T: git git://anongit.freedesktop.org/drm/drm-misc S: Maintained F: drivers/gpu/drm/virtio/ F: include/uapi/linux/virtio_gpu.h VIRTIO HOST (VHOST) M: "Michael S. Tsirkin" M: Jason Wang L: kvm@vger.kernel.org L: virtualization@lists.linux-foundation.org L: netdev@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost.git S: Maintained F: drivers/vhost/ F: include/uapi/linux/vhost.h VIRTIO INPUT DRIVER M: Gerd Hoffmann S: Maintained F: drivers/virtio/virtio_input.c F: include/uapi/linux/virtio_input.h VIRTUAL BOX GUEST DEVICE DRIVER M: Hans de Goede M: Arnd Bergmann M: Greg Kroah-Hartman S: Maintained F: include/linux/vbox_utils.h F: include/uapi/linux/vbox*.h F: drivers/virt/vboxguest/ VIRTUAL SERIO DEVICE DRIVER M: Stephen Chandler Paul S: Maintained F: drivers/input/serio/userio.c F: include/uapi/linux/userio.h VIVID VIRTUAL VIDEO DRIVER M: Hans Verkuil L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git W: https://linuxtv.org S: Maintained F: drivers/media/platform/vivid/* VLYNQ BUS M: Florian Fainelli L: openwrt-devel@lists.openwrt.org (subscribers-only) S: Maintained F: drivers/vlynq/vlynq.c F: include/linux/vlynq.h VME SUBSYSTEM M: Martyn Welch M: Manohar Vanga M: Greg Kroah-Hartman L: devel@driverdev.osuosl.org S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc.git F: Documentation/driver-api/vme.rst F: drivers/staging/vme/ F: drivers/vme/ F: include/linux/vme* VMWARE BALLOON DRIVER M: Julien Freche M: Nadav Amit M: "VMware, Inc." L: linux-kernel@vger.kernel.org S: Maintained F: drivers/misc/vmw_balloon.c VMWARE HYPERVISOR INTERFACE M: Alok Kataria L: virtualization@lists.linux-foundation.org S: Supported F: arch/x86/kernel/cpu/vmware.c VMWARE PVRDMA DRIVER M: Adit Ranadive M: VMware PV-Drivers L: linux-rdma@vger.kernel.org S: Maintained F: drivers/infiniband/hw/vmw_pvrdma/ VMware PVSCSI driver M: Jim Gill M: VMware PV-Drivers L: linux-scsi@vger.kernel.org S: Maintained F: drivers/scsi/vmw_pvscsi.c F: drivers/scsi/vmw_pvscsi.h VMWARE VMMOUSE SUBDRIVER M: "VMware Graphics" M: "VMware, Inc." L: linux-input@vger.kernel.org S: Maintained F: drivers/input/mouse/vmmouse.c F: drivers/input/mouse/vmmouse.h VMWARE VMXNET3 ETHERNET DRIVER M: Ronak Doshi M: "VMware, Inc." 
L: netdev@vger.kernel.org S: Maintained F: drivers/net/vmxnet3/ VOCORE VOCORE2 BOARD M: Harvey Hunt L: linux-mips@vger.kernel.org S: Maintained F: arch/mips/boot/dts/ralink/vocore2.dts VOLTAGE AND CURRENT REGULATOR FRAMEWORK M: Liam Girdwood M: Mark Brown L: linux-kernel@vger.kernel.org W: http://www.slimlogic.co.uk/?p=48 T: git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/regulator.git S: Supported F: Documentation/devicetree/bindings/regulator/ F: Documentation/power/regulator/ F: drivers/regulator/ F: include/dt-bindings/regulator/ F: include/linux/regulator/ VRF M: David Ahern M: Shrijeet Mukherjee L: netdev@vger.kernel.org S: Maintained F: drivers/net/vrf.c F: Documentation/networking/vrf.txt VT1211 HARDWARE MONITOR DRIVER M: Juerg Haefliger L: linux-hwmon@vger.kernel.org S: Maintained F: Documentation/hwmon/vt1211 F: drivers/hwmon/vt1211.c VT8231 HARDWARE MONITOR DRIVER M: Roger Lucas L: linux-hwmon@vger.kernel.org S: Maintained F: drivers/hwmon/vt8231.c VUB300 USB to SDIO/SD/MMC bridge chip M: Tony Olech L: linux-mmc@vger.kernel.org L: linux-usb@vger.kernel.org S: Supported F: drivers/mmc/host/vub300.c W1 DALLAS'S 1-WIRE BUS M: Evgeniy Polyakov S: Maintained F: Documentation/devicetree/bindings/w1/ F: Documentation/w1/ F: drivers/w1/ F: include/linux/w1.h W83791D HARDWARE MONITORING DRIVER M: Marc Hulsman L: linux-hwmon@vger.kernel.org S: Maintained F: Documentation/hwmon/w83791d F: drivers/hwmon/w83791d.c W83793 HARDWARE MONITORING DRIVER M: Rudolf Marek L: linux-hwmon@vger.kernel.org S: Maintained F: Documentation/hwmon/w83793 F: drivers/hwmon/w83793.c W83795 HARDWARE MONITORING DRIVER M: Jean Delvare L: linux-hwmon@vger.kernel.org S: Maintained F: drivers/hwmon/w83795.c W83L51xD SD/MMC CARD INTERFACE DRIVER M: Pierre Ossman S: Maintained F: drivers/mmc/host/wbsd.* WACOM PROTOCOL 4 SERIAL TABLETS M: Julian Squires M: Hans de Goede L: linux-input@vger.kernel.org S: Maintained F: drivers/input/tablet/wacom_serial4.c WATCHDOG DEVICE DRIVERS M: Wim Van Sebroeck M: Guenter Roeck L: linux-watchdog@vger.kernel.org W: http://www.linux-watchdog.org/ T: git git://www.linux-watchdog.org/linux-watchdog.git S: Maintained F: Documentation/devicetree/bindings/watchdog/ F: Documentation/watchdog/ F: drivers/watchdog/ F: include/linux/watchdog.h F: include/uapi/linux/watchdog.h WHISKEYCOVE PMIC GPIO DRIVER M: Kuppuswamy Sathyanarayanan L: linux-gpio@vger.kernel.org S: Maintained F: drivers/gpio/gpio-wcove.c WIIMOTE HID DRIVER M: David Herrmann L: linux-input@vger.kernel.org S: Maintained F: drivers/hid/hid-wiimote* WILOCITY WIL6210 WIRELESS DRIVER M: Maya Erez L: linux-wireless@vger.kernel.org L: wil6210@qti.qualcomm.com S: Supported W: http://wireless.kernel.org/en/users/Drivers/wil6210 F: drivers/net/wireless/ath/wil6210/ WIMAX STACK M: Inaky Perez-Gonzalez M: linux-wimax@intel.com L: wimax@linuxwimax.org (subscribers-only) S: Supported W: http://linuxwimax.org F: Documentation/wimax/README.wimax F: include/linux/wimax/debug.h F: include/net/wimax.h F: include/uapi/linux/wimax.h F: net/wimax/ WINBOND CIR DRIVER M: David Härdeman S: Maintained F: drivers/media/rc/winbond-cir.c WINSYSTEMS EBC-C384 WATCHDOG DRIVER M: William Breathitt Gray L: linux-watchdog@vger.kernel.org S: Maintained F: drivers/watchdog/ebc-c384_wdt.c WINSYSTEMS WS16C48 GPIO DRIVER M: William Breathitt Gray L: linux-gpio@vger.kernel.org S: Maintained F: drivers/gpio/gpio-ws16c48.c WISTRON LAPTOP BUTTON DRIVER M: Miloslav Trmac S: Maintained F: drivers/input/misc/wistron_btns.c WL3501 WIRELESS PCMCIA CARD DRIVER L: 
linux-wireless@vger.kernel.org S: Odd fixes F: drivers/net/wireless/wl3501* WOLFSON MICROELECTRONICS DRIVERS L: patches@opensource.cirrus.com T: git https://github.com/CirrusLogic/linux-drivers.git W: https://github.com/CirrusLogic/linux-drivers/wiki S: Supported F: Documentation/hwmon/wm83?? F: Documentation/devicetree/bindings/extcon/extcon-arizona.txt F: Documentation/devicetree/bindings/regulator/arizona-regulator.txt F: Documentation/devicetree/bindings/mfd/arizona.txt F: Documentation/devicetree/bindings/mfd/wm831x.txt F: Documentation/devicetree/bindings/sound/wlf,arizona.txt F: arch/arm/mach-s3c64xx/mach-crag6410* F: drivers/clk/clk-wm83*.c F: drivers/extcon/extcon-arizona.c F: drivers/leds/leds-wm83*.c F: drivers/gpio/gpio-*wm*.c F: drivers/gpio/gpio-arizona.c F: drivers/hwmon/wm83??-hwmon.c F: drivers/input/misc/wm831x-on.c F: drivers/input/touchscreen/wm831x-ts.c F: drivers/input/touchscreen/wm97*.c F: drivers/mfd/arizona* F: drivers/mfd/wm*.c F: drivers/mfd/cs47l24* F: drivers/power/supply/wm83*.c F: drivers/rtc/rtc-wm83*.c F: drivers/regulator/wm8*.c F: drivers/regulator/arizona* F: drivers/video/backlight/wm83*_bl.c F: drivers/watchdog/wm83*_wdt.c F: include/linux/mfd/arizona/ F: include/linux/mfd/wm831x/ F: include/linux/mfd/wm8350/ F: include/linux/mfd/wm8400* F: include/linux/regulator/arizona* F: include/linux/wm97xx.h F: include/sound/wm????.h F: sound/soc/codecs/arizona.? F: sound/soc/codecs/wm* F: sound/soc/codecs/cs47l24* WORKQUEUE M: Tejun Heo R: Lai Jiangshan T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq.git S: Maintained F: include/linux/workqueue.h F: kernel/workqueue.c F: Documentation/core-api/workqueue.rst X-POWERS AXP288 PMIC DRIVERS M: Hans de Goede S: Maintained N: axp288 F: drivers/acpi/pmic/intel_pmic_xpower.c X-POWERS MULTIFUNCTION PMIC DEVICE DRIVERS M: Chen-Yu Tsai L: linux-kernel@vger.kernel.org S: Maintained N: axp[128] X.25 NETWORK LAYER M: Andrew Hendry L: linux-x25@vger.kernel.org S: Odd Fixes F: Documentation/networking/x25* F: include/net/x25* F: net/x25/ X86 ARCHITECTURE (32-BIT AND 64-BIT) M: Thomas Gleixner M: Ingo Molnar M: Borislav Petkov R: "H. 
Peter Anvin" M: x86@kernel.org L: linux-kernel@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/core S: Maintained F: Documentation/devicetree/bindings/x86/ F: Documentation/x86/ F: arch/x86/ X86 ENTRY CODE M: Andy Lutomirski L: linux-kernel@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/asm S: Maintained F: arch/x86/entry/ X86 MCE INFRASTRUCTURE M: Tony Luck M: Borislav Petkov L: linux-edac@vger.kernel.org S: Maintained F: arch/x86/kernel/cpu/mcheck/* X86 MICROCODE UPDATE SUPPORT M: Borislav Petkov S: Maintained F: arch/x86/kernel/cpu/microcode/* X86 MM M: Dave Hansen M: Andy Lutomirski M: Peter Zijlstra L: linux-kernel@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/mm S: Maintained F: arch/x86/mm/ X86 PLATFORM DRIVERS M: Darren Hart M: Andy Shevchenko L: platform-driver-x86@vger.kernel.org T: git git://git.infradead.org/linux-platform-drivers-x86.git S: Maintained F: drivers/platform/x86/ F: drivers/platform/olpc/ X86 VDSO M: Andy Lutomirski L: linux-kernel@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/vdso S: Maintained F: arch/x86/entry/vdso/ XARRAY M: Matthew Wilcox L: linux-fsdevel@vger.kernel.org S: Supported F: Documentation/core-api/xarray.rst F: lib/idr.c F: lib/xarray.c F: include/linux/idr.h F: include/linux/xarray.h F: tools/testing/radix-tree XBOX DVD IR REMOTE M: Benjamin Valentin S: Maintained F: drivers/media/rc/xbox_remote.c F: drivers/media/rc/keymaps/rc-xbox-dvd.c XC2028/3028 TUNER DRIVER M: Mauro Carvalho Chehab L: linux-media@vger.kernel.org W: https://linuxtv.org T: git git://linuxtv.org/media_tree.git S: Maintained F: drivers/media/tuners/tuner-xc2028.* XDP SOCKETS (AF_XDP) M: Björn Töpel M: Magnus Karlsson L: netdev@vger.kernel.org S: Maintained F: kernel/bpf/xskmap.c F: net/xdp/ XEN BLOCK SUBSYSTEM M: Konrad Rzeszutek Wilk M: Roger Pau Monné L: xen-devel@lists.xenproject.org (moderated for non-subscribers) S: Supported F: drivers/block/xen-blkback/* F: drivers/block/xen* XEN HYPERVISOR ARM M: Stefano Stabellini L: xen-devel@lists.xenproject.org (moderated for non-subscribers) S: Maintained F: arch/arm/xen/ F: arch/arm/include/asm/xen/ XEN HYPERVISOR ARM64 M: Stefano Stabellini L: xen-devel@lists.xenproject.org (moderated for non-subscribers) S: Maintained F: arch/arm64/xen/ F: arch/arm64/include/asm/xen/ XEN HYPERVISOR INTERFACE M: Boris Ostrovsky M: Juergen Gross R: Stefano Stabellini L: xen-devel@lists.xenproject.org (moderated for non-subscribers) T: git git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip.git S: Supported F: arch/x86/xen/ F: arch/x86/platform/pvh/ F: drivers/*/xen-*front.c F: drivers/xen/ F: arch/x86/include/asm/xen/ F: arch/x86/include/asm/pvclock-abi.h F: include/xen/ F: include/uapi/xen/ F: Documentation/ABI/stable/sysfs-hypervisor-xen F: Documentation/ABI/testing/sysfs-hypervisor-xen XEN NETWORK BACKEND DRIVER M: Wei Liu M: Paul Durrant L: xen-devel@lists.xenproject.org (moderated for non-subscribers) L: netdev@vger.kernel.org S: Supported F: drivers/net/xen-netback/* XEN PCI SUBSYSTEM M: Konrad Rzeszutek Wilk L: xen-devel@lists.xenproject.org (moderated for non-subscribers) S: Supported F: arch/x86/pci/*xen* F: drivers/pci/*xen* XEN PVSCSI DRIVERS M: Juergen Gross L: xen-devel@lists.xenproject.org (moderated for non-subscribers) L: linux-scsi@vger.kernel.org S: Supported F: drivers/scsi/xen-scsifront.c F: drivers/xen/xen-scsiback.c F: include/xen/interface/io/vscsiif.h XEN SWIOTLB 
SUBSYSTEM M: Konrad Rzeszutek Wilk L: xen-devel@lists.xenproject.org (moderated for non-subscribers) L: iommu@lists.linux-foundation.org S: Supported F: arch/x86/xen/*swiotlb* F: drivers/xen/*swiotlb* XEN SOUND FRONTEND DRIVER M: Oleksandr Andrushchenko L: xen-devel@lists.xenproject.org (moderated for non-subscribers) L: alsa-devel@alsa-project.org (moderated for non-subscribers) S: Supported F: sound/xen/* XFS FILESYSTEM M: Darrick J. Wong M: linux-xfs@vger.kernel.org L: linux-xfs@vger.kernel.org W: http://xfs.org/ T: git git://git.kernel.org/pub/scm/fs/xfs/xfs-linux.git S: Supported F: Documentation/filesystems/xfs.txt F: fs/xfs/ XILINX AXI ETHERNET DRIVER M: Anirudha Sarangi M: John Linn S: Maintained F: drivers/net/ethernet/xilinx/xilinx_axienet* XILINX UARTLITE SERIAL DRIVER M: Peter Korsgaard L: linux-serial@vger.kernel.org S: Maintained F: drivers/tty/serial/uartlite.c XILINX VIDEO IP CORES M: Hyun Kwon M: Laurent Pinchart L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git S: Supported F: Documentation/devicetree/bindings/media/xilinx/ F: drivers/media/platform/xilinx/ F: include/uapi/linux/xilinx-v4l2-controls.h XILLYBUS DRIVER M: Eli Billauer L: linux-kernel@vger.kernel.org S: Supported F: drivers/char/xillybus/ XLP9XX I2C DRIVER M: George Cherian M: Jan Glauber L: linux-i2c@vger.kernel.org W: http://www.cavium.com S: Supported F: drivers/i2c/busses/i2c-xlp9xx.c XRA1403 GPIO EXPANDER M: Nandor Han M: Semi Malinen L: linux-gpio@vger.kernel.org S: Maintained F: drivers/gpio/gpio-xra1403.c F: Documentation/devicetree/bindings/gpio/gpio-xra1403.txt XTENSA XTFPGA PLATFORM SUPPORT M: Max Filippov L: linux-xtensa@linux-xtensa.org S: Maintained F: drivers/spi/spi-xtensa-xtfpga.c F: sound/soc/xtensa/xtfpga-i2s.c YAM DRIVER FOR AX.25 M: Jean-Paul Roubelat L: linux-hams@vger.kernel.org S: Maintained F: drivers/net/hamradio/yam* F: include/linux/yam.h YAMA SECURITY MODULE M: Kees Cook T: git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git yama/tip S: Supported F: security/yama/ F: Documentation/admin-guide/LSM/Yama.rst YEALINK PHONE DRIVER M: Henk Vergonet L: usbb2k-api-dev@nongnu.org S: Maintained F: Documentation/input/devices/yealink.rst F: drivers/input/misc/yealink.* Z8530 DRIVER FOR AX.25 M: Joerg Reuter W: http://yaina.de/jreuter/ W: http://www.qsl.net/dl1bke/ L: linux-hams@vger.kernel.org S: Maintained F: Documentation/networking/z8530drv.txt F: drivers/net/hamradio/*scc.c F: drivers/net/hamradio/z8530.h ZBUD COMPRESSED PAGE ALLOCATOR M: Seth Jennings M: Dan Streetman L: linux-mm@kvack.org S: Maintained F: mm/zbud.c F: include/linux/zbud.h ZD1211RW WIRELESS DRIVER M: Daniel Drake M: Ulrich Kunitz W: http://zd1211.ath.cx/wiki/DriverRewrite L: linux-wireless@vger.kernel.org L: zd1211-devs@lists.sourceforge.net (subscribers-only) S: Maintained F: drivers/net/wireless/zydas/zd1211rw/ ZD1301 MEDIA DRIVER M: Antti Palosaari L: linux-media@vger.kernel.org W: https://linuxtv.org/ W: http://palosaari.fi/linux/ Q: https://patchwork.linuxtv.org/project/linux-media/list/ S: Maintained F: drivers/media/usb/dvb-usb-v2/zd1301* ZD1301_DEMOD MEDIA DRIVER M: Antti Palosaari L: linux-media@vger.kernel.org W: https://linuxtv.org/ W: http://palosaari.fi/linux/ Q: https://patchwork.linuxtv.org/project/linux-media/list/ S: Maintained F: drivers/media/dvb-frontends/zd1301_demod* ZPOOL COMPRESSED PAGE STORAGE API M: Dan Streetman L: linux-mm@kvack.org S: Maintained F: mm/zpool.c F: include/linux/zpool.h ZR36067 VIDEO FOR LINUX DRIVER L: mjpeg-users@lists.sourceforge.net L: 
linux-media@vger.kernel.org W: http://mjpeg.sourceforge.net/driver-zoran/ T: hg https://linuxtv.org/hg/v4l-dvb S: Odd Fixes F: drivers/staging/media/zoran/ ZRAM COMPRESSED RAM BLOCK DEVICE DRIVER M: Minchan Kim M: Nitin Gupta R: Sergey Senozhatsky L: linux-kernel@vger.kernel.org S: Maintained F: drivers/block/zram/ F: Documentation/blockdev/zram.txt ZS DECSTATION Z85C30 SERIAL DRIVER M: "Maciej W. Rozycki" S: Maintained F: drivers/tty/serial/zs.* ZSMALLOC COMPRESSED SLAB MEMORY ALLOCATOR M: Minchan Kim M: Nitin Gupta R: Sergey Senozhatsky L: linux-mm@kvack.org S: Maintained F: mm/zsmalloc.c F: include/linux/zsmalloc.h F: Documentation/vm/zsmalloc.rst ZSWAP COMPRESSED SWAP CACHING M: Seth Jennings M: Dan Streetman L: linux-mm@kvack.org S: Maintained F: mm/zswap.c THE REST M: Linus Torvalds L: linux-kernel@vger.kernel.org Q: http://patchwork.kernel.org/project/LKML/list/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git S: Buried alive in reporters F: * F: */ backport-iwlwifi-dkms-7906/Makefile000066400000000000000000000143131351064634500173430ustar00rootroot00000000000000# # Makefile for the output source package # ifeq ($(KERNELRELEASE),) MAKEFLAGS += --no-print-directory SHELL := /bin/bash BACKPORT_DIR := $(shell pwd) KMODDIR ?= updates ifneq ($(origin KLIB), undefined) KMODPATH_ARG := "INSTALL_MOD_PATH=$(KLIB)" else KLIB := /lib/modules/$(shell uname -r)/ KMODPATH_ARG := endif KLIB_BUILD ?= $(KLIB)/build/ KERNEL_CONFIG := $(KLIB_BUILD)/.config KERNEL_MAKEFILE := $(KLIB_BUILD)/Makefile CONFIG_MD5 := $(shell md5sum $(KERNEL_CONFIG) 2>/dev/null | sed 's/\s.*//') export KLIB KLIB_BUILD BACKPORT_DIR KMODDIR KMODPATH_ARG # disable built-in rules for this file .SUFFIXES: .PHONY: default default: @$(MAKE) modules .PHONY: mrproper mrproper: @test -f .config && $(MAKE) clean || true @rm -f .config @rm -f .kernel_config_md5 Kconfig.versions Kconfig.kernel @rm -f backport-include/backport/autoconf.h .DEFAULT: @set -e ; test -f local-symbols || ( \ echo "/--------------" ;\ echo "| You shouldn't run make in the backports tree, but only in" ;\ echo "| the generated output. This here is only the skeleton code" ;\ echo "| copied into the output directory. To use the backport system" ;\ echo "| from scratch, go into the top-level directory and run" ;\ echo "| ./gentree.py /path/to/linux-next/ /tmp/output" ;\ echo "| and then make menuconfig/... in the output directory. See" ;\ echo "| ./gentree.py --help" ;\ echo "| for more options." ;\ echo "\\--" ;\ false) @set -e ; test -f $(KERNEL_CONFIG) || ( \ echo "/--------------" ;\ echo "| Your kernel headers are incomplete/not installed." ;\ echo "| Please install kernel headers, including a .config" ;\ echo "| file or use the KLIB/KLIB_BUILD make variables to" ;\ echo "| set the kernel to build against, e.g." ;\ echo "| make KLIB=/lib/modules/3.1.7/" ;\ echo "| to compile/install for the installed kernel 3.1.7" ;\ echo "| (that isn't currently running.)" ;\ echo "\\--" ;\ false) @set -e ; if [ "$$(cat .kernel_config_md5 2>/dev/null)" != "$(CONFIG_MD5)" ] ;\ then \ echo -n "Generating local configuration database from kernel ..."
;\ grep -v -f local-symbols $(KERNEL_CONFIG) | grep = | ( \ while read l ; do \ if [ "$${l:0:7}" != "CONFIG_" ] ; then \ continue ;\ fi ;\ l=$${l:7} ;\ n=$${l%%=*} ;\ v=$${l#*=} ;\ if [ "$$v" = "m" ] ; then \ echo config $$n ;\ echo ' tristate' ;\ elif [ "$$v" = "y" ] ; then \ echo config $$n ;\ echo ' bool' ;\ else \ continue ;\ fi ;\ echo " default $$v" ;\ echo "" ;\ done \ ) > Kconfig.kernel ;\ kver=$$($(MAKE) --no-print-directory -C $(KLIB_BUILD) kernelversion | \ sed 's/^\(\([3-5]\|2\.6\)\.[0-9]\+\).*/\1/;t;d') ;\ test "$$kver" != "" || echo "Kernel version parse failed!" ;\ test "$$kver" != "" ;\ kvers="$$(seq 14 39 | sed 's/^/2.6./')" ;\ kvers="$$kvers $$(seq 0 19 | sed 's/^/3./')" ;\ kvers="$$kvers $$(seq 0 20 | sed 's/^/4./')" ;\ kvers="$$kvers $$(seq 0 99 | sed 's/^/5./')" ;\ print=0 ;\ for v in $$kvers ; do \ if [ "$$print" = "1" ] ; then \ echo config KERNEL_$$(echo $$v | tr . _) ;\ echo " def_bool y" ;\ fi ;\ if [ "$$v" = "$$kver" ] ; then print=1 ; fi ;\ done > Kconfig.versions ;\ # RHEL as well, sadly we need to grep for it ;\ RHEL_MAJOR=$$(grep '^RHEL_MAJOR' $(KERNEL_MAKEFILE) | \ sed 's/.*=\s*\([0-9]*\)/\1/;t;d') ;\ RHEL_MINOR=$$(grep '^RHEL_MINOR' $(KERNEL_MAKEFILE) | \ sed 's/.*=\s*\([0-9]*\)/\1/;t;d') ;\ for v in $$(seq 0 $$RHEL_MINOR) ; do \ echo config BACKPORT_RHEL_KERNEL_$${RHEL_MAJOR}_$$v ;\ echo " def_bool y" ;\ done >> Kconfig.versions ;\ echo " done." ;\ fi ;\ echo "$(CONFIG_MD5)" > .kernel_config_md5 @$(MAKE) -f Makefile.real "$@" .PHONY: defconfig-help defconfig-help: @echo "Driver or subsystem configuration targets:" @set -e ;\ bk_configs="$$(ls defconfigs/*)" ;\ for cfg in $$bk_configs; do \ echo " defconfig-$${cfg##defconfigs/}" ;\ done @echo "" .PHONY: help help: defconfig-help @echo "Cleaning targets:" @echo " clean - Remove most generated files but keep the config and" @echo " enough build support to build external modules" @echo " mrproper - Remove all generated files + config + various backup files" @echo "" @echo "Driver configuration help:" @echo " defconfig-help - List all prearranged defconfig-targets we have" @echo " designed for you. You can use this to find" @echo " driver specific configs in case all you really" @echo " need is to just compile one or a small group " @echo " of drivers." @echo "" @echo "Configuration targets:" @echo " menuconfig - Update current config utilising a menu based program" @echo " oldconfig - Update current config utilising a provided .config as base" @echo " oldaskconfig - ??" 
@echo " silentoldconfig - Same as oldconfig, but quietly, additionally update deps" @echo " allnoconfig - New config where all options are answered with no" @echo " allyesconfig - New config where all options are accepted with yes" @echo " allmodconfig - New config selecting modules when possible" @echo " alldefconfig - New config with all symbols set to default" @echo " randconfig - New config with random answer to all options" @echo " listnewconfig - List new options" @echo " olddefconfig - Same as silentoldconfig but sets new symbols to their default value" @echo "" @echo "Other generic targets:" @echo " all - Build all targets marked with [*]" @echo "* modules - Build all modules" @echo "" @echo "Architecture specific targets:" @echo " install - Install modules" @echo " uninstall - Uninstall modules" @echo "" @echo "Execute "make" or "make all" to build all targets marked with [*]" else include $(BACKPORT_DIR)/Makefile.kernel endif htmldocs: @make srctree=$(shell pwd) src=Documentation obj=Documentation -f Documentation/Makefile.sphinx htmldocs backport-iwlwifi-dkms-7906/Makefile.build000066400000000000000000000002601351064634500204350ustar00rootroot00000000000000-include .config export .PHONY: modules modules: @$(MAKE) -C $(KLIB_BUILD) M=$(BACKPORT_DIR) modules .PHONY: clean clean: @$(MAKE) -C $(KLIB_BUILD) M=$(BACKPORT_DIR) clean backport-iwlwifi-dkms-7906/Makefile.kernel000066400000000000000000000040721351064634500206230ustar00rootroot00000000000000ifeq ($(CONFIG_BACKPORT_INTEGRATE),) # Since 2.6.21, try-run is available, but cc-disable-warning # was only added later, so we add it here ourselves: backport-cc-disable-warning = $(call try-run,\ $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) -W$(strip $(1)) -c -x c /dev/null -o "$$TMP",-Wno-$(strip $(1))) backport-cc-disable-error = $(call try-run,\ $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) -W$(strip $(1)) -c -x c /dev/null -o "$$TMP",-Wno-error=$(strip $(1))) KBUILD_CFLAGS := $(KBUILD_CFLAGS) -Wformat-security \ $(call backport-cc-disable-error, date-time) \ $(call backport-cc-disable-warning, date-time) NOSTDINC_FLAGS := \ -I$(M)/backport-include/ \ -I$(M)/backport-include/uapi \ -I$(M)/include/ \ -I$(M)/include/uapi \ -include $(M)/backport-include/backport/backport.h \ $(call backport-cc-disable-warning, unused-but-set-variable) \ -DCPTCFG_VERSION=\"$(BACKPORTS_VERSION)\" \ -DCPTCFG_KERNEL_VERSION=\"$(BACKPORTED_KERNEL_VERSION)\" \ -DCPTCFG_KERNEL_NAME=\"$(BACKPORTED_KERNEL_NAME)\" \ $(BACKPORTS_GIT_TRACKER_DEF) \ $(BACKPORTS_BUILD_TSTAMP_DEF) $(BACKPORTS_BRANCH_TSTAMP_DEF) \ $(CFLAGS) export backport_srctree = $(M) else export BACKPORT_DIR = backports/ export backport_srctree = $(BACKPORT_DIR) NOSTDINC_FLAGS := \ -I$(BACKPORT_DIR)/backport-include/ \ -I$(BACKPORT_DIR)/backport-include/uapi \ -I$(BACKPORT_DIR)/include/ \ -I$(BACKPORT_DIR)/include/uapi \ -include $(BACKPORT_DIR)/backport-include/backport/backport.h \ $(CFLAGS) endif subdir-ccflags-y := $(call cc-option, -fno-pie) $(call cc-option, -no-pie) ifeq ($(CPTCFG_KERNEL_4_3),y) subdir-ccflags-y += -Wno-pointer-sign endif subdir-ccflags-y += -D__CHECK_ENDIAN__ -Wunused-but-set-variable obj-y += compat/ obj-$(CPTCFG_CFG80211) += net/wireless/ obj-$(CPTCFG_MAC80211) += net/mac80211/ obj-$(CPTCFG_WLAN) += drivers/net/wireless/ #obj-$(CPTCFG_SSB) += drivers/ssb/ #obj-$(CPTCFG_BCMA) += drivers/bcma/ #obj-$(CPTCFG_USB_NET_RNDIS_WLAN) += drivers/net/usb/ # #obj-$(CPTCFG_USB_WDM) += drivers/usb/class/ #obj-$(CPTCFG_USB_USBNET) += drivers/net/usb/ # #obj-$(CPTCFG_STAGING) += 
drivers/staging/ backport-iwlwifi-dkms-7906/Makefile.real000066400000000000000000000071761351064634500202760ustar00rootroot00000000000000include versions export BACKPORTS_VERSION BACKPORTED_KERNEL_VERSION BACKPORTED_KERNEL_NAME ifdef BACKPORTS_GIT_TRACKED export BACKPORTS_GIT_TRACKER_DEF=-DBACKPORTS_GIT_TRACKED=\"$(BACKPORTS_GIT_TRACKED)\" else export BACKPORTS_GIT_TRACKER_DEF= endif ifdef BACKPORTS_BUILD_TSTAMP export BACKPORTS_BUILD_TSTAMP_DEF="-DBACKPORTS_BUILD_TSTAMP=$(BACKPORTS_BUILD_TSTAMP)" else export BACKPORTS_BUILD_TSTAMP_DEF= endif ifdef BACKPORTS_BRANCH_TSTAMP export BACKPORTS_BRANCH_TSTAMP_DEF=-DBACKPORTS_BRANCH_TSTAMP=\"$(BACKPORTS_BRANCH_TSTAMP)\" else export BACKPORTS_BRANCH_TSTAMP_DEF= endif # disable built-in rules for this file .SUFFIXES: export CONFIG_=CPTCFG_ .PHONY: menuconfig menuconfig: @$(MAKE) -C kconf mconf @./kconf/mconf Kconfig .PHONY: listnewconfig oldaskconfig oldconfig \ silentoldconfig olddefconfig oldnoconfig \ allnoconfig allyesconfig allmodconfig \ alldefconfig randconfig listnewconfig oldaskconfig oldconfig \ silentoldconfig olddefconfig oldnoconfig \ allnoconfig allyesconfig allmodconfig \ alldefconfig randconfig: @$(MAKE) -C kconf conf @./kconf/conf --$@ Kconfig .PHONY: usedefconfig usedefconfig: @$(MAKE) -C kconf conf @./kconf/conf --defconfig=defconfig Kconfig .PHONY: savedefconfig savedefconfig: @$(MAKE) -C kconf conf @./kconf/conf --savedefconfig=defconfig Kconfig defconfig-%:: @$(MAKE) -C kconf conf @./kconf/conf --defconfig=defconfigs/$(@:defconfig-%=%) Kconfig .config: @test -f defconfig && $(MAKE) usedefconfig || ( \ echo "/--------------" ;\ echo "| Your backport package isn't configured, please configure it" ;\ echo "| using one of the following options:" ;\ echo "| To configure manually:" ;\ echo "| make oldconfig" ;\ echo "| make menuconfig" ;\ echo "|" ;\ echo "| To get defaults for certain drivers:" ;\ (cd defconfigs ; for f in $$(ls) ; do \ echo "| make defconfig-$$f" ;\ done ) ;\ echo "\--" ;\ false ) backport-include/backport/autoconf.h: .config Kconfig.versions Kconfig.kernel @$(MAKE) oldconfig @echo -n "Building backport-include/backport/autoconf.h ..." @grep -f local-symbols .config | ( \ echo "#ifndef COMPAT_AUTOCONF_INCLUDED" ;\ echo "#define COMPAT_AUTOCONF_INCLUDED" ;\ echo "/*" ;\ echo " * Automatically generated file, don't edit!" ;\ echo " * Changes will be overwritten" ;\ echo " */" ;\ echo "" ;\ while read l ; do \ n=$${l%%=*} ;\ v=$${l#*=} ;\ case $$v in \ y) echo "#define $$n 1" ;; \ m) echo "#define $${n}_MODULE 1" ;; \ \"*) echo "#define $$n $$v" ;; \ [0-9]*) echo "#define $$n $$v" ;; \ *) echo "#warning unknown value for $$n";;\ esac ;\ done ;\ echo "#endif /* COMPAT_AUTOCONF_INCLUDED */" ;\ ) > backport-include/backport/autoconf.h @echo " done." .PHONY: modules modules: backport-include/backport/autoconf.h @$(MAKE) -f Makefile.build modules .PHONY: install install: modules @$(MAKE) -C $(KLIB_BUILD) M=$(BACKPORT_DIR) \ INSTALL_MOD_DIR=$(KMODDIR) $(KMODPATH_ARG) \ modules_install @./scripts/blacklist.sh $(KLIB)/ $(KLIB)/$(KMODDIR) @./scripts/compress_modules.sh $(KLIB)/$(KMODDIR) @./scripts/check_depmod.sh @/sbin/depmod -a @./scripts/update-initramfs.sh $(KLIB) @echo @echo Your backported driver modules should be installed now. @echo Reboot. @echo .PHONY: modules_install modules_install: install .PHONY: uninstall uninstall: @./scripts/uninstall.sh @/sbin/depmod -a @./scripts/update-initramfs.sh $(KLIB) @echo @echo Your backported driver modules should be uninstalled now. @echo Reboot. 
@echo .PHONY: clean clean: @$(MAKE) -f Makefile.build clean @$(MAKE) -C kconf clean backport-iwlwifi-dkms-7906/backport-include/000077500000000000000000000000001351064634500211275ustar00rootroot00000000000000backport-iwlwifi-dkms-7906/backport-include/asm-generic/000077500000000000000000000000001351064634500233215ustar00rootroot00000000000000backport-iwlwifi-dkms-7906/backport-include/asm-generic/bug.h000066400000000000000000000021571351064634500242540ustar00rootroot00000000000000#ifndef __BACKPORT_ASM_GENERIC_BUG_H #define __BACKPORT_ASM_GENERIC_BUG_H #include_next #ifndef __WARN #define __WARN(foo) dump_stack() #endif #ifndef WARN_ONCE #define WARN_ONCE(condition, format...) ({ \ static int __warned; \ int __ret_warn_once = !!(condition); \ \ if (unlikely(__ret_warn_once)) \ if (WARN(!__warned, format)) \ __warned = 1; \ unlikely(__ret_warn_once); \ }) #endif #ifndef __WARN_printf /* * To port this properly we'd have to port warn_slowpath_null(), * which I'm lazy to do so just do a regular print for now. If you * want to port this read kernel/panic.c */ #define __WARN_printf(arg...) do { printk(arg); __WARN(); } while (0) #endif #ifndef WARN #define WARN(condition, format...) ({ \ int __ret_warn_on = !!(condition); \ if (unlikely(__ret_warn_on)) \ __WARN_printf(format); \ unlikely(__ret_warn_on); \ }) #endif #endif /* __BACKPORT_ASM_GENERIC_BUG_H */ backport-iwlwifi-dkms-7906/backport-include/asm-generic/pci-dma-compat.h000066400000000000000000000010151351064634500262620ustar00rootroot00000000000000#ifndef __BACKPORT_ASM_GENERIC_PCI_DMA_COMPAT_H #define __BACKPORT_ASM_GENERIC_PCI_DMA_COMPAT_H #include_next #if LINUX_VERSION_IS_LESS(3,17,0) #define pci_zalloc_consistent LINUX_BACKPORT(pci_zalloc_consistent) static inline void *pci_zalloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle) { void *ret = pci_alloc_consistent(hwdev, size, dma_handle); if (ret) memset(ret, 0, size); return ret; } #endif #endif /* __BACKPORT_ASM_GENERIC_PCI_DMA_COMPAT_H */ backport-iwlwifi-dkms-7906/backport-include/asm/000077500000000000000000000000001351064634500217075ustar00rootroot00000000000000backport-iwlwifi-dkms-7906/backport-include/asm/atomic.h000066400000000000000000000011221351064634500233300ustar00rootroot00000000000000#ifndef __BACKPORT_ASM_ATOMIC_H #define __BACKPORT_ASM_ATOMIC_H #include_next #include #include #if LINUX_VERSION_IS_LESS(3,1,0) /* * In many versions, several architectures do not seem to include an * atomic64_t implementation, and do not include the software emulation from * asm-generic/atomic64_t. * Detect and handle this here. 
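 *
 * As an illustration (bp_count is a made-up symbol, not part of this
 * header), driver code along these lines builds on such kernels only
 * because the generic emulation gets pulled in below:
 *
 *   static atomic64_t bp_count = ATOMIC64_INIT(0);
 *   atomic64_inc(&bp_count);
 *   pr_info("count=%lld\n", (long long)atomic64_read(&bp_count));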
*/ #if (!defined(ATOMIC64_INIT) && !defined(CONFIG_X86) && !(defined(CONFIG_ARM) && !defined(CONFIG_GENERIC_ATOMIC64))) #include <asm-generic/atomic64.h> #endif #endif /* __BACKPORT_ASM_ATOMIC_H */ backport-iwlwifi-dkms-7906/backport-include/asm/barrier.h000066400000000000000000000007101351064634500235060ustar00rootroot00000000000000#ifndef __BACKPORT_ASM_BARRIER_H #define __BACKPORT_ASM_BARRIER_H #include <linux/version.h> #if LINUX_VERSION_IS_GEQ(3,4,0) || \ defined(CONFIG_ALPHA) || defined(CONFIG_MIPS) #include_next <asm/barrier.h> #endif /* >= 3.4 */ #ifndef dma_rmb #define dma_rmb() rmb() #endif #ifndef dma_wmb #define dma_wmb() wmb() #endif #ifndef smp_mb__after_atomic #define smp_mb__after_atomic smp_mb__after_clear_bit #endif #endif /* __BACKPORT_ASM_BARRIER_H */ backport-iwlwifi-dkms-7906/backport-include/asm/errno.h000066400000000000000000000012701351064634500232050ustar00rootroot00000000000000#ifndef __BACKPORT_ASM_ERRNO_H #define __BACKPORT_ASM_ERRNO_H #include_next <asm/errno.h> #ifndef ERFKILL #if !defined(CONFIG_ALPHA) && !defined(CONFIG_MIPS) && !defined(CONFIG_PARISC) && !defined(CONFIG_SPARC) #define ERFKILL 132 /* Operation not possible due to RF-kill */ #endif #ifdef CONFIG_ALPHA #define ERFKILL 138 /* Operation not possible due to RF-kill */ #endif #ifdef CONFIG_MIPS #define ERFKILL 167 /* Operation not possible due to RF-kill */ #endif #ifdef CONFIG_PARISC #define ERFKILL 256 /* Operation not possible due to RF-kill */ #endif #ifdef CONFIG_SPARC #define ERFKILL 134 /* Operation not possible due to RF-kill */ #endif #endif #endif /* __BACKPORT_ASM_ERRNO_H */ backport-iwlwifi-dkms-7906/backport-include/asm/ioctls.h000066400000000000000000000002721351064634500233560ustar00rootroot00000000000000#ifndef __BACKPORT_ASM_IOCTLS_H #define __BACKPORT_ASM_IOCTLS_H #include_next <asm/ioctls.h> #ifndef TIOCPKT_IOCTL #define TIOCPKT_IOCTL 64 #endif #endif /* __BACKPORT_ASM_IOCTLS_H */ backport-iwlwifi-dkms-7906/backport-include/backport/000077500000000000000000000000001351064634500227345ustar00rootroot00000000000000backport-iwlwifi-dkms-7906/backport-include/backport/backport.h000066400000000000000000000005331351064634500247130ustar00rootroot00000000000000#ifndef __BACKPORT_H #define __BACKPORT_H #include <generated/autoconf.h> #ifndef CONFIG_BACKPORT_INTEGRATE #include <backport/autoconf.h> #endif #include <linux/kconfig.h> #ifndef __ASSEMBLY__ #define LINUX_BACKPORT(__sym) backport_ ##__sym #ifndef CONFIG_BACKPORT_INTEGRATE #include <backport/checks.h> #endif #endif #endif /* __BACKPORT_H */ backport-iwlwifi-dkms-7906/backport-include/backport/checks.h000066400000000000000000000005761351064634500243550ustar00rootroot00000000000000#ifndef __BACKPORT_CHECKS #define __BACKPORT_CHECKS #if defined(CONFIG_MAC80211) && defined(CPTCFG_MAC80211) #error "You must not have mac80211 built into your kernel if you want to enable it" #endif #if defined(CONFIG_CFG80211) && defined(CPTCFG_CFG80211) #error "You must not have cfg80211 built into your kernel if you want to enable it" #endif #endif /* __BACKPORT_CHECKS */ backport-iwlwifi-dkms-7906/backport-include/backport/leds-disabled.h000066400000000000000000000133041351064634500256020ustar00rootroot00000000000000#ifndef __BACKPORT_LED_DISABLED_SUPPORT #define __BACKPORT_LED_DISABLED_SUPPORT /* * LED support is strange, with the NEW_LEDS, LEDS_CLASS and LEDS_TRIGGERS * Kconfig symbols ... If any of them are not defined, we build our * "compatibility" code that really just makes it all non-working but * allows compilation.
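 *
 * For example, LED code along these lines (bp_led and bp_dev are
 * made-up names used purely for illustration) still compiles against
 * the stubs below, it just never drives a real LED:
 *
 *   static struct led_classdev bp_led = { .name = "bp::radio" };
 *   int err = led_classdev_register(bp_dev, &bp_led); // stub: returns 0
 *   led_set_brightness(&bp_led, LED_FULL);            // stub: no-op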
*/ #ifdef CPTCFG_BPAUTO_BUILD_LEDS #include #include #include #include #include #define led_classdev LINUX_BACKPORT(led_classdev) #define led_trigger LINUX_BACKPORT(led_trigger) struct led_classdev { const char *name; enum led_brightness brightness; enum led_brightness max_brightness; int flags; /* Lower 16 bits reflect status */ #ifndef LED_SUSPENDED #define LED_SUSPENDED (1 << 0) /* Upper 16 bits reflect control information */ #define LED_CORE_SUSPENDRESUME (1 << 16) #define LED_BLINK_ONESHOT (1 << 17) #define LED_BLINK_ONESHOT_STOP (1 << 18) #define LED_BLINK_INVERT (1 << 19) #define LED_SYSFS_DISABLE (1 << 20) #define SET_BRIGHTNESS_ASYNC (1 << 21) #define SET_BRIGHTNESS_SYNC (1 << 22) #define LED_DEV_CAP_FLASH (1 << 23) #endif /* Set LED brightness level */ /* Must not sleep, use a workqueue if needed */ void (*brightness_set)(struct led_classdev *led_cdev, enum led_brightness brightness); /* * Set LED brightness level immediately - it can block the caller for * the time required for accessing a LED device register. */ int (*brightness_set_sync)(struct led_classdev *led_cdev, enum led_brightness brightness); /* Get LED brightness level */ enum led_brightness (*brightness_get)(struct led_classdev *led_cdev); /* * Activate hardware accelerated blink, delays are in milliseconds * and if both are zero then a sensible default should be chosen. * The call should adjust the timings in that case and if it can't * match the values specified exactly. * Deactivate blinking again when the brightness is set to a fixed * value via the brightness_set() callback. */ int (*blink_set)(struct led_classdev *led_cdev, unsigned long *delay_on, unsigned long *delay_off); struct device *dev; const struct attribute_group **groups; struct list_head node; /* LED Device list */ const char *default_trigger; /* Trigger to use */ unsigned long blink_delay_on, blink_delay_off; struct timer_list blink_timer; int blink_brightness; void (*flash_resume)(struct led_classdev *led_cdev); struct work_struct set_brightness_work; int delayed_set_value; /* Protects the trigger data below */ struct rw_semaphore trigger_lock; struct led_trigger *trigger; struct list_head trig_list; void *trigger_data; /* true if activated - deactivate routine uses it to do cleanup */ bool activated; /* Ensures consistent access to the LED Flash Class device */ struct mutex led_access; }; struct led_trigger { const char *name; void (*activate)(struct led_classdev *led_cdev); void (*deactivate)(struct led_classdev *led_cdev); rwlock_t leddev_list_lock; struct list_head led_cdevs; struct list_head next_trig; }; #undef led_classdev_register #define led_classdev_register LINUX_BACKPORT(led_classdev_register) #undef led_classdev_unregister #define led_classdev_unregister LINUX_BACKPORT(led_classdev_unregister) #undef led_blink_set #define led_blink_set LINUX_BACKPORT(led_blink_set) #undef led_set_brightness #define led_set_brightness LINUX_BACKPORT(led_set_brightness) #undef led_classdev_suspend #define led_classdev_suspend LINUX_BACKPORT(led_classdev_suspend) #undef led_classdev_resume #define led_classdev_resume LINUX_BACKPORT(led_classdev_resume) #undef led_trigger_register #define led_trigger_register LINUX_BACKPORT(led_trigger_register) #undef led_trigger_unregister #define led_trigger_unregister LINUX_BACKPORT(led_trigger_unregister) #undef led_trigger_register_simple #define led_trigger_register_simple LINUX_BACKPORT(led_trigger_register_simple) #undef led_trigger_unregister_simple #define led_trigger_unregister_simple 
LINUX_BACKPORT(led_trigger_unregister_simple) #undef led_trigger_event #define led_trigger_event LINUX_BACKPORT(led_trigger_event) #undef DEFINE_LED_TRIGGER #define DEFINE_LED_TRIGGER(x) static struct led_trigger *x; static inline int led_classdev_register(struct device *parent, struct led_classdev *led_cdev) { return 0; } static inline void led_classdev_unregister(struct led_classdev *led_cdev) { } static inline void led_trigger_register_simple(const char *name, struct led_trigger **trigger) { } static inline void led_trigger_unregister_simple(struct led_trigger *trigger) { } static inline void led_blink_set(struct led_classdev *led_cdev, unsigned long *delay_on, unsigned long *delay_off) { } static inline void led_set_brightness(struct led_classdev *led_cdev, enum led_brightness brightness) { } static inline void led_classdev_suspend(struct led_classdev *led_cdev) { } static inline void led_classdev_resume(struct led_classdev *led_cdev) { } static inline int led_trigger_register(struct led_trigger *trigger) { INIT_LIST_HEAD(&trigger->led_cdevs); INIT_LIST_HEAD(&trigger->next_trig); rwlock_init(&trigger->leddev_list_lock); return 0; } static inline void led_trigger_unregister(struct led_trigger *trigger) { } static inline void led_trigger_event(struct led_trigger *trigger, enum led_brightness event) { } static inline void led_trigger_blink(struct led_trigger *trigger, unsigned long *delay_on, unsigned long *delay_off) { } static inline void led_trigger_blink_oneshot(struct led_trigger *trigger, unsigned long *delay_on, unsigned long *delay_off, int invert) { } #endif #endif /* __BACKPORT_LED_DISABLED_SUPPORT */ backport-iwlwifi-dkms-7906/backport-include/backport/magic.h000066400000000000000000000010021351064634500241600ustar00rootroot00000000000000/*
 * These tricks are taken from
 * http://efesx.com/2010/07/17/variadic-macro-to-count-number-of-arguments/
 * and
 * http://efesx.com/2010/08/31/overloading-macros/
 */
#define VA_NUM_ARGS(...) VA_NUM_ARGS_IMPL(__VA_ARGS__, 6,5,4,3,2,1)
#define VA_NUM_ARGS_IMPL(_1,_2,_3,_4,_5,_6,N,...) N

#define macro_dispatcher(func, ...) \
	macro_dispatcher_(func, VA_NUM_ARGS(__VA_ARGS__))
#define macro_dispatcher_(func, nargs) \
	macro_dispatcher__(func, nargs)
#define macro_dispatcher__(func, nargs) \
	func ## nargs
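/*
 * Worked example (illustration only, not used by this header): given a
 * hypothetical family of macros foo1(a), foo2(a, b), ..., the call
 *   macro_dispatcher(foo, x, y)(x, y)
 * expands VA_NUM_ARGS(x, y) via VA_NUM_ARGS_IMPL(x, y, 6,5,4,3,2,1)
 * to 2 and pastes the result onto "foo", yielding foo2(x, y).
 */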
backport-iwlwifi-dkms-7906/backport-include/crypto/000077500000000000000000000000001351064634500224475ustar00rootroot00000000000000backport-iwlwifi-dkms-7906/backport-include/crypto/aead.h000066400000000000000000000020101351064634500235030ustar00rootroot00000000000000#ifndef __BACKPORT_CRYPTO_AEAD_H #define __BACKPORT_CRYPTO_AEAD_H #include_next <crypto/aead.h> #include <linux/version.h> #if LINUX_VERSION_IS_LESS(4,2,0) #define aead_request_set_ad LINUX_BACKPORT(aead_request_set_ad) static inline void aead_request_set_ad(struct aead_request *req, unsigned int assoclen) { req->assoclen = assoclen; } #define crypto_aead_reqsize LINUX_BACKPORT(crypto_aead_reqsize) unsigned int crypto_aead_reqsize(struct crypto_aead *tfm); struct aead_request *crypto_backport_convert(struct aead_request *req); static inline int backport_crypto_aead_encrypt(struct aead_request *req) { return crypto_aead_encrypt(crypto_backport_convert(req)); } #define crypto_aead_encrypt LINUX_BACKPORT(crypto_aead_encrypt) static inline int backport_crypto_aead_decrypt(struct aead_request *req) { return crypto_aead_decrypt(crypto_backport_convert(req)); } #define crypto_aead_decrypt LINUX_BACKPORT(crypto_aead_decrypt) #endif /* LINUX_VERSION_IS_LESS(4,2,0) */ #endif /* __BACKPORT_CRYPTO_AEAD_H */ backport-iwlwifi-dkms-7906/backport-include/crypto/algapi.h000066400000000000000000000007661351064634500240660ustar00rootroot00000000000000#ifndef __BP_ALGAPI_H #define __BP_ALGAPI_H #include <linux/version.h> #include_next <crypto/algapi.h> #if LINUX_VERSION_IS_LESS(3,13,0) #define __crypto_memneq LINUX_BACKPORT(__crypto_memneq) noinline unsigned long __crypto_memneq(const void *a, const void *b, size_t size); #define crypto_memneq LINUX_BACKPORT(crypto_memneq) static inline int crypto_memneq(const void *a, const void *b, size_t size) { return __crypto_memneq(a, b, size) != 0UL ? 1 : 0; } #endif #endif /* __BP_ALGAPI_H */
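/*
 * Usage sketch for the crypto_memneq() backport above (computed_mac,
 * received_mac and mac_len are hypothetical locals): unlike memcmp(),
 * it does not return early, so timing does not reveal how many leading
 * bytes matched:
 *   if (crypto_memneq(computed_mac, received_mac, mac_len))
 *           return -EBADMSG;
 */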
1 : 0; } #endif #endif /* __BP_ALGAPI_H */ backport-iwlwifi-dkms-7906/backport-include/crypto/hash.h000066400000000000000000000022261351064634500235450ustar00rootroot00000000000000#ifndef _BACKPORT_CRYPTO_HASH_H #define _BACKPORT_CRYPTO_HASH_H #include_next #include #if LINUX_VERSION_IS_LESS(4,6,0) #define shash_desc_zero LINUX_BACKPORT(shash_desc_zero) static inline void shash_desc_zero(struct shash_desc *desc) { memzero_explicit(desc, sizeof(*desc) + crypto_shash_descsize(desc->tfm)); } #endif #if LINUX_VERSION_IS_LESS(4,6,0) #define ahash_request_zero LINUX_BACKPORT(ahash_request_zero) static inline void ahash_request_zero(struct ahash_request *req) { memzero_explicit(req, sizeof(*req) + crypto_ahash_reqsize(crypto_ahash_reqtfm(req))); } #endif #ifndef AHASH_REQUEST_ON_STACK #define AHASH_REQUEST_ON_STACK(name, ahash) \ char __##name##_desc[sizeof(struct ahash_request) + \ crypto_ahash_reqsize(ahash)] CRYPTO_MINALIGN_ATTR; \ struct ahash_request *name = (void *)__##name##_desc #endif #ifndef SHASH_DESC_ON_STACK #define SHASH_DESC_ON_STACK(shash, ctx) \ char __##shash##_desc[sizeof(struct shash_desc) + \ crypto_shash_descsize(ctx)] CRYPTO_MINALIGN_ATTR; \ struct shash_desc *shash = (struct shash_desc *)__##shash##_desc #endif #endif /* _BACKPORT_CRYPTO_HASH_H */ backport-iwlwifi-dkms-7906/backport-include/generated/000077500000000000000000000000001351064634500230655ustar00rootroot00000000000000backport-iwlwifi-dkms-7906/backport-include/generated/utsrelease.h000066400000000000000000000014621351064634500254150ustar00rootroot00000000000000#ifndef __BACKPORT_GENERATED_UTS_RELEASE_H #define __BACKPORT_GENERATED_UTS_RELEASE_H #include_next /* * We only want the UTS_UBUNTU_RELEASE_ABI var when we are on a normal * Ubuntu distribution kernel and not when we are on a Ubuntu mainline * kernel. Some of the Ubuntu mainline kernel do have an invalid octal * number in this field like 031418 and we do not want to evaluate this * at all on the Ubuntu mainline kernels. All Ubuntu distribution * kernel have CONFIG_VERSION_SIGNATURE set so this way we can detect * the which type of kernel we are on. 
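 *
 * Below, the ABI value is normalized to 0 whenever it is absent or
 * cannot be trusted, so later code can compare it numerically.  A
 * hedged sketch of such a consumer follows; BP_NEEDS_UBUNTU_QUIRK and
 * the ABI cutoff of 24 are invented for illustration and are not part
 * of the original header:
 */
#if 0 /* illustrative only, deliberately compiled out */
#if LINUX_VERSION_IS_GEQ(3,13,0) && UTS_UBUNTU_RELEASE_ABI >= 24
#define BP_NEEDS_UBUNTU_QUIRK 1
#endif
#endif
/*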
*/ #ifndef UTS_UBUNTU_RELEASE_ABI #define UTS_UBUNTU_RELEASE_ABI 0 #elif !defined(CONFIG_VERSION_SIGNATURE) #undef UTS_UBUNTU_RELEASE_ABI #define UTS_UBUNTU_RELEASE_ABI 0 #endif #endif /* __BACKPORT_GENERATED_UTS_RELEASE_H */ backport-iwlwifi-dkms-7906/backport-include/keys/000077500000000000000000000000001351064634500221025ustar00rootroot00000000000000backport-iwlwifi-dkms-7906/backport-include/keys/asymmetric-type.h000066400000000000000000000016211351064634500254070ustar00rootroot00000000000000#ifndef __BP_ASYMMETRIC_TYPE_H #define __BP_ASYMMETRIC_TYPE_H #ifdef CPTCFG_BPAUTO_BUILD_SYSTEM_DATA_VERIFICATION #include struct asymmetric_key_id { unsigned short len; unsigned char data[]; }; struct asymmetric_key_ids { struct asymmetric_key_id *id[2]; }; static inline bool asymmetric_key_id_same(const struct asymmetric_key_id *kid1, const struct asymmetric_key_id *kid2) { if (!kid1 || !kid2) return false; if (kid1->len != kid2->len) return false; return memcmp(kid1->data, kid2->data, kid1->len) == 0; } extern struct asymmetric_key_id * asymmetric_key_generate_id(const void *val_1, size_t len_1, const void *val_2, size_t len_2); extern struct key *find_asymmetric_key(struct key *keyring, const struct asymmetric_key_id *id_0, const struct asymmetric_key_id *id_1, bool partial); #endif #endif /* __BP_ASYMMETRIC_TYPE_H */ backport-iwlwifi-dkms-7906/backport-include/keys/system_keyring.h000066400000000000000000000004651351064634500253340ustar00rootroot00000000000000#ifndef __BP_SYSTEM_KEYRING_H #define __BP_SYSTEM_KEYRING_H #ifndef CPTCFG_BPAUTO_BUILD_SYSTEM_DATA_VERIFICATION #include_next #else #include #define is_hash_blacklisted(...) 0 #endif /* CPTCFG_BPAUTO_BUILD_SYSTEM_DATA_VERIFICATION */ #endif /* __BP_SYSTEM_KEYRING_H */ backport-iwlwifi-dkms-7906/backport-include/linux/000077500000000000000000000000001351064634500222665ustar00rootroot00000000000000backport-iwlwifi-dkms-7906/backport-include/linux/acpi.h000066400000000000000000000046511351064634500233610ustar00rootroot00000000000000#ifndef __BACKPORT_LINUX_ACPI_H #define __BACKPORT_LINUX_ACPI_H #include_next #include #if LINUX_VERSION_IS_LESS(3,8,0) /* * Backports * * commit 95f8a082b9b1ead0c2859f2a7b1ac91ff63d8765 * Author: Rafael J. Wysocki * Date: Wed Nov 21 00:21:50 2012 +0100 * * ACPI / driver core: Introduce struct acpi_dev_node and related macros * * To avoid adding an ACPI handle pointer to struct device on * architectures that don't use ACPI, or generally when CONFIG_ACPI is * not set, in which cases that pointer is useless, define struct * acpi_dev_node that will contain the handle pointer if CONFIG_ACPI is * set and will be empty otherwise and use it to represent the ACPI * device node field in struct device. * * In addition to that define macros for reading and setting the ACPI * handle of a device that don't generate code when CONFIG_ACPI is * unset. Modify the ACPI subsystem to use those macros instead of * referring to the given device's ACPI handle directly. * * Signed-off-by: Rafael J. 
Wysocki * Reviewed-by: Mika Westerberg * Acked-by: Greg Kroah-Hartman */ #ifdef CONFIG_ACPI #define ACPI_HANDLE(dev) DEVICE_ACPI_HANDLE(dev) #else #define ACPI_HANDLE(dev) (NULL) #endif /* CONFIG_ACPI */ #endif /* LINUX_VERSION_IS_LESS(3,8,0) */ #ifndef ACPI_COMPANION #ifdef CONFIG_ACPI static inline struct acpi_device *_acpi_get_companion(struct device *dev) { struct acpi_device *adev; int ret; ret = acpi_bus_get_device(ACPI_HANDLE(dev), &adev); if (ret < 0) adev = NULL; return adev; } #define ACPI_COMPANION(dev) _acpi_get_companion(dev) #else #define ACPI_COMPANION(dev) (NULL) #endif /* CONFIG_ACPI */ #endif /* ACPI_COMPANION */ #if LINUX_VERSION_IS_LESS(3,19,0) #define acpi_dev_remove_driver_gpios LINUX_BACKPORT(acpi_dev_remove_driver_gpios) static inline void acpi_dev_remove_driver_gpios(struct acpi_device *adev) {} #endif /* LINUX_VERSION_IS_LESS(3, 19, 0) */ #if LINUX_VERSION_IN_RANGE(3,19,0, 4,13,0) #define devm_acpi_dev_add_driver_gpios LINUX_BACKPORT(devm_acpi_dev_add_driver_gpios) static inline int devm_acpi_dev_add_driver_gpios(struct device *dev, const struct acpi_gpio_mapping *gpios) { return -ENXIO; } #endif /* LINUX_VERSION_IN_RANGE(3,19,0, 4,13,0) */ #endif /* __BACKPORT_LINUX_ACPI_H */ backport-iwlwifi-dkms-7906/backport-include/linux/atomic.h000066400000000000000000000036731351064634500237240ustar00rootroot00000000000000#ifndef __BP_ATOMIC_H #define __BP_ATOMIC_H #include_next /* atomic_cmpxchg_relaxed */ #ifndef atomic_cmpxchg_relaxed #define atomic_cmpxchg_relaxed atomic_cmpxchg #define atomic_cmpxchg_acquire atomic_cmpxchg #define atomic_cmpxchg_release atomic_cmpxchg #else /* atomic_cmpxchg_relaxed */ #ifndef atomic_cmpxchg_acquire #define atomic_cmpxchg_acquire(...) \ __atomic_op_acquire(atomic_cmpxchg, __VA_ARGS__) #endif #ifndef atomic_cmpxchg_release #define atomic_cmpxchg_release(...) \ __atomic_op_release(atomic_cmpxchg, __VA_ARGS__) #endif #ifndef atomic_cmpxchg #define atomic_cmpxchg(...) 
\ __atomic_op_fence(atomic_cmpxchg, __VA_ARGS__) #endif #endif /* atomic_cmpxchg_relaxed */ /* these were introduced together, so just a single check is enough */ #ifndef atomic_try_cmpxchg_acquire #ifndef atomic_try_cmpxchg #define __atomic_try_cmpxchg(type, _p, _po, _n) \ ({ \ typeof(_po) __po = (_po); \ typeof(*(_po)) __r, __o = *__po; \ __r = atomic_cmpxchg##type((_p), __o, (_n)); \ if (unlikely(__r != __o)) \ *__po = __r; \ likely(__r == __o); \ }) #define atomic_try_cmpxchg(_p, _po, _n) __atomic_try_cmpxchg(, _p, _po, _n) #define atomic_try_cmpxchg_relaxed(_p, _po, _n) __atomic_try_cmpxchg(_relaxed, _p, _po, _n) #define atomic_try_cmpxchg_acquire(_p, _po, _n) __atomic_try_cmpxchg(_acquire, _p, _po, _n) #define atomic_try_cmpxchg_release(_p, _po, _n) __atomic_try_cmpxchg(_release, _p, _po, _n) #else /* atomic_try_cmpxchg */ #define atomic_try_cmpxchg_relaxed atomic_try_cmpxchg #define atomic_try_cmpxchg_acquire atomic_try_cmpxchg #define atomic_try_cmpxchg_release atomic_try_cmpxchg #endif /* atomic_try_cmpxchg */ #endif /* atomic_try_cmpxchg_acquire */ #if LINUX_VERSION_IS_LESS(4,19,0) #ifndef atomic_fetch_add_unless static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u) { return __atomic_add_unless(v, a, u); } #endif #endif #endif /* __BP_ATOMIC_H */ backport-iwlwifi-dkms-7906/backport-include/linux/bcm47xx_nvram.h000066400000000000000000000012311351064634500251330ustar00rootroot00000000000000#ifndef __BACKPORTS_BCM47XX_NVRAM_H #define __BACKPORTS_BCM47XX_NVRAM_H #include #if LINUX_VERSION_IS_GEQ(4,1,0) #include_next #else #include #include #endif #if LINUX_VERSION_IS_LESS(4,2,0) #define bcm47xx_nvram_get_contents LINUX_BACKPORT(bcm47xx_nvram_get_contents) static inline char *bcm47xx_nvram_get_contents(size_t *val_len) { return NULL; } #define bcm47xx_nvram_release_contents LINUX_BACKPORT(bcm47xx_nvram_release_contents) static inline void bcm47xx_nvram_release_contents(char *nvram) { } #endif /* LINUX_VERSION_IS_GEQ(4,1,0) */ #endif /* __BACKPORTS_BCM47XX_NVRAM_H */ backport-iwlwifi-dkms-7906/backport-include/linux/bitops.h000066400000000000000000000011071351064634500237360ustar00rootroot00000000000000#ifndef __BACKPORT_BITOPS_H #define __BACKPORT_BITOPS_H #include_next #include #include #ifndef GENMASK /* * Create a contiguous bitmask starting at bit position @l and ending at * position @h. For example * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000. */ #define GENMASK(h, l) (((U32_C(1) << ((h) - (l) + 1)) - 1) << (l)) #define GENMASK_ULL(h, l) (((U64_C(1) << ((h) - (l) + 1)) - 1) << (l)) #endif #ifndef BIT_ULL #define BIT_ULL(nr) (1ULL << (nr)) #endif #endif /* __BACKPORT_BITOPS_H */ backport-iwlwifi-dkms-7906/backport-include/linux/bp-devcoredump.h000066400000000000000000000021601351064634500253520ustar00rootroot00000000000000#ifndef __BACKPORT_LINUX_DEVCOREDUMP_H #define __BACKPORT_LINUX_DEVCOREDUMP_H #include #include /* We only need to add our wrapper inside the range from 3.18 until * 4.6, outside that we can let our BPAUTO mechanism handle it. 
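 *
 * A hedged usage sketch follows; the bp_example_* names are invented
 * and only dev_coredumpm() itself comes from this header:
 */
#if 0 /* illustrative only, deliberately compiled out */
#include <linux/string.h>	/* memory_read_from_buffer() */
#include <linux/vmalloc.h>	/* vfree() */

static ssize_t bp_example_dump_read(char *buffer, loff_t offset, size_t count,
				    void *data, size_t datalen)
{
	/* Copy the requested window out of the captured dump buffer. */
	return memory_read_from_buffer(buffer, count, &offset, data, datalen);
}

static void bp_example_dump_free(void *data)
{
	vfree(data);
}

static void bp_example_report_crash(struct device *dev, void *dump, size_t len)
{
	/* The devcoredump core takes ownership of "dump" and releases it
	 * through the free callback once userspace has read it (or the
	 * core times out). */
	dev_coredumpm(dev, THIS_MODULE, dump, len, GFP_KERNEL,
		      bp_example_dump_read, bp_example_dump_free);
}
#endif
/*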
*/ #if (LINUX_VERSION_IS_GEQ(3,18,0) && \ LINUX_VERSION_IS_LESS(4,7,0)) || \ RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,6) static inline void backport_dev_coredumpm(struct device *dev, struct module *owner, void *data, size_t datalen, gfp_t gfp, ssize_t (*read_fn)(char *buffer, loff_t offset, size_t count, void *data, size_t datalen), void (*free_fn)(void *data)) { return dev_coredumpm(dev, owner, (const void *)data, datalen, gfp, (void *)read_fn, (void *)free_fn); } #define dev_coredumpm LINUX_BACKPORT(dev_coredumpm) #define dev_coredumpsg LINUX_BACKPORT(dev_coredumpsg) void dev_coredumpsg(struct device *dev, struct scatterlist *table, size_t datalen, gfp_t gfp); #endif /* (LINUX_VERSION_IS_GEQ(3,18,0) && \ LINUX_VERSION_IS_LESS(4,7,0)) */ #endif /* __BACKPORT_LINUX_DEVCOREDUMP_H */ backport-iwlwifi-dkms-7906/backport-include/linux/bug.h000066400000000000000000000006571351064634500232240ustar00rootroot00000000000000#ifndef __BP_BUG_H #define __BP_BUG_H #include_next #ifndef __BUILD_BUG_ON_NOT_POWER_OF_2 #ifdef __CHECKER__ #define __BUILD_BUG_ON_NOT_POWER_OF_2(n) (0) #else #define __BUILD_BUG_ON_NOT_POWER_OF_2(n) \ BUILD_BUG_ON(((n) & ((n) - 1)) != 0) #endif /* __CHECKER__ */ #endif /* __BUILD_BUG_ON_NOT_POWER_OF_2 */ #ifndef BUILD_BUG_ON_MSG #define BUILD_BUG_ON_MSG(x, msg) BUILD_BUG_ON(x) #endif #endif /* __BP_BUG_H */ backport-iwlwifi-dkms-7906/backport-include/linux/build_bug.h000066400000000000000000000003771351064634500244020ustar00rootroot00000000000000#ifndef __BP_BUILD_BUG_H #define __BP_BUILD_BUG_H #if LINUX_VERSION_IS_GEQ(4,13,0) #include_next #else /* LINUX_VERSION_IS_GEQ(4,13,0) */ #include #endif /* LINUX_VERSION_IS_GEQ(4,13,0) */ #endif /* __BP_BUILD_BUG_H */ backport-iwlwifi-dkms-7906/backport-include/linux/cache.h000066400000000000000000000002531351064634500235020ustar00rootroot00000000000000#ifndef _BACKPORT_CACHE_H #define _BACKPORT_CACHE_H #include_next #ifndef __ro_after_init #define __ro_after_init #endif #endif /* _BACKPORT_CACHE_H */ backport-iwlwifi-dkms-7906/backport-include/linux/clk.h000066400000000000000000000056501351064634500232160ustar00rootroot00000000000000#ifndef __BACKPORT_LINUX_CLK_H #define __BACKPORT_LINUX_CLK_H #include_next #include /* * commit 93abe8e4 - we only backport the non CONFIG_COMMON_CLK * case as the CONFIG_COMMON_CLK case requires arch support. By * using the backport_ namespace for older kernels we force usage * of these helpers and that's required given that 3.5 added some * of these helpers expecting a few exported symbols for the non * CONFIG_COMMON_CLK case. The 3.5 kernel is not supported as * per kernel.org so we don't send a fix upstream for that. */ #if LINUX_VERSION_IS_LESS(3,6,0) #ifndef CONFIG_COMMON_CLK /* * Whoopsie! * * clk_enable() and clk_disable() have been left without * a nop export symbols when !CONFIG_COMMON_CLK since its * introduction on v2.6.16, but fixed until 3.6. 
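 *
 * The net effect of the stubs below is that consumers may use the clk
 * API unconditionally.  A hedged sketch -- bp_example_probe() and the
 * 19.2 MHz rate are invented, not from the original header:
 */
#if 0 /* illustrative only, deliberately compiled out */
static int bp_example_probe(struct device *dev)
{
	/* On !CONFIG_COMMON_CLK kernels this returns the NULL stub... */
	struct clk *clk = devm_clk_get(dev, NULL);

	/* ...and the rate call collapses to a harmless no-op. */
	return clk_set_rate(clk, 19200000);
}
#endif
/*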
*/ #if 0 #define clk_enable LINUX_BACKPORT(clk_enable) static inline int clk_enable(struct clk *clk) { return 0; } #define clk_disable LINUX_BACKPORT(clk_disable) static inline void clk_disable(struct clk *clk) {} #endif #define clk_get LINUX_BACKPORT(clk_get) static inline struct clk *clk_get(struct device *dev, const char *id) { return NULL; } #define devm_clk_get LINUX_BACKPORT(devm_clk_get) static inline struct clk *devm_clk_get(struct device *dev, const char *id) { return NULL; } #define clk_put LINUX_BACKPORT(clk_put) static inline void clk_put(struct clk *clk) {} #define devm_clk_put LINUX_BACKPORT(devm_clk_put) static inline void devm_clk_put(struct device *dev, struct clk *clk) {} #define clk_get_rate LINUX_BACKPORT(clk_get_rate) static inline unsigned long clk_get_rate(struct clk *clk) { return 0; } #define clk_set_rate LINUX_BACKPORT(clk_set_rate) static inline int clk_set_rate(struct clk *clk, unsigned long rate) { return 0; } #define clk_round_rate LINUX_BACKPORT(clk_round_rate) static inline long clk_round_rate(struct clk *clk, unsigned long rate) { return 0; } #define clk_set_parent LINUX_BACKPORT(clk_set_parent) static inline int clk_set_parent(struct clk *clk, struct clk *parent) { return 0; } #define clk_get_parent LINUX_BACKPORT(clk_get_parent) static inline struct clk *clk_get_parent(struct clk *clk) { return NULL; } #endif /* CONFIG_COMMON_CLK */ #endif /* #if LINUX_VERSION_IS_LESS(3,0,0) */ #if LINUX_VERSION_IS_LESS(3,3,0) && \ LINUX_VERSION_IS_GEQ(3,2,0) #define clk_prepare_enable LINUX_BACKPORT(clk_prepare_enable) /* clk_prepare_enable helps cases using clk_enable in non-atomic context. */ static inline int clk_prepare_enable(struct clk *clk) { int ret; ret = clk_prepare(clk); if (ret) return ret; ret = clk_enable(clk); if (ret) clk_unprepare(clk); return ret; } #define clk_disable_unprepare LINUX_BACKPORT(clk_disable_unprepare) /* clk_disable_unprepare helps cases using clk_disable in non-atomic context. 
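 * It is simply the mirror image of clk_prepare_enable() above: it calls
 * clk_disable() first and then clk_unprepare(), so a probe/remove pair
 * can use the two helpers symmetrically.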
*/ static inline void clk_disable_unprepare(struct clk *clk) { clk_disable(clk); clk_unprepare(clk); } #endif /* < 3,3,0 && >= 3,2,0 */ #endif /* __LINUX_CLK_H */ backport-iwlwifi-dkms-7906/backport-include/linux/compat.h000066400000000000000000000010201351064634500237130ustar00rootroot00000000000000#ifndef __BACKPORT_COMPAT_H #define __BACKPORT_COMPAT_H #include_next #include #if LINUX_VERSION_IS_LESS(3,4,0) #ifdef CONFIG_X86_X32_ABI #define COMPAT_USE_64BIT_TIME \ (!!(task_pt_regs(current)->orig_ax & __X32_SYSCALL_BIT)) #else #define COMPAT_USE_64BIT_TIME 0 #endif #endif #if LINUX_VERSION_IS_LESS(3,4,0) #define compat_put_timespec LINUX_BACKPORT(compat_put_timespec) extern int compat_put_timespec(const struct timespec *, void __user *); #endif #endif /* __BACKPORT_COMPAT_H */ backport-iwlwifi-dkms-7906/backport-include/linux/compiler-gcc5.h000066400000000000000000000000411351064634500250630ustar00rootroot00000000000000#include backport-iwlwifi-dkms-7906/backport-include/linux/compiler-gcc6.h000066400000000000000000000000411351064634500250640ustar00rootroot00000000000000#include backport-iwlwifi-dkms-7906/backport-include/linux/compiler-gcc7.h000066400000000000000000000000411351064634500250650ustar00rootroot00000000000000#include backport-iwlwifi-dkms-7906/backport-include/linux/compiler-gccN.h000066400000000000000000000074151351064634500251300ustar00rootroot00000000000000/* gcc version specific checks */ #ifndef GCC_VERSION #define GCC_VERSION (__GNUC__ * 10000 \ + __GNUC_MINOR__ * 100 \ + __GNUC_PATCHLEVEL__) #endif #if GCC_VERSION < 30200 # error Sorry, your compiler is too old - please upgrade it. #endif #if GCC_VERSION < 30300 # define __used __attribute__((__unused__)) #else # define __used __attribute__((__used__)) #endif #ifdef CONFIG_GCOV_KERNEL # if GCC_VERSION < 30400 # error "GCOV profiling support for gcc versions below 3.4 not included" # endif /* __GNUC_MINOR__ */ #endif /* CONFIG_GCOV_KERNEL */ #if GCC_VERSION >= 30400 #define __must_check __attribute__((warn_unused_result)) #endif #if GCC_VERSION >= 40000 /* GCC 4.1.[01] miscompiles __weak */ #ifdef __KERNEL__ # if GCC_VERSION >= 40100 && GCC_VERSION <= 40101 # error Your version of gcc miscompiles the __weak directive # endif #endif #define __used __attribute__((__used__)) #define __compiler_offsetof(a, b) \ __builtin_offsetof(a, b) #if GCC_VERSION >= 40100 && GCC_VERSION < 40600 # define __compiletime_object_size(obj) __builtin_object_size(obj, 0) #endif #if GCC_VERSION >= 40300 /* Mark functions as cold. gcc will assume any path leading to a call * to them will be unlikely. This means a lot of manual unlikely()s * are unnecessary now for any paths leading to the usual suspects * like BUG(), printk(), panic() etc. [but let's keep them for now for * older compilers] * * Early snapshots of gcc 4.3 don't support this and we can't detect this * in the preprocessor, but we can live with this because they're unreleased. * Maketime probing would be overkill here. 
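 *
 * A hedged sketch of the intended use of __cold follows;
 * bp_example_die() is an invented name and not part of the original
 * header:
 */
#if 0 /* illustrative only, deliberately compiled out */
/* GCC_VERSION packs major/minor/patch: gcc 4.8.2 -> 40802. */
static void __cold bp_example_die(const char *why)
{
	/* Any path that calls a __cold function is assumed unlikely, so
	 * callers need no explicit unlikely() around the check. */
	panic("bp example: %s", why);
}
#endif
/*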
* * gcc also has a __attribute__((__hot__)) to move hot functions into * a special section, but I don't see any sense in this right now in * the kernel context */ #define __cold __attribute__((__cold__)) #define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__) #ifndef __CHECKER__ # define __compiletime_warning(message) __attribute__((warning(message))) # define __compiletime_error(message) __attribute__((error(message))) #endif /* __CHECKER__ */ #endif /* GCC_VERSION >= 40300 */ #if GCC_VERSION >= 40500 /* * Mark a position in code as unreachable. This can be used to * suppress control flow warnings after asm blocks that transfer * control elsewhere. * * Early snapshots of gcc 4.5 don't support this and we can't detect * this in the preprocessor, but we can live with this because they're * unreleased. Really, we need to have autoconf for the kernel. */ #define unreachable() __builtin_unreachable() /* Mark a function definition as prohibited from being cloned. */ #define __noclone __attribute__((__noclone__)) #endif /* GCC_VERSION >= 40500 */ #if GCC_VERSION >= 40600 /* * Tell the optimizer that something else uses this function or variable. */ #define __visible __attribute__((externally_visible)) #endif /* * GCC 'asm goto' miscompiles certain code sequences: * * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670 * * Work it around via a compiler barrier quirk suggested by Jakub Jelinek. * * (asm goto is automatically volatile - the naming reflects this.) */ #define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0) #ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP #if GCC_VERSION >= 40400 #define __HAVE_BUILTIN_BSWAP32__ #define __HAVE_BUILTIN_BSWAP64__ #endif #if GCC_VERSION >= 40800 || (defined(__powerpc__) && GCC_VERSION >= 40600) #define __HAVE_BUILTIN_BSWAP16__ #endif #endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */ #if GCC_VERSION >= 50000 #define KASAN_ABI_VERSION 4 #elif GCC_VERSION >= 40902 #define KASAN_ABI_VERSION 3 #endif #endif /* gcc version >= 40000 specific checks */ #ifndef OPTIMIZER_HIDE_VAR #define OPTIMIZER_HIDE_VAR(var) __asm__ ("" : "=r" (var) : "0" (var)) #endif backport-iwlwifi-dkms-7906/backport-include/linux/compiler.h000066400000000000000000000044361351064634500242600ustar00rootroot00000000000000#ifndef __BACKPORT_LINUX_COMPILER_H #define __BACKPORT_LINUX_COMPILER_H #include_next #ifndef __rcu #define __rcu #endif #ifndef __always_unused #ifdef __GNUC__ #define __always_unused __attribute__((unused)) #else #define __always_unused /* unimplemented */ #endif #endif #ifndef __PASTE /* Indirect macros required for expanded argument pasting, eg. __LINE__. */ #define ___PASTE(a,b) a##b #define __PASTE(a,b) ___PASTE(a,b) #endif /* Not-quite-unique ID. 
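 * (The __LINE__-based fallback below can collide when a macro that uses
 * __UNIQUE_ID() is expanded twice on the same source line; the
 * __COUNTER__-based variant used for newer compilers in
 * compiler-gccN.h cannot.)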
*/ #ifndef __UNIQUE_ID # define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__) #endif #ifndef barrier_data #ifdef __GNUC__ #define barrier_data(ptr) __asm__ __volatile__("": :"r"(ptr) :"memory") #else /* __GNUC__ */ # define barrier_data(ptr) barrier() #endif /* __GNUC__ */ #endif #ifndef READ_ONCE #include #define __READ_ONCE_SIZE \ ({ \ switch (size) { \ case 1: *(__u8 *)res = *(volatile __u8 *)p; break; \ case 2: *(__u16 *)res = *(volatile __u16 *)p; break; \ case 4: *(__u32 *)res = *(volatile __u32 *)p; break; \ case 8: *(__u64 *)res = *(volatile __u64 *)p; break; \ default: \ barrier(); \ __builtin_memcpy((void *)res, (const void *)p, size); \ barrier(); \ } \ }) static __always_inline void __read_once_size(const volatile void *p, void *res, int size) { __READ_ONCE_SIZE; } #define __READ_ONCE(x, check) \ ({ \ union { typeof(x) __val; char __c[1]; } __u; \ __read_once_size(&(x), __u.__c, sizeof(x)); \ __u.__val; \ }) #define READ_ONCE(x) __READ_ONCE(x, 1) static __always_inline void __write_once_size(volatile void *p, void *res, int size) { switch (size) { case 1: *(volatile __u8 *)p = *(__u8 *)res; break; case 2: *(volatile __u16 *)p = *(__u16 *)res; break; case 4: *(volatile __u32 *)p = *(__u32 *)res; break; case 8: *(volatile __u64 *)p = *(__u64 *)res; break; default: barrier(); __builtin_memcpy((void *)p, (const void *)res, size); barrier(); } } #define WRITE_ONCE(x, val) \ ({ \ union { typeof(x) __val; char __c[1]; } __u = \ { .__val = (__force typeof(x)) (val) }; \ __write_once_size(&(x), __u.__c, sizeof(x)); \ __u.__val; \ }) #endif #ifndef OPTIMIZER_HIDE_VAR #define OPTIMIZER_HIDE_VAR(var) barrier() #endif #endif /* __BACKPORT_LINUX_COMPILER_H */ backport-iwlwifi-dkms-7906/backport-include/linux/completion.h000066400000000000000000000012461351064634500246130ustar00rootroot00000000000000#ifndef __BACKPORT_COMPLETION_H #define __BACKPORT_COMPLETION_H #include_next #include #if LINUX_VERSION_IS_LESS(3,13,0) /** * reinit_completion - reinitialize a completion structure * @x: pointer to completion structure that is to be reinitialized * * This inline function should be used to reinitialize a completion structure so it can * be reused. This is especially important after complete_all() is used. */ #define reinit_completion LINUX_BACKPORT(reinit_completion) static inline void reinit_completion(struct completion *x) { x->done = 0; } #endif /* LINUX_VERSION_IS_LESS(3,13,0) */ #endif /* __BACKPORT_COMPLETION_H */ backport-iwlwifi-dkms-7906/backport-include/linux/cordic.h000066400000000000000000000046721351064634500237130ustar00rootroot00000000000000#ifndef _BACKPORT_LINUX_CORDIC_H #define _BACKPORT_LINUX_CORDIC_H 1 #include #if (LINUX_VERSION_CODE > KERNEL_VERSION(3,1,0)) #include_next #else /* * Copyright (c) 2011 Broadcom Corporation * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
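 *
 * A hedged usage sketch for the declaration below;
 * bp_example_cos45_q16() is an invented name, and it assumes theta is
 * passed in plain degrees as the kernel-doc below states:
 */
#if 0 /* illustrative only, deliberately compiled out */
static inline s32 bp_example_cos45_q16(void)
{
	struct cordic_iq iq = cordic_calc_iq(45);

	/* iq.i is cos(45 deg) scaled by 2^16, i.e. roughly 46341. */
	return iq.i;
}
#endif
/*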
*/ #ifndef __CORDIC_H_ #define __CORDIC_H_ #include /** * struct cordic_iq - i/q coordinate. * * @i: real part of coordinate (in phase). * @q: imaginary part of coordinate (quadrature). */ struct cordic_iq { s32 i; s32 q; }; /** * cordic_calc_iq() - calculates the i/q coordinate for given angle. * * @theta: angle in degrees for which i/q coordinate is to be calculated. * @coord: function output parameter holding the i/q coordinate. * * The function calculates the i/q coordinate for a given angle using * cordic algorithm. The coordinate consists of a real (i) and an * imaginary (q) part. The real part is essentially the cosine of the * angle and the imaginary part is the sine of the angle. The returned * values are scaled by 2^16 for precision. The range for theta is * for -180 degrees to +180 degrees. Passed values outside this range are * converted before doing the actual calculation. */ #define cordic_calc_iq LINUX_BACKPORT(cordic_calc_iq) struct cordic_iq cordic_calc_iq(s32 theta); #endif /* __CORDIC_H_ */ #endif /* LINUX_VERSION_CODE > KERNEL_VERSION(3,1,0)) */ #ifndef CORDIC_FLOAT #define CORDIC_ANGLE_GEN 39797 #define CORDIC_PRECISION_SHIFT 16 #define CORDIC_NUM_ITER (CORDIC_PRECISION_SHIFT + 2) #define CORDIC_FIXED(X) ((s32)((X) << CORDIC_PRECISION_SHIFT)) #define CORDIC_FLOAT(X) (((X) >= 0) \ ? ((((X) >> (CORDIC_PRECISION_SHIFT - 1)) + 1) >> 1) \ : -((((-(X)) >> (CORDIC_PRECISION_SHIFT - 1)) + 1) >> 1)) #endif #endif /* _BACKPORT_LINUX_CORDIC_H */ backport-iwlwifi-dkms-7906/backport-include/linux/crc7.h000066400000000000000000000005421351064634500232760ustar00rootroot00000000000000#ifndef _BACKPORT_LINUX_CRC7_H #define _BACKPORT_LINUX_CRC7_H #include_next #include #if LINUX_VERSION_IS_LESS(3,16,0) #define crc7_be LINUX_BACKPORT(crc7_be) static inline u8 crc7_be(u8 crc, const u8 *buffer, size_t len) { return crc7(crc, buffer, len) << 1; } #endif /* < 3.16 */ #endif /* _BACKPORT_LINUX_CRC7_H */ backport-iwlwifi-dkms-7906/backport-include/linux/debugfs.h000066400000000000000000000037321351064634500240630ustar00rootroot00000000000000#ifndef __BACKPORT_DEBUGFS_H_ #define __BACKPORT_DEBUGFS_H_ #include_next #include #include #include #if LINUX_VERSION_IS_LESS(3,19,0) #define debugfs_create_devm_seqfile LINUX_BACKPORT(debugfs_create_devm_seqfile) #if defined(CONFIG_DEBUG_FS) struct dentry *debugfs_create_devm_seqfile(struct device *dev, const char *name, struct dentry *parent, int (*read_fn)(struct seq_file *s, void *data)); #else static inline struct dentry *debugfs_create_devm_seqfile(struct device *dev, const char *name, struct dentry *parent, int (*read_fn)(struct seq_file *s, void *data)) { return ERR_PTR(-ENODEV); } #endif /* CONFIG_DEBUG_FS */ #endif /* LINUX_VERSION_IS_LESS(3,19,0) */ #if LINUX_VERSION_IS_LESS(4,4,0) #define debugfs_create_bool LINUX_BACKPORT(debugfs_create_bool) #ifdef CONFIG_DEBUG_FS struct dentry *debugfs_create_bool(const char *name, umode_t mode, struct dentry *parent, bool *value); #else static inline struct dentry * debugfs_create_bool(const char *name, umode_t mode, struct dentry *parent, bool *value) { return ERR_PTR(-ENODEV); } #endif #endif /* LINUX_VERSION_IS_LESS(4,4,0) */ #if LINUX_VERSION_IS_LESS(4,9,0) && \ !LINUX_VERSION_IN_RANGE(4,8,4, 4,9,0) && \ !LINUX_VERSION_IN_RANGE(4,7,10, 4,8,0) static inline const struct file_operations * debugfs_real_fops(const struct file *filp) { /* * Neither the pointer to the struct file_operations, nor its * contents ever change -- srcu_dereference() is not needed here. 
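	 * (This mirrors the upstream debugfs_real_fops() helper; the
	 * version guard above excludes exactly the kernels -- 4.9+,
	 * 4.8.4+ and 4.7.10+ -- that already ship it.)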
*/ return filp->f_path.dentry->d_fsdata; } #endif /* <4.9.0 but not >= 4.8.4, 4.7.10 */ #ifndef DEFINE_DEBUGFS_ATTRIBUTE #define DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt) \ DEFINE_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt) #define debugfs_create_file_unsafe(name, mode, parent, data, fops) \ debugfs_create_file(name, mode, parent, data, fops) #endif #endif /* __BACKPORT_DEBUGFS_H_ */ backport-iwlwifi-dkms-7906/backport-include/linux/device.h000066400000000000000000000207101351064634500236760ustar00rootroot00000000000000#ifndef __BACKPORT_DEVICE_H #define __BACKPORT_DEVICE_H #include #include_next #include /* * string.h is usually included from the asm/ folder in most configuration, * but on some older kernels it doesn't. As we're using memcpy() in the code * below, we need to be safe and make sure string.h is indeed there. */ #include #if LINUX_VERSION_IS_LESS(3,9,0) /* backport * commit 9f3b795a626ee79574595e06d1437fe0c7d51d29 * Author: Michał Mirosław * Date: Fri Feb 1 20:40:17 2013 +0100 * * driver-core: constify data for class_find_device() */ typedef int (backport_device_find_function_t)(struct device *, void *); #define class_find_device(cls, start, idx, fun) \ class_find_device((cls), (start), (void *)(idx),\ (backport_device_find_function_t *)(fun)) #endif #ifndef module_driver /** * module_driver() - Helper macro for drivers that don't do anything * special in module init/exit. This eliminates a lot of boilerplate. * Each module may only use this macro once, and calling it replaces * module_init() and module_exit(). * * Use this macro to construct bus specific macros for registering * drivers, and do not use it on its own. */ #define module_driver(__driver, __register, __unregister) \ static int __init __driver##_init(void) \ { \ return __register(&(__driver)); \ } \ module_init(__driver##_init); \ static void __exit __driver##_exit(void) \ { \ __unregister(&(__driver)); \ } \ module_exit(__driver##_exit); #endif #if LINUX_VERSION_IS_LESS(3,9,0) #define devm_ioremap_resource LINUX_BACKPORT(devm_ioremap_resource) void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res); #endif #if LINUX_VERSION_IS_LESS(3,5,0) && \ LINUX_VERSION_IS_GEQ(3,2,0) #define devres_release LINUX_BACKPORT(devres_release) extern int devres_release(struct device *dev, dr_release_t release, dr_match_t match, void *match_data); #endif #if LINUX_VERSION_IS_LESS(3,5,0) #include #define dev_level_ratelimited(dev_level, dev, fmt, ...) \ do { \ static DEFINE_RATELIMIT_STATE(_rs, \ DEFAULT_RATELIMIT_INTERVAL, \ DEFAULT_RATELIMIT_BURST); \ if (__ratelimit(&_rs)) \ dev_level(dev, fmt, ##__VA_ARGS__); \ } while (0) #define dev_emerg_ratelimited(dev, fmt, ...) \ dev_level_ratelimited(dev_emerg, dev, fmt, ##__VA_ARGS__) #define dev_alert_ratelimited(dev, fmt, ...) \ dev_level_ratelimited(dev_alert, dev, fmt, ##__VA_ARGS__) #define dev_crit_ratelimited(dev, fmt, ...) \ dev_level_ratelimited(dev_crit, dev, fmt, ##__VA_ARGS__) #define dev_err_ratelimited(dev, fmt, ...) \ dev_level_ratelimited(dev_err, dev, fmt, ##__VA_ARGS__) #define dev_warn_ratelimited(dev, fmt, ...) \ dev_level_ratelimited(dev_warn, dev, fmt, ##__VA_ARGS__) #define dev_notice_ratelimited(dev, fmt, ...) \ dev_level_ratelimited(dev_notice, dev, fmt, ##__VA_ARGS__) #define dev_info_ratelimited(dev, fmt, ...) \ dev_level_ratelimited(dev_info, dev, fmt, ##__VA_ARGS__) #if defined(CONFIG_DYNAMIC_DEBUG) || defined(DEBUG) #define dev_dbg_ratelimited(dev, fmt, ...) 
\ do { \ static DEFINE_RATELIMIT_STATE(_rs, \ DEFAULT_RATELIMIT_INTERVAL, \ DEFAULT_RATELIMIT_BURST); \ DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \ if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) && \ __ratelimit(&_rs)) \ __dynamic_pr_debug(&descriptor, pr_fmt(fmt), \ ##__VA_ARGS__); \ } while (0) #else #define dev_dbg_ratelimited(dev, fmt, ...) \ no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) #endif /* dynamic debug */ #endif /* <= 3.5 */ #if LINUX_VERSION_CODE <= KERNEL_VERSION(3,6,0) static inline void backport_device_release_driver(struct device *dev) { device_release_driver(dev); device_lock(dev); dev_set_drvdata(dev, NULL); device_unlock(dev); } #define device_release_driver LINUX_BACKPORT(device_release_driver) #define kobj_to_dev LINUX_BACKPORT(kobj_to_dev) static inline struct device *kobj_to_dev(struct kobject *kobj) { return container_of(kobj, struct device, kobj); } #endif /* LINUX_VERSION_CODE <= KERNEL_VERSION(3,6,0) */ #if LINUX_VERSION_IS_LESS(3,10,0) int devm_add_action(struct device *dev, void (*action) (void *), void *data); #endif #if LINUX_VERSION_IS_LESS(3,11,0) && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0) #ifndef DEVICE_ATTR_RO #define DEVICE_ATTR_RO(_name) \ struct device_attribute dev_attr_ ## _name = __ATTR_RO(_name); #endif #ifndef DEVICE_ATTR_RW #define DEVICE_ATTR_RW(_name) \ struct device_attribute dev_attr_ ## _name = __ATTR_RW(_name) #endif #endif #ifndef CLASS_ATTR_RW #define CLASS_ATTR_RW(_name) \ struct class_attribute class_attr_##_name = __ATTR_RW(_name) #endif #ifndef CLASS_ATTR_RO #define CLASS_ATTR_RO(_name) \ struct class_attribute class_attr_##_name = __ATTR_RO(_name) #endif #define ATTRIBUTE_GROUPS_BACKPORT(_name) \ static struct BP_ATTR_GRP_STRUCT _name##_dev_attrs[ARRAY_SIZE(_name##_attrs)];\ static void init_##_name##_attrs(void) \ { \ int i; \ for (i = 0; _name##_attrs[i]; i++) \ _name##_dev_attrs[i] = \ *container_of(_name##_attrs[i], \ struct BP_ATTR_GRP_STRUCT, \ attr); \ } #ifndef __ATTRIBUTE_GROUPS #define __ATTRIBUTE_GROUPS(_name) \ static const struct attribute_group *_name##_groups[] = { \ &_name##_group, \ NULL, \ } #endif /* __ATTRIBUTE_GROUPS */ #undef ATTRIBUTE_GROUPS #define ATTRIBUTE_GROUPS(_name) \ static const struct attribute_group _name##_group = { \ .attrs = _name##_attrs, \ }; \ static inline void init_##_name##_attrs(void) {} \ __ATTRIBUTE_GROUPS(_name) #if LINUX_VERSION_IS_LESS(3,13,0) #define devm_kmalloc(dev, size, flags) devm_kzalloc(dev, size, flags) #endif #if LINUX_VERSION_IS_LESS(3,15,0) #define devm_kstrdup LINUX_BACKPORT(devm_kstrdup) extern char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp); #endif #if LINUX_VERSION_IS_LESS(3,13,0) #define devm_kmalloc_array LINUX_BACKPORT(devm_kmalloc_array) static inline void *devm_kmalloc_array(struct device *dev, size_t n, size_t size, gfp_t flags) { if (size != 0 && n > SIZE_MAX / size) return NULL; return devm_kmalloc(dev, n * size, flags); } #define devm_kcalloc LINUX_BACKPORT(devm_kcalloc) static inline void *devm_kcalloc(struct device *dev, size_t n, size_t size, gfp_t flags) { return devm_kmalloc_array(dev, n, size, flags); } #endif #if LINUX_VERSION_IS_LESS(3,16,0) #define devm_kmemdup LINUX_BACKPORT(devm_kmemdup) static inline void *devm_kmemdup(struct device *dev, const void *src, size_t len, gfp_t gfp) { void *p; p = devm_kmalloc(dev, len, gfp); if (p) memcpy(p, src, len); return p; } #endif #ifndef dev_level_once #ifdef CONFIG_PRINTK #define dev_level_once(dev_level, dev, fmt, ...) 
\ do { \ static bool __print_once __read_mostly; \ \ if (!__print_once) { \ __print_once = true; \ dev_level(dev, fmt, ##__VA_ARGS__); \ } \ } while (0) #else #define dev_level_once(dev_level, dev, fmt, ...) \ do { \ if (0) \ dev_level(dev, fmt, ##__VA_ARGS__); \ } while (0) #endif #define dev_emerg_once(dev, fmt, ...) \ dev_level_once(dev_emerg, dev, fmt, ##__VA_ARGS__) #define dev_alert_once(dev, fmt, ...) \ dev_level_once(dev_alert, dev, fmt, ##__VA_ARGS__) #define dev_crit_once(dev, fmt, ...) \ dev_level_once(dev_crit, dev, fmt, ##__VA_ARGS__) #define dev_err_once(dev, fmt, ...) \ dev_level_once(dev_err, dev, fmt, ##__VA_ARGS__) #define dev_warn_once(dev, fmt, ...) \ dev_level_once(dev_warn, dev, fmt, ##__VA_ARGS__) #define dev_notice_once(dev, fmt, ...) \ dev_level_once(dev_notice, dev, fmt, ##__VA_ARGS__) #define dev_info_once(dev, fmt, ...) \ dev_level_once(dev_info, dev, fmt, ##__VA_ARGS__) #define dev_dbg_once(dev, fmt, ...) \ dev_level_once(dev_dbg, dev, fmt, ##__VA_ARGS__) #endif /* dev_level_once */ #if LINUX_VERSION_IS_LESS(3,17,0) #define devm_kvasprintf LINUX_BACKPORT(devm_kvasprintf) extern char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt, va_list ap); #define devm_kasprintf LINUX_BACKPORT(devm_kasprintf) extern char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...); #endif /* < 3.17 */ #if LINUX_VERSION_IS_LESS(4, 1, 0) #define dev_of_node LINUX_BACKPORT(dev_of_node) static inline struct device_node *dev_of_node(struct device *dev) { #ifndef CONFIG_OF return NULL; #else return dev->of_node; #endif } #endif #endif /* __BACKPORT_DEVICE_H */ backport-iwlwifi-dkms-7906/backport-include/linux/dma-buf.h000066400000000000000000000031151351064634500237520ustar00rootroot00000000000000#ifndef _BACKPORT_DMA_BUF_H__ #define _BACKPORT_DMA_BUF_H__ #include #if LINUX_VERSION_IS_GEQ(3,3,0) #include_next #endif /* LINUX_VERSION_IS_GEQ(3,3,0) */ #include #include #include #if !defined(DEFINE_DMA_BUF_EXPORT_INFO) && LINUX_VERSION_IS_GEQ(3,3,0) /** * helper macro for exporters; zeros and fills in most common values */ #define DEFINE_DMA_BUF_EXPORT_INFO(a) \ struct dma_buf_export_info a = { .exp_name = KBUILD_MODNAME } struct dma_buf_export_info { const char *exp_name; const struct dma_buf_ops *ops; size_t size; int flags; struct reservation_object *resv; void *priv; }; #ifdef dma_buf_export #undef dma_buf_export #endif static inline struct dma_buf *backport_dma_buf_export(const struct dma_buf_export_info *exp_info) { #if LINUX_VERSION_IS_LESS(3,4,0) return dma_buf_export(exp_info->priv, (struct dma_buf_ops *)exp_info->ops, exp_info->size, exp_info->flags); #elif LINUX_VERSION_IS_LESS(3,10,0) return dma_buf_export(exp_info->priv, exp_info->ops, exp_info->size, exp_info->flags); #elif LINUX_VERSION_IS_LESS(3,17,0) return dma_buf_export_named(exp_info->priv, exp_info->ops, exp_info->size, exp_info->flags, exp_info->exp_name); #else return dma_buf_export_named(exp_info->priv, exp_info->ops, exp_info->size, exp_info->flags, exp_info->exp_name, exp_info->resv); #endif } #define dma_buf_export LINUX_BACKPORT(dma_buf_export) #endif /* !defined(DEFINE_DMA_BUF_EXPORT_INFO) */ #endif /* _BACKPORT_DMA_BUF_H__ */ backport-iwlwifi-dkms-7906/backport-include/linux/dma-mapping.h000066400000000000000000000021401351064634500246260ustar00rootroot00000000000000#ifndef __BACKPORT_LINUX_DMA_MAPPING_H #define __BACKPORT_LINUX_DMA_MAPPING_H #include_next #include #if LINUX_VERSION_IS_LESS(3,2,0) #define dma_zalloc_coherent LINUX_BACKPORT(dma_zalloc_coherent) static inline void 
*dma_zalloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag) { void *ret = dma_alloc_coherent(dev, size, dma_handle, flag); if (ret) memset(ret, 0, size); return ret; } #endif #if LINUX_VERSION_IS_LESS(3,13,0) /* * Set both the DMA mask and the coherent DMA mask to the same thing. * Note that we don't check the return value from dma_set_coherent_mask() * as the DMA API guarantees that the coherent DMA mask can be set to * the same or smaller than the streaming DMA mask. */ #define dma_set_mask_and_coherent LINUX_BACKPORT(dma_set_mask_and_coherent) static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask) { int rc = dma_set_mask(dev, mask); if (rc == 0) dma_set_coherent_mask(dev, mask); return rc; } #endif /* LINUX_VERSION_IS_LESS(3,13,0) */ #endif /* __BACKPORT_LINUX_DMA_MAPPING_H */ backport-iwlwifi-dkms-7906/backport-include/linux/dynamic_debug.h000066400000000000000000000027241351064634500252360ustar00rootroot00000000000000#ifndef __BACKPORT_LINUX_DYNAMIC_DEBUG_H #define __BACKPORT_LINUX_DYNAMIC_DEBUG_H #include #include_next #if LINUX_VERSION_IS_LESS(3,2,0) /* backports 07613b0b */ #if defined(CONFIG_DYNAMIC_DEBUG) #if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,4)) #define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt) \ static struct _ddebug __used __aligned(8) \ __attribute__((section("__verbose"))) name = { \ .modname = KBUILD_MODNAME, \ .function = __func__, \ .filename = __FILE__, \ .format = (fmt), \ .lineno = __LINE__, \ .flags = _DPRINTK_FLAGS_DEFAULT, \ .enabled = false, \ } #else #define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt) \ static struct _ddebug __used __aligned(8) \ __attribute__((section("__verbose"))) name = { \ .modname = KBUILD_MODNAME, \ .function = __func__, \ .filename = __FILE__, \ .format = (fmt), \ .lineno = __LINE__, \ .flags = _DPRINTK_FLAGS_DEFAULT, \ } #endif /* RHEL_RELEASE_CODE < 6.4 */ #endif /* defined(CONFIG_DYNAMIC_DEBUG) */ #endif /* < 3.2 */ #endif /* __BACKPORT_LINUX_DYNAMIC_DEBUG_H */ backport-iwlwifi-dkms-7906/backport-include/linux/eeprom_93cx6.h000066400000000000000000000004021351064634500246560ustar00rootroot00000000000000#ifndef _COMPAT_LINUX_EEPROM_93CX6_H #define _COMPAT_LINUX_EEPROM_93CX6_H 1 #include_next #ifndef PCI_EEPROM_WIDTH_93C86 #define PCI_EEPROM_WIDTH_93C86 8 #endif /* PCI_EEPROM_WIDTH_93C86 */ #endif /* _COMPAT_LINUX_EEPROM_93CX6_H */ backport-iwlwifi-dkms-7906/backport-include/linux/err.h000066400000000000000000000003521351064634500232270ustar00rootroot00000000000000#ifndef __BACKPORT_LINUX_ERR_H #define __BACKPORT_LINUX_ERR_H #include_next #include #if LINUX_VERSION_IS_LESS(3,12,0) #define PTR_ERR_OR_ZERO(p) PTR_RET(p) #endif #endif /* __BACKPORT_LINUX_ERR_H */ backport-iwlwifi-dkms-7906/backport-include/linux/etherdevice.h000066400000000000000000000155141351064634500247340ustar00rootroot00000000000000#ifndef _BACKPORT_LINUX_ETHERDEVICE_H #define _BACKPORT_LINUX_ETHERDEVICE_H #include_next #include /* * newer kernels include this already and some * users rely on getting this indirectly */ #include #if LINUX_VERSION_IS_LESS(3,4,0) #define eth_hw_addr_random LINUX_BACKPORT(eth_hw_addr_random) static inline void eth_hw_addr_random(struct net_device *dev) { dev->addr_assign_type |= NET_ADDR_RANDOM; random_ether_addr(dev->dev_addr); } #endif #if LINUX_VERSION_IS_LESS(3,6,0) #include /** * eth_broadcast_addr - Assign broadcast address * @addr: Pointer to a six-byte array containing the Ethernet address * * Assign the broadcast address to the given address array. 
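 *
 * Illustrative use (added note, not in the original kernel-doc):
 *
 *	u8 dst[ETH_ALEN];
 *
 *	eth_broadcast_addr(dst);	now dst is ff:ff:ff:ff:ff:ff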
*/ #define eth_broadcast_addr LINUX_BACKPORT(eth_broadcast_addr) static inline void eth_broadcast_addr(u8 *addr) { memset(addr, 0xff, ETH_ALEN); } /** * eth_random_addr - Generate software assigned random Ethernet address * @addr: Pointer to a six-byte array containing the Ethernet address * * Generate a random Ethernet address (MAC) that is not multicast * and has the local assigned bit set. */ #define eth_random_addr LINUX_BACKPORT(eth_random_addr) static inline void eth_random_addr(u8 *addr) { get_random_bytes(addr, ETH_ALEN); addr[0] &= 0xfe; /* clear multicast bit */ addr[0] |= 0x02; /* set local assignment bit (IEEE802) */ } #endif /* LINUX_VERSION_IS_LESS(3,6,0) */ #if LINUX_VERSION_IS_LESS(3,7,0) /* This backports: * * commit 6d57e9078e880a3dd232d579f42ac437a8f1ef7b * Author: Duan Jiong * Date: Sat Sep 8 16:32:28 2012 +0000 * * etherdevice: introduce help function eth_zero_addr() */ #define eth_zero_addr LINUX_BACKPORT(eth_zero_addr) static inline void eth_zero_addr(u8 *addr) { memset(addr, 0x00, ETH_ALEN); } #endif #if LINUX_VERSION_IS_LESS(3,5,0) #define ether_addr_equal LINUX_BACKPORT(ether_addr_equal) static inline bool ether_addr_equal(const u8 *addr1, const u8 *addr2) { return !compare_ether_addr(addr1, addr2); } #endif #if LINUX_VERSION_IS_LESS(3,9,0) #define eth_prepare_mac_addr_change LINUX_BACKPORT(eth_prepare_mac_addr_change) extern int eth_prepare_mac_addr_change(struct net_device *dev, void *p); #define eth_commit_mac_addr_change LINUX_BACKPORT(eth_commit_mac_addr_change) extern void eth_commit_mac_addr_change(struct net_device *dev, void *p); #endif /* < 3.9 */ #if LINUX_VERSION_IS_LESS(3,12,0) && \ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6) /** * eth_hw_addr_inherit - Copy dev_addr from another net_device * @dst: pointer to net_device to copy dev_addr to * @src: pointer to net_device to copy dev_addr from * * Copy the Ethernet address from one net_device to another along with * the address attributes (addr_assign_type). */ static inline void eth_hw_addr_inherit(struct net_device *dst, struct net_device *src) { dst->addr_assign_type = src->addr_assign_type; memcpy(dst->dev_addr, src->dev_addr, ETH_ALEN); } #endif /* LINUX_VERSION_IS_LESS(3,13,0) */ #if LINUX_VERSION_IS_LESS(3,5,0) /** * ether_addr_equal_64bits - Compare two Ethernet addresses * @addr1: Pointer to an array of 8 bytes * @addr2: Pointer to an other array of 8 bytes * * Compare two Ethernet addresses, returns true if equal, false otherwise. * * The function doesn't need any conditional branches and possibly uses * word memory accesses on CPU allowing cheap unaligned memory reads. * arrays = { byte1, byte2, byte3, byte4, byte5, byte6, pad1, pad2 } * * Please note that alignment of addr1 & addr2 are only guaranteed to be 16 bits. 
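 *
 * Illustrative caller layout (added note, not in the original
 * kernel-doc; bp_example_hdr is an invented name).  The two pad bytes
 * per address are what make the 64-bit wide loads legal:
 *
 *	struct bp_example_hdr {
 *		u8 da[ETH_ALEN + 2];
 *		u8 sa[ETH_ALEN + 2];
 *	} __aligned(2);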
*/ #define ether_addr_equal_64bits LINUX_BACKPORT(ether_addr_equal_64bits) static inline bool ether_addr_equal_64bits(const u8 addr1[6+2], const u8 addr2[6+2]) { #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64 u64 fold = (*(const u64 *)addr1) ^ (*(const u64 *)addr2); #ifdef __BIG_ENDIAN return (fold >> 16) == 0; #else return (fold << 16) == 0; #endif #else return ether_addr_equal(addr1, addr2); #endif } #endif /* LINUX_VERSION_IS_LESS(3,5,0) */ #if LINUX_VERSION_IS_LESS(3,14,0) /** * ether_addr_equal_unaligned - Compare two not u16 aligned Ethernet addresses * @addr1: Pointer to a six-byte array containing the Ethernet address * @addr2: Pointer other six-byte array containing the Ethernet address * * Compare two Ethernet addresses, returns true if equal * * Please note: Use only when any Ethernet address may not be u16 aligned. */ #define ether_addr_equal_unaligned LINUX_BACKPORT(ether_addr_equal_unaligned) static inline bool ether_addr_equal_unaligned(const u8 *addr1, const u8 *addr2) { #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) return ether_addr_equal(addr1, addr2); #else return memcmp(addr1, addr2, ETH_ALEN) == 0; #endif } /** * ether_addr_copy - Copy an Ethernet address * @dst: Pointer to a six-byte array Ethernet address destination * @src: Pointer to a six-byte array Ethernet address source * * Please note: dst & src must both be aligned to u16. */ #define ether_addr_copy LINUX_BACKPORT(ether_addr_copy) static inline void ether_addr_copy(u8 *dst, const u8 *src) { #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) *(u32 *)dst = *(const u32 *)src; *(u16 *)(dst + 4) = *(const u16 *)(src + 4); #else u16 *a = (u16 *)dst; const u16 *b = (const u16 *)src; a[0] = b[0]; a[1] = b[1]; a[2] = b[2]; #endif } #endif /* LINUX_VERSION_IS_LESS(3,14,0) */ #if LINUX_VERSION_IS_LESS(3,18,0) #define eth_get_headlen LINUX_BACKPORT(eth_get_headlen) int eth_get_headlen(unsigned char *data, unsigned int max_len); #endif /* LINUX_VERSION_IS_LESS(3,18,0) */ #if LINUX_VERSION_IS_LESS(3,19,0) #define eth_skb_pad LINUX_BACKPORT(eth_skb_pad) /** * eth_skb_pad - Pad buffer to mininum number of octets for Ethernet frame * @skb: Buffer to pad * * An Ethernet frame should have a minimum size of 60 bytes. This function * takes short frames and pads them with zeros up to the 60 byte limit. */ static inline int eth_skb_pad(struct sk_buff *skb) { return skb_put_padto(skb, ETH_ZLEN); } #endif /* LINUX_VERSION_IS_LESS(3,19,0) */ #if LINUX_VERSION_IS_LESS(4,11,0) && \ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6) /** * ether_addr_to_u64 - Convert an Ethernet address into a u64 value. * @addr: Pointer to a six-byte array containing the Ethernet address * * Return a u64 value of the address */ static inline u64 ether_addr_to_u64(const u8 *addr) { u64 u = 0; int i; for (i = 0; i < ETH_ALEN; i++) u = u << 8 | addr[i]; return u; } /** * u64_to_ether_addr - Convert a u64 to an Ethernet address. 
* @u: u64 to convert to an Ethernet MAC address * @addr: Pointer to a six-byte array to contain the Ethernet address */ static inline void u64_to_ether_addr(u64 u, u8 *addr) { int i; for (i = ETH_ALEN - 1; i >= 0; i--) { addr[i] = u & 0xff; u = u >> 8; } } #endif /* LINUX_VERSION_IS_LESS(4,11,0) */ #endif /* _BACKPORT_LINUX_ETHERDEVICE_H */ backport-iwlwifi-dkms-7906/backport-include/linux/ethtool.h000066400000000000000000000006071351064634500241200ustar00rootroot00000000000000#ifndef __BACKPORT_LINUX_ETHTOOL_H #define __BACKPORT_LINUX_ETHTOOL_H #include_next #include #ifndef SPEED_UNKNOWN #define SPEED_UNKNOWN -1 #endif /* SPEED_UNKNOWN */ #ifndef DUPLEX_UNKNOWN #define DUPLEX_UNKNOWN 0xff #endif /* DUPLEX_UNKNOWN */ #ifndef ETHTOOL_FWVERS_LEN #define ETHTOOL_FWVERS_LEN 32 #endif #endif /* __BACKPORT_LINUX_ETHTOOL_H */ backport-iwlwifi-dkms-7906/backport-include/linux/eventpoll.h000066400000000000000000000013711351064634500244510ustar00rootroot00000000000000#ifndef __BACKPORT_LINUX_EVENTPOLL_H #define __BACKPORT_LINUX_EVENTPOLL_H #include_next #ifndef EPOLLIN #define EPOLLIN 0x00000001 #endif #ifndef EPOLLPRI #define EPOLLPRI 0x00000002 #endif #ifndef EPOLLOUT #define EPOLLOUT 0x00000004 #endif #ifndef EPOLLERR #define EPOLLERR 0x00000008 #endif #ifndef EPOLLHUP #define EPOLLHUP 0x00000010 #endif #ifndef EPOLLRDNORM #define EPOLLRDNORM 0x00000040 #endif #ifndef EPOLLRDBAND #define EPOLLRDBAND 0x00000080 #endif #ifndef EPOLLWRNORM #define EPOLLWRNORM 0x00000100 #endif #ifndef EPOLLWRBAND #define EPOLLWRBAND 0x00000200 #endif #ifndef EPOLLMSG #define EPOLLMSG 0x00000400 #endif #ifndef EPOLLRDHUP #define EPOLLRDHUP 0x00002000 #endif #endif /* __BACKPORT_LINUX_EVENTPOLL_H */ backport-iwlwifi-dkms-7906/backport-include/linux/export.h000066400000000000000000000006151351064634500237620ustar00rootroot00000000000000#ifndef _COMPAT_LINUX_EXPORT_H #define _COMPAT_LINUX_EXPORT_H 1 #include #if LINUX_VERSION_IS_GEQ(3,2,0) #include_next #else #ifndef pr_fmt #define backport_undef_pr_fmt #endif #include #ifdef backport_undef_pr_fmt #undef pr_fmt #undef backport_undef_pr_fmt #endif #endif /* LINUX_VERSION_IS_GEQ(3,2,0) */ #endif /* _COMPAT_LINUX_EXPORT_H */ backport-iwlwifi-dkms-7906/backport-include/linux/firmware.h000066400000000000000000000013111351064634500242470ustar00rootroot00000000000000#ifndef __BACKPORT_LINUX_FIRMWARE_H #define __BACKPORT_LINUX_FIRMWARE_H #include_next #if LINUX_VERSION_IS_LESS(3,14,0) && \ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6) #define request_firmware_direct(fw, name, device) request_firmware(fw, name, device) #endif #if LINUX_VERSION_IS_LESS(4,18,0) && \ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6) #define firmware_request_nowarn(fw, name, device) request_firmware(fw, name, device) #endif #if LINUX_VERSION_IS_LESS(4,17,0) #define firmware_request_cache LINUX_BACKPORT(firmware_request_cache) static inline int firmware_request_cache(struct device *device, const char *name) { return 0; } #endif #endif /* __BACKPORT_LINUX_FIRMWARE_H */ backport-iwlwifi-dkms-7906/backport-include/linux/fs.h000066400000000000000000000025201351064634500230460ustar00rootroot00000000000000#ifndef _COMPAT_LINUX_FS_H #define _COMPAT_LINUX_FS_H #include_next #include /* * some versions don't have this and thus don't * include it from the original fs.h */ #include #if LINUX_VERSION_IS_LESS(3,4,0) #define simple_open LINUX_BACKPORT(simple_open) extern int simple_open(struct inode *inode, struct file *file); #endif #if LINUX_VERSION_IS_LESS(3,9,0) /** * backport of: * * commit 
496ad9aa8ef448058e36ca7a787c61f2e63f0f54 * Author: Al Viro * Date: Wed Jan 23 17:07:38 2013 -0500 * * new helper: file_inode(file) */ static inline struct inode *file_inode(struct file *f) { return f->f_path.dentry->d_inode; } #endif #ifndef replace_fops /* * This one is to be used *ONLY* from ->open() instances. * fops must be non-NULL, pinned down *and* module dependencies * should be sufficient to pin the caller down as well. */ #define replace_fops(f, fops) \ do { \ struct file *__file = (f); \ fops_put(__file->f_op); \ BUG_ON(!(__file->f_op = (fops))); \ } while(0) #endif /* replace_fops */ #if (LINUX_VERSION_IS_LESS(4,5,0) && \ LINUX_VERSION_IS_GEQ(3,2,0)) #define no_seek_end_llseek LINUX_BACKPORT(no_seek_end_llseek) extern loff_t no_seek_end_llseek(struct file *, loff_t, int); #endif /* < 4.5 && >= 3.2 */ #endif /* _COMPAT_LINUX_FS_H */ backport-iwlwifi-dkms-7906/backport-include/linux/ftrace_event.h000066400000000000000000000005641351064634500251110ustar00rootroot00000000000000#ifndef __BACKPORT_LINUX_FTRACE_EVENT_H #define __BACKPORT_LINUX_FTRACE_EVENT_H #include_next #if LINUX_VERSION_IS_LESS(4,0,0) && \ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6) const char *ftrace_print_array_seq(struct trace_seq *p, const void *buf, int buf_len, size_t el_size); #endif #endif /* __BACKPORT_LINUX_FTRACE_EVENT_H */ backport-iwlwifi-dkms-7906/backport-include/linux/genetlink.h000066400000000000000000000010411351064634500244130ustar00rootroot00000000000000#ifndef __BACKPORT_LINUX_GENETLINK_H #define __BACKPORT_LINUX_GENETLINK_H #include_next /* This backports: * * commit e9412c37082b5c932e83364aaed0c38c2ce33acb * Author: Neil Horman * Date: Tue May 29 09:30:41 2012 +0000 * * genetlink: Build a generic netlink family module alias */ #ifndef MODULE_ALIAS_GENL_FAMILY #define MODULE_ALIAS_GENL_FAMILY(family)\ MODULE_ALIAS_NET_PF_PROTO_NAME(PF_NETLINK, NETLINK_GENERIC, "-family-" family) #endif #endif /* __BACKPORT_LINUX_GENETLINK_H */ backport-iwlwifi-dkms-7906/backport-include/linux/gfp.h000066400000000000000000000014171351064634500232160ustar00rootroot00000000000000#ifndef __BACKPORT_LINUX_GFP_H #define __BACKPORT_LINUX_GFP_H #include_next #ifndef ___GFP_KSWAPD_RECLAIM #define ___GFP_KSWAPD_RECLAIM 0x0u #endif #ifndef __GFP_KSWAPD_RECLAIM #define __GFP_KSWAPD_RECLAIM ((__force gfp_t)___GFP_KSWAPD_RECLAIM) /* kswapd can wake */ #endif #if LINUX_VERSION_IS_LESS(4,10,0) && LINUX_VERSION_IS_GEQ(4,2,0) #define page_frag_alloc LINUX_BACKPORT(page_frag_alloc) static inline void *page_frag_alloc(struct page_frag_cache *nc, unsigned int fragsz, gfp_t gfp_mask) { return __alloc_page_frag(nc, fragsz, gfp_mask); } #define __page_frag_cache_drain LINUX_BACKPORT(__page_frag_cache_drain) void __page_frag_cache_drain(struct page *page, unsigned int count); #endif /* < 4.10 && >= 4.2 */ #endif /* __BACKPORT_LINUX_GFP_H */ backport-iwlwifi-dkms-7906/backport-include/linux/gpio.h000066400000000000000000000017221351064634500233770ustar00rootroot00000000000000#ifndef __BACKPORT_LINUX_GPIO_H #define __BACKPORT_LINUX_GPIO_H #include_next #if LINUX_VERSION_IS_LESS(3,5,0) #define devm_gpio_request_one LINUX_BACKPORT(devm_gpio_request_one) #define devm_gpio_request LINUX_BACKPORT(devm_gpio_request) #ifdef CONFIG_GPIOLIB int devm_gpio_request(struct device *dev, unsigned gpio, const char *label); int devm_gpio_request_one(struct device *dev, unsigned gpio, unsigned long flags, const char *label); void devm_gpio_free(struct device *dev, unsigned int gpio); #else static inline int devm_gpio_request(struct device *dev, 
unsigned gpio, const char *label) { WARN_ON(1); return -EINVAL; } static inline int devm_gpio_request_one(struct device *dev, unsigned gpio, unsigned long flags, const char *label) { WARN_ON(1); return -EINVAL; } static inline void devm_gpio_free(struct device *dev, unsigned int gpio) { WARN_ON(1); } #endif /* CONFIG_GPIOLIB */ #endif #endif /* __BACKPORT_LINUX_GPIO_H */ backport-iwlwifi-dkms-7906/backport-include/linux/gpio/000077500000000000000000000000001351064634500232245ustar00rootroot00000000000000backport-iwlwifi-dkms-7906/backport-include/linux/gpio/driver.h000066400000000000000000000003431351064634500246700ustar00rootroot00000000000000#ifndef __BP_GPIO_DRIVER_H #define __BP_GPIO_DRIVER_H #include #if LINUX_VERSION_IS_LESS(3,13,0) #include #else #include_next #endif #endif /* __BP_GPIO_DRIVER_H */ backport-iwlwifi-dkms-7906/backport-include/linux/hashtable.h000066400000000000000000000023541351064634500243760ustar00rootroot00000000000000#ifndef __BACKPORT_HASHTABLE_H #define __BACKPORT_HASHTABLE_H #include_next #include #if LINUX_VERSION_IS_LESS(3,9,0) /** * backport: * * commit 0bbacca7c3911451cea923b0ad6389d58e3d9ce9 * Author: Sasha Levin * Date: Thu Feb 7 12:32:18 2013 +1100 * * hlist: drop the node parameter from iterators */ #include #include #undef hash_for_each #define hash_for_each(name, bkt, obj, member) \ for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\ (bkt)++)\ hlist_for_each_entry(obj, &name[bkt], member) #undef hash_for_each_safe #define hash_for_each_safe(name, bkt, tmp, obj, member) \ for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\ (bkt)++)\ hlist_for_each_entry_safe(obj, tmp, &name[bkt], member) #undef hash_for_each_possible #define hash_for_each_possible(name, obj, member, key) \ hlist_for_each_entry(obj, &name[hash_min(key, HASH_BITS(name))], member) #undef hash_for_each_possible_safe #define hash_for_each_possible_safe(name, obj, tmp, member, key) \ hlist_for_each_entry_safe(obj, tmp,\ &name[hash_min(key, HASH_BITS(name))], member) #endif #endif /* __BACKPORT_HASHTABLE_H */ backport-iwlwifi-dkms-7906/backport-include/linux/hid.h000066400000000000000000000037601351064634500232110ustar00rootroot00000000000000#ifndef __BACKPORT_HID_H #define __BACKPORT_HID_H #include_next #include #if LINUX_VERSION_IS_LESS(3,8,0) #define hid_ignore LINUX_BACKPORT(hid_ignore) extern bool hid_ignore(struct hid_device *); #endif #if LINUX_VERSION_IS_LESS(3,1,0) #define HID_TYPE_USBNONE 2 #endif #ifndef HID_QUIRK_NO_IGNORE #define HID_QUIRK_NO_IGNORE 0x40000000 #endif #ifndef HID_QUIRK_HIDDEV_FORCE #define HID_QUIRK_HIDDEV_FORCE 0x00000010 #endif #ifndef HID_QUIRK_IGNORE #define HID_QUIRK_IGNORE 0x00000004 #endif #ifndef HID_USB_DEVICE #define HID_USB_DEVICE(ven, prod) \ .bus = BUS_USB, .vendor = (ven), .product = (prod) #endif #ifndef HID_BLUETOOTH_DEVICE #define HID_BLUETOOTH_DEVICE(ven, prod) \ .bus = BUS_BLUETOOTH, .vendor = (ven), .product = (prod) #endif #ifndef hid_printk #define hid_printk(level, hid, fmt, arg...) \ dev_printk(level, &(hid)->dev, fmt, ##arg) #endif #ifndef hid_emerg #define hid_emerg(hid, fmt, arg...) \ dev_emerg(&(hid)->dev, fmt, ##arg) #endif #ifndef hid_crit #define hid_crit(hid, fmt, arg...) \ dev_crit(&(hid)->dev, fmt, ##arg) #endif #ifndef hid_alert #define hid_alert(hid, fmt, arg...) \ dev_alert(&(hid)->dev, fmt, ##arg) #endif #ifndef hid_err #define hid_err(hid, fmt, arg...) \ dev_err(&(hid)->dev, fmt, ##arg) #endif #ifndef hid_notice #define hid_notice(hid, fmt, arg...) 
\ dev_notice(&(hid)->dev, fmt, ##arg) #endif #ifndef hid_warn #define hid_warn(hid, fmt, arg...) \ dev_warn(&(hid)->dev, fmt, ##arg) #endif #ifndef hid_info #define hid_info(hid, fmt, arg...) \ dev_info(&(hid)->dev, fmt, ##arg) #endif #ifndef hid_dbg #define hid_dbg(hid, fmt, arg...) \ dev_dbg(&(hid)->dev, fmt, ##arg) #endif #if LINUX_VERSION_IS_LESS(3,12,0) #define hid_alloc_report_buf LINUX_BACKPORT(hid_alloc_report_buf) u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags); #endif #endif /* __BACKPORT_HID_H */ backport-iwlwifi-dkms-7906/backport-include/linux/hwmon.h000066400000000000000000000021741351064634500235730ustar00rootroot00000000000000#ifndef __BACKPORT_LINUX_HWMON_H #define __BACKPORT_LINUX_HWMON_H #include_next #include #if LINUX_VERSION_IS_LESS(3,13,0) /* * Backports * * commit bab2243ce1897865e31ea6d59b0478391f51812b * Author: Guenter Roeck * Date: Sat Jul 6 13:57:23 2013 -0700 * * hwmon: Introduce hwmon_device_register_with_groups * * hwmon_device_register_with_groups() lets callers register a hwmon device * together with all sysfs attributes in a single call. * * When using hwmon_device_register_with_groups(), hwmon attributes are attached * to the hwmon device directly and no longer with its parent device. * * Signed-off-by: Guenter Roeck */ struct device * hwmon_device_register_with_groups(struct device *dev, const char *name, void *drvdata, const struct attribute_group **groups); struct device * devm_hwmon_device_register_with_groups(struct device *dev, const char *name, void *drvdata, const struct attribute_group **groups); #endif /* LINUX_VERSION_IS_LESS(3,13,0) */ #endif /* __BACKPORT_LINUX_HWMON_H */ backport-iwlwifi-dkms-7906/backport-include/linux/i2c-mux.h000066400000000000000000000011411351064634500237200ustar00rootroot00000000000000#ifndef __BACKPORT_LINUX_I2C_MUX_H #define __BACKPORT_LINUX_I2C_MUX_H #include_next #include #if LINUX_VERSION_IS_LESS(3,5,0) #define i2c_add_mux_adapter(parent, mux_dev, mux_priv, force_nr, chan_id, class, select, deselect) \ i2c_add_mux_adapter(parent, mux_priv, force_nr, chan_id, select, deselect) #elif LINUX_VERSION_IS_LESS(3,7,0) #define i2c_add_mux_adapter(parent, mux_dev, mux_priv, force_nr, chan_id, class, select, deselect) \ i2c_add_mux_adapter(parent, mux_dev, mux_priv, force_nr, chan_id, select, deselect) #endif #endif /* __BACKPORT_LINUX_I2C_MUX_H */ backport-iwlwifi-dkms-7906/backport-include/linux/i2c.h000066400000000000000000000017611351064634500231210ustar00rootroot00000000000000#ifndef __BACKPORT_LINUX_I2C_H #define __BACKPORT_LINUX_I2C_H #include_next #include /* This backports * * commit 14674e70119ea01549ce593d8901a797f8a90f74 * Author: Mark Brown * Date: Wed May 30 10:55:34 2012 +0200 * * i2c: Split I2C_M_NOSTART support out of I2C_FUNC_PROTOCOL_MANGLING */ #ifndef I2C_FUNC_NOSTART #define I2C_FUNC_NOSTART 0x00000010 /* I2C_M_NOSTART */ #endif /* This backports: * * commit 7c92784a546d2945b6d6973a30f7134be78eb7a4 * Author: Lars-Peter Clausen * Date: Wed Nov 16 10:13:36 2011 +0100 * * I2C: Add helper macro for i2c_driver boilerplate */ #ifndef module_i2c_driver #define module_i2c_driver(__i2c_driver) \ module_driver(__i2c_driver, i2c_add_driver, \ i2c_del_driver) #endif #ifndef I2C_CLIENT_SCCB #define I2C_CLIENT_SCCB 0x9000 /* Use Omnivision SCCB protocol */ /* Must match I2C_M_STOP|IGNORE_NAK */ #endif #endif /* __BACKPORT_LINUX_I2C_H */ backport-iwlwifi-dkms-7906/backport-include/linux/idr.h000066400000000000000000000032331351064634500232160ustar00rootroot00000000000000#ifndef __BACKPORT_IDR_H 
#define __BACKPORT_IDR_H /* some versions have a broken idr header */ #include #include_next #include #if LINUX_VERSION_IS_LESS(3,1,0) #define ida_simple_get LINUX_BACKPORT(ida_simple_get) int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end, gfp_t gfp_mask); #define ida_simple_remove LINUX_BACKPORT(ida_simple_remove) void ida_simple_remove(struct ida *ida, unsigned int id); #endif #if LINUX_VERSION_IS_LESS(3,9,0) #include /** * backport of idr idr_alloc() usage * * This backports a patch series send by Tejun Heo: * https://lkml.org/lkml/2013/2/2/159 */ static inline void compat_idr_destroy(struct idr *idp) { idr_remove_all(idp); idr_destroy(idp); } #define idr_destroy(idp) compat_idr_destroy(idp) static inline int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask) { int id, ret; do { if (!idr_pre_get(idr, gfp_mask)) return -ENOMEM; ret = idr_get_new_above(idr, ptr, start, &id); if (!ret && id > end) { idr_remove(idr, id); ret = -ENOSPC; } } while (ret == -EAGAIN); return ret ? ret : id; } static inline void idr_preload(gfp_t gfp_mask) { } static inline void idr_preload_end(void) { } #endif #ifndef idr_for_each_entry #define idr_for_each_entry(idp, entry, id) \ for (id = 0; ((entry) = idr_get_next(idp, &(id))) != NULL; ++id) #endif #if LINUX_VERSION_IS_LESS(4, 11, 0) static inline void *backport_idr_remove(struct idr *idr, int id) { void *item = idr_find(idr, id); idr_remove(idr, id); return item; } #define idr_remove backport_idr_remove #endif #endif /* __BACKPORT_IDR_H */ backport-iwlwifi-dkms-7906/backport-include/linux/if_arp.h000066400000000000000000000005401351064634500236760ustar00rootroot00000000000000#ifndef _BACKPORTS_LINUX_AF_ARP_H #define _BACKPORTS_LINUX_AF_ARP_H 1 #include_next #ifndef ARPHRD_IEEE802154_MONITOR #define ARPHRD_IEEE802154_MONITOR 805 /* IEEE 802.15.4 network monitor */ #endif #ifndef ARPHRD_6LOWPAN #define ARPHRD_6LOWPAN 825 /* IPv6 over LoWPAN */ #endif #endif /* _BACKPORTS_LINUX_AF_ARP_H */ backport-iwlwifi-dkms-7906/backport-include/linux/if_ether.h000066400000000000000000000021771351064634500242330ustar00rootroot00000000000000#ifndef __BACKPORT_IF_ETHER_H #define __BACKPORT_IF_ETHER_H #include_next /* See commit b62faf3c in next-20140311 */ #ifndef ETH_P_80221 #define ETH_P_80221 0x8917 /* IEEE 802.21 Media Independent Handover Protocol */ #endif /* * backport of: * commit e5c5d22e8dcf7c2d430336cbf8e180bd38e8daf1 * Author: Simon Horman * Date: Thu Mar 28 13:38:25 2013 +0900 * * net: add ETH_P_802_3_MIN */ #ifndef ETH_P_802_3_MIN #define ETH_P_802_3_MIN 0x0600 #endif #ifndef ETH_P_TDLS #define ETH_P_TDLS 0x890D /* TDLS */ #endif #ifndef ETH_P_LINK_CTL #define ETH_P_LINK_CTL 0x886c #endif #ifndef ETH_P_PAE #define ETH_P_PAE 0x888E /* Port Access Entity (IEEE 802.1X) */ #endif #ifndef ETH_P_TEB #define ETH_P_TEB 0x6558 /* Trans Ether Bridging */ #endif #ifndef ETH_P_8021AD #define ETH_P_8021AD 0x88A8 /* 802.1ad Service VLAN */ #endif #ifndef ETH_P_PREAUTH #define ETH_P_PREAUTH 0x88C7 /* 802.11 Preauthentication */ #endif #ifndef ETH_MIN_MTU #define ETH_MIN_MTU 68 #endif #ifndef ETH_MAX_MTU #define ETH_MAX_MTU 0xFFFFU #endif #endif /* __BACKPORT_IF_ETHER_H */ backport-iwlwifi-dkms-7906/backport-include/linux/if_vlan.h000066400000000000000000000024331351064634500240570ustar00rootroot00000000000000#ifndef __BACKPORT_LINUX_IF_VLAN_H_ #define __BACKPORT_LINUX_IF_VLAN_H_ #include_next #if LINUX_VERSION_IS_LESS(3,10,0) #define vlan_insert_tag(__skb, __vlan_proto, __vlan_tci) vlan_insert_tag(__skb, __vlan_tci) #define 
__vlan_put_tag(__skb, __vlan_proto, __vlan_tci) __vlan_put_tag(__skb, __vlan_tci) #define vlan_put_tag(__skb, __vlan_proto, __vlan_tci) vlan_put_tag(__skb, __vlan_tci) #define __vlan_hwaccel_put_tag(__skb, __vlan_proto, __vlan_tag) __vlan_hwaccel_put_tag(__skb, __vlan_tag) #define __vlan_find_dev_deep(__real_dev, __vlan_proto, __vlan_id) __vlan_find_dev_deep(__real_dev, __vlan_id) #endif #ifndef VLAN_PRIO_MASK #define VLAN_PRIO_MASK 0xe000 /* Priority Code Point */ #endif #ifndef VLAN_PRIO_SHIFT #define VLAN_PRIO_SHIFT 13 #endif #if LINUX_VERSION_IS_LESS(3,16,0) #define __vlan_find_dev_deep_rcu(real_dev, vlan_proto, vlan_id) __vlan_find_dev_deep(real_dev, vlan_proto, vlan_id) #endif #ifndef skb_vlan_tag_present #define skb_vlan_tag_present(__skb) ((__skb)->vlan_tci & VLAN_TAG_PRESENT) #endif #ifndef skb_vlan_tag_get #define skb_vlan_tag_get(__skb) ((__skb)->vlan_tci & ~VLAN_TAG_PRESENT) #endif #ifndef skb_vlan_tag_get_id #define skb_vlan_tag_get_id(__skb) ((__skb)->vlan_tci & VLAN_VID_MASK) #endif #endif /* __BACKPORT_LINUX_IF_VLAN_H_ */ backport-iwlwifi-dkms-7906/backport-include/linux/init.h000066400000000000000000000007061351064634500234050ustar00rootroot00000000000000#ifndef __BACKPORT_INIT_H #define __BACKPORT_INIT_H #include_next /* * Backports 312b1485fb509c9bc32eda28ad29537896658cb8 * Author: Sam Ravnborg * Date: Mon Jan 28 20:21:15 2008 +0100 * * Introduce new section reference annotations tags: __ref, __refdata, __refconst */ #ifndef __ref #define __ref __init_refok #endif #ifndef __refdata #define __refdata __initdata_refok #endif #endif /* __BACKPORT_INIT_H */ backport-iwlwifi-dkms-7906/backport-include/linux/input.h000066400000000000000000000005311351064634500235750ustar00rootroot00000000000000#ifndef __BACKPORT_INPUT_H #define __BACKPORT_INPUT_H #include_next #ifndef KEY_WIMAX #define KEY_WIMAX 246 #endif #ifndef KEY_WPS_BUTTON #define KEY_WPS_BUTTON 0x211 #endif #ifndef KEY_RFKILL #define KEY_RFKILL 247 #endif #ifndef SW_RFKILL_ALL #define SW_RFKILL_ALL 0x03 #endif #endif /* __BACKPORT_INPUT_H */ backport-iwlwifi-dkms-7906/backport-include/linux/interrupt.h000066400000000000000000000007171351064634500245000ustar00rootroot00000000000000#ifndef _BP_LINUX_INTERRUPT_H #define _BP_LINUX_INTERRUPT_H #include #include_next #include #if LINUX_VERSION_IS_LESS(4,10,0) static inline void backport_hrtimer_start(struct hrtimer *timer, s64 time, const enum hrtimer_mode mode) { ktime_t _time = { .tv64 = time }; hrtimer_start(timer, _time, mode); } #define hrtimer_start LINUX_BACKPORT(hrtimer_start) #endif #endif /* _BP_LINUX_INTERRUPT_H */ backport-iwlwifi-dkms-7906/backport-include/linux/io.h000066400000000000000000000003071351064634500230460ustar00rootroot00000000000000#ifndef __BP_LINUX_IO_H #define __BP_LINUX_IO_H #include_next #ifndef IOMEM_ERR_PTR #define IOMEM_ERR_PTR(err) (__force void __iomem *)ERR_PTR(err) #endif #endif /* __BP_LINUX_IO_H */ backport-iwlwifi-dkms-7906/backport-include/linux/ioport.h000066400000000000000000000003151351064634500237520ustar00rootroot00000000000000#ifndef __BACKPORT_LINUX_IOPORT_H #define __BACKPORT_LINUX_IOPORT_H #include_next #ifndef IORESOURCE_REG #define IORESOURCE_REG 0x00000300 #endif #endif /* __BACKPORT_LINUX_IOPORT_H */ backport-iwlwifi-dkms-7906/backport-include/linux/irq.h000066400000000000000000000007151351064634500232350ustar00rootroot00000000000000#ifndef __BACKPORT_LINUX_IRQ_H #define __BACKPORT_LINUX_IRQ_H #include_next #ifdef CONFIG_HAVE_GENERIC_HARDIRQS #if LINUX_VERSION_IS_LESS(3,11,0) #define irq_get_trigger_type 
LINUX_BACKPORT(irq_get_trigger_type) static inline u32 irq_get_trigger_type(unsigned int irq) { struct irq_data *d = irq_get_irq_data(irq); return d ? irqd_get_trigger_type(d) : 0; } #endif #endif /* CONFIG_HAVE_GENERIC_HARDIRQS */ #endif /* __BACKPORT_LINUX_IRQ_H */ backport-iwlwifi-dkms-7906/backport-include/linux/irqdomain.h000066400000000000000000000003321351064634500244200ustar00rootroot00000000000000#ifndef __BACKPORT_LINUX_IRQDOMAIN_H #define __BACKPORT_LINUX_IRQDOMAIN_H #include #if LINUX_VERSION_IS_GEQ(3,1,0) #include_next #endif #endif /* __BACKPORT_LINUX_IRQDOMAIN_H */ backport-iwlwifi-dkms-7906/backport-include/linux/jiffies.h000066400000000000000000000015331351064634500240600ustar00rootroot00000000000000#ifndef __BACKPORT_LINUX_JIFFIES_H #define __BACKPORT_LINUX_JIFFIES_H #ifndef NSEC_PER_SEC #define NSEC_PER_SEC 1000000000L #endif #include_next #ifndef time_is_before_jiffies #define time_is_before_jiffies(a) time_after(jiffies, a) #endif #ifndef time_is_after_jiffies #define time_is_after_jiffies(a) time_before(jiffies, a) #endif #ifndef time_is_before_eq_jiffies #define time_is_before_eq_jiffies(a) time_after_eq(jiffies, a) #endif #ifndef time_is_after_eq_jiffies #define time_is_after_eq_jiffies(a) time_before_eq(jiffies, a) #endif /* * This function is available, but not exported, in kernels < 3.17, so add * our own version. */ #if LINUX_VERSION_IS_LESS(3,17,0) #define nsecs_to_jiffies LINUX_BACKPORT(nsecs_to_jiffies) extern unsigned long nsecs_to_jiffies(u64 n); #endif /* 3.17 */ #endif /* __BACKPORT_LINUX_JIFFIES_H */ backport-iwlwifi-dkms-7906/backport-include/linux/kconfig.h000066400000000000000000000025141351064634500240610ustar00rootroot00000000000000#ifndef __BACKPORT_LINUX_KCONFIG_H #define __BACKPORT_LINUX_KCONFIG_H #include #if LINUX_VERSION_IS_GEQ(3,1,0) #include_next #endif #ifndef __ARG_PLACEHOLDER_1 #define __ARG_PLACEHOLDER_1 0, #define config_enabled(cfg) _config_enabled(cfg) #define _config_enabled(value) __config_enabled(__ARG_PLACEHOLDER_##value) #define __config_enabled(arg1_or_junk) ___config_enabled(arg1_or_junk 1, 0) #define ___config_enabled(__ignored, val, ...) val /* * 3.1 - 3.3 had a broken version of this, so undef * (they didn't have __ARG_PLACEHOLDER_1) */ #undef IS_ENABLED #define IS_ENABLED(option) \ (config_enabled(option) || config_enabled(option##_MODULE)) #endif /* * Since 4.9 config_enabled has been removed in favor of __is_defined. */ #ifndef config_enabled #define config_enabled(cfg) __is_defined(cfg) #endif #undef IS_BUILTIN #define IS_BUILTIN(option) config_enabled(option) #ifndef IS_REACHABLE /* * IS_REACHABLE(CONFIG_FOO) evaluates to 1 if the currently compiled * code can call a function defined in code compiled based on CONFIG_FOO. * This is similar to IS_ENABLED(), but returns false when invoked from * built-in code when CONFIG_FOO is set to 'm'.
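* For example (an illustrative note, not part of the original comment): with
* CONFIG_FOO=m, IS_ENABLED(CONFIG_FOO) is 1 in both built-in and modular code,
* while IS_REACHABLE(CONFIG_FOO) is 1 only in code that is itself built as a module.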
*/ #define IS_REACHABLE(option) (config_enabled(option) || \ (config_enabled(option##_MODULE) && config_enabled(MODULE))) #endif #endif backport-iwlwifi-dkms-7906/backport-include/linux/kernel.h000066400000000000000000000133651351064634500237270ustar00rootroot00000000000000#ifndef __BACKPORT_KERNEL_H #define __BACKPORT_KERNEL_H #include_next #include #include /* * some older kernels don't have this and thus don't * include it from kernel.h like new kernels */ #include /* * This backports: * * From a3860c1c5dd1137db23d7786d284939c5761d517 Mon Sep 17 00:00:00 2001 * From: Xi Wang * Date: Thu, 31 May 2012 16:26:04 -0700 * Subject: [PATCH] introduce SIZE_MAX */ #ifndef SIZE_MAX #define SIZE_MAX (~(size_t)0) #endif /* This backports: * * commit 36a26c69b4c70396ef569c3452690fba0c1dec08 * Author: Nicholas Bellinger * Date: Tue Jul 26 00:35:26 2011 -0700 * * kernel.h: Add DIV_ROUND_UP_ULL and DIV_ROUND_UP_SECTOR_T macro usage */ #ifndef DIV_ROUND_UP_ULL #define DIV_ROUND_UP_ULL(ll,d) \ ({ unsigned long long _tmp = (ll)+(d)-1; do_div(_tmp, d); _tmp; }) #endif #ifndef USHRT_MAX #define USHRT_MAX ((u16)(~0U)) #endif #ifndef SHRT_MAX #define SHRT_MAX ((s16)(USHRT_MAX>>1)) #endif #ifndef SHRT_MIN #define SHRT_MIN ((s16)(-SHRT_MAX - 1)) #endif #ifndef U8_MAX #define U8_MAX ((u8)~0U) #endif #ifndef S8_MAX #define S8_MAX ((s8)(U8_MAX>>1)) #endif #ifndef S8_MIN #define S8_MIN ((s8)(-S8_MAX - 1)) #endif #ifndef U16_MAX #define U16_MAX ((u16)~0U) #endif #ifndef S16_MAX #define S16_MAX ((s16)(U16_MAX>>1)) #endif #ifndef S16_MIN #define S16_MIN ((s16)(-S16_MAX - 1)) #endif #ifndef U32_MAX #define U32_MAX ((u32)~0U) #endif #ifndef S32_MAX #define S32_MAX ((s32)(U32_MAX>>1)) #endif #ifndef S32_MIN #define S32_MIN ((s32)(-S32_MAX - 1)) #endif #ifndef __round_mask #define __round_mask(x, y) ((__typeof__(x))((y)-1)) #define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1) #define round_down(x, y) ((x) & ~__round_mask(x, y)) #endif #ifndef DIV_ROUND_CLOSEST #define DIV_ROUND_CLOSEST(x, divisor)( \ { \ typeof(x) __x = x; \ typeof(divisor) __d = divisor; \ (((typeof(x))-1) > 0 || \ ((typeof(divisor))-1) > 0 || (__x) > 0) ? \ (((__x) + ((__d) / 2)) / (__d)) : \ (((__x) - ((__d) / 2)) / (__d)); \ } \ ) #endif #ifndef DIV_ROUND_CLOSEST_ULL #define DIV_ROUND_CLOSEST_ULL(x, divisor)( \ { \ typeof(divisor) __d = divisor; \ unsigned long long _tmp = (x) + (__d) / 2; \ do_div(_tmp, __d); \ _tmp; \ } \ ) #endif #ifndef swap #define swap(a, b) \ do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0) #endif #ifndef lower_32_bits #define lower_32_bits(n) ((u32)(n)) #endif #ifndef clamp #define clamp(val, min, max) ({ \ typeof(val) __val = (val); \ typeof(min) __min = (min); \ typeof(max) __max = (max); \ (void) (&__val == &__min); \ (void) (&__val == &__max); \ __val = __val < __min ? __min: __val; \ __val > __max ? __max: __val; }) #endif #ifndef clamp_t #define clamp_t(type, val, min, max) ({ \ type __val = (val); \ type __min = (min); \ type __max = (max); \ __val = __val < __min ? __min: __val; \ __val > __max ? __max: __val; }) #endif #ifndef clamp_val #define clamp_val(val, min, max) ({ \ typeof(val) __val = (val); \ typeof(val) __min = (min); \ typeof(val) __max = (max); \ __val = __val < __min ? __min: __val; \ __val > __max ? 
__max: __val; }) #endif #ifndef rounddown #define rounddown(x, y) ( \ { \ typeof(x) __x = (x); \ __x - (__x % (y)); \ } \ ) #endif /* rounddown */ #if LINUX_VERSION_IS_LESS(3,2,0) #define hex_byte_pack pack_hex_byte /* kernels before 3.2 didn't have error checking for the function */ #define hex2bin LINUX_BACKPORT(hex2bin) int __must_check hex2bin(u8 *dst, const char *src, size_t count); #endif /* < 3.2 */ #if LINUX_VERSION_IS_LESS(3,18,0) #undef clamp #define clamp(val, lo, hi) min((typeof(val))max(val, lo), hi) #endif /* < 3.18 */ #if LINUX_VERSION_IS_LESS(4,6,0) #define kstrtobool LINUX_BACKPORT(kstrtobool) int __must_check kstrtobool(const char *s, bool *res); #define kstrtobool_from_user LINUX_BACKPORT(kstrtobool_from_user) int __must_check kstrtobool_from_user(const char __user *s, size_t count, bool *res); #endif #if LINUX_VERSION_IS_LESS(4,5,0) #undef abs /** * abs - return absolute value of an argument * @x: the value. If it is unsigned type, it is converted to signed type first. * char is treated as if it was signed (regardless of whether it really is) * but the macro's return type is preserved as char. * * Return: an absolute value of x. */ #define abs(x) __abs_choose_expr(x, long long, \ __abs_choose_expr(x, long, \ __abs_choose_expr(x, int, \ __abs_choose_expr(x, short, \ __abs_choose_expr(x, char, \ __builtin_choose_expr( \ __builtin_types_compatible_p(typeof(x), char), \ (char)({ signed char __x = (x); __x<0?-__x:__x; }), \ ((void)0))))))) #define __abs_choose_expr(x, type, other) __builtin_choose_expr( \ __builtin_types_compatible_p(typeof(x), signed type) || \ __builtin_types_compatible_p(typeof(x), unsigned type), \ ({ signed type __x = (x); __x < 0 ? -__x : __x; }), other) #endif #if LINUX_VERSION_IS_LESS(3,14,0) && \ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6) static inline u32 reciprocal_scale(u32 val, u32 ep_ro) { return (u32)(((u64) val * ep_ro) >> 32); } #endif /* LINUX_VERSION_IS_LESS(3,14,0) */ #if LINUX_VERSION_IS_LESS(3,18,0) #define bin2hex LINUX_BACKPORT(bin2hex) extern char *bin2hex(char *dst, const void *src, size_t count); #endif #endif /* __BACKPORT_KERNEL_H */ /* * We have to do this outside the include guard, because * our own header (linux/export.h) has to include kernel.h * indirectly (through module.h) and then undefs pr_fmt. * Then, when the real kernel.h gets included again, it's * not defined and we get problems ...
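* The fallback definition below (pr_fmt(msg) expanding to plain msg) matches
* the kernel's own default, so messages are unchanged when a driver has not
* set up a prefix of its own.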
*/ #ifndef pr_fmt #define pr_fmt(msg) msg #endif backport-iwlwifi-dkms-7906/backport-include/linux/key.h000066400000000000000000000030501351064634500232250ustar00rootroot00000000000000#ifndef __BP_KEY_H #define __BP_KEY_H #ifndef CPTCFG_BPAUTO_BUILD_SYSTEM_DATA_VERIFICATION #include_next #else #include #include #include #include typedef uint32_t key_perm_t; struct key { refcount_t refcount; const char *description; s32 serial; struct list_head list; struct asymmetric_key_ids kids; struct public_key *public_key; struct public_key_signature *sig; bool keyring; }; typedef struct __key_reference_with_attributes *key_ref_t; static inline key_ref_t make_key_ref(const struct key *key, bool possession) { return (key_ref_t) ((unsigned long) key | possession); } static inline struct key *key_ref_to_ptr(const key_ref_t key_ref) { return (struct key *) ((unsigned long) key_ref & ~1UL); } #define key_put LINUX_BACKPORT(key_put) extern void key_put(struct key *key); static inline void key_ref_put(key_ref_t key_ref) { key_put(key_ref_to_ptr(key_ref)); } #define key_create_or_update(keyring, type, desc, payload, plen, perm, flags) \ bp_key_create_or_update(keyring, desc, payload, plen) extern key_ref_t bp_key_create_or_update(key_ref_t keyring, const char *description, const void *payload, size_t plen); #define keyring_alloc(desc, uid, gid, cred, perm, flags, restrict, dest) \ bp_keyring_alloc(); extern struct key *bp_keyring_alloc(void); static inline s32 key_serial(const struct key *key) { return key ? key->serial : 0; } #endif /* CPTCFG_BPAUTO_BUILD_SYSTEM_DATA_VERIFICATION */ #endif /* __BP_KEY_H */ backport-iwlwifi-dkms-7906/backport-include/linux/kfifo.h000066400000000000000000000024721351064634500235420ustar00rootroot00000000000000#ifndef BACKPORT_LINUX_KFIFO_H #define BACKPORT_LINUX_KFIFO_H #include #include_next #if LINUX_VERSION_IS_LESS(3,13,0) #undef kfifo_put /** * kfifo_put - put data into the fifo * @fifo: address of the fifo to be used * @val: the data to be added * * This macro copies the given value into the fifo. * It returns 0 if the fifo was full. Otherwise it returns the number of * processed elements. * * Note that with only one concurrent reader and one concurrent * writer, you don't need extra locking to use this macro. */ #define kfifo_put(fifo, val) \ ({ \ typeof((fifo) + 1) __tmp = (fifo); \ typeof((&val) + 1) __val = (&val); \ unsigned int __ret; \ const size_t __recsize = sizeof(*__tmp->rectype); \ struct __kfifo *__kfifo = &__tmp->kfifo; \ if (0) { \ typeof(__tmp->ptr_const) __dummy __attribute__ ((unused)); \ __dummy = (typeof(__val))NULL; \ } \ if (__recsize) \ __ret = __kfifo_in_r(__kfifo, __val, sizeof(*__val), \ __recsize); \ else { \ __ret = !kfifo_is_full(__tmp); \ if (__ret) { \ (__is_kfifo_ptr(__tmp) ?
\ ((typeof(__tmp->type))__kfifo->data) : \ (__tmp->buf) \ )[__kfifo->in & __tmp->kfifo.mask] = \ *(typeof(__tmp->type))__val; \ smp_wmb(); \ __kfifo->in++; \ } \ } \ __ret; \ }) #endif #endif /* BACKPORT_LINUX_KFIFO_H */ backport-iwlwifi-dkms-7906/backport-include/linux/kmemleak.h000066400000000000000000000002311351064634500242210ustar00rootroot00000000000000#ifndef __BACKPORT_KMEMLEAK_H #define __BACKPORT_KMEMLEAK_H #include #include_next #endif /* __BACKPORT_KMEMLEAK_H */ backport-iwlwifi-dkms-7906/backport-include/linux/ktime.h000066400000000000000000000016571351064634500235610ustar00rootroot00000000000000#ifndef __BACKPORT_LINUX_KTIME_H #define __BACKPORT_LINUX_KTIME_H #include_next #include #include #if LINUX_VERSION_IS_LESS(3,16,0) && \ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6) /** * ktime_before - Compare if a ktime_t value is smaller than another one. * @cmp1: comparable1 * @cmp2: comparable2 * * Return: true if cmp1 happened before cmp2. */ static inline bool ktime_before(const ktime_t cmp1, const ktime_t cmp2) { return ktime_compare(cmp1, cmp2) < 0; } #endif #if LINUX_VERSION_IS_LESS(3,17,0) && \ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6) #define ktime_get_raw LINUX_BACKPORT(ktime_get_raw) extern ktime_t ktime_get_raw(void); #endif /* < 3.17 */ #ifndef ktime_to_timespec64 /* Map the ktime_t to timespec conversion to ns_to_timespec function */ #define ktime_to_timespec64(kt) ns_to_timespec64((kt).tv64) #endif #endif /* __BACKPORT_LINUX_KTIME_H */ backport-iwlwifi-dkms-7906/backport-include/linux/leds.h000066400000000000000000000045741351064634500234000ustar00rootroot00000000000000#ifndef __BACKPORT_LINUX_LEDS_H #define __BACKPORT_LINUX_LEDS_H #include_next #include #include #ifndef CPTCFG_BPAUTO_BUILD_LEDS #if LINUX_VERSION_IS_LESS(3,6,0) /* * Backports * * commit 959d62fa865d2e616b61a509e1cc5b88741f065e * Author: Shuah Khan * Date: Thu Jun 14 04:34:30 2012 +0800 * * leds: Rename led_brightness_set() to led_set_brightness() * * Rename leds external interface led_brightness_set() to led_set_brightness(). * This is the second phase of the change to reduce confusion between the * leds internal and external interfaces that set brightness. With this change, * now the external interface is led_set_brightness(). The first phase renamed * the internal interface led_set_brightness() to __led_set_brightness(). * There are no changes to the interface implementations. * * Signed-off-by: Shuah Khan * Signed-off-by: Bryan Wu */ #define led_set_brightness(_dev, _switch) led_brightness_set(_dev, _switch) #endif /* LINUX_VERSION_IS_LESS(3,6,0) */ #endif /* CPTCFG_BPAUTO_BUILD_LEDS */ #if LINUX_VERSION_IS_LESS(4,2,0) /* * There is no LINUX_BACKPORT() guard here because we want it to point to * the original function which is exported normally. */ #ifdef CONFIG_LEDS_TRIGGERS extern void led_trigger_remove(struct led_classdev *led_cdev); #else static inline void led_trigger_remove(struct led_classdev *led_cdev) {} #endif #endif #if LINUX_VERSION_IS_LESS(4,5,0) && \ LINUX_VERSION_IS_GEQ(3,19,0) #define led_set_brightness_sync LINUX_BACKPORT(led_set_brightness_sync) /** * led_set_brightness_sync - set LED brightness synchronously * @led_cdev: the LED to set * @brightness: the brightness to set it to * * Set an LED's brightness immediately. This function will block * the caller for the time required for accessing device registers, * and it can sleep. 
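* * Example (illustrative only, assuming a registered struct led_classdev *cdev):
*	int err = led_set_brightness_sync(cdev, LED_FULL);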
* * Returns: 0 on success or negative error value on failure */ extern int led_set_brightness_sync(struct led_classdev *led_cdev, enum led_brightness value); #endif /* < 4.5 && >= 3.19 */ #if LINUX_VERSION_IS_LESS(4,5,0) #define devm_led_trigger_register LINUX_BACKPORT(devm_led_trigger_register) extern int devm_led_trigger_register(struct device *dev, struct led_trigger *trigger); #endif /* < 4.5 */ #endif /* __BACKPORT_LINUX_LEDS_H */ backport-iwlwifi-dkms-7906/backport-include/linux/list.h000066400000000000000000000055441351064634500234220ustar00rootroot00000000000000#ifndef __BACKPORT_LIST_H #define __BACKPORT_LIST_H #include_next #include #if LINUX_VERSION_IS_LESS(3,9,0) /** * backport: * * commit 0bbacca7c3911451cea923b0ad6389d58e3d9ce9 * Author: Sasha Levin * Date: Thu Feb 7 12:32:18 2013 +1100 * * hlist: drop the node parameter from iterators */ #include #undef hlist_entry_safe #define hlist_entry_safe(ptr, type, member) \ ({ typeof(ptr) ____ptr = (ptr); \ ____ptr ? hlist_entry(____ptr, type, member) : NULL; \ }) #define hlist_for_each_entry4(tpos, pos, head, member) \ for (pos = (head)->first; \ pos && \ ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;});\ pos = pos->next) #define hlist_for_each_entry_safe5(tpos, pos, n, head, member) \ for (pos = (head)->first; \ pos && ({ n = pos->next; 1; }) && \ ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;});\ pos = n) #define hlist_for_each_entry3(pos, head, member) \ for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member); \ pos; \ pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) #define hlist_for_each_entry_safe4(pos, n, head, member) \ for (pos = hlist_entry_safe((head)->first, typeof(*pos), member); \ pos && ({ n = pos->member.next; 1; }); \ pos = hlist_entry_safe(n, typeof(*pos), member)) #undef hlist_for_each_entry #define hlist_for_each_entry(...) \ macro_dispatcher(hlist_for_each_entry, __VA_ARGS__)(__VA_ARGS__) #undef hlist_for_each_entry_safe #define hlist_for_each_entry_safe(...) \ macro_dispatcher(hlist_for_each_entry_safe, __VA_ARGS__)(__VA_ARGS__) #endif #ifndef list_first_entry_or_null /** * list_first_entry_or_null - get the first element from a list * @ptr: the list head to take the element from. * @type: the type of the struct this is embedded in. * @member: the name of the list_struct within the struct. * * Note that if the list is empty, it returns NULL. */ #define list_first_entry_or_null(ptr, type, member) \ (!list_empty(ptr) ? list_first_entry(ptr, type, member) : NULL) #endif /* list_first_entry_or_null */ #ifndef list_next_entry /** * list_next_entry - get the next element in list * @pos: the type * to cursor * @member: the name of the list_struct within the struct. */ #define list_next_entry(pos, member) \ list_entry((pos)->member.next, typeof(*(pos)), member) #endif /* list_next_entry */ #ifndef list_last_entry /** * list_last_entry - get the last element from a list * @ptr: the list head to take the element from. * @type: the type of the struct this is embedded in. * @member: the name of the list_struct within the struct. * * Note, that list is expected to be not empty. 
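* * Example (illustrative, with a hypothetical struct foo that embeds a
* struct list_head named node):
*	struct foo *last = list_last_entry(&foo_list, struct foo, node);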
*/ #define list_last_entry(ptr, type, member) \ list_entry((ptr)->prev, type, member) #endif #endif /* __BACKPORT_LIST_H */ backport-iwlwifi-dkms-7906/backport-include/linux/list_nulls.h000066400000000000000000000003311351064634500246250ustar00rootroot00000000000000#ifndef __BACKPORT_LIST_NULLS #define __BACKPORT_LIST_NULLS #include_next #ifndef NULLS_MARKER #define NULLS_MARKER(value) (1UL | (((long)value) << 1)) #endif #endif /* __BACKPORT_LIST_NULLS */ backport-iwlwifi-dkms-7906/backport-include/linux/lockdep.h000066400000000000000000000007551351064634500240670ustar00rootroot00000000000000#ifndef __BACKPORT_LINUX_LOCKDEP_H #define __BACKPORT_LINUX_LOCKDEP_H #include_next #include #if LINUX_VERSION_IS_LESS(3,9,0) #undef lockdep_assert_held #ifdef CONFIG_LOCKDEP #define lockdep_assert_held(l) do { \ WARN_ON(debug_locks && !lockdep_is_held(l)); \ } while (0) #else #define lockdep_assert_held(l) do { (void)(l); } while (0) #endif /* CONFIG_LOCKDEP */ #endif /* LINUX_VERSION_IS_LESS(3,9,0) */ #endif /* __BACKPORT_LINUX_LOCKDEP_H */ backport-iwlwifi-dkms-7906/backport-include/linux/math64.h000066400000000000000000000013401351064634500235400ustar00rootroot00000000000000#ifndef __BACKPORT_LINUX_MATH64_H #define __BACKPORT_LINUX_MATH64_H #include_next #if LINUX_VERSION_IS_LESS(3,12,0) #if BITS_PER_LONG == 64 /** * div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder */ #define div64_u64_rem LINUX_BACKPORT(div64_u64_rem) static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder) { *remainder = dividend % divisor; return dividend / divisor; } #elif BITS_PER_LONG == 32 #ifndef div64_u64_rem #define div64_u64_rem LINUX_BACKPORT(div64_u64_rem) #define backports_div64_u64_rem_add 1 extern u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder); #endif #endif /* BITS_PER_LONG */ #endif /* < 3.12 */ #endif /* __BACKPORT_LINUX_MATH64_H */ backport-iwlwifi-dkms-7906/backport-include/linux/mdio.h000066400000000000000000000055161351064634500233740ustar00rootroot00000000000000#ifndef __BACKPORT_LINUX_MDIO_H #define __BACKPORT_LINUX_MDIO_H #include_next #ifndef MDIO_EEE_100TX /* EEE Supported/Advertisement/LP Advertisement registers. * * EEE capability Register (3.20), Advertisement (7.60) and * Link partner ability (7.61) registers share the same * bit masks. */ #define MDIO_AN_EEE_ADV_100TX 0x0002 /* Advertise 100TX EEE cap */ #define MDIO_AN_EEE_ADV_1000T 0x0004 /* Advertise 1000T EEE cap */ /* Note: the two defines above may already be used by user-land, * so we cannot remove them now. * So, we define the new generic MDIO_EEE_100TX and MDIO_EEE_1000T macros * using the previous ones (that can be considered obsolete). */ #define MDIO_EEE_100TX MDIO_AN_EEE_ADV_100TX /* 100TX EEE cap */ #define MDIO_EEE_1000T MDIO_AN_EEE_ADV_1000T /* 1000T EEE cap */ #define MDIO_EEE_10GT 0x0008 /* 10GT EEE cap */ #define MDIO_EEE_1000KX 0x0010 /* 1000KX EEE cap */ #define MDIO_EEE_10GKX4 0x0020 /* 10G KX4 EEE cap */ #define MDIO_EEE_10GKR 0x0040 /* 10G KR EEE cap */ #endif /* MDIO_EEE_100TX */ #if LINUX_VERSION_IS_LESS(3,7,0) /** * mmd_eee_adv_to_ethtool_adv_t * @eee_adv: value of the MMD EEE Advertisement/Link Partner Ability registers * * A small helper function that translates the MMD EEE Advertisement (7.60) * and MMD EEE Link Partner Ability (7.61) bits to ethtool advertisement * settings.
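* * Example (illustrative): a register value of
* (MDIO_EEE_100TX | MDIO_EEE_1000T) translates to
* (ADVERTISED_100baseT_Full | ADVERTISED_1000baseT_Full).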
*/ #define mmd_eee_adv_to_ethtool_adv_t LINUX_BACKPORT(mmd_eee_adv_to_ethtool_adv_t) static inline u32 mmd_eee_adv_to_ethtool_adv_t(u16 eee_adv) { u32 adv = 0; if (eee_adv & MDIO_EEE_100TX) adv |= ADVERTISED_100baseT_Full; if (eee_adv & MDIO_EEE_1000T) adv |= ADVERTISED_1000baseT_Full; if (eee_adv & MDIO_EEE_10GT) adv |= ADVERTISED_10000baseT_Full; if (eee_adv & MDIO_EEE_1000KX) adv |= ADVERTISED_1000baseKX_Full; if (eee_adv & MDIO_EEE_10GKX4) adv |= ADVERTISED_10000baseKX4_Full; if (eee_adv & MDIO_EEE_10GKR) adv |= ADVERTISED_10000baseKR_Full; return adv; } #define ethtool_adv_to_mmd_eee_adv_t LINUX_BACKPORT(ethtool_adv_to_mmd_eee_adv_t) /** * ethtool_adv_to_mmd_eee_adv_t * @adv: the ethtool advertisement settings * * A small helper function that translates ethtool advertisement settings * to EEE advertisements for the MMD EEE Advertisement (7.60) and * MMD EEE Link Partner Ability (7.61) registers. */ static inline u16 ethtool_adv_to_mmd_eee_adv_t(u32 adv) { u16 reg = 0; if (adv & ADVERTISED_100baseT_Full) reg |= MDIO_EEE_100TX; if (adv & ADVERTISED_1000baseT_Full) reg |= MDIO_EEE_1000T; if (adv & ADVERTISED_10000baseT_Full) reg |= MDIO_EEE_10GT; if (adv & ADVERTISED_1000baseKX_Full) reg |= MDIO_EEE_1000KX; if (adv & ADVERTISED_10000baseKX4_Full) reg |= MDIO_EEE_10GKX4; if (adv & ADVERTISED_10000baseKR_Full) reg |= MDIO_EEE_10GKR; return reg; } #endif /* LINUX_VERSION_IS_LESS(3,7,0) */ #endif /* __BACKPORT_LINUX_MDIO_H */ backport-iwlwifi-dkms-7906/backport-include/linux/mei_cl_bus.h000066400000000000000000000022351351064634500245420ustar00rootroot00000000000000#ifndef __BACKPORT_LINUX_MEI_CL_BUS_H #define __BACKPORT_LINUX_MEI_CL_BUS_H #include_next #if LINUX_VERSION_IS_LESS(4,3,0) #define mei_cldev_register_event_cb(cldev, event_mask, read_cb, context) \ mei_cl_register_event_cb(cldev, read_cb, context) #elif LINUX_VERSION_IS_LESS(4,4,0) #define mei_cldev_register_event_cb(cldev, event_mask, read_cb, context) \ mei_cl_register_event_cb(cldev, event_mask, read_cb, context) #endif #if LINUX_VERSION_IS_LESS(4,4,0) #define __mei_cldev_driver_register(cldrv, owner) __mei_cl_driver_register(cldrv, owner) #define mei_cldev_driver_register(cldrv) mei_cl_driver_register(cldrv) #define mei_cldev_driver_unregister(cldrv) mei_cl_driver_unregister(cldrv) #define mei_cldev_send(cldev, buf, length) mei_cl_send(cldev, buf, length) #define mei_cldev_recv(cldev, buf, length) mei_cl_recv(cldev, buf, length) #define mei_cldev_get_drvdata(cldev) mei_cl_get_drvdata(cldev) #define mei_cldev_set_drvdata(cldev, data) mei_cl_set_drvdata(cldev, data) #define mei_cldev_enable(cldev) mei_cl_enable_device(cldev) #define mei_cldev_disable(cldev) mei_cl_disable_device(cldev) #endif #endif /* __BACKPORT_LINUX_MEI_CL_BUS_H */ backport-iwlwifi-dkms-7906/backport-include/linux/mii.h000066400000000000000000000077161351064634500232300ustar00rootroot00000000000000#ifndef __BACKPORT_LINUX_MII_H #define __BACKPORT_LINUX_MII_H #include_next #include #if LINUX_VERSION_IS_LESS(3,3,0) #include #define ethtool_adv_to_mii_adv_t LINUX_BACKPORT(ethtool_adv_to_mii_adv_t) static inline u32 ethtool_adv_to_mii_adv_t(u32 ethadv) { u32 result = 0; if (ethadv & ADVERTISED_10baseT_Half) result |= ADVERTISE_10HALF; if (ethadv & ADVERTISED_10baseT_Full) result |= ADVERTISE_10FULL; if (ethadv & ADVERTISED_100baseT_Half) result |= ADVERTISE_100HALF; if (ethadv & ADVERTISED_100baseT_Full) result |= ADVERTISE_100FULL; if (ethadv & ADVERTISED_Pause) result |= ADVERTISE_PAUSE_CAP; if (ethadv & ADVERTISED_Asym_Pause) result |= ADVERTISE_PAUSE_ASYM; 
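/* result now carries the ADVERTISE_* bits for every requested ethtool mode */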
return result; } #define mii_adv_to_ethtool_adv_t LINUX_BACKPORT(mii_adv_to_ethtool_adv_t) static inline u32 mii_adv_to_ethtool_adv_t(u32 adv) { u32 result = 0; if (adv & ADVERTISE_10HALF) result |= ADVERTISED_10baseT_Half; if (adv & ADVERTISE_10FULL) result |= ADVERTISED_10baseT_Full; if (adv & ADVERTISE_100HALF) result |= ADVERTISED_100baseT_Half; if (adv & ADVERTISE_100FULL) result |= ADVERTISED_100baseT_Full; if (adv & ADVERTISE_PAUSE_CAP) result |= ADVERTISED_Pause; if (adv & ADVERTISE_PAUSE_ASYM) result |= ADVERTISED_Asym_Pause; return result; } #define ethtool_adv_to_mii_ctrl1000_t LINUX_BACKPORT(ethtool_adv_to_mii_ctrl1000_t) static inline u32 ethtool_adv_to_mii_ctrl1000_t(u32 ethadv) { u32 result = 0; if (ethadv & ADVERTISED_1000baseT_Half) result |= ADVERTISE_1000HALF; if (ethadv & ADVERTISED_1000baseT_Full) result |= ADVERTISE_1000FULL; return result; } #define mii_ctrl1000_to_ethtool_adv_t LINUX_BACKPORT(mii_ctrl1000_to_ethtool_adv_t) static inline u32 mii_ctrl1000_to_ethtool_adv_t(u32 adv) { u32 result = 0; if (adv & ADVERTISE_1000HALF) result |= ADVERTISED_1000baseT_Half; if (adv & ADVERTISE_1000FULL) result |= ADVERTISED_1000baseT_Full; return result; } #define mii_lpa_to_ethtool_lpa_t LINUX_BACKPORT(mii_lpa_to_ethtool_lpa_t) static inline u32 mii_lpa_to_ethtool_lpa_t(u32 lpa) { u32 result = 0; if (lpa & LPA_LPACK) result |= ADVERTISED_Autoneg; return result | mii_adv_to_ethtool_adv_t(lpa); } #define mii_stat1000_to_ethtool_lpa_t LINUX_BACKPORT(mii_stat1000_to_ethtool_lpa_t) static inline u32 mii_stat1000_to_ethtool_lpa_t(u32 lpa) { u32 result = 0; if (lpa & LPA_1000HALF) result |= ADVERTISED_1000baseT_Half; if (lpa & LPA_1000FULL) result |= ADVERTISED_1000baseT_Full; return result; } #define ethtool_adv_to_mii_adv_x LINUX_BACKPORT(ethtool_adv_to_mii_adv_x) static inline u32 ethtool_adv_to_mii_adv_x(u32 ethadv) { u32 result = 0; if (ethadv & ADVERTISED_1000baseT_Half) result |= ADVERTISE_1000XHALF; if (ethadv & ADVERTISED_1000baseT_Full) result |= ADVERTISE_1000XFULL; if (ethadv & ADVERTISED_Pause) result |= ADVERTISE_1000XPAUSE; if (ethadv & ADVERTISED_Asym_Pause) result |= ADVERTISE_1000XPSE_ASYM; return result; } #define mii_adv_to_ethtool_adv_x LINUX_BACKPORT(mii_adv_to_ethtool_adv_x) static inline u32 mii_adv_to_ethtool_adv_x(u32 adv) { u32 result = 0; if (adv & ADVERTISE_1000XHALF) result |= ADVERTISED_1000baseT_Half; if (adv & ADVERTISE_1000XFULL) result |= ADVERTISED_1000baseT_Full; if (adv & ADVERTISE_1000XPAUSE) result |= ADVERTISED_Pause; if (adv & ADVERTISE_1000XPSE_ASYM) result |= ADVERTISED_Asym_Pause; return result; } #define mii_lpa_to_ethtool_lpa_x LINUX_BACKPORT(mii_lpa_to_ethtool_lpa_x) static inline u32 mii_lpa_to_ethtool_lpa_x(u32 lpa) { u32 result = 0; if (lpa & LPA_LPACK) result |= ADVERTISED_Autoneg; return result | mii_adv_to_ethtool_adv_x(lpa); } #endif #if LINUX_VERSION_IN_RANGE(4,6,0, 4,11,0) extern int mii_ethtool_get_link_ksettings( struct mii_if_info *mii, struct ethtool_link_ksettings *cmd); extern int mii_ethtool_set_link_ksettings( struct mii_if_info *mii, const struct ethtool_link_ksettings *cmd); #endif /* LINUX_VERSION_IN_RANGE(4,6,0, 4,11,0) */ #endif /* __BACKPORT_LINUX_MII_H */ backport-iwlwifi-dkms-7906/backport-include/linux/miscdevice.h000066400000000000000000000003131351064634500245470ustar00rootroot00000000000000#ifndef _BACKPORT_LINUX_MISCDEVICE_H #define _BACKPORT_LINUX_MISCDEVICE_H #include_next #ifndef VHCI_MINOR #define VHCI_MINOR 137 #endif #endif /* _BACKPORT_LINUX_MISCDEVICE_H */ 
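/*
 * Usage sketch for the MII <-> ethtool conversion helpers in mii.h above.
 * Illustrative only: the driver context, the @mii pointer and the error
 * handling are assumed here and are not taken from this tree.
 *
 *	static void example_update_adv(struct mii_if_info *mii, u32 ethadv)
 *	{
 *		u16 adv = ethtool_adv_to_mii_adv_t(ethadv);
 *
 *		mii->mdio_write(mii->dev, mii->phy_id, MII_ADVERTISE, adv);
 *	}
 */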
backport-iwlwifi-dkms-7906/backport-include/linux/mm.h000066400000000000000000000061551351064634500230570ustar00rootroot00000000000000#ifndef __BACKPORT_MM_H #define __BACKPORT_MM_H #include_next #include #include #include #include #include #if LINUX_VERSION_IS_LESS(3,15,0) #define kvfree LINUX_BACKPORT(kvfree) void kvfree(const void *addr); #endif /* < 3.15 */ #if LINUX_VERSION_IS_LESS(3,20,0) && \ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6) #define get_user_pages_locked LINUX_BACKPORT(get_user_pages_locked) long get_user_pages_locked(unsigned long start, unsigned long nr_pages, int write, int force, struct page **pages, int *locked); #define get_user_pages_unlocked LINUX_BACKPORT(get_user_pages_unlocked) long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages, int write, int force, struct page **pages); #elif LINUX_VERSION_IS_LESS(4,6,0) && \ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6) static inline long backport_get_user_pages_locked(unsigned long start, unsigned long nr_pages, int write, int force, struct page **pages, int *locked) { return get_user_pages_locked(current, current->mm, start, nr_pages, write, force, pages, locked); } #define get_user_pages_locked LINUX_BACKPORT(get_user_pages_locked) static inline long backport_get_user_pages_unlocked(unsigned long start, unsigned long nr_pages, int write, int force, struct page **pages) { return get_user_pages_unlocked(current, current->mm, start, nr_pages, write, force, pages); } #define get_user_pages_unlocked LINUX_BACKPORT(get_user_pages_unlocked) #endif #if LINUX_VERSION_IS_LESS(4,6,0) static inline long backport_get_user_pages(unsigned long start, unsigned long nr_pages, int write, int force, struct page **pages, struct vm_area_struct **vmas) { return get_user_pages(current, current->mm, start, nr_pages, write, force, pages, vmas); } #define get_user_pages LINUX_BACKPORT(get_user_pages) #endif #ifndef FOLL_TRIED #define FOLL_TRIED 0x800 /* a retry, previous pass started an IO */ #endif #if LINUX_VERSION_IS_LESS(4,1,9) && \ LINUX_VERSION_IS_GEQ(3,6,0) && \ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6) #define page_is_pfmemalloc LINUX_BACKPORT(page_is_pfmemalloc) static inline bool page_is_pfmemalloc(struct page *page) { return page->pfmemalloc; } #endif /* < 4.2 */ #if LINUX_VERSION_IS_LESS(4,12,0) #define kvmalloc LINUX_BACKPORT(kvmalloc) static inline void *kvmalloc(size_t size, gfp_t flags) { gfp_t kmalloc_flags = flags; void *ret; if ((flags & GFP_KERNEL) != GFP_KERNEL) return kmalloc(size, flags); if (size > PAGE_SIZE) kmalloc_flags |= __GFP_NOWARN | __GFP_NORETRY; ret = kmalloc(size, flags); if (ret || size < PAGE_SIZE) return ret; return vmalloc(size); } #define kvmalloc_array LINUX_BACKPORT(kvmalloc_array) static inline void *kvmalloc_array(size_t n, size_t size, gfp_t flags) { size_t bytes; if (unlikely(check_mul_overflow(n, size, &bytes))) return NULL; return kvmalloc(bytes, flags); } #define kvzalloc LINUX_BACKPORT(kvzalloc) static inline void *kvzalloc(size_t size, gfp_t flags) { return kvmalloc(size, flags | __GFP_ZERO); } #endif #endif /* __BACKPORT_MM_H */ backport-iwlwifi-dkms-7906/backport-include/linux/mmc/000077500000000000000000000000001351064634500230425ustar00rootroot00000000000000backport-iwlwifi-dkms-7906/backport-include/linux/mmc/host.h000066400000000000000000000007401351064634500241710ustar00rootroot00000000000000#ifndef _BACKPORTLINUX_MMC_HOST_H #define _BACKPORTLINUX_MMC_HOST_H #include_next #include #include #if LINUX_VERSION_IS_LESS(3,16,0) #define mmc_card_hs 
LINUX_BACKPORT(mmc_card_hs) static inline int mmc_card_hs(struct mmc_card *card) { return card->host->ios.timing == MMC_TIMING_SD_HS || card->host->ios.timing == MMC_TIMING_MMC_HS; } #endif /* LINUX_VERSION_IS_LESS(3,16,0) */ #endif /* _BACKPORTLINUX_MMC_HOST_H */ backport-iwlwifi-dkms-7906/backport-include/linux/mmc/sdio.h000066400000000000000000000011031351064634500241440ustar00rootroot00000000000000#ifndef __BACKPORT_MMC_SDIO_H #define __BACKPORT_MMC_SDIO_H #include #include_next /* backports b4625dab */ #ifndef SDIO_CCCR_REV_3_00 #define SDIO_CCCR_REV_3_00 3 /* CCCR/FBR Version 3.00 */ #endif #ifndef SDIO_SDIO_REV_3_00 #define SDIO_SDIO_REV_3_00 4 /* SDIO Spec Version 3.00 */ #endif #ifndef SDIO_BUS_ECSI #define SDIO_BUS_ECSI 0x20 /* Enable continuous SPI interrupt */ #endif #ifndef SDIO_BUS_SCSI #define SDIO_BUS_SCSI 0x40 /* Support continuous SPI interrupt */ #endif #endif /* __BACKPORT_MMC_SDIO_H */ backport-iwlwifi-dkms-7906/backport-include/linux/mmc/sdio_func.h000066400000000000000000000004241351064634500251640ustar00rootroot00000000000000#ifndef __BACKPORT_MMC_SDIO_FUNC_H #define __BACKPORT_MMC_SDIO_FUNC_H #include #include_next #ifndef dev_to_sdio_func #define dev_to_sdio_func(d) container_of(d, struct sdio_func, dev) #endif #endif /* __BACKPORT_MMC_SDIO_FUNC_H */ backport-iwlwifi-dkms-7906/backport-include/linux/mmc/sdio_ids.h000066400000000000000000000005621351064634500250130ustar00rootroot00000000000000#ifndef __BACKPORT_MMC_SDIO_IDS_H #define __BACKPORT_MMC_SDIO_IDS_H #include #include_next #ifndef SDIO_CLASS_BT_AMP #define SDIO_CLASS_BT_AMP 0x09 /* Type-A Bluetooth AMP interface */ #endif #ifndef SDIO_DEVICE_ID_MARVELL_8688WLAN #define SDIO_DEVICE_ID_MARVELL_8688WLAN 0x9104 #endif #endif /* __BACKPORT_MMC_SDIO_IDS_H */ backport-iwlwifi-dkms-7906/backport-include/linux/mod_devicetable.h000066400000000000000000000006221351064634500255450ustar00rootroot00000000000000#ifndef __BACKPORT_MOD_DEVICETABLE_H #define __BACKPORT_MOD_DEVICETABLE_H #include_next #ifndef HID_BUS_ANY #define HID_BUS_ANY 0xffff #endif #ifndef HID_GROUP_ANY #define HID_GROUP_ANY 0x0000 #endif #ifndef HID_ANY_ID #define HID_ANY_ID (~0) #endif #endif /* __BACKPORT_MOD_DEVICETABLE_H */ backport-iwlwifi-dkms-7906/backport-include/linux/module.h000066400000000000000000000044301351064634500237250ustar00rootroot00000000000000#ifndef __BACKPORT_LINUX_MODULE_H #define __BACKPORT_LINUX_MODULE_H #include_next #include /* * The define overwriting module_init is based on the original module_init * which looks like this: * #define module_init(initfn) \ * static inline initcall_t __inittest(void) \ * { return initfn; } \ * int init_module(void) __attribute__((alias(#initfn))); * * To the call to the initfn we added the symbol dependency on compat * to make sure that compat.ko gets loaded for any compat modules. 
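* Illustration (not part of the original comment): after this override,
* module_init(foo_init) expands so that init_module() first calls
* backport_dependency_symbol() and then returns foo_init().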
*/ extern void backport_dependency_symbol(void); #ifdef BACKPORTS_GIT_TRACKED #define BACKPORT_MOD_VERSIONS MODULE_VERSION(BACKPORTS_GIT_TRACKED); #else #define BACKPORT_MOD_VERSIONS \ MODULE_VERSION("backported from " CPTCFG_KERNEL_NAME \ " (" CPTCFG_KERNEL_VERSION ")" \ " using backports " CPTCFG_VERSION); #endif #ifdef MODULE #undef module_init #define module_init(initfn) \ static int __init __init_backport(void) \ { \ backport_dependency_symbol(); \ return initfn(); \ } \ int init_module(void) __attribute__((alias("__init_backport")));\ BACKPORT_MOD_VERSIONS /* * The define overwriting module_exit is based on the original module_exit * which looks like this: * #define module_exit(exitfn) \ * static inline exitcall_t __exittest(void) \ * { return exitfn; } \ * void cleanup_module(void) __attribute__((alias(#exitfn))); * * We replaced the call to the actual function exitfn() with a call to our * function which calls the original exitfn() and then rcu_barrier(). * * As a module will not be unloaded that often, it should not have a big * performance impact when rcu_barrier() is called on every module exit, * even when no kfree_rcu() backport is used in that module. */ #undef module_exit #define module_exit(exitfn) \ static void __exit __exit_compat(void) \ { \ exitfn(); \ rcu_barrier(); \ } \ void cleanup_module(void) __attribute__((alias("__exit_compat"))); #endif #if LINUX_VERSION_IS_LESS(3,3,0) #undef param_check_bool #define param_check_bool(name, p) __param_check(name, p, bool) #endif #endif /* __BACKPORT_LINUX_MODULE_H */ backport-iwlwifi-dkms-7906/backport-include/linux/moduleparam.h000066400000000000000000000026751351064634500247530ustar00rootroot00000000000000#ifndef __BACKPORT_LINUX_MODULEPARAM_H #define __BACKPORT_LINUX_MODULEPARAM_H #include_next #if LINUX_VERSION_IS_LESS(4,2,0) #define kernel_param_lock LINUX_BACKPORT(kernel_param_lock) static inline void kernel_param_lock(struct module *mod) { __kernel_param_lock(); } #define kernel_param_unlock LINUX_BACKPORT(kernel_param_unlock) static inline void kernel_param_unlock(struct module *mod) { __kernel_param_unlock(); } #endif #if LINUX_VERSION_IS_LESS(3,8,0) #undef __MODULE_INFO #ifdef MODULE #define __MODULE_INFO(tag, name, info) \ static const char __UNIQUE_ID(name)[] \ __used __attribute__((section(".modinfo"), unused, aligned(1))) \ = __stringify(tag) "=" info #else /* !MODULE */ /* This struct is here for syntactic coherency, it is not used */ #define __MODULE_INFO(tag, name, info) \ struct __UNIQUE_ID(name) {} #endif #endif /* < 3.8 */ #if LINUX_VERSION_IS_LESS(3,17,0) && \ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6) extern struct kernel_param_ops param_ops_ullong; extern int param_set_ullong(const char *val, const struct kernel_param *kp); extern int param_get_ullong(char *buffer, const struct kernel_param *kp); #define param_check_ullong(name, p) __param_check(name, p, unsigned long long) #endif #ifndef module_param_hw_array #define module_param_hw_array(name, type, hwtype, nump, perm) \ module_param_array(name, type, nump, perm) #endif #endif /* __BACKPORT_LINUX_MODULEPARAM_H */ backport-iwlwifi-dkms-7906/backport-include/linux/net.h000066400000000000000000000075211351064634500232310ustar00rootroot00000000000000#ifndef __BACKPORT_LINUX_NET_H #define __BACKPORT_LINUX_NET_H #include_next #include /* This backports: * * commit 2033e9bf06f07e049bbc77e9452856df846714cc -- from v3.5 * Author: Neil Horman * Date: Tue May 29 09:30:40 2012 +0000 * * net: add MODULE_ALIAS_NET_PF_PROTO_NAME */ #ifndef
MODULE_ALIAS_NET_PF_PROTO_NAME #define MODULE_ALIAS_NET_PF_PROTO_NAME(pf, proto, name) \ MODULE_ALIAS("net-pf-" __stringify(pf) "-proto-" __stringify(proto) \ name) #endif #ifndef net_ratelimited_function #define net_ratelimited_function(function, ...) \ do { \ if (net_ratelimit()) \ function(__VA_ARGS__); \ } while (0) #define net_emerg_ratelimited(fmt, ...) \ net_ratelimited_function(pr_emerg, fmt, ##__VA_ARGS__) #define net_alert_ratelimited(fmt, ...) \ net_ratelimited_function(pr_alert, fmt, ##__VA_ARGS__) #define net_crit_ratelimited(fmt, ...) \ net_ratelimited_function(pr_crit, fmt, ##__VA_ARGS__) #define net_err_ratelimited(fmt, ...) \ net_ratelimited_function(pr_err, fmt, ##__VA_ARGS__) #define net_notice_ratelimited(fmt, ...) \ net_ratelimited_function(pr_notice, fmt, ##__VA_ARGS__) #define net_warn_ratelimited(fmt, ...) \ net_ratelimited_function(pr_warn, fmt, ##__VA_ARGS__) #define net_info_ratelimited(fmt, ...) \ net_ratelimited_function(pr_info, fmt, ##__VA_ARGS__) #define net_dbg_ratelimited(fmt, ...) \ net_ratelimited_function(pr_debug, fmt, ##__VA_ARGS__) #endif #ifndef DECLARE_SOCKADDR #define DECLARE_SOCKADDR(type, dst, src) \ type dst = ({ __sockaddr_check_size(sizeof(*dst)); (type) src; }) #endif /* * Avoid backporting this if a distro did the work already, this * takes the check a bit further than just using LINUX_BACKPORT() * namespace, curious if any distro will hit a wall with this. * Also curious if any distro will be daring enough to even try * to backport this to a release older than 3.5. */ #ifndef ___NET_RANDOM_STATIC_KEY_INIT /* * Backporting this before 3.5 is extremely tricky -- I tried, due * to the fact that it relies on static keys, which were refactored * and optimized through a series of generation of patches from jump * labels. These in turn have also been optimized through kernel revisions * and have architecture specific code, which if you commit to backporting * may affect tracing. My recommendation is that if you have a need for * static keys you just require at least 3.5 to remain sane. 
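* A typical caller of the net_get_random_once() helper defined below looks
* like this (illustrative):
*	static u32 secret;
*	net_get_random_once(&secret, sizeof(secret));
* called from inside a function, so the buffer is filled exactly once.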
*/ #if LINUX_VERSION_IS_GEQ(3,5,0) && !defined(net_get_random_once) #define __BACKPORT_NET_GET_RANDOM_ONCE 1 #endif #endif /* ___NET_RANDOM_STATIC_KEY_INIT */ #ifdef __BACKPORT_NET_GET_RANDOM_ONCE #define __net_get_random_once LINUX_BACKPORT(__net_get_random_once) bool __net_get_random_once(void *buf, int nbytes, bool *done, struct static_key *done_key); #ifdef HAVE_JUMP_LABEL #define ___NET_RANDOM_STATIC_KEY_INIT ((struct static_key) \ { .enabled = ATOMIC_INIT(0), .entries = (void *)1 }) #else /* !HAVE_JUMP_LABEL */ #define ___NET_RANDOM_STATIC_KEY_INIT STATIC_KEY_INIT_FALSE #endif /* HAVE_JUMP_LABEL */ #define net_get_random_once(buf, nbytes) \ ({ \ bool ___ret = false; \ static bool ___done = false; \ static struct static_key ___done_key = \ ___NET_RANDOM_STATIC_KEY_INIT; \ if (!static_key_true(&___done_key)) \ ___ret = __net_get_random_once(buf, \ nbytes, \ &___done, \ &___done_key); \ ___ret; \ }) #endif /* __BACKPORT_NET_GET_RANDOM_ONCE */ #if LINUX_VERSION_IS_LESS(4,2,0) #define sock_create_kern(net, family, type, proto, res) \ __sock_create(net, family, type, proto, res, 1) #endif #ifndef SOCKWQ_ASYNC_NOSPACE #define SOCKWQ_ASYNC_NOSPACE SOCK_ASYNC_NOSPACE #endif #ifndef SOCKWQ_ASYNC_WAITDATA #define SOCKWQ_ASYNC_WAITDATA SOCK_ASYNC_WAITDATA #endif #endif /* __BACKPORT_LINUX_NET_H */ backport-iwlwifi-dkms-7906/backport-include/linux/netdev_features.h000066400000000000000000000026541351064634500256310ustar00rootroot00000000000000#ifndef __BACKPORT_NETDEV_FEATURES_H #define __BACKPORT_NETDEV_FEATURES_H #include #if LINUX_VERSION_IS_LESS(3,3,0) #include #include /* added via 9356b8fc */ #define NETIF_F_HW_VLAN_CTAG_RX NETIF_F_HW_VLAN_RX #define NETIF_F_HW_VLAN_CTAG_TX NETIF_F_HW_VLAN_TX /* added via d314774c */ #define NETIF_F_HW_VLAN_CTAG_FILTER NETIF_F_HW_VLAN_FILTER /* c8f44aff made this u32 but later a861a8b2 changed it to u64 both on v3.3 */ typedef u32 netdev_features_t; #else #include_next #if LINUX_VERSION_IS_LESS(3,10,0) /* See commit f646968f8f on next-20130423 */ #define NETIF_F_HW_VLAN_CTAG_TX_BIT NETIF_F_HW_VLAN_TX_BIT #define NETIF_F_HW_VLAN_CTAG_RX_BIT NETIF_F_HW_VLAN_RX_BIT #define NETIF_F_HW_VLAN_CTAG_FILTER_BIT NETIF_F_HW_VLAN_FILTER_BIT #define NETIF_F_HW_VLAN_CTAG_FILTER NETIF_F_HW_VLAN_FILTER #define NETIF_F_HW_VLAN_CTAG_RX NETIF_F_HW_VLAN_RX #define NETIF_F_HW_VLAN_CTAG_TX NETIF_F_HW_VLAN_TX #endif #endif /* LINUX_VERSION_IS_LESS(3,3,0) */ #if !defined(NETIF_F_RXCSUM) #define NETIF_F_RXCSUM 0 #endif #if !defined(NETIF_F_RXALL) #define NETIF_F_RXALL 0 #endif #if !defined(NETIF_F_RXFCS) #define NETIF_F_RXFCS 0 #endif /* this was renamed in commit 53692b1de : sctp: Rename NETIF_F_SCTP_CSUM to NETIF_F_SCTP_CRC */ #ifndef NETIF_F_SCTP_CRC #define NETIF_F_SCTP_CRC __NETIF_F(SCTP_CSUM) #endif #endif /* __BACKPORT_NETDEV_FEATURES_H */ backport-iwlwifi-dkms-7906/backport-include/linux/netdevice.h000066400000000000000000000260741351064634500244160ustar00rootroot00000000000000#ifndef __BACKPORT_NETDEVICE_H #define __BACKPORT_NETDEVICE_H #include_next #include #include #include /* * This is declared implicitly in newer kernels by netdevice.h using * this pointer in struct net_device, but declare it here anyway so * pointers to it are accepted as function arguments without warning. */ struct inet6_dev; /* older kernels don't include this here, we need it */ #include #include /* * new kernels include which * has this ... 
and some drivers rely on it :-( */ #include #if LINUX_VERSION_IS_LESS(3,14,0) && \ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6) /* * Backports note: if in-kernel support is provided we could then just * take the kernel's implementation of __dev_kfree_skb_irq() as it requires * raise_softirq_irqoff() which is not exported. For the backport case we * just use slightly less optimized version and we don't get the ability * to distinguish the two different reasons to free the skb -- whether it * was consumed or dropped. * * The upstream documentation for this: * * It is not allowed to call kfree_skb() or consume_skb() from hardware * interrupt context or with hardware interrupts being disabled. * (in_irq() || irqs_disabled()) * * We provide four helpers that can be used in following contexts : * * dev_kfree_skb_irq(skb) when caller drops a packet from irq context, * replacing kfree_skb(skb) * * dev_consume_skb_irq(skb) when caller consumes a packet from irq context. * Typically used in place of consume_skb(skb) in TX completion path * * dev_kfree_skb_any(skb) when caller doesn't know its current irq context, * replacing kfree_skb(skb) * * dev_consume_skb_any(skb) when caller doesn't know its current irq context, * and consumed a packet. Used in place of consume_skb(skb) */ #define skb_free_reason LINUX_BACKPORT(skb_free_reason) enum skb_free_reason { SKB_REASON_CONSUMED, SKB_REASON_DROPPED, }; #define __dev_kfree_skb_irq LINUX_BACKPORT(__dev_kfree_skb_irq) static inline void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason) { dev_kfree_skb_irq(skb); } #define __dev_kfree_skb_any LINUX_BACKPORT(__dev_kfree_skb_any) static inline void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason) { dev_kfree_skb_any(skb); } #define dev_consume_skb_irq LINUX_BACKPORT(dev_consume_skb_irq) static inline void dev_consume_skb_irq(struct sk_buff *skb) { dev_kfree_skb_irq(skb); } #define dev_consume_skb_any LINUX_BACKPORT(dev_consume_skb_any) static inline void dev_consume_skb_any(struct sk_buff *skb) { dev_kfree_skb_any(skb); } #if (LINUX_VERSION_CODE != KERNEL_VERSION(3,13,11) || UTS_UBUNTU_RELEASE_ABI < 24) struct pcpu_sw_netstats { u64 rx_packets; u64 rx_bytes; u64 tx_packets; u64 tx_bytes; struct u64_stats_sync syncp; }; #endif #define netdev_tstats(dev) ((struct pcpu_sw_netstats *)dev->ml_priv) #define netdev_assign_tstats(dev, e) dev->ml_priv = (e); #else #define netdev_tstats(dev) dev->tstats #define netdev_assign_tstats(dev, e) dev->tstats = (e); #endif /* LINUX_VERSION_IS_LESS(3,14,0) */ #if LINUX_VERSION_IS_LESS(3,7,8) #define netdev_set_default_ethtool_ops LINUX_BACKPORT(netdev_set_default_ethtool_ops) extern void netdev_set_default_ethtool_ops(struct net_device *dev, const struct ethtool_ops *ops); #endif #if LINUX_VERSION_IS_LESS(3,3,0) /* * BQL was added as of v3.3 but some Linux distributions * have backported BQL to their v3.2 kernels or older. 
To * address this we assume that they also enabled CONFIG_BQL * and test for that here and simply avoid adding the static * inlines if it was defined */ #ifndef CONFIG_BQL #define netdev_tx_sent_queue LINUX_BACKPORT(netdev_tx_sent_queue) static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue, unsigned int bytes) { } #define netdev_sent_queue LINUX_BACKPORT(netdev_sent_queue) static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes) { } #define netdev_tx_completed_queue LINUX_BACKPORT(netdev_tx_completed_queue) static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue, unsigned pkts, unsigned bytes) { } #define netdev_completed_queue LINUX_BACKPORT(netdev_completed_queue) static inline void netdev_completed_queue(struct net_device *dev, unsigned pkts, unsigned bytes) { } #define netdev_tx_reset_queue LINUX_BACKPORT(netdev_tx_reset_queue) static inline void netdev_tx_reset_queue(struct netdev_queue *q) { } #define netdev_reset_queue LINUX_BACKPORT(netdev_reset_queue) static inline void netdev_reset_queue(struct net_device *dev_queue) { } #endif /* CONFIG_BQL */ #endif /* < 3.3 */ #ifndef NETDEV_PRE_UP #define NETDEV_PRE_UP 0x000D #endif #if LINUX_VERSION_IS_LESS(3,11,0) #define netdev_notifier_info_to_dev(ndev) ndev #endif #if LINUX_VERSION_IS_LESS(3,7,0) #define netdev_notify_peers(dev) netif_notify_peers(dev) #define napi_gro_flush(napi, old) napi_gro_flush(napi) #endif #ifndef IFF_LIVE_ADDR_CHANGE #define IFF_LIVE_ADDR_CHANGE 0x100000 #endif #ifndef IFF_SUPP_NOFCS #define IFF_SUPP_NOFCS 0x80000 /* device supports sending custom FCS */ #endif #ifndef IFF_UNICAST_FLT #define IFF_UNICAST_FLT 0x20000 /* Supports unicast filtering */ #endif #ifndef QUEUE_STATE_ANY_XOFF #define __QUEUE_STATE_DRV_XOFF __QUEUE_STATE_XOFF #define __QUEUE_STATE_STACK_XOFF __QUEUE_STATE_XOFF #endif #if LINUX_VERSION_IS_LESS(3,17,0) #ifndef NET_NAME_UNKNOWN #define NET_NAME_UNKNOWN 0 #endif #ifndef NET_NAME_ENUM #define NET_NAME_ENUM 1 #endif #ifndef NET_NAME_PREDICTABLE #define NET_NAME_PREDICTABLE 2 #endif #ifndef NET_NAME_USER #define NET_NAME_USER 3 #endif #ifndef NET_NAME_RENAMED #define NET_NAME_RENAMED 4 #endif #define alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, txqs, rxqs) \ alloc_netdev_mqs(sizeof_priv, name, setup, txqs, rxqs) #undef alloc_netdev #define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \ alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, 1, 1) #undef alloc_netdev_mq #define alloc_netdev_mq(sizeof_priv, name, name_assign_type, setup, count) \ alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, count, \ count) #endif /* LINUX_VERSION_IS_LESS(3,17,0) */ /* * This backports this commit from upstream: * commit 87757a917b0b3c0787e0563c679762152be81312 * net: force a list_del() in unregister_netdevice_many() */ #if (!(LINUX_VERSION_IS_GEQ(3,10,45) && \ LINUX_VERSION_IS_LESS(3,11,0)) && \ !(LINUX_VERSION_IS_GEQ(3,12,23) && \ LINUX_VERSION_IS_LESS(3,13,0)) && \ !(LINUX_VERSION_IS_GEQ(3,14,9) && \ LINUX_VERSION_IS_LESS(3,15,0)) && \ !(LINUX_VERSION_IS_GEQ(3,15,2) && \ LINUX_VERSION_IS_LESS(3,16,0)) && \ LINUX_VERSION_IS_LESS(3,16,0)) static inline void backport_unregister_netdevice_many(struct list_head *head) { unregister_netdevice_many(head); if (!(head->next == LIST_POISON1 && head->prev == LIST_POISON2)) list_del(head); } #define unregister_netdevice_many LINUX_BACKPORT(unregister_netdevice_many) #endif #if LINUX_VERSION_IS_LESS(3,19,0) #define napi_alloc_frag(fragsz) netdev_alloc_frag(fragsz) 
#endif #if LINUX_VERSION_IS_LESS(3,19,0) && \ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6) /* RSS keys are 40 or 52 bytes long */ #define NETDEV_RSS_KEY_LEN 52 #define netdev_rss_key_fill LINUX_BACKPORT(netdev_rss_key_fill) void netdev_rss_key_fill(void *buffer, size_t len); #endif /* LINUX_VERSION_IS_LESS(3,19,0) */ #if LINUX_VERSION_IS_LESS(3,19,0) #define napi_alloc_skb LINUX_BACKPORT(napi_alloc_skb) static inline struct sk_buff *napi_alloc_skb(struct napi_struct *napi, unsigned int length) { return netdev_alloc_skb_ip_align(napi->dev, length); } #endif /* LINUX_VERSION_IS_LESS(3,19,0) */ #ifndef IFF_TX_SKB_SHARING #define IFF_TX_SKB_SHARING 0 #endif #if LINUX_VERSION_IS_LESS(4,1,0) && \ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6) netdev_features_t passthru_features_check(struct sk_buff *skb, struct net_device *dev, netdev_features_t features); #endif /* LINUX_VERSION_IS_LESS(4,1,0) */ #if LINUX_VERSION_IS_LESS(4,2,0) #undef u64_stats_init static inline void u64_stats_init(struct u64_stats_sync *syncp) { #if BITS_PER_LONG == 32 && defined(CONFIG_SMP) seqcount_init(&syncp->seq); #endif } #endif /* LINUX_VERSION_IS_LESS(4,2,0) */ #ifndef netdev_alloc_pcpu_stats #define netdev_alloc_pcpu_stats(type) \ ({ \ typeof(type) __percpu *pcpu_stats = alloc_percpu(type); \ if (pcpu_stats) { \ int i; \ for_each_possible_cpu(i) { \ typeof(type) *stat; \ stat = per_cpu_ptr(pcpu_stats, i); \ u64_stats_init(&stat->syncp); \ } \ } \ pcpu_stats; \ }) #endif /* netdev_alloc_pcpu_stats */ #if LINUX_VERSION_IS_LESS(3,19,0) #define napi_complete_done LINUX_BACKPORT(napi_complete_done) static inline void napi_complete_done(struct napi_struct *n, int work_done) { napi_complete(n); } #endif /* < 3.19 */ #if LINUX_VERSION_IS_LESS(4,5,0) #define netif_tx_napi_add LINUX_BACKPORT(netif_tx_napi_add) /** * netif_tx_napi_add - initialize a napi context * @dev: network device * @napi: napi context * @poll: polling function * @weight: default weight * * This variant of netif_napi_add() should be used from drivers using NAPI * to exclusively poll a TX queue. * This will avoid we add it into napi_hash[], thus polluting this hash table. */ static inline void netif_tx_napi_add(struct net_device *dev, struct napi_struct *napi, int (*poll)(struct napi_struct *, int), int weight) { netif_napi_add(dev, napi, poll, weight); } #endif /* < 4.5 */ #ifndef NETIF_F_CSUM_MASK #define NETIF_F_CSUM_MASK NETIF_F_ALL_CSUM #endif #if LINUX_VERSION_IS_LESS(4,7,0) && \ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6) #define netif_trans_update LINUX_BACKPORT(netif_trans_update) static inline void netif_trans_update(struct net_device *dev) { dev->trans_start = jiffies; } #endif #if LINUX_VERSION_IS_LESS(4,11,9) #define netdev_set_priv_destructor(_dev, _destructor) \ (_dev)->destructor = __ ## _destructor #define netdev_set_def_destructor(_dev) \ (_dev)->destructor = free_netdev #else #define netdev_set_priv_destructor(_dev, _destructor) \ (_dev)->needs_free_netdev = true; \ (_dev)->priv_destructor = (_destructor); #define netdev_set_def_destructor(_dev) \ (_dev)->needs_free_netdev = true; #endif #if LINUX_VERSION_IS_LESS(4,15,0) static inline int _bp_netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev) { #if LINUX_VERSION_IS_LESS(3,9,0) return 0; #else return netdev_upper_dev_link(dev, upper_dev); #endif } #define netdev_upper_dev_link3(dev, upper, extack) \ netdev_upper_dev_link(dev, upper) #define netdev_upper_dev_link2(dev, upper) \ netdev_upper_dev_link(dev, upper) #define netdev_upper_dev_link(...) 
\ macro_dispatcher(netdev_upper_dev_link, __VA_ARGS__)(__VA_ARGS__) #endif #if LINUX_VERSION_IS_LESS(5,0,0) static inline int backport_dev_open(struct net_device *dev, struct netlink_ext_ack *extack) { return dev_open(dev); } #define dev_open LINUX_BACKPORT(dev_open) #endif #endif /* __BACKPORT_NETDEVICE_H */ backport-iwlwifi-dkms-7906/backport-include/linux/netlink.h000066400000000000000000000033111351064634500241010ustar00rootroot00000000000000#ifndef __BACKPORT_LINUX_NETLINK_H #define __BACKPORT_LINUX_NETLINK_H #include_next #include #if LINUX_VERSION_IS_LESS(4,14,0) && \ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6) struct nla_bitfield32 { __u32 value; __u32 selector; }; #endif #if LINUX_VERSION_IS_LESS(4,12,0) #define NETLINK_MAX_COOKIE_LEN 20 struct netlink_ext_ack { const char *_msg; const struct nlattr *bad_attr; u8 cookie[NETLINK_MAX_COOKIE_LEN]; u8 cookie_len; /* backport only field */ void *__bp_doit; }; #define NL_SET_ERR_MSG(extack, msg) do { \ static const char _msg[] = (msg); \ \ (extack)->_msg = _msg; \ } while (0) #endif #ifndef NL_SET_ERR_MSG_ATTR #define NL_SET_ERR_MSG_ATTR(extack, attr, msg) do { \ static const char __msg[] = msg; \ struct netlink_ext_ack *__extack = (extack); \ \ if (__extack) { \ __extack->_msg = __msg; \ __extack->bad_attr = (attr); \ } \ } while (0) #endif /* this is for patches we apply */ #if LINUX_VERSION_IS_LESS(3,7,0) #define netlink_notify_portid(__notify) (__notify->pid) #define NETLINK_CB_PORTID(__skb) NETLINK_CB(__skb).pid #else #define netlink_notify_portid(__notify) (__notify->portid) #define NETLINK_CB_PORTID(__skb) NETLINK_CB(__skb).portid #endif #ifndef NL_SET_BAD_ATTR #define NL_SET_BAD_ATTR(extack, attr) do { \ if ((extack)) \ (extack)->bad_attr = (attr); \ } while (0) #endif /* NL_SET_BAD_ATTR */ #if LINUX_VERSION_IS_LESS(5,0,0) static inline void nl_set_extack_cookie_u64(struct netlink_ext_ack *extack, u64 cookie) { u64 __cookie = cookie; memcpy(extack->cookie, &__cookie, sizeof(__cookie)); extack->cookie_len = sizeof(__cookie); } #endif #endif /* __BACKPORT_LINUX_NETLINK_H */ backport-iwlwifi-dkms-7906/backport-include/linux/nl80211.h000066400000000000000000000003721351064634500234460ustar00rootroot00000000000000#ifndef __BACKPORT_LINUX_NL80211_H #define __BACKPORT_LINUX_NL80211_H #include_next #include #if LINUX_VERSION_IS_LESS(3,3,0) #define NL80211_FEATURE_SK_TX_STATUS 0 #endif #endif /* __BACKPORT_LINUX_NL80211_H */ backport-iwlwifi-dkms-7906/backport-include/linux/nospec.h000066400000000000000000000005701351064634500237300ustar00rootroot00000000000000#ifndef _BACKPORT_LINUX_NOSPEC_H #define _BACKPORT_LINUX_NOSPEC_H #if LINUX_VERSION_IS_GEQ(4,15,2) || \ LINUX_VERSION_IN_RANGE(4,14,18, 4,15,0) || \ LINUX_VERSION_IN_RANGE(4,9,81, 4,10,0) || \ LINUX_VERSION_IN_RANGE(4,4,118, 4,5,0) #include_next #else #define array_index_nospec(index, size) (index) #endif #endif /* _BACKPORT_LINUX_NOSPEC_H */ backport-iwlwifi-dkms-7906/backport-include/linux/of.h000066400000000000000000000162251351064634500230510ustar00rootroot00000000000000#ifndef _COMPAT_LINUX_OF_H #define _COMPAT_LINUX_OF_H 1 #include #include_next #if LINUX_VERSION_IS_LESS(3,7,0) #ifdef CONFIG_OF extern struct device_node *of_get_child_by_name(const struct device_node *node, const char *name); #else static inline struct device_node *of_get_child_by_name( const struct device_node *node, const char *name) { return NULL; } #endif /* CONFIG_OF */ #endif /* LINUX_VERSION_IS_LESS(3,7,0) */ #if LINUX_VERSION_IS_LESS(3,7,0) #ifndef CONFIG_OF static inline struct device_node 
*of_find_node_by_name(struct device_node *from, const char *name) { return NULL; } #endif /* CONFIG_OF */ #endif /* LINUX_VERSION_IS_LESS(3,7,0) */ #if LINUX_VERSION_IS_LESS(3,8,0) #define of_property_read_u8_array LINUX_BACKPORT(of_property_read_u8_array) #ifdef CONFIG_OF extern int of_property_read_u8_array(const struct device_node *np, const char *propname, u8 *out_values, size_t sz); #else static inline int of_property_read_u8_array(const struct device_node *np, const char *propname, u8 *out_values, size_t sz) { return -ENOSYS; } #endif /* CONFIG_OF */ #endif /* LINUX_VERSION_IS_LESS(3,8,0) */ #if LINUX_VERSION_IS_LESS(3,1,0) #define of_property_read_u32_array LINUX_BACKPORT(of_property_read_u32_array) #ifdef CONFIG_OF extern int of_property_read_u32_array(const struct device_node *np, const char *propname, u32 *out_values, size_t sz); #else static inline int of_property_read_u32_array(const struct device_node *np, const char *propname, u32 *out_values, size_t sz) { return -ENOSYS; } #endif /* CONFIG_OF */ #define of_property_read_u32 LINUX_BACKPORT(of_property_read_u32) static inline int of_property_read_u32(const struct device_node *np, const char *propname, u32 *out_value) { return of_property_read_u32_array(np, propname, out_value, 1); } #ifndef CONFIG_OF #define of_get_property LINUX_BACKPORT(of_get_property) static inline const void *of_get_property(const struct device_node *node, const char *name, int *lenp) { return NULL; } #endif #endif /* LINUX_VERSION_IS_LESS(3,1,0) */ #if LINUX_VERSION_IS_LESS(3,10,0) #define of_property_read_u32_index LINUX_BACKPORT(of_property_read_u32_index) #ifdef CONFIG_OF extern int of_property_read_u32_index(const struct device_node *np, const char *propname, u32 index, u32 *out_value); #else static inline int of_property_read_u32_index(const struct device_node *np, const char *propname, u32 index, u32 *out_value) { return -ENOSYS; } #endif /* CONFIG_OF */ #endif /* LINUX_VERSION_IS_LESS(3,10,0) */ #if LINUX_VERSION_IS_LESS(3,15,0) #define of_property_count_elems_of_size LINUX_BACKPORT(of_property_count_elems_of_size) #ifdef CONFIG_OF extern int of_property_count_elems_of_size(const struct device_node *np, const char *propname, int elem_size); #else static inline int of_property_count_elems_of_size(const struct device_node *np, const char *propname, int elem_size) { return -ENOSYS; } #endif /* CONFIG_OF */ #endif /* LINUX_VERSION_IS_LESS(3,15,0) */ #if LINUX_VERSION_IS_LESS(3,15,0) /** * of_property_count_u32_elems - Count the number of u32 elements in a property * * @np: device node from which the property value is to be read. * @propname: name of the property to be searched. * * Search for a property in a device node and count the number of u32 elements * in it. Returns number of elements on sucess, -EINVAL if the property does * not exist or its length does not match a multiple of u32 and -ENODATA if the * property does not have a value. 
*/ #define of_property_count_u32_elems LINUX_BACKPORT(of_property_count_u32_elems) static inline int of_property_count_u32_elems(const struct device_node *np, const char *propname) { return of_property_count_elems_of_size(np, propname, sizeof(u32)); } #endif /* LINUX_VERSION_IS_LESS(3,15,0) */ #if LINUX_VERSION_IS_LESS(3,3,0) #ifndef CONFIG_OF #define of_node_get LINUX_BACKPORT(of_node_get) /* Dummy ref counting routines - to be implemented later */ static inline struct device_node *of_node_get(struct device_node *node) { return node; } static inline void of_node_put(struct device_node *node) { } #endif /* CONFIG_OF */ #endif /* LINUX_VERSION_IS_LESS(3,3,0) */ #ifndef of_match_ptr #ifdef CONFIG_OF #define of_match_ptr(_ptr) (_ptr) #else #define of_match_ptr(_ptr) NULL #endif /* CONFIG_OF */ #endif /* of_match_ptr */ #ifndef for_each_compatible_node #define for_each_compatible_node(dn, type, compatible) \ for (dn = of_find_compatible_node(NULL, type, compatible); dn; \ dn = of_find_compatible_node(dn, type, compatible)) #endif /* for_each_compatible_node */ #if LINUX_VERSION_IS_LESS(3,3,0) #ifndef CONFIG_OF static inline struct device_node *of_find_compatible_node( struct device_node *from, const char *type, const char *compat) { return NULL; } #endif #endif #if LINUX_VERSION_IS_LESS(3,18,0) #define of_property_read_u64_array LINUX_BACKPORT(of_property_read_u64_array) #ifdef CONFIG_OF /* This is static in the kernel, but we need it in multiple places */ void *of_find_property_value_of_size(const struct device_node *np, const char *propname, u32 len); extern int of_property_read_u64_array(const struct device_node *np, const char *propname, u64 *out_values, size_t sz); #else static inline int of_property_read_u64_array(const struct device_node *np, const char *propname, u64 *out_values, size_t sz) { return -ENOSYS; } #endif /* CONFIG_OF */ #endif /* LINUX_VERSION_IS_LESS(3,15,0) */ #if LINUX_VERSION_IS_LESS(3,6,0) #define of_node_full_name LINUX_BACKPORT(of_node_full_name) #ifdef CONFIG_OF static inline const char *of_node_full_name(const struct device_node *np) { return np ? np->full_name : ""; } #else static inline const char* of_node_full_name(const struct device_node *np) { return ""; } #endif /* CONFIG_OF */ #endif /* < 3.6 */ #ifndef for_each_child_of_node #define for_each_child_of_node(parent, child) \ while (0) #endif #ifndef CONFIG_OF #if LINUX_VERSION_IS_LESS(3,10,0) static inline int of_device_is_available(const struct device_node *device) { return 0; } #endif #if LINUX_VERSION_IS_LESS(3,6,0) && !LINUX_VERSION_IN_RANGE(3,2,70, 3,3,0) static inline int of_property_match_string(struct device_node *np, const char *propname, const char *string) { return -ENOSYS; } #endif #if LINUX_VERSION_IS_LESS(3,2,0) static inline struct property *of_find_property(const struct device_node *np, const char *name, int *lenp) { return NULL; } static inline int of_device_is_compatible(const struct device_node *device, const char *name) { return 0; } static inline struct device_node *of_parse_phandle(struct device_node *np, const char *phandle_name, int index) { return NULL; } #define of_match_node(_matches, _node) NULL #endif #endif /* CONFIG_OF */ #if LINUX_VERSION_IS_LESS(3,4,0) && !LINUX_VERSION_IN_RANGE(3,2,44, 3,3,0) static inline bool of_property_read_bool(const struct device_node *np, const char *propname) { struct property *prop = of_find_property(np, propname, NULL); return prop ? 
true : false; } #endif #endif /* _COMPAT_LINUX_OF_H */ backport-iwlwifi-dkms-7906/backport-include/linux/of_address.h000066400000000000000000000010001351064634500245370ustar00rootroot00000000000000#ifndef __BACKPORT_OF_ADDRESS_H #define __BACKPORT_OF_ADDRESS_H #include_next #include #if LINUX_VERSION_IS_LESS(4,4,0) && !defined(CONFIG_OF_ADDRESS) #ifndef OF_BAD_ADDR #define OF_BAD_ADDR ((u64)-1) #endif #define of_translate_address LINUX_BACKPORT(of_translate_addres) static inline u64 of_translate_address(struct device_node *np, const __be32 *addr) { return OF_BAD_ADDR; } #endif /* LINUX_VERSION_IS_LESS(4,4,0) */ #endif /* __BACKPORT_OF_IRQ_H */ backport-iwlwifi-dkms-7906/backport-include/linux/of_device.h000066400000000000000000000012121351064634500243560ustar00rootroot00000000000000#ifndef __BP_OF_DEVICE_H #define __BP_OF_DEVICE_H #include_next #include #if LINUX_VERSION_IS_LESS(4,18,0) static inline int backport_of_dma_configure(struct device *dev, struct device_node *np, bool force_dma) { #if LINUX_VERSION_IS_GEQ(4,15,0) dev->bus->force_dma = force_dma; return of_dma_configure(dev, np); #elif LINUX_VERSION_IS_GEQ(4,12,0) return of_dma_configure(dev, np); #elif LINUX_VERSION_IS_GEQ(4,1,0) of_dma_configure(dev, np); return 0; #else return 0; #endif } #define of_dma_configure LINUX_BACKPORT(of_dma_configure) #endif /* < 4.18 */ #endif /* __BP_OF_DEVICE_H */ backport-iwlwifi-dkms-7906/backport-include/linux/of_irq.h000066400000000000000000000006421351064634500237200ustar00rootroot00000000000000#ifndef __BACKPORT_OF_IRQ_H #define __BACKPORT_OF_IRQ_H #include_next #include #if LINUX_VERSION_IS_LESS(3,5,0) && !defined(CONFIG_OF) #define irq_of_parse_and_map LINUX_BACKPORT(irq_of_parse_and_map) static inline unsigned int irq_of_parse_and_map(struct device_node *dev, int index) { return 0; } #endif /* LINUX_VERSION_IS_LESS(4,5,0) */ #endif /* __BACKPORT_OF_IRQ_H */ backport-iwlwifi-dkms-7906/backport-include/linux/of_net.h000066400000000000000000000004311351064634500237070ustar00rootroot00000000000000#ifndef _BP_OF_NET_H #define _BP_OF_NET_H #include_next #include #ifndef CONFIG_OF #if LINUX_VERSION_IS_LESS(3,10,0) static inline const void *of_get_mac_address(struct device_node *np) { return NULL; } #endif #endif #endif /* _BP_OF_NET_H */ backport-iwlwifi-dkms-7906/backport-include/linux/of_platform.h000066400000000000000000000023411351064634500247470ustar00rootroot00000000000000#ifndef __BACKPORT_LINUX_OF_PLATFORM_H #define __BACKPORT_LINUX_OF_PLATFORM_H #include_next #include #include /* upstream now includes this here and some people rely on it */ #include #if LINUX_VERSION_IS_LESS(3,4,0) && !defined(CONFIG_OF_DEVICE) struct of_dev_auxdata; #define of_platform_populate LINUX_BACKPORT(of_platform_populate) static inline int of_platform_populate(struct device_node *root, const struct of_device_id *matches, const struct of_dev_auxdata *lookup, struct device *parent) { return -ENODEV; } #endif /* LINUX_VERSION_IS_LESS(3,4,0) */ #if LINUX_VERSION_IS_LESS(3,11,0) && !defined(CONFIG_OF_DEVICE) extern const struct of_device_id of_default_bus_match_table[]; #endif /* LINUX_VERSION_IS_LESS(3,11,0) */ #if LINUX_VERSION_IS_LESS(4,3,0) && !defined(CONFIG_OF_DEVICE) struct of_dev_auxdata; #define of_platform_default_populate \ LINUX_BACKPORT(of_platform_default_populate) static inline int of_platform_default_populate(struct device_node *root, const struct of_dev_auxdata *lookup, struct device *parent) { return -ENODEV; } #endif /* LINUX_VERSION_IS_LESS(4,3,0) */ #endif /* __BACKPORT_LINUX_OF_PLATFORM_H */ 
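/*
 * Usage sketch (not a file from this tree) for the of_property_*()
 * helpers backported in of.h above -- a minimal example under assumed
 * bindings: the function name example_parse_dt() and the property
 * names "clock-frequency", "slow-mode" and "ranges" are hypothetical.
 */
#include <linux/of.h>

static int example_parse_dt(struct device_node *np, u32 *freq_out)
{
	int nelems;

	/* single u32 read; provided above for kernels < 3.1 */
	if (of_property_read_u32(np, "clock-frequency", freq_out))
		return -EINVAL;

	/*
	 * boolean flag test; of_property_read_bool() is provided
	 * above for kernels < 3.4
	 */
	if (of_property_read_bool(np, "slow-mode"))
		*freq_out /= 2;

	/* element counting; provided above for kernels < 3.15 */
	nelems = of_property_count_u32_elems(np, "ranges");
	return nelems < 0 ? nelems : 0;
}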
backport-iwlwifi-dkms-7906/backport-include/linux/olpc-ec.h000066400000000000000000000003541351064634500237630ustar00rootroot00000000000000#ifndef _COMPAT_LINUX_OLPC_EC_H #define _COMPAT_LINUX_OLPC_EC_H #include #if LINUX_VERSION_IS_GEQ(3,6,0) #include_next #endif /* LINUX_VERSION_IS_GEQ(3,6,0) */ #endif /* _COMPAT_LINUX_OLPC_EC_H */ backport-iwlwifi-dkms-7906/backport-include/linux/page_ref.h000066400000000000000000000010241351064634500242040ustar00rootroot00000000000000#ifndef __BP_PAGE_REF_H #define __BP_PAGE_REF_H #include #if LINUX_VERSION_IS_GEQ(4,6,0) || \ RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,6) #include_next #else static inline void page_ref_inc(struct page *page) { atomic_inc(&page->_count); } static inline int page_ref_count(struct page *page) { return atomic_read(&page->_count); } static inline int page_ref_sub_and_test(struct page *page, int nr) { return atomic_sub_and_test(nr, &page->_count); } #endif #endif /* __BP_PAGE_REF_H */ backport-iwlwifi-dkms-7906/backport-include/linux/pci.h000066400000000000000000000177031351064634500232220ustar00rootroot00000000000000#ifndef _BACKPORT_LINUX_PCI_H #define _BACKPORT_LINUX_PCI_H #include_next #include #ifndef module_pci_driver /** * module_pci_driver() - Helper macro for registering a PCI driver * @__pci_driver: pci_driver struct * * Helper macro for PCI drivers which do not do anything special in module * init/exit. This eliminates a lot of boilerplate. Each module may only * use this macro once, and calling it replaces module_init() and module_exit() */ #define module_pci_driver(__pci_driver) \ module_driver(__pci_driver, pci_register_driver, \ pci_unregister_driver) #endif #if LINUX_VERSION_IS_LESS(3,7,0) #define pcie_capability_read_word LINUX_BACKPORT(pcie_capability_read_word) int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val); #define pcie_capability_read_dword LINUX_BACKPORT(pcie_capability_read_dword) int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val); #define pcie_capability_write_word LINUX_BACKPORT(pcie_capability_write_word) int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val); #define pcie_capability_write_dword LINUX_BACKPORT(pcie_capability_write_dword) int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val); #define pcie_capability_clear_and_set_word LINUX_BACKPORT(pcie_capability_clear_and_set_word) int pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos, u16 clear, u16 set); #define pcie_capability_clear_and_set_dword LINUX_BACKPORT(pcie_capability_clear_and_set_dword) int pcie_capability_clear_and_set_dword(struct pci_dev *dev, int pos, u32 clear, u32 set); #define pcie_capability_set_word LINUX_BACKPORT(pcie_capability_set_word) static inline int pcie_capability_set_word(struct pci_dev *dev, int pos, u16 set) { return pcie_capability_clear_and_set_word(dev, pos, 0, set); } #define pcie_capability_set_dword LINUX_BACKPORT(pcie_capability_set_dword) static inline int pcie_capability_set_dword(struct pci_dev *dev, int pos, u32 set) { return pcie_capability_clear_and_set_dword(dev, pos, 0, set); } #define pcie_capability_clear_word LINUX_BACKPORT(pcie_capability_clear_word) static inline int pcie_capability_clear_word(struct pci_dev *dev, int pos, u16 clear) { return pcie_capability_clear_and_set_word(dev, pos, clear, 0); } #define pcie_capability_clear_dword LINUX_BACKPORT(pcie_capability_clear_dword) static inline int pcie_capability_clear_dword(struct pci_dev *dev, int pos, u32 clear) { return 
pcie_capability_clear_and_set_dword(dev, pos, clear, 0); } #endif #ifndef PCI_DEVICE_SUB /** * PCI_DEVICE_SUB - macro used to describe a specific pci device with subsystem * @vend: the 16 bit PCI Vendor ID * @dev: the 16 bit PCI Device ID * @subvend: the 16 bit PCI Subvendor ID * @subdev: the 16 bit PCI Subdevice ID * * This macro is used to create a struct pci_device_id that matches a * specific device with subsystem information. */ #define PCI_DEVICE_SUB(vend, dev, subvend, subdev) \ .vendor = (vend), .device = (dev), \ .subvendor = (subvend), .subdevice = (subdev) #endif /* PCI_DEVICE_SUB */ #if LINUX_VERSION_IS_LESS(3,2,0) #define pci_dev_flags LINUX_BACKPORT(pci_dev_flags) #define PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG LINUX_BACKPORT(PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG) #define PCI_DEV_FLAGS_NO_D3 LINUX_BACKPORT(PCI_DEV_FLAGS_NO_D3) #define PCI_DEV_FLAGS_ASSIGNED LINUX_BACKPORT(PCI_DEV_FLAGS_ASSIGNED) enum pci_dev_flags { /* INTX_DISABLE in PCI_COMMAND register disables MSI * generation too. */ PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG = (__force pci_dev_flags_t) 1, /* Device configuration is irrevocably lost if disabled into D3 */ PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) 2, /* Provide indication device is assigned by a Virtual Machine Manager */ PCI_DEV_FLAGS_ASSIGNED = (__force pci_dev_flags_t) 4, }; #endif /* LINUX_VERSION_IS_LESS(3,2,0) */ #if LINUX_VERSION_IS_LESS(3,8,0) #define pci_sriov_set_totalvfs LINUX_BACKPORT(pci_sriov_set_totalvfs) int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs); #endif /* LINUX_VERSION_IS_LESS(3,8,0) */ #if LINUX_VERSION_IS_LESS(3,10,0) /* Taken from drivers/pci/pci.h */ struct pci_sriov { int pos; /* capability position */ int nres; /* number of resources */ u32 cap; /* SR-IOV Capabilities */ u16 ctrl; /* SR-IOV Control */ u16 total_VFs; /* total VFs associated with the PF */ u16 initial_VFs; /* initial VFs associated with the PF */ u16 num_VFs; /* number of VFs available */ u16 offset; /* first VF Routing ID offset */ u16 stride; /* following VF stride */ u32 pgsz; /* page size for BAR alignment */ u8 link; /* Function Dependency Link */ u16 driver_max_VFs; /* max num VFs driver supports */ struct pci_dev *dev; /* lowest numbered PF */ struct pci_dev *self; /* this PF */ struct mutex lock; /* lock for VF bus */ struct work_struct mtask; /* VF Migration task */ u8 __iomem *mstate; /* VF Migration State Array */ }; #define pci_vfs_assigned LINUX_BACKPORT(pci_vfs_assigned) #ifdef CONFIG_PCI_IOV int pci_vfs_assigned(struct pci_dev *dev); #else static inline int pci_vfs_assigned(struct pci_dev *dev) { return 0; } #endif #endif /* LINUX_VERSION_IS_LESS(3,10,0) */ #if LINUX_VERSION_IS_LESS(4,8,0) #define pci_alloc_irq_vectors LINUX_BACKPORT(pci_alloc_irq_vectors) #ifdef CONFIG_PCI_MSI int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs, unsigned int max_vecs, unsigned int flags); #else static inline int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs, unsigned int max_vecs, unsigned int flags) { return -ENOSYS; } #endif #endif #if LINUX_VERSION_IS_LESS(4,8,0) #define pci_free_irq_vectors LINUX_BACKPORT(pci_free_irq_vectors) static inline void pci_free_irq_vectors(struct pci_dev *dev) { } #endif #if LINUX_VERSION_IS_LESS(3,14,0) #define pci_enable_msi_range LINUX_BACKPORT(pci_enable_msi_range) #ifdef CONFIG_PCI_MSI int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec); #else static inline int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec) { return -ENOSYS; } #endif #endif #ifdef 
CONFIG_PCI #if LINUX_VERSION_IS_LESS(3,14,0) #define pci_enable_msix_range LINUX_BACKPORT(pci_enable_msix_range) #ifdef CONFIG_PCI_MSI int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, int minvec, int maxvec); #else static inline int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, int minvec, int maxvec) { return -ENOSYS; } #endif #endif #endif #if LINUX_VERSION_IS_LESS(3,13,0) #define pci_device_is_present LINUX_BACKPORT(pci_device_is_present) bool pci_device_is_present(struct pci_dev *pdev); #endif #ifdef CONFIG_PCI #if LINUX_VERSION_IS_LESS(3,14,0) #define pci_enable_msix_exact LINUX_BACKPORT(pci_enable_msix_exact) #ifdef CONFIG_PCI_MSI static inline int pci_enable_msix_exact(struct pci_dev *dev, struct msix_entry *entries, int nvec) { int rc = pci_enable_msix_range(dev, entries, nvec, nvec); if (rc < 0) return rc; return 0; } #else static inline int pci_enable_msix_exact(struct pci_dev *dev, struct msix_entry *entries, int nvec) { return -ENOSYS; } #endif /* CONFIG_PCI_MSI */ #endif #endif /* CONFIG_PCI */ #if LINUX_VERSION_IS_LESS(4,9,0) && \ !LINUX_VERSION_IN_RANGE(3,12,69, 3,13,0) && \ !LINUX_VERSION_IN_RANGE(4,4,37, 4,5,0) && \ !LINUX_VERSION_IN_RANGE(4,8,13, 4,9,0) && \ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6) static inline struct pci_dev *pcie_find_root_port(struct pci_dev *dev) { while (1) { if (!pci_is_pcie(dev)) break; if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) return dev; if (!dev->bus->self) break; dev = dev->bus->self; } return NULL; } #endif/* <4.9.0 but not >= 3.12.69, 4.4.37, 4.8.13 */ #ifndef PCI_IRQ_LEGACY #define PCI_IRQ_LEGACY (1 << 0) /* Allow legacy interrupts */ #define PCI_IRQ_MSI (1 << 1) /* Allow MSI interrupts */ #define PCI_IRQ_MSIX (1 << 2) /* Allow MSI-X interrupts */ #define PCI_IRQ_ALL_TYPES \ (PCI_IRQ_LEGACY | PCI_IRQ_MSI | PCI_IRQ_MSIX) #endif #endif /* _BACKPORT_LINUX_PCI_H */ backport-iwlwifi-dkms-7906/backport-include/linux/percpu-defs.h000066400000000000000000000005641351064634500246610ustar00rootroot00000000000000#ifndef __BACKPORT_PERCPU_DEFS_H #define __BACKPORT_PERCPU_DEFS_H #include_next /* override this with the array-safe version */ #undef __verify_pcpu_ptr #define __verify_pcpu_ptr(ptr) \ do { \ const void __percpu *__vpp_verify = (typeof((ptr) + 0))NULL; \ (void)__vpp_verify; \ } while (0) #endif /* __BACKPORT_PERCPU_DEFS_H */ backport-iwlwifi-dkms-7906/backport-include/linux/percpu.h000066400000000000000000000010131351064634500237300ustar00rootroot00000000000000/* * Copyright (C) 2018 Intel Corporation */ #ifndef __BACKPORT_PERCPU_H #define __BACKPORT_PERCPU_H #include_next #if LINUX_VERSION_IS_LESS(3,18,0) && \ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6) static inline void __percpu *__alloc_gfp_warn(void) { WARN(1, "Cannot backport alloc_percpu_gfp"); return NULL; } #define alloc_percpu_gfp(type, gfp) \ ({ (gfp == GFP_KERNEL) ? 
alloc_percpu(type) : __alloc_gfp_warn(); }) #endif /* LINUX_VERSION_IS_LESS(3,18,0) */ #endif /* __BACKPORT_PERCPU_H */ backport-iwlwifi-dkms-7906/backport-include/linux/phy.h000066400000000000000000000040471351064634500232440ustar00rootroot00000000000000#ifndef __BACKPORT_LINUX_PHY_H #define __BACKPORT_LINUX_PHY_H #include_next #include #include #if LINUX_VERSION_IS_LESS(3,9,0) #define phy_connect(dev, bus_id, handler, interface) \ phy_connect(dev, bus_id, handler, 0, interface) #endif #if LINUX_VERSION_IS_LESS(4,5,0) #define phydev_name LINUX_BACKPORT(phydev_name) static inline const char *phydev_name(const struct phy_device *phydev) { return dev_name(&phydev->dev); } #define mdiobus_is_registered_device LINUX_BACKPORT(mdiobus_is_registered_device) static inline bool mdiobus_is_registered_device(struct mii_bus *bus, int addr) { return bus->phy_map[addr]; } #define phy_attached_print LINUX_BACKPORT(phy_attached_print) void phy_attached_print(struct phy_device *phydev, const char *fmt, ...) __printf(2, 3); #define phy_attached_info LINUX_BACKPORT(phy_attached_info) void phy_attached_info(struct phy_device *phydev); static inline int backport_mdiobus_register(struct mii_bus *bus) { bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); if (!bus->irq) { pr_err("mii_bus irq allocation failed\n"); return -ENOMEM; } memset(bus->irq, PHY_POLL, sizeof(int) * PHY_MAX_ADDR); /* in kernel 4.3 a #define for mdiobus_register is added to the kernel. */ #ifndef mdiobus_register return mdiobus_register(bus); #else return __mdiobus_register(bus, THIS_MODULE); #endif } #ifdef mdiobus_register #undef mdiobus_register #endif #define mdiobus_register LINUX_BACKPORT(mdiobus_register) static inline void backport_mdiobus_unregister(struct mii_bus *bus) { kfree(bus->irq); mdiobus_unregister(bus); } #define mdiobus_unregister LINUX_BACKPORT(mdiobus_unregister) #endif /* < 4.5 */ #if LINUX_VERSION_IS_LESS(4,5,0) #define phydev_get_addr LINUX_BACKPORT(phydev_get_addr) static inline int phydev_get_addr(struct phy_device *phydev) { return phydev->addr; } #else #define phydev_get_addr LINUX_BACKPORT(phydev_get_addr) static inline int phydev_get_addr(struct phy_device *phydev) { return phydev->mdio.addr; } #endif #endif /* __BACKPORT_LINUX_PHY_H */ backport-iwlwifi-dkms-7906/backport-include/linux/platform_device.h000066400000000000000000000017431351064634500256070ustar00rootroot00000000000000#ifndef __BACKPORT_PLATFORM_DEVICE_H #define __BACKPORT_PLATFORM_DEVICE_H #include_next #include #ifndef module_platform_driver_probe #define module_platform_driver_probe(__platform_driver, __platform_probe) \ static int __init __platform_driver##_init(void) \ { \ return platform_driver_probe(&(__platform_driver), \ __platform_probe); \ } \ module_init(__platform_driver##_init); \ static void __exit __platform_driver##_exit(void) \ { \ platform_driver_unregister(&(__platform_driver)); \ } \ module_exit(__platform_driver##_exit); #endif #ifndef PLATFORM_DEVID_NONE #define PLATFORM_DEVID_NONE (-1) #endif #ifndef PLATFORM_DEVID_AUTO #define PLATFORM_DEVID_AUTO (-1) #endif #ifndef module_platform_driver #define module_platform_driver(__platform_driver) \ module_driver(__platform_driver, platform_driver_register, \ platform_driver_unregister) #endif #endif /* __BACKPORT_PLATFORM_DEVICE_H */ backport-iwlwifi-dkms-7906/backport-include/linux/pm.h000066400000000000000000000005131351064634500230520ustar00rootroot00000000000000#ifndef __BACKPORT_PM_H #define __BACKPORT_PM_H #include_next #ifndef PM_EVENT_AUTO #define PM_EVENT_AUTO 
0x0400 #endif #ifndef PM_EVENT_SLEEP #define PM_EVENT_SLEEP (PM_EVENT_SUSPEND) #endif #ifndef PMSG_IS_AUTO #define PMSG_IS_AUTO(msg) (((msg).event & PM_EVENT_AUTO) != 0) #endif #endif /* __BACKPORT_PM_H */ backport-iwlwifi-dkms-7906/backport-include/linux/pm_qos.h000066400000000000000000000005261351064634500237400ustar00rootroot00000000000000#ifndef _COMPAT_LINUX_PM_QOS_H #define _COMPAT_LINUX_PM_QOS_H 1 #include #if LINUX_VERSION_IS_GEQ(3,2,0) #include_next #else #include #endif /* LINUX_VERSION_IS_GEQ(3,2,0) */ #ifndef PM_QOS_DEFAULT_VALUE #define PM_QOS_DEFAULT_VALUE -1 #endif #endif /* _COMPAT_LINUX_PM_QOS_H */ backport-iwlwifi-dkms-7906/backport-include/linux/pm_runtime.h000066400000000000000000000017271351064634500246250ustar00rootroot00000000000000#ifndef __BACKPORT_PM_RUNTIME_H #define __BACKPORT_PM_RUNTIME_H #include_next #if LINUX_VERSION_IS_LESS(3,9,0) #define pm_runtime_active LINUX_BACKPORT(pm_runtime_active) #ifdef CONFIG_PM static inline bool pm_runtime_active(struct device *dev) { return dev->power.runtime_status == RPM_ACTIVE || dev->power.disable_depth; } #else static inline bool pm_runtime_active(struct device *dev) { return true; } #endif /* CONFIG_PM */ #endif /* LINUX_VERSION_IS_LESS(3,9,0) */ #if LINUX_VERSION_IS_LESS(3,15,0) && \ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6) static inline int pm_runtime_force_suspend(struct device *dev) { #ifdef CONFIG_PM /* cannot backport properly, I think */ WARN_ON_ONCE(1); return -EINVAL; #endif return 0; } static inline int pm_runtime_force_resume(struct device *dev) { #ifdef CONFIG_PM /* cannot backport properly, I think */ WARN_ON_ONCE(1); return -EINVAL; #endif return 0; } #endif #endif /* __BACKPORT_PM_RUNTIME_H */ backport-iwlwifi-dkms-7906/backport-include/linux/pnp.h000066400000000000000000000011641351064634500232360ustar00rootroot00000000000000#ifndef __BACKPORT_LINUX_PNP_H #define __BACKPORT_LINUX_PNP_H #include_next #ifndef module_pnp_driver /** * module_pnp_driver() - Helper macro for registering a PnP driver * @__pnp_driver: pnp_driver struct * * Helper macro for PnP drivers which do not do anything special in module * init/exit. This eliminates a lot of boilerplate. Each module may only * use this macro once, and calling it replaces module_init() and module_exit() */ #define module_pnp_driver(__pnp_driver) \ module_driver(__pnp_driver, pnp_register_driver, \ pnp_unregister_driver) #endif #endif /* __BACKPORT_LINUX_PNP_H */ backport-iwlwifi-dkms-7906/backport-include/linux/poll.h000066400000000000000000000011001351064634500233750ustar00rootroot00000000000000#ifndef __BACKPORT_LINUX_POLL_H #define __BACKPORT_LINUX_POLL_H #include_next #include #include #if LINUX_VERSION_IS_LESS(3,4,0) #define poll_does_not_wait LINUX_BACKPORT(poll_does_not_wait) static inline bool poll_does_not_wait(const poll_table *p) { return p == NULL || p->qproc == NULL; } #define poll_requested_events LINUX_BACKPORT(poll_requested_events) static inline unsigned long poll_requested_events(const poll_table *p) { return p ? 
p->key : ~0UL; } #endif /* < 3.4 */ #endif /* __BACKPORT_LINUX_POLL_H */ backport-iwlwifi-dkms-7906/backport-include/linux/printk.h000066400000000000000000000114161351064634500237510ustar00rootroot00000000000000#ifndef _COMPAT_LINUX_PRINTK_H #define _COMPAT_LINUX_PRINTK_H 1 #include #include_next /* see pr_fmt at end of file */ #if LINUX_VERSION_IS_LESS(3,9,0) /* backports 7a555613 */ #if defined(CONFIG_DYNAMIC_DEBUG) #define dynamic_hex_dump(prefix_str, prefix_type, rowsize, \ groupsize, buf, len, ascii) \ do { \ DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, \ __builtin_constant_p(prefix_str) ? prefix_str : "hexdump");\ if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT)) \ print_hex_dump(KERN_DEBUG, prefix_str, \ prefix_type, rowsize, groupsize, \ buf, len, ascii); \ } while (0) #define print_hex_dump_debug(prefix_str, prefix_type, rowsize, \ groupsize, buf, len, ascii) \ dynamic_hex_dump(prefix_str, prefix_type, rowsize, \ groupsize, buf, len, ascii) #else #define print_hex_dump_debug(prefix_str, prefix_type, rowsize, \ groupsize, buf, len, ascii) \ print_hex_dump(KERN_DEBUG, prefix_str, prefix_type, rowsize, \ groupsize, buf, len, ascii) #endif /* defined(CONFIG_DYNAMIC_DEBUG) */ #endif /* LINUX_VERSION_IS_LESS(3,9,0) */ #ifndef pr_warn #define pr_warn pr_warning #endif #ifndef printk_once #define printk_once(x...) ({ \ static bool __print_once; \ \ if (!__print_once) { \ __print_once = true; \ printk(x); \ } \ }) #endif #ifndef printk_ratelimited /* * ratelimited messages with local ratelimit_state, * no local ratelimit_state used in the !PRINTK case */ #ifdef CONFIG_PRINTK #define printk_ratelimited(fmt, ...) \ ({ \ static DEFINE_RATELIMIT_STATE(_rs, \ DEFAULT_RATELIMIT_INTERVAL, \ DEFAULT_RATELIMIT_BURST); \ \ if (__ratelimit(&_rs)) \ printk(fmt, ##__VA_ARGS__); \ }) #else #define printk_ratelimited(fmt, ...) \ no_printk(fmt, ##__VA_ARGS__) #endif #endif /* printk_ratelimited */ #ifndef pr_emerg_ratelimited #define pr_emerg_ratelimited(fmt, ...) \ printk_ratelimited(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__) #endif /* pr_emerg_ratelimited */ #ifndef pr_alert_ratelimited #define pr_alert_ratelimited(fmt, ...) \ printk_ratelimited(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__) #endif /* pr_alert_ratelimited */ #ifndef pr_crit_ratelimited #define pr_crit_ratelimited(fmt, ...) \ printk_ratelimited(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__) #endif /* pr_crit_ratelimited */ #ifndef pr_err_ratelimited #define pr_err_ratelimited(fmt, ...) \ printk_ratelimited(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__) #endif /* pr_err_ratelimited */ #ifndef pr_warn_ratelimited #define pr_warn_ratelimited(fmt, ...) \ printk_ratelimited(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__) #endif /* pr_warn_ratelimited */ #ifndef pr_notice_ratelimited #define pr_notice_ratelimited(fmt, ...) \ printk_ratelimited(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__) #endif /* pr_notice_ratelimited */ #ifndef pr_info_ratelimited #define pr_info_ratelimited(fmt, ...) \ printk_ratelimited(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__) #endif /* pr_info_ratelimited */ /* no pr_cont_ratelimited, don't do that... */ #ifndef pr_devel_ratelimited #if defined(DEBUG) #define pr_devel_ratelimited(fmt, ...) \ printk_ratelimited(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) #else #define pr_devel_ratelimited(fmt, ...) 
\ no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) #endif #endif /* pr_devel_ratelimited */ #ifndef pr_debug_ratelimited /* If you are writing a driver, please use dev_dbg instead */ #if defined(CONFIG_DYNAMIC_DEBUG) /* descriptor check is first to prevent flooding with "callbacks suppressed" */ #define pr_debug_ratelimited(fmt, ...) \ do { \ static DEFINE_RATELIMIT_STATE(_rs, \ DEFAULT_RATELIMIT_INTERVAL, \ DEFAULT_RATELIMIT_BURST); \ DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \ if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) && \ __ratelimit(&_rs)) \ __dynamic_pr_debug(&descriptor, fmt, ##__VA_ARGS__); \ } while (0) #elif defined(DEBUG) #define pr_debug_ratelimited(fmt, ...) \ printk_ratelimited(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) #else #define pr_debug_ratelimited(fmt, ...) \ no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) #endif #endif /* pr_debug_ratelimited */ /* replace hex_dump_to_buffer() with a version which returns the length */ #if LINUX_VERSION_IS_LESS(4,0,0) #define hex_dump_to_buffer LINUX_BACKPORT(hex_dump_to_buffer) extern int hex_dump_to_buffer(const void *buf, size_t len, int rowsize, int groupsize, char *linebuf, size_t linebuflen, bool ascii); #endif #endif /* _COMPAT_LINUX_PRINTK_H */ /* This must be outside -- see also kernel.h */ #ifndef pr_fmt #define pr_fmt(fmt) fmt #endif backport-iwlwifi-dkms-7906/backport-include/linux/proc_fs.h000066400000000000000000000016021351064634500240710ustar00rootroot00000000000000#ifndef __BACKPORT_PROC_FS_H #define __BACKPORT_PROC_FS_H #include_next #include #if LINUX_VERSION_IS_LESS(3,10,0) #ifdef CONFIG_PROC_FS /* * backport of: * procfs: new helper - PDE_DATA(inode) */ #define PDE_DATA LINUX_BACKPORT(PDE_DATA) static inline void *PDE_DATA(const struct inode *inode) { return PROC_I(inode)->pde->data; } extern void proc_set_size(struct proc_dir_entry *, loff_t); extern void proc_set_user(struct proc_dir_entry *, kuid_t, kgid_t); #else #define PDE_DATA LINUX_BACKPORT(PDE_DATA) static inline void *PDE_DATA(const struct inode *inode) {BUG(); return NULL;} static inline void proc_set_size(struct proc_dir_entry *de, loff_t size) {} static inline void proc_set_user(struct proc_dir_entry *de, kuid_t uid, kgid_t gid) {} #endif /* CONFIG_PROC_FS */ #endif /* LINUX_VERSION_IS_LESS(3,10,0) */ #endif /* __BACKPORT_PROC_FS_H */ backport-iwlwifi-dkms-7906/backport-include/linux/property.h000066400000000000000000000007241351064634500243260ustar00rootroot00000000000000#ifndef __BACKPORT_LINUX_PROPERTY_H_ #define __BACKPORT_LINUX_PROPERTY_H_ #include #if LINUX_VERSION_IS_GEQ(3,18,17) || \ RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,6) #include_next #endif #if LINUX_VERSION_IS_LESS(4,3,0) #define device_get_mac_address LINUX_BACKPORT(device_get_mac_address) void *device_get_mac_address(struct device *dev, char *addr, int alen); #endif /* < 4.3 */ #endif /* __BACKPORT_LINUX_PROPERTY_H_ */ backport-iwlwifi-dkms-7906/backport-include/linux/ptp_clock_kernel.h000066400000000000000000000021601351064634500257540ustar00rootroot00000000000000#ifndef __BACKPORT_PTP_CLOCK_KERNEL_H #define __BACKPORT_PTP_CLOCK_KERNEL_H #include #include_next #if LINUX_VERSION_IS_LESS(3,5,0) #include #define PTP_MAX_TIMESTAMPS 128 #define PTP_BUF_TIMESTAMPS 30 struct timestamp_event_queue { struct ptp_extts_event buf[PTP_MAX_TIMESTAMPS]; int head; int tail; spinlock_t lock; }; struct ptp_clock { struct posix_clock clock; struct device *dev; struct ptp_clock_info *info; dev_t devid; int index; /* index into clocks.map */ struct pps_device *pps_source; struct 
timestamp_event_queue tsevq; /* simple fifo for time stamps */ struct mutex tsevq_mux; /* one process at a time reading the fifo */ wait_queue_head_t tsev_wq; int defunct; /* tells readers to go away when clock is being removed */ }; extern int ptp_clock_index(struct ptp_clock *ptp); #endif /* LINUX_VERSION_IS_LESS(3,5,0) */ #if LINUX_VERSION_IS_LESS(3,7,0) && !defined(CONFIG_SUSE_KERNEL) #define ptp_clock_register(info,parent) ptp_clock_register(info) #endif /* LINUX_VERSION_IS_LESS(3,7,0) */ #endif /* __BACKPORT_PTP_CLOCK_KERNEL_H */ backport-iwlwifi-dkms-7906/backport-include/linux/random.h000066400000000000000000000032621351064634500237220ustar00rootroot00000000000000#ifndef __BACKPORT_RANDOM_H #define __BACKPORT_RANDOM_H #include_next #include #if (LINUX_VERSION_IS_GEQ(3,3,0) && LINUX_VERSION_IS_LESS(3,4,10)) || \ (LINUX_VERSION_IS_GEQ(3,1,0) && LINUX_VERSION_IS_LESS(3,2,27)) || \ LINUX_VERSION_IS_LESS(3,0,41) #define add_device_randomness LINUX_BACKPORT(add_device_randomness) static inline void add_device_randomness(const void *buf, unsigned int size) { } #endif #if LINUX_VERSION_IS_LESS(3,8,0) /* backports 496f2f9 */ #define prandom_seed(_seed) srandom32(_seed) #define prandom_u32() random32() #define prandom_u32_state(_state) prandom32(_state) /* backport 6582c665d6b882dad8329e05749fbcf119f1ab88 */ #define prandom_bytes LINUX_BACKPORT(prandom_bytes) void prandom_bytes(void *buf, int bytes); #endif #if LINUX_VERSION_IS_LESS(3,14,0) /** * prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro) * @ep_ro: right open interval endpoint * * Returns a pseudo-random number that is in interval [0, ep_ro). Note * that the result depends on PRNG being well distributed in [0, ~0U] * u32 space. Here we use maximally equidistributed combined Tausworthe * generator, that is, prandom_u32(). This is useful when requesting a * random index of an array containing ep_ro elements, for example. * * Returns: pseudo-random number in interval [0, ep_ro) */ #define prandom_u32_max LINUX_BACKPORT(prandom_u32_max) static inline u32 prandom_u32_max(u32 ep_ro) { return (u32)(((u64) prandom_u32() * ep_ro) >> 32); } #endif /* LINUX_VERSION_IS_LESS(3,14,0) */ #if LINUX_VERSION_IS_LESS(4,11,0) static inline u32 get_random_u32(void) { return get_random_int(); } #endif #endif /* __BACKPORT_RANDOM_H */ backport-iwlwifi-dkms-7906/backport-include/linux/rculist.h000066400000000000000000000040521351064634500241250ustar00rootroot00000000000000#ifndef __BACKPORT_RCULIST_H #define __BACKPORT_RCULIST_H #include_next #if LINUX_VERSION_IS_LESS(3,9,0) #include #define hlist_for_each_entry_rcu4(tpos, pos, head, member) \ for (pos = rcu_dereference_raw(hlist_first_rcu(head)); \ pos && \ ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; });\ pos = rcu_dereference_raw(hlist_next_rcu(pos))) #define hlist_for_each_entry_rcu3(pos, head, member) \ for (pos = hlist_entry_safe (rcu_dereference_raw(hlist_first_rcu(head)),\ typeof(*(pos)), member); \ pos; \ pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \ &(pos)->member)), typeof(*(pos)), member)) #undef hlist_for_each_entry_rcu #define hlist_for_each_entry_rcu(...) 
\ macro_dispatcher(hlist_for_each_entry_rcu, __VA_ARGS__)(__VA_ARGS__) #endif /* < 3.9 */ #ifndef list_for_each_entry_continue_rcu #define list_for_each_entry_continue_rcu(pos, head, member) \ for (pos = list_entry_rcu(pos->member.next, typeof(*pos), member); \ prefetch(pos->member.next), &pos->member != (head); \ pos = list_entry_rcu(pos->member.next, typeof(*pos), member)) #endif #ifndef list_entry_rcu #define list_entry_rcu(ptr, type, member) \ container_of(rcu_dereference(ptr), type, member) #endif #ifndef list_first_or_null_rcu /** * list_first_or_null_rcu - get the first element from a list * @ptr: the list head to take the element from. * @type: the type of the struct this is embedded in. * @member: the name of the list_struct within the struct. * * Note that if the list is empty, it returns NULL. * * This primitive may safely run concurrently with the _rcu list-mutation * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock(). */ #define list_first_or_null_rcu(ptr, type, member) \ ({ \ struct list_head *__ptr = (ptr); \ struct list_head *__next = ACCESS_ONCE(__ptr->next); \ likely(__ptr != __next) ? list_entry_rcu(__next, type, member) : NULL; \ }) #endif /* list_first_or_null_rcu */ #endif /* __BACKPORT_RCULIST_H */ backport-iwlwifi-dkms-7906/backport-include/linux/rcupdate.h000066400000000000000000000023631351064634500242520ustar00rootroot00000000000000#ifndef __BACKPORT_LINUX_RCUPDATE_H #define __BACKPORT_LINUX_RCUPDATE_H #include_next /* * This adds a nested function everywhere kfree_rcu() was called. This * function frees the memory and is given as a function to call_rcu(). * The rcu callback could happen every time also after the module was * unloaded and this will cause problems. To address that problem, we * put rcu_barrier() into each module_exit() in module.h. 
*/ #if !defined(kfree_rcu) #define kfree_rcu(data, rcuhead) do { \ void __kfree_rcu_fn(struct rcu_head *rcu_head) \ { \ void *___ptr; \ ___ptr = container_of(rcu_head, typeof(*(data)), rcuhead);\ kfree(___ptr); \ } \ call_rcu(&(data)->rcuhead, __kfree_rcu_fn); \ } while (0) #endif #ifndef RCU_INIT_POINTER #define RCU_INIT_POINTER(p, v) \ p = (typeof(*v) __force __rcu *)(v) #endif #ifndef rcu_dereference_check #define rcu_dereference_check(p, c) rcu_dereference(p) #endif #ifndef rcu_dereference_protected #define rcu_dereference_protected(p, c) (p) #endif #ifndef rcu_access_pointer #define rcu_access_pointer(p) ACCESS_ONCE(p) #endif #ifndef rcu_dereference_raw #define rcu_dereference_raw(p) rcu_dereference(p) #endif #endif /* __BACKPORT_LINUX_RCUPDATE_H */ backport-iwlwifi-dkms-7906/backport-include/linux/regmap.h000066400000000000000000000030371351064634500237150ustar00rootroot00000000000000#ifndef __BACKPORT_LINUX_REGMAP_H #define __BACKPORT_LINUX_REGMAP_H #include_next #include #if LINUX_VERSION_IS_LESS(3,5,0) && \ LINUX_VERSION_IS_GEQ(3,2,0) #define dev_get_regmap LINUX_BACKPORT(dev_get_regmap) static inline struct regmap *dev_get_regmap(struct device *dev, const char *name) { return NULL; } #endif #if LINUX_VERSION_IS_LESS(3,4,0) && \ LINUX_VERSION_IS_GEQ(3,2,0) #if defined(CONFIG_REGMAP) #define devm_regmap_init LINUX_BACKPORT(devm_regmap_init) struct regmap *devm_regmap_init(struct device *dev, const struct regmap_bus *bus, const struct regmap_config *config); #if defined(CONFIG_REGMAP_I2C) #define devm_regmap_init_i2c LINUX_BACKPORT(devm_regmap_init_i2c) struct regmap *devm_regmap_init_i2c(struct i2c_client *i2c, const struct regmap_config *config); #endif /* defined(CONFIG_REGMAP_I2C) */ /* * We can't backport these unless we try to backport * the full regmap into core so warn if used. * No drivers are using this yet anyway. 
*/ #define regmap_raw_write_async LINUX_BACKPORT(regmap_raw_write_async) static inline int regmap_raw_write_async(struct regmap *map, unsigned int reg, const void *val, size_t val_len) { WARN_ONCE(1, "regmap API is disabled"); return -EINVAL; } #define regmap_async_complete LINUX_BACKPORT(regmap_async_complete) static inline void regmap_async_complete(struct regmap *map) { WARN_ONCE(1, "regmap API is disabled"); } #endif /* defined(CONFIG_REGMAP) */ #endif /* 3.2 <= version < 3.4 */ #endif /* __BACKPORT_LINUX_REGMAP_H */ backport-iwlwifi-dkms-7906/backport-include/linux/rfkill.h000066400000000000000000000101451351064634500237230ustar00rootroot00000000000000#ifndef __COMPAT_RFKILL_H #define __COMPAT_RFKILL_H #include #if LINUX_VERSION_IS_GEQ(3,10,0) #include_next #else /* API only slightly changed since then */ #define rfkill_type old_rfkill_type #define RFKILL_TYPE_ALL OLD_RFKILL_TYPE_ALL #define RFKILL_TYPE_WLAN OLD_RFKILL_TYPE_WLAN #define RFKILL_TYPE_BLUETOOTH OLD_RFKILL_TYPE_BLUETOOTH #define RFKILL_TYPE_UWB OLD_RFKILL_TYPE_UWB #define RFKILL_TYPE_WIMAX OLD_RFKILL_TYPE_WIMAX #define RFKILL_TYPE_WWAN OLD_RFKILL_TYPE_WWAN #define RFKILL_TYPE_GPS OLD_RFKILL_TYPE_GPS #define RFKILL_TYPE_FM OLD_RFKILL_TYPE_FM #define RFKILL_TYPE_NFC OLD_RFKILL_TYPE_NFC #define NUM_RFKILL_TYPES OLD_NUM_RFKILL_TYPES #include_next #undef rfkill_type #undef RFKILL_TYPE_ALL #undef RFKILL_TYPE_WLAN #undef RFKILL_TYPE_BLUETOOTH #undef RFKILL_TYPE_UWB #undef RFKILL_TYPE_WIMAX #undef RFKILL_TYPE_WWAN #undef RFKILL_TYPE_GPS #undef RFKILL_TYPE_FM #undef RFKILL_TYPE_NFC #undef NUM_RFKILL_TYPES #define HAVE_OLD_RFKILL /* this changes infrequently, backport manually */ enum rfkill_type { RFKILL_TYPE_ALL = 0, RFKILL_TYPE_WLAN, RFKILL_TYPE_BLUETOOTH, RFKILL_TYPE_UWB, RFKILL_TYPE_WIMAX, RFKILL_TYPE_WWAN, RFKILL_TYPE_GPS, RFKILL_TYPE_FM, RFKILL_TYPE_NFC, NUM_RFKILL_TYPES, }; static inline struct rfkill * __must_check backport_rfkill_alloc(const char *name, struct device *parent, const enum rfkill_type type, const struct rfkill_ops *ops, void *ops_data) { #ifdef HAVE_OLD_RFKILL if ((unsigned int)type >= (unsigned int)OLD_NUM_RFKILL_TYPES) return ERR_PTR(-ENODEV); return rfkill_alloc(name, parent, (enum old_rfkill_type)type, ops, ops_data); #else return ERR_PTR(-ENODEV); #endif } #define rfkill_alloc backport_rfkill_alloc static inline int __must_check backport_rfkill_register(struct rfkill *rfkill) { if (rfkill == ERR_PTR(-ENODEV)) return 0; #ifdef HAVE_OLD_RFKILL return rfkill_register(rfkill); #else return -EINVAL; #endif } #define rfkill_register backport_rfkill_register static inline void backport_rfkill_pause_polling(struct rfkill *rfkill) { #ifdef HAVE_OLD_RFKILL rfkill_pause_polling(rfkill); #endif } #define rfkill_pause_polling backport_rfkill_pause_polling static inline void backport_rfkill_resume_polling(struct rfkill *rfkill) { #ifdef HAVE_OLD_RFKILL rfkill_resume_polling(rfkill); #endif } #define rfkill_resume_polling backport_rfkill_resume_polling static inline void backport_rfkill_unregister(struct rfkill *rfkill) { #ifdef HAVE_OLD_RFKILL if (rfkill == ERR_PTR(-ENODEV)) return; rfkill_unregister(rfkill); #endif } #define rfkill_unregister backport_rfkill_unregister static inline void backport_rfkill_destroy(struct rfkill *rfkill) { #ifdef HAVE_OLD_RFKILL if (rfkill == ERR_PTR(-ENODEV)) return; rfkill_destroy(rfkill); #endif } #define rfkill_destroy backport_rfkill_destroy static inline bool backport_rfkill_set_hw_state(struct rfkill *rfkill, bool blocked) { #ifdef HAVE_OLD_RFKILL if (rfkill != ERR_PTR(-ENODEV)) 
return rfkill_set_hw_state(rfkill, blocked); #endif return blocked; } #define rfkill_set_hw_state backport_rfkill_set_hw_state static inline bool backport_rfkill_set_sw_state(struct rfkill *rfkill, bool blocked) { #ifdef HAVE_OLD_RFKILL if (rfkill != ERR_PTR(-ENODEV)) return rfkill_set_sw_state(rfkill, blocked); #endif return blocked; } #define rfkill_set_sw_state backport_rfkill_set_sw_state static inline void backport_rfkill_init_sw_state(struct rfkill *rfkill, bool blocked) { #ifdef HAVE_OLD_RFKILL if (rfkill != ERR_PTR(-ENODEV)) rfkill_init_sw_state(rfkill, blocked); #endif } #define rfkill_init_sw_state backport_rfkill_init_sw_state static inline void backport_rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw) { #ifdef HAVE_OLD_RFKILL if (rfkill != ERR_PTR(-ENODEV)) rfkill_set_states(rfkill, sw, hw); #endif } #define rfkill_set_states backport_rfkill_set_states static inline bool backport_rfkill_blocked(struct rfkill *rfkill) { #ifdef HAVE_OLD_RFKILL if (rfkill != ERR_PTR(-ENODEV)) return rfkill_blocked(rfkill); #endif return false; } #define rfkill_blocked backport_rfkill_blocked #endif #endif backport-iwlwifi-dkms-7906/backport-include/linux/rtnetlink.h000066400000000000000000000014511351064634500244520ustar00rootroot00000000000000#ifndef __BACKPORT_LINUX_RTNETLINK_H #define __BACKPORT_LINUX_RTNETLINK_H #include_next #ifndef rtnl_dereference #define rtnl_dereference(p) \ rcu_dereference_protected(p, lockdep_rtnl_is_held()) #endif #ifndef rcu_dereference_rtnl #define rcu_dereference_rtnl(p) \ rcu_dereference_check(p, rcu_read_lock_held() || \ lockdep_rtnl_is_held()) #endif #if LINUX_VERSION_IS_LESS(3,19,0) #define ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags) \ ndo_dflt_fdb_add(ndm, tb, dev, addr, flags) #endif #if LINUX_VERSION_IS_LESS(3,13,0) && \ !defined(CONFIG_PROVE_LOCKING) && \ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6) static inline bool lockdep_rtnl_is_held(void) { return true; } #endif #endif /* __BACKPORT_LINUX_RTNETLINK_H */ backport-iwlwifi-dkms-7906/backport-include/linux/scatterlist.h000066400000000000000000000102151351064634500247770ustar00rootroot00000000000000#ifndef __BACKPORT_SCATTERLIST_H #define __BACKPORT_SCATTERLIST_H #include_next #if LINUX_VERSION_IS_LESS(3,7,0) int sg_nents(struct scatterlist *sg); #endif #if LINUX_VERSION_IS_LESS(3, 9, 0) /* * sg page iterator * * Iterates over sg entries page-by-page. On each successful iteration, * @piter->page points to the current page, @piter->sg to the sg holding this * page and @piter->sg_pgoffset to the page's page offset within the sg. The * iteration will stop either when a maximum number of sg entries was reached * or a terminating sg (sg_last(sg) == true) was reached. 
*/ struct sg_page_iter { struct page *page; /* current page */ struct scatterlist *sg; /* sg holding the page */ unsigned int sg_pgoffset; /* page offset within the sg */ /* these are internal states, keep away */ unsigned int __nents; /* remaining sg entries */ int __pg_advance; /* nr pages to advance at the * next step */ }; struct backport_sg_mapping_iter { /* the following three fields can be accessed directly */ struct page *page; /* currently mapped page */ void *addr; /* pointer to the mapped area */ size_t length; /* length of the mapped area */ size_t consumed; /* number of consumed bytes */ struct sg_page_iter piter; /* page iterator */ /* these are internal states, keep away */ unsigned int __offset; /* offset within page */ unsigned int __remaining; /* remaining bytes on page */ unsigned int __flags; }; #define sg_mapping_iter LINUX_BACKPORT(sg_mapping_iter) /** * sg_page_iter_page - get the current page held by the page iterator * @piter: page iterator holding the page */ static inline struct page *sg_page_iter_page(struct sg_page_iter *piter) { return nth_page(sg_page(piter->sg), piter->sg_pgoffset); } bool __sg_page_iter_next(struct sg_page_iter *piter); void __sg_page_iter_start(struct sg_page_iter *piter, struct scatterlist *sglist, unsigned int nents, unsigned long pgoffset); void backport_sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl, unsigned int nents, unsigned int flags); bool backport_sg_miter_next(struct sg_mapping_iter *miter); void backport_sg_miter_stop(struct sg_mapping_iter *miter); #define sg_miter_start LINUX_BACKPORT(sg_miter_start) #define sg_miter_next LINUX_BACKPORT(sg_miter_next) #define sg_miter_stop LINUX_BACKPORT(sg_miter_stop) /** * for_each_sg_page - iterate over the pages of the given sg list * @sglist: sglist to iterate over * @piter: page iterator to hold current page, sg, sg_pgoffset * @nents: maximum number of sg entries to iterate over * @pgoffset: starting page offset */ #define for_each_sg_page(sglist, piter, nents, pgoffset) \ for (__sg_page_iter_start((piter), (sglist), (nents), (pgoffset)); \ __sg_page_iter_next(piter);) #endif /* LINUX_VERSION_IS_LESS(3, 9, 0) */ #if LINUX_VERSION_IS_LESS(3, 11, 0) size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf, size_t buflen, off_t skip, bool to_buffer); #define sg_pcopy_to_buffer LINUX_BACKPORT(sg_pcopy_to_buffer) static inline size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents, void *buf, size_t buflen, off_t skip) { return sg_copy_buffer(sgl, nents, buf, buflen, skip, true); } #define sg_pcopy_from_buffer LINUX_BACKPORT(sg_pcopy_from_buffer) static inline size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents, void *buf, size_t buflen, off_t skip) { return sg_copy_buffer(sgl, nents, buf, buflen, skip, false); } #endif /* LINUX_VERSION_IS_LESS(3, 11, 0) */ #if LINUX_VERSION_IS_LESS(4, 17, 0) #define sg_init_marker LINUX_BACKPORT(sg_init_marker) /** * sg_init_marker - Initialize markers in sg table * @sgl: The SG table * @nents: Number of entries in table * **/ static inline void sg_init_marker(struct scatterlist *sgl, unsigned int nents) { #ifdef CONFIG_DEBUG_SG unsigned int i; for (i = 0; i < nents; i++) sgl[i].sg_magic = SG_MAGIC; #endif sg_mark_end(&sgl[nents - 1]); } #endif /* LINUX_VERSION_IS_LESS(4, 17, 0) */ #endif /* __BACKPORT_SCATTERLIST_H */ 
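/*
 * Usage sketch for the sg page iterator backported above (illustrative
 * only, not part of the original tree; the function name and the printed
 * message are hypothetical). It walks every page backing a scatterlist
 * with for_each_sg_page() and resolves each page via sg_page_iter_page().
 */
#include <linux/scatterlist.h>
#include <linux/printk.h>

static void example_walk_sg_pages(struct scatterlist *sgl, unsigned int nents)
{
	struct sg_page_iter piter;

	/* start at page offset 0 and visit at most @nents sg entries */
	for_each_sg_page(sgl, &piter, nents, 0) {
		struct page *page = sg_page_iter_page(&piter);

		pr_info("sg page %p (page offset %u within the sg entry)\n",
			page, piter.sg_pgoffset);
	}
}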
backport-iwlwifi-dkms-7906/backport-include/linux/sched/000077500000000000000000000000001351064634500233545ustar00rootroot00000000000000backport-iwlwifi-dkms-7906/backport-include/linux/sched/signal.h000066400000000000000000000003531351064634500250030ustar00rootroot00000000000000#ifndef _BACKPORT_LINUX_SCHED_SIGNAL_H #define _BACKPORT_LINUX_SCHED_SIGNAL_H #if LINUX_VERSION_IS_LESS(4, 11, 0) #include <linux/sched.h> #else #include_next <linux/sched/signal.h> #endif #endif /* _BACKPORT_LINUX_SCHED_SIGNAL_H */
backport-iwlwifi-dkms-7906/backport-include/linux/security.h000066400000000000000000000010101351064634500242760ustar00rootroot00000000000000#ifndef __BACKPORT_LINUX_SECURITY_H #define __BACKPORT_LINUX_SECURITY_H #include_next <linux/security.h> #if LINUX_VERSION_IS_LESS(3,1,0) /* * This has been defined in include/linux/security.h for some time, but was * only given an EXPORT_SYMBOL for 3.1. Add a compat_* definition to avoid * breaking the compile. */ #define security_sk_clone(a, b) compat_security_sk_clone(a, b) static inline void security_sk_clone(const struct sock *sk, struct sock *newsk) { } #endif #endif /* __BACKPORT_LINUX_SECURITY_H */
backport-iwlwifi-dkms-7906/backport-include/linux/seq_file.h000066400000000000000000000036421351064634500242330ustar00rootroot00000000000000#ifndef __BACKPORT_SEQ_FILE_H #define __BACKPORT_SEQ_FILE_H #include_next <linux/seq_file.h> #include <linux/version.h> #if LINUX_VERSION_IS_LESS(3,7,0) #include <linux/fs.h> #include <linux/cred.h> #include <linux/user_namespace.h> #ifdef CONFIG_USER_NS static inline struct user_namespace *seq_user_ns(struct seq_file *seq) { struct file *f = container_of((void *) seq, struct file, private_data); return f->f_cred->user_ns; } #else static inline struct user_namespace *seq_user_ns(struct seq_file *seq) { extern struct user_namespace init_user_ns; return &init_user_ns; } #endif /* CONFIG_USER_NS */ #endif /* < 3.7 */ #if LINUX_VERSION_IS_LESS(3,19,0) #define seq_has_overflowed LINUX_BACKPORT(seq_has_overflowed) /** * seq_has_overflowed - check if the buffer has overflowed * @m: the seq_file handle * * seq_files have a buffer which may overflow. When this happens a larger * buffer is reallocated and all the data will be printed again. * The overflow state is true when m->count == m->size. * * Returns true if the buffer received more than it can hold.
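 *
 * Illustrative use (a hypothetical caller, not part of the original
 * header): a ->show() handler that emits many records can poll
 * seq_has_overflowed() to stop early, since once the buffer is full all
 * further output is discarded and the whole ->show() pass is redone with
 * a larger buffer anyway:
 *
 *	list_for_each_entry(rec, &recs, list) {
 *		seq_printf(m, "%d\n", rec->id);
 *		if (seq_has_overflowed(m))
 *			break;
 *	}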
*/ static inline bool seq_has_overflowed(struct seq_file *m) { return m->count == m->size; } #endif #if LINUX_VERSION_IS_LESS(4,3,0) #define seq_hex_dump LINUX_BACKPORT(seq_hex_dump) void seq_hex_dump(struct seq_file *m, const char *prefix_str, int prefix_type, int rowsize, int groupsize, const void *buf, size_t len, bool ascii); #endif #ifndef DEFINE_SHOW_ATTRIBUTE #define DEFINE_SHOW_ATTRIBUTE(__name) \ static int __name ## _open(struct inode *inode, struct file *file) \ { \ return single_open(file, __name ## _show, inode->i_private); \ } \ \ static const struct file_operations __name ## _fops = { \ .owner = THIS_MODULE, \ .open = __name ## _open, \ .read = seq_read, \ .llseek = seq_lseek, \ .release = single_release, \ } #endif /* DEFINE_SHOW_ATTRIBUTE */ #endif /* __BACKPORT_SEQ_FILE_H */ backport-iwlwifi-dkms-7906/backport-include/linux/skbuff.h000066400000000000000000000264761351064634500237360ustar00rootroot00000000000000#ifndef __BACKPORT_SKBUFF_H #define __BACKPORT_SKBUFF_H #include_next #include #include #if LINUX_VERSION_IS_LESS(3,4,0) && \ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,4)) && \ !(defined(CONFIG_SUSE_KERNEL) && LINUX_VERSION_IS_GEQ(3,0,0)) #define skb_add_rx_frag(skb, i, page, off, size, truesize) \ skb_add_rx_frag(skb, i, page, off, size) #endif #if LINUX_VERSION_IS_LESS(3,3,0) #define __pskb_copy LINUX_BACKPORT(__pskb_copy) extern struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, gfp_t gfp_mask); #define skb_complete_wifi_ack LINUX_BACKPORT(skb_complete_wifi_ack) static inline void skb_complete_wifi_ack(struct sk_buff *skb, bool acked) { WARN_ON(1); } /* define to 0 so checks for it are always false */ #define SKBTX_WIFI_STATUS 0 #elif LINUX_VERSION_IS_LESS(3,18,0) #define skb_complete_wifi_ack LINUX_BACKPORT(skb_complete_wifi_ack) void skb_complete_wifi_ack(struct sk_buff *skb, bool acked); #endif #if LINUX_VERSION_IS_LESS(3,2,0) #include /* mask skb_frag_page as RHEL6 backports this */ #define skb_frag_page LINUX_BACKPORT(skb_frag_page) static inline struct page *skb_frag_page(const skb_frag_t *frag) { return frag->page; } #define skb_frag_size LINUX_BACKPORT(skb_frag_size) static inline unsigned int skb_frag_size(const skb_frag_t *frag) { return frag->size; } /* mask skb_frag_dma_map as RHEL6 backports this */ #define skb_frag_dma_map LINUX_BACKPORT(skb_frag_dma_map) static inline dma_addr_t skb_frag_dma_map(struct device *dev, const skb_frag_t *frag, size_t offset, size_t size, enum dma_data_direction dir) { return dma_map_page(dev, skb_frag_page(frag), frag->page_offset + offset, size, dir); } #endif #if LINUX_VERSION_IS_LESS(3,1,0) /* mask __netdev_alloc_skb_ip_align as RHEL6 backports this */ #define __netdev_alloc_skb_ip_align(a,b,c) compat__netdev_alloc_skb_ip_align(a,b,c) static inline struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev, unsigned int length, gfp_t gfp) { struct sk_buff *skb = __netdev_alloc_skb(dev, length + NET_IP_ALIGN, gfp); if (NET_IP_ALIGN && skb) skb_reserve(skb, NET_IP_ALIGN); return skb; } #endif #ifndef skb_walk_frags #define skb_walk_frags(skb, iter) \ for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next) #endif #if LINUX_VERSION_IS_LESS(3,2,0) #define skb_frag_size_sub LINUX_BACKPORT(skb_frag_size_sub) static inline void skb_frag_size_sub(skb_frag_t *frag, int delta) { frag->size -= delta; } /** * skb_frag_address - gets the address of the data contained in a paged fragment * @frag: the paged fragment buffer * * Returns the address of the data within @frag. 
The page must already * be mapped. */ #define skb_frag_address LINUX_BACKPORT(skb_frag_address) static inline void *skb_frag_address(const skb_frag_t *frag) { return page_address(skb_frag_page(frag)) + frag->page_offset; } #endif /* LINUX_VERSION_IS_LESS(3,2,0) */ #if LINUX_VERSION_IS_LESS(3,9,0) #ifndef NETDEV_FRAG_PAGE_MAX_ORDER #define NETDEV_FRAG_PAGE_MAX_ORDER get_order(32768) #endif #ifndef NETDEV_FRAG_PAGE_MAX_SIZE #define NETDEV_FRAG_PAGE_MAX_SIZE (PAGE_SIZE << NETDEV_FRAG_PAGE_MAX_ORDER) #endif #endif /* LINUX_VERSION_IS_LESS(3,9,0) */ #if LINUX_VERSION_IS_LESS(3,9,0) #define skb_unclone LINUX_BACKPORT(skb_unclone) static inline int skb_unclone(struct sk_buff *skb, gfp_t pri) { might_sleep_if(pri & __GFP_WAIT); if (skb_cloned(skb)) return pskb_expand_head(skb, 0, 0, pri); return 0; } #endif #if LINUX_VERSION_IS_LESS(3,2,0) #define skb_frag_address_safe LINUX_BACKPORT(skb_frag_address_safe) /** * skb_frag_address_safe - gets the address of the data contained in a paged fragment * @frag: the paged fragment buffer * * Returns the address of the data within @frag. Checks that the page * is mapped and returns %NULL otherwise. */ static inline void *skb_frag_address_safe(const skb_frag_t *frag) { void *ptr = page_address(skb_frag_page(frag)); if (unlikely(!ptr)) return NULL; return ptr + frag->page_offset; } #endif /* LINUX_VERSION_IS_LESS(3,2,0) */ #if LINUX_VERSION_IS_LESS(3,14,0) && \ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0) && \ !(LINUX_VERSION_CODE == KERNEL_VERSION(3,13,11) && UTS_UBUNTU_RELEASE_ABI > 30) /* * Packet hash types specify the type of hash in skb_set_hash. * * Hash types refer to the protocol layer addresses which are used to * construct a packet's hash. The hashes are used to differentiate or identify * flows of the protocol layer for the hash type. Hash types are either * layer-2 (L2), layer-3 (L3), or layer-4 (L4). * * Properties of hashes: * * 1) Two packets in different flows have different hash values * 2) Two packets in the same flow should have the same hash value * * A hash at a higher layer is considered to be more specific. A driver should * set the most specific hash possible. * * A driver cannot indicate a more specific hash than the layer at which a hash * was computed. For instance an L3 hash cannot be set as an L4 hash. * * A driver may indicate a hash level which is less specific than the * actual layer the hash was computed on. For instance, a hash computed * at L4 may be considered an L3 hash. This should only be done if the * driver can't unambiguously determine that the HW computed the hash at * the higher layer. Note that the "should" in the second property above * permits this. 
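 *
 * For instance (an illustrative addition, not part of the original
 * comment): a NIC whose hardware computes a hash over src/dst IP and
 * src/dst port would report it with
 *
 *	skb_set_hash(skb, rxhash, PKT_HASH_TYPE_L4);
 *
 * while hardware that only hashed the IP addresses must not claim more
 * than PKT_HASH_TYPE_L3.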
*/ enum pkt_hash_types { PKT_HASH_TYPE_NONE, /* Undefined type */ PKT_HASH_TYPE_L2, /* Input: src_MAC, dest_MAC */ PKT_HASH_TYPE_L3, /* Input: src_IP, dst_IP */ PKT_HASH_TYPE_L4, /* Input: src_IP, dst_IP, src_port, dst_port */ }; static inline void skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type) { #if LINUX_VERSION_IS_GEQ(3,2,0) /* 4031ae6edb */ skb->l4_rxhash = (type == PKT_HASH_TYPE_L4); #endif #if LINUX_VERSION_IS_GEQ(3,4,0) /* bdeab99191 */ skb->rxhash = hash; #endif } #endif /* LINUX_VERSION_IS_LESS(3,14,0) */ #if LINUX_VERSION_IS_LESS(3,16,0) #define __pskb_copy_fclone LINUX_BACKPORT(__pskb_copy_fclone) static inline struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom, gfp_t gfp_mask, bool fclone) { return __pskb_copy(skb, headroom, gfp_mask); } #endif #if LINUX_VERSION_IS_LESS(3,18,0) #define skb_clone_sk LINUX_BACKPORT(skb_clone_sk) struct sk_buff *skb_clone_sk(struct sk_buff *skb); #endif #if LINUX_VERSION_IS_LESS(3,18,0) #define skb_xmit_more(skb) false #elif LINUX_VERSION_IS_LESS(5,2,0) #define skb_xmit_more(skb) ((skb)->xmit_more) #else #define skb_xmit_more(skb) netdev_xmit_more() #endif #if LINUX_VERSION_IS_LESS(3,19,0) /** * __dev_alloc_pages - allocate page for network Rx * @gfp_mask: allocation priority. Set __GFP_NOMEMALLOC if not for network Rx * @order: size of the allocation * * Allocate a new page. * * %NULL is returned if there is no free memory. */ #define __dev_alloc_pages LINUX_BACKPORT(__dev_alloc_pages) static inline struct page *__dev_alloc_pages(gfp_t gfp_mask, unsigned int order) { /* This piece of code contains several assumptions. * 1. This is for device Rx, therefore a cold page is preferred. * 2. The expectation is the user wants a compound page. * 3. If requesting an order 0 page it will not be compound * due to the check to see if order has a value in prep_new_page * 4. __GFP_MEMALLOC is ignored if __GFP_NOMEMALLOC is set due to * code in gfp_to_alloc_flags that should be enforcing this. */ gfp_mask |= __GFP_COLD | __GFP_COMP; #if LINUX_VERSION_IS_GEQ(3,6,0) gfp_mask |= __GFP_MEMALLOC; #endif return alloc_pages_node(NUMA_NO_NODE, gfp_mask, order); } #define dev_alloc_pages LINUX_BACKPORT(dev_alloc_pages) static inline struct page *dev_alloc_pages(unsigned int order) { return __dev_alloc_pages(GFP_ATOMIC, order); } /** * __dev_alloc_page - allocate a page for network Rx * @gfp_mask: allocation priority. Set __GFP_NOMEMALLOC if not for network Rx * * Allocate a new page. * * %NULL is returned if there is no free memory. */ #define __dev_alloc_page LINUX_BACKPORT(__dev_alloc_page) static inline struct page *__dev_alloc_page(gfp_t gfp_mask) { return __dev_alloc_pages(gfp_mask, 0); } #define dev_alloc_page LINUX_BACKPORT(dev_alloc_page) static inline struct page *dev_alloc_page(void) { return __dev_alloc_page(GFP_ATOMIC); } #endif /* LINUX_VERSION_IS_LESS(3,19,0) */ #if LINUX_VERSION_IS_LESS(3,19,0) #define skb_copy_datagram_msg LINUX_BACKPORT(skb_copy_datagram_msg) static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset, struct msghdr *msg, int size) { return skb_copy_datagram_iovec(from, offset, msg->msg_iov, size); } #define memcpy_from_msg LINUX_BACKPORT(memcpy_from_msg) static inline int memcpy_from_msg(void *data, struct msghdr *msg, int len) { return memcpy_fromiovec(data, msg->msg_iov, len); } /** * skb_put_padto - increase size and pad an skbuff up to a minimal size * @skb: buffer to pad * @len: minimal length * * Pads up a buffer to ensure the trailing bytes exist and are * blanked.
If the buffer already contains sufficient data it * is untouched. Otherwise it is extended. Returns zero on * success. The skb is freed on error. */ #define skb_put_padto LINUX_BACKPORT(skb_put_padto) static inline int skb_put_padto(struct sk_buff *skb, unsigned int len) { unsigned int size = skb->len; if (unlikely(size < len)) { len -= size; if (skb_pad(skb, len)) return -ENOMEM; __skb_put(skb, len); } return 0; } #define skb_ensure_writable LINUX_BACKPORT(skb_ensure_writable) int skb_ensure_writable(struct sk_buff *skb, int write_len); #endif /* LINUX_VERSION_IS_LESS(3,19,0) */ #if LINUX_VERSION_IS_LESS(4,2,0) && \ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6) static inline void skb_free_frag(void *data) { put_page(virt_to_head_page(data)); } #if LINUX_VERSION_IS_LESS(3,3,0) static inline u32 skb_get_hash_perturb(struct sk_buff *skb, u32 key) { return 0; } #else #include <net/flow_keys.h> #include <linux/jhash.h> static inline u32 skb_get_hash_perturb(struct sk_buff *skb, u32 key) { struct flow_keys keys; skb_flow_dissect(skb, &keys); return jhash_3words((__force u32)keys.dst, (__force u32)keys.src ^ keys.ip_proto, (__force u32)keys.ports, key); } #endif /* LINUX_VERSION_IS_LESS(3,3,0) */ #endif /* LINUX_VERSION_IS_LESS(4,2,0) */ #if LINUX_VERSION_IS_LESS(4,13,0) && \ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6) static inline void *backport_skb_put(struct sk_buff *skb, unsigned int len) { return skb_put(skb, len); } #define skb_put LINUX_BACKPORT(skb_put) static inline void *backport_skb_push(struct sk_buff *skb, unsigned int len) { return skb_push(skb, len); } #define skb_push LINUX_BACKPORT(skb_push) static inline void *backport___skb_push(struct sk_buff *skb, unsigned int len) { return __skb_push(skb, len); } #define __skb_push LINUX_BACKPORT(__skb_push) static inline void *skb_put_zero(struct sk_buff *skb, unsigned int len) { void *tmp = skb_put(skb, len); memset(tmp, 0, len); return tmp; } static inline void *skb_put_data(struct sk_buff *skb, const void *data, unsigned int len) { void *tmp = skb_put(skb, len); memcpy(tmp, data, len); return tmp; } static inline void skb_put_u8(struct sk_buff *skb, u8 val) { *(u8 *)skb_put(skb, 1) = val; } #endif #if LINUX_VERSION_IS_LESS(4,20,0) static inline struct sk_buff *__skb_peek(const struct sk_buff_head *list_) { return list_->next; } #endif #endif /* __BACKPORT_SKBUFF_H */
backport-iwlwifi-dkms-7906/backport-include/linux/slab.h000066400000000000000000000012001351064634500233510ustar00rootroot00000000000000#ifndef __BACKPORT_SLAB_H #define __BACKPORT_SLAB_H #include_next <linux/slab.h> #include <linux/version.h> #if LINUX_VERSION_IS_LESS(3,4,0) /* This backports: * * commit a8203725dfded5c1f79dca3368a4a273e24b59bb * Author: Xi Wang * Date: Mon Mar 5 15:14:41 2012 -0800 * * slab: introduce kmalloc_array() */ #include <linux/kernel.h> /* for SIZE_MAX */ #define kmalloc_array LINUX_BACKPORT(kmalloc_array) static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags) { if (size != 0 && n > SIZE_MAX / size) return NULL; return __kmalloc(n * size, flags); } #endif #endif /* __BACKPORT_SLAB_H */
backport-iwlwifi-dkms-7906/backport-include/linux/socket.h000066400000000000000000000006061351064634500237310ustar00rootroot00000000000000#ifndef __BACKPORT_SOCKET_H #define __BACKPORT_SOCKET_H #include_next <linux/socket.h> #ifndef SOL_NFC /* * backport SOL_NFC -- see commit: * NFC: llcp: Implement socket options */ #define SOL_NFC 280 #endif #ifndef __sockaddr_check_size #define __sockaddr_check_size(size) \ BUILD_BUG_ON(((size) > sizeof(struct __kernel_sockaddr_storage))) #endif #endif /* __BACKPORT_SOCKET_H */
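/*
 * Minimal sketch (not from the original tree) showing why the
 * kmalloc_array() backport above checks n > SIZE_MAX / size: without it,
 * n * size could wrap around and silently return an undersized buffer.
 * struct example_rec and example_alloc_table() are hypothetical names.
 */
#include <linux/slab.h>
#include <linux/types.h>

struct example_rec {
	u32 id;
	u32 val;
};

static struct example_rec *example_alloc_table(size_t n)
{
	/* behaves like kmalloc(n * sizeof(struct example_rec), GFP_KERNEL),
	 * except that an n large enough to overflow the multiplication
	 * yields NULL instead of a too-small allocation */
	return kmalloc_array(n, sizeof(struct example_rec), GFP_KERNEL);
}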
backport-iwlwifi-dkms-7906/backport-include/linux/spi/000077500000000000000000000000001351064634500230615ustar00rootroot00000000000000backport-iwlwifi-dkms-7906/backport-include/linux/spi/spi.h000066400000000000000000000037731351064634500240370ustar00rootroot00000000000000#ifndef _BACKPORTS_LINUX_SPI_H #define _BACKPORTS_LINUX_SPI_H 1 #include_next #ifndef module_spi_driver /** * module_spi_driver() - Helper macro for registering a SPI driver * @__spi_driver: spi_driver struct * * Helper macro for SPI drivers which do not do anything special in module * init/exit. This eliminates a lot of boilerplate. Each module may only * use this macro once, and calling it replaces module_init() and module_exit() */ #define module_spi_driver(__spi_driver) \ module_driver(__spi_driver, spi_register_driver, \ spi_unregister_driver) #endif #if LINUX_VERSION_IS_LESS(3,9,0) /** * spi_message_init_with_transfers - Initialize spi_message and append transfers * @m: spi_message to be initialized * @xfers: An array of spi transfers * @num_xfers: Number of items in the xfer array * * This function initializes the given spi_message and adds each spi_transfer in * the given array to the message. */ #define spi_message_init_with_transfers LINUX_BACKPORT(spi_message_init_with_transfers) static inline void spi_message_init_with_transfers(struct spi_message *m, struct spi_transfer *xfers, unsigned int num_xfers) { unsigned int i; spi_message_init(m); for (i = 0; i < num_xfers; ++i) spi_message_add_tail(&xfers[i], m); } /** * spi_sync_transfer - synchronous SPI data transfer * @spi: device with which data will be exchanged * @xfers: An array of spi_transfers * @num_xfers: Number of items in the xfer array * Context: can sleep * * Does a synchronous SPI data transfer of the given spi_transfer array. * * For more specific semantics see spi_sync(). * * It returns zero on success, else a negative error code. */ #define spi_sync_transfer LINUX_BACKPORT(spi_sync_transfer) static inline int spi_sync_transfer(struct spi_device *spi, struct spi_transfer *xfers, unsigned int num_xfers) { struct spi_message msg; spi_message_init_with_transfers(&msg, xfers, num_xfers); return spi_sync(spi, &msg); } #endif /* < 3.9 */ #endif /* _BACKPORTS_LINUX_SPI_H */ backport-iwlwifi-dkms-7906/backport-include/linux/spinlock.h000066400000000000000000000026721351064634500242700ustar00rootroot00000000000000#ifndef __BACKPORT_SPINLOCK_H #define __BACKPORT_SPINLOCK_H #include_next #if LINUX_VERSION_IS_LESS(3,18,0) #ifndef CONFIG_DEBUG_LOCK_ALLOC #undef raw_spin_lock_nested /* * Always evaluate the 'subclass' argument to avoid that the compiler * warns about set-but-not-used variables when building with * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1. 
*/ # define raw_spin_lock_nested(lock, subclass) \ _raw_spin_lock(((void)(subclass), (lock))) # define raw_spin_lock_nest_lock(lock, nest_lock) _raw_spin_lock(lock) #endif #endif /* LINUX_VERSION_IS_LESS(3,18,0) */ #if LINUX_VERSION_IS_LESS(4,16,0) int alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask, size_t max_size, unsigned int cpu_mult, gfp_t gfp); void free_bucket_spinlocks(spinlock_t *locks); #endif /* LINUX_VERSION_IS_LESS(4,16,0) */ #if LINUX_VERSION_IS_LESS(4,19,0) int __alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask, size_t max_size, unsigned int cpu_mult, gfp_t gfp, const char *name, struct lock_class_key *key); #define alloc_bucket_spinlocks(locks, lock_mask, max_size, cpu_mult, gfp) \ ({ \ static struct lock_class_key key; \ int ret; \ \ ret = __alloc_bucket_spinlocks(locks, lock_mask, max_size, \ cpu_mult, gfp, #locks, &key); \ ret; \ }) #endif /* LINUX_VERSION_IS_LESS(4,19,0) */ #endif /* __BACKPORT_SPINLOCK_H */
backport-iwlwifi-dkms-7906/backport-include/linux/static_key.h000066400000000000000000000036171351064634500246050ustar00rootroot00000000000000#ifndef _BACKPORTS_LINUX_STATIC_KEY_H #define _BACKPORTS_LINUX_STATIC_KEY_H 1 #include <linux/version.h> #if LINUX_VERSION_IS_GEQ(3,3,0) /* kernels >= 3.3 */ /* * XXX: NOTE! * * Some 3.3 kernels carry <linux/static_key.h> but some don't, even though * it's just including <linux/jump_label.h>. What makes it more confusing is * that later all this got shuffled. The safe thing to do then is to just * assume kernels 3.3..3.4 don't have it and include <linux/jump_label.h> * instead, and for newer kernels include <linux/static_key.h>. */ #if LINUX_VERSION_IS_GEQ(3,5,0) #include_next <linux/static_key.h> #else #include <linux/jump_label.h> #endif #else /* kernels < 3.3 */ /* * in between 2.6.37 - 3.5 there's a slew of changes that make * it hard to backport this properly. If you are interested in * trying you can use this as reference: * * http://drvbp1.linux-foundation.org/~mcgrof/examples/2014/04/01/backport-static-keys.patch * * And these notes: * * < v2.6.37 - No tracing support * bf5438fc - v2.6.37 - Jump label support added primarily for tracing but * tracing was broken, later kernels started sporting * functional tracing. * d430d3d7e - v3.0 - Static branch optimizations for jump labels * c5905afb - v3.3 - Static keys split out, note on the below issue * c5905afb - v3.5 - git describe --contains c5905afb claims v3.5, but * that's not true! * c4b2c0c5f - v3.13 - Adds static_key_initialized(), STATIC_KEY_CHECK_USE() * * Because of all this we skip 2.6.37 - 3.3; adding support for older kernels * can be done by carrying over the non-architecture-optimized code. * Carrying new changes into this file is a burden though, so if we really * find use for this we could just split the non optimized versions upstream * and copy that through an automatic process.
*/ #endif /* kernels < 3.3 */ #endif /* _BACKPORTS_LINUX_STATIC_KEY_H */ backport-iwlwifi-dkms-7906/backport-include/linux/stddef.h000066400000000000000000000006241351064634500237120ustar00rootroot00000000000000#ifndef __BACKPORT_LINUX_STDDEF_H #define __BACKPORT_LINUX_STDDEF_H #include_next #ifndef offsetofend /** * offsetofend(TYPE, MEMBER) * * @TYPE: The type of the structure * @MEMBER: The member within the structure to get the end offset of */ #define offsetofend(TYPE, MEMBER) \ (offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER)) #endif #endif /* __BACKPORT_LINUX_STDDEF_H */ backport-iwlwifi-dkms-7906/backport-include/linux/string.h000066400000000000000000000025351351064634500237520ustar00rootroot00000000000000#ifndef __BACKPORT_LINUX_STRING_H #define __BACKPORT_LINUX_STRING_H #include_next #include #if LINUX_VERSION_IS_LESS(4,5,0) #define memdup_user_nul LINUX_BACKPORT(memdup_user_nul) extern void *memdup_user_nul(const void __user *, size_t); #endif /* this was added in v3.2.65, v3.4.106, v3.10.60, v3.12.33, v3.14.24, * v3.17.3 and v3.18 */ #if !(LINUX_VERSION_IS_GEQ(3,17,3) || \ (LINUX_VERSION_IS_GEQ(3,14,24) && \ LINUX_VERSION_IS_LESS(3,15,0)) || \ (LINUX_VERSION_IS_GEQ(3,12,33) && \ LINUX_VERSION_IS_LESS(3,13,0)) || \ (LINUX_VERSION_IS_GEQ(3,10,60) && \ LINUX_VERSION_IS_LESS(3,11,0)) || \ (LINUX_VERSION_IS_GEQ(3,4,106) && \ LINUX_VERSION_IS_LESS(3,5,0)) || \ (LINUX_VERSION_IS_GEQ(3,2,65) && \ LINUX_VERSION_IS_LESS(3,3,0))) #define memzero_explicit LINUX_BACKPORT(memzero_explicit) void memzero_explicit(void *s, size_t count); #endif #if LINUX_VERSION_IS_LESS(4,3,0) ssize_t strscpy(char *dest, const char *src, size_t count); #endif #if LINUX_VERSION_IS_LESS(4,2,0) char *strreplace(char *s, char old, char new); #endif #if LINUX_VERSION_IS_LESS(4,6,0) && \ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6) int match_string(const char * const *array, size_t n, const char *string); #endif /* LINUX_VERSION_IS_LESS(4,5,0) */ #endif /* __BACKPORT_LINUX_STRING_H */ backport-iwlwifi-dkms-7906/backport-include/linux/sysfs.h000066400000000000000000000004341351064634500236070ustar00rootroot00000000000000#ifndef __BACKPORT_LINUX_SYSFS_H #define __BACKPORT_LINUX_SYSFS_H #include_next #include #ifndef __ATTR_RW #define __ATTR_RW(_name) __ATTR(_name, (S_IWUSR | S_IRUGO), \ _name##_show, _name##_store) #endif #endif /* __BACKPORT_LINUX_SYSFS_H */ backport-iwlwifi-dkms-7906/backport-include/linux/thermal.h000066400000000000000000000121511351064634500240730ustar00rootroot00000000000000#ifndef __BACKPORT_LINUX_THERMAL_H #define __BACKPORT_LINUX_THERMAL_H #include_next #include #ifdef CONFIG_THERMAL #if LINUX_VERSION_IS_LESS(3,8,0) #include struct thermal_bind_params { struct thermal_cooling_device *cdev; int weight; int trip_mask; int (*match)(struct thermal_zone_device *tz, struct thermal_cooling_device *cdev); }; struct thermal_zone_params { int num_tbps; struct thermal_bind_params *tbp; }; static inline struct thermal_zone_device * backport_thermal_zone_device_register(const char *type, int trips, int mask, void *devdata, struct thermal_zone_device_ops *ops, const struct thermal_zone_params *tzp, int passive_delay, int polling_delay) { return ERR_PTR(-EOPNOTSUPP); } #define thermal_zone_device_register backport_thermal_zone_device_register static inline void thermal_notify_framework(struct thermal_zone_device *tz, int trip) { } #else /* < 3.8.0 */ #if LINUX_VERSION_IS_LESS(3,10,0) #define thermal_notify_framework notify_thermal_framework #endif /* LINUX_VERSION_IS_LESS(3,10,0) */ #if 
LINUX_VERSION_IS_LESS(4,3,0) typedef struct thermal_zone_device_ops old_thermal_zone_device_ops_t; /* also add a way to call the old register and unregister functions */ static inline struct thermal_zone_device *old_thermal_zone_device_register( const char *type, int trips, int mask, void *devdata, old_thermal_zone_device_ops_t *_ops, const struct thermal_zone_params *_tzp, int passive_delay, int polling_delay) { struct thermal_zone_device_ops *ops = (struct thermal_zone_device_ops *) _ops; /* cast the const away */ struct thermal_zone_params *tzp = (struct thermal_zone_params *)_tzp; return thermal_zone_device_register(type, trips, mask, devdata, ops, tzp, passive_delay, polling_delay); } static inline void old_thermal_zone_device_unregister(struct thermal_zone_device *dev) { thermal_zone_device_unregister(dev); } #ifndef CONFIG_BTNS_PMIC struct backport_thermal_zone_device_ops { int (*bind) (struct thermal_zone_device *, struct thermal_cooling_device *); int (*unbind) (struct thermal_zone_device *, struct thermal_cooling_device *); int (*get_temp) (struct thermal_zone_device *, int *); int (*get_mode) (struct thermal_zone_device *, enum thermal_device_mode *); int (*set_mode) (struct thermal_zone_device *, enum thermal_device_mode); int (*get_trip_type) (struct thermal_zone_device *, int, enum thermal_trip_type *); int (*get_trip_temp) (struct thermal_zone_device *, int, int *); int (*set_trip_temp) (struct thermal_zone_device *, int, int); int (*get_trip_hyst) (struct thermal_zone_device *, int, int *); int (*set_trip_hyst) (struct thermal_zone_device *, int, int); int (*get_crit_temp) (struct thermal_zone_device *, int *); int (*set_emul_temp) (struct thermal_zone_device *, int); int (*get_trend) (struct thermal_zone_device *, int, enum thermal_trend *); int (*notify) (struct thermal_zone_device *, int, enum thermal_trip_type); }; #else /* CONFIG_BTNS_PMIC */ struct backport_thermal_zone_device_ops { int (*bind) (struct thermal_zone_device *, struct thermal_cooling_device *); int (*unbind) (struct thermal_zone_device *, struct thermal_cooling_device *); int (*get_temp) (struct thermal_zone_device *, int *); int (*get_mode) (struct thermal_zone_device *, enum thermal_device_mode *); int (*set_mode) (struct thermal_zone_device *, enum thermal_device_mode); int (*get_trip_type) (struct thermal_zone_device *, int, enum thermal_trip_type *); int (*get_trip_temp) (struct thermal_zone_device *, int, int *); int (*set_trip_temp) (struct thermal_zone_device *, int, int); int (*get_trip_hyst) (struct thermal_zone_device *, int, int *); int (*set_trip_hyst) (struct thermal_zone_device *, int, int); int (*get_slope) (struct thermal_zone_device *, int *); int (*set_slope) (struct thermal_zone_device *, int); int (*get_intercept) (struct thermal_zone_device *, int *); int (*set_intercept) (struct thermal_zone_device *, int); int (*get_crit_temp) (struct thermal_zone_device *, int *); int (*set_emul_temp) (struct thermal_zone_device *, int); int (*get_trend) (struct thermal_zone_device *, int, enum thermal_trend *); int (*notify) (struct thermal_zone_device *, int, enum thermal_trip_type); }; #endif /* CONFIG_BTNS_PMIC */ #define thermal_zone_device_ops LINUX_BACKPORT(thermal_zone_device_ops) #undef thermal_zone_device_register struct thermal_zone_device *backport_thermal_zone_device_register( const char *type, int trips, int mask, void *devdata, struct thermal_zone_device_ops *ops, const struct thermal_zone_params *tzp, int passive_delay, int polling_delay); #define thermal_zone_device_register \ 
LINUX_BACKPORT(thermal_zone_device_register) #undef thermal_zone_device_unregister void backport_thermal_zone_device_unregister(struct thermal_zone_device *); #define thermal_zone_device_unregister \ LINUX_BACKPORT(thermal_zone_device_unregister) #endif /* LINUX_VERSION_IS_LESS(4,3,0) */ #endif /* ! < 3.8.0 */ #endif /* CONFIG_THERMAL */ #endif /* __BACKPORT_LINUX_THERMAL_H */ backport-iwlwifi-dkms-7906/backport-include/linux/time.h000066400000000000000000000004261351064634500233770ustar00rootroot00000000000000#ifndef __BACKPORT_LINUX_TIME_H #define __BACKPORT_LINUX_TIME_H #if RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,6) #include_next #include #endif #include_next #include #endif /* __BACKPORT_LINUX_TIME_H */ backport-iwlwifi-dkms-7906/backport-include/linux/time64.h000066400000000000000000000027411351064634500235530ustar00rootroot00000000000000#ifndef __BACKPORT_LINUX_TIME64_H #define __BACKPORT_LINUX_TIME64_H #if LINUX_VERSION_IS_GEQ(3,17,0) #include_next #else #include #endif #if LINUX_VERSION_IS_LESS(3,17,0) && \ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6) #define timespec64_equal timespec_equal #define timespec64_compare timespec_compare #define set_normalized_timespec64 set_normalized_timespec #define timespec64_add_safe timespec_add_safe #define timespec64_add timespec_add #define timespec64_sub timespec_sub #define timespec64_valid timespec_valid #define timespec64_valid_strict timespec_valid_strict #define timespec64_to_ns timespec_to_ns #define ns_to_timespec64 ns_to_timespec #define timespec64_add_ns timespec_add_ns #define timespec64 timespec #endif /* LINUX_VERSION_IS_LESS(3,17,0) */ #if LINUX_VERSION_IS_LESS(3,19,0) static inline time64_t mktime64(const unsigned int year0, const unsigned int mon0, const unsigned int day, const unsigned int hour, const unsigned int min, const unsigned int sec) { unsigned int mon = mon0, year = year0; /* 1..12 -> 11,12,1..10 */ if (0 >= (int) (mon -= 2)) { mon += 12; /* Puts Feb last since it has leap day */ year -= 1; } return ((((time64_t) (year/4 - year/100 + year/400 + 367*mon/12 + day) + year*365 - 719499 )*24 + hour /* now have hours - midnight tomorrow handled here */ )*60 + min /* now have minutes */ )*60 + sec; /* finally seconds */ } #endif #endif /* __BACKPORT_LINUX_TIME64_H */ backport-iwlwifi-dkms-7906/backport-include/linux/timecounter.h000066400000000000000000000012301351064634500247710ustar00rootroot00000000000000#ifndef __BACKPORT_LINUX_TIMECOUNTER_H #define __BACKPORT_LINUX_TIMECOUNTER_H #if LINUX_VERSION_IS_GEQ(3,20,0) #include_next #else #include /** * timecounter_adjtime - Shifts the time of the clock. * @delta: Desired change in nanoseconds. */ #define timecounter_adjtime LINUX_BACKPORT(timecounter_adjtime) static inline void timecounter_adjtime(struct timecounter *tc, s64 delta) { tc->nsec += delta; } #endif #ifndef CYCLECOUNTER_MASK /* simplify initialization of mask field */ #define CYCLECOUNTER_MASK(bits) (cycle_t)((bits) < 64 ? 
((1ULL<<(bits))-1) : -1) #endif #endif /* __BACKPORT_LINUX_TIMECOUNTER_H */ backport-iwlwifi-dkms-7906/backport-include/linux/timekeeping.h000066400000000000000000000043741351064634500247500ustar00rootroot00000000000000#ifndef __BACKPORT_TIMEKEEPING_H #define __BACKPORT_TIMEKEEPING_H #include #include #if LINUX_VERSION_IS_GEQ(3,17,0) || \ RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,6) #include_next #endif #if LINUX_VERSION_IS_LESS(3,17,0) && \ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6) #define ktime_get_ns LINUX_BACKPORT(ktime_get_ns) extern ktime_t ktime_get(void); #define ktime_get_ns LINUX_BACKPORT(ktime_get_ns) static inline u64 ktime_get_ns(void) { return ktime_to_ns(ktime_get()); } extern ktime_t ktime_get_boottime(void); #define ktime_get_boot_ns LINUX_BACKPORT(ktime_get_boot_ns) static inline u64 ktime_get_boot_ns(void) { return ktime_to_ns(ktime_get_boottime()); } #endif /* < 3.17 */ #if LINUX_VERSION_IS_LESS(4,18,0) extern time64_t ktime_get_boottime_seconds(void); #endif /* < 4.18 */ #if LINUX_VERSION_IS_LESS(3,19,0) static inline time64_t ktime_get_seconds(void) { struct timespec t; ktime_get_ts(&t); return t.tv_sec; } static inline time64_t ktime_get_real_seconds(void) { struct timeval tv; do_gettimeofday(&tv); return tv.tv_sec; } #endif #if LINUX_VERSION_IS_LESS(3,17,0) && \ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6) static inline void ktime_get_ts64(struct timespec64 *ts) { ktime_get_ts(ts); } #endif #if LINUX_VERSION_IS_LESS(3,17,0) && \ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6) /* This was introduced in 4.15, but we only need it in the * ktime_get_raw_ts64 backport() for < 3.17. */ #if __BITS_PER_LONG == 64 static inline struct timespec64 timespec_to_timespec64(const struct timespec ts) { return *(const struct timespec64 *)&ts; } #else static inline struct timespec64 timespec_to_timespec64(const struct timespec ts) { struct timespec64 ret; ret.tv_sec = ts.tv_sec; ret.tv_nsec = ts.tv_nsec; return ret; } #endif #endif /* < 3.17 */ #if LINUX_VERSION_IS_LESS(4,18,0) && \ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6) #define ktime_get_raw_ts64 LINUX_BACKPORT(ktime_get_raw_ts64) static inline void ktime_get_raw_ts64(struct timespec64 *ts) { #if LINUX_VERSION_IS_LESS(3,19,0) struct timespec64 ts64; getrawmonotonic(&ts64); *ts = timespec_to_timespec64(ts64); #else return getrawmonotonic64(ts); #endif /* < 3.19 */ } #endif #endif /* __BACKPORT_TIMEKEEPING_H */ backport-iwlwifi-dkms-7906/backport-include/linux/timer.h000066400000000000000000000036121351064634500235610ustar00rootroot00000000000000#ifndef _BACKPORT_TIMER_H #define _BACKPORT_TIMER_H #include_next #ifndef setup_deferrable_timer /* * The TIMER_DEFERRABLE flag has not been around since 3.0 so * two different backports are needed here. 
*/ #ifdef TIMER_DEFERRABLE #define setup_deferrable_timer(timer, fn, data) \ __setup_timer((timer), (fn), (data), TIMER_DEFERRABLE) #else static inline void setup_deferrable_timer_key(struct timer_list *timer, const char *name, struct lock_class_key *key, void (*func)(unsigned long), unsigned long data) { timer->function = func; timer->data = data; init_timer_deferrable_key(timer, name, key); } #define setup_deferrable_timer(timer, fn, data) \ do { \ static struct lock_class_key __key; \ setup_deferrable_timer_key((timer), #timer, &__key, \ (fn), (data)); \ } while (0) #endif #endif #ifndef TIMER_DEFERRABLE #define TIMER_DEFERRABLE 1 #endif #ifndef from_timer #define TIMER_DATA_TYPE unsigned long #define TIMER_FUNC_TYPE void (*)(TIMER_DATA_TYPE) static inline void timer_setup(struct timer_list *timer, void (*callback) (struct timer_list *), unsigned int flags) { #ifdef __setup_timer __setup_timer(timer, (TIMER_FUNC_TYPE) callback, (TIMER_DATA_TYPE) timer, flags); #else if (flags & TIMER_DEFERRABLE) setup_deferrable_timer(timer, (TIMER_FUNC_TYPE) callback, (TIMER_DATA_TYPE) timer); else setup_timer(timer, (TIMER_FUNC_TYPE) callback, (TIMER_DATA_TYPE) timer); #endif } #define from_timer(var, callback_timer, timer_fieldname) \ container_of(callback_timer, typeof(*var), timer_fieldname) #endif #if LINUX_VERSION_IS_LESS(4,15,0) #undef DEFINE_TIMER #define DEFINE_TIMER(_name, _function) \ struct timer_list _name = \ __TIMER_INITIALIZER(_function, 0, 0, 0) #endif #endif /* _BACKPORT_TIMER_H */ backport-iwlwifi-dkms-7906/backport-include/linux/tracepoint.h000066400000000000000000000003331351064634500246060ustar00rootroot00000000000000#include_next #ifndef __BACKPORT_LINUX_TRACEPOINT_H #define __BACKPORT_LINUX_TRACEPOINT_H #ifndef TRACE_DEFINE_ENUM #define TRACE_DEFINE_ENUM(a) #endif #endif /* __BACKPORT_LINUX_TRACEPOINT_H */ backport-iwlwifi-dkms-7906/backport-include/linux/tty.h000066400000000000000000000017631351064634500232660ustar00rootroot00000000000000#ifndef __BACKPORT_LINUX_TTY_H #define __BACKPORT_LINUX_TTY_H #include_next /* * This really belongs into uapi/asm-generic/termbits.h but * that doesn't usually get included directly. 
*/ #ifndef EXTPROC #define EXTPROC 0200000 #endif #if LINUX_VERSION_IS_LESS(3,7,0) /* Backports tty_lock: Localise the lock */ #define tty_lock(__tty) tty_lock() #define tty_unlock(__tty) tty_unlock() #define tty_port_register_device(port, driver, index, device) \ tty_register_device(driver, index, device) #endif #if LINUX_VERSION_IS_LESS(3,10,0) extern void tty_port_tty_wakeup(struct tty_port *port); extern void tty_port_tty_hangup(struct tty_port *port, bool check_clocal); #endif /* LINUX_VERSION_IS_LESS(3,10,0) */ #if LINUX_VERSION_IS_LESS(4,1,0) && \ LINUX_VERSION_IS_GEQ(4,0,0) extern int tty_set_termios(struct tty_struct *tty, struct ktermios *kt); #endif /* LINUX_VERSION_IS_LESS(4,1,0) */ #ifndef N_NCI #define N_NCI 25 /* NFC NCI UART */ #endif #endif /* __BACKPORT_LINUX_TTY_H */ backport-iwlwifi-dkms-7906/backport-include/linux/tty_flip.h000066400000000000000000000005551351064634500242760ustar00rootroot00000000000000#ifndef __BACKPORT_TTY_FLIP_H #define __BACKPORT_TTY_FLIP_H #include_next #include #if LINUX_VERSION_IS_LESS(3,9,0) #define tty_flip_buffer_push(port) tty_flip_buffer_push((port)->tty) #define tty_insert_flip_string(port, chars, size) tty_insert_flip_string((port)->tty, chars, size) #endif #endif /* __BACKPORT_TTY_FLIP_H */ backport-iwlwifi-dkms-7906/backport-include/linux/types.h000066400000000000000000000004211351064634500236000ustar00rootroot00000000000000#ifndef __BACKPORT_TYPES #define __BACKPORT_TYPES #include #include_next #if LINUX_VERSION_IS_LESS(3,17,0) typedef __s64 time64_t; #endif #if LINUX_VERSION_IS_LESS(4,16,0) typedef unsigned __poll_t; #endif #endif /* __BACKPORT_TYPES */ backport-iwlwifi-dkms-7906/backport-include/linux/u64_stats_sync.h000066400000000000000000000123411351064634500253300ustar00rootroot00000000000000#ifndef __BACKPORT_LINUX_U64_STATS_SYNC_H #define __BACKPORT_LINUX_U64_STATS_SYNC_H #include #include #if LINUX_VERSION_IS_GEQ(3,6,0) #include_next #else /* * To properly implement 64bits network statistics on 32bit and 64bit hosts, * we provide a synchronization point, that is a noop on 64bit or UP kernels. * * Key points : * 1) Use a seqcount on SMP 32bits, with low overhead. * 2) Whole thing is a noop on 64bit arches or UP kernels. * 3) Write side must ensure mutual exclusion or one seqcount update could * be lost, thus blocking readers forever. * If this synchronization point is not a mutex, but a spinlock or * spinlock_bh() or disable_bh() : * 3.1) Write side should not sleep. * 3.2) Write side should not allow preemption. * 3.3) If applicable, interrupts should be disabled. * * 4) If reader fetches several counters, there is no guarantee the whole values * are consistent (remember point 1) : this is a noop on 64bit arches anyway) * * 5) readers are allowed to sleep or be preempted/interrupted : They perform * pure reads. But if they have to fetch many values, it's better to not allow * preemptions/interruptions to avoid many retries. * * 6) If counter might be written by an interrupt, readers should block interrupts. * (On UP, there is no seqcount_t protection, a reader allowing interrupts could * read partial values) * * 7) For softirq uses, readers can use u64_stats_fetch_begin_irq() and * u64_stats_fetch_retry_irq() helpers * * Usage : * * Stats producer (writer) should use following template granted it already got * an exclusive access to counters (a lock is already taken, or per cpu * data is used [in a non preemptable context]) * * spin_lock_bh(...) or other synchronization to get exclusive access * ... 
* u64_stats_update_begin(&stats->syncp); * stats->bytes64 += len; // non atomic operation * stats->packets64++; // non atomic operation * u64_stats_update_end(&stats->syncp); * * While a consumer (reader) should use following template to get consistent * snapshot for each variable (but no guarantee on several ones) * * u64 tbytes, tpackets; * unsigned int start; * * do { * start = u64_stats_fetch_begin(&stats->syncp); * tbytes = stats->bytes64; // non atomic operation * tpackets = stats->packets64; // non atomic operation * } while (u64_stats_fetch_retry(&stats->syncp, start)); * * * Example of use in drivers/net/loopback.c, using per_cpu containers, * in BH disabled context. */ #include struct u64_stats_sync { #if BITS_PER_LONG==32 && defined(CONFIG_SMP) seqcount_t seq; #endif }; static inline void u64_stats_update_begin(struct u64_stats_sync *syncp) { #if BITS_PER_LONG==32 && defined(CONFIG_SMP) write_seqcount_begin(&syncp->seq); #endif } static inline void u64_stats_update_end(struct u64_stats_sync *syncp) { #if BITS_PER_LONG==32 && defined(CONFIG_SMP) write_seqcount_end(&syncp->seq); #endif } static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp) { #if BITS_PER_LONG==32 && defined(CONFIG_SMP) return read_seqcount_begin(&syncp->seq); #else #if BITS_PER_LONG==32 preempt_disable(); #endif return 0; #endif } static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp, unsigned int start) { #if BITS_PER_LONG==32 && defined(CONFIG_SMP) return read_seqcount_retry(&syncp->seq, start); #else #if BITS_PER_LONG==32 preempt_enable(); #endif return false; #endif } #endif /* LINUX_VERSION_IS_GEQ(3,6,0) */ #if LINUX_VERSION_IS_LESS(3,15,0) && \ !(LINUX_VERSION_CODE == KERNEL_VERSION(3,13,11) && UTS_UBUNTU_RELEASE_ABI > 30) && \ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6) static inline unsigned int u64_stats_fetch_begin_irq(const struct u64_stats_sync *syncp) { #if BITS_PER_LONG==32 && defined(CONFIG_SMP) return read_seqcount_begin(&syncp->seq); #else #if BITS_PER_LONG==32 local_irq_disable(); #endif return 0; #endif } static inline bool u64_stats_fetch_retry_irq(const struct u64_stats_sync *syncp, unsigned int start) { #if BITS_PER_LONG==32 && defined(CONFIG_SMP) return read_seqcount_retry(&syncp->seq, start); #else #if BITS_PER_LONG==32 local_irq_enable(); #endif return false; #endif } #endif /* LINUX_VERSION_IS_GEQ(3,15,0) */ #if LINUX_VERSION_IS_LESS(3,13,0) #if BITS_PER_LONG == 32 && defined(CONFIG_SMP) # define u64_stats_init(syncp) seqcount_init(syncp.seq) #else # define u64_stats_init(syncp) do { } while (0) #endif #endif /* LINUX_VERSION_IS_LESS(3,13,0) */ #if LINUX_VERSION_IS_LESS(4,16,0) && \ !LINUX_VERSION_IN_RANGE(4,14,44, 4,15,0) && \ !(LINUX_VERSION_IS_GEQ(4,15,18) && UTS_UBUNTU_RELEASE_ABI >= 33) static inline unsigned long u64_stats_update_begin_irqsave(struct u64_stats_sync *syncp) { unsigned long flags = 0; #if BITS_PER_LONG==32 && defined(CONFIG_SMP) local_irq_save(flags); write_seqcount_begin(&syncp->seq); #endif return flags; } static inline void u64_stats_update_end_irqrestore(struct u64_stats_sync *syncp, unsigned long flags) { #if BITS_PER_LONG==32 && defined(CONFIG_SMP) write_seqcount_end(&syncp->seq); local_irq_restore(flags); #endif } #endif /* < 4.16 */ #endif /* __BACKPORT_LINUX_U64_STATS_SYNC_H */ backport-iwlwifi-dkms-7906/backport-include/linux/uidgid.h000066400000000000000000000121051351064634500237030ustar00rootroot00000000000000#if LINUX_VERSION_IS_GEQ(3,5,0) #include_next #else #ifndef _LINUX_UIDGID_H #define 
_LINUX_UIDGID_H /* * A set of types for the internal kernel types representing uids and gids. * * The types defined in this header allow distinguishing which uids and gids in * the kernel are values used by userspace and which uid and gid values are * the internal kernel values. With the addition of user namespaces the values * can be different. Using the type system makes it possible for the compiler * to detect when we overlook these differences. * */ #include #include struct user_namespace; extern struct user_namespace init_user_ns; #ifdef CONFIG_UIDGID_STRICT_TYPE_CHECKS typedef struct { uid_t val; } kuid_t; typedef struct { gid_t val; } kgid_t; #define KUIDT_INIT(value) (kuid_t){ value } #define KGIDT_INIT(value) (kgid_t){ value } static inline uid_t __kuid_val(kuid_t uid) { return uid.val; } static inline gid_t __kgid_val(kgid_t gid) { return gid.val; } #else typedef uid_t kuid_t; typedef gid_t kgid_t; static inline uid_t __kuid_val(kuid_t uid) { return uid; } static inline gid_t __kgid_val(kgid_t gid) { return gid; } #define KUIDT_INIT(value) ((kuid_t) value ) #define KGIDT_INIT(value) ((kgid_t) value ) #endif #define GLOBAL_ROOT_UID KUIDT_INIT(0) #define GLOBAL_ROOT_GID KGIDT_INIT(0) #define INVALID_UID KUIDT_INIT(-1) #define INVALID_GID KGIDT_INIT(-1) static inline bool uid_eq(kuid_t left, kuid_t right) { return __kuid_val(left) == __kuid_val(right); } static inline bool gid_eq(kgid_t left, kgid_t right) { return __kgid_val(left) == __kgid_val(right); } static inline bool uid_gt(kuid_t left, kuid_t right) { return __kuid_val(left) > __kuid_val(right); } static inline bool gid_gt(kgid_t left, kgid_t right) { return __kgid_val(left) > __kgid_val(right); } static inline bool uid_gte(kuid_t left, kuid_t right) { return __kuid_val(left) >= __kuid_val(right); } static inline bool gid_gte(kgid_t left, kgid_t right) { return __kgid_val(left) >= __kgid_val(right); } static inline bool uid_lt(kuid_t left, kuid_t right) { return __kuid_val(left) < __kuid_val(right); } static inline bool gid_lt(kgid_t left, kgid_t right) { return __kgid_val(left) < __kgid_val(right); } static inline bool uid_lte(kuid_t left, kuid_t right) { return __kuid_val(left) <= __kuid_val(right); } static inline bool gid_lte(kgid_t left, kgid_t right) { return __kgid_val(left) <= __kgid_val(right); } static inline bool uid_valid(kuid_t uid) { return !uid_eq(uid, INVALID_UID); } static inline bool gid_valid(kgid_t gid) { return !gid_eq(gid, INVALID_GID); } #ifdef CONFIG_USER_NS #define make_kuid LINUX_BACKPORT(make_kuid) extern kuid_t make_kuid(struct user_namespace *from, uid_t uid); #define make_kgid LINUX_BACKPORT(make_kgid) extern kgid_t make_kgid(struct user_namespace *from, gid_t gid); #define from_kuid LINUX_BACKPORT(from_kuid) extern uid_t from_kuid(struct user_namespace *to, kuid_t uid); #define from_kgid LINUX_BACKPORT(from_kgid) extern gid_t from_kgid(struct user_namespace *to, kgid_t gid); #define from_kuid_munged LINUX_BACKPORT(from_kuid_munged) extern uid_t from_kuid_munged(struct user_namespace *to, kuid_t uid); #define from_kgid_munged LINUX_BACKPORT(from_kgid_munged) extern gid_t from_kgid_munged(struct user_namespace *to, kgid_t gid); #define kuid_has_mapping LINUX_BACKPORT(kuid_has_mapping) static inline bool kuid_has_mapping(struct user_namespace *ns, kuid_t uid) { return from_kuid(ns, uid) != (uid_t) -1; } #define kgid_has_mapping LINUX_BACKPORT(kgid_has_mapping) static inline bool kgid_has_mapping(struct user_namespace *ns, kgid_t gid) { return from_kgid(ns, gid) != (gid_t) -1; } #else #define 
make_kuid LINUX_BACKPORT(make_kuid) static inline kuid_t make_kuid(struct user_namespace *from, uid_t uid) { return KUIDT_INIT(uid); } #define make_kgid LINUX_BACKPORT(make_kgid) static inline kgid_t make_kgid(struct user_namespace *from, gid_t gid) { return KGIDT_INIT(gid); } #define from_kuid LINUX_BACKPORT(from_kuid) static inline uid_t from_kuid(struct user_namespace *to, kuid_t kuid) { return __kuid_val(kuid); } #define from_kgid LINUX_BACKPORT(from_kgid) static inline gid_t from_kgid(struct user_namespace *to, kgid_t kgid) { return __kgid_val(kgid); } #define from_kuid_munged LINUX_BACKPORT(from_kuid_munged) static inline uid_t from_kuid_munged(struct user_namespace *to, kuid_t kuid) { uid_t uid = from_kuid(to, kuid); if (uid == (uid_t)-1) uid = overflowuid; return uid; } #define from_kgid_munged LINUX_BACKPORT(from_kgid_munged) static inline gid_t from_kgid_munged(struct user_namespace *to, kgid_t kgid) { gid_t gid = from_kgid(to, kgid); if (gid == (gid_t)-1) gid = overflowgid; return gid; } #define kuid_has_mapping LINUX_BACKPORT(kuid_has_mapping) static inline bool kuid_has_mapping(struct user_namespace *ns, kuid_t uid) { return true; } #define kgid_has_mapping LINUX_BACKPORT(kgid_has_mapping) static inline bool kgid_has_mapping(struct user_namespace *ns, kgid_t gid) { return true; } #endif /* CONFIG_USER_NS */ #endif /* _LINUX_UIDGID_H */ #endif /* LINUX_VERSION_IS_GEQ(3,5,0) */ backport-iwlwifi-dkms-7906/backport-include/linux/usb.h000066400000000000000000000054341351064634500232360ustar00rootroot00000000000000#ifndef __BACKPORT_USB_H #define __BACKPORT_USB_H #include_next #include #ifndef module_usb_driver /** * module_usb_driver() - Helper macro for registering a USB driver * @__usb_driver: usb_driver struct * * Helper macro for USB drivers which do not do anything special in module * init/exit. This eliminates a lot of boilerplate. Each module may only * use this macro once, and calling it replaces module_init() and module_exit() */ #define module_usb_driver(__usb_driver) \ module_driver(__usb_driver, usb_register, \ usb_deregister) #endif #ifndef USB_VENDOR_AND_INTERFACE_INFO /** * Backports * * commit d81a5d1956731c453b85c141458d4ff5d6cc5366 * Author: Gustavo Padovan * Date: Tue Jul 10 19:10:06 2012 -0300 * * USB: add USB_VENDOR_AND_INTERFACE_INFO() macro */ #define USB_VENDOR_AND_INTERFACE_INFO(vend, cl, sc, pr) \ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO \ | USB_DEVICE_ID_MATCH_VENDOR, \ .idVendor = (vend), \ .bInterfaceClass = (cl), \ .bInterfaceSubClass = (sc), \ .bInterfaceProtocol = (pr) #endif /* USB_VENDOR_AND_INTERFACE_INFO */ #ifndef USB_DEVICE_INTERFACE_NUMBER /** * USB_DEVICE_INTERFACE_NUMBER - describe a usb device with a specific interface number * @vend: the 16 bit USB Vendor ID * @prod: the 16 bit USB Product ID * @num: bInterfaceNumber value * * This macro is used to create a struct usb_device_id that matches a * specific interface number of devices. */ #define USB_DEVICE_INTERFACE_NUMBER(vend, prod, num) \ .match_flags = USB_DEVICE_ID_MATCH_DEVICE, \ .idVendor = (vend), \ .idProduct = (prod) #endif /* USB_DEVICE_INTERFACE_NUMBER */ #ifndef USB_DEVICE_INTERFACE_CLASS /** * USB_DEVICE_INTERFACE_CLASS - describe a usb device with a specific interface class * @vend: the 16 bit USB Vendor ID * @prod: the 16 bit USB Product ID * @cl: bInterfaceClass value * * This macro is used to create a struct usb_device_id that matches a * specific interface class of devices. 
*/ #define USB_DEVICE_INTERFACE_CLASS(vend, prod, cl) \ .match_flags = USB_DEVICE_ID_MATCH_DEVICE | \ USB_DEVICE_ID_MATCH_INT_CLASS, \ .idVendor = (vend), \ .idProduct = (prod), \ .bInterfaceClass = (cl) #endif /* USB_DEVICE_INTERFACE_CLASS */ #ifndef USB_SUBCLASS_VENDOR_SPEC /* this is defined in usb/ch9.h, but we only need it through here */ #define USB_SUBCLASS_VENDOR_SPEC 0xff #endif #if LINUX_VERSION_IS_LESS(3,2,0) #define usb_translate_errors LINUX_BACKPORT(usb_translate_errors) static inline int usb_translate_errors(int error_code) { switch (error_code) { case 0: case -ENOMEM: case -ENODEV: case -EOPNOTSUPP: return error_code; default: return -EIO; } } #endif /* LINUX_VERSION_IS_LESS(3,2,0) */ #endif /* __BACKPORT_USB_H */
backport-iwlwifi-dkms-7906/backport-include/linux/usb/000077500000000000000000000000001351064634500230575ustar00rootroot00000000000000backport-iwlwifi-dkms-7906/backport-include/linux/usb/cdc.h000066400000000000000000000032541351064634500237650ustar00rootroot00000000000000#ifndef __BP_USB_CDC_H #define __BP_USB_CDC_H #include <linux/version.h> #if LINUX_VERSION_IS_GEQ(4,8,0) #include_next <linux/usb/cdc.h> #else #include <uapi/linux/usb/cdc.h> /* * unofficial magic numbers */ #define CDC_PHONET_MAGIC_NUMBER 0xAB #ifndef USB_CDC_MBIM_EXTENDED_TYPE #define USB_CDC_MBIM_EXTENDED_TYPE 0x1c /* "MBIM Extended Functional Descriptor" from CDC MBIM spec 1.0 errata-1 */ struct usb_cdc_mbim_extended_desc { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubType; __le16 bcdMBIMExtendedVersion; __u8 bMaxOutstandingCommandMessages; __le16 wMTU; } __attribute__ ((packed)); #endif /* * parsing CDC headers */ struct usb_cdc_parsed_header { struct usb_cdc_union_desc *usb_cdc_union_desc; struct usb_cdc_header_desc *usb_cdc_header_desc; struct usb_cdc_call_mgmt_descriptor *usb_cdc_call_mgmt_descriptor; struct usb_cdc_acm_descriptor *usb_cdc_acm_descriptor; struct usb_cdc_country_functional_desc *usb_cdc_country_functional_desc; struct usb_cdc_network_terminal_desc *usb_cdc_network_terminal_desc; struct usb_cdc_ether_desc *usb_cdc_ether_desc; struct usb_cdc_dmm_desc *usb_cdc_dmm_desc; struct usb_cdc_mdlm_desc *usb_cdc_mdlm_desc; struct usb_cdc_mdlm_detail_desc *usb_cdc_mdlm_detail_desc; struct usb_cdc_obex_desc *usb_cdc_obex_desc; struct usb_cdc_ncm_desc *usb_cdc_ncm_desc; struct usb_cdc_mbim_desc *usb_cdc_mbim_desc; struct usb_cdc_mbim_extended_desc *usb_cdc_mbim_extended_desc; bool phonet_magic_present; }; #define cdc_parse_cdc_header LINUX_BACKPORT(cdc_parse_cdc_header) int cdc_parse_cdc_header(struct usb_cdc_parsed_header *hdr, struct usb_interface *intf, u8 *buffer, int buflen); #endif #endif /* __BP_USB_CDC_H */
backport-iwlwifi-dkms-7906/backport-include/linux/usb/ch9.h000066400000000000000000000012761351064634500237210ustar00rootroot00000000000000#ifndef __BACKPORT__LINUX_USB_CH9_H #define __BACKPORT__LINUX_USB_CH9_H #include <linux/version.h> #include_next <linux/usb/ch9.h> #if LINUX_VERSION_IS_LESS(3,2,0) #include <linux/types.h> /* __u8 etc */ #include <asm/byteorder.h> /* le16_to_cpu */ /** * usb_endpoint_maxp - get endpoint's max packet size * @epd: endpoint to be checked * * Returns @epd's max packet */ #define usb_endpoint_maxp LINUX_BACKPORT(usb_endpoint_maxp) static inline int usb_endpoint_maxp(const struct usb_endpoint_descriptor *epd) { return __le16_to_cpu(epd->wMaxPacketSize); } #endif /* < 3.2 */ #if LINUX_VERSION_IS_LESS(4,6,0) #define USB_SPEED_SUPER_PLUS 6 #endif #endif /* __BACKPORT__LINUX_USB_CH9_H */
backport-iwlwifi-dkms-7906/backport-include/linux/uuid.h000066400000000000000000000013031351064634500234020ustar00rootroot00000000000000#ifndef __BACKPORT_LINUX_UUID_H_ #define
__BACKPORT_LINUX_UUID_H_
#include <linux/version.h>
#include_next <linux/uuid.h>

#ifndef UUID_STRING_LEN
/*
 * The length of a UUID string ("aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee")
 * not including trailing NUL.
 */
#define UUID_STRING_LEN		36
#endif

#if LINUX_VERSION_IS_LESS(4,13,0) && \
	RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6)
#define guid_t uuid_le
#define uuid_t uuid_be

static inline void guid_gen(guid_t *u)
{
	return uuid_le_gen(u);
}

static inline void uuid_gen(uuid_t *u)
{
	return uuid_be_gen(u);
}

static inline void guid_copy(guid_t *dst, const guid_t *src)
{
	memcpy(dst, src, sizeof(guid_t));
}
#endif

#endif /* __BACKPORT_LINUX_UUID_H_ */
backport-iwlwifi-dkms-7906/backport-include/linux/verification.h000066400000000000000000000016251351064634500251250ustar00rootroot00000000000000#ifndef __BP_VERIFICATION_H
#define __BP_VERIFICATION_H
#include <linux/version.h>

#if LINUX_VERSION_IS_GEQ(4,7,0) && !defined(CPTCFG_BPAUTO_BUILD_SYSTEM_DATA_VERIFICATION)
#include_next <linux/verification.h>
#else
#include <linux/key.h>

enum key_being_used_for {
	VERIFYING_MODULE_SIGNATURE,
	VERIFYING_FIRMWARE_SIGNATURE,
	VERIFYING_KEXEC_PE_SIGNATURE,
	VERIFYING_KEY_SIGNATURE,
	VERIFYING_KEY_SELF_SIGNATURE,
	VERIFYING_UNSPECIFIED_SIGNATURE,
	NR__KEY_BEING_USED_FOR
};

extern int verify_pkcs7_signature(const void *data, size_t len,
				  const void *raw_pkcs7, size_t pkcs7_len,
				  struct key *trusted_keys,
				  enum key_being_used_for usage,
				  int (*view_content)(void *ctx,
						      const void *data,
						      size_t len,
						      size_t asn1hdrlen),
				  void *ctx);
#endif /* LINUX_VERSION_IS_GEQ(4,7,0) && !defined(CPTCFG_BPAUTO_BUILD_SYSTEM_DATA_VERIFICATION) */

#endif /* __BP_VERIFICATION_H */
backport-iwlwifi-dkms-7906/backport-include/linux/version.h000066400000000000000000000010741351064634500241260ustar00rootroot00000000000000#ifndef _BP_LINUX_VERSION_H
#define _BP_LINUX_VERSION_H
#include_next <linux/version.h>

#ifndef RHEL_RELEASE_VERSION
#define RHEL_RELEASE_VERSION(a,b) (((a) << 8) + (b))
#endif

#ifndef RHEL_RELEASE_CODE
#define RHEL_RELEASE_CODE 0
#endif

#define LINUX_VERSION_IS_LESS(x1,x2,x3) (LINUX_VERSION_CODE < KERNEL_VERSION(x1,x2,x3))
#define LINUX_VERSION_IS_GEQ(x1,x2,x3)  (LINUX_VERSION_CODE >= KERNEL_VERSION(x1,x2,x3))
#define LINUX_VERSION_IN_RANGE(x1,x2,x3, y1,y2,y3) \
	(LINUX_VERSION_IS_GEQ(x1,x2,x3) && LINUX_VERSION_IS_LESS(y1,y2,y3))

#endif /* _BP_LINUX_VERSION_H */
backport-iwlwifi-dkms-7906/backport-include/linux/wait.h000066400000000000000000000061231351064634500234050ustar00rootroot00000000000000#ifndef __BACKPORT_LINUX_WAIT_H
#define __BACKPORT_LINUX_WAIT_H
#include_next <linux/wait.h>

#if LINUX_VERSION_IS_LESS(3,17,0) && \
	RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6)
extern int bit_wait(void *);
extern int bit_wait_io(void *);

static inline int
backport_wait_on_bit(void *word, int bit, unsigned mode)
{
	return wait_on_bit(word, bit, bit_wait, mode);
}

static inline int
backport_wait_on_bit_io(void *word, int bit, unsigned mode)
{
	return wait_on_bit(word, bit, bit_wait_io, mode);
}

#define wait_on_bit LINUX_BACKPORT(wait_on_bit)
#define wait_on_bit_io LINUX_BACKPORT(wait_on_bit_io)
#endif

#if LINUX_VERSION_IS_LESS(3,18,12) && \
	RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6)
#define WQ_FLAG_WOKEN 0x02

#define wait_woken LINUX_BACKPORT(wait_woken)
long wait_woken(wait_queue_t *wait, unsigned mode, long timeout);
#define woken_wake_function LINUX_BACKPORT(woken_wake_function)
int woken_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
#endif

/**
 * For wait_on_bit_timeout() an extra member in struct wait_bit_key is needed.
 * This was introduced in kernel 3.17 and we are only able to backport this
 * function on these kernel versions.
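 *
 * Usage sketch (illustrative, not from this header; the flag word and
 * bit number are hypothetical): a driver can wait up to one second for
 * a busy bit to clear like this:
 *
 *	unsigned long state = 0;	/* bit 0 acts as a busy flag */
 *
 *	set_bit(0, &state);
 *	...
 *	if (wait_on_bit_timeout(&state, 0, TASK_UNINTERRUPTIBLE, HZ))
 *		pr_warn("busy bit not cleared within 1s\n");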
*/ #if LINUX_VERSION_IS_GEQ(3,17,0) #if LINUX_VERSION_IS_LESS(3,18,0) #define out_of_line_wait_on_bit_timeout LINUX_BACKPORT(out_of_line_wait_on_bit_timeout) int out_of_line_wait_on_bit_timeout(void *, int, wait_bit_action_f *, unsigned, unsigned long); #define bit_wait_timeout LINUX_BACKPORT(bit_wait_timeout) extern int bit_wait_timeout(struct wait_bit_key *); #endif #if LINUX_VERSION_IS_LESS(3,20,0) #define wait_on_bit_timeout LINUX_BACKPORT(wait_on_bit_timeout) /** * wait_on_bit_timeout - wait for a bit to be cleared or a timeout elapses * @word: the word being waited on, a kernel virtual address * @bit: the bit of the word being waited on * @mode: the task state to sleep in * @timeout: timeout, in jiffies * * Use the standard hashed waitqueue table to wait for a bit * to be cleared. This is similar to wait_on_bit(), except also takes a * timeout parameter. * * Returned value will be zero if the bit was cleared before the * @timeout elapsed, or non-zero if the @timeout elapsed or process * received a signal and the mode permitted wakeup on that signal. */ static inline int wait_on_bit_timeout(void *word, int bit, unsigned mode, unsigned long timeout) { might_sleep(); if (!test_bit(bit, word)) return 0; return out_of_line_wait_on_bit_timeout(word, bit, bit_wait_timeout, mode, timeout); } #endif #endif #if LINUX_VERSION_IS_LESS(4,13,0) #define wait_queue_entry_t wait_queue_t #define wait_event_killable_timeout(wq_head, condition, timeout) \ ({ \ long __ret = timeout; \ might_sleep(); \ if (!___wait_cond_timeout(condition)) \ __ret = __wait_event_killable_timeout(wq_head, \ condition, timeout); \ __ret; \ }) #define __wait_event_killable_timeout(wq_head, condition, timeout) \ ___wait_event(wq_head, ___wait_cond_timeout(condition), \ TASK_KILLABLE, 0, timeout, \ __ret = schedule_timeout(__ret)) #endif #endif /* __BACKPORT_LINUX_WAIT_H */ backport-iwlwifi-dkms-7906/backport-include/linux/watchdog.h000066400000000000000000000003761351064634500242450ustar00rootroot00000000000000#ifndef __BACKPORT_WATCHDOG_H #define __BACKPORT_WATCHDOG_H #include_next #if LINUX_VERSION_IS_LESS(3,1,0) #define watchdog_device LINUX_BACKPORT(watchdog_device) struct watchdog_device { }; #endif #endif /* __BACKPORT_WATCHDOG_H */ backport-iwlwifi-dkms-7906/backport-include/linux/workqueue.h000066400000000000000000000041331351064634500244670ustar00rootroot00000000000000#ifndef __BACKPORT_LINUX_WORKQUEUE_H #define __BACKPORT_LINUX_WORKQUEUE_H #include_next #include #if LINUX_VERSION_IS_LESS(3,7,0) #define mod_delayed_work LINUX_BACKPORT(mod_delayed_work) bool mod_delayed_work(struct workqueue_struct *wq, struct delayed_work *dwork, unsigned long delay); #endif #ifndef create_freezable_workqueue /* note freez_a_ble -> freez_ea_able */ #define create_freezable_workqueue create_freezeable_workqueue #endif #if LINUX_VERSION_IS_LESS(3,3,0) #define __WQ_ORDERED 0 /* * commit b196be89cdc14a88cc637cdad845a75c5886c82d * Author: Tejun Heo * Date: Tue Jan 10 15:11:35 2012 -0800 * * workqueue: make alloc_workqueue() take printf fmt and args for name */ struct workqueue_struct * backport_alloc_workqueue(const char *fmt, unsigned int flags, int max_active, struct lock_class_key *key, const char *lock_name, ...); #undef alloc_workqueue #ifdef CONFIG_LOCKDEP #define alloc_workqueue(fmt, flags, max_active, args...) 
\ ({ \ static struct lock_class_key __key; \ const char *__lock_name; \ \ if (__builtin_constant_p(fmt)) \ __lock_name = (fmt); \ else \ __lock_name = #fmt; \ \ backport_alloc_workqueue((fmt), (flags), (max_active), \ &__key, __lock_name, ##args); \ }) #else #define alloc_workqueue(fmt, flags, max_active, args...) \ backport_alloc_workqueue((fmt), (flags), (max_active), \ NULL, NULL, ##args) #endif #undef alloc_ordered_workqueue #define alloc_ordered_workqueue(fmt, flags, args...) \ alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args) #define destroy_workqueue backport_destroy_workqueue void backport_destroy_workqueue(struct workqueue_struct *wq); #endif #if LINUX_VERSION_IS_LESS(3,11,0) /* power efficient workqueues were added in commit 0668106ca386. */ #define system_power_efficient_wq system_wq #define system_freezable_power_efficient_wq system_freezable_wq #endif #if LINUX_VERSION_IS_LESS(3,1,0) #define drain_workqueue(wq) flush_workqueue(wq) #endif #endif /* __BACKPORT_LINUX_WORKQUEUE_H */ backport-iwlwifi-dkms-7906/backport-include/net/000077500000000000000000000000001351064634500217155ustar00rootroot00000000000000backport-iwlwifi-dkms-7906/backport-include/net/addrconf.h000066400000000000000000000013441351064634500236500ustar00rootroot00000000000000#ifndef _BACKPORT_NET_ADDRCONF_H #define _BACKPORT_NET_ADDRCONF_H 1 #include_next #include #if LINUX_VERSION_IS_LESS(3,9,0) static inline bool ipv6_addr_is_solict_mult(const struct in6_addr *addr) { #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64 __u64 *p = (__u64 *)addr; return ((p[0] ^ cpu_to_be64(0xff02000000000000UL)) | ((p[1] ^ cpu_to_be64(0x00000001ff000000UL)) & cpu_to_be64(0xffffffffff000000UL))) == 0UL; #else return ((addr->s6_addr32[0] ^ htonl(0xff020000)) | addr->s6_addr32[1] | (addr->s6_addr32[2] ^ htonl(0x00000001)) | (addr->s6_addr[12] ^ 0xff)) == 0; #endif } #endif /* LINUX_VERSION_IS_LESS(3,9,0) */ #endif /* _BACKPORT_NET_ADDRCONF_H */ backport-iwlwifi-dkms-7906/backport-include/net/flow_keys.h000066400000000000000000000006151351064634500240720ustar00rootroot00000000000000#if LINUX_VERSION_IS_GEQ(3,3,0) #include_next #else #ifndef _NET_FLOW_KEYS_H #define _NET_FLOW_KEYS_H struct flow_keys { /* (src,dst) must be grouped, in the same way than in IP header */ __be32 src; __be32 dst; union { __be32 ports; __be16 port16[2]; }; u8 ip_proto; }; extern bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow); #endif #endif backport-iwlwifi-dkms-7906/backport-include/net/genetlink.h000066400000000000000000000123431351064634500240510ustar00rootroot00000000000000#ifndef __BACKPORT_NET_GENETLINK_H #define __BACKPORT_NET_GENETLINK_H #include_next #include static inline void __bp_genl_info_userhdr_set(struct genl_info *info, void *userhdr) { info->userhdr = userhdr; } static inline void *__bp_genl_info_userhdr(struct genl_info *info) { return info->userhdr; } #if LINUX_VERSION_IS_LESS(4,12,0) #define GENL_SET_ERR_MSG(info, msg) NL_SET_ERR_MSG(genl_info_extack(info), msg) static inline int genl_err_attr(struct genl_info *info, int err, struct nlattr *attr) { return err; } #endif /* < 4.12 */ /* this is for patches we apply */ static inline struct netlink_ext_ack *genl_info_extack(struct genl_info *info) { #if LINUX_VERSION_IS_GEQ(4,12,0) && \ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6) return info->extack; #else return info->userhdr; #endif } /* this gets put in place of info->userhdr, since we use that above */ static inline void *genl_info_userhdr(struct genl_info *info) { 
return (u8 *)info->genlhdr + GENL_HDRLEN; } /* this is for patches we apply */ #if LINUX_VERSION_IS_LESS(3,7,0) #define genl_info_snd_portid(__genl_info) (__genl_info->snd_pid) #else #define genl_info_snd_portid(__genl_info) (__genl_info->snd_portid) #endif #if LINUX_VERSION_IS_LESS(3,13,0) && \ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6) #define __genl_const #else /* < 3.13 */ #define __genl_const const #endif /* < 3.13 */ #ifndef GENLMSG_DEFAULT_SIZE #define GENLMSG_DEFAULT_SIZE (NLMSG_DEFAULT_SIZE - GENL_HDRLEN) #endif #if LINUX_VERSION_IS_LESS(3,1,0) #define genl_dump_check_consistent(cb, user_hdr) do { } while (0) #endif #if LINUX_VERSION_IS_LESS(4,10,0) #define __genl_ro_after_init #else #define __genl_ro_after_init __ro_after_init #endif #if LINUX_VERSION_IS_LESS(4,15,0) #define genlmsg_nlhdr LINUX_BACKPORT(genlmsg_nlhdr) static inline struct nlmsghdr *genlmsg_nlhdr(void *user_hdr) { return (struct nlmsghdr *)((char *)user_hdr - GENL_HDRLEN - NLMSG_HDRLEN); } #ifndef genl_dump_check_consistent static inline void backport_genl_dump_check_consistent(struct netlink_callback *cb, void *user_hdr) { struct genl_family dummy_family = { .hdrsize = 0, }; genl_dump_check_consistent(cb, user_hdr, &dummy_family); } #define genl_dump_check_consistent LINUX_BACKPORT(genl_dump_check_consistent) #endif #endif /* LINUX_VERSION_IS_LESS(4,15,0) */ #if LINUX_VERSION_IS_LESS(5,2,0) static inline int __real_backport_genl_register_family(struct genl_family *family) { return genl_register_family(family); } static inline int __real_backport_genl_unregister_family(struct genl_family *family) { return genl_unregister_family(family); } struct backport_genl_family { struct genl_family family; const struct genl_ops * copy_ops; /* copied */ int id; /* private */ unsigned int hdrsize; char name[GENL_NAMSIZ]; unsigned int version; unsigned int maxattr; bool netnsok; bool parallel_ops; const struct nla_policy *policy; int (*pre_doit)(__genl_const struct genl_ops *ops, struct sk_buff *skb, struct genl_info *info); void (*post_doit)(__genl_const struct genl_ops *ops, struct sk_buff *skb, struct genl_info *info); /* * unsupported! 
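 * The two upstream struct genl_family callbacks below are not emulated
 * by this backport; a driver that needs them would have to track
 * multicast group binds itself (sketch of what upstream declares):
 *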
int (*mcast_bind)(struct net *net, int group); void (*mcast_unbind)(struct net *net, int group); */ struct nlattr ** attrbuf; /* private */ __genl_const struct genl_ops * ops; __genl_const struct genl_multicast_group *mcgrps; unsigned int n_ops; unsigned int n_mcgrps; struct module *module; }; #undef genl_family #define genl_family backport_genl_family #define genl_register_family backport_genl_register_family int genl_register_family(struct genl_family *family); #define genl_unregister_family backport_genl_unregister_family int backport_genl_unregister_family(struct genl_family *family); #define genl_notify LINUX_BACKPORT(genl_notify) void genl_notify(const struct genl_family *family, struct sk_buff *skb, struct genl_info *info, u32 group, gfp_t flags); #define genlmsg_put LINUX_BACKPORT(genlmsg_put) void *genlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, const struct genl_family *family, int flags, u8 cmd); #define genlmsg_put_reply LINUX_BACKPORT(genlmsg_put_reply) void *genlmsg_put_reply(struct sk_buff *skb, struct genl_info *info, const struct genl_family *family, int flags, u8 cmd); #define genlmsg_multicast_netns LINUX_BACKPORT(genlmsg_multicast_netns) int genlmsg_multicast_netns(const struct genl_family *family, struct net *net, struct sk_buff *skb, u32 portid, unsigned int group, gfp_t flags); #define genlmsg_multicast LINUX_BACKPORT(genlmsg_multicast) int genlmsg_multicast(const struct genl_family *family, struct sk_buff *skb, u32 portid, unsigned int group, gfp_t flags); #define genlmsg_multicast_allns LINUX_BACKPORT(genlmsg_multicast_allns) int backport_genlmsg_multicast_allns(const struct genl_family *family, struct sk_buff *skb, u32 portid, unsigned int group, gfp_t flags); #define genl_family_attrbuf LINUX_BACKPORT(genl_family_attrbuf) static inline struct nlattr **genl_family_attrbuf(struct genl_family *family) { WARN_ON(family->parallel_ops); return family->attrbuf; } #endif /* LINUX_VERSION_IS_LESS(4,20,0) */ #endif /* __BACKPORT_NET_GENETLINK_H */ backport-iwlwifi-dkms-7906/backport-include/net/inet_frag.h000066400000000000000000000046171351064634500240340ustar00rootroot00000000000000#ifndef __BACKPORT__NET_FRAG_H__ #define __BACKPORT__NET_FRAG_H__ #include_next #include #if LINUX_VERSION_IS_LESS(3,9,0) /* Memory Tracking Functions. 
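 *
 * On these kernels the per-netns fragment memory accounting is a plain
 * atomic_t, so the helpers below just wrap atomic ops. Illustrative
 * check (sketch; the high_thresh field is assumed from the old
 * struct netns_frags layout):
 *
 *	if (frag_mem_limit(nf) > nf->high_thresh)
 *		return -ENOMEM;	/* refuse to create a new frag queue */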
*/
#define frag_mem_limit LINUX_BACKPORT(frag_mem_limit)
static inline int frag_mem_limit(struct netns_frags *nf)
{
	return atomic_read(&nf->mem);
}

#define init_frag_mem_limit LINUX_BACKPORT(init_frag_mem_limit)
static inline void init_frag_mem_limit(struct netns_frags *nf)
{
	atomic_set(&nf->mem, 0);
}

#define sum_frag_mem_limit LINUX_BACKPORT(sum_frag_mem_limit)
static inline int sum_frag_mem_limit(struct netns_frags *nf)
{
	return atomic_read(&nf->mem);
}

#define inet_frag_maybe_warn_overflow LINUX_BACKPORT(inet_frag_maybe_warn_overflow)
void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
				   const char *prefix);
#endif /* LINUX_VERSION_IS_LESS(3,9,0) */

/* the type of the parameter changed with kernel 4.3 */
#if LINUX_VERSION_IS_LESS(3,9,0) || LINUX_VERSION_IN_RANGE(3,16,51, 3,17,0)
#define sub_frag_mem_limit LINUX_BACKPORT(sub_frag_mem_limit)
static inline void sub_frag_mem_limit(struct netns_frags *nf, int i)
{
	atomic_sub(i, &nf->mem);
}

#define add_frag_mem_limit LINUX_BACKPORT(add_frag_mem_limit)
static inline void add_frag_mem_limit(struct netns_frags *nf, int i)
{
	atomic_add(i, &nf->mem);
}
#elif LINUX_VERSION_IS_LESS(4,3,0) && \
	RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6)
#define sub_frag_mem_limit LINUX_BACKPORT(sub_frag_mem_limit)
static inline void sub_frag_mem_limit(struct netns_frags *nf, int i)
{
	__percpu_counter_add(&nf->mem, -i, frag_percpu_counter_batch);
}

#define add_frag_mem_limit LINUX_BACKPORT(add_frag_mem_limit)
static inline void add_frag_mem_limit(struct netns_frags *nf, int i)
{
	__percpu_counter_add(&nf->mem, i, frag_percpu_counter_batch);
}
#endif /* LINUX_VERSION_IS_LESS(4,3,0) */

#if LINUX_VERSION_IS_LESS(4,4,0) && \
	LINUX_VERSION_IS_GEQ(3,9,0) && \
	RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6)
#define inet_frags_uninit_net LINUX_BACKPORT(inet_frags_uninit_net)
static inline void inet_frags_uninit_net(struct netns_frags *nf)
{
	percpu_counter_destroy(&nf->mem);
}
#endif /* < 4.4 && >= 3.9 */

#if LINUX_VERSION_IS_LESS(4,4,0)
static inline int backport_inet_frags_init_net(struct netns_frags *nf)
{
	inet_frags_init_net(nf);
	return 0;
}
#define inet_frags_init_net LINUX_BACKPORT(inet_frags_init_net)
#endif /* < 4.4 */

#endif /* __BACKPORT__NET_FRAG_H__ */
backport-iwlwifi-dkms-7906/backport-include/net/ip.h000066400000000000000000000005101351064634500224720ustar00rootroot00000000000000#ifndef __BACKPORT_NET_IP_H
#define __BACKPORT_NET_IP_H
#include_next <net/ip.h>
#include <linux/version.h>

#if LINUX_VERSION_IS_LESS(3,1,0)
/* Backports 56f8a75c */
static inline bool ip_is_fragment(const struct iphdr *iph)
{
	return (iph->frag_off & htons(IP_MF | IP_OFFSET)) != 0;
}
#endif

#endif /* __BACKPORT_NET_IP_H */
backport-iwlwifi-dkms-7906/backport-include/net/ip6_fib.h000066400000000000000000000013071351064634500234050ustar00rootroot00000000000000#ifndef __BACKPORT_NET_IP6_ROUTE_H
#define __BACKPORT_NET_IP6_ROUTE_H
#include_next <net/ip6_fib.h>
#include <net/ip6_route.h>
#include <net/route.h>
#include <linux/version.h>

/*
 * This function is available with one argument since kernel 3.10, but the
 * second one was added in 4.2.
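 *
 * Illustrative call site (sketch, not from this file): resolving the
 * neighbour for an output route would look like:
 *
 *	struct in6_addr *nexthop = rt6_nexthop(rt, &fl6->daddr);
 *	struct neighbour *neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop);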
*/ #if LINUX_VERSION_IS_LESS(4,2,0) #define rt6_nexthop LINUX_BACKPORT(rt6_nexthop) static inline struct in6_addr *rt6_nexthop(struct rt6_info *rt, struct in6_addr *daddr) { if (rt->rt6i_flags & RTF_GATEWAY) return &rt->rt6i_gateway; else if (rt->rt6i_flags & RTF_CACHE) return &rt->rt6i_dst.addr; else return daddr; } #endif /* LINUX_VERSION_IS_LESS(4,2,0) */ #endif /* __BACKPORT_NET_IP6_ROUTE_H */ backport-iwlwifi-dkms-7906/backport-include/net/ipv6.h000066400000000000000000000030241351064634500227510ustar00rootroot00000000000000#ifndef __BACKPORT_NET_IPV6_H #define __BACKPORT_NET_IPV6_H #include_next #include #include #include #if LINUX_VERSION_IS_LESS(3,7,0) /* * Equivalent of ipv4 struct ip */ struct frag_queue { struct inet_frag_queue q; __be32 id; /* fragment id */ u32 user; struct in6_addr saddr; struct in6_addr daddr; int iif; unsigned int csum; __u16 nhoffset; }; #endif /* LINUX_VERSION_IS_LESS(3,7,0) */ #if LINUX_VERSION_IS_LESS(3,6,0) #define ipv6_addr_hash LINUX_BACKPORT(ipv6_addr_hash) static inline u32 ipv6_addr_hash(const struct in6_addr *a) { #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64 const unsigned long *ul = (const unsigned long *)a; unsigned long x = ul[0] ^ ul[1]; return (u32)(x ^ (x >> 32)); #else return (__force u32)(a->s6_addr32[0] ^ a->s6_addr32[1] ^ a->s6_addr32[2] ^ a->s6_addr32[3]); #endif } #endif #if LINUX_VERSION_IS_LESS(4,5,0) #define ipv6_addr_prefix_copy LINUX_BACKPORT(ipv6_addr_prefix_copy) static inline void ipv6_addr_prefix_copy(struct in6_addr *addr, const struct in6_addr *pfx, int plen) { /* caller must guarantee 0 <= plen <= 128 */ int o = plen >> 3, b = plen & 0x7; memcpy(addr->s6_addr, pfx, o); if (b != 0) { addr->s6_addr[o] &= ~(0xff00 >> b); addr->s6_addr[o] |= (pfx->s6_addr[o] & (0xff00 >> b)); } } #endif #endif /* __BACKPORT_NET_IPV6_H */ backport-iwlwifi-dkms-7906/backport-include/net/iw_handler.h000066400000000000000000000024271351064634500242070ustar00rootroot00000000000000#ifndef __BACKPORT_IW_HANDLER_H #define __BACKPORT_IW_HANDLER_H #include_next #if LINUX_VERSION_IS_LESS(4,1,0) && \ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6) static inline char * iwe_stream_add_event_check(struct iw_request_info *info, char *stream, char *ends, struct iw_event *iwe, int event_len) { char *res = iwe_stream_add_event(info, stream, ends, iwe, event_len); if (res == stream) return ERR_PTR(-E2BIG); return res; } static inline char * iwe_stream_add_point_check(struct iw_request_info *info, char *stream, char *ends, struct iw_event *iwe, char *extra) { char *res = iwe_stream_add_point(info, stream, ends, iwe, extra); if (res == stream) return ERR_PTR(-E2BIG); return res; } #endif /* LINUX_VERSION_IS_LESS(4,1,0) */ /* this was added in v3.2.79, v3.18.30, v4.1.21, v4.4.6 and 4.5 */ #if !(LINUX_VERSION_IS_GEQ(4,4,6) || \ (LINUX_VERSION_IS_GEQ(4,1,21) && \ LINUX_VERSION_IS_LESS(4,2,0)) || \ (LINUX_VERSION_IS_GEQ(3,18,30) && \ LINUX_VERSION_IS_LESS(3,19,0)) || \ (LINUX_VERSION_IS_GEQ(3,2,79) && \ LINUX_VERSION_IS_LESS(3,3,0))) #define wireless_nlevent_flush LINUX_BACKPORT(wireless_nlevent_flush) static inline void wireless_nlevent_flush(void) {} #endif #endif /* __BACKPORT_IW_HANDLER_H */ backport-iwlwifi-dkms-7906/backport-include/net/net_namespace.h000066400000000000000000000017431351064634500246750ustar00rootroot00000000000000#ifndef _COMPAT_NET_NET_NAMESPACE_H #define _COMPAT_NET_NET_NAMESPACE_H 1 #include_next #if LINUX_VERSION_IS_LESS(3,20,0) /* * In older kernels we simply fail this function. 
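 *
 * Callers must therefore be prepared for failure, e.g. (sketch):
 *
 *	struct net *net = get_net_ns_by_fd(fd);
 *
 *	if (IS_ERR(net))
 *		return PTR_ERR(net);	/* always -EINVAL on these kernels */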
*/ #define get_net_ns_by_fd LINUX_BACKPORT(get_net_ns_by_fd) static inline struct net *get_net_ns_by_fd(int fd) { return ERR_PTR(-EINVAL); } #endif #if LINUX_VERSION_IS_LESS(4,1,0) && \ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6) typedef struct { #ifdef CONFIG_NET_NS struct net *net; #endif } possible_net_t; static inline void possible_write_pnet(possible_net_t *pnet, struct net *net) { #ifdef CONFIG_NET_NS pnet->net = net; #endif } static inline struct net *possible_read_pnet(const possible_net_t *pnet) { #ifdef CONFIG_NET_NS return pnet->net; #else return &init_net; #endif } #else #define possible_write_pnet(pnet, net) write_pnet(pnet, net) #define possible_read_pnet(pnet) read_pnet(pnet) #endif /* LINUX_VERSION_IS_LESS(4,1,0) */ #endif /* _COMPAT_NET_NET_NAMESPACE_H */ backport-iwlwifi-dkms-7906/backport-include/net/netlink.h000066400000000000000000000412771351064634500235450ustar00rootroot00000000000000#ifndef __BACKPORT_NET_NETLINK_H #define __BACKPORT_NET_NETLINK_H #include_next #include #include #if LINUX_VERSION_IS_LESS(5,1,0) #undef NLA_POLICY_NESTED #undef NLA_POLICY_NESTED_ARRAY #define _NLA_POLICY_NESTED(maxattr, policy) \ { .type = NLA_NESTED, .validation_data = policy, .len = maxattr } #define _NLA_POLICY_NESTED_ARRAY(maxattr, policy) \ { .type = NLA_NESTED_ARRAY, .validation_data = policy, .len = maxattr } #define NLA_POLICY_NESTED(policy) \ _NLA_POLICY_NESTED(ARRAY_SIZE(policy) - 1, policy) #define NLA_POLICY_NESTED_ARRAY(policy) \ _NLA_POLICY_NESTED_ARRAY(ARRAY_SIZE(policy) - 1, policy) #endif /* < 5.1 */ #if LINUX_VERSION_IS_LESS(4,20,0) /* can't backport using the enum - need to override */ #define NLA_UNSPEC 0 #define NLA_U8 1 #define NLA_U16 2 #define NLA_U32 3 #define NLA_U64 4 #define NLA_STRING 5 #define NLA_FLAG 6 #define NLA_MSECS 7 #define NLA_NESTED 8 #define NLA_NESTED_ARRAY 9 #define NLA_NUL_STRING 10 #define NLA_BINARY 11 #define NLA_S8 12 #define NLA_S16 13 #define NLA_S32 14 #define NLA_S64 15 #define NLA_BITFIELD32 16 #define NLA_REJECT 17 #define NLA_EXACT_LEN 18 #define NLA_EXACT_LEN_WARN 19 #define __NLA_TYPE_MAX 20 #define NLA_TYPE_MAX (__NLA_TYPE_MAX - 1) enum nla_policy_validation { NLA_VALIDATE_NONE, NLA_VALIDATE_RANGE, NLA_VALIDATE_MIN, NLA_VALIDATE_MAX, NLA_VALIDATE_FUNCTION, }; struct backport_nla_policy { u8 type; u8 validation_type; u16 len; union { const void *validation_data; struct { s16 min, max; }; int (*validate)(const struct nlattr *attr, struct netlink_ext_ack *extack); }; }; #define nla_policy backport_nla_policy #define NLA_POLICY_EXACT_LEN(_len) { .type = NLA_EXACT_LEN, .len = _len } #define NLA_POLICY_EXACT_LEN_WARN(_len) { .type = NLA_EXACT_LEN_WARN, \ .len = _len } #define NLA_POLICY_ETH_ADDR NLA_POLICY_EXACT_LEN(ETH_ALEN) #define NLA_POLICY_ETH_ADDR_COMPAT NLA_POLICY_EXACT_LEN_WARN(ETH_ALEN) #define __NLA_ENSURE(condition) (sizeof(char[1 - 2*!(condition)]) - 1) #define NLA_ENSURE_INT_TYPE(tp) \ (__NLA_ENSURE(tp == NLA_S8 || tp == NLA_U8 || \ tp == NLA_S16 || tp == NLA_U16 || \ tp == NLA_S32 || tp == NLA_U32 || \ tp == NLA_S64 || tp == NLA_U64) + tp) #define NLA_ENSURE_NO_VALIDATION_PTR(tp) \ (__NLA_ENSURE(tp != NLA_BITFIELD32 && \ tp != NLA_REJECT && \ tp != NLA_NESTED && \ tp != NLA_NESTED_ARRAY) + tp) #define NLA_POLICY_RANGE(tp, _min, _max) { \ .type = NLA_ENSURE_INT_TYPE(tp), \ .validation_type = NLA_VALIDATE_RANGE, \ .min = _min, \ .max = _max \ } #define NLA_POLICY_MIN(tp, _min) { \ .type = NLA_ENSURE_INT_TYPE(tp), \ .validation_type = NLA_VALIDATE_MIN, \ .min = _min, \ } #define NLA_POLICY_MAX(tp, _max) { \ .type = 
NLA_ENSURE_INT_TYPE(tp), \ .validation_type = NLA_VALIDATE_MAX, \ .max = _max, \ } #define NLA_POLICY_VALIDATE_FN(tp, fn, ...) { \ .type = NLA_ENSURE_NO_VALIDATION_PTR(tp), \ .validation_type = NLA_VALIDATE_FUNCTION, \ .validate = fn, \ .len = __VA_ARGS__ + 0, \ } #define nla_validate LINUX_BACKPORT(nla_validate) int nla_validate(const struct nlattr *head, int len, int maxtype, const struct nla_policy *policy, struct netlink_ext_ack *extack); #define nla_parse LINUX_BACKPORT(nla_parse) int nla_parse(struct nlattr **tb, int maxtype, const struct nlattr *head, int len, const struct nla_policy *policy, struct netlink_ext_ack *extack); #define nla_policy_len LINUX_BACKPORT(nla_policy_len) int nla_policy_len(const struct nla_policy *, int); #define nlmsg_parse LINUX_BACKPORT(nlmsg_parse) static inline int nlmsg_parse(const struct nlmsghdr *nlh, int hdrlen, struct nlattr *tb[], int maxtype, const struct nla_policy *policy, struct netlink_ext_ack *extack) { if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen)) return -EINVAL; return nla_parse(tb, maxtype, nlmsg_attrdata(nlh, hdrlen), nlmsg_attrlen(nlh, hdrlen), policy, extack); } #define nlmsg_validate LINUX_BACKPORT(nlmsg_validate) static inline int nlmsg_validate(const struct nlmsghdr *nlh, int hdrlen, int maxtype, const struct nla_policy *policy, struct netlink_ext_ack *extack) { if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen)) return -EINVAL; return nla_validate(nlmsg_attrdata(nlh, hdrlen), nlmsg_attrlen(nlh, hdrlen), maxtype, policy, extack); } #define nla_parse_nested LINUX_BACKPORT(nla_parse_nested) static inline int nla_parse_nested(struct nlattr *tb[], int maxtype, const struct nlattr *nla, const struct nla_policy *policy, struct netlink_ext_ack *extack) { return nla_parse(tb, maxtype, nla_data(nla), nla_len(nla), policy, extack); } #define nla_validate_nested LINUX_BACKPORT(nla_validate_nested) static inline int nla_validate_nested(const struct nlattr *start, int maxtype, const struct nla_policy *policy, struct netlink_ext_ack *extack) { return nla_validate(nla_data(start), nla_len(start), maxtype, policy, extack); } #endif /* < 4.20 */ #if LINUX_VERSION_IS_LESS(4,12,0) #include static inline int _nla_validate5(const struct nlattr *head, int len, int maxtype, const struct nla_policy *policy, struct netlink_ext_ack *extack) { return nla_validate(head, len, maxtype, policy, extack); } static inline int _nla_validate4(const struct nlattr *head, int len, int maxtype, const struct nla_policy *policy) { return nla_validate(head, len, maxtype, policy, NULL); } #undef nla_validate #define nla_validate(...) \ macro_dispatcher(_nla_validate, __VA_ARGS__)(__VA_ARGS__) static inline int _nla_parse6(struct nlattr **tb, int maxtype, const struct nlattr *head, int len, const struct nla_policy *policy, struct netlink_ext_ack *extack) { return nla_parse(tb, maxtype, head, len, policy, extack); } static inline int _nla_parse5(struct nlattr **tb, int maxtype, const struct nlattr *head, int len, const struct nla_policy *policy) { return nla_parse(tb, maxtype, head, len, policy, NULL); } #undef nla_parse #define nla_parse(...) 
\ macro_dispatcher(_nla_parse, __VA_ARGS__)(__VA_ARGS__) static inline int _nlmsg_parse6(const struct nlmsghdr *nlh, int hdrlen, struct nlattr *tb[], int maxtype, const struct nla_policy *policy, struct netlink_ext_ack *extack) { return nlmsg_parse(nlh, hdrlen, tb, maxtype, policy, extack); } static inline int _nlmsg_parse5(const struct nlmsghdr *nlh, int hdrlen, struct nlattr *tb[], int maxtype, const struct nla_policy *policy) { return nlmsg_parse(nlh, hdrlen, tb, maxtype, policy, NULL); } #undef nlmsg_parse #define nlmsg_parse(...) \ macro_dispatcher(_nlmsg_parse, __VA_ARGS__)(__VA_ARGS__) static inline int _nlmsg_validate5(const struct nlmsghdr *nlh, int hdrlen, int maxtype, const struct nla_policy *policy, struct netlink_ext_ack *extack) { return nlmsg_validate(nlh, hdrlen, maxtype, policy, extack); } static inline int _nlmsg_validate4(const struct nlmsghdr *nlh, int hdrlen, int maxtype, const struct nla_policy *policy) { return nlmsg_validate(nlh, hdrlen, maxtype, policy, NULL); } #undef nlmsg_validate #define nlmsg_validate(...) \ macro_dispatcher(_nlmsg_validate, __VA_ARGS__)(__VA_ARGS__) static inline int _nla_parse_nested5(struct nlattr *tb[], int maxtype, const struct nlattr *nla, const struct nla_policy *policy, struct netlink_ext_ack *extack) { return nla_parse_nested(tb, maxtype, nla, policy, extack); } static inline int _nla_parse_nested4(struct nlattr *tb[], int maxtype, const struct nlattr *nla, const struct nla_policy *policy) { return nla_parse_nested(tb, maxtype, nla, policy, NULL); } #undef nla_parse_nested #define nla_parse_nested(...) \ macro_dispatcher(_nla_parse_nested, __VA_ARGS__)(__VA_ARGS__) static inline int _nla_validate_nested4(const struct nlattr *start, int maxtype, const struct nla_policy *policy, struct netlink_ext_ack *extack) { return nla_validate_nested(start, maxtype, policy, extack); } static inline int _nla_validate_nested3(const struct nlattr *start, int maxtype, const struct nla_policy *policy) { return nla_validate_nested(start, maxtype, policy, NULL); } #undef nla_validate_nested #define nla_validate_nested(...) 
\ macro_dispatcher(_nla_validate_nested, __VA_ARGS__)(__VA_ARGS__) #endif /* LINUX_VERSION_IS_LESS(4,12,0) */ #if LINUX_VERSION_IS_LESS(3,7,0) /** * nla_put_s8 - Add a s8 netlink attribute to a socket buffer * @skb: socket buffer to add attribute to * @attrtype: attribute type * @value: numeric value */ #define nla_put_s8 LINUX_BACKPORT(nla_put_s8) static inline int nla_put_s8(struct sk_buff *skb, int attrtype, s8 value) { return nla_put(skb, attrtype, sizeof(s8), &value); } /** * nla_put_s16 - Add a s16 netlink attribute to a socket buffer * @skb: socket buffer to add attribute to * @attrtype: attribute type * @value: numeric value */ #define nla_put_s16 LINUX_BACKPORT(nla_put_s16) static inline int nla_put_s16(struct sk_buff *skb, int attrtype, s16 value) { return nla_put(skb, attrtype, sizeof(s16), &value); } /** * nla_put_s32 - Add a s32 netlink attribute to a socket buffer * @skb: socket buffer to add attribute to * @attrtype: attribute type * @value: numeric value */ #define nla_put_s32 LINUX_BACKPORT(nla_put_s32) static inline int nla_put_s32(struct sk_buff *skb, int attrtype, s32 value) { return nla_put(skb, attrtype, sizeof(s32), &value); } /** * nla_get_s32 - return payload of s32 attribute * @nla: s32 netlink attribute */ #define nla_get_s32 LINUX_BACKPORT(nla_get_s32) static inline s32 nla_get_s32(const struct nlattr *nla) { return *(s32 *) nla_data(nla); } /** * nla_get_s16 - return payload of s16 attribute * @nla: s16 netlink attribute */ #define nla_get_s16 LINUX_BACKPORT(nla_get_s16) static inline s16 nla_get_s16(const struct nlattr *nla) { return *(s16 *) nla_data(nla); } /** * nla_get_s8 - return payload of s8 attribute * @nla: s8 netlink attribute */ #define nla_get_s8 LINUX_BACKPORT(nla_get_s8) static inline s8 nla_get_s8(const struct nlattr *nla) { return *(s8 *) nla_data(nla); } /** * nla_get_s64 - return payload of s64 attribute * @nla: s64 netlink attribute */ #define nla_get_s64 LINUX_BACKPORT(nla_get_s64) static inline s64 nla_get_s64(const struct nlattr *nla) { s64 tmp; nla_memcpy(&tmp, nla, sizeof(tmp)); return tmp; } #endif /* < 3.7.0 */ #if LINUX_VERSION_IS_LESS(3,5,0) /* * This backports: * commit 569a8fc38367dfafd87454f27ac646c8e6b54bca * Author: David S. Miller * Date: Thu Mar 29 23:18:53 2012 -0400 * * netlink: Add nla_put_be{16,32,64}() helpers. 
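 *
 * Usage sketch (FOO_ATTR_ADDR is a hypothetical attribute type, not
 * from this header):
 *
 *	if (nla_put_be32(skb, FOO_ATTR_ADDR, addr))
 *		goto nla_put_failure;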
*/ #define nla_put_be16 LINUX_BACKPORT(nla_put_be16) static inline int nla_put_be16(struct sk_buff *skb, int attrtype, __be16 value) { return nla_put(skb, attrtype, sizeof(__be16), &value); } #define nla_put_be32 LINUX_BACKPORT(nla_put_be32) static inline int nla_put_be32(struct sk_buff *skb, int attrtype, __be32 value) { return nla_put(skb, attrtype, sizeof(__be32), &value); } #define nla_put_be64 LINUX_BACKPORT(nla_put_be64) static inline int nla_put_be64(struct sk_buff *skb, int attrtype, __be64 value) { return nla_put(skb, attrtype, sizeof(__be64), &value); } #endif /* < 3.5 */ #if LINUX_VERSION_IS_LESS(3,7,0) #define NLA_S8 (NLA_BINARY + 1) #define NLA_S16 (NLA_BINARY + 2) #define NLA_S32 (NLA_BINARY + 3) #define NLA_S64 (NLA_BINARY + 4) #define __NLA_TYPE_MAX (NLA_BINARY + 5) #undef NLA_TYPE_MAX #define NLA_TYPE_MAX (__NLA_TYPE_MAX - 1) #endif #if LINUX_VERSION_IS_LESS(4,1,0) && \ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6) #define nla_put_in_addr LINUX_BACKPORT(nla_put_in_addr) static inline int nla_put_in_addr(struct sk_buff *skb, int attrtype, __be32 addr) { return nla_put_be32(skb, attrtype, addr); } #define nla_put_in6_addr LINUX_BACKPORT(nla_put_in6_addr) static inline int nla_put_in6_addr(struct sk_buff *skb, int attrtype, const struct in6_addr *addr) { return nla_put(skb, attrtype, sizeof(*addr), addr); } static inline __be32 nla_get_in_addr(const struct nlattr *nla) { return *(__be32 *) nla_data(nla); } static inline struct in6_addr nla_get_in6_addr(const struct nlattr *nla) { struct in6_addr tmp; nla_memcpy(&tmp, nla, sizeof(tmp)); return tmp; } #endif /* < 4.1 */ #if LINUX_VERSION_IS_LESS(4,4,0) /** * nla_get_le32 - return payload of __le32 attribute * @nla: __le32 netlink attribute */ #define nla_get_le32 LINUX_BACKPORT(nla_get_le32) static inline __le32 nla_get_le32(const struct nlattr *nla) { return *(__le32 *) nla_data(nla); } /** * nla_get_le64 - return payload of __le64 attribute * @nla: __le64 netlink attribute */ #define nla_get_le64 LINUX_BACKPORT(nla_get_le64) static inline __le64 nla_get_le64(const struct nlattr *nla) { return *(__le64 *) nla_data(nla); } #endif /* < 4.4 */ #if LINUX_VERSION_IS_LESS(4,7,0) /** * nla_need_padding_for_64bit - test 64-bit alignment of the next attribute * @skb: socket buffer the message is stored in * * Return true if padding is needed to align the next attribute (nla_data()) to * a 64-bit aligned area. */ #define nla_need_padding_for_64bit LINUX_BACKPORT(nla_need_padding_for_64bit) static inline bool nla_need_padding_for_64bit(struct sk_buff *skb) { #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS /* The nlattr header is 4 bytes in size, that's why we test * if the skb->data _is_ aligned. A NOP attribute, plus * nlattr header for next attribute, will make nla_data() * 8-byte aligned. */ if (IS_ALIGNED((unsigned long)skb_tail_pointer(skb), 8)) return true; #endif return false; } /** * nla_align_64bit - 64-bit align the nla_data() of next attribute * @skb: socket buffer the message is stored in * @padattr: attribute type for the padding * * Conditionally emit a padding netlink attribute in order to make * the next attribute we emit have a 64-bit aligned nla_data() area. * This will only be done in architectures which do not have * HAVE_EFFICIENT_UNALIGNED_ACCESS defined. * * Returns zero on success or a negative error code. 
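 *
 * Emit-side sketch (illustrative; FOO_ATTR_PAD and FOO_ATTR_BYTES are
 * hypothetical attribute types):
 *
 *	if (nla_align_64bit(skb, FOO_ATTR_PAD))
 *		goto nla_put_failure;
 *	if (nla_put(skb, FOO_ATTR_BYTES, sizeof(u64), &bytes))
 *		goto nla_put_failure;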
*/
#define nla_align_64bit LINUX_BACKPORT(nla_align_64bit)
static inline int nla_align_64bit(struct sk_buff *skb, int padattr)
{
	if (nla_need_padding_for_64bit(skb) &&
	    !nla_reserve(skb, padattr, 0))
		return -EMSGSIZE;

	return 0;
}

/**
 * nla_total_size_64bit - total length of attribute including padding
 * @payload: length of payload
 */
#define nla_total_size_64bit LINUX_BACKPORT(nla_total_size_64bit)
static inline int nla_total_size_64bit(int payload)
{
	return NLA_ALIGN(nla_attr_size(payload))
#ifndef HAVE_EFFICIENT_UNALIGNED_ACCESS
		+ NLA_ALIGN(nla_attr_size(0))
#endif
		;
}

#define __nla_reserve_64bit LINUX_BACKPORT(__nla_reserve_64bit)
struct nlattr *__nla_reserve_64bit(struct sk_buff *skb, int attrtype,
				   int attrlen, int padattr);
#define nla_reserve_64bit LINUX_BACKPORT(nla_reserve_64bit)
struct nlattr *nla_reserve_64bit(struct sk_buff *skb, int attrtype,
				 int attrlen, int padattr);
#define __nla_put_64bit LINUX_BACKPORT(__nla_put_64bit)
void __nla_put_64bit(struct sk_buff *skb, int attrtype, int attrlen,
		     const void *data, int padattr);
#define nla_put_64bit LINUX_BACKPORT(nla_put_64bit)
int nla_put_64bit(struct sk_buff *skb, int attrtype, int attrlen,
		  const void *data, int padattr);

/**
 * nla_put_u64_64bit - Add a u64 netlink attribute to a skb and align it
 * @skb: socket buffer to add attribute to
 * @attrtype: attribute type
 * @value: numeric value
 * @padattr: attribute type for the padding
 */
#define nla_put_u64_64bit LINUX_BACKPORT(nla_put_u64_64bit)
static inline int nla_put_u64_64bit(struct sk_buff *skb, int attrtype,
				    u64 value, int padattr)
{
	return nla_put_64bit(skb, attrtype, sizeof(u64), &value, padattr);
}

#define nla_put_u64 DONT_USE_nla_put_u64

/**
 * nla_put_s64 - Add a s64 netlink attribute to a socket buffer and align it
 * @skb: socket buffer to add attribute to
 * @attrtype: attribute type
 * @value: numeric value
 * @padattr: attribute type for the padding
 */
#define nla_put_s64 LINUX_BACKPORT(nla_put_s64)
static inline int nla_put_s64(struct sk_buff *skb, int attrtype, s64 value,
			      int padattr)
{
	return nla_put_64bit(skb, attrtype, sizeof(s64), &value, padattr);
}
#endif /* < 4.7 */

#if LINUX_VERSION_IS_LESS(4,10,0) && \
	RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6)
/**
 * nla_memdup - duplicate attribute memory (kmemdup)
 * @src: netlink attribute to duplicate from
 * @gfp: GFP mask
 */
#define nla_memdup LINUX_BACKPORT(nla_memdup)
static inline void *nla_memdup(const struct nlattr *src, gfp_t gfp)
{
	return kmemdup(nla_data(src), nla_len(src), gfp);
}
#endif /* < 4.10 */

#endif /* __BACKPORT_NET_NETLINK_H */
backport-iwlwifi-dkms-7906/backport-include/net/sch_generic.h000066400000000000000000000013251351064634500243400ustar00rootroot00000000000000#ifndef __BACKPORT_NET_SCH_GENERIC_H
#define __BACKPORT_NET_SCH_GENERIC_H
#include_next <net/sch_generic.h>

#if LINUX_VERSION_IS_LESS(3,3,0)
#if !((LINUX_VERSION_IS_GEQ(3,2,9) && LINUX_VERSION_IS_LESS(3,3,0)) || (LINUX_VERSION_IS_GEQ(3,0,23) && LINUX_VERSION_IS_LESS(3,1,0)))
/* mask qdisc_cb_private_validate as RHEL6 backports this */
#define qdisc_cb_private_validate(a,b) compat_qdisc_cb_private_validate(a,b)
static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
{
	BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct qdisc_skb_cb) + sz);
}
#endif
#endif /* LINUX_VERSION_IS_LESS(3,3,0) */

#ifndef TCQ_F_CAN_BYPASS
#define TCQ_F_CAN_BYPASS 4
#endif

#endif /* __BACKPORT_NET_SCH_GENERIC_H */
backport-iwlwifi-dkms-7906/backport-include/net/sock.h000066400000000000000000000042121351064634500230240ustar00rootroot00000000000000#ifndef __BACKPORT_NET_SOCK_H
#define __BACKPORT_NET_SOCK_H
#include_next <net/sock.h>
#include <linux/version.h>

#if LINUX_VERSION_IS_LESS(3,9,0)
#include <backport/magic.h>

#define sk_for_each3(__sk, node, list) \
	hlist_for_each_entry(__sk, node, list, sk_node)

#define sk_for_each_safe4(__sk, node, tmp, list) \
	hlist_for_each_entry_safe(__sk, node, tmp, list, sk_node)

#define sk_for_each2(__sk, list) \
	hlist_for_each_entry(__sk, list, sk_node)

#define sk_for_each_safe3(__sk, tmp, list) \
	hlist_for_each_entry_safe(__sk, tmp, list, sk_node)

#undef sk_for_each
#define sk_for_each(...) \
	macro_dispatcher(sk_for_each, __VA_ARGS__)(__VA_ARGS__)
#undef sk_for_each_safe
#define sk_for_each_safe(...) \
	macro_dispatcher(sk_for_each_safe, __VA_ARGS__)(__VA_ARGS__)
#endif

#if LINUX_VERSION_IS_LESS(3,10,0)
/*
 * backport SOCK_SELECT_ERR_QUEUE -- see commit
 * "net: add option to enable error queue packets waking select"
 *
 * Adding 14 to SOCK_QUEUE_SHRUNK will reach a bit that can't be
 * set on older kernels, so sock_flag() will always return false.
 */
#define SOCK_SELECT_ERR_QUEUE (SOCK_QUEUE_SHRUNK + 14)
#endif

#ifndef sock_skb_cb_check_size
#define sock_skb_cb_check_size(size) \
	BUILD_BUG_ON((size) > FIELD_SIZEOF(struct sk_buff, cb))
#endif

#if LINUX_VERSION_IS_LESS(4,2,0)
#define sk_alloc(net, family, priority, prot, kern) sk_alloc(net, family, priority, prot)
#endif

#if LINUX_VERSION_IS_LESS(4,5,0)
#define sk_set_bit LINUX_BACKPORT(sk_set_bit)
static inline void sk_set_bit(int nr, struct sock *sk)
{
	set_bit(nr, &sk->sk_socket->flags);
}
#endif /* < 4.5 */

#if LINUX_VERSION_IS_LESS(4,5,0)
#define sk_clear_bit LINUX_BACKPORT(sk_clear_bit)
static inline void sk_clear_bit(int nr, struct sock *sk)
{
	clear_bit(nr, &sk->sk_socket->flags);
}
#endif /* < 4.5 */

#if LINUX_VERSION_IS_LESS(4,16,0)
#define sk_pacing_shift_update LINUX_BACKPORT(sk_pacing_shift_update)
static inline void sk_pacing_shift_update(struct sock *sk, int val)
{
#if LINUX_VERSION_IS_GEQ(4,15,0)
	if (!sk || !sk_fullsock(sk) || sk->sk_pacing_shift == val)
		return;
	sk->sk_pacing_shift = val;
#endif /* >= 4.15 */
}
#endif /* < 4.16 */

#endif /* __BACKPORT_NET_SOCK_H */
backport-iwlwifi-dkms-7906/backport-include/net/tso.h000066400000000000000000000014231351064634500226730ustar00rootroot00000000000000#ifndef BACKPORT_TSO_H
#define BACKPORT_TSO_H
#include <linux/version.h>

#if LINUX_VERSION_IS_LESS(4,4,0)
#define tso_t LINUX_BACKPORT(tso_t)
struct tso_t {
	int next_frag_idx;
	void *data;
	size_t size;
	u16 ip_id;
	bool ipv6;
	u32 tcp_seq;
};

#define tso_count_descs LINUX_BACKPORT(tso_count_descs)
int tso_count_descs(struct sk_buff *skb);

#define tso_build_hdr LINUX_BACKPORT(tso_build_hdr)
void tso_build_hdr(struct sk_buff *skb, char *hdr, struct tso_t *tso,
		   int size, bool is_last);

#define tso_build_data LINUX_BACKPORT(tso_build_data)
void tso_build_data(struct sk_buff *skb, struct tso_t *tso, int size);

#define tso_start LINUX_BACKPORT(tso_start)
void tso_start(struct sk_buff *skb, struct tso_t *tso);
#else
#include_next <net/tso.h>
#endif

#endif /* BACKPORT_TSO_H */
backport-iwlwifi-dkms-7906/backport-include/pcmcia/000077500000000000000000000000001351064634500223635ustar00rootroot00000000000000backport-iwlwifi-dkms-7906/backport-include/pcmcia/device_id.h000066400000000000000000000013241351064634500244470ustar00rootroot00000000000000#ifndef __BACKPORT_PCMCIA_DEVICE_ID_H
#define __BACKPORT_PCMCIA_DEVICE_ID_H
#include_next <pcmcia/device_id.h>

#ifndef PCMCIA_DEVICE_MANF_CARD_PROD_ID3
#define PCMCIA_DEVICE_MANF_CARD_PROD_ID3(manf, card, v3, vh3) { \
	.match_flags = PCMCIA_DEV_ID_MATCH_MANF_ID| \
			PCMCIA_DEV_ID_MATCH_CARD_ID| \
			PCMCIA_DEV_ID_MATCH_PROD_ID3, \
.manf_id = (manf), \ .card_id = (card), \ .prod_id = { NULL, NULL, (v3), NULL }, \ .prod_id_hash = { 0, 0, (vh3), 0 }, } #endif #ifndef PCMCIA_DEVICE_PROD_ID3 #define PCMCIA_DEVICE_PROD_ID3(v3, vh3) { \ .match_flags = PCMCIA_DEV_ID_MATCH_PROD_ID3, \ .prod_id = { NULL, NULL, (v3), NULL }, \ .prod_id_hash = { 0, 0, (vh3), 0 }, } #endif #endif /* __BACKPORT_PCMCIA_DEVICE_ID_H */ backport-iwlwifi-dkms-7906/backport-include/pcmcia/ds.h000066400000000000000000000016221351064634500231430ustar00rootroot00000000000000#ifndef __BACKPORT_PCMCIA_DS_H #define __BACKPORT_PCMCIA_DS_H #include_next #ifndef module_pcmcia_driver /** * backport of: * * commit 6ed7ffddcf61f668114edb676417e5fb33773b59 * Author: H Hartley Sweeten * Date: Wed Mar 6 11:24:44 2013 -0700 * * pcmcia/ds.h: introduce helper for pcmcia_driver module boilerplate */ /** * module_pcmcia_driver() - Helper macro for registering a pcmcia driver * @__pcmcia_driver: pcmcia_driver struct * * Helper macro for pcmcia drivers which do not do anything special in module * init/exit. This eliminates a lot of boilerplate. Each module may only use * this macro once, and calling it replaces module_init() and module_exit(). */ #define module_pcmcia_driver(__pcmcia_driver) \ module_driver(__pcmcia_driver, pcmcia_register_driver, \ pcmcia_unregister_driver) #endif #endif /* __BACKPORT_PCMCIA_DS_H */ backport-iwlwifi-dkms-7906/backport-include/trace/000077500000000000000000000000001351064634500222255ustar00rootroot00000000000000backport-iwlwifi-dkms-7906/backport-include/trace/ftrace.h000066400000000000000000000006041351064634500236420ustar00rootroot00000000000000#undef __print_array #define __print_array(array, count, el_size) \ ({ \ BUILD_BUG_ON(el_size != 1 && el_size != 2 && \ el_size != 4 && el_size != 8); \ ftrace_print_array_seq(p, array, count, el_size); \ }) #undef __get_dynamic_array_len #define __get_dynamic_array_len(field) \ ((__entry->__data_loc_##field >> 16) & 0xffff) #include_next backport-iwlwifi-dkms-7906/backport-include/uapi/000077500000000000000000000000001351064634500220655ustar00rootroot00000000000000backport-iwlwifi-dkms-7906/backport-include/uapi/linux/000077500000000000000000000000001351064634500232245ustar00rootroot00000000000000backport-iwlwifi-dkms-7906/backport-include/uapi/linux/genetlink.h000066400000000000000000000005451351064634500253610ustar00rootroot00000000000000#ifndef __BACKPORT_UAPI_LINUX_GENETLINK_H #define __BACKPORT_UAPI_LINUX_GENETLINK_H #include #if LINUX_VERSION_IS_GEQ(3,7,0) #include_next #else #include_next #endif #ifndef GENL_UNS_ADMIN_PERM #define GENL_UNS_ADMIN_PERM GENL_ADMIN_PERM #endif #endif /* __BACKPORT_UAPI_LINUX_GENETLINK_H */ backport-iwlwifi-dkms-7906/backport-include/uapi/linux/sockios.h000066400000000000000000000023631351064634500250530ustar00rootroot00000000000000#ifndef __BACKPORT_LINUX_SOCKIOS_H #define __BACKPORT_LINUX_SOCKIOS_H #include_next #include /* * Kernel backports UAPI note: * * We carry UAPI headers for backports to enable compilation * of kernel / driver code to compile without any changes. If * it so happens that a feature is backported it can be added * here but notice that if full subsystems are backported you * should just include the respective full header onto the * copy-list file so that its copied intact. This strategy * is used to either backport a specific feature or to just * avoid having to do ifdef changes to compile. 
* * Userspace is not expected to copy over backports headers * to compile userspace programs, userspace programs can * and should consider carrying over a respective copy-list * of the latest UAPI kernel headers they need in their * upstream sources, the kernel the user uses, whether with * backports or not should be able to return -EOPNOTSUPP if * the feature is not available and let it through if its * supported and meats the expected form. */ #if LINUX_VERSION_IS_LESS(3,14,0) #define SIOCGHWTSTAMP 0x89b1 /* get config */ #endif /* LINUX_VERSION_IS_LESS(3,14,0) */ #endif /* __BACKPORT_LINUX_SOCKIOS_H */ backport-iwlwifi-dkms-7906/compat/000077500000000000000000000000001351064634500171645ustar00rootroot00000000000000backport-iwlwifi-dkms-7906/compat/.gitignore000066400000000000000000000000241351064634500211500ustar00rootroot00000000000000oid_registry_data.c backport-iwlwifi-dkms-7906/compat/Kconfig000066400000000000000000000076201351064634500204740ustar00rootroot00000000000000# # backport Kconfig # # Some options are user-selectable ("BPAUTO_USERSEL_*") # # Most options, however, follow a few different schemes: # # A) An option that is selected by drivers ("select FOO") will be # changed to "select BPAUTO_FOO" (if the option BPAUTO_FOO # exists). The option BPAUTO_FOO then controls setting of the # BPAUTO_BUILD_FOO option, which is a module, like this: # # config BPAUTO_BUILD_FOO # tristate # # or bool # # # not possible on kernel < X.Y, build will fail if any # # drivers are allowed to build on kernels < X.Y # depends on KERNEL_X_Y # # # don't build the backport code if FOO is in the kernel # # already, but only if the kernel version is also >= X.Z; # # this is an example of backporting where the version of # # the FOO subsystem that we need is only available from # # kernel version X.Z # depends on !FOO || KERNEL_X_Z # # # build if driver needs it (it selects BPAUTO_FOO) # default m if BPAUTO_FOO # # # or for build-testing (BPAUTO_USERSEL_BUILD_ALL is enabled) # default m if BPAUTO_USERSEL_BUILD_ALL # # config BPAUTO_FOO # bool # # This only works as-is if the kernel code is usable on any version, # otherwise the "&& !FOO" part needs to be different. # # # B) An option for code always present on some kernels (e.g. KFIFO). # This simply depends on/sets the default based on the version: # # config BPAUTO_BUILD_KFIFO # def_bool y # depends on KERNEL_2_6_36 # # # C) similarly, a kconfig symbol for an option, e.g. # BPAUTO_OPTION_SOME_FIX (no examples provided) check git log # # # Variations are obviously possible. # config BP_MODULES option modules bool default MODULES help This symbol is necessary for the newer kconf tool, it looks for the "option modules" to control the 'm' state. config BPAUTO_BUILD_CORDIC depends on n tristate depends on m depends on !CORDIC depends on KERNEL_3_1 default m if BPAUTO_CORDIC default m if BPAUTO_USERSEL_BUILD_ALL #module-name cordic #c-file lib/cordic.c config BPAUTO_CORDIC bool config BPAUTO_MII bool config BPAUTO_BUILD_LEDS bool depends on !NEW_LEDS || LEDS_CLASS=n || !LEDS_TRIGGERS default y if BPAUTO_NEW_LEDS default y if BPAUTO_LEDS_CLASS default y if BPAUTO_LEDS_TRIGGERS config BPAUTO_NEW_LEDS bool config BPAUTO_LEDS_CLASS bool config BPAUTO_LEDS_TRIGGERS bool config BPAUTO_USERSEL_BUILD_ALL bool "Build all compat code" help This option selects all the compat code options that would otherwise only be selected by drivers. It's only really useful for compat testing, so you probably shouldn't enable it. 
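# Illustrative scheme-A example (hypothetical driver symbol, not from
# this file): a driver Kconfig entry would carry
# "select BPAUTO_WANT_DEV_COREDUMP", and the pair of symbols below then
# builds the devcoredump backport only on kernels that lack it.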
config BPAUTO_WANT_DEV_COREDUMP bool config BPAUTO_BUILD_WANT_DEV_COREDUMP bool default n if DEV_COREDUMP default n if DISABLE_DEV_COREDUMP default y if BPAUTO_WANT_DEV_COREDUMP #h-file linux/devcoredump.h #c-file drivers/base/devcoredump.c config BPAUTO_RHASHTABLE bool # current API of rhashtable was introduced in version 4.9 # (the one including rhltable) depends on KERNEL_4_9 # not very nice - but better than always having it default y if BACKPORTED_MAC80211 #h-file linux/rhashtable.h #h-file linux/rhashtable-types.h #c-file lib/rhashtable.c config BPAUTO_BUCKET_LOCKS bool # the API of bucket_locks that we need was introduced in version 4.16 depends on KERNEL_4_16 default y if BPAUTO_RHASHTABLE #c-file lib/bucket_locks.c config BPAUTO_REFCOUNT bool default y depends on KERNEL_4_11 #h-file linux/refcount.h #c-file lib/refcount.c config BPAUTO_SYSTEM_DATA_VERIFICATION bool config BPAUTO_BUILD_SYSTEM_DATA_VERIFICATION bool default y if BPAUTO_SYSTEM_DATA_VERIFICATION depends on KERNEL_4_7 || !SYSTEM_DATA_VERIFICATION select BPAUTO_ASN1_DECODER select BPAUTO_PUBLIC_KEY select BPAUTO_PKCS7 #h-file linux/oid_registry.h #c-file lib/oid_registry.c config BPAUTO_PUBLIC_KEY bool #h-file crypto/public_key.h config BPAUTO_ASN1_DECODER bool #h-file linux/asn1_decoder.h #c-file lib/asn1_decoder.c config BPAUTO_PKCS7 bool #h-file crypto/pkcs7.h backport-iwlwifi-dkms-7906/compat/Makefile000066400000000000000000000076771351064634500206450ustar00rootroot00000000000000ccflags-y += -I$(src) -Wframe-larger-than=1280 ifeq ($(CONFIG_BACKPORT_INTEGRATE),) obj-m += compat.o else obj-y += compat.o endif compat-y += main.o # Kernel backport compatibility code compat-$(CPTCFG_KERNEL_3_0) += compat-3.0.o compat-$(CPTCFG_KERNEL_3_1) += compat-3.1.o compat-$(CPTCFG_KERNEL_3_2) += backport-3.2.o compat-$(CPTCFG_KERNEL_3_3) += compat-3.3.o compat-$(CPTCFG_KERNEL_3_4) += compat-3.4.o compat-$(CPTCFG_KERNEL_3_5) += compat-3.5.o user_namespace.o compat-$(CPTCFG_KERNEL_3_6) += compat-3.6.o compat-$(CPTCFG_KERNEL_3_7) += compat-3.7.o compat-$(CPTCFG_KERNEL_3_8) += compat-3.8.o compat-$(CPTCFG_KERNEL_3_9) += compat-3.9.o compat-$(CPTCFG_KERNEL_3_10) += backport-3.10.o compat-$(CPTCFG_KERNEL_3_11) += backport-3.11.o compat-$(CPTCFG_KERNEL_3_12) += backport-3.12.o compat-$(CPTCFG_KERNEL_3_13) += backport-3.13.o memneq.o compat-$(CPTCFG_KERNEL_3_14) += backport-3.14.o compat-$(CPTCFG_KERNEL_3_15) += backport-3.15.o compat-$(CPTCFG_KERNEL_3_17) += backport-3.17.o compat-$(CPTCFG_KERNEL_3_18) += backport-3.18.o compat-$(CPTCFG_KERNEL_3_19) += backport-3.19.o compat-$(CPTCFG_KERNEL_4_0) += backport-4.0.o compat-$(CPTCFG_KERNEL_4_1) += backport-4.1.o compat-$(CPTCFG_KERNEL_4_2) += backport-4.2.o compat-$(CPTCFG_KERNEL_4_3) += backport-4.3.o compat-$(CPTCFG_KERNEL_4_4) += backport-4.4.o compat-$(CPTCFG_KERNEL_4_5) += backport-4.5.o compat-$(CPTCFG_KERNEL_4_6) += backport-4.6.o compat-$(CPTCFG_KERNEL_4_7) += backport-4.7.o compat-$(CPTCFG_KERNEL_4_8) += backport-4.8.o compat-$(CPTCFG_KERNEL_4_10) += backport-4.10.o compat-$(CPTCFG_KERNEL_4_18) += backport-4.18.o compat-$(CPTCFG_KERNEL_4_20) += backport-4.20.o compat-$(CPTCFG_KERNEL_5_2) += backport-genetlink.o compat-$(CPTCFG_BPAUTO_BUILD_SYSTEM_DATA_VERIFICATION) += verification/verify.o compat-$(CPTCFG_BPAUTO_BUILD_SYSTEM_DATA_VERIFICATION) += verification/pkcs7.asn1.o compat-$(CPTCFG_BPAUTO_BUILD_SYSTEM_DATA_VERIFICATION) += verification/pkcs7_verify.o compat-$(CPTCFG_BPAUTO_BUILD_SYSTEM_DATA_VERIFICATION) += verification/pkcs7_parser.o 
compat-$(CPTCFG_BPAUTO_BUILD_SYSTEM_DATA_VERIFICATION) += verification/x509.asn1.o compat-$(CPTCFG_BPAUTO_BUILD_SYSTEM_DATA_VERIFICATION) += verification/x509_akid.asn1.o compat-$(CPTCFG_BPAUTO_BUILD_SYSTEM_DATA_VERIFICATION) += verification/x509_cert_parser.o compat-$(CPTCFG_BPAUTO_BUILD_SYSTEM_DATA_VERIFICATION) += verification/x509_public_key.o compat-$(CPTCFG_BPAUTO_BUILD_SYSTEM_DATA_VERIFICATION) += verification/pkcs7_trust.o compat-$(CPTCFG_BPAUTO_BUILD_SYSTEM_DATA_VERIFICATION) += verification/key.o compat-$(CPTCFG_BPAUTO_BUILD_SYSTEM_DATA_VERIFICATION) += verification/public_key.o compat-$(CPTCFG_BPAUTO_BUILD_SYSTEM_DATA_VERIFICATION) += verification/rsa.o compat-$(CPTCFG_BPAUTO_BUILD_SYSTEM_DATA_VERIFICATION) += verification/bignum.o compat-$(CPTCFG_BPAUTO_BUILD_SYSTEM_DATA_VERIFICATION) += verification/md.o compat-$(CPTCFG_BPAUTO_BUILD_SYSTEM_DATA_VERIFICATION) += verification/md_wrap.o compat-$(CPTCFG_BPAUTO_BUILD_SYSTEM_DATA_VERIFICATION) += verification/sha256.o compat-$(CPTCFG_BPAUTO_BUILD_SYSTEM_DATA_VERIFICATION) += verification/oid.o compat-$(CPTCFG_BPAUTO_BUILD_SYSTEM_DATA_VERIFICATION) += verification/asn1parse.o compat-$(CPTCFG_BPAUTO_BUILD_SYSTEM_DATA_VERIFICATION) += verification/rsapubkey.asn1.o $(obj)/lib-oid_registry.o: $(obj)/oid_registry_data.c $(obj)/oid_registry_data.c: $(src)/../include/linux/backport-oid_registry.h \ $(src)/build_OID_registry $(call cmd,build_OID_registry) quiet_cmd_build_OID_registry = GEN $@ cmd_build_OID_registry = perl $(src)/build_OID_registry $< $@ compat-$(CPTCFG_BPAUTO_ASN1_DECODER) += lib-asn1_decoder.o compat-$(CPTCFG_BPAUTO_BUILD_SYSTEM_DATA_VERIFICATION) += lib-oid_registry.o compat-$(CPTCFG_BPAUTO_REFCOUNT) += lib-refcount.o compat-$(CPTCFG_BPAUTO_BUILD_WANT_DEV_COREDUMP) += drivers-base-devcoredump.o compat-$(CPTCFG_BPAUTO_RHASHTABLE) += lib-rhashtable.o compat-$(CPTCFG_BPAUTO_PUBLIC_KEY) += compat-$(CPTCFG_BPAUTO_BUCKET_LOCKS) += lib-bucket_locks.o compat-$(CPTCFG_BPAUTO_PKCS7) += backport-iwlwifi-dkms-7906/compat/backport-3.10.c000066400000000000000000000137501351064634500215220ustar00rootroot00000000000000/* * Copyright (c) 2013 Luis R. Rodriguez * * Linux backport symbols for kernels 3.10. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include #include #include #include #include #include #include #include #include #include void proc_set_size(struct proc_dir_entry *de, loff_t size) { de->size = size; } EXPORT_SYMBOL_GPL(proc_set_size); void proc_set_user(struct proc_dir_entry *de, kuid_t uid, kgid_t gid) { de->uid = uid; de->gid = gid; } EXPORT_SYMBOL_GPL(proc_set_user); /* get_random_int() was not exported for module use until 3.10-rc. Implement it here in terms of the more expensive get_random_bytes() */ unsigned int get_random_int(void) { unsigned int r; get_random_bytes(&r, sizeof(r)); return r; } EXPORT_SYMBOL_GPL(get_random_int); #ifdef CONFIG_TTY /** * tty_port_tty_wakeup - helper to wake up a tty * * @port: tty port */ void tty_port_tty_wakeup(struct tty_port *port) { struct tty_struct *tty = tty_port_tty_get(port); if (tty) { tty_wakeup(tty); tty_kref_put(tty); } } EXPORT_SYMBOL_GPL(tty_port_tty_wakeup); /** * tty_port_tty_hangup - helper to hang up a tty * * @port: tty port * @check_clocal: hang only ttys with CLOCAL unset? 
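 *
 * Caller sketch (illustrative, not from this file): a serial driver
 * that loses carrier would typically do:
 *
 *	if (!(status & UART_MSR_DCD))
 *		tty_port_tty_hangup(&up->port, true);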
*/ void tty_port_tty_hangup(struct tty_port *port, bool check_clocal) { struct tty_struct *tty = tty_port_tty_get(port); if (tty && (!check_clocal || !C_CLOCAL(tty))) tty_hangup(tty); tty_kref_put(tty); } EXPORT_SYMBOL_GPL(tty_port_tty_hangup); #endif /* CONFIG_TTY */ #ifdef CONFIG_PCI_IOV /* * pci_vfs_assigned - returns number of VFs are assigned to a guest * @dev: the PCI device * * Returns number of VFs belonging to this device that are assigned to a guest. * If device is not a physical function returns -ENODEV. */ int pci_vfs_assigned(struct pci_dev *dev) { struct pci_dev *vfdev; unsigned int vfs_assigned = 0; unsigned short dev_id; /* only search if we are a PF */ if (!dev->is_physfn) return 0; /* * determine the device ID for the VFs, the vendor ID will be the * same as the PF so there is no need to check for that one */ pci_read_config_word(dev, dev->sriov->pos + PCI_SRIOV_VF_DID, &dev_id); /* loop through all the VFs to see if we own any that are assigned */ vfdev = pci_get_device(dev->vendor, dev_id, NULL); while (vfdev) { /* * It is considered assigned if it is a virtual function with * our dev as the physical function and the assigned bit is set */ if (vfdev->is_virtfn && (vfdev->physfn == dev) && (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)) vfs_assigned++; vfdev = pci_get_device(dev->vendor, dev_id, vfdev); } return vfs_assigned; } EXPORT_SYMBOL_GPL(pci_vfs_assigned); #endif /* CONFIG_PCI_IOV */ #ifdef CONFIG_OF /** * of_property_read_u32_index - Find and read a u32 from a multi-value property. * * @np: device node from which the property value is to be read. * @propname: name of the property to be searched. * @index: index of the u32 in the list of values * @out_value: pointer to return value, modified only if no error. * * Search for a property in a device node and read nth 32-bit value from * it. Returns 0 on success, -EINVAL if the property does not exist, * -ENODATA if property does not have a value, and -EOVERFLOW if the * property data isn't large enough. * * The out_value is modified only if a valid u32 value can be decoded. */ int of_property_read_u32_index(const struct device_node *np, const char *propname, u32 index, u32 *out_value) { const u32 *val = of_find_property_value_of_size(np, propname, ((index + 1) * sizeof(*out_value))); if (IS_ERR(val)) return PTR_ERR(val); *out_value = be32_to_cpup(((__be32 *)val) + index); return 0; } EXPORT_SYMBOL_GPL(of_property_read_u32_index); #endif /* CONFIG_OF */ static inline void set_page_count(struct page *page, int v) { atomic_set(&page->_count, v); } /* * Turn a non-refcounted page (->_count == 0) into refcounted with * a count of one. */ static inline void set_page_refcounted(struct page *page) { VM_BUG_ON(PageTail(page)); VM_BUG_ON(atomic_read(&page->_count)); set_page_count(page, 1); } /* * split_page takes a non-compound higher-order page, and splits it into * n (1<action(devres->data); } /** * devm_add_action() - add a custom action to list of managed resources * @dev: Device that owns the action * @action: Function that should be called * @data: Pointer to data passed to @action implementation * * This adds a custom action to the list of managed resources so that * it gets executed as part of standard resource unwinding. 
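 *
 * Usage sketch (illustrative; foo_clk_disable() is a hypothetical
 * cleanup helper):
 *
 *	ret = devm_add_action(dev, foo_clk_disable, foo);
 *	if (ret)
 *		foo_clk_disable(foo);	/* manual fallback on failure */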
*/ int devm_add_action(struct device *dev, void (*action)(void *), void *data) { struct action_devres *devres; devres = devres_alloc(devm_action_release, sizeof(struct action_devres), GFP_KERNEL); if (!devres) return -ENOMEM; devres->data = data; devres->action = action; devres_add(dev, devres); return 0; } EXPORT_SYMBOL_GPL(devm_add_action); backport-iwlwifi-dkms-7906/compat/backport-3.11.c000066400000000000000000000060741351064634500215240ustar00rootroot00000000000000/* * Copyright (c) 2016 Intel Deutschland GmbH * * Backport functionality introduced in Linux 3.11. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include #include #if RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6) static bool sg_miter_get_next_page(struct sg_mapping_iter *miter) { if (!miter->__remaining) { struct scatterlist *sg; unsigned long pgoffset; if (!__sg_page_iter_next(&miter->piter)) return false; sg = miter->piter.sg; pgoffset = miter->piter.sg_pgoffset; miter->__offset = pgoffset ? 0 : sg->offset; miter->__remaining = sg->offset + sg->length - (pgoffset << PAGE_SHIFT) - miter->__offset; miter->__remaining = min_t(unsigned long, miter->__remaining, PAGE_SIZE - miter->__offset); } return true; } /** * sg_miter_skip - reposition mapping iterator * @miter: sg mapping iter to be skipped * @offset: number of bytes to plus the current location * * Description: * Sets the offset of @miter to its current location plus @offset bytes. * If mapping iterator @miter has been proceeded by sg_miter_next(), this * stops @miter. * * Context: * Don't care if @miter is stopped, or not proceeded yet. * Otherwise, preemption disabled if the SG_MITER_ATOMIC is set. * * Returns: * true if @miter contains the valid mapping. false if end of sg * list is reached. */ static bool sg_miter_skip(struct sg_mapping_iter *miter, off_t offset) { sg_miter_stop(miter); while (offset) { off_t consumed; if (!sg_miter_get_next_page(miter)) return false; consumed = min_t(off_t, offset, miter->__remaining); miter->__offset += consumed; miter->__remaining -= consumed; offset -= consumed; } return true; } /** * sg_copy_buffer - Copy data between a linear buffer and an SG list * @sgl: The SG list * @nents: Number of SG entries * @buf: Where to copy from * @buflen: The number of bytes to copy * @skip: Number of bytes to skip before copying * @to_buffer: transfer direction (true == from an sg list to a * buffer, false == from a buffer to an sg list * * Returns the number of copied bytes. 
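 *
 * Illustrative sketch (editor's example, not from this file; "sgl" and
 * "nents" are assumed to exist in the caller):
 *
 *	u8 tmp[64];
 *	size_t copied;
 *
 *	copied = sg_copy_buffer(sgl, nents, tmp, sizeof(tmp), 16, true);
 *
 * This skips the first 16 bytes of the scatterlist and then copies up
 * to 64 bytes out of it into the linear buffer tmp.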
* **/ size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf, size_t buflen, off_t skip, bool to_buffer) { unsigned int offset = 0; struct sg_mapping_iter miter; unsigned long flags; unsigned int sg_flags = SG_MITER_ATOMIC; if (to_buffer) sg_flags |= SG_MITER_FROM_SG; else sg_flags |= SG_MITER_TO_SG; sg_miter_start(&miter, sgl, nents, sg_flags); if (!sg_miter_skip(&miter, skip)) return false; local_irq_save(flags); while (sg_miter_next(&miter) && offset < buflen) { unsigned int len; len = min(miter.length, buflen - offset); if (to_buffer) memcpy(buf + offset, miter.addr, len); else memcpy(miter.addr, buf + offset, len); offset += len; } sg_miter_stop(&miter); local_irq_restore(flags); return offset; } EXPORT_SYMBOL_GPL(sg_copy_buffer); #endif backport-iwlwifi-dkms-7906/compat/backport-3.12.c000066400000000000000000000034471351064634500215260ustar00rootroot00000000000000/* * Copyright (c) 2013 Hauke Mehrtens * * Backport functionality introduced in Linux 3.12. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include #include #include #include /* * Allocator for buffer that is going to be passed to hid_output_report() */ u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags) { /* * 7 extra bytes are necessary to achieve proper functionality * of implement() working on 8 byte chunks */ int len = ((report->size - 1) >> 3) + 1 + (report->id > 0) + 7; return kmalloc(len, flags); } EXPORT_SYMBOL_GPL(hid_alloc_report_buf); #if BITS_PER_LONG == 32 /** * div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder * @dividend: 64bit dividend * @divisor: 64bit divisor * @remainder: 64bit remainder * * This implementation is a comparable to algorithm used by div64_u64. * But this operation, which includes math for calculating the remainder, * is kept distinct to avoid slowing down the div64_u64 operation on 32bit * systems. */ #ifndef backports_div64_u64_rem_add u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder) { u32 high = divisor >> 32; u64 quot; if (high == 0) { u32 rem32; quot = div_u64_rem(dividend, divisor, &rem32); *remainder = rem32; } else { int n = 1 + fls(high); quot = div_u64(dividend >> n, divisor >> n); if (quot != 0) quot--; *remainder = dividend - quot * divisor; if (*remainder >= divisor) { quot++; *remainder -= divisor; } } return quot; } EXPORT_SYMBOL_GPL(div64_u64_rem); #endif /* backports_div64_u64_rem_add */ #endif /* BITS_PER_LONG */ backport-iwlwifi-dkms-7906/compat/backport-3.13.c000066400000000000000000000102211351064634500215130ustar00rootroot00000000000000/* * Copyright (c) 2013 Hauke Mehrtens * Copyright (c) 2013 Hannes Frederic Sowa * Copyright (c) 2014 Luis R. Rodriguez * * Backport functionality introduced in Linux 3.13. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. 
*/ #include #include #include #include #include #include #include #ifdef __BACKPORT_NET_GET_RANDOM_ONCE struct __net_random_once_work { struct work_struct work; struct static_key *key; }; static void __net_random_once_deferred(struct work_struct *w) { struct __net_random_once_work *work = container_of(w, struct __net_random_once_work, work); if (!static_key_enabled(work->key)) static_key_slow_inc(work->key); kfree(work); } static void __net_random_once_disable_jump(struct static_key *key) { struct __net_random_once_work *w; w = kmalloc(sizeof(*w), GFP_ATOMIC); if (!w) return; INIT_WORK(&w->work, __net_random_once_deferred); w->key = key; schedule_work(&w->work); } bool __net_get_random_once(void *buf, int nbytes, bool *done, struct static_key *done_key) { static DEFINE_SPINLOCK(lock); unsigned long flags; spin_lock_irqsave(&lock, flags); if (*done) { spin_unlock_irqrestore(&lock, flags); return false; } get_random_bytes(buf, nbytes); *done = true; spin_unlock_irqrestore(&lock, flags); __net_random_once_disable_jump(done_key); return true; } EXPORT_SYMBOL_GPL(__net_get_random_once); #endif /* __BACKPORT_NET_GET_RANDOM_ONCE */ #ifdef CONFIG_PCI #define pci_bus_read_dev_vendor_id LINUX_BACKPORT(pci_bus_read_dev_vendor_id) static bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l, int crs_timeout) { int delay = 1; if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l)) return false; /* some broken boards return 0 or ~0 if a slot is empty: */ if (*l == 0xffffffff || *l == 0x00000000 || *l == 0x0000ffff || *l == 0xffff0000) return false; /* * Configuration Request Retry Status. Some root ports return the * actual device ID instead of the synthetic ID (0xFFFF) required * by the PCIe spec. Ignore the device ID and only check for * (vendor id == 1). */ while ((*l & 0xffff) == 0x0001) { if (!crs_timeout) return false; msleep(delay); delay *= 2; if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l)) return false; /* Card hasn't responded in 60 seconds? Must be stuck. 
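 * (The sleep interval doubles on every retry, so the loop gives up
 * once that interval exceeds crs_timeout, i.e. crs_timeout is an
 * upper bound in milliseconds.)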
*/ if (delay > crs_timeout) { printk(KERN_WARNING "pci %04x:%02x:%02x.%d: not responding\n", pci_domain_nr(bus), bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn)); return false; } } return true; } bool pci_device_is_present(struct pci_dev *pdev) { u32 v; return pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0); } EXPORT_SYMBOL_GPL(pci_device_is_present); #endif /* CONFIG_PCI */ #if defined(CONFIG_HWMON) && \ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6) struct device* hwmon_device_register_with_groups(struct device *dev, const char *name, void *drvdata, const struct attribute_group **groups) { struct device *hwdev; hwdev = hwmon_device_register(dev); hwdev->groups = groups; dev_set_drvdata(hwdev, drvdata); return hwdev; } static void devm_hwmon_release(struct device *dev, void *res) { struct device *hwdev = *(struct device **)res; hwmon_device_unregister(hwdev); } struct device * devm_hwmon_device_register_with_groups(struct device *dev, const char *name, void *drvdata, const struct attribute_group **groups) { struct device **ptr, *hwdev; if (!dev) return ERR_PTR(-EINVAL); ptr = devres_alloc(devm_hwmon_release, sizeof(*ptr), GFP_KERNEL); if (!ptr) return ERR_PTR(-ENOMEM); hwdev = hwmon_device_register_with_groups(dev, name, drvdata, groups); if (IS_ERR(hwdev)) goto error; *ptr = hwdev; devres_add(dev, ptr); return hwdev; error: devres_free(ptr); return hwdev; } EXPORT_SYMBOL_GPL(devm_hwmon_device_register_with_groups); #endif backport-iwlwifi-dkms-7906/compat/backport-3.14.c000066400000000000000000000051021351064634500215160ustar00rootroot00000000000000/* * Copyright (c) 2014 Hauke Mehrtens * * Backport functionality introduced in Linux 3.14. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include #include #include #ifdef CONFIG_PCI_MSI /** * pci_enable_msi_range - configure device's MSI capability structure * @dev: device to configure * @minvec: minimal number of interrupts to configure * @maxvec: maximum number of interrupts to configure * * This function tries to allocate a maximum possible number of interrupts in a * range between @minvec and @maxvec. It returns a negative errno if an error * occurs. If it succeeds, it returns the actual number of interrupts allocated * and updates the @dev's irq member to the lowest new interrupt number; * the other interrupt numbers allocated to this device are consecutive. **/ int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec) { int nvec = maxvec; int rc; if (maxvec < minvec) return -ERANGE; do { rc = pci_enable_msi_block(dev, nvec); if (rc < 0) { return rc; } else if (rc > 0) { if (rc < minvec) return -ENOSPC; nvec = rc; } } while (rc); return nvec; } EXPORT_SYMBOL(pci_enable_msi_range); #endif #ifdef CONFIG_PCI_MSI /** * pci_enable_msix_range - configure device's MSI-X capability structure * @dev: pointer to the pci_dev data structure of MSI-X device function * @entries: pointer to an array of MSI-X entries * @minvec: minimum number of MSI-X irqs requested * @maxvec: maximum number of MSI-X irqs requested * * Setup the MSI-X capability structure of device function with a maximum * possible number of interrupts in the range between @minvec and @maxvec * upon its software driver call to request for MSI-X mode enabled on its * hardware device function. It returns a negative errno if an error occurs. 
* If it succeeds, it returns the actual number of interrupts allocated and * indicates the successful configuration of MSI-X capability structure * with new allocated MSI-X interrupts. **/ int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, int minvec, int maxvec) { int nvec = maxvec; int rc; if (maxvec < minvec) return -ERANGE; do { rc = pci_enable_msix(dev, entries, nvec); if (rc < 0) { return rc; } else if (rc > 0) { if (rc < minvec) return -ENOSPC; nvec = rc; } } while (rc); return nvec; } EXPORT_SYMBOL(pci_enable_msix_range); #endif backport-iwlwifi-dkms-7906/compat/backport-3.15.c000066400000000000000000000045471351064634500215330ustar00rootroot00000000000000/* * Copyright (c) 2014 Hauke Mehrtens * Copyright (c) 2015 Luis R. Rodriguez * * Backport functionality introduced in Linux 3.15. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include #include #include #include #include #include #include #include #include /** * devm_kstrdup - Allocate resource managed space and * copy an existing string into that. * @dev: Device to allocate memory for * @s: the string to duplicate * @gfp: the GFP mask used in the devm_kmalloc() call when * allocating memory * RETURNS: * Pointer to allocated string on success, NULL on failure. */ char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp) { size_t size; char *buf; if (!s) return NULL; size = strlen(s) + 1; buf = devm_kmalloc(dev, size, gfp); if (buf) memcpy(buf, s, size); return buf; } EXPORT_SYMBOL_GPL(devm_kstrdup); #ifdef CONFIG_OF /** * of_property_count_elems_of_size - Count the number of elements in a property * * @np: device node from which the property value is to be read. * @propname: name of the property to be searched. * @elem_size: size of the individual element * * Search for a property in a device node and count the number of elements of * size elem_size in it. Returns number of elements on sucess, -EINVAL if the * property does not exist or its length does not match a multiple of elem_size * and -ENODATA if the property does not have a value. */ int of_property_count_elems_of_size(const struct device_node *np, const char *propname, int elem_size) { struct property *prop = of_find_property(np, propname, NULL); if (!prop) return -EINVAL; if (!prop->value) return -ENODATA; if (prop->length % elem_size != 0) { pr_err("size of %s in node %s is not a multiple of %d\n", propname, np->full_name, elem_size); return -EINVAL; } return prop->length / elem_size; } EXPORT_SYMBOL_GPL(of_property_count_elems_of_size); #endif void kvfree(const void *addr) { if (is_vmalloc_addr(addr)) vfree(addr); else kfree(addr); } EXPORT_SYMBOL_GPL(kvfree); backport-iwlwifi-dkms-7906/compat/backport-3.17.c000066400000000000000000000111611351064634500215230ustar00rootroot00000000000000/* * Copyright (c) 2014 Hauke Mehrtens * * Backport functionality introduced in Linux 3.17. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. 
*/ #include #include #include #include #include #include #include #if RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6) int bit_wait(void *word) { schedule(); return 0; } EXPORT_SYMBOL_GPL(bit_wait); int bit_wait_io(void *word) { io_schedule(); return 0; } EXPORT_SYMBOL_GPL(bit_wait_io); #endif #if RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6) /** * ktime_get_raw - Returns the raw monotonic time in ktime_t format */ ktime_t ktime_get_raw(void) { struct timespec ts; getrawmonotonic(&ts); return timespec_to_ktime(ts); } EXPORT_SYMBOL_GPL(ktime_get_raw); #endif /* RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6) */ /** * nsecs_to_jiffies64 - Convert nsecs in u64 to jiffies64 * * @n: nsecs in u64 * * Unlike {m,u}secs_to_jiffies, type of input is not unsigned int but u64. * And this doesn't return MAX_JIFFY_OFFSET since this function is designed * for scheduler, not for use in device drivers to calculate timeout value. * * note: * NSEC_PER_SEC = 10^9 = (5^9 * 2^9) = (1953125 * 512) * ULLONG_MAX ns = 18446744073.709551615 secs = about 584 years */ static u64 backport_nsecs_to_jiffies64(u64 n) { #if (NSEC_PER_SEC % HZ) == 0 /* Common case, HZ = 100, 128, 200, 250, 256, 500, 512, 1000 etc. */ return div_u64(n, NSEC_PER_SEC / HZ); #elif (HZ % 512) == 0 /* overflow after 292 years if HZ = 1024 */ return div_u64(n * HZ / 512, NSEC_PER_SEC / 512); #else /* * Generic case - optimized for cases where HZ is a multiple of 3. * overflow after 64.99 years, exact for HZ = 60, 72, 90, 120 etc. */ return div_u64(n * 9, (9ull * NSEC_PER_SEC + HZ / 2) / HZ); #endif } /** * nsecs_to_jiffies - Convert nsecs in u64 to jiffies * * @n: nsecs in u64 * * Unlike {m,u}secs_to_jiffies, type of input is not unsigned int but u64. * And this doesn't return MAX_JIFFY_OFFSET since this function is designed * for scheduler, not for use in device drivers to calculate timeout value. * * note: * NSEC_PER_SEC = 10^9 = (5^9 * 2^9) = (1953125 * 512) * ULLONG_MAX ns = 18446744073.709551615 secs = about 584 years */ unsigned long nsecs_to_jiffies(u64 n) { return (unsigned long)backport_nsecs_to_jiffies64(n); } EXPORT_SYMBOL_GPL(nsecs_to_jiffies); /** * devm_kvasprintf - Allocate resource managed space * for the formatted string. * @dev: Device to allocate memory for * @gfp: the GFP mask used in the devm_kmalloc() call when * allocating memory * @fmt: the formatted string to duplicate * @ap: the list of tokens to be placed in the formatted string * RETURNS: * Pointer to allocated string on success, NULL on failure. */ char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt, va_list ap) { unsigned int len; char *p; va_list aq; va_copy(aq, ap); len = vsnprintf(NULL, 0, fmt, aq); va_end(aq); p = devm_kmalloc(dev, len+1, gfp); if (!p) return NULL; vsnprintf(p, len+1, fmt, ap); return p; } EXPORT_SYMBOL_GPL(devm_kvasprintf); /** * devm_kasprintf - Allocate resource managed space * and copy an existing formatted string into that * @dev: Device to allocate memory for * @gfp: the GFP mask used in the devm_kmalloc() call when * allocating memory * @fmt: the string to duplicate * RETURNS: * Pointer to allocated string on success, NULL on failure. */ char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...) 
{ va_list ap; char *p; va_start(ap, fmt); p = devm_kvasprintf(dev, gfp, fmt, ap); va_end(ap); return p; } EXPORT_SYMBOL_GPL(devm_kasprintf); #if RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6) #define STANDARD_PARAM_DEF(name, type, format, strtolfn) \ int param_set_##name(const char *val, const struct kernel_param *kp) \ { \ return strtolfn(val, 0, (type *)kp->arg); \ } \ int param_get_##name(char *buffer, const struct kernel_param *kp) \ { \ return scnprintf(buffer, PAGE_SIZE, format, \ *((type *)kp->arg)); \ } \ struct kernel_param_ops param_ops_##name = { \ .set = param_set_##name, \ .get = param_get_##name, \ }; \ EXPORT_SYMBOL(param_set_##name); \ EXPORT_SYMBOL(param_get_##name); \ EXPORT_SYMBOL(param_ops_##name) STANDARD_PARAM_DEF(ullong, unsigned long long, "%llu", kstrtoull); #endif /* RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6) */ backport-iwlwifi-dkms-7906/compat/backport-3.18.c000066400000000000000000000221231351064634500215240ustar00rootroot00000000000000/* * Copyright (c) 2014 Hauke Mehrtens * * Backport functionality introduced in Linux 3.18. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include #include #include #include #include #include #include #include #include #include /** * eth_get_headlen - determine the the length of header for an ethernet frame * @data: pointer to start of frame * @len: total length of frame * * Make a best effort attempt to pull the length for all of the headers for * a given frame in a linear buffer. */ int eth_get_headlen(unsigned char *data, unsigned int max_len) { union { unsigned char *network; /* l2 headers */ struct ethhdr *eth; struct vlan_hdr *vlan; /* l3 headers */ struct iphdr *ipv4; struct ipv6hdr *ipv6; } hdr; __be16 protocol; u8 nexthdr = 0; /* default to not TCP */ u8 hlen; /* this should never happen, but better safe than sorry */ if (max_len < ETH_HLEN) return max_len; /* initialize network frame pointer */ hdr.network = data; /* set first protocol and move network header forward */ protocol = hdr.eth->h_proto; hdr.network += ETH_HLEN; /* handle any vlan tag if present */ if (protocol == htons(ETH_P_8021Q)) { if ((hdr.network - data) > (max_len - VLAN_HLEN)) return max_len; protocol = hdr.vlan->h_vlan_encapsulated_proto; hdr.network += VLAN_HLEN; } /* handle L3 protocols */ if (protocol == htons(ETH_P_IP)) { if ((hdr.network - data) > (max_len - sizeof(struct iphdr))) return max_len; /* access ihl as a u8 to avoid unaligned access on ia64 */ hlen = (hdr.network[0] & 0x0F) << 2; /* verify hlen meets minimum size requirements */ if (hlen < sizeof(struct iphdr)) return hdr.network - data; /* record next protocol if header is present */ if (!(hdr.ipv4->frag_off & htons(IP_OFFSET))) nexthdr = hdr.ipv4->protocol; } else if (protocol == htons(ETH_P_IPV6)) { if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr))) return max_len; /* record next protocol */ nexthdr = hdr.ipv6->nexthdr; hlen = sizeof(struct ipv6hdr); } else if (protocol == htons(ETH_P_FCOE)) { if ((hdr.network - data) > (max_len - FCOE_HEADER_LEN)) return max_len; hlen = FCOE_HEADER_LEN; } else { return hdr.network - data; } /* relocate pointer to start of L4 header */ hdr.network += hlen; /* finally sort out TCP/UDP */ if (nexthdr == IPPROTO_TCP) { if ((hdr.network - data) > (max_len - sizeof(struct tcphdr))) return max_len; /* access doff as a u8 to avoid unaligned access on ia64 */ hlen = (hdr.network[12] & 0xF0) >> 2; /* 
verify hlen meets minimum size requirements */ if (hlen < sizeof(struct tcphdr)) return hdr.network - data; hdr.network += hlen; } else if (nexthdr == IPPROTO_UDP) { if ((hdr.network - data) > (max_len - sizeof(struct udphdr))) return max_len; hdr.network += sizeof(struct udphdr); } /* * If everything has gone correctly hdr.network should be the * data section of the packet and will be the end of the header. * If not then it probably represents the end of the last recognized * header. */ if ((hdr.network - data) < max_len) return hdr.network - data; else return max_len; } EXPORT_SYMBOL_GPL(eth_get_headlen); #define sock_efree LINUX_BACKPORT(sock_efree) static void sock_efree(struct sk_buff *skb) { sock_put(skb->sk); } /** * skb_clone_sk - create clone of skb, and take reference to socket * @skb: the skb to clone * * This function creates a clone of a buffer that holds a reference on * sk_refcnt. Buffers created via this function are meant to be * returned using sock_queue_err_skb, or free via kfree_skb. * * When passing buffers allocated with this function to sock_queue_err_skb * it is necessary to wrap the call with sock_hold/sock_put in order to * prevent the socket from being released prior to being enqueued on * the sk_error_queue. */ struct sk_buff *skb_clone_sk(struct sk_buff *skb) { struct sock *sk = skb->sk; struct sk_buff *clone; if (!sk || !atomic_inc_not_zero(&sk->sk_refcnt)) return NULL; clone = skb_clone(skb, GFP_ATOMIC); if (!clone) { sock_put(sk); return NULL; } clone->sk = sk; clone->destructor = sock_efree; return clone; } EXPORT_SYMBOL_GPL(skb_clone_sk); #if LINUX_VERSION_IS_GEQ(3,3,0) /* * skb_complete_wifi_ack() needs to get backported, because the version from * 3.18 added the sock_hold() and sock_put() calles missing in older versions. */ void skb_complete_wifi_ack(struct sk_buff *skb, bool acked) { struct sock *sk = skb->sk; struct sock_exterr_skb *serr; int err; #if LINUX_VERSION_IS_GEQ(3,3,0) skb->wifi_acked_valid = 1; skb->wifi_acked = acked; #endif serr = SKB_EXT_ERR(skb); memset(serr, 0, sizeof(*serr)); serr->ee.ee_errno = ENOMSG; serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS; /* take a reference to prevent skb_orphan() from freeing the socket */ sock_hold(sk); err = sock_queue_err_skb(sk, skb); if (err) kfree_skb(skb); sock_put(sk); } EXPORT_SYMBOL_GPL(skb_complete_wifi_ack); #endif #if LINUX_VERSION_IS_GEQ(3,17,0) int __sched out_of_line_wait_on_bit_timeout( void *word, int bit, wait_bit_action_f *action, unsigned mode, unsigned long timeout) { wait_queue_head_t *wq = bit_waitqueue(word, bit); DEFINE_WAIT_BIT(wait, word, bit); wait.key.private = jiffies + timeout; return __wait_on_bit(wq, &wait, action, mode); } EXPORT_SYMBOL_GPL(out_of_line_wait_on_bit_timeout); __sched int bit_wait_timeout(struct wait_bit_key *word) { unsigned long now = ACCESS_ONCE(jiffies); if (signal_pending_state(current->state, current)) return 1; if (time_after_eq(now, word->private)) return -EAGAIN; schedule_timeout(word->private - now); return 0; } EXPORT_SYMBOL_GPL(bit_wait_timeout); #endif #ifdef CONFIG_OF /** * of_find_property_value_of_size * * @np: device node from which the property value is to be read. * @propname: name of the property to be searched. * @len: requested length of property value * * Search for a property in a device node and valid the requested size. * Returns the property value on success, -EINVAL if the property does not * exist, -ENODATA if property does not have a value, and -EOVERFLOW if the * property data isn't large enough. 
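 *
 * Illustrative sketch (editor's example, not from this file; the
 * property name and expected layout are hypothetical):
 *
 *	const __be32 *val;
 *
 *	val = of_find_property_value_of_size(np, "clock-frequency",
 *					     sizeof(u32));
 *	if (IS_ERR(val))
 *		return PTR_ERR(val);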
* */ void *of_find_property_value_of_size(const struct device_node *np, const char *propname, u32 len) { struct property *prop = of_find_property(np, propname, NULL); if (!prop) return ERR_PTR(-EINVAL); if (!prop->value) return ERR_PTR(-ENODATA); if (len > prop->length) return ERR_PTR(-EOVERFLOW); return prop->value; } /** * of_property_read_u64_array - Find and read an array of 64 bit integers * from a property. * * @np: device node from which the property value is to be read. * @propname: name of the property to be searched. * @out_values: pointer to return value, modified only if return value is 0. * @sz: number of array elements to read * * Search for a property in a device node and read 64-bit value(s) from * it. Returns 0 on success, -EINVAL if the property does not exist, * -ENODATA if property does not have a value, and -EOVERFLOW if the * property data isn't large enough. * * The out_values is modified only if a valid u64 value can be decoded. */ int of_property_read_u64_array(const struct device_node *np, const char *propname, u64 *out_values, size_t sz) { const __be32 *val = of_find_property_value_of_size(np, propname, (sz * sizeof(*out_values))); if (IS_ERR(val)) return PTR_ERR(val); while (sz--) { *out_values++ = of_read_number(val, 2); val += 2; } return 0; } EXPORT_SYMBOL_GPL(of_property_read_u64_array); #endif /* CONFIG_OF */ #if !(LINUX_VERSION_IS_GEQ(3,17,3) || \ (LINUX_VERSION_IS_GEQ(3,14,24) && \ LINUX_VERSION_IS_LESS(3,15,0)) || \ (LINUX_VERSION_IS_GEQ(3,12,33) && \ LINUX_VERSION_IS_LESS(3,13,0)) || \ (LINUX_VERSION_IS_GEQ(3,10,60) && \ LINUX_VERSION_IS_LESS(3,11,0)) || \ (LINUX_VERSION_IS_GEQ(3,4,106) && \ LINUX_VERSION_IS_LESS(3,5,0)) || \ (LINUX_VERSION_IS_GEQ(3,2,65) && \ LINUX_VERSION_IS_LESS(3,3,0))) /** * memzero_explicit - Fill a region of memory (e.g. sensitive * keying data) with 0s. * @s: Pointer to the start of the area. * @count: The size of the area. * * Note: usually using memset() is just fine (!), but in cases * where clearing out _local_ data at the end of a scope is * necessary, memzero_explicit() should be used instead in * order to prevent the compiler from optimising away zeroing. * * memzero_explicit() doesn't need an arch-specific version as * it just invokes the one of memset() implicitly. */ void memzero_explicit(void *s, size_t count) { memset(s, 0, count); barrier_data(s); } EXPORT_SYMBOL_GPL(memzero_explicit); #endif char *bin2hex(char *dst, const void *src, size_t count) { const unsigned char *_src = src; while (count--) dst = hex_byte_pack(dst, *_src++); return dst; } EXPORT_SYMBOL(bin2hex); backport-iwlwifi-dkms-7906/compat/backport-3.19.c000066400000000000000000000113571351064634500215340ustar00rootroot00000000000000/* * Copyright (c) 2014 Hauke Mehrtens * * Backport functionality introduced in Linux 3.19. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. 
*/ #include #include #include #include #include #include #include #include #if LINUX_VERSION_IS_LESS(3,18,12) && \ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6) static inline bool is_kthread_should_stop(void) { return (current->flags & PF_KTHREAD) && kthread_should_stop(); } /* * DEFINE_WAIT_FUNC(wait, woken_wake_func); * * add_wait_queue(&wq, &wait); * for (;;) { * if (condition) * break; * * p->state = mode; condition = true; * smp_mb(); // A smp_wmb(); // C * if (!wait->flags & WQ_FLAG_WOKEN) wait->flags |= WQ_FLAG_WOKEN; * schedule() try_to_wake_up(); * p->state = TASK_RUNNING; ~~~~~~~~~~~~~~~~~~ * wait->flags &= ~WQ_FLAG_WOKEN; condition = true; * smp_mb() // B smp_wmb(); // C * wait->flags |= WQ_FLAG_WOKEN; * } * remove_wait_queue(&wq, &wait); * */ long wait_woken(wait_queue_t *wait, unsigned mode, long timeout) { set_current_state(mode); /* A */ /* * The above implies an smp_mb(), which matches with the smp_wmb() from * woken_wake_function() such that if we observe WQ_FLAG_WOKEN we must * also observe all state before the wakeup. */ if (!(wait->flags & WQ_FLAG_WOKEN) && !is_kthread_should_stop()) timeout = schedule_timeout(timeout); __set_current_state(TASK_RUNNING); /* * The below implies an smp_mb(), it too pairs with the smp_wmb() from * woken_wake_function() such that we must either observe the wait * condition being true _OR_ WQ_FLAG_WOKEN such that we will not miss * an event. */ set_mb(wait->flags, wait->flags & ~WQ_FLAG_WOKEN); /* B */ return timeout; } EXPORT_SYMBOL(wait_woken); int woken_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key) { /* * Although this function is called under waitqueue lock, LOCK * doesn't imply write barrier and the users expects write * barrier semantics on wakeup functions. The following * smp_wmb() is equivalent to smp_wmb() in try_to_wake_up() * and is paired with set_mb() in wait_woken(). */ smp_wmb(); /* C */ wait->flags |= WQ_FLAG_WOKEN; return default_wake_function(wait, mode, sync, key); } EXPORT_SYMBOL(woken_wake_function); #endif #if RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6) static u8 netdev_rss_key[NETDEV_RSS_KEY_LEN]; void netdev_rss_key_fill(void *buffer, size_t len) { BUG_ON(len > sizeof(netdev_rss_key)); #ifdef __BACKPORT_NET_GET_RANDOM_ONCE net_get_random_once(netdev_rss_key, sizeof(netdev_rss_key)); memcpy(buffer, netdev_rss_key, len); #else get_random_bytes(buffer, len); #endif } EXPORT_SYMBOL_GPL(netdev_rss_key_fill); #endif #if defined(CONFIG_DEBUG_FS) struct debugfs_devm_entry { int (*read)(struct seq_file *seq, void *data); struct device *dev; }; static int debugfs_devm_entry_open(struct inode *inode, struct file *f) { struct debugfs_devm_entry *entry = inode->i_private; return single_open(f, entry->read, entry->dev); } static const struct file_operations debugfs_devm_entry_ops = { .owner = THIS_MODULE, .open = debugfs_devm_entry_open, .release = single_release, .read = seq_read, .llseek = seq_lseek }; /** * debugfs_create_devm_seqfile - create a debugfs file that is bound to device. * * @dev: device related to this debugfs file. * @name: name of the debugfs file. * @parent: a pointer to the parent dentry for this file. This should be a * directory dentry if set. If this parameter is %NULL, then the * file will be created in the root of the debugfs filesystem. * @read_fn: function pointer called to print the seq_file content. 
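 *
 * Illustrative sketch (editor's example, not from this file; the
 * "my_stats_show" callback and its counter are hypothetical):
 *
 *	static int my_stats_show(struct seq_file *s, void *data)
 *	{
 *		struct device *dev = s->private;
 *		struct my_priv *priv = dev_get_drvdata(dev);
 *
 *		seq_printf(s, "resets: %u\n", priv->resets);
 *		return 0;
 *	}
 *
 *	debugfs_create_devm_seqfile(dev, "stats", parent, my_stats_show);
 *
 * The device passed at creation time comes back as s->private inside
 * the show callback.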
*/ struct dentry *debugfs_create_devm_seqfile(struct device *dev, const char *name, struct dentry *parent, int (*read_fn)(struct seq_file *s, void *data)) { struct debugfs_devm_entry *entry; if (IS_ERR(parent)) return ERR_PTR(-ENOENT); entry = devm_kzalloc(dev, sizeof(*entry), GFP_KERNEL); if (!entry) return ERR_PTR(-ENOMEM); entry->read = read_fn; entry->dev = dev; return debugfs_create_file(name, S_IRUGO, parent, entry, &debugfs_devm_entry_ops); } EXPORT_SYMBOL_GPL(debugfs_create_devm_seqfile); #endif /* CONFIG_DEBUG_FS */ int skb_ensure_writable(struct sk_buff *skb, int write_len) { if (!pskb_may_pull(skb, write_len)) return -ENOMEM; if (!skb_cloned(skb) || skb_clone_writable(skb, write_len)) return 0; return pskb_expand_head(skb, 0, 0, GFP_ATOMIC); } EXPORT_SYMBOL_GPL(skb_ensure_writable); backport-iwlwifi-dkms-7906/compat/backport-3.2.c000066400000000000000000000010401351064634500214300ustar00rootroot00000000000000/* * Linux backport symbols for kernels 3.2. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include #include int hex2bin(u8 *dst, const char *src, size_t count) { while (count--) { int hi = hex_to_bin(*src++); int lo = hex_to_bin(*src++); if ((hi < 0) || (lo < 0)) return -1; *dst++ = (hi << 4) | lo; } return 0; } EXPORT_SYMBOL_GPL(hex2bin); backport-iwlwifi-dkms-7906/compat/backport-4.0.c000066400000000000000000000233531351064634500214420ustar00rootroot00000000000000/* * Copyright (c) 2015 Hauke Mehrtens * * Backport functionality introduced in Linux 4.0. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include #include #include #include #include #include #include #include #include #if RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6) static __always_inline long __get_user_pages_locked(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, unsigned long nr_pages, int write, int force, struct page **pages, struct vm_area_struct **vmas, int *locked, bool notify_drop, unsigned int flags) { long ret, pages_done; bool lock_dropped; if (locked) { /* if VM_FAULT_RETRY can be returned, vmas become invalid */ BUG_ON(vmas); /* check caller initialized locked */ BUG_ON(*locked != 1); } if (pages) flags |= FOLL_GET; if (write) flags |= FOLL_WRITE; if (force) flags |= FOLL_FORCE; pages_done = 0; lock_dropped = false; for (;;) { ret = __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas, locked); if (!locked) /* VM_FAULT_RETRY couldn't trigger, bypass */ return ret; /* VM_FAULT_RETRY cannot return errors */ if (!*locked) { BUG_ON(ret < 0); BUG_ON(ret >= nr_pages); } if (!pages) /* If it's a prefault don't insist harder */ return ret; if (ret > 0) { nr_pages -= ret; pages_done += ret; if (!nr_pages) break; } if (*locked) { /* VM_FAULT_RETRY didn't trigger */ if (!pages_done) pages_done = ret; break; } /* VM_FAULT_RETRY triggered, so seek to the faulting offset */ pages += ret; start += ret << PAGE_SHIFT; /* * Repeat on the address that fired VM_FAULT_RETRY * without FAULT_FLAG_ALLOW_RETRY but with * FAULT_FLAG_TRIED. 
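 * (FAULT_FLAG_ALLOW_RETRY is what permits the fault handler to drop
 * mmap_sem; with it cleared and FAULT_FLAG_TRIED set, the handler may
 * still block but cannot return VM_FAULT_RETRY again.)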
*/ *locked = 1; lock_dropped = true; down_read(&mm->mmap_sem); ret = __get_user_pages(tsk, mm, start, 1, flags | FOLL_TRIED, pages, NULL, NULL); if (ret != 1) { BUG_ON(ret > 1); if (!pages_done) pages_done = ret; break; } nr_pages--; pages_done++; if (!nr_pages) break; pages++; start += PAGE_SIZE; } if (notify_drop && lock_dropped && *locked) { /* * We must let the caller know we temporarily dropped the lock * and so the critical section protected by it was lost. */ up_read(&mm->mmap_sem); *locked = 0; } return pages_done; } /* * We can leverage the VM_FAULT_RETRY functionality in the page fault * paths better by using either get_user_pages_locked() or * get_user_pages_unlocked(). * * get_user_pages_locked() is suitable to replace the form: * * down_read(&mm->mmap_sem); * do_something() * get_user_pages(tsk, mm, ..., pages, NULL); * up_read(&mm->mmap_sem); * * to: * * int locked = 1; * down_read(&mm->mmap_sem); * do_something() * get_user_pages_locked(tsk, mm, ..., pages, &locked); * if (locked) * up_read(&mm->mmap_sem); */ long get_user_pages_locked(unsigned long start, unsigned long nr_pages, int write, int force, struct page **pages, int *locked) { return __get_user_pages_locked(current, current->mm, start, nr_pages, write, force, pages, NULL, locked, true, FOLL_TOUCH); } EXPORT_SYMBOL_GPL(get_user_pages_locked); /* * Same as get_user_pages_unlocked(...., FOLL_TOUCH) but it allows to * pass additional gup_flags as last parameter (like FOLL_HWPOISON). * * NOTE: here FOLL_TOUCH is not set implicitly and must be set by the * caller if required (just like with __get_user_pages). "FOLL_GET", * "FOLL_WRITE" and "FOLL_FORCE" are set implicitly as needed * according to the parameters "pages", "write", "force" * respectively. */ static __always_inline long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, unsigned long nr_pages, int write, int force, struct page **pages, unsigned int gup_flags) { long ret; int locked = 1; down_read(&mm->mmap_sem); ret = __get_user_pages_locked(tsk, mm, start, nr_pages, write, force, pages, NULL, &locked, false, gup_flags); if (locked) up_read(&mm->mmap_sem); return ret; } /* * get_user_pages_unlocked() is suitable to replace the form: * * down_read(&mm->mmap_sem); * get_user_pages(tsk, mm, ..., pages, NULL); * up_read(&mm->mmap_sem); * * with: * * get_user_pages_unlocked(tsk, mm, ..., pages); * * It is functionally equivalent to get_user_pages_fast so * get_user_pages_fast should be used instead, if the two parameters * "tsk" and "mm" are respectively equal to current and current->mm, * or if "force" shall be set to 1 (get_user_pages_fast misses the * "force" parameter). */ long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages, int write, int force, struct page **pages) { return __get_user_pages_unlocked(current, current->mm, start, nr_pages, write, force, pages, FOLL_TOUCH); } EXPORT_SYMBOL_GPL(get_user_pages_unlocked); #endif /** * hex_dump_to_buffer - convert a blob of data to "hex ASCII" in memory * @buf: data blob to dump * @len: number of bytes in the @buf * @rowsize: number of bytes to print per line; must be 16 or 32 * @groupsize: number of bytes to print at a time (1, 2, 4, 8; default = 1) * @linebuf: where to put the converted data * @linebuflen: total size of @linebuf, including space for terminating NUL * @ascii: include ASCII after the hex output * * hex_dump_to_buffer() works on one "line" of output at a time, i.e., * 16 or 32 bytes of input data converted to hex + ASCII output. 
* * Given a buffer of u8 data, hex_dump_to_buffer() converts the input data * to a hex + ASCII dump at the supplied memory location. * The converted output is always NUL-terminated. * * E.g.: * hex_dump_to_buffer(frame->data, frame->len, 16, 1, * linebuf, sizeof(linebuf), true); * * example output buffer: * 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f @ABCDEFGHIJKLMNO * * Return: * The amount of bytes placed in the buffer without terminating NUL. If the * output was truncated, then the return value is the number of bytes * (excluding the terminating NUL) which would have been written to the final * string if enough space had been available. */ int hex_dump_to_buffer(const void *buf, size_t len, int rowsize, int groupsize, char *linebuf, size_t linebuflen, bool ascii) { const u8 *ptr = buf; int ngroups; u8 ch; int j, lx = 0; int ascii_column; int ret; if (rowsize != 16 && rowsize != 32) rowsize = 16; if (len > rowsize) /* limit to one line at a time */ len = rowsize; if (!is_power_of_2(groupsize) || groupsize > 8) groupsize = 1; if ((len % groupsize) != 0) /* no mixed size output */ groupsize = 1; ngroups = len / groupsize; ascii_column = rowsize * 2 + rowsize / groupsize + 1; if (!linebuflen) goto overflow1; if (!len) goto nil; if (groupsize == 8) { const u64 *ptr8 = buf; for (j = 0; j < ngroups; j++) { ret = snprintf(linebuf + lx, linebuflen - lx, "%s%16.16llx", j ? " " : "", get_unaligned(ptr8 + j)); if (ret >= linebuflen - lx) goto overflow1; lx += ret; } } else if (groupsize == 4) { const u32 *ptr4 = buf; for (j = 0; j < ngroups; j++) { ret = snprintf(linebuf + lx, linebuflen - lx, "%s%8.8x", j ? " " : "", get_unaligned(ptr4 + j)); if (ret >= linebuflen - lx) goto overflow1; lx += ret; } } else if (groupsize == 2) { const u16 *ptr2 = buf; for (j = 0; j < ngroups; j++) { ret = snprintf(linebuf + lx, linebuflen - lx, "%s%4.4x", j ? " " : "", get_unaligned(ptr2 + j)); if (ret >= linebuflen - lx) goto overflow1; lx += ret; } } else { for (j = 0; j < len; j++) { if (linebuflen < lx + 3) goto overflow2; ch = ptr[j]; linebuf[lx++] = hex_asc_hi(ch); linebuf[lx++] = hex_asc_lo(ch); linebuf[lx++] = ' '; } if (j) lx--; } if (!ascii) goto nil; while (lx < ascii_column) { if (linebuflen < lx + 2) goto overflow2; linebuf[lx++] = ' '; } for (j = 0; j < len; j++) { if (linebuflen < lx + 2) goto overflow2; ch = ptr[j]; linebuf[lx++] = (isascii(ch) && isprint(ch)) ? ch : '.'; } nil: linebuf[lx] = '\0'; return lx; overflow2: linebuf[lx++] = '\0'; overflow1: return ascii ? 
ascii_column + len : (groupsize * 2 + 1) * ngroups - 1; } EXPORT_SYMBOL_GPL(hex_dump_to_buffer); #if LINUX_VERSION_IS_LESS(3,17,0) && \ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6) static inline unsigned char * trace_seq_buffer_ptr(struct trace_seq *s) { return s->buffer + s->len; } #endif #if RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6) const char * ftrace_print_array_seq(struct trace_seq *p, const void *buf, int buf_len, size_t el_size) { const char *ret = trace_seq_buffer_ptr(p); const char *prefix = ""; void *ptr = (void *)buf; trace_seq_putc(p, '{'); while (ptr < buf + buf_len) { switch (el_size) { case 1: trace_seq_printf(p, "%s0x%x", prefix, *(u8 *)ptr); break; case 2: trace_seq_printf(p, "%s0x%x", prefix, *(u16 *)ptr); break; case 4: trace_seq_printf(p, "%s0x%x", prefix, *(u32 *)ptr); break; case 8: trace_seq_printf(p, "%s0x%llx", prefix, *(u64 *)ptr); break; default: trace_seq_printf(p, "BAD SIZE:%zu 0x%x", el_size, *(u8 *)ptr); el_size = 1; } prefix = ","; ptr += el_size; } trace_seq_putc(p, '}'); trace_seq_putc(p, 0); return ret; } EXPORT_SYMBOL(ftrace_print_array_seq); #endif /* RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6) */ backport-iwlwifi-dkms-7906/compat/backport-4.1.c000066400000000000000000000047351351064634500214460ustar00rootroot00000000000000/* * Copyright (c) 2015 Stefan Assmann * Copyright (c) 2015 Hauke Mehrtens * * Backport functionality introduced in Linux 4.1. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include #include #if RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6) netdev_features_t passthru_features_check(struct sk_buff *skb, struct net_device *dev, netdev_features_t features) { return features; } EXPORT_SYMBOL_GPL(passthru_features_check); #endif /* RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6) */ #ifdef CONFIG_TTY #if LINUX_VERSION_IS_GEQ(4,0,0) static void unset_locked_termios(struct ktermios *termios, struct ktermios *old, struct ktermios *locked) { int i; #define NOSET_MASK(x, y, z) (x = ((x) & ~(z)) | ((y) & (z))) if (!locked) { printk(KERN_WARNING "Warning?!? termios_locked is NULL.\n"); return; } NOSET_MASK(termios->c_iflag, old->c_iflag, locked->c_iflag); NOSET_MASK(termios->c_oflag, old->c_oflag, locked->c_oflag); NOSET_MASK(termios->c_cflag, old->c_cflag, locked->c_cflag); NOSET_MASK(termios->c_lflag, old->c_lflag, locked->c_lflag); termios->c_line = locked->c_line ? old->c_line : termios->c_line; for (i = 0; i < NCCS; i++) termios->c_cc[i] = locked->c_cc[i] ? old->c_cc[i] : termios->c_cc[i]; /* FIXME: What should we do for i/ospeed */ } int tty_set_termios(struct tty_struct *tty, struct ktermios *new_termios) { struct ktermios old_termios; struct tty_ldisc *ld; WARN_ON(tty->driver->type == TTY_DRIVER_TYPE_PTY && tty->driver->subtype == PTY_TYPE_MASTER); /* * Perform the actual termios internal changes under lock. 
*/ /* FIXME: we need to decide on some locking/ordering semantics for the set_termios notification eventually */ down_write(&tty->termios_rwsem); old_termios = tty->termios; tty->termios = *new_termios; unset_locked_termios(&tty->termios, &old_termios, &tty->termios_locked); if (tty->ops->set_termios) tty->ops->set_termios(tty, &old_termios); else tty_termios_copy_hw(&tty->termios, &old_termios); ld = tty_ldisc_ref(tty); if (ld != NULL) { if (ld->ops->set_termios) ld->ops->set_termios(tty, &old_termios); tty_ldisc_deref(ld); } up_write(&tty->termios_rwsem); return 0; } EXPORT_SYMBOL_GPL(tty_set_termios); #endif /* LINUX_VERSION_IS_GEQ(4,0,0) */ #endif /* CONFIG_TTY */ backport-iwlwifi-dkms-7906/compat/backport-4.10.c000066400000000000000000000174001351064634500215170ustar00rootroot00000000000000/* * Copyright(c) 2017 Hauke Mehrtens * * Backport functionality introduced in Linux 4.10. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include #include #include #include #include #include #include #if LINUX_VERSION_IS_GEQ(4,6,0) #if LINUX_VERSION_IS_LESS(4,7,0) static bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32, const unsigned long *src) { bool retval = true; /* TODO: following test will soon always be true */ if (__ETHTOOL_LINK_MODE_MASK_NBITS > 32) { __ETHTOOL_DECLARE_LINK_MODE_MASK(ext); bitmap_zero(ext, __ETHTOOL_LINK_MODE_MASK_NBITS); bitmap_fill(ext, 32); bitmap_complement(ext, ext, __ETHTOOL_LINK_MODE_MASK_NBITS); if (bitmap_intersects(ext, src, __ETHTOOL_LINK_MODE_MASK_NBITS)) { /* src mask goes beyond bit 31 */ retval = false; } } *legacy_u32 = src[0]; return retval; } static void ethtool_convert_legacy_u32_to_link_mode(unsigned long *dst, u32 legacy_u32) { bitmap_zero(dst, __ETHTOOL_LINK_MODE_MASK_NBITS); dst[0] = legacy_u32; } #endif static u32 mii_get_an(struct mii_if_info *mii, u16 addr) { int advert; advert = mii->mdio_read(mii->dev, mii->phy_id, addr); return mii_lpa_to_ethtool_lpa_t(advert); } /** * mii_ethtool_set_link_ksettings - set settings that are specified in @cmd * @mii: MII interfaces * @cmd: requested ethtool_link_ksettings * * Returns 0 for success, negative on error. 
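 *
 * Illustrative sketch (editor's example, not from this file; wiring the
 * helper into a driver's ethtool_ops, where "priv->mii" is a
 * hypothetical struct mii_if_info field):
 *
 *	static int my_set_link_ksettings(struct net_device *dev,
 *					 const struct ethtool_link_ksettings *cmd)
 *	{
 *		struct my_priv *priv = netdev_priv(dev);
 *
 *		return mii_ethtool_set_link_ksettings(&priv->mii, cmd);
 *	}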
*/ int mii_ethtool_set_link_ksettings(struct mii_if_info *mii, const struct ethtool_link_ksettings *cmd) { struct net_device *dev = mii->dev; u32 speed = cmd->base.speed; if (speed != SPEED_10 && speed != SPEED_100 && speed != SPEED_1000) return -EINVAL; if (cmd->base.duplex != DUPLEX_HALF && cmd->base.duplex != DUPLEX_FULL) return -EINVAL; if (cmd->base.port != PORT_MII) return -EINVAL; if (cmd->base.phy_address != mii->phy_id) return -EINVAL; if (cmd->base.autoneg != AUTONEG_DISABLE && cmd->base.autoneg != AUTONEG_ENABLE) return -EINVAL; if ((speed == SPEED_1000) && (!mii->supports_gmii)) return -EINVAL; /* ignore supported, maxtxpkt, maxrxpkt */ if (cmd->base.autoneg == AUTONEG_ENABLE) { u32 bmcr, advert, tmp; u32 advert2 = 0, tmp2 = 0; u32 advertising; ethtool_convert_link_mode_to_legacy_u32( &advertising, cmd->link_modes.advertising); if ((advertising & (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) == 0) return -EINVAL; /* advertise only what has been requested */ advert = mii->mdio_read(dev, mii->phy_id, MII_ADVERTISE); tmp = advert & ~(ADVERTISE_ALL | ADVERTISE_100BASE4); if (mii->supports_gmii) { advert2 = mii->mdio_read(dev, mii->phy_id, MII_CTRL1000); tmp2 = advert2 & ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL); } tmp |= ethtool_adv_to_mii_adv_t(advertising); if (mii->supports_gmii) tmp2 |= ethtool_adv_to_mii_ctrl1000_t(advertising); if (advert != tmp) { mii->mdio_write(dev, mii->phy_id, MII_ADVERTISE, tmp); mii->advertising = tmp; } if ((mii->supports_gmii) && (advert2 != tmp2)) mii->mdio_write(dev, mii->phy_id, MII_CTRL1000, tmp2); /* turn on autonegotiation, and force a renegotiate */ bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR); bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); mii->mdio_write(dev, mii->phy_id, MII_BMCR, bmcr); mii->force_media = 0; } else { u32 bmcr, tmp; /* turn off auto negotiation, set speed and duplexity */ bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR); tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_FULLDPLX); if (speed == SPEED_1000) tmp |= BMCR_SPEED1000; else if (speed == SPEED_100) tmp |= BMCR_SPEED100; if (cmd->base.duplex == DUPLEX_FULL) { tmp |= BMCR_FULLDPLX; mii->full_duplex = 1; } else { mii->full_duplex = 0; } if (bmcr != tmp) mii->mdio_write(dev, mii->phy_id, MII_BMCR, tmp); mii->force_media = 1; } return 0; } EXPORT_SYMBOL(mii_ethtool_set_link_ksettings); /** * mii_ethtool_get_link_ksettings - get settings that are specified in @cmd * @mii: MII interface * @cmd: requested ethtool_link_ksettings * * The @cmd parameter is expected to have been cleared before calling * mii_ethtool_get_link_ksettings(). * * Returns 0 for success, negative on error. 
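 *
 * Illustrative sketch (editor's example, not from this file; the memset
 * makes the "cleared before calling" requirement above explicit, and
 * "priv->mii" is a hypothetical struct mii_if_info field):
 *
 *	static int my_get_link_ksettings(struct net_device *dev,
 *					 struct ethtool_link_ksettings *cmd)
 *	{
 *		struct my_priv *priv = netdev_priv(dev);
 *
 *		memset(cmd, 0, sizeof(*cmd));
 *		return mii_ethtool_get_link_ksettings(&priv->mii, cmd);
 *	}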
*/ int mii_ethtool_get_link_ksettings(struct mii_if_info *mii, struct ethtool_link_ksettings *cmd) { struct net_device *dev = mii->dev; u16 bmcr, bmsr, ctrl1000 = 0, stat1000 = 0; u32 nego, supported, advertising, lp_advertising; supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII); if (mii->supports_gmii) supported |= SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full; /* only supports twisted-pair */ cmd->base.port = PORT_MII; /* this isn't fully supported at higher layers */ cmd->base.phy_address = mii->phy_id; cmd->base.mdio_support = ETH_MDIO_SUPPORTS_C22; advertising = ADVERTISED_TP | ADVERTISED_MII; bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR); bmsr = mii->mdio_read(dev, mii->phy_id, MII_BMSR); if (mii->supports_gmii) { ctrl1000 = mii->mdio_read(dev, mii->phy_id, MII_CTRL1000); stat1000 = mii->mdio_read(dev, mii->phy_id, MII_STAT1000); } if (bmcr & BMCR_ANENABLE) { advertising |= ADVERTISED_Autoneg; cmd->base.autoneg = AUTONEG_ENABLE; advertising |= mii_get_an(mii, MII_ADVERTISE); if (mii->supports_gmii) advertising |= mii_ctrl1000_to_ethtool_adv_t(ctrl1000); if (bmsr & BMSR_ANEGCOMPLETE) { lp_advertising = mii_get_an(mii, MII_LPA); lp_advertising |= mii_stat1000_to_ethtool_lpa_t(stat1000); } else { lp_advertising = 0; } nego = advertising & lp_advertising; if (nego & (ADVERTISED_1000baseT_Full | ADVERTISED_1000baseT_Half)) { cmd->base.speed = SPEED_1000; cmd->base.duplex = !!(nego & ADVERTISED_1000baseT_Full); } else if (nego & (ADVERTISED_100baseT_Full | ADVERTISED_100baseT_Half)) { cmd->base.speed = SPEED_100; cmd->base.duplex = !!(nego & ADVERTISED_100baseT_Full); } else { cmd->base.speed = SPEED_10; cmd->base.duplex = !!(nego & ADVERTISED_10baseT_Full); } } else { cmd->base.autoneg = AUTONEG_DISABLE; cmd->base.speed = ((bmcr & BMCR_SPEED1000 && (bmcr & BMCR_SPEED100) == 0) ? SPEED_1000 : ((bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10)); cmd->base.duplex = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF; lp_advertising = 0; } mii->full_duplex = cmd->base.duplex; ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, supported); ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, advertising); ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising, lp_advertising); /* ignore maxtxpkt, maxrxpkt for now */ return 0; } EXPORT_SYMBOL(mii_ethtool_get_link_ksettings); #endif /* LINUX_VERSION_IS_GEQ(4,6,0) */ #if LINUX_VERSION_IS_GEQ(4,2,0) void __page_frag_cache_drain(struct page *page, unsigned int count) { VM_BUG_ON_PAGE(page_ref_count(page) == 0, page); if (page_ref_sub_and_test(page, count)) { unsigned int order = compound_order(page); /* * __free_pages_ok() is not exported so call * __free_pages() which decrements the ref counter * and increment the ref counter before. */ page_ref_inc(page); __free_pages(page, order); } } EXPORT_SYMBOL_GPL(__page_frag_cache_drain); #endif backport-iwlwifi-dkms-7906/compat/backport-4.18.c000066400000000000000000000003421351064634500215240ustar00rootroot00000000000000/* * Copyright (C) 2018 Intel Corporation */ #include time64_t ktime_get_boottime_seconds(void) { return ktime_divns(ktime_get_boottime(), NSEC_PER_SEC); } EXPORT_SYMBOL_GPL(ktime_get_boottime_seconds); backport-iwlwifi-dkms-7906/compat/backport-4.2.c000066400000000000000000000037651351064634500214510ustar00rootroot00000000000000/* * Copyright (c) 2015 Hauke Mehrtens * * Backport functionality introduced in Linux 4.2. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include #include #if RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6) static struct scatterlist *scatterwalk_ffwd(struct scatterlist dst[2], struct scatterlist *src, unsigned int len) { for (;;) { if (!len) return src; if (src->length > len) break; len -= src->length; src = sg_next(src); } sg_init_table(dst, 2); sg_set_page(dst, sg_page(src), src->length - len, src->offset + len); scatterwalk_crypto_chain(dst, sg_next(src), 0, 2); return dst; } #endif struct aead_old_request { struct scatterlist srcbuf[2]; struct scatterlist dstbuf[2]; struct aead_request subreq; }; unsigned int crypto_aead_reqsize(struct crypto_aead *tfm) { return crypto_aead_crt(tfm)->reqsize + sizeof(struct aead_old_request); } EXPORT_SYMBOL_GPL(crypto_aead_reqsize); struct aead_request *crypto_backport_convert(struct aead_request *req) { struct aead_old_request *nreq = aead_request_ctx(req); struct crypto_aead *aead = crypto_aead_reqtfm(req); struct scatterlist *src, *dst; src = scatterwalk_ffwd(nreq->srcbuf, req->src, req->assoclen); dst = req->src == req->dst ? src : scatterwalk_ffwd(nreq->dstbuf, req->dst, req->assoclen); aead_request_set_tfm(&nreq->subreq, aead); aead_request_set_callback(&nreq->subreq, aead_request_flags(req), req->base.complete, req->base.data); aead_request_set_crypt(&nreq->subreq, src, dst, req->cryptlen, req->iv); aead_request_set_assoc(&nreq->subreq, req->src, req->assoclen); return &nreq->subreq; } EXPORT_SYMBOL_GPL(crypto_backport_convert); char *strreplace(char *s, char old, char new) { for (; *s; ++s) if (*s == old) *s = new; return s; } EXPORT_SYMBOL_GPL(strreplace); backport-iwlwifi-dkms-7906/compat/backport-4.20.c000066400000000000000000000202751351064634500215240ustar00rootroot00000000000000/* * Copyright (C) 2018 - 2019 Intel Corporation * * Backport functionality introduced in Linux 4.20. * This is basically upstream lib/nlattr.c. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. 
*/ #include #include #include #include #include static const u8 nla_attr_len[NLA_TYPE_MAX+1] = { [NLA_U8] = sizeof(u8), [NLA_U16] = sizeof(u16), [NLA_U32] = sizeof(u32), [NLA_U64] = sizeof(u64), [NLA_S8] = sizeof(s8), [NLA_S16] = sizeof(s16), [NLA_S32] = sizeof(s32), [NLA_S64] = sizeof(s64), }; static const u8 nla_attr_minlen[NLA_TYPE_MAX+1] = { [NLA_U8] = sizeof(u8), [NLA_U16] = sizeof(u16), [NLA_U32] = sizeof(u32), [NLA_U64] = sizeof(u64), [NLA_MSECS] = sizeof(u64), [NLA_NESTED] = NLA_HDRLEN, [NLA_S8] = sizeof(s8), [NLA_S16] = sizeof(s16), [NLA_S32] = sizeof(s32), [NLA_S64] = sizeof(s64), }; static int validate_nla_bitfield32(const struct nlattr *nla, const u32 *valid_flags_mask) { const struct nla_bitfield32 *bf = nla_data(nla); if (!valid_flags_mask) return -EINVAL; /*disallow invalid bit selector */ if (bf->selector & ~*valid_flags_mask) return -EINVAL; /*disallow invalid bit values */ if (bf->value & ~*valid_flags_mask) return -EINVAL; /*disallow valid bit values that are not selected*/ if (bf->value & ~bf->selector) return -EINVAL; return 0; } static int nla_validate_array(const struct nlattr *head, int len, int maxtype, const struct nla_policy *policy, struct netlink_ext_ack *extack) { const struct nlattr *entry; int rem; nla_for_each_attr(entry, head, len, rem) { int ret; if (nla_len(entry) == 0) continue; if (nla_len(entry) < NLA_HDRLEN) { NL_SET_ERR_MSG_ATTR(extack, entry, "Array element too short"); return -ERANGE; } ret = nla_validate(nla_data(entry), nla_len(entry), maxtype, policy, extack); if (ret < 0) return ret; } return 0; } static int nla_validate_int_range(const struct nla_policy *pt, const struct nlattr *nla, struct netlink_ext_ack *extack) { bool validate_min, validate_max; s64 value; validate_min = pt->validation_type == NLA_VALIDATE_RANGE || pt->validation_type == NLA_VALIDATE_MIN; validate_max = pt->validation_type == NLA_VALIDATE_RANGE || pt->validation_type == NLA_VALIDATE_MAX; switch (pt->type) { case NLA_U8: value = nla_get_u8(nla); break; case NLA_U16: value = nla_get_u16(nla); break; case NLA_U32: value = nla_get_u32(nla); break; case NLA_S8: value = nla_get_s8(nla); break; case NLA_S16: value = nla_get_s16(nla); break; case NLA_S32: value = nla_get_s32(nla); break; case NLA_S64: value = nla_get_s64(nla); break; case NLA_U64: /* treat this one specially, since it may not fit into s64 */ if ((validate_min && nla_get_u64(nla) < pt->min) || (validate_max && nla_get_u64(nla) > pt->max)) { NL_SET_ERR_MSG_ATTR(extack, nla, "integer out of range"); return -ERANGE; } return 0; default: WARN_ON(1); return -EINVAL; } if ((validate_min && value < pt->min) || (validate_max && value > pt->max)) { NL_SET_ERR_MSG_ATTR(extack, nla, "integer out of range"); return -ERANGE; } return 0; } static int validate_nla(const struct nlattr *nla, int maxtype, const struct nla_policy *policy, struct netlink_ext_ack *extack) { const struct nla_policy *pt; int minlen = 0, attrlen = nla_len(nla), type = nla_type(nla); int err = -ERANGE; if (type <= 0 || type > maxtype) return 0; if (WARN_ON(!policy)) return -EINVAL; pt = &policy[type]; BUG_ON(pt->type > NLA_TYPE_MAX); if ((nla_attr_len[pt->type] && attrlen != nla_attr_len[pt->type]) || (pt->type == NLA_EXACT_LEN_WARN && attrlen != pt->len)) { pr_warn_ratelimited("netlink: '%s': attribute type %d has an invalid length.\n", current->comm, type); } switch (pt->type) { case NLA_EXACT_LEN: if (attrlen != pt->len) goto out_err; break; case NLA_REJECT: if (extack && pt->validation_data) { NL_SET_BAD_ATTR(extack, nla); extack->_msg = 
pt->validation_data; return -EINVAL; } err = -EINVAL; goto out_err; case NLA_FLAG: if (attrlen > 0) goto out_err; break; case NLA_BITFIELD32: if (attrlen != sizeof(struct nla_bitfield32)) goto out_err; err = validate_nla_bitfield32(nla, pt->validation_data); if (err) goto out_err; break; case NLA_NUL_STRING: if (pt->len) minlen = min_t(int, attrlen, pt->len + 1); else minlen = attrlen; if (!minlen || memchr(nla_data(nla), '\0', minlen) == NULL) { err = -EINVAL; goto out_err; } /* fall through */ case NLA_STRING: if (attrlen < 1) goto out_err; if (pt->len) { char *buf = nla_data(nla); if (buf[attrlen - 1] == '\0') attrlen--; if (attrlen > pt->len) goto out_err; } break; case NLA_BINARY: if (pt->len && attrlen > pt->len) goto out_err; break; case NLA_NESTED: /* a nested attribute is allowed to be empty; if it is not, * it must have a size of at least NLA_HDRLEN. */ if (attrlen == 0) break; if (attrlen < NLA_HDRLEN) goto out_err; if (pt->validation_data) { err = nla_validate(nla_data(nla), nla_len(nla), pt->len, pt->validation_data, extack); if (err < 0) { /* * return directly to preserve the inner * error message/attribute pointer */ return err; } } break; case NLA_NESTED_ARRAY: /* a nested array attribute is allowed to be empty; if it is not, * it must have a size of at least NLA_HDRLEN. */ if (attrlen == 0) break; if (attrlen < NLA_HDRLEN) goto out_err; if (pt->validation_data) { int err; err = nla_validate_array(nla_data(nla), nla_len(nla), pt->len, pt->validation_data, extack); if (err < 0) { /* * return directly to preserve the inner * error message/attribute pointer */ return err; } } break; default: if (pt->len) minlen = pt->len; else if (pt->type != NLA_UNSPEC) minlen = nla_attr_minlen[pt->type]; if (attrlen < minlen) goto out_err; } /* further validation */ switch (pt->validation_type) { case NLA_VALIDATE_NONE: /* nothing to do */ break; case NLA_VALIDATE_RANGE: case NLA_VALIDATE_MIN: case NLA_VALIDATE_MAX: err = nla_validate_int_range(pt, nla, extack); if (err) return err; break; case NLA_VALIDATE_FUNCTION: if (pt->validate) { err = pt->validate(nla, extack); if (err) return err; } break; } return 0; out_err: NL_SET_ERR_MSG_ATTR(extack, nla, "Attribute failed policy validation"); return err; } int backport_nla_validate(const struct nlattr *head, int len, int maxtype, const struct nla_policy *policy, struct netlink_ext_ack *extack) { const struct nlattr *nla; int rem; if (!policy) return 0; nla_for_each_attr(nla, head, len, rem) { int err = validate_nla(nla, maxtype, policy, extack); if (err < 0) return err; } return 0; } EXPORT_SYMBOL_GPL(backport_nla_validate); int backport_nla_policy_len(const struct nla_policy *p, int n) { int i, len = 0; for (i = 0; i < n; i++, p++) { if (p->len) len += nla_total_size(p->len); else if (nla_attr_len[p->type]) len += nla_total_size(nla_attr_len[p->type]); else if (nla_attr_minlen[p->type]) len += nla_total_size(nla_attr_minlen[p->type]); } return len; } EXPORT_SYMBOL_GPL(backport_nla_policy_len); int backport_nla_parse(struct nlattr **tb, int maxtype, const struct nlattr *head, int len, const struct nla_policy *policy, struct netlink_ext_ack *extack) { const struct nlattr *nla; int rem; memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1)); nla_for_each_attr(nla, head, len, rem) { u16 type = nla_type(nla); if (type > 0 && type <= maxtype) { if (policy) { int err = validate_nla(nla, maxtype, policy, extack); if (err < 0) return err; } tb[type] = (struct nlattr *)nla; } } if (unlikely(rem > 0)) pr_warn_ratelimited("netlink: %d bytes leftover after
parsing attributes in process `%s'.\n", rem, current->comm); return 0; } EXPORT_SYMBOL(backport_nla_parse); backport-iwlwifi-dkms-7906/compat/backport-4.3.c000066400000000000000000000246541351064634500214520ustar00rootroot00000000000000/* * Copyright (c) 2015 Hauke Mehrtens * Copyright (c) 2015 - 2016 Intel Deutschland GmbH * * Backport functionality introduced in Linux 4.3. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include #include #include #include #include #include #include #include #include #ifdef CONFIG_THERMAL #if LINUX_VERSION_IS_GEQ(3,8,0) struct backport_thermal_ops_wrapper { old_thermal_zone_device_ops_t ops; struct thermal_zone_device_ops *driver_ops; }; #ifndef CONFIG_BTNS_PMIC static int backport_thermal_get_temp(struct thermal_zone_device *dev, unsigned long *temp) { struct backport_thermal_ops_wrapper *wrapper = container_of(dev->ops, struct backport_thermal_ops_wrapper, ops); int _temp, ret; ret = wrapper->driver_ops->get_temp(dev, &_temp); if (!ret) *temp = (unsigned long)_temp; return ret; } static int backport_thermal_get_trip_temp(struct thermal_zone_device *dev, int i, unsigned long *temp) { struct backport_thermal_ops_wrapper *wrapper = container_of(dev->ops, struct backport_thermal_ops_wrapper, ops); int _temp, ret; ret = wrapper->driver_ops->get_trip_temp(dev, i, &_temp); if (!ret) *temp = (unsigned long)_temp; return ret; } static int backport_thermal_set_trip_temp(struct thermal_zone_device *dev, int i, unsigned long temp) { struct backport_thermal_ops_wrapper *wrapper = container_of(dev->ops, struct backport_thermal_ops_wrapper, ops); return wrapper->driver_ops->set_trip_temp(dev, i, (int)temp); } static int backport_thermal_get_trip_hyst(struct thermal_zone_device *dev, int i, unsigned long *temp) { struct backport_thermal_ops_wrapper *wrapper = container_of(dev->ops, struct backport_thermal_ops_wrapper, ops); int _temp, ret; ret = wrapper->driver_ops->get_trip_hyst(dev, i, &_temp); if (!ret) *temp = (unsigned long)_temp; return ret; } static int backport_thermal_set_trip_hyst(struct thermal_zone_device *dev, int i, unsigned long temp) { struct backport_thermal_ops_wrapper *wrapper = container_of(dev->ops, struct backport_thermal_ops_wrapper, ops); return wrapper->driver_ops->set_trip_hyst(dev, i, (int)temp); } static int backport_thermal_get_crit_temp(struct thermal_zone_device *dev, unsigned long *temp) { struct backport_thermal_ops_wrapper *wrapper = container_of(dev->ops, struct backport_thermal_ops_wrapper, ops); int _temp, ret; ret = wrapper->driver_ops->get_crit_temp(dev, &_temp); if (!ret) *temp = (unsigned long)_temp; return ret; } #if LINUX_VERSION_IS_GEQ(3, 19, 0) static int backport_thermal_set_emul_temp(struct thermal_zone_device *dev, unsigned long temp) { struct backport_thermal_ops_wrapper *wrapper = container_of(dev->ops, struct backport_thermal_ops_wrapper, ops); return wrapper->driver_ops->set_emul_temp(dev, (int)temp); } #endif /* LINUX_VERSION_IS_GEQ(3, 19, 0) */ #else /* !CONFIG_BTNS_PMIC */ static int backport_thermal_get_temp(struct thermal_zone_device *dev, long *temp) { struct backport_thermal_ops_wrapper *wrapper = container_of(dev->ops, struct backport_thermal_ops_wrapper, ops); int _temp, ret; ret = wrapper->driver_ops->get_temp(dev, &_temp); if (!ret) *temp = (long)_temp; return ret; } static int backport_thermal_get_trip_temp(struct thermal_zone_device *dev, int i, long *temp) { struct 
backport_thermal_ops_wrapper *wrapper = container_of(dev->ops, struct backport_thermal_ops_wrapper, ops); int _temp, ret; ret = wrapper->driver_ops->get_trip_temp(dev, i, &_temp); if (!ret) *temp = (long)_temp; return ret; } static int backport_thermal_set_trip_temp(struct thermal_zone_device *dev, int i, long temp) { struct backport_thermal_ops_wrapper *wrapper = container_of(dev->ops, struct backport_thermal_ops_wrapper, ops); return wrapper->driver_ops->set_trip_temp(dev, i, (int)temp); } static int backport_thermal_get_trip_hyst(struct thermal_zone_device *dev, int i, long *temp) { struct backport_thermal_ops_wrapper *wrapper = container_of(dev->ops, struct backport_thermal_ops_wrapper, ops); int _temp, ret; ret = wrapper->driver_ops->get_trip_hyst(dev, i, &_temp); if (!ret) *temp = (long)_temp; return ret; } static int backport_thermal_set_trip_hyst(struct thermal_zone_device *dev, int i, long temp) { struct backport_thermal_ops_wrapper *wrapper = container_of(dev->ops, struct backport_thermal_ops_wrapper, ops); return wrapper->driver_ops->set_trip_hyst(dev, i, (int)temp); } static int backport_thermal_get_crit_temp(struct thermal_zone_device *dev, long *temp) { struct backport_thermal_ops_wrapper *wrapper = container_of(dev->ops, struct backport_thermal_ops_wrapper, ops); int _temp, ret; ret = wrapper->driver_ops->get_crit_temp(dev, &_temp); if (!ret) *temp = (long)_temp; return ret; } static int backport_thermal_set_emul_temp(struct thermal_zone_device *dev, unsigned long temp) { struct backport_thermal_ops_wrapper *wrapper = container_of(dev->ops, struct backport_thermal_ops_wrapper, ops); return wrapper->driver_ops->set_emul_temp(dev, (int)temp); } #endif /* !CONFIG_BTNS_PMIC */ struct thermal_zone_device *backport_thermal_zone_device_register( const char *type, int trips, int mask, void *devdata, struct thermal_zone_device_ops *ops, const struct thermal_zone_params *tzp, int passive_delay, int polling_delay) { struct backport_thermal_ops_wrapper *wrapper = kzalloc(sizeof(*wrapper), GFP_KERNEL); struct thermal_zone_device *ret; if (!wrapper) return NULL; wrapper->driver_ops = ops; #define copy(_op) \ wrapper->ops._op = ops->_op copy(bind); copy(unbind); copy(get_mode); copy(set_mode); copy(get_trip_type); copy(get_trend); copy(notify); #ifdef CONFIG_BTNS_PMIC copy(get_slope); copy(set_slope); copy(get_intercept); copy(set_intercept); #endif /* Assign the backport ops to the old struct to get the * correct types. But only assign if the registrant defined * the ops. 
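 *
 * Runtime call flow (a description of the shims above): the old thermal
 * core invokes, e.g., wrapper->ops.get_temp(dev, &ulong_temp); the shim
 * recovers the wrapper with container_of(dev->ops, struct
 * backport_thermal_ops_wrapper, ops), calls the driver's int-based
 * callback through wrapper->driver_ops, and converts the result to the
 * integer type the old core expects.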
*/ #define assign_ops(_op) \ if (ops->_op) \ wrapper->ops._op = backport_thermal_##_op assign_ops(get_temp); assign_ops(get_trip_temp); assign_ops(set_trip_temp); assign_ops(get_trip_hyst); assign_ops(set_trip_hyst); assign_ops(get_crit_temp); #if LINUX_VERSION_IS_GEQ(3, 19, 0) assign_ops(set_emul_temp); #endif /* LINUX_VERSION_IS_GEQ(3, 19, 0) */ #undef assign_ops ret = old_thermal_zone_device_register(type, trips, mask, devdata, &wrapper->ops, tzp, passive_delay, polling_delay); if (!ret) kfree(wrapper); return ret; } EXPORT_SYMBOL_GPL(backport_thermal_zone_device_register); void backport_thermal_zone_device_unregister(struct thermal_zone_device *dev) { struct backport_thermal_ops_wrapper *wrapper = container_of(dev->ops, struct backport_thermal_ops_wrapper, ops); old_thermal_zone_device_unregister(dev); kfree(wrapper); } EXPORT_SYMBOL_GPL(backport_thermal_zone_device_unregister); #endif /* >= 3.8.0 */ #endif /* CONFIG_THERMAL */ static void seq_set_overflow(struct seq_file *m) { m->count = m->size; } /* A complete analogue of print_hex_dump() */ void seq_hex_dump(struct seq_file *m, const char *prefix_str, int prefix_type, int rowsize, int groupsize, const void *buf, size_t len, bool ascii) { const u8 *ptr = buf; int i, linelen, remaining = len; int ret; if (rowsize != 16 && rowsize != 32) rowsize = 16; for (i = 0; i < len && !seq_has_overflowed(m); i += rowsize) { linelen = min(remaining, rowsize); remaining -= rowsize; switch (prefix_type) { case DUMP_PREFIX_ADDRESS: seq_printf(m, "%s%p: ", prefix_str, ptr + i); break; case DUMP_PREFIX_OFFSET: seq_printf(m, "%s%.8x: ", prefix_str, i); break; default: seq_printf(m, "%s", prefix_str); break; } ret = hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize, m->buf + m->count, m->size - m->count, ascii); if (ret >= m->size - m->count) { seq_set_overflow(m); } else { m->count += ret; seq_putc(m, '\n'); } } } EXPORT_SYMBOL_GPL(seq_hex_dump); ssize_t strscpy(char *dest, const char *src, size_t count) { long res = 0; if (count == 0) return -E2BIG; while (count) { char c; c = src[res]; dest[res] = c; if (!c) return res; res++; count--; } /* Hit buffer length without finding a NUL; force NUL-termination. */ if (res) dest[res-1] = '\0'; return -E2BIG; } EXPORT_SYMBOL_GPL(strscpy); static void *device_get_mac_addr(struct device *dev, const char *name, char *addr, int alen) { #if LINUX_VERSION_IS_GEQ(3,18,0) int ret = device_property_read_u8_array(dev, name, addr, alen); #else int ret = of_property_read_u8_array(dev->of_node, name, addr, alen); #endif if (ret == 0 && alen == ETH_ALEN && is_valid_ether_addr(addr)) return addr; return NULL; } /** * device_get_mac_address - Get the MAC for a given device * @dev: Pointer to the device * @addr: Address of buffer to store the MAC in * @alen: Length of the buffer pointed to by addr, should be ETH_ALEN * * Search the firmware node for the best MAC address to use. 'mac-address' is * checked first, because that is supposed to contain the "most recent" MAC * address. If that isn't set, then 'local-mac-address' is checked next, * because that is the default address. If that isn't set, then the obsolete * 'address' is checked, just in case we're using an old device tree. * * Note that the 'address' property is supposed to contain a virtual address of * the register set, but some DTS files have redefined that property to be the * MAC address. * * All-zero MAC addresses are rejected, because those could be properties that * exist in the firmware tables, but were not updated by the firmware.
For * example, the DTS could define 'mac-address' and 'local-mac-address', with * zero MAC addresses. Some older U-Boots only initialized 'local-mac-address'. * In this case, the real MAC is in 'local-mac-address', and 'mac-address' * exists but is all zeros. */ void *device_get_mac_address(struct device *dev, char *addr, int alen) { char *res; res = device_get_mac_addr(dev, "mac-address", addr, alen); if (res) return res; res = device_get_mac_addr(dev, "local-mac-address", addr, alen); if (res) return res; return device_get_mac_addr(dev, "address", addr, alen); } EXPORT_SYMBOL_GPL(device_get_mac_address); backport-iwlwifi-dkms-7906/compat/backport-4.4.c000066400000000000000000000101501351064634500214360ustar00rootroot00000000000000/* * Copyright(c) 2015 Intel Deutschland GmbH * * Backport functionality introduced in Linux 4.4. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include #include #include #include #include #include #include #include #include #include #include #ifdef CONFIG_DEBUG_FS #if LINUX_VERSION_IS_LESS(4,3,0) && \ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6) static ssize_t debugfs_read_file_bool(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { char buf[3]; bool *val = file->private_data; if (*val) buf[0] = 'Y'; else buf[0] = 'N'; buf[1] = '\n'; buf[2] = 0x00; return simple_read_from_buffer(user_buf, count, ppos, buf, 2); } static ssize_t debugfs_write_file_bool(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { char buf[32]; size_t buf_size; bool bv; bool *val = file->private_data; buf_size = min(count, (sizeof(buf)-1)); if (copy_from_user(buf, user_buf, buf_size)) return -EFAULT; buf[buf_size] = '\0'; if (strtobool(buf, &bv) == 0) *val = bv; return count; } #endif /* < 4.3.0 */ static const struct file_operations fops_bool = { .read = debugfs_read_file_bool, .write = debugfs_write_file_bool, .open = simple_open, .llseek = default_llseek, }; struct dentry *debugfs_create_bool(const char *name, umode_t mode, struct dentry *parent, bool *value) { return debugfs_create_file(name, mode, parent, value, &fops_bool); } EXPORT_SYMBOL_GPL(debugfs_create_bool); #endif /* CONFIG_DEBUG_FS */ /* Calculate expected number of TX descriptors */ int tso_count_descs(struct sk_buff *skb) { /* The Marvell Way */ return skb_shinfo(skb)->gso_segs * 2 + skb_shinfo(skb)->nr_frags; } EXPORT_SYMBOL(tso_count_descs); void tso_build_hdr(struct sk_buff *skb, char *hdr, struct tso_t *tso, int size, bool is_last) { struct tcphdr *tcph; int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); int mac_hdr_len = skb_network_offset(skb); memcpy(hdr, skb->data, hdr_len); if (!tso->ipv6) { struct iphdr *iph = (void *)(hdr + mac_hdr_len); iph->id = htons(tso->ip_id); iph->tot_len = htons(size + hdr_len - mac_hdr_len); tso->ip_id++; } else { #ifdef CONFIG_IPV6 struct ipv6hdr *iph = (void *)(hdr + mac_hdr_len); iph->payload_len = htons(size + tcp_hdrlen(skb)); #else /* CONFIG_IPV6 */ /* tso->ipv6 should never be set if IPV6 is not enabled */ WARN_ON(1); #endif /* CONFIG_IPV6 */ } tcph = (struct tcphdr *)(hdr + skb_transport_offset(skb)); put_unaligned_be32(tso->tcp_seq, &tcph->seq); if (!is_last) { /* Clear all special flags for non-last packets */ tcph->psh = 0; tcph->fin = 0; tcph->rst = 0; } } EXPORT_SYMBOL(tso_build_hdr); void tso_build_data(struct sk_buff *skb, struct tso_t *tso, int size) { tso->tcp_seq += size;
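	/* Advance the TSO cursor: the caller has emitted 'size' bytes of
	 * payload, so move the TCP sequence number forward, shrink what is
	 * left of the current chunk, and, once it is exhausted, step into
	 * the next page fragment.
	 */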
tso->size -= size; tso->data += size; if ((tso->size == 0) && (tso->next_frag_idx < skb_shinfo(skb)->nr_frags)) { skb_frag_t *frag = &skb_shinfo(skb)->frags[tso->next_frag_idx]; /* Move to next segment */ tso->size = frag->size; tso->data = page_address(skb_frag_page(frag)) + frag->page_offset; tso->next_frag_idx++; } } EXPORT_SYMBOL(tso_build_data); void tso_start(struct sk_buff *skb, struct tso_t *tso) { int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); tso->ip_id = ntohs(ip_hdr(skb)->id); tso->tcp_seq = ntohl(tcp_hdr(skb)->seq); tso->next_frag_idx = 0; tso->ipv6 = vlan_get_protocol(skb) == htons(ETH_P_IPV6); /* Build first data */ tso->size = skb_headlen(skb) - hdr_len; tso->data = skb->data + hdr_len; if ((tso->size == 0) && (tso->next_frag_idx < skb_shinfo(skb)->nr_frags)) { skb_frag_t *frag = &skb_shinfo(skb)->frags[tso->next_frag_idx]; /* Move to next segment */ tso->size = frag->size; tso->data = page_address(skb_frag_page(frag)) + frag->page_offset; tso->next_frag_idx++; } } EXPORT_SYMBOL(tso_start); backport-iwlwifi-dkms-7906/compat/backport-4.5.c000066400000000000000000000066321351064634500214500ustar00rootroot00000000000000/* * Copyright(c) 2015 Hauke Mehrtens * * Backport functionality introduced in Linux 4.5. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include #include #include #include #include #include #include #include #include #include #if LINUX_VERSION_IS_GEQ(3,19,0) int led_set_brightness_sync(struct led_classdev *led_cdev, enum led_brightness value) { if (led_cdev->blink_delay_on || led_cdev->blink_delay_off) return -EBUSY; led_cdev->brightness = min(value, led_cdev->max_brightness); if (led_cdev->flags & LED_SUSPENDED) return 0; if (led_cdev->brightness_set_sync) return led_cdev->brightness_set_sync(led_cdev, led_cdev->brightness); return -ENOTSUPP; } EXPORT_SYMBOL_GPL(led_set_brightness_sync); #endif /* >= 3.19 */ #if LINUX_VERSION_IS_GEQ(3,2,0) /** * no_seek_end_llseek - llseek implementation for fixed-sized devices * @file: file structure to seek on * @offset: file offset to seek to * @whence: type of seek * */ loff_t no_seek_end_llseek(struct file *file, loff_t offset, int whence) { switch (whence) { case SEEK_SET: case SEEK_CUR: #if LINUX_VERSION_IS_GEQ(3,6,0) return generic_file_llseek_size(file, offset, whence, ~0ULL, 0); #else return generic_file_llseek_size(file, offset, whence, ~0ULL); #endif default: return -EINVAL; } } EXPORT_SYMBOL_GPL(no_seek_end_llseek); #endif /* >= 3.2 */ /** * memdup_user_nul - duplicate memory region from user space and NUL-terminate * * @src: source address in user space * @len: number of bytes to copy * * Returns an ERR_PTR() on failure. */ void *memdup_user_nul(const void __user *src, size_t len) { char *p; /* * Always use GFP_KERNEL, since copy_from_user() can sleep and * cause pagefault, which makes it pointless to use GFP_NOFS * or GFP_ATOMIC. */ p = kmalloc(len + 1, GFP_KERNEL); if (!p) return ERR_PTR(-ENOMEM); if (copy_from_user(p, src, len)) { kfree(p); return ERR_PTR(-EFAULT); } p[len] = '\0'; return p; } EXPORT_SYMBOL_GPL(memdup_user_nul); void phy_attached_info(struct phy_device *phydev) { phy_attached_print(phydev, NULL); } EXPORT_SYMBOL_GPL(phy_attached_info); #define ATTACHED_FMT "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)" void phy_attached_print(struct phy_device *phydev, const char *fmt, ...) 
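/* With fmt == NULL this prints the standard one-line attachment banner;
 * otherwise the banner is printed without a trailing newline and the
 * caller's format string is appended via vprintk().
 */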
{ if (!fmt) { dev_info(&phydev->dev, ATTACHED_FMT "\n", phydev->drv->name, phydev_name(phydev), phydev->irq); } else { va_list ap; dev_info(&phydev->dev, ATTACHED_FMT, phydev->drv->name, phydev_name(phydev), phydev->irq); va_start(ap, fmt); vprintk(fmt, ap); va_end(ap); } } EXPORT_SYMBOL_GPL(phy_attached_print); static void devm_led_trigger_release(struct device *dev, void *res) { led_trigger_unregister(*(struct led_trigger **)res); } int devm_led_trigger_register(struct device *dev, struct led_trigger *trig) { struct led_trigger **dr; int rc; dr = devres_alloc(devm_led_trigger_release, sizeof(*dr), GFP_KERNEL); if (!dr) return -ENOMEM; *dr = trig; rc = led_trigger_register(trig); if (rc) devres_free(dr); else devres_add(dev, dr); return rc; } EXPORT_SYMBOL_GPL(devm_led_trigger_register); backport-iwlwifi-dkms-7906/compat/backport-4.6.c000066400000000000000000000044051351064634500214450ustar00rootroot00000000000000/* * Copyright(c) 2016 Hauke Mehrtens * * Backport functionality introduced in Linux 4.6. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include #include #include /** * kstrtobool - convert common user inputs into boolean values * @s: input string * @res: result * * This routine returns 0 iff the first character is one of 'Yy1Nn0', or * [oO][NnFf] for "on" and "off". Otherwise it will return -EINVAL. Value * pointed to by res is updated upon finding a match. */ int kstrtobool(const char *s, bool *res) { if (!s) return -EINVAL; switch (s[0]) { case 'y': case 'Y': case '1': *res = true; return 0; case 'n': case 'N': case '0': *res = false; return 0; case 'o': case 'O': switch (s[1]) { case 'n': case 'N': *res = true; return 0; case 'f': case 'F': *res = false; return 0; default: break; } default: break; } return -EINVAL; } EXPORT_SYMBOL_GPL(kstrtobool); /* * Since "base" would be a nonsense argument, this open-codes the * _from_user helper instead of using the helper macro below. */ int kstrtobool_from_user(const char __user *s, size_t count, bool *res) { /* Longest string needed to differentiate, newline, terminator */ char buf[4]; count = min(count, sizeof(buf) - 1); if (copy_from_user(buf, s, count)) return -EFAULT; buf[count] = '\0'; return kstrtobool(buf, res); } EXPORT_SYMBOL_GPL(kstrtobool_from_user); #if RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6) /** * match_string - matches given string in an array * @array: array of strings * @n: number of strings in the array or -1 for NULL terminated arrays * @string: string to match with * * Return: * index of a @string in the @array if matches, or %-EINVAL otherwise. */ int match_string(const char * const *array, size_t n, const char *string) { int index; const char *item; for (index = 0; index < n; index++) { item = array[index]; if (!item) break; if (!strcmp(item, string)) return index; } return -EINVAL; } EXPORT_SYMBOL(match_string); #endif /* RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6) */ backport-iwlwifi-dkms-7906/compat/backport-4.7.c000066400000000000000000000124741351064634500214530ustar00rootroot00000000000000/* * Copyright(c) 2016 Hauke Mehrtens * * Backport functionality introduced in Linux 4.7. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. 
*/ #include #include #include #include #include #include #include /** * __nla_reserve_64bit - reserve room for attribute on the skb and align it * @skb: socket buffer to reserve room on * @attrtype: attribute type * @attrlen: length of attribute payload * * Adds a netlink attribute header to a socket buffer and reserves * room for the payload but does not copy it. It also ensures that this * attribute will be 64-bit aligned. * * The caller is responsible to ensure that the skb provides enough * tailroom for the attribute header and payload. */ struct nlattr *__nla_reserve_64bit(struct sk_buff *skb, int attrtype, int attrlen, int padattr) { if (nla_need_padding_for_64bit(skb)) nla_align_64bit(skb, padattr); return __nla_reserve(skb, attrtype, attrlen); } EXPORT_SYMBOL_GPL(__nla_reserve_64bit); /** * nla_reserve_64bit - reserve room for attribute on the skb and align it * @skb: socket buffer to reserve room on * @attrtype: attribute type * @attrlen: length of attribute payload * * Adds a netlink attribute header to a socket buffer and reserves * room for the payload but does not copy it. It also ensures that this * attribute will be 64-bit aligned. * * Returns NULL if the tailroom of the skb is insufficient to store * the attribute header and payload. */ struct nlattr *nla_reserve_64bit(struct sk_buff *skb, int attrtype, int attrlen, int padattr) { size_t len; if (nla_need_padding_for_64bit(skb)) len = nla_total_size_64bit(attrlen); else len = nla_total_size(attrlen); if (unlikely(skb_tailroom(skb) < len)) return NULL; return __nla_reserve_64bit(skb, attrtype, attrlen, padattr); } EXPORT_SYMBOL_GPL(nla_reserve_64bit); /** * __nla_put_64bit - Add a netlink attribute to a socket buffer and align it * @skb: socket buffer to add attribute to * @attrtype: attribute type * @attrlen: length of attribute payload * @data: head of attribute payload * * The caller is responsible to ensure that the skb provides enough * tailroom for the attribute header and payload. */ void __nla_put_64bit(struct sk_buff *skb, int attrtype, int attrlen, const void *data, int padattr) { struct nlattr *nla; nla = __nla_reserve_64bit(skb, attrtype, attrlen, padattr); memcpy(nla_data(nla), data, attrlen); } EXPORT_SYMBOL_GPL(__nla_put_64bit); /** * nla_put_64bit - Add a netlink attribute to a socket buffer and align it * @skb: socket buffer to add attribute to * @attrtype: attribute type * @attrlen: length of attribute payload * @data: head of attribute payload * * Returns -EMSGSIZE if the tailroom of the skb is insufficient to store * the attribute header and payload. */ int nla_put_64bit(struct sk_buff *skb, int attrtype, int attrlen, const void *data, int padattr) { size_t len; if (nla_need_padding_for_64bit(skb)) len = nla_total_size_64bit(attrlen); else len = nla_total_size(attrlen); if (unlikely(skb_tailroom(skb) < len)) return -EMSGSIZE; __nla_put_64bit(skb, attrtype, attrlen, data, padattr); return 0; } EXPORT_SYMBOL_GPL(nla_put_64bit); /* * Below 3.18, or if the kernel has devcoredump disabled, we carry a copy of * the entire devcoredump implementation, so there is no need to define these * functions here.
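 *
 * Typical use of dev_coredumpsg() below (an illustrative sketch only;
 * build_dump_sgtable() stands in for caller-side code that assembles a
 * possibly-chained scatterlist of dump pages, it is not part of this file):
 *
 *	struct scatterlist *table = build_dump_sgtable(fw, &dump_len);
 *	if (table)
 *		dev_coredumpsg(dev, table, dump_len, GFP_KERNEL);
 *
 * Ownership of the table and its pages passes to the devcoredump core,
 * which releases them via devcd_free_sgtable() once the dump is read or
 * times out.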
*/ #if (LINUX_VERSION_IS_GEQ(3,18,0) || \ RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,6)) && \ !defined(CPTCFG_BPAUTO_BUILD_WANT_DEV_COREDUMP) #include #include static void devcd_free_sgtable(void *data) { struct scatterlist *table = data; int i; struct page *page; struct scatterlist *iter; struct scatterlist *delete_iter; /* free pages */ iter = table; for_each_sg(table, iter, sg_nents(table), i) { page = sg_page(iter); if (page) __free_page(page); } /* then free all chained tables */ iter = table; delete_iter = table; /* always points on a head of a table */ while (!sg_is_last(iter)) { iter++; if (sg_is_chain(iter)) { iter = sg_chain_ptr(iter); kfree(delete_iter); delete_iter = iter; } } /* free the last table */ kfree(delete_iter); } static ssize_t devcd_read_from_sgtable(char *buffer, loff_t offset, size_t buf_len, void *data, size_t data_len) { struct scatterlist *table = data; if (offset > data_len) return -EINVAL; if (offset + buf_len > data_len) buf_len = data_len - offset; return sg_pcopy_to_buffer(table, sg_nents(table), buffer, buf_len, offset); } void dev_coredumpsg(struct device *dev, struct scatterlist *table, size_t datalen, gfp_t gfp) { dev_coredumpm(dev, THIS_MODULE, table, datalen, gfp, /* cast away some const problems */ (void *)devcd_read_from_sgtable, (void *)devcd_free_sgtable); } EXPORT_SYMBOL_GPL(dev_coredumpsg); #endif /* >= 3.18.0 */ backport-iwlwifi-dkms-7906/compat/backport-4.8.c000066400000000000000000000131321351064634500214440ustar00rootroot00000000000000/* * Copyright(c) 2017 Intel Deutschland GmbH * * Backport functionality introduced in Linux 4.8. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. 
*/ #include #include #include int cdc_parse_cdc_header(struct usb_cdc_parsed_header *hdr, struct usb_interface *intf, u8 *buffer, int buflen) { /* duplicates are ignored */ struct usb_cdc_union_desc *union_header = NULL; /* duplicates are not tolerated */ struct usb_cdc_header_desc *header = NULL; struct usb_cdc_ether_desc *ether = NULL; struct usb_cdc_mdlm_detail_desc *detail = NULL; struct usb_cdc_mdlm_desc *desc = NULL; unsigned int elength; int cnt = 0; memset(hdr, 0x00, sizeof(struct usb_cdc_parsed_header)); hdr->phonet_magic_present = false; while (buflen > 0) { elength = buffer[0]; if (!elength) { dev_err(&intf->dev, "skipping garbage byte\n"); elength = 1; goto next_desc; } if (buffer[1] != USB_DT_CS_INTERFACE) { dev_err(&intf->dev, "skipping garbage\n"); goto next_desc; } switch (buffer[2]) { case USB_CDC_UNION_TYPE: /* we've found it */ if (elength < sizeof(struct usb_cdc_union_desc)) goto next_desc; if (union_header) { dev_err(&intf->dev, "More than one union descriptor, skipping ...\n"); goto next_desc; } union_header = (struct usb_cdc_union_desc *)buffer; break; case USB_CDC_COUNTRY_TYPE: if (elength < sizeof(struct usb_cdc_country_functional_desc)) goto next_desc; hdr->usb_cdc_country_functional_desc = (struct usb_cdc_country_functional_desc *)buffer; break; case USB_CDC_HEADER_TYPE: if (elength != sizeof(struct usb_cdc_header_desc)) goto next_desc; if (header) return -EINVAL; header = (struct usb_cdc_header_desc *)buffer; break; case USB_CDC_ACM_TYPE: if (elength < sizeof(struct usb_cdc_acm_descriptor)) goto next_desc; hdr->usb_cdc_acm_descriptor = (struct usb_cdc_acm_descriptor *)buffer; break; case USB_CDC_ETHERNET_TYPE: if (elength != sizeof(struct usb_cdc_ether_desc)) goto next_desc; if (ether) return -EINVAL; ether = (struct usb_cdc_ether_desc *)buffer; break; case USB_CDC_CALL_MANAGEMENT_TYPE: if (elength < sizeof(struct usb_cdc_call_mgmt_descriptor)) goto next_desc; hdr->usb_cdc_call_mgmt_descriptor = (struct usb_cdc_call_mgmt_descriptor *)buffer; break; case USB_CDC_DMM_TYPE: if (elength < sizeof(struct usb_cdc_dmm_desc)) goto next_desc; hdr->usb_cdc_dmm_desc = (struct usb_cdc_dmm_desc *)buffer; break; case USB_CDC_MDLM_TYPE: if (elength < sizeof(struct usb_cdc_mdlm_desc)) goto next_desc; if (desc) return -EINVAL; desc = (struct usb_cdc_mdlm_desc *)buffer; break; case USB_CDC_MDLM_DETAIL_TYPE: if (elength < sizeof(struct usb_cdc_mdlm_detail_desc)) goto next_desc; if (detail) return -EINVAL; detail = (struct usb_cdc_mdlm_detail_desc *)buffer; break; case USB_CDC_NCM_TYPE: if (elength < sizeof(struct usb_cdc_ncm_desc)) goto next_desc; hdr->usb_cdc_ncm_desc = (struct usb_cdc_ncm_desc *)buffer; break; case USB_CDC_MBIM_TYPE: if (elength < sizeof(struct usb_cdc_mbim_desc)) goto next_desc; hdr->usb_cdc_mbim_desc = (struct usb_cdc_mbim_desc *)buffer; break; case USB_CDC_MBIM_EXTENDED_TYPE: if (elength < sizeof(struct usb_cdc_mbim_extended_desc)) break; hdr->usb_cdc_mbim_extended_desc = (struct usb_cdc_mbim_extended_desc *)buffer; break; case CDC_PHONET_MAGIC_NUMBER: hdr->phonet_magic_present = true; break; default: /* * there are LOTS more CDC descriptors that * could legitimately be found here.
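 *
 * Note that every class-specific descriptor handled above shares one
 * layout: buffer[0] is bFunctionLength, buffer[1] is bDescriptorType
 * (always USB_DT_CS_INTERFACE here) and buffer[2] is bDescriptorSubType,
 * which is why those three bytes drive the parsing loop and the switch.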
*/ dev_dbg(&intf->dev, "Ignoring descriptor: type %02x, length %u\n", buffer[2], elength); goto next_desc; } cnt++; next_desc: buflen -= elength; buffer += elength; } hdr->usb_cdc_union_desc = union_header; hdr->usb_cdc_header_desc = header; hdr->usb_cdc_mdlm_detail_desc = detail; hdr->usb_cdc_mdlm_desc = desc; hdr->usb_cdc_ether_desc = ether; return cnt; } EXPORT_SYMBOL_GPL(cdc_parse_cdc_header); #ifdef CONFIG_PCI #ifdef CONFIG_PCI_MSI /** * pci_alloc_irq_vectors - allocate multiple IRQs for a device * @dev: PCI device to operate on * @min_vecs: minimum number of vectors required (must be >= 1) * @max_vecs: maximum (desired) number of vectors * @flags: flags or quirks for the allocation * * Allocate up to @max_vecs interrupt vectors for @dev, using MSI-X or MSI * vectors if available, and fall back to a single legacy vector * if neither is available. Return the number of vectors allocated * (which might be smaller than @max_vecs) if successful, or a negative * error code on error. If less than @min_vecs interrupt vectors are * available for @dev the function will fail with -ENOSPC. * * To get the Linux IRQ number used for a vector that can be passed to * request_irq() use the pci_irq_vector() helper. */ int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs, unsigned int max_vecs, unsigned int flags) { int vecs = -ENOSPC; if (flags & PCI_IRQ_MSIX) { vecs = pci_enable_msix_range(dev, NULL, min_vecs, max_vecs); if (vecs > 0) return vecs; } if (flags & PCI_IRQ_MSI) { vecs = pci_enable_msi_range(dev, min_vecs, max_vecs); if (vecs > 0) return vecs; } /* use legacy irq if allowed */ if ((flags & PCI_IRQ_LEGACY) && min_vecs == 1) { pci_intx(dev, 1); return 1; } return vecs; } EXPORT_SYMBOL_GPL(pci_alloc_irq_vectors); #endif /* CONFIG_PCI_MSI */ #endif /* CONFIG_PCI */ backport-iwlwifi-dkms-7906/compat/backport-genetlink.c000066400000000000000000000254441351064634500231240ustar00rootroot00000000000000/* * Copyright 2017 Intel Deutschland GmbH * Copyright (C) 2018 Intel Corporation * * Backport functionality introduced in Linux 4.20. * Much of this is based on upstream lib/nlattr.c. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include #include #include #include #include #include #include static const struct genl_family *find_family_real_ops(__genl_const struct genl_ops **ops) { const struct genl_family *family; __genl_const struct genl_ops *tmp_ops = *ops; /* find the family ...
*/ while (tmp_ops->doit || tmp_ops->dumpit) tmp_ops++; family = (void *)tmp_ops->done; /* cast to suppress const warning */ *ops = (void *)(family->ops + (*ops - family->copy_ops)); return family; } #if LINUX_VERSION_IS_LESS(4,12,0) enum nlmsgerr_attrs { NLMSGERR_ATTR_UNUSED, NLMSGERR_ATTR_MSG, NLMSGERR_ATTR_OFFS, NLMSGERR_ATTR_COOKIE, __NLMSGERR_ATTR_MAX, NLMSGERR_ATTR_MAX = __NLMSGERR_ATTR_MAX - 1 }; #define NLM_F_CAPPED 0x100 /* request was capped */ #define NLM_F_ACK_TLVS 0x200 /* extended ACK TLVs were included */ static void extack_netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err, const struct netlink_ext_ack *extack) { struct sk_buff *skb; struct nlmsghdr *rep; struct nlmsgerr *errmsg; size_t payload = sizeof(*errmsg); size_t tlvlen = 0; unsigned int flags = 0; /* backports assumes everyone supports this - libnl does, so it's true */ bool nlk_has_extack = true; /* Error messages get the original request appended, unless the user * requests to cap the error message, and get extra error data if * requested. * (ignored in backports) */ if (nlk_has_extack && extack && extack->_msg) tlvlen += nla_total_size(strlen(extack->_msg) + 1); if (err) { if (1) payload += nlmsg_len(nlh); else flags |= NLM_F_CAPPED; if (nlk_has_extack && extack && extack->bad_attr) tlvlen += nla_total_size(sizeof(u32)); } else { flags |= NLM_F_CAPPED; if (nlk_has_extack && extack && extack->cookie_len) tlvlen += nla_total_size(extack->cookie_len); } if (tlvlen) flags |= NLM_F_ACK_TLVS; skb = nlmsg_new(payload + tlvlen, GFP_KERNEL); if (!skb) { NETLINK_CB(in_skb).sk->sk_err = ENOBUFS; NETLINK_CB(in_skb).sk->sk_error_report(NETLINK_CB(in_skb).sk); return; } rep = __nlmsg_put(skb, NETLINK_CB_PORTID(in_skb), nlh->nlmsg_seq, NLMSG_ERROR, payload, flags); errmsg = nlmsg_data(rep); errmsg->error = err; memcpy(&errmsg->msg, nlh, payload > sizeof(*errmsg) ?
nlh->nlmsg_len : sizeof(*nlh)); if (nlk_has_extack && extack) { if (extack->_msg) { WARN_ON(nla_put_string(skb, NLMSGERR_ATTR_MSG, extack->_msg)); } if (err) { if (extack->bad_attr && !WARN_ON((u8 *)extack->bad_attr < in_skb->data || (u8 *)extack->bad_attr >= in_skb->data + in_skb->len)) WARN_ON(nla_put_u32(skb, NLMSGERR_ATTR_OFFS, (u8 *)extack->bad_attr - in_skb->data)); } else { if (extack->cookie_len) WARN_ON(nla_put(skb, NLMSGERR_ATTR_COOKIE, extack->cookie_len, extack->cookie)); } } nlmsg_end(skb, rep); netlink_unicast(in_skb->sk, skb, NETLINK_CB_PORTID(in_skb), MSG_DONTWAIT); } static int extack_doit(struct sk_buff *skb, struct genl_info *info) { int (*doit)(struct sk_buff *, struct genl_info *); int err; doit = genl_info_extack(info)->__bp_doit; /* signal from our pre_doit to not do anything */ if (!doit) return 0; err = doit(skb, info); if (err == -EINTR) return err; if (info->nlhdr->nlmsg_flags & NLM_F_ACK || err) extack_netlink_ack(skb, info->nlhdr, err, genl_info_extack(info)); /* suppress sending ACK from normal netlink code */ info->nlhdr->nlmsg_flags &= ~NLM_F_ACK; return 0; } #endif /* LINUX_VERSION_IS_LESS(4,12,0) */ static int backport_pre_doit(__genl_const struct genl_ops *ops, struct sk_buff *skb, struct genl_info *info) { const struct genl_family *family = find_family_real_ops(&ops); int err; #if LINUX_VERSION_IS_LESS(4,12,0) struct netlink_ext_ack *extack = kzalloc(sizeof(*extack), GFP_KERNEL); if (!extack) return -ENOMEM; __bp_genl_info_userhdr_set(info, extack); extack->__bp_doit = ops->doit; #else struct netlink_ext_ack *extack = genl_info_extack(info); #endif err = nlmsg_validate(info->nlhdr, GENL_HDRLEN + family->hdrsize, family->maxattr, family->policy, extack); if (!err && family->pre_doit) err = family->pre_doit(ops, skb, info); #if LINUX_VERSION_IS_LESS(4,12,0) if (err) { /* signal to do nothing */ extack->__bp_doit = NULL; extack_netlink_ack(skb, info->nlhdr, err, extack); /* suppress sending ACK from normal netlink code */ info->nlhdr->nlmsg_flags &= ~NLM_F_ACK; /* extack will be freed in post_doit as usual */ return 0; } #endif return err; } static void backport_post_doit(__genl_const struct genl_ops *ops, struct sk_buff *skb, struct genl_info *info) { const struct genl_family *family = find_family_real_ops(&ops); #if LINUX_VERSION_IS_LESS(4,12,0) if (genl_info_extack(info)->__bp_doit) #else if (1) #endif if (family->post_doit) family->post_doit(ops, skb, info); #if LINUX_VERSION_IS_LESS(4,12,0) kfree(__bp_genl_info_userhdr(info)); #endif } int backport_genl_register_family(struct genl_family *family) { struct genl_ops *ops; int err, i; #define COPY(memb) family->family.memb = family->memb #define BACK(memb) family->memb = family->family.memb /* we append one entry to the ops to find our family pointer ... 
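 * The extra zeroed entry acts as a terminator: a real op must have doit
 * or dumpit set, so find_family_real_ops() can walk until both are NULL,
 * and the terminator's ->done field smuggles the family pointer so the
 * family can be recovered from an ops pointer alone.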
*/ ops = kzalloc(sizeof(*ops) * (family->n_ops + 1), GFP_KERNEL); if (!ops) return -ENOMEM; memcpy(ops, family->ops, sizeof(*ops) * family->n_ops); /* * Remove policy to skip validation as the struct nla_policy * memory layout isn't compatible with the old version */ for (i = 0; i < family->n_ops; i++) { ops[i].policy = NULL; #if LINUX_VERSION_IS_LESS(4,12,0) if (ops[i].doit) ops[i].doit = extack_doit; #endif } /* keep doit/dumpit NULL - that's invalid */ ops[family->n_ops].done = (void *)family; COPY(id); memcpy(family->family.name, family->name, sizeof(family->name)); COPY(hdrsize); COPY(version); COPY(maxattr); COPY(netnsok); #if LINUX_VERSION_IS_GEQ(3,10,0) COPY(parallel_ops); #endif family->family.pre_doit = backport_pre_doit; family->family.post_doit = backport_post_doit; /* attrbuf is output only */ family->copy_ops = ops; #if LINUX_VERSION_IS_GEQ(3,13,0) || \ RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,6) family->family.ops = ops; COPY(mcgrps); COPY(n_ops); COPY(n_mcgrps); #endif #if LINUX_VERSION_IS_GEQ(3,11,0) COPY(module); #endif err = __real_backport_genl_register_family(&family->family); BACK(id); BACK(attrbuf); if (err) return err; #if LINUX_VERSION_IS_GEQ(3,13,0) || RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,0) return 0; #else for (i = 0; i < family->n_ops; i++) { err = genl_register_ops(&family->family, ops + i); if (err < 0) goto error; } for (i = 0; i < family->n_mcgrps; i++) { err = genl_register_mc_group(&family->family, &family->mcgrps[i]); if (err) goto error; } return 0; error: genl_unregister_family(family); return err; #endif /* LINUX_VERSION_IS_GEQ(3,13,0) || RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,0) */ } EXPORT_SYMBOL_GPL(backport_genl_register_family); int backport_genl_unregister_family(struct genl_family *family) { return __real_backport_genl_unregister_family(&family->family); } EXPORT_SYMBOL_GPL(backport_genl_unregister_family); #define INVALID_GROUP 0xffffffff static u32 __backport_genl_group(const struct genl_family *family, u32 group) { if (WARN_ON_ONCE(group >= family->n_mcgrps)) return INVALID_GROUP; #if LINUX_VERSION_IS_LESS(3,13,0) && \ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6) return family->mcgrps[group].id; #else return family->family.mcgrp_offset + group; #endif } void genl_notify(const struct genl_family *family, struct sk_buff *skb, struct genl_info *info, u32 group, gfp_t flags) { struct net *net = genl_info_net(info); struct sock *sk = net->genl_sock; int report = 0; if (info->nlhdr) report = nlmsg_report(info->nlhdr); group = __backport_genl_group(family, group); if (group == INVALID_GROUP) return; nlmsg_notify(sk, skb, genl_info_snd_portid(info), group, report, flags); } EXPORT_SYMBOL_GPL(genl_notify); void *genlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, const struct genl_family *family, int flags, u8 cmd) { struct nlmsghdr *nlh; struct genlmsghdr *hdr; nlh = nlmsg_put(skb, portid, seq, family->id, GENL_HDRLEN + family->hdrsize, flags); if (nlh == NULL) return NULL; hdr = nlmsg_data(nlh); hdr->cmd = cmd; hdr->version = family->version; hdr->reserved = 0; return (char *) hdr + GENL_HDRLEN; } EXPORT_SYMBOL_GPL(genlmsg_put); void *genlmsg_put_reply(struct sk_buff *skb, struct genl_info *info, const struct genl_family *family, int flags, u8 cmd) { return genlmsg_put(skb, genl_info_snd_portid(info), info->snd_seq, family, flags, cmd); } EXPORT_SYMBOL_GPL(genlmsg_put_reply); int genlmsg_multicast_netns(const struct genl_family *family, struct net *net, struct sk_buff *skb, u32 portid, unsigned int group, gfp_t flags) { group = 
__backport_genl_group(family, group); if (group == INVALID_GROUP) return -EINVAL; return nlmsg_multicast(net->genl_sock, skb, portid, group, flags); } EXPORT_SYMBOL_GPL(genlmsg_multicast_netns); int genlmsg_multicast(const struct genl_family *family, struct sk_buff *skb, u32 portid, unsigned int group, gfp_t flags) { return genlmsg_multicast_netns(family, &init_net, skb, portid, group, flags); } EXPORT_SYMBOL_GPL(genlmsg_multicast); static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group, gfp_t flags) { struct sk_buff *tmp; struct net *net, *prev = NULL; bool delivered = false; int err; for_each_net_rcu(net) { if (prev) { tmp = skb_clone(skb, flags); if (!tmp) { err = -ENOMEM; goto error; } err = nlmsg_multicast(prev->genl_sock, tmp, portid, group, flags); if (!err) delivered = true; else if (err != -ESRCH) goto error; } prev = net; } err = nlmsg_multicast(prev->genl_sock, skb, portid, group, flags); if (!err) delivered = true; else if (err != -ESRCH) return err; return delivered ? 0 : -ESRCH; error: kfree_skb(skb); return err; } int backport_genlmsg_multicast_allns(const struct genl_family *family, struct sk_buff *skb, u32 portid, unsigned int group, gfp_t flags) { group = __backport_genl_group(family, group); if (group == INVALID_GROUP) return -EINVAL; return genlmsg_mcast(skb, portid, group, flags); } EXPORT_SYMBOL_GPL(backport_genlmsg_multicast_allns); backport-iwlwifi-dkms-7906/compat/backports.h000066400000000000000000000005401351064634500213240ustar00rootroot00000000000000#ifndef LINUX_BACKPORTS_PRIVATE_H #define LINUX_BACKPORTS_PRIVATE_H #include #ifdef CPTCFG_BPAUTO_BUILD_WANT_DEV_COREDUMP int devcoredump_init(void); void devcoredump_exit(void); #else static inline int devcoredump_init(void) { return 0; } static inline void devcoredump_exit(void) {} #endif #endif /* LINUX_BACKPORTS_PRIVATE_H */ backport-iwlwifi-dkms-7906/compat/build_OID_registry000077500000000000000000000113641351064634500226410ustar00rootroot00000000000000#!/usr/bin/perl -w # # Build a static ASN.1 Object Identified (OID) registry # # Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. # Written by David Howells (dhowells@redhat.com) # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public Licence # as published by the Free Software Foundation; either version # 2 of the Licence, or (at your option) any later version. # use strict; my @names = (); my @oids = (); if ($#ARGV != 1) { print STDERR "Format: ", $0, " \n"; exit(2); } # # Open the file to read from # open IN_FILE, "<$ARGV[0]" || die; while () { chomp; if (m!\s+OID_([a-zA-z][a-zA-Z0-9_]+),\s+/[*]\s+([012][.0-9]*)\s+[*]/!) { push @names, $1; push @oids, $2; } } close IN_FILE || die; # # Open the files to write into # open C_FILE, ">$ARGV[1]" or die; print C_FILE "/*\n"; print C_FILE " * Automatically generated by ", $0, ". Do not edit\n"; print C_FILE " */\n"; # # Split the data up into separate lists and also determine the lengths of the # encoded data arrays. # my @indices = (); my @lengths = (); my $total_length = 0; for (my $i = 0; $i <= $#names; $i++) { my $name = $names[$i]; my $oid = $oids[$i]; my @components = split(/[.]/, $oid); # Determine the encoded length of this OID my $size = $#components; for (my $loop = 2; $loop <= $#components; $loop++) { my $c = $components[$loop]; # We will base128 encode the number my $tmp = ($c == 0) ? 
0 : int(log($c)/log(2)); $tmp = int($tmp / 7); $size += $tmp; } push @lengths, $size; push @indices, $total_length; $total_length += $size; } # # Emit the look-up-by-OID index table # print C_FILE "\n"; if ($total_length <= 255) { print C_FILE "static const unsigned char oid_index[OID__NR + 1] = {\n"; } else { print C_FILE "static const unsigned short oid_index[OID__NR + 1] = {\n"; } for (my $i = 0; $i <= $#names; $i++) { print C_FILE "\t[OID_", $names[$i], "] = ", $indices[$i], ",\n" } print C_FILE "\t[OID__NR] = ", $total_length, "\n"; print C_FILE "};\n"; # # Encode the OIDs # my @encoded_oids = (); for (my $i = 0; $i <= $#names; $i++) { my @octets = (); my @components = split(/[.]/, $oids[$i]); push @octets, $components[0] * 40 + $components[1]; for (my $loop = 2; $loop <= $#components; $loop++) { my $c = $components[$loop]; # Base128 encode the number my $tmp = ($c == 0) ? 0 : int(log($c)/log(2)); $tmp = int($tmp / 7); for (; $tmp > 0; $tmp--) { push @octets, (($c >> $tmp * 7) & 0x7f) | 0x80; } push @octets, $c & 0x7f; } push @encoded_oids, \@octets; } # # Create a hash value for each OID # my @hash_values = (); for (my $i = 0; $i <= $#names; $i++) { my @octets = @{$encoded_oids[$i]}; my $hash = $#octets; foreach (@octets) { $hash += $_ * 33; } $hash = ($hash >> 24) ^ ($hash >> 16) ^ ($hash >> 8) ^ ($hash); push @hash_values, $hash & 0xff; } # # Emit the OID data # print C_FILE "\n"; print C_FILE "static const unsigned char oid_data[", $total_length, "] = {\n"; for (my $i = 0; $i <= $#names; $i++) { my @octets = @{$encoded_oids[$i]}; print C_FILE "\t"; print C_FILE $_, ", " foreach (@octets); print C_FILE "\t// ", $names[$i]; print C_FILE "\n"; } print C_FILE "};\n"; # # Build the search index table (ordered by length then hash then content) # my @index_table = ( 0 .. $#names ); @index_table = sort { my @octets_a = @{$encoded_oids[$a]}; my @octets_b = @{$encoded_oids[$b]}; return $hash_values[$a] <=> $hash_values[$b] if ($hash_values[$a] != $hash_values[$b]); return $#octets_a <=> $#octets_b if ($#octets_a != $#octets_b); for (my $i = $#octets_a; $i >= 0; $i--) { return $octets_a[$i] <=> $octets_b[$i] if ($octets_a[$i] != $octets_b[$i]); } return 0; } @index_table; # # Emit the search index and hash value table # print C_FILE "\n"; print C_FILE "static const struct {\n"; print C_FILE "\tunsigned char hash;\n"; if ($#names <= 255) { print C_FILE "\tenum OID oid : 8;\n"; } else { print C_FILE "\tenum OID oid : 16;\n"; } print C_FILE "} oid_search_table[OID__NR] = {\n"; for (my $i = 0; $i <= $#names; $i++) { my @octets = @{$encoded_oids[$index_table[$i]]}; printf(C_FILE "\t[%3u] = { %3u, OID_%-35s }, // ", $i, $hash_values[$index_table[$i]], $names[$index_table[$i]]); printf C_FILE "%02x", $_ foreach (@octets); print C_FILE "\n"; } print C_FILE "};\n"; # # Emit the OID debugging name table # #print C_FILE "\n"; #print C_FILE "const char *const oid_name_table[OID__NR + 1] = {\n"; # #for (my $i = 0; $i <= $#names; $i++) { # print C_FILE "\t\"", $names[$i], "\",\n" #} #print C_FILE "\t\"Unknown-OID\"\n"; #print C_FILE "};\n"; # # Polish off # close C_FILE or die; backport-iwlwifi-dkms-7906/compat/compat-3.0.c000066400000000000000000000047361351064634500211230ustar00rootroot00000000000000/* * Copyright 2011 Hauke Mehrtens * Copyright 2011 Alexey Dobriyan * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Backport functionality introduced in Linux 3.0. 
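 *
 * Overview of the helpers below: mac_pton() parses a "XX:XX:XX:XX:XX:XX"
 * string into a 6-byte MAC, returning 1 on success and 0 on malformed
 * input (e.g. mac_pton("00:19:d2:01:02:03", mac) fills mac[] and
 * returns 1); the kstrto*_from_user() functions are stamped out by the
 * kstrto_from_user() macro; strtobool() accepts only a leading
 * 'Y'/'y'/'1' or 'N'/'n'/'0'.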
*/ #include #include int mac_pton(const char *s, u8 *mac) { int i; /* XX:XX:XX:XX:XX:XX */ if (strlen(s) < 3 * ETH_ALEN - 1) return 0; /* Don't dirty result unless string is valid MAC. */ for (i = 0; i < ETH_ALEN; i++) { if (!strchr("0123456789abcdefABCDEF", s[i * 3])) return 0; if (!strchr("0123456789abcdefABCDEF", s[i * 3 + 1])) return 0; if (i != ETH_ALEN - 1 && s[i * 3 + 2] != ':') return 0; } for (i = 0; i < ETH_ALEN; i++) { mac[i] = (hex_to_bin(s[i * 3]) << 4) | hex_to_bin(s[i * 3 + 1]); } return 1; } EXPORT_SYMBOL_GPL(mac_pton); #define kstrto_from_user(f, g, type) \ int f(const char __user *s, size_t count, unsigned int base, type *res) \ { \ /* sign, base 2 representation, newline, terminator */ \ char buf[1 + sizeof(type) * 8 + 1 + 1]; \ \ count = min(count, sizeof(buf) - 1); \ if (copy_from_user(buf, s, count)) \ return -EFAULT; \ buf[count] = '\0'; \ return g(buf, base, res); \ } \ EXPORT_SYMBOL_GPL(f) kstrto_from_user(kstrtoull_from_user, kstrtoull, unsigned long long); kstrto_from_user(kstrtoll_from_user, kstrtoll, long long); kstrto_from_user(kstrtoul_from_user, kstrtoul, unsigned long); kstrto_from_user(kstrtol_from_user, kstrtol, long); kstrto_from_user(kstrtouint_from_user, kstrtouint, unsigned int); kstrto_from_user(kstrtoint_from_user, kstrtoint, int); kstrto_from_user(kstrtou16_from_user, kstrtou16, u16); kstrto_from_user(kstrtos16_from_user, kstrtos16, s16); kstrto_from_user(kstrtou8_from_user, kstrtou8, u8); kstrto_from_user(kstrtos8_from_user, kstrtos8, s8); /** * strtobool - convert common user inputs into boolean values * @s: input string * @res: result * * This routine returns 0 iff the first character is one of 'Yy1Nn0'. * Otherwise it will return -EINVAL. Value pointed to by res is * updated upon finding a match. */ int strtobool(const char *s, bool *res) { switch (s[0]) { case 'y': case 'Y': case '1': *res = true; break; case 'n': case 'N': case '0': *res = false; break; default: return -EINVAL; } return 0; } EXPORT_SYMBOL_GPL(strtobool); backport-iwlwifi-dkms-7906/compat/compat-3.1.c000066400000000000000000000057311351064634500211200ustar00rootroot00000000000000/* * Copyright 2012 Hauke Mehrtens * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Backport functionality introduced in Linux 3.1. */ #include #include #include static DEFINE_SPINLOCK(compat_simple_ida_lock); /** * ida_simple_get - get a new id. * @ida: the (initialized) ida. * @start: the minimum id (inclusive, < 0x8000000) * @end: the maximum id (exclusive, < 0x8000000 or 0) * @gfp_mask: memory allocation flags * * Allocates an id in the range start <= id < end, or returns -ENOSPC. * On memory allocation failure, returns -ENOMEM. * * Use ida_simple_remove() to get rid of an id. 
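 *
 * Example (illustrative only):
 *
 *	static DEFINE_IDA(my_ida);
 *	int id;
 *
 *	id = ida_simple_get(&my_ida, 0, 0, GFP_KERNEL);
 *	if (id < 0)
 *		return id;
 *	...
 *	ida_simple_remove(&my_ida, id);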
*/ int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end, gfp_t gfp_mask) { int ret, id; unsigned int max; unsigned long flags; BUG_ON((int)start < 0); BUG_ON((int)end < 0); if (end == 0) max = 0x80000000; else { BUG_ON(end < start); max = end - 1; } again: if (!ida_pre_get(ida, gfp_mask)) return -ENOMEM; spin_lock_irqsave(&compat_simple_ida_lock, flags); ret = ida_get_new_above(ida, start, &id); if (!ret) { if (id > max) { ida_remove(ida, id); ret = -ENOSPC; } else { ret = id; } } spin_unlock_irqrestore(&compat_simple_ida_lock, flags); if (unlikely(ret == -EAGAIN)) goto again; return ret; } EXPORT_SYMBOL_GPL(ida_simple_get); /** * ida_simple_remove - remove an allocated id. * @ida: the (initialized) ida. * @id: the id returned by ida_simple_get. */ void ida_simple_remove(struct ida *ida, unsigned int id) { unsigned long flags; BUG_ON((int)id < 0); spin_lock_irqsave(&compat_simple_ida_lock, flags); ida_remove(ida, id); spin_unlock_irqrestore(&compat_simple_ida_lock, flags); } EXPORT_SYMBOL_GPL(ida_simple_remove); /* source lib/idr.c */ #ifdef CONFIG_OF /** * of_property_read_u32_array - Find and read an array of 32 bit integers * from a property. * * @np: device node from which the property value is to be read. * @propname: name of the property to be searched. * @out_values: pointer to return value, modified only if return value is 0. * @sz: number of array elements to read * * Search for a property in a device node and read 32-bit value(s) from * it. Returns 0 on success, -EINVAL if the property does not exist, * -ENODATA if property does not have a value, and -EOVERFLOW if the * property data isn't large enough. * * The out_values is modified only if a valid u32 value can be decoded. */ int of_property_read_u32_array(const struct device_node *np, const char *propname, u32 *out_values, size_t sz) { const __be32 *val = of_find_property_value_of_size(np, propname, (sz * sizeof(*out_values))); if (IS_ERR(val)) return PTR_ERR(val); while (sz--) *out_values++ = be32_to_cpup(val++); return 0; } EXPORT_SYMBOL_GPL(of_property_read_u32_array); #endif backport-iwlwifi-dkms-7906/compat/compat-3.3.c000066400000000000000000000126411351064634500211200ustar00rootroot00000000000000/* * Copyright 2012 Luis R. Rodriguez * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Backport functionality introduced in Linux 3.3. 
*/ #include #include #include #include #include #include #include static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old) { new->tstamp = old->tstamp; new->dev = old->dev; new->transport_header = old->transport_header; new->network_header = old->network_header; new->mac_header = old->mac_header; skb_dst_copy(new, old); new->rxhash = old->rxhash; #if LINUX_VERSION_IS_GEQ(3,1,0) new->ooo_okay = old->ooo_okay; #endif #if LINUX_VERSION_IS_GEQ(3,2,0) new->l4_rxhash = old->l4_rxhash; #endif #ifdef CONFIG_XFRM new->sp = secpath_get(old->sp); #endif memcpy(new->cb, old->cb, sizeof(old->cb)); new->csum = old->csum; new->local_df = old->local_df; new->pkt_type = old->pkt_type; new->ip_summed = old->ip_summed; skb_copy_queue_mapping(new, old); new->priority = old->priority; #if IS_ENABLED(CONFIG_IP_VS) new->ipvs_property = old->ipvs_property; #endif new->protocol = old->protocol; new->mark = old->mark; new->skb_iif = old->skb_iif; __nf_copy(new, old); #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) new->nf_trace = old->nf_trace; #endif #ifdef CONFIG_NET_SCHED new->tc_index = old->tc_index; #ifdef CONFIG_NET_CLS_ACT new->tc_verd = old->tc_verd; #endif #endif new->vlan_tci = old->vlan_tci; skb_copy_secmark(new, old); } static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old) { #ifndef NET_SKBUFF_DATA_USES_OFFSET /* * Shift between the two data areas in bytes */ unsigned long offset = new->data - old->data; #endif __copy_skb_header(new, old); #ifndef NET_SKBUFF_DATA_USES_OFFSET /* {transport,network,mac}_header are relative to skb->head */ new->transport_header += offset; new->network_header += offset; if (skb_mac_header_was_set(new)) new->mac_header += offset; #endif skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size; skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs; skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type; } static void skb_clone_fraglist(struct sk_buff *skb) { struct sk_buff *list; skb_walk_frags(skb, list) skb_get(list); } /** * __pskb_copy - create copy of an sk_buff with private head. * @skb: buffer to copy * @headroom: headroom of new skb * @gfp_mask: allocation priority * * Make a copy of both an &sk_buff and part of its data, located * in header. Fragmented data remain shared. This is used when * the caller wishes to modify only header of &sk_buff and needs * private copy of the header to alter. Returns %NULL on failure * or the pointer to the buffer on success. * The returned buffer has a reference count of 1. 
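 *
 * Example (illustrative only): duplicate the header part of an skb
 * while reserving 16 extra bytes of headroom for a new encapsulation
 * header:
 *
 *	struct sk_buff *n = __pskb_copy(skb, 16 + skb_headroom(skb),
 *					GFP_ATOMIC);
 *	if (!n)
 *		return -ENOMEM;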
*/ struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, gfp_t gfp_mask) { unsigned int size = skb_headlen(skb) + headroom; struct sk_buff *n = alloc_skb(size, gfp_mask); if (!n) goto out; /* Set the data pointer */ skb_reserve(n, headroom); /* Set the tail pointer and length */ skb_put(n, skb_headlen(skb)); /* Copy the bytes */ skb_copy_from_linear_data(skb, n->data, n->len); n->truesize += skb->data_len; n->data_len = skb->data_len; n->len = skb->len; if (skb_shinfo(skb)->nr_frags) { int i; /* * SKBTX_DEV_ZEROCOPY was added on 3.1 as well but requires ubuf * stuff added to the skb which we do not have */ #if 0 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) { if (skb_copy_ubufs(skb, gfp_mask)) { kfree_skb(n); n = NULL; goto out; } } #endif for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i]; #if LINUX_VERSION_IS_GEQ(3,2,0) skb_frag_ref(skb, i); #else get_page(skb_shinfo(skb)->frags[i].page); #endif } skb_shinfo(n)->nr_frags = i; } if (skb_has_frag_list(skb)) { skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list; skb_clone_fraglist(n); } copy_skb_header(n, skb); out: return n; } EXPORT_SYMBOL_GPL(__pskb_copy); static DEFINE_SPINLOCK(wq_name_lock); static LIST_HEAD(wq_name_list); struct wq_name { struct list_head list; struct workqueue_struct *wq; char name[24]; }; struct workqueue_struct * backport_alloc_workqueue(const char *fmt, unsigned int flags, int max_active, struct lock_class_key *key, const char *lock_name, ...) { struct workqueue_struct *wq; struct wq_name *n = kzalloc(sizeof(*n), GFP_KERNEL); va_list args; if (!n) return NULL; va_start(args, lock_name); vsnprintf(n->name, sizeof(n->name), fmt, args); va_end(args); wq = __alloc_workqueue_key(n->name, flags, max_active, key, lock_name); if (!wq) { kfree(n); return NULL; } n->wq = wq; spin_lock(&wq_name_lock); list_add(&n->list, &wq_name_list); spin_unlock(&wq_name_lock); return wq; } EXPORT_SYMBOL_GPL(backport_alloc_workqueue); void backport_destroy_workqueue(struct workqueue_struct *wq) { struct wq_name *n, *tmp; /* call original */ #undef destroy_workqueue destroy_workqueue(wq); spin_lock(&wq_name_lock); list_for_each_entry_safe(n, tmp, &wq_name_list, list) { if (n->wq == wq) { list_del(&n->list); kfree(n); break; } } spin_unlock(&wq_name_lock); } EXPORT_SYMBOL_GPL(backport_destroy_workqueue); backport-iwlwifi-dkms-7906/compat/compat-3.4.c000066400000000000000000000115361351064634500211230ustar00rootroot00000000000000/* * Copyright 2012 Luis R. Rodriguez * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Backport functionality introduced in Linux 3.4. 
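 */

/*
 * Usage sketch for the alloc_workqueue() name-formatting backport above
 * (hypothetical; example_wq and the "%d"-formatted name are illustrative
 * only). The wrapper vsnprintf()s the printf-style name into a private
 * table entry that the matching destroy_workqueue() later frees again.
 */
static struct workqueue_struct *example_wq;

static int example_wq_init(int idx)
{
	example_wq = alloc_workqueue("example_wq_%d", 0, 1, idx);
	if (!example_wq)
		return -ENOMEM;
	return 0;
}

static void example_wq_exit(void)
{
	destroy_workqueue(example_wq);	/* also drops the saved name entry */
}

/*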
*/ #include #include #include #include #include #if LINUX_VERSION_IS_GEQ(3,2,0) #include #include #include #endif /* LINUX_VERSION_IS_GEQ(3,2,0) */ #if LINUX_VERSION_IS_GEQ(3,2,0) #if defined(CONFIG_REGMAP) static void devm_regmap_release(struct device *dev, void *res) { regmap_exit(*(struct regmap **)res); } #if defined(CONFIG_REGMAP_I2C) static int regmap_i2c_write( struct device *dev, const void *data, size_t count) { struct i2c_client *i2c = to_i2c_client(dev); int ret; ret = i2c_master_send(i2c, data, count); if (ret == count) return 0; else if (ret < 0) return ret; else return -EIO; } static int regmap_i2c_gather_write( struct device *dev, const void *reg, size_t reg_size, const void *val, size_t val_size) { struct i2c_client *i2c = to_i2c_client(dev); struct i2c_msg xfer[2]; int ret; /* If the I2C controller can't do a gather tell the core, it * will substitute in a linear write for us. */ if (!i2c_check_functionality(i2c->adapter, I2C_FUNC_NOSTART)) return -ENOTSUPP; xfer[0].addr = i2c->addr; xfer[0].flags = 0; xfer[0].len = reg_size; xfer[0].buf = (void *)reg; xfer[1].addr = i2c->addr; xfer[1].flags = I2C_M_NOSTART; xfer[1].len = val_size; xfer[1].buf = (void *)val; ret = i2c_transfer(i2c->adapter, xfer, 2); if (ret == 2) return 0; if (ret < 0) return ret; else return -EIO; } static int regmap_i2c_read( struct device *dev, const void *reg, size_t reg_size, void *val, size_t val_size) { struct i2c_client *i2c = to_i2c_client(dev); struct i2c_msg xfer[2]; int ret; xfer[0].addr = i2c->addr; xfer[0].flags = 0; xfer[0].len = reg_size; xfer[0].buf = (void *)reg; xfer[1].addr = i2c->addr; xfer[1].flags = I2C_M_RD; xfer[1].len = val_size; xfer[1].buf = val; ret = i2c_transfer(i2c->adapter, xfer, 2); if (ret == 2) return 0; else if (ret < 0) return ret; else return -EIO; } static struct regmap_bus regmap_i2c = { .write = regmap_i2c_write, .gather_write = regmap_i2c_gather_write, .read = regmap_i2c_read, }; #endif /* defined(CONFIG_REGMAP_I2C) */ /** * devm_regmap_init(): Initialise managed register map * * @dev: Device that will be interacted with * @bus: Bus-specific callbacks to use with device * @bus_context: Data passed to bus-specific callbacks * @config: Configuration for register map * * The return value will be an ERR_PTR() on error or a valid pointer * to a struct regmap. This function should generally not be called * directly, it should be called by bus-specific init functions. The * map will be automatically freed by the device management code. */ struct regmap *devm_regmap_init(struct device *dev, const struct regmap_bus *bus, const struct regmap_config *config) { struct regmap **ptr, *regmap; ptr = devres_alloc(devm_regmap_release, sizeof(*ptr), GFP_KERNEL); if (!ptr) return ERR_PTR(-ENOMEM); regmap = regmap_init(dev, bus, config); if (!IS_ERR(regmap)) { *ptr = regmap; devres_add(dev, ptr); } else { devres_free(ptr); } return regmap; } EXPORT_SYMBOL_GPL(devm_regmap_init); #if defined(CONFIG_REGMAP_I2C) /** * devm_regmap_init_i2c(): Initialise managed register map * * @i2c: Device that will be interacted with * @config: Configuration for register map * * The return value will be an ERR_PTR() on error or a valid pointer * to a struct regmap. The regmap will be automatically freed by the * device management code. 
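 */

/*
 * A hypothetical I2C probe using the managed regmap initialisation from
 * this file (example_regmap_cfg, example_i2c_probe and the 8-bit
 * register layout are assumptions, not part of the original). The map is
 * released automatically by devres on device detach. The prototype only
 * forward declares the function defined immediately below.
 */
struct regmap *devm_regmap_init_i2c(struct i2c_client *i2c,
				    const struct regmap_config *config);

static const struct regmap_config example_regmap_cfg = {
	.reg_bits = 8,
	.val_bits = 8,
};

static int example_i2c_probe(struct i2c_client *client)
{
	struct regmap *map = devm_regmap_init_i2c(client, &example_regmap_cfg);

	if (IS_ERR(map))
		return PTR_ERR(map);

	/* e.g. kick the device by writing register 0x00 = 0x01 */
	return regmap_write(map, 0x00, 0x01);
}

/*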
*/ struct regmap *devm_regmap_init_i2c(struct i2c_client *i2c, const struct regmap_config *config) { return devm_regmap_init(&i2c->dev, &regmap_i2c, config); } EXPORT_SYMBOL_GPL(devm_regmap_init_i2c); #endif /* defined(CONFIG_REGMAP_I2C) */ #endif /* defined(CONFIG_REGMAP) */ #endif /* LINUX_VERSION_IS_GEQ(3,2,0) */ int simple_open(struct inode *inode, struct file *file) { if (inode->i_private) file->private_data = inode->i_private; return 0; } EXPORT_SYMBOL_GPL(simple_open); #ifdef CONFIG_COMPAT static int __compat_put_timespec(const struct timespec *ts, struct compat_timespec __user *cts) { return (!access_ok(VERIFY_WRITE, cts, sizeof(*cts)) || __put_user(ts->tv_sec, &cts->tv_sec) || __put_user(ts->tv_nsec, &cts->tv_nsec)) ? -EFAULT : 0; } int compat_put_timespec(const struct timespec *ts, void __user *uts) { if (COMPAT_USE_64BIT_TIME) return copy_to_user(uts, ts, sizeof *ts) ? -EFAULT : 0; else return __compat_put_timespec(ts, uts); } EXPORT_SYMBOL_GPL(compat_put_timespec); #endif backport-iwlwifi-dkms-7906/compat/compat-3.5.c000066400000000000000000000103021351064634500211140ustar00rootroot00000000000000/* * Copyright 2012-2013 Luis R. Rodriguez * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Backport functionality introduced in Linux 3.5. */ #include #include #include #include #include #include #if LINUX_VERSION_IS_GEQ(3,2,0) #include /** * devres_release - Find a device resource and destroy it, calling release * @dev: Device to find resource from * @release: Look for resources associated with this release function * @match: Match function (optional) * @match_data: Data for the match function * * Find the latest devres of @dev associated with @release and for * which @match returns 1. If @match is NULL, it's considered to * match all. If found, the resource is removed atomically, the * release function called and the resource freed. * * RETURNS: * 0 if devres is found and freed, -ENOENT if not found. */ int devres_release(struct device *dev, dr_release_t release, dr_match_t match, void *match_data) { void *res; res = devres_remove(dev, release, match, match_data); if (unlikely(!res)) return -ENOENT; (*release)(dev, res); devres_free(res); return 0; } EXPORT_SYMBOL_GPL(devres_release); #endif /* LINUX_VERSION_IS_GEQ(3,2,0) */ /* * Commit 7a4e7408c5cadb240e068a662251754a562355e3 * exported overflowuid and overflowgid for all * kernel configurations, prior to that we only * had it exported when CONFIG_UID16 was enabled. * We are technically redefining it here but * nothing seems to be changing it, except * kernel/ code does expose it via sysctl and * proc... if required later we can add that here.
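 */

/*
 * An illustrative debugfs-style user of simple_open() backported above
 * (hypothetical; example_read and example_fops are not in the original
 * file). simple_open() just forwards inode->i_private to
 * file->private_data, which is all a trivial read-only attribute needs.
 */
static ssize_t example_read(struct file *file, char __user *buf,
			    size_t count, loff_t *ppos)
{
	u32 *val = file->private_data;	/* set up by simple_open() */
	char tmp[16];
	int len = scnprintf(tmp, sizeof(tmp), "%u\n", *val);

	return simple_read_from_buffer(buf, count, ppos, tmp, len);
}

static const struct file_operations example_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.read	= example_read,
	.llseek	= default_llseek,
};

/*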
*/ #ifndef CONFIG_UID16 int overflowuid = DEFAULT_OVERFLOWUID; int overflowgid = DEFAULT_OVERFLOWGID; EXPORT_SYMBOL_GPL(overflowuid); EXPORT_SYMBOL_GPL(overflowgid); #endif #if IS_ENABLED(CONFIG_PTP_1588_CLOCK) int ptp_clock_index(struct ptp_clock *ptp) { return ptp->index; } EXPORT_SYMBOL(ptp_clock_index); #endif /* CONFIG_PTP_1588_CLOCK */ #ifdef CONFIG_GPIOLIB static void devm_gpio_release(struct device *dev, void *res) { unsigned *gpio = res; gpio_free(*gpio); } /** * devm_gpio_request - request a GPIO for a managed device * @dev: device to request the GPIO for * @gpio: GPIO to allocate * @label: the name of the requested GPIO * * Except for the extra @dev argument, this function takes the * same arguments and performs the same function as * gpio_request(). GPIOs requested with this function will be * automatically freed on driver detach. * * If a GPIO allocated with this function needs to be freed * separately, devm_gpio_free() must be used. */ int devm_gpio_request(struct device *dev, unsigned gpio, const char *label) { unsigned *dr; int rc; dr = devres_alloc(devm_gpio_release, sizeof(unsigned), GFP_KERNEL); if (!dr) return -ENOMEM; rc = gpio_request(gpio, label); if (rc) { devres_free(dr); return rc; } *dr = gpio; devres_add(dev, dr); return 0; } EXPORT_SYMBOL_GPL(devm_gpio_request); /** * devm_gpio_request_one - request a single GPIO with initial setup * @dev: device to request for * @gpio: the GPIO number * @flags: GPIO configuration as specified by GPIOF_* * @label: a literal description string of this GPIO */ int devm_gpio_request_one(struct device *dev, unsigned gpio, unsigned long flags, const char *label) { unsigned *dr; int rc; dr = devres_alloc(devm_gpio_release, sizeof(unsigned), GFP_KERNEL); if (!dr) return -ENOMEM; rc = gpio_request_one(gpio, flags, label); if (rc) { devres_free(dr); return rc; } *dr = gpio; devres_add(dev, dr); return 0; } EXPORT_SYMBOL_GPL(devm_gpio_request_one); static int devm_gpio_match(struct device *dev, void *res, void *data) { unsigned *this = res, *gpio = data; return *this == *gpio; } void devm_gpio_free(struct device *dev, unsigned int gpio) { WARN_ON(devres_destroy(dev, devm_gpio_release, devm_gpio_match, &gpio)); gpio_free(gpio); } EXPORT_SYMBOL_GPL(devm_gpio_free); #endif /* CONFIG_GPIOLIB */ backport-iwlwifi-dkms-7906/compat/compat-3.6.c000066400000000000000000000011311351064634500211130ustar00rootroot00000000000000/* * Copyright (c) 2013 Luis R. Rodriguez * * Backport compatibility file for Linux kernels 3.6. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include #include #include /* whoopsie ! */ #ifndef CONFIG_COMMON_CLK int clk_enable(struct clk *clk) { return 0; } EXPORT_SYMBOL_GPL(clk_enable); void clk_disable(struct clk *clk) { } EXPORT_SYMBOL_GPL(clk_disable); #endif backport-iwlwifi-dkms-7906/compat/compat-3.7.c000066400000000000000000000160261351064634500211250ustar00rootroot00000000000000/* * Copyright 2012 Luis R. Rodriguez * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Backport functionality introduced in Linux 3.7.
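 */

/*
 * A minimal sketch of the managed GPIO helpers backported earlier in
 * this archive (hypothetical driver code; example_attach and the
 * "example-reset" label are not part of the original). The GPIO is set
 * up as a driven-low output and freed automatically on driver detach,
 * so no explicit gpio_free() is needed.
 */
static int example_attach(struct device *dev, unsigned int reset_gpio)
{
	int ret;

	ret = devm_gpio_request_one(dev, reset_gpio, GPIOF_OUT_INIT_LOW,
				    "example-reset");
	if (ret)
		return ret;

	gpio_set_value(reset_gpio, 1);	/* deassert reset */
	return 0;
}

/*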
*/ #include #include #include #include #include #include bool mod_delayed_work(struct workqueue_struct *wq, struct delayed_work *dwork, unsigned long delay) { cancel_delayed_work(dwork); queue_delayed_work(wq, dwork, delay); return false; } EXPORT_SYMBOL_GPL(mod_delayed_work); #ifdef CONFIG_PCI /* * Kernels >= 3.7 get their PCI-E Capabilities Register cached * via the pci_dev->pcie_flags_reg so for older kernels we have * no other option but to read this every single time we need * it accessed. If we really cared to improve the efficiency * of this we could try to find an unused u16 variable on the * pci_dev but if we found it we likely would remove it from * the kernel anyway right? Bite me. */ static inline u16 pcie_flags_reg(struct pci_dev *dev) { int pos; u16 reg16; pos = pci_find_capability(dev, PCI_CAP_ID_EXP); if (!pos) return 0; pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &reg16); return reg16; } #define pci_pcie_type LINUX_BACKPORT(pci_pcie_type) static inline int pci_pcie_type(struct pci_dev *dev) { return (pcie_flags_reg(dev) & PCI_EXP_FLAGS_TYPE) >> 4; } #define pcie_cap_version LINUX_BACKPORT(pcie_cap_version) static inline int pcie_cap_version(struct pci_dev *dev) { return pcie_flags_reg(dev) & PCI_EXP_FLAGS_VERS; } static inline bool pcie_cap_has_lnkctl(struct pci_dev *dev) { int type = pci_pcie_type(dev); return pcie_cap_version(dev) > 1 || type == PCI_EXP_TYPE_ROOT_PORT || type == PCI_EXP_TYPE_ENDPOINT || type == PCI_EXP_TYPE_LEG_END; } static inline bool pcie_cap_has_sltctl(struct pci_dev *dev) { int type = pci_pcie_type(dev); return pcie_cap_version(dev) > 1 || type == PCI_EXP_TYPE_ROOT_PORT || (type == PCI_EXP_TYPE_DOWNSTREAM && pcie_flags_reg(dev) & PCI_EXP_FLAGS_SLOT); } static inline bool pcie_cap_has_rtctl(struct pci_dev *dev) { int type = pci_pcie_type(dev); return pcie_cap_version(dev) > 1 || type == PCI_EXP_TYPE_ROOT_PORT || type == PCI_EXP_TYPE_RC_EC; } static bool pcie_capability_reg_implemented(struct pci_dev *dev, int pos) { if (!pci_is_pcie(dev)) return false; switch (pos) { case PCI_EXP_FLAGS_TYPE: return true; case PCI_EXP_DEVCAP: case PCI_EXP_DEVCTL: case PCI_EXP_DEVSTA: return true; case PCI_EXP_LNKCAP: case PCI_EXP_LNKCTL: case PCI_EXP_LNKSTA: return pcie_cap_has_lnkctl(dev); case PCI_EXP_SLTCAP: case PCI_EXP_SLTCTL: case PCI_EXP_SLTSTA: return pcie_cap_has_sltctl(dev); case PCI_EXP_RTCTL: case PCI_EXP_RTCAP: case PCI_EXP_RTSTA: return pcie_cap_has_rtctl(dev); case PCI_EXP_DEVCAP2: case PCI_EXP_DEVCTL2: case PCI_EXP_LNKCAP2: case PCI_EXP_LNKCTL2: case PCI_EXP_LNKSTA2: return pcie_cap_version(dev) > 1; default: return false; } } /* * Note that these accessor functions are only for the "PCI Express * Capability" (see PCIe spec r3.0, sec 7.8). They do not apply to the * other "PCI Express Extended Capabilities" (AER, VC, ACS, MFVC, etc.) */ int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val) { int ret; *val = 0; if (pos & 1) return -EINVAL; if (pcie_capability_reg_implemented(dev, pos)) { ret = pci_read_config_word(dev, pci_pcie_cap(dev) + pos, val); /* * Reset *val to 0 if pci_read_config_word() fails, it may * have been written as 0xFFFF if hardware error happens * during pci_read_config_word(). */ if (ret) *val = 0; return ret; } /* * For Functions that do not implement the Slot Capabilities, * Slot Status, and Slot Control registers, these spaces must * be hardwired to 0b, with the exception of the Presence Detect * State bit in the Slot Status register of Downstream Ports, * which must be hardwired to 1b.
(PCIe Base Spec 3.0, sec 7.8) */ if (pci_is_pcie(dev) && pos == PCI_EXP_SLTSTA && pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) { *val = PCI_EXP_SLTSTA_PDS; } return 0; } EXPORT_SYMBOL_GPL(pcie_capability_read_word); int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val) { int ret; *val = 0; if (pos & 3) return -EINVAL; if (pcie_capability_reg_implemented(dev, pos)) { ret = pci_read_config_dword(dev, pci_pcie_cap(dev) + pos, val); /* * Reset *val to 0 if pci_read_config_dword() fails, it may * have been written as 0xFFFFFFFF if hardware error happens * during pci_read_config_dword(). */ if (ret) *val = 0; return ret; } if (pci_is_pcie(dev) && pos == PCI_EXP_SLTCTL && pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) { *val = PCI_EXP_SLTSTA_PDS; } return 0; } EXPORT_SYMBOL_GPL(pcie_capability_read_dword); int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val) { if (pos & 1) return -EINVAL; if (!pcie_capability_reg_implemented(dev, pos)) return 0; return pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val); } EXPORT_SYMBOL_GPL(pcie_capability_write_word); int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val) { if (pos & 3) return -EINVAL; if (!pcie_capability_reg_implemented(dev, pos)) return 0; return pci_write_config_dword(dev, pci_pcie_cap(dev) + pos, val); } EXPORT_SYMBOL_GPL(pcie_capability_write_dword); int pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos, u16 clear, u16 set) { int ret; u16 val; ret = pcie_capability_read_word(dev, pos, &val); if (!ret) { val &= ~clear; val |= set; ret = pcie_capability_write_word(dev, pos, val); } return ret; } EXPORT_SYMBOL_GPL(pcie_capability_clear_and_set_word); int pcie_capability_clear_and_set_dword(struct pci_dev *dev, int pos, u32 clear, u32 set) { int ret; u32 val; ret = pcie_capability_read_dword(dev, pos, &val); if (!ret) { val &= ~clear; val |= set; ret = pcie_capability_write_dword(dev, pos, val); } return ret; } EXPORT_SYMBOL_GPL(pcie_capability_clear_and_set_dword); #endif #ifdef CONFIG_OF #if LINUX_VERSION_IS_LESS(3,7,0) /** * of_get_child_by_name - Find the child node by name for a given parent * @node: parent node * @name: child name to look for. * * This function looks for child node for given matching name * * Returns a node pointer if found, with refcount incremented, use * of_node_put() on it when done. * Returns NULL if node is not found. */ struct device_node *of_get_child_by_name(const struct device_node *node, const char *name) { struct device_node *child; for_each_child_of_node(node, child) if (child->name && (of_node_cmp(child->name, name) == 0)) break; return child; } EXPORT_SYMBOL_GPL(of_get_child_by_name); #endif /* LINUX_VERSION_IS_LESS(3,7,0) */ #endif /* CONFIG_OF */ int sg_nents(struct scatterlist *sg) { int nents; for (nents = 0; sg; sg = sg_next(sg)) nents++; return nents; } EXPORT_SYMBOL_GPL(sg_nents); backport-iwlwifi-dkms-7906/compat/compat-3.8.c000066400000000000000000000537311351064634500211320ustar00rootroot00000000000000/* * Copyright (c) 1999 Andreas Gal * Copyright (c) 2000-2005 Vojtech Pavlik * Copyright (c) 2005 Michael Haboustak for Concept2, Inc * Copyright (c) 2006-2012 Jiri Kosina * Copyright (c) 2012 Luis R. Rodriguez * * Backport functionality introduced in Linux 3.8. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. 
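 */

/*
 * Sketch of a typical caller of the PCIe capability accessors defined
 * above (hypothetical; example_disable_aspm is not part of the
 * original). Clearing the ASPM L0s/L1 enable bits in PCI_EXP_LNKCTL is
 * a common driver pattern, and the read-modify-write helper keeps it to
 * a single call.
 */
static void example_disable_aspm(struct pci_dev *pdev)
{
	u16 lnksta;

	pcie_capability_clear_and_set_word(pdev, PCI_EXP_LNKCTL,
					   PCI_EXP_LNKCTL_ASPMC, 0);

	/* reads of unimplemented registers come back zero-filled */
	pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnksta);
}

/*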
*/ #include #include #include "hid-ids.h" #include #include #include #include #include #include #if LINUX_VERSION_IS_LESS(3,7,8) void netdev_set_default_ethtool_ops(struct net_device *dev, const struct ethtool_ops *ops) { if (!dev->ethtool_ops) dev->ethtool_ops = ops; } EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops); #endif /* a list of devices that shouldn't be handled by HID core at all */ static const struct hid_device_id hid_ignore_list[] = { { HID_USB_DEVICE(USB_VENDOR_ID_ACECAD, USB_DEVICE_ID_ACECAD_FLAIR) }, { HID_USB_DEVICE(USB_VENDOR_ID_ACECAD, USB_DEVICE_ID_ACECAD_302) }, { HID_USB_DEVICE(USB_VENDOR_ID_ADS_TECH, USB_DEVICE_ID_ADS_TECH_RADIO_SI470X) }, { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_01) }, { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_10) }, { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_20) }, { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_21) }, { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_22) }, { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_23) }, { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_24) }, { HID_USB_DEVICE(USB_VENDOR_ID_AIRCABLE, USB_DEVICE_ID_AIRCABLE1) }, { HID_USB_DEVICE(USB_VENDOR_ID_ALCOR, USB_DEVICE_ID_ALCOR_USBRS232) }, { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM)}, { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM2)}, { HID_USB_DEVICE(USB_VENDOR_ID_AVERMEDIA, USB_DEVICE_ID_AVER_FM_MR800) }, { HID_USB_DEVICE(USB_VENDOR_ID_AXENTIA, USB_DEVICE_ID_AXENTIA_FM_RADIO) }, { HID_USB_DEVICE(USB_VENDOR_ID_BERKSHIRE, USB_DEVICE_ID_BERKSHIRE_PCWD) }, { HID_USB_DEVICE(USB_VENDOR_ID_CIDC, 0x0103) }, { HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_RADIO_SI470X) }, { HID_USB_DEVICE(USB_VENDOR_ID_CMEDIA, USB_DEVICE_ID_CM109) }, { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_HIDCOM) }, { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_ULTRAMOUSE) }, { HID_USB_DEVICE(USB_VENDOR_ID_DEALEXTREAME, USB_DEVICE_ID_DEALEXTREAME_RADIO_SI4701) }, { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EARTHMATE) }, { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EM_LT20) }, { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, 0x0004) }, { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, 0x000a) }, { HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) }, { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) }, { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC4UM) }, { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0001) }, { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0002) }, { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0004) }, { HID_USB_DEVICE(USB_VENDOR_ID_GLAB, USB_DEVICE_ID_4_PHIDGETSERVO_30) }, { HID_USB_DEVICE(USB_VENDOR_ID_GLAB, USB_DEVICE_ID_1_PHIDGETSERVO_30) }, { HID_USB_DEVICE(USB_VENDOR_ID_GLAB, USB_DEVICE_ID_0_0_4_IF_KIT) }, { HID_USB_DEVICE(USB_VENDOR_ID_GLAB, USB_DEVICE_ID_0_16_16_IF_KIT) }, { HID_USB_DEVICE(USB_VENDOR_ID_GLAB, USB_DEVICE_ID_8_8_8_IF_KIT) }, { HID_USB_DEVICE(USB_VENDOR_ID_GLAB, USB_DEVICE_ID_0_8_7_IF_KIT) }, { HID_USB_DEVICE(USB_VENDOR_ID_GLAB, USB_DEVICE_ID_0_8_8_IF_KIT) }, { HID_USB_DEVICE(USB_VENDOR_ID_GLAB, USB_DEVICE_ID_PHIDGET_MOTORCONTROL) }, { HID_USB_DEVICE(USB_VENDOR_ID_GOTOP, USB_DEVICE_ID_SUPER_Q2) }, { HID_USB_DEVICE(USB_VENDOR_ID_GOTOP, USB_DEVICE_ID_GOGOPEN) }, { HID_USB_DEVICE(USB_VENDOR_ID_GOTOP, USB_DEVICE_ID_PENPOWER) }, { HID_USB_DEVICE(USB_VENDOR_ID_GRETAGMACBETH, USB_DEVICE_ID_GRETAGMACBETH_HUEY) }, 
{ HID_USB_DEVICE(USB_VENDOR_ID_GRIFFIN, USB_DEVICE_ID_POWERMATE) }, { HID_USB_DEVICE(USB_VENDOR_ID_GRIFFIN, USB_DEVICE_ID_SOUNDKNOB) }, { HID_USB_DEVICE(USB_VENDOR_ID_GRIFFIN, USB_DEVICE_ID_RADIOSHARK) }, { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_90) }, { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_100) }, { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_101) }, { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_103) }, { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_104) }, { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_105) }, { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_106) }, { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_107) }, { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_108) }, { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_200) }, { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_201) }, { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_202) }, { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_203) }, { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_204) }, { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_205) }, { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_206) }, { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_207) }, { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_300) }, { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_301) }, { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_302) }, { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_303) }, { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_304) }, { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_305) }, { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_306) }, { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_307) }, { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_308) }, { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_309) }, { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_400) }, { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_401) }, { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_402) }, { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_403) }, { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_404) }, { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_405) }, { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_500) }, { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_501) }, { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_502) }, { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_503) }, { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_504) }, { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1000) }, { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1001) }, { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1002) }, { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1003) }, { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1004) }, { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1005) }, { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1006) }, { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1007) }, { HID_USB_DEVICE(USB_VENDOR_ID_IMATION, USB_DEVICE_ID_DISC_STAKKA) }, { HID_USB_DEVICE(USB_VENDOR_ID_KBGEAR, USB_DEVICE_ID_KBGEAR_JAMSTUDIO) }, { HID_USB_DEVICE(USB_VENDOR_ID_KWORLD, USB_DEVICE_ID_KWORLD_RADIO_FM700) }, { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_GPEN_560) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_KYE, 0x0058) }, { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_CASSY) 
}, { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_CASSY2) }, { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POCKETCASSY) }, { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POCKETCASSY2) }, { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MOBILECASSY) }, { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MOBILECASSY2) }, { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYVOLTAGE) }, { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYCURRENT) }, { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTIME) }, { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE) }, { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYPH) }, { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_JWM) }, { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_DMMP) }, { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_UMIP) }, { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_UMIC) }, { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_UMIB) }, { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_XRAY) }, { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_XRAY2) }, { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_VIDEOCOM) }, { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MOTOR) }, { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_COM3LAB) }, { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_TELEPORT) }, { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_NETWORKANALYSER) }, { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POWERCONTROL) }, { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MACHINETEST) }, { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MOSTANALYSER) }, { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MOSTANALYSER2) }, { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_ABSESP) }, { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_AUTODATABUS) }, { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MCT) }, { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HYBRID) }, { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HEATCONTROL) }, { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_BEATPAD) }, { HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1024LS) }, { HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1208LS) }, { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICKIT1) }, { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICKIT2) }, { HID_USB_DEVICE(USB_VENDOR_ID_NATIONAL_SEMICONDUCTOR, USB_DEVICE_ID_N_S_HARMONY) }, { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100) }, { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 20) }, { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 30) }, { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 100) }, { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 108) }, { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 118) }, { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 200) }, { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 300) }, { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 400) }, { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 500) }, { HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0001) }, { HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0002) }, { HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0003) }, { HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0004) }, { HID_USB_DEVICE(USB_VENDOR_ID_PHILIPS, USB_DEVICE_ID_PHILIPS_IEEE802154_DONGLE) }, { 
HID_USB_DEVICE(USB_VENDOR_ID_POWERCOM, USB_DEVICE_ID_POWERCOM_UPS) }, #if defined(CONFIG_MOUSE_SYNAPTICS_USB) || defined(CONFIG_MOUSE_SYNAPTICS_USB_MODULE) { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_TP) }, { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_INT_TP) }, { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_CPAD) }, { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_STICK) }, { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_WP) }, { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_COMP_TP) }, { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_WTP) }, { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_DPAD) }, #endif { HID_USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_LABPRO) }, { HID_USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_GOTEMP) }, { HID_USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_SKIP) }, { HID_USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_CYCLOPS) }, { HID_USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_LCSPEC) }, { HID_USB_DEVICE(USB_VENDOR_ID_WACOM, HID_ANY_ID) }, { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_4_PHIDGETSERVO_20) }, { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_1_PHIDGETSERVO_20) }, { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_8_8_4_IF_KIT) }, { HID_USB_DEVICE(USB_VENDOR_ID_YEALINK, USB_DEVICE_ID_YEALINK_P1K_P4K_B2K) }, { } }; /** * hid_mouse_ignore_list - mouse devices which should not be handled by the hid layer * * There are composite devices for which we want to ignore only a certain * interface. This is a list of devices for which only the mouse interface will * be ignored. This allows a dedicated driver to take care of the interface. 
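 */

/*
 * Illustrative consumer of the two ignore tables (hypothetical;
 * example_hid_connect is not part of the original file). Bus code is
 * expected to consult hid_ignore() before binding generic HID support,
 * so that the devices listed here fall through to dedicated drivers.
 * The prototype forward declares the function defined further below.
 */
bool hid_ignore(struct hid_device *hdev);

static int example_hid_connect(struct hid_device *hdev)
{
	if (hid_ignore(hdev))
		return -ENODEV;	/* leave it to the specialised driver */
	return 0;
}

/*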
*/ static const struct hid_device_id hid_mouse_ignore_list[] = { /* appletouch driver */ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ISO) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ANSI) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ISO) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_JIS) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ANSI) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ISO) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_JIS) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ANSI) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ISO) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_JIS) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ANSI) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ISO) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_JIS) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ISO) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_JIS) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ISO) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_JIS) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ISO) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_JIS) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_ISO) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_JIS) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ISO) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_JIS) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ISO) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_JIS) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ISO) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_JIS) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, 
USB_DEVICE_ID_APPLE_WELLSPRING7A_ANSI) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ISO) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) }, { } }; static bool hid_match_one_id(struct hid_device *hdev, const struct hid_device_id *id) { return (id->bus == HID_BUS_ANY || id->bus == hdev->bus) && #if LINUX_VERSION_IS_GEQ(3,8,0) (id->group == HID_GROUP_ANY || id->group == hdev->group) && #endif (id->vendor == HID_ANY_ID || id->vendor == hdev->vendor) && (id->product == HID_ANY_ID || id->product == hdev->product); } #define hid_match_id LINUX_BACKPORT(hid_match_id) static const struct hid_device_id * hid_match_id(struct hid_device *hdev, const struct hid_device_id *id) { for (; id->bus; id++) if (hid_match_one_id(hdev, id)) return id; return NULL; } bool hid_ignore(struct hid_device *hdev) { if (hdev->quirks & HID_QUIRK_NO_IGNORE) return false; if (hdev->quirks & HID_QUIRK_IGNORE) return true; switch (hdev->vendor) { case USB_VENDOR_ID_CODEMERCS: /* ignore all Code Mercenaries IOWarrior devices */ if (hdev->product >= USB_DEVICE_ID_CODEMERCS_IOW_FIRST && hdev->product <= USB_DEVICE_ID_CODEMERCS_IOW_LAST) return true; break; case USB_VENDOR_ID_LOGITECH: if (hdev->product >= USB_DEVICE_ID_LOGITECH_HARMONY_FIRST && hdev->product <= USB_DEVICE_ID_LOGITECH_HARMONY_LAST) return true; /* * The Keene FM transmitter USB device has the same USB ID as * the Logitech AudioHub Speaker, but it should ignore the hid. * Check if the name is that of the Keene device. * For reference: the name of the AudioHub is * "HOLTEK AudioHub Speaker". */ if (hdev->product == USB_DEVICE_ID_LOGITECH_AUDIOHUB && !strcmp(hdev->name, "HOLTEK B-LINK USB Audio ")) return true; break; case USB_VENDOR_ID_SOUNDGRAPH: if (hdev->product >= USB_DEVICE_ID_SOUNDGRAPH_IMON_FIRST && hdev->product <= USB_DEVICE_ID_SOUNDGRAPH_IMON_LAST) return true; break; case USB_VENDOR_ID_HANWANG: if (hdev->product >= USB_DEVICE_ID_HANWANG_TABLET_FIRST && hdev->product <= USB_DEVICE_ID_HANWANG_TABLET_LAST) return true; break; case USB_VENDOR_ID_JESS: if (hdev->product == USB_DEVICE_ID_JESS_YUREX && hdev->type == HID_TYPE_USBNONE) return true; break; case USB_VENDOR_ID_DWAV: /* These are handled by usbtouchscreen. hdev->type is probably * HID_TYPE_USBNONE, but we say !HID_TYPE_USBMOUSE to match * usbtouchscreen. */ if ((hdev->product == USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER || hdev->product == USB_DEVICE_ID_DWAV_TOUCHCONTROLLER) && hdev->type != HID_TYPE_USBMOUSE) return true; break; } if (hdev->type == HID_TYPE_USBMOUSE && hid_match_id(hdev, hid_mouse_ignore_list)) return true; return !!hid_match_id(hdev, hid_ignore_list); } EXPORT_SYMBOL_GPL(hid_ignore); /** * prandom_bytes - get the requested number of pseudo-random bytes * @buf: where to copy the pseudo-random bytes to * @bytes: the requested number of bytes */ void prandom_bytes(void *buf, int bytes) { unsigned char *p = buf; int i; for (i = 0; i < round_down(bytes, sizeof(u32)); i += sizeof(u32)) { u32 random = random32(); int j; for (j = 0; j < sizeof(u32); j++) { p[i + j] = random; random >>= BITS_PER_BYTE; } } if (i < bytes) { u32 random = random32(); for (; i < bytes; i++) { p[i] = random; random >>= BITS_PER_BYTE; } } } EXPORT_SYMBOL_GPL(prandom_bytes); #ifdef CONFIG_OF /** * of_property_read_u8_array - Find and read an array of u8 from a property. 
* * @np: device node from which the property value is to be read. * @propname: name of the property to be searched. * @out_values: pointer to return value, modified only if return value is 0. * @sz: number of array elements to read * * Search for a property in a device node and read 8-bit value(s) from * it. Returns 0 on success, -EINVAL if the property does not exist, * -ENODATA if property does not have a value, and -EOVERFLOW if the * property data isn't large enough. * * dts entry of array should be like: * property = /bits/ 8 <0x50 0x60 0x70>; * * The out_values is modified only if a valid u8 value can be decoded. */ int of_property_read_u8_array(const struct device_node *np, const char *propname, u8 *out_values, size_t sz) { const u8 *val = of_find_property_value_of_size(np, propname, (sz * sizeof(*out_values))); if (IS_ERR(val)) return PTR_ERR(val); while (sz--) *out_values++ = *val++; return 0; } EXPORT_SYMBOL_GPL(of_property_read_u8_array); #endif /* CONFIG_OF */ #ifdef CONFIG_PCI_IOV /** * pci_sriov_set_totalvfs -- reduce the TotalVFs available * @dev: the PCI PF device * @numvfs: number that should be used for TotalVFs supported * * Should be called from PF driver's probe routine with * device's mutex held. * * Returns 0 if PF is an SRIOV-capable device and the * value of numvfs is valid. If not a PF return -ENOSYS; * if numvfs is invalid return -EINVAL; * if VFs already enabled, return -EBUSY. */ int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs) { if (!dev->is_physfn) return -ENOSYS; if (numvfs > dev->sriov->total_VFs) return -EINVAL; /* Shouldn't change if VFs already enabled */ if (dev->sriov->ctrl & PCI_SRIOV_CTRL_VFE) return -EBUSY; else dev->sriov->driver_max_VFs = numvfs; return 0; } EXPORT_SYMBOL_GPL(pci_sriov_set_totalvfs); #endif /* CONFIG_PCI_IOV */ backport-iwlwifi-dkms-7906/compat/compat-3.9.c000066400000000000000000000143161351064634500211270ustar00rootroot00000000000000/* * Copyright (c) 2013 Luis R. Rodriguez * * Backport functionality introduced in Linux 3.9. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include #include #include #include #include #include #include #include #include #include #include void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res) { void __iomem *dest_ptr; /* devm_request_and_ioremap() returns NULL on failure; translate that * into the ERR_PTR() convention of the 3.9 devm_ioremap_resource() API */ dest_ptr = devm_request_and_ioremap(dev, res); if (!dest_ptr) return (void __iomem *)ERR_PTR(-ENOMEM); return dest_ptr; } EXPORT_SYMBOL_GPL(devm_ioremap_resource); /** * eth_prepare_mac_addr_change - prepare for mac change * @dev: network device * @p: socket address */ int eth_prepare_mac_addr_change(struct net_device *dev, void *p) { struct sockaddr *addr = p; if (!(dev->priv_flags & IFF_LIVE_ADDR_CHANGE) && netif_running(dev)) return -EBUSY; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; return 0; } EXPORT_SYMBOL_GPL(eth_prepare_mac_addr_change); /** * eth_commit_mac_addr_change - commit mac change * @dev: network device * @p: socket address */ void eth_commit_mac_addr_change(struct net_device *dev, void *p) { struct sockaddr *addr = p; memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); } EXPORT_SYMBOL_GPL(eth_commit_mac_addr_change); void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q, const char *prefix) { static const char msg[] = "inet_frag_find: Fragment hash bucket" " list length grew over limit " __stringify(INETFRAGS_MAXDEPTH) ". 
Dropping fragment.\n"; if (PTR_ERR(q) == -ENOBUFS) LIMIT_NETDEBUG(KERN_WARNING "%s%s", prefix, msg); } EXPORT_SYMBOL_GPL(inet_frag_maybe_warn_overflow); void __sg_page_iter_start(struct sg_page_iter *piter, struct scatterlist *sglist, unsigned int nents, unsigned long pgoffset) { piter->__pg_advance = 0; piter->__nents = nents; piter->page = NULL; piter->sg = sglist; piter->sg_pgoffset = pgoffset; } EXPORT_SYMBOL_GPL(__sg_page_iter_start); static int sg_page_count(struct scatterlist *sg) { return PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT; } bool __sg_page_iter_next(struct sg_page_iter *piter) { if (!piter->__nents || !piter->sg) return false; piter->sg_pgoffset += piter->__pg_advance; piter->__pg_advance = 1; while (piter->sg_pgoffset >= sg_page_count(piter->sg)) { piter->sg_pgoffset -= sg_page_count(piter->sg); piter->sg = sg_next(piter->sg); if (!--piter->__nents || !piter->sg) return false; } piter->page = nth_page(sg_page(piter->sg), piter->sg_pgoffset); return true; } EXPORT_SYMBOL_GPL(__sg_page_iter_next); static bool sg_miter_get_next_page(struct sg_mapping_iter *miter) { if (!miter->__remaining) { struct scatterlist *sg; unsigned long pgoffset; if (!__sg_page_iter_next(&miter->piter)) return false; sg = miter->piter.sg; pgoffset = miter->piter.sg_pgoffset; miter->__offset = pgoffset ? 0 : sg->offset; miter->__remaining = sg->offset + sg->length - (pgoffset << PAGE_SHIFT) - miter->__offset; miter->__remaining = min_t(unsigned long, miter->__remaining, PAGE_SIZE - miter->__offset); } return true; } /** * sg_miter_start - start mapping iteration over a sg list * @miter: sg mapping iter to be started * @sgl: sg list to iterate over * @nents: number of sg entries * * Description: * Starts mapping iterator @miter. * * Context: * Don't care. */ void backport_sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl, unsigned int nents, unsigned int flags) { memset(miter, 0, sizeof(struct sg_mapping_iter)); __sg_page_iter_start(&miter->piter, sgl, nents, 0); WARN_ON(!(flags & (SG_MITER_TO_SG | SG_MITER_FROM_SG))); miter->__flags = flags; } EXPORT_SYMBOL_GPL(backport_sg_miter_start); /** * sg_miter_next - proceed mapping iterator to the next mapping * @miter: sg mapping iter to proceed * * Description: * Proceeds @miter to the next mapping. @miter should have been started * using sg_miter_start(). On successful return, @miter->page, * @miter->addr and @miter->length point to the current mapping. * * Context: * Preemption disabled if SG_MITER_ATOMIC. Preemption must stay disabled * till @miter is stopped. May sleep if !SG_MITER_ATOMIC. * * Returns: * true if @miter contains the next mapping. false if end of sg * list is reached. */ bool backport_sg_miter_next(struct sg_mapping_iter *miter) { sg_miter_stop(miter); /* * Get to the next page if necessary. * __remaining, __offset is adjusted by sg_miter_stop */ if (!sg_miter_get_next_page(miter)) return false; miter->page = sg_page_iter_page(&miter->piter); miter->consumed = miter->length = miter->__remaining; if (miter->__flags & SG_MITER_ATOMIC) miter->addr = kmap_atomic(miter->page) + miter->__offset; else miter->addr = kmap(miter->page) + miter->__offset; return true; } EXPORT_SYMBOL_GPL(backport_sg_miter_next); /** * sg_miter_stop - stop mapping iteration * @miter: sg mapping iter to be stopped * * Description: * Stops mapping iterator @miter. @miter should have been started * using sg_miter_start(). A stopped iteration can be resumed by * calling sg_miter_next() on it. 
This is useful when resources (kmap) * need to be released during iteration. * * Context: * Preemption disabled if the SG_MITER_ATOMIC is set. Don't care * otherwise. */ void backport_sg_miter_stop(struct sg_mapping_iter *miter) { WARN_ON(miter->consumed > miter->length); /* drop resources from the last iteration */ if (miter->addr) { miter->__offset += miter->consumed; miter->__remaining -= miter->consumed; if ((miter->__flags & SG_MITER_TO_SG) && !PageSlab(miter->page)) flush_kernel_dcache_page(miter->page); if (miter->__flags & SG_MITER_ATOMIC) { WARN_ON_ONCE(preemptible()); kunmap_atomic(miter->addr); } else kunmap(miter->page); miter->page = NULL; miter->addr = NULL; miter->length = 0; miter->consumed = 0; } } EXPORT_SYMBOL_GPL(backport_sg_miter_stop); backport-iwlwifi-dkms-7906/compat/drivers-base-devcoredump.c000066400000000000000000000236031351064634500242350ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0 /* * Copyright(c) 2014 Intel Mobile Communications GmbH * Copyright(c) 2015 Intel Deutschland GmbH * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * Author: Johannes Berg */ #include #include #include #include #include #include #include #include "backports.h" static struct class devcd_class; /* global disable flag, for security purposes */ static bool devcd_disabled; /* if data isn't read by userspace after 5 minutes then delete it */ #define DEVCD_TIMEOUT (HZ * 60 * 5) #if LINUX_VERSION_IS_LESS(3,11,0) static struct bin_attribute devcd_attr_data; #endif struct devcd_entry { struct device devcd_dev; void *data; size_t datalen; struct module *owner; ssize_t (*read)(char *buffer, loff_t offset, size_t count, void *data, size_t datalen); void (*free)(void *data); struct delayed_work del_wk; struct device *failing_dev; }; static struct devcd_entry *dev_to_devcd(struct device *dev) { return container_of(dev, struct devcd_entry, devcd_dev); } static void devcd_dev_release(struct device *dev) { struct devcd_entry *devcd = dev_to_devcd(dev); devcd->free(devcd->data); module_put(devcd->owner); /* * this seems racy, but I don't see a notifier or such on * a struct device to know when it goes away? 
*/ if (devcd->failing_dev->kobj.sd) sysfs_remove_link(&devcd->failing_dev->kobj, "devcoredump"); put_device(devcd->failing_dev); kfree(devcd); } static void devcd_del(struct work_struct *wk) { struct devcd_entry *devcd; devcd = container_of(wk, struct devcd_entry, del_wk.work); #if LINUX_VERSION_IS_LESS(3,11,0) device_remove_bin_file(&devcd->devcd_dev, &devcd_attr_data); #endif device_del(&devcd->devcd_dev); put_device(&devcd->devcd_dev); } static ssize_t devcd_data_read(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buffer, loff_t offset, size_t count) { struct device *dev = kobj_to_dev(kobj); struct devcd_entry *devcd = dev_to_devcd(dev); return devcd->read(buffer, offset, count, devcd->data, devcd->datalen); } static ssize_t devcd_data_write(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buffer, loff_t offset, size_t count) { struct device *dev = kobj_to_dev(kobj); struct devcd_entry *devcd = dev_to_devcd(dev); mod_delayed_work(system_wq, &devcd->del_wk, 0); return count; } static struct bin_attribute devcd_attr_data = { .attr = { .name = "data", .mode = S_IRUSR | S_IWUSR, }, .size = 0, .read = devcd_data_read, .write = devcd_data_write, }; #if LINUX_VERSION_IS_GEQ(3,11,0) static struct bin_attribute *devcd_dev_bin_attrs[] = { &devcd_attr_data, NULL, }; static const struct attribute_group devcd_dev_group = { .bin_attrs = devcd_dev_bin_attrs, }; static const struct attribute_group *devcd_dev_groups[] = { &devcd_dev_group, NULL, }; #endif /* LINUX_VERSION_IS_GEQ(3,11,0) */ static int devcd_free(struct device *dev, void *data) { struct devcd_entry *devcd = dev_to_devcd(dev); flush_delayed_work(&devcd->del_wk); return 0; } static ssize_t disabled_show(struct class *class, struct class_attribute *attr, char *buf) { return sprintf(buf, "%d\n", devcd_disabled); } static ssize_t disabled_store(struct class *class, struct class_attribute *attr, const char *buf, size_t count) { long tmp = simple_strtol(buf, NULL, 10); /* * This essentially makes the attribute write-once, since you can't * go back to not having it disabled. This is intentional, it serves * as a system lockdown feature. */ if (tmp != 1) return -EINVAL; devcd_disabled = true; class_for_each_device(&devcd_class, NULL, NULL, devcd_free); return count; } static CLASS_ATTR_RW(disabled); static struct attribute *devcd_class_attrs[] = { &class_attr_disabled.attr, NULL, }; #if LINUX_VERSION_IS_GEQ(4,10,0) ATTRIBUTE_GROUPS(devcd_class); #else #define BP_ATTR_GRP_STRUCT class_attribute ATTRIBUTE_GROUPS_BACKPORT(devcd_class); #endif static struct class devcd_class = { .name = "devcoredump", .owner = THIS_MODULE, .dev_release = devcd_dev_release, #if LINUX_VERSION_IS_GEQ(3,11,0) .dev_groups = devcd_dev_groups, #endif #if LINUX_VERSION_IS_GEQ(4,10,0) .class_groups = devcd_class_groups, #else .class_attrs = devcd_class_dev_attrs, #endif }; static ssize_t devcd_readv(char *buffer, loff_t offset, size_t count, void *data, size_t datalen) { if (offset > datalen) return -EINVAL; if (offset + count > datalen) count = datalen - offset; if (count) memcpy(buffer, ((u8 *)data) + offset, count); return count; } static void devcd_freev(void *data) { vfree(data); } /** * dev_coredumpv - create device coredump with vmalloc data * @dev: the struct device for the crashed device * @data: vmalloc data containing the device coredump * @datalen: length of the data * @gfp: allocation flags * * This function takes ownership of the vmalloc'ed data and will free * it when it is no longer used. 
See dev_coredumpm() for more information. */ void dev_coredumpv(struct device *dev, void *data, size_t datalen, gfp_t gfp) { dev_coredumpm(dev, NULL, data, datalen, gfp, devcd_readv, devcd_freev); } EXPORT_SYMBOL_GPL(dev_coredumpv); static int devcd_match_failing(struct device *dev, const void *failing) { struct devcd_entry *devcd = dev_to_devcd(dev); return devcd->failing_dev == failing; } /** * devcd_free_sgtable - free all the memory of the given scatterlist table * (i.e. both pages and scatterlist instances) * NOTE: if two tables allocated with devcd_alloc_sgtable and then chained * using the sg_chain function then that function should be called only once * on the chained table * @table: pointer to sg_table to free */ static void devcd_free_sgtable(void *data) { _devcd_free_sgtable(data); } /** * devcd_read_from_table - copy data from sg_table to a given buffer * and return the number of bytes read * @buffer: the buffer to copy the data to it * @buf_len: the length of the buffer * @data: the scatterlist table to copy from * @offset: start copy from @offset@ bytes from the head of the data * in the given scatterlist * @data_len: the length of the data in the sg_table */ static ssize_t devcd_read_from_sgtable(char *buffer, loff_t offset, size_t buf_len, void *data, size_t data_len) { struct scatterlist *table = data; if (offset > data_len) return -EINVAL; if (offset + buf_len > data_len) buf_len = data_len - offset; return sg_pcopy_to_buffer(table, sg_nents(table), buffer, buf_len, offset); } /** * dev_coredumpm - create device coredump with read/free methods * @dev: the struct device for the crashed device * @owner: the module that contains the read/free functions, use %THIS_MODULE * @data: data cookie for the @read/@free functions * @datalen: length of the data * @gfp: allocation flags * @read: function to read from the given buffer * @free: function to free the given buffer * * Creates a new device coredump for the given device. If a previous one hasn't * been read yet, the new coredump is discarded. The data lifetime is determined * by the device coredump framework and when it is no longer needed the @free * function will be called to free the data. 
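 */

/*
 * A hypothetical driver-side caller of the devcoredump API above
 * (example_report_fw_crash is illustrative only). dev_coredumpv()
 * requires vmalloc'ed memory because its default free callback is
 * vfree(); on success, ownership of 'dump' passes to the framework,
 * which exposes it under /sys/class/devcoredump and reaps it after
 * DEVCD_TIMEOUT.
 */
static void example_report_fw_crash(struct device *dev,
				    const void *fw_state, size_t len)
{
	void *dump = vmalloc(len);

	if (!dump)
		return;
	memcpy(dump, fw_state, len);
	dev_coredumpv(dev, dump, len, GFP_KERNEL);
}

/*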
*/ void dev_coredumpm(struct device *dev, struct module *owner, void *data, size_t datalen, gfp_t gfp, ssize_t (*read)(char *buffer, loff_t offset, size_t count, void *data, size_t datalen), void (*free)(void *data)) { static atomic_t devcd_count = ATOMIC_INIT(0); struct devcd_entry *devcd; struct device *existing; if (devcd_disabled) goto free; existing = class_find_device(&devcd_class, NULL, dev, devcd_match_failing); if (existing) { put_device(existing); goto free; } if (!try_module_get(owner)) goto free; devcd = kzalloc(sizeof(*devcd), gfp); if (!devcd) goto put_module; devcd->owner = owner; devcd->data = data; devcd->datalen = datalen; devcd->read = read; devcd->free = free; devcd->failing_dev = get_device(dev); device_initialize(&devcd->devcd_dev); dev_set_name(&devcd->devcd_dev, "devcd%d", atomic_inc_return(&devcd_count)); devcd->devcd_dev.class = &devcd_class; if (device_add(&devcd->devcd_dev)) goto put_device; #if LINUX_VERSION_IS_LESS(3,11,0) if (device_create_bin_file(&devcd->devcd_dev, &devcd_attr_data)) goto put_device; #endif if (sysfs_create_link(&devcd->devcd_dev.kobj, &dev->kobj, "failing_device")) /* nothing - symlink will be missing */; if (sysfs_create_link(&dev->kobj, &devcd->devcd_dev.kobj, "devcoredump")) /* nothing - symlink will be missing */; INIT_DELAYED_WORK(&devcd->del_wk, devcd_del); schedule_delayed_work(&devcd->del_wk, DEVCD_TIMEOUT); return; put_device: put_device(&devcd->devcd_dev); put_module: module_put(owner); free: free(data); } EXPORT_SYMBOL_GPL(dev_coredumpm); /** * dev_coredumpmsg - create device coredump that uses scatterlist as data * parameter * @dev: the struct device for the crashed device * @table: the dump data * @datalen: length of the data * @gfp: allocation flags * * Creates a new device coredump for the given device. If a previous one hasn't * been read yet, the new coredump is discarded. The data lifetime is determined * by the device coredump framework and when it is no longer needed * it will free the data. */ void dev_coredumpsg(struct device *dev, struct scatterlist *table, size_t datalen, gfp_t gfp) { dev_coredumpm(dev, NULL, table, datalen, gfp, devcd_read_from_sgtable, devcd_free_sgtable); } EXPORT_SYMBOL_GPL(dev_coredumpsg); int __init devcoredump_init(void) { init_devcd_class_attrs(); return class_register(&devcd_class); } void __exit devcoredump_exit(void) { class_for_each_device(&devcd_class, NULL, NULL, devcd_free); class_unregister(&devcd_class); } backport-iwlwifi-dkms-7906/compat/hid-ids.h000066400000000000000000000752131351064634500206660ustar00rootroot00000000000000/* * USB HID quirks support for Linux * * Copyright (c) 1999 Andreas Gal * Copyright (c) 2000-2005 Vojtech Pavlik * Copyright (c) 2005 Michael Haboustak for Concept2, Inc * Copyright (c) 2006-2007 Jiri Kosina */ /* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. 
*/ #ifndef HID_IDS_H_FILE #define HID_IDS_H_FILE #define USB_VENDOR_ID_3M 0x0596 #define USB_DEVICE_ID_3M1968 0x0500 #define USB_DEVICE_ID_3M2256 0x0502 #define USB_DEVICE_ID_3M3266 0x0506 #define USB_VENDOR_ID_A4TECH 0x09da #define USB_DEVICE_ID_A4TECH_WCP32PU 0x0006 #define USB_DEVICE_ID_A4TECH_X5_005D 0x000a #define USB_DEVICE_ID_A4TECH_RP_649 0x001a #define USB_VENDOR_ID_AASHIMA 0x06d6 #define USB_DEVICE_ID_AASHIMA_GAMEPAD 0x0025 #define USB_DEVICE_ID_AASHIMA_PREDATOR 0x0026 #define USB_VENDOR_ID_ACECAD 0x0460 #define USB_DEVICE_ID_ACECAD_FLAIR 0x0004 #define USB_DEVICE_ID_ACECAD_302 0x0008 #define USB_VENDOR_ID_ACRUX 0x1a34 #define USB_VENDOR_ID_ACTIONSTAR 0x2101 #define USB_DEVICE_ID_ACTIONSTAR_1011 0x1011 #define USB_VENDOR_ID_ADS_TECH 0x06e1 #define USB_DEVICE_ID_ADS_TECH_RADIO_SI470X 0xa155 #define USB_VENDOR_ID_AFATECH 0x15a4 #define USB_DEVICE_ID_AFATECH_AF9016 0x9016 #define USB_VENDOR_ID_AIPTEK 0x08ca #define USB_DEVICE_ID_AIPTEK_01 0x0001 #define USB_DEVICE_ID_AIPTEK_10 0x0010 #define USB_DEVICE_ID_AIPTEK_20 0x0020 #define USB_DEVICE_ID_AIPTEK_21 0x0021 #define USB_DEVICE_ID_AIPTEK_22 0x0022 #define USB_DEVICE_ID_AIPTEK_23 0x0023 #define USB_DEVICE_ID_AIPTEK_24 0x0024 #define USB_VENDOR_ID_AIRCABLE 0x16CA #define USB_DEVICE_ID_AIRCABLE1 0x1502 #define USB_VENDOR_ID_AIREN 0x1a2c #define USB_DEVICE_ID_AIREN_SLIMPLUS 0x0002 #define USB_VENDOR_ID_ALCOR 0x058f #define USB_DEVICE_ID_ALCOR_USBRS232 0x9720 #define USB_VENDOR_ID_ALPS 0x0433 #define USB_DEVICE_ID_IBM_GAMEPAD 0x1101 #define USB_VENDOR_ID_APPLE 0x05ac #define USB_DEVICE_ID_APPLE_MIGHTYMOUSE 0x0304 #define USB_DEVICE_ID_APPLE_MAGICMOUSE 0x030d #define USB_DEVICE_ID_APPLE_MAGICTRACKPAD 0x030e #define USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI 0x020e #define USB_DEVICE_ID_APPLE_FOUNTAIN_ISO 0x020f #define USB_DEVICE_ID_APPLE_GEYSER_ANSI 0x0214 #define USB_DEVICE_ID_APPLE_GEYSER_ISO 0x0215 #define USB_DEVICE_ID_APPLE_GEYSER_JIS 0x0216 #define USB_DEVICE_ID_APPLE_GEYSER3_ANSI 0x0217 #define USB_DEVICE_ID_APPLE_GEYSER3_ISO 0x0218 #define USB_DEVICE_ID_APPLE_GEYSER3_JIS 0x0219 #define USB_DEVICE_ID_APPLE_GEYSER4_ANSI 0x021a #define USB_DEVICE_ID_APPLE_GEYSER4_ISO 0x021b #define USB_DEVICE_ID_APPLE_GEYSER4_JIS 0x021c #define USB_DEVICE_ID_APPLE_ALU_MINI_ANSI 0x021d #define USB_DEVICE_ID_APPLE_ALU_MINI_ISO 0x021e #define USB_DEVICE_ID_APPLE_ALU_MINI_JIS 0x021f #define USB_DEVICE_ID_APPLE_ALU_ANSI 0x0220 #define USB_DEVICE_ID_APPLE_ALU_ISO 0x0221 #define USB_DEVICE_ID_APPLE_ALU_JIS 0x0222 #define USB_DEVICE_ID_APPLE_WELLSPRING_ANSI 0x0223 #define USB_DEVICE_ID_APPLE_WELLSPRING_ISO 0x0224 #define USB_DEVICE_ID_APPLE_WELLSPRING_JIS 0x0225 #define USB_DEVICE_ID_APPLE_GEYSER4_HF_ANSI 0x0229 #define USB_DEVICE_ID_APPLE_GEYSER4_HF_ISO 0x022a #define USB_DEVICE_ID_APPLE_GEYSER4_HF_JIS 0x022b #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI 0x022c #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_ISO 0x022d #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS 0x022e #define USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI 0x0230 #define USB_DEVICE_ID_APPLE_WELLSPRING2_ISO 0x0231 #define USB_DEVICE_ID_APPLE_WELLSPRING2_JIS 0x0232 #define USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI 0x0236 #define USB_DEVICE_ID_APPLE_WELLSPRING3_ISO 0x0237 #define USB_DEVICE_ID_APPLE_WELLSPRING3_JIS 0x0238 #define USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI 0x023f #define USB_DEVICE_ID_APPLE_WELLSPRING4_ISO 0x0240 #define USB_DEVICE_ID_APPLE_WELLSPRING4_JIS 0x0241 #define USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI 0x0242 #define USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO 0x0243 #define USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS 
0x0244 #define USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI 0x0245 #define USB_DEVICE_ID_APPLE_WELLSPRING5_ISO 0x0246 #define USB_DEVICE_ID_APPLE_WELLSPRING5_JIS 0x0247 #define USB_DEVICE_ID_APPLE_ALU_REVB_ANSI 0x024f #define USB_DEVICE_ID_APPLE_ALU_REVB_ISO 0x0250 #define USB_DEVICE_ID_APPLE_ALU_REVB_JIS 0x0251 #define USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI 0x0252 #define USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO 0x0253 #define USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS 0x0254 #define USB_DEVICE_ID_APPLE_WELLSPRING7A_ANSI 0x0259 #define USB_DEVICE_ID_APPLE_WELLSPRING7A_ISO 0x025a #define USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS 0x025b #define USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI 0x0249 #define USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO 0x024a #define USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS 0x024b #define USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI 0x024c #define USB_DEVICE_ID_APPLE_WELLSPRING6_ISO 0x024d #define USB_DEVICE_ID_APPLE_WELLSPRING6_JIS 0x024e #define USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI 0x0262 #define USB_DEVICE_ID_APPLE_WELLSPRING7_ISO 0x0263 #define USB_DEVICE_ID_APPLE_WELLSPRING7_JIS 0x0264 #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI 0x0239 #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO 0x023a #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS 0x023b #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI 0x0255 #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO 0x0256 #define USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY 0x030a #define USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY 0x030b #define USB_DEVICE_ID_APPLE_ATV_IRCONTROL 0x8241 #define USB_DEVICE_ID_APPLE_IRCONTROL4 0x8242 #define USB_VENDOR_ID_ASUS 0x0486 #define USB_DEVICE_ID_ASUS_T91MT 0x0185 #define USB_DEVICE_ID_ASUSTEK_MULTITOUCH_YFO 0x0186 #define USB_VENDOR_ID_ASUSTEK 0x0b05 #define USB_DEVICE_ID_ASUSTEK_LCM 0x1726 #define USB_DEVICE_ID_ASUSTEK_LCM2 0x175b #define USB_VENDOR_ID_ATEN 0x0557 #define USB_DEVICE_ID_ATEN_UC100KM 0x2004 #define USB_DEVICE_ID_ATEN_CS124U 0x2202 #define USB_DEVICE_ID_ATEN_2PORTKVM 0x2204 #define USB_DEVICE_ID_ATEN_4PORTKVM 0x2205 #define USB_DEVICE_ID_ATEN_4PORTKVMC 0x2208 #define USB_VENDOR_ID_ATMEL 0x03eb #define USB_DEVICE_ID_ATMEL_MULTITOUCH 0x211c #define USB_DEVICE_ID_ATMEL_MXT_DIGITIZER 0x2118 #define USB_VENDOR_ID_AUREAL 0x0755 #define USB_DEVICE_ID_AUREAL_W01RN 0x2626 #define USB_VENDOR_ID_AVERMEDIA 0x07ca #define USB_DEVICE_ID_AVER_FM_MR800 0xb800 #define USB_VENDOR_ID_AXENTIA 0x12cf #define USB_DEVICE_ID_AXENTIA_FM_RADIO 0x7111 #define USB_VENDOR_ID_BAANTO 0x2453 #define USB_DEVICE_ID_BAANTO_MT_190W2 0x0100 #define USB_VENDOR_ID_BELKIN 0x050d #define USB_DEVICE_ID_FLIP_KVM 0x3201 #define USB_VENDOR_ID_BERKSHIRE 0x0c98 #define USB_DEVICE_ID_BERKSHIRE_PCWD 0x1140 #define USB_VENDOR_ID_BTC 0x046e #define USB_DEVICE_ID_BTC_EMPREX_REMOTE 0x5578 #define USB_DEVICE_ID_BTC_EMPREX_REMOTE_2 0x5577 #define USB_VENDOR_ID_CANDO 0x2087 #define USB_DEVICE_ID_CANDO_PIXCIR_MULTI_TOUCH 0x0703 #define USB_DEVICE_ID_CANDO_MULTI_TOUCH 0x0a01 #define USB_DEVICE_ID_CANDO_MULTI_TOUCH_10_1 0x0a02 #define USB_DEVICE_ID_CANDO_MULTI_TOUCH_11_6 0x0b03 #define USB_DEVICE_ID_CANDO_MULTI_TOUCH_15_6 0x0f01 #define USB_VENDOR_ID_CH 0x068e #define USB_DEVICE_ID_CH_PRO_THROTTLE 0x00f1 #define USB_DEVICE_ID_CH_PRO_PEDALS 0x00f2 #define USB_DEVICE_ID_CH_FIGHTERSTICK 0x00f3 #define USB_DEVICE_ID_CH_COMBATSTICK 0x00f4 #define USB_DEVICE_ID_CH_FLIGHT_SIM_ECLIPSE_YOKE 0x0051 #define USB_DEVICE_ID_CH_FLIGHT_SIM_YOKE 0x00ff #define USB_DEVICE_ID_CH_3AXIS_5BUTTON_STICK 0x00d3 #define USB_DEVICE_ID_CH_AXIS_295 0x001c #define USB_VENDOR_ID_CHERRY 0x046a #define 
USB_DEVICE_ID_CHERRY_CYMOTION 0x0023 #define USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR 0x0027 #define USB_VENDOR_ID_CHIC 0x05fe #define USB_DEVICE_ID_CHIC_GAMEPAD 0x0014 #define USB_VENDOR_ID_CHICONY 0x04f2 #define USB_DEVICE_ID_CHICONY_TACTICAL_PAD 0x0418 #define USB_DEVICE_ID_CHICONY_MULTI_TOUCH 0xb19d #define USB_DEVICE_ID_CHICONY_WIRELESS 0x0618 #define USB_DEVICE_ID_CHICONY_WIRELESS2 0x1123 #define USB_DEVICE_ID_CHICONY_AK1D 0x1125 #define USB_VENDOR_ID_CHUNGHWAT 0x2247 #define USB_DEVICE_ID_CHUNGHWAT_MULTITOUCH 0x0001 #define USB_VENDOR_ID_CIDC 0x1677 #define USB_VENDOR_ID_CMEDIA 0x0d8c #define USB_DEVICE_ID_CM109 0x000e #define USB_VENDOR_ID_CODEMERCS 0x07c0 #define USB_DEVICE_ID_CODEMERCS_IOW_FIRST 0x1500 #define USB_DEVICE_ID_CODEMERCS_IOW_LAST 0x15ff #define USB_VENDOR_ID_CREATIVELABS 0x041e #define USB_DEVICE_ID_PRODIKEYS_PCMIDI 0x2801 #define USB_VENDOR_ID_CVTOUCH 0x1ff7 #define USB_DEVICE_ID_CVTOUCH_SCREEN 0x0013 #define USB_VENDOR_ID_CYGNAL 0x10c4 #define USB_DEVICE_ID_CYGNAL_RADIO_SI470X 0x818a #define USB_VENDOR_ID_CYPRESS 0x04b4 #define USB_DEVICE_ID_CYPRESS_MOUSE 0x0001 #define USB_DEVICE_ID_CYPRESS_HIDCOM 0x5500 #define USB_DEVICE_ID_CYPRESS_ULTRAMOUSE 0x7417 #define USB_DEVICE_ID_CYPRESS_BARCODE_1 0xde61 #define USB_DEVICE_ID_CYPRESS_BARCODE_2 0xde64 #define USB_DEVICE_ID_CYPRESS_BARCODE_3 0xbca1 #define USB_DEVICE_ID_CYPRESS_BARCODE_4 0xed81 #define USB_DEVICE_ID_CYPRESS_TRUETOUCH 0xc001 #define USB_VENDOR_ID_DEALEXTREAME 0x10c5 #define USB_DEVICE_ID_DEALEXTREAME_RADIO_SI4701 0x819a #define USB_VENDOR_ID_DELORME 0x1163 #define USB_DEVICE_ID_DELORME_EARTHMATE 0x0100 #define USB_DEVICE_ID_DELORME_EM_LT20 0x0200 #define USB_VENDOR_ID_DMI 0x0c0b #define USB_DEVICE_ID_DMI_ENC 0x5fab #define USB_VENDOR_ID_DRAGONRISE 0x0079 #define USB_VENDOR_ID_DWAV 0x0eef #define USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER 0x0001 #define USB_DEVICE_ID_DWAV_TOUCHCONTROLLER 0x0002 #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480D 0x480d #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480E 0x480e #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7207 0x7207 #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_720C 0x720c #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7224 0x7224 #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_722A 0x722A #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_725E 0x725e #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7262 0x7262 #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_726B 0x726b #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72AA 0x72aa #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72A1 0x72a1 #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72FA 0x72fa #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7302 0x7302 #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7349 0x7349 #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_73F7 0x73f7 #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001 0xa001 #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7224 0x7224 #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72D0 0x72d0 #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72C4 0x72c4 #define USB_VENDOR_ID_ELECOM 0x056e #define USB_DEVICE_ID_ELECOM_BM084 0x0061 #define USB_VENDOR_ID_DREAM_CHEEKY 0x1d34 #define USB_VENDOR_ID_ELO 0x04E7 #define USB_DEVICE_ID_ELO_TS2515 0x0022 #define USB_DEVICE_ID_ELO_TS2700 0x0020 #define USB_VENDOR_ID_EMS 0x2006 #define USB_DEVICE_ID_EMS_TRIO_LINKER_PLUS_II 0x0118 #define USB_VENDOR_ID_FLATFROG 0x25b5 #define USB_DEVICE_ID_MULTITOUCH_3200 0x0002 #define USB_VENDOR_ID_ESSENTIAL_REALITY 0x0d7f #define USB_DEVICE_ID_ESSENTIAL_REALITY_P5 0x0100 #define USB_VENDOR_ID_ETT 0x0664 #define USB_DEVICE_ID_TC5UH 0x0309 
#define USB_DEVICE_ID_TC4UM 0x0306 #define USB_VENDOR_ID_ETURBOTOUCH 0x22b9 #define USB_DEVICE_ID_ETURBOTOUCH 0x0006 #define USB_VENDOR_ID_EZKEY 0x0518 #define USB_DEVICE_ID_BTC_8193 0x0002 #define USB_VENDOR_ID_FREESCALE 0x15A2 #define USB_DEVICE_ID_FREESCALE_MX28 0x004F #define USB_VENDOR_ID_FRUCTEL 0x25B6 #define USB_DEVICE_ID_GAMETEL_MT_MODE 0x0002 #define USB_VENDOR_ID_GAMERON 0x0810 #define USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR 0x0001 #define USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR 0x0002 #define USB_VENDOR_ID_GENERAL_TOUCH 0x0dfc #define USB_DEVICE_ID_GENERAL_TOUCH_WIN7_TWOFINGERS 0x0003 #define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PWT_TENFINGERS 0x0100 #define USB_VENDOR_ID_GLAB 0x06c2 #define USB_DEVICE_ID_4_PHIDGETSERVO_30 0x0038 #define USB_DEVICE_ID_1_PHIDGETSERVO_30 0x0039 #define USB_DEVICE_ID_0_0_4_IF_KIT 0x0040 #define USB_DEVICE_ID_0_16_16_IF_KIT 0x0044 #define USB_DEVICE_ID_8_8_8_IF_KIT 0x0045 #define USB_DEVICE_ID_0_8_7_IF_KIT 0x0051 #define USB_DEVICE_ID_0_8_8_IF_KIT 0x0053 #define USB_DEVICE_ID_PHIDGET_MOTORCONTROL 0x0058 #define USB_VENDOR_ID_GOODTOUCH 0x1aad #define USB_DEVICE_ID_GOODTOUCH_000f 0x000f #define USB_VENDOR_ID_GOTOP 0x08f2 #define USB_DEVICE_ID_SUPER_Q2 0x007f #define USB_DEVICE_ID_GOGOPEN 0x00ce #define USB_DEVICE_ID_PENPOWER 0x00f4 #define USB_VENDOR_ID_GREENASIA 0x0e8f #define USB_DEVICE_ID_GREENASIA_DUAL_USB_JOYPAD 0x3013 #define USB_VENDOR_ID_GRETAGMACBETH 0x0971 #define USB_DEVICE_ID_GRETAGMACBETH_HUEY 0x2005 #define USB_VENDOR_ID_GRIFFIN 0x077d #define USB_DEVICE_ID_POWERMATE 0x0410 #define USB_DEVICE_ID_SOUNDKNOB 0x04AA #define USB_DEVICE_ID_RADIOSHARK 0x627a #define USB_VENDOR_ID_GTCO 0x078c #define USB_DEVICE_ID_GTCO_90 0x0090 #define USB_DEVICE_ID_GTCO_100 0x0100 #define USB_DEVICE_ID_GTCO_101 0x0101 #define USB_DEVICE_ID_GTCO_103 0x0103 #define USB_DEVICE_ID_GTCO_104 0x0104 #define USB_DEVICE_ID_GTCO_105 0x0105 #define USB_DEVICE_ID_GTCO_106 0x0106 #define USB_DEVICE_ID_GTCO_107 0x0107 #define USB_DEVICE_ID_GTCO_108 0x0108 #define USB_DEVICE_ID_GTCO_200 0x0200 #define USB_DEVICE_ID_GTCO_201 0x0201 #define USB_DEVICE_ID_GTCO_202 0x0202 #define USB_DEVICE_ID_GTCO_203 0x0203 #define USB_DEVICE_ID_GTCO_204 0x0204 #define USB_DEVICE_ID_GTCO_205 0x0205 #define USB_DEVICE_ID_GTCO_206 0x0206 #define USB_DEVICE_ID_GTCO_207 0x0207 #define USB_DEVICE_ID_GTCO_300 0x0300 #define USB_DEVICE_ID_GTCO_301 0x0301 #define USB_DEVICE_ID_GTCO_302 0x0302 #define USB_DEVICE_ID_GTCO_303 0x0303 #define USB_DEVICE_ID_GTCO_304 0x0304 #define USB_DEVICE_ID_GTCO_305 0x0305 #define USB_DEVICE_ID_GTCO_306 0x0306 #define USB_DEVICE_ID_GTCO_307 0x0307 #define USB_DEVICE_ID_GTCO_308 0x0308 #define USB_DEVICE_ID_GTCO_309 0x0309 #define USB_DEVICE_ID_GTCO_400 0x0400 #define USB_DEVICE_ID_GTCO_401 0x0401 #define USB_DEVICE_ID_GTCO_402 0x0402 #define USB_DEVICE_ID_GTCO_403 0x0403 #define USB_DEVICE_ID_GTCO_404 0x0404 #define USB_DEVICE_ID_GTCO_405 0x0405 #define USB_DEVICE_ID_GTCO_500 0x0500 #define USB_DEVICE_ID_GTCO_501 0x0501 #define USB_DEVICE_ID_GTCO_502 0x0502 #define USB_DEVICE_ID_GTCO_503 0x0503 #define USB_DEVICE_ID_GTCO_504 0x0504 #define USB_DEVICE_ID_GTCO_1000 0x1000 #define USB_DEVICE_ID_GTCO_1001 0x1001 #define USB_DEVICE_ID_GTCO_1002 0x1002 #define USB_DEVICE_ID_GTCO_1003 0x1003 #define USB_DEVICE_ID_GTCO_1004 0x1004 #define USB_DEVICE_ID_GTCO_1005 0x1005 #define USB_DEVICE_ID_GTCO_1006 0x1006 #define USB_DEVICE_ID_GTCO_1007 0x1007 #define USB_VENDOR_ID_GYRATION 0x0c16 #define USB_DEVICE_ID_GYRATION_REMOTE 0x0002 #define USB_DEVICE_ID_GYRATION_REMOTE_2 0x0003 #define 
USB_DEVICE_ID_GYRATION_REMOTE_3 0x0008 #define USB_VENDOR_ID_HANWANG 0x0b57 #define USB_DEVICE_ID_HANWANG_TABLET_FIRST 0x5000 #define USB_DEVICE_ID_HANWANG_TABLET_LAST 0x8fff #define USB_VENDOR_ID_HANVON 0x20b3 #define USB_DEVICE_ID_HANVON_MULTITOUCH 0x0a18 #define USB_VENDOR_ID_HANVON_ALT 0x22ed #define USB_DEVICE_ID_HANVON_ALT_MULTITOUCH 0x1010 #define USB_VENDOR_ID_HAPP 0x078b #define USB_DEVICE_ID_UGCI_DRIVING 0x0010 #define USB_DEVICE_ID_UGCI_FLYING 0x0020 #define USB_DEVICE_ID_UGCI_FIGHTING 0x0030 #define USB_VENDOR_ID_IDEACOM 0x1cb6 #define USB_DEVICE_ID_IDEACOM_IDC6650 0x6650 #define USB_DEVICE_ID_IDEACOM_IDC6651 0x6651 #define USB_VENDOR_ID_ILITEK 0x222a #define USB_DEVICE_ID_ILITEK_MULTITOUCH 0x0001 #define USB_VENDOR_ID_ION 0x15e4 #define USB_DEVICE_ID_ICADE 0x0132 #define USB_VENDOR_ID_HOLTEK 0x1241 #define USB_DEVICE_ID_HOLTEK_ON_LINE_GRIP 0x5015 #define USB_VENDOR_ID_HOLTEK_ALT 0x04d9 #define USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD 0xa055 #define USB_VENDOR_ID_IMATION 0x0718 #define USB_DEVICE_ID_DISC_STAKKA 0xd000 #define USB_VENDOR_ID_INTEL_8086 0x8086 #define USB_VENDOR_ID_INTEL_8087 0x8087 #define USB_DEVICE_ID_SENSOR_HUB_1020 0x1020 #define USB_DEVICE_ID_SENSOR_HUB_09FA 0x09FA #define USB_VENDOR_ID_IRTOUCHSYSTEMS 0x6615 #define USB_DEVICE_ID_IRTOUCH_INFRARED_USB 0x0070 #define USB_VENDOR_ID_JESS 0x0c45 #define USB_DEVICE_ID_JESS_YUREX 0x1010 #define USB_VENDOR_ID_KBGEAR 0x084e #define USB_DEVICE_ID_KBGEAR_JAMSTUDIO 0x1001 #define USB_VENDOR_ID_KENSINGTON 0x047d #define USB_DEVICE_ID_KS_SLIMBLADE 0x2041 #define USB_VENDOR_ID_KWORLD 0x1b80 #define USB_DEVICE_ID_KWORLD_RADIO_FM700 0xd700 #define USB_VENDOR_ID_KEYTOUCH 0x0926 #define USB_DEVICE_ID_KEYTOUCH_IEC 0x3333 #define USB_VENDOR_ID_KYE 0x0458 #define USB_DEVICE_ID_KYE_ERGO_525V 0x0087 #define USB_DEVICE_ID_KYE_GPEN_560 0x5003 #define USB_DEVICE_ID_KYE_EASYPEN_I405X 0x5010 #define USB_DEVICE_ID_KYE_MOUSEPEN_I608X 0x5011 #define USB_DEVICE_ID_KYE_EASYPEN_M610X 0x5013 #define USB_VENDOR_ID_LABTEC 0x1020 #define USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD 0x0006 #define USB_VENDOR_ID_LCPOWER 0x1241 #define USB_DEVICE_ID_LCPOWER_LC1000 0xf767 #define USB_VENDOR_ID_LD 0x0f11 #define USB_DEVICE_ID_LD_CASSY 0x1000 #define USB_DEVICE_ID_LD_CASSY2 0x1001 #define USB_DEVICE_ID_LD_POCKETCASSY 0x1010 #define USB_DEVICE_ID_LD_POCKETCASSY2 0x1011 #define USB_DEVICE_ID_LD_MOBILECASSY 0x1020 #define USB_DEVICE_ID_LD_MOBILECASSY2 0x1021 #define USB_DEVICE_ID_LD_MICROCASSYVOLTAGE 0x1031 #define USB_DEVICE_ID_LD_MICROCASSYCURRENT 0x1032 #define USB_DEVICE_ID_LD_MICROCASSYTIME 0x1033 #define USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE 0x1035 #define USB_DEVICE_ID_LD_MICROCASSYPH 0x1038 #define USB_DEVICE_ID_LD_JWM 0x1080 #define USB_DEVICE_ID_LD_DMMP 0x1081 #define USB_DEVICE_ID_LD_UMIP 0x1090 #define USB_DEVICE_ID_LD_UMIC 0x10A0 #define USB_DEVICE_ID_LD_UMIB 0x10B0 #define USB_DEVICE_ID_LD_XRAY 0x1100 #define USB_DEVICE_ID_LD_XRAY2 0x1101 #define USB_DEVICE_ID_LD_XRAYCT 0x1110 #define USB_DEVICE_ID_LD_VIDEOCOM 0x1200 #define USB_DEVICE_ID_LD_MOTOR 0x1210 #define USB_DEVICE_ID_LD_COM3LAB 0x2000 #define USB_DEVICE_ID_LD_TELEPORT 0x2010 #define USB_DEVICE_ID_LD_NETWORKANALYSER 0x2020 #define USB_DEVICE_ID_LD_POWERCONTROL 0x2030 #define USB_DEVICE_ID_LD_MACHINETEST 0x2040 #define USB_DEVICE_ID_LD_MOSTANALYSER 0x2050 #define USB_DEVICE_ID_LD_MOSTANALYSER2 0x2051 #define USB_DEVICE_ID_LD_ABSESP 0x2060 #define USB_DEVICE_ID_LD_AUTODATABUS 0x2070 #define USB_DEVICE_ID_LD_MCT 0x2080 #define USB_DEVICE_ID_LD_HYBRID 0x2090 #define USB_DEVICE_ID_LD_HEATCONTROL 
0x20A0 #define USB_VENDOR_ID_LENOVO 0x17ef #define USB_DEVICE_ID_LENOVO_TPKBD 0x6009 #define USB_VENDOR_ID_LG 0x1fd2 #define USB_DEVICE_ID_LG_MULTITOUCH 0x0064 #define USB_VENDOR_ID_LOGITECH 0x046d #define USB_DEVICE_ID_LOGITECH_AUDIOHUB 0x0a0e #define USB_DEVICE_ID_LOGITECH_RECEIVER 0xc101 #define USB_DEVICE_ID_LOGITECH_HARMONY_FIRST 0xc110 #define USB_DEVICE_ID_LOGITECH_HARMONY_LAST 0xc14f #define USB_DEVICE_ID_LOGITECH_HARMONY_PS3 0x0306 #define USB_DEVICE_ID_LOGITECH_RUMBLEPAD_CORD 0xc20a #define USB_DEVICE_ID_LOGITECH_RUMBLEPAD 0xc211 #define USB_DEVICE_ID_LOGITECH_EXTREME_3D 0xc215 #define USB_DEVICE_ID_LOGITECH_RUMBLEPAD2 0xc218 #define USB_DEVICE_ID_LOGITECH_RUMBLEPAD2_2 0xc219 #define USB_DEVICE_ID_LOGITECH_WINGMAN_F3D 0xc283 #define USB_DEVICE_ID_LOGITECH_FORCE3D_PRO 0xc286 #define USB_DEVICE_ID_LOGITECH_FLIGHT_SYSTEM_G940 0xc287 #define USB_DEVICE_ID_LOGITECH_WHEEL 0xc294 #define USB_DEVICE_ID_LOGITECH_WINGMAN_FFG 0xc293 #define USB_DEVICE_ID_LOGITECH_MOMO_WHEEL 0xc295 #define USB_DEVICE_ID_LOGITECH_DFP_WHEEL 0xc298 #define USB_DEVICE_ID_LOGITECH_G25_WHEEL 0xc299 #define USB_DEVICE_ID_LOGITECH_DFGT_WHEEL 0xc29a #define USB_DEVICE_ID_LOGITECH_G27_WHEEL 0xc29b #define USB_DEVICE_ID_LOGITECH_WII_WHEEL 0xc29c #define USB_DEVICE_ID_LOGITECH_ELITE_KBD 0xc30a #define USB_DEVICE_ID_S510_RECEIVER 0xc50c #define USB_DEVICE_ID_S510_RECEIVER_2 0xc517 #define USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500 0xc512 #define USB_DEVICE_ID_MX3000_RECEIVER 0xc513 #define USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER 0xc52b #define USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2 0xc532 #define USB_DEVICE_ID_SPACETRAVELLER 0xc623 #define USB_DEVICE_ID_SPACENAVIGATOR 0xc626 #define USB_DEVICE_ID_DINOVO_DESKTOP 0xc704 #define USB_DEVICE_ID_DINOVO_EDGE 0xc714 #define USB_DEVICE_ID_DINOVO_MINI 0xc71f #define USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2 0xca03 #define USB_VENDOR_ID_LUMIO 0x202e #define USB_DEVICE_ID_CRYSTALTOUCH 0x0006 #define USB_DEVICE_ID_CRYSTALTOUCH_DUAL 0x0007 #define USB_VENDOR_ID_MADCATZ 0x0738 #define USB_DEVICE_ID_MADCATZ_BEATPAD 0x4540 #define USB_VENDOR_ID_MCC 0x09db #define USB_DEVICE_ID_MCC_PMD1024LS 0x0076 #define USB_DEVICE_ID_MCC_PMD1208LS 0x007a #define USB_VENDOR_ID_MGE 0x0463 #define USB_DEVICE_ID_MGE_UPS 0xffff #define USB_DEVICE_ID_MGE_UPS1 0x0001 #define USB_VENDOR_ID_MICROCHIP 0x04d8 #define USB_DEVICE_ID_PICKIT1 0x0032 #define USB_DEVICE_ID_PICKIT2 0x0033 #define USB_DEVICE_ID_PICOLCD 0xc002 #define USB_DEVICE_ID_PICOLCD_BOOTLOADER 0xf002 #define USB_VENDOR_ID_MICROSOFT 0x045e #define USB_DEVICE_ID_SIDEWINDER_GV 0x003b #define USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0 0x009d #define USB_DEVICE_ID_MS_NE4K 0x00db #define USB_DEVICE_ID_MS_LK6K 0x00f9 #define USB_DEVICE_ID_MS_PRESENTER_8K_BT 0x0701 #define USB_DEVICE_ID_MS_PRESENTER_8K_USB 0x0713 #define USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K 0x0730 #define USB_DEVICE_ID_MS_COMFORT_MOUSE_4500 0x076c #define USB_VENDOR_ID_MOJO 0x8282 #define USB_DEVICE_ID_RETRO_ADAPTER 0x3201 #define USB_VENDOR_ID_MONTEREY 0x0566 #define USB_DEVICE_ID_GENIUS_KB29E 0x3004 #define USB_VENDOR_ID_NATIONAL_SEMICONDUCTOR 0x0400 #define USB_DEVICE_ID_N_S_HARMONY 0xc359 #define USB_VENDOR_ID_NATSU 0x08b7 #define USB_DEVICE_ID_NATSU_GAMEPAD 0x0001 #define USB_VENDOR_ID_NCR 0x0404 #define USB_DEVICE_ID_NCR_FIRST 0x0300 #define USB_DEVICE_ID_NCR_LAST 0x03ff #define USB_VENDOR_ID_NEC 0x073e #define USB_DEVICE_ID_NEC_USB_GAME_PAD 0x0301 #define USB_VENDOR_ID_NEXTWINDOW 0x1926 #define USB_DEVICE_ID_NEXTWINDOW_TOUCHSCREEN 0x0003 #define USB_VENDOR_ID_NINTENDO 0x057e #define 
USB_DEVICE_ID_NINTENDO_WIIMOTE 0x0306 #define USB_VENDOR_ID_NOVATEK 0x0603 #define USB_DEVICE_ID_NOVATEK_PCT 0x0600 #define USB_VENDOR_ID_NTRIG 0x1b96 #define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN 0x0001 #define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_1 0x0003 #define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_2 0x0004 #define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_3 0x0005 #define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_4 0x0006 #define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_5 0x0007 #define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_6 0x0008 #define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_7 0x0009 #define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_8 0x000A #define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_9 0x000B #define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_10 0x000C #define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_11 0x000D #define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_12 0x000E #define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_13 0x000F #define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_14 0x0010 #define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_15 0x0011 #define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_16 0x0012 #define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_17 0x0013 #define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_18 0x0014 #define USB_VENDOR_ID_ONTRAK 0x0a07 #define USB_DEVICE_ID_ONTRAK_ADU100 0x0064 #define USB_VENDOR_ID_ORTEK 0x05a4 #define USB_DEVICE_ID_ORTEK_PKB1700 0x1700 #define USB_DEVICE_ID_ORTEK_WKB2000 0x2000 #define USB_VENDOR_ID_PANASONIC 0x04da #define USB_DEVICE_ID_PANABOARD_UBT780 0x1044 #define USB_DEVICE_ID_PANABOARD_UBT880 0x104d #define USB_VENDOR_ID_PANJIT 0x134c #define USB_VENDOR_ID_PANTHERLORD 0x0810 #define USB_DEVICE_ID_PANTHERLORD_TWIN_USB_JOYSTICK 0x0001 #define USB_VENDOR_ID_PENMOUNT 0x14e1 #define USB_DEVICE_ID_PENMOUNT_PCI 0x3500 #define USB_VENDOR_ID_PETALYNX 0x18b1 #define USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE 0x0037 #define USB_VENDOR_ID_PHILIPS 0x0471 #define USB_DEVICE_ID_PHILIPS_IEEE802154_DONGLE 0x0617 #define USB_VENDOR_ID_PI_ENGINEERING 0x05f3 #define USB_DEVICE_ID_PI_ENGINEERING_VEC_USB_FOOTPEDAL 0xff #define USB_VENDOR_ID_PIXART 0x093a #define USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN 0x8001 #define USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1 0x8002 #define USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN2 0x8003 #define USB_VENDOR_ID_PLAYDOTCOM 0x0b43 #define USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII 0x0003 #define USB_VENDOR_ID_POWERCOM 0x0d9f #define USB_DEVICE_ID_POWERCOM_UPS 0x0002 #define USB_VENDOR_ID_PRODIGE 0x05af #define USB_DEVICE_ID_PRODIGE_CORDLESS 0x3062 #define USB_VENDOR_ID_QUANTA 0x0408 #define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH 0x3000 #define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3001 0x3001 #define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008 0x3008 #define USB_VENDOR_ID_ROCCAT 0x1e7d #define USB_DEVICE_ID_ROCCAT_ARVO 0x30d4 #define USB_DEVICE_ID_ROCCAT_ISKU 0x319c #define USB_DEVICE_ID_ROCCAT_KONE 0x2ced #define USB_DEVICE_ID_ROCCAT_KONEPLUS 0x2d51 #define USB_DEVICE_ID_ROCCAT_KONEXTD 0x2e22 #define USB_DEVICE_ID_ROCCAT_KOVAPLUS 0x2d50 #define USB_DEVICE_ID_ROCCAT_LUA 0x2c2e #define USB_DEVICE_ID_ROCCAT_PYRA_WIRED 0x2c24 #define USB_DEVICE_ID_ROCCAT_PYRA_WIRELESS 0x2cf6 #define USB_DEVICE_ID_ROCCAT_SAVU 0x2d5a #define USB_VENDOR_ID_SAITEK 0x06a3 #define USB_DEVICE_ID_SAITEK_RUMBLEPAD 0xff17 #define USB_DEVICE_ID_SAITEK_PS1000 0x0621 #define USB_VENDOR_ID_SAMSUNG 0x0419 #define USB_DEVICE_ID_SAMSUNG_IR_REMOTE 0x0001 #define USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE 0x0600 #define USB_VENDOR_ID_SENNHEISER 0x1395 #define USB_DEVICE_ID_SENNHEISER_BTD500USB 0x002c #define USB_VENDOR_ID_SIGMA_MICRO 0x1c4f #define USB_DEVICE_ID_SIGMA_MICRO_KEYBOARD 0x0002 #define USB_VENDOR_ID_SIGMATEL 0x066F 
#define USB_DEVICE_ID_SIGMATEL_STMP3780 0x3780 #define USB_VENDOR_ID_SKYCABLE 0x1223 #define USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER 0x3F07 #define USB_VENDOR_ID_SONY 0x054c #define USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE 0x024b #define USB_DEVICE_ID_SONY_PS3_BDREMOTE 0x0306 #define USB_DEVICE_ID_SONY_PS3_CONTROLLER 0x0268 #define USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER 0x042f #define USB_VENDOR_ID_SOUNDGRAPH 0x15c2 #define USB_DEVICE_ID_SOUNDGRAPH_IMON_FIRST 0x0034 #define USB_DEVICE_ID_SOUNDGRAPH_IMON_LAST 0x0046 #define USB_VENDOR_ID_STANTUM 0x1f87 #define USB_DEVICE_ID_MTP 0x0002 #define USB_VENDOR_ID_STANTUM_STM 0x0483 #define USB_DEVICE_ID_MTP_STM 0x3261 #define USB_DEVICE_ID_SENSOR_HUB_7014 0x7014 #define USB_VENDOR_ID_STANTUM_SITRONIX 0x1403 #define USB_DEVICE_ID_MTP_SITRONIX 0x5001 #define USB_VENDOR_ID_SUN 0x0430 #define USB_DEVICE_ID_RARITAN_KVM_DONGLE 0xcdab #define USB_VENDOR_ID_SUNPLUS 0x04fc #define USB_DEVICE_ID_SUNPLUS_WDESKTOP 0x05d8 #define USB_VENDOR_ID_SYMBOL 0x05e0 #define USB_DEVICE_ID_SYMBOL_SCANNER_1 0x0800 #define USB_DEVICE_ID_SYMBOL_SCANNER_2 0x1300 #define USB_VENDOR_ID_SYNAPTICS 0x06cb #define USB_DEVICE_ID_SYNAPTICS_TP 0x0001 #define USB_DEVICE_ID_SYNAPTICS_INT_TP 0x0002 #define USB_DEVICE_ID_SYNAPTICS_CPAD 0x0003 #define USB_DEVICE_ID_SYNAPTICS_TS 0x0006 #define USB_DEVICE_ID_SYNAPTICS_STICK 0x0007 #define USB_DEVICE_ID_SYNAPTICS_WP 0x0008 #define USB_DEVICE_ID_SYNAPTICS_COMP_TP 0x0009 #define USB_DEVICE_ID_SYNAPTICS_WTP 0x0010 #define USB_DEVICE_ID_SYNAPTICS_DPAD 0x0013 #define USB_VENDOR_ID_THRUSTMASTER 0x044f #define USB_VENDOR_ID_TIVO 0x150a #define USB_DEVICE_ID_TIVO_SLIDE_BT 0x1200 #define USB_DEVICE_ID_TIVO_SLIDE 0x1201 #define USB_VENDOR_ID_TOPSEED 0x0766 #define USB_DEVICE_ID_TOPSEED_CYBERLINK 0x0204 #define USB_VENDOR_ID_TOPSEED2 0x1784 #define USB_DEVICE_ID_TOPSEED2_RF_COMBO 0x0004 #define USB_DEVICE_ID_TOPSEED2_PERIPAD_701 0x0016 #define USB_VENDOR_ID_TOPMAX 0x0663 #define USB_DEVICE_ID_TOPMAX_COBRAPAD 0x0103 #define USB_VENDOR_ID_TOUCH_INTL 0x1e5e #define USB_DEVICE_ID_TOUCH_INTL_MULTI_TOUCH 0x0313 #define USB_VENDOR_ID_TOUCHPACK 0x1bfd #define USB_DEVICE_ID_TOUCHPACK_RTS 0x1688 #define USB_VENDOR_ID_TPV 0x25aa #define USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN 0x8883 #define USB_VENDOR_ID_TURBOX 0x062a #define USB_DEVICE_ID_TURBOX_KEYBOARD 0x0201 #define USB_DEVICE_ID_TURBOX_TOUCHSCREEN_MOSART 0x7100 #define USB_VENDOR_ID_TWINHAN 0x6253 #define USB_DEVICE_ID_TWINHAN_IR_REMOTE 0x0100 #define USB_VENDOR_ID_UCLOGIC 0x5543 #define USB_DEVICE_ID_UCLOGIC_TABLET_PF1209 0x0042 #define USB_DEVICE_ID_UCLOGIC_TABLET_KNA5 0x6001 #define USB_DEVICE_ID_UCLOGIC_TABLET_TWA60 0x0064 #define USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U 0x0003 #define USB_DEVICE_ID_UCLOGIC_TABLET_WP5540U 0x0004 #define USB_DEVICE_ID_UCLOGIC_TABLET_WP8060U 0x0005 #define USB_DEVICE_ID_UCLOGIC_TABLET_WP1062 0x0064 #define USB_DEVICE_ID_UCLOGIC_WIRELESS_TABLET_TWHL850 0x0522 #define USB_DEVICE_ID_UCLOGIC_TABLET_TWHA60 0x0781 #define USB_VENDOR_ID_UNITEC 0x227d #define USB_DEVICE_ID_UNITEC_USB_TOUCH_0709 0x0709 #define USB_DEVICE_ID_UNITEC_USB_TOUCH_0A19 0x0a19 #define USB_VENDOR_ID_VERNIER 0x08f7 #define USB_DEVICE_ID_VERNIER_LABPRO 0x0001 #define USB_DEVICE_ID_VERNIER_GOTEMP 0x0002 #define USB_DEVICE_ID_VERNIER_SKIP 0x0003 #define USB_DEVICE_ID_VERNIER_CYCLOPS 0x0004 #define USB_DEVICE_ID_VERNIER_LCSPEC 0x0006 #define USB_VENDOR_ID_WACOM 0x056a #define USB_DEVICE_ID_WACOM_GRAPHIRE_BLUETOOTH 0x81 #define USB_DEVICE_ID_WACOM_INTUOS4_BLUETOOTH 0x00BD #define USB_VENDOR_ID_WALTOP 0x172f #define 
USB_DEVICE_ID_WALTOP_SLIM_TABLET_5_8_INCH 0x0032 #define USB_DEVICE_ID_WALTOP_SLIM_TABLET_12_1_INCH 0x0034 #define USB_DEVICE_ID_WALTOP_Q_PAD 0x0037 #define USB_DEVICE_ID_WALTOP_PID_0038 0x0038 #define USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH 0x0501 #define USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH 0x0500 #define USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET 0x0502 #define USB_VENDOR_ID_WISEGROUP 0x0925 #define USB_DEVICE_ID_SMARTJOY_PLUS 0x0005 #define USB_DEVICE_ID_1_PHIDGETSERVO_20 0x8101 #define USB_DEVICE_ID_4_PHIDGETSERVO_20 0x8104 #define USB_DEVICE_ID_8_8_4_IF_KIT 0x8201 #define USB_DEVICE_ID_SUPER_JOY_BOX_3 0x8888 #define USB_DEVICE_ID_QUAD_USB_JOYPAD 0x8800 #define USB_DEVICE_ID_DUAL_USB_JOYPAD 0x8866 #define USB_VENDOR_ID_WISEGROUP_LTD 0x6666 #define USB_VENDOR_ID_WISEGROUP_LTD2 0x6677 #define USB_DEVICE_ID_SMARTJOY_DUAL_PLUS 0x8802 #define USB_DEVICE_ID_SUPER_JOY_BOX_3_PRO 0x8801 #define USB_DEVICE_ID_SUPER_DUAL_BOX_PRO 0x8802 #define USB_DEVICE_ID_SUPER_JOY_BOX_5_PRO 0x8804 #define USB_VENDOR_ID_X_TENSIONS 0x1ae7 #define USB_DEVICE_ID_SPEEDLINK_VAD_CEZANNE 0x9001 #define USB_VENDOR_ID_XAT 0x2505 #define USB_DEVICE_ID_XAT_CSR 0x0220 #define USB_VENDOR_ID_XIROKU 0x1477 #define USB_DEVICE_ID_XIROKU_SPX 0x1006 #define USB_DEVICE_ID_XIROKU_MPX 0x1007 #define USB_DEVICE_ID_XIROKU_CSR 0x100e #define USB_DEVICE_ID_XIROKU_SPX1 0x1021 #define USB_DEVICE_ID_XIROKU_CSR1 0x1022 #define USB_DEVICE_ID_XIROKU_MPX1 0x1023 #define USB_DEVICE_ID_XIROKU_SPX2 0x1024 #define USB_DEVICE_ID_XIROKU_CSR2 0x1025 #define USB_DEVICE_ID_XIROKU_MPX2 0x1026 #define USB_VENDOR_ID_YEALINK 0x6993 #define USB_DEVICE_ID_YEALINK_P1K_P4K_B2K 0xb001 #define USB_VENDOR_ID_ZEROPLUS 0x0c12 #define USB_VENDOR_ID_ZYDACRON 0x13EC #define USB_DEVICE_ID_ZYDACRON_REMOTE_CONTROL 0x0006 #define USB_VENDOR_ID_ZYTRONIC 0x14c8 #define USB_DEVICE_ID_ZYTRONIC_ZXY100 0x0005 #define USB_VENDOR_ID_PRIMAX 0x0461 #define USB_DEVICE_ID_PRIMAX_KEYBOARD 0x4e05 #endif backport-iwlwifi-dkms-7906/compat/lib-asn1_decoder.c000066400000000000000000000325701351064634500224320ustar00rootroot00000000000000/* Decoder for ASN.1 BER/DER/CER encoded bytestream * * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public Licence * as published by the Free Software Foundation; either version * 2 of the Licence, or (at your option) any later version. 
*/ #include <linux/export.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/module.h> #include <linux/asn1_decoder.h> #include <linux/asn1_ber_bytecode.h> static const unsigned char asn1_op_lengths[ASN1_OP__NR] = { /* OPC TAG JMP ACT */ [ASN1_OP_MATCH] = 1 + 1, [ASN1_OP_MATCH_OR_SKIP] = 1 + 1, [ASN1_OP_MATCH_ACT] = 1 + 1 + 1, [ASN1_OP_MATCH_ACT_OR_SKIP] = 1 + 1 + 1, [ASN1_OP_MATCH_JUMP] = 1 + 1 + 1, [ASN1_OP_MATCH_JUMP_OR_SKIP] = 1 + 1 + 1, [ASN1_OP_MATCH_ANY] = 1, [ASN1_OP_MATCH_ANY_OR_SKIP] = 1, [ASN1_OP_MATCH_ANY_ACT] = 1 + 1, [ASN1_OP_MATCH_ANY_ACT_OR_SKIP] = 1 + 1, [ASN1_OP_COND_MATCH_OR_SKIP] = 1 + 1, [ASN1_OP_COND_MATCH_ACT_OR_SKIP] = 1 + 1 + 1, [ASN1_OP_COND_MATCH_JUMP_OR_SKIP] = 1 + 1 + 1, [ASN1_OP_COND_MATCH_ANY] = 1, [ASN1_OP_COND_MATCH_ANY_OR_SKIP] = 1, [ASN1_OP_COND_MATCH_ANY_ACT] = 1 + 1, [ASN1_OP_COND_MATCH_ANY_ACT_OR_SKIP] = 1 + 1, [ASN1_OP_COND_FAIL] = 1, [ASN1_OP_COMPLETE] = 1, [ASN1_OP_ACT] = 1 + 1, [ASN1_OP_MAYBE_ACT] = 1 + 1, [ASN1_OP_RETURN] = 1, [ASN1_OP_END_SEQ] = 1, [ASN1_OP_END_SEQ_OF] = 1 + 1, [ASN1_OP_END_SET] = 1, [ASN1_OP_END_SET_OF] = 1 + 1, [ASN1_OP_END_SEQ_ACT] = 1 + 1, [ASN1_OP_END_SEQ_OF_ACT] = 1 + 1 + 1, [ASN1_OP_END_SET_ACT] = 1 + 1, [ASN1_OP_END_SET_OF_ACT] = 1 + 1 + 1, }; /* * Find the length of an indefinite length object * @data: The data buffer * @datalen: The end of the innermost containing element in the buffer * @_dp: The data parse cursor (updated before returning) * @_len: Where to return the size of the element. * @_errmsg: Where to return a pointer to an error message on error */ static int asn1_find_indefinite_length(const unsigned char *data, size_t datalen, size_t *_dp, size_t *_len, const char **_errmsg) { unsigned char tag, tmp; size_t dp = *_dp, len, n; int indef_level = 1; next_tag: if (unlikely(datalen - dp < 2)) { if (datalen == dp) goto missing_eoc; goto data_overrun_error; } /* Extract a tag from the data */ tag = data[dp++]; if (tag == ASN1_EOC) { /* It appears to be an EOC. */ if (data[dp++] != 0) goto invalid_eoc; if (--indef_level <= 0) { *_len = dp - *_dp; *_dp = dp; return 0; } goto next_tag; } if (unlikely((tag & 0x1f) == ASN1_LONG_TAG)) { do { if (unlikely(datalen - dp < 2)) goto data_overrun_error; tmp = data[dp++]; } while (tmp & 0x80); } /* Extract the length */ len = data[dp++]; if (len <= 0x7f) goto check_length; if (unlikely(len == ASN1_INDEFINITE_LENGTH)) { /* Indefinite length */ if (unlikely((tag & ASN1_CONS_BIT) == ASN1_PRIM << 5)) goto indefinite_len_primitive; indef_level++; goto next_tag; } n = len - 0x80; if (unlikely(n > sizeof(len) - 1)) goto length_too_long; if (unlikely(n > datalen - dp)) goto data_overrun_error; len = 0; for (; n > 0; n--) { len <<= 8; len |= data[dp++]; } check_length: if (len > datalen - dp) goto data_overrun_error; dp += len; goto next_tag; length_too_long: *_errmsg = "Unsupported length"; goto error; indefinite_len_primitive: *_errmsg = "Indefinite len primitive not permitted"; goto error; invalid_eoc: *_errmsg = "Invalid length EOC"; goto error; data_overrun_error: *_errmsg = "Data overrun error"; goto error; missing_eoc: *_errmsg = "Missing EOC in indefinite len cons"; error: *_dp = dp; return -1; } /** * asn1_ber_decoder - Decode BER/DER/CER ASN.1 according to pattern * @decoder: The decoder definition (produced by asn1_compiler) * @context: The caller's context (to be passed to the action functions) * @data: The encoded data * @datalen: The size of the encoded data * * Decode BER/DER/CER encoded ASN.1 data according to a bytecode pattern * produced by asn1_compiler. Action functions are called on marked tags to * allow the caller to retrieve significant data.
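 *
 * Illustrative call sketch (not part of the original file; the decoder
 * table "my_asn1_decoder" and context "my_ctx" are hypothetical stand-ins
 * for asn1_compiler output and caller state):
 *
 *	ret = asn1_ber_decoder(&my_asn1_decoder, &my_ctx, data, datalen);
 *	if (ret < 0)
 *		return ret;
 *
 * A negative return is -EMSGSIZE when datalen exceeds 65535, -EBADMSG for
 * malformed input, or whatever error an action callback propagated.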
* * LIMITATIONS: * * To keep down the amount of stack used by this function, the following limits * have been imposed: * * (1) This won't handle datalen > 65535 without increasing the size of the * cons stack elements and length_too_long checking. * * (2) The stack of constructed types is 10 deep. If the depth of non-leaf * constructed types exceeds this, the decode will fail. * * (3) The SET type (not the SET OF type) isn't really supported as tracking * what members of the set have been seen is a pain. */ int asn1_ber_decoder(const struct asn1_decoder *decoder, void *context, const unsigned char *data, size_t datalen) { const unsigned char *machine = decoder->machine; const asn1_action_t *actions = decoder->actions; size_t machlen = decoder->machlen; enum asn1_opcode op; unsigned char tag = 0, csp = 0, jsp = 0, optag = 0, hdr = 0; const char *errmsg; size_t pc = 0, dp = 0, tdp = 0, len = 0; int ret; unsigned char flags = 0; #define FLAG_INDEFINITE_LENGTH 0x01 #define FLAG_MATCHED 0x02 #define FLAG_LAST_MATCHED 0x04 /* Last tag matched */ #define FLAG_CONS 0x20 /* Corresponds to CONS bit in the opcode tag * - ie. whether or not we are going to parse * a compound type. */ #define NR_CONS_STACK 10 unsigned short cons_dp_stack[NR_CONS_STACK]; unsigned short cons_datalen_stack[NR_CONS_STACK]; unsigned char cons_hdrlen_stack[NR_CONS_STACK]; #define NR_JUMP_STACK 10 unsigned char jump_stack[NR_JUMP_STACK]; if (datalen > 65535) return -EMSGSIZE; next_op: pr_debug("next_op: pc=\e[32m%zu\e[m/%zu dp=\e[33m%zu\e[m/%zu C=%d J=%d\n", pc, machlen, dp, datalen, csp, jsp); if (unlikely(pc >= machlen)) goto machine_overrun_error; op = machine[pc]; if (unlikely(pc + asn1_op_lengths[op] > machlen)) goto machine_overrun_error; /* If this command is meant to match a tag, then do that before * evaluating the command. */ if (op <= ASN1_OP__MATCHES_TAG) { unsigned char tmp; /* Skip conditional matches if possible */ if ((op & ASN1_OP_MATCH__COND && flags & FLAG_MATCHED) || (op & ASN1_OP_MATCH__SKIP && dp == datalen)) { flags &= ~FLAG_LAST_MATCHED; pc += asn1_op_lengths[op]; goto next_op; } flags = 0; hdr = 2; /* Extract a tag from the data */ if (unlikely(datalen - dp < 2)) goto data_overrun_error; tag = data[dp++]; if (unlikely((tag & 0x1f) == ASN1_LONG_TAG)) goto long_tag_not_supported; if (op & ASN1_OP_MATCH__ANY) { pr_debug("- any %02x\n", tag); } else { /* Extract the tag from the machine * - Either CONS or PRIM are permitted in the data if * CONS is not set in the op stream, otherwise CONS * is mandatory. */ optag = machine[pc + 1]; flags |= optag & FLAG_CONS; /* Determine whether the tag matched */ tmp = optag ^ tag; tmp &= ~(optag & ASN1_CONS_BIT); pr_debug("- match? %02x %02x %02x\n", tag, optag, tmp); if (tmp != 0) { /* All odd-numbered tags are MATCH_OR_SKIP. 
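 * (The _OR_SKIP variant of each match opcode is allocated the odd value in
 * the opcode enumeration, which is why the single ASN1_OP_MATCH__SKIP bit
 * test below is enough to identify ops that may be skipped on a mismatch.)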
*/ if (op & ASN1_OP_MATCH__SKIP) { pc += asn1_op_lengths[op]; dp--; goto next_op; } goto tag_mismatch; } } flags |= FLAG_MATCHED; len = data[dp++]; if (len > 0x7f) { if (unlikely(len == ASN1_INDEFINITE_LENGTH)) { /* Indefinite length */ if (unlikely(!(tag & ASN1_CONS_BIT))) goto indefinite_len_primitive; flags |= FLAG_INDEFINITE_LENGTH; if (unlikely(2 > datalen - dp)) goto data_overrun_error; } else { int n = len - 0x80; if (unlikely(n > 2)) goto length_too_long; if (unlikely(n > datalen - dp)) goto data_overrun_error; hdr += n; for (len = 0; n > 0; n--) { len <<= 8; len |= data[dp++]; } if (unlikely(len > datalen - dp)) goto data_overrun_error; } } else { if (unlikely(len > datalen - dp)) goto data_overrun_error; } if (flags & FLAG_CONS) { /* For expected compound forms, we stack the positions * of the start and end of the data. */ if (unlikely(csp >= NR_CONS_STACK)) goto cons_stack_overflow; cons_dp_stack[csp] = dp; cons_hdrlen_stack[csp] = hdr; if (!(flags & FLAG_INDEFINITE_LENGTH)) { cons_datalen_stack[csp] = datalen; datalen = dp + len; } else { cons_datalen_stack[csp] = 0; } csp++; } pr_debug("- TAG: %02x %zu%s\n", tag, len, flags & FLAG_CONS ? " CONS" : ""); tdp = dp; } /* Decide how to handle the operation */ switch (op) { case ASN1_OP_MATCH: case ASN1_OP_MATCH_OR_SKIP: case ASN1_OP_MATCH_ACT: case ASN1_OP_MATCH_ACT_OR_SKIP: case ASN1_OP_MATCH_ANY: case ASN1_OP_MATCH_ANY_OR_SKIP: case ASN1_OP_MATCH_ANY_ACT: case ASN1_OP_MATCH_ANY_ACT_OR_SKIP: case ASN1_OP_COND_MATCH_OR_SKIP: case ASN1_OP_COND_MATCH_ACT_OR_SKIP: case ASN1_OP_COND_MATCH_ANY: case ASN1_OP_COND_MATCH_ANY_OR_SKIP: case ASN1_OP_COND_MATCH_ANY_ACT: case ASN1_OP_COND_MATCH_ANY_ACT_OR_SKIP: if (!(flags & FLAG_CONS)) { if (flags & FLAG_INDEFINITE_LENGTH) { size_t tmp = dp; ret = asn1_find_indefinite_length( data, datalen, &tmp, &len, &errmsg); if (ret < 0) goto error; } pr_debug("- LEAF: %zu\n", len); } if (op & ASN1_OP_MATCH__ACT) { unsigned char act; if (op & ASN1_OP_MATCH__ANY) act = machine[pc + 1]; else act = machine[pc + 2]; ret = actions[act](context, hdr, tag, data + dp, len); if (ret < 0) return ret; } if (!(flags & FLAG_CONS)) dp += len; pc += asn1_op_lengths[op]; goto next_op; case ASN1_OP_MATCH_JUMP: case ASN1_OP_MATCH_JUMP_OR_SKIP: case ASN1_OP_COND_MATCH_JUMP_OR_SKIP: pr_debug("- MATCH_JUMP\n"); if (unlikely(jsp == NR_JUMP_STACK)) goto jump_stack_overflow; jump_stack[jsp++] = pc + asn1_op_lengths[op]; pc = machine[pc + 2]; goto next_op; case ASN1_OP_COND_FAIL: if (unlikely(!(flags & FLAG_MATCHED))) goto tag_mismatch; pc += asn1_op_lengths[op]; goto next_op; case ASN1_OP_COMPLETE: if (unlikely(jsp != 0 || csp != 0)) { pr_err("ASN.1 decoder error: Stacks not empty at completion (%u, %u)\n", jsp, csp); return -EBADMSG; } return 0; case ASN1_OP_END_SET: case ASN1_OP_END_SET_ACT: if (unlikely(!(flags & FLAG_MATCHED))) goto tag_mismatch; case ASN1_OP_END_SEQ: case ASN1_OP_END_SET_OF: case ASN1_OP_END_SEQ_OF: case ASN1_OP_END_SEQ_ACT: case ASN1_OP_END_SET_OF_ACT: case ASN1_OP_END_SEQ_OF_ACT: if (unlikely(csp <= 0)) goto cons_stack_underflow; csp--; tdp = cons_dp_stack[csp]; hdr = cons_hdrlen_stack[csp]; len = datalen; datalen = cons_datalen_stack[csp]; pr_debug("- end cons t=%zu dp=%zu l=%zu/%zu\n", tdp, dp, len, datalen); if (datalen == 0) { /* Indefinite length - check for the EOC. 
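 * (An end-of-contents marker is the two zero octets 0x00 0x00; any other
 * byte here means the constructed element still has children to parse.)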
*/ datalen = len; if (unlikely(datalen - dp < 2)) goto data_overrun_error; if (data[dp++] != 0) { if (op & ASN1_OP_END__OF) { dp--; csp++; pc = machine[pc + 1]; pr_debug("- continue\n"); goto next_op; } goto missing_eoc; } if (data[dp++] != 0) goto invalid_eoc; len = dp - tdp - 2; } else { if (dp < len && (op & ASN1_OP_END__OF)) { datalen = len; csp++; pc = machine[pc + 1]; pr_debug("- continue\n"); goto next_op; } if (dp != len) goto cons_length_error; len -= tdp; pr_debug("- cons len l=%zu d=%zu\n", len, dp - tdp); } if (op & ASN1_OP_END__ACT) { unsigned char act; if (op & ASN1_OP_END__OF) act = machine[pc + 2]; else act = machine[pc + 1]; ret = actions[act](context, hdr, 0, data + tdp, len); if (ret < 0) return ret; } pc += asn1_op_lengths[op]; goto next_op; case ASN1_OP_MAYBE_ACT: if (!(flags & FLAG_LAST_MATCHED)) { pc += asn1_op_lengths[op]; goto next_op; } case ASN1_OP_ACT: ret = actions[machine[pc + 1]](context, hdr, tag, data + tdp, len); if (ret < 0) return ret; pc += asn1_op_lengths[op]; goto next_op; case ASN1_OP_RETURN: if (unlikely(jsp <= 0)) goto jump_stack_underflow; pc = jump_stack[--jsp]; flags |= FLAG_MATCHED | FLAG_LAST_MATCHED; goto next_op; default: break; } /* Shouldn't reach here */ pr_err("ASN.1 decoder error: Found reserved opcode (%u) pc=%zu\n", op, pc); return -EBADMSG; data_overrun_error: errmsg = "Data overrun error"; goto error; machine_overrun_error: errmsg = "Machine overrun error"; goto error; jump_stack_underflow: errmsg = "Jump stack underflow"; goto error; jump_stack_overflow: errmsg = "Jump stack overflow"; goto error; cons_stack_underflow: errmsg = "Cons stack underflow"; goto error; cons_stack_overflow: errmsg = "Cons stack overflow"; goto error; cons_length_error: errmsg = "Cons length error"; goto error; missing_eoc: errmsg = "Missing EOC in indefinite len cons"; goto error; invalid_eoc: errmsg = "Invalid length EOC"; goto error; length_too_long: errmsg = "Unsupported length"; goto error; indefinite_len_primitive: errmsg = "Indefinite len primitive not permitted"; goto error; tag_mismatch: errmsg = "Unexpected tag"; goto error; long_tag_not_supported: errmsg = "Long tag not supported"; error: pr_debug("\nASN1: %s [m=%zu d=%zu ot=%02x t=%02x l=%zu]\n", errmsg, pc, dp, optag, tag, len); return -EBADMSG; } EXPORT_SYMBOL_GPL(asn1_ber_decoder); MODULE_LICENSE("GPL"); backport-iwlwifi-dkms-7906/compat/lib-bucket_locks.c000066400000000000000000000026351351064634500225520ustar00rootroot00000000000000#include #include #include #include #include /* Allocate an array of spinlocks to be accessed by a hash. Two arguments * indicate the number of elements to allocate in the array. max_size * gives the maximum number of elements to allocate. cpu_mult gives * the number of locks per CPU to allocate. The size is rounded up * to a power of 2 to be suitable as a hash table. 
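 *
 * Minimal usage sketch (illustrative only, via the alloc_bucket_spinlocks()
 * wrapper macro that supplies the lock_class_key; "hash" is assumed to be
 * computed by the caller):
 *
 *	spinlock_t *locks;
 *	unsigned int mask;
 *
 *	if (alloc_bucket_spinlocks(&locks, &mask, 1024, 32, GFP_KERNEL) < 0)
 *		return -ENOMEM;
 *	spin_lock(&locks[hash & mask]);
 *	... touch the bucket guarded by this lock ...
 *	spin_unlock(&locks[hash & mask]);
 *	free_bucket_spinlocks(locks);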
*/ int __alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *locks_mask, size_t max_size, unsigned int cpu_mult, gfp_t gfp, const char *name, struct lock_class_key *key) { spinlock_t *tlocks = NULL; unsigned int i, size; #if defined(CONFIG_PROVE_LOCKING) unsigned int nr_pcpus = 2; #else unsigned int nr_pcpus = num_possible_cpus(); #endif if (cpu_mult) { nr_pcpus = min_t(unsigned int, nr_pcpus, 64UL); size = min_t(unsigned int, nr_pcpus * cpu_mult, max_size); } else { size = max_size; } if (sizeof(spinlock_t) != 0) { tlocks = kvmalloc_array(size, sizeof(spinlock_t), gfp); if (!tlocks) return -ENOMEM; for (i = 0; i < size; i++) { spin_lock_init(&tlocks[i]); lockdep_init_map(&tlocks[i].dep_map, name, key, 0); } } *locks = tlocks; *locks_mask = size - 1; return 0; } EXPORT_SYMBOL(__alloc_bucket_spinlocks); void free_bucket_spinlocks(spinlock_t *locks) { kvfree(locks); } EXPORT_SYMBOL(free_bucket_spinlocks); backport-iwlwifi-dkms-7906/compat/lib-oid_registry.c000066400000000000000000000076271351064634500226130ustar00rootroot00000000000000/* ASN.1 Object identifier (OID) registry * * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public Licence * as published by the Free Software Foundation; either version * 2 of the Licence, or (at your option) any later version. */ #include #include #include #include #include #include #include "oid_registry_data.c" MODULE_DESCRIPTION("OID Registry"); MODULE_AUTHOR("Red Hat, Inc."); MODULE_LICENSE("GPL"); /** * look_up_OID - Find an OID registration for the specified data * @data: Binary representation of the OID * @datasize: Size of the binary representation */ enum OID look_up_OID(const void *data, size_t datasize) { const unsigned char *octets = data; enum OID oid; unsigned char xhash; unsigned i, j, k, hash; size_t len; /* Hash the OID data */ hash = datasize - 1; for (i = 0; i < datasize; i++) hash += octets[i] * 33; hash = (hash >> 24) ^ (hash >> 16) ^ (hash >> 8) ^ hash; hash &= 0xff; /* Binary search the OID registry. OIDs are stored in ascending order * of hash value then ascending order of size and then in ascending * order of reverse value. */ i = 0; k = OID__NR; while (i < k) { j = (i + k) / 2; xhash = oid_search_table[j].hash; if (xhash > hash) { k = j; continue; } if (xhash < hash) { i = j + 1; continue; } oid = oid_search_table[j].oid; len = oid_index[oid + 1] - oid_index[oid]; if (len > datasize) { k = j; continue; } if (len < datasize) { i = j + 1; continue; } /* Variation is most likely to be at the tail end of the * OID, so do the comparison in reverse. */ while (len > 0) { unsigned char a = oid_data[oid_index[oid] + --len]; unsigned char b = octets[len]; if (a > b) { k = j; goto next; } if (a < b) { i = j + 1; goto next; } } return oid; next: ; } return OID__NR; } EXPORT_SYMBOL_GPL(look_up_OID); /* * sprint_OID - Print an Object Identifier into a buffer * @data: The encoded OID to print * @datasize: The size of the encoded OID * @buffer: The buffer to render into * @bufsize: The size of the buffer * * The OID is rendered into the buffer in "a.b.c.d" format and the number of * bytes is returned. -EBADMSG is returned if the data could not be intepreted * and -ENOBUFS if the buffer was too small. 
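 *
 * For example (an illustrative sketch): the three payload bytes
 * 0x55 0x04 0x03 encode OID 2.5.4.3 (X.500 commonName), the first octet
 * packing the leading two components as 40 * 2 + 5, so
 *
 *	sprint_oid("\x55\x04\x03", 3, buf, sizeof(buf));
 *
 * writes "2.5.4.3" into buf and returns 7.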
*/ int sprint_oid(const void *data, size_t datasize, char *buffer, size_t bufsize) { const unsigned char *v = data, *end = v + datasize; unsigned long num; unsigned char n; size_t ret; int count; if (v >= end) goto bad; n = *v++; ret = count = snprintf(buffer, bufsize, "%u.%u", n / 40, n % 40); if (count >= bufsize) return -ENOBUFS; buffer += count; bufsize -= count; while (v < end) { num = 0; n = *v++; if (!(n & 0x80)) { num = n; } else { num = n & 0x7f; do { if (v >= end) goto bad; n = *v++; num <<= 7; num |= n & 0x7f; } while (n & 0x80); } ret += count = snprintf(buffer, bufsize, ".%lu", num); if (count >= bufsize) return -ENOBUFS; buffer += count; bufsize -= count; } return ret; bad: snprintf(buffer, bufsize, "(bad)"); return -EBADMSG; } EXPORT_SYMBOL_GPL(sprint_oid); /** * sprint_OID - Print an Object Identifier into a buffer * @oid: The OID to print * @buffer: The buffer to render into * @bufsize: The size of the buffer * * The OID is rendered into the buffer in "a.b.c.d" format and the number of * bytes is returned. */ int sprint_OID(enum OID oid, char *buffer, size_t bufsize) { int ret; BUG_ON(oid >= OID__NR); ret = sprint_oid(oid_data + oid_index[oid], oid_index[oid + 1] - oid_index[oid], buffer, bufsize); BUG_ON(ret == -EBADMSG); return ret; } EXPORT_SYMBOL_GPL(sprint_OID); backport-iwlwifi-dkms-7906/compat/lib-refcount.c000066400000000000000000000263751351064634500217360ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0 /* * Variant of atomic_t specialized for reference counts. * * The interface matches the atomic_t interface (to aid in porting) but only * provides the few functions one should use for reference counting. * * It differs in that the counter saturates at UINT_MAX and will not move once * there. This avoids wrapping the counter and causing 'spurious' * use-after-free issues. * * Memory ordering rules are slightly relaxed wrt regular atomic_t functions * and provide only what is strictly required for refcounts. * * The increments are fully relaxed; these will not provide ordering. The * rationale is that whatever is used to obtain the object we're increasing the * reference count on will provide the ordering. For locked data structures, * its the lock acquire, for RCU/lockless data structures its the dependent * load. * * Do note that inc_not_zero() provides a control dependency which will order * future stores against the inc, this ensures we'll never modify the object * if we did not in fact acquire a reference. * * The decrements will provide release order, such that all the prior loads and * stores will be issued before, it also provides a control dependency, which * will order us against the subsequent free(). * * The control dependency is against the load of the cmpxchg (ll/sc) that * succeeded. This means the stores aren't fully ordered, but this is fine * because the 1->0 transition indicates no concurrency. * * Note that the allocator is responsible for ordering things between free() * and alloc(). * */ #include #include #include #include #include /** * refcount_add_not_zero_checked - add a value to a refcount unless it is 0 * @i: the value to add to the refcount * @r: the refcount * * Will saturate at UINT_MAX and WARN. * * Provides no memory ordering, it is assumed the caller has guaranteed the * object memory to be stable (RCU, etc.). It does provide a control dependency * and thereby orders future stores. See the comment on top. 
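 *
 * The not-zero variants back the classic lockless lookup pattern, sketched
 * here for illustration ("my_lookup" and the object layout are hypothetical):
 *
 *	rcu_read_lock();
 *	obj = my_lookup(key);
 *	if (obj && !refcount_inc_not_zero(&obj->ref))
 *		obj = NULL;	(lost the race with the final put)
 *	rcu_read_unlock();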
* * Use of this function is not recommended for the normal reference counting * use case in which references are taken and released one at a time. In these * cases, refcount_inc(), or one of its variants, should instead be used to * increment a reference count. * * Return: false if the passed refcount is 0, true otherwise */ bool refcount_add_not_zero_checked(unsigned int i, refcount_t *r) { unsigned int new, val = atomic_read(&r->refs); do { if (!val) return false; if (unlikely(val == UINT_MAX)) return true; new = val + i; if (new < val) new = UINT_MAX; } while (!atomic_try_cmpxchg_relaxed(&r->refs, &val, new)); WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n"); return true; } EXPORT_SYMBOL(refcount_add_not_zero_checked); /** * refcount_add_checked - add a value to a refcount * @i: the value to add to the refcount * @r: the refcount * * Similar to atomic_add(), but will saturate at UINT_MAX and WARN. * * Provides no memory ordering, it is assumed the caller has guaranteed the * object memory to be stable (RCU, etc.). It does provide a control dependency * and thereby orders future stores. See the comment on top. * * Use of this function is not recommended for the normal reference counting * use case in which references are taken and released one at a time. In these * cases, refcount_inc(), or one of its variants, should instead be used to * increment a reference count. */ void refcount_add_checked(unsigned int i, refcount_t *r) { WARN_ONCE(!refcount_add_not_zero_checked(i, r), "refcount_t: addition on 0; use-after-free.\n"); } EXPORT_SYMBOL(refcount_add_checked); /** * refcount_inc_not_zero_checked - increment a refcount unless it is 0 * @r: the refcount to increment * * Similar to atomic_inc_not_zero(), but will saturate at UINT_MAX and WARN. * * Provides no memory ordering, it is assumed the caller has guaranteed the * object memory to be stable (RCU, etc.). It does provide a control dependency * and thereby orders future stores. See the comment on top. * * Return: true if the increment was successful, false otherwise */ bool refcount_inc_not_zero_checked(refcount_t *r) { unsigned int new, val = atomic_read(&r->refs); do { new = val + 1; if (!val) return false; if (unlikely(!new)) return true; } while (!atomic_try_cmpxchg_relaxed(&r->refs, &val, new)); WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n"); return true; } EXPORT_SYMBOL(refcount_inc_not_zero_checked); /** * refcount_inc_checked - increment a refcount * @r: the refcount to increment * * Similar to atomic_inc(), but will saturate at UINT_MAX and WARN. * * Provides no memory ordering, it is assumed the caller already has a * reference on the object. * * Will WARN if the refcount is 0, as this represents a possible use-after-free * condition. */ void refcount_inc_checked(refcount_t *r) { WARN_ONCE(!refcount_inc_not_zero_checked(r), "refcount_t: increment on 0; use-after-free.\n"); } EXPORT_SYMBOL(refcount_inc_checked); /** * refcount_sub_and_test_checked - subtract from a refcount and test if it is 0 * @i: amount to subtract from the refcount * @r: the refcount * * Similar to atomic_dec_and_test(), but it will WARN, return false and * ultimately leak on underflow and will fail to decrement when saturated * at UINT_MAX. * * Provides release memory ordering, such that prior loads and stores are done * before, and provides a control dependency such that free() must come after. * See the comment on top. 
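 *
 * The common release-side pairing, sketched for illustration:
 *
 *	if (refcount_dec_and_test(&obj->ref))
 *		kfree(obj);
 *
 * The release ordering plus the control dependency ensure the free cannot
 * be speculated ahead of the final 1->0 decrement.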
* * Use of this function is not recommended for the normal reference counting * use case in which references are taken and released one at a time. In these * cases, refcount_dec(), or one of its variants, should instead be used to * decrement a reference count. * * Return: true if the resulting refcount is 0, false otherwise */ bool refcount_sub_and_test_checked(unsigned int i, refcount_t *r) { unsigned int new, val = atomic_read(&r->refs); do { if (unlikely(val == UINT_MAX)) return false; new = val - i; if (new > val) { WARN_ONCE(new > val, "refcount_t: underflow; use-after-free.\n"); return false; } } while (!atomic_try_cmpxchg_release(&r->refs, &val, new)); return !new; } EXPORT_SYMBOL(refcount_sub_and_test_checked); /** * refcount_dec_and_test_checked - decrement a refcount and test if it is 0 * @r: the refcount * * Similar to atomic_dec_and_test(), it will WARN on underflow and fail to * decrement when saturated at UINT_MAX. * * Provides release memory ordering, such that prior loads and stores are done * before, and provides a control dependency such that free() must come after. * See the comment on top. * * Return: true if the resulting refcount is 0, false otherwise */ bool refcount_dec_and_test_checked(refcount_t *r) { return refcount_sub_and_test_checked(1, r); } EXPORT_SYMBOL(refcount_dec_and_test_checked); /** * refcount_dec_checked - decrement a refcount * @r: the refcount * * Similar to atomic_dec(), it will WARN on underflow and fail to decrement * when saturated at UINT_MAX. * * Provides release memory ordering, such that prior loads and stores are done * before. */ void refcount_dec_checked(refcount_t *r) { WARN_ONCE(refcount_dec_and_test_checked(r), "refcount_t: decrement hit 0; leaking memory.\n"); } EXPORT_SYMBOL(refcount_dec_checked); /** * refcount_dec_if_one - decrement a refcount if it is 1 * @r: the refcount * * No atomic_t counterpart, it attempts a 1 -> 0 transition and returns the * success thereof. * * Like all decrement operations, it provides release memory order and provides * a control dependency. * * It can be used like a try-delete operator; this explicit case is provided * and not cmpxchg in generic, because that would allow implementing unsafe * operations. * * Return: true if the resulting refcount is 0, false otherwise */ bool refcount_dec_if_one(refcount_t *r) { int val = 1; return atomic_try_cmpxchg_release(&r->refs, &val, 0); } EXPORT_SYMBOL(refcount_dec_if_one); /** * refcount_dec_not_one - decrement a refcount if it is not 1 * @r: the refcount * * No atomic_t counterpart, it decrements unless the value is 1, in which case * it will return false. * * Was often done like: atomic_add_unless(&var, -1, 1) * * Return: true if the decrement operation was successful, false otherwise */ bool refcount_dec_not_one(refcount_t *r) { unsigned int new, val = atomic_read(&r->refs); do { if (unlikely(val == UINT_MAX)) return true; if (val == 1) return false; new = val - 1; if (new > val) { WARN_ONCE(new > val, "refcount_t: underflow; use-after-free.\n"); return true; } } while (!atomic_try_cmpxchg_release(&r->refs, &val, new)); return true; } EXPORT_SYMBOL(refcount_dec_not_one); /** * refcount_dec_and_mutex_lock - return holding mutex if able to decrement * refcount to 0 * @r: the refcount * @lock: the mutex to be locked * * Similar to atomic_dec_and_mutex_lock(), it will WARN on underflow and fail * to decrement when saturated at UINT_MAX. 
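 *
 * Illustrative sketch (names hypothetical): drop a reference and, only when
 * it was the last one, unpublish and free the object under the mutex:
 *
 *	if (refcount_dec_and_mutex_lock(&obj->ref, &table->mutex)) {
 *		list_del(&obj->node);
 *		mutex_unlock(&table->mutex);
 *		kfree(obj);
 *	}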
* * Provides release memory ordering, such that prior loads and stores are done * before, and provides a control dependency such that free() must come after. * See the comment on top. * * Return: true and hold mutex if able to decrement refcount to 0, false * otherwise */ bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock) { if (refcount_dec_not_one(r)) return false; mutex_lock(lock); if (!refcount_dec_and_test(r)) { mutex_unlock(lock); return false; } return true; } EXPORT_SYMBOL(refcount_dec_and_mutex_lock); /** * refcount_dec_and_lock - return holding spinlock if able to decrement * refcount to 0 * @r: the refcount * @lock: the spinlock to be locked * * Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to * decrement when saturated at UINT_MAX. * * Provides release memory ordering, such that prior loads and stores are done * before, and provides a control dependency such that free() must come after. * See the comment on top. * * Return: true and hold spinlock if able to decrement refcount to 0, false * otherwise */ bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock) { if (refcount_dec_not_one(r)) return false; spin_lock(lock); if (!refcount_dec_and_test(r)) { spin_unlock(lock); return false; } return true; } EXPORT_SYMBOL(refcount_dec_and_lock); /** * refcount_dec_and_lock_irqsave - return holding spinlock with disabled * interrupts if able to decrement refcount to 0 * @r: the refcount * @lock: the spinlock to be locked * @flags: saved IRQ-flags if the is acquired * * Same as refcount_dec_and_lock() above except that the spinlock is acquired * with disabled interupts. * * Return: true and hold spinlock if able to decrement refcount to 0, false * otherwise */ bool refcount_dec_and_lock_irqsave(refcount_t *r, spinlock_t *lock, unsigned long *flags) { if (refcount_dec_not_one(r)) return false; spin_lock_irqsave(lock, *flags); if (!refcount_dec_and_test(r)) { spin_unlock_irqrestore(lock, *flags); return false; } return true; } EXPORT_SYMBOL(refcount_dec_and_lock_irqsave); backport-iwlwifi-dkms-7906/compat/lib-rhashtable.c000066400000000000000000000726061351064634500222240ustar00rootroot00000000000000/* * Resizable, Scalable, Concurrent Hash Table * * Copyright (c) 2015 Herbert Xu * Copyright (c) 2014-2015 Thomas Graf * Copyright (c) 2008-2014 Patrick McHardy * * Code partially derived from nft_hash * Rewritten with rehash code from br_multicast plus single list * pointer as suggested by Josh Triplett * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define HASH_DEFAULT_SIZE 64UL #define HASH_MIN_SIZE 4U #define BUCKET_LOCKS_PER_CPU 32UL union nested_table { union nested_table __rcu *table; struct rhash_head __rcu *bucket; }; static u32 head_hashfn(struct rhashtable *ht, const struct bucket_table *tbl, const struct rhash_head *he) { return rht_head_hashfn(ht, tbl, he, ht->p); } #ifdef CONFIG_PROVE_LOCKING #define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT)) int lockdep_rht_mutex_is_held(struct rhashtable *ht) { return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1; } EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held); int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash) { spinlock_t *lock = rht_bucket_lock(tbl, hash); return (debug_locks) ? 
lockdep_is_held(lock) : 1; } EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held); #else #define ASSERT_RHT_MUTEX(HT) #endif static void nested_table_free(union nested_table *ntbl, unsigned int size) { const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *)); const unsigned int len = 1 << shift; unsigned int i; ntbl = rcu_dereference_raw(ntbl->table); if (!ntbl) return; if (size > len) { size >>= shift; for (i = 0; i < len; i++) nested_table_free(ntbl + i, size); } kfree(ntbl); } static void nested_bucket_table_free(const struct bucket_table *tbl) { unsigned int size = tbl->size >> tbl->nest; unsigned int len = 1 << tbl->nest; union nested_table *ntbl; unsigned int i; ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]); for (i = 0; i < len; i++) nested_table_free(ntbl + i, size); kfree(ntbl); } static void bucket_table_free(const struct bucket_table *tbl) { if (tbl->nest) nested_bucket_table_free(tbl); free_bucket_spinlocks(tbl->locks); kvfree(tbl); } static void bucket_table_free_rcu(struct rcu_head *head) { bucket_table_free(container_of(head, struct bucket_table, rcu)); } static union nested_table *nested_table_alloc(struct rhashtable *ht, union nested_table __rcu **prev, bool leaf) { union nested_table *ntbl; int i; ntbl = rcu_dereference(*prev); if (ntbl) return ntbl; ntbl = kzalloc(PAGE_SIZE, GFP_ATOMIC); if (ntbl && leaf) { for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0]); i++) INIT_RHT_NULLS_HEAD(ntbl[i].bucket); } rcu_assign_pointer(*prev, ntbl); return ntbl; } static struct bucket_table *nested_bucket_table_alloc(struct rhashtable *ht, size_t nbuckets, gfp_t gfp) { const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *)); struct bucket_table *tbl; size_t size; if (nbuckets < (1 << (shift + 1))) return NULL; size = sizeof(*tbl) + sizeof(tbl->buckets[0]); tbl = kzalloc(size, gfp); if (!tbl) return NULL; if (!nested_table_alloc(ht, (union nested_table __rcu **)tbl->buckets, false)) { kfree(tbl); return NULL; } tbl->nest = (ilog2(nbuckets) - 1) % shift + 1; return tbl; } static struct bucket_table *bucket_table_alloc(struct rhashtable *ht, size_t nbuckets, gfp_t gfp) { struct bucket_table *tbl = NULL; size_t size, max_locks; int i; size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]); tbl = kvzalloc(size, gfp); size = nbuckets; if (tbl == NULL && (gfp & ~__GFP_NOFAIL) != GFP_KERNEL) { tbl = nested_bucket_table_alloc(ht, nbuckets, gfp); nbuckets = 0; } if (tbl == NULL) return NULL; tbl->size = size; max_locks = size >> 1; if (tbl->nest) max_locks = min_t(size_t, max_locks, 1U << tbl->nest); if (alloc_bucket_spinlocks(&tbl->locks, &tbl->locks_mask, max_locks, ht->p.locks_mul, gfp) < 0) { bucket_table_free(tbl); return NULL; } INIT_LIST_HEAD(&tbl->walkers); tbl->hash_rnd = get_random_u32(); for (i = 0; i < nbuckets; i++) INIT_RHT_NULLS_HEAD(tbl->buckets[i]); return tbl; } static struct bucket_table *rhashtable_last_table(struct rhashtable *ht, struct bucket_table *tbl) { struct bucket_table *new_tbl; do { new_tbl = tbl; tbl = rht_dereference_rcu(tbl->future_tbl, ht); } while (tbl); return new_tbl; } static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash) { struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht); struct bucket_table *new_tbl = rhashtable_last_table(ht, old_tbl); struct rhash_head __rcu **pprev = rht_bucket_var(old_tbl, old_hash); int err = -EAGAIN; struct rhash_head *head, *next, *entry; spinlock_t *new_bucket_lock; unsigned int new_hash; if (new_tbl->nest) goto out; err = -ENOENT; rht_for_each(entry, old_tbl, old_hash) { err = 
0; next = rht_dereference_bucket(entry->next, old_tbl, old_hash); if (rht_is_a_nulls(next)) break; pprev = &entry->next; } if (err) goto out; new_hash = head_hashfn(ht, new_tbl, entry); new_bucket_lock = rht_bucket_lock(new_tbl, new_hash); spin_lock_nested(new_bucket_lock, SINGLE_DEPTH_NESTING); head = rht_dereference_bucket(new_tbl->buckets[new_hash], new_tbl, new_hash); RCU_INIT_POINTER(entry->next, head); rcu_assign_pointer(new_tbl->buckets[new_hash], entry); spin_unlock(new_bucket_lock); rcu_assign_pointer(*pprev, next); out: return err; } static int rhashtable_rehash_chain(struct rhashtable *ht, unsigned int old_hash) { struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht); spinlock_t *old_bucket_lock; int err; old_bucket_lock = rht_bucket_lock(old_tbl, old_hash); spin_lock_bh(old_bucket_lock); while (!(err = rhashtable_rehash_one(ht, old_hash))) ; if (err == -ENOENT) { old_tbl->rehash++; err = 0; } spin_unlock_bh(old_bucket_lock); return err; } static int rhashtable_rehash_attach(struct rhashtable *ht, struct bucket_table *old_tbl, struct bucket_table *new_tbl) { /* Make insertions go into the new, empty table right away. Deletions * and lookups will be attempted in both tables until we synchronize. * As cmpxchg() provides strong barriers, we do not need * rcu_assign_pointer(). */ if (cmpxchg(&old_tbl->future_tbl, NULL, new_tbl) != NULL) return -EEXIST; return 0; } static int rhashtable_rehash_table(struct rhashtable *ht) { struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht); struct bucket_table *new_tbl; struct rhashtable_walker *walker; unsigned int old_hash; int err; new_tbl = rht_dereference(old_tbl->future_tbl, ht); if (!new_tbl) return 0; for (old_hash = 0; old_hash < old_tbl->size; old_hash++) { err = rhashtable_rehash_chain(ht, old_hash); if (err) return err; cond_resched(); } /* Publish the new table pointer. */ rcu_assign_pointer(ht->tbl, new_tbl); spin_lock(&ht->lock); list_for_each_entry(walker, &old_tbl->walkers, list) walker->tbl = NULL; spin_unlock(&ht->lock); /* Wait for readers. All new readers will see the new * table, and thus no references to the old table will * remain. */ call_rcu(&old_tbl->rcu, bucket_table_free_rcu); return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0; } static int rhashtable_rehash_alloc(struct rhashtable *ht, struct bucket_table *old_tbl, unsigned int size) { struct bucket_table *new_tbl; int err; ASSERT_RHT_MUTEX(ht); new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL); if (new_tbl == NULL) return -ENOMEM; err = rhashtable_rehash_attach(ht, old_tbl, new_tbl); if (err) bucket_table_free(new_tbl); return err; } /** * rhashtable_shrink - Shrink hash table while allowing concurrent lookups * @ht: the hash table to shrink * * This function shrinks the hash table to fit, i.e., the smallest * size would not cause it to expand right away automatically. * * The caller must ensure that no concurrent resizing occurs by holding * ht->mutex. * * The caller must ensure that no concurrent table mutations take place. * It is however valid to have concurrent lookups if they are RCU protected. * * It is valid to have concurrent insertions and deletions protected by per * bucket locks or concurrent RCU protected lookups and traversals. 
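 *
 * Worked example (illustrative numbers, not from the code): with
 * nelems = 100 the target is roundup_pow_of_two(100 * 3 / 2) =
 * roundup_pow_of_two(150) = 256 buckets, clamped up to p.min_size;
 * the table is reallocated only when its current size exceeds that
 * target.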
*/ static int rhashtable_shrink(struct rhashtable *ht) { struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht); unsigned int nelems = atomic_read(&ht->nelems); unsigned int size = 0; if (nelems) size = roundup_pow_of_two(nelems * 3 / 2); if (size < ht->p.min_size) size = ht->p.min_size; if (old_tbl->size <= size) return 0; if (rht_dereference(old_tbl->future_tbl, ht)) return -EEXIST; return rhashtable_rehash_alloc(ht, old_tbl, size); } static void rht_deferred_worker(struct work_struct *work) { struct rhashtable *ht; struct bucket_table *tbl; int err = 0; ht = container_of(work, struct rhashtable, run_work); mutex_lock(&ht->mutex); tbl = rht_dereference(ht->tbl, ht); tbl = rhashtable_last_table(ht, tbl); if (rht_grow_above_75(ht, tbl)) err = rhashtable_rehash_alloc(ht, tbl, tbl->size * 2); else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl)) err = rhashtable_shrink(ht); else if (tbl->nest) err = rhashtable_rehash_alloc(ht, tbl, tbl->size); if (!err) err = rhashtable_rehash_table(ht); mutex_unlock(&ht->mutex); if (err) schedule_work(&ht->run_work); } static int rhashtable_insert_rehash(struct rhashtable *ht, struct bucket_table *tbl) { struct bucket_table *old_tbl; struct bucket_table *new_tbl; unsigned int size; int err; old_tbl = rht_dereference_rcu(ht->tbl, ht); size = tbl->size; err = -EBUSY; if (rht_grow_above_75(ht, tbl)) size *= 2; /* Do not schedule more than one rehash */ else if (old_tbl != tbl) goto fail; err = -ENOMEM; new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC | __GFP_NOWARN); if (new_tbl == NULL) goto fail; err = rhashtable_rehash_attach(ht, tbl, new_tbl); if (err) { bucket_table_free(new_tbl); if (err == -EEXIST) err = 0; } else schedule_work(&ht->run_work); return err; fail: /* Do not fail the insert if someone else did a rehash. */ if (likely(rcu_access_pointer(tbl->future_tbl))) return 0; /* Schedule async rehash to retry allocation in process context. */ if (err == -ENOMEM) schedule_work(&ht->run_work); return err; } static void *rhashtable_lookup_one(struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash, const void *key, struct rhash_head *obj) { struct rhashtable_compare_arg arg = { .ht = ht, .key = key, }; struct rhash_head __rcu **pprev; struct rhash_head *head; int elasticity; elasticity = RHT_ELASTICITY; pprev = rht_bucket_var(tbl, hash); rht_for_each_continue(head, *pprev, tbl, hash) { struct rhlist_head *list; struct rhlist_head *plist; elasticity--; if (!key || (ht->p.obj_cmpfn ? 
ht->p.obj_cmpfn(&arg, rht_obj(ht, head)) : rhashtable_compare(&arg, rht_obj(ht, head)))) { pprev = &head->next; continue; } if (!ht->rhlist) return rht_obj(ht, head); list = container_of(obj, struct rhlist_head, rhead); plist = container_of(head, struct rhlist_head, rhead); RCU_INIT_POINTER(list->next, plist); head = rht_dereference_bucket(head->next, tbl, hash); RCU_INIT_POINTER(list->rhead.next, head); rcu_assign_pointer(*pprev, obj); return NULL; } if (elasticity <= 0) return ERR_PTR(-EAGAIN); return ERR_PTR(-ENOENT); } static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash, struct rhash_head *obj, void *data) { struct rhash_head __rcu **pprev; struct bucket_table *new_tbl; struct rhash_head *head; if (!IS_ERR_OR_NULL(data)) return ERR_PTR(-EEXIST); if (PTR_ERR(data) != -EAGAIN && PTR_ERR(data) != -ENOENT) return ERR_CAST(data); new_tbl = rht_dereference_rcu(tbl->future_tbl, ht); if (new_tbl) return new_tbl; if (PTR_ERR(data) != -ENOENT) return ERR_CAST(data); if (unlikely(rht_grow_above_max(ht, tbl))) return ERR_PTR(-E2BIG); if (unlikely(rht_grow_above_100(ht, tbl))) return ERR_PTR(-EAGAIN); pprev = rht_bucket_insert(ht, tbl, hash); if (!pprev) return ERR_PTR(-ENOMEM); head = rht_dereference_bucket(*pprev, tbl, hash); RCU_INIT_POINTER(obj->next, head); if (ht->rhlist) { struct rhlist_head *list; list = container_of(obj, struct rhlist_head, rhead); RCU_INIT_POINTER(list->next, NULL); } rcu_assign_pointer(*pprev, obj); atomic_inc(&ht->nelems); if (rht_grow_above_75(ht, tbl)) schedule_work(&ht->run_work); return NULL; } static void *rhashtable_try_insert(struct rhashtable *ht, const void *key, struct rhash_head *obj) { struct bucket_table *new_tbl; struct bucket_table *tbl; unsigned int hash; spinlock_t *lock; void *data; tbl = rcu_dereference(ht->tbl); /* All insertions must grab the oldest table containing * the hashed bucket that is yet to be rehashed. */ for (;;) { hash = rht_head_hashfn(ht, tbl, obj, ht->p); lock = rht_bucket_lock(tbl, hash); spin_lock_bh(lock); if (tbl->rehash <= hash) break; spin_unlock_bh(lock); tbl = rht_dereference_rcu(tbl->future_tbl, ht); } data = rhashtable_lookup_one(ht, tbl, hash, key, obj); new_tbl = rhashtable_insert_one(ht, tbl, hash, obj, data); if (PTR_ERR(new_tbl) != -EEXIST) data = ERR_CAST(new_tbl); while (!IS_ERR_OR_NULL(new_tbl)) { tbl = new_tbl; hash = rht_head_hashfn(ht, tbl, obj, ht->p); spin_lock_nested(rht_bucket_lock(tbl, hash), SINGLE_DEPTH_NESTING); data = rhashtable_lookup_one(ht, tbl, hash, key, obj); new_tbl = rhashtable_insert_one(ht, tbl, hash, obj, data); if (PTR_ERR(new_tbl) != -EEXIST) data = ERR_CAST(new_tbl); spin_unlock(rht_bucket_lock(tbl, hash)); } spin_unlock_bh(lock); if (PTR_ERR(data) == -EAGAIN) data = ERR_PTR(rhashtable_insert_rehash(ht, tbl) ?: -EAGAIN); return data; } void *rhashtable_insert_slow(struct rhashtable *ht, const void *key, struct rhash_head *obj) { void *data; do { rcu_read_lock(); data = rhashtable_try_insert(ht, key, obj); rcu_read_unlock(); } while (PTR_ERR(data) == -EAGAIN); return data; } EXPORT_SYMBOL_GPL(rhashtable_insert_slow); /** * rhashtable_walk_enter - Initialise an iterator * @ht: Table to walk over * @iter: Hash table Iterator * * This function prepares a hash table walk. * * Note that if you restart a walk after rhashtable_walk_stop you * may see the same object twice. Also, you may miss objects if * there are removals in between rhashtable_walk_stop and the next * call to rhashtable_walk_start. 
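 *
 * A complete walk, shown as an illustrative sketch (the ht, iter and
 * obj declarations are assumed, and process() is hypothetical):
 *
 *	rhashtable_walk_enter(&ht, &iter);
 *	rhashtable_walk_start(&iter);
 *	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *		if (IS_ERR(obj)) {
 *			if (PTR_ERR(obj) == -EAGAIN)
 *				continue;	// resize: walk restarted
 *			break;
 *		}
 *		process(obj);		// consume the object
 *	}
 *	rhashtable_walk_stop(&iter);
 *	rhashtable_walk_exit(&iter);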
* * For a completely stable walk you should construct your own data * structure outside the hash table. * * This function may be called from any process context, including * non-preemptable context, but cannot be called from softirq or * hardirq context. * * You must call rhashtable_walk_exit after this function returns. */ void rhashtable_walk_enter(struct rhashtable *ht, struct rhashtable_iter *iter) { iter->ht = ht; iter->p = NULL; iter->slot = 0; iter->skip = 0; iter->end_of_table = 0; spin_lock(&ht->lock); iter->walker.tbl = rcu_dereference_protected(ht->tbl, lockdep_is_held(&ht->lock)); list_add(&iter->walker.list, &iter->walker.tbl->walkers); spin_unlock(&ht->lock); } EXPORT_SYMBOL_GPL(rhashtable_walk_enter); /** * rhashtable_walk_exit - Free an iterator * @iter: Hash table Iterator * * This function frees resources allocated by rhashtable_walk_init. */ void rhashtable_walk_exit(struct rhashtable_iter *iter) { spin_lock(&iter->ht->lock); if (iter->walker.tbl) list_del(&iter->walker.list); spin_unlock(&iter->ht->lock); } EXPORT_SYMBOL_GPL(rhashtable_walk_exit); /** * rhashtable_walk_start_check - Start a hash table walk * @iter: Hash table iterator * * Start a hash table walk at the current iterator position. Note that we take * the RCU lock in all cases including when we return an error. So you must * always call rhashtable_walk_stop to clean up. * * Returns zero if successful. * * Returns -EAGAIN if resize event occurred. Note that the iterator * will rewind back to the beginning and you may use it immediately * by calling rhashtable_walk_next. * * rhashtable_walk_start is defined as an inline variant that returns * void. This is preferred in cases where the caller would ignore * resize events and always continue. */ int rhashtable_walk_start_check(struct rhashtable_iter *iter) __acquires(RCU) { struct rhashtable *ht = iter->ht; bool rhlist = ht->rhlist; rcu_read_lock(); spin_lock(&ht->lock); if (iter->walker.tbl) list_del(&iter->walker.list); spin_unlock(&ht->lock); if (iter->end_of_table) return 0; if (!iter->walker.tbl) { iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht); iter->slot = 0; iter->skip = 0; return -EAGAIN; } if (iter->p && !rhlist) { /* * We need to validate that 'p' is still in the table, and * if so, update 'skip' */ struct rhash_head *p; int skip = 0; rht_for_each_rcu(p, iter->walker.tbl, iter->slot) { skip++; if (p == iter->p) { iter->skip = skip; goto found; } } iter->p = NULL; } else if (iter->p && rhlist) { /* Need to validate that 'list' is still in the table, and * if so, update 'skip' and 'p'. */ struct rhash_head *p; struct rhlist_head *list; int skip = 0; rht_for_each_rcu(p, iter->walker.tbl, iter->slot) { for (list = container_of(p, struct rhlist_head, rhead); list; list = rcu_dereference(list->next)) { skip++; if (list == iter->list) { iter->p = p; iter->skip = skip; goto found; } } } iter->p = NULL; } found: return 0; } EXPORT_SYMBOL_GPL(rhashtable_walk_start_check); /** * __rhashtable_walk_find_next - Find the next element in a table (or the first * one in case of a new walk). * * @iter: Hash table iterator * * Returns the found object or NULL when the end of the table is reached. * * Returns -EAGAIN if resize event occurred.
*/ static void *__rhashtable_walk_find_next(struct rhashtable_iter *iter) { struct bucket_table *tbl = iter->walker.tbl; struct rhlist_head *list = iter->list; struct rhashtable *ht = iter->ht; struct rhash_head *p = iter->p; bool rhlist = ht->rhlist; if (!tbl) return NULL; for (; iter->slot < tbl->size; iter->slot++) { int skip = iter->skip; rht_for_each_rcu(p, tbl, iter->slot) { if (rhlist) { list = container_of(p, struct rhlist_head, rhead); do { if (!skip) goto next; skip--; list = rcu_dereference(list->next); } while (list); continue; } if (!skip) break; skip--; } next: if (!rht_is_a_nulls(p)) { iter->skip++; iter->p = p; iter->list = list; return rht_obj(ht, rhlist ? &list->rhead : p); } iter->skip = 0; } iter->p = NULL; /* Ensure we see any new tables. */ smp_rmb(); iter->walker.tbl = rht_dereference_rcu(tbl->future_tbl, ht); if (iter->walker.tbl) { iter->slot = 0; iter->skip = 0; return ERR_PTR(-EAGAIN); } else { iter->end_of_table = true; } return NULL; } /** * rhashtable_walk_next - Return the next object and advance the iterator * @iter: Hash table iterator * * Note that you must call rhashtable_walk_stop when you are finished * with the walk. * * Returns the next object or NULL when the end of the table is reached. * * Returns -EAGAIN if resize event occurred. Note that the iterator * will rewind back to the beginning and you may continue to use it. */ void *rhashtable_walk_next(struct rhashtable_iter *iter) { struct rhlist_head *list = iter->list; struct rhashtable *ht = iter->ht; struct rhash_head *p = iter->p; bool rhlist = ht->rhlist; if (p) { if (!rhlist || !(list = rcu_dereference(list->next))) { p = rcu_dereference(p->next); list = container_of(p, struct rhlist_head, rhead); } if (!rht_is_a_nulls(p)) { iter->skip++; iter->p = p; iter->list = list; return rht_obj(ht, rhlist ? &list->rhead : p); } /* At the end of this slot, switch to next one and then find * next entry from that point. */ iter->skip = 0; iter->slot++; } return __rhashtable_walk_find_next(iter); } EXPORT_SYMBOL_GPL(rhashtable_walk_next); /** * rhashtable_walk_peek - Return the next object but don't advance the iterator * @iter: Hash table iterator * * Returns the next object or NULL when the end of the table is reached. * * Returns -EAGAIN if resize event occurred. Note that the iterator * will rewind back to the beginning and you may continue to use it. */ void *rhashtable_walk_peek(struct rhashtable_iter *iter) { struct rhlist_head *list = iter->list; struct rhashtable *ht = iter->ht; struct rhash_head *p = iter->p; if (p) return rht_obj(ht, ht->rhlist ? &list->rhead : p); /* No object found in current iter, find next one in the table. */ if (iter->skip) { /* A nonzero skip value points to the next entry in the table * beyond that last one that was found. Decrement skip so * we find the current value. __rhashtable_walk_find_next * will restore the original value of skip assuming that * the table hasn't changed. */ iter->skip--; } return __rhashtable_walk_find_next(iter); } EXPORT_SYMBOL_GPL(rhashtable_walk_peek); /** * rhashtable_walk_stop - Finish a hash table walk * @iter: Hash table iterator * * Finish a hash table walk. Does not reset the iterator to the start of the * hash table. 
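 *
 * Because the iterator position is preserved (subject to the caveats
 * noted at rhashtable_walk_enter), a caller may drop out of the RCU
 * read-side section to sleep and resume later, e.g. (sketch):
 *
 *	rhashtable_walk_stop(&iter);
 *	cond_resched();		// or any sleeping operation
 *	rhashtable_walk_start(&iter);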
*/ void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU) { struct rhashtable *ht; struct bucket_table *tbl = iter->walker.tbl; if (!tbl) goto out; ht = iter->ht; spin_lock(&ht->lock); if (tbl->rehash < tbl->size) list_add(&iter->walker.list, &tbl->walkers); else iter->walker.tbl = NULL; spin_unlock(&ht->lock); out: rcu_read_unlock(); } EXPORT_SYMBOL_GPL(rhashtable_walk_stop); static size_t rounded_hashtable_size(const struct rhashtable_params *params) { size_t retsize; if (params->nelem_hint) retsize = max(roundup_pow_of_two(params->nelem_hint * 4 / 3), (unsigned long)params->min_size); else retsize = max(HASH_DEFAULT_SIZE, (unsigned long)params->min_size); return retsize; } static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed) { return jhash2(key, length, seed); } /** * rhashtable_init - initialize a new hash table * @ht: hash table to be initialized * @params: configuration parameters * * Initializes a new hash table based on the provided configuration * parameters. A table can be configured either with a variable or * fixed length key: * * Configuration Example 1: Fixed length keys * struct test_obj { * int key; * void * my_member; * struct rhash_head node; * }; * * struct rhashtable_params params = { * .head_offset = offsetof(struct test_obj, node), * .key_offset = offsetof(struct test_obj, key), * .key_len = sizeof(int), * .hashfn = jhash, * }; * * Configuration Example 2: Variable length keys * struct test_obj { * [...] * struct rhash_head node; * }; * * u32 my_hash_fn(const void *data, u32 len, u32 seed) * { * struct test_obj *obj = data; * * return [... hash ...]; * } * * struct rhashtable_params params = { * .head_offset = offsetof(struct test_obj, node), * .hashfn = jhash, * .obj_hashfn = my_hash_fn, * }; */ int rhashtable_init(struct rhashtable *ht, const struct rhashtable_params *params) { struct bucket_table *tbl; size_t size; if ((!params->key_len && !params->obj_hashfn) || (params->obj_hashfn && !params->obj_cmpfn)) return -EINVAL; memset(ht, 0, sizeof(*ht)); mutex_init(&ht->mutex); spin_lock_init(&ht->lock); memcpy(&ht->p, params, sizeof(*params)); if (params->min_size) ht->p.min_size = roundup_pow_of_two(params->min_size); /* Cap total entries at 2^31 to avoid nelems overflow. */ ht->max_elems = 1u << 31; if (params->max_size) { ht->p.max_size = rounddown_pow_of_two(params->max_size); if (ht->p.max_size < ht->max_elems / 2) ht->max_elems = ht->p.max_size * 2; } ht->p.min_size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE); size = rounded_hashtable_size(&ht->p); if (params->locks_mul) ht->p.locks_mul = roundup_pow_of_two(params->locks_mul); else ht->p.locks_mul = BUCKET_LOCKS_PER_CPU; ht->key_len = ht->p.key_len; if (!params->hashfn) { ht->p.hashfn = jhash; if (!(ht->key_len & (sizeof(u32) - 1))) { ht->key_len /= sizeof(u32); ht->p.hashfn = rhashtable_jhash2; } } /* * This is api initialization and thus we need to guarantee the * initial rhashtable allocation. Upon failure, retry with the * smallest possible size with __GFP_NOFAIL semantics. 
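 * For example, with a large nelem_hint whose table cannot be
 * allocated, the retry below falls back to max_t(u16, p.min_size,
 * HASH_MIN_SIZE) buckets using __GFP_NOFAIL; the deferred worker then
 * grows the table again on demand.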
*/ tbl = bucket_table_alloc(ht, size, GFP_KERNEL); if (unlikely(tbl == NULL)) { size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE); tbl = bucket_table_alloc(ht, size, GFP_KERNEL | __GFP_NOFAIL); } atomic_set(&ht->nelems, 0); RCU_INIT_POINTER(ht->tbl, tbl); INIT_WORK(&ht->run_work, rht_deferred_worker); return 0; } EXPORT_SYMBOL_GPL(rhashtable_init); /** * rhltable_init - initialize a new hash list table * @hlt: hash list table to be initialized * @params: configuration parameters * * Initializes a new hash list table. * * See documentation for rhashtable_init. */ int rhltable_init(struct rhltable *hlt, const struct rhashtable_params *params) { int err; err = rhashtable_init(&hlt->ht, params); hlt->ht.rhlist = true; return err; } EXPORT_SYMBOL_GPL(rhltable_init); static void rhashtable_free_one(struct rhashtable *ht, struct rhash_head *obj, void (*free_fn)(void *ptr, void *arg), void *arg) { struct rhlist_head *list; if (!ht->rhlist) { free_fn(rht_obj(ht, obj), arg); return; } list = container_of(obj, struct rhlist_head, rhead); do { obj = &list->rhead; list = rht_dereference(list->next, ht); free_fn(rht_obj(ht, obj), arg); } while (list); } /** * rhashtable_free_and_destroy - free elements and destroy hash table * @ht: the hash table to destroy * @free_fn: callback to release resources of element * @arg: pointer passed to free_fn * * Stops an eventual async resize. If defined, invokes free_fn for each * element to release resources. Please note that RCU protected * readers may still be accessing the elements. Releasing of resources * must occur in a compatible manner. Then frees the bucket array. * * This function will eventually sleep to wait for an async resize * to complete. The caller is responsible that no further write operations * occur in parallel. */ void rhashtable_free_and_destroy(struct rhashtable *ht, void (*free_fn)(void *ptr, void *arg), void *arg) { struct bucket_table *tbl, *next_tbl; unsigned int i; cancel_work_sync(&ht->run_work); mutex_lock(&ht->mutex); tbl = rht_dereference(ht->tbl, ht); restart: if (free_fn) { for (i = 0; i < tbl->size; i++) { struct rhash_head *pos, *next; cond_resched(); for (pos = rht_dereference(*rht_bucket(tbl, i), ht), next = !rht_is_a_nulls(pos) ? rht_dereference(pos->next, ht) : NULL; !rht_is_a_nulls(pos); pos = next, next = !rht_is_a_nulls(pos) ?
rht_dereference(pos->next, ht) : NULL) rhashtable_free_one(ht, pos, free_fn, arg); } } next_tbl = rht_dereference(tbl->future_tbl, ht); bucket_table_free(tbl); if (next_tbl) { tbl = next_tbl; goto restart; } mutex_unlock(&ht->mutex); } EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy); void rhashtable_destroy(struct rhashtable *ht) { return rhashtable_free_and_destroy(ht, NULL, NULL); } EXPORT_SYMBOL_GPL(rhashtable_destroy); struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl, unsigned int hash) { const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *)); static struct rhash_head __rcu *rhnull; unsigned int index = hash & ((1 << tbl->nest) - 1); unsigned int size = tbl->size >> tbl->nest; unsigned int subhash = hash; union nested_table *ntbl; ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]); ntbl = rht_dereference_bucket_rcu(ntbl[index].table, tbl, hash); subhash >>= tbl->nest; while (ntbl && size > (1 << shift)) { index = subhash & ((1 << shift) - 1); ntbl = rht_dereference_bucket_rcu(ntbl[index].table, tbl, hash); size >>= shift; subhash >>= shift; } if (!ntbl) { if (!rhnull) INIT_RHT_NULLS_HEAD(rhnull); return &rhnull; } return &ntbl[subhash].bucket; } EXPORT_SYMBOL_GPL(rht_bucket_nested); struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash) { const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *)); unsigned int index = hash & ((1 << tbl->nest) - 1); unsigned int size = tbl->size >> tbl->nest; union nested_table *ntbl; ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]); hash >>= tbl->nest; ntbl = nested_table_alloc(ht, &ntbl[index].table, size <= (1 << shift)); while (ntbl && size > (1 << shift)) { index = hash & ((1 << shift) - 1); size >>= shift; hash >>= shift; ntbl = nested_table_alloc(ht, &ntbl[index].table, size <= (1 << shift)); } if (!ntbl) return NULL; return &ntbl[hash].bucket; } EXPORT_SYMBOL_GPL(rht_bucket_nested_insert); backport-iwlwifi-dkms-7906/compat/main.c000066400000000000000000000043601351064634500202570ustar00rootroot00000000000000#include #include #include #include #include "backports.h" MODULE_AUTHOR("Luis R. 
Rodriguez"); MODULE_DESCRIPTION("Kernel backport module"); MODULE_LICENSE("GPL"); #ifndef CPTCFG_KERNEL_NAME #error "You need a CPTCFG_KERNEL_NAME" #endif #ifndef CPTCFG_KERNEL_VERSION #error "You need a CPTCFG_KERNEL_VERSION" #endif #ifndef CPTCFG_VERSION #error "You need a CPTCFG_VERSION" #endif static char *backported_kernel_name = CPTCFG_KERNEL_NAME; module_param(backported_kernel_name, charp, 0400); MODULE_PARM_DESC(backported_kernel_name, "The kernel tree name that was used for this backport (" CPTCFG_KERNEL_NAME ")"); #ifdef BACKPORTS_GIT_TRACKED static char *backports_tracker_id = BACKPORTS_GIT_TRACKED; module_param(backports_tracker_id, charp, 0400); MODULE_PARM_DESC(backports_tracker_id, "The version of the tree containing this backport (" BACKPORTS_GIT_TRACKED ")"); #else static char *backported_kernel_version = CPTCFG_KERNEL_VERSION; static char *backports_version = CPTCFG_VERSION; module_param(backported_kernel_version, charp, 0400); MODULE_PARM_DESC(backported_kernel_version, "The kernel version that was used for this backport (" CPTCFG_KERNEL_VERSION ")"); module_param(backports_version, charp, 0400); MODULE_PARM_DESC(backports_version, "The git version of the backports tree used to generate this backport (" CPTCFG_VERSION ")"); #endif void backport_dependency_symbol(void) { } EXPORT_SYMBOL_GPL(backport_dependency_symbol); static int __init backport_init(void) { int ret = devcoredump_init(); if (ret) return ret; printk(KERN_INFO "Loading modules backported from " CPTCFG_KERNEL_NAME #ifndef BACKPORTS_GIT_TRACKED " version " CPTCFG_KERNEL_VERSION #endif "\n"); #ifdef BACKPORTS_GIT_TRACKED printk(KERN_INFO BACKPORTS_GIT_TRACKED "\n"); #else #ifdef CONFIG_BACKPORT_INTEGRATE printk(KERN_INFO "Backport integrated by backports.git " CPTCFG_VERSION "\n"); #else printk(KERN_INFO "Backport generated by backports.git " CPTCFG_VERSION "\n"); #endif /* CONFIG_BACKPORT_INTEGRATE */ #endif /* BACKPORTS_GIT_TRACKED */ return 0; } subsys_initcall(backport_init); static void __exit backport_exit(void) { devcoredump_exit(); } module_exit(backport_exit); backport-iwlwifi-dkms-7906/compat/memneq.c000066400000000000000000000142311351064634500206130ustar00rootroot00000000000000/* * Constant-time equality testing of memory regions. * * Authors: * * James Yonan * Daniel Borkmann * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2013 OpenVPN Technologies, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. * The full GNU General Public License is included in this distribution * in the file called LICENSE.GPL. * * BSD LICENSE * * Copyright(c) 2013 OpenVPN Technologies, Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of OpenVPN Technologies nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <crypto/algapi.h> #ifndef __HAVE_ARCH_CRYPTO_MEMNEQ /* Generic path for arbitrary size */ static inline unsigned long __crypto_memneq_generic(const void *a, const void *b, size_t size) { unsigned long neq = 0; #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) while (size >= sizeof(unsigned long)) { neq |= *(unsigned long *)a ^ *(unsigned long *)b; OPTIMIZER_HIDE_VAR(neq); a += sizeof(unsigned long); b += sizeof(unsigned long); size -= sizeof(unsigned long); } #endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */ while (size > 0) { neq |= *(unsigned char *)a ^ *(unsigned char *)b; OPTIMIZER_HIDE_VAR(neq); a += 1; b += 1; size -= 1; } return neq; } /* Loop-free fast-path for frequently used 16-byte size */ static inline unsigned long __crypto_memneq_16(const void *a, const void *b) { unsigned long neq = 0; #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS if (sizeof(unsigned long) == 8) { neq |= *(unsigned long *)(a) ^ *(unsigned long *)(b); OPTIMIZER_HIDE_VAR(neq); neq |= *(unsigned long *)(a+8) ^ *(unsigned long *)(b+8); OPTIMIZER_HIDE_VAR(neq); } else if (sizeof(unsigned int) == 4) { neq |= *(unsigned int *)(a) ^ *(unsigned int *)(b); OPTIMIZER_HIDE_VAR(neq); neq |= *(unsigned int *)(a+4) ^ *(unsigned int *)(b+4); OPTIMIZER_HIDE_VAR(neq); neq |= *(unsigned int *)(a+8) ^ *(unsigned int *)(b+8); OPTIMIZER_HIDE_VAR(neq); neq |= *(unsigned int *)(a+12) ^ *(unsigned int *)(b+12); OPTIMIZER_HIDE_VAR(neq); } else #endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */ { neq |= *(unsigned char *)(a) ^ *(unsigned char *)(b); OPTIMIZER_HIDE_VAR(neq); neq |= *(unsigned char *)(a+1) ^ *(unsigned char *)(b+1); OPTIMIZER_HIDE_VAR(neq); neq |= *(unsigned char *)(a+2) ^ *(unsigned char *)(b+2); OPTIMIZER_HIDE_VAR(neq); neq |= *(unsigned char *)(a+3) ^ *(unsigned char *)(b+3); OPTIMIZER_HIDE_VAR(neq); neq |= *(unsigned char *)(a+4) ^ *(unsigned char *)(b+4); OPTIMIZER_HIDE_VAR(neq); neq |= *(unsigned char *)(a+5) ^ *(unsigned char *)(b+5); OPTIMIZER_HIDE_VAR(neq); neq |= *(unsigned char *)(a+6) ^ *(unsigned char *)(b+6); OPTIMIZER_HIDE_VAR(neq); neq |= *(unsigned char *)(a+7)
^ *(unsigned char *)(b+7); OPTIMIZER_HIDE_VAR(neq); neq |= *(unsigned char *)(a+8) ^ *(unsigned char *)(b+8); OPTIMIZER_HIDE_VAR(neq); neq |= *(unsigned char *)(a+9) ^ *(unsigned char *)(b+9); OPTIMIZER_HIDE_VAR(neq); neq |= *(unsigned char *)(a+10) ^ *(unsigned char *)(b+10); OPTIMIZER_HIDE_VAR(neq); neq |= *(unsigned char *)(a+11) ^ *(unsigned char *)(b+11); OPTIMIZER_HIDE_VAR(neq); neq |= *(unsigned char *)(a+12) ^ *(unsigned char *)(b+12); OPTIMIZER_HIDE_VAR(neq); neq |= *(unsigned char *)(a+13) ^ *(unsigned char *)(b+13); OPTIMIZER_HIDE_VAR(neq); neq |= *(unsigned char *)(a+14) ^ *(unsigned char *)(b+14); OPTIMIZER_HIDE_VAR(neq); neq |= *(unsigned char *)(a+15) ^ *(unsigned char *)(b+15); OPTIMIZER_HIDE_VAR(neq); } return neq; } /* Compare two areas of memory without leaking timing information, * and with special optimizations for common sizes. Users should * not call this function directly, but should instead use * crypto_memneq defined in crypto/algapi.h. */ noinline unsigned long __crypto_memneq(const void *a, const void *b, size_t size) { switch (size) { case 16: return __crypto_memneq_16(a, b); default: return __crypto_memneq_generic(a, b, size); } } EXPORT_SYMBOL(__crypto_memneq); #endif /* __HAVE_ARCH_CRYPTO_MEMNEQ */ backport-iwlwifi-dkms-7906/compat/user_namespace.c000066400000000000000000000030141351064634500223200ustar00rootroot00000000000000/* * Copyright 2012 Luis R. Rodriguez <mcgrof@do-not-panic.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Backport functionality introduced in Linux user_namespace.c */ #include <linux/module.h> #include <linux/highuid.h> #include <linux/uidgid.h> #include <linux/user_namespace.h> #ifdef CONFIG_USER_NS kuid_t make_kuid(struct user_namespace *ns, uid_t uid) { /* Map the uid to a global kernel uid */ return KUIDT_INIT(uid); } EXPORT_SYMBOL_GPL(make_kuid); uid_t from_kuid(struct user_namespace *targ, kuid_t kuid) { /* Map the uid from a global kernel uid */ return __kuid_val(kuid); } EXPORT_SYMBOL_GPL(from_kuid); uid_t from_kuid_munged(struct user_namespace *targ, kuid_t kuid) { uid_t uid; uid = from_kuid(targ, kuid); if (uid == (uid_t) -1) uid = overflowuid; return uid; } EXPORT_SYMBOL_GPL(from_kuid_munged); kgid_t make_kgid(struct user_namespace *ns, gid_t gid) { /* Map the gid to a global kernel gid */ return KGIDT_INIT(gid); } EXPORT_SYMBOL_GPL(make_kgid); gid_t from_kgid(struct user_namespace *targ, kgid_t kgid) { /* Map the gid from a global kernel gid */ return __kgid_val(kgid); } EXPORT_SYMBOL_GPL(from_kgid); gid_t from_kgid_munged(struct user_namespace *targ, kgid_t kgid) { gid_t gid; gid = from_kgid(targ, kgid); if (gid == (gid_t) -1) gid = overflowgid; return gid; } EXPORT_SYMBOL_GPL(from_kgid_munged); #endif /* CONFIG_USER_NS */ backport-iwlwifi-dkms-7906/compat/verification/000077500000000000000000000000001351064634500216465ustar00rootroot00000000000000backport-iwlwifi-dkms-7906/compat/verification/asn1parse.c000066400000000000000000000225721351064634500237160ustar00rootroot00000000000000/* * Generic ASN.1 parsing * * Copyright (C) 2006-2015, ARM Limited, All Rights Reserved * SPDX-License-Identifier: GPL-2.0 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version.
* * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * * This file is part of mbed TLS (https://tls.mbed.org) */ #if !defined(MBEDTLS_CONFIG_FILE) #include "mbedtls/config.h" #else #include MBEDTLS_CONFIG_FILE #endif #if defined(MBEDTLS_ASN1_PARSE_C) #include "mbedtls/asn1.h" #if defined(MBEDTLS_BIGNUM_C) #include "mbedtls/bignum.h" #endif #if defined(MBEDTLS_PLATFORM_C) #include "mbedtls/platform.h" #else #include <stdlib.h> #define mbedtls_calloc calloc #define mbedtls_free free #endif /* Implementation that should never be optimized out by the compiler */ static void mbedtls_zeroize( void *v, size_t n ) { volatile unsigned char *p = (unsigned char*)v; while( n-- ) *p++ = 0; } /* * ASN.1 DER decoding routines */ int mbedtls_asn1_get_len( unsigned char **p, const unsigned char *end, size_t *len ) { if( ( end - *p ) < 1 ) return( MBEDTLS_ERR_ASN1_OUT_OF_DATA ); if( ( **p & 0x80 ) == 0 ) *len = *(*p)++; else { switch( **p & 0x7F ) { case 1: if( ( end - *p ) < 2 ) return( MBEDTLS_ERR_ASN1_OUT_OF_DATA ); *len = (*p)[1]; (*p) += 2; break; case 2: if( ( end - *p ) < 3 ) return( MBEDTLS_ERR_ASN1_OUT_OF_DATA ); *len = ( (size_t)(*p)[1] << 8 ) | (*p)[2]; (*p) += 3; break; case 3: if( ( end - *p ) < 4 ) return( MBEDTLS_ERR_ASN1_OUT_OF_DATA ); *len = ( (size_t)(*p)[1] << 16 ) | ( (size_t)(*p)[2] << 8 ) | (*p)[3]; (*p) += 4; break; case 4: if( ( end - *p ) < 5 ) return( MBEDTLS_ERR_ASN1_OUT_OF_DATA ); *len = ( (size_t)(*p)[1] << 24 ) | ( (size_t)(*p)[2] << 16 ) | ( (size_t)(*p)[3] << 8 ) | (*p)[4]; (*p) += 5; break; default: return( MBEDTLS_ERR_ASN1_INVALID_LENGTH ); } } if( *len > (size_t) ( end - *p ) ) return( MBEDTLS_ERR_ASN1_OUT_OF_DATA ); return( 0 ); } int mbedtls_asn1_get_tag( unsigned char **p, const unsigned char *end, size_t *len, int tag ) { if( ( end - *p ) < 1 ) return( MBEDTLS_ERR_ASN1_OUT_OF_DATA ); if( **p != tag ) return( MBEDTLS_ERR_ASN1_UNEXPECTED_TAG ); (*p)++; return( mbedtls_asn1_get_len( p, end, len ) ); } int mbedtls_asn1_get_bool( unsigned char **p, const unsigned char *end, int *val ) { int ret; size_t len; if( ( ret = mbedtls_asn1_get_tag( p, end, &len, MBEDTLS_ASN1_BOOLEAN ) ) != 0 ) return( ret ); if( len != 1 ) return( MBEDTLS_ERR_ASN1_INVALID_LENGTH ); *val = ( **p != 0 ) ?
1 : 0; (*p)++; return( 0 ); } int mbedtls_asn1_get_int( unsigned char **p, const unsigned char *end, int *val ) { int ret; size_t len; if( ( ret = mbedtls_asn1_get_tag( p, end, &len, MBEDTLS_ASN1_INTEGER ) ) != 0 ) return( ret ); if( len == 0 || len > sizeof( int ) || ( **p & 0x80 ) != 0 ) return( MBEDTLS_ERR_ASN1_INVALID_LENGTH ); *val = 0; while( len-- > 0 ) { *val = ( *val << 8 ) | **p; (*p)++; } return( 0 ); } #if defined(MBEDTLS_BIGNUM_C) int mbedtls_asn1_get_mpi( unsigned char **p, const unsigned char *end, mbedtls_mpi *X ) { int ret; size_t len; if( ( ret = mbedtls_asn1_get_tag( p, end, &len, MBEDTLS_ASN1_INTEGER ) ) != 0 ) return( ret ); ret = mbedtls_mpi_read_binary( X, *p, len ); *p += len; return( ret ); } #endif /* MBEDTLS_BIGNUM_C */ int mbedtls_asn1_get_bitstring( unsigned char **p, const unsigned char *end, mbedtls_asn1_bitstring *bs) { int ret; /* Certificate type is a single byte bitstring */ if( ( ret = mbedtls_asn1_get_tag( p, end, &bs->len, MBEDTLS_ASN1_BIT_STRING ) ) != 0 ) return( ret ); /* Check length, subtract one for actual bit string length */ if( bs->len < 1 ) return( MBEDTLS_ERR_ASN1_OUT_OF_DATA ); bs->len -= 1; /* Get number of unused bits, ensure unused bits <= 7 */ bs->unused_bits = **p; if( bs->unused_bits > 7 ) return( MBEDTLS_ERR_ASN1_INVALID_LENGTH ); (*p)++; /* Get actual bitstring */ bs->p = *p; *p += bs->len; if( *p != end ) return( MBEDTLS_ERR_ASN1_LENGTH_MISMATCH ); return( 0 ); } /* * Get a bit string without unused bits */ int mbedtls_asn1_get_bitstring_null( unsigned char **p, const unsigned char *end, size_t *len ) { int ret; if( ( ret = mbedtls_asn1_get_tag( p, end, len, MBEDTLS_ASN1_BIT_STRING ) ) != 0 ) return( ret ); if( (*len)-- < 2 || *(*p)++ != 0 ) return( MBEDTLS_ERR_ASN1_INVALID_DATA ); return( 0 ); } /* * Parses and splits an ASN.1 "SEQUENCE OF <tag>" */ int mbedtls_asn1_get_sequence_of( unsigned char **p, const unsigned char *end, mbedtls_asn1_sequence *cur, int tag) { int ret; size_t len; mbedtls_asn1_buf *buf; /* Get main sequence tag */ if( ( ret = mbedtls_asn1_get_tag( p, end, &len, MBEDTLS_ASN1_CONSTRUCTED | MBEDTLS_ASN1_SEQUENCE ) ) != 0 ) return( ret ); if( *p + len != end ) return( MBEDTLS_ERR_ASN1_LENGTH_MISMATCH ); while( *p < end ) { buf = &(cur->buf); buf->tag = **p; if( ( ret = mbedtls_asn1_get_tag( p, end, &buf->len, tag ) ) != 0 ) return( ret ); buf->p = *p; *p += buf->len; /* Allocate and assign next pointer */ if( *p < end ) { cur->next = (mbedtls_asn1_sequence*)mbedtls_calloc( 1, sizeof( mbedtls_asn1_sequence ) ); if( cur->next == NULL ) return( MBEDTLS_ERR_ASN1_ALLOC_FAILED ); cur = cur->next; } } /* Set final sequence entry's next pointer to NULL */ cur->next = NULL; if( *p != end ) return( MBEDTLS_ERR_ASN1_LENGTH_MISMATCH ); return( 0 ); } int mbedtls_asn1_get_alg( unsigned char **p, const unsigned char *end, mbedtls_asn1_buf *alg, mbedtls_asn1_buf *params ) { int ret; size_t len; if( ( ret = mbedtls_asn1_get_tag( p, end, &len, MBEDTLS_ASN1_CONSTRUCTED | MBEDTLS_ASN1_SEQUENCE ) ) != 0 ) return( ret ); if( ( end - *p ) < 1 ) return( MBEDTLS_ERR_ASN1_OUT_OF_DATA ); alg->tag = **p; end = *p + len; if( ( ret = mbedtls_asn1_get_tag( p, end, &alg->len, MBEDTLS_ASN1_OID ) ) != 0 ) return( ret ); alg->p = *p; *p += alg->len; if( *p == end ) { mbedtls_zeroize( params, sizeof(mbedtls_asn1_buf) ); return( 0 ); } params->tag = **p; (*p)++; if( ( ret = mbedtls_asn1_get_len( p, end, &params->len ) ) != 0 ) return( ret ); params->p = *p; *p += params->len; if( *p != end ) return( MBEDTLS_ERR_ASN1_LENGTH_MISMATCH ); return( 0 ); } int
mbedtls_asn1_get_alg_null( unsigned char **p, const unsigned char *end, mbedtls_asn1_buf *alg ) { int ret; mbedtls_asn1_buf params; memset( &params, 0, sizeof(mbedtls_asn1_buf) ); if( ( ret = mbedtls_asn1_get_alg( p, end, alg, &params ) ) != 0 ) return( ret ); if( ( params.tag != MBEDTLS_ASN1_NULL && params.tag != 0 ) || params.len != 0 ) return( MBEDTLS_ERR_ASN1_INVALID_DATA ); return( 0 ); } void mbedtls_asn1_free_named_data( mbedtls_asn1_named_data *cur ) { if( cur == NULL ) return; mbedtls_free( cur->oid.p ); mbedtls_free( cur->val.p ); mbedtls_zeroize( cur, sizeof( mbedtls_asn1_named_data ) ); } void mbedtls_asn1_free_named_data_list( mbedtls_asn1_named_data **head ) { mbedtls_asn1_named_data *cur; while( ( cur = *head ) != NULL ) { *head = cur->next; mbedtls_asn1_free_named_data( cur ); mbedtls_free( cur ); } } mbedtls_asn1_named_data *mbedtls_asn1_find_named_data( mbedtls_asn1_named_data *list, const char *oid, size_t len ) { while( list != NULL ) { if( list->oid.len == len && memcmp( list->oid.p, oid, len ) == 0 ) { break; } list = list->next; } return( list ); } #endif /* MBEDTLS_ASN1_PARSE_C */ backport-iwlwifi-dkms-7906/compat/verification/bignum.c000066400000000000000000001630541351064634500233020ustar00rootroot00000000000000/* * Multi-precision integer library * * Copyright (C) 2006-2015, ARM Limited, All Rights Reserved * SPDX-License-Identifier: GPL-2.0 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* * This file is part of mbed TLS (https://tls.mbed.org) */ /* * The following sources were referenced in the design of this Multi-precision * Integer library: * * [1] Handbook of Applied Cryptography - 1997 * Menezes, van Oorschot and Vanstone * * [2] Multi-Precision Math * Tom St Denis * https://github.com/libtom/libtommath/blob/develop/tommath.pdf * * [3] GNU Multi-Precision Arithmetic Library * https://gmplib.org/manual/index.html * */ #if !defined(MBEDTLS_CONFIG_FILE) #include "mbedtls/config.h" #else #include MBEDTLS_CONFIG_FILE #endif #if defined(MBEDTLS_BIGNUM_C) #include "mbedtls/bignum.h" #include "mbedtls/bn_mul.h" #if defined(MBEDTLS_PLATFORM_C) #include "mbedtls/platform.h" #else #include <stdio.h> #include <stdlib.h> #define mbedtls_printf printf #define mbedtls_calloc calloc #define mbedtls_free free #endif /* Implementation that should never be optimized out by the compiler */ static void mbedtls_mpi_zeroize( mbedtls_mpi_uint *v, size_t n ) { volatile mbedtls_mpi_uint *p = v; while( n-- ) *p++ = 0; } #define ciL (sizeof(mbedtls_mpi_uint)) /* chars in limb */ #define biL (ciL << 3) /* bits in limb */ #define biH (ciL << 2) /* half limb size */ #define MPI_SIZE_T_MAX ( (size_t) -1 ) /* SIZE_T_MAX is not standard */ /* * Convert between bits/chars and number of limbs * Divide first in order to avoid potential overflows */ #define BITS_TO_LIMBS(i) ( (i) / biL + ( (i) % biL != 0 ) ) #define CHARS_TO_LIMBS(i) ( (i) / ciL + ( (i) % ciL != 0 ) ) /* * Initialize one MPI */ void mbedtls_mpi_init( mbedtls_mpi *X ) { if( X == NULL ) return; X->s = 1; X->n = 0; X->p = NULL; } /* * Unallocate one MPI */ void mbedtls_mpi_free( mbedtls_mpi *X ) { if( X == NULL ) return; if( X->p != NULL ) { mbedtls_mpi_zeroize( X->p, X->n ); mbedtls_free( X->p ); } X->s = 1; X->n = 0; X->p = NULL; } /* * Enlarge to the specified number of limbs */ int mbedtls_mpi_grow( mbedtls_mpi *X, size_t nblimbs ) { mbedtls_mpi_uint *p; if( nblimbs > MBEDTLS_MPI_MAX_LIMBS ) return( MBEDTLS_ERR_MPI_ALLOC_FAILED ); if( X->n < nblimbs ) { if( ( p = (mbedtls_mpi_uint*)mbedtls_calloc( nblimbs, ciL ) ) == NULL ) return( MBEDTLS_ERR_MPI_ALLOC_FAILED ); if( X->p != NULL ) { memcpy( p, X->p, X->n * ciL ); mbedtls_mpi_zeroize( X->p, X->n ); mbedtls_free( X->p ); } X->n = nblimbs; X->p = p; } return( 0 ); } /* * Resize down as much as possible, * while keeping at least the specified number of limbs */ int mbedtls_mpi_shrink( mbedtls_mpi *X, size_t nblimbs ) { mbedtls_mpi_uint *p; size_t i; /* Actually resize up in this case */ if( X->n <= nblimbs ) return( mbedtls_mpi_grow( X, nblimbs ) ); for( i = X->n - 1; i > 0; i-- ) if( X->p[i] != 0 ) break; i++; if( i < nblimbs ) i = nblimbs; if( ( p = (mbedtls_mpi_uint*)mbedtls_calloc( i, ciL ) ) == NULL ) return( MBEDTLS_ERR_MPI_ALLOC_FAILED ); if( X->p != NULL ) { memcpy( p, X->p, i * ciL ); mbedtls_mpi_zeroize( X->p, X->n ); mbedtls_free( X->p ); } X->n = i; X->p = p; return( 0 ); } /* * Copy the contents of Y into X */ int mbedtls_mpi_copy( mbedtls_mpi *X, const mbedtls_mpi *Y ) { int ret; size_t i; if( X == Y ) return( 0 ); if( Y->p == NULL ) { mbedtls_mpi_free( X ); return( 0 ); } for( i = Y->n - 1; i > 0; i-- ) if( Y->p[i] != 0 ) break; i++; X->s = Y->s; MBEDTLS_MPI_CHK( mbedtls_mpi_grow( X, i ) ); memset( X->p, 0, X->n * ciL ); memcpy( X->p, Y->p, i * ciL ); cleanup: return( ret ); } /* * Swap the contents of X and Y */ void mbedtls_mpi_swap( mbedtls_mpi *X, mbedtls_mpi *Y ) { mbedtls_mpi T; memcpy( &T, X, sizeof( mbedtls_mpi ) ); memcpy( X, Y, sizeof( mbedtls_mpi ) ); memcpy( Y, &T, sizeof(
mbedtls_mpi ) ); } /* * Conditionally assign X = Y, without leaking information * about whether the assignment was made or not. * (Leaking information about the respective sizes of X and Y is ok however.) */ int mbedtls_mpi_safe_cond_assign( mbedtls_mpi *X, const mbedtls_mpi *Y, unsigned char assign ) { int ret = 0; size_t i; /* make sure assign is 0 or 1 in a time-constant manner */ assign = (assign | (unsigned char)-assign) >> 7; MBEDTLS_MPI_CHK( mbedtls_mpi_grow( X, Y->n ) ); X->s = X->s * ( 1 - assign ) + Y->s * assign; for( i = 0; i < Y->n; i++ ) X->p[i] = X->p[i] * ( 1 - assign ) + Y->p[i] * assign; for( ; i < X->n; i++ ) X->p[i] *= ( 1 - assign ); cleanup: return( ret ); } /* * Conditionally swap X and Y, without leaking information * about whether the swap was made or not. * Here it is not ok to simply swap the pointers, which would lead to * different memory access patterns when X and Y are used afterwards. */ int mbedtls_mpi_safe_cond_swap( mbedtls_mpi *X, mbedtls_mpi *Y, unsigned char swap ) { int ret, s; size_t i; mbedtls_mpi_uint tmp; if( X == Y ) return( 0 ); /* make sure swap is 0 or 1 in a time-constant manner */ swap = (swap | (unsigned char)-swap) >> 7; MBEDTLS_MPI_CHK( mbedtls_mpi_grow( X, Y->n ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_grow( Y, X->n ) ); s = X->s; X->s = X->s * ( 1 - swap ) + Y->s * swap; Y->s = Y->s * ( 1 - swap ) + s * swap; for( i = 0; i < X->n; i++ ) { tmp = X->p[i]; X->p[i] = X->p[i] * ( 1 - swap ) + Y->p[i] * swap; Y->p[i] = Y->p[i] * ( 1 - swap ) + tmp * swap; } cleanup: return( ret ); } /* * Set value from integer */ int mbedtls_mpi_lset( mbedtls_mpi *X, mbedtls_mpi_sint z ) { int ret; MBEDTLS_MPI_CHK( mbedtls_mpi_grow( X, 1 ) ); memset( X->p, 0, X->n * ciL ); X->p[0] = ( z < 0 ) ? -z : z; X->s = ( z < 0 ) ? -1 : 1; cleanup: return( ret ); } /* * Get a specific bit */ int mbedtls_mpi_get_bit( const mbedtls_mpi *X, size_t pos ) { if( X->n * biL <= pos ) return( 0 ); return( ( X->p[pos / biL] >> ( pos % biL ) ) & 0x01 ); } /* * Set a bit to a specific value of 0 or 1 */ int mbedtls_mpi_set_bit( mbedtls_mpi *X, size_t pos, unsigned char val ) { int ret = 0; size_t off = pos / biL; size_t idx = pos % biL; if( val != 0 && val != 1 ) return( MBEDTLS_ERR_MPI_BAD_INPUT_DATA ); if( X->n * biL <= pos ) { if( val == 0 ) return( 0 ); MBEDTLS_MPI_CHK( mbedtls_mpi_grow( X, off + 1 ) ); } X->p[off] &= ~( (mbedtls_mpi_uint) 0x01 << idx ); X->p[off] |= (mbedtls_mpi_uint) val << idx; cleanup: return( ret ); } /* * Return the number of less significant zero-bits */ size_t mbedtls_mpi_lsb( const mbedtls_mpi *X ) { size_t i, j, count = 0; for( i = 0; i < X->n; i++ ) for( j = 0; j < biL; j++, count++ ) if( ( ( X->p[i] >> j ) & 1 ) != 0 ) return( count ); return( 0 ); } /* * Count leading zero bits in a given integer */ static size_t mbedtls_clz( const mbedtls_mpi_uint x ) { size_t j; mbedtls_mpi_uint mask = (mbedtls_mpi_uint) 1 << (biL - 1); for( j = 0; j < biL; j++ ) { if( x & mask ) break; mask >>= 1; } return j; } /* * Return the number of bits */ size_t mbedtls_mpi_bitlen( const mbedtls_mpi *X ) { size_t i, j; if( X->n == 0 ) return( 0 ); for( i = X->n - 1; i > 0; i-- ) if( X->p[i] != 0 ) break; j = biL - mbedtls_clz( X->p[i] ); return( ( i * biL ) + j ); } /* * Return the total size in bytes */ size_t mbedtls_mpi_size( const mbedtls_mpi *X ) { return( ( mbedtls_mpi_bitlen( X ) + 7 ) >> 3 ); } /* * Convert an ASCII character to digit value */ static int mpi_get_digit( mbedtls_mpi_uint *d, int radix, char c ) { *d = 255; if( c >= 0x30 && c <= 0x39 ) *d = c - 0x30; if( c
>= 0x41 && c <= 0x46 ) *d = c - 0x37; if( c >= 0x61 && c <= 0x66 ) *d = c - 0x57; if( *d >= (mbedtls_mpi_uint) radix ) return( MBEDTLS_ERR_MPI_INVALID_CHARACTER ); return( 0 ); } /* * Import from an ASCII string */ int mbedtls_mpi_read_string( mbedtls_mpi *X, int radix, const char *s ) { int ret; size_t i, j, slen, n; mbedtls_mpi_uint d; mbedtls_mpi T; if( radix < 2 || radix > 16 ) return( MBEDTLS_ERR_MPI_BAD_INPUT_DATA ); mbedtls_mpi_init( &T ); slen = strlen( s ); if( radix == 16 ) { if( slen > MPI_SIZE_T_MAX >> 2 ) return( MBEDTLS_ERR_MPI_BAD_INPUT_DATA ); n = BITS_TO_LIMBS( slen << 2 ); MBEDTLS_MPI_CHK( mbedtls_mpi_grow( X, n ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_lset( X, 0 ) ); for( i = slen, j = 0; i > 0; i--, j++ ) { if( i == 1 && s[i - 1] == '-' ) { X->s = -1; break; } MBEDTLS_MPI_CHK( mpi_get_digit( &d, radix, s[i - 1] ) ); X->p[j / ( 2 * ciL )] |= d << ( ( j % ( 2 * ciL ) ) << 2 ); } } else { MBEDTLS_MPI_CHK( mbedtls_mpi_lset( X, 0 ) ); for( i = 0; i < slen; i++ ) { if( i == 0 && s[i] == '-' ) { X->s = -1; continue; } MBEDTLS_MPI_CHK( mpi_get_digit( &d, radix, s[i] ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_mul_int( &T, X, radix ) ); if( X->s == 1 ) { MBEDTLS_MPI_CHK( mbedtls_mpi_add_int( X, &T, d ) ); } else { MBEDTLS_MPI_CHK( mbedtls_mpi_sub_int( X, &T, d ) ); } } } cleanup: mbedtls_mpi_free( &T ); return( ret ); } /* * Helper to write the digits high-order first */ static int mpi_write_hlp( mbedtls_mpi *X, int radix, char **p ) { int ret; mbedtls_mpi_uint r; if( radix < 2 || radix > 16 ) return( MBEDTLS_ERR_MPI_BAD_INPUT_DATA ); MBEDTLS_MPI_CHK( mbedtls_mpi_mod_int( &r, X, radix ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_div_int( X, NULL, X, radix ) ); if( mbedtls_mpi_cmp_int( X, 0 ) != 0 ) MBEDTLS_MPI_CHK( mpi_write_hlp( X, radix, p ) ); if( r < 10 ) *(*p)++ = (char)( r + 0x30 ); else *(*p)++ = (char)( r + 0x37 ); cleanup: return( ret ); } /* * Export into an ASCII string */ int mbedtls_mpi_write_string( const mbedtls_mpi *X, int radix, char *buf, size_t buflen, size_t *olen ) { int ret = 0; size_t n; char *p; mbedtls_mpi T; if( radix < 2 || radix > 16 ) return( MBEDTLS_ERR_MPI_BAD_INPUT_DATA ); n = mbedtls_mpi_bitlen( X ); if( radix >= 4 ) n >>= 1; if( radix >= 16 ) n >>= 1; /* * Round up the buffer length to an even value to ensure that there is * enough room for hexadecimal values that can be represented in an odd * number of digits. 
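 * Worked example (illustrative): for an X of 12 significant bits at
 * radix 16, n = 12 >> 2 = 3 at this point; the adjustment below gives
 * n = 3 + 3 + ((3 + 1) & 1) = 6, which covers the up to four hex
 * digits printed for a two-byte value plus a possible '-' and the
 * trailing '\0'.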
*/ n += 3 + ( ( n + 1 ) & 1 ); if( buflen < n ) { *olen = n; return( MBEDTLS_ERR_MPI_BUFFER_TOO_SMALL ); } p = buf; mbedtls_mpi_init( &T ); if( X->s == -1 ) *p++ = '-'; if( radix == 16 ) { int c; size_t i, j, k; for( i = X->n, k = 0; i > 0; i-- ) { for( j = ciL; j > 0; j-- ) { c = ( X->p[i - 1] >> ( ( j - 1 ) << 3) ) & 0xFF; if( c == 0 && k == 0 && ( i + j ) != 2 ) continue; *(p++) = "0123456789ABCDEF" [c / 16]; *(p++) = "0123456789ABCDEF" [c % 16]; k = 1; } } } else { MBEDTLS_MPI_CHK( mbedtls_mpi_copy( &T, X ) ); if( T.s == -1 ) T.s = 1; MBEDTLS_MPI_CHK( mpi_write_hlp( &T, radix, &p ) ); } *p++ = '\0'; *olen = p - buf; cleanup: mbedtls_mpi_free( &T ); return( ret ); } #if defined(MBEDTLS_FS_IO) /* * Read X from an opened file */ int mbedtls_mpi_read_file( mbedtls_mpi *X, int radix, FILE *fin ) { mbedtls_mpi_uint d; size_t slen; char *p; /* * Buffer should have space for (short) label and decimal formatted MPI, * newline characters and '\0' */ char s[ MBEDTLS_MPI_RW_BUFFER_SIZE ]; memset( s, 0, sizeof( s ) ); if( fgets( s, sizeof( s ) - 1, fin ) == NULL ) return( MBEDTLS_ERR_MPI_FILE_IO_ERROR ); slen = strlen( s ); if( slen == sizeof( s ) - 2 ) return( MBEDTLS_ERR_MPI_BUFFER_TOO_SMALL ); if( slen > 0 && s[slen - 1] == '\n' ) { slen--; s[slen] = '\0'; } if( slen > 0 && s[slen - 1] == '\r' ) { slen--; s[slen] = '\0'; } p = s + slen; while( p-- > s ) if( mpi_get_digit( &d, radix, *p ) != 0 ) break; return( mbedtls_mpi_read_string( X, radix, p + 1 ) ); } /* * Write X into an opened file (or stdout if fout == NULL) */ int mbedtls_mpi_write_file( const char *p, const mbedtls_mpi *X, int radix, FILE *fout ) { int ret; size_t n, slen, plen; /* * Buffer should have space for (short) label and decimal formatted MPI, * newline characters and '\0' */ char s[ MBEDTLS_MPI_RW_BUFFER_SIZE ]; memset( s, 0, sizeof( s ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_write_string( X, radix, s, sizeof( s ) - 2, &n ) ); if( p == NULL ) p = ""; plen = strlen( p ); slen = strlen( s ); s[slen++] = '\r'; s[slen++] = '\n'; if( fout != NULL ) { if( fwrite( p, 1, plen, fout ) != plen || fwrite( s, 1, slen, fout ) != slen ) return( MBEDTLS_ERR_MPI_FILE_IO_ERROR ); } else mbedtls_printf( "%s%s", p, s ); cleanup: return( ret ); } #endif /* MBEDTLS_FS_IO */ /* * Import X from unsigned binary data, big endian */ int mbedtls_mpi_read_binary( mbedtls_mpi *X, const unsigned char *buf, size_t buflen ) { int ret; size_t i, j, n; for( n = 0; n < buflen; n++ ) if( buf[n] != 0 ) break; MBEDTLS_MPI_CHK( mbedtls_mpi_grow( X, CHARS_TO_LIMBS( buflen - n ) ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_lset( X, 0 ) ); for( i = buflen, j = 0; i > n; i--, j++ ) X->p[j / ciL] |= ((mbedtls_mpi_uint) buf[i - 1]) << ((j % ciL) << 3); cleanup: return( ret ); } /* * Export X into unsigned binary data, big endian */ int mbedtls_mpi_write_binary( const mbedtls_mpi *X, unsigned char *buf, size_t buflen ) { size_t i, j, n; n = mbedtls_mpi_size( X ); if( buflen < n ) return( MBEDTLS_ERR_MPI_BUFFER_TOO_SMALL ); memset( buf, 0, buflen ); for( i = buflen - 1, j = 0; n > 0; i--, j++, n-- ) buf[i] = (unsigned char)( X->p[j / ciL] >> ((j % ciL) << 3) ); return( 0 ); } /* * Left-shift: X <<= count */ int mbedtls_mpi_shift_l( mbedtls_mpi *X, size_t count ) { int ret; size_t i, v0, t1; mbedtls_mpi_uint r0 = 0, r1; v0 = count / (biL ); t1 = count & (biL - 1); i = mbedtls_mpi_bitlen( X ) + count; if( X->n * biL < i ) MBEDTLS_MPI_CHK( mbedtls_mpi_grow( X, BITS_TO_LIMBS( i ) ) ); ret = 0; /* * shift by count / limb_size */ if( v0 > 0 ) { for( i = X->n; i > v0; i-- ) X->p[i - 1] = X->p[i - 
v0 - 1]; for( ; i > 0; i-- ) X->p[i - 1] = 0; } /* * shift by count % limb_size */ if( t1 > 0 ) { for( i = v0; i < X->n; i++ ) { r1 = X->p[i] >> (biL - t1); X->p[i] <<= t1; X->p[i] |= r0; r0 = r1; } } cleanup: return( ret ); } /* * Right-shift: X >>= count */ int mbedtls_mpi_shift_r( mbedtls_mpi *X, size_t count ) { size_t i, v0, v1; mbedtls_mpi_uint r0 = 0, r1; v0 = count / biL; v1 = count & (biL - 1); if( v0 > X->n || ( v0 == X->n && v1 > 0 ) ) return mbedtls_mpi_lset( X, 0 ); /* * shift by count / limb_size */ if( v0 > 0 ) { for( i = 0; i < X->n - v0; i++ ) X->p[i] = X->p[i + v0]; for( ; i < X->n; i++ ) X->p[i] = 0; } /* * shift by count % limb_size */ if( v1 > 0 ) { for( i = X->n; i > 0; i-- ) { r1 = X->p[i - 1] << (biL - v1); X->p[i - 1] >>= v1; X->p[i - 1] |= r0; r0 = r1; } } return( 0 ); } /* * Compare unsigned values */ int mbedtls_mpi_cmp_abs( const mbedtls_mpi *X, const mbedtls_mpi *Y ) { size_t i, j; for( i = X->n; i > 0; i-- ) if( X->p[i - 1] != 0 ) break; for( j = Y->n; j > 0; j-- ) if( Y->p[j - 1] != 0 ) break; if( i == 0 && j == 0 ) return( 0 ); if( i > j ) return( 1 ); if( j > i ) return( -1 ); for( ; i > 0; i-- ) { if( X->p[i - 1] > Y->p[i - 1] ) return( 1 ); if( X->p[i - 1] < Y->p[i - 1] ) return( -1 ); } return( 0 ); } /* * Compare signed values */ int mbedtls_mpi_cmp_mpi( const mbedtls_mpi *X, const mbedtls_mpi *Y ) { size_t i, j; for( i = X->n; i > 0; i-- ) if( X->p[i - 1] != 0 ) break; for( j = Y->n; j > 0; j-- ) if( Y->p[j - 1] != 0 ) break; if( i == 0 && j == 0 ) return( 0 ); if( i > j ) return( X->s ); if( j > i ) return( -Y->s ); if( X->s > 0 && Y->s < 0 ) return( 1 ); if( Y->s > 0 && X->s < 0 ) return( -1 ); for( ; i > 0; i-- ) { if( X->p[i - 1] > Y->p[i - 1] ) return( X->s ); if( X->p[i - 1] < Y->p[i - 1] ) return( -X->s ); } return( 0 ); } /* * Compare signed values */ int mbedtls_mpi_cmp_int( const mbedtls_mpi *X, mbedtls_mpi_sint z ) { mbedtls_mpi Y; mbedtls_mpi_uint p[1]; *p = ( z < 0 ) ? -z : z; Y.s = ( z < 0 ) ? -1 : 1; Y.n = 1; Y.p = p; return( mbedtls_mpi_cmp_mpi( X, &Y ) ); } /* * Unsigned addition: X = |A| + |B| (HAC 14.7) */ int mbedtls_mpi_add_abs( mbedtls_mpi *X, const mbedtls_mpi *A, const mbedtls_mpi *B ) { int ret; size_t i, j; mbedtls_mpi_uint *o, *p, c, tmp; if( X == B ) { const mbedtls_mpi *T = A; A = X; B = T; } if( X != A ) MBEDTLS_MPI_CHK( mbedtls_mpi_copy( X, A ) ); /* * X should always be positive as a result of unsigned additions. 
*/ X->s = 1; for( j = B->n; j > 0; j-- ) if( B->p[j - 1] != 0 ) break; MBEDTLS_MPI_CHK( mbedtls_mpi_grow( X, j ) ); o = B->p; p = X->p; c = 0; /* * tmp is used because it might happen that p == o */ for( i = 0; i < j; i++, o++, p++ ) { tmp= *o; *p += c; c = ( *p < c ); *p += tmp; c += ( *p < tmp ); } while( c != 0 ) { if( i >= X->n ) { MBEDTLS_MPI_CHK( mbedtls_mpi_grow( X, i + 1 ) ); p = X->p + i; } *p += c; c = ( *p < c ); i++; p++; } cleanup: return( ret ); } /* * Helper for mbedtls_mpi subtraction */ static void mpi_sub_hlp( size_t n, mbedtls_mpi_uint *s, mbedtls_mpi_uint *d ) { size_t i; mbedtls_mpi_uint c, z; for( i = c = 0; i < n; i++, s++, d++ ) { z = ( *d < c ); *d -= c; c = ( *d < *s ) + z; *d -= *s; } while( c != 0 ) { z = ( *d < c ); *d -= c; c = z; i++; d++; } } /* * Unsigned subtraction: X = |A| - |B| (HAC 14.9) */ int mbedtls_mpi_sub_abs( mbedtls_mpi *X, const mbedtls_mpi *A, const mbedtls_mpi *B ) { mbedtls_mpi TB; int ret; size_t n; if( mbedtls_mpi_cmp_abs( A, B ) < 0 ) return( MBEDTLS_ERR_MPI_NEGATIVE_VALUE ); mbedtls_mpi_init( &TB ); if( X == B ) { MBEDTLS_MPI_CHK( mbedtls_mpi_copy( &TB, B ) ); B = &TB; } if( X != A ) MBEDTLS_MPI_CHK( mbedtls_mpi_copy( X, A ) ); /* * X should always be positive as a result of unsigned subtractions. */ X->s = 1; ret = 0; for( n = B->n; n > 0; n-- ) if( B->p[n - 1] != 0 ) break; mpi_sub_hlp( n, B->p, X->p ); cleanup: mbedtls_mpi_free( &TB ); return( ret ); } /* * Signed addition: X = A + B */ int mbedtls_mpi_add_mpi( mbedtls_mpi *X, const mbedtls_mpi *A, const mbedtls_mpi *B ) { int ret, s = A->s; if( A->s * B->s < 0 ) { if( mbedtls_mpi_cmp_abs( A, B ) >= 0 ) { MBEDTLS_MPI_CHK( mbedtls_mpi_sub_abs( X, A, B ) ); X->s = s; } else { MBEDTLS_MPI_CHK( mbedtls_mpi_sub_abs( X, B, A ) ); X->s = -s; } } else { MBEDTLS_MPI_CHK( mbedtls_mpi_add_abs( X, A, B ) ); X->s = s; } cleanup: return( ret ); } /* * Signed subtraction: X = A - B */ int mbedtls_mpi_sub_mpi( mbedtls_mpi *X, const mbedtls_mpi *A, const mbedtls_mpi *B ) { int ret, s = A->s; if( A->s * B->s > 0 ) { if( mbedtls_mpi_cmp_abs( A, B ) >= 0 ) { MBEDTLS_MPI_CHK( mbedtls_mpi_sub_abs( X, A, B ) ); X->s = s; } else { MBEDTLS_MPI_CHK( mbedtls_mpi_sub_abs( X, B, A ) ); X->s = -s; } } else { MBEDTLS_MPI_CHK( mbedtls_mpi_add_abs( X, A, B ) ); X->s = s; } cleanup: return( ret ); } /* * Signed addition: X = A + b */ int mbedtls_mpi_add_int( mbedtls_mpi *X, const mbedtls_mpi *A, mbedtls_mpi_sint b ) { mbedtls_mpi _B; mbedtls_mpi_uint p[1]; p[0] = ( b < 0 ) ? -b : b; _B.s = ( b < 0 ) ? -1 : 1; _B.n = 1; _B.p = p; return( mbedtls_mpi_add_mpi( X, A, &_B ) ); } /* * Signed subtraction: X = A - b */ int mbedtls_mpi_sub_int( mbedtls_mpi *X, const mbedtls_mpi *A, mbedtls_mpi_sint b ) { mbedtls_mpi _B; mbedtls_mpi_uint p[1]; p[0] = ( b < 0 ) ? -b : b; _B.s = ( b < 0 ) ? -1 : 1; _B.n = 1; _B.p = p; return( mbedtls_mpi_sub_mpi( X, A, &_B ) ); } /* * Helper for mbedtls_mpi multiplication */ static #if defined(__APPLE__) && defined(__arm__) /* * Apple LLVM version 4.2 (clang-425.0.24) (based on LLVM 3.2svn) * appears to need this to prevent bad ARM code generation at -O3. 
*/ __attribute__ ((noinline)) #endif void mpi_mul_hlp( size_t i, mbedtls_mpi_uint *s, mbedtls_mpi_uint *d, mbedtls_mpi_uint b ) { mbedtls_mpi_uint c = 0, t = 0; #if defined(MULADDC_HUIT) for( ; i >= 8; i -= 8 ) { MULADDC_INIT MULADDC_HUIT MULADDC_STOP } for( ; i > 0; i-- ) { MULADDC_INIT MULADDC_CORE MULADDC_STOP } #else /* MULADDC_HUIT */ for( ; i >= 16; i -= 16 ) { MULADDC_INIT MULADDC_CORE MULADDC_CORE MULADDC_CORE MULADDC_CORE MULADDC_CORE MULADDC_CORE MULADDC_CORE MULADDC_CORE MULADDC_CORE MULADDC_CORE MULADDC_CORE MULADDC_CORE MULADDC_CORE MULADDC_CORE MULADDC_CORE MULADDC_CORE MULADDC_STOP } for( ; i >= 8; i -= 8 ) { MULADDC_INIT MULADDC_CORE MULADDC_CORE MULADDC_CORE MULADDC_CORE MULADDC_CORE MULADDC_CORE MULADDC_CORE MULADDC_CORE MULADDC_STOP } for( ; i > 0; i-- ) { MULADDC_INIT MULADDC_CORE MULADDC_STOP } #endif /* MULADDC_HUIT */ t++; do { *d += c; c = ( *d < c ); d++; } while( c != 0 ); } /* * Baseline multiplication: X = A * B (HAC 14.12) */ int mbedtls_mpi_mul_mpi( mbedtls_mpi *X, const mbedtls_mpi *A, const mbedtls_mpi *B ) { int ret; size_t i, j; mbedtls_mpi TA, TB; mbedtls_mpi_init( &TA ); mbedtls_mpi_init( &TB ); if( X == A ) { MBEDTLS_MPI_CHK( mbedtls_mpi_copy( &TA, A ) ); A = &TA; } if( X == B ) { MBEDTLS_MPI_CHK( mbedtls_mpi_copy( &TB, B ) ); B = &TB; } for( i = A->n; i > 0; i-- ) if( A->p[i - 1] != 0 ) break; for( j = B->n; j > 0; j-- ) if( B->p[j - 1] != 0 ) break; MBEDTLS_MPI_CHK( mbedtls_mpi_grow( X, i + j ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_lset( X, 0 ) ); for( i++; j > 0; j-- ) mpi_mul_hlp( i - 1, A->p, X->p + j - 1, B->p[j - 1] ); X->s = A->s * B->s; cleanup: mbedtls_mpi_free( &TB ); mbedtls_mpi_free( &TA ); return( ret ); } /* * Baseline multiplication: X = A * b */ int mbedtls_mpi_mul_int( mbedtls_mpi *X, const mbedtls_mpi *A, mbedtls_mpi_uint b ) { mbedtls_mpi _B; mbedtls_mpi_uint p[1]; _B.s = 1; _B.n = 1; _B.p = p; p[0] = b; return( mbedtls_mpi_mul_mpi( X, A, &_B ) ); } /* * Unsigned integer divide - double mbedtls_mpi_uint dividend, u1/u0, and * mbedtls_mpi_uint divisor, d */ static mbedtls_mpi_uint mbedtls_int_div_int( mbedtls_mpi_uint u1, mbedtls_mpi_uint u0, mbedtls_mpi_uint d, mbedtls_mpi_uint *r ) { #if defined(MBEDTLS_HAVE_UDBL) mbedtls_t_udbl dividend, quotient; #else const mbedtls_mpi_uint radix = (mbedtls_mpi_uint) 1 << biH; const mbedtls_mpi_uint uint_halfword_mask = ( (mbedtls_mpi_uint) 1 << biH ) - 1; mbedtls_mpi_uint d0, d1, q0, q1, rAX, r0, quotient; mbedtls_mpi_uint u0_msw, u0_lsw; size_t s; #endif /* * Check for overflow */ if( 0 == d || u1 >= d ) { if (r != NULL) *r = ~0; return ( ~0 ); } #if defined(MBEDTLS_HAVE_UDBL) dividend = (mbedtls_t_udbl) u1 << biL; dividend |= (mbedtls_t_udbl) u0; quotient = dividend / d; if( quotient > ( (mbedtls_t_udbl) 1 << biL ) - 1 ) quotient = ( (mbedtls_t_udbl) 1 << biL ) - 1; if( r != NULL ) *r = (mbedtls_mpi_uint)( dividend - (quotient * d ) ); return (mbedtls_mpi_uint) quotient; #else /* * Algorithm D, Section 4.3.1 - The Art of Computer Programming * Vol. 
2 - Seminumerical Algorithms, Knuth */ /* * Normalize the divisor, d, and dividend, u0, u1 */ s = mbedtls_clz( d ); d = d << s; u1 = u1 << s; u1 |= ( u0 >> ( biL - s ) ) & ( -(mbedtls_mpi_sint)s >> ( biL - 1 ) ); u0 = u0 << s; d1 = d >> biH; d0 = d & uint_halfword_mask; u0_msw = u0 >> biH; u0_lsw = u0 & uint_halfword_mask; /* * Find the first quotient and remainder */ q1 = u1 / d1; r0 = u1 - d1 * q1; while( q1 >= radix || ( q1 * d0 > radix * r0 + u0_msw ) ) { q1 -= 1; r0 += d1; if ( r0 >= radix ) break; } rAX = ( u1 * radix ) + ( u0_msw - q1 * d ); q0 = rAX / d1; r0 = rAX - q0 * d1; while( q0 >= radix || ( q0 * d0 > radix * r0 + u0_lsw ) ) { q0 -= 1; r0 += d1; if ( r0 >= radix ) break; } if (r != NULL) *r = ( rAX * radix + u0_lsw - q0 * d ) >> s; quotient = q1 * radix + q0; return quotient; #endif } /* * Division by mbedtls_mpi: A = Q * B + R (HAC 14.20) */ int mbedtls_mpi_div_mpi( mbedtls_mpi *Q, mbedtls_mpi *R, const mbedtls_mpi *A, const mbedtls_mpi *B ) { int ret; size_t i, n, t, k; mbedtls_mpi X, Y, Z, T1, T2; if( mbedtls_mpi_cmp_int( B, 0 ) == 0 ) return( MBEDTLS_ERR_MPI_DIVISION_BY_ZERO ); mbedtls_mpi_init( &X ); mbedtls_mpi_init( &Y ); mbedtls_mpi_init( &Z ); mbedtls_mpi_init( &T1 ); mbedtls_mpi_init( &T2 ); if( mbedtls_mpi_cmp_abs( A, B ) < 0 ) { if( Q != NULL ) MBEDTLS_MPI_CHK( mbedtls_mpi_lset( Q, 0 ) ); if( R != NULL ) MBEDTLS_MPI_CHK( mbedtls_mpi_copy( R, A ) ); return( 0 ); } MBEDTLS_MPI_CHK( mbedtls_mpi_copy( &X, A ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_copy( &Y, B ) ); X.s = Y.s = 1; MBEDTLS_MPI_CHK( mbedtls_mpi_grow( &Z, A->n + 2 ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_lset( &Z, 0 ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_grow( &T1, 2 ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_grow( &T2, 3 ) ); k = mbedtls_mpi_bitlen( &Y ) % biL; if( k < biL - 1 ) { k = biL - 1 - k; MBEDTLS_MPI_CHK( mbedtls_mpi_shift_l( &X, k ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_shift_l( &Y, k ) ); } else k = 0; n = X.n - 1; t = Y.n - 1; MBEDTLS_MPI_CHK( mbedtls_mpi_shift_l( &Y, biL * ( n - t ) ) ); while( mbedtls_mpi_cmp_mpi( &X, &Y ) >= 0 ) { Z.p[n - t]++; MBEDTLS_MPI_CHK( mbedtls_mpi_sub_mpi( &X, &X, &Y ) ); } MBEDTLS_MPI_CHK( mbedtls_mpi_shift_r( &Y, biL * ( n - t ) ) ); for( i = n; i > t ; i-- ) { if( X.p[i] >= Y.p[t] ) Z.p[i - t - 1] = ~0; else { Z.p[i - t - 1] = mbedtls_int_div_int( X.p[i], X.p[i - 1], Y.p[t], NULL); } Z.p[i - t - 1]++; do { Z.p[i - t - 1]--; MBEDTLS_MPI_CHK( mbedtls_mpi_lset( &T1, 0 ) ); T1.p[0] = ( t < 1 ) ? 0 : Y.p[t - 1]; T1.p[1] = Y.p[t]; MBEDTLS_MPI_CHK( mbedtls_mpi_mul_int( &T1, &T1, Z.p[i - t - 1] ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_lset( &T2, 0 ) ); T2.p[0] = ( i < 2 ) ? 0 : X.p[i - 2]; T2.p[1] = ( i < 1 ) ? 
0 : X.p[i - 1]; T2.p[2] = X.p[i]; } while( mbedtls_mpi_cmp_mpi( &T1, &T2 ) > 0 ); MBEDTLS_MPI_CHK( mbedtls_mpi_mul_int( &T1, &Y, Z.p[i - t - 1] ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_shift_l( &T1, biL * ( i - t - 1 ) ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_sub_mpi( &X, &X, &T1 ) ); if( mbedtls_mpi_cmp_int( &X, 0 ) < 0 ) { MBEDTLS_MPI_CHK( mbedtls_mpi_copy( &T1, &Y ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_shift_l( &T1, biL * ( i - t - 1 ) ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_add_mpi( &X, &X, &T1 ) ); Z.p[i - t - 1]--; } } if( Q != NULL ) { MBEDTLS_MPI_CHK( mbedtls_mpi_copy( Q, &Z ) ); Q->s = A->s * B->s; } if( R != NULL ) { MBEDTLS_MPI_CHK( mbedtls_mpi_shift_r( &X, k ) ); X.s = A->s; MBEDTLS_MPI_CHK( mbedtls_mpi_copy( R, &X ) ); if( mbedtls_mpi_cmp_int( R, 0 ) == 0 ) R->s = 1; } cleanup: mbedtls_mpi_free( &X ); mbedtls_mpi_free( &Y ); mbedtls_mpi_free( &Z ); mbedtls_mpi_free( &T1 ); mbedtls_mpi_free( &T2 ); return( ret ); } /* * Division by int: A = Q * b + R */ int mbedtls_mpi_div_int( mbedtls_mpi *Q, mbedtls_mpi *R, const mbedtls_mpi *A, mbedtls_mpi_sint b ) { mbedtls_mpi _B; mbedtls_mpi_uint p[1]; p[0] = ( b < 0 ) ? -b : b; _B.s = ( b < 0 ) ? -1 : 1; _B.n = 1; _B.p = p; return( mbedtls_mpi_div_mpi( Q, R, A, &_B ) ); } /* * Modulo: R = A mod B */ int mbedtls_mpi_mod_mpi( mbedtls_mpi *R, const mbedtls_mpi *A, const mbedtls_mpi *B ) { int ret; if( mbedtls_mpi_cmp_int( B, 0 ) < 0 ) return( MBEDTLS_ERR_MPI_NEGATIVE_VALUE ); MBEDTLS_MPI_CHK( mbedtls_mpi_div_mpi( NULL, R, A, B ) ); while( mbedtls_mpi_cmp_int( R, 0 ) < 0 ) MBEDTLS_MPI_CHK( mbedtls_mpi_add_mpi( R, R, B ) ); while( mbedtls_mpi_cmp_mpi( R, B ) >= 0 ) MBEDTLS_MPI_CHK( mbedtls_mpi_sub_mpi( R, R, B ) ); cleanup: return( ret ); } /* * Modulo: r = A mod b */ int mbedtls_mpi_mod_int( mbedtls_mpi_uint *r, const mbedtls_mpi *A, mbedtls_mpi_sint b ) { size_t i; mbedtls_mpi_uint x, y, z; if( b == 0 ) return( MBEDTLS_ERR_MPI_DIVISION_BY_ZERO ); if( b < 0 ) return( MBEDTLS_ERR_MPI_NEGATIVE_VALUE ); /* * handle trivial cases */ if( b == 1 ) { *r = 0; return( 0 ); } if( b == 2 ) { *r = A->p[0] & 1; return( 0 ); } /* * general case */ for( i = A->n, y = 0; i > 0; i-- ) { x = A->p[i - 1]; y = ( y << biH ) | ( x >> biH ); z = y / b; y -= z * b; x <<= biH; y = ( y << biH ) | ( x >> biH ); z = y / b; y -= z * b; } /* * If A is negative, then the current y represents a negative value. * Flipping it to the positive side. */ if( A->s < 0 && y != 0 ) y = b - y; *r = y; return( 0 ); } /* * Fast Montgomery initialization (thanks to Tom St Denis) */ static void mpi_montg_init( mbedtls_mpi_uint *mm, const mbedtls_mpi *N ) { mbedtls_mpi_uint x, m0 = N->p[0]; unsigned int i; x = m0; x += ( ( m0 + 2 ) & 4 ) << 1; for( i = biL; i >= 8; i /= 2 ) x *= ( 2 - ( m0 * x ) ); *mm = ~x + 1; } /* * Montgomery multiplication: A = A * B * R^-1 mod N (HAC 14.36) */ static int mpi_montmul( mbedtls_mpi *A, const mbedtls_mpi *B, const mbedtls_mpi *N, mbedtls_mpi_uint mm, const mbedtls_mpi *T ) { size_t i, n, m; mbedtls_mpi_uint u0, u1, *d; if( T->n < N->n + 1 || T->p == NULL ) return( MBEDTLS_ERR_MPI_BAD_INPUT_DATA ); memset( T->p, 0, T->n * ciL ); d = T->p; n = N->n; m = ( B->n < n ) ? 
B->n : n; for( i = 0; i < n; i++ ) { /* * T = (T + u0*B + u1*N) / 2^biL */ u0 = A->p[i]; u1 = ( d[0] + u0 * B->p[0] ) * mm; mpi_mul_hlp( m, B->p, d, u0 ); mpi_mul_hlp( n, N->p, d, u1 ); *d++ = u0; d[n + 1] = 0; } memcpy( A->p, d, ( n + 1 ) * ciL ); if( mbedtls_mpi_cmp_abs( A, N ) >= 0 ) mpi_sub_hlp( n, N->p, A->p ); else /* prevent timing attacks */ mpi_sub_hlp( n, A->p, T->p ); return( 0 ); } /* * Montgomery reduction: A = A * R^-1 mod N */ static int mpi_montred( mbedtls_mpi *A, const mbedtls_mpi *N, mbedtls_mpi_uint mm, const mbedtls_mpi *T ) { mbedtls_mpi_uint z = 1; mbedtls_mpi U; U.n = U.s = (int) z; U.p = &z; return( mpi_montmul( A, &U, N, mm, T ) ); } /* * Sliding-window exponentiation: X = A^E mod N (HAC 14.85) */ int mbedtls_mpi_exp_mod( mbedtls_mpi *X, const mbedtls_mpi *A, const mbedtls_mpi *E, const mbedtls_mpi *N, mbedtls_mpi *_RR ) { int ret; size_t wbits, wsize, one = 1; size_t i, j, nblimbs; size_t bufsize, nbits; mbedtls_mpi_uint ei, mm, state; struct { mbedtls_mpi RR, T, W[ 2 << MBEDTLS_MPI_WINDOW_SIZE ], Apos; } *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); int neg; if (!ctx) return -ENOMEM; if( mbedtls_mpi_cmp_int( N, 0 ) < 0 || ( N->p[0] & 1 ) == 0 ) { ret = ( MBEDTLS_ERR_MPI_BAD_INPUT_DATA ); goto free_ctx; } if( mbedtls_mpi_cmp_int( E, 0 ) < 0 ) { ret = ( MBEDTLS_ERR_MPI_BAD_INPUT_DATA ); goto free_ctx; } /* * Init temps and window size */ mpi_montg_init( &mm, N ); mbedtls_mpi_init( &ctx->RR ); mbedtls_mpi_init( &ctx->T ); mbedtls_mpi_init( &ctx->Apos ); memset( ctx->W, 0, sizeof( ctx->W ) ); i = mbedtls_mpi_bitlen( E ); wsize = ( i > 671 ) ? 6 : ( i > 239 ) ? 5 : ( i > 79 ) ? 4 : ( i > 23 ) ? 3 : 1; if( wsize > MBEDTLS_MPI_WINDOW_SIZE ) wsize = MBEDTLS_MPI_WINDOW_SIZE; j = N->n + 1; MBEDTLS_MPI_CHK( mbedtls_mpi_grow( X, j ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_grow( &ctx->W[1], j ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_grow( &ctx->T, j * 2 ) ); /* * Compensate for negative A (and correct at the end) */ neg = ( A->s == -1 ); if( neg ) { MBEDTLS_MPI_CHK( mbedtls_mpi_copy( &ctx->Apos, A ) ); ctx->Apos.s = 1; A = &ctx->Apos; } /* * If 1st call, pre-compute R^2 mod N */ if( _RR == NULL || _RR->p == NULL ) { MBEDTLS_MPI_CHK( mbedtls_mpi_lset( &ctx->RR, 1 ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_shift_l( &ctx->RR, N->n * 2 * biL ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_mod_mpi( &ctx->RR, &ctx->RR, N ) ); if( _RR != NULL ) memcpy( _RR, &ctx->RR, sizeof( mbedtls_mpi ) ); } else memcpy( &ctx->RR, _RR, sizeof( mbedtls_mpi ) ); /* * W[1] = A * R^2 * R^-1 mod N = A * R mod N */ if( mbedtls_mpi_cmp_mpi( A, N ) >= 0 ) MBEDTLS_MPI_CHK( mbedtls_mpi_mod_mpi( &ctx->W[1], A, N ) ); else MBEDTLS_MPI_CHK( mbedtls_mpi_copy( &ctx->W[1], A ) ); MBEDTLS_MPI_CHK( mpi_montmul( &ctx->W[1], &ctx->RR, N, mm, &ctx->T ) ); /* * X = R^2 * R^-1 mod N = R mod N */ MBEDTLS_MPI_CHK( mbedtls_mpi_copy( X, &ctx->RR ) ); MBEDTLS_MPI_CHK( mpi_montred( X, N, mm, &ctx->T ) ); if( wsize > 1 ) { /* * W[1 << (wsize - 1)] = W[1] ^ (wsize - 1) */ j = one << ( wsize - 1 ); MBEDTLS_MPI_CHK( mbedtls_mpi_grow( &ctx->W[j], N->n + 1 ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_copy( &ctx->W[j], &ctx->W[1] ) ); for( i = 0; i < wsize - 1; i++ ) MBEDTLS_MPI_CHK( mpi_montmul( &ctx->W[j], &ctx->W[j], N, mm, &ctx->T ) ); /* * W[i] = W[i - 1] * W[1] */ for( i = j + 1; i < ( one << wsize ); i++ ) { MBEDTLS_MPI_CHK( mbedtls_mpi_grow( &ctx->W[i], N->n + 1 ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_copy( &ctx->W[i], &ctx->W[i - 1] ) ); MBEDTLS_MPI_CHK( mpi_montmul( &ctx->W[i], &ctx->W[1], N, mm, &ctx->T ) ); } } nblimbs = E->n; bufsize = 0; nbits = 0; wbits = 0; state = 0; 
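/*
 * Descriptive note on the loop below: the exponent E is scanned bit by
 * bit, most significant limb first. 'state' is 0 while leading zero
 * bits are skipped, 1 between windows (each zero bit then costs one
 * Montgomery squaring of X), and 2 while up to 'wsize' bits are being
 * collected into 'wbits'; a full window is flushed with 'wsize'
 * squarings followed by a single multiplication by the precomputed
 * W[wbits].
 */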
while( 1 ) { if( bufsize == 0 ) { if( nblimbs == 0 ) break; nblimbs--; bufsize = sizeof( mbedtls_mpi_uint ) << 3; } bufsize--; ei = (E->p[nblimbs] >> bufsize) & 1; /* * skip leading 0s */ if( ei == 0 && state == 0 ) continue; if( ei == 0 && state == 1 ) { /* * out of window, square X */ MBEDTLS_MPI_CHK( mpi_montmul( X, X, N, mm, &ctx->T ) ); continue; } /* * add ei to current window */ state = 2; nbits++; wbits |= ( ei << ( wsize - nbits ) ); if( nbits == wsize ) { /* * X = X^wsize R^-1 mod N */ for( i = 0; i < wsize; i++ ) MBEDTLS_MPI_CHK( mpi_montmul( X, X, N, mm, &ctx->T ) ); /* * X = X * W[wbits] R^-1 mod N */ MBEDTLS_MPI_CHK( mpi_montmul( X, &ctx->W[wbits], N, mm, &ctx->T ) ); state--; nbits = 0; wbits = 0; } } /* * process the remaining bits */ for( i = 0; i < nbits; i++ ) { MBEDTLS_MPI_CHK( mpi_montmul( X, X, N, mm, &ctx->T ) ); wbits <<= 1; if( ( wbits & ( one << wsize ) ) != 0 ) MBEDTLS_MPI_CHK( mpi_montmul( X, &ctx->W[1], N, mm, &ctx->T ) ); } /* * X = A^E * R * R^-1 mod N = A^E mod N */ MBEDTLS_MPI_CHK( mpi_montred( X, N, mm, &ctx->T ) ); if( neg && E->n != 0 && ( E->p[0] & 1 ) != 0 ) { X->s = -1; MBEDTLS_MPI_CHK( mbedtls_mpi_add_mpi( X, N, X ) ); } cleanup: for( i = ( one << ( wsize - 1 ) ); i < ( one << wsize ); i++ ) mbedtls_mpi_free( &ctx->W[i] ); mbedtls_mpi_free( &ctx->W[1] ); mbedtls_mpi_free( &ctx->T ); mbedtls_mpi_free( &ctx->Apos ); if( _RR == NULL || _RR->p == NULL ) mbedtls_mpi_free( &ctx->RR ); free_ctx: kfree(ctx); return( ret ); } /* * Greatest common divisor: G = gcd(A, B) (HAC 14.54) */ int mbedtls_mpi_gcd( mbedtls_mpi *G, const mbedtls_mpi *A, const mbedtls_mpi *B ) { int ret; size_t lz, lzt; mbedtls_mpi TG, TA, TB; mbedtls_mpi_init( &TG ); mbedtls_mpi_init( &TA ); mbedtls_mpi_init( &TB ); MBEDTLS_MPI_CHK( mbedtls_mpi_copy( &TA, A ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_copy( &TB, B ) ); lz = mbedtls_mpi_lsb( &TA ); lzt = mbedtls_mpi_lsb( &TB ); if( lzt < lz ) lz = lzt; MBEDTLS_MPI_CHK( mbedtls_mpi_shift_r( &TA, lz ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_shift_r( &TB, lz ) ); TA.s = TB.s = 1; while( mbedtls_mpi_cmp_int( &TA, 0 ) != 0 ) { MBEDTLS_MPI_CHK( mbedtls_mpi_shift_r( &TA, mbedtls_mpi_lsb( &TA ) ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_shift_r( &TB, mbedtls_mpi_lsb( &TB ) ) ); if( mbedtls_mpi_cmp_mpi( &TA, &TB ) >= 0 ) { MBEDTLS_MPI_CHK( mbedtls_mpi_sub_abs( &TA, &TA, &TB ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_shift_r( &TA, 1 ) ); } else { MBEDTLS_MPI_CHK( mbedtls_mpi_sub_abs( &TB, &TB, &TA ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_shift_r( &TB, 1 ) ); } } MBEDTLS_MPI_CHK( mbedtls_mpi_shift_l( &TB, lz ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_copy( G, &TB ) ); cleanup: mbedtls_mpi_free( &TG ); mbedtls_mpi_free( &TA ); mbedtls_mpi_free( &TB ); return( ret ); } /* * Fill X with size bytes of random. * * Use a temporary bytes representation to make sure the result is the same * regardless of the platform endianness (useful when f_rng is actually * deterministic, eg for tests). 
*/ int mbedtls_mpi_fill_random( mbedtls_mpi *X, size_t size, int (*f_rng)(void *, unsigned char *, size_t), void *p_rng ) { int ret; unsigned char buf[MBEDTLS_MPI_MAX_SIZE]; if( size > MBEDTLS_MPI_MAX_SIZE ) return( MBEDTLS_ERR_MPI_BAD_INPUT_DATA ); MBEDTLS_MPI_CHK( f_rng( p_rng, buf, size ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_read_binary( X, buf, size ) ); cleanup: return( ret ); } /* * Modular inverse: X = A^-1 mod N (HAC 14.61 / 14.64) */ int mbedtls_mpi_inv_mod( mbedtls_mpi *X, const mbedtls_mpi *A, const mbedtls_mpi *N ) { int ret; mbedtls_mpi G, TA, TU, U1, U2, TB, TV, V1, V2; if( mbedtls_mpi_cmp_int( N, 1 ) <= 0 ) return( MBEDTLS_ERR_MPI_BAD_INPUT_DATA ); mbedtls_mpi_init( &TA ); mbedtls_mpi_init( &TU ); mbedtls_mpi_init( &U1 ); mbedtls_mpi_init( &U2 ); mbedtls_mpi_init( &G ); mbedtls_mpi_init( &TB ); mbedtls_mpi_init( &TV ); mbedtls_mpi_init( &V1 ); mbedtls_mpi_init( &V2 ); MBEDTLS_MPI_CHK( mbedtls_mpi_gcd( &G, A, N ) ); if( mbedtls_mpi_cmp_int( &G, 1 ) != 0 ) { ret = MBEDTLS_ERR_MPI_NOT_ACCEPTABLE; goto cleanup; } MBEDTLS_MPI_CHK( mbedtls_mpi_mod_mpi( &TA, A, N ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_copy( &TU, &TA ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_copy( &TB, N ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_copy( &TV, N ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_lset( &U1, 1 ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_lset( &U2, 0 ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_lset( &V1, 0 ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_lset( &V2, 1 ) ); do { while( ( TU.p[0] & 1 ) == 0 ) { MBEDTLS_MPI_CHK( mbedtls_mpi_shift_r( &TU, 1 ) ); if( ( U1.p[0] & 1 ) != 0 || ( U2.p[0] & 1 ) != 0 ) { MBEDTLS_MPI_CHK( mbedtls_mpi_add_mpi( &U1, &U1, &TB ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_sub_mpi( &U2, &U2, &TA ) ); } MBEDTLS_MPI_CHK( mbedtls_mpi_shift_r( &U1, 1 ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_shift_r( &U2, 1 ) ); } while( ( TV.p[0] & 1 ) == 0 ) { MBEDTLS_MPI_CHK( mbedtls_mpi_shift_r( &TV, 1 ) ); if( ( V1.p[0] & 1 ) != 0 || ( V2.p[0] & 1 ) != 0 ) { MBEDTLS_MPI_CHK( mbedtls_mpi_add_mpi( &V1, &V1, &TB ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_sub_mpi( &V2, &V2, &TA ) ); } MBEDTLS_MPI_CHK( mbedtls_mpi_shift_r( &V1, 1 ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_shift_r( &V2, 1 ) ); } if( mbedtls_mpi_cmp_mpi( &TU, &TV ) >= 0 ) { MBEDTLS_MPI_CHK( mbedtls_mpi_sub_mpi( &TU, &TU, &TV ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_sub_mpi( &U1, &U1, &V1 ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_sub_mpi( &U2, &U2, &V2 ) ); } else { MBEDTLS_MPI_CHK( mbedtls_mpi_sub_mpi( &TV, &TV, &TU ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_sub_mpi( &V1, &V1, &U1 ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_sub_mpi( &V2, &V2, &U2 ) ); } } while( mbedtls_mpi_cmp_int( &TU, 0 ) != 0 ); while( mbedtls_mpi_cmp_int( &V1, 0 ) < 0 ) MBEDTLS_MPI_CHK( mbedtls_mpi_add_mpi( &V1, &V1, N ) ); while( mbedtls_mpi_cmp_mpi( &V1, N ) >= 0 ) MBEDTLS_MPI_CHK( mbedtls_mpi_sub_mpi( &V1, &V1, N ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_copy( X, &V1 ) ); cleanup: mbedtls_mpi_free( &TA ); mbedtls_mpi_free( &TU ); mbedtls_mpi_free( &U1 ); mbedtls_mpi_free( &U2 ); mbedtls_mpi_free( &G ); mbedtls_mpi_free( &TB ); mbedtls_mpi_free( &TV ); mbedtls_mpi_free( &V1 ); mbedtls_mpi_free( &V2 ); return( ret ); } #if defined(MBEDTLS_GENPRIME) static const int small_prime[] = { 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461, 463, 
467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941, 947, 953, 967, 971, 977, 983, 991, 997, -103 }; /* * Small divisors test (X must be positive) * * Return values: * 0: no small factor (possible prime, more tests needed) * 1: certain prime * MBEDTLS_ERR_MPI_NOT_ACCEPTABLE: certain non-prime * other negative: error */ static int mpi_check_small_factors( const mbedtls_mpi *X ) { int ret = 0; size_t i; mbedtls_mpi_uint r; if( ( X->p[0] & 1 ) == 0 ) return( MBEDTLS_ERR_MPI_NOT_ACCEPTABLE ); for( i = 0; small_prime[i] > 0; i++ ) { if( mbedtls_mpi_cmp_int( X, small_prime[i] ) <= 0 ) return( 1 ); MBEDTLS_MPI_CHK( mbedtls_mpi_mod_int( &r, X, small_prime[i] ) ); if( r == 0 ) return( MBEDTLS_ERR_MPI_NOT_ACCEPTABLE ); } cleanup: return( ret ); } /* * Miller-Rabin pseudo-primality test (HAC 4.24) */ static int mpi_miller_rabin( const mbedtls_mpi *X, int (*f_rng)(void *, unsigned char *, size_t), void *p_rng ) { int ret, count; size_t i, j, k, n, s; mbedtls_mpi W, R, T, A, RR; mbedtls_mpi_init( &W ); mbedtls_mpi_init( &R ); mbedtls_mpi_init( &T ); mbedtls_mpi_init( &A ); mbedtls_mpi_init( &RR ); /* * W = |X| - 1 * R = W >> lsb( W ) */ MBEDTLS_MPI_CHK( mbedtls_mpi_sub_int( &W, X, 1 ) ); s = mbedtls_mpi_lsb( &W ); MBEDTLS_MPI_CHK( mbedtls_mpi_copy( &R, &W ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_shift_r( &R, s ) ); i = mbedtls_mpi_bitlen( X ); /* * HAC, table 4.4 */ n = ( ( i >= 1300 ) ? 2 : ( i >= 850 ) ? 3 : ( i >= 650 ) ? 4 : ( i >= 350 ) ? 8 : ( i >= 250 ) ? 12 : ( i >= 150 ) ? 
18 : 27 ); for( i = 0; i < n; i++ ) { /* * pick a random A, 1 < A < |X| - 1 */ MBEDTLS_MPI_CHK( mbedtls_mpi_fill_random( &A, X->n * ciL, f_rng, p_rng ) ); if( mbedtls_mpi_cmp_mpi( &A, &W ) >= 0 ) { j = mbedtls_mpi_bitlen( &A ) - mbedtls_mpi_bitlen( &W ); MBEDTLS_MPI_CHK( mbedtls_mpi_shift_r( &A, j + 1 ) ); } A.p[0] |= 3; count = 0; do { MBEDTLS_MPI_CHK( mbedtls_mpi_fill_random( &A, X->n * ciL, f_rng, p_rng ) ); j = mbedtls_mpi_bitlen( &A ); k = mbedtls_mpi_bitlen( &W ); if (j > k) { MBEDTLS_MPI_CHK( mbedtls_mpi_shift_r( &A, j - k ) ); } if (count++ > 30) { return MBEDTLS_ERR_MPI_NOT_ACCEPTABLE; } } while ( mbedtls_mpi_cmp_mpi( &A, &W ) >= 0 || mbedtls_mpi_cmp_int( &A, 1 ) <= 0 ); /* * A = A^R mod |X| */ MBEDTLS_MPI_CHK( mbedtls_mpi_exp_mod( &A, &A, &R, X, &RR ) ); if( mbedtls_mpi_cmp_mpi( &A, &W ) == 0 || mbedtls_mpi_cmp_int( &A, 1 ) == 0 ) continue; j = 1; while( j < s && mbedtls_mpi_cmp_mpi( &A, &W ) != 0 ) { /* * A = A * A mod |X| */ MBEDTLS_MPI_CHK( mbedtls_mpi_mul_mpi( &T, &A, &A ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_mod_mpi( &A, &T, X ) ); if( mbedtls_mpi_cmp_int( &A, 1 ) == 0 ) break; j++; } /* * not prime if A != |X| - 1 or A == 1 */ if( mbedtls_mpi_cmp_mpi( &A, &W ) != 0 || mbedtls_mpi_cmp_int( &A, 1 ) == 0 ) { ret = MBEDTLS_ERR_MPI_NOT_ACCEPTABLE; break; } } cleanup: mbedtls_mpi_free( &W ); mbedtls_mpi_free( &R ); mbedtls_mpi_free( &T ); mbedtls_mpi_free( &A ); mbedtls_mpi_free( &RR ); return( ret ); } /* * Pseudo-primality test: small factors, then Miller-Rabin */ int mbedtls_mpi_is_prime( const mbedtls_mpi *X, int (*f_rng)(void *, unsigned char *, size_t), void *p_rng ) { int ret; mbedtls_mpi XX; XX.s = 1; XX.n = X->n; XX.p = X->p; if( mbedtls_mpi_cmp_int( &XX, 0 ) == 0 || mbedtls_mpi_cmp_int( &XX, 1 ) == 0 ) return( MBEDTLS_ERR_MPI_NOT_ACCEPTABLE ); if( mbedtls_mpi_cmp_int( &XX, 2 ) == 0 ) return( 0 ); if( ( ret = mpi_check_small_factors( &XX ) ) != 0 ) { if( ret == 1 ) return( 0 ); return( ret ); } return( mpi_miller_rabin( &XX, f_rng, p_rng ) ); } /* * Prime number generation */ int mbedtls_mpi_gen_prime( mbedtls_mpi *X, size_t nbits, int dh_flag, int (*f_rng)(void *, unsigned char *, size_t), void *p_rng ) { int ret; size_t k, n; mbedtls_mpi_uint r; mbedtls_mpi Y; if( nbits < 3 || nbits > MBEDTLS_MPI_MAX_BITS ) return( MBEDTLS_ERR_MPI_BAD_INPUT_DATA ); mbedtls_mpi_init( &Y ); n = BITS_TO_LIMBS( nbits ); MBEDTLS_MPI_CHK( mbedtls_mpi_fill_random( X, n * ciL, f_rng, p_rng ) ); k = mbedtls_mpi_bitlen( X ); if( k > nbits ) MBEDTLS_MPI_CHK( mbedtls_mpi_shift_r( X, k - nbits + 1 ) ); mbedtls_mpi_set_bit( X, nbits-1, 1 ); X->p[0] |= 1; if( dh_flag == 0 ) { while( ( ret = mbedtls_mpi_is_prime( X, f_rng, p_rng ) ) != 0 ) { if( ret != MBEDTLS_ERR_MPI_NOT_ACCEPTABLE ) goto cleanup; MBEDTLS_MPI_CHK( mbedtls_mpi_add_int( X, X, 2 ) ); } } else { /* * A necessary condition for Y and X = 2Y + 1 to be prime * is X = 2 mod 3 (which is equivalent to Y = 2 mod 3).
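 * (Indeed, with X = 2Y + 1: Y = 0 mod 3 would make Y divisible by 3,
 * and Y = 1 mod 3 would make X divisible by 3, so Y = X = 2 mod 3 is
 * the only case in which both can be prime.)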
* Make sure it is satisfied, while keeping X = 3 mod 4 */ X->p[0] |= 2; MBEDTLS_MPI_CHK( mbedtls_mpi_mod_int( &r, X, 3 ) ); if( r == 0 ) MBEDTLS_MPI_CHK( mbedtls_mpi_add_int( X, X, 8 ) ); else if( r == 1 ) MBEDTLS_MPI_CHK( mbedtls_mpi_add_int( X, X, 4 ) ); /* Set Y = (X-1) / 2, which is X / 2 because X is odd */ MBEDTLS_MPI_CHK( mbedtls_mpi_copy( &Y, X ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_shift_r( &Y, 1 ) ); while( 1 ) { /* * First, check small factors for X and Y * before doing Miller-Rabin on any of them */ if( ( ret = mpi_check_small_factors( X ) ) == 0 && ( ret = mpi_check_small_factors( &Y ) ) == 0 && ( ret = mpi_miller_rabin( X, f_rng, p_rng ) ) == 0 && ( ret = mpi_miller_rabin( &Y, f_rng, p_rng ) ) == 0 ) { break; } if( ret != MBEDTLS_ERR_MPI_NOT_ACCEPTABLE ) goto cleanup; /* * Next candidates. We want to preserve Y = (X-1) / 2 and * Y = 1 mod 2 and Y = 2 mod 3 (eq X = 3 mod 4 and X = 2 mod 3) * so up Y by 6 and X by 12. */ MBEDTLS_MPI_CHK( mbedtls_mpi_add_int( X, X, 12 ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_add_int( &Y, &Y, 6 ) ); } } cleanup: mbedtls_mpi_free( &Y ); return( ret ); } #endif /* MBEDTLS_GENPRIME */ #if defined(MBEDTLS_SELF_TEST) #define GCD_PAIR_COUNT 3 static const int gcd_pairs[GCD_PAIR_COUNT][3] = { { 693, 609, 21 }, { 1764, 868, 28 }, { 768454923, 542167814, 1 } }; /* * Checkup routine */ int mbedtls_mpi_self_test( int verbose ) { int ret, i; mbedtls_mpi A, E, N, X, Y, U, V; mbedtls_mpi_init( &A ); mbedtls_mpi_init( &E ); mbedtls_mpi_init( &N ); mbedtls_mpi_init( &X ); mbedtls_mpi_init( &Y ); mbedtls_mpi_init( &U ); mbedtls_mpi_init( &V ); MBEDTLS_MPI_CHK( mbedtls_mpi_read_string( &A, 16, "EFE021C2645FD1DC586E69184AF4A31E" \ "D5F53E93B5F123FA41680867BA110131" \ "944FE7952E2517337780CB0DB80E61AA" \ "E7C8DDC6C5C6AADEB34EB38A2F40D5E6" ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_read_string( &E, 16, "B2E7EFD37075B9F03FF989C7C5051C20" \ "34D2A323810251127E7BF8625A4F49A5" \ "F3E27F4DA8BD59C47D6DAABA4C8127BD" \ "5B5C25763222FEFCCFC38B832366C29E" ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_read_string( &N, 16, "0066A198186C18C10B2F5ED9B522752A" \ "9830B69916E535C8F047518A889A43A5" \ "94B6BED27A168D31D4A52F88925AA8F5" ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_mul_mpi( &X, &A, &N ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_read_string( &U, 16, "602AB7ECA597A3D6B56FF9829A5E8B85" \ "9E857EA95A03512E2BAE7391688D264A" \ "A5663B0341DB9CCFD2C4C5F421FEC814" \ "8001B72E848A38CAE1C65F78E56ABDEF" \ "E12D3C039B8A02D6BE593F0BBBDA56F1" \ "ECF677152EF804370C1A305CAF3B5BF1" \ "30879B56C61DE584A0F53A2447A51E" ) ); if( verbose != 0 ) mbedtls_printf( " MPI test #1 (mul_mpi): " ); if( mbedtls_mpi_cmp_mpi( &X, &U ) != 0 ) { if( verbose != 0 ) mbedtls_printf( "failed\n" ); ret = 1; goto cleanup; } if( verbose != 0 ) mbedtls_printf( "passed\n" ); MBEDTLS_MPI_CHK( mbedtls_mpi_div_mpi( &X, &Y, &A, &N ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_read_string( &U, 16, "256567336059E52CAE22925474705F39A94" ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_read_string( &V, 16, "6613F26162223DF488E9CD48CC132C7A" \ "0AC93C701B001B092E4E5B9F73BCD27B" \ "9EE50D0657C77F374E903CDFA4C642" ) ); if( verbose != 0 ) mbedtls_printf( " MPI test #2 (div_mpi): " ); if( mbedtls_mpi_cmp_mpi( &X, &U ) != 0 || mbedtls_mpi_cmp_mpi( &Y, &V ) != 0 ) { if( verbose != 0 ) mbedtls_printf( "failed\n" ); ret = 1; goto cleanup; } if( verbose != 0 ) mbedtls_printf( "passed\n" ); MBEDTLS_MPI_CHK( mbedtls_mpi_exp_mod( &X, &A, &E, &N, NULL ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_read_string( &U, 16, "36E139AEA55215609D2816998ED020BB" \ "BD96C37890F65171D948E9BC7CBAA4D9" \ "325D24D6A3C12710F10A09FA08AB87" ) ); 
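/* U was just loaded with the reference value of A^E mod N used by test #3 below */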
if( verbose != 0 ) mbedtls_printf( " MPI test #3 (exp_mod): " ); if( mbedtls_mpi_cmp_mpi( &X, &U ) != 0 ) { if( verbose != 0 ) mbedtls_printf( "failed\n" ); ret = 1; goto cleanup; } if( verbose != 0 ) mbedtls_printf( "passed\n" ); MBEDTLS_MPI_CHK( mbedtls_mpi_inv_mod( &X, &A, &N ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_read_string( &U, 16, "003A0AAEDD7E784FC07D8F9EC6E3BFD5" \ "C3DBA76456363A10869622EAC2DD84EC" \ "C5B8A74DAC4D09E03B5E0BE779F2DF61" ) ); if( verbose != 0 ) mbedtls_printf( " MPI test #4 (inv_mod): " ); if( mbedtls_mpi_cmp_mpi( &X, &U ) != 0 ) { if( verbose != 0 ) mbedtls_printf( "failed\n" ); ret = 1; goto cleanup; } if( verbose != 0 ) mbedtls_printf( "passed\n" ); if( verbose != 0 ) mbedtls_printf( " MPI test #5 (simple gcd): " ); for( i = 0; i < GCD_PAIR_COUNT; i++ ) { MBEDTLS_MPI_CHK( mbedtls_mpi_lset( &X, gcd_pairs[i][0] ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_lset( &Y, gcd_pairs[i][1] ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_gcd( &A, &X, &Y ) ); if( mbedtls_mpi_cmp_int( &A, gcd_pairs[i][2] ) != 0 ) { if( verbose != 0 ) mbedtls_printf( "failed at %d\n", i ); ret = 1; goto cleanup; } } if( verbose != 0 ) mbedtls_printf( "passed\n" ); cleanup: if( ret != 0 && verbose != 0 ) mbedtls_printf( "Unexpected error, return code = %08X\n", ret ); mbedtls_mpi_free( &A ); mbedtls_mpi_free( &E ); mbedtls_mpi_free( &N ); mbedtls_mpi_free( &X ); mbedtls_mpi_free( &Y ); mbedtls_mpi_free( &U ); mbedtls_mpi_free( &V ); if( verbose != 0 ) mbedtls_printf( "\n" ); return( ret ); } #endif /* MBEDTLS_SELF_TEST */ #endif /* MBEDTLS_BIGNUM_C */ backport-iwlwifi-dkms-7906/compat/verification/key.c000066400000000000000000000066431351064634500226130ustar00rootroot00000000000000#include #include #include #include #include #include "x509_parser.h" static void keyring_clear(struct key *keyring) { struct key *key, *tmp; if (!keyring->keyring) return; list_for_each_entry_safe(key, tmp, &keyring->list, list) { WARN_ON(refcount_read(&key->refcount) > 1); key_put(key); } } void key_put(struct key *key) { if (refcount_dec_and_test(&key->refcount)) { keyring_clear(key); list_del(&key->list); kfree(key->description); public_key_free(key->public_key); public_key_signature_free(key->sig); kfree(key->kids.id[0]); kfree(key->kids.id[1]); kfree(key); } } EXPORT_SYMBOL_GPL(key_put); static struct key *key_alloc(void) { struct key *key = kzalloc(sizeof(*key), GFP_KERNEL); if (!key) return NULL; refcount_set(&key->refcount, 1); INIT_LIST_HEAD(&key->list); return key; } struct key *bp_keyring_alloc(void) { struct key *key = key_alloc(); if (!key) return NULL; key->keyring = true; return key; } EXPORT_SYMBOL_GPL(bp_keyring_alloc); key_ref_t bp_key_create_or_update(key_ref_t keyring, const char *description, const void *payload, size_t plen) { struct key *key = key_alloc(); struct x509_certificate *cert; const char *q; size_t srlen, sulen; char *desc = NULL, *p; int err; if (!key) return NULL; cert = x509_cert_parse(payload, plen); if (IS_ERR(cert)) { err = PTR_ERR(cert); goto free; } if (cert->unsupported_sig) { public_key_signature_free(cert->sig); cert->sig = NULL; } sulen = strlen(cert->subject); if (cert->raw_skid) { srlen = cert->raw_skid_size; q = cert->raw_skid; } else { srlen = cert->raw_serial_size; q = cert->raw_serial; } err = -ENOMEM; desc = kmalloc(sulen + 2 + srlen * 2 + 1, GFP_KERNEL); if (!desc) goto free; p = memcpy(desc, cert->subject, sulen); p += sulen; *p++ = ':'; *p++ = ' '; p = bin2hex(p, q, srlen); *p = 0; key->description = desc; key->kids.id[0] = cert->id; key->kids.id[1] = cert->skid; key->public_key = cert->pub; 
key->sig = cert->sig; cert->id = NULL; cert->skid = NULL; cert->pub = NULL; cert->sig = NULL; x509_free_certificate(cert); refcount_inc(&key->refcount); list_add_tail(&key->list, &key_ref_to_ptr(keyring)->list); return make_key_ref(key, 0); free: kfree(key); return ERR_PTR(err); } EXPORT_SYMBOL_GPL(bp_key_create_or_update); struct key *find_asymmetric_key(struct key *keyring, const struct asymmetric_key_id *id_0, const struct asymmetric_key_id *id_1, bool partial) { struct key *key; if (WARN_ON(partial)) return ERR_PTR(-ENOENT); if (WARN_ON(!keyring)) return ERR_PTR(-EINVAL); list_for_each_entry(key, &keyring->list, list) { const struct asymmetric_key_ids *kids = &key->kids; if (id_0 && (!kids->id[0] || !asymmetric_key_id_same(id_0, kids->id[0]))) continue; if (id_1 && (!kids->id[1] || !asymmetric_key_id_same(id_1, kids->id[1]))) continue; refcount_inc(&key->refcount); return key; } return ERR_PTR(-ENOKEY); } struct asymmetric_key_id * asymmetric_key_generate_id(const void *val_1, size_t len_1, const void *val_2, size_t len_2) { struct asymmetric_key_id *kid; kid = kmalloc(sizeof(struct asymmetric_key_id) + len_1 + len_2, GFP_KERNEL); if (!kid) return ERR_PTR(-ENOMEM); kid->len = len_1 + len_2; memcpy(kid->data, val_1, len_1); memcpy(kid->data + len_1, val_2, len_2); return kid; } backport-iwlwifi-dkms-7906/compat/verification/mbedtls/000077500000000000000000000000001351064634500233005ustar00rootroot00000000000000backport-iwlwifi-dkms-7906/compat/verification/mbedtls/asn1.h000066400000000000000000000272021351064634500243160ustar00rootroot00000000000000/** * \file asn1.h * * \brief Generic ASN.1 parsing * * Copyright (C) 2006-2015, ARM Limited, All Rights Reserved * SPDX-License-Identifier: GPL-2.0 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * * This file is part of mbed TLS (https://tls.mbed.org) */ #ifndef MBEDTLS_ASN1_H #define MBEDTLS_ASN1_H #if !defined(MBEDTLS_CONFIG_FILE) #include "config.h" #else #include MBEDTLS_CONFIG_FILE #endif #if defined(MBEDTLS_BIGNUM_C) #include "bignum.h" #endif /** * \addtogroup asn1_module * \{ */ /** * \name ASN1 Error codes * These error codes are OR'ed to X509 error codes for * higher error granularity. * ASN1 is a standard to specify data structures. * \{ */ #define MBEDTLS_ERR_ASN1_OUT_OF_DATA -0x0060 /**< Out of data when parsing an ASN1 data structure. */ #define MBEDTLS_ERR_ASN1_UNEXPECTED_TAG -0x0062 /**< ASN1 tag was of an unexpected value. */ #define MBEDTLS_ERR_ASN1_INVALID_LENGTH -0x0064 /**< Error when trying to determine the length or invalid length. */ #define MBEDTLS_ERR_ASN1_LENGTH_MISMATCH -0x0066 /**< Actual length differs from expected length. */ #define MBEDTLS_ERR_ASN1_INVALID_DATA -0x0068 /**< Data is invalid.
(not used) */ #define MBEDTLS_ERR_ASN1_ALLOC_FAILED -0x006A /**< Memory allocation failed */ #define MBEDTLS_ERR_ASN1_BUF_TOO_SMALL -0x006C /**< Buffer too small when writing ASN.1 data structure. */ /* \} name */ /** * \name DER constants * These constants comply with the DER-encoded ASN.1 type tags. * DER encoding uses hexadecimal representation. * An example DER sequence is:\n * - 0x02 -- tag indicating INTEGER * - 0x01 -- length in octets * - 0x05 -- value * Such sequences are typically read into \c ::mbedtls_x509_buf. * \{ */ #define MBEDTLS_ASN1_BOOLEAN 0x01 #define MBEDTLS_ASN1_INTEGER 0x02 #define MBEDTLS_ASN1_BIT_STRING 0x03 #define MBEDTLS_ASN1_OCTET_STRING 0x04 #define MBEDTLS_ASN1_NULL 0x05 #define MBEDTLS_ASN1_OID 0x06 #define MBEDTLS_ASN1_UTF8_STRING 0x0C #define MBEDTLS_ASN1_SEQUENCE 0x10 #define MBEDTLS_ASN1_SET 0x11 #define MBEDTLS_ASN1_PRINTABLE_STRING 0x13 #define MBEDTLS_ASN1_T61_STRING 0x14 #define MBEDTLS_ASN1_IA5_STRING 0x16 #define MBEDTLS_ASN1_UTC_TIME 0x17 #define MBEDTLS_ASN1_GENERALIZED_TIME 0x18 #define MBEDTLS_ASN1_UNIVERSAL_STRING 0x1C #define MBEDTLS_ASN1_BMP_STRING 0x1E #define MBEDTLS_ASN1_PRIMITIVE 0x00 #define MBEDTLS_ASN1_CONSTRUCTED 0x20 #define MBEDTLS_ASN1_CONTEXT_SPECIFIC 0x80 /* \} name */ /* \} addtogroup asn1_module */ /** Returns the size of the binary string, without the trailing \\0 */ #define MBEDTLS_OID_SIZE(x) (sizeof(x) - 1) /** * Compares an mbedtls_asn1_buf structure to a reference OID. * * Only works for 'defined' oid_str values (MBEDTLS_OID_HMAC_SHA1), you cannot use an * 'unsigned char *oid' here! */ #define MBEDTLS_OID_CMP(oid_str, oid_buf) \ ( ( MBEDTLS_OID_SIZE(oid_str) != (oid_buf)->len ) || \ memcmp( (oid_str), (oid_buf)->p, (oid_buf)->len) != 0 ) #ifdef __cplusplus extern "C" { #endif /** * \name Functions to parse ASN.1 data structures * \{ */ /** * Type-length-value structure that allows for ASN1 using DER. */ typedef struct mbedtls_asn1_buf { int tag; /**< ASN1 type, e.g. MBEDTLS_ASN1_UTF8_STRING. */ size_t len; /**< ASN1 length, in octets. */ unsigned char *p; /**< ASN1 data, e.g. in ASCII. */ } mbedtls_asn1_buf; /** * Container for ASN1 bit strings. */ typedef struct mbedtls_asn1_bitstring { size_t len; /**< ASN1 length, in octets. */ unsigned char unused_bits; /**< Number of unused bits at the end of the string */ unsigned char *p; /**< Raw ASN1 data for the bit string */ } mbedtls_asn1_bitstring; /** * Container for a sequence of ASN.1 items */ typedef struct mbedtls_asn1_sequence { mbedtls_asn1_buf buf; /**< Buffer containing the given ASN.1 item. */ struct mbedtls_asn1_sequence *next; /**< The next entry in the sequence. */ } mbedtls_asn1_sequence; /** * Container for a sequence or list of 'named' ASN.1 data items */ typedef struct mbedtls_asn1_named_data { mbedtls_asn1_buf oid; /**< The object identifier. */ mbedtls_asn1_buf val; /**< The named value. */ struct mbedtls_asn1_named_data *next; /**< The next entry in the sequence. */ unsigned char next_merged; /**< Merge next item into the current one? */ } mbedtls_asn1_named_data; /** * \brief Get the length of an ASN.1 element. * Updates the pointer to immediately behind the length. * * \param p The position in the ASN.1 data * \param end End of data * \param len The variable that will receive the value * * \return 0 if successful, MBEDTLS_ERR_ASN1_OUT_OF_DATA on reaching * end of data, MBEDTLS_ERR_ASN1_INVALID_LENGTH if length is * unparseable.
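 *
 * \note Sketch of the DER length forms this parser handles (per
 * standard DER; the implementation is authoritative): a first octet
 * in the range 0x00..0x7F encodes the length directly (short form),
 * while a first octet of 0x81..0x84 announces that many subsequent
 * length octets, e.g. the bytes 82 01 00 encode a length of 256.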
*/ int mbedtls_asn1_get_len( unsigned char **p, const unsigned char *end, size_t *len ); /** * \brief Get the tag and length of the tag. Check for the requested tag. * Updates the pointer to immediately behind the tag and length. * * \param p The position in the ASN.1 data * \param end End of data * \param len The variable that will receive the length * \param tag The expected tag * * \return 0 if successful, MBEDTLS_ERR_ASN1_UNEXPECTED_TAG if tag did * not match requested tag, or another specific ASN.1 error code. */ int mbedtls_asn1_get_tag( unsigned char **p, const unsigned char *end, size_t *len, int tag ); /** * \brief Retrieve a boolean ASN.1 tag and its value. * Updates the pointer to immediately behind the full tag. * * \param p The position in the ASN.1 data * \param end End of data * \param val The variable that will receive the value * * \return 0 if successful or a specific ASN.1 error code. */ int mbedtls_asn1_get_bool( unsigned char **p, const unsigned char *end, int *val ); /** * \brief Retrieve an integer ASN.1 tag and its value. * Updates the pointer to immediately behind the full tag. * * \param p The position in the ASN.1 data * \param end End of data * \param val The variable that will receive the value * * \return 0 if successful or a specific ASN.1 error code. */ int mbedtls_asn1_get_int( unsigned char **p, const unsigned char *end, int *val ); /** * \brief Retrieve a bitstring ASN.1 tag and its value. * Updates the pointer to immediately behind the full tag. * * \param p The position in the ASN.1 data * \param end End of data * \param bs The variable that will receive the value * * \return 0 if successful or a specific ASN.1 error code. */ int mbedtls_asn1_get_bitstring( unsigned char **p, const unsigned char *end, mbedtls_asn1_bitstring *bs); /** * \brief Retrieve a bitstring ASN.1 tag without unused bits and its * value. * Updates the pointer to the beginning of the bit/octet string. * * \param p The position in the ASN.1 data * \param end End of data * \param len Length of the actual bit/octet string in bytes * * \return 0 if successful or a specific ASN.1 error code. */ int mbedtls_asn1_get_bitstring_null( unsigned char **p, const unsigned char *end, size_t *len ); /** * \brief Parses and splits an ASN.1 "SEQUENCE OF <tag>" * Updates the pointer to immediately behind the full sequence tag. * * \param p The position in the ASN.1 data * \param end End of data * \param cur First variable in the chain to fill * \param tag Type of sequence * * \return 0 if successful or a specific ASN.1 error code. */ int mbedtls_asn1_get_sequence_of( unsigned char **p, const unsigned char *end, mbedtls_asn1_sequence *cur, int tag); #if defined(MBEDTLS_BIGNUM_C) /** * \brief Retrieve an MPI value from an integer ASN.1 tag. * Updates the pointer to immediately behind the full tag. * * \param p The position in the ASN.1 data * \param end End of data * \param X The MPI that will receive the value * * \return 0 if successful or a specific ASN.1 or MPI error code. */ int mbedtls_asn1_get_mpi( unsigned char **p, const unsigned char *end, mbedtls_mpi *X ); #endif /* MBEDTLS_BIGNUM_C */ /** * \brief Retrieve an AlgorithmIdentifier ASN.1 sequence. * Updates the pointer to immediately behind the full * AlgorithmIdentifier. * * \param p The position in the ASN.1 data * \param end End of data * \param alg The buffer to receive the OID * \param params The buffer to receive the params (if any) * * \return 0 if successful or a specific ASN.1 or MPI error code.
*/ int mbedtls_asn1_get_alg( unsigned char **p, const unsigned char *end, mbedtls_asn1_buf *alg, mbedtls_asn1_buf *params ); /** * \brief Retrieve an AlgorithmIdentifier ASN.1 sequence with NULL or no * params. * Updates the pointer to immediately behind the full * AlgorithmIdentifier. * * \param p The position in the ASN.1 data * \param end End of data * \param alg The buffer to receive the OID * * \return 0 if successful or a specific ASN.1 or MPI error code. */ int mbedtls_asn1_get_alg_null( unsigned char **p, const unsigned char *end, mbedtls_asn1_buf *alg ); /** * \brief Find a specific named_data entry in a sequence or list based on * the OID. * * \param list The list to seek through * \param oid The OID to look for * \param len Size of the OID * * \return NULL if not found, or a pointer to the existing entry. */ mbedtls_asn1_named_data *mbedtls_asn1_find_named_data( mbedtls_asn1_named_data *list, const char *oid, size_t len ); /** * \brief Free a mbedtls_asn1_named_data entry * * \param entry The named data entry to free */ void mbedtls_asn1_free_named_data( mbedtls_asn1_named_data *entry ); /** * \brief Free all entries in a mbedtls_asn1_named_data list * Head will be set to NULL * * \param head Pointer to the head of the list of named data entries to free */ void mbedtls_asn1_free_named_data_list( mbedtls_asn1_named_data **head ); #ifdef __cplusplus } #endif #endif /* asn1.h */ backport-iwlwifi-dkms-7906/compat/verification/mbedtls/bignum.h000066400000000000000000000634401351064634500247410ustar00rootroot00000000000000/** * \file bignum.h * * \brief Multi-precision integer library * * Copyright (C) 2006-2015, ARM Limited, All Rights Reserved * SPDX-License-Identifier: GPL-2.0 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * * This file is part of mbed TLS (https://tls.mbed.org) */ #ifndef MBEDTLS_BIGNUM_H #define MBEDTLS_BIGNUM_H #if !defined(MBEDTLS_CONFIG_FILE) #include "config.h" #else #include MBEDTLS_CONFIG_FILE #endif #if defined(MBEDTLS_FS_IO) #include #endif #define MBEDTLS_ERR_MPI_FILE_IO_ERROR -0x0002 /**< An error occurred while reading from or writing to a file. */ #define MBEDTLS_ERR_MPI_BAD_INPUT_DATA -0x0004 /**< Bad input parameters to function. */ #define MBEDTLS_ERR_MPI_INVALID_CHARACTER -0x0006 /**< There is an invalid character in the digit string. */ #define MBEDTLS_ERR_MPI_BUFFER_TOO_SMALL -0x0008 /**< The buffer is too small to write to. */ #define MBEDTLS_ERR_MPI_NEGATIVE_VALUE -0x000A /**< The input arguments are negative or result in illegal output. */ #define MBEDTLS_ERR_MPI_DIVISION_BY_ZERO -0x000C /**< The input argument for division is zero, which is not allowed. */ #define MBEDTLS_ERR_MPI_NOT_ACCEPTABLE -0x000E /**< The input arguments are not acceptable. */ #define MBEDTLS_ERR_MPI_ALLOC_FAILED -0x0010 /**< Memory allocation failed. 
*/ #define MBEDTLS_MPI_CHK(f) do { if( ( ret = f ) != 0 ) goto cleanup; } while( 0 ) /* * Maximum size MPIs are allowed to grow to in number of limbs. */ #define MBEDTLS_MPI_MAX_LIMBS 10000 #if !defined(MBEDTLS_MPI_WINDOW_SIZE) /* * Maximum window size used for modular exponentiation. Default: 6 * Minimum value: 1. Maximum value: 6. * * Result is an array of ( 2 << MBEDTLS_MPI_WINDOW_SIZE ) MPIs used * for the sliding window calculation. (So 64 by default) * * Reducing the size reduces speed. */ #define MBEDTLS_MPI_WINDOW_SIZE 6 /**< Maximum window size used. */ #endif /* !MBEDTLS_MPI_WINDOW_SIZE */ #if !defined(MBEDTLS_MPI_MAX_SIZE) /* * Maximum size of MPIs allowed in bits and bytes for user-MPIs. * ( Default: 1024 bytes => 8192 bits, Maximum tested: 2048 bytes => 16384 bits ) * * Note: Calculations can temporarily result in larger MPIs. So the number * of limbs required (MBEDTLS_MPI_MAX_LIMBS) is higher. */ #define MBEDTLS_MPI_MAX_SIZE 1024 /**< Maximum number of bytes for usable MPIs. */ #endif /* !MBEDTLS_MPI_MAX_SIZE */ #define MBEDTLS_MPI_MAX_BITS ( 8 * MBEDTLS_MPI_MAX_SIZE ) /**< Maximum number of bits for usable MPIs. */ /* * When reading from files with mbedtls_mpi_read_file() and writing to files with * mbedtls_mpi_write_file() the buffer should have space * for a (short) label, the MPI (in the provided radix), the newline * characters and the '\0'. * * By default we assume at least a 10 char label, a minimum radix of 10 * (decimal) and a maximum of 4096 bit numbers (1234 decimal chars). * Autosized at compile time for at least a 10 char label, a minimum radix * of 10 (decimal) for a number of MBEDTLS_MPI_MAX_BITS size. * * This used to be statically sized to 1250 for a maximum of 4096 bit * numbers (1234 decimal chars). * * Calculate using the formula: * MBEDTLS_MPI_RW_BUFFER_SIZE = ceil(MBEDTLS_MPI_MAX_BITS / ln(10) * ln(2)) + * LabelSize + 6 */ #define MBEDTLS_MPI_MAX_BITS_SCALE100 ( 100 * MBEDTLS_MPI_MAX_BITS ) #define MBEDTLS_LN_2_DIV_LN_10_SCALE100 332 #define MBEDTLS_MPI_RW_BUFFER_SIZE ( ((MBEDTLS_MPI_MAX_BITS_SCALE100 + MBEDTLS_LN_2_DIV_LN_10_SCALE100 - 1) / MBEDTLS_LN_2_DIV_LN_10_SCALE100) + 10 + 6 ) /* * Define the base integer type, architecture-wise. * * 32 or 64-bit integer types can be forced regardless of the underlying * architecture by defining MBEDTLS_HAVE_INT32 or MBEDTLS_HAVE_INT64 * respectively and undefining MBEDTLS_HAVE_ASM. * * Double-width integers (e.g. 128-bit in 64-bit architectures) can be * disabled by defining MBEDTLS_NO_UDBL_DIVISION.
*/ #if !defined(MBEDTLS_HAVE_INT32) #if defined(_MSC_VER) && defined(_M_AMD64) /* Always choose 64-bit when using MSC */ #if !defined(MBEDTLS_HAVE_INT64) #define MBEDTLS_HAVE_INT64 #endif /* !MBEDTLS_HAVE_INT64 */ typedef int64_t mbedtls_mpi_sint; typedef uint64_t mbedtls_mpi_uint; #elif defined(__GNUC__) && ( \ defined(__amd64__) || defined(__x86_64__) || \ defined(__ppc64__) || defined(__powerpc64__) || \ defined(__ia64__) || defined(__alpha__) || \ ( defined(__sparc__) && defined(__arch64__) ) || \ defined(__s390x__) || defined(__mips64) ) #if !defined(MBEDTLS_HAVE_INT64) #define MBEDTLS_HAVE_INT64 #endif /* MBEDTLS_HAVE_INT64 */ typedef int64_t mbedtls_mpi_sint; typedef uint64_t mbedtls_mpi_uint; #if !defined(MBEDTLS_NO_UDBL_DIVISION) /* mbedtls_t_udbl defined as 128-bit unsigned int */ typedef unsigned int mbedtls_t_udbl __attribute__((mode(TI))); #define MBEDTLS_HAVE_UDBL #endif /* !MBEDTLS_NO_UDBL_DIVISION */ #elif defined(__ARMCC_VERSION) && defined(__aarch64__) /* * __ARMCC_VERSION is defined for both armcc and armclang and * __aarch64__ is only defined by armclang when compiling 64-bit code */ #if !defined(MBEDTLS_HAVE_INT64) #define MBEDTLS_HAVE_INT64 #endif /* !MBEDTLS_HAVE_INT64 */ typedef int64_t mbedtls_mpi_sint; typedef uint64_t mbedtls_mpi_uint; #if !defined(MBEDTLS_NO_UDBL_DIVISION) /* mbedtls_t_udbl defined as 128-bit unsigned int */ typedef __uint128_t mbedtls_t_udbl; #define MBEDTLS_HAVE_UDBL #endif /* !MBEDTLS_NO_UDBL_DIVISION */ #elif defined(MBEDTLS_HAVE_INT64) /* Force 64-bit integers with unknown compiler */ typedef int64_t mbedtls_mpi_sint; typedef uint64_t mbedtls_mpi_uint; #endif #endif /* !MBEDTLS_HAVE_INT32 */ #if !defined(MBEDTLS_HAVE_INT64) /* Default to 32-bit compilation */ #if !defined(MBEDTLS_HAVE_INT32) #define MBEDTLS_HAVE_INT32 #endif /* !MBEDTLS_HAVE_INT32 */ typedef int32_t mbedtls_mpi_sint; typedef uint32_t mbedtls_mpi_uint; #if !defined(MBEDTLS_NO_UDBL_DIVISION) typedef uint64_t mbedtls_t_udbl; #define MBEDTLS_HAVE_UDBL #endif /* !MBEDTLS_NO_UDBL_DIVISION */ #endif /* !MBEDTLS_HAVE_INT64 */ #ifdef __cplusplus extern "C" { #endif /** * \brief MPI structure */ typedef struct { int s; /*!< integer sign */ size_t n; /*!< total # of limbs */ mbedtls_mpi_uint *p; /*!< pointer to limbs */ } mbedtls_mpi; /** * \brief Initialize one MPI (make internal references valid) * This just makes it ready to be set or freed, * but does not define a value for the MPI. * * \param X One MPI to initialize. */ void mbedtls_mpi_init( mbedtls_mpi *X ); /** * \brief Unallocate one MPI * * \param X One MPI to unallocate. 
*/ void mbedtls_mpi_free( mbedtls_mpi *X ); /** * \brief Enlarge to the specified number of limbs * * \param X MPI to grow * \param nblimbs The target number of limbs * * \return 0 if successful, * MBEDTLS_ERR_MPI_ALLOC_FAILED if memory allocation failed */ int mbedtls_mpi_grow( mbedtls_mpi *X, size_t nblimbs ); /** * \brief Resize down, keeping at least the specified number of limbs * * \param X MPI to shrink * \param nblimbs The minimum number of limbs to keep * * \return 0 if successful, * MBEDTLS_ERR_MPI_ALLOC_FAILED if memory allocation failed */ int mbedtls_mpi_shrink( mbedtls_mpi *X, size_t nblimbs ); /** * \brief Copy the contents of Y into X * * \param X Destination MPI * \param Y Source MPI * * \return 0 if successful, * MBEDTLS_ERR_MPI_ALLOC_FAILED if memory allocation failed */ int mbedtls_mpi_copy( mbedtls_mpi *X, const mbedtls_mpi *Y ); /** * \brief Swap the contents of X and Y * * \param X First MPI value * \param Y Second MPI value */ void mbedtls_mpi_swap( mbedtls_mpi *X, mbedtls_mpi *Y ); /** * \brief Safe conditional assignment X = Y if assign is 1 * * \param X MPI to conditionally assign to * \param Y Value to be assigned * \param assign 1: perform the assignment, 0: keep X's original value * * \return 0 if successful, * MBEDTLS_ERR_MPI_ALLOC_FAILED if memory allocation failed, * * \note This function is equivalent to * if( assign ) mbedtls_mpi_copy( X, Y ); * except that it avoids leaking any information about whether * the assignment was done or not (the above code may leak * information through branch prediction and/or memory access * patterns analysis). */ int mbedtls_mpi_safe_cond_assign( mbedtls_mpi *X, const mbedtls_mpi *Y, unsigned char assign ); /** * \brief Safe conditional swap X <-> Y if swap is 1 * * \param X First mbedtls_mpi value * \param Y Second mbedtls_mpi value * \param assign 1: perform the swap, 0: keep X and Y's original values * * \return 0 if successful, * MBEDTLS_ERR_MPI_ALLOC_FAILED if memory allocation failed, * * \note This function is equivalent to * if( assign ) mbedtls_mpi_swap( X, Y ); * except that it avoids leaking any information about whether * the assignment was done or not (the above code may leak * information through branch prediction and/or memory access * patterns analysis). */ int mbedtls_mpi_safe_cond_swap( mbedtls_mpi *X, mbedtls_mpi *Y, unsigned char assign ); /** * \brief Set value from integer * * \param X MPI to set * \param z Value to use * * \return 0 if successful, * MBEDTLS_ERR_MPI_ALLOC_FAILED if memory allocation failed */ int mbedtls_mpi_lset( mbedtls_mpi *X, mbedtls_mpi_sint z ); /** * \brief Get a specific bit from X * * \param X MPI to use * \param pos Zero-based index of the bit in X * * \return Either a 0 or a 1 */ int mbedtls_mpi_get_bit( const mbedtls_mpi *X, size_t pos ); /** * \brief Set a bit of X to a specific value of 0 or 1 * * \note Will grow X if necessary to set a bit to 1 in a not yet * existing limb.
Will not grow if bit should be set to 0 * * \param X MPI to use * \param pos Zero-based index of the bit in X * \param val The value to set the bit to (0 or 1) * * \return 0 if successful, * MBEDTLS_ERR_MPI_ALLOC_FAILED if memory allocation failed, * MBEDTLS_ERR_MPI_BAD_INPUT_DATA if val is not 0 or 1 */ int mbedtls_mpi_set_bit( mbedtls_mpi *X, size_t pos, unsigned char val );
/** * \brief Return the number of zero-bits before the least significant * '1' bit * * Note: Thus also the zero-based index of the least significant '1' bit * * \param X MPI to use */ size_t mbedtls_mpi_lsb( const mbedtls_mpi *X );
/** * \brief Return the number of bits up to and including the most * significant '1' bit * * Note: Thus also the one-based index of the most significant '1' bit * * \param X MPI to use */ size_t mbedtls_mpi_bitlen( const mbedtls_mpi *X );
/** * \brief Return the total size in bytes * * \param X MPI to use */ size_t mbedtls_mpi_size( const mbedtls_mpi *X );
/** * \brief Import from an ASCII string * * \param X Destination MPI * \param radix Input numeric base * \param s Null-terminated string buffer * * \return 0 if successful, or a MBEDTLS_ERR_MPI_XXX error code */ int mbedtls_mpi_read_string( mbedtls_mpi *X, int radix, const char *s );
/** * \brief Export into an ASCII string * * \param X Source MPI * \param radix Output numeric base * \param buf Buffer to write the string to * \param buflen Length of buf * \param olen Length of the string written, including final NUL byte * * \return 0 if successful, or a MBEDTLS_ERR_MPI_XXX error code. * *olen is always updated to reflect the amount * of data that has (or would have) been written. * * \note Call this function with buflen = 0 to obtain the * minimum required buffer size in *olen. */ int mbedtls_mpi_write_string( const mbedtls_mpi *X, int radix, char *buf, size_t buflen, size_t *olen );
#if defined(MBEDTLS_FS_IO) /** * \brief Read MPI from a line in an opened file * * \param X Destination MPI * \param radix Input numeric base * \param fin Input file handle * * \return 0 if successful, MBEDTLS_ERR_MPI_BUFFER_TOO_SMALL if * the file read buffer is too small or a * MBEDTLS_ERR_MPI_XXX error code * * \note On success, this function advances the file stream * to the end of the current line or to EOF. * * The function returns 0 on an empty line. * * Leading whitespace is ignored, as is a * '0x' prefix for radix 16. * */ int mbedtls_mpi_read_file( mbedtls_mpi *X, int radix, FILE *fin );
/** * \brief Write X into an opened file, or stdout if fout is NULL * * \param p Prefix, can be NULL * \param X Source MPI * \param radix Output numeric base * \param fout Output file handle (can be NULL) * * \return 0 if successful, or a MBEDTLS_ERR_MPI_XXX error code * * \note Set fout == NULL to print X on the console. */ int mbedtls_mpi_write_file( const char *p, const mbedtls_mpi *X, int radix, FILE *fout ); #endif /* MBEDTLS_FS_IO */
/** * \brief Import X from unsigned binary data, big endian * * \param X Destination MPI * \param buf Input buffer * \param buflen Input buffer size * * \return 0 if successful, * MBEDTLS_ERR_MPI_ALLOC_FAILED if memory allocation failed */ int mbedtls_mpi_read_binary( mbedtls_mpi *X, const unsigned char *buf, size_t buflen );
/** * \brief Export X into unsigned binary data, big endian. * Always fills the whole buffer, which will start with zeros * if the number is smaller.
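 *
 * \note Usage sketch (illustrative addition, not upstream text):
 * \code
 * unsigned char buf[32];
 * if( mbedtls_mpi_size( &X ) <= sizeof( buf ) )
 *     mbedtls_mpi_write_binary( &X, buf, sizeof( buf ) );
 * // a 5-byte value is written right-aligned after 27 zero bytes
 * \endcode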
* * \param X Source MPI * \param buf Output buffer * \param buflen Output buffer size * * \return 0 if successful, * MBEDTLS_ERR_MPI_BUFFER_TOO_SMALL if buf isn't large enough */ int mbedtls_mpi_write_binary( const mbedtls_mpi *X, unsigned char *buf, size_t buflen );
/** * \brief Left-shift: X <<= count * * \param X MPI to shift * \param count Amount to shift * * \return 0 if successful, * MBEDTLS_ERR_MPI_ALLOC_FAILED if memory allocation failed */ int mbedtls_mpi_shift_l( mbedtls_mpi *X, size_t count );
/** * \brief Right-shift: X >>= count * * \param X MPI to shift * \param count Amount to shift * * \return 0 if successful, * MBEDTLS_ERR_MPI_ALLOC_FAILED if memory allocation failed */ int mbedtls_mpi_shift_r( mbedtls_mpi *X, size_t count );
/** * \brief Compare unsigned values * * \param X Left-hand MPI * \param Y Right-hand MPI * * \return 1 if |X| is greater than |Y|, * -1 if |X| is less than |Y| or * 0 if |X| is equal to |Y| */ int mbedtls_mpi_cmp_abs( const mbedtls_mpi *X, const mbedtls_mpi *Y );
/** * \brief Compare signed values * * \param X Left-hand MPI * \param Y Right-hand MPI * * \return 1 if X is greater than Y, * -1 if X is less than Y or * 0 if X is equal to Y */ int mbedtls_mpi_cmp_mpi( const mbedtls_mpi *X, const mbedtls_mpi *Y );
/** * \brief Compare signed values * * \param X Left-hand MPI * \param z The integer value to compare to * * \return 1 if X is greater than z, * -1 if X is less than z or * 0 if X is equal to z */ int mbedtls_mpi_cmp_int( const mbedtls_mpi *X, mbedtls_mpi_sint z );
/** * \brief Unsigned addition: X = |A| + |B| * * \param X Destination MPI * \param A Left-hand MPI * \param B Right-hand MPI * * \return 0 if successful, * MBEDTLS_ERR_MPI_ALLOC_FAILED if memory allocation failed */ int mbedtls_mpi_add_abs( mbedtls_mpi *X, const mbedtls_mpi *A, const mbedtls_mpi *B );
/** * \brief Unsigned subtraction: X = |A| - |B| * * \param X Destination MPI * \param A Left-hand MPI * \param B Right-hand MPI * * \return 0 if successful, * MBEDTLS_ERR_MPI_NEGATIVE_VALUE if B is greater than A */ int mbedtls_mpi_sub_abs( mbedtls_mpi *X, const mbedtls_mpi *A, const mbedtls_mpi *B );
/** * \brief Signed addition: X = A + B * * \param X Destination MPI * \param A Left-hand MPI * \param B Right-hand MPI * * \return 0 if successful, * MBEDTLS_ERR_MPI_ALLOC_FAILED if memory allocation failed */ int mbedtls_mpi_add_mpi( mbedtls_mpi *X, const mbedtls_mpi *A, const mbedtls_mpi *B );
/** * \brief Signed subtraction: X = A - B * * \param X Destination MPI * \param A Left-hand MPI * \param B Right-hand MPI * * \return 0 if successful, * MBEDTLS_ERR_MPI_ALLOC_FAILED if memory allocation failed */ int mbedtls_mpi_sub_mpi( mbedtls_mpi *X, const mbedtls_mpi *A, const mbedtls_mpi *B );
/** * \brief Signed addition: X = A + b * * \param X Destination MPI * \param A Left-hand MPI * \param b The integer value to add * * \return 0 if successful, * MBEDTLS_ERR_MPI_ALLOC_FAILED if memory allocation failed */ int mbedtls_mpi_add_int( mbedtls_mpi *X, const mbedtls_mpi *A, mbedtls_mpi_sint b );
/** * \brief Signed subtraction: X = A - b * * \param X Destination MPI * \param A Left-hand MPI * \param b The integer value to subtract * * \return 0 if successful, * MBEDTLS_ERR_MPI_ALLOC_FAILED if memory allocation failed */ int mbedtls_mpi_sub_int( mbedtls_mpi *X, const mbedtls_mpi *A, mbedtls_mpi_sint b );
/** * \brief Baseline multiplication: X = A * B * * \param X Destination MPI * \param A Left-hand MPI * \param B Right-hand MPI * * \return 0 if successful, *
MBEDTLS_ERR_MPI_ALLOC_FAILED if memory allocation failed */ int mbedtls_mpi_mul_mpi( mbedtls_mpi *X, const mbedtls_mpi *A, const mbedtls_mpi *B );
/** * \brief Baseline multiplication: X = A * b * * \param X Destination MPI * \param A Left-hand MPI * \param b The unsigned integer value to multiply with * * \note b is unsigned * * \return 0 if successful, * MBEDTLS_ERR_MPI_ALLOC_FAILED if memory allocation failed */ int mbedtls_mpi_mul_int( mbedtls_mpi *X, const mbedtls_mpi *A, mbedtls_mpi_uint b );
/** * \brief Division by mbedtls_mpi: A = Q * B + R * * \param Q Destination MPI for the quotient * \param R Destination MPI for the remainder * \param A Left-hand MPI * \param B Right-hand MPI * * \return 0 if successful, * MBEDTLS_ERR_MPI_ALLOC_FAILED if memory allocation failed, * MBEDTLS_ERR_MPI_DIVISION_BY_ZERO if B == 0 * * \note Either Q or R can be NULL. */ int mbedtls_mpi_div_mpi( mbedtls_mpi *Q, mbedtls_mpi *R, const mbedtls_mpi *A, const mbedtls_mpi *B );
/** * \brief Division by int: A = Q * b + R * * \param Q Destination MPI for the quotient * \param R Destination MPI for the remainder * \param A Left-hand MPI * \param b Integer to divide by * * \return 0 if successful, * MBEDTLS_ERR_MPI_ALLOC_FAILED if memory allocation failed, * MBEDTLS_ERR_MPI_DIVISION_BY_ZERO if b == 0 * * \note Either Q or R can be NULL. */ int mbedtls_mpi_div_int( mbedtls_mpi *Q, mbedtls_mpi *R, const mbedtls_mpi *A, mbedtls_mpi_sint b );
/** * \brief Modulo: R = A mod B * * \param R Destination MPI for the remainder * \param A Left-hand MPI * \param B Right-hand MPI * * \return 0 if successful, * MBEDTLS_ERR_MPI_ALLOC_FAILED if memory allocation failed, * MBEDTLS_ERR_MPI_DIVISION_BY_ZERO if B == 0, * MBEDTLS_ERR_MPI_NEGATIVE_VALUE if B < 0 */ int mbedtls_mpi_mod_mpi( mbedtls_mpi *R, const mbedtls_mpi *A, const mbedtls_mpi *B );
/** * \brief Modulo: r = A mod b * * \param r Destination mbedtls_mpi_uint * \param A Left-hand MPI * \param b Integer to divide by * * \return 0 if successful, * MBEDTLS_ERR_MPI_ALLOC_FAILED if memory allocation failed, * MBEDTLS_ERR_MPI_DIVISION_BY_ZERO if b == 0, * MBEDTLS_ERR_MPI_NEGATIVE_VALUE if b < 0 */ int mbedtls_mpi_mod_int( mbedtls_mpi_uint *r, const mbedtls_mpi *A, mbedtls_mpi_sint b );
/** * \brief Sliding-window exponentiation: X = A^E mod N * * \param X Destination MPI * \param A Left-hand MPI * \param E Exponent MPI * \param N Modular MPI * \param _RR Speed-up MPI used for recalculations * * \return 0 if successful, * MBEDTLS_ERR_MPI_ALLOC_FAILED if memory allocation failed, * MBEDTLS_ERR_MPI_BAD_INPUT_DATA if N is negative or even or * if E is negative * * \note _RR is used to avoid re-computing R*R mod N across * multiple calls, which speeds things up a bit. It can * be set to NULL if the extra performance is unneeded.
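 *
 * \note Sketch of sharing _RR across calls (illustrative
 *       addition; X1/X2 and A1/A2 stand for assumed caller MPIs):
 * \code
 * mbedtls_mpi RR;
 * mbedtls_mpi_init( &RR );                      // empty on first call
 * mbedtls_mpi_exp_mod( &X1, &A1, &E, &N, &RR );
 * mbedtls_mpi_exp_mod( &X2, &A2, &E, &N, &RR ); // reuses R*R mod N
 * mbedtls_mpi_free( &RR );
 * \endcode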
*/ int mbedtls_mpi_exp_mod( mbedtls_mpi *X, const mbedtls_mpi *A, const mbedtls_mpi *E, const mbedtls_mpi *N, mbedtls_mpi *_RR ); /** * \brief Fill an MPI X with size bytes of random * * \param X Destination MPI * \param size Size in bytes * \param f_rng RNG function * \param p_rng RNG parameter * * \return 0 if successful, * MBEDTLS_ERR_MPI_ALLOC_FAILED if memory allocation failed */ int mbedtls_mpi_fill_random( mbedtls_mpi *X, size_t size, int (*f_rng)(void *, unsigned char *, size_t), void *p_rng ); /** * \brief Greatest common divisor: G = gcd(A, B) * * \param G Destination MPI * \param A Left-hand MPI * \param B Right-hand MPI * * \return 0 if successful, * MBEDTLS_ERR_MPI_ALLOC_FAILED if memory allocation failed */ int mbedtls_mpi_gcd( mbedtls_mpi *G, const mbedtls_mpi *A, const mbedtls_mpi *B ); /** * \brief Modular inverse: X = A^-1 mod N * * \param X Destination MPI * \param A Left-hand MPI * \param N Right-hand MPI * * \return 0 if successful, * MBEDTLS_ERR_MPI_ALLOC_FAILED if memory allocation failed, * MBEDTLS_ERR_MPI_BAD_INPUT_DATA if N is <= 1, MBEDTLS_ERR_MPI_NOT_ACCEPTABLE if A has no inverse mod N. */ int mbedtls_mpi_inv_mod( mbedtls_mpi *X, const mbedtls_mpi *A, const mbedtls_mpi *N ); /** * \brief Miller-Rabin primality test * * \param X MPI to check * \param f_rng RNG function * \param p_rng RNG parameter * * \return 0 if successful (probably prime), * MBEDTLS_ERR_MPI_ALLOC_FAILED if memory allocation failed, * MBEDTLS_ERR_MPI_NOT_ACCEPTABLE if X is not prime */ int mbedtls_mpi_is_prime( const mbedtls_mpi *X, int (*f_rng)(void *, unsigned char *, size_t), void *p_rng ); /** * \brief Prime number generation * * \param X Destination MPI * \param nbits Required size of X in bits * ( 3 <= nbits <= MBEDTLS_MPI_MAX_BITS ) * \param dh_flag If 1, then (X-1)/2 will be prime too * \param f_rng RNG function * \param p_rng RNG parameter * * \return 0 if successful (probably prime), * MBEDTLS_ERR_MPI_ALLOC_FAILED if memory allocation failed, * MBEDTLS_ERR_MPI_BAD_INPUT_DATA if nbits is < 3 */ int mbedtls_mpi_gen_prime( mbedtls_mpi *X, size_t nbits, int dh_flag, int (*f_rng)(void *, unsigned char *, size_t), void *p_rng ); /** * \brief Checkup routine * * \return 0 if successful, or 1 if the test failed */ int mbedtls_mpi_self_test( int verbose ); #ifdef __cplusplus } #endif #endif /* bignum.h */ backport-iwlwifi-dkms-7906/compat/verification/mbedtls/bn_mul.h000066400000000000000000001051371351064634500247340ustar00rootroot00000000000000/** * \file bn_mul.h * * \brief Multi-precision integer library * * Copyright (C) 2006-2015, ARM Limited, All Rights Reserved * SPDX-License-Identifier: GPL-2.0 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * * This file is part of mbed TLS (https://tls.mbed.org) */ /* * Multiply source vector [s] with b, add result * to destination vector [d] and set carry c. * * Currently supports: * * . IA-32 (386+) . 
AMD64 / EM64T * . IA-32 (SSE2) . Motorola 68000 * . PowerPC, 32-bit . MicroBlaze * . PowerPC, 64-bit . TriCore * . SPARC v8 . ARM v3+ * . Alpha . MIPS32 * . C, longlong . C, generic */ #ifndef MBEDTLS_BN_MUL_H #define MBEDTLS_BN_MUL_H #include "bignum.h" #if defined(MBEDTLS_HAVE_ASM) #ifndef asm #define asm __asm #endif /* armcc5 --gnu defines __GNUC__ but doesn't support GNU's extended asm */ #if defined(__GNUC__) && \ ( !defined(__ARMCC_VERSION) || __ARMCC_VERSION >= 6000000 ) #if defined(__i386__) #define MULADDC_INIT \ asm( \ "movl %%ebx, %0 \n\t" \ "movl %5, %%esi \n\t" \ "movl %6, %%edi \n\t" \ "movl %7, %%ecx \n\t" \ "movl %8, %%ebx \n\t" #define MULADDC_CORE \ "lodsl \n\t" \ "mull %%ebx \n\t" \ "addl %%ecx, %%eax \n\t" \ "adcl $0, %%edx \n\t" \ "addl (%%edi), %%eax \n\t" \ "adcl $0, %%edx \n\t" \ "movl %%edx, %%ecx \n\t" \ "stosl \n\t" #if defined(MBEDTLS_HAVE_SSE2) #define MULADDC_HUIT \ "movd %%ecx, %%mm1 \n\t" \ "movd %%ebx, %%mm0 \n\t" \ "movd (%%edi), %%mm3 \n\t" \ "paddq %%mm3, %%mm1 \n\t" \ "movd (%%esi), %%mm2 \n\t" \ "pmuludq %%mm0, %%mm2 \n\t" \ "movd 4(%%esi), %%mm4 \n\t" \ "pmuludq %%mm0, %%mm4 \n\t" \ "movd 8(%%esi), %%mm6 \n\t" \ "pmuludq %%mm0, %%mm6 \n\t" \ "movd 12(%%esi), %%mm7 \n\t" \ "pmuludq %%mm0, %%mm7 \n\t" \ "paddq %%mm2, %%mm1 \n\t" \ "movd 4(%%edi), %%mm3 \n\t" \ "paddq %%mm4, %%mm3 \n\t" \ "movd 8(%%edi), %%mm5 \n\t" \ "paddq %%mm6, %%mm5 \n\t" \ "movd 12(%%edi), %%mm4 \n\t" \ "paddq %%mm4, %%mm7 \n\t" \ "movd %%mm1, (%%edi) \n\t" \ "movd 16(%%esi), %%mm2 \n\t" \ "pmuludq %%mm0, %%mm2 \n\t" \ "psrlq $32, %%mm1 \n\t" \ "movd 20(%%esi), %%mm4 \n\t" \ "pmuludq %%mm0, %%mm4 \n\t" \ "paddq %%mm3, %%mm1 \n\t" \ "movd 24(%%esi), %%mm6 \n\t" \ "pmuludq %%mm0, %%mm6 \n\t" \ "movd %%mm1, 4(%%edi) \n\t" \ "psrlq $32, %%mm1 \n\t" \ "movd 28(%%esi), %%mm3 \n\t" \ "pmuludq %%mm0, %%mm3 \n\t" \ "paddq %%mm5, %%mm1 \n\t" \ "movd 16(%%edi), %%mm5 \n\t" \ "paddq %%mm5, %%mm2 \n\t" \ "movd %%mm1, 8(%%edi) \n\t" \ "psrlq $32, %%mm1 \n\t" \ "paddq %%mm7, %%mm1 \n\t" \ "movd 20(%%edi), %%mm5 \n\t" \ "paddq %%mm5, %%mm4 \n\t" \ "movd %%mm1, 12(%%edi) \n\t" \ "psrlq $32, %%mm1 \n\t" \ "paddq %%mm2, %%mm1 \n\t" \ "movd 24(%%edi), %%mm5 \n\t" \ "paddq %%mm5, %%mm6 \n\t" \ "movd %%mm1, 16(%%edi) \n\t" \ "psrlq $32, %%mm1 \n\t" \ "paddq %%mm4, %%mm1 \n\t" \ "movd 28(%%edi), %%mm5 \n\t" \ "paddq %%mm5, %%mm3 \n\t" \ "movd %%mm1, 20(%%edi) \n\t" \ "psrlq $32, %%mm1 \n\t" \ "paddq %%mm6, %%mm1 \n\t" \ "movd %%mm1, 24(%%edi) \n\t" \ "psrlq $32, %%mm1 \n\t" \ "paddq %%mm3, %%mm1 \n\t" \ "movd %%mm1, 28(%%edi) \n\t" \ "addl $32, %%edi \n\t" \ "addl $32, %%esi \n\t" \ "psrlq $32, %%mm1 \n\t" \ "movd %%mm1, %%ecx \n\t" #define MULADDC_STOP \ "emms \n\t" \ "movl %4, %%ebx \n\t" \ "movl %%ecx, %1 \n\t" \ "movl %%edi, %2 \n\t" \ "movl %%esi, %3 \n\t" \ : "=m" (t), "=m" (c), "=m" (d), "=m" (s) \ : "m" (t), "m" (s), "m" (d), "m" (c), "m" (b) \ : "eax", "ecx", "edx", "esi", "edi" \ ); #else #define MULADDC_STOP \ "movl %4, %%ebx \n\t" \ "movl %%ecx, %1 \n\t" \ "movl %%edi, %2 \n\t" \ "movl %%esi, %3 \n\t" \ : "=m" (t), "=m" (c), "=m" (d), "=m" (s) \ : "m" (t), "m" (s), "m" (d), "m" (c), "m" (b) \ : "eax", "ecx", "edx", "esi", "edi" \ ); #endif /* SSE2 */ #endif /* i386 */ #if defined(__amd64__) || defined (__x86_64__) #define MULADDC_INIT \ asm( \ "xorq %%r8, %%r8 \n\t" #define MULADDC_CORE \ "movq (%%rsi), %%rax \n\t" \ "mulq %%rbx \n\t" \ "addq $8, %%rsi \n\t" \ "addq %%rcx, %%rax \n\t" \ "movq %%r8, %%rcx \n\t" \ "adcq $0, %%rdx \n\t" \ "nop \n\t" \ "addq %%rax, (%%rdi) \n\t" \ "adcq %%rdx, 
%%rcx \n\t" \ "addq $8, %%rdi \n\t" #define MULADDC_STOP \ : "+c" (c), "+D" (d), "+S" (s) \ : "b" (b) \ : "rax", "rdx", "r8" \ ); #endif /* AMD64 */ #if defined(__mc68020__) || defined(__mcpu32__) #define MULADDC_INIT \ asm( \ "movl %3, %%a2 \n\t" \ "movl %4, %%a3 \n\t" \ "movl %5, %%d3 \n\t" \ "movl %6, %%d2 \n\t" \ "moveq #0, %%d0 \n\t" #define MULADDC_CORE \ "movel %%a2@+, %%d1 \n\t" \ "mulul %%d2, %%d4:%%d1 \n\t" \ "addl %%d3, %%d1 \n\t" \ "addxl %%d0, %%d4 \n\t" \ "moveq #0, %%d3 \n\t" \ "addl %%d1, %%a3@+ \n\t" \ "addxl %%d4, %%d3 \n\t" #define MULADDC_STOP \ "movl %%d3, %0 \n\t" \ "movl %%a3, %1 \n\t" \ "movl %%a2, %2 \n\t" \ : "=m" (c), "=m" (d), "=m" (s) \ : "m" (s), "m" (d), "m" (c), "m" (b) \ : "d0", "d1", "d2", "d3", "d4", "a2", "a3" \ ); #define MULADDC_HUIT \ "movel %%a2@+, %%d1 \n\t" \ "mulul %%d2, %%d4:%%d1 \n\t" \ "addxl %%d3, %%d1 \n\t" \ "addxl %%d0, %%d4 \n\t" \ "addl %%d1, %%a3@+ \n\t" \ "movel %%a2@+, %%d1 \n\t" \ "mulul %%d2, %%d3:%%d1 \n\t" \ "addxl %%d4, %%d1 \n\t" \ "addxl %%d0, %%d3 \n\t" \ "addl %%d1, %%a3@+ \n\t" \ "movel %%a2@+, %%d1 \n\t" \ "mulul %%d2, %%d4:%%d1 \n\t" \ "addxl %%d3, %%d1 \n\t" \ "addxl %%d0, %%d4 \n\t" \ "addl %%d1, %%a3@+ \n\t" \ "movel %%a2@+, %%d1 \n\t" \ "mulul %%d2, %%d3:%%d1 \n\t" \ "addxl %%d4, %%d1 \n\t" \ "addxl %%d0, %%d3 \n\t" \ "addl %%d1, %%a3@+ \n\t" \ "movel %%a2@+, %%d1 \n\t" \ "mulul %%d2, %%d4:%%d1 \n\t" \ "addxl %%d3, %%d1 \n\t" \ "addxl %%d0, %%d4 \n\t" \ "addl %%d1, %%a3@+ \n\t" \ "movel %%a2@+, %%d1 \n\t" \ "mulul %%d2, %%d3:%%d1 \n\t" \ "addxl %%d4, %%d1 \n\t" \ "addxl %%d0, %%d3 \n\t" \ "addl %%d1, %%a3@+ \n\t" \ "movel %%a2@+, %%d1 \n\t" \ "mulul %%d2, %%d4:%%d1 \n\t" \ "addxl %%d3, %%d1 \n\t" \ "addxl %%d0, %%d4 \n\t" \ "addl %%d1, %%a3@+ \n\t" \ "movel %%a2@+, %%d1 \n\t" \ "mulul %%d2, %%d3:%%d1 \n\t" \ "addxl %%d4, %%d1 \n\t" \ "addxl %%d0, %%d3 \n\t" \ "addl %%d1, %%a3@+ \n\t" \ "addxl %%d0, %%d3 \n\t" #endif /* MC68000 */ #if defined(__powerpc64__) || defined(__ppc64__) #if defined(__MACH__) && defined(__APPLE__) #define MULADDC_INIT \ asm( \ "ld r3, %3 \n\t" \ "ld r4, %4 \n\t" \ "ld r5, %5 \n\t" \ "ld r6, %6 \n\t" \ "addi r3, r3, -8 \n\t" \ "addi r4, r4, -8 \n\t" \ "addic r5, r5, 0 \n\t" #define MULADDC_CORE \ "ldu r7, 8(r3) \n\t" \ "mulld r8, r7, r6 \n\t" \ "mulhdu r9, r7, r6 \n\t" \ "adde r8, r8, r5 \n\t" \ "ld r7, 8(r4) \n\t" \ "addze r5, r9 \n\t" \ "addc r8, r8, r7 \n\t" \ "stdu r8, 8(r4) \n\t" #define MULADDC_STOP \ "addze r5, r5 \n\t" \ "addi r4, r4, 8 \n\t" \ "addi r3, r3, 8 \n\t" \ "std r5, %0 \n\t" \ "std r4, %1 \n\t" \ "std r3, %2 \n\t" \ : "=m" (c), "=m" (d), "=m" (s) \ : "m" (s), "m" (d), "m" (c), "m" (b) \ : "r3", "r4", "r5", "r6", "r7", "r8", "r9" \ ); #else /* __MACH__ && __APPLE__ */ #define MULADDC_INIT \ asm( \ "ld %%r3, %3 \n\t" \ "ld %%r4, %4 \n\t" \ "ld %%r5, %5 \n\t" \ "ld %%r6, %6 \n\t" \ "addi %%r3, %%r3, -8 \n\t" \ "addi %%r4, %%r4, -8 \n\t" \ "addic %%r5, %%r5, 0 \n\t" #define MULADDC_CORE \ "ldu %%r7, 8(%%r3) \n\t" \ "mulld %%r8, %%r7, %%r6 \n\t" \ "mulhdu %%r9, %%r7, %%r6 \n\t" \ "adde %%r8, %%r8, %%r5 \n\t" \ "ld %%r7, 8(%%r4) \n\t" \ "addze %%r5, %%r9 \n\t" \ "addc %%r8, %%r8, %%r7 \n\t" \ "stdu %%r8, 8(%%r4) \n\t" #define MULADDC_STOP \ "addze %%r5, %%r5 \n\t" \ "addi %%r4, %%r4, 8 \n\t" \ "addi %%r3, %%r3, 8 \n\t" \ "std %%r5, %0 \n\t" \ "std %%r4, %1 \n\t" \ "std %%r3, %2 \n\t" \ : "=m" (c), "=m" (d), "=m" (s) \ : "m" (s), "m" (d), "m" (c), "m" (b) \ : "r3", "r4", "r5", "r6", "r7", "r8", "r9" \ ); #endif /* __MACH__ && __APPLE__ */ #elif defined(__powerpc__) || defined(__ppc__) 
/* end PPC64/begin PPC32 */ #if defined(__MACH__) && defined(__APPLE__) #define MULADDC_INIT \ asm( \ "lwz r3, %3 \n\t" \ "lwz r4, %4 \n\t" \ "lwz r5, %5 \n\t" \ "lwz r6, %6 \n\t" \ "addi r3, r3, -4 \n\t" \ "addi r4, r4, -4 \n\t" \ "addic r5, r5, 0 \n\t" #define MULADDC_CORE \ "lwzu r7, 4(r3) \n\t" \ "mullw r8, r7, r6 \n\t" \ "mulhwu r9, r7, r6 \n\t" \ "adde r8, r8, r5 \n\t" \ "lwz r7, 4(r4) \n\t" \ "addze r5, r9 \n\t" \ "addc r8, r8, r7 \n\t" \ "stwu r8, 4(r4) \n\t" #define MULADDC_STOP \ "addze r5, r5 \n\t" \ "addi r4, r4, 4 \n\t" \ "addi r3, r3, 4 \n\t" \ "stw r5, %0 \n\t" \ "stw r4, %1 \n\t" \ "stw r3, %2 \n\t" \ : "=m" (c), "=m" (d), "=m" (s) \ : "m" (s), "m" (d), "m" (c), "m" (b) \ : "r3", "r4", "r5", "r6", "r7", "r8", "r9" \ ); #else /* __MACH__ && __APPLE__ */ #define MULADDC_INIT \ asm( \ "lwz %%r3, %3 \n\t" \ "lwz %%r4, %4 \n\t" \ "lwz %%r5, %5 \n\t" \ "lwz %%r6, %6 \n\t" \ "addi %%r3, %%r3, -4 \n\t" \ "addi %%r4, %%r4, -4 \n\t" \ "addic %%r5, %%r5, 0 \n\t" #define MULADDC_CORE \ "lwzu %%r7, 4(%%r3) \n\t" \ "mullw %%r8, %%r7, %%r6 \n\t" \ "mulhwu %%r9, %%r7, %%r6 \n\t" \ "adde %%r8, %%r8, %%r5 \n\t" \ "lwz %%r7, 4(%%r4) \n\t" \ "addze %%r5, %%r9 \n\t" \ "addc %%r8, %%r8, %%r7 \n\t" \ "stwu %%r8, 4(%%r4) \n\t" #define MULADDC_STOP \ "addze %%r5, %%r5 \n\t" \ "addi %%r4, %%r4, 4 \n\t" \ "addi %%r3, %%r3, 4 \n\t" \ "stw %%r5, %0 \n\t" \ "stw %%r4, %1 \n\t" \ "stw %%r3, %2 \n\t" \ : "=m" (c), "=m" (d), "=m" (s) \ : "m" (s), "m" (d), "m" (c), "m" (b) \ : "r3", "r4", "r5", "r6", "r7", "r8", "r9" \ ); #endif /* __MACH__ && __APPLE__ */ #endif /* PPC32 */ /* * The Sparc(64) assembly is reported to be broken. * Disable it for now, until we're able to fix it. */ #if 0 && defined(__sparc__) #if defined(__sparc64__) #define MULADDC_INIT \ asm( \ "ldx %3, %%o0 \n\t" \ "ldx %4, %%o1 \n\t" \ "ld %5, %%o2 \n\t" \ "ld %6, %%o3 \n\t" #define MULADDC_CORE \ "ld [%%o0], %%o4 \n\t" \ "inc 4, %%o0 \n\t" \ "ld [%%o1], %%o5 \n\t" \ "umul %%o3, %%o4, %%o4 \n\t" \ "addcc %%o4, %%o2, %%o4 \n\t" \ "rd %%y, %%g1 \n\t" \ "addx %%g1, 0, %%g1 \n\t" \ "addcc %%o4, %%o5, %%o4 \n\t" \ "st %%o4, [%%o1] \n\t" \ "addx %%g1, 0, %%o2 \n\t" \ "inc 4, %%o1 \n\t" #define MULADDC_STOP \ "st %%o2, %0 \n\t" \ "stx %%o1, %1 \n\t" \ "stx %%o0, %2 \n\t" \ : "=m" (c), "=m" (d), "=m" (s) \ : "m" (s), "m" (d), "m" (c), "m" (b) \ : "g1", "o0", "o1", "o2", "o3", "o4", \ "o5" \ ); #else /* __sparc64__ */ #define MULADDC_INIT \ asm( \ "ld %3, %%o0 \n\t" \ "ld %4, %%o1 \n\t" \ "ld %5, %%o2 \n\t" \ "ld %6, %%o3 \n\t" #define MULADDC_CORE \ "ld [%%o0], %%o4 \n\t" \ "inc 4, %%o0 \n\t" \ "ld [%%o1], %%o5 \n\t" \ "umul %%o3, %%o4, %%o4 \n\t" \ "addcc %%o4, %%o2, %%o4 \n\t" \ "rd %%y, %%g1 \n\t" \ "addx %%g1, 0, %%g1 \n\t" \ "addcc %%o4, %%o5, %%o4 \n\t" \ "st %%o4, [%%o1] \n\t" \ "addx %%g1, 0, %%o2 \n\t" \ "inc 4, %%o1 \n\t" #define MULADDC_STOP \ "st %%o2, %0 \n\t" \ "st %%o1, %1 \n\t" \ "st %%o0, %2 \n\t" \ : "=m" (c), "=m" (d), "=m" (s) \ : "m" (s), "m" (d), "m" (c), "m" (b) \ : "g1", "o0", "o1", "o2", "o3", "o4", \ "o5" \ ); #endif /* __sparc64__ */ #endif /* __sparc__ */ #if defined(__microblaze__) || defined(microblaze) #define MULADDC_INIT \ asm( \ "lwi r3, %3 \n\t" \ "lwi r4, %4 \n\t" \ "lwi r5, %5 \n\t" \ "lwi r6, %6 \n\t" \ "andi r7, r6, 0xffff \n\t" \ "bsrli r6, r6, 16 \n\t" #define MULADDC_CORE \ "lhui r8, r3, 0 \n\t" \ "addi r3, r3, 2 \n\t" \ "lhui r9, r3, 0 \n\t" \ "addi r3, r3, 2 \n\t" \ "mul r10, r9, r6 \n\t" \ "mul r11, r8, r7 \n\t" \ "mul r12, r9, r7 \n\t" \ "mul r13, r8, r6 \n\t" \ "bsrli r8, r10, 16 \n\t" \ "bsrli r9, 
r11, 16 \n\t" \ "add r13, r13, r8 \n\t" \ "add r13, r13, r9 \n\t" \ "bslli r10, r10, 16 \n\t" \ "bslli r11, r11, 16 \n\t" \ "add r12, r12, r10 \n\t" \ "addc r13, r13, r0 \n\t" \ "add r12, r12, r11 \n\t" \ "addc r13, r13, r0 \n\t" \ "lwi r10, r4, 0 \n\t" \ "add r12, r12, r10 \n\t" \ "addc r13, r13, r0 \n\t" \ "add r12, r12, r5 \n\t" \ "addc r5, r13, r0 \n\t" \ "swi r12, r4, 0 \n\t" \ "addi r4, r4, 4 \n\t" #define MULADDC_STOP \ "swi r5, %0 \n\t" \ "swi r4, %1 \n\t" \ "swi r3, %2 \n\t" \ : "=m" (c), "=m" (d), "=m" (s) \ : "m" (s), "m" (d), "m" (c), "m" (b) \ : "r3", "r4" "r5", "r6", "r7", "r8", \ "r9", "r10", "r11", "r12", "r13" \ ); #endif /* MicroBlaze */ #if defined(__tricore__) #define MULADDC_INIT \ asm( \ "ld.a %%a2, %3 \n\t" \ "ld.a %%a3, %4 \n\t" \ "ld.w %%d4, %5 \n\t" \ "ld.w %%d1, %6 \n\t" \ "xor %%d5, %%d5 \n\t" #define MULADDC_CORE \ "ld.w %%d0, [%%a2+] \n\t" \ "madd.u %%e2, %%e4, %%d0, %%d1 \n\t" \ "ld.w %%d0, [%%a3] \n\t" \ "addx %%d2, %%d2, %%d0 \n\t" \ "addc %%d3, %%d3, 0 \n\t" \ "mov %%d4, %%d3 \n\t" \ "st.w [%%a3+], %%d2 \n\t" #define MULADDC_STOP \ "st.w %0, %%d4 \n\t" \ "st.a %1, %%a3 \n\t" \ "st.a %2, %%a2 \n\t" \ : "=m" (c), "=m" (d), "=m" (s) \ : "m" (s), "m" (d), "m" (c), "m" (b) \ : "d0", "d1", "e2", "d4", "a2", "a3" \ ); #endif /* TriCore */ /* * gcc -O0 by default uses r7 for the frame pointer, so it complains about our * use of r7 below, unless -fomit-frame-pointer is passed. Unfortunately, * passing that option is not easy when building with yotta. * * On the other hand, -fomit-frame-pointer is implied by any -Ox options with * x !=0, which we can detect using __OPTIMIZE__ (which is also defined by * clang and armcc5 under the same conditions). * * So, only use the optimized assembly below for optimized build, which avoids * the build error and is pretty reasonable anyway. 
*/ #if defined(__GNUC__) && !defined(__OPTIMIZE__) #define MULADDC_CANNOT_USE_R7 #endif #if defined(__arm__) && !defined(MULADDC_CANNOT_USE_R7) #if defined(__thumb__) && !defined(__thumb2__) #define MULADDC_INIT \ asm( \ "ldr r0, %3 \n\t" \ "ldr r1, %4 \n\t" \ "ldr r2, %5 \n\t" \ "ldr r3, %6 \n\t" \ "lsr r7, r3, #16 \n\t" \ "mov r9, r7 \n\t" \ "lsl r7, r3, #16 \n\t" \ "lsr r7, r7, #16 \n\t" \ "mov r8, r7 \n\t" #define MULADDC_CORE \ "ldmia r0!, {r6} \n\t" \ "lsr r7, r6, #16 \n\t" \ "lsl r6, r6, #16 \n\t" \ "lsr r6, r6, #16 \n\t" \ "mov r4, r8 \n\t" \ "mul r4, r6 \n\t" \ "mov r3, r9 \n\t" \ "mul r6, r3 \n\t" \ "mov r5, r9 \n\t" \ "mul r5, r7 \n\t" \ "mov r3, r8 \n\t" \ "mul r7, r3 \n\t" \ "lsr r3, r6, #16 \n\t" \ "add r5, r5, r3 \n\t" \ "lsr r3, r7, #16 \n\t" \ "add r5, r5, r3 \n\t" \ "add r4, r4, r2 \n\t" \ "mov r2, #0 \n\t" \ "adc r5, r2 \n\t" \ "lsl r3, r6, #16 \n\t" \ "add r4, r4, r3 \n\t" \ "adc r5, r2 \n\t" \ "lsl r3, r7, #16 \n\t" \ "add r4, r4, r3 \n\t" \ "adc r5, r2 \n\t" \ "ldr r3, [r1] \n\t" \ "add r4, r4, r3 \n\t" \ "adc r2, r5 \n\t" \ "stmia r1!, {r4} \n\t" #define MULADDC_STOP \ "str r2, %0 \n\t" \ "str r1, %1 \n\t" \ "str r0, %2 \n\t" \ : "=m" (c), "=m" (d), "=m" (s) \ : "m" (s), "m" (d), "m" (c), "m" (b) \ : "r0", "r1", "r2", "r3", "r4", "r5", \ "r6", "r7", "r8", "r9", "cc" \ ); #else #define MULADDC_INIT \ asm( \ "ldr r0, %3 \n\t" \ "ldr r1, %4 \n\t" \ "ldr r2, %5 \n\t" \ "ldr r3, %6 \n\t" #define MULADDC_CORE \ "ldr r4, [r0], #4 \n\t" \ "mov r5, #0 \n\t" \ "ldr r6, [r1] \n\t" \ "umlal r2, r5, r3, r4 \n\t" \ "adds r7, r6, r2 \n\t" \ "adc r2, r5, #0 \n\t" \ "str r7, [r1], #4 \n\t" #define MULADDC_STOP \ "str r2, %0 \n\t" \ "str r1, %1 \n\t" \ "str r0, %2 \n\t" \ : "=m" (c), "=m" (d), "=m" (s) \ : "m" (s), "m" (d), "m" (c), "m" (b) \ : "r0", "r1", "r2", "r3", "r4", "r5", \ "r6", "r7", "cc" \ ); #endif /* Thumb */ #endif /* ARMv3 */ #if defined(__alpha__) #define MULADDC_INIT \ asm( \ "ldq $1, %3 \n\t" \ "ldq $2, %4 \n\t" \ "ldq $3, %5 \n\t" \ "ldq $4, %6 \n\t" #define MULADDC_CORE \ "ldq $6, 0($1) \n\t" \ "addq $1, 8, $1 \n\t" \ "mulq $6, $4, $7 \n\t" \ "umulh $6, $4, $6 \n\t" \ "addq $7, $3, $7 \n\t" \ "cmpult $7, $3, $3 \n\t" \ "ldq $5, 0($2) \n\t" \ "addq $7, $5, $7 \n\t" \ "cmpult $7, $5, $5 \n\t" \ "stq $7, 0($2) \n\t" \ "addq $2, 8, $2 \n\t" \ "addq $6, $3, $3 \n\t" \ "addq $5, $3, $3 \n\t" #define MULADDC_STOP \ "stq $3, %0 \n\t" \ "stq $2, %1 \n\t" \ "stq $1, %2 \n\t" \ : "=m" (c), "=m" (d), "=m" (s) \ : "m" (s), "m" (d), "m" (c), "m" (b) \ : "$1", "$2", "$3", "$4", "$5", "$6", "$7" \ ); #endif /* Alpha */ #if defined(__mips__) && !defined(__mips64) #define MULADDC_INIT \ asm( \ "lw $10, %3 \n\t" \ "lw $11, %4 \n\t" \ "lw $12, %5 \n\t" \ "lw $13, %6 \n\t" #define MULADDC_CORE \ "lw $14, 0($10) \n\t" \ "multu $13, $14 \n\t" \ "addi $10, $10, 4 \n\t" \ "mflo $14 \n\t" \ "mfhi $9 \n\t" \ "addu $14, $12, $14 \n\t" \ "lw $15, 0($11) \n\t" \ "sltu $12, $14, $12 \n\t" \ "addu $15, $14, $15 \n\t" \ "sltu $14, $15, $14 \n\t" \ "addu $12, $12, $9 \n\t" \ "sw $15, 0($11) \n\t" \ "addu $12, $12, $14 \n\t" \ "addi $11, $11, 4 \n\t" #define MULADDC_STOP \ "sw $12, %0 \n\t" \ "sw $11, %1 \n\t" \ "sw $10, %2 \n\t" \ : "=m" (c), "=m" (d), "=m" (s) \ : "m" (s), "m" (d), "m" (c), "m" (b) \ : "$9", "$10", "$11", "$12", "$13", "$14", "$15" \ ); #endif /* MIPS */ #endif /* GNUC */ #if (defined(_MSC_VER) && defined(_M_IX86)) || defined(__WATCOMC__) #define MULADDC_INIT \ __asm mov esi, s \ __asm mov edi, d \ __asm mov ecx, c \ __asm mov ebx, b #define MULADDC_CORE \ __asm lodsd \ __asm mul 
ebx \ __asm add eax, ecx \ __asm adc edx, 0 \ __asm add eax, [edi] \ __asm adc edx, 0 \ __asm mov ecx, edx \ __asm stosd #if defined(MBEDTLS_HAVE_SSE2) #define EMIT __asm _emit #define MULADDC_HUIT \ EMIT 0x0F EMIT 0x6E EMIT 0xC9 \ EMIT 0x0F EMIT 0x6E EMIT 0xC3 \ EMIT 0x0F EMIT 0x6E EMIT 0x1F \ EMIT 0x0F EMIT 0xD4 EMIT 0xCB \ EMIT 0x0F EMIT 0x6E EMIT 0x16 \ EMIT 0x0F EMIT 0xF4 EMIT 0xD0 \ EMIT 0x0F EMIT 0x6E EMIT 0x66 EMIT 0x04 \ EMIT 0x0F EMIT 0xF4 EMIT 0xE0 \ EMIT 0x0F EMIT 0x6E EMIT 0x76 EMIT 0x08 \ EMIT 0x0F EMIT 0xF4 EMIT 0xF0 \ EMIT 0x0F EMIT 0x6E EMIT 0x7E EMIT 0x0C \ EMIT 0x0F EMIT 0xF4 EMIT 0xF8 \ EMIT 0x0F EMIT 0xD4 EMIT 0xCA \ EMIT 0x0F EMIT 0x6E EMIT 0x5F EMIT 0x04 \ EMIT 0x0F EMIT 0xD4 EMIT 0xDC \ EMIT 0x0F EMIT 0x6E EMIT 0x6F EMIT 0x08 \ EMIT 0x0F EMIT 0xD4 EMIT 0xEE \ EMIT 0x0F EMIT 0x6E EMIT 0x67 EMIT 0x0C \ EMIT 0x0F EMIT 0xD4 EMIT 0xFC \ EMIT 0x0F EMIT 0x7E EMIT 0x0F \ EMIT 0x0F EMIT 0x6E EMIT 0x56 EMIT 0x10 \ EMIT 0x0F EMIT 0xF4 EMIT 0xD0 \ EMIT 0x0F EMIT 0x73 EMIT 0xD1 EMIT 0x20 \ EMIT 0x0F EMIT 0x6E EMIT 0x66 EMIT 0x14 \ EMIT 0x0F EMIT 0xF4 EMIT 0xE0 \ EMIT 0x0F EMIT 0xD4 EMIT 0xCB \ EMIT 0x0F EMIT 0x6E EMIT 0x76 EMIT 0x18 \ EMIT 0x0F EMIT 0xF4 EMIT 0xF0 \ EMIT 0x0F EMIT 0x7E EMIT 0x4F EMIT 0x04 \ EMIT 0x0F EMIT 0x73 EMIT 0xD1 EMIT 0x20 \ EMIT 0x0F EMIT 0x6E EMIT 0x5E EMIT 0x1C \ EMIT 0x0F EMIT 0xF4 EMIT 0xD8 \ EMIT 0x0F EMIT 0xD4 EMIT 0xCD \ EMIT 0x0F EMIT 0x6E EMIT 0x6F EMIT 0x10 \ EMIT 0x0F EMIT 0xD4 EMIT 0xD5 \ EMIT 0x0F EMIT 0x7E EMIT 0x4F EMIT 0x08 \ EMIT 0x0F EMIT 0x73 EMIT 0xD1 EMIT 0x20 \ EMIT 0x0F EMIT 0xD4 EMIT 0xCF \ EMIT 0x0F EMIT 0x6E EMIT 0x6F EMIT 0x14 \ EMIT 0x0F EMIT 0xD4 EMIT 0xE5 \ EMIT 0x0F EMIT 0x7E EMIT 0x4F EMIT 0x0C \ EMIT 0x0F EMIT 0x73 EMIT 0xD1 EMIT 0x20 \ EMIT 0x0F EMIT 0xD4 EMIT 0xCA \ EMIT 0x0F EMIT 0x6E EMIT 0x6F EMIT 0x18 \ EMIT 0x0F EMIT 0xD4 EMIT 0xF5 \ EMIT 0x0F EMIT 0x7E EMIT 0x4F EMIT 0x10 \ EMIT 0x0F EMIT 0x73 EMIT 0xD1 EMIT 0x20 \ EMIT 0x0F EMIT 0xD4 EMIT 0xCC \ EMIT 0x0F EMIT 0x6E EMIT 0x6F EMIT 0x1C \ EMIT 0x0F EMIT 0xD4 EMIT 0xDD \ EMIT 0x0F EMIT 0x7E EMIT 0x4F EMIT 0x14 \ EMIT 0x0F EMIT 0x73 EMIT 0xD1 EMIT 0x20 \ EMIT 0x0F EMIT 0xD4 EMIT 0xCE \ EMIT 0x0F EMIT 0x7E EMIT 0x4F EMIT 0x18 \ EMIT 0x0F EMIT 0x73 EMIT 0xD1 EMIT 0x20 \ EMIT 0x0F EMIT 0xD4 EMIT 0xCB \ EMIT 0x0F EMIT 0x7E EMIT 0x4F EMIT 0x1C \ EMIT 0x83 EMIT 0xC7 EMIT 0x20 \ EMIT 0x83 EMIT 0xC6 EMIT 0x20 \ EMIT 0x0F EMIT 0x73 EMIT 0xD1 EMIT 0x20 \ EMIT 0x0F EMIT 0x7E EMIT 0xC9 #define MULADDC_STOP \ EMIT 0x0F EMIT 0x77 \ __asm mov c, ecx \ __asm mov d, edi \ __asm mov s, esi \ #else #define MULADDC_STOP \ __asm mov c, ecx \ __asm mov d, edi \ __asm mov s, esi \ #endif /* SSE2 */ #endif /* MSVC */ #endif /* MBEDTLS_HAVE_ASM */ #if !defined(MULADDC_CORE) #if defined(MBEDTLS_HAVE_UDBL) #define MULADDC_INIT \ { \ mbedtls_t_udbl r; \ mbedtls_mpi_uint r0, r1; #define MULADDC_CORE \ r = *(s++) * (mbedtls_t_udbl) b; \ r0 = (mbedtls_mpi_uint) r; \ r1 = (mbedtls_mpi_uint)( r >> biL ); \ r0 += c; r1 += (r0 < c); \ r0 += *d; r1 += (r0 < *d); \ c = r1; *(d++) = r0; #define MULADDC_STOP \ } #else #define MULADDC_INIT \ { \ mbedtls_mpi_uint s0, s1, b0, b1; \ mbedtls_mpi_uint r0, r1, rx, ry; \ b0 = ( b << biH ) >> biH; \ b1 = ( b >> biH ); #define MULADDC_CORE \ s0 = ( *s << biH ) >> biH; \ s1 = ( *s >> biH ); s++; \ rx = s0 * b1; r0 = s0 * b0; \ ry = s1 * b0; r1 = s1 * b1; \ r1 += ( rx >> biH ); \ r1 += ( ry >> biH ); \ rx <<= biH; ry <<= biH; \ r0 += rx; r1 += (r0 < rx); \ r0 += ry; r1 += (r0 < ry); \ r0 += c; r1 += (r0 < c); \ r0 += *d; r1 += (r0 < *d); \ c = r1; *(d++) = r0; 
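/*
 * Illustrative note (added, not upstream text): the generic C variant
 * above implements a full-limb multiply-accumulate without a double
 * width type. Each operand limb is split at biH (half the limb width)
 * into (s1:s0) and (b1:b0); the four half-width products s0*b0, s0*b1,
 * s1*b0 and s1*b1 each fit in one limb, and comparisons such as
 * r1 += (r0 < rx) recover the carry, since an unsigned addition
 * wrapped around exactly when the sum is smaller than an operand.
 */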
#define MULADDC_STOP \ } #endif /* C (generic) */ #endif /* C (longlong) */ #endif /* bn_mul.h */ backport-iwlwifi-dkms-7906/compat/verification/mbedtls/config.h000066400000000000000000000005771351064634500247270ustar00rootroot00000000000000#ifndef __MBEDTLS_CONFIG_H #define __MBEDTLS_CONFIG_H #define MBEDTLS_RSA_C #define MBEDTLS_PKCS1_V15 #define MBEDTLS_MD_C #define __OpenBSD__ #define MBEDTLS_PLATFORM_C #define MBEDTLS_BIGNUM_C #define MBEDTLS_OID_C #define MBEDTLS_ASN1_PARSE_C #define MBEDTLS_NO_UDBL_DIVISION #define MBEDTLS_SHA256_C #include #include "platform.h" #endif /* __MBEDTLS_CONFIG_H */ backport-iwlwifi-dkms-7906/compat/verification/mbedtls/md.h000066400000000000000000000301151351064634500240510ustar00rootroot00000000000000/** * \file md.h * * \brief Generic message digest wrapper * * \author Adriaan de Jong * * Copyright (C) 2006-2015, ARM Limited, All Rights Reserved * SPDX-License-Identifier: GPL-2.0 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * * This file is part of mbed TLS (https://tls.mbed.org) */ #ifndef MBEDTLS_MD_H #define MBEDTLS_MD_H #define MBEDTLS_ERR_MD_FEATURE_UNAVAILABLE -0x5080 /**< The selected feature is not available. */ #define MBEDTLS_ERR_MD_BAD_INPUT_DATA -0x5100 /**< Bad input parameters to function. */ #define MBEDTLS_ERR_MD_ALLOC_FAILED -0x5180 /**< Failed to allocate memory. */ #define MBEDTLS_ERR_MD_FILE_IO_ERROR -0x5200 /**< Opening or reading of file failed. */ #ifdef __cplusplus extern "C" { #endif typedef enum { MBEDTLS_MD_NONE=0, MBEDTLS_MD_MD2, MBEDTLS_MD_MD4, MBEDTLS_MD_MD5, MBEDTLS_MD_SHA1, MBEDTLS_MD_SHA224, MBEDTLS_MD_SHA256, MBEDTLS_MD_SHA384, MBEDTLS_MD_SHA512, MBEDTLS_MD_RIPEMD160, } mbedtls_md_type_t; #if defined(MBEDTLS_SHA512_C) #define MBEDTLS_MD_MAX_SIZE 64 /* longest known is SHA512 */ #else #define MBEDTLS_MD_MAX_SIZE 32 /* longest known is SHA256 or less */ #endif /** * Opaque struct defined in md_internal.h */ typedef struct mbedtls_md_info_t mbedtls_md_info_t; /** * Generic message digest context. */ typedef struct { /** Information about the associated message digest */ const mbedtls_md_info_t *md_info; /** Digest-specific context */ void *md_ctx; /** HMAC part of the context */ void *hmac_ctx; } mbedtls_md_context_t; /** * \brief Returns the list of digests supported by the generic digest module. * * \return a statically allocated array of digests, the last entry * is 0. */ const int *mbedtls_md_list( void ); /** * \brief Returns the message digest information associated with the * given digest name. * * \param md_name Name of the digest to search for. * * \return The message digest information associated with md_name or * NULL if not found. */ const mbedtls_md_info_t *mbedtls_md_info_from_string( const char *md_name ); /** * \brief Returns the message digest information associated with the * given digest type. * * \param md_type type of digest to search for. 
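 *
 * \note E.g. (illustrative addition): passing MBEDTLS_MD_SHA256
 *       yields the SHA-256 descriptor, or NULL when the digest is
 *       not compiled in.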
* * \return The message digest information associated with md_type or * NULL if not found. */ const mbedtls_md_info_t *mbedtls_md_info_from_type( mbedtls_md_type_t md_type );
/** * \brief Initialize an md_context (as NONE) * This should always be called first. * Prepares the context for mbedtls_md_setup() or mbedtls_md_free(). */ void mbedtls_md_init( mbedtls_md_context_t *ctx );
/** * \brief Free and clear the internal structures of ctx. * Can be called at any time after mbedtls_md_init(). * Mandatory once mbedtls_md_setup() has been called. */ void mbedtls_md_free( mbedtls_md_context_t *ctx );
#if ! defined(MBEDTLS_DEPRECATED_REMOVED) #if defined(MBEDTLS_DEPRECATED_WARNING) #define MBEDTLS_DEPRECATED __attribute__((deprecated)) #else #define MBEDTLS_DEPRECATED #endif
/** * \brief Select MD to use and allocate internal structures. * Should be called after mbedtls_md_init() or mbedtls_md_free(). * Makes it necessary to call mbedtls_md_free() later. * * \deprecated Superseded by mbedtls_md_setup() in 2.0.0 * * \param ctx Context to set up. * \param md_info Message digest to use. * * \returns \c 0 on success, * \c MBEDTLS_ERR_MD_BAD_INPUT_DATA on parameter failure, * \c MBEDTLS_ERR_MD_ALLOC_FAILED memory allocation failure. */ int mbedtls_md_init_ctx( mbedtls_md_context_t *ctx, const mbedtls_md_info_t *md_info ) MBEDTLS_DEPRECATED; #undef MBEDTLS_DEPRECATED #endif /* MBEDTLS_DEPRECATED_REMOVED */
/** * \brief Select MD to use and allocate internal structures. * Should be called after mbedtls_md_init() or mbedtls_md_free(). * Makes it necessary to call mbedtls_md_free() later. * * \param ctx Context to set up. * \param md_info Message digest to use. * \param hmac 0 to save some memory if HMAC will not be used, * non-zero if HMAC is going to be used with this context. * * \returns \c 0 on success, * \c MBEDTLS_ERR_MD_BAD_INPUT_DATA on parameter failure, * \c MBEDTLS_ERR_MD_ALLOC_FAILED memory allocation failure. */ int mbedtls_md_setup( mbedtls_md_context_t *ctx, const mbedtls_md_info_t *md_info, int hmac );
/** * \brief Clone the state of an MD context * * \note The two contexts must have been setup to the same type * (cloning from SHA-256 to SHA-512 makes no sense). * * \warning Only clones the MD state, not the HMAC state! (for now) * * \param dst The destination context * \param src The context to be cloned * * \return \c 0 on success, * \c MBEDTLS_ERR_MD_BAD_INPUT_DATA on parameter failure. */ int mbedtls_md_clone( mbedtls_md_context_t *dst, const mbedtls_md_context_t *src );
/** * \brief Returns the size of the message digest output. * * \param md_info message digest info * * \return size of the message digest output in bytes. */ unsigned char mbedtls_md_get_size( const mbedtls_md_info_t *md_info );
/** * \brief Returns the type of the message digest output. * * \param md_info message digest info * * \return type of the message digest output. */ mbedtls_md_type_t mbedtls_md_get_type( const mbedtls_md_info_t *md_info );
/** * \brief Returns the name of the message digest output. * * \param md_info message digest info * * \return name of the message digest output. */ const char *mbedtls_md_get_name( const mbedtls_md_info_t *md_info );
/** * \brief Prepare the context to digest a new message. * Generally called after mbedtls_md_setup() or mbedtls_md_finish(). * Followed by mbedtls_md_update(). * * \param ctx generic message digest context. * * \returns 0 on success, MBEDTLS_ERR_MD_BAD_INPUT_DATA if parameter * verification fails.
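 *
 * \note Streaming sketch (illustrative addition; buf/len stand for
 *       assumed caller data):
 * \code
 * mbedtls_md_context_t ctx;
 * unsigned char hash[MBEDTLS_MD_MAX_SIZE];
 *
 * mbedtls_md_init( &ctx );
 * mbedtls_md_setup( &ctx, mbedtls_md_info_from_type( MBEDTLS_MD_SHA256 ), 0 );
 * mbedtls_md_starts( &ctx );
 * mbedtls_md_update( &ctx, buf, len );  // repeat per input chunk
 * mbedtls_md_finish( &ctx, hash );
 * mbedtls_md_free( &ctx );
 * \endcode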
*/ int mbedtls_md_starts( mbedtls_md_context_t *ctx );
/** * \brief Generic message digest process buffer * Called between mbedtls_md_starts() and mbedtls_md_finish(). * May be called repeatedly. * * \param ctx Generic message digest context * \param input buffer holding the data * \param ilen length of the input data * * \returns 0 on success, MBEDTLS_ERR_MD_BAD_INPUT_DATA if parameter * verification fails. */ int mbedtls_md_update( mbedtls_md_context_t *ctx, const unsigned char *input, size_t ilen );
/** * \brief Generic message digest final digest * Called after mbedtls_md_update(). * Usually followed by mbedtls_md_free() or mbedtls_md_starts(). * * \param ctx Generic message digest context * \param output Generic message digest checksum result * * \returns 0 on success, MBEDTLS_ERR_MD_BAD_INPUT_DATA if parameter * verification fails. */ int mbedtls_md_finish( mbedtls_md_context_t *ctx, unsigned char *output );
/** * \brief Output = message_digest( input buffer ) * * \param md_info message digest info * \param input buffer holding the data * \param ilen length of the input data * \param output Generic message digest checksum result * * \returns 0 on success, MBEDTLS_ERR_MD_BAD_INPUT_DATA if parameter * verification fails. */ int mbedtls_md( const mbedtls_md_info_t *md_info, const unsigned char *input, size_t ilen, unsigned char *output );
#if defined(MBEDTLS_FS_IO) /** * \brief Output = message_digest( file contents ) * * \param md_info message digest info * \param path input file name * \param output generic message digest checksum result * * \return 0 if successful, * MBEDTLS_ERR_MD_FILE_IO_ERROR if file input failed, * MBEDTLS_ERR_MD_BAD_INPUT_DATA if md_info was NULL. */ int mbedtls_md_file( const mbedtls_md_info_t *md_info, const char *path, unsigned char *output ); #endif /* MBEDTLS_FS_IO */
/** * \brief Set HMAC key and prepare to authenticate a new message. * Usually called after mbedtls_md_setup() or mbedtls_md_hmac_finish(). * * \param ctx HMAC context * \param key HMAC secret key * \param keylen length of the HMAC key in bytes * * \returns 0 on success, MBEDTLS_ERR_MD_BAD_INPUT_DATA if parameter * verification fails. */ int mbedtls_md_hmac_starts( mbedtls_md_context_t *ctx, const unsigned char *key, size_t keylen );
/** * \brief Generic HMAC process buffer. * Called between mbedtls_md_hmac_starts() or mbedtls_md_hmac_reset() * and mbedtls_md_hmac_finish(). * May be called repeatedly. * * \param ctx HMAC context * \param input buffer holding the data * \param ilen length of the input data * * \returns 0 on success, MBEDTLS_ERR_MD_BAD_INPUT_DATA if parameter * verification fails. */ int mbedtls_md_hmac_update( mbedtls_md_context_t *ctx, const unsigned char *input, size_t ilen );
/** * \brief Output HMAC. * Called after mbedtls_md_hmac_update(). * Usually followed by mbedtls_md_hmac_reset(), * mbedtls_md_hmac_starts(), or mbedtls_md_free(). * * \param ctx HMAC context * \param output Generic HMAC checksum result * * \returns 0 on success, MBEDTLS_ERR_MD_BAD_INPUT_DATA if parameter * verification fails. */ int mbedtls_md_hmac_finish( mbedtls_md_context_t *ctx, unsigned char *output);
/** * \brief Prepare to authenticate a new message with the same key. * Called after mbedtls_md_hmac_finish() and before * mbedtls_md_hmac_update(). * * \param ctx HMAC context to be reset * * \returns 0 on success, MBEDTLS_ERR_MD_BAD_INPUT_DATA if parameter * verification fails.
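 *
 * \note Same-key reuse sketch (illustrative addition; key/msg/mac
 *       buffers stand for assumed caller data):
 * \code
 * mbedtls_md_hmac_starts( &ctx, key, keylen );
 * mbedtls_md_hmac_update( &ctx, msg1, len1 );
 * mbedtls_md_hmac_finish( &ctx, mac1 );
 * mbedtls_md_hmac_reset( &ctx );        // same key, fresh message
 * mbedtls_md_hmac_update( &ctx, msg2, len2 );
 * mbedtls_md_hmac_finish( &ctx, mac2 );
 * \endcode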
*/ int mbedtls_md_hmac_reset( mbedtls_md_context_t *ctx ); /** * \brief Output = Generic_HMAC( hmac key, input buffer ) * * \param md_info message digest info * \param key HMAC secret key * \param keylen length of the HMAC key in bytes * \param input buffer holding the data * \param ilen length of the input data * \param output Generic HMAC-result * * \returns 0 on success, MBEDTLS_ERR_MD_BAD_INPUT_DATA if parameter * verification fails. */ int mbedtls_md_hmac( const mbedtls_md_info_t *md_info, const unsigned char *key, size_t keylen, const unsigned char *input, size_t ilen, unsigned char *output ); /* Internal use */ int mbedtls_md_process( mbedtls_md_context_t *ctx, const unsigned char *data ); #ifdef __cplusplus } #endif #endif /* MBEDTLS_MD_H */ backport-iwlwifi-dkms-7906/compat/verification/mbedtls/md_internal.h000066400000000000000000000062621351064634500257530ustar00rootroot00000000000000/** * \file md_internal.h * * \brief Message digest wrappers. * * \warning This in an internal header. Do not include directly. * * \author Adriaan de Jong * * Copyright (C) 2006-2015, ARM Limited, All Rights Reserved * SPDX-License-Identifier: GPL-2.0 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * * This file is part of mbed TLS (https://tls.mbed.org) */ #ifndef MBEDTLS_MD_WRAP_H #define MBEDTLS_MD_WRAP_H #if !defined(MBEDTLS_CONFIG_FILE) #include "config.h" #else #include MBEDTLS_CONFIG_FILE #endif #include "md.h" #ifdef __cplusplus extern "C" { #endif /** * Message digest information. * Allows message digest functions to be called in a generic way. 
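 *
 * Added note (not upstream text): this is the classic C "vtable"
 * pattern. The library provides one constant instance per enabled
 * digest (see the extern declarations below, e.g. mbedtls_sha256_info)
 * and the generic mbedtls_md_*() wrappers dispatch through these
 * function pointers.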
*/ struct mbedtls_md_info_t { /** Digest identifier */ mbedtls_md_type_t type; /** Name of the message digest */ const char * name; /** Output length of the digest function in bytes */ int size; /** Block length of the digest function in bytes */ int block_size; /** Digest initialisation function */ void (*starts_func)( void *ctx ); /** Digest update function */ void (*update_func)( void *ctx, const unsigned char *input, size_t ilen ); /** Digest finalisation function */ void (*finish_func)( void *ctx, unsigned char *output ); /** Generic digest function */ void (*digest_func)( const unsigned char *input, size_t ilen, unsigned char *output ); /** Allocate a new context */ void * (*ctx_alloc_func)( void ); /** Free the given context */ void (*ctx_free_func)( void *ctx ); /** Clone state from a context */ void (*clone_func)( void *dst, const void *src ); /** Internal use only */ void (*process_func)( void *ctx, const unsigned char *input ); }; #if defined(MBEDTLS_MD2_C) extern const mbedtls_md_info_t mbedtls_md2_info; #endif #if defined(MBEDTLS_MD4_C) extern const mbedtls_md_info_t mbedtls_md4_info; #endif #if defined(MBEDTLS_MD5_C) extern const mbedtls_md_info_t mbedtls_md5_info; #endif #if defined(MBEDTLS_RIPEMD160_C) extern const mbedtls_md_info_t mbedtls_ripemd160_info; #endif #if defined(MBEDTLS_SHA1_C) extern const mbedtls_md_info_t mbedtls_sha1_info; #endif #if defined(MBEDTLS_SHA256_C) extern const mbedtls_md_info_t mbedtls_sha224_info; extern const mbedtls_md_info_t mbedtls_sha256_info; #endif #if defined(MBEDTLS_SHA512_C) extern const mbedtls_md_info_t mbedtls_sha384_info; extern const mbedtls_md_info_t mbedtls_sha512_info; #endif #ifdef __cplusplus } #endif #endif /* MBEDTLS_MD_WRAP_H */ backport-iwlwifi-dkms-7906/compat/verification/mbedtls/oid.h000066400000000000000000000723041351064634500242320ustar00rootroot00000000000000/** * \file oid.h * * \brief Object Identifier (OID) database * * Copyright (C) 2006-2015, ARM Limited, All Rights Reserved * SPDX-License-Identifier: GPL-2.0 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * * This file is part of mbed TLS (https://tls.mbed.org) */ #ifndef MBEDTLS_OID_H #define MBEDTLS_OID_H #if !defined(MBEDTLS_CONFIG_FILE) #include "config.h" #else #include MBEDTLS_CONFIG_FILE #endif #include "asn1.h" #include "pk.h" #if defined(MBEDTLS_CIPHER_C) #include "cipher.h" #endif #if defined(MBEDTLS_MD_C) #include "md.h" #endif #if defined(MBEDTLS_X509_USE_C) || defined(MBEDTLS_X509_CREATE_C) #include "x509.h" #endif #define MBEDTLS_ERR_OID_NOT_FOUND -0x002E /**< OID is not found. 
*/ #define MBEDTLS_ERR_OID_BUF_TOO_SMALL -0x000B /**< output buffer is too small */ /* * Top level OID tuples */ #define MBEDTLS_OID_ISO_MEMBER_BODIES "\x2a" /* {iso(1) member-body(2)} */ #define MBEDTLS_OID_ISO_IDENTIFIED_ORG "\x2b" /* {iso(1) identified-organization(3)} */ #define MBEDTLS_OID_ISO_CCITT_DS "\x55" /* {joint-iso-ccitt(2) ds(5)} */ #define MBEDTLS_OID_ISO_ITU_COUNTRY "\x60" /* {joint-iso-itu-t(2) country(16)} */ /* * ISO Member bodies OID parts */ #define MBEDTLS_OID_COUNTRY_US "\x86\x48" /* {us(840)} */ #define MBEDTLS_OID_ORG_RSA_DATA_SECURITY "\x86\xf7\x0d" /* {rsadsi(113549)} */ #define MBEDTLS_OID_RSA_COMPANY MBEDTLS_OID_ISO_MEMBER_BODIES MBEDTLS_OID_COUNTRY_US \ MBEDTLS_OID_ORG_RSA_DATA_SECURITY /* {iso(1) member-body(2) us(840) rsadsi(113549)} */ #define MBEDTLS_OID_ORG_ANSI_X9_62 "\xce\x3d" /* ansi-X9-62(10045) */ #define MBEDTLS_OID_ANSI_X9_62 MBEDTLS_OID_ISO_MEMBER_BODIES MBEDTLS_OID_COUNTRY_US \ MBEDTLS_OID_ORG_ANSI_X9_62 /* * ISO Identified organization OID parts */ #define MBEDTLS_OID_ORG_DOD "\x06" /* {dod(6)} */ #define MBEDTLS_OID_ORG_OIW "\x0e" #define MBEDTLS_OID_OIW_SECSIG MBEDTLS_OID_ORG_OIW "\x03" #define MBEDTLS_OID_OIW_SECSIG_ALG MBEDTLS_OID_OIW_SECSIG "\x02" #define MBEDTLS_OID_OIW_SECSIG_SHA1 MBEDTLS_OID_OIW_SECSIG_ALG "\x1a" #define MBEDTLS_OID_ORG_CERTICOM "\x81\x04" /* certicom(132) */ #define MBEDTLS_OID_CERTICOM MBEDTLS_OID_ISO_IDENTIFIED_ORG MBEDTLS_OID_ORG_CERTICOM #define MBEDTLS_OID_ORG_TELETRUST "\x24" /* teletrust(36) */ #define MBEDTLS_OID_TELETRUST MBEDTLS_OID_ISO_IDENTIFIED_ORG MBEDTLS_OID_ORG_TELETRUST /* * ISO ITU OID parts */ #define MBEDTLS_OID_ORGANIZATION "\x01" /* {organization(1)} */ #define MBEDTLS_OID_ISO_ITU_US_ORG MBEDTLS_OID_ISO_ITU_COUNTRY MBEDTLS_OID_COUNTRY_US MBEDTLS_OID_ORGANIZATION /* {joint-iso-itu-t(2) country(16) us(840) organization(1)} */ #define MBEDTLS_OID_ORG_GOV "\x65" /* {gov(101)} */ #define MBEDTLS_OID_GOV MBEDTLS_OID_ISO_ITU_US_ORG MBEDTLS_OID_ORG_GOV /* {joint-iso-itu-t(2) country(16) us(840) organization(1) gov(101)} */ #define MBEDTLS_OID_ORG_NETSCAPE "\x86\xF8\x42" /* {netscape(113730)} */ #define MBEDTLS_OID_NETSCAPE MBEDTLS_OID_ISO_ITU_US_ORG MBEDTLS_OID_ORG_NETSCAPE /* Netscape OID {joint-iso-itu-t(2) country(16) us(840) organization(1) netscape(113730)} */ /* ISO arc for standard certificate and CRL extensions */ #define MBEDTLS_OID_ID_CE MBEDTLS_OID_ISO_CCITT_DS "\x1D" /**< id-ce OBJECT IDENTIFIER ::= {joint-iso-ccitt(2) ds(5) 29} */ /** * Private Internet Extensions * { iso(1) identified-organization(3) dod(6) internet(1) * security(5) mechanisms(5) pkix(7) } */ #define MBEDTLS_OID_PKIX MBEDTLS_OID_ISO_IDENTIFIED_ORG MBEDTLS_OID_ORG_DOD "\x01\x05\x05\x07" /* * Arc for standard naming attributes */ #define MBEDTLS_OID_AT MBEDTLS_OID_ISO_CCITT_DS "\x04" /**< id-at OBJECT IDENTIFIER ::= {joint-iso-ccitt(2) ds(5) 4} */ #define MBEDTLS_OID_AT_CN MBEDTLS_OID_AT "\x03" /**< id-at-commonName AttributeType:= {id-at 3} */ #define MBEDTLS_OID_AT_SUR_NAME MBEDTLS_OID_AT "\x04" /**< id-at-surName AttributeType:= {id-at 4} */ #define MBEDTLS_OID_AT_SERIAL_NUMBER MBEDTLS_OID_AT "\x05" /**< id-at-serialNumber AttributeType:= {id-at 5} */ #define MBEDTLS_OID_AT_COUNTRY MBEDTLS_OID_AT "\x06" /**< id-at-countryName AttributeType:= {id-at 6} */ #define MBEDTLS_OID_AT_LOCALITY MBEDTLS_OID_AT "\x07" /**< id-at-locality AttributeType:= {id-at 7} */ #define MBEDTLS_OID_AT_STATE MBEDTLS_OID_AT "\x08" /**< id-at-state AttributeType:= {id-at 8} */ #define MBEDTLS_OID_AT_ORGANIZATION MBEDTLS_OID_AT "\x0A" /**< 
id-at-organizationName AttributeType:= {id-at 10} */ #define MBEDTLS_OID_AT_ORG_UNIT MBEDTLS_OID_AT "\x0B" /**< id-at-organizationalUnitName AttributeType:= {id-at 11} */ #define MBEDTLS_OID_AT_TITLE MBEDTLS_OID_AT "\x0C" /**< id-at-title AttributeType:= {id-at 12} */ #define MBEDTLS_OID_AT_POSTAL_ADDRESS MBEDTLS_OID_AT "\x10" /**< id-at-postalAddress AttributeType:= {id-at 16} */ #define MBEDTLS_OID_AT_POSTAL_CODE MBEDTLS_OID_AT "\x11" /**< id-at-postalCode AttributeType:= {id-at 17} */ #define MBEDTLS_OID_AT_GIVEN_NAME MBEDTLS_OID_AT "\x2A" /**< id-at-givenName AttributeType:= {id-at 42} */ #define MBEDTLS_OID_AT_INITIALS MBEDTLS_OID_AT "\x2B" /**< id-at-initials AttributeType:= {id-at 43} */ #define MBEDTLS_OID_AT_GENERATION_QUALIFIER MBEDTLS_OID_AT "\x2C" /**< id-at-generationQualifier AttributeType:= {id-at 44} */ #define MBEDTLS_OID_AT_UNIQUE_IDENTIFIER MBEDTLS_OID_AT "\x2D" /**< id-at-uniqueIdentifier AttributType:= {id-at 45} */ #define MBEDTLS_OID_AT_DN_QUALIFIER MBEDTLS_OID_AT "\x2E" /**< id-at-dnQualifier AttributeType:= {id-at 46} */ #define MBEDTLS_OID_AT_PSEUDONYM MBEDTLS_OID_AT "\x41" /**< id-at-pseudonym AttributeType:= {id-at 65} */ #define MBEDTLS_OID_DOMAIN_COMPONENT "\x09\x92\x26\x89\x93\xF2\x2C\x64\x01\x19" /** id-domainComponent AttributeType:= {itu-t(0) data(9) pss(2342) ucl(19200300) pilot(100) pilotAttributeType(1) domainComponent(25)} */ /* * OIDs for standard certificate extensions */ #define MBEDTLS_OID_AUTHORITY_KEY_IDENTIFIER MBEDTLS_OID_ID_CE "\x23" /**< id-ce-authorityKeyIdentifier OBJECT IDENTIFIER ::= { id-ce 35 } */ #define MBEDTLS_OID_SUBJECT_KEY_IDENTIFIER MBEDTLS_OID_ID_CE "\x0E" /**< id-ce-subjectKeyIdentifier OBJECT IDENTIFIER ::= { id-ce 14 } */ #define MBEDTLS_OID_KEY_USAGE MBEDTLS_OID_ID_CE "\x0F" /**< id-ce-keyUsage OBJECT IDENTIFIER ::= { id-ce 15 } */ #define MBEDTLS_OID_CERTIFICATE_POLICIES MBEDTLS_OID_ID_CE "\x20" /**< id-ce-certificatePolicies OBJECT IDENTIFIER ::= { id-ce 32 } */ #define MBEDTLS_OID_POLICY_MAPPINGS MBEDTLS_OID_ID_CE "\x21" /**< id-ce-policyMappings OBJECT IDENTIFIER ::= { id-ce 33 } */ #define MBEDTLS_OID_SUBJECT_ALT_NAME MBEDTLS_OID_ID_CE "\x11" /**< id-ce-subjectAltName OBJECT IDENTIFIER ::= { id-ce 17 } */ #define MBEDTLS_OID_ISSUER_ALT_NAME MBEDTLS_OID_ID_CE "\x12" /**< id-ce-issuerAltName OBJECT IDENTIFIER ::= { id-ce 18 } */ #define MBEDTLS_OID_SUBJECT_DIRECTORY_ATTRS MBEDTLS_OID_ID_CE "\x09" /**< id-ce-subjectDirectoryAttributes OBJECT IDENTIFIER ::= { id-ce 9 } */ #define MBEDTLS_OID_BASIC_CONSTRAINTS MBEDTLS_OID_ID_CE "\x13" /**< id-ce-basicConstraints OBJECT IDENTIFIER ::= { id-ce 19 } */ #define MBEDTLS_OID_NAME_CONSTRAINTS MBEDTLS_OID_ID_CE "\x1E" /**< id-ce-nameConstraints OBJECT IDENTIFIER ::= { id-ce 30 } */ #define MBEDTLS_OID_POLICY_CONSTRAINTS MBEDTLS_OID_ID_CE "\x24" /**< id-ce-policyConstraints OBJECT IDENTIFIER ::= { id-ce 36 } */ #define MBEDTLS_OID_EXTENDED_KEY_USAGE MBEDTLS_OID_ID_CE "\x25" /**< id-ce-extKeyUsage OBJECT IDENTIFIER ::= { id-ce 37 } */ #define MBEDTLS_OID_CRL_DISTRIBUTION_POINTS MBEDTLS_OID_ID_CE "\x1F" /**< id-ce-cRLDistributionPoints OBJECT IDENTIFIER ::= { id-ce 31 } */ #define MBEDTLS_OID_INIHIBIT_ANYPOLICY MBEDTLS_OID_ID_CE "\x36" /**< id-ce-inhibitAnyPolicy OBJECT IDENTIFIER ::= { id-ce 54 } */ #define MBEDTLS_OID_FRESHEST_CRL MBEDTLS_OID_ID_CE "\x2E" /**< id-ce-freshestCRL OBJECT IDENTIFIER ::= { id-ce 46 } */ /* * Netscape certificate extensions */ #define MBEDTLS_OID_NS_CERT MBEDTLS_OID_NETSCAPE "\x01" #define MBEDTLS_OID_NS_CERT_TYPE MBEDTLS_OID_NS_CERT "\x01" #define 
MBEDTLS_OID_NS_BASE_URL MBEDTLS_OID_NS_CERT "\x02" #define MBEDTLS_OID_NS_REVOCATION_URL MBEDTLS_OID_NS_CERT "\x03" #define MBEDTLS_OID_NS_CA_REVOCATION_URL MBEDTLS_OID_NS_CERT "\x04" #define MBEDTLS_OID_NS_RENEWAL_URL MBEDTLS_OID_NS_CERT "\x07" #define MBEDTLS_OID_NS_CA_POLICY_URL MBEDTLS_OID_NS_CERT "\x08" #define MBEDTLS_OID_NS_SSL_SERVER_NAME MBEDTLS_OID_NS_CERT "\x0C" #define MBEDTLS_OID_NS_COMMENT MBEDTLS_OID_NS_CERT "\x0D" #define MBEDTLS_OID_NS_DATA_TYPE MBEDTLS_OID_NETSCAPE "\x02" #define MBEDTLS_OID_NS_CERT_SEQUENCE MBEDTLS_OID_NS_DATA_TYPE "\x05" /* * OIDs for CRL extensions */ #define MBEDTLS_OID_PRIVATE_KEY_USAGE_PERIOD MBEDTLS_OID_ID_CE "\x10" #define MBEDTLS_OID_CRL_NUMBER MBEDTLS_OID_ID_CE "\x14" /**< id-ce-cRLNumber OBJECT IDENTIFIER ::= { id-ce 20 } */ /* * X.509 v3 Extended key usage OIDs */ #define MBEDTLS_OID_ANY_EXTENDED_KEY_USAGE MBEDTLS_OID_EXTENDED_KEY_USAGE "\x00" /**< anyExtendedKeyUsage OBJECT IDENTIFIER ::= { id-ce-extKeyUsage 0 } */ #define MBEDTLS_OID_KP MBEDTLS_OID_PKIX "\x03" /**< id-kp OBJECT IDENTIFIER ::= { id-pkix 3 } */ #define MBEDTLS_OID_SERVER_AUTH MBEDTLS_OID_KP "\x01" /**< id-kp-serverAuth OBJECT IDENTIFIER ::= { id-kp 1 } */ #define MBEDTLS_OID_CLIENT_AUTH MBEDTLS_OID_KP "\x02" /**< id-kp-clientAuth OBJECT IDENTIFIER ::= { id-kp 2 } */ #define MBEDTLS_OID_CODE_SIGNING MBEDTLS_OID_KP "\x03" /**< id-kp-codeSigning OBJECT IDENTIFIER ::= { id-kp 3 } */ #define MBEDTLS_OID_EMAIL_PROTECTION MBEDTLS_OID_KP "\x04" /**< id-kp-emailProtection OBJECT IDENTIFIER ::= { id-kp 4 } */ #define MBEDTLS_OID_TIME_STAMPING MBEDTLS_OID_KP "\x08" /**< id-kp-timeStamping OBJECT IDENTIFIER ::= { id-kp 8 } */ #define MBEDTLS_OID_OCSP_SIGNING MBEDTLS_OID_KP "\x09" /**< id-kp-OCSPSigning OBJECT IDENTIFIER ::= { id-kp 9 } */ /* * PKCS definition OIDs */ #define MBEDTLS_OID_PKCS MBEDTLS_OID_RSA_COMPANY "\x01" /**< pkcs OBJECT IDENTIFIER ::= { iso(1) member-body(2) us(840) rsadsi(113549) 1 } */ #define MBEDTLS_OID_PKCS1 MBEDTLS_OID_PKCS "\x01" /**< pkcs-1 OBJECT IDENTIFIER ::= { iso(1) member-body(2) us(840) rsadsi(113549) pkcs(1) 1 } */ #define MBEDTLS_OID_PKCS5 MBEDTLS_OID_PKCS "\x05" /**< pkcs-5 OBJECT IDENTIFIER ::= { iso(1) member-body(2) us(840) rsadsi(113549) pkcs(1) 5 } */ #define MBEDTLS_OID_PKCS9 MBEDTLS_OID_PKCS "\x09" /**< pkcs-9 OBJECT IDENTIFIER ::= { iso(1) member-body(2) us(840) rsadsi(113549) pkcs(1) 9 } */ #define MBEDTLS_OID_PKCS12 MBEDTLS_OID_PKCS "\x0c" /**< pkcs-12 OBJECT IDENTIFIER ::= { iso(1) member-body(2) us(840) rsadsi(113549) pkcs(1) 12 } */ /* * PKCS#1 OIDs */ #define MBEDTLS_OID_PKCS1_RSA MBEDTLS_OID_PKCS1 "\x01" /**< rsaEncryption OBJECT IDENTIFIER ::= { pkcs-1 1 } */ #define MBEDTLS_OID_PKCS1_MD2 MBEDTLS_OID_PKCS1 "\x02" /**< md2WithRSAEncryption ::= { pkcs-1 2 } */ #define MBEDTLS_OID_PKCS1_MD4 MBEDTLS_OID_PKCS1 "\x03" /**< md4WithRSAEncryption ::= { pkcs-1 3 } */ #define MBEDTLS_OID_PKCS1_MD5 MBEDTLS_OID_PKCS1 "\x04" /**< md5WithRSAEncryption ::= { pkcs-1 4 } */ #define MBEDTLS_OID_PKCS1_SHA1 MBEDTLS_OID_PKCS1 "\x05" /**< sha1WithRSAEncryption ::= { pkcs-1 5 } */ #define MBEDTLS_OID_PKCS1_SHA224 MBEDTLS_OID_PKCS1 "\x0e" /**< sha224WithRSAEncryption ::= { pkcs-1 14 } */ #define MBEDTLS_OID_PKCS1_SHA256 MBEDTLS_OID_PKCS1 "\x0b" /**< sha256WithRSAEncryption ::= { pkcs-1 11 } */ #define MBEDTLS_OID_PKCS1_SHA384 MBEDTLS_OID_PKCS1 "\x0c" /**< sha384WithRSAEncryption ::= { pkcs-1 12 } */ #define MBEDTLS_OID_PKCS1_SHA512 MBEDTLS_OID_PKCS1 "\x0d" /**< sha512WithRSAEncryption ::= { pkcs-1 13 } */ #define MBEDTLS_OID_RSA_SHA_OBS "\x2B\x0E\x03\x02\x1D" 
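/*
 * Illustrative expansion (added comment, not upstream text): these OID
 * macros are built by plain C string-literal concatenation. For
 * example, MBEDTLS_OID_PKCS1_SHA256 expands through MBEDTLS_OID_PKCS1,
 * MBEDTLS_OID_PKCS and MBEDTLS_OID_RSA_COMPANY to the DER body
 * "\x2a\x86\x48\x86\xf7\x0d\x01\x01\x0b", i.e. the encoding of
 * { 1 2 840 113549 1 1 11 } (sha256WithRSAEncryption).
 */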
#define MBEDTLS_OID_PKCS9_EMAIL MBEDTLS_OID_PKCS9 "\x01" /**< emailAddress AttributeType ::= { pkcs-9 1 } */ /* RFC 4055 */ #define MBEDTLS_OID_RSASSA_PSS MBEDTLS_OID_PKCS1 "\x0a" /**< id-RSASSA-PSS ::= { pkcs-1 10 } */ #define MBEDTLS_OID_MGF1 MBEDTLS_OID_PKCS1 "\x08" /**< id-mgf1 ::= { pkcs-1 8 } */ /* * Digest algorithms */ #define MBEDTLS_OID_DIGEST_ALG_MD2 MBEDTLS_OID_RSA_COMPANY "\x02\x02" /**< id-mbedtls_md2 OBJECT IDENTIFIER ::= { iso(1) member-body(2) us(840) rsadsi(113549) digestAlgorithm(2) 2 } */ #define MBEDTLS_OID_DIGEST_ALG_MD4 MBEDTLS_OID_RSA_COMPANY "\x02\x04" /**< id-mbedtls_md4 OBJECT IDENTIFIER ::= { iso(1) member-body(2) us(840) rsadsi(113549) digestAlgorithm(2) 4 } */ #define MBEDTLS_OID_DIGEST_ALG_MD5 MBEDTLS_OID_RSA_COMPANY "\x02\x05" /**< id-mbedtls_md5 OBJECT IDENTIFIER ::= { iso(1) member-body(2) us(840) rsadsi(113549) digestAlgorithm(2) 5 } */ #define MBEDTLS_OID_DIGEST_ALG_SHA1 MBEDTLS_OID_ISO_IDENTIFIED_ORG MBEDTLS_OID_OIW_SECSIG_SHA1 /**< id-mbedtls_sha1 OBJECT IDENTIFIER ::= { iso(1) identified-organization(3) oiw(14) secsig(3) algorithms(2) 26 } */ #define MBEDTLS_OID_DIGEST_ALG_SHA224 MBEDTLS_OID_GOV "\x03\x04\x02\x04" /**< id-sha224 OBJECT IDENTIFIER ::= { joint-iso-itu-t(2) country(16) us(840) organization(1) gov(101) csor(3) nistalgorithm(4) hashalgs(2) 4 } */ #define MBEDTLS_OID_DIGEST_ALG_SHA256 MBEDTLS_OID_GOV "\x03\x04\x02\x01" /**< id-mbedtls_sha256 OBJECT IDENTIFIER ::= { joint-iso-itu-t(2) country(16) us(840) organization(1) gov(101) csor(3) nistalgorithm(4) hashalgs(2) 1 } */ #define MBEDTLS_OID_DIGEST_ALG_SHA384 MBEDTLS_OID_GOV "\x03\x04\x02\x02" /**< id-sha384 OBJECT IDENTIFIER ::= { joint-iso-itu-t(2) country(16) us(840) organization(1) gov(101) csor(3) nistalgorithm(4) hashalgs(2) 2 } */ #define MBEDTLS_OID_DIGEST_ALG_SHA512 MBEDTLS_OID_GOV "\x03\x04\x02\x03" /**< id-mbedtls_sha512 OBJECT IDENTIFIER ::= { joint-iso-itu-t(2) country(16) us(840) organization(1) gov(101) csor(3) nistalgorithm(4) hashalgs(2) 3 } */ #define MBEDTLS_OID_HMAC_SHA1 MBEDTLS_OID_RSA_COMPANY "\x02\x07" /**< id-hmacWithSHA1 OBJECT IDENTIFIER ::= { iso(1) member-body(2) us(840) rsadsi(113549) digestAlgorithm(2) 7 } */ /* * Encryption algorithms */ #define MBEDTLS_OID_DES_CBC MBEDTLS_OID_ISO_IDENTIFIED_ORG MBEDTLS_OID_OIW_SECSIG_ALG "\x07" /**< desCBC OBJECT IDENTIFIER ::= { iso(1) identified-organization(3) oiw(14) secsig(3) algorithms(2) 7 } */ #define MBEDTLS_OID_DES_EDE3_CBC MBEDTLS_OID_RSA_COMPANY "\x03\x07" /**< des-ede3-cbc OBJECT IDENTIFIER ::= { iso(1) member-body(2) -- us(840) rsadsi(113549) encryptionAlgorithm(3) 7 } */ /* * PKCS#5 OIDs */ #define MBEDTLS_OID_PKCS5_PBKDF2 MBEDTLS_OID_PKCS5 "\x0c" /**< id-PBKDF2 OBJECT IDENTIFIER ::= {pkcs-5 12} */ #define MBEDTLS_OID_PKCS5_PBES2 MBEDTLS_OID_PKCS5 "\x0d" /**< id-PBES2 OBJECT IDENTIFIER ::= {pkcs-5 13} */ #define MBEDTLS_OID_PKCS5_PBMAC1 MBEDTLS_OID_PKCS5 "\x0e" /**< id-PBMAC1 OBJECT IDENTIFIER ::= {pkcs-5 14} */ /* * PKCS#5 PBES1 algorithms */ #define MBEDTLS_OID_PKCS5_PBE_MD2_DES_CBC MBEDTLS_OID_PKCS5 "\x01" /**< pbeWithMD2AndDES-CBC OBJECT IDENTIFIER ::= {pkcs-5 1} */ #define MBEDTLS_OID_PKCS5_PBE_MD2_RC2_CBC MBEDTLS_OID_PKCS5 "\x04" /**< pbeWithMD2AndRC2-CBC OBJECT IDENTIFIER ::= {pkcs-5 4} */ #define MBEDTLS_OID_PKCS5_PBE_MD5_DES_CBC MBEDTLS_OID_PKCS5 "\x03" /**< pbeWithMD5AndDES-CBC OBJECT IDENTIFIER ::= {pkcs-5 3} */ #define MBEDTLS_OID_PKCS5_PBE_MD5_RC2_CBC MBEDTLS_OID_PKCS5 "\x06" /**< pbeWithMD5AndRC2-CBC OBJECT IDENTIFIER ::= {pkcs-5 6} */ #define MBEDTLS_OID_PKCS5_PBE_SHA1_DES_CBC MBEDTLS_OID_PKCS5 
"\x0a" /**< pbeWithSHA1AndDES-CBC OBJECT IDENTIFIER ::= {pkcs-5 10} */ #define MBEDTLS_OID_PKCS5_PBE_SHA1_RC2_CBC MBEDTLS_OID_PKCS5 "\x0b" /**< pbeWithSHA1AndRC2-CBC OBJECT IDENTIFIER ::= {pkcs-5 11} */ /* * PKCS#8 OIDs */ #define MBEDTLS_OID_PKCS9_CSR_EXT_REQ MBEDTLS_OID_PKCS9 "\x0e" /**< extensionRequest OBJECT IDENTIFIER ::= {pkcs-9 14} */ /* * PKCS#12 PBE OIDs */ #define MBEDTLS_OID_PKCS12_PBE MBEDTLS_OID_PKCS12 "\x01" /**< pkcs-12PbeIds OBJECT IDENTIFIER ::= {pkcs-12 1} */ #define MBEDTLS_OID_PKCS12_PBE_SHA1_RC4_128 MBEDTLS_OID_PKCS12_PBE "\x01" /**< pbeWithSHAAnd128BitRC4 OBJECT IDENTIFIER ::= {pkcs-12PbeIds 1} */ #define MBEDTLS_OID_PKCS12_PBE_SHA1_RC4_40 MBEDTLS_OID_PKCS12_PBE "\x02" /**< pbeWithSHAAnd40BitRC4 OBJECT IDENTIFIER ::= {pkcs-12PbeIds 2} */ #define MBEDTLS_OID_PKCS12_PBE_SHA1_DES3_EDE_CBC MBEDTLS_OID_PKCS12_PBE "\x03" /**< pbeWithSHAAnd3-KeyTripleDES-CBC OBJECT IDENTIFIER ::= {pkcs-12PbeIds 3} */ #define MBEDTLS_OID_PKCS12_PBE_SHA1_DES2_EDE_CBC MBEDTLS_OID_PKCS12_PBE "\x04" /**< pbeWithSHAAnd2-KeyTripleDES-CBC OBJECT IDENTIFIER ::= {pkcs-12PbeIds 4} */ #define MBEDTLS_OID_PKCS12_PBE_SHA1_RC2_128_CBC MBEDTLS_OID_PKCS12_PBE "\x05" /**< pbeWithSHAAnd128BitRC2-CBC OBJECT IDENTIFIER ::= {pkcs-12PbeIds 5} */ #define MBEDTLS_OID_PKCS12_PBE_SHA1_RC2_40_CBC MBEDTLS_OID_PKCS12_PBE "\x06" /**< pbeWithSHAAnd40BitRC2-CBC OBJECT IDENTIFIER ::= {pkcs-12PbeIds 6} */ /* * EC key algorithms from RFC 5480 */ /* id-ecPublicKey OBJECT IDENTIFIER ::= { * iso(1) member-body(2) us(840) ansi-X9-62(10045) keyType(2) 1 } */ #define MBEDTLS_OID_EC_ALG_UNRESTRICTED MBEDTLS_OID_ANSI_X9_62 "\x02\01" /* id-ecDH OBJECT IDENTIFIER ::= { * iso(1) identified-organization(3) certicom(132) * schemes(1) ecdh(12) } */ #define MBEDTLS_OID_EC_ALG_ECDH MBEDTLS_OID_CERTICOM "\x01\x0c" /* * ECParameters namedCurve identifiers, from RFC 5480, RFC 5639, and SEC2 */ /* secp192r1 OBJECT IDENTIFIER ::= { * iso(1) member-body(2) us(840) ansi-X9-62(10045) curves(3) prime(1) 1 } */ #define MBEDTLS_OID_EC_GRP_SECP192R1 MBEDTLS_OID_ANSI_X9_62 "\x03\x01\x01" /* secp224r1 OBJECT IDENTIFIER ::= { * iso(1) identified-organization(3) certicom(132) curve(0) 33 } */ #define MBEDTLS_OID_EC_GRP_SECP224R1 MBEDTLS_OID_CERTICOM "\x00\x21" /* secp256r1 OBJECT IDENTIFIER ::= { * iso(1) member-body(2) us(840) ansi-X9-62(10045) curves(3) prime(1) 7 } */ #define MBEDTLS_OID_EC_GRP_SECP256R1 MBEDTLS_OID_ANSI_X9_62 "\x03\x01\x07" /* secp384r1 OBJECT IDENTIFIER ::= { * iso(1) identified-organization(3) certicom(132) curve(0) 34 } */ #define MBEDTLS_OID_EC_GRP_SECP384R1 MBEDTLS_OID_CERTICOM "\x00\x22" /* secp521r1 OBJECT IDENTIFIER ::= { * iso(1) identified-organization(3) certicom(132) curve(0) 35 } */ #define MBEDTLS_OID_EC_GRP_SECP521R1 MBEDTLS_OID_CERTICOM "\x00\x23" /* secp192k1 OBJECT IDENTIFIER ::= { * iso(1) identified-organization(3) certicom(132) curve(0) 31 } */ #define MBEDTLS_OID_EC_GRP_SECP192K1 MBEDTLS_OID_CERTICOM "\x00\x1f" /* secp224k1 OBJECT IDENTIFIER ::= { * iso(1) identified-organization(3) certicom(132) curve(0) 32 } */ #define MBEDTLS_OID_EC_GRP_SECP224K1 MBEDTLS_OID_CERTICOM "\x00\x20" /* secp256k1 OBJECT IDENTIFIER ::= { * iso(1) identified-organization(3) certicom(132) curve(0) 10 } */ #define MBEDTLS_OID_EC_GRP_SECP256K1 MBEDTLS_OID_CERTICOM "\x00\x0a" /* RFC 5639 4.1 * ecStdCurvesAndGeneration OBJECT IDENTIFIER::= {iso(1) * identified-organization(3) teletrust(36) algorithm(3) signature- * algorithm(3) ecSign(2) 8} * ellipticCurve OBJECT IDENTIFIER ::= {ecStdCurvesAndGeneration 1} * versionOne OBJECT IDENTIFIER ::= 
{ellipticCurve 1} */ #define MBEDTLS_OID_EC_BRAINPOOL_V1 MBEDTLS_OID_TELETRUST "\x03\x03\x02\x08\x01\x01" /* brainpoolP256r1 OBJECT IDENTIFIER ::= {versionOne 7} */ #define MBEDTLS_OID_EC_GRP_BP256R1 MBEDTLS_OID_EC_BRAINPOOL_V1 "\x07" /* brainpoolP384r1 OBJECT IDENTIFIER ::= {versionOne 11} */ #define MBEDTLS_OID_EC_GRP_BP384R1 MBEDTLS_OID_EC_BRAINPOOL_V1 "\x0B" /* brainpoolP512r1 OBJECT IDENTIFIER ::= {versionOne 13} */ #define MBEDTLS_OID_EC_GRP_BP512R1 MBEDTLS_OID_EC_BRAINPOOL_V1 "\x0D" /* * SEC1 C.1 * * prime-field OBJECT IDENTIFIER ::= { id-fieldType 1 } * id-fieldType OBJECT IDENTIFIER ::= { ansi-X9-62 fieldType(1)} */ #define MBEDTLS_OID_ANSI_X9_62_FIELD_TYPE MBEDTLS_OID_ANSI_X9_62 "\x01" #define MBEDTLS_OID_ANSI_X9_62_PRIME_FIELD MBEDTLS_OID_ANSI_X9_62_FIELD_TYPE "\x01" /* * ECDSA signature identifiers, from RFC 5480 */ #define MBEDTLS_OID_ANSI_X9_62_SIG MBEDTLS_OID_ANSI_X9_62 "\x04" /* signatures(4) */ #define MBEDTLS_OID_ANSI_X9_62_SIG_SHA2 MBEDTLS_OID_ANSI_X9_62_SIG "\x03" /* ecdsa-with-SHA2(3) */ /* ecdsa-with-SHA1 OBJECT IDENTIFIER ::= { * iso(1) member-body(2) us(840) ansi-X9-62(10045) signatures(4) 1 } */ #define MBEDTLS_OID_ECDSA_SHA1 MBEDTLS_OID_ANSI_X9_62_SIG "\x01" /* ecdsa-with-SHA224 OBJECT IDENTIFIER ::= { * iso(1) member-body(2) us(840) ansi-X9-62(10045) signatures(4) * ecdsa-with-SHA2(3) 1 } */ #define MBEDTLS_OID_ECDSA_SHA224 MBEDTLS_OID_ANSI_X9_62_SIG_SHA2 "\x01" /* ecdsa-with-SHA256 OBJECT IDENTIFIER ::= { * iso(1) member-body(2) us(840) ansi-X9-62(10045) signatures(4) * ecdsa-with-SHA2(3) 2 } */ #define MBEDTLS_OID_ECDSA_SHA256 MBEDTLS_OID_ANSI_X9_62_SIG_SHA2 "\x02" /* ecdsa-with-SHA384 OBJECT IDENTIFIER ::= { * iso(1) member-body(2) us(840) ansi-X9-62(10045) signatures(4) * ecdsa-with-SHA2(3) 3 } */ #define MBEDTLS_OID_ECDSA_SHA384 MBEDTLS_OID_ANSI_X9_62_SIG_SHA2 "\x03" /* ecdsa-with-SHA512 OBJECT IDENTIFIER ::= { * iso(1) member-body(2) us(840) ansi-X9-62(10045) signatures(4) * ecdsa-with-SHA2(3) 4 } */ #define MBEDTLS_OID_ECDSA_SHA512 MBEDTLS_OID_ANSI_X9_62_SIG_SHA2 "\x04" #ifdef __cplusplus extern "C" { #endif /** * \brief Base OID descriptor structure */ typedef struct { const char *asn1; /*!< OID ASN.1 representation */ size_t asn1_len; /*!< length of asn1 */ const char *name; /*!< official name (e.g. from RFC) */ const char *description; /*!< human friendly description */ } mbedtls_oid_descriptor_t; /** * \brief Translate an ASN.1 OID into its numeric representation * (e.g. "\x2A\x86\x48\x86\xF7\x0D" into "1.2.840.113549") * * \param buf buffer to put representation in * \param size size of the buffer * \param oid OID to translate * * \return Length of the string written (excluding final NULL) or * MBEDTLS_ERR_OID_BUF_TOO_SMALL in case of error */ int mbedtls_oid_get_numeric_string( char *buf, size_t size, const mbedtls_asn1_buf *oid ); #if defined(MBEDTLS_X509_USE_C) || defined(MBEDTLS_X509_CREATE_C) /** * \brief Translate an X.509 extension OID into local values * * \param oid OID to use * \param ext_type place to store the extension type * * \return 0 if successful, or MBEDTLS_ERR_OID_NOT_FOUND */ int mbedtls_oid_get_x509_ext_type( const mbedtls_asn1_buf *oid, int *ext_type ); #endif /** * \brief Translate an X.509 attribute type OID into the short name * (e.g. 
the OID for an X520 Common Name into "CN") * * \param oid OID to use * \param short_name place to store the string pointer * * \return 0 if successful, or MBEDTLS_ERR_OID_NOT_FOUND */ int mbedtls_oid_get_attr_short_name( const mbedtls_asn1_buf *oid, const char **short_name ); /** * \brief Translate PublicKeyAlgorithm OID into pk_type * * \param oid OID to use * \param pk_alg place to store public key algorithm * * \return 0 if successful, or MBEDTLS_ERR_OID_NOT_FOUND */ int mbedtls_oid_get_pk_alg( const mbedtls_asn1_buf *oid, mbedtls_pk_type_t *pk_alg ); /** * \brief Translate pk_type into PublicKeyAlgorithm OID * * \param pk_alg Public key type to look for * \param oid place to store ASN.1 OID string pointer * \param olen length of the OID * * \return 0 if successful, or MBEDTLS_ERR_OID_NOT_FOUND */ int mbedtls_oid_get_oid_by_pk_alg( mbedtls_pk_type_t pk_alg, const char **oid, size_t *olen ); #if defined(MBEDTLS_ECP_C) /** * \brief Translate NamedCurve OID into an EC group identifier * * \param oid OID to use * \param grp_id place to store group id * * \return 0 if successful, or MBEDTLS_ERR_OID_NOT_FOUND */ int mbedtls_oid_get_ec_grp( const mbedtls_asn1_buf *oid, mbedtls_ecp_group_id *grp_id ); /** * \brief Translate EC group identifier into NamedCurve OID * * \param grp_id EC group identifier * \param oid place to store ASN.1 OID string pointer * \param olen length of the OID * * \return 0 if successful, or MBEDTLS_ERR_OID_NOT_FOUND */ int mbedtls_oid_get_oid_by_ec_grp( mbedtls_ecp_group_id grp_id, const char **oid, size_t *olen ); #endif /* MBEDTLS_ECP_C */ #if defined(MBEDTLS_MD_C) /** * \brief Translate SignatureAlgorithm OID into md_type and pk_type * * \param oid OID to use * \param md_alg place to store message digest algorithm * \param pk_alg place to store public key algorithm * * \return 0 if successful, or MBEDTLS_ERR_OID_NOT_FOUND */ int mbedtls_oid_get_sig_alg( const mbedtls_asn1_buf *oid, mbedtls_md_type_t *md_alg, mbedtls_pk_type_t *pk_alg ); /** * \brief Translate SignatureAlgorithm OID into description * * \param oid OID to use * \param desc place to store string pointer * * \return 0 if successful, or MBEDTLS_ERR_OID_NOT_FOUND */ int mbedtls_oid_get_sig_alg_desc( const mbedtls_asn1_buf *oid, const char **desc ); /** * \brief Translate md_type and pk_type into SignatureAlgorithm OID * * \param md_alg message digest algorithm * \param pk_alg public key algorithm * \param oid place to store ASN.1 OID string pointer * \param olen length of the OID * * \return 0 if successful, or MBEDTLS_ERR_OID_NOT_FOUND */ int mbedtls_oid_get_oid_by_sig_alg( mbedtls_pk_type_t pk_alg, mbedtls_md_type_t md_alg, const char **oid, size_t *olen ); /** * \brief Translate hash algorithm OID into md_type * * \param oid OID to use * \param md_alg place to store message digest algorithm * * \return 0 if successful, or MBEDTLS_ERR_OID_NOT_FOUND */ int mbedtls_oid_get_md_alg( const mbedtls_asn1_buf *oid, mbedtls_md_type_t *md_alg ); #endif /* MBEDTLS_MD_C */ /** * \brief Translate Extended Key Usage OID into description * * \param oid OID to use * \param desc place to store string pointer * * \return 0 if successful, or MBEDTLS_ERR_OID_NOT_FOUND */ int mbedtls_oid_get_extended_key_usage( const mbedtls_asn1_buf *oid, const char **desc ); /** * \brief Translate md_type into hash algorithm OID * * \param md_alg message digest algorithm * \param oid place to store ASN.1 OID string pointer * \param olen length of the OID * * \return 0 if successful, or MBEDTLS_ERR_OID_NOT_FOUND */ int 
mbedtls_oid_get_oid_by_md( mbedtls_md_type_t md_alg, const char **oid, size_t *olen ); #if defined(MBEDTLS_CIPHER_C) /** * \brief Translate encryption algorithm OID into cipher_type * * \param oid OID to use * \param cipher_alg place to store cipher algorithm * * \return 0 if successful, or MBEDTLS_ERR_OID_NOT_FOUND */ int mbedtls_oid_get_cipher_alg( const mbedtls_asn1_buf *oid, mbedtls_cipher_type_t *cipher_alg ); #endif /* MBEDTLS_CIPHER_C */ #if defined(MBEDTLS_PKCS12_C) /** * \brief Translate PKCS#12 PBE algorithm OID into md_type and * cipher_type * * \param oid OID to use * \param md_alg place to store message digest algorithm * \param cipher_alg place to store cipher algorithm * * \return 0 if successful, or MBEDTLS_ERR_OID_NOT_FOUND */ int mbedtls_oid_get_pkcs12_pbe_alg( const mbedtls_asn1_buf *oid, mbedtls_md_type_t *md_alg, mbedtls_cipher_type_t *cipher_alg ); #endif /* MBEDTLS_PKCS12_C */ #ifdef __cplusplus } #endif #endif /* oid.h */ backport-iwlwifi-dkms-7906/compat/verification/mbedtls/pk.h000066400000000000000000000531021351064634500240640ustar00rootroot00000000000000/** * \file pk.h * * \brief Public Key abstraction layer * * Copyright (C) 2006-2015, ARM Limited, All Rights Reserved * SPDX-License-Identifier: GPL-2.0 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * * This file is part of mbed TLS (https://tls.mbed.org) */ #ifndef MBEDTLS_PK_H #define MBEDTLS_PK_H #if !defined(MBEDTLS_CONFIG_FILE) #include "config.h" #else #include MBEDTLS_CONFIG_FILE #endif #include "md.h" #if defined(MBEDTLS_RSA_C) #include "rsa.h" #endif #if defined(MBEDTLS_ECP_C) #include "ecp.h" #endif #if defined(MBEDTLS_ECDSA_C) #include "ecdsa.h" #endif #if ( defined(__ARMCC_VERSION) || defined(_MSC_VER) ) && \ !defined(inline) && !defined(__cplusplus) #define inline __inline #endif #define MBEDTLS_ERR_PK_ALLOC_FAILED -0x3F80 /**< Memory allocation failed. */ #define MBEDTLS_ERR_PK_TYPE_MISMATCH -0x3F00 /**< Type mismatch, eg attempt to encrypt with an ECDSA key */ #define MBEDTLS_ERR_PK_BAD_INPUT_DATA -0x3E80 /**< Bad input parameters to function. */ #define MBEDTLS_ERR_PK_FILE_IO_ERROR -0x3E00 /**< Read/write of file failed. */ #define MBEDTLS_ERR_PK_KEY_INVALID_VERSION -0x3D80 /**< Unsupported key version */ #define MBEDTLS_ERR_PK_KEY_INVALID_FORMAT -0x3D00 /**< Invalid key tag or value. */ #define MBEDTLS_ERR_PK_UNKNOWN_PK_ALG -0x3C80 /**< Key algorithm is unsupported (only RSA and EC are supported). */ #define MBEDTLS_ERR_PK_PASSWORD_REQUIRED -0x3C00 /**< Private key password can't be empty. */ #define MBEDTLS_ERR_PK_PASSWORD_MISMATCH -0x3B80 /**< Given private key password does not allow for correct decryption. */ #define MBEDTLS_ERR_PK_INVALID_PUBKEY -0x3B00 /**< The pubkey tag or value is invalid (only RSA and EC are supported). */ #define MBEDTLS_ERR_PK_INVALID_ALG -0x3A80 /**< The algorithm tag or value is invalid. 
*/ #define MBEDTLS_ERR_PK_UNKNOWN_NAMED_CURVE -0x3A00 /**< Elliptic curve is unsupported (only NIST curves are supported). */ #define MBEDTLS_ERR_PK_FEATURE_UNAVAILABLE -0x3980 /**< Unavailable feature, e.g. RSA disabled for RSA key. */ #define MBEDTLS_ERR_PK_SIG_LEN_MISMATCH -0x3900 /**< The signature is valid but its length is less than expected. */ #ifdef __cplusplus extern "C" { #endif /** * \brief Public key types */ typedef enum { MBEDTLS_PK_NONE=0, MBEDTLS_PK_RSA, MBEDTLS_PK_ECKEY, MBEDTLS_PK_ECKEY_DH, MBEDTLS_PK_ECDSA, MBEDTLS_PK_RSA_ALT, MBEDTLS_PK_RSASSA_PSS, } mbedtls_pk_type_t; /** * \brief Options for RSASSA-PSS signature verification. * See \c mbedtls_rsa_rsassa_pss_verify_ext() */ typedef struct { mbedtls_md_type_t mgf1_hash_id; int expected_salt_len; } mbedtls_pk_rsassa_pss_options; /** * \brief Types for interfacing with the debug module */ typedef enum { MBEDTLS_PK_DEBUG_NONE = 0, MBEDTLS_PK_DEBUG_MPI, MBEDTLS_PK_DEBUG_ECP, } mbedtls_pk_debug_type; /** * \brief Item to send to the debug module */ typedef struct { mbedtls_pk_debug_type type; const char *name; void *value; } mbedtls_pk_debug_item; /** Maximum number of item send for debugging, plus 1 */ #define MBEDTLS_PK_DEBUG_MAX_ITEMS 3 /** * \brief Public key information and operations */ typedef struct mbedtls_pk_info_t mbedtls_pk_info_t; /** * \brief Public key container */ typedef struct { const mbedtls_pk_info_t * pk_info; /**< Public key informations */ void * pk_ctx; /**< Underlying public key context */ } mbedtls_pk_context; #if defined(MBEDTLS_RSA_C) /** * Quick access to an RSA context inside a PK context. * * \warning You must make sure the PK context actually holds an RSA context * before using this function! */ static inline mbedtls_rsa_context *mbedtls_pk_rsa( const mbedtls_pk_context pk ) { return( (mbedtls_rsa_context *) (pk).pk_ctx ); } #endif /* MBEDTLS_RSA_C */ #if defined(MBEDTLS_ECP_C) /** * Quick access to an EC context inside a PK context. * * \warning You must make sure the PK context actually holds an EC context * before using this function! */ static inline mbedtls_ecp_keypair *mbedtls_pk_ec( const mbedtls_pk_context pk ) { return( (mbedtls_ecp_keypair *) (pk).pk_ctx ); } #endif /* MBEDTLS_ECP_C */ #if defined(MBEDTLS_PK_RSA_ALT_SUPPORT) /** * \brief Types for RSA-alt abstraction */ typedef int (*mbedtls_pk_rsa_alt_decrypt_func)( void *ctx, int mode, size_t *olen, const unsigned char *input, unsigned char *output, size_t output_max_len ); typedef int (*mbedtls_pk_rsa_alt_sign_func)( void *ctx, int (*f_rng)(void *, unsigned char *, size_t), void *p_rng, int mode, mbedtls_md_type_t md_alg, unsigned int hashlen, const unsigned char *hash, unsigned char *sig ); typedef size_t (*mbedtls_pk_rsa_alt_key_len_func)( void *ctx ); #endif /* MBEDTLS_PK_RSA_ALT_SUPPORT */ /** * \brief Return information associated with the given PK type * * \param pk_type PK type to search for. * * \return The PK info associated with the type or NULL if not found. */ const mbedtls_pk_info_t *mbedtls_pk_info_from_type( mbedtls_pk_type_t pk_type ); /** * \brief Initialize a mbedtls_pk_context (as NONE) */ void mbedtls_pk_init( mbedtls_pk_context *ctx ); /** * \brief Free a mbedtls_pk_context */ void mbedtls_pk_free( mbedtls_pk_context *ctx ); /** * \brief Initialize a PK context with the information given * and allocates the type-specific PK subcontext. * * \param ctx Context to initialize. Must be empty (type NONE). 
* \param info Information to use * * \return 0 on success, * MBEDTLS_ERR_PK_BAD_INPUT_DATA on invalid input, * MBEDTLS_ERR_PK_ALLOC_FAILED on allocation failure. * * \note For contexts holding an RSA-alt key, use * \c mbedtls_pk_setup_rsa_alt() instead. */ int mbedtls_pk_setup( mbedtls_pk_context *ctx, const mbedtls_pk_info_t *info ); #if defined(MBEDTLS_PK_RSA_ALT_SUPPORT) /** * \brief Initialize an RSA-alt context * * \param ctx Context to initialize. Must be empty (type NONE). * \param key RSA key pointer * \param decrypt_func Decryption function * \param sign_func Signing function * \param key_len_func Function returning key length in bytes * * \return 0 on success, or MBEDTLS_ERR_PK_BAD_INPUT_DATA if the * context wasn't already initialized as RSA_ALT. * * \note This function replaces \c mbedtls_pk_setup() for RSA-alt. */ int mbedtls_pk_setup_rsa_alt( mbedtls_pk_context *ctx, void * key, mbedtls_pk_rsa_alt_decrypt_func decrypt_func, mbedtls_pk_rsa_alt_sign_func sign_func, mbedtls_pk_rsa_alt_key_len_func key_len_func ); #endif /* MBEDTLS_PK_RSA_ALT_SUPPORT */ /** * \brief Get the size in bits of the underlying key * * \param ctx Context to use * * \return Key size in bits, or 0 on error */ size_t mbedtls_pk_get_bitlen( const mbedtls_pk_context *ctx ); /** * \brief Get the length in bytes of the underlying key * \param ctx Context to use * * \return Key length in bytes, or 0 on error */ static inline size_t mbedtls_pk_get_len( const mbedtls_pk_context *ctx ) { return( ( mbedtls_pk_get_bitlen( ctx ) + 7 ) / 8 ); } /** * \brief Tell if a context can do the operation given by type * * \param ctx Context to test * \param type Target type * * \return 0 if context can't do the operations, * 1 otherwise. */ int mbedtls_pk_can_do( const mbedtls_pk_context *ctx, mbedtls_pk_type_t type ); /** * \brief Verify signature (including padding if relevant). * * \param ctx PK context to use * \param md_alg Hash algorithm used (see notes) * \param hash Hash of the message to sign * \param hash_len Hash length or 0 (see notes) * \param sig Signature to verify * \param sig_len Signature length * * \return 0 on success (signature is valid), * MBEDTLS_ERR_PK_SIG_LEN_MISMATCH if the signature is * valid but its actual length is less than sig_len, * or a specific error code. * * \note For RSA keys, the default padding type is PKCS#1 v1.5. * Use \c mbedtls_pk_verify_ext( MBEDTLS_PK_RSASSA_PSS, ... ) * to verify RSASSA_PSS signatures. * * \note If hash_len is 0, then the length associated with md_alg * is used instead, or an error returned if it is invalid. * * \note md_alg may be MBEDTLS_MD_NONE, only if hash_len != 0 */ int mbedtls_pk_verify( mbedtls_pk_context *ctx, mbedtls_md_type_t md_alg, const unsigned char *hash, size_t hash_len, const unsigned char *sig, size_t sig_len ); /** * \brief Verify signature, with options. * (Includes verification of the padding depending on type.) * * \param type Signature type (inc. possible padding type) to verify * \param options Pointer to type-specific options, or NULL * \param ctx PK context to use * \param md_alg Hash algorithm used (see notes) * \param hash Hash of the message to sign * \param hash_len Hash length or 0 (see notes) * \param sig Signature to verify * \param sig_len Signature length * * \return 0 on success (signature is valid), * MBEDTLS_ERR_PK_TYPE_MISMATCH if the PK context can't be * used for this type of signatures, * MBEDTLS_ERR_PK_SIG_LEN_MISMATCH if the signature is * valid but its actual length is less than sig_len, * or a specific error code. 
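*
* \note A minimal RSASSA-PSS verification sketch (hypothetical
* variables, error handling omitted; \c pk holds a parsed RSA
* public key and \c hash is the 32-byte SHA-256 of the data):
* \code
* mbedtls_pk_rsassa_pss_options opts;
* opts.mgf1_hash_id      = MBEDTLS_MD_SHA256;
* opts.expected_salt_len = MBEDTLS_RSA_SALT_LEN_ANY;
* ret = mbedtls_pk_verify_ext( MBEDTLS_PK_RSASSA_PSS, &opts, &pk,
*                              MBEDTLS_MD_SHA256, hash, 32,
*                              sig, sig_len );
* \endcode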
* * \note If hash_len is 0, then the length associated with md_alg * is used instead, or an error returned if it is invalid. * * \note md_alg may be MBEDTLS_MD_NONE, only if hash_len != 0 * * \note If type is MBEDTLS_PK_RSASSA_PSS, then options must point * to a mbedtls_pk_rsassa_pss_options structure, * otherwise it must be NULL. */ int mbedtls_pk_verify_ext( mbedtls_pk_type_t type, const void *options, mbedtls_pk_context *ctx, mbedtls_md_type_t md_alg, const unsigned char *hash, size_t hash_len, const unsigned char *sig, size_t sig_len ); /** * \brief Make signature, including padding if relevant. * * \param ctx PK context to use - must hold a private key * \param md_alg Hash algorithm used (see notes) * \param hash Hash of the message to sign * \param hash_len Hash length or 0 (see notes) * \param sig Place to write the signature * \param sig_len Number of bytes written * \param f_rng RNG function * \param p_rng RNG parameter * * \return 0 on success, or a specific error code. * * \note For RSA keys, the default padding type is PKCS#1 v1.5. * There is no interface in the PK module to make RSASSA-PSS * signatures yet. * * \note If hash_len is 0, then the length associated with md_alg * is used instead, or an error returned if it is invalid. * * \note For RSA, md_alg may be MBEDTLS_MD_NONE if hash_len != 0. * For ECDSA, md_alg may never be MBEDTLS_MD_NONE. */ int mbedtls_pk_sign( mbedtls_pk_context *ctx, mbedtls_md_type_t md_alg, const unsigned char *hash, size_t hash_len, unsigned char *sig, size_t *sig_len, int (*f_rng)(void *, unsigned char *, size_t), void *p_rng ); /** * \brief Decrypt message (including padding if relevant). * * \param ctx PK context to use - must hold a private key * \param input Input to decrypt * \param ilen Input size * \param output Decrypted output * \param olen Decrypted message length * \param osize Size of the output buffer * \param f_rng RNG function * \param p_rng RNG parameter * * \note For RSA keys, the default padding type is PKCS#1 v1.5. * * \return 0 on success, or a specific error code. */ int mbedtls_pk_decrypt( mbedtls_pk_context *ctx, const unsigned char *input, size_t ilen, unsigned char *output, size_t *olen, size_t osize, int (*f_rng)(void *, unsigned char *, size_t), void *p_rng ); /** * \brief Encrypt message (including padding if relevant). * * \param ctx PK context to use * \param input Message to encrypt * \param ilen Message size * \param output Encrypted output * \param olen Encrypted output length * \param osize Size of the output buffer * \param f_rng RNG function * \param p_rng RNG parameter * * \note For RSA keys, the default padding type is PKCS#1 v1.5. * * \return 0 on success, or a specific error code. */ int mbedtls_pk_encrypt( mbedtls_pk_context *ctx, const unsigned char *input, size_t ilen, unsigned char *output, size_t *olen, size_t osize, int (*f_rng)(void *, unsigned char *, size_t), void *p_rng ); /** * \brief Check if a public-private pair of keys matches. * * \param pub Context holding a public key. * \param prv Context holding a private (and public) key. 
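*
* \note Usage sketch, assuming both contexts were loaded beforehand:
* \code
* ret = mbedtls_pk_check_pair( &pub, &prv ); /* 0 iff the keys match */
* \endcode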
* * \return 0 on success or MBEDTLS_ERR_PK_BAD_INPUT_DATA */ int mbedtls_pk_check_pair( const mbedtls_pk_context *pub, const mbedtls_pk_context *prv ); /** * \brief Export debug information * * \param ctx Context to use * \param items Place to write debug items * * \return 0 on success or MBEDTLS_ERR_PK_BAD_INPUT_DATA */ int mbedtls_pk_debug( const mbedtls_pk_context *ctx, mbedtls_pk_debug_item *items ); /** * \brief Access the type name * * \param ctx Context to use * * \return Type name on success, or "invalid PK" */ const char * mbedtls_pk_get_name( const mbedtls_pk_context *ctx ); /** * \brief Get the key type * * \param ctx Context to use * * \return Type on success, or MBEDTLS_PK_NONE */ mbedtls_pk_type_t mbedtls_pk_get_type( const mbedtls_pk_context *ctx ); #if defined(MBEDTLS_PK_PARSE_C) /** \ingroup pk_module */ /** * \brief Parse a private key in PEM or DER format * * \param ctx key to be initialized * \param key input buffer * \param keylen size of the buffer * (including the terminating null byte for PEM data) * \param pwd password for decryption (optional) * \param pwdlen size of the password * * \note On entry, ctx must be empty, either freshly initialised * with mbedtls_pk_init() or reset with mbedtls_pk_free(). If you need a * specific key type, check the result with mbedtls_pk_can_do(). * * \note The key is also checked for correctness. * * \return 0 if successful, or a specific PK or PEM error code */ int mbedtls_pk_parse_key( mbedtls_pk_context *ctx, const unsigned char *key, size_t keylen, const unsigned char *pwd, size_t pwdlen ); /** \ingroup pk_module */ /** * \brief Parse a public key in PEM or DER format * * \param ctx key to be initialized * \param key input buffer * \param keylen size of the buffer * (including the terminating null byte for PEM data) * * \note On entry, ctx must be empty, either freshly initialised * with mbedtls_pk_init() or reset with mbedtls_pk_free(). If you need a * specific key type, check the result with mbedtls_pk_can_do(). * * \note The key is also checked for correctness. * * \return 0 if successful, or a specific PK or PEM error code */ int mbedtls_pk_parse_public_key( mbedtls_pk_context *ctx, const unsigned char *key, size_t keylen ); #if defined(MBEDTLS_FS_IO) /** \ingroup pk_module */ /** * \brief Load and parse a private key * * \param ctx key to be initialized * \param path filename to read the private key from * \param password password to decrypt the file (can be NULL) * * \note On entry, ctx must be empty, either freshly initialised * with mbedtls_pk_init() or reset with mbedtls_pk_free(). If you need a * specific key type, check the result with mbedtls_pk_can_do(). * * \note The key is also checked for correctness. * * \return 0 if successful, or a specific PK or PEM error code */ int mbedtls_pk_parse_keyfile( mbedtls_pk_context *ctx, const char *path, const char *password ); /** \ingroup pk_module */ /** * \brief Load and parse a public key * * \param ctx key to be initialized * \param path filename to read the public key from * * \note On entry, ctx must be empty, either freshly initialised * with mbedtls_pk_init() or reset with mbedtls_pk_free(). If * you need a specific key type, check the result with * mbedtls_pk_can_do(). * * \note The key is also checked for correctness. 
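*
* \note Typical call sequence (a sketch; hypothetical file name,
* error handling omitted):
* \code
* mbedtls_pk_context pk;
* mbedtls_pk_init( &pk );
* ret = mbedtls_pk_parse_public_keyfile( &pk, "pub.pem" );
* /* ... use pk ... */
* mbedtls_pk_free( &pk );
* \endcode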
* * \return 0 if successful, or a specific PK or PEM error code */ int mbedtls_pk_parse_public_keyfile( mbedtls_pk_context *ctx, const char *path ); #endif /* MBEDTLS_FS_IO */ #endif /* MBEDTLS_PK_PARSE_C */ #if defined(MBEDTLS_PK_WRITE_C) /** * \brief Write a private key to a PKCS#1 or SEC1 DER structure * Note: data is written at the end of the buffer! Use the * return value to determine where you should start * using the buffer * * \param ctx private to write away * \param buf buffer to write to * \param size size of the buffer * * \return length of data written if successful, or a specific * error code */ int mbedtls_pk_write_key_der( mbedtls_pk_context *ctx, unsigned char *buf, size_t size ); /** * \brief Write a public key to a SubjectPublicKeyInfo DER structure * Note: data is written at the end of the buffer! Use the * return value to determine where you should start * using the buffer * * \param ctx public key to write away * \param buf buffer to write to * \param size size of the buffer * * \return length of data written if successful, or a specific * error code */ int mbedtls_pk_write_pubkey_der( mbedtls_pk_context *ctx, unsigned char *buf, size_t size ); #if defined(MBEDTLS_PEM_WRITE_C) /** * \brief Write a public key to a PEM string * * \param ctx public key to write away * \param buf buffer to write to * \param size size of the buffer * * \return 0 if successful, or a specific error code */ int mbedtls_pk_write_pubkey_pem( mbedtls_pk_context *ctx, unsigned char *buf, size_t size ); /** * \brief Write a private key to a PKCS#1 or SEC1 PEM string * * \param ctx private to write away * \param buf buffer to write to * \param size size of the buffer * * \return 0 if successful, or a specific error code */ int mbedtls_pk_write_key_pem( mbedtls_pk_context *ctx, unsigned char *buf, size_t size ); #endif /* MBEDTLS_PEM_WRITE_C */ #endif /* MBEDTLS_PK_WRITE_C */ /* * WARNING: Low-level functions. You probably do not want to use these unless * you are certain you do ;) */ #if defined(MBEDTLS_PK_PARSE_C) /** * \brief Parse a SubjectPublicKeyInfo DER structure * * \param p the position in the ASN.1 data * \param end end of the buffer * \param pk the key to fill * * \return 0 if successful, or a specific PK error code */ int mbedtls_pk_parse_subpubkey( unsigned char **p, const unsigned char *end, mbedtls_pk_context *pk ); #endif /* MBEDTLS_PK_PARSE_C */ #if defined(MBEDTLS_PK_WRITE_C) /** * \brief Write a subjectPublicKey to ASN.1 data * Note: function works backwards in data buffer * * \param p reference to current position pointer * \param start start of the buffer (for bounds-checking) * \param key public key to write away * * \return the length written or a negative error code */ int mbedtls_pk_write_pubkey( unsigned char **p, unsigned char *start, const mbedtls_pk_context *key ); #endif /* MBEDTLS_PK_WRITE_C */ /* * Internal module functions. You probably do not want to use these unless you * know you do. 
*/ #if defined(MBEDTLS_FS_IO) int mbedtls_pk_load_file( const char *path, unsigned char **buf, size_t *n ); #endif #ifdef __cplusplus } #endif #endif /* MBEDTLS_PK_H */ backport-iwlwifi-dkms-7906/compat/verification/mbedtls/platform.h000066400000000000000000000005001351064634500252700ustar00rootroot00000000000000#ifndef __MBEDTLS_PLATFORM_H #define __MBEDTLS_PLATFORM_H #include <linux/kernel.h> #include <linux/slab.h> #include <linux/string.h> #define mbedtls_printf pr_debug #define mbedtls_calloc(a, b) kcalloc(a, b, GFP_KERNEL) #define mbedtls_free kfree #define mbedtls_snprintf snprintf #endif /* __MBEDTLS_PLATFORM_H */ backport-iwlwifi-dkms-7906/compat/verification/mbedtls/rsa.h000066400000000000000000000675611351064634500242510ustar00rootroot00000000000000/** * \file rsa.h * * \brief The RSA public-key cryptosystem * * Copyright (C) 2006-2015, ARM Limited, All Rights Reserved * SPDX-License-Identifier: GPL-2.0 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * * This file is part of mbed TLS (https://tls.mbed.org) */ #ifndef MBEDTLS_RSA_H #define MBEDTLS_RSA_H #if !defined(MBEDTLS_CONFIG_FILE) #include "config.h" #else #include MBEDTLS_CONFIG_FILE #endif #include "bignum.h" #include "md.h" #if defined(MBEDTLS_THREADING_C) #include "threading.h" #endif /* * RSA Error codes */ #define MBEDTLS_ERR_RSA_BAD_INPUT_DATA -0x4080 /**< Bad input parameters to function. */ #define MBEDTLS_ERR_RSA_INVALID_PADDING -0x4100 /**< Input data contains invalid padding and is rejected. */ #define MBEDTLS_ERR_RSA_KEY_GEN_FAILED -0x4180 /**< Something failed during generation of a key. */ #define MBEDTLS_ERR_RSA_KEY_CHECK_FAILED -0x4200 /**< Key failed to pass the library's validity check. */ #define MBEDTLS_ERR_RSA_PUBLIC_FAILED -0x4280 /**< The public key operation failed. */ #define MBEDTLS_ERR_RSA_PRIVATE_FAILED -0x4300 /**< The private key operation failed. */ #define MBEDTLS_ERR_RSA_VERIFY_FAILED -0x4380 /**< The PKCS#1 verification failed. */ #define MBEDTLS_ERR_RSA_OUTPUT_TOO_LARGE -0x4400 /**< The output buffer for decryption is not large enough. */ #define MBEDTLS_ERR_RSA_RNG_FAILED -0x4480 /**< The random generator failed to generate non-zeros. */ /* * RSA constants */ #define MBEDTLS_RSA_PUBLIC 0 #define MBEDTLS_RSA_PRIVATE 1 #define MBEDTLS_RSA_PKCS_V15 0 #define MBEDTLS_RSA_PKCS_V21 1 #define MBEDTLS_RSA_SIGN 1 #define MBEDTLS_RSA_CRYPT 2 #define MBEDTLS_RSA_SALT_LEN_ANY -1 /* * The above constants may be used even if the RSA module is compiled out, * e.g. for alternative (PKCS#11) RSA implementations in the PK layers.
*/ #if defined(MBEDTLS_RSA_C) #ifdef __cplusplus extern "C" { #endif /** * \brief RSA context structure */ typedef struct { int ver; /*!< always 0 */ size_t len; /*!< size(N) in chars */ mbedtls_mpi N; /*!< public modulus */ mbedtls_mpi E; /*!< public exponent */ mbedtls_mpi D; /*!< private exponent */ mbedtls_mpi P; /*!< 1st prime factor */ mbedtls_mpi Q; /*!< 2nd prime factor */ mbedtls_mpi DP; /*!< D % (P - 1) */ mbedtls_mpi DQ; /*!< D % (Q - 1) */ mbedtls_mpi QP; /*!< 1 / (Q % P) */ mbedtls_mpi RN; /*!< cached R^2 mod N */ mbedtls_mpi RP; /*!< cached R^2 mod P */ mbedtls_mpi RQ; /*!< cached R^2 mod Q */ mbedtls_mpi Vi; /*!< cached blinding value */ mbedtls_mpi Vf; /*!< cached un-blinding value */ int padding; /*!< MBEDTLS_RSA_PKCS_V15 for 1.5 padding and MBEDTLS_RSA_PKCS_V21 for OAEP/PSS */ int hash_id; /*!< Hash identifier of mbedtls_md_type_t as specified in the mbedtls_md.h header file for the EME-OAEP and EMSA-PSS encoding */ #if defined(MBEDTLS_THREADING_C) mbedtls_threading_mutex_t mutex; /*!< Thread-safety mutex */ #endif } mbedtls_rsa_context; /** * \brief Initialize an RSA context * * Note: Set padding to MBEDTLS_RSA_PKCS_V21 for the RSAES-OAEP * encryption scheme and the RSASSA-PSS signature scheme. * * \param ctx RSA context to be initialized * \param padding MBEDTLS_RSA_PKCS_V15 or MBEDTLS_RSA_PKCS_V21 * \param hash_id MBEDTLS_RSA_PKCS_V21 hash identifier * * \note The hash_id parameter is actually ignored * when using MBEDTLS_RSA_PKCS_V15 padding. * * \note Choice of padding mode is strictly enforced for private key * operations, since there might be security concerns in * mixing padding modes. For public key operations it's merely * a default value, which can be overridden by calling specific * rsa_rsaes_xxx or rsa_rsassa_xxx functions. * * \note The chosen hash is always used for OAEP encryption. * For PSS signatures, it's always used for making signatures, * but can be overridden (and always is, if set to * MBEDTLS_MD_NONE) for verifying them. */ void mbedtls_rsa_init( mbedtls_rsa_context *ctx, int padding, int hash_id); /** * \brief Set padding for an already initialized RSA context * See \c mbedtls_rsa_init() for details. * * \param ctx RSA context to be set * \param padding MBEDTLS_RSA_PKCS_V15 or MBEDTLS_RSA_PKCS_V21 * \param hash_id MBEDTLS_RSA_PKCS_V21 hash identifier */ void mbedtls_rsa_set_padding( mbedtls_rsa_context *ctx, int padding, int hash_id); /** * \brief Generate an RSA keypair * * \param ctx RSA context that will hold the key * \param f_rng RNG function * \param p_rng RNG parameter * \param nbits size of the public key in bits * \param exponent public exponent (e.g., 65537) * * \note mbedtls_rsa_init() must be called beforehand to set up * the RSA context. * * \return 0 if successful, or an MBEDTLS_ERR_RSA_XXX error code */ int mbedtls_rsa_gen_key( mbedtls_rsa_context *ctx, int (*f_rng)(void *, unsigned char *, size_t), void *p_rng, unsigned int nbits, int exponent ); /** * \brief Check a public RSA key * * \param ctx RSA context to be checked * * \return 0 if successful, or an MBEDTLS_ERR_RSA_XXX error code */ int mbedtls_rsa_check_pubkey( const mbedtls_rsa_context *ctx ); /** * \brief Check a private RSA key * * \param ctx RSA context to be checked * * \return 0 if successful, or an MBEDTLS_ERR_RSA_XXX error code */ int mbedtls_rsa_check_privkey( const mbedtls_rsa_context *ctx ); /** * \brief Check a public-private RSA key pair. * Check each of the contexts, and make sure they match.
* * \param pub RSA context holding the public key * \param prv RSA context holding the private key * * \return 0 if successful, or an MBEDTLS_ERR_RSA_XXX error code */ int mbedtls_rsa_check_pub_priv( const mbedtls_rsa_context *pub, const mbedtls_rsa_context *prv ); /** * \brief Do an RSA public key operation * * \param ctx RSA context * \param input input buffer * \param output output buffer * * \return 0 if successful, or an MBEDTLS_ERR_RSA_XXX error code * * \note This function does NOT take care of message * padding. Also, be sure to set input[0] = 0 or ensure that * input is smaller than N. * * \note The input and output buffers must be large * enough (eg. 128 bytes if RSA-1024 is used). */ int mbedtls_rsa_public( mbedtls_rsa_context *ctx, const unsigned char *input, unsigned char *output ); /** * \brief Do an RSA private key operation * * \param ctx RSA context * \param f_rng RNG function (Needed for blinding) * \param p_rng RNG parameter * \param input input buffer * \param output output buffer * * \return 0 if successful, or an MBEDTLS_ERR_RSA_XXX error code * * \note The input and output buffers must be large * enough (eg. 128 bytes if RSA-1024 is used). */ int mbedtls_rsa_private( mbedtls_rsa_context *ctx, int (*f_rng)(void *, unsigned char *, size_t), void *p_rng, const unsigned char *input, unsigned char *output ); /** * \brief Generic wrapper to perform a PKCS#1 encryption using the * mode from the context. Add the message padding, then do an * RSA operation. * * \param ctx RSA context * \param f_rng RNG function (Needed for padding and PKCS#1 v2.1 encoding * and MBEDTLS_RSA_PRIVATE) * \param p_rng RNG parameter * \param mode MBEDTLS_RSA_PUBLIC or MBEDTLS_RSA_PRIVATE * \param ilen contains the plaintext length * \param input buffer holding the data to be encrypted * \param output buffer that will hold the ciphertext * * \return 0 if successful, or an MBEDTLS_ERR_RSA_XXX error code * * \note The output buffer must be as large as the size * of ctx->N (eg. 128 bytes if RSA-1024 is used). */ int mbedtls_rsa_pkcs1_encrypt( mbedtls_rsa_context *ctx, int (*f_rng)(void *, unsigned char *, size_t), void *p_rng, int mode, size_t ilen, const unsigned char *input, unsigned char *output ); /** * \brief Perform a PKCS#1 v1.5 encryption (RSAES-PKCS1-v1_5-ENCRYPT) * * \param ctx RSA context * \param f_rng RNG function (Needed for padding and MBEDTLS_RSA_PRIVATE) * \param p_rng RNG parameter * \param mode MBEDTLS_RSA_PUBLIC or MBEDTLS_RSA_PRIVATE * \param ilen contains the plaintext length * \param input buffer holding the data to be encrypted * \param output buffer that will hold the ciphertext * * \return 0 if successful, or an MBEDTLS_ERR_RSA_XXX error code * * \note The output buffer must be as large as the size * of ctx->N (eg. 128 bytes if RSA-1024 is used). 
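*
* \note Call sketch (hypothetical f_rng/p_rng and buffers, checks
* omitted):
* \code
* unsigned char out[256]; /* == ctx->len for an RSA-2048 key */
* ret = mbedtls_rsa_rsaes_pkcs1_v15_encrypt( &rsa, f_rng, p_rng,
*                                            MBEDTLS_RSA_PUBLIC,
*                                            msg_len, msg, out );
* \endcode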
*/ int mbedtls_rsa_rsaes_pkcs1_v15_encrypt( mbedtls_rsa_context *ctx, int (*f_rng)(void *, unsigned char *, size_t), void *p_rng, int mode, size_t ilen, const unsigned char *input, unsigned char *output ); /** * \brief Perform a PKCS#1 v2.1 OAEP encryption (RSAES-OAEP-ENCRYPT) * * \param ctx RSA context * \param f_rng RNG function (Needed for padding and PKCS#1 v2.1 encoding * and MBEDTLS_RSA_PRIVATE) * \param p_rng RNG parameter * \param mode MBEDTLS_RSA_PUBLIC or MBEDTLS_RSA_PRIVATE * \param label buffer holding the custom label to use * \param label_len contains the label length * \param ilen contains the plaintext length * \param input buffer holding the data to be encrypted * \param output buffer that will hold the ciphertext * * \return 0 if successful, or an MBEDTLS_ERR_RSA_XXX error code * * \note The output buffer must be as large as the size * of ctx->N (eg. 128 bytes if RSA-1024 is used). */ int mbedtls_rsa_rsaes_oaep_encrypt( mbedtls_rsa_context *ctx, int (*f_rng)(void *, unsigned char *, size_t), void *p_rng, int mode, const unsigned char *label, size_t label_len, size_t ilen, const unsigned char *input, unsigned char *output ); /** * \brief Generic wrapper to perform a PKCS#1 decryption using the * mode from the context. Do an RSA operation, then remove * the message padding * * \param ctx RSA context * \param f_rng RNG function (Only needed for MBEDTLS_RSA_PRIVATE) * \param p_rng RNG parameter * \param mode MBEDTLS_RSA_PUBLIC or MBEDTLS_RSA_PRIVATE * \param olen will contain the plaintext length * \param input buffer holding the encrypted data * \param output buffer that will hold the plaintext * \param output_max_len maximum length of the output buffer * * \return 0 if successful, or an MBEDTLS_ERR_RSA_XXX error code * * \note The output buffer length \c output_max_len should be * as large as the size ctx->len of ctx->N (eg. 128 bytes * if RSA-1024 is used) to be able to hold an arbitrary * decrypted message. If it is not large enough to hold * the decryption of the particular ciphertext provided, * the function will return MBEDTLS_ERR_RSA_OUTPUT_TOO_LARGE. * * \note The input buffer must be as large as the size * of ctx->N (eg. 128 bytes if RSA-1024 is used). */ int mbedtls_rsa_pkcs1_decrypt( mbedtls_rsa_context *ctx, int (*f_rng)(void *, unsigned char *, size_t), void *p_rng, int mode, size_t *olen, const unsigned char *input, unsigned char *output, size_t output_max_len ); /** * \brief Perform a PKCS#1 v1.5 decryption (RSAES-PKCS1-v1_5-DECRYPT) * * \param ctx RSA context * \param f_rng RNG function (Only needed for MBEDTLS_RSA_PRIVATE) * \param p_rng RNG parameter * \param mode MBEDTLS_RSA_PUBLIC or MBEDTLS_RSA_PRIVATE * \param olen will contain the plaintext length * \param input buffer holding the encrypted data * \param output buffer that will hold the plaintext * \param output_max_len maximum length of the output buffer * * \return 0 if successful, or an MBEDTLS_ERR_RSA_XXX error code * * \note The output buffer length \c output_max_len should be * as large as the size ctx->len of ctx->N (eg. 128 bytes * if RSA-1024 is used) to be able to hold an arbitrary * decrypted message. If it is not large enough to hold * the decryption of the particular ciphertext provided, * the function will return MBEDTLS_ERR_RSA_OUTPUT_TOO_LARGE. * * \note The input buffer must be as large as the size * of ctx->N (eg. 128 bytes if RSA-1024 is used). 
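*
* \note Call sketch (private-key operation, so an RNG is passed for
* blinding; hypothetical variables):
* \code
* unsigned char plain[256];
* size_t olen;
* ret = mbedtls_rsa_rsaes_pkcs1_v15_decrypt( &rsa, f_rng, p_rng,
*                                            MBEDTLS_RSA_PRIVATE, &olen,
*                                            in, plain, sizeof( plain ) );
* \endcode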
*/ int mbedtls_rsa_rsaes_pkcs1_v15_decrypt( mbedtls_rsa_context *ctx, int (*f_rng)(void *, unsigned char *, size_t), void *p_rng, int mode, size_t *olen, const unsigned char *input, unsigned char *output, size_t output_max_len ); /** * \brief Perform a PKCS#1 v2.1 OAEP decryption (RSAES-OAEP-DECRYPT) * * \param ctx RSA context * \param f_rng RNG function (Only needed for MBEDTLS_RSA_PRIVATE) * \param p_rng RNG parameter * \param mode MBEDTLS_RSA_PUBLIC or MBEDTLS_RSA_PRIVATE * \param label buffer holding the custom label to use * \param label_len contains the label length * \param olen will contain the plaintext length * \param input buffer holding the encrypted data * \param output buffer that will hold the plaintext * \param output_max_len maximum length of the output buffer * * \return 0 if successful, or an MBEDTLS_ERR_RSA_XXX error code * * \note The output buffer length \c output_max_len should be * as large as the size ctx->len of ctx->N (eg. 128 bytes * if RSA-1024 is used) to be able to hold an arbitrary * decrypted message. If it is not large enough to hold * the decryption of the particular ciphertext provided, * the function will return MBEDTLS_ERR_RSA_OUTPUT_TOO_LARGE. * * \note The input buffer must be as large as the size * of ctx->N (eg. 128 bytes if RSA-1024 is used). */ int mbedtls_rsa_rsaes_oaep_decrypt( mbedtls_rsa_context *ctx, int (*f_rng)(void *, unsigned char *, size_t), void *p_rng, int mode, const unsigned char *label, size_t label_len, size_t *olen, const unsigned char *input, unsigned char *output, size_t output_max_len ); /** * \brief Generic wrapper to perform a PKCS#1 signature using the * mode from the context. Do a private RSA operation to sign * a message digest * * \param ctx RSA context * \param f_rng RNG function (Needed for PKCS#1 v2.1 encoding and for * MBEDTLS_RSA_PRIVATE) * \param p_rng RNG parameter * \param mode MBEDTLS_RSA_PUBLIC or MBEDTLS_RSA_PRIVATE * \param md_alg a MBEDTLS_MD_XXX (use MBEDTLS_MD_NONE for signing raw data) * \param hashlen message digest length (for MBEDTLS_MD_NONE only) * \param hash buffer holding the message digest * \param sig buffer that will hold the ciphertext * * \return 0 if the signing operation was successful, * or an MBEDTLS_ERR_RSA_XXX error code * * \note The "sig" buffer must be as large as the size * of ctx->N (eg. 128 bytes if RSA-1024 is used). * * \note In case of PKCS#1 v2.1 encoding, see comments on * \note \c mbedtls_rsa_rsassa_pss_sign() for details on md_alg and hash_id. */ int mbedtls_rsa_pkcs1_sign( mbedtls_rsa_context *ctx, int (*f_rng)(void *, unsigned char *, size_t), void *p_rng, int mode, mbedtls_md_type_t md_alg, unsigned int hashlen, const unsigned char *hash, unsigned char *sig ); /** * \brief Perform a PKCS#1 v1.5 signature (RSASSA-PKCS1-v1_5-SIGN) * * \param ctx RSA context * \param f_rng RNG function (Only needed for MBEDTLS_RSA_PRIVATE) * \param p_rng RNG parameter * \param mode MBEDTLS_RSA_PUBLIC or MBEDTLS_RSA_PRIVATE * \param md_alg a MBEDTLS_MD_XXX (use MBEDTLS_MD_NONE for signing raw data) * \param hashlen message digest length (for MBEDTLS_MD_NONE only) * \param hash buffer holding the message digest * \param sig buffer that will hold the ciphertext * * \return 0 if the signing operation was successful, * or an MBEDTLS_ERR_RSA_XXX error code * * \note The "sig" buffer must be as large as the size * of ctx->N (eg. 128 bytes if RSA-1024 is used). 
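*
* \note Call sketch (hash the message first; hypothetical variables,
* checks omitted):
* \code
* unsigned char hash[32];
* unsigned char sig[256]; /* ctx->len bytes for an RSA-2048 key */
* mbedtls_sha256( msg, msg_len, hash, 0 );
* ret = mbedtls_rsa_rsassa_pkcs1_v15_sign( &rsa, f_rng, p_rng,
*                                          MBEDTLS_RSA_PRIVATE,
*                                          MBEDTLS_MD_SHA256, 0,
*                                          hash, sig );
* \endcode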
*/ int mbedtls_rsa_rsassa_pkcs1_v15_sign( mbedtls_rsa_context *ctx, int (*f_rng)(void *, unsigned char *, size_t), void *p_rng, int mode, mbedtls_md_type_t md_alg, unsigned int hashlen, const unsigned char *hash, unsigned char *sig ); /** * \brief Perform a PKCS#1 v2.1 PSS signature (RSASSA-PSS-SIGN) * * \param ctx RSA context * \param f_rng RNG function (Needed for PKCS#1 v2.1 encoding and for * MBEDTLS_RSA_PRIVATE) * \param p_rng RNG parameter * \param mode MBEDTLS_RSA_PUBLIC or MBEDTLS_RSA_PRIVATE * \param md_alg a MBEDTLS_MD_XXX (use MBEDTLS_MD_NONE for signing raw data) * \param hashlen message digest length (for MBEDTLS_MD_NONE only) * \param hash buffer holding the message digest * \param sig buffer that will hold the ciphertext * * \return 0 if the signing operation was successful, * or an MBEDTLS_ERR_RSA_XXX error code * * \note The "sig" buffer must be as large as the size * of ctx->N (eg. 128 bytes if RSA-1024 is used). * * \note The hash_id in the RSA context is the one used for the * encoding. md_alg in the function call is the type of hash * that is encoded. According to RFC 3447 it is advised to * keep both hashes the same. */ int mbedtls_rsa_rsassa_pss_sign( mbedtls_rsa_context *ctx, int (*f_rng)(void *, unsigned char *, size_t), void *p_rng, int mode, mbedtls_md_type_t md_alg, unsigned int hashlen, const unsigned char *hash, unsigned char *sig ); /** * \brief Generic wrapper to perform a PKCS#1 verification using the * mode from the context. Do a public RSA operation and check * the message digest * * \param ctx points to an RSA public key * \param f_rng RNG function (Only needed for MBEDTLS_RSA_PRIVATE) * \param p_rng RNG parameter * \param mode MBEDTLS_RSA_PUBLIC or MBEDTLS_RSA_PRIVATE * \param md_alg a MBEDTLS_MD_XXX (use MBEDTLS_MD_NONE for signing raw data) * \param hashlen message digest length (for MBEDTLS_MD_NONE only) * \param hash buffer holding the message digest * \param sig buffer holding the ciphertext * * \return 0 if the verify operation was successful, * or an MBEDTLS_ERR_RSA_XXX error code * * \note The "sig" buffer must be as large as the size * of ctx->N (eg. 128 bytes if RSA-1024 is used). * * \note In case of PKCS#1 v2.1 encoding, see comments on * \c mbedtls_rsa_rsassa_pss_verify() about md_alg and hash_id. */ int mbedtls_rsa_pkcs1_verify( mbedtls_rsa_context *ctx, int (*f_rng)(void *, unsigned char *, size_t), void *p_rng, int mode, mbedtls_md_type_t md_alg, unsigned int hashlen, const unsigned char *hash, const unsigned char *sig ); /** * \brief Perform a PKCS#1 v1.5 verification (RSASSA-PKCS1-v1_5-VERIFY) * * \param ctx points to an RSA public key * \param f_rng RNG function (Only needed for MBEDTLS_RSA_PRIVATE) * \param p_rng RNG parameter * \param mode MBEDTLS_RSA_PUBLIC or MBEDTLS_RSA_PRIVATE * \param md_alg a MBEDTLS_MD_XXX (use MBEDTLS_MD_NONE for signing raw data) * \param hashlen message digest length (for MBEDTLS_MD_NONE only) * \param hash buffer holding the message digest * \param sig buffer holding the ciphertext * * \return 0 if the verify operation was successful, * or an MBEDTLS_ERR_RSA_XXX error code * * \note The "sig" buffer must be as large as the size * of ctx->N (eg. 128 bytes if RSA-1024 is used). 
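*
* \note Call sketch (public operation, so the RNG arguments may be
* NULL; hypothetical variables):
* \code
* ret = mbedtls_rsa_rsassa_pkcs1_v15_verify( &rsa, NULL, NULL,
*                                            MBEDTLS_RSA_PUBLIC,
*                                            MBEDTLS_MD_SHA256, 0,
*                                            hash, sig );
* \endcode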
*/ int mbedtls_rsa_rsassa_pkcs1_v15_verify( mbedtls_rsa_context *ctx, int (*f_rng)(void *, unsigned char *, size_t), void *p_rng, int mode, mbedtls_md_type_t md_alg, unsigned int hashlen, const unsigned char *hash, const unsigned char *sig ); /** * \brief Perform a PKCS#1 v2.1 PSS verification (RSASSA-PSS-VERIFY) * (This is the "simple" version.) * * \param ctx points to an RSA public key * \param f_rng RNG function (Only needed for MBEDTLS_RSA_PRIVATE) * \param p_rng RNG parameter * \param mode MBEDTLS_RSA_PUBLIC or MBEDTLS_RSA_PRIVATE * \param md_alg a MBEDTLS_MD_XXX (use MBEDTLS_MD_NONE for signing raw data) * \param hashlen message digest length (for MBEDTLS_MD_NONE only) * \param hash buffer holding the message digest * \param sig buffer holding the ciphertext * * \return 0 if the verify operation was successful, * or an MBEDTLS_ERR_RSA_XXX error code * * \note The "sig" buffer must be as large as the size * of ctx->N (eg. 128 bytes if RSA-1024 is used). * * \note The hash_id in the RSA context is the one used for the * verification. md_alg in the function call is the type of * hash that is verified. According to RFC 3447 it is advised to * keep both hashes the same. If hash_id in the RSA context is * unset, the md_alg from the function call is used. */ int mbedtls_rsa_rsassa_pss_verify( mbedtls_rsa_context *ctx, int (*f_rng)(void *, unsigned char *, size_t), void *p_rng, int mode, mbedtls_md_type_t md_alg, unsigned int hashlen, const unsigned char *hash, const unsigned char *sig ); /** * \brief Perform a PKCS#1 v2.1 PSS verification (RSASSA-PSS-VERIFY) * (This is the version with "full" options.) * * \param ctx points to an RSA public key * \param f_rng RNG function (Only needed for MBEDTLS_RSA_PRIVATE) * \param p_rng RNG parameter * \param mode MBEDTLS_RSA_PUBLIC or MBEDTLS_RSA_PRIVATE * \param md_alg a MBEDTLS_MD_XXX (use MBEDTLS_MD_NONE for signing raw data) * \param hashlen message digest length (for MBEDTLS_MD_NONE only) * \param hash buffer holding the message digest * \param mgf1_hash_id message digest used for mask generation * \param expected_salt_len Length of the salt used in padding, use * MBEDTLS_RSA_SALT_LEN_ANY to accept any salt length * \param sig buffer holding the ciphertext * * \return 0 if the verify operation was successful, * or an MBEDTLS_ERR_RSA_XXX error code * * \note The "sig" buffer must be as large as the size * of ctx->N (eg. 128 bytes if RSA-1024 is used). * * \note The hash_id in the RSA context is ignored. 
*/ int mbedtls_rsa_rsassa_pss_verify_ext( mbedtls_rsa_context *ctx, int (*f_rng)(void *, unsigned char *, size_t), void *p_rng, int mode, mbedtls_md_type_t md_alg, unsigned int hashlen, const unsigned char *hash, mbedtls_md_type_t mgf1_hash_id, int expected_salt_len, const unsigned char *sig ); /** * \brief Copy the components of an RSA context * * \param dst Destination context * \param src Source context * * \return 0 on success, * MBEDTLS_ERR_MPI_ALLOC_FAILED on memory allocation failure */ int mbedtls_rsa_copy( mbedtls_rsa_context *dst, const mbedtls_rsa_context *src ); /** * \brief Free the components of an RSA key * * \param ctx RSA Context to free */ void mbedtls_rsa_free( mbedtls_rsa_context *ctx ); /** * \brief Checkup routine * * \return 0 if successful, or 1 if the test failed */ int mbedtls_rsa_self_test( int verbose ); #ifdef __cplusplus } #endif #endif /* MBEDTLS_RSA_C */ #endif /* rsa.h */ backport-iwlwifi-dkms-7906/compat/verification/mbedtls/sha1.h000066400000000000000000000065721351064634500243170ustar00rootroot00000000000000/** * \file sha1.h * * \brief SHA-1 cryptographic hash function * * Copyright (C) 2006-2015, ARM Limited, All Rights Reserved * SPDX-License-Identifier: GPL-2.0 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
* * This file is part of mbed TLS (https://tls.mbed.org) */ #ifndef MBEDTLS_SHA1_H #define MBEDTLS_SHA1_H #if !defined(MBEDTLS_CONFIG_FILE) #include "config.h" #else #include MBEDTLS_CONFIG_FILE #endif #if !defined(MBEDTLS_SHA1_ALT) // Regular implementation // #ifdef __cplusplus extern "C" { #endif /** * \brief SHA-1 context structure */ typedef struct { uint32_t total[2]; /*!< number of bytes processed */ uint32_t state[5]; /*!< intermediate digest state */ unsigned char buffer[64]; /*!< data block being processed */ } mbedtls_sha1_context; /** * \brief Initialize SHA-1 context * * \param ctx SHA-1 context to be initialized */ void mbedtls_sha1_init( mbedtls_sha1_context *ctx ); /** * \brief Clear SHA-1 context * * \param ctx SHA-1 context to be cleared */ void mbedtls_sha1_free( mbedtls_sha1_context *ctx ); /** * \brief Clone (the state of) a SHA-1 context * * \param dst The destination context * \param src The context to be cloned */ void mbedtls_sha1_clone( mbedtls_sha1_context *dst, const mbedtls_sha1_context *src ); /** * \brief SHA-1 context setup * * \param ctx context to be initialized */ void mbedtls_sha1_starts( mbedtls_sha1_context *ctx ); /** * \brief SHA-1 process buffer * * \param ctx SHA-1 context * \param input buffer holding the data * \param ilen length of the input data */ void mbedtls_sha1_update( mbedtls_sha1_context *ctx, const unsigned char *input, size_t ilen ); /** * \brief SHA-1 final digest * * \param ctx SHA-1 context * \param output SHA-1 checksum result */ void mbedtls_sha1_finish( mbedtls_sha1_context *ctx, unsigned char output[20] ); /* Internal use */ void mbedtls_sha1_process( mbedtls_sha1_context *ctx, const unsigned char data[64] ); #ifdef __cplusplus } #endif #else /* MBEDTLS_SHA1_ALT */ #include "sha1_alt.h" #endif /* MBEDTLS_SHA1_ALT */ #ifdef __cplusplus extern "C" { #endif /** * \brief Output = SHA-1( input buffer ) * * \param input buffer holding the data * \param ilen length of the input data * \param output SHA-1 checksum result */ void mbedtls_sha1( const unsigned char *input, size_t ilen, unsigned char output[20] ); /** * \brief Checkup routine * * \return 0 if successful, or 1 if the test failed */ int mbedtls_sha1_self_test( int verbose ); #ifdef __cplusplus } #endif #endif /* mbedtls_sha1.h */ backport-iwlwifi-dkms-7906/compat/verification/mbedtls/sha256.h000066400000000000000000000072761351064634500244750ustar00rootroot00000000000000/** * \file sha256.h * * \brief SHA-224 and SHA-256 cryptographic hash function * * Copyright (C) 2006-2015, ARM Limited, All Rights Reserved * SPDX-License-Identifier: GPL-2.0 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
* * This file is part of mbed TLS (https://tls.mbed.org) */ #ifndef MBEDTLS_SHA256_H #define MBEDTLS_SHA256_H #if !defined(MBEDTLS_CONFIG_FILE) #include "config.h" #else #include MBEDTLS_CONFIG_FILE #endif #if !defined(MBEDTLS_SHA256_ALT) // Regular implementation // #ifdef __cplusplus extern "C" { #endif /** * \brief SHA-256 context structure */ typedef struct { uint32_t total[2]; /*!< number of bytes processed */ uint32_t state[8]; /*!< intermediate digest state */ unsigned char buffer[64]; /*!< data block being processed */ int is224; /*!< 0 => SHA-256, else SHA-224 */ } mbedtls_sha256_context; /** * \brief Initialize SHA-256 context * * \param ctx SHA-256 context to be initialized */ void mbedtls_sha256_init( mbedtls_sha256_context *ctx ); /** * \brief Clear SHA-256 context * * \param ctx SHA-256 context to be cleared */ void mbedtls_sha256_free( mbedtls_sha256_context *ctx ); /** * \brief Clone (the state of) a SHA-256 context * * \param dst The destination context * \param src The context to be cloned */ void mbedtls_sha256_clone( mbedtls_sha256_context *dst, const mbedtls_sha256_context *src ); /** * \brief SHA-256 context setup * * \param ctx context to be initialized * \param is224 0 = use SHA256, 1 = use SHA224 */ void mbedtls_sha256_starts( mbedtls_sha256_context *ctx, int is224 ); /** * \brief SHA-256 process buffer * * \param ctx SHA-256 context * \param input buffer holding the data * \param ilen length of the input data */ void mbedtls_sha256_update( mbedtls_sha256_context *ctx, const unsigned char *input, size_t ilen ); /** * \brief SHA-256 final digest * * \param ctx SHA-256 context * \param output SHA-224/256 checksum result */ void mbedtls_sha256_finish( mbedtls_sha256_context *ctx, unsigned char output[32] ); /* Internal use */ void mbedtls_sha256_process( mbedtls_sha256_context *ctx, const unsigned char data[64] ); #ifdef __cplusplus } #endif #else /* MBEDTLS_SHA256_ALT */ #include "sha256_alt.h" #endif /* MBEDTLS_SHA256_ALT */ #ifdef __cplusplus extern "C" { #endif /** * \brief Output = SHA-256( input buffer ) * * \param input buffer holding the data * \param ilen length of the input data * \param output SHA-224/256 checksum result * \param is224 0 = use SHA256, 1 = use SHA224 */ void mbedtls_sha256( const unsigned char *input, size_t ilen, unsigned char output[32], int is224 ); /** * \brief Checkup routine * * \return 0 if successful, or 1 if the test failed */ int mbedtls_sha256_self_test( int verbose ); #ifdef __cplusplus } #endif #endif /* mbedtls_sha256.h */ backport-iwlwifi-dkms-7906/compat/verification/md.c000066400000000000000000000300431351064634500224120ustar00rootroot00000000000000/** * \file mbedtls_md.c * * \brief Generic message digest wrapper for mbed TLS * * \author Adriaan de Jong * * Copyright (C) 2006-2015, ARM Limited, All Rights Reserved * SPDX-License-Identifier: GPL-2.0 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * * This file is part of mbed TLS (https://tls.mbed.org) */ #if !defined(MBEDTLS_CONFIG_FILE) #include "mbedtls/config.h" #else #include MBEDTLS_CONFIG_FILE #endif #if defined(MBEDTLS_MD_C) #include "mbedtls/md.h" #include "mbedtls/md_internal.h" #if defined(MBEDTLS_PLATFORM_C) #include "mbedtls/platform.h" #else #include #define mbedtls_calloc calloc #define mbedtls_free free #endif #if defined(MBEDTLS_FS_IO) #include #endif /* Implementation that should never be optimized out by the compiler */ static void mbedtls_zeroize( void *v, size_t n ) { volatile unsigned char *p = v; while( n-- ) *p++ = 0; } /* * Reminder: update profiles in x509_crt.c when adding a new hash! */ static const int supported_digests[] = { #if defined(MBEDTLS_SHA512_C) MBEDTLS_MD_SHA512, MBEDTLS_MD_SHA384, #endif #if defined(MBEDTLS_SHA256_C) MBEDTLS_MD_SHA256, MBEDTLS_MD_SHA224, #endif #if defined(MBEDTLS_SHA1_C) MBEDTLS_MD_SHA1, #endif #if defined(MBEDTLS_RIPEMD160_C) MBEDTLS_MD_RIPEMD160, #endif #if defined(MBEDTLS_MD5_C) MBEDTLS_MD_MD5, #endif #if defined(MBEDTLS_MD4_C) MBEDTLS_MD_MD4, #endif #if defined(MBEDTLS_MD2_C) MBEDTLS_MD_MD2, #endif MBEDTLS_MD_NONE }; const int *mbedtls_md_list( void ) { return( supported_digests ); } const mbedtls_md_info_t *mbedtls_md_info_from_string( const char *md_name ) { if( NULL == md_name ) return( NULL ); /* Get the appropriate digest information */ #if defined(MBEDTLS_MD2_C) if( !strcmp( "MD2", md_name ) ) return mbedtls_md_info_from_type( MBEDTLS_MD_MD2 ); #endif #if defined(MBEDTLS_MD4_C) if( !strcmp( "MD4", md_name ) ) return mbedtls_md_info_from_type( MBEDTLS_MD_MD4 ); #endif #if defined(MBEDTLS_MD5_C) if( !strcmp( "MD5", md_name ) ) return mbedtls_md_info_from_type( MBEDTLS_MD_MD5 ); #endif #if defined(MBEDTLS_RIPEMD160_C) if( !strcmp( "RIPEMD160", md_name ) ) return mbedtls_md_info_from_type( MBEDTLS_MD_RIPEMD160 ); #endif #if defined(MBEDTLS_SHA1_C) if( !strcmp( "SHA1", md_name ) || !strcmp( "SHA", md_name ) ) return mbedtls_md_info_from_type( MBEDTLS_MD_SHA1 ); #endif #if defined(MBEDTLS_SHA256_C) if( !strcmp( "SHA224", md_name ) ) return mbedtls_md_info_from_type( MBEDTLS_MD_SHA224 ); if( !strcmp( "SHA256", md_name ) ) return mbedtls_md_info_from_type( MBEDTLS_MD_SHA256 ); #endif #if defined(MBEDTLS_SHA512_C) if( !strcmp( "SHA384", md_name ) ) return mbedtls_md_info_from_type( MBEDTLS_MD_SHA384 ); if( !strcmp( "SHA512", md_name ) ) return mbedtls_md_info_from_type( MBEDTLS_MD_SHA512 ); #endif return( NULL ); } const mbedtls_md_info_t *mbedtls_md_info_from_type( mbedtls_md_type_t md_type ) { switch( md_type ) { #if defined(MBEDTLS_MD2_C) case MBEDTLS_MD_MD2: return( &mbedtls_md2_info ); #endif #if defined(MBEDTLS_MD4_C) case MBEDTLS_MD_MD4: return( &mbedtls_md4_info ); #endif #if defined(MBEDTLS_MD5_C) case MBEDTLS_MD_MD5: return( &mbedtls_md5_info ); #endif #if defined(MBEDTLS_RIPEMD160_C) case MBEDTLS_MD_RIPEMD160: return( &mbedtls_ripemd160_info ); #endif #if defined(MBEDTLS_SHA1_C) case MBEDTLS_MD_SHA1: return( &mbedtls_sha1_info ); #endif #if defined(MBEDTLS_SHA256_C) case MBEDTLS_MD_SHA224: return( &mbedtls_sha224_info ); case MBEDTLS_MD_SHA256: return( &mbedtls_sha256_info ); #endif #if defined(MBEDTLS_SHA512_C) case MBEDTLS_MD_SHA384: return( &mbedtls_sha384_info ); case MBEDTLS_MD_SHA512: return( &mbedtls_sha512_info ); #endif default: return( NULL ); 
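/* Unknown digest type, or support for it was compiled out above. */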
} } void mbedtls_md_init( mbedtls_md_context_t *ctx ) { memset( ctx, 0, sizeof( mbedtls_md_context_t ) ); } void mbedtls_md_free( mbedtls_md_context_t *ctx ) { if( ctx == NULL || ctx->md_info == NULL ) return; if( ctx->md_ctx != NULL ) ctx->md_info->ctx_free_func( ctx->md_ctx ); if( ctx->hmac_ctx != NULL ) { mbedtls_zeroize( ctx->hmac_ctx, 2 * ctx->md_info->block_size ); mbedtls_free( ctx->hmac_ctx ); } mbedtls_zeroize( ctx, sizeof( mbedtls_md_context_t ) ); } int mbedtls_md_clone( mbedtls_md_context_t *dst, const mbedtls_md_context_t *src ) { if( dst == NULL || dst->md_info == NULL || src == NULL || src->md_info == NULL || dst->md_info != src->md_info ) { return( MBEDTLS_ERR_MD_BAD_INPUT_DATA ); } dst->md_info->clone_func( dst->md_ctx, src->md_ctx ); return( 0 ); } #if ! defined(MBEDTLS_DEPRECATED_REMOVED) int mbedtls_md_init_ctx( mbedtls_md_context_t *ctx, const mbedtls_md_info_t *md_info ) { return mbedtls_md_setup( ctx, md_info, 1 ); } #endif int mbedtls_md_setup( mbedtls_md_context_t *ctx, const mbedtls_md_info_t *md_info, int hmac ) { if( md_info == NULL || ctx == NULL ) return( MBEDTLS_ERR_MD_BAD_INPUT_DATA ); if( ( ctx->md_ctx = md_info->ctx_alloc_func() ) == NULL ) return( MBEDTLS_ERR_MD_ALLOC_FAILED ); if( hmac != 0 ) { ctx->hmac_ctx = mbedtls_calloc( 2, md_info->block_size ); if( ctx->hmac_ctx == NULL ) { md_info->ctx_free_func( ctx->md_ctx ); return( MBEDTLS_ERR_MD_ALLOC_FAILED ); } } ctx->md_info = md_info; return( 0 ); } int mbedtls_md_starts( mbedtls_md_context_t *ctx ) { if( ctx == NULL || ctx->md_info == NULL ) return( MBEDTLS_ERR_MD_BAD_INPUT_DATA ); ctx->md_info->starts_func( ctx->md_ctx ); return( 0 ); } int mbedtls_md_update( mbedtls_md_context_t *ctx, const unsigned char *input, size_t ilen ) { if( ctx == NULL || ctx->md_info == NULL ) return( MBEDTLS_ERR_MD_BAD_INPUT_DATA ); ctx->md_info->update_func( ctx->md_ctx, input, ilen ); return( 0 ); } int mbedtls_md_finish( mbedtls_md_context_t *ctx, unsigned char *output ) { if( ctx == NULL || ctx->md_info == NULL ) return( MBEDTLS_ERR_MD_BAD_INPUT_DATA ); ctx->md_info->finish_func( ctx->md_ctx, output ); return( 0 ); } int mbedtls_md( const mbedtls_md_info_t *md_info, const unsigned char *input, size_t ilen, unsigned char *output ) { if( md_info == NULL ) return( MBEDTLS_ERR_MD_BAD_INPUT_DATA ); md_info->digest_func( input, ilen, output ); return( 0 ); } #if defined(MBEDTLS_FS_IO) int mbedtls_md_file( const mbedtls_md_info_t *md_info, const char *path, unsigned char *output ) { int ret; FILE *f; size_t n; mbedtls_md_context_t ctx; unsigned char buf[1024]; if( md_info == NULL ) return( MBEDTLS_ERR_MD_BAD_INPUT_DATA ); if( ( f = fopen( path, "rb" ) ) == NULL ) return( MBEDTLS_ERR_MD_FILE_IO_ERROR ); mbedtls_md_init( &ctx ); if( ( ret = mbedtls_md_setup( &ctx, md_info, 0 ) ) != 0 ) goto cleanup; md_info->starts_func( ctx.md_ctx ); while( ( n = fread( buf, 1, sizeof( buf ), f ) ) > 0 ) md_info->update_func( ctx.md_ctx, buf, n ); if( ferror( f ) != 0 ) { ret = MBEDTLS_ERR_MD_FILE_IO_ERROR; goto cleanup; } md_info->finish_func( ctx.md_ctx, output ); cleanup: fclose( f ); mbedtls_md_free( &ctx ); return( ret ); } #endif /* MBEDTLS_FS_IO */ int mbedtls_md_hmac_starts( mbedtls_md_context_t *ctx, const unsigned char *key, size_t keylen ) { unsigned char sum[MBEDTLS_MD_MAX_SIZE]; unsigned char *ipad, *opad; size_t i; if( ctx == NULL || ctx->md_info == NULL || ctx->hmac_ctx == NULL ) return( MBEDTLS_ERR_MD_BAD_INPUT_DATA ); if( keylen > (size_t) ctx->md_info->block_size ) { ctx->md_info->starts_func( ctx->md_ctx ); 
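/* Per RFC 2104, an HMAC key longer than the digest's block size is first
 * hashed; the resulting ctx->md_info->size bytes then serve as the key. */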
ctx->md_info->update_func( ctx->md_ctx, key, keylen ); ctx->md_info->finish_func( ctx->md_ctx, sum ); keylen = ctx->md_info->size; key = sum; } ipad = (unsigned char *) ctx->hmac_ctx; opad = (unsigned char *) ctx->hmac_ctx + ctx->md_info->block_size; memset( ipad, 0x36, ctx->md_info->block_size ); memset( opad, 0x5C, ctx->md_info->block_size ); for( i = 0; i < keylen; i++ ) { ipad[i] = (unsigned char)( ipad[i] ^ key[i] ); opad[i] = (unsigned char)( opad[i] ^ key[i] ); } mbedtls_zeroize( sum, sizeof( sum ) ); ctx->md_info->starts_func( ctx->md_ctx ); ctx->md_info->update_func( ctx->md_ctx, ipad, ctx->md_info->block_size ); return( 0 ); } int mbedtls_md_hmac_update( mbedtls_md_context_t *ctx, const unsigned char *input, size_t ilen ) { if( ctx == NULL || ctx->md_info == NULL || ctx->hmac_ctx == NULL ) return( MBEDTLS_ERR_MD_BAD_INPUT_DATA ); ctx->md_info->update_func( ctx->md_ctx, input, ilen ); return( 0 ); } int mbedtls_md_hmac_finish( mbedtls_md_context_t *ctx, unsigned char *output ) { unsigned char tmp[MBEDTLS_MD_MAX_SIZE]; unsigned char *opad; if( ctx == NULL || ctx->md_info == NULL || ctx->hmac_ctx == NULL ) return( MBEDTLS_ERR_MD_BAD_INPUT_DATA ); opad = (unsigned char *) ctx->hmac_ctx + ctx->md_info->block_size; ctx->md_info->finish_func( ctx->md_ctx, tmp ); ctx->md_info->starts_func( ctx->md_ctx ); ctx->md_info->update_func( ctx->md_ctx, opad, ctx->md_info->block_size ); ctx->md_info->update_func( ctx->md_ctx, tmp, ctx->md_info->size ); ctx->md_info->finish_func( ctx->md_ctx, output ); return( 0 ); } int mbedtls_md_hmac_reset( mbedtls_md_context_t *ctx ) { unsigned char *ipad; if( ctx == NULL || ctx->md_info == NULL || ctx->hmac_ctx == NULL ) return( MBEDTLS_ERR_MD_BAD_INPUT_DATA ); ipad = (unsigned char *) ctx->hmac_ctx; ctx->md_info->starts_func( ctx->md_ctx ); ctx->md_info->update_func( ctx->md_ctx, ipad, ctx->md_info->block_size ); return( 0 ); } int mbedtls_md_hmac( const mbedtls_md_info_t *md_info, const unsigned char *key, size_t keylen, const unsigned char *input, size_t ilen, unsigned char *output ) { mbedtls_md_context_t ctx; int ret; if( md_info == NULL ) return( MBEDTLS_ERR_MD_BAD_INPUT_DATA ); mbedtls_md_init( &ctx ); if( ( ret = mbedtls_md_setup( &ctx, md_info, 1 ) ) != 0 ) return( ret ); mbedtls_md_hmac_starts( &ctx, key, keylen ); mbedtls_md_hmac_update( &ctx, input, ilen ); mbedtls_md_hmac_finish( &ctx, output ); mbedtls_md_free( &ctx ); return( 0 ); } int mbedtls_md_process( mbedtls_md_context_t *ctx, const unsigned char *data ) { if( ctx == NULL || ctx->md_info == NULL ) return( MBEDTLS_ERR_MD_BAD_INPUT_DATA ); ctx->md_info->process_func( ctx->md_ctx, data ); return( 0 ); } unsigned char mbedtls_md_get_size( const mbedtls_md_info_t *md_info ) { if( md_info == NULL ) return( 0 ); return md_info->size; } mbedtls_md_type_t mbedtls_md_get_type( const mbedtls_md_info_t *md_info ) { if( md_info == NULL ) return( MBEDTLS_MD_NONE ); return md_info->type; } const char *mbedtls_md_get_name( const mbedtls_md_info_t *md_info ) { if( md_info == NULL ) return( NULL ); return md_info->name; } #endif /* MBEDTLS_MD_C */ backport-iwlwifi-dkms-7906/compat/verification/md_wrap.c000066400000000000000000000320471351064634500234510ustar00rootroot00000000000000/** * \file md_wrap.c * * \brief Generic message digest wrapper for mbed TLS * * \author Adriaan de Jong * * Copyright (C) 2006-2015, ARM Limited, All Rights Reserved * SPDX-License-Identifier: GPL-2.0 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as 
published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * * This file is part of mbed TLS (https://tls.mbed.org) */ #if !defined(MBEDTLS_CONFIG_FILE) #include "mbedtls/config.h" #else #include MBEDTLS_CONFIG_FILE #endif #if defined(MBEDTLS_MD_C) #include "mbedtls/md_internal.h" #if defined(MBEDTLS_MD2_C) #include "mbedtls/md2.h" #endif #if defined(MBEDTLS_MD4_C) #include "mbedtls/md4.h" #endif #if defined(MBEDTLS_MD5_C) #include "mbedtls/md5.h" #endif #if defined(MBEDTLS_RIPEMD160_C) #include "mbedtls/ripemd160.h" #endif #if defined(MBEDTLS_SHA1_C) #include "mbedtls/sha1.h" #endif #if defined(MBEDTLS_SHA256_C) #include "mbedtls/sha256.h" #endif #if defined(MBEDTLS_SHA512_C) #include "mbedtls/sha512.h" #endif #if defined(MBEDTLS_PLATFORM_C) #include "mbedtls/platform.h" #else #include #define mbedtls_calloc calloc #define mbedtls_free free #endif #if defined(MBEDTLS_MD2_C) static void md2_starts_wrap( void *ctx ) { mbedtls_md2_starts( (mbedtls_md2_context *) ctx ); } static void md2_update_wrap( void *ctx, const unsigned char *input, size_t ilen ) { mbedtls_md2_update( (mbedtls_md2_context *) ctx, input, ilen ); } static void md2_finish_wrap( void *ctx, unsigned char *output ) { mbedtls_md2_finish( (mbedtls_md2_context *) ctx, output ); } static void *md2_ctx_alloc( void ) { void *ctx = mbedtls_calloc( 1, sizeof( mbedtls_md2_context ) ); if( ctx != NULL ) mbedtls_md2_init( (mbedtls_md2_context *) ctx ); return( ctx ); } static void md2_ctx_free( void *ctx ) { mbedtls_md2_free( (mbedtls_md2_context *) ctx ); mbedtls_free( ctx ); } static void md2_clone_wrap( void *dst, const void *src ) { mbedtls_md2_clone( (mbedtls_md2_context *) dst, (const mbedtls_md2_context *) src ); } static void md2_process_wrap( void *ctx, const unsigned char *data ) { ((void) data); mbedtls_md2_process( (mbedtls_md2_context *) ctx ); } const mbedtls_md_info_t mbedtls_md2_info = { MBEDTLS_MD_MD2, "MD2", 16, 16, md2_starts_wrap, md2_update_wrap, md2_finish_wrap, mbedtls_md2, md2_ctx_alloc, md2_ctx_free, md2_clone_wrap, md2_process_wrap, }; #endif /* MBEDTLS_MD2_C */ #if defined(MBEDTLS_MD4_C) static void md4_starts_wrap( void *ctx ) { mbedtls_md4_starts( (mbedtls_md4_context *) ctx ); } static void md4_update_wrap( void *ctx, const unsigned char *input, size_t ilen ) { mbedtls_md4_update( (mbedtls_md4_context *) ctx, input, ilen ); } static void md4_finish_wrap( void *ctx, unsigned char *output ) { mbedtls_md4_finish( (mbedtls_md4_context *) ctx, output ); } static void *md4_ctx_alloc( void ) { void *ctx = mbedtls_calloc( 1, sizeof( mbedtls_md4_context ) ); if( ctx != NULL ) mbedtls_md4_init( (mbedtls_md4_context *) ctx ); return( ctx ); } static void md4_ctx_free( void *ctx ) { mbedtls_md4_free( (mbedtls_md4_context *) ctx ); mbedtls_free( ctx ); } static void md4_clone_wrap( void *dst, const void *src ) { mbedtls_md4_clone( (mbedtls_md4_context *) dst, (const mbedtls_md4_context *) src ); } static void md4_process_wrap( void *ctx, const unsigned char *data ) { mbedtls_md4_process( (mbedtls_md4_context *) 
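/* the generic md layer hands the context around as a void *; each wrapper
 * casts back to its concrete type before calling the real implementation */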
ctx, data ); } const mbedtls_md_info_t mbedtls_md4_info = { MBEDTLS_MD_MD4, "MD4", 16, 64, md4_starts_wrap, md4_update_wrap, md4_finish_wrap, mbedtls_md4, md4_ctx_alloc, md4_ctx_free, md4_clone_wrap, md4_process_wrap, }; #endif /* MBEDTLS_MD4_C */ #if defined(MBEDTLS_MD5_C) static void md5_starts_wrap( void *ctx ) { mbedtls_md5_starts( (mbedtls_md5_context *) ctx ); } static void md5_update_wrap( void *ctx, const unsigned char *input, size_t ilen ) { mbedtls_md5_update( (mbedtls_md5_context *) ctx, input, ilen ); } static void md5_finish_wrap( void *ctx, unsigned char *output ) { mbedtls_md5_finish( (mbedtls_md5_context *) ctx, output ); } static void *md5_ctx_alloc( void ) { void *ctx = mbedtls_calloc( 1, sizeof( mbedtls_md5_context ) ); if( ctx != NULL ) mbedtls_md5_init( (mbedtls_md5_context *) ctx ); return( ctx ); } static void md5_ctx_free( void *ctx ) { mbedtls_md5_free( (mbedtls_md5_context *) ctx ); mbedtls_free( ctx ); } static void md5_clone_wrap( void *dst, const void *src ) { mbedtls_md5_clone( (mbedtls_md5_context *) dst, (const mbedtls_md5_context *) src ); } static void md5_process_wrap( void *ctx, const unsigned char *data ) { mbedtls_md5_process( (mbedtls_md5_context *) ctx, data ); } const mbedtls_md_info_t mbedtls_md5_info = { MBEDTLS_MD_MD5, "MD5", 16, 64, md5_starts_wrap, md5_update_wrap, md5_finish_wrap, mbedtls_md5, md5_ctx_alloc, md5_ctx_free, md5_clone_wrap, md5_process_wrap, }; #endif /* MBEDTLS_MD5_C */ #if defined(MBEDTLS_RIPEMD160_C) static void ripemd160_starts_wrap( void *ctx ) { mbedtls_ripemd160_starts( (mbedtls_ripemd160_context *) ctx ); } static void ripemd160_update_wrap( void *ctx, const unsigned char *input, size_t ilen ) { mbedtls_ripemd160_update( (mbedtls_ripemd160_context *) ctx, input, ilen ); } static void ripemd160_finish_wrap( void *ctx, unsigned char *output ) { mbedtls_ripemd160_finish( (mbedtls_ripemd160_context *) ctx, output ); } static void *ripemd160_ctx_alloc( void ) { void *ctx = mbedtls_calloc( 1, sizeof( mbedtls_ripemd160_context ) ); if( ctx != NULL ) mbedtls_ripemd160_init( (mbedtls_ripemd160_context *) ctx ); return( ctx ); } static void ripemd160_ctx_free( void *ctx ) { mbedtls_ripemd160_free( (mbedtls_ripemd160_context *) ctx ); mbedtls_free( ctx ); } static void ripemd160_clone_wrap( void *dst, const void *src ) { mbedtls_ripemd160_clone( (mbedtls_ripemd160_context *) dst, (const mbedtls_ripemd160_context *) src ); } static void ripemd160_process_wrap( void *ctx, const unsigned char *data ) { mbedtls_ripemd160_process( (mbedtls_ripemd160_context *) ctx, data ); } const mbedtls_md_info_t mbedtls_ripemd160_info = { MBEDTLS_MD_RIPEMD160, "RIPEMD160", 20, 64, ripemd160_starts_wrap, ripemd160_update_wrap, ripemd160_finish_wrap, mbedtls_ripemd160, ripemd160_ctx_alloc, ripemd160_ctx_free, ripemd160_clone_wrap, ripemd160_process_wrap, }; #endif /* MBEDTLS_RIPEMD160_C */ #if defined(MBEDTLS_SHA1_C) static void sha1_starts_wrap( void *ctx ) { mbedtls_sha1_starts( (mbedtls_sha1_context *) ctx ); } static void sha1_update_wrap( void *ctx, const unsigned char *input, size_t ilen ) { mbedtls_sha1_update( (mbedtls_sha1_context *) ctx, input, ilen ); } static void sha1_finish_wrap( void *ctx, unsigned char *output ) { mbedtls_sha1_finish( (mbedtls_sha1_context *) ctx, output ); } static void *sha1_ctx_alloc( void ) { void *ctx = mbedtls_calloc( 1, sizeof( mbedtls_sha1_context ) ); if( ctx != NULL ) mbedtls_sha1_init( (mbedtls_sha1_context *) ctx ); return( ctx ); } static void sha1_clone_wrap( void *dst, const void *src ) { 
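/* Clone wrappers let the generic layer (mbedtls_md_clone()) duplicate an
 * in-progress hash computation without knowing the concrete context type. */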
mbedtls_sha1_clone( (mbedtls_sha1_context *) dst, (const mbedtls_sha1_context *) src ); } static void sha1_ctx_free( void *ctx ) { mbedtls_sha1_free( (mbedtls_sha1_context *) ctx ); mbedtls_free( ctx ); } static void sha1_process_wrap( void *ctx, const unsigned char *data ) { mbedtls_sha1_process( (mbedtls_sha1_context *) ctx, data ); } const mbedtls_md_info_t mbedtls_sha1_info = { MBEDTLS_MD_SHA1, "SHA1", 20, 64, sha1_starts_wrap, sha1_update_wrap, sha1_finish_wrap, mbedtls_sha1, sha1_ctx_alloc, sha1_ctx_free, sha1_clone_wrap, sha1_process_wrap, }; #endif /* MBEDTLS_SHA1_C */ /* * Wrappers for generic message digests */ #if defined(MBEDTLS_SHA256_C) static void sha224_starts_wrap( void *ctx ) { mbedtls_sha256_starts( (mbedtls_sha256_context *) ctx, 1 ); } static void sha224_update_wrap( void *ctx, const unsigned char *input, size_t ilen ) { mbedtls_sha256_update( (mbedtls_sha256_context *) ctx, input, ilen ); } static void sha224_finish_wrap( void *ctx, unsigned char *output ) { mbedtls_sha256_finish( (mbedtls_sha256_context *) ctx, output ); } static void sha224_wrap( const unsigned char *input, size_t ilen, unsigned char *output ) { mbedtls_sha256( input, ilen, output, 1 ); } static void *sha224_ctx_alloc( void ) { void *ctx = mbedtls_calloc( 1, sizeof( mbedtls_sha256_context ) ); if( ctx != NULL ) mbedtls_sha256_init( (mbedtls_sha256_context *) ctx ); return( ctx ); } static void sha224_ctx_free( void *ctx ) { mbedtls_sha256_free( (mbedtls_sha256_context *) ctx ); mbedtls_free( ctx ); } static void sha224_clone_wrap( void *dst, const void *src ) { mbedtls_sha256_clone( (mbedtls_sha256_context *) dst, (const mbedtls_sha256_context *) src ); } static void sha224_process_wrap( void *ctx, const unsigned char *data ) { mbedtls_sha256_process( (mbedtls_sha256_context *) ctx, data ); } const mbedtls_md_info_t mbedtls_sha224_info = { MBEDTLS_MD_SHA224, "SHA224", 28, 64, sha224_starts_wrap, sha224_update_wrap, sha224_finish_wrap, sha224_wrap, sha224_ctx_alloc, sha224_ctx_free, sha224_clone_wrap, sha224_process_wrap, }; static void sha256_starts_wrap( void *ctx ) { mbedtls_sha256_starts( (mbedtls_sha256_context *) ctx, 0 ); } static void sha256_wrap( const unsigned char *input, size_t ilen, unsigned char *output ) { mbedtls_sha256( input, ilen, output, 0 ); } const mbedtls_md_info_t mbedtls_sha256_info = { MBEDTLS_MD_SHA256, "SHA256", 32, 64, sha256_starts_wrap, sha224_update_wrap, sha224_finish_wrap, sha256_wrap, sha224_ctx_alloc, sha224_ctx_free, sha224_clone_wrap, sha224_process_wrap, }; #endif /* MBEDTLS_SHA256_C */ #if defined(MBEDTLS_SHA512_C) static void sha384_starts_wrap( void *ctx ) { mbedtls_sha512_starts( (mbedtls_sha512_context *) ctx, 1 ); } static void sha384_update_wrap( void *ctx, const unsigned char *input, size_t ilen ) { mbedtls_sha512_update( (mbedtls_sha512_context *) ctx, input, ilen ); } static void sha384_finish_wrap( void *ctx, unsigned char *output ) { mbedtls_sha512_finish( (mbedtls_sha512_context *) ctx, output ); } static void sha384_wrap( const unsigned char *input, size_t ilen, unsigned char *output ) { mbedtls_sha512( input, ilen, output, 1 ); } static void *sha384_ctx_alloc( void ) { void *ctx = mbedtls_calloc( 1, sizeof( mbedtls_sha512_context ) ); if( ctx != NULL ) mbedtls_sha512_init( (mbedtls_sha512_context *) ctx ); return( ctx ); } static void sha384_ctx_free( void *ctx ) { mbedtls_sha512_free( (mbedtls_sha512_context *) ctx ); mbedtls_free( ctx ); } static void sha384_clone_wrap( void *dst, const void *src ) { mbedtls_sha512_clone( (mbedtls_sha512_context 
*) dst, (const mbedtls_sha512_context *) src ); } static void sha384_process_wrap( void *ctx, const unsigned char *data ) { mbedtls_sha512_process( (mbedtls_sha512_context *) ctx, data ); } const mbedtls_md_info_t mbedtls_sha384_info = { MBEDTLS_MD_SHA384, "SHA384", 48, 128, sha384_starts_wrap, sha384_update_wrap, sha384_finish_wrap, sha384_wrap, sha384_ctx_alloc, sha384_ctx_free, sha384_clone_wrap, sha384_process_wrap, }; static void sha512_starts_wrap( void *ctx ) { mbedtls_sha512_starts( (mbedtls_sha512_context *) ctx, 0 ); } static void sha512_wrap( const unsigned char *input, size_t ilen, unsigned char *output ) { mbedtls_sha512( input, ilen, output, 0 ); } const mbedtls_md_info_t mbedtls_sha512_info = { MBEDTLS_MD_SHA512, "SHA512", 64, 128, sha512_starts_wrap, sha384_update_wrap, sha384_finish_wrap, sha512_wrap, sha384_ctx_alloc, sha384_ctx_free, sha384_clone_wrap, sha384_process_wrap, }; #endif /* MBEDTLS_SHA512_C */ #endif /* MBEDTLS_MD_C */ backport-iwlwifi-dkms-7906/compat/verification/oid.c000066400000000000000000000617611351064634500226000ustar00rootroot00000000000000/** * \file oid.c * * \brief Object Identifier (OID) database * * Copyright (C) 2006-2015, ARM Limited, All Rights Reserved * SPDX-License-Identifier: GPL-2.0 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * * This file is part of mbed TLS (https://tls.mbed.org) */ #if !defined(MBEDTLS_CONFIG_FILE) #include "mbedtls/config.h" #else #include MBEDTLS_CONFIG_FILE #endif #if defined(MBEDTLS_OID_C) #include "mbedtls/oid.h" #include "mbedtls/rsa.h" #if defined(MBEDTLS_PLATFORM_C) #include "mbedtls/platform.h" #else #define mbedtls_snprintf snprintf #endif #if defined(MBEDTLS_X509_USE_C) || defined(MBEDTLS_X509_CREATE_C) #include "mbedtls/x509.h" #endif /* * Macro to automatically add the size of #define'd OIDs */ #define ADD_LEN(s) s, MBEDTLS_OID_SIZE(s) /* * Macro to generate an internal function for oid_XXX_from_asn1() (used by * the other functions) */ #define FN_OID_TYPED_FROM_ASN1( TYPE_T, NAME, LIST ) \ static const TYPE_T * oid_ ## NAME ## _from_asn1( const mbedtls_asn1_buf *oid ) \ { \ const TYPE_T *p = LIST; \ const mbedtls_oid_descriptor_t *cur = (const mbedtls_oid_descriptor_t *) p; \ if( p == NULL || oid == NULL ) return( NULL ); \ while( cur->asn1 != NULL ) { \ if( cur->asn1_len == oid->len && \ memcmp( cur->asn1, oid->p, oid->len ) == 0 ) { \ return( p ); \ } \ p++; \ cur = (const mbedtls_oid_descriptor_t *) p; \ } \ return( NULL ); \ } /* * Macro to generate a function for retrieving a single attribute from the * descriptor of an mbedtls_oid_descriptor_t wrapper. 
*/ #define FN_OID_GET_DESCRIPTOR_ATTR1(FN_NAME, TYPE_T, TYPE_NAME, ATTR1_TYPE, ATTR1) \ int FN_NAME( const mbedtls_asn1_buf *oid, ATTR1_TYPE * ATTR1 ) \ { \ const TYPE_T *data = oid_ ## TYPE_NAME ## _from_asn1( oid ); \ if( data == NULL ) return( MBEDTLS_ERR_OID_NOT_FOUND ); \ *ATTR1 = data->descriptor.ATTR1; \ return( 0 ); \ } /* * Macro to generate a function for retrieving a single attribute from an * mbedtls_oid_descriptor_t wrapper. */ #define FN_OID_GET_ATTR1(FN_NAME, TYPE_T, TYPE_NAME, ATTR1_TYPE, ATTR1) \ int FN_NAME( const mbedtls_asn1_buf *oid, ATTR1_TYPE * ATTR1 ) \ { \ const TYPE_T *data = oid_ ## TYPE_NAME ## _from_asn1( oid ); \ if( data == NULL ) return( MBEDTLS_ERR_OID_NOT_FOUND ); \ *ATTR1 = data->ATTR1; \ return( 0 ); \ } /* * Macro to generate a function for retrieving two attributes from an * mbedtls_oid_descriptor_t wrapper. */ #define FN_OID_GET_ATTR2(FN_NAME, TYPE_T, TYPE_NAME, ATTR1_TYPE, ATTR1, \ ATTR2_TYPE, ATTR2) \ int FN_NAME( const mbedtls_asn1_buf *oid, ATTR1_TYPE * ATTR1, ATTR2_TYPE * ATTR2 ) \ { \ const TYPE_T *data = oid_ ## TYPE_NAME ## _from_asn1( oid ); \ if( data == NULL ) return( MBEDTLS_ERR_OID_NOT_FOUND ); \ *ATTR1 = data->ATTR1; \ *ATTR2 = data->ATTR2; \ return( 0 ); \ } /* * Macro to generate a function for retrieving the OID based on a single * attribute from a mbedtls_oid_descriptor_t wrapper. */ #define FN_OID_GET_OID_BY_ATTR1(FN_NAME, TYPE_T, LIST, ATTR1_TYPE, ATTR1) \ int FN_NAME( ATTR1_TYPE ATTR1, const char **oid, size_t *olen ) \ { \ const TYPE_T *cur = LIST; \ while( cur->descriptor.asn1 != NULL ) { \ if( cur->ATTR1 == ATTR1 ) { \ *oid = cur->descriptor.asn1; \ *olen = cur->descriptor.asn1_len; \ return( 0 ); \ } \ cur++; \ } \ return( MBEDTLS_ERR_OID_NOT_FOUND ); \ } /* * Macro to generate a function for retrieving the OID based on two * attributes from a mbedtls_oid_descriptor_t wrapper. 
*/ #define FN_OID_GET_OID_BY_ATTR2(FN_NAME, TYPE_T, LIST, ATTR1_TYPE, ATTR1, \ ATTR2_TYPE, ATTR2) \ int FN_NAME( ATTR1_TYPE ATTR1, ATTR2_TYPE ATTR2, const char **oid , \ size_t *olen ) \ { \ const TYPE_T *cur = LIST; \ while( cur->descriptor.asn1 != NULL ) { \ if( cur->ATTR1 == ATTR1 && cur->ATTR2 == ATTR2 ) { \ *oid = cur->descriptor.asn1; \ *olen = cur->descriptor.asn1_len; \ return( 0 ); \ } \ cur++; \ } \ return( MBEDTLS_ERR_OID_NOT_FOUND ); \ } #if defined(MBEDTLS_X509_USE_C) || defined(MBEDTLS_X509_CREATE_C) /* * For X520 attribute types */ typedef struct { mbedtls_oid_descriptor_t descriptor; const char *short_name; } oid_x520_attr_t; static const oid_x520_attr_t oid_x520_attr_type[] = { { { ADD_LEN( MBEDTLS_OID_AT_CN ), "id-at-commonName", "Common Name" }, "CN", }, { { ADD_LEN( MBEDTLS_OID_AT_COUNTRY ), "id-at-countryName", "Country" }, "C", }, { { ADD_LEN( MBEDTLS_OID_AT_LOCALITY ), "id-at-locality", "Locality" }, "L", }, { { ADD_LEN( MBEDTLS_OID_AT_STATE ), "id-at-state", "State" }, "ST", }, { { ADD_LEN( MBEDTLS_OID_AT_ORGANIZATION ),"id-at-organizationName", "Organization" }, "O", }, { { ADD_LEN( MBEDTLS_OID_AT_ORG_UNIT ), "id-at-organizationalUnitName", "Org Unit" }, "OU", }, { { ADD_LEN( MBEDTLS_OID_PKCS9_EMAIL ), "emailAddress", "E-mail address" }, "emailAddress", }, { { ADD_LEN( MBEDTLS_OID_AT_SERIAL_NUMBER ),"id-at-serialNumber", "Serial number" }, "serialNumber", }, { { ADD_LEN( MBEDTLS_OID_AT_POSTAL_ADDRESS ),"id-at-postalAddress", "Postal address" }, "postalAddress", }, { { ADD_LEN( MBEDTLS_OID_AT_POSTAL_CODE ), "id-at-postalCode", "Postal code" }, "postalCode", }, { { ADD_LEN( MBEDTLS_OID_AT_SUR_NAME ), "id-at-surName", "Surname" }, "SN", }, { { ADD_LEN( MBEDTLS_OID_AT_GIVEN_NAME ), "id-at-givenName", "Given name" }, "GN", }, { { ADD_LEN( MBEDTLS_OID_AT_INITIALS ), "id-at-initials", "Initials" }, "initials", }, { { ADD_LEN( MBEDTLS_OID_AT_GENERATION_QUALIFIER ), "id-at-generationQualifier", "Generation qualifier" }, "generationQualifier", }, { { ADD_LEN( MBEDTLS_OID_AT_TITLE ), "id-at-title", "Title" }, "title", }, { { ADD_LEN( MBEDTLS_OID_AT_DN_QUALIFIER ),"id-at-dnQualifier", "Distinguished Name qualifier" }, "dnQualifier", }, { { ADD_LEN( MBEDTLS_OID_AT_PSEUDONYM ), "id-at-pseudonym", "Pseudonym" }, "pseudonym", }, { { ADD_LEN( MBEDTLS_OID_DOMAIN_COMPONENT ), "id-domainComponent", "Domain component" }, "DC", }, { { ADD_LEN( MBEDTLS_OID_AT_UNIQUE_IDENTIFIER ), "id-at-uniqueIdentifier", "Unique Identifier" }, "uniqueIdentifier", }, { { NULL, 0, NULL, NULL }, NULL, } }; FN_OID_TYPED_FROM_ASN1(oid_x520_attr_t, x520_attr, oid_x520_attr_type) FN_OID_GET_ATTR1(mbedtls_oid_get_attr_short_name, oid_x520_attr_t, x520_attr, const char *, short_name) /* * For X509 extensions */ typedef struct { mbedtls_oid_descriptor_t descriptor; int ext_type; } oid_x509_ext_t; static const oid_x509_ext_t oid_x509_ext[] = { { { ADD_LEN( MBEDTLS_OID_BASIC_CONSTRAINTS ), "id-ce-basicConstraints", "Basic Constraints" }, MBEDTLS_X509_EXT_BASIC_CONSTRAINTS, }, { { ADD_LEN( MBEDTLS_OID_KEY_USAGE ), "id-ce-keyUsage", "Key Usage" }, MBEDTLS_X509_EXT_KEY_USAGE, }, { { ADD_LEN( MBEDTLS_OID_EXTENDED_KEY_USAGE ), "id-ce-extKeyUsage", "Extended Key Usage" }, MBEDTLS_X509_EXT_EXTENDED_KEY_USAGE, }, { { ADD_LEN( MBEDTLS_OID_SUBJECT_ALT_NAME ), "id-ce-subjectAltName", "Subject Alt Name" }, MBEDTLS_X509_EXT_SUBJECT_ALT_NAME, }, { { ADD_LEN( MBEDTLS_OID_NS_CERT_TYPE ), "id-netscape-certtype", "Netscape Certificate Type" }, MBEDTLS_X509_EXT_NS_CERT_TYPE, }, { { NULL, 0, NULL, NULL }, 0, }, }; 
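/*
 * Every table in this file ends with an all-NULL sentinel entry; the
 * generated lookup loops stop on it instead of using a length.  A minimal
 * usage sketch for the accessor generated just below (the mbedtls_asn1_buf
 * setup here is illustrative, not taken from this file):
 *
 *   mbedtls_asn1_buf oid = { MBEDTLS_ASN1_OID,
 *                            MBEDTLS_OID_SIZE( MBEDTLS_OID_KEY_USAGE ),
 *                            (unsigned char *) MBEDTLS_OID_KEY_USAGE };
 *   int ext_type;
 *   int ret = mbedtls_oid_get_x509_ext_type( &oid, &ext_type );
 *
 * On success ret is 0 and ext_type is MBEDTLS_X509_EXT_KEY_USAGE;
 * otherwise ret is MBEDTLS_ERR_OID_NOT_FOUND.
 */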
FN_OID_TYPED_FROM_ASN1(oid_x509_ext_t, x509_ext, oid_x509_ext) FN_OID_GET_ATTR1(mbedtls_oid_get_x509_ext_type, oid_x509_ext_t, x509_ext, int, ext_type) static const mbedtls_oid_descriptor_t oid_ext_key_usage[] = { { ADD_LEN( MBEDTLS_OID_SERVER_AUTH ), "id-kp-serverAuth", "TLS Web Server Authentication" }, { ADD_LEN( MBEDTLS_OID_CLIENT_AUTH ), "id-kp-clientAuth", "TLS Web Client Authentication" }, { ADD_LEN( MBEDTLS_OID_CODE_SIGNING ), "id-kp-codeSigning", "Code Signing" }, { ADD_LEN( MBEDTLS_OID_EMAIL_PROTECTION ), "id-kp-emailProtection", "E-mail Protection" }, { ADD_LEN( MBEDTLS_OID_TIME_STAMPING ), "id-kp-timeStamping", "Time Stamping" }, { ADD_LEN( MBEDTLS_OID_OCSP_SIGNING ), "id-kp-OCSPSigning", "OCSP Signing" }, { NULL, 0, NULL, NULL }, }; FN_OID_TYPED_FROM_ASN1(mbedtls_oid_descriptor_t, ext_key_usage, oid_ext_key_usage) FN_OID_GET_ATTR1(mbedtls_oid_get_extended_key_usage, mbedtls_oid_descriptor_t, ext_key_usage, const char *, description) #endif /* MBEDTLS_X509_USE_C || MBEDTLS_X509_CREATE_C */ #if defined(MBEDTLS_MD_C) /* * For SignatureAlgorithmIdentifier */ typedef struct { mbedtls_oid_descriptor_t descriptor; mbedtls_md_type_t md_alg; mbedtls_pk_type_t pk_alg; } oid_sig_alg_t; static const oid_sig_alg_t oid_sig_alg[] = { #if defined(MBEDTLS_RSA_C) #if defined(MBEDTLS_MD2_C) { { ADD_LEN( MBEDTLS_OID_PKCS1_MD2 ), "md2WithRSAEncryption", "RSA with MD2" }, MBEDTLS_MD_MD2, MBEDTLS_PK_RSA, }, #endif /* MBEDTLS_MD2_C */ #if defined(MBEDTLS_MD4_C) { { ADD_LEN( MBEDTLS_OID_PKCS1_MD4 ), "md4WithRSAEncryption", "RSA with MD4" }, MBEDTLS_MD_MD4, MBEDTLS_PK_RSA, }, #endif /* MBEDTLS_MD4_C */ #if defined(MBEDTLS_MD5_C) { { ADD_LEN( MBEDTLS_OID_PKCS1_MD5 ), "md5WithRSAEncryption", "RSA with MD5" }, MBEDTLS_MD_MD5, MBEDTLS_PK_RSA, }, #endif /* MBEDTLS_MD5_C */ #if defined(MBEDTLS_SHA1_C) { { ADD_LEN( MBEDTLS_OID_PKCS1_SHA1 ), "sha-1WithRSAEncryption", "RSA with SHA1" }, MBEDTLS_MD_SHA1, MBEDTLS_PK_RSA, }, #endif /* MBEDTLS_SHA1_C */ #if defined(MBEDTLS_SHA256_C) { { ADD_LEN( MBEDTLS_OID_PKCS1_SHA224 ), "sha224WithRSAEncryption", "RSA with SHA-224" }, MBEDTLS_MD_SHA224, MBEDTLS_PK_RSA, }, { { ADD_LEN( MBEDTLS_OID_PKCS1_SHA256 ), "sha256WithRSAEncryption", "RSA with SHA-256" }, MBEDTLS_MD_SHA256, MBEDTLS_PK_RSA, }, #endif /* MBEDTLS_SHA256_C */ #if defined(MBEDTLS_SHA512_C) { { ADD_LEN( MBEDTLS_OID_PKCS1_SHA384 ), "sha384WithRSAEncryption", "RSA with SHA-384" }, MBEDTLS_MD_SHA384, MBEDTLS_PK_RSA, }, { { ADD_LEN( MBEDTLS_OID_PKCS1_SHA512 ), "sha512WithRSAEncryption", "RSA with SHA-512" }, MBEDTLS_MD_SHA512, MBEDTLS_PK_RSA, }, #endif /* MBEDTLS_SHA512_C */ #if defined(MBEDTLS_SHA1_C) { { ADD_LEN( MBEDTLS_OID_RSA_SHA_OBS ), "sha-1WithRSAEncryption", "RSA with SHA1" }, MBEDTLS_MD_SHA1, MBEDTLS_PK_RSA, }, #endif /* MBEDTLS_SHA1_C */ #endif /* MBEDTLS_RSA_C */ #if defined(MBEDTLS_ECDSA_C) #if defined(MBEDTLS_SHA1_C) { { ADD_LEN( MBEDTLS_OID_ECDSA_SHA1 ), "ecdsa-with-SHA1", "ECDSA with SHA1" }, MBEDTLS_MD_SHA1, MBEDTLS_PK_ECDSA, }, #endif /* MBEDTLS_SHA1_C */ #if defined(MBEDTLS_SHA256_C) { { ADD_LEN( MBEDTLS_OID_ECDSA_SHA224 ), "ecdsa-with-SHA224", "ECDSA with SHA224" }, MBEDTLS_MD_SHA224, MBEDTLS_PK_ECDSA, }, { { ADD_LEN( MBEDTLS_OID_ECDSA_SHA256 ), "ecdsa-with-SHA256", "ECDSA with SHA256" }, MBEDTLS_MD_SHA256, MBEDTLS_PK_ECDSA, }, #endif /* MBEDTLS_SHA256_C */ #if defined(MBEDTLS_SHA512_C) { { ADD_LEN( MBEDTLS_OID_ECDSA_SHA384 ), "ecdsa-with-SHA384", "ECDSA with SHA384" }, MBEDTLS_MD_SHA384, MBEDTLS_PK_ECDSA, }, { { ADD_LEN( MBEDTLS_OID_ECDSA_SHA512 ), "ecdsa-with-SHA512", "ECDSA with SHA512" }, 
MBEDTLS_MD_SHA512, MBEDTLS_PK_ECDSA, }, #endif /* MBEDTLS_SHA512_C */ #endif /* MBEDTLS_ECDSA_C */ #if defined(MBEDTLS_RSA_C) { { ADD_LEN( MBEDTLS_OID_RSASSA_PSS ), "RSASSA-PSS", "RSASSA-PSS" }, MBEDTLS_MD_NONE, MBEDTLS_PK_RSASSA_PSS, }, #endif /* MBEDTLS_RSA_C */ { { NULL, 0, NULL, NULL }, MBEDTLS_MD_NONE, MBEDTLS_PK_NONE, }, }; FN_OID_TYPED_FROM_ASN1(oid_sig_alg_t, sig_alg, oid_sig_alg) FN_OID_GET_DESCRIPTOR_ATTR1(mbedtls_oid_get_sig_alg_desc, oid_sig_alg_t, sig_alg, const char *, description) FN_OID_GET_ATTR2(mbedtls_oid_get_sig_alg, oid_sig_alg_t, sig_alg, mbedtls_md_type_t, md_alg, mbedtls_pk_type_t, pk_alg) FN_OID_GET_OID_BY_ATTR2(mbedtls_oid_get_oid_by_sig_alg, oid_sig_alg_t, oid_sig_alg, mbedtls_pk_type_t, pk_alg, mbedtls_md_type_t, md_alg) #endif /* MBEDTLS_MD_C */ /* * For PublicKeyInfo (PKCS1, RFC 5480) */ typedef struct { mbedtls_oid_descriptor_t descriptor; mbedtls_pk_type_t pk_alg; } oid_pk_alg_t; static const oid_pk_alg_t oid_pk_alg[] = { { { ADD_LEN( MBEDTLS_OID_PKCS1_RSA ), "rsaEncryption", "RSA" }, MBEDTLS_PK_RSA, }, { { ADD_LEN( MBEDTLS_OID_EC_ALG_UNRESTRICTED ), "id-ecPublicKey", "Generic EC key" }, MBEDTLS_PK_ECKEY, }, { { ADD_LEN( MBEDTLS_OID_EC_ALG_ECDH ), "id-ecDH", "EC key for ECDH" }, MBEDTLS_PK_ECKEY_DH, }, { { NULL, 0, NULL, NULL }, MBEDTLS_PK_NONE, }, }; FN_OID_TYPED_FROM_ASN1(oid_pk_alg_t, pk_alg, oid_pk_alg) FN_OID_GET_ATTR1(mbedtls_oid_get_pk_alg, oid_pk_alg_t, pk_alg, mbedtls_pk_type_t, pk_alg) FN_OID_GET_OID_BY_ATTR1(mbedtls_oid_get_oid_by_pk_alg, oid_pk_alg_t, oid_pk_alg, mbedtls_pk_type_t, pk_alg) #if defined(MBEDTLS_ECP_C) /* * For namedCurve (RFC 5480) */ typedef struct { mbedtls_oid_descriptor_t descriptor; mbedtls_ecp_group_id grp_id; } oid_ecp_grp_t; static const oid_ecp_grp_t oid_ecp_grp[] = { #if defined(MBEDTLS_ECP_DP_SECP192R1_ENABLED) { { ADD_LEN( MBEDTLS_OID_EC_GRP_SECP192R1 ), "secp192r1", "secp192r1" }, MBEDTLS_ECP_DP_SECP192R1, }, #endif /* MBEDTLS_ECP_DP_SECP192R1_ENABLED */ #if defined(MBEDTLS_ECP_DP_SECP224R1_ENABLED) { { ADD_LEN( MBEDTLS_OID_EC_GRP_SECP224R1 ), "secp224r1", "secp224r1" }, MBEDTLS_ECP_DP_SECP224R1, }, #endif /* MBEDTLS_ECP_DP_SECP224R1_ENABLED */ #if defined(MBEDTLS_ECP_DP_SECP256R1_ENABLED) { { ADD_LEN( MBEDTLS_OID_EC_GRP_SECP256R1 ), "secp256r1", "secp256r1" }, MBEDTLS_ECP_DP_SECP256R1, }, #endif /* MBEDTLS_ECP_DP_SECP256R1_ENABLED */ #if defined(MBEDTLS_ECP_DP_SECP384R1_ENABLED) { { ADD_LEN( MBEDTLS_OID_EC_GRP_SECP384R1 ), "secp384r1", "secp384r1" }, MBEDTLS_ECP_DP_SECP384R1, }, #endif /* MBEDTLS_ECP_DP_SECP384R1_ENABLED */ #if defined(MBEDTLS_ECP_DP_SECP521R1_ENABLED) { { ADD_LEN( MBEDTLS_OID_EC_GRP_SECP521R1 ), "secp521r1", "secp521r1" }, MBEDTLS_ECP_DP_SECP521R1, }, #endif /* MBEDTLS_ECP_DP_SECP521R1_ENABLED */ #if defined(MBEDTLS_ECP_DP_SECP192K1_ENABLED) { { ADD_LEN( MBEDTLS_OID_EC_GRP_SECP192K1 ), "secp192k1", "secp192k1" }, MBEDTLS_ECP_DP_SECP192K1, }, #endif /* MBEDTLS_ECP_DP_SECP192K1_ENABLED */ #if defined(MBEDTLS_ECP_DP_SECP224K1_ENABLED) { { ADD_LEN( MBEDTLS_OID_EC_GRP_SECP224K1 ), "secp224k1", "secp224k1" }, MBEDTLS_ECP_DP_SECP224K1, }, #endif /* MBEDTLS_ECP_DP_SECP224K1_ENABLED */ #if defined(MBEDTLS_ECP_DP_SECP256K1_ENABLED) { { ADD_LEN( MBEDTLS_OID_EC_GRP_SECP256K1 ), "secp256k1", "secp256k1" }, MBEDTLS_ECP_DP_SECP256K1, }, #endif /* MBEDTLS_ECP_DP_SECP256K1_ENABLED */ #if defined(MBEDTLS_ECP_DP_BP256R1_ENABLED) { { ADD_LEN( MBEDTLS_OID_EC_GRP_BP256R1 ), "brainpoolP256r1","brainpool256r1" }, MBEDTLS_ECP_DP_BP256R1, }, #endif /* MBEDTLS_ECP_DP_BP256R1_ENABLED */ #if 
defined(MBEDTLS_ECP_DP_BP384R1_ENABLED) { { ADD_LEN( MBEDTLS_OID_EC_GRP_BP384R1 ), "brainpoolP384r1","brainpool384r1" }, MBEDTLS_ECP_DP_BP384R1, }, #endif /* MBEDTLS_ECP_DP_BP384R1_ENABLED */ #if defined(MBEDTLS_ECP_DP_BP512R1_ENABLED) { { ADD_LEN( MBEDTLS_OID_EC_GRP_BP512R1 ), "brainpoolP512r1","brainpool512r1" }, MBEDTLS_ECP_DP_BP512R1, }, #endif /* MBEDTLS_ECP_DP_BP512R1_ENABLED */ { { NULL, 0, NULL, NULL }, MBEDTLS_ECP_DP_NONE, }, }; FN_OID_TYPED_FROM_ASN1(oid_ecp_grp_t, grp_id, oid_ecp_grp) FN_OID_GET_ATTR1(mbedtls_oid_get_ec_grp, oid_ecp_grp_t, grp_id, mbedtls_ecp_group_id, grp_id) FN_OID_GET_OID_BY_ATTR1(mbedtls_oid_get_oid_by_ec_grp, oid_ecp_grp_t, oid_ecp_grp, mbedtls_ecp_group_id, grp_id) #endif /* MBEDTLS_ECP_C */ #if defined(MBEDTLS_CIPHER_C) /* * For PKCS#5 PBES2 encryption algorithm */ typedef struct { mbedtls_oid_descriptor_t descriptor; mbedtls_cipher_type_t cipher_alg; } oid_cipher_alg_t; static const oid_cipher_alg_t oid_cipher_alg[] = { { { ADD_LEN( MBEDTLS_OID_DES_CBC ), "desCBC", "DES-CBC" }, MBEDTLS_CIPHER_DES_CBC, }, { { ADD_LEN( MBEDTLS_OID_DES_EDE3_CBC ), "des-ede3-cbc", "DES-EDE3-CBC" }, MBEDTLS_CIPHER_DES_EDE3_CBC, }, { { NULL, 0, NULL, NULL }, MBEDTLS_CIPHER_NONE, }, }; FN_OID_TYPED_FROM_ASN1(oid_cipher_alg_t, cipher_alg, oid_cipher_alg) FN_OID_GET_ATTR1(mbedtls_oid_get_cipher_alg, oid_cipher_alg_t, cipher_alg, mbedtls_cipher_type_t, cipher_alg) #endif /* MBEDTLS_CIPHER_C */ #if defined(MBEDTLS_MD_C) /* * For digestAlgorithm */ typedef struct { mbedtls_oid_descriptor_t descriptor; mbedtls_md_type_t md_alg; } oid_md_alg_t; static const oid_md_alg_t oid_md_alg[] = { #if defined(MBEDTLS_MD2_C) { { ADD_LEN( MBEDTLS_OID_DIGEST_ALG_MD2 ), "id-md2", "MD2" }, MBEDTLS_MD_MD2, }, #endif /* MBEDTLS_MD2_C */ #if defined(MBEDTLS_MD4_C) { { ADD_LEN( MBEDTLS_OID_DIGEST_ALG_MD4 ), "id-md4", "MD4" }, MBEDTLS_MD_MD4, }, #endif /* MBEDTLS_MD4_C */ #if defined(MBEDTLS_MD5_C) { { ADD_LEN( MBEDTLS_OID_DIGEST_ALG_MD5 ), "id-md5", "MD5" }, MBEDTLS_MD_MD5, }, #endif /* MBEDTLS_MD5_C */ #if defined(MBEDTLS_SHA1_C) { { ADD_LEN( MBEDTLS_OID_DIGEST_ALG_SHA1 ), "id-sha1", "SHA-1" }, MBEDTLS_MD_SHA1, }, #endif /* MBEDTLS_SHA1_C */ #if defined(MBEDTLS_SHA256_C) { { ADD_LEN( MBEDTLS_OID_DIGEST_ALG_SHA224 ), "id-sha224", "SHA-224" }, MBEDTLS_MD_SHA224, }, { { ADD_LEN( MBEDTLS_OID_DIGEST_ALG_SHA256 ), "id-sha256", "SHA-256" }, MBEDTLS_MD_SHA256, }, #endif /* MBEDTLS_SHA256_C */ #if defined(MBEDTLS_SHA512_C) { { ADD_LEN( MBEDTLS_OID_DIGEST_ALG_SHA384 ), "id-sha384", "SHA-384" }, MBEDTLS_MD_SHA384, }, { { ADD_LEN( MBEDTLS_OID_DIGEST_ALG_SHA512 ), "id-sha512", "SHA-512" }, MBEDTLS_MD_SHA512, }, #endif /* MBEDTLS_SHA512_C */ { { NULL, 0, NULL, NULL }, MBEDTLS_MD_NONE, }, }; FN_OID_TYPED_FROM_ASN1(oid_md_alg_t, md_alg, oid_md_alg) FN_OID_GET_ATTR1(mbedtls_oid_get_md_alg, oid_md_alg_t, md_alg, mbedtls_md_type_t, md_alg) FN_OID_GET_OID_BY_ATTR1(mbedtls_oid_get_oid_by_md, oid_md_alg_t, oid_md_alg, mbedtls_md_type_t, md_alg) #endif /* MBEDTLS_MD_C */ #if defined(MBEDTLS_PKCS12_C) /* * For PKCS#12 PBEs */ typedef struct { mbedtls_oid_descriptor_t descriptor; mbedtls_md_type_t md_alg; mbedtls_cipher_type_t cipher_alg; } oid_pkcs12_pbe_alg_t; static const oid_pkcs12_pbe_alg_t oid_pkcs12_pbe_alg[] = { { { ADD_LEN( MBEDTLS_OID_PKCS12_PBE_SHA1_DES3_EDE_CBC ), "pbeWithSHAAnd3-KeyTripleDES-CBC", "PBE with SHA1 and 3-Key 3DES" }, MBEDTLS_MD_SHA1, MBEDTLS_CIPHER_DES_EDE3_CBC, }, { { ADD_LEN( MBEDTLS_OID_PKCS12_PBE_SHA1_DES2_EDE_CBC ), "pbeWithSHAAnd2-KeyTripleDES-CBC", "PBE with SHA1 and 2-Key 3DES" }, 
MBEDTLS_MD_SHA1, MBEDTLS_CIPHER_DES_EDE_CBC, }, { { NULL, 0, NULL, NULL }, MBEDTLS_MD_NONE, MBEDTLS_CIPHER_NONE, }, }; FN_OID_TYPED_FROM_ASN1(oid_pkcs12_pbe_alg_t, pkcs12_pbe_alg, oid_pkcs12_pbe_alg) FN_OID_GET_ATTR2(mbedtls_oid_get_pkcs12_pbe_alg, oid_pkcs12_pbe_alg_t, pkcs12_pbe_alg, mbedtls_md_type_t, md_alg, mbedtls_cipher_type_t, cipher_alg) #endif /* MBEDTLS_PKCS12_C */ #define OID_SAFE_SNPRINTF \ do { \ if( ret < 0 || (size_t) ret >= n ) \ return( MBEDTLS_ERR_OID_BUF_TOO_SMALL ); \ \ n -= (size_t) ret; \ p += (size_t) ret; \ } while( 0 ) /* Return the x.y.z.... style numeric string for the given OID */ int mbedtls_oid_get_numeric_string( char *buf, size_t size, const mbedtls_asn1_buf *oid ) { int ret; size_t i, n; unsigned int value; char *p; p = buf; n = size; /* First byte contains first two dots */ if( oid->len > 0 ) { ret = mbedtls_snprintf( p, n, "%d.%d", oid->p[0] / 40, oid->p[0] % 40 ); OID_SAFE_SNPRINTF; } value = 0; for( i = 1; i < oid->len; i++ ) { /* Prevent overflow in value. */ if( ( ( value << 7 ) >> 7 ) != value ) return( MBEDTLS_ERR_OID_BUF_TOO_SMALL ); value <<= 7; value += oid->p[i] & 0x7F; if( !( oid->p[i] & 0x80 ) ) { /* Last byte */ ret = mbedtls_snprintf( p, n, ".%d", value ); OID_SAFE_SNPRINTF; value = 0; } } return( (int) ( size - n ) ); } #endif /* MBEDTLS_OID_C */ backport-iwlwifi-dkms-7906/compat/verification/pkcs7.asn1.c000066400000000000000000000261611351064634500237100ustar00rootroot00000000000000/* * Automatically generated by asn1_compiler. Do not edit * * ASN.1 parser for pkcs7 */ #include #include "pkcs7.asn1.h" enum pkcs7_actions { ACT_pkcs7_check_content_type = 0, ACT_pkcs7_extract_cert = 1, ACT_pkcs7_note_OID = 2, ACT_pkcs7_note_certificate_list = 3, ACT_pkcs7_note_content = 4, ACT_pkcs7_note_data = 5, ACT_pkcs7_note_signed_info = 6, ACT_pkcs7_note_signeddata_version = 7, ACT_pkcs7_note_signerinfo_version = 8, ACT_pkcs7_sig_note_authenticated_attr = 9, ACT_pkcs7_sig_note_digest_algo = 10, ACT_pkcs7_sig_note_issuer = 11, ACT_pkcs7_sig_note_pkey_algo = 12, ACT_pkcs7_sig_note_serial = 13, ACT_pkcs7_sig_note_set_of_authattrs = 14, ACT_pkcs7_sig_note_signature = 15, ACT_pkcs7_sig_note_skid = 16, NR__pkcs7_actions = 17 }; static const asn1_action_t pkcs7_action_table[NR__pkcs7_actions] = { [ 0] = pkcs7_check_content_type, [ 1] = pkcs7_extract_cert, [ 2] = pkcs7_note_OID, [ 3] = pkcs7_note_certificate_list, [ 4] = pkcs7_note_content, [ 5] = pkcs7_note_data, [ 6] = pkcs7_note_signed_info, [ 7] = pkcs7_note_signeddata_version, [ 8] = pkcs7_note_signerinfo_version, [ 9] = pkcs7_sig_note_authenticated_attr, [ 10] = pkcs7_sig_note_digest_algo, [ 11] = pkcs7_sig_note_issuer, [ 12] = pkcs7_sig_note_pkey_algo, [ 13] = pkcs7_sig_note_serial, [ 14] = pkcs7_sig_note_set_of_authattrs, [ 15] = pkcs7_sig_note_signature, [ 16] = pkcs7_sig_note_skid, }; static const unsigned char pkcs7_machine[] = { // PKCS7ContentInfo [ 0] = ASN1_OP_MATCH, [ 1] = _tag(UNIV, CONS, SEQ), // ContentType [ 2] = ASN1_OP_MATCH_ACT, [ 3] = _tag(UNIV, PRIM, OID), [ 4] = _action(ACT_pkcs7_note_OID), [ 5] = ASN1_OP_ACT, [ 6] = _action(ACT_pkcs7_check_content_type), [ 7] = ASN1_OP_MATCH_JUMP_OR_SKIP, // content [ 8] = _tagn(CONT, CONS, 0), [ 9] = _jump_target(12), [ 10] = ASN1_OP_END_SEQ, [ 11] = ASN1_OP_COMPLETE, // SignedData [ 12] = ASN1_OP_MATCH, [ 13] = _tag(UNIV, CONS, SEQ), [ 14] = ASN1_OP_MATCH_ACT, // version [ 15] = _tag(UNIV, PRIM, INT), [ 16] = _action(ACT_pkcs7_note_signeddata_version), // DigestAlgorithmIdentifiers [ 17] = ASN1_OP_MATCH_JUMP_OR_SKIP, // daSet [ 18] = _tag(UNIV, 
CONS, SET), [ 19] = _jump_target(61), [ 20] = ASN1_OP_COND_MATCH_JUMP_OR_SKIP, // daSequence [ 21] = _tag(UNIV, CONS, SEQ), [ 22] = _jump_target(67), [ 23] = ASN1_OP_COND_FAIL, // ContentInfo [ 24] = ASN1_OP_MATCH, [ 25] = _tag(UNIV, CONS, SEQ), // ContentType [ 26] = ASN1_OP_MATCH_ACT, [ 27] = _tag(UNIV, PRIM, OID), [ 28] = _action(ACT_pkcs7_note_OID), [ 29] = ASN1_OP_ACT, [ 30] = _action(ACT_pkcs7_note_OID), [ 31] = ASN1_OP_MATCH_JUMP_OR_SKIP, // content [ 32] = _tagn(CONT, CONS, 0), [ 33] = _jump_target(73), [ 34] = ASN1_OP_END_SEQ, [ 35] = ASN1_OP_ACT, [ 36] = _action(ACT_pkcs7_note_content), // ExtendedCertificatesAndCertificates [ 37] = ASN1_OP_MATCH_JUMP_OR_SKIP, // certSet [ 38] = _tagn(CONT, CONS, 0), [ 39] = _jump_target(77), // --> ExtendedCertificatesAndCertificates // Certificates [ 40] = ASN1_OP_COND_MATCH_JUMP_OR_SKIP, // certSequence [ 41] = _tagn(CONT, CONS, 2), [ 42] = _jump_target(85), // --> Certificates [ 43] = ASN1_OP_ACT, [ 44] = _action(ACT_pkcs7_note_certificate_list), // CertificateRevocationLists [ 45] = ASN1_OP_MATCH_JUMP_OR_SKIP, // crlSet [ 46] = _tagn(CONT, CONS, 1), [ 47] = _jump_target(90), // --> CertificateRevocationLists // CRLSequence [ 48] = ASN1_OP_COND_MATCH_JUMP_OR_SKIP, // crlSequence [ 49] = _tagn(CONT, CONS, 3), [ 50] = _jump_target(96), // --> CRLSequence // SignerInfos [ 51] = ASN1_OP_MATCH_JUMP_OR_SKIP, // siSet [ 52] = _tag(UNIV, CONS, SET), [ 53] = _jump_target(102), [ 54] = ASN1_OP_COND_MATCH_JUMP_OR_SKIP, // siSequence [ 55] = _tag(UNIV, CONS, SEQ), [ 56] = _jump_target(108), [ 57] = ASN1_OP_COND_FAIL, [ 58] = ASN1_OP_END_SEQ, [ 59] = ASN1_OP_END_SEQ, [ 60] = ASN1_OP_RETURN, // DigestAlgorithmIdentifier [ 61] = ASN1_OP_MATCH_JUMP, [ 62] = _tag(UNIV, CONS, SEQ), [ 63] = _jump_target(114), // --> DigestAlgorithmIdentifier [ 64] = ASN1_OP_END_SET_OF, [ 65] = _jump_target(61), [ 66] = ASN1_OP_RETURN, // DigestAlgorithmIdentifier [ 67] = ASN1_OP_MATCH_JUMP, [ 68] = _tag(UNIV, CONS, SEQ), [ 69] = _jump_target(114), // --> DigestAlgorithmIdentifier [ 70] = ASN1_OP_END_SEQ_OF, [ 71] = _jump_target(67), [ 72] = ASN1_OP_RETURN, // Data [ 73] = ASN1_OP_MATCH_ANY_ACT, [ 74] = _action(ACT_pkcs7_note_data), [ 75] = ASN1_OP_END_SEQ, [ 76] = ASN1_OP_RETURN, // ExtendedCertificateOrCertificate // Certificate [ 77] = ASN1_OP_MATCH_ANY_ACT_OR_SKIP, [ 78] = _action(ACT_pkcs7_extract_cert), // ExtendedCertificate // Certificate [ 79] = ASN1_OP_COND_MATCH_ANY_ACT_OR_SKIP, [ 80] = _action(ACT_pkcs7_extract_cert), [ 81] = ASN1_OP_COND_FAIL, [ 82] = ASN1_OP_END_SET_OF, [ 83] = _jump_target(77), [ 84] = ASN1_OP_RETURN, // Certificate [ 85] = ASN1_OP_MATCH_ANY_ACT, [ 86] = _action(ACT_pkcs7_extract_cert), [ 87] = ASN1_OP_END_SEQ_OF, [ 88] = _jump_target(85), [ 89] = ASN1_OP_RETURN, // CertificateList [ 90] = ASN1_OP_MATCH_JUMP, [ 91] = _tag(UNIV, CONS, SEQ), [ 92] = _jump_target(120), // --> CertificateList [ 93] = ASN1_OP_END_SET_OF, [ 94] = _jump_target(90), [ 95] = ASN1_OP_RETURN, // CertificateList [ 96] = ASN1_OP_MATCH_JUMP, [ 97] = _tag(UNIV, CONS, SEQ), [ 98] = _jump_target(120), // --> CertificateList [ 99] = ASN1_OP_END_SEQ_OF, [ 100] = _jump_target(96), [ 101] = ASN1_OP_RETURN, // SignerInfo [ 102] = ASN1_OP_MATCH_JUMP, [ 103] = _tag(UNIV, CONS, SEQ), [ 104] = _jump_target(125), // --> SignerInfo [ 105] = ASN1_OP_END_SET_OF, [ 106] = _jump_target(102), [ 107] = ASN1_OP_RETURN, // SignerInfo [ 108] = ASN1_OP_MATCH_JUMP, [ 109] = _tag(UNIV, CONS, SEQ), [ 110] = _jump_target(125), // --> SignerInfo [ 111] = ASN1_OP_END_SEQ_OF, [ 112] = _jump_target(108), [ 
113] = ASN1_OP_RETURN, [ 114] = ASN1_OP_MATCH_ACT, // algorithm [ 115] = _tag(UNIV, PRIM, OID), [ 116] = _action(ACT_pkcs7_note_OID), [ 117] = ASN1_OP_MATCH_ANY_OR_SKIP, // parameters [ 118] = ASN1_OP_END_SEQ, [ 119] = ASN1_OP_RETURN, // Certificate [ 120] = ASN1_OP_MATCH_ANY_ACT, [ 121] = _action(ACT_pkcs7_extract_cert), [ 122] = ASN1_OP_END_SEQ_OF, [ 123] = _jump_target(120), [ 124] = ASN1_OP_RETURN, [ 125] = ASN1_OP_MATCH_ACT, // version [ 126] = _tag(UNIV, PRIM, INT), [ 127] = _action(ACT_pkcs7_note_signerinfo_version), // SignerIdentifier // IssuerAndSerialNumber [ 128] = ASN1_OP_MATCH_JUMP_OR_SKIP, // issuerAndSerialNumber [ 129] = _tag(UNIV, CONS, SEQ), [ 130] = _jump_target(169), // --> IssuerAndSerialNumber // SubjectKeyIdentifier [ 131] = ASN1_OP_COND_MATCH_ACT_OR_SKIP, // subjectKeyIdentifier [ 132] = _tagn(CONT, PRIM, 0), [ 133] = _action(ACT_pkcs7_sig_note_skid), [ 134] = ASN1_OP_COND_FAIL, // DigestAlgorithmIdentifier [ 135] = ASN1_OP_MATCH_JUMP, [ 136] = _tag(UNIV, CONS, SEQ), [ 137] = _jump_target(114), // --> DigestAlgorithmIdentifier [ 138] = ASN1_OP_ACT, [ 139] = _action(ACT_pkcs7_sig_note_digest_algo), // SetOfAuthenticatedAttribute [ 140] = ASN1_OP_MATCH_JUMP_OR_SKIP, // aaSet [ 141] = _tagn(CONT, CONS, 0), [ 142] = _jump_target(192), // --> SetOfAuthenticatedAttribute [ 143] = ASN1_OP_MAYBE_ACT, [ 144] = _action(ACT_pkcs7_sig_note_set_of_authattrs), [ 145] = ASN1_OP_COND_MATCH_JUMP_OR_SKIP, // aaSequence [ 146] = _tagn(CONT, CONS, 2), [ 147] = _jump_target(198), // DigestEncryptionAlgorithmIdentifier [ 148] = ASN1_OP_MATCH, [ 149] = _tag(UNIV, CONS, SEQ), [ 150] = ASN1_OP_MATCH_ACT, // algorithm [ 151] = _tag(UNIV, PRIM, OID), [ 152] = _action(ACT_pkcs7_note_OID), [ 153] = ASN1_OP_MATCH_ANY_OR_SKIP, // parameters [ 154] = ASN1_OP_END_SEQ, [ 155] = ASN1_OP_ACT, [ 156] = _action(ACT_pkcs7_sig_note_pkey_algo), // EncryptedDigest [ 157] = ASN1_OP_MATCH_ACT, [ 158] = _tag(UNIV, PRIM, OTS), [ 159] = _action(ACT_pkcs7_sig_note_signature), [ 160] = ASN1_OP_MATCH_JUMP_OR_SKIP, // uaSet [ 161] = _tagn(CONT, CONS, 1), [ 162] = _jump_target(207), [ 163] = ASN1_OP_COND_MATCH_JUMP_OR_SKIP, // uaSequence [ 164] = _tagn(CONT, CONS, 3), [ 165] = _jump_target(213), [ 166] = ASN1_OP_END_SEQ_ACT, [ 167] = _action(ACT_pkcs7_note_signed_info), [ 168] = ASN1_OP_RETURN, // Name [ 169] = ASN1_OP_MATCH, [ 170] = _tag(UNIV, CONS, SEQ), // RelativeDistinguishedName [ 171] = ASN1_OP_MATCH, [ 172] = _tag(UNIV, CONS, SET), // AttributeValueAssertion [ 173] = ASN1_OP_MATCH, [ 174] = _tag(UNIV, CONS, SEQ), [ 175] = ASN1_OP_MATCH_ACT, // attributeType [ 176] = _tag(UNIV, PRIM, OID), [ 177] = _action(ACT_pkcs7_note_OID), [ 178] = ASN1_OP_MATCH_ANY, // attributeValue [ 179] = ASN1_OP_END_SEQ, [ 180] = ASN1_OP_END_SET_OF, [ 181] = _jump_target(173), [ 182] = ASN1_OP_END_SEQ_OF, [ 183] = _jump_target(171), [ 184] = ASN1_OP_ACT, [ 185] = _action(ACT_pkcs7_sig_note_issuer), // CertificateSerialNumber [ 186] = ASN1_OP_MATCH, [ 187] = _tag(UNIV, PRIM, INT), [ 188] = ASN1_OP_ACT, [ 189] = _action(ACT_pkcs7_sig_note_serial), [ 190] = ASN1_OP_END_SEQ, [ 191] = ASN1_OP_RETURN, // AuthenticatedAttribute [ 192] = ASN1_OP_MATCH_JUMP, [ 193] = _tag(UNIV, CONS, SEQ), [ 194] = _jump_target(219), // --> AuthenticatedAttribute [ 195] = ASN1_OP_END_SET_OF, [ 196] = _jump_target(192), [ 197] = ASN1_OP_RETURN, [ 198] = ASN1_OP_MATCH, // aaSequence [ 199] = _tag(UNIV, CONS, SEQ), // AuthenticatedAttribute [ 200] = ASN1_OP_MATCH_JUMP, [ 201] = _tag(UNIV, CONS, SEQ), [ 202] = _jump_target(219), // --> AuthenticatedAttribute [ 
203] = ASN1_OP_END_SEQ_OF, [ 204] = _jump_target(200), [ 205] = ASN1_OP_END_SEQ, [ 206] = ASN1_OP_RETURN, // UnauthenticatedAttribute [ 207] = ASN1_OP_MATCH_JUMP, [ 208] = _tag(UNIV, CONS, SEQ), [ 209] = _jump_target(230), // --> UnauthenticatedAttribute [ 210] = ASN1_OP_END_SET_OF, [ 211] = _jump_target(207), [ 212] = ASN1_OP_RETURN, // UnauthenticatedAttribute [ 213] = ASN1_OP_MATCH_JUMP, [ 214] = _tag(UNIV, CONS, SEQ), [ 215] = _jump_target(230), // --> UnauthenticatedAttribute [ 216] = ASN1_OP_END_SEQ_OF, [ 217] = _jump_target(213), [ 218] = ASN1_OP_RETURN, [ 219] = ASN1_OP_MATCH_ACT, // type [ 220] = _tag(UNIV, PRIM, OID), [ 221] = _action(ACT_pkcs7_note_OID), [ 222] = ASN1_OP_MATCH, // values [ 223] = _tag(UNIV, CONS, SET), [ 224] = ASN1_OP_MATCH_ANY_ACT, [ 225] = _action(ACT_pkcs7_sig_note_authenticated_attr), [ 226] = ASN1_OP_END_SET_OF, [ 227] = _jump_target(224), [ 228] = ASN1_OP_END_SEQ, [ 229] = ASN1_OP_RETURN, [ 230] = ASN1_OP_MATCH, // type [ 231] = _tag(UNIV, PRIM, OID), [ 232] = ASN1_OP_MATCH, // values [ 233] = _tag(UNIV, CONS, SET), [ 234] = ASN1_OP_MATCH_ANY, [ 235] = ASN1_OP_END_SET_OF, [ 236] = _jump_target(234), [ 237] = ASN1_OP_END_SEQ, [ 238] = ASN1_OP_RETURN, }; const struct asn1_decoder pkcs7_decoder = { .machine = pkcs7_machine, .machlen = sizeof(pkcs7_machine), .actions = pkcs7_action_table, }; backport-iwlwifi-dkms-7906/compat/verification/pkcs7.asn1.h000066400000000000000000000032361351064634500237130ustar00rootroot00000000000000/* * Automatically generated by asn1_compiler. Do not edit * * ASN.1 parser for pkcs7 */ #include extern const struct asn1_decoder pkcs7_decoder; extern int pkcs7_check_content_type(void *, size_t, unsigned char, const void *, size_t); extern int pkcs7_extract_cert(void *, size_t, unsigned char, const void *, size_t); extern int pkcs7_note_OID(void *, size_t, unsigned char, const void *, size_t); extern int pkcs7_note_certificate_list(void *, size_t, unsigned char, const void *, size_t); extern int pkcs7_note_content(void *, size_t, unsigned char, const void *, size_t); extern int pkcs7_note_data(void *, size_t, unsigned char, const void *, size_t); extern int pkcs7_note_signed_info(void *, size_t, unsigned char, const void *, size_t); extern int pkcs7_note_signeddata_version(void *, size_t, unsigned char, const void *, size_t); extern int pkcs7_note_signerinfo_version(void *, size_t, unsigned char, const void *, size_t); extern int pkcs7_sig_note_authenticated_attr(void *, size_t, unsigned char, const void *, size_t); extern int pkcs7_sig_note_digest_algo(void *, size_t, unsigned char, const void *, size_t); extern int pkcs7_sig_note_issuer(void *, size_t, unsigned char, const void *, size_t); extern int pkcs7_sig_note_pkey_algo(void *, size_t, unsigned char, const void *, size_t); extern int pkcs7_sig_note_serial(void *, size_t, unsigned char, const void *, size_t); extern int pkcs7_sig_note_set_of_authattrs(void *, size_t, unsigned char, const void *, size_t); extern int pkcs7_sig_note_signature(void *, size_t, unsigned char, const void *, size_t); extern int pkcs7_sig_note_skid(void *, size_t, unsigned char, const void *, size_t); backport-iwlwifi-dkms-7906/compat/verification/pkcs7_parser.c000066400000000000000000000407271351064634500244270ustar00rootroot00000000000000/* PKCS#7 parser * * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. 
* Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public Licence * as published by the Free Software Foundation; either version * 2 of the Licence, or (at your option) any later version. */ #define pr_fmt(fmt) "PKCS7: "fmt #include #include #include #include #include #include #include #include "pkcs7_parser.h" #include "pkcs7.asn1.h" MODULE_DESCRIPTION("PKCS#7 parser"); MODULE_AUTHOR("Red Hat, Inc."); MODULE_LICENSE("GPL"); struct pkcs7_parse_context { struct pkcs7_message *msg; /* Message being constructed */ struct pkcs7_signed_info *sinfo; /* SignedInfo being constructed */ struct pkcs7_signed_info **ppsinfo; struct x509_certificate *certs; /* Certificate cache */ struct x509_certificate **ppcerts; unsigned long data; /* Start of data */ enum OID last_oid; /* Last OID encountered */ unsigned x509_index; unsigned sinfo_index; const void *raw_serial; unsigned raw_serial_size; unsigned raw_issuer_size; const void *raw_issuer; const void *raw_skid; unsigned raw_skid_size; bool expect_skid; }; /* * Free a signed information block. */ static void pkcs7_free_signed_info(struct pkcs7_signed_info *sinfo) { if (sinfo) { public_key_signature_free(sinfo->sig); kfree(sinfo); } } /** * pkcs7_free_message - Free a PKCS#7 message * @pkcs7: The PKCS#7 message to free */ void pkcs7_free_message(struct pkcs7_message *pkcs7) { struct x509_certificate *cert; struct pkcs7_signed_info *sinfo; if (pkcs7) { while (pkcs7->certs) { cert = pkcs7->certs; pkcs7->certs = cert->next; x509_free_certificate(cert); } while (pkcs7->crl) { cert = pkcs7->crl; pkcs7->crl = cert->next; x509_free_certificate(cert); } while (pkcs7->signed_infos) { sinfo = pkcs7->signed_infos; pkcs7->signed_infos = sinfo->next; pkcs7_free_signed_info(sinfo); } kfree(pkcs7); } } EXPORT_SYMBOL_GPL(pkcs7_free_message); /* * Check authenticatedAttributes are provided or not provided consistently. 
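 *
 * The rule enforced below is all-or-nothing across the message: either
 * every SignerInfo carries authenticatedAttributes or none of them may.
 * An illustrative trace of the check (not taken from this file):
 *
 *	SignerInfo[0].authattrs present -> want = true, have_authattrs = true
 *	SignerInfo[1].authattrs absent  -> !!authattrs != want -> -EINVAL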
*/ static int pkcs7_check_authattrs(struct pkcs7_message *msg) { struct pkcs7_signed_info *sinfo; bool want = false; sinfo = msg->signed_infos; if (!sinfo) goto inconsistent; if (sinfo->authattrs) { want = true; msg->have_authattrs = true; } for (sinfo = sinfo->next; sinfo; sinfo = sinfo->next) if (!!sinfo->authattrs != want) goto inconsistent; return 0; inconsistent: pr_warn("Inconsistently supplied authAttrs\n"); return -EINVAL; } /** * pkcs7_parse_message - Parse a PKCS#7 message * @data: The raw binary ASN.1 encoded message to be parsed * @datalen: The size of the encoded message */ struct pkcs7_message *pkcs7_parse_message(const void *data, size_t datalen) { struct pkcs7_parse_context *ctx; struct pkcs7_message *msg = ERR_PTR(-ENOMEM); int ret; ctx = kzalloc(sizeof(struct pkcs7_parse_context), GFP_KERNEL); if (!ctx) goto out_no_ctx; ctx->msg = kzalloc(sizeof(struct pkcs7_message), GFP_KERNEL); if (!ctx->msg) goto out_no_msg; ctx->sinfo = kzalloc(sizeof(struct pkcs7_signed_info), GFP_KERNEL); if (!ctx->sinfo) goto out_no_sinfo; ctx->sinfo->sig = kzalloc(sizeof(struct public_key_signature), GFP_KERNEL); if (!ctx->sinfo->sig) goto out_no_sig; ctx->data = (unsigned long)data; ctx->ppcerts = &ctx->certs; ctx->ppsinfo = &ctx->msg->signed_infos; /* Attempt to decode the signature */ ret = asn1_ber_decoder(&pkcs7_decoder, ctx, data, datalen); if (ret < 0) { msg = ERR_PTR(ret); goto out; } ret = pkcs7_check_authattrs(ctx->msg); if (ret < 0) { msg = ERR_PTR(ret); goto out; } msg = ctx->msg; ctx->msg = NULL; out: while (ctx->certs) { struct x509_certificate *cert = ctx->certs; ctx->certs = cert->next; x509_free_certificate(cert); } out_no_sig: pkcs7_free_signed_info(ctx->sinfo); out_no_sinfo: pkcs7_free_message(ctx->msg); out_no_msg: kfree(ctx); out_no_ctx: return msg; } EXPORT_SYMBOL_GPL(pkcs7_parse_message); /** * pkcs7_get_content_data - Get access to the PKCS#7 content * @pkcs7: The preparsed PKCS#7 message to access * @_data: Place to return a pointer to the data * @_data_len: Place to return the data length * @_headerlen: Size of ASN.1 header not included in _data * * Get access to the data content of the PKCS#7 message. The size of the * header of the ASN.1 object that contains it is also provided and can be used * to adjust *_data and *_data_len to get the entire object. * * Returns -ENODATA if the data object was missing from the message. */ int pkcs7_get_content_data(const struct pkcs7_message *pkcs7, const void **_data, size_t *_data_len, size_t *_headerlen) { if (!pkcs7->data) return -ENODATA; *_data = pkcs7->data; *_data_len = pkcs7->data_len; if (_headerlen) *_headerlen = pkcs7->data_hdrlen; return 0; } EXPORT_SYMBOL_GPL(pkcs7_get_content_data); /* * Note an OID when we find one for later processing when we know how * to interpret it. */ int pkcs7_note_OID(void *context, size_t hdrlen, unsigned char tag, const void *value, size_t vlen) { struct pkcs7_parse_context *ctx = context; ctx->last_oid = look_up_OID(value, vlen); if (ctx->last_oid == OID__NR) { char buffer[50]; sprint_oid(value, vlen, buffer, sizeof(buffer)); printk("PKCS7: Unknown OID: [%lu] %s\n", (unsigned long)value - ctx->data, buffer); } return 0; } /* * Note the digest algorithm for the signature. 
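 *
 * This maps the OID recorded by pkcs7_note_OID() to the textual name
 * that sig->hash_algo carries and that pkcs7_digest() later hands to
 * crypto_alloc_shash().  From the switch below:
 *
 *	OID_sha256 -> "sha256",  OID_sha1 -> "sha1",  unknown -> -ENOPKG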
*/ int pkcs7_sig_note_digest_algo(void *context, size_t hdrlen, unsigned char tag, const void *value, size_t vlen) { struct pkcs7_parse_context *ctx = context; switch (ctx->last_oid) { case OID_md4: ctx->sinfo->sig->hash_algo = "md4"; break; case OID_md5: ctx->sinfo->sig->hash_algo = "md5"; break; case OID_sha1: ctx->sinfo->sig->hash_algo = "sha1"; break; case OID_sha256: ctx->sinfo->sig->hash_algo = "sha256"; break; case OID_sha384: ctx->sinfo->sig->hash_algo = "sha384"; break; case OID_sha512: ctx->sinfo->sig->hash_algo = "sha512"; break; case OID_sha224: ctx->sinfo->sig->hash_algo = "sha224"; break; default: printk("Unsupported digest algo: %u\n", ctx->last_oid); return -ENOPKG; } return 0; } /* * Note the public key algorithm for the signature. */ int pkcs7_sig_note_pkey_algo(void *context, size_t hdrlen, unsigned char tag, const void *value, size_t vlen) { struct pkcs7_parse_context *ctx = context; switch (ctx->last_oid) { case OID_rsaEncryption: ctx->sinfo->sig->pkey_algo = "rsa"; ctx->sinfo->sig->encoding = "pkcs1"; break; default: printk("Unsupported pkey algo: %u\n", ctx->last_oid); return -ENOPKG; } return 0; } /* * We only support signed data [RFC2315 sec 9]. */ int pkcs7_check_content_type(void *context, size_t hdrlen, unsigned char tag, const void *value, size_t vlen) { struct pkcs7_parse_context *ctx = context; if (ctx->last_oid != OID_signed_data) { pr_warn("Only support pkcs7_signedData type\n"); return -EINVAL; } return 0; } /* * Note the SignedData version */ int pkcs7_note_signeddata_version(void *context, size_t hdrlen, unsigned char tag, const void *value, size_t vlen) { struct pkcs7_parse_context *ctx = context; unsigned version; if (vlen != 1) goto unsupported; ctx->msg->version = version = *(const u8 *)value; switch (version) { case 1: /* PKCS#7 SignedData [RFC2315 sec 9.1] * CMS ver 1 SignedData [RFC5652 sec 5.1] */ break; case 3: /* CMS ver 3 SignedData [RFC2315 sec 5.1] */ break; default: goto unsupported; } return 0; unsupported: pr_warn("Unsupported SignedData version\n"); return -EINVAL; } /* * Note the SignerInfo version */ int pkcs7_note_signerinfo_version(void *context, size_t hdrlen, unsigned char tag, const void *value, size_t vlen) { struct pkcs7_parse_context *ctx = context; unsigned version; if (vlen != 1) goto unsupported; version = *(const u8 *)value; switch (version) { case 1: /* PKCS#7 SignerInfo [RFC2315 sec 9.2] * CMS ver 1 SignerInfo [RFC5652 sec 5.3] */ if (ctx->msg->version != 1) goto version_mismatch; ctx->expect_skid = false; break; case 3: /* CMS ver 3 SignerInfo [RFC2315 sec 5.3] */ if (ctx->msg->version == 1) goto version_mismatch; ctx->expect_skid = true; break; default: goto unsupported; } return 0; unsupported: pr_warn("Unsupported SignerInfo version\n"); return -EINVAL; version_mismatch: pr_warn("SignedData-SignerInfo version mismatch\n"); return -EBADMSG; } /* * Extract a certificate and store it in the context. */ int pkcs7_extract_cert(void *context, size_t hdrlen, unsigned char tag, const void *value, size_t vlen) { struct pkcs7_parse_context *ctx = context; struct x509_certificate *x509; if (tag != ((ASN1_UNIV << 6) | ASN1_CONS_BIT | ASN1_SEQ)) { pr_debug("Cert began with tag %02x at %lu\n", tag, (unsigned long)ctx - ctx->data); return -EBADMSG; } /* We have to correct for the header so that the X.509 parser can start * from the beginning. Note that since X.509 stipulates DER, there * probably shouldn't be an EOC trailer - but it is in PKCS#7 (which * stipulates BER). 
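 *
 * A worked example of the rewind below: a certificate encoded as
 * 30 82 04 a1 <0x4a1 content octets> reaches us with value pointing
 * just past the 4-byte header and vlen == 0x4a1; backing up by hdrlen
 * restores the whole TLV that x509_cert_parse() expects.  A second
 * octet of 0x80 marks BER indefinite length, hence the two extra
 * bytes allowed for the end-of-contents marker.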
*/ value -= hdrlen; vlen += hdrlen; if (((u8*)value)[1] == 0x80) vlen += 2; /* Indefinite length - there should be an EOC */ x509 = x509_cert_parse(value, vlen); if (IS_ERR(x509)) return PTR_ERR(x509); x509->index = ++ctx->x509_index; pr_debug("Got cert %u for %s\n", x509->index, x509->subject); pr_debug("- fingerprint %*phN\n", x509->id->len, x509->id->data); *ctx->ppcerts = x509; ctx->ppcerts = &x509->next; return 0; } /* * Save the certificate list */ int pkcs7_note_certificate_list(void *context, size_t hdrlen, unsigned char tag, const void *value, size_t vlen) { struct pkcs7_parse_context *ctx = context; pr_devel("Got cert list (%02x)\n", tag); *ctx->ppcerts = ctx->msg->certs; ctx->msg->certs = ctx->certs; ctx->certs = NULL; ctx->ppcerts = &ctx->certs; return 0; } /* * Note the content type. */ int pkcs7_note_content(void *context, size_t hdrlen, unsigned char tag, const void *value, size_t vlen) { struct pkcs7_parse_context *ctx = context; if (ctx->last_oid != OID_data && ctx->last_oid != OID_msIndirectData) { pr_warn("Unsupported data type %d\n", ctx->last_oid); return -EINVAL; } ctx->msg->data_type = ctx->last_oid; return 0; } /* * Extract the data from the message and store that and its content type OID in * the context. */ int pkcs7_note_data(void *context, size_t hdrlen, unsigned char tag, const void *value, size_t vlen) { struct pkcs7_parse_context *ctx = context; pr_debug("Got data\n"); ctx->msg->data = value; ctx->msg->data_len = vlen; ctx->msg->data_hdrlen = hdrlen; return 0; } /* * Parse authenticated attributes. */ int pkcs7_sig_note_authenticated_attr(void *context, size_t hdrlen, unsigned char tag, const void *value, size_t vlen) { struct pkcs7_parse_context *ctx = context; struct pkcs7_signed_info *sinfo = ctx->sinfo; enum OID content_type; pr_devel("AuthAttr: %02x %zu [%*ph]\n", tag, vlen, (unsigned)vlen, value); switch (ctx->last_oid) { case OID_contentType: if (__test_and_set_bit(sinfo_has_content_type, &sinfo->aa_set)) goto repeated; content_type = look_up_OID(value, vlen); if (content_type != ctx->msg->data_type) { pr_warn("Mismatch between global data type (%d) and sinfo %u (%d)\n", ctx->msg->data_type, sinfo->index, content_type); return -EBADMSG; } return 0; case OID_signingTime: if (__test_and_set_bit(sinfo_has_signing_time, &sinfo->aa_set)) goto repeated; /* Should we check that the signing time is consistent * with the signer's X.509 cert? */ return x509_decode_time(&sinfo->signing_time, hdrlen, tag, value, vlen); case OID_messageDigest: if (__test_and_set_bit(sinfo_has_message_digest, &sinfo->aa_set)) goto repeated; if (tag != ASN1_OTS) return -EBADMSG; sinfo->msgdigest = value; sinfo->msgdigest_len = vlen; return 0; case OID_smimeCapabilites: if (__test_and_set_bit(sinfo_has_smime_caps, &sinfo->aa_set)) goto repeated; if (ctx->msg->data_type != OID_msIndirectData) { pr_warn("S/MIME Caps only allowed with Authenticode\n"); return -EKEYREJECTED; } return 0; /* Microsoft SpOpusInfo seems to be contain cont[0] 16-bit BE * char URLs and cont[1] 8-bit char URLs. * * Microsoft StatementType seems to contain a list of OIDs that * are also used as extendedKeyUsage types in X.509 certs. 
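 *
 * Neither attribute is interpreted beyond being recorded in aa_set;
 * both are merely tolerated, and only when the content type is
 * Authenticode (OID_msIndirectData), as checked below.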
*/ case OID_msSpOpusInfo: if (__test_and_set_bit(sinfo_has_ms_opus_info, &sinfo->aa_set)) goto repeated; goto authenticode_check; case OID_msStatementType: if (__test_and_set_bit(sinfo_has_ms_statement_type, &sinfo->aa_set)) goto repeated; authenticode_check: if (ctx->msg->data_type != OID_msIndirectData) { pr_warn("Authenticode AuthAttrs only allowed with Authenticode\n"); return -EKEYREJECTED; } /* I'm not sure how to validate these */ return 0; default: return 0; } repeated: /* We permit max one item per AuthenticatedAttribute and no repeats */ pr_warn("Repeated/multivalue AuthAttrs not permitted\n"); return -EKEYREJECTED; } /* * Note the set of auth attributes for digestion purposes [RFC2315 sec 9.3] */ int pkcs7_sig_note_set_of_authattrs(void *context, size_t hdrlen, unsigned char tag, const void *value, size_t vlen) { struct pkcs7_parse_context *ctx = context; struct pkcs7_signed_info *sinfo = ctx->sinfo; if (!test_bit(sinfo_has_content_type, &sinfo->aa_set) || !test_bit(sinfo_has_message_digest, &sinfo->aa_set)) { pr_warn("Missing required AuthAttr\n"); return -EBADMSG; } if (ctx->msg->data_type != OID_msIndirectData && test_bit(sinfo_has_ms_opus_info, &sinfo->aa_set)) { pr_warn("Unexpected Authenticode AuthAttr\n"); return -EBADMSG; } /* We need to switch the 'CONT 0' to a 'SET OF' when we digest */ sinfo->authattrs = value - (hdrlen - 1); sinfo->authattrs_len = vlen + (hdrlen - 1); return 0; } /* * Note the issuing certificate serial number */ int pkcs7_sig_note_serial(void *context, size_t hdrlen, unsigned char tag, const void *value, size_t vlen) { struct pkcs7_parse_context *ctx = context; ctx->raw_serial = value; ctx->raw_serial_size = vlen; return 0; } /* * Note the issuer's name */ int pkcs7_sig_note_issuer(void *context, size_t hdrlen, unsigned char tag, const void *value, size_t vlen) { struct pkcs7_parse_context *ctx = context; ctx->raw_issuer = value; ctx->raw_issuer_size = vlen; return 0; } /* * Note the issuing cert's subjectKeyIdentifier */ int pkcs7_sig_note_skid(void *context, size_t hdrlen, unsigned char tag, const void *value, size_t vlen) { struct pkcs7_parse_context *ctx = context; pr_devel("SKID: %02x %zu [%*ph]\n", tag, vlen, (unsigned)vlen, value); ctx->raw_skid = value; ctx->raw_skid_size = vlen; return 0; } /* * Note the signature data */ int pkcs7_sig_note_signature(void *context, size_t hdrlen, unsigned char tag, const void *value, size_t vlen) { struct pkcs7_parse_context *ctx = context; ctx->sinfo->sig->s = kmemdup(value, vlen, GFP_KERNEL); if (!ctx->sinfo->sig->s) return -ENOMEM; ctx->sinfo->sig->s_size = vlen; return 0; } /* * Note a signature information block */ int pkcs7_note_signed_info(void *context, size_t hdrlen, unsigned char tag, const void *value, size_t vlen) { struct pkcs7_parse_context *ctx = context; struct pkcs7_signed_info *sinfo = ctx->sinfo; struct asymmetric_key_id *kid; if (ctx->msg->data_type == OID_msIndirectData && !sinfo->authattrs) { pr_warn("Authenticode requires AuthAttrs\n"); return -EBADMSG; } /* Generate cert issuer + serial number key ID */ if (!ctx->expect_skid) { kid = asymmetric_key_generate_id(ctx->raw_serial, ctx->raw_serial_size, ctx->raw_issuer, ctx->raw_issuer_size); } else { kid = asymmetric_key_generate_id(ctx->raw_skid, ctx->raw_skid_size, "", 0); } if (IS_ERR(kid)) return PTR_ERR(kid); pr_devel("SINFO KID: %u [%*phN]\n", kid->len, kid->len, kid->data); sinfo->sig->auth_ids[0] = kid; sinfo->index = ++ctx->sinfo_index; *ctx->ppsinfo = sinfo; ctx->ppsinfo = &sinfo->next; ctx->sinfo = kzalloc(sizeof(struct 
pkcs7_signed_info), GFP_KERNEL); if (!ctx->sinfo) return -ENOMEM; ctx->sinfo->sig = kzalloc(sizeof(struct public_key_signature), GFP_KERNEL); if (!ctx->sinfo->sig) return -ENOMEM; return 0; } backport-iwlwifi-dkms-7906/compat/verification/pkcs7_parser.h000066400000000000000000000044071351064634500244270ustar00rootroot00000000000000/* PKCS#7 crypto data parser internal definitions * * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public Licence * as published by the Free Software Foundation; either version * 2 of the Licence, or (at your option) any later version. */ #include #include #include "x509_parser.h" #define kenter(FMT, ...) \ pr_devel("==> %s("FMT")\n", __func__, ##__VA_ARGS__) #define kleave(FMT, ...) \ pr_devel("<== %s()"FMT"\n", __func__, ##__VA_ARGS__) struct pkcs7_signed_info { struct pkcs7_signed_info *next; struct x509_certificate *signer; /* Signing certificate (in msg->certs) */ unsigned index; bool unsupported_crypto; /* T if not usable due to missing crypto */ bool blacklisted; /* Message digest - the digest of the Content Data (or NULL) */ const void *msgdigest; unsigned msgdigest_len; /* Authenticated Attribute data (or NULL) */ unsigned authattrs_len; const void *authattrs; unsigned long aa_set; #define sinfo_has_content_type 0 #define sinfo_has_signing_time 1 #define sinfo_has_message_digest 2 #define sinfo_has_smime_caps 3 #define sinfo_has_ms_opus_info 4 #define sinfo_has_ms_statement_type 5 time64_t signing_time; /* Message signature. * * This contains the generated digest of _either_ the Content Data or * the Authenticated Attributes [RFC2315 9.3]. If the latter, one of * the attributes contains the digest of the the Content Data within * it. * * THis also contains the issuing cert serial number and issuer's name * [PKCS#7 or CMS ver 1] or issuing cert's SKID [CMS ver 3]. */ struct public_key_signature *sig; }; struct pkcs7_message { struct x509_certificate *certs; /* Certificate list */ struct x509_certificate *crl; /* Revocation list */ struct pkcs7_signed_info *signed_infos; u8 version; /* Version of cert (1 -> PKCS#7 or CMS; 3 -> CMS) */ bool have_authattrs; /* T if have authattrs */ /* Content Data (or NULL) */ enum OID data_type; /* Type of Data */ size_t data_len; /* Length of Data */ size_t data_hdrlen; /* Length of Data ASN.1 header */ const void *data; /* Content Data (or 0) */ }; backport-iwlwifi-dkms-7906/compat/verification/pkcs7_trust.c000066400000000000000000000114541351064634500243070ustar00rootroot00000000000000/* Validate the trust chain of a PKCS#7 message. * * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public Licence * as published by the Free Software Foundation; either version * 2 of the Licence, or (at your option) any later version. */ #define pr_fmt(fmt) "PKCS7: "fmt #include #include #include #include #include #include #include #include #include "pkcs7_parser.h" /** * Check the trust on one PKCS#7 SignedInfo block. 
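 *
 * The lookup below proceeds in three stages, stopping at the first hit:
 *
 *  (1) walk the signer chain looking for a certificate that is itself
 *      present in the trust keyring;
 *  (2) failing that, try the root certificate's authority IDs, i.e. a
 *      trusted key that signed the root;
 *  (3) as a last resort, try a trusted key matching the SignedInfo's
 *      own authority ID directly.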
*/ static int pkcs7_validate_trust_one(struct pkcs7_message *pkcs7, struct pkcs7_signed_info *sinfo, struct key *trust_keyring) { struct public_key_signature *sig = sinfo->sig; struct x509_certificate *x509, *last = NULL, *p; struct key *key; int ret; kenter(",%u,", sinfo->index); if (sinfo->unsupported_crypto) { kleave(" = -ENOPKG [cached]"); return -ENOPKG; } for (x509 = sinfo->signer; x509; x509 = x509->signer) { if (x509->seen) { if (x509->verified) goto verified; kleave(" = -ENOKEY [cached]"); return -ENOKEY; } x509->seen = true; /* Look to see if this certificate is present in the trusted * keys. */ key = find_asymmetric_key(trust_keyring, x509->id, x509->skid, false); if (!IS_ERR(key)) { /* One of the X.509 certificates in the PKCS#7 message * is apparently the same as one we already trust. * Verify that the trusted variant can also validate * the signature on the descendant. */ pr_devel("sinfo %u: Cert %u as key %x\n", sinfo->index, x509->index, key_serial(key)); goto matched; } if (key == ERR_PTR(-ENOMEM)) return -ENOMEM; /* Self-signed certificates form roots of their own, and if we * don't know them, then we can't accept them. */ if (x509->signer == x509) { kleave(" = -ENOKEY [unknown self-signed]"); return -ENOKEY; } might_sleep(); last = x509; sig = last->sig; } /* No match - see if the root certificate has a signer amongst the * trusted keys. */ if (last && (last->sig->auth_ids[0] || last->sig->auth_ids[1])) { key = find_asymmetric_key(trust_keyring, last->sig->auth_ids[0], last->sig->auth_ids[1], false); if (!IS_ERR(key)) { x509 = last; pr_devel("sinfo %u: Root cert %u signer is key %x\n", sinfo->index, x509->index, key_serial(key)); goto matched; } if (PTR_ERR(key) != -ENOKEY) return PTR_ERR(key); } /* As a last resort, see if we have a trusted public key that matches * the signed info directly. */ key = find_asymmetric_key(trust_keyring, sinfo->sig->auth_ids[0], NULL, false); if (!IS_ERR(key)) { pr_devel("sinfo %u: Direct signer is key %x\n", sinfo->index, key_serial(key)); x509 = NULL; sig = sinfo->sig; goto matched; } if (PTR_ERR(key) != -ENOKEY) return PTR_ERR(key); kleave(" = -ENOKEY [no backref]"); return -ENOKEY; matched: ret = public_key_verify_signature(key->public_key, sig); key_put(key); if (ret < 0) { if (ret == -ENOMEM) return ret; kleave(" = -EKEYREJECTED [verify %d]", ret); return -EKEYREJECTED; } verified: if (x509) { x509->verified = true; for (p = sinfo->signer; p != x509; p = p->signer) p->verified = true; } kleave(" = 0"); return 0; } /** * pkcs7_validate_trust - Validate PKCS#7 trust chain * @pkcs7: The PKCS#7 certificate to validate * @trust_keyring: Signing certificates to use as starting points * * Validate that the certificate chain inside the PKCS#7 message intersects * keys we already know and trust. * * Returns, in order of descending priority: * * (*) -EKEYREJECTED if a signature failed to match for which we have a valid * key, or: * * (*) 0 if at least one signature chain intersects with the keys in the trust * keyring, or: * * (*) -ENOPKG if a suitable crypto module couldn't be found for a check on a * chain. * * (*) -ENOKEY if we couldn't find a match for any of the signature chains in * the message. * * May also return -ENOMEM. 
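 *
 * A minimal caller sketch (the keyring here is hypothetical, obtained
 * elsewhere, e.g. via keyring_alloc(); it is not part of this file):
 *
 *	msg = pkcs7_parse_message(data, datalen);
 *	if (!IS_ERR(msg)) {
 *		ret = pkcs7_validate_trust(msg, my_keyring);
 *		pkcs7_free_message(msg);
 *	}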
*/ int pkcs7_validate_trust(struct pkcs7_message *pkcs7, struct key *trust_keyring) { struct pkcs7_signed_info *sinfo; struct x509_certificate *p; int cached_ret = -ENOKEY; int ret; for (p = pkcs7->certs; p; p = p->next) p->seen = false; for (sinfo = pkcs7->signed_infos; sinfo; sinfo = sinfo->next) { ret = pkcs7_validate_trust_one(pkcs7, sinfo, trust_keyring); switch (ret) { case -ENOKEY: continue; case -ENOPKG: if (cached_ret == -ENOKEY) cached_ret = -ENOPKG; continue; case 0: cached_ret = 0; continue; default: return ret; } } return cached_ret; } EXPORT_SYMBOL_GPL(pkcs7_validate_trust); backport-iwlwifi-dkms-7906/compat/verification/pkcs7_verify.c000066400000000000000000000313501351064634500244270ustar00rootroot00000000000000/* Verify the signature on a PKCS#7 message. * * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public Licence * as published by the Free Software Foundation; either version * 2 of the Licence, or (at your option) any later version. */ #define pr_fmt(fmt) "PKCS7: "fmt #include #include #include #include #include #include #include #include "pkcs7_parser.h" /* * Digest the relevant parts of the PKCS#7 data */ static int pkcs7_digest(struct pkcs7_message *pkcs7, struct pkcs7_signed_info *sinfo) { struct public_key_signature *sig = sinfo->sig; struct crypto_shash *tfm; struct shash_desc *desc; size_t desc_size; int ret; kenter(",%u,%s", sinfo->index, sinfo->sig->hash_algo); if (!sinfo->sig->hash_algo) return -ENOPKG; /* Allocate the hashing algorithm we're going to need and find out how * big the hash operational data will be. */ tfm = crypto_alloc_shash(sinfo->sig->hash_algo, 0, 0); if (IS_ERR(tfm)) return (PTR_ERR(tfm) == -ENOENT) ? -ENOPKG : PTR_ERR(tfm); desc_size = crypto_shash_descsize(tfm) + sizeof(*desc); sig->digest_size = crypto_shash_digestsize(tfm); ret = -ENOMEM; sig->digest = kmalloc(sig->digest_size, GFP_KERNEL); if (!sig->digest) goto error_no_desc; desc = kzalloc(desc_size, GFP_KERNEL); if (!desc) goto error_no_desc; desc->tfm = tfm; /* Digest the message [RFC2315 9.3] */ ret = crypto_shash_digest(desc, pkcs7->data, pkcs7->data_len, sig->digest); if (ret < 0) goto error; pr_devel("MsgDigest = [%*ph]\n", 8, sig->digest); /* However, if there are authenticated attributes, there must be a * message digest attribute amongst them which corresponds to the * digest we just calculated. */ if (sinfo->authattrs) { u8 tag; if (!sinfo->msgdigest) { pr_warn("Sig %u: No messageDigest\n", sinfo->index); ret = -EKEYREJECTED; goto error; } if (sinfo->msgdigest_len != sig->digest_size) { pr_debug("Sig %u: Invalid digest size (%u)\n", sinfo->index, sinfo->msgdigest_len); ret = -EBADMSG; goto error; } if (memcmp(sig->digest, sinfo->msgdigest, sinfo->msgdigest_len) != 0) { pr_debug("Sig %u: Message digest doesn't match\n", sinfo->index); ret = -EKEYREJECTED; goto error; } /* We then calculate anew, using the authenticated attributes * as the contents of the digest instead. Note that we need to * convert the attributes from a CONT.0 into a SET before we * hash it. 
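 *
 * Concretely: the attributes arrive tagged [0] IMPLICIT (octet 0xa0),
 * but the digest is defined over their SET OF form (octet 0x31, i.e.
 * ASN1_CONS_BIT | ASN1_SET).  sinfo->authattrs was saved without its
 * tag octet, so hashing the single byte 0x31 followed by the saved
 * bytes reproduces the SET OF encoding exactly.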
*/ memset(sig->digest, 0, sig->digest_size); ret = crypto_shash_init(desc); if (ret < 0) goto error; tag = ASN1_CONS_BIT | ASN1_SET; ret = crypto_shash_update(desc, &tag, 1); if (ret < 0) goto error; ret = crypto_shash_finup(desc, sinfo->authattrs, sinfo->authattrs_len, sig->digest); if (ret < 0) goto error; pr_devel("AADigest = [%*ph]\n", 8, sig->digest); } error: kfree(desc); error_no_desc: crypto_free_shash(tfm); kleave(" = %d", ret); return ret; } /* * Find the key (X.509 certificate) to use to verify a PKCS#7 message. PKCS#7 * uses the issuer's name and the issuing certificate serial number for * matching purposes. These must match the certificate issuer's name (not * subject's name) and the certificate serial number [RFC 2315 6.7]. */ static int pkcs7_find_key(struct pkcs7_message *pkcs7, struct pkcs7_signed_info *sinfo) { struct x509_certificate *x509; unsigned certix = 1; kenter("%u", sinfo->index); for (x509 = pkcs7->certs; x509; x509 = x509->next, certix++) { /* I'm _assuming_ that the generator of the PKCS#7 message will * encode the fields from the X.509 cert in the same way in the * PKCS#7 message - but I can't be 100% sure of that. It's * possible this will need element-by-element comparison. */ if (!asymmetric_key_id_same(x509->id, sinfo->sig->auth_ids[0])) continue; pr_devel("Sig %u: Found cert serial match X.509[%u]\n", sinfo->index, certix); if (strcmp(x509->pub->pkey_algo, sinfo->sig->pkey_algo) != 0) { pr_warn("Sig %u: X.509 algo and PKCS#7 sig algo don't match\n", sinfo->index); continue; } sinfo->signer = x509; return 0; } /* The relevant X.509 cert isn't found here, but it might be found in * the trust keyring. */ pr_debug("Sig %u: Issuing X.509 cert not found (#%*phN)\n", sinfo->index, sinfo->sig->auth_ids[0]->len, sinfo->sig->auth_ids[0]->data); return 0; } /* * Verify the internal certificate chain as best we can. */ static int pkcs7_verify_sig_chain(struct pkcs7_message *pkcs7, struct pkcs7_signed_info *sinfo) { struct public_key_signature *sig; struct x509_certificate *x509 = sinfo->signer, *p; struct asymmetric_key_id *auth; int ret; kenter(""); for (p = pkcs7->certs; p; p = p->next) p->seen = false; for (;;) { pr_debug("verify %s: %*phN\n", x509->subject, x509->raw_serial_size, x509->raw_serial); x509->seen = true; if (x509->blacklisted) { /* If this cert is blacklisted, then mark everything * that depends on this as blacklisted too. */ sinfo->blacklisted = true; for (p = sinfo->signer; p != x509; p = p->signer) p->blacklisted = true; pr_debug("- blacklisted\n"); return 0; } if (x509->unsupported_key) goto unsupported_crypto_in_x509; pr_debug("- issuer %s\n", x509->issuer); sig = x509->sig; if (sig->auth_ids[0]) pr_debug("- authkeyid.id %*phN\n", sig->auth_ids[0]->len, sig->auth_ids[0]->data); if (sig->auth_ids[1]) pr_debug("- authkeyid.skid %*phN\n", sig->auth_ids[1]->len, sig->auth_ids[1]->data); if (x509->self_signed) { /* If there's no authority certificate specified, then * the certificate must be self-signed and is the root * of the chain. Likewise if the cert is its own * authority. */ if (x509->unsupported_sig) goto unsupported_crypto_in_x509; x509->signer = x509; pr_debug("- self-signed\n"); return 0; } /* Look through the X.509 certificates in the PKCS#7 message's * list to see if the next one is there. 
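 *
 * Two identifiers may name the issuer: auth_ids[0] holds the
 * issuer+serialNumber form and is compared against each candidate's
 * ->id, while auth_ids[1] holds the keyIdentifier (SKID) form and is
 * compared against ->skid.  Whichever is present drives the scan.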
*/ auth = sig->auth_ids[0]; if (auth) { pr_debug("- want %*phN\n", auth->len, auth->data); for (p = pkcs7->certs; p; p = p->next) { pr_debug("- cmp [%u] %*phN\n", p->index, p->id->len, p->id->data); if (asymmetric_key_id_same(p->id, auth)) goto found_issuer_check_skid; } } else if (sig->auth_ids[1]) { auth = sig->auth_ids[1]; pr_debug("- want %*phN\n", auth->len, auth->data); for (p = pkcs7->certs; p; p = p->next) { if (!p->skid) continue; pr_debug("- cmp [%u] %*phN\n", p->index, p->skid->len, p->skid->data); if (asymmetric_key_id_same(p->skid, auth)) goto found_issuer; } } /* We didn't find the root of this chain */ pr_debug("- top\n"); return 0; found_issuer_check_skid: /* We matched issuer + serialNumber, but if there's an * authKeyId.keyId, that must match the CA subjKeyId also. */ if (sig->auth_ids[1] && !asymmetric_key_id_same(p->skid, sig->auth_ids[1])) { pr_warn("Sig %u: X.509 chain contains auth-skid nonmatch (%u->%u)\n", sinfo->index, x509->index, p->index); return -EKEYREJECTED; } found_issuer: pr_debug("- subject %s\n", p->subject); if (p->seen) { pr_warn("Sig %u: X.509 chain contains loop\n", sinfo->index); return 0; } ret = public_key_verify_signature(p->pub, x509->sig); if (ret < 0) return ret; x509->signer = p; if (x509 == p) { pr_debug("- self-signed\n"); return 0; } x509 = p; might_sleep(); } unsupported_crypto_in_x509: /* Just prune the certificate chain at this point if we lack some * crypto module to go further. Note, however, we don't want to set * sinfo->unsupported_crypto as the signed info block may still be * validatable against an X.509 cert lower in the chain that we have a * trusted copy of. */ return 0; } /* * Verify one signed information block from a PKCS#7 message. */ static int pkcs7_verify_one(struct pkcs7_message *pkcs7, struct pkcs7_signed_info *sinfo) { int ret; kenter(",%u", sinfo->index); /* First of all, digest the data in the PKCS#7 message and the * signed information block */ ret = pkcs7_digest(pkcs7, sinfo); if (ret < 0) return ret; /* Find the key for the signature if there is one */ ret = pkcs7_find_key(pkcs7, sinfo); if (ret < 0) return ret; if (!sinfo->signer) return 0; pr_devel("Using X.509[%u] for sig %u\n", sinfo->signer->index, sinfo->index); /* Check that the PKCS#7 signing time is valid according to the X.509 * certificate. We can't, however, check against the system clock * since that may not have been set yet and may be wrong. */ if (test_bit(sinfo_has_signing_time, &sinfo->aa_set)) { if (sinfo->signing_time < sinfo->signer->valid_from || sinfo->signing_time > sinfo->signer->valid_to) { pr_warn("Message signed outside of X.509 validity window\n"); return -EKEYREJECTED; } } /* Verify the PKCS#7 binary against the key */ ret = public_key_verify_signature(sinfo->signer->pub, sinfo->sig); if (ret < 0) return ret; pr_devel("Verified signature %u\n", sinfo->index); /* Verify the internal certificate chain */ return pkcs7_verify_sig_chain(pkcs7, sinfo); } /** * pkcs7_verify - Verify a PKCS#7 message * @pkcs7: The PKCS#7 message to be verified * @usage: The use to which the key is being put * * Verify a PKCS#7 message is internally consistent - that is, the data digest * matches the digest in the AuthAttrs and any signature in the message or one * of the X.509 certificates it carries that matches another X.509 cert in the * message can be verified. * * This does not look to match the contents of the PKCS#7 message against any * external public keys. 
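 *
 * Matching against external trusted keys is a separate step; see
 * pkcs7_validate_trust().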
* * Returns, in order of descending priority: * * (*) -EKEYREJECTED if a key was selected that had a usage restriction at * odds with the specified usage, or: * * (*) -EKEYREJECTED if a signature failed to match for which we found an * appropriate X.509 certificate, or: * * (*) -EBADMSG if some part of the message was invalid, or: * * (*) 0 if a signature chain passed verification, or: * * (*) -EKEYREJECTED if a blacklisted key was encountered, or: * * (*) -ENOPKG if none of the signature chains are verifiable because suitable * crypto modules couldn't be found. */ int pkcs7_verify(struct pkcs7_message *pkcs7, enum key_being_used_for usage) { struct pkcs7_signed_info *sinfo; int actual_ret = -ENOPKG; int ret; kenter(""); switch (usage) { case VERIFYING_MODULE_SIGNATURE: if (pkcs7->data_type != OID_data) { pr_warn("Invalid module sig (not pkcs7-data)\n"); return -EKEYREJECTED; } if (pkcs7->have_authattrs) { pr_warn("Invalid module sig (has authattrs)\n"); return -EKEYREJECTED; } break; case VERIFYING_FIRMWARE_SIGNATURE: if (pkcs7->data_type != OID_data) { pr_warn("Invalid firmware sig (not pkcs7-data)\n"); return -EKEYREJECTED; } if (!pkcs7->have_authattrs) { pr_warn("Invalid firmware sig (missing authattrs)\n"); return -EKEYREJECTED; } break; case VERIFYING_KEXEC_PE_SIGNATURE: if (pkcs7->data_type != OID_msIndirectData) { pr_warn("Invalid kexec sig (not Authenticode)\n"); return -EKEYREJECTED; } /* Authattr presence checked in parser */ break; case VERIFYING_UNSPECIFIED_SIGNATURE: if (pkcs7->data_type != OID_data) { pr_warn("Invalid unspecified sig (not pkcs7-data)\n"); return -EKEYREJECTED; } break; default: return -EINVAL; } for (sinfo = pkcs7->signed_infos; sinfo; sinfo = sinfo->next) { ret = pkcs7_verify_one(pkcs7, sinfo); if (sinfo->blacklisted) { if (actual_ret == -ENOPKG) actual_ret = -EKEYREJECTED; continue; } if (ret < 0) { if (ret == -ENOPKG) { sinfo->unsupported_crypto = true; continue; } kleave(" = %d", ret); return ret; } actual_ret = 0; } kleave(" = %d", actual_ret); return actual_ret; } EXPORT_SYMBOL_GPL(pkcs7_verify); /** * pkcs7_supply_detached_data - Supply the data needed to verify a PKCS#7 message * @pkcs7: The PKCS#7 message * @data: The data to be verified * @datalen: The amount of data * * Supply the detached data needed to verify a PKCS#7 message. Note that no * attempt to retain/pin the data is made. That is left to the caller. The * data will not be modified by pkcs7_verify() and will not be freed when the * PKCS#7 message is freed. * * Returns -EINVAL if data is already supplied in the message, 0 otherwise. */ int pkcs7_supply_detached_data(struct pkcs7_message *pkcs7, const void *data, size_t datalen) { if (pkcs7->data) { pr_debug("Data already supplied\n"); return -EINVAL; } pkcs7->data = data; pkcs7->data_len = datalen; return 0; } backport-iwlwifi-dkms-7906/compat/verification/public_key.c000066400000000000000000000057301351064634500241450ustar00rootroot00000000000000/* * Adapted from the kernel for simplicity in backports. * * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public Licence * as published by the Free Software Foundation; either version * 2 of the Licence, or (at your option) any later version. 
*/ #define pr_fmt(fmt) "PKEY: "fmt #include #include #include #include #include #include #include #include "rsapubkey.asn1.h" #include "mbedtls/rsa.h" #include "mbedtls/md.h" void public_key_free(struct public_key *key) { if (key) { kfree(key->key); kfree(key); } } int rsa_get_n(void *context, size_t hdrlen, unsigned char tag, const void *value, size_t vlen) { mbedtls_rsa_context *rsa = context; /* invalid key provided */ if (!value || !vlen) return -EINVAL; return mbedtls_mpi_read_binary(&rsa->N, value, vlen) ? -EINVAL : 0; } int rsa_get_e(void *context, size_t hdrlen, unsigned char tag, const void *value, size_t vlen) { mbedtls_rsa_context *rsa = context; /* invalid key provided */ if (!value || !vlen) return -EINVAL; return mbedtls_mpi_read_binary(&rsa->E, value, vlen) ? -EINVAL : 0; } int public_key_verify_signature(const struct public_key *pkey, const struct public_key_signature *sig) { mbedtls_rsa_context rsa; mbedtls_md_type_t md_alg; const u8 *sigdata = sig->s; int s_size = sig->s_size; int ret; if (WARN_ON(!pkey)) return -EINVAL; if (strcmp(sig->pkey_algo, "rsa")) return -ENOTSUPP; if (strcmp(sig->hash_algo, "sha1") == 0) md_alg = MBEDTLS_MD_SHA1; else if (strcmp(sig->hash_algo, "sha256") == 0) md_alg = MBEDTLS_MD_SHA256; else return -ENOTSUPP; mbedtls_rsa_init(&rsa, MBEDTLS_RSA_PKCS_V15, 0); ret = asn1_ber_decoder(&rsapubkey_decoder, &rsa, pkey->key, pkey->keylen); if (ret) goto free; rsa.len = (mbedtls_mpi_bitlen(&rsa.N) + 7) >> 3; /* * In some cases (from X.509 certificates) we get here with a * BIT_STRING ASN.1 object, in which the first byte indicates * the number of unused bits in the bit string (in case the * string isn't a multiple of 8 long). * Assume here that it's always a multiple of 8, and just skip * the additional byte. */ if (s_size == rsa.len + 1 && sigdata[0] == 0) { sigdata = sig->s + 1; s_size -= 1; } if (rsa.len != s_size) { ret = -EINVAL; goto free; } ret = mbedtls_rsa_pkcs1_verify(&rsa, NULL, NULL, MBEDTLS_RSA_PUBLIC, md_alg, 0, sig->digest, sigdata); if (ret) ret = -EKEYREJECTED; else ret = 0; free: mbedtls_rsa_free(&rsa); return ret; } void public_key_signature_free(struct public_key_signature *sig) { int i; if (sig) { for (i = 0; i < ARRAY_SIZE(sig->auth_ids); i++) kfree(sig->auth_ids[i]); kfree(sig->s); kfree(sig->digest); kfree(sig); } } backport-iwlwifi-dkms-7906/compat/verification/rsa.c000066400000000000000000001554661351064634500226200ustar00rootroot00000000000000/* * The RSA public-key cryptosystem * * Copyright (C) 2006-2015, ARM Limited, All Rights Reserved * SPDX-License-Identifier: GPL-2.0 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
* * This file is part of mbed TLS (https://tls.mbed.org) */ /* * The following sources were referenced in the design of this implementation * of the RSA algorithm: * * [1] A method for obtaining digital signatures and public-key cryptosystems * R Rivest, A Shamir, and L Adleman * http://people.csail.mit.edu/rivest/pubs.html#RSA78 * * [2] Handbook of Applied Cryptography - 1997, Chapter 8 * Menezes, van Oorschot and Vanstone * * [3] Malware Guard Extension: Using SGX to Conceal Cache Attacks * Michael Schwarz, Samuel Weiser, Daniel Gruss, Clémentine Maurice and * Stefan Mangard * https://arxiv.org/abs/1702.08719v2 * */ #if !defined(MBEDTLS_CONFIG_FILE) #include "mbedtls/config.h" #else #include MBEDTLS_CONFIG_FILE #endif #if defined(MBEDTLS_RSA_C) #include "mbedtls/rsa.h" #include "mbedtls/oid.h" #if defined(MBEDTLS_PKCS1_V21) #include "mbedtls/md.h" #endif #if defined(MBEDTLS_PKCS1_V15) && !defined(__OpenBSD__) #include #endif #if defined(MBEDTLS_PLATFORM_C) #include "mbedtls/platform.h" #else #include #define mbedtls_printf printf #define mbedtls_calloc calloc #define mbedtls_free free #endif /* Implementation that should never be optimized out by the compiler */ static void mbedtls_zeroize( void *v, size_t n ) { volatile unsigned char *p = (unsigned char*)v; while( n-- ) *p++ = 0; } /* * Initialize an RSA context */ void mbedtls_rsa_init( mbedtls_rsa_context *ctx, int padding, int hash_id ) { memset( ctx, 0, sizeof( mbedtls_rsa_context ) ); mbedtls_rsa_set_padding( ctx, padding, hash_id ); #if defined(MBEDTLS_THREADING_C) mbedtls_mutex_init( &ctx->mutex ); #endif } /* * Set padding for an existing RSA context */ void mbedtls_rsa_set_padding( mbedtls_rsa_context *ctx, int padding, int hash_id ) { ctx->padding = padding; ctx->hash_id = hash_id; } #if defined(MBEDTLS_GENPRIME) /* * Generate an RSA keypair */ int mbedtls_rsa_gen_key( mbedtls_rsa_context *ctx, int (*f_rng)(void *, unsigned char *, size_t), void *p_rng, unsigned int nbits, int exponent ) { int ret; mbedtls_mpi P1, Q1, H, G; if( f_rng == NULL || nbits < 128 || exponent < 3 ) return( MBEDTLS_ERR_RSA_BAD_INPUT_DATA ); if( nbits % 2 ) return( MBEDTLS_ERR_RSA_BAD_INPUT_DATA ); mbedtls_mpi_init( &P1 ); mbedtls_mpi_init( &Q1 ); mbedtls_mpi_init( &H ); mbedtls_mpi_init( &G ); /* * find primes P and Q with Q < P so that: * GCD( E, (P-1)*(Q-1) ) == 1 */ MBEDTLS_MPI_CHK( mbedtls_mpi_lset( &ctx->E, exponent ) ); do { MBEDTLS_MPI_CHK( mbedtls_mpi_gen_prime( &ctx->P, nbits >> 1, 0, f_rng, p_rng ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_gen_prime( &ctx->Q, nbits >> 1, 0, f_rng, p_rng ) ); if( mbedtls_mpi_cmp_mpi( &ctx->P, &ctx->Q ) == 0 ) continue; MBEDTLS_MPI_CHK( mbedtls_mpi_mul_mpi( &ctx->N, &ctx->P, &ctx->Q ) ); if( mbedtls_mpi_bitlen( &ctx->N ) != nbits ) continue; if( mbedtls_mpi_cmp_mpi( &ctx->P, &ctx->Q ) < 0 ) mbedtls_mpi_swap( &ctx->P, &ctx->Q ); MBEDTLS_MPI_CHK( mbedtls_mpi_sub_int( &P1, &ctx->P, 1 ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_sub_int( &Q1, &ctx->Q, 1 ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_mul_mpi( &H, &P1, &Q1 ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_gcd( &G, &ctx->E, &H ) ); } while( mbedtls_mpi_cmp_int( &G, 1 ) != 0 ); /* * D = E^-1 mod ((P-1)*(Q-1)) * DP = D mod (P - 1) * DQ = D mod (Q - 1) * QP = Q^-1 mod P */ MBEDTLS_MPI_CHK( mbedtls_mpi_inv_mod( &ctx->D , &ctx->E, &H ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_mod_mpi( &ctx->DP, &ctx->D, &P1 ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_mod_mpi( &ctx->DQ, &ctx->D, &Q1 ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_inv_mod( &ctx->QP, &ctx->Q, &ctx->P ) ); ctx->len = ( mbedtls_mpi_bitlen( &ctx->N ) + 7 ) >> 3; cleanup: 
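	/* Common exit: scrub and free the temporaries; on failure also
	 * wipe the partially built key before reporting the error. */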
mbedtls_mpi_free( &P1 ); mbedtls_mpi_free( &Q1 ); mbedtls_mpi_free( &H ); mbedtls_mpi_free( &G ); if( ret != 0 ) { mbedtls_rsa_free( ctx ); return( MBEDTLS_ERR_RSA_KEY_GEN_FAILED + ret ); } return( 0 ); } #endif /* MBEDTLS_GENPRIME */ /* * Check a public RSA key */ int mbedtls_rsa_check_pubkey( const mbedtls_rsa_context *ctx ) { if( !ctx->N.p || !ctx->E.p ) return( MBEDTLS_ERR_RSA_KEY_CHECK_FAILED ); if( ( ctx->N.p[0] & 1 ) == 0 || ( ctx->E.p[0] & 1 ) == 0 ) return( MBEDTLS_ERR_RSA_KEY_CHECK_FAILED ); if( mbedtls_mpi_bitlen( &ctx->N ) < 128 || mbedtls_mpi_bitlen( &ctx->N ) > MBEDTLS_MPI_MAX_BITS ) return( MBEDTLS_ERR_RSA_KEY_CHECK_FAILED ); if( mbedtls_mpi_bitlen( &ctx->E ) < 2 || mbedtls_mpi_cmp_mpi( &ctx->E, &ctx->N ) >= 0 ) return( MBEDTLS_ERR_RSA_KEY_CHECK_FAILED ); return( 0 ); } /* * Check a private RSA key */ int mbedtls_rsa_check_privkey( const mbedtls_rsa_context *ctx ) { int ret; mbedtls_mpi PQ, DE, P1, Q1, H, I, G, G2, L1, L2, DP, DQ, QP; if( ( ret = mbedtls_rsa_check_pubkey( ctx ) ) != 0 ) return( ret ); if( !ctx->P.p || !ctx->Q.p || !ctx->D.p ) return( MBEDTLS_ERR_RSA_KEY_CHECK_FAILED ); mbedtls_mpi_init( &PQ ); mbedtls_mpi_init( &DE ); mbedtls_mpi_init( &P1 ); mbedtls_mpi_init( &Q1 ); mbedtls_mpi_init( &H ); mbedtls_mpi_init( &I ); mbedtls_mpi_init( &G ); mbedtls_mpi_init( &G2 ); mbedtls_mpi_init( &L1 ); mbedtls_mpi_init( &L2 ); mbedtls_mpi_init( &DP ); mbedtls_mpi_init( &DQ ); mbedtls_mpi_init( &QP ); MBEDTLS_MPI_CHK( mbedtls_mpi_mul_mpi( &PQ, &ctx->P, &ctx->Q ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_mul_mpi( &DE, &ctx->D, &ctx->E ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_sub_int( &P1, &ctx->P, 1 ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_sub_int( &Q1, &ctx->Q, 1 ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_mul_mpi( &H, &P1, &Q1 ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_gcd( &G, &ctx->E, &H ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_gcd( &G2, &P1, &Q1 ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_div_mpi( &L1, &L2, &H, &G2 ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_mod_mpi( &I, &DE, &L1 ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_mod_mpi( &DP, &ctx->D, &P1 ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_mod_mpi( &DQ, &ctx->D, &Q1 ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_inv_mod( &QP, &ctx->Q, &ctx->P ) ); /* * Check for a valid PKCS1v2 private key */ if( mbedtls_mpi_cmp_mpi( &PQ, &ctx->N ) != 0 || mbedtls_mpi_cmp_mpi( &DP, &ctx->DP ) != 0 || mbedtls_mpi_cmp_mpi( &DQ, &ctx->DQ ) != 0 || mbedtls_mpi_cmp_mpi( &QP, &ctx->QP ) != 0 || mbedtls_mpi_cmp_int( &L2, 0 ) != 0 || mbedtls_mpi_cmp_int( &I, 1 ) != 0 || mbedtls_mpi_cmp_int( &G, 1 ) != 0 ) { ret = MBEDTLS_ERR_RSA_KEY_CHECK_FAILED; } cleanup: mbedtls_mpi_free( &PQ ); mbedtls_mpi_free( &DE ); mbedtls_mpi_free( &P1 ); mbedtls_mpi_free( &Q1 ); mbedtls_mpi_free( &H ); mbedtls_mpi_free( &I ); mbedtls_mpi_free( &G ); mbedtls_mpi_free( &G2 ); mbedtls_mpi_free( &L1 ); mbedtls_mpi_free( &L2 ); mbedtls_mpi_free( &DP ); mbedtls_mpi_free( &DQ ); mbedtls_mpi_free( &QP ); if( ret == MBEDTLS_ERR_RSA_KEY_CHECK_FAILED ) return( ret ); if( ret != 0 ) return( MBEDTLS_ERR_RSA_KEY_CHECK_FAILED + ret ); return( 0 ); } /* * Check if contexts holding a public and private key match */ int mbedtls_rsa_check_pub_priv( const mbedtls_rsa_context *pub, const mbedtls_rsa_context *prv ) { if( mbedtls_rsa_check_pubkey( pub ) != 0 || mbedtls_rsa_check_privkey( prv ) != 0 ) { return( MBEDTLS_ERR_RSA_KEY_CHECK_FAILED ); } if( mbedtls_mpi_cmp_mpi( &pub->N, &prv->N ) != 0 || mbedtls_mpi_cmp_mpi( &pub->E, &prv->E ) != 0 ) { return( MBEDTLS_ERR_RSA_KEY_CHECK_FAILED ); } return( 0 ); } /* * Do an RSA public key operation */ int mbedtls_rsa_public( mbedtls_rsa_context 
*ctx, const unsigned char *input, unsigned char *output ) { int ret; size_t olen; mbedtls_mpi T; mbedtls_mpi_init( &T ); #if defined(MBEDTLS_THREADING_C) if( ( ret = mbedtls_mutex_lock( &ctx->mutex ) ) != 0 ) return( ret ); #endif MBEDTLS_MPI_CHK( mbedtls_mpi_read_binary( &T, input, ctx->len ) ); if( mbedtls_mpi_cmp_mpi( &T, &ctx->N ) >= 0 ) { ret = MBEDTLS_ERR_MPI_BAD_INPUT_DATA; goto cleanup; } olen = ctx->len; MBEDTLS_MPI_CHK( mbedtls_mpi_exp_mod( &T, &T, &ctx->E, &ctx->N, &ctx->RN ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_write_binary( &T, output, olen ) ); cleanup: #if defined(MBEDTLS_THREADING_C) if( mbedtls_mutex_unlock( &ctx->mutex ) != 0 ) return( MBEDTLS_ERR_THREADING_MUTEX_ERROR ); #endif mbedtls_mpi_free( &T ); if( ret != 0 ) return( MBEDTLS_ERR_RSA_PUBLIC_FAILED + ret ); return( 0 ); } /* * Generate or update blinding values, see section 10 of: * KOCHER, Paul C. Timing attacks on implementations of Diffie-Hellman, RSA, * DSS, and other systems. In : Advances in Cryptology-CRYPTO'96. Springer * Berlin Heidelberg, 1996. p. 104-113. */ static int rsa_prepare_blinding( mbedtls_rsa_context *ctx, int (*f_rng)(void *, unsigned char *, size_t), void *p_rng ) { int ret, count = 0; if( ctx->Vf.p != NULL ) { /* We already have blinding values, just update them by squaring */ MBEDTLS_MPI_CHK( mbedtls_mpi_mul_mpi( &ctx->Vi, &ctx->Vi, &ctx->Vi ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_mod_mpi( &ctx->Vi, &ctx->Vi, &ctx->N ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_mul_mpi( &ctx->Vf, &ctx->Vf, &ctx->Vf ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_mod_mpi( &ctx->Vf, &ctx->Vf, &ctx->N ) ); goto cleanup; } /* Unblinding value: Vf = random number, invertible mod N */ do { if( count++ > 10 ) return( MBEDTLS_ERR_RSA_RNG_FAILED ); MBEDTLS_MPI_CHK( mbedtls_mpi_fill_random( &ctx->Vf, ctx->len - 1, f_rng, p_rng ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_gcd( &ctx->Vi, &ctx->Vf, &ctx->N ) ); } while( mbedtls_mpi_cmp_int( &ctx->Vi, 1 ) != 0 ); /* Blinding value: Vi = Vf^(-e) mod N */ MBEDTLS_MPI_CHK( mbedtls_mpi_inv_mod( &ctx->Vi, &ctx->Vf, &ctx->N ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_exp_mod( &ctx->Vi, &ctx->Vi, &ctx->E, &ctx->N, &ctx->RN ) ); cleanup: return( ret ); } /* * Exponent blinding supposed to prevent side-channel attacks using multiple * traces of measurements to recover the RSA key. The more collisions are there, * the more bits of the key can be recovered. See [3]. * * Collecting n collisions with m bit long blinding value requires 2^(m-m/n) * observations on avarage. * * For example with 28 byte blinding to achieve 2 collisions the adversary has * to make 2^112 observations on avarage. * * (With the currently (as of 2017 April) known best algorithms breaking 2048 * bit RSA requires approximately as much time as trying out 2^112 random keys. * Thus in this sense with 28 byte blinding the security is not reduced by * side-channel attacks like the one in [3]) * * This countermeasure does not help if the key recovery is possible with a * single trace. 
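 *
 * Worked numbers for the 28 byte value chosen below: m = 224 bits and
 * n = 2 collisions give 2^(m - m/n) = 2^(224 - 112) = 2^112 expected
 * observations, which is the figure quoted above.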
*/ #define RSA_EXPONENT_BLINDING 28 /* * Do an RSA private key operation */ int mbedtls_rsa_private( mbedtls_rsa_context *ctx, int (*f_rng)(void *, unsigned char *, size_t), void *p_rng, const unsigned char *input, unsigned char *output ) { int ret; size_t olen; mbedtls_mpi T, T1, T2; mbedtls_mpi P1, Q1, R; #if defined(MBEDTLS_RSA_NO_CRT) mbedtls_mpi D_blind; mbedtls_mpi *D = &ctx->D; #else mbedtls_mpi DP_blind, DQ_blind; mbedtls_mpi *DP = &ctx->DP; mbedtls_mpi *DQ = &ctx->DQ; #endif /* Make sure we have private key info, prevent possible misuse */ if( ctx->P.p == NULL || ctx->Q.p == NULL || ctx->D.p == NULL ) return( MBEDTLS_ERR_RSA_BAD_INPUT_DATA ); mbedtls_mpi_init( &T ); mbedtls_mpi_init( &T1 ); mbedtls_mpi_init( &T2 ); mbedtls_mpi_init( &P1 ); mbedtls_mpi_init( &Q1 ); mbedtls_mpi_init( &R ); if( f_rng != NULL ) { #if defined(MBEDTLS_RSA_NO_CRT) mbedtls_mpi_init( &D_blind ); #else mbedtls_mpi_init( &DP_blind ); mbedtls_mpi_init( &DQ_blind ); #endif } #if defined(MBEDTLS_THREADING_C) if( ( ret = mbedtls_mutex_lock( &ctx->mutex ) ) != 0 ) return( ret ); #endif MBEDTLS_MPI_CHK( mbedtls_mpi_read_binary( &T, input, ctx->len ) ); if( mbedtls_mpi_cmp_mpi( &T, &ctx->N ) >= 0 ) { ret = MBEDTLS_ERR_MPI_BAD_INPUT_DATA; goto cleanup; } if( f_rng != NULL ) { /* * Blinding * T = T * Vi mod N */ MBEDTLS_MPI_CHK( rsa_prepare_blinding( ctx, f_rng, p_rng ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_mul_mpi( &T, &T, &ctx->Vi ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_mod_mpi( &T, &T, &ctx->N ) ); /* * Exponent blinding */ MBEDTLS_MPI_CHK( mbedtls_mpi_sub_int( &P1, &ctx->P, 1 ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_sub_int( &Q1, &ctx->Q, 1 ) ); #if defined(MBEDTLS_RSA_NO_CRT) /* * D_blind = ( P - 1 ) * ( Q - 1 ) * R + D */ MBEDTLS_MPI_CHK( mbedtls_mpi_fill_random( &R, RSA_EXPONENT_BLINDING, f_rng, p_rng ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_mul_mpi( &D_blind, &P1, &Q1 ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_mul_mpi( &D_blind, &D_blind, &R ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_add_mpi( &D_blind, &D_blind, &ctx->D ) ); D = &D_blind; #else /* * DP_blind = ( P - 1 ) * R + DP */ MBEDTLS_MPI_CHK( mbedtls_mpi_fill_random( &R, RSA_EXPONENT_BLINDING, f_rng, p_rng ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_mul_mpi( &DP_blind, &P1, &R ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_add_mpi( &DP_blind, &DP_blind, &ctx->DP ) ); DP = &DP_blind; /* * DQ_blind = ( Q - 1 ) * R + DQ */ MBEDTLS_MPI_CHK( mbedtls_mpi_fill_random( &R, RSA_EXPONENT_BLINDING, f_rng, p_rng ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_mul_mpi( &DQ_blind, &Q1, &R ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_add_mpi( &DQ_blind, &DQ_blind, &ctx->DQ ) ); DQ = &DQ_blind; #endif /* MBEDTLS_RSA_NO_CRT */ } #if defined(MBEDTLS_RSA_NO_CRT) MBEDTLS_MPI_CHK( mbedtls_mpi_exp_mod( &T, &T, D, &ctx->N, &ctx->RN ) ); #else /* * Faster decryption using the CRT * * T1 = input ^ dP mod P * T2 = input ^ dQ mod Q */ MBEDTLS_MPI_CHK( mbedtls_mpi_exp_mod( &T1, &T, DP, &ctx->P, &ctx->RP ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_exp_mod( &T2, &T, DQ, &ctx->Q, &ctx->RQ ) ); /* * T = (T1 - T2) * (Q^-1 mod P) mod P */ MBEDTLS_MPI_CHK( mbedtls_mpi_sub_mpi( &T, &T1, &T2 ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_mul_mpi( &T1, &T, &ctx->QP ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_mod_mpi( &T, &T1, &ctx->P ) ); /* * T = T2 + T * Q */ MBEDTLS_MPI_CHK( mbedtls_mpi_mul_mpi( &T1, &T, &ctx->Q ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_add_mpi( &T, &T2, &T1 ) ); #endif /* MBEDTLS_RSA_NO_CRT */ if( f_rng != NULL ) { /* * Unblind * T = T * Vf mod N */ MBEDTLS_MPI_CHK( mbedtls_mpi_mul_mpi( &T, &T, &ctx->Vf ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_mod_mpi( &T, &T, &ctx->N ) ); } olen = ctx->len; 
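	/* Serialize the (possibly unblinded) result as a big-endian octet
	 * string of exactly the modulus length; mbedtls_mpi_write_binary()
	 * left-pads with zeros when the value is numerically shorter. */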
MBEDTLS_MPI_CHK( mbedtls_mpi_write_binary( &T, output, olen ) ); cleanup: #if defined(MBEDTLS_THREADING_C) if( mbedtls_mutex_unlock( &ctx->mutex ) != 0 ) return( MBEDTLS_ERR_THREADING_MUTEX_ERROR ); #endif mbedtls_mpi_free( &T ); mbedtls_mpi_free( &T1 ); mbedtls_mpi_free( &T2 ); mbedtls_mpi_free( &P1 ); mbedtls_mpi_free( &Q1 ); mbedtls_mpi_free( &R ); if( f_rng != NULL ) { #if defined(MBEDTLS_RSA_NO_CRT) mbedtls_mpi_free( &D_blind ); #else mbedtls_mpi_free( &DP_blind ); mbedtls_mpi_free( &DQ_blind ); #endif } if( ret != 0 ) return( MBEDTLS_ERR_RSA_PRIVATE_FAILED + ret ); return( 0 ); } #if defined(MBEDTLS_PKCS1_V21) /** * Generate and apply the MGF1 operation (from PKCS#1 v2.1) to a buffer. * * \param dst buffer to mask * \param dlen length of destination buffer * \param src source of the mask generation * \param slen length of the source buffer * \param md_ctx message digest context to use */ static void mgf_mask( unsigned char *dst, size_t dlen, unsigned char *src, size_t slen, mbedtls_md_context_t *md_ctx ) { unsigned char mask[MBEDTLS_MD_MAX_SIZE]; unsigned char counter[4]; unsigned char *p; unsigned int hlen; size_t i, use_len; memset( mask, 0, MBEDTLS_MD_MAX_SIZE ); memset( counter, 0, 4 ); hlen = mbedtls_md_get_size( md_ctx->md_info ); /* Generate and apply dbMask */ p = dst; while( dlen > 0 ) { use_len = hlen; if( dlen < hlen ) use_len = dlen; mbedtls_md_starts( md_ctx ); mbedtls_md_update( md_ctx, src, slen ); mbedtls_md_update( md_ctx, counter, 4 ); mbedtls_md_finish( md_ctx, mask ); for( i = 0; i < use_len; ++i ) *p++ ^= mask[i]; counter[3]++; dlen -= use_len; } mbedtls_zeroize( mask, sizeof( mask ) ); } #endif /* MBEDTLS_PKCS1_V21 */ #if defined(MBEDTLS_PKCS1_V21) /* * Implementation of the PKCS#1 v2.1 RSAES-OAEP-ENCRYPT function */ int mbedtls_rsa_rsaes_oaep_encrypt( mbedtls_rsa_context *ctx, int (*f_rng)(void *, unsigned char *, size_t), void *p_rng, int mode, const unsigned char *label, size_t label_len, size_t ilen, const unsigned char *input, unsigned char *output ) { size_t olen; int ret; unsigned char *p = output; unsigned int hlen; const mbedtls_md_info_t *md_info; mbedtls_md_context_t md_ctx; if( mode == MBEDTLS_RSA_PRIVATE && ctx->padding != MBEDTLS_RSA_PKCS_V21 ) return( MBEDTLS_ERR_RSA_BAD_INPUT_DATA ); if( f_rng == NULL ) return( MBEDTLS_ERR_RSA_BAD_INPUT_DATA ); md_info = mbedtls_md_info_from_type( (mbedtls_md_type_t) ctx->hash_id ); if( md_info == NULL ) return( MBEDTLS_ERR_RSA_BAD_INPUT_DATA ); olen = ctx->len; hlen = mbedtls_md_get_size( md_info ); /* first comparison checks for overflow */ if( ilen + 2 * hlen + 2 < ilen || olen < ilen + 2 * hlen + 2 ) return( MBEDTLS_ERR_RSA_BAD_INPUT_DATA ); memset( output, 0, olen ); *p++ = 0; /* Generate a random octet string seed */ if( ( ret = f_rng( p_rng, p, hlen ) ) != 0 ) return( MBEDTLS_ERR_RSA_RNG_FAILED + ret ); p += hlen; /* Construct DB */ mbedtls_md( md_info, label, label_len, p ); p += hlen; p += olen - 2 * hlen - 2 - ilen; *p++ = 1; memcpy( p, input, ilen ); mbedtls_md_init( &md_ctx ); if( ( ret = mbedtls_md_setup( &md_ctx, md_info, 0 ) ) != 0 ) { mbedtls_md_free( &md_ctx ); return( ret ); } /* maskedDB: Apply dbMask to DB */ mgf_mask( output + hlen + 1, olen - hlen - 1, output + 1, hlen, &md_ctx ); /* maskedSeed: Apply seedMask to seed */ mgf_mask( output + 1, hlen, output + hlen + 1, olen - hlen - 1, &md_ctx ); mbedtls_md_free( &md_ctx ); return( ( mode == MBEDTLS_RSA_PUBLIC ) ? 
mbedtls_rsa_public( ctx, output, output ) : mbedtls_rsa_private( ctx, f_rng, p_rng, output, output ) ); } #endif /* MBEDTLS_PKCS1_V21 */ #if defined(MBEDTLS_PKCS1_V15) /* * Implementation of the PKCS#1 v2.1 RSAES-PKCS1-V1_5-ENCRYPT function */ int mbedtls_rsa_rsaes_pkcs1_v15_encrypt( mbedtls_rsa_context *ctx, int (*f_rng)(void *, unsigned char *, size_t), void *p_rng, int mode, size_t ilen, const unsigned char *input, unsigned char *output ) { size_t nb_pad, olen; int ret; unsigned char *p = output; if( mode == MBEDTLS_RSA_PRIVATE && ctx->padding != MBEDTLS_RSA_PKCS_V15 ) return( MBEDTLS_ERR_RSA_BAD_INPUT_DATA ); // We don't check p_rng because it won't be dereferenced here if( f_rng == NULL || input == NULL || output == NULL ) return( MBEDTLS_ERR_RSA_BAD_INPUT_DATA ); olen = ctx->len; /* first comparison checks for overflow */ if( ilen + 11 < ilen || olen < ilen + 11 ) return( MBEDTLS_ERR_RSA_BAD_INPUT_DATA ); nb_pad = olen - 3 - ilen; *p++ = 0; if( mode == MBEDTLS_RSA_PUBLIC ) { *p++ = MBEDTLS_RSA_CRYPT; while( nb_pad-- > 0 ) { int rng_dl = 100; do { ret = f_rng( p_rng, p, 1 ); } while( *p == 0 && --rng_dl && ret == 0 ); /* Check if RNG failed to generate data */ if( rng_dl == 0 || ret != 0 ) return( MBEDTLS_ERR_RSA_RNG_FAILED + ret ); p++; } } else { *p++ = MBEDTLS_RSA_SIGN; while( nb_pad-- > 0 ) *p++ = 0xFF; } *p++ = 0; memcpy( p, input, ilen ); return( ( mode == MBEDTLS_RSA_PUBLIC ) ? mbedtls_rsa_public( ctx, output, output ) : mbedtls_rsa_private( ctx, f_rng, p_rng, output, output ) ); } #endif /* MBEDTLS_PKCS1_V15 */ /* * Add the message padding, then do an RSA operation */ int mbedtls_rsa_pkcs1_encrypt( mbedtls_rsa_context *ctx, int (*f_rng)(void *, unsigned char *, size_t), void *p_rng, int mode, size_t ilen, const unsigned char *input, unsigned char *output ) { switch( ctx->padding ) { #if defined(MBEDTLS_PKCS1_V15) case MBEDTLS_RSA_PKCS_V15: return mbedtls_rsa_rsaes_pkcs1_v15_encrypt( ctx, f_rng, p_rng, mode, ilen, input, output ); #endif #if defined(MBEDTLS_PKCS1_V21) case MBEDTLS_RSA_PKCS_V21: return mbedtls_rsa_rsaes_oaep_encrypt( ctx, f_rng, p_rng, mode, NULL, 0, ilen, input, output ); #endif default: return( MBEDTLS_ERR_RSA_INVALID_PADDING ); } } #if defined(MBEDTLS_PKCS1_V21) /* * Implementation of the PKCS#1 v2.1 RSAES-OAEP-DECRYPT function */ int mbedtls_rsa_rsaes_oaep_decrypt( mbedtls_rsa_context *ctx, int (*f_rng)(void *, unsigned char *, size_t), void *p_rng, int mode, const unsigned char *label, size_t label_len, size_t *olen, const unsigned char *input, unsigned char *output, size_t output_max_len ) { int ret; size_t ilen, i, pad_len; unsigned char *p, bad, pad_done; unsigned char buf[MBEDTLS_MPI_MAX_SIZE]; unsigned char lhash[MBEDTLS_MD_MAX_SIZE]; unsigned int hlen; const mbedtls_md_info_t *md_info; mbedtls_md_context_t md_ctx; /* * Parameters sanity checks */ if( mode == MBEDTLS_RSA_PRIVATE && ctx->padding != MBEDTLS_RSA_PKCS_V21 ) return( MBEDTLS_ERR_RSA_BAD_INPUT_DATA ); ilen = ctx->len; if( ilen < 16 || ilen > sizeof( buf ) ) return( MBEDTLS_ERR_RSA_BAD_INPUT_DATA ); md_info = mbedtls_md_info_from_type( (mbedtls_md_type_t) ctx->hash_id ); if( md_info == NULL ) return( MBEDTLS_ERR_RSA_BAD_INPUT_DATA ); hlen = mbedtls_md_get_size( md_info ); // checking for integer underflow if( 2 * hlen + 2 > ilen ) return( MBEDTLS_ERR_RSA_BAD_INPUT_DATA ); /* * RSA operation */ ret = ( mode == MBEDTLS_RSA_PUBLIC ) ? 
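          /* apply the raw RSA operation, leaving the encoded message EM in
           * buf[]; the OAEP unmasking and padding checks follow */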
mbedtls_rsa_public( ctx, input, buf ) : mbedtls_rsa_private( ctx, f_rng, p_rng, input, buf ); if( ret != 0 ) goto cleanup; /* * Unmask data and generate lHash */ mbedtls_md_init( &md_ctx ); if( ( ret = mbedtls_md_setup( &md_ctx, md_info, 0 ) ) != 0 ) { mbedtls_md_free( &md_ctx ); goto cleanup; } /* Generate lHash */ mbedtls_md( md_info, label, label_len, lhash ); /* seed: Apply seedMask to maskedSeed */ mgf_mask( buf + 1, hlen, buf + hlen + 1, ilen - hlen - 1, &md_ctx ); /* DB: Apply dbMask to maskedDB */ mgf_mask( buf + hlen + 1, ilen - hlen - 1, buf + 1, hlen, &md_ctx ); mbedtls_md_free( &md_ctx ); /* * Check contents, in "constant-time" */ p = buf; bad = 0; bad |= *p++; /* First byte must be 0 */ p += hlen; /* Skip seed */ /* Check lHash */ for( i = 0; i < hlen; i++ ) bad |= lhash[i] ^ *p++; /* Get zero-padding len, but always read till end of buffer * (minus one, for the 01 byte) */ pad_len = 0; pad_done = 0; for( i = 0; i < ilen - 2 * hlen - 2; i++ ) { pad_done |= p[i]; pad_len += ((pad_done | (unsigned char)-pad_done) >> 7) ^ 1; } p += pad_len; bad |= *p++ ^ 0x01; /* * The only information "leaked" is whether the padding was correct or not * (eg, no data is copied if it was not correct). This meets the * recommendations in PKCS#1 v2.2: an opponent cannot distinguish between * the different error conditions. */ if( bad != 0 ) { ret = MBEDTLS_ERR_RSA_INVALID_PADDING; goto cleanup; } if( ilen - ( p - buf ) > output_max_len ) { ret = MBEDTLS_ERR_RSA_OUTPUT_TOO_LARGE; goto cleanup; } *olen = ilen - (p - buf); memcpy( output, p, *olen ); ret = 0; cleanup: mbedtls_zeroize( buf, sizeof( buf ) ); mbedtls_zeroize( lhash, sizeof( lhash ) ); return( ret ); } #endif /* MBEDTLS_PKCS1_V21 */ #if defined(MBEDTLS_PKCS1_V15) /* * Implementation of the PKCS#1 v2.1 RSAES-PKCS1-V1_5-DECRYPT function */ int mbedtls_rsa_rsaes_pkcs1_v15_decrypt( mbedtls_rsa_context *ctx, int (*f_rng)(void *, unsigned char *, size_t), void *p_rng, int mode, size_t *olen, const unsigned char *input, unsigned char *output, size_t output_max_len) { int ret; size_t ilen, pad_count = 0, i; unsigned char *p, bad, pad_done = 0; unsigned char buf[MBEDTLS_MPI_MAX_SIZE]; if( mode == MBEDTLS_RSA_PRIVATE && ctx->padding != MBEDTLS_RSA_PKCS_V15 ) return( MBEDTLS_ERR_RSA_BAD_INPUT_DATA ); ilen = ctx->len; if( ilen < 16 || ilen > sizeof( buf ) ) return( MBEDTLS_ERR_RSA_BAD_INPUT_DATA ); ret = ( mode == MBEDTLS_RSA_PUBLIC ) ? 
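          /* apply the raw RSA operation; the PKCS#1 v1.5 padding left in
           * buf[] is then verified in constant time below */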
mbedtls_rsa_public( ctx, input, buf ) : mbedtls_rsa_private( ctx, f_rng, p_rng, input, buf ); if( ret != 0 ) goto cleanup; p = buf; bad = 0; /* * Check and get padding len in "constant-time" */ bad |= *p++; /* First byte must be 0 */ /* This test does not depend on secret data */ if( mode == MBEDTLS_RSA_PRIVATE ) { bad |= *p++ ^ MBEDTLS_RSA_CRYPT; /* Get padding len, but always read till end of buffer * (minus one, for the 00 byte) */ for( i = 0; i < ilen - 3; i++ ) { pad_done |= ((p[i] | (unsigned char)-p[i]) >> 7) ^ 1; pad_count += ((pad_done | (unsigned char)-pad_done) >> 7) ^ 1; } p += pad_count; bad |= *p++; /* Must be zero */ } else { bad |= *p++ ^ MBEDTLS_RSA_SIGN; /* Get padding len, but always read till end of buffer * (minus one, for the 00 byte) */ for( i = 0; i < ilen - 3; i++ ) { pad_done |= ( p[i] != 0xFF ); pad_count += ( pad_done == 0 ); } p += pad_count; bad |= *p++; /* Must be zero */ } bad |= ( pad_count < 8 ); if( bad ) { ret = MBEDTLS_ERR_RSA_INVALID_PADDING; goto cleanup; } if( ilen - ( p - buf ) > output_max_len ) { ret = MBEDTLS_ERR_RSA_OUTPUT_TOO_LARGE; goto cleanup; } *olen = ilen - (p - buf); memcpy( output, p, *olen ); ret = 0; cleanup: mbedtls_zeroize( buf, sizeof( buf ) ); return( ret ); } #endif /* MBEDTLS_PKCS1_V15 */ /* * Do an RSA operation, then remove the message padding */ int mbedtls_rsa_pkcs1_decrypt( mbedtls_rsa_context *ctx, int (*f_rng)(void *, unsigned char *, size_t), void *p_rng, int mode, size_t *olen, const unsigned char *input, unsigned char *output, size_t output_max_len) { switch( ctx->padding ) { #if defined(MBEDTLS_PKCS1_V15) case MBEDTLS_RSA_PKCS_V15: return mbedtls_rsa_rsaes_pkcs1_v15_decrypt( ctx, f_rng, p_rng, mode, olen, input, output, output_max_len ); #endif #if defined(MBEDTLS_PKCS1_V21) case MBEDTLS_RSA_PKCS_V21: return mbedtls_rsa_rsaes_oaep_decrypt( ctx, f_rng, p_rng, mode, NULL, 0, olen, input, output, output_max_len ); #endif default: return( MBEDTLS_ERR_RSA_INVALID_PADDING ); } } #if defined(MBEDTLS_PKCS1_V21) /* * Implementation of the PKCS#1 v2.1 RSASSA-PSS-SIGN function */ int mbedtls_rsa_rsassa_pss_sign( mbedtls_rsa_context *ctx, int (*f_rng)(void *, unsigned char *, size_t), void *p_rng, int mode, mbedtls_md_type_t md_alg, unsigned int hashlen, const unsigned char *hash, unsigned char *sig ) { size_t olen; unsigned char *p = sig; unsigned char salt[MBEDTLS_MD_MAX_SIZE]; unsigned int slen, hlen, offset = 0; int ret; size_t msb; const mbedtls_md_info_t *md_info; mbedtls_md_context_t md_ctx; if( mode == MBEDTLS_RSA_PRIVATE && ctx->padding != MBEDTLS_RSA_PKCS_V21 ) return( MBEDTLS_ERR_RSA_BAD_INPUT_DATA ); if( f_rng == NULL ) return( MBEDTLS_ERR_RSA_BAD_INPUT_DATA ); olen = ctx->len; if( md_alg != MBEDTLS_MD_NONE ) { /* Gather length of hash to sign */ md_info = mbedtls_md_info_from_type( md_alg ); if( md_info == NULL ) return( MBEDTLS_ERR_RSA_BAD_INPUT_DATA ); hashlen = mbedtls_md_get_size( md_info ); } md_info = mbedtls_md_info_from_type( (mbedtls_md_type_t) ctx->hash_id ); if( md_info == NULL ) return( MBEDTLS_ERR_RSA_BAD_INPUT_DATA ); hlen = mbedtls_md_get_size( md_info ); slen = hlen; if( olen < hlen + slen + 2 ) return( MBEDTLS_ERR_RSA_BAD_INPUT_DATA ); memset( sig, 0, olen ); /* Generate salt of length slen */ if( ( ret = f_rng( p_rng, salt, slen ) ) != 0 ) return( MBEDTLS_ERR_RSA_RNG_FAILED + ret ); /* Note: EMSA-PSS encoding is over the length of N - 1 bits */ msb = mbedtls_mpi_bitlen( &ctx->N ) - 1; p += olen - hlen * 2 - 2; *p++ = 0x01; memcpy( p, salt, slen ); p += slen; mbedtls_md_init( &md_ctx ); if( ( ret = 
mbedtls_md_setup( &md_ctx, md_info, 0 ) ) != 0 ) { mbedtls_md_free( &md_ctx ); /* No need to zeroize salt: we didn't use it. */ return( ret ); } /* Generate H = Hash( M' ) */ mbedtls_md_starts( &md_ctx ); mbedtls_md_update( &md_ctx, p, 8 ); mbedtls_md_update( &md_ctx, hash, hashlen ); mbedtls_md_update( &md_ctx, salt, slen ); mbedtls_md_finish( &md_ctx, p ); mbedtls_zeroize( salt, sizeof( salt ) ); /* Compensate for boundary condition when applying mask */ if( msb % 8 == 0 ) offset = 1; /* maskedDB: Apply dbMask to DB */ mgf_mask( sig + offset, olen - hlen - 1 - offset, p, hlen, &md_ctx ); mbedtls_md_free( &md_ctx ); msb = mbedtls_mpi_bitlen( &ctx->N ) - 1; sig[0] &= 0xFF >> ( olen * 8 - msb ); p += hlen; *p++ = 0xBC; return( ( mode == MBEDTLS_RSA_PUBLIC ) ? mbedtls_rsa_public( ctx, sig, sig ) : mbedtls_rsa_private( ctx, f_rng, p_rng, sig, sig ) ); } #endif /* MBEDTLS_PKCS1_V21 */ #if defined(MBEDTLS_PKCS1_V15) /* * Implementation of the PKCS#1 v2.1 RSASSA-PKCS1-V1_5-SIGN function */ /* * Do an RSA operation to sign the message digest */ int mbedtls_rsa_rsassa_pkcs1_v15_sign( mbedtls_rsa_context *ctx, int (*f_rng)(void *, unsigned char *, size_t), void *p_rng, int mode, mbedtls_md_type_t md_alg, unsigned int hashlen, const unsigned char *hash, unsigned char *sig ) { size_t nb_pad, olen, oid_size = 0; unsigned char *p = sig; const char *oid = NULL; unsigned char *sig_try = NULL, *verif = NULL; size_t i; unsigned char diff; volatile unsigned char diff_no_optimize; int ret; if( mode == MBEDTLS_RSA_PRIVATE && ctx->padding != MBEDTLS_RSA_PKCS_V15 ) return( MBEDTLS_ERR_RSA_BAD_INPUT_DATA ); olen = ctx->len; nb_pad = olen - 3; if( md_alg != MBEDTLS_MD_NONE ) { const mbedtls_md_info_t *md_info = mbedtls_md_info_from_type( md_alg ); if( md_info == NULL ) return( MBEDTLS_ERR_RSA_BAD_INPUT_DATA ); if( mbedtls_oid_get_oid_by_md( md_alg, &oid, &oid_size ) != 0 ) return( MBEDTLS_ERR_RSA_BAD_INPUT_DATA ); nb_pad -= 10 + oid_size; hashlen = mbedtls_md_get_size( md_info ); } nb_pad -= hashlen; if( ( nb_pad < 8 ) || ( nb_pad > olen ) ) return( MBEDTLS_ERR_RSA_BAD_INPUT_DATA ); *p++ = 0; *p++ = MBEDTLS_RSA_SIGN; memset( p, 0xFF, nb_pad ); p += nb_pad; *p++ = 0; if( md_alg == MBEDTLS_MD_NONE ) { memcpy( p, hash, hashlen ); } else { /* * DigestInfo ::= SEQUENCE { * digestAlgorithm DigestAlgorithmIdentifier, * digest Digest } * * DigestAlgorithmIdentifier ::= AlgorithmIdentifier * * Digest ::= OCTET STRING */ *p++ = MBEDTLS_ASN1_SEQUENCE | MBEDTLS_ASN1_CONSTRUCTED; *p++ = (unsigned char) ( 0x08 + oid_size + hashlen ); *p++ = MBEDTLS_ASN1_SEQUENCE | MBEDTLS_ASN1_CONSTRUCTED; *p++ = (unsigned char) ( 0x04 + oid_size ); *p++ = MBEDTLS_ASN1_OID; *p++ = oid_size & 0xFF; memcpy( p, oid, oid_size ); p += oid_size; *p++ = MBEDTLS_ASN1_NULL; *p++ = 0x00; *p++ = MBEDTLS_ASN1_OCTET_STRING; *p++ = hashlen; memcpy( p, hash, hashlen ); } if( mode == MBEDTLS_RSA_PUBLIC ) return( mbedtls_rsa_public( ctx, sig, sig ) ); /* * In order to prevent Lenstra's attack, make the signature in a * temporary buffer and check it before returning it. 
*/ sig_try = mbedtls_calloc( 1, ctx->len ); if( sig_try == NULL ) return( MBEDTLS_ERR_MPI_ALLOC_FAILED ); verif = mbedtls_calloc( 1, ctx->len ); if( verif == NULL ) { mbedtls_free( sig_try ); return( MBEDTLS_ERR_MPI_ALLOC_FAILED ); } MBEDTLS_MPI_CHK( mbedtls_rsa_private( ctx, f_rng, p_rng, sig, sig_try ) ); MBEDTLS_MPI_CHK( mbedtls_rsa_public( ctx, sig_try, verif ) ); /* Compare in constant time just in case */ for( diff = 0, i = 0; i < ctx->len; i++ ) diff |= verif[i] ^ sig[i]; diff_no_optimize = diff; if( diff_no_optimize != 0 ) { ret = MBEDTLS_ERR_RSA_PRIVATE_FAILED; goto cleanup; } memcpy( sig, sig_try, ctx->len ); cleanup: mbedtls_free( sig_try ); mbedtls_free( verif ); return( ret ); } #endif /* MBEDTLS_PKCS1_V15 */ /* * Do an RSA operation to sign the message digest */ int mbedtls_rsa_pkcs1_sign( mbedtls_rsa_context *ctx, int (*f_rng)(void *, unsigned char *, size_t), void *p_rng, int mode, mbedtls_md_type_t md_alg, unsigned int hashlen, const unsigned char *hash, unsigned char *sig ) { switch( ctx->padding ) { #if defined(MBEDTLS_PKCS1_V15) case MBEDTLS_RSA_PKCS_V15: return mbedtls_rsa_rsassa_pkcs1_v15_sign( ctx, f_rng, p_rng, mode, md_alg, hashlen, hash, sig ); #endif #if defined(MBEDTLS_PKCS1_V21) case MBEDTLS_RSA_PKCS_V21: return mbedtls_rsa_rsassa_pss_sign( ctx, f_rng, p_rng, mode, md_alg, hashlen, hash, sig ); #endif default: return( MBEDTLS_ERR_RSA_INVALID_PADDING ); } } #if defined(MBEDTLS_PKCS1_V21) /* * Implementation of the PKCS#1 v2.1 RSASSA-PSS-VERIFY function */ int mbedtls_rsa_rsassa_pss_verify_ext( mbedtls_rsa_context *ctx, int (*f_rng)(void *, unsigned char *, size_t), void *p_rng, int mode, mbedtls_md_type_t md_alg, unsigned int hashlen, const unsigned char *hash, mbedtls_md_type_t mgf1_hash_id, int expected_salt_len, const unsigned char *sig ) { int ret; size_t siglen; unsigned char *p; unsigned char result[MBEDTLS_MD_MAX_SIZE]; unsigned char zeros[8]; unsigned int hlen; size_t slen, msb; const mbedtls_md_info_t *md_info; mbedtls_md_context_t md_ctx; unsigned char buf[MBEDTLS_MPI_MAX_SIZE]; if( mode == MBEDTLS_RSA_PRIVATE && ctx->padding != MBEDTLS_RSA_PKCS_V21 ) return( MBEDTLS_ERR_RSA_BAD_INPUT_DATA ); siglen = ctx->len; if( siglen < 16 || siglen > sizeof( buf ) ) return( MBEDTLS_ERR_RSA_BAD_INPUT_DATA ); ret = ( mode == MBEDTLS_RSA_PUBLIC ) ? 
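          /* recover the EMSA-PSS encoded block (maskedDB || H || 0xBC) from
           * the signature into buf[] */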
mbedtls_rsa_public( ctx, sig, buf ) : mbedtls_rsa_private( ctx, f_rng, p_rng, sig, buf ); if( ret != 0 ) return( ret ); p = buf; if( buf[siglen - 1] != 0xBC ) return( MBEDTLS_ERR_RSA_INVALID_PADDING ); if( md_alg != MBEDTLS_MD_NONE ) { /* Gather length of hash to sign */ md_info = mbedtls_md_info_from_type( md_alg ); if( md_info == NULL ) return( MBEDTLS_ERR_RSA_BAD_INPUT_DATA ); hashlen = mbedtls_md_get_size( md_info ); } md_info = mbedtls_md_info_from_type( mgf1_hash_id ); if( md_info == NULL ) return( MBEDTLS_ERR_RSA_BAD_INPUT_DATA ); hlen = mbedtls_md_get_size( md_info ); slen = siglen - hlen - 1; /* Currently length of salt + padding */ memset( zeros, 0, 8 ); /* * Note: EMSA-PSS verification is over the length of N - 1 bits */ msb = mbedtls_mpi_bitlen( &ctx->N ) - 1; /* Compensate for boundary condition when applying mask */ if( msb % 8 == 0 ) { p++; siglen -= 1; } if( buf[0] >> ( 8 - siglen * 8 + msb ) ) return( MBEDTLS_ERR_RSA_BAD_INPUT_DATA ); mbedtls_md_init( &md_ctx ); if( ( ret = mbedtls_md_setup( &md_ctx, md_info, 0 ) ) != 0 ) { mbedtls_md_free( &md_ctx ); return( ret ); } mgf_mask( p, siglen - hlen - 1, p + siglen - hlen - 1, hlen, &md_ctx ); buf[0] &= 0xFF >> ( siglen * 8 - msb ); while( p < buf + siglen && *p == 0 ) p++; if( p == buf + siglen || *p++ != 0x01 ) { mbedtls_md_free( &md_ctx ); return( MBEDTLS_ERR_RSA_INVALID_PADDING ); } /* Actual salt len */ slen -= p - buf; if( expected_salt_len != MBEDTLS_RSA_SALT_LEN_ANY && slen != (size_t) expected_salt_len ) { mbedtls_md_free( &md_ctx ); return( MBEDTLS_ERR_RSA_INVALID_PADDING ); } /* * Generate H = Hash( M' ) */ mbedtls_md_starts( &md_ctx ); mbedtls_md_update( &md_ctx, zeros, 8 ); mbedtls_md_update( &md_ctx, hash, hashlen ); mbedtls_md_update( &md_ctx, p, slen ); mbedtls_md_finish( &md_ctx, result ); mbedtls_md_free( &md_ctx ); if( memcmp( p + slen, result, hlen ) == 0 ) return( 0 ); else return( MBEDTLS_ERR_RSA_VERIFY_FAILED ); } /* * Simplified PKCS#1 v2.1 RSASSA-PSS-VERIFY function */ int mbedtls_rsa_rsassa_pss_verify( mbedtls_rsa_context *ctx, int (*f_rng)(void *, unsigned char *, size_t), void *p_rng, int mode, mbedtls_md_type_t md_alg, unsigned int hashlen, const unsigned char *hash, const unsigned char *sig ) { mbedtls_md_type_t mgf1_hash_id = ( ctx->hash_id != MBEDTLS_MD_NONE ) ? (mbedtls_md_type_t) ctx->hash_id : md_alg; return( mbedtls_rsa_rsassa_pss_verify_ext( ctx, f_rng, p_rng, mode, md_alg, hashlen, hash, mgf1_hash_id, MBEDTLS_RSA_SALT_LEN_ANY, sig ) ); } #endif /* MBEDTLS_PKCS1_V21 */ #if defined(MBEDTLS_PKCS1_V15) /* * Implementation of the PKCS#1 v2.1 RSASSA-PKCS1-v1_5-VERIFY function */ int mbedtls_rsa_rsassa_pkcs1_v15_verify( mbedtls_rsa_context *ctx, int (*f_rng)(void *, unsigned char *, size_t), void *p_rng, int mode, mbedtls_md_type_t md_alg, unsigned int hashlen, const unsigned char *hash, const unsigned char *sig ) { int ret; size_t len, siglen, asn1_len; unsigned char *p, *p0, *end; mbedtls_md_type_t msg_md_alg; const mbedtls_md_info_t *md_info; mbedtls_asn1_buf oid; unsigned char buf[MBEDTLS_MPI_MAX_SIZE]; if( mode == MBEDTLS_RSA_PRIVATE && ctx->padding != MBEDTLS_RSA_PKCS_V15 ) return( MBEDTLS_ERR_RSA_BAD_INPUT_DATA ); siglen = ctx->len; if( siglen < 16 || siglen > sizeof( buf ) ) return( MBEDTLS_ERR_RSA_BAD_INPUT_DATA ); ret = ( mode == MBEDTLS_RSA_PUBLIC ) ? 
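          /* recover the encoded block 0x00 0x01 PS 0x00 DigestInfo from the
           * signature into buf[] */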
mbedtls_rsa_public( ctx, sig, buf ) : mbedtls_rsa_private( ctx, f_rng, p_rng, sig, buf ); if( ret != 0 ) return( ret ); p = buf; if( *p++ != 0 || *p++ != MBEDTLS_RSA_SIGN ) return( MBEDTLS_ERR_RSA_INVALID_PADDING ); while( *p != 0 ) { if( p >= buf + siglen - 1 || *p != 0xFF ) return( MBEDTLS_ERR_RSA_INVALID_PADDING ); p++; } p++; /* skip 00 byte */ /* We've read: 00 01 PS 00 where PS must be at least 8 bytes */ if( p - buf < 11 ) return( MBEDTLS_ERR_RSA_INVALID_PADDING ); len = siglen - ( p - buf ); if( len == hashlen && md_alg == MBEDTLS_MD_NONE ) { if( memcmp( p, hash, hashlen ) == 0 ) return( 0 ); else return( MBEDTLS_ERR_RSA_VERIFY_FAILED ); } md_info = mbedtls_md_info_from_type( md_alg ); if( md_info == NULL ) return( MBEDTLS_ERR_RSA_BAD_INPUT_DATA ); hashlen = mbedtls_md_get_size( md_info ); end = p + len; /* * Parse the ASN.1 structure inside the PKCS#1 v1.5 structure. * Insist on 2-byte length tags, to protect against variants of * Bleichenbacher's forgery attack against lax PKCS#1v1.5 verification. */ p0 = p; if( ( ret = mbedtls_asn1_get_tag( &p, end, &asn1_len, MBEDTLS_ASN1_CONSTRUCTED | MBEDTLS_ASN1_SEQUENCE ) ) != 0 ) return( MBEDTLS_ERR_RSA_VERIFY_FAILED ); if( p != p0 + 2 || asn1_len + 2 != len ) return( MBEDTLS_ERR_RSA_VERIFY_FAILED ); p0 = p; if( ( ret = mbedtls_asn1_get_tag( &p, end, &asn1_len, MBEDTLS_ASN1_CONSTRUCTED | MBEDTLS_ASN1_SEQUENCE ) ) != 0 ) return( MBEDTLS_ERR_RSA_VERIFY_FAILED ); if( p != p0 + 2 || asn1_len + 6 + hashlen != len ) return( MBEDTLS_ERR_RSA_VERIFY_FAILED ); p0 = p; if( ( ret = mbedtls_asn1_get_tag( &p, end, &oid.len, MBEDTLS_ASN1_OID ) ) != 0 ) return( MBEDTLS_ERR_RSA_VERIFY_FAILED ); if( p != p0 + 2 ) return( MBEDTLS_ERR_RSA_VERIFY_FAILED ); oid.p = p; p += oid.len; if( mbedtls_oid_get_md_alg( &oid, &msg_md_alg ) != 0 ) return( MBEDTLS_ERR_RSA_VERIFY_FAILED ); if( md_alg != msg_md_alg ) return( MBEDTLS_ERR_RSA_VERIFY_FAILED ); /* * assume the algorithm parameters must be NULL */ p0 = p; if( ( ret = mbedtls_asn1_get_tag( &p, end, &asn1_len, MBEDTLS_ASN1_NULL ) ) != 0 ) return( MBEDTLS_ERR_RSA_VERIFY_FAILED ); if( p != p0 + 2 ) return( MBEDTLS_ERR_RSA_VERIFY_FAILED ); p0 = p; if( ( ret = mbedtls_asn1_get_tag( &p, end, &asn1_len, MBEDTLS_ASN1_OCTET_STRING ) ) != 0 ) return( MBEDTLS_ERR_RSA_VERIFY_FAILED ); if( p != p0 + 2 || asn1_len != hashlen ) return( MBEDTLS_ERR_RSA_VERIFY_FAILED ); if( memcmp( p, hash, hashlen ) != 0 ) return( MBEDTLS_ERR_RSA_VERIFY_FAILED ); p += hashlen; if( p != end ) return( MBEDTLS_ERR_RSA_VERIFY_FAILED ); return( 0 ); } #endif /* MBEDTLS_PKCS1_V15 */ /* * Do an RSA operation and check the message digest */ int mbedtls_rsa_pkcs1_verify( mbedtls_rsa_context *ctx, int (*f_rng)(void *, unsigned char *, size_t), void *p_rng, int mode, mbedtls_md_type_t md_alg, unsigned int hashlen, const unsigned char *hash, const unsigned char *sig ) { switch( ctx->padding ) { #if defined(MBEDTLS_PKCS1_V15) case MBEDTLS_RSA_PKCS_V15: return mbedtls_rsa_rsassa_pkcs1_v15_verify( ctx, f_rng, p_rng, mode, md_alg, hashlen, hash, sig ); #endif #if defined(MBEDTLS_PKCS1_V21) case MBEDTLS_RSA_PKCS_V21: return mbedtls_rsa_rsassa_pss_verify( ctx, f_rng, p_rng, mode, md_alg, hashlen, hash, sig ); #endif default: return( MBEDTLS_ERR_RSA_INVALID_PADDING ); } } /* * Copy the components of an RSA key */ int mbedtls_rsa_copy( mbedtls_rsa_context *dst, const mbedtls_rsa_context *src ) { int ret; dst->ver = src->ver; dst->len = src->len; MBEDTLS_MPI_CHK( mbedtls_mpi_copy( &dst->N, &src->N ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_copy( &dst->E, &src->E ) ); 
MBEDTLS_MPI_CHK( mbedtls_mpi_copy( &dst->D, &src->D ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_copy( &dst->P, &src->P ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_copy( &dst->Q, &src->Q ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_copy( &dst->DP, &src->DP ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_copy( &dst->DQ, &src->DQ ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_copy( &dst->QP, &src->QP ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_copy( &dst->RN, &src->RN ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_copy( &dst->RP, &src->RP ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_copy( &dst->RQ, &src->RQ ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_copy( &dst->Vi, &src->Vi ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_copy( &dst->Vf, &src->Vf ) ); dst->padding = src->padding; dst->hash_id = src->hash_id; cleanup: if( ret != 0 ) mbedtls_rsa_free( dst ); return( ret ); } /* * Free the components of an RSA key */ void mbedtls_rsa_free( mbedtls_rsa_context *ctx ) { mbedtls_mpi_free( &ctx->Vi ); mbedtls_mpi_free( &ctx->Vf ); mbedtls_mpi_free( &ctx->RQ ); mbedtls_mpi_free( &ctx->RP ); mbedtls_mpi_free( &ctx->RN ); mbedtls_mpi_free( &ctx->QP ); mbedtls_mpi_free( &ctx->DQ ); mbedtls_mpi_free( &ctx->DP ); mbedtls_mpi_free( &ctx->Q ); mbedtls_mpi_free( &ctx->P ); mbedtls_mpi_free( &ctx->D ); mbedtls_mpi_free( &ctx->E ); mbedtls_mpi_free( &ctx->N ); #if defined(MBEDTLS_THREADING_C) mbedtls_mutex_free( &ctx->mutex ); #endif } #if defined(MBEDTLS_SELF_TEST) #include "mbedtls/sha1.h" /* * Example RSA-1024 keypair, for test purposes */ #define KEY_LEN 128 #define RSA_N "9292758453063D803DD603D5E777D788" \ "8ED1D5BF35786190FA2F23EBC0848AEA" \ "DDA92CA6C3D80B32C4D109BE0F36D6AE" \ "7130B9CED7ACDF54CFC7555AC14EEBAB" \ "93A89813FBF3C4F8066D2D800F7C38A8" \ "1AE31942917403FF4946B0A83D3D3E05" \ "EE57C6F5F5606FB5D4BC6CD34EE0801A" \ "5E94BB77B07507233A0BC7BAC8F90F79" #define RSA_E "10001" #define RSA_D "24BF6185468786FDD303083D25E64EFC" \ "66CA472BC44D253102F8B4A9D3BFA750" \ "91386C0077937FE33FA3252D28855837" \ "AE1B484A8A9A45F7EE8C0C634F99E8CD" \ "DF79C5CE07EE72C7F123142198164234" \ "CABB724CF78B8173B9F880FC86322407" \ "AF1FEDFDDE2BEB674CA15F3E81A1521E" \ "071513A1E85B5DFA031F21ECAE91A34D" #define RSA_P "C36D0EB7FCD285223CFB5AABA5BDA3D8" \ "2C01CAD19EA484A87EA4377637E75500" \ "FCB2005C5C7DD6EC4AC023CDA285D796" \ "C3D9E75E1EFC42488BB4F1D13AC30A57" #define RSA_Q "C000DF51A7C77AE8D7C7370C1FF55B69" \ "E211C2B9E5DB1ED0BF61D0D9899620F4" \ "910E4168387E3C30AA1E00C339A79508" \ "8452DD96A9A5EA5D9DCA68DA636032AF" #define RSA_DP "C1ACF567564274FB07A0BBAD5D26E298" \ "3C94D22288ACD763FD8E5600ED4A702D" \ "F84198A5F06C2E72236AE490C93F07F8" \ "3CC559CD27BC2D1CA488811730BB5725" #define RSA_DQ "4959CBF6F8FEF750AEE6977C155579C7" \ "D8AAEA56749EA28623272E4F7D0592AF" \ "7C1F1313CAC9471B5C523BFE592F517B" \ "407A1BD76C164B93DA2D32A383E58357" #define RSA_QP "9AE7FBC99546432DF71896FC239EADAE" \ "F38D18D2B2F0E2DD275AA977E2BF4411" \ "F5A3B2A5D33605AEBBCCBA7FEB9F2D2F" \ "A74206CEC169D74BF5A8C50D6F48EA08" #define PT_LEN 24 #define RSA_PT "\xAA\xBB\xCC\x03\x02\x01\x00\xFF\xFF\xFF\xFF\xFF" \ "\x11\x22\x33\x0A\x0B\x0C\xCC\xDD\xDD\xDD\xDD\xDD" #if defined(MBEDTLS_PKCS1_V15) static int myrand( void *rng_state, unsigned char *output, size_t len ) { #if !defined(__OpenBSD__) size_t i; if( rng_state != NULL ) rng_state = NULL; for( i = 0; i < len; ++i ) output[i] = rand(); #else if( rng_state != NULL ) rng_state = NULL; arc4random_buf( output, len ); #endif /* !OpenBSD */ return( 0 ); } #endif /* MBEDTLS_PKCS1_V15 */ /* * Checkup routine */ int mbedtls_rsa_self_test( int verbose ) { int ret = 0; #if defined(MBEDTLS_PKCS1_V15) size_t len; mbedtls_rsa_context rsa; 
unsigned char rsa_plaintext[PT_LEN]; unsigned char rsa_decrypted[PT_LEN]; unsigned char rsa_ciphertext[KEY_LEN]; #if defined(MBEDTLS_SHA1_C) unsigned char sha1sum[20]; #endif mbedtls_rsa_init( &rsa, MBEDTLS_RSA_PKCS_V15, 0 ); rsa.len = KEY_LEN; MBEDTLS_MPI_CHK( mbedtls_mpi_read_string( &rsa.N , 16, RSA_N ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_read_string( &rsa.E , 16, RSA_E ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_read_string( &rsa.D , 16, RSA_D ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_read_string( &rsa.P , 16, RSA_P ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_read_string( &rsa.Q , 16, RSA_Q ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_read_string( &rsa.DP, 16, RSA_DP ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_read_string( &rsa.DQ, 16, RSA_DQ ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_read_string( &rsa.QP, 16, RSA_QP ) ); if( verbose != 0 ) mbedtls_printf( " RSA key validation: " ); if( mbedtls_rsa_check_pubkey( &rsa ) != 0 || mbedtls_rsa_check_privkey( &rsa ) != 0 ) { if( verbose != 0 ) mbedtls_printf( "failed\n" ); return( 1 ); } if( verbose != 0 ) mbedtls_printf( "passed\n PKCS#1 encryption : " ); memcpy( rsa_plaintext, RSA_PT, PT_LEN ); if( mbedtls_rsa_pkcs1_encrypt( &rsa, myrand, NULL, MBEDTLS_RSA_PUBLIC, PT_LEN, rsa_plaintext, rsa_ciphertext ) != 0 ) { if( verbose != 0 ) mbedtls_printf( "failed\n" ); return( 1 ); } if( verbose != 0 ) mbedtls_printf( "passed\n PKCS#1 decryption : " ); if( mbedtls_rsa_pkcs1_decrypt( &rsa, myrand, NULL, MBEDTLS_RSA_PRIVATE, &len, rsa_ciphertext, rsa_decrypted, sizeof(rsa_decrypted) ) != 0 ) { if( verbose != 0 ) mbedtls_printf( "failed\n" ); return( 1 ); } if( memcmp( rsa_decrypted, rsa_plaintext, len ) != 0 ) { if( verbose != 0 ) mbedtls_printf( "failed\n" ); return( 1 ); } if( verbose != 0 ) mbedtls_printf( "passed\n" ); #if defined(MBEDTLS_SHA1_C) if( verbose != 0 ) mbedtls_printf( " PKCS#1 data sign : " ); mbedtls_sha1( rsa_plaintext, PT_LEN, sha1sum ); if( mbedtls_rsa_pkcs1_sign( &rsa, myrand, NULL, MBEDTLS_RSA_PRIVATE, MBEDTLS_MD_SHA1, 0, sha1sum, rsa_ciphertext ) != 0 ) { if( verbose != 0 ) mbedtls_printf( "failed\n" ); return( 1 ); } if( verbose != 0 ) mbedtls_printf( "passed\n PKCS#1 sig. verify: " ); if( mbedtls_rsa_pkcs1_verify( &rsa, NULL, NULL, MBEDTLS_RSA_PUBLIC, MBEDTLS_MD_SHA1, 0, sha1sum, rsa_ciphertext ) != 0 ) { if( verbose != 0 ) mbedtls_printf( "failed\n" ); return( 1 ); } if( verbose != 0 ) mbedtls_printf( "passed\n" ); #endif /* MBEDTLS_SHA1_C */ if( verbose != 0 ) mbedtls_printf( "\n" ); cleanup: mbedtls_rsa_free( &rsa ); #else /* MBEDTLS_PKCS1_V15 */ ((void) verbose); #endif /* MBEDTLS_PKCS1_V15 */ return( ret ); } #endif /* MBEDTLS_SELF_TEST */ #endif /* MBEDTLS_RSA_C */ backport-iwlwifi-dkms-7906/compat/verification/rsapubkey.asn1.c000066400000000000000000000016441351064634500246650ustar00rootroot00000000000000/* * Automatically generated by asn1_compiler. 
Do not edit
 *
 * ASN.1 parser for rsapubkey
 */
#include <linux/asn1_ber_bytecode.h>
#include "rsapubkey.asn1.h"
enum rsapubkey_actions {
	ACT_rsa_get_e = 0,
	ACT_rsa_get_n = 1,
	NR__rsapubkey_actions = 2
};
static const asn1_action_t rsapubkey_action_table[NR__rsapubkey_actions] = {
	[ 0] = rsa_get_e,
	[ 1] = rsa_get_n,
};
static const unsigned char rsapubkey_machine[] = {
	// RsaPubKey
	[ 0] = ASN1_OP_MATCH,
	[ 1] = _tag(UNIV, CONS, SEQ),
	[ 2] = ASN1_OP_MATCH_ACT,	// n
	[ 3] = _tag(UNIV, PRIM, INT),
	[ 4] = _action(ACT_rsa_get_n),
	[ 5] = ASN1_OP_MATCH_ACT,	// e
	[ 6] = _tag(UNIV, PRIM, INT),
	[ 7] = _action(ACT_rsa_get_e),
	[ 8] = ASN1_OP_END_SEQ,
	[ 9] = ASN1_OP_COMPLETE,
};
const struct asn1_decoder rsapubkey_decoder = {
	.machine = rsapubkey_machine,
	.machlen = sizeof(rsapubkey_machine),
	.actions = rsapubkey_action_table,
};
backport-iwlwifi-dkms-7906/compat/verification/rsapubkey.asn1.h000066400000000000000000000006501351064634500246660ustar00rootroot00000000000000/*
 * Automatically generated by asn1_compiler. Do not edit
 *
 * ASN.1 parser for rsapubkey
 */
#include <linux/asn1_decoder.h>
extern const struct asn1_decoder rsapubkey_decoder;
#define rsa_get_e LINUX_BACKPORT(rsa_get_e)
#define rsa_get_n LINUX_BACKPORT(rsa_get_n)
extern int rsa_get_e(void *, size_t, unsigned char, const void *, size_t);
extern int rsa_get_n(void *, size_t, unsigned char, const void *, size_t);
backport-iwlwifi-dkms-7906/compat/verification/sha256.c000066400000000000000000000323451351064634500230310ustar00rootroot00000000000000/*
 * FIPS-180-2 compliant SHA-256 implementation
 *
 * Copyright (C) 2006-2015, ARM Limited, All Rights Reserved
 * SPDX-License-Identifier: GPL-2.0
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * This file is part of mbed TLS (https://tls.mbed.org)
 */
/*
 * The SHA-256 Secure Hash Standard was published by NIST in 2002.
 *
 * http://csrc.nist.gov/publications/fips/fips180-2/fips180-2.pdf
 */
#if !defined(MBEDTLS_CONFIG_FILE)
#include "mbedtls/config.h"
#else
#include MBEDTLS_CONFIG_FILE
#endif
#if defined(MBEDTLS_SHA256_C)
#include "mbedtls/sha256.h"
#if defined(MBEDTLS_SELF_TEST)
#if defined(MBEDTLS_PLATFORM_C)
#include "mbedtls/platform.h"
#else
#include <stdio.h>
#include <stdlib.h>
#define mbedtls_printf printf
#define mbedtls_calloc calloc
#define mbedtls_free   free
#endif /* MBEDTLS_PLATFORM_C */
#endif /* MBEDTLS_SELF_TEST */
#if !defined(MBEDTLS_SHA256_ALT)
/* Implementation that should never be optimized out by the compiler */
static void mbedtls_zeroize( void *v, size_t n ) {
    volatile unsigned char *p = v; while( n-- ) *p++ = 0;
}
/*
 * 32-bit integer manipulation macros (big endian)
 */
#ifndef GET_UINT32_BE
#define GET_UINT32_BE(n,b,i)                            \
do {                                                    \
    (n) = ( (uint32_t) (b)[(i)    ] << 24 )             \
        | ( (uint32_t) (b)[(i) + 1] << 16 )             \
        | ( (uint32_t) (b)[(i) + 2] <<  8 )             \
        | ( (uint32_t) (b)[(i) + 3]       );            \
} while( 0 )
#endif
#ifndef PUT_UINT32_BE
#define PUT_UINT32_BE(n,b,i)                            \
do {                                                    \
    (b)[(i)    ] = (unsigned char) ( (n) >> 24 );       \
    (b)[(i) + 1] = (unsigned char) ( (n) >> 16 );       \
    (b)[(i) + 2] = (unsigned char) ( (n) >>  8 );       \
    (b)[(i) + 3] = (unsigned char) ( (n)       );       \
} while( 0 )
#endif
void mbedtls_sha256_init( mbedtls_sha256_context *ctx )
{
    memset( ctx, 0, sizeof( mbedtls_sha256_context ) );
}
void mbedtls_sha256_free( mbedtls_sha256_context *ctx )
{
    if( ctx == NULL )
        return;
    mbedtls_zeroize( ctx, sizeof( mbedtls_sha256_context ) );
}
void mbedtls_sha256_clone( mbedtls_sha256_context *dst,
                           const mbedtls_sha256_context *src )
{
    *dst = *src;
}
/*
 * SHA-256 context setup
 */
void mbedtls_sha256_starts( mbedtls_sha256_context *ctx, int is224 )
{
    ctx->total[0] = 0;
    ctx->total[1] = 0;
    if( is224 == 0 )
    {
        /* SHA-256 */
        ctx->state[0] = 0x6A09E667;
        ctx->state[1] = 0xBB67AE85;
        ctx->state[2] = 0x3C6EF372;
        ctx->state[3] = 0xA54FF53A;
        ctx->state[4] = 0x510E527F;
        ctx->state[5] = 0x9B05688C;
        ctx->state[6] = 0x1F83D9AB;
        ctx->state[7] = 0x5BE0CD19;
    }
    else
    {
        /* SHA-224 */
        ctx->state[0] = 0xC1059ED8;
        ctx->state[1] = 0x367CD507;
        ctx->state[2] = 0x3070DD17;
        ctx->state[3] = 0xF70E5939;
        ctx->state[4] = 0xFFC00B31;
        ctx->state[5] = 0x68581511;
        ctx->state[6] = 0x64F98FA7;
        ctx->state[7] = 0xBEFA4FA4;
    }
    ctx->is224 = is224;
}
#if !defined(MBEDTLS_SHA256_PROCESS_ALT)
static const uint32_t K[] =
{
    0x428A2F98, 0x71374491, 0xB5C0FBCF, 0xE9B5DBA5,
    0x3956C25B, 0x59F111F1, 0x923F82A4, 0xAB1C5ED5,
    0xD807AA98, 0x12835B01, 0x243185BE, 0x550C7DC3,
    0x72BE5D74, 0x80DEB1FE, 0x9BDC06A7, 0xC19BF174,
    0xE49B69C1, 0xEFBE4786, 0x0FC19DC6, 0x240CA1CC,
    0x2DE92C6F, 0x4A7484AA, 0x5CB0A9DC, 0x76F988DA,
    0x983E5152, 0xA831C66D, 0xB00327C8, 0xBF597FC7,
    0xC6E00BF3, 0xD5A79147, 0x06CA6351, 0x14292967,
    0x27B70A85, 0x2E1B2138, 0x4D2C6DFC, 0x53380D13,
    0x650A7354, 0x766A0ABB, 0x81C2C92E, 0x92722C85,
    0xA2BFE8A1, 0xA81A664B, 0xC24B8B70, 0xC76C51A3,
    0xD192E819, 0xD6990624, 0xF40E3585, 0x106AA070,
    0x19A4C116, 0x1E376C08, 0x2748774C, 0x34B0BCB5,
    0x391C0CB3, 0x4ED8AA4A, 0x5B9CCA4F, 0x682E6FF3,
    0x748F82EE, 0x78A5636F, 0x84C87814, 0x8CC70208,
    0x90BEFFFA, 0xA4506CEB, 0xBEF9A3F7, 0xC67178F2,
};
#define  SHR(x,n) ((x & 0xFFFFFFFF) >> n)
#define ROTR(x,n) (SHR(x,n) | (x << (32 - n)))
#define S0(x) (ROTR(x, 7) ^ ROTR(x,18) ^  SHR(x, 3))
#define S1(x) (ROTR(x,17) ^ ROTR(x,19) ^  SHR(x,10))
#define S2(x) (ROTR(x, 2) ^ ROTR(x,13) ^ ROTR(x,22))
#define S3(x) (ROTR(x, 6) ^ ROTR(x,11) ^ ROTR(x,25))
#define F0(x,y,z) ((x & y) | (z & (x | y)))
#define F1(x,y,z) (z ^ (x & (y ^ z)))
#define R(t) \ ( \ W[t] = S1(W[t - 2]) + W[t
- 7] + \ S0(W[t - 15]) + W[t - 16] \ ) #define P(a,b,c,d,e,f,g,h,x,K) \ { \ temp1 = h + S3(e) + F1(e,f,g) + K + x; \ temp2 = S2(a) + F0(a,b,c); \ d += temp1; h = temp1 + temp2; \ } void mbedtls_sha256_process( mbedtls_sha256_context *ctx, const unsigned char data[64] ) { uint32_t temp1, temp2, W[64]; uint32_t A[8]; unsigned int i; for( i = 0; i < 8; i++ ) A[i] = ctx->state[i]; #if defined(MBEDTLS_SHA256_SMALLER) for( i = 0; i < 64; i++ ) { if( i < 16 ) GET_UINT32_BE( W[i], data, 4 * i ); else R( i ); P( A[0], A[1], A[2], A[3], A[4], A[5], A[6], A[7], W[i], K[i] ); temp1 = A[7]; A[7] = A[6]; A[6] = A[5]; A[5] = A[4]; A[4] = A[3]; A[3] = A[2]; A[2] = A[1]; A[1] = A[0]; A[0] = temp1; } #else /* MBEDTLS_SHA256_SMALLER */ for( i = 0; i < 16; i++ ) GET_UINT32_BE( W[i], data, 4 * i ); for( i = 0; i < 16; i += 8 ) { P( A[0], A[1], A[2], A[3], A[4], A[5], A[6], A[7], W[i+0], K[i+0] ); P( A[7], A[0], A[1], A[2], A[3], A[4], A[5], A[6], W[i+1], K[i+1] ); P( A[6], A[7], A[0], A[1], A[2], A[3], A[4], A[5], W[i+2], K[i+2] ); P( A[5], A[6], A[7], A[0], A[1], A[2], A[3], A[4], W[i+3], K[i+3] ); P( A[4], A[5], A[6], A[7], A[0], A[1], A[2], A[3], W[i+4], K[i+4] ); P( A[3], A[4], A[5], A[6], A[7], A[0], A[1], A[2], W[i+5], K[i+5] ); P( A[2], A[3], A[4], A[5], A[6], A[7], A[0], A[1], W[i+6], K[i+6] ); P( A[1], A[2], A[3], A[4], A[5], A[6], A[7], A[0], W[i+7], K[i+7] ); } for( i = 16; i < 64; i += 8 ) { P( A[0], A[1], A[2], A[3], A[4], A[5], A[6], A[7], R(i+0), K[i+0] ); P( A[7], A[0], A[1], A[2], A[3], A[4], A[5], A[6], R(i+1), K[i+1] ); P( A[6], A[7], A[0], A[1], A[2], A[3], A[4], A[5], R(i+2), K[i+2] ); P( A[5], A[6], A[7], A[0], A[1], A[2], A[3], A[4], R(i+3), K[i+3] ); P( A[4], A[5], A[6], A[7], A[0], A[1], A[2], A[3], R(i+4), K[i+4] ); P( A[3], A[4], A[5], A[6], A[7], A[0], A[1], A[2], R(i+5), K[i+5] ); P( A[2], A[3], A[4], A[5], A[6], A[7], A[0], A[1], R(i+6), K[i+6] ); P( A[1], A[2], A[3], A[4], A[5], A[6], A[7], A[0], R(i+7), K[i+7] ); } #endif /* MBEDTLS_SHA256_SMALLER */ for( i = 0; i < 8; i++ ) ctx->state[i] += A[i]; } #endif /* !MBEDTLS_SHA256_PROCESS_ALT */ /* * SHA-256 process buffer */ void mbedtls_sha256_update( mbedtls_sha256_context *ctx, const unsigned char *input, size_t ilen ) { size_t fill; uint32_t left; if( ilen == 0 ) return; left = ctx->total[0] & 0x3F; fill = 64 - left; ctx->total[0] += (uint32_t) ilen; ctx->total[0] &= 0xFFFFFFFF; if( ctx->total[0] < (uint32_t) ilen ) ctx->total[1]++; if( left && ilen >= fill ) { memcpy( (void *) (ctx->buffer + left), input, fill ); mbedtls_sha256_process( ctx, ctx->buffer ); input += fill; ilen -= fill; left = 0; } while( ilen >= 64 ) { mbedtls_sha256_process( ctx, input ); input += 64; ilen -= 64; } if( ilen > 0 ) memcpy( (void *) (ctx->buffer + left), input, ilen ); } static const unsigned char sha256_padding[64] = { 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; /* * SHA-256 final digest */ void mbedtls_sha256_finish( mbedtls_sha256_context *ctx, unsigned char output[32] ) { uint32_t last, padn; uint32_t high, low; unsigned char msglen[8]; high = ( ctx->total[0] >> 29 ) | ( ctx->total[1] << 3 ); low = ( ctx->total[0] << 3 ); PUT_UINT32_BE( high, msglen, 0 ); PUT_UINT32_BE( low, msglen, 4 ); last = ctx->total[0] & 0x3F; padn = ( last < 56 ) ? 
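           /* pad out to 56 bytes mod 64, leaving room for the 8-byte
            * message-length field in the final block */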
( 56 - last ) : ( 120 - last ); mbedtls_sha256_update( ctx, sha256_padding, padn ); mbedtls_sha256_update( ctx, msglen, 8 ); PUT_UINT32_BE( ctx->state[0], output, 0 ); PUT_UINT32_BE( ctx->state[1], output, 4 ); PUT_UINT32_BE( ctx->state[2], output, 8 ); PUT_UINT32_BE( ctx->state[3], output, 12 ); PUT_UINT32_BE( ctx->state[4], output, 16 ); PUT_UINT32_BE( ctx->state[5], output, 20 ); PUT_UINT32_BE( ctx->state[6], output, 24 ); if( ctx->is224 == 0 ) PUT_UINT32_BE( ctx->state[7], output, 28 ); } #endif /* !MBEDTLS_SHA256_ALT */ /* * output = SHA-256( input buffer ) */ void mbedtls_sha256( const unsigned char *input, size_t ilen, unsigned char output[32], int is224 ) { mbedtls_sha256_context ctx; mbedtls_sha256_init( &ctx ); mbedtls_sha256_starts( &ctx, is224 ); mbedtls_sha256_update( &ctx, input, ilen ); mbedtls_sha256_finish( &ctx, output ); mbedtls_sha256_free( &ctx ); } #if defined(MBEDTLS_SELF_TEST) /* * FIPS-180-2 test vectors */ static const unsigned char sha256_test_buf[3][57] = { { "abc" }, { "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq" }, { "" } }; static const int sha256_test_buflen[3] = { 3, 56, 1000 }; static const unsigned char sha256_test_sum[6][32] = { /* * SHA-224 test vectors */ { 0x23, 0x09, 0x7D, 0x22, 0x34, 0x05, 0xD8, 0x22, 0x86, 0x42, 0xA4, 0x77, 0xBD, 0xA2, 0x55, 0xB3, 0x2A, 0xAD, 0xBC, 0xE4, 0xBD, 0xA0, 0xB3, 0xF7, 0xE3, 0x6C, 0x9D, 0xA7 }, { 0x75, 0x38, 0x8B, 0x16, 0x51, 0x27, 0x76, 0xCC, 0x5D, 0xBA, 0x5D, 0xA1, 0xFD, 0x89, 0x01, 0x50, 0xB0, 0xC6, 0x45, 0x5C, 0xB4, 0xF5, 0x8B, 0x19, 0x52, 0x52, 0x25, 0x25 }, { 0x20, 0x79, 0x46, 0x55, 0x98, 0x0C, 0x91, 0xD8, 0xBB, 0xB4, 0xC1, 0xEA, 0x97, 0x61, 0x8A, 0x4B, 0xF0, 0x3F, 0x42, 0x58, 0x19, 0x48, 0xB2, 0xEE, 0x4E, 0xE7, 0xAD, 0x67 }, /* * SHA-256 test vectors */ { 0xBA, 0x78, 0x16, 0xBF, 0x8F, 0x01, 0xCF, 0xEA, 0x41, 0x41, 0x40, 0xDE, 0x5D, 0xAE, 0x22, 0x23, 0xB0, 0x03, 0x61, 0xA3, 0x96, 0x17, 0x7A, 0x9C, 0xB4, 0x10, 0xFF, 0x61, 0xF2, 0x00, 0x15, 0xAD }, { 0x24, 0x8D, 0x6A, 0x61, 0xD2, 0x06, 0x38, 0xB8, 0xE5, 0xC0, 0x26, 0x93, 0x0C, 0x3E, 0x60, 0x39, 0xA3, 0x3C, 0xE4, 0x59, 0x64, 0xFF, 0x21, 0x67, 0xF6, 0xEC, 0xED, 0xD4, 0x19, 0xDB, 0x06, 0xC1 }, { 0xCD, 0xC7, 0x6E, 0x5C, 0x99, 0x14, 0xFB, 0x92, 0x81, 0xA1, 0xC7, 0xE2, 0x84, 0xD7, 0x3E, 0x67, 0xF1, 0x80, 0x9A, 0x48, 0xA4, 0x97, 0x20, 0x0E, 0x04, 0x6D, 0x39, 0xCC, 0xC7, 0x11, 0x2C, 0xD0 } }; /* * Checkup routine */ int mbedtls_sha256_self_test( int verbose ) { int i, j, k, buflen, ret = 0; unsigned char *buf; unsigned char sha256sum[32]; mbedtls_sha256_context ctx; buf = mbedtls_calloc( 1024, sizeof(unsigned char) ); if( NULL == buf ) { if( verbose != 0 ) mbedtls_printf( "Buffer allocation failed\n" ); return( 1 ); } mbedtls_sha256_init( &ctx ); for( i = 0; i < 6; i++ ) { j = i % 3; k = i < 3; if( verbose != 0 ) mbedtls_printf( " SHA-%d test #%d: ", 256 - k * 32, j + 1 ); mbedtls_sha256_starts( &ctx, k ); if( j == 2 ) { memset( buf, 'a', buflen = 1000 ); for( j = 0; j < 1000; j++ ) mbedtls_sha256_update( &ctx, buf, buflen ); } else mbedtls_sha256_update( &ctx, sha256_test_buf[j], sha256_test_buflen[j] ); mbedtls_sha256_finish( &ctx, sha256sum ); if( memcmp( sha256sum, sha256_test_sum[i], 32 - k * 4 ) != 0 ) { if( verbose != 0 ) mbedtls_printf( "failed\n" ); ret = 1; goto exit; } if( verbose != 0 ) mbedtls_printf( "passed\n" ); } if( verbose != 0 ) mbedtls_printf( "\n" ); exit: mbedtls_sha256_free( &ctx ); mbedtls_free( buf ); return( ret ); } #endif /* MBEDTLS_SELF_TEST */ #endif /* MBEDTLS_SHA256_C */ 
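/*
 * Illustration (added; not part of the original archive): how the streaming
 * API above composes into the one-shot mbedtls_sha256() wrapper. Both paths
 * produce the same 32-byte digest for the same input.
 */
#if 0 /* example only, not compiled */
static void sha256_example(void)
{
    static const unsigned char msg[] = { 'a', 'b', 'c' };
    unsigned char d1[32], d2[32];
    mbedtls_sha256_context ctx;

    /* one-shot helper (is224 = 0 selects SHA-256 rather than SHA-224) */
    mbedtls_sha256( msg, sizeof( msg ), d1, 0 );

    /* equivalent streaming sequence */
    mbedtls_sha256_init( &ctx );
    mbedtls_sha256_starts( &ctx, 0 );
    mbedtls_sha256_update( &ctx, msg, sizeof( msg ) );
    mbedtls_sha256_finish( &ctx, d2 );
    mbedtls_sha256_free( &ctx );

    /* d1 and d2 now both hold SHA-256("abc") */
}
#endif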
backport-iwlwifi-dkms-7906/compat/verification/verify.c000066400000000000000000000027741351064634500233250ustar00rootroot00000000000000/*
 * NOTE: the six include targets of this file were lost when the archive was
 * rendered as text; the headers below are a best-effort reconstruction of
 * what verify_pkcs7_signature() needs, not a verbatim copy of the original.
 */
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/key.h>
#include <linux/verification.h>
#include <crypto/pkcs7.h>
int verify_pkcs7_signature(const void *data, size_t len,
			   const void *raw_pkcs7, size_t pkcs7_len,
			   struct key *trusted_keys,
			   enum key_being_used_for usage,
			   int (*view_content)(void *ctx,
					       const void *data, size_t len,
					       size_t asn1hdrlen),
			   void *ctx)
{
	struct pkcs7_message *pkcs7;
	int ret;

	pkcs7 = pkcs7_parse_message(raw_pkcs7, pkcs7_len);
	if (IS_ERR(pkcs7))
		return PTR_ERR(pkcs7);

	/* The data should be detached - so we need to supply it. */
	if (data && pkcs7_supply_detached_data(pkcs7, data, len) < 0) {
		pr_err("PKCS#7 signature with non-detached data\n");
		ret = -EBADMSG;
		goto error;
	}

	ret = pkcs7_verify(pkcs7, usage);
	if (ret < 0)
		goto error;

	if (WARN_ON(!trusted_keys)) {
		ret = -EINVAL;
		goto error;
	}
	ret = pkcs7_validate_trust(pkcs7, trusted_keys);
	if (ret < 0) {
		if (ret == -ENOKEY)
			pr_err("PKCS#7 signature not signed with a trusted key\n");
		goto error;
	}

	if (view_content) {
		size_t asn1hdrlen;

		ret = pkcs7_get_content_data(pkcs7, &data, &len, &asn1hdrlen);
		if (ret < 0) {
			if (ret == -ENODATA)
				pr_devel("PKCS#7 message does not contain data\n");
			goto error;
		}

		ret = view_content(ctx, data, len, asn1hdrlen);
	}

error:
	pkcs7_free_message(pkcs7);
	pr_devel("<==%s() = %d\n", __func__, ret);
	return ret;
}
EXPORT_SYMBOL_GPL(verify_pkcs7_signature);
backport-iwlwifi-dkms-7906/compat/verification/x509.asn1.c000066400000000000000000000130441351064634500233620ustar00rootroot00000000000000/*
 * Automatically generated by asn1_compiler. Do not edit
 *
 * ASN.1 parser for x509
 */
#include <linux/asn1_ber_bytecode.h>
#include "x509.asn1.h"
enum x509_actions {
	ACT_x509_extract_key_data = 0,
	ACT_x509_extract_name_segment = 1,
	ACT_x509_note_OID = 2,
	ACT_x509_note_issuer = 3,
	ACT_x509_note_not_after = 4,
	ACT_x509_note_not_before = 5,
	ACT_x509_note_pkey_algo = 6,
	ACT_x509_note_serial = 7,
	ACT_x509_note_signature = 8,
	ACT_x509_note_subject = 9,
	ACT_x509_note_tbs_certificate = 10,
	ACT_x509_process_extension = 11,
	NR__x509_actions = 12
};
static const asn1_action_t x509_action_table[NR__x509_actions] = {
	[ 0] = x509_extract_key_data,
	[ 1] = x509_extract_name_segment,
	[ 2] = x509_note_OID,
	[ 3] = x509_note_issuer,
	[ 4] = x509_note_not_after,
	[ 5] = x509_note_not_before,
	[ 6] = x509_note_pkey_algo,
	[ 7] = x509_note_serial,
	[ 8] = x509_note_signature,
	[ 9] = x509_note_subject,
	[ 10] = x509_note_tbs_certificate,
	[ 11] = x509_process_extension,
};
static const unsigned char x509_machine[] = {
	// Certificate
	[ 0] = ASN1_OP_MATCH,
	[ 1] = _tag(UNIV, CONS, SEQ),
	// TBSCertificate
	[ 2] = ASN1_OP_MATCH,
	[ 3] = _tag(UNIV, CONS, SEQ),
	[ 4] = ASN1_OP_MATCH_JUMP_OR_SKIP,	// version
	[ 5] = _tagn(CONT, CONS, 0),
	[ 6] = _jump_target(70),
	// CertificateSerialNumber
	[ 7] = ASN1_OP_MATCH,
	[ 8] = _tag(UNIV, PRIM, INT),
	[ 9] = ASN1_OP_ACT,
	[ 10] = _action(ACT_x509_note_serial),
	// AlgorithmIdentifier
	[ 11] = ASN1_OP_MATCH_JUMP,
	[ 12] = _tag(UNIV, CONS, SEQ),
	[ 13] = _jump_target(74),	// --> AlgorithmIdentifier
	[ 14] = ASN1_OP_ACT,
	[ 15] = _action(ACT_x509_note_pkey_algo),
	// Name
	[ 16] = ASN1_OP_MATCH_JUMP,
	[ 17] = _tag(UNIV, CONS, SEQ),
	[ 18] = _jump_target(80),	// --> Name
	[ 19] = ASN1_OP_ACT,
	[ 20] = _action(ACT_x509_note_issuer),
	// Validity
	[ 21] = ASN1_OP_MATCH,
	[ 22] = _tag(UNIV, CONS, SEQ),
	// Time
	[ 23] = ASN1_OP_MATCH_OR_SKIP,		// utcTime
	[ 24] = _tag(UNIV, PRIM, UNITIM),
	[ 25] = ASN1_OP_COND_MATCH_OR_SKIP,	// generalTime
	[ 26] = _tag(UNIV, PRIM, GENTIM),
	[ 27] = 
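	/* added note: COND_FAIL rejects the Validity Time if neither the
	 * utcTime nor the generalTime alternative matched */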
ASN1_OP_COND_FAIL, [ 28] = ASN1_OP_ACT, [ 29] = _action(ACT_x509_note_not_before), // Time [ 30] = ASN1_OP_MATCH_OR_SKIP, // utcTime [ 31] = _tag(UNIV, PRIM, UNITIM), [ 32] = ASN1_OP_COND_MATCH_OR_SKIP, // generalTime [ 33] = _tag(UNIV, PRIM, GENTIM), [ 34] = ASN1_OP_COND_FAIL, [ 35] = ASN1_OP_ACT, [ 36] = _action(ACT_x509_note_not_after), [ 37] = ASN1_OP_END_SEQ, // Name [ 38] = ASN1_OP_MATCH_JUMP, [ 39] = _tag(UNIV, CONS, SEQ), [ 40] = _jump_target(80), // --> Name [ 41] = ASN1_OP_ACT, [ 42] = _action(ACT_x509_note_subject), // SubjectPublicKeyInfo [ 43] = ASN1_OP_MATCH, [ 44] = _tag(UNIV, CONS, SEQ), // AlgorithmIdentifier [ 45] = ASN1_OP_MATCH_JUMP, [ 46] = _tag(UNIV, CONS, SEQ), [ 47] = _jump_target(74), // --> AlgorithmIdentifier [ 48] = ASN1_OP_MATCH_ACT, // subjectPublicKey [ 49] = _tag(UNIV, PRIM, BTS), [ 50] = _action(ACT_x509_extract_key_data), [ 51] = ASN1_OP_END_SEQ, // UniqueIdentifier [ 52] = ASN1_OP_MATCH_OR_SKIP, // issuerUniqueID [ 53] = _tagn(CONT, PRIM, 1), // UniqueIdentifier [ 54] = ASN1_OP_MATCH_OR_SKIP, // subjectUniqueID [ 55] = _tagn(CONT, PRIM, 2), [ 56] = ASN1_OP_MATCH_JUMP_OR_SKIP, // extensions [ 57] = _tagn(CONT, CONS, 3), [ 58] = _jump_target(95), [ 59] = ASN1_OP_END_SEQ, [ 60] = ASN1_OP_ACT, [ 61] = _action(ACT_x509_note_tbs_certificate), // AlgorithmIdentifier [ 62] = ASN1_OP_MATCH_JUMP, [ 63] = _tag(UNIV, CONS, SEQ), [ 64] = _jump_target(74), // --> AlgorithmIdentifier [ 65] = ASN1_OP_MATCH_ACT, // signature [ 66] = _tag(UNIV, PRIM, BTS), [ 67] = _action(ACT_x509_note_signature), [ 68] = ASN1_OP_END_SEQ, [ 69] = ASN1_OP_COMPLETE, // Version [ 70] = ASN1_OP_MATCH, [ 71] = _tag(UNIV, PRIM, INT), [ 72] = ASN1_OP_END_SEQ, [ 73] = ASN1_OP_RETURN, [ 74] = ASN1_OP_MATCH_ACT, // algorithm [ 75] = _tag(UNIV, PRIM, OID), [ 76] = _action(ACT_x509_note_OID), [ 77] = ASN1_OP_MATCH_ANY_OR_SKIP, // parameters [ 78] = ASN1_OP_END_SEQ, [ 79] = ASN1_OP_RETURN, // RelativeDistinguishedName [ 80] = ASN1_OP_MATCH, [ 81] = _tag(UNIV, CONS, SET), // AttributeValueAssertion [ 82] = ASN1_OP_MATCH, [ 83] = _tag(UNIV, CONS, SEQ), [ 84] = ASN1_OP_MATCH_ACT, // attributeType [ 85] = _tag(UNIV, PRIM, OID), [ 86] = _action(ACT_x509_note_OID), [ 87] = ASN1_OP_MATCH_ANY_ACT, // attributeValue [ 88] = _action(ACT_x509_extract_name_segment), [ 89] = ASN1_OP_END_SEQ, [ 90] = ASN1_OP_END_SET_OF, [ 91] = _jump_target(82), [ 92] = ASN1_OP_END_SEQ_OF, [ 93] = _jump_target(80), [ 94] = ASN1_OP_RETURN, // Extensions [ 95] = ASN1_OP_MATCH, [ 96] = _tag(UNIV, CONS, SEQ), // Extension [ 97] = ASN1_OP_MATCH, [ 98] = _tag(UNIV, CONS, SEQ), [ 99] = ASN1_OP_MATCH_ACT, // extnid [ 100] = _tag(UNIV, PRIM, OID), [ 101] = _action(ACT_x509_note_OID), [ 102] = ASN1_OP_MATCH_OR_SKIP, // critical [ 103] = _tag(UNIV, PRIM, BOOL), [ 104] = ASN1_OP_MATCH_ACT, // extnValue [ 105] = _tag(UNIV, PRIM, OTS), [ 106] = _action(ACT_x509_process_extension), [ 107] = ASN1_OP_END_SEQ, [ 108] = ASN1_OP_END_SEQ_OF, [ 109] = _jump_target(97), [ 110] = ASN1_OP_END_SEQ, [ 111] = ASN1_OP_RETURN, }; const struct asn1_decoder x509_decoder = { .machine = x509_machine, .machlen = sizeof(x509_machine), .actions = x509_action_table, }; backport-iwlwifi-dkms-7906/compat/verification/x509.asn1.h000066400000000000000000000022571351064634500233730ustar00rootroot00000000000000/* * Automatically generated by asn1_compiler. 
Do not edit
 *
 * ASN.1 parser for x509
 */
#include <linux/asn1_decoder.h>
extern const struct asn1_decoder x509_decoder;
extern int x509_extract_key_data(void *, size_t, unsigned char, const void *, size_t);
extern int x509_extract_name_segment(void *, size_t, unsigned char, const void *, size_t);
extern int x509_note_OID(void *, size_t, unsigned char, const void *, size_t);
extern int x509_note_issuer(void *, size_t, unsigned char, const void *, size_t);
extern int x509_note_not_after(void *, size_t, unsigned char, const void *, size_t);
extern int x509_note_not_before(void *, size_t, unsigned char, const void *, size_t);
extern int x509_note_pkey_algo(void *, size_t, unsigned char, const void *, size_t);
extern int x509_note_serial(void *, size_t, unsigned char, const void *, size_t);
extern int x509_note_signature(void *, size_t, unsigned char, const void *, size_t);
extern int x509_note_subject(void *, size_t, unsigned char, const void *, size_t);
extern int x509_note_tbs_certificate(void *, size_t, unsigned char, const void *, size_t);
extern int x509_process_extension(void *, size_t, unsigned char, const void *, size_t);
backport-iwlwifi-dkms-7906/compat/verification/x509_akid.asn1.c000066400000000000000000000105151351064634500243520ustar00rootroot00000000000000/*
 * Automatically generated by asn1_compiler. Do not edit
 *
 * ASN.1 parser for x509_akid
 */
#include <linux/asn1_ber_bytecode.h>
#include "x509_akid.asn1.h"
enum x509_akid_actions {
	ACT_x509_akid_note_kid = 0,
	ACT_x509_akid_note_name = 1,
	ACT_x509_akid_note_serial = 2,
	ACT_x509_extract_name_segment = 3,
	ACT_x509_note_OID = 4,
	NR__x509_akid_actions = 5
};
static const asn1_action_t x509_akid_action_table[NR__x509_akid_actions] = {
	[ 0] = x509_akid_note_kid,
	[ 1] = x509_akid_note_name,
	[ 2] = x509_akid_note_serial,
	[ 3] = x509_extract_name_segment,
	[ 4] = x509_note_OID,
};
static const unsigned char x509_akid_machine[] = {
	// AuthorityKeyIdentifier
	[ 0] = ASN1_OP_MATCH,
	[ 1] = _tag(UNIV, CONS, SEQ),
	// KeyIdentifier
	[ 2] = ASN1_OP_MATCH_ACT_OR_SKIP,	// keyIdentifier
	[ 3] = _tagn(CONT, PRIM, 0),
	[ 4] = _action(ACT_x509_akid_note_kid),
	// GeneralNames
	[ 5] = ASN1_OP_MATCH_JUMP_OR_SKIP,	// authorityCertIssuer
	[ 6] = _tagn(CONT, CONS, 1),
	[ 7] = _jump_target(13),	// --> GeneralNames
	// CertificateSerialNumber
	[ 8] = ASN1_OP_MATCH_ACT_OR_SKIP,	// authorityCertSerialNumber
	[ 9] = _tagn(CONT, PRIM, 2),
	[ 10] = _action(ACT_x509_akid_note_serial),
	[ 11] = ASN1_OP_END_SEQ,
	[ 12] = ASN1_OP_COMPLETE,
	// GeneralName
	[ 13] = ASN1_OP_MATCH_JUMP_OR_SKIP,	// otherName
	[ 14] = _tagn(CONT, CONS, 0),
	[ 15] = _jump_target(44),
	[ 16] = ASN1_OP_COND_MATCH_JUMP_OR_SKIP,	// rfc822Name
	[ 17] = _tagn(CONT, CONS, 1),
	[ 18] = _jump_target(47),
	[ 19] = ASN1_OP_COND_MATCH_JUMP_OR_SKIP,	// dNSName
	[ 20] = _tagn(CONT, CONS, 2),
	[ 21] = _jump_target(51),
	[ 22] = ASN1_OP_COND_MATCH_JUMP_OR_SKIP,	// x400Address
	[ 23] = _tagn(CONT, CONS, 3),
	[ 24] = _jump_target(55),
	[ 25] = ASN1_OP_COND_MATCH_JUMP_OR_SKIP,	// directoryName
	[ 26] = _tagn(CONT, CONS, 4),
	[ 27] = _jump_target(58),
	[ 28] = ASN1_OP_COND_MATCH_JUMP_OR_SKIP,	// ediPartyName
	[ 29] = _tagn(CONT, CONS, 5),
	[ 30] = _jump_target(78),
	[ 31] = ASN1_OP_COND_MATCH_JUMP_OR_SKIP,	// uniformResourceIdentifier
	[ 32] = _tagn(CONT, CONS, 6),
	[ 33] = _jump_target(81),
	[ 34] = ASN1_OP_COND_MATCH_JUMP_OR_SKIP,	// iPAddress
	[ 35] = _tagn(CONT, CONS, 7),
	[ 36] = _jump_target(85),
	[ 37] = ASN1_OP_COND_MATCH_JUMP_OR_SKIP,	// registeredID
	[ 38] = _tagn(CONT, CONS, 8),
	[ 39] = _jump_target(89),
	[ 40] = ASN1_OP_COND_FAIL,
	[ 41] = ASN1_OP_END_SEQ_OF,
	[ 42] = _jump_target(13),
	[ 43] = ASN1_OP_RETURN,
	[ 44] = ASN1_OP_MATCH_ANY,	// otherName
	[ 45] = ASN1_OP_END_SEQ,
	[ 46] = ASN1_OP_RETURN,
	[ 47] = ASN1_OP_MATCH,		// rfc822Name
	[ 48] = _tag(UNIV, PRIM, IA5STR),
	[ 49] = ASN1_OP_END_SEQ,
	[ 50] = ASN1_OP_RETURN,
	[ 51] = ASN1_OP_MATCH,		// dNSName
	[ 52] = _tag(UNIV, PRIM, IA5STR),
	[ 53] = ASN1_OP_END_SEQ,
	[ 54] = ASN1_OP_RETURN,
	[ 55] = ASN1_OP_MATCH_ANY,	// x400Address
	[ 56] = ASN1_OP_END_SEQ,
	[ 57] = ASN1_OP_RETURN,
	// Name
	[ 58] = ASN1_OP_MATCH,
	[ 59] = _tag(UNIV, CONS, SEQ),
	// RelativeDistinguishedName
	[ 60] = ASN1_OP_MATCH,
	[ 61] = _tag(UNIV, CONS, SET),
	// AttributeValueAssertion
	[ 62] = ASN1_OP_MATCH,
	[ 63] = _tag(UNIV, CONS, SEQ),
	[ 64] = ASN1_OP_MATCH_ACT,	// attributeType
	[ 65] = _tag(UNIV, PRIM, OID),
	[ 66] = _action(ACT_x509_note_OID),
	[ 67] = ASN1_OP_MATCH_ANY_ACT,	// attributeValue
	[ 68] = _action(ACT_x509_extract_name_segment),
	[ 69] = ASN1_OP_END_SEQ,
	[ 70] = ASN1_OP_END_SET_OF,
	[ 71] = _jump_target(62),
	[ 72] = ASN1_OP_END_SEQ_OF,
	[ 73] = _jump_target(60),
	[ 74] = ASN1_OP_ACT,
	[ 75] = _action(ACT_x509_akid_note_name),
	[ 76] = ASN1_OP_END_SEQ,
	[ 77] = ASN1_OP_RETURN,
	[ 78] = ASN1_OP_MATCH_ANY,	// ediPartyName
	[ 79] = ASN1_OP_END_SEQ,
	[ 80] = ASN1_OP_RETURN,
	[ 81] = ASN1_OP_MATCH,		// uniformResourceIdentifier
	[ 82] = _tag(UNIV, PRIM, IA5STR),
	[ 83] = ASN1_OP_END_SEQ,
	[ 84] = ASN1_OP_RETURN,
	[ 85] = ASN1_OP_MATCH,		// iPAddress
	[ 86] = _tag(UNIV, PRIM, OTS),
	[ 87] = ASN1_OP_END_SEQ,
	[ 88] = ASN1_OP_RETURN,
	[ 89] = ASN1_OP_MATCH,		// registeredID
	[ 90] = _tag(UNIV, PRIM, OID),
	[ 91] = ASN1_OP_END_SEQ,
	[ 92] = ASN1_OP_RETURN,
};
const struct asn1_decoder x509_akid_decoder = {
	.machine = x509_akid_machine,
	.machlen = sizeof(x509_akid_machine),
	.actions = x509_akid_action_table,
};
backport-iwlwifi-dkms-7906/compat/verification/x509_akid.asn1.h000066400000000000000000000011431351064634500243540ustar00rootroot00000000000000/*
 * Automatically generated by asn1_compiler. Do not edit
 *
 * ASN.1 parser for x509_akid
 */
#include <linux/asn1_decoder.h>
extern const struct asn1_decoder x509_akid_decoder;
extern int x509_akid_note_kid(void *, size_t, unsigned char, const void *, size_t);
extern int x509_akid_note_name(void *, size_t, unsigned char, const void *, size_t);
extern int x509_akid_note_serial(void *, size_t, unsigned char, const void *, size_t);
extern int x509_extract_name_segment(void *, size_t, unsigned char, const void *, size_t);
extern int x509_note_OID(void *, size_t, unsigned char, const void *, size_t);
backport-iwlwifi-dkms-7906/compat/verification/x509_cert_parser.c000066400000000000000000000377551351064634500251260ustar00rootroot00000000000000/* X.509 certificate parser
 *
 * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#define pr_fmt(fmt) "X.509: "fmt
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/oid_registry.h>
#include <crypto/public_key.h>
#include "x509_parser.h"
#include "x509.asn1.h"
#include "x509_akid.asn1.h"
struct x509_parse_context {
	struct x509_certificate *cert;	/* Certificate being constructed */
	unsigned long data;		/* Start of data */
	const void *cert_start;		/* Start of cert content */
	const void *key;		/* Key data */
	size_t key_size;		/* Size of key data */
	enum OID last_oid;		/* Last OID encountered */
	enum OID algo_oid;		/* Algorithm OID */
	unsigned char nr_mpi;		/* Number of MPIs stored */
	u8 o_size;			/* Size of organizationName (O) */
	u8 cn_size;			/* Size of commonName (CN) */
	u8 email_size;			/* Size of emailAddress */
	u16 o_offset;			/* Offset of organizationName (O) */
	u16 cn_offset;			/* Offset of commonName (CN) */
	u16 email_offset;		/* Offset of emailAddress */
	unsigned raw_akid_size;
	const void *raw_akid;		/* Raw authorityKeyId in ASN.1 */
	const void *akid_raw_issuer;	/* Raw directoryName in authorityKeyId */
	unsigned akid_raw_issuer_size;
};
/*
 * Free an X.509 certificate
 */
void x509_free_certificate(struct x509_certificate *cert)
{
	if (cert) {
		public_key_free(cert->pub);
		public_key_signature_free(cert->sig);
		kfree(cert->issuer);
		kfree(cert->subject);
		kfree(cert->id);
		kfree(cert->skid);
		kfree(cert);
	}
}
EXPORT_SYMBOL_GPL(x509_free_certificate);
/*
 * Parse an X.509 certificate
 */
struct x509_certificate *x509_cert_parse(const void *data, size_t datalen)
{
	struct x509_certificate *cert;
	struct x509_parse_context *ctx;
	struct asymmetric_key_id *kid;
	long ret;

	ret = -ENOMEM;
	cert = kzalloc(sizeof(struct x509_certificate), GFP_KERNEL);
	if (!cert)
		goto error_no_cert;
	cert->pub = kzalloc(sizeof(struct public_key), GFP_KERNEL);
	if (!cert->pub)
		goto error_no_ctx;
	cert->sig = kzalloc(sizeof(struct public_key_signature), GFP_KERNEL);
	if (!cert->sig)
		goto error_no_ctx;
	ctx = kzalloc(sizeof(struct x509_parse_context), GFP_KERNEL);
	if (!ctx)
		goto error_no_ctx;

	ctx->cert = cert;
	ctx->data = (unsigned long)data;

	/* Attempt to decode the certificate */
	ret = asn1_ber_decoder(&x509_decoder, ctx, data, datalen);
	if (ret < 0)
		goto error_decode;

	/* Decode the AuthorityKeyIdentifier */
	if (ctx->raw_akid) {
		pr_devel("AKID: %u %*phN\n",
			 ctx->raw_akid_size, ctx->raw_akid_size, ctx->raw_akid);
		ret = asn1_ber_decoder(&x509_akid_decoder, ctx,
				       ctx->raw_akid, ctx->raw_akid_size);
		if (ret < 0) {
			pr_warn("Couldn't decode AuthKeyIdentifier\n");
			goto error_decode;
		}
	}

	ret = -ENOMEM;
	cert->pub->key = kmemdup(ctx->key, ctx->key_size, GFP_KERNEL);
	if (!cert->pub->key)
		goto error_decode;
	cert->pub->keylen = ctx->key_size;

	/* Grab the signature bits */
	ret = x509_get_sig_params(cert);
	if (ret < 0)
		goto error_decode;

	/* Generate cert issuer + serial number key ID */
	kid = asymmetric_key_generate_id(cert->raw_serial,
					 cert->raw_serial_size,
					 cert->raw_issuer,
					 cert->raw_issuer_size);
	if (IS_ERR(kid)) {
		ret = PTR_ERR(kid);
		goto error_decode;
	}
	cert->id = kid;

	/* Detect self-signed certificates */
	ret = x509_check_for_self_signed(cert);
	if (ret < 0)
		goto error_decode;

	kfree(ctx);
	return cert;

error_decode:
	kfree(ctx);
error_no_ctx:
	x509_free_certificate(cert);
error_no_cert:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(x509_cert_parse);
/*
 * Note an OID when we find one for later processing when we know how
 * to interpret it.
*/ int x509_note_OID(void *context, size_t hdrlen, unsigned char tag, const void *value, size_t vlen) { struct x509_parse_context *ctx = context; ctx->last_oid = look_up_OID(value, vlen); if (ctx->last_oid == OID__NR) { char buffer[50]; sprint_oid(value, vlen, buffer, sizeof(buffer)); pr_debug("Unknown OID: [%lu] %s\n", (unsigned long)value - ctx->data, buffer); } return 0; } /* * Save the position of the TBS data so that we can check the signature over it * later. */ int x509_note_tbs_certificate(void *context, size_t hdrlen, unsigned char tag, const void *value, size_t vlen) { struct x509_parse_context *ctx = context; pr_debug("x509_note_tbs_certificate(,%zu,%02x,%ld,%zu)!\n", hdrlen, tag, (unsigned long)value - ctx->data, vlen); ctx->cert->tbs = value - hdrlen; ctx->cert->tbs_size = vlen + hdrlen; return 0; } /* * Record the public key algorithm */ int x509_note_pkey_algo(void *context, size_t hdrlen, unsigned char tag, const void *value, size_t vlen) { struct x509_parse_context *ctx = context; pr_debug("PubKey Algo: %u\n", ctx->last_oid); switch (ctx->last_oid) { case OID_md2WithRSAEncryption: case OID_md3WithRSAEncryption: default: return -ENOPKG; /* Unsupported combination */ case OID_md4WithRSAEncryption: ctx->cert->sig->hash_algo = "md4"; goto rsa_pkcs1; case OID_sha1WithRSAEncryption: ctx->cert->sig->hash_algo = "sha1"; goto rsa_pkcs1; case OID_sha256WithRSAEncryption: ctx->cert->sig->hash_algo = "sha256"; goto rsa_pkcs1; case OID_sha384WithRSAEncryption: ctx->cert->sig->hash_algo = "sha384"; goto rsa_pkcs1; case OID_sha512WithRSAEncryption: ctx->cert->sig->hash_algo = "sha512"; goto rsa_pkcs1; case OID_sha224WithRSAEncryption: ctx->cert->sig->hash_algo = "sha224"; goto rsa_pkcs1; } rsa_pkcs1: ctx->cert->sig->pkey_algo = "rsa"; ctx->cert->sig->encoding = "pkcs1"; ctx->algo_oid = ctx->last_oid; return 0; } /* * Note the whereabouts and type of the signature. */ int x509_note_signature(void *context, size_t hdrlen, unsigned char tag, const void *value, size_t vlen) { struct x509_parse_context *ctx = context; pr_debug("Signature type: %u size %zu\n", ctx->last_oid, vlen); if (ctx->last_oid != ctx->algo_oid) { pr_warn("Got cert with pkey (%u) and sig (%u) algorithm OIDs\n", ctx->algo_oid, ctx->last_oid); return -EINVAL; } if (strcmp(ctx->cert->sig->pkey_algo, "rsa") == 0) { /* Discard the BIT STRING metadata */ if (vlen < 1 || *(const u8 *)value != 0) return -EBADMSG; value++; vlen--; } ctx->cert->raw_sig = value; ctx->cert->raw_sig_size = vlen; return 0; } /* * Note the certificate serial number */ int x509_note_serial(void *context, size_t hdrlen, unsigned char tag, const void *value, size_t vlen) { struct x509_parse_context *ctx = context; ctx->cert->raw_serial = value; ctx->cert->raw_serial_size = vlen; return 0; } /* * Note some of the name segments from which we'll fabricate a name. 
*/ int x509_extract_name_segment(void *context, size_t hdrlen, unsigned char tag, const void *value, size_t vlen) { struct x509_parse_context *ctx = context; switch (ctx->last_oid) { case OID_commonName: ctx->cn_size = vlen; ctx->cn_offset = (unsigned long)value - ctx->data; break; case OID_organizationName: ctx->o_size = vlen; ctx->o_offset = (unsigned long)value - ctx->data; break; case OID_email_address: ctx->email_size = vlen; ctx->email_offset = (unsigned long)value - ctx->data; break; default: break; } return 0; } /* * Fabricate and save the issuer and subject names */ static int x509_fabricate_name(struct x509_parse_context *ctx, size_t hdrlen, unsigned char tag, char **_name, size_t vlen) { const void *name, *data = (const void *)ctx->data; size_t namesize; char *buffer; if (*_name) return -EINVAL; /* Empty name string if no material */ if (!ctx->cn_size && !ctx->o_size && !ctx->email_size) { buffer = kmalloc(1, GFP_KERNEL); if (!buffer) return -ENOMEM; buffer[0] = 0; goto done; } if (ctx->cn_size && ctx->o_size) { /* Consider combining O and CN, but use only the CN if it is * prefixed by the O, or a significant portion thereof. */ namesize = ctx->cn_size; name = data + ctx->cn_offset; if (ctx->cn_size >= ctx->o_size && memcmp(data + ctx->cn_offset, data + ctx->o_offset, ctx->o_size) == 0) goto single_component; if (ctx->cn_size >= 7 && ctx->o_size >= 7 && memcmp(data + ctx->cn_offset, data + ctx->o_offset, 7) == 0) goto single_component; buffer = kmalloc(ctx->o_size + 2 + ctx->cn_size + 1, GFP_KERNEL); if (!buffer) return -ENOMEM; memcpy(buffer, data + ctx->o_offset, ctx->o_size); buffer[ctx->o_size + 0] = ':'; buffer[ctx->o_size + 1] = ' '; memcpy(buffer + ctx->o_size + 2, data + ctx->cn_offset, ctx->cn_size); buffer[ctx->o_size + 2 + ctx->cn_size] = 0; goto done; } else if (ctx->cn_size) { namesize = ctx->cn_size; name = data + ctx->cn_offset; } else if (ctx->o_size) { namesize = ctx->o_size; name = data + ctx->o_offset; } else { namesize = ctx->email_size; name = data + ctx->email_offset; } single_component: buffer = kmalloc(namesize + 1, GFP_KERNEL); if (!buffer) return -ENOMEM; memcpy(buffer, name, namesize); buffer[namesize] = 0; done: *_name = buffer; ctx->cn_size = 0; ctx->o_size = 0; ctx->email_size = 0; return 0; } int x509_note_issuer(void *context, size_t hdrlen, unsigned char tag, const void *value, size_t vlen) { struct x509_parse_context *ctx = context; ctx->cert->raw_issuer = value; ctx->cert->raw_issuer_size = vlen; return x509_fabricate_name(ctx, hdrlen, tag, &ctx->cert->issuer, vlen); } int x509_note_subject(void *context, size_t hdrlen, unsigned char tag, const void *value, size_t vlen) { struct x509_parse_context *ctx = context; ctx->cert->raw_subject = value; ctx->cert->raw_subject_size = vlen; return x509_fabricate_name(ctx, hdrlen, tag, &ctx->cert->subject, vlen); } /* * Extract the data for the public key algorithm */ int x509_extract_key_data(void *context, size_t hdrlen, unsigned char tag, const void *value, size_t vlen) { struct x509_parse_context *ctx = context; if (ctx->last_oid != OID_rsaEncryption) return -ENOPKG; ctx->cert->pub->pkey_algo = "rsa"; /* Discard the BIT STRING metadata */ if (vlen < 1 || *(const u8 *)value != 0) return -EBADMSG; ctx->key = value + 1; ctx->key_size = vlen - 1; return 0; } /* The keyIdentifier in AuthorityKeyIdentifier SEQUENCE is tag(CONT,PRIM,0) */ #define SEQ_TAG_KEYID (ASN1_CONT << 6) /* * Process certificate extensions that are used to qualify the certificate. 
*/ int x509_process_extension(void *context, size_t hdrlen, unsigned char tag, const void *value, size_t vlen) { struct x509_parse_context *ctx = context; struct asymmetric_key_id *kid; const unsigned char *v = value; pr_debug("Extension: %u\n", ctx->last_oid); if (ctx->last_oid == OID_subjectKeyIdentifier) { /* Get hold of the key fingerprint */ if (ctx->cert->skid || vlen < 3) return -EBADMSG; if (v[0] != ASN1_OTS || v[1] != vlen - 2) return -EBADMSG; v += 2; vlen -= 2; ctx->cert->raw_skid_size = vlen; ctx->cert->raw_skid = v; kid = asymmetric_key_generate_id(v, vlen, "", 0); if (IS_ERR(kid)) return PTR_ERR(kid); ctx->cert->skid = kid; pr_debug("subjkeyid %*phN\n", kid->len, kid->data); return 0; } if (ctx->last_oid == OID_authorityKeyIdentifier) { /* Get hold of the CA key fingerprint */ ctx->raw_akid = v; ctx->raw_akid_size = vlen; return 0; } return 0; } /** * x509_decode_time - Decode an X.509 time ASN.1 object * @_t: The time to fill in * @hdrlen: The length of the object header * @tag: The object tag * @value: The object value * @vlen: The size of the object value * * Decode an ASN.1 universal time or generalised time field into a struct the * kernel can handle and check it for validity. The time is decoded thus: * * [RFC5280 §4.1.2.5] * CAs conforming to this profile MUST always encode certificate validity * dates through the year 2049 as UTCTime; certificate validity dates in * 2050 or later MUST be encoded as GeneralizedTime. Conforming * applications MUST be able to process validity dates that are encoded in * either UTCTime or GeneralizedTime. */ int x509_decode_time(time64_t *_t, size_t hdrlen, unsigned char tag, const unsigned char *value, size_t vlen) { static const unsigned char month_lengths[] = { 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 }; const unsigned char *p = value; unsigned year, mon, day, hour, min, sec, mon_len; #define dec2bin(X) ({ unsigned char x = (X) - '0'; if (x > 9) goto invalid_time; x; }) #define DD2bin(P) ({ unsigned x = dec2bin(P[0]) * 10 + dec2bin(P[1]); P += 2; x; }) if (tag == ASN1_UNITIM) { /* UTCTime: YYMMDDHHMMSSZ */ if (vlen != 13) goto unsupported_time; year = DD2bin(p); if (year >= 50) year += 1900; else year += 2000; } else if (tag == ASN1_GENTIM) { /* GenTime: YYYYMMDDHHMMSSZ */ if (vlen != 15) goto unsupported_time; year = DD2bin(p) * 100 + DD2bin(p); if (year >= 1950 && year <= 2049) goto invalid_time; } else { goto unsupported_time; } mon = DD2bin(p); day = DD2bin(p); hour = DD2bin(p); min = DD2bin(p); sec = DD2bin(p); if (*p != 'Z') goto unsupported_time; if (year < 1970 || mon < 1 || mon > 12) goto invalid_time; mon_len = month_lengths[mon - 1]; if (mon == 2) { if (year % 4 == 0) { mon_len = 29; if (year % 100 == 0) { mon_len = 28; if (year % 400 == 0) mon_len = 29; } } } if (day < 1 || day > mon_len || hour > 24 || /* ISO 8601 permits 24:00:00 as midnight tomorrow */ min > 59 || sec > 60) /* ISO 8601 permits leap seconds [X.680 46.3] */ goto invalid_time; *_t = mktime64(year, mon, day, hour, min, sec); return 0; unsupported_time: pr_debug("Got unsupported time [tag %02x]: '%*phN'\n", tag, (int)vlen, value); return -EBADMSG; invalid_time: pr_debug("Got invalid time [tag %02x]: '%*phN'\n", tag, (int)vlen, value); return -EBADMSG; } EXPORT_SYMBOL_GPL(x509_decode_time); int x509_note_not_before(void *context, size_t hdrlen, unsigned char tag, const void *value, size_t vlen) { struct x509_parse_context *ctx = context; return x509_decode_time(&ctx->cert->valid_from, hdrlen, tag, value, vlen); } int x509_note_not_after(void *context, 
size_t hdrlen, unsigned char tag, const void *value, size_t vlen) { struct x509_parse_context *ctx = context; return x509_decode_time(&ctx->cert->valid_to, hdrlen, tag, value, vlen); } /* * Note a key identifier-based AuthorityKeyIdentifier */ int x509_akid_note_kid(void *context, size_t hdrlen, unsigned char tag, const void *value, size_t vlen) { struct x509_parse_context *ctx = context; struct asymmetric_key_id *kid; pr_debug("AKID: keyid: %*phN\n", (int)vlen, value); if (ctx->cert->sig->auth_ids[1]) return 0; kid = asymmetric_key_generate_id(value, vlen, "", 0); if (IS_ERR(kid)) return PTR_ERR(kid); pr_debug("authkeyid %*phN\n", kid->len, kid->data); ctx->cert->sig->auth_ids[1] = kid; return 0; } /* * Note a directoryName in an AuthorityKeyIdentifier */ int x509_akid_note_name(void *context, size_t hdrlen, unsigned char tag, const void *value, size_t vlen) { struct x509_parse_context *ctx = context; pr_debug("AKID: name: %*phN\n", (int)vlen, value); ctx->akid_raw_issuer = value; ctx->akid_raw_issuer_size = vlen; return 0; } /* * Note a serial number in an AuthorityKeyIdentifier */ int x509_akid_note_serial(void *context, size_t hdrlen, unsigned char tag, const void *value, size_t vlen) { struct x509_parse_context *ctx = context; struct asymmetric_key_id *kid; pr_debug("AKID: serial: %*phN\n", (int)vlen, value); if (!ctx->akid_raw_issuer || ctx->cert->sig->auth_ids[0]) return 0; kid = asymmetric_key_generate_id(value, vlen, ctx->akid_raw_issuer, ctx->akid_raw_issuer_size); if (IS_ERR(kid)) return PTR_ERR(kid); pr_debug("authkeyid %*phN\n", kid->len, kid->data); ctx->cert->sig->auth_ids[0] = kid; return 0; } backport-iwlwifi-dkms-7906/compat/verification/x509_parser.h000066400000000000000000000047251351064634500241100ustar00rootroot00000000000000/* X.509 certificate parser internal definitions * * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public Licence * as published by the Free Software Foundation; either version * 2 of the Licence, or (at your option) any later version. 
*/ #include #include #include #define x509_decode_time LINUX_BACKPORT(x509_decode_time) #define x509_cert_parse LINUX_BACKPORT(x509_cert_parse) #define x509_free_certificate LINUX_BACKPORT(x509_free_certificate) struct x509_certificate { struct x509_certificate *next; struct x509_certificate *signer; /* Certificate that signed this one */ struct public_key *pub; /* Public key details */ struct public_key_signature *sig; /* Signature parameters */ char *issuer; /* Name of certificate issuer */ char *subject; /* Name of certificate subject */ struct asymmetric_key_id *id; /* Issuer + Serial number */ struct asymmetric_key_id *skid; /* Subject + subjectKeyId (optional) */ time64_t valid_from; time64_t valid_to; const void *tbs; /* Signed data */ unsigned tbs_size; /* Size of signed data */ unsigned raw_sig_size; /* Size of sigature */ const void *raw_sig; /* Signature data */ const void *raw_serial; /* Raw serial number in ASN.1 */ unsigned raw_serial_size; unsigned raw_issuer_size; const void *raw_issuer; /* Raw issuer name in ASN.1 */ const void *raw_subject; /* Raw subject name in ASN.1 */ unsigned raw_subject_size; unsigned raw_skid_size; const void *raw_skid; /* Raw subjectKeyId in ASN.1 */ unsigned index; bool seen; /* Infinite recursion prevention */ bool verified; bool self_signed; /* T if self-signed (check unsupported_sig too) */ bool unsupported_key; /* T if key uses unsupported crypto */ bool unsupported_sig; /* T if signature uses unsupported crypto */ bool blacklisted; }; /* * x509_cert_parser.c */ extern void x509_free_certificate(struct x509_certificate *cert); extern struct x509_certificate *x509_cert_parse(const void *data, size_t datalen); extern int x509_decode_time(time64_t *_t, size_t hdrlen, unsigned char tag, const unsigned char *value, size_t vlen); /* * x509_public_key.c */ extern int x509_get_sig_params(struct x509_certificate *cert); extern int x509_check_for_self_signed(struct x509_certificate *cert); backport-iwlwifi-dkms-7906/compat/verification/x509_public_key.c000066400000000000000000000145571351064634500247410ustar00rootroot00000000000000/* Instantiate a public key crypto key from an X.509 Certificate * * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public Licence * as published by the Free Software Foundation; either version * 2 of the Licence, or (at your option) any later version. */ #define pr_fmt(fmt) "X.509: "fmt #include #include #include #include #include #include "x509_parser.h" /* * Set up the signature parameters in an X.509 certificate. This involves * digesting the signed data and extracting the signature. */ int x509_get_sig_params(struct x509_certificate *cert) { struct public_key_signature *sig = cert->sig; struct crypto_shash *tfm; struct shash_desc *desc; size_t desc_size; int ret; pr_devel("==>%s()\n", __func__); if (!cert->pub->pkey_algo) cert->unsupported_key = true; if (!sig->pkey_algo) cert->unsupported_sig = true; /* We check the hash if we can - even if we can't then verify it */ if (!sig->hash_algo) { cert->unsupported_sig = true; return 0; } sig->s = kmemdup(cert->raw_sig, cert->raw_sig_size, GFP_KERNEL); if (!sig->s) return -ENOMEM; sig->s_size = cert->raw_sig_size; /* Allocate the hashing algorithm we're going to need and find out how * big the hash operational data will be. 
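 *
 * For instance, with sig->hash_algo "sha256" this allocates the SHA-256
 * shash transform, so sig->digest_size comes back as 32 bytes and
 * desc_size is sizeof(struct shash_desc) plus the tfm's private
 * descriptor size.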
*/ tfm = crypto_alloc_shash(sig->hash_algo, 0, 0); if (IS_ERR(tfm)) { if (PTR_ERR(tfm) == -ENOENT) { cert->unsupported_sig = true; return 0; } return PTR_ERR(tfm); } desc_size = crypto_shash_descsize(tfm) + sizeof(*desc); sig->digest_size = crypto_shash_digestsize(tfm); ret = -ENOMEM; sig->digest = kmalloc(sig->digest_size, GFP_KERNEL); if (!sig->digest) goto error; desc = kzalloc(desc_size, GFP_KERNEL); if (!desc) goto error; desc->tfm = tfm; desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; ret = crypto_shash_digest(desc, cert->tbs, cert->tbs_size, sig->digest); if (ret < 0) goto error_2; ret = is_hash_blacklisted(sig->digest, sig->digest_size, "tbs"); if (ret == -EKEYREJECTED) { pr_err("Cert %*phN is blacklisted\n", sig->digest_size, sig->digest); cert->blacklisted = true; ret = 0; } error_2: kfree(desc); error: crypto_free_shash(tfm); pr_devel("<==%s() = %d\n", __func__, ret); return ret; } /* * Check for self-signedness in an X.509 cert and if found, check the signature * immediately if we can. */ int x509_check_for_self_signed(struct x509_certificate *cert) { int ret = 0; pr_devel("==>%s()\n", __func__); if (cert->raw_subject_size != cert->raw_issuer_size || memcmp(cert->raw_subject, cert->raw_issuer, cert->raw_issuer_size) != 0) goto not_self_signed; if (cert->sig->auth_ids[0] || cert->sig->auth_ids[1]) { /* If the AKID is present it may have one or two parts. If * both are supplied, both must match. */ bool a = asymmetric_key_id_same(cert->skid, cert->sig->auth_ids[1]); bool b = asymmetric_key_id_same(cert->id, cert->sig->auth_ids[0]); if (!a && !b) goto not_self_signed; ret = -EKEYREJECTED; if (((a && !b) || (b && !a)) && cert->sig->auth_ids[0] && cert->sig->auth_ids[1]) goto out; } ret = -EKEYREJECTED; if (strcmp(cert->pub->pkey_algo, cert->sig->pkey_algo) != 0) goto out; ret = public_key_verify_signature(cert->pub, cert->sig); if (ret < 0) { if (ret == -ENOPKG) { cert->unsupported_sig = true; ret = 0; } goto out; } pr_devel("Cert Self-signature verified"); cert->self_signed = true; out: pr_devel("<==%s() = %d\n", __func__, ret); return ret; not_self_signed: pr_devel("<==%s() = 0 [not]\n", __func__); return 0; } #if 0 /* * Attempt to parse a data blob for a key as an X509 certificate. 
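 *
 * The key description proposed below has the form "<subject>: <id>",
 * where <id> is the raw subjectKeyIdentifier if the cert carries one
 * and the raw serial number otherwise, rendered in hex: a hypothetical
 * cert with subject "Build key" and serial 0x1234 would be described
 * as "Build key: 1234".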
*/ static int x509_key_preparse(struct key_preparsed_payload *prep) { struct asymmetric_key_ids *kids; struct x509_certificate *cert; const char *q; size_t srlen, sulen; char *desc = NULL, *p; int ret; cert = x509_cert_parse(prep->data, prep->datalen); if (IS_ERR(cert)) return PTR_ERR(cert); pr_devel("Cert Issuer: %s\n", cert->issuer); pr_devel("Cert Subject: %s\n", cert->subject); if (cert->unsupported_key) { ret = -ENOPKG; goto error_free_cert; } pr_devel("Cert Key Algo: %s\n", cert->pub->pkey_algo); pr_devel("Cert Valid period: %lld-%lld\n", cert->valid_from, cert->valid_to); cert->pub->id_type = "X509"; if (cert->unsupported_sig) { public_key_signature_free(cert->sig); cert->sig = NULL; } else { pr_devel("Cert Signature: %s + %s\n", cert->sig->pkey_algo, cert->sig->hash_algo); } /* Don't permit addition of blacklisted keys */ ret = -EKEYREJECTED; if (cert->blacklisted) goto error_free_cert; /* Propose a description */ sulen = strlen(cert->subject); if (cert->raw_skid) { srlen = cert->raw_skid_size; q = cert->raw_skid; } else { srlen = cert->raw_serial_size; q = cert->raw_serial; } ret = -ENOMEM; desc = kmalloc(sulen + 2 + srlen * 2 + 1, GFP_KERNEL); if (!desc) goto error_free_cert; p = memcpy(desc, cert->subject, sulen); p += sulen; *p++ = ':'; *p++ = ' '; p = bin2hex(p, q, srlen); *p = 0; kids = kmalloc(sizeof(struct asymmetric_key_ids), GFP_KERNEL); if (!kids) goto error_free_desc; kids->id[0] = cert->id; kids->id[1] = cert->skid; /* We're pinning the module by being linked against it */ __module_get(public_key_subtype.owner); prep->payload.data[asym_subtype] = &public_key_subtype; prep->payload.data[asym_key_ids] = kids; prep->payload.data[asym_crypto] = cert->pub; prep->payload.data[asym_auth] = cert->sig; prep->description = desc; prep->quotalen = 100; /* We've finished with the certificate */ cert->pub = NULL; cert->id = NULL; cert->skid = NULL; cert->sig = NULL; desc = NULL; ret = 0; error_free_desc: kfree(desc); error_free_cert: x509_free_certificate(cert); return ret; } static struct asymmetric_key_parser x509_key_parser = { .owner = THIS_MODULE, .name = "x509", .parse = x509_key_preparse, }; /* * Module stuff */ static int __init x509_key_init(void) { return register_asymmetric_key_parser(&x509_key_parser); } static void __exit x509_key_exit(void) { unregister_asymmetric_key_parser(&x509_key_parser); } module_init(x509_key_init); module_exit(x509_key_exit); MODULE_DESCRIPTION("X.509 certificate parser"); MODULE_AUTHOR("Red Hat, Inc."); MODULE_LICENSE("GPL"); #endif backport-iwlwifi-dkms-7906/defconfig000077700000000000000000000000001351064634500242022defconfigs/prune-publicustar00rootroot00000000000000backport-iwlwifi-dkms-7906/defconfigs/000077500000000000000000000000001351064634500200105ustar00rootroot00000000000000backport-iwlwifi-dkms-7906/defconfigs/iwlwifi-galileo000066400000000000000000000013601351064634500230170ustar00rootroot00000000000000CPTCFG_CFG80211=m CPTCFG_CFG80211_DEFAULT_PS=y CPTCFG_CFG80211_DEBUGFS=y CPTCFG_CFG80211_CERTIFICATION_ONUS=y CPTCFG_CFG80211_REG_RELAX_NO_IR=y CPTCFG_NL80211_TESTMODE=y CPTCFG_MAC80211=m # CPTCFG_MAC80211_RC_PID is not set CPTCFG_MAC80211_RC_MINSTREL=y CPTCFG_MAC80211_RC_DEFAULT_MINSTREL=y CPTCFG_MAC80211_DEBUGFS=y CPTCFG_MAC80211_MESSAGE_TRACING=y CPTCFG_WLAN=y CPTCFG_IWLWIFI=m #CPTCFG_IWLDVM is not set CPTCFG_IWLMVM=m CPTCFG_IWLWIFI_DEBUG=y CPTCFG_IWLWIFI_DEBUGFS=y CPTCFG_IWLWIFI_DEVICE_TRACING=y CPTCFG_IWLWIFI_DEVICE_TESTMODE=y # CPTCFG_MAC80211_HWSIM is not set CPTCFG_IWLXVT=m CPTCFG_IWLWIFI_SUPPORT_DEBUG_OVERRIDES=y # 
CPTCFG_IWLWIFI_FORCE_OFDM_RATE is not set # CPTCFG_IWLMVM_P2P_UAPSD_STANDALONE is not set CPTCFG_IWLWIFI_NUM_CHANNELS=1 backport-iwlwifi-dkms-7906/defconfigs/iwlwifi-public000066400000000000000000000011231351064634500226560ustar00rootroot00000000000000CPTCFG_CFG80211=m CPTCFG_CFG80211_DEFAULT_PS=y CPTCFG_CFG80211_DEBUGFS=y CPTCFG_CFG80211_CERTIFICATION_ONUS=y CPTCFG_CFG80211_REG_RELAX_NO_IR=y CPTCFG_NL80211_TESTMODE=y CPTCFG_MAC80211=m # CPTCFG_MAC80211_RC_PID is not set CPTCFG_MAC80211_RC_MINSTREL=y CPTCFG_MAC80211_RC_DEFAULT_MINSTREL=y CPTCFG_MAC80211_DEBUGFS=y CPTCFG_MAC80211_MESSAGE_TRACING=y CPTCFG_WLAN=y CPTCFG_IWLWIFI=m CPTCFG_IWLMVM=m CPTCFG_IWLWIFI_DEBUG=y CPTCFG_IWLWIFI_DEBUGFS=y CPTCFG_IWLWIFI_DEVICE_TRACING=y CPTCFG_IWLWIFI_DEVICE_TESTMODE=y # CPTCFG_MAC80211_HWSIM is not set CPTCFG_IWLXVT=m CPTCFG_IWLWIFI_OPMODE_MODULAR=y backport-iwlwifi-dkms-7906/defconfigs/iwlwifi-public-android000066400000000000000000000012451351064634500243010ustar00rootroot00000000000000CPTCFG_CFG80211=m CPTCFG_CFG80211_DEFAULT_PS=y CPTCFG_CFG80211_DEBUGFS=y CPTCFG_CFG80211_CERTIFICATION_ONUS=y CPTCFG_CFG80211_REG_RELAX_NO_IR=y CPTCFG_NL80211_TESTMODE=y CPTCFG_MAC80211=m # CPTCFG_MAC80211_RC_PID is not set CPTCFG_MAC80211_RC_MINSTREL=y CPTCFG_MAC80211_RC_DEFAULT_MINSTREL=y CPTCFG_MAC80211_DEBUGFS=y CPTCFG_MAC80211_MESSAGE_TRACING=y CPTCFG_WLAN=y CPTCFG_IWLWIFI=m CPTCFG_IWLMVM=m CPTCFG_IWLWIFI_DEBUG=y CPTCFG_IWLWIFI_DEBUGFS=y CPTCFG_IWLWIFI_DEVICE_TRACING=y CPTCFG_IWLWIFI_DEVICE_TESTMODE=y # CPTCFG_MAC80211_HWSIM is not set # CPTCFG_IWLXVT is not set CPTCFG_IWLWIFI_OPMODE_MODULAR=y CPTCFG_IWLWIFI_BCAST_FILTERING=y # CPTCFG_IWLMVM_AX_SUPPORT is not set backport-iwlwifi-dkms-7906/defconfigs/prune-public000066400000000000000000000010141351064634500223340ustar00rootroot00000000000000CPTCFG_CFG80211=m CPTCFG_CFG80211_DEFAULT_PS=y CPTCFG_CFG80211_DEBUGFS=y CPTCFG_CFG80211_CERTIFICATION_ONUS=y CPTCFG_CFG80211_REG_RELAX_NO_IR=y CPTCFG_NL80211_TESTMODE=y CPTCFG_MAC80211=m CPTCFG_MAC80211_RC_MINSTREL=y CPTCFG_MAC80211_RC_DEFAULT_MINSTREL=y CPTCFG_MAC80211_DEBUGFS=y CPTCFG_MAC80211_MESSAGE_TRACING=y CPTCFG_WLAN=y CPTCFG_IWLWIFI=m CPTCFG_IWLMVM=m CPTCFG_IWLWIFI_DEBUG=y CPTCFG_IWLWIFI_DEBUGFS=y CPTCFG_IWLWIFI_DEVICE_TRACING=y CPTCFG_IWLWIFI_DEVICE_TESTMODE=y CPTCFG_IWLXVT=m CPTCFG_IWLWIFI_OPMODE_MODULAR=y backport-iwlwifi-dkms-7906/defconfigs/soc_ip_hfpga000066400000000000000000000011451351064634500223550ustar00rootroot00000000000000CPTCFG_CFG80211=m CPTCFG_CFG80211_DEFAULT_PS=n CPTCFG_CFG80211_DEBUGFS=y CPTCFG_CFG80211_CERTIFICATION_ONUS=y CPTCFG_CFG80211_REG_RELAX_NO_IR=y CPTCFG_NL80211_TESTMODE=y CPTCFG_MAC80211=m # CPTCFG_MAC80211_RC_PID is not set CPTCFG_MAC80211_RC_MINSTREL=n CPTCFG_MAC80211_RC_DEFAULT_MINSTREL=n CPTCFG_MAC80211_DEBUGFS=y CPTCFG_MAC80211_MESSAGE_TRACING=y CPTCFG_WLAN=y CPTCFG_IWLWIFI=m CPTCFG_IWLMVM=m CPTCFG_IWLWIFI_DEBUG=y CPTCFG_IWLWIFI_DEBUGFS=y CPTCFG_IWLWIFI_DEVICE_TRACING=y CPTCFG_IWLWIFI_DEVICE_TESTMODE=y CPTCFG_IWLXVT=m CPTCFG_IWLWIFI_SUPPORT_DEBUG_OVERRIDES=y # CPTCFG_IWLWIFI_FORCE_OFDM_RATE is not set 
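# The defconfigs above are consumed by the backport tree's
# "defconfig-<name>" make targets, which load defconfigs/<name> into
# .config before the modules are built against a kernel tree. A minimal
# sketch (the kernel build path is an example, not part of this tree):
#
#   make KLIB_BUILD=/lib/modules/$(uname -r)/build defconfig-iwlwifi-public
#   make KLIB_BUILD=/lib/modules/$(uname -r)/build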
backport-iwlwifi-dkms-7906/drivers/000077500000000000000000000000001351064634500173575ustar00rootroot00000000000000backport-iwlwifi-dkms-7906/drivers/net/000077500000000000000000000000001351064634500201455ustar00rootroot00000000000000backport-iwlwifi-dkms-7906/drivers/net/wireless/000077500000000000000000000000001351064634500220025ustar00rootroot00000000000000backport-iwlwifi-dkms-7906/drivers/net/wireless/Kconfig000066400000000000000000000100211351064634500232770ustar00rootroot00000000000000# # Wireless LAN device configuration # menuconfig WLAN bool "Wireless LAN" depends on !S390 depends on NET select WIRELESS default y ---help--- This section contains all the pre 802.11 and 802.11 wireless device drivers. For a complete list of drivers and documentation on them refer to the wireless wiki: http://wireless.kernel.org/en/users/Drivers if WLAN config WIRELESS_WDS bool "mac80211-based legacy WDS support" if EXPERT help This option enables the deprecated WDS support, the newer mac80211-based 4-addr AP/client support supersedes it with a much better feature set (HT, VHT, ...) We plan to remove this option and code, so if you find that you have to enable it, please let us know on the linux-wireless@vger.kernel.org mailing list, so we can help you migrate to 4-addr AP/client (or, if it's really necessary, give up on our plan of removing it). #source "drivers/net/wireless/admtek/Kconfig" #source "drivers/net/wireless/ath/Kconfig" #source "drivers/net/wireless/atmel/Kconfig" #source "drivers/net/wireless/broadcom/Kconfig" #source "drivers/net/wireless/cisco/Kconfig" source "drivers/net/wireless/intel/Kconfig" #source "drivers/net/wireless/intersil/Kconfig" #source "drivers/net/wireless/marvell/Kconfig" #source "drivers/net/wireless/mediatek/Kconfig" #source "drivers/net/wireless/ralink/Kconfig" #source "drivers/net/wireless/realtek/Kconfig" #source "drivers/net/wireless/rsi/Kconfig" #source "drivers/net/wireless/st/Kconfig" #source "drivers/net/wireless/ti/Kconfig" #source "drivers/net/wireless/zydas/Kconfig" #source "drivers/net/wireless/quantenna/Kconfig" config PCMCIA_RAYCS depends on n tristate "Aviator/Raytheon 2.4GHz wireless support" depends on m depends on PCMCIA depends on WIRELESS_EXT depends on WEXT_SPY depends on WEXT_PRIV ---help--- Say Y here if you intend to attach an Aviator/Raytheon PCMCIA (PC-card) wireless Ethernet networking card to your computer. Please read the file for details. To compile this driver as a module, choose M here: the module will be called ray_cs. If unsure, say N. config PCMCIA_WL3501 depends on n tristate "Planet WL3501 PCMCIA cards" depends on m depends on CFG80211 && PCMCIA depends on WIRELESS_EXT depends on WEXT_SPY help A driver for WL3501 PCMCIA 802.11 wireless cards made by Planet. It has basic support for Linux wireless extensions and initial micro support for ethtool. config MAC80211_HWSIM tristate "Simulated radio testing tool for mac80211" depends on m depends on MAC80211 ---help--- This driver is a developer testing tool that can be used to test IEEE 802.11 networking stack (mac80211) functionality. This is not needed for normal wireless LAN usage and is only for testing. See Documentation/networking/mac80211_hwsim for more information on how to use this tool. To compile this driver as a module, choose M here: the module will be called mac80211_hwsim. If unsure, say N. 
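# Usage sketch (not part of the Kconfig entries above): once built, the
# simulator is typically loaded with "modprobe mac80211_hwsim radios=2",
# which registers two virtual radios that mac80211 drives like real
# hardware.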
config USB_NET_RNDIS_WLAN depends on !KERNEL_4_6 depends on n tristate "Wireless RNDIS USB support" depends on m depends on USB depends on CFG80211 depends on USB_NET_DRIVERS depends on USB_USBNET depends on USB_NET_CDCETHER depends on USB_NET_RNDIS_HOST ---help--- This is a driver for wireless RNDIS devices. These are USB based adapters found in devices such as: Buffalo WLI-U2-KG125S U.S. Robotics USR5421 Belkin F5D7051 Linksys WUSB54GSv2 Linksys WUSB54GSC Asus WL169gE Eminent EM4045 BT Voyager 1055 Linksys WUSB54GSv1 U.S. Robotics USR5420 BUFFALO WLI-USB-G54 All of these devices are based on Broadcom 4320 chip which is the only wireless RNDIS chip known to date. If you choose to build a module, it'll be called rndis_wlan. config VIRT_WIFI depends on n tristate "Wifi wrapper for ethernet drivers" depends on m depends on CFG80211 ---help--- This option adds support for ethernet connections to appear as if they are wifi connections through a special rtnetlink device. endif # WLAN backport-iwlwifi-dkms-7906/drivers/net/wireless/Makefile000066400000000000000000000020611351064634500234410ustar00rootroot00000000000000# SPDX-License-Identifier: GPL-2.0 # # Makefile for the Linux Wireless network device drivers. # # #obj-$(CONFIG_WLAN_VENDOR_ADMTEK) += admtek/ #obj-$(CONFIG_WLAN_VENDOR_ATH) += ath/ #obj-$(CONFIG_WLAN_VENDOR_ATMEL) += atmel/ #obj-$(CONFIG_WLAN_VENDOR_BROADCOM) += broadcom/ #obj-$(CONFIG_WLAN_VENDOR_CISCO) += cisco/ obj-$(CPTCFG_WLAN_VENDOR_INTEL) += intel/ #obj-$(CONFIG_WLAN_VENDOR_INTERSIL) += intersil/ #obj-$(CONFIG_WLAN_VENDOR_MARVELL) += marvell/ #obj-$(CONFIG_WLAN_VENDOR_MEDIATEK) += mediatek/ #obj-$(CONFIG_WLAN_VENDOR_RALINK) += ralink/ #obj-$(CONFIG_WLAN_VENDOR_REALTEK) += realtek/ #obj-$(CONFIG_WLAN_VENDOR_RSI) += rsi/ #obj-$(CONFIG_WLAN_VENDOR_ST) += st/ #obj-$(CONFIG_WLAN_VENDOR_TI) += ti/ #obj-$(CONFIG_WLAN_VENDOR_ZYDAS) += zydas/ #obj-$(CONFIG_WLAN_VENDOR_QUANTENNA) += quantenna/ # 16-bit wireless PCMCIA client drivers #obj-$(CPTCFG_PCMCIA_RAYCS) += ray_cs.o #obj-$(CPTCFG_PCMCIA_WL3501) += wl3501_cs.o # #obj-$(CPTCFG_USB_NET_RNDIS_WLAN) += rndis_wlan.o obj-$(CPTCFG_MAC80211_HWSIM) += mac80211_hwsim.o # #obj-$(CPTCFG_VIRT_WIFI) += virt_wifi.o backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/000077500000000000000000000000001351064634500231155ustar00rootroot00000000000000backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/Kconfig000066400000000000000000000011271351064634500244210ustar00rootroot00000000000000config WLAN_VENDOR_INTEL bool "Intel devices" default y ---help--- If you have a wireless card belonging to this class, say Y. Note that the answer to this question doesn't directly affect the kernel: saying N will just cause the configurator to skip all the questions about these cards. If you say Y, you will be asked for your specific card in the following questions. 
if WLAN_VENDOR_INTEL #source "drivers/net/wireless/intel/ipw2x00/Kconfig" #source "drivers/net/wireless/intel/iwlegacy/Kconfig" source "drivers/net/wireless/intel/iwlwifi/Kconfig" endif # WLAN_VENDOR_INTEL backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/Makefile000066400000000000000000000002201351064634500245500ustar00rootroot00000000000000#obj-$(CONFIG_IPW2100) += ipw2x00/ #obj-$(CONFIG_IPW2200) += ipw2x00/ # #obj-$(CONFIG_IWLEGACY) += iwlegacy/ obj-$(CPTCFG_IWLWIFI) += iwlwifi/ backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/000077500000000000000000000000001351064634500245675ustar00rootroot00000000000000backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/Kconfig000066400000000000000000000110631351064634500260730ustar00rootroot00000000000000config IWLWIFI tristate "Intel Wireless WiFi Next Gen AGN - Wireless-N/Advanced-N/Ultimate-N (iwlwifi) " depends on m depends on PCI && HAS_IOMEM && CFG80211 depends on FW_LOADER ---help--- Select to build the driver supporting the: Intel Wireless WiFi Link Next-Gen AGN This option enables support for use with the following hardware: Intel Wireless WiFi Link 6250AGN Adapter Intel 6000 Series Wi-Fi Adapters (6200AGN and 6300AGN) Intel WiFi Link 1000BGN Intel Wireless WiFi 5150AGN Intel Wireless WiFi 5100AGN, 5300AGN, and 5350AGN Intel 6005 Series Wi-Fi Adapters Intel 6030 Series Wi-Fi Adapters Intel Wireless WiFi Link 6150BGN 2 Adapter Intel 100 Series Wi-Fi Adapters (100BGN and 130BGN) Intel 2000 Series Wi-Fi Adapters Intel 7260 Wi-Fi Adapter Intel 3160 Wi-Fi Adapter Intel 7265 Wi-Fi Adapter Intel 8260 Wi-Fi Adapter Intel 3165 Wi-Fi Adapter This driver uses the kernel's mac80211 subsystem. In order to use this driver, you will need a firmware image for it. You can obtain the microcode from: <http://wireless.kernel.org/en/users/Drivers/iwlwifi>. The firmware is typically installed in /lib/firmware. You can look in the hotplug script /etc/hotplug/firmware.agent to determine which directory FIRMWARE_DIR is set to when the script runs. If you want to compile the driver as a module ( = code which can be inserted in and removed from the running kernel whenever you want), say M here and read <file:Documentation/kbuild/modules.txt>. The module will be called iwlwifi. if IWLWIFI config IWLWIFI_LEDS bool depends on LEDS_CLASS=y || LEDS_CLASS=IWLWIFI depends on IWLMVM || IWLDVM select BPAUTO_LEDS_TRIGGERS select MAC80211_LEDS default y config IWLMVM tristate "Intel Wireless WiFi MVM Firmware support" depends on m select BPAUTO_WANT_DEV_COREDUMP depends on MAC80211 help This is the driver that supports the MVM firmware. The list of the devices that use this firmware is available here: https://wireless.wiki.kernel.org/en/users/drivers/iwlwifi#firmware # don't call it _MODULE -- will confuse Kconfig/fixdep/... config IWLWIFI_OPMODE_MODULAR bool default y if IWLDVM=m default y if IWLMVM=m default y if IWLXVT=m default y if IWLFMAC=m comment "WARNING: iwlwifi is useless without IWLDVM, IWLMVM or IWLFMAC" depends on IWLDVM=n && IWLMVM=n && IWLFMAC=n config IWLWIFI_BCAST_FILTERING bool "Enable broadcast filtering" depends on IWLMVM help Say Y here to enable default bcast filtering configuration. Enabling broadcast filtering will drop any incoming wireless broadcast frames, except some very specific predefined patterns (e.g. incoming arp requests). If unsure, don't enable this option, as some programs might expect incoming broadcasts for their normal operations.
menu "Debugging Options" config IWLWIFI_DEBUG bool "Enable full debugging output in the iwlwifi driver" ---help--- This option will enable debug tracing output for the iwlwifi drivers This will result in the kernel module being ~100k larger. You can control which debug output is sent to the kernel log by setting the value in /sys/module/iwlwifi/parameters/debug This entry will only exist if this option is enabled. To set a value, simply echo an 8-byte hex value to the same file: % echo 0x43fff > /sys/module/iwlwifi/parameters/debug You can find the list of debug mask values in: drivers/net/wireless/iwlwifi/iwl-debug.h If this is your first time using this driver, you should say Y here as the debug information can assist others in helping you resolve any problems you may encounter. config IWLWIFI_DEBUGFS bool "iwlwifi debugfs support" depends on MAC80211_DEBUGFS ---help--- Enable creation of debugfs files for the iwlwifi drivers. This is a low-impact option that allows getting insight into the driver's state at runtime. config IWLWIFI_DEVICE_TRACING bool "iwlwifi device access tracing" depends on EVENT_TRACING default y help Say Y here to trace all commands, including TX frames and IO accesses, sent to the device. If you say yes, iwlwifi will register with the ftrace framework for event tracing and dump all this information to the ringbuffer, you may need to increase the ringbuffer size. See the ftrace documentation for more information. When tracing is not enabled, this option still has some (though rather small) overhead. If unsure, say Y so we can help you better when problems occur. endmenu endif source "drivers/net/wireless/intel/iwlwifi/Kconfig.noupstream" backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/Kconfig.noupstream000066400000000000000000000066301351064634500302730ustar00rootroot00000000000000config IWLXVT tristate "Enable XVT op_mode" depends on IWLWIFI select IWLWIFI_DEVICE_TESTMODE select IWLWIFI_SUPPORT_DEBUG_OVERRIDES help This option enables the xVT operation mode. xVT mode supports special debug and testing operations that are not supported in the standard operation modes, and is used for verification purposes. xVT mode does not support the standard WiFi operations, and does not communicate with the user mode through the nl80211. Enabling this options allows to fully control the HW using an external tool in user space. Say Y if you have the user space application and want to execute debug and testing flows on the HW. config IWLWIFI_NUM_CHANNELS int "number of supported concurrent channels" range 1 2 default 2 depends on IWLMVM config IWLWIFI_THERMAL_DEBUGFS bool "enable thermal debug write and read support" depends on IWLWIFI_DEBUGFS default y help Enable thermal settings debugfs entries that can be used to change the temperatures and other parameters for testing, This code isn't intended for upstream, hence the config option. You shouldn't turn it off. config IWLWIFI_DEVICE_TESTMODE bool "enable generic netlink testmode support" depends on IWLWIFI default y if IWLMVM select IWLWIFI_SUPPORT_DEBUG_OVERRIDES config IWLWIFI_SUPPORT_DEBUG_OVERRIDES depends on IWLWIFI bool "enable default value override (for debugging)" config IWLWIFI_FORCE_OFDM_RATE bool "force transmit to OFDM low rate" depends on IWLMVM help This option force the FW to transmit OFDM rate. If unsure say N, Say Y if you have this code and want to transmit OFDM low rate. 
config IWLMVM_P2P_OPPPS_TEST_WA bool "Use WA to pass P2P OPPPS certification test" depends on IWLMVM ---help--- This option enables a WA to pass P2P OPPPS certification test. This test verifies P2P client power management behavior when connected to P2P GO that advertises OPPPS. Since P2P client power management is not supported currently by FW a special WA is required. Send zeroed quota on P2P client MAC if power save is enabled from user space to mimic power management behavior. Update quota to normal value otherwise. config IWLMVM_VENDOR_CMDS bool "enable vendor commands" depends on IWLMVM default y help This option enables support for vendor commands, including some that don't have their own Kconfig option. Other Kconfig options depend on this one as well. This option mostly exists for non-upstream tagging, so best leave it set to Y. config IWLMVM_TDLS_PEER_CACHE bool "enable TDLS peer caching and traffic monitoring" depends on IWLMVM_VENDOR_CMDS default y help This option enables (vendor) commands from userspace to monitor traffic to and from specific peers on the same BSS. These peers might be TDLS peers, or candidates to become such. config IWLWIFI_DISALLOW_OLDER_FW bool "disallow fallback to older firmware images" depends on IWLWIFI help This Kconfig option allows disabling fallback to older firmware images to ensure tight coupling between driver and firmware releases. When enabled, the dbg-cfg framework can still override this setting. config IWLWIFI_NUM_STA_INTERFACES int "number of supported concurrent station interfaces" range 1 2 default 1 depends on IWLMVM config REJECT_NONUPSTREAM_NL80211 bool "reject non-upstreamed nl80211 commands" depends on CFG80211 help Say Y to disable non-upstream commands. backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/Makefile000066400000000000000000000024401351064634500262270ustar00rootroot00000000000000# SPDX-License-Identifier: GPL-2.0 # common obj-$(CPTCFG_IWLWIFI) += iwlwifi.o iwlwifi-objs += iwl-io.o iwlwifi-objs += iwl-drv.o iwlwifi-objs += iwl-debug.o iwlwifi-objs += iwl-eeprom-read.o iwl-eeprom-parse.o iwlwifi-objs += iwl-phy-db.o iwl-nvm-parse.o iwlwifi-objs += iwl-dbg-tlv.o iwlwifi-objs += iwl-trans.o iwlwifi-objs += fw/notif-wait.o iwlwifi-$(CPTCFG_IWLMVM) += fw/paging.o fw/smem.o fw/init.o fw/dbg.o iwlwifi-$(CPTCFG_IWLXVT) += fw/init.o iwlwifi-$(CPTCFG_IWLWIFI_DEBUGFS) += fw/debugfs.o iwlwifi-$(CONFIG_ACPI) += fw/acpi.o # Bus iwlwifi-objs += pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o iwlwifi-objs += pcie/ctxt-info.o pcie/ctxt-info-gen3.o pcie/trans-gen2.o pcie/tx-gen2.o iwlwifi-$(CPTCFG_IWLMVM) += cfg/7000.o cfg/8000.o iwlwifi-$(CPTCFG_IWLMVM) += cfg/9000.o cfg/22000.o iwlwifi-$(CPTCFG_IWLWIFI_SUPPORT_DEBUG_OVERRIDES) += iwl-dbg-cfg.o iwlwifi-objs += $(iwlwifi-m) iwlwifi-$(CPTCFG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o iwlwifi-$(CPTCFG_IWLWIFI_DEVICE_TESTMODE) += iwl-tm-gnl.o iwl-dnt-cfg.o iwl-dnt-dispatch.o iwlwifi-$(CPTCFG_IWLWIFI_DEVICE_TESTMODE) += iwl-dnt-dev-if.o fw/testmode.o ccflags-y += -I$(src) subdir-ccflags-y += $(call cc-option,-Wimplicit-fallthrough) obj-$(CPTCFG_IWLMVM) += mvm/ obj-$(CPTCFG_IWLXVT) += xvt/ CFLAGS_iwl-devtrace.o := -I$(src) 
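# The composite-object pattern above works as follows: a line of the
# form "iwlwifi-$(CPTCFG_FOO) += bar.o" links bar.o into iwlwifi.ko only
# when CPTCFG_FOO=y, and "iwlwifi-objs += $(iwlwifi-m)" folds in objects
# whose option evaluated to m. A hypothetical new debugfs-only object
# would follow the same shape, e.g.:
#
#   iwlwifi-$(CPTCFG_IWLWIFI_DEBUGFS) += my-debug-hooks.o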
backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/cfg/000077500000000000000000000000001351064634500253265ustar00rootroot00000000000000backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/cfg/22000.c000066400000000000000000000573401351064634500261460ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2015-2017 Intel Deutschland GmbH * Copyright (C) 2018-2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * BSD LICENSE * * Copyright(c) 2015-2017 Intel Deutschland GmbH * Copyright (C) 2018-2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* *****************************************************************************/ #include #include #include "iwl-config.h" /* Highest firmware API version supported */ #define IWL_22000_UCODE_API_MAX 51 /* Lowest firmware API version supported */ #define IWL_22000_UCODE_API_MIN 39 /* NVM versions */ #define IWL_22000_NVM_VERSION 0x0a1d /* Memory offsets and lengths */ #define IWL_22000_DCCM_OFFSET 0x800000 /* LMAC1 */ #define IWL_22000_DCCM_LEN 0x10000 /* LMAC1 */ #define IWL_22000_DCCM2_OFFSET 0x880000 #define IWL_22000_DCCM2_LEN 0x8000 #define IWL_22000_SMEM_OFFSET 0x400000 #define IWL_22000_SMEM_LEN 0xD0000 #define IWL_22000_JF_FW_PRE "iwlwifi-Qu-a0-jf-b0-" #define IWL_22000_HR_FW_PRE "iwlwifi-Qu-a0-hr-a0-" #define IWL_22000_HR_CDB_FW_PRE "iwlwifi-QuIcp-z0-hrcdb-a0-" #define IWL_22000_HR_A_F0_FW_PRE "iwlwifi-QuQnj-f0-hr-a0-" #define IWL_22000_QU_B_HR_B_FW_PRE "iwlwifi-Qu-b0-hr-b0-" #define IWL_22000_HR_B_FW_PRE "iwlwifi-QuQnj-b0-hr-b0-" #define IWL_22000_HR_A0_FW_PRE "iwlwifi-QuQnj-a0-hr-a0-" #define IWL_QU_C_HR_B_FW_PRE "iwlwifi-Qu-c0-hr-b0-" #define IWL_QU_B_JF_B_FW_PRE "iwlwifi-Qu-b0-jf-b0-" #define IWL_QU_C_JF_B_FW_PRE "iwlwifi-Qu-c0-jf-b0-" #define IWL_QUZ_A_HR_B_FW_PRE "iwlwifi-QuZ-a0-hr-b0-" #define IWL_QUZ_A_JF_B_FW_PRE "iwlwifi-QuZ-a0-jf-b0-" #define IWL_QNJ_B_JF_B_FW_PRE "iwlwifi-QuQnj-b0-jf-b0-" #define IWL_CC_A_FW_PRE "iwlwifi-cc-a0-" #define IWL_22000_SO_A_JF_B_FW_PRE "iwlwifi-so-a0-jf-b0-" #define IWL_22000_SO_A_HR_B_FW_PRE "iwlwifi-so-a0-hr-b0-" #define IWL_22000_SO_A_GF_A_FW_PRE "iwlwifi-so-a0-gf-a0-" #define IWL_22000_TY_A_GF_A_FW_PRE "iwlwifi-ty-a0-gf-a0-" #define IWL_22000_SO_A_GF4_A_FW_PRE "iwlwifi-so-a0-gf4-a0-" #define IWL_22000_HR_MODULE_FIRMWARE(api) \ IWL_22000_HR_FW_PRE __stringify(api) ".ucode" #define IWL_22000_JF_MODULE_FIRMWARE(api) \ IWL_22000_JF_FW_PRE __stringify(api) ".ucode" #define IWL_22000_HR_A_F0_QNJ_MODULE_FIRMWARE(api) \ IWL_22000_HR_A_F0_FW_PRE __stringify(api) ".ucode" #define IWL_22000_QU_B_HR_B_MODULE_FIRMWARE(api) \ IWL_22000_QU_B_HR_B_FW_PRE __stringify(api) ".ucode" #define IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(api) \ IWL_22000_HR_B_FW_PRE __stringify(api) ".ucode" #define IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(api) \ IWL_22000_HR_A0_FW_PRE __stringify(api) ".ucode" #define IWL_QUZ_A_HR_B_MODULE_FIRMWARE(api) \ IWL_QUZ_A_HR_B_FW_PRE __stringify(api) ".ucode" #define IWL_QUZ_A_JF_B_MODULE_FIRMWARE(api) \ IWL_QUZ_A_JF_B_FW_PRE __stringify(api) ".ucode" #define IWL_QU_C_HR_B_MODULE_FIRMWARE(api) \ IWL_QU_C_HR_B_FW_PRE __stringify(api) ".ucode" #define IWL_QU_B_JF_B_MODULE_FIRMWARE(api) \ IWL_QU_B_JF_B_FW_PRE __stringify(api) ".ucode" #define IWL_QNJ_B_JF_B_MODULE_FIRMWARE(api) \ IWL_QNJ_B_JF_B_FW_PRE __stringify(api) ".ucode" #define IWL_CC_A_MODULE_FIRMWARE(api) \ IWL_CC_A_FW_PRE __stringify(api) ".ucode" #define IWL_22000_SO_A_JF_B_MODULE_FIRMWARE(api) \ IWL_22000_SO_A_JF_B_FW_PRE __stringify(api) ".ucode" #define IWL_22000_SO_A_HR_B_MODULE_FIRMWARE(api) \ IWL_22000_SO_A_HR_B_FW_PRE __stringify(api) ".ucode" #define IWL_22000_SO_A_GF_A_MODULE_FIRMWARE(api) \ IWL_22000_SO_A_GF_A_FW_PRE __stringify(api) ".ucode" #define IWL_22000_TY_A_GF_A_MODULE_FIRMWARE(api) \ IWL_22000_TY_A_GF_A_FW_PRE __stringify(api) ".ucode" static const struct iwl_base_params iwl_22000_base_params = { .eeprom_size = OTP_LOW_IMAGE_SIZE_32K, .num_of_queues = 512, .max_tfd_queue_size = 256, .shadow_ram_support = true, .led_compensation = 57, .wd_timeout = IWL_LONG_WD_TIMEOUT, .max_event_log_size = 512, .shadow_reg_enable = true, .pcie_l1_allowed = true, }; static const struct 
iwl_base_params iwl_22560_base_params = { .eeprom_size = OTP_LOW_IMAGE_SIZE_32K, .num_of_queues = 512, .max_tfd_queue_size = 65536, .shadow_ram_support = true, .led_compensation = 57, .wd_timeout = IWL_LONG_WD_TIMEOUT, .max_event_log_size = 512, .shadow_reg_enable = true, .pcie_l1_allowed = true, }; static const struct iwl_ht_params iwl_22000_ht_params = { .stbc = true, .ldpc = true, .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ), }; #define IWL_DEVICE_22000_COMMON \ .ucode_api_max = IWL_22000_UCODE_API_MAX, \ .ucode_api_min = IWL_22000_UCODE_API_MIN, \ .led_mode = IWL_LED_RF_STATE, \ .nvm_hw_section_num = 10, \ .non_shared_ant = ANT_B, \ .dccm_offset = IWL_22000_DCCM_OFFSET, \ .dccm_len = IWL_22000_DCCM_LEN, \ .dccm2_offset = IWL_22000_DCCM2_OFFSET, \ .dccm2_len = IWL_22000_DCCM2_LEN, \ .smem_offset = IWL_22000_SMEM_OFFSET, \ .smem_len = IWL_22000_SMEM_LEN, \ .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, \ .apmg_not_supported = true, \ .mq_rx_supported = true, \ .vht_mu_mimo_supported = true, \ .mac_addr_from_csr = true, \ .ht_params = &iwl_22000_ht_params, \ .nvm_ver = IWL_22000_NVM_VERSION, \ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \ .use_tfh = true, \ .rf_id = true, \ .gen2 = true, \ .nvm_type = IWL_NVM_EXT, \ .dbgc_supported = true, \ .min_umac_error_event_table = 0x400000, \ .d3_debug_data_base_addr = 0x401000, \ .d3_debug_data_length = 60 * 1024, \ .fw_mon_smem_write_ptr_addr = 0xa0c16c, \ .fw_mon_smem_write_ptr_msk = 0xfffff, \ .fw_mon_smem_cycle_cnt_ptr_addr = 0xa0c174, \ .fw_mon_smem_cycle_cnt_ptr_msk = 0xfffff #define IWL_DEVICE_AX200_COMMON \ IWL_DEVICE_22000_COMMON, \ .umac_prph_offset = 0x300000 #define IWL_DEVICE_22500 \ IWL_DEVICE_22000_COMMON, \ .device_family = IWL_DEVICE_FAMILY_22000, \ .base_params = &iwl_22000_base_params, \ .csr = &iwl_csr_v1, \ .gp2_reg_addr = 0xa02c68 #define IWL_DEVICE_22560 \ IWL_DEVICE_22000_COMMON, \ .device_family = IWL_DEVICE_FAMILY_22560, \ .base_params = &iwl_22560_base_params, \ .csr = &iwl_csr_v2 #define IWL_DEVICE_AX210 \ IWL_DEVICE_AX200_COMMON, \ .device_family = IWL_DEVICE_FAMILY_AX210, \ .base_params = &iwl_22560_base_params, \ .csr = &iwl_csr_v1, \ .min_txq_size = 128, \ .gp2_reg_addr = 0xd02c68, \ .min_256_ba_txq_size = 512 const struct iwl_cfg iwl22000_2ac_cfg_hr = { .name = "Intel(R) Dual Band Wireless AC 22000", .fw_name_pre = IWL_22000_HR_FW_PRE, IWL_DEVICE_22500, }; const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb = { .name = "Intel(R) Dual Band Wireless AC 22000", .fw_name_pre = IWL_22000_HR_CDB_FW_PRE, IWL_DEVICE_22500, .cdb = true, }; const struct iwl_cfg iwl22000_2ac_cfg_jf = { .name = "Intel(R) Dual Band Wireless AC 22000", .fw_name_pre = IWL_22000_JF_FW_PRE, IWL_DEVICE_22500, }; const struct iwl_cfg iwl_ax101_cfg_qu_hr = { .name = "Intel(R) Wi-Fi 6 AX101", .fw_name_pre = IWL_22000_QU_B_HR_B_FW_PRE, IWL_DEVICE_22500, /* * This device doesn't support receiving BlockAck with a large bitmap * so we need to restrict the size of transmitted aggregation to the * HT size; mac80211 would otherwise pick the HE max (256) by default. */ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, }; const struct iwl_cfg iwl_ax201_cfg_qu_hr = { .name = "Intel(R) Wi-Fi 6 AX201 160MHz", .fw_name_pre = IWL_22000_QU_B_HR_B_FW_PRE, IWL_DEVICE_22500, /* * This device doesn't support receiving BlockAck with a large bitmap * so we need to restrict the size of transmitted aggregation to the * HT size; mac80211 would otherwise pick the HE max (256) by default. 
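 * (IEEE80211_MAX_AMPDU_BUF_HT is 0x40, i.e. the transmit aggregation
 * window is capped at 64 frames rather than the 256 HE would allow.)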
*/ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, }; const struct iwl_cfg iwl_ax101_cfg_qu_c0_hr_b0 = { .name = "Intel(R) Wi-Fi 6 AX101", .fw_name_pre = IWL_QU_C_HR_B_FW_PRE, IWL_DEVICE_22500, /* * This device doesn't support receiving BlockAck with a large bitmap * so we need to restrict the size of transmitted aggregation to the * HT size; mac80211 would otherwise pick the HE max (256) by default. */ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, }; const struct iwl_cfg iwl_ax201_cfg_qu_c0_hr_b0 = { .name = "Intel(R) Wi-Fi 6 AX201 160MHz", .fw_name_pre = IWL_QU_C_HR_B_FW_PRE, IWL_DEVICE_22500, /* * This device doesn't support receiving BlockAck with a large bitmap * so we need to restrict the size of transmitted aggregation to the * HT size; mac80211 would otherwise pick the HE max (256) by default. */ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, }; const struct iwl_cfg iwl_ax101_cfg_quz_hr = { .name = "Intel(R) Wi-Fi 6 AX101", .fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE, IWL_DEVICE_22500, /* * This device doesn't support receiving BlockAck with a large bitmap * so we need to restrict the size of transmitted aggregation to the * HT size; mac80211 would otherwise pick the HE max (256) by default. */ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, }; const struct iwl_cfg iwl_ax201_cfg_quz_hr = { .name = "Intel(R) Wi-Fi 6 AX201 160MHz", .fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE, IWL_DEVICE_22500, /* * This device doesn't support receiving BlockAck with a large bitmap * so we need to restrict the size of transmitted aggregation to the * HT size; mac80211 would otherwise pick the HE max (256) by default. */ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, }; const struct iwl_cfg iwl_ax1650s_cfg_quz_hr = { .name = "Killer(R) Wi-Fi 6 AX1650s 160MHz Wireless Network Adapter (201D2W)", .fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE, IWL_DEVICE_22500, /* * This device doesn't support receiving BlockAck with a large bitmap * so we need to restrict the size of transmitted aggregation to the * HT size; mac80211 would otherwise pick the HE max (256) by default. */ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, }; const struct iwl_cfg iwl_ax1650i_cfg_quz_hr = { .name = "Killer(R) Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201NGW)", .fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE, IWL_DEVICE_22500, /* * This device doesn't support receiving BlockAck with a large bitmap * so we need to restrict the size of transmitted aggregation to the * HT size; mac80211 would otherwise pick the HE max (256) by default. */ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, }; const struct iwl_cfg iwl_ax200_cfg_cc = { .name = "Intel(R) Wi-Fi 6 AX200 160MHz", .fw_name_pre = IWL_CC_A_FW_PRE, IWL_DEVICE_22500, /* * This device doesn't support receiving BlockAck with a large bitmap * so we need to restrict the size of transmitted aggregation to the * HT size; mac80211 would otherwise pick the HE max (256) by default. */ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, .bisr_workaround = 1, }; const struct iwl_cfg killer1650x_2ax_cfg = { .name = "Killer(R) Wi-Fi 6 AX1650x 160MHz Wireless Network Adapter (200NGW)", .fw_name_pre = IWL_CC_A_FW_PRE, IWL_DEVICE_22500, /* * This device doesn't support receiving BlockAck with a large bitmap * so we need to restrict the size of transmitted aggregation to the * HT size; mac80211 would otherwise pick the HE max (256) by default. 
*/ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, .bisr_workaround = 1, }; const struct iwl_cfg killer1650w_2ax_cfg = { .name = "Killer(R) Wi-Fi 6 AX1650w 160MHz Wireless Network Adapter (200D2W)", .fw_name_pre = IWL_CC_A_FW_PRE, IWL_DEVICE_22500, /* * This device doesn't support receiving BlockAck with a large bitmap * so we need to restrict the size of transmitted aggregation to the * HT size; mac80211 would otherwise pick the HE max (256) by default. */ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, .bisr_workaround = 1, }; /* * All JF radio modules are part of the 9000 series, but the MAC part * looks more like 22000. That's why this device is here, but called * 9560 nevertheless. */ const struct iwl_cfg iwl9461_2ac_cfg_qu_b0_jf_b0 = { .name = "Intel(R) Wireless-AC 9461", .fw_name_pre = IWL_QU_B_JF_B_FW_PRE, IWL_DEVICE_22500, }; const struct iwl_cfg iwl9462_2ac_cfg_qu_b0_jf_b0 = { .name = "Intel(R) Wireless-AC 9462", .fw_name_pre = IWL_QU_B_JF_B_FW_PRE, IWL_DEVICE_22500, }; const struct iwl_cfg iwl9560_2ac_cfg_qu_b0_jf_b0 = { .name = "Intel(R) Wireless-AC 9560", .fw_name_pre = IWL_QU_B_JF_B_FW_PRE, IWL_DEVICE_22500, }; const struct iwl_cfg iwl9560_2ac_160_cfg_qu_b0_jf_b0 = { .name = "Intel(R) Wireless-AC 9560 160MHz", .fw_name_pre = IWL_QU_B_JF_B_FW_PRE, IWL_DEVICE_22500, }; const struct iwl_cfg iwl9461_2ac_cfg_qu_c0_jf_b0 = { .name = "Intel(R) Wireless-AC 9461", .fw_name_pre = IWL_QU_C_JF_B_FW_PRE, IWL_DEVICE_22500, }; const struct iwl_cfg iwl9462_2ac_cfg_qu_c0_jf_b0 = { .name = "Intel(R) Wireless-AC 9462", .fw_name_pre = IWL_QU_C_JF_B_FW_PRE, IWL_DEVICE_22500, }; const struct iwl_cfg iwl9560_2ac_cfg_qu_c0_jf_b0 = { .name = "Intel(R) Wireless-AC 9560", .fw_name_pre = IWL_QU_C_JF_B_FW_PRE, IWL_DEVICE_22500, }; const struct iwl_cfg iwl9560_2ac_160_cfg_qu_c0_jf_b0 = { .name = "Intel(R) Wireless-AC 9560 160MHz", .fw_name_pre = IWL_QU_C_JF_B_FW_PRE, IWL_DEVICE_22500, }; const struct iwl_cfg iwl9560_2ac_cfg_qnj_jf_b0 = { .name = "Intel(R) Wireless-AC 9560 160MHz", .fw_name_pre = IWL_QNJ_B_JF_B_FW_PRE, IWL_DEVICE_22500, /* * This device doesn't support receiving BlockAck with a large bitmap * so we need to restrict the size of transmitted aggregation to the * HT size; mac80211 would otherwise pick the HE max (256) by default. */ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, }; const struct iwl_cfg iwl9560_2ac_cfg_quz_a0_jf_b0_soc = { .name = "Intel(R) Wireless-AC 9560 160MHz", .fw_name_pre = IWL_QUZ_A_JF_B_FW_PRE, IWL_DEVICE_22500, /* * This device doesn't support receiving BlockAck with a large bitmap * so we need to restrict the size of transmitted aggregation to the * HT size; mac80211 would otherwise pick the HE max (256) by default. */ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, .integrated = true, .soc_latency = 5000, }; const struct iwl_cfg iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc = { .name = "Intel(R) Wireless-AC 9560 160MHz", .fw_name_pre = IWL_QUZ_A_JF_B_FW_PRE, IWL_DEVICE_22500, /* * This device doesn't support receiving BlockAck with a large bitmap * so we need to restrict the size of transmitted aggregation to the * HT size; mac80211 would otherwise pick the HE max (256) by default. 
*/ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, .integrated = true, .soc_latency = 5000, }; const struct iwl_cfg iwl9461_2ac_cfg_quz_a0_jf_b0_soc = { .name = "Intel(R) Dual Band Wireless AC 9461", .fw_name_pre = IWL_QUZ_A_JF_B_FW_PRE, IWL_DEVICE_22500, /* * This device doesn't support receiving BlockAck with a large bitmap * so we need to restrict the size of transmitted aggregation to the * HT size; mac80211 would otherwise pick the HE max (256) by default. */ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, .integrated = true, .soc_latency = 5000, }; const struct iwl_cfg iwl9462_2ac_cfg_quz_a0_jf_b0_soc = { .name = "Intel(R) Dual Band Wireless AC 9462", .fw_name_pre = IWL_QUZ_A_JF_B_FW_PRE, IWL_DEVICE_22500, /* * This device doesn't support receiving BlockAck with a large bitmap * so we need to restrict the size of transmitted aggregation to the * HT size; mac80211 would otherwise pick the HE max (256) by default. */ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, .integrated = true, .soc_latency = 5000, }; const struct iwl_cfg iwl9560_killer_s_2ac_cfg_quz_a0_jf_b0_soc = { .name = "Killer (R) Wireless-AC 1550s Wireless Network Adapter (9560NGW)", .fw_name_pre = IWL_QUZ_A_JF_B_FW_PRE, IWL_DEVICE_22500, /* * This device doesn't support receiving BlockAck with a large bitmap * so we need to restrict the size of transmitted aggregation to the * HT size; mac80211 would otherwise pick the HE max (256) by default. */ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, .integrated = true, .soc_latency = 5000, }; const struct iwl_cfg iwl9560_killer_i_2ac_cfg_quz_a0_jf_b0_soc = { .name = "Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)", .fw_name_pre = IWL_QUZ_A_JF_B_FW_PRE, IWL_DEVICE_22500, /* * This device doesn't support receiving BlockAck with a large bitmap * so we need to restrict the size of transmitted aggregation to the * HT size; mac80211 would otherwise pick the HE max (256) by default. */ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, .integrated = true, .soc_latency = 5000, }; const struct iwl_cfg killer1550i_2ac_cfg_qu_b0_jf_b0 = { .name = "Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)", .fw_name_pre = IWL_QU_B_JF_B_FW_PRE, IWL_DEVICE_22500, }; const struct iwl_cfg killer1550s_2ac_cfg_qu_b0_jf_b0 = { .name = "Killer (R) Wireless-AC 1550s Wireless Network Adapter (9560NGW)", .fw_name_pre = IWL_QU_B_JF_B_FW_PRE, IWL_DEVICE_22500, }; const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0 = { .name = "Killer(R) Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201NGW)", .fw_name_pre = IWL_22000_QU_B_HR_B_FW_PRE, IWL_DEVICE_22500, /* * This device doesn't support receiving BlockAck with a large bitmap * so we need to restrict the size of transmitted aggregation to the * HT size; mac80211 would otherwise pick the HE max (256) by default. */ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, }; const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0 = { .name = "Killer(R) Wi-Fi 6 AX1650s 160MHz Wireless Network Adapter (201D2W)", .fw_name_pre = IWL_22000_QU_B_HR_B_FW_PRE, IWL_DEVICE_22500, /* * This device doesn't support receiving BlockAck with a large bitmap * so we need to restrict the size of transmitted aggregation to the * HT size; mac80211 would otherwise pick the HE max (256) by default. 
*/ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, }; const struct iwl_cfg iwl22000_2ax_cfg_jf = { .name = "Intel(R) Dual Band Wireless AX 22000", .fw_name_pre = IWL_QU_B_JF_B_FW_PRE, IWL_DEVICE_22500, /* * This device doesn't support receiving BlockAck with a large bitmap * so we need to restrict the size of transmitted aggregation to the * HT size; mac80211 would otherwise pick the HE max (256) by default. */ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, }; const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0_f0 = { .name = "Intel(R) Dual Band Wireless AX 22000", .fw_name_pre = IWL_22000_HR_A_F0_FW_PRE, IWL_DEVICE_22500, /* * This device doesn't support receiving BlockAck with a large bitmap * so we need to restrict the size of transmitted aggregation to the * HT size; mac80211 would otherwise pick the HE max (256) by default. */ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, }; const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0_f0 = { .name = "Intel(R) Dual Band Wireless AX 22000", .fw_name_pre = IWL_22000_QU_B_HR_B_FW_PRE, IWL_DEVICE_22500, /* * This device doesn't support receiving BlockAck with a large bitmap * so we need to restrict the size of transmitted aggregation to the * HT size; mac80211 would otherwise pick the HE max (256) by default. */ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, }; const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0 = { .name = "Intel(R) Dual Band Wireless AX 22000", .fw_name_pre = IWL_22000_HR_B_FW_PRE, IWL_DEVICE_22500, /* * This device doesn't support receiving BlockAck with a large bitmap * so we need to restrict the size of transmitted aggregation to the * HT size; mac80211 would otherwise pick the HE max (256) by default. */ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, }; const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0 = { .name = "Intel(R) Dual Band Wireless AX 22000", .fw_name_pre = IWL_22000_HR_A0_FW_PRE, IWL_DEVICE_22500, /* * This device doesn't support receiving BlockAck with a large bitmap * so we need to restrict the size of transmitted aggregation to the * HT size; mac80211 would otherwise pick the HE max (256) by default. 
*/ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, }; const struct iwl_cfg iwlax210_2ax_cfg_so_jf_a0 = { .name = "Intel(R) Wireless-AC 9560 160MHz", .fw_name_pre = IWL_22000_SO_A_JF_B_FW_PRE, IWL_DEVICE_AX210, }; const struct iwl_cfg iwlax210_2ax_cfg_so_hr_a0 = { .name = "Intel(R) Wi-Fi 7 AX210 160MHz", .fw_name_pre = IWL_22000_SO_A_HR_B_FW_PRE, IWL_DEVICE_AX210, }; const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0 = { .name = "Intel(R) Wi-Fi 7 AX211 160MHz", .fw_name_pre = IWL_22000_SO_A_GF_A_FW_PRE, .uhb_supported = true, IWL_DEVICE_AX210, }; const struct iwl_cfg iwlax210_2ax_cfg_ty_gf_a0 = { .name = "Intel(R) Wi-Fi 7 AX210 160MHz", .fw_name_pre = IWL_22000_TY_A_GF_A_FW_PRE, .uhb_supported = true, IWL_DEVICE_AX210, }; const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0 = { .name = "Intel(R) Wi-Fi 7 AX411 160MHz", .fw_name_pre = IWL_22000_SO_A_GF4_A_FW_PRE, IWL_DEVICE_AX210, }; MODULE_FIRMWARE(IWL_22000_HR_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_22000_JF_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_22000_HR_A_F0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_QU_C_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_QU_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_QUZ_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_QUZ_A_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_QNJ_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_CC_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_22000_SO_A_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_22000_SO_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_22000_SO_A_GF_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_22000_TY_A_GF_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/cfg/7000.c000066400000000000000000000267461351064634500260770ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2015 Intel Deutschland GmbH * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2015 Intel Deutschland GmbH * Copyright(c) 2018 Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #include <linux/module.h> #include <linux/stringify.h> #include "iwl-config.h" /* Highest firmware API version supported */ #define IWL7260_UCODE_API_MAX 17 #define IWL7265_UCODE_API_MAX 17 #define IWL7265D_UCODE_API_MAX 29 #define IWL3168_UCODE_API_MAX 29 /* Lowest firmware API version supported */ #define IWL7260_UCODE_API_MIN 17 #define IWL7265_UCODE_API_MIN 17 #define IWL7265D_UCODE_API_MIN 22 #define IWL3168_UCODE_API_MIN 22 /* NVM versions */ #define IWL7260_NVM_VERSION 0x0a1d #define IWL3160_NVM_VERSION 0x709 #define IWL3165_NVM_VERSION 0x709 #define IWL3168_NVM_VERSION 0xd01 #define IWL7265_NVM_VERSION 0x0a1d #define IWL7265D_NVM_VERSION 0x0c11 /* DCCM offsets and lengths */ #define IWL7000_DCCM_OFFSET 0x800000 #define IWL7260_DCCM_LEN 0x14000 #define IWL3160_DCCM_LEN 0x10000 #define IWL7265_DCCM_LEN 0x17A00 #define IWL7260_FW_PRE "iwlwifi-7260-" #define IWL7260_MODULE_FIRMWARE(api) IWL7260_FW_PRE __stringify(api) ".ucode" #define IWL3160_FW_PRE "iwlwifi-3160-" #define IWL3160_MODULE_FIRMWARE(api) IWL3160_FW_PRE __stringify(api) ".ucode" #define IWL3168_FW_PRE "iwlwifi-3168-" #define IWL3168_MODULE_FIRMWARE(api) IWL3168_FW_PRE __stringify(api) ".ucode" #define IWL7265_FW_PRE "iwlwifi-7265-" #define IWL7265_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode" #define IWL7265D_FW_PRE "iwlwifi-7265D-" #define IWL7265D_MODULE_FIRMWARE(api) IWL7265D_FW_PRE __stringify(api) ".ucode" static const struct iwl_base_params iwl7000_base_params = { .eeprom_size = OTP_LOW_IMAGE_SIZE_16K, .num_of_queues = 31, .max_tfd_queue_size = 256, .shadow_ram_support = true, .led_compensation = 57, .wd_timeout = IWL_LONG_WD_TIMEOUT, .max_event_log_size = 512, .shadow_reg_enable = true, .pcie_l1_allowed = true, .apmg_wake_up_wa = true, }; static const struct iwl_tt_params iwl7000_high_temp_tt_params = { .ct_kill_entry = 118, .ct_kill_exit = 96, .ct_kill_duration = 5, .dynamic_smps_entry = 114, .dynamic_smps_exit = 110, .tx_protection_entry = 114, .tx_protection_exit = 108, .tx_backoff = { {.temperature = 112, .backoff = 
300}, {.temperature = 113, .backoff = 800}, {.temperature = 114, .backoff = 1500}, {.temperature = 115, .backoff = 3000}, {.temperature = 116, .backoff = 5000}, {.temperature = 117, .backoff = 10000}, }, .support_ct_kill = true, .support_dynamic_smps = true, .support_tx_protection = true, .support_tx_backoff = true, }; static const struct iwl_ht_params iwl7000_ht_params = { .stbc = true, .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ), }; #define IWL_DEVICE_7000_COMMON \ .device_family = IWL_DEVICE_FAMILY_7000, \ .base_params = &iwl7000_base_params, \ .led_mode = IWL_LED_RF_STATE, \ .nvm_hw_section_num = 0, \ .non_shared_ant = ANT_A, \ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \ .dccm_offset = IWL7000_DCCM_OFFSET, \ .csr = &iwl_csr_v1 #define IWL_DEVICE_7000 \ IWL_DEVICE_7000_COMMON, \ .ucode_api_max = IWL7260_UCODE_API_MAX, \ .ucode_api_min = IWL7260_UCODE_API_MIN #define IWL_DEVICE_7005 \ IWL_DEVICE_7000_COMMON, \ .ucode_api_max = IWL7265_UCODE_API_MAX, \ .ucode_api_min = IWL7265_UCODE_API_MIN #define IWL_DEVICE_3008 \ IWL_DEVICE_7000_COMMON, \ .ucode_api_max = IWL3168_UCODE_API_MAX, \ .ucode_api_min = IWL3168_UCODE_API_MIN #define IWL_DEVICE_7005D \ IWL_DEVICE_7000_COMMON, \ .ucode_api_max = IWL7265D_UCODE_API_MAX, \ .ucode_api_min = IWL7265D_UCODE_API_MIN const struct iwl_cfg iwl7260_2ac_cfg = { .name = "Intel(R) Dual Band Wireless AC 7260", .fw_name_pre = IWL7260_FW_PRE, IWL_DEVICE_7000, .ht_params = &iwl7000_ht_params, .nvm_ver = IWL7260_NVM_VERSION, .host_interrupt_operation_mode = true, .lp_xtal_workaround = true, .dccm_len = IWL7260_DCCM_LEN, }; const struct iwl_cfg iwl7260_2ac_cfg_high_temp = { .name = "Intel(R) Dual Band Wireless AC 7260", .fw_name_pre = IWL7260_FW_PRE, IWL_DEVICE_7000, .ht_params = &iwl7000_ht_params, .nvm_ver = IWL7260_NVM_VERSION, .high_temp = true, .host_interrupt_operation_mode = true, .lp_xtal_workaround = true, .dccm_len = IWL7260_DCCM_LEN, .thermal_params = &iwl7000_high_temp_tt_params, }; const struct iwl_cfg iwl7260_2n_cfg = { .name = "Intel(R) Dual Band Wireless N 7260", .fw_name_pre = IWL7260_FW_PRE, IWL_DEVICE_7000, .ht_params = &iwl7000_ht_params, .nvm_ver = IWL7260_NVM_VERSION, .host_interrupt_operation_mode = true, .lp_xtal_workaround = true, .dccm_len = IWL7260_DCCM_LEN, }; const struct iwl_cfg iwl7260_n_cfg = { .name = "Intel(R) Wireless N 7260", .fw_name_pre = IWL7260_FW_PRE, IWL_DEVICE_7000, .ht_params = &iwl7000_ht_params, .nvm_ver = IWL7260_NVM_VERSION, .host_interrupt_operation_mode = true, .lp_xtal_workaround = true, .dccm_len = IWL7260_DCCM_LEN, }; const struct iwl_cfg iwl3160_2ac_cfg = { .name = "Intel(R) Dual Band Wireless AC 3160", .fw_name_pre = IWL3160_FW_PRE, IWL_DEVICE_7000, .ht_params = &iwl7000_ht_params, .nvm_ver = IWL3160_NVM_VERSION, .host_interrupt_operation_mode = true, .dccm_len = IWL3160_DCCM_LEN, }; const struct iwl_cfg iwl3160_2n_cfg = { .name = "Intel(R) Dual Band Wireless N 3160", .fw_name_pre = IWL3160_FW_PRE, IWL_DEVICE_7000, .ht_params = &iwl7000_ht_params, .nvm_ver = IWL3160_NVM_VERSION, .host_interrupt_operation_mode = true, .dccm_len = IWL3160_DCCM_LEN, }; const struct iwl_cfg iwl3160_n_cfg = { .name = "Intel(R) Wireless N 3160", .fw_name_pre = IWL3160_FW_PRE, IWL_DEVICE_7000, .ht_params = &iwl7000_ht_params, .nvm_ver = IWL3160_NVM_VERSION, .host_interrupt_operation_mode = true, .dccm_len = IWL3160_DCCM_LEN, }; static const struct iwl_pwr_tx_backoff iwl7265_pwr_tx_backoffs[] = { {.pwr = 1600, .backoff = 0}, {.pwr = 1300, .backoff = 467}, {.pwr = 900, .backoff = 1900}, {.pwr = 800, 
.backoff = 2630}, {.pwr = 700, .backoff = 3720}, {.pwr = 600, .backoff = 5550}, {.pwr = 500, .backoff = 9350}, {0}, }; static const struct iwl_ht_params iwl7265_ht_params = { .stbc = true, .ldpc = true, .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ), }; const struct iwl_cfg iwl3165_2ac_cfg = { .name = "Intel(R) Dual Band Wireless AC 3165", .fw_name_pre = IWL7265D_FW_PRE, IWL_DEVICE_7005D, .ht_params = &iwl7000_ht_params, .nvm_ver = IWL3165_NVM_VERSION, .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs, .dccm_len = IWL7265_DCCM_LEN, }; const struct iwl_cfg iwl3168_2ac_cfg = { .name = "Intel(R) Dual Band Wireless AC 3168", .fw_name_pre = IWL3168_FW_PRE, IWL_DEVICE_3008, .ht_params = &iwl7000_ht_params, .nvm_ver = IWL3168_NVM_VERSION, .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs, .dccm_len = IWL7265_DCCM_LEN, .nvm_type = IWL_NVM_SDP, }; const struct iwl_cfg iwl7265_2ac_cfg = { .name = "Intel(R) Dual Band Wireless AC 7265", .fw_name_pre = IWL7265_FW_PRE, IWL_DEVICE_7005, .ht_params = &iwl7265_ht_params, .nvm_ver = IWL7265_NVM_VERSION, .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs, .dccm_len = IWL7265_DCCM_LEN, }; const struct iwl_cfg iwl7265_2n_cfg = { .name = "Intel(R) Dual Band Wireless N 7265", .fw_name_pre = IWL7265_FW_PRE, IWL_DEVICE_7005, .ht_params = &iwl7265_ht_params, .nvm_ver = IWL7265_NVM_VERSION, .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs, .dccm_len = IWL7265_DCCM_LEN, }; const struct iwl_cfg iwl7265_n_cfg = { .name = "Intel(R) Wireless N 7265", .fw_name_pre = IWL7265_FW_PRE, IWL_DEVICE_7005, .ht_params = &iwl7265_ht_params, .nvm_ver = IWL7265_NVM_VERSION, .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs, .dccm_len = IWL7265_DCCM_LEN, }; const struct iwl_cfg iwl7265d_2ac_cfg = { .name = "Intel(R) Dual Band Wireless AC 7265", .fw_name_pre = IWL7265D_FW_PRE, IWL_DEVICE_7005D, .ht_params = &iwl7265_ht_params, .nvm_ver = IWL7265D_NVM_VERSION, .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs, .dccm_len = IWL7265_DCCM_LEN, }; const struct iwl_cfg iwl7265d_2n_cfg = { .name = "Intel(R) Dual Band Wireless N 7265", .fw_name_pre = IWL7265D_FW_PRE, IWL_DEVICE_7005D, .ht_params = &iwl7265_ht_params, .nvm_ver = IWL7265D_NVM_VERSION, .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs, .dccm_len = IWL7265_DCCM_LEN, }; const struct iwl_cfg iwl7265d_n_cfg = { .name = "Intel(R) Wireless N 7265", .fw_name_pre = IWL7265D_FW_PRE, IWL_DEVICE_7005D, .ht_params = &iwl7265_ht_params, .nvm_ver = IWL7265D_NVM_VERSION, .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs, .dccm_len = IWL7265_DCCM_LEN, }; MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_MAX)); MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL7260_UCODE_API_MAX)); MODULE_FIRMWARE(IWL3168_MODULE_FIRMWARE(IWL3168_UCODE_API_MAX)); MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7265_UCODE_API_MAX)); MODULE_FIRMWARE(IWL7265D_MODULE_FIRMWARE(IWL7265D_UCODE_API_MAX)); backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/cfg/8000.c000066400000000000000000000167421351064634500260730ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2014 Intel Corporation. All rights reserved. 
* Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 Intel Deutschland GmbH * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2014 Intel Corporation. All rights reserved. * Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* *****************************************************************************/ #include <linux/module.h> #include <linux/stringify.h> #include "iwl-config.h" /* Highest firmware API version supported */ #define IWL8000_UCODE_API_MAX 36 #define IWL8265_UCODE_API_MAX 36 /* Lowest firmware API version supported */ #define IWL8000_UCODE_API_MIN 22 #define IWL8265_UCODE_API_MIN 22 /* NVM versions */ #define IWL8000_NVM_VERSION 0x0a1d /* Memory offsets and lengths */ #define IWL8260_DCCM_OFFSET 0x800000 #define IWL8260_DCCM_LEN 0x18000 #define IWL8260_DCCM2_OFFSET 0x880000 #define IWL8260_DCCM2_LEN 0x8000 #define IWL8260_SMEM_OFFSET 0x400000 #define IWL8260_SMEM_LEN 0x68000 #define IWL8000_FW_PRE "iwlwifi-8000C-" #define IWL8000_MODULE_FIRMWARE(api) \ IWL8000_FW_PRE __stringify(api) ".ucode" #define IWL8265_FW_PRE "iwlwifi-8265-" #define IWL8265_MODULE_FIRMWARE(api) \ IWL8265_FW_PRE __stringify(api) ".ucode" #define DEFAULT_NVM_FILE_FAMILY_8000C "nvmData-8000C" static const struct iwl_base_params iwl8000_base_params = { .eeprom_size = OTP_LOW_IMAGE_SIZE_32K, .num_of_queues = 31, .max_tfd_queue_size = 256, .shadow_ram_support = true, .led_compensation = 57, .wd_timeout = IWL_LONG_WD_TIMEOUT, .max_event_log_size = 512, .shadow_reg_enable = true, .pcie_l1_allowed = true, }; static const struct iwl_ht_params iwl8000_ht_params = { .stbc = true, .ldpc = true, .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ), }; static const struct iwl_tt_params iwl8000_tt_params = { .ct_kill_entry = 115, .ct_kill_exit = 93, .ct_kill_duration = 5, .dynamic_smps_entry = 111, .dynamic_smps_exit = 107, .tx_protection_entry = 112, .tx_protection_exit = 105, .tx_backoff = { {.temperature = 110, .backoff = 200}, {.temperature = 111, .backoff = 600}, {.temperature = 112, .backoff = 1200}, {.temperature = 113, .backoff = 2000}, {.temperature = 114, .backoff = 4000}, }, .support_ct_kill = true, .support_dynamic_smps = true, .support_tx_protection = true, .support_tx_backoff = true, }; #define IWL_DEVICE_8000_COMMON \ .device_family = IWL_DEVICE_FAMILY_8000, \ .base_params = &iwl8000_base_params, \ .led_mode = IWL_LED_RF_STATE, \ .nvm_hw_section_num = 10, \ .features = NETIF_F_RXCSUM, \ .non_shared_ant = ANT_A, \ .dccm_offset = IWL8260_DCCM_OFFSET, \ .dccm_len = IWL8260_DCCM_LEN, \ .dccm2_offset = IWL8260_DCCM2_OFFSET, \ .dccm2_len = IWL8260_DCCM2_LEN, \ .smem_offset = IWL8260_SMEM_OFFSET, \ .smem_len = IWL8260_SMEM_LEN, \ .default_nvm_file_C_step = DEFAULT_NVM_FILE_FAMILY_8000C, \ .thermal_params = &iwl8000_tt_params, \ .apmg_not_supported = true, \ .nvm_type = IWL_NVM_EXT, \ .dbgc_supported = true, \ .min_umac_error_event_table = 0x800000, \ .csr = &iwl_csr_v1 #define IWL_DEVICE_8000 \ IWL_DEVICE_8000_COMMON, \ .ucode_api_max = IWL8000_UCODE_API_MAX, \ .ucode_api_min = IWL8000_UCODE_API_MIN \ #define IWL_DEVICE_8260 \ IWL_DEVICE_8000_COMMON, \ .ucode_api_max = IWL8000_UCODE_API_MAX, \ .ucode_api_min = IWL8000_UCODE_API_MIN \ #define IWL_DEVICE_8265 \ IWL_DEVICE_8000_COMMON, \ .ucode_api_max = IWL8265_UCODE_API_MAX, \ .ucode_api_min = IWL8265_UCODE_API_MIN \ const struct iwl_cfg iwl8260_2n_cfg = { .name = "Intel(R) Dual Band Wireless N 8260", .fw_name_pre = IWL8000_FW_PRE, IWL_DEVICE_8260, .ht_params = &iwl8000_ht_params, .nvm_ver = IWL8000_NVM_VERSION, }; const struct iwl_cfg iwl8260_2ac_cfg = { .name = "Intel(R) Dual Band Wireless AC 8260", .fw_name_pre = IWL8000_FW_PRE, IWL_DEVICE_8260, .ht_params = &iwl8000_ht_params, .nvm_ver = IWL8000_NVM_VERSION, .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, }; const struct iwl_cfg iwl8265_2ac_cfg = { 
.name = "Intel(R) Dual Band Wireless AC 8265", .fw_name_pre = IWL8265_FW_PRE, IWL_DEVICE_8265, .ht_params = &iwl8000_ht_params, .nvm_ver = IWL8000_NVM_VERSION, .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, .vht_mu_mimo_supported = true, }; const struct iwl_cfg iwl8275_2ac_cfg = { .name = "Intel(R) Dual Band Wireless AC 8275", .fw_name_pre = IWL8265_FW_PRE, IWL_DEVICE_8265, .ht_params = &iwl8000_ht_params, .nvm_ver = IWL8000_NVM_VERSION, .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, .vht_mu_mimo_supported = true, }; const struct iwl_cfg iwl4165_2ac_cfg = { .name = "Intel(R) Dual Band Wireless AC 4165", .fw_name_pre = IWL8000_FW_PRE, IWL_DEVICE_8000, .ht_params = &iwl8000_ht_params, .nvm_ver = IWL8000_NVM_VERSION, .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, }; MODULE_FIRMWARE(IWL8000_MODULE_FIRMWARE(IWL8000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL8265_MODULE_FIRMWARE(IWL8265_UCODE_API_MAX)); backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/cfg/9000.c000066400000000000000000000240331351064634500260640ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2015-2017 Intel Deutschland GmbH * Copyright (C) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * BSD LICENSE * * Copyright(c) 2015-2017 Intel Deutschland GmbH * Copyright (C) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* *****************************************************************************/ #include <linux/module.h> #include <linux/stringify.h> #include "iwl-config.h" #include "fw/file.h" /* Highest firmware API version supported */ #define IWL9000_UCODE_API_MAX 46 /* Lowest firmware API version supported */ #define IWL9000_UCODE_API_MIN 30 /* NVM versions */ #define IWL9000_NVM_VERSION 0x0a1d /* Memory offsets and lengths */ #define IWL9000_DCCM_OFFSET 0x800000 #define IWL9000_DCCM_LEN 0x18000 #define IWL9000_DCCM2_OFFSET 0x880000 #define IWL9000_DCCM2_LEN 0x8000 #define IWL9000_SMEM_OFFSET 0x400000 #define IWL9000_SMEM_LEN 0x68000 #define IWL9000_FW_PRE "iwlwifi-9000-pu-b0-jf-b0-" #define IWL9260_FW_PRE "iwlwifi-9260-th-b0-jf-b0-" #define IWL9000_MODULE_FIRMWARE(api) \ IWL9000_FW_PRE __stringify(api) ".ucode" #define IWL9260_MODULE_FIRMWARE(api) \ IWL9260_FW_PRE __stringify(api) ".ucode" static const struct iwl_base_params iwl9000_base_params = { .eeprom_size = OTP_LOW_IMAGE_SIZE_32K, .num_of_queues = 31, .max_tfd_queue_size = 256, .shadow_ram_support = true, .led_compensation = 57, .wd_timeout = IWL_LONG_WD_TIMEOUT, .max_event_log_size = 512, .shadow_reg_enable = true, .pcie_l1_allowed = true, }; static const struct iwl_ht_params iwl9000_ht_params = { .stbc = true, .ldpc = true, .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ), }; static const struct iwl_tt_params iwl9000_tt_params = { .ct_kill_entry = 115, .ct_kill_exit = 93, .ct_kill_duration = 5, .dynamic_smps_entry = 111, .dynamic_smps_exit = 107, .tx_protection_entry = 112, .tx_protection_exit = 105, .tx_backoff = { {.temperature = 110, .backoff = 200}, {.temperature = 111, .backoff = 600}, {.temperature = 112, .backoff = 1200}, {.temperature = 113, .backoff = 2000}, {.temperature = 114, .backoff = 4000}, }, .support_ct_kill = true, .support_dynamic_smps = true, .support_tx_protection = true, .support_tx_backoff = true, }; #define IWL_DEVICE_9000 \ .ucode_api_max = IWL9000_UCODE_API_MAX, \ .ucode_api_min = IWL9000_UCODE_API_MIN, \ .device_family = IWL_DEVICE_FAMILY_9000, \ .base_params = &iwl9000_base_params, \ .led_mode = IWL_LED_RF_STATE, \ .nvm_hw_section_num = 10, \ .non_shared_ant = ANT_B, \ .dccm_offset = IWL9000_DCCM_OFFSET, \ .dccm_len = IWL9000_DCCM_LEN, \ .dccm2_offset = IWL9000_DCCM2_OFFSET, \ .dccm2_len = IWL9000_DCCM2_LEN, \ .smem_offset = IWL9000_SMEM_OFFSET, \ .smem_len = IWL9000_SMEM_LEN, \ .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, \ .thermal_params = &iwl9000_tt_params, \ .apmg_not_supported = true, \ .mq_rx_supported = true, \ .vht_mu_mimo_supported = true, \ .mac_addr_from_csr = true, \ .rf_id = true, \ .nvm_type = IWL_NVM_EXT, \ .dbgc_supported = true, \ .min_umac_error_event_table = 0x800000, \ .csr = &iwl_csr_v1, \ .d3_debug_data_base_addr = 0x401000, \ .d3_debug_data_length = 92 * 1024, \ .ht_params = &iwl9000_ht_params, \ .nvm_ver = IWL9000_NVM_VERSION, \ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \ .fw_mon_smem_write_ptr_addr = 0xa0476c, \ .fw_mon_smem_write_ptr_msk = 0xfffff, \ .fw_mon_smem_cycle_cnt_ptr_addr = 0xa04774, \ .fw_mon_smem_cycle_cnt_ptr_msk = 0xfffff const struct iwl_cfg iwl9160_2ac_cfg = { .name = "Intel(R) Dual Band Wireless AC 9160", .fw_name_pre = IWL9260_FW_PRE, IWL_DEVICE_9000, }; const struct iwl_cfg iwl9260_2ac_cfg = { .name = "Intel(R) Dual Band Wireless AC 9260", .fw_name_pre = IWL9260_FW_PRE, IWL_DEVICE_9000, }; const struct iwl_cfg iwl9260_2ac_160_cfg = { .name = "Intel(R) Wireless-AC 9260 160MHz", .fw_name_pre = IWL9260_FW_PRE, IWL_DEVICE_9000, }; const struct iwl_cfg 
iwl9260_killer_2ac_cfg = { .name = "Killer (R) Wireless-AC 1550 Wireless Network Adapter (9260NGW)", .fw_name_pre = IWL9260_FW_PRE, IWL_DEVICE_9000, }; const struct iwl_cfg iwl9270_2ac_cfg = { .name = "Intel(R) Dual Band Wireless AC 9270", .fw_name_pre = IWL9260_FW_PRE, IWL_DEVICE_9000, }; const struct iwl_cfg iwl9460_2ac_cfg = { .name = "Intel(R) Dual Band Wireless AC 9460", .fw_name_pre = IWL9260_FW_PRE, IWL_DEVICE_9000, }; const struct iwl_cfg iwl9460_2ac_cfg_soc = { .name = "Intel(R) Dual Band Wireless AC 9460", .fw_name_pre = IWL9000_FW_PRE, IWL_DEVICE_9000, .integrated = true, .soc_latency = 5000, }; const struct iwl_cfg iwl9461_2ac_cfg_soc = { .name = "Intel(R) Dual Band Wireless AC 9461", .fw_name_pre = IWL9000_FW_PRE, IWL_DEVICE_9000, .integrated = true, .soc_latency = 5000, }; const struct iwl_cfg iwl9462_2ac_cfg_soc = { .name = "Intel(R) Dual Band Wireless AC 9462", .fw_name_pre = IWL9000_FW_PRE, IWL_DEVICE_9000, .integrated = true, .soc_latency = 5000, }; const struct iwl_cfg iwl9560_2ac_cfg = { .name = "Intel(R) Dual Band Wireless AC 9560", .fw_name_pre = IWL9260_FW_PRE, IWL_DEVICE_9000, }; const struct iwl_cfg iwl9560_2ac_160_cfg = { .name = "Intel(R) Wireless-AC 9560 160MHz", .fw_name_pre = IWL9260_FW_PRE, IWL_DEVICE_9000, }; const struct iwl_cfg iwl9560_2ac_cfg_soc = { .name = "Intel(R) Dual Band Wireless AC 9560", .fw_name_pre = IWL9000_FW_PRE, IWL_DEVICE_9000, .integrated = true, .soc_latency = 5000, }; const struct iwl_cfg iwl9560_2ac_160_cfg_soc = { .name = "Intel(R) Wireless-AC 9560 160MHz", .fw_name_pre = IWL9000_FW_PRE, IWL_DEVICE_9000, .integrated = true, .soc_latency = 5000, }; const struct iwl_cfg iwl9560_killer_2ac_cfg_soc = { .name = "Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)", .fw_name_pre = IWL9000_FW_PRE, IWL_DEVICE_9000, .integrated = true, .soc_latency = 5000, }; const struct iwl_cfg iwl9560_killer_s_2ac_cfg_soc = { .name = "Killer (R) Wireless-AC 1550s Wireless Network Adapter (9560NGW)", .fw_name_pre = IWL9000_FW_PRE, IWL_DEVICE_9000, .integrated = true, .soc_latency = 5000, }; const struct iwl_cfg iwl9460_2ac_cfg_shared_clk = { .name = "Intel(R) Dual Band Wireless AC 9460", .fw_name_pre = IWL9000_FW_PRE, IWL_DEVICE_9000, .integrated = true, .soc_latency = 5000, .extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK }; const struct iwl_cfg iwl9461_2ac_cfg_shared_clk = { .name = "Intel(R) Dual Band Wireless AC 9461", .fw_name_pre = IWL9000_FW_PRE, IWL_DEVICE_9000, .integrated = true, .soc_latency = 5000, .extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK }; const struct iwl_cfg iwl9462_2ac_cfg_shared_clk = { .name = "Intel(R) Dual Band Wireless AC 9462", .fw_name_pre = IWL9000_FW_PRE, IWL_DEVICE_9000, .integrated = true, .soc_latency = 5000, .extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK }; const struct iwl_cfg iwl9560_2ac_cfg_shared_clk = { .name = "Intel(R) Dual Band Wireless AC 9560", .fw_name_pre = IWL9000_FW_PRE, IWL_DEVICE_9000, .integrated = true, .soc_latency = 5000, .extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK }; const struct iwl_cfg iwl9560_2ac_160_cfg_shared_clk = { .name = "Intel(R) Wireless-AC 9560 160MHz", .fw_name_pre = IWL9000_FW_PRE, IWL_DEVICE_9000, .integrated = true, .soc_latency = 5000, .extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK }; const struct iwl_cfg iwl9560_killer_2ac_cfg_shared_clk = { .name = "Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)", .fw_name_pre = IWL9000_FW_PRE, IWL_DEVICE_9000, .integrated = true, .soc_latency = 5000, .extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK }; const struct iwl_cfg 
iwl9560_killer_s_2ac_cfg_shared_clk = { .name = "Killer (R) Wireless-AC 1550s Wireless Network Adapter (9560NGW)", .fw_name_pre = IWL9000_FW_PRE, IWL_DEVICE_9000, .integrated = true, .soc_latency = 5000, .extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK }; MODULE_FIRMWARE(IWL9000_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL9260_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX)); backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/fw/000077500000000000000000000000001351064634500252035ustar00rootroot00000000000000backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/fw/acpi.c000066400000000000000000000161261351064634500262710ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2017 Intel Deutschland GmbH * Copyright (C) 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2017 Intel Deutschland GmbH * Copyright (C) 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* *****************************************************************************/ #include "iwl-drv.h" #include "iwl-debug.h" #include "acpi.h" void *iwl_acpi_get_object(struct device *dev, acpi_string method) { acpi_handle root_handle; acpi_handle handle; struct acpi_buffer buf = {ACPI_ALLOCATE_BUFFER, NULL}; acpi_status status; root_handle = ACPI_HANDLE(dev); if (!root_handle) { IWL_DEBUG_DEV_RADIO(dev, "Could not retrieve root port ACPI handle\n"); return ERR_PTR(-ENOENT); } /* Get the method's handle */ status = acpi_get_handle(root_handle, method, &handle); if (ACPI_FAILURE(status)) { IWL_DEBUG_DEV_RADIO(dev, "%s method not found\n", method); return ERR_PTR(-ENOENT); } /* Call the method with no arguments */ status = acpi_evaluate_object(handle, NULL, NULL, &buf); if (ACPI_FAILURE(status)) { IWL_DEBUG_DEV_RADIO(dev, "%s invocation failed (0x%x)\n", method, status); return ERR_PTR(-ENOENT); } return buf.pointer; } IWL_EXPORT_SYMBOL(iwl_acpi_get_object); union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev, union acpi_object *data, int data_size, int *tbl_rev) { int i; union acpi_object *wifi_pkg; /* * We need at least one entry in the wifi package that * describes the domain, and one more entry, otherwise there's * no point in reading it. */ if (WARN_ON_ONCE(data_size < 2)) return ERR_PTR(-EINVAL); /* * We need at least two packages, one for the revision and one * for the data itself. Also check that the revision is valid * (i.e. it is an integer smaller than 2, as we currently support only * 2 revisions). */ if (data->type != ACPI_TYPE_PACKAGE || data->package.count < 2 || data->package.elements[0].type != ACPI_TYPE_INTEGER || data->package.elements[0].integer.value > 1) { IWL_DEBUG_DEV_RADIO(dev, "Unsupported packages structure\n"); return ERR_PTR(-EINVAL); } *tbl_rev = data->package.elements[0].integer.value; /* loop through all the packages to find the one for WiFi */ for (i = 1; i < data->package.count; i++) { union acpi_object *domain; wifi_pkg = &data->package.elements[i]; /* skip entries that are not a package with the right size */ if (wifi_pkg->type != ACPI_TYPE_PACKAGE || wifi_pkg->package.count != data_size) continue; domain = &wifi_pkg->package.elements[0]; if (domain->type == ACPI_TYPE_INTEGER && domain->integer.value == ACPI_WIFI_DOMAIN) goto found; } return ERR_PTR(-ENOENT); found: return wifi_pkg; } IWL_EXPORT_SYMBOL(iwl_acpi_get_wifi_pkg); int iwl_acpi_get_mcc(struct device *dev, char *mcc) { union acpi_object *wifi_pkg, *data; u32 mcc_val; int ret, tbl_rev; data = iwl_acpi_get_object(dev, ACPI_WRDD_METHOD); if (IS_ERR(data)) return PTR_ERR(data); wifi_pkg = iwl_acpi_get_wifi_pkg(dev, data, ACPI_WRDD_WIFI_DATA_SIZE, &tbl_rev); if (IS_ERR(wifi_pkg) || tbl_rev != 0) { ret = PTR_ERR(wifi_pkg); goto out_free; } if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) { ret = -EINVAL; goto out_free; } mcc_val = wifi_pkg->package.elements[1].integer.value; mcc[0] = (mcc_val >> 8) & 0xff; mcc[1] = mcc_val & 0xff; mcc[2] = '\0'; ret = 0; out_free: kfree(data); return ret; } IWL_EXPORT_SYMBOL(iwl_acpi_get_mcc); u64 iwl_acpi_get_pwr_limit(struct device *dev) { union acpi_object *data, *wifi_pkg; u64 dflt_pwr_limit; int tbl_rev; data = iwl_acpi_get_object(dev, ACPI_SPLC_METHOD); if (IS_ERR(data)) { dflt_pwr_limit = 0; goto out; } wifi_pkg = iwl_acpi_get_wifi_pkg(dev, data, ACPI_SPLC_WIFI_DATA_SIZE, &tbl_rev); if (IS_ERR(wifi_pkg) || tbl_rev != 0 || wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) { dflt_pwr_limit = 0; goto out_free; } dflt_pwr_limit = 
wifi_pkg->package.elements[1].integer.value; out_free: kfree(data); out: return dflt_pwr_limit; } IWL_EXPORT_SYMBOL(iwl_acpi_get_pwr_limit); int iwl_acpi_get_eckv(struct device *dev, u32 *extl_clk) { union acpi_object *wifi_pkg, *data; int ret, tbl_rev; data = iwl_acpi_get_object(dev, ACPI_ECKV_METHOD); if (IS_ERR(data)) return PTR_ERR(data); wifi_pkg = iwl_acpi_get_wifi_pkg(dev, data, ACPI_ECKV_WIFI_DATA_SIZE, &tbl_rev); if (IS_ERR(wifi_pkg) || tbl_rev != 0) { ret = PTR_ERR(wifi_pkg); goto out_free; } if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) { ret = -EINVAL; goto out_free; } *extl_clk = wifi_pkg->package.elements[1].integer.value; ret = 0; out_free: kfree(data); return ret; } IWL_EXPORT_SYMBOL(iwl_acpi_get_eckv); backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/fw/acpi.h000066400000000000000000000126751351064634500263030ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* *****************************************************************************/ #ifndef __iwl_fw_acpi__ #define __iwl_fw_acpi__ #include <linux/acpi.h> #define ACPI_WRDS_METHOD "WRDS" #define ACPI_EWRD_METHOD "EWRD" #define ACPI_WGDS_METHOD "WGDS" #define ACPI_WRDD_METHOD "WRDD" #define ACPI_SPLC_METHOD "SPLC" #define ACPI_ECKV_METHOD "ECKV" #define ACPI_PPAG_METHOD "PPAG" #define ACPI_WIFI_DOMAIN (0x07) #define ACPI_SAR_TABLE_SIZE 10 #define ACPI_SAR_PROFILE_NUM 4 #define ACPI_GEO_TABLE_SIZE 6 #define ACPI_NUM_GEO_PROFILES 3 #define ACPI_GEO_PER_CHAIN_SIZE 3 #define ACPI_SAR_NUM_CHAIN_LIMITS 2 #define ACPI_SAR_NUM_SUB_BANDS 5 #define ACPI_WRDS_WIFI_DATA_SIZE (ACPI_SAR_TABLE_SIZE + 2) #define ACPI_EWRD_WIFI_DATA_SIZE ((ACPI_SAR_PROFILE_NUM - 1) * \ ACPI_SAR_TABLE_SIZE + 3) #define ACPI_WGDS_WIFI_DATA_SIZE 19 #define ACPI_WRDD_WIFI_DATA_SIZE 2 #define ACPI_SPLC_WIFI_DATA_SIZE 2 #define ACPI_ECKV_WIFI_DATA_SIZE 2 #define ACPI_WGDS_NUM_BANDS 2 #define ACPI_WGDS_TABLE_SIZE 3 #define ACPI_PPAG_NUM_CHAINS 2 #define ACPI_PPAG_NUM_SUB_BANDS 5 #define ACPI_PPAG_WIFI_DATA_SIZE ((ACPI_PPAG_NUM_CHAINS * \ ACPI_PPAG_NUM_SUB_BANDS) + 3) /* PPAG gain value bounds in 1/8 dBm */ #define ACPI_PPAG_MIN_LB -16 #define ACPI_PPAG_MAX_LB 24 #define ACPI_PPAG_MIN_HB -16 #define ACPI_PPAG_MAX_HB 40 #ifdef CONFIG_ACPI void *iwl_acpi_get_object(struct device *dev, acpi_string method); union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev, union acpi_object *data, int data_size, int *tbl_rev); /** * iwl_acpi_get_mcc - read MCC from ACPI, if available * * @dev: the struct device * @mcc: output buffer (3 bytes) that will get the MCC * * This function tries to read the current MCC from ACPI if available. */ int iwl_acpi_get_mcc(struct device *dev, char *mcc); u64 iwl_acpi_get_pwr_limit(struct device *dev); /** * iwl_acpi_get_eckv - read external clock validation from ACPI, if available * * @dev: the struct device * @extl_clk: output var (2 bytes) that will get the clk indication. * * This function tries to read the external clock indication * from ACPI if available. */ int iwl_acpi_get_eckv(struct device *dev, u32 *extl_clk); #else /* CONFIG_ACPI */ static inline void *iwl_acpi_get_object(struct device *dev, acpi_string method) { return ERR_PTR(-ENOENT); } static inline union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev, union acpi_object *data, int data_size, int *tbl_rev) { return ERR_PTR(-ENOENT); } static inline int iwl_acpi_get_mcc(struct device *dev, char *mcc) { return -ENOENT; } static inline u64 iwl_acpi_get_pwr_limit(struct device *dev) { return 0; } static inline int iwl_acpi_get_eckv(struct device *dev, u32 *extl_clk) { return -ENOENT; } #endif /* CONFIG_ACPI */ #endif /* __iwl_fw_acpi__ */ backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/fw/api/000077500000000000000000000000001351064634500257545ustar00rootroot00000000000000backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h000066400000000000000000000160031351064634500272250ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright (C) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright (C) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* *****************************************************************************/ #ifndef __iwl_fw_api_alive_h__ #define __iwl_fw_api_alive_h__ /* alive response is_valid values */ #define ALIVE_RESP_UCODE_OK BIT(0) #define ALIVE_RESP_RFKILL BIT(1) /* alive response ver_type values */ enum { FW_TYPE_HW = 0, FW_TYPE_PROT = 1, FW_TYPE_AP = 2, FW_TYPE_WOWLAN = 3, FW_TYPE_TIMING = 4, FW_TYPE_WIPAN = 5 }; /* alive response ver_subtype values */ enum { FW_SUBTYPE_FULL_FEATURE = 0, FW_SUBTYPE_BOOTSRAP = 1, /* Not valid */ FW_SUBTYPE_REDUCED = 2, FW_SUBTYPE_ALIVE_ONLY = 3, FW_SUBTYPE_WOWLAN = 4, FW_SUBTYPE_AP_SUBTYPE = 5, FW_SUBTYPE_WIPAN = 6, FW_SUBTYPE_INITIALIZE = 9 }; #define IWL_ALIVE_STATUS_ERR 0xDEAD #define IWL_ALIVE_STATUS_OK 0xCAFE #define IWL_ALIVE_FLG_RFKILL BIT(0) struct iwl_lmac_debug_addrs { __le32 error_event_table_ptr; /* SRAM address for error log */ __le32 log_event_table_ptr; /* SRAM address for LMAC event log */ __le32 cpu_register_ptr; __le32 dbgm_config_ptr; __le32 alive_counter_ptr; __le32 scd_base_ptr; /* SRAM address for SCD */ __le32 st_fwrd_addr; /* pointer to Store and forward */ __le32 st_fwrd_size; } __packed; /* UCODE_DEBUG_ADDRS_API_S_VER_2 */ struct iwl_lmac_alive { __le32 ucode_major; __le32 ucode_minor; u8 ver_subtype; u8 ver_type; u8 mac; u8 opt; __le32 timestamp; struct iwl_lmac_debug_addrs dbg_ptrs; } __packed; /* UCODE_ALIVE_NTFY_API_S_VER_3 */ struct iwl_umac_debug_addrs { __le32 error_info_addr; /* SRAM address for UMAC error log */ __le32 dbg_print_buff_addr; } __packed; /* UMAC_DEBUG_ADDRS_API_S_VER_1 */ struct iwl_umac_alive { __le32 umac_major; /* UMAC version: major */ __le32 umac_minor; /* UMAC version: minor */ struct iwl_umac_debug_addrs dbg_ptrs; } __packed; /* UMAC_ALIVE_DATA_API_S_VER_2 */ struct mvm_alive_resp_v3 { __le16 status; __le16 flags; struct iwl_lmac_alive lmac_data; struct iwl_umac_alive umac_data; } __packed; /* ALIVE_RES_API_S_VER_3 */ struct mvm_alive_resp { __le16 status; __le16 flags; struct iwl_lmac_alive lmac_data[2]; struct iwl_umac_alive umac_data; } __packed; /* ALIVE_RES_API_S_VER_4 */ /** * enum iwl_extended_cfg_flags - commands driver may send before * finishing init flow * @IWL_INIT_DEBUG_CFG: driver is going to send debug config command * @IWL_INIT_NVM: driver is going to send NVM_ACCESS commands * @IWL_INIT_PHY: driver is going to send PHY_DB commands */ enum iwl_extended_cfg_flags { IWL_INIT_DEBUG_CFG, IWL_INIT_NVM, IWL_INIT_PHY, }; /** * struct iwl_init_extended_cfg_cmd - mark what commands ucode should wait for * before finishing init flows * @init_flags: values from iwl_extended_cfg_flags */ struct iwl_init_extended_cfg_cmd { __le32 init_flags; } __packed; /* INIT_EXTENDED_CFG_CMD_API_S_VER_1 */ /** * struct iwl_radio_version_notif - information on the radio version * ( RADIO_VERSION_NOTIFICATION = 0x68 ) * @radio_flavor: radio flavor * @radio_step: radio version step * @radio_dash: radio version dash */ struct iwl_radio_version_notif { __le32 radio_flavor; __le32 radio_step; __le32 radio_dash; } __packed; /* RADIO_VERSION_NOTOFICATION_S_VER_1 */ enum iwl_card_state_flags { CARD_ENABLED = 0x00, HW_CARD_DISABLED = 0x01, SW_CARD_DISABLED = 0x02, CT_KILL_CARD_DISABLED = 0x04, HALT_CARD_DISABLED = 0x08, CARD_DISABLED_MSK = 0x0f, CARD_IS_RX_ON = 0x10, }; /** * struct iwl_card_state_notif - information on the card state * ( CARD_STATE_NOTIFICATION = 0xa1 ) * @flags: &enum iwl_card_state_flags */ struct iwl_card_state_notif { __le32 flags; } __packed; /* CARD_STATE_NTFY_API_S_VER_1 */ /** * enum iwl_error_recovery_flags - 
flags for error recovery cmd * @ERROR_RECOVERY_UPDATE_DB: update db from blob sent * @ERROR_RECOVERY_END_OF_RECOVERY: end of recovery */ enum iwl_error_recovery_flags { ERROR_RECOVERY_UPDATE_DB = BIT(0), ERROR_RECOVERY_END_OF_RECOVERY = BIT(1), }; /** * struct iwl_fw_error_recovery_cmd - recovery cmd sent upon assert * @flags: &enum iwl_error_recovery_flags * @buf_size: db buffer size in bytes */ struct iwl_fw_error_recovery_cmd { __le32 flags; __le32 buf_size; } __packed; /* ERROR_RECOVERY_CMD_HDR_API_S_VER_1 */ #endif /* __iwl_fw_api_alive_h__ */ backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/fw/api/binding.h000066400000000000000000000152221351064634500275410ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* *****************************************************************************/ #ifndef __iwl_fw_api_binding_h__ #define __iwl_fw_api_binding_h__ #define MAX_MACS_IN_BINDING (3) #define MAX_BINDINGS (4) /** * struct iwl_binding_cmd_v1 - configuring bindings * ( BINDING_CONTEXT_CMD = 0x2b ) * @id_and_color: ID and color of the relevant Binding, * &enum iwl_ctxt_id_and_color * @action: action to perform, one of FW_CTXT_ACTION_* * @macs: array of MAC id and colors which belong to the binding, * &enum iwl_ctxt_id_and_color * @phy: PHY id and color which belongs to the binding, * &enum iwl_ctxt_id_and_color */ struct iwl_binding_cmd_v1 { /* COMMON_INDEX_HDR_API_S_VER_1 */ __le32 id_and_color; __le32 action; /* BINDING_DATA_API_S_VER_1 */ __le32 macs[MAX_MACS_IN_BINDING]; __le32 phy; } __packed; /* BINDING_CMD_API_S_VER_1 */ /** * struct iwl_binding_cmd - configuring bindings * ( BINDING_CONTEXT_CMD = 0x2b ) * @id_and_color: ID and color of the relevant Binding, * &enum iwl_ctxt_id_and_color * @action: action to perform, one of FW_CTXT_ACTION_* * @macs: array of MAC id and colors which belong to the binding * &enum iwl_ctxt_id_and_color * @phy: PHY id and color which belongs to the binding * &enum iwl_ctxt_id_and_color * @lmac_id: the lmac id the binding belongs to */ struct iwl_binding_cmd { /* COMMON_INDEX_HDR_API_S_VER_1 */ __le32 id_and_color; __le32 action; /* BINDING_DATA_API_S_VER_1 */ __le32 macs[MAX_MACS_IN_BINDING]; __le32 phy; __le32 lmac_id; } __packed; /* BINDING_CMD_API_S_VER_2 */ #define IWL_BINDING_CMD_SIZE_V1 sizeof(struct iwl_binding_cmd_v1) #define IWL_LMAC_24G_INDEX 0 #define IWL_LMAC_5G_INDEX 1 /* The maximal number of fragments in the FW's schedule session */ #define IWL_MVM_MAX_QUOTA 128 /** * struct iwl_time_quota_data_v1 - configuration of time quota per binding * @id_and_color: ID and color of the relevant Binding, * &enum iwl_ctxt_id_and_color * @quota: absolute time quota in TU. The scheduler will try to divide the * remaining quota (after Time Events) according to this quota. * @max_duration: max uninterrupted context duration in TU */ struct iwl_time_quota_data_v1 { __le32 id_and_color; __le32 quota; __le32 max_duration; } __packed; /* TIME_QUOTA_DATA_API_S_VER_1 */ /** * struct iwl_time_quota_cmd_v1 - configuration of time quota between bindings * ( TIME_QUOTA_CMD = 0x2c ) * @quotas: allocations per binding * Note: on non-CDB the fourth one is the auxiliary mac and is * essentially zero. * On CDB the fourth one is a regular binding. */ struct iwl_time_quota_cmd_v1 { struct iwl_time_quota_data_v1 quotas[MAX_BINDINGS]; } __packed; /* TIME_QUOTA_ALLOCATION_CMD_API_S_VER_1 */ enum iwl_quota_low_latency { IWL_QUOTA_LOW_LATENCY_NONE = 0, IWL_QUOTA_LOW_LATENCY_TX = BIT(0), IWL_QUOTA_LOW_LATENCY_RX = BIT(1), IWL_QUOTA_LOW_LATENCY_TX_RX = IWL_QUOTA_LOW_LATENCY_TX | IWL_QUOTA_LOW_LATENCY_RX, }; /** * struct iwl_time_quota_data - configuration of time quota per binding * @id_and_color: ID and color of the relevant Binding. * @quota: absolute time quota in TU. The scheduler will try to divide the remaining quota (after Time Events) according to this quota. 
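*	Illustrative editor's note: with IWL_MVM_MAX_QUOTA = 128, two equally
*	active bindings and no time events would each end up with roughly
*	128 / 2 = 64 quota units per scheduling session.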
* @max_duration: max uninterrupted context duration in TU * @low_latency: low latency status, &enum iwl_quota_low_latency */ struct iwl_time_quota_data { __le32 id_and_color; __le32 quota; __le32 max_duration; __le32 low_latency; } __packed; /* TIME_QUOTA_DATA_API_S_VER_2 */ /** * struct iwl_time_quota_cmd - configuration of time quota between bindings * ( TIME_QUOTA_CMD = 0x2c ) * Note: on non-CDB the fourth one is the auxiliary mac and is essentially zero. * On CDB the fourth one is a regular binding. * * @quotas: allocations per binding */ struct iwl_time_quota_cmd { struct iwl_time_quota_data quotas[MAX_BINDINGS]; } __packed; /* TIME_QUOTA_ALLOCATION_CMD_API_S_VER_2 */ #endif /* __iwl_fw_api_binding_h__ */ backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/fw/api/cmdhdr.h000066400000000000000000000160031351064634500273660ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #ifndef __iwl_fw_api_cmdhdr_h__ #define __iwl_fw_api_cmdhdr_h__ /** * DOC: Host command section * * A host command is a command issued by the upper layer to the fw. There are * several versions of fw that have several APIs. The transport layer is * completely agnostic to these differences. * The transport does provide helper functionality (i.e. SYNC / ASYNC mode). */ #define SEQ_TO_QUEUE(s) (((s) >> 8) & 0x1f) #define QUEUE_TO_SEQ(q) (((q) & 0x1f) << 8) #define SEQ_TO_INDEX(s) ((s) & 0xff) #define INDEX_TO_SEQ(i) ((i) & 0xff) #define SEQ_RX_FRAME cpu_to_le16(0x8000) /* * These functions retrieve specific information from * the id field in the iwl_host_cmd struct, which contains * the command id, the group id and the version of the command; * iwl_cmd_id() performs the reverse composition. */ static inline u8 iwl_cmd_opcode(u32 cmdid) { return cmdid & 0xFF; } static inline u8 iwl_cmd_groupid(u32 cmdid) { return ((cmdid & 0xFF00) >> 8); } static inline u8 iwl_cmd_version(u32 cmdid) { return ((cmdid & 0xFF0000) >> 16); } static inline u32 iwl_cmd_id(u8 opcode, u8 groupid, u8 version) { return opcode + (groupid << 8) + (version << 16); } /* make u16 wide id out of u8 group and opcode */ #define WIDE_ID(grp, opcode) (((grp) << 8) | (opcode)) #define DEF_ID(opcode) ((1 << 8) | (opcode)) /* due to the conversion, this group is special; new groups * should be defined in the appropriate fw-api header files */ #define IWL_ALWAYS_LONG_GROUP 1 /** * struct iwl_cmd_header - (short) command header format * * This header format appears in the beginning of each command sent from the * driver, and each response/notification received from uCode. */ struct iwl_cmd_header { /** * @cmd: Command ID: REPLY_RXON, etc. */ u8 cmd; /** * @group_id: group ID, for commands with groups */ u8 group_id; /** * @sequence: * Sequence number for the command. * * The driver sets up the sequence number to values of its choosing. * uCode does not use this value, but passes it back to the driver * when sending the response to each driver-originated command, so * the driver can match the response to the command. Since the values * don't get used by uCode, the driver may set up an arbitrary format. * * There is one exception: uCode sets bit 15 when it originates * the response/notification, i.e. when the response/notification * is not a direct response to a command sent by the driver. For * example, uCode issues REPLY_RX when it sends a received frame * to the driver; it is not a direct response to any driver command. * * The Linux driver uses the following format: * * 0:7 tfd index - position within TX queue * 8:12 TX queue id * 13:14 reserved * 15 unsolicited RX or uCode-originated notification */ __le16 sequence; } __packed; /** * struct iwl_cmd_header_wide * * This header format appears in the beginning of each command sent from the * driver, and each response/notification received from uCode.
* this is the wide version that contains more information about the command * like length, version and command type * * @cmd: command ID, like in &struct iwl_cmd_header * @group_id: group ID, like in &struct iwl_cmd_header * @sequence: sequence, like in &struct iwl_cmd_header * @length: length of the command * @reserved: reserved * @version: command version */ struct iwl_cmd_header_wide { u8 cmd; u8 group_id; __le16 sequence; __le16 length; u8 reserved; u8 version; } __packed; /** * struct iwl_calib_res_notif_phy_db - Receive phy db chunk after calibrations * @type: type of the result - mostly ignored * @length: length of the data * @data: data, length in @length */ struct iwl_calib_res_notif_phy_db { __le16 type; __le16 length; u8 data[]; } __packed; /** * struct iwl_phy_db_cmd - configure operational ucode * @type: type of the data * @length: length of the data * @data: data, length in @length */ struct iwl_phy_db_cmd { __le16 type; __le16 length; u8 data[]; } __packed; /** * struct iwl_cmd_response - generic response struct for most commands * @status: status of the command asked, changes for each one */ struct iwl_cmd_response { __le32 status; }; #endif /* __iwl_fw_api_cmdhdr_h__ */ backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h000066400000000000000000000176101351064634500270700ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *****************************************************************************/ #ifndef __iwl_fw_api_coex_h__ #define __iwl_fw_api_coex_h__ #include <linux/types.h> #include <linux/bitops.h> #define BITS(nb) (BIT(nb) - 1) enum iwl_bt_coex_lut_type { BT_COEX_TIGHT_LUT = 0, BT_COEX_LOOSE_LUT, BT_COEX_TX_DIS_LUT, BT_COEX_MAX_LUT, BT_COEX_INVALID_LUT = 0xff, }; /* BT_COEX_DECISION_LUT_INDEX_API_E_VER_1 */ #define BT_REDUCED_TX_POWER_BIT BIT(7) enum iwl_bt_coex_mode { BT_COEX_DISABLE = 0x0, BT_COEX_NW = 0x1, BT_COEX_BT = 0x2, BT_COEX_WIFI = 0x3, }; /* BT_COEX_MODES_E */ enum iwl_bt_coex_enabled_modules { BT_COEX_MPLUT_ENABLED = BIT(0), BT_COEX_MPLUT_BOOST_ENABLED = BIT(1), BT_COEX_SYNC2SCO_ENABLED = BIT(2), BT_COEX_CORUN_ENABLED = BIT(3), BT_COEX_HIGH_BAND_RET = BIT(4), }; /* BT_COEX_MODULES_ENABLE_E_VER_1 */ /** * struct iwl_bt_coex_cmd - bt coex configuration command * @mode: &enum iwl_bt_coex_mode * @enabled_modules: &enum iwl_bt_coex_enabled_modules * * The structure is used for the BT_COEX command. */ struct iwl_bt_coex_cmd { __le32 mode; __le32 enabled_modules; } __packed; /* BT_COEX_CMD_API_S_VER_6 */ /** * struct iwl_bt_coex_reduced_txp_update_cmd * @reduced_txp: bit BT_REDUCED_TX_POWER_BIT to enable / disable, rest of the * bits are the sta_id (value) */ struct iwl_bt_coex_reduced_txp_update_cmd { __le32 reduced_txp; } __packed; /* BT_COEX_UPDATE_REDUCED_TX_POWER_API_S_VER_1 */ /** * struct iwl_bt_coex_ci_cmd - bt coex channel inhibition command * @bt_primary_ci: primary channel inhibition bitmap * @primary_ch_phy_id: primary channel PHY ID * @bt_secondary_ci: secondary channel inhibition bitmap * @secondary_ch_phy_id: secondary channel PHY ID * * Used for BT_COEX_CI command */ struct iwl_bt_coex_ci_cmd { __le64 bt_primary_ci; __le32 primary_ch_phy_id; __le64 bt_secondary_ci; __le32 secondary_ch_phy_id; } __packed; /* BT_CI_MSG_API_S_VER_2 */ #define BT_MBOX(n_dw, _msg, _pos, _nbits) \ BT_MBOX##n_dw##_##_msg##_POS = (_pos), \ BT_MBOX##n_dw##_##_msg = BITS(_nbits) << BT_MBOX##n_dw##_##_msg##_POS enum iwl_bt_mxbox_dw0 { BT_MBOX(0, LE_SLAVE_LAT, 0, 3), BT_MBOX(0, LE_PROF1, 3, 1), BT_MBOX(0, LE_PROF2, 4, 1), BT_MBOX(0, LE_PROF_OTHER, 5, 1), BT_MBOX(0, CHL_SEQ_N, 8, 4), BT_MBOX(0, INBAND_S, 13, 1), BT_MBOX(0, LE_MIN_RSSI, 16, 4), BT_MBOX(0, LE_SCAN, 20, 1), BT_MBOX(0, LE_ADV, 21, 1), BT_MBOX(0, LE_MAX_TX_POWER, 24, 4), BT_MBOX(0, OPEN_CON_1, 28, 2), }; enum iwl_bt_mxbox_dw1 { BT_MBOX(1, BR_MAX_TX_POWER, 0, 4), BT_MBOX(1, IP_SR, 4, 1), BT_MBOX(1, LE_MSTR, 5, 1), BT_MBOX(1, AGGR_TRFC_LD, 8, 6), BT_MBOX(1, MSG_TYPE, 16, 3), BT_MBOX(1, SSN, 19, 2), }; enum iwl_bt_mxbox_dw2 { BT_MBOX(2, SNIFF_ACT, 0, 3), BT_MBOX(2, PAG, 3, 1), BT_MBOX(2, INQUIRY, 4, 1), BT_MBOX(2, CONN, 5, 1), BT_MBOX(2, SNIFF_INTERVAL, 8, 5), BT_MBOX(2, DISC, 13, 1), BT_MBOX(2, SCO_TX_ACT, 16, 2),
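	/*
	 * Worked example (editor's illustration): the BT_MBOX() macro above
	 * expands BT_MBOX(2, SCO_TX_ACT, 16, 2) into the pair
	 * BT_MBOX2_SCO_TX_ACT_POS = 16 and
	 * BT_MBOX2_SCO_TX_ACT = BITS(2) << 16 = 0x30000,
	 * i.e. bits 16..17 of mailbox dword 2, which BT_MBOX_MSG() below
	 * masks and shifts back out of the notification.
	 */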
BT_MBOX(2, SCO_RX_ACT, 18, 2), BT_MBOX(2, ESCO_RE_TX, 20, 2), BT_MBOX(2, SCO_DURATION, 24, 6), }; enum iwl_bt_mxbox_dw3 { BT_MBOX(3, SCO_STATE, 0, 1), BT_MBOX(3, SNIFF_STATE, 1, 1), BT_MBOX(3, A2DP_STATE, 2, 1), BT_MBOX(3, ACL_STATE, 3, 1), BT_MBOX(3, MSTR_STATE, 4, 1), BT_MBOX(3, OBX_STATE, 5, 1), BT_MBOX(3, A2DP_SRC, 6, 1), BT_MBOX(3, OPEN_CON_2, 8, 2), BT_MBOX(3, TRAFFIC_LOAD, 10, 2), BT_MBOX(3, CHL_SEQN_LSB, 12, 1), BT_MBOX(3, INBAND_P, 13, 1), BT_MBOX(3, MSG_TYPE_2, 16, 3), BT_MBOX(3, SSN_2, 19, 2), BT_MBOX(3, UPDATE_REQUEST, 21, 1), }; #define BT_MBOX_MSG(_notif, _num, _field) \ ((le32_to_cpu((_notif)->mbox_msg[(_num)]) & BT_MBOX##_num##_##_field)\ >> BT_MBOX##_num##_##_field##_POS) #define BT_MBOX_PRINT(_num, _field, _end) \ pos += scnprintf(buf + pos, bufsz - pos, \ "\t%s: %d%s", \ #_field, \ BT_MBOX_MSG(notif, _num, _field), \ true ? "\n" : ", "); enum iwl_bt_activity_grading { BT_OFF = 0, BT_ON_NO_CONNECTION = 1, BT_LOW_TRAFFIC = 2, BT_HIGH_TRAFFIC = 3, BT_VERY_HIGH_TRAFFIC = 4, BT_MAX_AG, }; /* BT_COEX_BT_ACTIVITY_GRADING_API_E_VER_1 */ enum iwl_bt_ci_compliance { BT_CI_COMPLIANCE_NONE = 0, BT_CI_COMPLIANCE_PRIMARY = 1, BT_CI_COMPLIANCE_SECONDARY = 2, BT_CI_COMPLIANCE_BOTH = 3, }; /* BT_COEX_CI_COMPLIENCE_E_VER_1 */ /** * struct iwl_bt_coex_profile_notif - notification about BT coex * @mbox_msg: message from BT to WiFi * @msg_idx: the index of the message * @bt_ci_compliance: enum %iwl_bt_ci_compliance * @primary_ch_lut: LUT used for primary channel &enum iwl_bt_coex_lut_type * @secondary_ch_lut: LUT used for secondary channel &enum iwl_bt_coex_lut_type * @bt_activity_grading: the activity of BT &enum iwl_bt_activity_grading * @ttc_status: is TTC enabled - one bit per PHY * @rrc_status: is RRC enabled - one bit per PHY * @reserved: reserved */ struct iwl_bt_coex_profile_notif { __le32 mbox_msg[4]; __le32 msg_idx; __le32 bt_ci_compliance; __le32 primary_ch_lut; __le32 secondary_ch_lut; __le32 bt_activity_grading; u8 ttc_status; u8 rrc_status; __le16 reserved; } __packed; /* BT_COEX_PROFILE_NTFY_API_S_VER_4 */ #endif /* __iwl_fw_api_coex_h__ */ backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h000066400000000000000000000401221351064634500277250ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #ifndef __iwl_fw_api_commands_h__ #define __iwl_fw_api_commands_h__ /** * enum iwl_mvm_command_groups - command groups for the firmware * @LEGACY_GROUP: legacy group, uses command IDs from &enum iwl_legacy_cmds * @LONG_GROUP: legacy group with long header, also uses command IDs * from &enum iwl_legacy_cmds * @SYSTEM_GROUP: system group, uses command IDs from * &enum iwl_system_subcmd_ids * @MAC_CONF_GROUP: MAC configuration group, uses command IDs from * &enum iwl_mac_conf_subcmd_ids * @PHY_OPS_GROUP: PHY operations group, uses command IDs from * &enum iwl_phy_ops_subcmd_ids * @DATA_PATH_GROUP: data path group, uses command IDs from * &enum iwl_data_path_subcmd_ids * @NAN_GROUP: NAN group, uses command IDs from &enum iwl_nan_subcmd_ids * @LOCATION_GROUP: location group, uses command IDs from * &enum iwl_location_subcmd_ids * @PROT_OFFLOAD_GROUP: protocol offload group, uses command IDs from * &enum iwl_prot_offload_subcmd_ids * @REGULATORY_AND_NVM_GROUP: regulatory/NVM group, uses command IDs from * &enum iwl_regulatory_and_nvm_subcmd_ids * @XVT_GROUP: XVT group, uses command IDs from &enum iwl_xvt_subcmd_ids * @DEBUG_GROUP: Debug group, uses command IDs from &enum iwl_debug_cmds */ enum iwl_mvm_command_groups { LEGACY_GROUP = 0x0, LONG_GROUP = 0x1, SYSTEM_GROUP = 0x2, MAC_CONF_GROUP = 0x3, PHY_OPS_GROUP = 0x4, DATA_PATH_GROUP = 0x5, NAN_GROUP = 0x7, LOCATION_GROUP = 0x8, PROT_OFFLOAD_GROUP = 0xb, REGULATORY_AND_NVM_GROUP = 0xc, XVT_GROUP = 0xe, DEBUG_GROUP = 0xf, }; /** * enum iwl_legacy_cmds - legacy group command IDs */ enum iwl_legacy_cmds { /** * @MVM_ALIVE: * Alive data from the firmware, as described in * &struct mvm_alive_resp_v3 or &struct mvm_alive_resp. */ MVM_ALIVE = 0x1, /** * @REPLY_ERROR: Cause an error in the firmware, for testing purposes. 
*/ REPLY_ERROR = 0x2, /** * @ECHO_CMD: Send data to the device to have it returned immediately. */ ECHO_CMD = 0x3, /** * @INIT_COMPLETE_NOTIF: Notification that initialization is complete. */ INIT_COMPLETE_NOTIF = 0x4, /** * @PHY_CONTEXT_CMD: * Add/modify/remove a PHY context, using &struct iwl_phy_context_cmd. */ PHY_CONTEXT_CMD = 0x8, /** * @DBG_CFG: Debug configuration command. */ DBG_CFG = 0x9, /** * @SCAN_ITERATION_COMPLETE_UMAC: * Firmware indicates a scan iteration completed, using * &struct iwl_umac_scan_iter_complete_notif. */ SCAN_ITERATION_COMPLETE_UMAC = 0xb5, /** * @SCAN_CFG_CMD: * uses &struct iwl_scan_config_v1 or &struct iwl_scan_config */ SCAN_CFG_CMD = 0xc, /** * @SCAN_REQ_UMAC: uses &struct iwl_scan_req_umac */ SCAN_REQ_UMAC = 0xd, /** * @SCAN_ABORT_UMAC: uses &struct iwl_umac_scan_abort */ SCAN_ABORT_UMAC = 0xe, /** * @SCAN_COMPLETE_UMAC: uses &struct iwl_umac_scan_complete */ SCAN_COMPLETE_UMAC = 0xf, /** * @BA_WINDOW_STATUS_NOTIFICATION_ID: * uses &struct iwl_ba_window_status_notif */ BA_WINDOW_STATUS_NOTIFICATION_ID = 0x13, /** * @ADD_STA_KEY: * &struct iwl_mvm_add_sta_key_cmd_v1 or * &struct iwl_mvm_add_sta_key_cmd. */ ADD_STA_KEY = 0x17, /** * @ADD_STA: * &struct iwl_mvm_add_sta_cmd or &struct iwl_mvm_add_sta_cmd_v7. */ ADD_STA = 0x18, /** * @REMOVE_STA: &struct iwl_mvm_rm_sta_cmd */ REMOVE_STA = 0x19, /** * @FW_GET_ITEM_CMD: uses &struct iwl_fw_get_item_cmd */ FW_GET_ITEM_CMD = 0x1a, /** * @TX_CMD: uses &struct iwl_tx_cmd or &struct iwl_tx_cmd_gen2 or * &struct iwl_tx_cmd_gen3, * response in &struct iwl_mvm_tx_resp or * &struct iwl_mvm_tx_resp_v3 */ TX_CMD = 0x1c, /** * @TXPATH_FLUSH: &struct iwl_tx_path_flush_cmd */ TXPATH_FLUSH = 0x1e, /** * @MGMT_MCAST_KEY: * &struct iwl_mvm_mgmt_mcast_key_cmd or * &struct iwl_mvm_mgmt_mcast_key_cmd_v1 */ MGMT_MCAST_KEY = 0x1f, /* scheduler config */ /** * @SCD_QUEUE_CFG: &struct iwl_scd_txq_cfg_cmd for older hardware, * &struct iwl_tx_queue_cfg_cmd with &struct iwl_tx_queue_cfg_rsp * for newer (22000) hardware. 
*/ SCD_QUEUE_CFG = 0x1d, /** * @WEP_KEY: uses &struct iwl_mvm_wep_key_cmd */ WEP_KEY = 0x20, /** * @SHARED_MEM_CFG: * retrieve shared memory configuration - response in * &struct iwl_shared_mem_cfg */ SHARED_MEM_CFG = 0x25, /** * @TDLS_CHANNEL_SWITCH_CMD: uses &struct iwl_tdls_channel_switch_cmd */ TDLS_CHANNEL_SWITCH_CMD = 0x27, /** * @TDLS_CHANNEL_SWITCH_NOTIFICATION: * uses &struct iwl_tdls_channel_switch_notif */ TDLS_CHANNEL_SWITCH_NOTIFICATION = 0xaa, /** * @TDLS_CONFIG_CMD: * &struct iwl_tdls_config_cmd, response in &struct iwl_tdls_config_res */ TDLS_CONFIG_CMD = 0xa7, /** * @MAC_CONTEXT_CMD: &struct iwl_mac_ctx_cmd */ MAC_CONTEXT_CMD = 0x28, /** * @TIME_EVENT_CMD: * &struct iwl_time_event_cmd, response in &struct iwl_time_event_resp */ TIME_EVENT_CMD = 0x29, /* both CMD and response */ /** * @TIME_EVENT_NOTIFICATION: &struct iwl_time_event_notif */ TIME_EVENT_NOTIFICATION = 0x2a, /** * @BINDING_CONTEXT_CMD: * &struct iwl_binding_cmd or &struct iwl_binding_cmd_v1 */ BINDING_CONTEXT_CMD = 0x2b, /** * @TIME_QUOTA_CMD: &struct iwl_time_quota_cmd */ TIME_QUOTA_CMD = 0x2c, /** * @NON_QOS_TX_COUNTER_CMD: * command is &struct iwl_nonqos_seq_query_cmd */ NON_QOS_TX_COUNTER_CMD = 0x2d, /** * @FIPS_TEST_VECTOR_CMD: command is &struct iwl_fips_test_cmd */ FIPS_TEST_VECTOR_CMD = 0x3b, /** * @LEDS_CMD: command is &struct iwl_led_cmd */ LEDS_CMD = 0x48, /** * @LQ_CMD: using &struct iwl_lq_cmd */ LQ_CMD = 0x4e, /** * @FW_PAGING_BLOCK_CMD: * &struct iwl_fw_paging_cmd */ FW_PAGING_BLOCK_CMD = 0x4f, /** * @SCAN_OFFLOAD_REQUEST_CMD: uses &struct iwl_scan_req_lmac */ SCAN_OFFLOAD_REQUEST_CMD = 0x51, /** * @SCAN_OFFLOAD_ABORT_CMD: abort the scan - no further contents */ SCAN_OFFLOAD_ABORT_CMD = 0x52, /** * @HOT_SPOT_CMD: uses &struct iwl_hs20_roc_req */ HOT_SPOT_CMD = 0x53, /** * @SCAN_OFFLOAD_COMPLETE: * notification, &struct iwl_periodic_scan_complete */ SCAN_OFFLOAD_COMPLETE = 0x6D, /** * @SCAN_OFFLOAD_UPDATE_PROFILES_CMD: * update scan offload (scheduled scan) profiles/blacklist/etc. */ SCAN_OFFLOAD_UPDATE_PROFILES_CMD = 0x6E, /** * @MATCH_FOUND_NOTIFICATION: scan match found */ MATCH_FOUND_NOTIFICATION = 0xd9, /** * @SCAN_ITERATION_COMPLETE: * uses &struct iwl_lmac_scan_complete_notif */ SCAN_ITERATION_COMPLETE = 0xe7, /* Phy */ /** * @PHY_CONFIGURATION_CMD: &struct iwl_phy_cfg_cmd */ PHY_CONFIGURATION_CMD = 0x6a, /** * @CALIB_RES_NOTIF_PHY_DB: &struct iwl_calib_res_notif_phy_db */ CALIB_RES_NOTIF_PHY_DB = 0x6b, /** * @PHY_DB_CMD: &struct iwl_phy_db_cmd */ PHY_DB_CMD = 0x6c, /** * @CONFIG_2G_COEX_CMD: &struct iwl_config_2g_coex_cmd */ CONFIG_2G_COEX_CMD = 0x71, /** * @POWER_TABLE_CMD: &struct iwl_device_power_cmd */ POWER_TABLE_CMD = 0x77, /** * @PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION: * &struct iwl_uapsd_misbehaving_ap_notif */ PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION = 0x78, /** * @LTR_CONFIG: &struct iwl_ltr_config_cmd */ LTR_CONFIG = 0xee, /** * @REPLY_THERMAL_MNG_BACKOFF: * Thermal throttling command */ REPLY_THERMAL_MNG_BACKOFF = 0x7e, /** * @DC2DC_CONFIG_CMD: * Set/Get DC2DC frequency tune * Command is &struct iwl_dc2dc_config_cmd, * response is &struct iwl_dc2dc_config_resp */ DC2DC_CONFIG_CMD = 0x83, /** * @NVM_ACCESS_CMD: using &struct iwl_nvm_access_cmd */ NVM_ACCESS_CMD = 0x88, /** * @BEACON_NOTIFICATION: &struct iwl_extended_beacon_notif */ BEACON_NOTIFICATION = 0x90, /** * @BEACON_TEMPLATE_CMD: * Uses one of &struct iwl_mac_beacon_cmd_v6, * &struct iwl_mac_beacon_cmd_v7 or &struct iwl_mac_beacon_cmd * depending on the device version. 
*/ BEACON_TEMPLATE_CMD = 0x91, /** * @TX_ANT_CONFIGURATION_CMD: &struct iwl_tx_ant_cfg_cmd */ TX_ANT_CONFIGURATION_CMD = 0x98, /** * @STATISTICS_CMD: * one of &struct iwl_statistics_cmd, * &struct iwl_notif_statistics_v11, * &struct iwl_notif_statistics_v10, * &struct iwl_notif_statistics */ STATISTICS_CMD = 0x9c, /** * @STATISTICS_NOTIFICATION: * one of &struct iwl_notif_statistics_v10, * &struct iwl_notif_statistics_v11, * &struct iwl_notif_statistics */ STATISTICS_NOTIFICATION = 0x9d, /** * @EOSP_NOTIFICATION: * Notify that a service period ended, * &struct iwl_mvm_eosp_notification */ EOSP_NOTIFICATION = 0x9e, /** * @REDUCE_TX_POWER_CMD: * &struct iwl_dev_tx_power_cmd_v3 or &struct iwl_dev_tx_power_cmd_v4 * or &struct iwl_dev_tx_power_cmd */ REDUCE_TX_POWER_CMD = 0x9f, /** * @CARD_STATE_NOTIFICATION: * Card state (RF/CT kill) notification, * uses &struct iwl_card_state_notif */ CARD_STATE_NOTIFICATION = 0xa1, /** * @MISSED_BEACONS_NOTIFICATION: &struct iwl_missed_beacons_notif */ MISSED_BEACONS_NOTIFICATION = 0xa2, /** * @MAC_PM_POWER_TABLE: using &struct iwl_mac_power_cmd */ MAC_PM_POWER_TABLE = 0xa9, /** * @MFUART_LOAD_NOTIFICATION: &struct iwl_mfuart_load_notif */ MFUART_LOAD_NOTIFICATION = 0xb1, /** * @RSS_CONFIG_CMD: &struct iwl_rss_config_cmd */ RSS_CONFIG_CMD = 0xb3, /** * @REPLY_RX_PHY_CMD: &struct iwl_rx_phy_info */ REPLY_RX_PHY_CMD = 0xc0, /** * @REPLY_RX_MPDU_CMD: * &struct iwl_rx_mpdu_res_start or &struct iwl_rx_mpdu_desc */ REPLY_RX_MPDU_CMD = 0xc1, /** * @FRAME_RELEASE: * Frame release (reorder helper) notification, uses * &struct iwl_frame_release */ FRAME_RELEASE = 0xc3, /** * @BA_NOTIF: * BlockAck notification, uses &struct iwl_mvm_compressed_ba_notif * or &struct iwl_mvm_ba_notif depending on the HW */ BA_NOTIF = 0xc5, /* Location Aware Regulatory */ /** * @MCC_UPDATE_CMD: using &struct iwl_mcc_update_cmd */ MCC_UPDATE_CMD = 0xc8, /** * @MCC_CHUB_UPDATE_CMD: using &struct iwl_mcc_chub_notif */ MCC_CHUB_UPDATE_CMD = 0xc9, /** * @MARKER_CMD: trace marker command, uses &struct iwl_mvm_marker * with &struct iwl_mvm_marker_rsp */ MARKER_CMD = 0xcb, /** * @BT_PROFILE_NOTIFICATION: &struct iwl_bt_coex_profile_notif */ BT_PROFILE_NOTIFICATION = 0xce, /** * @BT_CONFIG: &struct iwl_bt_coex_cmd */ BT_CONFIG = 0x9b, /** * @BT_COEX_UPDATE_REDUCED_TXP: * &struct iwl_bt_coex_reduced_txp_update_cmd */ BT_COEX_UPDATE_REDUCED_TXP = 0x5c, /** * @BT_COEX_CI: &struct iwl_bt_coex_ci_cmd */ BT_COEX_CI = 0x5d, /** * @REPLY_SF_CFG_CMD: &struct iwl_sf_cfg_cmd */ REPLY_SF_CFG_CMD = 0xd1, /** * @REPLY_BEACON_FILTERING_CMD: &struct iwl_beacon_filter_cmd */ REPLY_BEACON_FILTERING_CMD = 0xd2, /** * @DTS_MEASUREMENT_NOTIFICATION: * &struct iwl_dts_measurement_notif_v1 or * &struct iwl_dts_measurement_notif_v2 */ DTS_MEASUREMENT_NOTIFICATION = 0xdd, /** * @LDBG_CONFIG_CMD: configure continuous trace recording */ LDBG_CONFIG_CMD = 0xf6, /** * @DEBUG_LOG_MSG: Debugging log data from firmware */ DEBUG_LOG_MSG = 0xf7, /** * @BCAST_FILTER_CMD: &struct iwl_bcast_filter_cmd */ BCAST_FILTER_CMD = 0xcf, /** * @MCAST_FILTER_CMD: &struct iwl_mcast_filter_cmd */ MCAST_FILTER_CMD = 0xd0, /** * @D3_CONFIG_CMD: &struct iwl_d3_manager_config */ D3_CONFIG_CMD = 0xd3, /** * @PROT_OFFLOAD_CONFIG_CMD: Depending on firmware, uses one of * &struct iwl_proto_offload_cmd_v1, &struct iwl_proto_offload_cmd_v2, * &struct iwl_proto_offload_cmd_v3_small, * &struct iwl_proto_offload_cmd_v3_large */ PROT_OFFLOAD_CONFIG_CMD = 0xd4, /** * @OFFLOADS_QUERY_CMD: * No data in command, response in &struct iwl_wowlan_status */ 
OFFLOADS_QUERY_CMD = 0xd5, /** * @REMOTE_WAKE_CONFIG_CMD: &struct iwl_wowlan_remote_wake_config */ REMOTE_WAKE_CONFIG_CMD = 0xd6, /** * @D0I3_END_CMD: End D0i3/D3 state, no command data */ D0I3_END_CMD = 0xed, /** * @WOWLAN_PATTERNS: &struct iwl_wowlan_patterns_cmd */ WOWLAN_PATTERNS = 0xe0, /** * @WOWLAN_CONFIGURATION: &struct iwl_wowlan_config_cmd */ WOWLAN_CONFIGURATION = 0xe1, /** * @WOWLAN_TSC_RSC_PARAM: &struct iwl_wowlan_rsc_tsc_params_cmd */ WOWLAN_TSC_RSC_PARAM = 0xe2, /** * @WOWLAN_TKIP_PARAM: &struct iwl_wowlan_tkip_params_cmd */ WOWLAN_TKIP_PARAM = 0xe3, /** * @WOWLAN_KEK_KCK_MATERIAL: &struct iwl_wowlan_kek_kck_material_cmd */ WOWLAN_KEK_KCK_MATERIAL = 0xe4, /** * @WOWLAN_GET_STATUSES: response in &struct iwl_wowlan_status */ WOWLAN_GET_STATUSES = 0xe5, /** * @SCAN_OFFLOAD_PROFILES_QUERY_CMD: * No command data, response is &struct iwl_scan_offload_profiles_query */ SCAN_OFFLOAD_PROFILES_QUERY_CMD = 0x56, }; /** * enum iwl_system_subcmd_ids - system group command IDs */ enum iwl_system_subcmd_ids { /** * @SHARED_MEM_CFG_CMD: * response in &struct iwl_shared_mem_cfg or * &struct iwl_shared_mem_cfg_v2 */ SHARED_MEM_CFG_CMD = 0x0, /** * @SOC_CONFIGURATION_CMD: &struct iwl_soc_configuration_cmd */ SOC_CONFIGURATION_CMD = 0x01, /** * @INIT_EXTENDED_CFG_CMD: &struct iwl_init_extended_cfg_cmd */ INIT_EXTENDED_CFG_CMD = 0x03, /** * @FW_ERROR_RECOVERY_CMD: &struct iwl_fw_error_recovery_cmd */ FW_ERROR_RECOVERY_CMD = 0x7, }; /** * enum iwl_xvt_subcmd_ids - XVT group command IDs */ enum iwl_xvt_subcmd_ids { /** * @IQ_CALIB_CONFIG_NOTIF : Notification about IQ calibration finished * Handled by user space component */ IQ_CALIB_CONFIG_NOTIF = 0xFF, }; #endif /* __iwl_fw_api_commands_h__ */ backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/fw/api/config.h000066400000000000000000000141011351064634500273670ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright (C) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright (C) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. 
* * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #ifndef __iwl_fw_api_config_h__ #define __iwl_fw_api_config_h__ /* * struct iwl_dqa_enable_cmd * @cmd_queue: the TXQ number of the command queue */ struct iwl_dqa_enable_cmd { __le32 cmd_queue; } __packed; /* DQA_CONTROL_CMD_API_S_VER_1 */ /* * struct iwl_tx_ant_cfg_cmd * @valid: valid antenna configuration */ struct iwl_tx_ant_cfg_cmd { __le32 valid; } __packed; /** * struct iwl_calib_ctrl - Calibration control struct. * Sent as part of the phy configuration command. * @flow_trigger: bitmap for which calibrations to perform according to * flow triggers, using &enum iwl_calib_cfg * @event_trigger: bitmap for which calibrations to perform according to * event triggers, using &enum iwl_calib_cfg */ struct iwl_calib_ctrl { __le32 flow_trigger; __le32 event_trigger; } __packed; /* This enum defines the bitmap of various calibrations to enable in both * init ucode and runtime ucode through CALIBRATION_CFG_CMD. */ enum iwl_calib_cfg { IWL_CALIB_CFG_XTAL_IDX = BIT(0), IWL_CALIB_CFG_TEMPERATURE_IDX = BIT(1), IWL_CALIB_CFG_VOLTAGE_READ_IDX = BIT(2), IWL_CALIB_CFG_PAPD_IDX = BIT(3), IWL_CALIB_CFG_TX_PWR_IDX = BIT(4), IWL_CALIB_CFG_DC_IDX = BIT(5), IWL_CALIB_CFG_BB_FILTER_IDX = BIT(6), IWL_CALIB_CFG_LO_LEAKAGE_IDX = BIT(7), IWL_CALIB_CFG_TX_IQ_IDX = BIT(8), IWL_CALIB_CFG_TX_IQ_SKEW_IDX = BIT(9), IWL_CALIB_CFG_RX_IQ_IDX = BIT(10), IWL_CALIB_CFG_RX_IQ_SKEW_IDX = BIT(11), IWL_CALIB_CFG_SENSITIVITY_IDX = BIT(12), IWL_CALIB_CFG_CHAIN_NOISE_IDX = BIT(13), IWL_CALIB_CFG_DISCONNECTED_ANT_IDX = BIT(14), IWL_CALIB_CFG_ANT_COUPLING_IDX = BIT(15), IWL_CALIB_CFG_DAC_IDX = BIT(16), IWL_CALIB_CFG_ABS_IDX = BIT(17), IWL_CALIB_CFG_AGC_IDX = BIT(18), }; /** * struct iwl_phy_cfg_cmd - Phy configuration command * @phy_cfg: PHY configuration value, uses &enum iwl_fw_phy_cfg * @calib_control: calibration control data */ struct iwl_phy_cfg_cmd { __le32 phy_cfg; struct iwl_calib_ctrl calib_control; } __packed; /* * enum iwl_dc2dc_config_id - flag ids * * Ids of dc2dc configuration flags */ enum iwl_dc2dc_config_id { DCDC_LOW_POWER_MODE_MSK_SET = 0x1, /* not used */ DCDC_FREQ_TUNE_SET = 0x2, }; /* MARKER_ID_API_E_VER_1 */ /** * struct iwl_dc2dc_config_cmd - configure dc2dc values * * (DC2DC_CONFIG_CMD = 0x83) * * Set/Get & configure dc2dc values. 
* The command always returns the current dc2dc values. * * @flags: set/get dc2dc * @enable_low_power_mode: not used. * @dc2dc_freq_tune0: frequency divider - digital domain * @dc2dc_freq_tune1: frequency divider - analog domain */ struct iwl_dc2dc_config_cmd { __le32 flags; __le32 enable_low_power_mode; /* not used */ __le32 dc2dc_freq_tune0; __le32 dc2dc_freq_tune1; } __packed; /* DC2DC_CONFIG_CMD_API_S_VER_1 */ /** * struct iwl_dc2dc_config_resp - response for iwl_dc2dc_config_cmd * * Current dc2dc values returned by the FW. * * @dc2dc_freq_tune0: frequency divider - digital domain * @dc2dc_freq_tune1: frequency divider - analog domain */ struct iwl_dc2dc_config_resp { __le32 dc2dc_freq_tune0; __le32 dc2dc_freq_tune1; } __packed; /* DC2DC_CONFIG_RESP_API_S_VER_1 */ #endif /* __iwl_fw_api_config_h__ */ backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/fw/api/context.h000066400000000000000000000074711351064634500276220ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #ifndef __iwl_fw_api_context_h__ #define __iwl_fw_api_context_h__ /** * enum iwl_ctxt_id_and_color - ID and color fields in context dword * @FW_CTXT_ID_POS: position of the ID * @FW_CTXT_ID_MSK: mask of the ID * @FW_CTXT_COLOR_POS: position of the color * @FW_CTXT_COLOR_MSK: mask of the color * @FW_CTXT_INVALID: value used to indicate unused/invalid */ enum iwl_ctxt_id_and_color { FW_CTXT_ID_POS = 0, FW_CTXT_ID_MSK = 0xff << FW_CTXT_ID_POS, FW_CTXT_COLOR_POS = 8, FW_CTXT_COLOR_MSK = 0xff << FW_CTXT_COLOR_POS, FW_CTXT_INVALID = 0xffffffff, }; #define FW_CMD_ID_AND_COLOR(_id, _color) (((_id) << FW_CTXT_ID_POS) |\ ((_color) << FW_CTXT_COLOR_POS)) /* Possible actions on PHYs, MACs and Bindings */ enum iwl_ctxt_action { FW_CTXT_ACTION_STUB = 0, FW_CTXT_ACTION_ADD, FW_CTXT_ACTION_MODIFY, FW_CTXT_ACTION_REMOVE, FW_CTXT_ACTION_NUM }; /* COMMON_CONTEXT_ACTION_API_E_VER_1 */ #endif /* __iwl_fw_api_context_h__ */ backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h000066400000000000000000000520241351064634500264360ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. 
* * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *****************************************************************************/ #ifndef __iwl_fw_api_d3_h__ #define __iwl_fw_api_d3_h__ /** * enum iwl_d3_wakeup_flags - D3 manager wakeup flags * @IWL_WAKEUP_D3_CONFIG_FW_ERROR: wake up on firmware sysassert */ enum iwl_d3_wakeup_flags { IWL_WAKEUP_D3_CONFIG_FW_ERROR = BIT(0), }; /* D3_MANAGER_WAKEUP_CONFIG_API_E_VER_3 */ /** * struct iwl_d3_manager_config - D3 manager configuration command * @min_sleep_time: minimum sleep time (in usec) * @wakeup_flags: wakeup flags, see &enum iwl_d3_wakeup_flags * @wakeup_host_timer: force wakeup after this many seconds * * The structure is used for the D3_CONFIG_CMD command. 
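 *
 * Editor's sketch (illustrative, not firmware documentation): a driver
 * that only wants to wake the host on a firmware sysassert would set
 * @wakeup_flags to cpu_to_le32(IWL_WAKEUP_D3_CONFIG_FW_ERROR) and could
 * leave @wakeup_host_timer at 0, assuming here that 0 disarms the
 * forced wakeup.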
*/ struct iwl_d3_manager_config { __le32 min_sleep_time; __le32 wakeup_flags; __le32 wakeup_host_timer; } __packed; /* D3_MANAGER_CONFIG_CMD_S_VER_4 */ /* TODO: OFFLOADS_QUERY_API_S_VER_1 */ /** * enum iwl_proto_offloads - enabled protocol offloads * @IWL_D3_PROTO_OFFLOAD_ARP: ARP data is enabled * @IWL_D3_PROTO_OFFLOAD_NS: NS (Neighbor Solicitation) is enabled * @IWL_D3_PROTO_IPV4_VALID: IPv4 data is valid * @IWL_D3_PROTO_IPV6_VALID: IPv6 data is valid */ enum iwl_proto_offloads { IWL_D3_PROTO_OFFLOAD_ARP = BIT(0), IWL_D3_PROTO_OFFLOAD_NS = BIT(1), IWL_D3_PROTO_IPV4_VALID = BIT(2), IWL_D3_PROTO_IPV6_VALID = BIT(3), }; #define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V1 2 #define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V2 6 #define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3L 12 #define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3S 4 #define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX 12 #define IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L 4 #define IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3S 2 /** * struct iwl_proto_offload_cmd_common - ARP/NS offload common part * @enabled: enable flags * @remote_ipv4_addr: remote address to answer to (or zero if all) * @host_ipv4_addr: our IPv4 address to respond to queries for * @arp_mac_addr: our MAC address for ARP responses * @reserved: unused */ struct iwl_proto_offload_cmd_common { __le32 enabled; __be32 remote_ipv4_addr; __be32 host_ipv4_addr; u8 arp_mac_addr[ETH_ALEN]; __le16 reserved; } __packed; /** * struct iwl_proto_offload_cmd_v1 - ARP/NS offload configuration * @common: common/IPv4 configuration * @remote_ipv6_addr: remote address to answer to (or zero if all) * @solicited_node_ipv6_addr: broken -- solicited node address exists * for each target address * @target_ipv6_addr: our target addresses * @ndp_mac_addr: neighbor solicitation response MAC address * @reserved2: reserved */ struct iwl_proto_offload_cmd_v1 { struct iwl_proto_offload_cmd_common common; u8 remote_ipv6_addr[16]; u8 solicited_node_ipv6_addr[16]; u8 target_ipv6_addr[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V1][16]; u8 ndp_mac_addr[ETH_ALEN]; __le16 reserved2; } __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_1 */ /** * struct iwl_proto_offload_cmd_v2 - ARP/NS offload configuration * @common: common/IPv4 configuration * @remote_ipv6_addr: remote address to answer to (or zero if all) * @solicited_node_ipv6_addr: broken -- solicited node address exists * for each target address * @target_ipv6_addr: our target addresses * @ndp_mac_addr: neighbor solicitation response MAC address * @num_valid_ipv6_addrs: number of valid IPv6 addresses * @reserved2: reserved */ struct iwl_proto_offload_cmd_v2 { struct iwl_proto_offload_cmd_common common; u8 remote_ipv6_addr[16]; u8 solicited_node_ipv6_addr[16]; u8 target_ipv6_addr[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V2][16]; u8 ndp_mac_addr[ETH_ALEN]; u8 num_valid_ipv6_addrs; u8 reserved2[3]; } __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_2 */ struct iwl_ns_config { struct in6_addr source_ipv6_addr; struct in6_addr dest_ipv6_addr; u8 target_mac_addr[ETH_ALEN]; __le16 reserved; } __packed; /* NS_OFFLOAD_CONFIG */ struct iwl_targ_addr { struct in6_addr addr; __le32 config_num; } __packed; /* TARGET_IPV6_ADDRESS */ /** * struct iwl_proto_offload_cmd_v3_small - ARP/NS offload configuration * @common: common/IPv4 configuration * @num_valid_ipv6_addrs: number of valid IPv6 addresses * @targ_addrs: target IPv6 addresses * @ns_config: NS offload configurations */ struct iwl_proto_offload_cmd_v3_small { struct iwl_proto_offload_cmd_common common; __le32 num_valid_ipv6_addrs; struct iwl_targ_addr
targ_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3S]; struct iwl_ns_config ns_config[IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3S]; } __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_3 */ /** * struct iwl_proto_offload_cmd_v3_large - ARP/NS offload configuration * @common: common/IPv4 configuration * @num_valid_ipv6_addrs: number of valid IPv6 addresses * @targ_addrs: target IPv6 addresses * @ns_config: NS offload configurations */ struct iwl_proto_offload_cmd_v3_large { struct iwl_proto_offload_cmd_common common; __le32 num_valid_ipv6_addrs; struct iwl_targ_addr targ_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3L]; struct iwl_ns_config ns_config[IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L]; } __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_3 */ /* * WOWLAN_PATTERNS */ #define IWL_WOWLAN_MIN_PATTERN_LEN 16 #define IWL_WOWLAN_MAX_PATTERN_LEN 128 struct iwl_wowlan_pattern_v1 { u8 mask[IWL_WOWLAN_MAX_PATTERN_LEN / 8]; u8 pattern[IWL_WOWLAN_MAX_PATTERN_LEN]; u8 mask_size; u8 pattern_size; __le16 reserved; } __packed; /* WOWLAN_PATTERN_API_S_VER_1 */ #define IWL_WOWLAN_MAX_PATTERNS 20 /** * struct iwl_wowlan_patterns_cmd_v1 - WoWLAN wakeup patterns */ struct iwl_wowlan_patterns_cmd_v1 { /** * @n_patterns: number of patterns */ __le32 n_patterns; /** * @patterns: the patterns, array length in @n_patterns */ struct iwl_wowlan_pattern_v1 patterns[]; } __packed; /* WOWLAN_PATTERN_ARRAY_API_S_VER_1 */ #define IPV4_ADDR_SIZE 4 #define IPV6_ADDR_SIZE 16 enum iwl_wowlan_pattern_type { WOWLAN_PATTERN_TYPE_BITMASK, WOWLAN_PATTERN_TYPE_IPV4_TCP_SYN, WOWLAN_PATTERN_TYPE_IPV6_TCP_SYN, WOWLAN_PATTERN_TYPE_IPV4_TCP_SYN_WILDCARD, WOWLAN_PATTERN_TYPE_IPV6_TCP_SYN_WILDCARD, }; /* WOWLAN_PATTERN_TYPE_API_E_VER_1 */ /** * struct iwl_wowlan_ipv4_tcp_syn - WoWLAN IPv4 TCP SYN pattern data */ struct iwl_wowlan_ipv4_tcp_syn { /** * @src_addr: source IP address to match */ u8 src_addr[IPV4_ADDR_SIZE]; /** * @dst_addr: destination IP address to match */ u8 dst_addr[IPV4_ADDR_SIZE]; /** * @src_port: source TCP port to match */ __le16 src_port; /** * @dst_port: destination TCP port to match */ __le16 dst_port; } __packed; /* WOWLAN_IPV4_TCP_SYN_API_S_VER_1 */ /** * struct iwl_wowlan_ipv6_tcp_syn - WoWLAN IPv6 TCP SYN pattern data */ struct iwl_wowlan_ipv6_tcp_syn { /** * @src_addr: source IP address to match */ u8 src_addr[IPV6_ADDR_SIZE]; /** * @dst_addr: destination IP address to match */ u8 dst_addr[IPV6_ADDR_SIZE]; /** * @src_port: source TCP port to match */ __le16 src_port; /** * @dst_port: destination TCP port to match */ __le16 dst_port; } __packed; /* WOWLAN_IPV6_TCP_SYN_API_S_VER_1 */ /** * union iwl_wowlan_pattern_data - Data for the different pattern types * * If wildcard addresses/ports are to be used, the union can be left * undefined.
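 *
 * Editor's sketch (illustrative, not firmware documentation): to wake
 * only on a TCP SYN from a specific IPv4 peer, a driver would choose
 * WOWLAN_PATTERN_TYPE_IPV4_TCP_SYN and fill @ipv4_tcp_syn with the
 * addresses and ports to match, e.g. dst_port = cpu_to_le16(22) for an
 * incoming SSH connection attempt.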
*/ union iwl_wowlan_pattern_data { /** * @bitmask: bitmask pattern data */ struct iwl_wowlan_pattern_v1 bitmask; /** * @ipv4_tcp_syn: IPv4 TCP SYN pattern data */ struct iwl_wowlan_ipv4_tcp_syn ipv4_tcp_syn; /** * @ipv6_tcp_syn: IPv6 TCP SYN pattern data */ struct iwl_wowlan_ipv6_tcp_syn ipv6_tcp_syn; }; /* WOWLAN_PATTERN_API_U_VER_1 */ /** * struct iwl_wowlan_pattern_v2 - Pattern entry for the WoWLAN wakeup patterns */ struct iwl_wowlan_pattern_v2 { /** * @pattern_type: defines the struct type to be used in the union */ u8 pattern_type; /** * @reserved: reserved for alignment */ u8 reserved[3]; /** * @u: the union containing the match data, or undefined for * wildcard matches */ union iwl_wowlan_pattern_data u; } __packed; /* WOWLAN_PATTERN_API_S_VER_2 */ /** * struct iwl_wowlan_patterns_cmd - WoWLAN wakeup patterns command */ struct iwl_wowlan_patterns_cmd { /** * @n_patterns: number of patterns */ __le32 n_patterns; /** * @patterns: the patterns, array length in @n_patterns */ struct iwl_wowlan_pattern_v2 patterns[]; } __packed; /* WOWLAN_PATTERN_ARRAY_API_S_VER_2 */ enum iwl_wowlan_wakeup_filters { IWL_WOWLAN_WAKEUP_MAGIC_PACKET = BIT(0), IWL_WOWLAN_WAKEUP_PATTERN_MATCH = BIT(1), IWL_WOWLAN_WAKEUP_BEACON_MISS = BIT(2), IWL_WOWLAN_WAKEUP_LINK_CHANGE = BIT(3), IWL_WOWLAN_WAKEUP_GTK_REKEY_FAIL = BIT(4), IWL_WOWLAN_WAKEUP_EAP_IDENT_REQ = BIT(5), IWL_WOWLAN_WAKEUP_4WAY_HANDSHAKE = BIT(6), IWL_WOWLAN_WAKEUP_ENABLE_NET_DETECT = BIT(7), IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT = BIT(8), IWL_WOWLAN_WAKEUP_REMOTE_LINK_LOSS = BIT(9), IWL_WOWLAN_WAKEUP_REMOTE_SIGNATURE_TABLE = BIT(10), IWL_WOWLAN_WAKEUP_REMOTE_TCP_EXTERNAL = BIT(11), IWL_WOWLAN_WAKEUP_REMOTE_WAKEUP_PACKET = BIT(12), IWL_WOWLAN_WAKEUP_IOAC_MAGIC_PACKET = BIT(13), IWL_WOWLAN_WAKEUP_HOST_TIMER = BIT(14), IWL_WOWLAN_WAKEUP_RX_FRAME = BIT(15), IWL_WOWLAN_WAKEUP_BCN_FILTERING = BIT(16), }; /* WOWLAN_WAKEUP_FILTER_API_E_VER_4 */ enum iwl_wowlan_flags { IS_11W_ASSOC = BIT(0), ENABLE_L3_FILTERING = BIT(1), ENABLE_NBNS_FILTERING = BIT(2), ENABLE_DHCP_FILTERING = BIT(3), ENABLE_STORE_BEACON = BIT(4), }; /** * struct iwl_wowlan_config_cmd - WoWLAN configuration * @wakeup_filter: filter from &enum iwl_wowlan_wakeup_filters * @non_qos_seq: non-QoS sequence counter to use next * @qos_seq: QoS sequence counters to use next * @wowlan_ba_teardown_tids: bitmap of BA sessions to tear down * @is_11n_connection: indicates HT connection * @offloading_tid: TID reserved for firmware use * @flags: extra flags, see &enum iwl_wowlan_flags * @reserved: reserved */ struct iwl_wowlan_config_cmd { __le32 wakeup_filter; __le16 non_qos_seq; __le16 qos_seq[8]; u8 wowlan_ba_teardown_tids; u8 is_11n_connection; u8 offloading_tid; u8 flags; u8 reserved[2]; } __packed; /* WOWLAN_CONFIG_API_S_VER_4 */ /* * WOWLAN_TSC_RSC_PARAMS */ #define IWL_NUM_RSC 16 struct tkip_sc { __le16 iv16; __le16 pad; __le32 iv32; } __packed; /* TKIP_SC_API_U_VER_1 */ struct iwl_tkip_rsc_tsc { struct tkip_sc unicast_rsc[IWL_NUM_RSC]; struct tkip_sc multicast_rsc[IWL_NUM_RSC]; struct tkip_sc tsc; } __packed; /* TKIP_TSC_RSC_API_S_VER_1 */ struct aes_sc { __le64 pn; } __packed; /* TKIP_AES_SC_API_U_VER_1 */ struct iwl_aes_rsc_tsc { struct aes_sc unicast_rsc[IWL_NUM_RSC]; struct aes_sc multicast_rsc[IWL_NUM_RSC]; struct aes_sc tsc; } __packed; /* AES_TSC_RSC_API_S_VER_1 */ union iwl_all_tsc_rsc { struct iwl_tkip_rsc_tsc tkip; struct iwl_aes_rsc_tsc aes; }; /* ALL_TSC_RSC_API_S_VER_2 */ struct iwl_wowlan_rsc_tsc_params_cmd { union iwl_all_tsc_rsc all_tsc_rsc; } __packed; /* ALL_TSC_RSC_API_S_VER_2 */ 
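/*
 * Editor's sketch (illustrative, not part of the firmware API): how the
 * WoWLAN configuration defined above might be filled for magic-packet
 * wakeup. The helper name is hypothetical, and the sketch assumes the
 * usual kernel environment (memset() from linux/string.h, cpu_to_le32()
 * from the byteorder helpers); a real driver would hand the result to
 * its own command-submission path as WOWLAN_CONFIGURATION.
 */
static inline void
iwl_example_fill_wowlan_config(struct iwl_wowlan_config_cmd *cmd)
{
	memset(cmd, 0, sizeof(*cmd));
	/* wake on a magic packet or on a failed GTK rekey */
	cmd->wakeup_filter = cpu_to_le32(IWL_WOWLAN_WAKEUP_MAGIC_PACKET |
					 IWL_WOWLAN_WAKEUP_GTK_REKEY_FAIL);
	/* filter NBNS/DHCP traffic instead of waking the host for it */
	cmd->flags = ENABLE_NBNS_FILTERING | ENABLE_DHCP_FILTERING;
	/* TID 0 is handed to the firmware for offloaded traffic here */
	cmd->offloading_tid = 0;
}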
#define IWL_MIC_KEY_SIZE 8 struct iwl_mic_keys { u8 tx[IWL_MIC_KEY_SIZE]; u8 rx_unicast[IWL_MIC_KEY_SIZE]; u8 rx_mcast[IWL_MIC_KEY_SIZE]; } __packed; /* MIC_KEYS_API_S_VER_1 */ #define IWL_P1K_SIZE 5 struct iwl_p1k_cache { __le16 p1k[IWL_P1K_SIZE]; } __packed; #define IWL_NUM_RX_P1K_CACHE 2 struct iwl_wowlan_tkip_params_cmd { struct iwl_mic_keys mic_keys; struct iwl_p1k_cache tx; struct iwl_p1k_cache rx_uni[IWL_NUM_RX_P1K_CACHE]; struct iwl_p1k_cache rx_multi[IWL_NUM_RX_P1K_CACHE]; } __packed; /* WOWLAN_TKIP_SETTING_API_S_VER_1 */ #define IWL_KCK_MAX_SIZE 32 #define IWL_KEK_MAX_SIZE 32 struct iwl_wowlan_kek_kck_material_cmd { u8 kck[IWL_KCK_MAX_SIZE]; u8 kek[IWL_KEK_MAX_SIZE]; __le16 kck_len; __le16 kek_len; __le64 replay_ctr; } __packed; /* KEK_KCK_MATERIAL_API_S_VER_2 */ #define RF_KILL_INDICATOR_FOR_WOWLAN 0x87 enum iwl_wowlan_rekey_status { IWL_WOWLAN_REKEY_POST_REKEY = 0, IWL_WOWLAN_REKEY_WHILE_REKEY = 1, }; /* WOWLAN_REKEY_STATUS_API_E_VER_1 */ enum iwl_wowlan_wakeup_reason { IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS = 0, IWL_WOWLAN_WAKEUP_BY_MAGIC_PACKET = BIT(0), IWL_WOWLAN_WAKEUP_BY_PATTERN = BIT(1), IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON = BIT(2), IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH = BIT(3), IWL_WOWLAN_WAKEUP_BY_GTK_REKEY_FAILURE = BIT(4), IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED = BIT(5), IWL_WOWLAN_WAKEUP_BY_UCODE_ERROR = BIT(6), IWL_WOWLAN_WAKEUP_BY_EAPOL_REQUEST = BIT(7), IWL_WOWLAN_WAKEUP_BY_FOUR_WAY_HANDSHAKE = BIT(8), IWL_WOWLAN_WAKEUP_BY_REM_WAKE_LINK_LOSS = BIT(9), IWL_WOWLAN_WAKEUP_BY_REM_WAKE_SIGNATURE_TABLE = BIT(10), IWL_WOWLAN_WAKEUP_BY_REM_WAKE_TCP_EXTERNAL = BIT(11), IWL_WOWLAN_WAKEUP_BY_REM_WAKE_WAKEUP_PACKET = BIT(12), IWL_WOWLAN_WAKEUP_BY_IOAC_MAGIC_PACKET = BIT(13), IWL_WOWLAN_WAKEUP_BY_D3_WAKEUP_HOST_TIMER = BIT(14), IWL_WOWLAN_WAKEUP_BY_RXFRAME_FILTERED_IN = BIT(15), IWL_WOWLAN_WAKEUP_BY_BEACON_FILTERED_IN = BIT(16), IWL_WAKEUP_BY_11W_UNPROTECTED_DEAUTH_OR_DISASSOC = BIT(17), IWL_WAKEUP_BY_PATTERN_IPV4_TCP_SYN = BIT(18), IWL_WAKEUP_BY_PATTERN_IPV4_TCP_SYN_WILDCARD = BIT(19), IWL_WAKEUP_BY_PATTERN_IPV6_TCP_SYN = BIT(20), IWL_WAKEUP_BY_PATTERN_IPV6_TCP_SYN_WILDCARD = BIT(21), }; /* WOWLAN_WAKE_UP_REASON_API_E_VER_2 */ struct iwl_wowlan_gtk_status_v1 { u8 key_index; u8 reserved[3]; u8 decrypt_key[16]; u8 tkip_mic_key[8]; struct iwl_wowlan_rsc_tsc_params_cmd rsc; } __packed; /* WOWLAN_GTK_MATERIAL_VER_1 */ #define WOWLAN_KEY_MAX_SIZE 32 #define WOWLAN_GTK_KEYS_NUM 2 #define WOWLAN_IGTK_KEYS_NUM 2 /** * struct iwl_wowlan_gtk_status - GTK status * @key: GTK material * @key_len: GTK length, if set to 0, the key is not available * @key_flags: information about the key: * bits[0:1]: key index assigned by the AP * bits[2:6]: GTK index of the key in the internal DB * bit[7]: Set iff this is the currently used GTK * @reserved: padding * @tkip_mic_key: TKIP RX MIC key * @rsc: TSC RSC counters */ struct iwl_wowlan_gtk_status { u8 key[WOWLAN_KEY_MAX_SIZE]; u8 key_len; u8 key_flags; u8 reserved[2]; u8 tkip_mic_key[8]; struct iwl_wowlan_rsc_tsc_params_cmd rsc; } __packed; /* WOWLAN_GTK_MATERIAL_VER_2 */ #define IWL_WOWLAN_GTK_IDX_MASK (BIT(0) | BIT(1)) /** * struct iwl_wowlan_igtk_status - IGTK status * @key: IGTK material * @ipn: the IGTK packet number (replay counter) * @key_len: IGTK length, if set to 0, the key is not available * @key_flags: information about the key: * bits[0]: key index assigned by the AP (0: index 4, 1: index 5) * bits[1:5]: IGTK index of the key in the internal DB * bit[6]: Set iff this is the currently used IGTK */ struct
iwl_wowlan_igtk_status { u8 key[WOWLAN_KEY_MAX_SIZE]; u8 ipn[6]; u8 key_len; u8 key_flags; } __packed; /* WOWLAN_IGTK_MATERIAL_VER_1 */ /** * struct iwl_wowlan_status_v6 - WoWLAN status * @gtk: GTK data * @replay_ctr: GTK rekey replay counter * @pattern_number: number of the matched pattern * @non_qos_seq_ctr: non-QoS sequence counter to use next * @qos_seq_ctr: QoS sequence counters to use next * @wakeup_reasons: wakeup reasons, see &enum iwl_wowlan_wakeup_reason * @num_of_gtk_rekeys: number of GTK rekeys * @transmitted_ndps: number of transmitted neighbor discovery packets * @received_beacons: number of received beacons * @wake_packet_length: wakeup packet length * @wake_packet_bufsize: wakeup packet buffer size * @wake_packet: wakeup packet */ struct iwl_wowlan_status_v6 { struct iwl_wowlan_gtk_status_v1 gtk; __le64 replay_ctr; __le16 pattern_number; __le16 non_qos_seq_ctr; __le16 qos_seq_ctr[8]; __le32 wakeup_reasons; __le32 num_of_gtk_rekeys; __le32 transmitted_ndps; __le32 received_beacons; __le32 wake_packet_length; __le32 wake_packet_bufsize; u8 wake_packet[]; /* can be truncated from _length to _bufsize */ } __packed; /* WOWLAN_STATUSES_API_S_VER_6 */ /** * struct iwl_wowlan_status - WoWLAN status * @gtk: GTK data * @igtk: IGTK data * @replay_ctr: GTK rekey replay counter * @pattern_number: number of the matched pattern * @non_qos_seq_ctr: non-QoS sequence counter to use next * @qos_seq_ctr: QoS sequence counters to use next * @wakeup_reasons: wakeup reasons, see &enum iwl_wowlan_wakeup_reason * @num_of_gtk_rekeys: number of GTK rekeys * @transmitted_ndps: number of transmitted neighbor discovery packets * @received_beacons: number of received beacons * @wake_packet_length: wakeup packet length * @wake_packet_bufsize: wakeup packet buffer size * @wake_packet: wakeup packet */ struct iwl_wowlan_status { struct iwl_wowlan_gtk_status gtk[WOWLAN_GTK_KEYS_NUM]; struct iwl_wowlan_igtk_status igtk[WOWLAN_IGTK_KEYS_NUM]; __le64 replay_ctr; __le16 pattern_number; __le16 non_qos_seq_ctr; __le16 qos_seq_ctr[8]; __le32 wakeup_reasons; __le32 num_of_gtk_rekeys; __le32 transmitted_ndps; __le32 received_beacons; __le32 wake_packet_length; __le32 wake_packet_bufsize; u8 wake_packet[]; /* can be truncated from _length to _bufsize */ } __packed; /* WOWLAN_STATUSES_API_S_VER_7 */ static inline u8 iwlmvm_wowlan_gtk_idx(struct iwl_wowlan_gtk_status *gtk) { return gtk->key_flags & IWL_WOWLAN_GTK_IDX_MASK; } #define IWL_WOWLAN_TCP_MAX_PACKET_LEN 64 #define IWL_WOWLAN_REMOTE_WAKE_MAX_PACKET_LEN 128 #define IWL_WOWLAN_REMOTE_WAKE_MAX_TOKENS 2048 struct iwl_tcp_packet_info { __le16 tcp_pseudo_header_checksum; __le16 tcp_payload_length; } __packed; /* TCP_PACKET_INFO_API_S_VER_2 */ struct iwl_tcp_packet { struct iwl_tcp_packet_info info; u8 rx_mask[IWL_WOWLAN_MAX_PATTERN_LEN / 8]; u8 data[IWL_WOWLAN_TCP_MAX_PACKET_LEN]; } __packed; /* TCP_PROTOCOL_PACKET_API_S_VER_1 */ struct iwl_remote_wake_packet { struct iwl_tcp_packet_info info; u8 rx_mask[IWL_WOWLAN_MAX_PATTERN_LEN / 8]; u8 data[IWL_WOWLAN_REMOTE_WAKE_MAX_PACKET_LEN]; } __packed; /* TCP_PROTOCOL_PACKET_API_S_VER_1 */ struct iwl_wowlan_remote_wake_config { __le32 connection_max_time; /* unused */ /* TCP_PROTOCOL_CONFIG_API_S_VER_1 */ u8 max_syn_retries; u8 max_data_retries; u8 tcp_syn_ack_timeout; u8 tcp_ack_timeout; struct iwl_tcp_packet syn_tx; struct iwl_tcp_packet synack_rx; struct iwl_tcp_packet keepalive_ack_rx; struct iwl_tcp_packet fin_tx; struct iwl_remote_wake_packet keepalive_tx; struct iwl_remote_wake_packet wake_rx; /* 
REMOTE_WAKE_OFFSET_INFO_API_S_VER_1 */ u8 sequence_number_offset; u8 sequence_number_length; u8 token_offset; u8 token_length; /* REMOTE_WAKE_PROTOCOL_PARAMS_API_S_VER_1 */ __le32 initial_sequence_number; __le16 keepalive_interval; __le16 num_tokens; u8 tokens[IWL_WOWLAN_REMOTE_WAKE_MAX_TOKENS]; } __packed; /* REMOTE_WAKE_CONFIG_API_S_VER_2 */ /* TODO: NetDetect API */ #endif /* __iwl_fw_api_d3_h__ */ backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h000066400000000000000000000212221351064634500277120ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright (C) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright (C) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* *****************************************************************************/ #ifndef __iwl_fw_api_datapath_h__ #define __iwl_fw_api_datapath_h__ /** * enum iwl_data_path_subcmd_ids - data path group commands */ enum iwl_data_path_subcmd_ids { /** * @DQA_ENABLE_CMD: &struct iwl_dqa_enable_cmd */ DQA_ENABLE_CMD = 0x0, /** * @UPDATE_MU_GROUPS_CMD: &struct iwl_mu_group_mgmt_cmd */ UPDATE_MU_GROUPS_CMD = 0x1, /** * @TRIGGER_RX_QUEUES_NOTIF_CMD: &struct iwl_rxq_sync_cmd */ TRIGGER_RX_QUEUES_NOTIF_CMD = 0x2, /** * @STA_HE_CTXT_CMD: &struct iwl_he_sta_context_cmd */ STA_HE_CTXT_CMD = 0x7, /** * @AX_SOFTAP_CLIENT_TESTMODE: &struct ax_softap_client_testmode_cmd */ AX_SOFTAP_CLIENT_TESTMODE = 0xC, /** * @RFH_QUEUE_CONFIG_CMD: &struct iwl_rfh_queue_config */ RFH_QUEUE_CONFIG_CMD = 0xD, /** * @TLC_MNG_CONFIG_CMD: &struct iwl_tlc_config_cmd */ TLC_MNG_CONFIG_CMD = 0xF, /** * @HE_AIR_SNIFFER_CONFIG_CMD: &struct iwl_he_monitor_cmd */ HE_AIR_SNIFFER_CONFIG_CMD = 0x13, /** * @CHEST_COLLECTOR_FILTER_CONFIG_CMD: Configure the CSI * matrix collection, uses &struct iwl_channel_estimation_cfg_v1 * or &struct iwl_channel_estimation_cfg */ CHEST_COLLECTOR_FILTER_CONFIG_CMD = 0x14, /** * @RX_NO_DATA_NOTIF: &struct iwl_rx_no_data */ RX_NO_DATA_NOTIF = 0xF5, /** * @TLC_MNG_UPDATE_NOTIF: &struct iwl_tlc_update_notif */ TLC_MNG_UPDATE_NOTIF = 0xF7, /** * @STA_PM_NOTIF: &struct iwl_mvm_pm_state_notification */ STA_PM_NOTIF = 0xFD, /** * @MU_GROUP_MGMT_NOTIF: &struct iwl_mu_group_mgmt_notif */ MU_GROUP_MGMT_NOTIF = 0xFE, /** * @RX_QUEUES_NOTIFICATION: &struct iwl_rxq_sync_notification */ RX_QUEUES_NOTIFICATION = 0xFF, }; /** * struct iwl_mu_group_mgmt_cmd - VHT MU-MIMO group configuration * * @reserved: reserved * @membership_status: a bitmap of MU groups * @user_position: the position of the station in a group. If the station is * in the group, the 2 bits starting at bit (group * 2) hold its position */ struct iwl_mu_group_mgmt_cmd { __le32 reserved; __le32 membership_status[2]; __le32 user_position[4]; } __packed; /* MU_GROUP_ID_MNG_TABLE_API_S_VER_1 */ /** * struct iwl_mu_group_mgmt_notif - VHT MU-MIMO group id notification * * @membership_status: a bitmap of MU groups * @user_position: the position of the station in a group. If the station is * in the group, the 2 bits starting at bit (group * 2) hold its position */ struct iwl_mu_group_mgmt_notif { __le32 membership_status[2]; __le32 user_position[4]; } __packed; /* MU_GROUP_MNG_NTFY_API_S_VER_1 */ enum iwl_channel_estimation_flags { IWL_CHANNEL_ESTIMATION_ENABLE = BIT(0), IWL_CHANNEL_ESTIMATION_TIMER = BIT(1), IWL_CHANNEL_ESTIMATION_COUNTER = BIT(2), /* starting from v2: */ IWL_CHANNEL_ESTIMATION_INTERVAL = BIT(3), }; /** * struct iwl_channel_estimation_cfg_v1 - channel estimation reporting config */ struct iwl_channel_estimation_cfg_v1 { /** * @flags: flags, see &enum iwl_channel_estimation_flags */ __le32 flags; /** * @timer: if enabled via flags, automatically disable after this many * microseconds */ __le32 timer; /** * @count: if enabled via flags, automatically disable after this many * frames with channel estimation matrix were captured */ __le32 count; /** * @rate_n_flags_mask: only try to record the channel estimation matrix * if the rate_n_flags value for the received frame (let's call * that rx_rnf) matches the mask/value given here like this: * (rx_rnf & rate_n_flags_mask) == rate_n_flags_val.
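 *	(Illustrative: an all-zero mask and value match every received
 *	frame, since (rx_rnf & 0) == 0 always holds.)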
*/ __le32 rate_n_flags_mask; /** * @rate_n_flags_val: see @rate_n_flags_mask */ __le32 rate_n_flags_val; /** * @reserved: reserved (for alignment) */ __le32 reserved; /** * @frame_types: bitmap of frame types to capture, the received frame's * subtype|type takes 6 bits in the frame and the corresponding bit * in this field must be set to 1 to capture channel estimation for * that frame type. Set to all-ones to enable capturing for all * frame types. */ __le64 frame_types; } __packed; /* CHEST_COLLECTOR_FILTER_CMD_API_S_VER_1 */ /* CHEST_MAX_MAC_ADDR_FILTERED_IN_API_D_VER_1 */ #define IWL_NUM_CHANNEL_ESTIMATION_FILTER_ADDRS 20 /** * struct iwl_channel_estimation_cfg - channel estimation reporting config */ struct iwl_channel_estimation_cfg { /** * @flags: flags, see &enum iwl_channel_estimation_flags */ __le32 flags; /** * @timer: if enabled via flags, automatically disable after this many * microseconds */ __le32 timer; /** * @count: if enabled via flags, automatically disable after this many * frames with channel estimation matrix were captured */ __le32 count; /** * @rate_n_flags_mask: only try to record the channel estimation matrix * if the rate_n_flags value for the received frame (let's call * that rx_rnf) matches the mask/value given here like this: * (rx_rnf & rate_n_flags_mask) == rate_n_flags_val. */ __le32 rate_n_flags_mask; /** * @rate_n_flags_val: see @rate_n_flags_mask */ __le32 rate_n_flags_val; /** * @min_time_between_collection: minimum time between collecting data */ __le32 min_time_between_collection; /** * @frame_types: bitmap of frame types to capture, the received frame's * subtype|type takes 6 bits in the frame and the corresponding bit * in this field must be set to 1 to capture channel estimation for * that frame type. Set to all-ones to enable capturing for all * frame types. */ __le64 frame_types; /** * @num_filter_addrs: number of MAC address filters configured, if 0 * no filters are applied */ __le32 num_filter_addrs; /** * @filter_addrs: MAC address filters, used length in @num_filter_addrs */ struct { u8 addr[ETH_ALEN]; __le16 reserved; } filter_addrs[IWL_NUM_CHANNEL_ESTIMATION_FILTER_ADDRS]; } __packed; /* CHEST_COLLECTOR_FILTER_CMD_API_S_VER_2 */ #endif /* __iwl_fw_api_datapath_h__ */ backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h000066400000000000000000000451201351064634500274660ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright (C) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright (C) 2018 - 2019 Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #ifndef __iwl_fw_dbg_tlv_h__ #define __iwl_fw_dbg_tlv_h__ #include <linux/bitops.h> /** * struct iwl_fw_ini_header - common header for all debug group TLV structures * * @tlv_version: version info * @apply_point: &enum iwl_fw_ini_apply_point * @data: TLV data follows */ struct iwl_fw_ini_header { __le32 tlv_version; __le32 apply_point; u8 data[]; } __packed; /* FW_DEBUG_TLV_HEADER_S */ /** * struct iwl_fw_ini_allocation_tlv - (IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION) * buffer allocation TLV - for debug * * @header: header * @allocation_id: &enum iwl_fw_ini_allocation_id - to bind allocation and hcmd * if needed (DBGC1/DBGC2/SDFX/...)
* @buffer_location: type of iwl_fw_ini_buffer_location * @size: size in bytes * @max_fragments: the maximum allowed fragmentation in the desired memory * allocation above * @min_frag_size: the minimum allowed fragmentation size in bytes */ struct iwl_fw_ini_allocation_tlv { struct iwl_fw_ini_header header; __le32 allocation_id; __le32 buffer_location; __le32 size; __le32 max_fragments; __le32 min_frag_size; } __packed; /* FW_DEBUG_TLV_BUFFER_ALLOCATION_TLV_S_VER_1 */ /** * enum iwl_fw_ini_dbg_domain - debug domains * allows to send host cmd or collect memory region if a given domain is enabled * * @IWL_FW_INI_DBG_DOMAIN_ALWAYS_ON: the default domain, always on * @IWL_FW_INI_DBG_DOMAIN_REPORT_PS: power save domain */ enum iwl_fw_ini_dbg_domain { IWL_FW_INI_DBG_DOMAIN_ALWAYS_ON = 0, IWL_FW_INI_DBG_DOMAIN_REPORT_PS, }; /* FW_DEBUG_TLV_DOMAIN_API_E_VER_1 */ /** * struct iwl_fw_ini_hcmd * * @id: the debug configuration command type for instance: 0xf6 / 0xf5 / DHC * @group: the desired cmd group * @reserved: to align to FW struct * @data: all of the relevant command data to be sent */ struct iwl_fw_ini_hcmd { u8 id; u8 group; __le16 reserved; u8 data[0]; } __packed; /* FW_DEBUG_TLV_HCMD_DATA_API_S_VER_1 */ /** * struct iwl_fw_ini_hcmd_tlv - (IWL_UCODE_TLV_TYPE_HCMD) * Generic Host command pass through TLV * * @header: header * @domain: send command only if the specific domain is enabled * &enum iwl_fw_ini_dbg_domain * @period_msec: period in which the hcmd will be sent to FW. Measured in msec * (0 = one time command). * @hcmd: a variable length host-command to be sent to apply the configuration. */ struct iwl_fw_ini_hcmd_tlv { struct iwl_fw_ini_header header; __le32 domain; __le32 period_msec; struct iwl_fw_ini_hcmd hcmd; } __packed; /* FW_DEBUG_TLV_HCMD_API_S_VER_1 */ /** * struct iwl_fw_ini_debug_flow_tlv - (IWL_UCODE_TLV_TYPE_DEBUG_FLOW) * * @header: header * @debug_flow_cfg: &enum iwl_fw_ini_debug_flow */ struct iwl_fw_ini_debug_flow_tlv { struct iwl_fw_ini_header header; __le32 debug_flow_cfg; } __packed; /* FW_DEBUG_TLV_FLOW_TLV_S_VER_1 */ #define IWL_FW_INI_MAX_REGION_ID 64 #define IWL_FW_INI_MAX_NAME 32 /** * struct iwl_fw_ini_region_cfg_dhc - defines dhc response to dump. * * @id_and_grp: id and group of dhc response. * @desc: dhc response descriptor. */ struct iwl_fw_ini_region_cfg_dhc { __le32 id_and_grp; __le32 desc; } __packed; /* FW_DEBUG_TLV_REGION_DHC_API_S_VER_1 */ /** * struct iwl_fw_ini_region_cfg_internal - meta data of internal memory region * * @num_of_ranges: the number of ranges in the region * @range_data_size: size of the data to read per range, in bytes. */ struct iwl_fw_ini_region_cfg_internal { __le32 num_of_ranges; __le32 range_data_size; } __packed; /* FW_DEBUG_TLV_REGION_NIC_INTERNAL_RANGES_S */ /** * struct iwl_fw_ini_region_cfg_fifos - meta data of fifos region * * @fid1: fifo id 1 - bitmap of lmac tx/rx fifos to include in the region * @fid2: fifo id 2 - bitmap of umac rx fifos to include in the region. * It is unused for tx. * @num_of_registers: number of prph registers in the region, each register is * 4 bytes in size. * @header_only: non-zero value indicates that this region does not include * fifo data and includes only the given registers.
*/ struct iwl_fw_ini_region_cfg_fifos { __le32 fid1; __le32 fid2; __le32 num_of_registers; __le32 header_only; } __packed; /* FW_DEBUG_TLV_REGION_FIFOS_S */ /** * struct iwl_fw_ini_region_cfg * * @region_id: ID of this dump configuration * @region_type: &enum iwl_fw_ini_region_type * @domain: dump this region only if the specific domain is enabled * &enum iwl_fw_ini_dbg_domain * @name_len: name length * @name: file name to use for this region * @internal: used in case the region uses internal memory. * @allocation_id: for DRAM buffer regions, the allocation ID is used instead * of @internal * @fifos: used in case of fifos region. * @dhc_desc: dhc response descriptor. * @notif_id_and_grp: dump this region only if the specific notification * occurred. * @offset: offset to use for each memory base address * @start_addr: array of addresses. */ struct iwl_fw_ini_region_cfg { __le32 region_id; __le32 region_type; __le32 domain; __le32 name_len; u8 name[IWL_FW_INI_MAX_NAME]; union { struct iwl_fw_ini_region_cfg_internal internal; __le32 allocation_id; struct iwl_fw_ini_region_cfg_fifos fifos; struct iwl_fw_ini_region_cfg_dhc dhc_desc; __le32 notif_id_and_grp; }; /* FW_DEBUG_TLV_REGION_EXT_INT_PARAMS_API_U_VER_1 */ __le32 offset; __le32 start_addr[]; } __packed; /* FW_DEBUG_TLV_REGION_CONFIG_API_S_VER_1 */ /** * struct iwl_fw_ini_region_tlv - (IWL_UCODE_TLV_TYPE_REGIONS) * defines memory regions to dump * * @header: header * @num_regions: how many region sections and IDs follow * @region_config: list of dump configurations */ struct iwl_fw_ini_region_tlv { struct iwl_fw_ini_header header; __le32 num_regions; struct iwl_fw_ini_region_cfg region_config[]; } __packed; /* FW_DEBUG_TLV_REGIONS_API_S_VER_1 */ /** * struct iwl_fw_ini_trigger * * @trigger_id: &enum iwl_fw_ini_trigger_id * @override_trig: determines how to apply the trigger in case a trigger with * the same id is already in use. Using the first 2 bytes: * Byte 0: if 0, override trigger configuration, otherwise use the * existing configuration. * Byte 1: if 0, override trigger regions, otherwise append regions to * existing trigger.
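 *	(Illustrative host-side decode of those two bytes:
 *	cfg_override = le32_to_cpu(override_trig) & 0xff;
 *	regions_override = (le32_to_cpu(override_trig) >> 8) & 0xff)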
* @dump_delay: delay from trigger fire to dump, in usec * @occurrences: max amount of times to be fired * @reserved: to align to FW struct * @ignore_consec: ignore consecutive triggers, in usec * @force_restart: force FW restart * @multi_dut: initiate debug dump data on several DUTs * @trigger_data: generic data to be utilized per trigger * @num_regions: number of dump regions defined for this trigger * @data: region IDs */ struct iwl_fw_ini_trigger { __le32 trigger_id; __le32 override_trig; __le32 dump_delay; __le32 occurrences; __le32 reserved; __le32 ignore_consec; __le32 force_restart; __le32 multi_dut; __le32 trigger_data; __le32 num_regions; __le32 data[]; } __packed; /* FW_TLV_DEBUG_TRIGGER_CONFIG_API_S_VER_1 */ /** * struct iwl_fw_ini_trigger_tlv - (IWL_UCODE_TLV_TYPE_TRIGGERS) * Triggers that hold memory regions to dump in case a trigger fires * * @header: header * @num_triggers: how many different triggers section and IDs are coming next * @trigger_config: list of trigger configurations */ struct iwl_fw_ini_trigger_tlv { struct iwl_fw_ini_header header; __le32 num_triggers; struct iwl_fw_ini_trigger trigger_config[]; } __packed; /* FW_TLV_DEBUG_TRIGGERS_API_S_VER_1 */ #define IWL_FW_INI_MAX_IMG_NAME_LEN 32 #define IWL_FW_INI_MAX_DBG_CFG_NAME_LEN 64 /** * struct iwl_fw_ini_debug_info_tlv - (IWL_UCODE_TLV_TYPE_DEBUG_INFO) * * holds image name and debug configuration name * * @header: header * @img_name_len: length of the image name string * @img_name: image name string * @dbg_cfg_name_len : length of the debug configuration name string * @dbg_cfg_name: debug configuration name string */ struct iwl_fw_ini_debug_info_tlv { struct iwl_fw_ini_header header; __le32 img_name_len; u8 img_name[IWL_FW_INI_MAX_IMG_NAME_LEN]; __le32 dbg_cfg_name_len; u8 dbg_cfg_name[IWL_FW_INI_MAX_DBG_CFG_NAME_LEN]; } __packed; /* FW_DEBUG_TLV_INFO_API_S_VER_1 */ /** * enum iwl_fw_ini_trigger_id * * @IWL_FW_TRIGGER_ID_FW_ASSERT: FW assert * @IWL_FW_TRIGGER_ID_FW_HW_ERROR: HW assert * @IWL_FW_TRIGGER_ID_FW_TFD_Q_HANG: TFD queue hang * @IWL_FW_TRIGGER_ID_FW_DEBUG_HOST_TRIGGER: FW debug notification * @IWL_FW_TRIGGER_ID_FW_GENERIC_NOTIFICATION: FW generic notification * @IWL_FW_TRIGGER_ID_USER_TRIGGER: User trigger * @IWL_FW_TRIGGER_ID_PERIODIC_TRIGGER: triggers periodically * @IWL_FW_TRIGGER_ID_HOST_PEER_CLIENT_INACTIVITY: peer inactivity * @IWL_FW_TRIGGER_ID_HOST_TX_LATENCY_THRESHOLD_CROSSED: TX latency * threshold was crossed * @IWL_FW_TRIGGER_ID_HOST_TX_RESPONSE_STATUS_FAILED: TX failed * @IWL_FW_TRIGGER_ID_HOST_OS_REQ_DEAUTH_PEER: Deauth initiated by host * @IWL_FW_TRIGGER_ID_HOST_STOP_GO_REQUEST: stop GO request * @IWL_FW_TRIGGER_ID_HOST_START_GO_REQUEST: start GO request * @IWL_FW_TRIGGER_ID_HOST_JOIN_GROUP_REQUEST: join P2P group request * @IWL_FW_TRIGGER_ID_HOST_SCAN_START: scan started event * @IWL_FW_TRIGGER_ID_HOST_SCAN_SUBMITTED: undefined * @IWL_FW_TRIGGER_ID_HOST_SCAN_PARAMS: undefined * @IWL_FW_TRIGGER_ID_HOST_CHECK_FOR_HANG: undefined * @IWL_FW_TRIGGER_ID_HOST_BAR_RECEIVED: BAR frame was received * @IWL_FW_TRIGGER_ID_HOST_AGG_TX_RESPONSE_STATUS_FAILED: agg TX failed * @IWL_FW_TRIGGER_ID_HOST_EAPOL_TX_RESPONSE_FAILED: EAPOL TX failed * @IWL_FW_TRIGGER_ID_HOST_FAKE_TX_RESPONSE_SUSPECTED: suspicious TX response * @IWL_FW_TRIGGER_ID_HOST_AUTH_REQ_FROM_ASSOC_CLIENT: received suspicious auth * @IWL_FW_TRIGGER_ID_HOST_ROAM_COMPLETE: roaming was completed * @IWL_FW_TRIGGER_ID_HOST_AUTH_ASSOC_FAST_FAILED: fast assoc failed * @IWL_FW_TRIGGER_ID_HOST_D3_START: D3 start * @IWL_FW_TRIGGER_ID_HOST_D3_END: D3 
end * @IWL_FW_TRIGGER_ID_HOST_BSS_MISSED_BEACONS: missed beacon events * @IWL_FW_TRIGGER_ID_HOST_P2P_CLIENT_MISSED_BEACONS: P2P missed beacon events * @IWL_FW_TRIGGER_ID_HOST_PEER_CLIENT_TX_FAILURES: undefined * @IWL_FW_TRIGGER_ID_HOST_TX_WFD_ACTION_FRAME_FAILED: undefined * @IWL_FW_TRIGGER_ID_HOST_AUTH_ASSOC_FAILED: authentication / association * failed * @IWL_FW_TRIGGER_ID_HOST_SCAN_COMPLETE: scan complete event * @IWL_FW_TRIGGER_ID_HOST_SCAN_ABORT: scan abort complete * @IWL_FW_TRIGGER_ID_HOST_NIC_ALIVE: nic alive message was received * @IWL_FW_TRIGGER_ID_HOST_CHANNEL_SWITCH_COMPLETE: CSA was completed * @IWL_FW_TRIGGER_ID_NUM: number of trigger IDs */ enum iwl_fw_ini_trigger_id { IWL_FW_TRIGGER_ID_INVALID = 0, /* Errors triggers */ IWL_FW_TRIGGER_ID_FW_ASSERT = 1, IWL_FW_TRIGGER_ID_FW_HW_ERROR = 2, IWL_FW_TRIGGER_ID_FW_TFD_Q_HANG = 3, /* FW triggers */ IWL_FW_TRIGGER_ID_FW_DEBUG_HOST_TRIGGER = 4, IWL_FW_TRIGGER_ID_FW_GENERIC_NOTIFICATION = 5, /* User trigger */ IWL_FW_TRIGGER_ID_USER_TRIGGER = 6, /* periodic uses the data field for the interval time */ IWL_FW_TRIGGER_ID_PERIODIC_TRIGGER = 7, /* Host triggers */ IWL_FW_TRIGGER_ID_HOST_PEER_CLIENT_INACTIVITY = 8, IWL_FW_TRIGGER_ID_HOST_TX_LATENCY_THRESHOLD_CROSSED = 9, IWL_FW_TRIGGER_ID_HOST_TX_RESPONSE_STATUS_FAILED = 10, IWL_FW_TRIGGER_ID_HOST_OS_REQ_DEAUTH_PEER = 11, IWL_FW_TRIGGER_ID_HOST_STOP_GO_REQUEST = 12, IWL_FW_TRIGGER_ID_HOST_START_GO_REQUEST = 13, IWL_FW_TRIGGER_ID_HOST_JOIN_GROUP_REQUEST = 14, IWL_FW_TRIGGER_ID_HOST_SCAN_START = 15, IWL_FW_TRIGGER_ID_HOST_SCAN_SUBMITTED = 16, IWL_FW_TRIGGER_ID_HOST_SCAN_PARAMS = 17, IWL_FW_TRIGGER_ID_HOST_CHECK_FOR_HANG = 18, IWL_FW_TRIGGER_ID_HOST_BAR_RECEIVED = 19, IWL_FW_TRIGGER_ID_HOST_AGG_TX_RESPONSE_STATUS_FAILED = 20, IWL_FW_TRIGGER_ID_HOST_EAPOL_TX_RESPONSE_FAILED = 21, IWL_FW_TRIGGER_ID_HOST_FAKE_TX_RESPONSE_SUSPECTED = 22, IWL_FW_TRIGGER_ID_HOST_AUTH_REQ_FROM_ASSOC_CLIENT = 23, IWL_FW_TRIGGER_ID_HOST_ROAM_COMPLETE = 24, IWL_FW_TRIGGER_ID_HOST_AUTH_ASSOC_FAST_FAILED = 25, IWL_FW_TRIGGER_ID_HOST_D3_START = 26, IWL_FW_TRIGGER_ID_HOST_D3_END = 27, IWL_FW_TRIGGER_ID_HOST_BSS_MISSED_BEACONS = 28, IWL_FW_TRIGGER_ID_HOST_P2P_CLIENT_MISSED_BEACONS = 29, IWL_FW_TRIGGER_ID_HOST_PEER_CLIENT_TX_FAILURES = 30, IWL_FW_TRIGGER_ID_HOST_TX_WFD_ACTION_FRAME_FAILED = 31, IWL_FW_TRIGGER_ID_HOST_AUTH_ASSOC_FAILED = 32, IWL_FW_TRIGGER_ID_HOST_SCAN_COMPLETE = 33, IWL_FW_TRIGGER_ID_HOST_SCAN_ABORT = 34, IWL_FW_TRIGGER_ID_HOST_NIC_ALIVE = 35, IWL_FW_TRIGGER_ID_HOST_CHANNEL_SWITCH_COMPLETE = 36, IWL_FW_TRIGGER_ID_NUM, }; /* FW_DEBUG_TLV_TRIGGER_ID_E_VER_1 */ /** * enum iwl_fw_ini_apply_point * * @IWL_FW_INI_APPLY_INVALID: invalid * @IWL_FW_INI_APPLY_EARLY: pre loading FW * @IWL_FW_INI_APPLY_AFTER_ALIVE: first cmd from host after alive * @IWL_FW_INI_APPLY_POST_INIT: last cmd in initialization sequence * @IWL_FW_INI_APPLY_MISSED_BEACONS: missed beacons notification * @IWL_FW_INI_APPLY_SCAN_COMPLETE: scan completed * @IWL_FW_INI_APPLY_NUM: number of apply points */ enum iwl_fw_ini_apply_point { IWL_FW_INI_APPLY_INVALID, IWL_FW_INI_APPLY_EARLY, IWL_FW_INI_APPLY_AFTER_ALIVE, IWL_FW_INI_APPLY_POST_INIT, IWL_FW_INI_APPLY_MISSED_BEACONS, IWL_FW_INI_APPLY_SCAN_COMPLETE, IWL_FW_INI_APPLY_NUM, }; /* FW_DEBUG_TLV_APPLY_POINT_E_VER_1 */ /** * enum iwl_fw_ini_allocation_id * * @IWL_FW_INI_ALLOCATION_INVALID: invalid * @IWL_FW_INI_ALLOCATION_ID_DBGC1: allocation meant for DBGC1 configuration * @IWL_FW_INI_ALLOCATION_ID_DBGC2: allocation meant for DBGC2 configuration * @IWL_FW_INI_ALLOCATION_ID_DBGC3: allocation meant for 
DBGC3 configuration * @IWL_FW_INI_ALLOCATION_ID_SDFX: for SDFX module * @IWL_FW_INI_ALLOCATION_ID_FW_DUMP: used for crash and runtime dumps * @IWL_FW_INI_ALLOCATION_ID_USER_DEFINED: for future user scenarios * @IWL_FW_INI_ALLOCATION_NUM: number of allocation ids */ enum iwl_fw_ini_allocation_id { IWL_FW_INI_ALLOCATION_INVALID, IWL_FW_INI_ALLOCATION_ID_DBGC1, IWL_FW_INI_ALLOCATION_ID_DBGC2, IWL_FW_INI_ALLOCATION_ID_DBGC3, IWL_FW_INI_ALLOCATION_ID_SDFX, IWL_FW_INI_ALLOCATION_ID_FW_DUMP, IWL_FW_INI_ALLOCATION_ID_USER_DEFINED, IWL_FW_INI_ALLOCATION_NUM, }; /* FW_DEBUG_TLV_ALLOCATION_ID_E_VER_1 */ /** * enum iwl_fw_ini_buffer_location * * @IWL_FW_INI_LOCATION_INVALID: invalid * @IWL_FW_INI_LOCATION_SRAM_PATH: SRAM location * @IWL_FW_INI_LOCATION_DRAM_PATH: DRAM location * @IWL_FW_INI_LOCATION_NPK_PATH: NPK location */ enum iwl_fw_ini_buffer_location { IWL_FW_INI_LOCATION_INVALID, IWL_FW_INI_LOCATION_SRAM_PATH, IWL_FW_INI_LOCATION_DRAM_PATH, IWL_FW_INI_LOCATION_NPK_PATH, }; /* FW_DEBUG_TLV_BUFFER_LOCATION_E_VER_1 */ /** * enum iwl_fw_ini_debug_flow * * @IWL_FW_INI_DEBUG_INVALID: invalid * @IWL_FW_INI_DEBUG_DBTR_FLOW: undefined * @IWL_FW_INI_DEBUG_TB2DTF_FLOW: undefined */ enum iwl_fw_ini_debug_flow { IWL_FW_INI_DEBUG_INVALID, IWL_FW_INI_DEBUG_DBTR_FLOW, IWL_FW_INI_DEBUG_TB2DTF_FLOW, }; /* FW_DEBUG_TLV_FLOW_E_VER_1 */ /** * enum iwl_fw_ini_region_type * * @IWL_FW_INI_REGION_INVALID: invalid * @IWL_FW_INI_REGION_DEVICE_MEMORY: device internal memory * @IWL_FW_INI_REGION_PERIPHERY_MAC: periphery registers of MAC * @IWL_FW_INI_REGION_PERIPHERY_PHY: periphery registers of PHY * @IWL_FW_INI_REGION_PERIPHERY_AUX: periphery registers of AUX * @IWL_FW_INI_REGION_DRAM_BUFFER: DRAM buffer * @IWL_FW_INI_REGION_DRAM_IMR: IMR memory * @IWL_FW_INI_REGION_INTERNAL_BUFFER: undefined * @IWL_FW_INI_REGION_TXF: TX fifos * @IWL_FW_INI_REGION_RXF: RX fifo * @IWL_FW_INI_REGION_PAGING: paging memory * @IWL_FW_INI_REGION_CSR: CSR registers * @IWL_FW_INI_REGION_NOTIFICATION: FW notification data * @IWL_FW_INI_REGION_DHC: dhc response to dump * @IWL_FW_INI_REGION_LMAC_ERROR_TABLE: lmac error table * @IWL_FW_INI_REGION_UMAC_ERROR_TABLE: umac error table * @IWL_FW_INI_REGION_NUM: number of region types */ enum iwl_fw_ini_region_type { IWL_FW_INI_REGION_INVALID, IWL_FW_INI_REGION_DEVICE_MEMORY, IWL_FW_INI_REGION_PERIPHERY_MAC, IWL_FW_INI_REGION_PERIPHERY_PHY, IWL_FW_INI_REGION_PERIPHERY_AUX, IWL_FW_INI_REGION_DRAM_BUFFER, IWL_FW_INI_REGION_DRAM_IMR, IWL_FW_INI_REGION_INTERNAL_BUFFER, IWL_FW_INI_REGION_TXF, IWL_FW_INI_REGION_RXF, IWL_FW_INI_REGION_PAGING, IWL_FW_INI_REGION_CSR, IWL_FW_INI_REGION_NOTIFICATION, IWL_FW_INI_REGION_DHC, IWL_FW_INI_REGION_LMAC_ERROR_TABLE, IWL_FW_INI_REGION_UMAC_ERROR_TABLE, IWL_FW_INI_REGION_NUM }; /* FW_DEBUG_TLV_REGION_TYPE_E_VER_1 */ #endif backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h000066400000000000000000000311151351064634500272140ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. 
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright (C) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright (C) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #ifndef __iwl_fw_api_debug_h__ #define __iwl_fw_api_debug_h__ /** * enum iwl_debug_cmds - debug commands */ enum iwl_debug_cmds { /** * @LMAC_RD_WR: * LMAC memory read/write, using &struct iwl_dbg_mem_access_cmd and * &struct iwl_dbg_mem_access_rsp */ LMAC_RD_WR = 0x0, /** * @UMAC_RD_WR: * UMAC memory read/write, using &struct iwl_dbg_mem_access_cmd and * &struct iwl_dbg_mem_access_rsp */ UMAC_RD_WR = 0x1, /** * @DBGC_SUSPEND_RESUME: * DBGC suspend/resume command.
Uses a single dword as data: * 0 - resume DBGC recording * 1 - suspend DBGC recording */ DBGC_SUSPEND_RESUME = 0x7, /** * @MFU_ASSERT_DUMP_NTF: * &struct iwl_mfu_assert_dump_notif */ MFU_ASSERT_DUMP_NTF = 0xFE, }; /* Error response/notification */ enum { FW_ERR_UNKNOWN_CMD = 0x0, FW_ERR_INVALID_CMD_PARAM = 0x1, FW_ERR_SERVICE = 0x2, FW_ERR_ARC_MEMORY = 0x3, FW_ERR_ARC_CODE = 0x4, FW_ERR_WATCH_DOG = 0x5, FW_ERR_WEP_GRP_KEY_INDX = 0x10, FW_ERR_WEP_KEY_SIZE = 0x11, FW_ERR_OBSOLETE_FUNC = 0x12, FW_ERR_UNEXPECTED = 0xFE, FW_ERR_FATAL = 0xFF }; /** * enum iwl_dbg_suspend_resume_cmds - dbgc suspend resume command operations * @DBGC_RESUME_CMD: resume dbgc recording * @DBGC_SUSPEND_CMD: stop dbgc recording */ enum iwl_dbg_suspend_resume_cmds { DBGC_RESUME_CMD, DBGC_SUSPEND_CMD, }; /** * struct iwl_error_resp - FW error indication * ( REPLY_ERROR = 0x2 ) * @error_type: one of FW_ERR_* * @cmd_id: the command ID for which the error occurred * @reserved1: reserved * @bad_cmd_seq_num: sequence number of the erroneous command * @error_service: which service created the error, applicable only if * error_type = 2, otherwise 0 * @timestamp: TSF in usecs. */ struct iwl_error_resp { __le32 error_type; u8 cmd_id; u8 reserved1; __le16 bad_cmd_seq_num; __le32 error_service; __le64 timestamp; } __packed; #define TX_FIFO_MAX_NUM_9000 8 #define TX_FIFO_MAX_NUM 15 #define RX_FIFO_MAX_NUM 2 #define TX_FIFO_INTERNAL_MAX_NUM 6 /** * struct iwl_shared_mem_cfg_v2 - Shared memory configuration information * * @shared_mem_addr: shared memory addr (pre 8000 HW set to 0x0 as MARBH is not * accessible) * @shared_mem_size: shared memory size * @sample_buff_addr: internal sample (mon/adc) buff addr (pre 8000 HW set to * 0x0 as accessible only via DBGM RDAT) * @sample_buff_size: internal sample buff size * @txfifo_addr: start addr of TXF0 (excluding the context table 0.5KB), (pre * 8000 HW set to 0x0 as not accessible) * @txfifo_size: size of TXF0 ... TXF7 * @rxfifo_size: RXF1, RXF2 sizes. If there is no RXF2, it'll have a value of 0 * @page_buff_addr: used by UMAC and performance debug (page miss analysis), * when paging is not supported this should be 0 * @page_buff_size: size of %page_buff_addr * @rxfifo_addr: Start address of rxFifo * @internal_txfifo_addr: start address of internalFifo * @internal_txfifo_size: internal fifos' size * * NOTE: on firmware that doesn't have IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG * set, the last 3 members don't exist.
*/ struct iwl_shared_mem_cfg_v2 { __le32 shared_mem_addr; __le32 shared_mem_size; __le32 sample_buff_addr; __le32 sample_buff_size; __le32 txfifo_addr; __le32 txfifo_size[TX_FIFO_MAX_NUM_9000]; __le32 rxfifo_size[RX_FIFO_MAX_NUM]; __le32 page_buff_addr; __le32 page_buff_size; __le32 rxfifo_addr; __le32 internal_txfifo_addr; __le32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM]; } __packed; /* SHARED_MEM_ALLOC_API_S_VER_2 */ /** * struct iwl_shared_mem_lmac_cfg - LMAC shared memory configuration * * @txfifo_addr: start addr of TXF0 (excluding the context table 0.5KB) * @txfifo_size: size of TX FIFOs * @rxfifo1_addr: RXF1 addr * @rxfifo1_size: RXF1 size */ struct iwl_shared_mem_lmac_cfg { __le32 txfifo_addr; __le32 txfifo_size[TX_FIFO_MAX_NUM]; __le32 rxfifo1_addr; __le32 rxfifo1_size; } __packed; /* SHARED_MEM_ALLOC_LMAC_API_S_VER_1 */ /** * struct iwl_shared_mem_cfg - Shared memory configuration information * * @shared_mem_addr: shared memory address * @shared_mem_size: shared memory size * @sample_buff_addr: internal sample (mon/adc) buff addr * @sample_buff_size: internal sample buff size * @rxfifo2_addr: start addr of RXF2 * @rxfifo2_size: size of RXF2 * @page_buff_addr: used by UMAC and performance debug (page miss analysis), * when paging is not supported this should be 0 * @page_buff_size: size of %page_buff_addr * @lmac_num: number of LMACs (1 or 2) * @lmac_smem: per - LMAC smem data */ struct iwl_shared_mem_cfg { __le32 shared_mem_addr; __le32 shared_mem_size; __le32 sample_buff_addr; __le32 sample_buff_size; __le32 rxfifo2_addr; __le32 rxfifo2_size; __le32 page_buff_addr; __le32 page_buff_size; __le32 lmac_num; struct iwl_shared_mem_lmac_cfg lmac_smem[2]; } __packed; /* SHARED_MEM_ALLOC_API_S_VER_3 */ /** * struct iwl_mfuart_load_notif - mfuart image version & status * ( MFUART_LOAD_NOTIFICATION = 0xb1 ) * @installed_ver: installed image version * @external_ver: external image version * @status: MFUART loading status * @duration: MFUART loading time * @image_size: MFUART image size in bytes */ struct iwl_mfuart_load_notif { __le32 installed_ver; __le32 external_ver; __le32 status; __le32 duration; /* image size valid only in v2 of the command */ __le32 image_size; } __packed; /* MFU_LOADER_NTFY_API_S_VER_2 */ /** * struct iwl_mfu_assert_dump_notif - mfuart dump logs * ( MFU_ASSERT_DUMP_NTF = 0xfe ) * @assert_id: mfuart assert id that cause the notif * @curr_reset_num: number of asserts since uptime * @index_num: current chunk id * @parts_num: total number of chunks * @data_size: number of data bytes sent * @data: data buffer */ struct iwl_mfu_assert_dump_notif { __le32 assert_id; __le32 curr_reset_num; __le16 index_num; __le16 parts_num; __le32 data_size; __le32 data[0]; } __packed; /* MFU_DUMP_ASSERT_API_S_VER_1 */ /** * enum iwl_mvm_marker_id - marker ids * * The ids for different type of markers to insert into the usniffer logs * * @MARKER_ID_TX_FRAME_LATENCY: TX latency marker * @MARKER_ID_SYNC_CLOCK: sync FW time and systime */ enum iwl_mvm_marker_id { MARKER_ID_TX_FRAME_LATENCY = 1, MARKER_ID_SYNC_CLOCK = 2, }; /* MARKER_ID_API_E_VER_2 */ /** * struct iwl_mvm_marker - mark info into the usniffer logs * * (MARKER_CMD = 0xcb) * * Mark the UTC time stamp into the usniffer logs together with additional * metadata, so the usniffer output can be parsed. * In the command response the ucode will return the GP2 time. * * @dw_len: The amount of dwords following this byte including this byte. * @marker_id: A unique marker id (iwl_mvm_marker_id). * @reserved: reserved. 
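 *	(Sizing note, illustrative: the fixed fields occupy 3 dwords, one
 *	for dw_len/marker_id/reserved and two for the 64-bit timestamp, so
 *	a marker carrying N metadata dwords has dw_len = 3 + N.)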
* @timestamp: in milliseconds since 1970-01-01 00:00:00 UTC * @metadata: additional metadata that will be written to the usniffer log */ struct iwl_mvm_marker { u8 dw_len; u8 marker_id; __le16 reserved; __le64 timestamp; __le32 metadata[0]; } __packed; /* MARKER_API_S_VER_1 */ /** * struct iwl_mvm_marker_rsp - Response to marker cmd * * @gp2: The gp2 clock value in the FW */ struct iwl_mvm_marker_rsp { __le32 gp2; } __packed; /* Operation types for the debug mem access */ enum { DEBUG_MEM_OP_READ = 0, DEBUG_MEM_OP_WRITE = 1, DEBUG_MEM_OP_WRITE_BYTES = 2, }; #define DEBUG_MEM_MAX_SIZE_DWORDS 32 /** * struct iwl_dbg_mem_access_cmd - Request the device to read/write memory * @op: DEBUG_MEM_OP_* * @addr: address to read/write from/to * @len: in dwords, to read/write * @data: for write operations, contains the source buffer */ struct iwl_dbg_mem_access_cmd { __le32 op; __le32 addr; __le32 len; __le32 data[]; } __packed; /* DEBUG_(U|L)MAC_RD_WR_CMD_API_S_VER_1 */ /* Status responses for the debug mem access */ enum { DEBUG_MEM_STATUS_SUCCESS = 0x0, DEBUG_MEM_STATUS_FAILED = 0x1, DEBUG_MEM_STATUS_LOCKED = 0x2, DEBUG_MEM_STATUS_HIDDEN = 0x3, DEBUG_MEM_STATUS_LENGTH = 0x4, }; /** * struct iwl_dbg_mem_access_rsp - Response to debug mem commands * @status: DEBUG_MEM_STATUS_* * @len: read dwords (0 for write operations) * @data: contains the read DWs */ struct iwl_dbg_mem_access_rsp { __le32 status; __le32 len; __le32 data[]; } __packed; /* DEBUG_(U|L)MAC_RD_WR_RSP_API_S_VER_1 */ #define LDBG_CFG_COMMAND_SIZE 80 #define BUFFER_ALLOCATION 0x27 #define START_DEBUG_RECORDING 0x29 #define STOP_DEBUG_RECORDING 0x2A /* maximum fragments to be allocated per target of allocationId */ #define IWL_BUFFER_LOCATION_MAX_FRAGS 2 /** * struct iwl_fragment_data - single fragment structure * @address: 64bit start address * @size: size in bytes */ struct iwl_fragment_data { __le64 address; __le32 size; } __packed; /* FRAGMENT_STRUCTURE_API_S_VER_1 */ /** * struct iwl_buffer_allocation_cmd - buffer allocation command structure * @allocation_id: id of the allocation * @buffer_location: location of the buffer * @num_frags: number of fragments * @fragments: memory fragments */ struct iwl_buffer_allocation_cmd { __le32 allocation_id; __le32 buffer_location; __le32 num_frags; struct iwl_fragment_data fragments[IWL_BUFFER_LOCATION_MAX_FRAGS]; } __packed; /* BUFFER_ALLOCATION_CMD_API_S_VER_1 */ /** * struct iwl_ldbg_config_cmd - LDBG config command * @type: configuration type * @pad: reserved space for type-dependent data */ struct iwl_ldbg_config_cmd { __le32 type; union { u8 pad[LDBG_CFG_COMMAND_SIZE - sizeof(__le32)]; struct iwl_buffer_allocation_cmd buffer_allocation; }; /* LDBG_CFG_BODY_API_U_VER_2 (partially) */ } __packed; /* LDBG_CFG_CMD_API_S_VER_2 */ /** * struct iwl_dbg_suspend_resume_cmd - dbgc suspend resume command * @operation: suspend or resume operation, uses * &enum iwl_dbg_suspend_resume_cmds */ struct iwl_dbg_suspend_resume_cmd { __le32 operation; } __packed; #endif /* __iwl_fw_api_debug_h__ */ backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/fw/api/filter.h000066400000000000000000000151211351064634500274120ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #ifndef __iwl_fw_api_filter_h__ #define __iwl_fw_api_filter_h__ #include "fw/api/mac.h" #define MAX_PORT_ID_NUM 2 #define MAX_MCAST_FILTERING_ADDRESSES 256 /** * struct iwl_mcast_filter_cmd - configure multicast filter. * @filter_own: Set 1 to filter out multicast packets sent by station itself * @port_id: Multicast MAC addresses array specifier. This is a strange way * to identify network interface adopted in host-device IF. * It is used by FW as index in array of addresses. This array has * MAX_PORT_ID_NUM members. * @count: Number of MAC addresses in the array * @pass_all: Set 1 to pass all multicast packets. * @bssid: current association BSSID. * @reserved: reserved * @addr_list: Place holder for array of MAC addresses. * IMPORTANT: add padding if necessary to ensure DWORD alignment. 
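 *	(Illustrative sizing sketch: with @count addresses the total
 *	command length is sizeof(struct iwl_mcast_filter_cmd) +
 *	ALIGN(count * ETH_ALEN, 4) bytes.)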
*/ struct iwl_mcast_filter_cmd { u8 filter_own; u8 port_id; u8 count; u8 pass_all; u8 bssid[6]; u8 reserved[2]; u8 addr_list[0]; } __packed; /* MCAST_FILTERING_CMD_API_S_VER_1 */ #define MAX_BCAST_FILTERS 8 #define MAX_BCAST_FILTER_ATTRS 2 /** * enum iwl_mvm_bcast_filter_attr_offset - written by fw for each Rx packet * @BCAST_FILTER_OFFSET_PAYLOAD_START: offset is from payload start. * @BCAST_FILTER_OFFSET_IP_END: offset is from ip header end (i.e. * start of ip payload). */ enum iwl_mvm_bcast_filter_attr_offset { BCAST_FILTER_OFFSET_PAYLOAD_START = 0, BCAST_FILTER_OFFSET_IP_END = 1, }; /** * struct iwl_fw_bcast_filter_attr - broadcast filter attribute * @offset_type: &enum iwl_mvm_bcast_filter_attr_offset. * @offset: starting offset of this pattern. * @reserved1: reserved * @val: value to match - big endian (MSB is the first * byte to match from offset pos). * @mask: mask to match (big endian). */ struct iwl_fw_bcast_filter_attr { u8 offset_type; u8 offset; __le16 reserved1; __be32 val; __be32 mask; } __packed; /* BCAST_FILTER_ATT_S_VER_1 */ /** * enum iwl_mvm_bcast_filter_frame_type - filter frame type * @BCAST_FILTER_FRAME_TYPE_ALL: consider all frames. * @BCAST_FILTER_FRAME_TYPE_IPV4: consider only ipv4 frames */ enum iwl_mvm_bcast_filter_frame_type { BCAST_FILTER_FRAME_TYPE_ALL = 0, BCAST_FILTER_FRAME_TYPE_IPV4 = 1, }; /** * struct iwl_fw_bcast_filter - broadcast filter * @discard: discard frame (1) or let it pass (0). * @frame_type: &enum iwl_mvm_bcast_filter_frame_type. * @reserved1: reserved * @num_attrs: number of valid attributes in this filter. * @attrs: attributes of this filter. a filter is considered matched * only when all its attributes are matched (i.e. AND relationship) */ struct iwl_fw_bcast_filter { u8 discard; u8 frame_type; u8 num_attrs; u8 reserved1; struct iwl_fw_bcast_filter_attr attrs[MAX_BCAST_FILTER_ATTRS]; } __packed; /* BCAST_FILTER_S_VER_1 */ /** * struct iwl_fw_bcast_mac - per-mac broadcast filtering configuration. * @default_discard: default action for this mac (discard (1) / pass (0)). * @reserved1: reserved * @attached_filters: bitmap of relevant filters for this mac. */ struct iwl_fw_bcast_mac { u8 default_discard; u8 reserved1; __le16 attached_filters; } __packed; /* BCAST_MAC_CONTEXT_S_VER_1 */ /** * struct iwl_bcast_filter_cmd - broadcast filtering configuration * @disable: enable (0) / disable (1) * @max_bcast_filters: max number of filters (MAX_BCAST_FILTERS) * @max_macs: max number of macs (NUM_MAC_INDEX_DRIVER) * @reserved1: reserved * @filters: broadcast filters * @macs: broadcast filtering configuration per-mac */ struct iwl_bcast_filter_cmd { u8 disable; u8 max_bcast_filters; u8 max_macs; u8 reserved1; struct iwl_fw_bcast_filter filters[MAX_BCAST_FILTERS]; struct iwl_fw_bcast_mac macs[NUM_MAC_INDEX_DRIVER]; } __packed; /* BCAST_FILTERING_HCMD_API_S_VER_1 */ #endif /* __iwl_fw_api_filter_h__ */ backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/fw/api/led.h000066400000000000000000000055711351064634500267010ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. 
* * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2017 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #ifndef __iwl_fw_api_led_h__ #define __iwl_fw_api_led_h__ /** * struct iwl_led_cmd - LED switching command * * @status: LED status (on/off) */ struct iwl_led_cmd { __le32 status; } __packed; /* LEDS_CMD_API_S_VER_2 */ #endif /* __iwl_fw_api_led_h__ */ backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/fw/api/location.h000066400000000000000000001066461351064634500277520ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2015 - 2017 Intel Deutschland GmbH * Copyright (C) 2018 Intel Corporation * Copyright (C) 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2015 - 2017 Intel Deutschland GmbH * Copyright (C) 2018 Intel Corporation * Copyright (C) 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #ifndef __iwl_fw_api_location_h__ #define __iwl_fw_api_location_h__ /** * enum iwl_location_subcmd_ids - location group command IDs */ enum iwl_location_subcmd_ids { /** * @TOF_RANGE_REQ_CMD: TOF ranging request, * uses &struct iwl_tof_range_req_cmd */ TOF_RANGE_REQ_CMD = 0x0, /** * @TOF_CONFIG_CMD: TOF configuration, uses &struct iwl_tof_config_cmd */ TOF_CONFIG_CMD = 0x1, /** * @TOF_RANGE_ABORT_CMD: abort ongoing ranging, uses * &struct iwl_tof_range_abort_cmd */ TOF_RANGE_ABORT_CMD = 0x2, /** * @TOF_RANGE_REQ_EXT_CMD: TOF extended ranging config, * uses &struct iwl_tof_range_req_ext_cmd */ TOF_RANGE_REQ_EXT_CMD = 0x3, /** * @TOF_RESPONDER_CONFIG_CMD: FTM responder configuration, * uses &struct iwl_tof_responder_config_cmd */ TOF_RESPONDER_CONFIG_CMD = 0x4, /** * @TOF_RESPONDER_DYN_CONFIG_CMD: FTM dynamic configuration, * uses &struct iwl_tof_responder_dyn_config_cmd */ TOF_RESPONDER_DYN_CONFIG_CMD = 0x5, /** * @CSI_HEADER_NOTIFICATION: CSI header */ CSI_HEADER_NOTIFICATION = 0xFA, /** * @CSI_CHUNKS_NOTIFICATION: CSI chunk, * uses &struct iwl_csi_chunk_notification */ CSI_CHUNKS_NOTIFICATION = 0xFB, /** * @TOF_LC_NOTIF: used for LCI/civic location, contains just * the action frame */ TOF_LC_NOTIF = 0xFC, /** * @TOF_RESPONDER_STATS: FTM responder statistics notification, * uses &struct iwl_ftm_responder_stats */ TOF_RESPONDER_STATS = 0xFD, /** * @TOF_MCSI_DEBUG_NOTIF: MCSI debug notification, uses * &struct iwl_tof_mcsi_notif */ TOF_MCSI_DEBUG_NOTIF = 0xFE, /** * @TOF_RANGE_RESPONSE_NOTIF: ranging response, using * &struct iwl_tof_range_rsp_ntfy */ TOF_RANGE_RESPONSE_NOTIF = 0xFF, }; /** * struct iwl_tof_config_cmd - ToF configuration * @tof_disabled: indicates if ToF is disabled (or not) * @one_sided_disabled: indicates if one-sided is disabled (or not) * 
@is_debug_mode: indicates if debug mode is active * @is_buf_required: indicates if channel estimation buffer is required */ struct iwl_tof_config_cmd { u8 tof_disabled; u8 one_sided_disabled; u8 is_debug_mode; u8 is_buf_required; } __packed; /** * enum iwl_tof_bandwidth - values for iwl_tof_range_req_ap_entry.bandwidth * @IWL_TOF_BW_20_LEGACY: 20 MHz non-HT * @IWL_TOF_BW_20_HT: 20 MHz HT * @IWL_TOF_BW_40: 40 MHz * @IWL_TOF_BW_80: 80 MHz * @IWL_TOF_BW_160: 160 MHz */ enum iwl_tof_bandwidth { IWL_TOF_BW_20_LEGACY, IWL_TOF_BW_20_HT, IWL_TOF_BW_40, IWL_TOF_BW_80, IWL_TOF_BW_160, }; /* LOCAT_BW_TYPE_E */ /* * enum iwl_tof_algo_type - Algorithm type for range measurement request */ enum iwl_tof_algo_type { IWL_TOF_ALGO_TYPE_MAX_LIKE = 0, IWL_TOF_ALGO_TYPE_LINEAR_REG = 1, IWL_TOF_ALGO_TYPE_FFT = 2, /* Keep last */ IWL_TOF_ALGO_TYPE_INVALID, }; /* ALGO_TYPE_E */ /* * enum iwl_tof_mcsi_ntfy - Enable/Disable MCSI notifications */ enum iwl_tof_mcsi_enable { IWL_TOF_MCSI_DISABLED = 0, IWL_TOF_MCSI_ENABLED = 1, }; /* MCSI_ENABLE_E */ /** * enum iwl_tof_responder_cmd_valid_field - valid fields in the responder cfg * @IWL_TOF_RESPONDER_CMD_VALID_CHAN_INFO: channel info is valid * @IWL_TOF_RESPONDER_CMD_VALID_TOA_OFFSET: ToA offset is valid * @IWL_TOF_RESPONDER_CMD_VALID_COMMON_CALIB: common calibration mode is valid * @IWL_TOF_RESPONDER_CMD_VALID_SPECIFIC_CALIB: specific calibration mode is * valid * @IWL_TOF_RESPONDER_CMD_VALID_BSSID: BSSID is valid * @IWL_TOF_RESPONDER_CMD_VALID_TX_ANT: TX antenna is valid * @IWL_TOF_RESPONDER_CMD_VALID_ALGO_TYPE: algorithm type is valid * @IWL_TOF_RESPONDER_CMD_VALID_NON_ASAP_SUPPORT: non-ASAP support is valid * @IWL_TOF_RESPONDER_CMD_VALID_STATISTICS_REPORT_SUPPORT: statistics report * support is valid * @IWL_TOF_RESPONDER_CMD_VALID_MCSI_NOTIF_SUPPORT: MCSI notification support * is valid * @IWL_TOF_RESPONDER_CMD_VALID_FAST_ALGO_SUPPORT: fast algorithm support * is valid * @IWL_TOF_RESPONDER_CMD_VALID_RETRY_ON_ALGO_FAIL: retry on algorithm failure * is valid * @IWL_TOF_RESPONDER_CMD_VALID_STA_ID: station ID is valid */ enum iwl_tof_responder_cmd_valid_field { IWL_TOF_RESPONDER_CMD_VALID_CHAN_INFO = BIT(0), IWL_TOF_RESPONDER_CMD_VALID_TOA_OFFSET = BIT(1), IWL_TOF_RESPONDER_CMD_VALID_COMMON_CALIB = BIT(2), IWL_TOF_RESPONDER_CMD_VALID_SPECIFIC_CALIB = BIT(3), IWL_TOF_RESPONDER_CMD_VALID_BSSID = BIT(4), IWL_TOF_RESPONDER_CMD_VALID_TX_ANT = BIT(5), IWL_TOF_RESPONDER_CMD_VALID_ALGO_TYPE = BIT(6), IWL_TOF_RESPONDER_CMD_VALID_NON_ASAP_SUPPORT = BIT(7), IWL_TOF_RESPONDER_CMD_VALID_STATISTICS_REPORT_SUPPORT = BIT(8), IWL_TOF_RESPONDER_CMD_VALID_MCSI_NOTIF_SUPPORT = BIT(9), IWL_TOF_RESPONDER_CMD_VALID_FAST_ALGO_SUPPORT = BIT(10), IWL_TOF_RESPONDER_CMD_VALID_RETRY_ON_ALGO_FAIL = BIT(11), IWL_TOF_RESPONDER_CMD_VALID_STA_ID = BIT(12), }; /** * enum iwl_tof_responder_cfg_flags - responder configuration flags * @IWL_TOF_RESPONDER_FLAGS_NON_ASAP_SUPPORT: non-ASAP support * @IWL_TOF_RESPONDER_FLAGS_REPORT_STATISTICS: report statistics * @IWL_TOF_RESPONDER_FLAGS_REPORT_MCSI: report MCSI * @IWL_TOF_RESPONDER_FLAGS_ALGO_TYPE: algorithm type * @IWL_TOF_RESPONDER_FLAGS_TOA_OFFSET_MODE: ToA offset mode * @IWL_TOF_RESPONDER_FLAGS_COMMON_CALIB_MODE: common calibration mode * @IWL_TOF_RESPONDER_FLAGS_SPECIFIC_CALIB_MODE: specific calibration mode * @IWL_TOF_RESPONDER_FLAGS_FAST_ALGO_SUPPORT: fast algorithm support * @IWL_TOF_RESPONDER_FLAGS_RETRY_ON_ALGO_FAIL: retry on algorithm fail * @IWL_TOF_RESPONDER_FLAGS_FTM_TX_ANT: TX antenna mask */ enum iwl_tof_responder_cfg_flags {
IWL_TOF_RESPONDER_FLAGS_NON_ASAP_SUPPORT = BIT(0), IWL_TOF_RESPONDER_FLAGS_REPORT_STATISTICS = BIT(1), IWL_TOF_RESPONDER_FLAGS_REPORT_MCSI = BIT(2), IWL_TOF_RESPONDER_FLAGS_ALGO_TYPE = BIT(3) | BIT(4) | BIT(5), IWL_TOF_RESPONDER_FLAGS_TOA_OFFSET_MODE = BIT(6), IWL_TOF_RESPONDER_FLAGS_COMMON_CALIB_MODE = BIT(7), IWL_TOF_RESPONDER_FLAGS_SPECIFIC_CALIB_MODE = BIT(8), IWL_TOF_RESPONDER_FLAGS_FAST_ALGO_SUPPORT = BIT(9), IWL_TOF_RESPONDER_FLAGS_RETRY_ON_ALGO_FAIL = BIT(10), IWL_TOF_RESPONDER_FLAGS_FTM_TX_ANT = RATE_MCS_ANT_ABC_MSK, }; /** * struct iwl_tof_responder_config_cmd - ToF AP mode (for debug) * @cmd_valid_fields: &iwl_tof_responder_cmd_valid_field * @responder_cfg_flags: &iwl_tof_responder_cfg_flags * @bandwidth: current AP Bandwidth: &enum iwl_tof_bandwidth * @rate: current AP rate * @channel_num: current AP Channel * @ctrl_ch_position: coding of the control channel position relative to * the center frequency, see iwl_mvm_get_ctrl_pos() * @sta_id: index of the AP STA when in AP mode * @reserved1: reserved * @toa_offset: Artificial addition [pSec] for the ToA - to be used for debug * purposes, simulating station movement by adding various values * to this field * @common_calib: XVT: common calibration value * @specific_calib: XVT: specific calibration value * @bssid: Current AP BSSID * @reserved2: reserved */ struct iwl_tof_responder_config_cmd { __le32 cmd_valid_fields; __le32 responder_cfg_flags; u8 bandwidth; u8 rate; u8 channel_num; u8 ctrl_ch_position; u8 sta_id; u8 reserved1; __le16 toa_offset; __le16 common_calib; __le16 specific_calib; u8 bssid[ETH_ALEN]; __le16 reserved2; } __packed; /* TOF_RESPONDER_CONFIG_CMD_API_S_VER_6 */ #define IWL_LCI_CIVIC_IE_MAX_SIZE 400 /** * struct iwl_tof_responder_dyn_config_cmd - Dynamic responder settings * @lci_len: The length of the 1st (LCI) part in the @lci_civic buffer * @civic_len: The length of the 2nd (CIVIC) part in the @lci_civic buffer * @lci_civic: The LCI/CIVIC buffer. LCI data (if exists) comes first, then, if * needed, 0-padding such that the next part is dword-aligned, then CIVIC * data (if exists) follows, and then 0-padding again so that the total * buffer length is a multiple of 4. */ struct iwl_tof_responder_dyn_config_cmd { __le32 lci_len; __le32 civic_len; u8 lci_civic[]; } __packed; /* TOF_RESPONDER_DYN_CONFIG_CMD_API_S_VER_2 */ /** * struct iwl_tof_range_req_ext_cmd - extended range req for WLS * @tsf_timer_offset_msec: the recommended time offset (mSec) from the AP's TSF * @reserved: reserved * @min_delta_ftm: Minimal time between two consecutive measurements, * in units of 100us. 0 means no preference by station * @ftm_format_and_bw20M: FTM Channel Spacing/Format for 20MHz: recommended * value to be sent to the AP * @ftm_format_and_bw40M: FTM Channel Spacing/Format for 40MHz: recommended * value to be sent to the AP * @ftm_format_and_bw80M: FTM Channel Spacing/Format for 80MHz: recommended * value to be sent to the AP */ struct iwl_tof_range_req_ext_cmd { __le16 tsf_timer_offset_msec; __le16 reserved; u8 min_delta_ftm; u8 ftm_format_and_bw20M; u8 ftm_format_and_bw40M; u8 ftm_format_and_bw80M; } __packed; /** * enum iwl_tof_location_query - values for query bitmap * @IWL_TOF_LOC_LCI: query LCI * @IWL_TOF_LOC_CIVIC: query civic */ enum iwl_tof_location_query { IWL_TOF_LOC_LCI = 0x01, IWL_TOF_LOC_CIVIC = 0x02, }; /** * struct iwl_tof_range_req_ap_entry_v2 - AP configuration parameters * @channel_num: Current AP Channel * @bandwidth: Current AP Bandwidth. One of iwl_tof_bandwidth.
* @tsf_delta_direction: TSF relative to the subject AP * @ctrl_ch_position: Coding of the control channel position relative to the * center frequency, see iwl_mvm_get_ctrl_pos(). * @bssid: AP's BSSID * @measure_type: Measurement type: 0 - two sided, 1 - One sided * @num_of_bursts: Recommended value to be sent to the AP. 2s Exponent of the * number of measurement iterations (min 2^0 = 1, max 2^14) * @burst_period: Recommended value to be sent to the AP. Measurement * periodicity in units of 100ms; ignored if num_of_bursts = 0 * @samples_per_burst: 2-sided: the number of FTM pairs in a single burst (1-31); * 1-sided: how many rts/cts pairs should be used per burst. * @retries_per_sample: Max number of retries that the LMAC should send * in case of no replies by the AP. * @tsf_delta: TSF Delta in units of microseconds. * The difference between the AP TSF and the device local clock. * @location_req: Location Request Bit[0] LCI should be sent in the FTMR; * Bit[1] Civic should be sent in the FTMR * @asap_mode: 0 - non asap mode, 1 - asap mode (not relevant for one sided) * @enable_dyn_ack: Enable Dynamic ACK BW. * 0: Initiator interacts with regular AP; * 1: Initiator interacts with Responder machine: need to send the * Initiator Acks with HT 40MHz / 80MHz, since the Responder should * use it for its channel estimation measurement (this flag will be set * when we configure the opposite machine to be Responder). * @rssi: Last received value * legal values: -128 to 0; values above 0 (e.g. 0x7f) indicate an * invalid value. * @algo_type: &enum iwl_tof_algo_type * @notify_mcsi: &enum iwl_tof_mcsi_ntfy. * @reserved: For alignment and future use */ struct iwl_tof_range_req_ap_entry_v2 { u8 channel_num; u8 bandwidth; u8 tsf_delta_direction; u8 ctrl_ch_position; u8 bssid[ETH_ALEN]; u8 measure_type; u8 num_of_bursts; __le16 burst_period; u8 samples_per_burst; u8 retries_per_sample; __le32 tsf_delta; u8 location_req; u8 asap_mode; u8 enable_dyn_ack; s8 rssi; u8 algo_type; u8 notify_mcsi; __le16 reserved; } __packed; /* LOCATION_RANGE_REQ_AP_ENTRY_CMD_API_S_VER_2 */ /** * enum iwl_initiator_ap_flags - per responder FTM configuration flags * @IWL_INITIATOR_AP_FLAGS_ASAP: Request for ASAP measurement. * @IWL_INITIATOR_AP_FLAGS_LCI_REQUEST: Request for LCI information * @IWL_INITIATOR_AP_FLAGS_CIVIC_REQUEST: Request for CIVIC information * @IWL_INITIATOR_AP_FLAGS_DYN_ACK: Send HT/VHT ack for FTM frames. If not set, * 20MHz dup acks will be sent. * @IWL_INITIATOR_AP_FLAGS_ALGO_LR: Use LR algo type for rtt calculation. * Default algo type is ML. * @IWL_INITIATOR_AP_FLAGS_ALGO_FFT: Use FFT algo type for rtt calculation. * Default algo type is ML. * @IWL_INITIATOR_AP_FLAGS_MCSI_REPORT: Send the MCSI for each FTM frame to the * driver. */ enum iwl_initiator_ap_flags { IWL_INITIATOR_AP_FLAGS_ASAP = BIT(1), IWL_INITIATOR_AP_FLAGS_LCI_REQUEST = BIT(2), IWL_INITIATOR_AP_FLAGS_CIVIC_REQUEST = BIT(3), IWL_INITIATOR_AP_FLAGS_DYN_ACK = BIT(4), IWL_INITIATOR_AP_FLAGS_ALGO_LR = BIT(5), IWL_INITIATOR_AP_FLAGS_ALGO_FFT = BIT(6), IWL_INITIATOR_AP_FLAGS_MCSI_REPORT = BIT(8), }; /** * struct iwl_tof_range_req_ap_entry - AP configuration parameters * @initiator_ap_flags: see &enum iwl_initiator_ap_flags. * @channel_num: AP Channel number * @bandwidth: AP bandwidth. One of iwl_tof_bandwidth. * @ctrl_ch_position: Coding of the control channel position relative to the * center frequency, see iwl_mvm_get_ctrl_pos(). * @ftmr_max_retries: Max number of retries to send the FTMR in case of no * reply from the AP.
* @bssid: AP's BSSID * @burst_period: Recommended value to be sent to the AP. Measurement * periodicity in units of 100ms; ignored if num_of_bursts_exp = 0 * @samples_per_burst: the number of FTM pairs in a single burst (1-31); * @num_of_bursts: Recommended value to be sent to the AP. 2s Exponent of * the number of measurement iterations (min 2^0 = 1, max 2^14) * @reserved: For alignment and future use * @tsf_delta: not in use */ struct iwl_tof_range_req_ap_entry { __le32 initiator_ap_flags; u8 channel_num; u8 bandwidth; u8 ctrl_ch_position; u8 ftmr_max_retries; u8 bssid[ETH_ALEN]; __le16 burst_period; u8 samples_per_burst; u8 num_of_bursts; __le16 reserved; __le32 tsf_delta; } __packed; /* LOCATION_RANGE_REQ_AP_ENTRY_CMD_API_S_VER_3 */ /** * enum iwl_tof_response_mode * @IWL_MVM_TOF_RESPONSE_ASAP: report each AP measurement separately as soon as * possible (not supported for this release) * @IWL_MVM_TOF_RESPONSE_TIMEOUT: report all AP measurements as a batch upon * timeout expiration * @IWL_MVM_TOF_RESPONSE_COMPLETE: report all AP measurements as a batch at the * earlier of: measurements completion / timeout * expiration. */ enum iwl_tof_response_mode { IWL_MVM_TOF_RESPONSE_ASAP, IWL_MVM_TOF_RESPONSE_TIMEOUT, IWL_MVM_TOF_RESPONSE_COMPLETE, }; /** * enum iwl_tof_initiator_flags * * @IWL_TOF_INITIATOR_FLAGS_FAST_ALGO_DISABLED: disable fast algo, meaning run * the algo on ant A+B, instead of only one of them. * @IWL_TOF_INITIATOR_FLAGS_RX_CHAIN_SEL_A: open RX antenna A for FTMs RX * @IWL_TOF_INITIATOR_FLAGS_RX_CHAIN_SEL_B: open RX antenna B for FTMs RX * @IWL_TOF_INITIATOR_FLAGS_RX_CHAIN_SEL_C: open RX antenna C for FTMs RX * @IWL_TOF_INITIATOR_FLAGS_TX_CHAIN_SEL_A: use antenna A for TX ACKs during FTM * @IWL_TOF_INITIATOR_FLAGS_TX_CHAIN_SEL_B: use antenna B for TX ACKs during FTM * @IWL_TOF_INITIATOR_FLAGS_TX_CHAIN_SEL_C: use antenna C for TX ACKs during FTM * @IWL_TOF_INITIATOR_FLAGS_MACADDR_RANDOM: use random mac address for FTM * @IWL_TOF_INITIATOR_FLAGS_SPECIFIC_CALIB: use the specific calib value from * the range request command * @IWL_TOF_INITIATOR_FLAGS_COMMON_CALIB: use the common calib value from the * range request command * @IWL_TOF_INITIATOR_FLAGS_NON_ASAP_SUPPORT: support non-asap measurements */ enum iwl_tof_initiator_flags { IWL_TOF_INITIATOR_FLAGS_FAST_ALGO_DISABLED = BIT(0), IWL_TOF_INITIATOR_FLAGS_RX_CHAIN_SEL_A = BIT(1), IWL_TOF_INITIATOR_FLAGS_RX_CHAIN_SEL_B = BIT(2), IWL_TOF_INITIATOR_FLAGS_RX_CHAIN_SEL_C = BIT(3), IWL_TOF_INITIATOR_FLAGS_TX_CHAIN_SEL_A = BIT(4), IWL_TOF_INITIATOR_FLAGS_TX_CHAIN_SEL_B = BIT(5), IWL_TOF_INITIATOR_FLAGS_TX_CHAIN_SEL_C = BIT(6), IWL_TOF_INITIATOR_FLAGS_MACADDR_RANDOM = BIT(7), IWL_TOF_INITIATOR_FLAGS_SPECIFIC_CALIB = BIT(15), IWL_TOF_INITIATOR_FLAGS_COMMON_CALIB = BIT(16), IWL_TOF_INITIATOR_FLAGS_NON_ASAP_SUPPORT = BIT(20), }; /* LOCATION_RANGE_REQ_CMD_API_S_VER_5 */ #define IWL_MVM_TOF_MAX_APS 5 #define IWL_MVM_TOF_MAX_TWO_SIDED_APS 5 /** * struct iwl_tof_range_req_cmd_v5 - start measurement cmd * @initiator_flags: see flags @ iwl_tof_initiator_flags * @request_id: A Token incremented per request. The same Token will be * sent back in the range response * @initiator: 0 - NW initiated, 1 - Client Initiated * @one_sided_los_disable: '0' - run ML-Algo for both ToF/OneSided, * '1' - run ML-Algo for ToF only * @req_timeout: Requested timeout of the response in units of 100ms.
* This is equivalent to the session time configured to the * LMAC in Initiator Request * @report_policy: Supported partially for this release: For current release - * the range report will be uploaded as a batch when ready or * when the session is done (successfully / partially). * One of iwl_tof_response_mode. * @reserved0: reserved * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS) * @macaddr_random: '0' Use default source MAC address (i.e. p2_p), * '1' Use MAC Address randomization according to the below * @range_req_bssid: ranging request BSSID * @macaddr_template: MAC address template to use for non-randomized bits * @macaddr_mask: Bits set to 0 shall be copied from the MAC address template. * Bits set to 1 shall be randomized by the UMAC * @ftm_rx_chains: Rx chain to open to receive Responder's FTMs (XVT) * @ftm_tx_chains: Tx chain to send the ack to the Responder FTM (XVT) * @common_calib: The common calib value to inject to this measurement calc * @specific_calib: The specific calib value to inject to this measurement calc * @ap: per-AP request data */ struct iwl_tof_range_req_cmd_v5 { __le32 initiator_flags; u8 request_id; u8 initiator; u8 one_sided_los_disable; u8 req_timeout; u8 report_policy; u8 reserved0; u8 num_of_ap; u8 macaddr_random; u8 range_req_bssid[ETH_ALEN]; u8 macaddr_template[ETH_ALEN]; u8 macaddr_mask[ETH_ALEN]; u8 ftm_rx_chains; u8 ftm_tx_chains; __le16 common_calib; __le16 specific_calib; struct iwl_tof_range_req_ap_entry_v2 ap[IWL_MVM_TOF_MAX_APS]; } __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_5 */ /** * struct iwl_tof_range_req_cmd - start measurement cmd * @initiator_flags: see flags @ iwl_tof_initiator_flags * @request_id: A Token incremented per request. The same Token will be * sent back in the range response * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS) * @range_req_bssid: ranging request BSSID * @macaddr_mask: Bits set to 0 shall be copied from the MAC address template. * Bits set to 1 shall be randomized by the UMAC * @macaddr_template: MAC address template to use for non-randomized bits * @req_timeout_ms: Requested timeout of the response in units of milliseconds. * This is the session time for completing the measurement. * @tsf_mac_id: report the measurement start time for each ap in terms of the * TSF of this mac id. 0xff to disable TSF reporting. * @common_calib: The common calib value to inject to this measurement calc * @specific_calib: The specific calib value to inject to this measurement calc * @ap: per-AP request data, see &struct iwl_tof_range_req_ap_entry. */ struct iwl_tof_range_req_cmd { __le32 initiator_flags; u8 request_id; u8 num_of_ap; u8 range_req_bssid[ETH_ALEN]; u8 macaddr_mask[ETH_ALEN]; u8 macaddr_template[ETH_ALEN]; __le32 req_timeout_ms; __le32 tsf_mac_id; __le16 common_calib; __le16 specific_calib; struct iwl_tof_range_req_ap_entry ap[IWL_MVM_TOF_MAX_APS]; } __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_7 */ /* * enum iwl_tof_range_request_status - status of the sent request * @IWL_TOF_RANGE_REQUEST_STATUS_SUCCESS - FW successfully received the * request * @IWL_TOF_RANGE_REQUEST_STATUS_BUSY - FW is busy with a previous request, the * sent request will not be handled */ enum iwl_tof_range_request_status { IWL_TOF_RANGE_REQUEST_STATUS_SUCCESS, IWL_TOF_RANGE_REQUEST_STATUS_BUSY, }; /** * enum iwl_tof_entry_status * * @IWL_TOF_ENTRY_SUCCESS: successful measurement. * @IWL_TOF_ENTRY_GENERAL_FAILURE: General failure.
* @IWL_TOF_ENTRY_NO_RESPONSE: Responder didn't reply to the request. * @IWL_TOF_ENTRY_REQUEST_REJECTED: Responder rejected the request. * @IWL_TOF_ENTRY_NOT_SCHEDULED: Time event was scheduled but not called yet. * @IWL_TOF_ENTRY_TIMING_MEASURE_TIMEOUT: Time event triggered but no * measurement was completed. * @IWL_TOF_ENTRY_TARGET_DIFF_CH_CANNOT_CHANGE: No range due to inability to * switch from the primary channel. * @IWL_TOF_ENTRY_RANGE_NOT_SUPPORTED: Device doesn't support FTM. * @IWL_TOF_ENTRY_REQUEST_ABORT_UNKNOWN_REASON: Request aborted due to unknown * reason. * @IWL_TOF_ENTRY_LOCATION_INVALID_T1_T4_TIME_STAMP: Failure due to invalid * T1/T4. * @IWL_TOF_ENTRY_11MC_PROTOCOL_FAILURE: Failure due to invalid FTM frame * structure. * @IWL_TOF_ENTRY_REQUEST_CANNOT_SCHED: Request cannot be scheduled. * @IWL_TOF_ENTRY_RESPONDER_CANNOT_COLABORATE: Responder cannot serve the * initiator for some period, period supplied in @refusal_period. * @IWL_TOF_ENTRY_BAD_REQUEST_ARGS: Bad request arguments. * @IWL_TOF_ENTRY_WIFI_NOT_ENABLED: Wifi not enabled. * @IWL_TOF_ENTRY_RESPONDER_OVERRIDE_PARAMS: Responder overrode the original * parameters within the current session. */ enum iwl_tof_entry_status { IWL_TOF_ENTRY_SUCCESS = 0, IWL_TOF_ENTRY_GENERAL_FAILURE = 1, IWL_TOF_ENTRY_NO_RESPONSE = 2, IWL_TOF_ENTRY_REQUEST_REJECTED = 3, IWL_TOF_ENTRY_NOT_SCHEDULED = 4, IWL_TOF_ENTRY_TIMING_MEASURE_TIMEOUT = 5, IWL_TOF_ENTRY_TARGET_DIFF_CH_CANNOT_CHANGE = 6, IWL_TOF_ENTRY_RANGE_NOT_SUPPORTED = 7, IWL_TOF_ENTRY_REQUEST_ABORT_UNKNOWN_REASON = 8, IWL_TOF_ENTRY_LOCATION_INVALID_T1_T4_TIME_STAMP = 9, IWL_TOF_ENTRY_11MC_PROTOCOL_FAILURE = 10, IWL_TOF_ENTRY_REQUEST_CANNOT_SCHED = 11, IWL_TOF_ENTRY_RESPONDER_CANNOT_COLABORATE = 12, IWL_TOF_ENTRY_BAD_REQUEST_ARGS = 13, IWL_TOF_ENTRY_WIFI_NOT_ENABLED = 14, IWL_TOF_ENTRY_RESPONDER_OVERRIDE_PARAMS = 15, }; /* LOCATION_RANGE_RSP_AP_ENTRY_NTFY_API_S_VER_2 */ /** * struct iwl_tof_range_rsp_ap_entry_ntfy_v3 - AP parameters (response) * @bssid: BSSID of the AP * @measure_status: current AP's measurement status, one of * &enum iwl_tof_entry_status. * @measure_bw: Current AP Bandwidth: 0 20MHz, 1 40MHz, 2 80MHz * @rtt: The Round Trip Time of the last measurement for the * current AP [pSec] * @rtt_variance: The Variance of the RTT values measured for current AP * @rtt_spread: The Difference between the maximum and the minimum RTT * values measured for current AP in the current session [pSec] * @rssi: RSSI as uploaded in the Channel Estimation notification * @rssi_spread: The Difference between the maximum and the minimum RSSI values * measured for current AP in the current session * @reserved: reserved * @refusal_period: refusal period in case of * @IWL_TOF_ENTRY_RESPONDER_CANNOT_COLABORATE [sec] * @range: Measured range [cm] * @range_variance: Measured range variance [cm] * @timestamp: The GP2 Clock [usec] where Channel Estimation notification was * uploaded by the LMAC * @t2t3_initiator: as calculated from the algo in the initiator * @t1t4_responder: as calculated from the algo in the responder * @common_calib: Calib value that was used for this AP measurement * @specific_calib: value that was used for this AP measurement * @papd_calib_output: The result of the tof papd calibration that was injected * into the algorithm.
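* * Unit sketch (illustrative only, not part of the firmware API): @rtt is a round trip in picoseconds while @range is a one-way distance in cm. Light covers about 0.03 cm per psec, so the two relate roughly as range_cm ~= (rtt_psec / 2) * 0.03, e.g. rtt = 33,333 psec works out to about 500 cm. The firmware already reports @range directly; this only shows how the units line up.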
*/ struct iwl_tof_range_rsp_ap_entry_ntfy_v3 { u8 bssid[ETH_ALEN]; u8 measure_status; u8 measure_bw; __le32 rtt; __le32 rtt_variance; __le32 rtt_spread; s8 rssi; u8 rssi_spread; u8 reserved; u8 refusal_period; __le32 range; __le32 range_variance; __le32 timestamp; __le32 t2t3_initiator; __le32 t1t4_responder; __le16 common_calib; __le16 specific_calib; __le32 papd_calib_output; } __packed; /* LOCATION_RANGE_RSP_AP_ETRY_NTFY_API_S_VER_3 */ /** * struct iwl_tof_range_rsp_ap_entry_ntfy_v4 - AP parameters (response) * @bssid: BSSID of the AP * @measure_status: current AP's measurement status, one of * &enum iwl_tof_entry_status. * @measure_bw: Current AP Bandwidth: 0 20MHz, 1 40MHz, 2 80MHz * @rtt: The Round Trip Time of the last measurement for the * current AP [pSec] * @rtt_variance: The Variance of the RTT values measured for current AP * @rtt_spread: The Difference between the maximum and the minimum RTT * values measured for current AP in the current session [pSec] * @rssi: RSSI as uploaded in the Channel Estimation notification * @rssi_spread: The Difference between the maximum and the minimum RSSI values * measured for current AP in the current session * @last_burst: 1 if no more FTM sessions are scheduled for this responder * @refusal_period: refusal period in case of * @IWL_TOF_ENTRY_RESPONDER_CANNOT_COLABORATE [sec] * @timestamp: The GP2 Clock [usec] where Channel Estimation notification was * uploaded by the LMAC * @start_tsf: measurement start time in TSF of the mac specified in the range * request * @rx_rate_n_flags: rate and flags of the last FTM frame received from this * responder * @tx_rate_n_flags: rate and flags of the last ack sent to this responder * @t2t3_initiator: as calculated from the algo in the initiator * @t1t4_responder: as calculated from the algo in the responder * @common_calib: Calib value that was used for this AP measurement * @specific_calib: value that was used for this AP measurement * @papd_calib_output: The result of the tof papd calibration that was injected * into the algorithm. */ struct iwl_tof_range_rsp_ap_entry_ntfy_v4 { u8 bssid[ETH_ALEN]; u8 measure_status; u8 measure_bw; __le32 rtt; __le32 rtt_variance; __le32 rtt_spread; s8 rssi; u8 rssi_spread; u8 last_burst; u8 refusal_period; __le32 timestamp; __le32 start_tsf; __le32 rx_rate_n_flags; __le32 tx_rate_n_flags; __le32 t2t3_initiator; __le32 t1t4_responder; __le16 common_calib; __le16 specific_calib; __le32 papd_calib_output; } __packed; /* LOCATION_RANGE_RSP_AP_ETRY_NTFY_API_S_VER_4 */ /** * struct iwl_tof_range_rsp_ap_entry_ntfy - AP parameters (response) * @bssid: BSSID of the AP * @measure_status: current AP's measurement status, one of * &enum iwl_tof_entry_status.
* @measure_bw: Current AP Bandwidth: 0 20MHz, 1 40MHz, 2 80MHz * @rtt: The Round Trip Time of the last measurement for the * current AP [pSec] * @rtt_variance: The Variance of the RTT values measured for current AP * @rtt_spread: The Difference between the maximum and the minimum RTT * values measured for current AP in the current session [pSec] * @rssi: RSSI as uploaded in the Channel Estimation notification * @rssi_spread: The Difference between the maximum and the minimum RSSI values * measured for current AP in the current session * @last_burst: 1 if no more FTM sessions are scheduled for this responder * @refusal_period: refusal period in case of * @IWL_TOF_ENTRY_RESPONDER_CANNOT_COLABORATE [sec] * @timestamp: The GP2 Clock [usec] where Channel Estimation notification was * uploaded by the LMAC * @start_tsf: measurement start time in TSF of the mac specified in the range * request * @rx_rate_n_flags: rate and flags of the last FTM frame received from this * responder * @tx_rate_n_flags: rate and flags of the last ack sent to this responder * @t2t3_initiator: as calculated from the algo in the initiator * @t1t4_responder: as calculated from the algo in the responder * @common_calib: Calib value that was used for this AP measurement * @specific_calib: value that was used for this AP measurement * @papd_calib_output: The result of the tof papd calibration that was injected * into the algorithm. * @rttConfidence: a value between 0 and 31 that represents the rtt accuracy. * @reserved: for alignment */ struct iwl_tof_range_rsp_ap_entry_ntfy { u8 bssid[ETH_ALEN]; u8 measure_status; u8 measure_bw; __le32 rtt; __le32 rtt_variance; __le32 rtt_spread; s8 rssi; u8 rssi_spread; u8 last_burst; u8 refusal_period; __le32 timestamp; __le32 start_tsf; __le32 rx_rate_n_flags; __le32 tx_rate_n_flags; __le32 t2t3_initiator; __le32 t1t4_responder; __le16 common_calib; __le16 specific_calib; __le32 papd_calib_output; u8 rttConfidence; u8 reserved[3]; } __packed; /* LOCATION_RANGE_RSP_AP_ETRY_NTFY_API_S_VER_5 */ /** * enum iwl_tof_response_status - tof response status * * @IWL_TOF_RESPONSE_SUCCESS: successful range. * @IWL_TOF_RESPONSE_TIMEOUT: request aborted due to timeout expiration. * partial result of ranges done so far is included in the response. * @IWL_TOF_RESPONSE_ABORTED: Measurement aborted by command. * @IWL_TOF_RESPONSE_FAILED: Measurement request command failed. */ enum iwl_tof_response_status { IWL_TOF_RESPONSE_SUCCESS = 0, IWL_TOF_RESPONSE_TIMEOUT = 1, IWL_TOF_RESPONSE_ABORTED = 4, IWL_TOF_RESPONSE_FAILED = 5, }; /* LOCATION_RNG_RSP_STATUS */ /** * struct iwl_tof_range_rsp_ntfy_v5 - ranging response notification * @request_id: A Token ID of the corresponding Range request * @request_status: status of current measurement session, one of * &enum iwl_tof_response_status. * @last_in_batch: report policy (when not all responses are uploaded at once) * @num_of_aps: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS) * @ap: per-AP data */ struct iwl_tof_range_rsp_ntfy_v5 { u8 request_id; u8 request_status; u8 last_in_batch; u8 num_of_aps; struct iwl_tof_range_rsp_ap_entry_ntfy_v3 ap[IWL_MVM_TOF_MAX_APS]; } __packed; /* LOCATION_RANGE_RSP_NTFY_API_S_VER_5 */ /** * struct iwl_tof_range_rsp_ntfy_v6 - ranging response notification * @request_id: A Token ID of the corresponding Range request * @num_of_aps: Number of APs results * @last_report: 1 if no more FTM sessions are scheduled, 0 otherwise.
* @reserved: reserved * @ap: per-AP data */ struct iwl_tof_range_rsp_ntfy_v6 { u8 request_id; u8 num_of_aps; u8 last_report; u8 reserved; struct iwl_tof_range_rsp_ap_entry_ntfy_v4 ap[IWL_MVM_TOF_MAX_APS]; } __packed; /* LOCATION_RANGE_RSP_NTFY_API_S_VER_6 */ /** * struct iwl_tof_range_rsp_ntfy - ranging response notification * @request_id: A Token ID of the corresponding Range request * @num_of_aps: Number of APs results * @last_report: 1 if no more FTM sessions are scheduled, 0 otherwise. * @reserved: reserved * @ap: per-AP data */ struct iwl_tof_range_rsp_ntfy { u8 request_id; u8 num_of_aps; u8 last_report; u8 reserved; struct iwl_tof_range_rsp_ap_entry_ntfy ap[IWL_MVM_TOF_MAX_APS]; } __packed; /* LOCATION_RANGE_RSP_NTFY_API_S_VER_7 */ #define IWL_MVM_TOF_MCSI_BUF_SIZE (245) /** * struct iwl_tof_mcsi_notif - used for debug * @token: token ID for the current session * @role: '0' - initiator, '1' - responder * @reserved: reserved * @initiator_bssid: initiator machine * @responder_bssid: responder machine * @mcsi_buffer: debug data */ struct iwl_tof_mcsi_notif { u8 token; u8 role; __le16 reserved; u8 initiator_bssid[ETH_ALEN]; u8 responder_bssid[ETH_ALEN]; u8 mcsi_buffer[IWL_MVM_TOF_MCSI_BUF_SIZE * 4]; } __packed; /** * struct iwl_tof_range_abort_cmd * @request_id: corresponds to a range request * @reserved: reserved */ struct iwl_tof_range_abort_cmd { u8 request_id; u8 reserved[3]; } __packed; enum ftm_responder_stats_flags { FTM_RESP_STAT_NON_ASAP_STARTED = BIT(0), FTM_RESP_STAT_NON_ASAP_IN_WIN = BIT(1), FTM_RESP_STAT_NON_ASAP_OUT_WIN = BIT(2), FTM_RESP_STAT_TRIGGER_DUP = BIT(3), FTM_RESP_STAT_DUP = BIT(4), FTM_RESP_STAT_DUP_IN_WIN = BIT(5), FTM_RESP_STAT_DUP_OUT_WIN = BIT(6), FTM_RESP_STAT_SCHED_SUCCESS = BIT(7), FTM_RESP_STAT_ASAP_REQ = BIT(8), FTM_RESP_STAT_NON_ASAP_REQ = BIT(9), FTM_RESP_STAT_ASAP_RESP = BIT(10), FTM_RESP_STAT_NON_ASAP_RESP = BIT(11), FTM_RESP_STAT_FAIL_INITIATOR_INACTIVE = BIT(12), FTM_RESP_STAT_FAIL_INITIATOR_OUT_WIN = BIT(13), FTM_RESP_STAT_FAIL_INITIATOR_RETRY_LIM = BIT(14), FTM_RESP_STAT_FAIL_NEXT_SERVED = BIT(15), FTM_RESP_STAT_FAIL_TRIGGER_ERR = BIT(16), FTM_RESP_STAT_FAIL_GC = BIT(17), FTM_RESP_STAT_SUCCESS = BIT(18), FTM_RESP_STAT_INTEL_IE = BIT(19), FTM_RESP_STAT_INITIATOR_ACTIVE = BIT(20), FTM_RESP_STAT_MEASUREMENTS_AVAILABLE = BIT(21), FTM_RESP_STAT_TRIGGER_UNKNOWN = BIT(22), FTM_RESP_STAT_PROCESS_FAIL = BIT(23), FTM_RESP_STAT_ACK = BIT(24), FTM_RESP_STAT_NACK = BIT(25), FTM_RESP_STAT_INVALID_INITIATOR_ID = BIT(26), FTM_RESP_STAT_TIMER_MIN_DELTA = BIT(27), FTM_RESP_STAT_INITIATOR_REMOVED = BIT(28), FTM_RESP_STAT_INITIATOR_ADDED = BIT(29), FTM_RESP_STAT_ERR_LIST_FULL = BIT(30), FTM_RESP_STAT_INITIATOR_SCHED_NOW = BIT(31), }; /* RESP_IND_E */ /** * struct iwl_ftm_responder_stats - FTM responder statistics * @addr: initiator address * @success_ftm: number of successful ftm frames * @ftm_per_burst: num of FTM frames that were received * @flags: &enum ftm_responder_stats_flags * @duration: actual duration of FTM * @allocated_duration: time that was allocated for this FTM session * @bw: FTM request bandwidth * @rate: FTM request rate * @reserved: for alignment and future use */ struct iwl_ftm_responder_stats { u8 addr[ETH_ALEN]; u8 success_ftm; u8 ftm_per_burst; __le32 flags; __le32 duration; __le32 allocated_duration; u8 bw; u8 rate; __le16 reserved; } __packed; /* TOF_RESPONDER_STATISTICS_NTFY_S_VER_2 */ #define IWL_CSI_MAX_EXPECTED_CHUNKS 16 #define IWL_CSI_CHUNK_CTL_NUM_MASK_VER_1 0x0003 #define IWL_CSI_CHUNK_CTL_IDX_MASK_VER_1 0x000c #define
IWL_CSI_CHUNK_CTL_NUM_MASK_VER_2 0x00ff #define IWL_CSI_CHUNK_CTL_IDX_MASK_VER_2 0xff00 struct iwl_csi_chunk_notification { __le32 token; __le16 seq; __le16 ctl; __le32 size; u8 data[]; } __packed; /* CSI_CHUNKS_HDR_NTFY_API_S_VER_1/VER_2 */ #endif /* __iwl_fw_api_location_h__ */ backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h000066400000000000000000000140711351064634500274250ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* *****************************************************************************/ #ifndef __iwl_fw_api_mac_cfg_h__ #define __iwl_fw_api_mac_cfg_h__ /** * enum iwl_mac_conf_subcmd_ids - mac configuration command IDs */ enum iwl_mac_conf_subcmd_ids { /** * @LOW_LATENCY_CMD: &struct iwl_mac_low_latency_cmd */ LOW_LATENCY_CMD = 0x3, /** * @CHANNEL_SWITCH_TIME_EVENT_CMD: &struct iwl_chan_switch_te_cmd */ CHANNEL_SWITCH_TIME_EVENT_CMD = 0x4, /** * @PROBE_RESPONSE_DATA_NOTIF: &struct iwl_probe_resp_data_notif */ PROBE_RESPONSE_DATA_NOTIF = 0xFC, /** * @CHANNEL_SWITCH_NOA_NOTIF: &struct iwl_channel_switch_noa_notif */ CHANNEL_SWITCH_NOA_NOTIF = 0xFF, }; #define IWL_P2P_NOA_DESC_COUNT (2) /** * struct iwl_p2p_noa_attr - NOA attr contained in probe resp FW notification * * @id: attribute id * @len_low: length low half * @len_high: length high half * @idx: instance of NoA timing * @ctwin: GO's CT window and power save capability * @desc: NoA descriptor * @reserved: reserved for alignment purposes */ struct iwl_p2p_noa_attr { u8 id; u8 len_low; u8 len_high; u8 idx; u8 ctwin; struct ieee80211_p2p_noa_desc desc[IWL_P2P_NOA_DESC_COUNT]; u8 reserved; } __packed; #define IWL_PROBE_RESP_DATA_NO_CSA (0xff) /** * struct iwl_probe_resp_data_notif - notification with NOA and CSA counter * * @mac_id: the mac which should send the probe response * @noa_active: notifies if the noa attribute should be handled * @noa_attr: P2P NOA attribute * @csa_counter: current csa counter * @reserved: reserved for alignment purposes */ struct iwl_probe_resp_data_notif { __le32 mac_id; __le32 noa_active; struct iwl_p2p_noa_attr noa_attr; u8 csa_counter; u8 reserved[3]; } __packed; /* PROBE_RESPONSE_DATA_NTFY_API_S_VER_1 */ /** * struct iwl_channel_switch_noa_notif - Channel switch NOA notification * * @id_and_color: ID and color of the MAC */ struct iwl_channel_switch_noa_notif { __le32 id_and_color; } __packed; /* CHANNEL_SWITCH_START_NTFY_API_S_VER_1 */ /** * struct iwl_chan_switch_te_cmd - Channel Switch Time Event command * * @mac_id: MAC ID for channel switch * @action: action to perform, one of FW_CTXT_ACTION_* * @tsf: beacon tsf * @cs_count: channel switch count from CSA/eCSA IE * @cs_delayed_bcn_count: if set to N (!= 0) GO/AP can delay N beacon intervals * at the new channel after the channel switch, otherwise (N == 0) expect * beacon right after the channel switch. * @cs_mode: 1 - quiet, 0 - otherwise * @reserved: reserved for alignment purposes */ struct iwl_chan_switch_te_cmd { __le32 mac_id; __le32 action; __le32 tsf; u8 cs_count; u8 cs_delayed_bcn_count; u8 cs_mode; u8 reserved; } __packed; /* MAC_CHANNEL_SWITCH_TIME_EVENT_S_VER_2 */ /** * struct iwl_mac_low_latency_cmd - set/clear mac to 'low-latency mode' * * @mac_id: MAC ID to whom to apply the low-latency configurations * @low_latency_rx: 1/0 to set/clear Rx low latency direction * @low_latency_tx: 1/0 to set/clear Tx low latency direction * @reserved: reserved for alignment purposes */ struct iwl_mac_low_latency_cmd { __le32 mac_id; u8 low_latency_rx; u8 low_latency_tx; __le16 reserved; } __packed; /* MAC_LOW_LATENCY_API_S_VER_1 */ #endif /* __iwl_fw_api_mac_cfg_h__ */ backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h000066400000000000000000000574011351064634500266740ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license.
* * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *****************************************************************************/ #ifndef __iwl_fw_api_mac_h__ #define __iwl_fw_api_mac_h__ /* * The first MAC indices (starting from 0) are available to the driver, * AUX indices follow - 1 for non-CDB, 2 for CDB. */ #define MAC_INDEX_AUX 4 #define MAC_INDEX_MIN_DRIVER 0 #define NUM_MAC_INDEX_DRIVER MAC_INDEX_AUX #define NUM_MAC_INDEX (NUM_MAC_INDEX_DRIVER + 1) #define NUM_MAC_INDEX_CDB (NUM_MAC_INDEX_DRIVER + 2) #define IWL_MVM_STATION_COUNT 16 #define IWL_MVM_INVALID_STA 0xFF enum iwl_ac { AC_BK, AC_BE, AC_VI, AC_VO, AC_NUM, }; /** * enum iwl_mac_protection_flags - MAC context flags * @MAC_PROT_FLG_TGG_PROTECT: 11g protection when transmitting OFDM frames, * this will require CCK RTS/CTS2self. * RTS/CTS will protect full burst time.
* @MAC_PROT_FLG_HT_PROT: enable HT protection * @MAC_PROT_FLG_FAT_PROT: protect 40 MHz transmissions * @MAC_PROT_FLG_SELF_CTS_EN: allow CTS2self */ enum iwl_mac_protection_flags { MAC_PROT_FLG_TGG_PROTECT = BIT(3), MAC_PROT_FLG_HT_PROT = BIT(23), MAC_PROT_FLG_FAT_PROT = BIT(24), MAC_PROT_FLG_SELF_CTS_EN = BIT(30), }; #define MAC_FLG_SHORT_SLOT BIT(4) #define MAC_FLG_SHORT_PREAMBLE BIT(5) /** * enum iwl_mac_types - Supported MAC types * @FW_MAC_TYPE_FIRST: lowest supported MAC type * @FW_MAC_TYPE_AUX: Auxiliary MAC (internal) * @FW_MAC_TYPE_LISTENER: monitor MAC type (?) * @FW_MAC_TYPE_PIBSS: Pseudo-IBSS * @FW_MAC_TYPE_IBSS: IBSS * @FW_MAC_TYPE_BSS_STA: BSS (managed) station * @FW_MAC_TYPE_P2P_DEVICE: P2P Device * @FW_MAC_TYPE_P2P_STA: P2P client * @FW_MAC_TYPE_GO: P2P GO * @FW_MAC_TYPE_TEST: ? * @FW_MAC_TYPE_MAX: highest supported MAC type */ enum iwl_mac_types { FW_MAC_TYPE_FIRST = 1, FW_MAC_TYPE_AUX = FW_MAC_TYPE_FIRST, FW_MAC_TYPE_LISTENER, FW_MAC_TYPE_PIBSS, FW_MAC_TYPE_IBSS, FW_MAC_TYPE_BSS_STA, FW_MAC_TYPE_P2P_DEVICE, FW_MAC_TYPE_P2P_STA, FW_MAC_TYPE_GO, FW_MAC_TYPE_TEST, FW_MAC_TYPE_MAX = FW_MAC_TYPE_TEST }; /* MAC_CONTEXT_TYPE_API_E_VER_1 */ /** * enum iwl_tsf_id - TSF hw timer ID * @TSF_ID_A: use TSF A * @TSF_ID_B: use TSF B * @TSF_ID_C: use TSF C * @TSF_ID_D: use TSF D * @NUM_TSF_IDS: number of TSF timers available */ enum iwl_tsf_id { TSF_ID_A = 0, TSF_ID_B = 1, TSF_ID_C = 2, TSF_ID_D = 3, NUM_TSF_IDS = 4, }; /* TSF_ID_API_E_VER_1 */ /** * struct iwl_mac_data_ap - configuration data for AP MAC context * @beacon_time: beacon transmit time in system time * @beacon_tsf: beacon transmit time in TSF * @bi: beacon interval in TU * @reserved1: reserved * @dtim_interval: dtim transmit time in TU * @reserved2: reserved * @mcast_qid: queue ID for multicast traffic.
* NOTE: obsolete from VER2 and on * @beacon_template: beacon template ID */ struct iwl_mac_data_ap { __le32 beacon_time; __le64 beacon_tsf; __le32 bi; __le32 reserved1; __le32 dtim_interval; __le32 reserved2; __le32 mcast_qid; __le32 beacon_template; } __packed; /* AP_MAC_DATA_API_S_VER_2 */ /** * struct iwl_mac_data_ibss - configuration data for IBSS MAC context * @beacon_time: beacon transmit time in system time * @beacon_tsf: beacon transmit time in TSF * @bi: beacon interval in TU * @reserved: reserved * @beacon_template: beacon template ID */ struct iwl_mac_data_ibss { __le32 beacon_time; __le64 beacon_tsf; __le32 bi; __le32 reserved; __le32 beacon_template; } __packed; /* IBSS_MAC_DATA_API_S_VER_1 */ /** * enum iwl_mac_data_policy - policy of the data path for this MAC * @TWT_SUPPORTED: twt is supported */ enum iwl_mac_data_policy { TWT_SUPPORTED = BIT(0), }; /** * struct iwl_mac_data_sta - configuration data for station MAC context * @is_assoc: 1 for associated state, 0 otherwise * @dtim_time: DTIM arrival time in system time * @dtim_tsf: DTIM arrival time in TSF * @bi: beacon interval in TU, applicable only when associated * @reserved1: reserved * @dtim_interval: DTIM interval in TU, applicable only when associated * @data_policy: see &enum iwl_mac_data_policy * @listen_interval: in beacon intervals, applicable only when associated * @assoc_id: unique ID assigned by the AP during association * @assoc_beacon_arrive_time: TSF of first beacon after association */ struct iwl_mac_data_sta { __le32 is_assoc; __le32 dtim_time; __le64 dtim_tsf; __le32 bi; __le32 reserved1; __le32 dtim_interval; __le32 data_policy; __le32 listen_interval; __le32 assoc_id; __le32 assoc_beacon_arrive_time; } __packed; /* STA_MAC_DATA_API_S_VER_2 */ /** * struct iwl_mac_data_go - configuration data for P2P GO MAC context * @ap: iwl_mac_data_ap struct with most config data * @ctwin: client traffic window in TU (period after TBTT when GO is present). * 0 indicates that there is no CT window. * @opp_ps_enabled: indicate that opportunistic PS allowed */ struct iwl_mac_data_go { struct iwl_mac_data_ap ap; __le32 ctwin; __le32 opp_ps_enabled; } __packed; /* GO_MAC_DATA_API_S_VER_1 */ /** * struct iwl_mac_data_p2p_sta - configuration data for P2P client MAC context * @sta: iwl_mac_data_sta struct with most config data * @ctwin: client traffic window in TU (period after TBTT when GO is present). * 0 indicates that there is no CT window. */ struct iwl_mac_data_p2p_sta { struct iwl_mac_data_sta sta; __le32 ctwin; } __packed; /* P2P_STA_MAC_DATA_API_S_VER_2 */ /** * struct iwl_mac_data_pibss - Pseudo IBSS config data * @stats_interval: interval in TU between statistics notifications to host. */ struct iwl_mac_data_pibss { __le32 stats_interval; } __packed; /* PIBSS_MAC_DATA_API_S_VER_1 */ /* * struct iwl_mac_data_p2p_dev - configuration data for the P2P Device MAC * context. * @is_disc_extended: if set to true, P2P Device discoverability is enabled on * other channels as well. This should be set to true only in case that the * device is discoverable and there is an active GO. Note that setting this * field when not needed will increase the number of interrupts and have an * effect on the platform power, as this setting opens the Rx filters on * all macs.
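* * A minimal sketch (hypothetical driver-side code, not taken from this tree; go_active is an assumed local flag) of how the trade-off above maps to the field: cmd.p2p_dev.is_disc_extended = cpu_to_le32(go_active ? 1 : 0); i.e. only pay for the extra wakeups while a GO is actually active.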
*/ struct iwl_mac_data_p2p_dev { __le32 is_disc_extended; } __packed; /* _P2P_DEV_MAC_DATA_API_S_VER_1 */ /** * enum iwl_mac_filter_flags - MAC context filter flags * @MAC_FILTER_IN_PROMISC: accept all data frames * @MAC_FILTER_IN_CONTROL_AND_MGMT: pass all management and * control frames to the host * @MAC_FILTER_ACCEPT_GRP: accept multicast frames * @MAC_FILTER_DIS_DECRYPT: don't decrypt unicast frames * @MAC_FILTER_DIS_GRP_DECRYPT: don't decrypt multicast frames * @MAC_FILTER_IN_BEACON: transfer foreign BSS's beacons to host * (in station mode when associated) * @MAC_FILTER_OUT_BCAST: filter out all broadcast frames * @MAC_FILTER_IN_CRC32: extract FCS and append it to frames * @MAC_FILTER_IN_PROBE_REQUEST: pass probe requests to host */ enum iwl_mac_filter_flags { MAC_FILTER_IN_PROMISC = BIT(0), MAC_FILTER_IN_CONTROL_AND_MGMT = BIT(1), MAC_FILTER_ACCEPT_GRP = BIT(2), MAC_FILTER_DIS_DECRYPT = BIT(3), MAC_FILTER_DIS_GRP_DECRYPT = BIT(4), MAC_FILTER_IN_BEACON = BIT(6), MAC_FILTER_OUT_BCAST = BIT(8), MAC_FILTER_IN_CRC32 = BIT(11), MAC_FILTER_IN_PROBE_REQUEST = BIT(12), /** * @MAC_FILTER_IN_11AX: mark BSS as supporting 802.11ax */ MAC_FILTER_IN_11AX = BIT(14), }; /** * enum iwl_mac_qos_flags - QoS flags * @MAC_QOS_FLG_UPDATE_EDCA: ? * @MAC_QOS_FLG_TGN: HT is enabled * @MAC_QOS_FLG_TXOP_TYPE: ? * */ enum iwl_mac_qos_flags { MAC_QOS_FLG_UPDATE_EDCA = BIT(0), MAC_QOS_FLG_TGN = BIT(1), MAC_QOS_FLG_TXOP_TYPE = BIT(4), }; /** * struct iwl_ac_qos - QOS timing params for MAC_CONTEXT_CMD * @cw_min: Contention window, start value in numbers of slots. * Should be a power-of-2, minus 1. Device's default is 0x0f. * @cw_max: Contention window, max value in numbers of slots. * Should be a power-of-2, minus 1. Device's default is 0x3f. * @aifsn: Number of slots in Arbitration Interframe Space (before * performing random backoff timing prior to Tx). Device default 1. * @fifos_mask: FIFOs used by this MAC for this AC * @edca_txop: Length of Tx opportunity, in uSecs. Device default is 0. * * One instance of this config struct for each of 4 EDCA access categories * in struct iwl_qosparam_cmd. * * Device will automatically increase contention window by (2*CW) + 1 for each * transmission retry. Device uses cw_max as a bit mask, ANDed with new CW * value, to cap the CW value. 
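* * Worked example (illustrative, derived from the rules above): with cw_min = 0x0f and cw_max = 0x3f the initial window is 15 slots; the first retry computes (2 * 15) + 1 = 31, the second (2 * 31) + 1 = 63, and the 0x3f mask caps every later value at 63 slots.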
*/ struct iwl_ac_qos { __le16 cw_min; __le16 cw_max; u8 aifsn; u8 fifos_mask; __le16 edca_txop; } __packed; /* AC_QOS_API_S_VER_2 */ /** * struct iwl_mac_ctx_cmd - command structure to configure MAC contexts * ( MAC_CONTEXT_CMD = 0x28 ) * @id_and_color: ID and color of the MAC * @action: action to perform, one of FW_CTXT_ACTION_* * @mac_type: one of &enum iwl_mac_types * @tsf_id: TSF HW timer, one of &enum iwl_tsf_id * @node_addr: MAC address * @reserved_for_node_addr: reserved * @bssid_addr: BSSID * @reserved_for_bssid_addr: reserved * @cck_rates: basic rates available for CCK * @ofdm_rates: basic rates available for OFDM * @protection_flags: combination of &enum iwl_mac_protection_flags * @cck_short_preamble: 0x20 for enabling short preamble, 0 otherwise * @short_slot: 0x10 for enabling short slots, 0 otherwise * @filter_flags: combination of &enum iwl_mac_filter_flags * @qos_flags: from &enum iwl_mac_qos_flags * @ac: one iwl_ac_qos configuration for each AC */ struct iwl_mac_ctx_cmd { /* COMMON_INDEX_HDR_API_S_VER_1 */ __le32 id_and_color; __le32 action; /* MAC_CONTEXT_COMMON_DATA_API_S_VER_1 */ __le32 mac_type; __le32 tsf_id; u8 node_addr[6]; __le16 reserved_for_node_addr; u8 bssid_addr[6]; __le16 reserved_for_bssid_addr; __le32 cck_rates; __le32 ofdm_rates; __le32 protection_flags; __le32 cck_short_preamble; __le32 short_slot; __le32 filter_flags; /* MAC_QOS_PARAM_API_S_VER_1 */ __le32 qos_flags; struct iwl_ac_qos ac[AC_NUM+1]; /* MAC_CONTEXT_COMMON_DATA_API_S */ union { struct iwl_mac_data_ap ap; struct iwl_mac_data_go go; struct iwl_mac_data_sta sta; struct iwl_mac_data_p2p_sta p2p_sta; struct iwl_mac_data_p2p_dev p2p_dev; struct iwl_mac_data_pibss pibss; struct iwl_mac_data_ibss ibss; }; } __packed; /* MAC_CONTEXT_CMD_API_S_VER_1 */ #define IWL_NONQOS_SEQ_GET 0x1 #define IWL_NONQOS_SEQ_SET 0x2 struct iwl_nonqos_seq_query_cmd { __le32 get_set_flag; __le32 mac_id_n_color; __le16 value; __le16 reserved; } __packed; /* NON_QOS_TX_COUNTER_GET_SET_API_S_VER_1 */ /** * struct iwl_missed_beacons_notif - information on missed beacons * ( MISSED_BEACONS_NOTIFICATION = 0xa2 ) * @mac_id: interface ID * @consec_missed_beacons_since_last_rx: number of consecutive missed * beacons since last RX. * @consec_missed_beacons: number of consecutive missed beacons * @num_expected_beacons: number of expected beacons * @num_recvd_beacons: number of received beacons */ struct iwl_missed_beacons_notif { __le32 mac_id; __le32 consec_missed_beacons_since_last_rx; __le32 consec_missed_beacons; __le32 num_expected_beacons; __le32 num_recvd_beacons; } __packed; /* MISSED_BEACON_NTFY_API_S_VER_3 */ /** * struct iwl_he_backoff_conf - used for backoff configuration * Per each trigger-based AC (set by MU EDCA Parameter set info-element) * used for backoff configuration of TXF5..TXF8 trigger based. * The MU-TIMER is reloaded w/ MU_TIME each time a frame from the AC is sent via * trigger-based TX.
* @cwmin: CW min * @cwmax: CW max * @aifsn: AIFSN * AIFSN=0 means that no backoff from the specified TRIG-BASED AC is * allowed till the MU-TIMER is 0 * @mu_time: MU time in 8TU units */ struct iwl_he_backoff_conf { __le16 cwmin; __le16 cwmax; __le16 aifsn; __le16 mu_time; } __packed; /* AC_QOS_DOT11AX_API_S */ /** * enum iwl_he_pkt_ext_constellations - PPE constellation indices * @IWL_HE_PKT_EXT_BPSK: BPSK * @IWL_HE_PKT_EXT_QPSK: QPSK * @IWL_HE_PKT_EXT_16QAM: 16-QAM * @IWL_HE_PKT_EXT_64QAM: 64-QAM * @IWL_HE_PKT_EXT_256QAM: 256-QAM * @IWL_HE_PKT_EXT_1024QAM: 1024-QAM * @IWL_HE_PKT_EXT_RESERVED: reserved value * @IWL_HE_PKT_EXT_NONE: not defined */ enum iwl_he_pkt_ext_constellations { IWL_HE_PKT_EXT_BPSK = 0, IWL_HE_PKT_EXT_QPSK, IWL_HE_PKT_EXT_16QAM, IWL_HE_PKT_EXT_64QAM, IWL_HE_PKT_EXT_256QAM, IWL_HE_PKT_EXT_1024QAM, IWL_HE_PKT_EXT_RESERVED, IWL_HE_PKT_EXT_NONE, }; #define MAX_HE_SUPP_NSS 2 #define MAX_HE_CHANNEL_BW_INDX 4 /** * struct iwl_he_pkt_ext - QAM thresholds * The required PPE is set via HE Capabilities IE, per Nss x BW x MCS * The IE is organized in the following way: * Support for Nss x BW (or RU) matrix: * (0=SISO, 1=MIMO2) x (0-20MHz, 1-40MHz, 2-80MHz, 3-160MHz) * Each entry contains 2 QAM thresholds for 8us and 16us: * 0=BPSK, 1=QPSK, 2=16QAM, 3=64QAM, 4=256QAM, 5=1024QAM, 6=RES, 7=NONE * i.e. QAM_th1 < QAM_th2 such that if TX uses QAM_tx: * QAM_tx < QAM_th1 --> PPE=0us * QAM_th1 <= QAM_tx < QAM_th2 --> PPE=8us * QAM_th2 <= QAM_tx --> PPE=16us * @pkt_ext_qam_th: QAM thresholds * For each Nss/Bw define 2 QAM thresholds (0..5) * For rates below the low_th, no need for PPE * For rates between low_th and high_th, need 8us PPE * For rates equal to or higher than the high_th, need 16us PPE * Nss (0-siso, 1-mimo2) x BW (0-20MHz, 1-40MHz, 2-80MHz, 3-160MHz) x * (0-low_th, 1-high_th) */ struct iwl_he_pkt_ext { u8 pkt_ext_qam_th[MAX_HE_SUPP_NSS][MAX_HE_CHANNEL_BW_INDX][2]; } __packed; /* PKT_EXT_DOT11AX_API_S */ /** * enum iwl_he_sta_ctxt_flags - HE STA context flags * @STA_CTXT_HE_REF_BSSID_VALID: ref bssid addr valid (for receiving specific * control frames such as TRIG, NDPA, BACK) * @STA_CTXT_HE_BSS_COLOR_DIS: BSS color disable, don't use the BSS * color for RX filter but use MAC header * @STA_CTXT_HE_PARTIAL_BSS_COLOR: partial BSS color allocation * @STA_CTXT_HE_32BIT_BA_BITMAP: indicates the receiver supports BA bitmap * of 32-bits * @STA_CTXT_HE_PACKET_EXT: indicates that the packet-extension info is valid * and should be used * @STA_CTXT_HE_TRIG_RND_ALLOC: indicates that trigger based random allocation * is enabled according to UORA element existence * @STA_CTXT_HE_CONST_TRIG_RND_ALLOC: used for AV testing * @STA_CTXT_HE_ACK_ENABLED: indicates that the AP supports receiving ACK- * enabled AGG, i.e. both BACK and non-BACK frames in a single AGG * @STA_CTXT_HE_MU_EDCA_CW: indicates that there is an element of MU EDCA * parameter set, i.e. the backoff counters for trig-based ACs * @STA_CTXT_HE_NIC_NOT_ACK_ENABLED: mark that the NIC doesn't support receiving * ACK-enabled AGG, (i.e. both BACK and non-BACK frames in single AGG). * If the NIC is not ACK_ENABLED it may use the EOF-bit in first non-0 * len delim to determine if AGG or single. * @STA_CTXT_HE_RU_2MHZ_BLOCK: indicates that 26-tone RU OFDMA transmissions are * not allowed (as there are OBSS that might classify such transmissions as * radar pulses).
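* * Illustrative combination (a sketch, not taken from the driver): a STA context for an AP that supports ACK-enabled aggregation and publishes a MU EDCA parameter set could carry flags = cpu_to_le32(STA_CTXT_HE_ACK_ENABLED | STA_CTXT_HE_MU_EDCA_CW); with the matching backoff values filled into the command's trig_based_txf array.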
*/ enum iwl_he_sta_ctxt_flags { STA_CTXT_HE_REF_BSSID_VALID = BIT(4), STA_CTXT_HE_BSS_COLOR_DIS = BIT(5), STA_CTXT_HE_PARTIAL_BSS_COLOR = BIT(6), STA_CTXT_HE_32BIT_BA_BITMAP = BIT(7), STA_CTXT_HE_PACKET_EXT = BIT(8), STA_CTXT_HE_TRIG_RND_ALLOC = BIT(9), STA_CTXT_HE_CONST_TRIG_RND_ALLOC = BIT(10), STA_CTXT_HE_ACK_ENABLED = BIT(11), STA_CTXT_HE_MU_EDCA_CW = BIT(12), STA_CTXT_HE_NIC_NOT_ACK_ENABLED = BIT(13), STA_CTXT_HE_RU_2MHZ_BLOCK = BIT(14), }; /** * enum iwl_he_htc_flags - HE HTC support flags * @IWL_HE_HTC_SUPPORT: HE-HTC support * @IWL_HE_HTC_UL_MU_RESP_SCHED: HE UL MU response schedule * support via A-control field * @IWL_HE_HTC_BSR_SUPP: BSR support in A-control field * @IWL_HE_HTC_OMI_SUPP: A-OMI support in A-control field * @IWL_HE_HTC_BQR_SUPP: A-BQR support in A-control field */ enum iwl_he_htc_flags { IWL_HE_HTC_SUPPORT = BIT(0), IWL_HE_HTC_UL_MU_RESP_SCHED = BIT(3), IWL_HE_HTC_BSR_SUPP = BIT(4), IWL_HE_HTC_OMI_SUPP = BIT(5), IWL_HE_HTC_BQR_SUPP = BIT(6), }; /* * @IWL_HE_HTC_LINK_ADAP_NO_FEEDBACK: the STA does not provide HE MFB * @IWL_HE_HTC_LINK_ADAP_UNSOLICITED: the STA provides only unsolicited HE MFB * @IWL_HE_HTC_LINK_ADAP_BOTH: the STA is capable of providing HE MFB in * response to HE MRQ and if the STA provides unsolicited HE MFB */ #define IWL_HE_HTC_LINK_ADAP_POS (1) #define IWL_HE_HTC_LINK_ADAP_NO_FEEDBACK (0) #define IWL_HE_HTC_LINK_ADAP_UNSOLICITED (2 << IWL_HE_HTC_LINK_ADAP_POS) #define IWL_HE_HTC_LINK_ADAP_BOTH (3 << IWL_HE_HTC_LINK_ADAP_POS) /** * struct iwl_he_sta_context_cmd_v1 - configure FW to work with HE AP * @sta_id: STA id * @tid_limit: max num of TIDs in TX HE-SU multi-TID agg * 0 - bad value, 1 - multi-tid not supported, 2..8 - tid limit * @reserved1: reserved byte for future use * @reserved2: reserved byte for future use * @flags: see &enum iwl_he_sta_ctxt_flags * @ref_bssid_addr: reference BSSID used by the AP * @reserved0: reserved 2 bytes for aligning the ref_bssid_addr field to 8 bytes * @htc_flags: which features are supported in HTC * @frag_flags: frag support in A-MSDU * @frag_level: frag support level * @frag_max_num: max num of "open" MSDUs in the receiver (in power of 2) * @frag_min_size: min frag size (except last frag) * @pkt_ext: optional, exists according to PPE-present bit in the HE-PHY capa * @bss_color: 11ax AP ID that is used in the HE SIG-A to mark inter BSS frame * @htc_trig_based_pkt_ext: default PE in 4us units * @frame_time_rts_th: HE duration RTS threshold, in units of 32us * @rand_alloc_ecwmin: random CWmin = 2**ECWmin-1 * @rand_alloc_ecwmax: random CWmax = 2**ECWmax-1 * @reserved3: reserved byte for future use * @trig_based_txf: MU EDCA Parameter set for the trigger based traffic queues */ struct iwl_he_sta_context_cmd_v1 { u8 sta_id; u8 tid_limit; u8 reserved1; u8 reserved2; __le32 flags; /* The below fields are set via Multiple BSSID IE */ u8 ref_bssid_addr[6]; __le16 reserved0; /* The below fields are set via HE-capabilities IE */ __le32 htc_flags; u8 frag_flags; u8 frag_level; u8 frag_max_num; u8 frag_min_size; /* The below fields are set via PPE thresholds element */ struct iwl_he_pkt_ext pkt_ext; /* The below fields are set via HE-Operation IE */ u8 bss_color; u8 htc_trig_based_pkt_ext; __le16 frame_time_rts_th; /* Random access parameter set (i.e.
RAPS) */ u8 rand_alloc_ecwmin; u8 rand_alloc_ecwmax; __le16 reserved3; /* The below fields are set via MU EDCA parameter set element */ struct iwl_he_backoff_conf trig_based_txf[AC_NUM]; } __packed; /* STA_CONTEXT_DOT11AX_API_S_VER_1 */ /** * struct iwl_he_sta_context_cmd - configure FW to work with HE AP * @sta_id: STA id * @tid_limit: max num of TIDs in TX HE-SU multi-TID agg * 0 - bad value, 1 - multi-tid not supported, 2..8 - tid limit * @reserved1: reserved byte for future use * @reserved2: reserved byte for future use * @flags: see %iwl_11ax_sta_ctxt_flags * @ref_bssid_addr: reference BSSID used by the AP * @reserved0: reserved 2 bytes for aligning the ref_bssid_addr field to 8 bytes * @htc_flags: which features are supported in HTC * @frag_flags: frag support in A-MSDU * @frag_level: frag support level * @frag_max_num: max num of "open" MSDUs in the receiver (in power of 2) * @frag_min_size: min frag size (except last frag) * @pkt_ext: optional, exists according to PPE-present bit in the HE-PHY capa * @bss_color: 11ax AP ID that is used in the HE SIG-A to mark inter BSS frame * @htc_trig_based_pkt_ext: default PE in 4us units * @frame_time_rts_th: HE duration RTS threshold, in units of 32us * @rand_alloc_ecwmin: random CWmin = 2**ECWmin-1 * @rand_alloc_ecwmax: random CWmax = 2**ECWmax-1 * @reserved3: reserved byte for future use * @trig_based_txf: MU EDCA Parameter set for the trigger based traffic queues * @max_bssid_indicator: indicator of the max bssid supported on the associated * bss * @bssid_index: index of the associated VAP * @ema_ap: AP supports enhanced Multi BSSID advertisement * @profile_periodicity: number of Beacon periods that are needed to receive the * complete VAPs info * @bssid_count: actual number of VAPs in the MultiBSS Set * @reserved4: alignment */ struct iwl_he_sta_context_cmd { u8 sta_id; u8 tid_limit; u8 reserved1; u8 reserved2; __le32 flags; /* The below fields are set via Multiple BSSID IE */ u8 ref_bssid_addr[6]; __le16 reserved0; /* The below fields are set via HE-capabilities IE */ __le32 htc_flags; u8 frag_flags; u8 frag_level; u8 frag_max_num; u8 frag_min_size; /* The below fields are set via PPE thresholds element */ struct iwl_he_pkt_ext pkt_ext; /* The below fields are set via HE-Operation IE */ u8 bss_color; u8 htc_trig_based_pkt_ext; __le16 frame_time_rts_th; /* Random access parameter set (i.e. RAPS) */ u8 rand_alloc_ecwmin; u8 rand_alloc_ecwmax; __le16 reserved3; /* The below fields are set via MU EDCA parameter set element */ struct iwl_he_backoff_conf trig_based_txf[AC_NUM]; u8 max_bssid_indicator; u8 bssid_index; u8 ema_ap; u8 profile_periodicity; u8 bssid_count; u8 reserved4[3]; } __packed; /* STA_CONTEXT_DOT11AX_API_S_VER_2 */ /** * struct iwl_he_monitor_cmd - configure air sniffer for HE * @bssid: the BSSID to sniff for * @reserved1: reserved for dword alignment * @aid: the AID to track on for HE MU * @reserved2: reserved for future use */ struct iwl_he_monitor_cmd { u8 bssid[6]; __le16 reserved1; __le16 aid; u8 reserved2[6]; } __packed; /* HE_AIR_SNIFFER_CONFIG_CMD_API_S_VER_1 */ #endif /* __iwl_fw_api_mac_h__ */ backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/fw/api/nan.h000066400000000000000000000373711351064634500267140ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. 
* * GPL LICENSE SUMMARY * * Copyright(c) 2015-2017 Intel Deutschland GmbH * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2015-2017 Intel Deutschland GmbH * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #ifndef __iwl_fw_api_nan_h__ #define __iwl_fw_api_nan_h__ /* TODO: read it from tlv */ #define NAN_MAX_SUPPORTED_DE_ENTRIES 10 /** * enum iwl_nan_subcmd_ids - Neighbor Awareness Networking (NaN) commands IDS */ enum iwl_nan_subcmd_ids { /** * @NAN_CONFIG_CMD: * &struct iwl_nan_cfg_cmd_v2 or &struct iwl_nan_cfg_cmd */ NAN_CONFIG_CMD = 0, /** * @NAN_DISCOVERY_FUNC_CMD: * &struct iwl_nan_add_func_cmd or &struct iwl_nan_add_func_cmd_v2 */ NAN_DISCOVERY_FUNC_CMD = 0x1, /** * @NAN_FAW_CONFIG_CMD: * &struct iwl_nan_faw_config */ NAN_FAW_CONFIG_CMD = 0x2, /** * @NAN_DISCOVERY_EVENT_NOTIF: * &struct iwl_nan_disc_evt_notify_v1 or * &struct iwl_nan_disc_evt_notify_v2 */ NAN_DISCOVERY_EVENT_NOTIF = 0xFD, /** * @NAN_DISCOVERY_TERMINATE_NOTIF: * &struct iwl_nan_de_term */ NAN_DISCOVERY_TERMINATE_NOTIF = 0xFE, /** * @NAN_FAW_START_NOTIF: * Further availability window started. */ NAN_FAW_START_NOTIF = 0xFF, }; /** * struct iwl_fw_chan_avail - Defines per op. 
class channel availability * * @op_class: operating class * @chan_bitmap: channel bitmap */ struct iwl_fw_chan_avail { u8 op_class; __le16 chan_bitmap; } __packed; /** * struct iwl_nan_umac_cfg - NAN umac config parameters * * @action: one of the FW_CTXT_ACTION_* * @tsf_id: tsf id to use in beacons * @sta_id: STA used for NAN operations. Currently it is AUX STA. * @node_addr: our address * @reserved1: reserved * @master_pref: master preference value * @master_rand: random factor to override fw's decision (DEBUG) * @cluster_id: cluster id, if 0 the fw will choose one for us. * @dual_band: enables dual band operation. * @beacon_template_id: beacon template id for NAN */ struct iwl_nan_umac_cfg { __le32 action; __le32 tsf_id; __le32 sta_id; u8 node_addr[6]; __le16 reserved1; u8 master_pref; u8 master_rand; __le16 cluster_id; __le32 dual_band; __le32 beacon_template_id; } __packed; /* _NAN_UMAC_CONFIG_CMD_API_S_VER_1 */ /** * struct iwl_nan_testbed_cfg - NAN testbed specific config parameters * * @chan24: override default 2.4GHz channel (DEBUG) * @chan52: override default 5.2GHz channel (DEBUG) * @hop_count: fake hop count (DEBUG) * @op_bands: band bit mask (DEBUG) * @warmup_timer: warmup_timer in us (DEBUG) * @custom_tsf: fake tsf value (DEBUG) * @action_delay: usecs to delay SDFs (DEBUG) */ struct iwl_nan_testbed_cfg { u8 chan24; u8 chan52; u8 hop_count; u8 op_bands; __le32 warmup_timer; __le64 custom_tsf; __le32 action_delay; } __packed; /* NAN_TEST_BED_SPECIFIC_CONFIG_S_VER_1 */ /* * struct iwl_nan_nan2_cfg - NAN2 specific configuration * * @cdw: committed DW interval as defined in NAN2 spec (NAN2) * @op_mode: operation mode as defined in NAN2 spec (NAN2) * @pot_avail_len: length of pot_avail array (NAN2) * @pot_avail: potential availability per op. class (NAN2) */ struct iwl_nan_nan2_cfg { __le16 cdw; u8 op_mode; u8 pot_avail_len; struct iwl_fw_chan_avail pot_avail[20]; } __packed; /* NAN_CONFIG_CMD_API_S_VER_1 */ /** * struct iwl_nan_cfg_cmd - NAN configuration command * * This command starts/stops/modifies NAN sync engine. * * @umac_cfg: umac specific configuration * @tb_cfg: testbed specific configuration * @nan2_cfg: nan2 specific configuration */ struct iwl_nan_cfg_cmd { struct iwl_nan_umac_cfg umac_cfg; struct iwl_nan_testbed_cfg tb_cfg; /* NAN 2 specific configuration */ struct iwl_nan_nan2_cfg nan2_cfg; } __packed; /* NAN_CONFIG_CMD_API_S_VER_1 */ /** * struct iwl_nan_cfg_cmd_v2 - NAN configuration command, version 2 * * This command starts/stops/modifies NAN sync engine. 
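 *
 * A minimal fill-in sketch (illustrative only; FW_CTXT_ACTION_ADD is the
 * action constant referenced elsewhere in this file, and the host-command
 * submission plumbing is assumed):
 *
 *	struct iwl_nan_cfg_cmd_v2 cmd = {};
 *
 *	cmd.umac_cfg.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
 *	cmd.umac_cfg.master_pref = 128;	/* mid-range master preference */
 *	cmd.unavailable_slots = cpu_to_le32(0);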
* * @umac_cfg: umac specific configuration * @tb_cfg: testbed specific configuration * @unavailable_slots: Force this amount of slots to be unavailable in potential * map * @nan2_cfg: nan2 specific configuration */ struct iwl_nan_cfg_cmd_v2 { struct iwl_nan_umac_cfg umac_cfg; struct iwl_nan_testbed_cfg tb_cfg; __le32 unavailable_slots; /* NAN 2 specific configuration */ struct iwl_nan_nan2_cfg nan2_cfg; } __packed; /* NAN_CONFIG_CMD_API_S_VER_2 */ /* NAN DE function type */ enum iwl_fw_nan_func_type { IWL_NAN_DE_FUNC_PUBLISH = 0, IWL_NAN_DE_FUNC_SUBSCRIBE = 1, IWL_NAN_DE_FUNC_FOLLOW_UP = 2, /* keep last */ IWL_NAN_DE_FUNC_NOT_VALID, }; /* NAN DE function flags */ enum iwl_fw_nan_func_flags { IWL_NAN_DE_FUNC_FLAG_UNSOLICITED_OR_ACTIVE = BIT(0), IWL_NAN_DE_FUNC_FLAG_SOLICITED = BIT(1), IWL_NAN_DE_FUNC_FLAG_UNICAST = BIT(2), IWL_NAN_DE_FUNC_FLAG_CLOSE_RANGE = BIT(3), IWL_NAN_DE_FUNC_FLAG_FAW_PRESENT = BIT(4), IWL_NAN_DE_FUNC_FLAG_FAW_TYPE = BIT(5), IWL_NAN_DE_FUNC_FLAG_FAW_NOTIFY = BIT(6), IWL_NAN_DE_FUNC_FLAG_RAISE_EVENTS = BIT(7), }; /** * struct iwl_nan_add_func_common_tail - tail of iwl_nan_add_func_common for * alignment with various channel info sizes * * @faw_attrtype: further availability bitmap * @serv_info_len: length of service specific information * @srf_len: length of the srf * @rx_filter_len: length of rx filter * @tx_filter_len: length of tx filter * @dw_interval: awake dw interval */ struct iwl_nan_add_func_common_tail { u8 faw_attrtype; u8 serv_info_len; u8 srf_len; u8 rx_filter_len; u8 tx_filter_len; u8 dw_interval; } __packed; /** * struct iwl_nan_add_func_common - NAN add/remove function, common part * * @action: one of the FW_CTXT_ACTION_ADD/REMOVE * @instance_id: instance id of the DE function to remove * @type: enum %iwl_fw_nan_func_type * @service_id: service id * @flags: a combination of &enum iwl_fw_nan_func_flags * @flw_up_id: follow up id * @flw_up_req_id: follow up requestor id * @flw_up_addr: follow up destination address * @reserved1: reserved * @ttl: ttl in DW's or 0 for infinite * @faw_ci: &struct iwl_fw_channel_info for further availability * @tail: command tail */ struct iwl_nan_add_func_common { __le32 action; u8 instance_id; u8 type; u8 service_id[6]; __le16 flags; u8 flw_up_id; u8 flw_up_req_id; u8 flw_up_addr[6]; __le16 reserved1; __le32 ttl; struct iwl_fw_channel_info faw_ci; struct iwl_nan_add_func_common_tail tail; } __packed; /* NAN_DISCO_FUNC_FIXED_CMD_API_S_VER_1 */ /** * struct iwl_nan_add_func_cmd_v2 - NAN add/remove function command, version 2 * * @cmn: nan add function common part * @cipher_capa: Cipher suite information capabilities * @cipher_suite_id: Bitmap of the list of cipher suite IDs * @security_ctx_len: length of tx security context attributes * @sdea_ctrl: SDEA control field * @data: dw aligned fields: service_info, srf, rxFilter, txFilter, * security_ctx */ struct iwl_nan_add_func_cmd_v2 { struct iwl_nan_add_func_common cmn; u8 cipher_capa; u8 cipher_suite_id; __le16 security_ctx_len; __le16 sdea_ctrl; u8 data[0]; } __packed; /* NAN_DISCO_FUNC_FIXED_CMD_API_S_VER_2 */ /** * struct iwl_nan_add_func_cmd - NAN add/remove function command * * @cmn: nan add function common part * @reserved: reserved * @data: dw aligned fields -service_info, srf, rxFilter, txFilter */ struct iwl_nan_add_func_cmd { struct iwl_nan_add_func_common cmn; u8 reserved[2]; u8 data[0]; } __packed; /* NAN_DISCO_FUNC_FIXED_CMD_API_S_VER_1 */ enum iwl_nan_add_func_resp_status { IWL_NAN_DE_FUNC_STATUS_SUCCESSFUL, IWL_NAN_DE_FUNC_STATUS_INSUFFICIENT_ENTRIES, 
IWL_NAN_DE_FUNC_STATUS_INSUFFICIENT_MEMORY, IWL_NAN_DE_FUNC_STATUS_INVALID_INSTANCE, IWL_NAN_DE_FUNC_STATUS_UNSPECIFIED, }; /** * struct iwl_nan_add_func_res - Add NAN function response * * @instance_id: assigned instance_id (if added) * @status: status of the command (&enum iwl_nan_add_func_resp_status) * @reserved: reserved */ struct iwl_nan_add_func_res { u8 instance_id; u8 status; __le16 reserved; } __packed; /* NAN_DISCO_FUNC_CMD_API_S_VER_1 */ /* Shared key cipher suite with CCMP with a 128 bit TK */ #define IWL_NAN_DE_FUNC_CS_SK_CCM_128 BIT(0) /* Shared key cipher suite with GCMP with a 256 bit TK */ #define IWL_NAN_DE_FUNC_CS_SK_GCM_256 BIT(1) /** * enum iwl_nan_de_func_sdea_flags - NAN func SDEA flags * @IWL_NAN_DE_FUNC_SDEA_FSD_REQ: Further Service Discovery (FSD) is required * for the service * @IWL_NAN_DE_FUNC_SDEA_FSD_GAS: GAS is used for FSD * @IWL_NAN_DE_FUNC_SDEA_DP_REQ: Data path required for the service * @IWL_NAN_DE_FUNC_SDEA_DP_MCAST: If data path is required, the type is * multicast * @IWL_NAN_DE_FUNC_SDEA_DP_MCAST_M_TO_M:if multicast data path is required then * it is many to many * @IWL_NAN_DE_FUNC_SDEA_QOS_REQ: QoS is required for the service * @IWL_NAN_DE_FUNC_SDEA_SEC_REQ: Security is required for the service * @IWL_NAN_DE_FUNC_SDEA_RANGIGN_REQ: Ranging is required prior to subscription * to the service */ enum iwl_nan_de_func_sdea_flags { IWL_NAN_DE_FUNC_SDEA_FSD_REQ = BIT(0), IWL_NAN_DE_FUNC_SDEA_FSD_GAS = BIT(1), IWL_NAN_DE_FUNC_SDEA_DP_REQ = BIT(2), IWL_NAN_DE_FUNC_SDEA_DP_MCAST = BIT(3), IWL_NAN_DE_FUNC_SDEA_DP_MCAST_M_TO_M = BIT(4), IWL_NAN_DE_FUNC_SDEA_QOS_REQ = BIT(5), IWL_NAN_DE_FUNC_SDEA_SEC_REQ = BIT(6), IWL_NAN_DE_FUNC_SDEA_RANGIGN_REQ = BIT(7), }; /** * struct iwl_nan_disc_evt_notify_v1 - NaN discovery event * * @peer_mac_addr: peer address * @reserved1: reserved * @type: Type of matching function * @instance_id: local matching instance id * @peer_instance: peer matching instance id * @service_info_len: service specific information length * @attrs_len: additional peer attributes length * @buf: service specific information followed by attributes */ struct iwl_nan_disc_evt_notify_v1 { u8 peer_mac_addr[6]; __le16 reserved1; u8 type; u8 instance_id; u8 peer_instance; u8 service_info_len; __le32 attrs_len; u8 buf[0]; } __packed; /* NAN_DISCO_EVENT_NTFY_API_S_VER_1 */ /** * struct iwl_nan_sec_ctxt_info - NaN security context information * * @type: the security context type * @reserved: for alignment * @len: the length of the security context * @buf: security context data */ struct iwl_nan_sec_ctxt_info { u8 type; u8 reserved; __le16 len; u8 buf[0]; } __packed; /* NAN_DISCO_SEC_CTXT_ID_API_S_VER_1 */ /** * struct iwl_nan_disc_info - NaN match information * * @type: Type of matching function * @instance_id: local matching instance id * @peer_instance: peer matching instance id * @service_info_len: service specific information length * @sdea_control: bit mask of &enum iwl_nan_de_func_sdea_flags_ * @sdea_service_info_len: sdea service specific information length * @sec_ctxt_len: security context information length * @cipher_suite_ids: bit mask of cipher suite IDs * @sdea_update_indicator: update indicator from the sdea attribute * @buf: service specific information followed by sdea service specific * information and by security context information which encapsulates 0 or * more iwl_nan_sec_ctxt_info entries. 
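 *
 * A hedged parsing sketch (illustrative only; 'data' is assumed to point
 * at the received payload, and any padding between the sections is
 * assumed to be handled by the real parser):
 *
 *	struct iwl_nan_disc_info *disc = (void *)data;
 *	const u8 *svc_info = disc->buf;
 *	const u8 *sdea_info = svc_info + disc->service_info_len;
 *	const u8 *sec_ctxt = sdea_info +
 *			     le16_to_cpu(disc->sdea_service_info_len);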
*/ struct iwl_nan_disc_info { u8 type; u8 instance_id; u8 peer_instance; u8 service_info_len; __le16 sdea_control; __le16 sdea_service_info_len; __le16 sec_ctxt_len; u8 cipher_suite_ids; u8 sdea_update_indicator; u8 buf[0]; } __packed; /* NAN_DISCO_INFO_API_S_VER_1 */ /** * struct iwl_nan_disc_evt_notify_v2 - NaN discovery information * * @peer_mac_addr: peer address * @reserved1: reserved * @match_len: Length of the match data * @avail_attrs_len: Length of the availability attributes associated with the * peer * @buf: aggregation of matches, each starting on a dword aligned address, * followed by the peer availability attributes, which also start on a * dword aligned address. */ struct iwl_nan_disc_evt_notify_v2 { u8 peer_mac_addr[6]; __le16 reserved1; __le32 match_len; __le32 avail_attrs_len; u8 buf[0]; } __packed; /* NAN_DISCO_EVENT_NTFY_API_S_VER_2 */ /* NAN function termination reasons */ enum iwl_fw_nan_de_term_reason { IWL_NAN_DE_TERM_FAILURE = 0, IWL_NAN_DE_TERM_TTL_REACHED, IWL_NAN_DE_TERM_USER_REQUEST, }; /** * struct iwl_nan_de_term - NAN function termination event * * @type: type of terminated function (&enum iwl_fw_nan_func_type) * @instance_id: instance id * @reason: termination reason (&enum iwl_fw_nan_de_term_reason) * @reserved1: reserved */ struct iwl_nan_de_term { u8 type; u8 instance_id; u8 reason; u8 reserved1; } __packed; /* NAN_DISCO_TERM_NTFY_API_S_VER_1 */ enum iwl_fw_post_nan_type { IWL_NAN_POST_NAN_ATTR_WLAN = 0, IWL_NAN_POST_NAN_ATTR_P2P, IWL_NAN_POST_NAN_ATTR_IBSS, IWL_NAN_POST_NAN_ATTR_MESH, IWL_NAN_POST_NAN_ATTR_FURTHER_NAN, }; enum iwl_fw_config_flags { NAN_FAW_FLAG_NOTIFY_HOST = BIT(0), }; /** * struct iwl_nan_faw_config_tail - tail of iwl_nan_faw_config for various * channel info structs * * @type: type of post NAN availability (enum %iwl_fw_post_nan_type) * @slots: number of 16TU slots to be available on (should be < 32) * @flags: NAN_FAW_FLAG_* * @op_class: operating class which corresponds to faw_ci */ struct iwl_nan_faw_config_tail { u8 type; u8 slots; u8 flags; u8 op_class; } __packed; /** * struct iwl_nan_faw_config - NAN further availability configuration command * * @id_n_color: id and color of the mac used for further availability * @faw_ci: channel to be available on * @tail: command tail */ struct iwl_nan_faw_config { __le32 id_n_color; struct iwl_fw_channel_info faw_ci; struct iwl_nan_faw_config_tail tail; } __packed; /* _NAN_DISCO_FAW_CMD_API_S_VER_1 */ #endif /* __iwl_fw_api_nan_h__ */ backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h000066400000000000000000000344341351064634500275100ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright(C) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details.
* * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright(C) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* *****************************************************************************/ #ifndef __iwl_fw_api_nvm_reg_h__ #define __iwl_fw_api_nvm_reg_h__ /** * enum iwl_regulatory_and_nvm_subcmd_ids - regulatory/NVM commands */ enum iwl_regulatory_and_nvm_subcmd_ids { /** * @NVM_ACCESS_COMPLETE: &struct iwl_nvm_access_complete_cmd */ NVM_ACCESS_COMPLETE = 0x0, /** * @NVM_GET_INFO: * Command is &struct iwl_nvm_get_info, * response is &struct iwl_nvm_get_info_rsp */ NVM_GET_INFO = 0x2, }; /** * enum iwl_nvm_access_op - NVM access opcode * @IWL_NVM_READ: read NVM * @IWL_NVM_WRITE: write NVM */ enum iwl_nvm_access_op { IWL_NVM_READ = 0, IWL_NVM_WRITE = 1, }; /** * enum iwl_nvm_access_target - target of the NVM_ACCESS_CMD * @NVM_ACCESS_TARGET_CACHE: access the cache * @NVM_ACCESS_TARGET_OTP: access the OTP * @NVM_ACCESS_TARGET_EEPROM: access the EEPROM */ enum iwl_nvm_access_target { NVM_ACCESS_TARGET_CACHE = 0, NVM_ACCESS_TARGET_OTP = 1, NVM_ACCESS_TARGET_EEPROM = 2, }; /** * enum iwl_nvm_section_type - section types for NVM_ACCESS_CMD * @NVM_SECTION_TYPE_SW: software section * @NVM_SECTION_TYPE_REGULATORY: regulatory section * @NVM_SECTION_TYPE_CALIBRATION: calibration section * @NVM_SECTION_TYPE_PRODUCTION: production section * @NVM_SECTION_TYPE_REGULATORY_SDP: regulatory section used by 3168 series * @NVM_SECTION_TYPE_MAC_OVERRIDE: MAC override section * @NVM_SECTION_TYPE_PHY_SKU: PHY SKU section * @NVM_MAX_NUM_SECTIONS: number of sections */ enum iwl_nvm_section_type { NVM_SECTION_TYPE_SW = 1, NVM_SECTION_TYPE_REGULATORY = 3, NVM_SECTION_TYPE_CALIBRATION = 4, NVM_SECTION_TYPE_PRODUCTION = 5, NVM_SECTION_TYPE_REGULATORY_SDP = 8, NVM_SECTION_TYPE_MAC_OVERRIDE = 11, NVM_SECTION_TYPE_PHY_SKU = 12, NVM_MAX_NUM_SECTIONS = 13, }; /** * struct iwl_nvm_access_cmd - Request the device to send an NVM section * @op_code: &enum iwl_nvm_access_op * @target: &enum iwl_nvm_access_target * @type: &enum iwl_nvm_section_type * @offset: offset in bytes into the section * @length: in bytes, to read/write * @data: if write operation, the data to write; on read it is empty */ struct iwl_nvm_access_cmd { u8 op_code; u8 target; __le16 type; __le16 offset; __le16 length; u8 data[]; } __packed; /* NVM_ACCESS_CMD_API_S_VER_2 */ /** * struct iwl_nvm_access_resp - response to NVM_ACCESS_CMD * @offset: offset in bytes into the section * @length: in bytes, either how much was written or read * @type: NVM_SECTION_TYPE_* * @status: 0 for success, fail otherwise * @data: if read operation, the data returned. Empty on write.
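 *
 * Illustrative read request built with the matching
 * &struct iwl_nvm_access_cmd (a sketch; 'chunk_len' and the helper that
 * actually sends the host command are assumed):
 *
 *	struct iwl_nvm_access_cmd cmd = {
 *		.op_code = IWL_NVM_READ,
 *		.target = NVM_ACCESS_TARGET_CACHE,
 *		.type = cpu_to_le16(NVM_SECTION_TYPE_SW),
 *		.offset = cpu_to_le16(0),
 *		.length = cpu_to_le16(chunk_len),
 *	};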
*/ struct iwl_nvm_access_resp { __le16 offset; __le16 length; __le16 type; __le16 status; u8 data[]; } __packed; /* NVM_ACCESS_CMD_RESP_API_S_VER_2 */ /* * struct iwl_nvm_get_info - request to get NVM data */ struct iwl_nvm_get_info { __le32 reserved; } __packed; /* REGULATORY_NVM_GET_INFO_CMD_API_S_VER_1 */ /** * enum iwl_nvm_info_general_flags - flags in NVM_GET_INFO resp * @NVM_GENERAL_FLAGS_EMPTY_OTP: 1 if OTP is empty */ enum iwl_nvm_info_general_flags { NVM_GENERAL_FLAGS_EMPTY_OTP = BIT(0), }; /** * struct iwl_nvm_get_info_general - general NVM data * @flags: bit 0: 1 - empty, 0 - non-empty * @nvm_version: nvm version * @board_type: board type * @n_hw_addrs: number of reserved MAC addresses */ struct iwl_nvm_get_info_general { __le32 flags; __le16 nvm_version; u8 board_type; u8 n_hw_addrs; } __packed; /* REGULATORY_NVM_GET_INFO_GENERAL_S_VER_2 */ /** * enum iwl_nvm_mac_sku_flags - flags in &iwl_nvm_get_info_sku * @NVM_MAC_SKU_FLAGS_BAND_2_4_ENABLED: true if 2.4 band enabled * @NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED: true if 5.2 band enabled * @NVM_MAC_SKU_FLAGS_802_11N_ENABLED: true if 11n enabled * @NVM_MAC_SKU_FLAGS_802_11AC_ENABLED: true if 11ac enabled * @NVM_MAC_SKU_FLAGS_MIMO_DISABLED: true if MIMO disabled * @NVM_MAC_SKU_FLAGS_WAPI_ENABLED: true if WAPI enabled * @NVM_MAC_SKU_FLAGS_REG_CHECK_ENABLED: true if regulatory checker enabled * @NVM_MAC_SKU_FLAGS_API_LOCK_ENABLED: true if API lock enabled */ enum iwl_nvm_mac_sku_flags { NVM_MAC_SKU_FLAGS_BAND_2_4_ENABLED = BIT(0), NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED = BIT(1), NVM_MAC_SKU_FLAGS_802_11N_ENABLED = BIT(2), NVM_MAC_SKU_FLAGS_802_11AC_ENABLED = BIT(3), /** * @NVM_MAC_SKU_FLAGS_802_11AX_ENABLED: true if 11ax enabled */ NVM_MAC_SKU_FLAGS_802_11AX_ENABLED = BIT(4), NVM_MAC_SKU_FLAGS_MIMO_DISABLED = BIT(5), NVM_MAC_SKU_FLAGS_WAPI_ENABLED = BIT(8), NVM_MAC_SKU_FLAGS_REG_CHECK_ENABLED = BIT(14), NVM_MAC_SKU_FLAGS_API_LOCK_ENABLED = BIT(15), }; /** * struct iwl_nvm_get_info_sku - mac information * @mac_sku_flags: flags for SKU, see &enum iwl_nvm_mac_sku_flags */ struct iwl_nvm_get_info_sku { __le32 mac_sku_flags; } __packed; /* REGULATORY_NVM_GET_INFO_MAC_SKU_SECTION_S_VER_2 */ /** * struct iwl_nvm_get_info_phy - phy information * @tx_chains: BIT 0 chain A, BIT 1 chain B * @rx_chains: BIT 0 chain A, BIT 1 chain B */ struct iwl_nvm_get_info_phy { __le32 tx_chains; __le32 rx_chains; } __packed; /* REGULATORY_NVM_GET_INFO_PHY_SKU_SECTION_S_VER_1 */ #define IWL_NUM_CHANNELS_V1 51 #define IWL_NUM_CHANNELS 110 /** * struct iwl_nvm_get_info_regulatory - regulatory information * @lar_enabled: is LAR enabled * @channel_profile: regulatory data of this channel * @reserved: reserved */ struct iwl_nvm_get_info_regulatory_v1 { __le32 lar_enabled; __le16 channel_profile[IWL_NUM_CHANNELS_V1]; __le16 reserved; } __packed; /* REGULATORY_NVM_GET_INFO_REGULATORY_S_VER_1 */ /** * struct iwl_nvm_get_info_regulatory - regulatory information * @lar_enabled: is LAR enabled * @n_channels: number of valid channels in the array * @channel_profile: regulatory data of this channel */ struct iwl_nvm_get_info_regulatory { __le32 lar_enabled; __le32 n_channels; __le32 channel_profile[IWL_NUM_CHANNELS]; } __packed; /* REGULATORY_NVM_GET_INFO_REGULATORY_S_VER_2 */ /** * struct iwl_nvm_get_info_rsp_v3 - response to get NVM data * @general: general NVM data * @mac_sku: data relating to MAC sku * @phy_sku: data relating to PHY sku * @regulatory: regulatory data */ struct iwl_nvm_get_info_rsp_v3 { struct iwl_nvm_get_info_general general; struct iwl_nvm_get_info_sku 
mac_sku; struct iwl_nvm_get_info_phy phy_sku; struct iwl_nvm_get_info_regulatory_v1 regulatory; } __packed; /* REGULATORY_NVM_GET_INFO_RSP_API_S_VER_3 */ /** * struct iwl_nvm_get_info_rsp - response to get NVM data * @general: general NVM data * @mac_sku: data relating to MAC sku * @phy_sku: data relating to PHY sku * @regulatory: regulatory data */ struct iwl_nvm_get_info_rsp { struct iwl_nvm_get_info_general general; struct iwl_nvm_get_info_sku mac_sku; struct iwl_nvm_get_info_phy phy_sku; struct iwl_nvm_get_info_regulatory regulatory; } __packed; /* REGULATORY_NVM_GET_INFO_RSP_API_S_VER_4 */ /** * struct iwl_nvm_access_complete_cmd - NVM_ACCESS commands are completed * @reserved: reserved */ struct iwl_nvm_access_complete_cmd { __le32 reserved; } __packed; /* NVM_ACCESS_COMPLETE_CMD_API_S_VER_1 */ /** * struct iwl_mcc_update_cmd - Request the device to update geographic * regulatory profile according to the given MCC (Mobile Country Code). * The MCC is two letter-code, ascii upper case[A-Z] or '00' for world domain. * 'ZZ' MCC will be used to switch to NVM default profile; in this case, the * MCC in the cmd response will be the relevant MCC in the NVM. * @mcc: given mobile country code * @source_id: the source from where we got the MCC, see iwl_mcc_source * @reserved: reserved for alignment * @key: integrity key for MCC API OEM testing * @reserved2: reserved */ struct iwl_mcc_update_cmd { __le16 mcc; u8 source_id; u8 reserved; __le32 key; u8 reserved2[20]; } __packed; /* LAR_UPDATE_MCC_CMD_API_S_VER_2 */ /** * enum iwl_geo_information - geographic information. * @GEO_NO_INFO: no special info for this geo profile. * @GEO_WMM_ETSI_5GHZ_INFO: this geo profile limits the WMM params * for the 5 GHz band. */ enum iwl_geo_information { GEO_NO_INFO = 0, GEO_WMM_ETSI_5GHZ_INFO = BIT(0), }; /** * struct iwl_mcc_update_resp_v3 - response to MCC_UPDATE_CMD. * Contains the new channel control profile map, if changed, and the new MCC * (mobile country code). * The new MCC may be different than what was requested in MCC_UPDATE_CMD. * @status: see &enum iwl_mcc_update_status * @mcc: the new applied MCC * @cap: capabilities for all channels which matches the MCC * @source_id: the MCC source, see iwl_mcc_source * @time: time elapsed from the MCC test start (in units of 30 seconds) * @geo_info: geographic specific profile information * see &enum iwl_geo_information. * @n_channels: number of channels in @channels_data. * @channels: channel control data map, DWORD for each channel. Only the first * 16bits are used. */ struct iwl_mcc_update_resp_v3 { __le32 status; __le16 mcc; u8 cap; u8 source_id; __le16 time; __le16 geo_info; __le32 n_channels; __le32 channels[0]; } __packed; /* LAR_UPDATE_MCC_CMD_RESP_S_VER_3 */ /** * struct iwl_mcc_update_resp - response to MCC_UPDATE_CMD. * Contains the new channel control profile map, if changed, and the new MCC * (mobile country code). * The new MCC may be different than what was requested in MCC_UPDATE_CMD. * @status: see &enum iwl_mcc_update_status * @mcc: the new applied MCC * @cap: capabilities for all channels which matches the MCC * @time: time elapsed from the MCC test start (in units of 30 seconds) * @geo_info: geographic specific profile information * see &enum iwl_geo_information. * @source_id: the MCC source, see iwl_mcc_source * @reserved: for four bytes alignment. * @n_channels: number of channels in @channels_data. * @channels: channel control data map, DWORD for each channel. Only the first * 16bits are used. 
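 *
 * Illustrative walk over the returned map (a sketch; 'resp' is assumed
 * to point at a complete response):
 *
 *	u32 i;
 *
 *	for (i = 0; i < le32_to_cpu(resp->n_channels); i++) {
 *		u16 ch_ctrl = le32_to_cpu(resp->channels[i]) & 0xffff;
 *		/* only the low 16 bits carry channel control data */
 *	}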
*/ struct iwl_mcc_update_resp { __le32 status; __le16 mcc; __le16 cap; __le16 time; __le16 geo_info; u8 source_id; u8 reserved[3]; __le32 n_channels; __le32 channels[0]; } __packed; /* LAR_UPDATE_MCC_CMD_RESP_S_VER_4 */ /** * struct iwl_mcc_chub_notif - chub notifies of mcc change * (MCC_CHUB_UPDATE_CMD = 0xc9) * The Chub (Communication Hub, CommsHUB) is a HW component that connects to * the cellular and connectivity cores that gets updates of the mcc, and * notifies the ucode directly of any mcc change. * The ucode requests the driver to request the device to update geographic * regulatory profile according to the given MCC (Mobile Country Code). * The MCC is two letter-code, ascii upper case[A-Z] or '00' for world domain. * 'ZZ' MCC will be used to switch to NVM default profile; in this case, the * MCC in the cmd response will be the relevant MCC in the NVM. * @mcc: given mobile country code * @source_id: identity of the change originator, see iwl_mcc_source * @reserved1: reserved for alignment */ struct iwl_mcc_chub_notif { __le16 mcc; u8 source_id; u8 reserved1; } __packed; /* LAR_MCC_NOTIFY_S */ enum iwl_mcc_update_status { MCC_RESP_NEW_CHAN_PROFILE, MCC_RESP_SAME_CHAN_PROFILE, MCC_RESP_INVALID, MCC_RESP_NVM_DISABLED, MCC_RESP_ILLEGAL, MCC_RESP_LOW_PRIORITY, MCC_RESP_TEST_MODE_ACTIVE, MCC_RESP_TEST_MODE_NOT_ACTIVE, MCC_RESP_TEST_MODE_DENIAL_OF_SERVICE, }; enum iwl_mcc_source { MCC_SOURCE_OLD_FW = 0, MCC_SOURCE_ME = 1, MCC_SOURCE_BIOS = 2, MCC_SOURCE_3G_LTE_HOST = 3, MCC_SOURCE_3G_LTE_DEVICE = 4, MCC_SOURCE_WIFI = 5, MCC_SOURCE_RESERVED = 6, MCC_SOURCE_DEFAULT = 7, MCC_SOURCE_UNINITIALIZED = 8, MCC_SOURCE_MCC_API = 9, MCC_SOURCE_GET_CURRENT = 0x10, MCC_SOURCE_GETTING_MCC_TEST_MODE = 0x11, }; #endif /* __iwl_fw_api_nvm_reg_h__ */ backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h000066400000000000000000000075611351064634500275500ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. 
* * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #ifndef __iwl_fw_api_offload_h__ #define __iwl_fw_api_offload_h__ /** * enum iwl_prot_offload_subcmd_ids - protocol offload commands */ enum iwl_prot_offload_subcmd_ids { /** * @STORED_BEACON_NTF: &struct iwl_stored_beacon_notif */ STORED_BEACON_NTF = 0xFF, }; #define MAX_STORED_BEACON_SIZE 600 /** * struct iwl_stored_beacon_notif - Stored beacon notification * * @system_time: system time on air rise * @tsf: TSF on air rise * @beacon_timestamp: beacon on air rise * @band: band, matches &RX_RES_PHY_FLAGS_BAND_24 definition * @channel: channel this beacon was received on * @rates: rate in ucode internal format * @byte_count: frame's byte count * @data: beacon data, length in @byte_count */ struct iwl_stored_beacon_notif { __le32 system_time; __le64 tsf; __le32 beacon_timestamp; __le16 band; __le16 channel; __le32 rates; __le32 byte_count; u8 data[MAX_STORED_BEACON_SIZE]; } __packed; /* WOWLAN_STROED_BEACON_INFO_S_VER_2 */ #endif /* __iwl_fw_api_offload_h__ */ backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/fw/api/paging.h000066400000000000000000000070351351064634500273770ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. 
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #ifndef __iwl_fw_api_paging_h__ #define __iwl_fw_api_paging_h__ #define NUM_OF_FW_PAGING_BLOCKS 33 /* 32 for data and 1 block for CSS */ /** * struct iwl_fw_paging_cmd - paging layout * * Send to FW the paging layout in the driver. * * @flags: various flags for the command * @block_size: the block size in powers of 2 * @block_num: number of blocks specified in the command. * @device_phy_addr: virtual addresses from device side */ struct iwl_fw_paging_cmd { __le32 flags; __le32 block_size; __le32 block_num; __le32 device_phy_addr[NUM_OF_FW_PAGING_BLOCKS]; } __packed; /* FW_PAGING_BLOCK_CMD_API_S_VER_1 */ #endif /* __iwl_fw_api_paging_h__ */ backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h000066400000000000000000000155531351064634500277160ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. 
* * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #ifndef __iwl_fw_api_phy_ctxt_h__ #define __iwl_fw_api_phy_ctxt_h__ /* Supported bands */ #define PHY_BAND_5 (0) #define PHY_BAND_24 (1) /* Supported channel width, vary if there is VHT support */ #define PHY_VHT_CHANNEL_MODE20 (0x0) #define PHY_VHT_CHANNEL_MODE40 (0x1) #define PHY_VHT_CHANNEL_MODE80 (0x2) #define PHY_VHT_CHANNEL_MODE160 (0x3) /* * Control channel position: * For legacy set bit means upper channel, otherwise lower. * For VHT - bit-2 marks if the control is lower/upper relative to center-freq * bits-1:0 mark the distance from the center freq. for 20Mhz, offset is 0. 
* center_freq * | * 40Mhz |_______|_______| * 80Mhz |_______|_______|_______|_______| * 160Mhz |_______|_______|_______|_______|_______|_______|_______|_______| * code 011 010 001 000 | 100 101 110 111 */ #define PHY_VHT_CTRL_POS_1_BELOW (0x0) #define PHY_VHT_CTRL_POS_2_BELOW (0x1) #define PHY_VHT_CTRL_POS_3_BELOW (0x2) #define PHY_VHT_CTRL_POS_4_BELOW (0x3) #define PHY_VHT_CTRL_POS_1_ABOVE (0x4) #define PHY_VHT_CTRL_POS_2_ABOVE (0x5) #define PHY_VHT_CTRL_POS_3_ABOVE (0x6) #define PHY_VHT_CTRL_POS_4_ABOVE (0x7) /* * struct iwl_fw_channel_info_v1 - channel information * * @band: PHY_BAND_* * @channel: channel number * @width: PHY_[VHT|LEGACY]_CHANNEL_* * @ctrl_pos: PHY_[VHT|LEGACY]_CTRL_* */ struct iwl_fw_channel_info_v1 { u8 band; u8 channel; u8 width; u8 ctrl_pos; } __packed; /* CHANNEL_CONFIG_API_S_VER_1 */ /* * struct iwl_fw_channel_info - channel information * * @channel: channel number * @band: PHY_BAND_* * @width: PHY_[VHT|LEGACY]_CHANNEL_* * @ctrl_pos: PHY_[VHT|LEGACY]_CTRL_* * @reserved: for future use and alignment */ struct iwl_fw_channel_info { __le32 channel; u8 band; u8 width; u8 ctrl_pos; u8 reserved; } __packed; /*CHANNEL_CONFIG_API_S_VER_2 */ #define PHY_RX_CHAIN_DRIVER_FORCE_POS (0) #define PHY_RX_CHAIN_DRIVER_FORCE_MSK \ (0x1 << PHY_RX_CHAIN_DRIVER_FORCE_POS) #define PHY_RX_CHAIN_VALID_POS (1) #define PHY_RX_CHAIN_VALID_MSK \ (0x7 << PHY_RX_CHAIN_VALID_POS) #define PHY_RX_CHAIN_FORCE_SEL_POS (4) #define PHY_RX_CHAIN_FORCE_SEL_MSK \ (0x7 << PHY_RX_CHAIN_FORCE_SEL_POS) #define PHY_RX_CHAIN_FORCE_MIMO_SEL_POS (7) #define PHY_RX_CHAIN_FORCE_MIMO_SEL_MSK \ (0x7 << PHY_RX_CHAIN_FORCE_MIMO_SEL_POS) #define PHY_RX_CHAIN_CNT_POS (10) #define PHY_RX_CHAIN_CNT_MSK \ (0x3 << PHY_RX_CHAIN_CNT_POS) #define PHY_RX_CHAIN_MIMO_CNT_POS (12) #define PHY_RX_CHAIN_MIMO_CNT_MSK \ (0x3 << PHY_RX_CHAIN_MIMO_CNT_POS) #define PHY_RX_CHAIN_MIMO_FORCE_POS (14) #define PHY_RX_CHAIN_MIMO_FORCE_MSK \ (0x1 << PHY_RX_CHAIN_MIMO_FORCE_POS) /* TODO: fix the value, make it depend on firmware at runtime? */ #define NUM_PHY_CTX 3 /* TODO: complete missing documentation */ /** * struct iwl_phy_context_cmd_tail - tail of iwl_phy_ctx_cmd for alignment with * various channel structures. * * @txchain_info: ??? * @rxchain_info: ??? * @acquisition_data: ??? * @dsp_cfg_flags: set to 0 */ struct iwl_phy_context_cmd_tail { __le32 txchain_info; __le32 rxchain_info; __le32 acquisition_data; __le32 dsp_cfg_flags; } __packed; /** * struct iwl_phy_context_cmd - config of the PHY context * ( PHY_CONTEXT_CMD = 0x8 ) * @id_and_color: ID and color of the relevant Binding * @action: action to perform, one of FW_CTXT_ACTION_* * @apply_time: 0 means immediate apply and context switch. * other value means apply new params after X usecs * @tx_param_color: ??? * @ci: channel info * @tail: command tail */ struct iwl_phy_context_cmd { /* COMMON_INDEX_HDR_API_S_VER_1 */ __le32 id_and_color; __le32 action; /* PHY_CONTEXT_DATA_API_S_VER_1 */ __le32 apply_time; __le32 tx_param_color; struct iwl_fw_channel_info ci; struct iwl_phy_context_cmd_tail tail; } __packed; /* PHY_CONTEXT_CMD_API_VER_1 */ #endif /* __iwl_fw_api_phy_ctxt_h__ */ backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h000066400000000000000000000204351351064634500267310ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license.
* * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright(c) 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright(c) 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* *****************************************************************************/ #ifndef __iwl_fw_api_phy_h__ #define __iwl_fw_api_phy_h__ /** * enum iwl_phy_ops_subcmd_ids - PHY group commands */ enum iwl_phy_ops_subcmd_ids { /** * @CMD_DTS_MEASUREMENT_TRIGGER_WIDE: * Uses either &struct iwl_dts_measurement_cmd or * &struct iwl_ext_dts_measurement_cmd */ CMD_DTS_MEASUREMENT_TRIGGER_WIDE = 0x0, /** * @CTDP_CONFIG_CMD: &struct iwl_mvm_ctdp_cmd */ CTDP_CONFIG_CMD = 0x03, /** * @TEMP_REPORTING_THRESHOLDS_CMD: &struct temp_report_ths_cmd */ TEMP_REPORTING_THRESHOLDS_CMD = 0x04, /** * @GEO_TX_POWER_LIMIT: &struct iwl_geo_tx_power_profiles_cmd */ GEO_TX_POWER_LIMIT = 0x05, /** * @PER_PLATFORM_ANT_GAIN_CMD: &struct iwl_ppag_table_cmd */ PER_PLATFORM_ANT_GAIN_CMD = 0x07, /** * @CT_KILL_NOTIFICATION: &struct ct_kill_notif */ CT_KILL_NOTIFICATION = 0xFE, /** * @DTS_MEASUREMENT_NOTIF_WIDE: * &struct iwl_dts_measurement_notif_v1 or * &struct iwl_dts_measurement_notif_v2 */ DTS_MEASUREMENT_NOTIF_WIDE = 0xFF, }; /* DTS measurements */ enum iwl_dts_measurement_flags { DTS_TRIGGER_CMD_FLAGS_TEMP = BIT(0), DTS_TRIGGER_CMD_FLAGS_VOLT = BIT(1), }; /** * struct iwl_dts_measurement_cmd - request DTS temp and/or voltage measurements * * @flags: indicates which measurements we want as specified in * &enum iwl_dts_measurement_flags */ struct iwl_dts_measurement_cmd { __le32 flags; } __packed; /* TEMPERATURE_MEASUREMENT_TRIGGER_CMD_S */ /** * enum iwl_dts_control_measurement_mode - DTS measurement type * @DTS_AUTOMATIC: Automatic mode (full SW control). Provide temperature read * back (latest value. Not waiting for new value). Use automatic * SW DTS configuration. * @DTS_REQUEST_READ: Request DTS read. Configure DTS with manual settings, * trigger DTS reading and provide read back temperature read * when available. * @DTS_OVER_WRITE: over-write the DTS temperatures in the SW until next read * @DTS_DIRECT_WITHOUT_MEASURE: DTS returns its latest temperature result, * without measurement trigger. */ enum iwl_dts_control_measurement_mode { DTS_AUTOMATIC = 0, DTS_REQUEST_READ = 1, DTS_OVER_WRITE = 2, DTS_DIRECT_WITHOUT_MEASURE = 3, }; /** * enum iwl_dts_used - DTS to use or used for measurement in the DTS request * @DTS_USE_TOP: Top * @DTS_USE_CHAIN_A: chain A * @DTS_USE_CHAIN_B: chain B * @DTS_USE_CHAIN_C: chain C * @XTAL_TEMPERATURE: read temperature from xtal */ enum iwl_dts_used { DTS_USE_TOP = 0, DTS_USE_CHAIN_A = 1, DTS_USE_CHAIN_B = 2, DTS_USE_CHAIN_C = 3, XTAL_TEMPERATURE = 4, }; /** * enum iwl_dts_bit_mode - bit-mode to use in DTS request read mode * @DTS_BIT6_MODE: bit 6 mode * @DTS_BIT8_MODE: bit 8 mode */ enum iwl_dts_bit_mode { DTS_BIT6_MODE = 0, DTS_BIT8_MODE = 1, }; /** * struct iwl_ext_dts_measurement_cmd - request extended DTS temp measurements * @control_mode: see &enum iwl_dts_control_measurement_mode * @temperature: used when over write DTS mode is selected * @sensor: set temperature sensor to use. See &enum iwl_dts_used * @avg_factor: average factor to DTS in request DTS read mode * @bit_mode: value defines the DTS bit mode to use. 
See &enum iwl_dts_bit_mode * @step_duration: step duration for the DTS */ struct iwl_ext_dts_measurement_cmd { __le32 control_mode; __le32 temperature; __le32 sensor; __le32 avg_factor; __le32 bit_mode; __le32 step_duration; } __packed; /* XVT_FW_DTS_CONTROL_MEASUREMENT_REQUEST_API_S */ /** * struct iwl_dts_measurement_notif_v1 - measurements notification * * @temp: the measured temperature * @voltage: the measured voltage */ struct iwl_dts_measurement_notif_v1 { __le32 temp; __le32 voltage; } __packed; /* TEMPERATURE_MEASUREMENT_TRIGGER_NTFY_S_VER_1*/ /** * struct iwl_dts_measurement_notif_v2 - measurements notification * * @temp: the measured temperature * @voltage: the measured voltage * @threshold_idx: the trip index that was crossed */ struct iwl_dts_measurement_notif_v2 { __le32 temp; __le32 voltage; __le32 threshold_idx; } __packed; /* TEMPERATURE_MEASUREMENT_TRIGGER_NTFY_S_VER_2 */ /** * struct ct_kill_notif - CT-kill entry notification * * @temperature: the current temperature in celsius * @reserved: reserved */ struct ct_kill_notif { __le16 temperature; __le16 reserved; } __packed; /* GRP_PHY_CT_KILL_NTF */ /** * enum ctdp_cmd_operation - CTDP command operations * @CTDP_CMD_OPERATION_START: update the current budget * @CTDP_CMD_OPERATION_STOP: stop ctdp * @CTDP_CMD_OPERATION_REPORT: get the average budget */ enum iwl_mvm_ctdp_cmd_operation { CTDP_CMD_OPERATION_START = 0x1, CTDP_CMD_OPERATION_STOP = 0x2, CTDP_CMD_OPERATION_REPORT = 0x4, };/* CTDP_CMD_OPERATION_TYPE_E */ /** * struct iwl_mvm_ctdp_cmd - track and manage the FW power consumption budget * * @operation: see &enum iwl_mvm_ctdp_cmd_operation * @budget: the budget in milliwatt * @window_size: defined in API but not used */ struct iwl_mvm_ctdp_cmd { __le32 operation; __le32 budget; __le32 window_size; } __packed; #define IWL_MAX_DTS_TRIPS 8 /** * struct temp_report_ths_cmd - set temperature thresholds * * @num_temps: number of temperature thresholds passed * @thresholds: array with the thresholds to be configured */ struct temp_report_ths_cmd { __le32 num_temps; __le16 thresholds[IWL_MAX_DTS_TRIPS]; } __packed; /* GRP_PHY_TEMP_REPORTING_THRESHOLDS_CMD */ #endif /* __iwl_fw_api_phy_h__ */ backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/fw/api/power.h000066400000000000000000000557071351064634500272770ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2017 Intel Deutschland GmbH * Copyright (C) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. 
All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2017 Intel Deutschland GmbH * Copyright (C) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/
#ifndef __iwl_fw_api_power_h__ #define __iwl_fw_api_power_h__
/* Power Management Commands, Responses, Notifications */
/** * enum iwl_ltr_config_flags - masks for LTR config command flags * @LTR_CFG_FLAG_FEATURE_ENABLE: Feature operational status * @LTR_CFG_FLAG_HW_DIS_ON_SHADOW_REG_ACCESS: allow LTR change on shadow * memory access * @LTR_CFG_FLAG_HW_EN_SHRT_WR_THROUGH: allow LTR msg send on ANY LTR * reg change * @LTR_CFG_FLAG_HW_DIS_ON_D0_2_D3: allow LTR msg send on transition from * D0 to D3 * @LTR_CFG_FLAG_SW_SET_SHORT: fixed static short LTR register * @LTR_CFG_FLAG_SW_SET_LONG: fixed static long LTR register * @LTR_CFG_FLAG_DENIE_C10_ON_PD: allow going into C10 on PD * @LTR_CFG_FLAG_UPDATE_VALUES: update config values and short * idle timeout */ enum iwl_ltr_config_flags { LTR_CFG_FLAG_FEATURE_ENABLE = BIT(0), LTR_CFG_FLAG_HW_DIS_ON_SHADOW_REG_ACCESS = BIT(1), LTR_CFG_FLAG_HW_EN_SHRT_WR_THROUGH = BIT(2), LTR_CFG_FLAG_HW_DIS_ON_D0_2_D3 = BIT(3), LTR_CFG_FLAG_SW_SET_SHORT = BIT(4), LTR_CFG_FLAG_SW_SET_LONG = BIT(5), LTR_CFG_FLAG_DENIE_C10_ON_PD = BIT(6), LTR_CFG_FLAG_UPDATE_VALUES = BIT(7), };
/** * struct iwl_ltr_config_cmd_v1 - configures the LTR * @flags: See &enum iwl_ltr_config_flags * @static_long: static LTR Long register value. * @static_short: static LTR Short register value. */ struct iwl_ltr_config_cmd_v1 { __le32 flags; __le32 static_long; __le32 static_short; } __packed; /* LTR_CAPABLE_API_S_VER_1 */
#define LTR_VALID_STATES_NUM 4
/** * struct iwl_ltr_config_cmd - configures the LTR * @flags: See &enum iwl_ltr_config_flags * @static_long: static LTR Long register value. * @static_short: static LTR Short register value. * @ltr_cfg_values: LTR parameters table values (in usec) in following order: * TX, RX, Short Idle, Long Idle.
Used only if %LTR_CFG_FLAG_UPDATE_VALUES * is set. * @ltr_short_idle_timeout: LTR Short Idle timeout (in usec). Used only if * %LTR_CFG_FLAG_UPDATE_VALUES is set. */ struct iwl_ltr_config_cmd { __le32 flags; __le32 static_long; __le32 static_short; __le32 ltr_cfg_values[LTR_VALID_STATES_NUM]; __le32 ltr_short_idle_timeout; } __packed; /* LTR_CAPABLE_API_S_VER_2 */
/* Radio LP RX Energy Threshold measured in dBm */ #define POWER_LPRX_RSSI_THRESHOLD 75 #define POWER_LPRX_RSSI_THRESHOLD_MAX 94 #define POWER_LPRX_RSSI_THRESHOLD_MIN 30
/** * enum iwl_power_flags - masks for power table command flags * @POWER_FLAGS_POWER_SAVE_ENA_MSK: '1' Allow saving power by turning off the * receiver and transmitter. '0' - does not allow. * @POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK: '0' Driver disables power management, * '1' Driver enables PM (use rest of parameters) * @POWER_FLAGS_SKIP_OVER_DTIM_MSK: '0' PM has to wake up every DTIM, * '1' PM may sleep over DTIM until the listen interval. * @POWER_FLAGS_SNOOZE_ENA_MSK: Enable snoozing only if uAPSD is enabled and all * access categories are both delivery and trigger enabled. * @POWER_FLAGS_BT_SCO_ENA: Enable BT SCO coex only if uAPSD and * PBW Snoozing enabled * @POWER_FLAGS_ADVANCE_PM_ENA_MSK: Advanced PM (uAPSD) enable mask * @POWER_FLAGS_LPRX_ENA_MSK: Low Power RX enable. * @POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK: AP/GO's uAPSD misbehaving * detection enablement */ enum iwl_power_flags { POWER_FLAGS_POWER_SAVE_ENA_MSK = BIT(0), POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK = BIT(1), POWER_FLAGS_SKIP_OVER_DTIM_MSK = BIT(2), POWER_FLAGS_SNOOZE_ENA_MSK = BIT(5), POWER_FLAGS_BT_SCO_ENA = BIT(8), POWER_FLAGS_ADVANCE_PM_ENA_MSK = BIT(9), POWER_FLAGS_LPRX_ENA_MSK = BIT(11), POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK = BIT(12), };
#define IWL_POWER_VEC_SIZE 5
/** * struct iwl_powertable_cmd - legacy power command. Besides old API support, * this is also used with a new power API for device-wide power settings. * POWER_TABLE_CMD = 0x77 (command, has simple generic response) * * @flags: Power table command flags from POWER_FLAGS_* * @keep_alive_seconds: Keep alive period in seconds. Default - 25 sec. * Minimum allowed: 3 * DTIM. The keep alive period must be * set regardless of power scheme or current power state. * The FW uses this value also when PM is disabled. * @debug_flags: debug flags * @rx_data_timeout: Minimum time (usec) from last Rx packet for AM to * PSM transition - legacy PM * @tx_data_timeout: Minimum time (usec) from last Tx packet for AM to * PSM transition - legacy PM * @sleep_interval: not in use * @skip_dtim_periods: Number of DTIM periods to skip if Skip over DTIM flag * is set. For example, if it is required to skip over * one DTIM, this value needs to be set to 2 (DTIM periods). * @lprx_rssi_threshold: Signal strength up to which LP RX can be enabled. * Default: 80 dBm */ struct iwl_powertable_cmd { /* PM_POWER_TABLE_CMD_API_S_VER_6 */ __le16 flags; u8 keep_alive_seconds; u8 debug_flags; __le32 rx_data_timeout; __le32 tx_data_timeout; __le32 sleep_interval[IWL_POWER_VEC_SIZE]; __le32 skip_dtim_periods; __le32 lprx_rssi_threshold; } __packed;
/** * enum iwl_device_power_flags - masks for device power command flags * @DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK: * '1' Allow saving power by turning off the * receiver and transmitter. '0' - does not allow. * @DEVICE_POWER_FLAGS_ALLOW_MEM_RETENTION_MSK: * Device Retention indication, '1' indicates retention is enabled. * @DEVICE_POWER_FLAGS_32K_CLK_VALID_MSK: * 32 kHz external slow clock valid indication, '1' indicates the clock * is valid.
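 *
 * As an illustrative sketch only (not part of the firmware API definition),
 * a driver could combine these flags when filling the device power command
 * defined below:
 *
 *	struct iwl_device_power_cmd cmd = {
 *		.flags = cpu_to_le16(DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK |
 *				     DEVICE_POWER_FLAGS_32K_CLK_VALID_MSK),
 *	};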
*/ enum iwl_device_power_flags { DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK = BIT(0), DEVICE_POWER_FLAGS_ALLOW_MEM_RETENTION_MSK = BIT(1), DEVICE_POWER_FLAGS_32K_CLK_VALID_MSK = BIT(12), };
/** * struct iwl_device_power_cmd - device wide power command. * DEVICE_POWER_CMD = 0x77 (command, has simple generic response) * * @flags: Power table command flags from &enum iwl_device_power_flags * @reserved: reserved (padding) */ struct iwl_device_power_cmd { /* PM_POWER_TABLE_CMD_API_S_VER_6 */ __le16 flags; __le16 reserved; } __packed;
/** * struct iwl_mac_power_cmd - New power command containing uAPSD support * MAC_PM_POWER_TABLE = 0xA9 (command, has simple generic response) * @id_and_color: MAC context identifier, &enum iwl_ctxt_id_and_color * @flags: Power table command flags from POWER_FLAGS_* * @keep_alive_seconds: Keep alive period in seconds. Default - 25 sec. * Minimum allowed: 3 * DTIM. The keep alive period must be * set regardless of power scheme or current power state. * The FW uses this value also when PM is disabled. * @rx_data_timeout: Minimum time (usec) from last Rx packet for AM to * PSM transition - legacy PM * @tx_data_timeout: Minimum time (usec) from last Tx packet for AM to * PSM transition - legacy PM * @skip_dtim_periods: Number of DTIM periods to skip if Skip over DTIM flag * is set. For example, if it is required to skip over * one DTIM, this value needs to be set to 2 (DTIM periods). * @rx_data_timeout_uapsd: Minimum time (usec) from last Rx packet for AM to * PSM transition - uAPSD * @tx_data_timeout_uapsd: Minimum time (usec) from last Tx packet for AM to * PSM transition - uAPSD * @lprx_rssi_threshold: Signal strength up to which LP RX can be enabled. * Default: 80 dBm * @snooze_interval: Maximum time between attempts to retrieve buffered data * from the AP [msec] * @snooze_window: A window of time in which PBW snoozing ensures that all * packets are received. It is also the minimum time from the last * received unicast RX packet, before the client stops snoozing * for data. [msec] * @snooze_step: TBD * @qndp_tid: TID the client shall use for uAPSD QNDP triggers * @uapsd_ac_flags: Set trigger-enabled and delivery-enabled indication for * each corresponding AC. * Use IEEE80211_WMM_IE_STA_QOSINFO_AC* for correct values. * @uapsd_max_sp: Use IEEE80211_WMM_IE_STA_QOSINFO_SP_* for correct * values. * @heavy_tx_thld_packets: TX threshold measured in number of packets * @heavy_rx_thld_packets: RX threshold measured in number of packets * @heavy_tx_thld_percentage: TX threshold measured in load's percentage * @heavy_rx_thld_percentage: RX threshold measured in load's percentage * @limited_ps_threshold: (unused) * @reserved: reserved (padding) */ struct iwl_mac_power_cmd { /* CONTEXT_DESC_API_T_VER_1 */ __le32 id_and_color; /* CLIENT_PM_POWER_TABLE_S_VER_1 */ __le16 flags; __le16 keep_alive_seconds; __le32 rx_data_timeout; __le32 tx_data_timeout; __le32 rx_data_timeout_uapsd; __le32 tx_data_timeout_uapsd; u8 lprx_rssi_threshold; u8 skip_dtim_periods; __le16 snooze_interval; __le16 snooze_window; u8 snooze_step; u8 qndp_tid; u8 uapsd_ac_flags; u8 uapsd_max_sp; u8 heavy_tx_thld_packets; u8 heavy_rx_thld_packets; u8 heavy_tx_thld_percentage; u8 heavy_rx_thld_percentage; u8 limited_ps_threshold; u8 reserved; } __packed;
/* * struct iwl_uapsd_misbehaving_ap_notif - the FW sends this notification when * the associated AP is identified as improperly implementing the uAPSD * protocol. PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION = 0x78 * @sta_id: index of the station in the uCode's station table - the associated * AP ID in this context.
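 *
 * A minimal sketch of how a handler might read this notification (the
 * packet variable "pkt" here is illustrative and not part of this header):
 *
 *	struct iwl_uapsd_misbehaving_ap_notif *notif = (void *)pkt->data;
 *	u32 ap_sta_id = le32_to_cpu(notif->sta_id);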
*/ struct iwl_uapsd_misbehaving_ap_notif { __le32 sta_id; u8 mac_id; u8 reserved[3]; } __packed;
/** * struct iwl_reduce_tx_power_cmd - TX power reduction command * REDUCE_TX_POWER_CMD = 0x9f * @flags: (reserved for future implementation) * @mac_context_id: id of the mac ctx for which we are reducing TX power. * @pwr_restriction: TX power restriction in dBm. */ struct iwl_reduce_tx_power_cmd { u8 flags; u8 mac_context_id; __le16 pwr_restriction; } __packed; /* TX_REDUCED_POWER_API_S_VER_1 */
enum iwl_dev_tx_power_cmd_mode { IWL_TX_POWER_MODE_SET_MAC = 0, IWL_TX_POWER_MODE_SET_DEVICE = 1, IWL_TX_POWER_MODE_SET_CHAINS = 2, IWL_TX_POWER_MODE_SET_ACK = 3, IWL_TX_POWER_MODE_SET_SAR_TIMER = 4, IWL_TX_POWER_MODE_SET_SAR_TIMER_DEFAULT_TABLE = 5, }; /* TX_POWER_REDUCED_FLAGS_TYPE_API_E_VER_5 */
#define IWL_NUM_CHAIN_LIMITS 2 #define IWL_NUM_SUB_BANDS 5
/** * struct iwl_dev_tx_power_cmd_v3 - TX power reduction command * @set_mode: see &enum iwl_dev_tx_power_cmd_mode * @mac_context_id: id of the mac ctx for which we are reducing TX power. * @pwr_restriction: TX power restriction in 1/8 dBm. * @dev_24: device TX power restriction in 1/8 dBm * @dev_52_low: device TX power restriction upper band - low * @dev_52_high: device TX power restriction upper band - high * @per_chain_restriction: per chain restrictions */ struct iwl_dev_tx_power_cmd_v3 { __le32 set_mode; __le32 mac_context_id; __le16 pwr_restriction; __le16 dev_24; __le16 dev_52_low; __le16 dev_52_high; __le16 per_chain_restriction[IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS]; } __packed; /* TX_REDUCED_POWER_API_S_VER_3 */
#define IWL_DEV_MAX_TX_POWER 0x7FFF
/** * struct iwl_dev_tx_power_cmd_v4 - TX power reduction command * @v3: version 3 of the command, embedded here for easier software handling * @enable_ack_reduction: enable or disable close range ack TX power * reduction. * @reserved: reserved (padding) */ struct iwl_dev_tx_power_cmd_v4 { /* v4 is just an extension of v3 - keep this here */ struct iwl_dev_tx_power_cmd_v3 v3; u8 enable_ack_reduction; u8 reserved[3]; } __packed; /* TX_REDUCED_POWER_API_S_VER_4 */
/** * struct iwl_dev_tx_power_cmd - TX power reduction command * @v3: version 3 of the command, embedded here for easier software handling * @enable_ack_reduction: enable or disable close range ack TX power * reduction. * @per_chain_restriction_changed: indicates whether per_chain_restriction * has changed since the last command. Used if set_mode is * IWL_TX_POWER_MODE_SET_SAR_TIMER. * Note: if not changed, the command is used for keep-alive only. * @reserved: reserved (padding) * @timer_period: timer in milliseconds. If it expires, the FW will revert to * the default BIOS values. Relevant if set_mode is * IWL_TX_POWER_MODE_SET_SAR_TIMER */ struct iwl_dev_tx_power_cmd { /* v5 is just an extension of v3 - keep this here */ struct iwl_dev_tx_power_cmd_v3 v3; u8 enable_ack_reduction; u8 per_chain_restriction_changed; u8 reserved[2]; __le32 timer_period; } __packed; /* TX_REDUCED_POWER_API_S_VER_5 */
#define IWL_NUM_GEO_PROFILES 3
/** * enum iwl_geo_per_chain_offset_operation - type of operation * @IWL_PER_CHAIN_OFFSET_SET_TABLES: send the tables from the host to the FW. * @IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE: retrieve the last configured table. */ enum iwl_geo_per_chain_offset_operation { IWL_PER_CHAIN_OFFSET_SET_TABLES, IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE, }; /* GEO_TX_POWER_LIMIT FLAGS TYPE */
/** * struct iwl_per_chain_offset - embedded struct for GEO_TX_POWER_LIMIT. * @max_tx_power: maximum allowed tx power. * @chain_a: tx power offset for chain a.
* @chain_b: tx power offset for chain b. */ struct iwl_per_chain_offset { __le16 max_tx_power; u8 chain_a; u8 chain_b; } __packed; /* PER_CHAIN_LIMIT_OFFSET_PER_CHAIN_S_VER_1 */
struct iwl_per_chain_offset_group { struct iwl_per_chain_offset lb; struct iwl_per_chain_offset hb; } __packed; /* PER_CHAIN_LIMIT_OFFSET_GROUP_S_VER_1 */
/** * struct iwl_geo_tx_power_profiles_cmd_v1 - struct for GEO_TX_POWER_LIMIT cmd. * @ops: operations, value from &enum iwl_geo_per_chain_offset_operation * @table: offset profile per band. */ struct iwl_geo_tx_power_profiles_cmd_v1 { __le32 ops; struct iwl_per_chain_offset_group table[IWL_NUM_GEO_PROFILES]; } __packed; /* GEO_TX_POWER_LIMIT_VER_1 */
/** * struct iwl_geo_tx_power_profiles_cmd - struct for GEO_TX_POWER_LIMIT cmd. * @ops: operations, value from &enum iwl_geo_per_chain_offset_operation * @table: offset profile per band. * @table_revision: BIOS table revision. */ struct iwl_geo_tx_power_profiles_cmd { __le32 ops; struct iwl_per_chain_offset_group table[IWL_NUM_GEO_PROFILES]; __le32 table_revision; } __packed; /* GEO_TX_POWER_LIMIT */
/** * struct iwl_geo_tx_power_profiles_resp - response to GEO_TX_POWER_LIMIT cmd * @profile_idx: current geo profile in use */ struct iwl_geo_tx_power_profiles_resp { __le32 profile_idx; } __packed; /* GEO_TX_POWER_LIMIT_RESP */
/** * struct iwl_ppag_table_cmd - struct for PER_PLATFORM_ANT_GAIN_CMD cmd. * @enabled: 1 if PPAG is enabled, 0 otherwise * @gain: table of antenna gain values per chain and sub-band * @reserved: reserved */ struct iwl_ppag_table_cmd { __le32 enabled; s8 gain[IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS]; s8 reserved[2]; } __packed; /* PER_PLATFORM_ANT_GAIN_CMD */
/** * struct iwl_beacon_filter_cmd * REPLY_BEACON_FILTERING_CMD = 0xd2 (command) * @bf_energy_delta: Used for RSSI filtering, if in 'normal' state. Send beacon * to driver if delta in Energy values calculated for this and last * passed beacon is greater than this threshold. Zero value means that * the Energy change is ignored for beacon filtering, and beacon will * not be forced to be sent to driver regardless of this delta. Typical * energy delta 5dB. * @bf_roaming_energy_delta: Used for RSSI filtering, if in 'roaming' state. * Send beacon to driver if delta in Energy values calculated for this * and last passed beacon is greater than this threshold. Zero value * means that the Energy change is ignored for beacon filtering while in * Roaming state, typical energy delta 1dB. * @bf_roaming_state: Used for RSSI filtering. If absolute Energy values * calculated for current beacon is less than the threshold, use * Roaming Energy Delta Threshold, otherwise use normal Energy Delta * Threshold. Typical energy threshold is -72dBm. * @bf_temp_threshold: This threshold determines the type of temperature * filtering (Slow or Fast) that is selected (Units are in Celsius): * If the current temperature is above this threshold - Fast filter * will be used, if the current temperature is below this threshold - * Slow filter will be used. * @bf_temp_fast_filter: Send Beacon to driver if delta in temperature values * calculated for this and the last passed beacon is greater than this * threshold. Zero value means that the temperature change is ignored for * beacon filtering; beacons will not be forced to be sent to driver * regardless of whether their temperature has changed. * @bf_temp_slow_filter: Send Beacon to driver if delta in temperature values * calculated for this and the last passed beacon is greater than this * threshold.
Zero value means that the temperature change is ignored for * beacon filtering; beacons will not be forced to be sent to driver * regardless of whether their temperature has changed. * @bf_enable_beacon_filter: 1, beacon filtering is enabled; 0, disabled. * @bf_debug_flag: beacon filtering debug configuration * @bf_escape_timer: Send beacons to the driver if no beacons were passed * for a specific period of time. Units: Beacons. * @ba_escape_timer: Fully receive and parse beacon if no beacons were passed * for a longer period of time than this escape-timeout. Units: Beacons. * @ba_enable_beacon_abort: 1, beacon abort is enabled; 0, disabled. * @bf_threshold_absolute_low: See below. * @bf_threshold_absolute_high: Send Beacon to driver if Energy value calculated * for this beacon crossed this absolute threshold. For the 'Increase' * direction the bf_energy_absolute_low[i] is used. For the 'Decrease' * direction the bf_energy_absolute_high[i] is used. Zero value means * that this specific threshold is ignored for beacon filtering, and * beacon will not be forced to be sent to driver due to this setting. */ struct iwl_beacon_filter_cmd { __le32 bf_energy_delta; __le32 bf_roaming_energy_delta; __le32 bf_roaming_state; __le32 bf_temp_threshold; __le32 bf_temp_fast_filter; __le32 bf_temp_slow_filter; __le32 bf_enable_beacon_filter; __le32 bf_debug_flag; __le32 bf_escape_timer; __le32 ba_escape_timer; __le32 ba_enable_beacon_abort; __le32 bf_threshold_absolute_low[2]; __le32 bf_threshold_absolute_high[2]; } __packed; /* BEACON_FILTER_CONFIG_API_S_VER_4 */
/* Beacon filtering and beacon abort */ #define IWL_BF_ENERGY_DELTA_DEFAULT 5 #define IWL_BF_ENERGY_DELTA_D0I3 20 #define IWL_BF_ENERGY_DELTA_MAX 255 #define IWL_BF_ENERGY_DELTA_MIN 0
#define IWL_BF_ROAMING_ENERGY_DELTA_DEFAULT 1 #define IWL_BF_ROAMING_ENERGY_DELTA_D0I3 20 #define IWL_BF_ROAMING_ENERGY_DELTA_MAX 255 #define IWL_BF_ROAMING_ENERGY_DELTA_MIN 0
#define IWL_BF_ROAMING_STATE_DEFAULT 72 #define IWL_BF_ROAMING_STATE_D0I3 72 #define IWL_BF_ROAMING_STATE_MAX 255 #define IWL_BF_ROAMING_STATE_MIN 0
#define IWL_BF_TEMP_THRESHOLD_DEFAULT 112 #define IWL_BF_TEMP_THRESHOLD_D0I3 112 #define IWL_BF_TEMP_THRESHOLD_MAX 255 #define IWL_BF_TEMP_THRESHOLD_MIN 0
#define IWL_BF_TEMP_FAST_FILTER_DEFAULT 1 #define IWL_BF_TEMP_FAST_FILTER_D0I3 1 #define IWL_BF_TEMP_FAST_FILTER_MAX 255 #define IWL_BF_TEMP_FAST_FILTER_MIN 0
#define IWL_BF_TEMP_SLOW_FILTER_DEFAULT 5 #define IWL_BF_TEMP_SLOW_FILTER_D0I3 20 #define IWL_BF_TEMP_SLOW_FILTER_MAX 255 #define IWL_BF_TEMP_SLOW_FILTER_MIN 0
#define IWL_BF_ENABLE_BEACON_FILTER_DEFAULT 1
#define IWL_BF_DEBUG_FLAG_DEFAULT 0 #define IWL_BF_DEBUG_FLAG_D0I3 0
#define IWL_BF_ESCAPE_TIMER_DEFAULT 0 #define IWL_BF_ESCAPE_TIMER_D0I3 0 #define IWL_BF_ESCAPE_TIMER_MAX 1024 #define IWL_BF_ESCAPE_TIMER_MIN 0
#define IWL_BA_ESCAPE_TIMER_DEFAULT 6 #define IWL_BA_ESCAPE_TIMER_D0I3 6 #define IWL_BA_ESCAPE_TIMER_D3 9 #define IWL_BA_ESCAPE_TIMER_MAX 1024 #define IWL_BA_ESCAPE_TIMER_MIN 0
#define IWL_BA_ENABLE_BEACON_ABORT_DEFAULT 1
#define IWL_BF_CMD_CONFIG(mode) \ .bf_energy_delta = cpu_to_le32(IWL_BF_ENERGY_DELTA ## mode), \ .bf_roaming_energy_delta = \ cpu_to_le32(IWL_BF_ROAMING_ENERGY_DELTA ## mode), \ .bf_roaming_state = cpu_to_le32(IWL_BF_ROAMING_STATE ## mode), \ .bf_temp_threshold = cpu_to_le32(IWL_BF_TEMP_THRESHOLD ## mode), \ .bf_temp_fast_filter = cpu_to_le32(IWL_BF_TEMP_FAST_FILTER ## mode), \ .bf_temp_slow_filter = cpu_to_le32(IWL_BF_TEMP_SLOW_FILTER ## mode), \ .bf_debug_flag = cpu_to_le32(IWL_BF_DEBUG_FLAG ## mode), \
.bf_escape_timer = cpu_to_le32(IWL_BF_ESCAPE_TIMER ## mode), \ .ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER ## mode)
#define IWL_BF_CMD_CONFIG_DEFAULTS IWL_BF_CMD_CONFIG(_DEFAULT) #define IWL_BF_CMD_CONFIG_D0I3 IWL_BF_CMD_CONFIG(_D0I3)
#endif /* __iwl_fw_api_power_h__ */ backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h000066400000000000000000000474011351064634500265570ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *****************************************************************************/
#ifndef __iwl_fw_api_rs_h__ #define __iwl_fw_api_rs_h__
#include "mac.h"
/** * enum iwl_tlc_mng_cfg_flags - options for TLC config flags * @IWL_TLC_MNG_CFG_FLAGS_STBC_MSK: enable STBC.
For HE this enables STBC for * bandwidths <= 80MHz * @IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK: enable LDPC * @IWL_TLC_MNG_CFG_FLAGS_HE_STBC_160MHZ_MSK: enable STBC in HE at 160MHz * bandwidth * @IWL_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_1_MSK: enable HE Dual Carrier Modulation * for BPSK (MCS 0) with 1 spatial * stream * @IWL_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_2_MSK: enable HE Dual Carrier Modulation * for BPSK (MCS 0) with 2 spatial * streams */ enum iwl_tlc_mng_cfg_flags { IWL_TLC_MNG_CFG_FLAGS_STBC_MSK = BIT(0), IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK = BIT(1), IWL_TLC_MNG_CFG_FLAGS_HE_STBC_160MHZ_MSK = BIT(2), IWL_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_1_MSK = BIT(3), IWL_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_2_MSK = BIT(4), }; /** * enum iwl_tlc_mng_cfg_cw - channel width options * @IWL_TLC_MNG_CH_WIDTH_20MHZ: 20MHZ channel * @IWL_TLC_MNG_CH_WIDTH_40MHZ: 40MHZ channel * @IWL_TLC_MNG_CH_WIDTH_80MHZ: 80MHZ channel * @IWL_TLC_MNG_CH_WIDTH_160MHZ: 160MHZ channel * @IWL_TLC_MNG_CH_WIDTH_LAST: maximum value */ enum iwl_tlc_mng_cfg_cw { IWL_TLC_MNG_CH_WIDTH_20MHZ, IWL_TLC_MNG_CH_WIDTH_40MHZ, IWL_TLC_MNG_CH_WIDTH_80MHZ, IWL_TLC_MNG_CH_WIDTH_160MHZ, IWL_TLC_MNG_CH_WIDTH_LAST = IWL_TLC_MNG_CH_WIDTH_160MHZ, }; /** * enum iwl_tlc_mng_cfg_chains - possible chains * @IWL_TLC_MNG_CHAIN_A_MSK: chain A * @IWL_TLC_MNG_CHAIN_B_MSK: chain B */ enum iwl_tlc_mng_cfg_chains { IWL_TLC_MNG_CHAIN_A_MSK = BIT(0), IWL_TLC_MNG_CHAIN_B_MSK = BIT(1), }; /** * enum iwl_tlc_mng_cfg_mode - supported modes * @IWL_TLC_MNG_MODE_CCK: enable CCK * @IWL_TLC_MNG_MODE_OFDM_NON_HT: enable OFDM (non HT) * @IWL_TLC_MNG_MODE_NON_HT: enable non HT * @IWL_TLC_MNG_MODE_HT: enable HT * @IWL_TLC_MNG_MODE_VHT: enable VHT * @IWL_TLC_MNG_MODE_HE: enable HE * @IWL_TLC_MNG_MODE_INVALID: invalid value * @IWL_TLC_MNG_MODE_NUM: a count of possible modes */ enum iwl_tlc_mng_cfg_mode { IWL_TLC_MNG_MODE_CCK = 0, IWL_TLC_MNG_MODE_OFDM_NON_HT = IWL_TLC_MNG_MODE_CCK, IWL_TLC_MNG_MODE_NON_HT = IWL_TLC_MNG_MODE_CCK, IWL_TLC_MNG_MODE_HT, IWL_TLC_MNG_MODE_VHT, IWL_TLC_MNG_MODE_HE, IWL_TLC_MNG_MODE_INVALID, IWL_TLC_MNG_MODE_NUM = IWL_TLC_MNG_MODE_INVALID, }; /** * enum iwl_tlc_mng_ht_rates - HT/VHT/HE rates * @IWL_TLC_MNG_HT_RATE_MCS0: index of MCS0 * @IWL_TLC_MNG_HT_RATE_MCS1: index of MCS1 * @IWL_TLC_MNG_HT_RATE_MCS2: index of MCS2 * @IWL_TLC_MNG_HT_RATE_MCS3: index of MCS3 * @IWL_TLC_MNG_HT_RATE_MCS4: index of MCS4 * @IWL_TLC_MNG_HT_RATE_MCS5: index of MCS5 * @IWL_TLC_MNG_HT_RATE_MCS6: index of MCS6 * @IWL_TLC_MNG_HT_RATE_MCS7: index of MCS7 * @IWL_TLC_MNG_HT_RATE_MCS8: index of MCS8 * @IWL_TLC_MNG_HT_RATE_MCS9: index of MCS9 * @IWL_TLC_MNG_HT_RATE_MCS10: index of MCS10 * @IWL_TLC_MNG_HT_RATE_MCS11: index of MCS11 * @IWL_TLC_MNG_HT_RATE_MAX: maximal rate for HT/VHT */ enum iwl_tlc_mng_ht_rates { IWL_TLC_MNG_HT_RATE_MCS0 = 0, IWL_TLC_MNG_HT_RATE_MCS1, IWL_TLC_MNG_HT_RATE_MCS2, IWL_TLC_MNG_HT_RATE_MCS3, IWL_TLC_MNG_HT_RATE_MCS4, IWL_TLC_MNG_HT_RATE_MCS5, IWL_TLC_MNG_HT_RATE_MCS6, IWL_TLC_MNG_HT_RATE_MCS7, IWL_TLC_MNG_HT_RATE_MCS8, IWL_TLC_MNG_HT_RATE_MCS9, IWL_TLC_MNG_HT_RATE_MCS10, IWL_TLC_MNG_HT_RATE_MCS11, IWL_TLC_MNG_HT_RATE_MAX = IWL_TLC_MNG_HT_RATE_MCS11, }; enum IWL_TLC_MNG_NSS { IWL_TLC_NSS_1, IWL_TLC_NSS_2, IWL_TLC_NSS_MAX }; enum IWL_TLC_HT_BW_RATES { IWL_TLC_HT_BW_NONE_160, IWL_TLC_HT_BW_160, }; /** * struct tlc_config_cmd - TLC configuration * @sta_id: station id * @reserved1: reserved * @max_ch_width: max supported channel width from @enum iwl_tlc_mng_cfg_cw * @mode: &enum iwl_tlc_mng_cfg_mode * @chains: bitmask of &enum iwl_tlc_mng_cfg_chains * @amsdu: TX amsdu is supported * @flags: 
bitmask of &enum iwl_tlc_mng_cfg_flags * @non_ht_rates: bitmap of supported legacy rates * @ht_rates: bitmap of &enum iwl_tlc_mng_ht_rates, per * pair (0 - 80 MHz width and below, 1 - 160 MHz). * @max_mpdu_len: max MPDU length, in bytes * @sgi_ch_width_supp: bitmap of SGI support per channel width * use BIT(@enum iwl_tlc_mng_cfg_cw) * @reserved2: reserved */ struct iwl_tlc_config_cmd { u8 sta_id; u8 reserved1[3]; u8 max_ch_width; u8 mode; u8 chains; u8 amsdu; __le16 flags; __le16 non_ht_rates; __le16 ht_rates[IWL_TLC_NSS_MAX][2]; __le16 max_mpdu_len; u8 sgi_ch_width_supp; u8 reserved2[1]; } __packed; /* TLC_MNG_CONFIG_CMD_API_S_VER_2 */
/** * enum iwl_tlc_update_flags - updated fields * @IWL_TLC_NOTIF_FLAG_RATE: last initial rate update * @IWL_TLC_NOTIF_FLAG_AMSDU: amsdu parameters update */ enum iwl_tlc_update_flags { IWL_TLC_NOTIF_FLAG_RATE = BIT(0), IWL_TLC_NOTIF_FLAG_AMSDU = BIT(1), };
/** * struct iwl_tlc_update_notif - TLC notification from FW * @sta_id: station id * @reserved: reserved * @flags: bitmap of notifications reported * @rate: current initial rate * @amsdu_size: Max AMSDU size, in bytes * @amsdu_enabled: bitmap for per-TID AMSDU enablement */ struct iwl_tlc_update_notif { u8 sta_id; u8 reserved[3]; __le32 flags; __le32 rate; __le32 amsdu_size; __le32 amsdu_enabled; } __packed; /* TLC_MNG_UPDATE_NTFY_API_S_VER_2 */
/* * These serve as indexes into * struct iwl_rate_info fw_rate_idx_to_plcp[IWL_RATE_COUNT]; * TODO: avoid overlap between legacy and HT rates */ enum { IWL_RATE_1M_INDEX = 0, IWL_FIRST_CCK_RATE = IWL_RATE_1M_INDEX, IWL_RATE_2M_INDEX, IWL_RATE_5M_INDEX, IWL_RATE_11M_INDEX, IWL_LAST_CCK_RATE = IWL_RATE_11M_INDEX, IWL_RATE_6M_INDEX, IWL_FIRST_OFDM_RATE = IWL_RATE_6M_INDEX, IWL_RATE_MCS_0_INDEX = IWL_RATE_6M_INDEX, IWL_FIRST_HT_RATE = IWL_RATE_MCS_0_INDEX, IWL_FIRST_VHT_RATE = IWL_RATE_MCS_0_INDEX, IWL_RATE_9M_INDEX, IWL_RATE_12M_INDEX, IWL_RATE_MCS_1_INDEX = IWL_RATE_12M_INDEX, IWL_RATE_18M_INDEX, IWL_RATE_MCS_2_INDEX = IWL_RATE_18M_INDEX, IWL_RATE_24M_INDEX, IWL_RATE_MCS_3_INDEX = IWL_RATE_24M_INDEX, IWL_RATE_36M_INDEX, IWL_RATE_MCS_4_INDEX = IWL_RATE_36M_INDEX, IWL_RATE_48M_INDEX, IWL_RATE_MCS_5_INDEX = IWL_RATE_48M_INDEX, IWL_RATE_54M_INDEX, IWL_RATE_MCS_6_INDEX = IWL_RATE_54M_INDEX, IWL_LAST_NON_HT_RATE = IWL_RATE_54M_INDEX, IWL_RATE_60M_INDEX, IWL_RATE_MCS_7_INDEX = IWL_RATE_60M_INDEX, IWL_LAST_HT_RATE = IWL_RATE_MCS_7_INDEX, IWL_RATE_MCS_8_INDEX, IWL_RATE_MCS_9_INDEX, IWL_LAST_VHT_RATE = IWL_RATE_MCS_9_INDEX, IWL_RATE_MCS_10_INDEX, IWL_RATE_MCS_11_INDEX, IWL_LAST_HE_RATE = IWL_RATE_MCS_11_INDEX, IWL_RATE_COUNT_LEGACY = IWL_LAST_NON_HT_RATE + 1, IWL_RATE_COUNT = IWL_LAST_HE_RATE + 1, };
#define IWL_RATE_BIT_MSK(r) BIT(IWL_RATE_##r##M_INDEX)
/* fw API values for legacy bit rates, both OFDM and CCK */ enum { IWL_RATE_6M_PLCP = 13, IWL_RATE_9M_PLCP = 15, IWL_RATE_12M_PLCP = 5, IWL_RATE_18M_PLCP = 7, IWL_RATE_24M_PLCP = 9, IWL_RATE_36M_PLCP = 11, IWL_RATE_48M_PLCP = 1, IWL_RATE_54M_PLCP = 3, IWL_RATE_1M_PLCP = 10, IWL_RATE_2M_PLCP = 20, IWL_RATE_5M_PLCP = 55, IWL_RATE_11M_PLCP = 110, IWL_RATE_INVM_PLCP = -1, };
/* * rate_n_flags bit fields * * The 32-bit value has different layouts in the low 8 bits depending on the * format. There are three formats, HT, VHT and legacy (11abg, with subformats * for CCK and OFDM).
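 *
 * As a worked example (illustrative only, using the masks defined further
 * below), the VHT MCS and stream count can be recovered from a CPU-endian
 * rate_n_flags value like this:
 *
 *	u32 mcs = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
 *	u32 nss = ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
 *		   RATE_VHT_MCS_NSS_POS) + 1;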
* * High-throughput (HT) rate format * bit 8 is 1, bit 26 is 0, bit 9 is 0 (OFDM) * Very High-throughput (VHT) rate format * bit 8 is 0, bit 26 is 1, bit 9 is 0 (OFDM) * Legacy OFDM rate format for bits 7:0 * bit 8 is 0, bit 26 is 0, bit 9 is 0 (OFDM) * Legacy CCK rate format for bits 7:0: * bit 8 is 0, bit 26 is 0, bit 9 is 1 (CCK) */ /* Bit 8: (1) HT format, (0) legacy or VHT format */ #define RATE_MCS_HT_POS 8 #define RATE_MCS_HT_MSK (1 << RATE_MCS_HT_POS) /* Bit 9: (1) CCK, (0) OFDM. HT (bit 8) must be "0" for this bit to be valid */ #define RATE_MCS_CCK_POS 9 #define RATE_MCS_CCK_MSK (1 << RATE_MCS_CCK_POS) /* Bit 26: (1) VHT format, (0) legacy format in bits 8:0 */ #define RATE_MCS_VHT_POS 26 #define RATE_MCS_VHT_MSK (1 << RATE_MCS_VHT_POS) /* * High-throughput (HT) rate format for bits 7:0 * * 2-0: MCS rate base * 0) 6 Mbps * 1) 12 Mbps * 2) 18 Mbps * 3) 24 Mbps * 4) 36 Mbps * 5) 48 Mbps * 6) 54 Mbps * 7) 60 Mbps * 4-3: 0) Single stream (SISO) * 1) Dual stream (MIMO) * 2) Triple stream (MIMO) * 5: Value of 0x20 in bits 7:0 indicates 6 Mbps HT40 duplicate data * (bits 7-6 are zero) * * Together the low 5 bits work out to the MCS index because we don't * support MCSes above 15/23, and 0-7 have one stream, 8-15 have two * streams and 16-23 have three streams. We could also support MCS 32 * which is the duplicate 20 MHz MCS (bit 5 set, all others zero.) */ #define RATE_HT_MCS_RATE_CODE_MSK 0x7 #define RATE_HT_MCS_NSS_POS 3 #define RATE_HT_MCS_NSS_MSK (3 << RATE_HT_MCS_NSS_POS) /* Bit 10: (1) Use Green Field preamble */ #define RATE_HT_MCS_GF_POS 10 #define RATE_HT_MCS_GF_MSK (1 << RATE_HT_MCS_GF_POS) #define RATE_HT_MCS_INDEX_MSK 0x3f /* * Very High-throughput (VHT) rate format for bits 7:0 * * 3-0: VHT MCS (0-9) * 5-4: number of streams - 1: * 0) Single stream (SISO) * 1) Dual stream (MIMO) * 2) Triple stream (MIMO) */ /* Bit 4-5: (0) SISO, (1) MIMO2 (2) MIMO3 */ #define RATE_VHT_MCS_RATE_CODE_MSK 0xf #define RATE_VHT_MCS_NSS_POS 4 #define RATE_VHT_MCS_NSS_MSK (3 << RATE_VHT_MCS_NSS_POS) /* * Legacy OFDM rate format for bits 7:0 * * 3-0: 0xD) 6 Mbps * 0xF) 9 Mbps * 0x5) 12 Mbps * 0x7) 18 Mbps * 0x9) 24 Mbps * 0xB) 36 Mbps * 0x1) 48 Mbps * 0x3) 54 Mbps * (bits 7-4 are 0) * * Legacy CCK rate format for bits 7:0: * bit 8 is 0, bit 26 is 0, bit 9 is 1 (CCK): * * 6-0: 10) 1 Mbps * 20) 2 Mbps * 55) 5.5 Mbps * 110) 11 Mbps * (bit 7 is 0) */ #define RATE_LEGACY_RATE_MSK 0xff /* Bit 10 - OFDM HE */ #define RATE_MCS_HE_POS 10 #define RATE_MCS_HE_MSK BIT(RATE_MCS_HE_POS) /* * Bit 11-12: (0) 20MHz, (1) 40MHz, (2) 80MHz, (3) 160MHz * 0 and 1 are valid for HT and VHT, 2 and 3 only for VHT */ #define RATE_MCS_CHAN_WIDTH_POS 11 #define RATE_MCS_CHAN_WIDTH_MSK (3 << RATE_MCS_CHAN_WIDTH_POS) #define RATE_MCS_CHAN_WIDTH_20 (0 << RATE_MCS_CHAN_WIDTH_POS) #define RATE_MCS_CHAN_WIDTH_40 (1 << RATE_MCS_CHAN_WIDTH_POS) #define RATE_MCS_CHAN_WIDTH_80 (2 << RATE_MCS_CHAN_WIDTH_POS) #define RATE_MCS_CHAN_WIDTH_160 (3 << RATE_MCS_CHAN_WIDTH_POS) /* Bit 13: (1) Short guard interval (0.4 usec), (0) normal GI (0.8 usec) */ #define RATE_MCS_SGI_POS 13 #define RATE_MCS_SGI_MSK (1 << RATE_MCS_SGI_POS) /* Bit 14-16: Antenna selection (1) Ant A, (2) Ant B, (4) Ant C */ #define RATE_MCS_ANT_POS 14 #define RATE_MCS_ANT_A_MSK (1 << RATE_MCS_ANT_POS) #define RATE_MCS_ANT_B_MSK (2 << RATE_MCS_ANT_POS) #define RATE_MCS_ANT_C_MSK (4 << RATE_MCS_ANT_POS) #define RATE_MCS_ANT_AB_MSK (RATE_MCS_ANT_A_MSK | \ RATE_MCS_ANT_B_MSK) #define RATE_MCS_ANT_ABC_MSK (RATE_MCS_ANT_AB_MSK | \ RATE_MCS_ANT_C_MSK) #define RATE_MCS_ANT_MSK 
RATE_MCS_ANT_ABC_MSK
/* Bit 17: (0) SS, (1) SS*2 */ #define RATE_MCS_STBC_POS 17 #define RATE_MCS_STBC_MSK BIT(RATE_MCS_STBC_POS)
/* Bit 18: OFDM-HE dual carrier mode */ #define RATE_HE_DUAL_CARRIER_MODE 18 #define RATE_HE_DUAL_CARRIER_MODE_MSK BIT(RATE_HE_DUAL_CARRIER_MODE)
/* Bit 19: (0) Beamforming is off, (1) Beamforming is on */ #define RATE_MCS_BF_POS 19 #define RATE_MCS_BF_MSK (1 << RATE_MCS_BF_POS)
/* * Bit 20-21: HE LTF type and guard interval * HE (ext) SU: * 0 1xLTF+0.8us * 1 2xLTF+0.8us * 2 2xLTF+1.6us * 3 & SGI (bit 13) clear 4xLTF+3.2us * 3 & SGI (bit 13) set 4xLTF+0.8us * HE MU: * 0 4xLTF+0.8us * 1 2xLTF+0.8us * 2 2xLTF+1.6us * 3 4xLTF+3.2us * HE TRIG: * 0 1xLTF+1.6us * 1 2xLTF+1.6us * 2 4xLTF+3.2us * 3 (does not occur) */ #define RATE_MCS_HE_GI_LTF_POS 20 #define RATE_MCS_HE_GI_LTF_MSK (3 << RATE_MCS_HE_GI_LTF_POS)
/* Bit 22-23: HE type. (0) SU, (1) SU_EXT, (2) MU, (3) trigger based */ #define RATE_MCS_HE_TYPE_POS 22 #define RATE_MCS_HE_TYPE_SU (0 << RATE_MCS_HE_TYPE_POS) #define RATE_MCS_HE_TYPE_EXT_SU (1 << RATE_MCS_HE_TYPE_POS) #define RATE_MCS_HE_TYPE_MU (2 << RATE_MCS_HE_TYPE_POS) #define RATE_MCS_HE_TYPE_TRIG (3 << RATE_MCS_HE_TYPE_POS) #define RATE_MCS_HE_TYPE_MSK (3 << RATE_MCS_HE_TYPE_POS)
/* Bit 24-25: (0) 20MHz (no dup), (1) 2x20MHz, (2) 4x20MHz, 3 8x20MHz */ #define RATE_MCS_DUP_POS 24 #define RATE_MCS_DUP_MSK (3 << RATE_MCS_DUP_POS)
/* Bit 27: (1) LDPC enabled, (0) LDPC disabled */ #define RATE_MCS_LDPC_POS 27 #define RATE_MCS_LDPC_MSK (1 << RATE_MCS_LDPC_POS)
/* Bit 28: (1) 106-tone RX (8 MHz RU), (0) normal bandwidth */ #define RATE_MCS_HE_106T_POS 28 #define RATE_MCS_HE_106T_MSK (1 << RATE_MCS_HE_106T_POS)
/* Link Quality definitions */
/* # entries in rate scale table to support Tx retries */ #define LQ_MAX_RETRY_NUM 16
/* Link quality command flags bit fields */
/* Bit 0: (0) Don't use RTS (1) Use RTS */ #define LQ_FLAG_USE_RTS_POS 0 #define LQ_FLAG_USE_RTS_MSK (1 << LQ_FLAG_USE_RTS_POS)
/* Bit 1-3: LQ command color. Used to match responses to LQ commands */ #define LQ_FLAG_COLOR_POS 1 #define LQ_FLAG_COLOR_MSK (7 << LQ_FLAG_COLOR_POS) #define LQ_FLAG_COLOR_GET(_f) (((_f) & LQ_FLAG_COLOR_MSK) >>\ LQ_FLAG_COLOR_POS) #define LQ_FLAGS_COLOR_INC(_c) ((((_c) + 1) << LQ_FLAG_COLOR_POS) &\ LQ_FLAG_COLOR_MSK) #define LQ_FLAG_COLOR_SET(_f, _c) ((_c) | ((_f) & ~LQ_FLAG_COLOR_MSK))
/* Bit 4-5: Tx RTS BW Signalling * (0) No RTS BW signalling * (1) Static BW signalling * (2) Dynamic BW signalling */ #define LQ_FLAG_RTS_BW_SIG_POS 4 #define LQ_FLAG_RTS_BW_SIG_NONE (0 << LQ_FLAG_RTS_BW_SIG_POS) #define LQ_FLAG_RTS_BW_SIG_STATIC (1 << LQ_FLAG_RTS_BW_SIG_POS) #define LQ_FLAG_RTS_BW_SIG_DYNAMIC (2 << LQ_FLAG_RTS_BW_SIG_POS)
/* Bit 6: (0) No dynamic BW selection (1) Allow dynamic BW selection * Dynamic BW selection allows Tx with narrower BW than requested in rates */ #define LQ_FLAG_DYNAMIC_BW_POS 6 #define LQ_FLAG_DYNAMIC_BW_MSK (1 << LQ_FLAG_DYNAMIC_BW_POS)
/* Single Stream Tx Parameters (lq_cmd->ss_params) * Flags to control a smart FW decision about whether BFER/STBC/SISO will be * used for single stream Tx. */
/* Bit 0-1: Max STBC streams allowed. Can be 0-3. * (0) - No STBC allowed * (1) - 2x1 STBC allowed (HT/VHT) * (2) - 4x2 STBC allowed (HT/VHT) * (3) - 3x2 STBC allowed (HT only) * All our chips are at most 2 antennas so only (1) is valid for now.
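 *
 * For illustration only (assuming a driver that wants to allow 2x1 STBC and
 * the beamformer for a station), ss_params could be composed as:
 *
 *	u32 ss_params = LQ_SS_PARAMS_VALID | LQ_SS_STBC_1SS_ALLOWED |
 *			LQ_SS_BFER_ALLOWED;
 *	lq_cmd->ss_params = cpu_to_le32(ss_params);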
*/ #define LQ_SS_STBC_ALLOWED_POS 0 #define LQ_SS_STBC_ALLOWED_MSK (3 << LQ_SS_STBC_ALLOWED_POS)
/* 2x1 STBC is allowed */ #define LQ_SS_STBC_1SS_ALLOWED (1 << LQ_SS_STBC_ALLOWED_POS)
/* Bit 2: Beamformer (VHT only) is allowed */ #define LQ_SS_BFER_ALLOWED_POS 2 #define LQ_SS_BFER_ALLOWED (1 << LQ_SS_BFER_ALLOWED_POS)
/* Bit 3: Force BFER or STBC for testing * If this is set: * If BFER is allowed then force the ucode to choose BFER else * If STBC is allowed then force the ucode to choose STBC over SISO */ #define LQ_SS_FORCE_POS 3 #define LQ_SS_FORCE (1 << LQ_SS_FORCE_POS)
/* Bit 31: ss_params field is valid. Used for FW backward compatibility * with other drivers which don't support the ss_params API yet */ #define LQ_SS_PARAMS_VALID_POS 31 #define LQ_SS_PARAMS_VALID (1 << LQ_SS_PARAMS_VALID_POS)
/** * struct iwl_lq_cmd - link quality command * @sta_id: station to update * @reduced_tpc: reduced transmit power control value * @control: not used * @flags: combination of LQ_FLAG_* * @mimo_delim: the first SISO index in rs_table, which separates MIMO * and SISO rates * @single_stream_ant_msk: best antenna for SISO (can be dual in CDD). * Should be ANT_[ABC] * @dual_stream_ant_msk: best antennas for MIMO, combination of ANT_[ABC] * @initial_rate_index: first index from rs_table per AC category * @agg_time_limit: aggregation max time threshold in usec/100, meaning * value of 100 is one usec. Range is 100 to 8000 * @agg_disable_start_th: try-count threshold for starting aggregation. * If a frame has higher try-count, it should not be selected for * starting an aggregation sequence. * @agg_frame_cnt_limit: max frame count in an aggregation. * 0: no limit * 1: no aggregation (one frame per aggregation) * 2 - 0x3f: maximal number of frames (up to 3f == 63) * @reserved2: reserved * @rs_table: array of rates for each TX try, each is rate_n_flags, * meaning it is a combination of RATE_MCS_* and IWL_RATE_*_PLCP * @ss_params: single stream features. Declares whether STBC or BFER are * allowed. */ struct iwl_lq_cmd { u8 sta_id; u8 reduced_tpc; __le16 control; /* LINK_QUAL_GENERAL_PARAMS_API_S_VER_1 */ u8 flags; u8 mimo_delim; u8 single_stream_ant_msk; u8 dual_stream_ant_msk; u8 initial_rate_index[AC_NUM]; /* LINK_QUAL_AGG_PARAMS_API_S_VER_1 */ __le16 agg_time_limit; u8 agg_disable_start_th; u8 agg_frame_cnt_limit; __le32 reserved2; __le32 rs_table[LQ_MAX_RETRY_NUM]; __le32 ss_params; }; /* LINK_QUALITY_CMD_API_S_VER_1 */
#endif /* __iwl_fw_api_rs_h__ */ backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h000066400000000000000000000655321351064634500265670ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details.
* * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #ifndef __iwl_fw_api_rx_h__ #define __iwl_fw_api_rx_h__ /* API for pre-9000 hardware */ #define IWL_RX_INFO_PHY_CNT 8 #define IWL_RX_INFO_ENERGY_ANT_ABC_IDX 1 #define IWL_RX_INFO_ENERGY_ANT_A_MSK 0x000000ff #define IWL_RX_INFO_ENERGY_ANT_B_MSK 0x0000ff00 #define IWL_RX_INFO_ENERGY_ANT_C_MSK 0x00ff0000 #define IWL_RX_INFO_ENERGY_ANT_A_POS 0 #define IWL_RX_INFO_ENERGY_ANT_B_POS 8 #define IWL_RX_INFO_ENERGY_ANT_C_POS 16 enum iwl_mac_context_info { MAC_CONTEXT_INFO_NONE, MAC_CONTEXT_INFO_GSCAN, }; /** * struct iwl_rx_phy_info - phy info * (REPLY_RX_PHY_CMD = 0xc0) * @non_cfg_phy_cnt: non configurable DSP phy data byte count * @cfg_phy_cnt: configurable DSP phy data byte count * @stat_id: configurable DSP phy data set ID * @reserved1: reserved * @system_timestamp: GP2 at on air rise * @timestamp: TSF at on air rise * @beacon_time_stamp: beacon at on-air rise * @phy_flags: general phy flags: band, modulation, ... * @channel: channel number * @non_cfg_phy: for various implementations of non_cfg_phy * @rate_n_flags: RATE_MCS_* * @byte_count: frame's byte-count * @frame_time: frame's time on the air, based on byte count and frame rate * calculation * @mac_active_msk: what MACs were active when the frame was received * @mac_context_info: additional info on the context in which the frame was * received as defined in &enum iwl_mac_context_info * * Before each Rx, the device sends this data. It contains PHY information * about the reception of the packet. 
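 *
 * As a sketch of typical parsing (illustrative; the exact flow depends on
 * the driver version), the chain A energy can be extracted from
 * @non_cfg_phy:
 *
 *	u32 val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_ENERGY_ANT_ABC_IDX]);
 *	u8 energy_a = (val & IWL_RX_INFO_ENERGY_ANT_A_MSK) >>
 *		      IWL_RX_INFO_ENERGY_ANT_A_POS;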
*/ struct iwl_rx_phy_info { u8 non_cfg_phy_cnt; u8 cfg_phy_cnt; u8 stat_id; u8 reserved1; __le32 system_timestamp; __le64 timestamp; __le32 beacon_time_stamp; __le16 phy_flags; __le16 channel; __le32 non_cfg_phy[IWL_RX_INFO_PHY_CNT]; __le32 rate_n_flags; __le32 byte_count; u8 mac_active_msk; u8 mac_context_info; __le16 frame_time; } __packed; /* * TCP offload Rx assist info * * bits 0:3 - reserved * bits 4:7 - MIC CRC length * bits 8:12 - MAC header length * bit 13 - Padding indication * bit 14 - A-AMSDU indication * bit 15 - Offload enabled */ enum iwl_csum_rx_assist_info { CSUM_RXA_RESERVED_MASK = 0x000f, CSUM_RXA_MICSIZE_MASK = 0x00f0, CSUM_RXA_HEADERLEN_MASK = 0x1f00, CSUM_RXA_PADD = BIT(13), CSUM_RXA_AMSDU = BIT(14), CSUM_RXA_ENA = BIT(15) }; /** * struct iwl_rx_mpdu_res_start - phy info * @byte_count: byte count of the frame * @assist: see &enum iwl_csum_rx_assist_info */ struct iwl_rx_mpdu_res_start { __le16 byte_count; __le16 assist; } __packed; /* _RX_MPDU_RES_START_API_S_VER_2 */ /** * enum iwl_rx_phy_flags - to parse %iwl_rx_phy_info phy_flags * @RX_RES_PHY_FLAGS_BAND_24: true if the packet was received on 2.4 band * @RX_RES_PHY_FLAGS_MOD_CCK: modulation is CCK * @RX_RES_PHY_FLAGS_SHORT_PREAMBLE: true if packet's preamble was short * @RX_RES_PHY_FLAGS_NARROW_BAND: narrow band (<20 MHz) receive * @RX_RES_PHY_FLAGS_ANTENNA: antenna on which the packet was received * @RX_RES_PHY_FLAGS_ANTENNA_POS: antenna bit position * @RX_RES_PHY_FLAGS_AGG: set if the packet was part of an A-MPDU * @RX_RES_PHY_FLAGS_OFDM_HT: The frame was an HT frame * @RX_RES_PHY_FLAGS_OFDM_GF: The frame used GF preamble * @RX_RES_PHY_FLAGS_OFDM_VHT: The frame was a VHT frame */ enum iwl_rx_phy_flags { RX_RES_PHY_FLAGS_BAND_24 = BIT(0), RX_RES_PHY_FLAGS_MOD_CCK = BIT(1), RX_RES_PHY_FLAGS_SHORT_PREAMBLE = BIT(2), RX_RES_PHY_FLAGS_NARROW_BAND = BIT(3), RX_RES_PHY_FLAGS_ANTENNA = (0x7 << 4), RX_RES_PHY_FLAGS_ANTENNA_POS = 4, RX_RES_PHY_FLAGS_AGG = BIT(7), RX_RES_PHY_FLAGS_OFDM_HT = BIT(8), RX_RES_PHY_FLAGS_OFDM_GF = BIT(9), RX_RES_PHY_FLAGS_OFDM_VHT = BIT(10), }; /** * enum iwl_mvm_rx_status - written by fw for each Rx packet * @RX_MPDU_RES_STATUS_CRC_OK: CRC is fine * @RX_MPDU_RES_STATUS_OVERRUN_OK: there was no RXE overflow * @RX_MPDU_RES_STATUS_SRC_STA_FOUND: station was found * @RX_MPDU_RES_STATUS_KEY_VALID: key was valid * @RX_MPDU_RES_STATUS_KEY_PARAM_OK: key parameters were usable * @RX_MPDU_RES_STATUS_ICV_OK: ICV is fine, if not, the packet is destroyed * @RX_MPDU_RES_STATUS_MIC_OK: used for CCM alg only. TKIP MIC is checked * in the driver. * @RX_MPDU_RES_STATUS_TTAK_OK: TTAK is fine * @RX_MPDU_RES_STATUS_MNG_FRAME_REPLAY_ERR: valid for alg = CCM_CMAC or * alg = CCM only. Checks replay attack for 11w frames. Relevant only if * %RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME is set. 
* @RX_MPDU_RES_STATUS_SEC_NO_ENC: this frame is not encrypted * @RX_MPDU_RES_STATUS_SEC_WEP_ENC: this frame is encrypted using WEP * @RX_MPDU_RES_STATUS_SEC_CCM_ENC: this frame is encrypted using CCM * @RX_MPDU_RES_STATUS_SEC_TKIP_ENC: this frame is encrypted using TKIP * @RX_MPDU_RES_STATUS_SEC_EXT_ENC: this frame is encrypted using extension * algorithm * @RX_MPDU_RES_STATUS_SEC_CCM_CMAC_ENC: this frame is encrypted using CCM_CMAC * @RX_MPDU_RES_STATUS_SEC_ENC_ERR: this frame couldn't be decrypted * @RX_MPDU_RES_STATUS_SEC_ENC_MSK: bitmask of the encryption algorithm * @RX_MPDU_RES_STATUS_DEC_DONE: this frame has been successfully decrypted * @RX_MPDU_RES_STATUS_EXT_IV_BIT_CMP: extended IV (set with TKIP) * @RX_MPDU_RES_STATUS_KEY_ID_CMP_BIT: key ID comparison done * @RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME: this frame is an 11w management frame * @RX_MPDU_RES_STATUS_CSUM_DONE: checksum was done by the hw * @RX_MPDU_RES_STATUS_CSUM_OK: checksum found no errors * @RX_MPDU_RES_STATUS_STA_ID_MSK: station ID mask * @RX_MDPU_RES_STATUS_STA_ID_SHIFT: station ID bit shift */ enum iwl_mvm_rx_status { RX_MPDU_RES_STATUS_CRC_OK = BIT(0), RX_MPDU_RES_STATUS_OVERRUN_OK = BIT(1), RX_MPDU_RES_STATUS_SRC_STA_FOUND = BIT(2), RX_MPDU_RES_STATUS_KEY_VALID = BIT(3), RX_MPDU_RES_STATUS_KEY_PARAM_OK = BIT(4), RX_MPDU_RES_STATUS_ICV_OK = BIT(5), RX_MPDU_RES_STATUS_MIC_OK = BIT(6), RX_MPDU_RES_STATUS_TTAK_OK = BIT(7), RX_MPDU_RES_STATUS_MNG_FRAME_REPLAY_ERR = BIT(7), RX_MPDU_RES_STATUS_SEC_NO_ENC = (0 << 8), RX_MPDU_RES_STATUS_SEC_WEP_ENC = (1 << 8), RX_MPDU_RES_STATUS_SEC_CCM_ENC = (2 << 8), RX_MPDU_RES_STATUS_SEC_TKIP_ENC = (3 << 8), RX_MPDU_RES_STATUS_SEC_EXT_ENC = (4 << 8), RX_MPDU_RES_STATUS_SEC_CCM_CMAC_ENC = (6 << 8), RX_MPDU_RES_STATUS_SEC_ENC_ERR = (7 << 8), RX_MPDU_RES_STATUS_SEC_ENC_MSK = (7 << 8), RX_MPDU_RES_STATUS_DEC_DONE = BIT(11), RX_MPDU_RES_STATUS_EXT_IV_BIT_CMP = BIT(13), RX_MPDU_RES_STATUS_KEY_ID_CMP_BIT = BIT(14), RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME = BIT(15), RX_MPDU_RES_STATUS_CSUM_DONE = BIT(16), RX_MPDU_RES_STATUS_CSUM_OK = BIT(17), RX_MDPU_RES_STATUS_STA_ID_SHIFT = 24, RX_MPDU_RES_STATUS_STA_ID_MSK = 0x1f << RX_MDPU_RES_STATUS_STA_ID_SHIFT, }; /* 9000 series API */ enum iwl_rx_mpdu_mac_flags1 { IWL_RX_MDPU_MFLG1_ADDRTYPE_MASK = 0x03, IWL_RX_MPDU_MFLG1_MIC_CRC_LEN_MASK = 0xf0, /* shift should be 4, but the length is measured in 2-byte * words, so shifting only by 3 gives a byte result */ IWL_RX_MPDU_MFLG1_MIC_CRC_LEN_SHIFT = 3, }; enum iwl_rx_mpdu_mac_flags2 { /* in 2-byte words */ IWL_RX_MPDU_MFLG2_HDR_LEN_MASK = 0x1f, IWL_RX_MPDU_MFLG2_PAD = 0x20, IWL_RX_MPDU_MFLG2_AMSDU = 0x40, }; enum iwl_rx_mpdu_amsdu_info { IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK = 0x7f, IWL_RX_MPDU_AMSDU_LAST_SUBFRAME = 0x80, }; enum iwl_rx_l3_proto_values { IWL_RX_L3_TYPE_NONE, IWL_RX_L3_TYPE_IPV4, IWL_RX_L3_TYPE_IPV4_FRAG, IWL_RX_L3_TYPE_IPV6_FRAG, IWL_RX_L3_TYPE_IPV6, IWL_RX_L3_TYPE_IPV6_IN_IPV4, IWL_RX_L3_TYPE_ARP, IWL_RX_L3_TYPE_EAPOL, }; #define IWL_RX_L3_PROTO_POS 4 enum iwl_rx_l3l4_flags { IWL_RX_L3L4_IP_HDR_CSUM_OK = BIT(0), IWL_RX_L3L4_TCP_UDP_CSUM_OK = BIT(1), IWL_RX_L3L4_TCP_FIN_SYN_RST_PSH = BIT(2), IWL_RX_L3L4_TCP_ACK = BIT(3), IWL_RX_L3L4_L3_PROTO_MASK = 0xf << IWL_RX_L3_PROTO_POS, IWL_RX_L3L4_L4_PROTO_MASK = 0xf << 8, IWL_RX_L3L4_RSS_HASH_MASK = 0xf << 12, }; enum iwl_rx_mpdu_status { IWL_RX_MPDU_STATUS_CRC_OK = BIT(0), IWL_RX_MPDU_STATUS_OVERRUN_OK = BIT(1), IWL_RX_MPDU_STATUS_SRC_STA_FOUND = BIT(2), IWL_RX_MPDU_STATUS_KEY_VALID = BIT(3), IWL_RX_MPDU_STATUS_KEY_PARAM_OK = BIT(4), 
IWL_RX_MPDU_STATUS_ICV_OK = BIT(5), IWL_RX_MPDU_STATUS_MIC_OK = BIT(6), IWL_RX_MPDU_RES_STATUS_TTAK_OK = BIT(7), IWL_RX_MPDU_STATUS_SEC_MASK = 0x7 << 8, IWL_RX_MPDU_STATUS_SEC_UNKNOWN = IWL_RX_MPDU_STATUS_SEC_MASK, IWL_RX_MPDU_STATUS_SEC_NONE = 0x0 << 8, IWL_RX_MPDU_STATUS_SEC_WEP = 0x1 << 8, IWL_RX_MPDU_STATUS_SEC_CCM = 0x2 << 8, IWL_RX_MPDU_STATUS_SEC_TKIP = 0x3 << 8, IWL_RX_MPDU_STATUS_SEC_EXT_ENC = 0x4 << 8, IWL_RX_MPDU_STATUS_SEC_GCM = 0x5 << 8, IWL_RX_MPDU_STATUS_DECRYPTED = BIT(11), IWL_RX_MPDU_STATUS_WEP_MATCH = BIT(12), IWL_RX_MPDU_STATUS_EXT_IV_MATCH = BIT(13), IWL_RX_MPDU_STATUS_KEY_ID_MATCH = BIT(14), IWL_RX_MPDU_STATUS_ROBUST_MNG_FRAME = BIT(15), }; enum iwl_rx_mpdu_hash_filter { IWL_RX_MPDU_HF_A1_HASH_MASK = 0x3f, IWL_RX_MPDU_HF_FILTER_STATUS_MASK = 0xc0, }; enum iwl_rx_mpdu_sta_id_flags { IWL_RX_MPDU_SIF_STA_ID_MASK = 0x1f, IWL_RX_MPDU_SIF_RRF_ABORT = 0x20, IWL_RX_MPDU_SIF_FILTER_STATUS_MASK = 0xc0, }; #define IWL_RX_REORDER_DATA_INVALID_BAID 0x7f enum iwl_rx_mpdu_reorder_data { IWL_RX_MPDU_REORDER_NSSN_MASK = 0x00000fff, IWL_RX_MPDU_REORDER_SN_MASK = 0x00fff000, IWL_RX_MPDU_REORDER_SN_SHIFT = 12, IWL_RX_MPDU_REORDER_BAID_MASK = 0x7f000000, IWL_RX_MPDU_REORDER_BAID_SHIFT = 24, IWL_RX_MPDU_REORDER_BA_OLD_SN = 0x80000000, }; enum iwl_rx_mpdu_phy_info { IWL_RX_MPDU_PHY_8023 = BIT(0), IWL_RX_MPDU_PHY_AMPDU = BIT(5), IWL_RX_MPDU_PHY_AMPDU_TOGGLE = BIT(6), IWL_RX_MPDU_PHY_SHORT_PREAMBLE = BIT(7), /* short preamble is only for CCK, for non-CCK overridden by this */ IWL_RX_MPDU_PHY_NCCK_ADDTL_NTFY = BIT(7), IWL_RX_MPDU_PHY_TSF_OVERLOAD = BIT(8), }; enum iwl_rx_mpdu_mac_info { IWL_RX_MPDU_PHY_MAC_INDEX_MASK = 0x0f, IWL_RX_MPDU_PHY_PHY_INDEX_MASK = 0xf0, }; /* TSF overload low dword */ enum iwl_rx_phy_data0 { /* info type: HE any */ IWL_RX_PHY_DATA0_HE_BEAM_CHNG = 0x00000001, IWL_RX_PHY_DATA0_HE_UPLINK = 0x00000002, IWL_RX_PHY_DATA0_HE_BSS_COLOR_MASK = 0x000000fc, IWL_RX_PHY_DATA0_HE_SPATIAL_REUSE_MASK = 0x00000f00, /* 1 bit reserved */ IWL_RX_PHY_DATA0_HE_TXOP_DUR_MASK = 0x000fe000, IWL_RX_PHY_DATA0_HE_LDPC_EXT_SYM = 0x00100000, IWL_RX_PHY_DATA0_HE_PRE_FEC_PAD_MASK = 0x00600000, IWL_RX_PHY_DATA0_HE_PE_DISAMBIG = 0x00800000, IWL_RX_PHY_DATA0_HE_DOPPLER = 0x01000000, /* 6 bits reserved */ IWL_RX_PHY_DATA0_HE_DELIM_EOF = 0x80000000, }; enum iwl_rx_phy_info_type { IWL_RX_PHY_INFO_TYPE_NONE = 0, IWL_RX_PHY_INFO_TYPE_CCK = 1, IWL_RX_PHY_INFO_TYPE_OFDM_LGCY = 2, IWL_RX_PHY_INFO_TYPE_HT = 3, IWL_RX_PHY_INFO_TYPE_VHT_SU = 4, IWL_RX_PHY_INFO_TYPE_VHT_MU = 5, IWL_RX_PHY_INFO_TYPE_HE_SU = 6, IWL_RX_PHY_INFO_TYPE_HE_MU = 7, IWL_RX_PHY_INFO_TYPE_HE_TB = 8, IWL_RX_PHY_INFO_TYPE_HE_MU_EXT = 9, IWL_RX_PHY_INFO_TYPE_HE_TB_EXT = 10, }; /* TSF overload high dword */ enum iwl_rx_phy_data1 { /* * check this first - if TSF overload is set, * see &enum iwl_rx_phy_info_type */ IWL_RX_PHY_DATA1_INFO_TYPE_MASK = 0xf0000000, /* info type: HT/VHT/HE any */ IWL_RX_PHY_DATA1_LSIG_LEN_MASK = 0x0fff0000, /* info type: HE MU/MU-EXT */ IWL_RX_PHY_DATA1_HE_MU_SIGB_COMPRESSION = 0x00000001, IWL_RX_PHY_DATA1_HE_MU_SIBG_SYM_OR_USER_NUM_MASK = 0x0000001e, /* info type: HE any */ IWL_RX_PHY_DATA1_HE_LTF_NUM_MASK = 0x000000e0, IWL_RX_PHY_DATA1_HE_RU_ALLOC_SEC80 = 0x00000100, /* trigger encoded */ IWL_RX_PHY_DATA1_HE_RU_ALLOC_MASK = 0x0000fe00, /* info type: HE TB/TX-EXT */ IWL_RX_PHY_DATA1_HE_TB_PILOT_TYPE = 0x00000001, IWL_RX_PHY_DATA1_HE_TB_LOW_SS_MASK = 0x0000000e, }; /* goes into Metadata DW 7 */ enum iwl_rx_phy_data2 { /* info type: HE MU-EXT */ /* the a1/a2/... 
is what the PHY/firmware calls the values */ IWL_RX_PHY_DATA2_HE_MU_EXT_CH1_RU0 = 0x000000ff, /* a1 */ IWL_RX_PHY_DATA2_HE_MU_EXT_CH1_RU2 = 0x0000ff00, /* a2 */ IWL_RX_PHY_DATA2_HE_MU_EXT_CH2_RU0 = 0x00ff0000, /* b1 */ IWL_RX_PHY_DATA2_HE_MU_EXT_CH2_RU2 = 0xff000000, /* b2 */ /* info type: HE TB-EXT */ IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE1 = 0x0000000f, IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE2 = 0x000000f0, IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE3 = 0x00000f00, IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE4 = 0x0000f000, }; /* goes into Metadata DW 8 */ enum iwl_rx_phy_data3 { /* info type: HE MU-EXT */ IWL_RX_PHY_DATA3_HE_MU_EXT_CH1_RU1 = 0x000000ff, /* c1 */ IWL_RX_PHY_DATA3_HE_MU_EXT_CH1_RU3 = 0x0000ff00, /* c2 */ IWL_RX_PHY_DATA3_HE_MU_EXT_CH2_RU1 = 0x00ff0000, /* d1 */ IWL_RX_PHY_DATA3_HE_MU_EXT_CH2_RU3 = 0xff000000, /* d2 */ }; /* goes into Metadata DW 4 high 16 bits */ enum iwl_rx_phy_data4 { /* info type: HE MU-EXT */ IWL_RX_PHY_DATA4_HE_MU_EXT_CH1_CTR_RU = 0x0001, IWL_RX_PHY_DATA4_HE_MU_EXT_CH2_CTR_RU = 0x0002, IWL_RX_PHY_DATA4_HE_MU_EXT_CH1_CRC_OK = 0x0004, IWL_RX_PHY_DATA4_HE_MU_EXT_CH2_CRC_OK = 0x0008, IWL_RX_PHY_DATA4_HE_MU_EXT_SIGB_MCS_MASK = 0x00f0, IWL_RX_PHY_DATA4_HE_MU_EXT_SIGB_DCM = 0x0100, IWL_RX_PHY_DATA4_HE_MU_EXT_PREAMBLE_PUNC_TYPE_MASK = 0x0600, }; /** * struct iwl_rx_mpdu_desc_v1 - RX MPDU descriptor */ struct iwl_rx_mpdu_desc_v1 { /* DW7 - carries rss_hash only when rpa_en == 1 */ union { /** * @rss_hash: RSS hash value */ __le32 rss_hash; /** * @phy_data2: depends on info type (see @phy_data1) */ __le32 phy_data2; }; /* DW8 - carries filter_match only when rpa_en == 1 */ union { /** * @filter_match: filter match value */ __le32 filter_match; /** * @phy_data3: depends on info type (see @phy_data1) */ __le32 phy_data3; }; /* DW9 */ /** * @rate_n_flags: RX rate/flags encoding */ __le32 rate_n_flags; /* DW10 */ /** * @energy_a: energy chain A */ u8 energy_a; /** * @energy_b: energy chain B */ u8 energy_b; /** * @channel: channel number */ u8 channel; /** * @mac_context: MAC context mask */ u8 mac_context; /* DW11 */ /** * @gp2_on_air_rise: GP2 timer value on air rise (INA) */ __le32 gp2_on_air_rise; /* DW12 & DW13 */ union { /** * @tsf_on_air_rise: * TSF value on air rise (INA), only valid if * %IWL_RX_MPDU_PHY_TSF_OVERLOAD isn't set */ __le64 tsf_on_air_rise; struct { /** * @phy_data0: depends on info_type, see @phy_data1 */ __le32 phy_data0; /** * @phy_data1: valid only if * %IWL_RX_MPDU_PHY_TSF_OVERLOAD is set, * see &enum iwl_rx_phy_data1. 
*/ __le32 phy_data1; }; }; } __packed; /** * struct iwl_rx_mpdu_desc_v3 - RX MPDU descriptor */ struct iwl_rx_mpdu_desc_v3 { /* DW7 - carries filter_match only when rpa_en == 1 */ union { /** * @filter_match: filter match value */ __le32 filter_match; /** * @phy_data2: depends on info type (see @phy_data1) */ __le32 phy_data2; }; /* DW8 - carries rss_hash only when rpa_en == 1 */ union { /** * @rss_hash: RSS hash value */ __le32 rss_hash; /** * @phy_data3: depends on info type (see @phy_data1) */ __le32 phy_data3; }; /* DW9 */ /** * @partial_hash: 31:0 ip/tcp header hash * w/o some fields (such as IP SRC addr) */ __le32 partial_hash; /* DW10 */ /** * @raw_xsum: raw xsum value */ __le32 raw_xsum; /* DW11 */ /** * @rate_n_flags: RX rate/flags encoding */ __le32 rate_n_flags; /* DW12 */ /** * @energy_a: energy chain A */ u8 energy_a; /** * @energy_b: energy chain B */ u8 energy_b; /** * @channel: channel number */ u8 channel; /** * @mac_context: MAC context mask */ u8 mac_context; /* DW13 */ /** * @gp2_on_air_rise: GP2 timer value on air rise (INA) */ __le32 gp2_on_air_rise; /* DW14 & DW15 */ union { /** * @tsf_on_air_rise: * TSF value on air rise (INA), only valid if * %IWL_RX_MPDU_PHY_TSF_OVERLOAD isn't set */ __le64 tsf_on_air_rise; struct { /** * @phy_data0: depends on info_type, see @phy_data1 */ __le32 phy_data0; /** * @phy_data1: valid only if * %IWL_RX_MPDU_PHY_TSF_OVERLOAD is set, * see &enum iwl_rx_phy_data1. */ __le32 phy_data1; }; }; /* DW16 & DW17 */ /** * @reserved: reserved */ __le32 reserved[2]; } __packed; /* RX_MPDU_RES_START_API_S_VER_3 */ /** * struct iwl_rx_mpdu_desc - RX MPDU descriptor */ struct iwl_rx_mpdu_desc { /* DW2 */ /** * @mpdu_len: MPDU length */ __le16 mpdu_len; /** * @mac_flags1: &enum iwl_rx_mpdu_mac_flags1 */ u8 mac_flags1; /** * @mac_flags2: &enum iwl_rx_mpdu_mac_flags2 */ u8 mac_flags2; /* DW3 */ /** * @amsdu_info: &enum iwl_rx_mpdu_amsdu_info */ u8 amsdu_info; /** * @phy_info: &enum iwl_rx_mpdu_phy_info */ __le16 phy_info; /** * @mac_phy_idx: MAC/PHY index */ u8 mac_phy_idx; /* DW4 - carries csum data only when rpa_en == 1 */ /** * @raw_csum: raw checksum (allegedly unreliable) */ __le16 raw_csum; union { /** * @l3l4_flags: &enum iwl_rx_l3l4_flags */ __le16 l3l4_flags; /** * @phy_data4: depends on info type, see phy_data1 */ __le16 phy_data4; }; /* DW5 */ /** * @status: &enum iwl_rx_mpdu_status */ __le16 status; /** * @hash_filter: hash filter value */ u8 hash_filter; /** * @sta_id_flags: &enum iwl_rx_mpdu_sta_id_flags */ u8 sta_id_flags; /* DW6 */ /** * @reorder_data: &enum iwl_rx_mpdu_reorder_data */ __le32 reorder_data; union { struct iwl_rx_mpdu_desc_v1 v1; struct iwl_rx_mpdu_desc_v3 v3; }; } __packed; /* RX_MPDU_RES_START_API_S_VER_3 */ #define IWL_RX_DESC_SIZE_V1 offsetofend(struct iwl_rx_mpdu_desc, v1) #define RX_NO_DATA_CHAIN_A_POS 0 #define RX_NO_DATA_CHAIN_A_MSK (0xff << RX_NO_DATA_CHAIN_A_POS) #define RX_NO_DATA_CHAIN_B_POS 8 #define RX_NO_DATA_CHAIN_B_MSK (0xff << RX_NO_DATA_CHAIN_B_POS) #define RX_NO_DATA_CHANNEL_POS 16 #define RX_NO_DATA_CHANNEL_MSK (0xff << RX_NO_DATA_CHANNEL_POS) #define RX_NO_DATA_INFO_TYPE_POS 0 #define RX_NO_DATA_INFO_TYPE_MSK (0xff << RX_NO_DATA_INFO_TYPE_POS) #define RX_NO_DATA_INFO_TYPE_NONE 0 #define RX_NO_DATA_INFO_TYPE_RX_ERR 1 #define RX_NO_DATA_INFO_TYPE_NDP 2 #define RX_NO_DATA_INFO_TYPE_MU_UNMATCHED 3 #define RX_NO_DATA_INFO_TYPE_HE_TB_UNMATCHED 4 #define RX_NO_DATA_INFO_ERR_POS 8 #define RX_NO_DATA_INFO_ERR_MSK (0xff << RX_NO_DATA_INFO_ERR_POS) #define RX_NO_DATA_INFO_ERR_NONE 0 #define
RX_NO_DATA_INFO_ERR_BAD_PLCP 1 #define RX_NO_DATA_INFO_ERR_UNSUPPORTED_RATE 2 #define RX_NO_DATA_INFO_ERR_NO_DELIM 3 #define RX_NO_DATA_INFO_ERR_BAD_MAC_HDR 4 #define RX_NO_DATA_FRAME_TIME_POS 0 #define RX_NO_DATA_FRAME_TIME_MSK (0xfffff << RX_NO_DATA_FRAME_TIME_POS) #define RX_NO_DATA_RX_VEC0_HE_NSTS_MSK 0x03800000 #define RX_NO_DATA_RX_VEC0_VHT_NSTS_MSK 0x38000000 /** * struct iwl_rx_no_data - RX no data descriptor * @info: 7:0 frame type, 15:8 RX error type * @rssi: 7:0 energy chain-A, * 15:8 chain-B, measured at FINA time (FINA_ENERGY), 16:23 channel * @on_air_rise_time: GP2 during on air rise * @fr_time: frame time * @rate: rate/mcs of frame * @phy_info: &enum iwl_rx_phy_data0 and &enum iwl_rx_phy_info_type * @rx_vec: DW-12:9 raw RX vectors from DSP according to modulation type. * for VHT: OFDM_RX_VECTOR_SIGA1_OUT, OFDM_RX_VECTOR_SIGA2_OUT * for HE: OFDM_RX_VECTOR_HE_SIGA1_OUT, OFDM_RX_VECTOR_HE_SIGA2_OUT */ struct iwl_rx_no_data { __le32 info; __le32 rssi; __le32 on_air_rise_time; __le32 fr_time; __le32 rate; __le32 phy_info[2]; __le32 rx_vec[2]; } __packed; /* RX_NO_DATA_NTFY_API_S_VER_1 */ struct iwl_frame_release { u8 baid; u8 reserved; __le16 nssn; }; enum iwl_rss_hash_func_en { IWL_RSS_HASH_TYPE_IPV4_TCP, IWL_RSS_HASH_TYPE_IPV4_UDP, IWL_RSS_HASH_TYPE_IPV4_PAYLOAD, IWL_RSS_HASH_TYPE_IPV6_TCP, IWL_RSS_HASH_TYPE_IPV6_UDP, IWL_RSS_HASH_TYPE_IPV6_PAYLOAD, }; #define IWL_RSS_HASH_KEY_CNT 10 #define IWL_RSS_INDIRECTION_TABLE_SIZE 128 #define IWL_RSS_ENABLE 1 /** * struct iwl_rss_config_cmd - RSS (Receive Side Scaling) configuration * * @flags: 1 - enable, 0 - disable * @hash_mask: Type of RSS to use. Values are from %iwl_rss_hash_func_en * @reserved: reserved * @secret_key: 320 bit input of random key configuration from driver * @indirection_table: indirection table */ struct iwl_rss_config_cmd { __le32 flags; u8 hash_mask; u8 reserved[3]; __le32 secret_key[IWL_RSS_HASH_KEY_CNT]; u8 indirection_table[IWL_RSS_INDIRECTION_TABLE_SIZE]; } __packed; /* RSS_CONFIG_CMD_API_S_VER_1 */ #define IWL_MULTI_QUEUE_SYNC_SENDER_POS 0 #define IWL_MULTI_QUEUE_SYNC_SENDER_MSK 0xf /** * struct iwl_rxq_sync_cmd - RXQ notification trigger * * @flags: flags of the notification. bits 0:3 are the sender queue * @rxq_mask: rx queues to send the notification on * @count: number of bytes in payload, should be DWORD aligned * @payload: data to send to rx queues */ struct iwl_rxq_sync_cmd { __le32 flags; __le32 rxq_mask; __le32 count; u8 payload[]; } __packed; /* MULTI_QUEUE_DRV_SYNC_HDR_CMD_API_S_VER_1 */ /** * struct iwl_rxq_sync_notification - Notification triggered by RXQ * sync command * * @count: number of bytes in payload * @payload: data to send to rx queues */ struct iwl_rxq_sync_notification { __le32 count; u8 payload[]; } __packed; /* MULTI_QUEUE_DRV_SYNC_HDR_CMD_API_S_VER_1 */ /** * enum iwl_mvm_rxq_notif_type - Internal message identifier * * @IWL_MVM_RXQ_EMPTY: empty sync notification * @IWL_MVM_RXQ_NOTIF_DEL_BA: notify RSS queues of delBA * @IWL_MVM_RXQ_NSSN_SYNC: notify all the RSS queues with the new NSSN */ enum iwl_mvm_rxq_notif_type { IWL_MVM_RXQ_EMPTY, IWL_MVM_RXQ_NOTIF_DEL_BA, IWL_MVM_RXQ_NSSN_SYNC, }; /** * struct iwl_mvm_internal_rxq_notif - Internal representation of the data sent * in &iwl_rxq_sync_cmd. Should be DWORD aligned. * FW is agnostic to the payload, so there are no endianness requirements.
* * @type: value from &iwl_mvm_rxq_notif_type * @sync: ctrl path is waiting for all notifications to be received * @cookie: internal cookie to identify old notifications * @data: payload */ struct iwl_mvm_internal_rxq_notif { u16 type; u16 sync; u32 cookie; u8 data[]; } __packed; /** * enum iwl_mvm_pm_event - type of station PM event * @IWL_MVM_PM_EVENT_AWAKE: station woke up * @IWL_MVM_PM_EVENT_ASLEEP: station went to sleep * @IWL_MVM_PM_EVENT_UAPSD: station sent uAPSD trigger * @IWL_MVM_PM_EVENT_PS_POLL: station sent PS-Poll */ enum iwl_mvm_pm_event { IWL_MVM_PM_EVENT_AWAKE, IWL_MVM_PM_EVENT_ASLEEP, IWL_MVM_PM_EVENT_UAPSD, IWL_MVM_PM_EVENT_PS_POLL, }; /* PEER_PM_NTFY_API_E_VER_1 */ /** * struct iwl_mvm_pm_state_notification - station PM state notification * @sta_id: station ID of the station changing state * @type: the new powersave state, see &enum iwl_mvm_pm_event */ struct iwl_mvm_pm_state_notification { u8 sta_id; u8 type; /* private: */ __le16 reserved; } __packed; /* PEER_PM_NTFY_API_S_VER_1 */ #define BA_WINDOW_STREAMS_MAX 16 #define BA_WINDOW_STATUS_TID_MSK 0x000F #define BA_WINDOW_STATUS_STA_ID_POS 4 #define BA_WINDOW_STATUS_STA_ID_MSK 0x01F0 #define BA_WINDOW_STATUS_VALID_MSK BIT(9) /** * struct iwl_ba_window_status_notif - reordering window's status notification * @bitmap: bitmap of received frames [start_seq_num + 0]..[start_seq_num + 63] * @ra_tid: bit 3:0 - TID, bit 8:4 - STA_ID, bit 9 - valid * @start_seq_num: the start sequence number of the bitmap * @mpdu_rx_count: the number of received MPDUs since entering D0i3 */ struct iwl_ba_window_status_notif { __le64 bitmap[BA_WINDOW_STREAMS_MAX]; __le16 ra_tid[BA_WINDOW_STREAMS_MAX]; __le32 start_seq_num[BA_WINDOW_STREAMS_MAX]; __le16 mpdu_rx_count[BA_WINDOW_STREAMS_MAX]; } __packed; /* BA_WINDOW_STATUS_NTFY_API_S_VER_1 */ /** * struct iwl_rfh_queue_config - RX queue configuration * @q_num: Q num * @enable: enable queue * @reserved: alignment * @urbd_stts_wrptr: DMA address of urbd_stts_wrptr * @fr_bd_cb: DMA address of freeRB table * @ur_bd_cb: DMA address of used RB table * @fr_bd_wid: Initial index of the free table */ struct iwl_rfh_queue_data { u8 q_num; u8 enable; __le16 reserved; __le64 urbd_stts_wrptr; __le64 fr_bd_cb; __le64 ur_bd_cb; __le32 fr_bd_wid; } __packed; /* RFH_QUEUE_CONFIG_S_VER_1 */ /** * struct iwl_rfh_queue_config - RX queue configuration * @num_queues: number of queues configured * @reserved: alignment * @data: DMA addresses per-queue */ struct iwl_rfh_queue_config { u8 num_queues; u8 reserved[3]; struct iwl_rfh_queue_data data[]; } __packed; /* RFH_QUEUE_CONFIG_API_S_VER_1 */ #endif /* __iwl_fw_api_rx_h__ */ backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h000066400000000000000000000765441351064634500270710ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. 
* * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #ifndef __iwl_fw_api_scan_h__ #define __iwl_fw_api_scan_h__ /* Scan Commands, Responses, Notifications */ /* Max number of IEs for direct SSID scans in a command */ #define PROBE_OPTION_MAX 20 /** * struct iwl_ssid_ie - directed scan network information element * * Up to 20 of these may appear in REPLY_SCAN_CMD, * selected by "type" bit field in struct iwl_scan_channel; * each channel may select different ssids from among the 20 entries. * SSID IEs get transmitted in reverse order of entry. 
* * @id: element ID * @len: element length * @ssid: element (SSID) data */ struct iwl_ssid_ie { u8 id; u8 len; u8 ssid[IEEE80211_MAX_SSID_LEN]; } __packed; /* SCAN_DIRECT_SSID_IE_API_S_VER_1 */ /* scan offload */ #define IWL_SCAN_MAX_BLACKLIST_LEN 64 #define IWL_SCAN_SHORT_BLACKLIST_LEN 16 #define IWL_SCAN_MAX_PROFILES 11 #define SCAN_OFFLOAD_PROBE_REQ_SIZE 512 #define SCAN_NUM_BAND_PROBE_DATA_V_1 2 #define SCAN_NUM_BAND_PROBE_DATA_V_2 3 /* Default watchdog (in MS) for scheduled scan iteration */ #define IWL_SCHED_SCAN_WATCHDOG cpu_to_le16(15000) #define IWL_GOOD_CRC_TH_DEFAULT cpu_to_le16(1) #define CAN_ABORT_STATUS 1 #define IWL_FULL_SCAN_MULTIPLIER 5 #define IWL_FAST_SCHED_SCAN_ITERATIONS 3 #define IWL_MAX_SCHED_SCAN_PLANS 2 #define IWL_SCAN_MAX_NUM_OF_CHANNELS 52 enum scan_framework_client { SCAN_CLIENT_SCHED_SCAN = BIT(0), SCAN_CLIENT_NETDETECT = BIT(1), SCAN_CLIENT_ASSET_TRACKING = BIT(2), }; /** * struct iwl_scan_offload_blacklist - SCAN_OFFLOAD_BLACKLIST_S * @ssid: MAC address to filter out * @reported_rssi: AP rssi reported to the host * @client_bitmap: clients ignore this entry - enum scan_framework_client */ struct iwl_scan_offload_blacklist { u8 ssid[ETH_ALEN]; u8 reported_rssi; u8 client_bitmap; } __packed; enum iwl_scan_offload_network_type { IWL_NETWORK_TYPE_BSS = 1, IWL_NETWORK_TYPE_IBSS = 2, IWL_NETWORK_TYPE_ANY = 3, }; enum iwl_scan_offload_band_selection { IWL_SCAN_OFFLOAD_SELECT_2_4 = 0x4, IWL_SCAN_OFFLOAD_SELECT_5_2 = 0x8, IWL_SCAN_OFFLOAD_SELECT_ANY = 0xc, }; /** * struct iwl_scan_offload_profile - SCAN_OFFLOAD_PROFILE_S * @ssid_index: index to ssid list in fixed part * @unicast_cipher: encryption algorithm to match - bitmap * @auth_alg: authentication algorithm to match - bitmap * @network_type: enum iwl_scan_offload_network_type * @band_selection: enum iwl_scan_offload_band_selection * @client_bitmap: clients waiting for match - enum scan_framework_client * @reserved: reserved */ struct iwl_scan_offload_profile { u8 ssid_index; u8 unicast_cipher; u8 auth_alg; u8 network_type; u8 band_selection; u8 client_bitmap; u8 reserved[2]; } __packed; /** * struct iwl_scan_offload_profile_cfg - SCAN_OFFLOAD_PROFILES_CFG_API_S_VER_1 * @profiles: profiles to search for match * @blacklist_len: length of blacklist * @num_profiles: num of profiles in the list * @match_notify: clients waiting for match found notification * @pass_match: clients waiting for the results * @active_clients: active clients bitmap - enum scan_framework_client * @any_beacon_notify: clients waiting for match notification without match * @reserved: reserved */ struct iwl_scan_offload_profile_cfg { struct iwl_scan_offload_profile profiles[IWL_SCAN_MAX_PROFILES]; u8 blacklist_len; u8 num_profiles; u8 match_notify; u8 pass_match; u8 active_clients; u8 any_beacon_notify; u8 reserved[2]; } __packed; /** * struct iwl_scan_schedule_lmac - schedule of scan offload * @delay: delay between iterations, in seconds. 
* @iterations: num of scan iterations * @full_scan_mul: number of partial scans before each full scan */ struct iwl_scan_schedule_lmac { __le16 delay; u8 iterations; u8 full_scan_mul; } __packed; /* SCAN_SCHEDULE_API_S */ enum iwl_scan_offload_complete_status { IWL_SCAN_OFFLOAD_COMPLETED = 1, IWL_SCAN_OFFLOAD_ABORTED = 2, }; enum iwl_scan_ebs_status { IWL_SCAN_EBS_SUCCESS, IWL_SCAN_EBS_FAILED, IWL_SCAN_EBS_CHAN_NOT_FOUND, IWL_SCAN_EBS_INACTIVE, }; /** * struct iwl_scan_req_tx_cmd - SCAN_REQ_TX_CMD_API_S * @tx_flags: combination of TX_CMD_FLG_* * @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is * cleared. Combination of RATE_MCS_* * @sta_id: index of destination station in FW station table * @reserved: for alignment and future use */ struct iwl_scan_req_tx_cmd { __le32 tx_flags; __le32 rate_n_flags; u8 sta_id; u8 reserved[3]; } __packed; enum iwl_scan_channel_flags_lmac { IWL_UNIFIED_SCAN_CHANNEL_FULL = BIT(27), IWL_UNIFIED_SCAN_CHANNEL_PARTIAL = BIT(28), }; /** * struct iwl_scan_channel_cfg_lmac - SCAN_CHANNEL_CFG_S_VER2 * @flags: bits 1-20: directed scan to i'th ssid * other bits &enum iwl_scan_channel_flags_lmac * @channel_num: channel number 1-13 etc * @iter_count: scan iteration on this channel * @iter_interval: interval in seconds between iterations on one channel */ struct iwl_scan_channel_cfg_lmac { __le32 flags; __le16 channel_num; __le16 iter_count; __le32 iter_interval; } __packed; /* * struct iwl_scan_probe_segment - PROBE_SEGMENT_API_S_VER_1 * @offset: offset in the data block * @len: length of the segment */ struct iwl_scan_probe_segment { __le16 offset; __le16 len; } __packed; /* iwl_scan_probe_req - PROBE_REQUEST_FRAME_API_S_VER_2 * @mac_header: first (and common) part of the probe * @band_data: band specific data * @common_data: last (and common) part of the probe * @buf: raw data block */ struct iwl_scan_probe_req_v1 { struct iwl_scan_probe_segment mac_header; struct iwl_scan_probe_segment band_data[SCAN_NUM_BAND_PROBE_DATA_V_1]; struct iwl_scan_probe_segment common_data; u8 buf[SCAN_OFFLOAD_PROBE_REQ_SIZE]; } __packed; /* iwl_scan_probe_req - PROBE_REQUEST_FRAME_API_S_VER_v2 * @mac_header: first (and common) part of the probe * @band_data: band specific data * @common_data: last (and common) part of the probe * @buf: raw data block */ struct iwl_scan_probe_req { struct iwl_scan_probe_segment mac_header; struct iwl_scan_probe_segment band_data[SCAN_NUM_BAND_PROBE_DATA_V_2]; struct iwl_scan_probe_segment common_data; u8 buf[SCAN_OFFLOAD_PROBE_REQ_SIZE]; } __packed; enum iwl_scan_channel_flags { IWL_SCAN_CHANNEL_FLAG_EBS = BIT(0), IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE = BIT(1), IWL_SCAN_CHANNEL_FLAG_CACHE_ADD = BIT(2), IWL_SCAN_CHANNEL_FLAG_EBS_FRAG = BIT(3), }; /* struct iwl_scan_channel_opt - CHANNEL_OPTIMIZATION_API_S * @flags: enum iwl_scan_channel_flags * @non_ebs_ratio: defines the ratio of number of scan iterations where EBS is * involved. * 1 - EBS is disabled. * 2 - every second scan will be full scan(and so on). */ struct iwl_scan_channel_opt { __le16 flags; __le16 non_ebs_ratio; } __packed; /** * enum iwl_mvm_lmac_scan_flags - LMAC scan flags * @IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL: pass all beacons and probe responses * without filtering. 
* @IWL_MVM_LMAC_SCAN_FLAG_PASSIVE: force passive scan on all channels * @IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION: single channel scan * @IWL_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE: send iteration complete notification * @IWL_MVM_LMAC_SCAN_FLAG_MULTIPLE_SSIDS: multiple SSID matching * @IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED: all passive scans will be fragmented * @IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED: insert WFA vendor-specific TPC report * and DS parameter set IEs into probe requests. * @IWL_MVM_LMAC_SCAN_FLAG_EXTENDED_DWELL: use extended dwell time on channels * 1, 6 and 11. * @IWL_MVM_LMAC_SCAN_FLAG_MATCH: Send match found notification on matches */ enum iwl_mvm_lmac_scan_flags { IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL = BIT(0), IWL_MVM_LMAC_SCAN_FLAG_PASSIVE = BIT(1), IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION = BIT(2), IWL_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE = BIT(3), IWL_MVM_LMAC_SCAN_FLAG_MULTIPLE_SSIDS = BIT(4), IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED = BIT(5), IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED = BIT(6), IWL_MVM_LMAC_SCAN_FLAG_EXTENDED_DWELL = BIT(7), IWL_MVM_LMAC_SCAN_FLAG_MATCH = BIT(9), }; enum iwl_scan_priority { IWL_SCAN_PRIORITY_LOW, IWL_SCAN_PRIORITY_MEDIUM, IWL_SCAN_PRIORITY_HIGH, }; enum iwl_scan_priority_ext { IWL_SCAN_PRIORITY_EXT_0_LOWEST, IWL_SCAN_PRIORITY_EXT_1, IWL_SCAN_PRIORITY_EXT_2, IWL_SCAN_PRIORITY_EXT_3, IWL_SCAN_PRIORITY_EXT_4, IWL_SCAN_PRIORITY_EXT_5, IWL_SCAN_PRIORITY_EXT_6, IWL_SCAN_PRIORITY_EXT_7_HIGHEST, }; /** * struct iwl_scan_req_lmac - SCAN_REQUEST_CMD_API_S_VER_1 * @reserved1: for alignment and future use * @n_channels: num of channels to scan * @active_dwell: dwell time for active channels * @passive_dwell: dwell time for passive channels * @fragmented_dwell: dwell time for fragmented passive scan * @extended_dwell: dwell time for channels 1, 6 and 11 (in certain cases) * @reserved2: for alignment and future use * @rx_chain_select: PHY_RX_CHAIN_* flags * @scan_flags: &enum iwl_mvm_lmac_scan_flags * @max_out_time: max time (in TU) to be out of associated channel * @suspend_time: pause scan this long (TUs) when returning to service channel * @flags: RXON flags * @filter_flags: RXON filter * @tx_cmd: tx command for active scan; for 2GHz and for 5GHz * @direct_scan: list of SSIDs for directed active scan * @scan_prio: enum iwl_scan_priority * @iter_num: number of scan iterations * @delay: delay in seconds before first iteration * @schedule: two scheduling plans. The first one is finite, the second one can * be infinite. * @channel_opt: channel optimization options, for full and partial scan * @data: channel configuration and probe request packet. 
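 *
 * The variable-length @data block holds @n_channels entries of
 * &struct iwl_scan_channel_cfg_lmac followed by the probe request
 * template. A hypothetical size computation (illustrative only,
 * assuming the v1 probe request layout):
 *
 *	size_t len = sizeof(struct iwl_scan_req_lmac) +
 *		     n_channels * sizeof(struct iwl_scan_channel_cfg_lmac) +
 *		     sizeof(struct iwl_scan_probe_req_v1);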
*/ struct iwl_scan_req_lmac { /* SCAN_REQUEST_FIXED_PART_API_S_VER_7 */ __le32 reserved1; u8 n_channels; u8 active_dwell; u8 passive_dwell; u8 fragmented_dwell; u8 extended_dwell; u8 reserved2; __le16 rx_chain_select; __le32 scan_flags; __le32 max_out_time; __le32 suspend_time; /* RX_ON_FLAGS_API_S_VER_1 */ __le32 flags; __le32 filter_flags; struct iwl_scan_req_tx_cmd tx_cmd[2]; struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX]; __le32 scan_prio; /* SCAN_REQ_PERIODIC_PARAMS_API_S */ __le32 iter_num; __le32 delay; struct iwl_scan_schedule_lmac schedule[IWL_MAX_SCHED_SCAN_PLANS]; struct iwl_scan_channel_opt channel_opt[2]; u8 data[]; } __packed; /** * struct iwl_scan_results_notif - scan results for one channel - * SCAN_RESULT_NTF_API_S_VER_3 * @channel: which channel the results are from * @band: 0 for 5.2 GHz, 1 for 2.4 GHz * @probe_status: SCAN_PROBE_STATUS_*, indicates success of probe request * @num_probe_not_sent: # of request that weren't sent due to not enough time * @duration: duration spent in channel, in usecs */ struct iwl_scan_results_notif { u8 channel; u8 band; u8 probe_status; u8 num_probe_not_sent; __le32 duration; } __packed; /** * struct iwl_lmac_scan_complete_notif - notifies end of scanning (all channels) * SCAN_COMPLETE_NTF_API_S_VER_3 * @scanned_channels: number of channels scanned (and number of valid results) * @status: one of SCAN_COMP_STATUS_* * @bt_status: BT on/off status * @last_channel: last channel that was scanned * @tsf_low: TSF timer (lower half) in usecs * @tsf_high: TSF timer (higher half) in usecs * @results: an array of scan results, only "scanned_channels" of them are valid */ struct iwl_lmac_scan_complete_notif { u8 scanned_channels; u8 status; u8 bt_status; u8 last_channel; __le32 tsf_low; __le32 tsf_high; struct iwl_scan_results_notif results[]; } __packed; /** * struct iwl_scan_offload_complete - PERIODIC_SCAN_COMPLETE_NTF_API_S_VER_2 * @last_schedule_line: last schedule line executed (fast or regular) * @last_schedule_iteration: last scan iteration executed before scan abort * @status: &enum iwl_scan_offload_complete_status * @ebs_status: EBS success status &enum iwl_scan_ebs_status * @time_after_last_iter: time in seconds elapsed after last iteration * @reserved: reserved */ struct iwl_periodic_scan_complete { u8 last_schedule_line; u8 last_schedule_iteration; u8 status; u8 ebs_status; __le32 time_after_last_iter; __le32 reserved; } __packed; /* UMAC Scan API */ /* The maximum of either of these cannot exceed 8, because we use an * 8-bit mask (see IWL_MVM_SCAN_MASK in mvm.h). 
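 *
 * A hypothetical compile-time guard for that invariant (not present in
 * this tree) could read:
 *
 *	BUILD_BUG_ON(IWL_MVM_MAX_UMAC_SCANS > 8 ||
 *		     IWL_MVM_MAX_LMAC_SCANS > 8);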
*/ #define IWL_MVM_MAX_UMAC_SCANS 4 #define IWL_MVM_MAX_LMAC_SCANS 1 enum scan_config_flags { SCAN_CONFIG_FLAG_ACTIVATE = BIT(0), SCAN_CONFIG_FLAG_DEACTIVATE = BIT(1), SCAN_CONFIG_FLAG_FORBID_CHUB_REQS = BIT(2), SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS = BIT(3), SCAN_CONFIG_FLAG_SET_TX_CHAINS = BIT(8), SCAN_CONFIG_FLAG_SET_RX_CHAINS = BIT(9), SCAN_CONFIG_FLAG_SET_AUX_STA_ID = BIT(10), SCAN_CONFIG_FLAG_SET_ALL_TIMES = BIT(11), SCAN_CONFIG_FLAG_SET_EFFECTIVE_TIMES = BIT(12), SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS = BIT(13), SCAN_CONFIG_FLAG_SET_LEGACY_RATES = BIT(14), SCAN_CONFIG_FLAG_SET_MAC_ADDR = BIT(15), SCAN_CONFIG_FLAG_SET_FRAGMENTED = BIT(16), SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED = BIT(17), SCAN_CONFIG_FLAG_SET_CAM_MODE = BIT(18), SCAN_CONFIG_FLAG_CLEAR_CAM_MODE = BIT(19), SCAN_CONFIG_FLAG_SET_PROMISC_MODE = BIT(20), SCAN_CONFIG_FLAG_CLEAR_PROMISC_MODE = BIT(21), SCAN_CONFIG_FLAG_SET_LMAC2_FRAGMENTED = BIT(22), SCAN_CONFIG_FLAG_CLEAR_LMAC2_FRAGMENTED = BIT(23), /* Bits 26-31 are for num of channels in channel_array */ #define SCAN_CONFIG_N_CHANNELS(n) ((n) << 26) }; enum scan_config_rates { /* OFDM basic rates */ SCAN_CONFIG_RATE_6M = BIT(0), SCAN_CONFIG_RATE_9M = BIT(1), SCAN_CONFIG_RATE_12M = BIT(2), SCAN_CONFIG_RATE_18M = BIT(3), SCAN_CONFIG_RATE_24M = BIT(4), SCAN_CONFIG_RATE_36M = BIT(5), SCAN_CONFIG_RATE_48M = BIT(6), SCAN_CONFIG_RATE_54M = BIT(7), /* CCK basic rates */ SCAN_CONFIG_RATE_1M = BIT(8), SCAN_CONFIG_RATE_2M = BIT(9), SCAN_CONFIG_RATE_5M = BIT(10), SCAN_CONFIG_RATE_11M = BIT(11), /* Bits 16-27 are for supported rates */ #define SCAN_CONFIG_SUPPORTED_RATE(rate) ((rate) << 16) }; enum iwl_channel_flags { IWL_CHANNEL_FLAG_EBS = BIT(0), IWL_CHANNEL_FLAG_ACCURATE_EBS = BIT(1), IWL_CHANNEL_FLAG_EBS_ADD = BIT(2), IWL_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE = BIT(3), }; /** * struct iwl_scan_dwell * @active: default dwell time for active scan * @passive: default dwell time for passive scan * @fragmented: default dwell time for fragmented scan * @extended: default dwell time for channels 1, 6 and 11 */ struct iwl_scan_dwell { u8 active; u8 passive; u8 fragmented; u8 extended; } __packed; /** * struct iwl_scan_config_v1 * @flags: enum scan_config_flags * @tx_chains: valid_tx antenna - ANT_* definitions * @rx_chains: valid_rx antenna - ANT_* definitions * @legacy_rates: default legacy rates - enum scan_config_rates * @out_of_channel_time: default max out of serving channel time * @suspend_time: default max suspend time * @dwell: dwells for the scan * @mac_addr: default mac address to be used in probes * @bcast_sta_id: the index of the station in the fw * @channel_flags: default channel flags - enum iwl_channel_flags * scan_config_channel_flag * @channel_array: default supported channels */ struct iwl_scan_config_v1 { __le32 flags; __le32 tx_chains; __le32 rx_chains; __le32 legacy_rates; __le32 out_of_channel_time; __le32 suspend_time; struct iwl_scan_dwell dwell; u8 mac_addr[ETH_ALEN]; u8 bcast_sta_id; u8 channel_flags; u8 channel_array[]; } __packed; /* SCAN_CONFIG_DB_CMD_API_S */ #define SCAN_TWO_LMACS 2 #define SCAN_LB_LMAC_IDX 0 #define SCAN_HB_LMAC_IDX 1 struct iwl_scan_config_v2 { __le32 flags; __le32 tx_chains; __le32 rx_chains; __le32 legacy_rates; __le32 out_of_channel_time[SCAN_TWO_LMACS]; __le32 suspend_time[SCAN_TWO_LMACS]; struct iwl_scan_dwell dwell; u8 mac_addr[ETH_ALEN]; u8 bcast_sta_id; u8 channel_flags; u8 channel_array[]; } __packed; /* SCAN_CONFIG_DB_CMD_API_S_2 */ /** * struct iwl_scan_config * @enable_cam_mode: whether to enable CAM mode. 
* @enable_promiscouos_mode: whether to enable promiscuous mode * @bcast_sta_id: the index of the station in the fw * @reserved: reserved * @tx_chains: valid_tx antenna - ANT_* definitions * @rx_chains: valid_rx antenna - ANT_* definitions */ struct iwl_scan_config { u8 enable_cam_mode; u8 enable_promiscouos_mode; u8 bcast_sta_id; u8 reserved; __le32 tx_chains; __le32 rx_chains; } __packed; /* SCAN_CONFIG_DB_CMD_API_S_3 */ /** * enum iwl_umac_scan_flags - UMAC scan flags * @IWL_UMAC_SCAN_FLAG_PREEMPTIVE: scan process triggered by this scan request * can be preempted by other scan requests with higher priority. * The low priority scan will be resumed when the higher priority scan is * completed. * @IWL_UMAC_SCAN_FLAG_START_NOTIF: notification will be sent to the driver * when scan starts. */ enum iwl_umac_scan_flags { IWL_UMAC_SCAN_FLAG_PREEMPTIVE = BIT(0), IWL_UMAC_SCAN_FLAG_START_NOTIF = BIT(1), }; enum iwl_umac_scan_uid_offsets { IWL_UMAC_SCAN_UID_TYPE_OFFSET = 0, IWL_UMAC_SCAN_UID_SEQ_OFFSET = 8, }; enum iwl_umac_scan_general_flags { IWL_UMAC_SCAN_GEN_FLAGS_PERIODIC = BIT(0), IWL_UMAC_SCAN_GEN_FLAGS_OVER_BT = BIT(1), IWL_UMAC_SCAN_GEN_FLAGS_PASS_ALL = BIT(2), IWL_UMAC_SCAN_GEN_FLAGS_PASSIVE = BIT(3), IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT = BIT(4), IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE = BIT(5), IWL_UMAC_SCAN_GEN_FLAGS_MULTIPLE_SSID = BIT(6), IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED = BIT(7), IWL_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED = BIT(8), IWL_UMAC_SCAN_GEN_FLAGS_MATCH = BIT(9), IWL_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL = BIT(10), /* Extended dwell is obsolete when adaptive dwell is used, making this * bit reusable. Hence, probe request defer is used only when adaptive * dwell is supported. */ IWL_UMAC_SCAN_GEN_FLAGS_PROB_REQ_DEFER_SUPP = BIT(10), IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED = BIT(11), IWL_UMAC_SCAN_GEN_FLAGS_ADAPTIVE_DWELL = BIT(13), IWL_UMAC_SCAN_GEN_FLAGS_MAX_CHNL_TIME = BIT(14), IWL_UMAC_SCAN_GEN_FLAGS_PROB_REQ_HIGH_TX_RATE = BIT(15), }; /** * enum iwl_umac_scan_general_flags2 - UMAC scan general flags #2 * @IWL_UMAC_SCAN_GEN_FLAGS2_NOTIF_PER_CHNL: Whether to send a complete * notification per channel or not. * @IWL_UMAC_SCAN_GEN_FLAGS2_ALLOW_CHNL_REORDER: Whether to allow channel * reorder optimization or not. */ enum iwl_umac_scan_general_flags2 { IWL_UMAC_SCAN_GEN_FLAGS2_NOTIF_PER_CHNL = BIT(0), IWL_UMAC_SCAN_GEN_FLAGS2_ALLOW_CHNL_REORDER = BIT(1), }; /** * struct iwl_scan_channel_cfg_umac * @flags: bitmap - 0-19: directed scan to i'th ssid. * @channel_num: channel number 1-13 etc. * @band: band of channel: 0 for 2GHz, 1 for 5GHz * @iter_count: repetition count for the channel. * @iter_interval: interval between two scan iterations on one channel.
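 *
 * A hypothetical v2 fill for one entry (illustrative only; "cfg" and
 * "ssid_idx" are stand-ins, and the band encoding is the one
 * documented above):
 *
 *	cfg->flags = cpu_to_le32(BIT(ssid_idx));
 *	cfg->v2.channel_num = 36;
 *	cfg->v2.band = 1;
 *	cfg->v2.iter_count = 1;
 *	cfg->v2.iter_interval = 0;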
*/ struct iwl_scan_channel_cfg_umac { __le32 flags; /* Both versions are of the same size, so use a union without adjusting * the command size later */ union { struct { u8 channel_num; u8 iter_count; __le16 iter_interval; } v1; /* SCAN_CHANNEL_CFG_S_VER1 */ struct { u8 channel_num; u8 band; u8 iter_count; u8 iter_interval; } v2; /* SCAN_CHANNEL_CFG_S_VER2 */ }; } __packed; /** * struct iwl_scan_umac_schedule * @interval: interval in seconds between scan iterations * @iter_count: num of scan iterations for schedule plan, 0xff for infinite loop * @reserved: for alignment and future use */ struct iwl_scan_umac_schedule { __le16 interval; u8 iter_count; u8 reserved; } __packed; /* SCAN_SCHED_PARAM_API_S_VER_1 */ struct iwl_scan_req_umac_tail_v1 { /* SCAN_PERIODIC_PARAMS_API_S_VER_1 */ struct iwl_scan_umac_schedule schedule[IWL_MAX_SCHED_SCAN_PLANS]; __le16 delay; __le16 reserved; /* SCAN_PROBE_PARAMS_API_S_VER_1 */ struct iwl_scan_probe_req_v1 preq; struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX]; } __packed; /** * struct iwl_scan_req_umac_tail - the rest of the UMAC scan request command * parameters following channels configuration array. * @schedule: two scheduling plans. * @delay: delay in TUs before starting the first scan iteration * @reserved: for future use and alignment * @preq: probe request with IEs blocks * @direct_scan: list of SSIDs for directed active scan */ struct iwl_scan_req_umac_tail_v2 { /* SCAN_PERIODIC_PARAMS_API_S_VER_1 */ struct iwl_scan_umac_schedule schedule[IWL_MAX_SCHED_SCAN_PLANS]; __le16 delay; __le16 reserved; /* SCAN_PROBE_PARAMS_API_S_VER_2 */ struct iwl_scan_probe_req preq; struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX]; } __packed; /** * struct iwl_scan_umac_chan_param * @flags: channel flags &enum iwl_scan_channel_flags * @count: num of channels in scan request * @reserved: for future use and alignment */ struct iwl_scan_umac_chan_param { u8 flags; u8 count; __le16 reserved; } __packed; /*SCAN_CHANNEL_PARAMS_API_S_VER_1 */ /** * struct iwl_scan_req_umac * @flags: &enum iwl_umac_scan_flags * @uid: scan id, &enum iwl_umac_scan_uid_offsets * @ooc_priority: out of channel priority - &enum iwl_scan_priority * @general_flags: &enum iwl_umac_scan_general_flags * @scan_start_mac_id: report the scan start TSF time according to this mac TSF * @extended_dwell: dwell time for channels 1, 6 and 11 * @active_dwell: dwell time for active scan per LMAC * @passive_dwell: dwell time for passive scan per LMAC * @fragmented_dwell: dwell time for fragmented passive scan * @adwell_default_n_aps: for adaptive dwell the default number of APs * per channel * @adwell_default_n_aps_social: for adaptive dwell the default * number of APs per social (1,6,11) channel * @general_flags2: &enum iwl_umac_scan_general_flags2 * @adwell_max_budget: for adaptive dwell the maximal budget of TU to be added * to total scan time * @max_out_time: max out of serving channel time, per LMAC - for CDB there * are 2 LMACs * @suspend_time: max suspend time, per LMAC - for CDB there are 2 LMACs * @scan_priority: scan internal prioritization &enum iwl_scan_priority * @num_of_fragments: Number of fragments needed for full coverage per band. * Relevant only for fragmented scan. 
* @channel: &struct iwl_scan_umac_chan_param * @reserved: for future use and alignment * @reserved3: for future use and alignment * @data: &struct iwl_scan_channel_cfg_umac and * &struct iwl_scan_req_umac_tail */ struct iwl_scan_req_umac { __le32 flags; __le32 uid; __le32 ooc_priority; __le16 general_flags; u8 reserved; u8 scan_start_mac_id; union { struct { u8 extended_dwell; u8 active_dwell; u8 passive_dwell; u8 fragmented_dwell; __le32 max_out_time; __le32 suspend_time; __le32 scan_priority; struct iwl_scan_umac_chan_param channel; u8 data[]; } v1; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_1 */ struct { u8 extended_dwell; u8 active_dwell; u8 passive_dwell; u8 fragmented_dwell; __le32 max_out_time[SCAN_TWO_LMACS]; __le32 suspend_time[SCAN_TWO_LMACS]; __le32 scan_priority; struct iwl_scan_umac_chan_param channel; u8 data[]; } v6; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_6 */ struct { u8 active_dwell; u8 passive_dwell; u8 fragmented_dwell; u8 adwell_default_n_aps; u8 adwell_default_n_aps_social; u8 reserved3; __le16 adwell_max_budget; __le32 max_out_time[SCAN_TWO_LMACS]; __le32 suspend_time[SCAN_TWO_LMACS]; __le32 scan_priority; struct iwl_scan_umac_chan_param channel; u8 data[]; } v7; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_7 */ struct { u8 active_dwell[SCAN_TWO_LMACS]; u8 reserved2; u8 adwell_default_n_aps; u8 adwell_default_n_aps_social; u8 general_flags2; __le16 adwell_max_budget; __le32 max_out_time[SCAN_TWO_LMACS]; __le32 suspend_time[SCAN_TWO_LMACS]; __le32 scan_priority; u8 passive_dwell[SCAN_TWO_LMACS]; u8 num_of_fragments[SCAN_TWO_LMACS]; struct iwl_scan_umac_chan_param channel; u8 data[]; } v8; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_8 */ struct { u8 active_dwell[SCAN_TWO_LMACS]; u8 adwell_default_hb_n_aps; u8 adwell_default_lb_n_aps; u8 adwell_default_n_aps_social; u8 general_flags2; __le16 adwell_max_budget; __le32 max_out_time[SCAN_TWO_LMACS]; __le32 suspend_time[SCAN_TWO_LMACS]; __le32 scan_priority; u8 passive_dwell[SCAN_TWO_LMACS]; u8 num_of_fragments[SCAN_TWO_LMACS]; struct iwl_scan_umac_chan_param channel; u8 data[]; } v9; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_9 */ }; } __packed; #define IWL_SCAN_REQ_UMAC_SIZE_V8 sizeof(struct iwl_scan_req_umac) #define IWL_SCAN_REQ_UMAC_SIZE_V7 48 #define IWL_SCAN_REQ_UMAC_SIZE_V6 44 #define IWL_SCAN_REQ_UMAC_SIZE_V1 36 /** * struct iwl_umac_scan_abort * @uid: scan id, &enum iwl_umac_scan_uid_offsets * @flags: reserved */ struct iwl_umac_scan_abort { __le32 uid; __le32 flags; } __packed; /* SCAN_ABORT_CMD_UMAC_API_S_VER_1 */ /** * struct iwl_umac_scan_complete * @uid: scan id, &enum iwl_umac_scan_uid_offsets * @last_schedule: last scheduling line * @last_iter: last scan iteration number * @status: &enum iwl_scan_offload_complete_status * @ebs_status: &enum iwl_scan_ebs_status * @time_from_last_iter: time elapsed from last iteration * @reserved: for future use */ struct iwl_umac_scan_complete { __le32 uid; u8 last_schedule; u8 last_iter; u8 status; u8 ebs_status; __le32 time_from_last_iter; __le32 reserved; } __packed; /* SCAN_COMPLETE_NTF_UMAC_API_S_VER_1 */ #define SCAN_OFFLOAD_MATCHING_CHANNELS_LEN_V1 5 #define SCAN_OFFLOAD_MATCHING_CHANNELS_LEN 7 /** * struct iwl_scan_offload_profile_match_v1 - match information * @bssid: matched bssid * @reserved: reserved * @channel: channel where the match occurred * @energy: energy * @matching_feature: feature matches * @matching_channels: bitmap of channels that matched, referencing * the channels passed in the scan offload request. 
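 *
 * A hypothetical walk of that bitmap (illustrative only; "match" is a
 * received instance, "report_channel" is a stand-in, and LSB-first
 * byte ordering is assumed):
 *
 *	int i;
 *
 *	for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN_V1 * 8; i++)
 *		if (match->matching_channels[i / 8] & BIT(i % 8))
 *			report_channel(i);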
*/ struct iwl_scan_offload_profile_match_v1 { u8 bssid[ETH_ALEN]; __le16 reserved; u8 channel; u8 energy; u8 matching_feature; u8 matching_channels[SCAN_OFFLOAD_MATCHING_CHANNELS_LEN_V1]; } __packed; /* SCAN_OFFLOAD_PROFILE_MATCH_RESULTS_S_VER_1 */ /** * struct iwl_scan_offload_profiles_query_v1 - match results query response * @matched_profiles: bitmap of matched profiles, referencing the * matches passed in the scan offload request * @last_scan_age: age of the last offloaded scan * @n_scans_done: number of offloaded scans done * @gp2_d0u: GP2 when D0U occurred * @gp2_invoked: GP2 when scan offload was invoked * @resume_while_scanning: not used * @self_recovery: obsolete * @reserved: reserved * @matches: array of match information, one for each match */ struct iwl_scan_offload_profiles_query_v1 { __le32 matched_profiles; __le32 last_scan_age; __le32 n_scans_done; __le32 gp2_d0u; __le32 gp2_invoked; u8 resume_while_scanning; u8 self_recovery; __le16 reserved; struct iwl_scan_offload_profile_match_v1 matches[IWL_SCAN_MAX_PROFILES]; } __packed; /* SCAN_OFFLOAD_PROFILES_QUERY_RSP_S_VER_2 */ /** * struct iwl_scan_offload_profile_match - match information * @bssid: matched bssid * @reserved: reserved * @channel: channel where the match occurred * @energy: energy * @matching_feature: feature matches * @matching_channels: bitmap of channels that matched, referencing * the channels passed in the scan offload request. */ struct iwl_scan_offload_profile_match { u8 bssid[ETH_ALEN]; __le16 reserved; u8 channel; u8 energy; u8 matching_feature; u8 matching_channels[SCAN_OFFLOAD_MATCHING_CHANNELS_LEN]; } __packed; /* SCAN_OFFLOAD_PROFILE_MATCH_RESULTS_S_VER_2 */ /** * struct iwl_scan_offload_profiles_query - match results query response * @matched_profiles: bitmap of matched profiles, referencing the * matches passed in the scan offload request * @last_scan_age: age of the last offloaded scan * @n_scans_done: number of offloaded scans done * @gp2_d0u: GP2 when D0U occurred * @gp2_invoked: GP2 when scan offload was invoked * @resume_while_scanning: not used * @self_recovery: obsolete * @reserved: reserved * @matches: array of match information, one for each match */ struct iwl_scan_offload_profiles_query { __le32 matched_profiles; __le32 last_scan_age; __le32 n_scans_done; __le32 gp2_d0u; __le32 gp2_invoked; u8 resume_while_scanning; u8 self_recovery; __le16 reserved; struct iwl_scan_offload_profile_match matches[IWL_SCAN_MAX_PROFILES]; } __packed; /* SCAN_OFFLOAD_PROFILES_QUERY_RSP_S_VER_3 */ /** * struct iwl_umac_scan_iter_complete_notif - notifies end of scanning iteration * @uid: scan id, &enum iwl_umac_scan_uid_offsets * @scanned_channels: number of channels scanned and number of valid elements in * results array * @status: one of SCAN_COMP_STATUS_* * @bt_status: BT on/off status * @last_channel: last channel that was scanned * @start_tsf: TSF timer in usecs of the scan start time for the mac specified * in &struct iwl_scan_req_umac. 
* @results: array of scan results, length in @scanned_channels */ struct iwl_umac_scan_iter_complete_notif { __le32 uid; u8 scanned_channels; u8 status; u8 bt_status; u8 last_channel; __le64 start_tsf; struct iwl_scan_results_notif results[]; } __packed; /* SCAN_ITER_COMPLETE_NTF_UMAC_API_S_VER_2 */ #endif /* __iwl_fw_api_scan_h__ */ backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/fw/api/sf.h000066400000000000000000000130401351064634500265330ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* *****************************************************************************/ #ifndef __iwl_fw_api_sf_h__ #define __iwl_fw_api_sf_h__ /* Smart Fifo state */ enum iwl_sf_state { SF_LONG_DELAY_ON = 0, /* should never be called by driver */ SF_FULL_ON, SF_UNINIT, SF_INIT_OFF, SF_HW_NUM_STATES }; /* Smart Fifo possible scenario */ enum iwl_sf_scenario { SF_SCENARIO_SINGLE_UNICAST, SF_SCENARIO_AGG_UNICAST, SF_SCENARIO_MULTICAST, SF_SCENARIO_BA_RESP, SF_SCENARIO_TX_RESP, SF_NUM_SCENARIO }; #define SF_TRANSIENT_STATES_NUMBER 2 /* SF_LONG_DELAY_ON and SF_FULL_ON */ #define SF_NUM_TIMEOUT_TYPES 2 /* Aging timer and Idle timer */ /* smart FIFO default values */ #define SF_W_MARK_SISO 6144 #define SF_W_MARK_MIMO2 8192 #define SF_W_MARK_MIMO3 6144 #define SF_W_MARK_LEGACY 4096 #define SF_W_MARK_SCAN 4096 /* SF Scenarios timers for default configuration (aligned to 32 uSec) */ #define SF_SINGLE_UNICAST_IDLE_TIMER_DEF 160 /* 150 uSec */ #define SF_SINGLE_UNICAST_AGING_TIMER_DEF 400 /* 0.4 mSec */ #define SF_AGG_UNICAST_IDLE_TIMER_DEF 160 /* 150 uSec */ #define SF_AGG_UNICAST_AGING_TIMER_DEF 400 /* 0.4 mSec */ #define SF_MCAST_IDLE_TIMER_DEF 160 /* 150 mSec */ #define SF_MCAST_AGING_TIMER_DEF 400 /* 0.4 mSec */ #define SF_BA_IDLE_TIMER_DEF 160 /* 150 uSec */ #define SF_BA_AGING_TIMER_DEF 400 /* 0.4 mSec */ #define SF_TX_RE_IDLE_TIMER_DEF 160 /* 150 uSec */ #define SF_TX_RE_AGING_TIMER_DEF 400 /* 0.4 mSec */ /* SF Scenarios timers for BSS MAC configuration (aligned to 32 uSec) */ #define SF_SINGLE_UNICAST_IDLE_TIMER 320 /* 300 uSec */ #define SF_SINGLE_UNICAST_AGING_TIMER 2016 /* 2 mSec */ #define SF_AGG_UNICAST_IDLE_TIMER 320 /* 300 uSec */ #define SF_AGG_UNICAST_AGING_TIMER 2016 /* 2 mSec */ #define SF_MCAST_IDLE_TIMER 2016 /* 2 mSec */ #define SF_MCAST_AGING_TIMER 10016 /* 10 mSec */ #define SF_BA_IDLE_TIMER 320 /* 300 uSec */ #define SF_BA_AGING_TIMER 2016 /* 2 mSec */ #define SF_TX_RE_IDLE_TIMER 320 /* 300 uSec */ #define SF_TX_RE_AGING_TIMER 2016 /* 2 mSec */ #define SF_LONG_DELAY_AGING_TIMER 1000000 /* 1 Sec */ #define SF_CFG_DUMMY_NOTIF_OFF BIT(16) /** * struct iwl_sf_cfg_cmd - Smart Fifo configuration command. * @state: smart fifo state, types listed in &enum iwl_sf_state. * @watermark: Minimum allowed available free space in RXF for transient state. * @long_delay_timeouts: aging and idle timer values for each scenario * in long delay state. * @full_on_timeouts: timer values for each scenario in full on state. */ struct iwl_sf_cfg_cmd { __le32 state; __le32 watermark[SF_TRANSIENT_STATES_NUMBER]; __le32 long_delay_timeouts[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES]; __le32 full_on_timeouts[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES]; } __packed; /* SF_CFG_API_S_VER_2 */ #endif /* __iwl_fw_api_sf_h__ */ backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/fw/api/soc.h000066400000000000000000000067201351064634500267160ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. 
* * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #ifndef __iwl_fw_api_soc_h__ #define __iwl_fw_api_soc_h__ /* type of devices for defining SOC latency */ enum iwl_soc_device_types { SOC_CONFIG_CMD_INTEGRATED = 0x0, SOC_CONFIG_CMD_DISCRETE = 0x1, }; /** * struct iwl_soc_configuration_cmd - Set device stabilization latency * * @device_type: the device type as defined in &enum iwl_soc_device_types * @soc_latency: time for SOC to ensure stable power & XTAL */ struct iwl_soc_configuration_cmd { __le32 device_type; __le32 soc_latency; } __packed; /* SOC_CONFIGURATION_CMD_S_VER_1 */ #endif /* __iwl_fw_api_soc_h__ */ backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h000066400000000000000000000514531351064634500267240ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*****************************************************************************/ #ifndef __iwl_fw_api_sta_h__ #define __iwl_fw_api_sta_h__ /** * enum iwl_sta_flags - flags for the ADD_STA host command * @STA_FLG_REDUCED_TX_PWR_CTRL: reduced TX power (control frames) * @STA_FLG_REDUCED_TX_PWR_DATA: reduced TX power (data frames) * @STA_FLG_DISABLE_TX: set if TX should be disabled * @STA_FLG_PS: set if STA is in Power Save * @STA_FLG_DRAIN_FLOW: drain flow * @STA_FLG_PAN: STA is for PAN interface * @STA_FLG_CLASS_AUTH: station is authenticated * @STA_FLG_CLASS_ASSOC: station is associated * @STA_FLG_RTS_MIMO_PROT: station requires RTS MIMO protection (dynamic SMPS) * @STA_FLG_MAX_AGG_SIZE_MSK: maximal size for A-MPDU (mask) * @STA_FLG_MAX_AGG_SIZE_SHIFT: maximal size for A-MPDU (bit shift) * @STA_FLG_MAX_AGG_SIZE_8K: maximal size for A-MPDU (8k supported) * @STA_FLG_MAX_AGG_SIZE_16K: maximal size for A-MPDU (16k supported) * @STA_FLG_MAX_AGG_SIZE_32K: maximal size for A-MPDU (32k supported) * @STA_FLG_MAX_AGG_SIZE_64K: maximal size for A-MPDU (64k supported) * @STA_FLG_MAX_AGG_SIZE_128K: maximal size for A-MPDU (128k supported) * @STA_FLG_MAX_AGG_SIZE_256K: maximal size for A-MPDU (256k supported) * @STA_FLG_MAX_AGG_SIZE_512K: maximal size for A-MPDU (512k supported) * @STA_FLG_MAX_AGG_SIZE_1024K: maximal size for A-MPDU (1024k supported) * @STA_FLG_AGG_MPDU_DENS_MSK: maximal MPDU density for Tx aggregation * @STA_FLG_FAT_EN_MSK: support for channel width (for Tx). This flag is * initialised by driver and can be updated by fw upon reception of * action frames that can change the channel width. When cleared the fw * will send all the frames in 20MHz even when FAT channel is requested. * @STA_FLG_FAT_EN_20MHZ: no wide channels are supported, only 20 MHz * @STA_FLG_FAT_EN_40MHZ: wide channels up to 40 MHz supported * @STA_FLG_FAT_EN_80MHZ: wide channels up to 80 MHz supported * @STA_FLG_FAT_EN_160MHZ: wide channels up to 160 MHz supported * @STA_FLG_MIMO_EN_MSK: support for MIMO. This flag is initialised by the * driver and can be updated by fw upon reception of action frames. 
* @STA_FLG_MIMO_EN_SISO: no support for MIMO * @STA_FLG_MIMO_EN_MIMO2: 2 streams supported * @STA_FLG_MIMO_EN_MIMO3: 3 streams supported * @STA_FLG_AGG_MPDU_DENS_MSK: A-MPDU density (mask) * @STA_FLG_AGG_MPDU_DENS_SHIFT: A-MPDU density (bit shift) * @STA_FLG_AGG_MPDU_DENS_2US: A-MPDU density (2 usec gap) * @STA_FLG_AGG_MPDU_DENS_4US: A-MPDU density (4 usec gap) * @STA_FLG_AGG_MPDU_DENS_8US: A-MPDU density (8 usec gap) * @STA_FLG_AGG_MPDU_DENS_16US: A-MPDU density (16 usec gap) */ enum iwl_sta_flags { STA_FLG_REDUCED_TX_PWR_CTRL = BIT(3), STA_FLG_REDUCED_TX_PWR_DATA = BIT(6), STA_FLG_DISABLE_TX = BIT(4), STA_FLG_PS = BIT(8), STA_FLG_DRAIN_FLOW = BIT(12), STA_FLG_PAN = BIT(13), STA_FLG_CLASS_AUTH = BIT(14), STA_FLG_CLASS_ASSOC = BIT(15), STA_FLG_RTS_MIMO_PROT = BIT(17), STA_FLG_MAX_AGG_SIZE_SHIFT = 19, STA_FLG_MAX_AGG_SIZE_8K = (0 << STA_FLG_MAX_AGG_SIZE_SHIFT), STA_FLG_MAX_AGG_SIZE_16K = (1 << STA_FLG_MAX_AGG_SIZE_SHIFT), STA_FLG_MAX_AGG_SIZE_32K = (2 << STA_FLG_MAX_AGG_SIZE_SHIFT), STA_FLG_MAX_AGG_SIZE_64K = (3 << STA_FLG_MAX_AGG_SIZE_SHIFT), STA_FLG_MAX_AGG_SIZE_128K = (4 << STA_FLG_MAX_AGG_SIZE_SHIFT), STA_FLG_MAX_AGG_SIZE_256K = (5 << STA_FLG_MAX_AGG_SIZE_SHIFT), STA_FLG_MAX_AGG_SIZE_512K = (6 << STA_FLG_MAX_AGG_SIZE_SHIFT), STA_FLG_MAX_AGG_SIZE_1024K = (7 << STA_FLG_MAX_AGG_SIZE_SHIFT), STA_FLG_MAX_AGG_SIZE_MSK = (7 << STA_FLG_MAX_AGG_SIZE_SHIFT), STA_FLG_AGG_MPDU_DENS_SHIFT = 23, STA_FLG_AGG_MPDU_DENS_2US = (4 << STA_FLG_AGG_MPDU_DENS_SHIFT), STA_FLG_AGG_MPDU_DENS_4US = (5 << STA_FLG_AGG_MPDU_DENS_SHIFT), STA_FLG_AGG_MPDU_DENS_8US = (6 << STA_FLG_AGG_MPDU_DENS_SHIFT), STA_FLG_AGG_MPDU_DENS_16US = (7 << STA_FLG_AGG_MPDU_DENS_SHIFT), STA_FLG_AGG_MPDU_DENS_MSK = (7 << STA_FLG_AGG_MPDU_DENS_SHIFT), STA_FLG_FAT_EN_20MHZ = (0 << 26), STA_FLG_FAT_EN_40MHZ = (1 << 26), STA_FLG_FAT_EN_80MHZ = (2 << 26), STA_FLG_FAT_EN_160MHZ = (3 << 26), STA_FLG_FAT_EN_MSK = (3 << 26), STA_FLG_MIMO_EN_SISO = (0 << 28), STA_FLG_MIMO_EN_MIMO2 = (1 << 28), STA_FLG_MIMO_EN_MIMO3 = (2 << 28), STA_FLG_MIMO_EN_MSK = (3 << 28), }; /** * enum iwl_sta_key_flag - key flags for the ADD_STA host command * @STA_KEY_FLG_NO_ENC: no encryption * @STA_KEY_FLG_WEP: WEP encryption algorithm * @STA_KEY_FLG_CCM: CCMP encryption algorithm * @STA_KEY_FLG_TKIP: TKIP encryption algorithm * @STA_KEY_FLG_EXT: extended cipher algorithm (depends on the FW support) * @STA_KEY_FLG_GCMP: GCMP encryption algorithm * @STA_KEY_FLG_CMAC: CMAC encryption algorithm * @STA_KEY_FLG_ENC_UNKNOWN: unknown encryption algorithm * @STA_KEY_FLG_EN_MSK: mask for encryption algorithm value * @STA_KEY_FLG_WEP_KEY_MAP: wep is either a group key (0 - legacy WEP) or from * station info array (1 - n 1X mode) * @STA_KEY_FLG_KEYID_MSK: the index of the key * @STA_KEY_FLG_KEYID_POS: key index bit position * @STA_KEY_NOT_VALID: key is invalid * @STA_KEY_FLG_WEP_13BYTES: set for 13 bytes WEP key * @STA_KEY_FLG_KEY_32BYTES: for non-wep key set for 32 bytes key * @STA_KEY_MULTICAST: set for multicast key * @STA_KEY_MFP: key is used for Management Frame Protection */ enum iwl_sta_key_flag { STA_KEY_FLG_NO_ENC = (0 << 0), STA_KEY_FLG_WEP = (1 << 0), STA_KEY_FLG_CCM = (2 << 0), STA_KEY_FLG_TKIP = (3 << 0), STA_KEY_FLG_EXT = (4 << 0), STA_KEY_FLG_GCMP = (5 << 0), STA_KEY_FLG_CMAC = (6 << 0), STA_KEY_FLG_ENC_UNKNOWN = (7 << 0), STA_KEY_FLG_EN_MSK = (7 << 0), STA_KEY_FLG_WEP_KEY_MAP = BIT(3), STA_KEY_FLG_KEYID_POS = 8, STA_KEY_FLG_KEYID_MSK = (3 << STA_KEY_FLG_KEYID_POS), STA_KEY_NOT_VALID = BIT(11), STA_KEY_FLG_WEP_13BYTES = BIT(12), STA_KEY_FLG_KEY_32BYTES = BIT(12),
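	/* Note: bit 12 is deliberately shared - it means "13-byte key" for WEP and "32-byte key" for all other ciphers, so only one of the two names applies to any given key_flags value. */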
STA_KEY_MULTICAST = BIT(14), STA_KEY_MFP = BIT(15), };
/** * enum iwl_sta_modify_flag - indicate to the fw which flags are being changed * @STA_MODIFY_QUEUE_REMOVAL: this command removes a queue * @STA_MODIFY_TID_DISABLE_TX: this command modifies %tid_disable_tx * @STA_MODIFY_UAPSD_ACS: this command modifies %uapsd_acs * @STA_MODIFY_ADD_BA_TID: this command modifies %add_immediate_ba_tid * @STA_MODIFY_REMOVE_BA_TID: this command modifies %remove_immediate_ba_tid * @STA_MODIFY_SLEEPING_STA_TX_COUNT: this command modifies %sleep_tx_count * @STA_MODIFY_PROT_TH: modify RTS threshold * @STA_MODIFY_QUEUES: modify the queues used by this station */
enum iwl_sta_modify_flag { STA_MODIFY_QUEUE_REMOVAL = BIT(0), STA_MODIFY_TID_DISABLE_TX = BIT(1), STA_MODIFY_UAPSD_ACS = BIT(2), STA_MODIFY_ADD_BA_TID = BIT(3), STA_MODIFY_REMOVE_BA_TID = BIT(4), STA_MODIFY_SLEEPING_STA_TX_COUNT = BIT(5), STA_MODIFY_PROT_TH = BIT(6), STA_MODIFY_QUEUES = BIT(7), };
/** * enum iwl_sta_mode - station command mode * @STA_MODE_ADD: add new station * @STA_MODE_MODIFY: modify the station */
enum iwl_sta_mode { STA_MODE_ADD = 0, STA_MODE_MODIFY = 1, };
/** * enum iwl_sta_sleep_flag - type of sleep of the station * @STA_SLEEP_STATE_AWAKE: station is awake * @STA_SLEEP_STATE_PS_POLL: station is PS-polling * @STA_SLEEP_STATE_UAPSD: station uses U-APSD * @STA_SLEEP_STATE_MOREDATA: set more-data bit on * (last) released frame */
enum iwl_sta_sleep_flag { STA_SLEEP_STATE_AWAKE = 0, STA_SLEEP_STATE_PS_POLL = BIT(0), STA_SLEEP_STATE_UAPSD = BIT(1), STA_SLEEP_STATE_MOREDATA = BIT(2), };
#define STA_KEY_MAX_NUM (16) #define STA_KEY_IDX_INVALID (0xff) #define STA_KEY_MAX_DATA_KEY_NUM (4) #define IWL_MAX_GLOBAL_KEYS (4) #define STA_KEY_LEN_WEP40 (5) #define STA_KEY_LEN_WEP104 (13)
/** * struct iwl_mvm_keyinfo - key information * @key_flags: type &enum iwl_sta_key_flag * @tkip_rx_tsc_byte2: TSC[2] for key mix ph1 detection * @reserved1: reserved * @tkip_rx_ttak: 10-byte unicast TKIP TTAK for Rx * @key_offset: key offset in the fw's key table * @reserved2: reserved * @key: 16-byte unicast decryption key * @tx_secur_seq_cnt: initial RSC / PN needed for replay check * @hw_tkip_mic_rx_key: byte: MIC Rx Key - used for TKIP only * @hw_tkip_mic_tx_key: byte: MIC Tx Key - used for TKIP only */
struct iwl_mvm_keyinfo { __le16 key_flags; u8 tkip_rx_tsc_byte2; u8 reserved1; __le16 tkip_rx_ttak[5]; u8 key_offset; u8 reserved2; u8 key[16]; __le64 tx_secur_seq_cnt; __le64 hw_tkip_mic_rx_key; __le64 hw_tkip_mic_tx_key; } __packed;
#define IWL_ADD_STA_STATUS_MASK 0xFF #define IWL_ADD_STA_BAID_VALID_MASK 0x8000 #define IWL_ADD_STA_BAID_MASK 0x7F00 #define IWL_ADD_STA_BAID_SHIFT 8
/** * struct iwl_mvm_add_sta_cmd_v7 - Add/modify a station in the fw's sta table. * ( REPLY_ADD_STA = 0x18 ) * @add_modify: see &enum iwl_sta_mode * @awake_acs: ACs to transmit data on while station is sleeping (for U-APSD) * @tid_disable_tx: is tid BIT(tid) enabled for Tx. Clear BIT(x) to enable * AMPDU for tid x. Set %STA_MODIFY_TID_DISABLE_TX to change this field. * @mac_id_n_color: the Mac context this station belongs to, * see &enum iwl_ctxt_id_and_color * @addr: station's MAC address * @reserved2: reserved * @sta_id: index of station in uCode's station table * @modify_mask: STA_MODIFY_*, selects which parameters to modify vs. leave * alone. 1 - modify, 0 - don't change.
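 *	For example (an illustrative reading of the fields above, not taken
 *	from driver code): setting %STA_MODIFY_TID_DISABLE_TX here and
 *	clearing BIT(5) in @tid_disable_tx enables A-MPDU on TID 5.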
* @reserved3: reserved * @station_flags: look at &enum iwl_sta_flags * @station_flags_msk: which of %station_flags have changed, * also &enum iwl_sta_flags * @add_immediate_ba_tid: tid for which to add block-ack support (Rx) * Set %STA_MODIFY_ADD_BA_TID to use this field, and also set * add_immediate_ba_ssn. * @remove_immediate_ba_tid: tid for which to remove block-ack support (Rx) * Set %STA_MODIFY_REMOVE_BA_TID to use this field * @add_immediate_ba_ssn: ssn for the Rx block-ack session. Used together with * add_immediate_ba_tid. * @sleep_tx_count: number of packets to transmit to station even though it is * asleep. Used to synchronise PS-poll and u-APSD responses while ucode * keeps track of STA sleep state. * @sleep_state_flags: Look at &enum iwl_sta_sleep_flag. * @assoc_id: assoc_id to be sent in VHT PLCP (9-bit), for grp use 0, for AP * mac-addr. * @beamform_flags: beam forming controls * @tfd_queue_msk: tfd queues used by this station * * The device contains an internal table of per-station information, with info * on security keys, aggregation parameters, and Tx rates for initial Tx * attempt and any retries (set by REPLY_TX_LINK_QUALITY_CMD). * * ADD_STA sets up the table entry for one station, either creating a new * entry, or modifying a pre-existing one. */
struct iwl_mvm_add_sta_cmd_v7 { u8 add_modify; u8 awake_acs; __le16 tid_disable_tx; __le32 mac_id_n_color; u8 addr[ETH_ALEN]; /* _STA_ID_MODIFY_INFO_API_S_VER_1 */ __le16 reserved2; u8 sta_id; u8 modify_mask; __le16 reserved3; __le32 station_flags; __le32 station_flags_msk; u8 add_immediate_ba_tid; u8 remove_immediate_ba_tid; __le16 add_immediate_ba_ssn; __le16 sleep_tx_count; __le16 sleep_state_flags; __le16 assoc_id; __le16 beamform_flags; __le32 tfd_queue_msk; } __packed; /* ADD_STA_CMD_API_S_VER_7 */
/** * enum iwl_sta_type - FW station types * ( REPLY_ADD_STA = 0x18 ) * @IWL_STA_LINK: Link station - normal RX and TX traffic. * @IWL_STA_GENERAL_PURPOSE: General purpose. In AP mode used for beacons * and probe responses. * @IWL_STA_MULTICAST: multicast traffic. * @IWL_STA_TDLS_LINK: TDLS link station * @IWL_STA_AUX_ACTIVITY: auxiliary station (scan, ROC and so on). */
enum iwl_sta_type { IWL_STA_LINK, IWL_STA_GENERAL_PURPOSE, IWL_STA_MULTICAST, IWL_STA_TDLS_LINK, IWL_STA_AUX_ACTIVITY, };
/** * struct iwl_mvm_add_sta_cmd - Add/modify a station in the fw's sta table. * ( REPLY_ADD_STA = 0x18 ) * @add_modify: see &enum iwl_sta_mode * @awake_acs: ACs to transmit data on while station is sleeping (for U-APSD) * @tid_disable_tx: is tid BIT(tid) enabled for Tx. Clear BIT(x) to enable * AMPDU for tid x. Set %STA_MODIFY_TID_DISABLE_TX to change this field. * @mac_id_n_color: the Mac context this station belongs to, * see &enum iwl_ctxt_id_and_color * @addr: station's MAC address * @reserved2: reserved * @sta_id: index of station in uCode's station table * @modify_mask: STA_MODIFY_*, selects which parameters to modify vs. leave * alone. 1 - modify, 0 - don't change. * @reserved3: reserved * @station_flags: look at &enum iwl_sta_flags * @station_flags_msk: which of %station_flags have changed, * also &enum iwl_sta_flags * @add_immediate_ba_tid: tid for which to add block-ack support (Rx) * Set %STA_MODIFY_ADD_BA_TID to use this field, and also set * add_immediate_ba_ssn. * @remove_immediate_ba_tid: tid for which to remove block-ack support (Rx) * Set %STA_MODIFY_REMOVE_BA_TID to use this field * @add_immediate_ba_ssn: ssn for the Rx block-ack session. Used together with * add_immediate_ba_tid.
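 *	For example (illustrative values only): to open an Rx block-ack
 *	session on TID 2 starting at SSN 100, set %STA_MODIFY_ADD_BA_TID in
 *	@modify_mask, @add_immediate_ba_tid = 2 and
 *	@add_immediate_ba_ssn = cpu_to_le16(100).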
* @sleep_tx_count: number of packets to transmit to station even though it is * asleep. Used to synchronise PS-poll and u-APSD responses while ucode * keeps track of STA sleep state. * @station_type: type of this station. See &enum iwl_sta_type. * @sleep_state_flags: Look at &enum iwl_sta_sleep_flag. * @assoc_id: assoc_id to be sent in VHT PLCP (9-bit), for grp use 0, for AP * mac-addr. * @beamform_flags: beam forming controls * @tfd_queue_msk: tfd queues used by this station. * Obsolete for new TX API (9 and above). * @rx_ba_window: aggregation window size * @sp_length: the size of the SP in actual number of frames * @uapsd_acs: 4 LS bits are trigger enabled ACs, 4 MS bits are the deliver * enabled ACs. * * The device contains an internal table of per-station information, with info * on security keys, aggregation parameters, and Tx rates for initial Tx * attempt and any retries (set by REPLY_TX_LINK_QUALITY_CMD). * * ADD_STA sets up the table entry for one station, either creating a new * entry, or modifying a pre-existing one. */
struct iwl_mvm_add_sta_cmd { u8 add_modify; u8 awake_acs; __le16 tid_disable_tx; __le32 mac_id_n_color; u8 addr[ETH_ALEN]; /* _STA_ID_MODIFY_INFO_API_S_VER_1 */ __le16 reserved2; u8 sta_id; u8 modify_mask; __le16 reserved3; __le32 station_flags; __le32 station_flags_msk; u8 add_immediate_ba_tid; u8 remove_immediate_ba_tid; __le16 add_immediate_ba_ssn; __le16 sleep_tx_count; u8 sleep_state_flags; u8 station_type; __le16 assoc_id; __le16 beamform_flags; __le32 tfd_queue_msk; __le16 rx_ba_window; u8 sp_length; u8 uapsd_acs; } __packed; /* ADD_STA_CMD_API_S_VER_10 */
/** * struct iwl_mvm_add_sta_key_common - add/modify sta key common part * ( REPLY_ADD_STA_KEY = 0x17 ) * @sta_id: index of station in uCode's station table * @key_offset: key offset in key storage * @key_flags: type &enum iwl_sta_key_flag * @key: key material data * @rx_secur_seq_cnt: RX security sequence counter for the key */
struct iwl_mvm_add_sta_key_common { u8 sta_id; u8 key_offset; __le16 key_flags; u8 key[32]; u8 rx_secur_seq_cnt[16]; } __packed;
/** * struct iwl_mvm_add_sta_key_cmd_v1 - add/modify sta key * @common: see &struct iwl_mvm_add_sta_key_common * @tkip_rx_tsc_byte2: TSC[2] for key mix ph1 detection * @reserved: reserved * @tkip_rx_ttak: 10-byte unicast TKIP TTAK for Rx */
struct iwl_mvm_add_sta_key_cmd_v1 { struct iwl_mvm_add_sta_key_common common; u8 tkip_rx_tsc_byte2; u8 reserved; __le16 tkip_rx_ttak[5]; } __packed; /* ADD_MODIFY_STA_KEY_API_S_VER_1 */
/** * struct iwl_mvm_add_sta_key_cmd - add/modify sta key * @common: see &struct iwl_mvm_add_sta_key_common * @rx_mic_key: TKIP RX unicast or multicast key * @tx_mic_key: TKIP TX key * @transmit_seq_cnt: TSC, transmit packet number */
struct iwl_mvm_add_sta_key_cmd { struct iwl_mvm_add_sta_key_common common; __le64 rx_mic_key; __le64 tx_mic_key; __le64 transmit_seq_cnt; } __packed; /* ADD_MODIFY_STA_KEY_API_S_VER_2 */
/** * enum iwl_mvm_add_sta_rsp_status - status in the response to ADD_STA command * @ADD_STA_SUCCESS: operation was executed successfully * @ADD_STA_STATIONS_OVERLOAD: no room left in the fw's station table * @ADD_STA_IMMEDIATE_BA_FAILURE: can't add Rx block ack session * @ADD_STA_MODIFY_NON_EXISTING_STA: driver requested to modify a station that * doesn't exist.
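 * * Only the low byte of the response status carries these values (see * %IWL_ADD_STA_STATUS_MASK above); a minimal sketch of such a check, * illustrative rather than quoted from the driver: * (le32_to_cpu(status) & IWL_ADD_STA_STATUS_MASK) == ADD_STA_SUCCESS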
*/ enum iwl_mvm_add_sta_rsp_status { ADD_STA_SUCCESS = 0x1, ADD_STA_STATIONS_OVERLOAD = 0x2, ADD_STA_IMMEDIATE_BA_FAILURE = 0x4, ADD_STA_MODIFY_NON_EXISTING_STA = 0x8, };
/** * struct iwl_mvm_rm_sta_cmd - Remove a station from the fw's station table * ( REMOVE_STA = 0x19 ) * @sta_id: the station id of the station to be removed * @reserved: reserved */
struct iwl_mvm_rm_sta_cmd { u8 sta_id; u8 reserved[3]; } __packed; /* REMOVE_STA_CMD_API_S_VER_2 */
/** * struct iwl_mvm_mgmt_mcast_key_cmd_v1 * ( MGMT_MCAST_KEY = 0x1f ) * @ctrl_flags: &enum iwl_sta_key_flag * @igtk: IGTK key material * @k1: unused * @k2: unused * @sta_id: station ID that supports IGTK * @key_id: key ID * @receive_seq_cnt: initial RSC/PN needed for replay check */
struct iwl_mvm_mgmt_mcast_key_cmd_v1 { __le32 ctrl_flags; u8 igtk[16]; u8 k1[16]; u8 k2[16]; __le32 key_id; __le32 sta_id; __le64 receive_seq_cnt; } __packed; /* SEC_MGMT_MULTICAST_KEY_CMD_API_S_VER_1 */
/** * struct iwl_mvm_mgmt_mcast_key_cmd * ( MGMT_MCAST_KEY = 0x1f ) * @ctrl_flags: &enum iwl_sta_key_flag * @igtk: IGTK master key * @sta_id: station ID that supports IGTK * @key_id: key ID * @receive_seq_cnt: initial RSC/PN needed for replay check */
struct iwl_mvm_mgmt_mcast_key_cmd { __le32 ctrl_flags; u8 igtk[32]; __le32 key_id; __le32 sta_id; __le64 receive_seq_cnt; } __packed; /* SEC_MGMT_MULTICAST_KEY_CMD_API_S_VER_2 */
struct iwl_mvm_wep_key { u8 key_index; u8 key_offset; __le16 reserved1; u8 key_size; u8 reserved2[3]; u8 key[16]; } __packed;
struct iwl_mvm_wep_key_cmd { __le32 mac_id_n_color; u8 num_keys; u8 decryption_type; u8 flags; u8 reserved; struct iwl_mvm_wep_key wep_key[0]; } __packed; /* SEC_CURR_WEP_KEY_CMD_API_S_VER_2 */
/** * struct iwl_mvm_eosp_notification - EOSP notification from firmware * @remain_frame_count: # of frames remaining, non-zero if SP was cut * short by GO absence * @sta_id: station ID */
struct iwl_mvm_eosp_notification { __le32 remain_frame_count; __le32 sta_id; } __packed; /* UAPSD_EOSP_NTFY_API_S_VER_1 */
#endif /* __iwl_fw_api_sta_h__ */ backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h000066400000000000000000000360641351064634500272720ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright (C) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright (C) 2018 Intel Corporation * All rights reserved.
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #ifndef __iwl_fw_api_stats_h__ #define __iwl_fw_api_stats_h__ #include "mac.h" struct mvm_statistics_dbg { __le32 burst_check; __le32 burst_count; __le32 wait_for_silence_timeout_cnt; u8 reserved[12]; } __packed; /* STATISTICS_DEBUG_API_S_VER_2 */ struct mvm_statistics_div { __le32 tx_on_a; __le32 tx_on_b; __le32 exec_time; __le32 probe_time; __le32 rssi_ant; __le32 reserved2; } __packed; /* STATISTICS_SLOW_DIV_API_S_VER_2 */ /** * struct mvm_statistics_rx_non_phy * @bogus_cts: CTS received when not expecting CTS * @bogus_ack: ACK received when not expecting ACK * @non_channel_beacons: beacons with our bss id but not on our serving channel * @channel_beacons: beacons with our bss id and in our serving channel * @num_missed_bcon: number of missed beacons * @adc_rx_saturation_time: count in 0.8us units the time the ADC was in * saturation * @ina_detection_search_time: total time (in 0.8us) searched for INA * @beacon_silence_rssi_a: RSSI silence after beacon frame * @beacon_silence_rssi_b: RSSI silence after beacon frame * @beacon_silence_rssi_c: RSSI silence after beacon frame * @interference_data_flag: flag for interference data availability. 1 when data * is available. 
* @channel_load: counts RX Enable time in uSec * @beacon_rssi_a: beacon RSSI on antenna A * @beacon_rssi_b: beacon RSSI on antenna B * @beacon_rssi_c: beacon RSSI on antenna C * @beacon_energy_a: beacon energy on antenna A * @beacon_energy_b: beacon energy on antenna B * @beacon_energy_c: beacon energy on antenna C * @num_bt_kills: number of BT "kills" (frame TX aborts) * @mac_id: mac ID */
struct mvm_statistics_rx_non_phy { __le32 bogus_cts; __le32 bogus_ack; __le32 non_channel_beacons; __le32 channel_beacons; __le32 num_missed_bcon; __le32 adc_rx_saturation_time; __le32 ina_detection_search_time; __le32 beacon_silence_rssi_a; __le32 beacon_silence_rssi_b; __le32 beacon_silence_rssi_c; __le32 interference_data_flag; __le32 channel_load; __le32 beacon_rssi_a; __le32 beacon_rssi_b; __le32 beacon_rssi_c; __le32 beacon_energy_a; __le32 beacon_energy_b; __le32 beacon_energy_c; __le32 num_bt_kills; __le32 mac_id; } __packed; /* STATISTICS_RX_NON_PHY_API_S_VER_4 */
struct mvm_statistics_rx_non_phy_v3 { __le32 bogus_cts; /* CTS received when not expecting CTS */ __le32 bogus_ack; /* ACK received when not expecting ACK */ __le32 non_bssid_frames; /* number of frames with BSSID that * doesn't belong to the STA BSSID */ __le32 filtered_frames; /* count frames that were dumped in the * filtering process */ __le32 non_channel_beacons; /* beacons with our bss id but not on * our serving channel */ __le32 channel_beacons; /* beacons with our bss id and in our * serving channel */ __le32 num_missed_bcon; /* number of missed beacons */ __le32 adc_rx_saturation_time; /* count in 0.8us units the time the * ADC was in saturation */ __le32 ina_detection_search_time;/* total time (in 0.8us) searched * for INA */ __le32 beacon_silence_rssi_a; /* RSSI silence after beacon frame */ __le32 beacon_silence_rssi_b; /* RSSI silence after beacon frame */ __le32 beacon_silence_rssi_c; /* RSSI silence after beacon frame */ __le32 interference_data_flag; /* flag for interference data * availability. 1 when data is * available.
*/ __le32 channel_load; /* counts RX Enable time in uSec */ __le32 dsp_false_alarms; /* DSP false alarm (both OFDM * and CCK) counter */ __le32 beacon_rssi_a; __le32 beacon_rssi_b; __le32 beacon_rssi_c; __le32 beacon_energy_a; __le32 beacon_energy_b; __le32 beacon_energy_c; __le32 num_bt_kills; __le32 mac_id; __le32 directed_data_mpdu; } __packed; /* STATISTICS_RX_NON_PHY_API_S_VER_3 */ struct mvm_statistics_rx_phy { __le32 unresponded_rts; __le32 rxe_frame_lmt_overrun; __le32 sent_ba_rsp_cnt; __le32 dsp_self_kill; __le32 reserved; } __packed; /* STATISTICS_RX_PHY_API_S_VER_3 */ struct mvm_statistics_rx_phy_v2 { __le32 ina_cnt; __le32 fina_cnt; __le32 plcp_err; __le32 crc32_err; __le32 overrun_err; __le32 early_overrun_err; __le32 crc32_good; __le32 false_alarm_cnt; __le32 fina_sync_err_cnt; __le32 sfd_timeout; __le32 fina_timeout; __le32 unresponded_rts; __le32 rxe_frame_lmt_overrun; __le32 sent_ack_cnt; __le32 sent_cts_cnt; __le32 sent_ba_rsp_cnt; __le32 dsp_self_kill; __le32 mh_format_err; __le32 re_acq_main_rssi_sum; __le32 reserved; } __packed; /* STATISTICS_RX_PHY_API_S_VER_2 */ struct mvm_statistics_rx_ht_phy_v1 { __le32 plcp_err; __le32 overrun_err; __le32 early_overrun_err; __le32 crc32_good; __le32 crc32_err; __le32 mh_format_err; __le32 agg_crc32_good; __le32 agg_mpdu_cnt; __le32 agg_cnt; __le32 unsupport_mcs; } __packed; /* STATISTICS_HT_RX_PHY_API_S_VER_1 */ struct mvm_statistics_rx_ht_phy { __le32 mh_format_err; __le32 agg_mpdu_cnt; __le32 agg_cnt; __le32 unsupport_mcs; } __packed; /* STATISTICS_HT_RX_PHY_API_S_VER_2 */ struct mvm_statistics_tx_non_phy_v3 { __le32 preamble_cnt; __le32 rx_detected_cnt; __le32 bt_prio_defer_cnt; __le32 bt_prio_kill_cnt; __le32 few_bytes_cnt; __le32 cts_timeout; __le32 ack_timeout; __le32 expected_ack_cnt; __le32 actual_ack_cnt; __le32 dump_msdu_cnt; __le32 burst_abort_next_frame_mismatch_cnt; __le32 burst_abort_missing_next_frame_cnt; __le32 cts_timeout_collision; __le32 ack_or_ba_timeout_collision; } __packed; /* STATISTICS_TX_NON_PHY_API_S_VER_3 */ struct mvm_statistics_tx_non_phy { __le32 bt_prio_defer_cnt; __le32 bt_prio_kill_cnt; __le32 few_bytes_cnt; __le32 cts_timeout; __le32 ack_timeout; __le32 dump_msdu_cnt; __le32 burst_abort_next_frame_mismatch_cnt; __le32 burst_abort_missing_next_frame_cnt; __le32 cts_timeout_collision; __le32 ack_or_ba_timeout_collision; } __packed; /* STATISTICS_TX_NON_PHY_API_S_VER_4 */ #define MAX_CHAINS 3 struct mvm_statistics_tx_non_phy_agg { __le32 ba_timeout; __le32 ba_reschedule_frames; __le32 scd_query_agg_frame_cnt; __le32 scd_query_no_agg; __le32 scd_query_agg; __le32 scd_query_mismatch; __le32 frame_not_ready; __le32 underrun; __le32 bt_prio_kill; __le32 rx_ba_rsp_cnt; __s8 txpower[MAX_CHAINS]; __s8 reserved; __le32 reserved2; } __packed; /* STATISTICS_TX_NON_PHY_AGG_API_S_VER_1 */ struct mvm_statistics_tx_channel_width { __le32 ext_cca_narrow_ch20[1]; __le32 ext_cca_narrow_ch40[2]; __le32 ext_cca_narrow_ch80[3]; __le32 ext_cca_narrow_ch160[4]; __le32 last_tx_ch_width_indx; __le32 rx_detected_per_ch_width[4]; __le32 success_per_ch_width[4]; __le32 fail_per_ch_width[4]; }; /* STATISTICS_TX_CHANNEL_WIDTH_API_S_VER_1 */ struct mvm_statistics_tx_v4 { struct mvm_statistics_tx_non_phy_v3 general; struct mvm_statistics_tx_non_phy_agg agg; struct mvm_statistics_tx_channel_width channel_width; } __packed; /* STATISTICS_TX_API_S_VER_4 */ struct mvm_statistics_tx { struct mvm_statistics_tx_non_phy general; struct mvm_statistics_tx_non_phy_agg agg; struct mvm_statistics_tx_channel_width channel_width; } __packed; 
/* STATISTICS_TX_API_S_VER_5 */ struct mvm_statistics_bt_activity { __le32 hi_priority_tx_req_cnt; __le32 hi_priority_tx_denied_cnt; __le32 lo_priority_tx_req_cnt; __le32 lo_priority_tx_denied_cnt; __le32 hi_priority_rx_req_cnt; __le32 hi_priority_rx_denied_cnt; __le32 lo_priority_rx_req_cnt; __le32 lo_priority_rx_denied_cnt; } __packed; /* STATISTICS_BT_ACTIVITY_API_S_VER_1 */ struct mvm_statistics_general_common_v19 { __le32 radio_temperature; __le32 radio_voltage; struct mvm_statistics_dbg dbg; __le32 sleep_time; __le32 slots_out; __le32 slots_idle; __le32 ttl_timestamp; struct mvm_statistics_div slow_div; __le32 rx_enable_counter; /* * num_of_sos_states: * count the number of times we have to re-tune * in order to get out of bad PHY status */ __le32 num_of_sos_states; __le32 beacon_filtered; __le32 missed_beacons; u8 beacon_filter_average_energy; u8 beacon_filter_reason; u8 beacon_filter_current_energy; u8 beacon_filter_reserved; __le32 beacon_filter_delta_time; struct mvm_statistics_bt_activity bt_activity; __le64 rx_time; __le64 on_time_rf; __le64 on_time_scan; __le64 tx_time; } __packed; struct mvm_statistics_general_common { __le32 radio_temperature; struct mvm_statistics_dbg dbg; __le32 sleep_time; __le32 slots_out; __le32 slots_idle; __le32 ttl_timestamp; struct mvm_statistics_div slow_div; __le32 rx_enable_counter; /* * num_of_sos_states: * count the number of times we have to re-tune * in order to get out of bad PHY status */ __le32 num_of_sos_states; __le32 beacon_filtered; __le32 missed_beacons; u8 beacon_filter_average_energy; u8 beacon_filter_reason; u8 beacon_filter_current_energy; u8 beacon_filter_reserved; __le32 beacon_filter_delta_time; struct mvm_statistics_bt_activity bt_activity; __le64 rx_time; __le64 on_time_rf; __le64 on_time_scan; __le64 tx_time; } __packed; /* STATISTICS_GENERAL_API_S_VER_10 */ struct mvm_statistics_general_v8 { struct mvm_statistics_general_common_v19 common; __le32 beacon_counter[NUM_MAC_INDEX]; u8 beacon_average_energy[NUM_MAC_INDEX]; u8 reserved[4 - (NUM_MAC_INDEX % 4)]; } __packed; /* STATISTICS_GENERAL_API_S_VER_8 */ struct mvm_statistics_general { struct mvm_statistics_general_common common; __le32 beacon_counter[MAC_INDEX_AUX]; u8 beacon_average_energy[MAC_INDEX_AUX]; u8 reserved[8 - MAC_INDEX_AUX]; } __packed; /* STATISTICS_GENERAL_API_S_VER_10 */ /** * struct mvm_statistics_load - RX statistics for multi-queue devices * @air_time: accumulated air time, per mac * @byte_count: accumulated byte count, per mac * @pkt_count: accumulated packet count, per mac * @avg_energy: average RSSI, per station */ struct mvm_statistics_load { __le32 air_time[MAC_INDEX_AUX]; __le32 byte_count[MAC_INDEX_AUX]; __le32 pkt_count[MAC_INDEX_AUX]; u8 avg_energy[IWL_MVM_STATION_COUNT]; } __packed; /* STATISTICS_RX_MAC_STATION_S_VER_3 */ struct mvm_statistics_load_v1 { __le32 air_time[NUM_MAC_INDEX]; __le32 byte_count[NUM_MAC_INDEX]; __le32 pkt_count[NUM_MAC_INDEX]; u8 avg_energy[IWL_MVM_STATION_COUNT]; } __packed; /* STATISTICS_RX_MAC_STATION_S_VER_1 */ struct mvm_statistics_rx { struct mvm_statistics_rx_phy ofdm; struct mvm_statistics_rx_phy cck; struct mvm_statistics_rx_non_phy general; struct mvm_statistics_rx_ht_phy ofdm_ht; } __packed; /* STATISTICS_RX_API_S_VER_4 */ struct mvm_statistics_rx_v3 { struct mvm_statistics_rx_phy_v2 ofdm; struct mvm_statistics_rx_phy_v2 cck; struct mvm_statistics_rx_non_phy_v3 general; struct mvm_statistics_rx_ht_phy_v1 ofdm_ht; } __packed; /* STATISTICS_RX_API_S_VER_3 */ /* * STATISTICS_NOTIFICATION = 0x9d (notification only, 
not a command) * * By default, uCode issues this notification after receiving a beacon * while associated. To disable this behavior, set DISABLE_NOTIF flag in the * STATISTICS_CMD (0x9c), below. */ struct iwl_notif_statistics_v10 { __le32 flag; struct mvm_statistics_rx_v3 rx; struct mvm_statistics_tx_v4 tx; struct mvm_statistics_general_v8 general; } __packed; /* STATISTICS_NTFY_API_S_VER_10 */ struct iwl_notif_statistics_v11 { __le32 flag; struct mvm_statistics_rx_v3 rx; struct mvm_statistics_tx_v4 tx; struct mvm_statistics_general_v8 general; struct mvm_statistics_load_v1 load_stats; } __packed; /* STATISTICS_NTFY_API_S_VER_11 */ struct iwl_notif_statistics { __le32 flag; struct mvm_statistics_rx rx; struct mvm_statistics_tx tx; struct mvm_statistics_general general; struct mvm_statistics_load load_stats; } __packed; /* STATISTICS_NTFY_API_S_VER_13 */ /** * enum iwl_statistics_notif_flags - flags used in statistics notification * @IWL_STATISTICS_REPLY_FLG_CLEAR: statistics were cleared after this report */ enum iwl_statistics_notif_flags { IWL_STATISTICS_REPLY_FLG_CLEAR = 0x1, }; /** * enum iwl_statistics_cmd_flags - flags used in statistics command * @IWL_STATISTICS_FLG_CLEAR: request to clear statistics after the report * that's sent after this command * @IWL_STATISTICS_FLG_DISABLE_NOTIF: disable unilateral statistics * notifications */ enum iwl_statistics_cmd_flags { IWL_STATISTICS_FLG_CLEAR = 0x1, IWL_STATISTICS_FLG_DISABLE_NOTIF = 0x2, }; /** * struct iwl_statistics_cmd - statistics config command * @flags: flags from &enum iwl_statistics_cmd_flags */ struct iwl_statistics_cmd { __le32 flags; } __packed; /* STATISTICS_CMD_API_S_VER_1 */ #endif /* __iwl_fw_api_stats_h__ */ backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h000066400000000000000000000202661351064634500271010ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. 
* * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/
#ifndef __iwl_fw_api_tdls_h__ #define __iwl_fw_api_tdls_h__
#include "fw/api/tx.h" #include "fw/api/phy-ctxt.h"
#define IWL_MVM_TDLS_STA_COUNT 4
/* Type of TDLS request */ enum iwl_tdls_channel_switch_type { TDLS_SEND_CHAN_SW_REQ = 0, TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH, TDLS_MOVE_CH, }; /* TDLS_STA_CHANNEL_SWITCH_CMD_TYPE_API_E_VER_1 */
/** * struct iwl_tdls_channel_switch_timing - Switch timing in TDLS channel-switch * @frame_timestamp: GP2 timestamp of channel-switch request/response packet * received from peer * @max_offchan_duration: the number of microseconds out of a DTIM that is * given to the TDLS off-channel communication. For instance if the DTIM is * 200TU and the TDLS peer is to be given 25% of the time, the value * given will be 50TU, or 50 * 1024 if translated into microseconds.
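 *	(in other words, the durations in this structure are carried in
 *	microseconds, with 1 TU = 1024 usec)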
* @switch_time: switch time the peer sent in its channel switch timing IE * @switch_timeout: switch timeout the peer sent in its channel switch timing IE */
struct iwl_tdls_channel_switch_timing { __le32 frame_timestamp; /* GP2 time of peer packet Rx */ __le32 max_offchan_duration; /* given in micro-seconds */ __le32 switch_time; /* given in micro-seconds */ __le32 switch_timeout; /* given in micro-seconds */ } __packed; /* TDLS_STA_CHANNEL_SWITCH_TIMING_DATA_API_S_VER_1 */
#define IWL_TDLS_CH_SW_FRAME_MAX_SIZE 200
/** * struct iwl_tdls_channel_switch_frame - TDLS channel switch frame template * * A template representing a TDLS channel-switch request or response frame * * @switch_time_offset: offset to the channel switch timing IE in the template * @tx_cmd: Tx parameters for the frame * @data: frame data */
struct iwl_tdls_channel_switch_frame { __le32 switch_time_offset; struct iwl_tx_cmd tx_cmd; u8 data[IWL_TDLS_CH_SW_FRAME_MAX_SIZE]; } __packed; /* TDLS_STA_CHANNEL_SWITCH_FRAME_API_S_VER_1 */
/** * struct iwl_tdls_channel_switch_cmd_tail - tail of iwl_tdls_channel_switch_cmd * * @timing: timing related data for command * @frame: channel-switch request/response template, depending on switch_type */
struct iwl_tdls_channel_switch_cmd_tail { struct iwl_tdls_channel_switch_timing timing; struct iwl_tdls_channel_switch_frame frame; } __packed;
/** * struct iwl_tdls_channel_switch_cmd - TDLS channel switch command * * The command is sent to initiate a channel switch and also in response to * incoming TDLS channel-switch request/response packets from remote peers. * * @switch_type: see &enum iwl_tdls_channel_switch_type * @peer_sta_id: station id of TDLS peer * @ci: channel we switch to * @tail: command tail */
struct iwl_tdls_channel_switch_cmd { u8 switch_type; __le32 peer_sta_id; struct iwl_fw_channel_info ci; struct iwl_tdls_channel_switch_cmd_tail tail; } __packed; /* TDLS_STA_CHANNEL_SWITCH_CMD_API_S_VER_1 */
/** * struct iwl_tdls_channel_switch_notif - TDLS channel switch start notification * * @status: non-zero on success * @offchannel_duration: duration given in microseconds * @sta_id: the peer the channel-switch is currently being performed with */
struct iwl_tdls_channel_switch_notif { __le32 status; __le32 offchannel_duration; __le32 sta_id; } __packed; /* TDLS_STA_CHANNEL_SWITCH_NTFY_API_S_VER_1 */
/** * struct iwl_tdls_sta_info - TDLS station info * * @sta_id: station id of the TDLS peer * @tx_to_peer_tid: TID reserved vs. the peer for FW based Tx * @tx_to_peer_ssn: initial SSN the FW should use for Tx on its TID vs the peer * @is_initiator: 1 if the peer is the TDLS link initiator, 0 otherwise */
struct iwl_tdls_sta_info { u8 sta_id; u8 tx_to_peer_tid; __le16 tx_to_peer_ssn; __le32 is_initiator; } __packed; /* TDLS_STA_INFO_VER_1 */
/** * struct iwl_tdls_config_cmd - TDLS basic config command * * @id_and_color: MAC id and color being configured * @tdls_peer_count: number of currently connected TDLS peers * @tx_to_ap_tid: TID reserved vs. the AP for FW based Tx * @tx_to_ap_ssn: initial SSN the FW should use for Tx on its TID vs. the AP * @sta_info: per-station info.
Only the first tdls_peer_count entries are set * @pti_req_data_offset: offset of network-level data for the PTI template * @pti_req_tx_cmd: Tx parameters for PTI request template * @pti_req_template: PTI request template data */ struct iwl_tdls_config_cmd { __le32 id_and_color; /* mac id and color */ u8 tdls_peer_count; u8 tx_to_ap_tid; __le16 tx_to_ap_ssn; struct iwl_tdls_sta_info sta_info[IWL_MVM_TDLS_STA_COUNT]; __le32 pti_req_data_offset; struct iwl_tx_cmd pti_req_tx_cmd; u8 pti_req_template[0]; } __packed; /* TDLS_CONFIG_CMD_API_S_VER_1 */ /** * struct iwl_tdls_config_sta_info_res - TDLS per-station config information * * @sta_id: station id of the TDLS peer * @tx_to_peer_last_seq: last sequence number used by FW during FW-based Tx to * the peer */ struct iwl_tdls_config_sta_info_res { __le16 sta_id; __le16 tx_to_peer_last_seq; } __packed; /* TDLS_STA_INFO_RSP_VER_1 */ /** * struct iwl_tdls_config_res - TDLS config information from FW * * @tx_to_ap_last_seq: last sequence number used by FW during FW-based Tx to AP * @sta_info: per-station TDLS config information */ struct iwl_tdls_config_res { __le32 tx_to_ap_last_seq; struct iwl_tdls_config_sta_info_res sta_info[IWL_MVM_TDLS_STA_COUNT]; } __packed; /* TDLS_CONFIG_RSP_API_S_VER_1 */ #endif /* __iwl_fw_api_tdls_h__ */ backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/fw/api/testing.h000066400000000000000000000122621351064634500276050ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2017 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #ifndef __iwl_fw_api_testing_h__ #define __iwl_fw_api_testing_h__ #define FIPS_KEY_LEN_128 16 #define FIPS_KEY_LEN_256 32 #define FIPS_MAX_KEY_LEN FIPS_KEY_LEN_256 #define FIPS_CCM_NONCE_LEN 13 #define FIPS_GCM_NONCE_LEN 12 #define FIPS_MAX_NONCE_LEN FIPS_CCM_NONCE_LEN #define FIPS_MAX_AAD_LEN 30 /** * enum iwl_fips_test_vector_flags - flags for FIPS test vector * @IWL_FIPS_TEST_VECTOR_FLAGS_AES: use AES algorithm * @IWL_FIPS_TEST_VECTOR_FLAGS_CCM: use CCM algorithm * @IWL_FIPS_TEST_VECTOR_FLAGS_GCM: use GCM algorithm * @IWL_FIPS_TEST_VECTOR_FLAGS_ENC: if set, the requested operation is * encryption. Otherwise, the requested operation is decryption. * @IWL_FIPS_TEST_VECTOR_FLAGS_KEY_256: if set, the test vector uses a * 256 bit key. Otherwise 128 bit key is used. */ enum iwl_fips_test_vector_flags { IWL_FIPS_TEST_VECTOR_FLAGS_AES = BIT(0), IWL_FIPS_TEST_VECTOR_FLAGS_CCM = BIT(1), IWL_FIPS_TEST_VECTOR_FLAGS_GCM = BIT(2), IWL_FIPS_TEST_VECTOR_FLAGS_ENC = BIT(3), IWL_FIPS_TEST_VECTOR_FLAGS_KEY_256 = BIT(5), }; /** * struct iwl_fips_test_cmd - FIPS test command for AES/CCM/GCM tests * @flags: test vector flags from &enum iwl_fips_test_vector_flags. * @payload_len: the length of the @payload field in bytes. * @aad_len: the length of the @aad field in bytes. * @key: the key used for encryption/decryption. In case a 128-bit key is used, * pad with zero. * @aad: AAD. If the AAD is shorter than the buffer, pad with zero. Only valid * for CCM/GCM tests. * @reserved: for alignment. * @nonce: nonce. Only valid for CCM/GCM tests. * @reserved2: for alignment. * @payload: the plaintext to encrypt or the cipher text to decrypt + MIC. */ struct iwl_fips_test_cmd { __le32 flags; __le32 payload_len; __le32 aad_len; u8 key[FIPS_MAX_KEY_LEN]; u8 aad[FIPS_MAX_AAD_LEN]; __le16 reserved; u8 nonce[FIPS_MAX_NONCE_LEN]; u8 reserved2[3]; u8 payload[0]; } __packed; /* AES_SEC_TEST_VECTOR_HDR_API_S_VER_1 */ /** * enum iwl_fips_test_status - FIPS test result status * @IWL_FIPS_TEST_STATUS_FAILURE: the requested operation failed. * @IWL_FIPS_TEST_STATUS_SUCCESS: the requested operation was completed * successfully. The result buffer is valid. */ enum iwl_fips_test_status { IWL_FIPS_TEST_STATUS_FAILURE, IWL_FIPS_TEST_STATUS_SUCCESS, }; /** * struct iwl_fips_test_resp - FIPS test response * @len: the length of the result in bytes. * @payload: @len bytes of response followed by status code (u32, one of * &enum iwl_fips_test_status). */ struct iwl_fips_test_resp { __le32 len; u8 payload[0]; } __packed; /* AES_SEC_TEST_VECTOR_RESP_API_S_VER_1 */ #endif backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h000066400000000000000000000337751351064634500302210ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. 
* * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* *****************************************************************************/
#ifndef __iwl_fw_api_time_event_h__ #define __iwl_fw_api_time_event_h__
#include "fw/api/phy-ctxt.h"
/* Time Event types, according to MAC type */ enum iwl_time_event_type { /* BSS Station Events */ TE_BSS_STA_AGGRESSIVE_ASSOC, TE_BSS_STA_ASSOC, TE_BSS_EAP_DHCP_PROT, TE_BSS_QUIET_PERIOD, /* P2P Device Events */ TE_P2P_DEVICE_DISCOVERABLE, TE_P2P_DEVICE_LISTEN, TE_P2P_DEVICE_ACTION_SCAN, TE_P2P_DEVICE_FULL_SCAN, /* P2P Client Events */ TE_P2P_CLIENT_AGGRESSIVE_ASSOC, TE_P2P_CLIENT_ASSOC, TE_P2P_CLIENT_QUIET_PERIOD, /* P2P GO Events */ TE_P2P_GO_ASSOC_PROT, TE_P2P_GO_REPETITIVET_NOA, TE_P2P_GO_CT_WINDOW, /* WiDi Sync Events */ TE_WIDI_TX_SYNC, /* Channel Switch NoA */ TE_CHANNEL_SWITCH_PERIOD, TE_MAX }; /* MAC_EVENT_TYPE_API_E_VER_1 */
/* Time event - defines for command API v1 */
/* * @TE_V1_FRAG_NONE: fragmentation of the time event is NOT allowed. * @TE_V1_FRAG_SINGLE: fragmentation of the time event is allowed, but only * the first fragment is scheduled. * @TE_V1_FRAG_DUAL: fragmentation of the time event is allowed, but only * the first 2 fragments are scheduled. * @TE_V1_FRAG_ENDLESS: fragmentation of the time event is allowed, and any * number of fragments are valid. * * Other than the constants defined above, specifying a fragmentation value 'x' * means that the event can be fragmented but only the first 'x' will be * scheduled. */
enum { TE_V1_FRAG_NONE = 0, TE_V1_FRAG_SINGLE = 1, TE_V1_FRAG_DUAL = 2, TE_V1_FRAG_ENDLESS = 0xffffffff };
/* If a Time Event can be fragmented, this is the max number of fragments */ #define TE_V1_FRAG_MAX_MSK 0x0fffffff /* Repeat the time event endlessly (until removed) */ #define TE_V1_REPEAT_ENDLESS 0xffffffff /* If a Time Event has bounded repetitions, this is the maximal value */ #define TE_V1_REPEAT_MAX_MSK_V1 0x0fffffff
/* Time Event dependencies: none, on another TE, or in a specific time */ enum { TE_V1_INDEPENDENT = 0, TE_V1_DEP_OTHER = BIT(0), TE_V1_DEP_TSF = BIT(1), TE_V1_EVENT_SOCIOPATHIC = BIT(2), }; /* MAC_EVENT_DEPENDENCY_POLICY_API_E_VER_2 */
/* * @TE_V1_NOTIF_NONE: no notifications * @TE_V1_NOTIF_HOST_EVENT_START: request/receive notification on event start * @TE_V1_NOTIF_HOST_EVENT_END: request/receive notification on event end * @TE_V1_NOTIF_INTERNAL_EVENT_START: internal FW use * @TE_V1_NOTIF_INTERNAL_EVENT_END: internal FW use. * @TE_V1_NOTIF_HOST_FRAG_START: request/receive notification on frag start * @TE_V1_NOTIF_HOST_FRAG_END: request/receive notification on frag end * @TE_V1_NOTIF_INTERNAL_FRAG_START: internal FW use. * @TE_V1_NOTIF_INTERNAL_FRAG_END: internal FW use. * * Supported Time event notifications configuration. * A notification (both event and fragment) includes a status indicating whether * the FW was able to schedule the event or not. For fragment start/end * notification the status is always success. There is no start/end fragment * notification for monolithic events. */
enum { TE_V1_NOTIF_NONE = 0, TE_V1_NOTIF_HOST_EVENT_START = BIT(0), TE_V1_NOTIF_HOST_EVENT_END = BIT(1), TE_V1_NOTIF_INTERNAL_EVENT_START = BIT(2), TE_V1_NOTIF_INTERNAL_EVENT_END = BIT(3), TE_V1_NOTIF_HOST_FRAG_START = BIT(4), TE_V1_NOTIF_HOST_FRAG_END = BIT(5), TE_V1_NOTIF_INTERNAL_FRAG_START = BIT(6), TE_V1_NOTIF_INTERNAL_FRAG_END = BIT(7), }; /* MAC_EVENT_ACTION_API_E_VER_2 */
/* Time event - defines for command API */
/* * @TE_V2_FRAG_NONE: fragmentation of the time event is NOT allowed.
* @TE_V2_FRAG_SINGLE: fragmentation of the time event is allowed, but only * the first fragment is scheduled. * @TE_V2_FRAG_DUAL: fragmentation of the time event is allowed, but only * the first 2 fragments are scheduled. * @TE_V2_FRAG_ENDLESS: fragmentation of the time event is allowed, and any * number of fragments are valid. * * Other than the constants defined above, specifying a fragmentation value 'x' * means that the event can be fragmented but only the first 'x' will be * scheduled. */
enum { TE_V2_FRAG_NONE = 0, TE_V2_FRAG_SINGLE = 1, TE_V2_FRAG_DUAL = 2, TE_V2_FRAG_MAX = 0xfe, TE_V2_FRAG_ENDLESS = 0xff };
/* Repeat the time event endlessly (until removed) */ #define TE_V2_REPEAT_ENDLESS 0xff /* If a Time Event has bounded repetitions, this is the maximal value */ #define TE_V2_REPEAT_MAX 0xfe
#define TE_V2_PLACEMENT_POS 12 #define TE_V2_ABSENCE_POS 15
/** * enum iwl_time_event_policy - Time event policy values * A notification (both event and fragment) includes a status indicating whether * the FW was able to schedule the event or not. For fragment start/end * notification the status is always success. There is no start/end fragment * notification for monolithic events. * * @TE_V2_DEFAULT_POLICY: independent, social, present, unnoticeable * @TE_V2_NOTIF_HOST_EVENT_START: request/receive notification on event start * @TE_V2_NOTIF_HOST_EVENT_END: request/receive notification on event end * @TE_V2_NOTIF_INTERNAL_EVENT_START: internal FW use * @TE_V2_NOTIF_INTERNAL_EVENT_END: internal FW use. * @TE_V2_NOTIF_HOST_FRAG_START: request/receive notification on frag start * @TE_V2_NOTIF_HOST_FRAG_END: request/receive notification on frag end * @TE_V2_NOTIF_INTERNAL_FRAG_START: internal FW use. * @TE_V2_NOTIF_INTERNAL_FRAG_END: internal FW use. * @TE_V2_START_IMMEDIATELY: start time event immediately * @TE_V2_DEP_OTHER: depends on another time event * @TE_V2_DEP_TSF: depends on a specific time * @TE_V2_EVENT_SOCIOPATHIC: can't co-exist with other events of the same MAC * @TE_V2_ABSENCE: are we present or absent during the Time Event. */
enum iwl_time_event_policy { TE_V2_DEFAULT_POLICY = 0x0, /* notifications (event start/stop, fragment start/stop) */ TE_V2_NOTIF_HOST_EVENT_START = BIT(0), TE_V2_NOTIF_HOST_EVENT_END = BIT(1), TE_V2_NOTIF_INTERNAL_EVENT_START = BIT(2), TE_V2_NOTIF_INTERNAL_EVENT_END = BIT(3), TE_V2_NOTIF_HOST_FRAG_START = BIT(4), TE_V2_NOTIF_HOST_FRAG_END = BIT(5), TE_V2_NOTIF_INTERNAL_FRAG_START = BIT(6), TE_V2_NOTIF_INTERNAL_FRAG_END = BIT(7), TE_V2_START_IMMEDIATELY = BIT(11), /* placement characteristics */ TE_V2_DEP_OTHER = BIT(TE_V2_PLACEMENT_POS), TE_V2_DEP_TSF = BIT(TE_V2_PLACEMENT_POS + 1), TE_V2_EVENT_SOCIOPATHIC = BIT(TE_V2_PLACEMENT_POS + 2), /* are we present or absent during the Time Event. */ TE_V2_ABSENCE = BIT(TE_V2_ABSENCE_POS), };
/** * struct iwl_time_event_cmd - configuring Time Events * with struct MAC_TIME_EVENT_DATA_API_S_VER_2 (see also * version 1, determined by IWL_UCODE_TLV_FLAGS) * ( TIME_EVENT_CMD = 0x29 ) * @id_and_color: ID and color of the relevant MAC, * &enum iwl_ctxt_id_and_color * @action: action to perform, one of &enum iwl_ctxt_action * @id: this field has two meanings, depending on the action: * If the action is ADD, then it means the type of event to add. * For all other actions it is the unique event ID assigned when the * event was added by the FW.
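 *	(e.g. %TE_P2P_DEVICE_DISCOVERABLE when adding a P2P device
 *	discoverability window)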
* @apply_time: When to start the Time Event (in GP2) * @max_delay: maximum delay to event's start (apply time), in TU * @depends_on: the unique ID of the event we depend on (if any) * @interval: interval between repetitions, in TU * @duration: duration of event in TU * @repeat: how many repetitions to do, can be TE_REPEAT_ENDLESS * @max_frags: maximal number of fragments the Time Event can be divided into * @policy: defines whether uCode shall notify the host or other uCode modules * on event and/or fragment start and/or end * using one of TE_INDEPENDENT, TE_DEP_OTHER, TE_DEP_TSF, * TE_EVENT_SOCIOPATHIC * using TE_ABSENCE and using TE_NOTIF_*, * &enum iwl_time_event_policy */
struct iwl_time_event_cmd { /* COMMON_INDEX_HDR_API_S_VER_1 */ __le32 id_and_color; __le32 action; __le32 id; /* MAC_TIME_EVENT_DATA_API_S_VER_2 */ __le32 apply_time; __le32 max_delay; __le32 depends_on; __le32 interval; __le32 duration; u8 repeat; u8 max_frags; __le16 policy; } __packed; /* MAC_TIME_EVENT_CMD_API_S_VER_2 */
/** * struct iwl_time_event_resp - response structure to iwl_time_event_cmd * @status: bit 0 indicates success, all others specify errors * @id: the Time Event type * @unique_id: the unique ID assigned (in ADD) or given (others) to the TE * @id_and_color: ID and color of the relevant MAC, * &enum iwl_ctxt_id_and_color */
struct iwl_time_event_resp { __le32 status; __le32 id; __le32 unique_id; __le32 id_and_color; } __packed; /* MAC_TIME_EVENT_RSP_API_S_VER_1 */
/** * struct iwl_time_event_notif - notifications of time event start/stop * ( TIME_EVENT_NOTIFICATION = 0x2a ) * @timestamp: action timestamp in GP2 * @session_id: session's unique id * @unique_id: unique id of the Time Event itself * @id_and_color: ID and color of the relevant MAC * @action: &enum iwl_time_event_policy * @status: true if scheduled, false otherwise (not executed) */
struct iwl_time_event_notif { __le32 timestamp; __le32 session_id; __le32 unique_id; __le32 id_and_color; __le32 action; __le32 status; } __packed; /* MAC_TIME_EVENT_NTFY_API_S_VER_1 */
/* * struct iwl_hs20_roc_req_tail - tail of iwl_hs20_roc_req * * @node_addr: Our MAC Address * @reserved: reserved for alignment * @apply_time: GP2 value to start (should always be the current GP2 value) * @apply_time_max_delay: Maximum apply time delay value in TU. Defines max * time by which start of the event is allowed to be postponed. * @duration: event duration in TU. To calculate event duration: * timeEventDuration = min(duration, remainingQuota) */
struct iwl_hs20_roc_req_tail { u8 node_addr[ETH_ALEN]; __le16 reserved; __le32 apply_time; __le32 apply_time_max_delay; __le32 duration; } __packed;
/* * Aux ROC command * * Command requests the firmware to create a time event for a certain duration * and remain on the given channel. This is done by using the Aux framework in * the FW. * The command was first used for Hot Spot issues - but can be used regardless * of Hot Spot. * * ( HOT_SPOT_CMD 0x53 ) * * @id_and_color: ID and color of the MAC * @action: action to perform, one of FW_CTXT_ACTION_* * @event_unique_id: If the action is FW_CTXT_ACTION_REMOVE then the * event_unique_id should be the id of the time event assigned by ucode. * Otherwise ignore the event_unique_id. * @sta_id_and_color: station id and color, resumed during "Remain On Channel" * activity.
* @channel_info: channel info */
struct iwl_hs20_roc_req { /* COMMON_INDEX_HDR_API_S_VER_1 hdr */ __le32 id_and_color; __le32 action; __le32 event_unique_id; __le32 sta_id_and_color; struct iwl_fw_channel_info channel_info; struct iwl_hs20_roc_req_tail tail; } __packed; /* HOT_SPOT_CMD_API_S_VER_1 */
/* * result values for the Aux ROC command */ enum iwl_mvm_hot_spot { HOT_SPOT_RSP_STATUS_OK, HOT_SPOT_RSP_STATUS_TOO_MANY_EVENTS, HOT_SPOT_MAX_NUM_OF_SESSIONS, };
/* * Aux ROC command response * * In response to iwl_hs20_roc_req the FW sends this command to notify the * driver of the uid of the time event. * * ( HOT_SPOT_CMD 0x53 ) * * @event_unique_id: Unique ID of time event assigned by ucode * @status: Return status: 0 is success, all the rest used for specific errors */
struct iwl_hs20_roc_res { __le32 event_unique_id; __le32 status; } __packed; /* HOT_SPOT_RSP_API_S_VER_1 */
#endif /* __iwl_fw_api_time_event_h__ */ backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h000066400000000000000000000765661351064634500265660ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *****************************************************************************/ #ifndef __iwl_fw_api_tx_h__ #define __iwl_fw_api_tx_h__ /** * enum iwl_tx_flags - bitmasks for tx_flags in TX command * @TX_CMD_FLG_PROT_REQUIRE: use RTS or CTS-to-self to protect the frame * @TX_CMD_FLG_WRITE_TX_POWER: update current tx power value in the mgmt frame * @TX_CMD_FLG_ACK: expect ACK from receiving station * @TX_CMD_FLG_STA_RATE: use RS table with initial index from the TX command. * Otherwise, use rate_n_flags from the TX command * @TX_CMD_FLG_BAR: this frame is a BA request, immediate BAR is expected * Must set TX_CMD_FLG_ACK with this flag. * @TX_CMD_FLG_TXOP_PROT: TXOP protection requested * @TX_CMD_FLG_VHT_NDPA: mark frame is NDPA for VHT beamformer sequence * @TX_CMD_FLG_HT_NDPA: mark frame is NDPA for HT beamformer sequence * @TX_CMD_FLG_CSI_FDBK2HOST: mark to send feedback to host (only if good CRC) * @TX_CMD_FLG_BT_PRIO_POS: the position of the BT priority (bit 11 is ignored * on old firmwares). * @TX_CMD_FLG_BT_DIS: disable BT priority for this frame * @TX_CMD_FLG_SEQ_CTL: set if FW should override the sequence control. * Should be set for mgmt, non-QOS data, mcast, bcast and in scan command * @TX_CMD_FLG_MORE_FRAG: this frame is non-last MPDU * @TX_CMD_FLG_TSF: FW should calculate and insert TSF in the frame * Should be set for beacons and probe responses * @TX_CMD_FLG_CALIB: activate PA TX power calibrations * @TX_CMD_FLG_KEEP_SEQ_CTL: if seq_ctl is set, don't increase inner seq count * @TX_CMD_FLG_MH_PAD: driver inserted 2 byte padding after MAC header. 
* Should be set for 26/30 length MAC headers * @TX_CMD_FLG_RESP_TO_DRV: zero this if the response should go only to FW * @TX_CMD_FLG_TKIP_MIC_DONE: FW already performed TKIP MIC calculation * @TX_CMD_FLG_DUR: disable duration overwriting used in PS-Poll Assoc-id * @TX_CMD_FLG_FW_DROP: FW should mark frame to be dropped * @TX_CMD_FLG_EXEC_PAPD: execute PAPD * @TX_CMD_FLG_PAPD_TYPE: 0 for reference power, 1 for nominal power * @TX_CMD_FLG_HCCA_CHUNK: mark start of TSPEC chunk */ enum iwl_tx_flags { TX_CMD_FLG_PROT_REQUIRE = BIT(0), TX_CMD_FLG_WRITE_TX_POWER = BIT(1), TX_CMD_FLG_ACK = BIT(3), TX_CMD_FLG_STA_RATE = BIT(4), TX_CMD_FLG_BAR = BIT(6), TX_CMD_FLG_TXOP_PROT = BIT(7), TX_CMD_FLG_VHT_NDPA = BIT(8), TX_CMD_FLG_HT_NDPA = BIT(9), TX_CMD_FLG_CSI_FDBK2HOST = BIT(10), TX_CMD_FLG_BT_PRIO_POS = 11, TX_CMD_FLG_BT_DIS = BIT(12), TX_CMD_FLG_SEQ_CTL = BIT(13), TX_CMD_FLG_MORE_FRAG = BIT(14), TX_CMD_FLG_TSF = BIT(16), TX_CMD_FLG_CALIB = BIT(17), TX_CMD_FLG_KEEP_SEQ_CTL = BIT(18), TX_CMD_FLG_MH_PAD = BIT(20), TX_CMD_FLG_RESP_TO_DRV = BIT(21), TX_CMD_FLG_TKIP_MIC_DONE = BIT(23), TX_CMD_FLG_DUR = BIT(25), TX_CMD_FLG_FW_DROP = BIT(26), TX_CMD_FLG_EXEC_PAPD = BIT(27), TX_CMD_FLG_PAPD_TYPE = BIT(28), TX_CMD_FLG_HCCA_CHUNK = BIT(31) }; /* TX_FLAGS_BITS_API_S_VER_1 */ /** * enum iwl_tx_cmd_flags - bitmasks for tx_flags in TX command for 22000 * @IWL_TX_FLAGS_CMD_RATE: use rate from the TX command * @IWL_TX_FLAGS_ENCRYPT_DIS: frame should not be encrypted, even if it belongs * to a secured STA * @IWL_TX_FLAGS_HIGH_PRI: high priority frame (like EAPOL) - can affect rate * selection, retry limits and BT kill */ enum iwl_tx_cmd_flags { IWL_TX_FLAGS_CMD_RATE = BIT(0), IWL_TX_FLAGS_ENCRYPT_DIS = BIT(1), IWL_TX_FLAGS_HIGH_PRI = BIT(2), }; /* TX_FLAGS_BITS_API_S_VER_3 */ /** * enum iwl_tx_pm_timeouts - pm timeout values in TX command * @PM_FRAME_NONE: no need to suspend sleep mode * @PM_FRAME_MGMT: fw suspend sleep mode for 100TU * @PM_FRAME_ASSOC: fw suspend sleep mode for 10sec */ enum iwl_tx_pm_timeouts { PM_FRAME_NONE = 0, PM_FRAME_MGMT = 2, PM_FRAME_ASSOC = 3, }; #define TX_CMD_SEC_MSK 0x07 #define TX_CMD_SEC_WEP_KEY_IDX_POS 6 #define TX_CMD_SEC_WEP_KEY_IDX_MSK 0xc0 /** * enum iwl_tx_cmd_sec_ctrl - bitmasks for security control in TX command * @TX_CMD_SEC_WEP: WEP encryption algorithm. * @TX_CMD_SEC_CCM: CCM encryption algorithm. * @TX_CMD_SEC_TKIP: TKIP encryption algorithm. * @TX_CMD_SEC_EXT: extended cipher algorithm. * @TX_CMD_SEC_GCMP: GCMP encryption algorithm. * @TX_CMD_SEC_KEY128: set for 104 bits WEP key. * @TX_CMD_SEC_KEY_FROM_TABLE: for a non-WEP key, set if the key should be taken * from the table instead of from the TX command. * If the key is taken from the key table its index should be given by the * first byte of the TX command key field. 
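 * * As an illustrative sketch (this example is not part of the original * comment), a WEP-104 frame whose key index 1 is carried in the TX command * could be encoded as: * sec_ctl = TX_CMD_SEC_WEP | TX_CMD_SEC_KEY128 | * (1 << TX_CMD_SEC_WEP_KEY_IDX_POS);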
*/ enum iwl_tx_cmd_sec_ctrl { TX_CMD_SEC_WEP = 0x01, TX_CMD_SEC_CCM = 0x02, TX_CMD_SEC_TKIP = 0x03, TX_CMD_SEC_EXT = 0x04, TX_CMD_SEC_GCMP = 0x05, TX_CMD_SEC_KEY128 = 0x08, TX_CMD_SEC_KEY_FROM_TABLE = 0x10, }; /* * TX command Frame life time in us - to be written in pm_frame_timeout */ #define TX_CMD_LIFE_TIME_INFINITE 0xFFFFFFFF #define TX_CMD_LIFE_TIME_DEFAULT 2000000 /* 2000 ms */ #define TX_CMD_LIFE_TIME_PROBE_RESP 40000 /* 40 ms */ #define TX_CMD_LIFE_TIME_EXPIRED_FRAME 0 /* * TID for non QoS frames - to be written in tid_tspec */ #define IWL_TID_NON_QOS 0 /* * Limits on the retransmissions - to be written in {data,rts}_retry_limit */ #define IWL_DEFAULT_TX_RETRY 15 #define IWL_MGMT_DFAULT_RETRY_LIMIT 3 #define IWL_RTS_DFAULT_RETRY_LIMIT 60 #define IWL_BAR_DFAULT_RETRY_LIMIT 60 #define IWL_LOW_RETRY_LIMIT 7 /** * enum iwl_tx_offload_assist_flags_pos - set %iwl_tx_cmd offload_assist values * @TX_CMD_OFFLD_IP_HDR: offset to start of IP header (in words) * from mac header end. For normal case it is 4 words for SNAP. * note: tx_cmd, mac header and pad are not counted in the offset. * This is used to help the offload in case there is tunneling such as * IPv6 in IPv4, in such case the ip header offset should point to the * inner ip header and IPv4 checksum of the external header should be * calculated by driver. * @TX_CMD_OFFLD_L4_EN: enable TCP/UDP checksum * @TX_CMD_OFFLD_L3_EN: enable IP header checksum * @TX_CMD_OFFLD_MH_SIZE: size of the mac header in words. Includes the IV * field. Doesn't include the pad. * @TX_CMD_OFFLD_PAD: mark 2-byte pad was inserted after the mac header for * alignment * @TX_CMD_OFFLD_AMSDU: mark TX command is A-MSDU */ enum iwl_tx_offload_assist_flags_pos { TX_CMD_OFFLD_IP_HDR = 0, TX_CMD_OFFLD_L4_EN = 6, TX_CMD_OFFLD_L3_EN = 7, TX_CMD_OFFLD_MH_SIZE = 8, TX_CMD_OFFLD_PAD = 13, TX_CMD_OFFLD_AMSDU = 14, }; #define IWL_TX_CMD_OFFLD_MH_MASK 0x1f #define IWL_TX_CMD_OFFLD_IP_HDR_MASK 0x3f /* TODO: complete documentation for try_cnt and btkill_cnt */ /** * struct iwl_tx_cmd - TX command struct to FW * ( TX_CMD = 0x1c ) * @len: in bytes of the payload, see below for details * @offload_assist: TX offload configuration * @tx_flags: combination of TX_CMD_FLG_* * @scratch: scratch buffer used by the device * @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is * cleared. Combination of RATE_MCS_* * @sta_id: index of destination station in FW station table * @sec_ctl: security control, TX_CMD_SEC_* * @initial_rate_index: index into the rate table for initial TX attempt. * Applied if TX_CMD_FLG_STA_RATE_MSK is set, normally 0 for data frames. * @reserved2: reserved * @key: security key * @reserved3: reserved * @life_time: frame life time (usecs??) * @dram_lsb_ptr: Physical address of scratch area in the command (try_cnt + * btkill_cnt + reserved), first 32 bits. "0" disables usage. * @dram_msb_ptr: upper bits of the scratch physical address * @rts_retry_limit: max attempts for RTS * @data_retry_limit: max attempts to send the data packet * @tid_tspec: TID/tspec * @pm_frame_timeout: PM TX frame timeout * @reserved4: reserved * @payload: payload (same as @hdr) * @hdr: 802.11 header (same as @payload) * * The byte count (both len and next_frame_len) includes MAC header * (24/26/30/32 bytes) * + 2 bytes pad if 26/30 header size * + 8 byte IV for CCM or TKIP (not used for WEP) * + Data payload * + 8-byte MIC (not used for CCM/WEP) * It does not include post-MAC padding, i.e., * MIC (CCM) 8 bytes, ICV (WEP/TKIP/CKIP) 4 bytes, CRC 4 bytes.
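 * * Worked example (illustrative, not from the original comment): a QoS * data frame with a 26-byte MAC header sent with CCM and carrying N * payload bytes uses len = 26 + 2 (pad) + 8 (IV) + N; the 8-byte CCM MIC * is post-MAC padding and is therefore not counted. *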
* Range of len: 14-2342 bytes. * * After the struct fields the MAC header is placed, plus any padding, * and then the actual payload. */ struct iwl_tx_cmd { __le16 len; __le16 offload_assist; __le32 tx_flags; struct { u8 try_cnt; u8 btkill_cnt; __le16 reserved; } scratch; /* DRAM_SCRATCH_API_U_VER_1 */ __le32 rate_n_flags; u8 sta_id; u8 sec_ctl; u8 initial_rate_index; u8 reserved2; u8 key[16]; __le32 reserved3; __le32 life_time; __le32 dram_lsb_ptr; u8 dram_msb_ptr; u8 rts_retry_limit; u8 data_retry_limit; u8 tid_tspec; __le16 pm_frame_timeout; __le16 reserved4; u8 payload[0]; struct ieee80211_hdr hdr[0]; } __packed; /* TX_CMD_API_S_VER_6 */ struct iwl_dram_sec_info { __le32 pn_low; __le16 pn_high; __le16 aux_info; } __packed; /* DRAM_SEC_INFO_API_S_VER_1 */ /** * struct iwl_tx_cmd_gen2 - TX command struct to FW for 22000 devices * ( TX_CMD = 0x1c ) * @len: in bytes of the payload, see below for details * @offload_assist: TX offload configuration * @flags: combination of &enum iwl_tx_cmd_flags * @dram_info: FW internal DRAM storage * @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is * cleared. Combination of RATE_MCS_* * @hdr: 802.11 header */ struct iwl_tx_cmd_gen2 { __le16 len; __le16 offload_assist; __le32 flags; struct iwl_dram_sec_info dram_info; __le32 rate_n_flags; struct ieee80211_hdr hdr[0]; } __packed; /* TX_CMD_API_S_VER_7 */ /** * struct iwl_tx_cmd_gen3 - TX command struct to FW for 22560 devices * ( TX_CMD = 0x1c ) * @len: in bytes of the payload, see below for details * @flags: combination of &enum iwl_tx_cmd_flags * @offload_assist: TX offload configuration * @dram_info: FW internal DRAM storage * @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is * cleared. Combination of RATE_MCS_* * @ttl: time to live - packet lifetime limit. The FW should drop the frame * if this limit has passed.
* @hdr: 802.11 header */ struct iwl_tx_cmd_gen3 { __le16 len; __le16 flags; __le32 offload_assist; struct iwl_dram_sec_info dram_info; __le32 rate_n_flags; __le64 ttl; struct ieee80211_hdr hdr[0]; } __packed; /* TX_CMD_API_S_VER_8 */ /* * TX response related data */ /* * enum iwl_tx_status - status that is returned by the fw after attempts to Tx * @TX_STATUS_SUCCESS: * @TX_STATUS_DIRECT_DONE: * @TX_STATUS_POSTPONE_DELAY: * @TX_STATUS_POSTPONE_FEW_BYTES: * @TX_STATUS_POSTPONE_BT_PRIO: * @TX_STATUS_POSTPONE_QUIET_PERIOD: * @TX_STATUS_POSTPONE_CALC_TTAK: * @TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY: * @TX_STATUS_FAIL_SHORT_LIMIT: * @TX_STATUS_FAIL_LONG_LIMIT: * @TX_STATUS_FAIL_UNDERRUN: * @TX_STATUS_FAIL_DRAIN_FLOW: * @TX_STATUS_FAIL_RFKILL_FLUSH: * @TX_STATUS_FAIL_LIFE_EXPIRE: * @TX_STATUS_FAIL_DEST_PS: * @TX_STATUS_FAIL_HOST_ABORTED: * @TX_STATUS_FAIL_BT_RETRY: * @TX_STATUS_FAIL_STA_INVALID: * @TX_STATUS_FAIL_FRAG_DROPPED: * @TX_STATUS_FAIL_TID_DISABLE: * @TX_STATUS_FAIL_FIFO_FLUSHED: * @TX_STATUS_FAIL_SMALL_CF_POLL: * @TX_STATUS_FAIL_FW_DROP: * @TX_STATUS_FAIL_STA_COLOR_MISMATCH: mismatch between color of Tx cmd and * STA table * @TX_STATUS_INTERNAL_ABORT: * @TX_MODE_MSK: * @TX_MODE_NO_BURST: * @TX_MODE_IN_BURST_SEQ: * @TX_MODE_FIRST_IN_BURST: * @TX_QUEUE_NUM_MSK: * * Valid only if frame_count == 1 * TODO: complete documentation */ enum iwl_tx_status { TX_STATUS_MSK = 0x000000ff, TX_STATUS_SUCCESS = 0x01, TX_STATUS_DIRECT_DONE = 0x02, /* postpone TX */ TX_STATUS_POSTPONE_DELAY = 0x40, TX_STATUS_POSTPONE_FEW_BYTES = 0x41, TX_STATUS_POSTPONE_BT_PRIO = 0x42, TX_STATUS_POSTPONE_QUIET_PERIOD = 0x43, TX_STATUS_POSTPONE_CALC_TTAK = 0x44, /* abort TX */ TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY = 0x81, TX_STATUS_FAIL_SHORT_LIMIT = 0x82, TX_STATUS_FAIL_LONG_LIMIT = 0x83, TX_STATUS_FAIL_UNDERRUN = 0x84, TX_STATUS_FAIL_DRAIN_FLOW = 0x85, TX_STATUS_FAIL_RFKILL_FLUSH = 0x86, TX_STATUS_FAIL_LIFE_EXPIRE = 0x87, TX_STATUS_FAIL_DEST_PS = 0x88, TX_STATUS_FAIL_HOST_ABORTED = 0x89, TX_STATUS_FAIL_BT_RETRY = 0x8a, TX_STATUS_FAIL_STA_INVALID = 0x8b, TX_STATUS_FAIL_FRAG_DROPPED = 0x8c, TX_STATUS_FAIL_TID_DISABLE = 0x8d, TX_STATUS_FAIL_FIFO_FLUSHED = 0x8e, TX_STATUS_FAIL_SMALL_CF_POLL = 0x8f, TX_STATUS_FAIL_FW_DROP = 0x90, TX_STATUS_FAIL_STA_COLOR_MISMATCH = 0x91, TX_STATUS_INTERNAL_ABORT = 0x92, TX_MODE_MSK = 0x00000f00, TX_MODE_NO_BURST = 0x00000000, TX_MODE_IN_BURST_SEQ = 0x00000100, TX_MODE_FIRST_IN_BURST = 0x00000200, TX_QUEUE_NUM_MSK = 0x0001f000, TX_NARROW_BW_MSK = 0x00060000, TX_NARROW_BW_1DIV2 = 0x00020000, TX_NARROW_BW_1DIV4 = 0x00040000, TX_NARROW_BW_1DIV8 = 0x00060000, }; /* * enum iwl_tx_agg_status - TX aggregation status * @AGG_TX_STATE_STATUS_MSK: * @AGG_TX_STATE_TRANSMITTED: * @AGG_TX_STATE_UNDERRUN: * @AGG_TX_STATE_BT_PRIO: * @AGG_TX_STATE_FEW_BYTES: * @AGG_TX_STATE_ABORT: * @AGG_TX_STATE_TX_ON_AIR_DROP: TX_ON_AIR signal drop without underrun or * BT detection * @AGG_TX_STATE_LAST_SENT_TRY_CNT: * @AGG_TX_STATE_LAST_SENT_BT_KILL: * @AGG_TX_STATE_SCD_QUERY: * @AGG_TX_STATE_TEST_BAD_CRC32: * @AGG_TX_STATE_RESPONSE: * @AGG_TX_STATE_DUMP_TX: * @AGG_TX_STATE_DELAY_TX: * @AGG_TX_STATE_TRY_CNT_MSK: Retry count for 1st frame in aggregation (retries * occur if tx failed for this frame when it was a member of a previous * aggregation block). If rate scaling is used, retry count indicates the * rate table entry used for all frames in the new agg.
* @AGG_TX_STATE_SEQ_NUM_MSK: Command ID and sequence number of Tx command for * this frame * * TODO: complete documentation */ enum iwl_tx_agg_status { AGG_TX_STATE_STATUS_MSK = 0x00fff, AGG_TX_STATE_TRANSMITTED = 0x000, AGG_TX_STATE_UNDERRUN = 0x001, AGG_TX_STATE_BT_PRIO = 0x002, AGG_TX_STATE_FEW_BYTES = 0x004, AGG_TX_STATE_ABORT = 0x008, AGG_TX_STATE_TX_ON_AIR_DROP = 0x010, AGG_TX_STATE_LAST_SENT_TRY_CNT = 0x020, AGG_TX_STATE_LAST_SENT_BT_KILL = 0x040, AGG_TX_STATE_SCD_QUERY = 0x080, AGG_TX_STATE_TEST_BAD_CRC32 = 0x0100, AGG_TX_STATE_RESPONSE = 0x1ff, AGG_TX_STATE_DUMP_TX = 0x200, AGG_TX_STATE_DELAY_TX = 0x400, AGG_TX_STATE_TRY_CNT_POS = 12, AGG_TX_STATE_TRY_CNT_MSK = 0xf << AGG_TX_STATE_TRY_CNT_POS, }; /* * The mask below describes a status where we are absolutely sure that the MPDU * wasn't sent. For BA/Underrun we cannot be that sure. All we know that we've * written the bytes to the TXE, but we know nothing about what the DSP did. */ #define AGG_TX_STAT_FRAME_NOT_SENT (AGG_TX_STATE_FEW_BYTES | \ AGG_TX_STATE_ABORT | \ AGG_TX_STATE_SCD_QUERY) /* * REPLY_TX = 0x1c (response) * * This response may be in one of two slightly different formats, indicated * by the frame_count field: * * 1) No aggregation (frame_count == 1). This reports Tx results for a single * frame. Multiple attempts, at various bit rates, may have been made for * this frame. * * 2) Aggregation (frame_count > 1). This reports Tx results for two or more * frames that used block-acknowledge. All frames were transmitted at * same rate. Rate scaling may have been used if first frame in this new * agg block failed in previous agg block(s). * * Note that, for aggregation, ACK (block-ack) status is not delivered * here; block-ack has not been received by the time the device records * this status. * This status relates to reasons the tx might have been blocked or aborted * within the device, rather than whether it was received successfully by * the destination station. */ /** * struct agg_tx_status - per packet TX aggregation status * @status: See &enum iwl_tx_agg_status * @sequence: Sequence # for this frame's Tx cmd (not SSN!) */ struct agg_tx_status { __le16 status; __le16 sequence; } __packed; /* * definitions for initial rate index field * bits [3:0] initial rate index * bits [6:4] rate table color, used for the initial rate * bit-7 invalid rate indication */ #define TX_RES_INIT_RATE_INDEX_MSK 0x0f #define TX_RES_RATE_TABLE_COLOR_POS 4 #define TX_RES_RATE_TABLE_COLOR_MSK 0x70 #define TX_RES_INV_RATE_INDEX_MSK 0x80 #define TX_RES_RATE_TABLE_COL_GET(_f) (((_f) & TX_RES_RATE_TABLE_COLOR_MSK) >>\ TX_RES_RATE_TABLE_COLOR_POS) #define IWL_MVM_TX_RES_GET_TID(_ra_tid) ((_ra_tid) & 0x0f) #define IWL_MVM_TX_RES_GET_RA(_ra_tid) ((_ra_tid) >> 4) /** * struct iwl_mvm_tx_resp_v3 - notifies that fw is TXing a packet * ( REPLY_TX = 0x1c ) * @frame_count: 1 no aggregation, >1 aggregation * @bt_kill_count: num of times blocked by bluetooth (unused for agg) * @failure_rts: num of failures due to unsuccessful RTS * @failure_frame: num failures due to no ACK (unused for agg) * @initial_rate: for non-agg: rate of the successful Tx. For agg: rate of the * Tx of all the batch. RATE_MCS_* * @wireless_media_time: for non-agg: RTS + CTS + frame tx attempts time + ACK. * for agg: RTS + CTS + aggregation tx time + block-ack time. * in usec. 
* @pa_status: tx power info * @pa_integ_res_a: tx power info * @pa_integ_res_b: tx power info * @pa_integ_res_c: tx power info * @measurement_req_id: tx power info * @reduced_tpc: transmit power reduction used * @reserved: reserved * @tfd_info: TFD information set by the FH * @seq_ctl: sequence control from the Tx cmd * @byte_cnt: byte count from the Tx cmd * @tlc_info: TLC rate info * @ra_tid: bits [3:0] = ra, bits [7:4] = tid * @frame_ctrl: frame control * @status: for non-agg: frame status TX_STATUS_* * for agg: status of 1st frame, AGG_TX_STATE_*; other frame status fields * follow this one, up to frame_count. Length in @frame_count. * * After the array of statuses comes the SSN of the SCD. Look at * %iwl_mvm_get_scd_ssn for more details. */ struct iwl_mvm_tx_resp_v3 { u8 frame_count; u8 bt_kill_count; u8 failure_rts; u8 failure_frame; __le32 initial_rate; __le16 wireless_media_time; u8 pa_status; u8 pa_integ_res_a[3]; u8 pa_integ_res_b[3]; u8 pa_integ_res_c[3]; __le16 measurement_req_id; u8 reduced_tpc; u8 reserved; __le32 tfd_info; __le16 seq_ctl; __le16 byte_cnt; u8 tlc_info; u8 ra_tid; __le16 frame_ctrl; struct agg_tx_status status[]; } __packed; /* TX_RSP_API_S_VER_3 */ /** * struct iwl_mvm_tx_resp - notifies that fw is TXing a packet * ( REPLY_TX = 0x1c ) * @frame_count: 1 no aggregation, >1 aggregation * @bt_kill_count: num of times blocked by bluetooth (unused for agg) * @failure_rts: num of failures due to unsuccessful RTS * @failure_frame: num failures due to no ACK (unused for agg) * @initial_rate: for non-agg: rate of the successful Tx. For agg: rate of the * Tx of all the batch. RATE_MCS_* * @wireless_media_time: for non-agg: RTS + CTS + frame tx attempts time + ACK. * for agg: RTS + CTS + aggregation tx time + block-ack time. * in usec. * @pa_status: tx power info * @pa_integ_res_a: tx power info * @pa_integ_res_b: tx power info * @pa_integ_res_c: tx power info * @measurement_req_id: tx power info * @reduced_tpc: transmit power reduction used * @reserved: reserved * @tfd_info: TFD information set by the FH * @seq_ctl: sequence control from the Tx cmd * @byte_cnt: byte count from the Tx cmd * @tlc_info: TLC rate info * @ra_tid: bits [3:0] = ra, bits [7:4] = tid * @frame_ctrl: frame control * @tx_queue: TX queue for this response * @reserved2: reserved for padding/alignment * @status: for non-agg: frame status TX_STATUS_* * For version 6 TX response isn't received for aggregation at all. * * After the array of statuses comes the SSN of the SCD. Look at * %iwl_mvm_get_scd_ssn for more details. 
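 * * A rough sketch of that lookup (a hedged reconstruction; the driver's * iwl_mvm_get_scd_ssn is authoritative): since each struct agg_tx_status * entry is 4 bytes, the SSN sits immediately after the status array: * ssn = le32_to_cpup((__le32 *)tx_resp->status + * tx_resp->frame_count) & 0xfff;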
*/ struct iwl_mvm_tx_resp { u8 frame_count; u8 bt_kill_count; u8 failure_rts; u8 failure_frame; __le32 initial_rate; __le16 wireless_media_time; u8 pa_status; u8 pa_integ_res_a[3]; u8 pa_integ_res_b[3]; u8 pa_integ_res_c[3]; __le16 measurement_req_id; u8 reduced_tpc; u8 reserved; __le32 tfd_info; __le16 seq_ctl; __le16 byte_cnt; u8 tlc_info; u8 ra_tid; __le16 frame_ctrl; __le16 tx_queue; __le16 reserved2; struct agg_tx_status status; } __packed; /* TX_RSP_API_S_VER_6 */ /** * struct iwl_mvm_ba_notif - notifies about reception of BA * ( BA_NOTIF = 0xc5 ) * @sta_addr: MAC address * @reserved: reserved * @sta_id: Index of recipient (BA-sending) station in fw's station table * @tid: tid of the session * @seq_ctl: sequence control field * @bitmap: the bitmap of the BA notification as seen in the air * @scd_flow: the tx queue this BA relates to * @scd_ssn: the index of the last contiguously sent packet * @txed: number of Txed frames in this batch * @txed_2_done: number of Acked frames in this batch * @reduced_txp: power reduced according to TPC. This is the actual value and * not a copy from the LQ command. Thus, if not the first rate was used * for Tx-ing then this value will be set to 0 by FW. * @reserved1: reserved */ struct iwl_mvm_ba_notif { u8 sta_addr[ETH_ALEN]; __le16 reserved; u8 sta_id; u8 tid; __le16 seq_ctl; __le64 bitmap; __le16 scd_flow; __le16 scd_ssn; u8 txed; u8 txed_2_done; u8 reduced_txp; u8 reserved1; } __packed; /** * struct iwl_mvm_compressed_ba_tfd - progress of a TFD queue * @q_num: TFD queue number * @tfd_index: Index of first un-acked frame in the TFD queue * @scd_queue: For debug only - the physical queue the TFD queue is bound to * @tid: TID of the queue (0-7) * @reserved: reserved for alignment */ struct iwl_mvm_compressed_ba_tfd { __le16 q_num; __le16 tfd_index; u8 scd_queue; u8 tid; u8 reserved[2]; } __packed; /* COMPRESSED_BA_TFD_API_S_VER_1 */ /** * struct iwl_mvm_compressed_ba_ratid - progress of a RA TID queue * @q_num: RA TID queue number * @tid: TID of the queue * @ssn: BA window current SSN */ struct iwl_mvm_compressed_ba_ratid { u8 q_num; u8 tid; __le16 ssn; } __packed; /* COMPRESSED_BA_RATID_API_S_VER_1 */ /* * enum iwl_mvm_ba_resp_flags - TX aggregation status * @IWL_MVM_BA_RESP_TX_AGG: generated due to BA * @IWL_MVM_BA_RESP_TX_BAR: generated due to BA after BAR * @IWL_MVM_BA_RESP_TX_AGG_FAIL: aggregation didn't receive BA * @IWL_MVM_BA_RESP_TX_UNDERRUN: aggregation got underrun * @IWL_MVM_BA_RESP_TX_BT_KILL: aggregation got BT-kill * @IWL_MVM_BA_RESP_TX_DSP_TIMEOUT: aggregation didn't finish within the * expected time */ enum iwl_mvm_ba_resp_flags { IWL_MVM_BA_RESP_TX_AGG, IWL_MVM_BA_RESP_TX_BAR, IWL_MVM_BA_RESP_TX_AGG_FAIL, IWL_MVM_BA_RESP_TX_UNDERRUN, IWL_MVM_BA_RESP_TX_BT_KILL, IWL_MVM_BA_RESP_TX_DSP_TIMEOUT }; /** * struct iwl_mvm_compressed_ba_notif - notifies about reception of BA * ( BA_NOTIF = 0xc5 ) * @flags: status flag, see the &iwl_mvm_ba_resp_flags * @sta_id: Index of recipient (BA-sending) station in fw's station table * @reduced_txp: power reduced according to TPC. This is the actual value and * not a copy from the LQ command. Thus, if not the first rate was used * for Tx-ing then this value will be set to 0 by FW. 
* @tlc_rate_info: TLC rate info, initial rate index, TLC table color * @retry_cnt: retry count * @query_byte_cnt: SCD query byte count * @query_frame_cnt: SCD query frame count * @txed: number of frames sent in the aggregation (all-TIDs) * @done: number of frames that were Acked by the BA (all-TIDs) * @reserved: reserved (for alignment) * @wireless_time: Wireless-media time * @tx_rate: the rate the aggregation was sent at * @tfd_cnt: number of TFD-Q elements * @ra_tid_cnt: number of RATID-Q elements * @tfd: array of TFD queue status updates. See &iwl_mvm_compressed_ba_tfd * for details. Length in @tfd_cnt. * @ra_tid: array of RA-TID queue status updates. For debug purposes only. See * &iwl_mvm_compressed_ba_ratid for more details. Length in @ra_tid_cnt. */ struct iwl_mvm_compressed_ba_notif { __le32 flags; u8 sta_id; u8 reduced_txp; u8 tlc_rate_info; u8 retry_cnt; __le32 query_byte_cnt; __le16 query_frame_cnt; __le16 txed; __le16 done; __le16 reserved; __le32 wireless_time; __le32 tx_rate; __le16 tfd_cnt; __le16 ra_tid_cnt; struct iwl_mvm_compressed_ba_tfd tfd[0]; struct iwl_mvm_compressed_ba_ratid ra_tid[0]; } __packed; /* COMPRESSED_BA_RES_API_S_VER_4 */ /** * struct iwl_mac_beacon_cmd_v6 - beacon template command * @tx: the tx commands associated with the beacon frame * @template_id: currently equal to the mac context id of the corresponding * mac. * @tim_idx: the offset of the tim IE in the beacon * @tim_size: the length of the tim IE * @frame: the template of the beacon frame */ struct iwl_mac_beacon_cmd_v6 { struct iwl_tx_cmd tx; __le32 template_id; __le32 tim_idx; __le32 tim_size; struct ieee80211_hdr frame[0]; } __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_6 */ /** * struct iwl_mac_beacon_cmd_v7 - beacon template command with offloaded CSA * @tx: the tx commands associated with the beacon frame * @template_id: currently equal to the mac context id of the corresponding * mac. * @tim_idx: the offset of the tim IE in the beacon * @tim_size: the length of the tim IE * @ecsa_offset: offset to the ECSA IE if present * @csa_offset: offset to the CSA IE if present * @frame: the template of the beacon frame */ struct iwl_mac_beacon_cmd_v7 { struct iwl_tx_cmd tx; __le32 template_id; __le32 tim_idx; __le32 tim_size; __le32 ecsa_offset; __le32 csa_offset; struct ieee80211_hdr frame[0]; } __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_7 */ enum iwl_mac_beacon_flags { IWL_MAC_BEACON_CCK = BIT(8), IWL_MAC_BEACON_ANT_A = BIT(9), IWL_MAC_BEACON_ANT_B = BIT(10), IWL_MAC_BEACON_ANT_C = BIT(11), }; /** * struct iwl_mac_beacon_cmd - beacon template command with offloaded CSA * @byte_cnt: byte count of the beacon frame. * @flags: least significant byte for rate code. The most significant byte * is &enum iwl_mac_beacon_flags. * @reserved: reserved * @template_id: currently equal to the mac context id of the corresponding mac.
* @tim_idx: the offset of the tim IE in the beacon * @tim_size: the length of the tim IE * @ecsa_offset: offset to the ECSA IE if present * @csa_offset: offset to the CSA IE if present * @frame: the template of the beacon frame */ struct iwl_mac_beacon_cmd { __le16 byte_cnt; __le16 flags; __le64 reserved; __le32 template_id; __le32 tim_idx; __le32 tim_size; __le32 ecsa_offset; __le32 csa_offset; struct ieee80211_hdr frame[0]; } __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_9 */ struct iwl_beacon_notif { struct iwl_mvm_tx_resp beacon_notify_hdr; __le64 tsf; __le32 ibss_mgr_status; } __packed; /** * struct iwl_extended_beacon_notif_v5 - notifies about beacon transmission * @beacon_notify_hdr: tx response command associated with the beacon * @tsf: last beacon tsf * @ibss_mgr_status: whether IBSS is manager * @gp2: last beacon time in gp2 */ struct iwl_extended_beacon_notif_v5 { struct iwl_mvm_tx_resp beacon_notify_hdr; __le64 tsf; __le32 ibss_mgr_status; __le32 gp2; } __packed; /* BEACON_NTFY_API_S_VER_5 */ /** * struct iwl_extended_beacon_notif - notifies about beacon transmission * @status: the status of the Tx response of the beacon * @tsf: last beacon tsf * @ibss_mgr_status: whether IBSS is manager * @gp2: last beacon time in gp2 */ struct iwl_extended_beacon_notif { __le32 status; __le64 tsf; __le32 ibss_mgr_status; __le32 gp2; } __packed; /* BEACON_NTFY_API_S_VER_6_ */ /** * enum iwl_dump_control - dump (flush) control flags * @DUMP_TX_FIFO_FLUSH: Dump MSDUs until the FIFO is empty * and the TFD queues are empty. */ enum iwl_dump_control { DUMP_TX_FIFO_FLUSH = BIT(1), }; /** * struct iwl_tx_path_flush_cmd_v1 - queue/FIFO flush command * @queues_ctl: bitmap of queues to flush * @flush_ctl: control flags * @reserved: reserved */ struct iwl_tx_path_flush_cmd_v1 { __le32 queues_ctl; __le16 flush_ctl; __le16 reserved; } __packed; /* TX_PATH_FLUSH_CMD_API_S_VER_1 */ /** * struct iwl_tx_path_flush_cmd - queue/FIFO flush command * @sta_id: station ID to flush * @tid_mask: TID mask to flush * @reserved: reserved */ struct iwl_tx_path_flush_cmd { __le32 sta_id; __le16 tid_mask; __le16 reserved; } __packed; /* TX_PATH_FLUSH_CMD_API_S_VER_2 */ /* Available options for the SCD_QUEUE_CFG HCMD */ enum iwl_scd_cfg_actions { SCD_CFG_DISABLE_QUEUE = 0x0, SCD_CFG_ENABLE_QUEUE = 0x1, SCD_CFG_UPDATE_QUEUE_TID = 0x2, }; /** * struct iwl_scd_txq_cfg_cmd - New txq hw scheduler config command * @token: unused * @sta_id: station id * @tid: TID * @scd_queue: scheduler queue to configure * @action: 1 queue enable, 0 queue disable, 2 change txq's tid owner * Value is one of &enum iwl_scd_cfg_actions options * @aggregate: 1 aggregated queue, 0 otherwise * @tx_fifo: &enum iwl_mvm_tx_fifo * @window: BA window size * @ssn: SSN for the BA agreement * @reserved: reserved */ struct iwl_scd_txq_cfg_cmd { u8 token; u8 sta_id; u8 tid; u8 scd_queue; u8 action; u8 aggregate; u8 tx_fifo; u8 window; __le16 ssn; __le16 reserved; } __packed; /* SCD_QUEUE_CFG_CMD_API_S_VER_1 */ /** * struct iwl_scd_txq_cfg_rsp * @token: taken from the command * @sta_id: station id from the command * @tid: tid from the command * @scd_queue: scd_queue from the command */ struct iwl_scd_txq_cfg_rsp { u8 token; u8 sta_id; u8 tid; u8 scd_queue; } __packed; /* SCD_QUEUE_CFG_RSP_API_S_VER_1 */ #endif /* __iwl_fw_api_tx_h__ */ backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h000066400000000000000000000146701351064634500267510ustar00rootroot00000000000000/****************************************************************************** *
* This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright(c) 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright(c) 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #ifndef __iwl_fw_api_txq_h__ #define __iwl_fw_api_txq_h__ /* * DQA queue numbers * * @IWL_MVM_DQA_CMD_QUEUE: a queue reserved for sending HCMDs to the FW * @IWL_MVM_DQA_AUX_QUEUE: a queue reserved for aux frames * @IWL_MVM_DQA_P2P_DEVICE_QUEUE: a queue reserved for P2P device frames * @IWL_MVM_DQA_INJECT_MONITOR_QUEUE: a queue reserved for injection using * monitor mode. Note this queue is the same as the queue for P2P device * but we can't have active monitor mode along with P2P device anyway. 
* @IWL_MVM_DQA_GCAST_QUEUE: a queue reserved for P2P GO/SoftAP GCAST frames * @IWL_MVM_DQA_BSS_CLIENT_QUEUE: a queue reserved for BSS activity, to ensure * that we are never left without the possibility to connect to an AP. * @IWL_MVM_DQA_MIN_MGMT_QUEUE: first TXQ in pool for MGMT and non-QOS frames. * Each MGMT queue is mapped to a single STA * MGMT frames are frames that return true on ieee80211_is_mgmt() * @IWL_MVM_DQA_MAX_MGMT_QUEUE: last TXQ in pool for MGMT frames * @IWL_MVM_DQA_AP_PROBE_RESP_QUEUE: a queue reserved for P2P GO/SoftAP probe * responses * @IWL_MVM_DQA_MIN_DATA_QUEUE: first TXQ in pool for DATA frames. * DATA frames are intended for !ieee80211_is_mgmt() frames, but if * the MGMT TXQ pool is exhausted, mgmt frames can be sent on DATA queues * as well * @IWL_MVM_DQA_MAX_DATA_QUEUE: last TXQ in pool for DATA frames */ enum iwl_mvm_dqa_txq { IWL_MVM_DQA_CMD_QUEUE = 0, IWL_MVM_DQA_AUX_QUEUE = 1, IWL_MVM_DQA_P2P_DEVICE_QUEUE = 2, IWL_MVM_DQA_INJECT_MONITOR_QUEUE = 2, IWL_MVM_DQA_GCAST_QUEUE = 3, IWL_MVM_DQA_BSS_CLIENT_QUEUE = 4, IWL_MVM_DQA_MIN_MGMT_QUEUE = 5, IWL_MVM_DQA_MAX_MGMT_QUEUE = 8, IWL_MVM_DQA_AP_PROBE_RESP_QUEUE = 9, IWL_MVM_DQA_MIN_DATA_QUEUE = 10, IWL_MVM_DQA_MAX_DATA_QUEUE = 31, }; enum iwl_mvm_tx_fifo { IWL_MVM_TX_FIFO_BK = 0, IWL_MVM_TX_FIFO_BE, IWL_MVM_TX_FIFO_VI, IWL_MVM_TX_FIFO_VO, IWL_MVM_TX_FIFO_MCAST = 5, IWL_MVM_TX_FIFO_CMD = 7, }; enum iwl_gen2_tx_fifo { IWL_GEN2_TX_FIFO_CMD = 0, IWL_GEN2_EDCA_TX_FIFO_BK, IWL_GEN2_EDCA_TX_FIFO_BE, IWL_GEN2_EDCA_TX_FIFO_VI, IWL_GEN2_EDCA_TX_FIFO_VO, IWL_GEN2_TRIG_TX_FIFO_BK, IWL_GEN2_TRIG_TX_FIFO_BE, IWL_GEN2_TRIG_TX_FIFO_VI, IWL_GEN2_TRIG_TX_FIFO_VO, }; /** * enum iwl_tx_queue_cfg_actions - TXQ config options * @TX_QUEUE_CFG_ENABLE_QUEUE: enable a queue * @TX_QUEUE_CFG_TFD_SHORT_FORMAT: use short TFD format */ enum iwl_tx_queue_cfg_actions { TX_QUEUE_CFG_ENABLE_QUEUE = BIT(0), TX_QUEUE_CFG_TFD_SHORT_FORMAT = BIT(1), }; #define IWL_DEFAULT_QUEUE_SIZE 256 #define IWL_MGMT_QUEUE_SIZE 16 #define IWL_CMD_QUEUE_SIZE 32 /** * struct iwl_tx_queue_cfg_cmd - txq hw scheduler config command * @sta_id: station id * @tid: tid of the queue * @flags: see &enum iwl_tx_queue_cfg_actions * @cb_size: size of TFD cyclic buffer. Value is exponent - 3. * Minimum value 0 (8 TFDs), maximum value 5 (256 TFDs) * @byte_cnt_addr: address of byte count table * @tfdq_addr: address of TFD circular buffer */ struct iwl_tx_queue_cfg_cmd { u8 sta_id; u8 tid; __le16 flags; __le32 cb_size; __le64 byte_cnt_addr; __le64 tfdq_addr; } __packed; /* TX_QUEUE_CFG_CMD_API_S_VER_2 */ /** * struct iwl_tx_queue_cfg_rsp - response to txq hw scheduler config * @queue_number: queue number assigned to this RA -TID * @flags: set on failure * @write_pointer: initial value for write pointer * @reserved: reserved */ struct iwl_tx_queue_cfg_rsp { __le16 queue_number; __le16 flags; __le16 write_pointer; __le16 reserved; } __packed; /* TX_QUEUE_CFG_RSP_API_S_VER_2 */ #endif /* __iwl_fw_api_txq_h__ */ backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/fw/dbg.c000066400000000000000000002634061351064634500261160ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. 
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #include <linux/devcoredump.h> #include "iwl-drv.h" #include "runtime.h" #include "dbg.h" #include "debugfs.h" #include "iwl-io.h" #include "iwl-prph.h" #include "iwl-csr.h" /** * struct iwl_fw_dump_ptrs - set of pointers needed for the fw-error-dump * * @fwrt_ptr: pointer to the buffer coming from fwrt * @trans_ptr: pointer to struct %iwl_trans_dump_data which contains the * transport's data.
* @trans_len: length of the valid data in trans_ptr * @fwrt_len: length of the valid data in fwrt_ptr */ struct iwl_fw_dump_ptrs { struct iwl_trans_dump_data *trans_ptr; void *fwrt_ptr; u32 fwrt_len; }; #define RADIO_REG_MAX_READ 0x2ad static void iwl_read_radio_regs(struct iwl_fw_runtime *fwrt, struct iwl_fw_error_dump_data **dump_data) { u8 *pos = (void *)(*dump_data)->data; unsigned long flags; int i; IWL_DEBUG_INFO(fwrt, "WRT radio registers dump\n"); if (!iwl_trans_grab_nic_access(fwrt->trans, &flags)) return; (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RADIO_REG); (*dump_data)->len = cpu_to_le32(RADIO_REG_MAX_READ); for (i = 0; i < RADIO_REG_MAX_READ; i++) { u32 rd_cmd = RADIO_RSP_RD_CMD; rd_cmd |= i << RADIO_RSP_ADDR_POS; iwl_write_prph_no_grab(fwrt->trans, RSP_RADIO_CMD, rd_cmd); *pos = (u8)iwl_read_prph_no_grab(fwrt->trans, RSP_RADIO_RDDAT); pos++; } *dump_data = iwl_fw_error_next_data(*dump_data); iwl_trans_release_nic_access(fwrt->trans, &flags); } static void iwl_fwrt_dump_rxf(struct iwl_fw_runtime *fwrt, struct iwl_fw_error_dump_data **dump_data, int size, u32 offset, int fifo_num) { struct iwl_fw_error_dump_fifo *fifo_hdr; u32 *fifo_data; u32 fifo_len; int i; fifo_hdr = (void *)(*dump_data)->data; fifo_data = (void *)fifo_hdr->data; fifo_len = size; /* No need to try to read the data if the length is 0 */ if (fifo_len == 0) return; /* Add a TLV for the RXF */ (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RXF); (*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr)); fifo_hdr->fifo_num = cpu_to_le32(fifo_num); fifo_hdr->available_bytes = cpu_to_le32(iwl_trans_read_prph(fwrt->trans, RXF_RD_D_SPACE + offset)); fifo_hdr->wr_ptr = cpu_to_le32(iwl_trans_read_prph(fwrt->trans, RXF_RD_WR_PTR + offset)); fifo_hdr->rd_ptr = cpu_to_le32(iwl_trans_read_prph(fwrt->trans, RXF_RD_RD_PTR + offset)); fifo_hdr->fence_ptr = cpu_to_le32(iwl_trans_read_prph(fwrt->trans, RXF_RD_FENCE_PTR + offset)); fifo_hdr->fence_mode = cpu_to_le32(iwl_trans_read_prph(fwrt->trans, RXF_SET_FENCE_MODE + offset)); /* Lock fence */ iwl_trans_write_prph(fwrt->trans, RXF_SET_FENCE_MODE + offset, 0x1); /* Set fence pointer to the same place like WR pointer */ iwl_trans_write_prph(fwrt->trans, RXF_LD_WR2FENCE + offset, 0x1); /* Set fence offset */ iwl_trans_write_prph(fwrt->trans, RXF_LD_FENCE_OFFSET_ADDR + offset, 0x0); /* Read FIFO */ fifo_len /= sizeof(u32); /* Size in DWORDS */ for (i = 0; i < fifo_len; i++) fifo_data[i] = iwl_trans_read_prph(fwrt->trans, RXF_FIFO_RD_FENCE_INC + offset); *dump_data = iwl_fw_error_next_data(*dump_data); } static void iwl_fwrt_dump_txf(struct iwl_fw_runtime *fwrt, struct iwl_fw_error_dump_data **dump_data, int size, u32 offset, int fifo_num) { struct iwl_fw_error_dump_fifo *fifo_hdr; u32 *fifo_data; u32 fifo_len; int i; fifo_hdr = (void *)(*dump_data)->data; fifo_data = (void *)fifo_hdr->data; fifo_len = size; /* No need to try to read the data if the length is 0 */ if (fifo_len == 0) return; /* Add a TLV for the FIFO */ (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXF); (*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr)); fifo_hdr->fifo_num = cpu_to_le32(fifo_num); fifo_hdr->available_bytes = cpu_to_le32(iwl_trans_read_prph(fwrt->trans, TXF_FIFO_ITEM_CNT + offset)); fifo_hdr->wr_ptr = cpu_to_le32(iwl_trans_read_prph(fwrt->trans, TXF_WR_PTR + offset)); fifo_hdr->rd_ptr = cpu_to_le32(iwl_trans_read_prph(fwrt->trans, TXF_RD_PTR + offset)); fifo_hdr->fence_ptr = cpu_to_le32(iwl_trans_read_prph(fwrt->trans, TXF_FENCE_PTR + offset)); fifo_hdr->fence_mode = 
cpu_to_le32(iwl_trans_read_prph(fwrt->trans, TXF_LOCK_FENCE + offset)); /* Set the TXF_READ_MODIFY_ADDR to TXF_WR_PTR */ iwl_trans_write_prph(fwrt->trans, TXF_READ_MODIFY_ADDR + offset, TXF_WR_PTR + offset); /* Dummy-read to advance the read pointer to the head */ iwl_trans_read_prph(fwrt->trans, TXF_READ_MODIFY_DATA + offset); /* Read FIFO */ fifo_len /= sizeof(u32); /* Size in DWORDS */ for (i = 0; i < fifo_len; i++) fifo_data[i] = iwl_trans_read_prph(fwrt->trans, TXF_READ_MODIFY_DATA + offset); *dump_data = iwl_fw_error_next_data(*dump_data); } static void iwl_fw_dump_rxf(struct iwl_fw_runtime *fwrt, struct iwl_fw_error_dump_data **dump_data) { struct iwl_fwrt_shared_mem_cfg *cfg = &fwrt->smem_cfg; unsigned long flags; IWL_DEBUG_INFO(fwrt, "WRT RX FIFO dump\n"); if (!iwl_trans_grab_nic_access(fwrt->trans, &flags)) return; if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_RXF)) { /* Pull RXF1 */ iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->lmac[0].rxfifo1_size, 0, 0); /* Pull RXF2 */ iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->rxfifo2_size, RXF_DIFF_FROM_PREV + fwrt->trans->cfg->umac_prph_offset, 1); /* Pull LMAC2 RXF1 */ if (fwrt->smem_cfg.num_lmacs > 1) iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->lmac[1].rxfifo1_size, LMAC2_PRPH_OFFSET, 2); } iwl_trans_release_nic_access(fwrt->trans, &flags); } static void iwl_fw_dump_txf(struct iwl_fw_runtime *fwrt, struct iwl_fw_error_dump_data **dump_data) { struct iwl_fw_error_dump_fifo *fifo_hdr; struct iwl_fwrt_shared_mem_cfg *cfg = &fwrt->smem_cfg; u32 *fifo_data; u32 fifo_len; unsigned long flags; int i, j; IWL_DEBUG_INFO(fwrt, "WRT TX FIFO dump\n"); if (!iwl_trans_grab_nic_access(fwrt->trans, &flags)) return; if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_TXF)) { /* Pull TXF data from LMAC1 */ for (i = 0; i < fwrt->smem_cfg.num_txfifo_entries; i++) { /* Mark the number of TXF we're pulling now */ iwl_trans_write_prph(fwrt->trans, TXF_LARC_NUM, i); iwl_fwrt_dump_txf(fwrt, dump_data, cfg->lmac[0].txfifo_size[i], 0, i); } /* Pull TXF data from LMAC2 */ if (fwrt->smem_cfg.num_lmacs > 1) { for (i = 0; i < fwrt->smem_cfg.num_txfifo_entries; i++) { /* Mark the number of TXF we're pulling now */ iwl_trans_write_prph(fwrt->trans, TXF_LARC_NUM + LMAC2_PRPH_OFFSET, i); iwl_fwrt_dump_txf(fwrt, dump_data, cfg->lmac[1].txfifo_size[i], LMAC2_PRPH_OFFSET, i + cfg->num_txfifo_entries); } } } if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_INTERNAL_TXF) && fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) { /* Pull UMAC internal TXF data from all TXFs */ for (i = 0; i < ARRAY_SIZE(fwrt->smem_cfg.internal_txfifo_size); i++) { fifo_hdr = (void *)(*dump_data)->data; fifo_data = (void *)fifo_hdr->data; fifo_len = fwrt->smem_cfg.internal_txfifo_size[i]; /* No need to try to read the data if the length is 0 */ if (fifo_len == 0) continue; /* Add a TLV for the internal FIFOs */ (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_INTERNAL_TXF); (*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr)); fifo_hdr->fifo_num = cpu_to_le32(i); /* Mark the number of TXF we're pulling now */ iwl_trans_write_prph(fwrt->trans, TXF_CPU2_NUM, i + fwrt->smem_cfg.num_txfifo_entries); fifo_hdr->available_bytes = cpu_to_le32(iwl_trans_read_prph(fwrt->trans, TXF_CPU2_FIFO_ITEM_CNT)); fifo_hdr->wr_ptr = cpu_to_le32(iwl_trans_read_prph(fwrt->trans, TXF_CPU2_WR_PTR)); fifo_hdr->rd_ptr = cpu_to_le32(iwl_trans_read_prph(fwrt->trans, TXF_CPU2_RD_PTR)); fifo_hdr->fence_ptr = cpu_to_le32(iwl_trans_read_prph(fwrt->trans, TXF_CPU2_FENCE_PTR)); fifo_hdr->fence_mode = 
cpu_to_le32(iwl_trans_read_prph(fwrt->trans, TXF_CPU2_LOCK_FENCE)); /* Set TXF_CPU2_READ_MODIFY_ADDR to TXF_CPU2_WR_PTR */ iwl_trans_write_prph(fwrt->trans, TXF_CPU2_READ_MODIFY_ADDR, TXF_CPU2_WR_PTR); /* Dummy-read to advance the read pointer to head */ iwl_trans_read_prph(fwrt->trans, TXF_CPU2_READ_MODIFY_DATA); /* Read FIFO */ fifo_len /= sizeof(u32); /* Size in DWORDS */ for (j = 0; j < fifo_len; j++) fifo_data[j] = iwl_trans_read_prph(fwrt->trans, TXF_CPU2_READ_MODIFY_DATA); *dump_data = iwl_fw_error_next_data(*dump_data); } } iwl_trans_release_nic_access(fwrt->trans, &flags); } #define IWL8260_ICCM_OFFSET 0x44000 /* Only for B-step */ #define IWL8260_ICCM_LEN 0xC000 /* Only for B-step */ struct iwl_prph_range { u32 start, end; }; static const struct iwl_prph_range iwl_prph_dump_addr_comm[] = { { .start = 0x00a00000, .end = 0x00a00000 }, { .start = 0x00a0000c, .end = 0x00a00024 }, { .start = 0x00a0002c, .end = 0x00a0003c }, { .start = 0x00a00410, .end = 0x00a00418 }, { .start = 0x00a00420, .end = 0x00a00420 }, { .start = 0x00a00428, .end = 0x00a00428 }, { .start = 0x00a00430, .end = 0x00a0043c }, { .start = 0x00a00444, .end = 0x00a00444 }, { .start = 0x00a004c0, .end = 0x00a004cc }, { .start = 0x00a004d8, .end = 0x00a004d8 }, { .start = 0x00a004e0, .end = 0x00a004f0 }, { .start = 0x00a00840, .end = 0x00a00840 }, { .start = 0x00a00850, .end = 0x00a00858 }, { .start = 0x00a01004, .end = 0x00a01008 }, { .start = 0x00a01010, .end = 0x00a01010 }, { .start = 0x00a01018, .end = 0x00a01018 }, { .start = 0x00a01024, .end = 0x00a01024 }, { .start = 0x00a0102c, .end = 0x00a01034 }, { .start = 0x00a0103c, .end = 0x00a01040 }, { .start = 0x00a01048, .end = 0x00a01094 }, { .start = 0x00a01c00, .end = 0x00a01c20 }, { .start = 0x00a01c58, .end = 0x00a01c58 }, { .start = 0x00a01c7c, .end = 0x00a01c7c }, { .start = 0x00a01c28, .end = 0x00a01c54 }, { .start = 0x00a01c5c, .end = 0x00a01c5c }, { .start = 0x00a01c60, .end = 0x00a01cdc }, { .start = 0x00a01ce0, .end = 0x00a01d0c }, { .start = 0x00a01d18, .end = 0x00a01d20 }, { .start = 0x00a01d2c, .end = 0x00a01d30 }, { .start = 0x00a01d40, .end = 0x00a01d5c }, { .start = 0x00a01d80, .end = 0x00a01d80 }, { .start = 0x00a01d98, .end = 0x00a01d9c }, { .start = 0x00a01da8, .end = 0x00a01da8 }, { .start = 0x00a01db8, .end = 0x00a01df4 }, { .start = 0x00a01dc0, .end = 0x00a01dfc }, { .start = 0x00a01e00, .end = 0x00a01e2c }, { .start = 0x00a01e40, .end = 0x00a01e60 }, { .start = 0x00a01e68, .end = 0x00a01e6c }, { .start = 0x00a01e74, .end = 0x00a01e74 }, { .start = 0x00a01e84, .end = 0x00a01e90 }, { .start = 0x00a01e9c, .end = 0x00a01ec4 }, { .start = 0x00a01ed0, .end = 0x00a01ee0 }, { .start = 0x00a01f00, .end = 0x00a01f1c }, { .start = 0x00a01f44, .end = 0x00a01ffc }, { .start = 0x00a02000, .end = 0x00a02048 }, { .start = 0x00a02068, .end = 0x00a020f0 }, { .start = 0x00a02100, .end = 0x00a02118 }, { .start = 0x00a02140, .end = 0x00a0214c }, { .start = 0x00a02168, .end = 0x00a0218c }, { .start = 0x00a021c0, .end = 0x00a021c0 }, { .start = 0x00a02400, .end = 0x00a02410 }, { .start = 0x00a02418, .end = 0x00a02420 }, { .start = 0x00a02428, .end = 0x00a0242c }, { .start = 0x00a02434, .end = 0x00a02434 }, { .start = 0x00a02440, .end = 0x00a02460 }, { .start = 0x00a02468, .end = 0x00a024b0 }, { .start = 0x00a024c8, .end = 0x00a024cc }, { .start = 0x00a02500, .end = 0x00a02504 }, { .start = 0x00a0250c, .end = 0x00a02510 }, { .start = 0x00a02540, .end = 0x00a02554 }, { .start = 0x00a02580, .end = 0x00a025f4 }, { .start = 0x00a02600, .end = 0x00a0260c }, { .start = 
0x00a02648, .end = 0x00a02650 }, { .start = 0x00a02680, .end = 0x00a02680 }, { .start = 0x00a026c0, .end = 0x00a026d0 }, { .start = 0x00a02700, .end = 0x00a0270c }, { .start = 0x00a02804, .end = 0x00a02804 }, { .start = 0x00a02818, .end = 0x00a0281c }, { .start = 0x00a02c00, .end = 0x00a02db4 }, { .start = 0x00a02df4, .end = 0x00a02fb0 }, { .start = 0x00a03000, .end = 0x00a03014 }, { .start = 0x00a0301c, .end = 0x00a0302c }, { .start = 0x00a03034, .end = 0x00a03038 }, { .start = 0x00a03040, .end = 0x00a03048 }, { .start = 0x00a03060, .end = 0x00a03068 }, { .start = 0x00a03070, .end = 0x00a03074 }, { .start = 0x00a0307c, .end = 0x00a0307c }, { .start = 0x00a03080, .end = 0x00a03084 }, { .start = 0x00a0308c, .end = 0x00a03090 }, { .start = 0x00a03098, .end = 0x00a03098 }, { .start = 0x00a030a0, .end = 0x00a030a0 }, { .start = 0x00a030a8, .end = 0x00a030b4 }, { .start = 0x00a030bc, .end = 0x00a030bc }, { .start = 0x00a030c0, .end = 0x00a0312c }, { .start = 0x00a03c00, .end = 0x00a03c5c }, { .start = 0x00a04400, .end = 0x00a04454 }, { .start = 0x00a04460, .end = 0x00a04474 }, { .start = 0x00a044c0, .end = 0x00a044ec }, { .start = 0x00a04500, .end = 0x00a04504 }, { .start = 0x00a04510, .end = 0x00a04538 }, { .start = 0x00a04540, .end = 0x00a04548 }, { .start = 0x00a04560, .end = 0x00a0457c }, { .start = 0x00a04590, .end = 0x00a04598 }, { .start = 0x00a045c0, .end = 0x00a045f4 }, }; static const struct iwl_prph_range iwl_prph_dump_addr_9000[] = { { .start = 0x00a05c00, .end = 0x00a05c18 }, { .start = 0x00a05400, .end = 0x00a056e8 }, { .start = 0x00a08000, .end = 0x00a098bc }, { .start = 0x00a02400, .end = 0x00a02758 }, { .start = 0x00a04764, .end = 0x00a0476c }, { .start = 0x00a04770, .end = 0x00a04774 }, { .start = 0x00a04620, .end = 0x00a04624 }, }; static const struct iwl_prph_range iwl_prph_dump_addr_22000[] = { { .start = 0x00a00000, .end = 0x00a00000 }, { .start = 0x00a0000c, .end = 0x00a00024 }, { .start = 0x00a0002c, .end = 0x00a00034 }, { .start = 0x00a0003c, .end = 0x00a0003c }, { .start = 0x00a00410, .end = 0x00a00418 }, { .start = 0x00a00420, .end = 0x00a00420 }, { .start = 0x00a00428, .end = 0x00a00428 }, { .start = 0x00a00430, .end = 0x00a0043c }, { .start = 0x00a00444, .end = 0x00a00444 }, { .start = 0x00a00840, .end = 0x00a00840 }, { .start = 0x00a00850, .end = 0x00a00858 }, { .start = 0x00a01004, .end = 0x00a01008 }, { .start = 0x00a01010, .end = 0x00a01010 }, { .start = 0x00a01018, .end = 0x00a01018 }, { .start = 0x00a01024, .end = 0x00a01024 }, { .start = 0x00a0102c, .end = 0x00a01034 }, { .start = 0x00a0103c, .end = 0x00a01040 }, { .start = 0x00a01048, .end = 0x00a01050 }, { .start = 0x00a01058, .end = 0x00a01058 }, { .start = 0x00a01060, .end = 0x00a01070 }, { .start = 0x00a0108c, .end = 0x00a0108c }, { .start = 0x00a01c20, .end = 0x00a01c28 }, { .start = 0x00a01d10, .end = 0x00a01d10 }, { .start = 0x00a01e28, .end = 0x00a01e2c }, { .start = 0x00a01e60, .end = 0x00a01e60 }, { .start = 0x00a01e80, .end = 0x00a01e80 }, { .start = 0x00a01ea0, .end = 0x00a01ea0 }, { .start = 0x00a02000, .end = 0x00a0201c }, { .start = 0x00a02024, .end = 0x00a02024 }, { .start = 0x00a02040, .end = 0x00a02048 }, { .start = 0x00a020c0, .end = 0x00a020e0 }, { .start = 0x00a02400, .end = 0x00a02404 }, { .start = 0x00a0240c, .end = 0x00a02414 }, { .start = 0x00a0241c, .end = 0x00a0243c }, { .start = 0x00a02448, .end = 0x00a024bc }, { .start = 0x00a024c4, .end = 0x00a024cc }, { .start = 0x00a02508, .end = 0x00a02508 }, { .start = 0x00a02510, .end = 0x00a02514 }, { .start = 0x00a0251c, .end = 0x00a0251c 
}, { .start = 0x00a0252c, .end = 0x00a0255c }, { .start = 0x00a02564, .end = 0x00a025a0 }, { .start = 0x00a025a8, .end = 0x00a025b4 }, { .start = 0x00a025c0, .end = 0x00a025c0 }, { .start = 0x00a025e8, .end = 0x00a025f4 }, { .start = 0x00a02c08, .end = 0x00a02c18 }, { .start = 0x00a02c2c, .end = 0x00a02c38 }, { .start = 0x00a02c68, .end = 0x00a02c78 }, { .start = 0x00a03000, .end = 0x00a03000 }, { .start = 0x00a03010, .end = 0x00a03014 }, { .start = 0x00a0301c, .end = 0x00a0302c }, { .start = 0x00a03034, .end = 0x00a03038 }, { .start = 0x00a03040, .end = 0x00a03044 }, { .start = 0x00a03060, .end = 0x00a03068 }, { .start = 0x00a03070, .end = 0x00a03070 }, { .start = 0x00a0307c, .end = 0x00a03084 }, { .start = 0x00a0308c, .end = 0x00a03090 }, { .start = 0x00a03098, .end = 0x00a03098 }, { .start = 0x00a030a0, .end = 0x00a030a0 }, { .start = 0x00a030a8, .end = 0x00a030b4 }, { .start = 0x00a030bc, .end = 0x00a030c0 }, { .start = 0x00a030c8, .end = 0x00a030f4 }, { .start = 0x00a03100, .end = 0x00a0312c }, { .start = 0x00a03c00, .end = 0x00a03c5c }, { .start = 0x00a04400, .end = 0x00a04454 }, { .start = 0x00a04460, .end = 0x00a04474 }, { .start = 0x00a044c0, .end = 0x00a044ec }, { .start = 0x00a04500, .end = 0x00a04504 }, { .start = 0x00a04510, .end = 0x00a04538 }, { .start = 0x00a04540, .end = 0x00a04548 }, { .start = 0x00a04560, .end = 0x00a04560 }, { .start = 0x00a04570, .end = 0x00a0457c }, { .start = 0x00a04590, .end = 0x00a04590 }, { .start = 0x00a04598, .end = 0x00a04598 }, { .start = 0x00a045c0, .end = 0x00a045f4 }, { .start = 0x00a05c18, .end = 0x00a05c1c }, { .start = 0x00a0c000, .end = 0x00a0c018 }, { .start = 0x00a0c020, .end = 0x00a0c028 }, { .start = 0x00a0c038, .end = 0x00a0c094 }, { .start = 0x00a0c0c0, .end = 0x00a0c104 }, { .start = 0x00a0c10c, .end = 0x00a0c118 }, { .start = 0x00a0c150, .end = 0x00a0c174 }, { .start = 0x00a0c17c, .end = 0x00a0c188 }, { .start = 0x00a0c190, .end = 0x00a0c198 }, { .start = 0x00a0c1a0, .end = 0x00a0c1a8 }, { .start = 0x00a0c1b0, .end = 0x00a0c1b8 }, }; static const struct iwl_prph_range iwl_prph_dump_addr_ax210[] = { { .start = 0x00d03c00, .end = 0x00d03c64 }, { .start = 0x00d05c18, .end = 0x00d05c1c }, { .start = 0x00d0c000, .end = 0x00d0c174 }, }; static void iwl_read_prph_block(struct iwl_trans *trans, u32 start, u32 len_bytes, __le32 *data) { u32 i; for (i = 0; i < len_bytes; i += 4) *data++ = cpu_to_le32(iwl_read_prph_no_grab(trans, start + i)); } static void iwl_dump_prph(struct iwl_fw_runtime *fwrt, const struct iwl_prph_range *iwl_prph_dump_addr, u32 range_len, void *ptr) { struct iwl_fw_error_dump_prph *prph; struct iwl_trans *trans = fwrt->trans; struct iwl_fw_error_dump_data **data = (struct iwl_fw_error_dump_data **)ptr; unsigned long flags; u32 i; if (!data) return; IWL_DEBUG_INFO(trans, "WRT PRPH dump\n"); if (!iwl_trans_grab_nic_access(trans, &flags)) return; for (i = 0; i < range_len; i++) { /* The range includes both boundaries */ int num_bytes_in_chunk = iwl_prph_dump_addr[i].end - iwl_prph_dump_addr[i].start + 4; (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PRPH); (*data)->len = cpu_to_le32(sizeof(*prph) + num_bytes_in_chunk); prph = (void *)(*data)->data; prph->prph_start = cpu_to_le32(iwl_prph_dump_addr[i].start); iwl_read_prph_block(trans, iwl_prph_dump_addr[i].start, /* our range is inclusive, hence + 4 */ iwl_prph_dump_addr[i].end - iwl_prph_dump_addr[i].start + 4, (void *)prph->data); *data = iwl_fw_error_next_data(*data); } iwl_trans_release_nic_access(trans, &flags); } /* * alloc_sgtable - allocates scallerlist table in 
the given size, * fills it with pages and returns it * @size: the size (in bytes) of the table */ static struct scatterlist *alloc_sgtable(int size) { int alloc_size, nents, i; struct page *new_page; struct scatterlist *iter; struct scatterlist *table; nents = DIV_ROUND_UP(size, PAGE_SIZE); table = kcalloc(nents, sizeof(*table), GFP_KERNEL); if (!table) return NULL; sg_init_table(table, nents); iter = table; for_each_sg(table, iter, sg_nents(table), i) { new_page = alloc_page(GFP_KERNEL); if (!new_page) { /* release all previous allocated pages in the table */ iter = table; for_each_sg(table, iter, sg_nents(table), i) { new_page = sg_page(iter); if (new_page) __free_page(new_page); } return NULL; } alloc_size = min_t(int, size, PAGE_SIZE); size -= PAGE_SIZE; sg_set_page(iter, new_page, alloc_size, 0); } return table; } static void iwl_fw_get_prph_len(struct iwl_fw_runtime *fwrt, const struct iwl_prph_range *iwl_prph_dump_addr, u32 range_len, void *ptr) { u32 *prph_len = (u32 *)ptr; int i, num_bytes_in_chunk; if (!prph_len) return; for (i = 0; i < range_len; i++) { /* The range includes both boundaries */ num_bytes_in_chunk = iwl_prph_dump_addr[i].end - iwl_prph_dump_addr[i].start + 4; *prph_len += sizeof(struct iwl_fw_error_dump_data) + sizeof(struct iwl_fw_error_dump_prph) + num_bytes_in_chunk; } } static void iwl_fw_prph_handler(struct iwl_fw_runtime *fwrt, void *ptr, void (*handler)(struct iwl_fw_runtime *, const struct iwl_prph_range *, u32, void *)) { u32 range_len; if (fwrt->trans->cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { range_len = ARRAY_SIZE(iwl_prph_dump_addr_ax210); handler(fwrt, iwl_prph_dump_addr_ax210, range_len, ptr); } else if (fwrt->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000) { range_len = ARRAY_SIZE(iwl_prph_dump_addr_22000); handler(fwrt, iwl_prph_dump_addr_22000, range_len, ptr); } else { range_len = ARRAY_SIZE(iwl_prph_dump_addr_comm); handler(fwrt, iwl_prph_dump_addr_comm, range_len, ptr); if (fwrt->trans->cfg->mq_rx_supported) { range_len = ARRAY_SIZE(iwl_prph_dump_addr_9000); handler(fwrt, iwl_prph_dump_addr_9000, range_len, ptr); } } } static void iwl_fw_dump_mem(struct iwl_fw_runtime *fwrt, struct iwl_fw_error_dump_data **dump_data, u32 len, u32 ofs, u32 type) { struct iwl_fw_error_dump_mem *dump_mem; if (!len) return; (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); (*dump_data)->len = cpu_to_le32(len + sizeof(*dump_mem)); dump_mem = (void *)(*dump_data)->data; dump_mem->type = cpu_to_le32(type); dump_mem->offset = cpu_to_le32(ofs); iwl_trans_read_mem_bytes(fwrt->trans, ofs, dump_mem->data, len); *dump_data = iwl_fw_error_next_data(*dump_data); IWL_DEBUG_INFO(fwrt, "WRT memory dump. 
Type=%u\n", dump_mem->type); } #define ADD_LEN(len, item_len, const_len) \ do {size_t item = item_len; len += (!!item) * const_len + item; } \ while (0) static int iwl_fw_rxf_len(struct iwl_fw_runtime *fwrt, struct iwl_fwrt_shared_mem_cfg *mem_cfg) { size_t hdr_len = sizeof(struct iwl_fw_error_dump_data) + sizeof(struct iwl_fw_error_dump_fifo); u32 fifo_len = 0; int i; if (!iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_RXF)) return 0; /* Count RXF2 size */ ADD_LEN(fifo_len, mem_cfg->rxfifo2_size, hdr_len); /* Count RXF1 sizes */ if (WARN_ON(mem_cfg->num_lmacs > MAX_NUM_LMAC)) mem_cfg->num_lmacs = MAX_NUM_LMAC; for (i = 0; i < mem_cfg->num_lmacs; i++) ADD_LEN(fifo_len, mem_cfg->lmac[i].rxfifo1_size, hdr_len); return fifo_len; } static int iwl_fw_txf_len(struct iwl_fw_runtime *fwrt, struct iwl_fwrt_shared_mem_cfg *mem_cfg) { size_t hdr_len = sizeof(struct iwl_fw_error_dump_data) + sizeof(struct iwl_fw_error_dump_fifo); u32 fifo_len = 0; int i; if (!iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_TXF)) goto dump_internal_txf; /* Count TXF sizes */ if (WARN_ON(mem_cfg->num_lmacs > MAX_NUM_LMAC)) mem_cfg->num_lmacs = MAX_NUM_LMAC; for (i = 0; i < mem_cfg->num_lmacs; i++) { int j; for (j = 0; j < mem_cfg->num_txfifo_entries; j++) ADD_LEN(fifo_len, mem_cfg->lmac[i].txfifo_size[j], hdr_len); } dump_internal_txf: if (!(iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_INTERNAL_TXF) && fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG))) goto out; for (i = 0; i < ARRAY_SIZE(mem_cfg->internal_txfifo_size); i++) ADD_LEN(fifo_len, mem_cfg->internal_txfifo_size[i], hdr_len); out: return fifo_len; } static void iwl_dump_paging(struct iwl_fw_runtime *fwrt, struct iwl_fw_error_dump_data **data) { int i; IWL_DEBUG_INFO(fwrt, "WRT paging dump\n"); for (i = 1; i < fwrt->num_of_paging_blk + 1; i++) { struct iwl_fw_error_dump_paging *paging; struct page *pages = fwrt->fw_paging_db[i].fw_paging_block; dma_addr_t addr = fwrt->fw_paging_db[i].fw_paging_phys; (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING); (*data)->len = cpu_to_le32(sizeof(*paging) + PAGING_BLOCK_SIZE); paging = (void *)(*data)->data; paging->index = cpu_to_le32(i); dma_sync_single_for_cpu(fwrt->trans->dev, addr, PAGING_BLOCK_SIZE, DMA_BIDIRECTIONAL); memcpy(paging->data, page_address(pages), PAGING_BLOCK_SIZE); dma_sync_single_for_device(fwrt->trans->dev, addr, PAGING_BLOCK_SIZE, DMA_BIDIRECTIONAL); (*data) = iwl_fw_error_next_data(*data); } } static struct iwl_fw_error_dump_file * iwl_fw_error_dump_file(struct iwl_fw_runtime *fwrt, struct iwl_fw_dump_ptrs *fw_error_dump) { struct iwl_fw_error_dump_file *dump_file; struct iwl_fw_error_dump_data *dump_data; struct iwl_fw_error_dump_info *dump_info; struct iwl_fw_error_dump_smem_cfg *dump_smem_cfg; struct iwl_fw_error_dump_trigger_desc *dump_trig; u32 sram_len, sram_ofs; const struct iwl_fw_dbg_mem_seg_tlv *fw_mem = fwrt->fw->dbg.mem_tlv; struct iwl_fwrt_shared_mem_cfg *mem_cfg = &fwrt->smem_cfg; u32 file_len, fifo_len = 0, prph_len = 0, radio_len = 0; u32 smem_len = fwrt->fw->dbg.n_mem_tlv ? 0 : fwrt->trans->cfg->smem_len; u32 sram2_len = fwrt->fw->dbg.n_mem_tlv ? 
0 : fwrt->trans->cfg->dccm2_len; int i; /* SRAM - include stack CCM if driver knows the values for it */ if (!fwrt->trans->cfg->dccm_offset || !fwrt->trans->cfg->dccm_len) { const struct fw_img *img; if (fwrt->cur_fw_img >= IWL_UCODE_TYPE_MAX) return NULL; img = &fwrt->fw->img[fwrt->cur_fw_img]; sram_ofs = img->sec[IWL_UCODE_SECTION_DATA].offset; sram_len = img->sec[IWL_UCODE_SECTION_DATA].len; } else { sram_ofs = fwrt->trans->cfg->dccm_offset; sram_len = fwrt->trans->cfg->dccm_len; } /* reading RXF/TXF sizes */ if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status)) { fifo_len = iwl_fw_rxf_len(fwrt, mem_cfg); fifo_len += iwl_fw_txf_len(fwrt, mem_cfg); /* Make room for PRPH registers */ if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_PRPH)) iwl_fw_prph_handler(fwrt, &prph_len, iwl_fw_get_prph_len); if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000 && iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_RADIO_REG)) radio_len = sizeof(*dump_data) + RADIO_REG_MAX_READ; } file_len = sizeof(*dump_file) + fifo_len + prph_len + radio_len; if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_DEV_FW_INFO)) file_len += sizeof(*dump_data) + sizeof(*dump_info); if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_MEM_CFG)) file_len += sizeof(*dump_data) + sizeof(*dump_smem_cfg); if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_MEM)) { size_t hdr_len = sizeof(*dump_data) + sizeof(struct iwl_fw_error_dump_mem); /* Dump SRAM only if no mem_tlvs */ if (!fwrt->fw->dbg.n_mem_tlv) ADD_LEN(file_len, sram_len, hdr_len); /* Make room for all mem types that exist */ ADD_LEN(file_len, smem_len, hdr_len); ADD_LEN(file_len, sram2_len, hdr_len); for (i = 0; i < fwrt->fw->dbg.n_mem_tlv; i++) ADD_LEN(file_len, le32_to_cpu(fw_mem[i].len), hdr_len); } /* Make room for fw's virtual image pages, if it exists */ if (iwl_fw_dbg_is_paging_enabled(fwrt)) file_len += fwrt->num_of_paging_blk * (sizeof(*dump_data) + sizeof(struct iwl_fw_error_dump_paging) + PAGING_BLOCK_SIZE); if (iwl_fw_dbg_is_d3_debug_enabled(fwrt) && fwrt->dump.d3_debug_data) { file_len += sizeof(*dump_data) + fwrt->trans->cfg->d3_debug_data_length * 2; } /* If we only want a monitor dump, reset the file length */ if (fwrt->dump.monitor_only) { file_len = sizeof(*dump_file) + sizeof(*dump_data) * 2 + sizeof(*dump_info) + sizeof(*dump_smem_cfg); } if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_ERROR_INFO) && fwrt->dump.desc) file_len += sizeof(*dump_data) + sizeof(*dump_trig) + fwrt->dump.desc->len; dump_file = vzalloc(file_len); if (!dump_file) return NULL; fw_error_dump->fwrt_ptr = dump_file; dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER); dump_data = (void *)dump_file->data; if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_DEV_FW_INFO)) { dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_DEV_FW_INFO); dump_data->len = cpu_to_le32(sizeof(*dump_info)); dump_info = (void *)dump_data->data; dump_info->hw_type = cpu_to_le32(CSR_HW_REV_TYPE(fwrt->trans->hw_rev)); dump_info->hw_step = cpu_to_le32(CSR_HW_REV_STEP(fwrt->trans->hw_rev)); memcpy(dump_info->fw_human_readable, fwrt->fw->human_readable, sizeof(dump_info->fw_human_readable)); strncpy(dump_info->dev_human_readable, fwrt->trans->cfg->name, sizeof(dump_info->dev_human_readable) - 1); strncpy(dump_info->bus_human_readable, fwrt->dev->bus->name, sizeof(dump_info->bus_human_readable) - 1); dump_info->num_of_lmacs = fwrt->smem_cfg.num_lmacs; dump_info->lmac_err_id[0] = cpu_to_le32(fwrt->dump.lmac_err_id[0]); if (fwrt->smem_cfg.num_lmacs > 1) dump_info->lmac_err_id[1] = cpu_to_le32(fwrt->dump.lmac_err_id[1]); 
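/* descriptive note: the UMAC has a single error table, so a single error id follows the per-LMAC ids recorded above */ 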
dump_info->umac_err_id = cpu_to_le32(fwrt->dump.umac_err_id); dump_data = iwl_fw_error_next_data(dump_data); } if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_MEM_CFG)) { /* Dump shared memory configuration */ dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_CFG); dump_data->len = cpu_to_le32(sizeof(*dump_smem_cfg)); dump_smem_cfg = (void *)dump_data->data; dump_smem_cfg->num_lmacs = cpu_to_le32(mem_cfg->num_lmacs); dump_smem_cfg->num_txfifo_entries = cpu_to_le32(mem_cfg->num_txfifo_entries); for (i = 0; i < MAX_NUM_LMAC; i++) { int j; u32 *txf_size = mem_cfg->lmac[i].txfifo_size; for (j = 0; j < TX_FIFO_MAX_NUM; j++) dump_smem_cfg->lmac[i].txfifo_size[j] = cpu_to_le32(txf_size[j]); dump_smem_cfg->lmac[i].rxfifo1_size = cpu_to_le32(mem_cfg->lmac[i].rxfifo1_size); } dump_smem_cfg->rxfifo2_size = cpu_to_le32(mem_cfg->rxfifo2_size); dump_smem_cfg->internal_txfifo_addr = cpu_to_le32(mem_cfg->internal_txfifo_addr); for (i = 0; i < TX_FIFO_INTERNAL_MAX_NUM; i++) { dump_smem_cfg->internal_txfifo_size[i] = cpu_to_le32(mem_cfg->internal_txfifo_size[i]); } dump_data = iwl_fw_error_next_data(dump_data); } /* We only dump the FIFOs if the FW is in error state */ if (fifo_len) { iwl_fw_dump_rxf(fwrt, &dump_data); iwl_fw_dump_txf(fwrt, &dump_data); } if (radio_len) iwl_read_radio_regs(fwrt, &dump_data); if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_ERROR_INFO) && fwrt->dump.desc) { dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_ERROR_INFO); dump_data->len = cpu_to_le32(sizeof(*dump_trig) + fwrt->dump.desc->len); dump_trig = (void *)dump_data->data; memcpy(dump_trig, &fwrt->dump.desc->trig_desc, sizeof(*dump_trig) + fwrt->dump.desc->len); dump_data = iwl_fw_error_next_data(dump_data); } /* In case we only want a monitor dump, skip to dumping transport data */ if (fwrt->dump.monitor_only) goto out; if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_MEM)) { const struct iwl_fw_dbg_mem_seg_tlv *fw_dbg_mem = fwrt->fw->dbg.mem_tlv; if (!fwrt->fw->dbg.n_mem_tlv) iwl_fw_dump_mem(fwrt, &dump_data, sram_len, sram_ofs, IWL_FW_ERROR_DUMP_MEM_SRAM); for (i = 0; i < fwrt->fw->dbg.n_mem_tlv; i++) { u32 len = le32_to_cpu(fw_dbg_mem[i].len); u32 ofs = le32_to_cpu(fw_dbg_mem[i].ofs); iwl_fw_dump_mem(fwrt, &dump_data, len, ofs, le32_to_cpu(fw_dbg_mem[i].data_type)); } iwl_fw_dump_mem(fwrt, &dump_data, smem_len, fwrt->trans->cfg->smem_offset, IWL_FW_ERROR_DUMP_MEM_SMEM); iwl_fw_dump_mem(fwrt, &dump_data, sram2_len, fwrt->trans->cfg->dccm2_offset, IWL_FW_ERROR_DUMP_MEM_SRAM); } if (iwl_fw_dbg_is_d3_debug_enabled(fwrt) && fwrt->dump.d3_debug_data) { u32 addr = fwrt->trans->cfg->d3_debug_data_base_addr; size_t data_size = fwrt->trans->cfg->d3_debug_data_length; dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_D3_DEBUG_DATA); dump_data->len = cpu_to_le32(data_size * 2); memcpy(dump_data->data, fwrt->dump.d3_debug_data, data_size); kfree(fwrt->dump.d3_debug_data); fwrt->dump.d3_debug_data = NULL; iwl_trans_read_mem_bytes(fwrt->trans, addr, dump_data->data + data_size, data_size); dump_data = iwl_fw_error_next_data(dump_data); } /* Dump fw's virtual image */ if (iwl_fw_dbg_is_paging_enabled(fwrt)) iwl_dump_paging(fwrt, &dump_data); if (prph_len) iwl_fw_prph_handler(fwrt, &dump_data, iwl_dump_prph); out: dump_file->file_len = cpu_to_le32(file_len); return dump_file; } static int iwl_dump_ini_prph_iter(struct iwl_fw_runtime *fwrt, struct iwl_fw_ini_region_cfg *reg, void *range_ptr, int idx) { struct iwl_fw_ini_error_dump_range *range = range_ptr; __le32 *val = range->data; u32 prph_val; u32 addr = le32_to_cpu(reg->start_addr[idx]) + 
le32_to_cpu(reg->offset); int i; range->internal_base_addr = cpu_to_le32(addr); range->range_data_size = reg->internal.range_data_size; for (i = 0; i < le32_to_cpu(reg->internal.range_data_size); i += 4) { prph_val = iwl_read_prph(fwrt->trans, addr + i); if (prph_val == 0x5a5a5a5a) return -EBUSY; *val++ = cpu_to_le32(prph_val); } return sizeof(*range) + le32_to_cpu(range->range_data_size); } static int iwl_dump_ini_csr_iter(struct iwl_fw_runtime *fwrt, struct iwl_fw_ini_region_cfg *reg, void *range_ptr, int idx) { struct iwl_fw_ini_error_dump_range *range = range_ptr; __le32 *val = range->data; u32 addr = le32_to_cpu(reg->start_addr[idx]) + le32_to_cpu(reg->offset); int i; range->internal_base_addr = cpu_to_le32(addr); range->range_data_size = reg->internal.range_data_size; for (i = 0; i < le32_to_cpu(reg->internal.range_data_size); i += 4) *val++ = cpu_to_le32(iwl_trans_read32(fwrt->trans, addr + i)); return sizeof(*range) + le32_to_cpu(range->range_data_size); } static int iwl_dump_ini_dev_mem_iter(struct iwl_fw_runtime *fwrt, struct iwl_fw_ini_region_cfg *reg, void *range_ptr, int idx) { struct iwl_fw_ini_error_dump_range *range = range_ptr; u32 addr = le32_to_cpu(reg->start_addr[idx]) + le32_to_cpu(reg->offset); range->internal_base_addr = cpu_to_le32(addr); range->range_data_size = reg->internal.range_data_size; iwl_trans_read_mem_bytes(fwrt->trans, addr, range->data, le32_to_cpu(reg->internal.range_data_size)); return sizeof(*range) + le32_to_cpu(range->range_data_size); } static int iwl_dump_ini_paging_gen2_iter(struct iwl_fw_runtime *fwrt, struct iwl_fw_ini_region_cfg *reg, void *range_ptr, int idx) { struct iwl_fw_ini_error_dump_range *range = range_ptr; u32 page_size = fwrt->trans->init_dram.paging[idx].size; range->page_num = cpu_to_le32(idx); range->range_data_size = cpu_to_le32(page_size); memcpy(range->data, fwrt->trans->init_dram.paging[idx].block, page_size); return sizeof(*range) + le32_to_cpu(range->range_data_size); } static int iwl_dump_ini_paging_iter(struct iwl_fw_runtime *fwrt, struct iwl_fw_ini_region_cfg *reg, void *range_ptr, int idx) { /* increase idx by 1 since the pages are from 1 to * fwrt->num_of_paging_blk + 1 */ struct page *page = fwrt->fw_paging_db[++idx].fw_paging_block; struct iwl_fw_ini_error_dump_range *range = range_ptr; dma_addr_t addr = fwrt->fw_paging_db[idx].fw_paging_phys; u32 page_size = fwrt->fw_paging_db[idx].fw_paging_size; range->page_num = cpu_to_le32(idx); range->range_data_size = cpu_to_le32(page_size); dma_sync_single_for_cpu(fwrt->trans->dev, addr, page_size, DMA_BIDIRECTIONAL); memcpy(range->data, page_address(page), page_size); dma_sync_single_for_device(fwrt->trans->dev, addr, page_size, DMA_BIDIRECTIONAL); return sizeof(*range) + le32_to_cpu(range->range_data_size); } static int iwl_dump_ini_mon_dram_iter(struct iwl_fw_runtime *fwrt, struct iwl_fw_ini_region_cfg *reg, void *range_ptr, int idx) { struct iwl_fw_ini_error_dump_range *range = range_ptr; u32 start_addr = iwl_read_umac_prph(fwrt->trans, MON_BUFF_BASE_ADDR_VER2); if (start_addr == 0x5a5a5a5a) return -EBUSY; range->dram_base_addr = cpu_to_le64(start_addr); range->range_data_size = cpu_to_le32(fwrt->trans->dbg.fw_mon[idx].size); memcpy(range->data, fwrt->trans->dbg.fw_mon[idx].block, fwrt->trans->dbg.fw_mon[idx].size); return sizeof(*range) + le32_to_cpu(range->range_data_size); } struct iwl_ini_txf_iter_data { int fifo; int lmac; u32 fifo_size; bool internal_txf; bool init; }; static bool iwl_ini_txf_iter(struct iwl_fw_runtime *fwrt, struct iwl_fw_ini_region_cfg *reg) { 
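/* descriptive note: stateful TXF iterator - each call advances to the next non-empty fifo that is set in the region's fifo bitmap, walking the fifos of the target LMAC first and then, if the firmware supports the extended shared-mem config, the internal fifos; returns false once all fifos were visited */ 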
struct iwl_ini_txf_iter_data *iter = fwrt->dump.fifo_iter; struct iwl_fwrt_shared_mem_cfg *cfg = &fwrt->smem_cfg; int txf_num = cfg->num_txfifo_entries; int int_txf_num = ARRAY_SIZE(cfg->internal_txfifo_size); u32 lmac_bitmap = le32_to_cpu(reg->fifos.fid1); if (!iter) return false; if (iter->init) { if (le32_to_cpu(reg->offset) && WARN_ONCE(cfg->num_lmacs == 1, "Invalid lmac offset: 0x%x\n", le32_to_cpu(reg->offset))) return false; iter->init = false; iter->internal_txf = false; iter->fifo_size = 0; iter->fifo = -1; if (le32_to_cpu(reg->offset)) iter->lmac = 1; else iter->lmac = 0; } if (!iter->internal_txf) for (iter->fifo++; iter->fifo < txf_num; iter->fifo++) { iter->fifo_size = cfg->lmac[iter->lmac].txfifo_size[iter->fifo]; if (iter->fifo_size && (lmac_bitmap & BIT(iter->fifo))) return true; } iter->internal_txf = true; if (!fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) return false; for (iter->fifo++; iter->fifo < int_txf_num + txf_num; iter->fifo++) { iter->fifo_size = cfg->internal_txfifo_size[iter->fifo - txf_num]; if (iter->fifo_size && (lmac_bitmap & BIT(iter->fifo))) return true; } return false; } static int iwl_dump_ini_txf_iter(struct iwl_fw_runtime *fwrt, struct iwl_fw_ini_region_cfg *reg, void *range_ptr, int idx) { struct iwl_fw_ini_error_dump_range *range = range_ptr; struct iwl_ini_txf_iter_data *iter; struct iwl_fw_ini_error_dump_register *reg_dump = (void *)range->data; u32 offs = le32_to_cpu(reg->offset), addr; u32 registers_size = le32_to_cpu(reg->fifos.num_of_registers) * sizeof(*reg_dump); __le32 *data; unsigned long flags; int i; if (!iwl_ini_txf_iter(fwrt, reg)) return -EIO; if (!iwl_trans_grab_nic_access(fwrt->trans, &flags)) return -EBUSY; iter = fwrt->dump.fifo_iter; range->fifo_hdr.fifo_num = cpu_to_le32(iter->fifo); range->fifo_hdr.num_of_registers = reg->fifos.num_of_registers; range->range_data_size = cpu_to_le32(iter->fifo_size + registers_size); iwl_write_prph_no_grab(fwrt->trans, TXF_LARC_NUM + offs, iter->fifo); /* * read txf registers. 
for each register, write to the dump the * register address and its value */ for (i = 0; i < le32_to_cpu(reg->fifos.num_of_registers); i++) { addr = le32_to_cpu(reg->start_addr[i]) + offs; reg_dump->addr = cpu_to_le32(addr); reg_dump->data = cpu_to_le32(iwl_read_prph_no_grab(fwrt->trans, addr)); reg_dump++; } if (reg->fifos.header_only) { range->range_data_size = cpu_to_le32(registers_size); goto out; } /* Set the TXF_READ_MODIFY_ADDR to TXF_WR_PTR */ iwl_write_prph_no_grab(fwrt->trans, TXF_READ_MODIFY_ADDR + offs, TXF_WR_PTR + offs); /* Dummy-read to advance the read pointer to the head */ iwl_read_prph_no_grab(fwrt->trans, TXF_READ_MODIFY_DATA + offs); /* Read FIFO */ addr = TXF_READ_MODIFY_DATA + offs; data = (void *)reg_dump; for (i = 0; i < iter->fifo_size; i += sizeof(*data)) *data++ = cpu_to_le32(iwl_read_prph_no_grab(fwrt->trans, addr)); out: iwl_trans_release_nic_access(fwrt->trans, &flags); return sizeof(*range) + le32_to_cpu(range->range_data_size); } struct iwl_ini_rxf_data { u32 fifo_num; u32 size; u32 offset; }; static void iwl_ini_get_rxf_data(struct iwl_fw_runtime *fwrt, struct iwl_fw_ini_region_cfg *reg, struct iwl_ini_rxf_data *data) { u32 fid1 = le32_to_cpu(reg->fifos.fid1); u32 fid2 = le32_to_cpu(reg->fifos.fid2); u32 fifo_idx; if (!data) return; memset(data, 0, sizeof(*data)); if (WARN_ON_ONCE((fid1 && fid2) || (!fid1 && !fid2))) return; fifo_idx = ffs(fid1) - 1; if (fid1 && !WARN_ON_ONCE((~BIT(fifo_idx) & fid1) || fifo_idx >= MAX_NUM_LMAC)) { data->size = fwrt->smem_cfg.lmac[fifo_idx].rxfifo1_size; data->fifo_num = fifo_idx; return; } fifo_idx = ffs(fid2) - 1; if (fid2 && !WARN_ON_ONCE(fifo_idx != 0)) { data->size = fwrt->smem_cfg.rxfifo2_size; data->offset = RXF_DIFF_FROM_PREV; /* use bit 31 to distinguish between umac and lmac rxf while * parsing the dump */ data->fifo_num = fifo_idx | IWL_RXF_UMAC_BIT; return; } } static int iwl_dump_ini_rxf_iter(struct iwl_fw_runtime *fwrt, struct iwl_fw_ini_region_cfg *reg, void *range_ptr, int idx) { struct iwl_fw_ini_error_dump_range *range = range_ptr; struct iwl_ini_rxf_data rxf_data; struct iwl_fw_ini_error_dump_register *reg_dump = (void *)range->data; u32 offs = le32_to_cpu(reg->offset), addr; u32 registers_size = le32_to_cpu(reg->fifos.num_of_registers) * sizeof(*reg_dump); __le32 *data; unsigned long flags; int i; iwl_ini_get_rxf_data(fwrt, reg, &rxf_data); if (!rxf_data.size) return -EIO; if (!iwl_trans_grab_nic_access(fwrt->trans, &flags)) return -EBUSY; range->fifo_hdr.fifo_num = cpu_to_le32(rxf_data.fifo_num); range->fifo_hdr.num_of_registers = reg->fifos.num_of_registers; range->range_data_size = cpu_to_le32(rxf_data.size + registers_size); /* * read rxf registers. 
for each register, write to the dump the * register address and its value */ for (i = 0; i < le32_to_cpu(reg->fifos.num_of_registers); i++) { addr = le32_to_cpu(reg->start_addr[i]) + offs; reg_dump->addr = cpu_to_le32(addr); reg_dump->data = cpu_to_le32(iwl_read_prph_no_grab(fwrt->trans, addr)); reg_dump++; } if (reg->fifos.header_only) { range->range_data_size = cpu_to_le32(registers_size); goto out; } /* * region registers have absolute values, so apply the rxf offset * after reading the registers */ offs += rxf_data.offset; /* Lock fence */ iwl_write_prph_no_grab(fwrt->trans, RXF_SET_FENCE_MODE + offs, 0x1); /* Set the fence pointer to the same place as the WR pointer */ iwl_write_prph_no_grab(fwrt->trans, RXF_LD_WR2FENCE + offs, 0x1); /* Set fence offset */ iwl_write_prph_no_grab(fwrt->trans, RXF_LD_FENCE_OFFSET_ADDR + offs, 0x0); /* Read FIFO */ addr = RXF_FIFO_RD_FENCE_INC + offs; data = (void *)reg_dump; for (i = 0; i < rxf_data.size; i += sizeof(*data)) *data++ = cpu_to_le32(iwl_read_prph_no_grab(fwrt->trans, addr)); out: iwl_trans_release_nic_access(fwrt->trans, &flags); return sizeof(*range) + le32_to_cpu(range->range_data_size); } static void *iwl_dump_ini_mem_fill_header(struct iwl_fw_runtime *fwrt, struct iwl_fw_ini_region_cfg *reg, void *data) { struct iwl_fw_ini_error_dump *dump = data; dump->header.version = cpu_to_le32(IWL_INI_DUMP_VER); return dump->ranges; } static void *iwl_dump_ini_mon_fill_header(struct iwl_fw_runtime *fwrt, struct iwl_fw_ini_region_cfg *reg, struct iwl_fw_ini_monitor_dump *data, u32 write_ptr_addr, u32 write_ptr_msk, u32 cycle_cnt_addr, u32 cycle_cnt_msk) { u32 write_ptr, cycle_cnt; unsigned long flags; if (!iwl_trans_grab_nic_access(fwrt->trans, &flags)) { IWL_ERR(fwrt, "Failed to get monitor header\n"); return NULL; } write_ptr = iwl_read_prph_no_grab(fwrt->trans, write_ptr_addr); cycle_cnt = iwl_read_prph_no_grab(fwrt->trans, cycle_cnt_addr); iwl_trans_release_nic_access(fwrt->trans, &flags); data->header.version = cpu_to_le32(IWL_INI_DUMP_VER); data->write_ptr = cpu_to_le32(write_ptr & write_ptr_msk); data->cycle_cnt = cpu_to_le32(cycle_cnt & cycle_cnt_msk); return data->ranges; } static void *iwl_dump_ini_mon_dram_fill_header(struct iwl_fw_runtime *fwrt, struct iwl_fw_ini_region_cfg *reg, void *data) { struct iwl_fw_ini_monitor_dump *mon_dump = (void *)data; u32 write_ptr_addr, write_ptr_msk, cycle_cnt_addr, cycle_cnt_msk; switch (fwrt->trans->cfg->device_family) { case IWL_DEVICE_FAMILY_9000: case IWL_DEVICE_FAMILY_22000: write_ptr_addr = MON_BUFF_WRPTR_VER2; write_ptr_msk = -1; cycle_cnt_addr = MON_BUFF_CYCLE_CNT_VER2; cycle_cnt_msk = -1; break; default: IWL_ERR(fwrt, "Unsupported device family %d\n", fwrt->trans->cfg->device_family); return NULL; } return iwl_dump_ini_mon_fill_header(fwrt, reg, mon_dump, write_ptr_addr, write_ptr_msk, cycle_cnt_addr, cycle_cnt_msk); } static void *iwl_dump_ini_mon_smem_fill_header(struct iwl_fw_runtime *fwrt, struct iwl_fw_ini_region_cfg *reg, void *data) { struct iwl_fw_ini_monitor_dump *mon_dump = (void *)data; const struct iwl_cfg *cfg = fwrt->trans->cfg; if (fwrt->trans->cfg->device_family != IWL_DEVICE_FAMILY_9000 && fwrt->trans->cfg->device_family != IWL_DEVICE_FAMILY_22000) { IWL_ERR(fwrt, "Unsupported device family %d\n", fwrt->trans->cfg->device_family); return NULL; } return iwl_dump_ini_mon_fill_header(fwrt, reg, mon_dump, cfg->fw_mon_smem_write_ptr_addr, cfg->fw_mon_smem_write_ptr_msk, cfg->fw_mon_smem_cycle_cnt_ptr_addr, cfg->fw_mon_smem_cycle_cnt_ptr_msk); } static u32 iwl_dump_ini_mem_ranges(struct 
iwl_fw_runtime *fwrt, struct iwl_fw_ini_region_cfg *reg) { return le32_to_cpu(reg->internal.num_of_ranges); } static u32 iwl_dump_ini_paging_gen2_ranges(struct iwl_fw_runtime *fwrt, struct iwl_fw_ini_region_cfg *reg) { return fwrt->trans->init_dram.paging_cnt; } static u32 iwl_dump_ini_paging_ranges(struct iwl_fw_runtime *fwrt, struct iwl_fw_ini_region_cfg *reg) { return fwrt->num_of_paging_blk; } static u32 iwl_dump_ini_mon_dram_ranges(struct iwl_fw_runtime *fwrt, struct iwl_fw_ini_region_cfg *reg) { return 1; } static u32 iwl_dump_ini_txf_ranges(struct iwl_fw_runtime *fwrt, struct iwl_fw_ini_region_cfg *reg) { struct iwl_ini_txf_iter_data iter = { .init = true }; void *fifo_iter = fwrt->dump.fifo_iter; u32 num_of_fifos = 0; fwrt->dump.fifo_iter = &iter; while (iwl_ini_txf_iter(fwrt, reg)) num_of_fifos++; fwrt->dump.fifo_iter = fifo_iter; return num_of_fifos; } static u32 iwl_dump_ini_rxf_ranges(struct iwl_fw_runtime *fwrt, struct iwl_fw_ini_region_cfg *reg) { /* Each Rx fifo needs a different offset and therefore its * region can contain only one fifo, i.e. 1 memory range. */ return 1; } static u32 iwl_dump_ini_mem_get_size(struct iwl_fw_runtime *fwrt, struct iwl_fw_ini_region_cfg *reg) { return sizeof(struct iwl_fw_ini_error_dump) + iwl_dump_ini_mem_ranges(fwrt, reg) * (sizeof(struct iwl_fw_ini_error_dump_range) + le32_to_cpu(reg->internal.range_data_size)); } static u32 iwl_dump_ini_paging_gen2_get_size(struct iwl_fw_runtime *fwrt, struct iwl_fw_ini_region_cfg *reg) { int i; u32 range_header_len = sizeof(struct iwl_fw_ini_error_dump_range); u32 size = sizeof(struct iwl_fw_ini_error_dump); for (i = 0; i < iwl_dump_ini_paging_gen2_ranges(fwrt, reg); i++) size += range_header_len + fwrt->trans->init_dram.paging[i].size; return size; } static u32 iwl_dump_ini_paging_get_size(struct iwl_fw_runtime *fwrt, struct iwl_fw_ini_region_cfg *reg) { int i; u32 range_header_len = sizeof(struct iwl_fw_ini_error_dump_range); u32 size = sizeof(struct iwl_fw_ini_error_dump); for (i = 1; i <= iwl_dump_ini_paging_ranges(fwrt, reg); i++) size += range_header_len + fwrt->fw_paging_db[i].fw_paging_size; return size; } static u32 iwl_dump_ini_mon_dram_get_size(struct iwl_fw_runtime *fwrt, struct iwl_fw_ini_region_cfg *reg) { u32 size = sizeof(struct iwl_fw_ini_monitor_dump) + sizeof(struct iwl_fw_ini_error_dump_range); if (fwrt->trans->dbg.num_blocks) size += fwrt->trans->dbg.fw_mon[0].size; return size; } static u32 iwl_dump_ini_mon_smem_get_size(struct iwl_fw_runtime *fwrt, struct iwl_fw_ini_region_cfg *reg) { return sizeof(struct iwl_fw_ini_monitor_dump) + iwl_dump_ini_mem_ranges(fwrt, reg) * (sizeof(struct iwl_fw_ini_error_dump_range) + le32_to_cpu(reg->internal.range_data_size)); } static u32 iwl_dump_ini_txf_get_size(struct iwl_fw_runtime *fwrt, struct iwl_fw_ini_region_cfg *reg) { struct iwl_ini_txf_iter_data iter = { .init = true }; void *fifo_iter = fwrt->dump.fifo_iter; u32 size = 0; u32 fifo_hdr = sizeof(struct iwl_fw_ini_error_dump_range) + le32_to_cpu(reg->fifos.num_of_registers) * sizeof(struct iwl_fw_ini_error_dump_register); fwrt->dump.fifo_iter = &iter; while (iwl_ini_txf_iter(fwrt, reg)) { size += fifo_hdr; if (!reg->fifos.header_only) size += iter.fifo_size; } if (size) size += sizeof(struct iwl_fw_ini_error_dump); fwrt->dump.fifo_iter = fifo_iter; return size; } static u32 iwl_dump_ini_rxf_get_size(struct iwl_fw_runtime *fwrt, struct iwl_fw_ini_region_cfg *reg) { struct iwl_ini_rxf_data rx_data; u32 size = sizeof(struct iwl_fw_ini_error_dump) + sizeof(struct iwl_fw_ini_error_dump_range) + 
le32_to_cpu(reg->fifos.num_of_registers) * sizeof(struct iwl_fw_ini_error_dump_register); if (reg->fifos.header_only) return size; iwl_ini_get_rxf_data(fwrt, reg, &rx_data); size += rx_data.size; return size; } /** * struct iwl_dump_ini_mem_ops - ini memory dump operations * @get_num_of_ranges: returns the number of memory ranges in the region. * @get_size: returns the total size of the region. * @fill_mem_hdr: fills region type specific headers and returns pointer to * the first range or NULL if failed to fill headers. * @fill_range: copies a given memory range into the dump. * Returns the size of the range or negative error value otherwise. */ struct iwl_dump_ini_mem_ops { u32 (*get_num_of_ranges)(struct iwl_fw_runtime *fwrt, struct iwl_fw_ini_region_cfg *reg); u32 (*get_size)(struct iwl_fw_runtime *fwrt, struct iwl_fw_ini_region_cfg *reg); void *(*fill_mem_hdr)(struct iwl_fw_runtime *fwrt, struct iwl_fw_ini_region_cfg *reg, void *data); int (*fill_range)(struct iwl_fw_runtime *fwrt, struct iwl_fw_ini_region_cfg *reg, void *range, int idx); }; /** * iwl_dump_ini_mem - copy a memory region into the dump * @fwrt: fw runtime struct. * @data: dump memory data. * @reg: region to copy to the dump. * @ops: memory dump operations. */ static void iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, struct iwl_fw_error_dump_data **data, struct iwl_fw_ini_region_cfg *reg, struct iwl_dump_ini_mem_ops *ops) { struct iwl_fw_ini_error_dump_header *header = (void *)(*data)->data; u32 num_of_ranges, i, type = le32_to_cpu(reg->region_type), size; void *range; if (WARN_ON(!ops || !ops->get_num_of_ranges || !ops->get_size || !ops->fill_mem_hdr || !ops->fill_range)) return; size = ops->get_size(fwrt, reg); if (!size) return; IWL_DEBUG_FW(fwrt, "WRT: collecting region: id=%d, type=%d\n", le32_to_cpu(reg->region_id), type); num_of_ranges = ops->get_num_of_ranges(fwrt, reg); (*data)->type = cpu_to_le32(type); (*data)->len = cpu_to_le32(size); header->region_id = reg->region_id; header->num_of_ranges = cpu_to_le32(num_of_ranges); header->name_len = cpu_to_le32(min_t(int, IWL_FW_INI_MAX_NAME, le32_to_cpu(reg->name_len))); memcpy(header->name, reg->name, le32_to_cpu(header->name_len)); range = ops->fill_mem_hdr(fwrt, reg, header); if (!range) { IWL_ERR(fwrt, "WRT: failed to fill region header: id=%d, type=%d\n", le32_to_cpu(reg->region_id), type); memset(*data, 0, size); return; } for (i = 0; i < num_of_ranges; i++) { int range_size = ops->fill_range(fwrt, reg, range, i); if (range_size < 0) { IWL_ERR(fwrt, "WRT: failed to dump region: id=%d, type=%d\n", le32_to_cpu(reg->region_id), type); memset(*data, 0, size); return; } range = range + range_size; } *data = iwl_fw_error_next_data(*data); } static void iwl_dump_ini_info(struct iwl_fw_runtime *fwrt, struct iwl_fw_ini_trigger *trigger, struct iwl_fw_error_dump_data **data) { struct iwl_fw_ini_dump_info *dump = (void *)(*data)->data; u32 reg_ids_size = le32_to_cpu(trigger->num_regions) * sizeof(__le32); (*data)->type = cpu_to_le32(IWL_INI_DUMP_INFO_TYPE); (*data)->len = cpu_to_le32(sizeof(*dump) + reg_ids_size); dump->version = cpu_to_le32(IWL_INI_DUMP_VER); dump->trigger_id = trigger->trigger_id; dump->is_external_cfg = cpu_to_le32(fwrt->trans->dbg.external_ini_loaded); dump->ver_type = cpu_to_le32(fwrt->dump.fw_ver.type); dump->ver_subtype = cpu_to_le32(fwrt->dump.fw_ver.subtype); dump->hw_step = cpu_to_le32(CSR_HW_REV_STEP(fwrt->trans->hw_rev)); dump->hw_type = cpu_to_le32(CSR_HW_REV_TYPE(fwrt->trans->hw_rev)); dump->rf_id_flavor = 
cpu_to_le32(CSR_HW_RFID_FLAVOR(fwrt->trans->hw_rf_id)); dump->rf_id_dash = cpu_to_le32(CSR_HW_RFID_DASH(fwrt->trans->hw_rf_id)); dump->rf_id_step = cpu_to_le32(CSR_HW_RFID_STEP(fwrt->trans->hw_rf_id)); dump->rf_id_type = cpu_to_le32(CSR_HW_RFID_TYPE(fwrt->trans->hw_rf_id)); dump->lmac_major = cpu_to_le32(fwrt->dump.fw_ver.lmac_major); dump->lmac_minor = cpu_to_le32(fwrt->dump.fw_ver.lmac_minor); dump->umac_major = cpu_to_le32(fwrt->dump.fw_ver.umac_major); dump->umac_minor = cpu_to_le32(fwrt->dump.fw_ver.umac_minor); dump->build_tag_len = cpu_to_le32(sizeof(dump->build_tag)); memcpy(dump->build_tag, fwrt->fw->human_readable, sizeof(dump->build_tag)); dump->img_name_len = cpu_to_le32(sizeof(dump->img_name)); memcpy(dump->img_name, fwrt->dump.img_name, sizeof(dump->img_name)); dump->internal_dbg_cfg_name_len = cpu_to_le32(sizeof(dump->internal_dbg_cfg_name)); memcpy(dump->internal_dbg_cfg_name, fwrt->dump.internal_dbg_cfg_name, sizeof(dump->internal_dbg_cfg_name)); dump->external_dbg_cfg_name_len = cpu_to_le32(sizeof(dump->external_dbg_cfg_name)); /* dump info size is allocated in iwl_fw_ini_get_trigger_len. * The driver allocates (sizeof(*dump) + reg_ids_size) so it is safe to * use reg_ids_size */ memcpy(dump->external_dbg_cfg_name, fwrt->dump.external_dbg_cfg_name, sizeof(dump->external_dbg_cfg_name)); dump->regions_num = trigger->num_regions; memcpy(dump->region_ids, trigger->data, reg_ids_size); *data = iwl_fw_error_next_data(*data); } static int iwl_fw_ini_get_trigger_len(struct iwl_fw_runtime *fwrt, struct iwl_fw_ini_trigger *trigger) { int i, ret_size = 0, hdr_len = sizeof(struct iwl_fw_error_dump_data); u32 size; if (!trigger || !trigger->num_regions) return 0; for (i = 0; i < le32_to_cpu(trigger->num_regions); i++) { u32 reg_id = le32_to_cpu(trigger->data[i]); struct iwl_fw_ini_region_cfg *reg; if (WARN_ON(reg_id >= ARRAY_SIZE(fwrt->dump.active_regs))) continue; reg = fwrt->dump.active_regs[reg_id]; if (!reg) { IWL_WARN(fwrt, "WRT: unassigned region id %d, skipping\n", reg_id); continue; } /* currently the driver supports always on domain only */ if (le32_to_cpu(reg->domain) != IWL_FW_INI_DBG_DOMAIN_ALWAYS_ON) continue; switch (le32_to_cpu(reg->region_type)) { case IWL_FW_INI_REGION_DEVICE_MEMORY: case IWL_FW_INI_REGION_PERIPHERY_MAC: case IWL_FW_INI_REGION_PERIPHERY_PHY: case IWL_FW_INI_REGION_PERIPHERY_AUX: case IWL_FW_INI_REGION_CSR: case IWL_FW_INI_REGION_LMAC_ERROR_TABLE: case IWL_FW_INI_REGION_UMAC_ERROR_TABLE: size = iwl_dump_ini_mem_get_size(fwrt, reg); if (size) ret_size += hdr_len + size; break; case IWL_FW_INI_REGION_TXF: size = iwl_dump_ini_txf_get_size(fwrt, reg); if (size) ret_size += hdr_len + size; break; case IWL_FW_INI_REGION_RXF: size = iwl_dump_ini_rxf_get_size(fwrt, reg); if (size) ret_size += hdr_len + size; break; case IWL_FW_INI_REGION_PAGING: if (iwl_fw_dbg_is_paging_enabled(fwrt)) size = iwl_dump_ini_paging_get_size(fwrt, reg); else size = iwl_dump_ini_paging_gen2_get_size(fwrt, reg); if (size) ret_size += hdr_len + size; break; case IWL_FW_INI_REGION_DRAM_BUFFER: if (!fwrt->trans->dbg.num_blocks) break; size = iwl_dump_ini_mon_dram_get_size(fwrt, reg); if (size) ret_size += hdr_len + size; break; case IWL_FW_INI_REGION_INTERNAL_BUFFER: size = iwl_dump_ini_mon_smem_get_size(fwrt, reg); if (size) ret_size += hdr_len + size; break; case IWL_FW_INI_REGION_DRAM_IMR: /* Undefined yet */ default: break; } } /* add dump info size */ if (ret_size) ret_size += hdr_len + sizeof(struct iwl_fw_ini_dump_info) + (le32_to_cpu(trigger->num_regions) * sizeof(__le32)); 
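/* descriptive note: a total of 0 means the trigger has no region to collect, so the caller will not allocate a dump file */ 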
return ret_size; } static void iwl_fw_ini_dump_trigger(struct iwl_fw_runtime *fwrt, struct iwl_fw_ini_trigger *trigger, struct iwl_fw_error_dump_data **data) { int i, num = le32_to_cpu(trigger->num_regions); iwl_dump_ini_info(fwrt, trigger, data); for (i = 0; i < num; i++) { u32 reg_id = le32_to_cpu(trigger->data[i]); struct iwl_fw_ini_region_cfg *reg; struct iwl_dump_ini_mem_ops ops; if (reg_id >= ARRAY_SIZE(fwrt->dump.active_regs)) continue; reg = fwrt->dump.active_regs[reg_id]; /* Don't warn, get_trigger_len already warned */ if (!reg) continue; /* currently the driver supports always on domain only */ if (le32_to_cpu(reg->domain) != IWL_FW_INI_DBG_DOMAIN_ALWAYS_ON) continue; switch (le32_to_cpu(reg->region_type)) { case IWL_FW_INI_REGION_DEVICE_MEMORY: case IWL_FW_INI_REGION_LMAC_ERROR_TABLE: case IWL_FW_INI_REGION_UMAC_ERROR_TABLE: ops.get_num_of_ranges = iwl_dump_ini_mem_ranges; ops.get_size = iwl_dump_ini_mem_get_size; ops.fill_mem_hdr = iwl_dump_ini_mem_fill_header; ops.fill_range = iwl_dump_ini_dev_mem_iter; iwl_dump_ini_mem(fwrt, data, reg, &ops); break; case IWL_FW_INI_REGION_PERIPHERY_MAC: ops.get_num_of_ranges = iwl_dump_ini_mem_ranges; ops.get_size = iwl_dump_ini_mem_get_size; ops.fill_mem_hdr = iwl_dump_ini_mem_fill_header; ops.fill_range = iwl_dump_ini_prph_iter; iwl_dump_ini_mem(fwrt, data, reg, &ops); break; case IWL_FW_INI_REGION_DRAM_BUFFER: ops.get_num_of_ranges = iwl_dump_ini_mon_dram_ranges; ops.get_size = iwl_dump_ini_mon_dram_get_size; ops.fill_mem_hdr = iwl_dump_ini_mon_dram_fill_header; ops.fill_range = iwl_dump_ini_mon_dram_iter; iwl_dump_ini_mem(fwrt, data, reg, &ops); break; case IWL_FW_INI_REGION_INTERNAL_BUFFER: ops.get_num_of_ranges = iwl_dump_ini_mem_ranges; ops.get_size = iwl_dump_ini_mon_smem_get_size; ops.fill_mem_hdr = iwl_dump_ini_mon_smem_fill_header; ops.fill_range = iwl_dump_ini_dev_mem_iter; iwl_dump_ini_mem(fwrt, data, reg, &ops); break; case IWL_FW_INI_REGION_PAGING: ops.fill_mem_hdr = iwl_dump_ini_mem_fill_header; if (iwl_fw_dbg_is_paging_enabled(fwrt)) { ops.get_num_of_ranges = iwl_dump_ini_paging_ranges; ops.get_size = iwl_dump_ini_paging_get_size; ops.fill_range = iwl_dump_ini_paging_iter; } else { ops.get_num_of_ranges = iwl_dump_ini_paging_gen2_ranges; ops.get_size = iwl_dump_ini_paging_gen2_get_size; ops.fill_range = iwl_dump_ini_paging_gen2_iter; } iwl_dump_ini_mem(fwrt, data, reg, &ops); break; case IWL_FW_INI_REGION_TXF: { struct iwl_ini_txf_iter_data iter = { .init = true }; void *fifo_iter = fwrt->dump.fifo_iter; fwrt->dump.fifo_iter = &iter; ops.get_num_of_ranges = iwl_dump_ini_txf_ranges; ops.get_size = iwl_dump_ini_txf_get_size; ops.fill_mem_hdr = iwl_dump_ini_mem_fill_header; ops.fill_range = iwl_dump_ini_txf_iter; iwl_dump_ini_mem(fwrt, data, reg, &ops); fwrt->dump.fifo_iter = fifo_iter; break; } case IWL_FW_INI_REGION_RXF: ops.get_num_of_ranges = iwl_dump_ini_rxf_ranges; ops.get_size = iwl_dump_ini_rxf_get_size; ops.fill_mem_hdr = iwl_dump_ini_mem_fill_header; ops.fill_range = iwl_dump_ini_rxf_iter; iwl_dump_ini_mem(fwrt, data, reg, &ops); break; case IWL_FW_INI_REGION_CSR: ops.get_num_of_ranges = iwl_dump_ini_mem_ranges; ops.get_size = iwl_dump_ini_mem_get_size; ops.fill_mem_hdr = iwl_dump_ini_mem_fill_header; ops.fill_range = iwl_dump_ini_csr_iter; iwl_dump_ini_mem(fwrt, data, reg, &ops); break; case IWL_FW_INI_REGION_PERIPHERY_PHY: case IWL_FW_INI_REGION_PERIPHERY_AUX: case IWL_FW_INI_REGION_DRAM_IMR: /* This is undefined yet */ default: break; } } } static struct iwl_fw_error_dump_file * iwl_fw_error_ini_dump_file(struct 
iwl_fw_runtime *fwrt, enum iwl_fw_ini_trigger_id trig_id) { int size; struct iwl_fw_error_dump_data *dump_data; struct iwl_fw_error_dump_file *dump_file; struct iwl_fw_ini_trigger *trigger; if (!iwl_fw_ini_trigger_on(fwrt, trig_id)) return NULL; trigger = fwrt->dump.active_trigs[trig_id].trig; size = iwl_fw_ini_get_trigger_len(fwrt, trigger); if (!size) return NULL; size += sizeof(*dump_file); dump_file = vzalloc(size); if (!dump_file) return NULL; dump_file->barker = cpu_to_le32(IWL_FW_INI_ERROR_DUMP_BARKER); dump_data = (void *)dump_file->data; dump_file->file_len = cpu_to_le32(size); iwl_fw_ini_dump_trigger(fwrt, trigger, &dump_data); return dump_file; } static void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt) { struct iwl_fw_dump_ptrs fw_error_dump = {}; struct iwl_fw_error_dump_file *dump_file; struct scatterlist *sg_dump_data; u32 file_len; u32 dump_mask = fwrt->fw->dbg.dump_mask; dump_file = iwl_fw_error_dump_file(fwrt, &fw_error_dump); if (!dump_file) goto out; if (fwrt->dump.monitor_only) dump_mask &= IWL_FW_ERROR_DUMP_FW_MONITOR; fw_error_dump.trans_ptr = iwl_trans_dump_data(fwrt->trans, dump_mask); file_len = le32_to_cpu(dump_file->file_len); fw_error_dump.fwrt_len = file_len; if (fw_error_dump.trans_ptr) { file_len += fw_error_dump.trans_ptr->len; dump_file->file_len = cpu_to_le32(file_len); } sg_dump_data = alloc_sgtable(file_len); if (sg_dump_data) { sg_pcopy_from_buffer(sg_dump_data, sg_nents(sg_dump_data), fw_error_dump.fwrt_ptr, fw_error_dump.fwrt_len, 0); if (fw_error_dump.trans_ptr) sg_pcopy_from_buffer(sg_dump_data, sg_nents(sg_dump_data), fw_error_dump.trans_ptr->data, fw_error_dump.trans_ptr->len, fw_error_dump.fwrt_len); dev_coredumpsg(fwrt->trans->dev, sg_dump_data, file_len, GFP_KERNEL); } vfree(fw_error_dump.fwrt_ptr); vfree(fw_error_dump.trans_ptr); out: iwl_fw_free_dump_desc(fwrt); } static void iwl_fw_error_ini_dump(struct iwl_fw_runtime *fwrt, u8 wk_idx) { enum iwl_fw_ini_trigger_id trig_id = fwrt->dump.wks[wk_idx].ini_trig_id; struct iwl_fw_error_dump_file *dump_file; struct scatterlist *sg_dump_data; u32 file_len; dump_file = iwl_fw_error_ini_dump_file(fwrt, trig_id); if (!dump_file) goto out; file_len = le32_to_cpu(dump_file->file_len); sg_dump_data = alloc_sgtable(file_len); if (sg_dump_data) { sg_pcopy_from_buffer(sg_dump_data, sg_nents(sg_dump_data), dump_file, file_len, 0); dev_coredumpsg(fwrt->trans->dev, sg_dump_data, file_len, GFP_KERNEL); } vfree(dump_file); out: fwrt->dump.wks[wk_idx].ini_trig_id = IWL_FW_TRIGGER_ID_INVALID; } const struct iwl_fw_dump_desc iwl_dump_desc_assert = { .trig_desc = { .type = cpu_to_le32(FW_DBG_TRIGGER_FW_ASSERT), }, }; IWL_EXPORT_SYMBOL(iwl_dump_desc_assert); int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt, const struct iwl_fw_dump_desc *desc, bool monitor_only, unsigned int delay) { u32 trig_type = le32_to_cpu(desc->trig_desc.type); int ret; if (fwrt->trans->dbg.ini_valid) { ret = iwl_fw_dbg_ini_collect(fwrt, trig_type); if (!ret) iwl_fw_free_dump_desc(fwrt); return ret; } /* use wks[0] since dump flow prior to ini does not need to support * consecutive triggers collection */ if (test_and_set_bit(fwrt->dump.wks[0].idx, &fwrt->dump.active_wks)) return -EBUSY; if (WARN_ON(fwrt->dump.desc)) iwl_fw_free_dump_desc(fwrt); IWL_WARN(fwrt, "Collecting data: trigger %d fired.\n", le32_to_cpu(desc->trig_desc.type)); fwrt->dump.desc = desc; fwrt->dump.monitor_only = monitor_only; schedule_delayed_work(&fwrt->dump.wks[0].wk, usecs_to_jiffies(delay)); return 0; } IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect_desc); int 
iwl_fw_dbg_error_collect(struct iwl_fw_runtime *fwrt, enum iwl_fw_dbg_trigger trig_type) { int ret; struct iwl_fw_dump_desc *iwl_dump_error_desc; if (!test_bit(STATUS_DEVICE_ENABLED, &fwrt->trans->status)) return -EIO; iwl_dump_error_desc = kmalloc(sizeof(*iwl_dump_error_desc), GFP_KERNEL); if (!iwl_dump_error_desc) return -ENOMEM; iwl_dump_error_desc->trig_desc.type = cpu_to_le32(trig_type); iwl_dump_error_desc->len = 0; ret = iwl_fw_dbg_collect_desc(fwrt, iwl_dump_error_desc, false, 0); if (ret) kfree(iwl_dump_error_desc); else iwl_trans_sync_nmi(fwrt->trans); return ret; } IWL_EXPORT_SYMBOL(iwl_fw_dbg_error_collect); int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt, enum iwl_fw_dbg_trigger trig, const char *str, size_t len, struct iwl_fw_dbg_trigger_tlv *trigger) { struct iwl_fw_dump_desc *desc; unsigned int delay = 0; bool monitor_only = false; if (trigger) { u16 occurrences = le16_to_cpu(trigger->occurrences) - 1; if (!le16_to_cpu(trigger->occurrences)) return 0; if (trigger->flags & IWL_FW_DBG_FORCE_RESTART) { IWL_WARN(fwrt, "Force restart: trigger %d fired.\n", trig); iwl_force_nmi(fwrt->trans); return 0; } trigger->occurrences = cpu_to_le16(occurrences); monitor_only = trigger->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY; /* convert msec to usec */ delay = le32_to_cpu(trigger->stop_delay) * USEC_PER_MSEC; } desc = kzalloc(sizeof(*desc) + len, GFP_ATOMIC); if (!desc) return -ENOMEM; desc->len = len; desc->trig_desc.type = cpu_to_le32(trig); memcpy(desc->trig_desc.data, str, len); return iwl_fw_dbg_collect_desc(fwrt, desc, monitor_only, delay); } IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect); int _iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt, enum iwl_fw_ini_trigger_id id) { struct iwl_fw_ini_active_triggers *active; u32 occur, delay; unsigned long idx; if (!iwl_fw_ini_trigger_on(fwrt, id)) { IWL_WARN(fwrt, "WRT: Trigger %d is not active, aborting dump\n", id); return -EINVAL; } active = &fwrt->dump.active_trigs[id]; delay = le32_to_cpu(active->trig->dump_delay); occur = le32_to_cpu(active->trig->occurrences); if (!occur) return 0; active->trig->occurrences = cpu_to_le32(--occur); if (le32_to_cpu(active->trig->force_restart)) { IWL_WARN(fwrt, "WRT: force restart: trigger %d fired.\n", id); iwl_force_nmi(fwrt->trans); return 0; } /* Check there is an available worker. * ffz return value is undefined if no zero exists, * so check against ~0UL first. */ if (fwrt->dump.active_wks == ~0UL) return -EBUSY; idx = ffz(fwrt->dump.active_wks); if (idx >= IWL_FW_RUNTIME_DUMP_WK_NUM || test_and_set_bit(fwrt->dump.wks[idx].idx, &fwrt->dump.active_wks)) return -EBUSY; fwrt->dump.wks[idx].ini_trig_id = id; IWL_WARN(fwrt, "WRT: collecting data: ini trigger %d fired.\n", id); schedule_delayed_work(&fwrt->dump.wks[idx].wk, usecs_to_jiffies(delay)); return 0; } IWL_EXPORT_SYMBOL(_iwl_fw_dbg_ini_collect); int iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt, u32 legacy_trigger_id) { int id; switch (legacy_trigger_id) { case FW_DBG_TRIGGER_FW_ASSERT: case FW_DBG_TRIGGER_ALIVE_TIMEOUT: case FW_DBG_TRIGGER_DRIVER: id = IWL_FW_TRIGGER_ID_FW_ASSERT; break; case FW_DBG_TRIGGER_USER: id = IWL_FW_TRIGGER_ID_USER_TRIGGER; break; default: return -EIO; } return _iwl_fw_dbg_ini_collect(fwrt, id); } IWL_EXPORT_SYMBOL(iwl_fw_dbg_ini_collect); int iwl_fw_dbg_collect_trig(struct iwl_fw_runtime *fwrt, struct iwl_fw_dbg_trigger_tlv *trigger, const char *fmt, ...) 
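/* descriptive note: printf-style front end for iwl_fw_dbg_collect(); the formatted string is stored in the dump descriptor that accompanies the trigger */ 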
{ int ret, len = 0; char buf[64]; if (fmt) { va_list ap; buf[sizeof(buf) - 1] = '\0'; va_start(ap, fmt); vsnprintf(buf, sizeof(buf), fmt, ap); va_end(ap); /* check for truncation */ if (WARN_ON_ONCE(buf[sizeof(buf) - 1])) buf[sizeof(buf) - 1] = '\0'; len = strlen(buf) + 1; } ret = iwl_fw_dbg_collect(fwrt, le32_to_cpu(trigger->id), buf, len, trigger); if (ret) return ret; return 0; } IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect_trig); int iwl_fw_start_dbg_conf(struct iwl_fw_runtime *fwrt, u8 conf_id) { u8 *ptr; int ret; int i; if (WARN_ONCE(conf_id >= ARRAY_SIZE(fwrt->fw->dbg.conf_tlv), "Invalid configuration %d\n", conf_id)) return -EINVAL; /* EARLY START - firmware's configuration is hard coded */ if ((!fwrt->fw->dbg.conf_tlv[conf_id] || !fwrt->fw->dbg.conf_tlv[conf_id]->num_of_hcmds) && conf_id == FW_DBG_START_FROM_ALIVE) return 0; if (!fwrt->fw->dbg.conf_tlv[conf_id]) return -EINVAL; if (fwrt->dump.conf != FW_DBG_INVALID) IWL_WARN(fwrt, "FW already configured (%d) - re-configuring\n", fwrt->dump.conf); #ifdef CPTCFG_IWLWIFI_SUPPORT_DEBUG_OVERRIDES /* start default config marker cmd for syncing logs */ if (fwrt->trans->dbg_cfg.enable_timestamp_marker_cmd) iwl_fw_trigger_timestamp(fwrt, 1); #endif /* Send all HCMDs for configuring the FW debug */ ptr = (void *)&fwrt->fw->dbg.conf_tlv[conf_id]->hcmd; for (i = 0; i < fwrt->fw->dbg.conf_tlv[conf_id]->num_of_hcmds; i++) { struct iwl_fw_dbg_conf_hcmd *cmd = (void *)ptr; struct iwl_host_cmd hcmd = { .id = cmd->id, .len = { le16_to_cpu(cmd->len), }, .data = { cmd->data, }, }; ret = iwl_trans_send_cmd(fwrt->trans, &hcmd); if (ret) return ret; ptr += sizeof(*cmd); ptr += le16_to_cpu(cmd->len); } fwrt->dump.conf = conf_id; return 0; } IWL_EXPORT_SYMBOL(iwl_fw_start_dbg_conf); /* this function assumes dump_start was called beforehand and dump_end will be * called afterwards */ static void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt, u8 wk_idx) { struct iwl_fw_dbg_params params = {0}; if (!test_bit(wk_idx, &fwrt->dump.active_wks)) return; if (!test_bit(STATUS_DEVICE_ENABLED, &fwrt->trans->status)) { IWL_ERR(fwrt, "Device is not enabled - cannot dump error\n"); iwl_fw_free_dump_desc(fwrt); goto out; } /* there's no point in fw dump if the bus is dead */ if (test_bit(STATUS_TRANS_DEAD, &fwrt->trans->status)) { IWL_ERR(fwrt, "Skip fw error dump since bus is dead\n"); goto out; } if (iwl_fw_dbg_stop_restart_recording(fwrt, ¶ms, true)) { IWL_ERR(fwrt, "Failed to stop DBGC recording, aborting dump\n"); goto out; } IWL_DEBUG_FW_INFO(fwrt, "WRT: data collection start\n"); if (fwrt->trans->dbg.ini_valid) iwl_fw_error_ini_dump(fwrt, wk_idx); else iwl_fw_error_dump(fwrt); IWL_DEBUG_FW_INFO(fwrt, "WRT: data collection done\n"); iwl_fw_dbg_stop_restart_recording(fwrt, ¶ms, false); out: clear_bit(wk_idx, &fwrt->dump.active_wks); } void iwl_fw_error_dump_wk(struct work_struct *work) { struct iwl_fw_runtime *fwrt; typeof(fwrt->dump.wks[0]) *wks; wks = container_of(work, typeof(fwrt->dump.wks[0]), wk.work); fwrt = container_of(wks, struct iwl_fw_runtime, dump.wks[wks->idx]); /* assumes the op mode mutex is locked in dump_start since * iwl_fw_dbg_collect_sync can't run in parallel */ if (fwrt->ops && fwrt->ops->dump_start && fwrt->ops->dump_start(fwrt->ops_ctx)) return; iwl_fw_dbg_collect_sync(fwrt, wks->idx); if (fwrt->ops && fwrt->ops->dump_end) fwrt->ops->dump_end(fwrt->ops_ctx); } void iwl_fw_dbg_read_d3_debug_data(struct iwl_fw_runtime *fwrt) { const struct iwl_cfg *cfg = fwrt->trans->cfg; if (!iwl_fw_dbg_is_d3_debug_enabled(fwrt)) return; if 
(!fwrt->dump.d3_debug_data) { fwrt->dump.d3_debug_data = kmalloc(cfg->d3_debug_data_length, GFP_KERNEL); if (!fwrt->dump.d3_debug_data) { IWL_ERR(fwrt, "failed to allocate memory for D3 debug data\n"); return; } } /* if the buffer holds previous debug data it is overwritten */ iwl_trans_read_mem_bytes(fwrt->trans, cfg->d3_debug_data_base_addr, fwrt->dump.d3_debug_data, cfg->d3_debug_data_length); } IWL_EXPORT_SYMBOL(iwl_fw_dbg_read_d3_debug_data); static void iwl_fw_dbg_info_apply(struct iwl_fw_runtime *fwrt, struct iwl_fw_ini_debug_info_tlv *dbg_info, bool ext, enum iwl_fw_ini_apply_point pnt) { u32 img_name_len = le32_to_cpu(dbg_info->img_name_len); u32 dbg_cfg_name_len = le32_to_cpu(dbg_info->dbg_cfg_name_len); const char err_str[] = "WRT: ext=%d. Invalid %s name length %d, expected %d\n"; if (img_name_len != IWL_FW_INI_MAX_IMG_NAME_LEN) { IWL_WARN(fwrt, err_str, ext, "image", img_name_len, IWL_FW_INI_MAX_IMG_NAME_LEN); return; } if (dbg_cfg_name_len != IWL_FW_INI_MAX_DBG_CFG_NAME_LEN) { IWL_WARN(fwrt, err_str, ext, "debug cfg", dbg_cfg_name_len, IWL_FW_INI_MAX_DBG_CFG_NAME_LEN); return; } if (ext) { memcpy(fwrt->dump.external_dbg_cfg_name, dbg_info->dbg_cfg_name, sizeof(fwrt->dump.external_dbg_cfg_name)); } else { memcpy(fwrt->dump.img_name, dbg_info->img_name, sizeof(fwrt->dump.img_name)); memcpy(fwrt->dump.internal_dbg_cfg_name, dbg_info->dbg_cfg_name, sizeof(fwrt->dump.internal_dbg_cfg_name)); } } static void iwl_fw_dbg_buffer_allocation(struct iwl_fw_runtime *fwrt, u32 size) { struct iwl_trans *trans = fwrt->trans; void *virtual_addr = NULL; dma_addr_t phys_addr; if (WARN_ON_ONCE(trans->dbg.num_blocks == ARRAY_SIZE(trans->dbg.fw_mon))) return; virtual_addr = dma_alloc_coherent(fwrt->trans->dev, size, &phys_addr, GFP_KERNEL | __GFP_NOWARN); /* TODO: alloc fragments if needed */ if (!virtual_addr) IWL_ERR(fwrt, "Failed to allocate debug memory\n"); IWL_DEBUG_FW(trans, "Allocated DRAM buffer[%d], size=0x%x\n", trans->dbg.num_blocks, size); trans->dbg.fw_mon[trans->dbg.num_blocks].block = virtual_addr; trans->dbg.fw_mon[trans->dbg.num_blocks].physical = phys_addr; trans->dbg.fw_mon[trans->dbg.num_blocks].size = size; trans->dbg.num_blocks++; } static void iwl_fw_dbg_buffer_apply(struct iwl_fw_runtime *fwrt, struct iwl_fw_ini_allocation_tlv *alloc, enum iwl_fw_ini_apply_point pnt) { struct iwl_trans *trans = fwrt->trans; struct iwl_ldbg_config_cmd ldbg_cmd = { .type = cpu_to_le32(BUFFER_ALLOCATION), }; struct iwl_buffer_allocation_cmd *cmd = &ldbg_cmd.buffer_allocation; struct iwl_host_cmd hcmd = { .id = LDBG_CONFIG_CMD, .flags = CMD_ASYNC, .data[0] = &ldbg_cmd, .len[0] = sizeof(ldbg_cmd), }; int block_idx = trans->dbg.num_blocks; u32 buf_location = le32_to_cpu(alloc->buffer_location); u32 alloc_id = le32_to_cpu(alloc->allocation_id); if (alloc_id <= IWL_FW_INI_ALLOCATION_INVALID || alloc_id >= IWL_FW_INI_ALLOCATION_NUM) { IWL_ERR(fwrt, "WRT: Invalid allocation id %d\n", alloc_id); return; } if (fwrt->trans->dbg.ini_dest == IWL_FW_INI_LOCATION_INVALID) fwrt->trans->dbg.ini_dest = buf_location; if (buf_location != fwrt->trans->dbg.ini_dest) { WARN(fwrt, "WRT: attempt to override buffer location on apply point %d\n", pnt); return; } if (buf_location == IWL_FW_INI_LOCATION_SRAM_PATH) { IWL_DEBUG_FW(trans, "WRT: applying SMEM buffer destination\n"); /* set sram monitor by enabling bit 7 */ iwl_set_bit(fwrt->trans, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_BIT_MONITOR_SRAM); return; } if (buf_location != IWL_FW_INI_LOCATION_DRAM_PATH) return; if (!(BIT(alloc_id) & 
fwrt->trans->dbg.is_alloc)) { iwl_fw_dbg_buffer_allocation(fwrt, le32_to_cpu(alloc->size)); if (block_idx == trans->dbg.num_blocks) return; fwrt->trans->dbg.is_alloc |= BIT(alloc_id); } /* First block is assigned via registers / context info */ if (trans->dbg.num_blocks == 1) return; IWL_DEBUG_FW(trans, "WRT: applying DRAM buffer[%d] destination\n", block_idx); cmd->num_frags = cpu_to_le32(1); cmd->fragments[0].address = cpu_to_le64(trans->dbg.fw_mon[block_idx].physical); cmd->fragments[0].size = alloc->size; cmd->allocation_id = alloc->allocation_id; cmd->buffer_location = alloc->buffer_location; iwl_trans_send_cmd(trans, &hcmd); } static void iwl_fw_dbg_send_hcmd(struct iwl_fw_runtime *fwrt, struct iwl_ucode_tlv *tlv, bool ext) { struct iwl_fw_ini_hcmd_tlv *hcmd_tlv = (void *)&tlv->data[0]; struct iwl_fw_ini_hcmd *data = &hcmd_tlv->hcmd; u16 len = le32_to_cpu(tlv->length) - sizeof(*hcmd_tlv); struct iwl_host_cmd hcmd = { .id = WIDE_ID(data->group, data->id), .len = { len, }, .data = { data->data, }, }; /* currently the driver supports always on domain only */ if (le32_to_cpu(hcmd_tlv->domain) != IWL_FW_INI_DBG_DOMAIN_ALWAYS_ON) return; IWL_DEBUG_FW(fwrt, "WRT: ext=%d. Sending host command id=0x%x, group=0x%x\n", ext, data->id, data->group); iwl_trans_send_cmd(fwrt->trans, &hcmd); } static void iwl_fw_dbg_update_regions(struct iwl_fw_runtime *fwrt, struct iwl_fw_ini_region_tlv *tlv, bool ext, enum iwl_fw_ini_apply_point pnt) { void *iter = (void *)tlv->region_config; int i, size = le32_to_cpu(tlv->num_regions); const char *err_st = "WRT: ext=%d. Invalid region %s %d for apply point %d\n"; for (i = 0; i < size; i++) { struct iwl_fw_ini_region_cfg *reg = iter, **active; int id = le32_to_cpu(reg->region_id); u32 type = le32_to_cpu(reg->region_type); if (WARN(id >= ARRAY_SIZE(fwrt->dump.active_regs), err_st, ext, "id", id, pnt)) break; if (WARN(type == 0 || type >= IWL_FW_INI_REGION_NUM, err_st, ext, "type", type, pnt)) break; active = &fwrt->dump.active_regs[id]; if (*active) IWL_WARN(fwrt->trans, "WRT: ext=%d. Region id %d override\n", ext, id); IWL_DEBUG_FW(fwrt, "WRT: ext=%d. 
Activating region id %d\n", ext, id); *active = reg; if (type == IWL_FW_INI_REGION_TXF || type == IWL_FW_INI_REGION_RXF) iter += le32_to_cpu(reg->fifos.num_of_registers) * sizeof(__le32); else if (type == IWL_FW_INI_REGION_DEVICE_MEMORY || type == IWL_FW_INI_REGION_PERIPHERY_MAC || type == IWL_FW_INI_REGION_PERIPHERY_PHY || type == IWL_FW_INI_REGION_PERIPHERY_AUX || type == IWL_FW_INI_REGION_INTERNAL_BUFFER || type == IWL_FW_INI_REGION_PAGING || type == IWL_FW_INI_REGION_CSR || type == IWL_FW_INI_REGION_LMAC_ERROR_TABLE || type == IWL_FW_INI_REGION_UMAC_ERROR_TABLE) iter += le32_to_cpu(reg->internal.num_of_ranges) * sizeof(__le32); iter += sizeof(*reg); } } static int iwl_fw_dbg_trig_realloc(struct iwl_fw_runtime *fwrt, struct iwl_fw_ini_active_triggers *active, u32 id, int size) { void *ptr; if (size <= active->size) return 0; ptr = krealloc(active->trig, size, GFP_KERNEL); if (!ptr) { IWL_ERR(fwrt, "WRT: Failed to allocate memory for trigger %d\n", id); return -ENOMEM; } active->trig = ptr; active->size = size; return 0; } static void iwl_fw_dbg_update_triggers(struct iwl_fw_runtime *fwrt, struct iwl_fw_ini_trigger_tlv *tlv, bool ext, enum iwl_fw_ini_apply_point apply_point) { int i, size = le32_to_cpu(tlv->num_triggers); void *iter = (void *)tlv->trigger_config; for (i = 0; i < size; i++) { struct iwl_fw_ini_trigger *trig = iter; struct iwl_fw_ini_active_triggers *active; int id = le32_to_cpu(trig->trigger_id); u32 trig_regs_size = le32_to_cpu(trig->num_regions) * sizeof(__le32); if (WARN(id >= ARRAY_SIZE(fwrt->dump.active_trigs), "WRT: ext=%d. Invalid trigger id %d for apply point %d\n", ext, id, apply_point)) break; active = &fwrt->dump.active_trigs[id]; if (!active->active) { size_t trig_size = sizeof(*trig) + trig_regs_size; IWL_DEBUG_FW(fwrt, "WRT: ext=%d. Activating trigger %d\n", ext, id); if (iwl_fw_dbg_trig_realloc(fwrt, active, id, trig_size)) goto next; memcpy(active->trig, trig, trig_size); } else { u32 conf_override = !(le32_to_cpu(trig->override_trig) & 0xff); u32 region_override = !(le32_to_cpu(trig->override_trig) & 0xff00); u32 offset = 0; u32 active_regs = le32_to_cpu(active->trig->num_regions); u32 new_regs = le32_to_cpu(trig->num_regions); int mem_to_add = trig_regs_size; if (region_override) { IWL_DEBUG_FW(fwrt, "WRT: ext=%d. Trigger %d regions override\n", ext, id); mem_to_add -= active_regs * sizeof(__le32); } else { IWL_DEBUG_FW(fwrt, "WRT: ext=%d. Trigger %d regions appending\n", ext, id); offset += active_regs; new_regs += active_regs; } if (iwl_fw_dbg_trig_realloc(fwrt, active, id, active->size + mem_to_add)) goto next; if (conf_override) { IWL_DEBUG_FW(fwrt, "WRT: ext=%d. 
Trigger %d configuration override\n", ext, id); memcpy(active->trig, trig, sizeof(*trig)); } memcpy(active->trig->data + offset, trig->data, trig_regs_size); active->trig->num_regions = cpu_to_le32(new_regs); } /* Since zero means infinity - just set to -1 */ if (!le32_to_cpu(active->trig->occurrences)) active->trig->occurrences = cpu_to_le32(-1); active->active = true; if (id == IWL_FW_TRIGGER_ID_PERIODIC_TRIGGER) { u32 collect_interval = le32_to_cpu(trig->trigger_data); /* the minimum allowed interval is 50ms */ if (collect_interval < 50) { collect_interval = 50; trig->trigger_data = cpu_to_le32(collect_interval); } mod_timer(&fwrt->dump.periodic_trig, jiffies + msecs_to_jiffies(collect_interval)); } next: iter += sizeof(*trig) + trig_regs_size; } } static void _iwl_fw_dbg_apply_point(struct iwl_fw_runtime *fwrt, struct iwl_apply_point_data *data, enum iwl_fw_ini_apply_point pnt, bool ext) { struct iwl_apply_point_data *iter; if (!data->list.next) return; list_for_each_entry(iter, &data->list, list) { struct iwl_ucode_tlv *tlv = &iter->tlv; void *ini_tlv = (void *)tlv->data; u32 type = le32_to_cpu(tlv->type); const char invalid_ap_str[] = "WRT: ext=%d. Invalid apply point %d for %s\n"; switch (type) { case IWL_UCODE_TLV_TYPE_DEBUG_INFO: iwl_fw_dbg_info_apply(fwrt, ini_tlv, ext, pnt); break; case IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION: if (pnt != IWL_FW_INI_APPLY_EARLY) { IWL_ERR(fwrt, invalid_ap_str, ext, pnt, "buffer allocation"); break; } iwl_fw_dbg_buffer_apply(fwrt, ini_tlv, pnt); break; case IWL_UCODE_TLV_TYPE_HCMD: if (pnt < IWL_FW_INI_APPLY_AFTER_ALIVE) { IWL_ERR(fwrt, invalid_ap_str, ext, pnt, "host command"); break; } iwl_fw_dbg_send_hcmd(fwrt, tlv, ext); break; case IWL_UCODE_TLV_TYPE_REGIONS: iwl_fw_dbg_update_regions(fwrt, ini_tlv, ext, pnt); break; case IWL_UCODE_TLV_TYPE_TRIGGERS: iwl_fw_dbg_update_triggers(fwrt, ini_tlv, ext, pnt); break; case IWL_UCODE_TLV_TYPE_DEBUG_FLOW: break; default: WARN_ONCE(1, "WRT: ext=%d. 
Invalid TLV 0x%x for apply point\n", ext, type); break; } } } static void iwl_fw_dbg_ini_reset_cfg(struct iwl_fw_runtime *fwrt) { int i; for (i = 0; i < IWL_FW_INI_MAX_REGION_ID; i++) fwrt->dump.active_regs[i] = NULL; /* disable the triggers, used in recovery flow */ for (i = 0; i < IWL_FW_TRIGGER_ID_NUM; i++) fwrt->dump.active_trigs[i].active = false; memset(fwrt->dump.img_name, 0, sizeof(fwrt->dump.img_name)); memset(fwrt->dump.internal_dbg_cfg_name, 0, sizeof(fwrt->dump.internal_dbg_cfg_name)); memset(fwrt->dump.external_dbg_cfg_name, 0, sizeof(fwrt->dump.external_dbg_cfg_name)); fwrt->trans->dbg.ini_dest = IWL_FW_INI_LOCATION_INVALID; } void iwl_fw_dbg_apply_point(struct iwl_fw_runtime *fwrt, enum iwl_fw_ini_apply_point apply_point) { void *data = &fwrt->trans->dbg.apply_points[apply_point]; IWL_DEBUG_FW(fwrt, "WRT: enabling apply point %d\n", apply_point); if (apply_point == IWL_FW_INI_APPLY_EARLY) iwl_fw_dbg_ini_reset_cfg(fwrt); _iwl_fw_dbg_apply_point(fwrt, data, apply_point, false); data = &fwrt->trans->dbg.apply_points_ext[apply_point]; _iwl_fw_dbg_apply_point(fwrt, data, apply_point, true); } IWL_EXPORT_SYMBOL(iwl_fw_dbg_apply_point); void iwl_fw_dbg_stop_sync(struct iwl_fw_runtime *fwrt) { int i; del_timer(&fwrt->dump.periodic_trig); for (i = 0; i < IWL_FW_RUNTIME_DUMP_WK_NUM; i++) iwl_fw_dbg_collect_sync(fwrt, i); iwl_fw_dbg_stop_restart_recording(fwrt, NULL, true); } IWL_EXPORT_SYMBOL(iwl_fw_dbg_stop_sync); void iwl_fw_dbg_periodic_trig_handler(struct timer_list *t) { struct iwl_fw_runtime *fwrt; enum iwl_fw_ini_trigger_id id = IWL_FW_TRIGGER_ID_PERIODIC_TRIGGER; int ret; typeof(fwrt->dump) *dump_ptr = container_of(t, typeof(fwrt->dump), periodic_trig); fwrt = container_of(dump_ptr, typeof(*fwrt), dump); ret = _iwl_fw_dbg_ini_collect(fwrt, id); if (!ret || ret == -EBUSY) { struct iwl_fw_ini_trigger *trig = fwrt->dump.active_trigs[id].trig; u32 occur = le32_to_cpu(trig->occurrences); u32 collect_interval = le32_to_cpu(trig->trigger_data); if (!occur) return; mod_timer(&fwrt->dump.periodic_trig, jiffies + msecs_to_jiffies(collect_interval)); } } #define FSEQ_REG(x) { .addr = (x), .str = #x, } void iwl_fw_error_print_fseq_regs(struct iwl_fw_runtime *fwrt) { struct iwl_trans *trans = fwrt->trans; unsigned long flags; int i; struct { u32 addr; const char *str; } fseq_regs[] = { FSEQ_REG(FSEQ_ERROR_CODE), FSEQ_REG(FSEQ_TOP_INIT_VERSION), FSEQ_REG(FSEQ_CNVIO_INIT_VERSION), FSEQ_REG(FSEQ_OTP_VERSION), FSEQ_REG(FSEQ_TOP_CONTENT_VERSION), FSEQ_REG(FSEQ_ALIVE_TOKEN), FSEQ_REG(FSEQ_CNVI_ID), FSEQ_REG(FSEQ_CNVR_ID), FSEQ_REG(CNVI_AUX_MISC_CHIP), FSEQ_REG(CNVR_AUX_MISC_CHIP), FSEQ_REG(CNVR_SCU_SD_REGS_SD_REG_DIG_DCDC_VTRIM), FSEQ_REG(CNVR_SCU_SD_REGS_SD_REG_ACTIVE_VDIG_MIRROR), }; if (!iwl_trans_grab_nic_access(trans, &flags)) return; IWL_ERR(fwrt, "Fseq Registers:\n"); for (i = 0; i < ARRAY_SIZE(fseq_regs); i++) IWL_ERR(fwrt, "0x%08X | %s\n", iwl_read_prph_no_grab(trans, fseq_regs[i].addr), fseq_regs[i].str); iwl_trans_release_nic_access(trans, &flags); } IWL_EXPORT_SYMBOL(iwl_fw_error_print_fseq_regs); static int iwl_fw_dbg_suspend_resume_hcmd(struct iwl_trans *trans, bool suspend) { struct iwl_dbg_suspend_resume_cmd cmd = { .operation = suspend ? 
cpu_to_le32(DBGC_SUSPEND_CMD) : cpu_to_le32(DBGC_RESUME_CMD), }; struct iwl_host_cmd hcmd = { .id = WIDE_ID(DEBUG_GROUP, DBGC_SUSPEND_RESUME), .data[0] = &cmd, .len[0] = sizeof(cmd), }; return iwl_trans_send_cmd(trans, &hcmd); } static void iwl_fw_dbg_stop_recording(struct iwl_trans *trans, struct iwl_fw_dbg_params *params) { if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) { iwl_set_bits_prph(trans, MON_BUFF_SAMPLE_CTL, 0x100); return; } if (params) { params->in_sample = iwl_read_umac_prph(trans, DBGC_IN_SAMPLE); params->out_ctrl = iwl_read_umac_prph(trans, DBGC_OUT_CTRL); } iwl_write_umac_prph(trans, DBGC_IN_SAMPLE, 0); /* wait for the DBGC to finish writing the internal buffer to DRAM to * avoid halting the HW while writing */ usleep_range(700, 1000); iwl_write_umac_prph(trans, DBGC_OUT_CTRL, 0); } static int iwl_fw_dbg_restart_recording(struct iwl_trans *trans, struct iwl_fw_dbg_params *params) { if (!params) return -EIO; if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) { iwl_clear_bits_prph(trans, MON_BUFF_SAMPLE_CTL, 0x100); iwl_clear_bits_prph(trans, MON_BUFF_SAMPLE_CTL, 0x1); iwl_set_bits_prph(trans, MON_BUFF_SAMPLE_CTL, 0x1); } else { iwl_write_umac_prph(trans, DBGC_IN_SAMPLE, params->in_sample); iwl_write_umac_prph(trans, DBGC_OUT_CTRL, params->out_ctrl); } return 0; } int iwl_fw_dbg_stop_restart_recording(struct iwl_fw_runtime *fwrt, struct iwl_fw_dbg_params *params, bool stop) { int ret = 0; /* if the FW crashed or not debug monitor cfg was given, there is * no point in changing the recording state */ if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status) || (!fwrt->trans->dbg.dest_tlv && fwrt->trans->dbg.ini_dest == IWL_FW_INI_LOCATION_INVALID)) return 0; if (fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_DBG_SUSPEND_RESUME_CMD_SUPP)) ret = iwl_fw_dbg_suspend_resume_hcmd(fwrt->trans, stop); else if (stop) iwl_fw_dbg_stop_recording(fwrt->trans, params); else ret = iwl_fw_dbg_restart_recording(fwrt->trans, params); #ifdef CPTCFG_IWLWIFI_DEBUGFS if (!ret) { if (stop) fwrt->trans->dbg.rec_on = false; else iwl_fw_set_dbg_rec_on(fwrt); } #endif return ret; } IWL_EXPORT_SYMBOL(iwl_fw_dbg_stop_restart_recording); backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/fw/dbg.h000066400000000000000000000322651351064634500261200ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. 
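/* Editor's sketch (not part of the driver): typical pairing of
 * iwl_fw_dbg_stop_restart_recording() calls around work that must not race
 * with DBGC recording. The function name example_pause_dbg_monitor() is an
 * assumption for illustration. When the firmware advertises
 * IWL_UCODE_TLV_CAPA_DBG_SUSPEND_RESUME_CMD_SUPP the helper above sends the
 * DBGC_SUSPEND_RESUME host command; otherwise it saves and restores the
 * DBGC_IN_SAMPLE/DBGC_OUT_CTRL registers directly.
 */
static void example_pause_dbg_monitor(struct iwl_fw_runtime *fwrt)
{
	struct iwl_fw_dbg_params params = {};

	/* stop == true: suspend recording, saving register state in params */
	if (iwl_fw_dbg_stop_restart_recording(fwrt, &params, true))
		return;

	/* ... safely read or reset the monitor buffer here ... */

	/* stop == false: resume recording from the saved state */
	iwl_fw_dbg_stop_restart_recording(fwrt, &params, false);
}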
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #ifndef __iwl_fw_dbg_h__ #define __iwl_fw_dbg_h__ #include <linux/workqueue.h> #include <net/cfg80211.h> #include "runtime.h" #include "iwl-prph.h" #include "iwl-io.h" #include "file.h" #include "error-dump.h" #include "api/commands.h" #include "api/dbg-tlv.h" #include "api/alive.h" /** * struct iwl_fw_dump_desc - describes the dump * @len: length of trig_desc->data * @trig_desc: the description of the dump */ struct iwl_fw_dump_desc { size_t len; /* must be last */ struct iwl_fw_error_dump_trigger_desc trig_desc; }; /** * struct iwl_fw_dbg_params - register values to restore * @in_sample: DBGC_IN_SAMPLE value * @out_ctrl: DBGC_OUT_CTRL value */ struct iwl_fw_dbg_params { u32 in_sample; u32 out_ctrl; }; extern const struct iwl_fw_dump_desc iwl_dump_desc_assert; static inline void iwl_fw_free_dump_desc(struct iwl_fw_runtime *fwrt) { if (fwrt->dump.desc != &iwl_dump_desc_assert) kfree(fwrt->dump.desc); fwrt->dump.desc = NULL; fwrt->dump.lmac_err_id[0] = 0; if (fwrt->smem_cfg.num_lmacs > 1) fwrt->dump.lmac_err_id[1] = 0; fwrt->dump.umac_err_id = 0; } int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt, const struct iwl_fw_dump_desc *desc, bool monitor_only, unsigned int delay); int iwl_fw_dbg_error_collect(struct iwl_fw_runtime *fwrt, enum iwl_fw_dbg_trigger trig_type); int _iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt, enum iwl_fw_ini_trigger_id id); int iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt, u32 legacy_trigger_id); int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt, enum iwl_fw_dbg_trigger trig, const char *str, size_t len, struct iwl_fw_dbg_trigger_tlv *trigger); int iwl_fw_dbg_collect_trig(struct iwl_fw_runtime *fwrt, struct iwl_fw_dbg_trigger_tlv *trigger, const char *fmt, ...) 
__printf(3, 4); int iwl_fw_start_dbg_conf(struct iwl_fw_runtime *fwrt, u8 id); #define iwl_fw_dbg_trigger_enabled(fw, id) ({ \ void *__dbg_trigger = (fw)->dbg.trigger_tlv[(id)]; \ unlikely(__dbg_trigger); \ }) static inline struct iwl_fw_dbg_trigger_tlv* _iwl_fw_dbg_get_trigger(const struct iwl_fw *fw, enum iwl_fw_dbg_trigger id) { return fw->dbg.trigger_tlv[id]; } #define iwl_fw_dbg_get_trigger(fw, id) ({ \ BUILD_BUG_ON(!__builtin_constant_p(id)); \ BUILD_BUG_ON((id) >= FW_DBG_TRIGGER_MAX); \ _iwl_fw_dbg_get_trigger((fw), (id)); \ }) static inline bool iwl_fw_dbg_trigger_vif_match(struct iwl_fw_dbg_trigger_tlv *trig, struct wireless_dev *wdev) { u32 trig_vif = le32_to_cpu(trig->vif_type); return trig_vif == IWL_FW_DBG_CONF_VIF_ANY || wdev->iftype == trig_vif; } static inline bool iwl_fw_dbg_trigger_stop_conf_match(struct iwl_fw_runtime *fwrt, struct iwl_fw_dbg_trigger_tlv *trig) { return ((trig->mode & IWL_FW_DBG_TRIGGER_STOP) && (fwrt->dump.conf == FW_DBG_INVALID || (BIT(fwrt->dump.conf) & le32_to_cpu(trig->stop_conf_ids)))); } static inline bool iwl_fw_dbg_no_trig_window(struct iwl_fw_runtime *fwrt, u32 id, u32 dis_usec) { unsigned long wind_jiff = usecs_to_jiffies(dis_usec); /* If this is the first event checked, jump to update start ts */ if (fwrt->dump.non_collect_ts_start[id] && (time_after(fwrt->dump.non_collect_ts_start[id] + wind_jiff, jiffies))) return true; fwrt->dump.non_collect_ts_start[id] = jiffies; return false; } static inline bool iwl_fw_dbg_trigger_check_stop(struct iwl_fw_runtime *fwrt, struct wireless_dev *wdev, struct iwl_fw_dbg_trigger_tlv *trig) { u32 usec = le16_to_cpu(trig->trig_dis_ms) * USEC_PER_MSEC; if (wdev && !iwl_fw_dbg_trigger_vif_match(trig, wdev)) return false; if (iwl_fw_dbg_no_trig_window(fwrt, le32_to_cpu(trig->id), usec)) { IWL_WARN(fwrt, "Trigger %d occurred while no-collect window.\n", trig->id); return false; } return iwl_fw_dbg_trigger_stop_conf_match(fwrt, trig); } static inline struct iwl_fw_dbg_trigger_tlv* _iwl_fw_dbg_trigger_on(struct iwl_fw_runtime *fwrt, struct wireless_dev *wdev, const enum iwl_fw_dbg_trigger id) { struct iwl_fw_dbg_trigger_tlv *trig; if (fwrt->trans->dbg.ini_valid) return NULL; if (!iwl_fw_dbg_trigger_enabled(fwrt->fw, id)) return NULL; trig = _iwl_fw_dbg_get_trigger(fwrt->fw, id); if (!iwl_fw_dbg_trigger_check_stop(fwrt, wdev, trig)) return NULL; return trig; } #define iwl_fw_dbg_trigger_on(fwrt, wdev, id) ({ \ BUILD_BUG_ON(!__builtin_constant_p(id)); \ BUILD_BUG_ON((id) >= FW_DBG_TRIGGER_MAX); \ _iwl_fw_dbg_trigger_on((fwrt), (wdev), (id)); \ }) static inline bool iwl_fw_ini_trigger_on(struct iwl_fw_runtime *fwrt, enum iwl_fw_ini_trigger_id id) { struct iwl_fw_ini_trigger *trig; u32 usec; if (!fwrt->trans->dbg.ini_valid || id == IWL_FW_TRIGGER_ID_INVALID || id >= IWL_FW_TRIGGER_ID_NUM || !fwrt->dump.active_trigs[id].active) return false; trig = fwrt->dump.active_trigs[id].trig; usec = le32_to_cpu(trig->ignore_consec); if (iwl_fw_dbg_no_trig_window(fwrt, id, usec)) { IWL_WARN(fwrt, "Trigger %d fired in no-collect window\n", id); return false; } return true; } static inline void _iwl_fw_dbg_trigger_simple_stop(struct iwl_fw_runtime *fwrt, struct wireless_dev *wdev, struct iwl_fw_dbg_trigger_tlv *trigger) { if (!trigger) return; if (!iwl_fw_dbg_trigger_check_stop(fwrt, wdev, trigger)) return; iwl_fw_dbg_collect_trig(fwrt, trigger, NULL); } #define iwl_fw_dbg_trigger_simple_stop(fwrt, wdev, trig) \ _iwl_fw_dbg_trigger_simple_stop((fwrt), (wdev), \ iwl_fw_dbg_get_trigger((fwrt)->fw,\ (trig))) int 
iwl_fw_dbg_stop_restart_recording(struct iwl_fw_runtime *fwrt, struct iwl_fw_dbg_params *params, bool stop); #ifdef CPTCFG_IWLWIFI_DEBUGFS static inline void iwl_fw_set_dbg_rec_on(struct iwl_fw_runtime *fwrt) { if (fwrt->cur_fw_img == IWL_UCODE_REGULAR && (fwrt->fw->dbg.dest_tlv || fwrt->trans->dbg.ini_dest != IWL_FW_INI_LOCATION_INVALID)) fwrt->trans->dbg.rec_on = true; } #endif static inline void iwl_fw_dump_conf_clear(struct iwl_fw_runtime *fwrt) { fwrt->dump.conf = FW_DBG_INVALID; } void iwl_fw_error_dump_wk(struct work_struct *work); static inline bool iwl_fw_dbg_type_on(struct iwl_fw_runtime *fwrt, u32 type) { return (fwrt->fw->dbg.dump_mask & BIT(type)); } static inline bool iwl_fw_dbg_is_d3_debug_enabled(struct iwl_fw_runtime *fwrt) { return fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_D3_DEBUG) && fwrt->trans->cfg->d3_debug_data_length && fwrt->ops && fwrt->ops->d3_debug_enable && fwrt->ops->d3_debug_enable(fwrt->ops_ctx) && iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_D3_DEBUG_DATA); } static inline bool iwl_fw_dbg_is_paging_enabled(struct iwl_fw_runtime *fwrt) { return iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_PAGING) && !fwrt->trans->cfg->gen2 && fwrt->cur_fw_img < IWL_UCODE_TYPE_MAX && fwrt->fw->img[fwrt->cur_fw_img].paging_mem_size && fwrt->fw_paging_db[0].fw_paging_block; } void iwl_fw_dbg_read_d3_debug_data(struct iwl_fw_runtime *fwrt); static inline void iwl_fw_flush_dumps(struct iwl_fw_runtime *fwrt) { int i; del_timer(&fwrt->dump.periodic_trig); for (i = 0; i < IWL_FW_RUNTIME_DUMP_WK_NUM; i++) { flush_delayed_work(&fwrt->dump.wks[i].wk); fwrt->dump.wks[i].ini_trig_id = IWL_FW_TRIGGER_ID_INVALID; } } static inline void iwl_fw_cancel_dumps(struct iwl_fw_runtime *fwrt) { int i; del_timer(&fwrt->dump.periodic_trig); for (i = 0; i < IWL_FW_RUNTIME_DUMP_WK_NUM; i++) { cancel_delayed_work_sync(&fwrt->dump.wks[i].wk); fwrt->dump.wks[i].ini_trig_id = IWL_FW_TRIGGER_ID_INVALID; } } #ifdef CPTCFG_IWLWIFI_DEBUGFS static inline void iwl_fw_cancel_timestamp(struct iwl_fw_runtime *fwrt) { fwrt->timestamp.delay = 0; cancel_delayed_work_sync(&fwrt->timestamp.wk); } void iwl_fw_trigger_timestamp(struct iwl_fw_runtime *fwrt, u32 delay); static inline void iwl_fw_suspend_timestamp(struct iwl_fw_runtime *fwrt) { cancel_delayed_work_sync(&fwrt->timestamp.wk); } static inline void iwl_fw_resume_timestamp(struct iwl_fw_runtime *fwrt) { if (!fwrt->timestamp.delay) return; schedule_delayed_work(&fwrt->timestamp.wk, round_jiffies_relative(fwrt->timestamp.delay)); } #else static inline void iwl_fw_cancel_timestamp(struct iwl_fw_runtime *fwrt) {} static inline void iwl_fw_trigger_timestamp(struct iwl_fw_runtime *fwrt, u32 delay) {} static inline void iwl_fw_suspend_timestamp(struct iwl_fw_runtime *fwrt) {} static inline void iwl_fw_resume_timestamp(struct iwl_fw_runtime *fwrt) {} #endif /* CPTCFG_IWLWIFI_DEBUGFS */ void iwl_fw_dbg_apply_point(struct iwl_fw_runtime *fwrt, enum iwl_fw_ini_apply_point apply_point); static inline void iwl_fw_lmac1_set_alive_err_table(struct iwl_trans *trans, u32 lmac_error_event_table) { if (!(trans->dbg.error_event_table_tlv_status & IWL_ERROR_EVENT_TABLE_LMAC1) || WARN_ON(trans->dbg.lmac_error_event_table[0] != lmac_error_event_table)) trans->dbg.lmac_error_event_table[0] = lmac_error_event_table; } static inline void iwl_fw_umac_set_alive_err_table(struct iwl_trans *trans, u32 umac_error_event_table) { if (!(trans->dbg.error_event_table_tlv_status & IWL_ERROR_EVENT_TABLE_UMAC) || WARN_ON(trans->dbg.umac_error_event_table != umac_error_event_table)) 
trans->dbg.umac_error_event_table = umac_error_event_table; } void iwl_fw_dbg_stop_sync(struct iwl_fw_runtime *fwrt); static inline void iwl_fw_error_collect(struct iwl_fw_runtime *fwrt) { if (fwrt->trans->dbg.ini_valid && fwrt->trans->dbg.hw_error) { _iwl_fw_dbg_ini_collect(fwrt, IWL_FW_TRIGGER_ID_FW_HW_ERROR); fwrt->trans->dbg.hw_error = false; } else { iwl_fw_dbg_collect_desc(fwrt, &iwl_dump_desc_assert, false, 0); } } void iwl_fw_dbg_periodic_trig_handler(struct timer_list *t); static inline void iwl_fwrt_update_fw_versions(struct iwl_fw_runtime *fwrt, struct iwl_lmac_alive *lmac, struct iwl_umac_alive *umac) { if (lmac) { fwrt->dump.fw_ver.type = lmac->ver_type; fwrt->dump.fw_ver.subtype = lmac->ver_subtype; fwrt->dump.fw_ver.lmac_major = le32_to_cpu(lmac->ucode_major); fwrt->dump.fw_ver.lmac_minor = le32_to_cpu(lmac->ucode_minor); } if (umac) { fwrt->dump.fw_ver.umac_major = le32_to_cpu(umac->umac_major); fwrt->dump.fw_ver.umac_minor = le32_to_cpu(umac->umac_minor); } } void iwl_fw_error_print_fseq_regs(struct iwl_fw_runtime *fwrt); #endif /* __iwl_fw_dbg_h__ */ backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c000066400000000000000000000242221351064634500267700ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. 
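/* Editor's sketch (hypothetical, not in the tree): firing a legacy debug
 * trigger with the dbg.h API above. iwl_fw_dbg_trigger_on() returns the
 * trigger TLV only when ini mode is off, the TLV is present and the
 * no-collect window has expired; iwl_fw_dbg_collect_trig() then formats the
 * description and schedules the dump. example_fire_mlme_trigger() is an
 * assumed caller name.
 */
static void example_fire_mlme_trigger(struct iwl_fw_runtime *fwrt,
				      struct wireless_dev *wdev)
{
	struct iwl_fw_dbg_trigger_tlv *trig;

	/* the id must be a compile-time constant, see BUILD_BUG_ON above */
	trig = iwl_fw_dbg_trigger_on(fwrt, wdev, FW_DBG_TRIGGER_MLME);
	if (!trig)
		return;

	iwl_fw_dbg_collect_trig(fwrt, trig, "MLME event on vif type %d",
				wdev->iftype);
}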
* * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #include "api/commands.h" #include "debugfs.h" #include "dbg.h" #include "api/rs.h" #define FWRT_DEBUGFS_OPEN_WRAPPER(name, buflen, argtype) \ struct dbgfs_##name##_data { \ argtype *arg; \ bool read_done; \ ssize_t rlen; \ char rbuf[buflen]; \ }; \ static int _iwl_dbgfs_##name##_open(struct inode *inode, \ struct file *file) \ { \ struct dbgfs_##name##_data *data; \ \ data = kzalloc(sizeof(*data), GFP_KERNEL); \ if (!data) \ return -ENOMEM; \ \ data->read_done = false; \ data->arg = inode->i_private; \ file->private_data = data; \ \ return 0; \ } #define FWRT_DEBUGFS_READ_WRAPPER(name) \ static ssize_t _iwl_dbgfs_##name##_read(struct file *file, \ char __user *user_buf, \ size_t count, loff_t *ppos) \ { \ struct dbgfs_##name##_data *data = file->private_data; \ \ if (!data->read_done) { \ data->read_done = true; \ data->rlen = iwl_dbgfs_##name##_read(data->arg, \ sizeof(data->rbuf),\ data->rbuf); \ } \ \ if (data->rlen < 0) \ return data->rlen; \ return simple_read_from_buffer(user_buf, count, ppos, \ data->rbuf, data->rlen); \ } static int _iwl_dbgfs_release(struct inode *inode, struct file *file) { kfree(file->private_data); return 0; } #define _FWRT_DEBUGFS_READ_FILE_OPS(name, buflen, argtype) \ FWRT_DEBUGFS_OPEN_WRAPPER(name, buflen, argtype) \ FWRT_DEBUGFS_READ_WRAPPER(name) \ static const struct file_operations iwl_dbgfs_##name##_ops = { \ .read = _iwl_dbgfs_##name##_read, \ .open = _iwl_dbgfs_##name##_open, \ .llseek = generic_file_llseek, \ .release = _iwl_dbgfs_release, \ } #define FWRT_DEBUGFS_WRITE_WRAPPER(name, buflen, argtype) \ static ssize_t _iwl_dbgfs_##name##_write(struct file *file, \ const char __user *user_buf, \ size_t count, loff_t *ppos) \ { \ argtype *arg = \ ((struct dbgfs_##name##_data *)file->private_data)->arg;\ char buf[buflen] = {}; \ size_t buf_size = min(count, sizeof(buf) - 1); \ \ if (copy_from_user(buf, user_buf, buf_size)) \ return -EFAULT; \ \ return iwl_dbgfs_##name##_write(arg, buf, buf_size); \ } #define _FWRT_DEBUGFS_READ_WRITE_FILE_OPS(name, buflen, argtype) \ FWRT_DEBUGFS_OPEN_WRAPPER(name, buflen, argtype) \ FWRT_DEBUGFS_WRITE_WRAPPER(name, buflen, argtype) \ FWRT_DEBUGFS_READ_WRAPPER(name) \ static const struct file_operations iwl_dbgfs_##name##_ops = { \ .write = _iwl_dbgfs_##name##_write, \ .read = _iwl_dbgfs_##name##_read, \ .open = _iwl_dbgfs_##name##_open, \ .llseek = generic_file_llseek, \ .release = _iwl_dbgfs_release, \ } #define _FWRT_DEBUGFS_WRITE_FILE_OPS(name, buflen, argtype) \ FWRT_DEBUGFS_OPEN_WRAPPER(name, buflen, argtype) \ FWRT_DEBUGFS_WRITE_WRAPPER(name, buflen, argtype) \ static const struct file_operations iwl_dbgfs_##name##_ops = { \ .write = _iwl_dbgfs_##name##_write, \ .open = 
_iwl_dbgfs_##name##_open, \ .llseek = generic_file_llseek, \ .release = _iwl_dbgfs_release, \ } #define FWRT_DEBUGFS_READ_FILE_OPS(name, bufsz) \ _FWRT_DEBUGFS_READ_FILE_OPS(name, bufsz, struct iwl_fw_runtime) #define FWRT_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \ _FWRT_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct iwl_fw_runtime) #define FWRT_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \ _FWRT_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct iwl_fw_runtime) #define FWRT_DEBUGFS_ADD_FILE_ALIAS(alias, name, parent, mode) do { \ debugfs_create_file(alias, mode, parent, fwrt, \ &iwl_dbgfs_##name##_ops); \ } while (0) #define FWRT_DEBUGFS_ADD_FILE(name, parent, mode) \ FWRT_DEBUGFS_ADD_FILE_ALIAS(#name, name, parent, mode) static int iwl_fw_send_timestamp_marker_cmd(struct iwl_fw_runtime *fwrt) { struct iwl_mvm_marker marker = { .dw_len = sizeof(struct iwl_mvm_marker) / 4, .marker_id = MARKER_ID_SYNC_CLOCK, /* the real timestamp is taken from the ftrace clock * this is for finding the match between fw and kernel logs */ .timestamp = cpu_to_le64(fwrt->timestamp.seq++), }; struct iwl_host_cmd hcmd = { .id = MARKER_CMD, .flags = CMD_ASYNC, .data[0] = &marker, .len[0] = sizeof(marker), }; return iwl_trans_send_cmd(fwrt->trans, &hcmd); } static void iwl_fw_timestamp_marker_wk(struct work_struct *work) { int ret; struct iwl_fw_runtime *fwrt = container_of(work, struct iwl_fw_runtime, timestamp.wk.work); unsigned long delay = fwrt->timestamp.delay; ret = iwl_fw_send_timestamp_marker_cmd(fwrt); if (!ret && delay) schedule_delayed_work(&fwrt->timestamp.wk, round_jiffies_relative(delay)); else IWL_INFO(fwrt, "stopping timestamp_marker, ret: %d, delay: %u\n", ret, jiffies_to_msecs(delay) / 1000); } void iwl_fw_trigger_timestamp(struct iwl_fw_runtime *fwrt, u32 delay) { IWL_INFO(fwrt, "starting timestamp_marker trigger with delay: %us\n", delay); iwl_fw_cancel_timestamp(fwrt); fwrt->timestamp.delay = msecs_to_jiffies(delay * 1000); schedule_delayed_work(&fwrt->timestamp.wk, round_jiffies_relative(fwrt->timestamp.delay)); } static ssize_t iwl_dbgfs_timestamp_marker_write(struct iwl_fw_runtime *fwrt, char *buf, size_t count) { int ret; u32 delay; ret = kstrtou32(buf, 10, &delay); if (ret < 0) return ret; iwl_fw_trigger_timestamp(fwrt, delay); return count; } static ssize_t iwl_dbgfs_timestamp_marker_read(struct iwl_fw_runtime *fwrt, size_t size, char *buf) { u32 delay_secs = jiffies_to_msecs(fwrt->timestamp.delay) / 1000; return scnprintf(buf, size, "%d\n", delay_secs); } FWRT_DEBUGFS_READ_WRITE_FILE_OPS(timestamp_marker, 16); struct hcmd_write_data { __be32 cmd_id; __be32 flags; __be16 length; u8 data[0]; } __packed; static ssize_t iwl_dbgfs_send_hcmd_write(struct iwl_fw_runtime *fwrt, char *buf, size_t count) { size_t header_size = (sizeof(u32) * 2 + sizeof(u16)) * 2; size_t data_size = (count - 1) / 2; int ret; struct hcmd_write_data *data; struct iwl_host_cmd hcmd = { .len = { 0, }, .data = { NULL, }, }; if (!iwl_trans_fw_running(fwrt->trans)) return -EIO; if (count < header_size + 1 || count > 1024 * 4) return -EINVAL; data = kmalloc(data_size, GFP_KERNEL); if (!data) return -ENOMEM; ret = hex2bin((u8 *)data, buf, data_size); if (ret) goto out; hcmd.id = be32_to_cpu(data->cmd_id); hcmd.flags = be32_to_cpu(data->flags); hcmd.len[0] = be16_to_cpu(data->length); hcmd.data[0] = data->data; if (count != header_size + hcmd.len[0] * 2 + 1) { IWL_ERR(fwrt, "host command data size does not match header length\n"); ret = -EINVAL; goto out; } if (fwrt->ops && fwrt->ops->send_hcmd) ret = fwrt->ops->send_hcmd(fwrt->ops_ctx, 
&hcmd); else ret = -EPERM; if (ret < 0) goto out; if (hcmd.flags & CMD_WANT_SKB) iwl_free_resp(&hcmd); out: kfree(data); return ret ?: count; } FWRT_DEBUGFS_WRITE_FILE_OPS(send_hcmd, 512); void iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt, struct dentry *dbgfs_dir) { INIT_DELAYED_WORK(&fwrt->timestamp.wk, iwl_fw_timestamp_marker_wk); FWRT_DEBUGFS_ADD_FILE(timestamp_marker, dbgfs_dir, 0200); FWRT_DEBUGFS_ADD_FILE(send_hcmd, dbgfs_dir, 0200); } backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/fw/debugfs.h000066400000000000000000000062601351064634500267770ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
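/* Editor's sketch (assumed names, not in the tree): adding a new fwrt
 * debugfs file with the wrapper macros above. The name passed to
 * FWRT_DEBUGFS_READ_WRITE_FILE_OPS() must match the iwl_dbgfs_<name>_read/
 * iwl_dbgfs_<name>_write handlers, and the size argument dimensions the
 * rbuf buffer that FWRT_DEBUGFS_OPEN_WRAPPER() embeds in
 * struct dbgfs_<name>_data.
 */
static ssize_t iwl_dbgfs_example_read(struct iwl_fw_runtime *fwrt,
				      size_t size, char *buf)
{
	/* read handlers return the number of bytes placed in buf */
	return scnprintf(buf, size, "example\n");
}

static ssize_t iwl_dbgfs_example_write(struct iwl_fw_runtime *fwrt,
				       char *buf, size_t count)
{
	/* parse buf here; returning count consumes the whole write */
	return count;
}

FWRT_DEBUGFS_READ_WRITE_FILE_OPS(example, 16);

/* and, inside iwl_fwrt_dbgfs_register():
 * FWRT_DEBUGFS_ADD_FILE(example, dbgfs_dir, 0600);
 */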
* *****************************************************************************/ #include "runtime.h" #ifdef CPTCFG_IWLWIFI_DEBUGFS void iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt, struct dentry *dbgfs_dir); #else static inline void iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt, struct dentry *dbgfs_dir) { } #endif /* CPTCFG_IWLWIFI_DEBUGFS */ backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h000066400000000000000000000421371351064634500274570ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2014 Intel Corporation. All rights reserved. * Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright (C) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2014 Intel Corporation. All rights reserved. * Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright (C) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*****************************************************************************/ #ifndef __fw_error_dump_h__ #define __fw_error_dump_h__ #include <linux/types.h> #define IWL_FW_ERROR_DUMP_BARKER 0x14789632 #define IWL_FW_INI_ERROR_DUMP_BARKER 0x14789633 /** * enum iwl_fw_error_dump_type - types of data in the dump file * @IWL_FW_ERROR_DUMP_CSR: Control Status Registers - from offset 0 * @IWL_FW_ERROR_DUMP_RXF: * @IWL_FW_ERROR_DUMP_TXCMD: last TX command data, structured as * &struct iwl_fw_error_dump_txcmd packets * @IWL_FW_ERROR_DUMP_DEV_FW_INFO: struct %iwl_fw_error_dump_info * info on the device / firmware. * @IWL_FW_ERROR_DUMP_FW_MONITOR: firmware monitor * @IWL_FW_ERROR_DUMP_PRPH: range of periphery registers - there can be several * sections like this in a single file. * @IWL_FW_ERROR_DUMP_FH_REGS: range of FH registers * @IWL_FW_ERROR_DUMP_MEM: chunk of memory * @IWL_FW_ERROR_DUMP_ERROR_INFO: description of what triggered this dump. * Structured as &struct iwl_fw_error_dump_trigger_desc. * @IWL_FW_ERROR_DUMP_RB: the content of an RB structured as * &struct iwl_fw_error_dump_rb * @IWL_FW_ERROR_DUMP_PAGING: UMAC's image memory segments which were * paged to the DRAM. * @IWL_FW_ERROR_DUMP_RADIO_REG: Dump the radio registers. * @IWL_FW_ERROR_DUMP_EXTERNAL: used only by external code utilities, and * for that reason is not in use in any other place in the Linux Wi-Fi * stack. * @IWL_FW_ERROR_DUMP_MEM_CFG: the addresses and sizes of fifos in the smem, * which we get from the fw after ALIVE. The content is structured as * &struct iwl_fw_error_dump_smem_cfg. */ enum iwl_fw_error_dump_type { /* 0 is deprecated */ IWL_FW_ERROR_DUMP_CSR = 1, IWL_FW_ERROR_DUMP_RXF = 2, IWL_FW_ERROR_DUMP_TXCMD = 3, IWL_FW_ERROR_DUMP_DEV_FW_INFO = 4, IWL_FW_ERROR_DUMP_FW_MONITOR = 5, IWL_FW_ERROR_DUMP_PRPH = 6, IWL_FW_ERROR_DUMP_TXF = 7, IWL_FW_ERROR_DUMP_FH_REGS = 8, IWL_FW_ERROR_DUMP_MEM = 9, IWL_FW_ERROR_DUMP_ERROR_INFO = 10, IWL_FW_ERROR_DUMP_RB = 11, IWL_FW_ERROR_DUMP_PAGING = 12, IWL_FW_ERROR_DUMP_RADIO_REG = 13, IWL_FW_ERROR_DUMP_INTERNAL_TXF = 14, IWL_FW_ERROR_DUMP_EXTERNAL = 15, /* Do not move */ IWL_FW_ERROR_DUMP_MEM_CFG = 16, IWL_FW_ERROR_DUMP_D3_DEBUG_DATA = 17, IWL_FW_ERROR_DUMP_MAX, }; /** * struct iwl_fw_error_dump_data - data for one type * @type: &enum iwl_fw_error_dump_type * @len: the length starting from %data * @data: the data itself */ struct iwl_fw_error_dump_data { __le32 type; __le32 len; __u8 data[]; } __packed; /** * struct iwl_fw_error_dump_file - the layout of the header of the file * @barker: must be %IWL_FW_ERROR_DUMP_BARKER * @file_len: the length of all the file starting from %barker * @data: array of &struct iwl_fw_error_dump_data */ struct iwl_fw_error_dump_file { __le32 barker; __le32 file_len; u8 data[0]; } __packed; /** * struct iwl_fw_error_dump_txcmd - TX command data * @cmdlen: original length of command * @caplen: captured length of command (may be less) * @data: captured command data, @caplen bytes */ struct iwl_fw_error_dump_txcmd { __le32 cmdlen; __le32 caplen; u8 data[]; } __packed; /** * struct iwl_fw_error_dump_fifo - RX/TX FIFO data * @fifo_num: number of FIFO (starting from 0) * @available_bytes: num of bytes available in FIFO (may be less than FIFO size) * @wr_ptr: position of write pointer * @rd_ptr: position of read pointer * @fence_ptr: position of fence pointer * @fence_mode: the current mode of the fence (before locking) - * 0=follow RD pointer ; 1 = freeze * @data: all of the FIFO's data */ struct iwl_fw_error_dump_fifo { __le32 fifo_num; __le32 available_bytes; __le32 wr_ptr; 
__le32 rd_ptr; __le32 fence_ptr; __le32 fence_mode; u8 data[]; } __packed; enum iwl_fw_error_dump_family { IWL_FW_ERROR_DUMP_FAMILY_7 = 7, IWL_FW_ERROR_DUMP_FAMILY_8 = 8, }; #define MAX_NUM_LMAC 2 /** * struct iwl_fw_error_dump_info - info on the device / firmware * @hw_type: the type of the device * @hw_step: the step of the device * @fw_human_readable: human readable FW version * @dev_human_readable: name of the device * @bus_human_readable: name of the bus used * @num_of_lmacs: the number of lmacs * @lmac_err_id: the lmac 0/1 error_id/rt_status that triggered the latest dump * if the dump collection was not initiated by an assert, the value is 0 * @umac_err_id: the umac error_id/rt_status that triggered the latest dump * if the dump collection was not initiated by an assert, the value is 0 */ struct iwl_fw_error_dump_info { __le32 hw_type; __le32 hw_step; u8 fw_human_readable[FW_VER_HUMAN_READABLE_SZ]; u8 dev_human_readable[64]; u8 bus_human_readable[8]; u8 num_of_lmacs; __le32 umac_err_id; __le32 lmac_err_id[MAX_NUM_LMAC]; } __packed; /** * struct iwl_fw_error_dump_fw_mon - FW monitor data * @fw_mon_wr_ptr: the position of the write pointer in the cyclic buffer * @fw_mon_base_ptr: base pointer of the data * @fw_mon_cycle_cnt: number of wraparounds * @fw_mon_base_high_ptr: used in AX210 devices, the base address is 64 bit * so fw_mon_base_ptr holds LSB 32 bits and fw_mon_base_high_ptr holds * MSB 32 bits * @reserved: for future use * @data: captured data */ struct iwl_fw_error_dump_fw_mon { __le32 fw_mon_wr_ptr; __le32 fw_mon_base_ptr; __le32 fw_mon_cycle_cnt; __le32 fw_mon_base_high_ptr; __le32 reserved[2]; u8 data[]; } __packed; #define MAX_NUM_LMAC 2 #define TX_FIFO_INTERNAL_MAX_NUM 6 #define TX_FIFO_MAX_NUM 15 /** * struct iwl_fw_error_dump_smem_cfg - Dump SMEM configuration * This must follow &struct iwl_fwrt_shared_mem_cfg. * @num_lmacs: number of lmacs * @num_txfifo_entries: number of tx fifos * @lmac: sizes of lmacs txfifos and rxfifo1 * @rxfifo2_size: size of rxfifo2 * @internal_txfifo_addr: address of internal tx fifo * @internal_txfifo_size: size of internal tx fifo */ struct iwl_fw_error_dump_smem_cfg { __le32 num_lmacs; __le32 num_txfifo_entries; struct { __le32 txfifo_size[TX_FIFO_MAX_NUM]; __le32 rxfifo1_size; } lmac[MAX_NUM_LMAC]; __le32 rxfifo2_size; __le32 internal_txfifo_addr; __le32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM]; } __packed; /** * struct iwl_fw_error_dump_prph - periphery registers data * @prph_start: address of the first register in this chunk * @data: the content of the registers */ struct iwl_fw_error_dump_prph { __le32 prph_start; __le32 data[]; }; enum iwl_fw_error_dump_mem_type { IWL_FW_ERROR_DUMP_MEM_SRAM, IWL_FW_ERROR_DUMP_MEM_SMEM, IWL_FW_ERROR_DUMP_MEM_NAMED_MEM = 10, }; /** * struct iwl_fw_error_dump_mem - chunk of memory * @type: &enum iwl_fw_error_dump_mem_type * @offset: the offset from which the memory was read * @data: the content of the memory */ struct iwl_fw_error_dump_mem { __le32 type; __le32 offset; u8 data[]; }; /* Dump version, used by the dump parser to differentiate between * different dump formats */ #define IWL_INI_DUMP_VER 1 /* Use bit 31 as dump info type to avoid colliding with region types */ #define IWL_INI_DUMP_INFO_TYPE BIT(31) /** * struct iwl_fw_ini_fifo_hdr - fifo range header * @fifo_num: the fifo number. 
In case of umac rx fifo, set BIT(31) to * distinguish between lmac and umac rx fifos * @num_of_registers: num of registers to dump, dword size each */ struct iwl_fw_ini_fifo_hdr { __le32 fifo_num; __le32 num_of_registers; } __packed; /** * struct iwl_fw_ini_error_dump_range - range of memory * @range_data_size: the size of this range, in bytes * @internal_base_addr - base address of internal memory range * @dram_base_addr - base address of dram monitor range * @page_num - page number of memory range * @fifo_hdr - fifo header of memory range * @data: the actual memory */ struct iwl_fw_ini_error_dump_range { __le32 range_data_size; union { __le32 internal_base_addr; __le64 dram_base_addr; __le32 page_num; struct iwl_fw_ini_fifo_hdr fifo_hdr; }; __le32 data[]; } __packed; /** * struct iwl_fw_ini_error_dump_header - ini region dump header * @version: dump version * @region_id: id of the region * @num_of_ranges: number of ranges in this region * @name_len: number of bytes allocated to the name string of this region * @name: name of the region */ struct iwl_fw_ini_error_dump_header { __le32 version; __le32 region_id; __le32 num_of_ranges; __le32 name_len; u8 name[IWL_FW_INI_MAX_NAME]; }; /** * struct iwl_fw_ini_error_dump - ini region dump * @header: the header of this region * @ranges: the memory ranges of this region */ struct iwl_fw_ini_error_dump { struct iwl_fw_ini_error_dump_header header; struct iwl_fw_ini_error_dump_range ranges[]; } __packed; /* This bit is used to differentiate between lmac and umac rxf */ #define IWL_RXF_UMAC_BIT BIT(31) /** * struct iwl_fw_ini_error_dump_register - ini register dump * @addr: address of the register * @data: data of the register */ struct iwl_fw_ini_error_dump_register { __le32 addr; __le32 data; } __packed; /* struct iwl_fw_ini_dump_info - ini dump information * @version: dump version * @trigger_id: trigger id that caused the dump collection * @trigger_reason: not supported yet * @is_external_cfg: 1 if an external debug configuration was loaded * and 0 otherwise * @ver_type: FW version type * @ver_subtype: FW version subtype * @hw_step: HW step * @hw_type: HW type * @rf_id_flavor: HW RF id flavor * @rf_id_dash: HW RF id dash * @rf_id_step: HW RF id step * @rf_id_type: HW RF id type * @lmac_major: lmac major version * @lmac_minor: lmac minor version * @umac_major: umac major version * @umac_minor: umac minor version * @build_tag_len: length of the build tag * @build_tag: build tag string * @img_name_len: length of the FW image name * @img_name: FW image name * @internal_dbg_cfg_name_len: length of the internal debug configuration name * @internal_dbg_cfg_name: internal debug configuration name * @external_dbg_cfg_name_len: length of the external debug configuration name * @external_dbg_cfg_name: external debug configuration name * @regions_num: number of region ids * @region_ids: region ids the trigger configured to collect */ struct iwl_fw_ini_dump_info { __le32 version; __le32 trigger_id; __le32 trigger_reason; __le32 is_external_cfg; __le32 ver_type; __le32 ver_subtype; __le32 hw_step; __le32 hw_type; __le32 rf_id_flavor; __le32 rf_id_dash; __le32 rf_id_step; __le32 rf_id_type; __le32 lmac_major; __le32 lmac_minor; __le32 umac_major; __le32 umac_minor; __le32 build_tag_len; u8 build_tag[FW_VER_HUMAN_READABLE_SZ]; __le32 img_name_len; u8 img_name[IWL_FW_INI_MAX_IMG_NAME_LEN]; __le32 internal_dbg_cfg_name_len; u8 internal_dbg_cfg_name[IWL_FW_INI_MAX_DBG_CFG_NAME_LEN]; __le32 external_dbg_cfg_name_len; u8 
external_dbg_cfg_name[IWL_FW_INI_MAX_DBG_CFG_NAME_LEN]; __le32 regions_num; __le32 region_ids[]; } __packed; /** * struct iwl_fw_error_dump_rb - content of a Receive Buffer * @index: the index of the Receive Buffer in the Rx queue * @rxq: the RB's Rx queue * @reserved: * @data: the content of the Receive Buffer */ struct iwl_fw_error_dump_rb { __le32 index; __le32 rxq; __le32 reserved; u8 data[]; }; /** * struct iwl_fw_ini_monitor_dump - ini monitor dump * @header - header of the region * @write_ptr - write pointer position in the buffer * @cycle_cnt - cycles count * @ranges - the memory ranges of this region */ struct iwl_fw_ini_monitor_dump { struct iwl_fw_ini_error_dump_header header; __le32 write_ptr; __le32 cycle_cnt; struct iwl_fw_ini_error_dump_range ranges[]; } __packed; /** * struct iwl_fw_error_dump_paging - content of the UMAC's image page * block on DRAM * @index: the index of the page block * @reserved: * @data: the content of the page block */ struct iwl_fw_error_dump_paging { __le32 index; __le32 reserved; u8 data[]; }; /** * iwl_fw_error_next_data - advance fw error dump data pointer * @data: previous data block * Returns: next data block */ static inline struct iwl_fw_error_dump_data * iwl_fw_error_next_data(struct iwl_fw_error_dump_data *data) { return (void *)(data->data + le32_to_cpu(data->len)); } /** * enum iwl_fw_dbg_trigger - triggers available * * @FW_DBG_TRIGGER_USER: trigger log collection by user * This should not be defined as a trigger to the driver, but a value the * driver should set to indicate that the trigger was initiated by the * user. * @FW_DBG_TRIGGER_FW_ASSERT: trigger log collection when the firmware asserts * @FW_DBG_TRIGGER_MISSED_BEACONS: trigger log collection when beacons are * missed. * @FW_DBG_TRIGGER_CHANNEL_SWITCH: trigger log collection upon channel switch. * @FW_DBG_TRIGGER_FW_NOTIF: trigger log collection when the firmware sends a * command response or a notification. * @FW_DBG_TRIGGER_MLME: trigger log collection upon MLME event. * @FW_DBG_TRIGGER_STATS: trigger log collection upon statistics threshold. * @FW_DBG_TRIGGER_RSSI: trigger log collection when the rssi of the beacon * goes below a threshold. * @FW_DBG_TRIGGER_TXQ_TIMERS: configures the timers for the Tx queue hang * detection. * @FW_DBG_TRIGGER_TIME_EVENT: trigger log collection upon time events related * events. * @FW_DBG_TRIGGER_BA: trigger log collection upon BlockAck related events. * @FW_DBG_TRIGGER_TX_LATENCY: trigger log collection when the tx latency goes above a * threshold. * @FW_DBG_TRIGGER_TDLS: trigger log collection upon TDLS related events. * @FW_DBG_TRIGGER_TX_STATUS: trigger log collection upon tx status when * the firmware sends a tx reply. * @FW_DBG_TRIGGER_USER_EXTENDED: trigger log collection upon user space * request. * @FW_DBG_TRIGGER_ALIVE_TIMEOUT: trigger log collection if the alive flow times out * @FW_DBG_TRIGGER_DRIVER: trigger log collection upon a flow failure * in the driver. 
*/ enum iwl_fw_dbg_trigger { FW_DBG_TRIGGER_INVALID = 0, FW_DBG_TRIGGER_USER, FW_DBG_TRIGGER_FW_ASSERT, FW_DBG_TRIGGER_MISSED_BEACONS, FW_DBG_TRIGGER_CHANNEL_SWITCH, FW_DBG_TRIGGER_FW_NOTIF, FW_DBG_TRIGGER_MLME, FW_DBG_TRIGGER_STATS, FW_DBG_TRIGGER_RSSI, FW_DBG_TRIGGER_TXQ_TIMERS, FW_DBG_TRIGGER_TIME_EVENT, FW_DBG_TRIGGER_BA, FW_DBG_TRIGGER_TX_LATENCY, FW_DBG_TRIGGER_TDLS, FW_DBG_TRIGGER_TX_STATUS, FW_DBG_TRIGGER_USER_EXTENDED, FW_DBG_TRIGGER_ALIVE_TIMEOUT, FW_DBG_TRIGGER_DRIVER, /* must be last */ FW_DBG_TRIGGER_MAX, }; /** * struct iwl_fw_error_dump_trigger_desc - describes the trigger condition * @type: &enum iwl_fw_dbg_trigger * @data: raw data about what happened */ struct iwl_fw_error_dump_trigger_desc { __le32 type; u8 data[]; }; #endif /* __fw_error_dump_h__ */ backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/fw/file.h000066400000000000000000001156671351064634500263130ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 Intel Corporation * Copyright(c) 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 Intel Corporation * Copyright(c) 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
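/* Editor's sketch (hypothetical parser, not part of the driver): walking the
 * legacy dump layout from error-dump.h above. A dump file is a
 * barker/file_len header followed by a sequence of
 * struct iwl_fw_error_dump_data blocks; iwl_fw_error_next_data() advances
 * from one block to the next. example_walk_dump() is an assumed name.
 */
static void example_walk_dump(struct iwl_fw_error_dump_file *file)
{
	struct iwl_fw_error_dump_data *data = (void *)file->data;
	void *end = (u8 *)file + le32_to_cpu(file->file_len);

	while ((void *)data < end) {
		if (le32_to_cpu(data->type) == IWL_FW_ERROR_DUMP_MEM) {
			/* payload: le32_to_cpu(data->len) bytes at data->data */
		}
		data = iwl_fw_error_next_data(data);
	}
}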
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *****************************************************************************/ #ifndef __iwl_fw_file_h__ #define __iwl_fw_file_h__ #include <linux/netdevice.h> #include <linux/nl80211.h> /* v1/v2 uCode file layout */ struct iwl_ucode_header { __le32 ver; /* major/minor/API/serial */ union { struct { __le32 inst_size; /* bytes of runtime code */ __le32 data_size; /* bytes of runtime data */ __le32 init_size; /* bytes of init code */ __le32 init_data_size; /* bytes of init data */ __le32 boot_size; /* bytes of bootstrap code */ u8 data[0]; /* in same order as sizes */ } v1; struct { __le32 build; /* build number */ __le32 inst_size; /* bytes of runtime code */ __le32 data_size; /* bytes of runtime data */ __le32 init_size; /* bytes of init code */ __le32 init_data_size; /* bytes of init data */ __le32 boot_size; /* bytes of bootstrap code */ u8 data[0]; /* in same order as sizes */ } v2; } u; }; #define IWL_UCODE_INI_TLV_GROUP 0x1000000 /* * new TLV uCode file layout * * The new TLV file format contains TLVs, that each specify * some piece of data. */ enum iwl_ucode_tlv_type { IWL_UCODE_TLV_INVALID = 0, /* unused */ IWL_UCODE_TLV_INST = 1, IWL_UCODE_TLV_DATA = 2, IWL_UCODE_TLV_INIT = 3, IWL_UCODE_TLV_INIT_DATA = 4, IWL_UCODE_TLV_BOOT = 5, IWL_UCODE_TLV_PROBE_MAX_LEN = 6, /* a u32 value */ IWL_UCODE_TLV_PAN = 7, IWL_UCODE_TLV_RUNT_EVTLOG_PTR = 8, IWL_UCODE_TLV_RUNT_EVTLOG_SIZE = 9, IWL_UCODE_TLV_RUNT_ERRLOG_PTR = 10, IWL_UCODE_TLV_INIT_EVTLOG_PTR = 11, IWL_UCODE_TLV_INIT_EVTLOG_SIZE = 12, IWL_UCODE_TLV_INIT_ERRLOG_PTR = 13, IWL_UCODE_TLV_ENHANCE_SENS_TBL = 14, IWL_UCODE_TLV_PHY_CALIBRATION_SIZE = 15, IWL_UCODE_TLV_WOWLAN_INST = 16, IWL_UCODE_TLV_WOWLAN_DATA = 17, IWL_UCODE_TLV_FLAGS = 18, IWL_UCODE_TLV_SEC_RT = 19, IWL_UCODE_TLV_SEC_INIT = 20, IWL_UCODE_TLV_SEC_WOWLAN = 21, IWL_UCODE_TLV_DEF_CALIB = 22, IWL_UCODE_TLV_PHY_SKU = 23, IWL_UCODE_TLV_SECURE_SEC_RT = 24, IWL_UCODE_TLV_SECURE_SEC_INIT = 25, IWL_UCODE_TLV_SECURE_SEC_WOWLAN = 26, IWL_UCODE_TLV_NUM_OF_CPU = 27, IWL_UCODE_TLV_CSCHEME = 28, IWL_UCODE_TLV_API_CHANGES_SET = 29, IWL_UCODE_TLV_ENABLED_CAPABILITIES = 30, IWL_UCODE_TLV_N_SCAN_CHANNELS = 31, IWL_UCODE_TLV_PAGING = 32, IWL_UCODE_TLV_SEC_RT_USNIFFER = 34, /* 35 is unused */ IWL_UCODE_TLV_FW_VERSION = 36, IWL_UCODE_TLV_FW_DBG_DEST = 38, IWL_UCODE_TLV_FW_DBG_CONF = 39, IWL_UCODE_TLV_FW_DBG_TRIGGER = 40, IWL_UCODE_TLV_CMD_VERSIONS = 48, IWL_UCODE_TLV_FW_GSCAN_CAPA = 50, IWL_UCODE_TLV_FW_MEM_SEG = 51, IWL_UCODE_TLV_IML = 52, IWL_UCODE_TLV_FW_FMAC_API_VERSION = 53, IWL_UCODE_TLV_UMAC_DEBUG_ADDRS = 54, IWL_UCODE_TLV_LMAC_DEBUG_ADDRS = 55, IWL_UCODE_TLV_FW_RECOVERY_INFO = 57, IWL_UCODE_TLV_FW_FMAC_RECOVERY_INFO = 59, IWL_UCODE_TLV_FW_FSEQ_VERSION = 60, IWL_UCODE_TLV_DEBUG_BASE = IWL_UCODE_INI_TLV_GROUP, IWL_UCODE_TLV_TYPE_DEBUG_INFO = IWL_UCODE_TLV_DEBUG_BASE + 0, IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION = IWL_UCODE_TLV_DEBUG_BASE + 1, IWL_UCODE_TLV_TYPE_HCMD = IWL_UCODE_TLV_DEBUG_BASE + 2, IWL_UCODE_TLV_TYPE_REGIONS = IWL_UCODE_TLV_DEBUG_BASE + 3, IWL_UCODE_TLV_TYPE_TRIGGERS = IWL_UCODE_TLV_DEBUG_BASE + 4, 
IWL_UCODE_TLV_TYPE_DEBUG_FLOW = IWL_UCODE_TLV_DEBUG_BASE + 5, IWL_UCODE_TLV_DEBUG_MAX = IWL_UCODE_TLV_TYPE_DEBUG_FLOW, /* TLVs 0x1000-0x2000 are for internal driver usage */ IWL_UCODE_TLV_FW_DBG_DUMP_LST = 0x1000, }; struct iwl_ucode_tlv { __le32 type; /* see above */ __le32 length; /* not including type/length fields */ u8 data[0]; }; #define IWL_TLV_FW_DBG_MAGIC 0xb5221389 #define IWL_TLV_UCODE_MAGIC 0x0a4c5749 #define FW_VER_HUMAN_READABLE_SZ 64 struct iwl_tlv_ucode_header { /* * The TLV style ucode header is distinguished from * the v1/v2 style header by first four bytes being * zero, as such is an invalid combination of * major/minor/API/serial versions. */ __le32 zero; __le32 magic; u8 human_readable[FW_VER_HUMAN_READABLE_SZ]; /* major/minor/API/serial or major in new format */ __le32 ver; __le32 build; __le64 ignore; /* * The data contained herein has a TLV layout, * see above for the TLV header and types. * Note that each TLV is padded to a length * that is a multiple of 4 for alignment. */ u8 data[0]; }; /* * ucode TLVs * * ability to get extension for: flags & capabilities from ucode binary files */ struct iwl_ucode_api { __le32 api_index; __le32 api_flags; } __packed; struct iwl_ucode_capa { __le32 api_index; __le32 api_capa; } __packed; /** * enum iwl_ucode_tlv_flag - ucode API flags * @IWL_UCODE_TLV_FLAGS_PAN: This is PAN capable microcode; this previously * was a separate TLV but moved here to save space. * @IWL_UCODE_TLV_FLAGS_NEWSCAN: new uCode scan behavior on hidden SSID, * treats good CRC threshold as a boolean * @IWL_UCODE_TLV_FLAGS_MFP: This uCode image supports MFP (802.11w). * @IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT: This uCode image supports uAPSD * @IWL_UCODE_TLV_FLAGS_SHORT_BL: 16 entries of black list instead of 64 in scan * offload profile config command. * @IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS: D3 image supports up to six * (rather than two) IPv6 addresses * @IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID: not sending a probe with the SSID element * from the probe request template. * @IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL: new NS offload (small version) * @IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE: new NS offload (large version) * @IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT: General support for uAPSD * @IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD: P2P client supports uAPSD power save * @IWL_UCODE_TLV_FLAGS_BCAST_FILTERING: uCode supports broadcast filtering. * @IWL_UCODE_TLV_FLAGS_EBS_SUPPORT: this uCode image supports EBS. */ enum iwl_ucode_tlv_flag { IWL_UCODE_TLV_FLAGS_PAN = BIT(0), IWL_UCODE_TLV_FLAGS_NEWSCAN = BIT(1), IWL_UCODE_TLV_FLAGS_MFP = BIT(2), IWL_UCODE_TLV_FLAGS_SHORT_BL = BIT(7), IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS = BIT(10), IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID = BIT(12), IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL = BIT(15), IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE = BIT(16), IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT = BIT(24), IWL_UCODE_TLV_FLAGS_EBS_SUPPORT = BIT(25), IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD = BIT(26), IWL_UCODE_TLV_FLAGS_BCAST_FILTERING = BIT(29), }; typedef unsigned int __bitwise iwl_ucode_tlv_api_t; /** * enum iwl_ucode_tlv_api - ucode api * @IWL_UCODE_TLV_API_FRAGMENTED_SCAN: This ucode supports active dwell time * longer than the passive one, which is essential for fragmented scan. * @IWL_UCODE_TLV_API_WIFI_MCC_UPDATE: ucode supports MCC updates with source. 
* @IWL_UCODE_TLV_API_LQ_SS_PARAMS: Configure STBC/BFER via LQ CMD ss_params * @IWL_UCODE_TLV_API_NEW_VERSION: new versioning format * @IWL_UCODE_TLV_API_SCAN_TSF_REPORT: Scan start time reported in scan * iteration complete notification, and the timestamp reported for RX * received during scan, are reported in TSF of the mac specified in the * scan request. * @IWL_UCODE_TLV_API_TKIP_MIC_KEYS: This ucode supports version 2 of * ADD_MODIFY_STA_KEY_API_S_VER_2. * @IWL_UCODE_TLV_API_STA_TYPE: This ucode supports station type assignment. * @IWL_UCODE_TLV_API_NAN2_VER2: This ucode supports NAN API version 2 * @IWL_UCODE_TLV_API_NEW_RX_STATS: should new RX STATISTICS API be used * @IWL_UCODE_TLV_API_QUOTA_LOW_LATENCY: Quota command includes a field * indicating low latency direction. * @IWL_UCODE_TLV_API_DEPRECATE_TTAK: RX status flag TTAK ok (bit 7) is * deprecated. * @IWL_UCODE_TLV_API_ADAPTIVE_DWELL_V2: This ucode supports version 8 * of scan request: SCAN_REQUEST_CMD_UMAC_API_S_VER_8 * @IWL_UCODE_TLV_API_NAN_NOTIF_V2: This ucode supports version 2 of * the NAN discovery notification. * @IWL_UCODE_TLV_API_FRAG_EBS: This ucode supports fragmented EBS * @IWL_UCODE_TLV_API_REDUCE_TX_POWER: This ucode supports v5 of * the REDUCE_TX_POWER_CMD. * @IWL_UCODE_TLV_API_SHORT_BEACON_NOTIF: This ucode supports the short * version of the beacon notification. * @IWL_UCODE_TLV_API_BEACON_FILTER_V4: This ucode supports v4 of * BEACON_FILTER_CONFIG_API_S_VER_4. * @IWL_UCODE_TLV_API_REGULATORY_NVM_INFO: This ucode supports v4 of * REGULATORY_NVM_GET_INFO_RSP_API_S. * @IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ: This ucode supports v7 of * LOCATION_RANGE_REQ_CMD_API_S and v6 of LOCATION_RANGE_RESP_NTFY_API_S. * @IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS: This ucode supports v2 of * SCAN_OFFLOAD_PROFILE_MATCH_RESULTS_S and v3 of * SCAN_OFFLOAD_PROFILES_QUERY_RSP_S. * @IWL_UCODE_TLV_API_MBSSID_HE: This ucode supports v2 of * STA_CONTEXT_DOT11AX_API_S * @IWL_UCODE_TLV_API_SAR_TABLE_VER: This ucode supports different SAR * version tables. * @IWL_UCODE_TLV_API_REDUCED_SCAN_CONFIG: This ucode supports v3 of * SCAN_CONFIG_DB_CMD_API_S.
* * @NUM_IWL_UCODE_TLV_API: number of bits used */ enum iwl_ucode_tlv_api { /* API Set 0 */ IWL_UCODE_TLV_API_FRAGMENTED_SCAN = (__force iwl_ucode_tlv_api_t)8, IWL_UCODE_TLV_API_WIFI_MCC_UPDATE = (__force iwl_ucode_tlv_api_t)9, IWL_UCODE_TLV_API_LQ_SS_PARAMS = (__force iwl_ucode_tlv_api_t)18, IWL_UCODE_TLV_API_NEW_VERSION = (__force iwl_ucode_tlv_api_t)20, IWL_UCODE_TLV_API_SCAN_TSF_REPORT = (__force iwl_ucode_tlv_api_t)28, IWL_UCODE_TLV_API_TKIP_MIC_KEYS = (__force iwl_ucode_tlv_api_t)29, IWL_UCODE_TLV_API_STA_TYPE = (__force iwl_ucode_tlv_api_t)30, IWL_UCODE_TLV_API_NAN2_VER2 = (__force iwl_ucode_tlv_api_t)31, /* API Set 1 */ IWL_UCODE_TLV_API_ADAPTIVE_DWELL = (__force iwl_ucode_tlv_api_t)32, IWL_UCODE_TLV_API_OCE = (__force iwl_ucode_tlv_api_t)33, IWL_UCODE_TLV_API_NEW_BEACON_TEMPLATE = (__force iwl_ucode_tlv_api_t)34, IWL_UCODE_TLV_API_NEW_RX_STATS = (__force iwl_ucode_tlv_api_t)35, IWL_UCODE_TLV_API_WOWLAN_KEY_MATERIAL = (__force iwl_ucode_tlv_api_t)36, IWL_UCODE_TLV_API_QUOTA_LOW_LATENCY = (__force iwl_ucode_tlv_api_t)38, IWL_UCODE_TLV_API_DEPRECATE_TTAK = (__force iwl_ucode_tlv_api_t)41, IWL_UCODE_TLV_API_ADAPTIVE_DWELL_V2 = (__force iwl_ucode_tlv_api_t)42, IWL_UCODE_TLV_API_NAN_NOTIF_V2 = (__force iwl_ucode_tlv_api_t)43, IWL_UCODE_TLV_API_FRAG_EBS = (__force iwl_ucode_tlv_api_t)44, IWL_UCODE_TLV_API_REDUCE_TX_POWER = (__force iwl_ucode_tlv_api_t)45, IWL_UCODE_TLV_API_SHORT_BEACON_NOTIF = (__force iwl_ucode_tlv_api_t)46, IWL_UCODE_TLV_API_BEACON_FILTER_V4 = (__force iwl_ucode_tlv_api_t)47, IWL_UCODE_TLV_API_REGULATORY_NVM_INFO = (__force iwl_ucode_tlv_api_t)48, IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ = (__force iwl_ucode_tlv_api_t)49, IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS = (__force iwl_ucode_tlv_api_t)50, IWL_UCODE_TLV_API_MBSSID_HE = (__force iwl_ucode_tlv_api_t)52, IWL_UCODE_TLV_API_WOWLAN_TCP_SYN_WAKE = (__force iwl_ucode_tlv_api_t)53, IWL_UCODE_TLV_API_FTM_RTT_ACCURACY = (__force iwl_ucode_tlv_api_t)54, IWL_UCODE_TLV_API_SAR_TABLE_VER = (__force iwl_ucode_tlv_api_t)55, IWL_UCODE_TLV_API_REDUCED_SCAN_CONFIG = (__force iwl_ucode_tlv_api_t)56, IWL_UCODE_TLV_API_ADWELL_HB_DEF_N_AP = (__force iwl_ucode_tlv_api_t)57, IWL_UCODE_TLV_API_SCAN_EXT_CHAN_VER = (__force iwl_ucode_tlv_api_t)58, NUM_IWL_UCODE_TLV_API #ifdef __CHECKER__ /* sparse says it cannot increment the previous enum member */ = 128 #endif }; typedef unsigned int __bitwise iwl_ucode_tlv_capa_t; /** * enum iwl_ucode_tlv_capa - ucode capabilities * @IWL_UCODE_TLV_CAPA_D0I3_SUPPORT: supports D0i3 * @IWL_UCODE_TLV_CAPA_LAR_SUPPORT: supports Location Aware Regulatory * @IWL_UCODE_TLV_CAPA_UMAC_SCAN: supports UMAC scan. * @IWL_UCODE_TLV_CAPA_BEAMFORMER: supports Beamformer * @IWL_UCODE_TLV_CAPA_TDLS_SUPPORT: support basic TDLS functionality * @IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT: supports insertion of current * tx power value into TPC Report action frame and Link Measurement Report * action frame * @IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT: supports updating current * channel in DS parameter set element in probe requests. * @IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT: supports adding TPC Report IE in * probe requests. 
* @IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT: supports Quiet Period requests * @IWL_UCODE_TLV_CAPA_DQA_SUPPORT: supports dynamic queue allocation (DQA), * which also implies support for the scheduler configuration command * @IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH: supports TDLS channel switching * @IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG: Consolidated D3-D0 image * @IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT: supports Hot Spot Command * @IWL_UCODE_TLV_CAPA_DC2DC_CONFIG_SUPPORT: supports DC2DC Command * @IWL_UCODE_TLV_CAPA_2G_COEX_SUPPORT: supports 2G coex Command * @IWL_UCODE_TLV_CAPA_CSUM_SUPPORT: supports TCP Checksum Offload * @IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS: support radio and beacon statistics * @IWL_UCODE_TLV_CAPA_P2P_SCM_UAPSD: supports U-APSD on p2p interface when it * is standalone or with a BSS station interface in the same binding. * @IWL_UCODE_TLV_CAPA_BT_COEX_PLCR: enabled BT Coex packet level co-running * @IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC: ucode supports LAR updates with different * sources for the MCC. This TLV bit is a future replacement to * IWL_UCODE_TLV_API_WIFI_MCC_UPDATE. When either is set, multi-source LAR * is supported. * @IWL_UCODE_TLV_CAPA_BT_COEX_RRC: supports BT Coex RRC * @IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT: supports gscan (no longer used) * @IWL_UCODE_TLV_CAPA_NAN_SUPPORT: supports NAN * @IWL_UCODE_TLV_CAPA_UMAC_UPLOAD: supports upload mode in umac (1=supported, * 0=no support) * @IWL_UCODE_TLV_CAPA_SOC_LATENCY_SUPPORT: the firmware supports setting * stabilization latency for SoCs. * @IWL_UCODE_TLV_CAPA_STA_PM_NOTIF: firmware will send STA PM notification * @IWL_UCODE_TLV_CAPA_TLC_OFFLOAD: firmware implements rate scaling algorithm * @IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA: firmware implements quota related * commands * @IWL_UCODE_TLV_CAPA_COEX_SCHEMA_2: firmware implements Coex Schema 2 * @IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD: firmware supports CSA command * @IWL_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS: firmware supports ultra high band * (6 GHz). * @IWL_UCODE_TLV_CAPA_CS_MODIFY: firmware supports modify action CSA command * @IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE: extended DTS measurement * @IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS: supports short PM timeouts * @IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT: supports bt-coex Multi-priority LUT * @IWL_UCODE_TLV_CAPA_CSA_AND_TBTT_OFFLOAD: the firmware supports CSA * countdown offloading. Beacon notifications are not sent to the host. * The fw also offloads TBTT alignment. * @IWL_UCODE_TLV_CAPA_BEACON_ANT_SELECTION: firmware will decide on what * antenna the beacon should be transmitted * @IWL_UCODE_TLV_CAPA_BEACON_STORING: firmware will store the latest beacon * from AP and will send it upon d0i3 exit. * @IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V3: support LAR API V3 * @IWL_UCODE_TLV_CAPA_CT_KILL_BY_FW: firmware responsible for CT-kill * @IWL_UCODE_TLV_CAPA_TEMP_THS_REPORT_SUPPORT: supports temperature * thresholds reporting * @IWL_UCODE_TLV_CAPA_CTDP_SUPPORT: supports cTDP command * @IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED: supports usniffer enabled in * regular image. * @IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG: support getting more shared * memory addresses from the firmware.
* @IWL_UCODE_TLV_CAPA_LQM_SUPPORT: supports Link Quality Measurement * @IWL_UCODE_TLV_CAPA_LMAC_UPLOAD: supports upload mode in lmac (1=supported, * 0=no support) #ifdef CPTCFG_IWLMVM_AX_SOFTAP_TESTMODE * @IWL_UCODE_TLV_CAPA_AX_SAP_TM_V2: support 11ax softap testmode APIs version 2 #endif * @IWL_UCODE_TLV_CAPA_TX_POWER_ACK: reduced TX power API has larger * command size (command version 4) that supports toggling ACK TX * power reduction. #ifdef CPTCFG_IWLMVM_AX_SOFTAP_TESTMODE * @IWL_UCODE_TLV_CAPA_AX_SAP_TM: support 11ax softap testmode APIs #endif * @IWL_UCODE_TLV_CAPA_D3_DEBUG: supports debug recording during D3 * @IWL_UCODE_TLV_CAPA_MCC_UPDATE_11AX_SUPPORT: MCC response support 11ax * capability. * @IWL_UCODE_TLV_CAPA_CSI_REPORTING: firmware is capable of being configured * to report the CSI information with (certain) RX frames * @IWL_UCODE_TLV_CAPA_CSI_REPORTING_V2: firmware is capable of being configured * to report the CSI information with (certain) RX frames, with the new CSI * filter API * @IWL_UCODE_TLV_CAPA_FTM_CALIBRATED: has FTM calibrated and thus supports both * initiator and responder * * @IWL_UCODE_TLV_CAPA_MLME_OFFLOAD: supports MLME offload * * @NUM_IWL_UCODE_TLV_CAPA: number of bits used */ enum iwl_ucode_tlv_capa { /* set 0 */ IWL_UCODE_TLV_CAPA_D0I3_SUPPORT = (__force iwl_ucode_tlv_capa_t)0, IWL_UCODE_TLV_CAPA_LAR_SUPPORT = (__force iwl_ucode_tlv_capa_t)1, IWL_UCODE_TLV_CAPA_UMAC_SCAN = (__force iwl_ucode_tlv_capa_t)2, IWL_UCODE_TLV_CAPA_BEAMFORMER = (__force iwl_ucode_tlv_capa_t)3, IWL_UCODE_TLV_CAPA_TDLS_SUPPORT = (__force iwl_ucode_tlv_capa_t)6, IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT = (__force iwl_ucode_tlv_capa_t)8, IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT = (__force iwl_ucode_tlv_capa_t)9, IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT = (__force iwl_ucode_tlv_capa_t)10, IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT = (__force iwl_ucode_tlv_capa_t)11, IWL_UCODE_TLV_CAPA_DQA_SUPPORT = (__force iwl_ucode_tlv_capa_t)12, IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH = (__force iwl_ucode_tlv_capa_t)13, IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG = (__force iwl_ucode_tlv_capa_t)17, IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT = (__force iwl_ucode_tlv_capa_t)18, IWL_UCODE_TLV_CAPA_DC2DC_CONFIG_SUPPORT = (__force iwl_ucode_tlv_capa_t)19, IWL_UCODE_TLV_CAPA_2G_COEX_SUPPORT = (__force iwl_ucode_tlv_capa_t)20, IWL_UCODE_TLV_CAPA_CSUM_SUPPORT = (__force iwl_ucode_tlv_capa_t)21, IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS = (__force iwl_ucode_tlv_capa_t)22, IWL_UCODE_TLV_CAPA_P2P_SCM_UAPSD = (__force iwl_ucode_tlv_capa_t)26, IWL_UCODE_TLV_CAPA_BT_COEX_PLCR = (__force iwl_ucode_tlv_capa_t)28, IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC = (__force iwl_ucode_tlv_capa_t)29, IWL_UCODE_TLV_CAPA_BT_COEX_RRC = (__force iwl_ucode_tlv_capa_t)30, IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT = (__force iwl_ucode_tlv_capa_t)31, /* set 1 */ IWL_UCODE_TLV_CAPA_NAN_SUPPORT = (__force iwl_ucode_tlv_capa_t)34, IWL_UCODE_TLV_CAPA_UMAC_UPLOAD = (__force iwl_ucode_tlv_capa_t)35, IWL_UCODE_TLV_CAPA_SOC_LATENCY_SUPPORT = (__force iwl_ucode_tlv_capa_t)37, IWL_UCODE_TLV_CAPA_STA_PM_NOTIF = (__force iwl_ucode_tlv_capa_t)38, IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT = (__force iwl_ucode_tlv_capa_t)39, IWL_UCODE_TLV_CAPA_CDB_SUPPORT = (__force iwl_ucode_tlv_capa_t)40, IWL_UCODE_TLV_CAPA_D0I3_END_FIRST = (__force iwl_ucode_tlv_capa_t)41, IWL_UCODE_TLV_CAPA_TLC_OFFLOAD = (__force iwl_ucode_tlv_capa_t)43, IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA = (__force iwl_ucode_tlv_capa_t)44, IWL_UCODE_TLV_CAPA_COEX_SCHEMA_2 = (__force iwl_ucode_tlv_capa_t)45, 
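	/*
	 * A note on usage (illustration only): each enumerator above and
	 * below is a plain bit index into iwl_ucode_capabilities::_capa,
	 * grouped in sets of 32 bits (one __le32 of TLV payload per set,
	 * hence the "set 0" / "set 1" / ... markers). Code tests a bit
	 * with fw_has_capa() from img.h, e.g.
	 *
	 *	fw_has_capa(&fw->ucode_capa, IWL_UCODE_TLV_CAPA_TLC_OFFLOAD)
	 *
	 * returns true when the firmware implements rate scaling itself.
	 */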
IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD = (__force iwl_ucode_tlv_capa_t)46, IWL_UCODE_TLV_CAPA_FTM_CALIBRATED = (__force iwl_ucode_tlv_capa_t)47, IWL_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS = (__force iwl_ucode_tlv_capa_t)48, IWL_UCODE_TLV_CAPA_CS_MODIFY = (__force iwl_ucode_tlv_capa_t)49, IWL_UCODE_TLV_CAPA_SET_LTR_GEN2 = (__force iwl_ucode_tlv_capa_t)50, IWL_UCODE_TLV_CAPA_SET_PPAG = (__force iwl_ucode_tlv_capa_t)52, /* set 2 */ IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE = (__force iwl_ucode_tlv_capa_t)64, IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS = (__force iwl_ucode_tlv_capa_t)65, IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT = (__force iwl_ucode_tlv_capa_t)67, IWL_UCODE_TLV_CAPA_MULTI_QUEUE_RX_SUPPORT = (__force iwl_ucode_tlv_capa_t)68, IWL_UCODE_TLV_CAPA_CSA_AND_TBTT_OFFLOAD = (__force iwl_ucode_tlv_capa_t)70, IWL_UCODE_TLV_CAPA_BEACON_ANT_SELECTION = (__force iwl_ucode_tlv_capa_t)71, IWL_UCODE_TLV_CAPA_BEACON_STORING = (__force iwl_ucode_tlv_capa_t)72, IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V3 = (__force iwl_ucode_tlv_capa_t)73, IWL_UCODE_TLV_CAPA_CT_KILL_BY_FW = (__force iwl_ucode_tlv_capa_t)74, IWL_UCODE_TLV_CAPA_TEMP_THS_REPORT_SUPPORT = (__force iwl_ucode_tlv_capa_t)75, IWL_UCODE_TLV_CAPA_CTDP_SUPPORT = (__force iwl_ucode_tlv_capa_t)76, IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED = (__force iwl_ucode_tlv_capa_t)77, IWL_UCODE_TLV_CAPA_LMAC_UPLOAD = (__force iwl_ucode_tlv_capa_t)79, IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG = (__force iwl_ucode_tlv_capa_t)80, IWL_UCODE_TLV_CAPA_LQM_SUPPORT = (__force iwl_ucode_tlv_capa_t)81, IWL_UCODE_TLV_CAPA_TX_POWER_ACK = (__force iwl_ucode_tlv_capa_t)84, IWL_UCODE_TLV_CAPA_D3_DEBUG = (__force iwl_ucode_tlv_capa_t)87, IWL_UCODE_TLV_CAPA_LED_CMD_SUPPORT = (__force iwl_ucode_tlv_capa_t)88, IWL_UCODE_TLV_CAPA_MCC_UPDATE_11AX_SUPPORT = (__force iwl_ucode_tlv_capa_t)89, IWL_UCODE_TLV_CAPA_CSI_REPORTING = (__force iwl_ucode_tlv_capa_t)90, IWL_UCODE_TLV_CAPA_CSI_REPORTING_V2 = (__force iwl_ucode_tlv_capa_t)91, IWL_UCODE_TLV_CAPA_DBG_SUSPEND_RESUME_CMD_SUPP = (__force iwl_ucode_tlv_capa_t)92, /* set 3 */ IWL_UCODE_TLV_CAPA_MLME_OFFLOAD = (__force iwl_ucode_tlv_capa_t)96, NUM_IWL_UCODE_TLV_CAPA #ifdef __CHECKER__ /* sparse says it cannot increment the previous enum member */ = 128 #endif }; /* The default calibrate table size if not specified by firmware file */ #define IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE 18 #define IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE 19 #define IWL_MAX_PHY_CALIBRATE_TBL_SIZE 253 /* The default max probe length if not specified by the firmware file */ #define IWL_DEFAULT_MAX_PROBE_LENGTH 200 /* * For 16.0 uCode and above, there is no differentiation between sections, * just an offset to the HW address. */ #define CPU1_CPU2_SEPARATOR_SECTION 0xFFFFCCCC #define PAGING_SEPARATOR_SECTION 0xAAAABBBB /* uCode version contains 4 values: Major/Minor/API/Serial */ #define IWL_UCODE_MAJOR(ver) (((ver) & 0xFF000000) >> 24) #define IWL_UCODE_MINOR(ver) (((ver) & 0x00FF0000) >> 16) #define IWL_UCODE_API(ver) (((ver) & 0x0000FF00) >> 8) #define IWL_UCODE_SERIAL(ver) ((ver) & 0x000000FF) /** * struct iwl_tlv_calib_ctrl - Calibration control struct. * Sent as part of the phy configuration command. * @flow_trigger: bitmap for which calibrations to perform according to * flow triggers. * @event_trigger: bitmap for which calibrations to perform according to * event triggers. 
*/ struct iwl_tlv_calib_ctrl { __le32 flow_trigger; __le32 event_trigger; } __packed; enum iwl_fw_phy_cfg { FW_PHY_CFG_RADIO_TYPE_POS = 0, FW_PHY_CFG_RADIO_TYPE = 0x3 << FW_PHY_CFG_RADIO_TYPE_POS, FW_PHY_CFG_RADIO_STEP_POS = 2, FW_PHY_CFG_RADIO_STEP = 0x3 << FW_PHY_CFG_RADIO_STEP_POS, FW_PHY_CFG_RADIO_DASH_POS = 4, FW_PHY_CFG_RADIO_DASH = 0x3 << FW_PHY_CFG_RADIO_DASH_POS, FW_PHY_CFG_TX_CHAIN_POS = 16, FW_PHY_CFG_TX_CHAIN = 0xf << FW_PHY_CFG_TX_CHAIN_POS, FW_PHY_CFG_RX_CHAIN_POS = 20, FW_PHY_CFG_RX_CHAIN = 0xf << FW_PHY_CFG_RX_CHAIN_POS, FW_PHY_CFG_SHARED_CLK = BIT(31), }; #define IWL_UCODE_MAX_CS 1 /** * struct iwl_fw_cipher_scheme - a cipher scheme supported by FW. * @cipher: a cipher suite selector * @flags: cipher scheme flags (currently reserved for a future use) * @hdr_len: a size of MPDU security header * @pn_len: a size of PN * @pn_off: an offset of pn from the beginning of the security header * @key_idx_off: an offset of key index byte in the security header * @key_idx_mask: a bit mask of key_idx bits * @key_idx_shift: bit shift needed to get key_idx * @mic_len: mic length in bytes * @hw_cipher: a HW cipher index used in host commands */ struct iwl_fw_cipher_scheme { __le32 cipher; u8 flags; u8 hdr_len; u8 pn_len; u8 pn_off; u8 key_idx_off; u8 key_idx_mask; u8 key_idx_shift; u8 mic_len; u8 hw_cipher; } __packed; enum iwl_fw_dbg_reg_operator { CSR_ASSIGN, CSR_SETBIT, CSR_CLEARBIT, PRPH_ASSIGN, PRPH_SETBIT, PRPH_CLEARBIT, INDIRECT_ASSIGN, INDIRECT_SETBIT, INDIRECT_CLEARBIT, PRPH_BLOCKBIT, }; /** * struct iwl_fw_dbg_reg_op - an operation on a register * * @op: &enum iwl_fw_dbg_reg_operator * @addr: offset of the register * @val: value */ struct iwl_fw_dbg_reg_op { u8 op; u8 reserved[3]; __le32 addr; __le32 val; } __packed; /** * enum iwl_fw_dbg_monitor_mode - available monitor recording modes * * @SMEM_MODE: monitor stores the data in SMEM * @EXTERNAL_MODE: monitor stores the data in allocated DRAM * @MARBH_MODE: monitor stores the data in MARBH buffer * @MIPI_MODE: monitor outputs the data through the MIPI interface */ enum iwl_fw_dbg_monitor_mode { SMEM_MODE = 0, EXTERNAL_MODE = 1, MARBH_MODE = 2, MIPI_MODE = 3, }; /** * struct iwl_fw_dbg_mem_seg_tlv - configures the debug data memory segments * * @data_type: the memory segment type to record * @ofs: the memory segment offset * @len: the memory segment length, in bytes * * This parses IWL_UCODE_TLV_FW_MEM_SEG */ struct iwl_fw_dbg_mem_seg_tlv { __le32 data_type; __le32 ofs; __le32 len; } __packed; /** * struct iwl_fw_dbg_dest_tlv_v1 - configures the destination of the debug data * * @version: version of the TLV - currently 0 * @monitor_mode: &enum iwl_fw_dbg_monitor_mode * @size_power: buffer size will be 2^(size_power + 11) * @base_reg: addr of the base addr register (PRPH) * @end_reg: addr of the end addr register (PRPH) * @write_ptr_reg: the addr of the reg of the write pointer * @wrap_count: the addr of the reg of the wrap_count * @base_shift: shift right of the base addr reg * @end_shift: shift right of the end addr reg * @reg_ops: array of registers operations * * This parses IWL_UCODE_TLV_FW_DBG_DEST */ struct iwl_fw_dbg_dest_tlv_v1 { u8 version; u8 monitor_mode; u8 size_power; u8 reserved; __le32 base_reg; __le32 end_reg; __le32 write_ptr_reg; __le32 wrap_count; u8 base_shift; u8 end_shift; struct iwl_fw_dbg_reg_op reg_ops[0]; } __packed; /* Mask of the register for defining the LDBG MAC2SMEM buffer SMEM size */ #define IWL_LDBG_M2S_BUF_SIZE_MSK 0x0fff0000 /* Mask of the register for defining the LDBG MAC2SMEM SMEM base 
address */ #define IWL_LDBG_M2S_BUF_BA_MSK 0x00000fff /* The smem buffer chunks are in units of 256 bits */ #define IWL_M2S_UNIT_SIZE 0x100 struct iwl_fw_dbg_dest_tlv { u8 version; u8 monitor_mode; u8 size_power; u8 reserved; __le32 cfg_reg; __le32 write_ptr_reg; __le32 wrap_count; u8 base_shift; u8 size_shift; struct iwl_fw_dbg_reg_op reg_ops[0]; } __packed; struct iwl_fw_dbg_conf_hcmd { u8 id; u8 reserved; __le16 len; u8 data[0]; } __packed; /** * enum iwl_fw_dbg_trigger_mode - triggers functionalities * * @IWL_FW_DBG_TRIGGER_START: when trigger occurs re-conf the dbg mechanism * @IWL_FW_DBG_TRIGGER_STOP: when trigger occurs pull the dbg data * @IWL_FW_DBG_TRIGGER_MONITOR_ONLY: when trigger occurs trigger is set to * collect only monitor data */ enum iwl_fw_dbg_trigger_mode { IWL_FW_DBG_TRIGGER_START = BIT(0), IWL_FW_DBG_TRIGGER_STOP = BIT(1), IWL_FW_DBG_TRIGGER_MONITOR_ONLY = BIT(2), }; /** * enum iwl_fw_dbg_trigger_flags - the flags supported by wrt triggers * @IWL_FW_DBG_FORCE_RESTART: force a firmware restart */ enum iwl_fw_dbg_trigger_flags { IWL_FW_DBG_FORCE_RESTART = BIT(0), }; /** * enum iwl_fw_dbg_trigger_vif_type - define the VIF type for a trigger * @IWL_FW_DBG_CONF_VIF_ANY: any vif type * @IWL_FW_DBG_CONF_VIF_IBSS: IBSS mode * @IWL_FW_DBG_CONF_VIF_STATION: BSS mode * @IWL_FW_DBG_CONF_VIF_AP: AP mode * @IWL_FW_DBG_CONF_VIF_P2P_CLIENT: P2P Client mode * @IWL_FW_DBG_CONF_VIF_P2P_GO: P2P GO mode * @IWL_FW_DBG_CONF_VIF_P2P_DEVICE: P2P device * @IWL_FW_DBG_CONF_VIF_NAN: NAN device */ enum iwl_fw_dbg_trigger_vif_type { IWL_FW_DBG_CONF_VIF_ANY = NL80211_IFTYPE_UNSPECIFIED, IWL_FW_DBG_CONF_VIF_IBSS = NL80211_IFTYPE_ADHOC, IWL_FW_DBG_CONF_VIF_STATION = NL80211_IFTYPE_STATION, IWL_FW_DBG_CONF_VIF_AP = NL80211_IFTYPE_AP, IWL_FW_DBG_CONF_VIF_P2P_CLIENT = NL80211_IFTYPE_P2P_CLIENT, IWL_FW_DBG_CONF_VIF_P2P_GO = NL80211_IFTYPE_P2P_GO, IWL_FW_DBG_CONF_VIF_P2P_DEVICE = NL80211_IFTYPE_P2P_DEVICE, IWL_FW_DBG_CONF_VIF_NAN = NL80211_IFTYPE_NAN, }; /** * struct iwl_fw_dbg_trigger_tlv - a TLV that describes the trigger * @id: &enum iwl_fw_dbg_trigger * @vif_type: &enum iwl_fw_dbg_trigger_vif_type * @stop_conf_ids: bitmap of configurations this trigger relates to. * if the mode is %IWL_FW_DBG_TRIGGER_STOP, then if the bit corresponding * to the currently running configuration is set, the data should be * collected. * @stop_delay: how many milliseconds to wait before collecting the data * after the STOP trigger fires. * @mode: &enum iwl_fw_dbg_trigger_mode - can be stop / start of both * @start_conf_id: if mode is %IWL_FW_DBG_TRIGGER_START, this defines what * configuration should be applied when the trigger kicks in. * @occurrences: number of occurrences. 0 means the trigger will never fire. * @trig_dis_ms: the time, in milliseconds, after an occurrence of this * trigger in which another occurrence should be ignored. * @flags: &enum iwl_fw_dbg_trigger_flags */ struct iwl_fw_dbg_trigger_tlv { __le32 id; __le32 vif_type; __le32 stop_conf_ids; __le32 stop_delay; u8 mode; u8 start_conf_id; __le16 occurrences; __le16 trig_dis_ms; u8 flags; u8 reserved[5]; u8 data[0]; } __packed; #define FW_DBG_START_FROM_ALIVE 0 #define FW_DBG_CONF_MAX 32 #define FW_DBG_INVALID 0xff /** * struct iwl_fw_dbg_trigger_missed_bcon - configures trigger for missed beacons * @stop_consec_missed_bcon: stop recording if threshold is crossed. * @stop_consec_missed_bcon_since_rx: stop recording if threshold is crossed. * @start_consec_missed_bcon: start recording if threshold is crossed.
* @start_consec_missed_bcon_since_rx: start recording if threshold is crossed. * @reserved1: reserved * @reserved2: reserved */ struct iwl_fw_dbg_trigger_missed_bcon { __le32 stop_consec_missed_bcon; __le32 stop_consec_missed_bcon_since_rx; __le32 reserved2[2]; __le32 start_consec_missed_bcon; __le32 start_consec_missed_bcon_since_rx; __le32 reserved1[2]; } __packed; /** * struct iwl_fw_dbg_trigger_cmd - configures trigger for messages from FW. * @cmds: the list of commands to trigger the collection on */ struct iwl_fw_dbg_trigger_cmd { struct cmd { u8 cmd_id; u8 group_id; } __packed cmds[16]; } __packed; /** * struct iwl_fw_dbg_trigger_stats - configures trigger for statistics * @stop_offset: the offset of the value to be monitored * @stop_threshold: the threshold above which to collect * @start_offset: the offset of the value to be monitored * @start_threshold: the threshold above which to start recording */ struct iwl_fw_dbg_trigger_stats { __le32 stop_offset; __le32 stop_threshold; __le32 start_offset; __le32 start_threshold; } __packed; /** * struct iwl_fw_dbg_trigger_low_rssi - trigger for low beacon RSSI * @rssi: RSSI value to trigger at */ struct iwl_fw_dbg_trigger_low_rssi { __le32 rssi; } __packed; /** * struct iwl_fw_dbg_trigger_mlme - configures trigger for mlme events * @stop_auth_denied: number of denied authentication to collect * @stop_auth_timeout: number of authentication timeout to collect * @stop_rx_deauth: number of Rx deauth to collect * @stop_tx_deauth: number of Tx deauth to collect * @stop_assoc_denied: number of denied association to collect * @stop_assoc_timeout: number of association timeout to collect * @stop_connection_loss: number of connection loss to collect * @start_auth_denied: number of denied authentication to start recording * @start_auth_timeout: number of authentication timeout to start recording * @start_rx_deauth: number of Rx deauth to start recording * @start_tx_deauth: number of Tx deauth to start recording * @start_assoc_denied: number of denied association to start recording * @start_assoc_timeout: number of association timeout to start recording * @start_connection_loss: number of connection loss to start recording */ struct iwl_fw_dbg_trigger_mlme { u8 stop_auth_denied; u8 stop_auth_timeout; u8 stop_rx_deauth; u8 stop_tx_deauth; u8 stop_assoc_denied; u8 stop_assoc_timeout; u8 stop_connection_loss; u8 reserved; u8 start_auth_denied; u8 start_auth_timeout; u8 start_rx_deauth; u8 start_tx_deauth; u8 start_assoc_denied; u8 start_assoc_timeout; u8 start_connection_loss; u8 reserved2; } __packed; /** * struct iwl_fw_dbg_trigger_txq_timer - configures the Tx queue's timer * @command_queue: timeout for the command queue in ms * @bss: timeout for the queues of a BSS (except for TDLS queues) in ms * @softap: timeout for the queues of a softAP in ms * @p2p_go: timeout for the queues of a P2P GO in ms * @p2p_client: timeout for the queues of a P2P client in ms * @p2p_device: timeout for the queues of a P2P device in ms * @ibss: timeout for the queues of an IBSS in ms * @tdls: timeout for the queues of a TDLS station in ms */ struct iwl_fw_dbg_trigger_txq_timer { __le32 command_queue; __le32 bss; __le32 softap; __le32 p2p_go; __le32 p2p_client; __le32 p2p_device; __le32 ibss; __le32 tdls; __le32 reserved[4]; } __packed; /** * struct iwl_fw_dbg_trigger_time_event - configures a time event trigger * @time_events: a list of tuples <id, action_bitmap, status_bitmap>.
The driver will issue a * trigger each time a time event notification that relates to time event * id with one of the actions in the bitmap is received and * BIT(notif->status) is set in status_bitmap. * */ struct iwl_fw_dbg_trigger_time_event { struct { __le32 id; __le32 action_bitmap; __le32 status_bitmap; } __packed time_events[16]; } __packed; /** * struct iwl_fw_dbg_trigger_ba - configures BlockAck related trigger * @rx_ba_start: tid bitmap to configure on what tid the trigger should occur * when an Rx BlockAck session is started. * @rx_ba_stop: tid bitmap to configure on what tid the trigger should occur * when an Rx BlockAck session is stopped. * @tx_ba_start: tid bitmap to configure on what tid the trigger should occur * when a Tx BlockAck session is started. * @tx_ba_stop: tid bitmap to configure on what tid the trigger should occur * when a Tx BlockAck session is stopped. * @rx_bar: tid bitmap to configure on what tid the trigger should occur * when a BAR is received (for a Tx BlockAck session). * @tx_bar: tid bitmap to configure on what tid the trigger should occur * when a BAR is sent (for an Rx BlockAck session). * @frame_timeout: tid bitmap to configure on what tid the trigger should occur * when a frame times out in the reordering buffer. */ struct iwl_fw_dbg_trigger_ba { __le16 rx_ba_start; __le16 rx_ba_stop; __le16 tx_ba_start; __le16 tx_ba_stop; __le16 rx_bar; __le16 tx_bar; __le16 frame_timeout; } __packed; #ifdef CPTCFG_MAC80211_LATENCY_MEASUREMENTS /** * struct iwl_fw_dbg_trigger_tx_latency - configures tx latency related trigger * @thrshold: the wanted threshold. * @tid_bitmap: the tid to apply the threshold on * @mode: recording mode (Internal buffer or continuous recording) * @window: the size of the window before collecting. * @reserved: reserved. */ struct iwl_fw_dbg_trigger_tx_latency { __le32 thrshold; __le16 tid_bitmap; __le16 mode; __le32 window; __le32 reserved[4]; } __packed; #endif /* CPTCFG_MAC80211_LATENCY_MEASUREMENTS */ /** * struct iwl_fw_dbg_trigger_tdls - configures trigger for TDLS events. * @action_bitmap: the TDLS action to trigger the collection upon * @peer_mode: trigger on specific peer or all * @peer: the TDLS peer to trigger the collection on */ struct iwl_fw_dbg_trigger_tdls { u8 action_bitmap; u8 peer_mode; u8 peer[ETH_ALEN]; u8 reserved[4]; } __packed; /** * struct iwl_fw_dbg_trigger_tx_status - configures trigger for tx response * status. * @statuses: the list of statuses to trigger the collection on */ struct iwl_fw_dbg_trigger_tx_status { struct tx_status { u8 status; u8 reserved[3]; } __packed statuses[16]; __le32 reserved[2]; } __packed; /** * struct iwl_fw_dbg_conf_tlv - a TLV that describes a debug configuration. * @id: conf id * @usniffer: should the uSniffer image be used * @num_of_hcmds: how many HCMDs to send are present here * @hcmd: a variable length host command to be sent to apply the configuration. * If there is more than one HCMD to send, they will appear one after the * other and be sent in the order that they appear in. * This parses IWL_UCODE_TLV_FW_DBG_CONF. The user can add up to * %FW_DBG_CONF_MAX configurations per run.
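 *
 * Since each HCMD is variable length, a consumer has to walk the
 * list sequentially; a minimal sketch (hypothetical helper, not
 * part of this API):
 *
 *	const struct iwl_fw_dbg_conf_hcmd *hcmd = &conf_tlv->hcmd;
 *	int i;
 *
 *	for (i = 0; i < conf_tlv->num_of_hcmds; i++) {
 *		(send le16_to_cpu(hcmd->len) bytes of hcmd->data)
 *		hcmd = (const void *)((const u8 *)(hcmd + 1) +
 *				      le16_to_cpu(hcmd->len));
 *	}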
*/ struct iwl_fw_dbg_conf_tlv { u8 id; u8 usniffer; u8 reserved; u8 num_of_hcmds; struct iwl_fw_dbg_conf_hcmd hcmd; } __packed; #define IWL_FW_CMD_VER_UNKNOWN 99 /** * struct iwl_fw_cmd_version - firmware command version entry * @cmd: command ID * @group: group ID * @cmd_ver: command version * @notif_ver: notification version */ struct iwl_fw_cmd_version { u8 cmd; u8 group; u8 cmd_ver; u8 notif_ver; } __packed; #endif /* __iwl_fw_file_h__ */ backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/fw/img.h000066400000000000000000000231431351064634500261330ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*****************************************************************************/ #ifndef __iwl_fw_img_h__ #define __iwl_fw_img_h__ #include <linux/types.h> #include "api/dbg-tlv.h" #include "file.h" #include "error-dump.h" /** * enum iwl_ucode_type * * The type of ucode. * * @IWL_UCODE_REGULAR: Normal runtime ucode * @IWL_UCODE_INIT: Initial ucode * @IWL_UCODE_WOWLAN: Wake on Wireless enabled ucode * @IWL_UCODE_REGULAR_USNIFFER: Normal runtime ucode when using usniffer image */ enum iwl_ucode_type { IWL_UCODE_REGULAR, IWL_UCODE_INIT, IWL_UCODE_WOWLAN, IWL_UCODE_REGULAR_USNIFFER, IWL_UCODE_TYPE_MAX, }; /* * enumeration of ucode section. * This enumeration is used directly for older firmware (before 16.0). * For new firmware, there can be up to 4 sections (see below) but the * first one packaged into the firmware file is the DATA section and * some debugging code accesses that. */ enum iwl_ucode_sec { IWL_UCODE_SECTION_DATA, IWL_UCODE_SECTION_INST, }; struct iwl_ucode_capabilities { u32 max_probe_length; u32 n_scan_channels; u32 standard_phy_calibration_size; u32 flags; u32 error_log_addr; u32 error_log_size; unsigned long _api[BITS_TO_LONGS(NUM_IWL_UCODE_TLV_API)]; unsigned long _capa[BITS_TO_LONGS(NUM_IWL_UCODE_TLV_CAPA)]; const struct iwl_fw_cmd_version *cmd_versions; u32 n_cmd_versions; }; static inline bool fw_has_api(const struct iwl_ucode_capabilities *capabilities, iwl_ucode_tlv_api_t api) { return test_bit((__force long)api, capabilities->_api); } static inline bool fw_has_capa(const struct iwl_ucode_capabilities *capabilities, iwl_ucode_tlv_capa_t capa) { return test_bit((__force long)capa, capabilities->_capa); } /* one for each uCode image (inst/data, init/runtime/wowlan) */ struct fw_desc { const void *data; /* vmalloc'ed data */ u32 len; /* size in bytes */ u32 offset; /* offset in the device */ }; struct fw_img { struct fw_desc *sec; int num_sec; bool is_dual_cpus; u32 paging_mem_size; }; /* * Block paging calculations */ #define PAGE_2_EXP_SIZE 12 /* 4K == 2^12 */ #define FW_PAGING_SIZE BIT(PAGE_2_EXP_SIZE) /* page size is 4KB */ #define PAGE_PER_GROUP_2_EXP_SIZE 3 /* 8 pages per group */ #define NUM_OF_PAGE_PER_GROUP BIT(PAGE_PER_GROUP_2_EXP_SIZE) /* don't change, support only 32KB size */ #define PAGING_BLOCK_SIZE (NUM_OF_PAGE_PER_GROUP * FW_PAGING_SIZE) /* 32K == 2^15 */ #define BLOCK_2_EXP_SIZE (PAGE_2_EXP_SIZE + PAGE_PER_GROUP_2_EXP_SIZE) /* * Image paging calculations */ #define BLOCK_PER_IMAGE_2_EXP_SIZE 5 /* 2^5 == 32 blocks per image */ #define NUM_OF_BLOCK_PER_IMAGE BIT(BLOCK_PER_IMAGE_2_EXP_SIZE) /* maximum image size 1024KB */ #define MAX_PAGING_IMAGE_SIZE (NUM_OF_BLOCK_PER_IMAGE * PAGING_BLOCK_SIZE) /* Virtual address signature */ #define PAGING_ADDR_SIG 0xAA000000 #define PAGING_CMD_IS_SECURED BIT(9) #define PAGING_CMD_IS_ENABLED BIT(8) #define PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS 0 #define PAGING_TLV_SECURE_MASK 1 /** * struct iwl_fw_paging * @fw_paging_phys: page phy pointer * @fw_paging_block: pointer to the allocated block * @fw_paging_size: page size */ struct iwl_fw_paging { dma_addr_t fw_paging_phys; struct page *fw_paging_block; u32 fw_paging_size; }; /** * struct iwl_fw_cscheme_list - a cipher scheme list * @size: a number of entries * @cs: cipher scheme entries */ struct iwl_fw_cscheme_list { u8 size; struct iwl_fw_cipher_scheme cs[]; } __packed; /** * enum iwl_fw_type - iwlwifi firmware type * @IWL_FW_DVM: DVM firmware * @IWL_FW_MVM: MVM firmware */ enum iwl_fw_type { IWL_FW_DVM, IWL_FW_MVM, }; /** * struct iwl_fw_dbg - debug data * * @dest_tlv: points to debug
destination TLV (typically SRAM or DRAM) * @n_dest_reg: num of reg_ops in dest_tlv * @conf_tlv: array of pointers to configuration HCMDs * @trigger_tlv: array of pointers to triggers TLVs * @trigger_tlv_len: lengths of the @trigger_tlv entries * @mem_tlv: Runtime addresses to dump * @n_mem_tlv: number of runtime addresses * @dump_mask: bitmask of dump regions */ struct iwl_fw_dbg { struct iwl_fw_dbg_dest_tlv_v1 *dest_tlv; u8 n_dest_reg; struct iwl_fw_dbg_conf_tlv *conf_tlv[FW_DBG_CONF_MAX]; struct iwl_fw_dbg_trigger_tlv *trigger_tlv[FW_DBG_TRIGGER_MAX]; size_t trigger_tlv_len[FW_DBG_TRIGGER_MAX]; struct iwl_fw_dbg_mem_seg_tlv *mem_tlv; size_t n_mem_tlv; u32 dump_mask; }; /** * struct iwl_fw_ini_active_triggers * @active: is this trigger active * @size: allocated memory size of the trigger * @trig: trigger */ struct iwl_fw_ini_active_triggers { bool active; size_t size; struct iwl_fw_ini_trigger *trig; }; /** * struct iwl_fw - variables associated with the firmware * * @ucode_ver: ucode version from the ucode file * @fw_version: firmware version string * @img: ucode image like ucode_rt, ucode_init, ucode_wowlan. * @iml_len: length of the image loader image * @iml: image loader fw image * @ucode_capa: capabilities parsed from the ucode file. * @enhance_sensitivity_table: device can do enhanced sensitivity. * @init_evtlog_ptr: event log offset for init ucode. * @init_evtlog_size: event log size for init ucode. * @init_errlog_ptr: error log offset for init ucode. * @inst_evtlog_ptr: event log offset for runtime ucode. * @inst_evtlog_size: event log size for runtime ucode. * @inst_errlog_ptr: error log offset for runtime ucode. * @type: firmware type (&enum iwl_fw_type) * @cipher_scheme: optional external cipher scheme. * @human_readable: human readable version * we get the ALIVE from the uCode */ struct iwl_fw { u32 ucode_ver; char fw_version[ETHTOOL_FWVERS_LEN]; /* ucode images */ struct fw_img img[IWL_UCODE_TYPE_MAX]; size_t iml_len; u8 *iml; struct iwl_ucode_capabilities ucode_capa; bool enhance_sensitivity_table; u32 init_evtlog_ptr, init_evtlog_size, init_errlog_ptr; u32 inst_evtlog_ptr, inst_evtlog_size, inst_errlog_ptr; struct iwl_tlv_calib_ctrl default_calib[IWL_UCODE_TYPE_MAX]; u32 phy_config; u8 valid_tx_ant; u8 valid_rx_ant; enum iwl_fw_type type; struct iwl_fw_cipher_scheme cs[IWL_UCODE_MAX_CS]; u8 human_readable[FW_VER_HUMAN_READABLE_SZ]; struct iwl_fw_dbg dbg; }; static inline const char *get_fw_dbg_mode_string(int mode) { switch (mode) { case SMEM_MODE: return "SMEM"; case EXTERNAL_MODE: return "EXTERNAL_DRAM"; case MARBH_MODE: return "MARBH"; case MIPI_MODE: return "MIPI"; default: return "UNKNOWN"; } } static inline bool iwl_fw_dbg_conf_usniffer(const struct iwl_fw *fw, u8 id) { const struct iwl_fw_dbg_conf_tlv *conf_tlv = fw->dbg.conf_tlv[id]; if (!conf_tlv) return false; return conf_tlv->usniffer; } static inline const struct fw_img * iwl_get_ucode_image(const struct iwl_fw *fw, enum iwl_ucode_type ucode_type) { if (ucode_type >= IWL_UCODE_TYPE_MAX) return NULL; return &fw->img[ucode_type]; } #endif /* __iwl_fw_img_h__ */ backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/fw/init.c000066400000000000000000000073361351064634500263220ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license.
* * GPL LICENSE SUMMARY * * Copyright(c) 2017 Intel Deutschland GmbH * Copyright(c) 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2017 Intel Deutschland GmbH * Copyright(c) 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* *****************************************************************************/ #include "iwl-drv.h" #include "runtime.h" #include "dbg.h" #include "debugfs.h" void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans, const struct iwl_fw *fw, const struct iwl_fw_runtime_ops *ops, void *ops_ctx, struct dentry *dbgfs_dir) { int i; memset(fwrt, 0, sizeof(*fwrt)); fwrt->trans = trans; fwrt->fw = fw; fwrt->dev = trans->dev; fwrt->dump.conf = FW_DBG_INVALID; fwrt->ops = ops; fwrt->ops_ctx = ops_ctx; for (i = 0; i < IWL_FW_RUNTIME_DUMP_WK_NUM; i++) { fwrt->dump.wks[i].idx = i; INIT_DELAYED_WORK(&fwrt->dump.wks[i].wk, iwl_fw_error_dump_wk); } iwl_fwrt_dbgfs_register(fwrt, dbgfs_dir); timer_setup(&fwrt->dump.periodic_trig, iwl_fw_dbg_periodic_trig_handler, 0); } IWL_EXPORT_SYMBOL(iwl_fw_runtime_init); void iwl_fw_runtime_suspend(struct iwl_fw_runtime *fwrt) { iwl_fw_suspend_timestamp(fwrt); } IWL_EXPORT_SYMBOL(iwl_fw_runtime_suspend); void iwl_fw_runtime_resume(struct iwl_fw_runtime *fwrt) { iwl_fw_resume_timestamp(fwrt); } IWL_EXPORT_SYMBOL(iwl_fw_runtime_resume); backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/fw/notif-wait.c000066400000000000000000000141501351064634500274310ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2015 - 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2015 - 2017 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #include <linux/sched.h> #include <linux/export.h> #include "iwl-drv.h" #include "notif-wait.h" void iwl_notification_wait_init(struct iwl_notif_wait_data *notif_wait) { spin_lock_init(&notif_wait->notif_wait_lock); INIT_LIST_HEAD(&notif_wait->notif_waits); init_waitqueue_head(&notif_wait->notif_waitq); } IWL_EXPORT_SYMBOL(iwl_notification_wait_init); bool iwl_notification_wait(struct iwl_notif_wait_data *notif_wait, struct iwl_rx_packet *pkt) { bool triggered = false; if (!list_empty(&notif_wait->notif_waits)) { struct iwl_notification_wait *w; spin_lock(&notif_wait->notif_wait_lock); list_for_each_entry(w, &notif_wait->notif_waits, list) { int i; bool found = false; /* * If it already finished (triggered) or has been * aborted then don't evaluate it again to avoid races. * Otherwise the function could be called again even * though it returned true before. */ if (w->triggered || w->aborted) continue; for (i = 0; i < w->n_cmds; i++) { u16 rec_id = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd); if (w->cmds[i] == rec_id || (!iwl_cmd_groupid(w->cmds[i]) && DEF_ID(w->cmds[i]) == rec_id)) { found = true; break; } } if (!found) continue; if (!w->fn || w->fn(notif_wait, pkt, w->fn_data)) { w->triggered = true; triggered = true; } } spin_unlock(&notif_wait->notif_wait_lock); } return triggered; } IWL_EXPORT_SYMBOL(iwl_notification_wait); void iwl_abort_notification_waits(struct iwl_notif_wait_data *notif_wait) { struct iwl_notification_wait *wait_entry; spin_lock(&notif_wait->notif_wait_lock); list_for_each_entry(wait_entry, &notif_wait->notif_waits, list) wait_entry->aborted = true; spin_unlock(&notif_wait->notif_wait_lock); wake_up_all(&notif_wait->notif_waitq); } IWL_EXPORT_SYMBOL(iwl_abort_notification_waits); void iwl_init_notification_wait(struct iwl_notif_wait_data *notif_wait, struct iwl_notification_wait *wait_entry, const u16 *cmds, int n_cmds, bool (*fn)(struct iwl_notif_wait_data *notif_wait, struct iwl_rx_packet *pkt, void *data), void *fn_data) { if (WARN_ON(n_cmds > MAX_NOTIF_CMDS)) n_cmds = MAX_NOTIF_CMDS; wait_entry->fn = fn; wait_entry->fn_data = fn_data; wait_entry->n_cmds = n_cmds; memcpy(wait_entry->cmds, cmds, n_cmds * sizeof(u16)); wait_entry->triggered = false; wait_entry->aborted = false; spin_lock_bh(&notif_wait->notif_wait_lock); list_add(&wait_entry->list, &notif_wait->notif_waits); spin_unlock_bh(&notif_wait->notif_wait_lock); } IWL_EXPORT_SYMBOL(iwl_init_notification_wait); void iwl_remove_notification(struct iwl_notif_wait_data *notif_wait, struct iwl_notification_wait *wait_entry) { spin_lock_bh(&notif_wait->notif_wait_lock); list_del(&wait_entry->list); spin_unlock_bh(&notif_wait->notif_wait_lock); } IWL_EXPORT_SYMBOL(iwl_remove_notification); int iwl_wait_notification(struct iwl_notif_wait_data *notif_wait, struct iwl_notification_wait *wait_entry, unsigned long timeout) { int ret; ret = wait_event_timeout(notif_wait->notif_waitq, wait_entry->triggered || wait_entry->aborted, timeout); iwl_remove_notification(notif_wait, wait_entry); if
(wait_entry->aborted) return -EIO; /* return value is always >= 0 */ if (ret <= 0) return -ETIMEDOUT; return 0; } IWL_EXPORT_SYMBOL(iwl_wait_notification); backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/fw/notif-wait.h000066400000000000000000000125731351064634500274450ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2015 - 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2015 - 2017 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #ifndef __iwl_notif_wait_h__ #define __iwl_notif_wait_h__ #include <linux/wait.h> #include "iwl-trans.h" struct iwl_notif_wait_data { struct list_head notif_waits; spinlock_t notif_wait_lock; wait_queue_head_t notif_waitq; }; #define MAX_NOTIF_CMDS 5 /** * struct iwl_notification_wait - notification wait entry * @list: list head for global list * @fn: Function called with the notification. If the function * returns true, the wait is over; if it returns false then * the waiter stays blocked. If no function is given, any * of the listed commands will unblock the waiter.
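 *	A minimal illustrative handler (hypothetical, not part of this
 *	API) that ends the wait on the first matching packet:
 *
 *		static bool iwl_notif_any(struct iwl_notif_wait_data *notif_data,
 *					  struct iwl_rx_packet *pkt, void *data)
 *		{
 *			return true;
 *		}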
* @cmds: command IDs * @n_cmds: number of command IDs * @triggered: waiter should be woken up * @aborted: wait was aborted * * This structure is not used directly; to wait for a * notification, declare it on the stack and call * iwl_init_notification_wait() with the appropriate * parameters. Then do whatever will cause the ucode * to notify the driver, and wait for the notification * by calling iwl_wait_notification(). * * Each notification is one-shot. If at some point we * need to support multi-shot notifications (which * can't be allocated on the stack) we need to modify * the code for them. */ struct iwl_notification_wait { struct list_head list; bool (*fn)(struct iwl_notif_wait_data *notif_data, struct iwl_rx_packet *pkt, void *data); void *fn_data; u16 cmds[MAX_NOTIF_CMDS]; u8 n_cmds; bool triggered, aborted; }; /* caller functions */ void iwl_notification_wait_init(struct iwl_notif_wait_data *notif_data); bool iwl_notification_wait(struct iwl_notif_wait_data *notif_data, struct iwl_rx_packet *pkt); void iwl_abort_notification_waits(struct iwl_notif_wait_data *notif_data); static inline void iwl_notification_notify(struct iwl_notif_wait_data *notif_data) { wake_up_all(&notif_data->notif_waitq); } static inline void iwl_notification_wait_notify(struct iwl_notif_wait_data *notif_data, struct iwl_rx_packet *pkt) { if (iwl_notification_wait(notif_data, pkt)) iwl_notification_notify(notif_data); } /* user functions */ void __acquires(wait_entry) iwl_init_notification_wait(struct iwl_notif_wait_data *notif_data, struct iwl_notification_wait *wait_entry, const u16 *cmds, int n_cmds, bool (*fn)(struct iwl_notif_wait_data *notif_data, struct iwl_rx_packet *pkt, void *data), void *fn_data); int __must_check __releases(wait_entry) iwl_wait_notification(struct iwl_notif_wait_data *notif_data, struct iwl_notification_wait *wait_entry, unsigned long timeout); void __releases(wait_entry) iwl_remove_notification(struct iwl_notif_wait_data *notif_data, struct iwl_notification_wait *wait_entry); #endif /* __iwl_notif_wait_h__ */ backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/fw/paging.c000066400000000000000000000252351351064634500266230ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #include "iwl-drv.h" #include "runtime.h" #include "fw/api/commands.h" void iwl_free_fw_paging(struct iwl_fw_runtime *fwrt) { int i; if (!fwrt->fw_paging_db[0].fw_paging_block) return; for (i = 0; i < NUM_OF_FW_PAGING_BLOCKS; i++) { struct iwl_fw_paging *paging = &fwrt->fw_paging_db[i]; if (!paging->fw_paging_block) { IWL_DEBUG_FW(fwrt, "Paging: block %d already freed, continue to next page\n", i); continue; } dma_unmap_page(fwrt->trans->dev, paging->fw_paging_phys, paging->fw_paging_size, DMA_BIDIRECTIONAL); __free_pages(paging->fw_paging_block, get_order(paging->fw_paging_size)); paging->fw_paging_block = NULL; } memset(fwrt->fw_paging_db, 0, sizeof(fwrt->fw_paging_db)); } IWL_EXPORT_SYMBOL(iwl_free_fw_paging); static int iwl_alloc_fw_paging_mem(struct iwl_fw_runtime *fwrt, const struct fw_img *image) { struct page *block; dma_addr_t phys = 0; int blk_idx, order, num_of_pages, size; if (fwrt->fw_paging_db[0].fw_paging_block) return 0; /* ensure PAGING_BLOCK_SIZE == 2^BLOCK_2_EXP_SIZE */ BUILD_BUG_ON(BIT(BLOCK_2_EXP_SIZE) != PAGING_BLOCK_SIZE); num_of_pages = image->paging_mem_size / FW_PAGING_SIZE; fwrt->num_of_paging_blk = DIV_ROUND_UP(num_of_pages, NUM_OF_PAGE_PER_GROUP); fwrt->num_of_pages_in_last_blk = num_of_pages - NUM_OF_PAGE_PER_GROUP * (fwrt->num_of_paging_blk - 1); IWL_DEBUG_FW(fwrt, "Paging: allocating mem for %d paging blocks, each block holds 8 pages, last block holds %d pages\n", fwrt->num_of_paging_blk, fwrt->num_of_pages_in_last_blk); /* * Allocate CSS and paging blocks in dram. */ for (blk_idx = 0; blk_idx < fwrt->num_of_paging_blk + 1; blk_idx++) { /* For CSS allocate 4KB, for others PAGING_BLOCK_SIZE (32K) */ size = blk_idx ?
PAGING_BLOCK_SIZE : FW_PAGING_SIZE; order = get_order(size); block = alloc_pages(GFP_KERNEL, order); if (!block) { /* free all the previous pages since we failed */ iwl_free_fw_paging(fwrt); return -ENOMEM; } fwrt->fw_paging_db[blk_idx].fw_paging_block = block; fwrt->fw_paging_db[blk_idx].fw_paging_size = size; phys = dma_map_page(fwrt->trans->dev, block, 0, PAGE_SIZE << order, DMA_BIDIRECTIONAL); if (dma_mapping_error(fwrt->trans->dev, phys)) { /* * free the previous pages and the current one * since we failed to map_page. */ iwl_free_fw_paging(fwrt); return -ENOMEM; } fwrt->fw_paging_db[blk_idx].fw_paging_phys = phys; if (!blk_idx) IWL_DEBUG_FW(fwrt, "Paging: allocated 4K(CSS) bytes (order %d) for firmware paging.\n", order); else IWL_DEBUG_FW(fwrt, "Paging: allocated 32K bytes (order %d) for firmware paging.\n", order); } return 0; } static int iwl_fill_paging_mem(struct iwl_fw_runtime *fwrt, const struct fw_img *image) { int sec_idx, idx, ret; u32 offset = 0; /* * find the paging image start point: * if CPU2 exists and it's in paging format, then the image looks like: * CPU1 sections (2 or more) * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates CPU1 from CPU2 * CPU2 sections (not paged) * PAGING_SEPARATOR_SECTION delimiter - separates the CPU2 * non-paged sections from the CPU2 paging section * CPU2 paging CSS * CPU2 paging image (including instruction and data) */ for (sec_idx = 0; sec_idx < image->num_sec; sec_idx++) { if (image->sec[sec_idx].offset == PAGING_SEPARATOR_SECTION) { sec_idx++; break; } } /* * If paging is enabled, there should be at least 2 more sections left * (one for CSS and one for Paging data) */ if (sec_idx >= image->num_sec - 1) { IWL_ERR(fwrt, "Paging: Missing CSS and/or paging sections\n"); ret = -EINVAL; goto err; } /* copy the CSS block to the dram */ IWL_DEBUG_FW(fwrt, "Paging: load paging CSS to FW, sec = %d\n", sec_idx); if (image->sec[sec_idx].len > fwrt->fw_paging_db[0].fw_paging_size) { IWL_ERR(fwrt, "CSS block is larger than paging size\n"); ret = -EINVAL; goto err; } memcpy(page_address(fwrt->fw_paging_db[0].fw_paging_block), image->sec[sec_idx].data, image->sec[sec_idx].len); dma_sync_single_for_device(fwrt->trans->dev, fwrt->fw_paging_db[0].fw_paging_phys, fwrt->fw_paging_db[0].fw_paging_size, DMA_BIDIRECTIONAL); IWL_DEBUG_FW(fwrt, "Paging: copied %d CSS bytes to first block\n", fwrt->fw_paging_db[0].fw_paging_size); sec_idx++; /* * Copy the paging blocks to the dram. The loop index starts * from 1 since the CSS block (index 0) was already copied to * dram. We use num_of_paging_blk + 1 to account for that. */ for (idx = 1; idx < fwrt->num_of_paging_blk + 1; idx++) { struct iwl_fw_paging *block = &fwrt->fw_paging_db[idx]; int remaining = image->sec[sec_idx].len - offset; int len = block->fw_paging_size; /* * For the last block, we copy all that is remaining, * for all other blocks, we copy fw_paging_size at a * time.
*/ if (idx == fwrt->num_of_paging_blk) { len = remaining; if (remaining != fwrt->num_of_pages_in_last_blk * FW_PAGING_SIZE) { IWL_ERR(fwrt, "Paging: last block contains more data than expected %d\n", remaining); ret = -EINVAL; goto err; } } else if (block->fw_paging_size > remaining) { IWL_ERR(fwrt, "Paging: not enough data in block %d (%d)\n", idx, remaining); ret = -EINVAL; goto err; } memcpy(page_address(block->fw_paging_block), image->sec[sec_idx].data + offset, len); dma_sync_single_for_device(fwrt->trans->dev, block->fw_paging_phys, block->fw_paging_size, DMA_BIDIRECTIONAL); IWL_DEBUG_FW(fwrt, "Paging: copied %d paging bytes to block %d\n", len, idx); offset += block->fw_paging_size; } return 0; err: iwl_free_fw_paging(fwrt); return ret; } static int iwl_save_fw_paging(struct iwl_fw_runtime *fwrt, const struct fw_img *fw) { int ret; ret = iwl_alloc_fw_paging_mem(fwrt, fw); if (ret) return ret; return iwl_fill_paging_mem(fwrt, fw); } /* send paging cmd to FW in case CPU2 has paging image */ static int iwl_send_paging_cmd(struct iwl_fw_runtime *fwrt, const struct fw_img *fw) { struct iwl_fw_paging_cmd paging_cmd = { .flags = cpu_to_le32(PAGING_CMD_IS_SECURED | PAGING_CMD_IS_ENABLED | (fwrt->num_of_pages_in_last_blk << PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS)), .block_size = cpu_to_le32(BLOCK_2_EXP_SIZE), .block_num = cpu_to_le32(fwrt->num_of_paging_blk), }; struct iwl_host_cmd hcmd = { .id = iwl_cmd_id(FW_PAGING_BLOCK_CMD, IWL_ALWAYS_LONG_GROUP, 0), .len = { sizeof(paging_cmd), }, .data = { &paging_cmd, }, }; int blk_idx; /* loop for all paging blocks + CSS block */ for (blk_idx = 0; blk_idx < fwrt->num_of_paging_blk + 1; blk_idx++) { dma_addr_t addr = fwrt->fw_paging_db[blk_idx].fw_paging_phys; __le32 phy_addr; addr = addr >> PAGE_2_EXP_SIZE; phy_addr = cpu_to_le32(addr); paging_cmd.device_phy_addr[blk_idx] = phy_addr; } return iwl_trans_send_cmd(fwrt->trans, &hcmd); } int iwl_init_paging(struct iwl_fw_runtime *fwrt, enum iwl_ucode_type type) { const struct fw_img *fw = &fwrt->fw->img[type]; int ret; if (fwrt->trans->cfg->gen2) return 0; /* * Configure and operate fw paging mechanism. * The driver configures the paging flow only once. * The CPU2 paging image is included in the IWL_UCODE_INIT image. */ if (!fw->paging_mem_size) return 0; ret = iwl_save_fw_paging(fwrt, fw); if (ret) { IWL_ERR(fwrt, "failed to save the FW paging image\n"); return ret; } ret = iwl_send_paging_cmd(fwrt, fw); if (ret) { IWL_ERR(fwrt, "failed to send the paging cmd\n"); iwl_free_fw_paging(fwrt); return ret; } return 0; } IWL_EXPORT_SYMBOL(iwl_init_paging); backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/fw/runtime.h000066400000000000000000000147121351064634500270440ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2017 Intel Deutschland GmbH * Copyright (C) 2018-2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details.
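/*
 * Standalone sketch of the paging-block arithmetic used by
 * iwl_alloc_fw_paging_mem() above. The constants restate the values the
 * code mentions (4 KiB firmware pages, 32 KiB blocks holding 8 pages
 * each); the example image size is arbitrary.
 */
#include <stdio.h>

#define FW_PAGING_SIZE        4096	/* one firmware page */
#define NUM_OF_PAGE_PER_GROUP 8
#define PAGING_BLOCK_SIZE     (NUM_OF_PAGE_PER_GROUP * FW_PAGING_SIZE)
#define DIV_ROUND_UP(n, d)    (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int paging_mem_size = 340 * 1024;	/* example image size */
	unsigned int num_of_pages = paging_mem_size / FW_PAGING_SIZE;
	unsigned int num_of_blk =
		DIV_ROUND_UP(num_of_pages, NUM_OF_PAGE_PER_GROUP);
	unsigned int pages_in_last =
		num_of_pages - NUM_OF_PAGE_PER_GROUP * (num_of_blk - 1);

	/* the driver loops to num_of_blk + 1: index 0 is the 4 KiB CSS block */
	printf("%u pages -> %u blocks (+1 CSS), last block holds %u pages\n",
	       num_of_pages, num_of_blk, pages_in_last);
	return 0;
}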
* * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2017 Intel Deutschland GmbH * Copyright (C) 2018-2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* *****************************************************************************/ #ifndef __iwl_fw_runtime_h__ #define __iwl_fw_runtime_h__ #include "iwl-config.h" #include "iwl-trans.h" #include "img.h" #include "fw/api/debug.h" #include "fw/api/dbg-tlv.h" #include "fw/api/paging.h" #include "iwl-eeprom-parse.h" struct iwl_fw_runtime_ops { int (*dump_start)(void *ctx); void (*dump_end)(void *ctx); int (*send_hcmd)(void *ctx, struct iwl_host_cmd *host_cmd); bool (*d3_debug_enable)(void *ctx); }; #define MAX_NUM_LMAC 2 struct iwl_fwrt_shared_mem_cfg { int num_lmacs; int num_txfifo_entries; struct { u32 txfifo_size[TX_FIFO_MAX_NUM]; u32 rxfifo1_size; } lmac[MAX_NUM_LMAC]; u32 rxfifo2_size; u32 internal_txfifo_addr; u32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM]; }; #define IWL_FW_RUNTIME_DUMP_WK_NUM 5 /** * struct iwl_fw_runtime - runtime data for firmware * @fw: firmware image * @cfg: NIC configuration * @dev: device pointer * @ops: user ops * @ops_ctx: user ops context * @fw_paging_db: paging database * @num_of_paging_blk: number of paging blocks * @num_of_pages_in_last_blk: number of pages in the last block * @smem_cfg: saved firmware SMEM configuration * @cur_fw_img: current firmware image, must be maintained by * the driver by calling &iwl_fw_set_current_image() * @dump: debug dump data */ struct iwl_fw_runtime { struct iwl_trans *trans; const struct iwl_fw *fw; struct device *dev; const struct iwl_fw_runtime_ops *ops; void *ops_ctx; /* Paging */ struct iwl_fw_paging fw_paging_db[NUM_OF_FW_PAGING_BLOCKS]; u16 num_of_paging_blk; u16 num_of_pages_in_last_blk; enum iwl_ucode_type cur_fw_img; /* memory configuration */ struct iwl_fwrt_shared_mem_cfg smem_cfg; /* debug */ struct { const struct iwl_fw_dump_desc *desc; bool monitor_only; struct { u8 idx; enum iwl_fw_ini_trigger_id ini_trig_id; struct delayed_work wk; } wks[IWL_FW_RUNTIME_DUMP_WK_NUM]; unsigned long active_wks; u8 conf; /* ts of the beginning of a non-collect fw dbg data period */ unsigned long non_collect_ts_start[IWL_FW_TRIGGER_ID_NUM]; u32 *d3_debug_data; struct iwl_fw_ini_region_cfg *active_regs[IWL_FW_INI_MAX_REGION_ID]; struct iwl_fw_ini_active_triggers active_trigs[IWL_FW_TRIGGER_ID_NUM]; u32 lmac_err_id[MAX_NUM_LMAC]; u32 umac_err_id; void *fifo_iter; struct timer_list periodic_trig; u8 img_name[IWL_FW_INI_MAX_IMG_NAME_LEN]; u8 internal_dbg_cfg_name[IWL_FW_INI_MAX_DBG_CFG_NAME_LEN]; u8 external_dbg_cfg_name[IWL_FW_INI_MAX_DBG_CFG_NAME_LEN]; struct { u8 type; u8 subtype; u32 lmac_major; u32 lmac_minor; u32 umac_major; u32 umac_minor; } fw_ver; } dump; #ifdef CPTCFG_IWLWIFI_DEBUGFS struct { struct delayed_work wk; u32 delay; u64 seq; } timestamp; bool tpc_enabled; #endif /* CPTCFG_IWLWIFI_DEBUGFS */ }; void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans, const struct iwl_fw *fw, const struct iwl_fw_runtime_ops *ops, void *ops_ctx, struct dentry *dbgfs_dir); static inline void iwl_fw_runtime_free(struct iwl_fw_runtime *fwrt) { int i; kfree(fwrt->dump.d3_debug_data); fwrt->dump.d3_debug_data = NULL; for (i = 0; i < IWL_FW_TRIGGER_ID_NUM; i++) { struct iwl_fw_ini_active_triggers *active = &fwrt->dump.active_trigs[i]; active->active = false; active->size = 0; kfree(active->trig); active->trig = NULL; } } void iwl_fw_runtime_suspend(struct iwl_fw_runtime *fwrt); void iwl_fw_runtime_resume(struct iwl_fw_runtime *fwrt); static inline void iwl_fw_set_current_image(struct iwl_fw_runtime *fwrt, enum iwl_ucode_type cur_fw_img) { fwrt->cur_fw_img = cur_fw_img; } int iwl_init_paging(struct iwl_fw_runtime 
*fwrt, enum iwl_ucode_type type); void iwl_free_fw_paging(struct iwl_fw_runtime *fwrt); void iwl_get_shared_mem_conf(struct iwl_fw_runtime *fwrt); #endif /* __iwl_fw_runtime_h__ */ backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/fw/smem.c000066400000000000000000000133701351064634500263140ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
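/*
 * Sketch of the "free, then NULL" teardown idiom that iwl_free_fw_paging()
 * and iwl_fw_runtime_free() above rely on: clearing each pointer right
 * after freeing makes the teardown safe to call twice. The demo_* names
 * are illustrative, not driver API.
 */
#include <stdlib.h>

struct demo_block { void *mem; };

static void demo_free_all(struct demo_block *db, int n)
{
	for (int i = 0; i < n; i++) {
		if (!db[i].mem)
			continue;	/* already freed, skip */
		free(db[i].mem);
		db[i].mem = NULL;	/* make a second call harmless */
	}
}

int main(void)
{
	struct demo_block db[3] = { { malloc(16) }, { malloc(16) }, { NULL } };

	demo_free_all(db, 3);
	demo_free_all(db, 3);		/* idempotent: no double free */
	return 0;
}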
* *****************************************************************************/ #include "iwl-drv.h" #include "runtime.h" #include "fw/api/commands.h" static void iwl_parse_shared_mem_22000(struct iwl_fw_runtime *fwrt, struct iwl_rx_packet *pkt) { struct iwl_shared_mem_cfg *mem_cfg = (void *)pkt->data; int i, lmac; int lmac_num = le32_to_cpu(mem_cfg->lmac_num); if (WARN_ON(lmac_num > ARRAY_SIZE(mem_cfg->lmac_smem))) return; fwrt->smem_cfg.num_lmacs = lmac_num; fwrt->smem_cfg.num_txfifo_entries = ARRAY_SIZE(mem_cfg->lmac_smem[0].txfifo_size); fwrt->smem_cfg.rxfifo2_size = le32_to_cpu(mem_cfg->rxfifo2_size); for (lmac = 0; lmac < lmac_num; lmac++) { struct iwl_shared_mem_lmac_cfg *lmac_cfg = &mem_cfg->lmac_smem[lmac]; for (i = 0; i < ARRAY_SIZE(lmac_cfg->txfifo_size); i++) fwrt->smem_cfg.lmac[lmac].txfifo_size[i] = le32_to_cpu(lmac_cfg->txfifo_size[i]); fwrt->smem_cfg.lmac[lmac].rxfifo1_size = le32_to_cpu(lmac_cfg->rxfifo1_size); } } static void iwl_parse_shared_mem(struct iwl_fw_runtime *fwrt, struct iwl_rx_packet *pkt) { struct iwl_shared_mem_cfg_v2 *mem_cfg = (void *)pkt->data; int i; fwrt->smem_cfg.num_lmacs = 1; fwrt->smem_cfg.num_txfifo_entries = ARRAY_SIZE(mem_cfg->txfifo_size); for (i = 0; i < ARRAY_SIZE(mem_cfg->txfifo_size); i++) fwrt->smem_cfg.lmac[0].txfifo_size[i] = le32_to_cpu(mem_cfg->txfifo_size[i]); fwrt->smem_cfg.lmac[0].rxfifo1_size = le32_to_cpu(mem_cfg->rxfifo_size[0]); fwrt->smem_cfg.rxfifo2_size = le32_to_cpu(mem_cfg->rxfifo_size[1]); /* new API has more data, from rxfifo_addr field and on */ if (fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) { BUILD_BUG_ON(sizeof(fwrt->smem_cfg.internal_txfifo_size) != sizeof(mem_cfg->internal_txfifo_size)); fwrt->smem_cfg.internal_txfifo_addr = le32_to_cpu(mem_cfg->internal_txfifo_addr); for (i = 0; i < ARRAY_SIZE(fwrt->smem_cfg.internal_txfifo_size); i++) fwrt->smem_cfg.internal_txfifo_size[i] = le32_to_cpu(mem_cfg->internal_txfifo_size[i]); } } void iwl_get_shared_mem_conf(struct iwl_fw_runtime *fwrt) { struct iwl_host_cmd cmd = { .flags = CMD_WANT_SKB, .data = { NULL, }, .len = { 0, }, }; struct iwl_rx_packet *pkt; int ret; if (fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) cmd.id = iwl_cmd_id(SHARED_MEM_CFG_CMD, SYSTEM_GROUP, 0); else cmd.id = SHARED_MEM_CFG; ret = iwl_trans_send_cmd(fwrt->trans, &cmd); if (ret) { WARN(ret != -ERFKILL, "Could not send the SMEM command: %d\n", ret); return; } pkt = cmd.resp_pkt; if (fwrt->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000) iwl_parse_shared_mem_22000(fwrt, pkt); else iwl_parse_shared_mem(fwrt, pkt); IWL_DEBUG_INFO(fwrt, "SHARED MEM CFG: got memory offsets/sizes\n"); iwl_free_resp(&cmd); } IWL_EXPORT_SYMBOL(iwl_get_shared_mem_conf); backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/fw/testmode.c000066400000000000000000000315011351064634500271730ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. 
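/*
 * Sketch of the endianness handling in iwl_parse_shared_mem() above:
 * firmware replies carry little-endian 32-bit words that the driver
 * converts to host order field by field. le32_to_host() is a portable
 * stand-in for the kernel's le32_to_cpu(); the payload bytes are made up.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t le32_to_host(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main(void)
{
	/* pretend firmware payload: a fifo size of 0x2000 bytes, LE-encoded */
	uint8_t payload[4] = { 0x00, 0x20, 0x00, 0x00 };

	printf("fifo size = 0x%x\n", (unsigned)le32_to_host(payload));
	return 0;
}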
* * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* *****************************************************************************/ #include "iwl-trans.h" #include "iwl-tm-infc.h" #include "iwl-drv.h" #include "iwl-prph.h" #include "iwl-io.h" static int iwl_tm_send_hcmd(struct iwl_testmode *testmode, struct iwl_tm_data *data_in, struct iwl_tm_data *data_out) { struct iwl_tm_cmd_request *hcmd_req = data_in->data; struct iwl_tm_cmd_request *cmd_resp; u32 reply_len, resp_size; struct iwl_rx_packet *pkt; struct iwl_host_cmd host_cmd = { .id = hcmd_req->id, .data[0] = hcmd_req->data, .len[0] = hcmd_req->len, .dataflags[0] = IWL_HCMD_DFL_NOCOPY, }; int ret; if (!testmode->send_hcmd) return -EOPNOTSUPP; if (hcmd_req->want_resp) host_cmd.flags |= CMD_WANT_SKB; mutex_lock(testmode->mutex); ret = testmode->send_hcmd(testmode->op_mode, &host_cmd); mutex_unlock(testmode->mutex); if (ret) return ret; /* if no reply is required, we are done */ if (!(host_cmd.flags & CMD_WANT_SKB)) return 0; /* Retrieve response packet */ pkt = host_cmd.resp_pkt; reply_len = iwl_rx_packet_len(pkt); /* Set response data */ resp_size = sizeof(struct iwl_tm_cmd_request) + reply_len; cmd_resp = kzalloc(resp_size, GFP_KERNEL); if (!cmd_resp) { ret = -ENOMEM; goto out; } cmd_resp->id = hcmd_req->id; cmd_resp->len = reply_len; memcpy(cmd_resp->data, &(pkt->hdr), reply_len); data_out->data = cmd_resp; data_out->len = resp_size; ret = 0; out: iwl_free_resp(&host_cmd); return ret; } static void iwl_tm_execute_reg_ops(struct iwl_testmode *testmode, struct iwl_tm_regs_request *request, struct iwl_tm_regs_request *result) { struct iwl_tm_reg_op *cur_op; u32 idx, read_idx; for (idx = 0, read_idx = 0; idx < request->num; idx++) { cur_op = &request->reg_ops[idx]; if (cur_op->op_type == IWL_TM_REG_OP_READ) { cur_op->value = iwl_read32(testmode->trans, cur_op->address); memcpy(&result->reg_ops[read_idx], cur_op, sizeof(*cur_op)); read_idx++; } else { /* IWL_TM_REG_OP_WRITE is the only possible option */ iwl_write32(testmode->trans, cur_op->address, cur_op->value); } } } static int iwl_tm_reg_ops(struct iwl_testmode *testmode, struct iwl_tm_data *data_in, struct iwl_tm_data *data_out) { struct iwl_tm_reg_op *cur_op; struct iwl_tm_regs_request *request = data_in->data; struct iwl_tm_regs_request *result; u32 result_size; u32 idx, read_idx; bool is_grab_nic_access_required = true; unsigned long flags; /* Calculate result size (result is returned only for read ops) */ for (idx = 0, read_idx = 0; idx < request->num; idx++) { if (request->reg_ops[idx].op_type == IWL_TM_REG_OP_READ) read_idx++; /* check if there is an operation that it is not */ /* in the CSR range (0x00000000 - 0x000003FF) */ /* and not in the AL range */ cur_op = &request->reg_ops[idx]; if (IS_AL_ADDR(cur_op->address) || cur_op->address < HBUS_BASE) is_grab_nic_access_required = false; } result_size = sizeof(struct iwl_tm_regs_request) + read_idx * sizeof(struct iwl_tm_reg_op); result = kzalloc(result_size, GFP_KERNEL); if (!result) return -ENOMEM; result->num = read_idx; if (is_grab_nic_access_required) { if (!iwl_trans_grab_nic_access(testmode->trans, &flags)) { kfree(result); return -EBUSY; } iwl_tm_execute_reg_ops(testmode, request, result); iwl_trans_release_nic_access(testmode->trans, &flags); } else { iwl_tm_execute_reg_ops(testmode, request, result); } data_out->data = result; data_out->len = result_size; return 0; } static int iwl_tm_get_dev_info(struct iwl_testmode *testmode, struct iwl_tm_data *data_out) { struct iwl_tm_dev_info *dev_info; const u8 driver_ver[] = BACKPORTS_GIT_TRACKED; dev_info = 
kzalloc(sizeof(*dev_info) + (strlen(driver_ver) + 1) * sizeof(u8), GFP_KERNEL); if (!dev_info) return -ENOMEM; dev_info->dev_id = testmode->trans->hw_id; dev_info->fw_ver = testmode->fw->ucode_ver; dev_info->vendor_id = PCI_VENDOR_ID_INTEL; dev_info->silicon_step = CSR_HW_REV_STEP(testmode->trans->hw_rev); /* TODO: Assign real value when feature is implemented */ dev_info->build_ver = 0x00; strcpy(dev_info->driver_ver, driver_ver); data_out->data = dev_info; data_out->len = sizeof(*dev_info); return 0; } static int iwl_tm_indirect_read(struct iwl_testmode *testmode, struct iwl_tm_data *data_in, struct iwl_tm_data *data_out) { struct iwl_trans *trans = testmode->trans; struct iwl_tm_sram_read_request *cmd_in = data_in->data; u32 addr = cmd_in->offset; u32 size = cmd_in->length; u32 *buf32, size32, i; unsigned long flags; if (size & (sizeof(u32) - 1)) return -EINVAL; data_out->data = kmalloc(size, GFP_KERNEL); if (!data_out->data) return -ENOMEM; data_out->len = size; size32 = size / sizeof(u32); buf32 = data_out->data; mutex_lock(testmode->mutex); /* Hard-coded periphery absolute address */ if (addr >= IWL_ABS_PRPH_START && addr < IWL_ABS_PRPH_START + PRPH_END) { if (!iwl_trans_grab_nic_access(trans, &flags)) { mutex_unlock(testmode->mutex); return -EBUSY; } for (i = 0; i < size32; i++) buf32[i] = iwl_trans_read_prph(trans, addr + i * sizeof(u32)); iwl_trans_release_nic_access(trans, &flags); } else { /* target memory (SRAM) */ iwl_trans_read_mem(trans, addr, buf32, size32); } mutex_unlock(testmode->mutex); return 0; } static int iwl_tm_indirect_write(struct iwl_testmode *testmode, struct iwl_tm_data *data_in) { struct iwl_trans *trans = testmode->trans; struct iwl_tm_sram_write_request *cmd_in = data_in->data; u32 addr = cmd_in->offset; u32 size = cmd_in->len; u8 *buf = cmd_in->buffer; u32 *buf32 = (u32 *)buf, size32 = size / sizeof(u32); unsigned long flags; u32 val, i; mutex_lock(testmode->mutex); if (addr >= IWL_ABS_PRPH_START && addr < IWL_ABS_PRPH_START + PRPH_END) { /* Periphery writes can be 1-3 bytes long, or DWORDs */ if (size < 4) { memcpy(&val, buf, size); if (!iwl_trans_grab_nic_access(trans, &flags)) { mutex_unlock(testmode->mutex); return -EBUSY; } iwl_write32(trans, HBUS_TARG_PRPH_WADDR, (addr & 0x000FFFFF) | ((size - 1) << 24)); iwl_write32(trans, HBUS_TARG_PRPH_WDAT, val); iwl_trans_release_nic_access(trans, &flags); } else { if (size % sizeof(u32)) { mutex_unlock(testmode->mutex); return -EINVAL; } for (i = 0; i < size32; i++) iwl_write_prph(trans, addr + i * sizeof(u32), buf32[i]); } } else { iwl_trans_write_mem(trans, addr, buf32, size32); } mutex_unlock(testmode->mutex); return 0; } static int iwl_tm_get_fw_info(struct iwl_testmode *testmode, struct iwl_tm_data *data_out) { struct iwl_tm_get_fw_info *fw_info; u32 api_len, capa_len; u32 *bitmap; int i; if (!testmode->fw_major_ver || !testmode->fw_minor_ver) return -EOPNOTSUPP; api_len = 4 * DIV_ROUND_UP(NUM_IWL_UCODE_TLV_API, 32); capa_len = 4 * DIV_ROUND_UP(NUM_IWL_UCODE_TLV_CAPA, 32); fw_info = kzalloc(sizeof(*fw_info) + api_len + capa_len, GFP_KERNEL); if (!fw_info) return -ENOMEM; fw_info->fw_major_ver = testmode->fw_major_ver; fw_info->fw_minor_ver = testmode->fw_minor_ver; fw_info->fw_capa_api_len = api_len; fw_info->fw_capa_flags = testmode->fw->ucode_capa.flags; fw_info->fw_capa_len = capa_len; bitmap = (u32 *)fw_info->data; for (i = 0; i < NUM_IWL_UCODE_TLV_API; i++) { if (fw_has_api(&testmode->fw->ucode_capa, (__force iwl_ucode_tlv_api_t)i)) bitmap[i / 32] |= BIT(i % 32); } bitmap = (u32 *)(fw_info->data + 
api_len); for (i = 0; i < NUM_IWL_UCODE_TLV_CAPA; i++) { if (fw_has_capa(&testmode->fw->ucode_capa, (__force iwl_ucode_tlv_capa_t)i)) bitmap[i / 32] |= BIT(i % 32); } data_out->data = fw_info; data_out->len = sizeof(*fw_info) + api_len + capa_len; return 0; } /** * iwl_tm_execute_cmd - Implementation of test command executor * @testmode: iwl_testmode, holds all relevant data for execution * @cmd: User space command's index * @data_in: Input data. "data" field is to be cast to the relevant * data structure. All verification must be done in the * caller function; it is therefore assumed that the input data * length is valid. * @data_out: Will be allocated inside; freeing it is the caller's * responsibility */ int iwl_tm_execute_cmd(struct iwl_testmode *testmode, u32 cmd, struct iwl_tm_data *data_in, struct iwl_tm_data *data_out) { const struct iwl_test_ops *test_ops; bool cmd_supported = false; int ret; if (!testmode->trans->op_mode) { IWL_ERR(testmode->trans, "No op_mode!\n"); return -ENODEV; } if (WARN_ON_ONCE(!testmode->op_mode || !data_in)) return -EINVAL; test_ops = &testmode->trans->op_mode->ops->test_ops; if (test_ops->cmd_exec) ret = test_ops->cmd_exec(testmode, cmd, data_in, data_out, &cmd_supported); if (cmd_supported) goto out; switch (cmd) { case IWL_TM_USER_CMD_HCMD: ret = iwl_tm_send_hcmd(testmode, data_in, data_out); break; case IWL_TM_USER_CMD_REG_ACCESS: ret = iwl_tm_reg_ops(testmode, data_in, data_out); break; case IWL_TM_USER_CMD_SRAM_WRITE: ret = iwl_tm_indirect_write(testmode, data_in); break; case IWL_TM_USER_CMD_SRAM_READ: ret = iwl_tm_indirect_read(testmode, data_in, data_out); break; case IWL_TM_USER_CMD_GET_DEVICE_INFO: ret = iwl_tm_get_dev_info(testmode, data_out); break; case IWL_TM_USER_CMD_GET_FW_INFO: ret = iwl_tm_get_fw_info(testmode, data_out); break; default: ret = -EOPNOTSUPP; break; } out: return ret; } void iwl_tm_init(struct iwl_trans *trans, const struct iwl_fw *fw, struct mutex *mutex, void *op_mode) { struct iwl_testmode *testmode = &trans->testmode; testmode->trans = trans; testmode->fw = fw; testmode->mutex = mutex; testmode->op_mode = op_mode; if (trans->op_mode->ops->test_ops.send_hcmd) testmode->send_hcmd = trans->op_mode->ops->test_ops.send_hcmd; } IWL_EXPORT_SYMBOL(iwl_tm_init); void iwl_tm_set_fw_ver(struct iwl_trans *trans, u32 fw_major_ver, u32 fw_minor_var) { struct iwl_testmode *testmode = &trans->testmode; testmode->fw_major_ver = fw_major_ver; testmode->fw_minor_ver = fw_minor_var; } IWL_EXPORT_SYMBOL(iwl_tm_set_fw_ver); backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/fw/testmode.h000066400000000000000000000120271351064634500272020ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details.
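/*
 * Sketch of the bitmap packing used by iwl_tm_get_fw_info() above: each
 * set capability index i sets bit (i % 32) of 32-bit word (i / 32). The
 * capability indices below are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

int main(void)
{
	uint32_t bitmap[4] = { 0 };		/* room for 128 capabilities */
	int caps[] = { 0, 5, 31, 32, 100 };	/* made-up capability ids */

	for (unsigned i = 0; i < sizeof(caps) / sizeof(caps[0]); i++)
		bitmap[caps[i] / 32] |= BIT(caps[i] % 32);

	for (int w = 0; w < 4; w++)
		printf("word %d: 0x%08x\n", w, (unsigned)bitmap[w]);
	return 0;
}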
* * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* *****************************************************************************/ #ifndef __IWL_TESTMODE_H__ #define __IWL_TESTMODE_H__ #ifdef CPTCFG_NL80211_TESTMODE /** * enum iwl_testmode_attrs - testmode attributes inside * NL80211_ATTR_TESTDATA * @IWL_TM_ATTR_UNSPEC: (invalid attribute) * @IWL_TM_ATTR_CMD: sub command, see &enum iwl_testmode_commands (u32) * @IWL_TM_ATTR_NOA_DURATION: requested NoA duration (u32) * @IWL_TM_ATTR_BEACON_FILTER_STATE: beacon filter state (0 or 1, u32) * @NUM_IWL_TM_ATTRS: number of attributes in the enum * @IWL_TM_ATTR_MAX: max amount of attributes */ enum iwl_testmode_attrs { IWL_TM_ATTR_UNSPEC, IWL_TM_ATTR_CMD, IWL_TM_ATTR_NOA_DURATION, IWL_TM_ATTR_BEACON_FILTER_STATE, /* keep last */ NUM_IWL_TM_ATTRS, IWL_TM_ATTR_MAX = NUM_IWL_TM_ATTRS - 1, }; /** * enum iwl_testmode_commands - trans testmode commands * @IWL_TM_CMD_SET_NOA: set NoA on GO vif for testing * @IWL_TM_CMD_SET_BEACON_FILTER: turn beacon filtering off/on */ enum iwl_testmode_commands { IWL_TM_CMD_SET_NOA, IWL_TM_CMD_SET_BEACON_FILTER, }; #endif #ifdef CPTCFG_IWLWIFI_DEVICE_TESTMODE struct iwl_host_cmd; struct iwl_rx_cmd_buffer; struct iwl_testmode { struct iwl_trans *trans; const struct iwl_fw *fw; /* the mutex of the op_mode */ struct mutex *mutex; void *op_mode; int (*send_hcmd)(void *op_mode, struct iwl_host_cmd *host_cmd); u32 fw_major_ver; u32 fw_minor_ver; }; /** * iwl_tm_data - A data packet for testmode usages * @data: Pointer to be casted to relevant data type * (According to usage) * @len: Size of data in bytes * * This data structure is used for sending/receiving data packets * between internal testmode interfaces */ struct iwl_tm_data { void *data; u32 len; }; void iwl_tm_init(struct iwl_trans *trans, const struct iwl_fw *fw, struct mutex *mutex, void *op_mode); void iwl_tm_set_fw_ver(struct iwl_trans *trans, u32 fw_major_ver, u32 fw_minor_var); int iwl_tm_execute_cmd(struct iwl_testmode *testmode, u32 cmd, struct iwl_tm_data *data_in, struct iwl_tm_data *data_out); #define ADDR_IN_AL_MSK (0x80000000) #define GET_AL_ADDR(ofs) (ofs & ~(ADDR_IN_AL_MSK)) #define IS_AL_ADDR(ofs) (!!(ofs & (ADDR_IN_AL_MSK))) #endif #endif /* __IWL_TESTMODE_H__ */ backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/iwl-agn-hw.h000066400000000000000000000110021351064634500267040ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * All rights reserved. 
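/*
 * Quick demonstration of the AL-address helpers defined in testmode.h
 * above: bit 31 marks an AL-space offset and GET_AL_ADDR() strips the
 * marker. The sample offsets are arbitrary.
 */
#include <stdint.h>
#include <stdio.h>

#define ADDR_IN_AL_MSK   (0x80000000u)
#define GET_AL_ADDR(ofs) ((ofs) & ~(ADDR_IN_AL_MSK))
#define IS_AL_ADDR(ofs)  (!!((ofs) & (ADDR_IN_AL_MSK)))

int main(void)
{
	uint32_t ofs = 0x80001234u;

	printf("0x%x: AL=%d raw=0x%x\n", (unsigned)ofs, IS_AL_ADDR(ofs),
	       (unsigned)GET_AL_ADDR(ofs));
	printf("0x1234: AL=%d\n", IS_AL_ADDR(0x1234u));
	return 0;
}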
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ /* * Please use this file (iwl-agn-hw.h) only for hardware-related definitions. */ #ifndef __iwl_agn_hw_h__ #define __iwl_agn_hw_h__ #define IWLAGN_RTC_INST_LOWER_BOUND (0x000000) #define IWLAGN_RTC_INST_UPPER_BOUND (0x020000) #define IWLAGN_RTC_DATA_LOWER_BOUND (0x800000) #define IWLAGN_RTC_DATA_UPPER_BOUND (0x80C000) #define IWLAGN_RTC_INST_SIZE (IWLAGN_RTC_INST_UPPER_BOUND - \ IWLAGN_RTC_INST_LOWER_BOUND) #define IWLAGN_RTC_DATA_SIZE (IWLAGN_RTC_DATA_UPPER_BOUND - \ IWLAGN_RTC_DATA_LOWER_BOUND) #define IWL60_RTC_INST_LOWER_BOUND (0x000000) #define IWL60_RTC_INST_UPPER_BOUND (0x040000) #define IWL60_RTC_DATA_LOWER_BOUND (0x800000) #define IWL60_RTC_DATA_UPPER_BOUND (0x814000) #define IWL60_RTC_INST_SIZE \ (IWL60_RTC_INST_UPPER_BOUND - IWL60_RTC_INST_LOWER_BOUND) #define IWL60_RTC_DATA_SIZE \ (IWL60_RTC_DATA_UPPER_BOUND - IWL60_RTC_DATA_LOWER_BOUND) /* RSSI to dBm */ #define IWLAGN_RSSI_OFFSET 44 #define IWLAGN_DEFAULT_TX_RETRY 15 #define IWLAGN_MGMT_DFAULT_RETRY_LIMIT 3 #define IWLAGN_RTS_DFAULT_RETRY_LIMIT 60 #define IWLAGN_BAR_DFAULT_RETRY_LIMIT 60 #define IWLAGN_LOW_RETRY_LIMIT 7 /* Limit range of txpower output target to be between these values */ #define IWLAGN_TX_POWER_TARGET_POWER_MIN (0) /* 0 dBm: 1 milliwatt */ #define IWLAGN_TX_POWER_TARGET_POWER_MAX (16) /* 16 dBm */ /* EEPROM */ #define IWLAGN_EEPROM_IMG_SIZE 2048 /* high blocks contain PAPD data */ #define OTP_HIGH_IMAGE_SIZE_6x00 (6 * 512 * sizeof(u16)) /* 6 KB */ #define OTP_HIGH_IMAGE_SIZE_1000 (0x200 * sizeof(u16)) /* 1024 bytes */ #define OTP_MAX_LL_ITEMS_1000 (3) /* OTP blocks for 1000 */ #define OTP_MAX_LL_ITEMS_6x00 (4) /* OTP blocks for 6x00 */ #define OTP_MAX_LL_ITEMS_6x50 (7) /* OTP blocks for 6x50 */ #define OTP_MAX_LL_ITEMS_2x00 (4) /* OTP blocks for 2x00 */ #define IWLAGN_NUM_QUEUES 20 #endif /* __iwl_agn_hw_h__ */ 
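/*
 * Quick check of the bound-derived sizes defined in iwl-agn-hw.h above,
 * using the same constants from that header; it prints 128 KiB of
 * instruction space and 48 KiB of data space.
 */
#include <stdio.h>

#define IWLAGN_RTC_INST_LOWER_BOUND (0x000000)
#define IWLAGN_RTC_INST_UPPER_BOUND (0x020000)
#define IWLAGN_RTC_DATA_LOWER_BOUND (0x800000)
#define IWLAGN_RTC_DATA_UPPER_BOUND (0x80C000)

int main(void)
{
	printf("inst size = %d KiB\n",
	       (IWLAGN_RTC_INST_UPPER_BOUND -
		IWLAGN_RTC_INST_LOWER_BOUND) / 1024);
	printf("data size = %d KiB\n",
	       (IWLAGN_RTC_DATA_UPPER_BOUND -
		IWLAGN_RTC_DATA_LOWER_BOUND) / 1024);
	return 0;
}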
backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/iwl-config.h000066400000000000000000000506601351064634500270050ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. * Copyright (C) 2016 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright (C) 2016 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* *****************************************************************************/ #ifndef __IWL_CONFIG_H__ #define __IWL_CONFIG_H__ #include <linux/types.h> #include <linux/netdevice.h> #include <linux/ieee80211.h> #include <linux/nl80211.h> #include "iwl-csr.h" enum iwl_device_family { IWL_DEVICE_FAMILY_UNDEFINED, IWL_DEVICE_FAMILY_1000, IWL_DEVICE_FAMILY_100, IWL_DEVICE_FAMILY_2000, IWL_DEVICE_FAMILY_2030, IWL_DEVICE_FAMILY_105, IWL_DEVICE_FAMILY_135, IWL_DEVICE_FAMILY_5000, IWL_DEVICE_FAMILY_5150, IWL_DEVICE_FAMILY_6000, IWL_DEVICE_FAMILY_6000i, IWL_DEVICE_FAMILY_6005, IWL_DEVICE_FAMILY_6030, IWL_DEVICE_FAMILY_6050, IWL_DEVICE_FAMILY_6150, IWL_DEVICE_FAMILY_7000, IWL_DEVICE_FAMILY_8000, IWL_DEVICE_FAMILY_9000, IWL_DEVICE_FAMILY_22000, IWL_DEVICE_FAMILY_22560, IWL_DEVICE_FAMILY_AX210, }; /* * LED mode * IWL_LED_DEFAULT: use device default * IWL_LED_RF_STATE: turn LED on/off based on RF state * LED ON = RF ON * LED OFF = RF OFF * IWL_LED_BLINK: adjust led blink rate based on blink table * IWL_LED_DISABLE: led disabled */ enum iwl_led_mode { IWL_LED_DEFAULT, IWL_LED_RF_STATE, IWL_LED_BLINK, IWL_LED_DISABLE, }; /** * enum iwl_nvm_type - nvm formats * @IWL_NVM: the regular format * @IWL_NVM_EXT: extended NVM format * @IWL_NVM_SDP: NVM format used by 3168 series */ enum iwl_nvm_type { IWL_NVM, IWL_NVM_EXT, IWL_NVM_SDP, }; /* * This is the threshold value of plcp error rate per 100mSecs. It is * used to set and check for the validity of plcp_delta. */ #define IWL_MAX_PLCP_ERR_THRESHOLD_MIN 1 #define IWL_MAX_PLCP_ERR_THRESHOLD_DEF 50 #define IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF 100 #define IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF 200 #define IWL_MAX_PLCP_ERR_THRESHOLD_MAX 255 #define IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE 0 /* TX queue watchdog timeouts in mSecs */ #define IWL_WATCHDOG_DISABLED 0 #define IWL_DEF_WD_TIMEOUT (2500 * CPTCFG_IWL_TIMEOUT_FACTOR) #define IWL_LONG_WD_TIMEOUT (10000 * CPTCFG_IWL_TIMEOUT_FACTOR) #define IWL_MAX_WD_TIMEOUT (120000 * CPTCFG_IWL_TIMEOUT_FACTOR) #define IWL_DEFAULT_MAX_TX_POWER 22 #define IWL_TX_CSUM_NETIF_FLAGS (NETIF_F_IPV6_CSUM | NETIF_F_IP_CSUM |\ NETIF_F_TSO | NETIF_F_TSO6) /* Antenna presence definitions */ #define ANT_NONE 0x0 #define ANT_INVALID 0xff #define ANT_A BIT(0) #define ANT_B BIT(1) #define ANT_C BIT(2) #define ANT_AB (ANT_A | ANT_B) #define ANT_AC (ANT_A | ANT_C) #define ANT_BC (ANT_B | ANT_C) #define ANT_ABC (ANT_A | ANT_B | ANT_C) #define MAX_ANT_NUM 3 static inline u8 num_of_ant(u8 mask) { return !!((mask) & ANT_A) + !!((mask) & ANT_B) + !!((mask) & ANT_C); } /* * @max_ll_items: max number of OTP blocks * @shadow_ram_support: shadow support for OTP memory * @led_compensation: compensate on the led on/off time per HW according * to the deviation to achieve the desired led frequency. * The detailed algorithm is described in iwl-led.c * @wd_timeout: TX queues watchdog timeout * @max_event_log_size: size of event log buffer size for ucode event logging * @shadow_reg_enable: HW shadow register support * @apmg_wake_up_wa: should the MAC access REQ be asserted when a command * is in flight. This is due to a HW bug in 7260, 3160 and 7265. * @scd_chain_ext_wa: should the chain extension feature in SCD be disabled. * @max_tfd_queue_size: max number of entries in tfd queue.
*/ struct iwl_base_params { unsigned int wd_timeout; u16 eeprom_size; u16 max_event_log_size; u8 pll_cfg:1, /* for iwl_pcie_apm_init() */ shadow_ram_support:1, shadow_reg_enable:1, pcie_l1_allowed:1, apmg_wake_up_wa:1, scd_chain_ext_wa:1; u16 num_of_queues; /* def: HW dependent */ u32 max_tfd_queue_size; /* def: HW dependent */ u8 max_ll_items; u8 led_compensation; }; /* * @stbc: support Tx STBC and 1*SS Rx STBC * @ldpc: support Tx/Rx with LDPC * @use_rts_for_aggregation: use rts/cts protection for HT traffic * @ht40_bands: bitmap of bands (using %NL80211_BAND_*) that support HT40 */ struct iwl_ht_params { u8 ht_greenfield_support:1, stbc:1, ldpc:1, use_rts_for_aggregation:1; u8 ht40_bands; }; /* * Tx-backoff threshold * @temperature: The threshold in Celsius * @backoff: The tx-backoff in uSec */ struct iwl_tt_tx_backoff { s32 temperature; u32 backoff; }; #define TT_TX_BACKOFF_SIZE 6 /** * struct iwl_tt_params - thermal throttling parameters * @ct_kill_entry: CT Kill entry threshold * @ct_kill_exit: CT Kill exit threshold * @ct_kill_duration: The time intervals (in uSec) in which the driver needs * to check whether to exit CT Kill. * @dynamic_smps_entry: Dynamic SMPS entry threshold * @dynamic_smps_exit: Dynamic SMPS exit threshold * @tx_protection_entry: TX protection entry threshold * @tx_protection_exit: TX protection exit threshold * @tx_backoff: Array of thresholds for tx-backoff, in ascending order. * @support_ct_kill: Support CT Kill? * @support_dynamic_smps: Support dynamic SMPS? * @support_tx_protection: Support tx protection? * @support_tx_backoff: Support tx-backoff? */ struct iwl_tt_params { u32 ct_kill_entry; u32 ct_kill_exit; u32 ct_kill_duration; u32 dynamic_smps_entry; u32 dynamic_smps_exit; u32 tx_protection_entry; u32 tx_protection_exit; struct iwl_tt_tx_backoff tx_backoff[TT_TX_BACKOFF_SIZE]; u8 support_ct_kill:1, support_dynamic_smps:1, support_tx_protection:1, support_tx_backoff:1; }; /* * information on how to parse the EEPROM */ #define EEPROM_REG_BAND_1_CHANNELS 0x08 #define EEPROM_REG_BAND_2_CHANNELS 0x26 #define EEPROM_REG_BAND_3_CHANNELS 0x42 #define EEPROM_REG_BAND_4_CHANNELS 0x5C #define EEPROM_REG_BAND_5_CHANNELS 0x74 #define EEPROM_REG_BAND_24_HT40_CHANNELS 0x82 #define EEPROM_REG_BAND_52_HT40_CHANNELS 0x92 #define EEPROM_6000_REG_BAND_24_HT40_CHANNELS 0x80 #define EEPROM_REGULATORY_BAND_NO_HT40 0 /* lower blocks contain EEPROM image and calibration data */ #define OTP_LOW_IMAGE_SIZE_2K (2 * 512 * sizeof(u16)) /* 2 KB */ #define OTP_LOW_IMAGE_SIZE_16K (16 * 512 * sizeof(u16)) /* 16 KB */ #define OTP_LOW_IMAGE_SIZE_32K (32 * 512 * sizeof(u16)) /* 32 KB */ struct iwl_eeprom_params { const u8 regulatory_bands[7]; bool enhanced_txpower; }; /* Tx-backoff power threshold * @pwr: The power limit in mW * @backoff: The tx-backoff in uSec */ struct iwl_pwr_tx_backoff { u32 pwr; u32 backoff; }; /** * struct iwl_csr_params * * @flag_sw_reset: reset the device * @flag_mac_clock_ready: * Indicates MAC (ucode processor, etc.) is powered up and can run. * Internal resources are accessible. * NOTE: This does not indicate that the processor is actually running. * NOTE: This does not indicate that device has completed * init or post-power-down restore of internal SRAM memory. * Use CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP as indication that * SRAM is restored and uCode is in normal operation mode. * This note is relevant only for pre 5xxx devices.
* NOTE: After device reset, this bit remains "0" until host sets * INIT_DONE * @flag_init_done: Host sets this to put device into fully operational * D0 power mode. Host resets this after SW_RESET to put device into * low power mode. * @flag_mac_access_req: Host sets this to request and maintain MAC wakeup, * to allow host access to device-internal resources. Host must wait for * mac_clock_ready (and !GOING_TO_SLEEP) before accessing non-CSR device * registers. * @flag_val_mac_access_en: mac access is enabled * @flag_master_dis: disable master * @flag_stop_master: stop master * @addr_sw_reset: address for resetting the device * @mac_addr0_otp: first part of MAC address from OTP * @mac_addr1_otp: second part of MAC address from OTP * @mac_addr0_strap: first part of MAC address from strap * @mac_addr1_strap: second part of MAC address from strap */ struct iwl_csr_params { u8 flag_sw_reset; u8 flag_mac_clock_ready; u8 flag_init_done; u8 flag_mac_access_req; u8 flag_val_mac_access_en; u8 flag_master_dis; u8 flag_stop_master; u8 addr_sw_reset; u32 mac_addr0_otp; u32 mac_addr1_otp; u32 mac_addr0_strap; u32 mac_addr1_strap; }; /** * struct iwl_cfg * @name: Official name of the device * @fw_name_pre: Firmware filename prefix. The api version and extension * (.ucode) will be added to filename before loading from disk. The * filename is constructed as fw_name_pre<api>.ucode. * @ucode_api_max: Highest version of uCode API supported by driver. * @ucode_api_min: Lowest version of uCode API supported by driver. * @max_inst_size: The maximal length of the fw inst section (only DVM) * @max_data_size: The maximal length of the fw data section (only DVM) * @valid_tx_ant: valid transmit antenna * @valid_rx_ant: valid receive antenna * @non_shared_ant: the antenna that is for WiFi only * @nvm_ver: NVM version * @nvm_calib_ver: NVM calibration version * @lib: pointer to the lib ops * @base_params: pointer to basic parameters * @ht_params: pointer to HT parameters * @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off) * @rx_with_siso_diversity: 1x1 device with rx antenna diversity * @internal_wimax_coex: internal wifi/wimax combo device * @high_temp: whether this NIC is designated to operate at high temperature.
* @host_interrupt_operation_mode: device needs host interrupt operation * mode set * @nvm_hw_section_num: the ID of the HW NVM section * @mac_addr_from_csr: read HW address from CSR registers * @features: hw features, any combination of feature_whitelist * @pwr_tx_backoffs: translation table between power limits and backoffs * @csr: csr flags and addresses that are different across devices * @max_rx_agg_size: max RX aggregation size of the ADDBA request/response * @max_tx_agg_size: max TX aggregation size of the ADDBA request/response * @max_ht_ampdu_factor: the exponent of the max length of A-MPDU that the * station can receive in HT * @max_vht_ampdu_exponent: the exponent of the max length of A-MPDU that the * station can receive in VHT * @dccm_offset: offset from which DCCM begins * @dccm_len: length of DCCM (including runtime stack CCM) * @dccm2_offset: offset from which the second DCCM begins * @dccm2_len: length of the second DCCM * @smem_offset: offset from which the SMEM begins * @smem_len: the length of SMEM * @mq_rx_supported: multi-queue rx support * @vht_mu_mimo_supported: VHT MU-MIMO support * @rf_id: need to read rf_id to determine the firmware image * @integrated: discrete or integrated * @gen2: 22000 and on transport operation * @cdb: CDB support * @nvm_type: see &enum iwl_nvm_type * @d3_debug_data_base_addr: base address where D3 debug data is stored * @d3_debug_data_length: length of the D3 debug data * @bisr_workaround: BISR hardware workaround (for 22260 series devices) * @min_txq_size: minimum number of slots required in a TX queue * @umac_prph_offset: offset to add to UMAC periphery address * @uhb_supported: ultra high band channels supported * @min_256_ba_txq_size: minimum number of slots required in a TX queue which * supports 256 BA aggregation * * We enable the driver to be backward compatible wrt. hardware features. * API differences in uCode shouldn't be handled here but through TLVs * and/or the uCode API version instead. 
*/ struct iwl_cfg { /* params specific to an individual device within a device family */ const char *name; const char *fw_name_pre; /* params not likely to change within a device family */ const struct iwl_base_params *base_params; /* params likely to change within a device family */ const struct iwl_ht_params *ht_params; const struct iwl_eeprom_params *eeprom_params; const struct iwl_pwr_tx_backoff *pwr_tx_backoffs; const char *default_nvm_file_C_step; const struct iwl_tt_params *thermal_params; const struct iwl_csr_params *csr; enum iwl_device_family device_family; enum iwl_led_mode led_mode; enum iwl_nvm_type nvm_type; u32 max_data_size; u32 max_inst_size; netdev_features_t features; u32 dccm_offset; u32 dccm_len; u32 dccm2_offset; u32 dccm2_len; u32 smem_offset; u32 smem_len; u32 soc_latency; u16 nvm_ver; u16 nvm_calib_ver; u32 rx_with_siso_diversity:1, bt_shared_single_ant:1, internal_wimax_coex:1, host_interrupt_operation_mode:1, high_temp:1, mac_addr_from_csr:1, lp_xtal_workaround:1, disable_dummy_notification:1, apmg_not_supported:1, mq_rx_supported:1, vht_mu_mimo_supported:1, rf_id:1, integrated:1, use_tfh:1, gen2:1, cdb:1, dbgc_supported:1, bisr_workaround:1, uhb_supported:1; u8 valid_tx_ant; u8 valid_rx_ant; u8 non_shared_ant; u8 nvm_hw_section_num; u8 max_rx_agg_size; u8 max_tx_agg_size; u8 max_ht_ampdu_exponent; u8 max_vht_ampdu_exponent; u8 ucode_api_max; u8 ucode_api_min; u32 min_umac_error_event_table; u32 extra_phy_cfg_flags; u32 d3_debug_data_base_addr; u32 d3_debug_data_length; u32 min_txq_size; u32 umac_prph_offset; u32 fw_mon_smem_write_ptr_addr; u32 fw_mon_smem_write_ptr_msk; u32 fw_mon_smem_cycle_cnt_ptr_addr; u32 fw_mon_smem_cycle_cnt_ptr_msk; u32 gp2_reg_addr; u32 min_256_ba_txq_size; }; extern const struct iwl_csr_params iwl_csr_v1; extern const struct iwl_csr_params iwl_csr_v2; /* * This list declares the config structures for all devices. 
*/ #if IS_ENABLED(CPTCFG_IWLMVM) extern const struct iwl_cfg iwl7260_2ac_cfg; extern const struct iwl_cfg iwl7260_2ac_cfg_high_temp; extern const struct iwl_cfg iwl7260_2n_cfg; extern const struct iwl_cfg iwl7260_n_cfg; extern const struct iwl_cfg iwl3160_2ac_cfg; extern const struct iwl_cfg iwl3160_2n_cfg; extern const struct iwl_cfg iwl3160_n_cfg; extern const struct iwl_cfg iwl3165_2ac_cfg; extern const struct iwl_cfg iwl3168_2ac_cfg; extern const struct iwl_cfg iwl7265_2ac_cfg; extern const struct iwl_cfg iwl7265_2n_cfg; extern const struct iwl_cfg iwl7265_n_cfg; extern const struct iwl_cfg iwl7265d_2ac_cfg; extern const struct iwl_cfg iwl7265d_2n_cfg; extern const struct iwl_cfg iwl7265d_n_cfg; extern const struct iwl_cfg iwl8260_2n_cfg; extern const struct iwl_cfg iwl8260_2ac_cfg; extern const struct iwl_cfg iwl8265_2ac_cfg; extern const struct iwl_cfg iwl8275_2ac_cfg; extern const struct iwl_cfg iwl4165_2ac_cfg; #endif /* IS_ENABLED(CPTCFG_IWLMVM) */ #if IS_ENABLED(CPTCFG_IWLMVM) || IS_ENABLED(CPTCFG_IWLFMAC) extern const struct iwl_cfg iwl9160_2ac_cfg; extern const struct iwl_cfg iwl9260_2ac_cfg; extern const struct iwl_cfg iwl9260_2ac_160_cfg; extern const struct iwl_cfg iwl9260_killer_2ac_cfg; extern const struct iwl_cfg iwl9270_2ac_cfg; extern const struct iwl_cfg iwl9460_2ac_cfg; extern const struct iwl_cfg iwl9560_2ac_cfg; extern const struct iwl_cfg iwl9560_2ac_cfg_quz_a0_jf_b0_soc; extern const struct iwl_cfg iwl9560_2ac_160_cfg; extern const struct iwl_cfg iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc; extern const struct iwl_cfg iwl9460_2ac_cfg_soc; extern const struct iwl_cfg iwl9461_2ac_cfg_soc; extern const struct iwl_cfg iwl9461_2ac_cfg_quz_a0_jf_b0_soc; extern const struct iwl_cfg iwl9462_2ac_cfg_soc; extern const struct iwl_cfg iwl9462_2ac_cfg_quz_a0_jf_b0_soc; extern const struct iwl_cfg iwl9560_2ac_cfg_soc; extern const struct iwl_cfg iwl9560_2ac_160_cfg_soc; extern const struct iwl_cfg iwl9560_killer_2ac_cfg_soc; extern const struct iwl_cfg iwl9560_killer_s_2ac_cfg_soc; extern const struct iwl_cfg iwl9560_killer_i_2ac_cfg_quz_a0_jf_b0_soc; extern const struct iwl_cfg iwl9560_killer_s_2ac_cfg_quz_a0_jf_b0_soc; extern const struct iwl_cfg iwl9460_2ac_cfg_shared_clk; extern const struct iwl_cfg iwl9461_2ac_cfg_shared_clk; extern const struct iwl_cfg iwl9462_2ac_cfg_shared_clk; extern const struct iwl_cfg iwl9560_2ac_cfg_shared_clk; extern const struct iwl_cfg iwl9560_2ac_160_cfg_shared_clk; extern const struct iwl_cfg iwl9560_killer_2ac_cfg_shared_clk; extern const struct iwl_cfg iwl9560_killer_s_2ac_cfg_shared_clk; extern const struct iwl_cfg iwl22000_2ac_cfg_hr; extern const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb; extern const struct iwl_cfg iwl22000_2ac_cfg_jf; extern const struct iwl_cfg iwl_ax101_cfg_qu_hr; extern const struct iwl_cfg iwl_ax101_cfg_qu_c0_hr_b0; extern const struct iwl_cfg iwl_ax101_cfg_quz_hr; extern const struct iwl_cfg iwl22000_2ax_cfg_hr; extern const struct iwl_cfg iwl_ax200_cfg_cc; extern const struct iwl_cfg iwl_ax201_cfg_qu_hr; extern const struct iwl_cfg iwl_ax201_cfg_qu_c0_hr_b0; extern const struct iwl_cfg iwl_ax201_cfg_quz_hr; extern const struct iwl_cfg iwl_ax1650i_cfg_quz_hr; extern const struct iwl_cfg iwl_ax1650s_cfg_quz_hr; extern const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0; extern const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0; extern const struct iwl_cfg killer1650x_2ax_cfg; extern const struct iwl_cfg killer1650w_2ax_cfg; extern const struct iwl_cfg
iwl9461_2ac_cfg_qu_b0_jf_b0; extern const struct iwl_cfg iwl9462_2ac_cfg_qu_b0_jf_b0; extern const struct iwl_cfg iwl9560_2ac_cfg_qu_b0_jf_b0; extern const struct iwl_cfg iwl9560_2ac_160_cfg_qu_b0_jf_b0; extern const struct iwl_cfg iwl9461_2ac_cfg_qu_c0_jf_b0; extern const struct iwl_cfg iwl9462_2ac_cfg_qu_c0_jf_b0; extern const struct iwl_cfg iwl9560_2ac_cfg_qu_c0_jf_b0; extern const struct iwl_cfg iwl9560_2ac_160_cfg_qu_c0_jf_b0; extern const struct iwl_cfg killer1550i_2ac_cfg_qu_b0_jf_b0; extern const struct iwl_cfg killer1550s_2ac_cfg_qu_b0_jf_b0; extern const struct iwl_cfg iwl22000_2ax_cfg_jf; extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0_f0; extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0_f0; extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0; extern const struct iwl_cfg iwl9560_2ac_cfg_qnj_jf_b0; extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0; extern const struct iwl_cfg iwlax210_2ax_cfg_so_jf_a0; extern const struct iwl_cfg iwlax210_2ax_cfg_so_hr_a0; extern const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0; extern const struct iwl_cfg iwlax210_2ax_cfg_ty_gf_a0; extern const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0; #endif /* CPTCFG_IWLMVM || CPTCFG_IWLFMAC */ #endif /* __IWL_CONFIG_H__ */ backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h000066400000000000000000000247751351064634500310170ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * BSD LICENSE * * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #ifndef __iwl_context_info_file_gen3_h__ #define __iwl_context_info_file_gen3_h__ #include "iwl-context-info.h" #define CSR_CTXT_INFO_BOOT_CTRL 0x0 #define CSR_CTXT_INFO_ADDR 0x118 #define CSR_IML_DATA_ADDR 0x120 #define CSR_IML_SIZE_ADDR 0x128 #define CSR_IML_RESP_ADDR 0x12c /* Set bit for enabling automatic function boot */ #define CSR_AUTO_FUNC_BOOT_ENA BIT(1) /* Set bit for initiating function boot */ #define CSR_AUTO_FUNC_INIT BIT(7) /** * enum iwl_prph_scratch_mtr_format - tfd size configuration * @IWL_PRPH_MTR_FORMAT_16B: 16 bit tfd * @IWL_PRPH_MTR_FORMAT_32B: 32 bit tfd * @IWL_PRPH_MTR_FORMAT_64B: 64 bit tfd * @IWL_PRPH_MTR_FORMAT_256B: 256 bit tfd */ enum iwl_prph_scratch_mtr_format { IWL_PRPH_MTR_FORMAT_16B = 0x0, IWL_PRPH_MTR_FORMAT_32B = 0x40000, IWL_PRPH_MTR_FORMAT_64B = 0x80000, IWL_PRPH_MTR_FORMAT_256B = 0xC0000, }; /** * enum iwl_prph_scratch_flags - PRPH scratch control flags * @IWL_PRPH_SCRATCH_EARLY_DEBUG_EN: enable early debug conf * @IWL_PRPH_SCRATCH_EDBG_DEST_DRAM: use DRAM, with size allocated * in hwm config. * @IWL_PRPH_SCRATCH_EDBG_DEST_INTERNAL: use buffer on SRAM * @IWL_PRPH_SCRATCH_EDBG_DEST_ST_ARBITER: use st arbiter, mainly for * multicomm. * @IWL_PRPH_SCRATCH_EDBG_DEST_TB22DTF: route debug data to SoC HW * @IWL_PRPH_SCRATCH_RB_SIZE_4K: Use 4K RB size (the default is 2K) * @IWL_PRPH_SCRATCH_MTR_MODE: format used for completion - 0: for * completion descriptor, 1 for responses (legacy) * @IWL_PRPH_SCRATCH_MTR_FORMAT: a mask for the size of the tfd. * There are 4 optional values: 0: 16 bit, 1: 32 bit, 2: 64 bit, * 3: 256 bit.
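 *
 * As an illustrative sketch only (not code from this driver), a 4K RB size
 * together with the 256-bit TFD format would be encoded as:
 *
 *	prph_sc->ctrl_cfg.control.control_flags =
 *		cpu_to_le32(IWL_PRPH_SCRATCH_RB_SIZE_4K |
 *			    IWL_PRPH_MTR_FORMAT_256B);
 *
 * where the IWL_PRPH_MTR_FORMAT_256B value already lies inside the
 * IWL_PRPH_SCRATCH_MTR_FORMAT mask (BIT(18) | BIT(19)).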
*/ enum iwl_prph_scratch_flags { IWL_PRPH_SCRATCH_EARLY_DEBUG_EN = BIT(4), IWL_PRPH_SCRATCH_EDBG_DEST_DRAM = BIT(8), IWL_PRPH_SCRATCH_EDBG_DEST_INTERNAL = BIT(9), IWL_PRPH_SCRATCH_EDBG_DEST_ST_ARBITER = BIT(10), IWL_PRPH_SCRATCH_EDBG_DEST_TB22DTF = BIT(11), IWL_PRPH_SCRATCH_RB_SIZE_4K = BIT(16), IWL_PRPH_SCRATCH_MTR_MODE = BIT(17), IWL_PRPH_SCRATCH_MTR_FORMAT = BIT(18) | BIT(19), }; /* * struct iwl_prph_scratch_version - version structure * @mac_id: SKU and revision id * @version: prph scratch information version id * @size: the size of the context information in DWs * @reserved: reserved */ struct iwl_prph_scratch_version { __le16 mac_id; __le16 version; __le16 size; __le16 reserved; } __packed; /* PERIPH_SCRATCH_VERSION_S */ /* * struct iwl_prph_scratch_control - control structure * @control_flags: context information flags see &enum iwl_prph_scratch_flags * @reserved: reserved */ struct iwl_prph_scratch_control { __le32 control_flags; __le32 reserved; } __packed; /* PERIPH_SCRATCH_CONTROL_S */ /* * struct iwl_prph_scratch_ror_cfg - ror config * @ror_base_addr: ror start address * @ror_size: ror size in DWs * @reserved: reserved */ struct iwl_prph_scratch_ror_cfg { __le64 ror_base_addr; __le32 ror_size; __le32 reserved; } __packed; /* PERIPH_SCRATCH_ROR_CFG_S */ /* * struct iwl_prph_scratch_hwm_cfg - hwm config * @hwm_base_addr: hwm start address * @hwm_size: hwm size in DWs * @reserved: reserved */ struct iwl_prph_scratch_hwm_cfg { __le64 hwm_base_addr; __le32 hwm_size; __le32 reserved; } __packed; /* PERIPH_SCRATCH_HWM_CFG_S */ /* * struct iwl_prph_scratch_rbd_cfg - RBDs configuration * @free_rbd_addr: default queue free RB CB base address * @reserved: reserved */ struct iwl_prph_scratch_rbd_cfg { __le64 free_rbd_addr; __le32 reserved; } __packed; /* PERIPH_SCRATCH_RBD_CFG_S */ /* * struct iwl_prph_scratch_ctrl_cfg - prph scratch ctrl and config * @version: version information of context info and HW * @control: control flags of FH configurations * @ror_cfg: ror configuration * @hwm_cfg: hwm configuration * @rbd_cfg: default RX queue configuration */ struct iwl_prph_scratch_ctrl_cfg { struct iwl_prph_scratch_version version; struct iwl_prph_scratch_control control; struct iwl_prph_scratch_ror_cfg ror_cfg; struct iwl_prph_scratch_hwm_cfg hwm_cfg; struct iwl_prph_scratch_rbd_cfg rbd_cfg; } __packed; /* PERIPH_SCRATCH_CTRL_CFG_S */ /* * struct iwl_prph_scratch - peripheral scratch mapping * @ctrl_cfg: control and configuration of prph scratch * @dram: firmware images addresses in DRAM * @reserved: reserved */ struct iwl_prph_scratch { struct iwl_prph_scratch_ctrl_cfg ctrl_cfg; __le32 reserved[16]; struct iwl_context_info_dram dram; } __packed; /* PERIPH_SCRATCH_S */ /* * struct iwl_prph_info - peripheral information * @boot_stage_mirror: reflects the value in the Boot Stage CSR register * @ipc_status_mirror: reflects the value in the IPC Status CSR register * @sleep_notif: indicates the peripheral sleep status * @reserved: reserved */ struct iwl_prph_info { __le32 boot_stage_mirror; __le32 ipc_status_mirror; __le32 sleep_notif; __le32 reserved; } __packed; /* PERIPH_INFO_S */ /* * struct iwl_context_info_gen3 - device INIT configuration * @version: version of the context information * @size: size of context information in DWs * @config: context in which the peripheral would execute - a subset of * capability csr register published by the peripheral * @prph_info_base_addr: the peripheral information structure start address * @cr_head_idx_arr_base_addr: the completion ring head index array * 
start address * @tr_tail_idx_arr_base_addr: the transfer ring tail index array * start address * @cr_tail_idx_arr_base_addr: the completion ring tail index array * start address * @tr_head_idx_arr_base_addr: the transfer ring head index array * start address * @cr_idx_arr_size: number of entries in the completion ring index array * @tr_idx_arr_size: number of entries in the transfer ring index array * @mtr_base_addr: the message transfer ring start address * @mcr_base_addr: the message completion ring start address * @mtr_size: number of entries which the message transfer ring can hold * @mcr_size: number of entries which the message completion ring can hold * @mtr_doorbell_vec: the doorbell vector associated with the message * transfer ring * @mcr_doorbell_vec: the doorbell vector associated with the message * completion ring * @mtr_msi_vec: the MSI which shall be generated by the peripheral after * completing a transfer descriptor in the message transfer ring * @mcr_msi_vec: the MSI which shall be generated by the peripheral after * completing a completion descriptor in the message completion ring * @mtr_opt_header_size: the size of the optional header in the transfer * descriptor associated with the message transfer ring in DWs * @mtr_opt_footer_size: the size of the optional footer in the transfer * descriptor associated with the message transfer ring in DWs * @mcr_opt_header_size: the size of the optional header in the completion * descriptor associated with the message completion ring in DWs * @mcr_opt_footer_size: the size of the optional footer in the completion * descriptor associated with the message completion ring in DWs * @msg_rings_ctrl_flags: message rings control flags * @prph_info_msi_vec: the MSI which shall be generated by the peripheral * after updating the Peripheral Information structure * @prph_scratch_base_addr: the peripheral scratch structure start address * @prph_scratch_size: the size of the peripheral scratch structure in DWs * @reserved: reserved */ struct iwl_context_info_gen3 { __le16 version; __le16 size; __le32 config; __le64 prph_info_base_addr; __le64 cr_head_idx_arr_base_addr; __le64 tr_tail_idx_arr_base_addr; __le64 cr_tail_idx_arr_base_addr; __le64 tr_head_idx_arr_base_addr; __le16 cr_idx_arr_size; __le16 tr_idx_arr_size; __le64 mtr_base_addr; __le64 mcr_base_addr; __le16 mtr_size; __le16 mcr_size; __le16 mtr_doorbell_vec; __le16 mcr_doorbell_vec; __le16 mtr_msi_vec; __le16 mcr_msi_vec; u8 mtr_opt_header_size; u8 mtr_opt_footer_size; u8 mcr_opt_header_size; u8 mcr_opt_footer_size; __le16 msg_rings_ctrl_flags; __le16 prph_info_msi_vec; __le64 prph_scratch_base_addr; __le32 prph_scratch_size; __le32 reserved; } __packed; /* IPC_CONTEXT_INFO_S */ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans, const struct fw_img *fw); void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans); #endif /* __iwl_context_info_file_gen3_h__ */ backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h000066400000000000000000000200011351064634500301370ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. 
* * GPL LICENSE SUMMARY * * Copyright(c) 2017 Intel Deutschland GmbH * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * BSD LICENSE * * Copyright(c) 2017 Intel Deutschland GmbH * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #ifndef __iwl_context_info_file_h__ #define __iwl_context_info_file_h__ /* maximum number of DRAM map entries supported by FW */ #define IWL_MAX_DRAM_ENTRY 64 #define CSR_CTXT_INFO_BA 0x40 /** * enum iwl_context_info_flags - Context information control flags * @IWL_CTXT_INFO_AUTO_FUNC_INIT: If set, FW will not wait before interrupting * the init done for driver command that configures several system modes * @IWL_CTXT_INFO_EARLY_DEBUG: enable early debug * @IWL_CTXT_INFO_ENABLE_CDMP: enable core dump * @IWL_CTXT_INFO_RB_CB_SIZE_POS: position of the RBD Cyclic Buffer Size * exponent, the actual size is 2**value, valid sizes are 8-2048. * The value is four bits long.
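 * (For example, a field value of 9, i.e. 0x9 << IWL_CTXT_INFO_RB_CB_SIZE_POS
 * in the control flags, selects a 2**9 = 512 entry cyclic buffer.)
 *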
Maximum valid exponent is 12 * @IWL_CTXT_INFO_TFD_FORMAT_LONG: use long TFD Format (the * default is short format - not supported by the driver) * @IWL_CTXT_INFO_RB_SIZE_POS: RB size position * (values are IWL_CTXT_INFO_RB_SIZE_*K) * @IWL_CTXT_INFO_RB_SIZE_1K: Value for 1K RB size * @IWL_CTXT_INFO_RB_SIZE_2K: Value for 2K RB size * @IWL_CTXT_INFO_RB_SIZE_4K: Value for 4K RB size * @IWL_CTXT_INFO_RB_SIZE_8K: Value for 8K RB size * @IWL_CTXT_INFO_RB_SIZE_12K: Value for 12K RB size * @IWL_CTXT_INFO_RB_SIZE_16K: Value for 16K RB size * @IWL_CTXT_INFO_RB_SIZE_20K: Value for 20K RB size * @IWL_CTXT_INFO_RB_SIZE_24K: Value for 24K RB size * @IWL_CTXT_INFO_RB_SIZE_28K: Value for 28K RB size * @IWL_CTXT_INFO_RB_SIZE_32K: Value for 32K RB size */ enum iwl_context_info_flags { IWL_CTXT_INFO_AUTO_FUNC_INIT = BIT(0), IWL_CTXT_INFO_EARLY_DEBUG = BIT(1), IWL_CTXT_INFO_ENABLE_CDMP = BIT(2), IWL_CTXT_INFO_RB_CB_SIZE_POS = 4, IWL_CTXT_INFO_TFD_FORMAT_LONG = BIT(8), IWL_CTXT_INFO_RB_SIZE_POS = 9, IWL_CTXT_INFO_RB_SIZE_1K = 0x1, IWL_CTXT_INFO_RB_SIZE_2K = 0x2, IWL_CTXT_INFO_RB_SIZE_4K = 0x4, IWL_CTXT_INFO_RB_SIZE_8K = 0x8, IWL_CTXT_INFO_RB_SIZE_12K = 0x9, IWL_CTXT_INFO_RB_SIZE_16K = 0xa, IWL_CTXT_INFO_RB_SIZE_20K = 0xb, IWL_CTXT_INFO_RB_SIZE_24K = 0xc, IWL_CTXT_INFO_RB_SIZE_28K = 0xd, IWL_CTXT_INFO_RB_SIZE_32K = 0xe, }; /* * struct iwl_context_info_version - version structure * @mac_id: SKU and revision id * @version: context information version id * @size: the size of the context information in DWs */ struct iwl_context_info_version { __le16 mac_id; __le16 version; __le16 size; __le16 reserved; } __packed; /* * struct iwl_context_info_control - version structure * @control_flags: context information flags see &enum iwl_context_info_flags */ struct iwl_context_info_control { __le32 control_flags; __le32 reserved; } __packed; /* * struct iwl_context_info_dram - images DRAM map * each entry in the map represents a DRAM chunk of up to 32 KB * @umac_img: UMAC image DRAM map * @lmac_img: LMAC image DRAM map * @virtual_img: paged image DRAM map */ struct iwl_context_info_dram { __le64 umac_img[IWL_MAX_DRAM_ENTRY]; __le64 lmac_img[IWL_MAX_DRAM_ENTRY]; __le64 virtual_img[IWL_MAX_DRAM_ENTRY]; } __packed; /* * struct iwl_context_info_rbd_cfg - RBDs configuration * @free_rbd_addr: default queue free RB CB base address * @used_rbd_addr: default queue used RB CB base address * @status_wr_ptr: default queue used RB status write pointer */ struct iwl_context_info_rbd_cfg { __le64 free_rbd_addr; __le64 used_rbd_addr; __le64 status_wr_ptr; } __packed; /* * struct iwl_context_info_hcmd_cfg - command queue configuration * @cmd_queue_addr: address of command queue * @cmd_queue_size: number of entries */ struct iwl_context_info_hcmd_cfg { __le64 cmd_queue_addr; u8 cmd_queue_size; u8 reserved[7]; } __packed; /* * struct iwl_context_info_dump_cfg - Core Dump configuration * @core_dump_addr: core dump (debug DRAM address) start address * @core_dump_size: size, in DWs */ struct iwl_context_info_dump_cfg { __le64 core_dump_addr; __le32 core_dump_size; __le32 reserved; } __packed; /* * struct iwl_context_info_pnvm_cfg - platform NVM data configuration * @platform_nvm_addr: Platform NVM data start address * @platform_nvm_size: size in DWs */ struct iwl_context_info_pnvm_cfg { __le64 platform_nvm_addr; __le32 platform_nvm_size; __le32 reserved; } __packed; /* * struct iwl_context_info_early_dbg_cfg - early debug configuration for * dumping DRAM addresses * @early_debug_addr: early debug start address * @early_debug_size: size in DWs */ 
struct iwl_context_info_early_dbg_cfg { __le64 early_debug_addr; __le32 early_debug_size; __le32 reserved; } __packed; /* * struct iwl_context_info - device INIT configuration * @version: version information of context info and HW * @control: control flags of FH configurations * @rbd_cfg: default RX queue configuration * @hcmd_cfg: command queue configuration * @dump_cfg: core dump data * @edbg_cfg: early debug configuration * @pnvm_cfg: platform nvm configuration * @dram: firmware image addresses in DRAM */ struct iwl_context_info { struct iwl_context_info_version version; struct iwl_context_info_control control; __le64 reserved0; struct iwl_context_info_rbd_cfg rbd_cfg; struct iwl_context_info_hcmd_cfg hcmd_cfg; __le32 reserved1[4]; struct iwl_context_info_dump_cfg dump_cfg; struct iwl_context_info_early_dbg_cfg edbg_cfg; struct iwl_context_info_pnvm_cfg pnvm_cfg; __le32 reserved2[16]; struct iwl_context_info_dram dram; __le32 reserved3[16]; } __packed; int iwl_pcie_ctxt_info_init(struct iwl_trans *trans, const struct fw_img *fw); void iwl_pcie_ctxt_info_free(struct iwl_trans *trans); void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans); int iwl_pcie_init_fw_sec(struct iwl_trans *trans, const struct fw_img *fw, struct iwl_context_info_dram *ctxt_dram); #endif /* __iwl_context_info_file_h__ */ backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/iwl-csr.h000066400000000000000000000624251351064634500263310ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2016 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #ifndef __iwl_csr_h__ #define __iwl_csr_h__ /* * CSR (control and status registers) * * CSR registers are mapped directly into PCI bus space, and are accessible * whenever platform supplies power to device, even when device is in * low power states due to driver-invoked device resets * (e.g. CSR_RESET_REG_FLAG_SW_RESET) or uCode-driven power-saving modes. * * Use iwl_write32() and iwl_read32() family to access these registers; * these provide simple PCI bus access, without waking up the MAC. * Do not use iwl_write_direct32() family for these registers; * no need to "grab nic access" via CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ. * The MAC (uCode processor, etc.) does not need to be powered up for accessing * the CSR registers. * * NOTE: Device does need to be awake in order to read this memory * via CSR_EEPROM and CSR_OTP registers */ #define CSR_BASE (0x000) #define CSR_HW_IF_CONFIG_REG (CSR_BASE+0x000) /* hardware interface config */ #define CSR_INT_COALESCING (CSR_BASE+0x004) /* accum ints, 32-usec units */ #define CSR_INT (CSR_BASE+0x008) /* host interrupt status/ack */ #define CSR_INT_MASK (CSR_BASE+0x00c) /* host interrupt enable */ #define CSR_FH_INT_STATUS (CSR_BASE+0x010) /* busmaster int status/ack*/ #define CSR_GPIO_IN (CSR_BASE+0x018) /* read external chip pins */ #define CSR_RESET (CSR_BASE+0x020) /* busmaster enable, NMI, etc*/ #define CSR_GP_CNTRL (CSR_BASE+0x024) /* 2nd byte of CSR_INT_COALESCING, not accessible via iwl_write32()! */ #define CSR_INT_PERIODIC_REG (CSR_BASE+0x005) /* * Hardware revision info * Bit fields: * 31-16: Reserved * 15-4: Type of device: see CSR_HW_REV_TYPE_xxx definitions * 3-2: Revision step: 0 = A, 1 = B, 2 = C, 3 = D * 1-0: "Dash" (-) value, as in A-1, etc. */ #define CSR_HW_REV (CSR_BASE+0x028) /* * RF ID revision info * Bit fields: * 31:24: Reserved (set to 0x0) * 23:12: Type * 11:8: Step (A - 0x0, B - 0x1, etc) * 7:4: Dash * 3:0: Flavor */ #define CSR_HW_RF_ID (CSR_BASE+0x09c) /* * EEPROM and OTP (one-time-programmable) memory reads * * NOTE: Device must be awake, initialized via apm_ops.init(), * in order to read. */ #define CSR_EEPROM_REG (CSR_BASE+0x02c) #define CSR_EEPROM_GP (CSR_BASE+0x030) #define CSR_OTP_GP_REG (CSR_BASE+0x034) #define CSR_GIO_REG (CSR_BASE+0x03C) #define CSR_GP_UCODE_REG (CSR_BASE+0x048) #define CSR_GP_DRIVER_REG (CSR_BASE+0x050) /* * UCODE-DRIVER GP (general purpose) mailbox registers. * SET/CLR registers set/clear bit(s) if "1" is written. 
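 *
 * For example (sketch only, not lifted from driver code), the host could
 * latch the CMD_BLOCKED bit via the SET register and later release it via
 * the CLR register:
 *
 *	iwl_write32(trans, CSR_UCODE_DRV_GP1_SET,
 *		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
 *	...
 *	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
 *		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);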
*/ #define CSR_UCODE_DRV_GP1 (CSR_BASE+0x054) #define CSR_UCODE_DRV_GP1_SET (CSR_BASE+0x058) #define CSR_UCODE_DRV_GP1_CLR (CSR_BASE+0x05c) #define CSR_UCODE_DRV_GP2 (CSR_BASE+0x060) #define CSR_MBOX_SET_REG (CSR_BASE + 0x88) #define CSR_LED_REG (CSR_BASE+0x094) #define CSR_DRAM_INT_TBL_REG (CSR_BASE+0x0A0) #define CSR_MAC_SHADOW_REG_CTRL (CSR_BASE + 0x0A8) /* 6000 and up */ #define CSR_MAC_SHADOW_REG_CTRL_RX_WAKE BIT(20) #define CSR_MAC_SHADOW_REG_CTL2 (CSR_BASE + 0x0AC) #define CSR_MAC_SHADOW_REG_CTL2_RX_WAKE 0xFFFF /* GIO Chicken Bits (PCI Express bus link power management) */ #define CSR_GIO_CHICKEN_BITS (CSR_BASE+0x100) /* host chicken bits */ #define CSR_HOST_CHICKEN (CSR_BASE + 0x204) #define CSR_HOST_CHICKEN_PM_IDLE_SRC_DIS_SB_PME BIT(19) /* Analog phase-lock-loop configuration */ #define CSR_ANA_PLL_CFG (CSR_BASE+0x20c) /* * CSR HW resources monitor registers */ #define CSR_MONITOR_CFG_REG (CSR_BASE+0x214) #define CSR_MONITOR_STATUS_REG (CSR_BASE+0x228) #define CSR_MONITOR_XTAL_RESOURCES (0x00000010) /* * CSR Hardware Revision Workaround Register. Indicates hardware rev; * "step" determines CCK backoff for txpower calculation. * See also CSR_HW_REV register. * Bit fields: * 3-2: 0 = A, 1 = B, 2 = C, 3 = D step * 1-0: "Dash" (-) value, as in C-1, etc. */ #define CSR_HW_REV_WA_REG (CSR_BASE+0x22C) #define CSR_DBG_HPET_MEM_REG (CSR_BASE+0x240) #define CSR_DBG_LINK_PWR_MGMT_REG (CSR_BASE+0x250) /* Bits for CSR_HW_IF_CONFIG_REG */ #define CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH (0x00000003) #define CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP (0x0000000C) #define CSR_HW_IF_CONFIG_REG_BIT_MONITOR_SRAM (0x00000080) #define CSR_HW_IF_CONFIG_REG_MSK_BOARD_VER (0x000000C0) #define CSR_HW_IF_CONFIG_REG_BIT_MAC_SI (0x00000100) #define CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI (0x00000200) #define CSR_HW_IF_CONFIG_REG_D3_DEBUG (0x00000200) #define CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE (0x00000C00) #define CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH (0x00003000) #define CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP (0x0000C000) #define CSR_HW_IF_CONFIG_REG_POS_MAC_DASH (0) #define CSR_HW_IF_CONFIG_REG_POS_MAC_STEP (2) #define CSR_HW_IF_CONFIG_REG_POS_BOARD_VER (6) #define CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE (10) #define CSR_HW_IF_CONFIG_REG_POS_PHY_DASH (12) #define CSR_HW_IF_CONFIG_REG_POS_PHY_STEP (14) #define CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A (0x00080000) #define CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM (0x00200000) #define CSR_HW_IF_CONFIG_REG_BIT_NIC_READY (0x00400000) /* PCI_OWN_SEM */ #define CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE (0x02000000) /* ME_OWN */ #define CSR_HW_IF_CONFIG_REG_PREPARE (0x08000000) /* WAKE_ME */ #define CSR_HW_IF_CONFIG_REG_ENABLE_PME (0x10000000) #define CSR_HW_IF_CONFIG_REG_PERSIST_MODE (0x40000000) /* PERSISTENCE */ #define CSR_MBOX_SET_REG_OS_ALIVE BIT(5) #define CSR_INT_PERIODIC_DIS (0x00) /* disable periodic int*/ #define CSR_INT_PERIODIC_ENA (0xFF) /* 255*32 usec ~ 8 msec*/ /* interrupt flags in INTA, set by uCode or hardware (e.g. dma), * acknowledged (reset) by host writing "1" to flagged bits. 
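 *
 * For example (an illustrative sketch), a serviced Rx DMA interrupt could be
 * acknowledged by writing its bit back to CSR_INT:
 *
 *	iwl_write32(trans, CSR_INT, CSR_INT_BIT_FH_RX);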
*/ #define CSR_INT_BIT_FH_RX (1 << 31) /* Rx DMA, cmd responses, FH_INT[17:16] */ #define CSR_INT_BIT_HW_ERR (1 << 29) /* DMA hardware error FH_INT[31] */ #define CSR_INT_BIT_RX_PERIODIC (1 << 28) /* Rx periodic */ #define CSR_INT_BIT_FH_TX (1 << 27) /* Tx DMA FH_INT[1:0] */ #define CSR_INT_BIT_SCD (1 << 26) /* TXQ pointer advanced */ #define CSR_INT_BIT_SW_ERR (1 << 25) /* uCode error */ #define CSR_INT_BIT_RF_KILL (1 << 7) /* HW RFKILL switch GP_CNTRL[27] toggled */ #define CSR_INT_BIT_CT_KILL (1 << 6) /* Critical temp (chip too hot) rfkill */ #define CSR_INT_BIT_SW_RX (1 << 3) /* Rx, command responses */ #define CSR_INT_BIT_WAKEUP (1 << 1) /* NIC controller waking up (pwr mgmt) */ #define CSR_INT_BIT_ALIVE (1 << 0) /* uCode interrupts once it initializes */ #define CSR_INI_SET_MASK (CSR_INT_BIT_FH_RX | \ CSR_INT_BIT_HW_ERR | \ CSR_INT_BIT_FH_TX | \ CSR_INT_BIT_SW_ERR | \ CSR_INT_BIT_RF_KILL | \ CSR_INT_BIT_SW_RX | \ CSR_INT_BIT_WAKEUP | \ CSR_INT_BIT_ALIVE | \ CSR_INT_BIT_RX_PERIODIC) /* interrupt flags in FH (flow handler) (PCI busmaster DMA) */ #define CSR_FH_INT_BIT_ERR (1 << 31) /* Error */ #define CSR_FH_INT_BIT_HI_PRIOR (1 << 30) /* High priority Rx, bypass coalescing */ #define CSR_FH_INT_BIT_RX_CHNL1 (1 << 17) /* Rx channel 1 */ #define CSR_FH_INT_BIT_RX_CHNL0 (1 << 16) /* Rx channel 0 */ #define CSR_FH_INT_BIT_TX_CHNL1 (1 << 1) /* Tx channel 1 */ #define CSR_FH_INT_BIT_TX_CHNL0 (1 << 0) /* Tx channel 0 */ #define CSR_FH_INT_RX_MASK (CSR_FH_INT_BIT_HI_PRIOR | \ CSR_FH_INT_BIT_RX_CHNL1 | \ CSR_FH_INT_BIT_RX_CHNL0) #define CSR_FH_INT_TX_MASK (CSR_FH_INT_BIT_TX_CHNL1 | \ CSR_FH_INT_BIT_TX_CHNL0) /* GPIO */ #define CSR_GPIO_IN_BIT_AUX_POWER (0x00000200) #define CSR_GPIO_IN_VAL_VAUX_PWR_SRC (0x00000000) #define CSR_GPIO_IN_VAL_VMAIN_PWR_SRC (0x00000200) /* RESET */ #define CSR_RESET_REG_FLAG_NEVO_RESET (0x00000001) #define CSR_RESET_REG_FLAG_FORCE_NMI (0x00000002) #define CSR_RESET_REG_FLAG_MASTER_DISABLED (0x00000100) #define CSR_RESET_REG_FLAG_STOP_MASTER (0x00000200) #define CSR_RESET_LINK_PWR_MGMT_DISABLED (0x80000000) /* * GP (general purpose) CONTROL REGISTER * Bit fields: * 27: HW_RF_KILL_SW * Indicates state of (platform's) hardware RF-Kill switch * 26-24: POWER_SAVE_TYPE * Indicates current power-saving mode: * 000 -- No power saving * 001 -- MAC power-down * 010 -- PHY (radio) power-down * 011 -- Error * 10: XTAL ON request * 9-6: SYS_CONFIG * Indicates current system configuration, reflecting pins on chip * as forced high/low by device circuit board. * 4: GOING_TO_SLEEP * Indicates MAC is entering a power-saving sleep power-down. * Not a good time to access device-internal resources. 
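 *
 * For example (sketch only), the platform RF-kill switch state could be
 * sampled with:
 *
 *	bool hw_rfkill = iwl_read32(trans, CSR_GP_CNTRL) &
 *			 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW;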
*/ #define CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP (0x00000010) #define CSR_GP_CNTRL_REG_FLAG_XTAL_ON (0x00000400) #define CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE (0x07000000) #define CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN (0x04000000) #define CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW (0x08000000) /* HW REV */ #define CSR_HW_REV_DASH(_val) (((_val) & 0x0000003) >> 0) #define CSR_HW_REV_STEP(_val) (((_val) & 0x000000C) >> 2) #define CSR_HW_REV_TYPE(_val) (((_val) & 0x000FFF0) >> 4) /* HW RFID */ #define CSR_HW_RFID_FLAVOR(_val) (((_val) & 0x000000F) >> 0) #define CSR_HW_RFID_DASH(_val) (((_val) & 0x00000F0) >> 4) #define CSR_HW_RFID_STEP(_val) (((_val) & 0x0000F00) >> 8) #define CSR_HW_RFID_TYPE(_val) (((_val) & 0x0FFF000) >> 12) /** * hw_rev values */ enum { SILICON_A_STEP = 0, SILICON_B_STEP, SILICON_C_STEP, }; #define CSR_HW_REV_TYPE_MSK (0x000FFF0) #define CSR_HW_REV_TYPE_5300 (0x0000020) #define CSR_HW_REV_TYPE_5350 (0x0000030) #define CSR_HW_REV_TYPE_5100 (0x0000050) #define CSR_HW_REV_TYPE_5150 (0x0000040) #define CSR_HW_REV_TYPE_1000 (0x0000060) #define CSR_HW_REV_TYPE_6x00 (0x0000070) #define CSR_HW_REV_TYPE_6x50 (0x0000080) #define CSR_HW_REV_TYPE_6150 (0x0000084) #define CSR_HW_REV_TYPE_6x05 (0x00000B0) #define CSR_HW_REV_TYPE_6x30 CSR_HW_REV_TYPE_6x05 #define CSR_HW_REV_TYPE_6x35 CSR_HW_REV_TYPE_6x05 #define CSR_HW_REV_TYPE_2x30 (0x00000C0) #define CSR_HW_REV_TYPE_2x00 (0x0000100) #define CSR_HW_REV_TYPE_105 (0x0000110) #define CSR_HW_REV_TYPE_135 (0x0000120) #define CSR_HW_REV_TYPE_7265D (0x0000210) #define CSR_HW_REV_TYPE_NONE (0x00001F0) #define CSR_HW_REV_TYPE_QNJ (0x0000360) #define CSR_HW_REV_TYPE_QNJ_B0 (0x0000364) #define CSR_HW_REV_TYPE_QU_B0 (0x0000334) #define CSR_HW_REV_TYPE_QU_C0 (0x0000338) #define CSR_HW_REV_TYPE_QUZ (0x0000354) #define CSR_HW_REV_TYPE_HR_CDB (0x0000340) #define CSR_HW_REV_TYPE_SO (0x0000370) #define CSR_HW_REV_TYPE_TY (0x0000420) /* RF_ID value */ #define CSR_HW_RF_ID_TYPE_JF (0x00105100) #define CSR_HW_RF_ID_TYPE_HR (0x0010A000) #define CSR_HW_RF_ID_TYPE_HR1 (0x0010c100) #define CSR_HW_RF_ID_TYPE_HRCDB (0x00109F00) #define CSR_HW_RF_ID_TYPE_GF (0x0010D000) #define CSR_HW_RF_ID_TYPE_GF4 (0x0010E000) /* HW_RF CHIP ID */ #define CSR_HW_RF_ID_TYPE_CHIP_ID(_val) (((_val) >> 12) & 0xFFF) /* HW_RF CHIP STEP */ #define CSR_HW_RF_STEP(_val) (((_val) >> 8) & 0xF) /* EEPROM REG */ #define CSR_EEPROM_REG_READ_VALID_MSK (0x00000001) #define CSR_EEPROM_REG_BIT_CMD (0x00000002) #define CSR_EEPROM_REG_MSK_ADDR (0x0000FFFC) #define CSR_EEPROM_REG_MSK_DATA (0xFFFF0000) /* EEPROM GP */ #define CSR_EEPROM_GP_VALID_MSK (0x00000007) /* signature */ #define CSR_EEPROM_GP_IF_OWNER_MSK (0x00000180) #define CSR_EEPROM_GP_BAD_SIGNATURE_BOTH_EEP_AND_OTP (0x00000000) #define CSR_EEPROM_GP_BAD_SIG_EEP_GOOD_SIG_OTP (0x00000001) #define CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K (0x00000002) #define CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K (0x00000004) /* One-time-programmable memory general purpose reg */ #define CSR_OTP_GP_REG_DEVICE_SELECT (0x00010000) /* 0 - EEPROM, 1 - OTP */ #define CSR_OTP_GP_REG_OTP_ACCESS_MODE (0x00020000) /* 0 - absolute, 1 - relative */ #define CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK (0x00100000) /* bit 20 */ #define CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK (0x00200000) /* bit 21 */ /* GP REG */ #define CSR_GP_REG_POWER_SAVE_STATUS_MSK (0x03000000) /* bit 24/25 */ #define CSR_GP_REG_NO_POWER_SAVE (0x00000000) #define CSR_GP_REG_MAC_POWER_SAVE (0x01000000) #define CSR_GP_REG_PHY_POWER_SAVE (0x02000000) #define CSR_GP_REG_POWER_SAVE_ERROR (0x03000000) /* CSR GIO */ #define 
CSR_GIO_REG_VAL_L0S_ENABLED (0x00000002) /* * UCODE-DRIVER GP (general purpose) mailbox register 1 * Host driver and uCode write and/or read this register to communicate with * each other. * Bit fields: * 4: UCODE_DISABLE * Host sets this to request permanent halt of uCode, same as * sending CARD_STATE command with "halt" bit set. * 3: CT_KILL_EXIT * Host sets this to request exit from CT_KILL state, i.e. host thinks * device temperature is low enough to continue normal operation. * 2: CMD_BLOCKED * Host sets this during RF KILL power-down sequence (HW, SW, CT KILL) * to release uCode to clear all Tx and command queues, enter * unassociated mode, and power down. * NOTE: Some devices also use HBUS_TARG_MBX_C register for this bit. * 1: SW_BIT_RFKILL * Host sets this when issuing CARD_STATE command to request * device sleep. * 0: MAC_SLEEP * uCode sets this when preparing a power-saving power-down. * uCode resets this when power-up is complete and SRAM is sane. * NOTE: device saves internal SRAM data to host when powering down, * and must restore this data after powering back up. * MAC_SLEEP is the best indication that restore is complete. * Later devices (5xxx/6xxx/1xxx) use non-volatile SRAM, and * do not need to save/restore it. */ #define CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP (0x00000001) #define CSR_UCODE_SW_BIT_RFKILL (0x00000002) #define CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED (0x00000004) #define CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT (0x00000008) #define CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE (0x00000020) /* GP Driver */ #define CSR_GP_DRIVER_REG_BIT_RADIO_SKU_MSK (0x00000003) #define CSR_GP_DRIVER_REG_BIT_RADIO_SKU_3x3_HYB (0x00000000) #define CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_HYB (0x00000001) #define CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_IPA (0x00000002) #define CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6 (0x00000004) #define CSR_GP_DRIVER_REG_BIT_6050_1x2 (0x00000008) #define CSR_GP_DRIVER_REG_BIT_RADIO_IQ_INVER (0x00000080) /* GIO Chicken Bits (PCI Express bus link power management) */ #define CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX (0x00800000) #define CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER (0x20000000) /* LED */ #define CSR_LED_BSM_CTRL_MSK (0xFFFFFFDF) #define CSR_LED_REG_TURN_ON (0x60) #define CSR_LED_REG_TURN_OFF (0x20) /* ANA_PLL */ #define CSR50_ANA_PLL_CFG_VAL (0x00880300) /* HPET MEM debug */ #define CSR_DBG_HPET_MEM_REG_VAL (0xFFFF0000) /* DRAM INT TABLE */ #define CSR_DRAM_INT_TBL_ENABLE (1 << 31) #define CSR_DRAM_INIT_TBL_WRITE_POINTER (1 << 28) #define CSR_DRAM_INIT_TBL_WRAP_CHECK (1 << 27) /* * SHR target access (Shared block memory space) * * Shared internal registers can be accessed directly from PCI bus through SHR * arbiter without need for the MAC HW to be powered up. This is possible due to * indirect read/write via HEEP_CTRL_WRD_PCIEX_CTRL (0xEC) and * HEEP_CTRL_WRD_PCIEX_DATA (0xF4) registers. * * Use iwl_write32()/iwl_read32() family to access these registers. The MAC HW * need not be powered up so no "grab inc access" is required. */ /* * Registers for accessing shared registers (e.g. SHR_APMG_GP1, * SHR_APMG_XTAL_CFG). For example, to read from SHR_APMG_GP1 register (0x1DC), * first, write to the control register: * HEEP_CTRL_WRD_PCIEX_CTRL[15:0] = 0x1DC (offset of the SHR_APMG_GP1 register) * HEEP_CTRL_WRD_PCIEX_CTRL[29:28] = 2 (read access) * second, read from the data register HEEP_CTRL_WRD_PCIEX_DATA[31:0]. 
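 *
 * As a sketch of this read sequence (illustrative only, val is a local):
 *
 *	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG, 0x1DC | (2 << 28));
 *	val = iwl_read32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG);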
* * To write the register, first, write to the data register * HEEP_CTRL_WRD_PCIEX_DATA[31:0] and then: * HEEP_CTRL_WRD_PCIEX_CTRL[15:0] = 0x1DC (offset of the SHR_APMG_GP1 register) * HEEP_CTRL_WRD_PCIEX_CTRL[29:28] = 3 (write access) */ #define HEEP_CTRL_WRD_PCIEX_CTRL_REG (CSR_BASE+0x0ec) #define HEEP_CTRL_WRD_PCIEX_DATA_REG (CSR_BASE+0x0f4) /* * HBUS (Host-side Bus) * * HBUS registers are mapped directly into PCI bus space, but are used * to indirectly access device's internal memory or registers that * may be powered-down. * * Use iwl_write_direct32()/iwl_read_direct32() family for these registers; * host must "grab nic access" via CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ * to make sure the MAC (uCode processor, etc.) is powered up for accessing * internal resources. * * Do not use iwl_write32()/iwl_read32() family to access these registers; * these provide only simple PCI bus access, without waking up the MAC. */ #define HBUS_BASE (0x400) /* * Registers for accessing device's internal SRAM memory (e.g. SCD SRAM * structures, error log, event log, verifying uCode load). * First write to address register, then read from or write to data register * to complete the job. Once the address register is set up, accesses to * data registers auto-increment the address by one dword. * Bit usage for address registers (read or write): * 0-31: memory address within device */ #define HBUS_TARG_MEM_RADDR (HBUS_BASE+0x00c) #define HBUS_TARG_MEM_WADDR (HBUS_BASE+0x010) #define HBUS_TARG_MEM_WDAT (HBUS_BASE+0x018) #define HBUS_TARG_MEM_RDAT (HBUS_BASE+0x01c) /* Mailbox C, used as workaround alternative to CSR_UCODE_DRV_GP1 mailbox */ #define HBUS_TARG_MBX_C (HBUS_BASE+0x030) #define HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED (0x00000004) /* * Registers for accessing device's internal peripheral registers * (e.g. SCD, BSM, etc.). First write to address register, * then read from or write to data register to complete the job. * Bit usage for address registers (read or write): * 0-15: register address (offset) within device * 24-25: (# bytes - 1) to read or write (e.g. 3 for dword) */ #define HBUS_TARG_PRPH_WADDR (HBUS_BASE+0x044) #define HBUS_TARG_PRPH_RADDR (HBUS_BASE+0x048) #define HBUS_TARG_PRPH_WDAT (HBUS_BASE+0x04c) #define HBUS_TARG_PRPH_RDAT (HBUS_BASE+0x050) /* Used to enable DBGM */ #define HBUS_TARG_TEST_REG (HBUS_BASE+0x05c) /* * Per-Tx-queue write pointer (index, really!) * Indicates index to next TFD that driver will fill (1 past latest filled). 
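 *
 * For example (sketch, following the bit usage below), queue txq_id could be
 * advanced to index idx with:
 *
 *	iwl_write_direct32(trans, HBUS_TARG_WRPTR, (txq_id << 8) | idx);
 *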
* Bit usage: * 0-7: queue write index * 11-8: queue selector */ #define HBUS_TARG_WRPTR (HBUS_BASE+0x060) /********************************************************** * CSR values **********************************************************/ /* * host interrupt timeout value * used with setting interrupt coalescing timer * the CSR_INT_COALESCING is an 8 bit register in 32-usec unit * * default interrupt coalescing timer is 64 x 32 = 2048 usecs */ #define IWL_HOST_INT_TIMEOUT_MAX (0xFF) #define IWL_HOST_INT_TIMEOUT_DEF (0x40) #define IWL_HOST_INT_TIMEOUT_MIN (0x0) #define IWL_HOST_INT_OPER_MODE BIT(31) /***************************************************************************** * 7000/3000 series SHR DTS addresses * *****************************************************************************/ /* Diode Results Register Structure: */ enum dtd_diode_reg { DTS_DIODE_REG_DIG_VAL = 0x000000FF, /* bits [7:0] */ DTS_DIODE_REG_VREF_LOW = 0x0000FF00, /* bits [15:8] */ DTS_DIODE_REG_VREF_HIGH = 0x00FF0000, /* bits [23:16] */ DTS_DIODE_REG_VREF_ID = 0x03000000, /* bits [25:24] */ DTS_DIODE_REG_PASS_ONCE = 0x80000000, /* bits [31:31] */ DTS_DIODE_REG_FLAGS_MSK = 0xFF000000, /* bits [31:24] */ /* Those are the masks INSIDE the flags bit-field: */ DTS_DIODE_REG_FLAGS_VREFS_ID_POS = 0, DTS_DIODE_REG_FLAGS_VREFS_ID = 0x00000003, /* bits [1:0] */ DTS_DIODE_REG_FLAGS_PASS_ONCE_POS = 7, DTS_DIODE_REG_FLAGS_PASS_ONCE = 0x00000080, /* bits [7:7] */ }; /***************************************************************************** * MSIX related registers * *****************************************************************************/ #define CSR_MSIX_BASE (0x2000) #define CSR_MSIX_FH_INT_CAUSES_AD (CSR_MSIX_BASE + 0x800) #define CSR_MSIX_FH_INT_MASK_AD (CSR_MSIX_BASE + 0x804) #define CSR_MSIX_HW_INT_CAUSES_AD (CSR_MSIX_BASE + 0x808) #define CSR_MSIX_HW_INT_MASK_AD (CSR_MSIX_BASE + 0x80C) #define CSR_MSIX_AUTOMASK_ST_AD (CSR_MSIX_BASE + 0x810) #define CSR_MSIX_RX_IVAR_AD_REG (CSR_MSIX_BASE + 0x880) #define CSR_MSIX_IVAR_AD_REG (CSR_MSIX_BASE + 0x890) #define CSR_MSIX_PENDING_PBA_AD (CSR_MSIX_BASE + 0x1000) #define CSR_MSIX_RX_IVAR(cause) (CSR_MSIX_RX_IVAR_AD_REG + (cause)) #define CSR_MSIX_IVAR(cause) (CSR_MSIX_IVAR_AD_REG + (cause)) #define MSIX_FH_INT_CAUSES_Q(q) (q) /* * Causes for the FH register interrupts */ enum msix_fh_int_causes { MSIX_FH_INT_CAUSES_Q0 = BIT(0), MSIX_FH_INT_CAUSES_Q1 = BIT(1), MSIX_FH_INT_CAUSES_D2S_CH0_NUM = BIT(16), MSIX_FH_INT_CAUSES_D2S_CH1_NUM = BIT(17), MSIX_FH_INT_CAUSES_S2D = BIT(19), MSIX_FH_INT_CAUSES_FH_ERR = BIT(21), }; /* * Causes for the HW register interrupts */ enum msix_hw_int_causes { MSIX_HW_INT_CAUSES_REG_ALIVE = BIT(0), MSIX_HW_INT_CAUSES_REG_WAKEUP = BIT(1), MSIX_HW_INT_CAUSES_REG_IPC = BIT(1), MSIX_HW_INT_CAUSES_REG_IML = BIT(2), MSIX_HW_INT_CAUSES_REG_SW_ERR_V2 = BIT(5), MSIX_HW_INT_CAUSES_REG_CT_KILL = BIT(6), MSIX_HW_INT_CAUSES_REG_RF_KILL = BIT(7), MSIX_HW_INT_CAUSES_REG_PERIODIC = BIT(8), MSIX_HW_INT_CAUSES_REG_SW_ERR = BIT(25), MSIX_HW_INT_CAUSES_REG_SCD = BIT(26), MSIX_HW_INT_CAUSES_REG_FH_TX = BIT(27), MSIX_HW_INT_CAUSES_REG_HW_ERR = BIT(29), MSIX_HW_INT_CAUSES_REG_HAP = BIT(30), }; #define MSIX_MIN_INTERRUPT_VECTORS 2 #define MSIX_AUTO_CLEAR_CAUSE 0 #define MSIX_NON_AUTO_CLEAR_CAUSE BIT(7) /***************************************************************************** * HW address related registers * *****************************************************************************/ #define CSR_ADDR_BASE (0x380) #define CSR_MAC_ADDR0_OTP (CSR_ADDR_BASE) #define 
CSR_MAC_ADDR1_OTP (CSR_ADDR_BASE + 4) #define CSR_MAC_ADDR0_STRAP (CSR_ADDR_BASE + 8) #define CSR_MAC_ADDR1_STRAP (CSR_ADDR_BASE + 0xC) #endif /* !__iwl_csr_h__ */ backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/iwl-dbg-cfg.c000066400000000000000000000273561351064634500270320ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2013 - 2015, 2019 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2013 - 2015, 2019 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* *****************************************************************************/ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/firmware.h> #include "iwl-dbg-cfg.h" #include "iwl-modparams.h" /* grab default values */ #undef CPTCFG_IWLWIFI_SUPPORT_DEBUG_OVERRIDES #if IS_ENABLED(CPTCFG_IWLXVT) #include "xvt/constants.h" #endif #if IS_ENABLED(CPTCFG_IWLMVM) #include "mvm/constants.h" #endif struct iwl_dbg_cfg current_dbg_config = { #define DBG_CFG_REINCLUDE #define IWL_DBG_CFG(type, name) \ .name = IWL_ ## name, #define IWL_DBG_CFG_STR(name) /* no default */ #define IWL_DBG_CFG_NODEF(type, name) /* no default */ #define IWL_DBG_CFG_BIN(name) /* nothing, default empty */ #define IWL_DBG_CFG_BINA(name, max) /* nothing, default empty */ #define IWL_MOD_PARAM(type, name) /* nothing, default empty */ #define IWL_MVM_MOD_PARAM(type, name) /* nothing, default empty */ #define IWL_DBG_CFG_RANGE(type, name, min, max) \ .name = IWL_ ## name, #include "iwl-dbg-cfg.h" #undef IWL_DBG_CFG #undef IWL_DBG_CFG_STR #undef IWL_DBG_CFG_NODEF #undef IWL_DBG_CFG_BIN #undef IWL_DBG_CFG_BINA #undef IWL_DBG_CFG_RANGE #undef IWL_MOD_PARAM #undef IWL_MVM_MOD_PARAM }; static const char dbg_cfg_magic[] = "[IWL DEBUG CONFIG DATA]"; #define DBG_CFG_LOADER(_type) \ static void __maybe_unused \ dbg_cfg_load_ ## _type(const char *name, const char *val, \ void *out, s64 min, s64 max) \ { \ _type r; \ \ if (kstrto ## _type(val, 0, &r)) { \ printk(KERN_INFO "iwlwifi debug config: Invalid data for %s: %s\n",\ name, val); \ return; \ } \ \ if (min && max && (r < min || r > max)) { \ printk(KERN_INFO "iwlwifi debug config: value %u for %s out of range [%lld,%lld]\n",\ r, name, min, max); \ return; \ } \ \ *(_type *)out = r; \ printk(KERN_INFO "iwlwifi debug config: %s=%d\n", name, *(_type *)out); \ } DBG_CFG_LOADER(u8) DBG_CFG_LOADER(u16) DBG_CFG_LOADER(u32) DBG_CFG_LOADER(int) DBG_CFG_LOADER(uint) static void __maybe_unused dbg_cfg_load_bool(const char *name, const char *val, void *out, s64 min, s64 max) { u8 v; if (kstrtou8(val, 0, &v)) { printk(KERN_INFO "iwlwifi debug config: Invalid data for %s: %s\n", name, val); } else { *(bool *)out = v; printk(KERN_INFO "iwlwifi debug config: %s=%d\n", name, *(bool *)out); } } static int __maybe_unused dbg_cfg_load_bin(const char *name, const char *val, struct iwl_dbg_cfg_bin *out) { int len = strlen(val); u8 *data; if (len % 2) goto error; len /= 2; data = kzalloc(len, GFP_KERNEL); if (!data) return -ENOMEM; if (hex2bin(data, val, len)) { kfree(data); goto error; } out->data = data; out->len = len; printk(KERN_INFO "iwlwifi debug config: %d bytes for %s\n", len, name); return 0; error: printk(KERN_INFO "iwlwifi debug config: Invalid data for %s\n", name); return -EINVAL; } static __maybe_unused void dbg_cfg_load_str(const char *name, const char *val, void *out, s64 min, s64 max) { if (strlen(val) == 0) { printk(KERN_INFO "iwlwifi debug config: Invalid data for %s\n", name); } else { *(char **)out = kstrdup(val, GFP_KERNEL); printk(KERN_INFO "iwlwifi debug config: %s=%s\n", name, *(char **)out); } } void iwl_dbg_cfg_free(struct iwl_dbg_cfg *dbgcfg) { #define IWL_DBG_CFG(t, n) /* nothing */ #define IWL_DBG_CFG_STR(n) \ kfree(dbgcfg->n); #define IWL_DBG_CFG_NODEF(t, n) /* nothing */ #define IWL_DBG_CFG_BIN(n) \ do { \ kfree(dbgcfg->n.data); \ dbgcfg->n.data = NULL; \ dbgcfg->n.len = 0; \ } while (0); #define IWL_DBG_CFG_BINA(n, max) \ do { \ int i; \ \ for (i = 0; i < max; i++) { \ kfree(dbgcfg->n[i].data); \ dbgcfg->n[i].data = NULL; \ dbgcfg->n[i].len = 0; \ } \ dbgcfg->n_ ## n = 0; \ } while (0); #define
IWL_DBG_CFG_RANGE(t, n, min, max) /* nothing */ #define IWL_MOD_PARAM(t, n) /* nothing */ #define IWL_MVM_MOD_PARAM(t, n) /* nothing */ #include "iwl-dbg-cfg.h" #undef IWL_DBG_CFG #undef IWL_DBG_CFG_STR #undef IWL_DBG_CFG_NODEF #undef IWL_DBG_CFG_BIN #undef IWL_DBG_CFG_BINA #undef IWL_DBG_CFG_RANGE #undef IWL_MOD_PARAM #undef IWL_MVM_MOD_PARAM } struct iwl_dbg_cfg_loader { const char *name; s64 min, max; void (*loader)(const char *name, const char *val, void *out, s64 min, s64 max); u32 offset; }; static const struct iwl_dbg_cfg_loader iwl_dbg_cfg_loaders[] = { #define IWL_DBG_CFG(t, n) \ { \ .name = #n, \ .offset = offsetof(struct iwl_dbg_cfg, n), \ .loader = dbg_cfg_load_##t, \ }, #define IWL_DBG_CFG_STR(n) \ { \ .name = #n, \ .offset = offsetof(struct iwl_dbg_cfg, n), \ .loader = dbg_cfg_load_str, \ }, #define IWL_DBG_CFG_NODEF(t, n) IWL_DBG_CFG(t, n) #define IWL_DBG_CFG_BIN(n) /* not using this */ #define IWL_DBG_CFG_BINA(n, max) /* not using this */ #define IWL_DBG_CFG_RANGE(t, n, _min, _max) \ { \ .name = #n, \ .offset = offsetof(struct iwl_dbg_cfg, n), \ .min = _min, \ .max = _max, \ .loader = dbg_cfg_load_##t, \ }, #define IWL_MOD_PARAM(t, n) /* not using this */ #define IWL_MVM_MOD_PARAM(t, n) /* not using this */ #include "iwl-dbg-cfg.h" #undef IWL_DBG_CFG #undef IWL_DBG_CFG_STR #undef IWL_DBG_CFG_NODEF #undef IWL_DBG_CFG_BIN #undef IWL_DBG_CFG_BINA #undef IWL_DBG_CFG_RANGE #undef IWL_MOD_PARAM #undef IWL_MVM_MOD_PARAM }; void iwl_dbg_cfg_load_ini(struct device *dev, struct iwl_dbg_cfg *dbgcfg) { const struct firmware *fw; char *data, *end, *pos; int err; if (dbgcfg->loaded) return; /* TODO: maybe add a per-device file? */ err = request_firmware(&fw, "iwl-dbg-cfg.ini", dev); if (err) return; /* must be ini file style with magic section header */ if (fw->size < strlen(dbg_cfg_magic)) goto release; if (memcmp(fw->data, dbg_cfg_magic, strlen(dbg_cfg_magic))) { printk(KERN_INFO "iwlwifi debug config: file is malformed\n"); goto release; } /* +1 guarantees the last line gets NUL-terminated even without \n */ data = kzalloc(fw->size - strlen(dbg_cfg_magic) + 1, GFP_KERNEL); if (!data) goto release; memcpy(data, fw->data + strlen(dbg_cfg_magic), fw->size - strlen(dbg_cfg_magic)); end = data + fw->size - strlen(dbg_cfg_magic); /* replace CR/LF with NULs to make parsing easier */ for (pos = data; pos < end; pos++) { if (*pos == '\n' || *pos == '\r') *pos = '\0'; } pos = data; while (pos < end) { const char *line = pos; bool loaded = false; int idx; /* skip to next line */ while (pos < end && *pos) pos++; /* skip to start of next line, over empty ones if any */ while (pos < end && !*pos) pos++; /* skip empty lines and comments */ if (!*line || *line == '#') continue; for (idx = 0; idx < ARRAY_SIZE(iwl_dbg_cfg_loaders); idx++) { const struct iwl_dbg_cfg_loader *l; l = &iwl_dbg_cfg_loaders[idx]; if (strncmp(l->name, line, strlen(l->name)) == 0 && line[strlen(l->name)] == '=') { l->loader(l->name, line + strlen(l->name) + 1, (void *)((u8 *)dbgcfg + l->offset), l->min, l->max); loaded = true; } } /* * if it was loaded by the loaders, don't bother checking * more or printing an error message below */ if (loaded) continue; #define IWL_DBG_CFG(t, n) /* handled above */ #define IWL_DBG_CFG_NODEF(t, n) /* handled above */ #define IWL_DBG_CFG_BIN(n) \ if (strncmp(#n, line, strlen(#n)) == 0 && \ line[strlen(#n)] == '=') { \ dbg_cfg_load_bin(#n, line + strlen(#n) + 1, \ &dbgcfg->n); \ continue; \ } #define IWL_DBG_CFG_BINA(n, max) \ if (strncmp(#n, line, strlen(#n)) == 0 && \ line[strlen(#n)] == '=') {
\ if (dbgcfg->n_##n >= max) { \ printk(KERN_INFO \ "iwlwifi debug config: " #n " given too many times\n");\ continue; \ } \ if (!dbg_cfg_load_bin(#n, line + strlen(#n) + 1,\ &dbgcfg->n[dbgcfg->n_##n]))\ dbgcfg->n_##n++; \ continue; \ } #define IWL_DBG_CFG_RANGE(t, n, min, max) /* handled above */ #define IWL_DBG_CFG_STR(n) /* handled above */ #define IWL_MOD_PARAM(t, n) \ if (strncmp(#n, line, strlen(#n)) == 0 && \ line[strlen(#n)] == '=') { \ dbg_cfg_load_##t(#n, line + strlen(#n) + 1, \ &iwlwifi_mod_params.n, 0, 0); \ continue; \ } #define IWL_MVM_MOD_PARAM(t, n) { \ if (strncmp("mvm." #n, line, strlen("mvm." #n)) == 0 && \ line[strlen("mvm." #n)] == '=') { \ dbg_cfg_load_##t("mvm." #n, \ line + strlen("mvm." #n) + 1, \ &dbgcfg->mvm_##n, 0, 0); \ dbgcfg->__mvm_mod_param_##n = true; \ continue; \ } \ } #include "iwl-dbg-cfg.h" #undef IWL_DBG_CFG #undef IWL_DBG_CFG_STR #undef IWL_DBG_CFG_NODEF #undef IWL_DBG_CFG_BIN #undef IWL_DBG_CFG_BINA #undef IWL_DBG_CFG_RANGE #undef IWL_MOD_PARAM #undef IWL_MVM_MOD_PARAM printk(KERN_INFO "iwlwifi debug config: failed to load line \"%s\"\n", line); } kfree(data); release: release_firmware(fw); dbgcfg->loaded = true; } backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/iwl-dbg-cfg.h000066400000000000000000000315731351064634500270330ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2013 - 2015 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright (C) 2018-2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2013 - 2015 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright (C) 2018-2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #if !defined(__IWL_DBG_CFG_H__) || defined(DBG_CFG_REINCLUDE) #undef __IWL_DBG_CFG_H__ /* avoid warning */ #define __IWL_DBG_CFG_H__ /* * with DBG_CFG_REINCLUDE set this file should contain nothing * but IWL_DBG_CFG() macro invocations so it can be used in * the C file to generate more code (e.g. for debugfs and the * struct initialization with default values) */ #ifndef DBG_CFG_REINCLUDE #include struct iwl_dbg_cfg_bin { const void *data; unsigned int len; }; struct iwl_dbg_cfg { bool loaded; #define IWL_DBG_CFG(type, name) type name; #define IWL_DBG_CFG_NODEF(type, name) type name; #define IWL_DBG_CFG_BIN(name) struct iwl_dbg_cfg_bin name; #define IWL_DBG_CFG_STR(name) const char *name; #define IWL_DBG_CFG_BINA(name, max) struct iwl_dbg_cfg_bin name[max]; \ int n_ ## name; #define IWL_DBG_CFG_RANGE(type, name, min, max) IWL_DBG_CFG(type, name) #define IWL_MOD_PARAM(type, name) /* do nothing */ #define IWL_MVM_MOD_PARAM(type, name) type mvm_##name; \ bool __mvm_mod_param_##name; #endif /* DBG_CFG_REINCLUDE */ #if IS_ENABLED(CPTCFG_IWLXVT) IWL_DBG_CFG(u32, XVT_DEFAULT_DBGM_MEM_POWER) IWL_DBG_CFG(u32, XVT_DEFAULT_DBGM_LMAC_MASK) IWL_DBG_CFG(u32, XVT_DEFAULT_DBGM_PRPH_MASK) IWL_MOD_PARAM(bool, xvt_default_mode) #endif IWL_DBG_CFG_NODEF(bool, disable_52GHz) IWL_DBG_CFG_NODEF(bool, disable_24GHz) #if IS_ENABLED(CPTCFG_IWLMVM) || IS_ENABLED(CPTCFG_IWLFMAC) IWL_DBG_CFG_NODEF(u32, MVM_CALIB_OVERRIDE_CONTROL) IWL_DBG_CFG_NODEF(u32, MVM_CALIB_INIT_FLOW) IWL_DBG_CFG_NODEF(u32, MVM_CALIB_INIT_EVENT) IWL_DBG_CFG_NODEF(u32, MVM_CALIB_D0_FLOW) IWL_DBG_CFG_NODEF(u32, MVM_CALIB_D0_EVENT) IWL_DBG_CFG_NODEF(u32, MVM_CALIB_D3_FLOW) IWL_DBG_CFG_NODEF(u32, MVM_CALIB_D3_EVENT) IWL_DBG_CFG_NODEF(bool, enable_timestamp_marker_cmd) #endif IWL_DBG_CFG_NODEF(bool, STARTUP_RFKILL) #if IS_ENABLED(CPTCFG_IWLMVM) IWL_DBG_CFG(u32, MVM_DEFAULT_PS_TX_DATA_TIMEOUT) IWL_DBG_CFG(u32, MVM_DEFAULT_PS_RX_DATA_TIMEOUT) IWL_DBG_CFG(u32, MVM_WOWLAN_PS_TX_DATA_TIMEOUT) IWL_DBG_CFG(u32, MVM_WOWLAN_PS_RX_DATA_TIMEOUT) IWL_DBG_CFG(u32, MVM_SHORT_PS_TX_DATA_TIMEOUT) IWL_DBG_CFG(u32, MVM_SHORT_PS_RX_DATA_TIMEOUT) IWL_DBG_CFG(u32, MVM_UAPSD_TX_DATA_TIMEOUT) IWL_DBG_CFG(u32, MVM_UAPSD_RX_DATA_TIMEOUT) IWL_DBG_CFG(u32, MVM_UAPSD_QUEUES) IWL_DBG_CFG_NODEF(bool, MVM_USE_PS_POLL) IWL_DBG_CFG(u8, MVM_PS_HEAVY_TX_THLD_PACKETS) IWL_DBG_CFG(u8, MVM_PS_HEAVY_RX_THLD_PACKETS) IWL_DBG_CFG(u8, MVM_PS_SNOOZE_HEAVY_TX_THLD_PACKETS) IWL_DBG_CFG(u8, MVM_PS_SNOOZE_HEAVY_RX_THLD_PACKETS) IWL_DBG_CFG(u8, MVM_PS_HEAVY_TX_THLD_PERCENT) IWL_DBG_CFG(u8, MVM_PS_HEAVY_RX_THLD_PERCENT) IWL_DBG_CFG(u16, MVM_PS_SNOOZE_INTERVAL) IWL_DBG_CFG(u16, MVM_PS_SNOOZE_WINDOW) IWL_DBG_CFG(u16, MVM_WOWLAN_PS_SNOOZE_WINDOW) IWL_DBG_CFG(u8, MVM_LOWLAT_QUOTA_MIN_PERCENT) IWL_DBG_CFG(u16, MVM_BT_COEX_EN_RED_TXP_THRESH) IWL_DBG_CFG(u16, MVM_BT_COEX_DIS_RED_TXP_THRESH) IWL_DBG_CFG(u32, MVM_BT_COEX_ANTENNA_COUPLING_THRS) IWL_DBG_CFG(u32, MVM_BT_COEX_MPLUT_REG0) IWL_DBG_CFG(u32, 
MVM_BT_COEX_MPLUT_REG1) IWL_DBG_CFG(bool, MVM_BT_COEX_SYNC2SCO) IWL_DBG_CFG(bool, MVM_BT_COEX_MPLUT) IWL_DBG_CFG(bool, MVM_BT_COEX_TTC) IWL_DBG_CFG(bool, MVM_BT_COEX_RRC) IWL_DBG_CFG(bool, MVM_FW_MCAST_FILTER_PASS_ALL) IWL_DBG_CFG(bool, MVM_FW_BCAST_FILTER_PASS_ALL) IWL_DBG_CFG(bool, MVM_TOF_IS_RESPONDER) IWL_DBG_CFG(bool, MVM_P2P_LOWLATENCY_PS_ENABLE) IWL_DBG_CFG(bool, MVM_SW_TX_CSUM_OFFLOAD) IWL_DBG_CFG(bool, MVM_HW_CSUM_DISABLE) IWL_DBG_CFG(bool, MVM_PARSE_NVM) IWL_DBG_CFG(bool, MVM_ADWELL_ENABLE) IWL_DBG_CFG(u16, MVM_ADWELL_MAX_BUDGET) IWL_DBG_CFG(u32, MVM_TCM_LOAD_MEDIUM_THRESH) IWL_DBG_CFG(u32, MVM_TCM_LOAD_HIGH_THRESH) IWL_DBG_CFG(u32, MVM_TCM_LOWLAT_ENABLE_THRESH) IWL_DBG_CFG(u32, MVM_UAPSD_NONAGG_PERIOD) IWL_DBG_CFG_RANGE(u8, MVM_UAPSD_NOAGG_LIST_LEN, 1, IWL_MVM_UAPSD_NOAGG_BSSIDS_NUM) IWL_DBG_CFG(bool, MVM_NON_TRANSMITTING_AP) IWL_DBG_CFG(u8, MVM_QUOTA_THRESHOLD) IWL_DBG_CFG(u8, MVM_RS_RSSI_BASED_INIT_RATE) IWL_DBG_CFG(u8, MVM_RS_80_20_FAR_RANGE_TWEAK) IWL_DBG_CFG(u8, MVM_RS_NUM_TRY_BEFORE_ANT_TOGGLE) IWL_DBG_CFG(u8, MVM_RS_HT_VHT_RETRIES_PER_RATE) IWL_DBG_CFG(u8, MVM_RS_HT_VHT_RETRIES_PER_RATE_TW) IWL_DBG_CFG(u8, MVM_RS_INITIAL_MIMO_NUM_RATES) IWL_DBG_CFG(u8, MVM_RS_INITIAL_SISO_NUM_RATES) IWL_DBG_CFG(u8, MVM_RS_INITIAL_LEGACY_NUM_RATES) IWL_DBG_CFG(u8, MVM_RS_INITIAL_LEGACY_RETRIES) IWL_DBG_CFG(u8, MVM_RS_SECONDARY_LEGACY_RETRIES) IWL_DBG_CFG(u8, MVM_RS_SECONDARY_LEGACY_NUM_RATES) IWL_DBG_CFG(u8, MVM_RS_SECONDARY_SISO_NUM_RATES) IWL_DBG_CFG(u8, MVM_RS_SECONDARY_SISO_RETRIES) IWL_DBG_CFG(u8, MVM_RS_RATE_MIN_FAILURE_TH) IWL_DBG_CFG(u8, MVM_RS_RATE_MIN_SUCCESS_TH) IWL_DBG_CFG(u8, MVM_RS_STAY_IN_COLUMN_TIMEOUT) IWL_DBG_CFG(u8, MVM_RS_IDLE_TIMEOUT) IWL_DBG_CFG(u8, MVM_RS_MISSED_RATE_MAX) IWL_DBG_CFG(u16, MVM_RS_LEGACY_FAILURE_LIMIT) IWL_DBG_CFG(u16, MVM_RS_LEGACY_SUCCESS_LIMIT) IWL_DBG_CFG(u16, MVM_RS_LEGACY_TABLE_COUNT) IWL_DBG_CFG(u16, MVM_RS_NON_LEGACY_FAILURE_LIMIT) IWL_DBG_CFG(u16, MVM_RS_NON_LEGACY_SUCCESS_LIMIT) IWL_DBG_CFG(u16, MVM_RS_NON_LEGACY_TABLE_COUNT) IWL_DBG_CFG(u16, MVM_RS_SR_FORCE_DECREASE) IWL_DBG_CFG(u16, MVM_RS_SR_NO_DECREASE) IWL_DBG_CFG(u16, MVM_RS_AGG_TIME_LIMIT) IWL_DBG_CFG(u8, MVM_RS_AGG_DISABLE_START) IWL_DBG_CFG(u8, MVM_RS_AGG_START_THRESHOLD) IWL_DBG_CFG(u16, MVM_RS_TPC_SR_FORCE_INCREASE) IWL_DBG_CFG(u16, MVM_RS_TPC_SR_NO_INCREASE) IWL_DBG_CFG(u8, MVM_RS_TPC_TX_POWER_STEP) IWL_DBG_CFG(bool, MVM_ENABLE_EBS) IWL_DBG_CFG_NODEF(u16, MVM_FTM_RESP_TOA_OFFSET) IWL_DBG_CFG_NODEF(u32, MVM_FTM_RESP_VALID) IWL_DBG_CFG_NODEF(u32, MVM_FTM_RESP_FLAGS) IWL_DBG_CFG(u8, MVM_FTM_INITIATOR_ALGO) IWL_DBG_CFG(bool, MVM_FTM_INITIATOR_DYNACK) IWL_DBG_CFG_NODEF(bool, MVM_FTM_INITIATOR_MCSI_ENABLED) IWL_DBG_CFG_NODEF(int, MVM_FTM_INITIATOR_COMMON_CALIB) IWL_DBG_CFG_NODEF(bool, MVM_FTM_INITIATOR_FAST_ALGO_DISABLE) IWL_DBG_CFG(bool, MVM_D3_DEBUG) IWL_DBG_CFG(bool, MVM_USE_TWT) IWL_MVM_MOD_PARAM(int, power_scheme) IWL_MVM_MOD_PARAM(bool, init_dbg) IWL_MVM_MOD_PARAM(bool, tfd_q_hang_detect) #endif /* CPTCFG_IWLMVM */ #ifdef CPTCFG_IWLWIFI_DEVICE_TESTMODE IWL_DBG_CFG_NODEF(u32, dnt_out_mode) /* XXX: should be dbgm_ or dbg_mon_ for consistency? */ IWL_DBG_CFG_NODEF(u32, dbm_destination_path) /* XXX: should be dbg_mon_ for consistency? 
*/ IWL_DBG_CFG_NODEF(u32, dbgm_enable_mode) IWL_DBG_CFG_NODEF(u32, dbgm_mem_power) IWL_DBG_CFG_NODEF(u32, dbg_flags) IWL_DBG_CFG_NODEF(u32, dbg_mon_sample_ctl_addr) IWL_DBG_CFG_NODEF(u32, dbg_mon_sample_ctl_val) IWL_DBG_CFG_NODEF(u32, dbg_mon_buff_base_addr_reg_addr) IWL_DBG_CFG_NODEF(u32, dbg_mon_buff_end_addr_reg_addr) IWL_DBG_CFG_NODEF(u32, dbg_mon_data_sel_ctl_addr) IWL_DBG_CFG_NODEF(u32, dbg_mon_data_sel_ctl_val) IWL_DBG_CFG_NODEF(u32, dbg_mon_mc_msk_addr) IWL_DBG_CFG_NODEF(u32, dbg_mon_mc_msk_val) IWL_DBG_CFG_NODEF(u32, dbg_mon_sample_mask_addr) IWL_DBG_CFG_NODEF(u32, dbg_mon_sample_mask_val) IWL_DBG_CFG_NODEF(u32, dbg_mon_start_mask_addr) IWL_DBG_CFG_NODEF(u32, dbg_mon_start_mask_val) IWL_DBG_CFG_NODEF(u32, dbg_mon_end_mask_addr) IWL_DBG_CFG_NODEF(u32, dbg_mon_end_mask_val) IWL_DBG_CFG_NODEF(u32, dbg_mon_end_threshold_addr) IWL_DBG_CFG_NODEF(u32, dbg_mon_end_threshold_val) IWL_DBG_CFG_NODEF(u32, dbg_mon_sample_period_addr) IWL_DBG_CFG_NODEF(u32, dbg_mon_sample_period_val) IWL_DBG_CFG_NODEF(u32, dbg_mon_wr_ptr_addr) IWL_DBG_CFG_NODEF(u32, dbg_mon_cyc_cnt_addr) IWL_DBG_CFG_NODEF(u32, dbg_mon_dmarb_rd_ctl_addr) IWL_DBG_CFG_NODEF(u32, dbg_mon_dmarb_rd_data_addr) IWL_DBG_CFG_NODEF(u32, dbg_marbh_conf_reg) IWL_DBG_CFG_NODEF(u32, dbg_marbh_conf_mask) IWL_DBG_CFG_NODEF(u32, dbg_marbh_access_type) IWL_DBG_CFG_NODEF(u32, dbgc_hb_base_addr) IWL_DBG_CFG_NODEF(u32, dbgc_hb_end_addr) IWL_DBG_CFG_NODEF(u32, dbgc_dram_wrptr_addr) IWL_DBG_CFG_NODEF(u32, dbgc_wrap_count_addr) IWL_DBG_CFG_NODEF(u32, dbg_mipi_conf_reg) IWL_DBG_CFG_NODEF(u32, dbg_mipi_conf_mask) IWL_DBG_CFG_NODEF(u32, dbgc_hb_base_val_smem) IWL_DBG_CFG_NODEF(u32, dbgc_hb_end_val_smem) IWL_DBG_CFG_BIN(dbg_conf_monitor_host_command) IWL_DBG_CFG_BIN(log_level_cmd) IWL_DBG_CFG_BINA(ldbg_cmd, 32) IWL_DBG_CFG_NODEF(u8, log_level_cmd_id) IWL_DBG_CFG_NODEF(u8, dbg_conf_monitor_cmd_id) IWL_DBG_CFG_NODEF(u8, ldbg_cmd_nums) IWL_DBG_CFG_NODEF(u32, dbg_mon_buff_base_addr_reg_addr_b_step) IWL_DBG_CFG_NODEF(u32, dbg_mon_buff_end_addr_reg_addr_b_step) IWL_DBG_CFG_NODEF(u32, dbg_mon_wr_ptr_addr_b_step) #endif /* CPTCFG_IWLWIFI_DEVICE_TESTMODE */ IWL_DBG_CFG_BIN(hw_address) IWL_DBG_CFG_STR(fw_dbg_conf) IWL_DBG_CFG_STR(nvm_file) IWL_DBG_CFG_STR(fw_file_pre) IWL_DBG_CFG_NODEF(u32, valid_ants) IWL_DBG_CFG_NODEF(u32, no_ack_en) IWL_DBG_CFG_NODEF(bool, no_ldpc) IWL_DBG_CFG_NODEF(u16, rx_agg_subframes) IWL_DBG_CFG_NODEF(bool, tx_siso_80bw_like_160bw) IWL_DBG_CFG_NODEF(u16, rx_mcs_80) IWL_DBG_CFG_NODEF(u16, tx_mcs_80) IWL_DBG_CFG_NODEF(u16, rx_mcs_160) IWL_DBG_CFG_NODEF(u16, tx_mcs_160) IWL_DBG_CFG_NODEF(u32, secure_boot_cfg) IWL_MOD_PARAM(u32, uapsd_disable) IWL_MOD_PARAM(bool, lar_disable) IWL_MOD_PARAM(bool, fw_monitor) IWL_MOD_PARAM(bool, fw_restart) IWL_MOD_PARAM(bool, power_save) IWL_MOD_PARAM(bool, bt_coex_active) IWL_MOD_PARAM(int, antenna_coupling) IWL_MOD_PARAM(int, power_level) IWL_MOD_PARAM(int, led_mode) IWL_MOD_PARAM(int, amsdu_size) IWL_MOD_PARAM(int, swcrypto) IWL_MOD_PARAM(uint, disable_11n) IWL_DBG_CFG_BIN(he_ppe_thres) IWL_DBG_CFG_NODEF(u8, he_chan_width_dis) IWL_DBG_CFG_NODEF(u32, vht_cap_flip) IWL_DBG_CFG_NODEF(u32, mu_edca) IWL_DBG_CFG_BIN(he_mac_cap) IWL_DBG_CFG_BIN(he_phy_cap) #ifdef CPTCFG_IWLWIFI_DEBUG IWL_MOD_PARAM(u32, debug_level) #endif /* CPTCFG_IWLWIFI_DEBUG */ #ifdef CPTCFG_IWLWIFI_DISALLOW_OLDER_FW IWL_DBG_CFG_NODEF(bool, load_old_fw) #endif /* CPTCFG_IWLWIFI_DISALLOW_OLDER_FW */ #undef IWL_DBG_CFG #undef IWL_DBG_CFG_STR #undef IWL_DBG_CFG_NODEF #undef IWL_DBG_CFG_BIN #undef IWL_DBG_CFG_BINA #undef IWL_DBG_CFG_RANGE #undef 
IWL_MOD_PARAM #undef IWL_MVM_MOD_PARAM #ifndef DBG_CFG_REINCLUDE }; extern struct iwl_dbg_cfg current_dbg_config; void iwl_dbg_cfg_free(struct iwl_dbg_cfg *dbgcfg); void iwl_dbg_cfg_load_ini(struct device *dev, struct iwl_dbg_cfg *dbgcfg); #endif /* DBG_CFG_REINCLUDE */ #endif /* __IWL_DBG_CFG_H__ || DBG_CFG_REINCLUDE */ backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c000066400000000000000000000131671351064634500270730ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright (C) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright (C) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* *****************************************************************************/ #include #include "iwl-trans.h" #include "iwl-dbg-tlv.h" void iwl_dbg_tlv_copy(struct iwl_trans *trans, struct iwl_ucode_tlv *tlv, bool ext) { struct iwl_apply_point_data *dbg_cfg, *tlv_copy; struct iwl_fw_ini_header *header = (void *)&tlv->data[0]; u32 tlv_type = le32_to_cpu(tlv->type); u32 len = le32_to_cpu(tlv->length); u32 apply_point = le32_to_cpu(header->apply_point); if (le32_to_cpu(header->tlv_version) != 1) return; if (WARN_ONCE(apply_point >= IWL_FW_INI_APPLY_NUM, "Invalid apply point %d\n", apply_point)) return; IWL_DEBUG_FW(trans, "WRT: read TLV 0x%x, apply point %d\n", tlv_type, apply_point); if (ext) dbg_cfg = &trans->dbg.apply_points_ext[apply_point]; else dbg_cfg = &trans->dbg.apply_points[apply_point]; tlv_copy = kzalloc(sizeof(*tlv_copy) + len, GFP_KERNEL); if (!tlv_copy) { IWL_ERR(trans, "WRT: No memory for TLV 0x%x, apply point %d\n", tlv_type, apply_point); return; } INIT_LIST_HEAD(&tlv_copy->list); memcpy(&tlv_copy->tlv, tlv, sizeof(*tlv) + len); if (!dbg_cfg->list.next) INIT_LIST_HEAD(&dbg_cfg->list); list_add_tail(&tlv_copy->list, &dbg_cfg->list); trans->dbg.ini_valid = true; } static void iwl_dbg_tlv_free_list(struct list_head *list) { if (!list || !list->next) return; while (!list_empty(list)) { struct iwl_apply_point_data *node = list_entry(list->next, typeof(*node), list); list_del(&node->list); kfree(node); } } void iwl_dbg_tlv_free(struct iwl_trans *trans) { int i; for (i = 0; i < ARRAY_SIZE(trans->dbg.apply_points); i++) { struct iwl_apply_point_data *data; data = &trans->dbg.apply_points[i]; iwl_dbg_tlv_free_list(&data->list); data = &trans->dbg.apply_points_ext[i]; iwl_dbg_tlv_free_list(&data->list); } } static int iwl_dbg_tlv_parse_bin(struct iwl_trans *trans, const u8 *data, size_t len) { struct iwl_ucode_tlv *tlv; enum iwl_ucode_tlv_type tlv_type; u32 tlv_len; while (len >= sizeof(*tlv)) { len -= sizeof(*tlv); tlv = (void *)data; tlv_len = le32_to_cpu(tlv->length); tlv_type = le32_to_cpu(tlv->type); if (len < tlv_len) { IWL_ERR(trans, "invalid TLV len: %zd/%u\n", len, tlv_len); return -EINVAL; } len -= ALIGN(tlv_len, 4); data += sizeof(*tlv) + ALIGN(tlv_len, 4); switch (tlv_type) { case IWL_UCODE_TLV_TYPE_DEBUG_INFO: case IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION: case IWL_UCODE_TLV_TYPE_HCMD: case IWL_UCODE_TLV_TYPE_REGIONS: case IWL_UCODE_TLV_TYPE_TRIGGERS: case IWL_UCODE_TLV_TYPE_DEBUG_FLOW: iwl_dbg_tlv_copy(trans, tlv, true); break; default: WARN_ONCE(1, "Invalid TLV %x\n", tlv_type); break; } } return 0; } void iwl_dbg_tlv_load_bin(struct device *dev, struct iwl_trans *trans) { const struct firmware *fw; int res; if (trans->dbg.external_ini_loaded || !iwlwifi_mod_params.enable_ini) return; res = request_firmware(&fw, "iwl-dbg-tlv.ini", dev); if (res) return; iwl_dbg_tlv_parse_bin(trans, fw->data, fw->size); trans->dbg.external_ini_loaded = true; release_firmware(fw); } backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h000066400000000000000000000062641351064634500271000ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. 
* * GPL LICENSE SUMMARY * * Copyright (C) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright (C) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #ifndef __iwl_dbg_tlv_h__ #define __iwl_dbg_tlv_h__ #include #include /** * struct iwl_apply_point_data * @list: list to go through the TLVs of the apply point * @tlv: a debug TLV */ struct iwl_apply_point_data { struct list_head list; struct iwl_ucode_tlv tlv; }; struct iwl_trans; void iwl_dbg_tlv_load_bin(struct device *dev, struct iwl_trans *trans); void iwl_dbg_tlv_free(struct iwl_trans *trans); void iwl_dbg_tlv_copy(struct iwl_trans *trans, struct iwl_ucode_tlv *tlv, bool ext); #endif /* __iwl_dbg_tlv_h__*/ backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/iwl-debug.c000066400000000000000000000107721351064634500266210ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. 
* * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #include #include #include #include "iwl-drv.h" #include "iwl-debug.h" #include "iwl-devtrace.h" #define __iwl_fn(fn) \ void __iwl_ ##fn(struct device *dev, const char *fmt, ...) \ { \ struct va_format vaf = { \ .fmt = fmt, \ }; \ va_list args1, args2; \ \ va_start(args1, fmt); \ va_copy(args2, args1); \ vaf.va = &args2; \ dev_ ##fn(dev, "%pV", &vaf); \ va_end(args2); \ vaf.va = &args1; \ trace_iwlwifi_ ##fn(&vaf); \ va_end(args1); \ } __iwl_fn(warn) IWL_EXPORT_SYMBOL(__iwl_warn); __iwl_fn(info) IWL_EXPORT_SYMBOL(__iwl_info); __iwl_fn(crit) IWL_EXPORT_SYMBOL(__iwl_crit); void __iwl_err(struct device *dev, bool rfkill_prefix, bool trace_only, const char *fmt, ...) { struct va_format vaf = { .fmt = fmt, }; va_list args; va_start(args, fmt); if (!trace_only) { va_list args2; va_copy(args2, args); vaf.va = &args2; if (rfkill_prefix) dev_err(dev, "(RFKILL) %pV", &vaf); else dev_err(dev, "%pV", &vaf); va_end(args2); } vaf.va = &args; trace_iwlwifi_err(&vaf); va_end(args); } IWL_EXPORT_SYMBOL(__iwl_err); #if defined(CPTCFG_IWLWIFI_DEBUG) || defined(CPTCFG_IWLWIFI_DEVICE_TRACING) void __iwl_dbg(struct device *dev, u32 level, bool limit, const char *function, const char *fmt, ...) 
{ struct va_format vaf = { .fmt = fmt, }; va_list args; va_start(args, fmt); #ifdef CPTCFG_IWLWIFI_DEBUG if (iwl_have_debug_level(level) && (!limit || net_ratelimit())) { va_list args2; va_copy(args2, args); vaf.va = &args2; dev_printk(KERN_DEBUG, dev, "%c %s %pV", in_interrupt() ? 'I' : 'U', function, &vaf); va_end(args2); } #endif vaf.va = &args; trace_iwlwifi_dbg(level, in_interrupt(), function, &vaf); va_end(args); } IWL_EXPORT_SYMBOL(__iwl_dbg); #endif backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/iwl-debug.h000066400000000000000000000206761351064634500266320ustar00rootroot00000000000000/****************************************************************************** * * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2018 - 2019 Intel Corporation * * Portions of this file are derived from the ipw3945 project. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * *****************************************************************************/ #ifndef __iwl_debug_h__ #define __iwl_debug_h__ #include "iwl-modparams.h" static inline bool iwl_have_debug_level(u32 level) { #ifdef CPTCFG_IWLWIFI_DEBUG return iwlwifi_mod_params.debug_level & level; #else return false; #endif } struct device; void __iwl_err(struct device *dev, bool rfkill_prefix, bool only_trace, const char *fmt, ...) __printf(4, 5); void __iwl_warn(struct device *dev, const char *fmt, ...) __printf(2, 3); void __iwl_info(struct device *dev, const char *fmt, ...) __printf(2, 3); void __iwl_crit(struct device *dev, const char *fmt, ...) __printf(2, 3); /* not all compilers can evaluate strlen() at compile time, so use sizeof() */ #define CHECK_FOR_NEWLINE(f) BUILD_BUG_ON(f[sizeof(f) - 2] != '\n') /* No matter what is m (priv, bus, trans), this will work */ #define IWL_ERR_DEV(d, f, a...) \ do { \ CHECK_FOR_NEWLINE(f); \ __iwl_err((d), false, false, f, ## a); \ } while (0) #define IWL_WARN_DEV(d, f, a...) \ do { \ CHECK_FOR_NEWLINE(f); \ __iwl_warn((d), f, ## a); \ } while (0) #define IWL_ERR(m, f, a...) \ IWL_ERR_DEV((m)->dev, f, ## a) #define IWL_WARN(m, f, a...) \ do { \ CHECK_FOR_NEWLINE(f); \ __iwl_warn((m)->dev, f, ## a); \ } while (0) #define IWL_INFO(m, f, a...) \ do { \ CHECK_FOR_NEWLINE(f); \ __iwl_info((m)->dev, f, ## a); \ } while (0) #define IWL_CRIT(m, f, a...) \ do { \ CHECK_FOR_NEWLINE(f); \ __iwl_crit((m)->dev, f, ## a); \ } while (0) #if defined(CPTCFG_IWLWIFI_DEBUG) || defined(CPTCFG_IWLWIFI_DEVICE_TRACING) void __iwl_dbg(struct device *dev, u32 level, bool limit, const char *function, const char *fmt, ...) __printf(5, 6); #else __printf(5, 6) static inline void __iwl_dbg(struct device *dev, u32 level, bool limit, const char *function, const char *fmt, ...) {} #endif #define iwl_print_hex_error(m, p, len) \ do { \ print_hex_dump(KERN_ERR, "iwl data: ", \ DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \ } while (0) #define __IWL_DEBUG_DEV(dev, level, limit, fmt, args...) 
\ do { \ CHECK_FOR_NEWLINE(fmt); \ __iwl_dbg(dev, level, limit, __func__, fmt, ##args); \ } while (0) #define IWL_DEBUG(m, level, fmt, args...) \ __IWL_DEBUG_DEV((m)->dev, level, false, fmt, ##args) #define IWL_DEBUG_DEV(dev, level, fmt, args...) \ __IWL_DEBUG_DEV(dev, level, false, fmt, ##args) #define IWL_DEBUG_LIMIT(m, level, fmt, args...) \ __IWL_DEBUG_DEV((m)->dev, level, true, fmt, ##args) #ifdef CPTCFG_IWLWIFI_DEBUG #define iwl_print_hex_dump(m, level, p, len) \ do { \ if (iwl_have_debug_level(level)) \ print_hex_dump(KERN_DEBUG, "iwl data: ", \ DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \ } while (0) #else #define iwl_print_hex_dump(m, level, p, len) #endif /* CPTCFG_IWLWIFI_DEBUG */ /* * To use the debug system: * * If you are defining a new debug classification, simply add it to the #define * list here in the form of * * #define IWL_DL_xxxx VALUE * * where xxxx should be the name of the classification (for example, WEP). * * You then need to either add a IWL_xxxx_DEBUG() macro definition for your * classification, or use IWL_DEBUG(IWL_DL_xxxx, ...) whenever you want * to send output to that classification. * * The active debug levels can be accessed via files * * /sys/module/iwlwifi/parameters/debug * when CPTCFG_IWLWIFI_DEBUG=y. * * /sys/kernel/debug/phy0/iwlwifi/debug/debug_level * when CPTCFG_IWLWIFI_DEBUGFS=y. * */ /* 0x0000000F - 0x00000001 */ #define IWL_DL_INFO 0x00000001 #define IWL_DL_MAC80211 0x00000002 #define IWL_DL_HCMD 0x00000004 #define IWL_DL_TDLS 0x00000008 /* 0x000000F0 - 0x00000010 */ #define IWL_DL_QUOTA 0x00000010 #define IWL_DL_TE 0x00000020 #define IWL_DL_EEPROM 0x00000040 #define IWL_DL_RADIO 0x00000080 /* 0x00000F00 - 0x00000100 */ #define IWL_DL_POWER 0x00000100 #define IWL_DL_TEMP 0x00000200 #define IWL_DL_RPM 0x00000400 #define IWL_DL_SCAN 0x00000800 /* 0x0000F000 - 0x00001000 */ #define IWL_DL_ASSOC 0x00001000 #define IWL_DL_DROP 0x00002000 #define IWL_DL_LAR 0x00004000 #define IWL_DL_COEX 0x00008000 /* 0x000F0000 - 0x00010000 */ #define IWL_DL_FW 0x00010000 #define IWL_DL_RF_KILL 0x00020000 #define IWL_DL_TPT 0x00040000 /* 0x00F00000 - 0x00100000 */ #define IWL_DL_RATE 0x00100000 #define IWL_DL_CALIB 0x00200000 #define IWL_DL_WEP 0x00400000 #define IWL_DL_TX 0x00800000 /* 0x0F000000 - 0x01000000 */ #define IWL_DL_RX 0x01000000 #define IWL_DL_ISR 0x02000000 #define IWL_DL_HT 0x04000000 #define IWL_DL_EXTERNAL 0x08000000 /* 0xF0000000 - 0x10000000 */ #define IWL_DL_11H 0x10000000 #define IWL_DL_STATS 0x20000000 #define IWL_DL_TX_REPLY 0x40000000 #define IWL_DL_TX_QUEUES 0x80000000 #define IWL_DEBUG_INFO(p, f, a...) IWL_DEBUG(p, IWL_DL_INFO, f, ## a) #define IWL_DEBUG_TDLS(p, f, a...) IWL_DEBUG(p, IWL_DL_TDLS, f, ## a) #define IWL_DEBUG_MAC80211(p, f, a...) IWL_DEBUG(p, IWL_DL_MAC80211, f, ## a) #define IWL_DEBUG_EXTERNAL(p, f, a...) IWL_DEBUG(p, IWL_DL_EXTERNAL, f, ## a) #define IWL_DEBUG_TEMP(p, f, a...) IWL_DEBUG(p, IWL_DL_TEMP, f, ## a) #define IWL_DEBUG_SCAN(p, f, a...) IWL_DEBUG(p, IWL_DL_SCAN, f, ## a) #define IWL_DEBUG_RX(p, f, a...) IWL_DEBUG(p, IWL_DL_RX, f, ## a) #define IWL_DEBUG_TX(p, f, a...) IWL_DEBUG(p, IWL_DL_TX, f, ## a) #define IWL_DEBUG_ISR(p, f, a...) IWL_DEBUG(p, IWL_DL_ISR, f, ## a) #define IWL_DEBUG_WEP(p, f, a...) IWL_DEBUG(p, IWL_DL_WEP, f, ## a) #define IWL_DEBUG_HC(p, f, a...) IWL_DEBUG(p, IWL_DL_HCMD, f, ## a) #define IWL_DEBUG_QUOTA(p, f, a...) IWL_DEBUG(p, IWL_DL_QUOTA, f, ## a) #define IWL_DEBUG_TE(p, f, a...) IWL_DEBUG(p, IWL_DL_TE, f, ## a) #define IWL_DEBUG_EEPROM(d, f, a...) 
IWL_DEBUG_DEV(d, IWL_DL_EEPROM, f, ## a) #define IWL_DEBUG_CALIB(p, f, a...) IWL_DEBUG(p, IWL_DL_CALIB, f, ## a) #define IWL_DEBUG_FW(p, f, a...) IWL_DEBUG(p, IWL_DL_FW, f, ## a) #define IWL_DEBUG_RF_KILL(p, f, a...) IWL_DEBUG(p, IWL_DL_RF_KILL, f, ## a) #define IWL_DEBUG_DROP(p, f, a...) IWL_DEBUG(p, IWL_DL_DROP, f, ## a) #define IWL_DEBUG_DROP_LIMIT(p, f, a...) \ IWL_DEBUG_LIMIT(p, IWL_DL_DROP, f, ## a) #define IWL_DEBUG_COEX(p, f, a...) IWL_DEBUG(p, IWL_DL_COEX, f, ## a) #define IWL_DEBUG_RATE(p, f, a...) IWL_DEBUG(p, IWL_DL_RATE, f, ## a) #define IWL_DEBUG_RATE_LIMIT(p, f, a...) \ IWL_DEBUG_LIMIT(p, IWL_DL_RATE, f, ## a) #define IWL_DEBUG_ASSOC(p, f, a...) \ IWL_DEBUG(p, IWL_DL_ASSOC | IWL_DL_INFO, f, ## a) #define IWL_DEBUG_ASSOC_LIMIT(p, f, a...) \ IWL_DEBUG_LIMIT(p, IWL_DL_ASSOC | IWL_DL_INFO, f, ## a) #define IWL_DEBUG_HT(p, f, a...) IWL_DEBUG(p, IWL_DL_HT, f, ## a) #define IWL_DEBUG_STATS(p, f, a...) IWL_DEBUG(p, IWL_DL_STATS, f, ## a) #define IWL_DEBUG_STATS_LIMIT(p, f, a...) \ IWL_DEBUG_LIMIT(p, IWL_DL_STATS, f, ## a) #define IWL_DEBUG_TX_REPLY(p, f, a...) IWL_DEBUG(p, IWL_DL_TX_REPLY, f, ## a) #define IWL_DEBUG_TX_QUEUES(p, f, a...) IWL_DEBUG(p, IWL_DL_TX_QUEUES, f, ## a) #define IWL_DEBUG_RADIO(p, f, a...) IWL_DEBUG(p, IWL_DL_RADIO, f, ## a) #define IWL_DEBUG_DEV_RADIO(p, f, a...) IWL_DEBUG_DEV(p, IWL_DL_RADIO, f, ## a) #define IWL_DEBUG_POWER(p, f, a...) IWL_DEBUG(p, IWL_DL_POWER, f, ## a) #define IWL_DEBUG_11H(p, f, a...) IWL_DEBUG(p, IWL_DL_11H, f, ## a) #define IWL_DEBUG_TPT(p, f, a...) IWL_DEBUG(p, IWL_DL_TPT, f, ## a) #define IWL_DEBUG_RPM(p, f, a...) IWL_DEBUG(p, IWL_DL_RPM, f, ## a) #define IWL_DEBUG_LAR(p, f, a...) IWL_DEBUG(p, IWL_DL_LAR, f, ## a) #define IWL_DEBUG_FW_INFO(p, f, a...) \ IWL_DEBUG(p, IWL_DL_INFO | IWL_DL_FW, f, ## a) #endif backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h000066400000000000000000000045401351064634500302400ustar00rootroot00000000000000/****************************************************************************** * * Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2015 Intel Deutschland GmbH * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * *****************************************************************************/ #if !defined(__IWLWIFI_DEVICE_TRACE_DATA) || defined(TRACE_HEADER_MULTI_READ) #define __IWLWIFI_DEVICE_TRACE_DATA #include #undef TRACE_SYSTEM #define TRACE_SYSTEM iwlwifi_data TRACE_EVENT(iwlwifi_dev_tx_tb, TP_PROTO(const struct device *dev, struct sk_buff *skb, u8 *data_src, size_t data_len), TP_ARGS(dev, skb, data_src, data_len), TP_STRUCT__entry( DEV_ENTRY __dynamic_array(u8, data, iwl_trace_data(skb) ? 
data_len : 0) ), TP_fast_assign( DEV_ASSIGN; if (iwl_trace_data(skb)) memcpy(__get_dynamic_array(data), data_src, data_len); ), TP_printk("[%s] TX frame data", __get_str(dev)) ); TRACE_EVENT(iwlwifi_dev_rx_data, TP_PROTO(const struct device *dev, const struct iwl_trans *trans, void *rxbuf, size_t len), TP_ARGS(dev, trans, rxbuf, len), TP_STRUCT__entry( DEV_ENTRY __dynamic_array(u8, data, len - iwl_rx_trace_len(trans, rxbuf, len, NULL)) ), TP_fast_assign( size_t offs = iwl_rx_trace_len(trans, rxbuf, len, NULL); DEV_ASSIGN; if (offs < len) memcpy(__get_dynamic_array(data), ((u8 *)rxbuf) + offs, len - offs); ), TP_printk("[%s] RX frame data", __get_str(dev)) ); #endif /* __IWLWIFI_DEVICE_TRACE_DATA */ #undef TRACE_INCLUDE_PATH #define TRACE_INCLUDE_PATH . #undef TRACE_INCLUDE_FILE #define TRACE_INCLUDE_FILE iwl-devtrace-data #include backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-io.h000066400000000000000000000121111351064634500277270ustar00rootroot00000000000000/****************************************************************************** * * Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2016-2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 * *****************************************************************************/ #if !defined(__IWLWIFI_DEVICE_TRACE_IO) || defined(TRACE_HEADER_MULTI_READ) #define __IWLWIFI_DEVICE_TRACE_IO #include #include #undef TRACE_SYSTEM #define TRACE_SYSTEM iwlwifi_io TRACE_EVENT(iwlwifi_dev_ioread32, TP_PROTO(const struct device *dev, u32 offs, u32 val), TP_ARGS(dev, offs, val), TP_STRUCT__entry( DEV_ENTRY __field(u32, offs) __field(u32, val) ), TP_fast_assign( DEV_ASSIGN; __entry->offs = offs; __entry->val = val; ), TP_printk("[%s] read io[%#x] = %#x", __get_str(dev), __entry->offs, __entry->val) ); TRACE_EVENT(iwlwifi_dev_iowrite8, TP_PROTO(const struct device *dev, u32 offs, u8 val), TP_ARGS(dev, offs, val), TP_STRUCT__entry( DEV_ENTRY __field(u32, offs) __field(u8, val) ), TP_fast_assign( DEV_ASSIGN; __entry->offs = offs; __entry->val = val; ), TP_printk("[%s] write io[%#x] = %#x", __get_str(dev), __entry->offs, __entry->val) ); TRACE_EVENT(iwlwifi_dev_iowrite32, TP_PROTO(const struct device *dev, u32 offs, u32 val), TP_ARGS(dev, offs, val), TP_STRUCT__entry( DEV_ENTRY __field(u32, offs) __field(u32, val) ), TP_fast_assign( DEV_ASSIGN; __entry->offs = offs; __entry->val = val; ), TP_printk("[%s] write io[%#x] = %#x", __get_str(dev), __entry->offs, __entry->val) ); TRACE_EVENT(iwlwifi_dev_iowrite64, TP_PROTO(const struct device *dev, u64 offs, u64 val), TP_ARGS(dev, offs, val), TP_STRUCT__entry( DEV_ENTRY __field(u64, offs) __field(u64, val) ), TP_fast_assign( DEV_ASSIGN; __entry->offs = offs; __entry->val = val; ), TP_printk("[%s] write io[%llu] = %llu", __get_str(dev), __entry->offs, __entry->val) ); TRACE_EVENT(iwlwifi_dev_iowrite_prph32, TP_PROTO(const struct device *dev, u32 offs, u32 val), TP_ARGS(dev, offs, val), TP_STRUCT__entry( DEV_ENTRY __field(u32, offs) __field(u32, val) ), TP_fast_assign( DEV_ASSIGN; __entry->offs = offs; __entry->val = val; ), TP_printk("[%s] write PRPH[%#x] = %#x", __get_str(dev), __entry->offs, __entry->val) ); TRACE_EVENT(iwlwifi_dev_iowrite_prph64, TP_PROTO(const struct device *dev, u64 offs, u64 val), TP_ARGS(dev, offs, val), TP_STRUCT__entry( DEV_ENTRY __field(u64, offs) __field(u64, val) ), TP_fast_assign( DEV_ASSIGN; __entry->offs = offs; __entry->val = val; ), TP_printk("[%s] write PRPH[%llu] = %llu", __get_str(dev), __entry->offs, __entry->val) ); TRACE_EVENT(iwlwifi_dev_ioread_prph32, TP_PROTO(const struct device *dev, u32 offs, u32 val), TP_ARGS(dev, offs, val), TP_STRUCT__entry( DEV_ENTRY __field(u32, offs) __field(u32, val) ), TP_fast_assign( DEV_ASSIGN; __entry->offs = offs; __entry->val = val; ), TP_printk("[%s] read PRPH[%#x] = %#x", __get_str(dev), __entry->offs, __entry->val) ); TRACE_EVENT(iwlwifi_dev_irq, TP_PROTO(const struct device *dev), TP_ARGS(dev), TP_STRUCT__entry( DEV_ENTRY ), TP_fast_assign( DEV_ASSIGN; ), /* TP_printk("") doesn't compile */ TP_printk("%d", 0) ); TRACE_EVENT(iwlwifi_dev_irq_msix, TP_PROTO(const struct device *dev, struct msix_entry *msix_entry, bool defirq, u32 inta_fh, u32 inta_hw), TP_ARGS(dev, msix_entry, defirq, inta_fh, inta_hw), TP_STRUCT__entry( DEV_ENTRY __field(u32, entry) __field(u8, defirq) __field(u32, inta_fh) __field(u32, inta_hw) ), TP_fast_assign( DEV_ASSIGN; __entry->entry = msix_entry->entry; __entry->defirq = defirq; __entry->inta_fh = inta_fh; __entry->inta_hw = inta_hw; ), TP_printk("entry:%d defirq:%d fh:0x%x, hw:0x%x", __entry->entry, __entry->defirq, __entry->inta_fh, __entry->inta_hw) ); TRACE_EVENT(iwlwifi_dev_ict_read, TP_PROTO(const struct
device *dev, u32 index, u32 value), TP_ARGS(dev, index, value), TP_STRUCT__entry( DEV_ENTRY __field(u32, index) __field(u32, value) ), TP_fast_assign( DEV_ASSIGN; __entry->index = index; __entry->value = value; ), TP_printk("[%s] read ict[%d] = %#.8x", __get_str(dev), __entry->index, __entry->value) ); #endif /* __IWLWIFI_DEVICE_TRACE_IO */ #undef TRACE_INCLUDE_PATH #define TRACE_INCLUDE_PATH . #undef TRACE_INCLUDE_FILE #define TRACE_INCLUDE_FILE iwl-devtrace-io #include backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h000066400000000000000000000137361351064634500310100ustar00rootroot00000000000000/****************************************************************************** * * Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * *****************************************************************************/ #if !defined(__IWLWIFI_DEVICE_TRACE_IWLWIFI) || defined(TRACE_HEADER_MULTI_READ) #define __IWLWIFI_DEVICE_TRACE_IWLWIFI #include #undef TRACE_SYSTEM #define TRACE_SYSTEM iwlwifi TRACE_EVENT(iwlwifi_dev_hcmd, TP_PROTO(const struct device *dev, struct iwl_host_cmd *cmd, u16 total_size, struct iwl_cmd_header_wide *hdr), TP_ARGS(dev, cmd, total_size, hdr), TP_STRUCT__entry( DEV_ENTRY __dynamic_array(u8, hcmd, total_size) __field(u32, flags) ), TP_fast_assign( int i, offset = sizeof(struct iwl_cmd_header); if (hdr->group_id) offset = sizeof(struct iwl_cmd_header_wide); DEV_ASSIGN; __entry->flags = cmd->flags; memcpy(__get_dynamic_array(hcmd), hdr, offset); for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { if (!cmd->len[i]) continue; memcpy((u8 *)__get_dynamic_array(hcmd) + offset, cmd->data[i], cmd->len[i]); offset += cmd->len[i]; } ), TP_printk("[%s] hcmd %#.2x.%#.2x (%ssync)", __get_str(dev), ((u8 *)__get_dynamic_array(hcmd))[1], ((u8 *)__get_dynamic_array(hcmd))[0], __entry->flags & CMD_ASYNC ? 
"a" : "") ); #ifdef CPTCFG_MAC80211_LATENCY_MEASUREMENTS TRACE_EVENT(iwlwifi_dev_tx_latency_thrshld, TP_PROTO(const struct device *dev, u32 msrmnt, u32 pkt_start, u32 pkt_end, u16 tid, u32 event_time, u16 seq, u32 gp2, int max), TP_ARGS(dev, msrmnt, pkt_start, pkt_end, tid, event_time, seq, gp2, max), TP_STRUCT__entry( DEV_ENTRY __field(u32, msrmnt) __field(u32, pkt_start) __field(u32, pkt_end) __field(u16, tid) __field(u32, event_time) __field(u16, seq) __field(u32, gp2) __field(int, max) ), TP_fast_assign( DEV_ASSIGN; __entry->msrmnt = msrmnt; __entry->pkt_start = pkt_start; __entry->pkt_end = pkt_end; __entry->tid = tid; __entry->event_time = event_time; __entry->seq = seq; __entry->gp2 = gp2; __entry->max = max; ), TP_printk("Tx Latency Metadata: %u, %u, %u, %u, %u, %s, %u, %u", __entry->seq, __entry->pkt_start, __entry->pkt_end, __entry->msrmnt, __entry->tid, __entry->max ? "True" : "False", __entry->gp2, __entry->event_time) ); #endif /*CPTCFG_MAC80211_LATENCY_MEASUREMENTS */ TRACE_EVENT(iwlwifi_dev_rx, TP_PROTO(const struct device *dev, const struct iwl_trans *trans, struct iwl_rx_packet *pkt, size_t len), TP_ARGS(dev, trans, pkt, len), TP_STRUCT__entry( DEV_ENTRY __field(u16, cmd) __field(u8, hdr_offset) __dynamic_array(u8, rxbuf, iwl_rx_trace_len(trans, pkt, len, NULL)) ), TP_fast_assign( size_t hdr_offset = 0; DEV_ASSIGN; __entry->cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd); memcpy(__get_dynamic_array(rxbuf), pkt, iwl_rx_trace_len(trans, pkt, len, &hdr_offset)); __entry->hdr_offset = hdr_offset; ), TP_printk("[%s] RX cmd %#.2x", __get_str(dev), __entry->cmd) ); TRACE_EVENT(iwlwifi_dev_tx, TP_PROTO(const struct device *dev, struct sk_buff *skb, void *tfd, size_t tfdlen, void *buf0, size_t buf0_len, int hdr_len), TP_ARGS(dev, skb, tfd, tfdlen, buf0, buf0_len, hdr_len), TP_STRUCT__entry( DEV_ENTRY __field(void *, skbaddr) __field(size_t, framelen) __dynamic_array(u8, tfd, tfdlen) /* * Do not insert between or below these items, * we want to keep the frame together (except * for the possible padding). */ __dynamic_array(u8, buf0, buf0_len) __dynamic_array(u8, buf1, hdr_len > 0 && iwl_trace_data(skb) ? 0 : skb->len - hdr_len) ), TP_fast_assign( DEV_ASSIGN; __entry->skbaddr = skb; __entry->framelen = buf0_len; if (hdr_len > 0) __entry->framelen += skb->len - hdr_len; memcpy(__get_dynamic_array(tfd), tfd, tfdlen); memcpy(__get_dynamic_array(buf0), buf0, buf0_len); if (hdr_len > 0 && !iwl_trace_data(skb)) skb_copy_bits(skb, hdr_len, __get_dynamic_array(buf1), skb->len - hdr_len); ), TP_printk("[%s] TX %.2x (%zu bytes) skbaddr=%p", __get_str(dev), ((u8 *)__get_dynamic_array(buf0))[0], __entry->framelen, __entry->skbaddr) ); TRACE_EVENT(iwlwifi_dev_ucode_event, TP_PROTO(const struct device *dev, u32 time, u32 data, u32 ev), TP_ARGS(dev, time, data, ev), TP_STRUCT__entry( DEV_ENTRY __field(u32, time) __field(u32, data) __field(u32, ev) ), TP_fast_assign( DEV_ASSIGN; __entry->time = time; __entry->data = data; __entry->ev = ev; ), TP_printk("[%s] EVT_LOGT:%010u:0x%08x:%04u", __get_str(dev), __entry->time, __entry->data, __entry->ev) ); #ifdef CPTCFG_IWLWIFI_DEVICE_TESTMODE TRACE_EVENT(iwlwifi_dev_dnt_data, TP_PROTO(const struct device *dev, void *dnt_data, size_t len), TP_ARGS(dev, dnt_data, len), TP_STRUCT__entry( DEV_ENTRY __dynamic_array(u8, data, len) ), TP_fast_assign( DEV_ASSIGN; memcpy(__get_dynamic_array(data), dnt_data, len); ), TP_printk("[%s] DnT data", __get_str(dev)) ); #endif #endif /* __IWLWIFI_DEVICE_TRACE_IWLWIFI */ #undef TRACE_INCLUDE_PATH #define TRACE_INCLUDE_PATH . 
#undef TRACE_INCLUDE_FILE #define TRACE_INCLUDE_FILE iwl-devtrace-iwlwifi #include backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-msg.h000066400000000000000000000051541351064634500301170ustar00rootroot00000000000000/****************************************************************************** * * Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * *****************************************************************************/ #if !defined(__IWLWIFI_DEVICE_TRACE_MSG) || defined(TRACE_HEADER_MULTI_READ) #define __IWLWIFI_DEVICE_TRACE_MSG #include #undef TRACE_SYSTEM #define TRACE_SYSTEM iwlwifi_msg #define MAX_MSG_LEN 110 DECLARE_EVENT_CLASS(iwlwifi_msg_event, TP_PROTO(struct va_format *vaf), TP_ARGS(vaf), TP_STRUCT__entry( __dynamic_array(char, msg, MAX_MSG_LEN) ), TP_fast_assign( WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg), MAX_MSG_LEN, vaf->fmt, *vaf->va) >= MAX_MSG_LEN); ), TP_printk("%s", __get_str(msg)) ); DEFINE_EVENT(iwlwifi_msg_event, iwlwifi_err, TP_PROTO(struct va_format *vaf), TP_ARGS(vaf) ); DEFINE_EVENT(iwlwifi_msg_event, iwlwifi_warn, TP_PROTO(struct va_format *vaf), TP_ARGS(vaf) ); DEFINE_EVENT(iwlwifi_msg_event, iwlwifi_info, TP_PROTO(struct va_format *vaf), TP_ARGS(vaf) ); DEFINE_EVENT(iwlwifi_msg_event, iwlwifi_crit, TP_PROTO(struct va_format *vaf), TP_ARGS(vaf) ); TRACE_EVENT(iwlwifi_dbg, TP_PROTO(u32 level, bool in_interrupt, const char *function, struct va_format *vaf), TP_ARGS(level, in_interrupt, function, vaf), TP_STRUCT__entry( __field(u32, level) __field(u8, in_interrupt) __string(function, function) __dynamic_array(char, msg, MAX_MSG_LEN) ), TP_fast_assign( __entry->level = level; __entry->in_interrupt = in_interrupt; __assign_str(function, function); WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg), MAX_MSG_LEN, vaf->fmt, *vaf->va) >= MAX_MSG_LEN); ), TP_printk("%s", __get_str(msg)) ); #endif /* __IWLWIFI_DEVICE_TRACE_MSG */ #undef TRACE_INCLUDE_PATH #define TRACE_INCLUDE_PATH . #undef TRACE_INCLUDE_FILE #define TRACE_INCLUDE_FILE iwl-devtrace-msg #include backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-ucode.h000066400000000000000000000043721351064634500304310ustar00rootroot00000000000000/****************************************************************************** * * Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. 
* * The full GNU General Public License is included in this distribution in the * file called LICENSE. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * *****************************************************************************/ #if !defined(__IWLWIFI_DEVICE_TRACE_UCODE) || defined(TRACE_HEADER_MULTI_READ) #define __IWLWIFI_DEVICE_TRACE_UCODE #include #undef TRACE_SYSTEM #define TRACE_SYSTEM iwlwifi_ucode TRACE_EVENT(iwlwifi_dev_ucode_cont_event, TP_PROTO(const struct device *dev, u32 time, u32 data, u32 ev), TP_ARGS(dev, time, data, ev), TP_STRUCT__entry( DEV_ENTRY __field(u32, time) __field(u32, data) __field(u32, ev) ), TP_fast_assign( DEV_ASSIGN; __entry->time = time; __entry->data = data; __entry->ev = ev; ), TP_printk("[%s] EVT_LOGT:%010u:0x%08x:%04u", __get_str(dev), __entry->time, __entry->data, __entry->ev) ); TRACE_EVENT(iwlwifi_dev_ucode_wrap_event, TP_PROTO(const struct device *dev, u32 wraps, u32 n_entry, u32 p_entry), TP_ARGS(dev, wraps, n_entry, p_entry), TP_STRUCT__entry( DEV_ENTRY __field(u32, wraps) __field(u32, n_entry) __field(u32, p_entry) ), TP_fast_assign( DEV_ASSIGN; __entry->wraps = wraps; __entry->n_entry = n_entry; __entry->p_entry = p_entry; ), TP_printk("[%s] wraps=#%02d n=0x%X p=0x%X", __get_str(dev), __entry->wraps, __entry->n_entry, __entry->p_entry) ); #endif /* __IWLWIFI_DEVICE_TRACE_UCODE */ #undef TRACE_INCLUDE_PATH #define TRACE_INCLUDE_PATH . #undef TRACE_INCLUDE_FILE #define TRACE_INCLUDE_FILE iwl-devtrace-ucode #include backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.c000066400000000000000000000027141351064634500273250ustar00rootroot00000000000000/****************************************************************************** * * Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved. * Copyright (C) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * *****************************************************************************/ #include /* sparse doesn't like tracepoint macros */ #ifndef __CHECKER__ #include "iwl-trans.h" #define CREATE_TRACE_POINTS #include "iwl-devtrace.h" EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_event); EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_cont_event); EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_wrap_event); #ifdef CPTCFG_MAC80211_LATENCY_MEASUREMENTS EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_tx_latency_thrshld); #endif /* CPTCFG_MAC80211_LATENCY_MEASUREMENTS */ #endif /* __CHECKER__ */ backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h000066400000000000000000000065671351064634500273440ustar00rootroot00000000000000/****************************************************************************** * * Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved. 
* Copyright(C) 2016 Intel Deutschland GmbH * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * *****************************************************************************/ #ifndef __IWLWIFI_DEVICE_TRACE #include #include #include #include "iwl-trans.h" #if !defined(__IWLWIFI_DEVICE_TRACE) static inline bool iwl_trace_data(struct sk_buff *skb) { struct ieee80211_hdr *hdr = (void *)skb->data; __le16 fc = hdr->frame_control; int offs = 24; /* start with normal header length */ if (!ieee80211_is_data(fc)) return false; /* Try to determine if the frame is EAPOL. This might have false * positives (if there's no RFC 1042 header and we compare to some * payload instead) but since we're only doing tracing that's not * a problem. */ if (ieee80211_has_a4(fc)) offs += 6; if (ieee80211_is_data_qos(fc)) offs += 2; /* don't account for crypto - these are unencrypted */ /* also account for the RFC 1042 header, of course */ offs += 6; return skb->len <= offs + 2 || *(__be16 *)(skb->data + offs) != cpu_to_be16(ETH_P_PAE); } static inline size_t iwl_rx_trace_len(const struct iwl_trans *trans, void *rxbuf, size_t len, size_t *out_hdr_offset) { struct iwl_cmd_header *cmd = (void *)((u8 *)rxbuf + sizeof(__le32)); struct ieee80211_hdr *hdr = NULL; size_t hdr_offset; if (cmd->cmd != trans->rx_mpdu_cmd) return len; hdr_offset = sizeof(struct iwl_cmd_header) + trans->rx_mpdu_cmd_hdr_size; if (out_hdr_offset) *out_hdr_offset = hdr_offset; hdr = (void *)((u8 *)cmd + hdr_offset); if (!ieee80211_is_data(hdr->frame_control)) return len; /* maybe try to identify EAPOL frames? */ return sizeof(__le32) + sizeof(*cmd) + trans->rx_mpdu_cmd_hdr_size + ieee80211_hdrlen(hdr->frame_control); } #endif #define __IWLWIFI_DEVICE_TRACE #include #include #include "iwl-trans.h" #if !defined(CPTCFG_IWLWIFI_DEVICE_TRACING) || defined(__CHECKER__) #undef TRACE_EVENT #define TRACE_EVENT(name, proto, ...) \ static inline void trace_ ## name(proto) {} #undef DECLARE_EVENT_CLASS #define DECLARE_EVENT_CLASS(...) #undef DEFINE_EVENT #define DEFINE_EVENT(evt_class, name, proto, ...) \ static inline void trace_ ## name(proto) {} #endif #define DEV_ENTRY __string(dev, dev_name(dev)) #define DEV_ASSIGN __assign_str(dev, dev_name(dev)) #include "iwl-devtrace-io.h" #include "iwl-devtrace-ucode.h" #include "iwl-devtrace-msg.h" #include "iwl-devtrace-data.h" #include "iwl-devtrace-iwlwifi.h" #endif /* __IWLWIFI_DEVICE_TRACE */ backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/iwl-dnt-cfg.c000066400000000000000000000263561351064634500270620ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2014 Intel Corporation. All rights reserved. 
* Copyright(c) 2014 Intel Mobile Communications GmbH * Copyright(c) 2016-2017 Intel Deutschland GmbH * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2014 Intel Corporation. All rights reserved. * Copyright(c) 2014 Intel Mobile Communications GmbH * Copyright(c) 2016-2017 Intel Deutschland GmbH * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #include #include #include #include #include "iwl-drv.h" #include "iwl-config.h" #include "iwl-debug.h" #include "iwl-tm-gnl.h" #include "iwl-dnt-cfg.h" #include "iwl-dnt-dispatch.h" #include "iwl-dnt-dev-if.h" #ifdef CPTCFG_IWLWIFI_DEBUGFS /* * iwl_dnt_debugfs_log_read - returns ucodeMessages to the user. * The logs are returned in binary format until the buffer of debugfs is * exhausted. If a log can't be copied to user (due to general error, not * a size problem) it is totally discarded and lost. 
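* * Reader-side sketch (the dbgm/log names match the debugfs entries created below; the parent path is an assumption that depends on where dbgfs_dir is rooted): cat /sys/kernel/debug/<...>/dbgm/log > ucode-msgs.bin * Each read() blocks for up to one second waiting for new entries; on timeout it returns a single zeroed u32 (which trace viewers ignore), and it returns 0 (EOF) on a pending signal or once the op_mode is gone.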
*/ static ssize_t iwl_dnt_debugfs_log_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_trans *trans = file->private_data; struct iwl_dnt *dnt = trans->tmdev->dnt; struct dnt_collect_db *db = dnt->dispatch.um_db; unsigned char *temp_buf; int ret = 0; temp_buf = kzalloc(count, GFP_KERNEL); if (!temp_buf) return -ENOMEM; dnt->debugfs_counter++; do { /* wait for new logs */ ret = wait_event_interruptible_timeout( db->waitq, (!trans->op_mode || db->read_ptr != db->wr_ptr), HZ); if (ret < 0 || !trans->op_mode) { /* we reached EOF */ ret = 0; break; } if (ret == 0) { /* * temp_buf is zeroed at this point, so if we set the * size to non-zero we'll return zeroes to userspace, * which the trace viewer will ignore (outside of a * specific trace item/event) */ ret = sizeof(u32); break; } ret = iwl_dnt_dispatch_pull(trans, temp_buf, count, UCODE_MESSAGES); if (ret < 0) { IWL_DEBUG_INFO(trans, "Failed to retrieve debug data\n"); goto free_buf; } } while (!ret); *ppos = 0; ret = simple_read_from_buffer(user_buf, ret, ppos, temp_buf, count); free_buf: kfree(temp_buf); dnt->debugfs_counter--; wake_up(&dnt->debugfs_waitq); return ret; } static const struct file_operations iwl_dnt_debugfs_log_ops = { .read = iwl_dnt_debugfs_log_read, .open = simple_open, .llseek = generic_file_llseek, }; static bool iwl_dnt_register_debugfs_entries(struct iwl_trans *trans, struct dentry *dbgfs_dir) { struct iwl_dnt *dnt = trans->tmdev->dnt; dnt->debugfs_entry = debugfs_create_dir("dbgm", dbgfs_dir); if (!dnt->debugfs_entry) return false; if (!debugfs_create_file("log", S_IRUSR, dnt->debugfs_entry, trans, &iwl_dnt_debugfs_log_ops)) return false; return true; } #endif static bool iwl_dnt_configure_prepare_dma(struct iwl_dnt *dnt, struct iwl_trans *trans) { struct iwl_dbg_cfg *dbg_cfg = &trans->dbg_cfg; if (dbg_cfg->dbm_destination_path != DMA || !dbg_cfg->dbgm_mem_power) return true; dnt->mon_buf_size = 0x800 << dbg_cfg->dbgm_mem_power; dnt->mon_buf_cpu_addr = dma_alloc_coherent(trans->dev, dnt->mon_buf_size, &dnt->mon_dma_addr, GFP_KERNEL); if (!dnt->mon_buf_cpu_addr) return false; dnt->mon_base_addr = (u64) dnt->mon_dma_addr; dnt->mon_end_addr = dnt->mon_base_addr + dnt->mon_buf_size; dnt->iwl_dnt_status |= IWL_DNT_STATUS_DMA_BUFFER_ALLOCATED; return true; } static bool iwl_dnt_validate_configuration(struct iwl_trans *trans) { struct iwl_dbg_cfg *dbg_cfg = &trans->dbg_cfg; if (!strcmp(trans->dev->bus->name, BUS_TYPE_PCI)) return dbg_cfg->dbm_destination_path == DMA || dbg_cfg->dbm_destination_path == MARBH_ADC || dbg_cfg->dbm_destination_path == MARBH_DBG || dbg_cfg->dbm_destination_path == MIPI; else if (!strcmp(trans->dev->bus->name, BUS_TYPE_SDIO)) return dbg_cfg->dbm_destination_path == MARBH_ADC || dbg_cfg->dbm_destination_path == MARBH_DBG || dbg_cfg->dbm_destination_path == MIPI; return false; } static int iwl_dnt_conf_monitor(struct iwl_trans *trans, u32 output, u32 monitor_type, u32 target_mon_mode) { struct iwl_dnt *dnt = trans->tmdev->dnt; if (dnt->cur_input_mask & MONITOR_INPUT_MODE_MASK) { IWL_INFO(trans, "DNT: Resetting device configuration\n"); return iwl_dnt_dev_if_configure_monitor(dnt, trans); } dnt->cur_input_mask |= MONITOR; dnt->dispatch.mon_output = output; dnt->cur_mon_type = monitor_type; dnt->cur_mon_mode = target_mon_mode; if (monitor_type == INTERFACE) { if (output == NETLINK || output == FTRACE) { /* setting PUSH out mode */ dnt->dispatch.mon_out_mode = PUSH; dnt->dispatch.mon_in_mode = COLLECT; } else { dnt->dispatch.dbgm_db = 
iwl_dnt_dispatch_allocate_collect_db(dnt); if (!dnt->dispatch.dbgm_db) return -ENOMEM; dnt->dispatch.mon_in_mode = RETRIEVE; } } else { dnt->dispatch.mon_out_mode = PULL; dnt->dispatch.mon_in_mode = RETRIEVE; /* * If we're running a device that supports DBGC and monitor * was given value as MARBH, it should be interpreted as SMEM */ if (trans->cfg->dbgc_supported && (monitor_type == MARBH_ADC || monitor_type == MARBH_DBG)) dnt->cur_mon_type = SMEM; } return iwl_dnt_dev_if_configure_monitor(dnt, trans); } void iwl_dnt_start(struct iwl_trans *trans) { struct iwl_dnt *dnt = trans->tmdev->dnt; struct iwl_dbg_cfg *dbg_cfg = &trans->dbg_cfg; if (!dnt) return; if ((dnt->iwl_dnt_status & IWL_DNT_STATUS_MON_CONFIGURED) && dbg_cfg->dbg_conf_monitor_cmd_id) iwl_dnt_dev_if_start_monitor(dnt, trans); if ((dnt->iwl_dnt_status & IWL_DNT_STATUS_UCODE_MSGS_CONFIGURED) && dbg_cfg->log_level_cmd_id) iwl_dnt_dev_if_set_log_level(dnt, trans); } IWL_EXPORT_SYMBOL(iwl_dnt_start); #ifdef CPTCFG_IWLWIFI_DEBUGFS static int iwl_dnt_conf_ucode_msgs_via_rx(struct iwl_trans *trans, u32 output) { struct iwl_dnt *dnt = trans->tmdev->dnt; dnt->cur_input_mask |= UCODE_MESSAGES; dnt->dispatch.ucode_msgs_output = output; if (output == NETLINK || output == FTRACE) { /* setting PUSH out mode */ dnt->dispatch.ucode_msgs_out_mode = PUSH; } else { dnt->dispatch.um_db = iwl_dnt_dispatch_allocate_collect_db(dnt); if (!dnt->dispatch.um_db) return -ENOMEM; dnt->dispatch.ucode_msgs_out_mode = RETRIEVE; } /* setting COLLECT in mode */ dnt->dispatch.ucode_msgs_in_mode = COLLECT; dnt->iwl_dnt_status |= IWL_DNT_STATUS_UCODE_MSGS_CONFIGURED; return 0; } #endif void iwl_dnt_init(struct iwl_trans *trans, struct dentry *dbgfs_dir) { struct iwl_dnt *dnt; bool __maybe_unused ret; int __maybe_unused err; dnt = kzalloc(sizeof(struct iwl_dnt), GFP_KERNEL); if (!dnt) return; trans->tmdev->dnt = dnt; dnt->dev = trans->dev; #ifdef CPTCFG_IWLWIFI_DEBUGFS ret = iwl_dnt_register_debugfs_entries(trans, dbgfs_dir); if (!ret) { IWL_ERR(trans, "Failed to create dnt debugfs entries\n"); return; } err = iwl_dnt_conf_ucode_msgs_via_rx(trans, DEBUGFS); if (err) IWL_DEBUG_INFO(trans, "Failed to configure uCodeMessages\n"); init_waitqueue_head(&dnt->debugfs_waitq); #endif if (!iwl_dnt_validate_configuration(trans)) { dnt->iwl_dnt_status |= IWL_DNT_STATUS_INVALID_MONITOR_CONF; return; } /* allocate DMA if needed */ if (!iwl_dnt_configure_prepare_dma(dnt, trans)) { IWL_ERR(trans, "Failed to prepare DMA\n"); dnt->iwl_dnt_status |= IWL_DNT_STATUS_FAILED_TO_ALLOCATE_DMA; } } IWL_EXPORT_SYMBOL(iwl_dnt_init); void iwl_dnt_free(struct iwl_trans *trans) { struct iwl_dnt *dnt = trans->tmdev->dnt; if (!dnt) return; #ifdef CPTCFG_IWLWIFI_DEBUGFS debugfs_remove_recursive(dnt->debugfs_entry); if (dnt->debugfs_counter) { IWL_INFO(trans, "waiting for dnt debugfs release (cnt=%d)\n", dnt->debugfs_counter); wake_up_interruptible(&dnt->dispatch.um_db->waitq); wait_event(dnt->debugfs_waitq, dnt->debugfs_counter == 0); } #endif iwl_dnt_dispatch_free(dnt, trans); kfree(dnt); } IWL_EXPORT_SYMBOL(iwl_dnt_free); void iwl_dnt_configure(struct iwl_trans *trans, const struct fw_img *image) { struct iwl_dnt *dnt = trans->tmdev->dnt; struct iwl_dbg_cfg *dbg_cfg = &trans->dbg_cfg; bool is_conf_invalid; if (!dnt) return; dnt->image = image; is_conf_invalid = (dnt->iwl_dnt_status & IWL_DNT_STATUS_INVALID_MONITOR_CONF); if (is_conf_invalid) return; switch (dbg_cfg->dbm_destination_path) { case DMA: if (!dnt->mon_buf_cpu_addr) { IWL_ERR(trans, "DMA buffer wasn't allocated\n"); return; } /* fall 
through */ case NO_MONITOR: case MIPI: case INTERFACE: case MARBH_ADC: case MARBH_DBG: iwl_dnt_conf_monitor(trans, dbg_cfg->dnt_out_mode, dbg_cfg->dbm_destination_path, dbg_cfg->dbgm_enable_mode); break; default: IWL_INFO(trans, "Invalid monitor type\n"); return; } dnt->dispatch.crash_out_mode |= dbg_cfg->dnt_out_mode; } backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/iwl-dnt-cfg.h000066400000000000000000000162261351064634500270620ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2014 Intel Corporation. All rights reserved. * Copyright(c) 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2014 Intel Corporation. All rights reserved. * Copyright(c) 2014 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* *****************************************************************************/ #ifndef __iwl_dnt_cfg_h__ #define __iwl_dnt_cfg_h__ #include #include #include "iwl-drv.h" #include "iwl-trans.h" #include "iwl-op-mode.h" #include "iwl-config.h" #define IWL_DNT_ARRAY_SIZE 128 #define BUS_TYPE_PCI "pci" #define BUS_TYPE_SDIO "sdio" #define GET_RX_PACKET_SIZE(pkt) ((le32_to_cpu(pkt->len_n_flags) &\ FH_RSCSR_FRAME_SIZE_MSK) -\ sizeof(struct iwl_cmd_header)) #define MONITOR_INPUT_MODE_MASK 0x01 #define UCODE_MSGS_INPUT_MODE_MASK 0x02 /* DnT status */ enum { IWL_DNT_STATUS_MON_CONFIGURED = BIT(0), IWL_DNT_STATUS_UCODE_MSGS_CONFIGURED = BIT(1), IWL_DNT_STATUS_DMA_BUFFER_ALLOCATED = BIT(2), IWL_DNT_STATUS_FAILED_TO_ALLOCATE_DMA = BIT(3), IWL_DNT_STATUS_FAILED_START_MONITOR = BIT(4), IWL_DNT_STATUS_INVALID_MONITOR_CONF = BIT(5), IWL_DNT_STATUS_FAILED_TO_ALLOCATE_DB = BIT(6), IWL_DNT_STATUS_FW_CRASH = BIT(7), }; /* input modes */ enum { MONITOR = BIT(0), UCODE_MESSAGES = BIT(1) }; /* output modes */ enum { NETLINK = BIT(0), DEBUGFS = BIT(1), FTRACE = BIT(2) }; /* monitor types */ enum { NO_MONITOR = 0, MIPI = BIT(0), INTERFACE = BIT(1), DMA = BIT(2), MARBH_ADC = BIT(3), MARBH_DBG = BIT(4), SMEM = BIT(5) }; /* monitor modes */ enum { NO_MON, DEBUG, SNIFFER, DBGC }; /* incoming mode */ enum { NO_IN, COLLECT, RETRIEVE }; /* outgoing mode */ enum { NO_OUT, PUSH, PULL }; /* crash data */ enum { NONE = 0, SRAM = BIT(0), DBGM = BIT(1), TX_FIFO = BIT(2), RX_FIFO = BIT(3), PERIPHERY = BIT(4) }; struct dnt_collect_entry { u8 *data; u32 size; }; /** * struct dnt_collect_db - cyclic database of collected debug entries * @collect_array: cyclic array of collected entries * @read_ptr: array read pointer * @wr_ptr: array write pointer * @waitq: waitqueue for new data * @db_lock: lock for the array */ struct dnt_collect_db { struct dnt_collect_entry collect_array[IWL_DNT_ARRAY_SIZE]; unsigned int read_ptr; unsigned int wr_ptr; wait_queue_head_t waitq; spinlock_t db_lock; /* locks the array */ }; /** * struct dnt_crash_data - holds pointers for crash data * @sram: sram data pointer * @dbgm: monitor data pointer * @rx: rx fifo data pointer * @tx: tx fifo data pointer * @periph: periphery registers data pointer */ struct dnt_crash_data { u8 *sram; u32 sram_buf_size; u8 *dbgm; u32 dbgm_buf_size; u8 *rx; u32 rx_buf_size; u8 *tx; u32 tx_buf_size; u8 *periph; u32 periph_buf_size; }; /** * struct iwl_dnt_dispatch - the iwl Debug and Trace Dispatch * * @mon_in_mode: monitor incoming data mode (COLLECT/RETRIEVE) * @mon_out_mode: monitor outgoing data mode (PUSH/PULL) * @mon_output: monitor output channel * @ucode_msgs_in_mode: uCodeMessages incoming data mode * @ucode_msgs_out_mode: uCodeMessages outgoing data mode * @ucode_msgs_output: uCodeMessages output channel * @crash_out_mode: crash data output mode * @dbgm_db: dbgm collect database * @um_db: uCodeMessages collect database * @crash: crash data buffers */ struct iwl_dnt_dispatch { u32 mon_in_mode; u32 mon_out_mode; u32 mon_output; u32 ucode_msgs_in_mode; u32 ucode_msgs_out_mode; u32 ucode_msgs_output; u32 crash_out_mode; struct dnt_collect_db *dbgm_db; struct dnt_collect_db *um_db; struct dnt_crash_data crash; }; /** * struct iwl_dnt - the iwl Debug and Trace * * @dev: pointer to struct device for printing purposes * @image: the currently loaded firmware image * @iwl_dnt_status: represents the DnT status * @is_configuration_valid: indicates whether the persistent configuration * is valid or not * @cur_input_mask: current mode mask * @cur_output_mask: current output mask * @cur_mon_type: current monitor type * @mon_buf_cpu_addr: DMA buffer CPU address * @mon_dma_addr: DMA buffer address * @mon_base_addr: monitor dma buffer start address * @mon_end_addr: monitor dma buffer end address * @mon_buf_size: monitor dma buffer size * @cur_mon_mode: current monitor mode (DBGM/SNIFFER) * @dispatch: the dispatch */ struct iwl_dnt { struct device *dev; const struct fw_img *image; u32 iwl_dnt_status; 
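/* bitmask of the IWL_DNT_STATUS_* flags defined above */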
bool is_configuration_valid; u8 cur_input_mask; u8 cur_output_mask; u32 cur_mon_type; u8 *mon_buf_cpu_addr; dma_addr_t mon_dma_addr; u64 mon_base_addr; u64 mon_end_addr; u32 mon_buf_size; u32 cur_mon_mode; struct iwl_dnt_dispatch dispatch; #ifdef CPTCFG_IWLWIFI_DEBUGFS u8 debugfs_counter; wait_queue_head_t debugfs_waitq; struct dentry *debugfs_entry; #endif }; /** * iwl_dnt_init - initialize iwl_dnt. * */ void iwl_dnt_init(struct iwl_trans *trans, struct dentry *dbgfs_dir); /** * iwl_dnt_free - free iwl_dnt. * */ void iwl_dnt_free(struct iwl_trans *trans); /** * iwl_dnt_configure - configures iwl_dnt. * */ void iwl_dnt_configure(struct iwl_trans *trans, const struct fw_img *image); /** * iwl_dnt_start - starts monitor and set log level * */ void iwl_dnt_start(struct iwl_trans *trans); #endif backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/iwl-dnt-dev-if.c000066400000000000000000000447361351064634500274770ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2014 Intel Corporation. All rights reserved. * Copyright(c) 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2014 Intel Corporation. All rights reserved. * Copyright(c) 2014 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #include #include #include #include "iwl-debug.h" #include "iwl-io.h" #include "iwl-trans.h" #include "iwl-tm-gnl.h" #include "iwl-dnt-cfg.h" #include "iwl-dnt-dev-if.h" #include "iwl-prph.h" #include "iwl-csr.h" static void iwl_dnt_dev_if_configure_mipi(struct iwl_trans *trans) { if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000) { iwl_trans_set_bits_mask(trans, trans->dbg_cfg.dbg_mipi_conf_reg, trans->dbg_cfg.dbg_mipi_conf_mask, trans->dbg_cfg.dbg_mipi_conf_mask); return; } /* ABB_CguDTClkCtrl - set system trace and mtm clock source as PLLA */ iowrite32(0x30303, (void __force __iomem *)0xe640110c); /* ABB_SpcuMemPower - set the power of the trace memory */ iowrite32(0x1, (void __force __iomem *)0xe640201c); /* set MIPI2 PCL, PCL_26 - PCL_30 */ iowrite32(0x10, (void __force __iomem *)0xe6300274); iowrite32(0x10, (void __force __iomem *)0xe6300278); iowrite32(0x10, (void __force __iomem *)0xe630027c); iowrite32(0x10, (void __force __iomem *)0xe6300280); iowrite32(0x10, (void __force __iomem *)0xe6300284); /* ARB0_CNF - enable generic arbiter */ iowrite32(0xc0000000, (void __force __iomem *)0xe6700108); /* enable WLAN arbiter */ iowrite32(0x80000006, (void __force __iomem *)0xe6700140); } static void iwl_dnt_dev_if_configure_marbh(struct iwl_trans *trans) { struct iwl_dbg_cfg *cfg = &trans->dbg_cfg; u32 ret, reg_val = 0; if (cfg->dbg_marbh_access_type == ACCESS_TYPE_DIRECT) { iwl_trans_set_bits_mask(trans, cfg->dbg_marbh_conf_reg, cfg->dbg_marbh_conf_mask, cfg->dbg_marbh_conf_mask); } else if (cfg->dbg_marbh_access_type == ACCESS_TYPE_INDIRECT) { ret = iwl_trans_read_mem(trans, cfg->dbg_marbh_conf_reg, &reg_val, 1); if (ret) { IWL_ERR(trans, "Failed to read MARBH conf reg\n"); return; } reg_val |= cfg->dbg_marbh_conf_mask; ret = iwl_trans_write_mem(trans, cfg->dbg_marbh_conf_reg, &reg_val, 1); if (ret) { IWL_ERR(trans, "Failed to write MARBH conf reg\n"); return; } } else { IWL_ERR(trans, "Invalid MARBH access type\n"); } } static void iwl_dnt_dev_if_configure_dbgc_registers(struct iwl_trans *trans, u32 base_addr, u32 end_addr) { struct iwl_dbg_cfg *cfg = &trans->dbg_cfg; switch (trans->tmdev->dnt->cur_mon_type) { case SMEM: iwl_write_prph(trans, cfg->dbgc_hb_base_addr, cfg->dbgc_hb_base_val_smem); iwl_write_prph(trans, cfg->dbgc_hb_end_addr, cfg->dbgc_hb_end_val_smem); /* * SMEM requires the same internal configuration as MARBH, * which preceded it. */ iwl_dnt_dev_if_configure_marbh(trans); break; case DMA: default: /* * The given addresses are already shifted by 4 places so we * need to shift by another 4. * Note that in SfP the end addr points to the last block of * data that the DBGC can write to, so when setting the end * register we need to set it to 1 block before. 
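* For example (illustrative numbers only): a monitor base of 0x12345600 * arrives here as 0x01234560 and is programmed as 0x00123456, i.e. in * 256-byte granularity, while the end register gets (end_addr >> 4) - 1, * one block earlier.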
*/ iwl_write_prph(trans, cfg->dbgc_hb_base_addr, base_addr >> 4); iwl_write_prph(trans, cfg->dbgc_hb_end_addr, (end_addr >> 4) - 1); break; }; } static void iwl_dnt_dev_if_configure_dbgm_registers(struct iwl_trans *trans, u32 base_addr, u32 end_addr) { struct iwl_dbg_cfg *cfg = &trans->dbg_cfg; /* If we're running a device that supports DBGC - use it */ if (trans->cfg->dbgc_supported) { iwl_dnt_dev_if_configure_dbgc_registers(trans, base_addr, end_addr); return; } /* configuring monitor */ iwl_write_prph(trans, cfg->dbg_mon_buff_base_addr_reg_addr, base_addr); iwl_write_prph(trans, cfg->dbg_mon_buff_end_addr_reg_addr, end_addr); iwl_write_prph(trans, cfg->dbg_mon_data_sel_ctl_addr, cfg->dbg_mon_data_sel_ctl_val); iwl_write_prph(trans, cfg->dbg_mon_mc_msk_addr, cfg->dbg_mon_mc_msk_val); iwl_write_prph(trans, cfg->dbg_mon_sample_mask_addr, cfg->dbg_mon_sample_mask_val); iwl_write_prph(trans, cfg->dbg_mon_start_mask_addr, cfg->dbg_mon_start_mask_val); iwl_write_prph(trans, cfg->dbg_mon_end_threshold_addr, cfg->dbg_mon_end_threshold_val); iwl_write_prph(trans, cfg->dbg_mon_end_mask_addr, cfg->dbg_mon_end_mask_val); iwl_write_prph(trans, cfg->dbg_mon_sample_period_addr, cfg->dbg_mon_sample_period_val); /* starting monitor */ iwl_write_prph(trans, cfg->dbg_mon_sample_ctl_addr, cfg->dbg_mon_sample_ctl_val); } static int iwl_dnt_dev_if_retrieve_dma_monitor_data(struct iwl_dnt *dnt, struct iwl_trans *trans, void *buffer, u32 buffer_size) { struct iwl_dbg_cfg *cfg = &trans->dbg_cfg; u32 wr_ptr, wrap_cnt; bool dont_reorder = false; /* FIXME send stop command to FW */ if (WARN_ON_ONCE(!dnt->mon_buf_cpu_addr)) { IWL_ERR(trans, "Can't retrieve data - DMA wasn't allocated\n"); return -ENOMEM; } /* If we're running a device that supports DBGC - use it */ if (trans->cfg->dbgc_supported) wr_ptr = iwl_read_prph(trans, cfg->dbgc_dram_wrptr_addr); else wr_ptr = iwl_read_prph(trans, cfg->dbg_mon_wr_ptr_addr); /* iwl_read_prph returns 0x5a5a5a5a when it fails to grab nic access */ if (wr_ptr == 0x5a5a5a5a) { IWL_ERR(trans, "Can't read write pointer - not reordering buffer\n"); dont_reorder = true; } /* If we're running a device that supports DBGC.... */ if (trans->cfg->dbgc_supported) /* * wr_ptr is given relative to the base address, in * DWORD granularity, and points to the next chunk to * write to - i.e., the oldest data in the buffer. */ wr_ptr <<= 2; else wr_ptr = (wr_ptr << 4) - dnt->mon_base_addr; /* Misunderstanding wr_ptr can cause a page fault, so validate it... 
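* (an out-of-range offset would be added to mon_buf_cpu_addr and handed * straight to memcpy() below).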
*/ if (wr_ptr > dnt->mon_buf_size) { IWL_ERR(trans, "Write pointer DMA monitor register points to invalid data - setting to 0\n"); dont_reorder = true; } /* We have a problem with the wr_ptr, so just return the memory as-is */ if (dont_reorder) wr_ptr = 0; if (cfg->dbgc_wrap_count_addr) wrap_cnt = iwl_read_prph(trans, cfg->dbgc_wrap_count_addr); else wrap_cnt = 1; if (wrap_cnt) { memcpy(buffer, dnt->mon_buf_cpu_addr + wr_ptr, dnt->mon_buf_size - wr_ptr); memcpy(buffer + dnt->mon_buf_size - wr_ptr, dnt->mon_buf_cpu_addr, wr_ptr); } else { memcpy(buffer, dnt->mon_buf_cpu_addr, wr_ptr); memset(buffer + wr_ptr, 0, dnt->mon_buf_size - wr_ptr); } return dnt->mon_buf_size; } static int iwl_dnt_dev_if_retrieve_marbh_monitor_data(struct iwl_dnt *dnt, struct iwl_trans *trans, u8 *buffer, u32 buffer_size) { struct iwl_dbg_cfg *cfg = &trans->dbg_cfg; int buf_size_in_dwords, buf_index, i; u32 wr_ptr, read_val; /* FIXME send stop command to FW */ wr_ptr = iwl_read_prph(trans, cfg->dbg_mon_wr_ptr_addr); /* iwl_read_prph returns 0x5a5a5a5a when it fails to grab nic access */ if (wr_ptr == 0x5a5a5a5a) { IWL_ERR(trans, "Can't read write pointer\n"); return -ENODEV; } read_val = iwl_read_prph(trans, cfg->dbg_mon_buff_base_addr_reg_addr); if (read_val == 0x5a5a5a5a) { IWL_ERR(trans, "Can't read monitor base address\n"); return -ENODEV; } dnt->mon_base_addr = read_val; read_val = iwl_read_prph(trans, cfg->dbg_mon_buff_end_addr_reg_addr); if (read_val == 0x5a5a5a5a) { IWL_ERR(trans, "Can't read monitor end address\n"); return -ENODEV; } dnt->mon_end_addr = read_val; wr_ptr = wr_ptr - dnt->mon_base_addr; iwl_write_prph(trans, cfg->dbg_mon_dmarb_rd_ctl_addr, 0x00000001); /* buf size includes the end_addr as well */ buf_size_in_dwords = dnt->mon_end_addr - dnt->mon_base_addr + 1; for (i = 0; i < buf_size_in_dwords; i++) { /* reordering cyclic buffer */ buf_index = (i + (buf_size_in_dwords - wr_ptr)) % buf_size_in_dwords; read_val = iwl_read_prph(trans, cfg->dbg_mon_dmarb_rd_data_addr); memcpy(&buffer[buf_index * sizeof(u32)], &read_val, sizeof(u32)); } iwl_write_prph(trans, cfg->dbg_mon_dmarb_rd_ctl_addr, 0x00000000); return buf_size_in_dwords * sizeof(u32); } static int iwl_dnt_dev_if_retrieve_smem_monitor_data(struct iwl_dnt *dnt, struct iwl_trans *trans, u8 *buffer, u32 buffer_size) { struct iwl_dbg_cfg *cfg = &trans->dbg_cfg; u32 i, bytes_to_end, calc_size; u32 base_addr, end_addr, wr_ptr_addr, wr_ptr_shift; u32 base, end, wr_ptr, pos, chunks_num, wr_ptr_offset, wrap_cnt; u8 *temp_buffer; /* assuming B-step or C-step */ base_addr = cfg->dbg_mon_buff_base_addr_reg_addr_b_step; end_addr = cfg->dbg_mon_buff_end_addr_reg_addr_b_step; wr_ptr_addr = cfg->dbg_mon_wr_ptr_addr_b_step; wr_ptr_shift = 2; base = iwl_read_prph(trans, base_addr); /* iwl_read_prph returns 0x5a5a5a5a when it fails to grab nic access */ if (base == 0x5a5a5a5a) { IWL_ERR(trans, "Can't read base addr\n"); return -ENODEV; } end = iwl_read_prph(trans, end_addr); /* iwl_read_prph returns 0x5a5a5a5a when it fails to grab nic access */ if (end == 0x5a5a5a5a) { IWL_ERR(trans, "Can't read end addr\n"); return -ENODEV; } if (base == end) { IWL_ERR(trans, "Invalid base and end values\n"); return -ENODEV; } wr_ptr = iwl_read_prph(trans, wr_ptr_addr); /* iwl_read_prph returns 0x5a5a5a5a when it fails to grab nic access */ if (wr_ptr == 0x5a5a5a5a) { IWL_ERR(trans, "Can't read write pointer, not re-aligning\n"); wr_ptr = base << 8; } pos = base << 8; calc_size = (end - base + 1) << 8; wr_ptr <<= wr_ptr_shift; bytes_to_end = ((end + 1) << 8) - wr_ptr; 
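/* * base/end are in 256-byte units, so the buffer spans * (end - base + 1) << 8 bytes; bytes_to_end is the linear stretch between * the write pointer and the buffer end, which is copied first when the * cyclic data is reordered below. */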
chunks_num = calc_size / DNT_CHUNK_SIZE; wr_ptr_offset = wr_ptr - pos; if (wr_ptr_offset > calc_size) { IWL_ERR(trans, "Invalid wr_ptr value, not re-aligning\n"); wr_ptr_offset = 0; } if (calc_size > buffer_size) { IWL_ERR(trans, "Invalid buffer size\n"); return -EINVAL; } temp_buffer = kzalloc(calc_size, GFP_KERNEL); if (!temp_buffer) return -ENOMEM; for (i = 0; i < chunks_num; i++) iwl_trans_read_mem(trans, pos + (i * DNT_CHUNK_SIZE), temp_buffer + (i * DNT_CHUNK_SIZE), DNT_CHUNK_SIZE / sizeof(u32)); if (calc_size % DNT_CHUNK_SIZE) iwl_trans_read_mem(trans, pos + (chunks_num * DNT_CHUNK_SIZE), temp_buffer + (chunks_num * DNT_CHUNK_SIZE), (calc_size - (chunks_num * DNT_CHUNK_SIZE)) / sizeof(u32)); if (cfg->dbgc_wrap_count_addr) wrap_cnt = iwl_read_prph(trans, cfg->dbgc_wrap_count_addr); else wrap_cnt = 1; if (wrap_cnt) { memcpy(buffer, temp_buffer + wr_ptr_offset, bytes_to_end); memcpy(buffer + bytes_to_end, temp_buffer, wr_ptr_offset); } else { memcpy(buffer, temp_buffer, wr_ptr_offset); memset(buffer + wr_ptr_offset, 0, bytes_to_end); } kfree(temp_buffer); return calc_size; } int iwl_dnt_dev_if_configure_monitor(struct iwl_dnt *dnt, struct iwl_trans *trans) { u32 base_addr, end_addr; switch (dnt->cur_mon_type) { case NO_MONITOR: IWL_INFO(trans, "Monitor is disabled\n"); dnt->iwl_dnt_status &= ~IWL_DNT_STATUS_MON_CONFIGURED; break; case MARBH_ADC: case MARBH_DBG: iwl_dnt_dev_if_configure_marbh(trans); dnt->mon_buf_size = DNT_MARBH_BUF_SIZE; break; case DMA: if (!dnt->mon_buf_cpu_addr) { IWL_ERR(trans, "Can't configure DMA monitor: no cpu addr\n"); return -ENOMEM; } base_addr = dnt->mon_base_addr >> 4; end_addr = dnt->mon_end_addr >> 4; iwl_dnt_dev_if_configure_dbgm_registers(trans, base_addr, end_addr); break; case MIPI: iwl_dnt_dev_if_configure_mipi(trans); break; case SMEM: base_addr = 0; end_addr = 0; iwl_dnt_dev_if_configure_dbgm_registers(trans, base_addr, end_addr); dnt->mon_buf_size = DNT_SMEM_BUF_SIZE; break; case INTERFACE: base_addr = 0; end_addr = 0x400; iwl_dnt_dev_if_configure_dbgm_registers(trans, base_addr, end_addr); break; default: dnt->iwl_dnt_status &= ~IWL_DNT_STATUS_MON_CONFIGURED; IWL_INFO(trans, "Invalid monitor type\n"); return -EINVAL; } dnt->iwl_dnt_status |= IWL_DNT_STATUS_MON_CONFIGURED; return 0; } static int iwl_dnt_dev_if_send_dbgm(struct iwl_dnt *dnt, struct iwl_trans *trans) { struct iwl_dbg_cfg *cfg = &trans->dbg_cfg; struct iwl_host_cmd host_cmd = { .id = cfg->dbg_conf_monitor_cmd_id, .data[0] = cfg->dbg_conf_monitor_host_command.data, .len[0] = cfg->dbg_conf_monitor_host_command.len, .dataflags[0] = IWL_HCMD_DFL_NOCOPY, }; int ret; ret = iwl_trans_send_cmd(trans, &host_cmd); if (ret) { IWL_ERR(trans, "Failed to send monitor command\n"); dnt->iwl_dnt_status |= IWL_DNT_STATUS_FAILED_START_MONITOR; } return ret; } static int iwl_dnt_dev_if_send_ldbg(struct iwl_dnt *dnt, struct iwl_trans *trans, int cmd_index) { struct iwl_dbg_cfg *cfg = &trans->dbg_cfg; struct iwl_host_cmd host_cmd = { .id = cfg->dbg_conf_monitor_cmd_id, .data[0] = cfg->ldbg_cmd[cmd_index].data, .len[0] = DNT_LDBG_CMD_SIZE, .dataflags[0] = IWL_HCMD_DFL_NOCOPY, }; return iwl_trans_send_cmd(trans, &host_cmd); } int iwl_dnt_dev_if_start_monitor(struct iwl_dnt *dnt, struct iwl_trans *trans) { struct iwl_dbg_cfg *cfg = &trans->dbg_cfg; int i, ret; switch (cfg->dbgm_enable_mode) { case DEBUG: return iwl_dnt_dev_if_send_dbgm(dnt, trans); case SNIFFER: ret = 0; for (i = 0; i < cfg->ldbg_cmd_nums; i++) { ret = iwl_dnt_dev_if_send_ldbg(dnt, trans, i); if (ret) { IWL_ERR(trans, "Failed to send ldbg 
command\n"); break; } } return ret; default: WARN_ONCE(1, "invalid option: %d\n", cfg->dbgm_enable_mode); return -EINVAL; } } int iwl_dnt_dev_if_set_log_level(struct iwl_dnt *dnt, struct iwl_trans *trans) { struct iwl_dbg_cfg *cfg = &trans->dbg_cfg; struct iwl_host_cmd host_cmd = { .id = cfg->log_level_cmd_id, .data[0] = cfg->log_level_cmd.data, .len[0] = cfg->log_level_cmd.len, .dataflags[0] = IWL_HCMD_DFL_NOCOPY, }; int ret; ret = iwl_trans_send_cmd(trans, &host_cmd); if (ret) IWL_ERR(trans, "Failed to send log level cmd\n"); return ret; } int iwl_dnt_dev_if_retrieve_monitor_data(struct iwl_dnt *dnt, struct iwl_trans *trans, u8 *buffer, u32 buffer_size) { switch (dnt->cur_mon_type) { case DMA: return iwl_dnt_dev_if_retrieve_dma_monitor_data(dnt, trans, buffer, buffer_size); case MARBH_ADC: case MARBH_DBG: return iwl_dnt_dev_if_retrieve_marbh_monitor_data(dnt, trans, buffer, buffer_size); case SMEM: return iwl_dnt_dev_if_retrieve_smem_monitor_data(dnt, trans, buffer, buffer_size); case INTERFACE: default: WARN_ONCE(1, "invalid option: %d\n", dnt->cur_mon_type); return -EINVAL; } } int iwl_dnt_dev_if_read_sram(struct iwl_dnt *dnt, struct iwl_trans *trans) { struct dnt_crash_data *crash = &dnt->dispatch.crash; int ofs, len = 0; ofs = dnt->image->sec[IWL_UCODE_SECTION_DATA].offset; len = dnt->image->sec[IWL_UCODE_SECTION_DATA].len; crash->sram = vmalloc(len); if (!crash->sram) return -ENOMEM; crash->sram_buf_size = len; return iwl_trans_read_mem(trans, ofs, crash->sram, len / sizeof(u32)); } IWL_EXPORT_SYMBOL(iwl_dnt_dev_if_read_sram); int iwl_dnt_dev_if_read_rx(struct iwl_dnt *dnt, struct iwl_trans *trans) { struct dnt_crash_data *crash = &dnt->dispatch.crash; int i, reg_val; u32 buf32_size, offset = 0; u32 *buf32; unsigned long flags; /* reading buffer size */ reg_val = iwl_trans_read_prph(trans, RXF_SIZE_ADDR); crash->rx_buf_size = (reg_val & RXF_SIZE_BYTE_CNT_MSK) >> RXF_SIZE_BYTE_CND_POS; /* the register holds the value divided by 128 */ crash->rx_buf_size = crash->rx_buf_size << 7; if (!crash->rx_buf_size) return -ENOMEM; buf32_size = crash->rx_buf_size / sizeof(u32); crash->rx = vmalloc(crash->rx_buf_size); if (!crash->rx) return -ENOMEM; buf32 = (u32 *)crash->rx; if (!iwl_trans_grab_nic_access(trans, &flags)) { vfree(crash->rx); return -EBUSY; } for (i = 0; i < buf32_size; i++) { iwl_trans_write_prph(trans, RXF_LD_FENCE_OFFSET_ADDR, offset); offset += sizeof(u32); buf32[i] = iwl_trans_read_prph(trans, RXF_FIFO_RD_FENCE_ADDR); } iwl_trans_release_nic_access(trans, &flags); return 0; } IWL_EXPORT_SYMBOL(iwl_dnt_dev_if_read_rx); backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/iwl-dnt-dev-if.h000066400000000000000000000103111351064634500274620ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2014 Intel Corporation. All rights reserved. * Copyright(c) 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. 
* * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2014 Intel Corporation. All rights reserved. * Copyright(c) 2014 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #ifndef __iwl_dnt_dev_if_h__ #define __iwl_dnt_dev_if_h__ #include "iwl-drv.h" #include "iwl-trans.h" #include "iwl-op-mode.h" #include "iwl-config.h" struct iwl_dnt; #define DNT_LDBG_CMD_SIZE 80 #define DNT_MARBH_BUF_SIZE (0x3cff * sizeof(u32)) #define DNT_SMEM_BUF_SIZE (0x18004) #define DNT_CHUNK_SIZE 512 /* marbh access types */ enum { ACCESS_TYPE_DIRECT = 0, ACCESS_TYPE_INDIRECT, }; /** * iwl_dnt_dev_if_configure_monitor - configure monitor. * * configure the correct monitor configuration - depends on the monitor mode. */ int iwl_dnt_dev_if_configure_monitor(struct iwl_dnt *dnt, struct iwl_trans *trans); /** * iwl_dnt_dev_if_retrieve_monitor_data - retrieve monitor data. * * retrieve monitor data - depends on the monitor mode. * Note: monitor must be stopped in order to retrieve data. */ int iwl_dnt_dev_if_retrieve_monitor_data(struct iwl_dnt *dnt, struct iwl_trans *trans, u8 *buffer, u32 buffer_size); /** * iwl_dnt_dev_if_start_monitor - start monitor data. * * starts monitor - sends command to start monitor. */ int iwl_dnt_dev_if_start_monitor(struct iwl_dnt *dnt, struct iwl_trans *trans); /** * iwl_dnt_dev_if_set_log_level - set ucode messages log level. 
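* * Sends the pre-configured log level host command (log_level_cmd_id from * the debug configuration) to the firmware.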
*/ int iwl_dnt_dev_if_set_log_level(struct iwl_dnt *dnt, struct iwl_trans *trans); int iwl_dnt_dev_if_read_sram(struct iwl_dnt *dnt, struct iwl_trans *trans); int iwl_dnt_dev_if_read_rx(struct iwl_dnt *dnt, struct iwl_trans *trans); #endif backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/iwl-dnt-dispatch.c000066400000000000000000000330701351064634500301110ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2014 Intel Corporation. All rights reserved. * Copyright(c) 2014 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2016 Intel Deutschland GmbH * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2014 Intel Corporation. All rights reserved. * Copyright(c) 2014 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2016 Intel Deutschland GmbH * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* *****************************************************************************/ #include #include #include #include "iwl-debug.h" #include "iwl-dnt-cfg.h" #include "iwl-dnt-dispatch.h" #include "iwl-dnt-dev-if.h" #include "iwl-tm-infc.h" #include "iwl-tm-gnl.h" #include "iwl-io.h" #include "fw/error-dump.h" struct dnt_collect_db *iwl_dnt_dispatch_allocate_collect_db(struct iwl_dnt *dnt) { struct dnt_collect_db *db; db = kzalloc(sizeof(struct dnt_collect_db), GFP_KERNEL); if (!db) { dnt->iwl_dnt_status |= IWL_DNT_STATUS_FAILED_TO_ALLOCATE_DB; return NULL; } spin_lock_init(&db->db_lock); init_waitqueue_head(&db->waitq); return db; } static void iwl_dnt_dispatch_free_collect_db(struct dnt_collect_db *db) { int i; for (i = 0; i < ARRAY_SIZE(db->collect_array); i++) kfree(db->collect_array[i].data); kfree(db); } static int iwl_dnt_dispatch_get_list_data(struct dnt_collect_db *db, u8 *buffer, u32 buffer_size) { struct dnt_collect_entry *cur_entry; int data_offset = 0; spin_lock_bh(&db->db_lock); while (db->read_ptr != db->wr_ptr) { cur_entry = &db->collect_array[db->read_ptr]; if (data_offset + cur_entry->size > buffer_size) break; memcpy(buffer + data_offset, cur_entry->data, cur_entry->size); data_offset += cur_entry->size; cur_entry->size = 0; kfree(cur_entry->data); cur_entry->data = NULL; /* increment read_ptr */ db->read_ptr = (db->read_ptr + 1) % IWL_DNT_ARRAY_SIZE; } spin_unlock_bh(&db->db_lock); return data_offset; } /** * iwl_dnt_dispatch_push_ftrace_handler - handles data and pushes it to ftrace. * */ static void iwl_dnt_dispatch_push_ftrace_handler(struct iwl_dnt *dnt, u8 *buffer, u32 buffer_size) { trace_iwlwifi_dev_dnt_data(dnt->dev, buffer, buffer_size); } /** * iwl_dnt_dispatch_push_netlink_handler - handles data and pushes it to netlink. * */ static int iwl_dnt_dispatch_push_netlink_handler(struct iwl_dnt *dnt, struct iwl_trans *trans, unsigned int cmd_id, u8 *buffer, u32 buffer_size) { return iwl_tm_gnl_send_msg(trans, cmd_id, false, buffer, buffer_size, GFP_ATOMIC); } static int iwl_dnt_dispatch_pull_monitor(struct iwl_dnt *dnt, struct iwl_trans *trans, u8 *buffer, u32 buffer_size) { int ret = 0; if (dnt->cur_mon_type == INTERFACE) ret = iwl_dnt_dispatch_get_list_data(dnt->dispatch.dbgm_db, buffer, buffer_size); else ret = iwl_dnt_dev_if_retrieve_monitor_data(dnt, trans, buffer, buffer_size); return ret; } int iwl_dnt_dispatch_pull(struct iwl_trans *trans, u8 *buffer, u32 buffer_size, u32 input) { struct iwl_dnt *dnt = trans->tmdev->dnt; int ret = 0; if (!trans->op_mode) return -EINVAL; switch (input) { case MONITOR: ret = iwl_dnt_dispatch_pull_monitor(dnt, trans, buffer, buffer_size); break; case UCODE_MESSAGES: ret = iwl_dnt_dispatch_get_list_data(dnt->dispatch.um_db, buffer, buffer_size); break; default: WARN_ONCE(1, "Invalid input mode %d\n", input); return -EINVAL; } return ret; } static int iwl_dnt_dispatch_collect_data(struct iwl_dnt *dnt, struct dnt_collect_db *db, struct iwl_rx_packet *pkt) { struct dnt_collect_entry *wr_entry; u32 data_size; data_size = GET_RX_PACKET_SIZE(pkt); spin_lock(&db->db_lock); wr_entry = &db->collect_array[db->wr_ptr]; /* * checking if wr_ptr is already in use * if so it means that we completed a cycle in the array * hence replacing data in wr_ptr */ if (WARN_ON_ONCE(wr_entry->data)) { spin_unlock(&db->db_lock); return -ENOMEM; } wr_entry->size = data_size; wr_entry->data = kzalloc(data_size, GFP_ATOMIC); if (!wr_entry->data) { spin_unlock(&db->db_lock); return -ENOMEM; } memcpy(wr_entry->data, pkt->data, wr_entry->size); db->wr_ptr = (db->wr_ptr + 
1) % IWL_DNT_ARRAY_SIZE; if (db->wr_ptr == db->read_ptr) { /* * since we overrun oldest data we should update read * ptr to the next oldest data */ struct dnt_collect_entry *rd_entry = &db->collect_array[db->read_ptr]; kfree(rd_entry->data); rd_entry->data = NULL; db->read_ptr = (db->read_ptr + 1) % IWL_DNT_ARRAY_SIZE; } wake_up_interruptible(&db->waitq); spin_unlock(&db->db_lock); return 0; } int iwl_dnt_dispatch_collect_ucode_message(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb) { struct iwl_dnt *dnt = trans->tmdev->dnt; struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_dnt_dispatch *dispatch; struct dnt_collect_db *db; int data_size; dispatch = &dnt->dispatch; db = dispatch->um_db; if (dispatch->ucode_msgs_in_mode != COLLECT) return 0; if (dispatch->ucode_msgs_out_mode != PUSH) return iwl_dnt_dispatch_collect_data(dnt, db, pkt); data_size = GET_RX_PACKET_SIZE(pkt); if (dispatch->ucode_msgs_output == FTRACE) iwl_dnt_dispatch_push_ftrace_handler(dnt, pkt->data, data_size); else if (dispatch->ucode_msgs_output == NETLINK) iwl_dnt_dispatch_push_netlink_handler(dnt, trans, IWL_TM_USER_CMD_NOTIF_UCODE_MSGS_DATA, pkt->data, data_size); return 0; } IWL_EXPORT_SYMBOL(iwl_dnt_dispatch_collect_ucode_message); void iwl_dnt_dispatch_free(struct iwl_dnt *dnt, struct iwl_trans *trans) { struct iwl_dnt_dispatch *dispatch = &dnt->dispatch; struct dnt_crash_data *crash = &dispatch->crash; if (dispatch->dbgm_db) iwl_dnt_dispatch_free_collect_db(dispatch->dbgm_db); if (dispatch->um_db) iwl_dnt_dispatch_free_collect_db(dispatch->um_db); if (dnt->mon_buf_cpu_addr) dma_free_coherent(trans->dev, dnt->mon_buf_size, dnt->mon_buf_cpu_addr, dnt->mon_dma_addr); if (crash->sram) vfree(crash->sram); if (crash->rx) vfree(crash->rx); if (crash->dbgm) vfree(crash->dbgm); memset(dispatch, 0, sizeof(*dispatch)); } static void iwl_dnt_dispatch_retrieve_crash_sram(struct iwl_dnt *dnt, struct iwl_trans *trans) { int ret; struct dnt_crash_data *crash = &dnt->dispatch.crash; if (crash->sram) { crash->sram_buf_size = 0; vfree(crash->sram); } ret = iwl_dnt_dev_if_read_sram(dnt, trans); if (ret) { IWL_ERR(dnt, "Failed to read sram\n"); return; } } static void iwl_dnt_dispatch_retrieve_crash_rx(struct iwl_dnt *dnt, struct iwl_trans *trans) { int ret; struct dnt_crash_data *crash = &dnt->dispatch.crash; if (crash->rx) { crash->rx_buf_size = 0; vfree(crash->rx); } ret = iwl_dnt_dev_if_read_rx(dnt, trans); if (ret) { IWL_ERR(dnt, "Failed to read rx\n"); return; } } static void iwl_dnt_dispatch_retrieve_crash_dbgm(struct iwl_dnt *dnt, struct iwl_trans *trans) { int ret; u32 buf_size; struct dnt_crash_data *crash = &dnt->dispatch.crash; if (crash->dbgm) { crash->dbgm_buf_size = 0; vfree(crash->dbgm); } switch (dnt->cur_mon_type) { case DMA: buf_size = dnt->mon_buf_size; break; case MARBH_ADC: case MARBH_DBG: buf_size = 0x2000 * sizeof(u32); break; case INTERFACE: if (dnt->dispatch.mon_output == NETLINK) return; buf_size = ARRAY_SIZE(dnt->dispatch.dbgm_db->collect_array); break; default: return; } crash->dbgm = vmalloc(buf_size); if (!crash->dbgm) return; if (dnt->cur_mon_type == INTERFACE) { iwl_dnt_dispatch_get_list_data(dnt->dispatch.dbgm_db, crash->dbgm, buf_size); } else { ret = iwl_dnt_dev_if_retrieve_monitor_data(dnt, trans, crash->dbgm, buf_size); if (ret != buf_size) { IWL_ERR(dnt, "Failed to read DBGM\n"); vfree(crash->dbgm); return; } } crash->dbgm_buf_size = buf_size; } static void iwl_dnt_dispatch_create_tlv(struct iwl_fw_error_dump_data *tlv, u32 type, u32 len, u8 *value) { tlv->type = cpu_to_le32(type); 
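/* TLV layout: __le32 type, __le32 len, then len payload bytes */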
tlv->len = cpu_to_le32(len); memcpy(tlv->data, value, len); } static u32 iwl_dnt_dispatch_create_crash_tlv(struct iwl_trans *trans, u8 **tlv_buf) { struct iwl_fw_error_dump_file *dump_file; struct iwl_fw_error_dump_data *cur_tlv; struct iwl_dnt *dnt = trans->tmdev->dnt; struct dnt_crash_data *crash; u32 total_size; if (!dnt) { IWL_DEBUG_INFO(trans, "DnT is not initialized\n"); return 0; } crash = &dnt->dispatch.crash; /* * data will be represented as TLV - each buffer is represented as * follows: * u32 - type (SRAM/DBGM/RX/TX/PERIPHERY) * u32 - length * u8[] - data */ total_size = sizeof(*dump_file) + crash->sram_buf_size + crash->dbgm_buf_size + crash->rx_buf_size + crash->tx_buf_size + crash->periph_buf_size + sizeof(u32) * 10; dump_file = vmalloc(total_size); if (!dump_file) return 0; dump_file->file_len = cpu_to_le32(total_size); dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER); *tlv_buf = (u8 *)dump_file; cur_tlv = (void *)dump_file->data; if (crash->sram_buf_size) { /* TODO: Convert to the new SMEM format */ iwl_dnt_dispatch_create_tlv(cur_tlv, 0, crash->sram_buf_size, crash->sram); cur_tlv = iwl_fw_error_next_data(cur_tlv); } if (crash->dbgm_buf_size) { iwl_dnt_dispatch_create_tlv(cur_tlv, IWL_FW_ERROR_DUMP_FW_MONITOR, crash->dbgm_buf_size, crash->dbgm); cur_tlv = iwl_fw_error_next_data(cur_tlv); } if (crash->tx_buf_size) { iwl_dnt_dispatch_create_tlv(cur_tlv, IWL_FW_ERROR_DUMP_TXF, crash->tx_buf_size, crash->tx); cur_tlv = iwl_fw_error_next_data(cur_tlv); } if (crash->rx_buf_size) { iwl_dnt_dispatch_create_tlv(cur_tlv, IWL_FW_ERROR_DUMP_RXF, crash->rx_buf_size, crash->rx); cur_tlv = iwl_fw_error_next_data(cur_tlv); } return total_size; } static void iwl_dnt_dispatch_handle_crash_netlink(struct iwl_dnt *dnt, struct iwl_trans *trans) { int ret; u8 *tlv_buf; u32 tlv_buf_size; struct iwl_tm_crash_data *crash_notif; tlv_buf_size = iwl_dnt_dispatch_create_crash_tlv(trans, &tlv_buf); if (!tlv_buf_size) return; crash_notif = vmalloc(sizeof(struct iwl_tm_crash_data) + tlv_buf_size); if (!crash_notif) return; crash_notif->size = tlv_buf_size; memcpy(crash_notif->data, tlv_buf, tlv_buf_size); ret = iwl_tm_gnl_send_msg(trans, IWL_TM_USER_CMD_NOTIF_CRASH_DATA, false, crash_notif, sizeof(struct iwl_tm_crash_data) + tlv_buf_size, GFP_ATOMIC); if (ret) IWL_ERR(dnt, "Failed to send crash data notification\n"); vfree(crash_notif); vfree(tlv_buf); } void iwl_dnt_dispatch_handle_nic_err(struct iwl_trans *trans) { struct iwl_dnt *dnt = trans->tmdev->dnt; struct iwl_dbg_cfg *dbg_cfg = &trans->dbg_cfg; trans->tmdev->dnt->iwl_dnt_status |= IWL_DNT_STATUS_FW_CRASH; if (!dbg_cfg->dbg_flags) return; if (dbg_cfg->dbg_flags & SRAM) iwl_dnt_dispatch_retrieve_crash_sram(dnt, trans); if (dbg_cfg->dbg_flags & RX_FIFO) iwl_dnt_dispatch_retrieve_crash_rx(dnt, trans); if (dbg_cfg->dbg_flags & DBGM) iwl_dnt_dispatch_retrieve_crash_dbgm(dnt, trans); if (dnt->dispatch.crash_out_mode & NETLINK) iwl_dnt_dispatch_handle_crash_netlink(dnt, trans); } IWL_EXPORT_SYMBOL(iwl_dnt_dispatch_handle_nic_err); backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/iwl-dnt-dispatch.h000066400000000000000000000064761351064634500301260ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2014 Intel Corporation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2014 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #ifndef __iwl_dnt_dispatch_h__ #define __iwl_dnt_dispatch_h__ #include "iwl-debug.h" #include "iwl-drv.h" #include "iwl-trans.h" #include "iwl-op-mode.h" #include "iwl-config.h" /** * iwl_dnt_dispatch_pull - pulling debug data. */ int iwl_dnt_dispatch_pull(struct iwl_trans *trans, u8 *buffer, u32 buffer_size, u32 input); int iwl_dnt_dispatch_collect_ucode_message(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb); void iwl_dnt_dispatch_free(struct iwl_dnt *dnt, struct iwl_trans *trans); struct dnt_collect_db *iwl_dnt_dispatch_allocate_collect_db( struct iwl_dnt *dnt); void iwl_dnt_dispatch_handle_nic_err(struct iwl_trans *trans); #endif backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/iwl-drv.c000066400000000000000000001750111351064634500263240ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. 
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright (C) 2018-2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright (C) 2018-2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* *****************************************************************************/ #include #include #include #include #include #include "iwl-drv.h" #include "iwl-csr.h" #include "iwl-debug.h" #include "iwl-trans.h" #include "iwl-op-mode.h" #include "iwl-agn-hw.h" #include "fw/img.h" #include "iwl-dbg-tlv.h" #include "iwl-config.h" #include "iwl-modparams.h" #include "fw/api/alive.h" #ifdef CPTCFG_IWLWIFI_SUPPORT_DEBUG_OVERRIDES #include "iwl-dbg-cfg.h" #endif #ifdef CPTCFG_IWLWIFI_DEVICE_TESTMODE #include "iwl-tm-gnl.h" #endif /****************************************************************************** * * module boiler plate * ******************************************************************************/ #define DRV_DESCRIPTION "Intel(R) Wireless WiFi driver for Linux" MODULE_DESCRIPTION(DRV_DESCRIPTION); MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR); MODULE_LICENSE("GPL"); #ifdef CPTCFG_IWLWIFI_DEBUGFS static struct dentry *iwl_dbgfs_root; #endif /** * struct iwl_drv - drv common data * @list: list of drv structures using this opmode * @fw: the iwl_fw structure * @op_mode: the running op_mode * @trans: transport layer * @dev: for debug prints only * @fw_index: firmware revision to try loading * @firmware_name: composite filename of ucode file to load * @request_firmware_complete: the firmware has been obtained from user space */ struct iwl_drv { struct list_head list; struct iwl_fw fw; struct iwl_op_mode *op_mode; struct iwl_trans *trans; struct device *dev; #if IS_ENABLED(CPTCFG_IWLXVT) bool xvt_mode_on; #endif int fw_index; /* firmware we're trying to load */ char firmware_name[64]; /* name of firmware file to load */ struct completion request_firmware_complete; #ifdef CPTCFG_IWLWIFI_DEBUGFS struct dentry *dbgfs_drv; struct dentry *dbgfs_trans; struct dentry *dbgfs_op_mode; #endif }; enum { DVM_OP_MODE, MVM_OP_MODE, #if IS_ENABLED(CPTCFG_IWLXVT) XVT_OP_MODE, #endif }; /* Protects the table contents, i.e. 
the ops pointer & drv list */ static struct mutex iwlwifi_opmode_table_mtx; static struct iwlwifi_opmode_table { const char *name; /* name: iwldvm, iwlmvm, etc */ const struct iwl_op_mode_ops *ops; /* pointer to op_mode ops */ struct list_head drv; /* list of devices using this op_mode */ } iwlwifi_opmode_table[] = { /* ops set when driver is initialized */ [DVM_OP_MODE] = { .name = "iwldvm", .ops = NULL }, [MVM_OP_MODE] = { .name = "iwlmvm", .ops = NULL }, #if IS_ENABLED(CPTCFG_IWLXVT) [XVT_OP_MODE] = { .name = "iwlxvt", .ops = NULL }, #endif }; #if IS_ENABLED(CPTCFG_IWLXVT) /* kernel object for a device dedicated * folder in the sysfs */ static struct kobject *iwl_kobj; static struct iwl_op_mode * _iwl_op_mode_start(struct iwl_drv *drv, struct iwlwifi_opmode_table *op); static void _iwl_op_mode_stop(struct iwl_drv *drv); /* * iwl_drv_get_dev_container - Given a device, returns the pointer * to its corresponding driver's struct */ struct iwl_drv *iwl_drv_get_dev_container(struct device *dev) { struct iwl_drv *drv_itr; int i; /* Going over all drivers, looking for the one that holds dev */ for (i = 0; (i < ARRAY_SIZE(iwlwifi_opmode_table)); i++) { list_for_each_entry(drv_itr, &iwlwifi_opmode_table[i].drv, list) if (drv_itr->dev == dev) return drv_itr; } return NULL; } IWL_EXPORT_SYMBOL(iwl_drv_get_dev_container); /* * iwl_drv_get_op_mode_idx - Returns the index of the device's * active operation mode */ static int iwl_drv_get_op_mode_idx(struct iwl_drv *drv) { struct iwl_drv *drv_itr; int i; if (!drv || !drv->dev) return -ENODEV; /* Going over all drivers, looking for the list that holds it */ for (i = 0; (i < ARRAY_SIZE(iwlwifi_opmode_table)); i++) { list_for_each_entry(drv_itr, &iwlwifi_opmode_table[i].drv, list) if (drv_itr->dev == drv->dev) return i; } return -EINVAL; } static bool iwl_drv_xvt_mode_supported(enum iwl_fw_type fw_type, int mode_idx) { /* xVT mode is available only with MVM FW */ switch (fw_type) { case IWL_FW_MVM: break; default: return false; } /* check whether the requested operation mode is supported */ switch (mode_idx) { case XVT_OP_MODE: case MVM_OP_MODE: return true; default: return false; } } /* * iwl_drv_switch_op_mode - Switch between operation modes * Checks if the desired operation mode is valid and whether it * is supported by the device. Stops the current op mode * and starts the desired mode.
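 * A minimal usage sketch (illustrative only; the device pointer and any
 * locking around it are assumed from the caller's context, e.g. a sysfs
 * store handler):
 *
 *	struct iwl_drv *drv = iwl_drv_get_dev_container(dev);
 *
 *	if (drv && iwl_drv_switch_op_mode(drv, "iwlxvt"))
 *		dev_warn(dev, "op mode switch failed\n");
 *
 * On success the device moves to the new op mode's drv list and is
 * restarted under the requested mode.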
*/ int iwl_drv_switch_op_mode(struct iwl_drv *drv, const char *new_op_name) { struct iwlwifi_opmode_table *new_op = NULL; int idx; /* Searching for wanted op_mode*/ for (idx = 0; idx < ARRAY_SIZE(iwlwifi_opmode_table); idx++) { if (!strcmp(iwlwifi_opmode_table[idx].name, new_op_name)) { new_op = &iwlwifi_opmode_table[idx]; break; } } /* Checking if the desired op mode is valid */ if (!new_op) { IWL_ERR(drv, "No such op mode \"%s\"\n", new_op_name); return -EINVAL; } /* * If the desired op mode is already the * device's current op mode, do nothing */ if (idx == iwl_drv_get_op_mode_idx(drv)) return 0; /* Check if the desired operation mode is supported by the device/fw */ if (!iwl_drv_xvt_mode_supported(drv->fw.type, idx)) { IWL_ERR(drv, "Op mode %s is not supported by the loaded fw\n", new_op_name); return -ENOTSUPP; } /* Recording new op mode state */ drv->xvt_mode_on = (idx == XVT_OP_MODE); /* Stopping the current op mode */ _iwl_op_mode_stop(drv); /* Changing operation mode */ mutex_lock(&iwlwifi_opmode_table_mtx); list_move_tail(&drv->list, &new_op->drv); mutex_unlock(&iwlwifi_opmode_table_mtx); /* Starting the new op mode */ if (new_op->ops) { drv->op_mode = _iwl_op_mode_start(drv, new_op); if (!drv->op_mode) { IWL_ERR(drv, "Error switching op modes\n"); return -EINVAL; } } else { return request_module("%s", new_op->name); } return 0; } IWL_EXPORT_SYMBOL(iwl_drv_switch_op_mode); /* * iwl_drv_sysfs_show - Returns device information to user */ static ssize_t iwl_drv_sysfs_show(struct device *dev, struct device_attribute *attr, char *buf) { struct iwl_drv *drv; int op_mode_idx = 0, itr; int ret = 0; /* Retrieving containing driver */ drv = iwl_drv_get_dev_container(dev); op_mode_idx = iwl_drv_get_op_mode_idx(drv); /* Checking if driver and driver information are valid */ if (op_mode_idx < 0) return op_mode_idx; /* Constructing output */ for (itr = 0; itr < ARRAY_SIZE(iwlwifi_opmode_table); itr++) { ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s%-s\n", (itr == op_mode_idx) ? "* " : " ", iwlwifi_opmode_table[itr].name); } return ret; } /* Attribute for device */ static const DEVICE_ATTR(op_mode, S_IRUGO, iwl_drv_sysfs_show, NULL); /* * iwl_create_sysfs_file - Creates a sysfs entry (under PCI devices), * and a symlink under modules/iwlwifi */ static int iwl_create_sysfs_file(struct iwl_drv *drv) { int ret; ret = device_create_file(drv->dev, &dev_attr_op_mode); if (!ret) { ret = sysfs_create_link(iwl_kobj, &drv->dev->kobj, dev_name(drv->dev)); } return ret; } /* * iwl_remove_sysfs_file - Removes sysfs entries */ static void iwl_remove_sysfs_file(struct iwl_drv *drv) { sysfs_remove_link(iwl_kobj, dev_name(drv->dev)); device_remove_file(drv->dev, &dev_attr_op_mode); } #endif /* CPTCFG_IWLXVT */ #define IWL_DEFAULT_SCAN_CHANNELS 40 /* * struct fw_sec: Just for the image parsing process. * For the fw storage we are using struct fw_desc. 
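 * In other words, struct fw_sec only borrows pointers into the blob
 * returned by request_firmware(); iwl_alloc_fw_desc() below deep-copies
 * every section into a vmalloc'ed struct fw_desc so that the blob can
 * be released once parsing finishes.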
*/ struct fw_sec { const void *data; /* the sec data */ size_t size; /* section size */ u32 offset; /* offset of writing in the device */ }; static void iwl_free_fw_desc(struct iwl_drv *drv, struct fw_desc *desc) { vfree(desc->data); desc->data = NULL; desc->len = 0; } static void iwl_free_fw_img(struct iwl_drv *drv, struct fw_img *img) { int i; for (i = 0; i < img->num_sec; i++) iwl_free_fw_desc(drv, &img->sec[i]); kfree(img->sec); } static void iwl_dealloc_ucode(struct iwl_drv *drv) { int i; kfree(drv->fw.dbg.dest_tlv); for (i = 0; i < ARRAY_SIZE(drv->fw.dbg.conf_tlv); i++) kfree(drv->fw.dbg.conf_tlv[i]); for (i = 0; i < ARRAY_SIZE(drv->fw.dbg.trigger_tlv); i++) kfree(drv->fw.dbg.trigger_tlv[i]); kfree(drv->fw.dbg.mem_tlv); kfree(drv->fw.iml); kfree(drv->fw.ucode_capa.cmd_versions); for (i = 0; i < IWL_UCODE_TYPE_MAX; i++) iwl_free_fw_img(drv, drv->fw.img + i); } static int iwl_alloc_fw_desc(struct iwl_drv *drv, struct fw_desc *desc, struct fw_sec *sec) { void *data; desc->data = NULL; if (!sec || !sec->size) return -EINVAL; data = vmalloc(sec->size); if (!data) return -ENOMEM; desc->len = sec->size; desc->offset = sec->offset; memcpy(data, sec->data, desc->len); desc->data = data; return 0; } static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context); static int iwl_request_firmware(struct iwl_drv *drv, bool first) { const struct iwl_cfg *cfg = drv->trans->cfg; char tag[8]; #if defined(CPTCFG_IWLWIFI_SUPPORT_DEBUG_OVERRIDES) char fw_name_temp[64]; #endif if (drv->trans->cfg->device_family == IWL_DEVICE_FAMILY_9000 && (CSR_HW_REV_STEP(drv->trans->hw_rev) != SILICON_B_STEP && CSR_HW_REV_STEP(drv->trans->hw_rev) != SILICON_C_STEP)) { IWL_ERR(drv, "Only HW steps B and C are currently supported (0x%0x)\n", drv->trans->hw_rev); return -EINVAL; } if (first) { drv->fw_index = cfg->ucode_api_max; sprintf(tag, "%d", drv->fw_index); } else { drv->fw_index--; sprintf(tag, "%d", drv->fw_index); } #ifdef CPTCFG_IWLWIFI_DISALLOW_OLDER_FW /* The dbg-cfg check here works because the first time we get * here we always load the 'api_max' version, and once that * has returned we load the dbg-cfg file. 
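 * To make the retry ladder concrete (the firmware name below is
 * hypothetical; only the "<fw_name_pre><api>.ucode" naming scheme is
 * taken from the code): with api_max = 46 and api_min = 30, the driver
 * first requests iwlwifi-XXXX-46.ucode, and each failed load retries
 * with the index decremented (iwlwifi-XXXX-45.ucode, and so on) until
 * api_min, after which it gives up with -ENOENT.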
*/ if ((drv->fw_index != cfg->ucode_api_max #ifdef CPTCFG_IWLWIFI_SUPPORT_DEBUG_OVERRIDES && !drv->trans->dbg_cfg.load_old_fw #endif ) || drv->fw_index < cfg->ucode_api_min) { #else if (drv->fw_index < cfg->ucode_api_min) { #endif IWL_ERR(drv, "no suitable firmware found!\n"); if (cfg->ucode_api_min == cfg->ucode_api_max) { IWL_ERR(drv, "%s%d is required\n", cfg->fw_name_pre, cfg->ucode_api_max); } else { IWL_ERR(drv, "minimum version required: %s%d\n", cfg->fw_name_pre, cfg->ucode_api_min); IWL_ERR(drv, "maximum version supported: %s%d\n", cfg->fw_name_pre, cfg->ucode_api_max); } IWL_ERR(drv, "check git://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git\n"); return -ENOENT; } snprintf(drv->firmware_name, sizeof(drv->firmware_name), "%s%s.ucode", cfg->fw_name_pre, tag); #ifdef CPTCFG_IWLWIFI_SUPPORT_DEBUG_OVERRIDES if (drv->trans->dbg_cfg.fw_file_pre) { snprintf(fw_name_temp, sizeof(fw_name_temp), "%s%s", drv->trans->dbg_cfg.fw_file_pre, drv->firmware_name); strncpy(drv->firmware_name, fw_name_temp, sizeof(drv->firmware_name)); } #endif /* CPTCFG_IWLWIFI_SUPPORT_DEBUG_OVERRIDES */ IWL_DEBUG_FW_INFO(drv, "attempting to load firmware '%s'\n", drv->firmware_name); return request_firmware_nowait(THIS_MODULE, 1, drv->firmware_name, drv->trans->dev, GFP_KERNEL, drv, iwl_req_fw_callback); } struct fw_img_parsing { struct fw_sec *sec; int sec_counter; }; /* * struct fw_sec_parsing: to extract fw section and its offset from tlv */ struct fw_sec_parsing { __le32 offset; const u8 data[]; } __packed; /** * struct iwl_tlv_calib_data - parse the default calib data from TLV * * @ucode_type: the uCode to which the following default calib relates. * @calib: default calibrations. */ struct iwl_tlv_calib_data { __le32 ucode_type; struct iwl_tlv_calib_ctrl calib; } __packed; struct iwl_firmware_pieces { struct fw_img_parsing img[IWL_UCODE_TYPE_MAX]; u32 init_evtlog_ptr, init_evtlog_size, init_errlog_ptr; u32 inst_evtlog_ptr, inst_evtlog_size, inst_errlog_ptr; /* FW debug data parsed for driver usage */ bool dbg_dest_tlv_init; u8 *dbg_dest_ver; union { struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv; struct iwl_fw_dbg_dest_tlv_v1 *dbg_dest_tlv_v1; }; struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX]; size_t dbg_conf_tlv_len[FW_DBG_CONF_MAX]; struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX]; size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX]; struct iwl_fw_dbg_mem_seg_tlv *dbg_mem_tlv; size_t n_mem_tlv; }; /* * These functions are just to extract uCode section data from the pieces * structure.
*/ static struct fw_sec *get_sec(struct iwl_firmware_pieces *pieces, enum iwl_ucode_type type, int sec) { return &pieces->img[type].sec[sec]; } static void alloc_sec_data(struct iwl_firmware_pieces *pieces, enum iwl_ucode_type type, int sec) { struct fw_img_parsing *img = &pieces->img[type]; struct fw_sec *sec_memory; int size = sec + 1; size_t alloc_size = sizeof(*img->sec) * size; if (img->sec && img->sec_counter >= size) return; sec_memory = krealloc(img->sec, alloc_size, GFP_KERNEL); if (!sec_memory) return; img->sec = sec_memory; img->sec_counter = size; } static void set_sec_data(struct iwl_firmware_pieces *pieces, enum iwl_ucode_type type, int sec, const void *data) { alloc_sec_data(pieces, type, sec); pieces->img[type].sec[sec].data = data; } static void set_sec_size(struct iwl_firmware_pieces *pieces, enum iwl_ucode_type type, int sec, size_t size) { alloc_sec_data(pieces, type, sec); pieces->img[type].sec[sec].size = size; } static size_t get_sec_size(struct iwl_firmware_pieces *pieces, enum iwl_ucode_type type, int sec) { return pieces->img[type].sec[sec].size; } static void set_sec_offset(struct iwl_firmware_pieces *pieces, enum iwl_ucode_type type, int sec, u32 offset) { alloc_sec_data(pieces, type, sec); pieces->img[type].sec[sec].offset = offset; } static int iwl_store_cscheme(struct iwl_fw *fw, const u8 *data, const u32 len) { int i, j; struct iwl_fw_cscheme_list *l = (struct iwl_fw_cscheme_list *)data; struct iwl_fw_cipher_scheme *fwcs; if (len < sizeof(*l) || len < sizeof(l->size) + l->size * sizeof(l->cs[0])) return -EINVAL; for (i = 0, j = 0; i < IWL_UCODE_MAX_CS && i < l->size; i++) { fwcs = &l->cs[j]; /* we skip schemes with zero cipher suite selector */ if (!fwcs->cipher) continue; fw->cs[j++] = *fwcs; } return 0; } /* * Gets uCode section from tlv. 
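 * The wire layout assumed here matches struct fw_sec_parsing above:
 *
 *	[ __le32 offset | section payload ... ]
 *
 * so the stored section size is the TLV length minus the leading
 * 4-byte offset field, as iwl_store_ucode_sec() computes below.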
*/ static int iwl_store_ucode_sec(struct iwl_firmware_pieces *pieces, const void *data, enum iwl_ucode_type type, int size) { struct fw_img_parsing *img; struct fw_sec *sec; struct fw_sec_parsing *sec_parse; size_t alloc_size; if (WARN_ON(!pieces || !data || type >= IWL_UCODE_TYPE_MAX)) return -1; sec_parse = (struct fw_sec_parsing *)data; img = &pieces->img[type]; alloc_size = sizeof(*img->sec) * (img->sec_counter + 1); sec = krealloc(img->sec, alloc_size, GFP_KERNEL); if (!sec) return -ENOMEM; img->sec = sec; sec = &img->sec[img->sec_counter]; sec->offset = le32_to_cpu(sec_parse->offset); sec->data = sec_parse->data; sec->size = size - sizeof(sec_parse->offset); ++img->sec_counter; return 0; } static int iwl_set_default_calib(struct iwl_drv *drv, const u8 *data) { struct iwl_tlv_calib_data *def_calib = (struct iwl_tlv_calib_data *)data; u32 ucode_type = le32_to_cpu(def_calib->ucode_type); if (ucode_type >= IWL_UCODE_TYPE_MAX) { IWL_ERR(drv, "Wrong ucode_type %u for default calibration.\n", ucode_type); return -EINVAL; } drv->fw.default_calib[ucode_type].flow_trigger = def_calib->calib.flow_trigger; drv->fw.default_calib[ucode_type].event_trigger = def_calib->calib.event_trigger; return 0; } static void iwl_set_ucode_api_flags(struct iwl_drv *drv, const u8 *data, struct iwl_ucode_capabilities *capa) { const struct iwl_ucode_api *ucode_api = (void *)data; u32 api_index = le32_to_cpu(ucode_api->api_index); u32 api_flags = le32_to_cpu(ucode_api->api_flags); int i; if (api_index >= DIV_ROUND_UP(NUM_IWL_UCODE_TLV_API, 32)) { IWL_WARN(drv, "api flags index %d larger than supported by driver\n", api_index); return; } for (i = 0; i < 32; i++) { if (api_flags & BIT(i)) __set_bit(i + 32 * api_index, capa->_api); } } static void iwl_set_ucode_capabilities(struct iwl_drv *drv, const u8 *data, struct iwl_ucode_capabilities *capa) { const struct iwl_ucode_capa *ucode_capa = (void *)data; u32 api_index = le32_to_cpu(ucode_capa->api_index); u32 api_flags = le32_to_cpu(ucode_capa->api_capa); int i; if (api_index >= DIV_ROUND_UP(NUM_IWL_UCODE_TLV_CAPA, 32)) { IWL_WARN(drv, "capa flags index %d larger than supported by driver\n", api_index); return; } for (i = 0; i < 32; i++) { if (api_flags & BIT(i)) __set_bit(i + 32 * api_index, capa->_capa); } } static int iwl_parse_v1_v2_firmware(struct iwl_drv *drv, const struct firmware *ucode_raw, struct iwl_firmware_pieces *pieces) { struct iwl_ucode_header *ucode = (void *)ucode_raw->data; u32 api_ver, hdr_size, build; char buildstr[25]; const u8 *src; drv->fw.ucode_ver = le32_to_cpu(ucode->ver); api_ver = IWL_UCODE_API(drv->fw.ucode_ver); switch (api_ver) { default: hdr_size = 28; if (ucode_raw->size < hdr_size) { IWL_ERR(drv, "File size too small!\n"); return -EINVAL; } build = le32_to_cpu(ucode->u.v2.build); set_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST, le32_to_cpu(ucode->u.v2.inst_size)); set_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA, le32_to_cpu(ucode->u.v2.data_size)); set_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST, le32_to_cpu(ucode->u.v2.init_size)); set_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA, le32_to_cpu(ucode->u.v2.init_data_size)); src = ucode->u.v2.data; break; case 0: case 1: case 2: hdr_size = 24; if (ucode_raw->size < hdr_size) { IWL_ERR(drv, "File size too small!\n"); return -EINVAL; } build = 0; set_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST, le32_to_cpu(ucode->u.v1.inst_size)); set_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA, 
le32_to_cpu(ucode->u.v1.data_size)); set_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST, le32_to_cpu(ucode->u.v1.init_size)); set_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA, le32_to_cpu(ucode->u.v1.init_data_size)); src = ucode->u.v1.data; break; } if (build) sprintf(buildstr, " build %u", build); else buildstr[0] = '\0'; snprintf(drv->fw.fw_version, sizeof(drv->fw.fw_version), "%u.%u.%u.%u%s", IWL_UCODE_MAJOR(drv->fw.ucode_ver), IWL_UCODE_MINOR(drv->fw.ucode_ver), IWL_UCODE_API(drv->fw.ucode_ver), IWL_UCODE_SERIAL(drv->fw.ucode_ver), buildstr); /* Verify size of file vs. image size info in file's header */ if (ucode_raw->size != hdr_size + get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST) + get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA) + get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST) + get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA)) { IWL_ERR(drv, "uCode file size %d does not match expected size\n", (int)ucode_raw->size); return -EINVAL; } set_sec_data(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST, src); src += get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST); set_sec_offset(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST, IWLAGN_RTC_INST_LOWER_BOUND); set_sec_data(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA, src); src += get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA); set_sec_offset(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA, IWLAGN_RTC_DATA_LOWER_BOUND); set_sec_data(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST, src); src += get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST); set_sec_offset(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST, IWLAGN_RTC_INST_LOWER_BOUND); set_sec_data(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA, src); src += get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA); set_sec_offset(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA, IWLAGN_RTC_DATA_LOWER_BOUND); return 0; } #define FW_ADDR_CACHE_CONTROL 0xC0000000 static int iwl_parse_tlv_firmware(struct iwl_drv *drv, const struct firmware *ucode_raw, struct iwl_firmware_pieces *pieces, struct iwl_ucode_capabilities *capa, bool *usniffer_images) { struct iwl_tlv_ucode_header *ucode = (void *)ucode_raw->data; struct iwl_ucode_tlv *tlv; size_t len = ucode_raw->size; const u8 *data; u32 tlv_len; u32 usniffer_img; enum iwl_ucode_tlv_type tlv_type; const u8 *tlv_data; char buildstr[25]; u32 build, paging_mem_size; int num_of_cpus; bool usniffer_req = false; #ifdef CPTCFG_IWLWIFI_SUPPORT_DEBUG_OVERRIDES if (ucode->magic == cpu_to_le32(IWL_TLV_FW_DBG_MAGIC)) { size_t dbg_data_ofs = offsetof(struct iwl_tlv_ucode_header, human_readable); data = (void *)ucode_raw->data + dbg_data_ofs; len -= dbg_data_ofs; goto fw_dbg_conf; } #endif if (len < sizeof(*ucode)) { IWL_ERR(drv, "uCode has invalid length: %zd\n", len); return -EINVAL; } if (ucode->magic != cpu_to_le32(IWL_TLV_UCODE_MAGIC)) { IWL_ERR(drv, "invalid uCode magic: 0X%x\n", le32_to_cpu(ucode->magic)); return -EINVAL; } drv->fw.ucode_ver = le32_to_cpu(ucode->ver); memcpy(drv->fw.human_readable, ucode->human_readable, sizeof(drv->fw.human_readable)); build = le32_to_cpu(ucode->build); if (build) sprintf(buildstr, " build %u", build); else buildstr[0] = '\0'; snprintf(drv->fw.fw_version, sizeof(drv->fw.fw_version), "%u.%u.%u.%u%s", IWL_UCODE_MAJOR(drv->fw.ucode_ver), IWL_UCODE_MINOR(drv->fw.ucode_ver), IWL_UCODE_API(drv->fw.ucode_ver), IWL_UCODE_SERIAL(drv->fw.ucode_ver), buildstr); data = ucode->data; len -= 
sizeof(*ucode); #ifdef CPTCFG_IWLWIFI_SUPPORT_DEBUG_OVERRIDES fw_dbg_conf: #endif while (len >= sizeof(*tlv)) { len -= sizeof(*tlv); tlv = (void *)data; tlv_len = le32_to_cpu(tlv->length); tlv_type = le32_to_cpu(tlv->type); tlv_data = tlv->data; if (len < tlv_len) { IWL_ERR(drv, "invalid TLV len: %zd/%u\n", len, tlv_len); return -EINVAL; } len -= ALIGN(tlv_len, 4); data += sizeof(*tlv) + ALIGN(tlv_len, 4); switch (tlv_type) { case IWL_UCODE_TLV_INST: set_sec_data(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST, tlv_data); set_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST, tlv_len); set_sec_offset(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST, IWLAGN_RTC_INST_LOWER_BOUND); break; case IWL_UCODE_TLV_DATA: set_sec_data(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA, tlv_data); set_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA, tlv_len); set_sec_offset(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA, IWLAGN_RTC_DATA_LOWER_BOUND); break; case IWL_UCODE_TLV_INIT: set_sec_data(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST, tlv_data); set_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST, tlv_len); set_sec_offset(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST, IWLAGN_RTC_INST_LOWER_BOUND); break; case IWL_UCODE_TLV_INIT_DATA: set_sec_data(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA, tlv_data); set_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA, tlv_len); set_sec_offset(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA, IWLAGN_RTC_DATA_LOWER_BOUND); break; case IWL_UCODE_TLV_BOOT: IWL_ERR(drv, "Found unexpected BOOT ucode\n"); break; case IWL_UCODE_TLV_PROBE_MAX_LEN: if (tlv_len != sizeof(u32)) goto invalid_tlv_len; capa->max_probe_length = le32_to_cpup((__le32 *)tlv_data); break; case IWL_UCODE_TLV_PAN: if (tlv_len) goto invalid_tlv_len; capa->flags |= IWL_UCODE_TLV_FLAGS_PAN; break; case IWL_UCODE_TLV_FLAGS: /* must be at least one u32 */ if (tlv_len < sizeof(u32)) goto invalid_tlv_len; /* and a proper number of u32s */ if (tlv_len % sizeof(u32)) goto invalid_tlv_len; /* * This driver only reads the first u32 as * right now no more features are defined, * if that changes then either the driver * will not work with the new firmware, or * it'll not take advantage of new features. 
*/ capa->flags = le32_to_cpup((__le32 *)tlv_data); break; case IWL_UCODE_TLV_API_CHANGES_SET: if (tlv_len != sizeof(struct iwl_ucode_api)) goto invalid_tlv_len; iwl_set_ucode_api_flags(drv, tlv_data, capa); break; case IWL_UCODE_TLV_ENABLED_CAPABILITIES: if (tlv_len != sizeof(struct iwl_ucode_capa)) goto invalid_tlv_len; iwl_set_ucode_capabilities(drv, tlv_data, capa); break; case IWL_UCODE_TLV_INIT_EVTLOG_PTR: if (tlv_len != sizeof(u32)) goto invalid_tlv_len; pieces->init_evtlog_ptr = le32_to_cpup((__le32 *)tlv_data); break; case IWL_UCODE_TLV_INIT_EVTLOG_SIZE: if (tlv_len != sizeof(u32)) goto invalid_tlv_len; pieces->init_evtlog_size = le32_to_cpup((__le32 *)tlv_data); break; case IWL_UCODE_TLV_INIT_ERRLOG_PTR: if (tlv_len != sizeof(u32)) goto invalid_tlv_len; pieces->init_errlog_ptr = le32_to_cpup((__le32 *)tlv_data); break; case IWL_UCODE_TLV_RUNT_EVTLOG_PTR: if (tlv_len != sizeof(u32)) goto invalid_tlv_len; pieces->inst_evtlog_ptr = le32_to_cpup((__le32 *)tlv_data); break; case IWL_UCODE_TLV_RUNT_EVTLOG_SIZE: if (tlv_len != sizeof(u32)) goto invalid_tlv_len; pieces->inst_evtlog_size = le32_to_cpup((__le32 *)tlv_data); break; case IWL_UCODE_TLV_RUNT_ERRLOG_PTR: if (tlv_len != sizeof(u32)) goto invalid_tlv_len; pieces->inst_errlog_ptr = le32_to_cpup((__le32 *)tlv_data); break; case IWL_UCODE_TLV_ENHANCE_SENS_TBL: if (tlv_len) goto invalid_tlv_len; drv->fw.enhance_sensitivity_table = true; break; case IWL_UCODE_TLV_WOWLAN_INST: set_sec_data(pieces, IWL_UCODE_WOWLAN, IWL_UCODE_SECTION_INST, tlv_data); set_sec_size(pieces, IWL_UCODE_WOWLAN, IWL_UCODE_SECTION_INST, tlv_len); set_sec_offset(pieces, IWL_UCODE_WOWLAN, IWL_UCODE_SECTION_INST, IWLAGN_RTC_INST_LOWER_BOUND); break; case IWL_UCODE_TLV_WOWLAN_DATA: set_sec_data(pieces, IWL_UCODE_WOWLAN, IWL_UCODE_SECTION_DATA, tlv_data); set_sec_size(pieces, IWL_UCODE_WOWLAN, IWL_UCODE_SECTION_DATA, tlv_len); set_sec_offset(pieces, IWL_UCODE_WOWLAN, IWL_UCODE_SECTION_DATA, IWLAGN_RTC_DATA_LOWER_BOUND); break; case IWL_UCODE_TLV_PHY_CALIBRATION_SIZE: if (tlv_len != sizeof(u32)) goto invalid_tlv_len; capa->standard_phy_calibration_size = le32_to_cpup((__le32 *)tlv_data); break; case IWL_UCODE_TLV_SEC_RT: iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_REGULAR, tlv_len); drv->fw.type = IWL_FW_MVM; break; case IWL_UCODE_TLV_SEC_INIT: iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_INIT, tlv_len); drv->fw.type = IWL_FW_MVM; break; case IWL_UCODE_TLV_SEC_WOWLAN: iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_WOWLAN, tlv_len); drv->fw.type = IWL_FW_MVM; break; case IWL_UCODE_TLV_DEF_CALIB: if (tlv_len != sizeof(struct iwl_tlv_calib_data)) goto invalid_tlv_len; if (iwl_set_default_calib(drv, tlv_data)) goto tlv_error; break; case IWL_UCODE_TLV_PHY_SKU: if (tlv_len != sizeof(u32)) goto invalid_tlv_len; drv->fw.phy_config = le32_to_cpup((__le32 *)tlv_data); #ifdef CPTCFG_IWLWIFI_SUPPORT_DEBUG_OVERRIDES if (drv->trans->dbg_cfg.valid_ants & ~ANT_ABC) IWL_ERR(drv, "Invalid value for antennas: 0x%x\n", drv->trans->dbg_cfg.valid_ants); /* Make sure value stays in range */ drv->trans->dbg_cfg.valid_ants &= ANT_ABC; if (drv->trans->dbg_cfg.valid_ants) { u32 phy_config = ~(FW_PHY_CFG_TX_CHAIN | FW_PHY_CFG_RX_CHAIN); phy_config |= (drv->trans->dbg_cfg.valid_ants << FW_PHY_CFG_TX_CHAIN_POS); phy_config |= (drv->trans->dbg_cfg.valid_ants << FW_PHY_CFG_RX_CHAIN_POS); drv->fw.phy_config &= phy_config; } #endif drv->fw.valid_tx_ant = (drv->fw.phy_config & FW_PHY_CFG_TX_CHAIN) >> FW_PHY_CFG_TX_CHAIN_POS; drv->fw.valid_rx_ant = (drv->fw.phy_config & FW_PHY_CFG_RX_CHAIN) >> 
FW_PHY_CFG_RX_CHAIN_POS; break; case IWL_UCODE_TLV_SECURE_SEC_RT: iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_REGULAR, tlv_len); drv->fw.type = IWL_FW_MVM; break; case IWL_UCODE_TLV_SECURE_SEC_INIT: iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_INIT, tlv_len); drv->fw.type = IWL_FW_MVM; break; case IWL_UCODE_TLV_SECURE_SEC_WOWLAN: iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_WOWLAN, tlv_len); drv->fw.type = IWL_FW_MVM; break; case IWL_UCODE_TLV_NUM_OF_CPU: if (tlv_len != sizeof(u32)) goto invalid_tlv_len; num_of_cpus = le32_to_cpup((__le32 *)tlv_data); if (num_of_cpus == 2) { drv->fw.img[IWL_UCODE_REGULAR].is_dual_cpus = true; drv->fw.img[IWL_UCODE_INIT].is_dual_cpus = true; drv->fw.img[IWL_UCODE_WOWLAN].is_dual_cpus = true; } else if ((num_of_cpus > 2) || (num_of_cpus < 1)) { IWL_ERR(drv, "Driver supports up to 2 CPUs\n"); return -EINVAL; } break; case IWL_UCODE_TLV_CSCHEME: if (iwl_store_cscheme(&drv->fw, tlv_data, tlv_len)) goto invalid_tlv_len; break; case IWL_UCODE_TLV_N_SCAN_CHANNELS: if (tlv_len != sizeof(u32)) goto invalid_tlv_len; capa->n_scan_channels = le32_to_cpup((__le32 *)tlv_data); break; case IWL_UCODE_TLV_FW_VERSION: { __le32 *ptr = (void *)tlv_data; u32 major, minor; u8 local_comp; if (tlv_len != sizeof(u32) * 3) goto invalid_tlv_len; major = le32_to_cpup(ptr++); minor = le32_to_cpup(ptr++); local_comp = le32_to_cpup(ptr); if (strncmp(drv->fw.human_readable, "stream:", 7)) snprintf(drv->fw.fw_version, sizeof(drv->fw.fw_version), "%u.%08x.%u", major, minor, local_comp); else snprintf(drv->fw.fw_version, sizeof(drv->fw.fw_version), "%u.%u.%u", major, minor, local_comp); break; } case IWL_UCODE_TLV_FW_DBG_DEST: { struct iwl_fw_dbg_dest_tlv *dest = NULL; struct iwl_fw_dbg_dest_tlv_v1 *dest_v1 = NULL; u8 mon_mode; pieces->dbg_dest_ver = (u8 *)tlv_data; if (*pieces->dbg_dest_ver == 1) { dest = (void *)tlv_data; } else if (*pieces->dbg_dest_ver == 0) { dest_v1 = (void *)tlv_data; } else { IWL_ERR(drv, "The version is %d, and it is invalid\n", *pieces->dbg_dest_ver); break; } #ifdef CPTCFG_IWLWIFI_DEVICE_TESTMODE #ifdef CPTCFG_IWLWIFI_SUPPORT_DEBUG_OVERRIDES if (drv->trans->dbg_cfg.dbm_destination_path) { IWL_ERR(drv, "Ignoring destination, ini file present\n"); break; } #endif #endif if (pieces->dbg_dest_tlv_init) { IWL_ERR(drv, "dbg destination ignored, already exists\n"); break; } pieces->dbg_dest_tlv_init = true; if (dest_v1) { pieces->dbg_dest_tlv_v1 = dest_v1; mon_mode = dest_v1->monitor_mode; } else { pieces->dbg_dest_tlv = dest; mon_mode = dest->monitor_mode; } IWL_INFO(drv, "Found debug destination: %s\n", get_fw_dbg_mode_string(mon_mode)); drv->fw.dbg.n_dest_reg = (dest_v1) ?
tlv_len - offsetof(struct iwl_fw_dbg_dest_tlv_v1, reg_ops) : tlv_len - offsetof(struct iwl_fw_dbg_dest_tlv, reg_ops); drv->fw.dbg.n_dest_reg /= sizeof(drv->fw.dbg.dest_tlv->reg_ops[0]); break; } case IWL_UCODE_TLV_FW_DBG_CONF: { struct iwl_fw_dbg_conf_tlv *conf = (void *)tlv_data; if (!pieces->dbg_dest_tlv_init) { IWL_ERR(drv, "Ignore dbg config %d - no destination configured\n", conf->id); break; } if (conf->id >= ARRAY_SIZE(drv->fw.dbg.conf_tlv)) { IWL_ERR(drv, "Skip unknown configuration: %d\n", conf->id); break; } if (pieces->dbg_conf_tlv[conf->id]) { IWL_ERR(drv, "Ignore duplicate dbg config %d\n", conf->id); break; } if (conf->usniffer) usniffer_req = true; IWL_INFO(drv, "Found debug configuration: %d\n", conf->id); pieces->dbg_conf_tlv[conf->id] = conf; pieces->dbg_conf_tlv_len[conf->id] = tlv_len; break; } case IWL_UCODE_TLV_FW_DBG_TRIGGER: { struct iwl_fw_dbg_trigger_tlv *trigger = (void *)tlv_data; u32 trigger_id = le32_to_cpu(trigger->id); if (trigger_id >= ARRAY_SIZE(drv->fw.dbg.trigger_tlv)) { IWL_ERR(drv, "Skip unknown trigger: %u\n", trigger->id); break; } if (pieces->dbg_trigger_tlv[trigger_id]) { IWL_ERR(drv, "Ignore duplicate dbg trigger %u\n", trigger->id); break; } IWL_INFO(drv, "Found debug trigger: %u\n", trigger->id); pieces->dbg_trigger_tlv[trigger_id] = trigger; pieces->dbg_trigger_tlv_len[trigger_id] = tlv_len; break; } case IWL_UCODE_TLV_FW_DBG_DUMP_LST: { if (tlv_len != sizeof(u32)) { IWL_ERR(drv, "dbg lst mask size incorrect, skip\n"); break; } drv->fw.dbg.dump_mask = le32_to_cpup((__le32 *)tlv_data); break; } case IWL_UCODE_TLV_SEC_RT_USNIFFER: *usniffer_images = true; iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_REGULAR_USNIFFER, tlv_len); break; case IWL_UCODE_TLV_PAGING: if (tlv_len != sizeof(u32)) goto invalid_tlv_len; paging_mem_size = le32_to_cpup((__le32 *)tlv_data); IWL_DEBUG_FW(drv, "Paging: paging enabled (size = %u bytes)\n", paging_mem_size); if (paging_mem_size > MAX_PAGING_IMAGE_SIZE) { IWL_ERR(drv, "Paging: driver supports up to %lu bytes for paging image\n", MAX_PAGING_IMAGE_SIZE); return -EINVAL; } if (paging_mem_size & (FW_PAGING_SIZE - 1)) { IWL_ERR(drv, "Paging: image isn't multiple %lu\n", FW_PAGING_SIZE); return -EINVAL; } drv->fw.img[IWL_UCODE_REGULAR].paging_mem_size = paging_mem_size; usniffer_img = IWL_UCODE_REGULAR_USNIFFER; drv->fw.img[usniffer_img].paging_mem_size = paging_mem_size; break; case IWL_UCODE_TLV_FW_GSCAN_CAPA: /* ignored */ break; case IWL_UCODE_TLV_FW_MEM_SEG: { struct iwl_fw_dbg_mem_seg_tlv *dbg_mem = (void *)tlv_data; size_t size; struct iwl_fw_dbg_mem_seg_tlv *n; if (tlv_len != (sizeof(*dbg_mem))) goto invalid_tlv_len; IWL_DEBUG_INFO(drv, "Found debug memory segment: %u\n", dbg_mem->data_type); size = sizeof(*pieces->dbg_mem_tlv) * (pieces->n_mem_tlv + 1); n = krealloc(pieces->dbg_mem_tlv, size, GFP_KERNEL); if (!n) return -ENOMEM; pieces->dbg_mem_tlv = n; pieces->dbg_mem_tlv[pieces->n_mem_tlv] = *dbg_mem; pieces->n_mem_tlv++; break; } case IWL_UCODE_TLV_IML: { drv->fw.iml_len = tlv_len; drv->fw.iml = kmemdup(tlv_data, tlv_len, GFP_KERNEL); if (!drv->fw.iml) return -ENOMEM; break; } case IWL_UCODE_TLV_FW_RECOVERY_INFO: { struct { __le32 buf_addr; __le32 buf_size; } *recov_info = (void *)tlv_data; if (tlv_len != sizeof(*recov_info)) goto invalid_tlv_len; capa->error_log_addr = le32_to_cpu(recov_info->buf_addr); capa->error_log_size = le32_to_cpu(recov_info->buf_size); } break; case IWL_UCODE_TLV_FW_FSEQ_VERSION: { struct { u8 version[32]; u8 sha1[20]; } *fseq_ver = (void *)tlv_data; if (tlv_len != 
sizeof(*fseq_ver)) goto invalid_tlv_len; IWL_INFO(drv, "TLV_FW_FSEQ_VERSION: %s\n", fseq_ver->version); } break; case IWL_UCODE_TLV_UMAC_DEBUG_ADDRS: { struct iwl_umac_debug_addrs *dbg_ptrs = (void *)tlv_data; if (tlv_len != sizeof(*dbg_ptrs)) goto invalid_tlv_len; if (drv->trans->cfg->device_family < IWL_DEVICE_FAMILY_22000) break; drv->trans->dbg.umac_error_event_table = le32_to_cpu(dbg_ptrs->error_info_addr) & ~FW_ADDR_CACHE_CONTROL; drv->trans->dbg.error_event_table_tlv_status |= IWL_ERROR_EVENT_TABLE_UMAC; break; } case IWL_UCODE_TLV_LMAC_DEBUG_ADDRS: { struct iwl_lmac_debug_addrs *dbg_ptrs = (void *)tlv_data; if (tlv_len != sizeof(*dbg_ptrs)) goto invalid_tlv_len; if (drv->trans->cfg->device_family < IWL_DEVICE_FAMILY_22000) break; drv->trans->dbg.lmac_error_event_table[0] = le32_to_cpu(dbg_ptrs->error_event_table_ptr) & ~FW_ADDR_CACHE_CONTROL; drv->trans->dbg.error_event_table_tlv_status |= IWL_ERROR_EVENT_TABLE_LMAC1; break; } case IWL_UCODE_TLV_TYPE_DEBUG_INFO: case IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION: case IWL_UCODE_TLV_TYPE_HCMD: case IWL_UCODE_TLV_TYPE_REGIONS: case IWL_UCODE_TLV_TYPE_TRIGGERS: case IWL_UCODE_TLV_TYPE_DEBUG_FLOW: if (iwlwifi_mod_params.enable_ini) iwl_dbg_tlv_copy(drv->trans, tlv, false); break; case IWL_UCODE_TLV_CMD_VERSIONS: if (tlv_len % sizeof(struct iwl_fw_cmd_version)) { IWL_ERR(drv, "Invalid length for command versions: %u\n", tlv_len); tlv_len /= sizeof(struct iwl_fw_cmd_version); tlv_len *= sizeof(struct iwl_fw_cmd_version); } if (WARN_ON(capa->cmd_versions)) return -EINVAL; capa->cmd_versions = kmemdup(tlv_data, tlv_len, GFP_KERNEL); if (!capa->cmd_versions) return -ENOMEM; capa->n_cmd_versions = tlv_len / sizeof(struct iwl_fw_cmd_version); break; default: IWL_DEBUG_INFO(drv, "unknown TLV: %d\n", tlv_type); break; } } if (!fw_has_capa(capa, IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED) && usniffer_req && !*usniffer_images) { IWL_ERR(drv, "user selected to work with usniffer but usniffer image isn't available in ucode package\n"); return -EINVAL; } if (len) { IWL_ERR(drv, "invalid TLV after parsing: %zd\n", len); iwl_print_hex_dump(drv, IWL_DL_FW, (u8 *)data, len); return -EINVAL; } return 0; invalid_tlv_len: IWL_ERR(drv, "TLV %d has invalid size: %u\n", tlv_type, tlv_len); tlv_error: iwl_print_hex_dump(drv, IWL_DL_FW, tlv_data, tlv_len); return -EINVAL; } static int iwl_alloc_ucode(struct iwl_drv *drv, struct iwl_firmware_pieces *pieces, enum iwl_ucode_type type) { int i; struct fw_desc *sec; sec = kcalloc(pieces->img[type].sec_counter, sizeof(*sec), GFP_KERNEL); if (!sec) return -ENOMEM; drv->fw.img[type].sec = sec; drv->fw.img[type].num_sec = pieces->img[type].sec_counter; for (i = 0; i < pieces->img[type].sec_counter; i++) if (iwl_alloc_fw_desc(drv, &sec[i], get_sec(pieces, type, i))) return -ENOMEM; return 0; } static int validate_sec_sizes(struct iwl_drv *drv, struct iwl_firmware_pieces *pieces, const struct iwl_cfg *cfg) { IWL_DEBUG_INFO(drv, "f/w package hdr runtime inst size = %zd\n", get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST)); IWL_DEBUG_INFO(drv, "f/w package hdr runtime data size = %zd\n", get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA)); IWL_DEBUG_INFO(drv, "f/w package hdr init inst size = %zd\n", get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST)); IWL_DEBUG_INFO(drv, "f/w package hdr init data size = %zd\n", get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA)); /* Verify that uCode images will fit in card's SRAM. 
*/ if (get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST) > cfg->max_inst_size) { IWL_ERR(drv, "uCode instr len %zd too large to fit in\n", get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST)); return -1; } if (get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA) > cfg->max_data_size) { IWL_ERR(drv, "uCode data len %zd too large to fit in\n", get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA)); return -1; } if (get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST) > cfg->max_inst_size) { IWL_ERR(drv, "uCode init instr len %zd too large to fit in\n", get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST)); return -1; } if (get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA) > cfg->max_data_size) { IWL_ERR(drv, "uCode init data len %zd too large to fit in\n", get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA)); return -1; } return 0; } static struct iwl_op_mode * _iwl_op_mode_start(struct iwl_drv *drv, struct iwlwifi_opmode_table *op) { const struct iwl_op_mode_ops *ops = op->ops; struct dentry *dbgfs_dir = NULL; struct iwl_op_mode *op_mode = NULL; #ifdef CPTCFG_IWLWIFI_DEBUGFS drv->dbgfs_op_mode = debugfs_create_dir(op->name, drv->dbgfs_drv); dbgfs_dir = drv->dbgfs_op_mode; #endif op_mode = ops->start(drv->trans, drv->trans->cfg, &drv->fw, dbgfs_dir); if (!op_mode) { #ifdef CPTCFG_IWLWIFI_DEBUGFS debugfs_remove_recursive(drv->dbgfs_op_mode); drv->dbgfs_op_mode = NULL; return NULL; #endif } return op_mode; } static void _iwl_op_mode_stop(struct iwl_drv *drv) { /* op_mode can be NULL if its start failed */ if (drv->op_mode) { iwl_op_mode_stop(drv->op_mode); drv->op_mode = NULL; #ifdef CPTCFG_IWLWIFI_DEBUGFS debugfs_remove_recursive(drv->dbgfs_op_mode); drv->dbgfs_op_mode = NULL; #endif } } /** * iwl_req_fw_callback - callback when firmware was loaded * * If loaded successfully, copies the firmware into buffers * for the card to fetch (via DMA).
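 * Paraphrasing the flow implemented below: parse the header (v1/v2 or
 * TLV format), deep-copy every section via iwl_alloc_ucode(), duplicate
 * the debug TLVs, pick the op mode from fw->type (DVM or MVM, or XVT
 * when xvt_default_mode is set) and either start it directly or
 * request_module() it first. A parse failure retries with an older
 * firmware API version via iwl_request_firmware(drv, false).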
*/ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) { struct iwl_drv *drv = context; struct iwl_fw *fw = &drv->fw; struct iwl_ucode_header *ucode; struct iwlwifi_opmode_table *op; int err; struct iwl_firmware_pieces *pieces; const unsigned int api_max = drv->trans->cfg->ucode_api_max; const unsigned int api_min = drv->trans->cfg->ucode_api_min; size_t trigger_tlv_sz[FW_DBG_TRIGGER_MAX]; u32 api_ver; int i; bool load_module = false; bool usniffer_images = false; #ifdef CPTCFG_IWLWIFI_SUPPORT_DEBUG_OVERRIDES const struct firmware *fw_dbg_config; int load_fw_dbg_err = -ENOENT; #endif fw->ucode_capa.max_probe_length = IWL_DEFAULT_MAX_PROBE_LENGTH; fw->ucode_capa.standard_phy_calibration_size = IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE; fw->ucode_capa.n_scan_channels = IWL_DEFAULT_SCAN_CHANNELS; /* dump all fw memory areas by default */ fw->dbg.dump_mask = 0xffffffff; pieces = kzalloc(sizeof(*pieces), GFP_KERNEL); if (!pieces) goto out_free_fw; if (!ucode_raw) goto try_again; IWL_DEBUG_FW_INFO(drv, "Loaded firmware file '%s' (%zd bytes).\n", drv->firmware_name, ucode_raw->size); /* Make sure that we got at least the API version number */ if (ucode_raw->size < 4) { IWL_ERR(drv, "File size way too small!\n"); goto try_again; } /* Data from ucode file: header followed by uCode images */ ucode = (struct iwl_ucode_header *)ucode_raw->data; if (ucode->ver) err = iwl_parse_v1_v2_firmware(drv, ucode_raw, pieces); else err = iwl_parse_tlv_firmware(drv, ucode_raw, pieces, &fw->ucode_capa, &usniffer_images); if (err) goto try_again; #ifdef CPTCFG_IWLWIFI_SUPPORT_DEBUG_OVERRIDES if (!ucode->ver && drv->trans->dbg_cfg.fw_dbg_conf) { load_fw_dbg_err = request_firmware(&fw_dbg_config, drv->trans->dbg_cfg.fw_dbg_conf, drv->trans->dev); if (!load_fw_dbg_err) { err = iwl_parse_tlv_firmware(drv, fw_dbg_config, pieces, &fw->ucode_capa, &usniffer_images); if (err) IWL_ERR(drv, "Failed to configure FW DBG data!\n"); } } #endif if (fw_has_api(&drv->fw.ucode_capa, IWL_UCODE_TLV_API_NEW_VERSION)) api_ver = drv->fw.ucode_ver; else api_ver = IWL_UCODE_API(drv->fw.ucode_ver); /* * api_ver should match the api version forming part of the * firmware filename ... but we don't check for that and only rely * on the API version read from firmware header from here on forward */ if (api_ver < api_min || api_ver > api_max) { IWL_ERR(drv, "Driver unable to support your firmware API. " "Driver supports v%u, firmware is v%u.\n", api_max, api_ver); goto try_again; } /* * In mvm uCode there is no difference between data and instructions * sections. */ if (fw->type == IWL_FW_DVM && validate_sec_sizes(drv, pieces, drv->trans->cfg)) goto try_again; /* Allocate ucode buffers for card's bus-master loading ... 
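 * (each parsed fw_sec is copied into drv->fw.img[type].sec[] here, so
 * the original firmware blob can be handed back to the kernel with
 * release_firmware() further down)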
*/ /* Runtime instructions and 2 copies of data: * 1) unmodified from disk * 2) backup cache for save/restore during power-downs */ for (i = 0; i < IWL_UCODE_TYPE_MAX; i++) if (iwl_alloc_ucode(drv, pieces, i)) goto out_free_fw; if (pieces->dbg_dest_tlv_init) { size_t dbg_dest_size = sizeof(*drv->fw.dbg.dest_tlv) + sizeof(drv->fw.dbg.dest_tlv->reg_ops[0]) * drv->fw.dbg.n_dest_reg; drv->fw.dbg.dest_tlv = kmalloc(dbg_dest_size, GFP_KERNEL); if (!drv->fw.dbg.dest_tlv) goto out_free_fw; if (*pieces->dbg_dest_ver == 0) { memcpy(drv->fw.dbg.dest_tlv, pieces->dbg_dest_tlv_v1, dbg_dest_size); } else { struct iwl_fw_dbg_dest_tlv_v1 *dest_tlv = drv->fw.dbg.dest_tlv; dest_tlv->version = pieces->dbg_dest_tlv->version; dest_tlv->monitor_mode = pieces->dbg_dest_tlv->monitor_mode; dest_tlv->size_power = pieces->dbg_dest_tlv->size_power; dest_tlv->wrap_count = pieces->dbg_dest_tlv->wrap_count; dest_tlv->write_ptr_reg = pieces->dbg_dest_tlv->write_ptr_reg; dest_tlv->base_shift = pieces->dbg_dest_tlv->base_shift; memcpy(dest_tlv->reg_ops, pieces->dbg_dest_tlv->reg_ops, sizeof(drv->fw.dbg.dest_tlv->reg_ops[0]) * drv->fw.dbg.n_dest_reg); /* In version 1 of the destination tlv, which is * relevant for internal buffer exclusively, * the base address is given together with the length * of the buffer, and the size shift is given instead of * the end shift. We now store these values in base_reg * and end_shift, and when dumping the data we'll * manipulate it for extracting both the length and * base address */ dest_tlv->base_reg = pieces->dbg_dest_tlv->cfg_reg; dest_tlv->end_shift = pieces->dbg_dest_tlv->size_shift; } } for (i = 0; i < ARRAY_SIZE(drv->fw.dbg.conf_tlv); i++) { if (pieces->dbg_conf_tlv[i]) { drv->fw.dbg.conf_tlv[i] = kmemdup(pieces->dbg_conf_tlv[i], pieces->dbg_conf_tlv_len[i], GFP_KERNEL); if (!drv->fw.dbg.conf_tlv[i]) goto out_free_fw; } } memset(&trigger_tlv_sz, 0xff, sizeof(trigger_tlv_sz)); trigger_tlv_sz[FW_DBG_TRIGGER_MISSED_BEACONS] = sizeof(struct iwl_fw_dbg_trigger_missed_bcon); trigger_tlv_sz[FW_DBG_TRIGGER_CHANNEL_SWITCH] = 0; trigger_tlv_sz[FW_DBG_TRIGGER_FW_NOTIF] = sizeof(struct iwl_fw_dbg_trigger_cmd); trigger_tlv_sz[FW_DBG_TRIGGER_MLME] = sizeof(struct iwl_fw_dbg_trigger_mlme); trigger_tlv_sz[FW_DBG_TRIGGER_STATS] = sizeof(struct iwl_fw_dbg_trigger_stats); trigger_tlv_sz[FW_DBG_TRIGGER_RSSI] = sizeof(struct iwl_fw_dbg_trigger_low_rssi); trigger_tlv_sz[FW_DBG_TRIGGER_TXQ_TIMERS] = sizeof(struct iwl_fw_dbg_trigger_txq_timer); trigger_tlv_sz[FW_DBG_TRIGGER_TIME_EVENT] = sizeof(struct iwl_fw_dbg_trigger_time_event); trigger_tlv_sz[FW_DBG_TRIGGER_BA] = sizeof(struct iwl_fw_dbg_trigger_ba); #ifdef CPTCFG_MAC80211_LATENCY_MEASUREMENTS trigger_tlv_sz[FW_DBG_TRIGGER_TX_LATENCY] = sizeof(struct iwl_fw_dbg_trigger_tx_latency); #endif /* CPTCFG_MAC80211_LATENCY_MEASUREMENTS */ trigger_tlv_sz[FW_DBG_TRIGGER_TDLS] = sizeof(struct iwl_fw_dbg_trigger_tdls); for (i = 0; i < ARRAY_SIZE(drv->fw.dbg.trigger_tlv); i++) { if (pieces->dbg_trigger_tlv[i]) { /* * If the trigger isn't long enough, WARN and exit. * Someone is trying to debug something and he won't * be able to catch the bug he is trying to chase. * We'd better be noisy to be sure he knows what's * going on.
*/ if (WARN_ON(pieces->dbg_trigger_tlv_len[i] < (trigger_tlv_sz[i] + sizeof(struct iwl_fw_dbg_trigger_tlv)))) goto out_free_fw; drv->fw.dbg.trigger_tlv_len[i] = pieces->dbg_trigger_tlv_len[i]; drv->fw.dbg.trigger_tlv[i] = kmemdup(pieces->dbg_trigger_tlv[i], drv->fw.dbg.trigger_tlv_len[i], GFP_KERNEL); if (!drv->fw.dbg.trigger_tlv[i]) goto out_free_fw; } } /* Now that we can no longer fail, copy information */ drv->fw.dbg.mem_tlv = pieces->dbg_mem_tlv; pieces->dbg_mem_tlv = NULL; drv->fw.dbg.n_mem_tlv = pieces->n_mem_tlv; /* * The (size - 16) / 12 formula is based on the information recorded * for each event, which is of mode 1 (including timestamp) for all * new microcodes that include this information. */ fw->init_evtlog_ptr = pieces->init_evtlog_ptr; if (pieces->init_evtlog_size) fw->init_evtlog_size = (pieces->init_evtlog_size - 16)/12; else fw->init_evtlog_size = drv->trans->cfg->base_params->max_event_log_size; fw->init_errlog_ptr = pieces->init_errlog_ptr; fw->inst_evtlog_ptr = pieces->inst_evtlog_ptr; if (pieces->inst_evtlog_size) fw->inst_evtlog_size = (pieces->inst_evtlog_size - 16)/12; else fw->inst_evtlog_size = drv->trans->cfg->base_params->max_event_log_size; fw->inst_errlog_ptr = pieces->inst_errlog_ptr; /* * figure out the offset of chain noise reset and gain commands * base on the size of standard phy calibration commands table size */ if (fw->ucode_capa.standard_phy_calibration_size > IWL_MAX_PHY_CALIBRATE_TBL_SIZE) fw->ucode_capa.standard_phy_calibration_size = IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE; /* We have our copies now, allow OS release its copies */ release_firmware(ucode_raw); #ifdef CPTCFG_IWLWIFI_SUPPORT_DEBUG_OVERRIDES if (!load_fw_dbg_err) release_firmware(fw_dbg_config); #endif mutex_lock(&iwlwifi_opmode_table_mtx); switch (fw->type) { case IWL_FW_DVM: op = &iwlwifi_opmode_table[DVM_OP_MODE]; break; default: WARN(1, "Invalid fw type %d\n", fw->type); /* fall through */ case IWL_FW_MVM: op = &iwlwifi_opmode_table[MVM_OP_MODE]; break; } #if IS_ENABLED(CPTCFG_IWLXVT) if (iwlwifi_mod_params.xvt_default_mode && drv->fw.type == IWL_FW_MVM) op = &iwlwifi_opmode_table[XVT_OP_MODE]; drv->xvt_mode_on = (op == &iwlwifi_opmode_table[XVT_OP_MODE]); #endif IWL_INFO(drv, "loaded firmware version %s op_mode %s\n", drv->fw.fw_version, op->name); /* add this device to the list of devices using this op_mode */ list_add_tail(&drv->list, &op->drv); if (op->ops) { drv->op_mode = _iwl_op_mode_start(drv, op); if (!drv->op_mode) { mutex_unlock(&iwlwifi_opmode_table_mtx); goto out_unbind; } } else { load_module = true; } mutex_unlock(&iwlwifi_opmode_table_mtx); /* * Complete the firmware request last so that * a driver unbind (stop) doesn't run while we * are doing the start() above. */ complete(&drv->request_firmware_complete); /* * Load the module last so we don't block anything * else from proceeding if the module fails to load * or hangs loading. 
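 * (request_module("iwlmvm") eventually lands in iwl_opmode_register()
 * below, which walks op->drv and calls _iwl_op_mode_start() for every
 * device that queued up while the op mode module was still missing)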
*/ if (load_module) request_module("%s", op->name); goto free; try_again: /* try next, if any */ release_firmware(ucode_raw); if (iwl_request_firmware(drv, false)) goto out_unbind; goto free; out_free_fw: release_firmware(ucode_raw); out_unbind: complete(&drv->request_firmware_complete); device_release_driver(drv->trans->dev); free: if (pieces) { for (i = 0; i < ARRAY_SIZE(pieces->img); i++) kfree(pieces->img[i].sec); kfree(pieces->dbg_mem_tlv); kfree(pieces); } } struct iwl_drv *iwl_drv_start(struct iwl_trans *trans) { struct iwl_drv *drv; int ret; drv = kzalloc(sizeof(*drv), GFP_KERNEL); if (!drv) { ret = -ENOMEM; goto err; } drv->trans = trans; drv->dev = trans->dev; init_completion(&drv->request_firmware_complete); INIT_LIST_HEAD(&drv->list); #ifdef CPTCFG_IWLWIFI_SUPPORT_DEBUG_OVERRIDES trans->dbg_cfg = current_dbg_config; iwl_dbg_cfg_load_ini(drv->trans->dev, &drv->trans->dbg_cfg); #endif iwl_dbg_tlv_load_bin(drv->trans->dev, drv->trans); #ifdef CPTCFG_IWLWIFI_DEBUGFS /* Create the device debugfs entries. */ drv->dbgfs_drv = debugfs_create_dir(dev_name(trans->dev), iwl_dbgfs_root); /* Create transport layer debugfs dir */ drv->trans->dbgfs_dir = debugfs_create_dir("trans", drv->dbgfs_drv); #endif #ifdef CPTCFG_IWLWIFI_DEVICE_TESTMODE iwl_tm_gnl_add(drv->trans); #endif ret = iwl_request_firmware(drv, true); if (ret) { IWL_ERR(trans, "Couldn't request the fw\n"); goto err_fw; } #if IS_ENABLED(CPTCFG_IWLXVT) ret = iwl_create_sysfs_file(drv); if (ret) { IWL_ERR(trans, "Couldn't create sysfs entry\n"); goto err_fw; } #endif return drv; err_fw: #ifdef CPTCFG_IWLWIFI_DEVICE_TESTMODE iwl_tm_gnl_remove(drv->trans); #endif #ifdef CPTCFG_IWLWIFI_DEBUGFS debugfs_remove_recursive(drv->dbgfs_drv); #endif iwl_dbg_tlv_free(drv->trans); kfree(drv); err: return ERR_PTR(ret); } void iwl_drv_stop(struct iwl_drv *drv) { wait_for_completion(&drv->request_firmware_complete); _iwl_op_mode_stop(drv); iwl_dealloc_ucode(drv); mutex_lock(&iwlwifi_opmode_table_mtx); /* * List is empty (this item wasn't added) * when firmware loading failed -- in that * case we can't remove it from any list. 
*/ if (!list_empty(&drv->list)) list_del(&drv->list); mutex_unlock(&iwlwifi_opmode_table_mtx); #ifdef CPTCFG_IWLWIFI_DEBUGFS drv->trans->ops->debugfs_cleanup(drv->trans); debugfs_remove_recursive(drv->dbgfs_drv); #endif #ifdef CPTCFG_IWLWIFI_SUPPORT_DEBUG_OVERRIDES iwl_dbg_cfg_free(&drv->trans->dbg_cfg); #endif iwl_dbg_tlv_free(drv->trans); #if IS_ENABLED(CPTCFG_IWLXVT) iwl_remove_sysfs_file(drv); #endif #ifdef CPTCFG_IWLWIFI_DEVICE_TESTMODE iwl_tm_gnl_remove(drv->trans); #endif kfree(drv); } /* shared module parameters */ struct iwl_mod_params iwlwifi_mod_params = { .fw_restart = true, .bt_coex_active = true, .power_level = IWL_POWER_INDEX_1, .uapsd_disable = IWL_DISABLE_UAPSD_BSS | IWL_DISABLE_UAPSD_P2P_CLIENT, /* the rest are 0 by default */ }; IWL_EXPORT_SYMBOL(iwlwifi_mod_params); int iwl_opmode_register(const char *name, const struct iwl_op_mode_ops *ops) { int i; struct iwl_drv *drv; struct iwlwifi_opmode_table *op; mutex_lock(&iwlwifi_opmode_table_mtx); for (i = 0; i < ARRAY_SIZE(iwlwifi_opmode_table); i++) { op = &iwlwifi_opmode_table[i]; if (strcmp(op->name, name)) continue; op->ops = ops; /* TODO: need to handle exceptional case */ list_for_each_entry(drv, &op->drv, list) drv->op_mode = _iwl_op_mode_start(drv, op); mutex_unlock(&iwlwifi_opmode_table_mtx); return 0; } mutex_unlock(&iwlwifi_opmode_table_mtx); return -EIO; } IWL_EXPORT_SYMBOL(iwl_opmode_register); void iwl_opmode_deregister(const char *name) { int i; struct iwl_drv *drv; mutex_lock(&iwlwifi_opmode_table_mtx); for (i = 0; i < ARRAY_SIZE(iwlwifi_opmode_table); i++) { if (strcmp(iwlwifi_opmode_table[i].name, name)) continue; iwlwifi_opmode_table[i].ops = NULL; /* call the stop routine for all devices */ list_for_each_entry(drv, &iwlwifi_opmode_table[i].drv, list) _iwl_op_mode_stop(drv); mutex_unlock(&iwlwifi_opmode_table_mtx); return; } mutex_unlock(&iwlwifi_opmode_table_mtx); } IWL_EXPORT_SYMBOL(iwl_opmode_deregister); static int __init iwl_drv_init(void) { int i, err; mutex_init(&iwlwifi_opmode_table_mtx); for (i = 0; i < ARRAY_SIZE(iwlwifi_opmode_table); i++) INIT_LIST_HEAD(&iwlwifi_opmode_table[i].drv); #ifdef CPTCFG_IWLWIFI_DEVICE_TESTMODE if (iwl_tm_gnl_init()) return -EFAULT; #endif #if IS_ENABLED(CPTCFG_IWLXVT) iwl_kobj = kobject_create_and_add("devices", &THIS_MODULE->mkobj.kobj); if (!iwl_kobj) { err = -ENOMEM; goto cleanup_tm_gnl; } #endif pr_info(DRV_DESCRIPTION "\n"); pr_info(DRV_COPYRIGHT "\n"); #ifdef CPTCFG_IWLWIFI_DEBUGFS /* Create the root of iwlwifi debugfs subsystem. 
*/ iwl_dbgfs_root = debugfs_create_dir(DRV_NAME, NULL); #endif err = iwl_pci_register_driver(); if (err) goto cleanup_debugfs; return 0; cleanup_debugfs: #if IS_ENABLED(CPTCFG_IWLXVT) kobject_put(iwl_kobj); cleanup_tm_gnl: #endif #ifdef CPTCFG_IWLWIFI_DEVICE_TESTMODE iwl_tm_gnl_exit(); #endif #ifdef CPTCFG_IWLWIFI_DEBUGFS debugfs_remove_recursive(iwl_dbgfs_root); #endif return err; } module_init(iwl_drv_init); static void __exit iwl_drv_exit(void) { iwl_pci_unregister_driver(); #ifdef CPTCFG_IWLWIFI_DEBUGFS debugfs_remove_recursive(iwl_dbgfs_root); #endif #if IS_ENABLED(CPTCFG_IWLXVT) if (iwl_kobj) kobject_put(iwl_kobj); #endif #ifdef CPTCFG_IWLWIFI_DEVICE_TESTMODE iwl_tm_gnl_exit(); #endif } module_exit(iwl_drv_exit); #ifdef CPTCFG_IWLWIFI_DEBUG module_param_named(debug, iwlwifi_mod_params.debug_level, uint, 0644); MODULE_PARM_DESC(debug, "debug output mask"); #endif #if IS_ENABLED(CPTCFG_IWLXVT) module_param_named(xvt_default_mode, iwlwifi_mod_params.xvt_default_mode, bool, S_IRUGO); MODULE_PARM_DESC(xvt_default_mode, "xVT is the default operation mode (default: false)"); #endif module_param_named(swcrypto, iwlwifi_mod_params.swcrypto, int, 0444); MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])"); module_param_named(11n_disable, iwlwifi_mod_params.disable_11n, uint, 0444); MODULE_PARM_DESC(11n_disable, "disable 11n functionality, bitmap: 1: full, 2: disable agg TX, 4: disable agg RX, 8: enable agg TX"); module_param_named(amsdu_size, iwlwifi_mod_params.amsdu_size, int, 0444); MODULE_PARM_DESC(amsdu_size, "amsdu size 0: 12K for multi Rx queue devices, 2K for 22560 devices, " "4K for other devices 1:4K 2:8K 3:12K 4: 2K (default 0)"); module_param_named(fw_restart, iwlwifi_mod_params.fw_restart, bool, 0444); MODULE_PARM_DESC(fw_restart, "restart firmware in case of error (default true)"); module_param_named(antenna_coupling, iwlwifi_mod_params.antenna_coupling, int, 0444); MODULE_PARM_DESC(antenna_coupling, "specify antenna coupling in dB (default: 0 dB)"); module_param_named(nvm_file, iwlwifi_mod_params.nvm_file, charp, 0444); MODULE_PARM_DESC(nvm_file, "NVM file name"); module_param_named(lar_disable, iwlwifi_mod_params.lar_disable, bool, 0444); MODULE_PARM_DESC(lar_disable, "disable LAR functionality (default: N)"); module_param_named(uapsd_disable, iwlwifi_mod_params.uapsd_disable, uint, 0644); MODULE_PARM_DESC(uapsd_disable, "disable U-APSD functionality bitmap 1: BSS 2: P2P Client (default: 3)"); module_param_named(enable_ini, iwlwifi_mod_params.enable_ini, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(enable_ini, "Enable debug INI TLV FW debug infrastructure (default: 0)"); /* * set bt_coex_active to true, uCode will do kill/defer * every time the priority line is asserted (BT is sending signals on the * priority line in the PCIx). * set bt_coex_active to false, uCode will ignore the BT activity and * perform the normal operation * * User might experience transmit issue on some platform due to WiFi/BT * co-exist problem.
The possible behaviors are: * Able to scan and finding all the available AP * Not able to associate with any AP * On those platforms, WiFi communication can be restored by set * "bt_coex_active" module parameter to "false" * * default: bt_coex_active = true (BT_COEX_ENABLE) */ module_param_named(bt_coex_active, iwlwifi_mod_params.bt_coex_active, bool, 0444); MODULE_PARM_DESC(bt_coex_active, "enable wifi/bt co-exist (default: enable)"); module_param_named(led_mode, iwlwifi_mod_params.led_mode, int, 0444); MODULE_PARM_DESC(led_mode, "0=system default, " "1=On(RF On)/Off(RF Off), 2=blinking, 3=Off (default: 0)"); module_param_named(power_save, iwlwifi_mod_params.power_save, bool, 0444); MODULE_PARM_DESC(power_save, "enable WiFi power management (default: disable)"); module_param_named(power_level, iwlwifi_mod_params.power_level, int, 0444); MODULE_PARM_DESC(power_level, "default power save level (range from 1 - 5, default: 1)"); module_param_named(fw_monitor, iwlwifi_mod_params.fw_monitor, bool, 0444); MODULE_PARM_DESC(fw_monitor, "firmware monitor - to debug FW (default: false - needs lots of memory)"); module_param_named(disable_11ac, iwlwifi_mod_params.disable_11ac, bool, 0444); MODULE_PARM_DESC(disable_11ac, "Disable VHT capabilities (default: false)"); module_param_named(disable_11ax, iwlwifi_mod_params.disable_11ax, bool, 0444); MODULE_PARM_DESC(disable_11ax, "Disable HE capabilities (default: false)"); module_param_named(disable_msix, iwlwifi_mod_params.disable_msix, bool, 0444); MODULE_PARM_DESC(disable_msix, "Disable MSI-X and use MSI instead (default: false)"); module_param_named(remove_when_gone, iwlwifi_mod_params.remove_when_gone, bool, 0444); MODULE_PARM_DESC(remove_when_gone, "Remove dev from PCIe bus if it is deemed inaccessible (default: false)"); backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/iwl-drv.h000066400000000000000000000150551351064634500263320ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. 
* * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *****************************************************************************/ #ifndef __iwl_drv_h__ #define __iwl_drv_h__ #include <linux/export.h> #include <linux/device.h> /* for all modules */ #define DRV_NAME "iwlwifi" #define DRV_COPYRIGHT "Copyright(c) 2003- 2015 Intel Corporation" #define DRV_AUTHOR "<ilw@linux.intel.com>" /* radio config bits (actual values from NVM definition) */ #define NVM_RF_CFG_DASH_MSK(x) (x & 0x3) /* bits 0-1 */ #define NVM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */ #define NVM_RF_CFG_TYPE_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */ #define NVM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */ #define NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */ #define NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */ #define EXT_NVM_RF_CFG_FLAVOR_MSK(x) ((x) & 0xF) #define EXT_NVM_RF_CFG_DASH_MSK(x) (((x) >> 4) & 0xF) #define EXT_NVM_RF_CFG_STEP_MSK(x) (((x) >> 8) & 0xF) #define EXT_NVM_RF_CFG_TYPE_MSK(x) (((x) >> 12) & 0xFFF) #define EXT_NVM_RF_CFG_TX_ANT_MSK(x) (((x) >> 24) & 0xF) #define EXT_NVM_RF_CFG_RX_ANT_MSK(x) (((x) >> 28) & 0xF) /** * DOC: Driver system flows - drv component * * This component implements the system flows such as bus enumeration, bus * removal. Bus dependent parts of system flows (such as iwl_pci_probe) are in * bus specific files (transport files). This is the code that is common among * different buses. * * This component is also in charge of managing the several implementations of * the wifi flows: it allows having several fw API implementations. These * different implementations will differ in the way they implement mac80211's * handlers too. * The init flow wrt the drv component looks like this: * 1) The bus specific component is called from module_init * 2) The bus specific component registers the bus driver * 3) The bus driver calls the probe function * 4) The bus specific component configures the bus * 5) The bus specific component calls to the drv bus agnostic part * (iwl_drv_start) * 6) iwl_drv_start fetches the fw ASYNC, iwl_req_fw_callback * 7) iwl_req_fw_callback parses the fw file * 8) iwl_req_fw_callback starts the wifi implementation that matches the fw */ struct iwl_drv; struct iwl_trans; struct iwl_cfg; /** * iwl_drv_start - start the drv * * @trans: the transport layer * * starts the driver: fetches the firmware. This should be called by bus * specific system flows implementations.
For example, the bus specific probe * function should do bus related operations only, and then call to this * function. It returns the driver object or %NULL if an error occurred. */ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans); /** * iwl_drv_stop - stop the drv * * @drv: * * Stop the driver. This should be called by bus specific system flows * implementations. For example, the bus specific remove function should first * call this function and then do the bus related operations only. */ void iwl_drv_stop(struct iwl_drv *drv); /* * iwl_drv_get_dev_container - Given a device, returns the pointer * to it's corresponding driver's struct */ struct iwl_drv *iwl_drv_get_dev_container(struct device *dev); /* * iwl_drv_switch_op_mode - Switch between operation modes * Checks if the desired operation mode is valid, if it * is supported by the device. Stops the current op mode * and starts the desired mode. */ int iwl_drv_switch_op_mode(struct iwl_drv *drv, const char *new_op_name); /* * exported symbol management * * The driver can be split into multiple modules, in which case some symbols * must be exported for the sub-modules. However, if it's not split and * everything is built-in, then we can avoid that. */ #ifdef CPTCFG_IWLWIFI_OPMODE_MODULAR #define IWL_EXPORT_SYMBOL(sym) EXPORT_SYMBOL_GPL(sym) #else #define IWL_EXPORT_SYMBOL(sym) #endif #endif /* __iwl_drv_h__ */ backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c000066400000000000000000000715551351064634500301400ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2015 Intel Mobile Communications GmbH * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2015 Intel Mobile Communications GmbH * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *****************************************************************************/ #include #include #include #include "iwl-drv.h" #include "iwl-modparams.h" #include "iwl-eeprom-parse.h" /* EEPROM offset definitions */ /* indirect access definitions */ #define ADDRESS_MSK 0x0000FFFF #define INDIRECT_TYPE_MSK 0x000F0000 #define INDIRECT_HOST 0x00010000 #define INDIRECT_GENERAL 0x00020000 #define INDIRECT_REGULATORY 0x00030000 #define INDIRECT_CALIBRATION 0x00040000 #define INDIRECT_PROCESS_ADJST 0x00050000 #define INDIRECT_OTHERS 0x00060000 #define INDIRECT_TXP_LIMIT 0x00070000 #define INDIRECT_TXP_LIMIT_SIZE 0x00080000 #define INDIRECT_ADDRESS 0x00100000 /* corresponding link offsets in EEPROM */ #define EEPROM_LINK_HOST (2*0x64) #define EEPROM_LINK_GENERAL (2*0x65) #define EEPROM_LINK_REGULATORY (2*0x66) #define EEPROM_LINK_CALIBRATION (2*0x67) #define EEPROM_LINK_PROCESS_ADJST (2*0x68) #define EEPROM_LINK_OTHERS (2*0x69) #define EEPROM_LINK_TXP_LIMIT (2*0x6a) #define EEPROM_LINK_TXP_LIMIT_SIZE (2*0x6b) /* General */ #define EEPROM_DEVICE_ID (2*0x08) /* 2 bytes */ #define EEPROM_SUBSYSTEM_ID (2*0x0A) /* 2 bytes */ #define EEPROM_MAC_ADDRESS (2*0x15) /* 6 bytes */ #define EEPROM_BOARD_REVISION (2*0x35) /* 2 bytes */ #define EEPROM_BOARD_PBA_NUMBER (2*0x3B+1) /* 9 bytes */ #define EEPROM_VERSION (2*0x44) /* 2 bytes */ #define EEPROM_SKU_CAP (2*0x45) /* 2 bytes */ #define EEPROM_OEM_MODE (2*0x46) /* 2 bytes */ #define EEPROM_RADIO_CONFIG (2*0x48) /* 2 bytes */ #define EEPROM_NUM_MAC_ADDRESS (2*0x4C) /* 2 bytes */ /* calibration */ struct iwl_eeprom_calib_hdr { u8 version; u8 pa_type; __le16 voltage; } __packed; #define EEPROM_CALIB_ALL (INDIRECT_ADDRESS | INDIRECT_CALIBRATION) #define EEPROM_XTAL ((2*0x128) | EEPROM_CALIB_ALL) /* temperature */ #define EEPROM_KELVIN_TEMPERATURE ((2*0x12A) | EEPROM_CALIB_ALL) #define EEPROM_RAW_TEMPERATURE ((2*0x12B) | EEPROM_CALIB_ALL) /* SKU Capabilities (actual values from EEPROM definition) */ enum eeprom_sku_bits { EEPROM_SKU_CAP_BAND_24GHZ = BIT(4), EEPROM_SKU_CAP_BAND_52GHZ = BIT(5), EEPROM_SKU_CAP_11N_ENABLE = BIT(6), EEPROM_SKU_CAP_AMT_ENABLE = BIT(7), EEPROM_SKU_CAP_IPAN_ENABLE = BIT(8) }; /* radio config bits (actual values from EEPROM definition) */ #define EEPROM_RF_CFG_TYPE_MSK(x) (x & 0x3) /* bits 0-1 */ #define EEPROM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */ #define EEPROM_RF_CFG_DASH_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */ #define EEPROM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */ #define EEPROM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */ #define EEPROM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */ /* * EEPROM bands * These are the channel numbers from each band in the order * that they are stored in the EEPROM band information. 
Note * that EEPROM bands aren't the same as mac80211 bands, and * there are even special "ht40 bands" in the EEPROM. */ static const u8 iwl_eeprom_band_1[14] = { /* 2.4 GHz */ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 }; static const u8 iwl_eeprom_band_2[] = { /* 4915-5080MHz */ 183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16 }; static const u8 iwl_eeprom_band_3[] = { /* 5170-5320MHz */ 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64 }; static const u8 iwl_eeprom_band_4[] = { /* 5500-5700MHz */ 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140 }; static const u8 iwl_eeprom_band_5[] = { /* 5725-5825MHz */ 145, 149, 153, 157, 161, 165 }; static const u8 iwl_eeprom_band_6[] = { /* 2.4 ht40 channel */ 1, 2, 3, 4, 5, 6, 7 }; static const u8 iwl_eeprom_band_7[] = { /* 5.2 ht40 channel */ 36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157 }; #define IWL_NUM_CHANNELS (ARRAY_SIZE(iwl_eeprom_band_1) + \ ARRAY_SIZE(iwl_eeprom_band_2) + \ ARRAY_SIZE(iwl_eeprom_band_3) + \ ARRAY_SIZE(iwl_eeprom_band_4) + \ ARRAY_SIZE(iwl_eeprom_band_5)) /* rate data (static) */ static struct ieee80211_rate iwl_cfg80211_rates[] = { { .bitrate = 1 * 10, .hw_value = 0, .hw_value_short = 0, }, { .bitrate = 2 * 10, .hw_value = 1, .hw_value_short = 1, .flags = IEEE80211_RATE_SHORT_PREAMBLE, }, { .bitrate = 5.5 * 10, .hw_value = 2, .hw_value_short = 2, .flags = IEEE80211_RATE_SHORT_PREAMBLE, }, { .bitrate = 11 * 10, .hw_value = 3, .hw_value_short = 3, .flags = IEEE80211_RATE_SHORT_PREAMBLE, }, { .bitrate = 6 * 10, .hw_value = 4, .hw_value_short = 4, }, { .bitrate = 9 * 10, .hw_value = 5, .hw_value_short = 5, }, { .bitrate = 12 * 10, .hw_value = 6, .hw_value_short = 6, }, { .bitrate = 18 * 10, .hw_value = 7, .hw_value_short = 7, }, { .bitrate = 24 * 10, .hw_value = 8, .hw_value_short = 8, }, { .bitrate = 36 * 10, .hw_value = 9, .hw_value_short = 9, }, { .bitrate = 48 * 10, .hw_value = 10, .hw_value_short = 10, }, { .bitrate = 54 * 10, .hw_value = 11, .hw_value_short = 11, }, }; #define RATES_24_OFFS 0 #define N_RATES_24 ARRAY_SIZE(iwl_cfg80211_rates) #define RATES_52_OFFS 4 #define N_RATES_52 (N_RATES_24 - RATES_52_OFFS) /* EEPROM reading functions */ static u16 iwl_eeprom_query16(const u8 *eeprom, size_t eeprom_size, int offset) { if (WARN_ON(offset + sizeof(u16) > eeprom_size)) return 0; return le16_to_cpup((__le16 *)(eeprom + offset)); } static u32 eeprom_indirect_address(const u8 *eeprom, size_t eeprom_size, u32 address) { u16 offset = 0; if ((address & INDIRECT_ADDRESS) == 0) return address; switch (address & INDIRECT_TYPE_MSK) { case INDIRECT_HOST: offset = iwl_eeprom_query16(eeprom, eeprom_size, EEPROM_LINK_HOST); break; case INDIRECT_GENERAL: offset = iwl_eeprom_query16(eeprom, eeprom_size, EEPROM_LINK_GENERAL); break; case INDIRECT_REGULATORY: offset = iwl_eeprom_query16(eeprom, eeprom_size, EEPROM_LINK_REGULATORY); break; case INDIRECT_TXP_LIMIT: offset = iwl_eeprom_query16(eeprom, eeprom_size, EEPROM_LINK_TXP_LIMIT); break; case INDIRECT_TXP_LIMIT_SIZE: offset = iwl_eeprom_query16(eeprom, eeprom_size, EEPROM_LINK_TXP_LIMIT_SIZE); break; case INDIRECT_CALIBRATION: offset = iwl_eeprom_query16(eeprom, eeprom_size, EEPROM_LINK_CALIBRATION); break; case INDIRECT_PROCESS_ADJST: offset = iwl_eeprom_query16(eeprom, eeprom_size, EEPROM_LINK_PROCESS_ADJST); break; case INDIRECT_OTHERS: offset = iwl_eeprom_query16(eeprom, eeprom_size, EEPROM_LINK_OTHERS); break; default: WARN_ON(1); break; } /* translate the offset from words to byte */ return (address & ADDRESS_MSK) + (offset << 1); } static const u8 
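/* * Worked example of the indirect lookup above (a sketch): EEPROM_XTAL is * ((2*0x128) | EEPROM_CALIB_ALL), i.e. INDIRECT_ADDRESS | INDIRECT_CALIBRATION, * so eeprom_indirect_address() fetches the link word at EEPROM_LINK_CALIBRATION * and returns (offset << 1) + 2*0x128 as the byte address. */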
*iwl_eeprom_query_addr(const u8 *eeprom, size_t eeprom_size, u32 offset) { u32 address = eeprom_indirect_address(eeprom, eeprom_size, offset); if (WARN_ON(address >= eeprom_size)) return NULL; return &eeprom[address]; } static int iwl_eeprom_read_calib(const u8 *eeprom, size_t eeprom_size, struct iwl_nvm_data *data) { struct iwl_eeprom_calib_hdr *hdr; hdr = (void *)iwl_eeprom_query_addr(eeprom, eeprom_size, EEPROM_CALIB_ALL); if (!hdr) return -ENODATA; data->calib_version = hdr->version; data->calib_voltage = hdr->voltage; return 0; } /** * enum iwl_eeprom_channel_flags - channel flags in EEPROM * @EEPROM_CHANNEL_VALID: channel is usable for this SKU/geo * @EEPROM_CHANNEL_IBSS: usable as an IBSS channel * @EEPROM_CHANNEL_ACTIVE: active scanning allowed * @EEPROM_CHANNEL_RADAR: radar detection required * @EEPROM_CHANNEL_WIDE: 20 MHz channel okay (?) * @EEPROM_CHANNEL_DFS: dynamic freq selection candidate */ enum iwl_eeprom_channel_flags { EEPROM_CHANNEL_VALID = BIT(0), EEPROM_CHANNEL_IBSS = BIT(1), EEPROM_CHANNEL_ACTIVE = BIT(3), EEPROM_CHANNEL_RADAR = BIT(4), EEPROM_CHANNEL_WIDE = BIT(5), EEPROM_CHANNEL_DFS = BIT(7), }; /** * struct iwl_eeprom_channel - EEPROM channel data * @flags: %EEPROM_CHANNEL_* flags * @max_power_avg: max power (in dBm) on this channel, at most 31 dBm */ struct iwl_eeprom_channel { u8 flags; s8 max_power_avg; } __packed; enum iwl_eeprom_enhanced_txpwr_flags { IWL_EEPROM_ENH_TXP_FL_VALID = BIT(0), IWL_EEPROM_ENH_TXP_FL_BAND_52G = BIT(1), IWL_EEPROM_ENH_TXP_FL_OFDM = BIT(2), IWL_EEPROM_ENH_TXP_FL_40MHZ = BIT(3), IWL_EEPROM_ENH_TXP_FL_HT_AP = BIT(4), IWL_EEPROM_ENH_TXP_FL_RES1 = BIT(5), IWL_EEPROM_ENH_TXP_FL_RES2 = BIT(6), IWL_EEPROM_ENH_TXP_FL_COMMON_TYPE = BIT(7), }; /** * iwl_eeprom_enhanced_txpwr structure * @flags: entry flags * @channel: channel number * @chain_a_max_pwr: chain a max power in 1/2 dBm * @chain_b_max_pwr: chain b max power in 1/2 dBm * @chain_c_max_pwr: chain c max power in 1/2 dBm * @delta_20_in_40: 20-in-40 deltas (hi/lo) * @mimo2_max_pwr: mimo2 max power in 1/2 dBm * @mimo3_max_pwr: mimo3 max power in 1/2 dBm * * This structure presents the enhanced regulatory tx power limit layout * in an EEPROM image. */ struct iwl_eeprom_enhanced_txpwr { u8 flags; u8 channel; s8 chain_a_max; s8 chain_b_max; s8 chain_c_max; u8 delta_20_in_40; s8 mimo2_max; s8 mimo3_max; } __packed; static s8 iwl_get_max_txpwr_half_dbm(const struct iwl_nvm_data *data, struct iwl_eeprom_enhanced_txpwr *txp) { s8 result = 0; /* (.5 dBm) */ /* Take the highest tx power from any valid chains */ if (data->valid_tx_ant & ANT_A && txp->chain_a_max > result) result = txp->chain_a_max; if (data->valid_tx_ant & ANT_B && txp->chain_b_max > result) result = txp->chain_b_max; if (data->valid_tx_ant & ANT_C && txp->chain_c_max > result) result = txp->chain_c_max; if ((data->valid_tx_ant == ANT_AB || data->valid_tx_ant == ANT_BC || data->valid_tx_ant == ANT_AC) && txp->mimo2_max > result) result = txp->mimo2_max; if (data->valid_tx_ant == ANT_ABC && txp->mimo3_max > result) result = txp->mimo3_max; return result; } #define EEPROM_TXP_OFFS (0x00 | INDIRECT_ADDRESS | INDIRECT_TXP_LIMIT) #define EEPROM_TXP_ENTRY_LEN sizeof(struct iwl_eeprom_enhanced_txpwr) #define EEPROM_TXP_SZ_OFFS (0x00 | INDIRECT_ADDRESS | INDIRECT_TXP_LIMIT_SIZE) #define TXP_CHECK_AND_PRINT(x) \ ((txp->flags & IWL_EEPROM_ENH_TXP_FL_##x) ? 
# x " " : "") static void iwl_eeprom_enh_txp_read_element(struct iwl_nvm_data *data, struct iwl_eeprom_enhanced_txpwr *txp, int n_channels, s8 max_txpower_avg) { int ch_idx; enum nl80211_band band; band = txp->flags & IWL_EEPROM_ENH_TXP_FL_BAND_52G ? NL80211_BAND_5GHZ : NL80211_BAND_2GHZ; for (ch_idx = 0; ch_idx < n_channels; ch_idx++) { struct ieee80211_channel *chan = &data->channels[ch_idx]; /* update matching channel or from common data only */ if (txp->channel != 0 && chan->hw_value != txp->channel) continue; /* update matching band only */ if (band != chan->band) continue; if (chan->max_power < max_txpower_avg && !(txp->flags & IWL_EEPROM_ENH_TXP_FL_40MHZ)) chan->max_power = max_txpower_avg; } } static void iwl_eeprom_enhanced_txpower(struct device *dev, struct iwl_nvm_data *data, const u8 *eeprom, size_t eeprom_size, int n_channels) { struct iwl_eeprom_enhanced_txpwr *txp_array, *txp; int idx, entries; __le16 *txp_len; s8 max_txp_avg_halfdbm; BUILD_BUG_ON(sizeof(struct iwl_eeprom_enhanced_txpwr) != 8); /* the length is in 16-bit words, but we want entries */ txp_len = (__le16 *)iwl_eeprom_query_addr(eeprom, eeprom_size, EEPROM_TXP_SZ_OFFS); entries = le16_to_cpup(txp_len) * 2 / EEPROM_TXP_ENTRY_LEN; txp_array = (void *)iwl_eeprom_query_addr(eeprom, eeprom_size, EEPROM_TXP_OFFS); for (idx = 0; idx < entries; idx++) { txp = &txp_array[idx]; /* skip invalid entries */ if (!(txp->flags & IWL_EEPROM_ENH_TXP_FL_VALID)) continue; IWL_DEBUG_EEPROM(dev, "%s %d:\t %s%s%s%s%s%s%s%s (0x%02x)\n", (txp->channel && (txp->flags & IWL_EEPROM_ENH_TXP_FL_COMMON_TYPE)) ? "Common " : (txp->channel) ? "Channel" : "Common", (txp->channel), TXP_CHECK_AND_PRINT(VALID), TXP_CHECK_AND_PRINT(BAND_52G), TXP_CHECK_AND_PRINT(OFDM), TXP_CHECK_AND_PRINT(40MHZ), TXP_CHECK_AND_PRINT(HT_AP), TXP_CHECK_AND_PRINT(RES1), TXP_CHECK_AND_PRINT(RES2), TXP_CHECK_AND_PRINT(COMMON_TYPE), txp->flags); IWL_DEBUG_EEPROM(dev, "\t\t chain_A: %d chain_B: %d chain_C: %d\n", txp->chain_a_max, txp->chain_b_max, txp->chain_c_max); IWL_DEBUG_EEPROM(dev, "\t\t MIMO2: %d MIMO3: %d High 20_on_40: 0x%02x Low 20_on_40: 0x%02x\n", txp->mimo2_max, txp->mimo3_max, ((txp->delta_20_in_40 & 0xf0) >> 4), (txp->delta_20_in_40 & 0x0f)); max_txp_avg_halfdbm = iwl_get_max_txpwr_half_dbm(data, txp); iwl_eeprom_enh_txp_read_element(data, txp, n_channels, DIV_ROUND_UP(max_txp_avg_halfdbm, 2)); if (max_txp_avg_halfdbm > data->max_tx_pwr_half_dbm) data->max_tx_pwr_half_dbm = max_txp_avg_halfdbm; } } static void iwl_init_band_reference(const struct iwl_cfg *cfg, const u8 *eeprom, size_t eeprom_size, int eeprom_band, int *eeprom_ch_count, const struct iwl_eeprom_channel **ch_info, const u8 **eeprom_ch_array) { u32 offset = cfg->eeprom_params->regulatory_bands[eeprom_band - 1]; offset |= INDIRECT_ADDRESS | INDIRECT_REGULATORY; *ch_info = (void *)iwl_eeprom_query_addr(eeprom, eeprom_size, offset); switch (eeprom_band) { case 1: /* 2.4GHz band */ *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_1); *eeprom_ch_array = iwl_eeprom_band_1; break; case 2: /* 4.9GHz band */ *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_2); *eeprom_ch_array = iwl_eeprom_band_2; break; case 3: /* 5.2GHz band */ *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_3); *eeprom_ch_array = iwl_eeprom_band_3; break; case 4: /* 5.5GHz band */ *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_4); *eeprom_ch_array = iwl_eeprom_band_4; break; case 5: /* 5.7GHz band */ *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_5); *eeprom_ch_array = iwl_eeprom_band_5; break; case 6: /* 2.4GHz ht40 channels */ 
*eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_6); *eeprom_ch_array = iwl_eeprom_band_6; break; case 7: /* 5 GHz ht40 channels */ *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_7); *eeprom_ch_array = iwl_eeprom_band_7; break; default: *eeprom_ch_count = 0; *eeprom_ch_array = NULL; WARN_ON(1); } } #define CHECK_AND_PRINT(x) \ ((eeprom_ch->flags & EEPROM_CHANNEL_##x) ? # x " " : "") static void iwl_mod_ht40_chan_info(struct device *dev, struct iwl_nvm_data *data, int n_channels, enum nl80211_band band, u16 channel, const struct iwl_eeprom_channel *eeprom_ch, u8 clear_ht40_extension_channel) { struct ieee80211_channel *chan = NULL; int i; for (i = 0; i < n_channels; i++) { if (data->channels[i].band != band) continue; if (data->channels[i].hw_value != channel) continue; chan = &data->channels[i]; break; } if (!chan) return; IWL_DEBUG_EEPROM(dev, "HT40 Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm): Ad-Hoc %ssupported\n", channel, band == NL80211_BAND_5GHZ ? "5.2" : "2.4", CHECK_AND_PRINT(IBSS), CHECK_AND_PRINT(ACTIVE), CHECK_AND_PRINT(RADAR), CHECK_AND_PRINT(WIDE), CHECK_AND_PRINT(DFS), eeprom_ch->flags, eeprom_ch->max_power_avg, ((eeprom_ch->flags & EEPROM_CHANNEL_IBSS) && !(eeprom_ch->flags & EEPROM_CHANNEL_RADAR)) ? "" : "not "); if (eeprom_ch->flags & EEPROM_CHANNEL_VALID) chan->flags &= ~clear_ht40_extension_channel; } #define CHECK_AND_PRINT_I(x) \ ((eeprom_ch_info[ch_idx].flags & EEPROM_CHANNEL_##x) ? # x " " : "") static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg, struct iwl_nvm_data *data, const u8 *eeprom, size_t eeprom_size) { int band, ch_idx; const struct iwl_eeprom_channel *eeprom_ch_info; const u8 *eeprom_ch_array; int eeprom_ch_count; int n_channels = 0; /* * Loop through the 5 EEPROM bands and add them to the parse list */ for (band = 1; band <= 5; band++) { struct ieee80211_channel *channel; iwl_init_band_reference(cfg, eeprom, eeprom_size, band, &eeprom_ch_count, &eeprom_ch_info, &eeprom_ch_array); /* Loop through each band adding each of the channels */ for (ch_idx = 0; ch_idx < eeprom_ch_count; ch_idx++) { const struct iwl_eeprom_channel *eeprom_ch; eeprom_ch = &eeprom_ch_info[ch_idx]; if (!(eeprom_ch->flags & EEPROM_CHANNEL_VALID)) { IWL_DEBUG_EEPROM(dev, "Ch. %d Flags %x [%sGHz] - No traffic\n", eeprom_ch_array[ch_idx], eeprom_ch_info[ch_idx].flags, (band != 1) ? "5.2" : "2.4"); continue; } channel = &data->channels[n_channels]; n_channels++; channel->hw_value = eeprom_ch_array[ch_idx]; channel->band = (band == 1) ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ; channel->center_freq = ieee80211_channel_to_frequency( channel->hw_value, channel->band); /* set no-HT40, will enable as appropriate later */ channel->flags = IEEE80211_CHAN_NO_HT40; if (!(eeprom_ch->flags & EEPROM_CHANNEL_IBSS)) channel->flags |= IEEE80211_CHAN_NO_IR; if (!(eeprom_ch->flags & EEPROM_CHANNEL_ACTIVE)) channel->flags |= IEEE80211_CHAN_NO_IR; if (eeprom_ch->flags & EEPROM_CHANNEL_RADAR) channel->flags |= IEEE80211_CHAN_RADAR; /* Initialize regulatory-based run-time data */ channel->max_power = eeprom_ch_info[ch_idx].max_power_avg; IWL_DEBUG_EEPROM(dev, "Ch. %d [%sGHz] %s%s%s%s%s%s(0x%02x %ddBm): Ad-Hoc %ssupported\n", channel->hw_value, (band != 1) ? 
"5.2" : "2.4", CHECK_AND_PRINT_I(VALID), CHECK_AND_PRINT_I(IBSS), CHECK_AND_PRINT_I(ACTIVE), CHECK_AND_PRINT_I(RADAR), CHECK_AND_PRINT_I(WIDE), CHECK_AND_PRINT_I(DFS), eeprom_ch_info[ch_idx].flags, eeprom_ch_info[ch_idx].max_power_avg, ((eeprom_ch_info[ch_idx].flags & EEPROM_CHANNEL_IBSS) && !(eeprom_ch_info[ch_idx].flags & EEPROM_CHANNEL_RADAR)) ? "" : "not "); } } if (cfg->eeprom_params->enhanced_txpower) { /* * for newer device (6000 series and up) * EEPROM contain enhanced tx power information * driver need to process addition information * to determine the max channel tx power limits */ iwl_eeprom_enhanced_txpower(dev, data, eeprom, eeprom_size, n_channels); } else { /* All others use data from channel map */ int i; data->max_tx_pwr_half_dbm = -128; for (i = 0; i < n_channels; i++) data->max_tx_pwr_half_dbm = max_t(s8, data->max_tx_pwr_half_dbm, data->channels[i].max_power * 2); } /* Check if we do have HT40 channels */ if (cfg->eeprom_params->regulatory_bands[5] == EEPROM_REGULATORY_BAND_NO_HT40 && cfg->eeprom_params->regulatory_bands[6] == EEPROM_REGULATORY_BAND_NO_HT40) return n_channels; /* Two additional EEPROM bands for 2.4 and 5 GHz HT40 channels */ for (band = 6; band <= 7; band++) { enum nl80211_band ieeeband; iwl_init_band_reference(cfg, eeprom, eeprom_size, band, &eeprom_ch_count, &eeprom_ch_info, &eeprom_ch_array); /* EEPROM band 6 is 2.4, band 7 is 5 GHz */ ieeeband = (band == 6) ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ; /* Loop through each band adding each of the channels */ for (ch_idx = 0; ch_idx < eeprom_ch_count; ch_idx++) { /* Set up driver's info for lower half */ iwl_mod_ht40_chan_info(dev, data, n_channels, ieeeband, eeprom_ch_array[ch_idx], &eeprom_ch_info[ch_idx], IEEE80211_CHAN_NO_HT40PLUS); /* Set up driver's info for upper half */ iwl_mod_ht40_chan_info(dev, data, n_channels, ieeeband, eeprom_ch_array[ch_idx] + 4, &eeprom_ch_info[ch_idx], IEEE80211_CHAN_NO_HT40MINUS); } } return n_channels; } int iwl_init_sband_channels(struct iwl_nvm_data *data, struct ieee80211_supported_band *sband, int n_channels, enum nl80211_band band) { struct ieee80211_channel *chan = &data->channels[0]; int n = 0, idx = 0; while (idx < n_channels && chan->band != band) chan = &data->channels[++idx]; sband->channels = &data->channels[idx]; while (idx < n_channels && chan->band == band) { chan = &data->channels[++idx]; n++; } sband->n_channels = n; return n; } #define MAX_BIT_RATE_40_MHZ 150 /* Mbps */ #define MAX_BIT_RATE_20_MHZ 72 /* Mbps */ void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg, struct iwl_nvm_data *data, struct ieee80211_sta_ht_cap *ht_info, enum nl80211_band band, u8 tx_chains, u8 rx_chains) { int max_bit_rate = 0; tx_chains = hweight8(tx_chains); if (cfg->rx_with_siso_diversity) rx_chains = 1; else rx_chains = hweight8(rx_chains); if (!(data->sku_cap_11n_enable) || (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL) || !cfg->ht_params) { ht_info->ht_supported = false; return; } if (data->sku_cap_mimo_disabled) rx_chains = 1; ht_info->ht_supported = true; ht_info->cap = IEEE80211_HT_CAP_DSSSCCK40; if (cfg->ht_params->stbc) { ht_info->cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT); if (tx_chains > 1) ht_info->cap |= IEEE80211_HT_CAP_TX_STBC; } if (cfg->ht_params->ldpc) ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING; if ((cfg->mq_rx_supported && iwlwifi_mod_params.amsdu_size == IWL_AMSDU_DEF) || iwlwifi_mod_params.amsdu_size >= IWL_AMSDU_8K) ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU; ht_info->ampdu_factor = cfg->max_ht_ampdu_exponent; ht_info->ampdu_density = 
IEEE80211_HT_MPDU_DENSITY_4; ht_info->mcs.rx_mask[0] = 0xFF; if (rx_chains >= 2) ht_info->mcs.rx_mask[1] = 0xFF; if (rx_chains >= 3) ht_info->mcs.rx_mask[2] = 0xFF; if (cfg->ht_params->ht_greenfield_support) ht_info->cap |= IEEE80211_HT_CAP_GRN_FLD; ht_info->cap |= IEEE80211_HT_CAP_SGI_20; max_bit_rate = MAX_BIT_RATE_20_MHZ; if (cfg->ht_params->ht40_bands & BIT(band)) { ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; ht_info->cap |= IEEE80211_HT_CAP_SGI_40; max_bit_rate = MAX_BIT_RATE_40_MHZ; } /* Highest supported Rx data rate */ max_bit_rate *= rx_chains; WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK); ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate); /* Tx MCS capabilities */ ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED; if (tx_chains != rx_chains) { ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF; ht_info->mcs.tx_params |= ((tx_chains - 1) << IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT); } } static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg, struct iwl_nvm_data *data, const u8 *eeprom, size_t eeprom_size) { int n_channels = iwl_init_channel_map(dev, cfg, data, eeprom, eeprom_size); int n_used = 0; struct ieee80211_supported_band *sband; sband = &data->bands[NL80211_BAND_2GHZ]; sband->band = NL80211_BAND_2GHZ; sband->bitrates = &iwl_cfg80211_rates[RATES_24_OFFS]; sband->n_bitrates = N_RATES_24; n_used += iwl_init_sband_channels(data, sband, n_channels, NL80211_BAND_2GHZ); iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, NL80211_BAND_2GHZ, data->valid_tx_ant, data->valid_rx_ant); sband = &data->bands[NL80211_BAND_5GHZ]; sband->band = NL80211_BAND_5GHZ; sband->bitrates = &iwl_cfg80211_rates[RATES_52_OFFS]; sband->n_bitrates = N_RATES_52; n_used += iwl_init_sband_channels(data, sband, n_channels, NL80211_BAND_5GHZ); iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, NL80211_BAND_5GHZ, data->valid_tx_ant, data->valid_rx_ant); if (n_channels != n_used) IWL_ERR_DEV(dev, "EEPROM: used only %d of %d channels\n", n_used, n_channels); } /* EEPROM data functions */ struct iwl_nvm_data * iwl_parse_eeprom_data(struct device *dev, const struct iwl_cfg *cfg, const u8 *eeprom, size_t eeprom_size) { struct iwl_nvm_data *data; const void *tmp; u16 radio_cfg, sku; if (WARN_ON(!cfg || !cfg->eeprom_params)) return NULL; data = kzalloc(struct_size(data, channels, IWL_NUM_CHANNELS), GFP_KERNEL); if (!data) return NULL; /* get MAC address(es) */ tmp = iwl_eeprom_query_addr(eeprom, eeprom_size, EEPROM_MAC_ADDRESS); if (!tmp) goto err_free; memcpy(data->hw_addr, tmp, ETH_ALEN); data->n_hw_addrs = iwl_eeprom_query16(eeprom, eeprom_size, EEPROM_NUM_MAC_ADDRESS); if (iwl_eeprom_read_calib(eeprom, eeprom_size, data)) goto err_free; tmp = iwl_eeprom_query_addr(eeprom, eeprom_size, EEPROM_XTAL); if (!tmp) goto err_free; memcpy(data->xtal_calib, tmp, sizeof(data->xtal_calib)); tmp = iwl_eeprom_query_addr(eeprom, eeprom_size, EEPROM_RAW_TEMPERATURE); if (!tmp) goto err_free; data->raw_temperature = *(__le16 *)tmp; tmp = iwl_eeprom_query_addr(eeprom, eeprom_size, EEPROM_KELVIN_TEMPERATURE); if (!tmp) goto err_free; data->kelvin_temperature = *(__le16 *)tmp; data->kelvin_voltage = *((__le16 *)tmp + 1); radio_cfg = iwl_eeprom_query16(eeprom, eeprom_size, EEPROM_RADIO_CONFIG); data->radio_cfg_dash = EEPROM_RF_CFG_DASH_MSK(radio_cfg); data->radio_cfg_pnum = EEPROM_RF_CFG_PNUM_MSK(radio_cfg); data->radio_cfg_step = EEPROM_RF_CFG_STEP_MSK(radio_cfg); data->radio_cfg_type = EEPROM_RF_CFG_TYPE_MSK(radio_cfg); data->valid_rx_ant = EEPROM_RF_CFG_RX_ANT_MSK(radio_cfg); data->valid_tx_ant 
= EEPROM_RF_CFG_TX_ANT_MSK(radio_cfg); sku = iwl_eeprom_query16(eeprom, eeprom_size, EEPROM_SKU_CAP); data->sku_cap_11n_enable = sku & EEPROM_SKU_CAP_11N_ENABLE; data->sku_cap_amt_enable = sku & EEPROM_SKU_CAP_AMT_ENABLE; data->sku_cap_band_24ghz_enable = sku & EEPROM_SKU_CAP_BAND_24GHZ; data->sku_cap_band_52ghz_enable = sku & EEPROM_SKU_CAP_BAND_52GHZ; data->sku_cap_ipan_enable = sku & EEPROM_SKU_CAP_IPAN_ENABLE; if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL) data->sku_cap_11n_enable = false; data->nvm_version = iwl_eeprom_query16(eeprom, eeprom_size, EEPROM_VERSION); /* check overrides (some devices have wrong EEPROM) */ if (cfg->valid_tx_ant) data->valid_tx_ant = cfg->valid_tx_ant; if (cfg->valid_rx_ant) data->valid_rx_ant = cfg->valid_rx_ant; if (!data->valid_tx_ant || !data->valid_rx_ant) { IWL_ERR_DEV(dev, "invalid antennas (0x%x, 0x%x)\n", data->valid_tx_ant, data->valid_rx_ant); goto err_free; } iwl_init_sbands(dev, cfg, data, eeprom, eeprom_size); return data; err_free: kfree(data); return NULL; } IWL_EXPORT_SYMBOL(iwl_parse_eeprom_data); backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h000066400000000000000000000113051351064634500301300ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2015 Intel Mobile Communications GmbH * Copyright (C) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2015 Intel Mobile Communications GmbH * Copyright (C) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *****************************************************************************/ #ifndef __iwl_eeprom_parse_h__ #define __iwl_eeprom_parse_h__ #include #include #include #include "iwl-trans.h" struct iwl_nvm_data { int n_hw_addrs; u8 hw_addr[ETH_ALEN]; u8 calib_version; __le16 calib_voltage; __le16 raw_temperature; __le16 kelvin_temperature; __le16 kelvin_voltage; __le16 xtal_calib[2]; bool sku_cap_band_24ghz_enable; bool sku_cap_band_52ghz_enable; bool sku_cap_11n_enable; bool sku_cap_11ac_enable; bool sku_cap_11ax_enable; bool sku_cap_amt_enable; bool sku_cap_ipan_enable; bool sku_cap_mimo_disabled; u16 radio_cfg_type; u8 radio_cfg_step; u8 radio_cfg_dash; u8 radio_cfg_pnum; u8 valid_tx_ant, valid_rx_ant; u32 nvm_version; s8 max_tx_pwr_half_dbm; bool lar_enabled; bool vht160_supported; struct ieee80211_supported_band bands[NUM_NL80211_BANDS]; struct ieee80211_channel channels[]; }; /** * iwl_parse_eeprom_data - parse EEPROM data and return values * * @dev: device pointer we're parsing for, for debug only * @cfg: device configuration for parsing and overrides * @eeprom: the EEPROM data * @eeprom_size: length of the EEPROM data * * This function parses all EEPROM values we need and then * returns a (newly allocated) struct containing all the * relevant values for driver use. The struct must be freed * later with iwl_free_nvm_data(). */ struct iwl_nvm_data * iwl_parse_eeprom_data(struct device *dev, const struct iwl_cfg *cfg, const u8 *eeprom, size_t eeprom_size); int iwl_init_sband_channels(struct iwl_nvm_data *data, struct ieee80211_supported_band *sband, int n_channels, enum nl80211_band band); void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg, struct iwl_nvm_data *data, struct ieee80211_sta_ht_cap *ht_info, enum nl80211_band band, u8 tx_chains, u8 rx_chains); #endif /* __iwl_eeprom_parse_h__ */ backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c000066400000000000000000000317221351064634500277310ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *****************************************************************************/ #include #include #include #include "iwl-drv.h" #include "iwl-debug.h" #include "iwl-eeprom-read.h" #include "iwl-io.h" #include "iwl-prph.h" #include "iwl-csr.h" /* * EEPROM access time values: * * Driver initiates EEPROM read by writing byte address << 1 to CSR_EEPROM_REG. * Driver then polls CSR_EEPROM_REG for CSR_EEPROM_REG_READ_VALID_MSK (0x1). * When polling, wait 10 uSec between polling loops, up to a maximum 5000 uSec. * Driver reads 16-bit value from bits 31-16 of CSR_EEPROM_REG. */ #define IWL_EEPROM_ACCESS_TIMEOUT 5000 /* uSec */ #define IWL_EEPROM_SEM_TIMEOUT 10 /* microseconds */ #define IWL_EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */ /* * The device's EEPROM semaphore prevents conflicts between driver and uCode * when accessing the EEPROM; each access is a series of pulses to/from the * EEPROM chip, not a single event, so even reads could conflict if they * weren't arbitrated by the semaphore. 
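* * A guarded access thus follows the pattern below (a sketch; the helpers * iwl_eeprom_acquire_semaphore() and iwl_eeprom_release_semaphore() are * defined right after this comment, and iwl_read_eeprom() uses them the * same way): * ret = iwl_eeprom_acquire_semaphore(trans); * if (ret < 0) * return ret; * ...read words via CSR_EEPROM_REG... * iwl_eeprom_release_semaphore(trans);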
*/ #define EEPROM_SEM_TIMEOUT 10 /* milliseconds */ #define EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */ static int iwl_eeprom_acquire_semaphore(struct iwl_trans *trans) { u16 count; int ret; for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) { /* Request semaphore */ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM); /* See if we got it */ ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM, CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM, EEPROM_SEM_TIMEOUT); if (ret >= 0) { IWL_DEBUG_EEPROM(trans->dev, "Acquired semaphore after %d tries.\n", count+1); return ret; } } return ret; } static void iwl_eeprom_release_semaphore(struct iwl_trans *trans) { iwl_clear_bit(trans, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM); } static int iwl_eeprom_verify_signature(struct iwl_trans *trans, bool nvm_is_otp) { u32 gp = iwl_read32(trans, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK; IWL_DEBUG_EEPROM(trans->dev, "EEPROM signature=0x%08x\n", gp); switch (gp) { case CSR_EEPROM_GP_BAD_SIG_EEP_GOOD_SIG_OTP: if (!nvm_is_otp) { IWL_ERR(trans, "EEPROM with bad signature: 0x%08x\n", gp); return -ENOENT; } return 0; case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K: case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K: if (nvm_is_otp) { IWL_ERR(trans, "OTP with bad signature: 0x%08x\n", gp); return -ENOENT; } return 0; case CSR_EEPROM_GP_BAD_SIGNATURE_BOTH_EEP_AND_OTP: default: IWL_ERR(trans, "bad EEPROM/OTP signature, type=%s, EEPROM_GP=0x%08x\n", nvm_is_otp ? "OTP" : "EEPROM", gp); return -ENOENT; } } /****************************************************************************** * * OTP related functions * ******************************************************************************/ static void iwl_set_otp_access_absolute(struct iwl_trans *trans) { iwl_read32(trans, CSR_OTP_GP_REG); iwl_clear_bit(trans, CSR_OTP_GP_REG, CSR_OTP_GP_REG_OTP_ACCESS_MODE); } static int iwl_nvm_is_otp(struct iwl_trans *trans) { u32 otpgp; /* OTP only valid for CP/PP and after */ switch (trans->hw_rev & CSR_HW_REV_TYPE_MSK) { case CSR_HW_REV_TYPE_NONE: IWL_ERR(trans, "Unknown hardware type\n"); return -EIO; case CSR_HW_REV_TYPE_5300: case CSR_HW_REV_TYPE_5350: case CSR_HW_REV_TYPE_5100: case CSR_HW_REV_TYPE_5150: return 0; default: otpgp = iwl_read32(trans, CSR_OTP_GP_REG); if (otpgp & CSR_OTP_GP_REG_DEVICE_SELECT) return 1; return 0; } } static int iwl_init_otp_access(struct iwl_trans *trans) { int ret; ret = iwl_finish_nic_init(trans); if (ret) return ret; iwl_set_bits_prph(trans, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ); udelay(5); iwl_clear_bits_prph(trans, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ); /* * CSR auto clock gate disable bit - * this is only applicable for HW with OTP shadow RAM */ if (trans->cfg->base_params->shadow_ram_support) iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG, CSR_RESET_LINK_PWR_MGMT_DISABLED); return 0; } static int iwl_read_otp_word(struct iwl_trans *trans, u16 addr, __le16 *eeprom_data) { int ret = 0; u32 r; u32 otpgp; iwl_write32(trans, CSR_EEPROM_REG, CSR_EEPROM_REG_MSK_ADDR & (addr << 1)); ret = iwl_poll_bit(trans, CSR_EEPROM_REG, CSR_EEPROM_REG_READ_VALID_MSK, CSR_EEPROM_REG_READ_VALID_MSK, IWL_EEPROM_ACCESS_TIMEOUT); if (ret < 0) { IWL_ERR(trans, "Time out reading OTP[%d]\n", addr); return ret; } r = iwl_read32(trans, CSR_EEPROM_REG); /* check for ECC errors: */ otpgp = iwl_read32(trans, CSR_OTP_GP_REG); if (otpgp & CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK) { /* stop in this case */ /* set the 
uncorrectable OTP ECC bit for acknowledgment */ iwl_set_bit(trans, CSR_OTP_GP_REG, CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK); IWL_ERR(trans, "Uncorrectable OTP ECC error, abort OTP read\n"); return -EINVAL; } if (otpgp & CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK) { /* continue in this case */ /* set the correctable OTP ECC bit for acknowledgment */ iwl_set_bit(trans, CSR_OTP_GP_REG, CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK); IWL_ERR(trans, "Correctable OTP ECC error, continue read\n"); } *eeprom_data = cpu_to_le16(r >> 16); return 0; } /* * iwl_is_otp_empty: check for an empty OTP */ static bool iwl_is_otp_empty(struct iwl_trans *trans) { u16 next_link_addr = 0; __le16 link_value; bool is_empty = false; /* locate the beginning of the OTP link list */ if (!iwl_read_otp_word(trans, next_link_addr, &link_value)) { if (!link_value) { IWL_ERR(trans, "OTP is empty\n"); is_empty = true; } } else { IWL_ERR(trans, "Unable to read first block of OTP list.\n"); is_empty = true; } return is_empty; } /* * iwl_find_otp_image: find the EEPROM image in OTP * by locating the OTP block that contains the EEPROM image. * The last valid block on the link list (the block _before_ the last block) * is the block we should read and use to configure the device. * If all the available OTP blocks are full, the last block will be the block * we should read and use to configure the device. * Only perform this operation if shadow RAM is disabled. */ static int iwl_find_otp_image(struct iwl_trans *trans, u16 *validblockaddr) { u16 next_link_addr = 0, valid_addr; __le16 link_value = 0; int usedblocks = 0; /* set addressing mode to absolute to traverse the link list */ iwl_set_otp_access_absolute(trans); /* bail out on an empty OTP or a read error */ if (iwl_is_otp_empty(trans)) return -EINVAL; /* * traverse the link list until we reach the max number of OTP blocks; * different devices have different numbers of OTP blocks */ do { /* save the current valid block address and * check for more blocks on the link list */ valid_addr = next_link_addr; next_link_addr = le16_to_cpu(link_value) * sizeof(u16); IWL_DEBUG_EEPROM(trans->dev, "OTP blocks %d addr 0x%x\n", usedblocks, next_link_addr); if (iwl_read_otp_word(trans, next_link_addr, &link_value)) return -EINVAL; if (!link_value) { /* * reached the end of the link list; return success and * set the address to point to the starting address * of the image */ *validblockaddr = valid_addr; /* skip first 2 bytes (link list pointer) */ *validblockaddr += 2; return 0; } /* more in the link list, continue */ usedblocks++; } while (usedblocks <= trans->cfg->base_params->max_ll_items); /* OTP has no valid blocks */ IWL_DEBUG_EEPROM(trans->dev, "OTP has no valid blocks\n"); return -EINVAL; } /** * iwl_read_eeprom - read EEPROM contents * * Load the EEPROM contents from the adapter and return it * and its size. * * NOTE: This routine uses the non-debug IO access functions.
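* * The buffer returned here is kmalloc'd and owned by the caller. A typical * call sequence is therefore (a sketch; the variable names are illustrative): * ret = iwl_read_eeprom(trans, &eeprom, &eeprom_size); * if (ret) * return ret; * nvm_data = iwl_parse_eeprom_data(trans->dev, trans->cfg, eeprom, eeprom_size); * kfree(eeprom);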
*/ int iwl_read_eeprom(struct iwl_trans *trans, u8 **eeprom, size_t *eeprom_size) { __le16 *e; u32 gp = iwl_read32(trans, CSR_EEPROM_GP); int sz; int ret; u16 addr; u16 validblockaddr = 0; u16 cache_addr = 0; int nvm_is_otp; if (!eeprom || !eeprom_size) return -EINVAL; nvm_is_otp = iwl_nvm_is_otp(trans); if (nvm_is_otp < 0) return nvm_is_otp; sz = trans->cfg->base_params->eeprom_size; IWL_DEBUG_EEPROM(trans->dev, "NVM size = %d\n", sz); e = kmalloc(sz, GFP_KERNEL); if (!e) return -ENOMEM; ret = iwl_eeprom_verify_signature(trans, nvm_is_otp); if (ret < 0) { IWL_ERR(trans, "EEPROM not found, EEPROM_GP=0x%08x\n", gp); goto err_free; } /* Make sure driver (instead of uCode) is allowed to read EEPROM */ ret = iwl_eeprom_acquire_semaphore(trans); if (ret < 0) { IWL_ERR(trans, "Failed to acquire EEPROM semaphore.\n"); goto err_free; } if (nvm_is_otp) { ret = iwl_init_otp_access(trans); if (ret) { IWL_ERR(trans, "Failed to initialize OTP access.\n"); goto err_unlock; } iwl_write32(trans, CSR_EEPROM_GP, iwl_read32(trans, CSR_EEPROM_GP) & ~CSR_EEPROM_GP_IF_OWNER_MSK); iwl_set_bit(trans, CSR_OTP_GP_REG, CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK | CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK); /* traversing the linked list if no shadow ram supported */ if (!trans->cfg->base_params->shadow_ram_support) { ret = iwl_find_otp_image(trans, &validblockaddr); if (ret) goto err_unlock; } for (addr = validblockaddr; addr < validblockaddr + sz; addr += sizeof(u16)) { __le16 eeprom_data; ret = iwl_read_otp_word(trans, addr, &eeprom_data); if (ret) goto err_unlock; e[cache_addr / 2] = eeprom_data; cache_addr += sizeof(u16); } } else { /* eeprom is an array of 16bit values */ for (addr = 0; addr < sz; addr += sizeof(u16)) { u32 r; iwl_write32(trans, CSR_EEPROM_REG, CSR_EEPROM_REG_MSK_ADDR & (addr << 1)); ret = iwl_poll_bit(trans, CSR_EEPROM_REG, CSR_EEPROM_REG_READ_VALID_MSK, CSR_EEPROM_REG_READ_VALID_MSK, IWL_EEPROM_ACCESS_TIMEOUT); if (ret < 0) { IWL_ERR(trans, "Time out reading EEPROM[%d]\n", addr); goto err_unlock; } r = iwl_read32(trans, CSR_EEPROM_REG); e[addr / 2] = cpu_to_le16(r >> 16); } } IWL_DEBUG_EEPROM(trans->dev, "NVM Type: %s\n", nvm_is_otp ? "OTP" : "EEPROM"); iwl_eeprom_release_semaphore(trans); *eeprom_size = sz; *eeprom = (u8 *)e; return 0; err_unlock: iwl_eeprom_release_semaphore(trans); err_free: kfree(e); return ret; } IWL_EXPORT_SYMBOL(iwl_read_eeprom); backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.h000066400000000000000000000055361351064634500277420ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *****************************************************************************/ #ifndef __iwl_eeprom_h__ #define __iwl_eeprom_h__ #include "iwl-trans.h" int iwl_read_eeprom(struct iwl_trans *trans, u8 **eeprom, size_t *eeprom_size); #endif /* __iwl_eeprom_h__ */ backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/iwl-fh.h000066400000000000000000000747031351064634500261410ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2015 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2015 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #ifndef __iwl_fh_h__ #define __iwl_fh_h__ #include #include #include "iwl-trans.h" /****************************/ /* Flow Handler Definitions */ /****************************/ /** * This I/O area is directly read/writable by driver (e.g. Linux uses writel()) * Addresses are offsets from device's PCI hardware base address. */ #define FH_MEM_LOWER_BOUND (0x1000) #define FH_MEM_UPPER_BOUND (0x2000) #define FH_MEM_LOWER_BOUND_GEN2 (0xa06000) #define FH_MEM_UPPER_BOUND_GEN2 (0xa08000) /** * Keep-Warm (KW) buffer base address. * * Driver must allocate a 4KByte buffer that is for keeping the * host DRAM powered on (via dummy accesses to DRAM) to maintain low-latency * DRAM access when doing Txing or Rxing. The dummy accesses prevent host * from going into a power-savings mode that would cause higher DRAM latency, * and possible data over/under-runs, before all Tx/Rx is complete. * * Driver loads FH_KW_MEM_ADDR_REG with the physical address (bits 35:4) * of the buffer, which must be 4K aligned. Once this is set up, the device * automatically invokes keep-warm accesses when normal accesses might not * be sufficient to maintain fast DRAM response. * * Bit fields: * 31-0: Keep-warm buffer physical base address [35:4], must be 4K aligned */ #define FH_KW_MEM_ADDR_REG (FH_MEM_LOWER_BOUND + 0x97C) /** * TFD Circular Buffers Base (CBBC) addresses * * Device has 16 base pointer registers, one for each of 16 host-DRAM-resident * circular buffers (CBs/queues) containing Transmit Frame Descriptors (TFDs) * (see struct iwl_tfd_frame). These 16 pointer registers are offset by 0x04 * bytes from one another. Each TFD circular buffer in DRAM must be 256-byte * aligned (address bits 0-7 must be 0). * Later devices have 20 (5000 series) or 30 (higher) queues, but the registers * for them are in different places. 
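* * FH_MEM_CBBC_QUEUE() below folds these ranges into a single lookup: for * example, channel 3 resolves to FH_MEM_CBBC_0_15_LOWER_BOUND + 0x0c, channel * 18 to FH_MEM_CBBC_16_19_LOWER_BOUND + 0x08, and use_tfh devices instead * address 64-bit entries at TFH_TFDQ_CBB_TABLE + 8 * chnl.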
* * Bit fields in each pointer register: * 27-0: TFD CB physical base address [35:8], must be 256-byte aligned */ #define FH_MEM_CBBC_0_15_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x9D0) #define FH_MEM_CBBC_0_15_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xA10) #define FH_MEM_CBBC_16_19_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xBF0) #define FH_MEM_CBBC_16_19_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xC00) #define FH_MEM_CBBC_20_31_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xB20) #define FH_MEM_CBBC_20_31_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xB80) /* 22000 TFD table address, 64 bit */ #define TFH_TFDQ_CBB_TABLE (0x1C00) /* Find TFD CB base pointer for given queue */ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans, unsigned int chnl) { if (trans->cfg->use_tfh) { WARN_ON_ONCE(chnl >= 64); return TFH_TFDQ_CBB_TABLE + 8 * chnl; } if (chnl < 16) return FH_MEM_CBBC_0_15_LOWER_BOUND + 4 * chnl; if (chnl < 20) return FH_MEM_CBBC_16_19_LOWER_BOUND + 4 * (chnl - 16); WARN_ON_ONCE(chnl >= 32); return FH_MEM_CBBC_20_31_LOWER_BOUND + 4 * (chnl - 20); } /* 22000 configuration registers */ /* * TFH Configuration register. * * BIT fields: * * Bits 3:0: * Define the maximum number of pending read requests. * Maximum configration value allowed is 0xC * Bits 9:8: * Define the maximum transfer size. (64 / 128 / 256) * Bit 10: * When bit is set and transfer size is set to 128B, the TFH will enable * reading chunks of more than 64B only if the read address is aligned to 128B. * In case of DRAM read address which is not aligned to 128B, the TFH will * enable transfer size which doesn't cross 64B DRAM address boundary. */ #define TFH_TRANSFER_MODE (0x1F40) #define TFH_TRANSFER_MAX_PENDING_REQ 0xc #define TFH_CHUNK_SIZE_128 BIT(8) #define TFH_CHUNK_SPLIT_MODE BIT(10) /* * Defines the offset address in dwords referring from the beginning of the * Tx CMD which will be updated in DRAM. * Note that the TFH offset address for Tx CMD update is always referring to * the start of the TFD first TB. * In case of a DRAM Tx CMD update the TFH will update PN and Key ID */ #define TFH_TXCMD_UPDATE_CFG (0x1F48) /* * Controls TX DMA operation * * BIT fields: * * Bits 31:30: Enable the SRAM DMA channel. * Turning on bit 31 will kick the SRAM2DRAM DMA. * Note that the sram2dram may be enabled only after configuring the DRAM and * SRAM addresses registers and the byte count register. * Bits 25:24: Defines the interrupt target upon dram2sram transfer done. When * set to 1 - interrupt is sent to the driver * Bit 0: Indicates the snoop configuration */ #define TFH_SRV_DMA_CHNL0_CTRL (0x1F60) #define TFH_SRV_DMA_SNOOP BIT(0) #define TFH_SRV_DMA_TO_DRIVER BIT(24) #define TFH_SRV_DMA_START BIT(31) /* Defines the DMA SRAM write start address to transfer a data block */ #define TFH_SRV_DMA_CHNL0_SRAM_ADDR (0x1F64) /* Defines the 64bits DRAM start address to read the DMA data block from */ #define TFH_SRV_DMA_CHNL0_DRAM_ADDR (0x1F68) /* * Defines the number of bytes to transfer from DRAM to SRAM. * Note that this register may be configured with non-dword aligned size. */ #define TFH_SRV_DMA_CHNL0_BC (0x1F70) /** * Rx SRAM Control and Status Registers (RSCSR) * * These registers provide handshake between driver and device for the Rx queue * (this queue handles *all* command responses, notifications, Rx data, etc. * sent from uCode to host driver). Unlike Tx, there is only one Rx * queue, and only one Rx DMA/FIFO channel. 
Also unlike Tx, which can * concatenate up to 20 DRAM buffers to form a Tx frame, each Receive Buffer * Descriptor (RBD) points to only one Rx Buffer (RB); there is a 1:1 * mapping between RBDs and RBs. * * Driver must allocate host DRAM memory for the following, and set the * physical address of each into device registers: * * 1) Receive Buffer Descriptor (RBD) circular buffer (CB), typically with 256 * entries (although any power of 2, up to 4096, is selectable by driver). * Each entry (1 dword) points to a receive buffer (RB) of consistent size * (typically 4K, although 8K or 16K are also selectable by driver). * Driver sets up RB size and number of RBDs in the CB via Rx config * register FH_MEM_RCSR_CHNL0_CONFIG_REG. * * Bit fields within one RBD: * 27-0: Receive Buffer physical address bits [35:8], 256-byte aligned * * Driver sets physical address [35:8] of base of RBD circular buffer * into FH_RSCSR_CHNL0_RBDCB_BASE_REG [27:0]. * * 2) Rx status buffer, 8 bytes, in which uCode indicates which Rx Buffers * (RBs) have been filled, via a "write pointer", actually the index of * the RB's corresponding RBD within the circular buffer. Driver sets * physical address [35:4] into FH_RSCSR_CHNL0_STTS_WPTR_REG [31:0]. * * Bit fields in lower dword of Rx status buffer (upper dword not used * by driver: * 31-12: Not used by driver * 11- 0: Index of last filled Rx buffer descriptor * (device writes, driver reads this value) * * As the driver prepares Receive Buffers (RBs) for device to fill, driver must * enter pointers to these RBs into contiguous RBD circular buffer entries, * and update the device's "write" index register, * FH_RSCSR_CHNL0_RBDCB_WPTR_REG. * * This "write" index corresponds to the *next* RBD that the driver will make * available, i.e. one RBD past the tail of the ready-to-fill RBDs within * the circular buffer. This value should initially be 0 (before preparing any * RBs), should be 8 after preparing the first 8 RBs (for example), and must * wrap back to 0 at the end of the circular buffer (but don't wrap before * "read" index has advanced past 1! See below). * NOTE: DEVICE EXPECTS THE WRITE INDEX TO BE INCREMENTED IN MULTIPLES OF 8. * * As the device fills RBs (referenced from contiguous RBDs within the circular * buffer), it updates the Rx status buffer in host DRAM, 2) described above, * to tell the driver the index of the latest filled RBD. The driver must * read this "read" index from DRAM after receiving an Rx interrupt from device * * The driver must also internally keep track of a third index, which is the * next RBD to process. When receiving an Rx interrupt, driver should process * all filled but unprocessed RBs up to, but not including, the RB * corresponding to the "read" index. For example, if "read" index becomes "1", * driver may process the RB pointed to by RBD 0. Depending on volume of * traffic, there may be many RBs to process. * * If read index == write index, device thinks there is no room to put new data. * Due to this, the maximum number of filled RBs is 255, instead of 256. To * be safe, make sure that there is a gap of at least 2 RBDs between "write" * and "read" indexes; that is, make sure that there are no more than 254 * buffers waiting to be filled. */ #define FH_MEM_RSCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xBC0) #define FH_MEM_RSCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xC00) #define FH_MEM_RSCSR_CHNL0 (FH_MEM_RSCSR_LOWER_BOUND) /** * Physical base address of 8-byte Rx Status buffer. 
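 *
 * (This is the status buffer described in the RSCSR overview above: the
 * device writes the index of the last filled RBD into its lower dword and
 * the driver reads it back after each Rx interrupt.)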
 * Bit fields:
 * 31-0: Rx status buffer physical base address [35:4], must be 16-byte
 *	aligned.
 */
#define FH_RSCSR_CHNL0_STTS_WPTR_REG	(FH_MEM_RSCSR_CHNL0)

/**
 * Physical base address of Rx Buffer Descriptor Circular Buffer.
 * Bit fields:
 * 27-0: RBD CB physical base address [35:8], must be 256-byte aligned.
 */
#define FH_RSCSR_CHNL0_RBDCB_BASE_REG	(FH_MEM_RSCSR_CHNL0 + 0x004)

/**
 * Rx write pointer (index, really!).
 * Bit fields:
 * 11-0: Index of driver's most recent prepared-to-be-filled RBD, + 1.
 *	NOTE: For 256-entry circular buffer, use only bits [7:0].
 */
#define FH_RSCSR_CHNL0_RBDCB_WPTR_REG	(FH_MEM_RSCSR_CHNL0 + 0x008)
#define FH_RSCSR_CHNL0_WPTR		(FH_RSCSR_CHNL0_RBDCB_WPTR_REG)

#define FW_RSCSR_CHNL0_RXDCB_RDPTR_REG	(FH_MEM_RSCSR_CHNL0 + 0x00c)
#define FH_RSCSR_CHNL0_RDPTR		FW_RSCSR_CHNL0_RXDCB_RDPTR_REG

/**
 * Rx Config/Status Registers (RCSR)
 * Rx Config Reg for channel 0 (only channel used)
 *
 * Driver must initialize FH_MEM_RCSR_CHNL0_CONFIG_REG as follows for
 * normal operation (see bit fields).
 *
 * Clearing FH_MEM_RCSR_CHNL0_CONFIG_REG to 0 turns off Rx DMA.
 * Driver should poll FH_MEM_RSSR_RX_STATUS_REG for
 * FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (bit 24) before continuing.
 *
 * Bit fields:
 * 31-30: Rx DMA channel enable: '00' off/pause, '01' pause at end of frame,
 *	'10' operate normally
 * 29-24: reserved
 * 23-20: # RBDs in circular buffer = 2^value; use "8" for 256 RBDs (normal),
 *	min "5" for 32 RBDs, max "12" for 4096 RBDs.
 * 19-18: reserved
 * 17-16: size of each receive buffer; '00' 4K (normal), '01' 8K,
 *	'10' 12K, '11' 16K.
 * 15-14: reserved
 * 13-12: IRQ destination; '00' none, '01' host driver (normal operation)
 * 11- 4: timeout for closing Rx buffer and interrupting host (units 32 usec)
 *	typical value 0x10 (about 1/2 msec)
 *  3- 0: reserved
 */
#define FH_MEM_RCSR_LOWER_BOUND		(FH_MEM_LOWER_BOUND + 0xC00)
#define FH_MEM_RCSR_UPPER_BOUND		(FH_MEM_LOWER_BOUND + 0xCC0)
#define FH_MEM_RCSR_CHNL0		(FH_MEM_RCSR_LOWER_BOUND)

#define FH_MEM_RCSR_CHNL0_CONFIG_REG	(FH_MEM_RCSR_CHNL0)
#define FH_MEM_RCSR_CHNL0_RBDCB_WPTR	(FH_MEM_RCSR_CHNL0 + 0x8)
#define FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ	(FH_MEM_RCSR_CHNL0 + 0x10)

#define FH_RCSR_CHNL0_RX_CONFIG_RB_TIMEOUT_MSK	(0x00000FF0) /* bits 4-11 */
#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_MSK	(0x00001000) /* bits 12 */
#define FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK (0x00008000) /* bit 15 */
#define FH_RCSR_CHNL0_RX_CONFIG_RB_SIZE_MSK	(0x00030000) /* bits 16-17 */
#define FH_RCSR_CHNL0_RX_CONFIG_RBDBC_SIZE_MSK	(0x00F00000) /* bits 20-23 */
#define FH_RCSR_CHNL0_RX_CONFIG_DMA_CHNL_EN_MSK	(0xC0000000) /* bits 30-31*/

#define FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS	(20)
#define FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS	(4)
#define RX_RB_TIMEOUT	(0x11)

#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_VAL		(0x00000000)
#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_EOF_VAL		(0x40000000)
#define FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL		(0x80000000)

#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K	(0x00000000)
#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K	(0x00010000)
#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K	(0x00020000)
#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_16K	(0x00030000)

#define FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY		(0x00000004)
#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_NO_INT_VAL	(0x00000000)
#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL	(0x00001000)

/**
 * Rx Shared Status Registers (RSSR)
 *
 * After stopping Rx DMA channel (writing 0 to
 * FH_MEM_RCSR_CHNL0_CONFIG_REG), driver must poll
 * FH_MEM_RSSR_RX_STATUS_REG until Rx channel is idle.
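 *
 * A minimal stop-and-wait sketch using the iwl-io helpers defined in this
 * driver (the 1000 usec timeout is illustrative, not a hardware requirement):
 *
 *	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
 *	iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
 *			    FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);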
* * Bit fields: * 24: 1 = Channel 0 is idle * * FH_MEM_RSSR_SHARED_CTRL_REG and FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV * contain default values that should not be altered by the driver. */ #define FH_MEM_RSSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xC40) #define FH_MEM_RSSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xD00) #define FH_MEM_RSSR_SHARED_CTRL_REG (FH_MEM_RSSR_LOWER_BOUND) #define FH_MEM_RSSR_RX_STATUS_REG (FH_MEM_RSSR_LOWER_BOUND + 0x004) #define FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV\ (FH_MEM_RSSR_LOWER_BOUND + 0x008) #define FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (0x01000000) #define FH_MEM_TFDIB_REG1_ADDR_BITSHIFT 28 #define FH_MEM_TB_MAX_LENGTH (0x00020000) /* 9000 rx series registers */ #define RFH_Q0_FRBDCB_BA_LSB 0xA08000 /* 64 bit address */ #define RFH_Q_FRBDCB_BA_LSB(q) (RFH_Q0_FRBDCB_BA_LSB + (q) * 8) /* Write index table */ #define RFH_Q0_FRBDCB_WIDX 0xA08080 #define RFH_Q_FRBDCB_WIDX(q) (RFH_Q0_FRBDCB_WIDX + (q) * 4) /* Write index table - shadow registers */ #define RFH_Q0_FRBDCB_WIDX_TRG 0x1C80 #define RFH_Q_FRBDCB_WIDX_TRG(q) (RFH_Q0_FRBDCB_WIDX_TRG + (q) * 4) /* Read index table */ #define RFH_Q0_FRBDCB_RIDX 0xA080C0 #define RFH_Q_FRBDCB_RIDX(q) (RFH_Q0_FRBDCB_RIDX + (q) * 4) /* Used list table */ #define RFH_Q0_URBDCB_BA_LSB 0xA08100 /* 64 bit address */ #define RFH_Q_URBDCB_BA_LSB(q) (RFH_Q0_URBDCB_BA_LSB + (q) * 8) /* Write index table */ #define RFH_Q0_URBDCB_WIDX 0xA08180 #define RFH_Q_URBDCB_WIDX(q) (RFH_Q0_URBDCB_WIDX + (q) * 4) #define RFH_Q0_URBDCB_VAID 0xA081C0 #define RFH_Q_URBDCB_VAID(q) (RFH_Q0_URBDCB_VAID + (q) * 4) /* stts */ #define RFH_Q0_URBD_STTS_WPTR_LSB 0xA08200 /*64 bits address */ #define RFH_Q_URBD_STTS_WPTR_LSB(q) (RFH_Q0_URBD_STTS_WPTR_LSB + (q) * 8) #define RFH_Q0_ORB_WPTR_LSB 0xA08280 #define RFH_Q_ORB_WPTR_LSB(q) (RFH_Q0_ORB_WPTR_LSB + (q) * 8) #define RFH_RBDBUF_RBD0_LSB 0xA08300 #define RFH_RBDBUF_RBD_LSB(q) (RFH_RBDBUF_RBD0_LSB + (q) * 8) /** * RFH Status Register * * Bit fields: * * Bit 29: RBD_FETCH_IDLE * This status flag is set by the RFH when there is no active RBD fetch from * DRAM. * Once the RFH RBD controller starts fetching (or when there is a pending * RBD read response from DRAM), this flag is immediately turned off. * * Bit 30: SRAM_DMA_IDLE * This status flag is set by the RFH when there is no active transaction from * SRAM to DRAM. * Once the SRAM to DRAM DMA is active, this flag is immediately turned off. * * Bit 31: RXF_DMA_IDLE * This status flag is set by the RFH when there is no active transaction from * RXF to DRAM. * Once the RXF-to-DRAM DMA is active, this flag is immediately turned off. 
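 *
 * A full-idle check therefore tests all three flags at once; a sketch using
 * the definitions just below:
 *
 *	u32 status = iwl_read_prph(trans, RFH_GEN_STATUS);
 *	bool idle = (status & (RBD_FETCH_IDLE | SRAM_DMA_IDLE | RXF_DMA_IDLE))
 *		 == (RBD_FETCH_IDLE | SRAM_DMA_IDLE | RXF_DMA_IDLE);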
*/ #define RFH_GEN_STATUS 0xA09808 #define RFH_GEN_STATUS_GEN3 0xA07824 #define RBD_FETCH_IDLE BIT(29) #define SRAM_DMA_IDLE BIT(30) #define RXF_DMA_IDLE BIT(31) /* DMA configuration */ #define RFH_RXF_DMA_CFG 0xA09820 #define RFH_RXF_DMA_CFG_GEN3 0xA07880 /* RB size */ #define RFH_RXF_DMA_RB_SIZE_MASK (0x000F0000) /* bits 16-19 */ #define RFH_RXF_DMA_RB_SIZE_POS 16 #define RFH_RXF_DMA_RB_SIZE_1K (0x1 << RFH_RXF_DMA_RB_SIZE_POS) #define RFH_RXF_DMA_RB_SIZE_2K (0x2 << RFH_RXF_DMA_RB_SIZE_POS) #define RFH_RXF_DMA_RB_SIZE_4K (0x4 << RFH_RXF_DMA_RB_SIZE_POS) #define RFH_RXF_DMA_RB_SIZE_8K (0x8 << RFH_RXF_DMA_RB_SIZE_POS) #define RFH_RXF_DMA_RB_SIZE_12K (0x9 << RFH_RXF_DMA_RB_SIZE_POS) #define RFH_RXF_DMA_RB_SIZE_16K (0xA << RFH_RXF_DMA_RB_SIZE_POS) #define RFH_RXF_DMA_RB_SIZE_20K (0xB << RFH_RXF_DMA_RB_SIZE_POS) #define RFH_RXF_DMA_RB_SIZE_24K (0xC << RFH_RXF_DMA_RB_SIZE_POS) #define RFH_RXF_DMA_RB_SIZE_28K (0xD << RFH_RXF_DMA_RB_SIZE_POS) #define RFH_RXF_DMA_RB_SIZE_32K (0xE << RFH_RXF_DMA_RB_SIZE_POS) /* RB Circular Buffer size:defines the table sizes in RBD units */ #define RFH_RXF_DMA_RBDCB_SIZE_MASK (0x00F00000) /* bits 20-23 */ #define RFH_RXF_DMA_RBDCB_SIZE_POS 20 #define RFH_RXF_DMA_RBDCB_SIZE_8 (0x3 << RFH_RXF_DMA_RBDCB_SIZE_POS) #define RFH_RXF_DMA_RBDCB_SIZE_16 (0x4 << RFH_RXF_DMA_RBDCB_SIZE_POS) #define RFH_RXF_DMA_RBDCB_SIZE_32 (0x5 << RFH_RXF_DMA_RBDCB_SIZE_POS) #define RFH_RXF_DMA_RBDCB_SIZE_64 (0x7 << RFH_RXF_DMA_RBDCB_SIZE_POS) #define RFH_RXF_DMA_RBDCB_SIZE_128 (0x7 << RFH_RXF_DMA_RBDCB_SIZE_POS) #define RFH_RXF_DMA_RBDCB_SIZE_256 (0x8 << RFH_RXF_DMA_RBDCB_SIZE_POS) #define RFH_RXF_DMA_RBDCB_SIZE_512 (0x9 << RFH_RXF_DMA_RBDCB_SIZE_POS) #define RFH_RXF_DMA_RBDCB_SIZE_1024 (0xA << RFH_RXF_DMA_RBDCB_SIZE_POS) #define RFH_RXF_DMA_RBDCB_SIZE_2048 (0xB << RFH_RXF_DMA_RBDCB_SIZE_POS) #define RFH_RXF_DMA_MIN_RB_SIZE_MASK (0x03000000) /* bit 24-25 */ #define RFH_RXF_DMA_MIN_RB_SIZE_POS 24 #define RFH_RXF_DMA_MIN_RB_4_8 (3 << RFH_RXF_DMA_MIN_RB_SIZE_POS) #define RFH_RXF_DMA_DROP_TOO_LARGE_MASK (0x04000000) /* bit 26 */ #define RFH_RXF_DMA_SINGLE_FRAME_MASK (0x20000000) /* bit 29 */ #define RFH_DMA_EN_MASK (0xC0000000) /* bits 30-31*/ #define RFH_DMA_EN_ENABLE_VAL BIT(31) #define RFH_RXF_RXQ_ACTIVE 0xA0980C #define RFH_GEN_CFG 0xA09800 #define RFH_GEN_CFG_SERVICE_DMA_SNOOP BIT(0) #define RFH_GEN_CFG_RFH_DMA_SNOOP BIT(1) #define RFH_GEN_CFG_RB_CHUNK_SIZE BIT(4) #define RFH_GEN_CFG_RB_CHUNK_SIZE_128 1 #define RFH_GEN_CFG_RB_CHUNK_SIZE_64 0 /* the driver assumes everywhere that the default RXQ is 0 */ #define RFH_GEN_CFG_DEFAULT_RXQ_NUM 0xF00 #define RFH_GEN_CFG_VAL(_n, _v) FIELD_PREP(RFH_GEN_CFG_ ## _n, _v) /* end of 9000 rx series registers */ /* TFDB Area - TFDs buffer table */ #define FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK (0xFFFFFFFF) #define FH_TFDIB_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x900) #define FH_TFDIB_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0x958) #define FH_TFDIB_CTRL0_REG(_chnl) (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl)) #define FH_TFDIB_CTRL1_REG(_chnl) (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl) + 0x4) /** * Transmit DMA Channel Control/Status Registers (TCSR) * * Device has one configuration register for each of 8 Tx DMA/FIFO channels * supported in hardware (don't confuse these with the 16 Tx queues in DRAM, * which feed the DMA/FIFO channels); config regs are separated by 0x20 bytes. 
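 *
 * (Worked example: FH_TCSR_LOWER_BOUND below is FH_MEM_LOWER_BOUND + 0xD00 =
 * 0x1D00, so FH_TCSR_CHNL_TX_CONFIG_REG(4) resolves to 0x1D00 + 0x20 * 4 =
 * 0x1D80.)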
* * To use a Tx DMA channel, driver must initialize its * FH_TCSR_CHNL_TX_CONFIG_REG(chnl) with: * * FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | * FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL * * All other bits should be 0. * * Bit fields: * 31-30: Tx DMA channel enable: '00' off/pause, '01' pause at end of frame, * '10' operate normally * 29- 4: Reserved, set to "0" * 3: Enable internal DMA requests (1, normal operation), disable (0) * 2- 0: Reserved, set to "0" */ #define FH_TCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xD00) #define FH_TCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xE60) /* Find Control/Status reg for given Tx DMA/FIFO channel */ #define FH_TCSR_CHNL_NUM (8) /* TCSR: tx_config register values */ #define FH_TCSR_CHNL_TX_CONFIG_REG(_chnl) \ (FH_TCSR_LOWER_BOUND + 0x20 * (_chnl)) #define FH_TCSR_CHNL_TX_CREDIT_REG(_chnl) \ (FH_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x4) #define FH_TCSR_CHNL_TX_BUF_STS_REG(_chnl) \ (FH_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x8) #define FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF (0x00000000) #define FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRV (0x00000001) #define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE (0x00000000) #define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE (0x00000008) #define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_NOINT (0x00000000) #define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD (0x00100000) #define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD (0x00200000) #define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT (0x00000000) #define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_ENDTFD (0x00400000) #define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_IFTFD (0x00800000) #define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE (0x00000000) #define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE_EOF (0x40000000) #define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE (0x80000000) #define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_EMPTY (0x00000000) #define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_WAIT (0x00002000) #define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID (0x00000003) #define FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM (20) #define FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX (12) /** * Tx Shared Status Registers (TSSR) * * After stopping Tx DMA channel (writing 0 to * FH_TCSR_CHNL_TX_CONFIG_REG(chnl)), driver must poll * FH_TSSR_TX_STATUS_REG until selected Tx channel is idle * (channel's buffers empty | no pending requests). * * Bit fields: * 31-24: 1 = Channel buffers empty (channel 7:0) * 23-16: 1 = No pending requests (channel 7:0) */ #define FH_TSSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xEA0) #define FH_TSSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xEC0) #define FH_TSSR_TX_STATUS_REG (FH_TSSR_LOWER_BOUND + 0x010) /** * Bit fields for TSSR(Tx Shared Status & Control) error status register: * 31: Indicates an address error when accessed to internal memory * uCode/driver must write "1" in order to clear this flag * 30: Indicates that Host did not send the expected number of dwords to FH * uCode/driver must write "1" in order to clear this flag * 16-9:Each status bit is for one channel. Indicates that an (Error) ActDMA * command was received from the scheduler while the TRB was already full * with previous command * uCode/driver must write "1" in order to clear this flag * 7-0: Each status bit indicates a channel's TxCredit error. When an error * bit is set, it indicates that the FH has received a full indication * from the RTC TxFIFO and the current value of the TxCredit counter was * not equal to zero. 
This means that the credit mechanism was not
 *	synchronized to the TxFIFO status
 *	uCode/driver must write "1" in order to clear this flag
 */
#define FH_TSSR_TX_ERROR_REG		(FH_TSSR_LOWER_BOUND + 0x018)
#define FH_TSSR_TX_MSG_CONFIG_REG	(FH_TSSR_LOWER_BOUND + 0x008)

#define FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_chnl) ((1 << (_chnl)) << 16)

/* Tx service channels */
#define FH_SRVC_CHNL		(9)
#define FH_SRVC_LOWER_BOUND	(FH_MEM_LOWER_BOUND + 0x9C8)
#define FH_SRVC_UPPER_BOUND	(FH_MEM_LOWER_BOUND + 0x9D0)
#define FH_SRVC_CHNL_SRAM_ADDR_REG(_chnl) \
	(FH_SRVC_LOWER_BOUND + ((_chnl) - 9) * 0x4)

#define FH_TX_CHICKEN_BITS_REG	(FH_MEM_LOWER_BOUND + 0xE98)
#define FH_TX_TRB_REG(_chan)	(FH_MEM_LOWER_BOUND + 0x958 + (_chan) * 4)

/* Instruct FH to increment the retry count of a packet when
 * it is brought from the memory to TX-FIFO
 */
#define FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN	(0x00000002)

#define MQ_RX_TABLE_SIZE	512
#define MQ_RX_TABLE_MASK	(MQ_RX_TABLE_SIZE - 1)
#define MQ_RX_NUM_RBDS		(MQ_RX_TABLE_SIZE - 1)
#define RX_POOL_SIZE		(MQ_RX_NUM_RBDS +	\
				 IWL_MAX_RX_HW_QUEUES *	\
				 (RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC))
/* cb size is the exponent */
#define RX_QUEUE_CB_SIZE(x)	ilog2(x)

#define RX_QUEUE_SIZE		256
#define RX_QUEUE_MASK		255
#define RX_QUEUE_SIZE_LOG	8

/**
 * struct iwl_rb_status - receive buffer status
 *	host memory mapped FH registers
 * @closed_rb_num [0:11] - Indicates the index of the RB which was closed
 * @closed_fr_num [0:11] - Indicates the index of the RX Frame which was closed
 * @finished_rb_num [0:11] - Indicates the index of the current RB
 *	in which the last frame was written to
 * @finished_fr_num [0:11] - Indicates the index of the RX Frame
 *	which was transferred
 */
struct iwl_rb_status {
	__le16 closed_rb_num;
	__le16 closed_fr_num;
	__le16 finished_rb_num;
	__le16 finished_fr_nam;
	__le32 __unused;
} __packed;

#define TFD_QUEUE_SIZE_MAX	(256)
#define TFD_QUEUE_SIZE_MAX_GEN3	(65536)
/* cb size is the exponent - 3 */
#define TFD_QUEUE_CB_SIZE(x)	(ilog2(x) - 3)
#define TFD_QUEUE_SIZE_BC_DUP	(64)
#define TFD_QUEUE_BC_SIZE	(TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP)
#define TFD_QUEUE_BC_SIZE_GEN3	(TFD_QUEUE_SIZE_MAX_GEN3 + \
				 TFD_QUEUE_SIZE_BC_DUP)
#define IWL_TX_DMA_MASK		DMA_BIT_MASK(36)
#define IWL_NUM_OF_TBS		20
#define IWL_TFH_NUM_TBS		25

static inline u8 iwl_get_dma_hi_addr(dma_addr_t addr)
{
	return (sizeof(addr) > sizeof(u32) ? upper_32_bits(addr) : 0) & 0xF;
}

/**
 * enum iwl_tfd_tb_hi_n_len - TB hi_n_len bits
 * @TB_HI_N_LEN_ADDR_HI_MSK: high 4 bits (to make it 36) of DMA address
 * @TB_HI_N_LEN_LEN_MSK: length of the TB
 */
enum iwl_tfd_tb_hi_n_len {
	TB_HI_N_LEN_ADDR_HI_MSK	= 0xf,
	TB_HI_N_LEN_LEN_MSK	= 0xfff0,
};

/**
 * struct iwl_tfd_tb - transmit buffer descriptor within transmit frame descriptor
 *
 * This structure contains dma address and length of transmission address
 *
 * @lo: low [31:0] portion of the dma address of TX buffer
 *	every even is unaligned on 16 bit boundary
 * @hi_n_len: &enum iwl_tfd_tb_hi_n_len
 */
struct iwl_tfd_tb {
	__le32 lo;
	__le16 hi_n_len;
} __packed;

/**
 * struct iwl_tfh_tb - transmit buffer descriptor within transmit frame descriptor
 *
 * This structure contains dma address and length of transmission address
 *
 * @tb_len: length of the tx buffer
 * @addr: 64 bits dma address
 */
struct iwl_tfh_tb {
	__le16 tb_len;
	__le64 addr;
} __packed;

/**
 * Each Tx queue uses a circular buffer of 256 TFDs stored in host DRAM.
 * Both driver and device share these circular buffers, each of which must be
 * contiguous 256 TFDs.
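 *
 * (Worked example of the sizing that follows: the queue-size exponent the
 * driver programs for the default 256-TFD depth comes from
 * TFD_QUEUE_CB_SIZE() above, i.e. ilog2(256) - 3 = 5.)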
* For pre 22000 HW it is 256 x 128 bytes-per-TFD = 32 KBytes * For 22000 HW and on it is 256 x 256 bytes-per-TFD = 65 KBytes * * Driver must indicate the physical address of the base of each * circular buffer via the FH_MEM_CBBC_QUEUE registers. * * Each TFD contains pointer/size information for up to 20 / 25 data buffers * in host DRAM. These buffers collectively contain the (one) frame described * by the TFD. Each buffer must be a single contiguous block of memory within * itself, but buffers may be scattered in host DRAM. Each buffer has max size * of (4K - 4). The concatenates all of a TFD's buffers into a single * Tx frame, up to 8 KBytes in size. * * A maximum of 255 (not 256!) TFDs may be on a queue waiting for Tx. */ /** * struct iwl_tfd - Transmit Frame Descriptor (TFD) * @ __reserved1[3] reserved * @ num_tbs 0-4 number of active tbs * 5 reserved * 6-7 padding (not used) * @ tbs[20] transmit frame buffer descriptors * @ __pad padding */ struct iwl_tfd { u8 __reserved1[3]; u8 num_tbs; struct iwl_tfd_tb tbs[IWL_NUM_OF_TBS]; __le32 __pad; } __packed; /** * struct iwl_tfh_tfd - Transmit Frame Descriptor (TFD) * @ num_tbs 0-4 number of active tbs * 5 -15 reserved * @ tbs[25] transmit frame buffer descriptors * @ __pad padding */ struct iwl_tfh_tfd { __le16 num_tbs; struct iwl_tfh_tb tbs[IWL_TFH_NUM_TBS]; __le32 __pad; } __packed; /* Keep Warm Size */ #define IWL_KW_SIZE 0x1000 /* 4k */ /* Fixed (non-configurable) rx data from phy */ /** * struct iwlagn_schedq_bc_tbl scheduler byte count table * base physical address provided by SCD_DRAM_BASE_ADDR * For devices up to 22000: * @tfd_offset 0-12 - tx command byte count * 12-16 - station index * For 22000: * @tfd_offset 0-12 - tx command byte count * 12-13 - number of 64 byte chunks * 14-16 - reserved */ struct iwlagn_scd_bc_tbl { __le16 tfd_offset[TFD_QUEUE_BC_SIZE]; } __packed; /** * struct iwl_gen3_bc_tbl scheduler byte count table gen3 * For 22560 and on: * @tfd_offset: 0-12 - tx command byte count * 12-13 - number of 64 byte chunks * 14-16 - reserved */ struct iwl_gen3_bc_tbl { __le16 tfd_offset[TFD_QUEUE_BC_SIZE_GEN3]; } __packed; #endif /* !__iwl_fh_h__ */ backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/iwl-io.c000066400000000000000000000340351351064634500261400ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2015 - 2016 Intel Deutschland GmbH * Copyright(C) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * The full GNU General Public License is included in this distribution in the * file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. 
* Copyright(c) 2015 - 2016 Intel Deutschland GmbH * Copyright (C) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #include #include #include #include "iwl-drv.h" #include "iwl-io.h" #include "iwl-csr.h" #include "iwl-debug.h" #include "iwl-prph.h" #include "iwl-fh.h" const struct iwl_csr_params iwl_csr_v1 = { .flag_mac_clock_ready = 0, .flag_val_mac_access_en = 0, .flag_init_done = 2, .flag_mac_access_req = 3, .flag_sw_reset = 7, .flag_master_dis = 8, .flag_stop_master = 9, .addr_sw_reset = CSR_BASE + 0x020, .mac_addr0_otp = 0x380, .mac_addr1_otp = 0x384, .mac_addr0_strap = 0x388, .mac_addr1_strap = 0x38C }; const struct iwl_csr_params iwl_csr_v2 = { .flag_init_done = 6, .flag_mac_clock_ready = 20, .flag_val_mac_access_en = 20, .flag_mac_access_req = 21, .flag_master_dis = 28, .flag_stop_master = 29, .flag_sw_reset = 31, .addr_sw_reset = CSR_BASE + 0x024, .mac_addr0_otp = 0x30, .mac_addr1_otp = 0x34, .mac_addr0_strap = 0x38, .mac_addr1_strap = 0x3C }; void iwl_write8(struct iwl_trans *trans, u32 ofs, u8 val) { trace_iwlwifi_dev_iowrite8(trans->dev, ofs, val); iwl_trans_write8(trans, ofs, val); } IWL_EXPORT_SYMBOL(iwl_write8); void iwl_write32(struct iwl_trans *trans, u32 ofs, u32 val) { trace_iwlwifi_dev_iowrite32(trans->dev, ofs, val); iwl_trans_write32(trans, ofs, val); } IWL_EXPORT_SYMBOL(iwl_write32); void iwl_write64(struct iwl_trans *trans, u64 ofs, u64 val) { trace_iwlwifi_dev_iowrite64(trans->dev, ofs, val); iwl_trans_write32(trans, ofs, lower_32_bits(val)); iwl_trans_write32(trans, ofs + 4, upper_32_bits(val)); } IWL_EXPORT_SYMBOL(iwl_write64); u32 iwl_read32(struct iwl_trans *trans, u32 ofs) { u32 val = iwl_trans_read32(trans, ofs); trace_iwlwifi_dev_ioread32(trans->dev, ofs, val); return val; } IWL_EXPORT_SYMBOL(iwl_read32); #define IWL_POLL_INTERVAL 10 /* microseconds */ int iwl_poll_bit(struct iwl_trans *trans, u32 addr, u32 bits, u32 mask, int timeout) { int t = 0; do { if ((iwl_read32(trans, addr) & mask) == (bits & mask)) return t; 
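		/* no match yet: wait IWL_POLL_INTERVAL usec, then re-read until timeout */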
udelay(IWL_POLL_INTERVAL); t += IWL_POLL_INTERVAL; } while (t < timeout); return -ETIMEDOUT; } IWL_EXPORT_SYMBOL(iwl_poll_bit); u32 iwl_read_direct32(struct iwl_trans *trans, u32 reg) { u32 value = 0x5a5a5a5a; unsigned long flags; if (iwl_trans_grab_nic_access(trans, &flags)) { value = iwl_read32(trans, reg); iwl_trans_release_nic_access(trans, &flags); } return value; } IWL_EXPORT_SYMBOL(iwl_read_direct32); void iwl_write_direct32(struct iwl_trans *trans, u32 reg, u32 value) { unsigned long flags; if (iwl_trans_grab_nic_access(trans, &flags)) { iwl_write32(trans, reg, value); iwl_trans_release_nic_access(trans, &flags); } } IWL_EXPORT_SYMBOL(iwl_write_direct32); void iwl_write_direct64(struct iwl_trans *trans, u64 reg, u64 value) { unsigned long flags; if (iwl_trans_grab_nic_access(trans, &flags)) { iwl_write64(trans, reg, value); iwl_trans_release_nic_access(trans, &flags); } } IWL_EXPORT_SYMBOL(iwl_write_direct64); int iwl_poll_direct_bit(struct iwl_trans *trans, u32 addr, u32 mask, int timeout) { int t = 0; do { if ((iwl_read_direct32(trans, addr) & mask) == mask) return t; udelay(IWL_POLL_INTERVAL); t += IWL_POLL_INTERVAL; } while (t < timeout); return -ETIMEDOUT; } IWL_EXPORT_SYMBOL(iwl_poll_direct_bit); u32 iwl_read_prph_no_grab(struct iwl_trans *trans, u32 ofs) { u32 val = iwl_trans_read_prph(trans, ofs); trace_iwlwifi_dev_ioread_prph32(trans->dev, ofs, val); return val; } IWL_EXPORT_SYMBOL(iwl_read_prph_no_grab); void iwl_write_prph_no_grab(struct iwl_trans *trans, u32 ofs, u32 val) { trace_iwlwifi_dev_iowrite_prph32(trans->dev, ofs, val); iwl_trans_write_prph(trans, ofs, val); } IWL_EXPORT_SYMBOL(iwl_write_prph_no_grab); void iwl_write_prph64_no_grab(struct iwl_trans *trans, u64 ofs, u64 val) { trace_iwlwifi_dev_iowrite_prph64(trans->dev, ofs, val); iwl_write_prph_no_grab(trans, ofs, val & 0xffffffff); iwl_write_prph_no_grab(trans, ofs + 4, val >> 32); } IWL_EXPORT_SYMBOL(iwl_write_prph64_no_grab); u32 iwl_read_prph(struct iwl_trans *trans, u32 ofs) { unsigned long flags; u32 val = 0x5a5a5a5a; if (iwl_trans_grab_nic_access(trans, &flags)) { val = iwl_read_prph_no_grab(trans, ofs); iwl_trans_release_nic_access(trans, &flags); } return val; } IWL_EXPORT_SYMBOL(iwl_read_prph); void iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val) { unsigned long flags; if (iwl_trans_grab_nic_access(trans, &flags)) { iwl_write_prph_no_grab(trans, ofs, val); iwl_trans_release_nic_access(trans, &flags); } } IWL_EXPORT_SYMBOL(iwl_write_prph); int iwl_poll_prph_bit(struct iwl_trans *trans, u32 addr, u32 bits, u32 mask, int timeout) { int t = 0; do { if ((iwl_read_prph(trans, addr) & mask) == (bits & mask)) return t; udelay(IWL_POLL_INTERVAL); t += IWL_POLL_INTERVAL; } while (t < timeout); return -ETIMEDOUT; } void iwl_set_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask) { unsigned long flags; if (iwl_trans_grab_nic_access(trans, &flags)) { iwl_write_prph_no_grab(trans, ofs, iwl_read_prph_no_grab(trans, ofs) | mask); iwl_trans_release_nic_access(trans, &flags); } } IWL_EXPORT_SYMBOL(iwl_set_bits_prph); void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 ofs, u32 bits, u32 mask) { unsigned long flags; if (iwl_trans_grab_nic_access(trans, &flags)) { iwl_write_prph_no_grab(trans, ofs, (iwl_read_prph_no_grab(trans, ofs) & mask) | bits); iwl_trans_release_nic_access(trans, &flags); } } IWL_EXPORT_SYMBOL(iwl_set_bits_mask_prph); void iwl_clear_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask) { unsigned long flags; u32 val; if (iwl_trans_grab_nic_access(trans, &flags)) { val = 
iwl_read_prph_no_grab(trans, ofs); iwl_write_prph_no_grab(trans, ofs, (val & ~mask)); iwl_trans_release_nic_access(trans, &flags); } } IWL_EXPORT_SYMBOL(iwl_clear_bits_prph); void iwl_force_nmi(struct iwl_trans *trans) { if (trans->cfg->device_family < IWL_DEVICE_FAMILY_9000) iwl_write_prph(trans, DEVICE_SET_NMI_REG, DEVICE_SET_NMI_VAL_DRV); else if (trans->cfg->device_family < IWL_DEVICE_FAMILY_AX210) iwl_write_umac_prph(trans, UREG_NIC_SET_NMI_DRIVER, UREG_NIC_SET_NMI_DRIVER_NMI_FROM_DRIVER_MSK); else iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6, UREG_DOORBELL_TO_ISR6_NMI_BIT); } IWL_EXPORT_SYMBOL(iwl_force_nmi); static const char *get_rfh_string(int cmd) { #define IWL_CMD(x) case x: return #x #define IWL_CMD_MQ(arg, reg, q) { if (arg == reg(q)) return #reg; } int i; for (i = 0; i < IWL_MAX_RX_HW_QUEUES; i++) { IWL_CMD_MQ(cmd, RFH_Q_FRBDCB_BA_LSB, i); IWL_CMD_MQ(cmd, RFH_Q_FRBDCB_WIDX, i); IWL_CMD_MQ(cmd, RFH_Q_FRBDCB_RIDX, i); IWL_CMD_MQ(cmd, RFH_Q_URBD_STTS_WPTR_LSB, i); } switch (cmd) { IWL_CMD(RFH_RXF_DMA_CFG); IWL_CMD(RFH_GEN_CFG); IWL_CMD(RFH_GEN_STATUS); IWL_CMD(FH_TSSR_TX_STATUS_REG); IWL_CMD(FH_TSSR_TX_ERROR_REG); default: return "UNKNOWN"; } #undef IWL_CMD_MQ } struct reg { u32 addr; bool is64; }; static int iwl_dump_rfh(struct iwl_trans *trans, char **buf) { int i, q; int num_q = trans->num_rx_queues; static const u32 rfh_tbl[] = { RFH_RXF_DMA_CFG, RFH_GEN_CFG, RFH_GEN_STATUS, FH_TSSR_TX_STATUS_REG, FH_TSSR_TX_ERROR_REG, }; static const struct reg rfh_mq_tbl[] = { { RFH_Q0_FRBDCB_BA_LSB, true }, { RFH_Q0_FRBDCB_WIDX, false }, { RFH_Q0_FRBDCB_RIDX, false }, { RFH_Q0_URBD_STTS_WPTR_LSB, true }, }; #ifdef CPTCFG_IWLWIFI_DEBUGFS if (buf) { int pos = 0; /* * Register (up to 34 for name + 8 blank/q for MQ): 40 chars * Colon + space: 2 characters * 0X%08x: 10 characters * New line: 1 character * Total of 53 characters */ size_t bufsz = ARRAY_SIZE(rfh_tbl) * 53 + ARRAY_SIZE(rfh_mq_tbl) * 53 * num_q + 40; *buf = kmalloc(bufsz, GFP_KERNEL); if (!*buf) return -ENOMEM; pos += scnprintf(*buf + pos, bufsz - pos, "RFH register values:\n"); for (i = 0; i < ARRAY_SIZE(rfh_tbl); i++) pos += scnprintf(*buf + pos, bufsz - pos, "%40s: 0X%08x\n", get_rfh_string(rfh_tbl[i]), iwl_read_prph(trans, rfh_tbl[i])); for (i = 0; i < ARRAY_SIZE(rfh_mq_tbl); i++) for (q = 0; q < num_q; q++) { u32 addr = rfh_mq_tbl[i].addr; addr += q * (rfh_mq_tbl[i].is64 ? 8 : 4); pos += scnprintf(*buf + pos, bufsz - pos, "%34s(q %2d): 0X%08x\n", get_rfh_string(addr), q, iwl_read_prph(trans, addr)); } return pos; } #endif IWL_ERR(trans, "RFH register values:\n"); for (i = 0; i < ARRAY_SIZE(rfh_tbl); i++) IWL_ERR(trans, " %34s: 0X%08x\n", get_rfh_string(rfh_tbl[i]), iwl_read_prph(trans, rfh_tbl[i])); for (i = 0; i < ARRAY_SIZE(rfh_mq_tbl); i++) for (q = 0; q < num_q; q++) { u32 addr = rfh_mq_tbl[i].addr; addr += q * (rfh_mq_tbl[i].is64 ? 
8 : 4); IWL_ERR(trans, " %34s(q %d): 0X%08x\n", get_rfh_string(addr), q, iwl_read_prph(trans, addr)); } return 0; } static const char *get_fh_string(int cmd) { switch (cmd) { IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG); IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG); IWL_CMD(FH_RSCSR_CHNL0_WPTR); IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG); IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG); IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG); IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV); IWL_CMD(FH_TSSR_TX_STATUS_REG); IWL_CMD(FH_TSSR_TX_ERROR_REG); default: return "UNKNOWN"; } #undef IWL_CMD } int iwl_dump_fh(struct iwl_trans *trans, char **buf) { int i; static const u32 fh_tbl[] = { FH_RSCSR_CHNL0_STTS_WPTR_REG, FH_RSCSR_CHNL0_RBDCB_BASE_REG, FH_RSCSR_CHNL0_WPTR, FH_MEM_RCSR_CHNL0_CONFIG_REG, FH_MEM_RSSR_SHARED_CTRL_REG, FH_MEM_RSSR_RX_STATUS_REG, FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV, FH_TSSR_TX_STATUS_REG, FH_TSSR_TX_ERROR_REG }; if (trans->cfg->mq_rx_supported) return iwl_dump_rfh(trans, buf); #ifdef CPTCFG_IWLWIFI_DEBUGFS if (buf) { int pos = 0; size_t bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40; *buf = kmalloc(bufsz, GFP_KERNEL); if (!*buf) return -ENOMEM; pos += scnprintf(*buf + pos, bufsz - pos, "FH register values:\n"); for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) pos += scnprintf(*buf + pos, bufsz - pos, " %34s: 0X%08x\n", get_fh_string(fh_tbl[i]), iwl_read_direct32(trans, fh_tbl[i])); return pos; } #endif IWL_ERR(trans, "FH register values:\n"); for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) IWL_ERR(trans, " %34s: 0X%08x\n", get_fh_string(fh_tbl[i]), iwl_read_direct32(trans, fh_tbl[i])); return 0; } int iwl_finish_nic_init(struct iwl_trans *trans) { int err; if (trans->cfg->bisr_workaround) { /* ensure the TOP FSM isn't still in previous reset */ mdelay(2); } /* * Set "initialization complete" bit to move adapter from * D0U* --> D0A* (powered-up active) state. */ iwl_set_bit(trans, CSR_GP_CNTRL, BIT(trans->cfg->csr->flag_init_done)); if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) udelay(2); /* * Wait for clock stabilization; once stabilized, access to * device-internal resources is supported, e.g. iwl_write_prph() * and accesses to uCode SRAM. */ err = iwl_poll_bit(trans, CSR_GP_CNTRL, BIT(trans->cfg->csr->flag_mac_clock_ready), BIT(trans->cfg->csr->flag_mac_clock_ready), 25000); if (err < 0) IWL_DEBUG_INFO(trans, "Failed to wake NIC\n"); if (trans->cfg->bisr_workaround) { /* ensure BISR shift has finished */ udelay(200); } return err < 0 ? err : 0; } IWL_EXPORT_SYMBOL(iwl_finish_nic_init); backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/iwl-io.h000066400000000000000000000130331351064634500261400ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright (C) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * The full GNU General Public License is included in this distribution in the * file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright (C) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #ifndef __iwl_io_h__ #define __iwl_io_h__ #include "iwl-devtrace.h" #include "iwl-trans.h" void iwl_write8(struct iwl_trans *trans, u32 ofs, u8 val); void iwl_write32(struct iwl_trans *trans, u32 ofs, u32 val); void iwl_write64(struct iwl_trans *trans, u64 ofs, u64 val); u32 iwl_read32(struct iwl_trans *trans, u32 ofs); static inline void iwl_set_bit(struct iwl_trans *trans, u32 reg, u32 mask) { iwl_trans_set_bits_mask(trans, reg, mask, mask); } static inline void iwl_clear_bit(struct iwl_trans *trans, u32 reg, u32 mask) { iwl_trans_set_bits_mask(trans, reg, mask, 0); } int iwl_poll_bit(struct iwl_trans *trans, u32 addr, u32 bits, u32 mask, int timeout); int iwl_poll_direct_bit(struct iwl_trans *trans, u32 addr, u32 mask, int timeout); u32 iwl_read_direct32(struct iwl_trans *trans, u32 reg); void iwl_write_direct32(struct iwl_trans *trans, u32 reg, u32 value); void iwl_write_direct64(struct iwl_trans *trans, u64 reg, u64 value); u32 iwl_read_prph_no_grab(struct iwl_trans *trans, u32 ofs); u32 iwl_read_prph(struct iwl_trans *trans, u32 ofs); void iwl_write_prph_no_grab(struct iwl_trans *trans, u32 ofs, u32 val); void iwl_write_prph64_no_grab(struct iwl_trans *trans, u64 ofs, u64 val); void iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val); int iwl_poll_prph_bit(struct iwl_trans *trans, u32 addr, u32 bits, u32 mask, int timeout); void iwl_set_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask); void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 ofs, u32 bits, u32 mask); void iwl_clear_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask); void iwl_force_nmi(struct iwl_trans *trans); int iwl_finish_nic_init(struct iwl_trans *trans); /* Error handling */ int iwl_dump_fh(struct iwl_trans *trans, char **buf); /* * UMAC periphery address space changed from 0xA00000 to 0xD00000 starting from * device family AX200. 
So peripheries used in families above and below AX200 * should go through iwl_..._umac_..._prph. */ static inline u32 iwl_umac_prph(struct iwl_trans *trans, u32 ofs) { return ofs + trans->cfg->umac_prph_offset; } static inline u32 iwl_read_umac_prph_no_grab(struct iwl_trans *trans, u32 ofs) { return iwl_read_prph_no_grab(trans, ofs + trans->cfg->umac_prph_offset); } static inline u32 iwl_read_umac_prph(struct iwl_trans *trans, u32 ofs) { return iwl_read_prph(trans, ofs + trans->cfg->umac_prph_offset); } static inline void iwl_write_umac_prph_no_grab(struct iwl_trans *trans, u32 ofs, u32 val) { iwl_write_prph_no_grab(trans, ofs + trans->cfg->umac_prph_offset, val); } static inline void iwl_write_umac_prph(struct iwl_trans *trans, u32 ofs, u32 val) { iwl_write_prph(trans, ofs + trans->cfg->umac_prph_offset, val); } static inline int iwl_poll_umac_prph_bit(struct iwl_trans *trans, u32 addr, u32 bits, u32 mask, int timeout) { return iwl_poll_prph_bit(trans, addr + trans->cfg->umac_prph_offset, bits, mask, timeout); } #endif backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h000066400000000000000000000132031351064634500275130ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #ifndef __iwl_modparams_h__ #define __iwl_modparams_h__ #include #include #include extern struct iwl_mod_params iwlwifi_mod_params; enum iwl_power_level { IWL_POWER_INDEX_1, IWL_POWER_INDEX_2, IWL_POWER_INDEX_3, IWL_POWER_INDEX_4, IWL_POWER_INDEX_5, IWL_POWER_NUM }; enum iwl_disable_11n { IWL_DISABLE_HT_ALL = BIT(0), IWL_DISABLE_HT_TXAGG = BIT(1), IWL_DISABLE_HT_RXAGG = BIT(2), IWL_ENABLE_HT_TXAGG = BIT(3), }; enum iwl_amsdu_size { IWL_AMSDU_DEF = 0, IWL_AMSDU_4K = 1, IWL_AMSDU_8K = 2, IWL_AMSDU_12K = 3, /* Add 2K at the end to avoid breaking current API */ IWL_AMSDU_2K = 4, }; enum iwl_uapsd_disable { IWL_DISABLE_UAPSD_BSS = BIT(0), IWL_DISABLE_UAPSD_P2P_CLIENT = BIT(1), }; /** * struct iwl_mod_params * * Holds the module parameters * * @swcrypto: using hardware encryption, default = 0 * @disable_11n: disable 11n capabilities, default = 0, * use IWL_[DIS,EN]ABLE_HT_* constants * @amsdu_size: See &enum iwl_amsdu_size. * @fw_restart: restart firmware, default = 1 * @bt_coex_active: enable bt coex, default = true * @led_mode: system default, default = 0 * @power_save: enable power save, default = false * @power_level: power level, default = 1 * @debug_level: levels are IWL_DL_* * @antenna_coupling: antenna coupling in dB, default = 0 * @nvm_file: specifies a external NVM file * @uapsd_disable: disable U-APSD, see &enum iwl_uapsd_disable, default = * IWL_DISABLE_UAPSD_BSS | IWL_DISABLE_UAPSD_P2P_CLIENT * @xvt_default_mode: xVT is the default operation mode, default = false * @lar_disable: disable LAR (regulatory), default = 0 * @fw_monitor: allow to use firmware monitor * @disable_11ac: disable VHT capabilities, default = false. * @disable_msix: disable MSI-X and fall back to MSI on PCIe, default = false. * @remove_when_gone: remove an inaccessible device from the PCIe bus. 
* @enable_ini: enable new FW debug infratructure (INI TLVs) */ struct iwl_mod_params { int swcrypto; unsigned int disable_11n; int amsdu_size; bool fw_restart; bool bt_coex_active; int led_mode; bool power_save; int power_level; #ifdef CPTCFG_IWLWIFI_DEBUG u32 debug_level; #endif int antenna_coupling; #if IS_ENABLED(CPTCFG_IWLXVT) bool xvt_default_mode; #endif char *nvm_file; u32 uapsd_disable; bool lar_disable; bool fw_monitor; bool disable_11ac; /** * @disable_11ax: disable HE capabilities, default = false */ bool disable_11ax; bool disable_msix; bool remove_when_gone; bool enable_ini; }; static inline bool iwl_enable_rx_ampdu(void) { if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG) return false; return true; } static inline bool iwl_enable_tx_ampdu(void) { if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG) return false; if (iwlwifi_mod_params.disable_11n & IWL_ENABLE_HT_TXAGG) return true; /* enabled by default */ return true; } #endif /* #__iwl_modparams_h__ */ backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c000066400000000000000000001463531351064634500274500ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *****************************************************************************/ #include #include #include #include #include #include #include "iwl-drv.h" #include "iwl-modparams.h" #include "iwl-nvm-parse.h" #include "iwl-prph.h" #include "iwl-io.h" #include "iwl-csr.h" #include "fw/acpi.h" #include "fw/api/nvm-reg.h" #include "fw/api/commands.h" #include "fw/api/cmdhdr.h" #include "fw/img.h" /* NVM offsets (in words) definitions */ enum nvm_offsets { /* NVM HW-Section offset (in words) definitions */ SUBSYSTEM_ID = 0x0A, HW_ADDR = 0x15, /* NVM SW-Section offset (in words) definitions */ NVM_SW_SECTION = 0x1C0, NVM_VERSION = 0, RADIO_CFG = 1, SKU = 2, N_HW_ADDRS = 3, NVM_CHANNELS = 0x1E0 - NVM_SW_SECTION, /* NVM REGULATORY -Section offset (in words) definitions */ NVM_CHANNELS_SDP = 0, }; enum ext_nvm_offsets { /* NVM HW-Section offset (in words) definitions */ MAC_ADDRESS_OVERRIDE_EXT_NVM = 1, /* NVM SW-Section offset (in words) definitions */ NVM_VERSION_EXT_NVM = 0, RADIO_CFG_FAMILY_EXT_NVM = 0, SKU_FAMILY_8000 = 2, N_HW_ADDRS_FAMILY_8000 = 3, /* NVM REGULATORY -Section offset (in words) definitions */ NVM_CHANNELS_EXTENDED = 0, NVM_LAR_OFFSET_OLD = 0x4C7, NVM_LAR_OFFSET = 0x507, NVM_LAR_ENABLED = 0x7, }; /* SKU Capabilities (actual values from NVM definition) */ enum nvm_sku_bits { NVM_SKU_CAP_BAND_24GHZ = BIT(0), NVM_SKU_CAP_BAND_52GHZ = BIT(1), NVM_SKU_CAP_11N_ENABLE = BIT(2), NVM_SKU_CAP_11AC_ENABLE = BIT(3), NVM_SKU_CAP_MIMO_DISABLE = BIT(5), }; /* * These are the channel numbers in the order that they are stored in the NVM */ static const u16 iwl_nvm_channels[] = { /* 2.4 GHz */ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, /* 5 GHz */ 36, 40, 44 , 48, 52, 56, 60, 64, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144, 149, 153, 157, 161, 165 }; static const u16 iwl_ext_nvm_channels[] = { /* 2.4 GHz */ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, /* 5 GHz */ 36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92, 96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144, 149, 153, 157, 161, 165, 169, 173, 177, 181 }; static const u16 iwl_uhb_nvm_channels[] = { /* 2.4 GHz */ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, /* 5 GHz */ 36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92, 96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144, 149, 153, 157, 161, 165, 169, 173, 177, 181, /* 6-7 GHz */ 189, 193, 197, 201, 205, 209, 213, 217, 221, 225, 229, 233, 237, 241, 245, 249, 253, 257, 261, 265, 269, 273, 277, 281, 285, 289, 293, 297, 301, 305, 309, 313, 317, 321, 325, 329, 333, 337, 341, 345, 349, 353, 357, 361, 365, 369, 373, 377, 381, 385, 389, 393, 397, 401, 405, 409, 413, 417, 421 }; #define IWL_NVM_NUM_CHANNELS ARRAY_SIZE(iwl_nvm_channels) #define IWL_NVM_NUM_CHANNELS_EXT ARRAY_SIZE(iwl_ext_nvm_channels) #define IWL_NVM_NUM_CHANNELS_UHB ARRAY_SIZE(iwl_uhb_nvm_channels) #define NUM_2GHZ_CHANNELS 14 #define FIRST_2GHZ_HT_MINUS 5 #define LAST_2GHZ_HT_PLUS 9 #define N_HW_ADDR_MASK 0xF /* rate data (static) */ static struct 
ieee80211_rate iwl_cfg80211_rates[] = { { .bitrate = 1 * 10, .hw_value = 0, .hw_value_short = 0, }, { .bitrate = 2 * 10, .hw_value = 1, .hw_value_short = 1, .flags = IEEE80211_RATE_SHORT_PREAMBLE, }, { .bitrate = 5.5 * 10, .hw_value = 2, .hw_value_short = 2, .flags = IEEE80211_RATE_SHORT_PREAMBLE, }, { .bitrate = 11 * 10, .hw_value = 3, .hw_value_short = 3, .flags = IEEE80211_RATE_SHORT_PREAMBLE, }, { .bitrate = 6 * 10, .hw_value = 4, .hw_value_short = 4, }, { .bitrate = 9 * 10, .hw_value = 5, .hw_value_short = 5, }, { .bitrate = 12 * 10, .hw_value = 6, .hw_value_short = 6, }, { .bitrate = 18 * 10, .hw_value = 7, .hw_value_short = 7, }, { .bitrate = 24 * 10, .hw_value = 8, .hw_value_short = 8, }, { .bitrate = 36 * 10, .hw_value = 9, .hw_value_short = 9, }, { .bitrate = 48 * 10, .hw_value = 10, .hw_value_short = 10, }, { .bitrate = 54 * 10, .hw_value = 11, .hw_value_short = 11, }, }; #define RATES_24_OFFS 0 #define N_RATES_24 ARRAY_SIZE(iwl_cfg80211_rates) #define RATES_52_OFFS 4 #define N_RATES_52 (N_RATES_24 - RATES_52_OFFS) /** * enum iwl_nvm_channel_flags - channel flags in NVM * @NVM_CHANNEL_VALID: channel is usable for this SKU/geo * @NVM_CHANNEL_IBSS: usable as an IBSS channel * @NVM_CHANNEL_ACTIVE: active scanning allowed * @NVM_CHANNEL_RADAR: radar detection required * @NVM_CHANNEL_INDOOR_ONLY: only indoor use is allowed * @NVM_CHANNEL_GO_CONCURRENT: GO operation is allowed when connected to BSS * on same channel on 2.4 or same UNII band on 5.2 * @NVM_CHANNEL_UNIFORM: uniform spreading required * @NVM_CHANNEL_20MHZ: 20 MHz channel okay * @NVM_CHANNEL_40MHZ: 40 MHz channel okay * @NVM_CHANNEL_80MHZ: 80 MHz channel okay * @NVM_CHANNEL_160MHZ: 160 MHz channel okay * @NVM_CHANNEL_DC_HIGH: DC HIGH required/allowed (?) */ enum iwl_nvm_channel_flags { NVM_CHANNEL_VALID = BIT(0), NVM_CHANNEL_IBSS = BIT(1), NVM_CHANNEL_ACTIVE = BIT(3), NVM_CHANNEL_RADAR = BIT(4), NVM_CHANNEL_INDOOR_ONLY = BIT(5), NVM_CHANNEL_GO_CONCURRENT = BIT(6), NVM_CHANNEL_UNIFORM = BIT(7), NVM_CHANNEL_20MHZ = BIT(8), NVM_CHANNEL_40MHZ = BIT(9), NVM_CHANNEL_80MHZ = BIT(10), NVM_CHANNEL_160MHZ = BIT(11), NVM_CHANNEL_DC_HIGH = BIT(12), }; static inline void iwl_nvm_print_channel_flags(struct device *dev, u32 level, int chan, u32 flags) { #define CHECK_AND_PRINT_I(x) \ ((flags & NVM_CHANNEL_##x) ? " " #x : "") if (!(flags & NVM_CHANNEL_VALID)) { IWL_DEBUG_DEV(dev, level, "Ch. %d: 0x%x: No traffic\n", chan, flags); return; } /* Note: already can print up to 101 characters, 110 is the limit! */ IWL_DEBUG_DEV(dev, level, "Ch. 
%d: 0x%x:%s%s%s%s%s%s%s%s%s%s%s%s\n", chan, flags, CHECK_AND_PRINT_I(VALID), CHECK_AND_PRINT_I(IBSS), CHECK_AND_PRINT_I(ACTIVE), CHECK_AND_PRINT_I(RADAR), CHECK_AND_PRINT_I(INDOOR_ONLY), CHECK_AND_PRINT_I(GO_CONCURRENT), CHECK_AND_PRINT_I(UNIFORM), CHECK_AND_PRINT_I(20MHZ), CHECK_AND_PRINT_I(40MHZ), CHECK_AND_PRINT_I(80MHZ), CHECK_AND_PRINT_I(160MHZ), CHECK_AND_PRINT_I(DC_HIGH)); #undef CHECK_AND_PRINT_I } static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, bool is_5ghz, u32 nvm_flags, const struct iwl_cfg *cfg) { u32 flags = IEEE80211_CHAN_NO_HT40; if (!is_5ghz && (nvm_flags & NVM_CHANNEL_40MHZ)) { if (ch_num <= LAST_2GHZ_HT_PLUS) flags &= ~IEEE80211_CHAN_NO_HT40PLUS; if (ch_num >= FIRST_2GHZ_HT_MINUS) flags &= ~IEEE80211_CHAN_NO_HT40MINUS; } else if (nvm_flags & NVM_CHANNEL_40MHZ) { if ((ch_idx - NUM_2GHZ_CHANNELS) % 2 == 0) flags &= ~IEEE80211_CHAN_NO_HT40PLUS; else flags &= ~IEEE80211_CHAN_NO_HT40MINUS; } if (!(nvm_flags & NVM_CHANNEL_80MHZ)) flags |= IEEE80211_CHAN_NO_80MHZ; if (!(nvm_flags & NVM_CHANNEL_160MHZ)) flags |= IEEE80211_CHAN_NO_160MHZ; if (!(nvm_flags & NVM_CHANNEL_IBSS)) flags |= IEEE80211_CHAN_NO_IR; if (!(nvm_flags & NVM_CHANNEL_ACTIVE)) flags |= IEEE80211_CHAN_NO_IR; if (nvm_flags & NVM_CHANNEL_RADAR) flags |= IEEE80211_CHAN_RADAR; if (nvm_flags & NVM_CHANNEL_INDOOR_ONLY) flags |= IEEE80211_CHAN_INDOOR_ONLY; /* Set the GO concurrent flag only in case that NO_IR is set. * Otherwise it is meaningless */ if ((nvm_flags & NVM_CHANNEL_GO_CONCURRENT) && (flags & IEEE80211_CHAN_NO_IR)) flags |= IEEE80211_CHAN_IR_CONCURRENT; return flags; } static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg, struct iwl_nvm_data *data, const void * const nvm_ch_flags, u32 sbands_flags, bool v4) { int ch_idx; int n_channels = 0; struct ieee80211_channel *channel; u32 ch_flags; int num_of_ch, num_2ghz_channels = NUM_2GHZ_CHANNELS; const u16 *nvm_chan; if (cfg->uhb_supported) { num_of_ch = IWL_NVM_NUM_CHANNELS_UHB; nvm_chan = iwl_uhb_nvm_channels; } else if (cfg->nvm_type == IWL_NVM_EXT) { num_of_ch = IWL_NVM_NUM_CHANNELS_EXT; nvm_chan = iwl_ext_nvm_channels; } else { num_of_ch = IWL_NVM_NUM_CHANNELS; nvm_chan = iwl_nvm_channels; } for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) { bool is_5ghz = (ch_idx >= num_2ghz_channels); if (v4) ch_flags = __le32_to_cpup((__le32 *)nvm_ch_flags + ch_idx); else ch_flags = __le16_to_cpup((__le16 *)nvm_ch_flags + ch_idx); if (is_5ghz && !data->sku_cap_band_52ghz_enable) continue; /* workaround to disable wide channels in 5GHz */ if ((sbands_flags & IWL_NVM_SBANDS_FLAGS_NO_WIDE_IN_5GHZ) && is_5ghz) { ch_flags &= ~(NVM_CHANNEL_40MHZ | NVM_CHANNEL_80MHZ | NVM_CHANNEL_160MHZ); } if (ch_flags & NVM_CHANNEL_160MHZ) data->vht160_supported = true; if (!(sbands_flags & IWL_NVM_SBANDS_FLAGS_LAR) && !(ch_flags & NVM_CHANNEL_VALID)) { /* * Channels might become valid later if lar is * supported, hence we still want to add them to * the list of supported channels to cfg80211. */ iwl_nvm_print_channel_flags(dev, IWL_DL_EEPROM, nvm_chan[ch_idx], ch_flags); continue; } channel = &data->channels[n_channels]; n_channels++; channel->hw_value = nvm_chan[ch_idx]; channel->band = is_5ghz ? NL80211_BAND_5GHZ : NL80211_BAND_2GHZ; channel->center_freq = ieee80211_channel_to_frequency( channel->hw_value, channel->band); /* Initialize regulatory-based run-time data */ /* * Default value - highest tx power value. 
max_power * is not used in mvm, and is used for backwards compatibility */ channel->max_power = IWL_DEFAULT_MAX_TX_POWER; /* don't put limitations in case we're using LAR */ if (!(sbands_flags & IWL_NVM_SBANDS_FLAGS_LAR)) channel->flags = iwl_get_channel_flags(nvm_chan[ch_idx], ch_idx, is_5ghz, ch_flags, cfg); else channel->flags = 0; iwl_nvm_print_channel_flags(dev, IWL_DL_EEPROM, channel->hw_value, ch_flags); IWL_DEBUG_EEPROM(dev, "Ch. %d: %ddBm\n", channel->hw_value, channel->max_power); } return n_channels; } static void iwl_init_vht_hw_capab(struct iwl_trans *trans, struct iwl_nvm_data *data, struct ieee80211_sta_vht_cap *vht_cap, u8 tx_chains, u8 rx_chains) { const struct iwl_cfg *cfg = trans->cfg; int num_rx_ants = num_of_ant(rx_chains); int num_tx_ants = num_of_ant(tx_chains); unsigned int max_ampdu_exponent = (cfg->max_vht_ampdu_exponent ?: IEEE80211_VHT_MAX_AMPDU_1024K); vht_cap->vht_supported = true; vht_cap->cap = IEEE80211_VHT_CAP_SHORT_GI_80 | IEEE80211_VHT_CAP_RXSTBC_1 | IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | 3 << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT | max_ampdu_exponent << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT; if (data->vht160_supported) vht_cap->cap |= IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ | IEEE80211_VHT_CAP_SHORT_GI_160; if (cfg->vht_mu_mimo_supported) vht_cap->cap |= IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE; if (cfg->ht_params->ldpc) vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC; if (data->sku_cap_mimo_disabled) { num_rx_ants = 1; num_tx_ants = 1; } if (num_tx_ants > 1) vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC; else vht_cap->cap |= IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN; switch (iwlwifi_mod_params.amsdu_size) { case IWL_AMSDU_DEF: if (cfg->mq_rx_supported) vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454; else vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895; break; case IWL_AMSDU_2K: if (cfg->mq_rx_supported) vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454; else WARN(1, "RB size of 2K is not supported by this device\n"); break; case IWL_AMSDU_4K: vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895; break; case IWL_AMSDU_8K: vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991; break; case IWL_AMSDU_12K: vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454; break; default: break; } vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 | IEEE80211_VHT_MCS_SUPPORT_0_9 << 2 | IEEE80211_VHT_MCS_NOT_SUPPORTED << 4 | IEEE80211_VHT_MCS_NOT_SUPPORTED << 6 | IEEE80211_VHT_MCS_NOT_SUPPORTED << 8 | IEEE80211_VHT_MCS_NOT_SUPPORTED << 10 | IEEE80211_VHT_MCS_NOT_SUPPORTED << 12 | IEEE80211_VHT_MCS_NOT_SUPPORTED << 14); if (num_rx_ants == 1 || cfg->rx_with_siso_diversity) { vht_cap->cap |= IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN; /* this works because NOT_SUPPORTED == 3 */ vht_cap->vht_mcs.rx_mcs_map |= cpu_to_le16(IEEE80211_VHT_MCS_NOT_SUPPORTED << 2); } #ifdef CPTCFG_IWLWIFI_SUPPORT_DEBUG_OVERRIDES vht_cap->cap ^= trans->dbg_cfg.vht_cap_flip; #endif vht_cap->vht_mcs.tx_mcs_map = vht_cap->vht_mcs.rx_mcs_map; vht_cap->vht_mcs.tx_highest |= cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE); } static struct ieee80211_sband_iftype_data iwl_he_capa[] = { { .types_mask = BIT(NL80211_IFTYPE_STATION), .he_cap = { .has_he = true, .he_cap_elem = { .mac_cap_info[0] = IEEE80211_HE_MAC_CAP0_HTC_HE | IEEE80211_HE_MAC_CAP0_TWT_REQ, .mac_cap_info[1] = IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US | IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8, .mac_cap_info[2] = IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP | IEEE80211_HE_MAC_CAP2_ACK_EN, 
.mac_cap_info[3] = IEEE80211_HE_MAC_CAP3_OMI_CONTROL | IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_VHT_2, .mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU | IEEE80211_HE_MAC_CAP4_MULTI_TID_AGG_TX_QOS_B39, .mac_cap_info[5] = IEEE80211_HE_MAC_CAP5_MULTI_TID_AGG_TX_QOS_B40 | IEEE80211_HE_MAC_CAP5_MULTI_TID_AGG_TX_QOS_B41 | IEEE80211_HE_MAC_CAP5_UL_2x996_TONE_RU | IEEE80211_HE_MAC_CAP5_HE_DYNAMIC_SM_PS | IEEE80211_HE_MAC_CAP5_HT_VHT_TRIG_FRAME_RX, .phy_cap_info[0] = IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G | IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G | IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G, .phy_cap_info[1] = IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK | IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A | IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD, .phy_cap_info[2] = IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US, .phy_cap_info[3] = IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_NO_DCM | IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_1 | IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_NO_DCM | IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_1, .phy_cap_info[4] = IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE | IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_8 | IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_8, .phy_cap_info[5] = IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_2 | IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2, .phy_cap_info[6] = IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT, .phy_cap_info[7] = IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_AR | IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI | IEEE80211_HE_PHY_CAP7_MAX_NC_1, .phy_cap_info[8] = IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI | IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G | IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU | IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU | IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_2x996, .phy_cap_info[9] = IEEE80211_HE_PHY_CAP9_NON_TRIGGERED_CQI_FEEDBACK | IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB | IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB | IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_RESERVED, }, /* * Set default Tx/Rx HE MCS NSS Support field. 
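(The 0xfffa constant used below decodes as in this hedged sketch:)
 */
#if 0
	u16 map = 0xfffa;	/* two bits per spatial stream, NSS 1 first */
	int nss;

	for (nss = 0; nss < 8; nss++) {
		u8 mcs = (map >> (2 * nss)) & 0x3;
		/*
		 * nss 0 and 1 -> 2 (IEEE80211_HE_MCS_SUPPORT_0_11),
		 * nss 2..7    -> 3 (IEEE80211_HE_MCS_NOT_SUPPORTED)
		 */
	}
#endif
	/*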
* Indicate support for up to 2 spatial streams and all * MCS, without any special cases */ .he_mcs_nss_supp = { .rx_mcs_80 = cpu_to_le16(0xfffa), .tx_mcs_80 = cpu_to_le16(0xfffa), .rx_mcs_160 = cpu_to_le16(0xfffa), .tx_mcs_160 = cpu_to_le16(0xfffa), .rx_mcs_80p80 = cpu_to_le16(0xffff), .tx_mcs_80p80 = cpu_to_le16(0xffff), }, /* * Set default PPE thresholds, with PPET16 set to 0, * PPET8 set to 7 */ .ppe_thres = {0x61, 0x1c, 0xc7, 0x71}, }, }, { .types_mask = BIT(NL80211_IFTYPE_AP), .he_cap = { .has_he = true, .he_cap_elem = { .mac_cap_info[0] = IEEE80211_HE_MAC_CAP0_HTC_HE, .mac_cap_info[1] = IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US | IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8, .mac_cap_info[2] = IEEE80211_HE_MAC_CAP2_BSR | IEEE80211_HE_MAC_CAP2_ACK_EN, .mac_cap_info[3] = IEEE80211_HE_MAC_CAP3_OMI_CONTROL | IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_VHT_2, .mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU, .mac_cap_info[5] = IEEE80211_HE_MAC_CAP5_UL_2x996_TONE_RU, .phy_cap_info[0] = IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G | IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G | IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G, .phy_cap_info[1] = IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD, .phy_cap_info[2] = IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US, .phy_cap_info[3] = IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_NO_DCM | IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_1 | IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_NO_DCM | IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_1, .phy_cap_info[4] = IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE | IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_8 | IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_8, .phy_cap_info[5] = IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_2 | IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2, .phy_cap_info[6] = IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT, .phy_cap_info[7] = IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI | IEEE80211_HE_PHY_CAP7_MAX_NC_1, .phy_cap_info[8] = IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI | IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G | IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU | IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU | IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_2x996, .phy_cap_info[9] = IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB | IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB | IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_RESERVED, }, /* * Set default Tx/Rx HE MCS NSS Support field. 
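(The ppe_thres[] bytes used in these entries unpack as in this hedged sketch; the field is bit-packed, LSB first, per the 802.11ax PPE Thresholds element layout:)
 */
#if 0
	u8 b0 = 0x61;				/* first PPE thresholds byte */
	u8 nss_m1 = b0 & 0x07;			/* 1 -> thresholds for 2 NSS */
	u8 ru_bitmap = (b0 >> 3) & 0x0f;	/* 0xc -> 996 and 2x996 tone RUs */
	/*
	 * the remaining bits carry 3-bit PPET16/PPET8 pairs per (NSS, RU);
	 * here PPET16 = 0 and PPET8 = 7 for every pair, as noted below
	 */
#endif
	/*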
* Indicate support for up to 2 spatial streams and all * MCS, without any special cases */ .he_mcs_nss_supp = { .rx_mcs_80 = cpu_to_le16(0xfffa), .tx_mcs_80 = cpu_to_le16(0xfffa), .rx_mcs_160 = cpu_to_le16(0xfffa), .tx_mcs_160 = cpu_to_le16(0xfffa), .rx_mcs_80p80 = cpu_to_le16(0xffff), .tx_mcs_80p80 = cpu_to_le16(0xffff), }, /* * Set default PPE thresholds, with PPET16 set to 0, * PPET8 set to 7 */ .ppe_thres = {0x61, 0x1c, 0xc7, 0x71}, }, }, }; static void iwl_init_he_hw_capab(struct ieee80211_supported_band *sband, u8 tx_chains, u8 rx_chains) { sband->iftype_data = iwl_he_capa; sband->n_iftype_data = ARRAY_SIZE(iwl_he_capa); /* If not 2x2, we need to indicate 1x1 in the Midamble RX Max NSTS */ if ((tx_chains & rx_chains) != ANT_AB) { int i; for (i = 0; i < sband->n_iftype_data; i++) { iwl_he_capa[i].he_cap.he_cap_elem.phy_cap_info[1] &= ~IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS; iwl_he_capa[i].he_cap.he_cap_elem.phy_cap_info[2] &= ~IEEE80211_HE_PHY_CAP2_MIDAMBLE_RX_TX_MAX_NSTS; iwl_he_capa[i].he_cap.he_cap_elem.phy_cap_info[7] &= ~IEEE80211_HE_PHY_CAP7_MAX_NC_MASK; } } } #ifdef CPTCFG_IWLWIFI_SUPPORT_DEBUG_OVERRIDES /* returns true iff there exists one spatial stream where MCS of a > b */ static bool iwl_he_mcs_greater(u16 a, u16 b) { int i; for (i = 0; i < 16; i += 2) { if ((((a >> i) + 1) & 3) > (((b >> i) + 1) & 3)) return true; } return false; } static void iwl_init_he_override(struct iwl_trans *trans, struct ieee80211_supported_band *sband) { struct ieee80211_sband_iftype_data *iftype_data; int i; for (i = 0; i < ARRAY_SIZE(iwl_he_capa); i++) { iftype_data = &iwl_he_capa[i]; if (trans->dbg_cfg.rx_mcs_80) { if (iwl_he_mcs_greater(trans->dbg_cfg.rx_mcs_80, le16_to_cpu(iftype_data->he_cap.he_mcs_nss_supp.rx_mcs_80))) IWL_ERR(trans, "Cannot set dbg rx_mcs_80 = 0x%x (too big)\n", trans->dbg_cfg.rx_mcs_80); else iftype_data->he_cap.he_mcs_nss_supp.rx_mcs_80 = cpu_to_le16(trans->dbg_cfg.rx_mcs_80); } if (trans->dbg_cfg.tx_mcs_80) { if (iwl_he_mcs_greater(trans->dbg_cfg.tx_mcs_80, le16_to_cpu(iftype_data->he_cap.he_mcs_nss_supp.tx_mcs_80))) IWL_ERR(trans, "Cannot set dbg tx_mcs_80 = 0x%x (too big)\n", trans->dbg_cfg.tx_mcs_80); else iftype_data->he_cap.he_mcs_nss_supp.tx_mcs_80 = cpu_to_le16(trans->dbg_cfg.tx_mcs_80); } if (trans->dbg_cfg.rx_mcs_160) { if (iwl_he_mcs_greater(trans->dbg_cfg.rx_mcs_160, le16_to_cpu(iftype_data->he_cap.he_mcs_nss_supp.rx_mcs_160))) IWL_ERR(trans, "Cannot set dbg rx_mcs_160 = 0x%x (too big)\n", trans->dbg_cfg.rx_mcs_160); else iftype_data->he_cap.he_mcs_nss_supp.rx_mcs_160 = cpu_to_le16(trans->dbg_cfg.rx_mcs_160); } if (trans->dbg_cfg.tx_mcs_160) { if (iwl_he_mcs_greater(trans->dbg_cfg.tx_mcs_160, le16_to_cpu(iftype_data->he_cap.he_mcs_nss_supp.tx_mcs_160))) IWL_ERR(trans, "Cannot set dbg tx_mcs_160 = 0x%x (too big)\n", trans->dbg_cfg.tx_mcs_160); else iftype_data->he_cap.he_mcs_nss_supp.tx_mcs_160 = cpu_to_le16(trans->dbg_cfg.tx_mcs_160); } /* * If antennas were forced - make sure not declaring MIMO when * we actually are SISO * Recall that there are 2 bits per stream in the "HE Tx/Rx HE * MCS NSS Support Field", so if some antenna is forced on but * not both A and B - we should work in SISO mode, so mark the * 2nd SS as not supported */ if (trans->dbg_cfg.valid_ants && (trans->dbg_cfg.valid_ants & ANT_AB) != ANT_AB) { iftype_data->he_cap.he_mcs_nss_supp.rx_mcs_80 |= cpu_to_le16(IEEE80211_HE_MCS_NOT_SUPPORTED << 2); iftype_data->he_cap.he_mcs_nss_supp.tx_mcs_80 |= cpu_to_le16(IEEE80211_HE_MCS_NOT_SUPPORTED << 2); 
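/* the "<< 2" shifts NOT_SUPPORTED into bits 2-3, i.e. the two bits that
 * describe the 2nd spatial stream, leaving the NSS 1 bits untouched */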
iftype_data->he_cap.he_mcs_nss_supp.rx_mcs_160 |= cpu_to_le16(IEEE80211_HE_MCS_NOT_SUPPORTED << 2); iftype_data->he_cap.he_mcs_nss_supp.tx_mcs_160 |= cpu_to_le16(IEEE80211_HE_MCS_NOT_SUPPORTED << 2); iftype_data->he_cap.he_mcs_nss_supp.rx_mcs_80p80 |= cpu_to_le16(IEEE80211_HE_MCS_NOT_SUPPORTED << 2); iftype_data->he_cap.he_mcs_nss_supp.tx_mcs_80p80 |= cpu_to_le16(IEEE80211_HE_MCS_NOT_SUPPORTED << 2); } if (trans->dbg_cfg.no_ack_en & 0x1) iftype_data->he_cap.he_cap_elem.mac_cap_info[2] &= ~IEEE80211_HE_MAC_CAP2_ACK_EN; if (trans->dbg_cfg.no_ldpc) iftype_data->he_cap.he_cap_elem.phy_cap_info[1] &= ~IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD; /* Check if any HE capabilities need to be set for debug */ if (trans->dbg_cfg.he_ppe_thres.len) { u8 len = trans->dbg_cfg.he_ppe_thres.len; if (len > sizeof(iftype_data->he_cap.ppe_thres)) len = sizeof(iftype_data->he_cap.ppe_thres); memcpy(iftype_data->he_cap.ppe_thres, trans->dbg_cfg.he_ppe_thres.data, len); } if (trans->dbg_cfg.he_chan_width_dis) iftype_data->he_cap.he_cap_elem.phy_cap_info[0] &= ~(trans->dbg_cfg.he_chan_width_dis << 1); if (trans->dbg_cfg.he_mac_cap.len) { if (trans->dbg_cfg.he_mac_cap.len != sizeof(iftype_data->he_cap.he_cap_elem.mac_cap_info)) { IWL_ERR(trans, "Wrong he_mac_cap len %u, should be %zu\n", trans->dbg_cfg.he_mac_cap.len, sizeof(iftype_data->he_cap.he_cap_elem.mac_cap_info)); } else { memcpy(iftype_data->he_cap.he_cap_elem.mac_cap_info, trans->dbg_cfg.he_mac_cap.data, trans->dbg_cfg.he_mac_cap.len); } } if (trans->dbg_cfg.he_phy_cap.len) { if (trans->dbg_cfg.he_phy_cap.len != sizeof(iftype_data->he_cap.he_cap_elem.phy_cap_info)) { IWL_ERR(trans, "Wrong he_phy_cap len %u, should be %zu\n", trans->dbg_cfg.he_phy_cap.len, sizeof(iftype_data->he_cap.he_cap_elem.phy_cap_info)); } else { memcpy(iftype_data->he_cap.he_cap_elem.phy_cap_info, trans->dbg_cfg.he_phy_cap.data, trans->dbg_cfg.he_phy_cap.len); } } } } #endif static void iwl_init_sbands(struct iwl_trans *trans, struct iwl_nvm_data *data, const void *nvm_ch_flags, u8 tx_chains, u8 rx_chains, u32 sbands_flags, bool v4) { struct device *dev = trans->dev; const struct iwl_cfg *cfg = trans->cfg; int n_channels; int n_used = 0; struct ieee80211_supported_band *sband; n_channels = iwl_init_channel_map(dev, cfg, data, nvm_ch_flags, sbands_flags, v4); sband = &data->bands[NL80211_BAND_2GHZ]; sband->band = NL80211_BAND_2GHZ; sband->bitrates = &iwl_cfg80211_rates[RATES_24_OFFS]; sband->n_bitrates = N_RATES_24; n_used += iwl_init_sband_channels(data, sband, n_channels, NL80211_BAND_2GHZ); iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, NL80211_BAND_2GHZ, tx_chains, rx_chains); if (data->sku_cap_11ax_enable && !iwlwifi_mod_params.disable_11ax) iwl_init_he_hw_capab(sband, tx_chains, rx_chains); sband = &data->bands[NL80211_BAND_5GHZ]; sband->band = NL80211_BAND_5GHZ; sband->bitrates = &iwl_cfg80211_rates[RATES_52_OFFS]; sband->n_bitrates = N_RATES_52; n_used += iwl_init_sband_channels(data, sband, n_channels, NL80211_BAND_5GHZ); iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, NL80211_BAND_5GHZ, tx_chains, rx_chains); if (data->sku_cap_11ac_enable && !iwlwifi_mod_params.disable_11ac) iwl_init_vht_hw_capab(trans, data, &sband->vht_cap, tx_chains, rx_chains); if (data->sku_cap_11ax_enable && !iwlwifi_mod_params.disable_11ax) iwl_init_he_hw_capab(sband, tx_chains, rx_chains); if (n_channels != n_used) IWL_ERR_DEV(dev, "NVM: used only %d of %d channels\n", n_used, n_channels); } static int iwl_get_sku(const struct iwl_cfg *cfg, const __le16 *nvm_sw, const __le16 *phy_sku) { if 
(cfg->nvm_type != IWL_NVM_EXT) return le16_to_cpup(nvm_sw + SKU); return le32_to_cpup((__le32 *)(phy_sku + SKU_FAMILY_8000)); } static int iwl_get_nvm_version(const struct iwl_cfg *cfg, const __le16 *nvm_sw) { if (cfg->nvm_type != IWL_NVM_EXT) return le16_to_cpup(nvm_sw + NVM_VERSION); else return le32_to_cpup((__le32 *)(nvm_sw + NVM_VERSION_EXT_NVM)); } static int iwl_get_radio_cfg(const struct iwl_cfg *cfg, const __le16 *nvm_sw, const __le16 *phy_sku) { if (cfg->nvm_type != IWL_NVM_EXT) return le16_to_cpup(nvm_sw + RADIO_CFG); return le32_to_cpup((__le32 *)(phy_sku + RADIO_CFG_FAMILY_EXT_NVM)); } static int iwl_get_n_hw_addrs(const struct iwl_cfg *cfg, const __le16 *nvm_sw) { int n_hw_addr; if (cfg->nvm_type != IWL_NVM_EXT) return le16_to_cpup(nvm_sw + N_HW_ADDRS); n_hw_addr = le32_to_cpup((__le32 *)(nvm_sw + N_HW_ADDRS_FAMILY_8000)); return n_hw_addr & N_HW_ADDR_MASK; } static void iwl_set_radio_cfg(const struct iwl_cfg *cfg, struct iwl_nvm_data *data, u32 radio_cfg) { if (cfg->nvm_type != IWL_NVM_EXT) { data->radio_cfg_type = NVM_RF_CFG_TYPE_MSK(radio_cfg); data->radio_cfg_step = NVM_RF_CFG_STEP_MSK(radio_cfg); data->radio_cfg_dash = NVM_RF_CFG_DASH_MSK(radio_cfg); data->radio_cfg_pnum = NVM_RF_CFG_PNUM_MSK(radio_cfg); return; } /* set the radio configuration for family 8000 */ data->radio_cfg_type = EXT_NVM_RF_CFG_TYPE_MSK(radio_cfg); data->radio_cfg_step = EXT_NVM_RF_CFG_STEP_MSK(radio_cfg); data->radio_cfg_dash = EXT_NVM_RF_CFG_DASH_MSK(radio_cfg); data->radio_cfg_pnum = EXT_NVM_RF_CFG_FLAVOR_MSK(radio_cfg); data->valid_tx_ant = EXT_NVM_RF_CFG_TX_ANT_MSK(radio_cfg); data->valid_rx_ant = EXT_NVM_RF_CFG_RX_ANT_MSK(radio_cfg); } static void iwl_flip_hw_address(__le32 mac_addr0, __le32 mac_addr1, u8 *dest) { const u8 *hw_addr; hw_addr = (const u8 *)&mac_addr0; dest[0] = hw_addr[3]; dest[1] = hw_addr[2]; dest[2] = hw_addr[1]; dest[3] = hw_addr[0]; hw_addr = (const u8 *)&mac_addr1; dest[4] = hw_addr[1]; dest[5] = hw_addr[0]; } static void iwl_set_hw_address_from_csr(struct iwl_trans *trans, struct iwl_nvm_data *data) { __le32 mac_addr0 = cpu_to_le32(iwl_read32(trans, trans->cfg->csr->mac_addr0_strap)); __le32 mac_addr1 = cpu_to_le32(iwl_read32(trans, trans->cfg->csr->mac_addr1_strap)); iwl_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr); /* * If the OEM fused a valid address, use it instead of the one in the * OTP */ if (is_valid_ether_addr(data->hw_addr)) return; mac_addr0 = cpu_to_le32(iwl_read32(trans, trans->cfg->csr->mac_addr0_otp)); mac_addr1 = cpu_to_le32(iwl_read32(trans, trans->cfg->csr->mac_addr1_otp)); iwl_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr); } static void iwl_set_hw_address_family_8000(struct iwl_trans *trans, const struct iwl_cfg *cfg, struct iwl_nvm_data *data, const __le16 *mac_override, const __be16 *nvm_hw) { const u8 *hw_addr; if (mac_override) { static const u8 reserved_mac[] = { 0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00 }; hw_addr = (const u8 *)(mac_override + MAC_ADDRESS_OVERRIDE_EXT_NVM); /* * Store the MAC address from MAO section. * No byte swapping is required in MAO section */ memcpy(data->hw_addr, hw_addr, ETH_ALEN); /* * Force the use of the OTP MAC address in case of reserved MAC * address in the NVM, or if address is given but invalid. 
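* In other words, the early return below keeps the override only when it
	 * is a valid ethernet address that differs from the 02:cc:aa:ff:ee:00
	 * placeholder; anything else falls through to the WFMP register path.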
*/ if (is_valid_ether_addr(data->hw_addr) && memcmp(reserved_mac, hw_addr, ETH_ALEN) != 0) return; IWL_ERR(trans, "mac address from nvm override section is not valid\n"); } if (nvm_hw) { /* read the mac address from WFMP registers */ __le32 mac_addr0 = cpu_to_le32(iwl_trans_read_prph(trans, WFMP_MAC_ADDR_0)); __le32 mac_addr1 = cpu_to_le32(iwl_trans_read_prph(trans, WFMP_MAC_ADDR_1)); iwl_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr); return; } IWL_ERR(trans, "mac address is not found\n"); } static int iwl_set_hw_address(struct iwl_trans *trans, const struct iwl_cfg *cfg, struct iwl_nvm_data *data, const __be16 *nvm_hw, const __le16 *mac_override) { #ifdef CPTCFG_IWLWIFI_SUPPORT_DEBUG_OVERRIDES struct iwl_dbg_cfg *dbg_cfg = &trans->dbg_cfg; if (dbg_cfg->hw_address.len) { if (dbg_cfg->hw_address.len == ETH_ALEN && is_valid_ether_addr(dbg_cfg->hw_address.data)) { memcpy(data->hw_addr, dbg_cfg->hw_address.data, ETH_ALEN); return 0; } IWL_ERR(trans, "mac address from config file is invalid\n"); } #endif if (cfg->mac_addr_from_csr) { iwl_set_hw_address_from_csr(trans, data); } else if (cfg->nvm_type != IWL_NVM_EXT) { const u8 *hw_addr = (const u8 *)(nvm_hw + HW_ADDR); /* The byte order is little endian 16 bit, meaning 214365 */ data->hw_addr[0] = hw_addr[1]; data->hw_addr[1] = hw_addr[0]; data->hw_addr[2] = hw_addr[3]; data->hw_addr[3] = hw_addr[2]; data->hw_addr[4] = hw_addr[5]; data->hw_addr[5] = hw_addr[4]; } else { iwl_set_hw_address_family_8000(trans, cfg, data, mac_override, nvm_hw); } if (!is_valid_ether_addr(data->hw_addr)) { IWL_ERR(trans, "no valid mac address was found\n"); return -EINVAL; } IWL_INFO(trans, "base HW address: %pM\n", data->hw_addr); return 0; } static bool iwl_nvm_no_wide_in_5ghz(struct device *dev, const struct iwl_cfg *cfg, const __be16 *nvm_hw) { /* * Workaround a bug in Indonesia SKUs where the regulatory in * some 7000-family OTPs erroneously allow wide channels in * 5GHz. To check for Indonesia, we take the SKU value from * bits 1-4 in the subsystem ID and check if it is either 5 or * 9. In those cases, we need to force-disable wide channels * in 5GHz otherwise the FW will throw a sysassert when we try * to use them. */ if (cfg->device_family == IWL_DEVICE_FAMILY_7000) { /* * Unlike the other sections in the NVM, the hw * section uses big-endian. 
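* For example, a (hypothetical) subsystem ID of 0x1012 yields
		 * sku = (0x1012 & 0x1e) >> 1 = 9, so wide channels get
		 * force-disabled.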
*/ u16 subsystem_id = be16_to_cpup(nvm_hw + SUBSYSTEM_ID); u8 sku = (subsystem_id & 0x1e) >> 1; if (sku == 5 || sku == 9) { IWL_DEBUG_EEPROM(dev, "disabling wide channels in 5GHz (0x%0x %d)\n", subsystem_id, sku); return true; } } return false; } struct iwl_nvm_data * iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, const __be16 *nvm_hw, const __le16 *nvm_sw, const __le16 *nvm_calib, const __le16 *regulatory, const __le16 *mac_override, const __le16 *phy_sku, u8 tx_chains, u8 rx_chains, bool lar_fw_supported) { struct device *dev = trans->dev; struct iwl_nvm_data *data; bool lar_enabled; u32 sku, radio_cfg; u32 sbands_flags = 0; u16 lar_config; const __le16 *ch_section; if (cfg->nvm_type != IWL_NVM_EXT) data = kzalloc(struct_size(data, channels, IWL_NVM_NUM_CHANNELS), GFP_KERNEL); else data = kzalloc(struct_size(data, channels, IWL_NVM_NUM_CHANNELS_EXT), GFP_KERNEL); if (!data) return NULL; data->nvm_version = iwl_get_nvm_version(cfg, nvm_sw); radio_cfg = iwl_get_radio_cfg(cfg, nvm_sw, phy_sku); iwl_set_radio_cfg(cfg, data, radio_cfg); if (data->valid_tx_ant) tx_chains &= data->valid_tx_ant; if (data->valid_rx_ant) rx_chains &= data->valid_rx_ant; sku = iwl_get_sku(cfg, nvm_sw, phy_sku); #ifdef CPTCFG_IWLWIFI_SUPPORT_DEBUG_OVERRIDES if (trans->dbg_cfg.disable_52GHz) /* remove support for 5.2 */ sku &= ~NVM_SKU_CAP_BAND_52GHZ; if (trans->dbg_cfg.disable_24GHz) /* remove support for 2.4 */ sku &= ~NVM_SKU_CAP_BAND_24GHZ; #endif data->sku_cap_band_24ghz_enable = sku & NVM_SKU_CAP_BAND_24GHZ; data->sku_cap_band_52ghz_enable = sku & NVM_SKU_CAP_BAND_52GHZ; data->sku_cap_11n_enable = sku & NVM_SKU_CAP_11N_ENABLE; if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL) data->sku_cap_11n_enable = false; data->sku_cap_11ac_enable = data->sku_cap_11n_enable && (sku & NVM_SKU_CAP_11AC_ENABLE); data->sku_cap_mimo_disabled = sku & NVM_SKU_CAP_MIMO_DISABLE; data->n_hw_addrs = iwl_get_n_hw_addrs(cfg, nvm_sw); if (cfg->nvm_type != IWL_NVM_EXT) { /* Checking for required sections */ if (!nvm_calib) { IWL_ERR(trans, "Can't parse empty Calib NVM sections\n"); kfree(data); return NULL; } ch_section = cfg->nvm_type == IWL_NVM_SDP ? ®ulatory[NVM_CHANNELS_SDP] : &nvm_sw[NVM_CHANNELS]; lar_enabled = true; } else { u16 lar_offset = data->nvm_version < 0xE39 ? 
NVM_LAR_OFFSET_OLD : NVM_LAR_OFFSET; lar_config = le16_to_cpup(regulatory + lar_offset); data->lar_enabled = !!(lar_config & NVM_LAR_ENABLED); lar_enabled = data->lar_enabled; ch_section = ®ulatory[NVM_CHANNELS_EXTENDED]; } /* If no valid mac address was found - bail out */ if (iwl_set_hw_address(trans, cfg, data, nvm_hw, mac_override)) { kfree(data); return NULL; } #ifdef CPTCFG_IWLWIFI_SUPPORT_DEBUG_OVERRIDES iwl_init_he_override(trans, &data->bands[NL80211_BAND_2GHZ]); iwl_init_he_override(trans, &data->bands[NL80211_BAND_5GHZ]); #endif if (lar_fw_supported && lar_enabled) sbands_flags |= IWL_NVM_SBANDS_FLAGS_LAR; if (iwl_nvm_no_wide_in_5ghz(dev, cfg, nvm_hw)) sbands_flags |= IWL_NVM_SBANDS_FLAGS_NO_WIDE_IN_5GHZ; iwl_init_sbands(trans, data, ch_section, tx_chains, rx_chains, sbands_flags, false); data->calib_version = 255; return data; } IWL_EXPORT_SYMBOL(iwl_parse_nvm_data); static u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan, int ch_idx, u16 nvm_flags, const struct iwl_cfg *cfg) { u32 flags = NL80211_RRF_NO_HT40; if (ch_idx < NUM_2GHZ_CHANNELS && (nvm_flags & NVM_CHANNEL_40MHZ)) { if (nvm_chan[ch_idx] <= LAST_2GHZ_HT_PLUS) flags &= ~NL80211_RRF_NO_HT40PLUS; if (nvm_chan[ch_idx] >= FIRST_2GHZ_HT_MINUS) flags &= ~NL80211_RRF_NO_HT40MINUS; } else if (nvm_flags & NVM_CHANNEL_40MHZ) { if ((ch_idx - NUM_2GHZ_CHANNELS) % 2 == 0) flags &= ~NL80211_RRF_NO_HT40PLUS; else flags &= ~NL80211_RRF_NO_HT40MINUS; } if (!(nvm_flags & NVM_CHANNEL_80MHZ)) flags |= NL80211_RRF_NO_80MHZ; if (!(nvm_flags & NVM_CHANNEL_160MHZ)) flags |= NL80211_RRF_NO_160MHZ; if (!(nvm_flags & NVM_CHANNEL_ACTIVE)) flags |= NL80211_RRF_NO_IR; if (nvm_flags & NVM_CHANNEL_RADAR) flags |= NL80211_RRF_DFS; if (nvm_flags & NVM_CHANNEL_INDOOR_ONLY) flags |= NL80211_RRF_NO_OUTDOOR; /* Set the GO concurrent flag only in case that NO_IR is set. * Otherwise it is meaningless */ if ((nvm_flags & NVM_CHANNEL_GO_CONCURRENT) && (flags & NL80211_RRF_NO_IR)) flags |= NL80211_RRF_GO_CONCURRENT; return flags; } struct ieee80211_regdomain * iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, int num_of_ch, __le32 *channels, u16 fw_mcc, u16 geo_info) { int ch_idx; u16 ch_flags; u32 reg_rule_flags, prev_reg_rule_flags = 0; const u16 *nvm_chan; struct ieee80211_regdomain *regd, *copy_rd; struct ieee80211_reg_rule *rule; enum nl80211_band band; int center_freq, prev_center_freq = 0; int valid_rules = 0; bool new_rule; int max_num_ch; if (cfg->uhb_supported) { max_num_ch = IWL_NVM_NUM_CHANNELS_UHB; nvm_chan = iwl_uhb_nvm_channels; } else if (cfg->nvm_type == IWL_NVM_EXT) { max_num_ch = IWL_NVM_NUM_CHANNELS_EXT; nvm_chan = iwl_ext_nvm_channels; } else { max_num_ch = IWL_NVM_NUM_CHANNELS; nvm_chan = iwl_nvm_channels; } if (WARN_ON(num_of_ch > max_num_ch)) num_of_ch = max_num_ch; if (WARN_ON_ONCE(num_of_ch > NL80211_MAX_SUPP_REG_RULES)) return ERR_PTR(-EINVAL); IWL_DEBUG_DEV(dev, IWL_DL_LAR, "building regdom for %d channels\n", num_of_ch); /* build a regdomain rule for every valid channel */ regd = kzalloc(struct_size(regd, reg_rules, num_of_ch), GFP_KERNEL); if (!regd) return ERR_PTR(-ENOMEM); /* set alpha2 from FW. */ regd->alpha2[0] = fw_mcc >> 8; regd->alpha2[1] = fw_mcc & 0xff; for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) { ch_flags = (u16)__le32_to_cpup(channels + ch_idx); band = (ch_idx < NUM_2GHZ_CHANNELS) ? 
NL80211_BAND_2GHZ : NL80211_BAND_5GHZ; center_freq = ieee80211_channel_to_frequency(nvm_chan[ch_idx], band); new_rule = false; if (!(ch_flags & NVM_CHANNEL_VALID)) { iwl_nvm_print_channel_flags(dev, IWL_DL_LAR, nvm_chan[ch_idx], ch_flags); continue; } reg_rule_flags = iwl_nvm_get_regdom_bw_flags(nvm_chan, ch_idx, ch_flags, cfg); /* we can't continue the same rule */ if (ch_idx == 0 || prev_reg_rule_flags != reg_rule_flags || center_freq - prev_center_freq > 20) { valid_rules++; new_rule = true; } rule = ®d->reg_rules[valid_rules - 1]; if (new_rule) rule->freq_range.start_freq_khz = MHZ_TO_KHZ(center_freq - 10); rule->freq_range.end_freq_khz = MHZ_TO_KHZ(center_freq + 10); /* this doesn't matter - not used by FW */ rule->power_rule.max_antenna_gain = DBI_TO_MBI(6); rule->power_rule.max_eirp = DBM_TO_MBM(IWL_DEFAULT_MAX_TX_POWER); rule->flags = reg_rule_flags; /* rely on auto-calculation to merge BW of contiguous chans */ rule->flags |= NL80211_RRF_AUTO_BW; rule->freq_range.max_bandwidth_khz = 0; prev_center_freq = center_freq; prev_reg_rule_flags = reg_rule_flags; iwl_nvm_print_channel_flags(dev, IWL_DL_LAR, nvm_chan[ch_idx], ch_flags); if (!(geo_info & GEO_WMM_ETSI_5GHZ_INFO) || band == NL80211_BAND_2GHZ) continue; reg_query_regdb_wmm(regd->alpha2, center_freq, rule); } regd->n_reg_rules = valid_rules; /* * Narrow down regdom for unused regulatory rules to prevent hole * between reg rules to wmm rules. */ copy_rd = kmemdup(regd, struct_size(regd, reg_rules, valid_rules), GFP_KERNEL); if (!copy_rd) copy_rd = ERR_PTR(-ENOMEM); kfree(regd); return copy_rd; } IWL_EXPORT_SYMBOL(iwl_parse_nvm_mcc_info); #define IWL_MAX_NVM_SECTION_SIZE 0x1b58 #define IWL_MAX_EXT_NVM_SECTION_SIZE 0x1ffc #define MAX_NVM_FILE_LEN 16384 void iwl_nvm_fixups(u32 hw_id, unsigned int section, u8 *data, unsigned int len) { #define IWL_4165_DEVICE_ID 0x5501 #define NVM_SKU_CAP_MIMO_DISABLE BIT(5) if (section == NVM_SECTION_TYPE_PHY_SKU && hw_id == IWL_4165_DEVICE_ID && data && len >= 5 && (data[4] & NVM_SKU_CAP_MIMO_DISABLE)) /* OTP 0x52 bug work around: it's a 1x1 device */ data[3] = ANT_B | (ANT_B << 4); } IWL_EXPORT_SYMBOL(iwl_nvm_fixups); /* * Reads external NVM from a file into mvm->nvm_sections * * HOW TO CREATE THE NVM FILE FORMAT: * ------------------------------ * 1. create hex file, format: * 3800 -> header * 0000 -> header * 5a40 -> data * * rev - 6 bit (word1) * len - 10 bit (word1) * id - 4 bit (word2) * rsv - 12 bit (word2) * * 2. flip 8bits with 8 bits per line to get the right NVM file format * * 3. create binary file from the hex file * * 4. save as "iNVM_xxx.bin" under /lib/firmware */ int iwl_read_external_nvm(struct iwl_trans *trans, const char *nvm_file_name, struct iwl_nvm_section *nvm_sections) { int ret, section_size; u16 section_id; const struct firmware *fw_entry; const struct { __le16 word1; __le16 word2; u8 data[]; } *file_sec; const u8 *eof; u8 *temp; int max_section_size; const __le32 *dword_buff; #define NVM_WORD1_LEN(x) (8 * (x & 0x03FF)) #define NVM_WORD2_ID(x) (x >> 12) #define EXT_NVM_WORD2_LEN(x) (2 * (((x) & 0xFF) << 8 | (x) >> 8)) #define EXT_NVM_WORD1_ID(x) ((x) >> 4) #define NVM_HEADER_0 (0x2A504C54) #define NVM_HEADER_1 (0x4E564D2A) #define NVM_HEADER_SIZE (4 * sizeof(u32)) IWL_DEBUG_EEPROM(trans->dev, "Read from external NVM\n"); /* Maximal size depends on NVM version */ if (trans->cfg->nvm_type != IWL_NVM_EXT) max_section_size = IWL_MAX_NVM_SECTION_SIZE; else max_section_size = IWL_MAX_EXT_NVM_SECTION_SIZE; /* * Obtain NVM image via request_firmware. 
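(The section parsing loop further down boils down to this hedged sketch for the non-EXT layout, using the macros defined above:)
	 */
#if 0
	u16 w1 = le16_to_cpu(file_sec->word1);
	u16 w2 = le16_to_cpu(file_sec->word2);
	int size = 2 * NVM_WORD1_LEN(w1);	/* 10-bit len, 16-byte units */
	u16 id = NVM_WORD2_ID(w2);		/* 4-bit section id */

	/* then size bytes from file_sec->data land in nvm_sections[id] */
#endif
	/*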
Since we already used * request_firmware_nowait() for the firmware binary load and only * get here after that we assume the NVM request can be satisfied * synchronously. */ ret = request_firmware(&fw_entry, nvm_file_name, trans->dev); if (ret) { IWL_ERR(trans, "ERROR: %s isn't available %d\n", nvm_file_name, ret); return ret; } IWL_INFO(trans, "Loaded NVM file %s (%zu bytes)\n", nvm_file_name, fw_entry->size); if (fw_entry->size > MAX_NVM_FILE_LEN) { IWL_ERR(trans, "NVM file too large\n"); ret = -EINVAL; goto out; } eof = fw_entry->data + fw_entry->size; dword_buff = (__le32 *)fw_entry->data; /* Some NVM files contain a header. * The header is identified by two header dwords, as follows: * dword[0] = 0x2A504C54 * dword[1] = 0x4E564D2A * * This header must be skipped when providing the NVM data to the FW. */ if (fw_entry->size > NVM_HEADER_SIZE && dword_buff[0] == cpu_to_le32(NVM_HEADER_0) && dword_buff[1] == cpu_to_le32(NVM_HEADER_1)) { file_sec = (void *)(fw_entry->data + NVM_HEADER_SIZE); IWL_INFO(trans, "NVM Version %08X\n", le32_to_cpu(dword_buff[2])); IWL_INFO(trans, "NVM Manufacturing date %08X\n", le32_to_cpu(dword_buff[3])); /* nvm file validation, dword_buff[2] holds the file version */ if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000 && CSR_HW_REV_STEP(trans->hw_rev) == SILICON_C_STEP && le32_to_cpu(dword_buff[2]) < 0xE4A) { ret = -EFAULT; goto out; } } else { file_sec = (void *)fw_entry->data; } while (true) { if (file_sec->data > eof) { IWL_ERR(trans, "ERROR - NVM file too short for section header\n"); ret = -EINVAL; break; } /* check for EOF marker */ if (!file_sec->word1 && !file_sec->word2) { ret = 0; break; } if (trans->cfg->nvm_type != IWL_NVM_EXT) { section_size = 2 * NVM_WORD1_LEN(le16_to_cpu(file_sec->word1)); section_id = NVM_WORD2_ID(le16_to_cpu(file_sec->word2)); } else { section_size = 2 * EXT_NVM_WORD2_LEN( le16_to_cpu(file_sec->word2)); section_id = EXT_NVM_WORD1_ID( le16_to_cpu(file_sec->word1)); } if (section_size > max_section_size) { IWL_ERR(trans, "ERROR - section too large (%d)\n", section_size); ret = -EINVAL; break; } if (!section_size) { IWL_ERR(trans, "ERROR - section empty\n"); ret = -EINVAL; break; } if (file_sec->data + section_size > eof) { IWL_ERR(trans, "ERROR - NVM file too short for section (%d bytes)\n", section_size); ret = -EINVAL; break; } if (WARN(section_id >= NVM_MAX_NUM_SECTIONS, "Invalid NVM section ID %d\n", section_id)) { ret = -EINVAL; break; } temp = kmemdup(file_sec->data, section_size, GFP_KERNEL); if (!temp) { ret = -ENOMEM; break; } iwl_nvm_fixups(trans->hw_id, section_id, temp, section_size); kfree(nvm_sections[section_id].data); nvm_sections[section_id].data = temp; nvm_sections[section_id].length = section_size; /* advance to the next section */ file_sec = (void *)(file_sec->data + section_size); } out: release_firmware(fw_entry); return ret; } IWL_EXPORT_SYMBOL(iwl_read_external_nvm); struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans, const struct iwl_fw *fw) { struct iwl_nvm_get_info cmd = {}; struct iwl_nvm_data *nvm; struct iwl_host_cmd hcmd = { .flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL, .data = { &cmd, }, .len = { sizeof(cmd) }, .id = WIDE_ID(REGULATORY_AND_NVM_GROUP, NVM_GET_INFO) }; int ret; bool lar_fw_supported = !iwlwifi_mod_params.lar_disable && fw_has_capa(&fw->ucode_capa, IWL_UCODE_TLV_CAPA_LAR_SUPPORT); bool empty_otp; u32 mac_flags; u32 sbands_flags = 0; /* * All the values in iwl_nvm_get_info_rsp v4 are the same as * in v3, except for the channel profile part of the * regulatory. 
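* (In v4 each channel profile entry was widened from __le16 to __le32;
	 * that is what the v4 ternary below accounts for.)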
So we can just access the new struct, with the * exception of the latter. */ struct iwl_nvm_get_info_rsp *rsp; struct iwl_nvm_get_info_rsp_v3 *rsp_v3; bool v4 = fw_has_api(&fw->ucode_capa, IWL_UCODE_TLV_API_REGULATORY_NVM_INFO); size_t rsp_size = v4 ? sizeof(*rsp) : sizeof(*rsp_v3); void *channel_profile; ret = iwl_trans_send_cmd(trans, &hcmd); if (ret) return ERR_PTR(ret); if (WARN(iwl_rx_packet_payload_len(hcmd.resp_pkt) != rsp_size, "Invalid payload len in NVM response from FW %d", iwl_rx_packet_payload_len(hcmd.resp_pkt))) { ret = -EINVAL; goto out; } rsp = (void *)hcmd.resp_pkt->data; empty_otp = !!(le32_to_cpu(rsp->general.flags) & NVM_GENERAL_FLAGS_EMPTY_OTP); if (empty_otp) IWL_INFO(trans, "OTP is empty\n"); nvm = kzalloc(struct_size(nvm, channels, IWL_NUM_CHANNELS), GFP_KERNEL); if (!nvm) { ret = -ENOMEM; goto out; } iwl_set_hw_address_from_csr(trans, nvm); /* TODO: if platform NVM has MAC address - override it here */ #ifdef CPTCFG_IWLWIFI_SUPPORT_DEBUG_OVERRIDES if (trans->dbg_cfg.hw_address.len) { if (trans->dbg_cfg.hw_address.len == ETH_ALEN && is_valid_ether_addr(trans->dbg_cfg.hw_address.data)) memcpy(nvm->hw_addr, trans->dbg_cfg.hw_address.data, ETH_ALEN); else IWL_ERR(trans, "mac address from config file is invalid\n"); } #endif if (!is_valid_ether_addr(nvm->hw_addr)) { IWL_ERR(trans, "no valid mac address was found\n"); ret = -EINVAL; goto err_free; } IWL_INFO(trans, "base HW address: %pM\n", nvm->hw_addr); /* Initialize general data */ nvm->nvm_version = le16_to_cpu(rsp->general.nvm_version); nvm->n_hw_addrs = rsp->general.n_hw_addrs; if (nvm->n_hw_addrs == 0) IWL_WARN(trans, "Firmware declares no reserved mac addresses. OTP is empty: %d\n", empty_otp); /* Initialize MAC sku data */ mac_flags = le32_to_cpu(rsp->mac_sku.mac_sku_flags); nvm->sku_cap_11ac_enable = !!(mac_flags & NVM_MAC_SKU_FLAGS_802_11AC_ENABLED); nvm->sku_cap_11n_enable = !!(mac_flags & NVM_MAC_SKU_FLAGS_802_11N_ENABLED); nvm->sku_cap_11ax_enable = !!(mac_flags & NVM_MAC_SKU_FLAGS_802_11AX_ENABLED); nvm->sku_cap_band_24ghz_enable = !!(mac_flags & NVM_MAC_SKU_FLAGS_BAND_2_4_ENABLED); nvm->sku_cap_band_52ghz_enable = !!(mac_flags & NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED); nvm->sku_cap_mimo_disabled = !!(mac_flags & NVM_MAC_SKU_FLAGS_MIMO_DISABLED); /* Initialize PHY sku data */ nvm->valid_tx_ant = (u8)le32_to_cpu(rsp->phy_sku.tx_chains); nvm->valid_rx_ant = (u8)le32_to_cpu(rsp->phy_sku.rx_chains); if (le32_to_cpu(rsp->regulatory.lar_enabled) && lar_fw_supported) { nvm->lar_enabled = true; sbands_flags |= IWL_NVM_SBANDS_FLAGS_LAR; } #ifdef CPTCFG_IWLWIFI_SUPPORT_DEBUG_OVERRIDES iwl_init_he_override(trans, &nvm->bands[NL80211_BAND_2GHZ]); iwl_init_he_override(trans, &nvm->bands[NL80211_BAND_5GHZ]); #endif rsp_v3 = (void *)rsp; channel_profile = v4 ? (void *)rsp->regulatory.channel_profile : (void *)rsp_v3->regulatory.channel_profile; iwl_init_sbands(trans, nvm, channel_profile, nvm->valid_tx_ant & fw->valid_tx_ant, nvm->valid_rx_ant & fw->valid_rx_ant, sbands_flags, v4); iwl_free_resp(&hcmd); return nvm; err_free: kfree(nvm); out: iwl_free_resp(&hcmd); return ERR_PTR(ret); } IWL_EXPORT_SYMBOL(iwl_get_nvm); backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h000066400000000000000000000125601351064634500274450ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. 
* * GPL LICENSE SUMMARY * * Copyright(c) 2008 - 2015 Intel Corporation. All rights reserved. * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *****************************************************************************/ #ifndef __iwl_nvm_parse_h__ #define __iwl_nvm_parse_h__ #include #include "iwl-eeprom-parse.h" /** * enum iwl_nvm_sbands_flags - modification flags for the channel profiles * * @IWL_NVM_SBANDS_FLAGS_LAR: LAR is enabled * @IWL_NVM_SBANDS_FLAGS_NO_WIDE_IN_5GHZ: disallow 40, 80 and 160MHz on 5GHz */ enum iwl_nvm_sbands_flags { IWL_NVM_SBANDS_FLAGS_LAR = BIT(0), IWL_NVM_SBANDS_FLAGS_NO_WIDE_IN_5GHZ = BIT(1), }; /** * iwl_parse_nvm_data - parse NVM data and return values * * This function parses all NVM values we need and then * returns a (newly allocated) struct containing all the * relevant values for driver use. The struct must be freed * later with iwl_free_nvm_data(). 
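* A hedged usage sketch (the argument names follow the prototype below):
 */
#if 0
	struct iwl_nvm_data *data =
		iwl_parse_nvm_data(trans, cfg, nvm_hw, nvm_sw, nvm_calib,
				   regulatory, mac_override, phy_sku,
				   tx_chains, rx_chains, lar_fw_supported);

	if (!data)
		return -ENOMEM;	/* missing calib section, bad MAC or OOM */
#endif
/*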
*/ struct iwl_nvm_data * iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, const __be16 *nvm_hw, const __le16 *nvm_sw, const __le16 *nvm_calib, const __le16 *regulatory, const __le16 *mac_override, const __le16 *phy_sku, u8 tx_chains, u8 rx_chains, bool lar_fw_supported); /** * iwl_parse_nvm_mcc_info - parse MCC (mobile country code) info coming from FW * * This function parses the regulatory channel data received as a * MCC_UPDATE_CMD command. It returns a newly allocated regulatory domain, * to be fed into the regulatory core. In case geo_info is set, it is handled * accordingly. An ERR_PTR is returned on error. * If not given to the regulatory core, the user is responsible for freeing * the regdomain returned here with kfree. */ struct ieee80211_regdomain * iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, int num_of_ch, __le32 *channels, u16 fw_mcc, u16 geo_info); /** * struct iwl_nvm_section - describes an NVM section in memory. * * This struct holds an NVM section read from the NIC using NVM_ACCESS_CMD, * and saved for later use by the driver. Not all NVM sections are saved * this way, only the needed ones. */ struct iwl_nvm_section { u16 length; const u8 *data; }; /** * iwl_read_external_nvm - Reads external NVM from a file into nvm_sections */ int iwl_read_external_nvm(struct iwl_trans *trans, const char *nvm_file_name, struct iwl_nvm_section *nvm_sections); void iwl_nvm_fixups(u32 hw_id, unsigned int section, u8 *data, unsigned int len); /** * iwl_get_nvm - retrieve NVM data from firmware * * Allocates a new iwl_nvm_data structure, fills it with * NVM data, and returns it to caller. */ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans, const struct iwl_fw *fw); #endif /* __iwl_nvm_parse_h__ */ backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h000066400000000000000000000242751351064634500271010ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2015 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2015 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #ifndef __iwl_op_mode_h__ #define __iwl_op_mode_h__ #include #include #ifdef CPTCFG_IWLWIFI_DEVICE_TESTMODE #include "fw/testmode.h" #endif struct iwl_op_mode; struct iwl_trans; struct sk_buff; struct iwl_device_cmd; struct iwl_host_cmd; struct iwl_rx_cmd_buffer; struct iwl_fw; struct iwl_cfg; struct iwl_tm_data; /** * DOC: Operational mode - what is it ? * * The operational mode (a.k.a. op_mode) is the layer that implements * mac80211's handlers. It knows two APIs: mac80211's and the fw's. It uses * the transport API to access the HW. The op_mode doesn't need to know how the * underlying HW works, since the transport layer takes care of that. * * There can be several op_modes: i.e. different fw APIs will require two * different op_modes. This is why the op_mode is virtualized. */ /** * DOC: Life cycle of the Operational mode * * The operational mode has a very simple life cycle. * * 1) The driver layer (iwl-drv.c) chooses the op_mode based on the * capabilities advertised by the fw file (in TLV format). * 2) The driver layer starts the op_mode (ops->start) * 3) The op_mode registers mac80211 * 4) The op_mode is governed by mac80211 * 5) The driver layer stops the op_mode */ #ifdef CPTCFG_IWLWIFI_DEVICE_TESTMODE /** * struct iwl_test_ops: callback to the op mode * @send_hcmd: Handler that sends host cmd in the specific op_mode. If this * handler is not registered then sending host cmd will not be supported. * @cmd_exec: Handler that is used to execute user's test-mode commands. * It is optional. If this handler is not given, the default handler will * execute. * * The structure defines the callbacks that the op_mode should implement, * in order to handle logic that is outside the scope of iwl_test. 
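* A minimal send_hcmd handler might look like this hedged sketch
 * (my_op_mode_priv is a made-up private struct):
 */
#if 0
static int my_send_hcmd(void *op_mode, struct iwl_host_cmd *host_cmd)
{
	struct my_op_mode_priv *priv = op_mode;

	return iwl_trans_send_cmd(priv->trans, host_cmd);
}
#endif
/*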
*/ struct iwl_test_ops { int (*send_hcmd)(void *op_mode, struct iwl_host_cmd *host_cmd); int (*cmd_exec)(struct iwl_testmode *testmode, u32 cmd, struct iwl_tm_data *data_in, struct iwl_tm_data *data_out, bool *cmd_supported); }; #endif /** * struct iwl_op_mode_ops - op_mode specific operations * * The op_mode exports its ops so that external components can start it and * interact with it. The driver layer typically calls the start and stop * handlers, the transport layer calls the others. * * All the handlers MUST be implemented, except @rx_rss which can be left * out *iff* the opmode will never run on hardware with multi-queue capability. * * @start: start the op_mode. The transport layer is already allocated. * May sleep * @stop: stop the op_mode. Must free all the memory allocated. * May sleep * @rx: Rx notification to the op_mode. rxb is the Rx buffer itself. Cmd is the * HCMD this Rx responds to. Can't sleep. * @rx_rss: data queue RX notification to the op_mode, for (data) notifications * received on the RSS queue(s). The queue parameter indicates which of the * RSS queues received this frame; it will always be non-zero. * This method must not sleep. * @async_cb: called when an ASYNC command with CMD_WANT_ASYNC_CALLBACK set * completes. Must be atomic. * @queue_full: notifies that a HW queue is full. * Must be atomic and called with BH disabled. * @queue_not_full: notifies that a HW queue is not full any more. * Must be atomic and called with BH disabled. * @hw_rf_kill:notifies of a change in the HW rf kill switch. True means that * the radio is killed. Return %true if the device should be stopped by * the transport immediately after the call. May sleep. * @free_skb: allows the transport layer to free skbs that haven't been * reclaimed by the op_mode. This can happen when the driver is freed and * there are Tx packets pending in the transport layer. * Must be atomic * @nic_error: error notification. Must be atomic and must be called with BH * disabled. * @cmd_queue_full: Called when the command queue gets full. Must be atomic and * called with BH disabled. * @nic_config: configure NIC, called before firmware is started. * May sleep * @wimax_active: invoked when WiMax becomes active. May sleep */ struct iwl_op_mode_ops { struct iwl_op_mode *(*start)(struct iwl_trans *trans, const struct iwl_cfg *cfg, const struct iwl_fw *fw, struct dentry *dbgfs_dir); void (*stop)(struct iwl_op_mode *op_mode); void (*rx)(struct iwl_op_mode *op_mode, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb); void (*rx_rss)(struct iwl_op_mode *op_mode, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb, unsigned int queue); void (*async_cb)(struct iwl_op_mode *op_mode, const struct iwl_device_cmd *cmd); void (*queue_full)(struct iwl_op_mode *op_mode, int queue); void (*queue_not_full)(struct iwl_op_mode *op_mode, int queue); bool (*hw_rf_kill)(struct iwl_op_mode *op_mode, bool state); void (*free_skb)(struct iwl_op_mode *op_mode, struct sk_buff *skb); void (*nic_error)(struct iwl_op_mode *op_mode); void (*cmd_queue_full)(struct iwl_op_mode *op_mode); void (*nic_config)(struct iwl_op_mode *op_mode); void (*wimax_active)(struct iwl_op_mode *op_mode); #ifdef CPTCFG_IWLWIFI_DEVICE_TESTMODE struct iwl_test_ops test_ops; #endif }; int iwl_opmode_register(const char *name, const struct iwl_op_mode_ops *ops); void iwl_opmode_deregister(const char *name); /** * struct iwl_op_mode - operational mode * @ops: pointer to its own ops * * This holds an implementation of the mac80211 / fw API. 
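* Registration is by name; a hedged sketch mirroring what the mvm
 * op_mode does at module init:
 */
#if 0
	ret = iwl_opmode_register("iwlmvm", &iwl_mvm_ops);
	/* ... and iwl_opmode_deregister("iwlmvm") on module exit */
#endif
/*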
*/ struct iwl_op_mode { const struct iwl_op_mode_ops *ops; char op_mode_specific[0] __aligned(sizeof(void *)); }; static inline void iwl_op_mode_stop(struct iwl_op_mode *op_mode) { might_sleep(); op_mode->ops->stop(op_mode); } static inline void iwl_op_mode_rx(struct iwl_op_mode *op_mode, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb) { return op_mode->ops->rx(op_mode, napi, rxb); } static inline void iwl_op_mode_rx_rss(struct iwl_op_mode *op_mode, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb, unsigned int queue) { op_mode->ops->rx_rss(op_mode, napi, rxb, queue); } static inline void iwl_op_mode_async_cb(struct iwl_op_mode *op_mode, const struct iwl_device_cmd *cmd) { if (op_mode->ops->async_cb) op_mode->ops->async_cb(op_mode, cmd); } static inline void iwl_op_mode_queue_full(struct iwl_op_mode *op_mode, int queue) { op_mode->ops->queue_full(op_mode, queue); } static inline void iwl_op_mode_queue_not_full(struct iwl_op_mode *op_mode, int queue) { op_mode->ops->queue_not_full(op_mode, queue); } static inline bool __must_check iwl_op_mode_hw_rf_kill(struct iwl_op_mode *op_mode, bool state) { might_sleep(); return op_mode->ops->hw_rf_kill(op_mode, state); } static inline void iwl_op_mode_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb) { op_mode->ops->free_skb(op_mode, skb); } static inline void iwl_op_mode_nic_error(struct iwl_op_mode *op_mode) { op_mode->ops->nic_error(op_mode); } static inline void iwl_op_mode_cmd_queue_full(struct iwl_op_mode *op_mode) { op_mode->ops->cmd_queue_full(op_mode); } static inline void iwl_op_mode_nic_config(struct iwl_op_mode *op_mode) { might_sleep(); op_mode->ops->nic_config(op_mode); } static inline void iwl_op_mode_wimax_active(struct iwl_op_mode *op_mode) { might_sleep(); op_mode->ops->wimax_active(op_mode); } #endif /* __iwl_op_mode_h__ */ backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.c000066400000000000000000000315341351064634500267150ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. 
* * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #include #include #include #include "iwl-drv.h" #include "iwl-phy-db.h" #include "iwl-debug.h" #include "iwl-op-mode.h" #include "iwl-trans.h" #define CHANNEL_NUM_SIZE 4 /* num of channels in calib_ch size */ struct iwl_phy_db_entry { u16 size; u8 *data; }; /** * struct iwl_phy_db - stores phy configuration and calibration data. * * @cfg: phy configuration. * @calib_nch: non channel specific calibration data. * @n_group_papd: number of entries in papd channel group. * @calib_ch_group_papd: calibration data related to papd channel group. * @n_group_txp: number of entries in tx power channel group. * @calib_ch_group_txp: calibration data related to tx power channel group. */ struct iwl_phy_db { struct iwl_phy_db_entry cfg; struct iwl_phy_db_entry calib_nch; int n_group_papd; struct iwl_phy_db_entry *calib_ch_group_papd; int n_group_txp; struct iwl_phy_db_entry *calib_ch_group_txp; struct iwl_trans *trans; }; enum iwl_phy_db_section_type { IWL_PHY_DB_CFG = 1, IWL_PHY_DB_CALIB_NCH, IWL_PHY_DB_UNUSED, IWL_PHY_DB_CALIB_CHG_PAPD, IWL_PHY_DB_CALIB_CHG_TXP, IWL_PHY_DB_MAX }; #define PHY_DB_CMD 0x6c /* for parsing of tx power channel group data that comes from the firmware */ struct iwl_phy_db_chg_txp { __le32 space; __le16 max_channel_idx; } __packed; struct iwl_phy_db *iwl_phy_db_init(struct iwl_trans *trans) { struct iwl_phy_db *phy_db = kzalloc(sizeof(struct iwl_phy_db), GFP_KERNEL); if (!phy_db) return phy_db; phy_db->trans = trans; phy_db->n_group_txp = -1; phy_db->n_group_papd = -1; /* TODO: add default values of the phy db. */ return phy_db; } IWL_EXPORT_SYMBOL(iwl_phy_db_init); /* * get phy db section: returns a pointer to a phy db section specified by * type and channel group id. 
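* The overall life cycle, as a hedged sketch:
 */
#if 0
	struct iwl_phy_db *phy_db = iwl_phy_db_init(trans);

	if (!phy_db)
		return -ENOMEM;
	/* for every CALIB_RES_NOTIF_PHY_DB notification from firmware: */
	ret = iwl_phy_db_set_section(phy_db, pkt);
	/* ... and on teardown: */
	iwl_phy_db_free(phy_db);
#endif
/*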
*/ static struct iwl_phy_db_entry * iwl_phy_db_get_section(struct iwl_phy_db *phy_db, enum iwl_phy_db_section_type type, u16 chg_id) { if (!phy_db || type >= IWL_PHY_DB_MAX) return NULL; switch (type) { case IWL_PHY_DB_CFG: return &phy_db->cfg; case IWL_PHY_DB_CALIB_NCH: return &phy_db->calib_nch; case IWL_PHY_DB_CALIB_CHG_PAPD: if (chg_id >= phy_db->n_group_papd) return NULL; return &phy_db->calib_ch_group_papd[chg_id]; case IWL_PHY_DB_CALIB_CHG_TXP: if (chg_id >= phy_db->n_group_txp) return NULL; return &phy_db->calib_ch_group_txp[chg_id]; default: return NULL; } return NULL; } static void iwl_phy_db_free_section(struct iwl_phy_db *phy_db, enum iwl_phy_db_section_type type, u16 chg_id) { struct iwl_phy_db_entry *entry = iwl_phy_db_get_section(phy_db, type, chg_id); if (!entry) return; kfree(entry->data); entry->data = NULL; entry->size = 0; } void iwl_phy_db_free(struct iwl_phy_db *phy_db) { int i; if (!phy_db) return; iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CFG, 0); iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_NCH, 0); for (i = 0; i < phy_db->n_group_papd; i++) iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CHG_PAPD, i); kfree(phy_db->calib_ch_group_papd); for (i = 0; i < phy_db->n_group_txp; i++) iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CHG_TXP, i); kfree(phy_db->calib_ch_group_txp); kfree(phy_db); } IWL_EXPORT_SYMBOL(iwl_phy_db_free); int iwl_phy_db_set_section(struct iwl_phy_db *phy_db, struct iwl_rx_packet *pkt) { struct iwl_calib_res_notif_phy_db *phy_db_notif = (struct iwl_calib_res_notif_phy_db *)pkt->data; enum iwl_phy_db_section_type type = le16_to_cpu(phy_db_notif->type); u16 size = le16_to_cpu(phy_db_notif->length); struct iwl_phy_db_entry *entry; u16 chg_id = 0; if (!phy_db) return -EINVAL; if (type == IWL_PHY_DB_CALIB_CHG_PAPD) { chg_id = le16_to_cpup((__le16 *)phy_db_notif->data); if (phy_db && !phy_db->calib_ch_group_papd) { /* * Firmware sends the largest index first, so we can use * it to know how much we should allocate. */ phy_db->calib_ch_group_papd = kcalloc(chg_id + 1, sizeof(struct iwl_phy_db_entry), GFP_ATOMIC); if (!phy_db->calib_ch_group_papd) return -ENOMEM; phy_db->n_group_papd = chg_id + 1; } } else if (type == IWL_PHY_DB_CALIB_CHG_TXP) { chg_id = le16_to_cpup((__le16 *)phy_db_notif->data); if (phy_db && !phy_db->calib_ch_group_txp) { /* * Firmware sends the largest index first, so we can use * it to know how much we should allocate. 
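* (Hence the chg_id + 1 sizing below. For reference, channel_id_to_papd()
			 * further down maps e.g. channel 44 to group 1 and
			 * channel 149 to group 3.)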
*/ phy_db->calib_ch_group_txp = kcalloc(chg_id + 1, sizeof(struct iwl_phy_db_entry), GFP_ATOMIC); if (!phy_db->calib_ch_group_txp) return -ENOMEM; phy_db->n_group_txp = chg_id + 1; } } entry = iwl_phy_db_get_section(phy_db, type, chg_id); if (!entry) return -EINVAL; kfree(entry->data); entry->data = kmemdup(phy_db_notif->data, size, GFP_ATOMIC); if (!entry->data) { entry->size = 0; return -ENOMEM; } entry->size = size; IWL_DEBUG_INFO(phy_db->trans, "%s(%d): [PHYDB]SET: Type %d , Size: %d\n", __func__, __LINE__, type, size); return 0; } IWL_EXPORT_SYMBOL(iwl_phy_db_set_section); static int is_valid_channel(u16 ch_id) { if (ch_id <= 14 || (36 <= ch_id && ch_id <= 64 && ch_id % 4 == 0) || (100 <= ch_id && ch_id <= 140 && ch_id % 4 == 0) || (145 <= ch_id && ch_id <= 165 && ch_id % 4 == 1)) return 1; return 0; } static u8 ch_id_to_ch_index(u16 ch_id) { if (WARN_ON(!is_valid_channel(ch_id))) return 0xff; if (ch_id <= 14) return ch_id - 1; if (ch_id <= 64) return (ch_id + 20) / 4; if (ch_id <= 140) return (ch_id - 12) / 4; return (ch_id - 13) / 4; } static u16 channel_id_to_papd(u16 ch_id) { if (WARN_ON(!is_valid_channel(ch_id))) return 0xff; if (1 <= ch_id && ch_id <= 14) return 0; if (36 <= ch_id && ch_id <= 64) return 1; if (100 <= ch_id && ch_id <= 140) return 2; return 3; } static u16 channel_id_to_txp(struct iwl_phy_db *phy_db, u16 ch_id) { struct iwl_phy_db_chg_txp *txp_chg; int i; u8 ch_index = ch_id_to_ch_index(ch_id); if (ch_index == 0xff) return 0xff; for (i = 0; i < phy_db->n_group_txp; i++) { txp_chg = (void *)phy_db->calib_ch_group_txp[i].data; if (!txp_chg) return 0xff; /* * Looking for the first channel group whose max channel is * higher than the wanted channel. */ if (le16_to_cpu(txp_chg->max_channel_idx) >= ch_index) return i; } return 0xff; } #if !IS_ENABLED(CPTCFG_IWLXVT) static #endif int iwl_phy_db_get_section_data(struct iwl_phy_db *phy_db, u32 type, u8 **data, u16 *size, u16 ch_id) { struct iwl_phy_db_entry *entry; u16 ch_group_id = 0; if (!phy_db) return -EINVAL; /* find wanted channel group */ if (type == IWL_PHY_DB_CALIB_CHG_PAPD) ch_group_id = channel_id_to_papd(ch_id); else if (type == IWL_PHY_DB_CALIB_CHG_TXP) ch_group_id = channel_id_to_txp(phy_db, ch_id); entry = iwl_phy_db_get_section(phy_db, type, ch_group_id); if (!entry) return -EINVAL; *data = entry->data; *size = entry->size; IWL_DEBUG_INFO(phy_db->trans, "%s(%d): [PHYDB] GET: Type %d , Size: %d\n", __func__, __LINE__, type, *size); return 0; } #if IS_ENABLED(CPTCFG_IWLXVT) IWL_EXPORT_SYMBOL(iwl_phy_db_get_section_data); #endif static int iwl_send_phy_db_cmd(struct iwl_phy_db *phy_db, u16 type, u16 length, void *data) { struct iwl_phy_db_cmd phy_db_cmd; struct iwl_host_cmd cmd = { .id = PHY_DB_CMD, }; IWL_DEBUG_INFO(phy_db->trans, "Sending PHY-DB hcmd of type %d, of length %d\n", type, length); /* Set phy db cmd variables */ phy_db_cmd.type = cpu_to_le16(type); phy_db_cmd.length = cpu_to_le16(length); /* Set hcmd variables */ cmd.data[0] = &phy_db_cmd; cmd.len[0] = sizeof(struct iwl_phy_db_cmd); cmd.data[1] = data; cmd.len[1] = length; cmd.dataflags[1] = IWL_HCMD_DFL_NOCOPY; return iwl_trans_send_cmd(phy_db->trans, &cmd); } static int iwl_phy_db_send_all_channel_groups( struct iwl_phy_db *phy_db, enum iwl_phy_db_section_type type, u8 max_ch_groups) { u16 i; int err; struct iwl_phy_db_entry *entry; /* Send all the channel specific groups to operational fw */ for (i = 0; i < max_ch_groups; i++) { entry = iwl_phy_db_get_section(phy_db, type, i); if (!entry) return -EINVAL; if (!entry->size) continue; /* Send the
requested PHY DB section */ err = iwl_send_phy_db_cmd(phy_db, type, entry->size, entry->data); if (err) { IWL_ERR(phy_db->trans, "Can't SEND phy_db section %d (%d), err %d\n", type, i, err); return err; } IWL_DEBUG_INFO(phy_db->trans, "Sent PHY_DB HCMD, type = %d num = %d\n", type, i); } return 0; } int iwl_send_phy_db_data(struct iwl_phy_db *phy_db) { u8 *data = NULL; u16 size = 0; int err; IWL_DEBUG_INFO(phy_db->trans, "Sending phy db data and configuration to runtime image\n"); /* Send PHY DB CFG section */ err = iwl_phy_db_get_section_data(phy_db, IWL_PHY_DB_CFG, &data, &size, 0); if (err) { IWL_ERR(phy_db->trans, "Cannot get Phy DB cfg section\n"); return err; } err = iwl_send_phy_db_cmd(phy_db, IWL_PHY_DB_CFG, size, data); if (err) { IWL_ERR(phy_db->trans, "Cannot send HCMD of Phy DB cfg section\n"); return err; } err = iwl_phy_db_get_section_data(phy_db, IWL_PHY_DB_CALIB_NCH, &data, &size, 0); if (err) { IWL_ERR(phy_db->trans, "Cannot get Phy DB non specific channel section\n"); return err; } err = iwl_send_phy_db_cmd(phy_db, IWL_PHY_DB_CALIB_NCH, size, data); if (err) { IWL_ERR(phy_db->trans, "Cannot send HCMD of Phy DB non specific channel section\n"); return err; } /* Send all the PAPD channel specific data */ err = iwl_phy_db_send_all_channel_groups(phy_db, IWL_PHY_DB_CALIB_CHG_PAPD, phy_db->n_group_papd); if (err) { IWL_ERR(phy_db->trans, "Cannot send channel specific PAPD groups\n"); return err; } /* Send all the TXP channel specific data */ err = iwl_phy_db_send_all_channel_groups(phy_db, IWL_PHY_DB_CALIB_CHG_TXP, phy_db->n_group_txp); if (err) { IWL_ERR(phy_db->trans, "Cannot send channel specific TX power groups\n"); return err; } IWL_DEBUG_INFO(phy_db->trans, "Finished sending phy db non channel data\n"); return 0; } IWL_EXPORT_SYMBOL(iwl_send_phy_db_data); backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.h000066400000000000000000000063201351064634500267150ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution.
* * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #ifndef __IWL_PHYDB_H__ #define __IWL_PHYDB_H__ #include #include "iwl-op-mode.h" #include "iwl-trans.h" struct iwl_phy_db *iwl_phy_db_init(struct iwl_trans *trans); void iwl_phy_db_free(struct iwl_phy_db *phy_db); int iwl_phy_db_set_section(struct iwl_phy_db *phy_db, struct iwl_rx_packet *pkt); #if IS_ENABLED(CPTCFG_IWLXVT) int iwl_phy_db_get_section_data(struct iwl_phy_db *phy_db, u32 type, u8 **data, u16 *size, u16 ch_id); #endif int iwl_send_phy_db_data(struct iwl_phy_db *phy_db); #endif /* __IWL_PHYDB_H__ */ backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/iwl-prph.h000066400000000000000000000412711351064634500265070ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 Intel Deutschland GmbH * Copyright (C) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 Intel Deutschland GmbH * Copyright (C) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. 
* * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *****************************************************************************/ #ifndef __iwl_prph_h__ #define __iwl_prph_h__ #include /* * Registers in this file are internal, not PCI bus memory mapped. * Driver accesses these via HBUS_TARG_PRPH_* registers. */ #define PRPH_BASE (0x00000) #define PRPH_END (0xFFFFF) /* APMG (power management) constants */ #define APMG_BASE (PRPH_BASE + 0x3000) #define APMG_CLK_CTRL_REG (APMG_BASE + 0x0000) #define APMG_CLK_EN_REG (APMG_BASE + 0x0004) #define APMG_CLK_DIS_REG (APMG_BASE + 0x0008) #define APMG_PS_CTRL_REG (APMG_BASE + 0x000c) #define APMG_PCIDEV_STT_REG (APMG_BASE + 0x0010) #define APMG_RFKILL_REG (APMG_BASE + 0x0014) #define APMG_RTC_INT_STT_REG (APMG_BASE + 0x001c) #define APMG_RTC_INT_MSK_REG (APMG_BASE + 0x0020) #define APMG_DIGITAL_SVR_REG (APMG_BASE + 0x0058) #define APMG_ANALOG_SVR_REG (APMG_BASE + 0x006C) #define APMS_CLK_VAL_MRB_FUNC_MODE (0x00000001) #define APMG_CLK_VAL_DMA_CLK_RQT (0x00000200) #define APMG_CLK_VAL_BSM_CLK_RQT (0x00000800) #define APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS (0x00400000) #define APMG_PS_CTRL_VAL_RESET_REQ (0x04000000) #define APMG_PS_CTRL_MSK_PWR_SRC (0x03000000) #define APMG_PS_CTRL_VAL_PWR_SRC_VMAIN (0x00000000) #define APMG_PS_CTRL_VAL_PWR_SRC_VAUX (0x02000000) #define APMG_SVR_VOLTAGE_CONFIG_BIT_MSK (0x000001E0) /* bit 8:5 */ #define APMG_SVR_DIGITAL_VOLTAGE_1_32 (0x00000060) #define APMG_PCIDEV_STT_VAL_PERSIST_DIS (0x00000200) #define APMG_PCIDEV_STT_VAL_L1_ACT_DIS (0x00000800) #define APMG_PCIDEV_STT_VAL_WAKE_ME (0x00004000) #define APMG_RTC_INT_STT_RFKILL (0x10000000) /* Device system time */ #define DEVICE_SYSTEM_TIME_REG 0xA0206C /* Device NMI register and value for 8000 family and lower hw's */ #define DEVICE_SET_NMI_REG 0x00a01c30 #define DEVICE_SET_NMI_VAL_DRV BIT(7) /* Device NMI register and value for 9000 family and above hw's */ #define UREG_NIC_SET_NMI_DRIVER 0x00a05c10 #define UREG_NIC_SET_NMI_DRIVER_NMI_FROM_DRIVER_MSK 0xff000000 /* Shared registers (0x0..0x3ff, via target indirect or periphery */ #define SHR_BASE 0x00a10000 /* Shared GP1 register */ #define SHR_APMG_GP1_REG 0x01dc #define SHR_APMG_GP1_REG_PRPH (SHR_BASE + SHR_APMG_GP1_REG) #define SHR_APMG_GP1_WF_XTAL_LP_EN 0x00000004 #define SHR_APMG_GP1_CHICKEN_BIT_SELECT 0x80000000 /* Shared DL_CFG register */ #define SHR_APMG_DL_CFG_REG 0x01c4 #define 
SHR_APMG_DL_CFG_REG_PRPH (SHR_BASE + SHR_APMG_DL_CFG_REG) #define SHR_APMG_DL_CFG_RTCS_CLK_SELECTOR_MSK 0x000000c0 #define SHR_APMG_DL_CFG_RTCS_CLK_INTERNAL_XTAL 0x00000080 #define SHR_APMG_DL_CFG_DL_CLOCK_POWER_UP 0x00000100 /* Shared APMG_XTAL_CFG register */ #define SHR_APMG_XTAL_CFG_REG 0x1c0 #define SHR_APMG_XTAL_CFG_XTAL_ON_REQ 0x80000000 /* * Device reset for family 8000 * write to bit 24 in order to reset the CPU */ #define RELEASE_CPU_RESET (0x300C) #define RELEASE_CPU_RESET_BIT BIT(24) /***************************************************************************** * 7000/3000 series SHR DTS addresses * *****************************************************************************/ #define SHR_MISC_WFM_DTS_EN (0x00a10024) #define DTSC_CFG_MODE (0x00a10604) #define DTSC_VREF_AVG (0x00a10648) #define DTSC_VREF5_AVG (0x00a1064c) #define DTSC_CFG_MODE_PERIODIC (0x2) #define DTSC_PTAT_AVG (0x00a10650) /** * Tx Scheduler * * The Tx Scheduler selects the next frame to be transmitted, choosing TFDs * (Transmit Frame Descriptors) from up to 16 circular Tx queues resident in * host DRAM. It steers each frame's Tx command (which contains the frame * data) into one of up to 7 prioritized Tx DMA FIFO channels within the * device. A queue maps to only one (selectable by driver) Tx DMA channel, * but one DMA channel may take input from several queues. * * Tx DMA FIFOs have dedicated purposes. * * For 5000 series and up, they are used differently * (cf. iwl5000_default_queue_to_tx_fifo in iwl-5000.c): * * 0 -- EDCA BK (background) frames, lowest priority * 1 -- EDCA BE (best effort) frames, normal priority * 2 -- EDCA VI (video) frames, higher priority * 3 -- EDCA VO (voice) and management frames, highest priority * 4 -- unused * 5 -- unused * 6 -- unused * 7 -- Commands * * Driver should normally map queues 0-6 to Tx DMA/FIFO channels 0-6. * In addition, driver can map the remaining queues to Tx DMA/FIFO * channels 0-3 to support 11n aggregation via EDCA DMA channels. * * The driver sets up each queue to work in one of two modes: * * 1) Scheduler-Ack, in which the scheduler automatically supports a * block-ack (BA) window of up to 64 TFDs. In this mode, each queue * contains TFDs for a unique combination of Recipient Address (RA) * and Traffic Identifier (TID), that is, traffic of a given * Quality-Of-Service (QOS) priority, destined for a single station. * * In scheduler-ack mode, the scheduler keeps track of the Tx status of * each frame within the BA window, including whether it's been transmitted, * and whether it's been acknowledged by the receiving station. The device * automatically processes block-acks received from the receiving STA, * and reschedules un-acked frames to be retransmitted (successful * Tx completion may end up being out-of-order). * * The driver must maintain the queue's Byte Count table in host DRAM * for this mode. * This mode does not support fragmentation. * * 2) FIFO (a.k.a. non-Scheduler-ACK), in which each TFD is processed in order. * The device may automatically retry Tx, but will retry only one frame * at a time, until receiving ACK from receiving station, or reaching * retry limit and giving up. * * The command queue (#4/#9) must use this mode! * This mode does not require use of the Byte Count table in host DRAM. 
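 *
 * As a rough illustration of the two modes (pseudo-code, not driver
 * code; the helper names here are hypothetical): an active block-ack
 * session maps one RA/TID pair to a scheduler-ack queue, while
 * command/management traffic stays on plain FIFO queues:
 *
 *	if (ba_session_active(sta, tid))
 *		txq = scd_ack_queue(sta, tid);	// BA window of up to 64 TFDs
 *	else
 *		txq = fifo_queue_for(skb);	// strictly in-order delivery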
* * Driver controls scheduler operation via 3 means: * 1) Scheduler registers * 2) Shared scheduler data base in internal SRAM * 3) Shared data in host DRAM * * Initialization: * * When loading, driver should allocate memory for: * 1) 16 TFD circular buffers, each with space for (typically) 256 TFDs. * 2) 16 Byte Count circular buffers in 16 KBytes contiguous memory * (1024 bytes for each queue). * * After receiving "Alive" response from uCode, driver must initialize * the scheduler (especially for queue #4/#9, the command queue, otherwise * the driver can't issue commands!): */ #define SCD_MEM_LOWER_BOUND (0x0000) /** * Max Tx window size is the max number of contiguous TFDs that the scheduler * can keep track of at one time when creating block-ack chains of frames. * Note that "64" matches the number of ack bits in a block-ack packet. */ #define SCD_WIN_SIZE 64 #define SCD_FRAME_LIMIT 64 #define SCD_TXFIFO_POS_TID (0) #define SCD_TXFIFO_POS_RA (4) #define SCD_QUEUE_RA_TID_MAP_RATID_MSK (0x01FF) /* agn SCD */ #define SCD_QUEUE_STTS_REG_POS_TXF (0) #define SCD_QUEUE_STTS_REG_POS_ACTIVE (3) #define SCD_QUEUE_STTS_REG_POS_WSL (4) #define SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN (19) #define SCD_QUEUE_STTS_REG_MSK (0x017F0000) #define SCD_QUEUE_CTX_REG1_CREDIT (0x00FFFF00) #define SCD_QUEUE_CTX_REG1_SUPER_CREDIT (0xFF000000) #define SCD_QUEUE_CTX_REG1_VAL(_n, _v) FIELD_PREP(SCD_QUEUE_CTX_REG1_ ## _n, _v) #define SCD_QUEUE_CTX_REG2_WIN_SIZE (0x0000007F) #define SCD_QUEUE_CTX_REG2_FRAME_LIMIT (0x007F0000) #define SCD_QUEUE_CTX_REG2_VAL(_n, _v) FIELD_PREP(SCD_QUEUE_CTX_REG2_ ## _n, _v) #define SCD_GP_CTRL_ENABLE_31_QUEUES BIT(0) #define SCD_GP_CTRL_DRAM_BC_TABLE_DUP_DIS BIT(16) #define SCD_GP_CTRL_AUTO_ACTIVE_MODE BIT(18) /* Context Data */ #define SCD_CONTEXT_MEM_LOWER_BOUND (SCD_MEM_LOWER_BOUND + 0x600) #define SCD_CONTEXT_MEM_UPPER_BOUND (SCD_MEM_LOWER_BOUND + 0x6A0) /* Tx status */ #define SCD_TX_STTS_MEM_LOWER_BOUND (SCD_MEM_LOWER_BOUND + 0x6A0) #define SCD_TX_STTS_MEM_UPPER_BOUND (SCD_MEM_LOWER_BOUND + 0x7E0) /* Translation Data */ #define SCD_TRANS_TBL_MEM_LOWER_BOUND (SCD_MEM_LOWER_BOUND + 0x7E0) #define SCD_TRANS_TBL_MEM_UPPER_BOUND (SCD_MEM_LOWER_BOUND + 0x808) #define SCD_CONTEXT_QUEUE_OFFSET(x)\ (SCD_CONTEXT_MEM_LOWER_BOUND + ((x) * 8)) #define SCD_TX_STTS_QUEUE_OFFSET(x)\ (SCD_TX_STTS_MEM_LOWER_BOUND + ((x) * 16)) #define SCD_TRANS_TBL_OFFSET_QUEUE(x) \ ((SCD_TRANS_TBL_MEM_LOWER_BOUND + ((x) * 2)) & 0xfffc) #define SCD_BASE (PRPH_BASE + 0xa02c00) #define SCD_SRAM_BASE_ADDR (SCD_BASE + 0x0) #define SCD_DRAM_BASE_ADDR (SCD_BASE + 0x8) #define SCD_AIT (SCD_BASE + 0x0c) #define SCD_TXFACT (SCD_BASE + 0x10) #define SCD_ACTIVE (SCD_BASE + 0x14) #define SCD_QUEUECHAIN_SEL (SCD_BASE + 0xe8) #define SCD_CHAINEXT_EN (SCD_BASE + 0x244) #define SCD_AGGR_SEL (SCD_BASE + 0x248) #define SCD_INTERRUPT_MASK (SCD_BASE + 0x108) #define SCD_CB_SIZE (SCD_BASE + 0x1a4) #define SCD_GP_CTRL (SCD_BASE + 0x1a8) #define SCD_EN_CTRL (SCD_BASE + 0x254) /*********************** END TX SCHEDULER *************************************/ /* Oscillator clock */ #define OSC_CLK (0xa04068) #define OSC_CLK_FORCE_CONTROL (0x8) #define FH_UCODE_LOAD_STATUS (0x1AF0) /* * Replacing FH_UCODE_LOAD_STATUS * This register is written by the driver and is read by uCode during boot flow. * Note this address is cleared after MAC reset.
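 *
 * Illustrative use (a sketch only - the real sequencing lives in the
 * per-family transport code, and "expected" here is a hypothetical
 * value, e.g. "all sections loaded" for both CPUs):
 *
 *	while (iwl_read_prph(trans, UREG_UCODE_LOAD_STATUS) != expected)
 *		udelay(10);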
*/ #define UREG_UCODE_LOAD_STATUS (0xa05c40) #define UREG_CPU_INIT_RUN (0xa05c44) #define LMPM_SECURE_UCODE_LOAD_CPU1_HDR_ADDR (0x1E78) #define LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR (0x1E7C) #define LMPM_SECURE_CPU1_HDR_MEM_SPACE (0x420000) #define LMPM_SECURE_CPU2_HDR_MEM_SPACE (0x420400) #define LMAC2_PRPH_OFFSET (0x100000) /* Rx FIFO */ #define RXF_SIZE_ADDR (0xa00c88) #define RXF_RD_D_SPACE (0xa00c40) #define RXF_RD_WR_PTR (0xa00c50) #define RXF_RD_RD_PTR (0xa00c54) #define RXF_RD_FENCE_PTR (0xa00c4c) #define RXF_SET_FENCE_MODE (0xa00c14) #define RXF_LD_WR2FENCE (0xa00c1c) #define RXF_FIFO_RD_FENCE_INC (0xa00c68) #define RXF_SIZE_BYTE_CND_POS (7) #define RXF_SIZE_BYTE_CNT_MSK (0x3ff << RXF_SIZE_BYTE_CND_POS) #define RXF_DIFF_FROM_PREV (0x200) #define RXF_LD_FENCE_OFFSET_ADDR (0xa00c10) #define RXF_FIFO_RD_FENCE_ADDR (0xa00c0c) /* Tx FIFO */ #define TXF_FIFO_ITEM_CNT (0xa00438) #define TXF_WR_PTR (0xa00414) #define TXF_RD_PTR (0xa00410) #define TXF_FENCE_PTR (0xa00418) #define TXF_LOCK_FENCE (0xa00424) #define TXF_LARC_NUM (0xa0043c) #define TXF_READ_MODIFY_DATA (0xa00448) #define TXF_READ_MODIFY_ADDR (0xa0044c) /* UMAC Internal Tx Fifo */ #define TXF_CPU2_FIFO_ITEM_CNT (0xA00538) #define TXF_CPU2_WR_PTR (0xA00514) #define TXF_CPU2_RD_PTR (0xA00510) #define TXF_CPU2_FENCE_PTR (0xA00518) #define TXF_CPU2_LOCK_FENCE (0xA00524) #define TXF_CPU2_NUM (0xA0053C) #define TXF_CPU2_READ_MODIFY_DATA (0xA00548) #define TXF_CPU2_READ_MODIFY_ADDR (0xA0054C) /* Radio registers access */ #define RSP_RADIO_CMD (0xa02804) #define RSP_RADIO_RDDAT (0xa02814) #define RADIO_RSP_ADDR_POS (6) #define RADIO_RSP_RD_CMD (3) /* FW monitor */ #define MON_BUFF_SAMPLE_CTL (0xa03c00) #define MON_BUFF_BASE_ADDR (0xa03c1c) #define MON_BUFF_END_ADDR (0xa03c40) #define MON_BUFF_WRPTR (0xa03c44) #define MON_BUFF_CYCLE_CNT (0xa03c48) /* FW monitor family 8000 and on */ #define MON_BUFF_BASE_ADDR_VER2 (0xa03c1c) #define MON_BUFF_END_ADDR_VER2 (0xa03c20) #define MON_BUFF_WRPTR_VER2 (0xa03c24) #define MON_BUFF_CYCLE_CNT_VER2 (0xa03c28) #define MON_BUFF_SHIFT_VER2 (0x8) /* FW monitor family AX210 and on */ #define DBGC_CUR_DBGBUF_BASE_ADDR_LSB (0xd03c20) #define DBGC_CUR_DBGBUF_BASE_ADDR_MSB (0xd03c24) #define DBGC_CUR_DBGBUF_STATUS (0xd03c1c) #define DBGC_DBGBUF_WRAP_AROUND (0xd03c2c) #define DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK (0x00ffffff) #define MON_DMARB_RD_CTL_ADDR (0xa03c60) #define MON_DMARB_RD_DATA_ADDR (0xa03c5c) #define DBGC_IN_SAMPLE (0xa03c00) #define DBGC_OUT_CTRL (0xa03c0c) /* enable the ID buf for read */ #define WFPM_PS_CTL_CLR 0xA0300C #define WFMP_MAC_ADDR_0 0xA03080 #define WFMP_MAC_ADDR_1 0xA03084 #define LMPM_PMG_EN 0xA01CEC #define RADIO_REG_SYS_MANUAL_DFT_0 0xAD4078 #define RFIC_REG_RD 0xAD0470 #define WFPM_CTRL_REG 0xA03030 #define WFPM_GP2 0xA030B4 enum { ENABLE_WFPM = BIT(31), WFPM_AUX_CTL_AUX_IF_MAC_OWNER_MSK = 0x80000000, }; #define CNVI_AUX_MISC_CHIP 0xA200B0 #define CNVR_AUX_MISC_CHIP 0xA2B800 #define CNVR_SCU_SD_REGS_SD_REG_DIG_DCDC_VTRIM 0xA29890 #define CNVR_SCU_SD_REGS_SD_REG_ACTIVE_VDIG_MIRROR 0xA29938 enum { HW_STEP_LOCATION_BITS = 24, }; #define AUX_MISC_MASTER1_EN 0xA20818 enum aux_misc_master1_en { AUX_MISC_MASTER1_EN_SBE_MSK = 0x1, }; #define AUX_MISC_MASTER1_SMPHR_STATUS 0xA20800 #define RSA_ENABLE 0xA24B08 #define PREG_AUX_BUS_WPROT_0 0xA04CC0 /* device family 9000 WPROT register */ #define PREG_PRPH_WPROT_9000 0xA04CE0 /* device family 22000 WPROT register */ #define PREG_PRPH_WPROT_22000 0xA04D00 #define SB_CFG_OVERRIDE_ADDR 0xA26C78 #define SB_CFG_OVERRIDE_ENABLE 0x8000 #define
SB_CFG_BASE_OVERRIDE 0xA20000 #define SB_MODIFY_CFG_FLAG 0xA03088 #define SB_CPU_1_STATUS 0xA01E30 #define SB_CPU_2_STATUS 0xA01E34 #define UMAG_SB_CPU_1_STATUS 0xA038C0 #define UMAG_SB_CPU_2_STATUS 0xA038C4 #define UMAG_GEN_HW_STATUS 0xA038C8 /* For UMAG_GEN_HW_STATUS reg check */ enum { UMAG_GEN_HW_IS_FPGA = BIT(1), }; /* FW chicken bits */ #define LMPM_CHICK 0xA01FF8 enum { LMPM_CHICK_EXTENDED_ADDR_SPACE = BIT(0), }; #define UREG_CHICK (0xA05C00) #define UREG_CHICK_MSI_ENABLE BIT(24) #define UREG_CHICK_MSIX_ENABLE BIT(25) #define SD_REG_VER 0xA29600 #define REG_VER_RF_ID_JF 0x4900 #define HPM_DEBUG 0xA03440 #define PERSISTENCE_BIT BIT(12) #define PREG_WFPM_ACCESS BIT(12) #define UREG_DOORBELL_TO_ISR6 0xA05C04 #define UREG_DOORBELL_TO_ISR6_NMI_BIT BIT(0) #define FSEQ_ERROR_CODE 0xA340C8 #define FSEQ_TOP_INIT_VERSION 0xA34038 #define FSEQ_CNVIO_INIT_VERSION 0xA3403C #define FSEQ_OTP_VERSION 0xA340FC #define FSEQ_TOP_CONTENT_VERSION 0xA340F4 #define FSEQ_ALIVE_TOKEN 0xA340F0 #define FSEQ_CNVI_ID 0xA3408C #define FSEQ_CNVR_ID 0xA34090 #endif /* __iwl_prph_h__ */ backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/iwl-scd.h000066400000000000000000000107241351064634500263060ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2014 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #ifndef __iwl_scd_h__ #define __iwl_scd_h__ #include "iwl-trans.h" #include "iwl-io.h" #include "iwl-prph.h" static inline void iwl_scd_txq_set_chain(struct iwl_trans *trans, u16 txq_id) { iwl_set_bits_prph(trans, SCD_QUEUECHAIN_SEL, BIT(txq_id)); } static inline void iwl_scd_txq_enable_agg(struct iwl_trans *trans, u16 txq_id) { iwl_set_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id)); } static inline void iwl_scd_txq_disable_agg(struct iwl_trans *trans, u16 txq_id) { iwl_clear_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id)); } static inline void iwl_scd_disable_agg(struct iwl_trans *trans) { iwl_set_bits_prph(trans, SCD_AGGR_SEL, 0); } static inline void iwl_scd_activate_fifos(struct iwl_trans *trans) { iwl_write_prph(trans, SCD_TXFACT, IWL_MASK(0, 7)); } static inline void iwl_scd_deactivate_fifos(struct iwl_trans *trans) { iwl_write_prph(trans, SCD_TXFACT, 0); } static inline void iwl_scd_enable_set_active(struct iwl_trans *trans, u32 value) { iwl_write_prph(trans, SCD_EN_CTRL, value); } static inline unsigned int SCD_QUEUE_WRPTR(unsigned int chnl) { if (chnl < 20) return SCD_BASE + 0x18 + chnl * 4; WARN_ON_ONCE(chnl >= 32); return SCD_BASE + 0x284 + (chnl - 20) * 4; } static inline unsigned int SCD_QUEUE_RDPTR(unsigned int chnl) { if (chnl < 20) return SCD_BASE + 0x68 + chnl * 4; WARN_ON_ONCE(chnl >= 32); return SCD_BASE + 0x2B4 + chnl * 4; } static inline unsigned int SCD_QUEUE_STATUS_BITS(unsigned int chnl) { if (chnl < 20) return SCD_BASE + 0x10c + chnl * 4; WARN_ON_ONCE(chnl >= 32); return SCD_BASE + 0x334 + chnl * 4; } static inline void iwl_scd_txq_set_inactive(struct iwl_trans *trans, u16 txq_id) { iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id), (0 << SCD_QUEUE_STTS_REG_POS_ACTIVE)| (1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN)); } #endif backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/iwl-tm-gnl.c000066400000000000000000000745651351064634500267430ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2010 - 2014, 2018 - 2019 Intel Corporation * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. 
* * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2010 - 2014, 2018 - 2019 Intel Corporation * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #include #include #include "iwl-drv.h" #include "iwl-io.h" #include "iwl-fh.h" #include "iwl-prph.h" #include "iwl-trans.h" #include "iwl-op-mode.h" #include "iwl-tm-gnl.h" #include "iwl-tm-infc.h" #include "iwl-dnt-cfg.h" #include "iwl-dnt-dispatch.h" #include "iwl-csr.h" /** * iwl_tm_validate_fw_cmd() - Validates FW host command input data * @data_in: Input to be validated * */ static int iwl_tm_validate_fw_cmd(struct iwl_tm_data *data_in) { struct iwl_tm_cmd_request *cmd_req; u32 data_buf_size; if (!data_in->data || (data_in->len < sizeof(struct iwl_tm_cmd_request))) return -EINVAL; cmd_req = (struct iwl_tm_cmd_request *)data_in->data; data_buf_size = data_in->len - sizeof(struct iwl_tm_cmd_request); if (data_buf_size < cmd_req->len) return -EINVAL; return 0; } /** * iwl_tm_validate_reg_ops() - Checks the input data for registers operations * @data_in: data is cast to an iwl_tm_regs_request; len is * the size of the request struct in bytes. */ static int iwl_tm_validate_reg_ops(struct iwl_tm_data *data_in) { struct iwl_tm_regs_request *request; u32 request_size; u32 idx; if (!data_in->data || (data_in->len < sizeof(struct iwl_tm_regs_request))) return -EINVAL; request = (struct iwl_tm_regs_request *)(data_in->data); request_size = sizeof(struct iwl_tm_regs_request) + request->num * sizeof(struct iwl_tm_reg_op); if (data_in->len < request_size) return -EINVAL; /* * Calculate result size - result is returned only for read ops * Also, verify the inputs */ for (idx = 0; idx < request->num; idx++) { if (request->reg_ops[idx].op_type >= IWL_TM_REG_OP_MAX) return -EINVAL; /* * Allow access only to FH/CSR/HBUS in direct mode.
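 * (For example, a read op targeting CSR_HW_REV at offset 0x028 in
 * direct mode passes this check, while an absolute PRPH address is
 * expected to use the PRPH access ops instead - illustrative only.)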
* Since we don't have the upper bounds for the CSR * and HBUS segments, we will use only the upper * bound of FH for sanity check. */ if (!IS_AL_ADDR(request->reg_ops[idx].address)) { if (request->reg_ops[idx].address >= FH_MEM_UPPER_BOUND) return -EINVAL; } } return 0; } /** * iwl_tm_trace_end - Ends debug trace. Common for all op modes. * @dev: testmode device struct */ static int iwl_tm_trace_end(struct iwl_tm_gnl_dev *dev) { struct iwl_trans *trans = dev->trans; struct iwl_test_trace *trace = &dev->tst.trace; if (!trace->enabled) return -EILSEQ; if (trace->cpu_addr && trace->dma_addr) dma_free_coherent(trans->dev, trace->size, trace->cpu_addr, trace->dma_addr); memset(trace, 0, sizeof(struct iwl_test_trace)); return 0; } /** * iwl_tm_trace_begin() - Checks input data for trace request * @dev: testmode device struct * @data_in: Only size is relevant - Desired size of trace buffer. * @data_out: Trace request data (address & size) */ static int iwl_tm_trace_begin(struct iwl_tm_gnl_dev *dev, struct iwl_tm_data *data_in, struct iwl_tm_data *data_out) { struct iwl_tm_trace_request *req; struct iwl_tm_trace_request *resp; if (!data_in->data || data_in->len < sizeof(struct iwl_tm_trace_request)) return -EINVAL; req = data_in->data; /* size zero means use the default */ if (!req->size) req->size = TRACE_BUFF_SIZE_DEF; else if (req->size < TRACE_BUFF_SIZE_MIN || req->size > TRACE_BUFF_SIZE_MAX) return -EINVAL; else if (!dev->dnt->mon_buf_cpu_addr) return -ENOMEM; resp = kmalloc(sizeof(*resp), GFP_KERNEL); if (!resp) return -ENOMEM; resp->size = dev->dnt->mon_buf_size; /* Casting to avoid compilation warnings when DMA address is 32bit */ resp->addr = (u64)dev->dnt->mon_base_addr; data_out->data = resp; data_out->len = sizeof(*resp); return 0; } static bool iwl_tm_gnl_valid_hw_addr(u32 addr) { /* TODO need to implement */ return true; } /** * iwl_tm_validate_sram_write_req() - Checks input data of SRAM write request * @dev: testmode device struct * @data_in: SRAM access request */ static int iwl_tm_validate_sram_write_req(struct iwl_tm_gnl_dev *dev, struct iwl_tm_data *data_in) { struct iwl_tm_sram_write_request *cmd_in; u32 data_buf_size; if (!dev->trans->op_mode) { IWL_ERR(dev->trans, "No op_mode!\n"); return -ENODEV; } if (!data_in->data || data_in->len < sizeof(struct iwl_tm_sram_write_request)) return -EINVAL; cmd_in = data_in->data; data_buf_size = data_in->len - sizeof(struct iwl_tm_sram_write_request); if (data_buf_size < cmd_in->len) return -EINVAL; if (iwl_tm_gnl_valid_hw_addr(cmd_in->offset)) return 0; /* otherwise, the offset must fall inside the absolute PRPH window */ if ((cmd_in->offset >= IWL_ABS_PRPH_START) && (cmd_in->offset < IWL_ABS_PRPH_START + PRPH_END)) return 0; return -EINVAL; } /** * iwl_tm_validate_sram_read_req() - Checks input data of SRAM read request * @dev: testmode device struct * @data_in: SRAM access request */ static int iwl_tm_validate_sram_read_req(struct iwl_tm_gnl_dev *dev, struct iwl_tm_data *data_in) { struct iwl_tm_sram_read_request *cmd_in; if (!dev->trans->op_mode) { IWL_ERR(dev->trans, "No op_mode!\n"); return -ENODEV; } if (!data_in->data || data_in->len < sizeof(struct iwl_tm_sram_read_request)) return -EINVAL; cmd_in = data_in->data; if (iwl_tm_gnl_valid_hw_addr(cmd_in->offset)) return 0; /* otherwise, the offset must fall inside the absolute PRPH window */ if ((cmd_in->offset >= IWL_ABS_PRPH_START) && (cmd_in->offset < IWL_ABS_PRPH_START + PRPH_END)) return 0; return -EINVAL; } /** * iwl_tm_notifications_en() - Checks input for enable test notifications * @tst: Device's test data pointer * @data_in: u32 notification (flag) */ static int
iwl_tm_notifications_en(struct iwl_test *tst, struct iwl_tm_data *data_in) { u32 notification_en; if (!data_in->data || (data_in->len != sizeof(u32))) return -EINVAL; notification_en = *(u32 *)data_in->data; if ((notification_en != NOTIFICATIONS_ENABLE) && (notification_en != NOTIFICATIONS_DISABLE)) return -EINVAL; tst->notify = notification_en == NOTIFICATIONS_ENABLE; return 0; } /** * iwl_tm_validate_tx_cmd() - Validates modulated TX request input data * @data_in: Input to be validated * */ static int iwl_tm_validate_tx_cmd(struct iwl_tm_data *data_in) { struct iwl_tm_mod_tx_request *cmd_req; u32 data_buf_size; if (!data_in->data || (data_in->len < sizeof(struct iwl_tm_mod_tx_request))) return -EINVAL; cmd_req = (struct iwl_tm_mod_tx_request *)data_in->data; data_buf_size = data_in->len - sizeof(struct iwl_tm_mod_tx_request); if (data_buf_size < cmd_req->len) return -EINVAL; if (cmd_req->sta_id >= IWL_TM_STATION_COUNT) return -EINVAL; return 0; } /** * iwl_tm_validate_rx_hdrs_mode_req() - Validates RX headers mode request * @data_in: Input to be validated * */ static int iwl_tm_validate_rx_hdrs_mode_req(struct iwl_tm_data *data_in) { if (!data_in->data || (data_in->len < sizeof(struct iwl_xvt_rx_hdrs_mode_request))) return -EINVAL; return 0; } static int iwl_tm_validate_get_chip_id(struct iwl_trans *trans) { return 0; } /** * iwl_tm_validate_apmg_pd_mode_req() - Validates apmg pd mode request * @data_in: Input to be validated * */ static int iwl_tm_validate_apmg_pd_mode_req(struct iwl_tm_data *data_in) { if (!data_in->data || (data_in->len != sizeof(struct iwl_xvt_apmg_pd_mode_request))) return -EINVAL; return 0; } static int iwl_tm_get_device_status(struct iwl_tm_gnl_dev *dev, struct iwl_tm_data *data_in, struct iwl_tm_data *data_out) { __u32 *status; status = kmalloc(sizeof(__u32), GFP_KERNEL); if (!status) return -ENOMEM; *status = dev->dnt->iwl_dnt_status; data_out->data = status; data_out->len = sizeof(__u32); return 0; } #if IS_ENABLED(CPTCFG_IWLXVT) static int iwl_tm_switch_op_mode(struct iwl_tm_gnl_dev *dev, struct iwl_tm_data *data_in) { struct iwl_switch_op_mode *switch_cmd = data_in->data; struct iwl_drv *drv; int ret = 0; if (data_in->len < sizeof(*switch_cmd)) return -EINVAL; drv = iwl_drv_get_dev_container(dev->trans->dev); if (!drv) { IWL_ERR(dev->trans, "Couldn't retrieve device information\n"); return -ENODEV; } /* Executing switch command */ ret = iwl_drv_switch_op_mode(drv, switch_cmd->new_op_mode); if (ret < 0) IWL_ERR(dev->trans, "Failed to switch op mode to %s (err:%d)\n", switch_cmd->new_op_mode, ret); return ret; } #endif static int iwl_tm_gnl_get_sil_step(struct iwl_trans *trans, struct iwl_tm_data *data_out) { struct iwl_sil_step *resp; data_out->data = kmalloc(sizeof(struct iwl_sil_step), GFP_KERNEL); if (!data_out->data) return -ENOMEM; data_out->len = sizeof(struct iwl_sil_step); resp = (struct iwl_sil_step *)data_out->data; resp->silicon_step = CSR_HW_REV_STEP(trans->hw_rev); return 0; } static int iwl_tm_gnl_get_build_info(struct iwl_trans *trans, struct iwl_tm_data *data_out) { struct iwl_tm_build_info *resp; data_out->data = kmalloc(sizeof(*resp), GFP_KERNEL); if (!data_out->data) return -ENOMEM; data_out->len = sizeof(struct iwl_tm_build_info); resp = (struct iwl_tm_build_info *)data_out->data; memset(resp, 0, sizeof(*resp)); strncpy(resp->driver_version, BACKPORTS_GIT_TRACKED, sizeof(resp->driver_version)); #ifdef BACKPORTS_BRANCH_TSTAMP strncpy(resp->branch_time, BACKPORTS_BRANCH_TSTAMP, sizeof(resp->branch_time)); #endif strncpy(resp->build_time,
BACKPORTS_BUILD_TSTAMP, sizeof(resp->build_time)); return 0; } static int iwl_tm_gnl_get_sil_type(struct iwl_trans *trans, struct iwl_tm_data *data_out) { struct iwl_tm_sil_type *resp; resp = kzalloc(sizeof(*resp), GFP_KERNEL); if (!resp) return -ENOMEM; resp->silicon_type = CSR_HW_REV_TYPE(trans->hw_rev); data_out->data = resp; data_out->len = sizeof(*resp); return 0; } static int iwl_tm_gnl_get_rfid(struct iwl_trans *trans, struct iwl_tm_data *data_out) { struct iwl_tm_rfid *resp; resp = kzalloc(sizeof(*resp), GFP_KERNEL); if (!resp) return -ENOMEM; IWL_DEBUG_INFO(trans, "HW RFID=0x%08X\n", trans->hw_rf_id); resp->flavor = CSR_HW_RFID_FLAVOR(trans->hw_rf_id); resp->dash = CSR_HW_RFID_DASH(trans->hw_rf_id); resp->step = CSR_HW_RFID_STEP(trans->hw_rf_id); resp->type = CSR_HW_RFID_TYPE(trans->hw_rf_id); data_out->data = resp; data_out->len = sizeof(*resp); return 0; } /* * Testmode GNL family types (This NL family * will eventually replace nl80211 support in * iwl xVT modules) */ #define IWL_TM_GNL_FAMILY_NAME "iwl_tm_gnl" #define IWL_TM_GNL_MC_GRP_NAME "iwl_tm_mcgrp" #define IWL_TM_GNL_VERSION_NR 1 #define IWL_TM_GNL_DEVNAME_LEN 256 /** * struct iwl_tm_gnl_cmd - Required data for command execution. * @dev_name: Target device * @cmd: Command index * @data_in: Input data * @data_out: Output data, to be sent when * command is done. */ struct iwl_tm_gnl_cmd { const char *dev_name; u32 cmd; struct iwl_tm_data data_in; struct iwl_tm_data data_out; }; static struct list_head dev_list; /* protected by mutex or RCU */ static struct mutex dev_list_mtx; /* Testmode GNL family command attributes */ enum iwl_tm_gnl_cmd_attr_t { IWL_TM_GNL_MSG_ATTR_INVALID = 0, IWL_TM_GNL_MSG_ATTR_DEVNAME, IWL_TM_GNL_MSG_ATTR_CMD, IWL_TM_GNL_MSG_ATTR_DATA, IWL_TM_GNL_MSG_ATTR_MAX }; /* TM GNL family definition */ static struct genl_family iwl_tm_gnl_family; static __genl_const struct genl_multicast_group iwl_tm_gnl_mcgrps[] = { { .name = IWL_TM_GNL_MC_GRP_NAME, }, }; /* TM GNL bus policy */ static const struct nla_policy iwl_tm_gnl_msg_policy[IWL_TM_GNL_MSG_ATTR_MAX] = { [IWL_TM_GNL_MSG_ATTR_DEVNAME] = { .type = NLA_NUL_STRING, .len = IWL_TM_GNL_DEVNAME_LEN-1 }, [IWL_TM_GNL_MSG_ATTR_CMD] = { .type = NLA_U32, }, [IWL_TM_GNL_MSG_ATTR_DATA] = { .type = NLA_BINARY, }, }; /** * iwl_tm_gnl_get_dev() - Retrieve device information * @dev_name: Device to be found * * Finds the device information according to device name, * must be protected by list mutex when used (mutex is not * locked inside the function to allow code flexibility) */ static struct iwl_tm_gnl_dev *iwl_tm_gnl_get_dev(const char *dev_name) { struct iwl_tm_gnl_dev *dev_itr, *dev = NULL; lockdep_assert_held(&dev_list_mtx); list_for_each_entry(dev_itr, &dev_list, list) { if (!strcmp(dev_itr->dev_name, dev_name)) { dev = dev_itr; break; } } return dev; } /** * iwl_tm_gnl_create_msg() - Creates a genl message * @pid: Netlink PID that the message is addressed to * @seq: sequence number (usually the one of the sender) * @cmd_data: Message's data * @flags: Resources allocation flags */ static struct sk_buff *iwl_tm_gnl_create_msg(u32 pid, u32 seq, struct iwl_tm_gnl_cmd cmd_data, gfp_t flags) { void *nlmsg_head; struct sk_buff *skb; int ret; skb = genlmsg_new(NLMSG_GOODSIZE, flags); if (!skb) goto send_msg_err; nlmsg_head = genlmsg_put(skb, pid, seq, &iwl_tm_gnl_family, 0, IWL_TM_GNL_CMD_EXECUTE); if (!nlmsg_head) goto send_msg_err; ret = nla_put_string(skb, IWL_TM_GNL_MSG_ATTR_DEVNAME, cmd_data.dev_name); if (ret) goto send_msg_err; ret = nla_put_u32(skb,
IWL_TM_GNL_MSG_ATTR_CMD, cmd_data.cmd); if (ret) goto send_msg_err; if (cmd_data.data_out.len && cmd_data.data_out.data) { ret = nla_put(skb, IWL_TM_GNL_MSG_ATTR_DATA, cmd_data.data_out.len, cmd_data.data_out.data); if (ret) goto send_msg_err; } genlmsg_end(skb, nlmsg_head); return skb; send_msg_err: if (skb) kfree_skb(skb); return NULL; } /** * iwl_tm_gnl_send_msg() - Sends a message to mcast or userspace listener * @trans: transport * @cmd: Command index * @check_notify: only send when notify is set * @data_out: Data to be sent * * Initiate a message sending to user space (as opposed * to replying to a message that was initiated by user * space). Uses multicast broadcasting method. */ int iwl_tm_gnl_send_msg(struct iwl_trans *trans, u32 cmd, bool check_notify, void *data_out, u32 data_len, gfp_t flags) { struct iwl_tm_gnl_dev *dev; struct iwl_tm_gnl_cmd cmd_data; struct sk_buff *skb; u32 nlportid; if (WARN_ON_ONCE(!trans)) return -EINVAL; if (!trans->tmdev) return 0; dev = trans->tmdev; nlportid = READ_ONCE(dev->nl_events_portid); if (check_notify && !dev->tst.notify) return 0; memset(&cmd_data, 0, sizeof(struct iwl_tm_gnl_cmd)); cmd_data.dev_name = dev_name(trans->dev); cmd_data.cmd = cmd; cmd_data.data_out.data = data_out; cmd_data.data_out.len = data_len; skb = iwl_tm_gnl_create_msg(nlportid, 0, cmd_data, flags); if (!skb) return -EINVAL; if (nlportid) return genlmsg_unicast(&init_net, skb, nlportid); return genlmsg_multicast(&iwl_tm_gnl_family, skb, 0, 0, flags); } IWL_EXPORT_SYMBOL(iwl_tm_gnl_send_msg); /** * iwl_tm_gnl_reply() - Sends command's results back to user space * @info: info struct received in .doit callback * @cmd_data: Data of command to be responded */ static int iwl_tm_gnl_reply(struct genl_info *info, struct iwl_tm_gnl_cmd cmd_data) { struct sk_buff *skb; skb = iwl_tm_gnl_create_msg(genl_info_snd_portid(info), info->snd_seq, cmd_data, GFP_KERNEL); if (!skb) return -EINVAL; return genlmsg_reply(skb, info); } /** * iwl_tm_gnl_cmd_execute() - Execute IWL testmode GNL command * @cmd_data: Pointer to the data of command to be executed */ static int iwl_tm_gnl_cmd_execute(struct iwl_tm_gnl_cmd *cmd_data) { struct iwl_tm_gnl_dev *dev; bool common_op = false; int ret = 0; mutex_lock(&dev_list_mtx); dev = iwl_tm_gnl_get_dev(cmd_data->dev_name); mutex_unlock(&dev_list_mtx); if (!dev) return -ENODEV; IWL_DEBUG_INFO(dev->trans, "%s cmd=0x%X\n", __func__, cmd_data->cmd); switch (cmd_data->cmd) { case IWL_TM_USER_CMD_HCMD: ret = iwl_tm_validate_fw_cmd(&cmd_data->data_in); break; case IWL_TM_USER_CMD_REG_ACCESS: ret = iwl_tm_validate_reg_ops(&cmd_data->data_in); break; case IWL_TM_USER_CMD_SRAM_WRITE: ret = iwl_tm_validate_sram_write_req(dev, &cmd_data->data_in); break; case IWL_TM_USER_CMD_BEGIN_TRACE: ret = iwl_tm_trace_begin(dev, &cmd_data->data_in, &cmd_data->data_out); common_op = true; break; case IWL_TM_USER_CMD_END_TRACE: ret = iwl_tm_trace_end(dev); common_op = true; break; case IWL_XVT_CMD_MOD_TX: ret = iwl_tm_validate_tx_cmd(&cmd_data->data_in); break; case IWL_XVT_CMD_RX_HDRS_MODE: ret = iwl_tm_validate_rx_hdrs_mode_req(&cmd_data->data_in); break; case IWL_XVT_CMD_APMG_PD_MODE: ret = iwl_tm_validate_apmg_pd_mode_req(&cmd_data->data_in); break; case IWL_TM_USER_CMD_NOTIFICATIONS: ret = iwl_tm_notifications_en(&dev->tst, &cmd_data->data_in); common_op = true; break; case IWL_TM_USER_CMD_GET_DEVICE_STATUS: ret = iwl_tm_get_device_status(dev, &cmd_data->data_in, &cmd_data->data_out); common_op = true; break; #if IS_ENABLED(CPTCFG_IWLXVT) case
IWL_TM_USER_CMD_SWITCH_OP_MODE: ret = iwl_tm_switch_op_mode(dev, &cmd_data->data_in); common_op = true; break; #endif case IWL_XVT_CMD_GET_CHIP_ID: ret = iwl_tm_validate_get_chip_id(dev->trans); break; case IWL_TM_USER_CMD_GET_SIL_STEP: ret = iwl_tm_gnl_get_sil_step(dev->trans, &cmd_data->data_out); common_op = true; break; case IWL_TM_USER_CMD_GET_DRIVER_BUILD_INFO: ret = iwl_tm_gnl_get_build_info(dev->trans, &cmd_data->data_out); common_op = true; break; case IWL_TM_USER_CMD_GET_SIL_TYPE: ret = iwl_tm_gnl_get_sil_type(dev->trans, &cmd_data->data_out); common_op = true; break; case IWL_TM_USER_CMD_GET_RFID: ret = iwl_tm_gnl_get_rfid(dev->trans, &cmd_data->data_out); common_op = true; break; } if (ret) { IWL_ERR(dev->trans, "%s Error=%d\n", __func__, ret); return ret; } if (!common_op) ret = iwl_tm_execute_cmd(&dev->trans->testmode, cmd_data->cmd, &cmd_data->data_in, &cmd_data->data_out); if (ret) IWL_ERR(dev->trans, "%s ret=%d\n", __func__, ret); else IWL_DEBUG_INFO(dev->trans, "%s ended Ok\n", __func__); return ret; } /** * iwl_tm_mem_dump() - Returns memory buffer data * @dev: testmode device struct * @data_in: input data * @data_out: Dump data */ static int iwl_tm_mem_dump(struct iwl_tm_gnl_dev *dev, struct iwl_tm_data *data_in, struct iwl_tm_data *data_out) { int ret; ret = iwl_tm_validate_sram_read_req(dev, data_in); if (ret) return ret; return iwl_tm_execute_cmd(&dev->trans->testmode, IWL_TM_USER_CMD_SRAM_READ, data_in, data_out); } /** * iwl_tm_trace_dump() - Returns trace buffer data * @dev: testmode device struct * @data_out: Dump data */ static int iwl_tm_trace_dump(struct iwl_tm_gnl_dev *dev, struct iwl_tm_data *data_out) { int ret; u32 buf_size; if (!(dev->dnt->iwl_dnt_status & IWL_DNT_STATUS_MON_CONFIGURED)) { IWL_ERR(dev->trans, "Invalid monitor status\n"); return -EINVAL; } if (dev->dnt->mon_buf_size == 0) { IWL_ERR(dev->trans, "No available monitor buffer\n"); return -ENOMEM; } buf_size = dev->dnt->mon_buf_size; data_out->data = kmalloc(buf_size, GFP_KERNEL); if (!data_out->data) return -ENOMEM; ret = iwl_dnt_dispatch_pull(dev->trans, data_out->data, buf_size, MONITOR); if (ret < 0) { kfree(data_out->data); return ret; } data_out->len = ret; return 0; } /** * iwl_tm_gnl_command_dump() - Returns dump buffer data * @cmd_data: Pointer to the data of dump command. * Only device name and command index are the relevant input. * Data out is the start address of the buffer, and its size.
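 *
 * For example (hypothetical flow): a userspace IWL_TM_USER_CMD_TRACE_DUMP
 * request is routed here first, filling data_out with the whole monitor
 * buffer; iwl_tm_gnl_dump() then slices that buffer into NLM_F_MULTI
 * netlink messages until the dump offset reaches the buffer size.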
*/ static int iwl_tm_gnl_command_dump(struct iwl_tm_gnl_cmd *cmd_data) { struct iwl_tm_gnl_dev *dev; int ret = 0; mutex_lock(&dev_list_mtx); dev = iwl_tm_gnl_get_dev(cmd_data->dev_name); mutex_unlock(&dev_list_mtx); if (!dev) return -ENODEV; switch (cmd_data->cmd) { case IWL_TM_USER_CMD_TRACE_DUMP: ret = iwl_tm_trace_dump(dev, &cmd_data->data_out); break; case IWL_TM_USER_CMD_SRAM_READ: ret = iwl_tm_mem_dump(dev, &cmd_data->data_in, &cmd_data->data_out); break; default: return -EOPNOTSUPP; } return ret; } /** * iwl_tm_gnl_parse_msg - Extract input cmd data out of netlink attributes * @attrs: Input netlink attributes * @cmd_data: Command */ static int iwl_tm_gnl_parse_msg(struct nlattr **attrs, struct iwl_tm_gnl_cmd *cmd_data) { memset(cmd_data, 0 , sizeof(struct iwl_tm_gnl_cmd)); if (!attrs[IWL_TM_GNL_MSG_ATTR_DEVNAME] || !attrs[IWL_TM_GNL_MSG_ATTR_CMD]) return -EINVAL; cmd_data->dev_name = nla_data(attrs[IWL_TM_GNL_MSG_ATTR_DEVNAME]); cmd_data->cmd = nla_get_u32(attrs[IWL_TM_GNL_MSG_ATTR_CMD]); if (attrs[IWL_TM_GNL_MSG_ATTR_DATA]) { cmd_data->data_in.data = nla_data(attrs[IWL_TM_GNL_MSG_ATTR_DATA]); cmd_data->data_in.len = nla_len(attrs[IWL_TM_GNL_MSG_ATTR_DATA]); } return 0; } /** * iwl_tm_gnl_cmd_do() - Executes IWL testmode GNL command */ static int iwl_tm_gnl_cmd_do(struct sk_buff *skb, struct genl_info *info) { struct iwl_tm_gnl_cmd cmd_data; int ret; ret = iwl_tm_gnl_parse_msg(info->attrs, &cmd_data); if (ret) return ret; ret = iwl_tm_gnl_cmd_execute(&cmd_data); if (!ret && cmd_data.data_out.len) { ret = iwl_tm_gnl_reply(info, cmd_data); /* * In this case, data out should be allocated in * iwl_tm_gnl_cmd_execute so it should be freed * here */ kfree(cmd_data.data_out.data); } return ret; } /** * iwl_tm_gnl_dump() - Executes IWL testmode GNL command * cb->args[0]: Command number, incremented by one (as it may be zero) * We're keeping the command so we can tell if is it the * first dump call or not. * cb->args[1]: Buffer data to dump * cb->args[2]: Buffer size * cb->args[3]: Buffer offset from where to dump in the next round */ static int iwl_tm_gnl_dump(struct sk_buff *skb, struct netlink_callback *cb) { struct iwl_tm_gnl_cmd cmd_data; void *nlmsg_head = NULL; struct nlattr *attrs[IWL_TM_GNL_MSG_ATTR_MAX]; void *dump_addr; unsigned long dump_offset; int dump_size, chunk_size, ret; if (!cb->args[0]) { /* * This is the first part of the dump - Parse dump data * out of the data in the netlink header and set up the * dump in cb->args[]. */ ret = nlmsg_parse(cb->nlh, GENL_HDRLEN, attrs, IWL_TM_GNL_MSG_ATTR_MAX - 1, iwl_tm_gnl_msg_policy, NULL); if (ret) return ret; ret = iwl_tm_gnl_parse_msg(attrs, &cmd_data); if (ret) return ret; ret = iwl_tm_gnl_command_dump(&cmd_data); if (ret) return ret; /* Incrementing command since command number may be zero */ cb->args[0] = cmd_data.cmd + 1; cb->args[1] = (unsigned long)cmd_data.data_out.data; cb->args[2] = cmd_data.data_out.len; cb->args[3] = 0; if (!cb->args[2]) return -ENODATA; } dump_addr = (u8 *)cb->args[1]; dump_size = cb->args[2]; dump_offset = cb->args[3]; nlmsg_head = genlmsg_put(skb, NETLINK_CB_PORTID(cb->skb), cb->nlh->nlmsg_seq, &iwl_tm_gnl_family, NLM_F_MULTI, IWL_TM_GNL_CMD_EXECUTE); /* * Reserve some room for NL attribute header, * 16 bytes should be enough. 
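 * As a rough worked example (illustrative numbers only): with about
 * 4 KiB of tailroom per skb and a 64 KiB monitor buffer, each round
 * carries roughly 4 KiB of payload, so the complete dump spans on the
 * order of 16 multipart messages.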
*/ chunk_size = skb_tailroom(skb) - 16; if (chunk_size <= 0) { ret = -ENOMEM; goto dump_err; } if (chunk_size > dump_size - dump_offset) chunk_size = dump_size - dump_offset; if (chunk_size) { ret = nla_put(skb, IWL_TM_GNL_MSG_ATTR_DATA, chunk_size, dump_addr + dump_offset); if (ret) goto dump_err; } genlmsg_end(skb, nlmsg_head); /* move offset */ cb->args[3] += chunk_size; return cb->args[2] - cb->args[3]; dump_err: genlmsg_cancel(skb, nlmsg_head); return ret; } static int iwl_tm_gnl_done(struct netlink_callback *cb) { switch (cb->args[0] - 1) { case IWL_TM_USER_CMD_SRAM_READ: case IWL_TM_USER_CMD_TRACE_DUMP: kfree((void *)cb->args[1]); return 0; } return -EOPNOTSUPP; } static int iwl_tm_gnl_cmd_subscribe(struct sk_buff *skb, struct genl_info *info) { struct iwl_tm_gnl_dev *dev; const char *dev_name; int ret; if (!info->attrs[IWL_TM_GNL_MSG_ATTR_DEVNAME]) return -EINVAL; dev_name = nla_data(info->attrs[IWL_TM_GNL_MSG_ATTR_DEVNAME]); mutex_lock(&dev_list_mtx); dev = iwl_tm_gnl_get_dev(dev_name); if (!dev) { ret = -ENODEV; goto unlock; } if (dev->nl_events_portid) { ret = -EBUSY; goto unlock; } dev->nl_events_portid = genl_info_snd_portid(info); ret = 0; unlock: mutex_unlock(&dev_list_mtx); return ret; } /* * iwl_tm_gnl_ops - GNL family commands. * IWL_TM_GNL_CMD_EXECUTE carries all testmode messages (doit for * commands, dumpit for large reads), and IWL_TM_GNL_CMD_SUBSCRIBE_EVENTS * registers a unicast listener for events. */ static __genl_const struct genl_ops iwl_tm_gnl_ops[] = { { .cmd = IWL_TM_GNL_CMD_EXECUTE, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = iwl_tm_gnl_cmd_do, .dumpit = iwl_tm_gnl_dump, .done = iwl_tm_gnl_done, }, { .cmd = IWL_TM_GNL_CMD_SUBSCRIBE_EVENTS, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = iwl_tm_gnl_cmd_subscribe, }, }; static struct genl_family iwl_tm_gnl_family __genl_ro_after_init = { .hdrsize = 0, .name = IWL_TM_GNL_FAMILY_NAME, .version = IWL_TM_GNL_VERSION_NR, .maxattr = IWL_TM_GNL_MSG_ATTR_MAX, .policy = iwl_tm_gnl_msg_policy, .module = THIS_MODULE, .ops = iwl_tm_gnl_ops, .n_ops = ARRAY_SIZE(iwl_tm_gnl_ops), .mcgrps = iwl_tm_gnl_mcgrps, .n_mcgrps = ARRAY_SIZE(iwl_tm_gnl_mcgrps), }; /** * iwl_tm_gnl_add() - Registers a device/op-mode in the iwl-tm-gnl list * @trans: transport struct for the device to register for */ void iwl_tm_gnl_add(struct iwl_trans *trans) { struct iwl_tm_gnl_dev *dev; if (!trans) return; if (trans->tmdev) return; mutex_lock(&dev_list_mtx); if (iwl_tm_gnl_get_dev(dev_name(trans->dev))) goto unlock; dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) goto unlock; dev->dev_name = dev_name(trans->dev); trans->tmdev = dev; dev->trans = trans; list_add_tail_rcu(&dev->list, &dev_list); unlock: mutex_unlock(&dev_list_mtx); } /** * iwl_tm_gnl_remove() - Unregisters a device/op-mode in the iwl-tm-gnl list * @trans: transport struct for the device */ void iwl_tm_gnl_remove(struct iwl_trans *trans) { struct iwl_tm_gnl_dev *dev_itr, *tmp; if (WARN_ON_ONCE(!trans)) return; /* Searching for operation mode in list */ mutex_lock(&dev_list_mtx); list_for_each_entry_safe(dev_itr, tmp, &dev_list, list) { if (dev_itr->trans == trans) { /* * Device found.
			/*
			 * Device found. Removing it from the list
			 * and releasing its resources.
			 */
			list_del_rcu(&dev_itr->list);
			synchronize_rcu();
			kfree(dev_itr);
			break;
		}
	}
	trans->tmdev = NULL;
	mutex_unlock(&dev_list_mtx);
}

static int iwl_tm_gnl_netlink_notify(struct notifier_block *nb,
				     unsigned long state,
				     void *_notify)
{
	struct netlink_notify *notify = _notify;
	struct iwl_tm_gnl_dev *dev;

	rcu_read_lock();
	list_for_each_entry_rcu(dev, &dev_list, list) {
		if (dev->nl_events_portid == netlink_notify_portid(notify))
			dev->nl_events_portid = 0;
	}
	rcu_read_unlock();

	return NOTIFY_OK;
}

static struct notifier_block iwl_tm_gnl_netlink_notifier = {
	.notifier_call = iwl_tm_gnl_netlink_notify,
};

/**
 * iwl_tm_gnl_init() - Registers the tm-gnl module
 *
 * Registers the Testmode GNL family and initializes
 * the TM GNL global variables.
 */
int iwl_tm_gnl_init(void)
{
	int ret;

	INIT_LIST_HEAD(&dev_list);
	mutex_init(&dev_list_mtx);

	ret = genl_register_family(&iwl_tm_gnl_family);
	if (ret)
		return ret;

	ret = netlink_register_notifier(&iwl_tm_gnl_netlink_notifier);
	if (ret)
		genl_unregister_family(&iwl_tm_gnl_family);

	return ret;
}

/**
 * iwl_tm_gnl_exit() - Unregisters the Testmode GNL family
 */
int iwl_tm_gnl_exit(void)
{
	netlink_unregister_notifier(&iwl_tm_gnl_netlink_notifier);
	return genl_unregister_family(&iwl_tm_gnl_family);
}

/**
 * iwl_tm_gnl_send_rx() - Send a spontaneous rx message to user
 * @trans: Pointer to the transport layer
 * @rxb: Contains rx packet to be sent
 */
void iwl_tm_gnl_send_rx(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	int length = iwl_rx_packet_len(pkt);

	/* the length doesn't include the len_n_flags field, so add it manually */
	length += sizeof(__le32);

	iwl_tm_gnl_send_msg(trans, IWL_TM_USER_CMD_NOTIF_UCODE_RX_PKT, true,
			    (void *)pkt, length, GFP_ATOMIC);
}
IWL_EXPORT_SYMBOL(iwl_tm_gnl_send_rx);
backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/iwl-tm-gnl.h000066400000000000000000000077241351064634500267340ustar00rootroot00000000000000/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2010 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
 * Copyright(c) 2018 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2010 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
 * Copyright(c) 2018 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#ifndef __IWL_TM_GNL_H__
#define __IWL_TM_GNL_H__

#include <linux/types.h>

#include "fw/testmode.h"

struct iwl_test_trace {
	u32 size;
	u8 *cpu_addr;
	dma_addr_t dma_addr;
	bool enabled;
};

struct iwl_test {
	struct iwl_test_trace trace;
	bool notify;
};

/**
 * struct iwl_tm_gnl_dev - Devices data base
 * @list: Linked list to all devices
 * @tst: Test data
 * @dnt: Debug and trace data
 * @trans: Pointer to the owning transport
 * @dev_name: Pointer to the device name
 * @nl_events_portid: Netlink port ID subscribed for event notifications
 *
 * Used to retrieve a device op mode pointer.
 * The device identifier is its name.
 */
struct iwl_tm_gnl_dev {
	struct list_head list;
	struct iwl_test tst;
	struct iwl_dnt *dnt;
	struct iwl_trans *trans;
	const char *dev_name;
	u32 nl_events_portid;
};

int iwl_tm_gnl_send_msg(struct iwl_trans *trans, u32 cmd, bool check_notify,
			void *data_out, u32 data_len, gfp_t flags);

void iwl_tm_gnl_add(struct iwl_trans *trans);
void iwl_tm_gnl_remove(struct iwl_trans *trans);

int iwl_tm_gnl_init(void);
int iwl_tm_gnl_exit(void);
void iwl_tm_gnl_send_rx(struct iwl_trans *trans,
			struct iwl_rx_cmd_buffer *rxb);
#endif
backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/iwl-tm-infc.h000066400000000000000000000541301351064634500270710ustar00rootroot00000000000000/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2010 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2010 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#ifndef __iwl_tm_infc__
#define __iwl_tm_infc__

#include <linux/types.h>
#include <linux/if_ether.h>

/*
 * Testmode GNL family command.
 * There is only one NL command, not to be
 * confused with testmode commands.
 */
enum iwl_tm_gnl_cmd_t {
	IWL_TM_GNL_CMD_EXECUTE = 0,
	IWL_TM_GNL_CMD_SUBSCRIBE_EVENTS,
};

/* uCode trace buffer */
#define TRACE_BUFF_SIZE_MAX	0x200000
#define TRACE_BUFF_SIZE_MIN	0x1000
#define TRACE_BUFF_SIZE_DEF	0x20000

#define TM_CMD_BASE		0x100
#define TM_CMD_NOTIF_BASE	0x200
#define XVT_CMD_BASE		0x300
#define XVT_CMD_NOTIF_BASE	0x400
#define XVT_BUS_TESTER_BASE	0x500

/*
 * signifies that iwl_tm_mod_tx_request is set to infinite mode,
 * when iwl_tm_mod_tx_request.times == IWL_XVT_TX_MODULATED_INFINITE
 */
#define IWL_XVT_TX_MODULATED_INFINITE	(0)
#define IWL_XVT_MAX_MAC_HEADER_LENGTH	(36)
#define IWL_XVT_MAX_NUM_OF_FRAMES	(32)

/*
 * Periphery registers absolute lower bound. This is used in order to
 * differentiate register access through HBUS_TARG_PRPH_.* and
 * HBUS_TARG_MEM_* accesses.
 */
#define IWL_ABS_PRPH_START (0xA00000)

/* User-Driver interface commands */
enum {
	IWL_TM_USER_CMD_HCMD = TM_CMD_BASE,
	IWL_TM_USER_CMD_REG_ACCESS,
	IWL_TM_USER_CMD_SRAM_WRITE,
	IWL_TM_USER_CMD_SRAM_READ,
	IWL_TM_USER_CMD_GET_DEVICE_INFO,
	IWL_TM_USER_CMD_GET_DEVICE_STATUS,
	IWL_TM_USER_CMD_BEGIN_TRACE,
	IWL_TM_USER_CMD_END_TRACE,
	IWL_TM_USER_CMD_TRACE_DUMP,
	IWL_TM_USER_CMD_NOTIFICATIONS,
	IWL_TM_USER_CMD_SWITCH_OP_MODE,
	IWL_TM_USER_CMD_GET_SIL_STEP,
	IWL_TM_USER_CMD_GET_DRIVER_BUILD_INFO,
	IWL_TM_USER_CMD_GET_FW_INFO,
	IWL_TM_USER_CMD_BUS_DATA_ACCESS,
	IWL_TM_USER_CMD_GET_SIL_TYPE,
	IWL_TM_USER_CMD_GET_RFID,

	IWL_TM_USER_CMD_NOTIF_UCODE_RX_PKT = TM_CMD_NOTIF_BASE,
	IWL_TM_USER_CMD_NOTIF_DRIVER,
	IWL_TM_USER_CMD_NOTIF_RX_HDR,
	IWL_TM_USER_CMD_NOTIF_COMMIT_STATISTICS,
	IWL_TM_USER_CMD_NOTIF_PHY_DB,
	IWL_TM_USER_CMD_NOTIF_DTS_MEASUREMENTS,
	IWL_TM_USER_CMD_NOTIF_MONITOR_DATA,
	IWL_TM_USER_CMD_NOTIF_UCODE_MSGS_DATA,
	IWL_TM_USER_CMD_NOTIF_APMG_PD,
	IWL_TM_USER_CMD_NOTIF_RETRIEVE_MONITOR,
	IWL_TM_USER_CMD_NOTIF_CRASH_DATA,
	IWL_TM_USER_CMD_NOTIF_BFE,
	IWL_TM_USER_CMD_NOTIF_LOC_MCSI,
	IWL_TM_USER_CMD_NOTIF_LOC_RANGE,
	IWL_TM_USER_CMD_NOTIF_IQ_CALIB,
	IWL_TM_USER_CMD_NOTIF_CT_KILL,
};

/*
 * xVT command indices start where the common
 * testmode command indices end.
 */
enum {
	IWL_XVT_CMD_START = XVT_CMD_BASE,
	IWL_XVT_CMD_STOP,
	IWL_XVT_CMD_CONTINUE_INIT,
	IWL_XVT_CMD_GET_PHY_DB_ENTRY,
	IWL_XVT_CMD_SET_CONFIG,
	IWL_XVT_CMD_GET_CONFIG,
	IWL_XVT_CMD_MOD_TX,
	IWL_XVT_CMD_RX_HDRS_MODE,
	IWL_XVT_CMD_ALLOC_DMA,
	IWL_XVT_CMD_GET_DMA,
	IWL_XVT_CMD_FREE_DMA,
	IWL_XVT_CMD_GET_CHIP_ID,
	IWL_XVT_CMD_APMG_PD_MODE,
	IWL_XVT_CMD_GET_MAC_ADDR_INFO,
	IWL_XVT_CMD_MOD_TX_STOP,
	IWL_XVT_CMD_TX_QUEUE_CFG,
	IWL_XVT_CMD_DRIVER_CMD,

	/* Driver notifications */
	IWL_XVT_CMD_SEND_REPLY_ALIVE = XVT_CMD_NOTIF_BASE,
	IWL_XVT_CMD_SEND_RFKILL,
	IWL_XVT_CMD_SEND_NIC_ERROR,
	IWL_XVT_CMD_SEND_NIC_UMAC_ERROR,
	IWL_XVT_CMD_SEND_MOD_TX_DONE,
	IWL_XVT_CMD_ENHANCED_TX_DONE,
	IWL_XVT_CMD_TX_CMD_RESP,
	IWL_XVT_CMD_ECHO_NOTIF,

	/* Bus Tester Commands*/
	IWL_TM_USER_CMD_SV_BUS_CONFIG = XVT_BUS_TESTER_BASE,
	IWL_TM_USER_CMD_SV_BUS_RESET,
	IWL_TM_USER_CMD_SV_IO_TOGGLE,
	IWL_TM_USER_CMD_SV_GET_STATUS,
	IWL_TM_USER_CMD_SV_RD_WR_UINT8,
	IWL_TM_USER_CMD_SV_RD_WR_UINT32,
	IWL_TM_USER_CMD_SV_RD_WR_BUFFER,
};

/*
 * User space - driver interface commands. These commands will be sent as
 * sub-commands through IWL_XVT_CMD_DRIVER_CMD.
 */
enum {
	IWL_DRV_CMD_CONFIG_TX_QUEUE = 0,
	IWL_DRV_CMD_SET_TX_PAYLOAD,
	IWL_DRV_CMD_TX_START,
	IWL_DRV_CMD_TX_STOP,
	IWL_DRV_CMD_GET_RX_AGG_STATS,
	IWL_DRV_CMD_CONFIG_RX_MPDU,
	IWL_DRV_CMD_ECHO_NOTIF,
};

enum {
	NOTIFICATIONS_DISABLE = 0,
	NOTIFICATIONS_ENABLE = 1,
};

/**
 * struct iwl_tm_cmd_request - Host command request
 * @id: Command ID
 * @want_resp: True (1) if a response is required, else false (0)
 * @len: Data length
 * @data: For a command in, cast to an iwl_host_cmd; holds the
 *	rx packet when the structure is used for a command response.
 */
struct iwl_tm_cmd_request {
	__u32 id;
	__u32 want_resp;
	__u32 len;
	__u8 data[];
} __packed __aligned(4);
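/*
 * The request is a fixed header followed directly by @len bytes of payload
 * in the flexible @data array. A minimal sketch of how a caller might build
 * one (illustrative only, not code from this driver; cmd_id, payload and
 * payload_len are placeholders):
 *
 *	struct iwl_tm_cmd_request *req;
 *
 *	req = kzalloc(sizeof(*req) + payload_len, GFP_KERNEL);
 *	req->id = cmd_id;
 *	req->want_resp = 1;
 *	req->len = payload_len;
 *	memcpy(req->data, payload, payload_len);
 */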
/**
 * struct iwl_tm_sdio_io_toggle - SV Tester SDIO bus enable command
 * @enable: Function enable/disable 1/0
 */
struct iwl_tm_sdio_io_toggle {
	__u32 enable;
} __packed __aligned(4);

/* Register operations - Operation type */
enum {
	IWL_TM_REG_OP_READ = 0,
	IWL_TM_REG_OP_WRITE,
	IWL_TM_REG_OP_MAX
};

/**
 * struct iwl_tm_reg_op - Single register operation data
 * @op_type: READ/WRITE
 * @address: Register address
 * @value: Write value, or read result
 */
struct iwl_tm_reg_op {
	__u32 op_type;
	__u32 address;
	__u32 value;
} __packed __aligned(4);

/**
 * struct iwl_tm_regs_request - Register operation request data
 * @num: number of operations in struct
 * @reg_ops: Array of register operations
 */
struct iwl_tm_regs_request {
	__u32 num;
	struct iwl_tm_reg_op reg_ops[];
} __packed __aligned(4);

/**
 * struct iwl_tm_trace_request - Data for trace begin requests
 * @addr: Resulting DMA address of trace buffer LSB
 * @size: Requested size of trace buffer
 */
struct iwl_tm_trace_request {
	__u64 addr;
	__u32 size;
} __packed __aligned(4);

/**
 * struct iwl_tm_sram_write_request
 * @offset: Address offset
 * @len: input data length
 * @buffer: input data
 */
struct iwl_tm_sram_write_request {
	__u32 offset;
	__u32 len;
	__u8 buffer[];
} __packed __aligned(4);

/**
 * struct iwl_tm_sram_read_request
 * @offset: Address offset
 * @length: data length
 */
struct iwl_tm_sram_read_request {
	__u32 offset;
	__u32 length;
} __packed __aligned(4);

/**
 * struct iwl_tm_dev_info_req - Request data for get info request
 * @read_sv: whether or not to read sv_srop
 */
struct iwl_tm_dev_info_req {
	__u32 read_sv;
} __packed __aligned(4);

/**
 * struct iwl_tm_dev_info - Result data for get info request
 * @dev_id: device ID
 * @vendor_id: vendor ID
 * @silicon_step: device silicon step
 * @fw_ver: FW version
 * @build_ver: build version
 * @driver_ver: driver version string
 */
struct iwl_tm_dev_info {
	__u32 dev_id;
	__u32 vendor_id;
	__u32 silicon_step;
	__u32 fw_ver;
	__u32 build_ver;
	__u8 driver_ver[];
} __packed __aligned(4);

/*
 * struct iwl_tm_thrshld_md - tx packet metadata that crosses a threshold
 *
 * @monitor_collec_wind: the size of the window to collect the logs
 * @seq: packet sequence
 * @pkt_start: start time of triggering pkt
 * @pkt_end: end time of triggering pkt
 * @msrmnt: the tx latency of the pkt
 * @tid: tid of the pkt
 * @mode: recording mode (internal buffer or continuous recording).
 */
struct iwl_tm_thrshld_md {
	__u16 monitor_collec_wind;
	__u16 seq;
	__u32 pkt_start;
	__u32 pkt_end;
	__u32 msrmnt;
	__u16 tid;
	__u8 mode;
} __packed __aligned(4);

#define MAX_OP_MODE_LENGTH	16

/**
 * struct iwl_switch_op_mode - switch op_mode
 * @new_op_mode: the new op_mode name
 */
struct iwl_switch_op_mode {
	__u8 new_op_mode[MAX_OP_MODE_LENGTH];
} __packed __aligned(4);

/**
 * struct iwl_sil_step - holds the silicon step
 * @silicon_step: the device silicon step
 */
struct iwl_sil_step {
	__u32 silicon_step;
} __packed __aligned(4);

/**
 * struct iwl_tm_sil_type - holds the silicon type
 * @silicon_type: the device silicon type
 */
struct iwl_tm_sil_type {
	__u32 silicon_type;
} __packed __aligned(4);

/**
 * struct iwl_tm_rfid - Currently connected RF device info
 * @flavor: RFID flavor
 * @dash: RFID dash
 * @step: RFID step
 * @type: RFID type
 */
struct iwl_tm_rfid {
	__u32 flavor;
	__u32 dash;
	__u32 step;
	__u32 type;
} __packed __aligned(4);

#define MAX_DRIVER_VERSION_LEN	256
#define MAX_BUILD_DATE_LEN	32

/**
 * struct iwl_tm_build_info - Result data for get driver build info request
 * @driver_version: driver version in tree:branch:build:sha1
 * @branch_time: branch creation time
 * @build_time: build time
 */
struct iwl_tm_build_info {
	__u8 driver_version[MAX_DRIVER_VERSION_LEN];
	__u8 branch_time[MAX_BUILD_DATE_LEN];
	__u8 build_time[MAX_BUILD_DATE_LEN];
} __packed __aligned(4);

/**
 * struct iwl_tm_get_fw_info - get the FW info
 * @fw_major_ver: the fw major version
 * @fw_minor_ver: the fw minor version
 * @fw_capa_flags: the fw capabilities flags
 * @fw_capa_api_len: the fw capabilities api length in data
 * @fw_capa_len: the fw capabilities length in data
 * @data: fw_capa_api and fw_capa data (length defined by fw_capa_api_len
 *	+ fw_capa_len)
 */
struct iwl_tm_get_fw_info {
	__u32 fw_major_ver;
	__u32 fw_minor_ver;
	__u32 fw_capa_flags;
	__u32 fw_capa_api_len;
	__u32 fw_capa_len;
	__u8 data[];
} __packed __aligned(4);

/* xVT definitions */

#define IWL_XVT_RFKILL_OFF	0
#define IWL_XVT_RFKILL_ON	1

struct iwl_xvt_user_calib_ctrl {
	__u32 flow_trigger;
	__u32 event_trigger;
} __packed __aligned(4);

#define IWL_USER_FW_IMAGE_IDX_INIT	0
#define IWL_USER_FW_IMAGE_IDX_REGULAR	1
#define IWL_USER_FW_IMAGE_IDX_WOWLAN	2
#define IWL_USER_FW_IMAGE_IDX_TYPE_MAX	3

enum {
	IWL_XVT_GET_CALIB_TYPE_DEF = 0,
	IWL_XVT_GET_CALIB_TYPE_RUNTIME
};

/**
 * struct iwl_xvt_sw_cfg_request - Data for set SW stack configuration request
 * @load_mask: bit[0] = Init FW
 *	bit[1] = Runtime FW
 * @cfg_mask: Mask for which calibrations to regard
 * @phy_config: PHY_CONFIGURATION command parameter
 *	Used only for "Get SW CFG"
 * @get_calib_type: Used only in "Get SW CFG"
 *	0: Get FW original calib ctrl
 *	1: Get actual calib ctrl
 * @dbg_flags: debug flags
 * @calib_ctrl: Calibration control for each FW
 */
struct iwl_xvt_sw_cfg_request {
	__u32 load_mask;
	__u32 cfg_mask;
	__u32 phy_config;
	__u32 get_calib_type;
	__u32 dbg_flags;
	struct iwl_xvt_user_calib_ctrl calib_ctrl[IWL_UCODE_TYPE_MAX];
} __packed __aligned(4);

/**
 * struct iwl_xvt_phy_db_request - Data for get PHY DB entry request
 * @type: Type of DB section
 * @chg_id: Channel Group ID, relevant only when
 *	type is CHG PAPD or CHG TXP calibrations
 * @size: Size of result data, in bytes
 * @data: Result entry data
 */
struct iwl_xvt_phy_db_request {
	__u32 type;
	__u32 chg_id;
	__u32 size;
	__u8 data[];
} __packed __aligned(4);

#define IWL_TM_STATION_COUNT	16

/**
 * struct iwl_tm_mod_tx_request - Data transmission request
 * @times: Number of times to transmit the data.
 * @delay_us: Delay between frames
 * @pa_detect_en: Flag. When true, enable PA detector
 * @trigger_led: Flag. When true, light led when transmitting
 * @len: Size of data buffer
 * @rate_flags: Tx Configuration rate flags
 * @no_ack: Flag. When true, FW will not send ack
 * @sta_id: Station ID
 * @data: Data to transmit
 */
struct iwl_tm_mod_tx_request {
	__u32 times;
	__u32 delay_us;
	__u32 pa_detect_en;
	__u32 trigger_led;
	__u32 len;
	__u32 rate_flags;
	__u32 no_ack;
	__u8 sta_id;
	__u8 data[];
} __packed __aligned(4);

/**
 * struct iwl_xvt_tx_mod_task_data - Data for modulated tx task handler
 * @lmac_id: lmac index
 * @xvt: pointer to the xvt op mode
 * @tx_req: data of the transmission request
 */
struct iwl_xvt_tx_mod_task_data {
	__u32 lmac_id;
	struct iwl_xvt *xvt;
	struct iwl_tm_mod_tx_request tx_req;
} __packed __aligned(4);

/*
 * error status for the status parameter in struct iwl_xvt_tx_mod_done
 */
enum {
	XVT_TX_DRIVER_SUCCESSFUL = 0,
	XVT_TX_DRIVER_QUEUE_FULL,
	XVT_TX_DRIVER_TIMEOUT,
	XVT_TX_DRIVER_ABORTED
};

/**
 * struct iwl_xvt_tx_mod_done - Notification data for modulated tx
 * @num_of_packets: number of sent packets
 * @status: tx task handler error status
 * @lmac_id: lmac index
 */
struct iwl_xvt_tx_mod_done {
	__u64 num_of_packets;
	__u32 status;
	__u32 lmac_id;
} __packed __aligned(4);

/**
 * struct iwl_xvt_tx_mod_stop - input for stop modulated tx
 * @lmac_id: which lmac id to stop
 */
struct iwl_xvt_tx_mod_stop {
	__u32 lmac_id;
} __packed __aligned(4);

/**
 * struct iwl_xvt_rx_hdrs_mode_request - Start/Stop gathering headers info.
 * @mode: 0 - stop
 *	1 - start
 */
struct iwl_xvt_rx_hdrs_mode_request {
	__u32 mode;
} __packed __aligned(4);

/**
 * struct iwl_xvt_apmg_pd_mode_request - Start/Stop gathering apmg_pd info.
 * @mode: 0 - stop
 *	1 - start
 */
struct iwl_xvt_apmg_pd_mode_request {
	__u32 mode;
} __packed __aligned(4);

/**
 * struct iwl_xvt_alloc_dma - Data for alloc dma requests
 * @addr: Resulting DMA address of trace buffer LSB
 * @size: Requested size of dma buffer
 */
struct iwl_xvt_alloc_dma {
	__u64 addr;
	__u32 size;
} __packed __aligned(4);

/**
 * struct iwl_xvt_get_dma - Data for get dma requests
 * @size: size of data
 * @data: Data to transmit
 */
struct iwl_xvt_get_dma {
	__u32 size;
	__u8 data[];
} __packed __aligned(4);

/**
 * struct iwl_xvt_chip_id - get the chip id from SCU
 * @registers: an array of registers to hold the chip id data
 */
struct iwl_xvt_chip_id {
	__u32 registers[3];
} __packed __aligned(4);

/**
 * struct iwl_tm_crash_data - Notifications containing crash data
 * @size: data size
 * @data: data
 */
struct iwl_tm_crash_data {
	__u32 size;
	__u8 data[];
} __packed __aligned(4);

/**
 * struct iwl_xvt_mac_addr_info - Current mac address data
 * @mac_addr: the current mac address
 */
struct iwl_xvt_mac_addr_info {
	__u8 mac_addr[ETH_ALEN];
} __packed __aligned(4);

enum {
	TX_QUEUE_CFG_REMOVE,
	TX_QUEUE_CFG_ADD,
};

/**
 * struct iwl_xvt_tx_queue_cfg - add/remove tx queue
 * @sta_id: station ID associated with queue
 * @operation: 0 - remove queue, 1 - add queue
 */
struct iwl_xvt_tx_queue_cfg {
	__u8 sta_id;
	__u8 operation;
} __packed __aligned(4);

/**
 * struct iwl_xvt_driver_command_req - wrapper for general driver commands
 *	that are sent by IWL_XVT_CMD_DRIVER_CMD
 * @command_id: sub command ID
 * @max_out_length: max size in bytes of the sub command's expected response
 * @input_data: placeholder for the sub command's input structure
 */
struct iwl_xvt_driver_command_req {
	__u32 command_id;
	__u32 max_out_length;
	__u8 input_data[0];
} __packed __aligned(4);
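/*
 * For example, for IWL_DRV_CMD_TX_START the input_data above carries a
 * struct iwl_xvt_tx_start, and for IWL_DRV_CMD_SET_TX_PAYLOAD it carries
 * a struct iwl_xvt_set_tx_payload (both defined below).
 */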
/**
 * struct iwl_xvt_driver_command_resp - response of IWL_XVT_CMD_DRIVER_CMD
 * @command_id: sub command ID
 * @length: resp_data length in bytes
 * @resp_data: placeholder for the sub command's response data
 */
struct iwl_xvt_driver_command_resp {
	__u32 command_id;
	__u32 length;
	__u8 resp_data[0];
} __packed __aligned(4);

/**
 * struct iwl_xvt_txq_config - add/remove tx queue.
 *	IWL_DRV_CMD_CONFIG_TX_QUEUE input.
 * @sta_id: station id
 * @tid: TID
 * @scd_queue: scheduler queue to configure
 * @action: 1 queue enable, 0 queue disable, 2 change txq's tid owner
 *	Value is one of &enum iwl_scd_cfg_actions options
 * @aggregate: 1 aggregated queue, 0 otherwise
 * @tx_fifo: &enum iwl_mvm_tx_fifo
 * @window: BA window size
 * @reserved: reserved
 * @ssn: SSN for the BA agreement
 * @flags: flags for iwl_tx_queue_cfg_cmd. see &enum iwl_tx_queue_cfg_actions
 * @reserved2: reserved
 * @queue_size: size of configured queue
 */
struct iwl_xvt_txq_config {
	u8 sta_id;
	u8 tid;
	u8 scd_queue;
	u8 action;
	u8 aggregate;
	u8 tx_fifo;
	u8 window;
	u8 reserved;
	u16 ssn;
	u16 flags;
	u16 reserved2;
	int queue_size;
} __packed __aligned(4);

/**
 * struct iwl_xvt_txq_config_resp - response from IWL_DRV_CMD_CONFIG_TX_QUEUE
 * @sta_id: taken from command
 * @tid: taken from command
 * @scd_queue: queue number assigned to this RA -TID
 * @reserved: for alignment
 */
struct iwl_xvt_txq_config_resp {
	u8 sta_id;
	u8 tid;
	u8 scd_queue;
	u8 reserved;
} __packed __aligned(4);

/**
 * struct iwl_xvt_set_tx_payload - input for TX payload configuration
 * @index: payload's index in 'payloads' array in struct iwl_xvt
 * @length: payload length in bytes
 * @payload: buffer containing payload
 */
struct iwl_xvt_set_tx_payload {
	u16 index;
	u16 length;
	u8 payload[];
} __packed __aligned(4);

/**
 * struct tx_cmd_commom_data - Data shared between all queues for TX
 *	command configuration
 * @rate_flags: Tx Configuration rate flags
 * @tx_flags: TX flags configuration
 * @initial_rate_index: Start index in TLC table
 * @rts_retry_limit: Max retry for RTS transmit
 * @data_retry_limit: Max retry for data transmit
 * @fragment_size: 0 - no fragmentation, else - max fragment size
 * @frag_num: Array of fragment numbers
 */
struct tx_cmd_commom_data {
	u32 rate_flags;
	u32 tx_flags;
	u8 initial_rate_index;
	u8 rts_retry_limit;
	u8 data_retry_limit;
	u8 fragment_size;
	u8 frag_num[32];
} __packed __aligned(4);

/**
 * struct tx_cmd_frame_data - frame specific transmission data
 * @times: Number of subsequent times to transmit tx command to queue
 * @sta_id: Station index
 * @queue: Transmission queue
 * @tid_tspec: TID tspec
 * @sec_ctl: security control
 * @payload_index: payload buffer index in 'payloads' array in struct iwl_xvt
 * @reserved: for alignment
 * @key: security key
 * @header: MAC header
 */
struct tx_cmd_frame_data {
	u16 times;
	u8 sta_id;
	u8 queue;
	u8 tid_tspec;
	u8 sec_ctl;
	u8 payload_index;
	u8 reserved;
	u8 key[16];
	u8 header[IWL_XVT_MAX_MAC_HEADER_LENGTH];
} __packed __aligned(4);

/**
 * struct iwl_xvt_tx_start - Data for transmission. IWL_DRV_CMD_TX_START input.
 * @num_of_cycles: Number of times to go over frames_data. When set to zero -
 *	infinite transmit (max amount is ULLONG_MAX): go over frames_data and
 *	send tx until a stop command is received.
 * @num_of_different_frames: actual number of entries in frames_data
 * @send_tx_resp: Whether to send FW's tx response to user
 * @reserved1: for alignment
 * @reserved2: for alignment
 * @tx_data: Tx command configuration shared data
 * @frames_data: array of specific frame data for each queue
 */
struct iwl_xvt_tx_start {
	u16 num_of_cycles;
	u16 num_of_different_frames;
	u8 send_tx_resp;
	u8 reserved1;
	u16 reserved2;
	struct tx_cmd_commom_data tx_data;
	struct tx_cmd_frame_data frames_data[IWL_XVT_MAX_NUM_OF_FRAMES];
} __packed __aligned(4);

/**
 * struct iwl_xvt_enhanced_tx_data - Data for enhanced TX task
 * @xvt: pointer to the xvt op mode
 * @tx_start_data: IWL_DRV_CMD_TX_START command's input
 */
struct iwl_xvt_enhanced_tx_data {
	struct iwl_xvt *xvt;
	struct iwl_xvt_tx_start tx_start_data;
} __packed __aligned(4);

/**
 * struct iwl_xvt_post_tx_data - transmission data per queue
 * @num_of_packets: number of sent packets
 * @queue: queue packets were sent on
 * @reserved: for alignment
 */
struct iwl_xvt_post_tx_data {
	u64 num_of_packets;
	u16 queue;
	u16 reserved;
} __packed __aligned(4);

/**
 * struct iwl_xvt_tx_done - Notification data for sent tx
 * @status: tx task handler error status
 * @num_of_queues: total number of queues tx was sent from; equals the
 *	number of entries in tx_data
 * @tx_data: data of sent frames for each queue
 */
struct iwl_xvt_tx_done {
	u32 status;
	u32 num_of_queues;
	struct iwl_xvt_post_tx_data tx_data[];
} __packed __aligned(4);

/*
 * struct iwl_xvt_get_rx_agg_stats - get rx aggregation statistics
 * @sta_id: station id of relevant ba
 * @tid: tid of relevant ba
 * @reserved: reserved
 */
struct iwl_xvt_get_rx_agg_stats {
	u8 sta_id;
	u8 tid;
	u16 reserved;
} __packed __aligned(4);

/*
 * struct iwl_xvt_get_rx_agg_stats_resp - rx aggregation statistics response
 * @dropped: number of frames dropped (e.g. too old)
 * @released: total number of frames released (either in-order or
 *	out of order, after passing the reorder buffer)
 * @skipped: number of frames that skipped the reorder buffer (in-order)
 * @reordered: number of frames gone through the reorder buffer (unordered)
 */
struct iwl_xvt_get_rx_agg_stats_resp {
	u32 dropped;
	u32 released;
	u32 skipped;
	u32 reordered;
} __packed __aligned(4);

/*
 * struct iwl_xvt_config_rx_mpdu_req - Whether to send RX MPDU
 *	notifications to user
 * @enable: 0 - don't send rx mpdu to user, 1 - send rx mpdu to user
 * @reserved: reserved
 */
struct iwl_xvt_config_rx_mpdu_req {
	u8 enable;
	u8 reserved[3];
} __packed __aligned(4);

#endif
backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/iwl-trans.c000066400000000000000000000144141351064634500266570ustar00rootroot00000000000000/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2019 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2019 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/kernel.h>
#include <linux/bsearch.h>

#include "iwl-trans.h"
#include "iwl-drv.h"
#include "iwl-fh.h"

struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
				  struct device *dev,
				  const struct iwl_cfg *cfg,
				  const struct iwl_trans_ops *ops)
{
	struct iwl_trans *trans;
#ifdef CONFIG_LOCKDEP
	static struct lock_class_key __key;
#endif

	trans = devm_kzalloc(dev, sizeof(*trans) + priv_size, GFP_KERNEL);
	if (!trans)
		return NULL;

#ifdef CONFIG_LOCKDEP
	lockdep_init_map(&trans->sync_cmd_lockdep_map, "sync_cmd_lockdep_map",
			 &__key, 0);
#endif

	trans->dev = dev;
	trans->cfg = cfg;
	trans->ops = ops;
	trans->num_rx_queues = 1;

	snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name),
		 "iwl_cmd_pool:%s", dev_name(trans->dev));
	trans->dev_cmd_pool =
		kmem_cache_create(trans->dev_cmd_pool_name,
				  sizeof(struct iwl_device_cmd),
				  sizeof(void *),
				  SLAB_HWCACHE_ALIGN,
				  NULL);
	if (!trans->dev_cmd_pool)
		return NULL;

	WARN_ON(!ops->wait_txq_empty && !ops->wait_tx_queues_empty);

	return trans;
}

void iwl_trans_free(struct iwl_trans *trans)
{
	kmem_cache_destroy(trans->dev_cmd_pool);
}

int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	int ret;

	if (unlikely(!(cmd->flags & CMD_SEND_IN_RFKILL) &&
		     test_bit(STATUS_RFKILL_OPMODE, &trans->status)))
		return -ERFKILL;

	if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
		return -EIO;

	if (unlikely(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return -EIO;
	}

	if (WARN_ON((cmd->flags & CMD_WANT_ASYNC_CALLBACK) &&
		    !(cmd->flags & CMD_ASYNC)))
		return -EINVAL;

	if (!(cmd->flags & CMD_ASYNC))
		lock_map_acquire_read(&trans->sync_cmd_lockdep_map);

	if (trans->wide_cmd_header && !iwl_cmd_groupid(cmd->id))
		cmd->id = DEF_ID(cmd->id);

	ret = trans->ops->send_cmd(trans, cmd);

	if (!(cmd->flags & CMD_ASYNC))
		lock_map_release(&trans->sync_cmd_lockdep_map);

	if (WARN_ON((cmd->flags & CMD_WANT_SKB) && !ret && !cmd->resp_pkt))
		return -EIO;

	return ret;
}
IWL_EXPORT_SYMBOL(iwl_trans_send_cmd);
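/*
 * A minimal sketch of typical op_mode usage (illustrative only; the
 * command id and payload below are placeholders, not part of this file):
 *
 *	struct iwl_host_cmd hcmd = {
 *		.id = MY_CMD_ID,
 *		.flags = CMD_WANT_SKB,
 *		.data = { &payload, },
 *		.len = { sizeof(payload), },
 *	};
 *	int ret = iwl_trans_send_cmd(trans, &hcmd);
 *
 *	if (!ret)
 *		iwl_free_resp(&hcmd); // required when CMD_WANT_SKB was set
 */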
/* Comparator for struct iwl_hcmd_names.
 * Used in the binary search over a list of host commands.
 *
 * @key: command_id that we're looking for.
 * @elt: struct iwl_hcmd_names candidate for match.
 *
 * @return 0 iff equal.
 */
static int iwl_hcmd_names_cmp(const void *key, const void *elt)
{
	const struct iwl_hcmd_names *name = elt;
	u8 cmd1 = *(u8 *)key;
	u8 cmd2 = name->cmd_id;

	return (cmd1 - cmd2);
}

const char *iwl_get_cmd_string(struct iwl_trans *trans, u32 id)
{
	u8 grp, cmd;
	struct iwl_hcmd_names *ret;
	const struct iwl_hcmd_arr *arr;
	size_t size = sizeof(struct iwl_hcmd_names);

	grp = iwl_cmd_groupid(id);
	cmd = iwl_cmd_opcode(id);

	if (!trans->command_groups || grp >= trans->command_groups_size ||
	    !trans->command_groups[grp].arr)
		return "UNKNOWN";

	arr = &trans->command_groups[grp];
	ret = bsearch(&cmd, arr->arr, arr->size, size, iwl_hcmd_names_cmp);
	if (!ret)
		return "UNKNOWN";
	return ret->cmd_name;
}
IWL_EXPORT_SYMBOL(iwl_get_cmd_string);

int iwl_cmd_groups_verify_sorted(const struct iwl_trans_config *trans)
{
	int i, j;
	const struct iwl_hcmd_arr *arr;

	for (i = 0; i < trans->command_groups_size; i++) {
		arr = &trans->command_groups[i];
		if (!arr->arr)
			continue;
		for (j = 0; j < arr->size - 1; j++)
			if (arr->arr[j].cmd_id > arr->arr[j + 1].cmd_id)
				return -1;
	}
	return 0;
}
IWL_EXPORT_SYMBOL(iwl_cmd_groups_verify_sorted);
backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/iwl-trans.h000066400000000000000000001166241351064634500266700ustar00rootroot00000000000000/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#ifndef __iwl_trans_h__
#define __iwl_trans_h__

#include <linux/ieee80211.h>
#include <linux/mm.h> /* for page_address */
#include <linux/lockdep.h>
#include <linux/kernel.h>

#include "iwl-debug.h"
#include "iwl-config.h"
#include "fw/img.h"
#include "iwl-op-mode.h"
#ifdef CPTCFG_IWLWIFI_SUPPORT_DEBUG_OVERRIDES
#include "iwl-dbg-cfg.h"
#endif
#include "fw/api/cmdhdr.h"
#include "fw/api/txq.h"
#ifdef CPTCFG_IWLWIFI_DEVICE_TESTMODE
#include "fw/testmode.h"
#endif
#include "fw/api/dbg-tlv.h"
#include "iwl-dbg-tlv.h"

/**
 * DOC: Transport layer - what is it?
 *
 * The transport layer is the layer that deals with the HW directly. It
 * provides an abstraction of the underlying HW to the upper layer. The
 * transport layer doesn't provide any policy, algorithm or anything of
 * this kind, but only mechanisms to make the HW do something. It is not
 * completely stateless but close to it.
 * We will have an implementation for each different supported bus.
 */

/**
 * DOC: Life cycle of the transport layer
 *
 * The transport layer has a very precise life cycle.
 *
 *	1) A helper function is called during the module initialization and
 *	   registers the bus driver's ops with the transport's alloc function.
 *	2) The bus's probe calls the transport layer's allocation functions.
 *	   Of course these functions are bus specific.
 *	3) These allocation functions will spawn the upper layer, which will
 *	   register mac80211.
 *
 *	4) At some point (i.e. mac80211's start call), the op_mode will call
 *	   the following sequence:
 *	   start_hw
 *	   start_fw
 *
 *	5) Then when finished (or reset):
 *	   stop_device
 *
 *	6) Eventually, the free function will be called.
 */
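/*
 * A condensed sketch of the sequence above, using the wrappers declared
 * later in this file (illustrative only):
 *
 *	trans = iwl_trans_alloc(priv_size, dev, cfg, ops);
 *	iwl_trans_configure(trans, &trans_cfg);
 *	iwl_trans_start_hw(trans);
 *	iwl_trans_start_fw(trans, fw, false);
 *	...
 *	iwl_trans_stop_device(trans);
 *	iwl_trans_free(trans);
 */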
#define FH_RSCSR_FRAME_SIZE_MSK		0x00003FFF	/* bits 0-13 */
#define FH_RSCSR_FRAME_INVALID		0x55550000
#define FH_RSCSR_FRAME_ALIGN		0x40
#define FH_RSCSR_RPA_EN			BIT(25)
#define FH_RSCSR_RADA_EN		BIT(26)
#define FH_RSCSR_RXQ_POS		16
#define FH_RSCSR_RXQ_MASK		0x3F0000

struct iwl_rx_packet {
	/*
	 * The first 4 bytes of the RX frame header contain both the RX frame
	 * size and some flags.
	 * Bit fields:
	 * 31:    flag flush RB request
	 * 30:    flag ignore TC (terminal counter) request
	 * 29:    flag fast IRQ request
	 * 28-27: Reserved
	 * 26:    RADA enabled
	 * 25:    Offload enabled
	 * 24:    RPF enabled
	 * 23:    RSS enabled
	 * 22:    Checksum enabled
	 * 21-16: RX queue
	 * 15-14: Reserved
	 * 13-00: RX frame size
	 */
	__le32 len_n_flags;
	struct iwl_cmd_header hdr;
	u8 data[];
} __packed;

static inline u32 iwl_rx_packet_len(const struct iwl_rx_packet *pkt)
{
	return le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
}

static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt)
{
	return iwl_rx_packet_len(pkt) - sizeof(pkt->hdr);
}

/**
 * enum CMD_MODE - how to send the host commands?
 *
 * @CMD_ASYNC: Return right away and don't wait for the response
 * @CMD_WANT_SKB: Not valid with CMD_ASYNC. The caller needs the buffer of
 *	the response. The caller needs to call iwl_free_resp when done.
 * @CMD_SEND_IN_RFKILL: Send the command even when RF-kill is asserted.
 * @CMD_WANT_ASYNC_CALLBACK: the op_mode's async callback function must be
 *	called after this command completes. Valid only with CMD_ASYNC.
 */
enum CMD_MODE {
	CMD_ASYNC		= BIT(0),
	CMD_WANT_SKB		= BIT(1),
	CMD_SEND_IN_RFKILL	= BIT(2),
	CMD_WANT_ASYNC_CALLBACK	= BIT(3),
};

#define DEF_CMD_PAYLOAD_SIZE 320

/**
 * struct iwl_device_cmd
 *
 * For allocation of the command and tx queues, this establishes the overall
 * size of the largest command we send to uCode, except for commands that
 * aren't fully copied and use other TFD space.
 */
struct iwl_device_cmd {
	union {
		struct {
			struct iwl_cmd_header hdr;	/* uCode API */
			u8 payload[DEF_CMD_PAYLOAD_SIZE];
		};
		struct {
			struct iwl_cmd_header_wide hdr_wide;
			u8 payload_wide[DEF_CMD_PAYLOAD_SIZE -
					sizeof(struct iwl_cmd_header_wide) +
					sizeof(struct iwl_cmd_header)];
		};
	};
} __packed;

#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))

/*
 * number of transfer buffers (fragments) per transmit frame descriptor;
 * this is just the driver's idea, the hardware supports 20
 */
#define IWL_MAX_CMD_TBS_PER_TFD	2

/**
 * enum iwl_hcmd_dataflag - flag for each one of the chunks of the command
 *
 * @IWL_HCMD_DFL_NOCOPY: By default, the command is copied to the host command's
 *	ring. The transport layer doesn't map the command's buffer to DMA, but
 *	rather copies it to a previously allocated DMA buffer. This flag tells
 *	the transport layer not to copy the command, but to map the existing
 *	buffer (that is passed in) instead. This saves the memcpy and allows
 *	commands that are bigger than the fixed buffer to be submitted.
 *	Note that a TFD entry after a NOCOPY one cannot be a normal copied one.
 * @IWL_HCMD_DFL_DUP: Only valid without NOCOPY, duplicate the memory for this
 *	chunk internally and free it again after the command completes. This
 *	can (currently) be used only once per command.
 *	Note that a TFD entry after a DUP one cannot be a normal copied one.
 */
enum iwl_hcmd_dataflag {
	IWL_HCMD_DFL_NOCOPY	= BIT(0),
	IWL_HCMD_DFL_DUP	= BIT(1),
};
enum iwl_error_event_table_status {
	IWL_ERROR_EVENT_TABLE_LMAC1 = BIT(0),
	IWL_ERROR_EVENT_TABLE_LMAC2 = BIT(1),
	IWL_ERROR_EVENT_TABLE_UMAC = BIT(2),
};

/**
 * struct iwl_host_cmd - Host command to the uCode
 *
 * @data: array of chunks that compose the data of the host command
 * @resp_pkt: response packet, if %CMD_WANT_SKB was set
 * @_rx_page_order: (internally used to free response packet)
 * @_rx_page_addr: (internally used to free response packet)
 * @flags: can be CMD_*
 * @len: array of the lengths of the chunks in data
 * @dataflags: IWL_HCMD_DFL_*
 * @id: command id of the host command, for wide commands encoding the
 *	version and group as well
 */
struct iwl_host_cmd {
	const void *data[IWL_MAX_CMD_TBS_PER_TFD];
	struct iwl_rx_packet *resp_pkt;
	unsigned long _rx_page_addr;
	u32 _rx_page_order;

	u32 flags;
	u32 id;
	u16 len[IWL_MAX_CMD_TBS_PER_TFD];
	u8 dataflags[IWL_MAX_CMD_TBS_PER_TFD];
};

static inline void iwl_free_resp(struct iwl_host_cmd *cmd)
{
	free_pages(cmd->_rx_page_addr, cmd->_rx_page_order);
}

struct iwl_rx_cmd_buffer {
	struct page *_page;
	int _offset;
	bool _page_stolen;
	u32 _rx_page_order;
	unsigned int truesize;
};

static inline void *rxb_addr(struct iwl_rx_cmd_buffer *r)
{
	return (void *)((unsigned long)page_address(r->_page) + r->_offset);
}

static inline int rxb_offset(struct iwl_rx_cmd_buffer *r)
{
	return r->_offset;
}

static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r)
{
	r->_page_stolen = true;
	get_page(r->_page);
	return r->_page;
}

static inline void iwl_free_rxb(struct iwl_rx_cmd_buffer *r)
{
	__free_pages(r->_page, r->_rx_page_order);
}

#define MAX_NO_RECLAIM_CMDS	6

#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))

/*
 * Maximum number of HW queues the transport layer
 * currently supports
 */
#define IWL_MAX_HW_QUEUES		32
#define IWL_MAX_TVQM_QUEUES		512

#define IWL_MAX_TID_COUNT	8
#define IWL_MGMT_TID		15
#define IWL_FRAME_LIMIT	64
#define IWL_MAX_RX_HW_QUEUES	16

/**
 * enum iwl_d3_status - WoWLAN image/device status
 * @IWL_D3_STATUS_ALIVE: firmware is still running after resume
 * @IWL_D3_STATUS_RESET: device was reset while suspended
 */
enum iwl_d3_status {
	IWL_D3_STATUS_ALIVE,
	IWL_D3_STATUS_RESET,
};

/**
 * enum iwl_trans_status: transport status flags
 * @STATUS_SYNC_HCMD_ACTIVE: a SYNC command is being processed
 * @STATUS_DEVICE_ENABLED: APM is enabled
 * @STATUS_TPOWER_PMI: the device might be asleep (need to wake it up)
 * @STATUS_INT_ENABLED: interrupts are enabled
 * @STATUS_RFKILL_HW: the actual HW state of the RF-kill switch
 * @STATUS_RFKILL_OPMODE: RF-kill state reported to opmode
 * @STATUS_FW_ERROR: the fw is in error state
 * @STATUS_TRANS_GOING_IDLE: shutting down the trans, only special commands
 *	are sent
 * @STATUS_TRANS_IDLE: the trans is idle - general commands are not to be sent
 * @STATUS_TA_ACTIVE: target access is in progress
 * @STATUS_TRANS_DEAD: trans is dead - avoid any read/write operation
 */
enum iwl_trans_status {
	STATUS_SYNC_HCMD_ACTIVE,
	STATUS_DEVICE_ENABLED,
	STATUS_TPOWER_PMI,
	STATUS_INT_ENABLED,
	STATUS_RFKILL_HW,
	STATUS_RFKILL_OPMODE,
	STATUS_FW_ERROR,
	STATUS_TRANS_GOING_IDLE,
	STATUS_TRANS_IDLE,
	STATUS_TA_ACTIVE,
	STATUS_TRANS_DEAD,
};

static inline int
iwl_trans_get_rb_size_order(enum iwl_amsdu_size rb_size)
{
	switch (rb_size) {
	case IWL_AMSDU_2K:
		return get_order(2 * 1024);
	case IWL_AMSDU_4K:
		return get_order(4 * 1024);
	case IWL_AMSDU_8K:
		return get_order(8 * 1024);
	case IWL_AMSDU_12K:
		return get_order(12 * 1024);
	default:
		WARN_ON(1);
		return -1;
	}
}

struct iwl_hcmd_names {
	u8 cmd_id;
	const char *const cmd_name;
};

#define HCMD_NAME(x)	\
	{ .cmd_id = x, .cmd_name = #x }

struct iwl_hcmd_arr {
	const struct iwl_hcmd_names *arr;
	int size;
};

#define HCMD_ARR(x)	\
	{ .arr = x, .size = ARRAY_SIZE(x) }
/**
 * struct iwl_trans_config - transport configuration
 *
 * @op_mode: pointer to the upper layer.
 * @cmd_queue: the index of the command queue.
 *	Must be set before start_fw.
 * @cmd_fifo: the fifo for host commands
 * @cmd_q_wdg_timeout: the timeout of the watchdog timer for the command queue.
 * @no_reclaim_cmds: Some devices erroneously don't set the
 *	SEQ_RX_FRAME bit on some notifications, this is the
 *	list of such notifications to filter. Max length is
 *	%MAX_NO_RECLAIM_CMDS.
 * @n_no_reclaim_cmds: # of commands in list
 * @rx_buf_size: RX buffer size needed for A-MSDUs
 *	if unset 4k will be the RX buffer size
 * @bc_table_dword: set to true if the BC table expects the byte count to be
 *	in DWORD (as opposed to bytes)
 * @scd_set_active: should the transport configure the SCD for HCMD queue
 * @sw_csum_tx: transport should compute the TCP checksum
 * @command_groups: array of command groups, each member is an array of the
 *	commands in the group; for debugging only
 * @command_groups_size: number of command groups, to avoid illegal access
 * @cb_data_offs: offset inside skb->cb to store transport data at, must have
 *	space for at least two pointers
 */
struct iwl_trans_config {
	struct iwl_op_mode *op_mode;

	u8 cmd_queue;
	u8 cmd_fifo;
	unsigned int cmd_q_wdg_timeout;
	const u8 *no_reclaim_cmds;
	unsigned int n_no_reclaim_cmds;

	enum iwl_amsdu_size rx_buf_size;
	bool bc_table_dword;
	bool scd_set_active;
	bool sw_csum_tx;
	const struct iwl_hcmd_arr *command_groups;
	int command_groups_size;

	u8 cb_data_offs;
};

struct iwl_trans_dump_data {
	u32 len;
	u8 data[];
};

struct iwl_trans;

struct iwl_trans_txq_scd_cfg {
	u8 fifo;
	u8 sta_id;
	u8 tid;
	bool aggregate;
	int frame_limit;
};

/**
 * struct iwl_trans_rxq_dma_data - RX queue DMA data
 * @fr_bd_cb: DMA address of free BD cyclic buffer
 * @fr_bd_wid: Initial write index of the free BD cyclic buffer
 * @urbd_stts_wrptr: DMA address of urbd_stts_wrptr
 * @ur_bd_cb: DMA address of used BD cyclic buffer
 */
struct iwl_trans_rxq_dma_data {
	u64 fr_bd_cb;
	u32 fr_bd_wid;
	u64 urbd_stts_wrptr;
	u64 ur_bd_cb;
};

/**
 * struct iwl_trans_ops - transport specific operations
 *
 * All the handlers MUST be implemented
 *
 * @start_hw: starts the HW. From that point on, the HW can send interrupts.
 *	May sleep.
 * @op_mode_leave: Turn off the HW RF kill indication if on
 *	May sleep
 * @start_fw: allocates and inits all the resources for the transport
 *	layer. Also kicks the fw image.
 *	May sleep
 * @fw_alive: called when the fw sends alive notification. If the fw provides
 *	the SCD base address in SRAM, then provide it here, or 0 otherwise.
 *	May sleep
 * @stop_device: stops the whole device (embedded CPU put to reset) and stops
 *	the HW. From that point on, the HW will be stopped but will still issue
 *	an interrupt if the HW RF kill switch is triggered.
 *	This callback must do the right thing and not crash even if %start_hw()
 *	was called but not %start_fw(). May sleep.
 * @d3_suspend: put the device into the correct mode for WoWLAN during
 *	suspend. This is optional, if not implemented WoWLAN will not be
 *	supported. This callback may sleep.
 * @d3_resume: resume the device after WoWLAN, enabling the opmode to
 *	talk to the WoWLAN image to get its status. This is optional, if not
 *	implemented WoWLAN will not be supported. This callback may sleep.
 * @send_cmd: send a host command. Must return -ERFKILL if RFkill is asserted.
 *	If RFkill is asserted in the middle of a SYNC host command, it must
 *	return -ERFKILL straight away.
 *	May sleep only if CMD_ASYNC is not set
 * @tx: send an skb. The transport relies on the op_mode to zero the
 *	ieee80211_tx_info->driver_data. If the MPDU is an A-MSDU, all
 *	the CSUM will be taken care of (TCP CSUM and IP header in case of
 *	IPv4). If the MPDU is a single MSDU, the op_mode must compute the IP
 *	header if it is IPv4.
 *	Must be atomic
 * @reclaim: free packets up to ssn. Returns a list of freed packets.
 *	Must be atomic
 * @txq_enable: setup a queue. To setup an AC queue, use the
 *	iwl_trans_ac_txq_enable wrapper. fw_alive must have been called before
 *	this one. The op_mode must not configure the HCMD queue. The scheduler
 *	configuration may be %NULL, in which case the hardware will not be
 *	configured. If true is returned, the operation mode needs to increment
 *	the sequence number of the packets routed to this queue because of a
 *	hardware scheduler bug. May sleep.
 * @txq_disable: de-configure a Tx queue to send AMPDUs
 *	Must be atomic
 * @txq_set_shared_mode: change Tx queue shared/unshared marking
 * @wait_tx_queues_empty: wait until tx queues are empty. May sleep.
 * @wait_txq_empty: wait until specific tx queue is empty. May sleep.
 * @freeze_txq_timer: prevents the timer of the queue from firing until the
 *	queue is set to awake. Must be atomic.
 * @block_txq_ptrs: stop updating the write pointers of the Tx queues. Note
 *	that the transport needs to refcount the calls since this function
 *	will be called several times with block = true, and then the queues
 *	need to be unblocked only after the same number of calls with
 *	block = false.
 * @write8: write a u8 to a register at offset ofs from the BAR
 * @write32: write a u32 to a register at offset ofs from the BAR
 * @read32: read a u32 register at offset ofs from the BAR
 * @read_prph: read a DWORD from a periphery register
 * @write_prph: write a DWORD to a periphery register
 * @read_mem: read device's SRAM in DWORD
 * @write_mem: write device's SRAM in DWORD. If %buf is %NULL, then the memory
 *	will be zeroed.
 * @configure: configure parameters required by the transport layer from
 *	the op_mode. May be called several times before start_fw, can't be
 *	called after that.
 * @set_pmi: set the power pmi state
 * @grab_nic_access: wake the NIC to be able to access non-HBUS regs.
 *	Sleeping is not allowed between grab_nic_access and
 *	release_nic_access.
 * @release_nic_access: let the NIC go to sleep. The "flags" parameter
 *	must be the same one that was sent before to the grab_nic_access.
 * @set_bits_mask: set SRAM register according to value and mask.
 * @dump_data: return a vmalloc'ed buffer with debug data, maybe containing last
 *	TX'ed commands and similar. The buffer will be vfree'd by the caller.
 *	Note that the transport must fill in the proper file headers.
 * @debugfs_cleanup: used in the driver unload flow to make a proper cleanup
 *	of the trans debugfs
 */
struct iwl_trans_ops {
	int (*start_hw)(struct iwl_trans *iwl_trans);
	void (*op_mode_leave)(struct iwl_trans *iwl_trans);
#if IS_ENABLED(CPTCFG_IWLXVT)
	int (*start_fw_dbg)(struct iwl_trans *trans, const struct fw_img *fw,
			    bool run_in_rfkill, u32 fw_dbg_flags);
	int (*test_mode_cmd)(struct iwl_trans *trans, bool enable);
#endif
	int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw,
			bool run_in_rfkill);
	void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr);
	void (*stop_device)(struct iwl_trans *trans);

	void (*d3_suspend)(struct iwl_trans *trans, bool test, bool reset);
	int (*d3_resume)(struct iwl_trans *trans, enum iwl_d3_status *status,
			 bool test, bool reset);

	int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd);

	int (*tx)(struct iwl_trans *trans, struct sk_buff *skb,
		  struct iwl_device_cmd *dev_cmd, int queue);
	void (*reclaim)(struct iwl_trans *trans, int queue, int ssn,
			struct sk_buff_head *skbs);

	void (*set_q_ptrs)(struct iwl_trans *trans, int queue, int ptr);

	bool (*txq_enable)(struct iwl_trans *trans, int queue, u16 ssn,
			   const struct iwl_trans_txq_scd_cfg *cfg,
			   unsigned int queue_wdg_timeout);
	void (*txq_disable)(struct iwl_trans *trans, int queue,
			    bool configure_scd);
	/* 22000 functions */
	int (*txq_alloc)(struct iwl_trans *trans,
			 __le16 flags, u8 sta_id, u8 tid,
			 int cmd_id, int size,
			 unsigned int queue_wdg_timeout);
	void (*txq_free)(struct iwl_trans *trans, int queue);
	int (*rxq_dma_data)(struct iwl_trans *trans, int queue,
			    struct iwl_trans_rxq_dma_data *data);

	void (*txq_set_shared_mode)(struct iwl_trans *trans, u32 txq_id,
				    bool shared);

	int (*wait_tx_queues_empty)(struct iwl_trans *trans, u32 txq_bm);
	int (*wait_txq_empty)(struct iwl_trans *trans, int queue);
	void (*freeze_txq_timer)(struct iwl_trans *trans, unsigned long txqs,
				 bool freeze);
	void (*block_txq_ptrs)(struct iwl_trans *trans, bool block);

	void (*write8)(struct iwl_trans *trans, u32 ofs, u8 val);
	void (*write32)(struct iwl_trans *trans, u32 ofs, u32 val);
	u32 (*read32)(struct iwl_trans *trans, u32 ofs);
	u32 (*read_prph)(struct iwl_trans *trans, u32 ofs);
	void (*write_prph)(struct iwl_trans *trans, u32 ofs, u32 val);
	int (*read_mem)(struct iwl_trans *trans, u32 addr,
			void *buf, int dwords);
	int (*write_mem)(struct iwl_trans *trans, u32 addr,
			 const void *buf, int dwords);
	void (*configure)(struct iwl_trans *trans,
			  const struct iwl_trans_config *trans_cfg);
	void (*set_pmi)(struct iwl_trans *trans, bool state);
	void (*sw_reset)(struct iwl_trans *trans);
	bool (*grab_nic_access)(struct iwl_trans *trans, unsigned long *flags);
	void (*release_nic_access)(struct iwl_trans *trans,
				   unsigned long *flags);
	void (*set_bits_mask)(struct iwl_trans *trans, u32 reg, u32 mask,
			      u32 value);
	int  (*suspend)(struct iwl_trans *trans);
	void (*resume)(struct iwl_trans *trans);

	struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans,
						 u32 dump_mask);
	void (*debugfs_cleanup)(struct iwl_trans *trans);
	void (*sync_nmi)(struct iwl_trans *trans);
};

/**
 * enum iwl_trans_state - state of the transport layer
 *
 * @IWL_TRANS_NO_FW: no fw has sent an alive response
 * @IWL_TRANS_FW_ALIVE: a fw has sent an alive response
 */
enum iwl_trans_state {
	IWL_TRANS_NO_FW = 0,
	IWL_TRANS_FW_ALIVE = 1,
};

/**
 * DOC: Platform power management
 *
 * In system-wide power management the entire platform goes into a low
 * power state (e.g. idle or suspend to RAM) at the same time and the
 * device is configured as a wakeup source for the entire platform.
 * This is usually triggered by userspace activity (e.g. the user
 * presses the suspend button or a power management daemon decides to
 * put the platform in low power mode). The device's behavior in this
 * mode is dictated by the wake-on-WLAN configuration.
 *
 * The terms used for the device's behavior are as follows:
 *
 *	- D0: the device is fully powered and the host is awake;
 *	- D3: the device is in low power mode and only reacts to
 *		specific events (e.g. magic-packet received or scan
 *		results found);
 *
 * These terms reflect the power modes in the firmware and are not to
 * be confused with the physical device power state.
 */

/**
 * enum iwl_plat_pm_mode - platform power management mode
 *
 * This enumeration describes the device's platform power management
 * behavior when in system-wide suspend (i.e. WoWLAN).
 *
 * @IWL_PLAT_PM_MODE_DISABLED: power management is disabled for this
 *	device. In system-wide suspend mode, it means that all
 *	connections will be closed automatically by mac80211 before
 *	the platform is suspended.
 * @IWL_PLAT_PM_MODE_D3: the device goes into D3 mode (i.e. WoWLAN).
 */
enum iwl_plat_pm_mode {
	IWL_PLAT_PM_MODE_DISABLED,
	IWL_PLAT_PM_MODE_D3,
};

/* Max time to wait for nmi interrupt */
#define IWL_TRANS_NMI_TIMEOUT (HZ / 4 * CPTCFG_IWL_TIMEOUT_FACTOR)

/**
 * struct iwl_dram_data
 * @physical: page phy pointer
 * @block: pointer to the allocated block/page
 * @size: size of the block/page
 */
struct iwl_dram_data {
	dma_addr_t physical;
	void *block;
	int size;
};

/**
 * struct iwl_self_init_dram - dram data used by self init process
 * @fw: lmac and umac dram data
 * @fw_cnt: total number of items in array
 * @paging: paging dram data
 * @paging_cnt: total number of items in array
 */
struct iwl_self_init_dram {
	struct iwl_dram_data *fw;
	int fw_cnt;
	struct iwl_dram_data *paging;
	int paging_cnt;
};

/**
 * struct iwl_trans_debug - transport debug related data
 *
 * @n_dest_reg: num of reg_ops in %dbg_dest_tlv
 * @rec_on: true iff there is a fw debug recording currently active
 * @dest_tlv: points to the destination TLV for debug
 * @conf_tlv: array of pointers to configuration TLVs for debug
 * @trigger_tlv: array of pointers to triggers TLVs for debug
 * @lmac_error_event_table: addrs of lmacs error tables
 * @umac_error_event_table: addr of umac error table
 * @error_event_table_tlv_status: bitmap that indicates which error table
	Uses enum &iwl_error_event_table_status
 * @external_ini_loaded: indicates if an external ini cfg was given
 * @ini_valid: indicates if debug ini mode is on
 * @num_blocks: number of blocks in fw_mon
 * @fw_mon: address of the buffers for firmware monitor
 * @is_alloc: bit i is set if buffer i was allocated
 * @hw_error: equals true if hw error interrupt was received from the FW
 * @ini_dest: debug monitor destination uses &enum iwl_fw_ini_buffer_location
 */
struct iwl_trans_debug {
	u8 n_dest_reg;
	bool rec_on;
	const struct iwl_fw_dbg_dest_tlv_v1 *dest_tlv;
	const struct iwl_fw_dbg_conf_tlv *conf_tlv[FW_DBG_CONF_MAX];
	struct iwl_fw_dbg_trigger_tlv * const *trigger_tlv;
	u32 lmac_error_event_table[2];
	u32 umac_error_event_table;
	unsigned int error_event_table_tlv_status;
	bool external_ini_loaded;
	bool ini_valid;
	struct iwl_apply_point_data apply_points[IWL_FW_INI_APPLY_NUM];
	struct iwl_apply_point_data apply_points_ext[IWL_FW_INI_APPLY_NUM];
	int num_blocks;
	struct iwl_dram_data fw_mon[IWL_FW_INI_ALLOCATION_NUM];
	u32 is_alloc;
	bool hw_error;
	enum iwl_fw_ini_buffer_location ini_dest;
};

/**
 * struct iwl_trans - transport common data
 *
 * @ops - pointer to iwl_trans_ops
 * @op_mode - pointer to the op_mode
 * @cfg - pointer to the configuration
 * @drv - pointer to iwl_drv
 * @status: a bit-mask of transport status flags
 * @dev - pointer to struct device * that represents the device
 * @max_skb_frags: maximum number of fragments an SKB can have when transmitted.
 *	0 indicates that frag SKBs (NETIF_F_SG) aren't supported.
 * @hw_rf_id: a u32 with the device RF ID
 * @hw_id: a u32 with the ID of the device / sub-device.
 *	Set during transport allocation.
 * @hw_id_str: a string with info about HW ID. Set during transport allocation.
 * @pm_support: set to true in start_hw if link pm is supported
 * @ltr_enabled: set to true if the LTR is enabled
 * @wide_cmd_header: true when ucode supports wide command header format
 * @num_rx_queues: number of RX queues allocated by the transport;
 *	the transport must set this before calling iwl_drv_start()
 * @iml_len: the length of the image loader
 * @iml: a pointer to the image loader itself
 * @dev_cmd_pool: pool for Tx cmd allocation - for internal use only.
 *	The user should use iwl_trans_{alloc,free}_tx_cmd.
 * @rx_mpdu_cmd: MPDU RX command ID, must be assigned by opmode before
 *	starting the firmware, used for tracing
 * @rx_mpdu_cmd_hdr_size: used for tracing, amount of data before the
 *	start of the 802.11 header in the @rx_mpdu_cmd
 * @dflt_pwr_limit: default power limit fetched from the platform (ACPI)
 * @system_pm_mode: the system-wide power management mode in use.
 *	This mode is set dynamically, depending on the WoWLAN values
 *	configured from userspace at runtime.
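 *
 * Example (illustrative sketch only, not taken from the driver): a
 * transport implementation typically allocates this struct together
 * with its private data and reaches that data through
 * @trans_specific. The type name my_trans_priv and the ops variable
 * below are hypothetical:
 *
 *	struct my_trans_priv *priv;
 *	struct iwl_trans *trans;
 *
 *	trans = iwl_trans_alloc(sizeof(*priv), dev, cfg, &my_trans_ops);
 *	if (!trans)
 *		return NULL;
 *	priv = (struct my_trans_priv *)trans->trans_specific;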
*/ struct iwl_trans { const struct iwl_trans_ops *ops; struct iwl_op_mode *op_mode; const struct iwl_cfg *cfg; struct iwl_drv *drv; struct iwl_tm_gnl_dev *tmdev; enum iwl_trans_state state; unsigned long status; struct device *dev; u32 max_skb_frags; u32 hw_rev; u32 hw_rf_id; u32 hw_id; char hw_id_str[52]; u8 rx_mpdu_cmd, rx_mpdu_cmd_hdr_size; bool pm_support; bool ltr_enabled; const struct iwl_hcmd_arr *command_groups; int command_groups_size; bool wide_cmd_header; u8 num_rx_queues; size_t iml_len; u8 *iml; /* The following fields are internal only */ struct kmem_cache *dev_cmd_pool; char dev_cmd_pool_name[50]; struct dentry *dbgfs_dir; #ifdef CONFIG_LOCKDEP struct lockdep_map sync_cmd_lockdep_map; #endif #ifdef CPTCFG_IWLWIFI_SUPPORT_DEBUG_OVERRIDES struct iwl_dbg_cfg dbg_cfg; #endif struct iwl_trans_debug dbg; struct iwl_self_init_dram init_dram; enum iwl_plat_pm_mode system_pm_mode; #ifdef CPTCFG_IWLWIFI_DEVICE_TESTMODE struct iwl_testmode testmode; #endif /* pointer to trans specific struct */ /*Ensure that this pointer will always be aligned to sizeof pointer */ char trans_specific[0] __aligned(sizeof(void *)); }; const char *iwl_get_cmd_string(struct iwl_trans *trans, u32 id); int iwl_cmd_groups_verify_sorted(const struct iwl_trans_config *trans); static inline void iwl_trans_configure(struct iwl_trans *trans, const struct iwl_trans_config *trans_cfg) { trans->op_mode = trans_cfg->op_mode; trans->ops->configure(trans, trans_cfg); WARN_ON(iwl_cmd_groups_verify_sorted(trans_cfg)); } static inline int iwl_trans_start_hw(struct iwl_trans *trans) { might_sleep(); return trans->ops->start_hw(trans); } static inline void iwl_trans_op_mode_leave(struct iwl_trans *trans) { might_sleep(); if (trans->ops->op_mode_leave) trans->ops->op_mode_leave(trans); trans->op_mode = NULL; trans->state = IWL_TRANS_NO_FW; } static inline void iwl_trans_fw_alive(struct iwl_trans *trans, u32 scd_addr) { might_sleep(); trans->state = IWL_TRANS_FW_ALIVE; trans->ops->fw_alive(trans, scd_addr); } static inline int iwl_trans_start_fw(struct iwl_trans *trans, const struct fw_img *fw, bool run_in_rfkill) { might_sleep(); WARN_ON_ONCE(!trans->rx_mpdu_cmd); clear_bit(STATUS_FW_ERROR, &trans->status); return trans->ops->start_fw(trans, fw, run_in_rfkill); } #if IS_ENABLED(CPTCFG_IWLXVT) enum iwl_xvt_dbg_flags { IWL_XVT_DBG_ADC_SAMP_TEST = BIT(0), IWL_XVT_DBG_ADC_SAMP_SYNC_RX = BIT(1), }; static inline int iwl_trans_start_fw_dbg(struct iwl_trans *trans, const struct fw_img *fw, bool run_in_rfkill, u32 dbg_flags) { might_sleep(); if (WARN_ON_ONCE(!trans->ops->start_fw_dbg && dbg_flags)) return -ENOTSUPP; clear_bit(STATUS_FW_ERROR, &trans->status); if (trans->ops->start_fw_dbg) return trans->ops->start_fw_dbg(trans, fw, run_in_rfkill, dbg_flags); return trans->ops->start_fw(trans, fw, run_in_rfkill); } #endif static inline void iwl_trans_stop_device(struct iwl_trans *trans) { might_sleep(); trans->ops->stop_device(trans); trans->state = IWL_TRANS_NO_FW; } static inline void iwl_trans_d3_suspend(struct iwl_trans *trans, bool test, bool reset) { might_sleep(); if (trans->ops->d3_suspend) trans->ops->d3_suspend(trans, test, reset); } static inline int iwl_trans_d3_resume(struct iwl_trans *trans, enum iwl_d3_status *status, bool test, bool reset) { might_sleep(); if (!trans->ops->d3_resume) return 0; return trans->ops->d3_resume(trans, status, test, reset); } static inline int iwl_trans_suspend(struct iwl_trans *trans) { if (!trans->ops->suspend) return 0; return trans->ops->suspend(trans); } static inline void 
iwl_trans_resume(struct iwl_trans *trans) { if (trans->ops->resume) trans->ops->resume(trans); } static inline struct iwl_trans_dump_data * iwl_trans_dump_data(struct iwl_trans *trans, u32 dump_mask) { if (!trans->ops->dump_data) return NULL; return trans->ops->dump_data(trans, dump_mask); } static inline struct iwl_device_cmd * iwl_trans_alloc_tx_cmd(struct iwl_trans *trans) { return kmem_cache_alloc(trans->dev_cmd_pool, GFP_ATOMIC); } int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd); static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans, struct iwl_device_cmd *dev_cmd) { kmem_cache_free(trans->dev_cmd_pool, dev_cmd); } static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_device_cmd *dev_cmd, int queue) { if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status))) return -EIO; if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) { IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); return -EIO; } return trans->ops->tx(trans, skb, dev_cmd, queue); } static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue, int ssn, struct sk_buff_head *skbs) { if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) { IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); return; } trans->ops->reclaim(trans, queue, ssn, skbs); } static inline void iwl_trans_set_q_ptrs(struct iwl_trans *trans, int queue, int ptr) { if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) { IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); return; } trans->ops->set_q_ptrs(trans, queue, ptr); } static inline void iwl_trans_txq_disable(struct iwl_trans *trans, int queue, bool configure_scd) { trans->ops->txq_disable(trans, queue, configure_scd); } static inline bool iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg, unsigned int queue_wdg_timeout) { might_sleep(); if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) { IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); return false; } return trans->ops->txq_enable(trans, queue, ssn, cfg, queue_wdg_timeout); } static inline int iwl_trans_get_rxq_dma_data(struct iwl_trans *trans, int queue, struct iwl_trans_rxq_dma_data *data) { if (WARN_ON_ONCE(!trans->ops->rxq_dma_data)) return -ENOTSUPP; return trans->ops->rxq_dma_data(trans, queue, data); } static inline void iwl_trans_txq_free(struct iwl_trans *trans, int queue) { if (WARN_ON_ONCE(!trans->ops->txq_free)) return; trans->ops->txq_free(trans, queue); } static inline int iwl_trans_txq_alloc(struct iwl_trans *trans, __le16 flags, u8 sta_id, u8 tid, int cmd_id, int size, unsigned int wdg_timeout) { might_sleep(); if (WARN_ON_ONCE(!trans->ops->txq_alloc)) return -ENOTSUPP; if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) { IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); return -EIO; } return trans->ops->txq_alloc(trans, flags, sta_id, tid, cmd_id, size, wdg_timeout); } static inline void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans, int queue, bool shared_mode) { if (trans->ops->txq_set_shared_mode) trans->ops->txq_set_shared_mode(trans, queue, shared_mode); } static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue, int fifo, int sta_id, int tid, int frame_limit, u16 ssn, unsigned int queue_wdg_timeout) { struct iwl_trans_txq_scd_cfg cfg = { .fifo = fifo, .sta_id = sta_id, .tid = tid, .frame_limit = frame_limit, .aggregate = sta_id >= 0, }; iwl_trans_txq_enable_cfg(trans, queue, ssn, &cfg, 
queue_wdg_timeout); } static inline void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue, int fifo, unsigned int queue_wdg_timeout) { struct iwl_trans_txq_scd_cfg cfg = { .fifo = fifo, .sta_id = -1, .tid = IWL_MAX_TID_COUNT, .frame_limit = IWL_FRAME_LIMIT, .aggregate = false, }; iwl_trans_txq_enable_cfg(trans, queue, 0, &cfg, queue_wdg_timeout); } static inline void iwl_trans_freeze_txq_timer(struct iwl_trans *trans, unsigned long txqs, bool freeze) { if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) { IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); return; } if (trans->ops->freeze_txq_timer) trans->ops->freeze_txq_timer(trans, txqs, freeze); } static inline void iwl_trans_block_txq_ptrs(struct iwl_trans *trans, bool block) { if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) { IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); return; } if (trans->ops->block_txq_ptrs) trans->ops->block_txq_ptrs(trans, block); } static inline int iwl_trans_wait_tx_queues_empty(struct iwl_trans *trans, u32 txqs) { if (WARN_ON_ONCE(!trans->ops->wait_tx_queues_empty)) return -ENOTSUPP; if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) { IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); return -EIO; } return trans->ops->wait_tx_queues_empty(trans, txqs); } static inline int iwl_trans_wait_txq_empty(struct iwl_trans *trans, int queue) { if (WARN_ON_ONCE(!trans->ops->wait_txq_empty)) return -ENOTSUPP; if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) { IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); return -EIO; } return trans->ops->wait_txq_empty(trans, queue); } #if IS_ENABLED(CPTCFG_IWLXVT) static inline int iwl_trans_test_mode_cmd(struct iwl_trans *trans, bool enable) { if (trans->ops->test_mode_cmd) return trans->ops->test_mode_cmd(trans, enable); return -ENOTSUPP; } #endif static inline void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val) { trans->ops->write8(trans, ofs, val); } static inline void iwl_trans_write32(struct iwl_trans *trans, u32 ofs, u32 val) { trans->ops->write32(trans, ofs, val); } static inline u32 iwl_trans_read32(struct iwl_trans *trans, u32 ofs) { return trans->ops->read32(trans, ofs); } static inline u32 iwl_trans_read_prph(struct iwl_trans *trans, u32 ofs) { return trans->ops->read_prph(trans, ofs); } static inline void iwl_trans_write_prph(struct iwl_trans *trans, u32 ofs, u32 val) { return trans->ops->write_prph(trans, ofs, val); } static inline int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr, void *buf, int dwords) { return trans->ops->read_mem(trans, addr, buf, dwords); } #define iwl_trans_read_mem_bytes(trans, addr, buf, bufsize) \ do { \ if (__builtin_constant_p(bufsize)) \ BUILD_BUG_ON((bufsize) % sizeof(u32)); \ iwl_trans_read_mem(trans, addr, buf, (bufsize) / sizeof(u32));\ } while (0) static inline u32 iwl_trans_read_mem32(struct iwl_trans *trans, u32 addr) { u32 value; if (WARN_ON(iwl_trans_read_mem(trans, addr, &value, 1))) return 0xa5a5a5a5; return value; } static inline int iwl_trans_write_mem(struct iwl_trans *trans, u32 addr, const void *buf, int dwords) { return trans->ops->write_mem(trans, addr, buf, dwords); } static inline u32 iwl_trans_write_mem32(struct iwl_trans *trans, u32 addr, u32 val) { return iwl_trans_write_mem(trans, addr, &val, 1); } static inline void iwl_trans_set_pmi(struct iwl_trans *trans, bool state) { if (trans->ops->set_pmi) trans->ops->set_pmi(trans, state); } static inline void iwl_trans_sw_reset(struct iwl_trans *trans) { if 
(trans->ops->sw_reset)
		trans->ops->sw_reset(trans);
}

static inline void
iwl_trans_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value)
{
	trans->ops->set_bits_mask(trans, reg, mask, value);
}

#define iwl_trans_grab_nic_access(trans, flags)	\
	__cond_lock(nic_access,			\
		    likely((trans)->ops->grab_nic_access(trans, flags)))

static inline void __releases(nic_access)
iwl_trans_release_nic_access(struct iwl_trans *trans, unsigned long *flags)
{
	trans->ops->release_nic_access(trans, flags);
	__release(nic_access);
}

static inline void iwl_trans_fw_error(struct iwl_trans *trans)
{
	if (WARN_ON_ONCE(!trans->op_mode))
		return;

	/* prevent double restarts due to the same erroneous FW */
	if (!test_and_set_bit(STATUS_FW_ERROR, &trans->status))
		iwl_op_mode_nic_error(trans->op_mode);
}

static inline bool iwl_trans_fw_running(struct iwl_trans *trans)
{
	return trans->state == IWL_TRANS_FW_ALIVE;
}

static inline void iwl_trans_sync_nmi(struct iwl_trans *trans)
{
	if (trans->ops->sync_nmi)
		trans->ops->sync_nmi(trans);
}

/*****************************************************
 * transport helper functions
 *****************************************************/
struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
				  struct device *dev,
				  const struct iwl_cfg *cfg,
				  const struct iwl_trans_ops *ops);
void iwl_trans_free(struct iwl_trans *trans);

/*****************************************************
 * driver (transport) register/unregister functions
 ******************************************************/
/* PCI */
int __must_check iwl_pci_register_driver(void);
void iwl_pci_unregister_driver(void);

#endif /* __iwl_trans_h__ */

backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/iwl-vendor-cmd.h

/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 * Intel Linux Wireless <linuxwifi@intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in
 * the documentation and/or other materials provided with the
 * distribution.
 * * Neither the name Intel Corporation nor the names of its
 * contributors may be used to endorse or promote products derived
 * from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/

#ifndef __VENDOR_CMD_H__
#define __VENDOR_CMD_H__

#define INTEL_OUI	0x001735

/**
 * enum iwl_mvm_vendor_cmd - supported vendor commands
 * @IWL_MVM_VENDOR_CMD_SET_LOW_LATENCY: set low-latency mode for the given
 *	virtual interface
 * @IWL_MVM_VENDOR_CMD_GET_LOW_LATENCY: query low-latency mode
 * @IWL_MVM_VENDOR_CMD_TCM_EVENT: TCM event
 * @IWL_MVM_VENDOR_CMD_LTE_STATE: inform the LTE modem state
 * @IWL_MVM_VENDOR_CMD_LTE_COEX_CONFIG_INFO: configure LTE-Coex static
 *	parameters
 * @IWL_MVM_VENDOR_CMD_LTE_COEX_DYNAMIC_INFO: configure LTE dynamic parameters
 * @IWL_MVM_VENDOR_CMD_LTE_COEX_SPS_INFO: configure semi-persistent info
 * @IWL_MVM_VENDOR_CMD_LTE_COEX_WIFI_RPRTD_CHAN: Wifi reported channel as
 *	calculated by the coex-manager
 * @IWL_MVM_VENDOR_CMD_SET_COUNTRY: set new MCC regulatory information
 * @IWL_MVM_VENDOR_CMD_PROXY_FRAME_FILTERING: filter GTK, gratuitous
 *	ARP & unsolicited NA
 * @IWL_MVM_VENDOR_CMD_TDLS_PEER_CACHE_ADD: add a peer to the TDLS peer cache
 * @IWL_MVM_VENDOR_CMD_TDLS_PEER_CACHE_DEL: delete a peer from the TDLS peer
 *	cache
 * @IWL_MVM_VENDOR_CMD_TDLS_PEER_CACHE_QUERY: query traffic statistics for a
 *	peer in the TDLS cache
 * @IWL_MVM_VENDOR_CMD_SET_NIC_TXPOWER_LIMIT: set the NIC's (SAR) TX power limit
 * @IWL_MVM_VENDOR_CMD_OPPPS_WA: workaround to pass Sigma test - applicable code is
 *	enclosed under CPTCFG_IWLMVM_P2P_OPPPS_TEST_WA
 * @IWL_MVM_VENDOR_CMD_GSCAN_GET_CAPABILITIES: get driver gscan capabilities as
 *	specified in %IWL_MVM_VENDOR_ATTR_GSCAN_*
 * @IWL_MVM_VENDOR_CMD_GSCAN_START: set gscan parameters and start gscan
 * @IWL_MVM_VENDOR_CMD_GSCAN_STOP: stop a previously started gscan
 * @IWL_MVM_VENDOR_CMD_GSCAN_RESULTS_EVENT: event that reports scan results
 *	from gscan. This event is sent when the scan results buffer has reached
 *	the report threshold, or when scanning a bucket with report mode
 *	%IWL_MVM_VENDOR_GSCAN_REPORT_BUFFER_COMPLETE was completed.
 * @IWL_MVM_VENDOR_CMD_GSCAN_SET_BSSID_HOTLIST: set a list of AP's to track
 *	changes in their RSSI and report scan results history when RSSI goes
 *	above/below threshold. Sending this command with an empty list of AP's
 *	will cancel previous set_bssid_hotlist request.
 * @IWL_MVM_VENDOR_CMD_GSCAN_SET_SIGNIFICANT_CHANGE: set a list of APs to track
 *	significant changes in their RSSI.
Sending this command with an empty * list of AP's will cancel previous set_significant_change request. * @IWL_MVM_VENDOR_CMD_GSCAN_HOTLIST_CHANGE_EVENT: event that indicates that an * AP from the BSSID hotlist was lost or found. * @IWL_MVM_VENDOR_CMD_GSCAN_SIGNIFICANT_CHANGE_EVENT: event that indicates a * significant change in the RSSI level of beacons received from a certain * AP. * @IWL_MVM_VENDOR_CMD_RXFILTER: Set/clear rx filter. * @IWL_MVM_VENDOR_CMD_GSCAN_BEACON_EVENT: event that reports a * beacon/probe response was received, and contains information from the * beacon/probe response. This event is sent for buckets with report mode * set to %IWL_MVM_VENDOR_GSCAN_REPORT_BUFFER_COMPLETE_RESULTS. * @IWL_MVM_VENDOR_CMD_DBG_COLLECT: collect debug data * @IWL_MVM_VENDOR_CMD_NAN_FAW_CONF: Configure post NAN further availability. * @IWL_MVM_VENDOR_CMD_SET_SAR_PROFILE: set the NIC's tx power limits * according to the specified tx power profiles. In this command * %IWL_MVM_VENDOR_ATTR_SAR_CHAIN_A_PROFILE and * %IWL_MVM_VENDOR_ATTR_SAR_CHAIN_B_PROFILE must be passed. * @IWL_MVM_VENDOR_CMD_GET_SAR_PROFILE_INFO: get sar profile information. * This command provides the user with the following information: * Number of enabled SAR profiles, current used SAR profile per chain. * @IWL_MVM_VENDOR_CMD_NEIGHBOR_REPORT_REQUEST: Send a neighbor report request * to the AP we are currently connected to. The request parameters are * specified with %IWL_MVM_VENDOR_ATTR_NR_*. * @IWL_MVM_VENDOR_CMD_NEIGHBOR_REPORT_RESPONSE: An event that reports a list of * neighbor APs received in a neighbor report response frame. The report is * a nested list of &enum iwl_mvm_vendor_neighbor_report. * @IWL_MVM_VENDOR_CMD_GET_SAR_GEO_PROFILE: get sar geographic profile * information. This command provides the user with the following * information: Per band tx power offset for chain A and chain B as well as * maximum allowed tx power on this band. * @IWL_MVM_VENDOR_CMD_TEST_FIPS: request the output of a certain function for * the specified test vector. The test vector is specified with one of: * &IWL_MVM_VENDOR_ATTR_FIPS_TEST_VECTOR_SHA, * &IWL_MVM_VENDOR_ATTR_FIPS_TEST_VECTOR_HMAC, or * &IWL_MVM_VENDOR_ATTR_FIPS_TEST_VECTOR_KDF. Only one test vector shall be * specified per test command. * The result output is sent back in &IWL_MVM_VENDOR_ATTR_FIPS_TEST_RESULT * attribute. In case the function failed to produce an output for the * requested test vector, &IWL_MVM_VENDOR_ATTR_FIPS_TEST_RESULT is not set. * @IWL_MVM_VENDOR_CMD_FMAC_CONNECT_PARAMS: set fmac specific parameters for * future connect commands. These parameters will affect all following * connect commands. To clear previous configuration, send the command with * no attributes. * @IWL_MVM_VENDOR_CMD_FMAC_CONFIG: set one of the fmac configuration options. * &IWL_MVM_VENDOR_ATTR_FMAC_CONFIG_STR specifies the configuration string. 
* @IWL_MVM_VENDOR_CMD_CSI_EVENT: CSI event, use as a command to enable unicast * reporting to the calling socket */ enum iwl_mvm_vendor_cmd { IWL_MVM_VENDOR_CMD_SET_LOW_LATENCY = 0x00, IWL_MVM_VENDOR_CMD_GET_LOW_LATENCY = 0x01, IWL_MVM_VENDOR_CMD_TCM_EVENT = 0x02, IWL_MVM_VENDOR_CMD_LTE_STATE = 0x03, IWL_MVM_VENDOR_CMD_LTE_COEX_CONFIG_INFO = 0x04, IWL_MVM_VENDOR_CMD_LTE_COEX_DYNAMIC_INFO = 0x05, IWL_MVM_VENDOR_CMD_LTE_COEX_SPS_INFO = 0x06, IWL_MVM_VENDOR_CMD_LTE_COEX_WIFI_RPRTD_CHAN = 0x07, IWL_MVM_VENDOR_CMD_SET_COUNTRY = 0x08, IWL_MVM_VENDOR_CMD_PROXY_FRAME_FILTERING = 0x09, IWL_MVM_VENDOR_CMD_TDLS_PEER_CACHE_ADD = 0x0a, IWL_MVM_VENDOR_CMD_TDLS_PEER_CACHE_DEL = 0x0b, IWL_MVM_VENDOR_CMD_TDLS_PEER_CACHE_QUERY = 0x0c, IWL_MVM_VENDOR_CMD_SET_NIC_TXPOWER_LIMIT = 0x0d, IWL_MVM_VENDOR_CMD_OPPPS_WA = 0x0e, IWL_MVM_VENDOR_CMD_GSCAN_GET_CAPABILITIES = 0x0f, IWL_MVM_VENDOR_CMD_GSCAN_START = 0x10, IWL_MVM_VENDOR_CMD_GSCAN_STOP = 0x11, IWL_MVM_VENDOR_CMD_GSCAN_RESULTS_EVENT = 0x12, IWL_MVM_VENDOR_CMD_GSCAN_SET_BSSID_HOTLIST = 0x13, IWL_MVM_VENDOR_CMD_GSCAN_SET_SIGNIFICANT_CHANGE = 0x14, IWL_MVM_VENDOR_CMD_GSCAN_HOTLIST_CHANGE_EVENT = 0x15, IWL_MVM_VENDOR_CMD_GSCAN_SIGNIFICANT_CHANGE_EVENT = 0x16, IWL_MVM_VENDOR_CMD_RXFILTER = 0x17, IWL_MVM_VENDOR_CMD_GSCAN_BEACON_EVENT = 0x18, IWL_MVM_VENDOR_CMD_DBG_COLLECT = 0x19, IWL_MVM_VENDOR_CMD_NAN_FAW_CONF = 0x1a, /* 0x1b is deprecated */ IWL_MVM_VENDOR_CMD_SET_SAR_PROFILE = 0x1c, IWL_MVM_VENDOR_CMD_GET_SAR_PROFILE_INFO = 0x1d, IWL_MVM_VENDOR_CMD_NEIGHBOR_REPORT_REQUEST = 0x1e, IWL_MVM_VENDOR_CMD_NEIGHBOR_REPORT_RESPONSE = 0x1f, IWL_MVM_VENDOR_CMD_GET_SAR_GEO_PROFILE = 0x20, IWL_MVM_VENDOR_CMD_TEST_FIPS = 0x21, IWL_MVM_VENDOR_CMD_FMAC_CONNECT_PARAMS = 0x22, IWL_MVM_VENDOR_CMD_FMAC_CONFIG = 0x23, IWL_MVM_VENDOR_CMD_CSI_EVENT = 0x24, }; /** * enum iwl_mvm_vendor_load - traffic load identifiers * @IWL_MVM_VENDOR_LOAD_LOW: low load: less than 10% airtime usage * @IWL_MVM_VENDOR_LOAD_MEDIUM: medium load: 10% or more, but less than 50% * @IWL_MVM_VENDOR_LOAD_HIGH: high load: 50% or more * * Traffic load is calculated based on the percentage of airtime used * (TX airtime is accounted as RTS+CTS+PPDU+ACK/BlockACK, RX airtime * is just the PPDU's time) */ enum iwl_mvm_vendor_load { IWL_MVM_VENDOR_LOAD_LOW, IWL_MVM_VENDOR_LOAD_MEDIUM, IWL_MVM_VENDOR_LOAD_HIGH, }; /** * enum iwl_mvm_vendor_gscan_report_mode - gscan scan results report modes * @IWL_MVM_VENDOR_GSCAN_REPORT_BUFFER_FULL: report that scan results are * available only when the scan results buffer reaches the report * threshold. The report threshold is set for each bucket. * @IWL_MVM_VENDOR_GSCAN_REPORT_BUFFER_EACH_SCAN: report that scan results are * available when scanning of this bucket is complete. * @IWL_MVM_VENDOR_GSCAN_REPORT_BUFFER_FULL_RESULTS: forward scan results * (beacons/probe responses) in real time to userspace. * @IWL_MVM_VENDOR_GSCAN_REPORT_HISTORY_RESERVED: reserved. * @IWL_MVM_VENDOR_GSCAN_REPORT_NO_BATCH: do not fill scan history buffer. * @NUM_IWL_MVM_VENDOR_GSCAN_REPORT: number of report mode attributes. * * Note that these must match the firmware API. 
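 *
 * Example (hypothetical userspace sketch; assumes the report mode is
 * carried as a u32 netlink attribute inside a bucket spec and that
 * msg is a libnl message already prepared for the vendor command):
 *
 *	nla_put_u32(msg, IWL_MVM_VENDOR_BUCKET_SPEC_REPORT_MODE,
 *		    IWL_MVM_VENDOR_GSCAN_REPORT_BUFFER_FULL_RESULTS);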
 */
enum iwl_mvm_vendor_gscan_report_mode {
	IWL_MVM_VENDOR_GSCAN_REPORT_BUFFER_FULL,
	IWL_MVM_VENDOR_GSCAN_REPORT_BUFFER_EACH_SCAN,
	IWL_MVM_VENDOR_GSCAN_REPORT_BUFFER_FULL_RESULTS,
	IWL_MVM_VENDOR_GSCAN_REPORT_HISTORY_RESERVED,
	IWL_MVM_VENDOR_GSCAN_REPORT_NO_BATCH,
	NUM_IWL_MVM_VENDOR_GSCAN_REPORT,
};

/**
 * enum iwl_mvm_vendor_gscan_channel_spec - gscan channel specification
 * @IWL_MVM_VENDOR_CHANNEL_SPEC_INVALID: attribute number 0 is reserved
 * @IWL_MVM_VENDOR_CHANNEL_SPEC_CHANNEL: channel number
 * @IWL_MVM_VENDOR_CHANNEL_SPEC_DWELL_TIME: u16 attribute specifying dwell
 *	time on this channel.
 * @IWL_MVM_VENDOR_CHANNEL_SPEC_PASSIVE: flag attribute. If set, passive
 *	scan should be performed on this channel.
 * @NUM_IWL_MVM_VENDOR_CHANNEL_SPEC: number of channel spec attributes.
 * @MAX_IWL_MVM_VENDOR_CHANNEL_SPEC: highest channel spec attribute number.
 */
enum iwl_mvm_vendor_gscan_channel_spec {
	IWL_MVM_VENDOR_CHANNEL_SPEC_INVALID,
	IWL_MVM_VENDOR_CHANNEL_SPEC_CHANNEL,
	IWL_MVM_VENDOR_CHANNEL_SPEC_DWELL_TIME,
	IWL_MVM_VENDOR_CHANNEL_SPEC_PASSIVE,
	NUM_IWL_MVM_VENDOR_CHANNEL_SPEC,
	MAX_IWL_MVM_VENDOR_CHANNEL_SPEC =
		NUM_IWL_MVM_VENDOR_CHANNEL_SPEC - 1,
};

/**
 * enum iwl_mvm_vendor_gscan_bucket_spec - gscan bucket specification
 * @IWL_MVM_VENDOR_BUCKET_SPEC_INVALID: attribute number 0 is reserved
 * @IWL_MVM_VENDOR_BUCKET_SPEC_INDEX: bucket index
 * @IWL_MVM_VENDOR_BUCKET_SPEC_BAND: band to scan as specified in
 *	&enum iwl_gscan_band. When not set, the channel list is used.
 * @IWL_MVM_VENDOR_BUCKET_SPEC_PERIOD: interval between this bucket's scans,
 *	in msecs.
 * @IWL_MVM_VENDOR_BUCKET_SPEC_REPORT_MODE: when to report scan results.
 *	Available modes are specified in &enum iwl_mvm_vendor_gscan_report_mode.
 * @IWL_MVM_VENDOR_BUCKET_SPEC_CHANNELS: array of channels to scan for this
 *	bucket. Each channel is specified with a nested attribute of
 *	%IWL_MVM_VENDOR_CHANNEL_SPEC. This channel list is used when
 *	%IWL_MVM_VENDOR_BUCKET_SPEC_BAND is set to
 *	%IWL_MVM_VENDOR_BAND_UNSPECIFIED.
 * @IWL_MVM_VENDOR_BUCKET_SPEC_MAX_PERIOD: maximum scan interval. If it is
 *	non-zero or different from the period, then this bucket is an exponential
 *	back off bucket and the scan period will grow exponentially.
 * @IWL_MVM_VENDOR_BUCKET_SPEC_EXPONENT: for exponential back off bucket,
 *	scan period calculation should be done according to the following:
 *	new_period = old_period * exponent
 * @IWL_MVM_VENDOR_BUCKET_SPEC_STEP_CNT: for exponential back off bucket:
 *	number of scans to perform at a given period and until the exponent
 *	is applied.
 * @NUM_IWL_MVM_VENDOR_BUCKET_SPEC: number of bucket spec attributes.
 * @MAX_IWL_MVM_VENDOR_BUCKET_SPEC: highest bucket spec attribute number.
 */
enum iwl_mvm_vendor_gscan_bucket_spec {
	IWL_MVM_VENDOR_BUCKET_SPEC_INVALID,
	IWL_MVM_VENDOR_BUCKET_SPEC_INDEX,
	IWL_MVM_VENDOR_BUCKET_SPEC_BAND,
	IWL_MVM_VENDOR_BUCKET_SPEC_PERIOD,
	IWL_MVM_VENDOR_BUCKET_SPEC_REPORT_MODE,
	IWL_MVM_VENDOR_BUCKET_SPEC_CHANNELS,
	IWL_MVM_VENDOR_BUCKET_SPEC_MAX_PERIOD,
	IWL_MVM_VENDOR_BUCKET_SPEC_EXPONENT,
	IWL_MVM_VENDOR_BUCKET_SPEC_STEP_CNT,
	NUM_IWL_MVM_VENDOR_BUCKET_SPEC,
	MAX_IWL_MVM_VENDOR_BUCKET_SPEC =
		NUM_IWL_MVM_VENDOR_BUCKET_SPEC - 1,
};

/**
 * enum iwl_mvm_vendor_results_event_type - scan results available event type
 * @IWL_MVM_VENDOR_RESULTS_NOTIF_BUFFER_FULL: scan results available was
 *	reported because scan results buffer has reached the report threshold.
 * @IWL_MVM_VENDOR_RESULTS_NOTIF_BUCKET_END: scan results available was reported
 *	because scan of a bucket was completed.
* @NUM_IWL_VENDOR_RESULTS_NOTIF_EVENT_TYPE: number of defined gscan results * notification event types. * * Note that these must match the firmware API. */ enum iwl_mvm_vendor_results_event_type { IWL_MVM_VENDOR_RESULTS_NOTIF_BUFFER_FULL, IWL_MVM_VENDOR_RESULTS_NOTIF_BUCKET_END, NUM_IWL_VENDOR_RESULTS_NOTIF_EVENT_TYPE, }; /** * enum iwl_mvm_vendor_gscan_result - gscan scan result * @IWL_MVM_VENDOR_GSCAN_RESULT_INVALID: attribute number 0 is reserved. * @IWL_MVM_VENDOR_GSCAN_RESULT_TIMESTAMP: time since boot (in usecs) when * the result was retrieved. * @IWL_MVM_VENDOR_GSCAN_RESULT_SSID: SSID. * @IWL_MVM_VENDOR_GSCAN_RESULT_BSSID: BSSID of the BSS (6 octets). * @IWL_MVM_VENDOR_GSCAN_RESULT_CHANNEL: channel frequency in MHz. * @IWL_MVM_VENDOR_GSCAN_RESULT_RSSI: signal strength in dB. * @IWL_MVM_VENDOR_GSCAN_RESULT_FRAME: the whole beacon/probe response * frame data including the header. * @IWL_MVM_VENDOR_GSCAN_RESULT_BEACON_PERIOD: period advertised in the beacon. * @IWL_MVM_VENDOR_GSCAN_RESULT_CAPABILITY: capabilities advertised in the * beacon / probe response. * @IWL_MVM_VENDOR_GSCAN_RESULT_PAD: used for padding, ignore * @NUM_IWL_MVM_VENDOR_GSCAN_RESULT: number of scan result attributes. * @MAX_IWL_MVM_VENDOR_GSCAN_RESULT: highest scan result attribute number. */ enum iwl_mvm_vendor_gscan_result { IWL_MVM_VENDOR_GSCAN_RESULT_INVALID, IWL_MVM_VENDOR_GSCAN_RESULT_TIMESTAMP, IWL_MVM_VENDOR_GSCAN_RESULT_SSID, IWL_MVM_VENDOR_GSCAN_RESULT_BSSID, IWL_MVM_VENDOR_GSCAN_RESULT_CHANNEL, IWL_MVM_VENDOR_GSCAN_RESULT_RSSI, IWL_MVM_VENDOR_GSCAN_RESULT_FRAME, IWL_MVM_VENDOR_GSCAN_RESULT_BEACON_PERIOD, IWL_MVM_VENDOR_GSCAN_RESULT_CAPABILITY, IWL_MVM_VENDOR_GSCAN_RESULT_PAD, NUM_IWL_MVM_VENDOR_GSCAN_RESULT, MAX_IWL_MVM_VENDOR_GSCAN_RESULT = NUM_IWL_MVM_VENDOR_GSCAN_RESULT - 1, }; /** * enum iwl_mvm_vendor_gscan_cached_scan_res - gscan cached scan result * @IWL_MVM_VENDOR_GSCAN_CACHED_RES_INVALID: attribute number 0 is reserved. * @IWL_MVM_VENDOR_GSCAN_CACHED_RES_SCAN_ID: unique ID for this cached result. * @IWL_MVM_VENDOR_GSCAN_CACHED_RES_FLAGS: additional information about this * scan iteration. * @IWL_MVM_VENDOR_GSCAN_CACHED_RES_APS: APs reported in this scan iteration. * @NUM_IWL_MVM_VENDOR_GSCAN_CACHED_RES: number of scan result attributes. * @MAX_IWL_MVM_VENDOR_GSCAN_CACHED_RES: highest scan result attribute number. */ enum iwl_mvm_vendor_gscan_cached_scan_res { IWL_MVM_VENDOR_GSCAN_CACHED_RES_INVALID, IWL_MVM_VENDOR_GSCAN_CACHED_RES_SCAN_ID, IWL_MVM_VENDOR_GSCAN_CACHED_RES_FLAGS, IWL_MVM_VENDOR_GSCAN_CACHED_RES_APS, NUM_IWL_MVM_VENDOR_GSCAN_CACHED_RES, MAX_IWL_MVM_VENDOR_GSCAN_CACHED_RES = NUM_IWL_MVM_VENDOR_GSCAN_CACHED_RES - 1, }; /** * enum iwl_mvm_vendor_ap_threshold_param - parameters for tracking AP's RSSI * @IWL_MVM_VENDOR_AP_THRESHOLD_PARAM_INVALID: attribute number 0 is reserved. * @IWL_MVM_VENDOR_AP_BSSID: BSSID of the BSS (6 octets) * @IWL_MVM_VENDOR_AP_LOW_RSSI_THRESHOLD: low RSSI threshold. in dB. * @IWL_MVM_VENDOR_AP_HIGH_RSSI_THRESHOLD: high RSSI threshold. in dB. * @NUM_IWL_MVM_VENDOR_GSCAN_AP_THRESHOLD_PARAM: number of ap threshold param * attributes. * @MAX_IWL_MVM_VENDOR_GSCAN_AP_THRESHOLD_PARAM: highest ap threshold param * attribute number. 
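 *
 * Example (hypothetical libnl sketch; the payload types and the nest
 * index are assumptions, error handling omitted): one hotlist entry
 * could be nested as
 *
 *	struct nlattr *ap = nla_nest_start(msg, 1);
 *
 *	nla_put(msg, IWL_MVM_VENDOR_AP_BSSID, ETH_ALEN, bssid);
 *	nla_put_u8(msg, IWL_MVM_VENDOR_AP_LOW_RSSI_THRESHOLD,
 *		   (uint8_t)-80);
 *	nla_put_u8(msg, IWL_MVM_VENDOR_AP_HIGH_RSSI_THRESHOLD,
 *		   (uint8_t)-50);
 *	nla_nest_end(msg, ap);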
 */
enum iwl_mvm_vendor_ap_threshold_param {
	IWL_MVM_VENDOR_AP_THRESHOLD_PARAM_INVALID,
	IWL_MVM_VENDOR_AP_BSSID,
	IWL_MVM_VENDOR_AP_LOW_RSSI_THRESHOLD,
	IWL_MVM_VENDOR_AP_HIGH_RSSI_THRESHOLD,
	NUM_IWL_MVM_VENDOR_GSCAN_AP_THRESHOLD_PARAM,
	MAX_IWL_MVM_VENDOR_GSCAN_AP_THRESHOLD_PARAM =
		NUM_IWL_MVM_VENDOR_GSCAN_AP_THRESHOLD_PARAM - 1,
};

/**
 * enum iwl_mvm_vendor_hotlist_ap_status - whether an AP was found or lost
 * @IWL_MVM_VENDOR_HOTLIST_AP_FOUND: beacon from this AP was received with RSSI
 *	above the configured high threshold.
 * @IWL_MVM_VENDOR_HOTLIST_AP_LOST: beacon from this AP was received with RSSI
 *	below the configured low threshold.
 * @NUM_IWL_MVM_VENDOR_HOTLIST_AP_STATUS: number of defined AP statuses.
 *
 * Note that these must match the firmware API.
 */
enum iwl_mvm_vendor_hotlist_ap_status {
	IWL_MVM_VENDOR_HOTLIST_AP_FOUND,
	IWL_MVM_VENDOR_HOTLIST_AP_LOST,
	NUM_IWL_MVM_VENDOR_HOTLIST_AP_STATUS,
};

/**
 * enum iwl_mvm_vendor_significant_change_result - significant change result
 * @IWL_MVM_VENDOR_SIGNIFICANT_CHANGE_INVALID: attribute number 0 is reserved
 * @IWL_MVM_VENDOR_SIGNIFICANT_CHANGE_CHANNEL: channel number of the reported
 *	AP.
 * @IWL_MVM_VENDOR_SIGNIFICANT_CHANGE_BSSID: BSSID.
 * @IWL_MVM_VENDOR_SIGNIFICANT_CHANGE_RSSI_HISTORY: array of RSSI samples for
 *	the reported AP, in dB.
 * @NUM_IWL_MVM_VENDOR_SIGNIFICANT_CHANGE_RESULT: number of significant change
 *	attributes.
 * @MAX_IWL_MVM_VENDOR_SIGNIFICANT_CHANGE_RESULT: highest significant change
 *	result attribute number.
 */
enum iwl_mvm_vendor_significant_change_result {
	IWL_MVM_VENDOR_SIGNIFICANT_CHANGE_INVALID,
	IWL_MVM_VENDOR_SIGNIFICANT_CHANGE_CHANNEL,
	IWL_MVM_VENDOR_SIGNIFICANT_CHANGE_BSSID,
	IWL_MVM_VENDOR_SIGNIFICANT_CHANGE_RSSI_HISTORY,
	NUM_IWL_MVM_VENDOR_SIGNIFICANT_CHANGE_RESULT,
	MAX_IWL_MVM_VENDOR_SIGNIFICANT_CHANGE_RESULT =
		NUM_IWL_MVM_VENDOR_SIGNIFICANT_CHANGE_RESULT - 1,
};

/**
 * enum iwl_mvm_vendor_rxfilter_flags - the type of Rx filter request
 *
 * @IWL_MVM_VENDOR_RXFILTER_UNICAST: control unicast Rx filter
 * @IWL_MVM_VENDOR_RXFILTER_BCAST: control broadcast Rx filter
 * @IWL_MVM_VENDOR_RXFILTER_MCAST4: control IPv4 multicast Rx filter
 * @IWL_MVM_VENDOR_RXFILTER_MCAST6: control IPv6 multicast Rx filter
 * @IWL_MVM_VENDOR_RXFILTER_EINVAL: no Rx filter command was set
 *
 */
enum iwl_mvm_vendor_rxfilter_flags {
	IWL_MVM_VENDOR_RXFILTER_UNICAST = 1 << 0,
	IWL_MVM_VENDOR_RXFILTER_BCAST = 1 << 1,
	IWL_MVM_VENDOR_RXFILTER_MCAST4 = 1 << 2,
	IWL_MVM_VENDOR_RXFILTER_MCAST6 = 1 << 3,
	IWL_MVM_VENDOR_RXFILTER_EINVAL = 1 << 7,
};

/**
 * enum iwl_mvm_vendor_rxfilter_op - the operation associated with a filter
 *
 * @IWL_MVM_VENDOR_RXFILTER_OP_PASS: pass frames matching the filter
 * @IWL_MVM_VENDOR_RXFILTER_OP_DROP: drop frames matching the filter
 */
enum iwl_mvm_vendor_rxfilter_op {
	IWL_MVM_VENDOR_RXFILTER_OP_PASS,
	IWL_MVM_VENDOR_RXFILTER_OP_DROP,
};

/*
 * enum iwl_mvm_vendor_nr_chan_width - channel width definitions
 *
 * The values in this enum correspond to the values defined in
 * IEEE802.11-2016, table 9-153.
 */
enum iwl_mvm_vendor_nr_chan_width {
	IWL_MVM_VENDOR_CHAN_WIDTH_20,
	IWL_MVM_VENDOR_CHAN_WIDTH_40,
	IWL_MVM_VENDOR_CHAN_WIDTH_80,
	IWL_MVM_VENDOR_CHAN_WIDTH_160,
	IWL_MVM_VENDOR_CHAN_WIDTH_80P80,
};

/*
 * enum iwl_mvm_vendor_phy_type - neighbor report phy types
 *
 * The values in this enum correspond to the values defined in
 * IEEE802.11-2016, Annex C.
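 *
 * Example (illustrative only; assumes the attribute is built with the
 * usual netlink helpers): an HT neighbor would be reported as
 *
 *	nla_put_u8(skb, IWL_MVM_VENDOR_NEIGHBOR_PHY_TYPE,
 *		   IWL_MVM_VENDOR_PHY_TYPE_HT);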
*/ enum iwl_mvm_vendor_phy_type { IWL_MVM_VENDOR_PHY_TYPE_UNSPECIFIED, IWL_MVM_VENDOR_PHY_TYPE_DSSS = 2, IWL_MVM_VENDOR_PHY_TYPE_OFDM = 4, IWL_MVM_VENDOR_PHY_TYPE_HRDSSS = 5, IWL_MVM_VENDOR_PHY_TYPE_ERP = 6, IWL_MVM_VENDOR_PHY_TYPE_HT = 7, IWL_MVM_VENDOR_PHY_TYPE_DMG = 8, IWL_MVM_VENDOR_PHY_TYPE_VHT = 9, IWL_MVM_VENDOR_PHY_TYPE_TVHT = 10, }; /** * enum iwl_mvm_vendor_neighbor_report - Neighbor report for one AP * * @__IWL_MVM_VENDOR_NEIGHBOR_INVALID: attribute number 0 is reserved * @IWL_MVM_VENDOR_NEIGHBOR_BSSID: the BSSID of the neighbor AP. * @IWL_MVM_VENDOR_NEIGHBOR_BSSID_INFO: the BSSID information field as * defined in IEEE802.11-2016, figure 9-296 (u32) * @IWL_MVM_VENDOR_NEIGHBOR_OPERATING_CLASS: the operating class of the * neighbor AP (u8) * @IWL_MVM_VENDOR_NEIGHBOR_CHANNEL: the primary channel number of the * neighbor AP (u8) * @IWL_MVM_VENDOR_NEIGHBOR_PHY_TYPE: the phy type of the neighbor AP * as specified in &enum iwl_mvm_vendor_phy_type (u8) * @IWL_MVM_VENDOR_NEIGHBOR_CHANNEL_WIDTH: u32 attribute containing one of the * values of &enum iwl_mvm_vendor_nr_chan_width, describing the * channel width. * @IWL_MVM_VENDOR_NEIGHBOR_CENTER_FREQ_IDX_0: Center frequency of the first * part of the channel, used for anything but 20 MHz bandwidth. * @IWL_MVM_VENDOR_NEIGHBOR_CENTER_FREQ_IDX_1: Center frequency of the second * part of the channel, used only for 80+80 MHz bandwidth. * @IWL_MVM_VENDOR_NEIGHBOR_LCI: the LCI info of the neighbor AP. Optional. * Binary attribute. * @IWL_MVM_VENDOR_NEIGHBOR_CIVIC: the CIVIC info of the neighbor AP. Optional. * Binary attribute. * @NUM_IWL_MVM_VENDOR_NEIGHBOR_REPORT: num of neighbor report attributes * @MAX_IWL_MVM_VENDOR_NEIGHBOR_REPORT: highest neighbor report attribute * number. */ enum iwl_mvm_vendor_neighbor_report { __IWL_MVM_VENDOR_NEIGHBOR_INVALID, IWL_MVM_VENDOR_NEIGHBOR_BSSID, IWL_MVM_VENDOR_NEIGHBOR_BSSID_INFO, IWL_MVM_VENDOR_NEIGHBOR_OPERATING_CLASS, IWL_MVM_VENDOR_NEIGHBOR_CHANNEL, IWL_MVM_VENDOR_NEIGHBOR_PHY_TYPE, IWL_MVM_VENDOR_NEIGHBOR_CHANNEL_WIDTH, IWL_MVM_VENDOR_NEIGHBOR_CENTER_FREQ_IDX_0, IWL_MVM_VENDOR_NEIGHBOR_CENTER_FREQ_IDX_1, IWL_MVM_VENDOR_NEIGHBOR_LCI, IWL_MVM_VENDOR_NEIGHBOR_CIVIC, NUM_IWL_MVM_VENDOR_NEIGHBOR_REPORT, MAX_IWL_MVM_VENDOR_NEIGHBOR_REPORT = NUM_IWL_MVM_VENDOR_NEIGHBOR_REPORT - 1, }; /** * enum iwl_vendor_sar_per_chain_geo_table - per chain tx power table * * @IWL_VENDOR_SAR_GEO_INVALID: attribute number 0 is reserved. * @IWL_VENDOR_SAR_GEO_CHAIN_A_OFFSET: allowed offset for chain a (u8). * @IWL_VENDOR_SAR_GEO_CHAIN_B_OFFSET: allowed offset for chain b (u8). * @IWL_VENDOR_SAR_GEO_MAX_TXP: maximum allowed tx power (u8). */ enum iwl_vendor_sar_per_chain_geo_table { IWL_VENDOR_SAR_GEO_INVALID, IWL_VENDOR_SAR_GEO_CHAIN_A_OFFSET, IWL_VENDOR_SAR_GEO_CHAIN_B_OFFSET, IWL_VENDOR_SAR_GEO_MAX_TXP, }; /** * enum iwl_vendor_fips_test_vector_sha_type - SHA types for FIPS tests * * @IWL_VENDOR_FIPS_TEST_VECTOR_SHA_TYPE_SHA1: SHA1 * @IWL_VENDOR_FIPS_TEST_VECTOR_SHA_TYPE_SHA256: SHA256 * @IWL_VENDOR_FIPS_TEST_VECTOR_SHA_TYPE_SHA384: SHA384 */ enum iwl_vendor_fips_test_vector_sha_type { IWL_VENDOR_FIPS_TEST_VECTOR_SHA_TYPE_SHA1, IWL_VENDOR_FIPS_TEST_VECTOR_SHA_TYPE_SHA256, IWL_VENDOR_FIPS_TEST_VECTOR_SHA_TYPE_SHA384, }; /** * enum iwl_vendor_fips_test_vector_sha - test vector for SHA tests * * @IWL_VENDOR_FIPS_TEST_VECTOR_SHA_INVALID: attribute number 0 is reserved. * @IWL_VENDOR_FIPS_TEST_VECTOR_SHA_TYPE: which SHA function to use. One of * &enum iwl_vendor_fips_test_vector_sha_type. 
* @IWL_VENDOR_FIPS_TEST_VECTOR_SHA_MSG: the message to generate the digest for. * @NUM_IWL_VENDOR_FIPS_TEST_VECTOR_SHA: number of SHA test vector attributes. * @MAX_IWL_VENDOR_FIPS_TEST_VECTOR_SHA: highest SHA test vector attribute. */ enum iwl_vendor_fips_test_vector_sha { IWL_VENDOR_FIPS_TEST_VECTOR_SHA_INVALID, IWL_VENDOR_FIPS_TEST_VECTOR_SHA_TYPE, IWL_VENDOR_FIPS_TEST_VECTOR_SHA_MSG, NUM_IWL_VENDOR_FIPS_TEST_VECTOR_SHA, MAX_IWL_VENDOR_FIPS_TEST_VECTOR_SHA = NUM_IWL_VENDOR_FIPS_TEST_VECTOR_SHA - 1, }; /** * enum iwl_vendor_fips_test_vector_hmac_kdf - test vector for HMAC/KDF tests * * @IWL_VENDOR_FIPS_TEST_VECTOR_HMAC_KDF_INVALID: attribute number 0 is * reserved. * @IWL_VENDOR_FIPS_TEST_VECTOR_HMAC_KDF_TYPE: which HMAC-SHA function to use. * One of &enum iwl_vendor_fips_test_vector_sha_type. * @IWL_VENDOR_FIPS_TEST_VECTOR_HMAC_KDF_KEY: key input for the HMAC-SHA * function. * @IWL_VENDOR_FIPS_TEST_VECTOR_HMAC_KDF_MSG: the message to generate the * digest for. * @IWL_VENDOR_FIPS_TEST_VECTOR_HMAC_KDF_RES_LEN: the requested digest length in * bytes. * @NUM_IWL_VENDOR_FIPS_TEST_VECTOR_HMAC_KDF: number of HMAC/KDF test vector * attributes. * @MAX_IWL_VENDOR_FIPS_TEST_VECTOR_HMAC_KDF: highest HMAC/KDF test vector * attribute. */ enum iwl_vendor_fips_test_vector_hmac_kdf { IWL_VENDOR_FIPS_TEST_VECTOR_HMAC_KDF_INVALID, IWL_VENDOR_FIPS_TEST_VECTOR_HMAC_KDF_TYPE, IWL_VENDOR_FIPS_TEST_VECTOR_HMAC_KDF_KEY, IWL_VENDOR_FIPS_TEST_VECTOR_HMAC_KDF_MSG, IWL_VENDOR_FIPS_TEST_VECTOR_HMAC_KDF_RES_LEN, NUM_IWL_VENDOR_FIPS_TEST_VECTOR_HMAC_KDF, MAX_IWL_VENDOR_FIPS_TEST_VECTOR_HMAC_KDF = NUM_IWL_VENDOR_FIPS_TEST_VECTOR_HMAC_KDF - 1, }; /** * enum iwl_vendor_fips_test_vector_flags - flags for FIPS HW test vector * @IWL_VENDOR_FIPS_TEST_VECTOR_FLAGS_ENCRYPT: if this is set, the requested * operation is encryption. Otherwise the requested operation is * decryption. */ enum iwl_vendor_fips_test_vector_flags { IWL_VENDOR_FIPS_TEST_VECTOR_FLAGS_ENCRYPT = BIT(0), }; /** * enum iwl_vendor_fips_test_vector_hw - test vector for FIPS HW tests * @IWL_VENDOR_FIPS_TEST_VECTOR_HW_INVALID: attribute number 0 is reserved. * @IWL_VENDOR_FIPS_TEST_VECTOR_HW_KEY: the key to use for * encryption/decryption. For CCM, only 128-bit key is supported. * For AES and GCM, 128-bit and 256-bit keys are supported. * @IWL_VENDOR_FIPS_TEST_VECTOR_HW_NONCE: for CCM use 13 bytes, for GCM only 12 * bytes. Not valid for AES tests. * @IWL_VENDOR_FIPS_TEST_VECTOR_HW_AAD: adata. maximum supported size is 30 * bytes. Not valid for AES tests. * @IWL_VENDOR_FIPS_TEST_VECTOR_HW_PAYLOAD: for encryption, this is the * plaintext to encrypt. For decryption, this is the ciphertext + MIC (8 * bytes of MIC for CCM, 16 bytes for GCM). * @IWL_VENDOR_FIPS_TEST_VECTOR_HW_FLAGS: &enum iwl_vendor_fips_test_vector_flags. * @NUM_IWL_VENDOR_FIPS_TEST_VECTOR_HW: number of hw test vector attributes. * @MAX_IWL_VENDOR_FIPS_TEST_VECTOR_HW: highest hw test vector attribute. 
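 *
 * Example (hypothetical userspace sketch; the attribute sizes follow
 * the descriptions above, the u8 encoding of the flags is an
 * assumption): a GCM encryption request could carry
 *
 *	nla_put(msg, IWL_VENDOR_FIPS_TEST_VECTOR_HW_KEY, 16, key);
 *	nla_put(msg, IWL_VENDOR_FIPS_TEST_VECTOR_HW_NONCE, 12, nonce);
 *	nla_put(msg, IWL_VENDOR_FIPS_TEST_VECTOR_HW_PAYLOAD, len, data);
 *	nla_put_u8(msg, IWL_VENDOR_FIPS_TEST_VECTOR_HW_FLAGS,
 *		   IWL_VENDOR_FIPS_TEST_VECTOR_FLAGS_ENCRYPT);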
 */
enum iwl_vendor_fips_test_vector_hw {
	IWL_VENDOR_FIPS_TEST_VECTOR_HW_INVALID,
	IWL_VENDOR_FIPS_TEST_VECTOR_HW_KEY,
	IWL_VENDOR_FIPS_TEST_VECTOR_HW_NONCE,
	IWL_VENDOR_FIPS_TEST_VECTOR_HW_AAD,
	IWL_VENDOR_FIPS_TEST_VECTOR_HW_PAYLOAD,
	IWL_VENDOR_FIPS_TEST_VECTOR_HW_FLAGS,
	NUM_IWL_VENDOR_FIPS_TEST_VECTOR_HW,
	MAX_IWL_VENDOR_FIPS_TEST_VECTOR_HW =
		NUM_IWL_VENDOR_FIPS_TEST_VECTOR_HW - 1,
};

/**
 * enum iwl_mvm_vendor_attr - attributes used in vendor commands
 * @__IWL_MVM_VENDOR_ATTR_INVALID: attribute 0 is invalid
 * @IWL_MVM_VENDOR_ATTR_LOW_LATENCY: low-latency flag attribute
 * @IWL_MVM_VENDOR_ATTR_VIF_ADDR: interface MAC address
 * @IWL_MVM_VENDOR_ATTR_VIF_LL: vif-low-latency (u8, 0/1)
 * @IWL_MVM_VENDOR_ATTR_LL: global low-latency (u8, 0/1)
 * @IWL_MVM_VENDOR_ATTR_VIF_LOAD: vif traffic load (u8, see load enum)
 * @IWL_MVM_VENDOR_ATTR_LOAD: global traffic load (u8, see load enum)
 * @IWL_MVM_VENDOR_ATTR_COUNTRY: MCC to set, for regulatory information (u16)
 * @IWL_MVM_VENDOR_ATTR_FILTER_ARP_NA: filter gratuitous ARP and unsolicited
 *	Neighbor Advertisement frames
 * @IWL_MVM_VENDOR_ATTR_FILTER_GTK: filter frames encrypted using
 *	the GTK
 * @IWL_MVM_VENDOR_ATTR_ADDR: MAC address
 * @IWL_MVM_VENDOR_ATTR_TX_BYTES: number of bytes transmitted to peer
 * @IWL_MVM_VENDOR_ATTR_RX_BYTES: number of bytes received from peer
 * @IWL_MVM_VENDOR_ATTR_TXP_LIMIT_24: TX power limit for 2.4 GHz
 *	(s32 in units of 1/8 dBm)
 * @IWL_MVM_VENDOR_ATTR_TXP_LIMIT_52L: TX power limit for 5.2 GHz low (as 2.4)
 * @IWL_MVM_VENDOR_ATTR_TXP_LIMIT_52H: TX power limit for 5.2 GHz high (as 2.4)
 * @IWL_MVM_VENDOR_ATTR_OPPPS_WA: workaround to pass Sigma test
 * @IWL_MVM_VENDOR_ATTR_GSCAN_MAX_SCAN_CACHE_SIZE: scan cache size
 *	(in bytes)
 * @IWL_MVM_VENDOR_ATTR_GSCAN_MAX_SCAN_BUCKETS: maximum number of channel
 *	buckets
 * @IWL_MVM_VENDOR_ATTR_GSCAN_MAX_AP_CACHE_PER_SCAN: maximum number of AP's
 *	that can be stored per scan
 * @IWL_MVM_VENDOR_ATTR_GSCAN_MAX_RSSI_SAMPLE_SIZE: number of RSSI samples
 *	used for averaging RSSI
 * @IWL_MVM_VENDOR_ATTR_GSCAN_MAX_SCAN_REPORTING_THRESHOLD: max possible
 *	report threshold. see %IWL_MVM_VENDOR_ATTR_GSCAN_START_REPORT_THRESHOLD
 * @IWL_MVM_VENDOR_ATTR_GSCAN_MAX_HOTLIST_APS: maximum number of entries for
 *	hotlist AP's
 * @IWL_MVM_VENDOR_ATTR_GSCAN_MAX_SIGNIFICANT_CHANGE_APS: maximum number of
 *	entries for significant change AP's
 * @IWL_MVM_VENDOR_ATTR_GSCAN_MAX_BSSID_HISTORY_ENTRIES: number of
 *	BSSID/RSSI entries that the device can hold
 * @IWL_MVM_VENDOR_ATTR_GSCAN_MAC_ADDR: MAC address to be used on gscan scans
 * @IWL_MVM_VENDOR_ATTR_GSCAN_MAC_ADDR_MASK: MAC address mask. Bits set to 0
 *	will be copied from %IWL_MVM_VENDOR_ATTR_GSCAN_MAC_ADDR. Bits set to 1
 *	will be randomized
 * @IWL_MVM_VENDOR_ATTR_GSCAN_MAX_AP_PER_SCAN: number of AP's to store in each
 *	scan in the BSSID/RSSI history buffer (keep the highest RSSI AP's)
 * @IWL_MVM_VENDOR_ATTR_GSCAN_REPORT_THRESHOLD: report that scan results
 *	are available when the buffer reaches this fill level. In percentage.
 * @IWL_MVM_VENDOR_ATTR_GSCAN_BUCKET_SPECS: array of bucket specifications for
 *	this gscan start command. Each bucket spec is a nested attribute of
 *	&enum iwl_mvm_vendor_gscan_bucket_spec.
 * @IWL_MVM_VENDOR_ATTR_GSCAN_RESULTS_EVENT_TYPE: gscan results event type as
 *	specified in &enum iwl_mvm_vendor_results_event_type.
 * @IWL_MVM_VENDOR_ATTR_GSCAN_RESULTS: array of gscan results. Each result is a
 *	nested attribute of &enum iwl_mvm_vendor_gscan_result.
 * @IWL_MVM_VENDOR_ATTR_GSCAN_LOST_AP_SAMPLE_SIZE: number of samples to confirm
 *	AP loss.
 * @IWL_MVM_VENDOR_ATTR_GSCAN_AP_LIST: an array of nested attributes of
 *	&enum iwl_mvm_vendor_ap_threshold_param.
 * @IWL_MVM_VENDOR_ATTR_GSCAN_RSSI_SAMPLE_SIZE: number of samples for averaging
 *	RSSI
 * @IWL_MVM_VENDOR_ATTR_GSCAN_MIN_BREACHING: number of APs breaching threshold
 * @IWL_MVM_VENDOR_ATTR_GSCAN_HOTLIST_AP_STATUS: indicates if a reported AP was
 *	lost or found as specified in &enum iwl_mvm_vendor_hotlist_ap_status.
 * @IWL_MVM_VENDOR_ATTR_GSCAN_SIG_CHANGE_RESULTS: array of significant
 *	change results. Each result is a nested attribute of &enum
 *	iwl_mvm_vendor_significant_change_result.
 * @IWL_MVM_VENDOR_ATTR_RXFILTER: u32 attribute.
 *	See %iwl_mvm_vendor_rxfilter_flags.
 * @IWL_MVM_VENDOR_ATTR_RXFILTER_OP: u32 attribute.
 *	See %iwl_mvm_vendor_rxfilter_op.
 * @IWL_MVM_VENDOR_ATTR_DBG_COLLECT_TRIGGER: description of collect debug data
 *	trigger.
 * @IWL_MVM_VENDOR_ATTR_NAN_FAW_FREQ: u32 attribute. Frequency (in MHz) to be
 *	used for NAN further availability.
 * @IWL_MVM_VENDOR_ATTR_NAN_FAW_SLOTS: u8 attribute. Number of 16TU slots
 *	the NAN device will be available on its FAW between DWs.
 * @IWL_MVM_VENDOR_ATTR_GSCAN_MAX_HOTLIST_SSIDS: maximum number of entries for
 *	hotlist SSID's
 * @IWL_MVM_VENDOR_ATTR_GSCAN_MAX_NUM_EPNO_NETWORKS: max number of epno entries
 * @IWL_MVM_VENDOR_ATTR_GSCAN_MAX_NUM_EPNO_NETWORKS_BY_SSID: max number of epno
 *	entries if ssid is specified
 * @IWL_MVM_VENDOR_ATTR_GSCAN_MAX_NUM_WHITE_LISTED_SSID: max number of white
 *	listed SSIDs
 * @IWL_MVM_VENDOR_ATTR_GSCAN_MAX_NUM_BLACK_LISTED_SSID: max number of black
 *	listed SSIDs
 *
 * @NUM_IWL_MVM_VENDOR_ATTR: number of vendor attributes
 * @MAX_IWL_MVM_VENDOR_ATTR: highest vendor attribute number
 * @IWL_MVM_VENDOR_ATTR_WIPHY_FREQ: frequency of the selected channel in MHz,
 *	defines the channel together with the attributes
 *	%IWL_MVM_VENDOR_ATTR_CHANNEL_WIDTH and if needed
 *	%IWL_MVM_VENDOR_ATTR_CENTER_FREQ1 and
 *	%IWL_MVM_VENDOR_ATTR_CENTER_FREQ2.
 * @IWL_MVM_VENDOR_ATTR_CHANNEL_WIDTH: u32 attribute containing one of the
 *	values of &enum nl80211_chan_width, describing the channel width.
 *	See the documentation of the enum for more information.
 * @IWL_MVM_VENDOR_ATTR_CENTER_FREQ1: Center frequency of the first part of the
 *	channel, used for anything but 20 MHz bandwidth.
 * @IWL_MVM_VENDOR_ATTR_CENTER_FREQ2: Center frequency of the second part of
 *	the channel, used only for 80+80 MHz bandwidth.
 * @IWL_MVM_VENDOR_ATTR_GSCAN_REPORT_THRESHOLD_NUM: report that scan results
 *	are available when the buffer reaches this fill level. In number of scans.
 * @IWL_MVM_VENDOR_ATTR_GSCAN_CACHED_RESULTS: array of gscan cached results.
 *	Each result is a nested attribute of
 *	&enum iwl_mvm_vendor_gscan_cached_scan_res.
 * @IWL_MVM_VENDOR_ATTR_LAST_MSG: Indicates that this message is the last one
 *	in the series of messages. (flag)
 * @IWL_MVM_VENDOR_ATTR_SAR_CHAIN_A_PROFILE: SAR table idx for chain A.
 *	This is a u8.
 * @IWL_MVM_VENDOR_ATTR_SAR_CHAIN_B_PROFILE: SAR table idx for chain B.
 *	This is a u8.
 * @IWL_MVM_VENDOR_ATTR_SAR_ENABLED_PROFILE_NUM: number of enabled SAR profiles.
 *	This is a u8.
 * @IWL_MVM_VENDOR_ATTR_SSID: SSID (binary attribute, 0..32 octets)
 * @IWL_MVM_VENDOR_ATTR_NEIGHBOR_LCI: Flag attribute specifying that the
 *	neighbor request shall query for LCI information.
 * @IWL_MVM_VENDOR_ATTR_NEIGHBOR_CIVIC: Flag attribute specifying that the
 *	neighbor request shall query for CIVIC information.
 * @IWL_MVM_VENDOR_ATTR_NEIGHBOR_REPORT: A list of neighbor APs as received in a
 *	neighbor report frame.
Each AP is a nested attribute of * &enum iwl_mvm_vendor_neighbor_report. * @IWL_MVM_VENDOR_ATTR_SAR_GEO_PROFILE: geo profile info. * see &enum iwl_vendor_sar_per_chain_geo_table. * @IWL_MVM_VENDOR_ATTR_FIPS_TEST_VECTOR_SHA: data vector for FIPS SHA test. * &enum iwl_vendor_fips_test_vector_sha. * @IWL_MVM_VENDOR_ATTR_FIPS_TEST_VECTOR_HMAC: data vector for FIPS HMAC test. * &enum iwl_vendor_fips_test_vector_hmac_kdf. * @IWL_MVM_VENDOR_ATTR_FIPS_TEST_VECTOR_KDF: data vector for FIPS KDF test. * &enum iwl_vendor_fips_test_vector_hmac_kdf. * @IWL_MVM_VENDOR_ATTR_FIPS_TEST_RESULT: FIPS test result. Contains the * output of the requested function. * @IWL_MVM_VENDOR_ATTR_FIPS_TEST_VECTOR_HW_AES: data vector for FIPS AES HW * test. &enum iwl_vendor_fips_test_vector_hw. * @IWL_MVM_VENDOR_ATTR_FIPS_TEST_VECTOR_HW_CCM: data vector for FIPS CCM HW * test. &enum iwl_vendor_fips_test_vector_hw. * @IWL_MVM_VENDOR_ATTR_FIPS_TEST_VECTOR_HW_GCM: data vector for FIPS GCM HW * test. &enum iwl_vendor_fips_test_vector_hw. * @IWL_MVM_VENDOR_ATTR_FMAC_CONNECT_PARAMS_BLACKLIST: an array of BSSIDs to * blacklist. The device shall not try to connect to blacklisted BSSIDs. * This attribute shall not be set if * IWL_MVM_VENDOR_ATTR_FMAC_CONNECT_PARAMS_WHITELIST is set. * @IWL_MVM_VENDOR_ATTR_FMAC_CONNECT_PARAMS_WHITELIST: an array of BSSIDs to * whitelist. The device shall only try to connect to BSSIDs from the list. * This attribute shall not be set if * IWL_MVM_VENDOR_ATTR_FMAC_CONNECT_PARAMS_BLACKLIST is set. * @IWL_MVM_VENDOR_ATTR_FMAC_CONNECT_PARAMS_MAX_RETRIES: number of APs to try * before notifying connection failure. * @IWL_MVM_VENDOR_ATTR_FMAC_CONFIG_STR: a key=value string where key is an * fmac configuration option. * @IWL_MVM_VENDOR_ATTR_CSI_HDR: CSI header * @IWL_MVM_VENDOR_ATTR_CSI_DATA: CSI data */ enum iwl_mvm_vendor_attr { __IWL_MVM_VENDOR_ATTR_INVALID = 0x00, IWL_MVM_VENDOR_ATTR_LOW_LATENCY = 0x01, IWL_MVM_VENDOR_ATTR_VIF_ADDR = 0x02, IWL_MVM_VENDOR_ATTR_VIF_LL = 0x03, IWL_MVM_VENDOR_ATTR_LL = 0x04, IWL_MVM_VENDOR_ATTR_VIF_LOAD = 0x05, IWL_MVM_VENDOR_ATTR_LOAD = 0x06, IWL_MVM_VENDOR_ATTR_COUNTRY = 0x07, IWL_MVM_VENDOR_ATTR_FILTER_ARP_NA = 0x08, IWL_MVM_VENDOR_ATTR_FILTER_GTK = 0x09, IWL_MVM_VENDOR_ATTR_ADDR = 0x0a, IWL_MVM_VENDOR_ATTR_TX_BYTES = 0x0b, IWL_MVM_VENDOR_ATTR_RX_BYTES = 0x0c, IWL_MVM_VENDOR_ATTR_TXP_LIMIT_24 = 0x0d, IWL_MVM_VENDOR_ATTR_TXP_LIMIT_52L = 0x0e, IWL_MVM_VENDOR_ATTR_TXP_LIMIT_52H = 0x0f, IWL_MVM_VENDOR_ATTR_OPPPS_WA = 0x10, IWL_MVM_VENDOR_ATTR_GSCAN_MAX_SCAN_CACHE_SIZE = 0x11, IWL_MVM_VENDOR_ATTR_GSCAN_MAX_SCAN_BUCKETS = 0x12, IWL_MVM_VENDOR_ATTR_GSCAN_MAX_AP_CACHE_PER_SCAN = 0x13, IWL_MVM_VENDOR_ATTR_GSCAN_MAX_RSSI_SAMPLE_SIZE = 0x14, IWL_MVM_VENDOR_ATTR_GSCAN_MAX_SCAN_REPORTING_THRESHOLD = 0x15, IWL_MVM_VENDOR_ATTR_GSCAN_MAX_HOTLIST_APS = 0x16, IWL_MVM_VENDOR_ATTR_GSCAN_MAX_SIGNIFICANT_CHANGE_APS = 0x17, IWL_MVM_VENDOR_ATTR_GSCAN_MAX_BSSID_HISTORY_ENTRIES = 0x18, IWL_MVM_VENDOR_ATTR_GSCAN_MAC_ADDR = 0x19, IWL_MVM_VENDOR_ATTR_GSCAN_MAC_ADDR_MASK = 0x1a, IWL_MVM_VENDOR_ATTR_GSCAN_MAX_AP_PER_SCAN = 0x1b, IWL_MVM_VENDOR_ATTR_GSCAN_REPORT_THRESHOLD = 0x1c, IWL_MVM_VENDOR_ATTR_GSCAN_BUCKET_SPECS = 0x1d, IWL_MVM_VENDOR_ATTR_GSCAN_RESULTS_EVENT_TYPE = 0x1e, IWL_MVM_VENDOR_ATTR_GSCAN_RESULTS = 0x1f, IWL_MVM_VENDOR_ATTR_GSCAN_LOST_AP_SAMPLE_SIZE = 0x20, IWL_MVM_VENDOR_ATTR_GSCAN_AP_LIST = 0x21, IWL_MVM_VENDOR_ATTR_GSCAN_RSSI_SAMPLE_SIZE = 0x22, IWL_MVM_VENDOR_ATTR_GSCAN_MIN_BREACHING = 0x23, IWL_MVM_VENDOR_ATTR_GSCAN_HOTLIST_AP_STATUS = 0x24, 
	IWL_MVM_VENDOR_ATTR_GSCAN_SIG_CHANGE_RESULTS = 0x25,
	IWL_MVM_VENDOR_ATTR_RXFILTER = 0x26,
	IWL_MVM_VENDOR_ATTR_RXFILTER_OP = 0x27,
	IWL_MVM_VENDOR_ATTR_DBG_COLLECT_TRIGGER = 0x28,
	IWL_MVM_VENDOR_ATTR_NAN_FAW_FREQ = 0x29,
	IWL_MVM_VENDOR_ATTR_NAN_FAW_SLOTS = 0x2a,
	IWL_MVM_VENDOR_ATTR_GSCAN_MAX_HOTLIST_SSIDS = 0x2b,
	IWL_MVM_VENDOR_ATTR_GSCAN_MAX_NUM_EPNO_NETWORKS = 0x2c,
	IWL_MVM_VENDOR_ATTR_GSCAN_MAX_NUM_EPNO_NETWORKS_BY_SSID = 0x2d,
	IWL_MVM_VENDOR_ATTR_GSCAN_MAX_NUM_WHITE_LISTED_SSID = 0x2e,
	IWL_MVM_VENDOR_ATTR_GSCAN_MAX_NUM_BLACK_LISTED_SSID = 0x2f,
	IWL_MVM_VENDOR_ATTR_WIPHY_FREQ = 0x30,
	IWL_MVM_VENDOR_ATTR_CHANNEL_WIDTH = 0x31,
	IWL_MVM_VENDOR_ATTR_CENTER_FREQ1 = 0x32,
	IWL_MVM_VENDOR_ATTR_CENTER_FREQ2 = 0x33,
	/* 0x34 is deprecated */
	/* 0x35 is deprecated */
	/* 0x36 is deprecated */
	IWL_MVM_VENDOR_ATTR_GSCAN_REPORT_THRESHOLD_NUM = 0x37,
	IWL_MVM_VENDOR_ATTR_GSCAN_CACHED_RESULTS = 0x38,
	IWL_MVM_VENDOR_ATTR_LAST_MSG = 0x39,
	IWL_MVM_VENDOR_ATTR_SAR_CHAIN_A_PROFILE = 0x3a,
	IWL_MVM_VENDOR_ATTR_SAR_CHAIN_B_PROFILE = 0x3b,
	IWL_MVM_VENDOR_ATTR_SAR_ENABLED_PROFILE_NUM = 0x3c,
	IWL_MVM_VENDOR_ATTR_SSID = 0x3d,
	IWL_MVM_VENDOR_ATTR_NEIGHBOR_LCI = 0x3e,
	IWL_MVM_VENDOR_ATTR_NEIGHBOR_CIVIC = 0x3f,
	IWL_MVM_VENDOR_ATTR_NEIGHBOR_REPORT = 0x40,
	IWL_MVM_VENDOR_ATTR_SAR_GEO_PROFILE = 0x41,
	IWL_MVM_VENDOR_ATTR_FIPS_TEST_VECTOR_SHA = 0x42,
	IWL_MVM_VENDOR_ATTR_FIPS_TEST_VECTOR_HMAC = 0x43,
	IWL_MVM_VENDOR_ATTR_FIPS_TEST_VECTOR_KDF = 0x44,
	IWL_MVM_VENDOR_ATTR_FIPS_TEST_RESULT = 0x45,
	IWL_MVM_VENDOR_ATTR_FIPS_TEST_VECTOR_HW_AES = 0x46,
	IWL_MVM_VENDOR_ATTR_FIPS_TEST_VECTOR_HW_CCM = 0x47,
	IWL_MVM_VENDOR_ATTR_FIPS_TEST_VECTOR_HW_GCM = 0x48,
	IWL_MVM_VENDOR_ATTR_FMAC_CONNECT_PARAMS_BLACKLIST = 0x49,
	IWL_MVM_VENDOR_ATTR_FMAC_CONNECT_PARAMS_WHITELIST = 0x4a,
	IWL_MVM_VENDOR_ATTR_FMAC_CONNECT_PARAMS_MAX_RETRIES = 0x4b,
	IWL_MVM_VENDOR_ATTR_FMAC_CONFIG_STR = 0x4c,
	IWL_MVM_VENDOR_ATTR_CSI_HDR = 0x4d,
	IWL_MVM_VENDOR_ATTR_CSI_DATA = 0x4e,

	NUM_IWL_MVM_VENDOR_ATTR,
	MAX_IWL_MVM_VENDOR_ATTR = NUM_IWL_MVM_VENDOR_ATTR - 1,
};

#define IWL_MVM_VENDOR_FILTER_ARP_NA IWL_MVM_VENDOR_ATTR_FILTER_ARP_NA
#define IWL_MVM_VENDOR_FILTER_GTK IWL_MVM_VENDOR_ATTR_FILTER_GTK

#endif /* __VENDOR_CMD_H__ */

backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/mvm/
backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/mvm/Makefile

# SPDX-License-Identifier: GPL-2.0
obj-$(CPTCFG_IWLMVM) += iwlmvm.o
iwlmvm-y += fw.o mac80211.o nvm.o ops.o phy-ctxt.o mac-ctxt.o
iwlmvm-y += utils.o rx.o rxmq.o tx.o binding.o quota.o sta.o sf.o
iwlmvm-y += scan.o time-event.o rs.o rs-fw.o
iwlmvm-y += power.o coex.o
iwlmvm-y += tt.o offloading.o tdls.o
iwlmvm-y += ftm-initiator.o
iwlmvm-y += ftm-responder.o
iwlmvm-y += nan.o
iwlmvm-$(CPTCFG_IWLWIFI_DEBUGFS) += debugfs.o debugfs-vif.o
iwlmvm-$(CPTCFG_IWLWIFI_LEDS) += led.o
iwlmvm-$(CONFIG_PM) += d3.o

ccflags-y += -I$(src)/../

# non-upstream things
iwlmvm-$(CPTCFG_IWLMVM_VENDOR_CMDS) += vendor-cmd.o

backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/mvm/binding.c

/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2016 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* *****************************************************************************/ #include <net/mac80211.h> #include "fw-api.h" #include "mvm.h" struct iwl_mvm_iface_iterator_data { struct ieee80211_vif *ignore_vif; int idx; struct iwl_mvm_phy_ctxt *phyctxt; u16 ids[MAX_MACS_IN_BINDING]; u16 colors[MAX_MACS_IN_BINDING]; }; static int iwl_mvm_binding_cmd(struct iwl_mvm *mvm, u32 action, struct iwl_mvm_iface_iterator_data *data) { struct iwl_binding_cmd cmd; struct iwl_mvm_phy_ctxt *phyctxt = data->phyctxt; int i, ret; u32 status; int size; memset(&cmd, 0, sizeof(cmd)); if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT)) { size = sizeof(cmd); if (phyctxt->channel->band == NL80211_BAND_2GHZ || !iwl_mvm_is_cdb_supported(mvm)) cmd.lmac_id = cpu_to_le32(IWL_LMAC_24G_INDEX); else cmd.lmac_id = cpu_to_le32(IWL_LMAC_5G_INDEX); } else { size = IWL_BINDING_CMD_SIZE_V1; } cmd.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color)); cmd.action = cpu_to_le32(action); cmd.phy = cpu_to_le32(FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color)); for (i = 0; i < MAX_MACS_IN_BINDING; i++) cmd.macs[i] = cpu_to_le32(FW_CTXT_INVALID); for (i = 0; i < data->idx; i++) cmd.macs[i] = cpu_to_le32(FW_CMD_ID_AND_COLOR(data->ids[i], data->colors[i])); status = 0; ret = iwl_mvm_send_cmd_pdu_status(mvm, BINDING_CONTEXT_CMD, size, &cmd, &status); if (ret) { IWL_ERR(mvm, "Failed to send binding (action:%d): %d\n", action, ret); return ret; } if (status) { IWL_ERR(mvm, "Binding command failed: %u\n", status); ret = -EIO; } return ret; } static void iwl_mvm_iface_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif) { struct iwl_mvm_iface_iterator_data *data = _data; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); if (vif == data->ignore_vif) return; if (mvmvif->phy_ctxt != data->phyctxt) return; if (WARN_ON_ONCE(data->idx >= MAX_MACS_IN_BINDING)) return; data->ids[data->idx] = mvmvif->id; data->colors[data->idx] = mvmvif->color; data->idx++; } static int iwl_mvm_binding_update(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_mvm_phy_ctxt *phyctxt, bool add) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_iface_iterator_data data = { .ignore_vif = vif, .phyctxt = phyctxt, }; u32 action = FW_CTXT_ACTION_MODIFY; lockdep_assert_held(&mvm->mutex); ieee80211_iterate_active_interfaces_atomic(mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_iface_iterator, &data); /* * If there are no other interfaces yet we * need to create a new binding. */ if (data.idx == 0) { if (add) action = FW_CTXT_ACTION_ADD; else action = FW_CTXT_ACTION_REMOVE; } if (add) { if (WARN_ON_ONCE(data.idx >= MAX_MACS_IN_BINDING)) return -EINVAL; data.ids[data.idx] = mvmvif->id; data.colors[data.idx] = mvmvif->color; data.idx++; } return iwl_mvm_binding_cmd(mvm, action, &data); } int iwl_mvm_binding_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); if (WARN_ON_ONCE(!mvmvif->phy_ctxt)) return -EINVAL; /* * Update SF - Disable if needed. If this fails, SF might still be on * while many macs are bound, which is forbidden - so fail the binding.
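 *
 * The resulting call order is asymmetric: on add, SF is disabled
 * before the binding is created (below); on remove,
 * iwl_mvm_binding_remove_vif() drops the binding first and only then
 * re-enables SF.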
*/ if (iwl_mvm_sf_update(mvm, vif, false)) return -EINVAL; return iwl_mvm_binding_update(mvm, vif, mvmvif->phy_ctxt, true); } int iwl_mvm_binding_remove_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int ret; if (WARN_ON_ONCE(!mvmvif->phy_ctxt)) return -EINVAL; ret = iwl_mvm_binding_update(mvm, vif, mvmvif->phy_ctxt, false); if (!ret) if (iwl_mvm_sf_update(mvm, vif, true)) IWL_ERR(mvm, "Failed to update SF state\n"); return ret; } backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/mvm/coex.c000066400000000000000000000515131351064634500264750ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* *****************************************************************************/ #include <linux/ieee80211.h> #include <linux/etherdevice.h> #include <net/mac80211.h> #include "fw/api/coex.h" #include "iwl-modparams.h" #include "mvm.h" #include "iwl-debug.h" /* 20MHz / 40MHz below / 40MHz above */ static const __le64 iwl_ci_mask[][3] = { /* dummy entry for channel 0 */ {cpu_to_le64(0), cpu_to_le64(0), cpu_to_le64(0)}, { cpu_to_le64(0x0000001FFFULL), cpu_to_le64(0x0ULL), cpu_to_le64(0x00007FFFFFULL), }, { cpu_to_le64(0x000000FFFFULL), cpu_to_le64(0x0ULL), cpu_to_le64(0x0003FFFFFFULL), }, { cpu_to_le64(0x000003FFFCULL), cpu_to_le64(0x0ULL), cpu_to_le64(0x000FFFFFFCULL), }, { cpu_to_le64(0x00001FFFE0ULL), cpu_to_le64(0x0ULL), cpu_to_le64(0x007FFFFFE0ULL), }, { cpu_to_le64(0x00007FFF80ULL), cpu_to_le64(0x00007FFFFFULL), cpu_to_le64(0x01FFFFFF80ULL), }, { cpu_to_le64(0x0003FFFC00ULL), cpu_to_le64(0x0003FFFFFFULL), cpu_to_le64(0x0FFFFFFC00ULL), }, { cpu_to_le64(0x000FFFF000ULL), cpu_to_le64(0x000FFFFFFCULL), cpu_to_le64(0x3FFFFFF000ULL), }, { cpu_to_le64(0x007FFF8000ULL), cpu_to_le64(0x007FFFFFE0ULL), cpu_to_le64(0xFFFFFF8000ULL), }, { cpu_to_le64(0x01FFFE0000ULL), cpu_to_le64(0x01FFFFFF80ULL), cpu_to_le64(0xFFFFFE0000ULL), }, { cpu_to_le64(0x0FFFF00000ULL), cpu_to_le64(0x0FFFFFFC00ULL), cpu_to_le64(0x0ULL), }, { cpu_to_le64(0x3FFFC00000ULL), cpu_to_le64(0x3FFFFFF000ULL), cpu_to_le64(0x0) }, { cpu_to_le64(0xFFFE000000ULL), cpu_to_le64(0xFFFFFF8000ULL), cpu_to_le64(0x0) }, { cpu_to_le64(0xFFF8000000ULL), cpu_to_le64(0xFFFFFE0000ULL), cpu_to_le64(0x0) }, { cpu_to_le64(0xFE00000000ULL), cpu_to_le64(0x0ULL), cpu_to_le64(0x0ULL) }, }; static enum iwl_bt_coex_lut_type iwl_get_coex_type(struct iwl_mvm *mvm, const struct ieee80211_vif *vif) { struct ieee80211_chanctx_conf *chanctx_conf; enum iwl_bt_coex_lut_type ret; u16 phy_ctx_id; u32 primary_ch_phy_id, secondary_ch_phy_id; /* * Checking that we hold mvm->mutex is a good idea, but the rate * control can't acquire the mutex since it runs in Tx path. * So this is racy in that case, but in the worst case, the AMPDU * size limit will be wrong for a short time which is not a big * issue. */ rcu_read_lock(); chanctx_conf = rcu_dereference(vif->chanctx_conf); if (!chanctx_conf || chanctx_conf->def.chan->band != NL80211_BAND_2GHZ) { rcu_read_unlock(); return BT_COEX_INVALID_LUT; } ret = BT_COEX_TX_DIS_LUT; if (mvm->cfg->bt_shared_single_ant) { rcu_read_unlock(); return ret; } phy_ctx_id = *((u16 *)chanctx_conf->drv_priv); primary_ch_phy_id = le32_to_cpu(mvm->last_bt_ci_cmd.primary_ch_phy_id); secondary_ch_phy_id = le32_to_cpu(mvm->last_bt_ci_cmd.secondary_ch_phy_id); if (primary_ch_phy_id == phy_ctx_id) ret = le32_to_cpu(mvm->last_bt_notif.primary_ch_lut); else if (secondary_ch_phy_id == phy_ctx_id) ret = le32_to_cpu(mvm->last_bt_notif.secondary_ch_lut); /* else - default = TX TX disallowed */ rcu_read_unlock(); return ret; } int iwl_mvm_send_bt_init_conf(struct iwl_mvm *mvm) { struct iwl_bt_coex_cmd bt_cmd = {}; u32 mode; lockdep_assert_held(&mvm->mutex); if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) { switch (mvm->bt_force_ant_mode) { case BT_FORCE_ANT_BT: mode = BT_COEX_BT; break; case BT_FORCE_ANT_WIFI: mode = BT_COEX_WIFI; break; default: WARN_ON(1); mode = 0; } bt_cmd.mode = cpu_to_le32(mode); goto send_cmd; } mode = iwlwifi_mod_params.bt_coex_active ?
BT_COEX_NW : BT_COEX_DISABLE; bt_cmd.mode = cpu_to_le32(mode); if (IWL_MVM_BT_COEX_SYNC2SCO) bt_cmd.enabled_modules |= cpu_to_le32(BT_COEX_SYNC2SCO_ENABLED); if (iwl_mvm_is_mplut_supported(mvm)) bt_cmd.enabled_modules |= cpu_to_le32(BT_COEX_MPLUT_ENABLED); bt_cmd.enabled_modules |= cpu_to_le32(BT_COEX_HIGH_BAND_RET); send_cmd: memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif)); memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd)); return iwl_mvm_send_cmd_pdu(mvm, BT_CONFIG, 0, sizeof(bt_cmd), &bt_cmd); } static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id, bool enable) { struct iwl_bt_coex_reduced_txp_update_cmd cmd = {}; struct iwl_mvm_sta *mvmsta; u32 value; mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id); if (!mvmsta) return 0; /* nothing to do */ if (mvmsta->bt_reduced_txpower == enable) return 0; value = mvmsta->sta_id; if (enable) value |= BT_REDUCED_TX_POWER_BIT; IWL_DEBUG_COEX(mvm, "%sable reduced Tx Power for sta %d\n", enable ? "en" : "dis", sta_id); cmd.reduced_txp = cpu_to_le32(value); mvmsta->bt_reduced_txpower = enable; return iwl_mvm_send_cmd_pdu(mvm, BT_COEX_UPDATE_REDUCED_TXP, CMD_ASYNC, sizeof(cmd), &cmd); } struct iwl_bt_iterator_data { struct iwl_bt_coex_profile_notif *notif; struct iwl_mvm *mvm; struct ieee80211_chanctx_conf *primary; struct ieee80211_chanctx_conf *secondary; bool primary_ll; u8 primary_load; u8 secondary_load; }; static inline void iwl_mvm_bt_coex_enable_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif, bool enable, int rssi) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); mvmvif->bf_data.last_bt_coex_event = rssi; mvmvif->bf_data.bt_coex_max_thold = enable ? -IWL_MVM_BT_COEX_EN_RED_TXP_THRESH : 0; mvmvif->bf_data.bt_coex_min_thold = enable ? -IWL_MVM_BT_COEX_DIS_RED_TXP_THRESH : 0; } #define MVM_COEX_TCM_PERIOD (HZ * 10) static void iwl_mvm_bt_coex_tcm_based_ci(struct iwl_mvm *mvm, struct iwl_bt_iterator_data *data) { unsigned long now = jiffies; if (!time_after(now, mvm->bt_coex_last_tcm_ts + MVM_COEX_TCM_PERIOD)) return; mvm->bt_coex_last_tcm_ts = now; /* We assume here that we don't have more than 2 vifs on 2.4GHz */ /* if the primary is low latency, it will stay primary */ if (data->primary_ll) return; if (data->primary_load >= data->secondary_load) return; swap(data->primary, data->secondary); } /* must be called under rcu_read_lock */ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_bt_iterator_data *data = _data; struct iwl_mvm *mvm = data->mvm; struct ieee80211_chanctx_conf *chanctx_conf; /* default smps_mode is AUTOMATIC - only used for client modes */ enum ieee80211_smps_mode smps_mode = IEEE80211_SMPS_AUTOMATIC; u32 bt_activity_grading, min_ag_for_static_smps; int ave_rssi; lockdep_assert_held(&mvm->mutex); switch (vif->type) { case NL80211_IFTYPE_STATION: break; case NL80211_IFTYPE_AP: if (!mvmvif->ap_ibss_active) return; break; default: return; } chanctx_conf = rcu_dereference(vif->chanctx_conf); /* If channel context is invalid or not on 2.4GHz .. */ if ((!chanctx_conf || chanctx_conf->def.chan->band != NL80211_BAND_2GHZ)) { if (vif->type == NL80211_IFTYPE_STATION) { /* ... 
relax constraints and disable rssi events */ iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX, smps_mode); iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false); iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0); } return; } if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_COEX_SCHEMA_2)) min_ag_for_static_smps = BT_VERY_HIGH_TRAFFIC; else min_ag_for_static_smps = BT_HIGH_TRAFFIC; bt_activity_grading = le32_to_cpu(data->notif->bt_activity_grading); if (bt_activity_grading >= min_ag_for_static_smps) smps_mode = IEEE80211_SMPS_STATIC; else if (bt_activity_grading >= BT_LOW_TRAFFIC) smps_mode = IEEE80211_SMPS_DYNAMIC; /* relax SMPS constraints for next association */ if (!vif->bss_conf.assoc) smps_mode = IEEE80211_SMPS_AUTOMATIC; if (mvmvif->phy_ctxt && (mvm->last_bt_notif.rrc_status & BIT(mvmvif->phy_ctxt->id))) smps_mode = IEEE80211_SMPS_AUTOMATIC; IWL_DEBUG_COEX(data->mvm, "mac %d: bt_activity_grading %d smps_req %d\n", mvmvif->id, bt_activity_grading, smps_mode); if (vif->type == NL80211_IFTYPE_STATION) iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX, smps_mode); /* low latency is always primary */ if (iwl_mvm_vif_low_latency(mvmvif)) { data->primary_ll = true; data->secondary = data->primary; data->primary = chanctx_conf; } if (vif->type == NL80211_IFTYPE_AP) { if (!mvmvif->ap_ibss_active) return; if (chanctx_conf == data->primary) return; if (!data->primary_ll) { /* * downgrade the current primary no matter what its * type is. */ data->secondary = data->primary; data->primary = chanctx_conf; } else { /* there is low latency vif - we will be secondary */ data->secondary = chanctx_conf; } if (data->primary == chanctx_conf) data->primary_load = mvm->tcm.result.load[mvmvif->id]; else if (data->secondary == chanctx_conf) data->secondary_load = mvm->tcm.result.load[mvmvif->id]; return; } /* * STA / P2P Client, try to be primary if first vif. 
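 * (With two regular clients, the first vif iterated stays primary and
 * the next becomes secondary; a load-based swap can still happen later
 * in iwl_mvm_bt_coex_tcm_based_ci().)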
If we are in low * latency mode, we are already in primary and just don't do much */ if (!data->primary || data->primary == chanctx_conf) data->primary = chanctx_conf; else if (!data->secondary) /* if secondary is not NULL, it might be a GO */ data->secondary = chanctx_conf; if (data->primary == chanctx_conf) data->primary_load = mvm->tcm.result.load[mvmvif->id]; else if (data->secondary == chanctx_conf) data->secondary_load = mvm->tcm.result.load[mvmvif->id]; /* * don't reduce the Tx power if one of these is true: * we are in LOOSE * single share antenna product * BT is inactive * we are not associated */ if (iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT || mvm->cfg->bt_shared_single_ant || !vif->bss_conf.assoc || le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) == BT_OFF) { iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false); iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0); return; } /* try to get the avg rssi from fw */ ave_rssi = mvmvif->bf_data.ave_beacon_signal; /* if the RSSI isn't valid, fake it is very low */ if (!ave_rssi) ave_rssi = -100; if (ave_rssi > -IWL_MVM_BT_COEX_EN_RED_TXP_THRESH) { if (iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, true)) IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n"); } else if (ave_rssi < -IWL_MVM_BT_COEX_DIS_RED_TXP_THRESH) { if (iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false)) IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n"); } /* Begin to monitor the RSSI: it may influence the reduced Tx power */ iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, true, ave_rssi); } static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm) { struct iwl_bt_iterator_data data = { .mvm = mvm, .notif = &mvm->last_bt_notif, }; struct iwl_bt_coex_ci_cmd cmd = {}; u8 ci_bw_idx; /* Ignore updates if we are in force mode */ if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) return; rcu_read_lock(); ieee80211_iterate_active_interfaces_atomic( mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_bt_notif_iterator, &data); iwl_mvm_bt_coex_tcm_based_ci(mvm, &data); if (data.primary) { struct ieee80211_chanctx_conf *chan = data.primary; if (WARN_ON(!chan->def.chan)) { rcu_read_unlock(); return; } if (chan->def.width < NL80211_CHAN_WIDTH_40) { ci_bw_idx = 0; } else { if (chan->def.center_freq1 > chan->def.chan->center_freq) ci_bw_idx = 2; else ci_bw_idx = 1; } cmd.bt_primary_ci = iwl_ci_mask[chan->def.chan->hw_value][ci_bw_idx]; cmd.primary_ch_phy_id = cpu_to_le32(*((u16 *)data.primary->drv_priv)); } if (data.secondary) { struct ieee80211_chanctx_conf *chan = data.secondary; if (WARN_ON(!data.secondary->def.chan)) { rcu_read_unlock(); return; } if (chan->def.width < NL80211_CHAN_WIDTH_40) { ci_bw_idx = 0; } else { if (chan->def.center_freq1 > chan->def.chan->center_freq) ci_bw_idx = 2; else ci_bw_idx = 1; } cmd.bt_secondary_ci = iwl_ci_mask[chan->def.chan->hw_value][ci_bw_idx]; cmd.secondary_ch_phy_id = cpu_to_le32(*((u16 *)data.secondary->drv_priv)); } rcu_read_unlock(); /* Don't spam the fw with the same command over and over */ if (memcmp(&cmd, &mvm->last_bt_ci_cmd, sizeof(cmd))) { if (iwl_mvm_send_cmd_pdu(mvm, BT_COEX_CI, 0, sizeof(cmd), &cmd)) IWL_ERR(mvm, "Failed to send BT_CI cmd\n"); memcpy(&mvm->last_bt_ci_cmd, &cmd, sizeof(cmd)); } } void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_bt_coex_profile_notif *notif = (void *)pkt->data; IWL_DEBUG_COEX(mvm, "BT Coex Notification received\n"); IWL_DEBUG_COEX(mvm, "\tBT ci compliance %d\n", 
notif->bt_ci_compliance); IWL_DEBUG_COEX(mvm, "\tBT primary_ch_lut %d\n", le32_to_cpu(notif->primary_ch_lut)); IWL_DEBUG_COEX(mvm, "\tBT secondary_ch_lut %d\n", le32_to_cpu(notif->secondary_ch_lut)); IWL_DEBUG_COEX(mvm, "\tBT activity grading %d\n", le32_to_cpu(notif->bt_activity_grading)); /* remember this notification for future use: rssi fluctuations */ memcpy(&mvm->last_bt_notif, notif, sizeof(mvm->last_bt_notif)); iwl_mvm_bt_coex_notif_handle(mvm); } void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif, enum ieee80211_rssi_event_data rssi_event) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int ret; lockdep_assert_held(&mvm->mutex); /* Ignore updates if we are in force mode */ if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) return; /* * Rssi update while not associated - can happen since the statistics * are handled asynchronously */ if (mvmvif->ap_sta_id == IWL_MVM_INVALID_STA) return; /* No BT - reports should be disabled */ if (le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) == BT_OFF) return; IWL_DEBUG_COEX(mvm, "RSSI for %pM is now %s\n", vif->bss_conf.bssid, rssi_event == RSSI_EVENT_HIGH ? "HIGH" : "LOW"); /* * Check if rssi is good enough for reduced Tx power, but not in loose * scheme. */ if (rssi_event == RSSI_EVENT_LOW || mvm->cfg->bt_shared_single_ant || iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT) ret = iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false); else ret = iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, true); if (ret) IWL_ERR(mvm, "couldn't send BT_CONFIG HCMD upon RSSI event\n"); } #define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000) #define LINK_QUAL_AGG_TIME_LIMIT_BT_ACT (1200) u16 iwl_mvm_coex_agg_time_limit(struct iwl_mvm *mvm, struct ieee80211_sta *sta) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif); struct iwl_mvm_phy_ctxt *phy_ctxt = mvmvif->phy_ctxt; enum iwl_bt_coex_lut_type lut_type; if (mvm->last_bt_notif.ttc_status & BIT(phy_ctxt->id)) return LINK_QUAL_AGG_TIME_LIMIT_DEF; if (le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) < BT_HIGH_TRAFFIC) return LINK_QUAL_AGG_TIME_LIMIT_DEF; lut_type = iwl_get_coex_type(mvm, mvmsta->vif); if (lut_type == BT_COEX_LOOSE_LUT || lut_type == BT_COEX_INVALID_LUT) return LINK_QUAL_AGG_TIME_LIMIT_DEF; /* tight coex, high bt traffic, reduce AGG time limit */ return LINK_QUAL_AGG_TIME_LIMIT_BT_ACT; } bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm, struct ieee80211_sta *sta) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif); struct iwl_mvm_phy_ctxt *phy_ctxt = mvmvif->phy_ctxt; enum iwl_bt_coex_lut_type lut_type; if (mvm->last_bt_notif.ttc_status & BIT(phy_ctxt->id)) return true; if (le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) < BT_HIGH_TRAFFIC) return true; /* * In Tight / TxTxDis, BT can't Rx while we Tx, so use both antennas * since BT is already killed. * In Loose, BT can Rx while we Tx, so forbid MIMO to let BT Rx while * we Tx. * When we are in 5GHz, we'll get BT_COEX_INVALID_LUT allowing MIMO. 
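 *
 * Summarized as a table (restating the check below):
 *
 *	LUT type	MIMO allowed while BT is active?
 *	TIGHT/TX_DIS	yes - BT cannot Rx anyway
 *	LOOSE		no - keep an antenna free for BT Rx
 *	INVALID (5GHz)	yes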
*/ lut_type = iwl_get_coex_type(mvm, mvmsta->vif); return lut_type != BT_COEX_LOOSE_LUT; } bool iwl_mvm_bt_coex_is_ant_avail(struct iwl_mvm *mvm, u8 ant) { /* there is no other antenna, shared antenna is always available */ if (mvm->cfg->bt_shared_single_ant) return true; if (ant & mvm->cfg->non_shared_ant) return true; return le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) < BT_HIGH_TRAFFIC; } bool iwl_mvm_bt_coex_is_shared_ant_avail(struct iwl_mvm *mvm) { /* there is no other antenna, shared antenna is always available */ if (mvm->cfg->bt_shared_single_ant) return true; return le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) < BT_HIGH_TRAFFIC; } bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm, enum nl80211_band band) { u32 bt_activity = le32_to_cpu(mvm->last_bt_notif.bt_activity_grading); if (band != NL80211_BAND_2GHZ) return false; return bt_activity >= BT_LOW_TRAFFIC; } u8 iwl_mvm_bt_coex_get_single_ant_msk(struct iwl_mvm *mvm, u8 enabled_ants) { if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_COEX_SCHEMA_2) && (mvm->cfg->non_shared_ant & enabled_ants)) return mvm->cfg->non_shared_ant; return first_antenna(enabled_ants); } u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr, struct ieee80211_tx_info *info, u8 ac) { __le16 fc = hdr->frame_control; bool mplut_enabled = iwl_mvm_is_mplut_supported(mvm); if (info->band != NL80211_BAND_2GHZ) return 0; if (unlikely(mvm->bt_tx_prio)) return mvm->bt_tx_prio - 1; if (likely(ieee80211_is_data(fc))) { if (likely(ieee80211_is_data_qos(fc))) { switch (ac) { case IEEE80211_AC_BE: return mplut_enabled ? 1 : 0; case IEEE80211_AC_VI: return mplut_enabled ? 2 : 3; case IEEE80211_AC_VO: return 3; default: return 0; } } else if (is_multicast_ether_addr(hdr->addr1)) { return 3; } else return 0; } else if (ieee80211_is_mgmt(fc)) { return ieee80211_is_disassoc(fc) ? 0 : 3; } else if (ieee80211_is_ctl(fc)) { /* ignore cfend and cfendack frames as we never send those */ return 3; } return 0; } void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm) { iwl_mvm_bt_coex_notif_handle(mvm); } backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/mvm/constants.h000066400000000000000000000354541351064634500275660ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2015 Intel Deutschland GmbH * Copyright (C) 2018-2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. 
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2015 Intel Deutschland GmbH * Copyright (C) 2018-2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #ifndef __MVM_CONSTANTS_H #define __MVM_CONSTANTS_H #include <linux/ieee80211.h> #include "fw-api.h" #define IWL_MVM_UAPSD_NOAGG_BSSIDS_NUM 20 #ifndef CPTCFG_IWLWIFI_SUPPORT_DEBUG_OVERRIDES #define IWL_MVM_DEFAULT_PS_TX_DATA_TIMEOUT (100 * USEC_PER_MSEC) #define IWL_MVM_DEFAULT_PS_RX_DATA_TIMEOUT (100 * USEC_PER_MSEC) #define IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT (10 * USEC_PER_MSEC) #define IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT (10 * USEC_PER_MSEC) #define IWL_MVM_SHORT_PS_TX_DATA_TIMEOUT (2 * 1024) /* defined in TU */ #define IWL_MVM_SHORT_PS_RX_DATA_TIMEOUT (40 * 1024) /* defined in TU */ #define IWL_MVM_P2P_LOWLATENCY_PS_ENABLE 1 #define IWL_MVM_UAPSD_RX_DATA_TIMEOUT (50 * USEC_PER_MSEC) #define IWL_MVM_UAPSD_TX_DATA_TIMEOUT (50 * USEC_PER_MSEC) #define IWL_MVM_UAPSD_QUEUES (IEEE80211_WMM_IE_STA_QOSINFO_AC_VO |\ IEEE80211_WMM_IE_STA_QOSINFO_AC_VI |\ IEEE80211_WMM_IE_STA_QOSINFO_AC_BK |\ IEEE80211_WMM_IE_STA_QOSINFO_AC_BE) #define IWL_MVM_PS_HEAVY_TX_THLD_PACKETS 20 #define IWL_MVM_PS_HEAVY_RX_THLD_PACKETS 8 #define IWL_MVM_PS_SNOOZE_HEAVY_TX_THLD_PACKETS 30 #define IWL_MVM_PS_SNOOZE_HEAVY_RX_THLD_PACKETS 20 #define IWL_MVM_PS_HEAVY_TX_THLD_PERCENT 50 #define IWL_MVM_PS_HEAVY_RX_THLD_PERCENT 50 #define IWL_MVM_PS_SNOOZE_INTERVAL 25 #define IWL_MVM_PS_SNOOZE_WINDOW 50 #define IWL_MVM_WOWLAN_PS_SNOOZE_WINDOW 25 #define IWL_MVM_LOWLAT_QUOTA_MIN_PERCENT 64 #define IWL_MVM_BT_COEX_EN_RED_TXP_THRESH 62 #define IWL_MVM_BT_COEX_DIS_RED_TXP_THRESH 65 #define IWL_MVM_BT_COEX_SYNC2SCO 1 #define IWL_MVM_BT_COEX_MPLUT 1 #define IWL_MVM_BT_COEX_RRC 1 #define IWL_MVM_BT_COEX_TTC 1 #define IWL_MVM_BT_COEX_MPLUT_REG0 0x22002200 #define IWL_MVM_BT_COEX_MPLUT_REG1 0x11118451 #define IWL_MVM_BT_COEX_ANTENNA_COUPLING_THRS 30 #define IWL_MVM_FW_MCAST_FILTER_PASS_ALL 0 #define IWL_MVM_FW_BCAST_FILTER_PASS_ALL 0 #define IWL_MVM_QUOTA_THRESHOLD 4 #define
IWL_MVM_RS_RSSI_BASED_INIT_RATE 0 #define IWL_MVM_RS_80_20_FAR_RANGE_TWEAK 1 #define IWL_MVM_TOF_IS_RESPONDER 0 #define IWL_MVM_SW_TX_CSUM_OFFLOAD 0 #define IWL_MVM_HW_CSUM_DISABLE 0 #define IWL_MVM_PARSE_NVM 0 #define IWL_MVM_ADWELL_ENABLE 1 #define IWL_MVM_ADWELL_MAX_BUDGET 0 #define IWL_MVM_TCM_LOAD_MEDIUM_THRESH 10 /* percentage */ #define IWL_MVM_TCM_LOAD_HIGH_THRESH 50 /* percentage */ #define IWL_MVM_TCM_LOWLAT_ENABLE_THRESH 100 /* packets/10 seconds */ #define IWL_MVM_UAPSD_NONAGG_PERIOD 5000 /* msecs */ #define IWL_MVM_UAPSD_NOAGG_LIST_LEN IWL_MVM_UAPSD_NOAGG_BSSIDS_NUM #define IWL_MVM_NON_TRANSMITTING_AP 0 #define IWL_MVM_RS_NUM_TRY_BEFORE_ANT_TOGGLE 1 #define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE 2 #define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE_TW 1 #define IWL_MVM_RS_INITIAL_MIMO_NUM_RATES 3 #define IWL_MVM_RS_INITIAL_SISO_NUM_RATES 3 #define IWL_MVM_RS_INITIAL_LEGACY_NUM_RATES 2 #define IWL_MVM_RS_INITIAL_LEGACY_RETRIES 2 #define IWL_MVM_RS_SECONDARY_LEGACY_RETRIES 1 #define IWL_MVM_RS_SECONDARY_LEGACY_NUM_RATES 16 #define IWL_MVM_RS_SECONDARY_SISO_NUM_RATES 3 #define IWL_MVM_RS_SECONDARY_SISO_RETRIES 1 #define IWL_MVM_RS_RATE_MIN_FAILURE_TH 3 #define IWL_MVM_RS_RATE_MIN_SUCCESS_TH 8 #define IWL_MVM_RS_STAY_IN_COLUMN_TIMEOUT 5 /* Seconds */ #define IWL_MVM_RS_IDLE_TIMEOUT 5 /* Seconds */ #define IWL_MVM_RS_MISSED_RATE_MAX 15 #define IWL_MVM_RS_LEGACY_FAILURE_LIMIT 160 #define IWL_MVM_RS_LEGACY_SUCCESS_LIMIT 480 #define IWL_MVM_RS_LEGACY_TABLE_COUNT 160 #define IWL_MVM_RS_NON_LEGACY_FAILURE_LIMIT 400 #define IWL_MVM_RS_NON_LEGACY_SUCCESS_LIMIT 4500 #define IWL_MVM_RS_NON_LEGACY_TABLE_COUNT 1500 #define IWL_MVM_RS_SR_FORCE_DECREASE 15 /* percent */ #define IWL_MVM_RS_SR_NO_DECREASE 85 /* percent */ #define IWL_MVM_RS_AGG_TIME_LIMIT 4000 /* 4 msecs. 
valid 100-8000 */ #define IWL_MVM_RS_AGG_DISABLE_START 3 #define IWL_MVM_RS_AGG_START_THRESHOLD 10 /* num frames per second */ #define IWL_MVM_RS_TPC_SR_FORCE_INCREASE 75 /* percent */ #define IWL_MVM_RS_TPC_SR_NO_INCREASE 85 /* percent */ #define IWL_MVM_RS_TPC_TX_POWER_STEP 3 #define IWL_MVM_ENABLE_EBS 1 #define IWL_MVM_FTM_INITIATOR_ALGO IWL_TOF_ALGO_TYPE_MAX_LIKE #define IWL_MVM_FTM_INITIATOR_DYNACK true #define IWL_MVM_D3_DEBUG false #define IWL_MVM_USE_TWT true #else /* CPTCFG_IWLWIFI_SUPPORT_DEBUG_OVERRIDES */ #define IWL_MVM_DEFAULT_PS_TX_DATA_TIMEOUT (mvm->trans->dbg_cfg.MVM_DEFAULT_PS_TX_DATA_TIMEOUT) #define IWL_MVM_DEFAULT_PS_RX_DATA_TIMEOUT (mvm->trans->dbg_cfg.MVM_DEFAULT_PS_RX_DATA_TIMEOUT) #define IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT (mvm->trans->dbg_cfg.MVM_WOWLAN_PS_TX_DATA_TIMEOUT) #define IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT (mvm->trans->dbg_cfg.MVM_WOWLAN_PS_RX_DATA_TIMEOUT) #define IWL_MVM_SHORT_PS_TX_DATA_TIMEOUT (mvm->trans->dbg_cfg.MVM_SHORT_PS_TX_DATA_TIMEOUT) #define IWL_MVM_SHORT_PS_RX_DATA_TIMEOUT (mvm->trans->dbg_cfg.MVM_SHORT_PS_RX_DATA_TIMEOUT) #define IWL_MVM_P2P_LOWLATENCY_PS_ENABLE (mvm->trans->dbg_cfg.MVM_P2P_LOWLATENCY_PS_ENABLE) #define IWL_MVM_UAPSD_TX_DATA_TIMEOUT (mvm->trans->dbg_cfg.MVM_UAPSD_TX_DATA_TIMEOUT) #define IWL_MVM_UAPSD_RX_DATA_TIMEOUT (mvm->trans->dbg_cfg.MVM_UAPSD_RX_DATA_TIMEOUT) #define IWL_MVM_UAPSD_QUEUES (mvm->trans->dbg_cfg.MVM_UAPSD_QUEUES) #define IWL_MVM_PS_HEAVY_TX_THLD_PACKETS (mvm->trans->dbg_cfg.MVM_PS_HEAVY_TX_THLD_PACKETS) #define IWL_MVM_PS_HEAVY_RX_THLD_PACKETS (mvm->trans->dbg_cfg.MVM_PS_HEAVY_RX_THLD_PACKETS) #define IWL_MVM_PS_SNOOZE_HEAVY_TX_THLD_PACKETS (mvm->trans->dbg_cfg.MVM_PS_SNOOZE_HEAVY_TX_THLD_PACKETS) #define IWL_MVM_PS_SNOOZE_HEAVY_RX_THLD_PACKETS (mvm->trans->dbg_cfg.MVM_PS_SNOOZE_HEAVY_RX_THLD_PACKETS) #define IWL_MVM_PS_HEAVY_TX_THLD_PERCENT (mvm->trans->dbg_cfg.MVM_PS_HEAVY_TX_THLD_PERCENT) #define IWL_MVM_PS_HEAVY_RX_THLD_PERCENT (mvm->trans->dbg_cfg.MVM_PS_HEAVY_RX_THLD_PERCENT) #define IWL_MVM_PS_SNOOZE_INTERVAL (mvm->trans->dbg_cfg.MVM_PS_SNOOZE_INTERVAL) #define IWL_MVM_PS_SNOOZE_WINDOW (mvm->trans->dbg_cfg.MVM_PS_SNOOZE_WINDOW) #define IWL_MVM_WOWLAN_PS_SNOOZE_WINDOW (mvm->trans->dbg_cfg.MVM_WOWLAN_PS_SNOOZE_WINDOW) #define IWL_MVM_LOWLAT_QUOTA_MIN_PERCENT (mvm->trans->dbg_cfg.MVM_LOWLAT_QUOTA_MIN_PERCENT) #define IWL_MVM_BT_COEX_EN_RED_TXP_THRESH (mvm->trans->dbg_cfg.MVM_BT_COEX_EN_RED_TXP_THRESH) #define IWL_MVM_BT_COEX_DIS_RED_TXP_THRESH (mvm->trans->dbg_cfg.MVM_BT_COEX_DIS_RED_TXP_THRESH) #define IWL_MVM_BT_COEX_SYNC2SCO (mvm->trans->dbg_cfg.MVM_BT_COEX_SYNC2SCO) #define IWL_MVM_BT_COEX_MPLUT (mvm->trans->dbg_cfg.MVM_BT_COEX_MPLUT) #define IWL_MVM_BT_COEX_RRC (mvm->trans->dbg_cfg.MVM_BT_COEX_RRC) #define IWL_MVM_BT_COEX_TTC (mvm->trans->dbg_cfg.MVM_BT_COEX_TTC) #define IWL_MVM_BT_COEX_MPLUT_REG0 (mvm->trans->dbg_cfg.MVM_BT_COEX_MPLUT_REG0) #define IWL_MVM_BT_COEX_MPLUT_REG1 (mvm->trans->dbg_cfg.MVM_BT_COEX_MPLUT_REG1) #define IWL_MVM_BT_COEX_ANTENNA_COUPLING_THRS (mvm->trans->dbg_cfg.MVM_BT_COEX_ANTENNA_COUPLING_THRS) #define IWL_MVM_FW_MCAST_FILTER_PASS_ALL (mvm->trans->dbg_cfg.MVM_FW_MCAST_FILTER_PASS_ALL) #define IWL_MVM_FW_BCAST_FILTER_PASS_ALL (mvm->trans->dbg_cfg.MVM_FW_BCAST_FILTER_PASS_ALL) #define IWL_MVM_TOF_IS_RESPONDER (mvm->trans->dbg_cfg.MVM_TOF_IS_RESPONDER) #define IWL_MVM_SW_TX_CSUM_OFFLOAD (mvm->trans->dbg_cfg.MVM_SW_TX_CSUM_OFFLOAD) #define IWL_MVM_HW_CSUM_DISABLE (mvm->trans->dbg_cfg.MVM_HW_CSUM_DISABLE) #define IWL_MVM_PARSE_NVM (mvm->trans->dbg_cfg.MVM_PARSE_NVM) #define 
IWL_MVM_ADWELL_ENABLE (mvm->trans->dbg_cfg.MVM_ADWELL_ENABLE) #define IWL_MVM_ADWELL_MAX_BUDGET (mvm->trans->dbg_cfg.MVM_ADWELL_MAX_BUDGET) #define IWL_MVM_TCM_LOAD_MEDIUM_THRESH (mvm->trans->dbg_cfg.MVM_TCM_LOAD_MEDIUM_THRESH) #define IWL_MVM_TCM_LOAD_HIGH_THRESH (mvm->trans->dbg_cfg.MVM_TCM_LOAD_HIGH_THRESH) #define IWL_MVM_TCM_LOWLAT_ENABLE_THRESH (mvm->trans->dbg_cfg.MVM_TCM_LOWLAT_ENABLE_THRESH) #define IWL_MVM_UAPSD_NONAGG_PERIOD (mvm->trans->dbg_cfg.MVM_UAPSD_NONAGG_PERIOD) #define IWL_MVM_UAPSD_NOAGG_LIST_LEN (mvm->trans->dbg_cfg.MVM_UAPSD_NOAGG_LIST_LEN) #define IWL_MVM_NON_TRANSMITTING_AP (mvm->trans->dbg_cfg.MVM_NON_TRANSMITTING_AP) #define IWL_MVM_QUOTA_THRESHOLD (mvm->trans->dbg_cfg.MVM_QUOTA_THRESHOLD) #define IWL_MVM_RS_RSSI_BASED_INIT_RATE (mvm->trans->dbg_cfg.MVM_RS_RSSI_BASED_INIT_RATE) #define IWL_MVM_RS_80_20_FAR_RANGE_TWEAK (mvm->trans->dbg_cfg.MVM_RS_80_20_FAR_RANGE_TWEAK) #define IWL_MVM_RS_NUM_TRY_BEFORE_ANT_TOGGLE (mvm->trans->dbg_cfg.MVM_RS_NUM_TRY_BEFORE_ANT_TOGGLE) #define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE (mvm->trans->dbg_cfg.MVM_RS_HT_VHT_RETRIES_PER_RATE) #define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE_TW (mvm->trans->dbg_cfg.MVM_RS_HT_VHT_RETRIES_PER_RATE_TW) #define IWL_MVM_RS_INITIAL_MIMO_NUM_RATES (mvm->trans->dbg_cfg.MVM_RS_INITIAL_MIMO_NUM_RATES) #define IWL_MVM_RS_INITIAL_SISO_NUM_RATES (mvm->trans->dbg_cfg.MVM_RS_INITIAL_SISO_NUM_RATES) #define IWL_MVM_RS_INITIAL_LEGACY_NUM_RATES (mvm->trans->dbg_cfg.MVM_RS_INITIAL_LEGACY_NUM_RATES) #define IWL_MVM_RS_INITIAL_LEGACY_RETRIES (mvm->trans->dbg_cfg.MVM_RS_INITIAL_LEGACY_RETRIES) #define IWL_MVM_RS_SECONDARY_LEGACY_RETRIES (mvm->trans->dbg_cfg.MVM_RS_SECONDARY_LEGACY_RETRIES) #define IWL_MVM_RS_SECONDARY_LEGACY_NUM_RATES (mvm->trans->dbg_cfg.MVM_RS_SECONDARY_LEGACY_NUM_RATES) #define IWL_MVM_RS_SECONDARY_SISO_NUM_RATES (mvm->trans->dbg_cfg.MVM_RS_SECONDARY_SISO_NUM_RATES) #define IWL_MVM_RS_SECONDARY_SISO_RETRIES (mvm->trans->dbg_cfg.MVM_RS_SECONDARY_SISO_RETRIES) #define IWL_MVM_RS_RATE_MIN_FAILURE_TH (mvm->trans->dbg_cfg.MVM_RS_RATE_MIN_FAILURE_TH) #define IWL_MVM_RS_RATE_MIN_SUCCESS_TH (mvm->trans->dbg_cfg.MVM_RS_RATE_MIN_SUCCESS_TH) #define IWL_MVM_RS_STAY_IN_COLUMN_TIMEOUT (mvm->trans->dbg_cfg.MVM_RS_STAY_IN_COLUMN_TIMEOUT) #define IWL_MVM_RS_IDLE_TIMEOUT (mvm->trans->dbg_cfg.MVM_RS_IDLE_TIMEOUT) #define IWL_MVM_RS_MISSED_RATE_MAX (mvm->trans->dbg_cfg.MVM_RS_MISSED_RATE_MAX) #define IWL_MVM_RS_LEGACY_FAILURE_LIMIT (mvm->trans->dbg_cfg.MVM_RS_LEGACY_FAILURE_LIMIT) #define IWL_MVM_RS_LEGACY_SUCCESS_LIMIT (mvm->trans->dbg_cfg.MVM_RS_LEGACY_SUCCESS_LIMIT) #define IWL_MVM_RS_LEGACY_TABLE_COUNT (mvm->trans->dbg_cfg.MVM_RS_LEGACY_TABLE_COUNT) #define IWL_MVM_RS_NON_LEGACY_FAILURE_LIMIT (mvm->trans->dbg_cfg.MVM_RS_NON_LEGACY_FAILURE_LIMIT) #define IWL_MVM_RS_NON_LEGACY_SUCCESS_LIMIT (mvm->trans->dbg_cfg.MVM_RS_NON_LEGACY_SUCCESS_LIMIT) #define IWL_MVM_RS_NON_LEGACY_TABLE_COUNT (mvm->trans->dbg_cfg.MVM_RS_NON_LEGACY_TABLE_COUNT) #define IWL_MVM_RS_SR_FORCE_DECREASE (mvm->trans->dbg_cfg.MVM_RS_SR_FORCE_DECREASE) #define IWL_MVM_RS_SR_NO_DECREASE (mvm->trans->dbg_cfg.MVM_RS_SR_NO_DECREASE) #define IWL_MVM_RS_AGG_TIME_LIMIT (mvm->trans->dbg_cfg.MVM_RS_AGG_TIME_LIMIT) #define IWL_MVM_RS_AGG_DISABLE_START (mvm->trans->dbg_cfg.MVM_RS_AGG_DISABLE_START) #define IWL_MVM_RS_AGG_START_THRESHOLD (mvm->trans->dbg_cfg.MVM_RS_AGG_START_THRESHOLD) #define IWL_MVM_RS_TPC_SR_FORCE_INCREASE (mvm->trans->dbg_cfg.MVM_RS_TPC_SR_FORCE_INCREASE) #define IWL_MVM_RS_TPC_SR_NO_INCREASE (mvm->trans->dbg_cfg.MVM_RS_TPC_SR_NO_INCREASE) 
#define IWL_MVM_RS_TPC_TX_POWER_STEP (mvm->trans->dbg_cfg.MVM_RS_TPC_TX_POWER_STEP) #define IWL_MVM_ENABLE_EBS (mvm->trans->dbg_cfg.MVM_ENABLE_EBS) #define IWL_MVM_FTM_RESP_TOA_OFFSET (mvm->trans->dbg_cfg.MVM_FTM_RESP_TOA_OFFSET) #define IWL_MVM_FTM_RESP_VALID (mvm->trans->dbg_cfg.MVM_FTM_RESP_VALID) #define IWL_MVM_FTM_RESP_FLAGS (mvm->trans->dbg_cfg.MVM_FTM_RESP_FLAGS) #define IWL_MVM_FTM_INITIATOR_ALGO (mvm->trans->dbg_cfg.MVM_FTM_INITIATOR_ALGO) #define IWL_MVM_FTM_INITIATOR_DYNACK (mvm->trans->dbg_cfg.MVM_FTM_INITIATOR_DYNACK) #define IWL_MVM_FTM_INITIATOR_MCSI_ENABLED (mvm->trans->dbg_cfg.MVM_FTM_INITIATOR_MCSI_ENABLED) #define IWL_MVM_FTM_INITIATOR_COMMON_CALIB (mvm->trans->dbg_cfg.MVM_FTM_INITIATOR_COMMON_CALIB) #define IWL_MVM_FTM_INITIATOR_FAST_ALGO_DISABLE (mvm->trans->dbg_cfg.MVM_FTM_INITIATOR_FAST_ALGO_DISABLE) #define IWL_MVM_D3_DEBUG (((struct iwl_mvm *)ctx)->trans->dbg_cfg.MVM_D3_DEBUG) #define IWL_MVM_USE_TWT (mvm->trans->dbg_cfg.MVM_USE_TWT) #endif /* CPTCFG_IWLWIFI_SUPPORT_DEBUG_OVERRIDES */ #endif /* __MVM_CONSTANTS_H */ backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/mvm/d3.c000066400000000000000000001622161351064634500260500ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #include <linux/etherdevice.h> #include <linux/ip.h> #include <linux/fs.h> #include <net/cfg80211.h> #include <net/ipv6.h> #include <net/tcp.h> #include <net/addrconf.h> #include "iwl-modparams.h" #include "fw-api.h" #include "mvm.h" void iwl_mvm_set_rekey_data(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct cfg80211_gtk_rekey_data *data) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); if (iwlwifi_mod_params.swcrypto) return; mutex_lock(&mvm->mutex); memcpy(mvmvif->rekey_data.kek, data->kek, NL80211_KEK_LEN); memcpy(mvmvif->rekey_data.kck, data->kck, NL80211_KCK_LEN); mvmvif->rekey_data.replay_ctr = cpu_to_le64(be64_to_cpup((__be64 *)data->replay_ctr)); mvmvif->rekey_data.valid = true; mutex_unlock(&mvm->mutex); } #if IS_ENABLED(CONFIG_IPV6) void iwl_mvm_ipv6_addr_change(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct inet6_dev *idev) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct inet6_ifaddr *ifa; int idx = 0; memset(mvmvif->tentative_addrs, 0, sizeof(mvmvif->tentative_addrs)); read_lock_bh(&idev->lock); list_for_each_entry(ifa, &idev->addr_list, if_list) { mvmvif->target_ipv6_addrs[idx] = ifa->addr; if (ifa->flags & IFA_F_TENTATIVE) __set_bit(idx, mvmvif->tentative_addrs); idx++; if (idx >= IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX) break; } read_unlock_bh(&idev->lock); mvmvif->num_target_ipv6_addrs = idx; } #endif void iwl_mvm_set_default_unicast_key(struct ieee80211_hw *hw, struct ieee80211_vif *vif, int idx) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); mvmvif->tx_key_idx = idx; } static void iwl_mvm_convert_p1k(u16 *p1k, __le16 *out) { int i; for (i = 0; i < IWL_P1K_SIZE; i++) out[i] = cpu_to_le16(p1k[i]); } static const u8 *iwl_mvm_find_max_pn(struct ieee80211_key_conf *key, struct iwl_mvm_key_pn *ptk_pn, struct ieee80211_key_seq *seq, int tid, int queues) { const u8 *ret = seq->ccmp.pn; int i; /* get the PN from mac80211, used on the default queue */ ieee80211_get_key_rx_seq(key, tid, seq); /* and use the internal data for the other queues */ for (i = 1; i < queues; i++) { const u8 *tmp = ptk_pn->q[i].pn[tid]; if (memcmp(ret, tmp, IEEE80211_CCMP_PN_LEN) <= 0) ret = tmp; } return ret; } struct wowlan_key_data { struct iwl_wowlan_rsc_tsc_params_cmd *rsc_tsc; struct iwl_wowlan_tkip_params_cmd *tkip; bool error, use_rsc_tsc, use_tkip, configure_keys; int wep_key_idx; }; static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *key, void *_data) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct wowlan_key_data *data = _data; struct aes_sc *aes_sc, *aes_tx_sc = NULL; struct tkip_sc *tkip_sc, *tkip_tx_sc = NULL; struct iwl_p1k_cache *rx_p1ks; u8 *rx_mic_key; struct ieee80211_key_seq seq; u32 cur_rx_iv32 = 0; u16 p1k[IWL_P1K_SIZE]; int ret, i; switch (key->cipher) { case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104:
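		/*
		 * WEP40 and WEP104 share the code below: a one-off WEP_KEY
		 * host command is built on the stack, and the firmware
		 * derives the variant from wep_key.key_size (see the
		 * STA_KEY_FLG_WEP_13BYTES note below).
		 */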
{ /* hack it for now */ struct { struct iwl_mvm_wep_key_cmd wep_key_cmd; struct iwl_mvm_wep_key wep_key; } __packed wkc = { .wep_key_cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)), .wep_key_cmd.num_keys = 1, /* firmware sets STA_KEY_FLG_WEP_13BYTES */ .wep_key_cmd.decryption_type = STA_KEY_FLG_WEP, .wep_key.key_index = key->keyidx, .wep_key.key_size = key->keylen, }; /* * This will fail -- the key functions don't support * pairwise WEP keys. However, that's better than silently * failing WoWLAN. Or maybe not? */ if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) break; memcpy(&wkc.wep_key.key[3], key->key, key->keylen); if (key->keyidx == mvmvif->tx_key_idx) { /* TX key must be at offset 0 */ wkc.wep_key.key_offset = 0; } else { /* others start at 1 */ data->wep_key_idx++; wkc.wep_key.key_offset = data->wep_key_idx; } if (data->configure_keys) { mutex_lock(&mvm->mutex); ret = iwl_mvm_send_cmd_pdu(mvm, WEP_KEY, 0, sizeof(wkc), &wkc); data->error = ret != 0; mvm->ptk_ivlen = key->iv_len; mvm->ptk_icvlen = key->icv_len; mvm->gtk_ivlen = key->iv_len; mvm->gtk_icvlen = key->icv_len; mutex_unlock(&mvm->mutex); } /* don't upload key again */ return; } default: data->error = true; return; case WLAN_CIPHER_SUITE_AES_CMAC: /* * Ignore CMAC keys -- the WoWLAN firmware doesn't support them * but we also shouldn't abort suspend due to that. It does have * support for the IGTK key renewal, but doesn't really use the * IGTK for anything. This means we could spuriously wake up or * be deauthenticated, but that was considered acceptable. */ return; case WLAN_CIPHER_SUITE_TKIP: if (sta) { u64 pn64; tkip_sc = data->rsc_tsc->all_tsc_rsc.tkip.unicast_rsc; tkip_tx_sc = &data->rsc_tsc->all_tsc_rsc.tkip.tsc; rx_p1ks = data->tkip->rx_uni; pn64 = atomic64_read(&key->tx_pn); tkip_tx_sc->iv16 = cpu_to_le16(TKIP_PN_TO_IV16(pn64)); tkip_tx_sc->iv32 = cpu_to_le32(TKIP_PN_TO_IV32(pn64)); ieee80211_get_tkip_p1k_iv(key, TKIP_PN_TO_IV32(pn64), p1k); iwl_mvm_convert_p1k(p1k, data->tkip->tx.p1k); memcpy(data->tkip->mic_keys.tx, &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY], IWL_MIC_KEY_SIZE); rx_mic_key = data->tkip->mic_keys.rx_unicast; } else { tkip_sc = data->rsc_tsc->all_tsc_rsc.tkip.multicast_rsc; rx_p1ks = data->tkip->rx_multi; rx_mic_key = data->tkip->mic_keys.rx_mcast; } /* * For non-QoS this relies on the fact that both the uCode and * mac80211 use TID 0 (as they need to, to avoid replay attacks) * for checking the IV in the frames. */ for (i = 0; i < IWL_NUM_RSC; i++) { ieee80211_get_key_rx_seq(key, i, &seq); tkip_sc[i].iv16 = cpu_to_le16(seq.tkip.iv16); tkip_sc[i].iv32 = cpu_to_le32(seq.tkip.iv32); /* wrapping isn't allowed, AP must rekey */ if (seq.tkip.iv32 > cur_rx_iv32) cur_rx_iv32 = seq.tkip.iv32; } ieee80211_get_tkip_rx_p1k(key, vif->bss_conf.bssid, cur_rx_iv32, p1k); iwl_mvm_convert_p1k(p1k, rx_p1ks[0].p1k); ieee80211_get_tkip_rx_p1k(key, vif->bss_conf.bssid, cur_rx_iv32 + 1, p1k); iwl_mvm_convert_p1k(p1k, rx_p1ks[1].p1k); memcpy(rx_mic_key, &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY], IWL_MIC_KEY_SIZE); data->use_tkip = true; data->use_rsc_tsc = true; break; case WLAN_CIPHER_SUITE_CCMP: if (sta) { u64 pn64; aes_sc = data->rsc_tsc->all_tsc_rsc.aes.unicast_rsc; aes_tx_sc = &data->rsc_tsc->all_tsc_rsc.aes.tsc; pn64 = atomic64_read(&key->tx_pn); aes_tx_sc->pn = cpu_to_le64(pn64); } else { aes_sc = data->rsc_tsc->all_tsc_rsc.aes.multicast_rsc; } /* * For non-QoS this relies on the fact that both the uCode and * mac80211/our RX code use TID 0 for checking the PN.
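 *
 * The 6-byte CCMP PN is big-endian on the air (pn[0] is the most
 * significant byte) while the firmware field is a little-endian
 * 64-bit counter, hence the explicit byte assembly below:
 *
 *	pn48 = pn[5] | pn[4] << 8 | ... | pn[0] << 40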
*/ if (sta && iwl_mvm_has_new_rx_api(mvm)) { struct iwl_mvm_sta *mvmsta; struct iwl_mvm_key_pn *ptk_pn; const u8 *pn; mvmsta = iwl_mvm_sta_from_mac80211(sta); ptk_pn = rcu_dereference_protected( mvmsta->ptk_pn[key->keyidx], lockdep_is_held(&mvm->mutex)); if (WARN_ON(!ptk_pn)) break; for (i = 0; i < IWL_MAX_TID_COUNT; i++) { pn = iwl_mvm_find_max_pn(key, ptk_pn, &seq, i, mvm->trans->num_rx_queues); aes_sc[i].pn = cpu_to_le64((u64)pn[5] | ((u64)pn[4] << 8) | ((u64)pn[3] << 16) | ((u64)pn[2] << 24) | ((u64)pn[1] << 32) | ((u64)pn[0] << 40)); } } else { for (i = 0; i < IWL_NUM_RSC; i++) { u8 *pn = seq.ccmp.pn; ieee80211_get_key_rx_seq(key, i, &seq); aes_sc[i].pn = cpu_to_le64((u64)pn[5] | ((u64)pn[4] << 8) | ((u64)pn[3] << 16) | ((u64)pn[2] << 24) | ((u64)pn[1] << 32) | ((u64)pn[0] << 40)); } } data->use_rsc_tsc = true; break; } if (data->configure_keys) { mutex_lock(&mvm->mutex); /* * The D3 firmware hardcodes the key offset 0 as the key it * uses to transmit packets to the AP, i.e. the PTK. */ if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) { mvm->ptk_ivlen = key->iv_len; mvm->ptk_icvlen = key->icv_len; ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, 0); } else { /* * firmware only supports TSC/RSC for a single key, * so if there are multiple keep overwriting them * with new ones -- this relies on mac80211 doing * list_add_tail(). */ mvm->gtk_ivlen = key->iv_len; mvm->gtk_icvlen = key->icv_len; ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, 1); } mutex_unlock(&mvm->mutex); data->error = ret != 0; } } static int iwl_mvm_send_patterns_v1(struct iwl_mvm *mvm, struct cfg80211_wowlan *wowlan) { struct iwl_wowlan_patterns_cmd_v1 *pattern_cmd; struct iwl_host_cmd cmd = { .id = WOWLAN_PATTERNS, .dataflags[0] = IWL_HCMD_DFL_NOCOPY, }; int i, err; if (!wowlan->n_patterns) return 0; cmd.len[0] = struct_size(pattern_cmd, patterns, wowlan->n_patterns); pattern_cmd = kmalloc(cmd.len[0], GFP_KERNEL); if (!pattern_cmd) return -ENOMEM; pattern_cmd->n_patterns = cpu_to_le32(wowlan->n_patterns); for (i = 0; i < wowlan->n_patterns; i++) { int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8); memcpy(&pattern_cmd->patterns[i].mask, wowlan->patterns[i].mask, mask_len); memcpy(&pattern_cmd->patterns[i].pattern, wowlan->patterns[i].pattern, wowlan->patterns[i].pattern_len); pattern_cmd->patterns[i].mask_size = mask_len; pattern_cmd->patterns[i].pattern_size = wowlan->patterns[i].pattern_len; } cmd.data[0] = pattern_cmd; err = iwl_mvm_send_cmd(mvm, &cmd); kfree(pattern_cmd); return err; } static int iwl_mvm_send_patterns(struct iwl_mvm *mvm, struct cfg80211_wowlan *wowlan) { struct iwl_wowlan_patterns_cmd *pattern_cmd; struct iwl_host_cmd cmd = { .id = WOWLAN_PATTERNS, .dataflags[0] = IWL_HCMD_DFL_NOCOPY, }; int i, err; if (!wowlan->n_patterns) return 0; cmd.len[0] = sizeof(*pattern_cmd) + wowlan->n_patterns * sizeof(struct iwl_wowlan_pattern_v2); pattern_cmd = kmalloc(cmd.len[0], GFP_KERNEL); if (!pattern_cmd) return -ENOMEM; pattern_cmd->n_patterns = cpu_to_le32(wowlan->n_patterns); for (i = 0; i < wowlan->n_patterns; i++) { int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8); pattern_cmd->patterns[i].pattern_type = WOWLAN_PATTERN_TYPE_BITMASK; memcpy(&pattern_cmd->patterns[i].u.bitmask.mask, wowlan->patterns[i].mask, mask_len); memcpy(&pattern_cmd->patterns[i].u.bitmask.pattern, wowlan->patterns[i].pattern, wowlan->patterns[i].pattern_len); pattern_cmd->patterns[i].u.bitmask.mask_size = mask_len; pattern_cmd->patterns[i].u.bitmask.pattern_size = wowlan->patterns[i].pattern_len; } cmd.data[0] = 
pattern_cmd; err = iwl_mvm_send_cmd(mvm, &cmd); kfree(pattern_cmd); return err; } static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *ap_sta) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct ieee80211_chanctx_conf *ctx; u8 chains_static, chains_dynamic; struct cfg80211_chan_def chandef; int ret, i; struct iwl_binding_cmd_v1 binding_cmd = {}; struct iwl_time_quota_cmd quota_cmd = {}; struct iwl_time_quota_data *quota; u32 status; if (WARN_ON_ONCE(iwl_mvm_is_cdb_supported(mvm))) return -EINVAL; /* add back the PHY */ if (WARN_ON(!mvmvif->phy_ctxt)) return -EINVAL; rcu_read_lock(); ctx = rcu_dereference(vif->chanctx_conf); if (WARN_ON(!ctx)) { rcu_read_unlock(); return -EINVAL; } chandef = ctx->def; chains_static = ctx->rx_chains_static; chains_dynamic = ctx->rx_chains_dynamic; rcu_read_unlock(); ret = iwl_mvm_phy_ctxt_add(mvm, mvmvif->phy_ctxt, &chandef, chains_static, chains_dynamic); if (ret) return ret; /* add back the MAC */ mvmvif->uploaded = false; if (WARN_ON(!vif->bss_conf.assoc)) return -EINVAL; ret = iwl_mvm_mac_ctxt_add(mvm, vif); if (ret) return ret; /* add back binding - XXX refactor? */ binding_cmd.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id, mvmvif->phy_ctxt->color)); binding_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD); binding_cmd.phy = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id, mvmvif->phy_ctxt->color)); binding_cmd.macs[0] = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)); for (i = 1; i < MAX_MACS_IN_BINDING; i++) binding_cmd.macs[i] = cpu_to_le32(FW_CTXT_INVALID); status = 0; ret = iwl_mvm_send_cmd_pdu_status(mvm, BINDING_CONTEXT_CMD, IWL_BINDING_CMD_SIZE_V1, &binding_cmd, &status); if (ret) { IWL_ERR(mvm, "Failed to add binding: %d\n", ret); return ret; } if (status) { IWL_ERR(mvm, "Binding command failed: %u\n", status); return -EIO; } ret = iwl_mvm_sta_send_to_fw(mvm, ap_sta, false, 0); if (ret) return ret; rcu_assign_pointer(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id], ap_sta); ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); if (ret) return ret; /* and some quota */ quota = iwl_mvm_quota_cmd_get_quota(mvm, &quota_cmd, 0); quota->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id, mvmvif->phy_ctxt->color)); quota->quota = cpu_to_le32(IWL_MVM_MAX_QUOTA); quota->max_duration = cpu_to_le32(IWL_MVM_MAX_QUOTA); for (i = 1; i < MAX_BINDINGS; i++) { quota = iwl_mvm_quota_cmd_get_quota(mvm, &quota_cmd, i); quota->id_and_color = cpu_to_le32(FW_CTXT_INVALID); } ret = iwl_mvm_send_cmd_pdu(mvm, TIME_QUOTA_CMD, 0, iwl_mvm_quota_cmd_size(mvm), &quota_cmd); if (ret) IWL_ERR(mvm, "Failed to send quota: %d\n", ret); if (iwl_mvm_is_lar_supported(mvm) && iwl_mvm_init_fw_regd(mvm)) IWL_ERR(mvm, "Failed to initialize D3 LAR information\n"); return 0; } static int iwl_mvm_get_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_nonqos_seq_query_cmd query_cmd = { .get_set_flag = cpu_to_le32(IWL_NONQOS_SEQ_GET), .mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)), }; struct iwl_host_cmd cmd = { .id = NON_QOS_TX_COUNTER_CMD, .flags = CMD_WANT_SKB, }; int err; u32 size; cmd.data[0] = &query_cmd; cmd.len[0] = sizeof(query_cmd); err = iwl_mvm_send_cmd(mvm, &cmd); if (err) return err; size = iwl_rx_packet_payload_len(cmd.resp_pkt); if (size < sizeof(__le16)) { err = -EINVAL; } else { err = le16_to_cpup((__le16 *)cmd.resp_pkt->data); /* firmware returns next, not
last-used seqno */
		err = (u16) (err - 0x10);
	}

	iwl_free_resp(&cmd);
	return err;
}

void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_nonqos_seq_query_cmd query_cmd = {
		.get_set_flag = cpu_to_le32(IWL_NONQOS_SEQ_SET),
		.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
								  mvmvif->color)),
		.value = cpu_to_le16(mvmvif->seqno),
	};

	/* return if called during restart, not resume from D3 */
	if (!mvmvif->seqno_valid)
		return;

	mvmvif->seqno_valid = false;

	if (iwl_mvm_send_cmd_pdu(mvm, NON_QOS_TX_COUNTER_CMD, 0,
				 sizeof(query_cmd), &query_cmd))
		IWL_ERR(mvm, "failed to set non-QoS seqno\n");
}

static int iwl_mvm_switch_to_d3(struct iwl_mvm *mvm)
{
	iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);

	iwl_mvm_stop_device(mvm);

	/*
	 * Set the HW restart bit -- this is mostly true as we're
	 * going to load new firmware and reprogram that, though
	 * the reprogramming is going to be manual to avoid adding
	 * all the MACs that aren't supported.
	 * We don't have to clear up everything though because the
	 * reprogramming is manual. When we resume, we'll actually
	 * go through a proper restart sequence again to switch
	 * back to the runtime firmware image.
	 */
	set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);

	/* the fw is reset, so all the keys are cleared */
	memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
	mvm->ptk_ivlen = 0;
	mvm->ptk_icvlen = 0;
	mvm->gtk_ivlen = 0;
	mvm->gtk_icvlen = 0;

	return iwl_mvm_load_d3_fw(mvm);
}

static int iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm,
				     struct cfg80211_wowlan *wowlan,
				     struct iwl_wowlan_config_cmd *wowlan_config_cmd,
				     struct ieee80211_vif *vif,
				     struct iwl_mvm_vif *mvmvif,
				     struct ieee80211_sta *ap_sta)
{
	int ret;
	struct iwl_mvm_sta *mvm_ap_sta = iwl_mvm_sta_from_mac80211(ap_sta);

	/* TODO: wowlan_config_cmd->wowlan_ba_teardown_tids */

	wowlan_config_cmd->is_11n_connection = ap_sta->ht_cap.ht_supported;
	wowlan_config_cmd->flags = ENABLE_L3_FILTERING |
		ENABLE_NBNS_FILTERING | ENABLE_DHCP_FILTERING;

	/* Query the last used seqno and set it */
	ret = iwl_mvm_get_last_nonqos_seq(mvm, vif);
	if (ret < 0)
		return ret;

	wowlan_config_cmd->non_qos_seq = cpu_to_le16(ret);

	iwl_mvm_set_wowlan_qos_seq(mvm_ap_sta, wowlan_config_cmd);

	if (wowlan->disconnect)
		wowlan_config_cmd->wakeup_filter |=
			cpu_to_le32(IWL_WOWLAN_WAKEUP_BEACON_MISS |
				    IWL_WOWLAN_WAKEUP_LINK_CHANGE);
	if (wowlan->magic_pkt)
		wowlan_config_cmd->wakeup_filter |=
			cpu_to_le32(IWL_WOWLAN_WAKEUP_MAGIC_PACKET);
	if (wowlan->gtk_rekey_failure)
		wowlan_config_cmd->wakeup_filter |=
			cpu_to_le32(IWL_WOWLAN_WAKEUP_GTK_REKEY_FAIL);
	if (wowlan->eap_identity_req)
		wowlan_config_cmd->wakeup_filter |=
			cpu_to_le32(IWL_WOWLAN_WAKEUP_EAP_IDENT_REQ);
	if (wowlan->four_way_handshake)
		wowlan_config_cmd->wakeup_filter |=
			cpu_to_le32(IWL_WOWLAN_WAKEUP_4WAY_HANDSHAKE);
	if (wowlan->n_patterns)
		wowlan_config_cmd->wakeup_filter |=
			cpu_to_le32(IWL_WOWLAN_WAKEUP_PATTERN_MATCH);

	if (wowlan->rfkill_release)
		wowlan_config_cmd->wakeup_filter |=
			cpu_to_le32(IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT);

	if (wowlan->tcp) {
		/*
		 * Set the "link change" (really "link lost") flag as well
		 * since that implies losing the TCP connection.
*/ wowlan_config_cmd->wakeup_filter |= cpu_to_le32(IWL_WOWLAN_WAKEUP_REMOTE_LINK_LOSS | IWL_WOWLAN_WAKEUP_REMOTE_SIGNATURE_TABLE | IWL_WOWLAN_WAKEUP_REMOTE_WAKEUP_PACKET | IWL_WOWLAN_WAKEUP_LINK_CHANGE); } if (wowlan->any) { wowlan_config_cmd->wakeup_filter |= cpu_to_le32(IWL_WOWLAN_WAKEUP_BEACON_MISS | IWL_WOWLAN_WAKEUP_LINK_CHANGE | IWL_WOWLAN_WAKEUP_RX_FRAME | IWL_WOWLAN_WAKEUP_BCN_FILTERING); } return 0; } static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u32 cmd_flags) { struct iwl_wowlan_kek_kck_material_cmd kek_kck_cmd = {}; struct iwl_wowlan_tkip_params_cmd tkip_cmd = {}; bool unified = fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); struct wowlan_key_data key_data = { .configure_keys = !unified, .use_rsc_tsc = false, .tkip = &tkip_cmd, .use_tkip = false, }; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int ret; key_data.rsc_tsc = kzalloc(sizeof(*key_data.rsc_tsc), GFP_KERNEL); if (!key_data.rsc_tsc) return -ENOMEM; /* * if we have to configure keys, call ieee80211_iter_keys(), * as we need non-atomic context in order to take the * required locks. */ /* * Note that currently we don't propagate cmd_flags * to the iterator. In case of key_data.configure_keys, * all the configured commands are SYNC, and * iwl_mvm_wowlan_program_keys() will take care of * locking/unlocking mvm->mutex. */ ieee80211_iter_keys(mvm->hw, vif, iwl_mvm_wowlan_program_keys, &key_data); if (key_data.error) { ret = -EIO; goto out; } if (key_data.use_rsc_tsc) { ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_TSC_RSC_PARAM, cmd_flags, sizeof(*key_data.rsc_tsc), key_data.rsc_tsc); if (ret) goto out; } if (key_data.use_tkip && !fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_TKIP_MIC_KEYS)) { ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_TKIP_PARAM, cmd_flags, sizeof(tkip_cmd), &tkip_cmd); if (ret) goto out; } /* configure rekey data only if offloaded rekey is supported (d3) */ if (mvmvif->rekey_data.valid) { memset(&kek_kck_cmd, 0, sizeof(kek_kck_cmd)); memcpy(kek_kck_cmd.kck, mvmvif->rekey_data.kck, NL80211_KCK_LEN); kek_kck_cmd.kck_len = cpu_to_le16(NL80211_KCK_LEN); memcpy(kek_kck_cmd.kek, mvmvif->rekey_data.kek, NL80211_KEK_LEN); kek_kck_cmd.kek_len = cpu_to_le16(NL80211_KEK_LEN); kek_kck_cmd.replay_ctr = mvmvif->rekey_data.replay_ctr; ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_KEK_KCK_MATERIAL, cmd_flags, sizeof(kek_kck_cmd), &kek_kck_cmd); if (ret) goto out; } ret = 0; out: kfree(key_data.rsc_tsc); return ret; } static int iwl_mvm_wowlan_config(struct iwl_mvm *mvm, struct cfg80211_wowlan *wowlan, struct iwl_wowlan_config_cmd *wowlan_config_cmd, struct ieee80211_vif *vif, struct iwl_mvm_vif *mvmvif, struct ieee80211_sta *ap_sta) { int ret; bool unified_image = fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); mvm->offload_tid = wowlan_config_cmd->offloading_tid; if (!unified_image) { ret = iwl_mvm_switch_to_d3(mvm); if (ret) return ret; ret = iwl_mvm_d3_reprogram(mvm, vif, ap_sta); if (ret) return ret; } if (!iwlwifi_mod_params.swcrypto) { /* * This needs to be unlocked due to lock ordering * constraints. Since we're in the suspend path * that isn't really a problem though. 
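 * (iwl_mvm_wowlan_program_keys() takes mvm->mutex itself when it
 * has to configure keys, so the mutex must not be held across this
 * call.)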
*/ mutex_unlock(&mvm->mutex); ret = iwl_mvm_wowlan_config_key_params(mvm, vif, CMD_ASYNC); mutex_lock(&mvm->mutex); if (ret) return ret; } ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, 0, sizeof(*wowlan_config_cmd), wowlan_config_cmd); if (ret) return ret; if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_WOWLAN_TCP_SYN_WAKE)) ret = iwl_mvm_send_patterns(mvm, wowlan); else ret = iwl_mvm_send_patterns_v1(mvm, wowlan); if (ret) return ret; return iwl_mvm_send_proto_offload(mvm, vif, false, true, 0); } static int iwl_mvm_netdetect_config(struct iwl_mvm *mvm, struct cfg80211_wowlan *wowlan, struct cfg80211_sched_scan_request *nd_config, struct ieee80211_vif *vif) { struct iwl_wowlan_config_cmd wowlan_config_cmd = {}; int ret; bool unified_image = fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); if (!unified_image) { ret = iwl_mvm_switch_to_d3(mvm); if (ret) return ret; } else { /* In theory, we wouldn't have to stop a running sched * scan in order to start another one (for * net-detect). But in practice this doesn't seem to * work properly, so stop any running sched_scan now. */ ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true); if (ret) return ret; } /* rfkill release can be either for wowlan or netdetect */ if (wowlan->rfkill_release) wowlan_config_cmd.wakeup_filter |= cpu_to_le32(IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT); ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, 0, sizeof(wowlan_config_cmd), &wowlan_config_cmd); if (ret) return ret; ret = iwl_mvm_sched_scan_start(mvm, vif, nd_config, &mvm->nd_ies, IWL_MVM_SCAN_NETDETECT); if (ret) return ret; if (WARN_ON(mvm->nd_match_sets || mvm->nd_channels)) return -EBUSY; /* save the sched scan matchsets... */ if (nd_config->n_match_sets) { mvm->nd_match_sets = kmemdup(nd_config->match_sets, sizeof(*nd_config->match_sets) * nd_config->n_match_sets, GFP_KERNEL); if (mvm->nd_match_sets) mvm->n_nd_match_sets = nd_config->n_match_sets; } /* ...and the sched scan channels for later reporting */ mvm->nd_channels = kmemdup(nd_config->channels, sizeof(*nd_config->channels) * nd_config->n_channels, GFP_KERNEL); if (mvm->nd_channels) mvm->n_nd_channels = nd_config->n_channels; return 0; } static void iwl_mvm_free_nd(struct iwl_mvm *mvm) { kfree(mvm->nd_match_sets); mvm->nd_match_sets = NULL; mvm->n_nd_match_sets = 0; kfree(mvm->nd_channels); mvm->nd_channels = NULL; mvm->n_nd_channels = 0; } static int __iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan, bool test) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct ieee80211_vif *vif = NULL; struct iwl_mvm_vif *mvmvif = NULL; struct ieee80211_sta *ap_sta = NULL; struct iwl_d3_manager_config d3_cfg_cmd_data = { /* * Program the minimum sleep time to 10 seconds, as many * platforms have issues processing a wakeup signal while * still being in the process of suspending. 
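 * (The value below is in microseconds: 10 * 1000 * 1000 usec = 10 s.)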
*/ .min_sleep_time = cpu_to_le32(10 * 1000 * 1000), }; struct iwl_host_cmd d3_cfg_cmd = { .id = D3_CONFIG_CMD, .flags = CMD_WANT_SKB, .data[0] = &d3_cfg_cmd_data, .len[0] = sizeof(d3_cfg_cmd_data), }; int ret; int len __maybe_unused; bool unified_image = fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); if (!wowlan) { /* * mac80211 shouldn't get here, but for D3 test * it doesn't warrant a warning */ WARN_ON(!test); return -EINVAL; } mutex_lock(&mvm->mutex); vif = iwl_mvm_get_bss_vif(mvm); if (IS_ERR_OR_NULL(vif)) { ret = 1; goto out_noreset; } mvmvif = iwl_mvm_vif_from_mac80211(vif); if (mvmvif->ap_sta_id == IWL_MVM_INVALID_STA) { /* if we're not associated, this must be netdetect */ if (!wowlan->nd_config) { ret = 1; goto out_noreset; } ret = iwl_mvm_netdetect_config( mvm, wowlan, wowlan->nd_config, vif); if (ret) goto out; mvm->net_detect = true; } else { struct iwl_wowlan_config_cmd wowlan_config_cmd = {}; ap_sta = rcu_dereference_protected( mvm->fw_id_to_mac_id[mvmvif->ap_sta_id], lockdep_is_held(&mvm->mutex)); if (IS_ERR_OR_NULL(ap_sta)) { ret = -EINVAL; goto out_noreset; } ret = iwl_mvm_get_wowlan_config(mvm, wowlan, &wowlan_config_cmd, vif, mvmvif, ap_sta); if (ret) goto out_noreset; ret = iwl_mvm_wowlan_config(mvm, wowlan, &wowlan_config_cmd, vif, mvmvif, ap_sta); if (ret) goto out; mvm->net_detect = false; } ret = iwl_mvm_power_update_device(mvm); if (ret) goto out; ret = iwl_mvm_power_update_mac(mvm); if (ret) goto out; #ifdef CPTCFG_IWLWIFI_DEBUGFS if (mvm->d3_wake_sysassert) d3_cfg_cmd_data.wakeup_flags |= cpu_to_le32(IWL_WAKEUP_D3_CONFIG_FW_ERROR); #endif /* * Prior to 9000 device family the driver needs to stop the dbg * recording before entering D3. In later devices the FW stops the * recording automatically. */ if (mvm->trans->cfg->device_family < IWL_DEVICE_FAMILY_9000) iwl_fw_dbg_stop_restart_recording(&mvm->fwrt, NULL, true); /* must be last -- this switches firmware state */ ret = iwl_mvm_send_cmd(mvm, &d3_cfg_cmd); if (ret) goto out; #ifdef CPTCFG_IWLWIFI_DEBUGFS len = iwl_rx_packet_payload_len(d3_cfg_cmd.resp_pkt); if (len >= sizeof(u32)) { mvm->d3_test_pme_ptr = le32_to_cpup((__le32 *)d3_cfg_cmd.resp_pkt->data); } #endif iwl_free_resp(&d3_cfg_cmd); clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); iwl_trans_d3_suspend(mvm->trans, test, !unified_image); out: if (ret < 0) { iwl_mvm_free_nd(mvm); if (!unified_image) { if (mvm->fw_restart > 0) { mvm->fw_restart--; ieee80211_restart_hw(mvm->hw); } } } out_noreset: mutex_unlock(&mvm->mutex); return ret; } int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_trans *trans = mvm->trans; int ret; iwl_mvm_pause_tcm(mvm, true); iwl_fw_runtime_suspend(&mvm->fwrt); ret = iwl_trans_suspend(trans); if (ret) return ret; trans->system_pm_mode = IWL_PLAT_PM_MODE_D3; return __iwl_mvm_suspend(hw, wowlan, false); } /* converted data from the different status responses */ struct iwl_wowlan_status_data { u16 pattern_number; u16 qos_seq_ctr[8]; u32 wakeup_reasons; u32 wake_packet_length; u32 wake_packet_bufsize; const u8 *wake_packet; }; static void iwl_mvm_report_wakeup_reasons(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_wowlan_status_data *status) { struct sk_buff *pkt = NULL; struct cfg80211_wowlan_wakeup wakeup = { .pattern_idx = -1, }; struct cfg80211_wowlan_wakeup *wakeup_report = &wakeup; u32 reasons = status->wakeup_reasons; if (reasons == IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS) { wakeup_report = NULL; goto 
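/*
 * Reporting a NULL wakeup tells cfg80211 that the device woke
 * up, but not because of a wireless/WoWLAN trigger.
 */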
report; } pm_wakeup_event(mvm->dev, 0); if (reasons & IWL_WOWLAN_WAKEUP_BY_MAGIC_PACKET) wakeup.magic_pkt = true; if (reasons & IWL_WOWLAN_WAKEUP_BY_PATTERN) wakeup.pattern_idx = status->pattern_number; if (reasons & (IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON | IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH)) wakeup.disconnect = true; if (reasons & IWL_WOWLAN_WAKEUP_BY_GTK_REKEY_FAILURE) wakeup.gtk_rekey_failure = true; if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED) wakeup.rfkill_release = true; if (reasons & IWL_WOWLAN_WAKEUP_BY_EAPOL_REQUEST) wakeup.eap_identity_req = true; if (reasons & IWL_WOWLAN_WAKEUP_BY_FOUR_WAY_HANDSHAKE) wakeup.four_way_handshake = true; if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_LINK_LOSS) wakeup.tcp_connlost = true; if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_SIGNATURE_TABLE) wakeup.tcp_nomoretokens = true; if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_WAKEUP_PACKET) wakeup.tcp_match = true; if (status->wake_packet_bufsize) { int pktsize = status->wake_packet_bufsize; int pktlen = status->wake_packet_length; const u8 *pktdata = status->wake_packet; struct ieee80211_hdr *hdr = (void *)pktdata; int truncated = pktlen - pktsize; /* this would be a firmware bug */ if (WARN_ON_ONCE(truncated < 0)) truncated = 0; if (ieee80211_is_data(hdr->frame_control)) { int hdrlen = ieee80211_hdrlen(hdr->frame_control); int ivlen = 0, icvlen = 4; /* also FCS */ pkt = alloc_skb(pktsize, GFP_KERNEL); if (!pkt) goto report; skb_put_data(pkt, pktdata, hdrlen); pktdata += hdrlen; pktsize -= hdrlen; if (ieee80211_has_protected(hdr->frame_control)) { /* * This is unlocked and using gtk_i(c)vlen, * but since everything is under RTNL still * that's not really a problem - changing * it would be difficult. */ if (is_multicast_ether_addr(hdr->addr1)) { ivlen = mvm->gtk_ivlen; icvlen += mvm->gtk_icvlen; } else { ivlen = mvm->ptk_ivlen; icvlen += mvm->ptk_icvlen; } } /* if truncated, FCS/ICV is (partially) gone */ if (truncated >= icvlen) { icvlen = 0; truncated -= icvlen; } else { icvlen -= truncated; truncated = 0; } pktsize -= ivlen + icvlen; pktdata += ivlen; skb_put_data(pkt, pktdata, pktsize); if (ieee80211_data_to_8023(pkt, vif->addr, vif->type)) goto report; wakeup.packet = pkt->data; wakeup.packet_present_len = pkt->len; wakeup.packet_len = pkt->len - truncated; wakeup.packet_80211 = false; } else { int fcslen = 4; if (truncated >= 4) { truncated -= 4; fcslen = 0; } else { fcslen -= truncated; truncated = 0; } pktsize -= fcslen; wakeup.packet = status->wake_packet; wakeup.packet_present_len = pktsize; wakeup.packet_len = pktlen - truncated; wakeup.packet_80211 = true; } } report: ieee80211_report_wowlan_wakeup(vif, wakeup_report, GFP_KERNEL); kfree_skb(pkt); } static void iwl_mvm_aes_sc_to_seq(struct aes_sc *sc, struct ieee80211_key_seq *seq) { u64 pn; pn = le64_to_cpu(sc->pn); seq->ccmp.pn[0] = pn >> 40; seq->ccmp.pn[1] = pn >> 32; seq->ccmp.pn[2] = pn >> 24; seq->ccmp.pn[3] = pn >> 16; seq->ccmp.pn[4] = pn >> 8; seq->ccmp.pn[5] = pn; } static void iwl_mvm_tkip_sc_to_seq(struct tkip_sc *sc, struct ieee80211_key_seq *seq) { seq->tkip.iv32 = le32_to_cpu(sc->iv32); seq->tkip.iv16 = le16_to_cpu(sc->iv16); } static void iwl_mvm_set_aes_rx_seq(struct iwl_mvm *mvm, struct aes_sc *scs, struct ieee80211_sta *sta, struct ieee80211_key_conf *key) { int tid; BUILD_BUG_ON(IWL_NUM_RSC != IEEE80211_NUM_TIDS); if (sta && iwl_mvm_has_new_rx_api(mvm)) { struct iwl_mvm_sta *mvmsta; struct iwl_mvm_key_pn *ptk_pn; mvmsta = iwl_mvm_sta_from_mac80211(sta); ptk_pn = 
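/*
 * With multiple RX queues, each queue keeps its own CCMP PN for
 * replay detection, so the counter restored from the wowlan
 * status is copied to every queue below.
 */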
rcu_dereference_protected(mvmsta->ptk_pn[key->keyidx], lockdep_is_held(&mvm->mutex)); if (WARN_ON(!ptk_pn)) return; for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) { struct ieee80211_key_seq seq = {}; int i; iwl_mvm_aes_sc_to_seq(&scs[tid], &seq); ieee80211_set_key_rx_seq(key, tid, &seq); for (i = 1; i < mvm->trans->num_rx_queues; i++) memcpy(ptk_pn->q[i].pn[tid], seq.ccmp.pn, IEEE80211_CCMP_PN_LEN); } } else { for (tid = 0; tid < IWL_NUM_RSC; tid++) { struct ieee80211_key_seq seq = {}; iwl_mvm_aes_sc_to_seq(&scs[tid], &seq); ieee80211_set_key_rx_seq(key, tid, &seq); } } } static void iwl_mvm_set_tkip_rx_seq(struct tkip_sc *scs, struct ieee80211_key_conf *key) { int tid; BUILD_BUG_ON(IWL_NUM_RSC != IEEE80211_NUM_TIDS); for (tid = 0; tid < IWL_NUM_RSC; tid++) { struct ieee80211_key_seq seq = {}; iwl_mvm_tkip_sc_to_seq(&scs[tid], &seq); ieee80211_set_key_rx_seq(key, tid, &seq); } } static void iwl_mvm_set_key_rx_seq(struct iwl_mvm *mvm, struct ieee80211_key_conf *key, struct iwl_wowlan_status *status) { union iwl_all_tsc_rsc *rsc = &status->gtk[0].rsc.all_tsc_rsc; switch (key->cipher) { case WLAN_CIPHER_SUITE_CCMP: iwl_mvm_set_aes_rx_seq(mvm, rsc->aes.multicast_rsc, NULL, key); break; case WLAN_CIPHER_SUITE_TKIP: iwl_mvm_set_tkip_rx_seq(rsc->tkip.multicast_rsc, key); break; default: WARN_ON(1); } } struct iwl_mvm_d3_gtk_iter_data { struct iwl_mvm *mvm; struct iwl_wowlan_status *status; void *last_gtk; u32 cipher; bool find_phase, unhandled_cipher; int num_keys; }; static void iwl_mvm_d3_update_keys(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *key, void *_data) { struct iwl_mvm_d3_gtk_iter_data *data = _data; if (data->unhandled_cipher) return; switch (key->cipher) { case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: /* ignore WEP completely, nothing to do */ return; case WLAN_CIPHER_SUITE_CCMP: case WLAN_CIPHER_SUITE_TKIP: /* we support these */ break; default: /* everything else (even CMAC for MFP) - disconnect from AP */ data->unhandled_cipher = true; return; } data->num_keys++; /* * pairwise key - update sequence counters only; * note that this assumes no TDLS sessions are active */ if (sta) { struct ieee80211_key_seq seq = {}; union iwl_all_tsc_rsc *sc = &data->status->gtk[0].rsc.all_tsc_rsc; if (data->find_phase) return; switch (key->cipher) { case WLAN_CIPHER_SUITE_CCMP: iwl_mvm_set_aes_rx_seq(data->mvm, sc->aes.unicast_rsc, sta, key); atomic64_set(&key->tx_pn, le64_to_cpu(sc->aes.tsc.pn)); break; case WLAN_CIPHER_SUITE_TKIP: iwl_mvm_tkip_sc_to_seq(&sc->tkip.tsc, &seq); iwl_mvm_set_tkip_rx_seq(sc->tkip.unicast_rsc, key); atomic64_set(&key->tx_pn, (u64)seq.tkip.iv16 | ((u64)seq.tkip.iv32 << 16)); break; } /* that's it for this key */ return; } if (data->find_phase) { data->last_gtk = key; data->cipher = key->cipher; return; } if (data->status->num_of_gtk_rekeys) ieee80211_remove_key(key); else if (data->last_gtk == key) iwl_mvm_set_key_rx_seq(data->mvm, key, data->status); } static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_wowlan_status *status) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_d3_gtk_iter_data gtkdata = { .mvm = mvm, .status = status, }; u32 disconnection_reasons = IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON | IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH; if (!status || !vif->bss_conf.bssid) return false; if (le32_to_cpu(status->wakeup_reasons) & disconnection_reasons) return false; /* find last GTK that we used 
initially, if any */ gtkdata.find_phase = true; ieee80211_iter_keys(mvm->hw, vif, iwl_mvm_d3_update_keys, >kdata); /* not trying to keep connections with MFP/unhandled ciphers */ if (gtkdata.unhandled_cipher) return false; if (!gtkdata.num_keys) goto out; if (!gtkdata.last_gtk) return false; /* * invalidate all other GTKs that might still exist and update * the one that we used */ gtkdata.find_phase = false; ieee80211_iter_keys(mvm->hw, vif, iwl_mvm_d3_update_keys, >kdata); if (status->num_of_gtk_rekeys) { struct ieee80211_key_conf *key; struct { struct ieee80211_key_conf conf; u8 key[32]; } conf = { .conf.cipher = gtkdata.cipher, .conf.keyidx = iwlmvm_wowlan_gtk_idx(&status->gtk[0]), }; __be64 replay_ctr; switch (gtkdata.cipher) { case WLAN_CIPHER_SUITE_CCMP: conf.conf.keylen = WLAN_KEY_LEN_CCMP; memcpy(conf.conf.key, status->gtk[0].key, WLAN_KEY_LEN_CCMP); break; case WLAN_CIPHER_SUITE_TKIP: conf.conf.keylen = WLAN_KEY_LEN_TKIP; memcpy(conf.conf.key, status->gtk[0].key, 16); /* leave TX MIC key zeroed, we don't use it anyway */ memcpy(conf.conf.key + NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY, status->gtk[0].tkip_mic_key, 8); break; } key = ieee80211_gtk_rekey_add(vif, &conf.conf); if (IS_ERR(key)) return false; iwl_mvm_set_key_rx_seq(mvm, key, status); replay_ctr = cpu_to_be64(le64_to_cpu(status->replay_ctr)); ieee80211_gtk_rekey_notify(vif, vif->bss_conf.bssid, (void *)&replay_ctr, GFP_KERNEL); } out: mvmvif->seqno_valid = true; /* +0x10 because the set API expects next-to-use, not last-used */ mvmvif->seqno = le16_to_cpu(status->non_qos_seq_ctr) + 0x10; return true; } struct iwl_wowlan_status *iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm) { struct iwl_wowlan_status *v7, *status; struct iwl_host_cmd cmd = { .id = WOWLAN_GET_STATUSES, .flags = CMD_WANT_SKB, }; int ret, len, status_size; lockdep_assert_held(&mvm->mutex); ret = iwl_mvm_send_cmd(mvm, &cmd); if (ret) { IWL_ERR(mvm, "failed to query wakeup status (%d)\n", ret); return ERR_PTR(ret); } if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_WOWLAN_KEY_MATERIAL)) { struct iwl_wowlan_status_v6 *v6 = (void *)cmd.resp_pkt->data; int data_size; status_size = sizeof(*v6); len = iwl_rx_packet_payload_len(cmd.resp_pkt); if (len < status_size) { IWL_ERR(mvm, "Invalid WoWLAN status response!\n"); status = ERR_PTR(-EIO); goto out_free_resp; } data_size = ALIGN(le32_to_cpu(v6->wake_packet_bufsize), 4); if (len != (status_size + data_size)) { IWL_ERR(mvm, "Invalid WoWLAN status response!\n"); status = ERR_PTR(-EIO); goto out_free_resp; } status = kzalloc(sizeof(*status) + data_size, GFP_KERNEL); if (!status) goto out_free_resp; BUILD_BUG_ON(sizeof(v6->gtk.decrypt_key) > sizeof(status->gtk[0].key)); BUILD_BUG_ON(sizeof(v6->gtk.tkip_mic_key) > sizeof(status->gtk[0].tkip_mic_key)); /* copy GTK info to the right place */ memcpy(status->gtk[0].key, v6->gtk.decrypt_key, sizeof(v6->gtk.decrypt_key)); memcpy(status->gtk[0].tkip_mic_key, v6->gtk.tkip_mic_key, sizeof(v6->gtk.tkip_mic_key)); memcpy(&status->gtk[0].rsc, &v6->gtk.rsc, sizeof(status->gtk[0].rsc)); /* hardcode the key length to 16 since v6 only supports 16 */ status->gtk[0].key_len = 16; /* * The key index only uses 2 bits (values 0 to 3) and * we always set bit 7 which means this is the * currently used key. 
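 * (That is what the key_index | BIT(7) assignment just below encodes.)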
*/ status->gtk[0].key_flags = v6->gtk.key_index | BIT(7); status->replay_ctr = v6->replay_ctr; /* everything starting from pattern_number is identical */ memcpy(&status->pattern_number, &v6->pattern_number, offsetof(struct iwl_wowlan_status, wake_packet) - offsetof(struct iwl_wowlan_status, pattern_number) + data_size); goto out_free_resp; } v7 = (void *)cmd.resp_pkt->data; status_size = sizeof(*v7); len = iwl_rx_packet_payload_len(cmd.resp_pkt); if (len < status_size) { IWL_ERR(mvm, "Invalid WoWLAN status response!\n"); status = ERR_PTR(-EIO); goto out_free_resp; } if (len != (status_size + ALIGN(le32_to_cpu(v7->wake_packet_bufsize), 4))) { IWL_ERR(mvm, "Invalid WoWLAN status response!\n"); status = ERR_PTR(-EIO); goto out_free_resp; } status = kmemdup(v7, len, GFP_KERNEL); out_free_resp: iwl_free_resp(&cmd); return status; } static struct iwl_wowlan_status * iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm) { int ret; /* only for tracing for now */ ret = iwl_mvm_send_cmd_pdu(mvm, OFFLOADS_QUERY_CMD, 0, 0, NULL); if (ret) IWL_ERR(mvm, "failed to query offload statistics (%d)\n", ret); return iwl_mvm_send_wowlan_get_status(mvm); } /* releases the MVM mutex */ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_wowlan_status_data status; struct iwl_wowlan_status *fw_status; int i; bool keep; struct iwl_mvm_sta *mvm_ap_sta; fw_status = iwl_mvm_get_wakeup_status(mvm); if (IS_ERR_OR_NULL(fw_status)) goto out_unlock; status.pattern_number = le16_to_cpu(fw_status->pattern_number); for (i = 0; i < 8; i++) status.qos_seq_ctr[i] = le16_to_cpu(fw_status->qos_seq_ctr[i]); status.wakeup_reasons = le32_to_cpu(fw_status->wakeup_reasons); status.wake_packet_length = le32_to_cpu(fw_status->wake_packet_length); status.wake_packet_bufsize = le32_to_cpu(fw_status->wake_packet_bufsize); status.wake_packet = fw_status->wake_packet; /* still at hard-coded place 0 for D3 image */ mvm_ap_sta = iwl_mvm_sta_from_staid_protected(mvm, 0); if (!mvm_ap_sta) goto out_free; for (i = 0; i < IWL_MAX_TID_COUNT; i++) { u16 seq = status.qos_seq_ctr[i]; /* firmware stores last-used value, we store next value */ seq += 0x10; mvm_ap_sta->tid_data[i].seq_number = seq; } if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000) { i = mvm->offload_tid; iwl_trans_set_q_ptrs(mvm->trans, mvm_ap_sta->tid_data[i].txq_id, mvm_ap_sta->tid_data[i].seq_number >> 4); } /* now we have all the data we need, unlock to avoid mac80211 issues */ mutex_unlock(&mvm->mutex); iwl_mvm_report_wakeup_reasons(mvm, vif, &status); keep = iwl_mvm_setup_connection_keep(mvm, vif, fw_status); kfree(fw_status); return keep; out_free: kfree(fw_status); out_unlock: mutex_unlock(&mvm->mutex); return false; } #define ND_QUERY_BUF_LEN (sizeof(struct iwl_scan_offload_profile_match) * \ IWL_SCAN_MAX_PROFILES) struct iwl_mvm_nd_query_results { u32 matched_profiles; u8 matches[ND_QUERY_BUF_LEN]; }; static int iwl_mvm_netdetect_query_results(struct iwl_mvm *mvm, struct iwl_mvm_nd_query_results *results) { struct iwl_scan_offload_profiles_query *query; struct iwl_host_cmd cmd = { .id = SCAN_OFFLOAD_PROFILES_QUERY_CMD, .flags = CMD_WANT_SKB, }; int ret, len; size_t query_len, matches_len; ret = iwl_mvm_send_cmd(mvm, &cmd); if (ret) { IWL_ERR(mvm, "failed to query matched profiles (%d)\n", ret); return ret; } if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS)) { query_len = sizeof(struct iwl_scan_offload_profiles_query); matches_len = sizeof(struct iwl_scan_offload_profile_match) * 
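/*
 * The response is one query header followed by a match entry per
 * profile; the entry size (and hence how much we copy) depends on
 * whether the firmware supports the newer channel-bitmap layout.
 */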
IWL_SCAN_MAX_PROFILES; } else { query_len = sizeof(struct iwl_scan_offload_profiles_query_v1); matches_len = sizeof(struct iwl_scan_offload_profile_match_v1) * IWL_SCAN_MAX_PROFILES; } len = iwl_rx_packet_payload_len(cmd.resp_pkt); if (len < query_len) { IWL_ERR(mvm, "Invalid scan offload profiles query response!\n"); ret = -EIO; goto out_free_resp; } query = (void *)cmd.resp_pkt->data; results->matched_profiles = le32_to_cpu(query->matched_profiles); memcpy(results->matches, query->matches, matches_len); #ifdef CPTCFG_IWLWIFI_DEBUGFS mvm->last_netdetect_scans = le32_to_cpu(query->n_scans_done); #endif out_free_resp: iwl_free_resp(&cmd); return ret; } static int iwl_mvm_query_num_match_chans(struct iwl_mvm *mvm, struct iwl_mvm_nd_query_results *query, int idx) { int n_chans = 0, i; if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS)) { struct iwl_scan_offload_profile_match *matches = (struct iwl_scan_offload_profile_match *)query->matches; for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN; i++) n_chans += hweight8(matches[idx].matching_channels[i]); } else { struct iwl_scan_offload_profile_match_v1 *matches = (struct iwl_scan_offload_profile_match_v1 *)query->matches; for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN_V1; i++) n_chans += hweight8(matches[idx].matching_channels[i]); } return n_chans; } static void iwl_mvm_query_set_freqs(struct iwl_mvm *mvm, struct iwl_mvm_nd_query_results *query, struct cfg80211_wowlan_nd_match *match, int idx) { int i; if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS)) { struct iwl_scan_offload_profile_match *matches = (struct iwl_scan_offload_profile_match *)query->matches; for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN * 8; i++) if (matches[idx].matching_channels[i / 8] & (BIT(i % 8))) match->channels[match->n_channels++] = mvm->nd_channels[i]->center_freq; } else { struct iwl_scan_offload_profile_match_v1 *matches = (struct iwl_scan_offload_profile_match_v1 *)query->matches; for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN_V1 * 8; i++) if (matches[idx].matching_channels[i / 8] & (BIT(i % 8))) match->channels[match->n_channels++] = mvm->nd_channels[i]->center_freq; } } static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct cfg80211_wowlan_nd_info *net_detect = NULL; struct cfg80211_wowlan_wakeup wakeup = { .pattern_idx = -1, }; struct cfg80211_wowlan_wakeup *wakeup_report = &wakeup; struct iwl_mvm_nd_query_results query; struct iwl_wowlan_status *fw_status; unsigned long matched_profiles; u32 reasons = 0; int i, n_matches, ret; fw_status = iwl_mvm_get_wakeup_status(mvm); if (!IS_ERR_OR_NULL(fw_status)) { reasons = le32_to_cpu(fw_status->wakeup_reasons); kfree(fw_status); } if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED) wakeup.rfkill_release = true; if (reasons != IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS) goto out; ret = iwl_mvm_netdetect_query_results(mvm, &query); if (ret || !query.matched_profiles) { wakeup_report = NULL; goto out; } matched_profiles = query.matched_profiles; if (mvm->n_nd_match_sets) { n_matches = hweight_long(matched_profiles); } else { IWL_ERR(mvm, "no net detect match information available\n"); n_matches = 0; } net_detect = kzalloc(struct_size(net_detect, matches, n_matches), GFP_KERNEL); if (!net_detect || !n_matches) goto out_report_nd; for_each_set_bit(i, &matched_profiles, mvm->n_nd_match_sets) { struct cfg80211_wowlan_nd_match *match; int idx, n_channels = 0; n_channels = iwl_mvm_query_num_match_chans(mvm, &query, i); match = 
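/*
 * struct_size() sizes a struct that ends in a flexible array and
 * checks for overflow: struct_size(match, channels, 3), say, is
 * sizeof(*match) + 3 * sizeof(match->channels[0]).
 */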
kzalloc(struct_size(match, channels, n_channels), GFP_KERNEL); if (!match) goto out_report_nd; net_detect->matches[net_detect->n_matches++] = match; /* We inverted the order of the SSIDs in the scan * request, so invert the index here. */ idx = mvm->n_nd_match_sets - i - 1; match->ssid.ssid_len = mvm->nd_match_sets[idx].ssid.ssid_len; memcpy(match->ssid.ssid, mvm->nd_match_sets[idx].ssid.ssid, match->ssid.ssid_len); if (mvm->n_nd_channels < n_channels) continue; iwl_mvm_query_set_freqs(mvm, &query, match, i); } out_report_nd: wakeup.net_detect = net_detect; out: iwl_mvm_free_nd(mvm); mutex_unlock(&mvm->mutex); ieee80211_report_wowlan_wakeup(vif, wakeup_report, GFP_KERNEL); if (net_detect) { for (i = 0; i < net_detect->n_matches; i++) kfree(net_detect->matches[i]); kfree(net_detect); } } static void iwl_mvm_d3_disconnect_iter(void *data, u8 *mac, struct ieee80211_vif *vif) { /* skip the one we keep connection on */ if (data == vif) return; if (vif->type == NL80211_IFTYPE_STATION) ieee80211_resume_disconnect(vif); } static int iwl_mvm_check_rt_status(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { u32 base = mvm->trans->dbg.lmac_error_event_table[0]; struct error_table_start { /* cf. struct iwl_error_event_table */ u32 valid; u32 error_id; } err_info; iwl_trans_read_mem_bytes(mvm->trans, base, &err_info, sizeof(err_info)); if (err_info.valid && err_info.error_id == RF_KILL_INDICATOR_FOR_WOWLAN) { struct cfg80211_wowlan_wakeup wakeup = { .rfkill_release = true, }; ieee80211_report_wowlan_wakeup(vif, &wakeup, GFP_KERNEL); } return err_info.valid; } static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test) { struct ieee80211_vif *vif = NULL; int ret = 1; enum iwl_d3_status d3_status; bool keep = false; bool unified_image = fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); bool d0i3_first = fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_D0I3_END_FIRST); mutex_lock(&mvm->mutex); /* get the BSS vif pointer again */ vif = iwl_mvm_get_bss_vif(mvm); if (IS_ERR_OR_NULL(vif)) goto err; ret = iwl_trans_d3_resume(mvm->trans, &d3_status, test, !unified_image); if (ret) goto err; if (d3_status != IWL_D3_STATUS_ALIVE) { IWL_INFO(mvm, "Device was reset during suspend\n"); goto err; } iwl_fw_dbg_read_d3_debug_data(&mvm->fwrt); if (iwl_mvm_check_rt_status(mvm, vif)) { set_bit(STATUS_FW_ERROR, &mvm->trans->status); iwl_mvm_dump_nic_error_log(mvm); iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert, false, 0); ret = 1; goto err; } if (d0i3_first) { ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, 0, 0, NULL); if (ret < 0) { IWL_ERR(mvm, "Failed to send D0I3_END_CMD first (%d)\n", ret); goto err; } } /* * Query the current location and source from the D3 firmware so we * can play it back when we re-intiailize the D0 firmware */ iwl_mvm_update_changed_regdom(mvm); /* Re-configure PPAG settings */ iwl_mvm_ppag_send_cmd(mvm); if (!unified_image) /* Re-configure default SAR profile */ iwl_mvm_sar_select_profile(mvm, 1, 1); if (mvm->net_detect) { /* If this is a non-unified image, we restart the FW, * so no need to stop the netdetect scan. If that * fails, continue and try to get the wake-up reasons, * but trigger a HW restart by keeping a failure code * in ret. 
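 * (ret enters this function as 1, so skipping the scan stop keeps
 * the failure code and requests a HW restart on the way out.)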
*/ if (unified_image) ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_NETDETECT, false); iwl_mvm_query_netdetect_reasons(mvm, vif); /* has unlocked the mutex, so skip that */ goto out; } else { keep = iwl_mvm_query_wakeup_reasons(mvm, vif); #ifdef CPTCFG_IWLWIFI_DEBUGFS if (keep) mvm->keep_vif = vif; #endif /* has unlocked the mutex, so skip that */ goto out_iterate; } err: iwl_mvm_free_nd(mvm); mutex_unlock(&mvm->mutex); out_iterate: if (!test) ieee80211_iterate_active_interfaces_rtnl(mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_d3_disconnect_iter, keep ? vif : NULL); out: /* no need to reset the device in unified images, if successful */ if (unified_image && !ret) { /* nothing else to do if we already sent D0I3_END_CMD */ if (d0i3_first) return 0; ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, 0, 0, NULL); if (!ret) return 0; } /* * Reconfigure the device in one of the following cases: * 1. We are not using a unified image * 2. We are using a unified image but had an error while exiting D3 */ set_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status); return 1; } static int iwl_mvm_resume_d3(struct iwl_mvm *mvm) { iwl_trans_resume(mvm->trans); return __iwl_mvm_resume(mvm, false); } int iwl_mvm_resume(struct ieee80211_hw *hw) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int ret; ret = iwl_mvm_resume_d3(mvm); mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED; iwl_mvm_resume_tcm(mvm); iwl_fw_runtime_resume(&mvm->fwrt); return ret; } void iwl_mvm_set_wakeup(struct ieee80211_hw *hw, bool enabled) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); device_set_wakeup_enable(mvm->trans->dev, enabled); } #ifdef CPTCFG_IWLWIFI_DEBUGFS static int iwl_mvm_d3_test_open(struct inode *inode, struct file *file) { struct iwl_mvm *mvm = inode->i_private; int err; if (mvm->d3_test_active) return -EBUSY; file->private_data = inode->i_private; synchronize_net(); mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_D3; iwl_mvm_pause_tcm(mvm, true); iwl_fw_runtime_suspend(&mvm->fwrt); /* start pseudo D3 */ rtnl_lock(); err = __iwl_mvm_suspend(mvm->hw, mvm->hw->wiphy->wowlan_config, true); rtnl_unlock(); if (err > 0) err = -EINVAL; if (err) return err; mvm->d3_test_active = true; mvm->keep_vif = NULL; return 0; } static ssize_t iwl_mvm_d3_test_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; u32 pme_asserted; while (true) { /* read pme_ptr if available */ if (mvm->d3_test_pme_ptr) { pme_asserted = iwl_trans_read_mem32(mvm->trans, mvm->d3_test_pme_ptr); if (pme_asserted) break; } if (msleep_interruptible(100)) break; } return 0; } static void iwl_mvm_d3_test_disconn_work_iter(void *_data, u8 *mac, struct ieee80211_vif *vif) { /* skip the one we keep connection on */ if (_data == vif) return; if (vif->type == NL80211_IFTYPE_STATION) ieee80211_connection_loss(vif); } static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file) { struct iwl_mvm *mvm = inode->i_private; bool unified_image = fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); mvm->d3_test_active = false; iwl_fw_dbg_read_d3_debug_data(&mvm->fwrt); rtnl_lock(); __iwl_mvm_resume(mvm, true); rtnl_unlock(); iwl_mvm_resume_tcm(mvm); iwl_fw_runtime_resume(&mvm->fwrt); mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED; iwl_abort_notification_waits(&mvm->notif_wait); if (!unified_image) { int remaining_time = 10; ieee80211_restart_hw(mvm->hw); /* wait for restart and disconnect all interfaces */ while (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) 
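/* poll for up to ~10 seconds in 1-second steps */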
&& remaining_time > 0) { remaining_time--; msleep(1000); } if (remaining_time == 0) IWL_ERR(mvm, "Timed out waiting for HW restart!\n"); } ieee80211_iterate_active_interfaces_atomic( mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_d3_test_disconn_work_iter, mvm->keep_vif); return 0; } const struct file_operations iwl_dbgfs_d3_test_ops = { .llseek = no_llseek, .open = iwl_mvm_d3_test_open, .read = iwl_mvm_d3_test_read, .release = iwl_mvm_d3_test_release, }; #endif backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c000066400000000000000000000645001351064634500277400ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* *****************************************************************************/ #include "mvm.h" #include "debugfs.h" static void iwl_dbgfs_update_pm(struct iwl_mvm *mvm, struct ieee80211_vif *vif, enum iwl_dbgfs_pm_mask param, int val) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_dbgfs_pm *dbgfs_pm = &mvmvif->dbgfs_pm; dbgfs_pm->mask |= param; switch (param) { case MVM_DEBUGFS_PM_KEEP_ALIVE: { int dtimper = vif->bss_conf.dtim_period ?: 1; int dtimper_msec = dtimper * vif->bss_conf.beacon_int; IWL_DEBUG_POWER(mvm, "debugfs: set keep_alive= %d sec\n", val); if (val * MSEC_PER_SEC < 3 * dtimper_msec) IWL_WARN(mvm, "debugfs: keep alive period (%ld msec) is less than minimum required (%d msec)\n", val * MSEC_PER_SEC, 3 * dtimper_msec); dbgfs_pm->keep_alive_seconds = val; break; } case MVM_DEBUGFS_PM_SKIP_OVER_DTIM: IWL_DEBUG_POWER(mvm, "skip_over_dtim %s\n", val ? "enabled" : "disabled"); dbgfs_pm->skip_over_dtim = val; break; case MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS: IWL_DEBUG_POWER(mvm, "skip_dtim_periods=%d\n", val); dbgfs_pm->skip_dtim_periods = val; break; case MVM_DEBUGFS_PM_RX_DATA_TIMEOUT: IWL_DEBUG_POWER(mvm, "rx_data_timeout=%d\n", val); dbgfs_pm->rx_data_timeout = val; break; case MVM_DEBUGFS_PM_TX_DATA_TIMEOUT: IWL_DEBUG_POWER(mvm, "tx_data_timeout=%d\n", val); dbgfs_pm->tx_data_timeout = val; break; case MVM_DEBUGFS_PM_LPRX_ENA: IWL_DEBUG_POWER(mvm, "lprx %s\n", val ? "enabled" : "disabled"); dbgfs_pm->lprx_ena = val; break; case MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD: IWL_DEBUG_POWER(mvm, "lprx_rssi_threshold=%d\n", val); dbgfs_pm->lprx_rssi_threshold = val; break; case MVM_DEBUGFS_PM_SNOOZE_ENABLE: IWL_DEBUG_POWER(mvm, "snooze_enable=%d\n", val); dbgfs_pm->snooze_ena = val; break; case MVM_DEBUGFS_PM_UAPSD_MISBEHAVING: IWL_DEBUG_POWER(mvm, "uapsd_misbehaving_enable=%d\n", val); dbgfs_pm->uapsd_misbehaving = val; break; case MVM_DEBUGFS_PM_USE_PS_POLL: IWL_DEBUG_POWER(mvm, "use_ps_poll=%d\n", val); dbgfs_pm->use_ps_poll = val; break; } } static ssize_t iwl_dbgfs_pm_params_write(struct ieee80211_vif *vif, char *buf, size_t count, loff_t *ppos) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm *mvm = mvmvif->mvm; enum iwl_dbgfs_pm_mask param; int val, ret; if (!strncmp("keep_alive=", buf, 11)) { if (sscanf(buf + 11, "%d", &val) != 1) return -EINVAL; param = MVM_DEBUGFS_PM_KEEP_ALIVE; } else if (!strncmp("skip_over_dtim=", buf, 15)) { if (sscanf(buf + 15, "%d", &val) != 1) return -EINVAL; param = MVM_DEBUGFS_PM_SKIP_OVER_DTIM; } else if (!strncmp("skip_dtim_periods=", buf, 18)) { if (sscanf(buf + 18, "%d", &val) != 1) return -EINVAL; param = MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS; } else if (!strncmp("rx_data_timeout=", buf, 16)) { if (sscanf(buf + 16, "%d", &val) != 1) return -EINVAL; param = MVM_DEBUGFS_PM_RX_DATA_TIMEOUT; } else if (!strncmp("tx_data_timeout=", buf, 16)) { if (sscanf(buf + 16, "%d", &val) != 1) return -EINVAL; param = MVM_DEBUGFS_PM_TX_DATA_TIMEOUT; } else if (!strncmp("lprx=", buf, 5)) { if (sscanf(buf + 5, "%d", &val) != 1) return -EINVAL; param = MVM_DEBUGFS_PM_LPRX_ENA; } else if (!strncmp("lprx_rssi_threshold=", buf, 20)) { if (sscanf(buf + 20, "%d", &val) != 1) return -EINVAL; if (val > POWER_LPRX_RSSI_THRESHOLD_MAX || val < POWER_LPRX_RSSI_THRESHOLD_MIN) return -EINVAL; param = MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD; } else if (!strncmp("snooze_enable=", buf, 14)) { if (sscanf(buf + 14, "%d", &val) != 1) return -EINVAL; param = MVM_DEBUGFS_PM_SNOOZE_ENABLE; } else if (!strncmp("uapsd_misbehaving=", buf, 18)) { if 
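/*
 * Hypothetical shell usage of this hook (paths/values illustrative):
 *   echo "keep_alive=30" > \
 *     /sys/kernel/debug/ieee80211/phy0/netdev:wlan0/iwlmvm/pm_params
 */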
(sscanf(buf + 18, "%d", &val) != 1) return -EINVAL; param = MVM_DEBUGFS_PM_UAPSD_MISBEHAVING; } else if (!strncmp("use_ps_poll=", buf, 12)) { if (sscanf(buf + 12, "%d", &val) != 1) return -EINVAL; param = MVM_DEBUGFS_PM_USE_PS_POLL; } else { return -EINVAL; } mutex_lock(&mvm->mutex); iwl_dbgfs_update_pm(mvm, vif, param, val); ret = iwl_mvm_power_update_mac(mvm); mutex_unlock(&mvm->mutex); return ret ?: count; } static ssize_t iwl_dbgfs_tx_pwr_lmt_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ieee80211_vif *vif = file->private_data; char buf[64]; int bufsz = sizeof(buf); int pos; pos = scnprintf(buf, bufsz, "bss limit = %d\n", vif->bss_conf.txpower); return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } static ssize_t iwl_dbgfs_pm_params_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ieee80211_vif *vif = file->private_data; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm *mvm = mvmvif->mvm; char buf[512]; int bufsz = sizeof(buf); int pos; pos = iwl_mvm_power_mac_dbgfs_read(mvm, vif, buf, bufsz); return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } static ssize_t iwl_dbgfs_mac_params_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ieee80211_vif *vif = file->private_data; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm *mvm = mvmvif->mvm; u8 ap_sta_id; struct ieee80211_chanctx_conf *chanctx_conf; char buf[512]; int bufsz = sizeof(buf); int pos = 0; int i; mutex_lock(&mvm->mutex); ap_sta_id = mvmvif->ap_sta_id; switch (ieee80211_vif_type_p2p(vif)) { case NL80211_IFTYPE_ADHOC: pos += scnprintf(buf+pos, bufsz-pos, "type: ibss\n"); break; case NL80211_IFTYPE_STATION: pos += scnprintf(buf+pos, bufsz-pos, "type: bss\n"); break; case NL80211_IFTYPE_AP: pos += scnprintf(buf+pos, bufsz-pos, "type: ap\n"); break; case NL80211_IFTYPE_P2P_CLIENT: pos += scnprintf(buf+pos, bufsz-pos, "type: p2p client\n"); break; case NL80211_IFTYPE_P2P_GO: pos += scnprintf(buf+pos, bufsz-pos, "type: p2p go\n"); break; case NL80211_IFTYPE_P2P_DEVICE: pos += scnprintf(buf+pos, bufsz-pos, "type: p2p dev\n"); break; case NL80211_IFTYPE_NAN: pos += scnprintf(buf+pos, bufsz-pos, "type: NAN\n"); break; default: break; } pos += scnprintf(buf+pos, bufsz-pos, "mac id/color: %d / %d\n", mvmvif->id, mvmvif->color); pos += scnprintf(buf+pos, bufsz-pos, "bssid: %pM\n", vif->bss_conf.bssid); pos += scnprintf(buf+pos, bufsz-pos, "Load: %d\n", mvm->tcm.result.load[mvmvif->id]); pos += scnprintf(buf+pos, bufsz-pos, "QoS:\n"); for (i = 0; i < ARRAY_SIZE(mvmvif->queue_params); i++) pos += scnprintf(buf+pos, bufsz-pos, "\t%d: txop:%d - cw_min:%d - cw_max = %d - aifs = %d upasd = %d\n", i, mvmvif->queue_params[i].txop, mvmvif->queue_params[i].cw_min, mvmvif->queue_params[i].cw_max, mvmvif->queue_params[i].aifs, mvmvif->queue_params[i].uapsd); if (vif->type == NL80211_IFTYPE_STATION && ap_sta_id != IWL_MVM_INVALID_STA) { struct iwl_mvm_sta *mvm_sta; mvm_sta = iwl_mvm_sta_from_staid_protected(mvm, ap_sta_id); if (mvm_sta) { pos += scnprintf(buf+pos, bufsz-pos, "ap_sta_id %d - reduced Tx power %d\n", ap_sta_id, mvm_sta->bt_reduced_txpower); } } rcu_read_lock(); chanctx_conf = rcu_dereference(vif->chanctx_conf); if (chanctx_conf) pos += scnprintf(buf+pos, bufsz-pos, "idle rx chains %d, active rx chains: %d\n", chanctx_conf->rx_chains_static, chanctx_conf->rx_chains_dynamic); rcu_read_unlock(); mutex_unlock(&mvm->mutex); return 
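/*
 * simple_read_from_buffer() copies at most @count bytes from the
 * kernel buffer at *ppos to user space, advancing *ppos and
 * returning the number of bytes copied.
 */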
simple_read_from_buffer(user_buf, count, ppos, buf, pos); } static void iwl_dbgfs_update_bf(struct ieee80211_vif *vif, enum iwl_dbgfs_bf_mask param, int value) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_dbgfs_bf *dbgfs_bf = &mvmvif->dbgfs_bf; dbgfs_bf->mask |= param; switch (param) { case MVM_DEBUGFS_BF_ENERGY_DELTA: dbgfs_bf->bf_energy_delta = value; break; case MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA: dbgfs_bf->bf_roaming_energy_delta = value; break; case MVM_DEBUGFS_BF_ROAMING_STATE: dbgfs_bf->bf_roaming_state = value; break; case MVM_DEBUGFS_BF_TEMP_THRESHOLD: dbgfs_bf->bf_temp_threshold = value; break; case MVM_DEBUGFS_BF_TEMP_FAST_FILTER: dbgfs_bf->bf_temp_fast_filter = value; break; case MVM_DEBUGFS_BF_TEMP_SLOW_FILTER: dbgfs_bf->bf_temp_slow_filter = value; break; case MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER: dbgfs_bf->bf_enable_beacon_filter = value; break; case MVM_DEBUGFS_BF_DEBUG_FLAG: dbgfs_bf->bf_debug_flag = value; break; case MVM_DEBUGFS_BF_ESCAPE_TIMER: dbgfs_bf->bf_escape_timer = value; break; case MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT: dbgfs_bf->ba_enable_beacon_abort = value; break; case MVM_DEBUGFS_BA_ESCAPE_TIMER: dbgfs_bf->ba_escape_timer = value; break; } } static ssize_t iwl_dbgfs_bf_params_write(struct ieee80211_vif *vif, char *buf, size_t count, loff_t *ppos) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm *mvm = mvmvif->mvm; enum iwl_dbgfs_bf_mask param; int value, ret = 0; if (!strncmp("bf_energy_delta=", buf, 16)) { if (sscanf(buf+16, "%d", &value) != 1) return -EINVAL; if (value < IWL_BF_ENERGY_DELTA_MIN || value > IWL_BF_ENERGY_DELTA_MAX) return -EINVAL; param = MVM_DEBUGFS_BF_ENERGY_DELTA; } else if (!strncmp("bf_roaming_energy_delta=", buf, 24)) { if (sscanf(buf+24, "%d", &value) != 1) return -EINVAL; if (value < IWL_BF_ROAMING_ENERGY_DELTA_MIN || value > IWL_BF_ROAMING_ENERGY_DELTA_MAX) return -EINVAL; param = MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA; } else if (!strncmp("bf_roaming_state=", buf, 17)) { if (sscanf(buf+17, "%d", &value) != 1) return -EINVAL; if (value < IWL_BF_ROAMING_STATE_MIN || value > IWL_BF_ROAMING_STATE_MAX) return -EINVAL; param = MVM_DEBUGFS_BF_ROAMING_STATE; } else if (!strncmp("bf_temp_threshold=", buf, 18)) { if (sscanf(buf+18, "%d", &value) != 1) return -EINVAL; if (value < IWL_BF_TEMP_THRESHOLD_MIN || value > IWL_BF_TEMP_THRESHOLD_MAX) return -EINVAL; param = MVM_DEBUGFS_BF_TEMP_THRESHOLD; } else if (!strncmp("bf_temp_fast_filter=", buf, 20)) { if (sscanf(buf+20, "%d", &value) != 1) return -EINVAL; if (value < IWL_BF_TEMP_FAST_FILTER_MIN || value > IWL_BF_TEMP_FAST_FILTER_MAX) return -EINVAL; param = MVM_DEBUGFS_BF_TEMP_FAST_FILTER; } else if (!strncmp("bf_temp_slow_filter=", buf, 20)) { if (sscanf(buf+20, "%d", &value) != 1) return -EINVAL; if (value < IWL_BF_TEMP_SLOW_FILTER_MIN || value > IWL_BF_TEMP_SLOW_FILTER_MAX) return -EINVAL; param = MVM_DEBUGFS_BF_TEMP_SLOW_FILTER; } else if (!strncmp("bf_enable_beacon_filter=", buf, 24)) { if (sscanf(buf+24, "%d", &value) != 1) return -EINVAL; if (value < 0 || value > 1) return -EINVAL; param = MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER; } else if (!strncmp("bf_debug_flag=", buf, 14)) { if (sscanf(buf+14, "%d", &value) != 1) return -EINVAL; if (value < 0 || value > 1) return -EINVAL; param = MVM_DEBUGFS_BF_DEBUG_FLAG; } else if (!strncmp("bf_escape_timer=", buf, 16)) { if (sscanf(buf+16, "%d", &value) != 1) return -EINVAL; if (value < IWL_BF_ESCAPE_TIMER_MIN || value > IWL_BF_ESCAPE_TIMER_MAX) return -EINVAL; param = 
MVM_DEBUGFS_BF_ESCAPE_TIMER; } else if (!strncmp("ba_escape_timer=", buf, 16)) { if (sscanf(buf+16, "%d", &value) != 1) return -EINVAL; if (value < IWL_BA_ESCAPE_TIMER_MIN || value > IWL_BA_ESCAPE_TIMER_MAX) return -EINVAL; param = MVM_DEBUGFS_BA_ESCAPE_TIMER; } else if (!strncmp("ba_enable_beacon_abort=", buf, 23)) { if (sscanf(buf+23, "%d", &value) != 1) return -EINVAL; if (value < 0 || value > 1) return -EINVAL; param = MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT; } else { return -EINVAL; } mutex_lock(&mvm->mutex); iwl_dbgfs_update_bf(vif, param, value); if (param == MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER && !value) ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0); else ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0); mutex_unlock(&mvm->mutex); return ret ?: count; } static ssize_t iwl_dbgfs_bf_params_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ieee80211_vif *vif = file->private_data; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); char buf[256]; int pos = 0; const size_t bufsz = sizeof(buf); struct iwl_beacon_filter_cmd cmd = { IWL_BF_CMD_CONFIG_DEFAULTS, .bf_enable_beacon_filter = cpu_to_le32(IWL_BF_ENABLE_BEACON_FILTER_DEFAULT), .ba_enable_beacon_abort = cpu_to_le32(IWL_BA_ENABLE_BEACON_ABORT_DEFAULT), }; iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd); if (mvmvif->bf_data.bf_enabled) cmd.bf_enable_beacon_filter = cpu_to_le32(1); else cmd.bf_enable_beacon_filter = 0; pos += scnprintf(buf+pos, bufsz-pos, "bf_energy_delta = %d\n", le32_to_cpu(cmd.bf_energy_delta)); pos += scnprintf(buf+pos, bufsz-pos, "bf_roaming_energy_delta = %d\n", le32_to_cpu(cmd.bf_roaming_energy_delta)); pos += scnprintf(buf+pos, bufsz-pos, "bf_roaming_state = %d\n", le32_to_cpu(cmd.bf_roaming_state)); pos += scnprintf(buf+pos, bufsz-pos, "bf_temp_threshold = %d\n", le32_to_cpu(cmd.bf_temp_threshold)); pos += scnprintf(buf+pos, bufsz-pos, "bf_temp_fast_filter = %d\n", le32_to_cpu(cmd.bf_temp_fast_filter)); pos += scnprintf(buf+pos, bufsz-pos, "bf_temp_slow_filter = %d\n", le32_to_cpu(cmd.bf_temp_slow_filter)); pos += scnprintf(buf+pos, bufsz-pos, "bf_enable_beacon_filter = %d\n", le32_to_cpu(cmd.bf_enable_beacon_filter)); pos += scnprintf(buf+pos, bufsz-pos, "bf_debug_flag = %d\n", le32_to_cpu(cmd.bf_debug_flag)); pos += scnprintf(buf+pos, bufsz-pos, "bf_escape_timer = %d\n", le32_to_cpu(cmd.bf_escape_timer)); pos += scnprintf(buf+pos, bufsz-pos, "ba_escape_timer = %d\n", le32_to_cpu(cmd.ba_escape_timer)); pos += scnprintf(buf+pos, bufsz-pos, "ba_enable_beacon_abort = %d\n", le32_to_cpu(cmd.ba_enable_beacon_abort)); return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } static inline char *iwl_dbgfs_is_match(char *name, char *buf) { int len = strlen(name); return !strncmp(name, buf, len) ? 
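/*
 * e.g. iwl_dbgfs_is_match("keep_alive=", "keep_alive=30") yields a
 * pointer to "30"; a buffer without that prefix yields NULL.
 */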
buf + len : NULL; } static ssize_t iwl_dbgfs_os_device_timediff_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ieee80211_vif *vif = file->private_data; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm *mvm = mvmvif->mvm; u32 curr_gp2; u64 curr_os; s64 diff; char buf[64]; const size_t bufsz = sizeof(buf); int pos = 0; iwl_mvm_get_sync_time(mvm, &curr_gp2, &curr_os); do_div(curr_os, NSEC_PER_USEC); diff = curr_os - curr_gp2; pos += scnprintf(buf + pos, bufsz - pos, "diff=%lld\n", diff); return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } static ssize_t iwl_dbgfs_low_latency_write(struct ieee80211_vif *vif, char *buf, size_t count, loff_t *ppos) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm *mvm = mvmvif->mvm; u8 value; int ret; ret = kstrtou8(buf, 0, &value); if (ret) return ret; if (value > 1) return -EINVAL; mutex_lock(&mvm->mutex); iwl_mvm_update_low_latency(mvm, vif, value, LOW_LATENCY_DEBUGFS); mutex_unlock(&mvm->mutex); return count; } static ssize_t iwl_dbgfs_low_latency_force_write(struct ieee80211_vif *vif, char *buf, size_t count, loff_t *ppos) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm *mvm = mvmvif->mvm; u8 value; int ret; ret = kstrtou8(buf, 0, &value); if (ret) return ret; if (value > NUM_LOW_LATENCY_FORCE) return -EINVAL; mutex_lock(&mvm->mutex); if (value == LOW_LATENCY_FORCE_UNSET) { iwl_mvm_update_low_latency(mvm, vif, false, LOW_LATENCY_DEBUGFS_FORCE); iwl_mvm_update_low_latency(mvm, vif, false, LOW_LATENCY_DEBUGFS_FORCE_ENABLE); } else { iwl_mvm_update_low_latency(mvm, vif, value == LOW_LATENCY_FORCE_ON, LOW_LATENCY_DEBUGFS_FORCE); iwl_mvm_update_low_latency(mvm, vif, true, LOW_LATENCY_DEBUGFS_FORCE_ENABLE); } mutex_unlock(&mvm->mutex); return count; } static ssize_t iwl_dbgfs_low_latency_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ieee80211_vif *vif = file->private_data; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); char format[] = "traffic=%d\ndbgfs=%d\nvcmd=%d\nvif_type=%d\n" "dbgfs_force_enable=%d\ndbgfs_force=%d\nactual=%d\n"; /* * all values in format are boolean so the size of format is enough * for holding the result string */ char buf[sizeof(format) + 1] = {}; int len; len = scnprintf(buf, sizeof(buf) - 1, format, !!(mvmvif->low_latency & LOW_LATENCY_TRAFFIC), !!(mvmvif->low_latency & LOW_LATENCY_DEBUGFS), !!(mvmvif->low_latency & LOW_LATENCY_VCMD), !!(mvmvif->low_latency & LOW_LATENCY_VIF_TYPE), !!(mvmvif->low_latency & LOW_LATENCY_DEBUGFS_FORCE_ENABLE), !!(mvmvif->low_latency & LOW_LATENCY_DEBUGFS_FORCE), !!(mvmvif->low_latency_actual)); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static ssize_t iwl_dbgfs_uapsd_misbehaving_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ieee80211_vif *vif = file->private_data; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); char buf[20]; int len; len = sprintf(buf, "%pM\n", mvmvif->uapsd_misbehaving_bssid); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static ssize_t iwl_dbgfs_uapsd_misbehaving_write(struct ieee80211_vif *vif, char *buf, size_t count, loff_t *ppos) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm *mvm = mvmvif->mvm; bool ret; mutex_lock(&mvm->mutex); ret = mac_pton(buf, mvmvif->uapsd_misbehaving_bssid); mutex_unlock(&mvm->mutex); return ret ? 
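/*
 * mac_pton() parses a colon-separated MAC string, e.g. (illustrative)
 * "00:11:22:33:44:55", and returns true only on a full parse.
 */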
count : -EINVAL; } static ssize_t iwl_dbgfs_rx_phyinfo_write(struct ieee80211_vif *vif, char *buf, size_t count, loff_t *ppos) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm *mvm = mvmvif->mvm; struct ieee80211_chanctx_conf *chanctx_conf; struct iwl_mvm_phy_ctxt *phy_ctxt; u16 value; int ret; ret = kstrtou16(buf, 0, &value); if (ret) return ret; mutex_lock(&mvm->mutex); rcu_read_lock(); chanctx_conf = rcu_dereference(vif->chanctx_conf); /* make sure the channel context is assigned */ if (!chanctx_conf) { rcu_read_unlock(); mutex_unlock(&mvm->mutex); return -EINVAL; } phy_ctxt = &mvm->phy_ctxts[*(u16 *)chanctx_conf->drv_priv]; rcu_read_unlock(); mvm->dbgfs_rx_phyinfo = value; ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &chanctx_conf->min_def, chanctx_conf->rx_chains_static, chanctx_conf->rx_chains_dynamic); mutex_unlock(&mvm->mutex); return ret ?: count; } static ssize_t iwl_dbgfs_rx_phyinfo_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ieee80211_vif *vif = file->private_data; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); char buf[8]; int len; len = scnprintf(buf, sizeof(buf), "0x%04x\n", mvmvif->mvm->dbgfs_rx_phyinfo); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static void iwl_dbgfs_quota_check(void *data, u8 *mac, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int *ret = data; if (mvmvif->dbgfs_quota_min) *ret = -EINVAL; } static ssize_t iwl_dbgfs_quota_min_write(struct ieee80211_vif *vif, char *buf, size_t count, loff_t *ppos) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm *mvm = mvmvif->mvm; u16 value; int ret; ret = kstrtou16(buf, 0, &value); if (ret) return ret; if (value > 95) return -EINVAL; mutex_lock(&mvm->mutex); mvmvif->dbgfs_quota_min = 0; ieee80211_iterate_interfaces(mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_dbgfs_quota_check, &ret); if (ret == 0) { mvmvif->dbgfs_quota_min = value; iwl_mvm_update_quotas(mvm, false, NULL); } mutex_unlock(&mvm->mutex); return ret ?: count; } static ssize_t iwl_dbgfs_quota_min_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ieee80211_vif *vif = file->private_data; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); char buf[10]; int len; len = scnprintf(buf, sizeof(buf), "%d\n", mvmvif->dbgfs_quota_min); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } #define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \ _MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif) #define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \ _MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif) #define MVM_DEBUGFS_ADD_FILE_VIF(name, parent, mode) do { \ debugfs_create_file(#name, mode, parent, vif, \ &iwl_dbgfs_##name##_ops); \ } while (0) MVM_DEBUGFS_READ_FILE_OPS(mac_params); MVM_DEBUGFS_READ_FILE_OPS(tx_pwr_lmt); MVM_DEBUGFS_READ_WRITE_FILE_OPS(pm_params, 32); MVM_DEBUGFS_READ_WRITE_FILE_OPS(bf_params, 256); MVM_DEBUGFS_READ_WRITE_FILE_OPS(low_latency, 10); MVM_DEBUGFS_WRITE_FILE_OPS(low_latency_force, 10); MVM_DEBUGFS_READ_WRITE_FILE_OPS(uapsd_misbehaving, 20); MVM_DEBUGFS_READ_WRITE_FILE_OPS(rx_phyinfo, 10); MVM_DEBUGFS_READ_WRITE_FILE_OPS(quota_min, 32); MVM_DEBUGFS_READ_FILE_OPS(os_device_timediff); void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct dentry *dbgfs_dir = vif->debugfs_dir; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); char buf[100]; /* * 
Check if the debugfs directory already exists before creating it. * This may happen, for example, when resetting the HW or during suspend-resume */ if (!dbgfs_dir || mvmvif->dbgfs_dir) return; mvmvif->dbgfs_dir = debugfs_create_dir("iwlmvm", dbgfs_dir); if (IS_ERR_OR_NULL(mvmvif->dbgfs_dir)) { IWL_ERR(mvm, "Failed to create debugfs directory under %pd\n", dbgfs_dir); return; } if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM && vif->type == NL80211_IFTYPE_STATION) MVM_DEBUGFS_ADD_FILE_VIF(pm_params, mvmvif->dbgfs_dir, 0600); MVM_DEBUGFS_ADD_FILE_VIF(tx_pwr_lmt, mvmvif->dbgfs_dir, 0400); MVM_DEBUGFS_ADD_FILE_VIF(mac_params, mvmvif->dbgfs_dir, 0400); MVM_DEBUGFS_ADD_FILE_VIF(low_latency, mvmvif->dbgfs_dir, 0600); MVM_DEBUGFS_ADD_FILE_VIF(low_latency_force, mvmvif->dbgfs_dir, 0600); MVM_DEBUGFS_ADD_FILE_VIF(uapsd_misbehaving, mvmvif->dbgfs_dir, 0600); MVM_DEBUGFS_ADD_FILE_VIF(rx_phyinfo, mvmvif->dbgfs_dir, 0600); MVM_DEBUGFS_ADD_FILE_VIF(quota_min, mvmvif->dbgfs_dir, 0600); MVM_DEBUGFS_ADD_FILE_VIF(os_device_timediff, mvmvif->dbgfs_dir, 0400); if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p && mvmvif == mvm->bf_allowed_vif) MVM_DEBUGFS_ADD_FILE_VIF(bf_params, mvmvif->dbgfs_dir, 0600); /* * Create symlink for convenience pointing to interface specific * debugfs entries for the driver. For example, under * /sys/kernel/debug/iwlwifi/0000\:02\:00.0/iwlmvm/ * find * netdev:wlan0 -> ../../../ieee80211/phy0/netdev:wlan0/iwlmvm/ */ #if LINUX_VERSION_IS_GEQ(3,12,0) snprintf(buf, 100, "../../../%pd3/%pd", dbgfs_dir, mvmvif->dbgfs_dir); #else snprintf(buf, 100, "../../../%s/%s/%s/%s", dbgfs_dir->d_parent->d_parent->d_name.name, dbgfs_dir->d_parent->d_name.name, dbgfs_dir->d_name.name, mvmvif->dbgfs_dir->d_name.name); #endif mvmvif->dbgfs_slink = debugfs_create_symlink(dbgfs_dir->d_name.name, mvm->debugfs_dir, buf); } void iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); debugfs_remove(mvmvif->dbgfs_slink); mvmvif->dbgfs_slink = NULL; debugfs_remove_recursive(mvmvif->dbgfs_dir); mvmvif->dbgfs_dir = NULL; } backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c000066400000000000000000002121771351064634500271610ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #include <linux/vmalloc.h> #include <linux/ieee80211.h> #include <linux/netdevice.h> #include "mvm.h" #include "sta.h" #include "iwl-io.h" #include "debugfs.h" #include "iwl-modparams.h" #include "fw/error-dump.h" #ifdef CPTCFG_IWLWIFI_THERMAL_DEBUGFS static ssize_t iwl_dbgfs_tt_tx_backoff_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { int i = 0; int ret; u32 temperature, backoff; char *value_str; char *seps = "\n "; char *buf_ptr = buf; struct iwl_tt_tx_backoff new_backoff_values[TT_TX_BACKOFF_SIZE]; mutex_lock(&mvm->mutex); while ((value_str = strsep(&buf_ptr, seps))) { if (sscanf(value_str, "%u=%u", &temperature, &backoff) != 2) break; if (temperature >= mvm->thermal_throttle.params.ct_kill_entry || backoff < mvm->thermal_throttle.min_backoff) { ret = -EINVAL; goto out; } if (i == TT_TX_BACKOFF_SIZE) { ret = -EINVAL; goto out; } new_backoff_values[i].backoff = backoff; new_backoff_values[i].temperature = temperature; i++; } if (i != TT_TX_BACKOFF_SIZE) { ret = -EINVAL; goto out; } memcpy(mvm->thermal_throttle.params.tx_backoff, new_backoff_values, sizeof(mvm->thermal_throttle.params.tx_backoff)); ret = count; out: mutex_unlock(&mvm->mutex); return ret; } static ssize_t iwl_dbgfs_tt_tx_backoff_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; struct iwl_tt_tx_backoff *tx_backoff = mvm->thermal_throttle.params.tx_backoff; /* we need 10 chars per line: 3 chars for the temperature + 1 * for the equal sign + 5 for the backoff value + end of line. 
*/ char buf[TT_TX_BACKOFF_SIZE * 10 + 1]; int i, pos = 0, bufsz = sizeof(buf); mutex_lock(&mvm->mutex); for (i = 0; i < TT_TX_BACKOFF_SIZE; i++) { pos += scnprintf(buf + pos, bufsz - pos, "%d=%d\n", tx_backoff[i].temperature, tx_backoff[i].backoff); } mutex_unlock(&mvm->mutex); return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } #endif static ssize_t iwl_dbgfs_ctdp_budget_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; char buf[16]; int pos, budget; if (!iwl_mvm_is_ctdp_supported(mvm)) return -EOPNOTSUPP; if (!iwl_mvm_firmware_running(mvm) || mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) return -EIO; mutex_lock(&mvm->mutex); budget = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_REPORT, 0); mutex_unlock(&mvm->mutex); if (budget < 0) return budget; pos = scnprintf(buf, sizeof(buf), "%d\n", budget); return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } static ssize_t iwl_dbgfs_stop_ctdp_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { int ret; if (!iwl_mvm_is_ctdp_supported(mvm)) return -EOPNOTSUPP; if (!iwl_mvm_firmware_running(mvm) || mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) return -EIO; mutex_lock(&mvm->mutex); ret = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_STOP, 0); mutex_unlock(&mvm->mutex); return ret ?: count; } static ssize_t iwl_dbgfs_force_ctkill_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { if (!iwl_mvm_firmware_running(mvm) || mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) return -EIO; iwl_mvm_enter_ctkill(mvm); return count; } static ssize_t iwl_dbgfs_tx_flush_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { int ret; u32 flush_arg; if (!iwl_mvm_firmware_running(mvm) || mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) return -EIO; if (kstrtou32(buf, 0, &flush_arg)) return -EINVAL; if (iwl_mvm_has_new_tx_api(mvm)) { IWL_DEBUG_TX_QUEUES(mvm, "FLUSHING all tids queues on sta_id = %d\n", flush_arg); mutex_lock(&mvm->mutex); ret = iwl_mvm_flush_sta_tids(mvm, flush_arg, 0xFF, 0) ? : count; mutex_unlock(&mvm->mutex); return ret; } IWL_DEBUG_TX_QUEUES(mvm, "FLUSHING queues mask to flush = 0x%x\n", flush_arg); mutex_lock(&mvm->mutex); ret = iwl_mvm_flush_tx_path(mvm, flush_arg, 0) ? : count; mutex_unlock(&mvm->mutex); return ret; } static ssize_t iwl_dbgfs_sta_drain_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { struct iwl_mvm_sta *mvmsta; int sta_id, drain, ret; if (!iwl_mvm_firmware_running(mvm) || mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) return -EIO; if (sscanf(buf, "%d %d", &sta_id, &drain) != 2) return -EINVAL; if (sta_id < 0 || sta_id >= IWL_MVM_STATION_COUNT) return -EINVAL; if (drain < 0 || drain > 1) return -EINVAL; mutex_lock(&mvm->mutex); mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id); if (!mvmsta) ret = -ENOENT; else ret = iwl_mvm_drain_sta(mvm, mvmsta, drain) ? 
: count; mutex_unlock(&mvm->mutex); return ret; } static ssize_t iwl_dbgfs_sram_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; const struct fw_img *img; unsigned int ofs, len; size_t ret; u8 *ptr; if (!iwl_mvm_firmware_running(mvm)) return -EINVAL; /* default is to dump the entire data segment */ img = &mvm->fw->img[mvm->fwrt.cur_fw_img]; ofs = img->sec[IWL_UCODE_SECTION_DATA].offset; len = img->sec[IWL_UCODE_SECTION_DATA].len; if (mvm->dbgfs_sram_len) { ofs = mvm->dbgfs_sram_offset; len = mvm->dbgfs_sram_len; } ptr = kzalloc(len, GFP_KERNEL); if (!ptr) return -ENOMEM; iwl_trans_read_mem_bytes(mvm->trans, ofs, ptr, len); ret = simple_read_from_buffer(user_buf, count, ppos, ptr, len); kfree(ptr); return ret; } static ssize_t iwl_dbgfs_sram_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { const struct fw_img *img; u32 offset, len; u32 img_offset, img_len; if (!iwl_mvm_firmware_running(mvm)) return -EINVAL; img = &mvm->fw->img[mvm->fwrt.cur_fw_img]; img_offset = img->sec[IWL_UCODE_SECTION_DATA].offset; img_len = img->sec[IWL_UCODE_SECTION_DATA].len; if (sscanf(buf, "%x,%x", &offset, &len) == 2) { if ((offset & 0x3) || (len & 0x3)) return -EINVAL; if (offset + len > img_offset + img_len) return -EINVAL; mvm->dbgfs_sram_offset = offset; mvm->dbgfs_sram_len = len; } else { mvm->dbgfs_sram_offset = 0; mvm->dbgfs_sram_len = 0; } return count; } static ssize_t iwl_dbgfs_set_nic_temperature_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; char buf[16]; int pos; if (!mvm->temperature_test) pos = scnprintf(buf , sizeof(buf), "disabled\n"); else pos = scnprintf(buf , sizeof(buf), "%d\n", mvm->temperature); return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } /* * Set NIC Temperature * Cause the driver to ignore the actual NIC temperature reported by the FW * Enable: any value between IWL_MVM_DEBUG_SET_TEMPERATURE_MIN - * IWL_MVM_DEBUG_SET_TEMPERATURE_MAX * Disable: IWL_MVM_DEBUG_SET_TEMPERATURE_DISABLE */ static ssize_t iwl_dbgfs_set_nic_temperature_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { int temperature; if (!iwl_mvm_firmware_running(mvm) && !mvm->temperature_test) return -EIO; if (kstrtoint(buf, 10, &temperature)) return -EINVAL; /* not a legal temperature */ if ((temperature > IWL_MVM_DEBUG_SET_TEMPERATURE_MAX && temperature != IWL_MVM_DEBUG_SET_TEMPERATURE_DISABLE) || temperature < IWL_MVM_DEBUG_SET_TEMPERATURE_MIN) return -EINVAL; mutex_lock(&mvm->mutex); if (temperature == IWL_MVM_DEBUG_SET_TEMPERATURE_DISABLE) { if (!mvm->temperature_test) goto out; mvm->temperature_test = false; /* Since we can't read the temp while awake, just set * it to zero until we get the next RX stats from the * firmware. */ mvm->temperature = 0; } else { mvm->temperature_test = true; mvm->temperature = temperature; } IWL_DEBUG_TEMP(mvm, "%sabling debug set temperature (temp = %d)\n", mvm->temperature_test ? 
"En" : "Dis" , mvm->temperature); /* handle the temperature change */ iwl_mvm_tt_handler(mvm); out: mutex_unlock(&mvm->mutex); return count; } static ssize_t iwl_dbgfs_nic_temp_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; char buf[16]; int pos, ret; s32 temp; if (!iwl_mvm_firmware_running(mvm)) return -EIO; mutex_lock(&mvm->mutex); ret = iwl_mvm_get_temp(mvm, &temp); mutex_unlock(&mvm->mutex); if (ret) return -EIO; pos = scnprintf(buf , sizeof(buf), "%d\n", temp); return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } #ifdef CONFIG_ACPI static ssize_t iwl_dbgfs_sar_geo_profile_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; char buf[256]; int pos = 0; int bufsz = sizeof(buf); int tbl_idx; u8 *value; if (!iwl_mvm_firmware_running(mvm)) return -EIO; mutex_lock(&mvm->mutex); tbl_idx = iwl_mvm_get_sar_geo_profile(mvm); if (tbl_idx < 0) { mutex_unlock(&mvm->mutex); return tbl_idx; } if (!tbl_idx) { pos = scnprintf(buf, bufsz, "SAR geographic profile disabled\n"); } else { value = &mvm->geo_profiles[tbl_idx - 1].values[0]; pos += scnprintf(buf + pos, bufsz - pos, "Use geographic profile %d\n", tbl_idx); pos += scnprintf(buf + pos, bufsz - pos, "2.4GHz:\n\tChain A offset: %hhd dBm\n\tChain B offset: %hhd dBm\n\tmax tx power: %hhd dBm\n", value[1], value[2], value[0]); pos += scnprintf(buf + pos, bufsz - pos, "5.2GHz:\n\tChain A offset: %hhd dBm\n\tChain B offset: %hhd dBm\n\tmax tx power: %hhd dBm\n", value[4], value[5], value[3]); } mutex_unlock(&mvm->mutex); return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } #endif static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; struct ieee80211_sta *sta; char buf[400]; int i, pos = 0, bufsz = sizeof(buf); mutex_lock(&mvm->mutex); for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) { pos += scnprintf(buf + pos, bufsz - pos, "%.2d: ", i); sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], lockdep_is_held(&mvm->mutex)); if (!sta) pos += scnprintf(buf + pos, bufsz - pos, "N/A\n"); else if (IS_ERR(sta)) pos += scnprintf(buf + pos, bufsz - pos, "%ld\n", PTR_ERR(sta)); else pos += scnprintf(buf + pos, bufsz - pos, "%pM\n", sta->addr); } mutex_unlock(&mvm->mutex); return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } static ssize_t iwl_dbgfs_rs_data_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ieee80211_sta *sta = file->private_data; struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_lq_sta_rs_fw *lq_sta = &mvmsta->lq_sta.rs_fw; struct iwl_mvm *mvm = lq_sta->pers.drv; static const size_t bufsz = 2048; char *buff; int desc = 0; ssize_t ret; buff = kmalloc(bufsz, GFP_KERNEL); if (!buff) return -ENOMEM; mutex_lock(&mvm->mutex); desc += scnprintf(buff + desc, bufsz - desc, "sta_id %d\n", lq_sta->pers.sta_id); desc += scnprintf(buff + desc, bufsz - desc, "fixed rate 0x%X\n", lq_sta->pers.dbg_fixed_rate); desc += scnprintf(buff + desc, bufsz - desc, "A-MPDU size limit %d\n", lq_sta->pers.dbg_agg_frame_count_lim); desc += scnprintf(buff + desc, bufsz - desc, "valid_tx_ant %s%s%s\n", (iwl_mvm_get_valid_tx_ant(mvm) & ANT_A) ? "ANT_A," : "", (iwl_mvm_get_valid_tx_ant(mvm) & ANT_B) ? "ANT_B," : "", (iwl_mvm_get_valid_tx_ant(mvm) & ANT_C) ? 
"ANT_C" : ""); desc += scnprintf(buff + desc, bufsz - desc, "last tx rate=0x%X ", lq_sta->last_rate_n_flags); desc += rs_pretty_print_rate(buff + desc, bufsz - desc, lq_sta->last_rate_n_flags); mutex_unlock(&mvm->mutex); ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc); kfree(buff); return ret; } static ssize_t iwl_dbgfs_amsdu_len_write(struct ieee80211_sta *sta, char *buf, size_t count, loff_t *ppos) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); int i; u16 amsdu_len; if (kstrtou16(buf, 0, &amsdu_len)) return -EINVAL; if (amsdu_len) { mvmsta->orig_amsdu_len = sta->max_amsdu_len; sta->max_amsdu_len = amsdu_len; for (i = 0; i < ARRAY_SIZE(sta->max_tid_amsdu_len); i++) sta->max_tid_amsdu_len[i] = amsdu_len; } else { sta->max_amsdu_len = mvmsta->orig_amsdu_len; mvmsta->orig_amsdu_len = 0; } return count; } static ssize_t iwl_dbgfs_amsdu_len_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ieee80211_sta *sta = file->private_data; struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); char buf[32]; int pos; pos = scnprintf(buf, sizeof(buf), "current %d ", sta->max_amsdu_len); pos += scnprintf(buf + pos, sizeof(buf) - pos, "stored %d\n", mvmsta->orig_amsdu_len); return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } static ssize_t iwl_dbgfs_disable_power_off_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; char buf[64]; int bufsz = sizeof(buf); int pos = 0; pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off_d0=%d\n", mvm->disable_power_off); pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off_d3=%d\n", mvm->disable_power_off_d3); return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } static ssize_t iwl_dbgfs_disable_power_off_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { int ret, val; if (!iwl_mvm_firmware_running(mvm)) return -EIO; if (!strncmp("disable_power_off_d0=", buf, 21)) { if (sscanf(buf + 21, "%d", &val) != 1) return -EINVAL; mvm->disable_power_off = val; } else if (!strncmp("disable_power_off_d3=", buf, 21)) { if (sscanf(buf + 21, "%d", &val) != 1) return -EINVAL; mvm->disable_power_off_d3 = val; } else { return -EINVAL; } mutex_lock(&mvm->mutex); ret = iwl_mvm_power_update_device(mvm); mutex_unlock(&mvm->mutex); return ret ?: count; } static int iwl_mvm_coex_dump_mbox(struct iwl_bt_coex_profile_notif *notif, char *buf, int pos, int bufsz) { pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw0:\n"); BT_MBOX_PRINT(0, LE_SLAVE_LAT, false); BT_MBOX_PRINT(0, LE_PROF1, false); BT_MBOX_PRINT(0, LE_PROF2, false); BT_MBOX_PRINT(0, LE_PROF_OTHER, false); BT_MBOX_PRINT(0, CHL_SEQ_N, false); BT_MBOX_PRINT(0, INBAND_S, false); BT_MBOX_PRINT(0, LE_MIN_RSSI, false); BT_MBOX_PRINT(0, LE_SCAN, false); BT_MBOX_PRINT(0, LE_ADV, false); BT_MBOX_PRINT(0, LE_MAX_TX_POWER, false); BT_MBOX_PRINT(0, OPEN_CON_1, true); pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw1:\n"); BT_MBOX_PRINT(1, BR_MAX_TX_POWER, false); BT_MBOX_PRINT(1, IP_SR, false); BT_MBOX_PRINT(1, LE_MSTR, false); BT_MBOX_PRINT(1, AGGR_TRFC_LD, false); BT_MBOX_PRINT(1, MSG_TYPE, false); BT_MBOX_PRINT(1, SSN, true); pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw2:\n"); BT_MBOX_PRINT(2, SNIFF_ACT, false); BT_MBOX_PRINT(2, PAG, false); BT_MBOX_PRINT(2, INQUIRY, false); BT_MBOX_PRINT(2, CONN, false); BT_MBOX_PRINT(2, SNIFF_INTERVAL, false); BT_MBOX_PRINT(2, DISC, false); BT_MBOX_PRINT(2, SCO_TX_ACT, false); BT_MBOX_PRINT(2, SCO_RX_ACT, false); BT_MBOX_PRINT(2, 
ESCO_RE_TX, false); BT_MBOX_PRINT(2, SCO_DURATION, true); pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw3:\n"); BT_MBOX_PRINT(3, SCO_STATE, false); BT_MBOX_PRINT(3, SNIFF_STATE, false); BT_MBOX_PRINT(3, A2DP_STATE, false); BT_MBOX_PRINT(3, A2DP_SRC, false); BT_MBOX_PRINT(3, ACL_STATE, false); BT_MBOX_PRINT(3, MSTR_STATE, false); BT_MBOX_PRINT(3, OBX_STATE, false); BT_MBOX_PRINT(3, OPEN_CON_2, false); BT_MBOX_PRINT(3, TRAFFIC_LOAD, false); BT_MBOX_PRINT(3, CHL_SEQN_LSB, false); BT_MBOX_PRINT(3, INBAND_P, false); BT_MBOX_PRINT(3, MSG_TYPE_2, false); BT_MBOX_PRINT(3, SSN_2, false); BT_MBOX_PRINT(3, UPDATE_REQUEST, true); return pos; } static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; struct iwl_bt_coex_profile_notif *notif = &mvm->last_bt_notif; char *buf; int ret, pos = 0, bufsz = sizeof(char) * 1024; buf = kmalloc(bufsz, GFP_KERNEL); if (!buf) return -ENOMEM; mutex_lock(&mvm->mutex); pos += iwl_mvm_coex_dump_mbox(notif, buf, pos, bufsz); pos += scnprintf(buf + pos, bufsz - pos, "bt_ci_compliance = %d\n", notif->bt_ci_compliance); pos += scnprintf(buf + pos, bufsz - pos, "primary_ch_lut = %d\n", le32_to_cpu(notif->primary_ch_lut)); pos += scnprintf(buf + pos, bufsz - pos, "secondary_ch_lut = %d\n", le32_to_cpu(notif->secondary_ch_lut)); pos += scnprintf(buf + pos, bufsz - pos, "bt_activity_grading = %d\n", le32_to_cpu(notif->bt_activity_grading)); pos += scnprintf(buf + pos, bufsz - pos, "bt_rrc = %d\n", notif->rrc_status & 0xF); pos += scnprintf(buf + pos, bufsz - pos, "bt_ttc = %d\n", notif->ttc_status & 0xF); pos += scnprintf(buf + pos, bufsz - pos, "sync_sco = %d\n", IWL_MVM_BT_COEX_SYNC2SCO); pos += scnprintf(buf + pos, bufsz - pos, "mplut = %d\n", IWL_MVM_BT_COEX_MPLUT); mutex_unlock(&mvm->mutex); ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); kfree(buf); return ret; } #undef BT_MBOX_PRINT static ssize_t iwl_dbgfs_bt_cmd_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; struct iwl_bt_coex_ci_cmd *cmd = &mvm->last_bt_ci_cmd; char buf[256]; int bufsz = sizeof(buf); int pos = 0; mutex_lock(&mvm->mutex); pos += scnprintf(buf + pos, bufsz - pos, "Channel inhibition CMD\n"); pos += scnprintf(buf + pos, bufsz - pos, "\tPrimary Channel Bitmap 0x%016llx\n", le64_to_cpu(cmd->bt_primary_ci)); pos += scnprintf(buf + pos, bufsz - pos, "\tSecondary Channel Bitmap 0x%016llx\n", le64_to_cpu(cmd->bt_secondary_ci)); mutex_unlock(&mvm->mutex); return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } static ssize_t iwl_dbgfs_bt_tx_prio_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { u32 bt_tx_prio; if (sscanf(buf, "%u", &bt_tx_prio) != 1) return -EINVAL; if (bt_tx_prio > 4) return -EINVAL; mvm->bt_tx_prio = bt_tx_prio; return count; } static ssize_t iwl_dbgfs_bt_force_ant_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { static const char * const modes_str[BT_FORCE_ANT_MAX] = { [BT_FORCE_ANT_DIS] = "dis", [BT_FORCE_ANT_AUTO] = "auto", [BT_FORCE_ANT_BT] = "bt", [BT_FORCE_ANT_WIFI] = "wifi", }; int ret, bt_force_ant_mode; ret = match_string(modes_str, ARRAY_SIZE(modes_str), buf); if (ret < 0) return ret; bt_force_ant_mode = ret; ret = 0; mutex_lock(&mvm->mutex); if (mvm->bt_force_ant_mode == bt_force_ant_mode) goto out; mvm->bt_force_ant_mode = bt_force_ant_mode; IWL_DEBUG_COEX(mvm, "Force mode: %s\n", modes_str[mvm->bt_force_ant_mode]); if (iwl_mvm_firmware_running(mvm)) ret 
= iwl_mvm_send_bt_init_conf(mvm); else ret = 0; out: mutex_unlock(&mvm->mutex); return ret ?: count; } static ssize_t iwl_dbgfs_fw_ver_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; char *buff, *pos, *endpos; static const size_t bufsz = 1024; int ret; buff = kmalloc(bufsz, GFP_KERNEL); if (!buff) return -ENOMEM; pos = buff; endpos = pos + bufsz; pos += scnprintf(pos, endpos - pos, "FW prefix: %s\n", mvm->trans->cfg->fw_name_pre); pos += scnprintf(pos, endpos - pos, "FW: %s\n", mvm->fwrt.fw->human_readable); pos += scnprintf(pos, endpos - pos, "Device: %s\n", mvm->fwrt.trans->cfg->name); pos += scnprintf(pos, endpos - pos, "Bus: %s\n", mvm->fwrt.dev->bus->name); ret = simple_read_from_buffer(user_buf, count, ppos, buff, pos - buff); kfree(buff); return ret; } #define PRINT_STATS_LE32(_struct, _memb) \ pos += scnprintf(buf + pos, bufsz - pos, \ fmt_table, #_memb, \ le32_to_cpu(_struct->_memb)) static ssize_t iwl_dbgfs_fw_rx_stats_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; static const char *fmt_table = "\t%-30s %10u\n"; static const char *fmt_header = "%-32s\n"; int pos = 0; char *buf; int ret; size_t bufsz; if (iwl_mvm_has_new_rx_stats_api(mvm)) bufsz = ((sizeof(struct mvm_statistics_rx) / sizeof(__le32)) * 43) + (4 * 33) + 1; else /* 43 = size of each data line; 33 = size of each header */ bufsz = ((sizeof(struct mvm_statistics_rx_v3) / sizeof(__le32)) * 43) + (4 * 33) + 1; buf = kzalloc(bufsz, GFP_KERNEL); if (!buf) return -ENOMEM; mutex_lock(&mvm->mutex); if (iwl_mvm_firmware_running(mvm)) iwl_mvm_request_statistics(mvm, false); pos += scnprintf(buf + pos, bufsz - pos, fmt_header, "Statistics_Rx - OFDM"); if (!iwl_mvm_has_new_rx_stats_api(mvm)) { struct mvm_statistics_rx_phy_v2 *ofdm = &mvm->rx_stats_v3.ofdm; PRINT_STATS_LE32(ofdm, ina_cnt); PRINT_STATS_LE32(ofdm, fina_cnt); PRINT_STATS_LE32(ofdm, plcp_err); PRINT_STATS_LE32(ofdm, crc32_err); PRINT_STATS_LE32(ofdm, overrun_err); PRINT_STATS_LE32(ofdm, early_overrun_err); PRINT_STATS_LE32(ofdm, crc32_good); PRINT_STATS_LE32(ofdm, false_alarm_cnt); PRINT_STATS_LE32(ofdm, fina_sync_err_cnt); PRINT_STATS_LE32(ofdm, sfd_timeout); PRINT_STATS_LE32(ofdm, fina_timeout); PRINT_STATS_LE32(ofdm, unresponded_rts); PRINT_STATS_LE32(ofdm, rxe_frame_lmt_overrun); PRINT_STATS_LE32(ofdm, sent_ack_cnt); PRINT_STATS_LE32(ofdm, sent_cts_cnt); PRINT_STATS_LE32(ofdm, sent_ba_rsp_cnt); PRINT_STATS_LE32(ofdm, dsp_self_kill); PRINT_STATS_LE32(ofdm, mh_format_err); PRINT_STATS_LE32(ofdm, re_acq_main_rssi_sum); PRINT_STATS_LE32(ofdm, reserved); } else { struct mvm_statistics_rx_phy *ofdm = &mvm->rx_stats.ofdm; PRINT_STATS_LE32(ofdm, unresponded_rts); PRINT_STATS_LE32(ofdm, rxe_frame_lmt_overrun); PRINT_STATS_LE32(ofdm, sent_ba_rsp_cnt); PRINT_STATS_LE32(ofdm, dsp_self_kill); PRINT_STATS_LE32(ofdm, reserved); } pos += scnprintf(buf + pos, bufsz - pos, fmt_header, "Statistics_Rx - CCK"); if (!iwl_mvm_has_new_rx_stats_api(mvm)) { struct mvm_statistics_rx_phy_v2 *cck = &mvm->rx_stats_v3.cck; PRINT_STATS_LE32(cck, ina_cnt); PRINT_STATS_LE32(cck, fina_cnt); PRINT_STATS_LE32(cck, plcp_err); PRINT_STATS_LE32(cck, crc32_err); PRINT_STATS_LE32(cck, overrun_err); PRINT_STATS_LE32(cck, early_overrun_err); PRINT_STATS_LE32(cck, crc32_good); PRINT_STATS_LE32(cck, false_alarm_cnt); PRINT_STATS_LE32(cck, fina_sync_err_cnt); PRINT_STATS_LE32(cck, sfd_timeout); PRINT_STATS_LE32(cck, fina_timeout); PRINT_STATS_LE32(cck, unresponded_rts); 
PRINT_STATS_LE32(cck, rxe_frame_lmt_overrun); PRINT_STATS_LE32(cck, sent_ack_cnt); PRINT_STATS_LE32(cck, sent_cts_cnt); PRINT_STATS_LE32(cck, sent_ba_rsp_cnt); PRINT_STATS_LE32(cck, dsp_self_kill); PRINT_STATS_LE32(cck, mh_format_err); PRINT_STATS_LE32(cck, re_acq_main_rssi_sum); PRINT_STATS_LE32(cck, reserved); } else { struct mvm_statistics_rx_phy *cck = &mvm->rx_stats.cck; PRINT_STATS_LE32(cck, unresponded_rts); PRINT_STATS_LE32(cck, rxe_frame_lmt_overrun); PRINT_STATS_LE32(cck, sent_ba_rsp_cnt); PRINT_STATS_LE32(cck, dsp_self_kill); PRINT_STATS_LE32(cck, reserved); } pos += scnprintf(buf + pos, bufsz - pos, fmt_header, "Statistics_Rx - GENERAL"); if (!iwl_mvm_has_new_rx_stats_api(mvm)) { struct mvm_statistics_rx_non_phy_v3 *general = &mvm->rx_stats_v3.general; PRINT_STATS_LE32(general, bogus_cts); PRINT_STATS_LE32(general, bogus_ack); PRINT_STATS_LE32(general, non_bssid_frames); PRINT_STATS_LE32(general, filtered_frames); PRINT_STATS_LE32(general, non_channel_beacons); PRINT_STATS_LE32(general, channel_beacons); PRINT_STATS_LE32(general, num_missed_bcon); PRINT_STATS_LE32(general, adc_rx_saturation_time); PRINT_STATS_LE32(general, ina_detection_search_time); PRINT_STATS_LE32(general, beacon_silence_rssi_a); PRINT_STATS_LE32(general, beacon_silence_rssi_b); PRINT_STATS_LE32(general, beacon_silence_rssi_c); PRINT_STATS_LE32(general, interference_data_flag); PRINT_STATS_LE32(general, channel_load); PRINT_STATS_LE32(general, dsp_false_alarms); PRINT_STATS_LE32(general, beacon_rssi_a); PRINT_STATS_LE32(general, beacon_rssi_b); PRINT_STATS_LE32(general, beacon_rssi_c); PRINT_STATS_LE32(general, beacon_energy_a); PRINT_STATS_LE32(general, beacon_energy_b); PRINT_STATS_LE32(general, beacon_energy_c); PRINT_STATS_LE32(general, num_bt_kills); PRINT_STATS_LE32(general, mac_id); PRINT_STATS_LE32(general, directed_data_mpdu); } else { struct mvm_statistics_rx_non_phy *general = &mvm->rx_stats.general; PRINT_STATS_LE32(general, bogus_cts); PRINT_STATS_LE32(general, bogus_ack); PRINT_STATS_LE32(general, non_channel_beacons); PRINT_STATS_LE32(general, channel_beacons); PRINT_STATS_LE32(general, num_missed_bcon); PRINT_STATS_LE32(general, adc_rx_saturation_time); PRINT_STATS_LE32(general, ina_detection_search_time); PRINT_STATS_LE32(general, beacon_silence_rssi_a); PRINT_STATS_LE32(general, beacon_silence_rssi_b); PRINT_STATS_LE32(general, beacon_silence_rssi_c); PRINT_STATS_LE32(general, interference_data_flag); PRINT_STATS_LE32(general, channel_load); PRINT_STATS_LE32(general, beacon_rssi_a); PRINT_STATS_LE32(general, beacon_rssi_b); PRINT_STATS_LE32(general, beacon_rssi_c); PRINT_STATS_LE32(general, beacon_energy_a); PRINT_STATS_LE32(general, beacon_energy_b); PRINT_STATS_LE32(general, beacon_energy_c); PRINT_STATS_LE32(general, num_bt_kills); PRINT_STATS_LE32(general, mac_id); } pos += scnprintf(buf + pos, bufsz - pos, fmt_header, "Statistics_Rx - HT"); if (!iwl_mvm_has_new_rx_stats_api(mvm)) { struct mvm_statistics_rx_ht_phy_v1 *ht = &mvm->rx_stats_v3.ofdm_ht; PRINT_STATS_LE32(ht, plcp_err); PRINT_STATS_LE32(ht, overrun_err); PRINT_STATS_LE32(ht, early_overrun_err); PRINT_STATS_LE32(ht, crc32_good); PRINT_STATS_LE32(ht, crc32_err); PRINT_STATS_LE32(ht, mh_format_err); PRINT_STATS_LE32(ht, agg_crc32_good); PRINT_STATS_LE32(ht, agg_mpdu_cnt); PRINT_STATS_LE32(ht, agg_cnt); PRINT_STATS_LE32(ht, unsupport_mcs); } else { struct mvm_statistics_rx_ht_phy *ht = &mvm->rx_stats.ofdm_ht; PRINT_STATS_LE32(ht, mh_format_err); PRINT_STATS_LE32(ht, agg_mpdu_cnt); PRINT_STATS_LE32(ht, agg_cnt); 
PRINT_STATS_LE32(ht, unsupport_mcs); } mutex_unlock(&mvm->mutex); ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); kfree(buf); return ret; } #undef PRINT_STATS_LE32 static ssize_t iwl_dbgfs_frame_stats_read(struct iwl_mvm *mvm, char __user *user_buf, size_t count, loff_t *ppos, struct iwl_mvm_frame_stats *stats) { char *buff, *pos, *endpos; int idx, i; int ret; static const size_t bufsz = 1024; buff = kmalloc(bufsz, GFP_KERNEL); if (!buff) return -ENOMEM; spin_lock_bh(&mvm->drv_stats_lock); pos = buff; endpos = pos + bufsz; pos += scnprintf(pos, endpos - pos, "Legacy/HT/VHT\t:\t%d/%d/%d\n", stats->legacy_frames, stats->ht_frames, stats->vht_frames); pos += scnprintf(pos, endpos - pos, "20/40/80\t:\t%d/%d/%d\n", stats->bw_20_frames, stats->bw_40_frames, stats->bw_80_frames); pos += scnprintf(pos, endpos - pos, "NGI/SGI\t\t:\t%d/%d\n", stats->ngi_frames, stats->sgi_frames); pos += scnprintf(pos, endpos - pos, "SISO/MIMO2\t:\t%d/%d\n", stats->siso_frames, stats->mimo2_frames); pos += scnprintf(pos, endpos - pos, "FAIL/SCSS\t:\t%d/%d\n", stats->fail_frames, stats->success_frames); pos += scnprintf(pos, endpos - pos, "MPDUs agg\t:\t%d\n", stats->agg_frames); pos += scnprintf(pos, endpos - pos, "A-MPDUs\t\t:\t%d\n", stats->ampdu_count); pos += scnprintf(pos, endpos - pos, "Avg MPDUs/A-MPDU:\t%d\n", stats->ampdu_count > 0 ? (stats->agg_frames / stats->ampdu_count) : 0); pos += scnprintf(pos, endpos - pos, "Last Rates\n"); idx = stats->last_frame_idx - 1; for (i = 0; i < ARRAY_SIZE(stats->last_rates); i++) { idx = (idx + 1) % ARRAY_SIZE(stats->last_rates); if (stats->last_rates[idx] == 0) continue; pos += scnprintf(pos, endpos - pos, "Rate[%d]: ", (int)(ARRAY_SIZE(stats->last_rates) - i)); pos += rs_pretty_print_rate(pos, endpos - pos, stats->last_rates[idx]); } spin_unlock_bh(&mvm->drv_stats_lock); ret = simple_read_from_buffer(user_buf, count, ppos, buff, pos - buff); kfree(buff); return ret; } static ssize_t iwl_dbgfs_drv_rx_stats_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; return iwl_dbgfs_frame_stats_read(mvm, user_buf, count, ppos, &mvm->drv_rx_stats); } static ssize_t iwl_dbgfs_fw_restart_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { int __maybe_unused ret; if (!iwl_mvm_firmware_running(mvm)) return -EIO; mutex_lock(&mvm->mutex); /* allow one more restart that we're provoking here */ if (mvm->fw_restart >= 0) mvm->fw_restart++; /* take the return value to make compiler happy - it will fail anyway */ ret = iwl_mvm_send_cmd_pdu(mvm, REPLY_ERROR, 0, 0, NULL); mutex_unlock(&mvm->mutex); return count; } static ssize_t iwl_dbgfs_fw_nmi_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { if (!iwl_mvm_firmware_running(mvm)) return -EIO; iwl_force_nmi(mvm->trans); return count; } static ssize_t iwl_dbgfs_scan_ant_rxchain_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; int pos = 0; char buf[32]; const size_t bufsz = sizeof(buf); /* print which antennas were set for the scan command by the user */ pos += scnprintf(buf + pos, bufsz - pos, "Antennas for scan: "); if (mvm->scan_rx_ant & ANT_A) pos += scnprintf(buf + pos, bufsz - pos, "A"); if (mvm->scan_rx_ant & ANT_B) pos += scnprintf(buf + pos, bufsz - pos, "B"); if (mvm->scan_rx_ant & ANT_C) pos += scnprintf(buf + pos, bufsz - pos, "C"); pos += scnprintf(buf + pos, bufsz - pos, " (%hhx)\n", mvm->scan_rx_ant); return simple_read_from_buffer(user_buf, count, 
ppos, buf, pos); } static ssize_t iwl_dbgfs_scan_ant_rxchain_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { u8 scan_rx_ant; if (!iwl_mvm_firmware_running(mvm)) return -EIO; if (sscanf(buf, "%hhx", &scan_rx_ant) != 1) return -EINVAL; if (scan_rx_ant > ANT_ABC) return -EINVAL; if (scan_rx_ant & ~(iwl_mvm_get_valid_rx_ant(mvm))) return -EINVAL; if (mvm->scan_rx_ant != scan_rx_ant) { mvm->scan_rx_ant = scan_rx_ant; if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) iwl_mvm_config_scan(mvm); } return count; } static ssize_t iwl_dbgfs_indirection_tbl_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { struct iwl_rss_config_cmd cmd = { .flags = cpu_to_le32(IWL_RSS_ENABLE), .hash_mask = IWL_RSS_HASH_TYPE_IPV4_TCP | IWL_RSS_HASH_TYPE_IPV4_UDP | IWL_RSS_HASH_TYPE_IPV4_PAYLOAD | IWL_RSS_HASH_TYPE_IPV6_TCP | IWL_RSS_HASH_TYPE_IPV6_UDP | IWL_RSS_HASH_TYPE_IPV6_PAYLOAD, }; int ret, i, num_repeats, nbytes = count / 2; ret = hex2bin(cmd.indirection_table, buf, nbytes); if (ret) return ret; /* * The input is the redirection table, partial or full. * Repeat the pattern if needed. * For example, input of 01020F will be repeated 42 times, * indirecting RSS hash results to queues 1, 2, 15 (skipping * queues 3 - 14). */ num_repeats = ARRAY_SIZE(cmd.indirection_table) / nbytes; for (i = 1; i < num_repeats; i++) memcpy(&cmd.indirection_table[i * nbytes], cmd.indirection_table, nbytes); /* handle cut in the middle pattern for the last places */ memcpy(&cmd.indirection_table[i * nbytes], cmd.indirection_table, ARRAY_SIZE(cmd.indirection_table) % nbytes); netdev_rss_key_fill(cmd.secret_key, sizeof(cmd.secret_key)); mutex_lock(&mvm->mutex); if (iwl_mvm_firmware_running(mvm)) ret = iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd); else ret = 0; mutex_unlock(&mvm->mutex); return ret ?: count; } static ssize_t iwl_dbgfs_inject_packet_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { struct iwl_rx_cmd_buffer rxb = { ._rx_page_order = 0, .truesize = 0, /* not used */ ._offset = 0, }; struct iwl_rx_packet *pkt; struct iwl_rx_mpdu_desc *desc; int bin_len = count / 2; int ret = -EINVAL; size_t mpdu_cmd_hdr_size = (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) ? 
sizeof(struct iwl_rx_mpdu_desc) : IWL_RX_DESC_SIZE_V1; if (!iwl_mvm_firmware_running(mvm)) return -EIO; /* supporting only 9000 descriptor */ if (!mvm->trans->cfg->mq_rx_supported) return -ENOTSUPP; rxb._page = alloc_pages(GFP_ATOMIC, 0); if (!rxb._page) return -ENOMEM; pkt = rxb_addr(&rxb); ret = hex2bin(page_address(rxb._page), buf, bin_len); if (ret) goto out; /* avoid invalid memory access */ if (bin_len < sizeof(*pkt) + mpdu_cmd_hdr_size) goto out; /* check this is RX packet */ if (WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd) != WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD)) goto out; /* check the length in metadata matches actual received length */ desc = (void *)pkt->data; if (le16_to_cpu(desc->mpdu_len) != (bin_len - mpdu_cmd_hdr_size - sizeof(*pkt))) goto out; local_bh_disable(); iwl_mvm_rx_mpdu_mq(mvm, NULL, &rxb, 0); local_bh_enable(); ret = 0; out: iwl_free_rxb(&rxb); return ret ?: count; } static int _iwl_dbgfs_inject_beacon_ie(struct iwl_mvm *mvm, char *bin, int len) { struct ieee80211_vif *vif; struct iwl_mvm_vif *mvmvif; struct sk_buff *beacon; struct ieee80211_tx_info *info; struct iwl_mac_beacon_cmd beacon_cmd = {}; u8 rate; u16 flags; int i; len /= 2; /* Element len should be represented by u8 */ if (len >= U8_MAX) return -EINVAL; if (!iwl_mvm_firmware_running(mvm)) return -EIO; if (!iwl_mvm_has_new_tx_api(mvm) && !fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_NEW_BEACON_TEMPLATE)) return -EINVAL; rcu_read_lock(); for (i = 0; i < NUM_MAC_INDEX_DRIVER; i++) { vif = iwl_mvm_rcu_dereference_vif_id(mvm, i, true); if (!vif) continue; if (vif->type == NL80211_IFTYPE_AP) break; } if (i == NUM_MAC_INDEX_DRIVER || !vif) goto out_err; mvm->hw->extra_beacon_tailroom = len; beacon = ieee80211_beacon_get_template(mvm->hw, vif, NULL); if (!beacon) goto out_err; if (len && hex2bin(skb_put_zero(beacon, len), bin, len)) { dev_kfree_skb(beacon); goto out_err; } mvm->beacon_inject_active = true; mvmvif = iwl_mvm_vif_from_mac80211(vif); info = IEEE80211_SKB_CB(beacon); rate = iwl_mvm_mac_ctxt_get_lowest_rate(info, vif); flags = iwl_mvm_mac80211_idx_to_hwrate(rate); if (rate == IWL_FIRST_CCK_RATE) flags |= IWL_MAC_BEACON_CCK; beacon_cmd.flags = cpu_to_le16(flags); beacon_cmd.byte_cnt = cpu_to_le16((u16)beacon->len); beacon_cmd.template_id = cpu_to_le32((u32)mvmvif->id); iwl_mvm_mac_ctxt_set_tim(mvm, &beacon_cmd.tim_idx, &beacon_cmd.tim_size, beacon->data, beacon->len); mutex_lock(&mvm->mutex); iwl_mvm_mac_ctxt_send_beacon_cmd(mvm, beacon, &beacon_cmd, sizeof(beacon_cmd)); mutex_unlock(&mvm->mutex); dev_kfree_skb(beacon); rcu_read_unlock(); return 0; out_err: rcu_read_unlock(); return -EINVAL; } static ssize_t iwl_dbgfs_inject_beacon_ie_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { int ret = _iwl_dbgfs_inject_beacon_ie(mvm, buf, count); mvm->hw->extra_beacon_tailroom = 0; return ret ?: count; } static ssize_t iwl_dbgfs_inject_beacon_ie_restore_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { int ret = _iwl_dbgfs_inject_beacon_ie(mvm, NULL, 0); mvm->hw->extra_beacon_tailroom = 0; mvm->beacon_inject_active = false; return ret ?: count; } static ssize_t iwl_dbgfs_fw_dbg_conf_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; int conf; char buf[8]; const size_t bufsz = sizeof(buf); int pos = 0; mutex_lock(&mvm->mutex); conf = mvm->fwrt.dump.conf; mutex_unlock(&mvm->mutex); pos += scnprintf(buf + pos, bufsz - pos, "%d\n", conf); return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } 
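/*
 * Added usage sketch, not part of the original source: fw_dbg_conf is a
 * read/write debugfs file under the per-device iwlmvm directory (the same
 * directory mentioned in the symlink comment earlier in this file).
 * Reading returns the currently active firmware debug configuration;
 * writing an index below FW_DBG_CONF_MAX (enforced by the write handler
 * below) starts that configuration. Assuming debugfs is mounted at
 * /sys/kernel/debug and a hypothetical device at PCI address 0000:02:00.0:
 *
 *   cat /sys/kernel/debug/iwlwifi/0000:02:00.0/iwlmvm/fw_dbg_conf
 *   echo 1 > /sys/kernel/debug/iwlwifi/0000:02:00.0/iwlmvm/fw_dbg_conf
 */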
static ssize_t iwl_dbgfs_fw_dbg_conf_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { unsigned int conf_id; int ret; if (!iwl_mvm_firmware_running(mvm)) return -EIO; ret = kstrtouint(buf, 0, &conf_id); if (ret) return ret; if (WARN_ON(conf_id >= FW_DBG_CONF_MAX)) return -EINVAL; mutex_lock(&mvm->mutex); ret = iwl_fw_start_dbg_conf(&mvm->fwrt, conf_id); mutex_unlock(&mvm->mutex); return ret ?: count; } static ssize_t iwl_dbgfs_fw_dbg_collect_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { if (count == 0) return 0; iwl_fw_dbg_collect(&mvm->fwrt, FW_DBG_TRIGGER_USER, buf, (count - 1), NULL); return count; } #define ADD_TEXT(...) pos += scnprintf(buf + pos, bufsz - pos, __VA_ARGS__) #ifdef CPTCFG_IWLWIFI_BCAST_FILTERING static ssize_t iwl_dbgfs_bcast_filters_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; struct iwl_bcast_filter_cmd cmd; const struct iwl_fw_bcast_filter *filter; char *buf; int bufsz = 1024; int i, j, pos = 0; ssize_t ret; buf = kzalloc(bufsz, GFP_KERNEL); if (!buf) return -ENOMEM; mutex_lock(&mvm->mutex); if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) { ADD_TEXT("None\n"); mutex_unlock(&mvm->mutex); goto out; } mutex_unlock(&mvm->mutex); for (i = 0; cmd.filters[i].attrs[0].mask; i++) { filter = &cmd.filters[i]; ADD_TEXT("Filter [%d]:\n", i); ADD_TEXT("\tDiscard=%d\n", filter->discard); ADD_TEXT("\tFrame Type: %s\n", filter->frame_type ? "IPv4" : "Generic"); for (j = 0; j < ARRAY_SIZE(filter->attrs); j++) { const struct iwl_fw_bcast_filter_attr *attr; attr = &filter->attrs[j]; if (!attr->mask) break; ADD_TEXT("\tAttr [%d]: offset=%d (from %s), mask=0x%x, value=0x%x reserved=0x%x\n", j, attr->offset, attr->offset_type ? "IP End" : "Payload Start", be32_to_cpu(attr->mask), be32_to_cpu(attr->val), le16_to_cpu(attr->reserved1)); } } out: ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); kfree(buf); return ret; } static ssize_t iwl_dbgfs_bcast_filters_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { int pos, next_pos; struct iwl_fw_bcast_filter filter = {}; struct iwl_bcast_filter_cmd cmd; u32 filter_id, attr_id, mask, value; int err = 0; if (sscanf(buf, "%d %hhi %hhi %n", &filter_id, &filter.discard, &filter.frame_type, &pos) != 3) return -EINVAL; if (filter_id >= ARRAY_SIZE(mvm->dbgfs_bcast_filtering.cmd.filters) || filter.frame_type > BCAST_FILTER_FRAME_TYPE_IPV4) return -EINVAL; for (attr_id = 0; attr_id < ARRAY_SIZE(filter.attrs); attr_id++) { struct iwl_fw_bcast_filter_attr *attr = &filter.attrs[attr_id]; if (pos >= count) break; if (sscanf(&buf[pos], "%hhi %hhi %i %i %n", &attr->offset, &attr->offset_type, &mask, &value, &next_pos) != 4) return -EINVAL; attr->mask = cpu_to_be32(mask); attr->val = cpu_to_be32(value); if (mask) filter.num_attrs++; pos += next_pos; } mutex_lock(&mvm->mutex); memcpy(&mvm->dbgfs_bcast_filtering.cmd.filters[filter_id], &filter, sizeof(filter)); /* send updated bcast filtering configuration */ if (iwl_mvm_firmware_running(mvm) && mvm->dbgfs_bcast_filtering.override && iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) err = iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0, sizeof(cmd), &cmd); mutex_unlock(&mvm->mutex); return err ?: count; } static ssize_t iwl_dbgfs_bcast_filters_macs_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; struct iwl_bcast_filter_cmd cmd; char *buf; int bufsz = 1024; int i, pos = 0; ssize_t ret; buf = kzalloc(bufsz, 
GFP_KERNEL); if (!buf) return -ENOMEM; mutex_lock(&mvm->mutex); if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) { ADD_TEXT("None\n"); mutex_unlock(&mvm->mutex); goto out; } mutex_unlock(&mvm->mutex); for (i = 0; i < ARRAY_SIZE(cmd.macs); i++) { const struct iwl_fw_bcast_mac *mac = &cmd.macs[i]; ADD_TEXT("Mac [%d]: discard=%d attached_filters=0x%x\n", i, mac->default_discard, mac->attached_filters); } out: ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); kfree(buf); return ret; } static ssize_t iwl_dbgfs_bcast_filters_macs_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { struct iwl_bcast_filter_cmd cmd; struct iwl_fw_bcast_mac mac = {}; u32 mac_id, attached_filters; int err = 0; if (!mvm->bcast_filters) return -ENOENT; if (sscanf(buf, "%d %hhi %i", &mac_id, &mac.default_discard, &attached_filters) != 3) return -EINVAL; if (mac_id >= ARRAY_SIZE(cmd.macs) || mac.default_discard > 1 || attached_filters >= BIT(ARRAY_SIZE(cmd.filters))) return -EINVAL; mac.attached_filters = cpu_to_le16(attached_filters); mutex_lock(&mvm->mutex); memcpy(&mvm->dbgfs_bcast_filtering.cmd.macs[mac_id], &mac, sizeof(mac)); /* send updated bcast filtering configuration */ if (iwl_mvm_firmware_running(mvm) && mvm->dbgfs_bcast_filtering.override && iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) err = iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0, sizeof(cmd), &cmd); mutex_unlock(&mvm->mutex); return err ?: count; } #endif #define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \ _MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct iwl_mvm) #define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \ _MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct iwl_mvm) #define MVM_DEBUGFS_ADD_FILE_ALIAS(alias, name, parent, mode) do { \ debugfs_create_file(alias, mode, parent, mvm, \ &iwl_dbgfs_##name##_ops); \ } while (0) #define MVM_DEBUGFS_ADD_FILE(name, parent, mode) \ MVM_DEBUGFS_ADD_FILE_ALIAS(#name, name, parent, mode) #define MVM_DEBUGFS_WRITE_STA_FILE_OPS(name, bufsz) \ _MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct ieee80211_sta) #define MVM_DEBUGFS_READ_WRITE_STA_FILE_OPS(name, bufsz) \ _MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct ieee80211_sta) #define MVM_DEBUGFS_ADD_STA_FILE_ALIAS(alias, name, parent, mode) do { \ debugfs_create_file(alias, mode, parent, sta, \ &iwl_dbgfs_##name##_ops); \ } while (0) #define MVM_DEBUGFS_ADD_STA_FILE(name, parent, mode) \ MVM_DEBUGFS_ADD_STA_FILE_ALIAS(#name, name, parent, mode) static ssize_t iwl_dbgfs_prph_reg_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; int pos = 0; char buf[32]; const size_t bufsz = sizeof(buf); if (!mvm->dbgfs_prph_reg_addr) return -EINVAL; pos += scnprintf(buf + pos, bufsz - pos, "Reg 0x%x: (0x%x)\n", mvm->dbgfs_prph_reg_addr, iwl_read_prph(mvm->trans, mvm->dbgfs_prph_reg_addr)); return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } static ssize_t iwl_dbgfs_prph_reg_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { u8 args; u32 value; args = sscanf(buf, "%i %i", &mvm->dbgfs_prph_reg_addr, &value); /* if we only want to set the reg address - nothing more to do */ if (args == 1) goto out; /* otherwise, make sure we have both address and value */ if (args != 2) return -EINVAL; iwl_write_prph(mvm->trans, mvm->dbgfs_prph_reg_addr, value); out: return count; } static ssize_t iwl_dbgfs_send_echo_cmd_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { int ret; if (!iwl_mvm_firmware_running(mvm)) return -EIO; 
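/*
 * Added note, not in the original source: ECHO_CMD carries no payload and
 * is simply acknowledged by the firmware, so the synchronous send below
 * doubles as a cheap liveness check of the command path. Any write to the
 * send_echo_cmd debugfs file triggers one round trip, e.g. (hypothetical
 * path):
 *
 *   echo 1 > /sys/kernel/debug/iwlwifi/0000:02:00.0/iwlmvm/send_echo_cmd
 */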
mutex_lock(&mvm->mutex); ret = iwl_mvm_send_cmd_pdu(mvm, ECHO_CMD, 0, 0, NULL); mutex_unlock(&mvm->mutex); return ret ?: count; } struct iwl_mvm_sniffer_apply { struct iwl_mvm *mvm; u8 *bssid; u16 aid; }; static bool iwl_mvm_sniffer_apply(struct iwl_notif_wait_data *notif_data, struct iwl_rx_packet *pkt, void *data) { struct iwl_mvm_sniffer_apply *apply = data; apply->mvm->cur_aid = cpu_to_le16(apply->aid); memcpy(apply->mvm->cur_bssid, apply->bssid, sizeof(apply->mvm->cur_bssid)); return true; } static ssize_t iwl_dbgfs_he_sniffer_params_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { struct iwl_notification_wait wait; struct iwl_he_monitor_cmd he_mon_cmd = {}; struct iwl_mvm_sniffer_apply apply = { .mvm = mvm, }; u16 wait_cmds[] = { iwl_cmd_id(HE_AIR_SNIFFER_CONFIG_CMD, DATA_PATH_GROUP, 0), }; u32 aid; int ret; if (!iwl_mvm_firmware_running(mvm)) return -EIO; ret = sscanf(buf, "%x %2hhx:%2hhx:%2hhx:%2hhx:%2hhx:%2hhx", &aid, &he_mon_cmd.bssid[0], &he_mon_cmd.bssid[1], &he_mon_cmd.bssid[2], &he_mon_cmd.bssid[3], &he_mon_cmd.bssid[4], &he_mon_cmd.bssid[5]); if (ret != 7) return -EINVAL; he_mon_cmd.aid = cpu_to_le16(aid); apply.aid = aid; apply.bssid = (void *)he_mon_cmd.bssid; mutex_lock(&mvm->mutex); /* * Use the notification waiter to get our function triggered * in sequence with other RX. This ensures that frames we get * on the RX queue _before_ the new configuration is applied * still have mvm->cur_aid pointing to the old AID, and that * frames on the RX queue _after_ the firmware processed the * new configuration (and sent the response, synchronously) * get mvm->cur_aid correctly set to the new AID. */ iwl_init_notification_wait(&mvm->notif_wait, &wait, wait_cmds, ARRAY_SIZE(wait_cmds), iwl_mvm_sniffer_apply, &apply); ret = iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(HE_AIR_SNIFFER_CONFIG_CMD, DATA_PATH_GROUP, 0), 0, sizeof(he_mon_cmd), &he_mon_cmd); /* no need to really wait, we already did anyway */ iwl_remove_notification(&mvm->notif_wait, &wait); mutex_unlock(&mvm->mutex); return ret ?: count; } static ssize_t iwl_dbgfs_he_sniffer_params_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; u8 buf[32]; int len; len = scnprintf(buf, sizeof(buf), "%d %02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx\n", le16_to_cpu(mvm->cur_aid), mvm->cur_bssid[0], mvm->cur_bssid[1], mvm->cur_bssid[2], mvm->cur_bssid[3], mvm->cur_bssid[4], mvm->cur_bssid[5]); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static ssize_t iwl_dbgfs_uapsd_noagg_bssids_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; u8 buf[IWL_MVM_UAPSD_NOAGG_BSSIDS_NUM * ETH_ALEN * 3 + 1]; unsigned int pos = 0; size_t bufsz = sizeof(buf); int i; mutex_lock(&mvm->mutex); for (i = 0; i < IWL_MVM_UAPSD_NOAGG_LIST_LEN; i++) pos += scnprintf(buf + pos, bufsz - pos, "%pM\n", mvm->uapsd_noagg_bssids[i].addr); mutex_unlock(&mvm->mutex); return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } #ifdef CPTCFG_IWLMVM_VENDOR_CMDS static ssize_t iwl_dbgfs_tx_power_status_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; char buf[64]; int bufsz = sizeof(buf); int pos = 0; u32 mode = le32_to_cpu(mvm->txp_cmd.v5.v3.set_mode); bool txp_cmd_valid = mode == IWL_TX_POWER_MODE_SET_DEVICE; u16 val_24 = le16_to_cpu(mvm->txp_cmd.v5.v3.dev_24); u16 val_52l = le16_to_cpu(mvm->txp_cmd.v5.v3.dev_52_low); u16 val_52h = 
le16_to_cpu(mvm->txp_cmd.v5.v3.dev_52_high); char buf_24[15] = "(not limited)"; char buf_52l[15] = "(not limited)"; char buf_52h[15] = "(not limited)"; if (txp_cmd_valid && val_24 < IWL_DEV_MAX_TX_POWER) sprintf(buf_24, "%d.%03d dBm", val_24 >> 3, (val_24 & 7) * 125); if (txp_cmd_valid && val_52l < IWL_DEV_MAX_TX_POWER) sprintf(buf_52l, "%d.%03d dBm", val_52l >> 3, (val_52l & 7) * 125); if (txp_cmd_valid && val_52h < IWL_DEV_MAX_TX_POWER) sprintf(buf_52h, "%d.%03d dBm", val_52h >> 3, (val_52h & 7) * 125); pos += scnprintf(buf + pos, bufsz - pos, "2.4 = %s\n", buf_24); pos += scnprintf(buf + pos, bufsz - pos, "5.2L = %s\n", buf_52l); pos += scnprintf(buf + pos, bufsz - pos, "5.2H = %s\n", buf_52h); return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } static ssize_t iwl_dbgfs_csi_enabled_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; u8 buf[] = { mvm->csi_cfg.flags & IWL_CHANNEL_ESTIMATION_ENABLE ? '1' : '0', '\n' }; return simple_read_from_buffer(user_buf, count, ppos, buf, 2); } static ssize_t iwl_dbgfs_csi_enabled_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { int err; bool enabled; if (buf[count - 1] != '\n') return -EINVAL; buf[count - 1] = 0; err = kstrtobool(buf, &enabled); if (err) return err; /* * disable -> disable is a no-op, but * enable -> enable resets the timer/count */ if (!enabled && !(mvm->csi_cfg.flags & IWL_CHANNEL_ESTIMATION_ENABLE)) return count; mutex_lock(&mvm->mutex); mvm->csi_cfg.flags &= ~IWL_CHANNEL_ESTIMATION_ENABLE; if (enabled) mvm->csi_cfg.flags |= IWL_CHANNEL_ESTIMATION_ENABLE; if (iwl_mvm_firmware_running(mvm)) err = iwl_mvm_send_csi_cmd(mvm); mutex_unlock(&mvm->mutex); return err ?: count; } static ssize_t iwl_dbgfs_csi_count_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; s64 ctr = -1; u8 buf[32]; int len; if (mvm->csi_cfg.flags & IWL_CHANNEL_ESTIMATION_COUNTER) ctr = mvm->csi_cfg.count; len = scnprintf(buf, sizeof(buf), "%lld\n", ctr); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static ssize_t iwl_dbgfs_csi_count_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { int err; s64 ctr; if (buf[count - 1] != '\n') return -EINVAL; buf[count - 1] = 0; err = kstrtos64(buf, 0, &ctr); if (err) return err; if (ctr <= 0) { mvm->csi_cfg.flags &= ~IWL_CHANNEL_ESTIMATION_COUNTER; mvm->csi_cfg.count = 0; } else if (ctr <= UINT_MAX) { mvm->csi_cfg.flags |= IWL_CHANNEL_ESTIMATION_COUNTER; mvm->csi_cfg.count = ctr; } else { return -ERANGE; } return count; } static ssize_t iwl_dbgfs_csi_timeout_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; s64 timer = -1; u8 buf[32]; int len; if (mvm->csi_cfg.flags & IWL_CHANNEL_ESTIMATION_TIMER) timer = mvm->csi_cfg.timer; len = scnprintf(buf, sizeof(buf), "%lld\n", timer); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static ssize_t iwl_dbgfs_csi_timeout_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { int err; s64 timer; if (buf[count - 1] != '\n') return -EINVAL; buf[count - 1] = 0; err = kstrtos64(buf, 0, &timer); if (err) return err; if (timer < 0) { mvm->csi_cfg.flags &= ~IWL_CHANNEL_ESTIMATION_TIMER; mvm->csi_cfg.timer = 0; } else if (timer <= UINT_MAX) { mvm->csi_cfg.flags |= IWL_CHANNEL_ESTIMATION_TIMER; mvm->csi_cfg.timer = timer; } else { return -ERANGE; } return count; } static ssize_t 
iwl_dbgfs_csi_frame_types_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; u8 buf[32]; int len; len = scnprintf(buf, sizeof(buf), "0x%llx\n", mvm->csi_cfg.frame_types); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static ssize_t iwl_dbgfs_csi_frame_types_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { int err; u64 frame_types; if (buf[count - 1] != '\n') return -EINVAL; buf[count - 1] = 0; err = kstrtou64(buf, 0, &frame_types); if (err) return err; mvm->csi_cfg.frame_types = frame_types; return count; } static ssize_t iwl_dbgfs_csi_interval_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; u8 buf[32]; int len; len = scnprintf(buf, sizeof(buf), "%u\n", mvm->csi_cfg.interval); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static ssize_t iwl_dbgfs_csi_interval_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { int err; u32 interval; if (buf[count - 1] != '\n') return -EINVAL; buf[count - 1] = 0; err = kstrtou32(buf, 0, &interval); if (err) return err; mvm->csi_cfg.interval = interval; if (interval) mvm->csi_cfg.flags |= IWL_CHANNEL_ESTIMATION_INTERVAL; else mvm->csi_cfg.flags &= ~IWL_CHANNEL_ESTIMATION_INTERVAL; return count; } static ssize_t iwl_dbgfs_csi_addresses_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; u8 buf[2 + ETH_ALEN * 3 * IWL_NUM_CHANNEL_ESTIMATION_FILTER_ADDRS]; u8 *pos = buf; int i; for (i = 0; i < mvm->csi_cfg.num_filter_addrs; i++) pos += scnprintf(pos, sizeof(buf) - (pos - buf), "%pM\n", mvm->csi_cfg.filter_addrs[i].addr); return simple_read_from_buffer(user_buf, count, ppos, buf, pos - buf); } static ssize_t iwl_dbgfs_csi_addresses_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { char *pos = buf; int num = 0, i; struct { u8 addr[ETH_ALEN] __aligned(2); } addrs[IWL_NUM_CHANNEL_ESTIMATION_FILTER_ADDRS]; if (buf[count - 1] != '\n') return -EINVAL; buf[count - 1] = 0; while (num < IWL_NUM_CHANNEL_ESTIMATION_FILTER_ADDRS) { char *addrstr = strsep(&pos, "\n "); u8 addr[ETH_ALEN]; int n; if (!addrstr || !*addrstr) break; n = sscanf(addrstr, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &addr[0], &addr[1], &addr[2], &addr[3], &addr[4], &addr[5]); if (n != ETH_ALEN) return -EINVAL; ether_addr_copy(addrs[num].addr, addr); num++; } /* too many specified if the string isn't NULL now */ if (pos) return -EINVAL; mvm->csi_cfg.num_filter_addrs = num; for (i = 0; i < num; i++) ether_addr_copy(mvm->csi_cfg.filter_addrs[i].addr, addrs[i].addr); return count; } #endif /* CPTCFG_IWLMVM_VENDOR_CMDS */ static ssize_t iwl_dbgfs_ltr_config_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { int ret; struct iwl_ltr_config_cmd ltr_config = {0}; if (!iwl_mvm_firmware_running(mvm)) return -EIO; if (sscanf(buf, "%x,%x,%x,%x,%x,%x,%x", &ltr_config.flags, &ltr_config.static_long, &ltr_config.static_short, &ltr_config.ltr_cfg_values[0], &ltr_config.ltr_cfg_values[1], &ltr_config.ltr_cfg_values[2], &ltr_config.ltr_cfg_values[3]) != 7) { return -EINVAL; } mutex_lock(&mvm->mutex); ret = iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0, sizeof(ltr_config), &ltr_config); mutex_unlock(&mvm->mutex); if (ret) IWL_ERR(mvm, "failed to send ltr configuration cmd\n"); return ret ?: count; } MVM_DEBUGFS_READ_WRITE_FILE_OPS(prph_reg, 64); /* Device wide debugfs entries */ #ifdef CPTCFG_IWLWIFI_THERMAL_DEBUGFS
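/* Note on the MVM_DEBUGFS_*_FILE_OPS() declarations below: the numeric argument is the size of the on-stack buffer the generated write wrapper copies user input into (see MVM_DEBUGFS_WRITE_WRAPPER() in debugfs.h); input is truncated to one byte less than this size so the buffer stays NUL-terminated. */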
MVM_DEBUGFS_READ_WRITE_FILE_OPS(tt_tx_backoff, 64); #endif MVM_DEBUGFS_READ_FILE_OPS(ctdp_budget); MVM_DEBUGFS_WRITE_FILE_OPS(stop_ctdp, 8); MVM_DEBUGFS_WRITE_FILE_OPS(force_ctkill, 8); MVM_DEBUGFS_WRITE_FILE_OPS(tx_flush, 16); MVM_DEBUGFS_WRITE_FILE_OPS(sta_drain, 8); MVM_DEBUGFS_WRITE_FILE_OPS(send_echo_cmd, 8); MVM_DEBUGFS_READ_WRITE_FILE_OPS(sram, 64); MVM_DEBUGFS_READ_WRITE_FILE_OPS(set_nic_temperature, 64); MVM_DEBUGFS_READ_FILE_OPS(nic_temp); MVM_DEBUGFS_READ_FILE_OPS(stations); MVM_DEBUGFS_READ_FILE_OPS(rs_data); MVM_DEBUGFS_READ_FILE_OPS(bt_notif); MVM_DEBUGFS_READ_FILE_OPS(bt_cmd); MVM_DEBUGFS_READ_WRITE_FILE_OPS(disable_power_off, 64); MVM_DEBUGFS_READ_FILE_OPS(fw_rx_stats); MVM_DEBUGFS_READ_FILE_OPS(drv_rx_stats); MVM_DEBUGFS_READ_FILE_OPS(fw_ver); MVM_DEBUGFS_WRITE_FILE_OPS(fw_restart, 10); MVM_DEBUGFS_WRITE_FILE_OPS(fw_nmi, 10); MVM_DEBUGFS_WRITE_FILE_OPS(bt_tx_prio, 10); MVM_DEBUGFS_WRITE_FILE_OPS(bt_force_ant, 10); MVM_DEBUGFS_READ_WRITE_FILE_OPS(scan_ant_rxchain, 8); MVM_DEBUGFS_READ_WRITE_FILE_OPS(fw_dbg_conf, 8); MVM_DEBUGFS_WRITE_FILE_OPS(fw_dbg_collect, 64); MVM_DEBUGFS_WRITE_FILE_OPS(indirection_tbl, (IWL_RSS_INDIRECTION_TABLE_SIZE * 2)); MVM_DEBUGFS_WRITE_FILE_OPS(inject_packet, 512); MVM_DEBUGFS_WRITE_FILE_OPS(inject_beacon_ie, 512); MVM_DEBUGFS_WRITE_FILE_OPS(inject_beacon_ie_restore, 512); #ifdef CPTCFG_IWLMVM_VENDOR_CMDS MVM_DEBUGFS_READ_FILE_OPS(tx_power_status); MVM_DEBUGFS_READ_WRITE_FILE_OPS(csi_enabled, 8); MVM_DEBUGFS_READ_WRITE_FILE_OPS(csi_count, 32); MVM_DEBUGFS_READ_WRITE_FILE_OPS(csi_timeout, 32); MVM_DEBUGFS_READ_WRITE_FILE_OPS(csi_frame_types, 32); MVM_DEBUGFS_READ_WRITE_FILE_OPS(csi_interval, 32); MVM_DEBUGFS_READ_WRITE_FILE_OPS(csi_addresses, 2 + ETH_ALEN * 3 * IWL_NUM_CHANNEL_ESTIMATION_FILTER_ADDRS); #endif MVM_DEBUGFS_READ_FILE_OPS(uapsd_noagg_bssids); #ifdef CPTCFG_IWLWIFI_BCAST_FILTERING MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters, 256); MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters_macs, 256); #endif #ifdef CONFIG_ACPI MVM_DEBUGFS_READ_FILE_OPS(sar_geo_profile); #endif MVM_DEBUGFS_READ_WRITE_STA_FILE_OPS(amsdu_len, 16); MVM_DEBUGFS_READ_WRITE_FILE_OPS(he_sniffer_params, 32); MVM_DEBUGFS_WRITE_FILE_OPS(ltr_config, 512); static ssize_t iwl_dbgfs_mem_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; struct iwl_dbg_mem_access_cmd cmd = {}; struct iwl_dbg_mem_access_rsp *rsp; struct iwl_host_cmd hcmd = { .flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL, .data = { &cmd, }, .len = { sizeof(cmd) }, }; size_t delta; ssize_t ret, len; if (!iwl_mvm_firmware_running(mvm)) return -EIO; hcmd.id = iwl_cmd_id(*ppos >> 24 ? 
UMAC_RD_WR : LMAC_RD_WR, DEBUG_GROUP, 0); cmd.op = cpu_to_le32(DEBUG_MEM_OP_READ); /* Take care of alignment of both the position and the length */ delta = *ppos & 0x3; cmd.addr = cpu_to_le32(*ppos - delta); cmd.len = cpu_to_le32(min(ALIGN(count + delta, 4) / 4, (size_t)DEBUG_MEM_MAX_SIZE_DWORDS)); mutex_lock(&mvm->mutex); ret = iwl_mvm_send_cmd(mvm, &hcmd); mutex_unlock(&mvm->mutex); if (ret < 0) return ret; rsp = (void *)hcmd.resp_pkt->data; if (le32_to_cpu(rsp->status) != DEBUG_MEM_STATUS_SUCCESS) { ret = -ENXIO; goto out; } len = min((size_t)le32_to_cpu(rsp->len) << 2, iwl_rx_packet_payload_len(hcmd.resp_pkt) - sizeof(*rsp)); len = min(len - delta, count); if (len < 0) { ret = -EFAULT; goto out; } ret = len - copy_to_user(user_buf, (void *)rsp->data + delta, len); *ppos += ret; out: iwl_free_resp(&hcmd); return ret; } static ssize_t iwl_dbgfs_mem_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; struct iwl_dbg_mem_access_cmd *cmd; struct iwl_dbg_mem_access_rsp *rsp; struct iwl_host_cmd hcmd = {}; size_t cmd_size; size_t data_size; u32 op, len; ssize_t ret; if (!iwl_mvm_firmware_running(mvm)) return -EIO; hcmd.id = iwl_cmd_id(*ppos >> 24 ? UMAC_RD_WR : LMAC_RD_WR, DEBUG_GROUP, 0); if (*ppos & 0x3 || count < 4) { op = DEBUG_MEM_OP_WRITE_BYTES; len = min(count, (size_t)(4 - (*ppos & 0x3))); data_size = len; } else { op = DEBUG_MEM_OP_WRITE; len = min(count >> 2, (size_t)DEBUG_MEM_MAX_SIZE_DWORDS); data_size = len << 2; } cmd_size = sizeof(*cmd) + ALIGN(data_size, 4); cmd = kzalloc(cmd_size, GFP_KERNEL); if (!cmd) return -ENOMEM; cmd->op = cpu_to_le32(op); cmd->len = cpu_to_le32(len); cmd->addr = cpu_to_le32(*ppos); if (copy_from_user((void *)cmd->data, user_buf, data_size)) { kfree(cmd); return -EFAULT; } hcmd.flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL; hcmd.data[0] = (void *)cmd; hcmd.len[0] = cmd_size; mutex_lock(&mvm->mutex); ret = iwl_mvm_send_cmd(mvm, &hcmd); mutex_unlock(&mvm->mutex); kfree(cmd); if (ret < 0) return ret; rsp = (void *)hcmd.resp_pkt->data; if (le32_to_cpu(rsp->status) != DEBUG_MEM_STATUS_SUCCESS) { ret = -ENXIO; goto out; } ret = data_size; *ppos += ret; out: iwl_free_resp(&hcmd); return ret; } static const struct file_operations iwl_dbgfs_mem_ops = { .read = iwl_dbgfs_mem_read, .write = iwl_dbgfs_mem_write, .open = simple_open, .llseek = default_llseek, }; void iwl_mvm_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct dentry *dir) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); if (iwl_mvm_has_tlc_offload(mvm)) { MVM_DEBUGFS_ADD_STA_FILE(rs_data, dir, 0400); } MVM_DEBUGFS_ADD_STA_FILE(amsdu_len, dir, 0600); } void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir) { #ifdef CPTCFG_IWLWIFI_THERMAL_DEBUGFS struct iwl_tt_params *tt_params = &mvm->thermal_throttle.params; #endif struct dentry *bcast_dir __maybe_unused; char buf[100]; spin_lock_init(&mvm->drv_stats_lock); mvm->debugfs_dir = dbgfs_dir; #ifdef CPTCFG_IWLWIFI_THERMAL_DEBUGFS MVM_DEBUGFS_ADD_FILE(tt_tx_backoff, dbgfs_dir, 0400); #endif MVM_DEBUGFS_ADD_FILE(tx_flush, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(sta_drain, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(sram, mvm->debugfs_dir, 0600); MVM_DEBUGFS_ADD_FILE(set_nic_temperature, mvm->debugfs_dir, 0600); MVM_DEBUGFS_ADD_FILE(nic_temp, dbgfs_dir, 0400); MVM_DEBUGFS_ADD_FILE(ctdp_budget, dbgfs_dir, 0400); MVM_DEBUGFS_ADD_FILE(stop_ctdp, dbgfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(force_ctkill, dbgfs_dir, 0200);
MVM_DEBUGFS_ADD_FILE(stations, dbgfs_dir, 0400); MVM_DEBUGFS_ADD_FILE(bt_notif, dbgfs_dir, 0400); MVM_DEBUGFS_ADD_FILE(bt_cmd, dbgfs_dir, 0400); MVM_DEBUGFS_ADD_FILE(disable_power_off, mvm->debugfs_dir, 0600); MVM_DEBUGFS_ADD_FILE(fw_ver, mvm->debugfs_dir, 0400); MVM_DEBUGFS_ADD_FILE(fw_rx_stats, mvm->debugfs_dir, 0400); MVM_DEBUGFS_ADD_FILE(drv_rx_stats, mvm->debugfs_dir, 0400); MVM_DEBUGFS_ADD_FILE(fw_restart, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(fw_nmi, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(bt_tx_prio, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(bt_force_ant, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(scan_ant_rxchain, mvm->debugfs_dir, 0600); MVM_DEBUGFS_ADD_FILE(prph_reg, mvm->debugfs_dir, 0600); MVM_DEBUGFS_ADD_FILE(fw_dbg_conf, mvm->debugfs_dir, 0600); MVM_DEBUGFS_ADD_FILE(fw_dbg_collect, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(send_echo_cmd, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(indirection_tbl, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(inject_packet, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(inject_beacon_ie, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(inject_beacon_ie_restore, mvm->debugfs_dir, 0200); #ifdef CONFIG_ACPI MVM_DEBUGFS_ADD_FILE(sar_geo_profile, dbgfs_dir, 0400); #endif #ifdef CPTCFG_IWLMVM_VENDOR_CMDS MVM_DEBUGFS_ADD_FILE(tx_power_status, mvm->debugfs_dir, 0400); if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CSI_REPORTING) || fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CSI_REPORTING_V2)) { MVM_DEBUGFS_ADD_FILE(csi_enabled, mvm->debugfs_dir, 0600); MVM_DEBUGFS_ADD_FILE(csi_count, mvm->debugfs_dir, 0600); MVM_DEBUGFS_ADD_FILE(csi_timeout, mvm->debugfs_dir, 0600); MVM_DEBUGFS_ADD_FILE(csi_frame_types, mvm->debugfs_dir, 0600); debugfs_create_u32("csi_rate_n_flags_val", 0600, mvm->debugfs_dir, &mvm->csi_cfg.rate_n_flags_val); debugfs_create_u32("csi_rate_n_flags_mask", 0600, mvm->debugfs_dir, &mvm->csi_cfg.rate_n_flags_mask); } if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CSI_REPORTING_V2)) { MVM_DEBUGFS_ADD_FILE(csi_interval, mvm->debugfs_dir, 0600); MVM_DEBUGFS_ADD_FILE(csi_addresses, mvm->debugfs_dir, 0600); } #endif MVM_DEBUGFS_ADD_FILE(he_sniffer_params, mvm->debugfs_dir, 0600); if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SET_LTR_GEN2)) MVM_DEBUGFS_ADD_FILE(ltr_config, mvm->debugfs_dir, 0200); debugfs_create_bool("enable_scan_iteration_notif", 0600, mvm->debugfs_dir, &mvm->scan_iter_notif_enabled); debugfs_create_bool("drop_bcn_ap_mode", 0600, mvm->debugfs_dir, &mvm->drop_bcn_ap_mode); MVM_DEBUGFS_ADD_FILE(uapsd_noagg_bssids, mvm->debugfs_dir, 0400); #ifdef CPTCFG_IWLWIFI_BCAST_FILTERING if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING) { bcast_dir = debugfs_create_dir("bcast_filtering", mvm->debugfs_dir); debugfs_create_bool("override", 0600, bcast_dir, &mvm->dbgfs_bcast_filtering.override); MVM_DEBUGFS_ADD_FILE_ALIAS("filters", bcast_filters, bcast_dir, 0600); MVM_DEBUGFS_ADD_FILE_ALIAS("macs", bcast_filters_macs, bcast_dir, 0600); } #endif #ifdef CONFIG_PM_SLEEP MVM_DEBUGFS_ADD_FILE(d3_test, mvm->debugfs_dir, 0400); debugfs_create_bool("d3_wake_sysassert", 0600, mvm->debugfs_dir, &mvm->d3_wake_sysassert); debugfs_create_u32("last_netdetect_scans", 0400, mvm->debugfs_dir, &mvm->last_netdetect_scans); #endif debugfs_create_u8("ps_disabled", 0400, mvm->debugfs_dir, &mvm->ps_disabled); debugfs_create_blob("nvm_hw", 0400, mvm->debugfs_dir, &mvm->nvm_hw_blob); debugfs_create_blob("nvm_sw", 0400, mvm->debugfs_dir, &mvm->nvm_sw_blob); 
debugfs_create_blob("nvm_calib", 0400, mvm->debugfs_dir, &mvm->nvm_calib_blob); debugfs_create_blob("nvm_prod", 0400, mvm->debugfs_dir, &mvm->nvm_prod_blob); debugfs_create_blob("nvm_phy_sku", 0400, mvm->debugfs_dir, &mvm->nvm_phy_sku_blob); debugfs_create_blob("nvm_reg", 0400, mvm->debugfs_dir, &mvm->nvm_reg_blob); #ifdef CPTCFG_IWLWIFI_THERMAL_DEBUGFS debugfs_create_u32("ct_kill_exit", 0600, mvm->debugfs_dir, &tt_params->ct_kill_exit); debugfs_create_u32("ct_kill_entry", 0600, mvm->debugfs_dir, &tt_params->ct_kill_entry); debugfs_create_u32("ct_kill_duration", 0600, mvm->debugfs_dir, &tt_params->ct_kill_duration); debugfs_create_u32("dynamic_smps_entry", 0600, mvm->debugfs_dir, &tt_params->dynamic_smps_entry); debugfs_create_u32("dynamic_smps_exit", 0600, mvm->debugfs_dir, &tt_params->dynamic_smps_exit); debugfs_create_u32("tx_protection_entry", 0600, mvm->debugfs_dir, &tt_params->tx_protection_entry); debugfs_create_u32("tx_protection_exit", 0600, mvm->debugfs_dir, &tt_params->tx_protection_exit); #endif debugfs_create_file("mem", 0600, dbgfs_dir, mvm, &iwl_dbgfs_mem_ops); /* * Create a symlink with mac80211. It will be removed when mac80211 * exits (before the opmode exits which removes the target.) */ #if LINUX_VERSION_IS_GEQ(3,12,0) snprintf(buf, 100, "../../%pd2", dbgfs_dir->d_parent); #else snprintf(buf, 100, "../../%s/%s", dbgfs_dir->d_parent->d_parent->d_name.name, dbgfs_dir->d_parent->d_name.name); #endif debugfs_create_symlink("iwlwifi", mvm->hw->wiphy->debugfsdir, buf); } backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.h000066400000000000000000000101551351064634500271600ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission.
* * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #define MVM_DEBUGFS_READ_FILE_OPS(name) \ static const struct file_operations iwl_dbgfs_##name##_ops = { \ .read = iwl_dbgfs_##name##_read, \ .open = simple_open, \ .llseek = generic_file_llseek, \ } #define MVM_DEBUGFS_WRITE_WRAPPER(name, buflen, argtype) \ static ssize_t _iwl_dbgfs_##name##_write(struct file *file, \ const char __user *user_buf, \ size_t count, loff_t *ppos) \ { \ argtype *arg = file->private_data; \ char buf[buflen] = {}; \ size_t buf_size = min(count, sizeof(buf) - 1); \ \ if (copy_from_user(buf, user_buf, buf_size)) \ return -EFAULT; \ \ return iwl_dbgfs_##name##_write(arg, buf, buf_size, ppos); \ } \ #define _MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, buflen, argtype) \ MVM_DEBUGFS_WRITE_WRAPPER(name, buflen, argtype) \ static const struct file_operations iwl_dbgfs_##name##_ops = { \ .write = _iwl_dbgfs_##name##_write, \ .read = iwl_dbgfs_##name##_read, \ .open = simple_open, \ .llseek = generic_file_llseek, \ }; #define _MVM_DEBUGFS_WRITE_FILE_OPS(name, buflen, argtype) \ MVM_DEBUGFS_WRITE_WRAPPER(name, buflen, argtype) \ static const struct file_operations iwl_dbgfs_##name##_ops = { \ .write = _iwl_dbgfs_##name##_write, \ .open = simple_open, \ .llseek = generic_file_llseek, \ }; backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c000066400000000000000000000476531351064634500303370ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2015 - 2017 Intel Deutschland GmbH * Copyright (C) 2018 Intel Corporation * Copyright (C) 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2015 - 2017 Intel Deutschland GmbH * Copyright (C) 2018 Intel Corporation * Copyright (C) 2019 Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #include <linux/etherdevice.h> #include <linux/math64.h> #include <net/cfg80211.h> #include "mvm.h" #include "iwl-io.h" #include "iwl-prph.h" #include "constants.h" struct iwl_mvm_loc_entry { struct list_head list; u8 addr[ETH_ALEN]; u8 lci_len, civic_len; u8 buf[]; }; static void iwl_mvm_ftm_reset(struct iwl_mvm *mvm) { struct iwl_mvm_loc_entry *e, *t; mvm->ftm_initiator.req = NULL; mvm->ftm_initiator.req_wdev = NULL; memset(mvm->ftm_initiator.responses, 0, sizeof(mvm->ftm_initiator.responses)); list_for_each_entry_safe(e, t, &mvm->ftm_initiator.loc_list, list) { list_del(&e->list); kfree(e); } } void iwl_mvm_ftm_restart(struct iwl_mvm *mvm) { struct cfg80211_pmsr_result result = { .status = NL80211_PMSR_STATUS_FAILURE, .final = 1, .host_time = ktime_get_boot_ns(), .type = NL80211_PMSR_TYPE_FTM, }; int i; lockdep_assert_held(&mvm->mutex); if (!mvm->ftm_initiator.req) return; for (i = 0; i < mvm->ftm_initiator.req->n_peers; i++) { memcpy(result.addr, mvm->ftm_initiator.req->peers[i].addr, ETH_ALEN); result.ftm.burst_index = mvm->ftm_initiator.responses[i]; cfg80211_pmsr_report(mvm->ftm_initiator.req_wdev, mvm->ftm_initiator.req, &result, GFP_KERNEL); } cfg80211_pmsr_complete(mvm->ftm_initiator.req_wdev, mvm->ftm_initiator.req, GFP_KERNEL); iwl_mvm_ftm_reset(mvm); } static int iwl_ftm_range_request_status_to_err(enum iwl_tof_range_request_status s) { switch (s) { case IWL_TOF_RANGE_REQUEST_STATUS_SUCCESS: return 0; case IWL_TOF_RANGE_REQUEST_STATUS_BUSY: return -EBUSY; default: WARN_ON_ONCE(1); return -EIO; } } static void iwl_mvm_ftm_cmd_v5(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_tof_range_req_cmd_v5 *cmd, struct cfg80211_pmsr_request *req) { int i; cmd->request_id = req->cookie; cmd->num_of_ap = req->n_peers; #ifdef CPTCFG_IWLWIFI_SUPPORT_DEBUG_OVERRIDES if (IWL_MVM_FTM_INITIATOR_COMMON_CALIB) { cmd->common_calib = cpu_to_le16(IWL_MVM_FTM_INITIATOR_COMMON_CALIB); cmd->initiator_flags = cpu_to_le32(IWL_TOF_INITIATOR_FLAGS_COMMON_CALIB); } #endif /* use maximum for "no timeout" or bigger than what we can do */
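/* Note: req->timeout arrives from cfg80211 in milliseconds, while the v5 firmware field is a u8 counting 100 ms units, so the effective ceiling is 25500 ms; e.g. a 2550 ms request encodes as DIV_ROUND_UP(2550, 100) = 26 units. */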
if (!req->timeout || req->timeout > 255 * 100) cmd->req_timeout = 255; else cmd->req_timeout = DIV_ROUND_UP(req->timeout, 100); /* * We treat it always as random, since if not we'll * have filled our local address there instead. */ cmd->macaddr_random = 1; memcpy(cmd->macaddr_template, req->mac_addr, ETH_ALEN); for (i = 0; i < ETH_ALEN; i++) cmd->macaddr_mask[i] = ~req->mac_addr_mask[i]; if (vif->bss_conf.assoc) memcpy(cmd->range_req_bssid, vif->bss_conf.bssid, ETH_ALEN); else eth_broadcast_addr(cmd->range_req_bssid); } static void iwl_mvm_ftm_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_tof_range_req_cmd *cmd, struct cfg80211_pmsr_request *req) { int i; cmd->initiator_flags = cpu_to_le32(IWL_TOF_INITIATOR_FLAGS_MACADDR_RANDOM | IWL_TOF_INITIATOR_FLAGS_NON_ASAP_SUPPORT); cmd->request_id = req->cookie; cmd->num_of_ap = req->n_peers; /* * Use a large value for "no timeout". Don't use the maximum value * because of fw limitations. */ if (req->timeout) cmd->req_timeout_ms = cpu_to_le32(req->timeout); else cmd->req_timeout_ms = cpu_to_le32(0xfffff); memcpy(cmd->macaddr_template, req->mac_addr, ETH_ALEN); for (i = 0; i < ETH_ALEN; i++) cmd->macaddr_mask[i] = ~req->mac_addr_mask[i]; #ifdef CPTCFG_IWLWIFI_SUPPORT_DEBUG_OVERRIDES if (IWL_MVM_FTM_INITIATOR_COMMON_CALIB) { cmd->common_calib = cpu_to_le16(IWL_MVM_FTM_INITIATOR_COMMON_CALIB); cmd->initiator_flags |= cpu_to_le32(IWL_TOF_INITIATOR_FLAGS_COMMON_CALIB); } if (IWL_MVM_FTM_INITIATOR_FAST_ALGO_DISABLE) cmd->initiator_flags |= cpu_to_le32(IWL_TOF_INITIATOR_FLAGS_FAST_ALGO_DISABLED); #endif if (vif->bss_conf.assoc) { memcpy(cmd->range_req_bssid, vif->bss_conf.bssid, ETH_ALEN); /* AP's TSF is only relevant if associated */ for (i = 0; i < req->n_peers; i++) { if (req->peers[i].report_ap_tsf) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); cmd->tsf_mac_id = cpu_to_le32(mvmvif->id); return; } } } else { eth_broadcast_addr(cmd->range_req_bssid); } /* Don't report AP's TSF */ cmd->tsf_mac_id = cpu_to_le32(0xff); } static int iwl_mvm_ftm_target_chandef(struct iwl_mvm *mvm, struct cfg80211_pmsr_request_peer *peer, u8 *channel, u8 *bandwidth, u8 *ctrl_ch_position) { u32 freq = peer->chandef.chan->center_freq; *channel = ieee80211_frequency_to_channel(freq); switch (peer->chandef.width) { case NL80211_CHAN_WIDTH_20_NOHT: *bandwidth = IWL_TOF_BW_20_LEGACY; break; case NL80211_CHAN_WIDTH_20: *bandwidth = IWL_TOF_BW_20_HT; break; case NL80211_CHAN_WIDTH_40: *bandwidth = IWL_TOF_BW_40; break; case NL80211_CHAN_WIDTH_80: *bandwidth = IWL_TOF_BW_80; break; default: IWL_ERR(mvm, "Unsupported BW in FTM request (%d)\n", peer->chandef.width); return -EINVAL; } *ctrl_ch_position = (peer->chandef.width > NL80211_CHAN_WIDTH_20) ? 
iwl_mvm_get_ctrl_pos(&peer->chandef) : 0; return 0; } static int iwl_mvm_ftm_put_target_v2(struct iwl_mvm *mvm, struct cfg80211_pmsr_request_peer *peer, struct iwl_tof_range_req_ap_entry_v2 *target) { int ret; ret = iwl_mvm_ftm_target_chandef(mvm, peer, &target->channel_num, &target->bandwidth, &target->ctrl_ch_position); if (ret) return ret; memcpy(target->bssid, peer->addr, ETH_ALEN); target->burst_period = cpu_to_le16(peer->ftm.burst_period); target->samples_per_burst = peer->ftm.ftms_per_burst; target->num_of_bursts = peer->ftm.num_bursts_exp; target->measure_type = 0; /* regular two-sided FTM */ target->retries_per_sample = peer->ftm.ftmr_retries; target->asap_mode = peer->ftm.asap; target->enable_dyn_ack = IWL_MVM_FTM_INITIATOR_DYNACK; if (peer->ftm.request_lci) target->location_req |= IWL_TOF_LOC_LCI; if (peer->ftm.request_civicloc) target->location_req |= IWL_TOF_LOC_CIVIC; target->algo_type = IWL_MVM_FTM_INITIATOR_ALGO; #ifdef CPTCFG_IWLWIFI_SUPPORT_DEBUG_OVERRIDES target->notify_mcsi = IWL_MVM_FTM_INITIATOR_MCSI_ENABLED; #endif return 0; } #define FTM_PUT_FLAG(flag) (target->initiator_ap_flags |= \ cpu_to_le32(IWL_INITIATOR_AP_FLAGS_##flag)) static int iwl_mvm_ftm_put_target(struct iwl_mvm *mvm, struct cfg80211_pmsr_request_peer *peer, struct iwl_tof_range_req_ap_entry *target) { int ret; ret = iwl_mvm_ftm_target_chandef(mvm, peer, &target->channel_num, &target->bandwidth, &target->ctrl_ch_position); if (ret) return ret; memcpy(target->bssid, peer->addr, ETH_ALEN); target->burst_period = cpu_to_le16(peer->ftm.burst_period); target->samples_per_burst = peer->ftm.ftms_per_burst; target->num_of_bursts = peer->ftm.num_bursts_exp; target->ftmr_max_retries = peer->ftm.ftmr_retries; target->initiator_ap_flags = cpu_to_le32(0); if (peer->ftm.asap) FTM_PUT_FLAG(ASAP); if (peer->ftm.request_lci) FTM_PUT_FLAG(LCI_REQUEST); if (peer->ftm.request_civicloc) FTM_PUT_FLAG(CIVIC_REQUEST); if (IWL_MVM_FTM_INITIATOR_DYNACK) FTM_PUT_FLAG(DYN_ACK); if (IWL_MVM_FTM_INITIATOR_ALGO == IWL_TOF_ALGO_TYPE_LINEAR_REG) FTM_PUT_FLAG(ALGO_LR); else if (IWL_MVM_FTM_INITIATOR_ALGO == IWL_TOF_ALGO_TYPE_FFT) FTM_PUT_FLAG(ALGO_FFT); #ifdef CPTCFG_IWLWIFI_SUPPORT_DEBUG_OVERRIDES if (IWL_MVM_FTM_INITIATOR_MCSI_ENABLED) FTM_PUT_FLAG(MCSI_REPORT); #endif return 0; } int iwl_mvm_ftm_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_pmsr_request *req) { struct iwl_tof_range_req_cmd_v5 cmd_v5; struct iwl_tof_range_req_cmd cmd; bool new_api = fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ); u8 num_of_ap; struct iwl_host_cmd hcmd = { .id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0), .dataflags[0] = IWL_HCMD_DFL_DUP, }; u32 status = 0; int err, i; lockdep_assert_held(&mvm->mutex); if (mvm->ftm_initiator.req) return -EBUSY; if (new_api) { iwl_mvm_ftm_cmd(mvm, vif, &cmd, req); hcmd.data[0] = &cmd; hcmd.len[0] = sizeof(cmd); num_of_ap = cmd.num_of_ap; } else { iwl_mvm_ftm_cmd_v5(mvm, vif, &cmd_v5, req); hcmd.data[0] = &cmd_v5; hcmd.len[0] = sizeof(cmd_v5); num_of_ap = cmd_v5.num_of_ap; } for (i = 0; i < num_of_ap; i++) { struct cfg80211_pmsr_request_peer *peer = &req->peers[i]; if (new_api) err = iwl_mvm_ftm_put_target(mvm, peer, &cmd.ap[i]); else err = iwl_mvm_ftm_put_target_v2(mvm, peer, &cmd_v5.ap[i]); if (err) return err; } err = iwl_mvm_send_cmd_status(mvm, &hcmd, &status); if (!err && status) { IWL_ERR(mvm, "FTM range request command failure, status: %u\n", status); err = iwl_ftm_range_request_status_to_err(status); } if (!err) { mvm->ftm_initiator.req = req; 
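/* Keep the request and its wdev around: the asynchronous range-response notification is matched back to this request (see iwl_mvm_ftm_range_resp()) and reported via cfg80211_pmsr_report(). */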
mvm->ftm_initiator.req_wdev = ieee80211_vif_to_wdev(vif); } return err; } void iwl_mvm_ftm_abort(struct iwl_mvm *mvm, struct cfg80211_pmsr_request *req) { struct iwl_tof_range_abort_cmd cmd = { .request_id = req->cookie, }; lockdep_assert_held(&mvm->mutex); if (req != mvm->ftm_initiator.req) return; if (iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_RANGE_ABORT_CMD, LOCATION_GROUP, 0), 0, sizeof(cmd), &cmd)) IWL_ERR(mvm, "failed to abort FTM process\n"); } static int iwl_mvm_ftm_find_peer(struct cfg80211_pmsr_request *req, const u8 *addr) { int i; for (i = 0; i < req->n_peers; i++) { struct cfg80211_pmsr_request_peer *peer = &req->peers[i]; if (ether_addr_equal_unaligned(peer->addr, addr)) return i; } return -ENOENT; } static u64 iwl_mvm_ftm_get_host_time(struct iwl_mvm *mvm, __le32 fw_gp2_ts) { u32 gp2_ts = le32_to_cpu(fw_gp2_ts); u32 curr_gp2, diff; u64 now_from_boot_ns; iwl_mvm_get_sync_time(mvm, &curr_gp2, &now_from_boot_ns); if (curr_gp2 >= gp2_ts) diff = curr_gp2 - gp2_ts; else diff = curr_gp2 + (U32_MAX - gp2_ts + 1); return now_from_boot_ns - (u64)diff * 1000; } static void iwl_mvm_ftm_get_lci_civic(struct iwl_mvm *mvm, struct cfg80211_pmsr_result *res) { struct iwl_mvm_loc_entry *entry; list_for_each_entry(entry, &mvm->ftm_initiator.loc_list, list) { if (!ether_addr_equal_unaligned(res->addr, entry->addr)) continue; if (entry->lci_len) { res->ftm.lci_len = entry->lci_len; res->ftm.lci = entry->buf; } if (entry->civic_len) { res->ftm.civicloc_len = entry->civic_len; res->ftm.civicloc = entry->buf + entry->lci_len; } /* we found the entry we needed */ break; } } static int iwl_mvm_ftm_range_resp_valid(struct iwl_mvm *mvm, u8 request_id, u8 num_of_aps) { lockdep_assert_held(&mvm->mutex); if (request_id != (u8)mvm->ftm_initiator.req->cookie) { IWL_ERR(mvm, "Request ID mismatch, got %u, active %u\n", request_id, (u8)mvm->ftm_initiator.req->cookie); return -EINVAL; } if (num_of_aps > mvm->ftm_initiator.req->n_peers) { IWL_ERR(mvm, "FTM range response invalid\n"); return -EINVAL; } return 0; } static void iwl_mvm_debug_range_resp(struct iwl_mvm *mvm, u8 index, struct cfg80211_pmsr_result *res) { s64 rtt_avg = div_s64(res->ftm.rtt_avg * 100, 6666); IWL_DEBUG_INFO(mvm, "entry %d\n", index); IWL_DEBUG_INFO(mvm, "\tstatus: %d\n", res->status); IWL_DEBUG_INFO(mvm, "\tBSSID: %pM\n", res->addr); IWL_DEBUG_INFO(mvm, "\thost time: %llu\n", res->host_time); IWL_DEBUG_INFO(mvm, "\tburst index: %hhu\n", res->ftm.burst_index); IWL_DEBUG_INFO(mvm, "\tsuccess num: %u\n", res->ftm.num_ftmr_successes); IWL_DEBUG_INFO(mvm, "\trssi: %d\n", res->ftm.rssi_avg); IWL_DEBUG_INFO(mvm, "\trssi spread: %hhu\n", res->ftm.rssi_spread); IWL_DEBUG_INFO(mvm, "\trtt: %lld\n", res->ftm.rtt_avg); IWL_DEBUG_INFO(mvm, "\trtt var: %llu\n", res->ftm.rtt_variance); IWL_DEBUG_INFO(mvm, "\trtt spread: %llu\n", res->ftm.rtt_spread); IWL_DEBUG_INFO(mvm, "\tdistance: %lld\n", rtt_avg); } void iwl_mvm_ftm_range_resp(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_tof_range_rsp_ntfy_v5 *fw_resp_v5 = (void *)pkt->data; struct iwl_tof_range_rsp_ntfy_v6 *fw_resp_v6 = (void *)pkt->data; struct iwl_tof_range_rsp_ntfy *fw_resp = (void *)pkt->data; int i; bool new_api = fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ); u8 num_of_aps, last_in_batch; lockdep_assert_held(&mvm->mutex); if (!mvm->ftm_initiator.req) { IWL_ERR(mvm, "Got FTM response but have no request?\n"); return; } if (new_api) { if (iwl_mvm_ftm_range_resp_valid(mvm, fw_resp->request_id, 
fw_resp->num_of_aps)) return; num_of_aps = fw_resp->num_of_aps; last_in_batch = fw_resp->last_report; } else { if (iwl_mvm_ftm_range_resp_valid(mvm, fw_resp_v5->request_id, fw_resp_v5->num_of_aps)) return; num_of_aps = fw_resp_v5->num_of_aps; last_in_batch = fw_resp_v5->last_in_batch; } IWL_DEBUG_INFO(mvm, "Range response received\n"); IWL_DEBUG_INFO(mvm, "request id: %lld, num of entries: %hhu\n", mvm->ftm_initiator.req->cookie, num_of_aps); for (i = 0; i < num_of_aps && i < IWL_MVM_TOF_MAX_APS; i++) { struct cfg80211_pmsr_result result = {}; struct iwl_tof_range_rsp_ap_entry_ntfy *fw_ap; int peer_idx; if (new_api) { if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_FTM_RTT_ACCURACY)) fw_ap = &fw_resp->ap[i]; else fw_ap = (void *)&fw_resp_v6->ap[i]; result.final = fw_resp->ap[i].last_burst; result.ap_tsf = le32_to_cpu(fw_ap->start_tsf); result.ap_tsf_valid = 1; } else { /* the first part is the same for old and new APIs */ fw_ap = (void *)&fw_resp_v5->ap[i]; /* * FIXME: the firmware needs to report this, we don't * even know the number of bursts the responder picked * (if we asked it to) */ result.final = 0; } peer_idx = iwl_mvm_ftm_find_peer(mvm->ftm_initiator.req, fw_ap->bssid); if (peer_idx < 0) { IWL_WARN(mvm, "Unknown address (%pM, target #%d) in FTM response\n", fw_ap->bssid, i); continue; } switch (fw_ap->measure_status) { case IWL_TOF_ENTRY_SUCCESS: result.status = NL80211_PMSR_STATUS_SUCCESS; break; case IWL_TOF_ENTRY_TIMING_MEASURE_TIMEOUT: result.status = NL80211_PMSR_STATUS_TIMEOUT; break; case IWL_TOF_ENTRY_NO_RESPONSE: result.status = NL80211_PMSR_STATUS_FAILURE; result.ftm.failure_reason = NL80211_PMSR_FTM_FAILURE_NO_RESPONSE; break; case IWL_TOF_ENTRY_REQUEST_REJECTED: result.status = NL80211_PMSR_STATUS_FAILURE; result.ftm.failure_reason = NL80211_PMSR_FTM_FAILURE_PEER_BUSY; result.ftm.busy_retry_time = fw_ap->refusal_period; break; default: result.status = NL80211_PMSR_STATUS_FAILURE; result.ftm.failure_reason = NL80211_PMSR_FTM_FAILURE_UNSPECIFIED; break; } memcpy(result.addr, fw_ap->bssid, ETH_ALEN); result.host_time = iwl_mvm_ftm_get_host_time(mvm, fw_ap->timestamp); result.type = NL80211_PMSR_TYPE_FTM; result.ftm.burst_index = mvm->ftm_initiator.responses[peer_idx]; mvm->ftm_initiator.responses[peer_idx]++; result.ftm.rssi_avg = fw_ap->rssi; result.ftm.rssi_avg_valid = 1; result.ftm.rssi_spread = fw_ap->rssi_spread; result.ftm.rssi_spread_valid = 1; result.ftm.rtt_avg = (s32)le32_to_cpu(fw_ap->rtt); result.ftm.rtt_avg_valid = 1; result.ftm.rtt_variance = le32_to_cpu(fw_ap->rtt_variance); result.ftm.rtt_variance_valid = 1; result.ftm.rtt_spread = le32_to_cpu(fw_ap->rtt_spread); result.ftm.rtt_spread_valid = 1; iwl_mvm_ftm_get_lci_civic(mvm, &result); cfg80211_pmsr_report(mvm->ftm_initiator.req_wdev, mvm->ftm_initiator.req, &result, GFP_KERNEL); if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_FTM_RTT_ACCURACY)) IWL_DEBUG_INFO(mvm, "RTT confidence: %hhu\n", fw_ap->rttConfidence); iwl_mvm_debug_range_resp(mvm, i, &result); } if (last_in_batch) { cfg80211_pmsr_complete(mvm->ftm_initiator.req_wdev, mvm->ftm_initiator.req, GFP_KERNEL); iwl_mvm_ftm_reset(mvm); } } void iwl_mvm_ftm_lc_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); const struct ieee80211_mgmt *mgmt = (void *)pkt->data; size_t len = iwl_rx_packet_payload_len(pkt); struct iwl_mvm_loc_entry *entry; const u8 *ies, *lci, *civic, *msr_ie; size_t ies_len, lci_len = 0, civic_len = 0; size_t baselen = IEEE80211_MIN_ACTION_SIZE + 
sizeof(mgmt->u.action.u.ftm); static const u8 rprt_type_lci = IEEE80211_SPCT_MSR_RPRT_TYPE_LCI; static const u8 rprt_type_civic = IEEE80211_SPCT_MSR_RPRT_TYPE_CIVIC; if (len <= baselen) return; lockdep_assert_held(&mvm->mutex); ies = mgmt->u.action.u.ftm.variable; ies_len = len - baselen; msr_ie = cfg80211_find_ie_match(WLAN_EID_MEASURE_REPORT, ies, ies_len, &rprt_type_lci, 1, 4); if (msr_ie) { lci = msr_ie + 2; lci_len = msr_ie[1]; } msr_ie = cfg80211_find_ie_match(WLAN_EID_MEASURE_REPORT, ies, ies_len, &rprt_type_civic, 1, 4); if (msr_ie) { civic = msr_ie + 2; civic_len = msr_ie[1]; } entry = kmalloc(sizeof(*entry) + lci_len + civic_len, GFP_KERNEL); if (!entry) return; memcpy(entry->addr, mgmt->bssid, ETH_ALEN); entry->lci_len = lci_len; if (lci_len) memcpy(entry->buf, lci, lci_len); entry->civic_len = civic_len; if (civic_len) memcpy(entry->buf + lci_len, civic, civic_len); list_add_tail(&entry->list, &mvm->ftm_initiator.loc_list); } backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c000066400000000000000000000177571351064634500303400ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2015 - 2017 Intel Deutschland GmbH * Copyright (C) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2015 - 2017 Intel Deutschland GmbH * Copyright (C) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #include <net/cfg80211.h> #include <linux/etherdevice.h> #include "mvm.h" #include "constants.h" static int iwl_mvm_ftm_responder_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_chan_def *chandef) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_tof_responder_config_cmd cmd = { .channel_num = chandef->chan->hw_value, .cmd_valid_fields = cpu_to_le32(IWL_TOF_RESPONDER_CMD_VALID_CHAN_INFO | IWL_TOF_RESPONDER_CMD_VALID_BSSID | IWL_TOF_RESPONDER_CMD_VALID_STA_ID), .sta_id = mvmvif->bcast_sta.sta_id, }; lockdep_assert_held(&mvm->mutex); #ifdef CPTCFG_IWLWIFI_SUPPORT_DEBUG_OVERRIDES cmd.cmd_valid_fields |= cpu_to_le32(IWL_MVM_FTM_RESP_VALID); cmd.responder_cfg_flags |= cpu_to_le32(IWL_MVM_FTM_RESP_FLAGS); if (IWL_MVM_FTM_RESP_TOA_OFFSET) { cmd.cmd_valid_fields |= cpu_to_le32(IWL_TOF_RESPONDER_FLAGS_TOA_OFFSET_MODE); cmd.toa_offset = cpu_to_le16(IWL_MVM_FTM_RESP_TOA_OFFSET); } #endif switch (chandef->width) { case NL80211_CHAN_WIDTH_20_NOHT: cmd.bandwidth = IWL_TOF_BW_20_LEGACY; break; case NL80211_CHAN_WIDTH_20: cmd.bandwidth = IWL_TOF_BW_20_HT; break; case NL80211_CHAN_WIDTH_40: cmd.bandwidth = IWL_TOF_BW_40; cmd.ctrl_ch_position = iwl_mvm_get_ctrl_pos(chandef); break; case NL80211_CHAN_WIDTH_80: cmd.bandwidth = IWL_TOF_BW_80; cmd.ctrl_ch_position = iwl_mvm_get_ctrl_pos(chandef); break; default: WARN_ON(1); return -EINVAL; } memcpy(cmd.bssid, vif->addr, ETH_ALEN); return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_RESPONDER_CONFIG_CMD, LOCATION_GROUP, 0), 0, sizeof(cmd), &cmd); } static int iwl_mvm_ftm_responder_dyn_cfg_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_ftm_responder_params *params) { struct iwl_tof_responder_dyn_config_cmd cmd = { .lci_len = cpu_to_le32(params->lci_len + 2), .civic_len = cpu_to_le32(params->civicloc_len + 2), }; u8 data[IWL_LCI_CIVIC_IE_MAX_SIZE] = {0}; struct iwl_host_cmd hcmd = { .id = iwl_cmd_id(TOF_RESPONDER_DYN_CONFIG_CMD, LOCATION_GROUP, 0), .data[0] = &cmd, .len[0] = sizeof(cmd), .data[1] = &data, /* .len[1] set later */ /* may not be able to DMA from stack */ .dataflags[1] = IWL_HCMD_DFL_DUP, }; u32 aligned_lci_len = ALIGN(params->lci_len + 2, 4); u32 aligned_civicloc_len = ALIGN(params->civicloc_len + 2, 4); u8 *pos = data; lockdep_assert_held(&mvm->mutex); if (aligned_lci_len + aligned_civicloc_len > sizeof(data)) { IWL_ERR(mvm, "LCI/civicloc data too big (%zd + %zd)\n", params->lci_len, params->civicloc_len); return -ENOBUFS; } pos[0] = WLAN_EID_MEASURE_REPORT; pos[1] = params->lci_len; memcpy(pos + 2, params->lci, params->lci_len); pos += aligned_lci_len; pos[0] = WLAN_EID_MEASURE_REPORT; pos[1] = params->civicloc_len; memcpy(pos + 2, params->civicloc, params->civicloc_len); hcmd.len[1] = aligned_lci_len + aligned_civicloc_len; return iwl_mvm_send_cmd(mvm, &hcmd); } int iwl_mvm_ftm_start_responder(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct
ieee80211_ftm_responder_params *params; struct ieee80211_chanctx_conf ctx, *pctx; u16 *phy_ctxt_id; struct iwl_mvm_phy_ctxt *phy_ctxt; int ret; params = vif->bss_conf.ftmr_params; lockdep_assert_held(&mvm->mutex); if (WARN_ON_ONCE(!vif->bss_conf.ftm_responder)) return -EINVAL; if (vif->p2p || vif->type != NL80211_IFTYPE_AP || !mvmvif->ap_ibss_active) { IWL_ERR(mvm, "Cannot start responder, not in AP mode\n"); return -EIO; } rcu_read_lock(); pctx = rcu_dereference(vif->chanctx_conf); /* Copy the ctx to unlock the rcu and send the phy ctxt. We don't care * about changes in the ctx after releasing the lock because the driver * is still protected by the mutex. */ ctx = *pctx; phy_ctxt_id = (u16 *)pctx->drv_priv; rcu_read_unlock(); phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id]; ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx.def, ctx.rx_chains_static, ctx.rx_chains_dynamic); if (ret) return ret; ret = iwl_mvm_ftm_responder_cmd(mvm, vif, &ctx.def); if (ret) return ret; if (params) ret = iwl_mvm_ftm_responder_dyn_cfg_cmd(mvm, vif, params); return ret; } void iwl_mvm_ftm_restart_responder(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { if (!vif->bss_conf.ftm_responder) return; iwl_mvm_ftm_start_responder(mvm, vif); } void iwl_mvm_ftm_responder_stats(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_ftm_responder_stats *resp = (void *)pkt->data; struct cfg80211_ftm_responder_stats *stats = &mvm->ftm_resp_stats; u32 flags = le32_to_cpu(resp->flags); if (resp->success_ftm == resp->ftm_per_burst) stats->success_num++; else if (resp->success_ftm >= 2) stats->partial_num++; else stats->failed_num++; if ((flags & FTM_RESP_STAT_ASAP_REQ) && (flags & FTM_RESP_STAT_ASAP_RESP)) stats->asap_num++; if (flags & FTM_RESP_STAT_NON_ASAP_RESP) stats->non_asap_num++; stats->total_duration_ms += le32_to_cpu(resp->duration) / USEC_PER_MSEC; if (flags & FTM_RESP_STAT_TRIGGER_UNKNOWN) stats->unknown_triggers_num++; if (flags & FTM_RESP_STAT_DUP) stats->reschedule_requests_num++; if (flags & FTM_RESP_STAT_NON_ASAP_OUT_WIN) stats->out_of_window_triggers_num++; } backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h000066400000000000000000000074541351064634500267340ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright (C) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright (C) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #ifndef __fw_api_h__ #define __fw_api_h__ #include "fw/api/tdls.h" #include "fw/api/mac-cfg.h" #include "fw/api/offload.h" #include "fw/api/context.h" #include "fw/api/time-event.h" #include "fw/api/datapath.h" #include "fw/api/phy.h" #include "fw/api/config.h" #include "fw/api/soc.h" #include "fw/api/alive.h" #include "fw/api/binding.h" #include "fw/api/cmdhdr.h" #include "fw/api/coex.h" #include "fw/api/commands.h" #include "fw/api/d3.h" #include "fw/api/filter.h" #include "fw/api/led.h" #include "fw/api/mac.h" #include "fw/api/nvm-reg.h" #include "fw/api/phy-ctxt.h" #include "fw/api/power.h" #include "fw/api/rs.h" #include "fw/api/rx.h" #include "fw/api/scan.h" #include "fw/api/sf.h" #include "fw/api/sta.h" #include "fw/api/stats.h" #include "fw/api/location.h" #include "fw/api/tx.h" #include "fw/api/testing.h" #endif /* __fw_api_h__ */ backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/mvm/fw.c000066400000000000000000001350371351064634500261570ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. 
* * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #include <net/mac80211.h> #include <linux/netdevice.h> #include "iwl-trans.h" #include "iwl-op-mode.h" #include "fw/img.h" #include "iwl-debug.h" #include "iwl-csr.h" /* for iwl_mvm_rx_card_state_notif */ #include "iwl-io.h" /* for iwl_mvm_rx_card_state_notif */ #include "iwl-prph.h" #include "fw/acpi.h" #include "mvm.h" #include "fw/dbg.h" #include "iwl-phy-db.h" #include "iwl-modparams.h" #include "iwl-nvm-parse.h" #ifdef CPTCFG_IWLWIFI_DEVICE_TESTMODE #include "iwl-dnt-cfg.h" #include "fw/testmode.h" #endif #define MVM_UCODE_ALIVE_TIMEOUT (HZ * CPTCFG_IWL_TIMEOUT_FACTOR) #define MVM_UCODE_CALIB_TIMEOUT (2 * HZ * CPTCFG_IWL_TIMEOUT_FACTOR) #define UCODE_VALID_OK cpu_to_le32(0x1) struct iwl_mvm_alive_data { bool valid; u32 scd_base_addr; }; /* set device type and latency */ static int iwl_set_soc_latency(struct iwl_mvm *mvm) { struct iwl_soc_configuration_cmd cmd; int ret; cmd.device_type = (mvm->trans->cfg->integrated) ?
cpu_to_le32(SOC_CONFIG_CMD_INTEGRATED) : cpu_to_le32(SOC_CONFIG_CMD_DISCRETE); cmd.soc_latency = cpu_to_le32(mvm->trans->cfg->soc_latency); ret = iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(SOC_CONFIGURATION_CMD, SYSTEM_GROUP, 0), 0, sizeof(cmd), &cmd); if (ret) IWL_ERR(mvm, "Failed to set soc latency: %d\n", ret); return ret; } static int iwl_send_tx_ant_cfg(struct iwl_mvm *mvm, u8 valid_tx_ant) { struct iwl_tx_ant_cfg_cmd tx_ant_cmd = { .valid = cpu_to_le32(valid_tx_ant), }; IWL_DEBUG_FW(mvm, "select valid tx ant: %u\n", valid_tx_ant); return iwl_mvm_send_cmd_pdu(mvm, TX_ANT_CONFIGURATION_CMD, 0, sizeof(tx_ant_cmd), &tx_ant_cmd); } static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm) { int i; struct iwl_rss_config_cmd cmd = { .flags = cpu_to_le32(IWL_RSS_ENABLE), .hash_mask = BIT(IWL_RSS_HASH_TYPE_IPV4_TCP) | BIT(IWL_RSS_HASH_TYPE_IPV4_UDP) | BIT(IWL_RSS_HASH_TYPE_IPV4_PAYLOAD) | BIT(IWL_RSS_HASH_TYPE_IPV6_TCP) | BIT(IWL_RSS_HASH_TYPE_IPV6_UDP) | BIT(IWL_RSS_HASH_TYPE_IPV6_PAYLOAD), }; if (mvm->trans->num_rx_queues == 1) return 0; /* Do not direct RSS traffic to Q 0 which is our fallback queue */ for (i = 0; i < ARRAY_SIZE(cmd.indirection_table); i++) cmd.indirection_table[i] = 1 + (i % (mvm->trans->num_rx_queues - 1)); netdev_rss_key_fill(cmd.secret_key, sizeof(cmd.secret_key)); return iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd); } static int iwl_configure_rxq(struct iwl_mvm *mvm) { int i, num_queues, size, ret; struct iwl_rfh_queue_config *cmd; struct iwl_host_cmd hcmd = { .id = WIDE_ID(DATA_PATH_GROUP, RFH_QUEUE_CONFIG_CMD), .dataflags[0] = IWL_HCMD_DFL_NOCOPY, }; /* Do not configure default queue, it is configured via context info */ num_queues = mvm->trans->num_rx_queues - 1; size = struct_size(cmd, data, num_queues); cmd = kzalloc(size, GFP_KERNEL); if (!cmd) return -ENOMEM; cmd->num_queues = num_queues; for (i = 0; i < num_queues; i++) { struct iwl_trans_rxq_dma_data data; cmd->data[i].q_num = i + 1; iwl_trans_get_rxq_dma_data(mvm->trans, i + 1, &data); cmd->data[i].fr_bd_cb = cpu_to_le64(data.fr_bd_cb); cmd->data[i].urbd_stts_wrptr = cpu_to_le64(data.urbd_stts_wrptr); cmd->data[i].ur_bd_cb = cpu_to_le64(data.ur_bd_cb); cmd->data[i].fr_bd_wid = cpu_to_le32(data.fr_bd_wid); } hcmd.data[0] = cmd; hcmd.len[0] = size; ret = iwl_mvm_send_cmd(mvm, &hcmd); kfree(cmd); return ret; } static int iwl_mvm_send_dqa_cmd(struct iwl_mvm *mvm) { struct iwl_dqa_enable_cmd dqa_cmd = { .cmd_queue = cpu_to_le32(IWL_MVM_DQA_CMD_QUEUE), }; u32 cmd_id = iwl_cmd_id(DQA_ENABLE_CMD, DATA_PATH_GROUP, 0); int ret; ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd); if (ret) IWL_ERR(mvm, "Failed to send DQA enabling command: %d\n", ret); else IWL_DEBUG_FW(mvm, "Working in DQA mode\n"); return ret; } void iwl_mvm_mfu_assert_dump_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_mfu_assert_dump_notif *mfu_dump_notif = (void *)pkt->data; __le32 *dump_data = mfu_dump_notif->data; int n_words = le32_to_cpu(mfu_dump_notif->data_size) / sizeof(__le32); int i; if (mfu_dump_notif->index_num == 0) IWL_INFO(mvm, "MFUART assert id 0x%x occurred\n", le32_to_cpu(mfu_dump_notif->assert_id)); for (i = 0; i < n_words; i++) IWL_DEBUG_INFO(mvm, "MFUART assert dump, dword %u: 0x%08x\n", le16_to_cpu(mfu_dump_notif->index_num) * n_words + i, le32_to_cpu(dump_data[i])); } static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait, struct iwl_rx_packet *pkt, void *data) { struct iwl_mvm *mvm = container_of(notif_wait, struct iwl_mvm, 
notif_wait); struct iwl_mvm_alive_data *alive_data = data; struct mvm_alive_resp_v3 *palive3; struct mvm_alive_resp *palive; struct iwl_umac_alive *umac; struct iwl_lmac_alive *lmac1; struct iwl_lmac_alive *lmac2 = NULL; u16 status; u32 lmac_error_event_table, umac_error_event_table; if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive)) { palive = (void *)pkt->data; umac = &palive->umac_data; lmac1 = &palive->lmac_data[0]; lmac2 = &palive->lmac_data[1]; status = le16_to_cpu(palive->status); } else { palive3 = (void *)pkt->data; umac = &palive3->umac_data; lmac1 = &palive3->lmac_data; status = le16_to_cpu(palive3->status); } lmac_error_event_table = le32_to_cpu(lmac1->dbg_ptrs.error_event_table_ptr); iwl_fw_lmac1_set_alive_err_table(mvm->trans, lmac_error_event_table); if (lmac2) mvm->trans->dbg.lmac_error_event_table[1] = le32_to_cpu(lmac2->dbg_ptrs.error_event_table_ptr); umac_error_event_table = le32_to_cpu(umac->dbg_ptrs.error_info_addr); if (!umac_error_event_table) { mvm->support_umac_log = false; } else if (umac_error_event_table >= mvm->trans->cfg->min_umac_error_event_table) { mvm->support_umac_log = true; } else { IWL_ERR(mvm, "Not valid error log pointer 0x%08X for %s uCode\n", umac_error_event_table, (mvm->fwrt.cur_fw_img == IWL_UCODE_INIT) ? "Init" : "RT"); mvm->support_umac_log = false; } if (mvm->support_umac_log) iwl_fw_umac_set_alive_err_table(mvm->trans, umac_error_event_table); alive_data->scd_base_addr = le32_to_cpu(lmac1->dbg_ptrs.scd_base_ptr); alive_data->valid = status == IWL_ALIVE_STATUS_OK; #ifdef CPTCFG_IWLWIFI_DEVICE_TESTMODE iwl_tm_set_fw_ver(mvm->trans, le32_to_cpu(lmac1->ucode_major), le32_to_cpu(lmac1->ucode_minor)); #endif IWL_DEBUG_FW(mvm, "Alive ucode status 0x%04x revision 0x%01X 0x%01X\n", status, lmac1->ver_type, lmac1->ver_subtype); if (lmac2) IWL_DEBUG_FW(mvm, "Alive ucode CDB\n"); IWL_DEBUG_FW(mvm, "UMAC version: Major - 0x%x, Minor - 0x%x\n", le32_to_cpu(umac->umac_major), le32_to_cpu(umac->umac_minor)); iwl_fwrt_update_fw_versions(&mvm->fwrt, lmac1, umac); return true; } static bool iwl_wait_init_complete(struct iwl_notif_wait_data *notif_wait, struct iwl_rx_packet *pkt, void *data) { WARN_ON(pkt->hdr.cmd != INIT_COMPLETE_NOTIF); return true; } static bool iwl_wait_phy_db_entry(struct iwl_notif_wait_data *notif_wait, struct iwl_rx_packet *pkt, void *data) { struct iwl_phy_db *phy_db = data; if (pkt->hdr.cmd != CALIB_RES_NOTIF_PHY_DB) { WARN_ON(pkt->hdr.cmd != INIT_COMPLETE_NOTIF); return true; } WARN_ON(iwl_phy_db_set_section(phy_db, pkt)); return false; } static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, enum iwl_ucode_type ucode_type) { struct iwl_notification_wait alive_wait; struct iwl_mvm_alive_data alive_data = {}; const struct fw_img *fw; int ret; enum iwl_ucode_type old_type = mvm->fwrt.cur_fw_img; static const u16 alive_cmd[] = { MVM_ALIVE }; bool run_in_rfkill = ucode_type == IWL_UCODE_INIT || iwl_mvm_has_unified_ucode(mvm); if (ucode_type == IWL_UCODE_REGULAR && iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE) && !(fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED))) fw = iwl_get_ucode_image(mvm->fw, IWL_UCODE_REGULAR_USNIFFER); else fw = iwl_get_ucode_image(mvm->fw, ucode_type); if (WARN_ON(!fw)) return -EINVAL; iwl_fw_set_current_image(&mvm->fwrt, ucode_type); clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status); iwl_init_notification_wait(&mvm->notif_wait, &alive_wait, alive_cmd, ARRAY_SIZE(alive_cmd), iwl_alive_fn, &alive_data); /* * We want to load the INIT firmware even in RFKILL * For 
the unified firmware case, the ucode_type is not * INIT, but we still need to run it. */ ret = iwl_trans_start_fw(mvm->trans, fw, run_in_rfkill); if (ret) { iwl_fw_set_current_image(&mvm->fwrt, old_type); iwl_remove_notification(&mvm->notif_wait, &alive_wait); return ret; } /* * Some things may run in the background now, but we * just wait for the ALIVE notification here. */ ret = iwl_wait_notification(&mvm->notif_wait, &alive_wait, MVM_UCODE_ALIVE_TIMEOUT); if (ret) { struct iwl_trans *trans = mvm->trans; if (ret == -ETIMEDOUT) iwl_fw_dbg_error_collect(&mvm->fwrt, FW_DBG_TRIGGER_ALIVE_TIMEOUT); if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000) IWL_ERR(mvm, "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n", iwl_read_umac_prph(trans, UMAG_SB_CPU_1_STATUS), iwl_read_umac_prph(trans, UMAG_SB_CPU_2_STATUS)); else if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000) IWL_ERR(mvm, "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n", iwl_read_prph(trans, SB_CPU_1_STATUS), iwl_read_prph(trans, SB_CPU_2_STATUS)); iwl_fw_set_current_image(&mvm->fwrt, old_type); return ret; } if (!alive_data.valid) { IWL_ERR(mvm, "Loaded ucode is not valid!\n"); iwl_fw_set_current_image(&mvm->fwrt, old_type); return -EIO; } iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr); /* * Note: all the queues are enabled as part of the interface * initialization, but in firmware restart scenarios they * could be stopped, so wake them up. In firmware restart, * mac80211 will have the queues stopped as well until the * reconfiguration completes. During normal startup, they * will be empty. */ memset(&mvm->queue_info, 0, sizeof(mvm->queue_info)); /* * Set a 'fake' TID for the command queue, since we use the * hweight() of the tid_bitmap as a refcount now. Not that * we ever even consider the command queue as one we might * want to reuse, but be safe nevertheless. 
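	 * (Data TIDs only ever occupy bits 0-7 of tid_bitmap, with
	 * IWL_MAX_TID_COUNT marking non-QoS traffic, so a bit above
	 * that range keeps the hweight()/refcount non-zero without
	 * ever colliding with a real TID.)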
*/ mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].tid_bitmap = BIT(IWL_MAX_TID_COUNT + 2); set_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status); #ifdef CPTCFG_IWLWIFI_DEBUGFS iwl_fw_set_dbg_rec_on(&mvm->fwrt); #endif return 0; } static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) { struct iwl_notification_wait init_wait; struct iwl_nvm_access_complete_cmd nvm_complete = {}; struct iwl_init_extended_cfg_cmd init_cfg = { .init_flags = cpu_to_le32(BIT(IWL_INIT_NVM)), }; static const u16 init_complete[] = { INIT_COMPLETE_NOTIF, }; int ret; lockdep_assert_held(&mvm->mutex); mvm->rfkill_safe_init_done = false; iwl_init_notification_wait(&mvm->notif_wait, &init_wait, init_complete, ARRAY_SIZE(init_complete), iwl_wait_init_complete, NULL); iwl_fw_dbg_apply_point(&mvm->fwrt, IWL_FW_INI_APPLY_EARLY); /* Will also start the device */ ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR); if (ret) { IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret); goto error; } iwl_fw_dbg_apply_point(&mvm->fwrt, IWL_FW_INI_APPLY_AFTER_ALIVE); /* Send init config command to mark that we are sending NVM access * commands */ ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(SYSTEM_GROUP, INIT_EXTENDED_CFG_CMD), CMD_SEND_IN_RFKILL, sizeof(init_cfg), &init_cfg); if (ret) { IWL_ERR(mvm, "Failed to run init config command: %d\n", ret); goto error; } /* Load NVM to NIC if needed */ if (mvm->nvm_file_name) { iwl_read_external_nvm(mvm->trans, mvm->nvm_file_name, mvm->nvm_sections); iwl_mvm_load_nvm_to_nic(mvm); } if (IWL_MVM_PARSE_NVM && read_nvm) { ret = iwl_nvm_init(mvm); if (ret) { IWL_ERR(mvm, "Failed to read NVM: %d\n", ret); goto error; } } ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(REGULATORY_AND_NVM_GROUP, NVM_ACCESS_COMPLETE), CMD_SEND_IN_RFKILL, sizeof(nvm_complete), &nvm_complete); if (ret) { IWL_ERR(mvm, "Failed to run complete NVM access: %d\n", ret); goto error; } /* We wait for the INIT complete notification */ ret = iwl_wait_notification(&mvm->notif_wait, &init_wait, MVM_UCODE_ALIVE_TIMEOUT); if (ret) return ret; /* Read the NVM only at driver load time, no need to do this twice */ if (!IWL_MVM_PARSE_NVM && read_nvm) { mvm->nvm_data = iwl_get_nvm(mvm->trans, mvm->fw); if (IS_ERR(mvm->nvm_data)) { ret = PTR_ERR(mvm->nvm_data); mvm->nvm_data = NULL; IWL_ERR(mvm, "Failed to read NVM: %d\n", ret); return ret; } } mvm->rfkill_safe_init_done = true; return 0; error: iwl_remove_notification(&mvm->notif_wait, &init_wait); return ret; } static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm) { struct iwl_phy_cfg_cmd phy_cfg_cmd; enum iwl_ucode_type ucode_type = mvm->fwrt.cur_fw_img; #ifdef CPTCFG_IWLWIFI_SUPPORT_DEBUG_OVERRIDES u32 override_mask, flow_override, flow_src; u32 event_override, event_src; const struct iwl_tlv_calib_ctrl *default_calib = &mvm->fw->default_calib[ucode_type]; #endif /* Set parameters */ phy_cfg_cmd.phy_cfg = cpu_to_le32(iwl_mvm_get_phy_config(mvm)); /* set flags extra PHY configuration flags from the device's cfg */ phy_cfg_cmd.phy_cfg |= cpu_to_le32(mvm->cfg->extra_phy_cfg_flags); phy_cfg_cmd.calib_control.event_trigger = mvm->fw->default_calib[ucode_type].event_trigger; phy_cfg_cmd.calib_control.flow_trigger = mvm->fw->default_calib[ucode_type].flow_trigger; #ifdef CPTCFG_IWLWIFI_SUPPORT_DEBUG_OVERRIDES override_mask = mvm->trans->dbg_cfg.MVM_CALIB_OVERRIDE_CONTROL; if (override_mask) { IWL_DEBUG_INFO(mvm, "calib settings overriden by user, control=0x%x\n", override_mask); switch (ucode_type) { case IWL_UCODE_INIT: flow_override = mvm->trans->dbg_cfg.MVM_CALIB_INIT_FLOW; 
event_override = mvm->trans->dbg_cfg.MVM_CALIB_INIT_EVENT; IWL_DEBUG_CALIB(mvm, "INIT: flow_override %x, event_override %x\n", flow_override, event_override); break; case IWL_UCODE_REGULAR: flow_override = mvm->trans->dbg_cfg.MVM_CALIB_D0_FLOW; event_override = mvm->trans->dbg_cfg.MVM_CALIB_D0_EVENT; IWL_DEBUG_CALIB(mvm, "REGULAR: flow_override %x, event_override %x\n", flow_override, event_override); break; case IWL_UCODE_WOWLAN: flow_override = mvm->trans->dbg_cfg.MVM_CALIB_D3_FLOW; event_override = mvm->trans->dbg_cfg.MVM_CALIB_D3_EVENT; IWL_DEBUG_CALIB(mvm, "WOWLAN: flow_override %x, event_override %x\n", flow_override, event_override); break; default: IWL_ERR(mvm, "ERROR: calib case isn't valid\n"); flow_override = 0; event_override = 0; break; } IWL_DEBUG_CALIB(mvm, "override_mask %x\n", override_mask); /* find the new calib setting for the flow calibrations */ flow_src = le32_to_cpu(default_calib->flow_trigger); IWL_DEBUG_CALIB(mvm, "flow_src %x\n", flow_src); flow_override &= override_mask; flow_src &= ~override_mask; flow_override |= flow_src; phy_cfg_cmd.calib_control.flow_trigger = cpu_to_le32(flow_override); IWL_DEBUG_CALIB(mvm, "new flow calib setting = %x\n", flow_override); /* find the new calib setting for the event calibrations */ event_src = le32_to_cpu(default_calib->event_trigger); IWL_DEBUG_CALIB(mvm, "event_src %x\n", event_src); event_override &= override_mask; event_src &= ~override_mask; event_override |= event_src; phy_cfg_cmd.calib_control.event_trigger = cpu_to_le32(event_override); IWL_DEBUG_CALIB(mvm, "new event calib setting = %x\n", event_override); } #endif IWL_DEBUG_INFO(mvm, "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg); return iwl_mvm_send_cmd_pdu(mvm, PHY_CONFIGURATION_CMD, 0, sizeof(phy_cfg_cmd), &phy_cfg_cmd); } int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) { struct iwl_notification_wait calib_wait; static const u16 init_complete[] = { INIT_COMPLETE_NOTIF, CALIB_RES_NOTIF_PHY_DB }; int ret; if (iwl_mvm_has_unified_ucode(mvm)) return iwl_run_unified_mvm_ucode(mvm, true); lockdep_assert_held(&mvm->mutex); mvm->rfkill_safe_init_done = false; iwl_init_notification_wait(&mvm->notif_wait, &calib_wait, init_complete, ARRAY_SIZE(init_complete), iwl_wait_phy_db_entry, mvm->phy_db); /* Will also start the device */ ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_INIT); if (ret) { IWL_ERR(mvm, "Failed to start INIT ucode: %d\n", ret); goto remove_notif; } #ifdef CPTCFG_IWLWIFI_DEVICE_TESTMODE iwl_dnt_start(mvm->trans); #endif if (mvm->cfg->device_family < IWL_DEVICE_FAMILY_8000) { ret = iwl_mvm_send_bt_init_conf(mvm); if (ret) goto remove_notif; } /* Read the NVM only at driver load time, no need to do this twice */ if (read_nvm) { ret = iwl_nvm_init(mvm); if (ret) { IWL_ERR(mvm, "Failed to read NVM: %d\n", ret); goto remove_notif; } } /* In case we read the NVM from external file, load it to the NIC */ if (mvm->nvm_file_name) iwl_mvm_load_nvm_to_nic(mvm); WARN_ONCE(mvm->nvm_data->nvm_version < mvm->trans->cfg->nvm_ver, "Too old NVM version (0x%0x, required = 0x%0x)", mvm->nvm_data->nvm_version, mvm->trans->cfg->nvm_ver); /* * abort after reading the nvm in case RF Kill is on, we will complete * the init seq later when RF kill will switch to off */ if (iwl_mvm_is_radio_hw_killed(mvm)) { IWL_DEBUG_RF_KILL(mvm, "jump over all phy activities due to RF kill\n"); goto remove_notif; } mvm->rfkill_safe_init_done = true; /* Send TX valid antennas before triggering calibrations */ ret = iwl_send_tx_ant_cfg(mvm, 
iwl_mvm_get_valid_tx_ant(mvm)); if (ret) goto remove_notif; ret = iwl_send_phy_cfg_cmd(mvm); if (ret) { IWL_ERR(mvm, "Failed to run INIT calibrations: %d\n", ret); goto remove_notif; } /* * Some things may run in the background now, but we * just wait for the calibration complete notification. */ ret = iwl_wait_notification(&mvm->notif_wait, &calib_wait, MVM_UCODE_CALIB_TIMEOUT); if (!ret) goto out; if (iwl_mvm_is_radio_hw_killed(mvm)) { IWL_DEBUG_RF_KILL(mvm, "RFKILL while calibrating.\n"); ret = 0; } else { IWL_ERR(mvm, "Failed to run INIT calibrations: %d\n", ret); } goto out; remove_notif: iwl_remove_notification(&mvm->notif_wait, &calib_wait); out: mvm->rfkill_safe_init_done = false; if (iwlmvm_mod_params.init_dbg && !mvm->nvm_data) { /* we want to debug INIT and we have no NVM - fake */ mvm->nvm_data = kzalloc(sizeof(struct iwl_nvm_data) + sizeof(struct ieee80211_channel) + sizeof(struct ieee80211_rate), GFP_KERNEL); if (!mvm->nvm_data) return -ENOMEM; mvm->nvm_data->bands[0].channels = mvm->nvm_data->channels; mvm->nvm_data->bands[0].n_channels = 1; mvm->nvm_data->bands[0].n_bitrates = 1; mvm->nvm_data->bands[0].bitrates = (void *)mvm->nvm_data->channels + 1; mvm->nvm_data->bands[0].bitrates->hw_value = 10; } return ret; } static int iwl_mvm_config_ltr(struct iwl_mvm *mvm) { struct iwl_ltr_config_cmd cmd = { .flags = cpu_to_le32(LTR_CFG_FLAG_FEATURE_ENABLE), }; if (!mvm->trans->ltr_enabled) return 0; return iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0, sizeof(cmd), &cmd); } #ifdef CONFIG_ACPI static inline int iwl_mvm_sar_set_profile(struct iwl_mvm *mvm, union acpi_object *table, struct iwl_mvm_sar_profile *profile, bool enabled) { int i; profile->enabled = enabled; for (i = 0; i < ACPI_SAR_TABLE_SIZE; i++) { if ((table[i].type != ACPI_TYPE_INTEGER) || (table[i].integer.value > U8_MAX)) return -EINVAL; profile->table[i] = table[i].integer.value; } return 0; } static int iwl_mvm_sar_get_wrds_table(struct iwl_mvm *mvm) { union acpi_object *wifi_pkg, *table, *data; bool enabled; int ret, tbl_rev; data = iwl_acpi_get_object(mvm->dev, ACPI_WRDS_METHOD); if (IS_ERR(data)) return PTR_ERR(data); wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data, ACPI_WRDS_WIFI_DATA_SIZE, &tbl_rev); if (IS_ERR(wifi_pkg) || tbl_rev != 0) { ret = PTR_ERR(wifi_pkg); goto out_free; } if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) { ret = -EINVAL; goto out_free; } enabled = !!(wifi_pkg->package.elements[1].integer.value); /* position of the actual table */ table = &wifi_pkg->package.elements[2]; /* The profile from WRDS is officially profile 1, but goes * into sar_profiles[0] (because we don't have a profile 0). */ ret = iwl_mvm_sar_set_profile(mvm, table, &mvm->sar_profiles[0], enabled); out_free: kfree(data); return ret; } static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm) { union acpi_object *wifi_pkg, *data; bool enabled; int i, n_profiles, ret, tbl_rev; data = iwl_acpi_get_object(mvm->dev, ACPI_EWRD_METHOD); if (IS_ERR(data)) return PTR_ERR(data); wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data, ACPI_EWRD_WIFI_DATA_SIZE, &tbl_rev); if (IS_ERR(wifi_pkg) || tbl_rev != 0) { ret = PTR_ERR(wifi_pkg); goto out_free; } if ((wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) || (wifi_pkg->package.elements[2].type != ACPI_TYPE_INTEGER)) { ret = -EINVAL; goto out_free; } enabled = !!(wifi_pkg->package.elements[1].integer.value); n_profiles = wifi_pkg->package.elements[2].integer.value; /* * Check the validity of n_profiles. 
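	 * (ACPI_SAR_PROFILE_NUM is 4, so EWRD can describe at most
	 * three additional profiles on top of the one from WRDS.)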
The EWRD profiles start
	 * from index 1, so the maximum value allowed here is
	 * ACPI_SAR_PROFILE_NUM - 1.
	 */
	if (n_profiles <= 0 || n_profiles >= ACPI_SAR_PROFILE_NUM) {
		ret = -EINVAL;
		goto out_free;
	}

	for (i = 0; i < n_profiles; i++) {
		/* the tables start at element 3 and are ACPI_SAR_TABLE_SIZE
		 * entries each; compute the position per profile instead of
		 * keeping it in a function-local static, which would keep
		 * advancing across repeated firmware starts and end up
		 * reading past the package
		 */
		int pos = 3 + i * ACPI_SAR_TABLE_SIZE;

		/* The EWRD profiles officially go from 2 to 4, but we
		 * save them in sar_profiles[1-3] (because we don't
		 * have profile 0). So in the array we start from 1.
		 */
		ret = iwl_mvm_sar_set_profile(mvm,
					      &wifi_pkg->package.elements[pos],
					      &mvm->sar_profiles[i + 1],
					      enabled);
		if (ret < 0)
			break;
	}

out_free:
	kfree(data);
	return ret;
}

static int iwl_mvm_sar_get_wgds_table(struct iwl_mvm *mvm)
{
	union acpi_object *wifi_pkg, *data;
	int i, j, ret, tbl_rev;
	int idx = 1;

	data = iwl_acpi_get_object(mvm->dev, ACPI_WGDS_METHOD);
	if (IS_ERR(data))
		return PTR_ERR(data);

	wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data,
					 ACPI_WGDS_WIFI_DATA_SIZE, &tbl_rev);
	if (IS_ERR(wifi_pkg) || tbl_rev > 1) {
		ret = PTR_ERR(wifi_pkg);
		goto out_free;
	}

	mvm->geo_rev = tbl_rev;
	for (i = 0; i < ACPI_NUM_GEO_PROFILES; i++) {
		for (j = 0; j < ACPI_GEO_TABLE_SIZE; j++) {
			union acpi_object *entry;

			entry = &wifi_pkg->package.elements[idx++];
			if ((entry->type != ACPI_TYPE_INTEGER) ||
			    (entry->integer.value > U8_MAX)) {
				ret = -EINVAL;
				goto out_free;
			}

			mvm->geo_profiles[i].values[j] = entry->integer.value;
		}
	}
	ret = 0;
out_free:
	kfree(data);
	return ret;
}

int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
{
	union {
		struct iwl_dev_tx_power_cmd v5;
		struct iwl_dev_tx_power_cmd_v4 v4;
	} cmd;
	int i, j, idx;
	int profs[ACPI_SAR_NUM_CHAIN_LIMITS] = { prof_a, prof_b };
	int len;

	BUILD_BUG_ON(ACPI_SAR_NUM_CHAIN_LIMITS < 2);
	BUILD_BUG_ON(ACPI_SAR_NUM_CHAIN_LIMITS * ACPI_SAR_NUM_SUB_BANDS !=
		     ACPI_SAR_TABLE_SIZE);

	cmd.v5.v3.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS);

	if (fw_has_api(&mvm->fw->ucode_capa,
		       IWL_UCODE_TLV_API_REDUCE_TX_POWER))
		len = sizeof(cmd.v5);
	else if (fw_has_capa(&mvm->fw->ucode_capa,
			     IWL_UCODE_TLV_CAPA_TX_POWER_ACK))
		len = sizeof(cmd.v4);
	else
		len = sizeof(cmd.v4.v3);

	for (i = 0; i < ACPI_SAR_NUM_CHAIN_LIMITS; i++) {
		struct iwl_mvm_sar_profile *prof;

		/* don't allow SAR to be disabled (profile 0 means disable) */
		if (profs[i] == 0)
			return -EPERM;

		/* we are off by one, so allow up to ACPI_SAR_PROFILE_NUM */
		if (profs[i] > ACPI_SAR_PROFILE_NUM)
			return -EINVAL;

		/* profiles go from 1 to 4, so decrement to access the array */
		prof = &mvm->sar_profiles[profs[i] - 1];

		/* if the profile is disabled, do nothing */
		if (!prof->enabled) {
			IWL_DEBUG_RADIO(mvm, "SAR profile %d is disabled.\n",
					profs[i]);
			/* if one of the profiles is disabled, we fail all */
			return -ENOENT;
		}

		IWL_DEBUG_INFO(mvm,
			       "SAR EWRD: chain %d profile index %d\n",
			       i, profs[i]);
		IWL_DEBUG_RADIO(mvm, " Chain[%d]:\n", i);
		for (j = 0; j < ACPI_SAR_NUM_SUB_BANDS; j++) {
			idx = (i * ACPI_SAR_NUM_SUB_BANDS) + j;
			cmd.v5.v3.per_chain_restriction[i][j] =
				cpu_to_le16(prof->table[idx]);
			IWL_DEBUG_RADIO(mvm, " Band[%d] = %d * .125dBm\n",
					j, prof->table[idx]);
		}
	}

#ifdef CPTCFG_IWLMVM_VENDOR_CMDS
	mvm->sar_chain_a_profile = prof_a;
	mvm->sar_chain_b_profile = prof_b;
#endif

	IWL_DEBUG_RADIO(mvm, "Sending REDUCE_TX_POWER_CMD per chain\n");

	return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
}

static bool iwl_mvm_sar_geo_support(struct iwl_mvm *mvm)
{
	/*
	 * The GEO_TX_POWER_LIMIT command is not supported on earlier
	 * firmware versions.
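	 * (In practice this means -41.ucode and later images, which is
	 * what the version check below tests for.)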
Unfortunately, we don't have a TLV API * flag to rely on, so rely on the major version which is in * the first byte of ucode_ver. */ return IWL_UCODE_SERIAL(mvm->fw->ucode_ver) >= 41; } int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm) { struct iwl_geo_tx_power_profiles_resp *resp; int ret; u16 len; void *data; struct iwl_geo_tx_power_profiles_cmd geo_cmd; struct iwl_geo_tx_power_profiles_cmd_v1 geo_cmd_v1; struct iwl_host_cmd cmd; if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_SAR_TABLE_VER)) { geo_cmd.ops = cpu_to_le32(IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE); len = sizeof(geo_cmd); data = &geo_cmd; } else { geo_cmd_v1.ops = cpu_to_le32(IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE); len = sizeof(geo_cmd_v1); data = &geo_cmd_v1; } cmd = (struct iwl_host_cmd){ .id = WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT), .len = { len, }, .flags = CMD_WANT_SKB, .data = { data }, }; if (!iwl_mvm_sar_geo_support(mvm)) return -EOPNOTSUPP; ret = iwl_mvm_send_cmd(mvm, &cmd); if (ret) { IWL_ERR(mvm, "Failed to get geographic profile info %d\n", ret); return ret; } resp = (void *)cmd.resp_pkt->data; ret = le32_to_cpu(resp->profile_idx); if (WARN_ON(ret > ACPI_NUM_GEO_PROFILES)) { ret = -EIO; IWL_WARN(mvm, "Invalid geographic profile idx (%d)\n", ret); } iwl_free_resp(&cmd); return ret; } static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm) { struct iwl_geo_tx_power_profiles_cmd cmd = { .ops = cpu_to_le32(IWL_PER_CHAIN_OFFSET_SET_TABLES), }; int ret, i, j; u16 cmd_wide_id = WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT); if (!iwl_mvm_sar_geo_support(mvm)) return 0; ret = iwl_mvm_sar_get_wgds_table(mvm); if (ret < 0) { IWL_DEBUG_RADIO(mvm, "Geo SAR BIOS table invalid or unavailable. (%d)\n", ret); /* we don't fail if the table is not available */ return 0; } IWL_DEBUG_RADIO(mvm, "Sending GEO_TX_POWER_LIMIT\n"); BUILD_BUG_ON(ACPI_NUM_GEO_PROFILES * ACPI_WGDS_NUM_BANDS * ACPI_WGDS_TABLE_SIZE + 1 != ACPI_WGDS_WIFI_DATA_SIZE); BUILD_BUG_ON(ACPI_NUM_GEO_PROFILES > IWL_NUM_GEO_PROFILES); for (i = 0; i < ACPI_NUM_GEO_PROFILES; i++) { struct iwl_per_chain_offset *chain = (struct iwl_per_chain_offset *)&cmd.table[i]; for (j = 0; j < ACPI_WGDS_NUM_BANDS; j++) { u8 *value; value = &mvm->geo_profiles[i].values[j * ACPI_GEO_PER_CHAIN_SIZE]; chain[j].max_tx_power = cpu_to_le16(value[0]); chain[j].chain_a = value[1]; chain[j].chain_b = value[2]; IWL_DEBUG_RADIO(mvm, "SAR geographic profile[%d] Band[%d]: chain A = %d chain B = %d max_tx_power = %d\n", i, j, value[1], value[2], value[0]); } } cmd.table_revision = cpu_to_le32(mvm->geo_rev); if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_SAR_TABLE_VER)) { return iwl_mvm_send_cmd_pdu(mvm, cmd_wide_id, 0, sizeof(struct iwl_geo_tx_power_profiles_cmd_v1), &cmd); } return iwl_mvm_send_cmd_pdu(mvm, cmd_wide_id, 0, sizeof(cmd), &cmd); } static int iwl_mvm_get_ppag_table(struct iwl_mvm *mvm) { union acpi_object *wifi_pkg, *data, *enabled; int i, j, ret, tbl_rev; int idx = 2; mvm->ppag_table.enabled = cpu_to_le32(0); data = iwl_acpi_get_object(mvm->dev, ACPI_PPAG_METHOD); if (IS_ERR(data)) return PTR_ERR(data); wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data, ACPI_PPAG_WIFI_DATA_SIZE, &tbl_rev); if (IS_ERR(wifi_pkg) || tbl_rev != 0) { ret = PTR_ERR(wifi_pkg); goto out_free; } enabled = &wifi_pkg->package.elements[1]; if (enabled->type != ACPI_TYPE_INTEGER || (enabled->integer.value != 0 && enabled->integer.value != 1)) { ret = -EINVAL; goto out_free; } mvm->ppag_table.enabled = cpu_to_le32(enabled->integer.value); if (!mvm->ppag_table.enabled) { ret = 0; goto out_free; } /* * 
read, verify gain values and save them into the PPAG table. * first sub-band (j=0) corresponds to Low-Band (2.4GHz), and the * following sub-bands to High-Band (5GHz). */ for (i = 0; i < ACPI_PPAG_NUM_CHAINS; i++) { for (j = 0; j < ACPI_PPAG_NUM_SUB_BANDS; j++) { union acpi_object *ent; ent = &wifi_pkg->package.elements[idx++]; if (ent->type != ACPI_TYPE_INTEGER || (j == 0 && ent->integer.value > ACPI_PPAG_MAX_LB) || (j == 0 && ent->integer.value < ACPI_PPAG_MIN_LB) || (j != 0 && ent->integer.value > ACPI_PPAG_MAX_HB) || (j != 0 && ent->integer.value < ACPI_PPAG_MIN_HB)) { mvm->ppag_table.enabled = cpu_to_le32(0); ret = -EINVAL; goto out_free; } mvm->ppag_table.gain[i][j] = ent->integer.value; } } ret = 0; out_free: kfree(data); return ret; } int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm) { int i, j, ret; if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SET_PPAG)) { IWL_DEBUG_RADIO(mvm, "PPAG capability not supported by FW, command not sent.\n"); return 0; } IWL_DEBUG_RADIO(mvm, "Sending PER_PLATFORM_ANT_GAIN_CMD\n"); IWL_DEBUG_RADIO(mvm, "PPAG is %s\n", mvm->ppag_table.enabled ? "enabled" : "disabled"); for (i = 0; i < ACPI_PPAG_NUM_CHAINS; i++) { for (j = 0; j < ACPI_PPAG_NUM_SUB_BANDS; j++) { IWL_DEBUG_RADIO(mvm, "PPAG table: chain[%d] band[%d]: gain = %d\n", i, j, mvm->ppag_table.gain[i][j]); } } ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(PHY_OPS_GROUP, PER_PLATFORM_ANT_GAIN_CMD), 0, sizeof(mvm->ppag_table), &mvm->ppag_table); if (ret < 0) IWL_ERR(mvm, "failed to send PER_PLATFORM_ANT_GAIN_CMD (%d)\n", ret); return ret; } static int iwl_mvm_ppag_init(struct iwl_mvm *mvm) { int ret; ret = iwl_mvm_get_ppag_table(mvm); if (ret < 0) { IWL_DEBUG_RADIO(mvm, "PPAG BIOS table invalid or unavailable. (%d)\n", ret); return 0; } return iwl_mvm_ppag_send_cmd(mvm); } #else /* CONFIG_ACPI */ static int iwl_mvm_sar_get_wrds_table(struct iwl_mvm *mvm) { return -ENOENT; } static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm) { return -ENOENT; } static int iwl_mvm_sar_get_wgds_table(struct iwl_mvm *mvm) { return -ENOENT; } static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm) { return 0; } int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b) { return -ENOENT; } int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm) { return -ENOENT; } static int iwl_mvm_get_ppag_table(struct iwl_mvm *mvm) { return -ENOENT; } static int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm) { return -ENOENT; } static int iwl_mvm_ppag_init(struct iwl_mvm *mvm) { return -ENOENT; } #endif /* CONFIG_ACPI */ void iwl_mvm_send_recovery_cmd(struct iwl_mvm *mvm, u32 flags) { u32 error_log_size = mvm->fw->ucode_capa.error_log_size; int ret; u32 resp; struct iwl_fw_error_recovery_cmd recovery_cmd = { .flags = cpu_to_le32(flags), .buf_size = 0, }; struct iwl_host_cmd host_cmd = { .id = WIDE_ID(SYSTEM_GROUP, FW_ERROR_RECOVERY_CMD), .flags = CMD_WANT_SKB, .data = {&recovery_cmd, }, .len = {sizeof(recovery_cmd), }, }; /* no error log was defined in TLV */ if (!error_log_size) return; if (flags & ERROR_RECOVERY_UPDATE_DB) { /* no buf was allocated while HW reset */ if (!mvm->error_recovery_buf) return; host_cmd.data[1] = mvm->error_recovery_buf; host_cmd.len[1] = error_log_size; host_cmd.dataflags[1] = IWL_HCMD_DFL_NOCOPY; recovery_cmd.buf_size = cpu_to_le32(error_log_size); } ret = iwl_mvm_send_cmd(mvm, &host_cmd); kfree(mvm->error_recovery_buf); mvm->error_recovery_buf = NULL; if (ret) { IWL_ERR(mvm, "Failed to send recovery cmd %d\n", ret); return; } /* skb respond is only relevant in ERROR_RECOVERY_UPDATE_DB */ 
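	/* resp_pkt is safe to dereference here: the command was sent with
	 * CMD_WANT_SKB set above, so a successful send carries a response
	 */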
if (flags & ERROR_RECOVERY_UPDATE_DB) { resp = le32_to_cpu(*(__le32 *)host_cmd.resp_pkt->data); if (resp) IWL_ERR(mvm, "Failed to send recovery cmd blob was invalid %d\n", resp); } } static int iwl_mvm_sar_init(struct iwl_mvm *mvm) { int ret; ret = iwl_mvm_sar_get_wrds_table(mvm); if (ret < 0) { IWL_DEBUG_RADIO(mvm, "WRDS SAR BIOS table invalid or unavailable. (%d)\n", ret); /* * If not available, don't fail and don't bother with EWRD. * Return 1 to tell that we can't use WGDS either. */ return 1; } ret = iwl_mvm_sar_get_ewrd_table(mvm); /* if EWRD is not available, we can still use WRDS, so don't fail */ if (ret < 0) IWL_DEBUG_RADIO(mvm, "EWRD SAR BIOS table invalid or unavailable. (%d)\n", ret); #if defined(CPTCFG_IWLMVM_VENDOR_CMDS) && defined(CONFIG_ACPI) /* * if no profile was chosen by the user yet, choose profile 1 (WRDS) as * default for both chains */ if (mvm->sar_chain_a_profile && mvm->sar_chain_b_profile) ret = iwl_mvm_sar_select_profile(mvm, mvm->sar_chain_a_profile, mvm->sar_chain_b_profile); else #endif ret = iwl_mvm_sar_select_profile(mvm, 1, 1); /* * If we don't have profile 0 from BIOS, just skip it. This * means that SAR Geo will not be enabled either, even if we * have other valid profiles. */ if (ret == -ENOENT) return 1; return ret; } static int iwl_mvm_load_rt_fw(struct iwl_mvm *mvm) { int ret; if (iwl_mvm_has_unified_ucode(mvm)) return iwl_run_unified_mvm_ucode(mvm, false); ret = iwl_run_init_mvm_ucode(mvm, false); if (ret) { IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret); if (iwlmvm_mod_params.init_dbg) return 0; return ret; } iwl_fw_dbg_stop_sync(&mvm->fwrt); iwl_trans_stop_device(mvm->trans); ret = iwl_trans_start_hw(mvm->trans); if (ret) return ret; iwl_fw_dbg_apply_point(&mvm->fwrt, IWL_FW_INI_APPLY_EARLY); mvm->rfkill_safe_init_done = false; ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR); if (ret) return ret; mvm->rfkill_safe_init_done = true; iwl_fw_dbg_apply_point(&mvm->fwrt, IWL_FW_INI_APPLY_AFTER_ALIVE); return iwl_init_paging(&mvm->fwrt, mvm->fwrt.cur_fw_img); } int iwl_mvm_up(struct iwl_mvm *mvm) { int ret, i; struct ieee80211_channel *chan; struct cfg80211_chan_def chandef; lockdep_assert_held(&mvm->mutex); ret = iwl_trans_start_hw(mvm->trans); if (ret) return ret; ret = iwl_mvm_load_rt_fw(mvm); if (ret) { IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret); if (ret != -ERFKILL) iwl_fw_dbg_error_collect(&mvm->fwrt, FW_DBG_TRIGGER_DRIVER); goto error; } iwl_get_shared_mem_conf(&mvm->fwrt); ret = iwl_mvm_sf_update(mvm, NULL, false); if (ret) IWL_ERR(mvm, "Failed to initialize Smart Fifo\n"); #ifdef CPTCFG_IWLWIFI_DEVICE_TESTMODE iwl_dnt_start(mvm->trans); #endif if (!mvm->trans->dbg.ini_valid) { mvm->fwrt.dump.conf = FW_DBG_INVALID; /* if we have a destination, assume EARLY START */ if (mvm->fw->dbg.dest_tlv) mvm->fwrt.dump.conf = FW_DBG_START_FROM_ALIVE; iwl_fw_start_dbg_conf(&mvm->fwrt, FW_DBG_START_FROM_ALIVE); } #ifdef CPTCFG_MAC80211_LATENCY_MEASUREMENTS if (iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TX_LATENCY)) { struct iwl_fw_dbg_trigger_tlv *trig; struct iwl_fw_dbg_trigger_tx_latency *thrshold_trig; u32 thrshld; u32 vif; u32 iface = 0; u16 tid; u16 mode; u32 window; trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TX_LATENCY); vif = le32_to_cpu(trig->vif_type); if (vif == IWL_FW_DBG_CONF_VIF_ANY) { iface = BIT(IEEE80211_TX_LATENCY_BSS); iface |= BIT(IEEE80211_TX_LATENCY_P2P); } else if (vif <= IWL_FW_DBG_CONF_VIF_AP) { iface = BIT(IEEE80211_TX_LATENCY_BSS); } else { iface = BIT(IEEE80211_TX_LATENCY_P2P); } 
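		/* decode the rest of the trigger TLV and hand the latency
		 * thresholds down to mac80211
		 */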
thrshold_trig = (void *)trig->data; thrshld = le32_to_cpu(thrshold_trig->thrshold); tid = le16_to_cpu(thrshold_trig->tid_bitmap); mode = le16_to_cpu(thrshold_trig->mode); window = le32_to_cpu(thrshold_trig->window); IWL_DEBUG_INFO(mvm, "Tx latency trigger cfg: threshold = %u, tid = 0x%x, mode = 0x%x, window = %u vif = 0x%x\n", thrshld, tid, mode, window, iface); ieee80211_tx_lat_thrshld_cfg(mvm->hw, thrshld, tid, window, mode, iface); } #endif ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm)); if (ret) goto error; if (!iwl_mvm_has_unified_ucode(mvm)) { /* Send phy db control command and then phy db calibration */ ret = iwl_send_phy_db_data(mvm->phy_db); if (ret) goto error; ret = iwl_send_phy_cfg_cmd(mvm); if (ret) goto error; } ret = iwl_mvm_send_bt_init_conf(mvm); if (ret) goto error; if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SOC_LATENCY_SUPPORT)) { ret = iwl_set_soc_latency(mvm); if (ret) goto error; } /* Init RSS configuration */ if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000) { ret = iwl_configure_rxq(mvm); if (ret) { IWL_ERR(mvm, "Failed to configure RX queues: %d\n", ret); goto error; } } if (iwl_mvm_has_new_rx_api(mvm)) { ret = iwl_send_rss_cfg_cmd(mvm); if (ret) { IWL_ERR(mvm, "Failed to configure RSS queues: %d\n", ret); goto error; } } /* init the fw <-> mac80211 STA mapping */ for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL); mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA; /* reset quota debouncing buffer - 0xff will yield invalid data */ memset(&mvm->last_quota_cmd, 0xff, sizeof(mvm->last_quota_cmd)); if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_DQA_SUPPORT)) { ret = iwl_mvm_send_dqa_cmd(mvm); if (ret) goto error; } /* Add auxiliary station for scanning */ ret = iwl_mvm_add_aux_sta(mvm); if (ret) goto error; /* Add all the PHY contexts */ chan = &mvm->hw->wiphy->bands[NL80211_BAND_2GHZ]->channels[0]; cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT); for (i = 0; i < NUM_PHY_CTX; i++) { /* * The channel used here isn't relevant as it's * going to be overwritten in the other flows. * For now use the first channel we have. */ ret = iwl_mvm_phy_ctxt_add(mvm, &mvm->phy_ctxts[i], &chandef, 1, 1); if (ret) goto error; } if (iwl_mvm_is_tt_in_fw(mvm)) { /* in order to give the responsibility of ct-kill and * TX backoff to FW we need to send empty temperature reporting * cmd during init time */ iwl_mvm_send_temp_report_ths_cmd(mvm); } else { /* Initialize tx backoffs to the minimal possible */ iwl_mvm_tt_tx_backoff(mvm, 0); } #ifdef CONFIG_THERMAL /* TODO: read the budget from BIOS / Platform NVM */ /* * In case there is no budget from BIOS / Platform NVM the default * budget should be 2000mW (cooling state 0). */ if (iwl_mvm_is_ctdp_supported(mvm)) { ret = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_START, mvm->cooling_dev.cur_state); if (ret) goto error; } #endif if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SET_LTR_GEN2)) WARN_ON(iwl_mvm_config_ltr(mvm)); ret = iwl_mvm_power_update_device(mvm); if (ret) goto error; /* * RTNL is not taken during Ct-kill, but we don't need to scan/Tx * anyway, so don't init MCC. 
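	 * (Initializing MCC ends up updating the regulatory domain through
	 * cfg80211, which expects the RTNL to be held.)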
*/ if (!test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status)) { ret = iwl_mvm_init_mcc(mvm); if (ret) goto error; } if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) { mvm->scan_type = IWL_SCAN_TYPE_NOT_SET; mvm->hb_scan_type = IWL_SCAN_TYPE_NOT_SET; ret = iwl_mvm_config_scan(mvm); if (ret) goto error; } if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) iwl_mvm_send_recovery_cmd(mvm, ERROR_RECOVERY_UPDATE_DB); #ifdef CPTCFG_IWLMVM_VENDOR_CMDS /* set_mode must be IWL_TX_POWER_MODE_SET_DEVICE if this was * ever initialized. */ if (le32_to_cpu(mvm->txp_cmd.v5.v3.set_mode) == IWL_TX_POWER_MODE_SET_DEVICE) { int len; if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_REDUCE_TX_POWER)) len = sizeof(mvm->txp_cmd.v5); else if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TX_POWER_ACK)) len = sizeof(mvm->txp_cmd.v4); else len = sizeof(mvm->txp_cmd.v4.v3); if (iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &mvm->txp_cmd)) IWL_ERR(mvm, "failed to update TX power\n"); } #endif if (iwl_acpi_get_eckv(mvm->dev, &mvm->ext_clock_valid)) IWL_DEBUG_INFO(mvm, "ECKV table doesn't exist in BIOS\n"); ret = iwl_mvm_ppag_init(mvm); if (ret) goto error; ret = iwl_mvm_sar_init(mvm); if (ret == 0) { ret = iwl_mvm_sar_geo_init(mvm); } else if (ret > 0 && !iwl_mvm_sar_get_wgds_table(mvm)) { /* * If basic SAR is not available, we check for WGDS, * which should *not* be available either. If it is * available, issue an error, because we can't use SAR * Geo without basic SAR. */ IWL_ERR(mvm, "BIOS contains WGDS but no WRDS\n"); } if (ret < 0) goto error; iwl_mvm_leds_sync(mvm); IWL_DEBUG_INFO(mvm, "RT uCode started.\n"); return 0; error: if (!iwlmvm_mod_params.init_dbg || !ret) iwl_mvm_stop_device(mvm); return ret; } int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm) { int ret, i; lockdep_assert_held(&mvm->mutex); ret = iwl_trans_start_hw(mvm->trans); if (ret) return ret; ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_WOWLAN); if (ret) { IWL_ERR(mvm, "Failed to start WoWLAN firmware: %d\n", ret); goto error; } #ifdef CPTCFG_IWLWIFI_DEVICE_TESTMODE iwl_dnt_start(mvm->trans); #endif ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm)); if (ret) goto error; /* Send phy db control command and then phy db calibration*/ ret = iwl_send_phy_db_data(mvm->phy_db); if (ret) goto error; ret = iwl_send_phy_cfg_cmd(mvm); if (ret) goto error; /* init the fw <-> mac80211 STA mapping */ for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL); /* Add auxiliary station for scanning */ ret = iwl_mvm_add_aux_sta(mvm); if (ret) goto error; return 0; error: iwl_mvm_stop_device(mvm); return ret; } void iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_card_state_notif *card_state_notif = (void *)pkt->data; u32 flags = le32_to_cpu(card_state_notif->flags); IWL_DEBUG_RF_KILL(mvm, "Card state received: HW:%s SW:%s CT:%s\n", (flags & HW_CARD_DISABLED) ? "Kill" : "On", (flags & SW_CARD_DISABLED) ? "Kill" : "On", (flags & CT_KILL_CARD_DISABLED) ? 
"Reached" : "Not reached"); } void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_mfuart_load_notif *mfuart_notif = (void *)pkt->data; IWL_DEBUG_INFO(mvm, "MFUART: installed ver: 0x%08x, external ver: 0x%08x, status: 0x%08x, duration: 0x%08x\n", le32_to_cpu(mfuart_notif->installed_ver), le32_to_cpu(mfuart_notif->external_ver), le32_to_cpu(mfuart_notif->status), le32_to_cpu(mfuart_notif->duration)); if (iwl_rx_packet_payload_len(pkt) == sizeof(*mfuart_notif)) IWL_DEBUG_INFO(mvm, "MFUART: image size: 0x%08x\n", le32_to_cpu(mfuart_notif->image_size)); } backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/mvm/led.c000066400000000000000000000125311351064634500263000ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2017 Intel Deutschland GmbH * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2017 Intel Deutschland GmbH * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 *
 *****************************************************************************/

#include <linux/leds.h>
#include "iwl-io.h"
#include "iwl-csr.h"
#include "mvm.h"

static void iwl_mvm_send_led_fw_cmd(struct iwl_mvm *mvm, bool on)
{
	struct iwl_led_cmd led_cmd = {
		.status = cpu_to_le32(on),
	};
	struct iwl_host_cmd cmd = {
		.id = WIDE_ID(LONG_GROUP, LEDS_CMD),
		.len = { sizeof(led_cmd), },
		.data = { &led_cmd, },
		.flags = CMD_ASYNC,
	};
	int err;

	if (!iwl_mvm_firmware_running(mvm))
		return;

	err = iwl_mvm_send_cmd(mvm, &cmd);
	if (err)
		IWL_WARN(mvm, "LED command failed: %d\n", err);
}

static void iwl_mvm_led_set(struct iwl_mvm *mvm, bool on)
{
	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_LED_CMD_SUPPORT)) {
		iwl_mvm_send_led_fw_cmd(mvm, on);
		return;
	}

	iwl_write32(mvm->trans, CSR_LED_REG,
		    on ? CSR_LED_REG_TURN_ON : CSR_LED_REG_TURN_OFF);
}

static void iwl_led_brightness_set(struct led_classdev *led_cdev,
				   enum led_brightness brightness)
{
	struct iwl_mvm *mvm = container_of(led_cdev, struct iwl_mvm, led);

	iwl_mvm_led_set(mvm, brightness > 0);
}

int iwl_mvm_leds_init(struct iwl_mvm *mvm)
{
	int mode = iwlwifi_mod_params.led_mode;
	int ret;

	switch (mode) {
	case IWL_LED_BLINK:
		IWL_ERR(mvm, "Blink led mode not supported, using default\n");
		/* fall through */
	case IWL_LED_DEFAULT:
	case IWL_LED_RF_STATE:
		mode = IWL_LED_RF_STATE;
		break;
	case IWL_LED_DISABLE:
		IWL_INFO(mvm, "Led disabled\n");
		return 0;
	default:
		return -EINVAL;
	}

	mvm->led.name = kasprintf(GFP_KERNEL, "%s-led",
				  wiphy_name(mvm->hw->wiphy));
	mvm->led.brightness_set = iwl_led_brightness_set;
	mvm->led.max_brightness = 1;

	if (mode == IWL_LED_RF_STATE)
		mvm->led.default_trigger =
			ieee80211_get_radio_led_name(mvm->hw);

	ret = led_classdev_register(mvm->trans->dev, &mvm->led);
	if (ret) {
		kfree(mvm->led.name);
		IWL_INFO(mvm, "Failed to enable led\n");
		return ret;
	}

	mvm->init_status |= IWL_MVM_INIT_STATUS_LEDS_INIT_COMPLETE;
	return 0;
}

void iwl_mvm_leds_sync(struct iwl_mvm *mvm)
{
	if (!(mvm->init_status & IWL_MVM_INIT_STATUS_LEDS_INIT_COMPLETE))
		return;

	/*
	 * if we control through the register, we're doing it
	 * even when the firmware isn't up, so no need to sync
	 */
	if (mvm->cfg->device_family < IWL_DEVICE_FAMILY_8000)
		return;

	iwl_mvm_led_set(mvm, mvm->led.brightness > 0);
}

void iwl_mvm_leds_exit(struct iwl_mvm *mvm)
{
	if (!(mvm->init_status & IWL_MVM_INIT_STATUS_LEDS_INIT_COMPLETE))
		return;

	led_classdev_unregister(&mvm->led);
	kfree(mvm->led.name);
	mvm->init_status &= ~IWL_MVM_INIT_STATUS_LEDS_INIT_COMPLETE;
}
backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c000066400000000000000000001436741351064634500272650ustar00rootroot00000000000000/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
 * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
 * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/

#include <linux/etherdevice.h>
#include <net/mac80211.h>
#include "iwl-io.h"
#include "iwl-prph.h"
#include "fw-api.h"
#include "mvm.h"
#include "time-event.h"

const u8 iwl_mvm_ac_to_tx_fifo[] = {
	IWL_MVM_TX_FIFO_VO,
	IWL_MVM_TX_FIFO_VI,
	IWL_MVM_TX_FIFO_BE,
	IWL_MVM_TX_FIFO_BK,
};

const u8 iwl_mvm_ac_to_gen2_tx_fifo[] = {
	IWL_GEN2_EDCA_TX_FIFO_VO,
	IWL_GEN2_EDCA_TX_FIFO_VI,
	IWL_GEN2_EDCA_TX_FIFO_BE,
	IWL_GEN2_EDCA_TX_FIFO_BK,
	IWL_GEN2_TRIG_TX_FIFO_VO,
	IWL_GEN2_TRIG_TX_FIFO_VI,
	IWL_GEN2_TRIG_TX_FIFO_BE,
	IWL_GEN2_TRIG_TX_FIFO_BK,
};

struct iwl_mvm_mac_iface_iterator_data {
	struct iwl_mvm *mvm;
	struct ieee80211_vif *vif;
	unsigned long available_mac_ids[BITS_TO_LONGS(NUM_MAC_INDEX_DRIVER)];
	unsigned long available_tsf_ids[BITS_TO_LONGS(NUM_TSF_IDS)];
	enum iwl_tsf_id preferred_tsf;
	bool found_vif;
};

static void iwl_mvm_mac_tsf_id_iter(void *_data, u8 *mac,
				    struct ieee80211_vif *vif)
{
	struct iwl_mvm_mac_iface_iterator_data *data = _data;
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	u16 min_bi;

	/* Skip the interface for which we are trying to assign a tsf_id */
	if (vif == data->vif)
		return;

	/*
	 * The TSF is a hardware/firmware resource, there are 4 and
	 * the driver should assign and free them as needed. However,
	 * there are cases where 2 MACs should share the same TSF ID
	 * for the purpose of clock sync, an optimization to avoid
	 * clock drift causing overlapping TBTTs/DTIMs for a GO and
	 * client in the system.
	 *
	 * The firmware will decide according to the MAC type which
	 * will be the master and slave.
Clients that need to sync * with a remote station will be the master, and an AP or GO * will be the slave. * * Depending on the new interface type it can be slaved to * or become the master of an existing interface. */ switch (data->vif->type) { case NL80211_IFTYPE_STATION: /* * The new interface is a client, so if the one we're iterating * is an AP, and the beacon interval of the AP is a multiple or * divisor of the beacon interval of the client, the same TSF * should be used to avoid drift between the new client and * existing AP. The existing AP will get drift updates from the * new client context in this case. */ if (vif->type != NL80211_IFTYPE_AP || data->preferred_tsf != NUM_TSF_IDS || !test_bit(mvmvif->tsf_id, data->available_tsf_ids)) break; min_bi = min(data->vif->bss_conf.beacon_int, vif->bss_conf.beacon_int); if (!min_bi) break; if ((data->vif->bss_conf.beacon_int - vif->bss_conf.beacon_int) % min_bi == 0) { data->preferred_tsf = mvmvif->tsf_id; return; } break; case NL80211_IFTYPE_AP: /* * The new interface is AP/GO, so if its beacon interval is a * multiple or a divisor of the beacon interval of an existing * interface, it should get drift updates from an existing * client or use the same TSF as an existing GO. There's no * drift between TSFs internally but if they used different * TSFs then a new client MAC could update one of them and * cause drift that way. */ if ((vif->type != NL80211_IFTYPE_AP && vif->type != NL80211_IFTYPE_STATION) || data->preferred_tsf != NUM_TSF_IDS || !test_bit(mvmvif->tsf_id, data->available_tsf_ids)) break; min_bi = min(data->vif->bss_conf.beacon_int, vif->bss_conf.beacon_int); if (!min_bi) break; if ((data->vif->bss_conf.beacon_int - vif->bss_conf.beacon_int) % min_bi == 0) { data->preferred_tsf = mvmvif->tsf_id; return; } break; default: /* * For all other interface types there's no need to * take drift into account. Either they're exclusive * like IBSS and monitor, or we don't care much about * their TSF (like P2P Device), but we won't be able * to share the TSF resource. */ break; } /* * Unless we exited above, we can't share the TSF resource * that the virtual interface we're iterating over is using * with the new one, so clear the available bit and if this * was the preferred one, reset that as well. */ __clear_bit(mvmvif->tsf_id, data->available_tsf_ids); if (data->preferred_tsf == mvmvif->tsf_id) data->preferred_tsf = NUM_TSF_IDS; } static void iwl_mvm_mac_iface_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif) { struct iwl_mvm_mac_iface_iterator_data *data = _data; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); /* Iterator may already find the interface being added -- skip it */ if (vif == data->vif) { data->found_vif = true; return; } /* Mark MAC IDs as used by clearing the available bit, and * (below) mark TSFs as used if their existing use is not * compatible with the new interface type. * No locking or atomic bit operations are needed since the * data is on the stack of the caller function. 
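	 * (This is also why the non-atomic __clear_bit() variant is used
	 * below instead of clear_bit().)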
*/ __clear_bit(mvmvif->id, data->available_mac_ids); /* find a suitable tsf_id */ iwl_mvm_mac_tsf_id_iter(_data, mac, vif); } void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_mac_iface_iterator_data data = { .mvm = mvm, .vif = vif, .available_tsf_ids = { (1 << NUM_TSF_IDS) - 1 }, /* no preference yet */ .preferred_tsf = NUM_TSF_IDS, }; ieee80211_iterate_active_interfaces_atomic( mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL, iwl_mvm_mac_tsf_id_iter, &data); if (data.preferred_tsf != NUM_TSF_IDS) mvmvif->tsf_id = data.preferred_tsf; else if (!test_bit(mvmvif->tsf_id, data.available_tsf_ids)) mvmvif->tsf_id = find_first_bit(data.available_tsf_ids, NUM_TSF_IDS); } int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_mac_iface_iterator_data data = { .mvm = mvm, .vif = vif, .available_mac_ids = { (1 << NUM_MAC_INDEX_DRIVER) - 1 }, .available_tsf_ids = { (1 << NUM_TSF_IDS) - 1 }, /* no preference yet */ .preferred_tsf = NUM_TSF_IDS, .found_vif = false, }; int ret, i; lockdep_assert_held(&mvm->mutex); /* * Allocate a MAC ID and a TSF for this MAC, along with the queues * and other resources. */ /* * Before the iterator, we start with all MAC IDs and TSFs available. * * During iteration, all MAC IDs are cleared that are in use by other * virtual interfaces, and all TSF IDs are cleared that can't be used * by this new virtual interface because they're used by an interface * that can't share it with the new one. * At the same time, we check if there's a preferred TSF in the case * that we should share it with another interface. */ /* Currently, MAC ID 0 should be used only for the managed/IBSS vif */ switch (vif->type) { case NL80211_IFTYPE_ADHOC: break; case NL80211_IFTYPE_STATION: if (!vif->p2p) break; /* fall through */ default: __clear_bit(0, data.available_mac_ids); } ieee80211_iterate_active_interfaces_atomic( mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL, iwl_mvm_mac_iface_iterator, &data); /* * In the case we're getting here during resume, it's similar to * firmware restart, and with RESUME_ALL the iterator will find * the vif being added already. * We don't want to reassign any IDs in either case since doing * so would probably assign different IDs (as interfaces aren't * necessarily added in the same order), but the old IDs were * preserved anyway, so skip ID assignment for both resume and * recovery. 
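	 * (The IDs live in the vif's private iwl_mvm_vif area, which
	 * mac80211 preserves across restarts, so they remain valid.)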
*/ if (data.found_vif) return 0; /* Therefore, in recovery, we can't get here */ if (WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))) return -EBUSY; mvmvif->id = find_first_bit(data.available_mac_ids, NUM_MAC_INDEX_DRIVER); if (mvmvif->id == NUM_MAC_INDEX_DRIVER) { IWL_ERR(mvm, "Failed to init MAC context - no free ID!\n"); ret = -EIO; goto exit_fail; } if (data.preferred_tsf != NUM_TSF_IDS) mvmvif->tsf_id = data.preferred_tsf; else mvmvif->tsf_id = find_first_bit(data.available_tsf_ids, NUM_TSF_IDS); if (mvmvif->tsf_id == NUM_TSF_IDS) { IWL_ERR(mvm, "Failed to init MAC context - no free TSF!\n"); ret = -EIO; goto exit_fail; } mvmvif->color = 0; INIT_LIST_HEAD(&mvmvif->time_event_data.list); mvmvif->time_event_data.id = TE_MAX; /* No need to allocate data queues to P2P Device MAC and NAN.*/ if (vif->type == NL80211_IFTYPE_P2P_DEVICE || vif->type == NL80211_IFTYPE_NAN) return 0; /* Allocate the CAB queue for softAP and GO interfaces */ if (vif->type == NL80211_IFTYPE_AP || vif->type == NL80211_IFTYPE_ADHOC) { /* * For TVQM this will be overwritten later with the FW assigned * queue value (when queue is enabled). */ mvmvif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE; } mvmvif->bcast_sta.sta_id = IWL_MVM_INVALID_STA; mvmvif->mcast_sta.sta_id = IWL_MVM_INVALID_STA; mvmvif->ap_sta_id = IWL_MVM_INVALID_STA; for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) mvmvif->smps_requests[i] = IEEE80211_SMPS_AUTOMATIC; return 0; exit_fail: memset(mvmvif, 0, sizeof(struct iwl_mvm_vif)); return ret; } static void iwl_mvm_ack_rates(struct iwl_mvm *mvm, struct ieee80211_vif *vif, enum nl80211_band band, u8 *cck_rates, u8 *ofdm_rates) { struct ieee80211_supported_band *sband; unsigned long basic = vif->bss_conf.basic_rates; int lowest_present_ofdm = 100; int lowest_present_cck = 100; u8 cck = 0; u8 ofdm = 0; int i; sband = mvm->hw->wiphy->bands[band]; for_each_set_bit(i, &basic, BITS_PER_LONG) { int hw = sband->bitrates[i].hw_value; if (hw >= IWL_FIRST_OFDM_RATE) { ofdm |= BIT(hw - IWL_FIRST_OFDM_RATE); if (lowest_present_ofdm > hw) lowest_present_ofdm = hw; } else { BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0); cck |= BIT(hw); if (lowest_present_cck > hw) lowest_present_cck = hw; } } /* * Now we've got the basic rates as bitmaps in the ofdm and cck * variables. This isn't sufficient though, as there might not * be all the right rates in the bitmap. E.g. if the only basic * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps * and 6 Mbps because the 802.11-2007 standard says in 9.6: * * [...] a STA responding to a received frame shall transmit * its Control Response frame [...] at the highest rate in the * BSSBasicRateSet parameter that is less than or equal to the * rate of the immediately previous frame in the frame exchange * sequence ([...]) and that is of the same modulation class * ([...]) as the received frame. If no rate contained in the * BSSBasicRateSet parameter meets these conditions, then the * control frame sent in response to a received frame shall be * transmitted at the highest mandatory rate of the PHY that is * less than or equal to the rate of the received frame, and * that is of the same modulation class as the received frame. * * As a consequence, we need to add all mandatory rates that are * lower than all of the basic rates to these bitmaps. 
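	 * For example: if only 24 Mbps and 36 Mbps are basic, the code
	 * below also adds 6 Mbps and 12 Mbps (the mandatory OFDM rates
	 * below 24 Mbps), so that a control response frame always has a
	 * legal rate to use.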
*/ if (IWL_RATE_24M_INDEX < lowest_present_ofdm) ofdm |= IWL_RATE_BIT_MSK(24) >> IWL_FIRST_OFDM_RATE; if (IWL_RATE_12M_INDEX < lowest_present_ofdm) ofdm |= IWL_RATE_BIT_MSK(12) >> IWL_FIRST_OFDM_RATE; /* 6M already there or needed so always add */ ofdm |= IWL_RATE_BIT_MSK(6) >> IWL_FIRST_OFDM_RATE; /* * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP. * Note, however: * - if no CCK rates are basic, it must be ERP since there must * be some basic rates at all, so they're OFDM => ERP PHY * (or we're in 5 GHz, and the cck bitmap will never be used) * - if 11M is a basic rate, it must be ERP as well, so add 5.5M * - if 5.5M is basic, 1M and 2M are mandatory * - if 2M is basic, 1M is mandatory * - if 1M is basic, that's the only valid ACK rate. * As a consequence, it's not as complicated as it sounds, just add * any lower rates to the ACK rate bitmap. */ if (IWL_RATE_11M_INDEX < lowest_present_cck) cck |= IWL_RATE_BIT_MSK(11) >> IWL_FIRST_CCK_RATE; if (IWL_RATE_5M_INDEX < lowest_present_cck) cck |= IWL_RATE_BIT_MSK(5) >> IWL_FIRST_CCK_RATE; if (IWL_RATE_2M_INDEX < lowest_present_cck) cck |= IWL_RATE_BIT_MSK(2) >> IWL_FIRST_CCK_RATE; /* 1M already there or needed so always add */ cck |= IWL_RATE_BIT_MSK(1) >> IWL_FIRST_CCK_RATE; *cck_rates = cck; *ofdm_rates = ofdm; } static void iwl_mvm_mac_ctxt_set_ht_flags(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_mac_ctx_cmd *cmd) { /* for both sta and ap, ht_operation_mode hold the protection_mode */ u8 protection_mode = vif->bss_conf.ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION; /* The fw does not distinguish between ht and fat */ u32 ht_flag = MAC_PROT_FLG_HT_PROT | MAC_PROT_FLG_FAT_PROT; IWL_DEBUG_RATE(mvm, "protection mode set to %d\n", protection_mode); /* * See section 9.23.3.1 of IEEE 80211-2012. * Nongreenfield HT STAs Present is not supported. 
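	 * (protection_mode is the two-bit HT Protection field masked out
	 * of ht_operation_mode above; the switch below covers all four
	 * possible values.)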
*/ switch (protection_mode) { case IEEE80211_HT_OP_MODE_PROTECTION_NONE: break; case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER: case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED: cmd->protection_flags |= cpu_to_le32(ht_flag); break; case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ: /* Protect when channel wider than 20MHz */ if (vif->bss_conf.chandef.width > NL80211_CHAN_WIDTH_20) cmd->protection_flags |= cpu_to_le32(ht_flag); break; default: IWL_ERR(mvm, "Illegal protection mode %d\n", protection_mode); break; } } static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_mac_ctx_cmd *cmd, const u8 *bssid_override, u32 action) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct ieee80211_chanctx_conf *chanctx; bool ht_enabled = !!(vif->bss_conf.ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION); u8 cck_ack_rates, ofdm_ack_rates; const u8 *bssid = bssid_override ?: vif->bss_conf.bssid; int i; cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)); cmd->action = cpu_to_le32(action); switch (vif->type) { case NL80211_IFTYPE_STATION: if (vif->p2p) cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_P2P_STA); else cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_BSS_STA); break; case NL80211_IFTYPE_AP: cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_GO); break; case NL80211_IFTYPE_MONITOR: cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_LISTENER); break; case NL80211_IFTYPE_P2P_DEVICE: cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_P2P_DEVICE); break; case NL80211_IFTYPE_ADHOC: cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_IBSS); break; default: WARN_ON_ONCE(1); } cmd->tsf_id = cpu_to_le32(mvmvif->tsf_id); memcpy(cmd->node_addr, vif->addr, ETH_ALEN); if (bssid) memcpy(cmd->bssid_addr, bssid, ETH_ALEN); else eth_broadcast_addr(cmd->bssid_addr); rcu_read_lock(); chanctx = rcu_dereference(vif->chanctx_conf); iwl_mvm_ack_rates(mvm, vif, chanctx ? chanctx->def.chan->band : NL80211_BAND_2GHZ, &cck_ack_rates, &ofdm_ack_rates); rcu_read_unlock(); cmd->cck_rates = cpu_to_le32((u32)cck_ack_rates); cmd->ofdm_rates = cpu_to_le32((u32)ofdm_ack_rates); cmd->cck_short_preamble = cpu_to_le32(vif->bss_conf.use_short_preamble ? MAC_FLG_SHORT_PREAMBLE : 0); cmd->short_slot = cpu_to_le32(vif->bss_conf.use_short_slot ? 
MAC_FLG_SHORT_SLOT : 0); cmd->filter_flags = 0; for (i = 0; i < IEEE80211_NUM_ACS; i++) { u8 txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, i); u8 ucode_ac = iwl_mvm_mac80211_ac_to_ucode_ac(i); cmd->ac[ucode_ac].cw_min = cpu_to_le16(mvmvif->queue_params[i].cw_min); cmd->ac[ucode_ac].cw_max = cpu_to_le16(mvmvif->queue_params[i].cw_max); cmd->ac[ucode_ac].edca_txop = cpu_to_le16(mvmvif->queue_params[i].txop * 32); cmd->ac[ucode_ac].aifsn = mvmvif->queue_params[i].aifs; cmd->ac[ucode_ac].fifos_mask = BIT(txf); } if (vif->bss_conf.qos) cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA); if (vif->bss_conf.use_cts_prot) cmd->protection_flags |= cpu_to_le32(MAC_PROT_FLG_TGG_PROTECT); IWL_DEBUG_RATE(mvm, "use_cts_prot %d, ht_operation_mode %d\n", vif->bss_conf.use_cts_prot, vif->bss_conf.ht_operation_mode); if (vif->bss_conf.chandef.width != NL80211_CHAN_WIDTH_20_NOHT) cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_TGN); if (ht_enabled) iwl_mvm_mac_ctxt_set_ht_flags(mvm, vif, cmd); } static int iwl_mvm_mac_ctxt_send_cmd(struct iwl_mvm *mvm, struct iwl_mac_ctx_cmd *cmd) { int ret = iwl_mvm_send_cmd_pdu(mvm, MAC_CONTEXT_CMD, 0, sizeof(*cmd), cmd); if (ret) IWL_ERR(mvm, "Failed to send MAC context (action:%d): %d\n", le32_to_cpu(cmd->action), ret); return ret; } static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u32 action, bool force_assoc_off, const u8 *bssid_override) { struct iwl_mac_ctx_cmd cmd = {}; struct iwl_mac_data_sta *ctxt_sta; WARN_ON(vif->type != NL80211_IFTYPE_STATION); /* Fill the common data for all mac context types */ iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, bssid_override, action); if (vif->p2p) { struct ieee80211_p2p_noa_attr *noa = &vif->bss_conf.p2p_noa_attr; #ifdef CPTCFG_IWLMVM_P2P_OPPPS_TEST_WA /* * Pass CT window including OPPPS enable flag as part of a WA * to pass P2P OPPPS certification test. Refer to * IWLMVM_P2P_OPPPS_TEST_WA description in Kconfig.noupstream. */ if (mvm->p2p_opps_test_wa_vif) cmd.p2p_sta.ctwin = cpu_to_le32(noa->oppps_ctwindow); else #endif cmd.p2p_sta.ctwin = cpu_to_le32(noa->oppps_ctwindow & IEEE80211_P2P_OPPPS_CTWINDOW_MASK); ctxt_sta = &cmd.p2p_sta.sta; } else { ctxt_sta = &cmd.sta; } /* We need the dtim_period to set the MAC as associated */ if (vif->bss_conf.assoc && vif->bss_conf.dtim_period && !force_assoc_off) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); u8 ap_sta_id = mvmvif->ap_sta_id; u32 dtim_offs; /* * The DTIM count counts down, so when it is N that means N * more beacon intervals happen until the DTIM TBTT. Therefore * add this to the current time. If that ends up being in the * future, the firmware will handle it. * * Also note that the system_timestamp (which we get here as * "sync_device_ts") and TSF timestamp aren't at exactly the * same offset in the frame -- the TSF is at the first symbol * of the TSF, the system timestamp is at signal acquisition * time. This means there's an offset between them of at most * a few hundred microseconds (24 * 8 bits + PLCP time gives * 384us in the longest case), this is currently not relevant * as the firmware wakes up around 2ms before the TBTT. 
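		 *
		 * Numeric example (hypothetical values): with
		 * sync_dtim_count == 2 and beacon_int == 100 TU, dtim_offs
		 * below becomes 200 TU, i.e. 200 * 1024 = 204800 usecs added
		 * on top of sync_tsf / sync_device_ts.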
		 */
		dtim_offs = vif->bss_conf.sync_dtim_count *
				vif->bss_conf.beacon_int;
		/* convert TU to usecs */
		dtim_offs *= 1024;

		ctxt_sta->dtim_tsf =
			cpu_to_le64(vif->bss_conf.sync_tsf + dtim_offs);
		ctxt_sta->dtim_time =
			cpu_to_le32(vif->bss_conf.sync_device_ts + dtim_offs);
		ctxt_sta->assoc_beacon_arrive_time =
			cpu_to_le32(vif->bss_conf.sync_device_ts);

		IWL_DEBUG_INFO(mvm, "DTIM TBTT is 0x%llx/0x%x, offset %d\n",
			       le64_to_cpu(ctxt_sta->dtim_tsf),
			       le32_to_cpu(ctxt_sta->dtim_time),
			       dtim_offs);

		ctxt_sta->is_assoc = cpu_to_le32(1);

		/*
		 * allow multicast data frames only as long as the station is
		 * authorized, i.e., GTK keys are already installed (if needed)
		 */
		if (ap_sta_id < IWL_MVM_STATION_COUNT) {
			struct ieee80211_sta *sta;

			rcu_read_lock();

			sta = rcu_dereference(mvm->fw_id_to_mac_id[ap_sta_id]);
			if (!IS_ERR_OR_NULL(sta)) {
				struct iwl_mvm_sta *mvmsta =
					iwl_mvm_sta_from_mac80211(sta);

				if (mvmsta->sta_state ==
				    IEEE80211_STA_AUTHORIZED)
					cmd.filter_flags |=
						cpu_to_le32(MAC_FILTER_ACCEPT_GRP);
			}

			rcu_read_unlock();
		}
	} else {
		ctxt_sta->is_assoc = cpu_to_le32(0);

		/* Allow beacons to pass through as long as we are not
		 * associated, or we do not have dtim period information.
		 */
		cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_BEACON);
	}

	ctxt_sta->bi = cpu_to_le32(vif->bss_conf.beacon_int);
	ctxt_sta->dtim_interval = cpu_to_le32(vif->bss_conf.beacon_int *
					      vif->bss_conf.dtim_period);

	ctxt_sta->listen_interval = cpu_to_le32(mvm->hw->conf.listen_interval);
	ctxt_sta->assoc_id = cpu_to_le32(vif->bss_conf.aid);

	if (vif->probe_req_reg && vif->bss_conf.assoc && vif->p2p)
		cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST);

	if (vif->bss_conf.he_support && !iwlwifi_mod_params.disable_11ax) {
		cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_11AX);
		if (vif->bss_conf.twt_requester && IWL_MVM_USE_TWT)
			ctxt_sta->data_policy |= cpu_to_le32(TWT_SUPPORTED);
	}

	return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
}

static int iwl_mvm_mac_ctxt_cmd_listener(struct iwl_mvm *mvm,
					 struct ieee80211_vif *vif,
					 u32 action)
{
	struct iwl_mac_ctx_cmd cmd = {};
	u32 tfd_queue_msk = BIT(mvm->snif_queue);
	int ret;

	WARN_ON(vif->type != NL80211_IFTYPE_MONITOR);

	iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action);

	cmd.filter_flags = cpu_to_le32(MAC_FILTER_IN_PROMISC |
				       MAC_FILTER_IN_CONTROL_AND_MGMT |
				       MAC_FILTER_IN_BEACON |
				       MAC_FILTER_IN_PROBE_REQUEST |
				       MAC_FILTER_IN_CRC32 |
				       MAC_FILTER_ACCEPT_GRP);
	ieee80211_hw_set(mvm->hw, RX_INCLUDES_FCS);

	/* Allocate sniffer station */
	ret = iwl_mvm_allocate_int_sta(mvm, &mvm->snif_sta, tfd_queue_msk,
				       vif->type, IWL_STA_GENERAL_PURPOSE);
	if (ret)
		return ret;

	return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
}

static int iwl_mvm_mac_ctxt_cmd_ibss(struct iwl_mvm *mvm,
				     struct ieee80211_vif *vif,
				     u32 action)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mac_ctx_cmd cmd = {};

	WARN_ON(vif->type != NL80211_IFTYPE_ADHOC);

	iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action);

	cmd.filter_flags = cpu_to_le32(MAC_FILTER_IN_BEACON |
				       MAC_FILTER_IN_PROBE_REQUEST |
				       MAC_FILTER_ACCEPT_GRP);

	/* cmd.ibss.beacon_time/cmd.ibss.beacon_tsf are currently ignored */
	cmd.ibss.bi = cpu_to_le32(vif->bss_conf.beacon_int);

	/* TODO: Assumes that the beacon id == mac context id */
	cmd.ibss.beacon_template = cpu_to_le32(mvmvif->id);

	return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
}

struct iwl_mvm_go_iterator_data {
	bool go_active;
};

static void iwl_mvm_go_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif)
{
	struct iwl_mvm_go_iterator_data *data = _data;
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if
 (vif->type == NL80211_IFTYPE_AP && vif->p2p &&
	    mvmvif->ap_ibss_active)
		data->go_active = true;
}

static int iwl_mvm_mac_ctxt_cmd_p2p_device(struct iwl_mvm *mvm,
					   struct ieee80211_vif *vif,
					   u32 action)
{
	struct iwl_mac_ctx_cmd cmd = {};
	struct iwl_mvm_go_iterator_data data = {};

	WARN_ON(vif->type != NL80211_IFTYPE_P2P_DEVICE);

	iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action);

	/* Override the filter flags to accept only probe requests */
	cmd.filter_flags = cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST);

	/*
	 * This flag should be set to true when the P2P Device is
	 * discoverable and there is at least one other active P2P GO. Setting
	 * this flag will allow the P2P Device to be discoverable on other
	 * channels in addition to its listen channel.
	 * Note that this flag should not be set in other cases as it opens the
	 * Rx filters on all MACs and increases the number of interrupts.
	 */
	ieee80211_iterate_active_interfaces_atomic(
		mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
		iwl_mvm_go_iterator, &data);

	cmd.p2p_dev.is_disc_extended = cpu_to_le32(data.go_active ? 1 : 0);
	return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
}

void iwl_mvm_mac_ctxt_set_tim(struct iwl_mvm *mvm, __le32 *tim_index,
			      __le32 *tim_size, u8 *beacon, u32 frame_size)
{
	u32 tim_idx;
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon;

	/* The index is relative to frame start but we start looking at the
	 * variable-length part of the beacon.
	 */
	tim_idx = mgmt->u.beacon.variable - beacon;

	/* Parse variable-length elements of beacon to find WLAN_EID_TIM */
	while ((tim_idx < (frame_size - 2)) &&
	       (beacon[tim_idx] != WLAN_EID_TIM))
		tim_idx += beacon[tim_idx + 1] + 2;

	/* If TIM field was found, set variables */
	if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) {
		*tim_index = cpu_to_le32(tim_idx);
		*tim_size = cpu_to_le32((u32)beacon[tim_idx + 1]);
	} else {
		IWL_WARN(mvm, "Unable to find TIM Element in beacon\n");
	}
}

static u32 iwl_mvm_find_ie_offset(u8 *beacon, u8 eid, u32 frame_size)
{
	struct ieee80211_mgmt *mgmt = (void *)beacon;
	const u8 *ie;

	if (WARN_ON_ONCE(frame_size <= (mgmt->u.beacon.variable - beacon)))
		return 0;

	frame_size -= mgmt->u.beacon.variable - beacon;

	ie = cfg80211_find_ie(eid, mgmt->u.beacon.variable, frame_size);
	if (!ie)
		return 0;

	return ie - beacon;
}

u8 iwl_mvm_mac_ctxt_get_lowest_rate(struct ieee80211_tx_info *info,
				    struct ieee80211_vif *vif)
{
	u8 rate;

	if (info->band == NL80211_BAND_5GHZ || vif->p2p)
		rate = IWL_FIRST_OFDM_RATE;
	else
		rate = IWL_FIRST_CCK_RATE;

#ifdef CPTCFG_IWLWIFI_FORCE_OFDM_RATE
	rate = IWL_FIRST_OFDM_RATE;
#endif

	return rate;
}

static void iwl_mvm_mac_ctxt_set_tx(struct iwl_mvm *mvm,
				    struct ieee80211_vif *vif,
				    struct sk_buff *beacon,
				    struct iwl_tx_cmd *tx)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct ieee80211_tx_info *info;
	u8 rate;
	u32 tx_flags;

	info = IEEE80211_SKB_CB(beacon);

	/* Set up TX command fields */
	tx->len = cpu_to_le16((u16)beacon->len);
	tx->sta_id = mvmvif->bcast_sta.sta_id;
	tx->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
	tx_flags = TX_CMD_FLG_SEQ_CTL | TX_CMD_FLG_TSF;
	tx_flags |= iwl_mvm_bt_coex_tx_prio(mvm, (void *)beacon->data, info, 0)
			<< TX_CMD_FLG_BT_PRIO_POS;
	tx->tx_flags = cpu_to_le32(tx_flags);

	/*
	 * TODO: the firmware advertises this, but has a bug. We should revert
	 * this when the firmware is fixed.
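	 *
	 * Until then, the "|| true" in the condition below deliberately
	 * defeats the capability check, so the driver-side antenna toggling
	 * path is always taken.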
*/ if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_BEACON_ANT_SELECTION) || true) { iwl_mvm_toggle_tx_ant(mvm, &mvm->mgmt_last_antenna_idx); tx->rate_n_flags = cpu_to_le32(BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS); } rate = iwl_mvm_mac_ctxt_get_lowest_rate(info, vif); tx->rate_n_flags |= cpu_to_le32(iwl_mvm_mac80211_idx_to_hwrate(rate)); if (rate == IWL_FIRST_CCK_RATE) tx->rate_n_flags |= cpu_to_le32(RATE_MCS_CCK_MSK); } int iwl_mvm_mac_ctxt_send_beacon_cmd(struct iwl_mvm *mvm, struct sk_buff *beacon, void *data, int len) { struct iwl_host_cmd cmd = { .id = BEACON_TEMPLATE_CMD, .flags = CMD_ASYNC, }; cmd.len[0] = len; cmd.data[0] = data; cmd.dataflags[0] = 0; cmd.len[1] = beacon->len; cmd.data[1] = beacon->data; cmd.dataflags[1] = IWL_HCMD_DFL_DUP; return iwl_mvm_send_cmd(mvm, &cmd); } static int iwl_mvm_mac_ctxt_send_beacon_v6(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct sk_buff *beacon) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mac_beacon_cmd_v6 beacon_cmd = {}; iwl_mvm_mac_ctxt_set_tx(mvm, vif, beacon, &beacon_cmd.tx); beacon_cmd.template_id = cpu_to_le32((u32)mvmvif->id); if (vif->type == NL80211_IFTYPE_AP) iwl_mvm_mac_ctxt_set_tim(mvm, &beacon_cmd.tim_idx, &beacon_cmd.tim_size, beacon->data, beacon->len); return iwl_mvm_mac_ctxt_send_beacon_cmd(mvm, beacon, &beacon_cmd, sizeof(beacon_cmd)); } static int iwl_mvm_mac_ctxt_send_beacon_v7(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct sk_buff *beacon) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mac_beacon_cmd_v7 beacon_cmd = {}; iwl_mvm_mac_ctxt_set_tx(mvm, vif, beacon, &beacon_cmd.tx); beacon_cmd.template_id = cpu_to_le32((u32)mvmvif->id); if (vif->type == NL80211_IFTYPE_AP) iwl_mvm_mac_ctxt_set_tim(mvm, &beacon_cmd.tim_idx, &beacon_cmd.tim_size, beacon->data, beacon->len); beacon_cmd.csa_offset = cpu_to_le32(iwl_mvm_find_ie_offset(beacon->data, WLAN_EID_CHANNEL_SWITCH, beacon->len)); beacon_cmd.ecsa_offset = cpu_to_le32(iwl_mvm_find_ie_offset(beacon->data, WLAN_EID_EXT_CHANSWITCH_ANN, beacon->len)); return iwl_mvm_mac_ctxt_send_beacon_cmd(mvm, beacon, &beacon_cmd, sizeof(beacon_cmd)); } static int iwl_mvm_mac_ctxt_send_beacon_v9(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct sk_buff *beacon) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct ieee80211_tx_info *info = IEEE80211_SKB_CB(beacon); struct iwl_mac_beacon_cmd beacon_cmd = {}; u8 rate = iwl_mvm_mac_ctxt_get_lowest_rate(info, vif); u16 flags; flags = iwl_mvm_mac80211_idx_to_hwrate(rate); if (rate == IWL_FIRST_CCK_RATE) flags |= IWL_MAC_BEACON_CCK; beacon_cmd.flags = cpu_to_le16(flags); beacon_cmd.byte_cnt = cpu_to_le16((u16)beacon->len); beacon_cmd.template_id = cpu_to_le32((u32)mvmvif->id); if (vif->type == NL80211_IFTYPE_AP) iwl_mvm_mac_ctxt_set_tim(mvm, &beacon_cmd.tim_idx, &beacon_cmd.tim_size, beacon->data, beacon->len); beacon_cmd.csa_offset = cpu_to_le32(iwl_mvm_find_ie_offset(beacon->data, WLAN_EID_CHANNEL_SWITCH, beacon->len)); beacon_cmd.ecsa_offset = cpu_to_le32(iwl_mvm_find_ie_offset(beacon->data, WLAN_EID_EXT_CHANSWITCH_ANN, beacon->len)); return iwl_mvm_mac_ctxt_send_beacon_cmd(mvm, beacon, &beacon_cmd, sizeof(beacon_cmd)); } int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct sk_buff *beacon) { if (WARN_ON(!beacon)) return -EINVAL; if (IWL_MVM_NON_TRANSMITTING_AP) return 0; if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CSA_AND_TBTT_OFFLOAD)) return iwl_mvm_mac_ctxt_send_beacon_v6(mvm, vif, 
						       beacon);

	/* TODO: remove the first condition once the FW merges the new TLV */
	if (iwl_mvm_has_new_tx_api(mvm) ||
	    fw_has_api(&mvm->fw->ucode_capa,
		       IWL_UCODE_TLV_API_NEW_BEACON_TEMPLATE))
		return iwl_mvm_mac_ctxt_send_beacon_v9(mvm, vif, beacon);

	return iwl_mvm_mac_ctxt_send_beacon_v7(mvm, vif, beacon);
}

/* The beacon template for the AP/GO/IBSS has changed and needs update */
int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm,
				    struct ieee80211_vif *vif)
{
	struct sk_buff *beacon;
	int ret;

	WARN_ON(vif->type != NL80211_IFTYPE_AP &&
		vif->type != NL80211_IFTYPE_ADHOC);

	beacon = ieee80211_beacon_get_template(mvm->hw, vif, NULL);
	if (!beacon)
		return -ENOMEM;

#ifdef CPTCFG_IWLWIFI_DEBUGFS
	if (mvm->beacon_inject_active)
		return -EBUSY;
#endif

	ret = iwl_mvm_mac_ctxt_send_beacon(mvm, vif, beacon);
	dev_kfree_skb(beacon);
	return ret;
}

struct iwl_mvm_mac_ap_iterator_data {
	struct iwl_mvm *mvm;
	struct ieee80211_vif *vif;
	u32 beacon_device_ts;
	u16 beacon_int;
};

/* Find the beacon_device_ts and beacon_int for a managed interface */
static void iwl_mvm_mac_ap_iterator(void *_data, u8 *mac,
				    struct ieee80211_vif *vif)
{
	struct iwl_mvm_mac_ap_iterator_data *data = _data;

	if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc)
		return;

	/* Station client has higher priority over P2P client */
	if (vif->p2p && data->beacon_device_ts)
		return;

	data->beacon_device_ts = vif->bss_conf.sync_device_ts;
	data->beacon_int = vif->bss_conf.beacon_int;
}

/*
 * Fill the specific data for mac context of type AP or P2P GO
 */
static void iwl_mvm_mac_ctxt_cmd_fill_ap(struct iwl_mvm *mvm,
					 struct ieee80211_vif *vif,
					 struct iwl_mac_ctx_cmd *cmd,
					 struct iwl_mac_data_ap *ctxt_ap,
					 bool add)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_mac_ap_iterator_data data = {
		.mvm = mvm,
		.vif = vif,
		.beacon_device_ts = 0
	};

	/* in AP mode, the MCAST FIFO takes the EDCA params from VO */
	cmd->ac[IWL_MVM_TX_FIFO_VO].fifos_mask |= BIT(IWL_MVM_TX_FIFO_MCAST);

	/*
	 * in AP mode, pass probe requests and beacons from other APs
	 * (needed for HT protection); when there are no associated stations,
	 * don't ask the FW to pass beacons to prevent unnecessary wake-ups.
	 */
	cmd->filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST);
	if (mvmvif->ap_assoc_sta_count || !mvm->drop_bcn_ap_mode) {
		cmd->filter_flags |= cpu_to_le32(MAC_FILTER_IN_BEACON);
		IWL_DEBUG_HC(mvm, "Asking FW to pass beacons\n");
	} else {
		IWL_DEBUG_HC(mvm, "No need to receive beacons\n");
	}

	ctxt_ap->bi = cpu_to_le32(vif->bss_conf.beacon_int);
	ctxt_ap->dtim_interval = cpu_to_le32(vif->bss_conf.beacon_int *
					     vif->bss_conf.dtim_period);

	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		ctxt_ap->mcast_qid = cpu_to_le32(mvmvif->cab_queue);

	/*
	 * Only set the beacon time when the MAC is being added, when we
	 * just modify the MAC then we should keep the time -- the firmware
	 * can otherwise have a "jumping" TBTT.
	 */
	if (add) {
		/*
		 * If there is a station/P2P client interface which is
		 * associated, set the AP's TBTT far enough from the station's
		 * TBTT.
Otherwise, set it to the current system time */ ieee80211_iterate_active_interfaces_atomic( mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL, iwl_mvm_mac_ap_iterator, &data); if (data.beacon_device_ts) { u32 rand = (prandom_u32() % (64 - 36)) + 36; mvmvif->ap_beacon_time = data.beacon_device_ts + ieee80211_tu_to_usec(data.beacon_int * rand / 100); } else { mvmvif->ap_beacon_time = iwl_mvm_get_systime(mvm); } } ctxt_ap->beacon_time = cpu_to_le32(mvmvif->ap_beacon_time); ctxt_ap->beacon_tsf = 0; /* unused */ /* TODO: Assume that the beacon id == mac context id */ ctxt_ap->beacon_template = cpu_to_le32(mvmvif->id); } static int iwl_mvm_mac_ctxt_cmd_ap(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u32 action) { struct iwl_mac_ctx_cmd cmd = {}; WARN_ON(vif->type != NL80211_IFTYPE_AP || vif->p2p); /* Fill the common data for all mac context types */ iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action); /* Fill the data specific for ap mode */ iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd, &cmd.ap, action == FW_CTXT_ACTION_ADD); return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd); } static int iwl_mvm_mac_ctxt_cmd_go(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u32 action) { struct iwl_mac_ctx_cmd cmd = {}; struct ieee80211_p2p_noa_attr *noa = &vif->bss_conf.p2p_noa_attr; WARN_ON(vif->type != NL80211_IFTYPE_AP || !vif->p2p); /* Fill the common data for all mac context types */ iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action); /* Fill the data specific for GO mode */ iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd, &cmd.go.ap, action == FW_CTXT_ACTION_ADD); cmd.go.ctwin = cpu_to_le32(noa->oppps_ctwindow & IEEE80211_P2P_OPPPS_CTWINDOW_MASK); cmd.go.opp_ps_enabled = cpu_to_le32(!!(noa->oppps_ctwindow & IEEE80211_P2P_OPPPS_ENABLE_BIT)); return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd); } static int iwl_mvm_mac_ctx_send(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u32 action, bool force_assoc_off, const u8 *bssid_override) { switch (vif->type) { case NL80211_IFTYPE_STATION: return iwl_mvm_mac_ctxt_cmd_sta(mvm, vif, action, force_assoc_off, bssid_override); break; case NL80211_IFTYPE_AP: if (!vif->p2p) return iwl_mvm_mac_ctxt_cmd_ap(mvm, vif, action); else return iwl_mvm_mac_ctxt_cmd_go(mvm, vif, action); break; case NL80211_IFTYPE_MONITOR: return iwl_mvm_mac_ctxt_cmd_listener(mvm, vif, action); case NL80211_IFTYPE_P2P_DEVICE: return iwl_mvm_mac_ctxt_cmd_p2p_device(mvm, vif, action); case NL80211_IFTYPE_ADHOC: return iwl_mvm_mac_ctxt_cmd_ibss(mvm, vif, action); default: break; } return -EOPNOTSUPP; } int iwl_mvm_mac_ctxt_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int ret; if (WARN_ON_ONCE(vif->type == NL80211_IFTYPE_NAN)) return -EOPNOTSUPP; if (WARN_ONCE(mvmvif->uploaded, "Adding active MAC %pM/%d\n", vif->addr, ieee80211_vif_type_p2p(vif))) return -EIO; ret = iwl_mvm_mac_ctx_send(mvm, vif, FW_CTXT_ACTION_ADD, true, NULL); if (ret) return ret; /* will only do anything at resume from D3 time */ iwl_mvm_set_last_nonqos_seq(mvm, vif); mvmvif->uploaded = true; return 0; } int iwl_mvm_mac_ctxt_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif, bool force_assoc_off, const u8 *bssid_override) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); if (WARN_ON_ONCE(vif->type == NL80211_IFTYPE_NAN)) return -EOPNOTSUPP; if (WARN_ONCE(!mvmvif->uploaded, "Changing inactive MAC %pM/%d\n", vif->addr, ieee80211_vif_type_p2p(vif))) return -EIO; return iwl_mvm_mac_ctx_send(mvm, vif, FW_CTXT_ACTION_MODIFY, force_assoc_off, bssid_override); } 
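/*
 * Illustrative call order (not an actual call site in this file): a MAC
 * context is first pushed to the firmware with iwl_mvm_mac_ctxt_add()
 * (FW_CTXT_ACTION_ADD), later updated as needed with
 * iwl_mvm_mac_ctxt_changed() (FW_CTXT_ACTION_MODIFY), and finally torn
 * down with iwl_mvm_mac_ctxt_remove() (FW_CTXT_ACTION_REMOVE) below.
 */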
int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mac_ctx_cmd cmd; int ret; if (WARN_ON_ONCE(vif->type == NL80211_IFTYPE_NAN)) return -EOPNOTSUPP; if (WARN_ONCE(!mvmvif->uploaded, "Removing inactive MAC %pM/%d\n", vif->addr, ieee80211_vif_type_p2p(vif))) return -EIO; memset(&cmd, 0, sizeof(cmd)); cmd.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)); cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE); ret = iwl_mvm_send_cmd_pdu(mvm, MAC_CONTEXT_CMD, 0, sizeof(cmd), &cmd); if (ret) { IWL_ERR(mvm, "Failed to remove MAC context: %d\n", ret); return ret; } mvmvif->uploaded = false; if (vif->type == NL80211_IFTYPE_MONITOR) { __clear_bit(IEEE80211_HW_RX_INCLUDES_FCS, mvm->hw->flags); iwl_mvm_dealloc_snif_sta(mvm); } return 0; } static void iwl_mvm_csa_count_down(struct iwl_mvm *mvm, struct ieee80211_vif *csa_vif, u32 gp2, bool tx_success) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(csa_vif); /* Don't start to countdown from a failed beacon */ if (!tx_success && !mvmvif->csa_countdown) return; mvmvif->csa_countdown = true; if (!ieee80211_csa_is_complete(csa_vif)) { int c = ieee80211_csa_update_counter(csa_vif); iwl_mvm_mac_ctxt_beacon_changed(mvm, csa_vif); if (csa_vif->p2p && !iwl_mvm_te_scheduled(&mvmvif->time_event_data) && gp2 && tx_success) { u32 rel_time = (c + 1) * csa_vif->bss_conf.beacon_int - IWL_MVM_CHANNEL_SWITCH_TIME_GO; u32 apply_time = gp2 + rel_time * 1024; iwl_mvm_schedule_csa_period(mvm, csa_vif, IWL_MVM_CHANNEL_SWITCH_TIME_GO - IWL_MVM_CHANNEL_SWITCH_MARGIN, apply_time); } } else if (!iwl_mvm_te_scheduled(&mvmvif->time_event_data)) { /* we don't have CSA NoA scheduled yet, switch now */ ieee80211_csa_finish(csa_vif); RCU_INIT_POINTER(mvm->csa_vif, NULL); } } void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_extended_beacon_notif *beacon = (void *)pkt->data; struct iwl_extended_beacon_notif_v5 *beacon_v5 = (void *)pkt->data; struct ieee80211_vif *csa_vif; struct ieee80211_vif *tx_blocked_vif; struct agg_tx_status *agg_status; u16 status; lockdep_assert_held(&mvm->mutex); mvm->ap_last_beacon_gp2 = le32_to_cpu(beacon->gp2); if (!iwl_mvm_is_short_beacon_notif_supported(mvm)) { struct iwl_mvm_tx_resp *beacon_notify_hdr = &beacon_v5->beacon_notify_hdr; mvm->ibss_manager = beacon_v5->ibss_mgr_status != 0; agg_status = iwl_mvm_get_agg_status(mvm, beacon_notify_hdr); status = le16_to_cpu(agg_status->status) & TX_STATUS_MSK; IWL_DEBUG_RX(mvm, "beacon status %#x retries:%d tsf:0x%016llX gp2:0x%X rate:%d\n", status, beacon_notify_hdr->failure_frame, le64_to_cpu(beacon->tsf), mvm->ap_last_beacon_gp2, le32_to_cpu(beacon_notify_hdr->initial_rate)); } else { mvm->ibss_manager = beacon->ibss_mgr_status != 0; status = le32_to_cpu(beacon->status) & TX_STATUS_MSK; IWL_DEBUG_RX(mvm, "beacon status %#x tsf:0x%016llX gp2:0x%X\n", status, le64_to_cpu(beacon->tsf), mvm->ap_last_beacon_gp2); } csa_vif = rcu_dereference_protected(mvm->csa_vif, lockdep_is_held(&mvm->mutex)); if (unlikely(csa_vif && csa_vif->csa_active)) iwl_mvm_csa_count_down(mvm, csa_vif, mvm->ap_last_beacon_gp2, (status == TX_STATUS_SUCCESS)); tx_blocked_vif = rcu_dereference_protected(mvm->csa_tx_blocked_vif, lockdep_is_held(&mvm->mutex)); if (unlikely(tx_blocked_vif)) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(tx_blocked_vif); /* * The channel switch is started and we have blocked the * stations. 
If this is the first beacon (the timeout wasn't * set), set the unblock timeout, otherwise countdown */ if (!mvm->csa_tx_block_bcn_timeout) mvm->csa_tx_block_bcn_timeout = IWL_MVM_CS_UNBLOCK_TX_TIMEOUT; else mvm->csa_tx_block_bcn_timeout--; /* Check if the timeout is expired, and unblock tx */ if (mvm->csa_tx_block_bcn_timeout == 0) { iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, false); RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL); } } } void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_missed_beacons_notif *mb = (void *)pkt->data; struct iwl_fw_dbg_trigger_missed_bcon *bcon_trig; struct iwl_fw_dbg_trigger_tlv *trigger; u32 stop_trig_missed_bcon, stop_trig_missed_bcon_since_rx; u32 rx_missed_bcon, rx_missed_bcon_since_rx; struct ieee80211_vif *vif; u32 id = le32_to_cpu(mb->mac_id); IWL_DEBUG_INFO(mvm, "missed bcn mac_id=%u, consecutive=%u (%u, %u, %u)\n", le32_to_cpu(mb->mac_id), le32_to_cpu(mb->consec_missed_beacons), le32_to_cpu(mb->consec_missed_beacons_since_last_rx), le32_to_cpu(mb->num_recvd_beacons), le32_to_cpu(mb->num_expected_beacons)); rcu_read_lock(); vif = iwl_mvm_rcu_dereference_vif_id(mvm, id, true); if (!vif) goto out; rx_missed_bcon = le32_to_cpu(mb->consec_missed_beacons); rx_missed_bcon_since_rx = le32_to_cpu(mb->consec_missed_beacons_since_last_rx); /* * TODO: the threshold should be adjusted based on latency conditions, * and/or in case of a CS flow on one of the other AP vifs. */ if (rx_missed_bcon > IWL_MVM_MISSED_BEACONS_THRESHOLD_LONG) iwl_mvm_connection_loss(mvm, vif, "missed beacons"); else if (rx_missed_bcon_since_rx > IWL_MVM_MISSED_BEACONS_THRESHOLD) ieee80211_beacon_loss(vif); trigger = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif), FW_DBG_TRIGGER_MISSED_BEACONS); if (!trigger) goto out; bcon_trig = (void *)trigger->data; stop_trig_missed_bcon = le32_to_cpu(bcon_trig->stop_consec_missed_bcon); stop_trig_missed_bcon_since_rx = le32_to_cpu(bcon_trig->stop_consec_missed_bcon_since_rx); /* TODO: implement start trigger */ if (rx_missed_bcon_since_rx >= stop_trig_missed_bcon_since_rx || rx_missed_bcon >= stop_trig_missed_bcon) iwl_fw_dbg_collect_trig(&mvm->fwrt, trigger, NULL); iwl_fw_dbg_apply_point(&mvm->fwrt, IWL_FW_INI_APPLY_MISSED_BEACONS); out: rcu_read_unlock(); } void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_stored_beacon_notif *sb = (void *)pkt->data; struct ieee80211_rx_status rx_status; struct sk_buff *skb; u32 size = le32_to_cpu(sb->byte_count); if (size == 0) return; skb = alloc_skb(size, GFP_ATOMIC); if (!skb) { IWL_ERR(mvm, "alloc_skb failed\n"); return; } /* update rx_status according to the notification's metadata */ memset(&rx_status, 0, sizeof(rx_status)); rx_status.mactime = le64_to_cpu(sb->tsf); /* TSF as indicated by the firmware is at INA time */ rx_status.flag |= RX_FLAG_MACTIME_PLCP_START; rx_status.device_timestamp = le32_to_cpu(sb->system_time); rx_status.band = (sb->band & cpu_to_le16(RX_RES_PHY_FLAGS_BAND_24)) ? 
			NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
	rx_status.freq =
		ieee80211_channel_to_frequency(le16_to_cpu(sb->channel),
					       rx_status.band);

	/* copy the data */
	skb_put_data(skb, sb->data, size);
	memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));

	/* pass it as regular rx to mac80211 */
	ieee80211_rx_napi(mvm->hw, NULL, skb, NULL);
}

void iwl_mvm_probe_resp_data_notif(struct iwl_mvm *mvm,
				   struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_probe_resp_data_notif *notif = (void *)pkt->data;
	struct iwl_probe_resp_data *old_data, *new_data;
	int len = iwl_rx_packet_payload_len(pkt);
	u32 id = le32_to_cpu(notif->mac_id);
	struct ieee80211_vif *vif;
	struct iwl_mvm_vif *mvmvif;

	if (WARN_ON_ONCE(len < sizeof(*notif)))
		return;

	IWL_DEBUG_INFO(mvm, "Probe response data notif: noa %d, csa %d\n",
		       notif->noa_active, notif->csa_counter);

	vif = iwl_mvm_rcu_dereference_vif_id(mvm, id, false);
	if (!vif)
		return;

	mvmvif = iwl_mvm_vif_from_mac80211(vif);

	new_data = kzalloc(sizeof(*new_data), GFP_KERNEL);
	if (!new_data)
		return;

	memcpy(&new_data->notif, notif, sizeof(new_data->notif));

	/* noa_attr contains 1 reserved byte, need to subtract it */
	new_data->noa_len = sizeof(struct ieee80211_vendor_ie) +
			    sizeof(new_data->notif.noa_attr) - 1;

	/*
	 * If it's a one-time NoA, only one descriptor is needed,
	 * adjust the length according to len_low.
	 */
	if (new_data->notif.noa_attr.len_low ==
	    sizeof(struct ieee80211_p2p_noa_desc) + 2)
		new_data->noa_len -= sizeof(struct ieee80211_p2p_noa_desc);

	old_data = rcu_dereference_protected(mvmvif->probe_resp_data,
					     lockdep_is_held(&mvmvif->mvm->mutex));
	rcu_assign_pointer(mvmvif->probe_resp_data, new_data);

	if (old_data)
		kfree_rcu(old_data, rcu_head);

	if (notif->csa_counter != IWL_PROBE_RESP_DATA_NO_CSA &&
	    notif->csa_counter >= 1)
		ieee80211_csa_set_counter(vif, notif->csa_counter);
}

void iwl_mvm_channel_switch_noa_notif(struct iwl_mvm *mvm,
				      struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_channel_switch_noa_notif *notif = (void *)pkt->data;
	struct ieee80211_vif *csa_vif, *vif;
	struct iwl_mvm_vif *mvmvif;
	int len = iwl_rx_packet_payload_len(pkt);
	u32 id_n_color, csa_id, mac_id;

	if (WARN_ON_ONCE(len < sizeof(*notif)))
		return;

	id_n_color = le32_to_cpu(notif->id_and_color);
	mac_id = id_n_color & FW_CTXT_ID_MSK;

	if (WARN_ON_ONCE(mac_id >= NUM_MAC_INDEX_DRIVER))
		return;

	rcu_read_lock();
	vif = rcu_dereference(mvm->vif_id_to_mac[mac_id]);
	mvmvif = iwl_mvm_vif_from_mac80211(vif);

	switch (vif->type) {
	case NL80211_IFTYPE_AP:
		csa_vif = rcu_dereference(mvm->csa_vif);
		if (WARN_ON(!csa_vif || !csa_vif->csa_active ||
			    csa_vif != vif))
			goto out_unlock;

		csa_id = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color);
		if (WARN(csa_id != id_n_color,
			 "channel switch noa notification on unexpected vif (csa_vif=%d, notif=%d)",
			 csa_id, id_n_color))
			goto out_unlock;

		IWL_DEBUG_INFO(mvm, "Channel Switch Started Notification\n");

		schedule_delayed_work(&mvm->cs_tx_unblock_dwork,
				      msecs_to_jiffies(IWL_MVM_CS_UNBLOCK_TX_TIMEOUT *
						       csa_vif->bss_conf.beacon_int));

		ieee80211_csa_finish(csa_vif);

		rcu_read_unlock();

		RCU_INIT_POINTER(mvm->csa_vif, NULL);
		return;
	case NL80211_IFTYPE_STATION:
		if (!fw_has_capa(&mvm->fw->ucode_capa,
				 IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD))
			iwl_mvm_csa_client_absent(mvm, vif);
		cancel_delayed_work(&mvmvif->csa_work);
		ieee80211_chswitch_done(vif, true);
		break;
	default:
		/* should never happen */
		WARN_ON_ONCE(1);
		break;
	}
out_unlock:
	rcu_read_unlock();
}
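/*
 * Note on iwl_mvm_probe_resp_data_notif() above: it follows the usual RCU
 * publish pattern -- the replacement buffer is fully initialized before
 * rcu_assign_pointer() makes it visible, and the old buffer is released
 * with kfree_rcu() so readers under rcu_read_lock() never observe freed
 * memory. Sketch of the generic shape (illustrative, not a real call site):
 *
 *	new = kzalloc(sizeof(*new), GFP_KERNEL);
 *	// ... fill in *new ...
 *	old = rcu_dereference_protected(ptr, lockdep_is_held(&mutex));
 *	rcu_assign_pointer(ptr, new);
 *	if (old)
 *		kfree_rcu(old, rcu_head);
 */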
backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c000066400000000000000000004613271351064634500267030ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 *
 *****************************************************************************/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/if_arp.h>
#include <linux/time.h>
#include <net/mac80211.h>
#include <net/ieee80211_radiotap.h>
#include <net/tcp.h>

#include "iwl-op-mode.h"
#include "iwl-io.h"
#include "mvm.h"
#include "sta.h"
#include "time-event.h"
#include "iwl-eeprom-parse.h"
#include "iwl-phy-db.h"
#include "iwl-vendor-cmd.h"
#include "fw/error-dump.h"
#include "iwl-prph.h"
#include "iwl-nvm-parse.h"
#ifdef CPTCFG_IWLWIFI_DEVICE_TESTMODE
#include "iwl-dnt-cfg.h"
#include "iwl-dnt-dispatch.h"
#endif
#ifdef CPTCFG_NL80211_TESTMODE
#include "fw/testmode.h"
#endif
#include "fw/api/nan.h"

static const struct ieee80211_iface_limit iwl_mvm_limits[] = {
	{
		.max = CPTCFG_IWLWIFI_NUM_STA_INTERFACES,
		.types = BIT(NL80211_IFTYPE_STATION),
	},
	{
		.max = 1,
		.types = BIT(NL80211_IFTYPE_AP) |
			BIT(NL80211_IFTYPE_P2P_CLIENT) |
			BIT(NL80211_IFTYPE_P2P_GO),
	},
	{
		.max = 1,
		.types = BIT(NL80211_IFTYPE_P2P_DEVICE),
	},
};

static const struct ieee80211_iface_combination
iwl_mvm_iface_combinations[] = {
	{
		.num_different_channels = CPTCFG_IWLWIFI_NUM_CHANNELS,
		.max_interfaces = CPTCFG_IWLWIFI_NUM_STA_INTERFACES + 2,
		.limits = iwl_mvm_limits,
		.n_limits = ARRAY_SIZE(iwl_mvm_limits),
	},
};

static const struct ieee80211_iface_limit iwl_mvm_limits_nan[] = {
	{
		.max = CPTCFG_IWLWIFI_NUM_STA_INTERFACES,
		.types = BIT(NL80211_IFTYPE_STATION),
	},
	{
		.max = 1,
		.types = BIT(NL80211_IFTYPE_AP) |
			BIT(NL80211_IFTYPE_P2P_CLIENT) |
			BIT(NL80211_IFTYPE_P2P_GO),
	},
	{
		.max = 1,
		.types = BIT(NL80211_IFTYPE_P2P_DEVICE),
	},
	{
		.max = 1,
		.types = BIT(NL80211_IFTYPE_NAN),
	},
};

static const struct ieee80211_iface_combination
iwl_mvm_iface_combinations_nan[] = {
	{
		.num_different_channels = CPTCFG_IWLWIFI_NUM_CHANNELS,
		.max_interfaces = CPTCFG_IWLWIFI_NUM_STA_INTERFACES + 3,
		.limits = iwl_mvm_limits_nan,
		.n_limits = ARRAY_SIZE(iwl_mvm_limits_nan),
	},
};

#ifdef CPTCFG_IWLWIFI_BCAST_FILTERING
/*
 * Use the reserved field to indicate magic values.
 * These values will only be used internally by the driver,
 * and won't make it to the fw (reserved will be 0).
 * BC_FILTER_MAGIC_IP - configure the val of this attribute to
 *	be the vif's IP address. In case there is not a single
 *	IP address (0, or more than 1), this attribute will
 *	be skipped.
* BC_FILTER_MAGIC_MAC - set the val of this attribute to * the LSB bytes of the vif's mac address */ enum { BC_FILTER_MAGIC_NONE = 0, BC_FILTER_MAGIC_IP, BC_FILTER_MAGIC_MAC, }; static const struct iwl_fw_bcast_filter iwl_mvm_default_bcast_filters[] = { { /* arp */ .discard = 0, .frame_type = BCAST_FILTER_FRAME_TYPE_ALL, .attrs = { { /* frame type - arp, hw type - ethernet */ .offset_type = BCAST_FILTER_OFFSET_PAYLOAD_START, .offset = sizeof(rfc1042_header), .val = cpu_to_be32(0x08060001), .mask = cpu_to_be32(0xffffffff), }, { /* arp dest ip */ .offset_type = BCAST_FILTER_OFFSET_PAYLOAD_START, .offset = sizeof(rfc1042_header) + 2 + sizeof(struct arphdr) + ETH_ALEN + sizeof(__be32) + ETH_ALEN, .mask = cpu_to_be32(0xffffffff), /* mark it as special field */ .reserved1 = cpu_to_le16(BC_FILTER_MAGIC_IP), }, }, }, { /* dhcp offer bcast */ .discard = 0, .frame_type = BCAST_FILTER_FRAME_TYPE_IPV4, .attrs = { { /* udp dest port - 68 (bootp client)*/ .offset_type = BCAST_FILTER_OFFSET_IP_END, .offset = offsetof(struct udphdr, dest), .val = cpu_to_be32(0x00440000), .mask = cpu_to_be32(0xffff0000), }, { /* dhcp - lsb bytes of client hw address */ .offset_type = BCAST_FILTER_OFFSET_IP_END, .offset = 38, .mask = cpu_to_be32(0xffffffff), /* mark it as special field */ .reserved1 = cpu_to_le16(BC_FILTER_MAGIC_MAC), }, }, }, /* last filter must be empty */ {}, }; #endif static const struct cfg80211_pmsr_capabilities iwl_mvm_pmsr_capa = { .max_peers = IWL_MVM_TOF_MAX_APS, .report_ap_tsf = 1, .randomize_mac_addr = 1, .ftm = { .supported = 1, .asap = 1, .non_asap = 1, .request_lci = 1, .request_civicloc = 1, .max_bursts_exponent = -1, /* all supported */ .max_ftms_per_burst = 0, /* no limits */ .bandwidths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | BIT(NL80211_CHAN_WIDTH_20) | BIT(NL80211_CHAN_WIDTH_40) | BIT(NL80211_CHAN_WIDTH_80), .preambles = BIT(NL80211_PREAMBLE_LEGACY) | BIT(NL80211_PREAMBLE_HT) | BIT(NL80211_PREAMBLE_VHT), }, }; static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *key); static void iwl_mvm_reset_phy_ctxts(struct iwl_mvm *mvm) { int i; memset(mvm->phy_ctxts, 0, sizeof(mvm->phy_ctxts)); for (i = 0; i < NUM_PHY_CTX; i++) { mvm->phy_ctxts[i].id = i; mvm->phy_ctxts[i].ref = 0; } } struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy, const char *alpha2, enum iwl_mcc_source src_id, bool *changed) { struct ieee80211_regdomain *regd = NULL; struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mcc_update_resp *resp; IWL_DEBUG_LAR(mvm, "Getting regdomain data for %s from FW\n", alpha2); lockdep_assert_held(&mvm->mutex); resp = iwl_mvm_update_mcc(mvm, alpha2, src_id); if (IS_ERR_OR_NULL(resp)) { IWL_DEBUG_LAR(mvm, "Could not get update from FW %d\n", PTR_ERR_OR_ZERO(resp)); goto out; } if (changed) { u32 status = le32_to_cpu(resp->status); *changed = (status == MCC_RESP_NEW_CHAN_PROFILE || status == MCC_RESP_ILLEGAL); } regd = iwl_parse_nvm_mcc_info(mvm->trans->dev, mvm->cfg, __le32_to_cpu(resp->n_channels), resp->channels, __le16_to_cpu(resp->mcc), __le16_to_cpu(resp->geo_info)); /* Store the return source id */ src_id = resp->source_id; kfree(resp); if (IS_ERR_OR_NULL(regd)) { IWL_DEBUG_LAR(mvm, "Could not get parse update from FW %d\n", PTR_ERR_OR_ZERO(regd)); goto out; } IWL_DEBUG_LAR(mvm, "setting alpha2 from FW to %s (0x%x, 0x%x) src=%d\n", regd->alpha2, regd->alpha2[0], regd->alpha2[1], src_id); 
mvm->lar_regdom_set = true; mvm->mcc_src = src_id; out: return regd; } void iwl_mvm_update_changed_regdom(struct iwl_mvm *mvm) { bool changed; struct ieee80211_regdomain *regd; if (!iwl_mvm_is_lar_supported(mvm)) return; regd = iwl_mvm_get_current_regdomain(mvm, &changed); if (!IS_ERR_OR_NULL(regd)) { /* only update the regulatory core if changed */ if (changed) regulatory_set_wiphy_regd(mvm->hw->wiphy, regd); kfree(regd); } } struct ieee80211_regdomain *iwl_mvm_get_current_regdomain(struct iwl_mvm *mvm, bool *changed) { return iwl_mvm_get_regdomain(mvm->hw->wiphy, "ZZ", iwl_mvm_is_wifi_mcc_supported(mvm) ? MCC_SOURCE_GET_CURRENT : MCC_SOURCE_OLD_FW, changed); } int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm) { enum iwl_mcc_source used_src; struct ieee80211_regdomain *regd; int ret; bool changed; const struct ieee80211_regdomain *r = rtnl_dereference(mvm->hw->wiphy->regd); if (!r) return -ENOENT; /* save the last source in case we overwrite it below */ used_src = mvm->mcc_src; if (iwl_mvm_is_wifi_mcc_supported(mvm)) { /* Notify the firmware we support wifi location updates */ regd = iwl_mvm_get_current_regdomain(mvm, NULL); if (!IS_ERR_OR_NULL(regd)) kfree(regd); } /* Now set our last stored MCC and source */ regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, r->alpha2, used_src, &changed); if (IS_ERR_OR_NULL(regd)) return -EIO; /* update cfg80211 if the regdomain was changed */ if (changed) ret = regulatory_set_wiphy_regd_sync_rtnl(mvm->hw->wiphy, regd); else ret = 0; kfree(regd); return ret; } const static u8 he_if_types_ext_capa_sta[] = { [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING, [2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT, [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF, [9] = WLAN_EXT_CAPA10_TWT_REQUESTER_SUPPORT, }; const static struct wiphy_iftype_ext_capab he_iftypes_ext_capa[] = { { .iftype = NL80211_IFTYPE_STATION, .extended_capabilities = he_if_types_ext_capa_sta, .extended_capabilities_mask = he_if_types_ext_capa_sta, .extended_capabilities_len = sizeof(he_if_types_ext_capa_sta), }, }; int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) { struct ieee80211_hw *hw = mvm->hw; int num_mac, ret, i; static const u32 mvm_ciphers[] = { WLAN_CIPHER_SUITE_WEP40, WLAN_CIPHER_SUITE_WEP104, WLAN_CIPHER_SUITE_TKIP, WLAN_CIPHER_SUITE_CCMP, }; #ifdef CONFIG_PM_SLEEP bool unified = fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); #endif /* Tell mac80211 our characteristics */ ieee80211_hw_set(hw, SIGNAL_DBM); ieee80211_hw_set(hw, SPECTRUM_MGMT); ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS); ieee80211_hw_set(hw, WANT_MONITOR_VIF); ieee80211_hw_set(hw, SUPPORTS_PS); ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS); ieee80211_hw_set(hw, AMPDU_AGGREGATION); ieee80211_hw_set(hw, TIMING_BEACON_ONLY); ieee80211_hw_set(hw, CONNECTION_MONITOR); ieee80211_hw_set(hw, CHANCTX_STA_CSA); ieee80211_hw_set(hw, SUPPORT_FAST_XMIT); ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS); ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU); ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR); ieee80211_hw_set(hw, DEAUTH_NEED_MGD_TX_PREP); ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW); ieee80211_hw_set(hw, BUFF_MMPDU_TXQ); ieee80211_hw_set(hw, STA_MMPDU_TXQ); /* * On older devices, enabling TX A-MSDU occasionally leads to * something getting messed up, the command read from the FIFO * gets out of sync and isn't a TX command, so that we have an * assert EDC. * * It's not clear where the bug is, but since we didn't used to * support A-MSDU until moving the mac80211 iTXQs, just leave it * for older devices. 
We also don't see this issue on any newer * devices. */ if (mvm->cfg->device_family >= IWL_DEVICE_FAMILY_9000) ieee80211_hw_set(hw, TX_AMSDU); ieee80211_hw_set(hw, TX_FRAG_LIST); if (iwl_mvm_has_tlc_offload(mvm)) { ieee80211_hw_set(hw, TX_AMPDU_SETUP_IN_HW); ieee80211_hw_set(hw, HAS_RATE_CONTROL); } if (iwl_mvm_has_new_rx_api(mvm)) ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER); if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_STA_PM_NOTIF)) { ieee80211_hw_set(hw, AP_LINK_PS); } else if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) { /* * we absolutely need this for the new TX API since that comes * with many more queues than the current code can deal with * for station powersave */ return -EINVAL; } if (mvm->trans->num_rx_queues > 1) ieee80211_hw_set(hw, USES_RSS); #if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0) if (mvm->trans->max_skb_frags) hw->netdev_features = NETIF_F_HIGHDMA | NETIF_F_SG; #endif hw->queues = IEEE80211_MAX_QUEUES; hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE; hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FEC | IEEE80211_RADIOTAP_MCS_HAVE_STBC; hw->radiotap_vht_details |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC | IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED; hw->radiotap_timestamp.units_pos = IEEE80211_RADIOTAP_TIMESTAMP_UNIT_US | IEEE80211_RADIOTAP_TIMESTAMP_SPOS_PLCP_SIG_ACQ; /* this is the case for CCK frames, it's better (only 8) for OFDM */ hw->radiotap_timestamp.accuracy = 22; if (!iwl_mvm_has_tlc_offload(mvm)) hw->rate_control_algorithm = RS_NAME; hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES; hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP; hw->max_tx_fragments = mvm->trans->max_skb_frags; BUILD_BUG_ON(ARRAY_SIZE(mvm->ciphers) < ARRAY_SIZE(mvm_ciphers) + 6); memcpy(mvm->ciphers, mvm_ciphers, sizeof(mvm_ciphers)); hw->wiphy->n_cipher_suites = ARRAY_SIZE(mvm_ciphers); hw->wiphy->cipher_suites = mvm->ciphers; if (iwl_mvm_has_new_rx_api(mvm)) { mvm->ciphers[hw->wiphy->n_cipher_suites] = WLAN_CIPHER_SUITE_GCMP; hw->wiphy->n_cipher_suites++; mvm->ciphers[hw->wiphy->n_cipher_suites] = WLAN_CIPHER_SUITE_GCMP_256; hw->wiphy->n_cipher_suites++; } /* Enable 11w if software crypto is not enabled (as the * firmware will interpret some mgmt packets, so enabling it * with software crypto isn't safe). 
*/ if (!iwlwifi_mod_params.swcrypto) { ieee80211_hw_set(hw, MFP_CAPABLE); mvm->ciphers[hw->wiphy->n_cipher_suites] = WLAN_CIPHER_SUITE_AES_CMAC; hw->wiphy->n_cipher_suites++; if (iwl_mvm_has_new_rx_api(mvm)) { mvm->ciphers[hw->wiphy->n_cipher_suites] = WLAN_CIPHER_SUITE_BIP_GMAC_128; hw->wiphy->n_cipher_suites++; mvm->ciphers[hw->wiphy->n_cipher_suites] = WLAN_CIPHER_SUITE_BIP_GMAC_256; hw->wiphy->n_cipher_suites++; } } /* currently FW API supports only one optional cipher scheme */ if (mvm->fw->cs[0].cipher) { const struct iwl_fw_cipher_scheme *fwcs = &mvm->fw->cs[0]; struct ieee80211_cipher_scheme *cs = &mvm->cs[0]; mvm->hw->n_cipher_schemes = 1; cs->cipher = le32_to_cpu(fwcs->cipher); cs->iftype = BIT(NL80211_IFTYPE_STATION); cs->hdr_len = fwcs->hdr_len; cs->pn_len = fwcs->pn_len; cs->pn_off = fwcs->pn_off; cs->key_idx_off = fwcs->key_idx_off; cs->key_idx_mask = fwcs->key_idx_mask; cs->key_idx_shift = fwcs->key_idx_shift; cs->mic_len = fwcs->mic_len; mvm->hw->cipher_schemes = mvm->cs; mvm->ciphers[hw->wiphy->n_cipher_suites] = cs->cipher; hw->wiphy->n_cipher_suites++; } if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_FTM_CALIBRATED)) { wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER); hw->wiphy->pmsr_capa = &iwl_mvm_pmsr_capa; } ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS); hw->wiphy->features |= NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR | NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR | NL80211_FEATURE_ND_RANDOM_MAC_ADDR; hw->sta_data_size = sizeof(struct iwl_mvm_sta); hw->vif_data_size = sizeof(struct iwl_mvm_vif); hw->chanctx_data_size = sizeof(u16); hw->txq_data_size = sizeof(struct iwl_mvm_txq); hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_P2P_GO) | BIT(NL80211_IFTYPE_P2P_DEVICE) | BIT(NL80211_IFTYPE_ADHOC); hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_VHT_IBSS); hw->wiphy->features |= NL80211_FEATURE_HT_IBSS; hw->wiphy->regulatory_flags |= REGULATORY_ENABLE_RELAX_NO_IR; if (iwl_mvm_is_lar_supported(mvm)) hw->wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED; else hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG | REGULATORY_DISABLE_BEACON_HINTS; hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD; hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH; if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_NAN_SUPPORT)) { hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_NAN); hw->wiphy->iface_combinations = iwl_mvm_iface_combinations_nan; hw->wiphy->n_iface_combinations = ARRAY_SIZE(iwl_mvm_iface_combinations_nan); hw->wiphy->nan_supported_bands = BIT(NL80211_BAND_2GHZ); if (mvm->nvm_data->bands[NL80211_BAND_5GHZ].n_channels) hw->wiphy->nan_supported_bands |= BIT(NL80211_BAND_5GHZ); hw->max_nan_de_entries = NAN_MAX_SUPPORTED_DE_ENTRIES; } else { hw->wiphy->iface_combinations = iwl_mvm_iface_combinations; hw->wiphy->n_iface_combinations = ARRAY_SIZE(iwl_mvm_iface_combinations); } hw->wiphy->max_remain_on_channel_duration = 10000; hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL; /* Extract MAC address */ memcpy(mvm->addresses[0].addr, mvm->nvm_data->hw_addr, ETH_ALEN); hw->wiphy->addresses = mvm->addresses; hw->wiphy->n_addresses = 1; /* Extract additional MAC addresses if available */ num_mac = (mvm->nvm_data->n_hw_addrs > 1) ? 
min(IWL_MVM_MAX_ADDRESSES, mvm->nvm_data->n_hw_addrs) : 1; #ifdef CPTCFG_IWLWIFI_SUPPORT_DEBUG_OVERRIDES if (mvm->trans->dbg_cfg.hw_address.len) num_mac = IWL_MVM_MAX_ADDRESSES; #endif for (i = 1; i < num_mac; i++) { memcpy(mvm->addresses[i].addr, mvm->addresses[i-1].addr, ETH_ALEN); mvm->addresses[i].addr[5]++; hw->wiphy->n_addresses++; } iwl_mvm_reset_phy_ctxts(mvm); hw->wiphy->max_scan_ie_len = iwl_mvm_max_scan_ie_len(mvm); hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX; BUILD_BUG_ON(IWL_MVM_SCAN_STOPPING_MASK & IWL_MVM_SCAN_MASK); BUILD_BUG_ON(IWL_MVM_MAX_UMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK) || IWL_MVM_MAX_LMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK)); if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) mvm->max_scans = IWL_MVM_MAX_UMAC_SCANS; else mvm->max_scans = IWL_MVM_MAX_LMAC_SCANS; if (mvm->nvm_data->bands[NL80211_BAND_2GHZ].n_channels) hw->wiphy->bands[NL80211_BAND_2GHZ] = &mvm->nvm_data->bands[NL80211_BAND_2GHZ]; if (mvm->nvm_data->bands[NL80211_BAND_5GHZ].n_channels) { hw->wiphy->bands[NL80211_BAND_5GHZ] = &mvm->nvm_data->bands[NL80211_BAND_5GHZ]; if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_BEAMFORMER) && fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_LQ_SS_PARAMS)) hw->wiphy->bands[NL80211_BAND_5GHZ]->vht_cap.cap |= IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE; } hw->wiphy->hw_version = mvm->trans->hw_id; if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM) hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT; else hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; hw->wiphy->max_sched_scan_reqs = 1; hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX; hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES; /* we create the 802.11 header and zero length SSID IE. */ hw->wiphy->max_sched_scan_ie_len = SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2; hw->wiphy->max_sched_scan_plans = IWL_MAX_SCHED_SCAN_PLANS; hw->wiphy->max_sched_scan_plan_interval = U16_MAX; /* * the firmware uses u8 for num of iterations, but 0xff is saved for * infinite loop, so the maximum number of iterations is actually 254. 
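	 * (0xff is reserved as the "run forever" marker, which leaves
	 * 0x01..0xfe -- 254 distinct values -- for finite plans, matching
	 * the limit set just below.)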
*/ hw->wiphy->max_sched_scan_plan_iterations = 254; hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN | NL80211_FEATURE_LOW_PRIORITY_SCAN | NL80211_FEATURE_P2P_GO_OPPPS | NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE | NL80211_FEATURE_DYNAMIC_SMPS | NL80211_FEATURE_STATIC_SMPS | NL80211_FEATURE_SUPPORTS_WMM_ADMISSION; if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT)) hw->wiphy->features |= NL80211_FEATURE_TX_POWER_INSERTION; if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT)) hw->wiphy->features |= NL80211_FEATURE_QUIET; if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT)) hw->wiphy->features |= NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES; if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT)) hw->wiphy->features |= NL80211_FEATURE_WFA_TPC_IE_IN_PROBES; if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_SCAN_TSF_REPORT)) { wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_SCAN_START_TIME); wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_BSS_PARENT_TSF); wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_SET_SCAN_DWELL); } if (iwl_mvm_is_oce_supported(mvm)) { wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_ACCEPT_BCAST_PROBE_RESP); wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_FILS_MAX_CHANNEL_TIME); wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION); wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_OCE_PROBE_REQ_HIGH_TX_RATE); } if (mvm->nvm_data->sku_cap_11ax_enable && !iwlwifi_mod_params.disable_11ax) { hw->wiphy->iftype_ext_capab = he_iftypes_ext_capa; hw->wiphy->num_iftype_ext_capab = ARRAY_SIZE(he_iftypes_ext_capa); ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID); ieee80211_hw_set(hw, SUPPORTS_ONLY_HE_MULTI_BSSID); } mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD; #ifdef CONFIG_PM_SLEEP if ((unified || mvm->fw->img[IWL_UCODE_WOWLAN].num_sec) && mvm->trans->ops->d3_suspend && mvm->trans->ops->d3_resume && device_can_wakeup(mvm->trans->dev)) { mvm->wowlan.flags |= WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT | WIPHY_WOWLAN_EAP_IDENTITY_REQ | WIPHY_WOWLAN_RFKILL_RELEASE | WIPHY_WOWLAN_NET_DETECT; if (!iwlwifi_mod_params.swcrypto) mvm->wowlan.flags |= WIPHY_WOWLAN_SUPPORTS_GTK_REKEY | WIPHY_WOWLAN_GTK_REKEY_FAILURE | WIPHY_WOWLAN_4WAY_HANDSHAKE; mvm->wowlan.n_patterns = IWL_WOWLAN_MAX_PATTERNS; mvm->wowlan.pattern_min_len = IWL_WOWLAN_MIN_PATTERN_LEN; mvm->wowlan.pattern_max_len = IWL_WOWLAN_MAX_PATTERN_LEN; mvm->wowlan.max_nd_match_sets = IWL_SCAN_MAX_PROFILES; hw->wiphy->wowlan = &mvm->wowlan; } #endif #ifdef CPTCFG_IWLWIFI_BCAST_FILTERING /* assign default bcast filtering configuration */ mvm->bcast_filters = iwl_mvm_default_bcast_filters; #endif ret = iwl_mvm_leds_init(mvm); if (ret) return ret; if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TDLS_SUPPORT)) { IWL_DEBUG_TDLS(mvm, "TDLS supported\n"); hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS; ieee80211_hw_set(hw, TDLS_WIDER_BW); } if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH)) { IWL_DEBUG_TDLS(mvm, "TDLS channel switch supported\n"); hw->wiphy->features |= NL80211_FEATURE_TDLS_CHANNEL_SWITCH; } hw->netdev_features |= mvm->cfg->features; if (!iwl_mvm_is_csum_supported(mvm)) { hw->netdev_features &= ~(IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM); /* We may support SW TX CSUM */ if (IWL_MVM_SW_TX_CSUM_OFFLOAD) hw->netdev_features |= IWL_TX_CSUM_NETIF_FLAGS; } if (mvm->cfg->vht_mu_mimo_supported) 
wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER); #ifdef CPTCFG_IWLMVM_VENDOR_CMDS iwl_mvm_vendor_cmds_register(mvm); #endif ret = ieee80211_register_hw(mvm->hw); if (ret) { #ifdef CPTCFG_IWLMVM_VENDOR_CMDS iwl_mvm_vendor_cmds_unregister(mvm); #endif iwl_mvm_leds_exit(mvm); } return ret; } static void iwl_mvm_mac_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, struct sk_buff *skb) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct ieee80211_sta *sta = control->sta; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_hdr *hdr = (void *)skb->data; bool offchannel = IEEE80211_SKB_CB(skb)->flags & IEEE80211_TX_CTL_TX_OFFCHAN; if (iwl_mvm_is_radio_killed(mvm)) { IWL_DEBUG_DROP(mvm, "Dropping - RF/CT KILL\n"); goto drop; } if (offchannel && !test_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status) && !test_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status)) goto drop; /* treat non-bufferable MMPDUs on AP interfaces as broadcast */ if ((info->control.vif->type == NL80211_IFTYPE_AP || info->control.vif->type == NL80211_IFTYPE_ADHOC) && ieee80211_is_mgmt(hdr->frame_control) && !ieee80211_is_bufferable_mmpdu(hdr->frame_control)) sta = NULL; /* If there is no sta, and it's not offchannel - send through AP */ if (!sta && info->control.vif->type == NL80211_IFTYPE_STATION && !offchannel) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(info->control.vif); u8 ap_sta_id = READ_ONCE(mvmvif->ap_sta_id); if (ap_sta_id < IWL_MVM_STATION_COUNT) { /* mac80211 holds rcu read lock */ sta = rcu_dereference(mvm->fw_id_to_mac_id[ap_sta_id]); if (IS_ERR_OR_NULL(sta)) goto drop; } } if (sta) { if (iwl_mvm_tx_skb(mvm, skb, sta)) goto drop; return; } if (iwl_mvm_tx_skb_non_sta(mvm, skb)) goto drop; return; drop: ieee80211_free_txskb(hw, skb); } void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq); struct sk_buff *skb = NULL; /* * No need for threads to be pending here, they can leave the first * taker all the work. * * mvmtxq->tx_request logic: * * If 0, no one is currently TXing, set to 1 to indicate current thread * will now start TX and other threads should quit. * * If 1, another thread is currently TXing, set to 2 to indicate to * that thread that there was another request. Since that request may * have raced with the check whether the queue is empty, the TXing * thread should check the queue's status one more time before leaving. * This check is done in order to not leave any TX hanging in the queue * until the next TX invocation (which may not even happen). * * If 2, another thread is currently TXing, and it will already double * check the queue, so do nothing. 
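	 *
	 * Concretely, atomic_fetch_add_unless(&tx_request, 1, 2) below
	 * returns the old value and only increments when it isn't already 2;
	 * a thread that reads back 0 proceeds to TX, while one that reads
	 * back 1 or 2 returns immediately, leaving the remaining work to the
	 * thread already transmitting.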
*/ if (atomic_fetch_add_unless(&mvmtxq->tx_request, 1, 2)) return; rcu_read_lock(); do { while (likely(!mvmtxq->stopped && (mvm->trans->system_pm_mode == IWL_PLAT_PM_MODE_DISABLED))) { skb = ieee80211_tx_dequeue(hw, txq); if (!skb) { if (txq->sta) IWL_DEBUG_TX(mvm, "TXQ of sta %pM tid %d is now empty\n", txq->sta->addr, txq->tid); break; } if (!txq->sta) iwl_mvm_tx_skb_non_sta(mvm, skb); else iwl_mvm_tx_skb(mvm, skb, txq->sta); } } while (atomic_dec_return(&mvmtxq->tx_request)); rcu_read_unlock(); } static void iwl_mvm_mac_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq); /* * Please note that racing is handled very carefully here: * mvmtxq->txq_id is updated during allocation, and mvmtxq->list is * deleted afterwards. * This means that if: * mvmtxq->txq_id != INVALID_QUEUE && list_empty(&mvmtxq->list): * queue is allocated and we can TX. * mvmtxq->txq_id != INVALID_QUEUE && !list_empty(&mvmtxq->list): * a race, should defer the frame. * mvmtxq->txq_id == INVALID_QUEUE && list_empty(&mvmtxq->list): * need to allocate the queue and defer the frame. * mvmtxq->txq_id == INVALID_QUEUE && !list_empty(&mvmtxq->list): * queue is already scheduled for allocation, no need to allocate, * should defer the frame. */ /* If the queue is allocated TX and return. */ if (!txq->sta || mvmtxq->txq_id != IWL_MVM_INVALID_QUEUE) { /* * Check that list is empty to avoid a race where txq_id is * already updated, but the queue allocation work wasn't * finished */ if (unlikely(txq->sta && !list_empty(&mvmtxq->list))) return; iwl_mvm_mac_itxq_xmit(hw, txq); return; } /* The list is being deleted only after the queue is fully allocated. */ if (!list_empty(&mvmtxq->list)) return; list_add_tail(&mvmtxq->list, &mvm->add_stream_txqs); schedule_work(&mvm->add_stream_wk); } #define CHECK_BA_TRIGGER(_mvm, _trig, _tid_bm, _tid, _fmt...) 
\ do { \ if (!(le16_to_cpu(_tid_bm) & BIT(_tid))) \ break; \ iwl_fw_dbg_collect_trig(&(_mvm)->fwrt, _trig, _fmt); \ } while (0) static void iwl_mvm_ampdu_check_trigger(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta, u16 tid, u16 rx_ba_ssn, enum ieee80211_ampdu_mlme_action action) { struct iwl_fw_dbg_trigger_tlv *trig; struct iwl_fw_dbg_trigger_ba *ba_trig; trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif), FW_DBG_TRIGGER_BA); if (!trig) return; ba_trig = (void *)trig->data; switch (action) { case IEEE80211_AMPDU_TX_OPERATIONAL: { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_start, tid, "TX AGG START: MAC %pM tid %d ssn %d\n", sta->addr, tid, tid_data->ssn); break; } case IEEE80211_AMPDU_TX_STOP_CONT: CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_stop, tid, "TX AGG STOP: MAC %pM tid %d\n", sta->addr, tid); break; case IEEE80211_AMPDU_RX_START: CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_start, tid, "RX AGG START: MAC %pM tid %d ssn %d\n", sta->addr, tid, rx_ba_ssn); break; case IEEE80211_AMPDU_RX_STOP: CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_stop, tid, "RX AGG STOP: MAC %pM tid %d\n", sta->addr, tid); break; default: break; } } static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_ampdu_params *params) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int ret; struct ieee80211_sta *sta = params->sta; enum ieee80211_ampdu_mlme_action action = params->action; u16 tid = params->tid; u16 *ssn = &params->ssn; u16 buf_size = params->buf_size; bool amsdu = params->amsdu; u16 timeout = params->timeout; IWL_DEBUG_HT(mvm, "A-MPDU action on addr %pM tid %d: action %d\n", sta->addr, tid, action); if (!(mvm->nvm_data->sku_cap_11n_enable)) return -EACCES; mutex_lock(&mvm->mutex); switch (action) { case IEEE80211_AMPDU_RX_START: if (iwl_mvm_vif_from_mac80211(vif)->ap_sta_id == iwl_mvm_sta_from_mac80211(sta)->sta_id) { struct iwl_mvm_vif *mvmvif; u16 macid = iwl_mvm_vif_from_mac80211(vif)->id; struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[macid]; mdata->opened_rx_ba_sessions = true; mvmvif = iwl_mvm_vif_from_mac80211(vif); cancel_delayed_work(&mvmvif->uapsd_nonagg_detected_wk); } if (!iwl_enable_rx_ampdu()) { ret = -EINVAL; break; } ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, *ssn, true, buf_size, timeout); break; case IEEE80211_AMPDU_RX_STOP: ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false, buf_size, timeout); break; case IEEE80211_AMPDU_TX_START: if (!iwl_enable_tx_ampdu()) { ret = -EINVAL; break; } ret = iwl_mvm_sta_tx_agg_start(mvm, vif, sta, tid, ssn); break; case IEEE80211_AMPDU_TX_STOP_CONT: ret = iwl_mvm_sta_tx_agg_stop(mvm, vif, sta, tid); break; case IEEE80211_AMPDU_TX_STOP_FLUSH: case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: ret = iwl_mvm_sta_tx_agg_flush(mvm, vif, sta, tid); break; case IEEE80211_AMPDU_TX_OPERATIONAL: ret = iwl_mvm_sta_tx_agg_oper(mvm, vif, sta, tid, buf_size, amsdu); break; default: WARN_ON_ONCE(1); ret = -EINVAL; break; } if (!ret) { u16 rx_ba_ssn = 0; if (action == IEEE80211_AMPDU_RX_START) rx_ba_ssn = *ssn; iwl_mvm_ampdu_check_trigger(mvm, vif, sta, tid, rx_ba_ssn, action); } mutex_unlock(&mvm->mutex); return ret; } static void iwl_mvm_cleanup_iterator(void *data, u8 *mac, struct ieee80211_vif *vif) { struct iwl_mvm *mvm = data; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); mvmvif->uploaded = false; mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;
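/* Descriptive note (added): forget any time event (e.g. session protection or ROC) owned by the firmware instance that just went away; this is done under the time event lock since the notification path may race with us. */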
spin_lock_bh(&mvm->time_event_lock); iwl_mvm_te_clear_data(mvm, &mvmvif->time_event_data); spin_unlock_bh(&mvm->time_event_lock); mvmvif->phy_ctxt = NULL; memset(&mvmvif->bf_data, 0, sizeof(mvmvif->bf_data)); memset(&mvmvif->probe_resp_data, 0, sizeof(mvmvif->probe_resp_data)); } static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm) { iwl_mvm_stop_device(mvm); mvm->cur_aid = 0; mvm->scan_status = 0; mvm->ps_disabled = false; mvm->rfkill_safe_init_done = false; /* just in case one was running */ iwl_mvm_cleanup_roc_te(mvm); ieee80211_remain_on_channel_expired(mvm->hw); iwl_mvm_ftm_restart(mvm); /* * cleanup all interfaces, even inactive ones, as some might have * gone down during the HW restart */ ieee80211_iterate_interfaces(mvm->hw, 0, iwl_mvm_cleanup_iterator, mvm); mvm->p2p_device_vif = NULL; iwl_mvm_reset_phy_ctxts(mvm); memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table)); memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif)); memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd)); ieee80211_wake_queues(mvm->hw); mvm->vif_count = 0; mvm->rx_ba_sessions = 0; mvm->fwrt.dump.conf = FW_DBG_INVALID; mvm->monitor_on = false; /* keep statistics ticking */ iwl_mvm_accu_radio_stats(mvm); } int __iwl_mvm_mac_start(struct iwl_mvm *mvm) { int ret; lockdep_assert_held(&mvm->mutex); if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status)) { /* * Now convert the HW_RESTART_REQUESTED flag to IN_HW_RESTART * so later code will - from now on - see that we're doing it. */ set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); clear_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status); /* Clean up some internal and mac80211 state on restart */ iwl_mvm_restart_cleanup(mvm); } ret = iwl_mvm_up(mvm); iwl_fw_dbg_apply_point(&mvm->fwrt, IWL_FW_INI_APPLY_POST_INIT); if (ret && test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { /* Something went wrong - we need to finish some cleanup * that normally iwl_mvm_mac_restart_complete() below * would do. */ clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); } return ret; } static int iwl_mvm_mac_start(struct ieee80211_hw *hw) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int ret; mutex_lock(&mvm->mutex); ret = __iwl_mvm_mac_start(mvm); mutex_unlock(&mvm->mutex); return ret; } static void iwl_mvm_restart_complete(struct iwl_mvm *mvm) { int ret; mutex_lock(&mvm->mutex); clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); ret = iwl_mvm_update_quotas(mvm, true, NULL); if (ret) IWL_ERR(mvm, "Failed to update quotas after restart (%d)\n", ret); #ifdef CPTCFG_IWLMVM_VENDOR_CMDS if (mvm->csi_cfg.flags & IWL_CHANNEL_ESTIMATION_ENABLE) iwl_mvm_send_csi_cmd(mvm); #endif iwl_mvm_send_recovery_cmd(mvm, ERROR_RECOVERY_END_OF_RECOVERY); /* * If we have TDLS peers, remove them. We don't know the last seqno/PN * of packets the FW sent out, so we must reconnect. */ iwl_mvm_teardown_tdls_peers(mvm); mutex_unlock(&mvm->mutex); } static void iwl_mvm_mac_reconfig_complete(struct ieee80211_hw *hw, enum ieee80211_reconfig_type reconfig_type) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); switch (reconfig_type) { case IEEE80211_RECONFIG_TYPE_RESTART: iwl_mvm_restart_complete(mvm); break; case IEEE80211_RECONFIG_TYPE_SUSPEND: break; } } void __iwl_mvm_mac_stop(struct iwl_mvm *mvm) { lockdep_assert_held(&mvm->mutex); /* firmware counters are obviously reset now, but we shouldn't * partially track so also clear the fw_reset_accu counters. 
*/ memset(&mvm->accu_radio_stats, 0, sizeof(mvm->accu_radio_stats)); /* async_handlers_wk is now blocked */ /* * The work item could be running or queued if the * ROC time event stops just as we get here. */ flush_work(&mvm->roc_done_wk); iwl_mvm_stop_device(mvm); iwl_mvm_async_handlers_purge(mvm); /* async_handlers_list is empty and will stay empty: HW is stopped */ /* the fw is stopped, the aux sta is dead: clean up driver state */ iwl_mvm_del_aux_sta(mvm); /* * Clear IN_HW_RESTART and HW_RESTART_REQUESTED flag when stopping the * hw (as restart_complete() won't be called in this case) and mac80211 * won't execute the restart. * But make sure to cleanup interfaces that have gone down before/during * HW restart was requested. */ if (test_and_clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) || test_and_clear_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status)) ieee80211_iterate_interfaces(mvm->hw, 0, iwl_mvm_cleanup_iterator, mvm); /* We shouldn't have any UIDs still set. Loop over all the UIDs to * make sure there's nothing left there and warn if any is found. */ if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) { int i; for (i = 0; i < mvm->max_scans; i++) { if (WARN_ONCE(mvm->scan_uid_status[i], "UMAC scan UID %d status was not cleaned\n", i)) mvm->scan_uid_status[i] = 0; } } } static void iwl_mvm_mac_stop(struct ieee80211_hw *hw) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); flush_work(&mvm->async_handlers_wk); flush_work(&mvm->add_stream_wk); /* * Lock and clear the firmware running bit here already, so that * new commands coming in elsewhere, e.g. from debugfs, will not * be able to proceed. This is important here because one of those * debugfs files causes the firmware dump to be triggered, and if we * don't stop debugfs accesses before canceling that it could be * retriggered after we flush it but before we've cleared the bit. */ clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status); iwl_fw_cancel_dumps(&mvm->fwrt); #ifdef CPTCFG_MAC80211_LATENCY_MEASUREMENTS cancel_delayed_work_sync(&mvm->tx_latency_watchdog_wk); #endif /* CPTCFG_MAC80211_LATENCY_MEASUREMENTS */ cancel_delayed_work_sync(&mvm->cs_tx_unblock_dwork); cancel_delayed_work_sync(&mvm->scan_timeout_dwork); iwl_fw_free_dump_desc(&mvm->fwrt); mutex_lock(&mvm->mutex); __iwl_mvm_mac_stop(mvm); mutex_unlock(&mvm->mutex); /* * The worker might have been waiting for the mutex, let it run and * discover that its list is now empty. 
*/ cancel_work_sync(&mvm->async_handlers_wk); } static struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm) { u16 i; lockdep_assert_held(&mvm->mutex); for (i = 0; i < NUM_PHY_CTX; i++) if (!mvm->phy_ctxts[i].ref) return &mvm->phy_ctxts[i]; IWL_ERR(mvm, "No available PHY context\n"); return NULL; } static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif, s16 tx_power) { int len; union { struct iwl_dev_tx_power_cmd v5; struct iwl_dev_tx_power_cmd_v4 v4; } cmd = { .v5.v3.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC), .v5.v3.mac_context_id = cpu_to_le32(iwl_mvm_vif_from_mac80211(vif)->id), .v5.v3.pwr_restriction = cpu_to_le16(8 * tx_power), }; if (tx_power == IWL_DEFAULT_MAX_TX_POWER) cmd.v5.v3.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER); if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_REDUCE_TX_POWER)) len = sizeof(cmd.v5); else if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TX_POWER_ACK)) len = sizeof(cmd.v4); else len = sizeof(cmd.v4.v3); return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd); } static int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int ret; mutex_lock(&mvm->mutex); if (mvmvif->csa_failed) { mvmvif->csa_failed = false; ret = -EIO; goto out_unlock; } if (vif->type == NL80211_IFTYPE_STATION) { struct iwl_mvm_sta *mvmsta; mvmvif->csa_bcn_pending = false; mvmsta = iwl_mvm_sta_from_staid_protected(mvm, mvmvif->ap_sta_id); if (WARN_ON(!mvmsta)) { ret = -EIO; goto out_unlock; } if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD)) iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, false); iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD)) { ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0); if (ret) goto out_unlock; iwl_mvm_stop_session_protection(mvm, vif); } } mvmvif->ps_disabled = false; ret = iwl_mvm_power_update_ps(mvm); out_unlock: mutex_unlock(&mvm->mutex); return ret; } static void iwl_mvm_abort_channel_switch(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_chan_switch_te_cmd cmd = { .mac_id = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)), .action = cpu_to_le32(FW_CTXT_ACTION_REMOVE), }; IWL_DEBUG_MAC80211(mvm, "Abort CSA on mac %d\n", mvmvif->id); mutex_lock(&mvm->mutex); WARN_ON(iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, CHANNEL_SWITCH_TIME_EVENT_CMD), 0, sizeof(cmd), &cmd)); mutex_unlock(&mvm->mutex); WARN_ON(iwl_mvm_post_channel_switch(hw, vif)); } static void iwl_mvm_channel_switch_disconnect_wk(struct work_struct *wk) { struct iwl_mvm *mvm; struct iwl_mvm_vif *mvmvif; struct ieee80211_vif *vif; mvmvif = container_of(wk, struct iwl_mvm_vif, csa_work.work); vif = container_of((void *)mvmvif, struct ieee80211_vif, drv_priv); mvm = mvmvif->mvm; iwl_mvm_abort_channel_switch(mvm->hw, vif); ieee80211_chswitch_done(vif, false); } static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int ret; mvmvif->mvm = mvm; RCU_INIT_POINTER(mvmvif->probe_resp_data, NULL); /* * Not much to do here. 
The stack will not allow interface * types or combinations that we didn't advertise, so we * don't really have to check the types. */ mutex_lock(&mvm->mutex); /* make sure that beacon statistics don't go backwards with FW reset */ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) mvmvif->beacon_stats.accu_num_beacons += mvmvif->beacon_stats.num_beacons; /* Allocate resources for the MAC context, and add it to the fw */ ret = iwl_mvm_mac_ctxt_init(mvm, vif); if (ret) goto out_unlock; rcu_assign_pointer(mvm->vif_id_to_mac[mvmvif->id], vif); /* Currently not much to do for NAN */ if (vif->type == NL80211_IFTYPE_NAN) goto out_unlock; /* Counting number of interfaces is needed for legacy PM */ if (vif->type != NL80211_IFTYPE_P2P_DEVICE) mvm->vif_count++; /* * The AP binding flow can be done only after the beacon * template is configured (which happens only in the mac80211 * start_ap() flow), and adding the broadcast station can happen * only after the binding. * In addition, since modifying the MAC before adding a bcast * station is not allowed by the FW, delay the adding of MAC context to * the point where we can also add the bcast station. * In short: there's not much we can do at this point, other than * allocating resources :) */ if (vif->type == NL80211_IFTYPE_AP || vif->type == NL80211_IFTYPE_ADHOC) { ret = iwl_mvm_alloc_bcast_sta(mvm, vif); if (ret) { IWL_ERR(mvm, "Failed to allocate bcast sta\n"); goto out_release; } /* * Only queue for this station is the mcast queue, * which shouldn't be in TFD mask anyway */ ret = iwl_mvm_allocate_int_sta(mvm, &mvmvif->mcast_sta, 0, vif->type, IWL_STA_MULTICAST); if (ret) goto out_release; iwl_mvm_vif_dbgfs_register(mvm, vif); goto out_unlock; } mvmvif->features |= hw->netdev_features; ret = iwl_mvm_mac_ctxt_add(mvm, vif); if (ret) goto out_release; ret = iwl_mvm_power_update_mac(mvm); if (ret) goto out_remove_mac; /* beacon filtering */ ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0); if (ret) goto out_remove_mac; if (!mvm->bf_allowed_vif && vif->type == NL80211_IFTYPE_STATION && !vif->p2p) { mvm->bf_allowed_vif = mvmvif; vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER | IEEE80211_VIF_SUPPORTS_CQM_RSSI; } /* * P2P_DEVICE interface does not have a channel context assigned to it, * so a dedicated PHY context is allocated to it and the corresponding * MAC context is bound to it at this stage. 
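* The code below follows exactly that order: grab and reference a free PHY context, bind the MAC to it, and only then add the P2P broadcast station used for off-channel TX.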
*/ if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { mvmvif->phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm); if (!mvmvif->phy_ctxt) { ret = -ENOSPC; goto out_free_bf; } iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt); ret = iwl_mvm_binding_add_vif(mvm, vif); if (ret) goto out_unref_phy; ret = iwl_mvm_add_p2p_bcast_sta(mvm, vif); if (ret) goto out_unbind; /* Save a pointer to p2p device vif, so it can later be used to * update the p2p device MAC when a GO is started/stopped */ mvm->p2p_device_vif = vif; } iwl_mvm_tcm_add_vif(mvm, vif); INIT_DELAYED_WORK(&mvmvif->csa_work, iwl_mvm_channel_switch_disconnect_wk); if (vif->type == NL80211_IFTYPE_MONITOR) mvm->monitor_on = true; iwl_mvm_vif_dbgfs_register(mvm, vif); goto out_unlock; out_unbind: iwl_mvm_binding_remove_vif(mvm, vif); out_unref_phy: iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt); out_free_bf: if (mvm->bf_allowed_vif == mvmvif) { mvm->bf_allowed_vif = NULL; vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER | IEEE80211_VIF_SUPPORTS_CQM_RSSI); } out_remove_mac: mvmvif->phy_ctxt = NULL; iwl_mvm_mac_ctxt_remove(mvm, vif); out_release: if (vif->type != NL80211_IFTYPE_P2P_DEVICE) mvm->vif_count--; out_unlock: mutex_unlock(&mvm->mutex); return ret; } static void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { /* * Flush the ROC worker which will flush the OFFCHANNEL queue. * We assume here that all the packets sent to the OFFCHANNEL * queue are sent in ROC session. */ flush_work(&mvm->roc_done_wk); } } static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_probe_resp_data *probe_data; iwl_mvm_prepare_mac_removal(mvm, vif); if (vif->type == NL80211_IFTYPE_NAN) { struct wireless_dev *wdev = ieee80211_vif_to_wdev(vif); /* cfg80211 should stop NAN before interface removal */ if (wdev && WARN_ON(wdev_running(wdev))) iwl_mvm_stop_nan(hw, vif); return; } if (!(vif->type == NL80211_IFTYPE_AP || vif->type == NL80211_IFTYPE_ADHOC)) iwl_mvm_tcm_rm_vif(mvm, vif); mutex_lock(&mvm->mutex); probe_data = rcu_dereference_protected(mvmvif->probe_resp_data, lockdep_is_held(&mvm->mutex)); RCU_INIT_POINTER(mvmvif->probe_resp_data, NULL); if (probe_data) kfree_rcu(probe_data, rcu_head); if (mvm->bf_allowed_vif == mvmvif) { mvm->bf_allowed_vif = NULL; vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER | IEEE80211_VIF_SUPPORTS_CQM_RSSI); } if (vif->bss_conf.ftm_responder) memset(&mvm->ftm_resp_stats, 0, sizeof(mvm->ftm_resp_stats)); iwl_mvm_vif_dbgfs_clean(mvm, vif); /* * For AP/GO interface, the tear down of the resources allocated to the * interface is handled as part of the stop_ap flow.
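* Consequently, the AP/ADHOC branch below only releases the broadcast/multicast station bookkeeping; the binding and MAC context were already torn down by the stop_ap/stop_ibss flow.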
*/ if (vif->type == NL80211_IFTYPE_AP || vif->type == NL80211_IFTYPE_ADHOC) { #ifdef CPTCFG_NL80211_TESTMODE if (vif == mvm->noa_vif) { mvm->noa_vif = NULL; mvm->noa_duration = 0; } #endif iwl_mvm_dealloc_int_sta(mvm, &mvmvif->mcast_sta); iwl_mvm_dealloc_bcast_sta(mvm, vif); goto out_release; } #ifdef CPTCFG_IWLMVM_P2P_OPPPS_TEST_WA if (mvmvif == mvm->p2p_opps_test_wa_vif) mvm->p2p_opps_test_wa_vif = NULL; #endif if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { mvm->p2p_device_vif = NULL; iwl_mvm_rm_p2p_bcast_sta(mvm, vif); iwl_mvm_binding_remove_vif(mvm, vif); iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt); mvmvif->phy_ctxt = NULL; } if (mvm->vif_count && vif->type != NL80211_IFTYPE_P2P_DEVICE) mvm->vif_count--; iwl_mvm_power_update_mac(mvm); iwl_mvm_mac_ctxt_remove(mvm, vif); RCU_INIT_POINTER(mvm->vif_id_to_mac[mvmvif->id], NULL); if (vif->type == NL80211_IFTYPE_MONITOR) mvm->monitor_on = false; #ifdef CPTCFG_IWLMVM_TDLS_PEER_CACHE iwl_mvm_tdls_peer_cache_clear(mvm, vif); #endif /* CPTCFG_IWLMVM_TDLS_PEER_CACHE */ out_release: mutex_unlock(&mvm->mutex); } static int iwl_mvm_mac_config(struct ieee80211_hw *hw, u32 changed) { return 0; } struct iwl_mvm_mc_iter_data { struct iwl_mvm *mvm; int port_id; }; static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif) { struct iwl_mvm_mc_iter_data *data = _data; struct iwl_mvm *mvm = data->mvm; struct iwl_mcast_filter_cmd *cmd = mvm->mcast_filter_cmd; struct iwl_host_cmd hcmd = { .id = MCAST_FILTER_CMD, .flags = CMD_ASYNC, .dataflags[0] = IWL_HCMD_DFL_NOCOPY, }; int ret, len; #ifdef CPTCFG_IWLMVM_VENDOR_CMDS if (!(mvm->rx_filters & IWL_MVM_VENDOR_RXFILTER_EINVAL) && mvm->mcast_active_filter_cmd) cmd = mvm->mcast_active_filter_cmd; #endif /* if we don't have free ports, mcast frames will be dropped */ if (WARN_ON_ONCE(data->port_id >= MAX_PORT_ID_NUM)) return; if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc) return; cmd->port_id = data->port_id++; memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN); len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4); hcmd.len[0] = len; hcmd.data[0] = cmd; ret = iwl_mvm_send_cmd(mvm, &hcmd); if (ret) IWL_ERR(mvm, "mcast filter cmd error. 
ret=%d\n", ret); } #ifndef CPTCFG_IWLMVM_VENDOR_CMDS static #endif void iwl_mvm_recalc_multicast(struct iwl_mvm *mvm) { struct iwl_mvm_mc_iter_data iter_data = { .mvm = mvm, }; lockdep_assert_held(&mvm->mutex); if (WARN_ON_ONCE(!mvm->mcast_filter_cmd)) return; ieee80211_iterate_active_interfaces_atomic( mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_mc_iface_iterator, &iter_data); } static u64 iwl_mvm_prepare_multicast(struct ieee80211_hw *hw, struct netdev_hw_addr_list *mc_list) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mcast_filter_cmd *cmd; struct netdev_hw_addr *addr; int addr_count; bool pass_all; int len; addr_count = netdev_hw_addr_list_count(mc_list); pass_all = addr_count > MAX_MCAST_FILTERING_ADDRESSES || IWL_MVM_FW_MCAST_FILTER_PASS_ALL; if (pass_all) addr_count = 0; len = roundup(sizeof(*cmd) + addr_count * ETH_ALEN, 4); cmd = kzalloc(len, GFP_ATOMIC); if (!cmd) return 0; if (pass_all) { cmd->pass_all = 1; return (u64)(unsigned long)cmd; } netdev_hw_addr_list_for_each(addr, mc_list) { IWL_DEBUG_MAC80211(mvm, "mcast addr (%d): %pM\n", cmd->count, addr->addr); memcpy(&cmd->addr_list[cmd->count * ETH_ALEN], addr->addr, ETH_ALEN); cmd->count++; } return (u64)(unsigned long)cmd; } static void iwl_mvm_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags, unsigned int *total_flags, u64 multicast) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mcast_filter_cmd *cmd = (void *)(unsigned long)multicast; mutex_lock(&mvm->mutex); /* replace previous configuration */ kfree(mvm->mcast_filter_cmd); mvm->mcast_filter_cmd = cmd; if (!cmd) goto out; if (changed_flags & FIF_ALLMULTI) cmd->pass_all = !!(*total_flags & FIF_ALLMULTI); if (cmd->pass_all) cmd->count = 0; #ifdef CPTCFG_IWLMVM_VENDOR_CMDS iwl_mvm_active_rx_filters(mvm); #endif iwl_mvm_recalc_multicast(mvm); out: mutex_unlock(&mvm->mutex); *total_flags = 0; } static void iwl_mvm_config_iface_filter(struct ieee80211_hw *hw, struct ieee80211_vif *vif, unsigned int filter_flags, unsigned int changed_flags) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); /* We support only filter for probe requests */ if (!(changed_flags & FIF_PROBE_REQ)) return; /* Supported only for p2p client interfaces */ if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc || !vif->p2p) return; mutex_lock(&mvm->mutex); iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); mutex_unlock(&mvm->mutex); } #ifdef CPTCFG_IWLWIFI_BCAST_FILTERING struct iwl_bcast_iter_data { struct iwl_mvm *mvm; struct iwl_bcast_filter_cmd *cmd; u8 current_filter; }; static void iwl_mvm_set_bcast_filter(struct ieee80211_vif *vif, const struct iwl_fw_bcast_filter *in_filter, struct iwl_fw_bcast_filter *out_filter) { struct iwl_fw_bcast_filter_attr *attr; int i; memcpy(out_filter, in_filter, sizeof(*out_filter)); for (i = 0; i < ARRAY_SIZE(out_filter->attrs); i++) { attr = &out_filter->attrs[i]; if (!attr->mask) break; switch (attr->reserved1) { case cpu_to_le16(BC_FILTER_MAGIC_IP): if (vif->bss_conf.arp_addr_cnt != 1) { attr->mask = 0; continue; } attr->val = vif->bss_conf.arp_addr_list[0]; break; case cpu_to_le16(BC_FILTER_MAGIC_MAC): attr->val = *(__be32 *)&vif->addr[2]; break; default: break; } attr->reserved1 = 0; out_filter->num_attrs++; } } static void iwl_mvm_bcast_filter_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif) { struct iwl_bcast_iter_data *data = _data; struct iwl_mvm *mvm = data->mvm; struct iwl_bcast_filter_cmd *cmd = data->cmd; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_fw_bcast_mac 
*bcast_mac; int i; if (WARN_ON(mvmvif->id >= ARRAY_SIZE(cmd->macs))) return; bcast_mac = &cmd->macs[mvmvif->id]; /* * enable filtering only for associated stations, but not for P2P * Clients */ if (vif->type != NL80211_IFTYPE_STATION || vif->p2p || !vif->bss_conf.assoc) return; bcast_mac->default_discard = 1; /* copy all configured filters */ for (i = 0; mvm->bcast_filters[i].attrs[0].mask; i++) { /* * Make sure we don't exceed our filters limit. * if there is still a valid filter to be configured, * be on the safe side and just allow bcast for this mac. */ if (WARN_ON_ONCE(data->current_filter >= ARRAY_SIZE(cmd->filters))) { bcast_mac->default_discard = 0; bcast_mac->attached_filters = 0; break; } iwl_mvm_set_bcast_filter(vif, &mvm->bcast_filters[i], &cmd->filters[data->current_filter]); /* skip current filter if it contains no attributes */ if (!cmd->filters[data->current_filter].num_attrs) continue; /* attach the filter to current mac */ bcast_mac->attached_filters |= cpu_to_le16(BIT(data->current_filter)); data->current_filter++; } } bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm, struct iwl_bcast_filter_cmd *cmd) { struct iwl_bcast_iter_data iter_data = { .mvm = mvm, .cmd = cmd, }; if (IWL_MVM_FW_BCAST_FILTER_PASS_ALL) return false; memset(cmd, 0, sizeof(*cmd)); cmd->max_bcast_filters = ARRAY_SIZE(cmd->filters); cmd->max_macs = ARRAY_SIZE(cmd->macs); #ifdef CPTCFG_IWLWIFI_DEBUGFS /* use debugfs filters/macs if override is configured */ if (mvm->dbgfs_bcast_filtering.override) { memcpy(cmd->filters, &mvm->dbgfs_bcast_filtering.cmd.filters, sizeof(cmd->filters)); memcpy(cmd->macs, &mvm->dbgfs_bcast_filtering.cmd.macs, sizeof(cmd->macs)); return true; } #endif /* if no filters are configured, do nothing */ if (!mvm->bcast_filters) return false; #ifdef CPTCFG_IWLMVM_VENDOR_CMDS if (!(mvm->rx_filters & IWL_MVM_VENDOR_RXFILTER_EINVAL) && mvm->rx_filters & IWL_MVM_VENDOR_RXFILTER_BCAST) { cmd->disable = 1; return true; } #endif /* configure and attach these filters for each associated sta vif */ ieee80211_iterate_active_interfaces( mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_bcast_filter_iterator, &iter_data); return true; } int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm) { struct iwl_bcast_filter_cmd cmd; if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING)) return 0; if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) return 0; return iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0, sizeof(cmd), &cmd); } #else inline int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm) { return 0; } #endif static int iwl_mvm_update_mu_groups(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mu_group_mgmt_cmd cmd = {}; memcpy(cmd.membership_status, vif->bss_conf.mu_group.membership, WLAN_MEMBERSHIP_LEN); memcpy(cmd.user_position, vif->bss_conf.mu_group.position, WLAN_USER_POSITION_LEN); return iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(DATA_PATH_GROUP, UPDATE_MU_GROUPS_CMD), 0, sizeof(cmd), &cmd); } static void iwl_mvm_mu_mimo_iface_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif) { if (vif->mu_mimo_owner) { struct iwl_mu_group_mgmt_notif *notif = _data; /* * MU-MIMO Group Id action frame is little endian. We treat * the data received from firmware as if it came from the * action frame, so no conversion is needed. 
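* In practice this means membership_status and user_position below are handed to mac80211 as raw byte arrays, exactly as the firmware delivered them.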
*/ ieee80211_update_mu_groups(vif, (u8 *)&notif->membership_status, (u8 *)&notif->user_position); } } void iwl_mvm_mu_mimo_grp_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_mu_group_mgmt_notif *notif = (void *)pkt->data; ieee80211_iterate_active_interfaces_atomic( mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_mu_mimo_iface_iterator, notif); } static u8 iwl_mvm_he_get_ppe_val(u8 *ppe, u8 ppe_pos_bit) { u8 byte_num = ppe_pos_bit / 8; u8 bit_num = ppe_pos_bit % 8; u8 residue_bits; u8 res; if (bit_num <= 5) return (ppe[byte_num] >> bit_num) & (BIT(IEEE80211_PPE_THRES_INFO_PPET_SIZE) - 1); /* * If bit_num > 5, we have to combine bits with next byte. * Calculate how many bits we need to take from current byte (called * here "residue_bits"), and add them to bits from next byte. */ residue_bits = 8 - bit_num; res = (ppe[byte_num + 1] & (BIT(IEEE80211_PPE_THRES_INFO_PPET_SIZE - residue_bits) - 1)) << residue_bits; res += (ppe[byte_num] >> bit_num) & (BIT(residue_bits) - 1); return res; } static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u8 sta_id) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_he_sta_context_cmd sta_ctxt_cmd = { .sta_id = sta_id, .tid_limit = IWL_MAX_TID_COUNT, .bss_color = vif->bss_conf.bss_color, .htc_trig_based_pkt_ext = vif->bss_conf.htc_trig_based_pkt_ext, .frame_time_rts_th = cpu_to_le16(vif->bss_conf.frame_time_rts_th), }; int size = fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_MBSSID_HE) ? sizeof(sta_ctxt_cmd) : sizeof(struct iwl_he_sta_context_cmd_v1); struct ieee80211_sta *sta; u32 flags; int i; rcu_read_lock(); sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_ctxt_cmd.sta_id]); if (IS_ERR(sta)) { rcu_read_unlock(); WARN(1, "Can't find STA to configure HE\n"); return; } if (!sta->he_cap.has_he) { rcu_read_unlock(); return; } flags = 0; /* Block 26-tone RU OFDMA transmissions */ if (mvmvif->he_ru_2mhz_block) flags |= STA_CTXT_HE_RU_2MHZ_BLOCK; /* HTC flags */ if (sta->he_cap.he_cap_elem.mac_cap_info[0] & IEEE80211_HE_MAC_CAP0_HTC_HE) sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_SUPPORT); if ((sta->he_cap.he_cap_elem.mac_cap_info[1] & IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION) || (sta->he_cap.he_cap_elem.mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION)) { u8 link_adap = ((sta->he_cap.he_cap_elem.mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION) << 1) + (sta->he_cap.he_cap_elem.mac_cap_info[1] & IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION); if (link_adap == 2) sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_LINK_ADAP_UNSOLICITED); else if (link_adap == 3) sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_LINK_ADAP_BOTH); } if (sta->he_cap.he_cap_elem.mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_BSR) sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_BSR_SUPP); if (sta->he_cap.he_cap_elem.mac_cap_info[3] & IEEE80211_HE_MAC_CAP3_OMI_CONTROL) sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_OMI_SUPP); if (sta->he_cap.he_cap_elem.mac_cap_info[4] & IEEE80211_HE_MAC_CAP4_BQR) sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_BQR_SUPP); /* * Initialize the PPE thresholds to "None" (7), as described in Table * 9-262ac of 802.11ax/D3.0. */ memset(&sta_ctxt_cmd.pkt_ext, 7, sizeof(sta_ctxt_cmd.pkt_ext)); /* If PPE Thresholds exist, parse them into a FW-familiar format.
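* The parsing below walks this not-byte-aligned stream with iwl_mvm_he_get_ppe_val(); as a worked example, the very first read at ppe_pos_bit == 7 has byte_num = 0 and bit_num = 7, so residue_bits = 1: bit 7 of ppe[0] becomes the low bit and bits 0-1 of ppe[1] the two high bits of the 3-bit (IEEE80211_PPE_THRES_INFO_PPET_SIZE) PPET field.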
*/ if (sta->he_cap.he_cap_elem.phy_cap_info[6] & IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) { u8 nss = (sta->he_cap.ppe_thres[0] & IEEE80211_PPE_THRES_NSS_MASK) + 1; u8 ru_index_bitmap = (sta->he_cap.ppe_thres[0] & IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK) >> IEEE80211_PPE_THRES_RU_INDEX_BITMASK_POS; u8 *ppe = &sta->he_cap.ppe_thres[0]; u8 ppe_pos_bit = 7; /* Starting after PPE header */ /* * FW currently supports only nss == MAX_HE_SUPP_NSS * * If nss > MAX: we can ignore values we don't support * If nss < MAX: we can set zeros in other streams */ if (nss > MAX_HE_SUPP_NSS) { IWL_INFO(mvm, "Got NSS = %d - trimming to %d\n", nss, MAX_HE_SUPP_NSS); nss = MAX_HE_SUPP_NSS; } for (i = 0; i < nss; i++) { u8 ru_index_tmp = ru_index_bitmap << 1; u8 bw; for (bw = 0; bw < MAX_HE_CHANNEL_BW_INDX; bw++) { ru_index_tmp >>= 1; if (!(ru_index_tmp & 1)) continue; sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw][1] = iwl_mvm_he_get_ppe_val(ppe, ppe_pos_bit); ppe_pos_bit += IEEE80211_PPE_THRES_INFO_PPET_SIZE; sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw][0] = iwl_mvm_he_get_ppe_val(ppe, ppe_pos_bit); ppe_pos_bit += IEEE80211_PPE_THRES_INFO_PPET_SIZE; } } flags |= STA_CTXT_HE_PACKET_EXT; } else if ((sta->he_cap.he_cap_elem.phy_cap_info[9] & IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_MASK) != IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_RESERVED) { int low_th = -1; int high_th = -1; /* Take the PPE thresholds from the nominal padding info */ switch (sta->he_cap.he_cap_elem.phy_cap_info[9] & IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_MASK) { case IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_0US: low_th = IWL_HE_PKT_EXT_NONE; high_th = IWL_HE_PKT_EXT_NONE; break; case IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_8US: low_th = IWL_HE_PKT_EXT_BPSK; high_th = IWL_HE_PKT_EXT_NONE; break; case IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_16US: low_th = IWL_HE_PKT_EXT_NONE; high_th = IWL_HE_PKT_EXT_BPSK; break; } /* Set the PPE thresholds accordingly */ if (low_th >= 0 && high_th >= 0) { struct iwl_he_pkt_ext *pkt_ext = (struct iwl_he_pkt_ext *)&sta_ctxt_cmd.pkt_ext; for (i = 0; i < MAX_HE_SUPP_NSS; i++) { u8 bw; for (bw = 0; bw < MAX_HE_CHANNEL_BW_INDX; bw++) { pkt_ext->pkt_ext_qam_th[i][bw][0] = low_th; pkt_ext->pkt_ext_qam_th[i][bw][1] = high_th; } } flags |= STA_CTXT_HE_PACKET_EXT; } } rcu_read_unlock(); /* Mark MU EDCA as enabled, unless none detected on some AC */ flags |= STA_CTXT_HE_MU_EDCA_CW; for (i = 0; i < IEEE80211_NUM_ACS; i++) { struct ieee80211_he_mu_edca_param_ac_rec *mu_edca = &mvmvif->queue_params[i].mu_edca_param_rec; u8 ac = iwl_mvm_mac80211_ac_to_ucode_ac(i); if (!mvmvif->queue_params[i].mu_edca) { flags &= ~STA_CTXT_HE_MU_EDCA_CW; break; } sta_ctxt_cmd.trig_based_txf[ac].cwmin = cpu_to_le16(mu_edca->ecw_min_max & 0xf); sta_ctxt_cmd.trig_based_txf[ac].cwmax = cpu_to_le16((mu_edca->ecw_min_max & 0xf0) >> 4); sta_ctxt_cmd.trig_based_txf[ac].aifsn = cpu_to_le16(mu_edca->aifsn & 0xf); sta_ctxt_cmd.trig_based_txf[ac].mu_time = cpu_to_le16(mu_edca->mu_edca_timer); } if (vif->bss_conf.multi_sta_back_32bit) flags |= STA_CTXT_HE_32BIT_BA_BITMAP; if (vif->bss_conf.ack_enabled) flags |= STA_CTXT_HE_ACK_ENABLED; if (vif->bss_conf.uora_exists) { flags |= STA_CTXT_HE_TRIG_RND_ALLOC; sta_ctxt_cmd.rand_alloc_ecwmin = vif->bss_conf.uora_ocw_range & 0x7; sta_ctxt_cmd.rand_alloc_ecwmax = (vif->bss_conf.uora_ocw_range >> 3) & 0x7; } if (!(sta->he_cap.he_cap_elem.mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_ACK_EN)) flags |= STA_CTXT_HE_NIC_NOT_ACK_ENABLED; #ifdef CPTCFG_IWLWIFI_SUPPORT_DEBUG_OVERRIDES if (mvm->trans->dbg_cfg.no_ack_en & 
0x2) flags &= ~STA_CTXT_HE_ACK_ENABLED; /* MU EDCA override */ if (mvm->trans->dbg_cfg.mu_edca) { u32 mu_edca = mvm->trans->dbg_cfg.mu_edca; for (i = 0; i < IEEE80211_NUM_ACS; i++) { sta_ctxt_cmd.trig_based_txf[i].aifsn = cpu_to_le16(mu_edca & 0xf); sta_ctxt_cmd.trig_based_txf[i].cwmin = cpu_to_le16((mu_edca >> 4) & 0xf); sta_ctxt_cmd.trig_based_txf[i].cwmax = cpu_to_le16((mu_edca >> 8) & 0xf); sta_ctxt_cmd.trig_based_txf[i].mu_time = cpu_to_le16((mu_edca >> 12) & 0xff); } } #endif if (vif->bss_conf.nontransmitted) { flags |= STA_CTXT_HE_REF_BSSID_VALID; ether_addr_copy(sta_ctxt_cmd.ref_bssid_addr, vif->bss_conf.transmitter_bssid); sta_ctxt_cmd.max_bssid_indicator = vif->bss_conf.bssid_indicator; sta_ctxt_cmd.bssid_index = vif->bss_conf.bssid_index; sta_ctxt_cmd.ema_ap = vif->bss_conf.ema_ap; sta_ctxt_cmd.profile_periodicity = vif->bss_conf.profile_periodicity; } sta_ctxt_cmd.flags = cpu_to_le32(flags); if (iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(STA_HE_CTXT_CMD, DATA_PATH_GROUP, 0), 0, size, &sta_ctxt_cmd)) IWL_ERR(mvm, "Failed to config FW to work HE!\n"); } static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_bss_conf *bss_conf, u32 changes) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int ret; /* * Re-calculate the tsf id, as the master-slave relations depend on the * beacon interval, which was not known when the station interface was * added. */ if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc) { if (vif->bss_conf.he_support && !iwlwifi_mod_params.disable_11ax) iwl_mvm_cfg_he_sta(mvm, vif, mvmvif->ap_sta_id); iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif); } /* Update MU EDCA params */ if (changes & BSS_CHANGED_QOS && mvmvif->associated && bss_conf->assoc && vif->bss_conf.he_support && !iwlwifi_mod_params.disable_11ax) iwl_mvm_cfg_he_sta(mvm, vif, mvmvif->ap_sta_id); /* * If we're not associated yet, take the (new) BSSID before associating * so the firmware knows. If we're already associated, then use the old * BSSID here, and we'll send a cleared one later in the CHANGED_ASSOC * branch for disassociation below. */ if (changes & BSS_CHANGED_BSSID && !mvmvif->associated) memcpy(mvmvif->bssid, bss_conf->bssid, ETH_ALEN); ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, mvmvif->bssid); if (ret) IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr); /* after sending it once, adopt mac80211 data */ memcpy(mvmvif->bssid, bss_conf->bssid, ETH_ALEN); mvmvif->associated = bss_conf->assoc; if (changes & BSS_CHANGED_ASSOC) { if (bss_conf->assoc) { /* clear statistics to get clean beacon counter */ iwl_mvm_request_statistics(mvm, true); memset(&mvmvif->beacon_stats, 0, sizeof(mvmvif->beacon_stats)); /* add quota for this interface */ ret = iwl_mvm_update_quotas(mvm, true, NULL); if (ret) { IWL_ERR(mvm, "failed to update quotas\n"); return; } if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { /* * If we're restarting then the firmware will * obviously have lost synchronisation with * the AP. It will attempt to synchronise by * itself, but we can make it more reliable by * scheduling a session protection time event. * * The firmware needs to receive a beacon to * catch up with synchronisation, use 110% of * the beacon interval. * * Set a large maximum delay to allow for more * than a single interface. 
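* As a concrete example, with the common beacon interval of 100 TU this gives dur = 110 TU and a maximum delay of 5 * dur = 550 TU.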
*/ u32 dur = (11 * vif->bss_conf.beacon_int) / 10; iwl_mvm_protect_session(mvm, vif, dur, dur, 5 * dur, false); } iwl_mvm_sf_update(mvm, vif, false); iwl_mvm_power_vif_assoc(mvm, vif); if (vif->p2p) { iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_PROT, IEEE80211_SMPS_DYNAMIC); } } else if (mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) { /* * If update fails - SF might be running in associated * mode while disassociated - which is forbidden. */ ret = iwl_mvm_sf_update(mvm, vif, false); WARN_ONCE(ret && !test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status), "Failed to update SF upon disassociation\n"); /* * If we get an assert during the connection (after the * station has been added, but before the vif is set * to associated), mac80211 will re-add the station and * then configure the vif. Since the vif is not * associated, we would remove the station here and * this would fail the recovery. */ if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { /* * Remove AP station now that * the MAC is unassoc */ ret = iwl_mvm_rm_sta_id(mvm, vif, mvmvif->ap_sta_id); if (ret) IWL_ERR(mvm, "failed to remove AP station\n"); mvmvif->ap_sta_id = IWL_MVM_INVALID_STA; } /* remove quota for this interface */ ret = iwl_mvm_update_quotas(mvm, false, NULL); if (ret) IWL_ERR(mvm, "failed to update quotas\n"); /* this will take the cleared BSSID from bss_conf */ ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); if (ret) IWL_ERR(mvm, "failed to update MAC %pM (clear after unassoc)\n", vif->addr); } /* * The firmware tracks the MU-MIMO group on its own. * However, on HW restart we should restore this data. */ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && (changes & BSS_CHANGED_MU_GROUPS) && vif->mu_mimo_owner) { ret = iwl_mvm_update_mu_groups(mvm, vif); if (ret) IWL_ERR(mvm, "failed to update VHT MU_MIMO groups\n"); } iwl_mvm_recalc_multicast(mvm); iwl_mvm_configure_bcast_filter(mvm); /* reset rssi values */ mvmvif->bf_data.ave_beacon_signal = 0; iwl_mvm_bt_coex_vif_change(mvm); iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_TT, IEEE80211_SMPS_AUTOMATIC); if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) iwl_mvm_config_scan(mvm); } if (changes & BSS_CHANGED_BEACON_INFO) { /* * We received a beacon from the associated AP so * remove the session protection. */ iwl_mvm_stop_session_protection(mvm, vif); iwl_mvm_sf_update(mvm, vif, false); WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0)); } if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS | BSS_CHANGED_QOS | /* * Send power command on every beacon change, * because we may have not enabled beacon abort yet. 
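* (Beacon abort is only enabled once beacon filtering has been configured, so the first beacon change after association may still need an explicit power update; hence BSS_CHANGED_BEACON_INFO is part of the mask below.)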
*/ BSS_CHANGED_BEACON_INFO)) { ret = iwl_mvm_power_update_mac(mvm); if (ret) IWL_ERR(mvm, "failed to update power mode\n"); } if (changes & BSS_CHANGED_TXPOWER) { IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d\n", bss_conf->txpower); iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower); } if (changes & BSS_CHANGED_CQM) { IWL_DEBUG_MAC80211(mvm, "cqm info_changed\n"); /* reset cqm events tracking */ mvmvif->bf_data.last_cqm_event = 0; if (mvmvif->bf_data.bf_enabled) { ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0); if (ret) IWL_ERR(mvm, "failed to update CQM thresholds\n"); } } if (changes & BSS_CHANGED_ARP_FILTER) { IWL_DEBUG_MAC80211(mvm, "arp filter changed\n"); iwl_mvm_configure_bcast_filter(mvm); } } static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int ret, i; mutex_lock(&mvm->mutex); /* Send the beacon template */ ret = iwl_mvm_mac_ctxt_beacon_changed(mvm, vif); if (ret) goto out_unlock; /* * Re-calculate the tsf id, as the master-slave relations depend on the * beacon interval, which was not known when the AP interface was added. */ if (vif->type == NL80211_IFTYPE_AP) iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif); mvmvif->ap_assoc_sta_count = 0; /* Add the mac context */ ret = iwl_mvm_mac_ctxt_add(mvm, vif); if (ret) goto out_unlock; /* Perform the binding */ ret = iwl_mvm_binding_add_vif(mvm, vif); if (ret) goto out_remove; /* * This is not very nice, but the simplest: * For older FWs adding the mcast sta before the bcast station may * cause assert 0x2b00. * This is fixed in later FW so make the order of removal depend on * the TLV */ if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) { ret = iwl_mvm_add_mcast_sta(mvm, vif); if (ret) goto out_unbind; /* * Send the bcast station. At this stage the TBTT and DTIM time * events are added and applied to the scheduler */ ret = iwl_mvm_send_add_bcast_sta(mvm, vif); if (ret) { iwl_mvm_rm_mcast_sta(mvm, vif); goto out_unbind; } } else { /* * Send the bcast station. 
At this stage the TBTT and DTIM time * events are added and applied to the scheduler */ ret = iwl_mvm_send_add_bcast_sta(mvm, vif); if (ret) goto out_unbind; ret = iwl_mvm_add_mcast_sta(mvm, vif); if (ret) { iwl_mvm_send_rm_bcast_sta(mvm, vif); goto out_unbind; } } /* must be set before quota calculations */ mvmvif->ap_ibss_active = true; /* send all the early keys to the device now */ for (i = 0; i < ARRAY_SIZE(mvmvif->ap_early_keys); i++) { struct ieee80211_key_conf *key = mvmvif->ap_early_keys[i]; if (!key) continue; mvmvif->ap_early_keys[i] = NULL; ret = __iwl_mvm_mac_set_key(hw, SET_KEY, vif, NULL, key); if (ret) goto out_quota_failed; } if (vif->type == NL80211_IFTYPE_AP && !vif->p2p) { iwl_mvm_vif_set_low_latency(mvmvif, true, LOW_LATENCY_VIF_TYPE); iwl_mvm_send_low_latency_cmd(mvm, true, mvmvif->id); } /* power update needs to be done before quotas */ iwl_mvm_power_update_mac(mvm); ret = iwl_mvm_update_quotas(mvm, false, NULL); if (ret) goto out_quota_failed; /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */ if (vif->p2p && mvm->p2p_device_vif) iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL); iwl_mvm_bt_coex_vif_change(mvm); /* we don't support TDLS during DCM */ if (iwl_mvm_phy_ctx_count(mvm) > 1) iwl_mvm_teardown_tdls_peers(mvm); iwl_mvm_ftm_restart_responder(mvm, vif); goto out_unlock; out_quota_failed: iwl_mvm_power_update_mac(mvm); mvmvif->ap_ibss_active = false; iwl_mvm_send_rm_bcast_sta(mvm, vif); iwl_mvm_rm_mcast_sta(mvm, vif); out_unbind: iwl_mvm_binding_remove_vif(mvm, vif); out_remove: iwl_mvm_mac_ctxt_remove(mvm, vif); out_unlock: mutex_unlock(&mvm->mutex); return ret; } static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); iwl_mvm_prepare_mac_removal(mvm, vif); mutex_lock(&mvm->mutex); /* Handle AP stop while in CSA */ if (rcu_access_pointer(mvm->csa_vif) == vif) { iwl_mvm_remove_time_event(mvm, mvmvif, &mvmvif->time_event_data); RCU_INIT_POINTER(mvm->csa_vif, NULL); mvmvif->csa_countdown = false; } if (rcu_access_pointer(mvm->csa_tx_blocked_vif) == vif) { RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL); mvm->csa_tx_block_bcn_timeout = 0; } mvmvif->ap_ibss_active = false; mvm->ap_last_beacon_gp2 = 0; if (vif->type == NL80211_IFTYPE_AP && !vif->p2p) { iwl_mvm_vif_set_low_latency(mvmvif, false, LOW_LATENCY_VIF_TYPE); iwl_mvm_send_low_latency_cmd(mvm, false, mvmvif->id); } iwl_mvm_bt_coex_vif_change(mvm); /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */ if (vif->p2p && mvm->p2p_device_vif) iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL); iwl_mvm_update_quotas(mvm, false, NULL); /* * This is not very nice, but the simplest: * For older FWs removing the mcast sta before the bcast station may * cause assert 0x2b00. * This is fixed in later FW (which will stop beaconing when removing * bcast station).
* So make the order of removal depend on the TLV */ if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) iwl_mvm_rm_mcast_sta(mvm, vif); iwl_mvm_send_rm_bcast_sta(mvm, vif); if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) iwl_mvm_rm_mcast_sta(mvm, vif); iwl_mvm_binding_remove_vif(mvm, vif); iwl_mvm_power_update_mac(mvm); iwl_mvm_mac_ctxt_remove(mvm, vif); mutex_unlock(&mvm->mutex); } static void iwl_mvm_bss_info_changed_ap_ibss(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_bss_conf *bss_conf, u32 changes) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); /* Changes will be applied when the AP/IBSS is started */ if (!mvmvif->ap_ibss_active) return; if (changes & (BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_HT | BSS_CHANGED_BANDWIDTH | BSS_CHANGED_QOS) && iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL)) IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr); /* Need to send a new beacon template to the FW */ if (changes & BSS_CHANGED_BEACON && iwl_mvm_mac_ctxt_beacon_changed(mvm, vif)) IWL_WARN(mvm, "Failed updating beacon data\n"); if (changes & BSS_CHANGED_TXPOWER) { IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d\n", bss_conf->txpower); iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower); } if (changes & BSS_CHANGED_FTM_RESPONDER) { int ret = iwl_mvm_ftm_start_responder(mvm, vif); if (ret) IWL_WARN(mvm, "Failed to enable FTM responder (%d)\n", ret); } } static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *bss_conf, u32 changes) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); mutex_lock(&mvm->mutex); if (changes & BSS_CHANGED_IDLE && !bss_conf->idle) iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true); switch (vif->type) { case NL80211_IFTYPE_STATION: iwl_mvm_bss_info_changed_station(mvm, vif, bss_conf, changes); break; case NL80211_IFTYPE_AP: case NL80211_IFTYPE_ADHOC: iwl_mvm_bss_info_changed_ap_ibss(mvm, vif, bss_conf, changes); break; case NL80211_IFTYPE_MONITOR: if (changes & BSS_CHANGED_MU_GROUPS) iwl_mvm_update_mu_groups(mvm, vif); break; default: /* shouldn't happen */ WARN_ON_ONCE(1); } mutex_unlock(&mvm->mutex); } static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_scan_request *hw_req) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int ret; if (hw_req->req.n_channels == 0 || hw_req->req.n_channels > mvm->fw->ucode_capa.n_scan_channels) return -EINVAL; mutex_lock(&mvm->mutex); ret = iwl_mvm_reg_scan_start(mvm, vif, &hw_req->req, &hw_req->ies); mutex_unlock(&mvm->mutex); return ret; } static void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); mutex_lock(&mvm->mutex); /* Due to a race condition, it's possible that mac80211 asks * us to stop a hw_scan when it's already stopped. This can * happen, for instance, if we stopped the scan ourselves, * called ieee80211_scan_completed() and the userspace called * cancel scan before ieee80211_scan_work() could run. * To handle that, simply return if the scan is not running.
*/ if (mvm->scan_status & IWL_MVM_SCAN_REGULAR) iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true); mutex_unlock(&mvm->mutex); } static void iwl_mvm_mac_allow_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta, u16 tids, int num_frames, enum ieee80211_frame_release_type reason, bool more_data) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); /* Called when we need to transmit (a) frame(s) from mac80211 */ iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames, tids, more_data, false); } static void iwl_mvm_mac_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta, u16 tids, int num_frames, enum ieee80211_frame_release_type reason, bool more_data) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); /* Called when we need to transmit (a) frame(s) from agg or dqa queue */ iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames, tids, more_data, true); } static void __iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw, enum sta_notify_cmd cmd, struct ieee80211_sta *sta) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); unsigned long txqs = 0, tids = 0; int tid; /* * If we have TVQM then we get too high queue numbers - luckily * we really shouldn't get here with that because such hardware * should have firmware supporting buffer station offload. */ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) return; spin_lock_bh(&mvmsta->lock); for (tid = 0; tid < ARRAY_SIZE(mvmsta->tid_data); tid++) { struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; if (tid_data->txq_id == IWL_MVM_INVALID_QUEUE) continue; __set_bit(tid_data->txq_id, &txqs); if (iwl_mvm_tid_queued(mvm, tid_data) == 0) continue; __set_bit(tid, &tids); } switch (cmd) { case STA_NOTIFY_SLEEP: for_each_set_bit(tid, &tids, IWL_MAX_TID_COUNT) ieee80211_sta_set_buffered(sta, tid, true); if (txqs) iwl_trans_freeze_txq_timer(mvm->trans, txqs, true); /* * The fw updates the STA to be asleep. Tx packets on the Tx * queues to this station will not be transmitted. The fw will * send a Tx response with TX_STATUS_FAIL_DEST_PS. */ break; case STA_NOTIFY_AWAKE: if (WARN_ON(mvmsta->sta_id == IWL_MVM_INVALID_STA)) break; if (txqs) iwl_trans_freeze_txq_timer(mvm->trans, txqs, false); iwl_mvm_sta_modify_ps_wake(mvm, sta); break; default: break; } spin_unlock_bh(&mvmsta->lock); } static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw, struct ieee80211_vif *vif, enum sta_notify_cmd cmd, struct ieee80211_sta *sta) { __iwl_mvm_mac_sta_notify(hw, cmd, sta); } void iwl_mvm_sta_pm_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_mvm_pm_state_notification *notif = (void *)pkt->data; struct ieee80211_sta *sta; struct iwl_mvm_sta *mvmsta; bool sleeping = (notif->type != IWL_MVM_PM_EVENT_AWAKE); if (WARN_ON(notif->sta_id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))) return; rcu_read_lock(); sta = rcu_dereference(mvm->fw_id_to_mac_id[notif->sta_id]); if (WARN_ON(IS_ERR_OR_NULL(sta))) { rcu_read_unlock(); return; } mvmsta = iwl_mvm_sta_from_mac80211(sta); if (!mvmsta->vif || mvmsta->vif->type != NL80211_IFTYPE_AP) { rcu_read_unlock(); return; } if (mvmsta->sleeping != sleeping) { mvmsta->sleeping = sleeping; __iwl_mvm_mac_sta_notify(mvm->hw, sleeping ? 
STA_NOTIFY_SLEEP : STA_NOTIFY_AWAKE, sta); ieee80211_sta_ps_transition(sta, sleeping); } if (sleeping) { switch (notif->type) { case IWL_MVM_PM_EVENT_AWAKE: case IWL_MVM_PM_EVENT_ASLEEP: break; case IWL_MVM_PM_EVENT_UAPSD: ieee80211_sta_uapsd_trigger(sta, IEEE80211_NUM_TIDS); break; case IWL_MVM_PM_EVENT_PS_POLL: ieee80211_sta_pspoll(sta); break; default: break; } } rcu_read_unlock(); } static void iwl_mvm_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); /* * This is called before mac80211 does RCU synchronisation, * so here we already invalidate our internal RCU-protected * station pointer. The rest of the code will thus no longer * be able to find the station this way, and we don't rely * on further RCU synchronisation after the sta_state() * callback deleted the station. */ mutex_lock(&mvm->mutex); if (sta == rcu_access_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id])) rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id], ERR_PTR(-ENOENT)); mutex_unlock(&mvm->mutex); } static void iwl_mvm_check_uapsd(struct iwl_mvm *mvm, struct ieee80211_vif *vif, const u8 *bssid) { int i; if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { struct iwl_mvm_tcm_mac *mdata; mdata = &mvm->tcm.data[iwl_mvm_vif_from_mac80211(vif)->id]; ewma_rate_init(&mdata->uapsd_nonagg_detect.rate); mdata->opened_rx_ba_sessions = false; } if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT)) return; if (vif->p2p && !iwl_mvm_is_p2p_scm_uapsd_supported(mvm)) { vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD; return; } if (!vif->p2p && (iwlwifi_mod_params.uapsd_disable & IWL_DISABLE_UAPSD_BSS)) { vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD; return; } for (i = 0; i < IWL_MVM_UAPSD_NOAGG_LIST_LEN; i++) { if (ether_addr_equal(mvm->uapsd_noagg_bssids[i].addr, bssid)) { vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD; return; } } vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD; } static void iwl_mvm_tdls_check_trigger(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u8 *peer_addr, enum nl80211_tdls_operation action) { struct iwl_fw_dbg_trigger_tlv *trig; struct iwl_fw_dbg_trigger_tdls *tdls_trig; trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif), FW_DBG_TRIGGER_TDLS); if (!trig) return; tdls_trig = (void *)trig->data; if (!(tdls_trig->action_bitmap & BIT(action))) return; if (tdls_trig->peer_mode && memcmp(tdls_trig->peer, peer_addr, ETH_ALEN) != 0) return; iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, "TDLS event occurred, peer %pM, action %d", peer_addr, action); } struct iwl_mvm_he_obss_narrow_bw_ru_data { bool tolerated; }; static void iwl_mvm_check_he_obss_narrow_bw_ru_iter(struct wiphy *wiphy, struct cfg80211_bss *bss, void *_data) { struct iwl_mvm_he_obss_narrow_bw_ru_data *data = _data; const struct element *elem; elem = cfg80211_find_elem(WLAN_EID_EXT_CAPABILITY, bss->ies->data, bss->ies->len); if (!elem || elem->datalen < 10 || !(elem->data[10] & WLAN_EXT_CAPA10_OBSS_NARROW_BW_RU_TOLERANCE_SUPPORT)) { data->tolerated = false; } } static void iwl_mvm_check_he_obss_narrow_bw_ru(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_he_obss_narrow_bw_ru_data iter_data = { .tolerated = true, }; if (!(vif->bss_conf.chandef.chan->flags & IEEE80211_CHAN_RADAR)) { mvmvif->he_ru_2mhz_block = false; return; } cfg80211_bss_iter(hw->wiphy, 
&vif->bss_conf.chandef, iwl_mvm_check_he_obss_narrow_bw_ru_iter, &iter_data); /* * Block 26-tone RU UL OFDMA transmissions (HE TB PPDU) if at least * one AP on the radar channel cannot tolerate them. */ mvmvif->he_ru_2mhz_block = !iter_data.tolerated; } static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, enum ieee80211_sta_state old_state, enum ieee80211_sta_state new_state) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); int ret; IWL_DEBUG_MAC80211(mvm, "station %pM state change %d->%d\n", sta->addr, old_state, new_state); /* this would be a mac80211 bug ... but don't crash */ if (WARN_ON_ONCE(!mvmvif->phy_ctxt)) return -EINVAL; /* * If we are in a STA removal flow and in DQA mode: * * This is after the sync_rcu part, so the queues have already been * flushed. No more TXs on their way in mac80211's path, and no more in * the queues. * Also, we won't be getting any new TX frames for this station. * What we might have are deferred TX frames that need to be taken care * of. * * Drop any still-queued deferred-frame before removing the STA, and * make sure the worker is no longer handling frames for this STA. */ if (old_state == IEEE80211_STA_NONE && new_state == IEEE80211_STA_NOTEXIST) { flush_work(&mvm->add_stream_wk); /* * No need to make sure deferred TX indication is off since the * worker will already remove it if it was on */ } mutex_lock(&mvm->mutex); /* track whether or not the station is associated */ mvm_sta->sta_state = new_state; if (old_state == IEEE80211_STA_NOTEXIST && new_state == IEEE80211_STA_NONE) { /* * Firmware bug - it'll crash if the beacon interval is less * than 16. We can't avoid connecting at all, so refuse the * station state change, this will cause mac80211 to abandon * attempts to connect to this AP, and eventually wpa_s will * blacklist the AP... */ if (vif->type == NL80211_IFTYPE_STATION && vif->bss_conf.beacon_int < 16) { IWL_ERR(mvm, "AP %pM beacon interval is %d, refusing due to firmware bug!\n", sta->addr, vif->bss_conf.beacon_int); ret = -EINVAL; goto out_unlock; } if (sta->tdls && (vif->p2p || iwl_mvm_tdls_sta_count(mvm, NULL) == IWL_MVM_TDLS_STA_COUNT || iwl_mvm_phy_ctx_count(mvm) > 1)) { IWL_DEBUG_MAC80211(mvm, "refusing TDLS sta\n"); ret = -EBUSY; goto out_unlock; } ret = iwl_mvm_add_sta(mvm, vif, sta); if (sta->tdls && ret == 0) { iwl_mvm_recalc_tdls_state(mvm, vif, true); iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr, NL80211_TDLS_SETUP); } sta->max_rc_amsdu_len = 1; } else if (old_state == IEEE80211_STA_NONE && new_state == IEEE80211_STA_AUTH) { /* * EBS may be disabled due to previous failures reported by FW. * Reset EBS status here assuming environment has been changed.
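* (Setting last_ebs_successful back to true below lets the scan code try * EBS again on the next scan request instead of keeping it disabled.)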
*/ mvm->last_ebs_successful = true; iwl_mvm_check_uapsd(mvm, vif, sta->addr); ret = 0; } else if (old_state == IEEE80211_STA_AUTH && new_state == IEEE80211_STA_ASSOC) { if (vif->type == NL80211_IFTYPE_AP) { vif->bss_conf.he_support = sta->he_cap.has_he; mvmvif->ap_assoc_sta_count++; iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); if (vif->bss_conf.he_support && !iwlwifi_mod_params.disable_11ax) iwl_mvm_cfg_he_sta(mvm, vif, mvm_sta->sta_id); } else if (vif->type == NL80211_IFTYPE_STATION) { vif->bss_conf.he_support = sta->he_cap.has_he; mvmvif->he_ru_2mhz_block = false; if (sta->he_cap.has_he) iwl_mvm_check_he_obss_narrow_bw_ru(hw, vif); iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); } iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band, false); ret = iwl_mvm_update_sta(mvm, vif, sta); } else if (old_state == IEEE80211_STA_ASSOC && new_state == IEEE80211_STA_AUTHORIZED) { ret = 0; /* we don't support TDLS during DCM */ if (iwl_mvm_phy_ctx_count(mvm) > 1) iwl_mvm_teardown_tdls_peers(mvm); if (sta->tdls) iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr, NL80211_TDLS_ENABLE_LINK); /* enable beacon filtering */ WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0)); /* * Now that the station is authorized, i.e., keys were already * installed, need to indicate to the FW that * multicast data frames can be forwarded to the driver */ iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band, true); } else if (old_state == IEEE80211_STA_AUTHORIZED && new_state == IEEE80211_STA_ASSOC) { /* Multicast data frames are no longer allowed */ iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); /* disable beacon filtering */ ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0); WARN_ON(ret && !test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status)); ret = 0; } else if (old_state == IEEE80211_STA_ASSOC && new_state == IEEE80211_STA_AUTH) { if (vif->type == NL80211_IFTYPE_AP) { mvmvif->ap_assoc_sta_count--; iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); } ret = 0; } else if (old_state == IEEE80211_STA_AUTH && new_state == IEEE80211_STA_NONE) { ret = 0; } else if (old_state == IEEE80211_STA_NONE && new_state == IEEE80211_STA_NOTEXIST) { ret = iwl_mvm_rm_sta(mvm, vif, sta); if (sta->tdls) { iwl_mvm_recalc_tdls_state(mvm, vif, false); iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr, NL80211_TDLS_DISABLE_LINK); } if (unlikely(ret && test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status))) ret = 0; } else { ret = -EIO; } out_unlock: mutex_unlock(&mvm->mutex); if (sta->tdls && ret == 0) { if (old_state == IEEE80211_STA_NOTEXIST && new_state == IEEE80211_STA_NONE) ieee80211_reserve_tid(sta, IWL_MVM_TDLS_FW_TID); else if (old_state == IEEE80211_STA_NONE && new_state == IEEE80211_STA_NOTEXIST) ieee80211_unreserve_tid(sta, IWL_MVM_TDLS_FW_TID); } return ret; } static int iwl_mvm_mac_set_rts_threshold(struct ieee80211_hw *hw, u32 value) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); mvm->rts_threshold = value; return 0; } static void iwl_mvm_sta_rc_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, u32 changed) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); if (changed & (IEEE80211_RC_BW_CHANGED | IEEE80211_RC_SUPP_RATES_CHANGED | IEEE80211_RC_NSS_CHANGED)) iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band, true); if (vif->type == NL80211_IFTYPE_STATION && changed & IEEE80211_RC_NSS_CHANGED) iwl_mvm_sf_update(mvm, vif, false); } static int 
iwl_mvm_mac_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 ac, const struct ieee80211_tx_queue_params *params) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); mvmvif->queue_params[ac] = *params; /* * No need to update right away, we'll get BSS_CHANGED_QOS. * The exception is the P2P_DEVICE interface, which needs an immediate update. */ if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { int ret; mutex_lock(&mvm->mutex); ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); mutex_unlock(&mvm->mutex); return ret; } return 0; } static void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 req_duration) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); u32 duration = IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS; u32 min_duration = IWL_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS; if (req_duration > duration) duration = req_duration; mutex_lock(&mvm->mutex); /* Try really hard to protect the session and hear a beacon */ iwl_mvm_protect_session(mvm, vif, duration, min_duration, 500, false); mutex_unlock(&mvm->mutex); } static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct cfg80211_sched_scan_request *req, struct ieee80211_scan_ies *ies) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int ret; mutex_lock(&mvm->mutex); if (!vif->bss_conf.idle) { ret = -EBUSY; goto out; } ret = iwl_mvm_sched_scan_start(mvm, vif, req, ies, IWL_MVM_SCAN_SCHED); out: mutex_unlock(&mvm->mutex); return ret; } static int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int ret; mutex_lock(&mvm->mutex); /* Due to a race condition, it's possible that mac80211 asks * us to stop a sched_scan when it's already stopped. This * can happen, for instance, if we stopped the scan ourselves, * called ieee80211_sched_scan_stopped() and userspace called * stop sched scan before ieee80211_sched_scan_stopped_work() * could run. To handle this, simply return if the scan is * not running.
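* Concretely: (1) the driver stops the scan itself and calls * ieee80211_sched_scan_stopped(); (2) before the resulting stopped-work runs, * userspace issues another stop; (3) mac80211 then calls this callback even * though IWL_MVM_SCAN_SCHED is already clear.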
*/ if (!(mvm->scan_status & IWL_MVM_SCAN_SCHED)) { mutex_unlock(&mvm->mutex); return 0; } ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, false); mutex_unlock(&mvm->mutex); iwl_mvm_wait_for_async_handlers(mvm); return ret; } static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *key) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_sta *mvmsta; struct iwl_mvm_key_pn *ptk_pn; int keyidx = key->keyidx; int ret, i; u8 key_offset; if (iwlwifi_mod_params.swcrypto) { IWL_DEBUG_MAC80211(mvm, "leave - hwcrypto disabled\n"); return -EOPNOTSUPP; } switch (key->cipher) { case WLAN_CIPHER_SUITE_TKIP: if (!mvm->trans->cfg->gen2) { key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE; } else if (vif->type == NL80211_IFTYPE_STATION) { key->flags |= IEEE80211_KEY_FLAG_PUT_MIC_SPACE; } else { IWL_DEBUG_MAC80211(mvm, "Use SW encryption for TKIP\n"); return -EOPNOTSUPP; } break; case WLAN_CIPHER_SUITE_CCMP: case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: if (!iwl_mvm_has_new_tx_api(mvm)) key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE; break; case WLAN_CIPHER_SUITE_AES_CMAC: case WLAN_CIPHER_SUITE_BIP_GMAC_128: case WLAN_CIPHER_SUITE_BIP_GMAC_256: WARN_ON_ONCE(!ieee80211_hw_check(hw, MFP_CAPABLE)); break; case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: if (vif->type == NL80211_IFTYPE_STATION) break; if (iwl_mvm_has_new_tx_api(mvm)) return -EOPNOTSUPP; /* support HW crypto on TX */ return 0; default: /* currently FW supports only one optional cipher scheme */ if (hw->n_cipher_schemes && hw->cipher_schemes->cipher == key->cipher) key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE; else return -EOPNOTSUPP; } switch (cmd) { case SET_KEY: if ((vif->type == NL80211_IFTYPE_ADHOC || vif->type == NL80211_IFTYPE_AP) && !sta) { /* * GTK on AP interface is a TX-only key, return 0; * on IBSS they're per-station and because we're lazy * we don't support them for RX, so do the same. * CMAC/GMAC in AP/IBSS modes must be done in software. */ if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC || key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 || key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) ret = -EOPNOTSUPP; else ret = 0; if (key->cipher != WLAN_CIPHER_SUITE_GCMP && key->cipher != WLAN_CIPHER_SUITE_GCMP_256 && !iwl_mvm_has_new_tx_api(mvm)) { key->hw_key_idx = STA_KEY_IDX_INVALID; break; } if (!mvmvif->ap_ibss_active) { for (i = 0; i < ARRAY_SIZE(mvmvif->ap_early_keys); i++) { if (!mvmvif->ap_early_keys[i]) { mvmvif->ap_early_keys[i] = key; break; } } if (i >= ARRAY_SIZE(mvmvif->ap_early_keys)) ret = -ENOSPC; break; } } /* During FW restart, in order to restore the state as it was, * don't try to reprogram keys we previously failed for. 
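* Such keys were left with hw_key_idx == STA_KEY_IDX_INVALID by the failed * attempt, which is what the check below relies on.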
*/ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && key->hw_key_idx == STA_KEY_IDX_INVALID) { IWL_DEBUG_MAC80211(mvm, "skip invalid idx key programming during restart\n"); ret = 0; break; } if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && sta && iwl_mvm_has_new_rx_api(mvm) && key->flags & IEEE80211_KEY_FLAG_PAIRWISE && (key->cipher == WLAN_CIPHER_SUITE_CCMP || key->cipher == WLAN_CIPHER_SUITE_GCMP || key->cipher == WLAN_CIPHER_SUITE_GCMP_256)) { struct ieee80211_key_seq seq; int tid, q; mvmsta = iwl_mvm_sta_from_mac80211(sta); WARN_ON(rcu_access_pointer(mvmsta->ptk_pn[keyidx])); ptk_pn = kzalloc(struct_size(ptk_pn, q, mvm->trans->num_rx_queues), GFP_KERNEL); if (!ptk_pn) { ret = -ENOMEM; break; } for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) { ieee80211_get_key_rx_seq(key, tid, &seq); for (q = 0; q < mvm->trans->num_rx_queues; q++) memcpy(ptk_pn->q[q].pn[tid], seq.ccmp.pn, IEEE80211_CCMP_PN_LEN); } rcu_assign_pointer(mvmsta->ptk_pn[keyidx], ptk_pn); } /* in HW restart reuse the index, otherwise request a new one */ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) key_offset = key->hw_key_idx; else key_offset = STA_KEY_IDX_INVALID; IWL_DEBUG_MAC80211(mvm, "set hwcrypto key\n"); ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, key_offset); if (ret) { IWL_WARN(mvm, "set key failed\n"); key->hw_key_idx = STA_KEY_IDX_INVALID; /* * can't add key for RX, but we don't need it * in the device for TX so still return 0, * unless we have new TX API where we cannot * put key material into the TX_CMD */ if (iwl_mvm_has_new_tx_api(mvm)) ret = -EOPNOTSUPP; else ret = 0; } break; case DISABLE_KEY: ret = -ENOENT; for (i = 0; i < ARRAY_SIZE(mvmvif->ap_early_keys); i++) { if (mvmvif->ap_early_keys[i] == key) { mvmvif->ap_early_keys[i] = NULL; ret = 0; } } /* found in pending list - don't do anything else */ if (ret == 0) break; if (key->hw_key_idx == STA_KEY_IDX_INVALID) { ret = 0; break; } if (sta && iwl_mvm_has_new_rx_api(mvm) && key->flags & IEEE80211_KEY_FLAG_PAIRWISE && (key->cipher == WLAN_CIPHER_SUITE_CCMP || key->cipher == WLAN_CIPHER_SUITE_GCMP || key->cipher == WLAN_CIPHER_SUITE_GCMP_256)) { mvmsta = iwl_mvm_sta_from_mac80211(sta); ptk_pn = rcu_dereference_protected( mvmsta->ptk_pn[keyidx], lockdep_is_held(&mvm->mutex)); RCU_INIT_POINTER(mvmsta->ptk_pn[keyidx], NULL); if (ptk_pn) kfree_rcu(ptk_pn, rcu_head); } IWL_DEBUG_MAC80211(mvm, "disable hwcrypto key\n"); ret = iwl_mvm_remove_sta_key(mvm, vif, sta, key); break; default: ret = -EINVAL; } return ret; } static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *key) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int ret; mutex_lock(&mvm->mutex); ret = __iwl_mvm_mac_set_key(hw, cmd, vif, sta, key); mutex_unlock(&mvm->mutex); return ret; } static void iwl_mvm_mac_update_tkip_key(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_key_conf *keyconf, struct ieee80211_sta *sta, u32 iv32, u16 *phase1key) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); if (keyconf->hw_key_idx == STA_KEY_IDX_INVALID) return; iwl_mvm_update_tkip_key(mvm, vif, keyconf, sta, iv32, phase1key); } static bool iwl_mvm_rx_aux_roc(struct iwl_notif_wait_data *notif_wait, struct iwl_rx_packet *pkt, void *data) { struct iwl_mvm *mvm = container_of(notif_wait, struct iwl_mvm, notif_wait); struct iwl_hs20_roc_res *resp; int resp_len = iwl_rx_packet_payload_len(pkt); struct iwl_mvm_time_event_data *te_data = data; if (WARN_ON(pkt->hdr.cmd 
!= HOT_SPOT_CMD)) return true; if (WARN_ON_ONCE(resp_len != sizeof(*resp))) { IWL_ERR(mvm, "Invalid HOT_SPOT_CMD response\n"); return true; } resp = (void *)pkt->data; IWL_DEBUG_TE(mvm, "Aux ROC: Received response from ucode: status=%d uid=%d\n", resp->status, resp->event_unique_id); te_data->uid = le32_to_cpu(resp->event_unique_id); IWL_DEBUG_TE(mvm, "TIME_EVENT_CMD response - UID = 0x%x\n", te_data->uid); spin_lock_bh(&mvm->time_event_lock); list_add_tail(&te_data->list, &mvm->aux_roc_te_list); spin_unlock_bh(&mvm->time_event_lock); return true; } #define AUX_ROC_MIN_DURATION MSEC_TO_TU(100) #define AUX_ROC_MIN_DELAY MSEC_TO_TU(200) #define AUX_ROC_MAX_DELAY MSEC_TO_TU(600) #define AUX_ROC_SAFETY_BUFFER MSEC_TO_TU(20) #define AUX_ROC_MIN_SAFETY_BUFFER MSEC_TO_TU(10) static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm, struct ieee80211_channel *channel, struct ieee80211_vif *vif, int duration) { int res; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_time_event_data *te_data = &mvmvif->hs_time_event_data; static const u16 time_event_response[] = { HOT_SPOT_CMD }; struct iwl_notification_wait wait_time_event; u32 dtim_interval = vif->bss_conf.dtim_period * vif->bss_conf.beacon_int; u32 req_dur, delay; struct iwl_hs20_roc_req aux_roc_req = { .action = cpu_to_le32(FW_CTXT_ACTION_ADD), .id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(MAC_INDEX_AUX, 0)), .sta_id_and_color = cpu_to_le32(mvm->aux_sta.sta_id), }; struct iwl_hs20_roc_req_tail *tail = iwl_mvm_chan_info_cmd_tail(mvm, &aux_roc_req.channel_info); u16 len = sizeof(aux_roc_req) - iwl_mvm_chan_info_padding(mvm); /* Set the channel info data */ iwl_mvm_set_chan_info(mvm, &aux_roc_req.channel_info, channel->hw_value, (channel->band == NL80211_BAND_2GHZ) ? PHY_BAND_24 : PHY_BAND_5, PHY_VHT_CHANNEL_MODE20, 0); /* Set the time and duration */ tail->apply_time = cpu_to_le32(iwl_mvm_get_systime(mvm)); delay = AUX_ROC_MIN_DELAY; req_dur = MSEC_TO_TU(duration); /* * If we are associated we want the delay time to be at least one * dtim interval so that the FW can wait until after the DTIM and * then start the time event, this will potentially allow us to * remain off-channel for the max duration. * Since we want to use almost a whole dtim interval we would also * like the delay to be for 2-3 dtim intervals, in case there are * other time events with higher priority. */ if (vif->bss_conf.assoc) { delay = min_t(u32, dtim_interval * 3, AUX_ROC_MAX_DELAY); /* We cannot remain off-channel longer than the DTIM interval */ if (dtim_interval <= req_dur) { req_dur = dtim_interval - AUX_ROC_SAFETY_BUFFER; if (req_dur <= AUX_ROC_MIN_DURATION) req_dur = dtim_interval - AUX_ROC_MIN_SAFETY_BUFFER; } } tail->duration = cpu_to_le32(req_dur); tail->apply_time_max_delay = cpu_to_le32(delay); IWL_DEBUG_TE(mvm, "ROC: Requesting to remain on channel %u for %ums (requested = %ums, max_delay = %ums, dtim_interval = %ums)\n", channel->hw_value, req_dur, duration, delay, dtim_interval); /* Set the node address */ memcpy(tail->node_addr, vif->addr, ETH_ALEN); lockdep_assert_held(&mvm->mutex); spin_lock_bh(&mvm->time_event_lock); if (WARN_ON(te_data->id == HOT_SPOT_CMD)) { spin_unlock_bh(&mvm->time_event_lock); return -EIO; } te_data->vif = vif; te_data->duration = duration; te_data->id = HOT_SPOT_CMD; spin_unlock_bh(&mvm->time_event_lock); /* * Use a notification wait, which really just processes the * command response and doesn't wait for anything, in order * to be able to process the response and get the UID inside * the RX path. 
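* (The UID has to be known by the time the time-event-started notification * arrives on the RX path, otherwise that notification cannot be matched to * this ROC request.)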
Using CMD_WANT_SKB doesn't work because it * stores the buffer and then wakes up this thread, by which * time another notification (that the time event started) * might already be processed unsuccessfully. */ iwl_init_notification_wait(&mvm->notif_wait, &wait_time_event, time_event_response, ARRAY_SIZE(time_event_response), iwl_mvm_rx_aux_roc, te_data); res = iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0, len, &aux_roc_req); if (res) { IWL_ERR(mvm, "Couldn't send HOT_SPOT_CMD: %d\n", res); iwl_remove_notification(&mvm->notif_wait, &wait_time_event); goto out_clear_te; } /* No need to wait for anything, so just pass 1 (0 isn't valid) */ res = iwl_wait_notification(&mvm->notif_wait, &wait_time_event, 1); /* should never fail */ WARN_ON_ONCE(res); if (res) { out_clear_te: spin_lock_bh(&mvm->time_event_lock); iwl_mvm_te_clear_data(mvm, te_data); spin_unlock_bh(&mvm->time_event_lock); } return res; } static int iwl_mvm_roc(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_channel *channel, int duration, enum ieee80211_roc_type type) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct cfg80211_chan_def chandef; struct iwl_mvm_phy_ctxt *phy_ctxt; int ret, i; IWL_DEBUG_MAC80211(mvm, "enter (%d, %d, %d)\n", channel->hw_value, duration, type); /* * Flush the done work, just in case it's still pending, so that * the work it does can complete and we can accept new frames. */ flush_work(&mvm->roc_done_wk); mutex_lock(&mvm->mutex); switch (vif->type) { case NL80211_IFTYPE_STATION: if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT)) { /* Use aux roc framework (HS20) */ ret = iwl_mvm_send_aux_roc_cmd(mvm, channel, vif, duration); goto out_unlock; } IWL_ERR(mvm, "hotspot not supported\n"); ret = -EINVAL; goto out_unlock; case NL80211_IFTYPE_P2P_DEVICE: /* handle below */ break; default: IWL_ERR(mvm, "vif isn't P2P_DEVICE: %d\n", vif->type); ret = -EINVAL; goto out_unlock; } for (i = 0; i < NUM_PHY_CTX; i++) { phy_ctxt = &mvm->phy_ctxts[i]; if (phy_ctxt->ref == 0 || mvmvif->phy_ctxt == phy_ctxt) continue; if (phy_ctxt->ref && channel == phy_ctxt->channel) { /* * Unbind the P2P_DEVICE from the current PHY context, * and if the PHY context is not used remove it. */ ret = iwl_mvm_binding_remove_vif(mvm, vif); if (WARN(ret, "Failed unbinding P2P_DEVICE\n")) goto out_unlock; iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt); /* Bind the P2P_DEVICE to the current PHY Context */ mvmvif->phy_ctxt = phy_ctxt; ret = iwl_mvm_binding_add_vif(mvm, vif); if (WARN(ret, "Failed binding P2P_DEVICE\n")) goto out_unlock; iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt); goto schedule_time_event; } } /* Need to update the PHY context only if the ROC channel changed */ if (channel == mvmvif->phy_ctxt->channel) goto schedule_time_event; cfg80211_chandef_create(&chandef, channel, NL80211_CHAN_NO_HT); /* * Change the PHY context configuration as it is currently referenced * only by the P2P Device MAC */ if (mvmvif->phy_ctxt->ref == 1) { ret = iwl_mvm_phy_ctxt_changed(mvm, mvmvif->phy_ctxt, &chandef, 1, 1); if (ret) goto out_unlock; } else { /* * The PHY context is shared with other MACs. 
Need to remove the * P2P Device from the binding, allocate a new PHY context and * create a new binding */ phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm); if (!phy_ctxt) { ret = -ENOSPC; goto out_unlock; } ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &chandef, 1, 1); if (ret) { IWL_ERR(mvm, "Failed to change PHY context\n"); goto out_unlock; } /* Unbind the P2P_DEVICE from the current PHY context */ ret = iwl_mvm_binding_remove_vif(mvm, vif); if (WARN(ret, "Failed unbinding P2P_DEVICE\n")) goto out_unlock; iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt); /* Bind the P2P_DEVICE to the newly allocated PHY context */ mvmvif->phy_ctxt = phy_ctxt; ret = iwl_mvm_binding_add_vif(mvm, vif); if (WARN(ret, "Failed binding P2P_DEVICE\n")) goto out_unlock; iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt); } schedule_time_event: /* Schedule the time events */ ret = iwl_mvm_start_p2p_roc(mvm, vif, duration, type); out_unlock: mutex_unlock(&mvm->mutex); IWL_DEBUG_MAC80211(mvm, "leave\n"); return ret; } static int iwl_mvm_cancel_roc(struct ieee80211_hw *hw) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); IWL_DEBUG_MAC80211(mvm, "enter\n"); mutex_lock(&mvm->mutex); iwl_mvm_stop_roc(mvm); mutex_unlock(&mvm->mutex); IWL_DEBUG_MAC80211(mvm, "leave\n"); return 0; } struct iwl_mvm_ftm_responder_iter_data { bool responder; struct ieee80211_chanctx_conf *ctx; }; static void iwl_mvm_ftm_responder_chanctx_iter(void *_data, u8 *mac, struct ieee80211_vif *vif) { struct iwl_mvm_ftm_responder_iter_data *data = _data; if (rcu_access_pointer(vif->chanctx_conf) == data->ctx && vif->type == NL80211_IFTYPE_AP && vif->bss_conf.ftmr_params) data->responder = true; } static bool iwl_mvm_is_ftm_responder_chanctx(struct iwl_mvm *mvm, struct ieee80211_chanctx_conf *ctx) { struct iwl_mvm_ftm_responder_iter_data data = { .responder = false, .ctx = ctx, }; ieee80211_iterate_active_interfaces_atomic(mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_ftm_responder_chanctx_iter, &data); return data.responder; } static int __iwl_mvm_add_chanctx(struct iwl_mvm *mvm, struct ieee80211_chanctx_conf *ctx) { u16 *phy_ctxt_id = (u16 *)ctx->drv_priv; struct iwl_mvm_phy_ctxt *phy_ctxt; bool responder = iwl_mvm_is_ftm_responder_chanctx(mvm, ctx); struct cfg80211_chan_def *def = responder ?
&ctx->def : &ctx->min_def; int ret; lockdep_assert_held(&mvm->mutex); IWL_DEBUG_MAC80211(mvm, "Add channel context\n"); phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm); if (!phy_ctxt) { ret = -ENOSPC; goto out; } ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, def, ctx->rx_chains_static, ctx->rx_chains_dynamic); if (ret) { IWL_ERR(mvm, "Failed to add PHY context\n"); goto out; } iwl_mvm_phy_ctxt_ref(mvm, phy_ctxt); *phy_ctxt_id = phy_ctxt->id; out: return ret; } static int iwl_mvm_add_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *ctx) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int ret; mutex_lock(&mvm->mutex); ret = __iwl_mvm_add_chanctx(mvm, ctx); mutex_unlock(&mvm->mutex); return ret; } static void __iwl_mvm_remove_chanctx(struct iwl_mvm *mvm, struct ieee80211_chanctx_conf *ctx) { u16 *phy_ctxt_id = (u16 *)ctx->drv_priv; struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id]; lockdep_assert_held(&mvm->mutex); iwl_mvm_phy_ctxt_unref(mvm, phy_ctxt); } static void iwl_mvm_remove_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *ctx) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); mutex_lock(&mvm->mutex); __iwl_mvm_remove_chanctx(mvm, ctx); mutex_unlock(&mvm->mutex); } static void iwl_mvm_change_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *ctx, u32 changed) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); u16 *phy_ctxt_id = (u16 *)ctx->drv_priv; struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id]; bool responder = iwl_mvm_is_ftm_responder_chanctx(mvm, ctx); struct cfg80211_chan_def *def = responder ? &ctx->def : &ctx->min_def; if (WARN_ONCE((phy_ctxt->ref > 1) && (changed & ~(IEEE80211_CHANCTX_CHANGE_WIDTH | IEEE80211_CHANCTX_CHANGE_RX_CHAINS | IEEE80211_CHANCTX_CHANGE_RADAR | IEEE80211_CHANCTX_CHANGE_MIN_WIDTH)), "Cannot change PHY. Ref=%d, changed=0x%X\n", phy_ctxt->ref, changed)) return; mutex_lock(&mvm->mutex); /* we are only changing the min_width, may be a noop */ if (changed == IEEE80211_CHANCTX_CHANGE_MIN_WIDTH) { if (phy_ctxt->width == def->width) goto out_unlock; /* we are just toggling between 20_NOHT and 20 */ if (phy_ctxt->width <= NL80211_CHAN_WIDTH_20 && def->width <= NL80211_CHAN_WIDTH_20) goto out_unlock; } iwl_mvm_bt_coex_vif_change(mvm); iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, def, ctx->rx_chains_static, ctx->rx_chains_dynamic); out_unlock: mutex_unlock(&mvm->mutex); } static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_chanctx_conf *ctx, bool switching_chanctx) { u16 *phy_ctxt_id = (u16 *)ctx->drv_priv; struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id]; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int ret; lockdep_assert_held(&mvm->mutex); mvmvif->phy_ctxt = phy_ctxt; switch (vif->type) { case NL80211_IFTYPE_AP: /* only needed if we're switching chanctx (i.e. during CSA) */ if (switching_chanctx) { mvmvif->ap_ibss_active = true; break; } /* fall through */ case NL80211_IFTYPE_ADHOC: /* * The AP binding flow is handled as part of the start_ap flow * (in bss_info_changed), similarly for IBSS. */ ret = 0; goto out; case NL80211_IFTYPE_STATION: mvmvif->csa_bcn_pending = false; break; case NL80211_IFTYPE_MONITOR: /* always disable PS when a monitor interface is active */ mvmvif->ps_disabled = true; break; default: ret = -EINVAL; goto out; } ret = iwl_mvm_binding_add_vif(mvm, vif); if (ret) goto out; /* * Power state must be updated before quotas, * otherwise fw will complain. 
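* (Hence iwl_mvm_power_update_mac() is called below before any of the * iwl_mvm_update_quotas() calls in this function.)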
*/ iwl_mvm_power_update_mac(mvm); /* Setting the quota at this stage is only required for monitor * interfaces. For the other types, the bss_info changed flow * will handle quota settings. */ if (vif->type == NL80211_IFTYPE_MONITOR) { mvmvif->monitor_active = true; ret = iwl_mvm_update_quotas(mvm, false, NULL); if (ret) goto out_remove_binding; ret = iwl_mvm_add_snif_sta(mvm, vif); if (ret) goto out_remove_binding; } /* Handle binding during CSA */ if (vif->type == NL80211_IFTYPE_AP) { iwl_mvm_update_quotas(mvm, false, NULL); iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); } if (switching_chanctx && vif->type == NL80211_IFTYPE_STATION) { mvmvif->csa_bcn_pending = true; if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD)) { u32 duration = 3 * vif->bss_conf.beacon_int; /* Protect the session to make sure we hear the first * beacon on the new channel. */ iwl_mvm_protect_session(mvm, vif, duration, duration, vif->bss_conf.beacon_int / 2, true); } iwl_mvm_update_quotas(mvm, false, NULL); } goto out; out_remove_binding: iwl_mvm_binding_remove_vif(mvm, vif); iwl_mvm_power_update_mac(mvm); out: if (ret) mvmvif->phy_ctxt = NULL; return ret; } static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_chanctx_conf *ctx) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int ret; mutex_lock(&mvm->mutex); ret = __iwl_mvm_assign_vif_chanctx(mvm, vif, ctx, false); mutex_unlock(&mvm->mutex); return ret; } static void __iwl_mvm_unassign_vif_chanctx(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_chanctx_conf *ctx, bool switching_chanctx) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct ieee80211_vif *disabled_vif = NULL; lockdep_assert_held(&mvm->mutex); iwl_mvm_remove_time_event(mvm, mvmvif, &mvmvif->time_event_data); switch (vif->type) { case NL80211_IFTYPE_ADHOC: goto out; case NL80211_IFTYPE_MONITOR: mvmvif->monitor_active = false; mvmvif->ps_disabled = false; iwl_mvm_rm_snif_sta(mvm, vif); break; case NL80211_IFTYPE_AP: /* This part is triggered only during CSA */ if (!switching_chanctx || !mvmvif->ap_ibss_active) goto out; mvmvif->csa_countdown = false; /* Set CS bit on all the stations */ iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, true); /* Save blocked iface, the timeout is set on the next beacon */ rcu_assign_pointer(mvm->csa_tx_blocked_vif, vif); mvmvif->ap_ibss_active = false; break; case NL80211_IFTYPE_STATION: if (!switching_chanctx) break; disabled_vif = vif; if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD)) iwl_mvm_mac_ctxt_changed(mvm, vif, true, NULL); break; default: break; } iwl_mvm_update_quotas(mvm, false, disabled_vif); iwl_mvm_binding_remove_vif(mvm, vif); out: mvmvif->phy_ctxt = NULL; iwl_mvm_power_update_mac(mvm); } static void iwl_mvm_unassign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_chanctx_conf *ctx) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); mutex_lock(&mvm->mutex); __iwl_mvm_unassign_vif_chanctx(mvm, vif, ctx, false); mutex_unlock(&mvm->mutex); } static int iwl_mvm_switch_vif_chanctx_swap(struct iwl_mvm *mvm, struct ieee80211_vif_chanctx_switch *vifs) { int ret; mutex_lock(&mvm->mutex); __iwl_mvm_unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, true); __iwl_mvm_remove_chanctx(mvm, vifs[0].old_ctx); ret = __iwl_mvm_add_chanctx(mvm, vifs[0].new_ctx); if (ret) { IWL_ERR(mvm, "failed to add new_ctx during channel switch\n"); goto out_reassign; } ret = __iwl_mvm_assign_vif_chanctx(mvm, 
vifs[0].vif, vifs[0].new_ctx, true); if (ret) { IWL_ERR(mvm, "failed to assign new_ctx during channel switch\n"); goto out_remove; } /* we don't support TDLS during DCM - can be caused by channel switch */ if (iwl_mvm_phy_ctx_count(mvm) > 1) iwl_mvm_teardown_tdls_peers(mvm); goto out; out_remove: __iwl_mvm_remove_chanctx(mvm, vifs[0].new_ctx); out_reassign: if (__iwl_mvm_add_chanctx(mvm, vifs[0].old_ctx)) { IWL_ERR(mvm, "failed to add old_ctx back after failure.\n"); goto out_restart; } if (__iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, true)) { IWL_ERR(mvm, "failed to reassign old_ctx after failure.\n"); goto out_restart; } goto out; out_restart: /* things keep failing, better restart the hw */ iwl_mvm_nic_restart(mvm, false); out: mutex_unlock(&mvm->mutex); return ret; } static int iwl_mvm_switch_vif_chanctx_reassign(struct iwl_mvm *mvm, struct ieee80211_vif_chanctx_switch *vifs) { int ret; mutex_lock(&mvm->mutex); __iwl_mvm_unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, true); ret = __iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].new_ctx, true); if (ret) { IWL_ERR(mvm, "failed to assign new_ctx during channel switch\n"); goto out_reassign; } goto out; out_reassign: if (__iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, true)) { IWL_ERR(mvm, "failed to reassign old_ctx after failure.\n"); goto out_restart; } goto out; out_restart: /* things keep failing, better restart the hw */ iwl_mvm_nic_restart(mvm, false); out: mutex_unlock(&mvm->mutex); return ret; } static int iwl_mvm_switch_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_vif_chanctx_switch *vifs, int n_vifs, enum ieee80211_chanctx_switch_mode mode) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int ret; /* we only support a single-vif right now */ if (n_vifs > 1) return -EOPNOTSUPP; switch (mode) { case CHANCTX_SWMODE_SWAP_CONTEXTS: ret = iwl_mvm_switch_vif_chanctx_swap(mvm, vifs); break; case CHANCTX_SWMODE_REASSIGN_VIF: ret = iwl_mvm_switch_vif_chanctx_reassign(mvm, vifs); break; default: ret = -EOPNOTSUPP; break; } return ret; } static int iwl_mvm_tx_last_beacon(struct ieee80211_hw *hw) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); return mvm->ibss_manager; } static int iwl_mvm_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); if (!mvm_sta || !mvm_sta->vif) { IWL_ERR(mvm, "Station is not associated to a vif\n"); return -EINVAL; } return iwl_mvm_mac_ctxt_beacon_changed(mvm, mvm_sta->vif); } #ifdef CPTCFG_NL80211_TESTMODE static const struct nla_policy iwl_mvm_tm_policy[IWL_TM_ATTR_MAX + 1] = { [IWL_TM_ATTR_CMD] = { .type = NLA_U32 }, [IWL_TM_ATTR_NOA_DURATION] = { .type = NLA_U32 }, [IWL_TM_ATTR_BEACON_FILTER_STATE] = { .type = NLA_U32 }, }; static int __iwl_mvm_mac_testmode_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif, void *data, int len) { struct nlattr *tb[IWL_TM_ATTR_MAX + 1]; int err; u32 noa_duration; err = nla_parse(tb, IWL_TM_ATTR_MAX, data, len, iwl_mvm_tm_policy, NULL); if (err) return err; if (!tb[IWL_TM_ATTR_CMD]) return -EINVAL; switch (nla_get_u32(tb[IWL_TM_ATTR_CMD])) { case IWL_TM_CMD_SET_NOA: if (!vif || vif->type != NL80211_IFTYPE_AP || !vif->p2p || !vif->bss_conf.enable_beacon || !tb[IWL_TM_ATTR_NOA_DURATION]) return -EINVAL; noa_duration = nla_get_u32(tb[IWL_TM_ATTR_NOA_DURATION]); if (noa_duration >= vif->bss_conf.beacon_int) return -EINVAL; mvm->noa_duration = noa_duration; mvm->noa_vif = vif; if 
(fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA)) { return -EOPNOTSUPP; } return iwl_mvm_update_quotas(mvm, true, NULL); case IWL_TM_CMD_SET_BEACON_FILTER: /* must be associated client vif - ignore authorized */ if (!vif || vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc || !vif->bss_conf.dtim_period || !tb[IWL_TM_ATTR_BEACON_FILTER_STATE]) return -EINVAL; if (nla_get_u32(tb[IWL_TM_ATTR_BEACON_FILTER_STATE])) return iwl_mvm_enable_beacon_filter(mvm, vif, 0); return iwl_mvm_disable_beacon_filter(mvm, vif, 0); } return -EOPNOTSUPP; } static int iwl_mvm_mac_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif, void *data, int len) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int err; mutex_lock(&mvm->mutex); err = __iwl_mvm_mac_testmode_cmd(mvm, vif, data, len); mutex_unlock(&mvm->mutex); return err; } #endif static void iwl_mvm_channel_switch(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_channel_switch *chsw) { /* By implementing this operation, we prevent mac80211 from * starting its own channel switch timer, so that we can call * ieee80211_chswitch_done() ourselves at the right time * (which is when the absence time event starts). */ IWL_DEBUG_MAC80211(IWL_MAC80211_GET_MVM(hw), "dummy channel switch op\n"); } static int iwl_mvm_schedule_client_csa(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_channel_switch *chsw) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_chan_switch_te_cmd cmd = { .mac_id = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)), .action = cpu_to_le32(FW_CTXT_ACTION_ADD), .tsf = cpu_to_le32(chsw->timestamp), .cs_count = chsw->count, .cs_mode = chsw->block_tx, }; lockdep_assert_held(&mvm->mutex); if (chsw->delay) cmd.cs_delayed_bcn_count = DIV_ROUND_UP(chsw->delay, vif->bss_conf.beacon_int); return iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, CHANNEL_SWITCH_TIME_EVENT_CMD), 0, sizeof(cmd), &cmd); } static int iwl_mvm_old_pre_chan_sw_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_channel_switch *chsw) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); u32 apply_time; /* Schedule the time event to a bit before beacon 1, * to make sure we're in the new channel when the * GO/AP arrives. If count <= 1, immediately schedule the * TE (this might result in some packet loss or connection * loss).
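* E.g. with beacon_int = 100 TU and count = 4, the event is placed * (300 TU - IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT) after device_timestamp; * the "* 1024" below converts TU to usec to match the device timestamp units.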
*/ if (chsw->count <= 1) apply_time = 0; else apply_time = chsw->device_timestamp + ((vif->bss_conf.beacon_int * (chsw->count - 1) - IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT) * 1024); if (chsw->block_tx) iwl_mvm_csa_client_absent(mvm, vif); if (mvmvif->bf_data.bf_enabled) { int ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0); if (ret) return ret; } iwl_mvm_schedule_csa_period(mvm, vif, vif->bss_conf.beacon_int, apply_time); return 0; } #define IWL_MAX_CSA_BLOCK_TX 1500 static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_channel_switch *chsw) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct ieee80211_vif *csa_vif; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int ret; mutex_lock(&mvm->mutex); mvmvif->csa_failed = false; IWL_DEBUG_MAC80211(mvm, "pre CSA to freq %d\n", chsw->chandef.center_freq1); iwl_fw_dbg_trigger_simple_stop(&mvm->fwrt, ieee80211_vif_to_wdev(vif), FW_DBG_TRIGGER_CHANNEL_SWITCH); switch (vif->type) { case NL80211_IFTYPE_AP: csa_vif = rcu_dereference_protected(mvm->csa_vif, lockdep_is_held(&mvm->mutex)); if (WARN_ONCE(csa_vif && csa_vif->csa_active, "Another CSA is already in progress")) { ret = -EBUSY; goto out_unlock; } /* we still didn't unblock tx, so prevent a new CS meanwhile */ if (rcu_dereference_protected(mvm->csa_tx_blocked_vif, lockdep_is_held(&mvm->mutex))) { ret = -EBUSY; goto out_unlock; } rcu_assign_pointer(mvm->csa_vif, vif); if (WARN_ONCE(mvmvif->csa_countdown, "Previous CSA countdown didn't complete")) { ret = -EBUSY; goto out_unlock; } mvmvif->csa_target_freq = chsw->chandef.chan->center_freq; break; case NL80211_IFTYPE_STATION: if (chsw->block_tx) { /* * If the quiet period is of undetermined or overly long * duration while tx is blocked, arm a timeout so we can * gracefully disconnect. */ if (!chsw->count || chsw->count * vif->bss_conf.beacon_int > IWL_MAX_CSA_BLOCK_TX) schedule_delayed_work(&mvmvif->csa_work, msecs_to_jiffies(IWL_MAX_CSA_BLOCK_TX)); } if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD)) { ret = iwl_mvm_old_pre_chan_sw_sta(mvm, vif, chsw); if (ret) goto out_unlock; } else { iwl_mvm_schedule_client_csa(mvm, vif, chsw); } mvmvif->csa_count = chsw->count; mvmvif->csa_misbehave = false; break; default: break; } mvmvif->ps_disabled = true; ret = iwl_mvm_power_update_ps(mvm); if (ret) goto out_unlock; /* we won't be on this channel any longer */ iwl_mvm_teardown_tdls_peers(mvm); out_unlock: mutex_unlock(&mvm->mutex); return ret; } static void iwl_mvm_channel_switch_rx_beacon(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_channel_switch *chsw) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_chan_switch_te_cmd cmd = { .mac_id = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)), .action = cpu_to_le32(FW_CTXT_ACTION_MODIFY), .tsf = cpu_to_le32(chsw->timestamp), .cs_count = chsw->count, .cs_mode = chsw->block_tx, }; if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CS_MODIFY)) return; if (chsw->count >= mvmvif->csa_count && chsw->block_tx) { if (mvmvif->csa_misbehave) { /* Second time, give up on this AP */ iwl_mvm_abort_channel_switch(hw, vif); ieee80211_chswitch_done(vif, false); mvmvif->csa_misbehave = false; return; } mvmvif->csa_misbehave = true; } mvmvif->csa_count = chsw->count; IWL_DEBUG_MAC80211(mvm, "Modify CSA on mac %d\n", mvmvif->id); WARN_ON(iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, CHANNEL_SWITCH_TIME_EVENT_CMD), CMD_ASYNC, sizeof(cmd), &cmd)); } static void
iwl_mvm_flush_no_vif(struct iwl_mvm *mvm, u32 queues, bool drop) { int i; if (!iwl_mvm_has_new_tx_api(mvm)) { if (drop) { mutex_lock(&mvm->mutex); iwl_mvm_flush_tx_path(mvm, iwl_mvm_flushable_queues(mvm) & queues, 0); mutex_unlock(&mvm->mutex); } else { iwl_trans_wait_tx_queues_empty(mvm->trans, queues); } return; } mutex_lock(&mvm->mutex); for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) { struct ieee80211_sta *sta; sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], lockdep_is_held(&mvm->mutex)); if (IS_ERR_OR_NULL(sta)) continue; if (drop) iwl_mvm_flush_sta_tids(mvm, i, 0xFF, 0); else iwl_mvm_wait_sta_queues_empty(mvm, iwl_mvm_sta_from_mac80211(sta)); } mutex_unlock(&mvm->mutex); } static void iwl_mvm_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u32 queues, bool drop) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif; struct iwl_mvm_sta *mvmsta; struct ieee80211_sta *sta; int i; u32 msk = 0; if (!vif) { iwl_mvm_flush_no_vif(mvm, queues, drop); return; } if (vif->type != NL80211_IFTYPE_STATION) return; /* Make sure we're done with the deferred traffic before flushing */ flush_work(&mvm->add_stream_wk); mutex_lock(&mvm->mutex); mvmvif = iwl_mvm_vif_from_mac80211(vif); /* flush the AP-station and all TDLS peers */ for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) { sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], lockdep_is_held(&mvm->mutex)); if (IS_ERR_OR_NULL(sta)) continue; mvmsta = iwl_mvm_sta_from_mac80211(sta); if (mvmsta->vif != vif) continue; /* make sure only TDLS peers or the AP are flushed */ WARN_ON(i != mvmvif->ap_sta_id && !sta->tdls); if (drop) { if (iwl_mvm_flush_sta(mvm, mvmsta, false, 0)) IWL_ERR(mvm, "flush request fail\n"); } else { msk |= mvmsta->tfd_queue_msk; if (iwl_mvm_has_new_tx_api(mvm)) iwl_mvm_wait_sta_queues_empty(mvm, mvmsta); } } mutex_unlock(&mvm->mutex); /* this can take a while, and we may need/want other operations * to succeed while doing this, so do it without the mutex held */ if (!drop && !iwl_mvm_has_new_tx_api(mvm)) iwl_trans_wait_tx_queues_empty(mvm->trans, msk); } static int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx, struct survey_info *survey) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int ret; memset(survey, 0, sizeof(*survey)); /* only support global statistics right now */ if (idx != 0) return -ENOENT; if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS)) return -ENOENT; mutex_lock(&mvm->mutex); if (iwl_mvm_firmware_running(mvm)) { ret = iwl_mvm_request_statistics(mvm, false); if (ret) goto out; } survey->filled = SURVEY_INFO_TIME | SURVEY_INFO_TIME_RX | SURVEY_INFO_TIME_TX | SURVEY_INFO_TIME_SCAN; survey->time = mvm->accu_radio_stats.on_time_rf + mvm->radio_stats.on_time_rf; do_div(survey->time, USEC_PER_MSEC); survey->time_rx = mvm->accu_radio_stats.rx_time + mvm->radio_stats.rx_time; do_div(survey->time_rx, USEC_PER_MSEC); survey->time_tx = mvm->accu_radio_stats.tx_time + mvm->radio_stats.tx_time; do_div(survey->time_tx, USEC_PER_MSEC); survey->time_scan = mvm->accu_radio_stats.on_time_scan + mvm->radio_stats.on_time_scan; do_div(survey->time_scan, USEC_PER_MSEC); ret = 0; out: mutex_unlock(&mvm->mutex); return ret; } static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct station_info *sinfo) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_sta *mvmsta = 
iwl_mvm_sta_from_mac80211(sta); if (mvmsta->avg_energy) { sinfo->signal_avg = mvmsta->avg_energy; sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG); } /* if beacon filtering isn't on mac80211 does it anyway */ if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER)) return; if (!vif->bss_conf.assoc) return; mutex_lock(&mvm->mutex); if (mvmvif->ap_sta_id != mvmsta->sta_id) goto unlock; if (iwl_mvm_request_statistics(mvm, false)) goto unlock; sinfo->rx_beacon = mvmvif->beacon_stats.num_beacons + mvmvif->beacon_stats.accu_num_beacons; sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_RX); if (mvmvif->beacon_stats.avg_signal) { /* firmware only reports a value after RXing a few beacons */ sinfo->rx_beacon_signal_avg = mvmvif->beacon_stats.avg_signal; sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_SIGNAL_AVG); } unlock: mutex_unlock(&mvm->mutex); } static void iwl_mvm_event_mlme_callback(struct iwl_mvm *mvm, struct ieee80211_vif *vif, const struct ieee80211_event *event) { #define CHECK_MLME_TRIGGER(_cnt, _fmt...) \ do { \ if ((trig_mlme->_cnt) && --(trig_mlme->_cnt)) \ break; \ iwl_fw_dbg_collect_trig(&(mvm)->fwrt, trig, _fmt); \ } while (0) struct iwl_fw_dbg_trigger_tlv *trig; struct iwl_fw_dbg_trigger_mlme *trig_mlme; trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif), FW_DBG_TRIGGER_MLME); if (!trig) return; trig_mlme = (void *)trig->data; if (event->u.mlme.data == ASSOC_EVENT) { if (event->u.mlme.status == MLME_DENIED) CHECK_MLME_TRIGGER(stop_assoc_denied, "DENIED ASSOC: reason %d", event->u.mlme.reason); else if (event->u.mlme.status == MLME_TIMEOUT) CHECK_MLME_TRIGGER(stop_assoc_timeout, "ASSOC TIMEOUT"); } else if (event->u.mlme.data == AUTH_EVENT) { if (event->u.mlme.status == MLME_DENIED) CHECK_MLME_TRIGGER(stop_auth_denied, "DENIED AUTH: reason %d", event->u.mlme.reason); else if (event->u.mlme.status == MLME_TIMEOUT) CHECK_MLME_TRIGGER(stop_auth_timeout, "AUTH TIMEOUT"); } else if (event->u.mlme.data == DEAUTH_RX_EVENT) { CHECK_MLME_TRIGGER(stop_rx_deauth, "DEAUTH RX %d", event->u.mlme.reason); } else if (event->u.mlme.data == DEAUTH_TX_EVENT) { CHECK_MLME_TRIGGER(stop_tx_deauth, "DEAUTH TX %d", event->u.mlme.reason); } #undef CHECK_MLME_TRIGGER } static void iwl_mvm_event_bar_rx_callback(struct iwl_mvm *mvm, struct ieee80211_vif *vif, const struct ieee80211_event *event) { struct iwl_fw_dbg_trigger_tlv *trig; struct iwl_fw_dbg_trigger_ba *ba_trig; trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif), FW_DBG_TRIGGER_BA); if (!trig) return; ba_trig = (void *)trig->data; if (!(le16_to_cpu(ba_trig->rx_bar) & BIT(event->u.ba.tid))) return; iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, "BAR received from %pM, tid %d, ssn %d", event->u.ba.sta->addr, event->u.ba.tid, event->u.ba.ssn); } #ifdef CPTCFG_MAC80211_LATENCY_MEASUREMENTS #define MARKER_CMD_TX_LAT_PAYLOAD_SIZE 5 #define MARKER_CMD_TX_LAT_TID_OFFSET 12 #define MARKER_CMD_TX_LAT_DEFAULT_WIN 1000 #define MARKER_CMD_TX_LAT_UNKNOWN 0xffffffff static u32 iwl_mvm_send_latency_marker_cmd(struct iwl_mvm *mvm, u32 msrmnt, u16 seq, u16 tid) { struct timespec ts; int ret; struct iwl_mvm_marker_rsp *rsp; struct iwl_mvm_marker *marker; struct iwl_host_cmd cmd = { .id = MARKER_CMD, .flags = CMD_WANT_SKB, }; u32 cmd_size = sizeof(struct iwl_mvm_marker) + MARKER_CMD_TX_LAT_PAYLOAD_SIZE * sizeof(u32); getnstimeofday(&ts); marker = kzalloc(cmd_size, GFP_KERNEL); if (!marker) return -ENOMEM; cmd.data[0] = marker; cmd.len[0] = cmd_size; marker->dw_len = 0x8; marker->marker_id = MARKER_ID_TX_FRAME_LATENCY; 
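/* The timestamp written below is wall-clock time in msec, e.g. ts = {.tv_sec = 2, .tv_nsec = 500000000} encodes as 2 * 1000 + 500000000 / 1000000 = 2500 */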
marker->timestamp = cpu_to_le64(ts.tv_sec * 1000 + ts.tv_nsec / 1000000); /* metadata[0] - frame latency */ marker->metadata[0] = cpu_to_le32(msrmnt); /* metadata[1] - delta in msec from UTC to when the frame entered the kernel */ marker->metadata[1] = cpu_to_le32(MARKER_CMD_TX_LAT_UNKNOWN); /* metadata[2] - delta in msec from UTC to when the frame entered the NIC */ marker->metadata[2] = cpu_to_le32(MARKER_CMD_TX_LAT_UNKNOWN); /* metadata[3] - delta in msec from UTC to when the frame was received from the NIC */ marker->metadata[3] = cpu_to_le32(MARKER_CMD_TX_LAT_UNKNOWN); /* metadata[4] - bits 16-31: TFD queue ID, 12-15: TID, 0-11: sequence */ marker->metadata[4] = cpu_to_le32(0xffff0000 | ((0xf & tid) << MARKER_CMD_TX_LAT_TID_OFFSET) | (seq & 0x0fff)); mutex_lock(&mvm->mutex); ret = iwl_mvm_send_cmd(mvm, &cmd); mutex_unlock(&mvm->mutex); if (ret) { IWL_ERR(mvm, "Couldn't send MARKER_CMD: %d\n", ret); goto out; } rsp = (void *)cmd.resp_pkt->data; ret = le32_to_cpu(rsp->gp2); iwl_free_resp(&cmd); out: kfree(marker); return ret; } /* * Trigger the fw log collection in case of a Tx packet that has crossed a * configured threshold. * * There are 3 different modes of triggering: * * Immediate Internal buffer mode: * The driver will retrieve monitor/usniffer data on the first driver * notification and wait for 1 second (during this 1 second it will not issue * another monitor/usniffer retrieve request). During this 1 second the driver * will store all notification data to the trace including GP2 timestamp for * every notification. Also, a special mark will be sent to the firmware for * every notification. * * Delayed Internal buffer mode: * During the window interval the driver will calculate which tx had the * largest latency. When the monitor_collect_window expires the driver will * retrieve the monitor/usniffer data and print the notification for the packet * with max latency to the trace including GP2 timestamp. Also, a special mark * will be sent to the firmware for every notification. * * Continuous External buffer mode: * The mode is used when we are able to direct the usniffer logs to an external * memory device (should be started/stopped manually). * During the window interval the driver will calculate which tx had the * largest latency. When the monitor_collect_window expires the driver prints * the notification for the packet with max latency to the trace including GP2 * timestamp. Also, a special mark will be sent to the firmware for every * notification. */ /* * TX latency monitor watchdog is armed upon first latency notification.
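* (It is armed via schedule_delayed_work() from iwl_mvm_tx_latency_wk() * below.)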
* It fires when the monitor window expires and does the following: * - immediate internal buffer mode: just resets the start_round_ts flag * - delayed internal buffer mode: writes metadata to trace-cmd and collects a * firmware dump * - continuous external buffer mode: just writes metadata to trace-cmd */ void iwl_mvm_tx_latency_watchdog_wk(struct work_struct *wk) { struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, tx_latency_watchdog_wk.work); struct iwl_fw_dbg_trigger_tlv *trig; struct ieee80211_tx_latency_event *tx_lat = &mvm->last_tx_lat_event; struct ieee80211_tx_latency_event *max = &mvm->round_max_tx_lat; u32 round_dur = tx_lat->monitor_collec_wind; mvm->start_round_ts = 0; if (!round_dur) return; if (tx_lat->mode == IEEE80211_TX_LATENCY_EXT_BUF) { trace_iwlwifi_dev_tx_latency_thrshld(mvm->dev, max->msrmnt, max->pkt_start, max->pkt_end, max->tid, max->event_time, max->seq, mvm->max_tx_latency_gp2, 1); return; } trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TX_LATENCY); trace_iwlwifi_dev_tx_latency_thrshld(mvm->dev, max->msrmnt, max->pkt_start, max->pkt_end, max->tid, max->event_time, max->seq, mvm->max_tx_latency_gp2, 1); iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, "Tx Latency threshold was crossed, seq: 0x%x, msrmnt: %d.", max->seq, max->msrmnt); } /* * TX latency monitor work is scheduled upon every latency event. Regardless of * the monitor mode, it first sends a firmware marker command containing the * problematic packet's data - latency measurement, sequence number and TID. * It also obtains the GP2 timestamp for this marker command. * The rest depends on the monitor mode as follows: * 1. Immediate Internal Buffer mode. * - arms watchdog to fire after monitor period (1 sec) * - writes packet's metadata to trace-cmd * - only if the packet is the first one starting the monitor period, * collects a firmware dump * 2. Delayed Internal Buffer mode. * - arms watchdog to fire after user-defined monitor period * - saves largest-latency packet's metadata * 3. Continuous External Buffer mode with delay. * - arms watchdog to fire after user-defined monitor period * - saves largest-latency packet's metadata * 4. Continuous External Buffer mode without delay. * - writes packet's metadata to trace-cmd */ void iwl_mvm_tx_latency_wk(struct work_struct *wk) { struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, tx_latency_wk); struct iwl_fw_dbg_trigger_tlv *trig; struct ieee80211_tx_latency_event *tx_lat = &mvm->last_tx_lat_event; struct ieee80211_tx_latency_event *max = &mvm->round_max_tx_lat; s64 ts = ktime_to_ms(ktime_get()); u32 round_dur = tx_lat->monitor_collec_wind; u32 round_end = tx_lat->monitor_collec_wind ? tx_lat->monitor_collec_wind : MARKER_CMD_TX_LAT_DEFAULT_WIN; bool first_pkt = false; u32 gp2 = 0; u32 delay = 0; if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TX_LATENCY)) return; trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TX_LATENCY); tx_lat->event_time = ktime_to_ms(ktime_get()); gp2 = iwl_mvm_send_latency_marker_cmd(mvm, tx_lat->msrmnt, tx_lat->seq, tx_lat->tid); /* * If this is the first packet that crossed the threshold in the round, * update the start time stamp */ if (!mvm->start_round_ts) { mvm->start_round_ts = ts; first_pkt = true; memcpy(max, tx_lat, sizeof(*tx_lat)); mvm->max_tx_latency_gp2 = gp2; if (round_dur) delay = msecs_to_jiffies(round_dur); else if (tx_lat->mode == IEEE80211_TX_LATENCY_INT_BUF) delay = msecs_to_jiffies(round_end); if (delay) schedule_delayed_work(&mvm->tx_latency_watchdog_wk, delay); } /* * Update the stored max-latency packet.
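* (Only meaningful in the windowed modes; the watchdog reports this saved * maximum when the collection window expires.)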
*/ if (round_dur && max->msrmnt < tx_lat->msrmnt) { memcpy(max, tx_lat, sizeof(*tx_lat)); mvm->max_tx_latency_gp2 = gp2; } if (round_dur) return; if (tx_lat->mode == IEEE80211_TX_LATENCY_EXT_BUF) { trace_iwlwifi_dev_tx_latency_thrshld(mvm->dev, tx_lat->msrmnt, tx_lat->pkt_start, tx_lat->pkt_end, tx_lat->tid, tx_lat->event_time, tx_lat->seq, gp2, 0); return; } trace_iwlwifi_dev_tx_latency_thrshld(mvm->dev, tx_lat->msrmnt, tx_lat->pkt_start, tx_lat->pkt_end, tx_lat->tid, tx_lat->event_time, tx_lat->seq, gp2, 0); if (first_pkt) iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, "Tx Latency threshold was crossed, seq: 0x%x, msrmnt: %d.", tx_lat->seq, tx_lat->msrmnt); } static void iwl_mvm_event_tx_latency_callback(struct iwl_mvm *mvm, struct ieee80211_vif *vif, const struct ieee80211_event *event) { memcpy(&mvm->last_tx_lat_event, &event->u.tx_lat, sizeof(event->u.tx_lat)); schedule_work(&mvm->tx_latency_wk); } #endif /* CPTCFG_MAC80211_LATENCY_MEASUREMENTS */ static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw, struct ieee80211_vif *vif, const struct ieee80211_event *event) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); switch (event->type) { case MLME_EVENT: iwl_mvm_event_mlme_callback(mvm, vif, event); break; case BAR_RX_EVENT: iwl_mvm_event_bar_rx_callback(mvm, vif, event); break; case BA_FRAME_TIMEOUT: iwl_mvm_event_frame_timeout_callback(mvm, vif, event->u.ba.sta, event->u.ba.tid); break; #ifdef CPTCFG_MAC80211_LATENCY_MEASUREMENTS case TX_LATENCY_EVENT: iwl_mvm_event_tx_latency_callback(mvm, vif, event); break; #endif /* CPTCFG_MAC80211_LATENCY_MEASUREMENTS */ default: break; } } #define SYNC_RX_QUEUE_TIMEOUT (HZ * CPTCFG_IWL_TIMEOUT_FACTOR) void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm, struct iwl_mvm_internal_rxq_notif *notif, u32 size) { u32 qmask = BIT(mvm->trans->num_rx_queues) - 1; int ret; lockdep_assert_held(&mvm->mutex); if (!iwl_mvm_has_new_rx_api(mvm)) return; notif->cookie = mvm->queue_sync_cookie; if (notif->sync) atomic_set(&mvm->queue_sync_counter, mvm->trans->num_rx_queues); ret = iwl_mvm_notify_rx_queue(mvm, qmask, (u8 *)notif, size, !notif->sync); if (ret) { IWL_ERR(mvm, "Failed to trigger RX queues sync (%d)\n", ret); goto out; } if (notif->sync) { ret = wait_event_timeout(mvm->rx_sync_waitq, atomic_read(&mvm->queue_sync_counter) == 0 || iwl_mvm_is_radio_killed(mvm), SYNC_RX_QUEUE_TIMEOUT); WARN_ON_ONCE(!ret && !iwl_mvm_is_radio_killed(mvm)); } out: atomic_set(&mvm->queue_sync_counter, 0); mvm->queue_sync_cookie++; } static void iwl_mvm_sync_rx_queues(struct ieee80211_hw *hw) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_internal_rxq_notif data = { .type = IWL_MVM_RXQ_EMPTY, .sync = 1, }; mutex_lock(&mvm->mutex); iwl_mvm_sync_rx_queues_internal(mvm, &data, sizeof(data)); mutex_unlock(&mvm->mutex); } static int iwl_mvm_mac_get_ftm_responder_stats(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct cfg80211_ftm_responder_stats *stats) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); if (vif->p2p || vif->type != NL80211_IFTYPE_AP || !mvmvif->ap_ibss_active || !vif->bss_conf.ftm_responder) return -EINVAL; mutex_lock(&mvm->mutex); *stats = mvm->ftm_resp_stats; mutex_unlock(&mvm->mutex); stats->filled = BIT(NL80211_FTM_STATS_SUCCESS_NUM) | BIT(NL80211_FTM_STATS_PARTIAL_NUM) | BIT(NL80211_FTM_STATS_FAILED_NUM) | BIT(NL80211_FTM_STATS_ASAP_NUM) | BIT(NL80211_FTM_STATS_NON_ASAP_NUM) | BIT(NL80211_FTM_STATS_TOTAL_DURATION_MSEC) | BIT(NL80211_FTM_STATS_UNKNOWN_TRIGGERS_NUM) | 
BIT(NL80211_FTM_STATS_RESCHEDULE_REQUESTS_NUM) | BIT(NL80211_FTM_STATS_OUT_OF_WINDOW_TRIGGERS_NUM); return 0; } static int iwl_mvm_start_pmsr(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct cfg80211_pmsr_request *request) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int ret; mutex_lock(&mvm->mutex); ret = iwl_mvm_ftm_start(mvm, vif, request); mutex_unlock(&mvm->mutex); return ret; } static void iwl_mvm_abort_pmsr(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct cfg80211_pmsr_request *request) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); mutex_lock(&mvm->mutex); iwl_mvm_ftm_abort(mvm, request); mutex_unlock(&mvm->mutex); } static bool iwl_mvm_can_hw_csum(struct sk_buff *skb) { u8 protocol = ip_hdr(skb)->protocol; if (!IS_ENABLED(CONFIG_INET)) return false; return protocol == IPPROTO_TCP || protocol == IPPROTO_UDP; } static bool iwl_mvm_mac_can_aggregate(struct ieee80211_hw *hw, struct sk_buff *head, struct sk_buff *skb) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); /* For now don't aggregate IPv6 in AMSDU */ if (skb->protocol != htons(ETH_P_IP)) return false; if (!iwl_mvm_is_csum_supported(mvm)) return true; return iwl_mvm_can_hw_csum(skb) == iwl_mvm_can_hw_csum(head); } const struct ieee80211_ops iwl_mvm_hw_ops = { .tx = iwl_mvm_mac_tx, .wake_tx_queue = iwl_mvm_mac_wake_tx_queue, .ampdu_action = iwl_mvm_mac_ampdu_action, .start = iwl_mvm_mac_start, .reconfig_complete = iwl_mvm_mac_reconfig_complete, .stop = iwl_mvm_mac_stop, .add_interface = iwl_mvm_mac_add_interface, .remove_interface = iwl_mvm_mac_remove_interface, .config = iwl_mvm_mac_config, .prepare_multicast = iwl_mvm_prepare_multicast, .configure_filter = iwl_mvm_configure_filter, .config_iface_filter = iwl_mvm_config_iface_filter, .bss_info_changed = iwl_mvm_bss_info_changed, .hw_scan = iwl_mvm_mac_hw_scan, .cancel_hw_scan = iwl_mvm_mac_cancel_hw_scan, .sta_pre_rcu_remove = iwl_mvm_sta_pre_rcu_remove, .sta_state = iwl_mvm_mac_sta_state, .sta_notify = iwl_mvm_mac_sta_notify, .allow_buffered_frames = iwl_mvm_mac_allow_buffered_frames, .release_buffered_frames = iwl_mvm_mac_release_buffered_frames, .set_rts_threshold = iwl_mvm_mac_set_rts_threshold, .sta_rc_update = iwl_mvm_sta_rc_update, .conf_tx = iwl_mvm_mac_conf_tx, .mgd_prepare_tx = iwl_mvm_mac_mgd_prepare_tx, .mgd_protect_tdls_discover = iwl_mvm_mac_mgd_protect_tdls_discover, .flush = iwl_mvm_mac_flush, .sched_scan_start = iwl_mvm_mac_sched_scan_start, .sched_scan_stop = iwl_mvm_mac_sched_scan_stop, .set_key = iwl_mvm_mac_set_key, .update_tkip_key = iwl_mvm_mac_update_tkip_key, .remain_on_channel = iwl_mvm_roc, .cancel_remain_on_channel = iwl_mvm_cancel_roc, .add_chanctx = iwl_mvm_add_chanctx, .remove_chanctx = iwl_mvm_remove_chanctx, .change_chanctx = iwl_mvm_change_chanctx, .assign_vif_chanctx = iwl_mvm_assign_vif_chanctx, .unassign_vif_chanctx = iwl_mvm_unassign_vif_chanctx, .switch_vif_chanctx = iwl_mvm_switch_vif_chanctx, .start_ap = iwl_mvm_start_ap_ibss, .stop_ap = iwl_mvm_stop_ap_ibss, .join_ibss = iwl_mvm_start_ap_ibss, .leave_ibss = iwl_mvm_stop_ap_ibss, .tx_last_beacon = iwl_mvm_tx_last_beacon, .set_tim = iwl_mvm_set_tim, .channel_switch = iwl_mvm_channel_switch, .pre_channel_switch = iwl_mvm_pre_channel_switch, .post_channel_switch = iwl_mvm_post_channel_switch, .abort_channel_switch = iwl_mvm_abort_channel_switch, .channel_switch_rx_beacon = iwl_mvm_channel_switch_rx_beacon, .tdls_channel_switch = iwl_mvm_tdls_channel_switch, .tdls_cancel_channel_switch = iwl_mvm_tdls_cancel_channel_switch, .tdls_recv_channel_switch = 
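/*
 * Editor's sketch (illustrative, not the exact call site): the iwl_mvm_hw_ops
 * table being defined here is the vtable through which mac80211 drives this
 * driver. The op-mode passes it when allocating the hardware object, roughly
 * (priv_size stands in for the real private-data size expression):
 *
 *	hw = ieee80211_alloc_hw(priv_size, &iwl_mvm_hw_ops);
 *	...
 *	ret = ieee80211_register_hw(hw);
 *
 * so each callback listed here must honor the calling context mac80211
 * documents for it (a few run atomically, most may sleep).
 */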
iwl_mvm_tdls_recv_channel_switch, .event_callback = iwl_mvm_mac_event_callback, .sync_rx_queues = iwl_mvm_sync_rx_queues, CFG80211_TESTMODE_CMD(iwl_mvm_mac_testmode_cmd) #ifdef CONFIG_PM_SLEEP /* look at d3.c */ .suspend = iwl_mvm_suspend, .resume = iwl_mvm_resume, .set_wakeup = iwl_mvm_set_wakeup, .set_rekey_data = iwl_mvm_set_rekey_data, #if IS_ENABLED(CONFIG_IPV6) .ipv6_addr_change = iwl_mvm_ipv6_addr_change, #endif .set_default_unicast_key = iwl_mvm_set_default_unicast_key, #endif .get_survey = iwl_mvm_mac_get_survey, .sta_statistics = iwl_mvm_mac_sta_statistics, .get_ftm_responder_stats = iwl_mvm_mac_get_ftm_responder_stats, .start_pmsr = iwl_mvm_start_pmsr, .abort_pmsr = iwl_mvm_abort_pmsr, .start_nan = iwl_mvm_start_nan, .stop_nan = iwl_mvm_stop_nan, .add_nan_func = iwl_mvm_add_nan_func, .del_nan_func = iwl_mvm_del_nan_func, .can_aggregate_in_amsdu = iwl_mvm_mac_can_aggregate, #ifdef CPTCFG_IWLWIFI_DEBUGFS .sta_add_debugfs = iwl_mvm_sta_add_debugfs, #endif }; backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h000066400000000000000000002121341351064634500263410ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #ifndef __IWL_MVM_H__ #define __IWL_MVM_H__ #include <linux/list.h> #include <linux/spinlock.h> #include <linux/leds.h> #include <linux/in6.h> #ifdef CONFIG_THERMAL #include <linux/thermal.h> #endif #include "iwl-op-mode.h" #include "iwl-trans.h" #include "fw/notif-wait.h" #include "iwl-eeprom-parse.h" #include "fw/file.h" #include "iwl-config.h" #include "sta.h" #include "fw-api.h" #include "constants.h" #include "iwl-vendor-cmd.h" #include "fw/runtime.h" #include "fw/dbg.h" #include "fw/acpi.h" #include "iwl-nvm-parse.h" #include <linux/average.h> #define IWL_MVM_MAX_ADDRESSES 5 /* RSSI offset for WkP */ #define IWL_RSSI_OFFSET 50 #define IWL_MVM_MISSED_BEACONS_THRESHOLD 8 #define IWL_MVM_MISSED_BEACONS_THRESHOLD_LONG 16 /* A TimeUnit is 1024 microseconds */ #define MSEC_TO_TU(_msec) (_msec*1000/1024) /* For GO, this value represents the number of TUs before CSA "beacon * 0" TBTT when the CSA time-event needs to be scheduled to start. It * must be big enough to ensure that we switch in time. */ #define IWL_MVM_CHANNEL_SWITCH_TIME_GO 40 /* For client, this value represents the number of TUs before CSA * "beacon 1" TBTT, instead. This is because we don't know when the * GO/AP will be in the new channel, so we switch early enough. */ #define IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT 10 /* * This value (in TUs) is used to fine tune the CSA NoA end time which should * be just before "beacon 0" TBTT. */ #define IWL_MVM_CHANNEL_SWITCH_MARGIN 4 /* * Number of beacons to transmit on a new channel until we unblock tx to * the stations, even if we didn't identify them on a new channel */ #define IWL_MVM_CS_UNBLOCK_TX_TIMEOUT 3 /* offchannel queue towards mac80211 */ #define IWL_MVM_OFFCHANNEL_QUEUE 0 extern const struct ieee80211_ops iwl_mvm_hw_ops; /** * struct iwl_mvm_mod_params - module parameters for iwlmvm * @init_dbg: if true, then the NIC won't be stopped if the INIT fw asserted. * We will register to mac80211 to have testmode working. The NIC must not * be up'ed after the INIT fw asserted. This is useful to be able to use * proprietary tools over testmode to debug the INIT fw. * @tfd_q_hang_detect: enables the detection of hung transmit queues * @power_scheme: one of enum iwl_power_scheme */ struct iwl_mvm_mod_params { bool init_dbg; bool tfd_q_hang_detect; int power_scheme; }; extern struct iwl_mvm_mod_params iwlmvm_mod_params; struct iwl_mvm_phy_ctxt { u16 id; u16 color; u32 ref; enum nl80211_chan_width width; /* * TODO: This should probably be removed.
Currently here only for rate * scaling algorithm */ struct ieee80211_channel *channel; }; struct iwl_mvm_time_event_data { struct ieee80211_vif *vif; struct list_head list; unsigned long end_jiffies; u32 duration; bool running; u32 uid; /* * The access to the 'id' field must be done when the * mvm->time_event_lock is held, as it value is used to indicate * if the te is in the time event list or not (when id == TE_MAX) */ u32 id; }; /* Power management */ /** * enum iwl_power_scheme * @IWL_POWER_LEVEL_CAM - Continuously Active Mode * @IWL_POWER_LEVEL_BPS - Balanced Power Save (default) * @IWL_POWER_LEVEL_LP - Low Power */ enum iwl_power_scheme { IWL_POWER_SCHEME_CAM = 1, IWL_POWER_SCHEME_BPS, IWL_POWER_SCHEME_LP }; #define IWL_CONN_MAX_LISTEN_INTERVAL 10 #define IWL_UAPSD_MAX_SP IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL #ifdef CPTCFG_IWLWIFI_DEBUGFS enum iwl_dbgfs_pm_mask { MVM_DEBUGFS_PM_KEEP_ALIVE = BIT(0), MVM_DEBUGFS_PM_SKIP_OVER_DTIM = BIT(1), MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS = BIT(2), MVM_DEBUGFS_PM_RX_DATA_TIMEOUT = BIT(3), MVM_DEBUGFS_PM_TX_DATA_TIMEOUT = BIT(4), MVM_DEBUGFS_PM_LPRX_ENA = BIT(6), MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD = BIT(7), MVM_DEBUGFS_PM_SNOOZE_ENABLE = BIT(8), MVM_DEBUGFS_PM_UAPSD_MISBEHAVING = BIT(9), MVM_DEBUGFS_PM_USE_PS_POLL = BIT(10), }; struct iwl_dbgfs_pm { u16 keep_alive_seconds; u32 rx_data_timeout; u32 tx_data_timeout; bool skip_over_dtim; u8 skip_dtim_periods; bool lprx_ena; u32 lprx_rssi_threshold; bool snooze_ena; bool uapsd_misbehaving; bool use_ps_poll; int mask; }; /* beacon filtering */ enum iwl_dbgfs_bf_mask { MVM_DEBUGFS_BF_ENERGY_DELTA = BIT(0), MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA = BIT(1), MVM_DEBUGFS_BF_ROAMING_STATE = BIT(2), MVM_DEBUGFS_BF_TEMP_THRESHOLD = BIT(3), MVM_DEBUGFS_BF_TEMP_FAST_FILTER = BIT(4), MVM_DEBUGFS_BF_TEMP_SLOW_FILTER = BIT(5), MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER = BIT(6), MVM_DEBUGFS_BF_DEBUG_FLAG = BIT(7), MVM_DEBUGFS_BF_ESCAPE_TIMER = BIT(8), MVM_DEBUGFS_BA_ESCAPE_TIMER = BIT(9), MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT = BIT(10), }; struct iwl_dbgfs_bf { u32 bf_energy_delta; u32 bf_roaming_energy_delta; u32 bf_roaming_state; u32 bf_temp_threshold; u32 bf_temp_fast_filter; u32 bf_temp_slow_filter; u32 bf_enable_beacon_filter; u32 bf_debug_flag; u32 bf_escape_timer; u32 ba_escape_timer; u32 ba_enable_beacon_abort; int mask; }; #endif enum iwl_mvm_smps_type_request { IWL_MVM_SMPS_REQ_BT_COEX, IWL_MVM_SMPS_REQ_TT, IWL_MVM_SMPS_REQ_PROT, NUM_IWL_MVM_SMPS_REQ, }; enum iwl_bt_force_ant_mode { BT_FORCE_ANT_DIS = 0, BT_FORCE_ANT_AUTO, BT_FORCE_ANT_BT, BT_FORCE_ANT_WIFI, BT_FORCE_ANT_MAX, }; /** * struct iwl_mvm_low_latency_force - low latency force mode set by debugfs * @LOW_LATENCY_FORCE_UNSET: unset force mode * @LOW_LATENCY_FORCE_ON: for low latency on * @LOW_LATENCY_FORCE_OFF: for low latency off * @NUM_LOW_LATENCY_FORCE: max num of modes */ enum iwl_mvm_low_latency_force { LOW_LATENCY_FORCE_UNSET, LOW_LATENCY_FORCE_ON, LOW_LATENCY_FORCE_OFF, NUM_LOW_LATENCY_FORCE }; /** * struct iwl_mvm_low_latency_cause - low latency set causes * @LOW_LATENCY_TRAFFIC: indicates low latency traffic was detected * @LOW_LATENCY_DEBUGFS: low latency mode set from debugfs * @LOW_LATENCY_VCMD: low latency mode set from vendor command * @LOW_LATENCY_VIF_TYPE: low latency mode set because of vif type (ap) * @LOW_LATENCY_DEBUGFS_FORCE_ENABLE: indicate that force mode is enabled * the actual set/unset is done with LOW_LATENCY_DEBUGFS_FORCE * @LOW_LATENCY_DEBUGFS_FORCE: low latency force mode from debugfs * set this with LOW_LATENCY_DEBUGFS_FORCE_ENABLE flag * 
in low_latency. */ enum iwl_mvm_low_latency_cause { LOW_LATENCY_TRAFFIC = BIT(0), LOW_LATENCY_DEBUGFS = BIT(1), LOW_LATENCY_VCMD = BIT(2), LOW_LATENCY_VIF_TYPE = BIT(3), LOW_LATENCY_DEBUGFS_FORCE_ENABLE = BIT(4), LOW_LATENCY_DEBUGFS_FORCE = BIT(5), }; /** * struct iwl_mvm_vif_bf_data - beacon filtering related data * @bf_enabled: indicates if beacon filtering is enabled * @ba_enabled: indicated if beacon abort is enabled * @ave_beacon_signal: average beacon signal * @last_cqm_event: rssi of the last cqm event * @bt_coex_min_thold: minimum threshold for BT coex * @bt_coex_max_thold: maximum threshold for BT coex * @last_bt_coex_event: rssi of the last BT coex event */ struct iwl_mvm_vif_bf_data { bool bf_enabled; bool ba_enabled; int ave_beacon_signal; int last_cqm_event; int bt_coex_min_thold; int bt_coex_max_thold; int last_bt_coex_event; }; /** * struct iwl_probe_resp_data - data for NoA/CSA updates * @rcu_head: used for freeing the data on update * @notif: notification data * @noa_len: length of NoA attribute, calculated from the notification */ struct iwl_probe_resp_data { struct rcu_head rcu_head; struct iwl_probe_resp_data_notif notif; int noa_len; }; /** * struct iwl_mvm_vif - data per Virtual Interface, it is a MAC context * @id: between 0 and 3 * @color: to solve races upon MAC addition and removal * @ap_sta_id: the sta_id of the AP - valid only if VIF type is STA * @bssid: BSSID for this (client) interface * @associated: indicates that we're currently associated, used only for * managing the firmware state in iwl_mvm_bss_info_changed_station() * @ap_assoc_sta_count: count of stations associated to us - valid only * if VIF type is AP * @uploaded: indicates the MAC context has been added to the device * @ap_ibss_active: indicates that AP/IBSS is configured and that the interface * should get quota etc. * @pm_enabled - Indicate if MAC power management is allowed * @monitor_active: indicates that monitor context is configured, and that the * interface should get quota etc. * @low_latency: bit flags for low latency * see enum &iwl_mvm_low_latency_cause for causes. * @low_latency_actual: boolean, indicates low latency is set, * as a result from low_latency bit flags and takes force into account. * @ps_disabled: indicates that this interface requires PS to be disabled * @queue_params: QoS params for this MAC * @bcast_sta: station used for broadcast packets. Used by the following * vifs: P2P_DEVICE, GO and AP. * @beacon_skb: the skb used to hold the AP/GO beacon template * @smps_requests: the SMPS requests of different parts of the driver, * combined on update to yield the overall request to mac80211. * @beacon_stats: beacon statistics, containing the # of received beacons, * # of received beacons accumulated over FW restart, and the current * average signal of beacons retrieved from the firmware * @csa_failed: CSA failed to schedule time event, report an error later * @features: hw features active for this vif * @probe_resp_data: data from FW notification to store NOA and CSA related * data to be inserted into probe response. 
*/ struct iwl_mvm_vif { struct iwl_mvm *mvm; u16 id; u16 color; u8 ap_sta_id; u8 bssid[ETH_ALEN]; bool associated; u8 ap_assoc_sta_count; u16 cab_queue; bool uploaded; bool ap_ibss_active; bool pm_enabled; bool monitor_active; u8 low_latency: 6; u8 low_latency_actual: 1; bool ps_disabled; struct iwl_mvm_vif_bf_data bf_data; struct { u32 num_beacons, accu_num_beacons; u8 avg_signal; } beacon_stats; u32 ap_beacon_time; enum iwl_tsf_id tsf_id; /* * QoS data from mac80211, need to store this here * as mac80211 has a separate callback but we need * to have the data for the MAC context */ struct ieee80211_tx_queue_params queue_params[IEEE80211_NUM_ACS]; struct iwl_mvm_time_event_data time_event_data; struct iwl_mvm_time_event_data hs_time_event_data; struct iwl_mvm_int_sta bcast_sta; struct iwl_mvm_int_sta mcast_sta; /* * Assigned while mac80211 has the interface in a channel context, * or, for P2P Device, while it exists. */ struct iwl_mvm_phy_ctxt *phy_ctxt; #ifdef CONFIG_PM /* WoWLAN GTK rekey data */ struct { u8 kck[NL80211_KCK_LEN], kek[NL80211_KEK_LEN]; __le64 replay_ctr; bool valid; } rekey_data; int tx_key_idx; bool seqno_valid; u16 seqno; #endif #if IS_ENABLED(CONFIG_IPV6) /* IPv6 addresses for WoWLAN */ struct in6_addr target_ipv6_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX]; unsigned long tentative_addrs[BITS_TO_LONGS(IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX)]; int num_target_ipv6_addrs; #endif #ifdef CPTCFG_IWLWIFI_DEBUGFS struct dentry *dbgfs_dir; struct dentry *dbgfs_slink; struct iwl_dbgfs_pm dbgfs_pm; struct iwl_dbgfs_bf dbgfs_bf; struct iwl_mac_power_cmd mac_pwr_cmd; int dbgfs_quota_min; #endif enum ieee80211_smps_mode smps_requests[NUM_IWL_MVM_SMPS_REQ]; /* FW identified misbehaving AP */ u8 uapsd_misbehaving_bssid[ETH_ALEN]; struct delayed_work uapsd_nonagg_detected_wk; /* Indicates that CSA countdown may be started */ bool csa_countdown; bool csa_failed; u16 csa_target_freq; u16 csa_count; u16 csa_misbehave; struct delayed_work csa_work; /* Indicates that we are waiting for a beacon on a new channel */ bool csa_bcn_pending; /* TCP Checksum Offload */ netdev_features_t features; struct iwl_probe_resp_data __rcu *probe_resp_data; /* we can only have 2 GTK + 2 IGTK active at a time */ struct ieee80211_key_conf *ap_early_keys[4]; /* 26-tone RU OFDMA transmissions should be blocked */ bool he_ru_2mhz_block; }; static inline struct iwl_mvm_vif * iwl_mvm_vif_from_mac80211(struct ieee80211_vif *vif) { if (!vif) return NULL; return (void *)vif->drv_priv; } extern const u8 tid_to_mac80211_ac[]; #define IWL_MVM_SCAN_STOPPING_SHIFT 8 enum iwl_scan_status { IWL_MVM_SCAN_REGULAR = BIT(0), IWL_MVM_SCAN_SCHED = BIT(1), IWL_MVM_SCAN_NETDETECT = BIT(2), IWL_MVM_SCAN_STOPPING_REGULAR = BIT(8), IWL_MVM_SCAN_STOPPING_SCHED = BIT(9), IWL_MVM_SCAN_STOPPING_NETDETECT = BIT(10), IWL_MVM_SCAN_REGULAR_MASK = IWL_MVM_SCAN_REGULAR | IWL_MVM_SCAN_STOPPING_REGULAR, IWL_MVM_SCAN_SCHED_MASK = IWL_MVM_SCAN_SCHED | IWL_MVM_SCAN_STOPPING_SCHED, IWL_MVM_SCAN_NETDETECT_MASK = IWL_MVM_SCAN_NETDETECT | IWL_MVM_SCAN_STOPPING_NETDETECT, IWL_MVM_SCAN_STOPPING_MASK = 0xff << IWL_MVM_SCAN_STOPPING_SHIFT, IWL_MVM_SCAN_MASK = 0xff, }; enum iwl_mvm_scan_type { IWL_SCAN_TYPE_NOT_SET, IWL_SCAN_TYPE_UNASSOC, IWL_SCAN_TYPE_WILD, IWL_SCAN_TYPE_MILD, IWL_SCAN_TYPE_FRAGMENTED, IWL_SCAN_TYPE_FAST_BALANCE, }; enum iwl_mvm_sched_scan_pass_all_states { SCHED_SCAN_PASS_ALL_DISABLED, SCHED_SCAN_PASS_ALL_ENABLED, SCHED_SCAN_PASS_ALL_FOUND, }; /** * struct iwl_mvm_tt_mgnt - Thermal Throttling Management structure * @ct_kill_exit: worker to 
exit thermal kill * @dynamic_smps: Is thermal throttling enabled dynamic_smps? * @tx_backoff: The current thremal throttling tx backoff in uSec. * @min_backoff: The minimal tx backoff due to power restrictions * @params: Parameters to configure the thermal throttling algorithm. * @throttle: Is thermal throttling is active? */ struct iwl_mvm_tt_mgmt { struct delayed_work ct_kill_exit; bool dynamic_smps; u32 tx_backoff; u32 min_backoff; struct iwl_tt_params params; bool throttle; }; #ifdef CONFIG_THERMAL /** *struct iwl_mvm_thermal_device - thermal zone related data * @temp_trips: temperature thresholds for report * @fw_trips_index: keep indexes to original array - temp_trips * @tzone: thermal zone device data */ struct iwl_mvm_thermal_device { s16 temp_trips[IWL_MAX_DTS_TRIPS]; u8 fw_trips_index[IWL_MAX_DTS_TRIPS]; struct thermal_zone_device *tzone; }; /* * struct iwl_mvm_cooling_device * @cur_state: current state * @cdev: struct thermal cooling device */ struct iwl_mvm_cooling_device { u32 cur_state; struct thermal_cooling_device *cdev; }; #endif #define IWL_MVM_NUM_LAST_FRAMES_UCODE_RATES 8 struct iwl_mvm_frame_stats { u32 legacy_frames; u32 ht_frames; u32 vht_frames; u32 bw_20_frames; u32 bw_40_frames; u32 bw_80_frames; u32 bw_160_frames; u32 sgi_frames; u32 ngi_frames; u32 siso_frames; u32 mimo2_frames; u32 agg_frames; u32 ampdu_count; u32 success_frames; u32 fail_frames; u32 last_rates[IWL_MVM_NUM_LAST_FRAMES_UCODE_RATES]; int last_frame_idx; }; #define IWL_MVM_DEBUG_SET_TEMPERATURE_DISABLE 0xff #define IWL_MVM_DEBUG_SET_TEMPERATURE_MIN -100 #define IWL_MVM_DEBUG_SET_TEMPERATURE_MAX 200 enum iwl_mvm_tdls_cs_state { IWL_MVM_TDLS_SW_IDLE = 0, IWL_MVM_TDLS_SW_REQ_SENT, IWL_MVM_TDLS_SW_RESP_RCVD, IWL_MVM_TDLS_SW_REQ_RCVD, IWL_MVM_TDLS_SW_ACTIVE, }; enum iwl_mvm_traffic_load { IWL_MVM_TRAFFIC_LOW, IWL_MVM_TRAFFIC_MEDIUM, IWL_MVM_TRAFFIC_HIGH, }; DECLARE_EWMA(rate, 16, 16) struct iwl_mvm_tcm_mac { struct { u32 pkts[IEEE80211_NUM_ACS]; u32 airtime; } tx; struct { u32 pkts[IEEE80211_NUM_ACS]; u32 airtime; u32 last_ampdu_ref; } rx; struct { /* track AP's transfer in client mode */ u64 rx_bytes; struct ewma_rate rate; bool detected; } uapsd_nonagg_detect; bool opened_rx_ba_sessions; }; struct iwl_mvm_tcm { struct delayed_work work; spinlock_t lock; /* used when time elapsed */ unsigned long ts; /* timestamp when period ends */ unsigned long ll_ts; unsigned long uapsd_nonagg_ts; bool paused; struct iwl_mvm_tcm_mac data[NUM_MAC_INDEX_DRIVER]; struct { u32 elapsed; /* milliseconds for this TCM period */ u32 airtime[NUM_MAC_INDEX_DRIVER]; enum iwl_mvm_traffic_load load[NUM_MAC_INDEX_DRIVER]; enum iwl_mvm_traffic_load band_load[NUM_NL80211_BANDS]; enum iwl_mvm_traffic_load global_load; bool low_latency[NUM_MAC_INDEX_DRIVER]; bool change[NUM_MAC_INDEX_DRIVER]; bool global_change; } result; }; #ifdef CPTCFG_IWLMVM_TDLS_PEER_CACHE #define IWL_MVM_TDLS_CNT_MAX_PEERS 4 struct iwl_mvm_tdls_peer_counter { struct list_head list; struct rcu_head rcu_head; struct mac_address mac __aligned(2); struct ieee80211_vif *vif; u32 tx_bytes; struct { u32 bytes; } ____cacheline_aligned_in_smp rx[]; }; #endif /** * struct iwl_mvm_reorder_buffer - per ra/tid/queue reorder buffer * @head_sn: reorder window head sn * @num_stored: number of mpdus stored in the buffer * @buf_size: the reorder buffer size as set by the last addba request * @queue: queue of this reorder buffer * @last_amsdu: track last ASMDU SN for duplication detection * @last_sub_index: track ASMDU sub frame index for duplication detection * @reorder_timer: 
timer for frames are in the reorder buffer. For AMSDU * it is the time of last received sub-frame * @removed: prevent timer re-arming * @valid: reordering is valid for this queue * @lock: protect reorder buffer internal state * @mvm: mvm pointer, needed for frame timer context */ struct iwl_mvm_reorder_buffer { u16 head_sn; u16 num_stored; u16 buf_size; int queue; u16 last_amsdu; u8 last_sub_index; struct timer_list reorder_timer; bool removed; bool valid; spinlock_t lock; struct iwl_mvm *mvm; } ____cacheline_aligned_in_smp; /** * struct _iwl_mvm_reorder_buf_entry - reorder buffer entry per-queue/per-seqno * @frames: list of skbs stored * @reorder_time: time the packet was stored in the reorder buffer */ struct _iwl_mvm_reorder_buf_entry { struct sk_buff_head frames; unsigned long reorder_time; }; /* make this indirection to get the aligned thing */ struct iwl_mvm_reorder_buf_entry { struct _iwl_mvm_reorder_buf_entry e; } #ifndef __CHECKER__ /* sparse doesn't like this construct: "bad integer constant expression" */ __aligned(roundup_pow_of_two(sizeof(struct _iwl_mvm_reorder_buf_entry))) #endif ; /** * struct iwl_mvm_baid_data - BA session data * @sta_id: station id * @tid: tid of the session * @baid baid of the session * @timeout: the timeout set in the addba request * @entries_per_queue: # of buffers per queue, this actually gets * aligned up to avoid cache line sharing between queues * @last_rx: last rx jiffies, updated only if timeout passed from last update * @session_timer: timer to check if BA session expired, runs at 2 * timeout * @mvm: mvm pointer, needed for timer context * @reorder_buf: reorder buffer, allocated per queue * @reorder_buf_data: data */ struct iwl_mvm_baid_data { struct rcu_head rcu_head; u8 sta_id; u8 tid; u8 baid; u16 timeout; u16 entries_per_queue; unsigned long last_rx; struct timer_list session_timer; struct iwl_mvm_baid_data __rcu **rcu_ptr; struct iwl_mvm *mvm; struct iwl_mvm_reorder_buffer reorder_buf[IWL_MAX_RX_HW_QUEUES]; struct iwl_mvm_reorder_buf_entry entries[]; }; static inline struct iwl_mvm_baid_data * iwl_mvm_baid_data_from_reorder_buf(struct iwl_mvm_reorder_buffer *buf) { return (void *)((u8 *)buf - offsetof(struct iwl_mvm_baid_data, reorder_buf) - sizeof(*buf) * buf->queue); } /* * enum iwl_mvm_queue_status - queue status * @IWL_MVM_QUEUE_FREE: the queue is not allocated nor reserved * Basically, this means that this queue can be used for any purpose * @IWL_MVM_QUEUE_RESERVED: queue is reserved but not yet in use * This is the state of a queue that has been dedicated for some RATID * (agg'd or not), but that hasn't yet gone through the actual enablement * of iwl_mvm_enable_txq(), and therefore no traffic can go through it yet. * Note that in this state there is no requirement to already know what TID * should be used with this queue, it is just marked as a queue that will * be used, and shouldn't be allocated to anyone else. * @IWL_MVM_QUEUE_READY: queue is ready to be used * This is the state of a queue that has been fully configured (including * SCD pointers, etc), has a specific RA/TID assigned to it, and can be * used to send traffic. * @IWL_MVM_QUEUE_SHARED: queue is shared, or in a process of becoming shared * This is a state in which a single queue serves more than one TID, all of * which are not aggregated. Note that the queue is only associated to one * RA. 
*/ enum iwl_mvm_queue_status { IWL_MVM_QUEUE_FREE, IWL_MVM_QUEUE_RESERVED, IWL_MVM_QUEUE_READY, IWL_MVM_QUEUE_SHARED, }; #define IWL_MVM_DQA_QUEUE_TIMEOUT (5 * HZ) #define IWL_MVM_INVALID_QUEUE 0xFFFF #define IWL_MVM_NUM_CIPHERS 10 struct iwl_mvm_sar_profile { bool enabled; u8 table[ACPI_SAR_TABLE_SIZE]; }; struct iwl_mvm_geo_profile { u8 values[ACPI_GEO_TABLE_SIZE]; }; struct iwl_mvm_txq { struct list_head list; u16 txq_id; atomic_t tx_request; bool stopped; }; static inline struct iwl_mvm_txq * iwl_mvm_txq_from_mac80211(struct ieee80211_txq *txq) { return (void *)txq->drv_priv; } static inline struct iwl_mvm_txq * iwl_mvm_txq_from_tid(struct ieee80211_sta *sta, u8 tid) { if (tid == IWL_MAX_TID_COUNT) tid = IEEE80211_NUM_TIDS; return (void *)sta->txq[tid]->drv_priv; } /** * struct iwl_mvm_tvqm_txq_info - maps TVQM hw queue to tid * * @sta_id: sta id * @txq_tid: txq tid */ struct iwl_mvm_tvqm_txq_info { u8 sta_id; u8 txq_tid; }; struct iwl_mvm_dqa_txq_info { u8 ra_sta_id; /* The RA this queue is mapped to, if exists */ bool reserved; /* Is this the TXQ reserved for a STA */ u8 mac80211_ac; /* The mac80211 AC this queue is mapped to */ u8 txq_tid; /* The TID "owner" of this queue*/ u16 tid_bitmap; /* Bitmap of the TIDs mapped to this queue */ /* Timestamp for inactivation per TID of this queue */ unsigned long last_frame_time[IWL_MAX_TID_COUNT + 1]; enum iwl_mvm_queue_status status; }; #ifdef CPTCFG_IWLMVM_VENDOR_CMDS struct iwl_csi_data_buffer { struct page *page; unsigned int offset; unsigned int page_order; }; #endif struct iwl_mvm { /* for logger access */ struct device *dev; struct iwl_trans *trans; const struct iwl_fw *fw; const struct iwl_cfg *cfg; struct iwl_phy_db *phy_db; struct ieee80211_hw *hw; /* for protecting access to iwl_mvm */ struct mutex mutex; struct list_head async_handlers_list; spinlock_t async_handlers_lock; struct work_struct async_handlers_wk; struct work_struct roc_done_wk; #ifdef CPTCFG_MAC80211_LATENCY_MEASUREMENTS struct work_struct tx_latency_wk; struct delayed_work tx_latency_watchdog_wk; struct ieee80211_tx_latency_event last_tx_lat_event; struct ieee80211_tx_latency_event round_max_tx_lat; s64 start_round_ts; u32 max_tx_latency_gp2; #endif /* CPTCFG_MAC80211_LATENCY_MEASUREMENTS */ unsigned long init_status; unsigned long status; u32 queue_sync_cookie; atomic_t queue_sync_counter; /* * for beacon filtering - * currently only one interface can be supported */ struct iwl_mvm_vif *bf_allowed_vif; bool hw_registered; bool rfkill_safe_init_done; bool support_umac_log; u32 ampdu_ref; bool ampdu_toggle; struct iwl_notif_wait_data notif_wait; union { struct mvm_statistics_rx_v3 rx_stats_v3; struct mvm_statistics_rx rx_stats; }; struct { u64 rx_time; u64 tx_time; u64 on_time_rf; u64 on_time_scan; } radio_stats, accu_radio_stats; struct list_head add_stream_txqs; union { struct iwl_mvm_dqa_txq_info queue_info[IWL_MAX_HW_QUEUES]; struct iwl_mvm_tvqm_txq_info tvqm_info[IWL_MAX_TVQM_QUEUES]; }; struct work_struct add_stream_wk; /* To add streams to queues */ const char *nvm_file_name; struct iwl_nvm_data *nvm_data; /* NVM sections */ struct iwl_nvm_section nvm_sections[NVM_MAX_NUM_SECTIONS]; struct iwl_fw_runtime fwrt; /* EEPROM MAC addresses */ struct mac_address addresses[IWL_MVM_MAX_ADDRESSES]; /* data related to data path */ struct iwl_rx_phy_info last_phy_info; struct ieee80211_sta __rcu *fw_id_to_mac_id[IWL_MVM_STATION_COUNT]; u8 rx_ba_sessions; /* configured by mac80211 */ u32 rts_threshold; /* Scan status, cmd (pre-allocated) and auxiliary station */ unsigned 
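/*
 * Editor's note (illustrative): scan_status, declared just below, is a
 * bitmask built from enum iwl_scan_status above - bits 0..7 mark scan types
 * currently running, bits from IWL_MVM_SCAN_STOPPING_SHIFT up mark types in
 * the process of being stopped. A sketch of the intended transitions (not a
 * verbatim excerpt from scan.c):
 *
 *	mvm->scan_status |= IWL_MVM_SCAN_REGULAR;	    scan started
 *	mvm->scan_status |= IWL_MVM_SCAN_STOPPING_REGULAR;  stop requested
 *	mvm->scan_status &= ~IWL_MVM_SCAN_REGULAR_MASK;	    fully stopped
 */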
int scan_status; void *scan_cmd; struct iwl_mcast_filter_cmd *mcast_filter_cmd; /* For CDB this is low band scan type, for non-CDB - type. */ enum iwl_mvm_scan_type scan_type; enum iwl_mvm_scan_type hb_scan_type; enum iwl_mvm_sched_scan_pass_all_states sched_scan_pass_all; struct delayed_work scan_timeout_dwork; /* max number of simultaneous scans the FW supports */ unsigned int max_scans; /* UMAC scan tracking */ u32 scan_uid_status[IWL_MVM_MAX_UMAC_SCANS]; /* start time of last scan in TSF of the mac that requested the scan */ u64 scan_start; /* the vif that requested the current scan */ struct iwl_mvm_vif *scan_vif; /* rx chain antennas set through debugfs for the scan command */ u8 scan_rx_ant; #ifdef CPTCFG_IWLWIFI_BCAST_FILTERING /* broadcast filters to configure for each associated station */ const struct iwl_fw_bcast_filter *bcast_filters; #ifdef CPTCFG_IWLWIFI_DEBUGFS struct { bool override; struct iwl_bcast_filter_cmd cmd; } dbgfs_bcast_filtering; #endif #endif /* Internal station */ struct iwl_mvm_int_sta aux_sta; struct iwl_mvm_int_sta snif_sta; bool last_ebs_successful; u8 scan_last_antenna_idx; /* to toggle TX between antennas */ u8 mgmt_last_antenna_idx; /* last smart fifo state that was successfully sent to firmware */ enum iwl_sf_state sf_state; #ifdef CPTCFG_IWLWIFI_DEBUGFS struct dentry *debugfs_dir; u32 dbgfs_sram_offset, dbgfs_sram_len; u32 dbgfs_prph_reg_addr; bool disable_power_off; bool disable_power_off_d3; bool beacon_inject_active; bool scan_iter_notif_enabled; struct debugfs_blob_wrapper nvm_hw_blob; struct debugfs_blob_wrapper nvm_sw_blob; struct debugfs_blob_wrapper nvm_calib_blob; struct debugfs_blob_wrapper nvm_prod_blob; struct debugfs_blob_wrapper nvm_phy_sku_blob; struct debugfs_blob_wrapper nvm_reg_blob; struct iwl_mvm_frame_stats drv_rx_stats; spinlock_t drv_stats_lock; u16 dbgfs_rx_phyinfo; #endif struct iwl_mvm_phy_ctxt phy_ctxts[NUM_PHY_CTX]; struct list_head time_event_list; spinlock_t time_event_lock; /* * A bitmap indicating the index of the key in use. The firmware * can hold 16 keys at most. Reflect this fact. */ unsigned long fw_key_table[BITS_TO_LONGS(STA_KEY_MAX_NUM)]; u8 fw_key_deleted[STA_KEY_MAX_NUM]; u8 vif_count; struct ieee80211_vif __rcu *vif_id_to_mac[NUM_MAC_INDEX_DRIVER]; /* -1 for always, 0 for never, >0 for that many times */ s8 fw_restart; u8 *error_recovery_buf; #ifdef CPTCFG_IWLWIFI_LEDS struct led_classdev led; #endif struct ieee80211_vif *p2p_device_vif; #ifdef CONFIG_PM struct wiphy_wowlan_support wowlan; int gtk_ivlen, gtk_icvlen, ptk_ivlen, ptk_icvlen; /* sched scan settings for net detect */ struct ieee80211_scan_ies nd_ies; struct cfg80211_match_set *nd_match_sets; int n_nd_match_sets; struct ieee80211_channel **nd_channels; int n_nd_channels; bool net_detect; u8 offload_tid; #ifdef CPTCFG_IWLWIFI_DEBUGFS bool d3_wake_sysassert; bool d3_test_active; u32 d3_test_pme_ptr; struct ieee80211_vif *keep_vif; u32 last_netdetect_scans; /* no. of scans in the last net-detect wake */ #endif #endif wait_queue_head_t rx_sync_waitq; /* BT-Coex */ struct iwl_bt_coex_profile_notif last_bt_notif; struct iwl_bt_coex_ci_cmd last_bt_ci_cmd; u8 bt_tx_prio; enum iwl_bt_force_ant_mode bt_force_ant_mode; /* Aux ROC */ struct list_head aux_roc_te_list; /* Thermal Throttling and CTkill */ struct iwl_mvm_tt_mgmt thermal_throttle; #ifdef CONFIG_THERMAL struct iwl_mvm_thermal_device tz_device; struct iwl_mvm_cooling_device cooling_dev; #endif s32 temperature; /* Celsius */ /* * Debug option to set the NIC temperature. 
This option makes the * driver think this is the actual NIC temperature, and ignore the * real temperature that is received from the fw */ bool temperature_test; /* Debug test temperature is enabled */ unsigned long bt_coex_last_tcm_ts; u8 uapsd_noagg_bssid_write_idx; struct mac_address uapsd_noagg_bssids[IWL_MVM_UAPSD_NOAGG_BSSIDS_NUM] __aligned(2); struct iwl_mvm_tcm tcm; #ifdef CPTCFG_IWLMVM_TDLS_PEER_CACHE struct list_head tdls_peer_cache_list; u32 tdls_peer_cache_cnt; #endif struct iwl_time_quota_cmd last_quota_cmd; #ifdef CPTCFG_NL80211_TESTMODE u32 noa_duration; struct ieee80211_vif *noa_vif; #endif /* Tx queues */ u16 aux_queue; u16 snif_queue; u16 probe_queue; u16 p2p_dev_queue; /* Indicate if device power save is allowed */ u8 ps_disabled; /* u8 instead of bool to ease debugfs_create_* usage */ /* Indicate if 32Khz external clock is valid */ u32 ext_clock_valid; struct ieee80211_vif __rcu *csa_vif; struct ieee80211_vif __rcu *csa_tx_blocked_vif; u8 csa_tx_block_bcn_timeout; /* system time of last beacon (for AP/GO interface) */ u32 ap_last_beacon_gp2; /* indicates that we transmitted the last beacon */ bool ibss_manager; bool lar_regdom_set; enum iwl_mcc_source mcc_src; /* TDLS channel switch data */ struct { struct delayed_work dwork; enum iwl_mvm_tdls_cs_state state; /* * Current cs sta - might be different from periodic cs peer * station. Value is meaningless when the cs-state is idle. */ u8 cur_sta_id; /* TDLS periodic channel-switch peer */ struct { u8 sta_id; u8 op_class; bool initiator; /* are we the link initiator */ struct cfg80211_chan_def chandef; struct sk_buff *skb; /* ch sw template */ u32 ch_sw_tm_ie; /* timestamp of last ch-sw request sent (GP2 time) */ u32 sent_timestamp; } peer; } tdls_cs; #ifdef CPTCFG_IWLMVM_VENDOR_CMDS union { struct iwl_dev_tx_power_cmd_v4 v4; struct iwl_dev_tx_power_cmd v5; } txp_cmd; #endif #ifdef CPTCFG_IWLMVM_P2P_OPPPS_TEST_WA /* * Add the following as part of a WA to pass P2P OPPPS certification * test. Refer to IWLMVM_P2P_OPPPS_TEST_WA description in * Kconfig.noupstream for details. */ struct iwl_mvm_vif *p2p_opps_test_wa_vif; #endif u32 ciphers[IWL_MVM_NUM_CIPHERS]; struct ieee80211_cipher_scheme cs[IWL_UCODE_MAX_CS]; struct cfg80211_ftm_responder_stats ftm_resp_stats; struct { struct cfg80211_pmsr_request *req; struct wireless_dev *req_wdev; struct list_head loc_list; int responses[IWL_MVM_TOF_MAX_APS]; } ftm_initiator; #ifdef CPTCFG_IWLMVM_VENDOR_CMDS struct iwl_mcast_filter_cmd *mcast_active_filter_cmd; u8 rx_filters; struct { u32 flags; u32 timer; u32 count; u64 frame_types; u32 rate_n_flags_val; u32 rate_n_flags_mask; u32 interval; struct { u8 addr[ETH_ALEN] __aligned(2); } filter_addrs[IWL_NUM_CHANNEL_ESTIMATION_FILTER_ADDRS]; u8 num_filter_addrs; } csi_cfg; /* we store up to some number of chunks, plus the first notification */ struct iwl_csi_data_buffer csi_data_entries[IWL_CSI_MAX_EXPECTED_CHUNKS + 1]; unsigned int csi_portid; struct list_head list; /* move this out from vendor commands once there are other users */ struct { u8 csi_notif; } cmd_ver; #endif /* CPTCFG_IWLMVM_VENDOR_CMDS */ struct ieee80211_vif *nan_vif; #define IWL_MAX_BAID 32 struct iwl_mvm_baid_data __rcu *baid_map[IWL_MAX_BAID]; /* * Drop beacons from other APs in AP mode when there are no connected * clients. 
*/ bool drop_bcn_ap_mode; struct delayed_work cs_tx_unblock_dwork; /* does a monitor vif exist (only one can exist hence bool) */ bool monitor_on; /* sniffer data to include in radiotap */ __le16 cur_aid; u8 cur_bssid[ETH_ALEN]; #ifdef CONFIG_ACPI struct iwl_mvm_sar_profile sar_profiles[ACPI_SAR_PROFILE_NUM]; #ifdef CPTCFG_IWLMVM_VENDOR_CMDS u8 sar_chain_a_profile; u8 sar_chain_b_profile; #endif struct iwl_mvm_geo_profile geo_profiles[ACPI_NUM_GEO_PROFILES]; u32 geo_rev; struct iwl_ppag_table_cmd ppag_table; u32 ppag_rev; #endif }; /* Extract MVM priv from op_mode and _hw */ #define IWL_OP_MODE_GET_MVM(_iwl_op_mode) \ ((struct iwl_mvm *)(_iwl_op_mode)->op_mode_specific) #define IWL_MAC80211_GET_MVM(_hw) \ IWL_OP_MODE_GET_MVM((struct iwl_op_mode *)((_hw)->priv)) /** * enum iwl_mvm_status - MVM status bits * @IWL_MVM_STATUS_HW_RFKILL: HW RF-kill is asserted * @IWL_MVM_STATUS_HW_CTKILL: CT-kill is active * @IWL_MVM_STATUS_ROC_RUNNING: remain-on-channel is running * @IWL_MVM_STATUS_HW_RESTART_REQUESTED: HW restart was requested * @IWL_MVM_STATUS_IN_HW_RESTART: HW restart is active * @IWL_MVM_STATUS_ROC_AUX_RUNNING: AUX remain-on-channel is running * @IWL_MVM_STATUS_FIRMWARE_RUNNING: firmware is running * @IWL_MVM_STATUS_NEED_FLUSH_P2P: need to flush P2P bcast STA */ enum iwl_mvm_status { IWL_MVM_STATUS_HW_RFKILL, IWL_MVM_STATUS_HW_CTKILL, IWL_MVM_STATUS_ROC_RUNNING, IWL_MVM_STATUS_HW_RESTART_REQUESTED, IWL_MVM_STATUS_IN_HW_RESTART, IWL_MVM_STATUS_ROC_AUX_RUNNING, IWL_MVM_STATUS_FIRMWARE_RUNNING, IWL_MVM_STATUS_NEED_FLUSH_P2P, }; /* Keep track of completed init configuration */ enum iwl_mvm_init_status { IWL_MVM_INIT_STATUS_THERMAL_INIT_COMPLETE = BIT(0), IWL_MVM_INIT_STATUS_LEDS_INIT_COMPLETE = BIT(1), }; static inline bool iwl_mvm_is_radio_killed(struct iwl_mvm *mvm) { return test_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status) || test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status); } static inline bool iwl_mvm_is_radio_hw_killed(struct iwl_mvm *mvm) { return test_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status); } static inline bool iwl_mvm_firmware_running(struct iwl_mvm *mvm) { return test_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status); } /* Must be called with rcu_read_lock() held and it can only be * released when mvmsta is not needed anymore. 
*/ static inline struct iwl_mvm_sta * iwl_mvm_sta_from_staid_rcu(struct iwl_mvm *mvm, u8 sta_id) { struct ieee80211_sta *sta; if (sta_id >= ARRAY_SIZE(mvm->fw_id_to_mac_id)) return NULL; sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); /* This can happen if the station has been removed right now */ if (IS_ERR_OR_NULL(sta)) return NULL; return iwl_mvm_sta_from_mac80211(sta); } static inline struct iwl_mvm_sta * iwl_mvm_sta_from_staid_protected(struct iwl_mvm *mvm, u8 sta_id) { struct ieee80211_sta *sta; if (sta_id >= ARRAY_SIZE(mvm->fw_id_to_mac_id)) return NULL; sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], lockdep_is_held(&mvm->mutex)); /* This can happen if the station has been removed right now */ if (IS_ERR_OR_NULL(sta)) return NULL; return iwl_mvm_sta_from_mac80211(sta); } static inline struct ieee80211_vif * iwl_mvm_rcu_dereference_vif_id(struct iwl_mvm *mvm, u8 vif_id, bool rcu) { if (WARN_ON(vif_id >= ARRAY_SIZE(mvm->vif_id_to_mac))) return NULL; if (rcu) return rcu_dereference(mvm->vif_id_to_mac[vif_id]); return rcu_dereference_protected(mvm->vif_id_to_mac[vif_id], lockdep_is_held(&mvm->mutex)); } static inline bool iwl_mvm_is_adaptive_dwell_supported(struct iwl_mvm *mvm) { return fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_ADAPTIVE_DWELL); } static inline bool iwl_mvm_is_adaptive_dwell_v2_supported(struct iwl_mvm *mvm) { return fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_ADAPTIVE_DWELL_V2); } static inline bool iwl_mvm_is_adwell_hb_ap_num_supported(struct iwl_mvm *mvm) { return fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_ADWELL_HB_DEF_N_AP); } static inline bool iwl_mvm_is_oce_supported(struct iwl_mvm *mvm) { /* OCE should never be enabled for LMAC scan FWs */ return fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_OCE); } static inline bool iwl_mvm_is_frag_ebs_supported(struct iwl_mvm *mvm) { return fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_FRAG_EBS); } static inline bool iwl_mvm_is_short_beacon_notif_supported(struct iwl_mvm *mvm) { return fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_SHORT_BEACON_NOTIF); } static inline bool iwl_mvm_is_dqa_data_queue(struct iwl_mvm *mvm, u8 queue) { return (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE) && (queue <= IWL_MVM_DQA_MAX_DATA_QUEUE); } static inline bool iwl_mvm_is_dqa_mgmt_queue(struct iwl_mvm *mvm, u8 queue) { return (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE) && (queue <= IWL_MVM_DQA_MAX_MGMT_QUEUE); } static inline bool iwl_mvm_is_lar_supported(struct iwl_mvm *mvm) { bool nvm_lar = mvm->nvm_data->lar_enabled; bool tlv_lar = fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_LAR_SUPPORT); if (iwlwifi_mod_params.lar_disable) return false; /* * Enable LAR only if it is supported by the FW (TLV) && * enabled in the NVM */ if (mvm->cfg->nvm_type == IWL_NVM_EXT) return nvm_lar && tlv_lar; else return tlv_lar; } static inline bool iwl_mvm_is_wifi_mcc_supported(struct iwl_mvm *mvm) { return fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_WIFI_MCC_UPDATE) || fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC); } static inline bool iwl_mvm_bt_is_rrc_supported(struct iwl_mvm *mvm) { return fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_BT_COEX_RRC) && IWL_MVM_BT_COEX_RRC; } static inline bool iwl_mvm_is_csum_supported(struct iwl_mvm *mvm) { return fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CSUM_SUPPORT) && !IWL_MVM_HW_CSUM_DISABLE; } static inline bool iwl_mvm_is_mplut_supported(struct iwl_mvm *mvm) { return fw_has_capa(&mvm->fw->ucode_capa, 
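/*
 * Editor's note (illustrative): every predicate in this block follows the
 * same TLV-gating pattern - a feature is used only if the loaded firmware
 * image advertised the matching capability or API bit. A new gate would be
 * written the same way (IWL_UCODE_TLV_CAPA_SOME_FEATURE is a made-up
 * placeholder, not a real TLV bit):
 *
 *	static inline bool iwl_mvm_has_some_feature(struct iwl_mvm *mvm)
 *	{
 *		return fw_has_capa(&mvm->fw->ucode_capa,
 *				   IWL_UCODE_TLV_CAPA_SOME_FEATURE);
 *	}
 */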
IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT) && IWL_MVM_BT_COEX_MPLUT; } static inline bool iwl_mvm_is_p2p_scm_uapsd_supported(struct iwl_mvm *mvm) { return fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_P2P_SCM_UAPSD) && !(iwlwifi_mod_params.uapsd_disable & IWL_DISABLE_UAPSD_P2P_CLIENT); } static inline bool iwl_mvm_has_new_rx_api(struct iwl_mvm *mvm) { return fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_MULTI_QUEUE_RX_SUPPORT); } static inline bool iwl_mvm_has_new_tx_api(struct iwl_mvm *mvm) { /* TODO - replace with TLV once defined */ return mvm->trans->cfg->use_tfh; } static inline bool iwl_mvm_has_unified_ucode(struct iwl_mvm *mvm) { /* TODO - better define this */ return mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000; } static inline bool iwl_mvm_is_cdb_supported(struct iwl_mvm *mvm) { /* * TODO: * The issue of how to determine CDB APIs and usage is still not fully * defined. * There is a compilation for CDB and non-CDB FW, but there may * be also runtime check. * For now there is a TLV for checking compilation mode, but a * runtime check will also have to be here - once defined. */ return fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CDB_SUPPORT); } static inline bool iwl_mvm_cdb_scan_api(struct iwl_mvm *mvm) { /* * TODO: should this be the same as iwl_mvm_is_cdb_supported()? * but then there's a little bit of code in scan that won't make * any sense... */ return mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000; } static inline bool iwl_mvm_is_reduced_config_scan_supported(struct iwl_mvm *mvm) { return fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_REDUCED_SCAN_CONFIG); } static inline bool iwl_mvm_is_scan_ext_chan_supported(struct iwl_mvm *mvm) { return fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_SCAN_EXT_CHAN_VER); } static inline bool iwl_mvm_has_new_rx_stats_api(struct iwl_mvm *mvm) { return fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_NEW_RX_STATS); } static inline bool iwl_mvm_has_quota_low_latency(struct iwl_mvm *mvm) { return fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_QUOTA_LOW_LATENCY); } static inline bool iwl_mvm_has_tlc_offload(const struct iwl_mvm *mvm) { return fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TLC_OFFLOAD); } static inline struct agg_tx_status * iwl_mvm_get_agg_status(struct iwl_mvm *mvm, void *tx_resp) { if (iwl_mvm_has_new_tx_api(mvm)) return &((struct iwl_mvm_tx_resp *)tx_resp)->status; else return ((struct iwl_mvm_tx_resp_v3 *)tx_resp)->status; } static inline bool iwl_mvm_is_tt_in_fw(struct iwl_mvm *mvm) { /* these two TLV are redundant since the responsibility to CT-kill by * FW happens only after we send at least one command of * temperature THs report. */ return fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CT_KILL_BY_FW) && fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TEMP_THS_REPORT_SUPPORT); } static inline bool iwl_mvm_is_ctdp_supported(struct iwl_mvm *mvm) { return fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CTDP_SUPPORT); } extern const u8 iwl_mvm_ac_to_tx_fifo[]; extern const u8 iwl_mvm_ac_to_gen2_tx_fifo[]; static inline u8 iwl_mvm_mac_ac_to_tx_fifo(struct iwl_mvm *mvm, enum ieee80211_ac_numbers ac) { return iwl_mvm_has_new_tx_api(mvm) ? iwl_mvm_ac_to_gen2_tx_fifo[ac] : iwl_mvm_ac_to_tx_fifo[ac]; } struct iwl_rate_info { u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */ u8 plcp_siso; /* uCode API: IWL_RATE_SISO_6M_PLCP, etc. */ u8 plcp_mimo2; /* uCode API: IWL_RATE_MIMO2_6M_PLCP, etc. */ u8 plcp_mimo3; /* uCode API: IWL_RATE_MIMO3_6M_PLCP, etc. 
*/ u8 ieee; /* MAC header: IWL_RATE_6M_IEEE, etc. */ }; void __iwl_mvm_mac_stop(struct iwl_mvm *mvm); int __iwl_mvm_mac_start(struct iwl_mvm *mvm); /****************** * MVM Methods ******************/ /* uCode */ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm); /* Utils */ int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags, enum nl80211_band band); void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags, enum nl80211_band band, struct ieee80211_tx_rate *r); u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx); u8 iwl_mvm_mac80211_ac_to_ucode_ac(enum ieee80211_ac_numbers ac); void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm); u8 first_antenna(u8 mask); u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx); void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, u32 *gp2, u64 *boottime); u32 iwl_mvm_get_systime(struct iwl_mvm *mvm); /* Tx / Host Commands */ int __must_check iwl_mvm_send_cmd(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd); int __must_check iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u32 id, u32 flags, u16 len, const void *data); int __must_check iwl_mvm_send_cmd_status(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd, u32 *status); int __must_check iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u32 id, u16 len, const void *data, u32 *status); int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb, struct ieee80211_sta *sta); int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb); void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb, struct iwl_tx_cmd *tx_cmd, struct ieee80211_tx_info *info, u8 sta_id); void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd, struct ieee80211_tx_info *info, struct ieee80211_sta *sta, __le16 fc); void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq); unsigned int iwl_mvm_max_amsdu_size(struct iwl_mvm *mvm, struct ieee80211_sta *sta, unsigned int tid); #ifdef CPTCFG_IWLWIFI_DEBUG const char *iwl_mvm_get_tx_fail_reason(u32 status); #else static inline const char *iwl_mvm_get_tx_fail_reason(u32 status) { return ""; } #endif int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, u32 flags); int iwl_mvm_flush_sta(struct iwl_mvm *mvm, void *sta, bool internal, u32 flags); int iwl_mvm_flush_sta_tids(struct iwl_mvm *mvm, u32 sta_id, u16 tids, u32 flags); void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm); static inline void iwl_mvm_set_tx_cmd_ccmp(struct ieee80211_tx_info *info, struct iwl_tx_cmd *tx_cmd) { struct ieee80211_key_conf *keyconf = info->control.hw_key; tx_cmd->sec_ctl = TX_CMD_SEC_CCM; memcpy(tx_cmd->key, keyconf->key, keyconf->keylen); } static inline void iwl_mvm_wait_for_async_handlers(struct iwl_mvm *mvm) { flush_work(&mvm->async_handlers_wk); } /* Statistics */ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt); void iwl_mvm_rx_statistics(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); int iwl_mvm_request_statistics(struct iwl_mvm *mvm, bool clear); void iwl_mvm_accu_radio_stats(struct iwl_mvm *mvm); /* NVM */ int iwl_nvm_init(struct iwl_mvm *mvm); int iwl_mvm_load_nvm_to_nic(struct iwl_mvm *mvm); static inline u8 iwl_mvm_get_valid_tx_ant(struct iwl_mvm *mvm) { return mvm->nvm_data && mvm->nvm_data->valid_tx_ant ? mvm->fw->valid_tx_ant & mvm->nvm_data->valid_tx_ant : mvm->fw->valid_tx_ant; } static inline u8 iwl_mvm_get_valid_rx_ant(struct iwl_mvm *mvm) { return mvm->nvm_data && mvm->nvm_data->valid_rx_ant ? 
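/*
 * Editor's note (worked example): the valid antenna mask is the firmware
 * capability ANDed with the NVM restriction whenever the NVM provides one.
 * With fw->valid_rx_ant = 0x3 (ANT_A | ANT_B) and nvm_data->valid_rx_ant =
 * 0x1, the result is 0x1, i.e. only antenna A may be used for RX; without
 * an NVM restriction the firmware mask is used unchanged.
 */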
mvm->fw->valid_rx_ant & mvm->nvm_data->valid_rx_ant : mvm->fw->valid_rx_ant; } static inline void iwl_mvm_toggle_tx_ant(struct iwl_mvm *mvm, u8 *ant) { *ant = iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm), *ant); } static inline u32 iwl_mvm_get_phy_config(struct iwl_mvm *mvm) { u32 phy_config = ~(FW_PHY_CFG_TX_CHAIN | FW_PHY_CFG_RX_CHAIN); u32 valid_rx_ant = iwl_mvm_get_valid_rx_ant(mvm); u32 valid_tx_ant = iwl_mvm_get_valid_tx_ant(mvm); phy_config |= valid_tx_ant << FW_PHY_CFG_TX_CHAIN_POS | valid_rx_ant << FW_PHY_CFG_RX_CHAIN_POS; return mvm->fw->phy_config & phy_config; } int iwl_mvm_up(struct iwl_mvm *mvm); int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm); int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm); bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm, struct iwl_bcast_filter_cmd *cmd); #ifdef CPTCFG_MAC80211_LATENCY_MEASUREMENTS void iwl_mvm_tx_latency_wk(struct work_struct *wk); void iwl_mvm_tx_latency_watchdog_wk(struct work_struct *wk); #endif /* CPTCFG_MAC80211_LATENCY_MEASUREMENTS */ /* * FW notifications / CMD responses handlers * Convention: iwl_mvm_rx_ */ void iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb, int queue); void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb, int queue); void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb, int queue); int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask, const u8 *data, u32 count, bool async); void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb, int queue); void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_mfu_assert_dump_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_send_recovery_cmd(struct iwl_mvm *mvm, u32 flags); void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_shared_mem_cfg_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); /* MVM PHY */ int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, struct cfg80211_chan_def *chandef, u8 chains_static, u8 chains_dynamic); int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, struct cfg80211_chan_def *chandef, u8 chains_static, u8 chains_dynamic); void iwl_mvm_phy_ctxt_ref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt); void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt); int iwl_mvm_phy_ctx_count(struct iwl_mvm *mvm); u8 iwl_mvm_get_channel_width(struct cfg80211_chan_def *chandef); u8 iwl_mvm_get_ctrl_pos(struct cfg80211_chan_def *chandef); /* MAC (virtual interface) programming */ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_mac_ctxt_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_mac_ctxt_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif, bool force_assoc_off, const u8 
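/*
 * Editor's note (illustrative): the iwl_mvm_rx_* prototypes in this block
 * are bound to firmware notifications through a dispatch table in ops.c;
 * schematically (exact macro arguments vary between driver versions):
 *
 *	RX_HANDLER(TX_CMD, iwl_mvm_rx_tx_cmd, ...),
 *	RX_HANDLER(BA_NOTIF, iwl_mvm_rx_ba_notif, ...),
 *
 * Adding a notification therefore means a prototype here plus one entry in
 * that table.
 */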
*bssid_override); int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct sk_buff *beacon); int iwl_mvm_mac_ctxt_send_beacon_cmd(struct iwl_mvm *mvm, struct sk_buff *beacon, void *data, int len); u8 iwl_mvm_mac_ctxt_get_lowest_rate(struct ieee80211_tx_info *info, struct ieee80211_vif *vif); void iwl_mvm_mac_ctxt_set_tim(struct iwl_mvm *mvm, __le32 *tim_index, __le32 *tim_size, u8 *beacon, u32 frame_size); void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_mu_mimo_grp_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_sta_pm_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_window_status_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm, struct ieee80211_vif *vif); void iwl_mvm_probe_resp_data_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_channel_switch_noa_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); /* Bindings */ int iwl_mvm_binding_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_binding_remove_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif); /* Quota management */ static inline size_t iwl_mvm_quota_cmd_size(struct iwl_mvm *mvm) { return iwl_mvm_has_quota_low_latency(mvm) ? sizeof(struct iwl_time_quota_cmd) : sizeof(struct iwl_time_quota_cmd_v1); } static inline struct iwl_time_quota_data *iwl_mvm_quota_cmd_get_quota(struct iwl_mvm *mvm, struct iwl_time_quota_cmd *cmd, int i) { struct iwl_time_quota_data_v1 *quotas; if (iwl_mvm_has_quota_low_latency(mvm)) return &cmd->quotas[i]; quotas = (struct iwl_time_quota_data_v1 *)cmd->quotas; return (struct iwl_time_quota_data *)&quotas[i]; } int iwl_mvm_update_quotas(struct iwl_mvm *mvm, bool force_upload, struct ieee80211_vif *disabled_vif); /* Scanning */ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_scan_request *req, struct ieee80211_scan_ies *ies); int iwl_mvm_scan_size(struct iwl_mvm *mvm); int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify); int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm); void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm); void iwl_mvm_scan_timeout_wk(struct work_struct *work); /* Scheduled scan */ void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_sched_scan_request *req, struct ieee80211_scan_ies *ies, int type); void iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); /* UMAC scan */ int iwl_mvm_config_scan(struct iwl_mvm *mvm); void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); /* MVM debugfs */ #ifdef CPTCFG_IWLWIFI_DEBUGFS void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir); void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif); void
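/*
 * Editor's note (illustrative): iwl_mvm_quota_cmd_get_quota() above hides
 * two wire formats behind one accessor - with the QUOTA_LOW_LATENCY API the
 * command carries struct iwl_time_quota_data entries, otherwise the older,
 * smaller struct iwl_time_quota_data_v1. Indexing through the v1 pointer
 * type makes &quotas[i] advance by the v1 stride, so a caller can iterate
 * identically over both layouts, schematically:
 *
 *	for (i = 0; i < MAX_BINDINGS; i++) {
 *		struct iwl_time_quota_data *data =
 *			iwl_mvm_quota_cmd_get_quota(mvm, &cmd, i);
 *		... fill *data ...
 *	}
 */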
iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif); #else static inline void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir) { } static inline void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { } static inline void iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { } #endif /* CPTCFG_IWLWIFI_DEBUGFS */ /* rate scaling */ int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq); void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg); int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate); void rs_update_last_rssi(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, struct ieee80211_rx_status *rx_status); /* power management */ int iwl_mvm_power_update_device(struct iwl_mvm *mvm); int iwl_mvm_power_update_mac(struct iwl_mvm *mvm); int iwl_mvm_power_update_ps(struct iwl_mvm *mvm); int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm, struct ieee80211_vif *vif, char *buf, int bufsz); void iwl_mvm_power_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif); void iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); #ifdef CPTCFG_IWLWIFI_LEDS int iwl_mvm_leds_init(struct iwl_mvm *mvm); void iwl_mvm_leds_exit(struct iwl_mvm *mvm); void iwl_mvm_leds_sync(struct iwl_mvm *mvm); #else static inline int iwl_mvm_leds_init(struct iwl_mvm *mvm) { return 0; } static inline void iwl_mvm_leds_exit(struct iwl_mvm *mvm) { } static inline void iwl_mvm_leds_sync(struct iwl_mvm *mvm) { } #endif /* D3 (WoWLAN, NetDetect) */ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan); int iwl_mvm_resume(struct ieee80211_hw *hw); void iwl_mvm_set_wakeup(struct ieee80211_hw *hw, bool enabled); void iwl_mvm_set_rekey_data(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct cfg80211_gtk_rekey_data *data); void iwl_mvm_ipv6_addr_change(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct inet6_dev *idev); void iwl_mvm_set_default_unicast_key(struct ieee80211_hw *hw, struct ieee80211_vif *vif, int idx); extern const struct file_operations iwl_dbgfs_d3_test_ops; struct iwl_wowlan_status *iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm); #ifdef CONFIG_PM void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif); #else static inline void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { } #endif void iwl_mvm_set_wowlan_qos_seq(struct iwl_mvm_sta *mvm_ap_sta, struct iwl_wowlan_config_cmd *cmd); int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm, struct ieee80211_vif *vif, bool disable_offloading, bool offload_ns, u32 cmd_flags); /* BT Coex */ int iwl_mvm_send_bt_init_conf(struct iwl_mvm *mvm); void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif, enum ieee80211_rssi_event_data); void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm); u16 iwl_mvm_coex_agg_time_limit(struct iwl_mvm *mvm, struct ieee80211_sta *sta); bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm, struct ieee80211_sta *sta); bool iwl_mvm_bt_coex_is_ant_avail(struct iwl_mvm *mvm, u8 ant); bool iwl_mvm_bt_coex_is_shared_ant_avail(struct iwl_mvm *mvm); bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm, enum nl80211_band band); u8 iwl_mvm_bt_coex_get_single_ant_msk(struct iwl_mvm *mvm, u8 enabled_ants); u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr, struct 
ieee80211_tx_info *info, u8 ac); /* beacon filtering */ #ifdef CPTCFG_IWLWIFI_DEBUGFS void iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif, struct iwl_beacon_filter_cmd *cmd); #else static inline void iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif, struct iwl_beacon_filter_cmd *cmd) {} #endif int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u32 flags); int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u32 flags); /* SMPS */ void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif, enum iwl_mvm_smps_type_request req_type, enum ieee80211_smps_mode smps_request); bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm); /* Low latency */ int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif, bool low_latency, enum iwl_mvm_low_latency_cause cause); /* get SystemLowLatencyMode - only needed for beacon threshold? */ bool iwl_mvm_low_latency(struct iwl_mvm *mvm); bool iwl_mvm_low_latency_band(struct iwl_mvm *mvm, enum nl80211_band band); void iwl_mvm_send_low_latency_cmd(struct iwl_mvm *mvm, bool low_latency, u16 mac_id); /* get VMACLowLatencyMode */ static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif) { /* * should this consider associated/active/... state? * * Normally low-latency should only be active on interfaces * that are active, but at least with debugfs it can also be * enabled on interfaces that aren't active. However, when * interfaces aren't active they aren't added into the * binding, so this has no real impact. For now, just return * the current desired low-latency state. */ return mvmvif->low_latency_actual; } static inline void iwl_mvm_vif_set_low_latency(struct iwl_mvm_vif *mvmvif, bool set, enum iwl_mvm_low_latency_cause cause) { u8 new_state; if (set) mvmvif->low_latency |= cause; else mvmvif->low_latency &= ~cause; /* * if LOW_LATENCY_DEBUGFS_FORCE_ENABLE is set, no changes are * allowed to the actual mode. */ if (mvmvif->low_latency & LOW_LATENCY_DEBUGFS_FORCE_ENABLE && cause != LOW_LATENCY_DEBUGFS_FORCE_ENABLE) return; if (cause == LOW_LATENCY_DEBUGFS_FORCE_ENABLE && set) /* * We enter the force state */ new_state = !!(mvmvif->low_latency & LOW_LATENCY_DEBUGFS_FORCE); else /* * Check if any other cause set low latency */ new_state = !!(mvmvif->low_latency & ~(LOW_LATENCY_DEBUGFS_FORCE_ENABLE | LOW_LATENCY_DEBUGFS_FORCE)); mvmvif->low_latency_actual = new_state; } /* Return a bitmask with all the hw supported queues, except for the * command queue, which can't be flushed.
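 * For example, on a NIC whose base_params->num_of_queues is 31, with the
 * command queue at IWL_MVM_DQA_CMD_QUEUE (queue 0), this works out to
 * (BIT(31) - 1) & ~BIT(0) == 0x7ffffffe, i.e. queues 1-30 are flushable
 * (an illustrative configuration; the actual values come from the cfg).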
*/ static inline u32 iwl_mvm_flushable_queues(struct iwl_mvm *mvm) { return ((BIT(mvm->cfg->base_params->num_of_queues) - 1) & ~BIT(IWL_MVM_DQA_CMD_QUEUE)); } static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm) { lockdep_assert_held(&mvm->mutex); iwl_fw_cancel_timestamp(&mvm->fwrt); clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status); iwl_fw_dbg_stop_sync(&mvm->fwrt); iwl_trans_stop_device(mvm->trans); iwl_free_fw_paging(&mvm->fwrt); iwl_fw_dump_conf_clear(&mvm->fwrt); } /* Re-configure the SCD for a queue that has already been configured */ int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id, int tid, int frame_limit, u16 ssn); /* Thermal management and CT-kill */ void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff); void iwl_mvm_tt_temp_changed(struct iwl_mvm *mvm, u32 temp); void iwl_mvm_temp_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_tt_handler(struct iwl_mvm *mvm); void iwl_mvm_thermal_initialize(struct iwl_mvm *mvm, u32 min_backoff); void iwl_mvm_thermal_exit(struct iwl_mvm *mvm); void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state); int iwl_mvm_get_temp(struct iwl_mvm *mvm, s32 *temp); void iwl_mvm_ct_kill_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_enter_ctkill(struct iwl_mvm *mvm); int iwl_mvm_send_temp_report_ths_cmd(struct iwl_mvm *mvm); int iwl_mvm_ctdp_command(struct iwl_mvm *mvm, u32 op, u32 budget); /* Location Aware Regulatory */ struct iwl_mcc_update_resp * iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2, enum iwl_mcc_source src_id); int iwl_mvm_init_mcc(struct iwl_mvm *mvm); void iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy, const char *alpha2, enum iwl_mcc_source src_id, bool *changed); struct ieee80211_regdomain *iwl_mvm_get_current_regdomain(struct iwl_mvm *mvm, bool *changed); int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm); void iwl_mvm_update_changed_regdom(struct iwl_mvm *mvm); /* smart fifo */ int iwl_mvm_sf_update(struct iwl_mvm *mvm, struct ieee80211_vif *vif, bool added_vif); /* vendor commands */ void iwl_mvm_vendor_cmd_init(void); void iwl_mvm_vendor_cmd_exit(void); void iwl_mvm_vendor_cmds_register(struct iwl_mvm *mvm); void iwl_mvm_vendor_cmds_unregister(struct iwl_mvm *mvm); /* FTM responder */ int iwl_mvm_ftm_start_responder(struct iwl_mvm *mvm, struct ieee80211_vif *vif); void iwl_mvm_ftm_restart_responder(struct iwl_mvm *mvm, struct ieee80211_vif *vif); void iwl_mvm_ftm_responder_stats(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); /* FTM initiator */ void iwl_mvm_ftm_restart(struct iwl_mvm *mvm); void iwl_mvm_ftm_range_resp(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_ftm_lc_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); int iwl_mvm_ftm_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_pmsr_request *request); void iwl_mvm_ftm_abort(struct iwl_mvm *mvm, struct cfg80211_pmsr_request *req); /* TDLS */ /* * We use TID 4 (VI) as a FW-used-only TID when TDLS connections are present. * This TID is marked as used vs the AP and all connected TDLS peers. 
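 * (TID 4 maps to the AC_VI access category in the standard 802.11
 * UP-to-AC mapping, which is why it is referred to as "VI" above.)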
*/ #define IWL_MVM_TDLS_FW_TID 4 int iwl_mvm_tdls_sta_count(struct iwl_mvm *mvm, struct ieee80211_vif *vif); void iwl_mvm_teardown_tdls_peers(struct iwl_mvm *mvm); void iwl_mvm_recalc_tdls_state(struct iwl_mvm *mvm, struct ieee80211_vif *vif, bool sta_added); void iwl_mvm_mac_mgd_protect_tdls_discover(struct ieee80211_hw *hw, struct ieee80211_vif *vif); int iwl_mvm_tdls_channel_switch(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, u8 oper_class, struct cfg80211_chan_def *chandef, struct sk_buff *tmpl_skb, u32 ch_sw_tm_ie); void iwl_mvm_tdls_recv_channel_switch(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_tdls_ch_sw_params *params); void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta); void iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_tdls_ch_switch_work(struct work_struct *work); #ifdef CPTCFG_IWLMVM_TDLS_PEER_CACHE void iwl_mvm_tdls_peer_cache_pkt(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr, u32 len, int rxq); void iwl_mvm_tdls_peer_cache_clear(struct iwl_mvm *mvm, struct ieee80211_vif *vif); struct iwl_mvm_tdls_peer_counter * iwl_mvm_tdls_peer_cache_find(struct iwl_mvm *mvm, const u8 *addr); #endif /* CPTCFG_IWLMVM_TDLS_PEER_CACHE */ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm, struct iwl_mvm_internal_rxq_notif *notif, u32 size); void iwl_mvm_reorder_timer_expired(struct timer_list *t); struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm); bool iwl_mvm_is_vif_assoc(struct iwl_mvm *mvm); #define MVM_TCM_PERIOD_MSEC 500 #define MVM_TCM_PERIOD (HZ * MVM_TCM_PERIOD_MSEC / 1000) #define MVM_LL_PERIOD (10 * HZ) void iwl_mvm_tcm_work(struct work_struct *work); void iwl_mvm_recalc_tcm(struct iwl_mvm *mvm); void iwl_mvm_pause_tcm(struct iwl_mvm *mvm, bool with_cancel); void iwl_mvm_resume_tcm(struct iwl_mvm *mvm); void iwl_mvm_tcm_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif); void iwl_mvm_tcm_rm_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif); u8 iwl_mvm_tcm_load_percentage(u32 airtime, u32 elapsed); void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error); unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm, struct ieee80211_vif *vif, bool tdls, bool cmd_q); void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif, const char *errmsg); void iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm, struct ieee80211_vif *vif, const struct ieee80211_sta *sta, u16 tid); #ifdef CPTCFG_IWLMVM_VENDOR_CMDS void iwl_mvm_send_tcm_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif); void iwl_mvm_recalc_multicast(struct iwl_mvm *mvm); int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm); void iwl_mvm_active_rx_filters(struct iwl_mvm *mvm); void iwl_mvm_rx_csi_header(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_csi_chunk(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); int iwl_mvm_send_csi_cmd(struct iwl_mvm *mvm); #endif /* NAN */ void iwl_mvm_nan_match(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_nan_de_term_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); int iwl_mvm_start_nan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct cfg80211_nan_conf *conf); int iwl_mvm_stop_nan(struct ieee80211_hw *hw, struct ieee80211_vif *vif); int iwl_mvm_add_nan_func(struct ieee80211_hw *hw, struct ieee80211_vif *vif, const struct cfg80211_nan_func *nan_func); void iwl_mvm_del_nan_func(struct ieee80211_hw *hw, struct 
ieee80211_vif *vif, u8 instance_id); int iwl_mvm_nan_config_nan_faw_cmd(struct iwl_mvm *mvm, struct cfg80211_chan_def *chandef, u8 slots); int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b); int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm); int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm); #ifdef CPTCFG_IWLWIFI_DEBUGFS void iwl_mvm_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct dentry *dir); #endif /* 11ax Softap Test Mode */ /* Channel info utils */ static inline bool iwl_mvm_has_ultra_hb_channel(struct iwl_mvm *mvm) { return fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS); } static inline void *iwl_mvm_chan_info_cmd_tail(struct iwl_mvm *mvm, struct iwl_fw_channel_info *ci) { return (u8 *)ci + (iwl_mvm_has_ultra_hb_channel(mvm) ? sizeof(struct iwl_fw_channel_info) : sizeof(struct iwl_fw_channel_info_v1)); } static inline size_t iwl_mvm_chan_info_padding(struct iwl_mvm *mvm) { return iwl_mvm_has_ultra_hb_channel(mvm) ? 0 : sizeof(struct iwl_fw_channel_info) - sizeof(struct iwl_fw_channel_info_v1); } static inline void iwl_mvm_set_chan_info(struct iwl_mvm *mvm, struct iwl_fw_channel_info *ci, u32 chan, u8 band, u8 width, u8 ctrl_pos) { if (iwl_mvm_has_ultra_hb_channel(mvm)) { ci->channel = cpu_to_le32(chan); ci->band = band; ci->width = width; ci->ctrl_pos = ctrl_pos; } else { struct iwl_fw_channel_info_v1 *ci_v1 = (struct iwl_fw_channel_info_v1 *)ci; ci_v1->channel = chan; ci_v1->band = band; ci_v1->width = width; ci_v1->ctrl_pos = ctrl_pos; } } static inline void iwl_mvm_set_chan_info_chandef(struct iwl_mvm *mvm, struct iwl_fw_channel_info *ci, struct cfg80211_chan_def *chandef) { iwl_mvm_set_chan_info(mvm, ci, chandef->chan->hw_value, (chandef->chan->band == NL80211_BAND_2GHZ ? PHY_BAND_24 : PHY_BAND_5), iwl_mvm_get_channel_width(chandef), iwl_mvm_get_ctrl_pos(chandef)); } #endif /* __IWL_MVM_H__ */ backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/mvm/nan.c000066400000000000000000000500631351064634500263120ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2015-2017 Intel Deutschland GmbH * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2015-2017 Intel Deutschland GmbH * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. 
* * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #include #include #include "mvm.h" #include "fw/api/nan.h" #define NAN_WARMUP_TIMEOUT_USEC (120000000ULL) #define NAN_CHANNEL_24 (6) #define NAN_CHANNEL_52 (149) enum srf_type { SRF_BF_TYPE = BIT(0), SRF_INCLUDE = BIT(1), SRF_BLOOM_FILTER_IDX = BIT(2) | BIT(3), }; static bool iwl_mvm_can_beacon(struct ieee80211_vif *vif, enum nl80211_band band, u8 channel) { struct wiphy *wiphy = ieee80211_vif_to_wdev(vif)->wiphy; int freq = ieee80211_channel_to_frequency(channel, band); struct ieee80211_channel *chan = ieee80211_get_channel(wiphy, freq); struct cfg80211_chan_def def; if (!chan) return false; cfg80211_chandef_create(&def, chan, NL80211_CHAN_NO_HT); return cfg80211_reg_can_beacon(wiphy, &def, vif->type); } static inline bool iwl_mvm_nan_is_ver2(struct ieee80211_hw *hw) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); return fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_NAN2_VER2); } static inline size_t iwl_mvm_nan_cfg_cmd_len(struct ieee80211_hw *hw) { return iwl_mvm_nan_is_ver2(hw) ? sizeof(struct iwl_nan_cfg_cmd_v2) : sizeof(struct iwl_nan_cfg_cmd); } static inline struct iwl_nan_umac_cfg *iwl_mvm_nan_get_umac_cfg(struct ieee80211_hw *hw, void *nan_cfg_cmd) { return iwl_mvm_nan_is_ver2(hw) ? &((struct iwl_nan_cfg_cmd_v2 *)nan_cfg_cmd)->umac_cfg : &((struct iwl_nan_cfg_cmd *)nan_cfg_cmd)->umac_cfg; } static inline struct iwl_nan_testbed_cfg *iwl_mvm_nan_get_tb_cfg(struct ieee80211_hw *hw, void *nan_cfg_cmd) { return iwl_mvm_nan_is_ver2(hw) ? &((struct iwl_nan_cfg_cmd_v2 *)nan_cfg_cmd)->tb_cfg : &((struct iwl_nan_cfg_cmd *)nan_cfg_cmd)->tb_cfg; } static inline struct iwl_nan_nan2_cfg *iwl_mvm_nan_get_nan2_cfg(struct ieee80211_hw *hw, void *nan_cfg_cmd) { return iwl_mvm_nan_is_ver2(hw) ? 
&((struct iwl_nan_cfg_cmd_v2 *)nan_cfg_cmd)->nan2_cfg : &((struct iwl_nan_cfg_cmd *)nan_cfg_cmd)->nan2_cfg; } int iwl_mvm_start_nan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct cfg80211_nan_conf *conf) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); void *cmd; struct iwl_nan_umac_cfg *umac_cfg; struct iwl_nan_testbed_cfg *tb_cfg; struct iwl_nan_nan2_cfg *nan2_cfg; struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int ret = 0; u16 cdw = 0; IWL_DEBUG_MAC80211(IWL_MAC80211_GET_MVM(hw), "Start NAN\n"); /* 2GHz is mandatory and nl80211 should make sure it is set. * Warn and add 2GHz if this happens anyway. */ if (WARN_ON(conf->bands && !(conf->bands & BIT(NL80211_BAND_2GHZ)))) return -EINVAL; conf->bands |= BIT(NL80211_BAND_2GHZ); cmd = kzalloc(iwl_mvm_nan_cfg_cmd_len(hw), GFP_KERNEL); if (!cmd) return -ENOMEM; umac_cfg = iwl_mvm_nan_get_umac_cfg(hw, cmd); tb_cfg = iwl_mvm_nan_get_tb_cfg(hw, cmd); nan2_cfg = iwl_mvm_nan_get_nan2_cfg(hw, cmd); mutex_lock(&mvm->mutex); umac_cfg->action = cpu_to_le32(FW_CTXT_ACTION_ADD); umac_cfg->tsf_id = cpu_to_le32(mvmvif->tsf_id); umac_cfg->beacon_template_id = cpu_to_le32(mvmvif->id); ether_addr_copy(umac_cfg->node_addr, vif->addr); umac_cfg->sta_id = cpu_to_le32(mvm->aux_sta.sta_id); umac_cfg->master_pref = conf->master_pref; if (conf->bands & BIT(NL80211_BAND_2GHZ)) { if (!iwl_mvm_can_beacon(vif, NL80211_BAND_2GHZ, NAN_CHANNEL_24)) { IWL_ERR(mvm, "Can't beacon on %d\n", NAN_CHANNEL_24); ret = -EINVAL; goto out; } tb_cfg->chan24 = NAN_CHANNEL_24; cdw |= conf->cdw_2g; } if (conf->bands & BIT(NL80211_BAND_5GHZ)) { if (!iwl_mvm_can_beacon(vif, NL80211_BAND_5GHZ, NAN_CHANNEL_52)) { IWL_ERR(mvm, "Can't beacon on %d\n", NAN_CHANNEL_52); ret = -EINVAL; goto out; } tb_cfg->chan52 = NAN_CHANNEL_52; cdw |= conf->cdw_5g << 3; } tb_cfg->warmup_timer = cpu_to_le32(NAN_WARMUP_TIMEOUT_USEC); tb_cfg->op_bands = 3; nan2_cfg->cdw = cpu_to_le16(cdw); if ((conf->bands & BIT(NL80211_BAND_2GHZ)) && (conf->bands & BIT(NL80211_BAND_5GHZ))) umac_cfg->dual_band = cpu_to_le32(1); ret = iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(NAN_CONFIG_CMD, NAN_GROUP, 0), 0, iwl_mvm_nan_cfg_cmd_len(hw), cmd); if (!ret) mvm->nan_vif = vif; out: mutex_unlock(&mvm->mutex); kfree(cmd); return ret; } int iwl_mvm_stop_nan(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { void *cmd; struct iwl_nan_umac_cfg *umac_cfg; struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int ret = 0; IWL_DEBUG_MAC80211(IWL_MAC80211_GET_MVM(hw), "Stop NAN\n"); cmd = kzalloc(iwl_mvm_nan_cfg_cmd_len(hw), GFP_KERNEL); if (!cmd) return -ENOMEM; umac_cfg = iwl_mvm_nan_get_umac_cfg(hw, cmd); mutex_lock(&mvm->mutex); umac_cfg->action = cpu_to_le32(FW_CTXT_ACTION_REMOVE); ret = iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(NAN_CONFIG_CMD, NAN_GROUP, 0), 0, iwl_mvm_nan_cfg_cmd_len(hw), cmd); if (!ret) mvm->nan_vif = NULL; mutex_unlock(&mvm->mutex); kfree(cmd); return ret; } static enum iwl_fw_nan_func_type iwl_fw_nan_func_type(enum nl80211_nan_function_type type) { switch (type) { case NL80211_NAN_FUNC_PUBLISH: return IWL_NAN_DE_FUNC_PUBLISH; case NL80211_NAN_FUNC_SUBSCRIBE: return IWL_NAN_DE_FUNC_SUBSCRIBE; case NL80211_NAN_FUNC_FOLLOW_UP: return IWL_NAN_DE_FUNC_FOLLOW_UP; default: return IWL_NAN_DE_FUNC_NOT_VALID; } } static u8 iwl_mvm_get_match_filter_len(struct cfg80211_nan_func_filter *filters, u8 num_filters) { int i; unsigned int len = 0; len += num_filters; for (i = 0; i < num_filters; i++) len += filters[i].len; if (WARN_ON_ONCE(len > U8_MAX)) return 0; return len; } static void iwl_mvm_copy_filters(struct 
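/*
 * Wire-format note (illustrative, derived from the copy loop below): each
 * match filter is serialized as a one-byte length followed by the filter
 * payload, which is why iwl_mvm_get_match_filter_len() above adds one
 * octet per filter on top of the payload bytes. For example, the two
 * filters {0xaa, 0xbb} and {} (a zero-length wildcard) occupy
 * 1 + 2 + 1 = 4 bytes on the wire: 02 aa bb 00.
 */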
cfg80211_nan_func_filter *filters, u8 num_filters, u8 *cmd_data) { int i; u8 offset = 0; for (i = 0; i < num_filters; i++) { memcpy(cmd_data + offset, &filters[i].len, sizeof(u8)); offset++; if (filters[i].len > 0) memcpy(cmd_data + offset, filters[i].filter, filters[i].len); offset += filters[i].len; } } static inline size_t iwl_mvm_nan_add_func_cmd_len(struct ieee80211_hw *hw) { if (iwl_mvm_nan_is_ver2(hw)) return sizeof(struct iwl_nan_add_func_cmd_v2); return sizeof(struct iwl_nan_add_func_cmd) - iwl_mvm_chan_info_padding(IWL_MAC80211_GET_MVM(hw)); } static inline struct iwl_nan_add_func_common *iwl_mvm_nan_get_add_func_common(struct ieee80211_hw *hw, void *nan_add_func_cmd) { return iwl_mvm_nan_is_ver2(hw) ? &((struct iwl_nan_add_func_cmd_v2 *)nan_add_func_cmd)->cmn : &((struct iwl_nan_add_func_cmd *)nan_add_func_cmd)->cmn; } static inline u8 *iwl_mvm_nan_get_add_func_data(struct ieee80211_hw *hw, void *nan_add_func_cmd) { if (iwl_mvm_nan_is_ver2(hw)) return ((struct iwl_nan_add_func_cmd_v2 *) nan_add_func_cmd)->data; return ((struct iwl_nan_add_func_cmd *)nan_add_func_cmd)->data - iwl_mvm_chan_info_padding(IWL_MAC80211_GET_MVM(hw)); } int iwl_mvm_add_nan_func(struct ieee80211_hw *hw, struct ieee80211_vif *vif, const struct cfg80211_nan_func *nan_func) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); void *cmd; struct iwl_nan_add_func_common *cmn; struct iwl_nan_add_func_common_tail *tail; struct iwl_host_cmd hcmd = { .id = iwl_cmd_id(NAN_DISCOVERY_FUNC_CMD, NAN_GROUP, 0), .flags = CMD_WANT_SKB, }; struct iwl_nan_add_func_res *resp; struct iwl_rx_packet *pkt; u8 *cmd_data; u16 flags = 0; u8 tx_filt_len, rx_filt_len; size_t cmd_len; int ret = 0; IWL_DEBUG_MAC80211(IWL_MAC80211_GET_MVM(hw), "Add NAN func\n"); mutex_lock(&mvm->mutex); /* We assume here that mac80211 properly validated the nan_func */ cmd_len = iwl_mvm_nan_add_func_cmd_len(hw) + ALIGN(nan_func->serv_spec_info_len, 4); if (nan_func->srf_bf_len) cmd_len += ALIGN(nan_func->srf_bf_len + 1, 4); else if (nan_func->srf_num_macs) cmd_len += ALIGN(nan_func->srf_num_macs * ETH_ALEN + 1, 4); rx_filt_len = iwl_mvm_get_match_filter_len(nan_func->rx_filters, nan_func->num_rx_filters); tx_filt_len = iwl_mvm_get_match_filter_len(nan_func->tx_filters, nan_func->num_tx_filters); cmd_len += ALIGN(rx_filt_len, 4); cmd_len += ALIGN(tx_filt_len, 4); cmd = kzalloc(cmd_len, GFP_KERNEL); if (!cmd) { ret = -ENOBUFS; goto unlock; } hcmd.len[0] = cmd_len; hcmd.data[0] = cmd; cmn = iwl_mvm_nan_get_add_func_common(hw, cmd); tail = iwl_mvm_chan_info_cmd_tail(mvm, &cmn->faw_ci); cmd_data = iwl_mvm_nan_get_add_func_data(hw, cmd); cmn->action = cpu_to_le32(FW_CTXT_ACTION_ADD); cmn->type = iwl_fw_nan_func_type(nan_func->type); cmn->instance_id = nan_func->instance_id; tail->dw_interval = 1; memcpy(&cmn->service_id, nan_func->service_id, sizeof(cmn->service_id)); /* * TODO: Currently we want all the events, however we might need to be * able to unset this flag for solicited publish to disable "Replied" * events. 
*/ flags |= IWL_NAN_DE_FUNC_FLAG_RAISE_EVENTS; if (nan_func->subscribe_active || nan_func->publish_type == NL80211_NAN_UNSOLICITED_PUBLISH) flags |= IWL_NAN_DE_FUNC_FLAG_UNSOLICITED_OR_ACTIVE; if (nan_func->close_range) flags |= IWL_NAN_DE_FUNC_FLAG_CLOSE_RANGE; if (nan_func->type == NL80211_NAN_FUNC_FOLLOW_UP || (nan_func->type == NL80211_NAN_FUNC_PUBLISH && !nan_func->publish_bcast)) flags |= IWL_NAN_DE_FUNC_FLAG_UNICAST; if (nan_func->publish_type == NL80211_NAN_SOLICITED_PUBLISH) flags |= IWL_NAN_DE_FUNC_FLAG_SOLICITED; cmn->flags = cpu_to_le16(flags); cmn->ttl = cpu_to_le32(nan_func->ttl); tail->serv_info_len = nan_func->serv_spec_info_len; if (nan_func->serv_spec_info_len) memcpy(cmd_data, nan_func->serv_spec_info, nan_func->serv_spec_info_len); if (nan_func->type == NL80211_NAN_FUNC_FOLLOW_UP) { cmn->flw_up_id = nan_func->followup_id; cmn->flw_up_req_id = nan_func->followup_reqid; memcpy(cmn->flw_up_addr, nan_func->followup_dest.addr, ETH_ALEN); cmn->ttl = cpu_to_le32(1); } cmd_data += ALIGN(tail->serv_info_len, 4); if (nan_func->srf_bf_len) { u8 srf_ctl = 0; srf_ctl |= SRF_BF_TYPE; srf_ctl |= (nan_func->srf_bf_idx << 2) & SRF_BLOOM_FILTER_IDX; if (nan_func->srf_include) srf_ctl |= SRF_INCLUDE; tail->srf_len = nan_func->srf_bf_len + 1; memcpy(cmd_data, &srf_ctl, sizeof(srf_ctl)); memcpy(cmd_data + 1, nan_func->srf_bf, nan_func->srf_bf_len); } else if (nan_func->srf_num_macs) { u8 srf_ctl = 0; int i; if (nan_func->srf_include) srf_ctl |= SRF_INCLUDE; tail->srf_len = nan_func->srf_num_macs * ETH_ALEN + 1; memcpy(cmd_data, &srf_ctl, sizeof(srf_ctl)); for (i = 0; i < nan_func->srf_num_macs; i++) { memcpy(cmd_data + 1 + i * ETH_ALEN, nan_func->srf_macs[i].addr, ETH_ALEN); } } cmd_data += ALIGN(tail->srf_len, 4); if (rx_filt_len > 0) iwl_mvm_copy_filters(nan_func->rx_filters, nan_func->num_rx_filters, cmd_data); tail->rx_filter_len = rx_filt_len; cmd_data += ALIGN(tail->rx_filter_len, 4); if (tx_filt_len > 0) iwl_mvm_copy_filters(nan_func->tx_filters, nan_func->num_tx_filters, cmd_data); tail->tx_filter_len = tx_filt_len; ret = iwl_mvm_send_cmd(mvm, &hcmd); if (ret) { IWL_ERR(mvm, "Couldn't send NAN_DISCOVERY_FUNC_CMD: %d\n", ret); goto out_free; } pkt = hcmd.resp_pkt; if (WARN_ON(iwl_rx_packet_payload_len(pkt) != sizeof(*resp))) { ret = -EIO; goto out_free_resp; } resp = (void *)pkt->data; IWL_DEBUG_MAC80211(mvm, "Add NAN func response status: %d, instance_id: %d\n", resp->status, resp->instance_id); if (resp->status == IWL_NAN_DE_FUNC_STATUS_INSUFFICIENT_ENTRIES || resp->status == IWL_NAN_DE_FUNC_STATUS_INSUFFICIENT_MEMORY) { ret = -ENOBUFS; goto out_free_resp; } if (resp->status != IWL_NAN_DE_FUNC_STATUS_SUCCESSFUL) { ret = -EIO; goto out_free_resp; } if (cmn->instance_id && WARN_ON(resp->instance_id != cmn->instance_id)) { ret = -EIO; goto out_free_resp; } ret = 0; out_free_resp: iwl_free_resp(&hcmd); out_free: kfree(cmd); unlock: mutex_unlock(&mvm->mutex); return ret; } void iwl_mvm_del_nan_func(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u8 instance_id) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); void *cmd; struct iwl_nan_add_func_common *cmn; int ret; IWL_DEBUG_MAC80211(IWL_MAC80211_GET_MVM(hw), "Remove NAN func\n"); cmd = kzalloc(iwl_mvm_nan_add_func_cmd_len(hw), GFP_KERNEL); if (!cmd) { IWL_ERR(mvm, "Failed to allocate command to remove NAN func instance_id: %d\n", instance_id); return; } cmn = iwl_mvm_nan_get_add_func_common(hw, cmd); mutex_lock(&mvm->mutex); cmn->action = cpu_to_le32(FW_CTXT_ACTION_REMOVE); cmn->instance_id = instance_id; ret = 
iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(NAN_DISCOVERY_FUNC_CMD, NAN_GROUP, 0), 0, iwl_mvm_nan_add_func_cmd_len(hw), cmd); if (ret) IWL_ERR(mvm, "Failed to remove NAN func instance_id: %d\n", instance_id); mutex_unlock(&mvm->mutex); kfree(cmd); } static u8 iwl_cfg_nan_func_type(u8 fw_type) { switch (fw_type) { case IWL_NAN_DE_FUNC_PUBLISH: return NL80211_NAN_FUNC_PUBLISH; case IWL_NAN_DE_FUNC_SUBSCRIBE: return NL80211_NAN_FUNC_SUBSCRIBE; case IWL_NAN_DE_FUNC_FOLLOW_UP: return NL80211_NAN_FUNC_FOLLOW_UP; default: return NL80211_NAN_FUNC_MAX_TYPE + 1; } } static void iwl_mvm_nan_match_v1(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_nan_disc_evt_notify_v1 *ev = (void *)pkt->data; struct cfg80211_nan_match_params match = {0}; int len = iwl_rx_packet_payload_len(pkt); if (WARN_ON_ONCE(!mvm->nan_vif)) { IWL_ERR(mvm, "NAN vif is NULL\n"); return; } if (WARN_ON_ONCE(len < sizeof(*ev))) { IWL_ERR(mvm, "Invalid NAN match event length: %d\n", len); return; } if (WARN_ON_ONCE(len < sizeof(*ev) + ev->service_info_len)) { IWL_ERR(mvm, "Invalid NAN match event length: %d, info_len: %d\n", len, ev->service_info_len); return; } match.type = iwl_cfg_nan_func_type(ev->type); if (WARN_ON_ONCE(match.type > NL80211_NAN_FUNC_MAX_TYPE)) { IWL_ERR(mvm, "Invalid func type\n"); return; } match.inst_id = ev->instance_id; match.peer_inst_id = ev->peer_instance; match.addr = ev->peer_mac_addr; match.info = ev->buf; match.info_len = ev->service_info_len; ieee80211_nan_func_match(mvm->nan_vif, &match, GFP_ATOMIC); } static void iwl_mvm_nan_match_v2(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_nan_disc_evt_notify_v2 *ev = (void *)pkt->data; u32 len = iwl_rx_packet_payload_len(pkt); u32 i = 0; if (WARN_ONCE(!mvm->nan_vif, "NAN vif is NULL")) return; if (WARN_ONCE(len < sizeof(*ev), "Invalid NAN match event length=%u", len)) return; if (WARN_ONCE(len < sizeof(*ev) + le32_to_cpu(ev->match_len) + le32_to_cpu(ev->avail_attrs_len), "Bad NAN match event: len=%u, match=%u, attrs=%u\n", len, ev->match_len, ev->avail_attrs_len)) return; i = 0; while (i < le32_to_cpu(ev->match_len)) { struct cfg80211_nan_match_params match = {0}; struct iwl_nan_disc_info *disc_info = (struct iwl_nan_disc_info *)(((u8 *)(ev + 1)) + i); match.type = iwl_cfg_nan_func_type(disc_info->type); match.inst_id = disc_info->instance_id; match.peer_inst_id = disc_info->peer_instance; match.addr = ev->peer_mac_addr; match.info = disc_info->buf; match.info_len = disc_info->service_info_len; ieee80211_nan_func_match(mvm->nan_vif, &match, GFP_ATOMIC); i += ALIGN(sizeof(*disc_info) + disc_info->service_info_len + le16_to_cpu(disc_info->sdea_service_info_len) + le16_to_cpu(disc_info->sec_ctxt_len), 4); } } void iwl_mvm_nan_match(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_NAN_NOTIF_V2)) iwl_mvm_nan_match_v2(mvm, rxb); else iwl_mvm_nan_match_v1(mvm, rxb); } void iwl_mvm_nan_de_term_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_nan_de_term *ev = (void *)pkt->data; int len = iwl_rx_packet_payload_len(pkt); enum nl80211_nan_func_term_reason nl_reason; if (WARN_ON_ONCE(!mvm->nan_vif)) { IWL_ERR(mvm, "NAN vif is NULL\n"); return; } if (WARN_ON_ONCE(len != sizeof(*ev))) { IWL_ERR(mvm, "NAN DE termination event bad length: %d\n", len); return; } switch (ev->reason) { case IWL_NAN_DE_TERM_TTL_REACHED: nl_reason = 
NL80211_NAN_FUNC_TERM_REASON_TTL_EXPIRED; break; case IWL_NAN_DE_TERM_USER_REQUEST: nl_reason = NL80211_NAN_FUNC_TERM_REASON_USER_REQUEST; break; case IWL_NAN_DE_TERM_FAILURE: nl_reason = NL80211_NAN_FUNC_TERM_REASON_ERROR; break; default: WARN_ON_ONCE(1); return; } ieee80211_nan_func_terminated(mvm->nan_vif, ev->instance_id, nl_reason, GFP_ATOMIC); } int iwl_mvm_nan_config_nan_faw_cmd(struct iwl_mvm *mvm, struct cfg80211_chan_def *chandef, u8 slots) { struct iwl_nan_faw_config cmd = {}; struct iwl_nan_faw_config_tail *tail = iwl_mvm_chan_info_cmd_tail(mvm, &cmd.faw_ci); struct iwl_mvm_vif *mvmvif; int ret; if (WARN_ON(!mvm->nan_vif)) return -EINVAL; mutex_lock(&mvm->mutex); mvmvif = iwl_mvm_vif_from_mac80211(mvm->nan_vif); /* Set the channel info data */ iwl_mvm_set_chan_info_chandef(mvm, &cmd.faw_ci, chandef); ieee80211_chandef_to_operating_class(chandef, &tail->op_class); tail->slots = slots; tail->type = IWL_NAN_POST_NAN_ATTR_FURTHER_NAN; cmd.id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)); ret = iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(NAN_FAW_CONFIG_CMD, NAN_GROUP, 0), 0, sizeof(cmd), &cmd); mutex_unlock(&mvm->mutex); return ret; } backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c000066400000000000000000000451771351064634500263500ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #include #include #include "iwl-trans.h" #include "iwl-csr.h" #include "mvm.h" #include "iwl-eeprom-parse.h" #include "iwl-eeprom-read.h" #include "iwl-nvm-parse.h" #include "iwl-prph.h" #include "fw/acpi.h" /* Default NVM size to read */ #define IWL_NVM_DEFAULT_CHUNK_SIZE (2 * 1024) #define NVM_WRITE_OPCODE 1 #define NVM_READ_OPCODE 0 /* load nvm chunk response */ enum { READ_NVM_CHUNK_SUCCEED = 0, READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1 }; /* * prepare the NVM host command w/ the pointers to the nvm buffer * and send it to fw */ static int iwl_nvm_write_chunk(struct iwl_mvm *mvm, u16 section, u16 offset, u16 length, const u8 *data) { struct iwl_nvm_access_cmd nvm_access_cmd = { .offset = cpu_to_le16(offset), .length = cpu_to_le16(length), .type = cpu_to_le16(section), .op_code = NVM_WRITE_OPCODE, }; struct iwl_host_cmd cmd = { .id = NVM_ACCESS_CMD, .len = { sizeof(struct iwl_nvm_access_cmd), length }, .flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL, .data = { &nvm_access_cmd, data }, /* data may come from vmalloc, so use _DUP */ .dataflags = { 0, IWL_HCMD_DFL_DUP }, }; struct iwl_rx_packet *pkt; struct iwl_nvm_access_resp *nvm_resp; int ret; ret = iwl_mvm_send_cmd(mvm, &cmd); if (ret) return ret; pkt = cmd.resp_pkt; /* Extract & check NVM write response */ nvm_resp = (void *)pkt->data; if (le16_to_cpu(nvm_resp->status) != READ_NVM_CHUNK_SUCCEED) { IWL_ERR(mvm, "NVM access write command failed for section %u (status = 0x%x)\n", section, le16_to_cpu(nvm_resp->status)); ret = -EIO; } iwl_free_resp(&cmd); return ret; } static int iwl_nvm_read_chunk(struct iwl_mvm *mvm, u16 section, u16 offset, u16 length, u8 *data) { struct iwl_nvm_access_cmd nvm_access_cmd = { .offset = cpu_to_le16(offset), .length = cpu_to_le16(length), .type = cpu_to_le16(section), .op_code = NVM_READ_OPCODE, }; struct iwl_nvm_access_resp *nvm_resp; struct iwl_rx_packet *pkt; struct iwl_host_cmd cmd = { .id = NVM_ACCESS_CMD, .flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL, .data = { &nvm_access_cmd, }, }; int ret, bytes_read, offset_read; u8 *resp_data; cmd.len[0] = sizeof(struct iwl_nvm_access_cmd); ret = iwl_mvm_send_cmd(mvm, &cmd); if (ret) return ret; pkt = cmd.resp_pkt; /* Extract NVM response */ nvm_resp = (void *)pkt->data; ret = le16_to_cpu(nvm_resp->status); bytes_read = le16_to_cpu(nvm_resp->length); offset_read = le16_to_cpu(nvm_resp->offset); resp_data = nvm_resp->data; if (ret) { if ((offset != 0) && (ret == READ_NVM_CHUNK_NOT_VALID_ADDRESS)) { /* * meaning of NOT_VALID_ADDRESS: * driver try to read chunk from address that is * multiple of 2K and got an error since addr is empty. 
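 * For example, a 4KB section is fetched as 2KB chunks at offsets
 * 0x0 and 0x800; the follow-up read at offset 0x1000 hits an empty
 * address and returns NOT_VALID_ADDRESS, which simply marks the end
 * of the section rather than a failure.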
* meaning of (offset != 0): driver already * read valid data from another chunk so this case * is not an error. */ IWL_DEBUG_EEPROM(mvm->trans->dev, "NVM access command failed on offset 0x%x since that section size is multiple 2K\n", offset); ret = 0; } else { IWL_DEBUG_EEPROM(mvm->trans->dev, "NVM access command failed with status %d (device: %s)\n", ret, mvm->cfg->name); ret = -ENODATA; } goto exit; } if (offset_read != offset) { IWL_ERR(mvm, "NVM ACCESS response with invalid offset %d\n", offset_read); ret = -EINVAL; goto exit; } /* Write data to NVM */ memcpy(data + offset, resp_data, bytes_read); ret = bytes_read; exit: iwl_free_resp(&cmd); return ret; } static int iwl_nvm_write_section(struct iwl_mvm *mvm, u16 section, const u8 *data, u16 length) { int offset = 0; /* copy data in chunks of 2k (and remainder if any) */ while (offset < length) { int chunk_size, ret; chunk_size = min(IWL_NVM_DEFAULT_CHUNK_SIZE, length - offset); ret = iwl_nvm_write_chunk(mvm, section, offset, chunk_size, data + offset); if (ret < 0) return ret; offset += chunk_size; } return 0; } /* * Reads an NVM section completely. * NICs prior to 7000 family doesn't have a real NVM, but just read * section 0 which is the EEPROM. Because the EEPROM reading is unlimited * by uCode, we need to manually check in this case that we don't * overflow and try to read more than the EEPROM size. * For 7000 family NICs, we supply the maximal size we can read, and * the uCode fills the response with as much data as we can, * without overflowing, so no check is needed. */ static int iwl_nvm_read_section(struct iwl_mvm *mvm, u16 section, u8 *data, u32 size_read) { u16 length, offset = 0; int ret; /* Set nvm section read length */ length = IWL_NVM_DEFAULT_CHUNK_SIZE; ret = length; /* Read the NVM until exhausted (reading less than requested) */ while (ret == length) { /* Check no memory assumptions fail and cause an overflow */ if ((size_read + offset + length) > mvm->cfg->base_params->eeprom_size) { IWL_ERR(mvm, "EEPROM size is too small for NVM\n"); return -ENOBUFS; } ret = iwl_nvm_read_chunk(mvm, section, offset, length, data); if (ret < 0) { IWL_DEBUG_EEPROM(mvm->trans->dev, "Cannot read NVM from section %d offset %d, length %d\n", section, offset, length); return ret; } offset += ret; } iwl_nvm_fixups(mvm->trans->hw_id, section, data, offset); IWL_DEBUG_EEPROM(mvm->trans->dev, "NVM section %d read completed\n", section); return offset; } static struct iwl_nvm_data * iwl_parse_nvm_sections(struct iwl_mvm *mvm) { struct iwl_nvm_section *sections = mvm->nvm_sections; const __be16 *hw; const __le16 *sw, *calib, *regulatory, *mac_override, *phy_sku; bool lar_enabled; int regulatory_type; /* Checking for required sections */ if (mvm->trans->cfg->nvm_type != IWL_NVM_EXT) { if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data || !mvm->nvm_sections[mvm->cfg->nvm_hw_section_num].data) { IWL_ERR(mvm, "Can't parse empty OTP/NVM sections\n"); return NULL; } } else { if (mvm->trans->cfg->nvm_type == IWL_NVM_SDP) regulatory_type = NVM_SECTION_TYPE_REGULATORY_SDP; else regulatory_type = NVM_SECTION_TYPE_REGULATORY; /* SW and REGULATORY sections are mandatory */ if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data || !mvm->nvm_sections[regulatory_type].data) { IWL_ERR(mvm, "Can't parse empty family 8000 OTP/NVM sections\n"); return NULL; } /* MAC_OVERRIDE or at least HW section must exist */ if (!mvm->nvm_sections[mvm->cfg->nvm_hw_section_num].data && !mvm->nvm_sections[NVM_SECTION_TYPE_MAC_OVERRIDE].data) { IWL_ERR(mvm, "Can't parse mac_address, empty 
sections\n"); return NULL; } /* PHY_SKU section is mandatory in B0 */ if (!mvm->nvm_sections[NVM_SECTION_TYPE_PHY_SKU].data) { IWL_ERR(mvm, "Can't parse phy_sku in B0, empty sections\n"); return NULL; } } hw = (const __be16 *)sections[mvm->cfg->nvm_hw_section_num].data; sw = (const __le16 *)sections[NVM_SECTION_TYPE_SW].data; calib = (const __le16 *)sections[NVM_SECTION_TYPE_CALIBRATION].data; mac_override = (const __le16 *)sections[NVM_SECTION_TYPE_MAC_OVERRIDE].data; phy_sku = (const __le16 *)sections[NVM_SECTION_TYPE_PHY_SKU].data; regulatory = mvm->trans->cfg->nvm_type == IWL_NVM_SDP ? (const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY_SDP].data : (const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY].data; lar_enabled = !iwlwifi_mod_params.lar_disable && fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_LAR_SUPPORT); return iwl_parse_nvm_data(mvm->trans, mvm->cfg, hw, sw, calib, regulatory, mac_override, phy_sku, mvm->fw->valid_tx_ant, mvm->fw->valid_rx_ant, lar_enabled); } /* Loads the NVM data stored in mvm->nvm_sections into the NIC */ int iwl_mvm_load_nvm_to_nic(struct iwl_mvm *mvm) { int i, ret = 0; struct iwl_nvm_section *sections = mvm->nvm_sections; IWL_DEBUG_EEPROM(mvm->trans->dev, "'Write to NVM\n"); for (i = 0; i < ARRAY_SIZE(mvm->nvm_sections); i++) { if (!mvm->nvm_sections[i].data || !mvm->nvm_sections[i].length) continue; ret = iwl_nvm_write_section(mvm, i, sections[i].data, sections[i].length); if (ret < 0) { IWL_ERR(mvm, "iwl_mvm_send_cmd failed: %d\n", ret); break; } } return ret; } int iwl_nvm_init(struct iwl_mvm *mvm) { int ret, section; u32 size_read = 0; u8 *nvm_buffer, *temp; const char *nvm_file_C = mvm->cfg->default_nvm_file_C_step; if (WARN_ON_ONCE(mvm->cfg->nvm_hw_section_num >= NVM_MAX_NUM_SECTIONS)) return -EINVAL; /* load NVM values from nic */ /* Read From FW NVM */ IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from NVM\n"); nvm_buffer = kmalloc(mvm->cfg->base_params->eeprom_size, GFP_KERNEL); if (!nvm_buffer) return -ENOMEM; for (section = 0; section < NVM_MAX_NUM_SECTIONS; section++) { /* we override the constness for initial read */ ret = iwl_nvm_read_section(mvm, section, nvm_buffer, size_read); if (ret == -ENODATA) { ret = 0; continue; } if (ret < 0) break; size_read += ret; temp = kmemdup(nvm_buffer, ret, GFP_KERNEL); if (!temp) { ret = -ENOMEM; break; } iwl_nvm_fixups(mvm->trans->hw_id, section, temp, ret); mvm->nvm_sections[section].data = temp; mvm->nvm_sections[section].length = ret; #ifdef CPTCFG_IWLWIFI_DEBUGFS switch (section) { case NVM_SECTION_TYPE_SW: mvm->nvm_sw_blob.data = temp; mvm->nvm_sw_blob.size = ret; break; case NVM_SECTION_TYPE_CALIBRATION: mvm->nvm_calib_blob.data = temp; mvm->nvm_calib_blob.size = ret; break; case NVM_SECTION_TYPE_PRODUCTION: mvm->nvm_prod_blob.data = temp; mvm->nvm_prod_blob.size = ret; break; case NVM_SECTION_TYPE_PHY_SKU: mvm->nvm_phy_sku_blob.data = temp; mvm->nvm_phy_sku_blob.size = ret; break; case NVM_SECTION_TYPE_REGULATORY_SDP: case NVM_SECTION_TYPE_REGULATORY: mvm->nvm_reg_blob.data = temp; mvm->nvm_reg_blob.size = ret; break; default: if (section == mvm->cfg->nvm_hw_section_num) { mvm->nvm_hw_blob.data = temp; mvm->nvm_hw_blob.size = ret; break; } } #endif } if (!size_read) IWL_ERR(mvm, "OTP is blank\n"); kfree(nvm_buffer); /* Only if PNVM selected in the mod param - load external NVM */ if (mvm->nvm_file_name) { /* read External NVM file from the mod param */ ret = iwl_read_external_nvm(mvm->trans, mvm->nvm_file_name, mvm->nvm_sections); if (ret) { mvm->nvm_file_name = nvm_file_C; if ((ret == -EFAULT 
|| ret == -ENOENT) && mvm->nvm_file_name) { /* in case nvm file was failed try again */ ret = iwl_read_external_nvm(mvm->trans, mvm->nvm_file_name, mvm->nvm_sections); if (ret) return ret; } else { return ret; } } } /* parse the relevant nvm sections */ mvm->nvm_data = iwl_parse_nvm_sections(mvm); if (!mvm->nvm_data) return -ENODATA; IWL_DEBUG_EEPROM(mvm->trans->dev, "nvm version = %x\n", mvm->nvm_data->nvm_version); return ret < 0 ? ret : 0; } struct iwl_mcc_update_resp * iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2, enum iwl_mcc_source src_id) { struct iwl_mcc_update_cmd mcc_update_cmd = { .mcc = cpu_to_le16(alpha2[0] << 8 | alpha2[1]), .source_id = (u8)src_id, }; struct iwl_mcc_update_resp *resp_cp; struct iwl_rx_packet *pkt; struct iwl_host_cmd cmd = { .id = MCC_UPDATE_CMD, .flags = CMD_WANT_SKB, .data = { &mcc_update_cmd }, }; int ret; u32 status; int resp_len, n_channels; u16 mcc; if (WARN_ON_ONCE(!iwl_mvm_is_lar_supported(mvm))) return ERR_PTR(-EOPNOTSUPP); cmd.len[0] = sizeof(struct iwl_mcc_update_cmd); IWL_DEBUG_LAR(mvm, "send MCC update to FW with '%c%c' src = %d\n", alpha2[0], alpha2[1], src_id); ret = iwl_mvm_send_cmd(mvm, &cmd); if (ret) return ERR_PTR(ret); pkt = cmd.resp_pkt; /* Extract MCC response */ if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_MCC_UPDATE_11AX_SUPPORT)) { struct iwl_mcc_update_resp *mcc_resp = (void *)pkt->data; n_channels = __le32_to_cpu(mcc_resp->n_channels); resp_len = sizeof(struct iwl_mcc_update_resp) + n_channels * sizeof(__le32); resp_cp = kmemdup(mcc_resp, resp_len, GFP_KERNEL); if (!resp_cp) { resp_cp = ERR_PTR(-ENOMEM); goto exit; } } else { struct iwl_mcc_update_resp_v3 *mcc_resp_v3 = (void *)pkt->data; n_channels = __le32_to_cpu(mcc_resp_v3->n_channels); resp_len = sizeof(struct iwl_mcc_update_resp) + n_channels * sizeof(__le32); resp_cp = kzalloc(resp_len, GFP_KERNEL); if (!resp_cp) { resp_cp = ERR_PTR(-ENOMEM); goto exit; } resp_cp->status = mcc_resp_v3->status; resp_cp->mcc = mcc_resp_v3->mcc; resp_cp->cap = cpu_to_le16(mcc_resp_v3->cap); resp_cp->source_id = mcc_resp_v3->source_id; resp_cp->time = mcc_resp_v3->time; resp_cp->geo_info = mcc_resp_v3->geo_info; resp_cp->n_channels = mcc_resp_v3->n_channels; memcpy(resp_cp->channels, mcc_resp_v3->channels, n_channels * sizeof(__le32)); } status = le32_to_cpu(resp_cp->status); mcc = le16_to_cpu(resp_cp->mcc); /* W/A for a FW/NVM issue - returns 0x00 for the world domain */ if (mcc == 0) { mcc = 0x3030; /* "00" - world */ resp_cp->mcc = cpu_to_le16(mcc); } IWL_DEBUG_LAR(mvm, "MCC response status: 0x%x. new MCC: 0x%x ('%c%c') n_chans: %d\n", status, mcc, mcc >> 8, mcc & 0xff, n_channels); exit: iwl_free_resp(&cmd); return resp_cp; } int iwl_mvm_init_mcc(struct iwl_mvm *mvm) { bool tlv_lar; bool nvm_lar; int retval; struct ieee80211_regdomain *regd; char mcc[3]; if (mvm->cfg->nvm_type == IWL_NVM_EXT) { tlv_lar = fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_LAR_SUPPORT); nvm_lar = mvm->nvm_data->lar_enabled; if (tlv_lar != nvm_lar) IWL_INFO(mvm, "Conflict between TLV & NVM regarding enabling LAR (TLV = %s NVM =%s)\n", tlv_lar ? "enabled" : "disabled", nvm_lar ? "enabled" : "disabled"); } if (!iwl_mvm_is_lar_supported(mvm)) return 0; /* * try to replay the last set MCC to FW. If it doesn't exist, * queue an update to cfg80211 to retrieve the default alpha2 from FW. */ retval = iwl_mvm_init_fw_regd(mvm); if (retval != -ENOENT) return retval; /* * Driver regulatory hint for initial update, this also informs the * firmware we support wifi location updates. 
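 * (The hint is implemented below via iwl_mvm_get_current_regdomain(),
 * which sends MCC_UPDATE_CMD with the special alpha2 "ZZ" to ask the
 * FW for the MCC it is currently using.)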
* Disallow scans that might crash the FW while the LAR regdomain * is not set. */ mvm->lar_regdom_set = false; regd = iwl_mvm_get_current_regdomain(mvm, NULL); if (IS_ERR_OR_NULL(regd)) return -EIO; if (iwl_mvm_is_wifi_mcc_supported(mvm) && !iwl_acpi_get_mcc(mvm->dev, mcc)) { kfree(regd); regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, mcc, MCC_SOURCE_BIOS, NULL); if (IS_ERR_OR_NULL(regd)) return -EIO; } retval = regulatory_set_wiphy_regd_sync_rtnl(mvm->hw->wiphy, regd); kfree(regd); return retval; } void iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_mcc_chub_notif *notif = (void *)pkt->data; enum iwl_mcc_source src; char mcc[3]; struct ieee80211_regdomain *regd; int wgds_tbl_idx; lockdep_assert_held(&mvm->mutex); if (iwl_mvm_is_vif_assoc(mvm) && notif->source_id == MCC_SOURCE_WIFI) { IWL_DEBUG_LAR(mvm, "Ignore mcc update while associated\n"); return; } if (WARN_ON_ONCE(!iwl_mvm_is_lar_supported(mvm))) return; mcc[0] = le16_to_cpu(notif->mcc) >> 8; mcc[1] = le16_to_cpu(notif->mcc) & 0xff; mcc[2] = '\0'; src = notif->source_id; IWL_DEBUG_LAR(mvm, "RX: received chub update mcc cmd (mcc '%s' src %d)\n", mcc, src); regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, mcc, src, NULL); if (IS_ERR_OR_NULL(regd)) return; wgds_tbl_idx = iwl_mvm_get_sar_geo_profile(mvm); if (wgds_tbl_idx < 0) IWL_DEBUG_INFO(mvm, "SAR WGDS is disabled (%d)\n", wgds_tbl_idx); else IWL_DEBUG_INFO(mvm, "SAR WGDS: geo profile %d is configured\n", wgds_tbl_idx); regulatory_set_wiphy_regd(mvm->hw->wiphy, regd); kfree(regd); } backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/mvm/offloading.c000066400000000000000000000202271351064634500276450ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2015 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2015 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. 
* * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #include #include #include #include "mvm.h" void iwl_mvm_set_wowlan_qos_seq(struct iwl_mvm_sta *mvm_ap_sta, struct iwl_wowlan_config_cmd *cmd) { int i; /* * For QoS counters, we store the one to use next, so subtract 0x10 * since the uCode will add 0x10 *before* using the value while we * increment after using the value (i.e. store the next value to use). */ for (i = 0; i < IWL_MAX_TID_COUNT; i++) { u16 seq = mvm_ap_sta->tid_data[i].seq_number; seq -= 0x10; cmd->qos_seq[i] = cpu_to_le16(seq); } } int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm, struct ieee80211_vif *vif, bool disable_offloading, bool offload_ns, u32 cmd_flags) { union { struct iwl_proto_offload_cmd_v1 v1; struct iwl_proto_offload_cmd_v2 v2; struct iwl_proto_offload_cmd_v3_small v3s; struct iwl_proto_offload_cmd_v3_large v3l; } cmd = {}; struct iwl_host_cmd hcmd = { .id = PROT_OFFLOAD_CONFIG_CMD, .flags = cmd_flags, .data[0] = &cmd, .dataflags[0] = IWL_HCMD_DFL_DUP, }; struct iwl_proto_offload_cmd_common *common; u32 enabled = 0, size; u32 capa_flags = mvm->fw->ucode_capa.flags; #if IS_ENABLED(CONFIG_IPV6) struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int i; /* * Skip tentative address when ns offload is enabled to avoid * violating RFC4862. * Keep tentative address when ns offload is disabled so the NS packets * will not be filtered out and will wake up the host. */ bool skip_tentative = offload_ns; if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL || capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE) { struct iwl_ns_config *nsc; struct iwl_targ_addr *addrs; int n_nsc, n_addrs; int c; int num_skipped = 0; if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL) { nsc = cmd.v3s.ns_config; n_nsc = IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3S; addrs = cmd.v3s.targ_addrs; n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3S; } else { nsc = cmd.v3l.ns_config; n_nsc = IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L; addrs = cmd.v3l.targ_addrs; n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3L; } /* * For each address we have (and that will fit) fill a target * address struct and combine for NS offload structs with the * solicited node addresses. 
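 * For example, fe80::aa and 2001:db8::aa share the solicited-node
 * multicast address ff02::1:ff00:aa (it is derived from the low 24 bits
 * of the address), so both target addresses reference one shared NS
 * config entry, while an address with different low bits gets its own.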
*/ for (i = 0, c = 0; i < mvmvif->num_target_ipv6_addrs && i < n_addrs && c < n_nsc; i++) { struct in6_addr solicited_addr; int j; if (skip_tentative && test_bit(i, mvmvif->tentative_addrs)) { num_skipped++; continue; } addrconf_addr_solict_mult(&mvmvif->target_ipv6_addrs[i], &solicited_addr); for (j = 0; j < c; j++) if (ipv6_addr_cmp(&nsc[j].dest_ipv6_addr, &solicited_addr) == 0) break; if (j == c) c++; addrs[i].addr = mvmvif->target_ipv6_addrs[i]; addrs[i].config_num = cpu_to_le32(j); nsc[j].dest_ipv6_addr = solicited_addr; memcpy(nsc[j].target_mac_addr, vif->addr, ETH_ALEN); } if (mvmvif->num_target_ipv6_addrs - num_skipped) enabled |= IWL_D3_PROTO_IPV6_VALID; if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL) cmd.v3s.num_valid_ipv6_addrs = cpu_to_le32(i - num_skipped); else cmd.v3l.num_valid_ipv6_addrs = cpu_to_le32(i - num_skipped); } else if (capa_flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) { bool found = false; BUILD_BUG_ON(sizeof(cmd.v2.target_ipv6_addr[0]) != sizeof(mvmvif->target_ipv6_addrs[0])); for (i = 0; i < min(mvmvif->num_target_ipv6_addrs, IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V2); i++) { if (skip_tentative && test_bit(i, mvmvif->tentative_addrs)) continue; memcpy(cmd.v2.target_ipv6_addr[i], &mvmvif->target_ipv6_addrs[i], sizeof(cmd.v2.target_ipv6_addr[i])); found = true; } if (found) { enabled |= IWL_D3_PROTO_IPV6_VALID; memcpy(cmd.v2.ndp_mac_addr, vif->addr, ETH_ALEN); } } else { bool found = false; BUILD_BUG_ON(sizeof(cmd.v1.target_ipv6_addr[0]) != sizeof(mvmvif->target_ipv6_addrs[0])); for (i = 0; i < min(mvmvif->num_target_ipv6_addrs, IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V1); i++) { if (skip_tentative && test_bit(i, mvmvif->tentative_addrs)) continue; memcpy(cmd.v1.target_ipv6_addr[i], &mvmvif->target_ipv6_addrs[i], sizeof(cmd.v1.target_ipv6_addr[i])); found = true; } if (found) { enabled |= IWL_D3_PROTO_IPV6_VALID; memcpy(cmd.v1.ndp_mac_addr, vif->addr, ETH_ALEN); } } if (offload_ns && (enabled & IWL_D3_PROTO_IPV6_VALID)) enabled |= IWL_D3_PROTO_OFFLOAD_NS; #endif if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL) { common = &cmd.v3s.common; size = sizeof(cmd.v3s); } else if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE) { common = &cmd.v3l.common; size = sizeof(cmd.v3l); } else if (capa_flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) { common = &cmd.v2.common; size = sizeof(cmd.v2); } else { common = &cmd.v1.common; size = sizeof(cmd.v1); } if (vif->bss_conf.arp_addr_cnt) { enabled |= IWL_D3_PROTO_OFFLOAD_ARP | IWL_D3_PROTO_IPV4_VALID; common->host_ipv4_addr = vif->bss_conf.arp_addr_list[0]; memcpy(common->arp_mac_addr, vif->addr, ETH_ALEN); } if (!disable_offloading) common->enabled = cpu_to_le32(enabled); hcmd.len[0] = size; return iwl_mvm_send_cmd(mvm, &hcmd); } backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/mvm/ops.c000066400000000000000000001346221351064634500263430ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. 
* * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 *
 *****************************************************************************/
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <net/mac80211.h>

#include "fw/notif-wait.h"
#include "iwl-trans.h"
#include "iwl-op-mode.h"
#include "fw/img.h"
#include "iwl-debug.h"
#include "iwl-drv.h"
#include "iwl-modparams.h"
#include "mvm.h"
#include "iwl-phy-db.h"
#include "iwl-eeprom-parse.h"
#include "iwl-csr.h"
#include "iwl-io.h"
#include "iwl-prph.h"
#include "rs.h"
#include "fw/api/scan.h"
#include "time-event.h"
#include "fw-api.h"
#include "fw/api/nan.h"
#include "fw/acpi.h"
#ifdef CPTCFG_IWLWIFI_DEVICE_TESTMODE
#include "iwl-dnt-cfg.h"
#include "iwl-dnt-dispatch.h"
#include "iwl-tm-gnl.h"
#endif

#define DRV_DESCRIPTION	"The new Intel(R) wireless AGN driver for Linux"
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
MODULE_LICENSE("GPL");

static const struct iwl_op_mode_ops iwl_mvm_ops;
static const struct iwl_op_mode_ops iwl_mvm_ops_mq;

struct iwl_mvm_mod_params iwlmvm_mod_params = {
	.power_scheme = IWL_POWER_SCHEME_BPS,
	.tfd_q_hang_detect = true
	/* rest of fields are 0 by default */
};

module_param_named(init_dbg, iwlmvm_mod_params.init_dbg, bool, 0444);
MODULE_PARM_DESC(init_dbg,
		 "set to true to debug an ASSERT in INIT fw (default: false)");
module_param_named(power_scheme, iwlmvm_mod_params.power_scheme, int, 0444);
MODULE_PARM_DESC(power_scheme,
		 "power management scheme: 1-active, 2-balanced, 3-low power, default: 2");
module_param_named(tfd_q_hang_detect, iwlmvm_mod_params.tfd_q_hang_detect,
		   bool, 0444);
MODULE_PARM_DESC(tfd_q_hang_detect,
		 "TFD queues hang detection (default: true)");

#ifdef CPTCFG_IWLWIFI_DEVICE_TESTMODE
static void iwl_mvm_rx_fw_logs(struct iwl_mvm *mvm,
			       struct iwl_rx_cmd_buffer *rxb)
{
	iwl_dnt_dispatch_collect_ucode_message(mvm->trans, rxb);
}
#endif

/*
 * module init and exit functions
 */
static int __init iwl_mvm_init(void)
{
	int ret;

	ret = iwl_mvm_rate_control_register();
	if (ret) {
		pr_err("Unable to register rate control algorithm: %d\n", ret);
		return ret;
	}

	ret = iwl_opmode_register("iwlmvm", &iwl_mvm_ops);
	if (ret)
		pr_err("Unable to register MVM op_mode: %d\n", ret);

#ifdef CPTCFG_IWLMVM_VENDOR_CMDS
	iwl_mvm_vendor_cmd_init();
#endif

	return ret;
}
module_init(iwl_mvm_init);

static void __exit iwl_mvm_exit(void)
{
	iwl_opmode_deregister("iwlmvm");
	iwl_mvm_rate_control_unregister();
#ifdef CPTCFG_IWLMVM_VENDOR_CMDS
	iwl_mvm_vendor_cmd_exit();
#endif
}
module_exit(iwl_mvm_exit);

static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
	u8 radio_cfg_type, radio_cfg_step, radio_cfg_dash;
	u32 reg_val = 0;
	u32 phy_config = iwl_mvm_get_phy_config(mvm);

	radio_cfg_type = (phy_config & FW_PHY_CFG_RADIO_TYPE) >>
			 FW_PHY_CFG_RADIO_TYPE_POS;
	radio_cfg_step = (phy_config & FW_PHY_CFG_RADIO_STEP) >>
			 FW_PHY_CFG_RADIO_STEP_POS;
	radio_cfg_dash = (phy_config & FW_PHY_CFG_RADIO_DASH) >>
			 FW_PHY_CFG_RADIO_DASH_POS;

	/* SKU control */
	reg_val |= CSR_HW_REV_STEP(mvm->trans->hw_rev) <<
			CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
	reg_val |= CSR_HW_REV_DASH(mvm->trans->hw_rev) <<
			CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;

	/* radio configuration */
	reg_val |= radio_cfg_type << CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
	reg_val |= radio_cfg_step << CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
	reg_val |= radio_cfg_dash << CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

	WARN_ON((radio_cfg_type << CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE) &
		~CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE);

	/*
	 * TODO: Bits 7-8 of CSR in 8000 HW family and higher set the ADC
	 * sampling, and shouldn't be set to any non-zero value.
* The same is supposed to be true of the other HW, but unsetting * them (such as the 7260) causes automatic tests to fail on seemingly * unrelated errors. Need to further investigate this, but for now * we'll separate cases. */ if (mvm->trans->cfg->device_family < IWL_DEVICE_FAMILY_8000) reg_val |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI; if (iwl_fw_dbg_is_d3_debug_enabled(&mvm->fwrt)) reg_val |= CSR_HW_IF_CONFIG_REG_D3_DEBUG; iwl_trans_set_bits_mask(mvm->trans, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH | CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP | CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE | CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP | CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH | CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI | CSR_HW_IF_CONFIG_REG_BIT_MAC_SI | CSR_HW_IF_CONFIG_REG_D3_DEBUG, reg_val); IWL_DEBUG_INFO(mvm, "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type, radio_cfg_step, radio_cfg_dash); /* * W/A : NIC is stuck in a reset state after Early PCIe power off * (PCIe power is lost before PERST# is asserted), causing ME FW * to lose ownership and not being able to obtain it back. */ if (!mvm->trans->cfg->apmg_not_supported) iwl_set_bits_mask_prph(mvm->trans, APMG_PS_CTRL_REG, APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS, ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS); } /** * enum iwl_rx_handler_context context for Rx handler * @RX_HANDLER_SYNC : this means that it will be called in the Rx path * which can't acquire mvm->mutex. * @RX_HANDLER_ASYNC_LOCKED : If the handler needs to hold mvm->mutex * (and only in this case!), it should be set as ASYNC. In that case, * it will be called from a worker with mvm->mutex held. * @RX_HANDLER_ASYNC_UNLOCKED : in case the handler needs to lock the * mutex itself, it will be called from a worker without mvm->mutex held. */ enum iwl_rx_handler_context { RX_HANDLER_SYNC, RX_HANDLER_ASYNC_LOCKED, RX_HANDLER_ASYNC_UNLOCKED, }; /** * struct iwl_rx_handlers handler for FW notification * @cmd_id: command id * @context: see &iwl_rx_handler_context * @fn: the function is called when notification is received */ struct iwl_rx_handlers { u16 cmd_id; enum iwl_rx_handler_context context; void (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); }; #define RX_HANDLER(_cmd_id, _fn, _context) \ { .cmd_id = _cmd_id, .fn = _fn, .context = _context } #define RX_HANDLER_GRP(_grp, _cmd, _fn, _context) \ { .cmd_id = WIDE_ID(_grp, _cmd), .fn = _fn, .context = _context } /* * Handlers for fw notifications * Convention: RX_HANDLER(CMD_NAME, iwl_mvm_rx_CMD_NAME * This list should be in order of frequency for performance purposes. 
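 * For example, a purely illustrative notification FOO_NOTIF whose
 * handler may sleep and needs mvm->mutex would be registered as
 *	RX_HANDLER(FOO_NOTIF, iwl_mvm_rx_foo_notif, RX_HANDLER_ASYNC_LOCKED),
 * whereas a handler that must run directly in the Rx path would use
 * RX_HANDLER_SYNC instead.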
* * The handler can be one from three contexts, see &iwl_rx_handler_context */ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = { RX_HANDLER(TX_CMD, iwl_mvm_rx_tx_cmd, RX_HANDLER_SYNC), RX_HANDLER(BA_NOTIF, iwl_mvm_rx_ba_notif, RX_HANDLER_SYNC), RX_HANDLER_GRP(DATA_PATH_GROUP, TLC_MNG_UPDATE_NOTIF, iwl_mvm_tlc_update_notif, RX_HANDLER_SYNC), #ifdef CPTCFG_IWLMVM_VENDOR_CMDS RX_HANDLER_GRP(LOCATION_GROUP, CSI_HEADER_NOTIFICATION, iwl_mvm_rx_csi_header, RX_HANDLER_ASYNC_LOCKED), RX_HANDLER_GRP(LOCATION_GROUP, CSI_CHUNKS_NOTIFICATION, iwl_mvm_rx_csi_chunk, RX_HANDLER_ASYNC_LOCKED), #endif /* CPTCFG_IWLMVM_VENDOR_CMDS */ RX_HANDLER(BT_PROFILE_NOTIFICATION, iwl_mvm_rx_bt_coex_notif, RX_HANDLER_ASYNC_LOCKED), RX_HANDLER(BEACON_NOTIFICATION, iwl_mvm_rx_beacon_notif, RX_HANDLER_ASYNC_LOCKED), RX_HANDLER(STATISTICS_NOTIFICATION, iwl_mvm_rx_statistics, RX_HANDLER_ASYNC_LOCKED), RX_HANDLER(BA_WINDOW_STATUS_NOTIFICATION_ID, iwl_mvm_window_status_notif, RX_HANDLER_SYNC), RX_HANDLER(TIME_EVENT_NOTIFICATION, iwl_mvm_rx_time_event_notif, RX_HANDLER_SYNC), RX_HANDLER(MCC_CHUB_UPDATE_CMD, iwl_mvm_rx_chub_update_mcc, RX_HANDLER_ASYNC_LOCKED), RX_HANDLER(EOSP_NOTIFICATION, iwl_mvm_rx_eosp_notif, RX_HANDLER_SYNC), RX_HANDLER(SCAN_ITERATION_COMPLETE, iwl_mvm_rx_lmac_scan_iter_complete_notif, RX_HANDLER_SYNC), RX_HANDLER(SCAN_OFFLOAD_COMPLETE, iwl_mvm_rx_lmac_scan_complete_notif, RX_HANDLER_ASYNC_LOCKED), RX_HANDLER(MATCH_FOUND_NOTIFICATION, iwl_mvm_rx_scan_match_found, RX_HANDLER_SYNC), RX_HANDLER(SCAN_COMPLETE_UMAC, iwl_mvm_rx_umac_scan_complete_notif, RX_HANDLER_ASYNC_LOCKED), RX_HANDLER(SCAN_ITERATION_COMPLETE_UMAC, iwl_mvm_rx_umac_scan_iter_complete_notif, RX_HANDLER_SYNC), RX_HANDLER(CARD_STATE_NOTIFICATION, iwl_mvm_rx_card_state_notif, RX_HANDLER_SYNC), RX_HANDLER(MISSED_BEACONS_NOTIFICATION, iwl_mvm_rx_missed_beacons_notif, RX_HANDLER_SYNC), RX_HANDLER(REPLY_ERROR, iwl_mvm_rx_fw_error, RX_HANDLER_SYNC), RX_HANDLER(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION, iwl_mvm_power_uapsd_misbehaving_ap_notif, RX_HANDLER_SYNC), RX_HANDLER(DTS_MEASUREMENT_NOTIFICATION, iwl_mvm_temp_notif, RX_HANDLER_ASYNC_LOCKED), RX_HANDLER_GRP(PHY_OPS_GROUP, DTS_MEASUREMENT_NOTIF_WIDE, iwl_mvm_temp_notif, RX_HANDLER_ASYNC_UNLOCKED), RX_HANDLER_GRP(PHY_OPS_GROUP, CT_KILL_NOTIFICATION, iwl_mvm_ct_kill_notif, RX_HANDLER_SYNC), RX_HANDLER(TDLS_CHANNEL_SWITCH_NOTIFICATION, iwl_mvm_rx_tdls_notif, RX_HANDLER_ASYNC_LOCKED), RX_HANDLER(MFUART_LOAD_NOTIFICATION, iwl_mvm_rx_mfuart_notif, RX_HANDLER_SYNC), RX_HANDLER_GRP(LOCATION_GROUP, TOF_RESPONDER_STATS, iwl_mvm_ftm_responder_stats, RX_HANDLER_ASYNC_LOCKED), RX_HANDLER_GRP(LOCATION_GROUP, TOF_RANGE_RESPONSE_NOTIF, iwl_mvm_ftm_range_resp, RX_HANDLER_ASYNC_LOCKED), RX_HANDLER_GRP(LOCATION_GROUP, TOF_LC_NOTIF, iwl_mvm_ftm_lc_notif, RX_HANDLER_ASYNC_LOCKED), RX_HANDLER_GRP(DEBUG_GROUP, MFU_ASSERT_DUMP_NTF, iwl_mvm_mfu_assert_dump_notif, RX_HANDLER_SYNC), RX_HANDLER_GRP(PROT_OFFLOAD_GROUP, STORED_BEACON_NTF, iwl_mvm_rx_stored_beacon_notif, RX_HANDLER_SYNC), RX_HANDLER_GRP(DATA_PATH_GROUP, MU_GROUP_MGMT_NOTIF, iwl_mvm_mu_mimo_grp_notif, RX_HANDLER_SYNC), RX_HANDLER_GRP(DATA_PATH_GROUP, STA_PM_NOTIF, iwl_mvm_sta_pm_notif, RX_HANDLER_SYNC), #ifdef CPTCFG_IWLMVM_VENDOR_CMDS RX_HANDLER_GRP(NAN_GROUP, NAN_DISCOVERY_TERMINATE_NOTIF, iwl_mvm_nan_de_term_notif, RX_HANDLER_SYNC), RX_HANDLER_GRP(NAN_GROUP, NAN_DISCOVERY_EVENT_NOTIF, iwl_mvm_nan_match, RX_HANDLER_SYNC), RX_HANDLER_GRP(MAC_CONF_GROUP, PROBE_RESPONSE_DATA_NOTIF, iwl_mvm_probe_resp_data_notif, RX_HANDLER_ASYNC_LOCKED), 
RX_HANDLER_GRP(MAC_CONF_GROUP, CHANNEL_SWITCH_NOA_NOTIF, iwl_mvm_channel_switch_noa_notif, RX_HANDLER_SYNC), #endif #ifdef CPTCFG_IWLWIFI_DEVICE_TESTMODE RX_HANDLER(DEBUG_LOG_MSG, iwl_mvm_rx_fw_logs, RX_HANDLER_SYNC), #endif }; #undef RX_HANDLER #undef RX_HANDLER_GRP /* Please keep this array *SORTED* by hex value. * Access is done through binary search */ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = { HCMD_NAME(MVM_ALIVE), HCMD_NAME(REPLY_ERROR), HCMD_NAME(ECHO_CMD), HCMD_NAME(INIT_COMPLETE_NOTIF), HCMD_NAME(PHY_CONTEXT_CMD), HCMD_NAME(DBG_CFG), HCMD_NAME(SCAN_CFG_CMD), HCMD_NAME(SCAN_REQ_UMAC), HCMD_NAME(SCAN_ABORT_UMAC), HCMD_NAME(SCAN_COMPLETE_UMAC), HCMD_NAME(BA_WINDOW_STATUS_NOTIFICATION_ID), HCMD_NAME(ADD_STA_KEY), HCMD_NAME(ADD_STA), HCMD_NAME(REMOVE_STA), HCMD_NAME(FW_GET_ITEM_CMD), HCMD_NAME(TX_CMD), HCMD_NAME(SCD_QUEUE_CFG), HCMD_NAME(TXPATH_FLUSH), HCMD_NAME(MGMT_MCAST_KEY), HCMD_NAME(WEP_KEY), HCMD_NAME(SHARED_MEM_CFG), HCMD_NAME(TDLS_CHANNEL_SWITCH_CMD), HCMD_NAME(MAC_CONTEXT_CMD), HCMD_NAME(TIME_EVENT_CMD), HCMD_NAME(TIME_EVENT_NOTIFICATION), HCMD_NAME(BINDING_CONTEXT_CMD), HCMD_NAME(TIME_QUOTA_CMD), HCMD_NAME(NON_QOS_TX_COUNTER_CMD), HCMD_NAME(LEDS_CMD), HCMD_NAME(LQ_CMD), HCMD_NAME(FW_PAGING_BLOCK_CMD), HCMD_NAME(SCAN_OFFLOAD_REQUEST_CMD), HCMD_NAME(SCAN_OFFLOAD_ABORT_CMD), HCMD_NAME(HOT_SPOT_CMD), HCMD_NAME(SCAN_OFFLOAD_PROFILES_QUERY_CMD), HCMD_NAME(BT_COEX_UPDATE_REDUCED_TXP), HCMD_NAME(BT_COEX_CI), HCMD_NAME(PHY_CONFIGURATION_CMD), HCMD_NAME(CALIB_RES_NOTIF_PHY_DB), HCMD_NAME(PHY_DB_CMD), HCMD_NAME(SCAN_OFFLOAD_COMPLETE), HCMD_NAME(SCAN_OFFLOAD_UPDATE_PROFILES_CMD), HCMD_NAME(CONFIG_2G_COEX_CMD), HCMD_NAME(POWER_TABLE_CMD), HCMD_NAME(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION), HCMD_NAME(REPLY_THERMAL_MNG_BACKOFF), HCMD_NAME(DC2DC_CONFIG_CMD), HCMD_NAME(NVM_ACCESS_CMD), HCMD_NAME(BEACON_NOTIFICATION), HCMD_NAME(BEACON_TEMPLATE_CMD), HCMD_NAME(TX_ANT_CONFIGURATION_CMD), HCMD_NAME(BT_CONFIG), HCMD_NAME(STATISTICS_CMD), HCMD_NAME(STATISTICS_NOTIFICATION), HCMD_NAME(EOSP_NOTIFICATION), HCMD_NAME(REDUCE_TX_POWER_CMD), HCMD_NAME(CARD_STATE_NOTIFICATION), HCMD_NAME(MISSED_BEACONS_NOTIFICATION), HCMD_NAME(TDLS_CONFIG_CMD), HCMD_NAME(MAC_PM_POWER_TABLE), HCMD_NAME(TDLS_CHANNEL_SWITCH_NOTIFICATION), HCMD_NAME(MFUART_LOAD_NOTIFICATION), HCMD_NAME(RSS_CONFIG_CMD), HCMD_NAME(SCAN_ITERATION_COMPLETE_UMAC), HCMD_NAME(REPLY_RX_PHY_CMD), HCMD_NAME(REPLY_RX_MPDU_CMD), HCMD_NAME(FRAME_RELEASE), HCMD_NAME(BA_NOTIF), HCMD_NAME(MCC_UPDATE_CMD), HCMD_NAME(MCC_CHUB_UPDATE_CMD), HCMD_NAME(MARKER_CMD), HCMD_NAME(BT_PROFILE_NOTIFICATION), HCMD_NAME(BCAST_FILTER_CMD), HCMD_NAME(MCAST_FILTER_CMD), HCMD_NAME(REPLY_SF_CFG_CMD), HCMD_NAME(REPLY_BEACON_FILTERING_CMD), HCMD_NAME(D3_CONFIG_CMD), HCMD_NAME(PROT_OFFLOAD_CONFIG_CMD), HCMD_NAME(OFFLOADS_QUERY_CMD), HCMD_NAME(REMOTE_WAKE_CONFIG_CMD), HCMD_NAME(MATCH_FOUND_NOTIFICATION), HCMD_NAME(DTS_MEASUREMENT_NOTIFICATION), HCMD_NAME(WOWLAN_PATTERNS), HCMD_NAME(WOWLAN_CONFIGURATION), HCMD_NAME(WOWLAN_TSC_RSC_PARAM), HCMD_NAME(WOWLAN_TKIP_PARAM), HCMD_NAME(WOWLAN_KEK_KCK_MATERIAL), HCMD_NAME(WOWLAN_GET_STATUSES), HCMD_NAME(SCAN_ITERATION_COMPLETE), HCMD_NAME(D0I3_END_CMD), HCMD_NAME(LTR_CONFIG), HCMD_NAME(LDBG_CONFIG_CMD), HCMD_NAME(DEBUG_LOG_MSG), }; /* Please keep this array *SORTED* by hex value. 
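 * (Each HCMD_NAME(x) entry pairs an opcode with its stringified name,
 * i.e. it expands to { .cmd_id = x, .cmd_name = #x }, so debug output
 * can print command names instead of raw hex values.)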
* Access is done through binary search */ static const struct iwl_hcmd_names iwl_mvm_system_names[] = { HCMD_NAME(SHARED_MEM_CFG_CMD), HCMD_NAME(SOC_CONFIGURATION_CMD), HCMD_NAME(INIT_EXTENDED_CFG_CMD), HCMD_NAME(FW_ERROR_RECOVERY_CMD), }; /* Please keep this array *SORTED* by hex value. * Access is done through binary search */ static const struct iwl_hcmd_names iwl_mvm_mac_conf_names[] = { HCMD_NAME(CHANNEL_SWITCH_TIME_EVENT_CMD), HCMD_NAME(CHANNEL_SWITCH_NOA_NOTIF), }; /* Please keep this array *SORTED* by hex value. * Access is done through binary search */ static const struct iwl_hcmd_names iwl_mvm_phy_names[] = { HCMD_NAME(CMD_DTS_MEASUREMENT_TRIGGER_WIDE), HCMD_NAME(CTDP_CONFIG_CMD), HCMD_NAME(TEMP_REPORTING_THRESHOLDS_CMD), HCMD_NAME(GEO_TX_POWER_LIMIT), HCMD_NAME(CT_KILL_NOTIFICATION), HCMD_NAME(DTS_MEASUREMENT_NOTIF_WIDE), }; /* Please keep this array *SORTED* by hex value. * Access is done through binary search */ static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = { HCMD_NAME(DQA_ENABLE_CMD), HCMD_NAME(UPDATE_MU_GROUPS_CMD), HCMD_NAME(TRIGGER_RX_QUEUES_NOTIF_CMD), HCMD_NAME(STA_HE_CTXT_CMD), HCMD_NAME(RFH_QUEUE_CONFIG_CMD), HCMD_NAME(TLC_MNG_CONFIG_CMD), HCMD_NAME(CHEST_COLLECTOR_FILTER_CONFIG_CMD), HCMD_NAME(STA_PM_NOTIF), HCMD_NAME(MU_GROUP_MGMT_NOTIF), HCMD_NAME(RX_QUEUES_NOTIFICATION), }; /* Please keep this array *SORTED* by hex value. * Access is done through binary search */ static const struct iwl_hcmd_names iwl_mvm_debug_names[] = { HCMD_NAME(DBGC_SUSPEND_RESUME), HCMD_NAME(MFU_ASSERT_DUMP_NTF), }; /* Please keep this array *SORTED* by hex value. * Access is done through binary search */ static const struct iwl_hcmd_names iwl_mvm_nan_names[] = { HCMD_NAME(NAN_CONFIG_CMD), HCMD_NAME(NAN_DISCOVERY_FUNC_CMD), HCMD_NAME(NAN_FAW_CONFIG_CMD), HCMD_NAME(NAN_DISCOVERY_EVENT_NOTIF), HCMD_NAME(NAN_DISCOVERY_TERMINATE_NOTIF), HCMD_NAME(NAN_FAW_START_NOTIF), }; /* Please keep this array *SORTED* by hex value. * Access is done through binary search */ static const struct iwl_hcmd_names iwl_mvm_location_names[] = { HCMD_NAME(TOF_RANGE_REQ_CMD), HCMD_NAME(TOF_CONFIG_CMD), HCMD_NAME(TOF_RANGE_ABORT_CMD), HCMD_NAME(TOF_RANGE_REQ_EXT_CMD), HCMD_NAME(TOF_RESPONDER_CONFIG_CMD), HCMD_NAME(TOF_RESPONDER_DYN_CONFIG_CMD), HCMD_NAME(TOF_LC_NOTIF), HCMD_NAME(TOF_RESPONDER_STATS), HCMD_NAME(TOF_MCSI_DEBUG_NOTIF), HCMD_NAME(TOF_RANGE_RESPONSE_NOTIF), }; /* Please keep this array *SORTED* by hex value. * Access is done through binary search */ static const struct iwl_hcmd_names iwl_mvm_prot_offload_names[] = { HCMD_NAME(STORED_BEACON_NTF), }; /* Please keep this array *SORTED* by hex value. 
 * Access is done through binary search
 */
static const struct iwl_hcmd_names iwl_mvm_regulatory_and_nvm_names[] = {
	HCMD_NAME(NVM_ACCESS_COMPLETE),
	HCMD_NAME(NVM_GET_INFO),
};

static const struct iwl_hcmd_arr iwl_mvm_groups[] = {
	[LEGACY_GROUP] = HCMD_ARR(iwl_mvm_legacy_names),
	[LONG_GROUP] = HCMD_ARR(iwl_mvm_legacy_names),
	[SYSTEM_GROUP] = HCMD_ARR(iwl_mvm_system_names),
	[MAC_CONF_GROUP] = HCMD_ARR(iwl_mvm_mac_conf_names),
	[PHY_OPS_GROUP] = HCMD_ARR(iwl_mvm_phy_names),
	[DATA_PATH_GROUP] = HCMD_ARR(iwl_mvm_data_path_names),
	[NAN_GROUP] = HCMD_ARR(iwl_mvm_nan_names),
	[LOCATION_GROUP] = HCMD_ARR(iwl_mvm_location_names),
	[PROT_OFFLOAD_GROUP] = HCMD_ARR(iwl_mvm_prot_offload_names),
	[REGULATORY_AND_NVM_GROUP] =
		HCMD_ARR(iwl_mvm_regulatory_and_nvm_names),
};

/* this forward declaration avoids having to export the function */
static void iwl_mvm_async_handlers_wk(struct work_struct *wk);

static u32 iwl_mvm_min_backoff(struct iwl_mvm *mvm)
{
	const struct iwl_pwr_tx_backoff *backoff = mvm->cfg->pwr_tx_backoffs;
	u64 dflt_pwr_limit;

	if (!backoff)
		return 0;

	dflt_pwr_limit = iwl_acpi_get_pwr_limit(mvm->dev);

	while (backoff->pwr) {
		if (dflt_pwr_limit >= backoff->pwr)
			return backoff->backoff;

		backoff++;
	}

	return 0;
}

static void iwl_mvm_tx_unblock_dwork(struct work_struct *work)
{
	struct iwl_mvm *mvm =
		container_of(work, struct iwl_mvm, cs_tx_unblock_dwork.work);
	struct ieee80211_vif *tx_blocked_vif;
	struct iwl_mvm_vif *mvmvif;

	mutex_lock(&mvm->mutex);

	tx_blocked_vif =
		rcu_dereference_protected(mvm->csa_tx_blocked_vif,
					  lockdep_is_held(&mvm->mutex));

	if (!tx_blocked_vif)
		goto unlock;

	mvmvif = iwl_mvm_vif_from_mac80211(tx_blocked_vif);
	iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, false);
	RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL);
unlock:
	mutex_unlock(&mvm->mutex);
}

#ifdef CPTCFG_IWLWIFI_SUPPORT_DEBUG_OVERRIDES
static void iwl_mvm_init_modparams(struct iwl_mvm *mvm)
{
#define IWL_DBG_CFG(t, n)		/* nothing */
#define IWL_DBG_CFG_STR(n)		/* nothing */
#define IWL_DBG_CFG_NODEF(t, n)		/* nothing */
#define IWL_DBG_CFG_BIN(n)		/* nothing */
#define IWL_DBG_CFG_BINA(n, max)	/* nothing */
#define IWL_DBG_CFG_RANGE(t, n, min, max)	/* nothing */
#define IWL_MOD_PARAM(t, n)		/* nothing */
#define IWL_MVM_MOD_PARAM(t, n)					\
	if (mvm->trans->dbg_cfg.__mvm_mod_param_##n)		\
		iwlmvm_mod_params.n = mvm->trans->dbg_cfg.mvm_##n;
#define DBG_CFG_REINCLUDE
#include "iwl-dbg-cfg.h"
#undef IWL_DBG_CFG
#undef IWL_DBG_CFG_STR
#undef IWL_DBG_CFG_NODEF
#undef IWL_DBG_CFG_BIN
#undef IWL_DBG_CFG_BINA
#undef IWL_DBG_CFG_RANGE
#undef IWL_MOD_PARAM
#undef IWL_MVM_MOD_PARAM
}
#endif

static int iwl_mvm_fwrt_dump_start(void *ctx)
{
	struct iwl_mvm *mvm = ctx;

	mutex_lock(&mvm->mutex);

	return 0;
}

static void iwl_mvm_fwrt_dump_end(void *ctx)
{
	struct iwl_mvm *mvm = ctx;

	mutex_unlock(&mvm->mutex);
}

static int iwl_mvm_fwrt_send_hcmd(void *ctx, struct iwl_host_cmd *host_cmd)
{
	struct iwl_mvm *mvm = (struct iwl_mvm *)ctx;
	int ret;

	mutex_lock(&mvm->mutex);
	ret = iwl_mvm_send_cmd(mvm, host_cmd);
	mutex_unlock(&mvm->mutex);

	return ret;
}

static bool iwl_mvm_d3_debug_enable(void *ctx)
{
	return IWL_MVM_D3_DEBUG;
}

static const struct iwl_fw_runtime_ops iwl_mvm_fwrt_ops = {
	.dump_start = iwl_mvm_fwrt_dump_start,
	.dump_end = iwl_mvm_fwrt_dump_end,
	.send_hcmd = iwl_mvm_fwrt_send_hcmd,
	.d3_debug_enable = iwl_mvm_d3_debug_enable,
};

#ifdef CPTCFG_IWLWIFI_DEVICE_TESTMODE
static int iwl_mvm_tm_send_hcmd(void *op_mode, struct iwl_host_cmd *host_cmd)
{
	struct iwl_mvm *mvm = (struct iwl_mvm *)op_mode;

	if (WARN_ON_ONCE(!op_mode))
		return -EINVAL;

	return
iwl_mvm_send_cmd(mvm, host_cmd); } #endif #ifdef CPTCFG_IWLMVM_VENDOR_CMDS static u8 iwl_mvm_lookup_notif_ver(struct iwl_mvm *mvm, u8 grp, u8 cmd, u8 def) { const struct iwl_fw_cmd_version *entry; unsigned int i; if (!mvm->fw->ucode_capa.cmd_versions || !mvm->fw->ucode_capa.n_cmd_versions) return def; entry = mvm->fw->ucode_capa.cmd_versions; for (i = 0; i < mvm->fw->ucode_capa.n_cmd_versions; i++, entry++) { if (entry->group == grp && entry->cmd == cmd) { if (entry->notif_ver == IWL_FW_CMD_VER_UNKNOWN) return def; return entry->notif_ver; } } return def; } #endif static struct iwl_op_mode * iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, const struct iwl_fw *fw, struct dentry *dbgfs_dir) { struct ieee80211_hw *hw; struct iwl_op_mode *op_mode; struct iwl_mvm *mvm; struct iwl_trans_config trans_cfg = {}; static const u8 no_reclaim_cmds[] = { TX_CMD, }; int err, scan_size; u32 min_backoff; enum iwl_amsdu_size rb_size_default; /* * We use IWL_MVM_STATION_COUNT to check the validity of the station * index all over the driver - check that its value corresponds to the * array size. */ BUILD_BUG_ON(ARRAY_SIZE(mvm->fw_id_to_mac_id) != IWL_MVM_STATION_COUNT); /******************************** * 1. Allocating and configuring HW data ********************************/ hw = ieee80211_alloc_hw(sizeof(struct iwl_op_mode) + sizeof(struct iwl_mvm), &iwl_mvm_hw_ops); if (!hw) return NULL; if (cfg->max_rx_agg_size) hw->max_rx_aggregation_subframes = cfg->max_rx_agg_size; else hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF; #ifdef CPTCFG_IWLWIFI_SUPPORT_DEBUG_OVERRIDES if (trans->dbg_cfg.rx_agg_subframes) hw->max_rx_aggregation_subframes = trans->dbg_cfg.rx_agg_subframes; #endif if (cfg->max_tx_agg_size) hw->max_tx_aggregation_subframes = cfg->max_tx_agg_size; else hw->max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF; op_mode = hw->priv; mvm = IWL_OP_MODE_GET_MVM(op_mode); mvm->dev = trans->dev; mvm->trans = trans; mvm->cfg = cfg; mvm->fw = fw; mvm->hw = hw; iwl_fw_runtime_init(&mvm->fwrt, trans, fw, &iwl_mvm_fwrt_ops, mvm, dbgfs_dir); mvm->init_status = 0; if (iwl_mvm_has_new_rx_api(mvm)) { op_mode->ops = &iwl_mvm_ops_mq; trans->rx_mpdu_cmd_hdr_size = (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) ? sizeof(struct iwl_rx_mpdu_desc) : IWL_RX_DESC_SIZE_V1; } else { op_mode->ops = &iwl_mvm_ops; trans->rx_mpdu_cmd_hdr_size = sizeof(struct iwl_rx_mpdu_res_start); if (WARN_ON(trans->num_rx_queues > 1)) goto out_free; } mvm->fw_restart = iwlwifi_mod_params.fw_restart ? 
-1 : 0; mvm->aux_queue = IWL_MVM_DQA_AUX_QUEUE; mvm->snif_queue = IWL_MVM_DQA_INJECT_MONITOR_QUEUE; mvm->probe_queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE; mvm->p2p_dev_queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE; mvm->sf_state = SF_UNINIT; if (iwl_mvm_has_unified_ucode(mvm)) iwl_fw_set_current_image(&mvm->fwrt, IWL_UCODE_REGULAR); else iwl_fw_set_current_image(&mvm->fwrt, IWL_UCODE_INIT); mvm->drop_bcn_ap_mode = true; mutex_init(&mvm->mutex); spin_lock_init(&mvm->async_handlers_lock); INIT_LIST_HEAD(&mvm->time_event_list); INIT_LIST_HEAD(&mvm->aux_roc_te_list); INIT_LIST_HEAD(&mvm->async_handlers_list); spin_lock_init(&mvm->time_event_lock); INIT_LIST_HEAD(&mvm->ftm_initiator.loc_list); INIT_WORK(&mvm->async_handlers_wk, iwl_mvm_async_handlers_wk); INIT_WORK(&mvm->roc_done_wk, iwl_mvm_roc_done_wk); #ifdef CPTCFG_MAC80211_LATENCY_MEASUREMENTS INIT_WORK(&mvm->tx_latency_wk, iwl_mvm_tx_latency_wk); INIT_DELAYED_WORK(&mvm->tx_latency_watchdog_wk, iwl_mvm_tx_latency_watchdog_wk); #endif /* CPTCFG_MAC80211_LATENCY_MEASUREMENTS */ INIT_DELAYED_WORK(&mvm->tdls_cs.dwork, iwl_mvm_tdls_ch_switch_work); INIT_DELAYED_WORK(&mvm->scan_timeout_dwork, iwl_mvm_scan_timeout_wk); INIT_WORK(&mvm->add_stream_wk, iwl_mvm_add_new_dqa_stream_wk); INIT_LIST_HEAD(&mvm->add_stream_txqs); init_waitqueue_head(&mvm->rx_sync_waitq); #ifdef CPTCFG_IWLMVM_VENDOR_CMDS /* * by default capture all frame types * (but of course leave it disabled) */ mvm->csi_cfg.frame_types = ~0ULL; #endif /* CPTCFG_IWLMVM_VENDOR_CMDS */ atomic_set(&mvm->queue_sync_counter, 0); SET_IEEE80211_DEV(mvm->hw, mvm->trans->dev); spin_lock_init(&mvm->tcm.lock); INIT_DELAYED_WORK(&mvm->tcm.work, iwl_mvm_tcm_work); mvm->tcm.ts = jiffies; mvm->tcm.ll_ts = jiffies; mvm->tcm.uapsd_nonagg_ts = jiffies; #ifdef CPTCFG_IWLMVM_TDLS_PEER_CACHE INIT_LIST_HEAD(&mvm->tdls_peer_cache_list); #endif #ifdef CPTCFG_IWLMVM_VENDOR_CMDS mvm->rx_filters = IWL_MVM_VENDOR_RXFILTER_EINVAL; #endif INIT_DELAYED_WORK(&mvm->cs_tx_unblock_dwork, iwl_mvm_tx_unblock_dwork); #ifdef CPTCFG_IWLMVM_VENDOR_CMDS /* set command/notification versions we care about */ mvm->cmd_ver.csi_notif = iwl_mvm_lookup_notif_ver(mvm, LOCATION_GROUP, CSI_CHUNKS_NOTIFICATION, 1); /* we only support versions 1 and 2 */ if (WARN_ON_ONCE(mvm->cmd_ver.csi_notif > 2)) goto out_free; #endif /* * Populate the state variables that the transport layer needs * to know about. 
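 * (For example, trans_cfg.rx_buf_size below follows the amsdu_size
 * module parameter and falls back to a per-family default when the
 * parameter is unset or has an unsupported value.)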
*/ trans_cfg.op_mode = op_mode; trans_cfg.no_reclaim_cmds = no_reclaim_cmds; trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds); if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) rb_size_default = IWL_AMSDU_2K; else rb_size_default = IWL_AMSDU_4K; switch (iwlwifi_mod_params.amsdu_size) { case IWL_AMSDU_DEF: trans_cfg.rx_buf_size = rb_size_default; break; case IWL_AMSDU_4K: trans_cfg.rx_buf_size = IWL_AMSDU_4K; break; case IWL_AMSDU_8K: trans_cfg.rx_buf_size = IWL_AMSDU_8K; break; case IWL_AMSDU_12K: trans_cfg.rx_buf_size = IWL_AMSDU_12K; break; default: pr_err("%s: Unsupported amsdu_size: %d\n", KBUILD_MODNAME, iwlwifi_mod_params.amsdu_size); trans_cfg.rx_buf_size = rb_size_default; } BUILD_BUG_ON(sizeof(struct iwl_ldbg_config_cmd) != LDBG_CFG_COMMAND_SIZE); trans->wide_cmd_header = true; trans_cfg.bc_table_dword = mvm->trans->cfg->device_family < IWL_DEVICE_FAMILY_22560; trans_cfg.command_groups = iwl_mvm_groups; trans_cfg.command_groups_size = ARRAY_SIZE(iwl_mvm_groups); trans_cfg.cmd_queue = IWL_MVM_DQA_CMD_QUEUE; trans_cfg.cmd_fifo = IWL_MVM_TX_FIFO_CMD; trans_cfg.scd_set_active = true; trans_cfg.cb_data_offs = offsetof(struct ieee80211_tx_info, driver_data[2]); trans_cfg.sw_csum_tx = IWL_MVM_SW_TX_CSUM_OFFLOAD; /* Set a short watchdog for the command queue */ trans_cfg.cmd_q_wdg_timeout = iwl_mvm_get_wd_timeout(mvm, NULL, false, true); snprintf(mvm->hw->wiphy->fw_version, sizeof(mvm->hw->wiphy->fw_version), "%s", fw->fw_version); /* Configure transport layer */ iwl_trans_configure(mvm->trans, &trans_cfg); trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD; trans->dbg.dest_tlv = mvm->fw->dbg.dest_tlv; trans->dbg.n_dest_reg = mvm->fw->dbg.n_dest_reg; memcpy(trans->dbg.conf_tlv, mvm->fw->dbg.conf_tlv, sizeof(trans->dbg.conf_tlv)); trans->dbg.trigger_tlv = mvm->fw->dbg.trigger_tlv; trans->iml = mvm->fw->iml; trans->iml_len = mvm->fw->iml_len; /* set up notification wait support */ iwl_notification_wait_init(&mvm->notif_wait); #ifdef CPTCFG_IWLWIFI_DEVICE_TESTMODE iwl_dnt_init(mvm->trans, dbgfs_dir); iwl_tm_init(trans, mvm->fw, &mvm->mutex, mvm); #endif /* Init phy db */ mvm->phy_db = iwl_phy_db_init(trans); if (!mvm->phy_db) { IWL_ERR(mvm, "Cannot init phy_db\n"); goto out_free; } IWL_INFO(mvm, "Detected %s, REV=0x%X\n", mvm->cfg->name, mvm->trans->hw_rev); if (iwlwifi_mod_params.nvm_file) mvm->nvm_file_name = iwlwifi_mod_params.nvm_file; #ifdef CPTCFG_IWLWIFI_SUPPORT_DEBUG_OVERRIDES else if (trans->dbg_cfg.nvm_file) mvm->nvm_file_name = trans->dbg_cfg.nvm_file; #endif else IWL_DEBUG_EEPROM(mvm->trans->dev, "working without external nvm file\n"); err = iwl_trans_start_hw(mvm->trans); if (err) goto out_free; mutex_lock(&mvm->mutex); err = iwl_run_init_mvm_ucode(mvm, true); if (err && err != -ERFKILL) iwl_fw_dbg_error_collect(&mvm->fwrt, FW_DBG_TRIGGER_DRIVER); if (!iwlmvm_mod_params.init_dbg || !err) iwl_mvm_stop_device(mvm); mutex_unlock(&mvm->mutex); if (err < 0) { IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", err); goto out_free; } scan_size = iwl_mvm_scan_size(mvm); mvm->scan_cmd = kmalloc(scan_size, GFP_KERNEL); if (!mvm->scan_cmd) goto out_free; /* Set EBS as successful as long as not stated otherwise by the FW. 
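 * (The flag is cleared again later if a scan completion notification
 * from the firmware reports an EBS failure.)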
*/ mvm->last_ebs_successful = true; err = iwl_mvm_mac_setup_register(mvm); if (err) goto out_free; mvm->hw_registered = true; min_backoff = iwl_mvm_min_backoff(mvm); iwl_mvm_thermal_initialize(mvm, min_backoff); iwl_mvm_dbgfs_register(mvm, dbgfs_dir); if (!iwl_mvm_has_new_rx_stats_api(mvm)) memset(&mvm->rx_stats_v3, 0, sizeof(struct mvm_statistics_rx_v3)); else memset(&mvm->rx_stats, 0, sizeof(struct mvm_statistics_rx)); #ifdef CPTCFG_IWLWIFI_SUPPORT_DEBUG_OVERRIDES iwl_mvm_init_modparams(mvm); #endif iwl_mvm_toggle_tx_ant(mvm, &mvm->mgmt_last_antenna_idx); return op_mode; out_free: iwl_fw_flush_dumps(&mvm->fwrt); iwl_fw_runtime_free(&mvm->fwrt); if (iwlmvm_mod_params.init_dbg) return op_mode; #ifdef CPTCFG_IWLWIFI_DEVICE_TESTMODE iwl_dnt_free(trans); #endif iwl_phy_db_free(mvm->phy_db); kfree(mvm->scan_cmd); iwl_trans_op_mode_leave(trans); ieee80211_free_hw(mvm->hw); return NULL; } static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode) { struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); int i; iwl_mvm_leds_exit(mvm); iwl_mvm_thermal_exit(mvm); ieee80211_unregister_hw(mvm->hw); kfree(mvm->scan_cmd); kfree(mvm->mcast_filter_cmd); mvm->mcast_filter_cmd = NULL; kfree(mvm->error_recovery_buf); mvm->error_recovery_buf = NULL; #ifdef CPTCFG_IWLMVM_VENDOR_CMDS kfree(mvm->mcast_active_filter_cmd); mvm->mcast_active_filter_cmd = NULL; iwl_mvm_vendor_cmds_unregister(mvm); #endif iwl_trans_op_mode_leave(mvm->trans); iwl_phy_db_free(mvm->phy_db); mvm->phy_db = NULL; #ifdef CPTCFG_IWLWIFI_DEVICE_TESTMODE iwl_dnt_free(mvm->trans); #endif kfree(mvm->nvm_data); for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++) kfree(mvm->nvm_sections[i].data); cancel_delayed_work_sync(&mvm->tcm.work); #ifdef CPTCFG_IWLMVM_TDLS_PEER_CACHE iwl_mvm_tdls_peer_cache_clear(mvm, NULL); #endif /* CPTCFG_IWLMVM_TDLS_PEER_CACHE */ iwl_fw_runtime_free(&mvm->fwrt); mutex_destroy(&mvm->mutex); ieee80211_free_hw(mvm->hw); } struct iwl_async_handler_entry { struct list_head list; struct iwl_rx_cmd_buffer rxb; enum iwl_rx_handler_context context; void (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); }; void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm) { struct iwl_async_handler_entry *entry, *tmp; spin_lock_bh(&mvm->async_handlers_lock); list_for_each_entry_safe(entry, tmp, &mvm->async_handlers_list, list) { iwl_free_rxb(&entry->rxb); list_del(&entry->list); kfree(entry); } spin_unlock_bh(&mvm->async_handlers_lock); } static void iwl_mvm_async_handlers_wk(struct work_struct *wk) { struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, async_handlers_wk); struct iwl_async_handler_entry *entry, *tmp; LIST_HEAD(local_list); /* Ensure that we are not in stop flow (check iwl_mvm_mac_stop) */ /* * Sync with Rx path with a lock. Remove all the entries from this list, * add them to a local one (lock free), and then handle them. 
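 * (Splicing the entries onto a local list keeps the spinlock held only
 * for the list manipulation itself, so a handler that needs to take
 * mvm->mutex can never block the Rx path from queueing new work.)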
*/ spin_lock_bh(&mvm->async_handlers_lock); list_splice_init(&mvm->async_handlers_list, &local_list); spin_unlock_bh(&mvm->async_handlers_lock); list_for_each_entry_safe(entry, tmp, &local_list, list) { if (entry->context == RX_HANDLER_ASYNC_LOCKED) mutex_lock(&mvm->mutex); entry->fn(mvm, &entry->rxb); iwl_free_rxb(&entry->rxb); list_del(&entry->list); if (entry->context == RX_HANDLER_ASYNC_LOCKED) mutex_unlock(&mvm->mutex); kfree(entry); } } static inline void iwl_mvm_rx_check_trigger(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt) { struct iwl_fw_dbg_trigger_tlv *trig; struct iwl_fw_dbg_trigger_cmd *cmds_trig; int i; trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL, FW_DBG_TRIGGER_FW_NOTIF); if (!trig) return; cmds_trig = (void *)trig->data; for (i = 0; i < ARRAY_SIZE(cmds_trig->cmds); i++) { /* don't collect on CMD 0 */ if (!cmds_trig->cmds[i].cmd_id) break; if (cmds_trig->cmds[i].cmd_id != pkt->hdr.cmd || cmds_trig->cmds[i].group_id != pkt->hdr.group_id) continue; iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, "CMD 0x%02x.%02x received", pkt->hdr.group_id, pkt->hdr.cmd); break; } } static void iwl_mvm_rx_common(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, struct iwl_rx_packet *pkt) { int i; iwl_mvm_rx_check_trigger(mvm, pkt); /* * Do the notification wait before RX handlers so * even if the RX handler consumes the RXB we have * access to it in the notification wait entry. */ iwl_notification_wait_notify(&mvm->notif_wait, pkt); for (i = 0; i < ARRAY_SIZE(iwl_mvm_rx_handlers); i++) { const struct iwl_rx_handlers *rx_h = &iwl_mvm_rx_handlers[i]; struct iwl_async_handler_entry *entry; if (rx_h->cmd_id != WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd)) continue; if (rx_h->context == RX_HANDLER_SYNC) { rx_h->fn(mvm, rxb); return; } entry = kzalloc(sizeof(*entry), GFP_ATOMIC); /* we can't do much... */ if (!entry) return; entry->rxb._page = rxb_steal_page(rxb); entry->rxb._offset = rxb->_offset; entry->rxb._rx_page_order = rxb->_rx_page_order; entry->fn = rx_h->fn; entry->context = rx_h->context; spin_lock(&mvm->async_handlers_lock); list_add_tail(&entry->list, &mvm->async_handlers_list); spin_unlock(&mvm->async_handlers_lock); schedule_work(&mvm->async_handlers_wk); break; } } static void iwl_mvm_rx(struct iwl_op_mode *op_mode, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd); #ifdef CPTCFG_IWLWIFI_DEVICE_TESTMODE /* * RX data may be forwarded to userspace in case the user * requested to monitor the rx w/o affecting the regular flow. * In this case the iwl_test object will handle forwarding the rx * data to user space. */ iwl_tm_gnl_send_rx(mvm->trans, rxb); #endif if (likely(cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD))) iwl_mvm_rx_rx_mpdu(mvm, napi, rxb); else if (cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_PHY_CMD)) iwl_mvm_rx_rx_phy_cmd(mvm, rxb); else iwl_mvm_rx_common(mvm, rxb, pkt); } static void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd); #ifdef CPTCFG_IWLWIFI_DEVICE_TESTMODE /* * RX data may be forwarded to userspace in case the user * requested to monitor the rx w/o affecting the regular flow. * In this case the iwl_test object will handle forwarding the rx * data to user space. 
*/ iwl_tm_gnl_send_rx(mvm->trans, rxb); #endif if (likely(cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD))) iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, 0); else if (unlikely(cmd == WIDE_ID(DATA_PATH_GROUP, RX_QUEUES_NOTIFICATION))) iwl_mvm_rx_queue_notif(mvm, napi, rxb, 0); else if (cmd == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE)) iwl_mvm_rx_frame_release(mvm, napi, rxb, 0); else if (cmd == WIDE_ID(DATA_PATH_GROUP, RX_NO_DATA_NOTIF)) iwl_mvm_rx_monitor_no_data(mvm, napi, rxb, 0); else iwl_mvm_rx_common(mvm, rxb, pkt); } static void iwl_mvm_async_cb(struct iwl_op_mode *op_mode, const struct iwl_device_cmd *cmd) { struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); /* * For now, we only set the CMD_WANT_ASYNC_CALLBACK for ADD_STA * commands that need to block the Tx queues. */ iwl_trans_block_txq_ptrs(mvm->trans, false); } static int iwl_mvm_is_static_queue(struct iwl_mvm *mvm, int queue) { return queue == mvm->aux_queue || queue == mvm->probe_queue || queue == mvm->p2p_dev_queue || queue == mvm->snif_queue; } static void iwl_mvm_queue_state_change(struct iwl_op_mode *op_mode, int hw_queue, bool start) { struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); struct ieee80211_sta *sta; struct ieee80211_txq *txq; struct iwl_mvm_txq *mvmtxq; int i; unsigned long tid_bitmap; struct iwl_mvm_sta *mvmsta; u8 sta_id; sta_id = iwl_mvm_has_new_tx_api(mvm) ? mvm->tvqm_info[hw_queue].sta_id : mvm->queue_info[hw_queue].ra_sta_id; if (WARN_ON_ONCE(sta_id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))) return; rcu_read_lock(); sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); if (IS_ERR_OR_NULL(sta)) goto out; mvmsta = iwl_mvm_sta_from_mac80211(sta); if (iwl_mvm_is_static_queue(mvm, hw_queue)) { if (!start) ieee80211_stop_queues(mvm->hw); else if (mvmsta->sta_state != IEEE80211_STA_NOTEXIST) ieee80211_wake_queues(mvm->hw); goto out; } if (iwl_mvm_has_new_tx_api(mvm)) { int tid = mvm->tvqm_info[hw_queue].txq_tid; tid_bitmap = BIT(tid); } else { tid_bitmap = mvm->queue_info[hw_queue].tid_bitmap; } for_each_set_bit(i, &tid_bitmap, IWL_MAX_TID_COUNT + 1) { int tid = i; if (tid == IWL_MAX_TID_COUNT) tid = IEEE80211_NUM_TIDS; txq = sta->txq[tid]; mvmtxq = iwl_mvm_txq_from_mac80211(txq); mvmtxq->stopped = !start; if (start && mvmsta->sta_state != IEEE80211_STA_NOTEXIST) iwl_mvm_mac_itxq_xmit(mvm->hw, txq); } out: rcu_read_unlock(); } static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int hw_queue) { iwl_mvm_queue_state_change(op_mode, hw_queue, false); } static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int hw_queue) { iwl_mvm_queue_state_change(op_mode, hw_queue, true); } static void iwl_mvm_set_rfkill_state(struct iwl_mvm *mvm) { bool state = iwl_mvm_is_radio_killed(mvm); if (state) wake_up(&mvm->rx_sync_waitq); wiphy_rfkill_set_hw_state(mvm->hw->wiphy, state); } void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state) { if (state) set_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status); else clear_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status); iwl_mvm_set_rfkill_state(mvm); } static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state) { struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); bool rfkill_safe_init_done = READ_ONCE(mvm->rfkill_safe_init_done); bool unified = iwl_mvm_has_unified_ucode(mvm); if (state) set_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status); else clear_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status); iwl_mvm_set_rfkill_state(mvm); /* iwl_run_init_mvm_ucode is waiting for results, abort it. 
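 * (If the rfkill-safe part of INIT never ran, there are no pending
 * notification waits to abort, hence the check below.)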
*/ if (rfkill_safe_init_done) iwl_abort_notification_waits(&mvm->notif_wait); /* * Don't ask the transport to stop the firmware. We'll do it * after cfg80211 takes us down. */ if (unified) return false; /* * Stop the device if we run OPERATIONAL firmware or if we are in the * middle of the calibrations. */ return state && rfkill_safe_init_done; } static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb) { struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); struct ieee80211_tx_info *info; info = IEEE80211_SKB_CB(skb); iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]); ieee80211_free_txskb(mvm->hw, skb); } struct iwl_mvm_reprobe { struct device *dev; struct work_struct work; }; static void iwl_mvm_reprobe_wk(struct work_struct *wk) { struct iwl_mvm_reprobe *reprobe; reprobe = container_of(wk, struct iwl_mvm_reprobe, work); if (device_reprobe(reprobe->dev)) dev_err(reprobe->dev, "reprobe failed!\n"); kfree(reprobe); module_put(THIS_MODULE); } void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error) { iwl_abort_notification_waits(&mvm->notif_wait); del_timer(&mvm->fwrt.dump.periodic_trig); /* * This is a bit racy, but worst case we tell mac80211 about * a stopped/aborted scan when that was already done which * is not a problem. It is necessary to abort any os scan * here because mac80211 requires having the scan cleared * before restarting. * We'll reset the scan_status to NONE in restart cleanup in * the next start() call from mac80211. If restart isn't called * (no fw restart) scan status will stay busy. */ iwl_mvm_report_scan_aborted(mvm); /* * If we're restarting already, don't cycle restarts. * If INIT fw asserted, it will likely fail again. * If WoWLAN fw asserted, don't restart either, mac80211 * can't recover this since we're already half suspended. */ if (!mvm->fw_restart && fw_error) { iwl_fw_error_collect(&mvm->fwrt); } else if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { struct iwl_mvm_reprobe *reprobe; IWL_ERR(mvm, "Firmware error during reconfiguration - reprobe!\n"); /* * get a module reference to avoid doing this while unloading * anyway and to avoid scheduling a work with code that's * being removed. 
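 * (If try_module_get() fails, the module is already being unloaded, so
 * the reprobe is simply skipped rather than racing against the removal.)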
*/ if (!try_module_get(THIS_MODULE)) { IWL_ERR(mvm, "Module is being unloaded - abort\n"); return; } reprobe = kzalloc(sizeof(*reprobe), GFP_ATOMIC); if (!reprobe) { module_put(THIS_MODULE); return; } reprobe->dev = mvm->trans->dev; INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk); schedule_work(&reprobe->work); } else if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status)) { IWL_ERR(mvm, "HW restart already requested, but not started\n"); } else if (mvm->fwrt.cur_fw_img == IWL_UCODE_REGULAR && mvm->hw_registered && !test_bit(STATUS_TRANS_DEAD, &mvm->trans->status)) { if (mvm->fw->ucode_capa.error_log_size) { u32 src_size = mvm->fw->ucode_capa.error_log_size; u32 src_addr = mvm->fw->ucode_capa.error_log_addr; u8 *recover_buf = kzalloc(src_size, GFP_ATOMIC); if (recover_buf) { mvm->error_recovery_buf = recover_buf; iwl_trans_read_mem_bytes(mvm->trans, src_addr, recover_buf, src_size); } } iwl_fw_error_collect(&mvm->fwrt); #ifdef CPTCFG_IWLWIFI_DEVICE_TESTMODE iwl_dnt_dispatch_handle_nic_err(mvm->trans); #endif if (fw_error && mvm->fw_restart > 0) mvm->fw_restart--; set_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status); ieee80211_restart_hw(mvm->hw); } } static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode) { struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); if (!test_bit(STATUS_TRANS_DEAD, &mvm->trans->status)) iwl_mvm_dump_nic_error_log(mvm); iwl_mvm_nic_restart(mvm, true); } static void iwl_mvm_cmd_queue_full(struct iwl_op_mode *op_mode) { struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); WARN_ON(1); iwl_mvm_nic_restart(mvm, true); } #define IWL_MVM_COMMON_OPS \ /* these could be differentiated */ \ .async_cb = iwl_mvm_async_cb, \ .queue_full = iwl_mvm_stop_sw_queue, \ .queue_not_full = iwl_mvm_wake_sw_queue, \ .hw_rf_kill = iwl_mvm_set_hw_rfkill_state, \ .free_skb = iwl_mvm_free_skb, \ .nic_error = iwl_mvm_nic_error, \ .cmd_queue_full = iwl_mvm_cmd_queue_full, \ .nic_config = iwl_mvm_nic_config, \ /* as we only register one, these MUST be common! */ \ .start = iwl_op_mode_mvm_start, \ .stop = iwl_op_mode_mvm_stop #ifdef CPTCFG_IWLWIFI_DEVICE_TESTMODE #define IWL_MVM_COMMON_TEST_OPS \ .test_ops = { \ .send_hcmd = iwl_mvm_tm_send_hcmd, \ }, #else #define IWL_MVM_COMMON_TEST_OPS #endif static const struct iwl_op_mode_ops iwl_mvm_ops = { IWL_MVM_COMMON_OPS, IWL_MVM_COMMON_TEST_OPS .rx = iwl_mvm_rx, }; static void iwl_mvm_rx_mq_rss(struct iwl_op_mode *op_mode, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb, unsigned int queue) { struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); struct iwl_rx_packet *pkt = rxb_addr(rxb); u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd); if (unlikely(cmd == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE))) iwl_mvm_rx_frame_release(mvm, napi, rxb, queue); else if (unlikely(cmd == WIDE_ID(DATA_PATH_GROUP, RX_QUEUES_NOTIFICATION))) iwl_mvm_rx_queue_notif(mvm, napi, rxb, queue); else if (likely(cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD))) iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, queue); } static const struct iwl_op_mode_ops iwl_mvm_ops_mq = { IWL_MVM_COMMON_OPS, IWL_MVM_COMMON_TEST_OPS .rx = iwl_mvm_rx_mq, .rx_rss = iwl_mvm_rx_mq_rss, }; backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c000066400000000000000000000243641351064634500273230ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. 
* * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 *
 *****************************************************************************/
#include <net/mac80211.h>
#include "fw-api.h"
#include "mvm.h"

/* Maps the driver specific channel width definition to the fw values */
u8 iwl_mvm_get_channel_width(struct cfg80211_chan_def *chandef)
{
	switch (chandef->width) {
	case NL80211_CHAN_WIDTH_20_NOHT:
	case NL80211_CHAN_WIDTH_20:
		return PHY_VHT_CHANNEL_MODE20;
	case NL80211_CHAN_WIDTH_40:
		return PHY_VHT_CHANNEL_MODE40;
	case NL80211_CHAN_WIDTH_80:
		return PHY_VHT_CHANNEL_MODE80;
	case NL80211_CHAN_WIDTH_160:
		return PHY_VHT_CHANNEL_MODE160;
	default:
		WARN(1, "Invalid channel width=%u", chandef->width);
		return PHY_VHT_CHANNEL_MODE20;
	}
}

/*
 * Maps the driver specific control channel position (relative to the center
 * freq) definitions to the fw values
 */
u8 iwl_mvm_get_ctrl_pos(struct cfg80211_chan_def *chandef)
{
	switch (chandef->chan->center_freq - chandef->center_freq1) {
	case -70:
		return PHY_VHT_CTRL_POS_4_BELOW;
	case -50:
		return PHY_VHT_CTRL_POS_3_BELOW;
	case -30:
		return PHY_VHT_CTRL_POS_2_BELOW;
	case -10:
		return PHY_VHT_CTRL_POS_1_BELOW;
	case 10:
		return PHY_VHT_CTRL_POS_1_ABOVE;
	case 30:
		return PHY_VHT_CTRL_POS_2_ABOVE;
	case 50:
		return PHY_VHT_CTRL_POS_3_ABOVE;
	case 70:
		return PHY_VHT_CTRL_POS_4_ABOVE;
	default:
		WARN(1, "Invalid channel definition");
		/* fall through */
	case 0:
		/*
		 * The FW is expected to check the control channel position
		 * only when in HT/VHT and the channel width is not 20MHz.
		 * Return this value as the default one.
		 */
		return PHY_VHT_CTRL_POS_1_BELOW;
	}
}

/*
 * Construct the generic fields of the PHY context command
 */
static void iwl_mvm_phy_ctxt_cmd_hdr(struct iwl_mvm_phy_ctxt *ctxt,
				     struct iwl_phy_context_cmd *cmd,
				     u32 action, u32 apply_time)
{
	memset(cmd, 0, sizeof(struct iwl_phy_context_cmd));

	cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(ctxt->id,
							    ctxt->color));
	cmd->action = cpu_to_le32(action);
	cmd->apply_time = cpu_to_le32(apply_time);
}

/*
 * Add the phy configuration to the PHY context command
 */
static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm,
				      struct iwl_phy_context_cmd *cmd,
				      struct cfg80211_chan_def *chandef,
				      u8 chains_static, u8 chains_dynamic)
{
	u8 active_cnt, idle_cnt;
	struct iwl_phy_context_cmd_tail *tail =
		iwl_mvm_chan_info_cmd_tail(mvm, &cmd->ci);

	/* Set the channel info data */
	iwl_mvm_set_chan_info_chandef(mvm, &cmd->ci, chandef);

	/* Set the rx chains */
	idle_cnt = chains_static;
	active_cnt = chains_dynamic;

	/*
	 * In scenarios where we only ever use single-stream rates,
	 * i.e. legacy 11b/g/a associations, single-stream APs or even
	 * static SMPS, enable both chains to get diversity, improving
	 * the case where we're far enough from the AP that attenuation
	 * between the two antennas is sufficiently different to impact
	 * performance.
	 */
	if (active_cnt == 1 && iwl_mvm_rx_diversity_allowed(mvm)) {
		idle_cnt = 2;
		active_cnt = 2;
	}

	tail->rxchain_info = cpu_to_le32(iwl_mvm_get_valid_rx_ant(mvm) <<
					 PHY_RX_CHAIN_VALID_POS);
	tail->rxchain_info |= cpu_to_le32(idle_cnt << PHY_RX_CHAIN_CNT_POS);
	tail->rxchain_info |= cpu_to_le32(active_cnt <<
					  PHY_RX_CHAIN_MIMO_CNT_POS);
#ifdef CPTCFG_IWLWIFI_DEBUGFS
	if (unlikely(mvm->dbgfs_rx_phyinfo))
		tail->rxchain_info = cpu_to_le32(mvm->dbgfs_rx_phyinfo);
#endif

	tail->txchain_info = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
}

/*
 * Send a command to apply the current phy configuration.
 * The command is sent only if something in the configuration changed: in case
 * that this is the first time that the phy configuration is applied or in
 * case that the phy configuration changed from the previous apply.
 */
static int iwl_mvm_phy_ctxt_apply(struct iwl_mvm *mvm,
				  struct iwl_mvm_phy_ctxt *ctxt,
				  struct cfg80211_chan_def *chandef,
				  u8 chains_static, u8 chains_dynamic,
				  u32 action, u32 apply_time)
{
	struct iwl_phy_context_cmd cmd;
	int ret;
	u16 len = sizeof(cmd) - iwl_mvm_chan_info_padding(mvm);

	/* Set the command header fields */
	iwl_mvm_phy_ctxt_cmd_hdr(ctxt, &cmd, action, apply_time);

	/* Set the command data */
	iwl_mvm_phy_ctxt_cmd_data(mvm, &cmd, chandef,
				  chains_static, chains_dynamic);

	ret = iwl_mvm_send_cmd_pdu(mvm, PHY_CONTEXT_CMD, 0, len, &cmd);
	if (ret)
		IWL_ERR(mvm, "PHY ctxt cmd error. ret=%d\n", ret);
	return ret;
}

/*
 * Send a command to add a PHY context based on the current HW configuration.
 */
int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
			 struct cfg80211_chan_def *chandef,
			 u8 chains_static, u8 chains_dynamic)
{
	WARN_ON(!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
		ctxt->ref);
	lockdep_assert_held(&mvm->mutex);

	ctxt->channel = chandef->chan;

	return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef,
				      chains_static, chains_dynamic,
				      FW_CTXT_ACTION_ADD, 0);
}

/*
 * Update the number of references to the given PHY context. This is valid only
 * in case the PHY context was already created, i.e., its reference count > 0.
 */
void iwl_mvm_phy_ctxt_ref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt)
{
	lockdep_assert_held(&mvm->mutex);
	ctxt->ref++;
}

/*
 * Send a command to modify the PHY context based on the current HW
 * configuration. Note that the function does not check that the configuration
 * changed.
 */
int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
			     struct cfg80211_chan_def *chandef,
			     u8 chains_static, u8 chains_dynamic)
{
	enum iwl_ctxt_action action = FW_CTXT_ACTION_MODIFY;

	lockdep_assert_held(&mvm->mutex);

	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT) &&
	    ctxt->channel->band != chandef->chan->band) {
		int ret;

		/* ... remove it here ... */
		ret = iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef,
					     chains_static, chains_dynamic,
					     FW_CTXT_ACTION_REMOVE, 0);
		if (ret)
			return ret;

		/* ... and proceed to add it again */
		action = FW_CTXT_ACTION_ADD;
	}

	ctxt->channel = chandef->chan;
	ctxt->width = chandef->width;
	return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef,
				      chains_static, chains_dynamic,
				      action, 0);
}

void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt)
{
	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON_ONCE(!ctxt))
		return;

	ctxt->ref--;

	/*
	 * Move unused phy's to a default channel. When the phy is moved, the
	 * fw will clean up the immediate quiet bit if it was previously set,
	 * otherwise we might not be able to reuse this phy.
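	 * (The "default channel" chosen below is simply the first 2.4 GHz
	 *  channel; it acts as a harmless placeholder for a PHY context
	 *  that currently has no users.)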
*/ if (ctxt->ref == 0) { struct ieee80211_channel *chan; struct cfg80211_chan_def chandef; chan = &mvm->hw->wiphy->bands[NL80211_BAND_2GHZ]->channels[0]; cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT); iwl_mvm_phy_ctxt_changed(mvm, ctxt, &chandef, 1, 1); } } static void iwl_mvm_binding_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif) { unsigned long *data = _data; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); if (!mvmvif->phy_ctxt) return; if (vif->type == NL80211_IFTYPE_STATION || vif->type == NL80211_IFTYPE_AP) __set_bit(mvmvif->phy_ctxt->id, data); } int iwl_mvm_phy_ctx_count(struct iwl_mvm *mvm) { unsigned long phy_ctxt_counter = 0; ieee80211_iterate_active_interfaces_atomic(mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_binding_iterator, &phy_ctxt_counter); return hweight8(phy_ctxt_counter); } backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/mvm/power.c000066400000000000000000000755251351064634500267040ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2017 Intel Deutschland GmbH * Copyright (C) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2017 Intel Deutschland GmbH * Copyright (C) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/etherdevice.h> #include <net/mac80211.h> #include "iwl-debug.h" #include "mvm.h" #include "iwl-modparams.h" #include "fw/api/power.h" #define POWER_KEEP_ALIVE_PERIOD_SEC 25 static int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm *mvm, struct iwl_beacon_filter_cmd *cmd, u32 flags) { u16 len; IWL_DEBUG_POWER(mvm, "ba_enable_beacon_abort is: %d\n", le32_to_cpu(cmd->ba_enable_beacon_abort)); IWL_DEBUG_POWER(mvm, "ba_escape_timer is: %d\n", le32_to_cpu(cmd->ba_escape_timer)); IWL_DEBUG_POWER(mvm, "bf_debug_flag is: %d\n", le32_to_cpu(cmd->bf_debug_flag)); IWL_DEBUG_POWER(mvm, "bf_enable_beacon_filter is: %d\n", le32_to_cpu(cmd->bf_enable_beacon_filter)); IWL_DEBUG_POWER(mvm, "bf_energy_delta is: %d\n", le32_to_cpu(cmd->bf_energy_delta)); IWL_DEBUG_POWER(mvm, "bf_escape_timer is: %d\n", le32_to_cpu(cmd->bf_escape_timer)); IWL_DEBUG_POWER(mvm, "bf_roaming_energy_delta is: %d\n", le32_to_cpu(cmd->bf_roaming_energy_delta)); IWL_DEBUG_POWER(mvm, "bf_roaming_state is: %d\n", le32_to_cpu(cmd->bf_roaming_state)); IWL_DEBUG_POWER(mvm, "bf_temp_threshold is: %d\n", le32_to_cpu(cmd->bf_temp_threshold)); IWL_DEBUG_POWER(mvm, "bf_temp_fast_filter is: %d\n", le32_to_cpu(cmd->bf_temp_fast_filter)); IWL_DEBUG_POWER(mvm, "bf_temp_slow_filter is: %d\n", le32_to_cpu(cmd->bf_temp_slow_filter)); IWL_DEBUG_POWER(mvm, "bf_threshold_absolute_low is: %d, %d\n", le32_to_cpu(cmd->bf_threshold_absolute_low[0]), le32_to_cpu(cmd->bf_threshold_absolute_low[1])); IWL_DEBUG_POWER(mvm, "bf_threshold_absolute_high is: %d, %d\n", le32_to_cpu(cmd->bf_threshold_absolute_high[0]), le32_to_cpu(cmd->bf_threshold_absolute_high[1])); if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BEACON_FILTER_V4)) len = sizeof(struct iwl_beacon_filter_cmd); else len = offsetof(struct iwl_beacon_filter_cmd, bf_threshold_absolute_low); return iwl_mvm_send_cmd_pdu(mvm, REPLY_BEACON_FILTERING_CMD, flags, len, cmd); } static void iwl_mvm_beacon_filter_set_cqm_params(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_beacon_filter_cmd *cmd) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); if (vif->bss_conf.cqm_rssi_thold) { cmd->bf_energy_delta = cpu_to_le32(vif->bss_conf.cqm_rssi_hyst); /* fw uses an absolute value for this */ cmd->bf_roaming_state = cpu_to_le32(-vif->bss_conf.cqm_rssi_thold); } cmd->ba_enable_beacon_abort = cpu_to_le32(mvmvif->bf_data.ba_enabled); } static void iwl_mvm_power_log(struct iwl_mvm *mvm, struct iwl_mac_power_cmd *cmd) { IWL_DEBUG_POWER(mvm, "Sending power table command on mac id 0x%X for power level %d, flags = 0x%X\n", cmd->id_and_color, iwlmvm_mod_params.power_scheme, le16_to_cpu(cmd->flags)); IWL_DEBUG_POWER(mvm, "Keep alive = %u sec\n", le16_to_cpu(cmd->keep_alive_seconds)); if (!(cmd->flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK))) { IWL_DEBUG_POWER(mvm, "Disable power management\n"); return; } IWL_DEBUG_POWER(mvm, "Rx timeout = %u usec\n", le32_to_cpu(cmd->rx_data_timeout));
IWL_DEBUG_POWER(mvm, "Tx timeout = %u usec\n", le32_to_cpu(cmd->tx_data_timeout)); if (cmd->flags & cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK)) IWL_DEBUG_POWER(mvm, "DTIM periods to skip = %u\n", cmd->skip_dtim_periods); if (cmd->flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK)) IWL_DEBUG_POWER(mvm, "LP RX RSSI threshold = %u\n", cmd->lprx_rssi_threshold); if (cmd->flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK)) { IWL_DEBUG_POWER(mvm, "uAPSD enabled\n"); IWL_DEBUG_POWER(mvm, "Rx timeout (uAPSD) = %u usec\n", le32_to_cpu(cmd->rx_data_timeout_uapsd)); IWL_DEBUG_POWER(mvm, "Tx timeout (uAPSD) = %u usec\n", le32_to_cpu(cmd->tx_data_timeout_uapsd)); IWL_DEBUG_POWER(mvm, "QNDP TID = %d\n", cmd->qndp_tid); IWL_DEBUG_POWER(mvm, "ACs flags = 0x%x\n", cmd->uapsd_ac_flags); IWL_DEBUG_POWER(mvm, "Max SP = %d\n", cmd->uapsd_max_sp); } } static void iwl_mvm_power_configure_uapsd(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_mac_power_cmd *cmd) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); enum ieee80211_ac_numbers ac; bool tid_found = false; #ifdef CPTCFG_IWLWIFI_DEBUGFS /* set advanced pm flag with no uapsd ACs to enable ps-poll */ if (mvmvif->dbgfs_pm.use_ps_poll) { cmd->flags |= cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK); return; } #endif #ifdef CPTCFG_IWLWIFI_SUPPORT_DEBUG_OVERRIDES if (mvm->trans->dbg_cfg.MVM_USE_PS_POLL) { cmd->flags |= cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK); return; } #endif for (ac = IEEE80211_AC_VO; ac <= IEEE80211_AC_BK; ac++) { if (!mvmvif->queue_params[ac].uapsd) continue; if (mvm->fwrt.cur_fw_img != IWL_UCODE_WOWLAN) cmd->flags |= cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK); cmd->uapsd_ac_flags |= BIT(ac); /* QNDP TID - the highest TID with no admission control */ if (!tid_found && !mvmvif->queue_params[ac].acm) { tid_found = true; switch (ac) { case IEEE80211_AC_VO: cmd->qndp_tid = 6; break; case IEEE80211_AC_VI: cmd->qndp_tid = 5; break; case IEEE80211_AC_BE: cmd->qndp_tid = 0; break; case IEEE80211_AC_BK: cmd->qndp_tid = 1; break; } } } cmd->flags |= cpu_to_le16(POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK); if (cmd->uapsd_ac_flags == (BIT(IEEE80211_AC_VO) | BIT(IEEE80211_AC_VI) | BIT(IEEE80211_AC_BE) | BIT(IEEE80211_AC_BK))) { cmd->flags |= cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK); cmd->snooze_interval = cpu_to_le16(IWL_MVM_PS_SNOOZE_INTERVAL); cmd->snooze_window = (mvm->fwrt.cur_fw_img == IWL_UCODE_WOWLAN) ? 
cpu_to_le16(IWL_MVM_WOWLAN_PS_SNOOZE_WINDOW) : cpu_to_le16(IWL_MVM_PS_SNOOZE_WINDOW); } cmd->uapsd_max_sp = mvm->hw->uapsd_max_sp_len; if (mvm->fwrt.cur_fw_img == IWL_UCODE_WOWLAN || cmd->flags & cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) { cmd->rx_data_timeout_uapsd = cpu_to_le32(IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT); cmd->tx_data_timeout_uapsd = cpu_to_le32(IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT); } else { cmd->rx_data_timeout_uapsd = cpu_to_le32(IWL_MVM_UAPSD_RX_DATA_TIMEOUT); cmd->tx_data_timeout_uapsd = cpu_to_le32(IWL_MVM_UAPSD_TX_DATA_TIMEOUT); } if (cmd->flags & cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) { cmd->heavy_tx_thld_packets = IWL_MVM_PS_SNOOZE_HEAVY_TX_THLD_PACKETS; cmd->heavy_rx_thld_packets = IWL_MVM_PS_SNOOZE_HEAVY_RX_THLD_PACKETS; } else { cmd->heavy_tx_thld_packets = IWL_MVM_PS_HEAVY_TX_THLD_PACKETS; cmd->heavy_rx_thld_packets = IWL_MVM_PS_HEAVY_RX_THLD_PACKETS; } cmd->heavy_tx_thld_percentage = IWL_MVM_PS_HEAVY_TX_THLD_PERCENT; cmd->heavy_rx_thld_percentage = IWL_MVM_PS_HEAVY_RX_THLD_PERCENT; } struct iwl_allow_uapsd_iface_iterator_data { struct ieee80211_vif *current_vif; bool allow_uapsd; }; static void iwl_mvm_allow_uapsd_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif) { struct iwl_allow_uapsd_iface_iterator_data *data = _data; struct iwl_mvm_vif *other_mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_vif *curr_mvmvif = iwl_mvm_vif_from_mac80211(data->current_vif); /* exclude the given vif */ if (vif == data->current_vif) return; switch (vif->type) { case NL80211_IFTYPE_AP: case NL80211_IFTYPE_ADHOC: case NL80211_IFTYPE_NAN: data->allow_uapsd = false; break; case NL80211_IFTYPE_STATION: /* allow UAPSD if P2P interface and BSS station interface share * the same channel. */ if (vif->bss_conf.assoc && other_mvmvif->phy_ctxt && curr_mvmvif->phy_ctxt && (other_mvmvif->phy_ctxt->id != curr_mvmvif->phy_ctxt->id)) data->allow_uapsd = false; break; default: break; } } static bool iwl_mvm_power_allow_uapsd(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_allow_uapsd_iface_iterator_data data = { .current_vif = vif, .allow_uapsd = true, }; if (!memcmp(mvmvif->uapsd_misbehaving_bssid, vif->bss_conf.bssid, ETH_ALEN)) return false; /* * Avoid using uAPSD if P2P client is associated to GO that uses * opportunistic power save. This is due to current FW limitation. 
*/ if (vif->p2p && (vif->bss_conf.p2p_noa_attr.oppps_ctwindow & IEEE80211_P2P_OPPPS_ENABLE_BIT)) return false; if (vif->p2p && !iwl_mvm_is_p2p_scm_uapsd_supported(mvm)) return false; ieee80211_iterate_active_interfaces_atomic(mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_allow_uapsd_iterator, &data); return data.allow_uapsd; } static bool iwl_mvm_power_is_radar(struct ieee80211_vif *vif) { struct ieee80211_chanctx_conf *chanctx_conf; struct ieee80211_channel *chan; bool radar_detect = false; rcu_read_lock(); chanctx_conf = rcu_dereference(vif->chanctx_conf); WARN_ON(!chanctx_conf); if (chanctx_conf) { chan = chanctx_conf->def.chan; radar_detect = chan->flags & IEEE80211_CHAN_RADAR; } rcu_read_unlock(); return radar_detect; } static void iwl_mvm_power_config_skip_dtim(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_mac_power_cmd *cmd, bool host_awake) { int dtimper = vif->bss_conf.dtim_period ?: 1; int skip; /* disable, in case we're supposed to override */ cmd->skip_dtim_periods = 0; cmd->flags &= ~cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK); if (iwl_mvm_power_is_radar(vif)) return; if (dtimper >= 10) return; /* TODO: check that multicast wake lock is off */ if (host_awake) { if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_LP) return; skip = 2; } else { int dtimper_tu = dtimper * vif->bss_conf.beacon_int; if (WARN_ON(!dtimper_tu)) return; /* configure skip over dtim up to 306TU - 314 msec */ skip = max_t(u8, 1, 306 / dtimper_tu); } /* the firmware really expects "look at every X DTIMs", so add 1 */ cmd->skip_dtim_periods = 1 + skip; cmd->flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK); } static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_mac_power_cmd *cmd, bool host_awake) { int dtimper, bi; int keep_alive; struct iwl_mvm_vif *mvmvif __maybe_unused = iwl_mvm_vif_from_mac80211(vif); cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)); dtimper = vif->bss_conf.dtim_period; bi = vif->bss_conf.beacon_int; /* * Regardless of power management state the driver must set * keep alive period. FW will use it for sending keep alive NDPs * immediately after association. 
Check that keep alive period * is at least 3 * DTIM */ keep_alive = DIV_ROUND_UP(ieee80211_tu_to_usec(3 * dtimper * bi), USEC_PER_SEC); keep_alive = max(keep_alive, POWER_KEEP_ALIVE_PERIOD_SEC); cmd->keep_alive_seconds = cpu_to_le16(keep_alive); if (mvm->ps_disabled) return; cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK); if (!vif->bss_conf.ps || !mvmvif->pm_enabled) return; if (iwl_mvm_vif_low_latency(mvmvif) && vif->p2p && (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS) || !IWL_MVM_P2P_LOWLATENCY_PS_ENABLE)) return; cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK); if (vif->bss_conf.beacon_rate && (vif->bss_conf.beacon_rate->bitrate == 10 || vif->bss_conf.beacon_rate->bitrate == 60)) { cmd->flags |= cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK); cmd->lprx_rssi_threshold = POWER_LPRX_RSSI_THRESHOLD; } iwl_mvm_power_config_skip_dtim(mvm, vif, cmd, host_awake); if (!host_awake) { cmd->rx_data_timeout = cpu_to_le32(IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT); cmd->tx_data_timeout = cpu_to_le32(IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT); } else if (iwl_mvm_vif_low_latency(mvmvif) && vif->p2p && fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS)) { cmd->tx_data_timeout = cpu_to_le32(IWL_MVM_SHORT_PS_TX_DATA_TIMEOUT); cmd->rx_data_timeout = cpu_to_le32(IWL_MVM_SHORT_PS_RX_DATA_TIMEOUT); } else { cmd->rx_data_timeout = cpu_to_le32(IWL_MVM_DEFAULT_PS_RX_DATA_TIMEOUT); cmd->tx_data_timeout = cpu_to_le32(IWL_MVM_DEFAULT_PS_TX_DATA_TIMEOUT); } if (iwl_mvm_power_allow_uapsd(mvm, vif)) iwl_mvm_power_configure_uapsd(mvm, vif, cmd); #ifdef CPTCFG_IWLWIFI_DEBUGFS if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_KEEP_ALIVE) cmd->keep_alive_seconds = cpu_to_le16(mvmvif->dbgfs_pm.keep_alive_seconds); if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SKIP_OVER_DTIM) { if (mvmvif->dbgfs_pm.skip_over_dtim) cmd->flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK); else cmd->flags &= cpu_to_le16(~POWER_FLAGS_SKIP_OVER_DTIM_MSK); } if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_RX_DATA_TIMEOUT) cmd->rx_data_timeout = cpu_to_le32(mvmvif->dbgfs_pm.rx_data_timeout); if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_TX_DATA_TIMEOUT) cmd->tx_data_timeout = cpu_to_le32(mvmvif->dbgfs_pm.tx_data_timeout); if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS) cmd->skip_dtim_periods = mvmvif->dbgfs_pm.skip_dtim_periods; if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_LPRX_ENA) { if (mvmvif->dbgfs_pm.lprx_ena) cmd->flags |= cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK); else cmd->flags &= cpu_to_le16(~POWER_FLAGS_LPRX_ENA_MSK); } if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD) cmd->lprx_rssi_threshold = mvmvif->dbgfs_pm.lprx_rssi_threshold; if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SNOOZE_ENABLE) { if (mvmvif->dbgfs_pm.snooze_ena) cmd->flags |= cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK); else cmd->flags &= cpu_to_le16(~POWER_FLAGS_SNOOZE_ENA_MSK); } if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_UAPSD_MISBEHAVING) { u16 flag = POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK; if (mvmvif->dbgfs_pm.uapsd_misbehaving) cmd->flags |= cpu_to_le16(flag); else cmd->flags &= cpu_to_le16(flag); } #endif /* CPTCFG_IWLWIFI_DEBUGFS */ } static int iwl_mvm_power_send_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mac_power_cmd cmd = {}; iwl_mvm_power_build_cmd(mvm, vif, &cmd, mvm->fwrt.cur_fw_img != IWL_UCODE_WOWLAN); iwl_mvm_power_log(mvm, &cmd); #ifdef CPTCFG_IWLWIFI_DEBUGFS memcpy(&iwl_mvm_vif_from_mac80211(vif)->mac_pwr_cmd, &cmd, sizeof(cmd)); #endif return iwl_mvm_send_cmd_pdu(mvm, 
MAC_PM_POWER_TABLE, 0, sizeof(cmd), &cmd); } int iwl_mvm_power_update_device(struct iwl_mvm *mvm) { struct iwl_device_power_cmd cmd = { .flags = 0, }; if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM) mvm->ps_disabled = true; if (!mvm->ps_disabled) cmd.flags |= cpu_to_le16(DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK); #ifdef CPTCFG_IWLWIFI_DEBUGFS if ((mvm->fwrt.cur_fw_img == IWL_UCODE_WOWLAN) ? mvm->disable_power_off_d3 : mvm->disable_power_off) cmd.flags &= cpu_to_le16(~DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK); #endif if (mvm->ext_clock_valid) cmd.flags |= cpu_to_le16(DEVICE_POWER_FLAGS_32K_CLK_VALID_MSK); IWL_DEBUG_POWER(mvm, "Sending device power command with flags = 0x%X\n", cmd.flags); return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, 0, sizeof(cmd), &cmd); } void iwl_mvm_power_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); if (memcmp(vif->bss_conf.bssid, mvmvif->uapsd_misbehaving_bssid, ETH_ALEN)) eth_zero_addr(mvmvif->uapsd_misbehaving_bssid); } static void iwl_mvm_power_uapsd_misbehav_ap_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif) { u8 *ap_sta_id = _data; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); /* The ap_sta_id is not expected to change during current association * so no explicit protection is needed */ if (mvmvif->ap_sta_id == *ap_sta_id) memcpy(mvmvif->uapsd_misbehaving_bssid, vif->bss_conf.bssid, ETH_ALEN); } void iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_uapsd_misbehaving_ap_notif *notif = (void *)pkt->data; u8 ap_sta_id = le32_to_cpu(notif->sta_id); ieee80211_iterate_active_interfaces_atomic( mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_power_uapsd_misbehav_ap_iterator, &ap_sta_id); } struct iwl_power_vifs { struct iwl_mvm *mvm; struct ieee80211_vif *bss_vif; struct ieee80211_vif *p2p_vif; struct ieee80211_vif *ap_vif; struct ieee80211_vif *monitor_vif; bool p2p_active; bool bss_active; bool ap_active; bool monitor_active; }; static void iwl_mvm_power_disable_pm_iterator(void *_data, u8* mac, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); mvmvif->pm_enabled = false; } static void iwl_mvm_power_ps_disabled_iterator(void *_data, u8* mac, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); bool *disable_ps = _data; if (mvmvif->phy_ctxt && mvmvif->phy_ctxt->id < NUM_PHY_CTX) *disable_ps |= mvmvif->ps_disabled; } static void iwl_mvm_power_get_vifs_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_power_vifs *power_iterator = _data; bool active = mvmvif->phy_ctxt && mvmvif->phy_ctxt->id < NUM_PHY_CTX; switch (ieee80211_vif_type_p2p(vif)) { case NL80211_IFTYPE_P2P_DEVICE: case NL80211_IFTYPE_NAN: break; case NL80211_IFTYPE_P2P_GO: case NL80211_IFTYPE_AP: /* only a single MAC of the same type */ WARN_ON(power_iterator->ap_vif); power_iterator->ap_vif = vif; if (active) power_iterator->ap_active = true; break; case NL80211_IFTYPE_MONITOR: /* only a single MAC of the same type */ WARN_ON(power_iterator->monitor_vif); power_iterator->monitor_vif = vif; if (active) power_iterator->monitor_active = true; break; case NL80211_IFTYPE_P2P_CLIENT: /* only a single MAC of the same type */ WARN_ON(power_iterator->p2p_vif); power_iterator->p2p_vif = vif; if (active) power_iterator->p2p_active = true; break; case 
NL80211_IFTYPE_STATION: power_iterator->bss_vif = vif; if (active) power_iterator->bss_active = true; break; default: break; } } static void iwl_mvm_power_set_pm(struct iwl_mvm *mvm, struct iwl_power_vifs *vifs) { struct iwl_mvm_vif *bss_mvmvif = NULL; struct iwl_mvm_vif *p2p_mvmvif = NULL; struct iwl_mvm_vif *ap_mvmvif = NULL; bool client_same_channel = false; bool ap_same_channel = false; lockdep_assert_held(&mvm->mutex); /* set pm_enable to false */ ieee80211_iterate_active_interfaces_atomic(mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_power_disable_pm_iterator, NULL); if (vifs->bss_vif) bss_mvmvif = iwl_mvm_vif_from_mac80211(vifs->bss_vif); if (vifs->p2p_vif) p2p_mvmvif = iwl_mvm_vif_from_mac80211(vifs->p2p_vif); if (vifs->ap_vif) ap_mvmvif = iwl_mvm_vif_from_mac80211(vifs->ap_vif); /* don't allow PM if any TDLS stations exist */ if (iwl_mvm_tdls_sta_count(mvm, NULL)) return; /* enable PM on bss if bss stand alone */ if (vifs->bss_active && !vifs->p2p_active && !vifs->ap_active) { bss_mvmvif->pm_enabled = true; return; } /* enable PM on p2p if p2p stand alone */ if (vifs->p2p_active && !vifs->bss_active && !vifs->ap_active) { p2p_mvmvif->pm_enabled = true; return; } if (vifs->bss_active && vifs->p2p_active) client_same_channel = (bss_mvmvif->phy_ctxt->id == p2p_mvmvif->phy_ctxt->id); if (vifs->bss_active && vifs->ap_active) ap_same_channel = (bss_mvmvif->phy_ctxt->id == ap_mvmvif->phy_ctxt->id); /* clients are not stand alone: enable PM if DCM */ if (!(client_same_channel || ap_same_channel)) { if (vifs->bss_active) bss_mvmvif->pm_enabled = true; if (vifs->p2p_active) p2p_mvmvif->pm_enabled = true; return; } /* * There is only one channel in the system and there are only * bss and p2p clients that share it */ if (client_same_channel && !vifs->ap_active) { /* share same channel*/ bss_mvmvif->pm_enabled = true; p2p_mvmvif->pm_enabled = true; } } #ifdef CPTCFG_IWLWIFI_DEBUGFS int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm, struct ieee80211_vif *vif, char *buf, int bufsz) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mac_power_cmd cmd = {}; int pos = 0; mutex_lock(&mvm->mutex); memcpy(&cmd, &mvmvif->mac_pwr_cmd, sizeof(cmd)); mutex_unlock(&mvm->mutex); pos += scnprintf(buf+pos, bufsz-pos, "power_scheme = %d\n", iwlmvm_mod_params.power_scheme); pos += scnprintf(buf+pos, bufsz-pos, "flags = 0x%x\n", le16_to_cpu(cmd.flags)); pos += scnprintf(buf+pos, bufsz-pos, "keep_alive = %d\n", le16_to_cpu(cmd.keep_alive_seconds)); if (!(cmd.flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK))) return pos; pos += scnprintf(buf+pos, bufsz-pos, "skip_over_dtim = %d\n", (cmd.flags & cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK)) ? 
1 : 0); pos += scnprintf(buf+pos, bufsz-pos, "skip_dtim_periods = %d\n", cmd.skip_dtim_periods); if (!(cmd.flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK))) { pos += scnprintf(buf+pos, bufsz-pos, "rx_data_timeout = %d\n", le32_to_cpu(cmd.rx_data_timeout)); pos += scnprintf(buf+pos, bufsz-pos, "tx_data_timeout = %d\n", le32_to_cpu(cmd.tx_data_timeout)); } if (cmd.flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK)) pos += scnprintf(buf+pos, bufsz-pos, "lprx_rssi_threshold = %d\n", cmd.lprx_rssi_threshold); if (!(cmd.flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK))) return pos; pos += scnprintf(buf+pos, bufsz-pos, "rx_data_timeout_uapsd = %d\n", le32_to_cpu(cmd.rx_data_timeout_uapsd)); pos += scnprintf(buf+pos, bufsz-pos, "tx_data_timeout_uapsd = %d\n", le32_to_cpu(cmd.tx_data_timeout_uapsd)); pos += scnprintf(buf+pos, bufsz-pos, "qndp_tid = %d\n", cmd.qndp_tid); pos += scnprintf(buf+pos, bufsz-pos, "uapsd_ac_flags = 0x%x\n", cmd.uapsd_ac_flags); pos += scnprintf(buf+pos, bufsz-pos, "uapsd_max_sp = %d\n", cmd.uapsd_max_sp); pos += scnprintf(buf+pos, bufsz-pos, "heavy_tx_thld_packets = %d\n", cmd.heavy_tx_thld_packets); pos += scnprintf(buf+pos, bufsz-pos, "heavy_rx_thld_packets = %d\n", cmd.heavy_rx_thld_packets); pos += scnprintf(buf+pos, bufsz-pos, "heavy_tx_thld_percentage = %d\n", cmd.heavy_tx_thld_percentage); pos += scnprintf(buf+pos, bufsz-pos, "heavy_rx_thld_percentage = %d\n", cmd.heavy_rx_thld_percentage); pos += scnprintf(buf+pos, bufsz-pos, "uapsd_misbehaving_enable = %d\n", (cmd.flags & cpu_to_le16(POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK)) ? 1 : 0); if (!(cmd.flags & cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK))) return pos; pos += scnprintf(buf+pos, bufsz-pos, "snooze_interval = %d\n", cmd.snooze_interval); pos += scnprintf(buf+pos, bufsz-pos, "snooze_window = %d\n", cmd.snooze_window); return pos; } void iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif, struct iwl_beacon_filter_cmd *cmd) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_dbgfs_bf *dbgfs_bf = &mvmvif->dbgfs_bf; if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ENERGY_DELTA) cmd->bf_energy_delta = cpu_to_le32(dbgfs_bf->bf_energy_delta); if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA) cmd->bf_roaming_energy_delta = cpu_to_le32(dbgfs_bf->bf_roaming_energy_delta); if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ROAMING_STATE) cmd->bf_roaming_state = cpu_to_le32(dbgfs_bf->bf_roaming_state); if (dbgfs_bf->mask & MVM_DEBUGFS_BF_TEMP_THRESHOLD) cmd->bf_temp_threshold = cpu_to_le32(dbgfs_bf->bf_temp_threshold); if (dbgfs_bf->mask & MVM_DEBUGFS_BF_TEMP_FAST_FILTER) cmd->bf_temp_fast_filter = cpu_to_le32(dbgfs_bf->bf_temp_fast_filter); if (dbgfs_bf->mask & MVM_DEBUGFS_BF_TEMP_SLOW_FILTER) cmd->bf_temp_slow_filter = cpu_to_le32(dbgfs_bf->bf_temp_slow_filter); if (dbgfs_bf->mask & MVM_DEBUGFS_BF_DEBUG_FLAG) cmd->bf_debug_flag = cpu_to_le32(dbgfs_bf->bf_debug_flag); if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ESCAPE_TIMER) cmd->bf_escape_timer = cpu_to_le32(dbgfs_bf->bf_escape_timer); if (dbgfs_bf->mask & MVM_DEBUGFS_BA_ESCAPE_TIMER) cmd->ba_escape_timer = cpu_to_le32(dbgfs_bf->ba_escape_timer); if (dbgfs_bf->mask & MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT) cmd->ba_enable_beacon_abort = cpu_to_le32(dbgfs_bf->ba_enable_beacon_abort); } #endif static int _iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_beacon_filter_cmd *cmd, u32 cmd_flags) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int ret; if (mvmvif != mvm->bf_allowed_vif || !vif->bss_conf.dtim_period || 
vif->type != NL80211_IFTYPE_STATION || vif->p2p) return 0; iwl_mvm_beacon_filter_set_cqm_params(mvm, vif, cmd); iwl_mvm_beacon_filter_debugfs_parameters(vif, cmd); ret = iwl_mvm_beacon_filter_send_cmd(mvm, cmd, cmd_flags); if (!ret) mvmvif->bf_data.bf_enabled = true; return ret; } int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u32 flags) { struct iwl_beacon_filter_cmd cmd = { IWL_BF_CMD_CONFIG_DEFAULTS, .bf_enable_beacon_filter = cpu_to_le32(1), }; return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd, flags); } static int _iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u32 flags) { struct iwl_beacon_filter_cmd cmd = {}; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int ret; if (vif->type != NL80211_IFTYPE_STATION || vif->p2p) return 0; ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd, flags); if (!ret) mvmvif->bf_data.bf_enabled = false; return ret; } int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u32 flags) { return _iwl_mvm_disable_beacon_filter(mvm, vif, flags); } static int iwl_mvm_power_set_ps(struct iwl_mvm *mvm) { bool disable_ps; int ret; /* disable PS if CAM */ disable_ps = (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM); /* ...or if any of the vifs require PS to be off */ ieee80211_iterate_active_interfaces_atomic(mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_power_ps_disabled_iterator, &disable_ps); /* update device power state if it has changed */ if (mvm->ps_disabled != disable_ps) { bool old_ps_disabled = mvm->ps_disabled; mvm->ps_disabled = disable_ps; ret = iwl_mvm_power_update_device(mvm); if (ret) { mvm->ps_disabled = old_ps_disabled; return ret; } } return 0; } static int iwl_mvm_power_set_ba(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_beacon_filter_cmd cmd = { IWL_BF_CMD_CONFIG_DEFAULTS, .bf_enable_beacon_filter = cpu_to_le32(1), }; if (!mvmvif->bf_data.bf_enabled) return 0; if (mvm->fwrt.cur_fw_img == IWL_UCODE_WOWLAN) cmd.ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_D3); mvmvif->bf_data.ba_enabled = !(!mvmvif->pm_enabled || mvm->ps_disabled || !vif->bss_conf.ps || iwl_mvm_vif_low_latency(mvmvif)); return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd, 0); } int iwl_mvm_power_update_ps(struct iwl_mvm *mvm) { struct iwl_power_vifs vifs = { .mvm = mvm, }; int ret; lockdep_assert_held(&mvm->mutex); /* get vifs info */ ieee80211_iterate_active_interfaces_atomic(mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_power_get_vifs_iterator, &vifs); ret = iwl_mvm_power_set_ps(mvm); if (ret) return ret; if (vifs.bss_vif) return iwl_mvm_power_set_ba(mvm, vifs.bss_vif); return 0; } int iwl_mvm_power_update_mac(struct iwl_mvm *mvm) { struct iwl_power_vifs vifs = { .mvm = mvm, }; int ret; lockdep_assert_held(&mvm->mutex); /* get vifs info */ ieee80211_iterate_active_interfaces_atomic(mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_power_get_vifs_iterator, &vifs); iwl_mvm_power_set_pm(mvm, &vifs); ret = iwl_mvm_power_set_ps(mvm); if (ret) return ret; if (vifs.bss_vif) { ret = iwl_mvm_power_send_cmd(mvm, vifs.bss_vif); if (ret) return ret; } if (vifs.p2p_vif) { ret = iwl_mvm_power_send_cmd(mvm, vifs.p2p_vif); if (ret) return ret; } if (vifs.bss_vif) return iwl_mvm_power_set_ba(mvm, vifs.bss_vif); return 0; } 
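/*
 * Editor's note: a worked example for the keep-alive and skip-over-DTIM
 * arithmetic in iwl_mvm_power_build_cmd() and
 * iwl_mvm_power_config_skip_dtim() above.  The beacon parameters below
 * (dtim_period = 2, beacon_int = 100 TU) are hypothetical and the block is
 * compiled out; it only restates the driver's math in isolation.
 */
#if 0	/* editor's illustration only -- never built with the driver */
#include <stdio.h>

int main(void)
{
	unsigned int dtim_period = 2, beacon_int = 100;	/* hypothetical */
	unsigned int dtim_tu = dtim_period * beacon_int;	/* 200 TU */

	/* keep-alive: 3 DTIMs in usec (1 TU = 1024 usec), rounded up to
	 * seconds, then clamped to the POWER_KEEP_ALIVE_PERIOD_SEC floor */
	unsigned int usec = 3 * dtim_tu * 1024;			/* 614400 */
	unsigned int keep_alive = (usec + 999999) / 1000000;	/* 1 s */
	if (keep_alive < 25)
		keep_alive = 25;				/* 25 s floor */

	/* skip-over-DTIM while the host sleeps: skip at most 306 TU
	 * (~313 ms) worth of DTIMs, here 306 / 200 = 1 DTIM; the fw field
	 * means "look at every X DTIMs", hence the +1 below */
	unsigned int skip = 306 / dtim_tu;
	if (skip < 1)
		skip = 1;

	printf("keep_alive=%u s, skip_dtim_periods=%u\n", keep_alive,
	       1 + skip);
	return 0;
}
#endif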
backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/mvm/quota.c000066400000000000000000000272361351064634500266750ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright (C) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright (C) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* *****************************************************************************/ #include <net/mac80211.h> #include "fw-api.h" #include "mvm.h" #define QUOTA_100 IWL_MVM_MAX_QUOTA #define QUOTA_LOWLAT_MIN ((QUOTA_100 * IWL_MVM_LOWLAT_QUOTA_MIN_PERCENT) / 100) struct iwl_mvm_quota_iterator_data { int n_interfaces[MAX_BINDINGS]; int colors[MAX_BINDINGS]; int low_latency[MAX_BINDINGS]; #ifdef CPTCFG_IWLWIFI_DEBUGFS int dbgfs_min[MAX_BINDINGS]; #endif int n_low_latency_bindings; struct ieee80211_vif *disabled_vif; }; static void iwl_mvm_quota_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif) { struct iwl_mvm_quota_iterator_data *data = _data; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); u16 id; /* skip disabled interfaces here immediately */ if (vif == data->disabled_vif) return; if (!mvmvif->phy_ctxt) return; /* currently, PHY ID == binding ID */ id = mvmvif->phy_ctxt->id; /* need at least one binding per PHY */ BUILD_BUG_ON(NUM_PHY_CTX > MAX_BINDINGS); if (WARN_ON_ONCE(id >= MAX_BINDINGS)) return; switch (vif->type) { case NL80211_IFTYPE_STATION: if (vif->bss_conf.assoc) break; return; case NL80211_IFTYPE_AP: case NL80211_IFTYPE_ADHOC: if (mvmvif->ap_ibss_active) break; return; case NL80211_IFTYPE_MONITOR: if (mvmvif->monitor_active) break; return; case NL80211_IFTYPE_P2P_DEVICE: case NL80211_IFTYPE_NAN: return; default: WARN_ON_ONCE(1); return; } if (data->colors[id] < 0) data->colors[id] = mvmvif->phy_ctxt->color; else WARN_ON_ONCE(data->colors[id] != mvmvif->phy_ctxt->color); data->n_interfaces[id]++; #ifdef CPTCFG_IWLWIFI_DEBUGFS if (mvmvif->dbgfs_quota_min) data->dbgfs_min[id] = max(data->dbgfs_min[id], mvmvif->dbgfs_quota_min); #endif if (iwl_mvm_vif_low_latency(mvmvif) && !data->low_latency[id]) { data->n_low_latency_bindings++; data->low_latency[id] = true; } } #ifdef CPTCFG_IWLMVM_P2P_OPPPS_TEST_WA /* * Zero quota for P2P client MAC as part of a WA to pass P2P OPPPS certification * test. Refer to IWLMVM_P2P_OPPPS_TEST_WA description in Kconfig.noupstream for * details.
*/ static void iwl_mvm_adjust_quota_for_p2p_wa(struct iwl_mvm *mvm, struct iwl_time_quota_cmd *cmd) { struct iwl_time_quota_data *quota; int i, phy_id = -1; if (!mvm->p2p_opps_test_wa_vif || !mvm->p2p_opps_test_wa_vif->phy_ctxt) return; phy_id = mvm->p2p_opps_test_wa_vif->phy_ctxt->id; for (i = 0; i < MAX_BINDINGS; i++) { u32 id; u32 id_n_c; quota = iwl_mvm_quota_cmd_get_quota(mvm, cmd, i); id_n_c = le32_to_cpu(quota->id_and_color); id = (id_n_c & FW_CTXT_ID_MSK) >> FW_CTXT_ID_POS; if (id != phy_id) continue; quota->quota = 0; } } #endif static void iwl_mvm_adjust_quota_for_noa(struct iwl_mvm *mvm, struct iwl_time_quota_cmd *cmd) { #ifdef CPTCFG_NL80211_TESTMODE struct iwl_mvm_vif *mvmvif; int i, phy_id = -1, beacon_int = 0; if (!mvm->noa_duration || !mvm->noa_vif) return; mvmvif = iwl_mvm_vif_from_mac80211(mvm->noa_vif); if (!mvmvif->ap_ibss_active) return; phy_id = mvmvif->phy_ctxt->id; beacon_int = mvm->noa_vif->bss_conf.beacon_int; for (i = 0; i < MAX_BINDINGS; i++) { struct iwl_time_quota_data *data = iwl_mvm_quota_cmd_get_quota(mvm, cmd, i); u32 id_n_c = le32_to_cpu(data->id_and_color); u32 id = (id_n_c & FW_CTXT_ID_MSK) >> FW_CTXT_ID_POS; u32 quota = le32_to_cpu(data->quota); if (id != phy_id) continue; quota *= (beacon_int - mvm->noa_duration); quota /= beacon_int; IWL_DEBUG_QUOTA(mvm, "quota: adjust for NoA from %d to %d\n", le32_to_cpu(data->quota), quota); data->quota = cpu_to_le32(quota); } #endif } int iwl_mvm_update_quotas(struct iwl_mvm *mvm, bool force_update, struct ieee80211_vif *disabled_vif) { struct iwl_time_quota_cmd cmd = {}; int i, idx, err, num_active_macs, quota, quota_rem, n_non_lowlat; struct iwl_mvm_quota_iterator_data data = { .n_interfaces = {}, .colors = { -1, -1, -1, -1 }, .disabled_vif = disabled_vif, }; struct iwl_time_quota_cmd *last = &mvm->last_quota_cmd; struct iwl_time_quota_data *qdata, *last_data; bool send = false; lockdep_assert_held(&mvm->mutex); if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA)) return 0; /* update all upon completion */ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) return 0; /* iterator data above must match */ BUILD_BUG_ON(MAX_BINDINGS != 4); ieee80211_iterate_active_interfaces_atomic( mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_quota_iterator, &data); /* * The FW's scheduling session consists of * IWL_MVM_MAX_QUOTA fragments. Divide these fragments * equally between all the bindings that require quota */ num_active_macs = 0; for (i = 0; i < MAX_BINDINGS; i++) { qdata = iwl_mvm_quota_cmd_get_quota(mvm, &cmd, i); qdata->id_and_color = cpu_to_le32(FW_CTXT_INVALID); num_active_macs += data.n_interfaces[i]; } n_non_lowlat = num_active_macs; if (data.n_low_latency_bindings == 1) { for (i = 0; i < MAX_BINDINGS; i++) { if (data.low_latency[i]) { n_non_lowlat -= data.n_interfaces[i]; break; } } } if (data.n_low_latency_bindings == 1 && n_non_lowlat) { /* * Reserve quota for the low latency binding in case that * there are several data bindings but only a single * low latency one. Split the rest of the quota equally * between the other data interfaces. */ quota = (QUOTA_100 - QUOTA_LOWLAT_MIN) / n_non_lowlat; quota_rem = QUOTA_100 - n_non_lowlat * quota - QUOTA_LOWLAT_MIN; IWL_DEBUG_QUOTA(mvm, "quota: low-latency binding active, remaining quota per other binding: %d\n", quota); } else if (num_active_macs) { /* * There are 0 or more than 1 low latency bindings, or all the * data interfaces belong to the single low latency binding. * Split the quota equally between the data interfaces. 
*/ quota = QUOTA_100 / num_active_macs; quota_rem = QUOTA_100 % num_active_macs; IWL_DEBUG_QUOTA(mvm, "quota: splitting evenly per binding: %d\n", quota); } else { /* values don't really matter - won't be used */ quota = 0; quota_rem = 0; } for (idx = 0, i = 0; i < MAX_BINDINGS; i++) { if (data.colors[i] < 0) continue; qdata = iwl_mvm_quota_cmd_get_quota(mvm, &cmd, idx); qdata->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(i, data.colors[i])); if (data.n_interfaces[i] <= 0) qdata->quota = cpu_to_le32(0); #ifdef CPTCFG_IWLWIFI_DEBUGFS else if (data.dbgfs_min[i]) qdata->quota = cpu_to_le32(data.dbgfs_min[i] * QUOTA_100 / 100); #endif else if (data.n_low_latency_bindings == 1 && n_non_lowlat && data.low_latency[i]) /* * There is more than one binding, but only one of the * bindings is in low latency. For this case, allocate * the minimal required quota for the low latency * binding. */ qdata->quota = cpu_to_le32(QUOTA_LOWLAT_MIN); else qdata->quota = cpu_to_le32(quota * data.n_interfaces[i]); WARN_ONCE(le32_to_cpu(qdata->quota) > QUOTA_100, "Binding=%d, quota=%u > max=%u\n", idx, le32_to_cpu(qdata->quota), QUOTA_100); qdata->max_duration = cpu_to_le32(0); idx++; } /* Give the remainder of the session to the first data binding */ for (i = 0; i < MAX_BINDINGS; i++) { qdata = iwl_mvm_quota_cmd_get_quota(mvm, &cmd, i); if (le32_to_cpu(qdata->quota) != 0) { le32_add_cpu(&qdata->quota, quota_rem); IWL_DEBUG_QUOTA(mvm, "quota: giving remainder of %d to binding %d\n", quota_rem, i); break; } } iwl_mvm_adjust_quota_for_noa(mvm, &cmd); /* check that we have non-zero quota for all valid bindings */ for (i = 0; i < MAX_BINDINGS; i++) { qdata = iwl_mvm_quota_cmd_get_quota(mvm, &cmd, i); last_data = iwl_mvm_quota_cmd_get_quota(mvm, last, i); if (qdata->id_and_color != last_data->id_and_color) send = true; if (qdata->max_duration != last_data->max_duration) send = true; if (abs((int)le32_to_cpu(qdata->quota) - (int)le32_to_cpu(last_data->quota)) > IWL_MVM_QUOTA_THRESHOLD) send = true; if (qdata->id_and_color == cpu_to_le32(FW_CTXT_INVALID)) continue; WARN_ONCE(qdata->quota == 0, "zero quota on binding %d\n", i); } #ifdef CPTCFG_IWLMVM_P2P_OPPPS_TEST_WA /* * Zero quota for P2P client MAC as part of a WA to pass P2P OPPPS * certification test. Refer to IWLMVM_P2P_OPPPS_TEST_WA description in * Kconfig.noupstream for details. */ if (mvm->p2p_opps_test_wa_vif) iwl_mvm_adjust_quota_for_p2p_wa(mvm, &cmd); #endif if (!send && !force_update) { /* don't send a practically unchanged command, the firmware has * to re-initialize a lot of state and that can have an adverse * impact on it */ return 0; } err = iwl_mvm_send_cmd_pdu(mvm, TIME_QUOTA_CMD, 0, iwl_mvm_quota_cmd_size(mvm), &cmd); if (err) IWL_ERR(mvm, "Failed to send quota: %d\n", err); else mvm->last_quota_cmd = cmd; return err; } backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c000066400000000000000000000360451351064634500266000ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. 
* * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #include "rs.h" #include "fw-api.h" #include "sta.h" #include "iwl-op-mode.h" #include "mvm.h" static u8 rs_fw_bw_from_sta_bw(struct ieee80211_sta *sta) { switch (sta->bandwidth) { case IEEE80211_STA_RX_BW_160: return IWL_TLC_MNG_CH_WIDTH_160MHZ; case IEEE80211_STA_RX_BW_80: return IWL_TLC_MNG_CH_WIDTH_80MHZ; case IEEE80211_STA_RX_BW_40: return IWL_TLC_MNG_CH_WIDTH_40MHZ; case IEEE80211_STA_RX_BW_20: default: return IWL_TLC_MNG_CH_WIDTH_20MHZ; } } static u8 rs_fw_set_active_chains(u8 chains) { u8 fw_chains = 0; if (chains & ANT_A) fw_chains |= IWL_TLC_MNG_CHAIN_A_MSK; if (chains & ANT_B) fw_chains |= IWL_TLC_MNG_CHAIN_B_MSK; if (chains & ANT_C) WARN(false, "tlc offload doesn't support antenna C. 
chains: 0x%x\n", chains); return fw_chains; } static u8 rs_fw_sgi_cw_support(struct ieee80211_sta *sta) { struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap; struct ieee80211_sta_he_cap *he_cap = &sta->he_cap; u8 supp = 0; if (he_cap->has_he) return 0; if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20) supp |= BIT(IWL_TLC_MNG_CH_WIDTH_20MHZ); if (ht_cap->cap & IEEE80211_HT_CAP_SGI_40) supp |= BIT(IWL_TLC_MNG_CH_WIDTH_40MHZ); if (vht_cap->cap & IEEE80211_VHT_CAP_SHORT_GI_80) supp |= BIT(IWL_TLC_MNG_CH_WIDTH_80MHZ); if (vht_cap->cap & IEEE80211_VHT_CAP_SHORT_GI_160) supp |= BIT(IWL_TLC_MNG_CH_WIDTH_160MHZ); return supp; } static u16 rs_fw_get_config_flags(struct iwl_mvm *mvm, struct ieee80211_sta *sta, struct ieee80211_supported_band *sband) { struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap; struct ieee80211_sta_he_cap *he_cap = &sta->he_cap; bool vht_ena = vht_cap->vht_supported; u16 flags = 0; if (mvm->cfg->ht_params->stbc && (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1)) { if (he_cap->has_he) { if (he_cap->he_cap_elem.phy_cap_info[2] & IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ) flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK; if (he_cap->he_cap_elem.phy_cap_info[7] & IEEE80211_HE_PHY_CAP7_STBC_RX_ABOVE_80MHZ) flags |= IWL_TLC_MNG_CFG_FLAGS_HE_STBC_160MHZ_MSK; } else if ((ht_cap->cap & IEEE80211_HT_CAP_RX_STBC) || (vht_ena && (vht_cap->cap & IEEE80211_VHT_CAP_RXSTBC_MASK))) flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK; } if (mvm->cfg->ht_params->ldpc && ((ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING) || (vht_ena && (vht_cap->cap & IEEE80211_VHT_CAP_RXLDPC)))) flags |= IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK; /* consider our LDPC support in case of HE */ if (sband->iftype_data && sband->iftype_data->he_cap.has_he && !(sband->iftype_data->he_cap.he_cap_elem.phy_cap_info[1] & IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD)) flags &= ~IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK; if (he_cap->has_he && (he_cap->he_cap_elem.phy_cap_info[3] & IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_MASK)) flags |= IWL_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_1_MSK; return flags; } static int rs_fw_vht_highest_rx_mcs_index(const struct ieee80211_sta_vht_cap *vht_cap, int nss) { u16 rx_mcs = le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map) & (0x3 << (2 * (nss - 1))); rx_mcs >>= (2 * (nss - 1)); switch (rx_mcs) { case IEEE80211_VHT_MCS_SUPPORT_0_7: return IWL_TLC_MNG_HT_RATE_MCS7; case IEEE80211_VHT_MCS_SUPPORT_0_8: return IWL_TLC_MNG_HT_RATE_MCS8; case IEEE80211_VHT_MCS_SUPPORT_0_9: return IWL_TLC_MNG_HT_RATE_MCS9; default: WARN_ON_ONCE(1); break; } return 0; } static void rs_fw_vht_set_enabled_rates(const struct ieee80211_sta *sta, const struct ieee80211_sta_vht_cap *vht_cap, struct iwl_tlc_config_cmd *cmd) { u16 supp; int i, highest_mcs; for (i = 0; i < sta->rx_nss; i++) { if (i == IWL_TLC_NSS_MAX) break; highest_mcs = rs_fw_vht_highest_rx_mcs_index(vht_cap, i + 1); if (!highest_mcs) continue; supp = BIT(highest_mcs + 1) - 1; if (sta->bandwidth == IEEE80211_STA_RX_BW_20) supp &= ~BIT(IWL_TLC_MNG_HT_RATE_MCS9); cmd->ht_rates[i][IWL_TLC_HT_BW_NONE_160] = cpu_to_le16(supp); if (sta->bandwidth == IEEE80211_STA_RX_BW_160) cmd->ht_rates[i][IWL_TLC_HT_BW_160] = cmd->ht_rates[i][IWL_TLC_HT_BW_NONE_160]; } } static u16 rs_fw_he_ieee80211_mcs_to_rs_mcs(u16 mcs) { switch (mcs) { case IEEE80211_HE_MCS_SUPPORT_0_7: return BIT(IWL_TLC_MNG_HT_RATE_MCS7 + 1) - 1; case IEEE80211_HE_MCS_SUPPORT_0_9: return BIT(IWL_TLC_MNG_HT_RATE_MCS9 + 1) - 1; case IEEE80211_HE_MCS_SUPPORT_0_11: 
return BIT(IWL_TLC_MNG_HT_RATE_MCS11 + 1) - 1; case IEEE80211_HE_MCS_NOT_SUPPORTED: return 0; } WARN(1, "invalid HE MCS %d\n", mcs); return 0; } static void rs_fw_he_set_enabled_rates(const struct ieee80211_sta *sta, struct ieee80211_supported_band *sband, struct iwl_tlc_config_cmd *cmd) { const struct ieee80211_sta_he_cap *he_cap = &sta->he_cap; u16 mcs_160 = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160); u16 mcs_80 = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80); u16 tx_mcs_80 = le16_to_cpu(sband->iftype_data->he_cap.he_mcs_nss_supp.tx_mcs_80); u16 tx_mcs_160 = le16_to_cpu(sband->iftype_data->he_cap.he_mcs_nss_supp.tx_mcs_160); int i; for (i = 0; i < sta->rx_nss && i < IWL_TLC_NSS_MAX; i++) { u16 _mcs_160 = (mcs_160 >> (2 * i)) & 0x3; u16 _mcs_80 = (mcs_80 >> (2 * i)) & 0x3; u16 _tx_mcs_160 = (tx_mcs_160 >> (2 * i)) & 0x3; u16 _tx_mcs_80 = (tx_mcs_80 >> (2 * i)) & 0x3; /* If one side doesn't support - mark both as not supporting */ if (_mcs_80 == IEEE80211_HE_MCS_NOT_SUPPORTED || _tx_mcs_80 == IEEE80211_HE_MCS_NOT_SUPPORTED) { _mcs_80 = IEEE80211_HE_MCS_NOT_SUPPORTED; _tx_mcs_80 = IEEE80211_HE_MCS_NOT_SUPPORTED; } if (_mcs_80 > _tx_mcs_80) _mcs_80 = _tx_mcs_80; cmd->ht_rates[i][IWL_TLC_HT_BW_NONE_160] = cpu_to_le16(rs_fw_he_ieee80211_mcs_to_rs_mcs(_mcs_80)); /* If one side doesn't support - mark both as not supporting */ if (_mcs_160 == IEEE80211_HE_MCS_NOT_SUPPORTED || _tx_mcs_160 == IEEE80211_HE_MCS_NOT_SUPPORTED) { _mcs_160 = IEEE80211_HE_MCS_NOT_SUPPORTED; _tx_mcs_160 = IEEE80211_HE_MCS_NOT_SUPPORTED; } if (_mcs_160 > _tx_mcs_160) _mcs_160 = _tx_mcs_160; cmd->ht_rates[i][IWL_TLC_HT_BW_160] = cpu_to_le16(rs_fw_he_ieee80211_mcs_to_rs_mcs(_mcs_160)); } } static void rs_fw_set_supp_rates(struct ieee80211_sta *sta, struct ieee80211_supported_band *sband, struct iwl_tlc_config_cmd *cmd) { int i; unsigned long tmp; unsigned long supp; /* must be unsigned long for for_each_set_bit */ const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap; const struct ieee80211_sta_he_cap *he_cap = &sta->he_cap; /* non HT rates */ supp = 0; tmp = sta->supp_rates[sband->band]; for_each_set_bit(i, &tmp, BITS_PER_LONG) supp |= BIT(sband->bitrates[i].hw_value); cmd->non_ht_rates = cpu_to_le16(supp); cmd->mode = IWL_TLC_MNG_MODE_NON_HT; /* HT/VHT rates */ if (he_cap->has_he) { cmd->mode = IWL_TLC_MNG_MODE_HE; rs_fw_he_set_enabled_rates(sta, sband, cmd); } else if (vht_cap->vht_supported) { cmd->mode = IWL_TLC_MNG_MODE_VHT; rs_fw_vht_set_enabled_rates(sta, vht_cap, cmd); } else if (ht_cap->ht_supported) { cmd->mode = IWL_TLC_MNG_MODE_HT; cmd->ht_rates[IWL_TLC_NSS_1][IWL_TLC_HT_BW_NONE_160] = cpu_to_le16(ht_cap->mcs.rx_mask[0]); cmd->ht_rates[IWL_TLC_NSS_2][IWL_TLC_HT_BW_NONE_160] = cpu_to_le16(ht_cap->mcs.rx_mask[1]); } } void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_tlc_update_notif *notif; struct ieee80211_sta *sta; struct iwl_mvm_sta *mvmsta; struct iwl_lq_sta_rs_fw *lq_sta; u32 flags; rcu_read_lock(); notif = (void *)pkt->data; sta = rcu_dereference(mvm->fw_id_to_mac_id[notif->sta_id]); if (IS_ERR_OR_NULL(sta)) { IWL_ERR(mvm, "Invalid sta id (%d) in FW TLC notification\n", notif->sta_id); goto out; } mvmsta = iwl_mvm_sta_from_mac80211(sta); if (!mvmsta) { IWL_ERR(mvm, "Invalid sta id (%d) in FW TLC notification\n", notif->sta_id); goto out; } flags = le32_to_cpu(notif->flags); lq_sta = &mvmsta->lq_sta.rs_fw; if (flags & IWL_TLC_NOTIF_FLAG_RATE) { 
lq_sta->last_rate_n_flags = le32_to_cpu(notif->rate); IWL_DEBUG_RATE(mvm, "new rate_n_flags: 0x%X\n", lq_sta->last_rate_n_flags); } if (flags & IWL_TLC_NOTIF_FLAG_AMSDU && !mvmsta->orig_amsdu_len) { u16 size = le32_to_cpu(notif->amsdu_size); int i; if (WARN_ON(sta->max_amsdu_len < size)) goto out; mvmsta->amsdu_enabled = le32_to_cpu(notif->amsdu_enabled); mvmsta->max_amsdu_len = size; sta->max_rc_amsdu_len = mvmsta->max_amsdu_len; for (i = 0; i < IWL_MAX_TID_COUNT; i++) { if (mvmsta->amsdu_enabled & BIT(i)) sta->max_tid_amsdu_len[i] = iwl_mvm_max_amsdu_size(mvm, sta, i); else /* * Not so elegant, but this will effectively * prevent AMSDU on this TID */ sta->max_tid_amsdu_len[i] = 1; } IWL_DEBUG_RATE(mvm, "AMSDU update. AMSDU size: %d, AMSDU selected size: %d, AMSDU TID bitmap 0x%X\n", le32_to_cpu(notif->amsdu_size), size, mvmsta->amsdu_enabled); } out: rcu_read_unlock(); } static u16 rs_fw_get_max_amsdu_len(struct ieee80211_sta *sta) { const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap; const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; if (vht_cap->vht_supported) { switch (vht_cap->cap & IEEE80211_VHT_CAP_MAX_MPDU_MASK) { case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454: return IEEE80211_MAX_MPDU_LEN_VHT_11454; case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991: return IEEE80211_MAX_MPDU_LEN_VHT_7991; default: return IEEE80211_MAX_MPDU_LEN_VHT_3895; } } else if (ht_cap->ht_supported) { if (ht_cap->cap & IEEE80211_HT_CAP_MAX_AMSDU) /* * agg is offloaded so we need to assume that agg * are enabled and max mpdu in ampdu is 4095 * (spec 802.11-2016 9.3.2.1) */ return IEEE80211_MAX_MPDU_LEN_HT_BA; else return IEEE80211_MAX_MPDU_LEN_HT_3839; } /* in legacy mode no amsdu is enabled so return zero */ return 0; } void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, enum nl80211_band band, bool update) { struct ieee80211_hw *hw = mvm->hw; struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_lq_sta_rs_fw *lq_sta = &mvmsta->lq_sta.rs_fw; u32 cmd_id = iwl_cmd_id(TLC_MNG_CONFIG_CMD, DATA_PATH_GROUP, 0); struct ieee80211_supported_band *sband = hw->wiphy->bands[band]; u16 max_amsdu_len = rs_fw_get_max_amsdu_len(sta); struct iwl_tlc_config_cmd cfg_cmd = { .sta_id = mvmsta->sta_id, .max_ch_width = update ? 
rs_fw_bw_from_sta_bw(sta) : RATE_MCS_CHAN_WIDTH_20, .flags = cpu_to_le16(rs_fw_get_config_flags(mvm, sta, sband)), .chains = rs_fw_set_active_chains(iwl_mvm_get_valid_tx_ant(mvm)), .sgi_ch_width_supp = rs_fw_sgi_cw_support(sta), .max_mpdu_len = cpu_to_le16(max_amsdu_len), .amsdu = iwl_mvm_is_csum_supported(mvm), }; int ret; memset(lq_sta, 0, offsetof(typeof(*lq_sta), pers)); #ifdef CPTCFG_IWLWIFI_DEBUGFS iwl_mvm_reset_frame_stats(mvm); #endif rs_fw_set_supp_rates(sta, sband, &cfg_cmd); #ifdef CPTCFG_IWLWIFI_SUPPORT_DEBUG_OVERRIDES /* * if AP disables mimo on 160bw (cmd->ht_rates[1][1] == 0), * we disable mimo on 80bw (cmd->ht_rates[1][0]) */ if (mvm->trans->dbg_cfg.tx_siso_80bw_like_160bw && !cfg_cmd.ht_rates[1][1]) cfg_cmd.ht_rates[1][0] = 0; #endif /* * since TLC offload works with one mode we can assume * that only vht/ht is used and also set it as station max amsdu */ sta->max_amsdu_len = max_amsdu_len; ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, CMD_ASYNC, sizeof(cfg_cmd), &cfg_cmd); if (ret) IWL_ERR(mvm, "Failed to send rate scale config (%d)\n", ret); } void iwl_mvm_rs_add_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta) { struct iwl_lq_sta_rs_fw *lq_sta = &mvmsta->lq_sta.rs_fw; IWL_DEBUG_RATE(mvm, "create station rate scale window\n"); lq_sta->pers.drv = mvm; lq_sta->pers.sta_id = mvmsta->sta_id; lq_sta->pers.chains = 0; memset(lq_sta->pers.chain_signal, 0, sizeof(lq_sta->pers.chain_signal)); lq_sta->pers.last_rssi = S8_MIN; lq_sta->last_rate_n_flags = 0; #ifdef CPTCFG_MAC80211_DEBUGFS lq_sta->pers.dbg_fixed_rate = 0; #endif } int rs_fw_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, bool enable) { /* TODO: need to introduce a new FW cmd since LQ cmd is not relevant */ IWL_DEBUG_RATE(mvm, "tx protection - not implemented yet.\n"); return 0; } backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/mvm/rs.c000066400000000000000000003556271351064634500261660ustar00rootroot00000000000000/****************************************************************************** * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * *****************************************************************************/ #include <linux/kernel.h> #include <linux/skbuff.h> #include <linux/slab.h> #include <net/mac80211.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/delay.h> #include <linux/workqueue.h> #include "rs.h" #include "fw-api.h" #include "sta.h" #include "iwl-op-mode.h" #include "mvm.h" #include "debugfs.h" #define IWL_RATE_MAX_WINDOW 62 /* # tx in history window */ /* Calculations of success ratio are done in fixed point where 12800 is 100%. 
* Use this macro when dealing with thresholds consts set as a percentage */ #define RS_PERCENT(x) (128 * x) static u8 rs_ht_to_legacy[] = { [IWL_RATE_MCS_0_INDEX] = IWL_RATE_6M_INDEX, [IWL_RATE_MCS_1_INDEX] = IWL_RATE_9M_INDEX, [IWL_RATE_MCS_2_INDEX] = IWL_RATE_12M_INDEX, [IWL_RATE_MCS_3_INDEX] = IWL_RATE_18M_INDEX, [IWL_RATE_MCS_4_INDEX] = IWL_RATE_24M_INDEX, [IWL_RATE_MCS_5_INDEX] = IWL_RATE_36M_INDEX, [IWL_RATE_MCS_6_INDEX] = IWL_RATE_48M_INDEX, [IWL_RATE_MCS_7_INDEX] = IWL_RATE_54M_INDEX, [IWL_RATE_MCS_8_INDEX] = IWL_RATE_54M_INDEX, [IWL_RATE_MCS_9_INDEX] = IWL_RATE_54M_INDEX, }; static const u8 ant_toggle_lookup[] = { [ANT_NONE] = ANT_NONE, [ANT_A] = ANT_B, [ANT_B] = ANT_A, [ANT_AB] = ANT_AB, }; #define IWL_DECLARE_RATE_INFO(r, s, rp, rn) \ [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \ IWL_RATE_HT_SISO_MCS_##s##_PLCP, \ IWL_RATE_HT_MIMO2_MCS_##s##_PLCP, \ IWL_RATE_VHT_SISO_MCS_##s##_PLCP, \ IWL_RATE_VHT_MIMO2_MCS_##s##_PLCP,\ IWL_RATE_##rp##M_INDEX, \ IWL_RATE_##rn##M_INDEX } #define IWL_DECLARE_MCS_RATE(s) \ [IWL_RATE_MCS_##s##_INDEX] = { IWL_RATE_INVM_PLCP, \ IWL_RATE_HT_SISO_MCS_##s##_PLCP, \ IWL_RATE_HT_MIMO2_MCS_##s##_PLCP, \ IWL_RATE_VHT_SISO_MCS_##s##_PLCP, \ IWL_RATE_VHT_MIMO2_MCS_##s##_PLCP, \ IWL_RATE_INVM_INDEX, \ IWL_RATE_INVM_INDEX } /* * Parameter order: * rate, ht rate, prev rate, next rate * * If there isn't a valid next or previous rate then INV is used which * maps to IWL_RATE_INVALID * */ static const struct iwl_rs_rate_info iwl_rates[IWL_RATE_COUNT] = { IWL_DECLARE_RATE_INFO(1, INV, INV, 2), /* 1mbps */ IWL_DECLARE_RATE_INFO(2, INV, 1, 5), /* 2mbps */ IWL_DECLARE_RATE_INFO(5, INV, 2, 11), /*5.5mbps */ IWL_DECLARE_RATE_INFO(11, INV, 9, 12), /* 11mbps */ IWL_DECLARE_RATE_INFO(6, 0, 5, 11), /* 6mbps ; MCS 0 */ IWL_DECLARE_RATE_INFO(9, INV, 6, 11), /* 9mbps */ IWL_DECLARE_RATE_INFO(12, 1, 11, 18), /* 12mbps ; MCS 1 */ IWL_DECLARE_RATE_INFO(18, 2, 12, 24), /* 18mbps ; MCS 2 */ IWL_DECLARE_RATE_INFO(24, 3, 18, 36), /* 24mbps ; MCS 3 */ IWL_DECLARE_RATE_INFO(36, 4, 24, 48), /* 36mbps ; MCS 4 */ IWL_DECLARE_RATE_INFO(48, 5, 36, 54), /* 48mbps ; MCS 5 */ IWL_DECLARE_RATE_INFO(54, 6, 48, INV), /* 54mbps ; MCS 6 */ IWL_DECLARE_MCS_RATE(7), /* MCS 7 */ IWL_DECLARE_MCS_RATE(8), /* MCS 8 */ IWL_DECLARE_MCS_RATE(9), /* MCS 9 */ }; enum rs_action { RS_ACTION_STAY = 0, RS_ACTION_DOWNSCALE = -1, RS_ACTION_UPSCALE = 1, }; enum rs_column_mode { RS_INVALID = 0, RS_LEGACY, RS_SISO, RS_MIMO2, }; #define MAX_NEXT_COLUMNS 7 #define MAX_COLUMN_CHECKS 3 struct rs_tx_column; typedef bool (*allow_column_func_t) (struct iwl_mvm *mvm, struct ieee80211_sta *sta, struct rs_rate *rate, const struct rs_tx_column *next_col); struct rs_tx_column { enum rs_column_mode mode; u8 ant; bool sgi; enum rs_column next_columns[MAX_NEXT_COLUMNS]; allow_column_func_t checks[MAX_COLUMN_CHECKS]; }; static bool rs_ant_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, struct rs_rate *rate, const struct rs_tx_column *next_col) { return iwl_mvm_bt_coex_is_ant_avail(mvm, next_col->ant); } static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, struct rs_rate *rate, const struct rs_tx_column *next_col) { if (!sta->ht_cap.ht_supported) return false; if (sta->smps_mode == IEEE80211_SMPS_STATIC) return false; if (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) < 2) return false; if (!iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta)) return false; if (mvm->nvm_data->sku_cap_mimo_disabled) return false; return true; } static bool rs_siso_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, struct rs_rate 
*rate, const struct rs_tx_column *next_col) { if (!sta->ht_cap.ht_supported) return false; return true; } static bool rs_sgi_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, struct rs_rate *rate, const struct rs_tx_column *next_col) { struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap; if (is_ht20(rate) && (ht_cap->cap & IEEE80211_HT_CAP_SGI_20)) return true; if (is_ht40(rate) && (ht_cap->cap & IEEE80211_HT_CAP_SGI_40)) return true; if (is_ht80(rate) && (vht_cap->cap & IEEE80211_VHT_CAP_SHORT_GI_80)) return true; if (is_ht160(rate) && (vht_cap->cap & IEEE80211_VHT_CAP_SHORT_GI_160)) return true; return false; } static const struct rs_tx_column rs_tx_columns[] = { [RS_COLUMN_LEGACY_ANT_A] = { .mode = RS_LEGACY, .ant = ANT_A, .next_columns = { RS_COLUMN_LEGACY_ANT_B, RS_COLUMN_SISO_ANT_A, RS_COLUMN_MIMO2, RS_COLUMN_INVALID, RS_COLUMN_INVALID, RS_COLUMN_INVALID, RS_COLUMN_INVALID, }, .checks = { rs_ant_allow, }, }, [RS_COLUMN_LEGACY_ANT_B] = { .mode = RS_LEGACY, .ant = ANT_B, .next_columns = { RS_COLUMN_LEGACY_ANT_A, RS_COLUMN_SISO_ANT_B, RS_COLUMN_MIMO2, RS_COLUMN_INVALID, RS_COLUMN_INVALID, RS_COLUMN_INVALID, RS_COLUMN_INVALID, }, .checks = { rs_ant_allow, }, }, [RS_COLUMN_SISO_ANT_A] = { .mode = RS_SISO, .ant = ANT_A, .next_columns = { RS_COLUMN_SISO_ANT_B, RS_COLUMN_MIMO2, RS_COLUMN_SISO_ANT_A_SGI, RS_COLUMN_LEGACY_ANT_A, RS_COLUMN_LEGACY_ANT_B, RS_COLUMN_INVALID, RS_COLUMN_INVALID, }, .checks = { rs_siso_allow, rs_ant_allow, }, }, [RS_COLUMN_SISO_ANT_B] = { .mode = RS_SISO, .ant = ANT_B, .next_columns = { RS_COLUMN_SISO_ANT_A, RS_COLUMN_MIMO2, RS_COLUMN_SISO_ANT_B_SGI, RS_COLUMN_LEGACY_ANT_A, RS_COLUMN_LEGACY_ANT_B, RS_COLUMN_INVALID, RS_COLUMN_INVALID, }, .checks = { rs_siso_allow, rs_ant_allow, }, }, [RS_COLUMN_SISO_ANT_A_SGI] = { .mode = RS_SISO, .ant = ANT_A, .sgi = true, .next_columns = { RS_COLUMN_SISO_ANT_B_SGI, RS_COLUMN_MIMO2_SGI, RS_COLUMN_SISO_ANT_A, RS_COLUMN_LEGACY_ANT_A, RS_COLUMN_LEGACY_ANT_B, RS_COLUMN_INVALID, RS_COLUMN_INVALID, }, .checks = { rs_siso_allow, rs_ant_allow, rs_sgi_allow, }, }, [RS_COLUMN_SISO_ANT_B_SGI] = { .mode = RS_SISO, .ant = ANT_B, .sgi = true, .next_columns = { RS_COLUMN_SISO_ANT_A_SGI, RS_COLUMN_MIMO2_SGI, RS_COLUMN_SISO_ANT_B, RS_COLUMN_LEGACY_ANT_A, RS_COLUMN_LEGACY_ANT_B, RS_COLUMN_INVALID, RS_COLUMN_INVALID, }, .checks = { rs_siso_allow, rs_ant_allow, rs_sgi_allow, }, }, [RS_COLUMN_MIMO2] = { .mode = RS_MIMO2, .ant = ANT_AB, .next_columns = { RS_COLUMN_SISO_ANT_A, RS_COLUMN_MIMO2_SGI, RS_COLUMN_LEGACY_ANT_A, RS_COLUMN_LEGACY_ANT_B, RS_COLUMN_INVALID, RS_COLUMN_INVALID, RS_COLUMN_INVALID, }, .checks = { rs_mimo_allow, }, }, [RS_COLUMN_MIMO2_SGI] = { .mode = RS_MIMO2, .ant = ANT_AB, .sgi = true, .next_columns = { RS_COLUMN_SISO_ANT_A_SGI, RS_COLUMN_MIMO2, RS_COLUMN_LEGACY_ANT_A, RS_COLUMN_LEGACY_ANT_B, RS_COLUMN_INVALID, RS_COLUMN_INVALID, RS_COLUMN_INVALID, }, .checks = { rs_mimo_allow, rs_sgi_allow, }, }, }; static inline u8 rs_extract_rate(u32 rate_n_flags) { /* also works for HT because bits 7:6 are zero there */ return (u8)(rate_n_flags & RATE_LEGACY_RATE_MSK); } static int iwl_hwrate_to_plcp_idx(u32 rate_n_flags) { int idx = 0; if (rate_n_flags & RATE_MCS_HT_MSK) { idx = rate_n_flags & RATE_HT_MCS_RATE_CODE_MSK; idx += IWL_RATE_MCS_0_INDEX; /* skip 9M not supported in HT*/ if (idx >= IWL_RATE_9M_INDEX) idx += 1; if ((idx >= IWL_FIRST_HT_RATE) && (idx <= IWL_LAST_HT_RATE)) return idx; } else if (rate_n_flags & RATE_MCS_VHT_MSK || rate_n_flags & RATE_MCS_HE_MSK) { idx = rate_n_flags 
& RATE_VHT_MCS_RATE_CODE_MSK; idx += IWL_RATE_MCS_0_INDEX; /* skip 9M not supported in VHT*/ if (idx >= IWL_RATE_9M_INDEX) idx++; if ((idx >= IWL_FIRST_VHT_RATE) && (idx <= IWL_LAST_VHT_RATE)) return idx; if ((rate_n_flags & RATE_MCS_HE_MSK) && (idx <= IWL_LAST_HE_RATE)) return idx; } else { /* legacy rate format, search for match in table */ u8 legacy_rate = rs_extract_rate(rate_n_flags); for (idx = 0; idx < ARRAY_SIZE(iwl_rates); idx++) if (iwl_rates[idx].plcp == legacy_rate) return idx; } return IWL_RATE_INVALID; } static void rs_rate_scale_perform(struct iwl_mvm *mvm, struct ieee80211_sta *sta, struct iwl_lq_sta *lq_sta, int tid, bool ndp); static void rs_fill_lq_cmd(struct iwl_mvm *mvm, struct ieee80211_sta *sta, struct iwl_lq_sta *lq_sta, const struct rs_rate *initial_rate); static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search); /** * The following tables contain the expected throughput metrics for all rates * * 1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54, 60 MBits * * where invalid entries are zeros. * * CCK rates are only valid in legacy table and will only be used in G * (2.4 GHz) band. */ static const u16 expected_tpt_legacy[IWL_RATE_COUNT] = { 7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0, 0, 0 }; /* Expected TpT tables. 4 indexes: * 0 - NGI, 1 - SGI, 2 - AGG+NGI, 3 - AGG+SGI */ static const u16 expected_tpt_siso_20MHz[4][IWL_RATE_COUNT] = { {0, 0, 0, 0, 42, 0, 76, 102, 124, 159, 183, 193, 202, 216, 0}, {0, 0, 0, 0, 46, 0, 82, 110, 132, 168, 192, 202, 210, 225, 0}, {0, 0, 0, 0, 49, 0, 97, 145, 192, 285, 375, 420, 464, 551, 0}, {0, 0, 0, 0, 54, 0, 108, 160, 213, 315, 415, 465, 513, 608, 0}, }; static const u16 expected_tpt_siso_40MHz[4][IWL_RATE_COUNT] = { {0, 0, 0, 0, 77, 0, 127, 160, 184, 220, 242, 250, 257, 269, 275}, {0, 0, 0, 0, 83, 0, 135, 169, 193, 229, 250, 257, 264, 275, 280}, {0, 0, 0, 0, 101, 0, 199, 295, 389, 570, 744, 828, 911, 1070, 1173}, {0, 0, 0, 0, 112, 0, 220, 326, 429, 629, 819, 912, 1000, 1173, 1284}, }; static const u16 expected_tpt_siso_80MHz[4][IWL_RATE_COUNT] = { {0, 0, 0, 0, 130, 0, 191, 223, 244, 273, 288, 294, 298, 305, 308}, {0, 0, 0, 0, 138, 0, 200, 231, 251, 279, 293, 298, 302, 308, 312}, {0, 0, 0, 0, 217, 0, 429, 634, 834, 1220, 1585, 1760, 1931, 2258, 2466}, {0, 0, 0, 0, 241, 0, 475, 701, 921, 1343, 1741, 1931, 2117, 2468, 2691}, }; static const u16 expected_tpt_siso_160MHz[4][IWL_RATE_COUNT] = { {0, 0, 0, 0, 191, 0, 244, 288, 298, 308, 313, 318, 323, 328, 330}, {0, 0, 0, 0, 200, 0, 251, 293, 302, 312, 317, 322, 327, 332, 334}, {0, 0, 0, 0, 439, 0, 875, 1307, 1736, 2584, 3419, 3831, 4240, 5049, 5581}, {0, 0, 0, 0, 488, 0, 972, 1451, 1925, 2864, 3785, 4240, 4691, 5581, 6165}, }; static const u16 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = { {0, 0, 0, 0, 74, 0, 123, 155, 179, 213, 235, 243, 250, 261, 0}, {0, 0, 0, 0, 81, 0, 131, 164, 187, 221, 242, 250, 256, 267, 0}, {0, 0, 0, 0, 98, 0, 193, 286, 375, 550, 718, 799, 878, 1032, 0}, {0, 0, 0, 0, 109, 0, 214, 316, 414, 607, 790, 879, 965, 1132, 0}, }; static const u16 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = { {0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289, 296, 300}, {0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293, 300, 303}, {0, 0, 0, 0, 200, 0, 390, 571, 741, 1067, 1365, 1505, 1640, 1894, 2053}, {0, 0, 0, 0, 221, 0, 430, 630, 816, 1169, 1490, 1641, 1784, 2053, 2221}, }; static const u16 expected_tpt_mimo2_80MHz[4][IWL_RATE_COUNT] = { {0, 0, 0, 0, 182, 0, 240, 264, 278, 299, 308, 311, 313, 317, 319}, {0, 0, 0, 0, 190, 0, 247, 269, 282, 302, 310, 313, 
315, 319, 320}, {0, 0, 0, 0, 428, 0, 833, 1215, 1577, 2254, 2863, 3147, 3418, 3913, 4219}, {0, 0, 0, 0, 474, 0, 920, 1338, 1732, 2464, 3116, 3418, 3705, 4225, 4545}, }; static const u16 expected_tpt_mimo2_160MHz[4][IWL_RATE_COUNT] = { {0, 0, 0, 0, 240, 0, 278, 308, 313, 319, 322, 324, 328, 330, 334}, {0, 0, 0, 0, 247, 0, 282, 310, 315, 320, 323, 325, 329, 332, 338}, {0, 0, 0, 0, 875, 0, 1735, 2582, 3414, 5043, 6619, 7389, 8147, 9629, 10592}, {0, 0, 0, 0, 971, 0, 1925, 2861, 3779, 5574, 7304, 8147, 8976, 10592, 11640}, }; /* mbps, mcs */ static const struct iwl_rate_mcs_info iwl_rate_mcs[IWL_RATE_COUNT] = { { "1", "BPSK DSSS"}, { "2", "QPSK DSSS"}, {"5.5", "BPSK CCK"}, { "11", "QPSK CCK"}, { "6", "BPSK 1/2"}, { "9", "BPSK 1/2"}, { "12", "QPSK 1/2"}, { "18", "QPSK 3/4"}, { "24", "16QAM 1/2"}, { "36", "16QAM 3/4"}, { "48", "64QAM 2/3"}, { "54", "64QAM 3/4"}, { "60", "64QAM 5/6"}, }; #define MCS_INDEX_PER_STREAM (8) static const char *rs_pretty_ant(u8 ant) { static const char * const ant_name[] = { [ANT_NONE] = "None", [ANT_A] = "A", [ANT_B] = "B", [ANT_AB] = "AB", [ANT_C] = "C", [ANT_AC] = "AC", [ANT_BC] = "BC", [ANT_ABC] = "ABC", }; if (ant > ANT_ABC) return "UNKNOWN"; return ant_name[ant]; } static const char *rs_pretty_lq_type(enum iwl_table_type type) { static const char * const lq_types[] = { [LQ_NONE] = "NONE", [LQ_LEGACY_A] = "LEGACY_A", [LQ_LEGACY_G] = "LEGACY_G", [LQ_HT_SISO] = "HT SISO", [LQ_HT_MIMO2] = "HT MIMO", [LQ_VHT_SISO] = "VHT SISO", [LQ_VHT_MIMO2] = "VHT MIMO", [LQ_HE_SISO] = "HE SISO", [LQ_HE_MIMO2] = "HE MIMO", }; if (type < LQ_NONE || type >= LQ_MAX) return "UNKNOWN"; return lq_types[type]; } static char *rs_pretty_rate(const struct rs_rate *rate) { static char buf[40]; static const char * const legacy_rates[] = { [IWL_RATE_1M_INDEX] = "1M", [IWL_RATE_2M_INDEX] = "2M", [IWL_RATE_5M_INDEX] = "5.5M", [IWL_RATE_11M_INDEX] = "11M", [IWL_RATE_6M_INDEX] = "6M", [IWL_RATE_9M_INDEX] = "9M", [IWL_RATE_12M_INDEX] = "12M", [IWL_RATE_18M_INDEX] = "18M", [IWL_RATE_24M_INDEX] = "24M", [IWL_RATE_36M_INDEX] = "36M", [IWL_RATE_48M_INDEX] = "48M", [IWL_RATE_54M_INDEX] = "54M", }; static const char *const ht_vht_rates[] = { [IWL_RATE_MCS_0_INDEX] = "MCS0", [IWL_RATE_MCS_1_INDEX] = "MCS1", [IWL_RATE_MCS_2_INDEX] = "MCS2", [IWL_RATE_MCS_3_INDEX] = "MCS3", [IWL_RATE_MCS_4_INDEX] = "MCS4", [IWL_RATE_MCS_5_INDEX] = "MCS5", [IWL_RATE_MCS_6_INDEX] = "MCS6", [IWL_RATE_MCS_7_INDEX] = "MCS7", [IWL_RATE_MCS_8_INDEX] = "MCS8", [IWL_RATE_MCS_9_INDEX] = "MCS9", }; const char *rate_str; if (is_type_legacy(rate->type) && (rate->index <= IWL_RATE_54M_INDEX)) rate_str = legacy_rates[rate->index]; else if ((is_type_ht(rate->type) || is_type_vht(rate->type)) && (rate->index >= IWL_RATE_MCS_0_INDEX) && (rate->index <= IWL_RATE_MCS_9_INDEX)) rate_str = ht_vht_rates[rate->index]; else rate_str = "BAD_RATE"; sprintf(buf, "(%s|%s|%s)", rs_pretty_lq_type(rate->type), rs_pretty_ant(rate->ant), rate_str); return buf; } static inline void rs_dump_rate(struct iwl_mvm *mvm, const struct rs_rate *rate, const char *prefix) { IWL_DEBUG_RATE(mvm, "%s: %s BW: %d SGI: %d LDPC: %d STBC: %d\n", prefix, rs_pretty_rate(rate), rate->bw, rate->sgi, rate->ldpc, rate->stbc); } static void rs_rate_scale_clear_window(struct iwl_rate_scale_data *window) { window->data = 0; window->success_counter = 0; window->success_ratio = IWL_INVALID_VALUE; window->counter = 0; window->average_tpt = IWL_INVALID_VALUE; } static void rs_rate_scale_clear_tbl_windows(struct iwl_mvm *mvm, struct iwl_scale_tbl_info *tbl) { int i; IWL_DEBUG_RATE(mvm, 
"Clearing up window stats\n"); for (i = 0; i < IWL_RATE_COUNT; i++) rs_rate_scale_clear_window(&tbl->win[i]); for (i = 0; i < ARRAY_SIZE(tbl->tpc_win); i++) rs_rate_scale_clear_window(&tbl->tpc_win[i]); } static inline u8 rs_is_valid_ant(u8 valid_antenna, u8 ant_type) { return (ant_type & valid_antenna) == ant_type; } static int rs_tl_turn_on_agg_for_tid(struct iwl_mvm *mvm, struct iwl_lq_sta *lq_data, u8 tid, struct ieee80211_sta *sta) { int ret = -EAGAIN; IWL_DEBUG_HT(mvm, "Starting Tx agg: STA: %pM tid: %d\n", sta->addr, tid); /* start BA session until the peer sends del BA */ ret = ieee80211_start_tx_ba_session(sta, tid, 0); if (ret == -EAGAIN) { /* * driver and mac80211 is out of sync * this might be cause by reloading firmware * stop the tx ba session here */ IWL_ERR(mvm, "Fail start Tx agg on tid: %d\n", tid); ieee80211_stop_tx_ba_session(sta, tid); } return ret; } static void rs_tl_turn_on_agg(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, u8 tid, struct iwl_lq_sta *lq_sta, struct ieee80211_sta *sta) { struct iwl_mvm_tid_data *tid_data; /* * In AP mode, tid can be equal to IWL_MAX_TID_COUNT * when the frame is not QoS */ if (WARN_ON_ONCE(tid > IWL_MAX_TID_COUNT)) { IWL_ERR(mvm, "tid exceeds max TID count: %d/%d\n", tid, IWL_MAX_TID_COUNT); return; } else if (tid == IWL_MAX_TID_COUNT) { return; } tid_data = &mvmsta->tid_data[tid]; if (mvmsta->sta_state >= IEEE80211_STA_AUTHORIZED && tid_data->state == IWL_AGG_OFF && (lq_sta->tx_agg_tid_en & BIT(tid)) && tid_data->tx_count_last >= IWL_MVM_RS_AGG_START_THRESHOLD) { IWL_DEBUG_RATE(mvm, "try to aggregate tid %d\n", tid); if (rs_tl_turn_on_agg_for_tid(mvm, lq_sta, tid, sta) == 0) tid_data->state = IWL_AGG_QUEUED; } } static inline int get_num_of_ant_from_rate(u32 rate_n_flags) { return !!(rate_n_flags & RATE_MCS_ANT_A_MSK) + !!(rate_n_flags & RATE_MCS_ANT_B_MSK) + !!(rate_n_flags & RATE_MCS_ANT_C_MSK); } /* * Static function to get the expected throughput from an iwl_scale_tbl_info * that wraps a NULL pointer check */ static s32 get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index) { if (tbl->expected_tpt) return tbl->expected_tpt[rs_index]; return 0; } /** * rs_collect_tx_data - Update the success/failure sliding window * * We keep a sliding window of the last 62 packets transmitted * at this rate. window->data contains the bitmask of successful * packets. */ static int _rs_collect_tx_data(struct iwl_mvm *mvm, struct iwl_scale_tbl_info *tbl, int scale_index, int attempts, int successes, struct iwl_rate_scale_data *window) { static const u64 mask = (((u64)1) << (IWL_RATE_MAX_WINDOW - 1)); s32 fail_count, tpt; /* Get expected throughput */ tpt = get_expected_tpt(tbl, scale_index); /* * Keep track of only the latest 62 tx frame attempts in this rate's * history window; anything older isn't really relevant any more. * If we have filled up the sliding window, drop the oldest attempt; * if the oldest attempt (highest bit in bitmap) shows "success", * subtract "1" from the success counter (this is the main reason * we keep these bitmaps!). 
*/ while (attempts > 0) { if (window->counter >= IWL_RATE_MAX_WINDOW) { /* remove earliest */ window->counter = IWL_RATE_MAX_WINDOW - 1; if (window->data & mask) { window->data &= ~mask; window->success_counter--; } } /* Increment frames-attempted counter */ window->counter++; /* Shift bitmap by one frame to throw away oldest history */ window->data <<= 1; /* Mark the most recent #successes attempts as successful */ if (successes > 0) { window->success_counter++; window->data |= 0x1; successes--; } attempts--; } /* Calculate current success ratio, avoid divide-by-0! */ if (window->counter > 0) window->success_ratio = 128 * (100 * window->success_counter) / window->counter; else window->success_ratio = IWL_INVALID_VALUE; fail_count = window->counter - window->success_counter; /* Calculate average throughput, if we have enough history. */ if ((fail_count >= IWL_MVM_RS_RATE_MIN_FAILURE_TH) || (window->success_counter >= IWL_MVM_RS_RATE_MIN_SUCCESS_TH)) window->average_tpt = (window->success_ratio * tpt + 64) / 128; else window->average_tpt = IWL_INVALID_VALUE; return 0; } static int rs_collect_tpc_data(struct iwl_mvm *mvm, struct iwl_lq_sta *lq_sta, struct iwl_scale_tbl_info *tbl, int scale_index, int attempts, int successes, u8 reduced_txp) { struct iwl_rate_scale_data *window = NULL; if (WARN_ON_ONCE(reduced_txp > TPC_MAX_REDUCTION)) return -EINVAL; window = &tbl->tpc_win[reduced_txp]; return _rs_collect_tx_data(mvm, tbl, scale_index, attempts, successes, window); } static void rs_update_tid_tpt_stats(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, u8 tid, int successes) { struct iwl_mvm_tid_data *tid_data; if (tid >= IWL_MAX_TID_COUNT) return; tid_data = &mvmsta->tid_data[tid]; /* * Measure if there're enough successful transmits per second. * These statistics are used only to decide if we can start a * BA session, so it should be updated only when A-MPDU is * off. 
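* A sample window is closed either when a full second has elapsed or when
* IWL_MVM_RS_AGG_START_THRESHOLD transmits have already accumulated,
* whichever comes first; tx_count_last then holds the last complete sample.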
*/ if (tid_data->state != IWL_AGG_OFF) return; if (time_is_before_jiffies(tid_data->tpt_meas_start + HZ) || (tid_data->tx_count >= IWL_MVM_RS_AGG_START_THRESHOLD)) { tid_data->tx_count_last = tid_data->tx_count; tid_data->tx_count = 0; tid_data->tpt_meas_start = jiffies; } else { tid_data->tx_count += successes; } } static int rs_collect_tlc_data(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, u8 tid, struct iwl_scale_tbl_info *tbl, int scale_index, int attempts, int successes) { struct iwl_rate_scale_data *window = NULL; if (scale_index < 0 || scale_index >= IWL_RATE_COUNT) return -EINVAL; if (tbl->column != RS_COLUMN_INVALID) { struct lq_sta_pers *pers = &mvmsta->lq_sta.rs_drv.pers; pers->tx_stats[tbl->column][scale_index].total += attempts; pers->tx_stats[tbl->column][scale_index].success += successes; } rs_update_tid_tpt_stats(mvm, mvmsta, tid, successes); /* Select window for current tx bit rate */ window = &(tbl->win[scale_index]); return _rs_collect_tx_data(mvm, tbl, scale_index, attempts, successes, window); } /* Convert rs_rate object into ucode rate bitmask */ static u32 ucode_rate_from_rs_rate(struct iwl_mvm *mvm, struct rs_rate *rate) { u32 ucode_rate = 0; int index = rate->index; ucode_rate |= ((rate->ant << RATE_MCS_ANT_POS) & RATE_MCS_ANT_ABC_MSK); if (is_legacy(rate)) { ucode_rate |= iwl_rates[index].plcp; if (index >= IWL_FIRST_CCK_RATE && index <= IWL_LAST_CCK_RATE) ucode_rate |= RATE_MCS_CCK_MSK; return ucode_rate; } if (is_ht(rate)) { if (index < IWL_FIRST_HT_RATE || index > IWL_LAST_HT_RATE) { IWL_ERR(mvm, "Invalid HT rate index %d\n", index); index = IWL_LAST_HT_RATE; } ucode_rate |= RATE_MCS_HT_MSK; if (is_ht_siso(rate)) ucode_rate |= iwl_rates[index].plcp_ht_siso; else if (is_ht_mimo2(rate)) ucode_rate |= iwl_rates[index].plcp_ht_mimo2; else WARN_ON_ONCE(1); } else if (is_vht(rate)) { if (index < IWL_FIRST_VHT_RATE || index > IWL_LAST_VHT_RATE) { IWL_ERR(mvm, "Invalid VHT rate index %d\n", index); index = IWL_LAST_VHT_RATE; } ucode_rate |= RATE_MCS_VHT_MSK; if (is_vht_siso(rate)) ucode_rate |= iwl_rates[index].plcp_vht_siso; else if (is_vht_mimo2(rate)) ucode_rate |= iwl_rates[index].plcp_vht_mimo2; else WARN_ON_ONCE(1); } else { IWL_ERR(mvm, "Invalid rate->type %d\n", rate->type); } if (is_siso(rate) && rate->stbc) { /* To enable STBC we need to set both a flag and ANT_AB */ ucode_rate |= RATE_MCS_ANT_AB_MSK; ucode_rate |= RATE_MCS_STBC_MSK; } ucode_rate |= rate->bw; if (rate->sgi) ucode_rate |= RATE_MCS_SGI_MSK; if (rate->ldpc) ucode_rate |= RATE_MCS_LDPC_MSK; return ucode_rate; } /* Convert a ucode rate into an rs_rate object */ static int rs_rate_from_ucode_rate(const u32 ucode_rate, enum nl80211_band band, struct rs_rate *rate) { u32 ant_msk = ucode_rate & RATE_MCS_ANT_ABC_MSK; u8 num_of_ant = get_num_of_ant_from_rate(ucode_rate); u8 nss; memset(rate, 0, sizeof(*rate)); rate->index = iwl_hwrate_to_plcp_idx(ucode_rate); if (rate->index == IWL_RATE_INVALID) return -EINVAL; rate->ant = (ant_msk >> RATE_MCS_ANT_POS); /* Legacy */ if (!(ucode_rate & RATE_MCS_HT_MSK) && !(ucode_rate & RATE_MCS_VHT_MSK) && !(ucode_rate & RATE_MCS_HE_MSK)) { if (num_of_ant == 1) { if (band == NL80211_BAND_5GHZ) rate->type = LQ_LEGACY_A; else rate->type = LQ_LEGACY_G; } return 0; } /* HT, VHT or HE */ if (ucode_rate & RATE_MCS_SGI_MSK) rate->sgi = true; if (ucode_rate & RATE_MCS_LDPC_MSK) rate->ldpc = true; if (ucode_rate & RATE_MCS_STBC_MSK) rate->stbc = true; if (ucode_rate & RATE_MCS_BF_MSK) rate->bfer = true; rate->bw = ucode_rate & RATE_MCS_CHAN_WIDTH_MSK; if (ucode_rate & 
RATE_MCS_HT_MSK) { nss = ((ucode_rate & RATE_HT_MCS_NSS_MSK) >> RATE_HT_MCS_NSS_POS) + 1; if (nss == 1) { rate->type = LQ_HT_SISO; WARN_ONCE(!rate->stbc && !rate->bfer && num_of_ant != 1, "stbc %d bfer %d", rate->stbc, rate->bfer); } else if (nss == 2) { rate->type = LQ_HT_MIMO2; WARN_ON_ONCE(num_of_ant != 2); } else { WARN_ON_ONCE(1); } } else if (ucode_rate & RATE_MCS_VHT_MSK) { nss = ((ucode_rate & RATE_VHT_MCS_NSS_MSK) >> RATE_VHT_MCS_NSS_POS) + 1; if (nss == 1) { rate->type = LQ_VHT_SISO; WARN_ONCE(!rate->stbc && !rate->bfer && num_of_ant != 1, "stbc %d bfer %d", rate->stbc, rate->bfer); } else if (nss == 2) { rate->type = LQ_VHT_MIMO2; WARN_ON_ONCE(num_of_ant != 2); } else { WARN_ON_ONCE(1); } } else if (ucode_rate & RATE_MCS_HE_MSK) { nss = ((ucode_rate & RATE_VHT_MCS_NSS_MSK) >> RATE_VHT_MCS_NSS_POS) + 1; if (nss == 1) { rate->type = LQ_HE_SISO; WARN_ONCE(!rate->stbc && !rate->bfer && num_of_ant != 1, "stbc %d bfer %d", rate->stbc, rate->bfer); } else if (nss == 2) { rate->type = LQ_HE_MIMO2; WARN_ON_ONCE(num_of_ant != 2); } else { WARN_ON_ONCE(1); } } WARN_ON_ONCE(rate->bw == RATE_MCS_CHAN_WIDTH_80 && !is_he(rate) && !is_vht(rate)); return 0; } /* switch to another antenna/antennas and return 1 */ /* if no other valid antenna found, return 0 */ static int rs_toggle_antenna(u32 valid_ant, struct rs_rate *rate) { u8 new_ant_type; if (!rate->ant || WARN_ON_ONCE(rate->ant & ANT_C)) return 0; if (!rs_is_valid_ant(valid_ant, rate->ant)) return 0; new_ant_type = ant_toggle_lookup[rate->ant]; while ((new_ant_type != rate->ant) && !rs_is_valid_ant(valid_ant, new_ant_type)) new_ant_type = ant_toggle_lookup[new_ant_type]; if (new_ant_type == rate->ant) return 0; rate->ant = new_ant_type; return 1; } static u16 rs_get_supported_rates(struct iwl_lq_sta *lq_sta, struct rs_rate *rate) { if (is_legacy(rate)) return lq_sta->active_legacy_rate; else if (is_siso(rate)) return lq_sta->active_siso_rate; else if (is_mimo2(rate)) return lq_sta->active_mimo2_rate; WARN_ON_ONCE(1); return 0; } static u16 rs_get_adjacent_rate(struct iwl_mvm *mvm, u8 index, u16 rate_mask, int rate_type) { u8 high = IWL_RATE_INVALID; u8 low = IWL_RATE_INVALID; /* 802.11A or ht walks to the next literal adjacent rate in * the rate table */ if (is_type_a_band(rate_type) || !is_type_legacy(rate_type)) { int i; u32 mask; /* Find the previous rate that is in the rate mask */ i = index - 1; if (i >= 0) mask = BIT(i); for (; i >= 0; i--, mask >>= 1) { if (rate_mask & mask) { low = i; break; } } /* Find the next rate that is in the rate mask */ i = index + 1; for (mask = (1 << i); i < IWL_RATE_COUNT; i++, mask <<= 1) { if (rate_mask & mask) { high = i; break; } } return (high << 8) | low; } low = index; while (low != IWL_RATE_INVALID) { low = iwl_rates[low].prev_rs; if (low == IWL_RATE_INVALID) break; if (rate_mask & (1 << low)) break; } high = index; while (high != IWL_RATE_INVALID) { high = iwl_rates[high].next_rs; if (high == IWL_RATE_INVALID) break; if (rate_mask & (1 << high)) break; } return (high << 8) | low; } static inline bool rs_rate_supported(struct iwl_lq_sta *lq_sta, struct rs_rate *rate) { return BIT(rate->index) & rs_get_supported_rates(lq_sta, rate); } /* Get the next supported lower rate in the current column. 
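* (rs_get_adjacent_rate() packs its result as (high << 8) | low, so the
* lower rate is extracted below by masking with 0xff.)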
* Return true if bottom rate in the current column was reached */ static bool rs_get_lower_rate_in_column(struct iwl_lq_sta *lq_sta, struct rs_rate *rate) { u8 low; u16 high_low; u16 rate_mask; struct iwl_mvm *mvm = lq_sta->pers.drv; rate_mask = rs_get_supported_rates(lq_sta, rate); high_low = rs_get_adjacent_rate(mvm, rate->index, rate_mask, rate->type); low = high_low & 0xff; /* Bottom rate of column reached */ if (low == IWL_RATE_INVALID) return true; rate->index = low; return false; } /* Get the next rate to use following a column downgrade */ static void rs_get_lower_rate_down_column(struct iwl_lq_sta *lq_sta, struct rs_rate *rate) { struct iwl_mvm *mvm = lq_sta->pers.drv; if (is_legacy(rate)) { /* No column to downgrade from Legacy */ return; } else if (is_siso(rate)) { /* Downgrade to Legacy if we were in SISO */ if (lq_sta->band == NL80211_BAND_5GHZ) rate->type = LQ_LEGACY_A; else rate->type = LQ_LEGACY_G; rate->bw = RATE_MCS_CHAN_WIDTH_20; WARN_ON_ONCE(rate->index < IWL_RATE_MCS_0_INDEX || rate->index > IWL_RATE_MCS_9_INDEX); rate->index = rs_ht_to_legacy[rate->index]; rate->ldpc = false; } else { /* Downgrade to SISO with same MCS if in MIMO */ rate->type = is_vht_mimo2(rate) ? LQ_VHT_SISO : LQ_HT_SISO; } if (num_of_ant(rate->ant) > 1) rate->ant = first_antenna(iwl_mvm_get_valid_tx_ant(mvm)); /* Relevant in both switching to SISO or Legacy */ rate->sgi = false; if (!rs_rate_supported(lq_sta, rate)) rs_get_lower_rate_in_column(lq_sta, rate); } /* Check if both rates share the same column */ static inline bool rs_rate_column_match(struct rs_rate *a, struct rs_rate *b) { bool ant_match; if (a->stbc || a->bfer) ant_match = (b->ant == ANT_A || b->ant == ANT_B); else ant_match = (a->ant == b->ant); return (a->type == b->type) && (a->bw == b->bw) && (a->sgi == b->sgi) && ant_match; } static inline enum rs_column rs_get_column_from_rate(struct rs_rate *rate) { if (is_legacy(rate)) { if (rate->ant == ANT_A) return RS_COLUMN_LEGACY_ANT_A; if (rate->ant == ANT_B) return RS_COLUMN_LEGACY_ANT_B; goto err; } if (is_siso(rate)) { if (rate->ant == ANT_A || rate->stbc || rate->bfer) return rate->sgi ? RS_COLUMN_SISO_ANT_A_SGI : RS_COLUMN_SISO_ANT_A; if (rate->ant == ANT_B) return rate->sgi ? RS_COLUMN_SISO_ANT_B_SGI : RS_COLUMN_SISO_ANT_B; goto err; } if (is_mimo(rate)) return rate->sgi ? RS_COLUMN_MIMO2_SGI : RS_COLUMN_MIMO2; err: return RS_COLUMN_INVALID; } static u8 rs_get_tid(struct ieee80211_hdr *hdr) { u8 tid = IWL_MAX_TID_COUNT; if (ieee80211_is_data_qos(hdr->frame_control)) { u8 *qc = ieee80211_get_qos_ctl(hdr); tid = qc[0] & 0xf; } if (unlikely(tid > IWL_MAX_TID_COUNT)) tid = IWL_MAX_TID_COUNT; return tid; } /* * mac80211 sends us Tx status */ static void rs_drv_mac80211_tx_status(void *mvm_r, struct ieee80211_supported_band *sband, struct ieee80211_sta *sta, void *priv_sta, struct sk_buff *skb) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct iwl_op_mode *op_mode = mvm_r; struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); if (!mvmsta->vif) return; if (!ieee80211_is_data(hdr->frame_control) || info->flags & IEEE80211_TX_CTL_NO_ACK) return; iwl_mvm_rs_tx_status(mvm, sta, rs_get_tid(hdr), info, ieee80211_is_qos_nullfunc(hdr->frame_control)); } /* * Begin a period of staying with a selected modulation mode. * Set "stay_in_tbl" flag to prevent any mode switches. * Set frame tx success limits according to legacy vs. 
high-throughput, * and reset overall (spanning all rates) tx success history statistics. * These control how long we stay using same modulation mode before * searching for a new mode. */ static void rs_set_stay_in_table(struct iwl_mvm *mvm, u8 is_legacy, struct iwl_lq_sta *lq_sta) { IWL_DEBUG_RATE(mvm, "Moving to RS_STATE_STAY_IN_COLUMN\n"); lq_sta->rs_state = RS_STATE_STAY_IN_COLUMN; if (is_legacy) { lq_sta->table_count_limit = IWL_MVM_RS_LEGACY_TABLE_COUNT; lq_sta->max_failure_limit = IWL_MVM_RS_LEGACY_FAILURE_LIMIT; lq_sta->max_success_limit = IWL_MVM_RS_LEGACY_SUCCESS_LIMIT; } else { lq_sta->table_count_limit = IWL_MVM_RS_NON_LEGACY_TABLE_COUNT; lq_sta->max_failure_limit = IWL_MVM_RS_NON_LEGACY_FAILURE_LIMIT; lq_sta->max_success_limit = IWL_MVM_RS_NON_LEGACY_SUCCESS_LIMIT; } lq_sta->table_count = 0; lq_sta->total_failed = 0; lq_sta->total_success = 0; lq_sta->flush_timer = jiffies; lq_sta->visited_columns = 0; } static inline int rs_get_max_rate_from_mask(unsigned long rate_mask) { if (rate_mask) return find_last_bit(&rate_mask, BITS_PER_LONG); return IWL_RATE_INVALID; } static int rs_get_max_allowed_rate(struct iwl_lq_sta *lq_sta, const struct rs_tx_column *column) { switch (column->mode) { case RS_LEGACY: return lq_sta->max_legacy_rate_idx; case RS_SISO: return lq_sta->max_siso_rate_idx; case RS_MIMO2: return lq_sta->max_mimo2_rate_idx; default: WARN_ON_ONCE(1); } return lq_sta->max_legacy_rate_idx; } static const u16 *rs_get_expected_tpt_table(struct iwl_lq_sta *lq_sta, const struct rs_tx_column *column, u32 bw) { /* Used to choose among HT tables */ const u16 (*ht_tbl_pointer)[IWL_RATE_COUNT]; if (WARN_ON_ONCE(column->mode != RS_LEGACY && column->mode != RS_SISO && column->mode != RS_MIMO2)) return expected_tpt_legacy; /* Legacy rates have only one table */ if (column->mode == RS_LEGACY) return expected_tpt_legacy; ht_tbl_pointer = expected_tpt_mimo2_20MHz; /* Choose among many HT tables depending on number of streams * (SISO/MIMO2), channel width (20/40/80), SGI, and aggregation * status */ if (column->mode == RS_SISO) { switch (bw) { case RATE_MCS_CHAN_WIDTH_20: ht_tbl_pointer = expected_tpt_siso_20MHz; break; case RATE_MCS_CHAN_WIDTH_40: ht_tbl_pointer = expected_tpt_siso_40MHz; break; case RATE_MCS_CHAN_WIDTH_80: ht_tbl_pointer = expected_tpt_siso_80MHz; break; case RATE_MCS_CHAN_WIDTH_160: ht_tbl_pointer = expected_tpt_siso_160MHz; break; default: WARN_ON_ONCE(1); } } else if (column->mode == RS_MIMO2) { switch (bw) { case RATE_MCS_CHAN_WIDTH_20: ht_tbl_pointer = expected_tpt_mimo2_20MHz; break; case RATE_MCS_CHAN_WIDTH_40: ht_tbl_pointer = expected_tpt_mimo2_40MHz; break; case RATE_MCS_CHAN_WIDTH_80: ht_tbl_pointer = expected_tpt_mimo2_80MHz; break; case RATE_MCS_CHAN_WIDTH_160: ht_tbl_pointer = expected_tpt_mimo2_160MHz; break; default: WARN_ON_ONCE(1); } } else { WARN_ON_ONCE(1); } if (!column->sgi && !lq_sta->is_agg) /* Normal */ return ht_tbl_pointer[0]; else if (column->sgi && !lq_sta->is_agg) /* SGI */ return ht_tbl_pointer[1]; else if (!column->sgi && lq_sta->is_agg) /* AGG */ return ht_tbl_pointer[2]; else /* AGG+SGI */ return ht_tbl_pointer[3]; } static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta, struct iwl_scale_tbl_info *tbl) { struct rs_rate *rate = &tbl->rate; const struct rs_tx_column *column = &rs_tx_columns[tbl->column]; tbl->expected_tpt = rs_get_expected_tpt_table(lq_sta, column, rate->bw); } static s32 rs_get_best_rate(struct iwl_mvm *mvm, struct iwl_lq_sta *lq_sta, struct iwl_scale_tbl_info *tbl, /* "search" */ unsigned long rate_mask, s8 
index) { struct iwl_scale_tbl_info *active_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); s32 success_ratio = active_tbl->win[index].success_ratio; u16 expected_current_tpt = active_tbl->expected_tpt[index]; const u16 *tpt_tbl = tbl->expected_tpt; u16 high_low; u32 target_tpt; int rate_idx; if (success_ratio >= RS_PERCENT(IWL_MVM_RS_SR_NO_DECREASE)) { target_tpt = 100 * expected_current_tpt; IWL_DEBUG_RATE(mvm, "SR %d high. Find rate exceeding EXPECTED_CURRENT %d\n", success_ratio, target_tpt); } else { target_tpt = lq_sta->last_tpt; IWL_DEBUG_RATE(mvm, "SR %d not that good. Find rate exceeding ACTUAL_TPT %d\n", success_ratio, target_tpt); } rate_idx = find_first_bit(&rate_mask, BITS_PER_LONG); while (rate_idx != IWL_RATE_INVALID) { if (target_tpt < (100 * tpt_tbl[rate_idx])) break; high_low = rs_get_adjacent_rate(mvm, rate_idx, rate_mask, tbl->rate.type); rate_idx = (high_low >> 8) & 0xff; } IWL_DEBUG_RATE(mvm, "Best rate found %d target_tp %d expected_new %d\n", rate_idx, target_tpt, rate_idx != IWL_RATE_INVALID ? 100 * tpt_tbl[rate_idx] : IWL_INVALID_VALUE); return rate_idx; } static u32 rs_bw_from_sta_bw(struct ieee80211_sta *sta) { struct ieee80211_sta_vht_cap *sta_vht_cap = &sta->vht_cap; struct ieee80211_vht_cap vht_cap = { .vht_cap_info = cpu_to_le32(sta_vht_cap->cap), .supp_mcs = sta_vht_cap->vht_mcs, }; switch (sta->bandwidth) { case IEEE80211_STA_RX_BW_160: /* * Don't use 160 MHz if VHT extended NSS support * says we cannot use 2 streams, we don't want to * deal with this. * We only check MCS 0 - they will support that if * we got here at all and we don't care which MCS, * we want to determine a more global state. */ if (ieee80211_get_vht_max_nss(&vht_cap, IEEE80211_VHT_CHANWIDTH_160MHZ, 0, true) < sta->rx_nss) return RATE_MCS_CHAN_WIDTH_80; return RATE_MCS_CHAN_WIDTH_160; case IEEE80211_STA_RX_BW_80: return RATE_MCS_CHAN_WIDTH_80; case IEEE80211_STA_RX_BW_40: return RATE_MCS_CHAN_WIDTH_40; case IEEE80211_STA_RX_BW_20: default: return RATE_MCS_CHAN_WIDTH_20; } } /* * Check whether we should continue using same modulation mode, or * begin search for a new mode, based on: * 1) # tx successes or failures while using this mode * 2) # times calling this function * 3) elapsed time in this mode (not used, for now) */ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search) { struct iwl_scale_tbl_info *tbl; int active_tbl; int flush_interval_passed = 0; struct iwl_mvm *mvm; mvm = lq_sta->pers.drv; active_tbl = lq_sta->active_tbl; tbl = &(lq_sta->lq_info[active_tbl]); /* If we've been disallowing search, see if we should now allow it */ if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN) { /* Elapsed time using current modulation mode */ if (lq_sta->flush_timer) flush_interval_passed = time_after(jiffies, (unsigned long)(lq_sta->flush_timer + (IWL_MVM_RS_STAY_IN_COLUMN_TIMEOUT * HZ))); /* * Check if we should allow search for new modulation mode. * If many frames have failed or succeeded, or we've used * this same modulation for a long time, allow search, and * reset history stats that keep track of whether we should * allow a new search. Also (below) reset all bitmaps and * stats in active history. 
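* In short: a column is abandoned for a new search after too many
* failures, too many successes, or once IWL_MVM_RS_STAY_IN_COLUMN_TIMEOUT
* seconds have elapsed in it, whichever comes first.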
*/ if (force_search || (lq_sta->total_failed > lq_sta->max_failure_limit) || (lq_sta->total_success > lq_sta->max_success_limit) || ((!lq_sta->search_better_tbl) && (lq_sta->flush_timer) && (flush_interval_passed))) { IWL_DEBUG_RATE(mvm, "LQ: stay is expired %d %d %d\n", lq_sta->total_failed, lq_sta->total_success, flush_interval_passed); /* Allow search for new mode */ lq_sta->rs_state = RS_STATE_SEARCH_CYCLE_STARTED; IWL_DEBUG_RATE(mvm, "Moving to RS_STATE_SEARCH_CYCLE_STARTED\n"); lq_sta->total_failed = 0; lq_sta->total_success = 0; lq_sta->flush_timer = 0; /* mark the current column as visited */ lq_sta->visited_columns = BIT(tbl->column); /* * Else if we've used this modulation mode enough repetitions * (regardless of elapsed time or success/failure), reset * history bitmaps and rate-specific stats for all rates in * active table. */ } else { lq_sta->table_count++; if (lq_sta->table_count >= lq_sta->table_count_limit) { lq_sta->table_count = 0; IWL_DEBUG_RATE(mvm, "LQ: stay in table clear win\n"); rs_rate_scale_clear_tbl_windows(mvm, tbl); } } /* If transitioning to allow "search", reset all history * bitmaps and stats in active table (this will become the new * "search" table). */ if (lq_sta->rs_state == RS_STATE_SEARCH_CYCLE_STARTED) { rs_rate_scale_clear_tbl_windows(mvm, tbl); } } } static void rs_set_amsdu_len(struct iwl_mvm *mvm, struct ieee80211_sta *sta, struct iwl_scale_tbl_info *tbl, enum rs_action scale_action) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); int i; /* * In case TLC offload is not active amsdu_enabled is either 0xFFFF * or 0, since there is no per-TID alg. */ if ((!is_vht(&tbl->rate) && !is_ht(&tbl->rate)) || tbl->rate.index < IWL_RATE_MCS_5_INDEX || scale_action == RS_ACTION_DOWNSCALE) mvmsta->amsdu_enabled = 0; else mvmsta->amsdu_enabled = 0xFFFF; if (mvmsta->vif->bss_conf.he_support && !iwlwifi_mod_params.disable_11ax) mvmsta->max_amsdu_len = sta->max_amsdu_len; else mvmsta->max_amsdu_len = min_t(int, sta->max_amsdu_len, 8500); sta->max_rc_amsdu_len = mvmsta->max_amsdu_len; for (i = 0; i < IWL_MAX_TID_COUNT; i++) { if (mvmsta->amsdu_enabled) sta->max_tid_amsdu_len[i] = iwl_mvm_max_amsdu_size(mvm, sta, i); else /* * Not so elegant, but this will effectively * prevent AMSDU on this TID */ sta->max_tid_amsdu_len[i] = 1; } } /* * setup rate table in uCode */ static void rs_update_rate_tbl(struct iwl_mvm *mvm, struct ieee80211_sta *sta, struct iwl_lq_sta *lq_sta, struct iwl_scale_tbl_info *tbl) { rs_fill_lq_cmd(mvm, sta, lq_sta, &tbl->rate); iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq); } static bool rs_tweak_rate_tbl(struct iwl_mvm *mvm, struct ieee80211_sta *sta, struct iwl_lq_sta *lq_sta, struct iwl_scale_tbl_info *tbl, enum rs_action scale_action) { if (rs_bw_from_sta_bw(sta) != RATE_MCS_CHAN_WIDTH_80) return false; if (!is_vht_siso(&tbl->rate)) return false; if ((tbl->rate.bw == RATE_MCS_CHAN_WIDTH_80) && (tbl->rate.index == IWL_RATE_MCS_0_INDEX) && (scale_action == RS_ACTION_DOWNSCALE)) { tbl->rate.bw = RATE_MCS_CHAN_WIDTH_20; tbl->rate.index = IWL_RATE_MCS_4_INDEX; IWL_DEBUG_RATE(mvm, "Switch 80Mhz SISO MCS0 -> 20Mhz MCS4\n"); goto tweaked; } /* Go back to 80Mhz MCS1 only if we've established that 20Mhz MCS5 is * sustainable, i.e. we're past the test window. We can't go back * if MCS5 is just tested as this will happen always after switching * to 20Mhz MCS4 because the rate stats are cleared. 
*/ if ((tbl->rate.bw == RATE_MCS_CHAN_WIDTH_20) && (((tbl->rate.index == IWL_RATE_MCS_5_INDEX) && (scale_action == RS_ACTION_STAY)) || ((tbl->rate.index > IWL_RATE_MCS_5_INDEX) && (scale_action == RS_ACTION_UPSCALE)))) { tbl->rate.bw = RATE_MCS_CHAN_WIDTH_80; tbl->rate.index = IWL_RATE_MCS_1_INDEX; IWL_DEBUG_RATE(mvm, "Switch 20Mhz SISO MCS5 -> 80Mhz MCS1\n"); goto tweaked; } return false; tweaked: rs_set_expected_tpt_table(lq_sta, tbl); rs_rate_scale_clear_tbl_windows(mvm, tbl); return true; } static enum rs_column rs_get_next_column(struct iwl_mvm *mvm, struct iwl_lq_sta *lq_sta, struct ieee80211_sta *sta, struct iwl_scale_tbl_info *tbl) { int i, j, max_rate; enum rs_column next_col_id; const struct rs_tx_column *curr_col = &rs_tx_columns[tbl->column]; const struct rs_tx_column *next_col; allow_column_func_t allow_func; u8 valid_ants = iwl_mvm_get_valid_tx_ant(mvm); const u16 *expected_tpt_tbl; u16 tpt, max_expected_tpt; for (i = 0; i < MAX_NEXT_COLUMNS; i++) { next_col_id = curr_col->next_columns[i]; if (next_col_id == RS_COLUMN_INVALID) continue; if (lq_sta->visited_columns & BIT(next_col_id)) { IWL_DEBUG_RATE(mvm, "Skip already visited column %d\n", next_col_id); continue; } next_col = &rs_tx_columns[next_col_id]; if (!rs_is_valid_ant(valid_ants, next_col->ant)) { IWL_DEBUG_RATE(mvm, "Skip column %d as ANT config isn't supported by chip. valid_ants 0x%x column ant 0x%x\n", next_col_id, valid_ants, next_col->ant); continue; } for (j = 0; j < MAX_COLUMN_CHECKS; j++) { allow_func = next_col->checks[j]; if (allow_func && !allow_func(mvm, sta, &tbl->rate, next_col)) break; } if (j != MAX_COLUMN_CHECKS) { IWL_DEBUG_RATE(mvm, "Skip column %d: not allowed (check %d failed)\n", next_col_id, j); continue; } tpt = lq_sta->last_tpt / 100; expected_tpt_tbl = rs_get_expected_tpt_table(lq_sta, next_col, rs_bw_from_sta_bw(sta)); if (WARN_ON_ONCE(!expected_tpt_tbl)) continue; max_rate = rs_get_max_allowed_rate(lq_sta, next_col); if (max_rate == IWL_RATE_INVALID) { IWL_DEBUG_RATE(mvm, "Skip column %d: no rate is allowed in this column\n", next_col_id); continue; } max_expected_tpt = expected_tpt_tbl[max_rate]; if (tpt >= max_expected_tpt) { IWL_DEBUG_RATE(mvm, "Skip column %d: can't beat current TPT. Max expected %d current %d\n", next_col_id, max_expected_tpt, tpt); continue; } IWL_DEBUG_RATE(mvm, "Found potential column %d. Max expected %d current %d\n", next_col_id, max_expected_tpt, tpt); break; } if (i == MAX_NEXT_COLUMNS) return RS_COLUMN_INVALID; return next_col_id; } static int rs_switch_to_column(struct iwl_mvm *mvm, struct iwl_lq_sta *lq_sta, struct ieee80211_sta *sta, enum rs_column col_id) { struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); struct iwl_scale_tbl_info *search_tbl = &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]); struct rs_rate *rate = &search_tbl->rate; const struct rs_tx_column *column = &rs_tx_columns[col_id]; const struct rs_tx_column *curr_column = &rs_tx_columns[tbl->column]; unsigned long rate_mask = 0; u32 rate_idx = 0; memcpy(search_tbl, tbl, offsetof(struct iwl_scale_tbl_info, win)); rate->sgi = column->sgi; rate->ant = column->ant; if (column->mode == RS_LEGACY) { if (lq_sta->band == NL80211_BAND_5GHZ) rate->type = LQ_LEGACY_A; else rate->type = LQ_LEGACY_G; rate->bw = RATE_MCS_CHAN_WIDTH_20; rate->ldpc = false; rate_mask = lq_sta->active_legacy_rate; } else if (column->mode == RS_SISO) { rate->type = lq_sta->is_vht ? 
LQ_VHT_SISO : LQ_HT_SISO; rate_mask = lq_sta->active_siso_rate; } else if (column->mode == RS_MIMO2) { rate->type = lq_sta->is_vht ? LQ_VHT_MIMO2 : LQ_HT_MIMO2; rate_mask = lq_sta->active_mimo2_rate; } else { WARN_ONCE(1, "Bad column mode"); } if (column->mode != RS_LEGACY) { rate->bw = rs_bw_from_sta_bw(sta); rate->ldpc = lq_sta->ldpc; } search_tbl->column = col_id; rs_set_expected_tpt_table(lq_sta, search_tbl); lq_sta->visited_columns |= BIT(col_id); /* Get the best matching rate if we're changing modes. e.g. * SISO->MIMO, LEGACY->SISO, MIMO->SISO */ if (curr_column->mode != column->mode) { rate_idx = rs_get_best_rate(mvm, lq_sta, search_tbl, rate_mask, rate->index); if ((rate_idx == IWL_RATE_INVALID) || !(BIT(rate_idx) & rate_mask)) { IWL_DEBUG_RATE(mvm, "cannot switch with index %d" " rate mask %lx\n", rate_idx, rate_mask); goto err; } rate->index = rate_idx; } IWL_DEBUG_RATE(mvm, "Switched to column %d: Index %d\n", col_id, rate->index); return 0; err: rate->type = LQ_NONE; return -1; } static enum rs_action rs_get_rate_action(struct iwl_mvm *mvm, struct iwl_scale_tbl_info *tbl, s32 sr, int low, int high, int current_tpt, int low_tpt, int high_tpt) { enum rs_action action = RS_ACTION_STAY; if ((sr <= RS_PERCENT(IWL_MVM_RS_SR_FORCE_DECREASE)) || (current_tpt == 0)) { IWL_DEBUG_RATE(mvm, "Decrease rate because of low SR\n"); return RS_ACTION_DOWNSCALE; } if ((low_tpt == IWL_INVALID_VALUE) && (high_tpt == IWL_INVALID_VALUE) && (high != IWL_RATE_INVALID)) { IWL_DEBUG_RATE(mvm, "No data about high/low rates. Increase rate\n"); return RS_ACTION_UPSCALE; } if ((high_tpt == IWL_INVALID_VALUE) && (high != IWL_RATE_INVALID) && (low_tpt != IWL_INVALID_VALUE) && (low_tpt < current_tpt)) { IWL_DEBUG_RATE(mvm, "No data about high rate and low rate is worse. Increase rate\n"); return RS_ACTION_UPSCALE; } if ((high_tpt != IWL_INVALID_VALUE) && (high_tpt > current_tpt)) { IWL_DEBUG_RATE(mvm, "Higher rate is better. Increase rate\n"); return RS_ACTION_UPSCALE; } if ((low_tpt != IWL_INVALID_VALUE) && (high_tpt != IWL_INVALID_VALUE) && (low_tpt < current_tpt) && (high_tpt < current_tpt)) { IWL_DEBUG_RATE(mvm, "Both high and low are worse. Maintain rate\n"); return RS_ACTION_STAY; } if ((low_tpt != IWL_INVALID_VALUE) && (low_tpt > current_tpt)) { IWL_DEBUG_RATE(mvm, "Lower rate is better\n"); action = RS_ACTION_DOWNSCALE; goto out; } if ((low_tpt == IWL_INVALID_VALUE) && (low != IWL_RATE_INVALID)) { IWL_DEBUG_RATE(mvm, "No data about lower rate\n"); action = RS_ACTION_DOWNSCALE; goto out; } IWL_DEBUG_RATE(mvm, "Maintain rate\n"); out: if ((action == RS_ACTION_DOWNSCALE) && (low != IWL_RATE_INVALID)) { if (sr >= RS_PERCENT(IWL_MVM_RS_SR_NO_DECREASE)) { IWL_DEBUG_RATE(mvm, "SR is above NO DECREASE. Avoid downscale\n"); action = RS_ACTION_STAY; } else if (current_tpt > (100 * tbl->expected_tpt[low])) { IWL_DEBUG_RATE(mvm, "Current TPT is higher than max expected in low rate.
Avoid downscale\n"); action = RS_ACTION_STAY; } else { IWL_DEBUG_RATE(mvm, "Decrease rate\n"); } } return action; } static bool rs_stbc_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, struct iwl_lq_sta *lq_sta) { /* Our chip supports Tx STBC and the peer is an HT/VHT STA which * supports STBC of at least 1*SS */ if (!lq_sta->stbc_capable) return false; if (!iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta)) return false; return true; } static void rs_get_adjacent_txp(struct iwl_mvm *mvm, int index, int *weaker, int *stronger) { *weaker = index + IWL_MVM_RS_TPC_TX_POWER_STEP; if (*weaker > TPC_MAX_REDUCTION) *weaker = TPC_INVALID; *stronger = index - IWL_MVM_RS_TPC_TX_POWER_STEP; if (*stronger < 0) *stronger = TPC_INVALID; } static bool rs_tpc_allowed(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct rs_rate *rate, enum nl80211_band band) { int index = rate->index; bool cam = (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM); bool sta_ps_disabled = (vif->type == NL80211_IFTYPE_STATION && !vif->bss_conf.ps); IWL_DEBUG_RATE(mvm, "cam: %d sta_ps_disabled %d\n", cam, sta_ps_disabled); /* * allow tpc only if power management is enabled, or bt coex * activity grade allows it and we are on 2.4 GHz. */ if ((cam || sta_ps_disabled) && !iwl_mvm_bt_coex_is_tpc_allowed(mvm, band)) return false; IWL_DEBUG_RATE(mvm, "check rate, table type: %d\n", rate->type); if (is_legacy(rate)) return index == IWL_RATE_54M_INDEX; if (is_ht(rate)) return index == IWL_RATE_MCS_7_INDEX; if (is_vht(rate)) return index == IWL_RATE_MCS_7_INDEX || index == IWL_RATE_MCS_8_INDEX || index == IWL_RATE_MCS_9_INDEX; WARN_ON_ONCE(1); return false; } enum tpc_action { TPC_ACTION_STAY, TPC_ACTION_DECREASE, TPC_ACTION_INCREASE, TPC_ACTION_NO_RESTRICTION, }; static enum tpc_action rs_get_tpc_action(struct iwl_mvm *mvm, s32 sr, int weak, int strong, int current_tpt, int weak_tpt, int strong_tpt) { /* stay until we have valid tpt */ if (current_tpt == IWL_INVALID_VALUE) { IWL_DEBUG_RATE(mvm, "no current tpt. stay.\n"); return TPC_ACTION_STAY; } /* Too many failures, increase txp */ if (sr <= RS_PERCENT(IWL_MVM_RS_TPC_SR_FORCE_INCREASE) || current_tpt == 0) { IWL_DEBUG_RATE(mvm, "increase txp because of weak SR\n"); return TPC_ACTION_NO_RESTRICTION; } /* try decreasing first if applicable */ if (sr >= RS_PERCENT(IWL_MVM_RS_TPC_SR_NO_INCREASE) && weak != TPC_INVALID) { if (weak_tpt == IWL_INVALID_VALUE && (strong_tpt == IWL_INVALID_VALUE || current_tpt >= strong_tpt)) { IWL_DEBUG_RATE(mvm, "no weak txp measurement. decrease txp\n"); return TPC_ACTION_DECREASE; } if (weak_tpt > current_tpt) { IWL_DEBUG_RATE(mvm, "lower txp has better tpt. decrease txp\n"); return TPC_ACTION_DECREASE; } } /* next, increase if needed */ if (sr < RS_PERCENT(IWL_MVM_RS_TPC_SR_NO_INCREASE) && strong != TPC_INVALID) { if (weak_tpt == IWL_INVALID_VALUE && strong_tpt != IWL_INVALID_VALUE && current_tpt < strong_tpt) { IWL_DEBUG_RATE(mvm, "higher txp has better tpt. increase txp\n"); return TPC_ACTION_INCREASE; } if (weak_tpt < current_tpt && (strong_tpt == IWL_INVALID_VALUE || strong_tpt > current_tpt)) { IWL_DEBUG_RATE(mvm, "lower txp has worse tpt. increase txp\n"); return TPC_ACTION_INCREASE; } } IWL_DEBUG_RATE(mvm, "no need to increase or decrease txp - stay\n"); return TPC_ACTION_STAY; } static bool rs_tpc_perform(struct iwl_mvm *mvm, struct ieee80211_sta *sta, struct iwl_lq_sta *lq_sta, struct iwl_scale_tbl_info *tbl) { struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); struct ieee80211_vif *vif = mvm_sta->vif; struct ieee80211_chanctx_conf *chanctx_conf; enum nl80211_band band; struct iwl_rate_scale_data *window; struct rs_rate *rate = &tbl->rate; enum tpc_action action; s32 sr; u8 cur = lq_sta->lq.reduced_tpc; int current_tpt; int weak, strong; int weak_tpt = IWL_INVALID_VALUE, strong_tpt = IWL_INVALID_VALUE; #ifdef CPTCFG_MAC80211_DEBUGFS if (lq_sta->pers.dbg_fixed_txp_reduction <= TPC_MAX_REDUCTION) { IWL_DEBUG_RATE(mvm, "fixed tpc: %d\n", lq_sta->pers.dbg_fixed_txp_reduction); lq_sta->lq.reduced_tpc = lq_sta->pers.dbg_fixed_txp_reduction; return cur != lq_sta->pers.dbg_fixed_txp_reduction; } #endif rcu_read_lock(); chanctx_conf = rcu_dereference(vif->chanctx_conf); if (WARN_ON(!chanctx_conf)) band = NUM_NL80211_BANDS; else band = chanctx_conf->def.chan->band; rcu_read_unlock(); if (!rs_tpc_allowed(mvm, vif, rate, band)) { IWL_DEBUG_RATE(mvm, "tpc is not allowed. remove txp restrictions\n"); lq_sta->lq.reduced_tpc = TPC_NO_REDUCTION; return cur != TPC_NO_REDUCTION; } rs_get_adjacent_txp(mvm, cur, &weak, &strong); /* Collect measured throughputs for current and adjacent rates */ window = tbl->tpc_win; sr = window[cur].success_ratio; current_tpt = window[cur].average_tpt; if (weak != TPC_INVALID) weak_tpt = window[weak].average_tpt; if (strong != TPC_INVALID) strong_tpt = window[strong].average_tpt; IWL_DEBUG_RATE(mvm, "(TPC: %d): cur_tpt %d SR %d weak %d strong %d weak_tpt %d strong_tpt %d\n", cur, current_tpt, sr, weak, strong, weak_tpt, strong_tpt); action = rs_get_tpc_action(mvm, sr, weak, strong, current_tpt, weak_tpt, strong_tpt); /* override actions if we are on the edge */ if (weak == TPC_INVALID && action == TPC_ACTION_DECREASE) { IWL_DEBUG_RATE(mvm, "already in lowest txp, stay\n"); action = TPC_ACTION_STAY; } else if (strong == TPC_INVALID && (action == TPC_ACTION_INCREASE || action == TPC_ACTION_NO_RESTRICTION)) { IWL_DEBUG_RATE(mvm, "already in highest txp, stay\n"); action = TPC_ACTION_STAY; } switch (action) { case TPC_ACTION_DECREASE: lq_sta->lq.reduced_tpc = weak; return true; case TPC_ACTION_INCREASE: lq_sta->lq.reduced_tpc = strong; return true; case TPC_ACTION_NO_RESTRICTION: lq_sta->lq.reduced_tpc = TPC_NO_REDUCTION; return true; case TPC_ACTION_STAY: /* do nothing */ break; } return false; } /* * Do rate scaling and search for new modulation mode.
*/ static void rs_rate_scale_perform(struct iwl_mvm *mvm, struct ieee80211_sta *sta, struct iwl_lq_sta *lq_sta, int tid, bool ndp) { int low = IWL_RATE_INVALID; int high = IWL_RATE_INVALID; int index; struct iwl_rate_scale_data *window = NULL; int current_tpt = IWL_INVALID_VALUE; int low_tpt = IWL_INVALID_VALUE; int high_tpt = IWL_INVALID_VALUE; u32 fail_count; enum rs_action scale_action = RS_ACTION_STAY; u16 rate_mask; u8 update_lq = 0; struct iwl_scale_tbl_info *tbl, *tbl1; u8 active_tbl = 0; u8 done_search = 0; u16 high_low; s32 sr; u8 prev_agg = lq_sta->is_agg; struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct rs_rate *rate; lq_sta->is_agg = !!mvmsta->agg_tids; /* * Select rate-scale / modulation-mode table to work with in * the rest of this function: "search" if searching for better * modulation mode, or "active" if doing rate scaling within a mode. */ if (!lq_sta->search_better_tbl) active_tbl = lq_sta->active_tbl; else active_tbl = 1 - lq_sta->active_tbl; tbl = &(lq_sta->lq_info[active_tbl]); rate = &tbl->rate; if (prev_agg != lq_sta->is_agg) { IWL_DEBUG_RATE(mvm, "Aggregation changed: prev %d current %d. Update expected TPT table\n", prev_agg, lq_sta->is_agg); rs_set_expected_tpt_table(lq_sta, tbl); rs_rate_scale_clear_tbl_windows(mvm, tbl); } /* current tx rate */ index = rate->index; /* rates available for this association, and for modulation mode */ rate_mask = rs_get_supported_rates(lq_sta, rate); if (!(BIT(index) & rate_mask)) { IWL_ERR(mvm, "Current Rate is not valid\n"); if (lq_sta->search_better_tbl) { /* revert to active table if search table is not valid*/ rate->type = LQ_NONE; lq_sta->search_better_tbl = 0; tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); rs_update_rate_tbl(mvm, sta, lq_sta, tbl); } return; } /* Get expected throughput table and history window for current rate */ if (!tbl->expected_tpt) { IWL_ERR(mvm, "tbl->expected_tpt is NULL\n"); return; } /* TODO: handle rate_idx_mask and rate_idx_mcs_mask */ window = &(tbl->win[index]); /* * If there is not enough history to calculate actual average * throughput, keep analyzing results of more tx frames, without * changing rate or mode (bypass most of the rest of this function). * Set up new rate table in uCode only if old rate is not supported * in current association (use new rate found above). */ fail_count = window->counter - window->success_counter; if ((fail_count < IWL_MVM_RS_RATE_MIN_FAILURE_TH) && (window->success_counter < IWL_MVM_RS_RATE_MIN_SUCCESS_TH)) { IWL_DEBUG_RATE(mvm, "%s: Test Window: succ %d total %d\n", rs_pretty_rate(rate), window->success_counter, window->counter); /* Can't calculate this yet; not enough history */ window->average_tpt = IWL_INVALID_VALUE; /* Should we stay with this modulation mode, * or search for a new one? */ rs_stay_in_table(lq_sta, false); return; } /* If we are searching for better modulation mode, check success. */ if (lq_sta->search_better_tbl) { /* If good success, continue using the "search" mode; * no need to send new link quality command, since we're * continuing to use the setup that we've been trying. 
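* The comparison below is against lq_sta->last_tpt, i.e. the throughput
* measured in the old ("active") column just before this search started.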
*/ if (window->average_tpt > lq_sta->last_tpt) { IWL_DEBUG_RATE(mvm, "SWITCHING TO NEW TABLE SR: %d " "cur-tpt %d old-tpt %d\n", window->success_ratio, window->average_tpt, lq_sta->last_tpt); /* Swap tables; "search" becomes "active" */ lq_sta->active_tbl = active_tbl; current_tpt = window->average_tpt; /* Else poor success; go back to mode in "active" table */ } else { IWL_DEBUG_RATE(mvm, "GOING BACK TO THE OLD TABLE: SR %d " "cur-tpt %d old-tpt %d\n", window->success_ratio, window->average_tpt, lq_sta->last_tpt); /* Nullify "search" table */ rate->type = LQ_NONE; /* Revert to "active" table */ active_tbl = lq_sta->active_tbl; tbl = &(lq_sta->lq_info[active_tbl]); /* Revert to "active" rate and throughput info */ index = tbl->rate.index; current_tpt = lq_sta->last_tpt; /* Need to set up a new rate table in uCode */ update_lq = 1; } /* Either way, we've made a decision; modulation mode * search is done, allow rate adjustment next time. */ lq_sta->search_better_tbl = 0; done_search = 1; /* Don't switch modes below! */ goto lq_update; } /* (Else) not in search of better modulation mode, try for better * starting rate, while staying in this mode. */ high_low = rs_get_adjacent_rate(mvm, index, rate_mask, rate->type); low = high_low & 0xff; high = (high_low >> 8) & 0xff; /* TODO: handle rate_idx_mask and rate_idx_mcs_mask */ sr = window->success_ratio; /* Collect measured throughputs for current and adjacent rates */ current_tpt = window->average_tpt; if (low != IWL_RATE_INVALID) low_tpt = tbl->win[low].average_tpt; if (high != IWL_RATE_INVALID) high_tpt = tbl->win[high].average_tpt; IWL_DEBUG_RATE(mvm, "%s: cur_tpt %d SR %d low %d high %d low_tpt %d high_tpt %d\n", rs_pretty_rate(rate), current_tpt, sr, low, high, low_tpt, high_tpt); scale_action = rs_get_rate_action(mvm, tbl, sr, low, high, current_tpt, low_tpt, high_tpt); /* Force a search in case BT doesn't like us being in MIMO */ if (is_mimo(rate) && !iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta)) { IWL_DEBUG_RATE(mvm, "BT Coex forbids MIMO. Search for new config\n"); rs_stay_in_table(lq_sta, true); goto lq_update; } switch (scale_action) { case RS_ACTION_DOWNSCALE: /* Decrease starting rate, update uCode's rate table */ if (low != IWL_RATE_INVALID) { update_lq = 1; index = low; } else { IWL_DEBUG_RATE(mvm, "At the bottom rate. Can't decrease\n"); } break; case RS_ACTION_UPSCALE: /* Increase starting rate, update uCode's rate table */ if (high != IWL_RATE_INVALID) { update_lq = 1; index = high; } else { IWL_DEBUG_RATE(mvm, "At the top rate. Can't increase\n"); } break; case RS_ACTION_STAY: /* No change */ if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN) update_lq = rs_tpc_perform(mvm, sta, lq_sta, tbl); break; default: break; } lq_update: /* Replace uCode's rate table for the destination station. 
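* This also re-evaluates the A-MSDU length, since the new rate may cross
* the MCS5 threshold checked by rs_set_amsdu_len().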
*/ if (update_lq) { tbl->rate.index = index; if (IWL_MVM_RS_80_20_FAR_RANGE_TWEAK) rs_tweak_rate_tbl(mvm, sta, lq_sta, tbl, scale_action); rs_set_amsdu_len(mvm, sta, tbl, scale_action); rs_update_rate_tbl(mvm, sta, lq_sta, tbl); } rs_stay_in_table(lq_sta, false); /* * Search for new modulation mode if we're: * 1) Not changing rates right now * 2) Not just finishing up a search * 3) Allowing a new search */ if (!update_lq && !done_search && lq_sta->rs_state == RS_STATE_SEARCH_CYCLE_STARTED && window->counter) { enum rs_column next_column; /* Save current throughput to compare with "search" throughput*/ lq_sta->last_tpt = current_tpt; IWL_DEBUG_RATE(mvm, "Start Search: update_lq %d done_search %d rs_state %d win->counter %d\n", update_lq, done_search, lq_sta->rs_state, window->counter); next_column = rs_get_next_column(mvm, lq_sta, sta, tbl); if (next_column != RS_COLUMN_INVALID) { int ret = rs_switch_to_column(mvm, lq_sta, sta, next_column); if (!ret) lq_sta->search_better_tbl = 1; } else { IWL_DEBUG_RATE(mvm, "No more columns to explore in search cycle. Go to RS_STATE_SEARCH_CYCLE_ENDED\n"); lq_sta->rs_state = RS_STATE_SEARCH_CYCLE_ENDED; } /* If new "search" mode was selected, set up in uCode table */ if (lq_sta->search_better_tbl) { /* Access the "search" table, clear its history. */ tbl = &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]); rs_rate_scale_clear_tbl_windows(mvm, tbl); /* Use new "search" start rate */ index = tbl->rate.index; rs_dump_rate(mvm, &tbl->rate, "Switch to SEARCH TABLE:"); rs_update_rate_tbl(mvm, sta, lq_sta, tbl); } else { done_search = 1; } } if (!ndp) rs_tl_turn_on_agg(mvm, mvmsta, tid, lq_sta, sta); if (done_search && lq_sta->rs_state == RS_STATE_SEARCH_CYCLE_ENDED) { tbl1 = &(lq_sta->lq_info[lq_sta->active_tbl]); rs_set_stay_in_table(mvm, is_legacy(&tbl1->rate), lq_sta); } } struct rs_init_rate_info { s8 rssi; u8 rate_idx; }; static const struct rs_init_rate_info rs_optimal_rates_24ghz_legacy[] = { { -60, IWL_RATE_54M_INDEX }, { -64, IWL_RATE_48M_INDEX }, { -68, IWL_RATE_36M_INDEX }, { -80, IWL_RATE_24M_INDEX }, { -84, IWL_RATE_18M_INDEX }, { -85, IWL_RATE_12M_INDEX }, { -86, IWL_RATE_11M_INDEX }, { -88, IWL_RATE_5M_INDEX }, { -90, IWL_RATE_2M_INDEX }, { S8_MIN, IWL_RATE_1M_INDEX }, }; static const struct rs_init_rate_info rs_optimal_rates_5ghz_legacy[] = { { -60, IWL_RATE_54M_INDEX }, { -64, IWL_RATE_48M_INDEX }, { -72, IWL_RATE_36M_INDEX }, { -80, IWL_RATE_24M_INDEX }, { -84, IWL_RATE_18M_INDEX }, { -85, IWL_RATE_12M_INDEX }, { -87, IWL_RATE_9M_INDEX }, { S8_MIN, IWL_RATE_6M_INDEX }, }; static const struct rs_init_rate_info rs_optimal_rates_ht[] = { { -60, IWL_RATE_MCS_7_INDEX }, { -64, IWL_RATE_MCS_6_INDEX }, { -68, IWL_RATE_MCS_5_INDEX }, { -72, IWL_RATE_MCS_4_INDEX }, { -80, IWL_RATE_MCS_3_INDEX }, { -84, IWL_RATE_MCS_2_INDEX }, { -85, IWL_RATE_MCS_1_INDEX }, { S8_MIN, IWL_RATE_MCS_0_INDEX}, }; /* MCS index 9 is not valid for 20MHz VHT channel width, * but is ok for 40, 80 and 160MHz channels. 
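 * (Hence the dedicated 20MHz table below tops out at MCS8, while the generic VHT table following it starts from MCS9.)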
*/ static const struct rs_init_rate_info rs_optimal_rates_vht_20mhz[] = { { -60, IWL_RATE_MCS_8_INDEX }, { -64, IWL_RATE_MCS_7_INDEX }, { -68, IWL_RATE_MCS_6_INDEX }, { -72, IWL_RATE_MCS_5_INDEX }, { -80, IWL_RATE_MCS_4_INDEX }, { -84, IWL_RATE_MCS_3_INDEX }, { -85, IWL_RATE_MCS_2_INDEX }, { -87, IWL_RATE_MCS_1_INDEX }, { S8_MIN, IWL_RATE_MCS_0_INDEX}, }; static const struct rs_init_rate_info rs_optimal_rates_vht[] = { { -60, IWL_RATE_MCS_9_INDEX }, { -64, IWL_RATE_MCS_8_INDEX }, { -68, IWL_RATE_MCS_7_INDEX }, { -72, IWL_RATE_MCS_6_INDEX }, { -80, IWL_RATE_MCS_5_INDEX }, { -84, IWL_RATE_MCS_4_INDEX }, { -85, IWL_RATE_MCS_3_INDEX }, { -87, IWL_RATE_MCS_2_INDEX }, { -88, IWL_RATE_MCS_1_INDEX }, { S8_MIN, IWL_RATE_MCS_0_INDEX }, }; #define IWL_RS_LOW_RSSI_THRESHOLD (-76) /* dBm */ /* Init the optimal rate based on STA caps * This combined with rssi is used to report the last tx rate * to userspace when we haven't transmitted enough frames. */ static void rs_init_optimal_rate(struct iwl_mvm *mvm, struct ieee80211_sta *sta, struct iwl_lq_sta *lq_sta) { struct rs_rate *rate = &lq_sta->optimal_rate; if (lq_sta->max_mimo2_rate_idx != IWL_RATE_INVALID) rate->type = lq_sta->is_vht ? LQ_VHT_MIMO2 : LQ_HT_MIMO2; else if (lq_sta->max_siso_rate_idx != IWL_RATE_INVALID) rate->type = lq_sta->is_vht ? LQ_VHT_SISO : LQ_HT_SISO; else if (lq_sta->band == NL80211_BAND_5GHZ) rate->type = LQ_LEGACY_A; else rate->type = LQ_LEGACY_G; rate->bw = rs_bw_from_sta_bw(sta); rate->sgi = rs_sgi_allow(mvm, sta, rate, NULL); /* ANT/LDPC/STBC aren't relevant for the rate reported to userspace */ if (is_mimo(rate)) { lq_sta->optimal_rate_mask = lq_sta->active_mimo2_rate; } else if (is_siso(rate)) { lq_sta->optimal_rate_mask = lq_sta->active_siso_rate; } else { lq_sta->optimal_rate_mask = lq_sta->active_legacy_rate; if (lq_sta->band == NL80211_BAND_5GHZ) { lq_sta->optimal_rates = rs_optimal_rates_5ghz_legacy; lq_sta->optimal_nentries = ARRAY_SIZE(rs_optimal_rates_5ghz_legacy); } else { lq_sta->optimal_rates = rs_optimal_rates_24ghz_legacy; lq_sta->optimal_nentries = ARRAY_SIZE(rs_optimal_rates_24ghz_legacy); } } if (is_vht(rate)) { if (rate->bw == RATE_MCS_CHAN_WIDTH_20) { lq_sta->optimal_rates = rs_optimal_rates_vht_20mhz; lq_sta->optimal_nentries = ARRAY_SIZE(rs_optimal_rates_vht_20mhz); } else { lq_sta->optimal_rates = rs_optimal_rates_vht; lq_sta->optimal_nentries = ARRAY_SIZE(rs_optimal_rates_vht); } } else if (is_ht(rate)) { lq_sta->optimal_rates = rs_optimal_rates_ht; lq_sta->optimal_nentries = ARRAY_SIZE(rs_optimal_rates_ht); } } /* Compute the optimal rate index based on RSSI */ static struct rs_rate *rs_get_optimal_rate(struct iwl_mvm *mvm, struct iwl_lq_sta *lq_sta) { struct rs_rate *rate = &lq_sta->optimal_rate; int i; rate->index = find_first_bit(&lq_sta->optimal_rate_mask, BITS_PER_LONG); for (i = 0; i < lq_sta->optimal_nentries; i++) { int rate_idx = lq_sta->optimal_rates[i].rate_idx; if ((lq_sta->pers.last_rssi >= lq_sta->optimal_rates[i].rssi) && (BIT(rate_idx) & lq_sta->optimal_rate_mask)) { rate->index = rate_idx; break; } } return rate; } /* Choose an initial legacy rate and antenna to use based on the RSSI * of last Rx */ static void rs_get_initial_rate(struct iwl_mvm *mvm, struct ieee80211_sta *sta, struct iwl_lq_sta *lq_sta, enum nl80211_band band, struct rs_rate *rate) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); int i, nentries; unsigned long active_rate; s8 best_rssi = S8_MIN; u8 best_ant = ANT_NONE; u8 valid_tx_ant = iwl_mvm_get_valid_tx_ant(mvm); const struct rs_init_rate_info 
*initial_rates; for (i = 0; i < ARRAY_SIZE(lq_sta->pers.chain_signal); i++) { if (!(lq_sta->pers.chains & BIT(i))) continue; if (lq_sta->pers.chain_signal[i] > best_rssi) { best_rssi = lq_sta->pers.chain_signal[i]; best_ant = BIT(i); } } IWL_DEBUG_RATE(mvm, "Best ANT: %s Best RSSI: %d\n", rs_pretty_ant(best_ant), best_rssi); if (best_ant != ANT_A && best_ant != ANT_B) rate->ant = first_antenna(valid_tx_ant); else rate->ant = best_ant; rate->sgi = false; rate->ldpc = false; rate->bw = RATE_MCS_CHAN_WIDTH_20; rate->index = find_first_bit(&lq_sta->active_legacy_rate, BITS_PER_LONG); if (band == NL80211_BAND_5GHZ) { rate->type = LQ_LEGACY_A; initial_rates = rs_optimal_rates_5ghz_legacy; nentries = ARRAY_SIZE(rs_optimal_rates_5ghz_legacy); } else { rate->type = LQ_LEGACY_G; initial_rates = rs_optimal_rates_24ghz_legacy; nentries = ARRAY_SIZE(rs_optimal_rates_24ghz_legacy); } if (!IWL_MVM_RS_RSSI_BASED_INIT_RATE) goto out; /* Start from a higher rate if the corresponding debug capability * is enabled. The rate is chosen according to AP capabilities. * In case of VHT/HT when the rssi is low fallback to the case of * legacy rates. */ if (sta->vht_cap.vht_supported && best_rssi > IWL_RS_LOW_RSSI_THRESHOLD) { /* * In AP mode, when a new station associates, rs is initialized * immediately upon association completion, before the phy * context is updated with the association parameters, so the * sta bandwidth might be wider than the phy context allows. * To avoid this issue, always initialize rs with 20mhz * bandwidth rate, and after authorization, when the phy context * is already up-to-date, re-init rs with the correct bw. */ u32 bw = mvmsta->sta_state < IEEE80211_STA_AUTHORIZED ? RATE_MCS_CHAN_WIDTH_20 : rs_bw_from_sta_bw(sta); switch (bw) { case RATE_MCS_CHAN_WIDTH_40: case RATE_MCS_CHAN_WIDTH_80: case RATE_MCS_CHAN_WIDTH_160: initial_rates = rs_optimal_rates_vht; nentries = ARRAY_SIZE(rs_optimal_rates_vht); break; case RATE_MCS_CHAN_WIDTH_20: initial_rates = rs_optimal_rates_vht_20mhz; nentries = ARRAY_SIZE(rs_optimal_rates_vht_20mhz); break; default: IWL_ERR(mvm, "Invalid BW %d\n", sta->bandwidth); goto out; } active_rate = lq_sta->active_siso_rate; rate->type = LQ_VHT_SISO; rate->bw = bw; } else if (sta->ht_cap.ht_supported && best_rssi > IWL_RS_LOW_RSSI_THRESHOLD) { initial_rates = rs_optimal_rates_ht; nentries = ARRAY_SIZE(rs_optimal_rates_ht); active_rate = lq_sta->active_siso_rate; rate->type = LQ_HT_SISO; } else { active_rate = lq_sta->active_legacy_rate; } for (i = 0; i < nentries; i++) { int rate_idx = initial_rates[i].rate_idx; if ((best_rssi >= initial_rates[i].rssi) && (BIT(rate_idx) & active_rate)) { rate->index = rate_idx; break; } } out: rs_dump_rate(mvm, rate, "INITIAL"); } /* Save info about RSSI of last Rx */ void rs_update_last_rssi(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, struct ieee80211_rx_status *rx_status) { struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta.rs_drv; int i; lq_sta->pers.chains = rx_status->chains; lq_sta->pers.chain_signal[0] = rx_status->chain_signal[0]; lq_sta->pers.chain_signal[1] = rx_status->chain_signal[1]; lq_sta->pers.chain_signal[2] = rx_status->chain_signal[2]; lq_sta->pers.last_rssi = S8_MIN; for (i = 0; i < ARRAY_SIZE(lq_sta->pers.chain_signal); i++) { if (!(lq_sta->pers.chains & BIT(i))) continue; if (lq_sta->pers.chain_signal[i] > lq_sta->pers.last_rssi) lq_sta->pers.last_rssi = lq_sta->pers.chain_signal[i]; } } /** * rs_initialize_lq - Initialize a station's hardware rate table * * The uCode's station table contains a table of fallback rates 

* for automatic fallback during transmission. * * NOTE: This sets up a default set of values. These will be replaced later * if the driver's iwl-agn-rs rate scaling algorithm is used, instead of * rc80211_simple. * * NOTE: Run REPLY_ADD_STA command to set up station table entry, before * calling this function (which runs REPLY_TX_LINK_QUALITY_CMD, * which requires station table entry to exist). */ static void rs_initialize_lq(struct iwl_mvm *mvm, struct ieee80211_sta *sta, struct iwl_lq_sta *lq_sta, enum nl80211_band band) { struct iwl_scale_tbl_info *tbl; struct rs_rate *rate; u8 active_tbl = 0; if (!sta || !lq_sta) return; if (!lq_sta->search_better_tbl) active_tbl = lq_sta->active_tbl; else active_tbl = 1 - lq_sta->active_tbl; tbl = &(lq_sta->lq_info[active_tbl]); rate = &tbl->rate; rs_get_initial_rate(mvm, sta, lq_sta, band, rate); rs_init_optimal_rate(mvm, sta, lq_sta); WARN_ONCE(rate->ant != ANT_A && rate->ant != ANT_B, "ant: 0x%x, chains 0x%x, fw tx ant: 0x%x, nvm tx ant: 0x%x\n", rate->ant, lq_sta->pers.chains, mvm->fw->valid_tx_ant, mvm->nvm_data ? mvm->nvm_data->valid_tx_ant : ANT_INVALID); tbl->column = rs_get_column_from_rate(rate); rs_set_expected_tpt_table(lq_sta, tbl); rs_fill_lq_cmd(mvm, sta, lq_sta, rate); /* TODO restore station should remember the lq cmd */ iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq); } static void rs_drv_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta, struct ieee80211_tx_rate_control *txrc) { struct iwl_op_mode *op_mode = mvm_r; struct iwl_mvm *mvm __maybe_unused = IWL_OP_MODE_GET_MVM(op_mode); struct sk_buff *skb = txrc->skb; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct iwl_lq_sta *lq_sta; struct rs_rate *optimal_rate; u32 last_ucode_rate; if (sta && !iwl_mvm_sta_from_mac80211(sta)->vif) { /* if vif isn't initialized mvm doesn't know about * this station, so don't do anything with it */ sta = NULL; mvm_sta = NULL; } if (!mvm_sta) return; lq_sta = mvm_sta; iwl_mvm_hwrate_to_tx_rate(lq_sta->last_rate_n_flags, info->band, &info->control.rates[0]); info->control.rates[0].count = 1; /* Report the optimal rate based on rssi and STA caps if we haven't * converged yet (too little traffic) or are exploring other modulations */ if (lq_sta->rs_state != RS_STATE_STAY_IN_COLUMN) { optimal_rate = rs_get_optimal_rate(mvm, lq_sta); last_ucode_rate = ucode_rate_from_rs_rate(mvm, optimal_rate); iwl_mvm_hwrate_to_tx_rate(last_ucode_rate, info->band, &txrc->reported_rate); } } static void *rs_drv_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta, gfp_t gfp) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_op_mode *op_mode = (struct iwl_op_mode *)mvm_rate; struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta.rs_drv; IWL_DEBUG_RATE(mvm, "create station rate scale window\n"); lq_sta->pers.drv = mvm; #ifdef CPTCFG_MAC80211_DEBUGFS lq_sta->pers.dbg_fixed_rate = 0; lq_sta->pers.dbg_fixed_txp_reduction = TPC_INVALID; lq_sta->pers.ss_force = RS_SS_FORCE_NONE; #endif lq_sta->pers.chains = 0; memset(lq_sta->pers.chain_signal, 0, sizeof(lq_sta->pers.chain_signal)); lq_sta->pers.last_rssi = S8_MIN; return lq_sta; } static int rs_vht_highest_rx_mcs_index(struct ieee80211_sta_vht_cap *vht_cap, int nss) { u16 rx_mcs = le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map) & (0x3 << (2 * (nss - 1))); rx_mcs >>= (2 * (nss - 1)); if (rx_mcs == IEEE80211_VHT_MCS_SUPPORT_0_7) return IWL_RATE_MCS_7_INDEX; else if (rx_mcs == IEEE80211_VHT_MCS_SUPPORT_0_8) return IWL_RATE_MCS_8_INDEX; else if (rx_mcs ==
IEEE80211_VHT_MCS_SUPPORT_0_9) return IWL_RATE_MCS_9_INDEX; WARN_ON_ONCE(rx_mcs != IEEE80211_VHT_MCS_NOT_SUPPORTED); return -1; } static void rs_vht_set_enabled_rates(struct ieee80211_sta *sta, struct ieee80211_sta_vht_cap *vht_cap, struct iwl_lq_sta *lq_sta) { int i; int highest_mcs = rs_vht_highest_rx_mcs_index(vht_cap, 1); if (highest_mcs >= IWL_RATE_MCS_0_INDEX) { for (i = IWL_RATE_MCS_0_INDEX; i <= highest_mcs; i++) { if (i == IWL_RATE_9M_INDEX) continue; /* VHT MCS9 isn't valid for 20Mhz for NSS=1,2 */ if (i == IWL_RATE_MCS_9_INDEX && sta->bandwidth == IEEE80211_STA_RX_BW_20) continue; lq_sta->active_siso_rate |= BIT(i); } } if (sta->rx_nss < 2) return; highest_mcs = rs_vht_highest_rx_mcs_index(vht_cap, 2); if (highest_mcs >= IWL_RATE_MCS_0_INDEX) { for (i = IWL_RATE_MCS_0_INDEX; i <= highest_mcs; i++) { if (i == IWL_RATE_9M_INDEX) continue; /* VHT MCS9 isn't valid for 20Mhz for NSS=1,2 */ if (i == IWL_RATE_MCS_9_INDEX && sta->bandwidth == IEEE80211_STA_RX_BW_20) continue; lq_sta->active_mimo2_rate |= BIT(i); } } } static void rs_ht_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, struct iwl_lq_sta *lq_sta, struct ieee80211_sta_ht_cap *ht_cap) { /* active_siso_rate mask includes 9 MBits (bit 5), * and CCK (bits 0-3), supp_rates[] does not; * shift to convert format, force 9 MBits off. */ lq_sta->active_siso_rate = ht_cap->mcs.rx_mask[0] << 1; lq_sta->active_siso_rate |= ht_cap->mcs.rx_mask[0] & 0x1; lq_sta->active_siso_rate &= ~((u16)0x2); lq_sta->active_siso_rate <<= IWL_FIRST_OFDM_RATE; lq_sta->active_mimo2_rate = ht_cap->mcs.rx_mask[1] << 1; lq_sta->active_mimo2_rate |= ht_cap->mcs.rx_mask[1] & 0x1; lq_sta->active_mimo2_rate &= ~((u16)0x2); lq_sta->active_mimo2_rate <<= IWL_FIRST_OFDM_RATE; if (mvm->cfg->ht_params->ldpc && (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING)) lq_sta->ldpc = true; if (mvm->cfg->ht_params->stbc && (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1) && (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC)) lq_sta->stbc_capable = true; lq_sta->is_vht = false; } static void rs_vht_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, struct iwl_lq_sta *lq_sta, struct ieee80211_sta_vht_cap *vht_cap) { rs_vht_set_enabled_rates(sta, vht_cap, lq_sta); if (mvm->cfg->ht_params->ldpc && (vht_cap->cap & IEEE80211_VHT_CAP_RXLDPC)) lq_sta->ldpc = true; if (mvm->cfg->ht_params->stbc && (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1) && (vht_cap->cap & IEEE80211_VHT_CAP_RXSTBC_MASK)) lq_sta->stbc_capable = true; if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_BEAMFORMER) && (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1) && (vht_cap->cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)) lq_sta->bfer_capable = true; lq_sta->is_vht = true; } #ifdef CPTCFG_IWLWIFI_DEBUGFS void iwl_mvm_reset_frame_stats(struct iwl_mvm *mvm) { spin_lock_bh(&mvm->drv_stats_lock); memset(&mvm->drv_rx_stats, 0, sizeof(mvm->drv_rx_stats)); spin_unlock_bh(&mvm->drv_stats_lock); } void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg) { u8 nss = 0; spin_lock(&mvm->drv_stats_lock); if (agg) mvm->drv_rx_stats.agg_frames++; mvm->drv_rx_stats.success_frames++; switch (rate & RATE_MCS_CHAN_WIDTH_MSK) { case RATE_MCS_CHAN_WIDTH_20: mvm->drv_rx_stats.bw_20_frames++; break; case RATE_MCS_CHAN_WIDTH_40: mvm->drv_rx_stats.bw_40_frames++; break; case RATE_MCS_CHAN_WIDTH_80: mvm->drv_rx_stats.bw_80_frames++; break; case RATE_MCS_CHAN_WIDTH_160: mvm->drv_rx_stats.bw_160_frames++; break; default: WARN_ONCE(1, "bad BW. 
rate 0x%x", rate); } if (rate & RATE_MCS_HT_MSK) { mvm->drv_rx_stats.ht_frames++; nss = ((rate & RATE_HT_MCS_NSS_MSK) >> RATE_HT_MCS_NSS_POS) + 1; } else if (rate & RATE_MCS_VHT_MSK) { mvm->drv_rx_stats.vht_frames++; nss = ((rate & RATE_VHT_MCS_NSS_MSK) >> RATE_VHT_MCS_NSS_POS) + 1; } else { mvm->drv_rx_stats.legacy_frames++; } if (nss == 1) mvm->drv_rx_stats.siso_frames++; else if (nss == 2) mvm->drv_rx_stats.mimo2_frames++; if (rate & RATE_MCS_SGI_MSK) mvm->drv_rx_stats.sgi_frames++; else mvm->drv_rx_stats.ngi_frames++; mvm->drv_rx_stats.last_rates[mvm->drv_rx_stats.last_frame_idx] = rate; mvm->drv_rx_stats.last_frame_idx = (mvm->drv_rx_stats.last_frame_idx + 1) % ARRAY_SIZE(mvm->drv_rx_stats.last_rates); spin_unlock(&mvm->drv_stats_lock); } #endif /* * Called after adding a new station to initialize rate scaling */ static void rs_drv_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, enum nl80211_band band) { int i, j; struct ieee80211_hw *hw = mvm->hw; struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap; struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta.rs_drv; struct ieee80211_supported_band *sband; unsigned long supp; /* must be unsigned long for for_each_set_bit */ lockdep_assert_held(&mvmsta->lq_sta.rs_drv.pers.lock); /* clear all non-persistent lq data */ memset(lq_sta, 0, offsetof(typeof(*lq_sta), pers)); sband = hw->wiphy->bands[band]; lq_sta->lq.sta_id = mvmsta->sta_id; mvmsta->amsdu_enabled = 0; mvmsta->max_amsdu_len = sta->max_amsdu_len; for (j = 0; j < LQ_SIZE; j++) rs_rate_scale_clear_tbl_windows(mvm, &lq_sta->lq_info[j]); lq_sta->flush_timer = 0; lq_sta->last_tx = jiffies; IWL_DEBUG_RATE(mvm, "LQ: *** rate scale station global init for station %d ***\n", mvmsta->sta_id); /* TODO: what is a good starting rate for STA? About middle? Maybe not * the lowest or the highest rate.. Could consider using RSSI from * previous packets? Need to have IEEE 802.1X auth succeed immediately * after assoc.. 
*/ lq_sta->missed_rate_counter = IWL_MVM_RS_MISSED_RATE_MAX; lq_sta->band = sband->band; /* * active legacy rates as per supported rates bitmap */ supp = sta->supp_rates[sband->band]; lq_sta->active_legacy_rate = 0; for_each_set_bit(i, &supp, BITS_PER_LONG) lq_sta->active_legacy_rate |= BIT(sband->bitrates[i].hw_value); /* TODO: should probably account for rx_highest for both HT/VHT */ if (!vht_cap || !vht_cap->vht_supported) rs_ht_init(mvm, sta, lq_sta, ht_cap); else rs_vht_init(mvm, sta, lq_sta, vht_cap); lq_sta->max_legacy_rate_idx = rs_get_max_rate_from_mask(lq_sta->active_legacy_rate); lq_sta->max_siso_rate_idx = rs_get_max_rate_from_mask(lq_sta->active_siso_rate); lq_sta->max_mimo2_rate_idx = rs_get_max_rate_from_mask(lq_sta->active_mimo2_rate); IWL_DEBUG_RATE(mvm, "LEGACY=%lX SISO=%lX MIMO2=%lX VHT=%d LDPC=%d STBC=%d BFER=%d\n", lq_sta->active_legacy_rate, lq_sta->active_siso_rate, lq_sta->active_mimo2_rate, lq_sta->is_vht, lq_sta->ldpc, lq_sta->stbc_capable, lq_sta->bfer_capable); IWL_DEBUG_RATE(mvm, "MAX RATE: LEGACY=%d SISO=%d MIMO2=%d\n", lq_sta->max_legacy_rate_idx, lq_sta->max_siso_rate_idx, lq_sta->max_mimo2_rate_idx); /* These values will be overridden later */ lq_sta->lq.single_stream_ant_msk = iwl_mvm_bt_coex_get_single_ant_msk(mvm, iwl_mvm_get_valid_tx_ant(mvm)); lq_sta->lq.dual_stream_ant_msk = ANT_AB; /* as default allow aggregation for all tids */ lq_sta->tx_agg_tid_en = IWL_AGG_ALL_TID; lq_sta->is_agg = 0; #ifdef CPTCFG_IWLWIFI_DEBUGFS iwl_mvm_reset_frame_stats(mvm); #endif rs_initialize_lq(mvm, sta, lq_sta, band); } static void rs_drv_rate_update(void *mvm_r, struct ieee80211_supported_band *sband, struct cfg80211_chan_def *chandef, struct ieee80211_sta *sta, void *priv_sta, u32 changed) { struct iwl_op_mode *op_mode = mvm_r; struct iwl_mvm *mvm __maybe_unused = IWL_OP_MODE_GET_MVM(op_mode); u8 tid; if (!iwl_mvm_sta_from_mac80211(sta)->vif) return; /* Stop any ongoing aggregations as rs starts off assuming no agg */ for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) ieee80211_stop_tx_ba_session(sta, tid); iwl_mvm_rs_rate_init(mvm, sta, sband->band, true); } static void __iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, int tid, struct ieee80211_tx_info *info, bool ndp) { int legacy_success; int retries; int i; struct iwl_lq_cmd *table; u32 lq_hwrate; struct rs_rate lq_rate, tx_resp_rate; struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl; u32 tlc_info = (uintptr_t)info->status.status_driver_data[0]; u8 reduced_txp = tlc_info & RS_DRV_DATA_TXP_MSK; u8 lq_color = RS_DRV_DATA_LQ_COLOR_GET(tlc_info); u32 tx_resp_hwrate = (uintptr_t)info->status.status_driver_data[1]; struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta.rs_drv; if (!lq_sta->pers.drv) { IWL_DEBUG_RATE(mvm, "Rate scaling not initialized yet.\n"); return; } /* This packet was aggregated but doesn't carry status info */ if ((info->flags & IEEE80211_TX_CTL_AMPDU) && !(info->flags & IEEE80211_TX_STAT_AMPDU)) return; if (rs_rate_from_ucode_rate(tx_resp_hwrate, info->band, &tx_resp_rate)) { WARN_ON_ONCE(1); return; } #ifdef CPTCFG_MAC80211_DEBUGFS /* Disable last tx check if we are debugging with fixed rate but * update tx stats */ if (lq_sta->pers.dbg_fixed_rate) { int index = tx_resp_rate.index; enum rs_column column; int attempts, success; column = rs_get_column_from_rate(&tx_resp_rate); if (WARN_ONCE(column == RS_COLUMN_INVALID, "Can't map rate 0x%x to column", tx_resp_hwrate)) return; if (info->flags & IEEE80211_TX_STAT_AMPDU) { 
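/* aggregated frame: attempt/ack counts come from the A-MPDU (BA) status */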
attempts = info->status.ampdu_len; success = info->status.ampdu_ack_len; } else { attempts = info->status.rates[0].count; success = !!(info->flags & IEEE80211_TX_STAT_ACK); } lq_sta->pers.tx_stats[column][index].total += attempts; lq_sta->pers.tx_stats[column][index].success += success; IWL_DEBUG_RATE(mvm, "Fixed rate 0x%x success %d attempts %d\n", tx_resp_hwrate, success, attempts); return; } #endif if (time_after(jiffies, (unsigned long)(lq_sta->last_tx + (IWL_MVM_RS_IDLE_TIMEOUT * HZ)))) { IWL_DEBUG_RATE(mvm, "Tx idle for too long. reinit rs\n"); /* reach here only in case of driver RS, call directly * the unlocked version */ rs_drv_rate_init(mvm, sta, info->band); return; } lq_sta->last_tx = jiffies; /* Ignore this Tx frame response if its initial rate doesn't match * that of latest Link Quality command. There may be stragglers * from a previous Link Quality command, but we're no longer interested * in those; they're either from the "active" mode while we're trying * to check "search" mode, or a prior "search" mode after we've moved * to a new "search" mode (which might become the new "active" mode). */ table = &lq_sta->lq; lq_hwrate = le32_to_cpu(table->rs_table[0]); if (rs_rate_from_ucode_rate(lq_hwrate, info->band, &lq_rate)) { WARN_ON_ONCE(1); return; } /* Here we actually compare this rate to the latest LQ command */ if (lq_color != LQ_FLAG_COLOR_GET(table->flags)) { IWL_DEBUG_RATE(mvm, "tx resp color 0x%x does not match 0x%x\n", lq_color, LQ_FLAG_COLOR_GET(table->flags)); /* Since rates mis-match, the last LQ command may have failed. * After IWL_MISSED_RATE_MAX mis-matches, resync the uCode with * ... driver. */ lq_sta->missed_rate_counter++; if (lq_sta->missed_rate_counter > IWL_MVM_RS_MISSED_RATE_MAX) { lq_sta->missed_rate_counter = 0; IWL_DEBUG_RATE(mvm, "Too many rates mismatch. Send sync LQ. rs_state %d\n", lq_sta->rs_state); iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq); } /* Regardless, ignore this status info for outdated rate */ return; } /* Rate did match, so reset the missed_rate_counter */ lq_sta->missed_rate_counter = 0; if (!lq_sta->search_better_tbl) { curr_tbl = &lq_sta->lq_info[lq_sta->active_tbl]; other_tbl = &lq_sta->lq_info[1 - lq_sta->active_tbl]; } else { curr_tbl = &lq_sta->lq_info[1 - lq_sta->active_tbl]; other_tbl = &lq_sta->lq_info[lq_sta->active_tbl]; } if (WARN_ON_ONCE(!rs_rate_column_match(&lq_rate, &curr_tbl->rate))) { IWL_DEBUG_RATE(mvm, "Neither active nor search matches tx rate\n"); tmp_tbl = &lq_sta->lq_info[lq_sta->active_tbl]; rs_dump_rate(mvm, &tmp_tbl->rate, "ACTIVE"); tmp_tbl = &lq_sta->lq_info[1 - lq_sta->active_tbl]; rs_dump_rate(mvm, &tmp_tbl->rate, "SEARCH"); rs_dump_rate(mvm, &lq_rate, "ACTUAL"); /* no matching table found, let's by-pass the data collection * and continue to perform rate scale to find the rate table */ rs_stay_in_table(lq_sta, true); goto done; } /* Updating the frame history depends on whether packets were * aggregated. * * For aggregation, all packets were transmitted at the same rate, the * first index into rate scale table. */ if (info->flags & IEEE80211_TX_STAT_AMPDU) { rs_collect_tpc_data(mvm, lq_sta, curr_tbl, tx_resp_rate.index, info->status.ampdu_len, info->status.ampdu_ack_len, reduced_txp); /* ampdu_ack_len = 0 marks no BA was received. For TLC, treat * it as a single frame loss as we don't want the success ratio * to dip too quickly because a BA wasn't received. 
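 * (Concretely: a 20-frame A-MPDU that got no BA is fed to rs_collect_tlc_data() below as ampdu_len = 1 with 0 acked, i.e. one failure rather than twenty.)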
* For TPC, there's no need for this optimisation since we want * to recover very quickly from a bad power reduction and, * therefore we'd like the success ratio to get an immediate hit * when failing to get a BA, so we'd switch back to a lower or * zero power reduction. When FW transmits agg with a rate * different from the initial rate, it will not use reduced txp * and will send BA notification twice (one empty with reduced * txp equal to the value from LQ and one with reduced txp 0). * We need to update counters for each txp level accordingly. */ if (info->status.ampdu_ack_len == 0) info->status.ampdu_len = 1; rs_collect_tlc_data(mvm, mvmsta, tid, curr_tbl, tx_resp_rate.index, info->status.ampdu_len, info->status.ampdu_ack_len); /* Update success/fail counts if not searching for new mode */ if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN) { lq_sta->total_success += info->status.ampdu_ack_len; lq_sta->total_failed += (info->status.ampdu_len - info->status.ampdu_ack_len); } } else { /* For legacy, update frame history with for each Tx retry. */ retries = info->status.rates[0].count - 1; /* HW doesn't send more than 15 retries */ retries = min(retries, 15); /* The last transmission may have been successful */ legacy_success = !!(info->flags & IEEE80211_TX_STAT_ACK); /* Collect data for each rate used during failed TX attempts */ for (i = 0; i <= retries; ++i) { lq_hwrate = le32_to_cpu(table->rs_table[i]); if (rs_rate_from_ucode_rate(lq_hwrate, info->band, &lq_rate)) { WARN_ON_ONCE(1); return; } /* Only collect stats if retried rate is in the same RS * table as active/search. */ if (rs_rate_column_match(&lq_rate, &curr_tbl->rate)) tmp_tbl = curr_tbl; else if (rs_rate_column_match(&lq_rate, &other_tbl->rate)) tmp_tbl = other_tbl; else continue; rs_collect_tpc_data(mvm, lq_sta, tmp_tbl, tx_resp_rate.index, 1, i < retries ? 0 : legacy_success, reduced_txp); rs_collect_tlc_data(mvm, mvmsta, tid, tmp_tbl, tx_resp_rate.index, 1, i < retries ? 0 : legacy_success); } /* Update success/fail counts if not searching for new mode */ if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN) { lq_sta->total_success += legacy_success; lq_sta->total_failed += retries + (1 - legacy_success); } } /* The last TX rate is cached in lq_sta; it's set in if/else above */ lq_sta->last_rate_n_flags = lq_hwrate; IWL_DEBUG_RATE(mvm, "reduced txpower: %d\n", reduced_txp); done: /* See if there's a better rate or modulation mode to try. 
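 * (Only done when the station has supported rates on this band; otherwise we stop after the bookkeeping above.)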
*/ if (sta->supp_rates[info->band]) rs_rate_scale_perform(mvm, sta, lq_sta, tid, ndp); } void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, int tid, struct ieee80211_tx_info *info, bool ndp) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); /* If it's locked we are in middle of init flow * just wait for next tx status to update the lq_sta data */ if (!spin_trylock(&mvmsta->lq_sta.rs_drv.pers.lock)) return; __iwl_mvm_rs_tx_status(mvm, sta, tid, info, ndp); spin_unlock(&mvmsta->lq_sta.rs_drv.pers.lock); } #ifdef CPTCFG_MAC80211_DEBUGFS static void rs_build_rates_table_from_fixed(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq_cmd, enum nl80211_band band, u32 ucode_rate) { struct rs_rate rate; int i; int num_rates = ARRAY_SIZE(lq_cmd->rs_table); __le32 ucode_rate_le32 = cpu_to_le32(ucode_rate); u8 ant = (ucode_rate & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS; for (i = 0; i < num_rates; i++) lq_cmd->rs_table[i] = ucode_rate_le32; if (rs_rate_from_ucode_rate(ucode_rate, band, &rate)) { WARN_ON_ONCE(1); return; } if (is_mimo(&rate)) lq_cmd->mimo_delim = num_rates - 1; else lq_cmd->mimo_delim = 0; lq_cmd->reduced_tpc = 0; if (num_of_ant(ant) == 1) lq_cmd->single_stream_ant_msk = ant; if (!mvm->trans->cfg->gen2) lq_cmd->agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF; else lq_cmd->agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF; } #endif /* CPTCFG_MAC80211_DEBUGFS */ static void rs_fill_rates_for_column(struct iwl_mvm *mvm, struct iwl_lq_sta *lq_sta, struct rs_rate *rate, __le32 *rs_table, int *rs_table_index, int num_rates, int num_retries, u8 valid_tx_ant, bool toggle_ant) { int i, j; __le32 ucode_rate; bool bottom_reached = false; int prev_rate_idx = rate->index; int end = LINK_QUAL_MAX_RETRY_NUM; int index = *rs_table_index; for (i = 0; i < num_rates && index < end; i++) { for (j = 0; j < num_retries && index < end; j++, index++) { ucode_rate = cpu_to_le32(ucode_rate_from_rs_rate(mvm, rate)); rs_table[index] = ucode_rate; if (toggle_ant) rs_toggle_antenna(valid_tx_ant, rate); } prev_rate_idx = rate->index; bottom_reached = rs_get_lower_rate_in_column(lq_sta, rate); if (bottom_reached && !is_legacy(rate)) break; } if (!bottom_reached && !is_legacy(rate)) rate->index = prev_rate_idx; *rs_table_index = index; } /* Building the rate table is non trivial. 
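 * Each column contributes num_rates x num_retries entries through rs_fill_rates_for_column(), then we step down the column and append slower fallbacks.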
When we're in MIMO2/VHT/80Mhz/SGI * column the rate table should look like this: * * rate[0] 0x400F019 VHT | ANT: AB BW: 80Mhz MCS: 9 NSS: 2 SGI * rate[1] 0x400F019 VHT | ANT: AB BW: 80Mhz MCS: 9 NSS: 2 SGI * rate[2] 0x400F018 VHT | ANT: AB BW: 80Mhz MCS: 8 NSS: 2 SGI * rate[3] 0x400F018 VHT | ANT: AB BW: 80Mhz MCS: 8 NSS: 2 SGI * rate[4] 0x400F017 VHT | ANT: AB BW: 80Mhz MCS: 7 NSS: 2 SGI * rate[5] 0x400F017 VHT | ANT: AB BW: 80Mhz MCS: 7 NSS: 2 SGI * rate[6] 0x4005007 VHT | ANT: A BW: 80Mhz MCS: 7 NSS: 1 NGI * rate[7] 0x4009006 VHT | ANT: B BW: 80Mhz MCS: 6 NSS: 1 NGI * rate[8] 0x4005005 VHT | ANT: A BW: 80Mhz MCS: 5 NSS: 1 NGI * rate[9] 0x800B Legacy | ANT: B Rate: 36 Mbps * rate[10] 0x4009 Legacy | ANT: A Rate: 24 Mbps * rate[11] 0x8007 Legacy | ANT: B Rate: 18 Mbps * rate[12] 0x4005 Legacy | ANT: A Rate: 12 Mbps * rate[13] 0x800F Legacy | ANT: B Rate: 9 Mbps * rate[14] 0x400D Legacy | ANT: A Rate: 6 Mbps * rate[15] 0x800D Legacy | ANT: B Rate: 6 Mbps */ static void rs_build_rates_table(struct iwl_mvm *mvm, struct ieee80211_sta *sta, struct iwl_lq_sta *lq_sta, const struct rs_rate *initial_rate) { struct rs_rate rate; int num_rates, num_retries, index = 0; u8 valid_tx_ant = 0; struct iwl_lq_cmd *lq_cmd = &lq_sta->lq; bool toggle_ant = false; u32 color; memcpy(&rate, initial_rate, sizeof(rate)); valid_tx_ant = iwl_mvm_get_valid_tx_ant(mvm); /* TODO: remove old API when min FW API hits 14 */ if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_LQ_SS_PARAMS) && rs_stbc_allow(mvm, sta, lq_sta)) rate.stbc = true; if (is_siso(&rate)) { num_rates = IWL_MVM_RS_INITIAL_SISO_NUM_RATES; num_retries = IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE; } else if (is_mimo(&rate)) { num_rates = IWL_MVM_RS_INITIAL_MIMO_NUM_RATES; num_retries = IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE; } else { num_rates = IWL_MVM_RS_INITIAL_LEGACY_NUM_RATES; num_retries = IWL_MVM_RS_INITIAL_LEGACY_RETRIES; toggle_ant = true; } rs_fill_rates_for_column(mvm, lq_sta, &rate, lq_cmd->rs_table, &index, num_rates, num_retries, valid_tx_ant, toggle_ant); rs_get_lower_rate_down_column(lq_sta, &rate); if (is_siso(&rate)) { num_rates = IWL_MVM_RS_SECONDARY_SISO_NUM_RATES; num_retries = IWL_MVM_RS_SECONDARY_SISO_RETRIES; lq_cmd->mimo_delim = index; } else if (is_legacy(&rate)) { num_rates = IWL_MVM_RS_SECONDARY_LEGACY_NUM_RATES; num_retries = IWL_MVM_RS_SECONDARY_LEGACY_RETRIES; } else { WARN_ON_ONCE(1); } toggle_ant = true; rs_fill_rates_for_column(mvm, lq_sta, &rate, lq_cmd->rs_table, &index, num_rates, num_retries, valid_tx_ant, toggle_ant); rs_get_lower_rate_down_column(lq_sta, &rate); num_rates = IWL_MVM_RS_SECONDARY_LEGACY_NUM_RATES; num_retries = IWL_MVM_RS_SECONDARY_LEGACY_RETRIES; rs_fill_rates_for_column(mvm, lq_sta, &rate, lq_cmd->rs_table, &index, num_rates, num_retries, valid_tx_ant, toggle_ant); /* update the color of the LQ command (as a counter at bits 1-3) */ color = LQ_FLAGS_COLOR_INC(LQ_FLAG_COLOR_GET(lq_cmd->flags)); lq_cmd->flags = LQ_FLAG_COLOR_SET(lq_cmd->flags, color); } struct rs_bfer_active_iter_data { struct ieee80211_sta *exclude_sta; struct iwl_mvm_sta *bfer_mvmsta; }; static void rs_bfer_active_iter(void *_data, struct ieee80211_sta *sta) { struct rs_bfer_active_iter_data *data = _data; struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_lq_cmd *lq_cmd = &mvmsta->lq_sta.rs_drv.lq; u32 ss_params = le32_to_cpu(lq_cmd->ss_params); if (sta == data->exclude_sta) return; /* The current sta has BFER allowed */ if (ss_params & LQ_SS_BFER_ALLOWED) { WARN_ON_ONCE(data->bfer_mvmsta != NULL); data->bfer_mvmsta 
= mvmsta; } } static int rs_bfer_priority(struct iwl_mvm_sta *sta) { int prio = -1; enum nl80211_iftype viftype = ieee80211_vif_type_p2p(sta->vif); switch (viftype) { case NL80211_IFTYPE_AP: case NL80211_IFTYPE_P2P_GO: prio = 3; break; case NL80211_IFTYPE_P2P_CLIENT: prio = 2; break; case NL80211_IFTYPE_STATION: prio = 1; break; default: WARN_ONCE(true, "viftype %d sta_id %d", viftype, sta->sta_id); prio = -1; } return prio; } /* Returns >0 if sta1 has a higher BFER priority compared to sta2 */ static int rs_bfer_priority_cmp(struct iwl_mvm_sta *sta1, struct iwl_mvm_sta *sta2) { int prio1 = rs_bfer_priority(sta1); int prio2 = rs_bfer_priority(sta2); if (prio1 > prio2) return 1; if (prio1 < prio2) return -1; return 0; } static void rs_set_lq_ss_params(struct iwl_mvm *mvm, struct ieee80211_sta *sta, struct iwl_lq_sta *lq_sta, const struct rs_rate *initial_rate) { struct iwl_lq_cmd *lq_cmd = &lq_sta->lq; struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct rs_bfer_active_iter_data data = { .exclude_sta = sta, .bfer_mvmsta = NULL, }; struct iwl_mvm_sta *bfer_mvmsta = NULL; u32 ss_params = LQ_SS_PARAMS_VALID; if (!iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta)) goto out; #ifdef CPTCFG_MAC80211_DEBUGFS /* Check if forcing the decision is configured. * Note that SISO is forced by not allowing STBC or BFER */ if (lq_sta->pers.ss_force == RS_SS_FORCE_STBC) ss_params |= (LQ_SS_STBC_1SS_ALLOWED | LQ_SS_FORCE); else if (lq_sta->pers.ss_force == RS_SS_FORCE_BFER) ss_params |= (LQ_SS_BFER_ALLOWED | LQ_SS_FORCE); if (lq_sta->pers.ss_force != RS_SS_FORCE_NONE) { IWL_DEBUG_RATE(mvm, "Forcing single stream Tx decision %d\n", lq_sta->pers.ss_force); goto out; } #endif if (lq_sta->stbc_capable) ss_params |= LQ_SS_STBC_1SS_ALLOWED; if (!lq_sta->bfer_capable) goto out; ieee80211_iterate_stations_atomic(mvm->hw, rs_bfer_active_iter, &data); bfer_mvmsta = data.bfer_mvmsta; /* This code is safe as it doesn't run concurrently for different * stations. This is guaranteed by the fact that calls to * ieee80211_tx_status wouldn't run concurrently for a single HW. */ if (!bfer_mvmsta) { IWL_DEBUG_RATE(mvm, "No sta with BFER allowed found. Allow\n"); ss_params |= LQ_SS_BFER_ALLOWED; goto out; } IWL_DEBUG_RATE(mvm, "Found existing sta %d with BFER activated\n", bfer_mvmsta->sta_id); /* Disallow BFER on another STA if active and we're a higher priority */ if (rs_bfer_priority_cmp(mvmsta, bfer_mvmsta) > 0) { struct iwl_lq_cmd *bfersta_lq_cmd = &bfer_mvmsta->lq_sta.rs_drv.lq; u32 bfersta_ss_params = le32_to_cpu(bfersta_lq_cmd->ss_params); bfersta_ss_params &= ~LQ_SS_BFER_ALLOWED; bfersta_lq_cmd->ss_params = cpu_to_le32(bfersta_ss_params); iwl_mvm_send_lq_cmd(mvm, bfersta_lq_cmd); ss_params |= LQ_SS_BFER_ALLOWED; IWL_DEBUG_RATE(mvm, "Lower priority BFER sta found (%d). 
Switch BFER\n", bfer_mvmsta->sta_id); } out: lq_cmd->ss_params = cpu_to_le32(ss_params); } static void rs_fill_lq_cmd(struct iwl_mvm *mvm, struct ieee80211_sta *sta, struct iwl_lq_sta *lq_sta, const struct rs_rate *initial_rate) { struct iwl_lq_cmd *lq_cmd = &lq_sta->lq; struct iwl_mvm_sta *mvmsta; struct iwl_mvm_vif *mvmvif; lq_cmd->agg_disable_start_th = IWL_MVM_RS_AGG_DISABLE_START; lq_cmd->agg_time_limit = cpu_to_le16(IWL_MVM_RS_AGG_TIME_LIMIT); #ifdef CPTCFG_MAC80211_DEBUGFS if (lq_sta->pers.dbg_fixed_rate) { rs_build_rates_table_from_fixed(mvm, lq_cmd, lq_sta->band, lq_sta->pers.dbg_fixed_rate); return; } #endif if (WARN_ON_ONCE(!sta || !initial_rate)) return; rs_build_rates_table(mvm, sta, lq_sta, initial_rate); if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_LQ_SS_PARAMS)) rs_set_lq_ss_params(mvm, sta, lq_sta, initial_rate); mvmsta = iwl_mvm_sta_from_mac80211(sta); mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif); if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_COEX_SCHEMA_2) && num_of_ant(initial_rate->ant) == 1) lq_cmd->single_stream_ant_msk = initial_rate->ant; lq_cmd->agg_frame_cnt_limit = mvmsta->max_agg_bufsize; /* * In case of low latency, tell the firmware to leave a frame in the * Tx Fifo so that it can start a transaction in the same TxOP. This * basically allows the firmware to send bursts. */ if (iwl_mvm_vif_low_latency(mvmvif)) lq_cmd->agg_frame_cnt_limit--; if (mvmsta->vif->p2p) lq_cmd->flags |= LQ_FLAG_USE_RTS_MSK; lq_cmd->agg_time_limit = cpu_to_le16(iwl_mvm_coex_agg_time_limit(mvm, sta)); } static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir) { return hw->priv; } /* rate scale requires free function to be implemented */ static void rs_free(void *mvm_rate) { return; } static void rs_free_sta(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta) { struct iwl_op_mode *op_mode __maybe_unused = mvm_r; struct iwl_mvm *mvm __maybe_unused = IWL_OP_MODE_GET_MVM(op_mode); IWL_DEBUG_RATE(mvm, "enter\n"); IWL_DEBUG_RATE(mvm, "leave\n"); } #ifdef CPTCFG_MAC80211_DEBUGFS int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate) { char *type, *bw; u8 mcs = 0, nss = 0; u8 ant = (rate & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS; if (!(rate & RATE_MCS_HT_MSK) && !(rate & RATE_MCS_VHT_MSK) && !(rate & RATE_MCS_HE_MSK)) { int index = iwl_hwrate_to_plcp_idx(rate); return scnprintf(buf, bufsz, "Legacy | ANT: %s Rate: %s Mbps\n", rs_pretty_ant(ant), index == IWL_RATE_INVALID ? "BAD" : iwl_rate_mcs[index].mbps); } if (rate & RATE_MCS_VHT_MSK) { type = "VHT"; mcs = rate & RATE_VHT_MCS_RATE_CODE_MSK; nss = ((rate & RATE_VHT_MCS_NSS_MSK) >> RATE_VHT_MCS_NSS_POS) + 1; } else if (rate & RATE_MCS_HT_MSK) { type = "HT"; mcs = rate & RATE_HT_MCS_INDEX_MSK; nss = ((rate & RATE_HT_MCS_NSS_MSK) >> RATE_HT_MCS_NSS_POS) + 1; } else if (rate & RATE_MCS_HE_MSK) { type = "HE"; mcs = rate & RATE_VHT_MCS_RATE_CODE_MSK; nss = ((rate & RATE_VHT_MCS_NSS_MSK) >> RATE_VHT_MCS_NSS_POS) + 1; } else { type = "Unknown"; /* shouldn't happen */ } switch (rate & RATE_MCS_CHAN_WIDTH_MSK) { case RATE_MCS_CHAN_WIDTH_20: bw = "20Mhz"; break; case RATE_MCS_CHAN_WIDTH_40: bw = "40Mhz"; break; case RATE_MCS_CHAN_WIDTH_80: bw = "80Mhz"; break; case RATE_MCS_CHAN_WIDTH_160: bw = "160Mhz"; break; default: bw = "BAD BW"; } return scnprintf(buf, bufsz, "%s | ANT: %s BW: %s MCS: %d NSS: %d %s%s%s%s\n", type, rs_pretty_ant(ant), bw, mcs, nss, (rate & RATE_MCS_SGI_MSK) ? "SGI " : "NGI ", (rate & RATE_MCS_STBC_MSK) ? "STBC " : "", (rate & RATE_MCS_LDPC_MSK) ? 
"LDPC " : "", (rate & RATE_MCS_BF_MSK) ? "BF " : ""); } /** * Program the device to use fixed rate for frame transmit * This is for debugging/testing only * once the device start use fixed rate, we need to reload the module * to being back the normal operation. */ static void rs_program_fix_rate(struct iwl_mvm *mvm, struct iwl_lq_sta *lq_sta) { lq_sta->active_legacy_rate = 0x0FFF; /* 1 - 54 MBits, includes CCK */ lq_sta->active_siso_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */ lq_sta->active_mimo2_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */ IWL_DEBUG_RATE(mvm, "sta_id %d rate 0x%X\n", lq_sta->lq.sta_id, lq_sta->pers.dbg_fixed_rate); if (lq_sta->pers.dbg_fixed_rate) { rs_fill_lq_cmd(mvm, NULL, lq_sta, NULL); iwl_mvm_send_lq_cmd(lq_sta->pers.drv, &lq_sta->lq); } } static ssize_t rs_sta_dbgfs_scale_table_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_lq_sta *lq_sta = file->private_data; struct iwl_mvm *mvm; char buf[64]; size_t buf_size; u32 parsed_rate; mvm = lq_sta->pers.drv; memset(buf, 0, sizeof(buf)); buf_size = min(count, sizeof(buf) - 1); if (copy_from_user(buf, user_buf, buf_size)) return -EFAULT; if (sscanf(buf, "%x", &parsed_rate) == 1) lq_sta->pers.dbg_fixed_rate = parsed_rate; else lq_sta->pers.dbg_fixed_rate = 0; rs_program_fix_rate(mvm, lq_sta); return count; } static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { char *buff; int desc = 0; int i = 0; ssize_t ret; static const size_t bufsz = 2048; struct iwl_lq_sta *lq_sta = file->private_data; struct iwl_mvm_sta *mvmsta = container_of(lq_sta, struct iwl_mvm_sta, lq_sta.rs_drv); struct iwl_mvm *mvm; struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); struct rs_rate *rate = &tbl->rate; u32 ss_params; mvm = lq_sta->pers.drv; buff = kmalloc(bufsz, GFP_KERNEL); if (!buff) return -ENOMEM; desc += scnprintf(buff + desc, bufsz - desc, "sta_id %d\n", lq_sta->lq.sta_id); desc += scnprintf(buff + desc, bufsz - desc, "failed=%d success=%d rate=0%lX\n", lq_sta->total_failed, lq_sta->total_success, lq_sta->active_legacy_rate); desc += scnprintf(buff + desc, bufsz - desc, "fixed rate 0x%X\n", lq_sta->pers.dbg_fixed_rate); desc += scnprintf(buff + desc, bufsz - desc, "valid_tx_ant %s%s%s\n", (iwl_mvm_get_valid_tx_ant(mvm) & ANT_A) ? "ANT_A," : "", (iwl_mvm_get_valid_tx_ant(mvm) & ANT_B) ? "ANT_B," : "", (iwl_mvm_get_valid_tx_ant(mvm) & ANT_C) ? "ANT_C" : ""); desc += scnprintf(buff + desc, bufsz - desc, "lq type %s\n", (is_legacy(rate)) ? "legacy" : is_vht(rate) ? "VHT" : "HT"); if (!is_legacy(rate)) { desc += scnprintf(buff + desc, bufsz - desc, " %s", (is_siso(rate)) ? "SISO" : "MIMO2"); desc += scnprintf(buff + desc, bufsz - desc, " %s", (is_ht20(rate)) ? "20MHz" : (is_ht40(rate)) ? "40MHz" : (is_ht80(rate)) ? "80MHz" : (is_ht160(rate)) ? "160MHz" : "BAD BW"); desc += scnprintf(buff + desc, bufsz - desc, " %s %s %s %s\n", (rate->sgi) ? "SGI" : "NGI", (rate->ldpc) ? "LDPC" : "BCC", (lq_sta->is_agg) ? "AGG on" : "", (mvmsta->amsdu_enabled) ? 
"AMSDU on" : ""); } desc += scnprintf(buff + desc, bufsz - desc, "last tx rate=0x%X\n", lq_sta->last_rate_n_flags); desc += scnprintf(buff + desc, bufsz - desc, "general: flags=0x%X mimo-d=%d s-ant=0x%x d-ant=0x%x\n", lq_sta->lq.flags, lq_sta->lq.mimo_delim, lq_sta->lq.single_stream_ant_msk, lq_sta->lq.dual_stream_ant_msk); desc += scnprintf(buff + desc, bufsz - desc, "agg: time_limit=%d dist_start_th=%d frame_cnt_limit=%d\n", le16_to_cpu(lq_sta->lq.agg_time_limit), lq_sta->lq.agg_disable_start_th, lq_sta->lq.agg_frame_cnt_limit); desc += scnprintf(buff + desc, bufsz - desc, "reduced tpc=%d\n", lq_sta->lq.reduced_tpc); ss_params = le32_to_cpu(lq_sta->lq.ss_params); desc += scnprintf(buff + desc, bufsz - desc, "single stream params: %s%s%s%s\n", (ss_params & LQ_SS_PARAMS_VALID) ? "VALID" : "INVALID", (ss_params & LQ_SS_BFER_ALLOWED) ? ", BFER" : "", (ss_params & LQ_SS_STBC_1SS_ALLOWED) ? ", STBC" : "", (ss_params & LQ_SS_FORCE) ? ", FORCE" : ""); desc += scnprintf(buff + desc, bufsz - desc, "Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n", lq_sta->lq.initial_rate_index[0], lq_sta->lq.initial_rate_index[1], lq_sta->lq.initial_rate_index[2], lq_sta->lq.initial_rate_index[3]); for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) { u32 r = le32_to_cpu(lq_sta->lq.rs_table[i]); desc += scnprintf(buff + desc, bufsz - desc, " rate[%d] 0x%X ", i, r); desc += rs_pretty_print_rate(buff + desc, bufsz - desc, r); } ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc); kfree(buff); return ret; } static const struct file_operations rs_sta_dbgfs_scale_table_ops = { .write = rs_sta_dbgfs_scale_table_write, .read = rs_sta_dbgfs_scale_table_read, .open = simple_open, .llseek = default_llseek, }; static ssize_t rs_sta_dbgfs_stats_table_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { char *buff; int desc = 0; int i, j; ssize_t ret; struct iwl_scale_tbl_info *tbl; struct rs_rate *rate; struct iwl_lq_sta *lq_sta = file->private_data; buff = kmalloc(1024, GFP_KERNEL); if (!buff) return -ENOMEM; for (i = 0; i < LQ_SIZE; i++) { tbl = &(lq_sta->lq_info[i]); rate = &tbl->rate; desc += sprintf(buff+desc, "%s type=%d SGI=%d BW=%s DUP=0\n" "index=%d\n", lq_sta->active_tbl == i ? "*" : "x", rate->type, rate->sgi, is_ht20(rate) ? "20MHz" : is_ht40(rate) ? "40MHz" : is_ht80(rate) ? "80MHz" : is_ht160(rate) ? 
"160MHz" : "ERR", rate->index); for (j = 0; j < IWL_RATE_COUNT; j++) { desc += sprintf(buff+desc, "counter=%d success=%d %%=%d\n", tbl->win[j].counter, tbl->win[j].success_counter, tbl->win[j].success_ratio); } } ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc); kfree(buff); return ret; } static const struct file_operations rs_sta_dbgfs_stats_table_ops = { .read = rs_sta_dbgfs_stats_table_read, .open = simple_open, .llseek = default_llseek, }; static ssize_t rs_sta_dbgfs_drv_tx_stats_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { static const char * const column_name[] = { [RS_COLUMN_LEGACY_ANT_A] = "LEGACY_ANT_A", [RS_COLUMN_LEGACY_ANT_B] = "LEGACY_ANT_B", [RS_COLUMN_SISO_ANT_A] = "SISO_ANT_A", [RS_COLUMN_SISO_ANT_B] = "SISO_ANT_B", [RS_COLUMN_SISO_ANT_A_SGI] = "SISO_ANT_A_SGI", [RS_COLUMN_SISO_ANT_B_SGI] = "SISO_ANT_B_SGI", [RS_COLUMN_MIMO2] = "MIMO2", [RS_COLUMN_MIMO2_SGI] = "MIMO2_SGI", }; static const char * const rate_name[] = { [IWL_RATE_1M_INDEX] = "1M", [IWL_RATE_2M_INDEX] = "2M", [IWL_RATE_5M_INDEX] = "5.5M", [IWL_RATE_11M_INDEX] = "11M", [IWL_RATE_6M_INDEX] = "6M|MCS0", [IWL_RATE_9M_INDEX] = "9M", [IWL_RATE_12M_INDEX] = "12M|MCS1", [IWL_RATE_18M_INDEX] = "18M|MCS2", [IWL_RATE_24M_INDEX] = "24M|MCS3", [IWL_RATE_36M_INDEX] = "36M|MCS4", [IWL_RATE_48M_INDEX] = "48M|MCS5", [IWL_RATE_54M_INDEX] = "54M|MCS6", [IWL_RATE_MCS_7_INDEX] = "MCS7", [IWL_RATE_MCS_8_INDEX] = "MCS8", [IWL_RATE_MCS_9_INDEX] = "MCS9", [IWL_RATE_MCS_10_INDEX] = "MCS10", [IWL_RATE_MCS_11_INDEX] = "MCS11", }; char *buff, *pos, *endpos; int col, rate; ssize_t ret; struct iwl_lq_sta *lq_sta = file->private_data; struct rs_rate_stats *stats; static const size_t bufsz = 1024; buff = kmalloc(bufsz, GFP_KERNEL); if (!buff) return -ENOMEM; pos = buff; endpos = pos + bufsz; pos += scnprintf(pos, endpos - pos, "COLUMN,"); for (rate = 0; rate < IWL_RATE_COUNT; rate++) pos += scnprintf(pos, endpos - pos, "%s,", rate_name[rate]); pos += scnprintf(pos, endpos - pos, "\n"); for (col = 0; col < RS_COLUMN_COUNT; col++) { pos += scnprintf(pos, endpos - pos, "%s,", column_name[col]); for (rate = 0; rate < IWL_RATE_COUNT; rate++) { stats = &(lq_sta->pers.tx_stats[col][rate]); pos += scnprintf(pos, endpos - pos, "%llu/%llu,", stats->success, stats->total); } pos += scnprintf(pos, endpos - pos, "\n"); } ret = simple_read_from_buffer(user_buf, count, ppos, buff, pos - buff); kfree(buff); return ret; } static ssize_t rs_sta_dbgfs_drv_tx_stats_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_lq_sta *lq_sta = file->private_data; memset(lq_sta->pers.tx_stats, 0, sizeof(lq_sta->pers.tx_stats)); return count; } static const struct file_operations rs_sta_dbgfs_drv_tx_stats_ops = { .read = rs_sta_dbgfs_drv_tx_stats_read, .write = rs_sta_dbgfs_drv_tx_stats_write, .open = simple_open, .llseek = default_llseek, }; static ssize_t iwl_dbgfs_ss_force_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_lq_sta *lq_sta = file->private_data; char buf[12]; int bufsz = sizeof(buf); int pos = 0; static const char * const ss_force_name[] = { [RS_SS_FORCE_NONE] = "none", [RS_SS_FORCE_STBC] = "stbc", [RS_SS_FORCE_BFER] = "bfer", [RS_SS_FORCE_SISO] = "siso", }; pos += scnprintf(buf+pos, bufsz-pos, "%s\n", ss_force_name[lq_sta->pers.ss_force]); return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } static ssize_t iwl_dbgfs_ss_force_write(struct iwl_lq_sta *lq_sta, char *buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = 
lq_sta->pers.drv; int ret = 0; if (!strncmp("none", buf, 4)) { lq_sta->pers.ss_force = RS_SS_FORCE_NONE; } else if (!strncmp("siso", buf, 4)) { lq_sta->pers.ss_force = RS_SS_FORCE_SISO; } else if (!strncmp("stbc", buf, 4)) { if (lq_sta->stbc_capable) { lq_sta->pers.ss_force = RS_SS_FORCE_STBC; } else { IWL_ERR(mvm, "can't force STBC. peer doesn't support\n"); ret = -EINVAL; } } else if (!strncmp("bfer", buf, 4)) { if (lq_sta->bfer_capable) { lq_sta->pers.ss_force = RS_SS_FORCE_BFER; } else { IWL_ERR(mvm, "can't force BFER. peer doesn't support\n"); ret = -EINVAL; } } else { IWL_ERR(mvm, "valid values none|siso|stbc|bfer\n"); ret = -EINVAL; } return ret ?: count; } #define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \ _MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct iwl_lq_sta) #define MVM_DEBUGFS_ADD_FILE_RS(name, parent, mode) do { \ debugfs_create_file(#name, mode, parent, lq_sta, \ &iwl_dbgfs_##name##_ops); \ } while (0) MVM_DEBUGFS_READ_WRITE_FILE_OPS(ss_force, 32); static void rs_drv_add_sta_debugfs(void *mvm, void *priv_sta, struct dentry *dir) { struct iwl_lq_sta *lq_sta = priv_sta; struct iwl_mvm_sta *mvmsta; mvmsta = container_of(lq_sta, struct iwl_mvm_sta, lq_sta.rs_drv); if (!mvmsta->vif) return; debugfs_create_file("rate_scale_table", 0600, dir, lq_sta, &rs_sta_dbgfs_scale_table_ops); debugfs_create_file("rate_stats_table", 0400, dir, lq_sta, &rs_sta_dbgfs_stats_table_ops); debugfs_create_file("drv_tx_stats", 0600, dir, lq_sta, &rs_sta_dbgfs_drv_tx_stats_ops); debugfs_create_u8("tx_agg_tid_enable", 0600, dir, &lq_sta->tx_agg_tid_en); debugfs_create_u8("reduced_tpc", 0600, dir, &lq_sta->pers.dbg_fixed_txp_reduction); MVM_DEBUGFS_ADD_FILE_RS(ss_force, dir, 0600); } #endif /* * Initialization of rate scaling information is done by driver after * the station is added. Since mac80211 calls this function before a * station is added we ignore it. 
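 * (The real initialization is driven by the op mode through iwl_mvm_rs_rate_init() below, once the station exists.)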
*/ static void rs_rate_init_ops(void *mvm_r, struct ieee80211_supported_band *sband, struct cfg80211_chan_def *chandef, struct ieee80211_sta *sta, void *mvm_sta) { } /* ops for rate scaling implemented in the driver */ static const struct rate_control_ops rs_mvm_ops_drv = { .name = RS_NAME, .tx_status = rs_drv_mac80211_tx_status, .get_rate = rs_drv_get_rate, .rate_init = rs_rate_init_ops, .alloc = rs_alloc, .free = rs_free, .alloc_sta = rs_drv_alloc_sta, .free_sta = rs_free_sta, .rate_update = rs_drv_rate_update, #ifdef CPTCFG_MAC80211_DEBUGFS .add_sta_debugfs = rs_drv_add_sta_debugfs, #endif .capa = RATE_CTRL_CAPA_VHT_EXT_NSS_BW, }; void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, enum nl80211_band band, bool update) { if (iwl_mvm_has_tlc_offload(mvm)) { rs_fw_rate_init(mvm, sta, band, update); } else { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); spin_lock(&mvmsta->lq_sta.rs_drv.pers.lock); rs_drv_rate_init(mvm, sta, band); spin_unlock(&mvmsta->lq_sta.rs_drv.pers.lock); } } int iwl_mvm_rate_control_register(void) { return ieee80211_rate_control_register(&rs_mvm_ops_drv); } void iwl_mvm_rate_control_unregister(void) { ieee80211_rate_control_unregister(&rs_mvm_ops_drv); } static int rs_drv_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, bool enable) { struct iwl_lq_cmd *lq = &mvmsta->lq_sta.rs_drv.lq; lockdep_assert_held(&mvm->mutex); if (enable) { if (mvmsta->tx_protection == 0) lq->flags |= LQ_FLAG_USE_RTS_MSK; mvmsta->tx_protection++; } else { mvmsta->tx_protection--; if (mvmsta->tx_protection == 0) lq->flags &= ~LQ_FLAG_USE_RTS_MSK; } return iwl_mvm_send_lq_cmd(mvm, lq); } /** * iwl_mvm_tx_protection - ask FW to enable RTS/CTS protection * @mvmsta: The station * @enable: Enable Tx protection? */ int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, bool enable) { if (iwl_mvm_has_tlc_offload(mvm)) return rs_fw_tx_protection(mvm, mvmsta, enable); else return rs_drv_tx_protection(mvm, mvmsta, enable); } backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/mvm/rs.h000066400000000000000000000352561351064634500261760ustar00rootroot00000000000000/****************************************************************************** * * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2015 Intel Mobile Communications GmbH * Copyright(c) 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * *****************************************************************************/ #ifndef __rs_h__ #define __rs_h__ #include #include "iwl-config.h" #include "fw-api.h" #include "iwl-trans.h" #define RS_NAME "iwl-mvm-rs" struct iwl_rs_rate_info { u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */ u8 plcp_ht_siso; /* uCode API: IWL_RATE_SISO_6M_PLCP, etc. */ u8 plcp_ht_mimo2; /* uCode API: IWL_RATE_MIMO2_6M_PLCP, etc. 
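 * (the plcp_vht_* fields below carry the matching VHT PLCP values)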
*/ u8 plcp_vht_siso; u8 plcp_vht_mimo2; u8 prev_rs; /* previous rate used in rs algo */ u8 next_rs; /* next rate used in rs algo */ }; #define IWL_RATE_60M_PLCP 3 enum { IWL_RATE_INVM_INDEX = IWL_RATE_COUNT, IWL_RATE_INVALID = IWL_RATE_COUNT, }; #define LINK_QUAL_MAX_RETRY_NUM 16 enum { IWL_RATE_6M_INDEX_TABLE = 0, IWL_RATE_9M_INDEX_TABLE, IWL_RATE_12M_INDEX_TABLE, IWL_RATE_18M_INDEX_TABLE, IWL_RATE_24M_INDEX_TABLE, IWL_RATE_36M_INDEX_TABLE, IWL_RATE_48M_INDEX_TABLE, IWL_RATE_54M_INDEX_TABLE, IWL_RATE_1M_INDEX_TABLE, IWL_RATE_2M_INDEX_TABLE, IWL_RATE_5M_INDEX_TABLE, IWL_RATE_11M_INDEX_TABLE, IWL_RATE_INVM_INDEX_TABLE = IWL_RATE_INVM_INDEX - 1, }; /* #define vs. enum to keep from defaulting to 'large integer' */ #define IWL_RATE_6M_MASK (1 << IWL_RATE_6M_INDEX) #define IWL_RATE_9M_MASK (1 << IWL_RATE_9M_INDEX) #define IWL_RATE_12M_MASK (1 << IWL_RATE_12M_INDEX) #define IWL_RATE_18M_MASK (1 << IWL_RATE_18M_INDEX) #define IWL_RATE_24M_MASK (1 << IWL_RATE_24M_INDEX) #define IWL_RATE_36M_MASK (1 << IWL_RATE_36M_INDEX) #define IWL_RATE_48M_MASK (1 << IWL_RATE_48M_INDEX) #define IWL_RATE_54M_MASK (1 << IWL_RATE_54M_INDEX) #define IWL_RATE_60M_MASK (1 << IWL_RATE_60M_INDEX) #define IWL_RATE_1M_MASK (1 << IWL_RATE_1M_INDEX) #define IWL_RATE_2M_MASK (1 << IWL_RATE_2M_INDEX) #define IWL_RATE_5M_MASK (1 << IWL_RATE_5M_INDEX) #define IWL_RATE_11M_MASK (1 << IWL_RATE_11M_INDEX) /* uCode API values for HT/VHT bit rates */ enum { IWL_RATE_HT_SISO_MCS_0_PLCP = 0, IWL_RATE_HT_SISO_MCS_1_PLCP = 1, IWL_RATE_HT_SISO_MCS_2_PLCP = 2, IWL_RATE_HT_SISO_MCS_3_PLCP = 3, IWL_RATE_HT_SISO_MCS_4_PLCP = 4, IWL_RATE_HT_SISO_MCS_5_PLCP = 5, IWL_RATE_HT_SISO_MCS_6_PLCP = 6, IWL_RATE_HT_SISO_MCS_7_PLCP = 7, IWL_RATE_HT_MIMO2_MCS_0_PLCP = 0x8, IWL_RATE_HT_MIMO2_MCS_1_PLCP = 0x9, IWL_RATE_HT_MIMO2_MCS_2_PLCP = 0xA, IWL_RATE_HT_MIMO2_MCS_3_PLCP = 0xB, IWL_RATE_HT_MIMO2_MCS_4_PLCP = 0xC, IWL_RATE_HT_MIMO2_MCS_5_PLCP = 0xD, IWL_RATE_HT_MIMO2_MCS_6_PLCP = 0xE, IWL_RATE_HT_MIMO2_MCS_7_PLCP = 0xF, IWL_RATE_VHT_SISO_MCS_0_PLCP = 0, IWL_RATE_VHT_SISO_MCS_1_PLCP = 1, IWL_RATE_VHT_SISO_MCS_2_PLCP = 2, IWL_RATE_VHT_SISO_MCS_3_PLCP = 3, IWL_RATE_VHT_SISO_MCS_4_PLCP = 4, IWL_RATE_VHT_SISO_MCS_5_PLCP = 5, IWL_RATE_VHT_SISO_MCS_6_PLCP = 6, IWL_RATE_VHT_SISO_MCS_7_PLCP = 7, IWL_RATE_VHT_SISO_MCS_8_PLCP = 8, IWL_RATE_VHT_SISO_MCS_9_PLCP = 9, IWL_RATE_VHT_MIMO2_MCS_0_PLCP = 0x10, IWL_RATE_VHT_MIMO2_MCS_1_PLCP = 0x11, IWL_RATE_VHT_MIMO2_MCS_2_PLCP = 0x12, IWL_RATE_VHT_MIMO2_MCS_3_PLCP = 0x13, IWL_RATE_VHT_MIMO2_MCS_4_PLCP = 0x14, IWL_RATE_VHT_MIMO2_MCS_5_PLCP = 0x15, IWL_RATE_VHT_MIMO2_MCS_6_PLCP = 0x16, IWL_RATE_VHT_MIMO2_MCS_7_PLCP = 0x17, IWL_RATE_VHT_MIMO2_MCS_8_PLCP = 0x18, IWL_RATE_VHT_MIMO2_MCS_9_PLCP = 0x19, IWL_RATE_HT_SISO_MCS_INV_PLCP, IWL_RATE_HT_MIMO2_MCS_INV_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP, IWL_RATE_VHT_SISO_MCS_INV_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP, IWL_RATE_VHT_MIMO2_MCS_INV_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP, IWL_RATE_HT_SISO_MCS_8_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP, IWL_RATE_HT_SISO_MCS_9_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP, IWL_RATE_HT_MIMO2_MCS_8_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP, IWL_RATE_HT_MIMO2_MCS_9_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP, }; #define IWL_RATES_MASK ((1 << IWL_RATE_COUNT) - 1) #define IWL_INVALID_VALUE -1 #define TPC_MAX_REDUCTION 15 #define TPC_NO_REDUCTION 0 #define TPC_INVALID 0xff #define LINK_QUAL_AGG_FRAME_LIMIT_DEF (63) #define LINK_QUAL_AGG_FRAME_LIMIT_MAX (63) #define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF (64) #define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_MAX (64) #define 
LINK_QUAL_AGG_FRAME_LIMIT_MIN (0) #define LQ_SIZE 2 /* 2 mode tables: "Active" and "Search" */ /* load per tid defines for A-MPDU activation */ #define IWL_AGG_TPT_THREHOLD 0 #define IWL_AGG_ALL_TID 0xff enum iwl_table_type { LQ_NONE, LQ_LEGACY_G, /* legacy types */ LQ_LEGACY_A, LQ_HT_SISO, /* HT types */ LQ_HT_MIMO2, LQ_VHT_SISO, /* VHT types */ LQ_VHT_MIMO2, LQ_HE_SISO, /* HE types */ LQ_HE_MIMO2, LQ_MAX, }; struct rs_rate { int index; enum iwl_table_type type; u8 ant; u32 bw; bool sgi; bool ldpc; bool stbc; bool bfer; }; #define is_type_legacy(type) (((type) == LQ_LEGACY_G) || \ ((type) == LQ_LEGACY_A)) #define is_type_ht_siso(type) ((type) == LQ_HT_SISO) #define is_type_ht_mimo2(type) ((type) == LQ_HT_MIMO2) #define is_type_vht_siso(type) ((type) == LQ_VHT_SISO) #define is_type_vht_mimo2(type) ((type) == LQ_VHT_MIMO2) #define is_type_he_siso(type) ((type) == LQ_HE_SISO) #define is_type_he_mimo2(type) ((type) == LQ_HE_MIMO2) #define is_type_siso(type) (is_type_ht_siso(type) || is_type_vht_siso(type) || \ is_type_he_siso(type)) #define is_type_mimo2(type) (is_type_ht_mimo2(type) || \ is_type_vht_mimo2(type) || is_type_he_mimo2(type)) #define is_type_mimo(type) (is_type_mimo2(type)) #define is_type_ht(type) (is_type_ht_siso(type) || is_type_ht_mimo2(type)) #define is_type_vht(type) (is_type_vht_siso(type) || is_type_vht_mimo2(type)) #define is_type_he(type) (is_type_he_siso(type) || is_type_he_mimo2(type)) #define is_type_a_band(type) ((type) == LQ_LEGACY_A) #define is_type_g_band(type) ((type) == LQ_LEGACY_G) #define is_legacy(rate) is_type_legacy((rate)->type) #define is_ht_siso(rate) is_type_ht_siso((rate)->type) #define is_ht_mimo2(rate) is_type_ht_mimo2((rate)->type) #define is_vht_siso(rate) is_type_vht_siso((rate)->type) #define is_vht_mimo2(rate) is_type_vht_mimo2((rate)->type) #define is_siso(rate) is_type_siso((rate)->type) #define is_mimo2(rate) is_type_mimo2((rate)->type) #define is_mimo(rate) is_type_mimo((rate)->type) #define is_ht(rate) is_type_ht((rate)->type) #define is_vht(rate) is_type_vht((rate)->type) #define is_he(rate) is_type_he((rate)->type) #define is_a_band(rate) is_type_a_band((rate)->type) #define is_g_band(rate) is_type_g_band((rate)->type) #define is_ht20(rate) ((rate)->bw == RATE_MCS_CHAN_WIDTH_20) #define is_ht40(rate) ((rate)->bw == RATE_MCS_CHAN_WIDTH_40) #define is_ht80(rate) ((rate)->bw == RATE_MCS_CHAN_WIDTH_80) #define is_ht160(rate) ((rate)->bw == RATE_MCS_CHAN_WIDTH_160) #define IWL_MAX_MCS_DISPLAY_SIZE 12 struct iwl_rate_mcs_info { char mbps[IWL_MAX_MCS_DISPLAY_SIZE]; char mcs[IWL_MAX_MCS_DISPLAY_SIZE]; }; /** * struct iwl_lq_sta_rs_fw - rate and related statistics for RS in FW * @last_rate_n_flags: last rate reported by FW * @sta_id: the id of the station #ifdef CPTCFG_MAC80211_DEBUGFS * @dbg_fixed_rate: for debug, use fixed rate if not 0 * @dbg_agg_frame_count_lim: for debug, max number of frames in A-MPDU #endif * @chains: bitmask of chains reported in %chain_signal * @chain_signal: per chain signal strength * @last_rssi: last rssi reported * @drv: pointer back to the driver data */ struct iwl_lq_sta_rs_fw { /* last tx rate_n_flags */ u32 last_rate_n_flags; /* persistent fields - initialized only once - keep last! 
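 * (rate init re-zeroes the structure up to, but not including, this
 * sub-struct, so the persistent fields must stay at the end)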
*/ struct lq_sta_pers_rs_fw { u32 sta_id; #ifdef CPTCFG_MAC80211_DEBUGFS u32 dbg_fixed_rate; u16 dbg_agg_frame_count_lim; #endif u8 chains; s8 chain_signal[IEEE80211_MAX_CHAINS]; s8 last_rssi; struct iwl_mvm *drv; } pers; }; /** * struct iwl_rate_scale_data -- tx success history for one rate */ struct iwl_rate_scale_data { u64 data; /* bitmap of successful frames */ s32 success_counter; /* number of frames successful */ s32 success_ratio; /* per-cent * 128 */ s32 counter; /* number of frames attempted */ s32 average_tpt; /* success ratio * expected throughput */ }; /* Possible Tx columns * Tx Column = a combo of legacy/siso/mimo x antenna x SGI */ enum rs_column { RS_COLUMN_LEGACY_ANT_A = 0, RS_COLUMN_LEGACY_ANT_B, RS_COLUMN_SISO_ANT_A, RS_COLUMN_SISO_ANT_B, RS_COLUMN_SISO_ANT_A_SGI, RS_COLUMN_SISO_ANT_B_SGI, RS_COLUMN_MIMO2, RS_COLUMN_MIMO2_SGI, RS_COLUMN_LAST = RS_COLUMN_MIMO2_SGI, RS_COLUMN_COUNT = RS_COLUMN_LAST + 1, RS_COLUMN_INVALID, }; enum rs_ss_force_opt { RS_SS_FORCE_NONE = 0, RS_SS_FORCE_STBC, RS_SS_FORCE_BFER, RS_SS_FORCE_SISO, }; /* Packet stats per rate */ struct rs_rate_stats { u64 success; u64 total; }; /** * struct iwl_scale_tbl_info -- tx params and success history for all rates * * There are two of these in struct iwl_lq_sta, * one for "active", and one for "search". */ struct iwl_scale_tbl_info { struct rs_rate rate; enum rs_column column; const u16 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */ struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */ /* per txpower-reduction history */ struct iwl_rate_scale_data tpc_win[TPC_MAX_REDUCTION + 1]; }; enum { RS_STATE_SEARCH_CYCLE_STARTED, RS_STATE_SEARCH_CYCLE_ENDED, RS_STATE_STAY_IN_COLUMN, }; /** * struct iwl_lq_sta -- driver's rate scaling private structure * * Pointer to this gets passed back and forth between driver and mac80211. */ struct iwl_lq_sta { u8 active_tbl; /* index of active table, range 0-1 */ u8 rs_state; /* RS_STATE_* */ u8 search_better_tbl; /* 1: currently trying alternate mode */ s32 last_tpt; /* The following determine when to search for a new mode */ u32 table_count_limit; u32 max_failure_limit; /* # failed frames before new search */ u32 max_success_limit; /* # successful frames before new search */ u32 table_count; u32 total_failed; /* total failed frames, any/all rates */ u32 total_success; /* total successful frames, any/all rates */ u64 flush_timer; /* time staying in mode before new search */ u32 visited_columns; /* Bitmask marking which Tx columns were * explored during a search cycle */ u64 last_tx; bool is_vht; bool ldpc; /* LDPC Rx is supported by the STA */ bool stbc_capable; /* Tx STBC is supported by chip and Rx by STA */ bool bfer_capable; /* Remote supports beamformee and we BFer */ enum nl80211_band band; /* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */ unsigned long active_legacy_rate; unsigned long active_siso_rate; unsigned long active_mimo2_rate; /* Highest rate per Tx mode */ u8 max_legacy_rate_idx; u8 max_siso_rate_idx; u8 max_mimo2_rate_idx; /* Optimal rate based on RSSI and STA caps. * Used only to reflect link speed to userspace. 
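 * (derived from the rs_init_rate_info tables pointed to by
 * optimal_rates below)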
*/ struct rs_rate optimal_rate; unsigned long optimal_rate_mask; const struct rs_init_rate_info *optimal_rates; int optimal_nentries; u8 missed_rate_counter; struct iwl_lq_cmd lq; struct iwl_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */ u8 tx_agg_tid_en; /* last tx rate_n_flags */ u32 last_rate_n_flags; /* packets destined for this STA are aggregated */ u8 is_agg; /* tx power reduce for this sta */ int tpc_reduce; /* persistent fields - initialized only once - keep last! */ struct lq_sta_pers { #ifdef CPTCFG_MAC80211_DEBUGFS u32 dbg_fixed_rate; u8 dbg_fixed_txp_reduction; /* force STBC/BFER/SISO for testing */ enum rs_ss_force_opt ss_force; #endif u8 chains; s8 chain_signal[IEEE80211_MAX_CHAINS]; s8 last_rssi; struct rs_rate_stats tx_stats[RS_COLUMN_COUNT][IWL_RATE_COUNT]; struct iwl_mvm *drv; spinlock_t lock; /* for races in reinit/update table */ } pers; }; /* ieee80211_tx_info's status_driver_data[0] is packed with lq color and txp * Note, it's iwlmvm <-> mac80211 interface. * bits 0-7: reduced tx power * bits 8-10: LQ command's color */ #define RS_DRV_DATA_TXP_MSK 0xff #define RS_DRV_DATA_LQ_COLOR_POS 8 #define RS_DRV_DATA_LQ_COLOR_MSK (7 << RS_DRV_DATA_LQ_COLOR_POS) #define RS_DRV_DATA_LQ_COLOR_GET(_f) (((_f) & RS_DRV_DATA_LQ_COLOR_MSK) >>\ RS_DRV_DATA_LQ_COLOR_POS) #define RS_DRV_DATA_PACK(_c, _p) ((void *)(uintptr_t)\ (((uintptr_t)_p) |\ ((_c) << RS_DRV_DATA_LQ_COLOR_POS))) /* Initialize station's rate scaling information after adding station */ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, enum nl80211_band band, bool update); /* Notify RS about Tx status */ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, int tid, struct ieee80211_tx_info *info, bool ndp); /** * iwl_rate_control_register - Register the rate control algorithm callbacks * * Since the rate control algorithm is hardware specific, there is no need * or reason to place it as a stand alone module. The driver can call * iwl_rate_control_register in order to register the rate control callbacks * with the mac80211 subsystem. This should be performed prior to calling * ieee80211_register_hw * */ int iwl_mvm_rate_control_register(void); /** * iwl_rate_control_unregister - Unregister the rate control callbacks * * This should be called after calling ieee80211_unregister_hw, but before * the driver is unloaded. */ void iwl_mvm_rate_control_unregister(void); struct iwl_mvm_sta; int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, bool enable); #ifdef CPTCFG_IWLWIFI_DEBUGFS void iwl_mvm_reset_frame_stats(struct iwl_mvm *mvm); #endif #ifdef CPTCFG_MAC80211_DEBUGFS void rs_remove_sta_debugfs(void *mvm, void *mvm_sta); #endif void iwl_mvm_rs_add_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta); void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, enum nl80211_band band, bool update); int rs_fw_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, bool enable); void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); #endif /* __rs__ */ backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/mvm/rx.c000066400000000000000000000665521351064634500262010ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *****************************************************************************/ #include #include #include "iwl-trans.h" #include "mvm.h" #include "fw-api.h" /* * iwl_mvm_rx_rx_phy_cmd - REPLY_RX_PHY_CMD handler * * Copies the phy information in mvm->last_phy_info, it will be used when the * actual data will come from the fw in the next packet. 
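 *
 * (in this non-multi-queue RX path the PHY info and the MPDU arrive as
 * two consecutive notifications, hence the caching here)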
*/ void iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); memcpy(&mvm->last_phy_info, pkt->data, sizeof(mvm->last_phy_info)); mvm->ampdu_ref++; #ifdef CPTCFG_IWLWIFI_DEBUGFS if (mvm->last_phy_info.phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_AGG)) { spin_lock(&mvm->drv_stats_lock); mvm->drv_rx_stats.ampdu_count++; spin_unlock(&mvm->drv_stats_lock); } #endif } /* * iwl_mvm_pass_packet_to_mac80211 - builds the packet for mac80211 * * Adds the rxb to a new skb and give it to mac80211 */ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm, struct ieee80211_sta *sta, struct napi_struct *napi, struct sk_buff *skb, struct ieee80211_hdr *hdr, u16 len, u8 crypt_len, struct iwl_rx_cmd_buffer *rxb) { unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control); unsigned int fraglen; /* * The 'hdrlen' (plus the 8 bytes for the SNAP and the crypt_len, * but those are all multiples of 4 long) all goes away, but we * want the *end* of it, which is going to be the start of the IP * header, to be aligned when it gets pulled in. * The beginning of the skb->data is aligned on at least a 4-byte * boundary after allocation. Everything here is aligned at least * on a 2-byte boundary so we can just take hdrlen & 3 and pad by * the result. */ skb_reserve(skb, hdrlen & 3); /* If frame is small enough to fit in skb->head, pull it completely. * If not, only pull ieee80211_hdr (including crypto if present, and * an additional 8 bytes for SNAP/ethertype, see below) so that * splice() or TCP coalesce are more efficient. * * Since, in addition, ieee80211_data_to_8023() always pull in at * least 8 bytes (possibly more for mesh) we can do the same here * to save the cost of doing it later. That still doesn't pull in * the actual IP header since the typical case has a SNAP header. * If the latter changes (there are efforts in the standards group * to do so) we should revisit this and ieee80211_data_to_8023(). */ hdrlen = (len <= skb_tailroom(skb)) ? len : hdrlen + crypt_len + 8; skb_put_data(skb, hdr, hdrlen); fraglen = len - hdrlen; if (fraglen) { int offset = (void *)hdr + hdrlen - rxb_addr(rxb) + rxb_offset(rxb); skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset, fraglen, rxb->truesize); } ieee80211_rx_napi(mvm->hw, sta, skb, napi); } /* * iwl_mvm_get_signal_strength - use new rx PHY INFO API * values are reported by the fw as positive values - need to negate * to obtain their dBM. Account for missing antennas by replacing 0 * values by -256dBm: practically 0 power and a non-feasible 8 bit value. */ static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm, struct iwl_rx_phy_info *phy_info, struct ieee80211_rx_status *rx_status) { int energy_a, energy_b, energy_c, max_energy; u32 val; val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_ENERGY_ANT_ABC_IDX]); energy_a = (val & IWL_RX_INFO_ENERGY_ANT_A_MSK) >> IWL_RX_INFO_ENERGY_ANT_A_POS; energy_a = energy_a ? -energy_a : S8_MIN; energy_b = (val & IWL_RX_INFO_ENERGY_ANT_B_MSK) >> IWL_RX_INFO_ENERGY_ANT_B_POS; energy_b = energy_b ? -energy_b : S8_MIN; energy_c = (val & IWL_RX_INFO_ENERGY_ANT_C_MSK) >> IWL_RX_INFO_ENERGY_ANT_C_POS; energy_c = energy_c ? 
-energy_c : S8_MIN; max_energy = max(energy_a, energy_b); max_energy = max(max_energy, energy_c); IWL_DEBUG_STATS(mvm, "energy In A %d B %d C %d , and max %d\n", energy_a, energy_b, energy_c, max_energy); rx_status->signal = max_energy; rx_status->chains = (le16_to_cpu(phy_info->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA) >> RX_RES_PHY_FLAGS_ANTENNA_POS; rx_status->chain_signal[0] = energy_a; rx_status->chain_signal[1] = energy_b; rx_status->chain_signal[2] = energy_c; } /* * iwl_mvm_set_mac80211_rx_flag - translate fw status to mac80211 format * @mvm: the mvm object * @hdr: 80211 header * @stats: status in mac80211's format * @rx_pkt_status: status coming from fw * * returns non 0 value if the packet should be dropped */ static u32 iwl_mvm_set_mac80211_rx_flag(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr, struct ieee80211_rx_status *stats, u32 rx_pkt_status, u8 *crypt_len) { if (!ieee80211_has_protected(hdr->frame_control) || (rx_pkt_status & RX_MPDU_RES_STATUS_SEC_ENC_MSK) == RX_MPDU_RES_STATUS_SEC_NO_ENC) return 0; /* packet was encrypted with unknown alg */ if ((rx_pkt_status & RX_MPDU_RES_STATUS_SEC_ENC_MSK) == RX_MPDU_RES_STATUS_SEC_ENC_ERR) return 0; switch (rx_pkt_status & RX_MPDU_RES_STATUS_SEC_ENC_MSK) { case RX_MPDU_RES_STATUS_SEC_CCM_ENC: /* alg is CCM: check MIC only */ if (!(rx_pkt_status & RX_MPDU_RES_STATUS_MIC_OK)) return -1; stats->flag |= RX_FLAG_DECRYPTED; *crypt_len = IEEE80211_CCMP_HDR_LEN; return 0; case RX_MPDU_RES_STATUS_SEC_TKIP_ENC: /* Don't drop the frame and decrypt it in SW */ if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_DEPRECATE_TTAK) && !(rx_pkt_status & RX_MPDU_RES_STATUS_TTAK_OK)) return 0; *crypt_len = IEEE80211_TKIP_IV_LEN; /* fall through */ case RX_MPDU_RES_STATUS_SEC_WEP_ENC: if (!(rx_pkt_status & RX_MPDU_RES_STATUS_ICV_OK)) return -1; stats->flag |= RX_FLAG_DECRYPTED; if ((rx_pkt_status & RX_MPDU_RES_STATUS_SEC_ENC_MSK) == RX_MPDU_RES_STATUS_SEC_WEP_ENC) *crypt_len = IEEE80211_WEP_IV_LEN; return 0; case RX_MPDU_RES_STATUS_SEC_EXT_ENC: if (!(rx_pkt_status & RX_MPDU_RES_STATUS_MIC_OK)) return -1; stats->flag |= RX_FLAG_DECRYPTED; return 0; default: /* Expected in monitor (not having the keys) */ if (!mvm->monitor_on) IWL_ERR(mvm, "Unhandled alg: 0x%x\n", rx_pkt_status); } return 0; } static void iwl_mvm_rx_handle_tcm(struct iwl_mvm *mvm, struct ieee80211_sta *sta, struct ieee80211_hdr *hdr, u32 len, struct iwl_rx_phy_info *phy_info, u32 rate_n_flags) { struct iwl_mvm_sta *mvmsta; struct iwl_mvm_tcm_mac *mdata; struct iwl_mvm_vif *mvmvif; int mac; int ac = IEEE80211_AC_BE; /* treat non-QoS as BE */ /* expected throughput in 100Kbps, single stream, 20 MHz */ static const u8 thresh_tpt[] = { 9, 18, 30, 42, 60, 78, 90, 96, 120, 135, }; u16 thr; if (ieee80211_is_data_qos(hdr->frame_control)) ac = tid_to_mac80211_ac[ieee80211_get_tid(hdr)]; mvmsta = iwl_mvm_sta_from_mac80211(sta); mac = mvmsta->mac_id_n_color & FW_CTXT_ID_MSK; if (time_after(jiffies, mvm->tcm.ts + MVM_TCM_PERIOD)) schedule_delayed_work(&mvm->tcm.work, 0); mdata = &mvm->tcm.data[mac]; mdata->rx.pkts[ac]++; /* count the airtime only once for each ampdu */ if (mdata->rx.last_ampdu_ref != mvm->ampdu_ref) { mdata->rx.last_ampdu_ref = mvm->ampdu_ref; mdata->rx.airtime += le16_to_cpu(phy_info->frame_time); } mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif); if (!(rate_n_flags & (RATE_MCS_HT_MSK | RATE_MCS_VHT_MSK))) return; if (mdata->opened_rx_ba_sessions || mdata->uapsd_nonagg_detect.detected || (!mvmvif->queue_params[IEEE80211_AC_VO].uapsd && !mvmvif->queue_params[IEEE80211_AC_VI].uapsd 
&& !mvmvif->queue_params[IEEE80211_AC_BE].uapsd && !mvmvif->queue_params[IEEE80211_AC_BK].uapsd) || mvmsta->sta_id != mvmvif->ap_sta_id) return; if (rate_n_flags & RATE_MCS_HT_MSK) { thr = thresh_tpt[rate_n_flags & RATE_HT_MCS_RATE_CODE_MSK]; thr *= 1 + ((rate_n_flags & RATE_HT_MCS_NSS_MSK) >> RATE_HT_MCS_NSS_POS); } else { if (WARN_ON((rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK) >= ARRAY_SIZE(thresh_tpt))) return; thr = thresh_tpt[rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK]; thr *= 1 + ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >> RATE_VHT_MCS_NSS_POS); } thr <<= ((rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) >> RATE_MCS_CHAN_WIDTH_POS); mdata->uapsd_nonagg_detect.rx_bytes += len; ewma_rate_add(&mdata->uapsd_nonagg_detect.rate, thr); } static void iwl_mvm_rx_csum(struct ieee80211_sta *sta, struct sk_buff *skb, u32 status) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif); if (mvmvif->features & NETIF_F_RXCSUM && status & RX_MPDU_RES_STATUS_CSUM_DONE && status & RX_MPDU_RES_STATUS_CSUM_OK) skb->ip_summed = CHECKSUM_UNNECESSARY; } /* * iwl_mvm_rx_rx_mpdu - REPLY_RX_MPDU_CMD handler * * Handles the actual data of the Rx packet from the fw */ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb) { struct ieee80211_hdr *hdr; struct ieee80211_rx_status *rx_status; struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_rx_phy_info *phy_info; struct iwl_rx_mpdu_res_start *rx_res; struct ieee80211_sta *sta = NULL; struct sk_buff *skb; u32 len; u32 rate_n_flags; u32 rx_pkt_status; u8 crypt_len = 0; phy_info = &mvm->last_phy_info; rx_res = (struct iwl_rx_mpdu_res_start *)pkt->data; hdr = (struct ieee80211_hdr *)(pkt->data + sizeof(*rx_res)); len = le16_to_cpu(rx_res->byte_count); rx_pkt_status = le32_to_cpup((__le32 *) (pkt->data + sizeof(*rx_res) + len)); /* Dont use dev_alloc_skb(), we'll have enough headroom once * ieee80211_hdr pulled. */ skb = alloc_skb(128, GFP_ATOMIC); if (!skb) { IWL_ERR(mvm, "alloc_skb failed\n"); return; } rx_status = IEEE80211_SKB_RXCB(skb); /* * drop the packet if it has failed being decrypted by HW */ if (iwl_mvm_set_mac80211_rx_flag(mvm, hdr, rx_status, rx_pkt_status, &crypt_len)) { IWL_DEBUG_DROP(mvm, "Bad decryption results 0x%08x\n", rx_pkt_status); kfree_skb(skb); return; } /* * Keep packets with CRC errors (and with overrun) for monitor mode * (otherwise the firmware discards them) but mark them as bad. */ if (!(rx_pkt_status & RX_MPDU_RES_STATUS_CRC_OK) || !(rx_pkt_status & RX_MPDU_RES_STATUS_OVERRUN_OK)) { IWL_DEBUG_RX(mvm, "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status); rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; } /* This will be used in several places later */ rate_n_flags = le32_to_cpu(phy_info->rate_n_flags); /* rx_status carries information about the packet to mac80211 */ rx_status->mactime = le64_to_cpu(phy_info->timestamp); rx_status->device_timestamp = le32_to_cpu(phy_info->system_timestamp); rx_status->band = (phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_BAND_24)) ? 
NL80211_BAND_2GHZ : NL80211_BAND_5GHZ; rx_status->freq = ieee80211_channel_to_frequency(le16_to_cpu(phy_info->channel), rx_status->band); /* TSF as indicated by the firmware is at INA time */ rx_status->flag |= RX_FLAG_MACTIME_PLCP_START; iwl_mvm_get_signal_strength(mvm, phy_info, rx_status); IWL_DEBUG_STATS_LIMIT(mvm, "Rssi %d, TSF %llu\n", rx_status->signal, (unsigned long long)rx_status->mactime); rcu_read_lock(); if (rx_pkt_status & RX_MPDU_RES_STATUS_SRC_STA_FOUND) { u32 id = rx_pkt_status & RX_MPDU_RES_STATUS_STA_ID_MSK; id >>= RX_MDPU_RES_STATUS_STA_ID_SHIFT; if (!WARN_ON_ONCE(id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))) { sta = rcu_dereference(mvm->fw_id_to_mac_id[id]); if (IS_ERR(sta)) sta = NULL; } } else if (!is_multicast_ether_addr(hdr->addr2)) { /* This is fine since we prevent two stations with the same * address from being added. */ sta = ieee80211_find_sta_by_ifaddr(mvm->hw, hdr->addr2, NULL); } if (sta) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct ieee80211_vif *tx_blocked_vif = rcu_dereference(mvm->csa_tx_blocked_vif); struct iwl_fw_dbg_trigger_tlv *trig; struct ieee80211_vif *vif = mvmsta->vif; /* We have tx blocked stations (with CS bit). If we heard * frames from a blocked station on a new channel we can * TX to it again. */ if (unlikely(tx_blocked_vif) && vif == tx_blocked_vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(tx_blocked_vif); if (mvmvif->csa_target_freq == rx_status->freq) iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, false); } rs_update_last_rssi(mvm, mvmsta, rx_status); trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif), FW_DBG_TRIGGER_RSSI); if (trig && ieee80211_is_beacon(hdr->frame_control)) { struct iwl_fw_dbg_trigger_low_rssi *rssi_trig; s32 rssi; rssi_trig = (void *)trig->data; rssi = le32_to_cpu(rssi_trig->rssi); if (rx_status->signal < rssi) iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, NULL); } if (!mvm->tcm.paused && len >= sizeof(*hdr) && !is_multicast_ether_addr(hdr->addr1) && ieee80211_is_data(hdr->frame_control)) iwl_mvm_rx_handle_tcm(mvm, sta, hdr, len, phy_info, rate_n_flags); #ifdef CPTCFG_IWLMVM_TDLS_PEER_CACHE /* * these packets are from the AP or the existing TDLS peer. * In both cases an existing station. 
*/ iwl_mvm_tdls_peer_cache_pkt(mvm, hdr, len, 0); #endif /* CPTCFG_IWLMVM_TDLS_PEER_CACHE */ if (ieee80211_is_data(hdr->frame_control)) iwl_mvm_rx_csum(sta, skb, rx_pkt_status); } rcu_read_unlock(); /* set the preamble flag if appropriate */ if (phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_SHORT_PREAMBLE)) rx_status->enc_flags |= RX_ENC_FLAG_SHORTPRE; if (phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_AGG)) { /* * We know which subframes of an A-MPDU belong * together since we get a single PHY response * from the firmware for all of them */ rx_status->flag |= RX_FLAG_AMPDU_DETAILS; rx_status->ampdu_reference = mvm->ampdu_ref; } /* Set up the HT phy flags */ switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) { case RATE_MCS_CHAN_WIDTH_20: break; case RATE_MCS_CHAN_WIDTH_40: rx_status->bw = RATE_INFO_BW_40; break; case RATE_MCS_CHAN_WIDTH_80: rx_status->bw = RATE_INFO_BW_80; break; case RATE_MCS_CHAN_WIDTH_160: rx_status->bw = RATE_INFO_BW_160; break; } if (!(rate_n_flags & RATE_MCS_CCK_MSK) && rate_n_flags & RATE_MCS_SGI_MSK) rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; if (rate_n_flags & RATE_HT_MCS_GF_MSK) rx_status->enc_flags |= RX_ENC_FLAG_HT_GF; if (rate_n_flags & RATE_MCS_LDPC_MSK) rx_status->enc_flags |= RX_ENC_FLAG_LDPC; if (rate_n_flags & RATE_MCS_HT_MSK) { u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >> RATE_MCS_STBC_POS; rx_status->encoding = RX_ENC_HT; rx_status->rate_idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK; rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT; } else if (rate_n_flags & RATE_MCS_VHT_MSK) { u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >> RATE_MCS_STBC_POS; rx_status->nss = ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >> RATE_VHT_MCS_NSS_POS) + 1; rx_status->rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK; rx_status->encoding = RX_ENC_VHT; rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT; if (rate_n_flags & RATE_MCS_BF_MSK) rx_status->enc_flags |= RX_ENC_FLAG_BF; } else { int rate = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags, rx_status->band); if (WARN(rate < 0 || rate > 0xFF, "Invalid rate flags 0x%x, band %d,\n", rate_n_flags, rx_status->band)) { kfree_skb(skb); return; } rx_status->rate_idx = rate; } #ifdef CPTCFG_IWLWIFI_DEBUGFS iwl_mvm_update_frame_stats(mvm, rate_n_flags, rx_status->flag & RX_FLAG_AMPDU_DETAILS); #endif if (unlikely((ieee80211_is_beacon(hdr->frame_control) || ieee80211_is_probe_resp(hdr->frame_control)) && mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_ENABLED)) mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_FOUND; if (unlikely(ieee80211_is_beacon(hdr->frame_control) || ieee80211_is_probe_resp(hdr->frame_control))) rx_status->boottime_ns = ktime_get_boot_ns(); iwl_mvm_pass_packet_to_mac80211(mvm, sta, napi, skb, hdr, len, crypt_len, rxb); } struct iwl_mvm_stat_data { struct iwl_mvm *mvm; __le32 mac_id; u8 beacon_filter_average_energy; void *general; }; static void iwl_mvm_stat_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif) { struct iwl_mvm_stat_data *data = _data; struct iwl_mvm *mvm = data->mvm; int sig = -data->beacon_filter_average_energy; int last_event; int thold = vif->bss_conf.cqm_rssi_thold; int hyst = vif->bss_conf.cqm_rssi_hyst; u16 id = le32_to_cpu(data->mac_id); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); u16 vif_id = mvmvif->id; /* This doesn't need the MAC ID check since it's not taking the * data copied into the "data" struct, but rather the data from * the notification directly. 
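 * (the per-vif beacon arrays below are indexed by vif_id, i.e.
 * mvmvif->id, straight from the notification)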
*/ if (iwl_mvm_has_new_rx_stats_api(mvm)) { struct mvm_statistics_general *general = data->general; mvmvif->beacon_stats.num_beacons = le32_to_cpu(general->beacon_counter[vif_id]); mvmvif->beacon_stats.avg_signal = -general->beacon_average_energy[vif_id]; } else { struct mvm_statistics_general_v8 *general = data->general; mvmvif->beacon_stats.num_beacons = le32_to_cpu(general->beacon_counter[vif_id]); mvmvif->beacon_stats.avg_signal = -general->beacon_average_energy[vif_id]; } if (mvmvif->id != id) return; if (vif->type != NL80211_IFTYPE_STATION) return; if (sig == 0) { IWL_DEBUG_RX(mvm, "RSSI is 0 - skip signal based decision\n"); return; } mvmvif->bf_data.ave_beacon_signal = sig; /* BT Coex */ if (mvmvif->bf_data.bt_coex_min_thold != mvmvif->bf_data.bt_coex_max_thold) { last_event = mvmvif->bf_data.last_bt_coex_event; if (sig > mvmvif->bf_data.bt_coex_max_thold && (last_event <= mvmvif->bf_data.bt_coex_min_thold || last_event == 0)) { mvmvif->bf_data.last_bt_coex_event = sig; IWL_DEBUG_RX(mvm, "cqm_iterator bt coex high %d\n", sig); iwl_mvm_bt_rssi_event(mvm, vif, RSSI_EVENT_HIGH); } else if (sig < mvmvif->bf_data.bt_coex_min_thold && (last_event >= mvmvif->bf_data.bt_coex_max_thold || last_event == 0)) { mvmvif->bf_data.last_bt_coex_event = sig; IWL_DEBUG_RX(mvm, "cqm_iterator bt coex low %d\n", sig); iwl_mvm_bt_rssi_event(mvm, vif, RSSI_EVENT_LOW); } } if (!(vif->driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI)) return; /* CQM Notification */ last_event = mvmvif->bf_data.last_cqm_event; if (thold && sig < thold && (last_event == 0 || sig < last_event - hyst)) { mvmvif->bf_data.last_cqm_event = sig; IWL_DEBUG_RX(mvm, "cqm_iterator cqm low %d\n", sig); ieee80211_cqm_rssi_notify( vif, NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW, sig, GFP_KERNEL); } else if (sig > thold && (last_event == 0 || sig > last_event + hyst)) { mvmvif->bf_data.last_cqm_event = sig; IWL_DEBUG_RX(mvm, "cqm_iterator cqm high %d\n", sig); ieee80211_cqm_rssi_notify( vif, NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH, sig, GFP_KERNEL); } } static inline void iwl_mvm_rx_stats_check_trigger(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt) { struct iwl_fw_dbg_trigger_tlv *trig; struct iwl_fw_dbg_trigger_stats *trig_stats; u32 trig_offset, trig_thold; trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL, FW_DBG_TRIGGER_STATS); if (!trig) return; trig_stats = (void *)trig->data; trig_offset = le32_to_cpu(trig_stats->stop_offset); trig_thold = le32_to_cpu(trig_stats->stop_threshold); if (WARN_ON_ONCE(trig_offset >= iwl_rx_packet_payload_len(pkt))) return; if (le32_to_cpup((__le32 *) (pkt->data + trig_offset)) < trig_thold) return; iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, NULL); } void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt) { struct iwl_mvm_stat_data data = { .mvm = mvm, }; int expected_size; int i; u8 *energy; __le32 *bytes, *air_time; __le32 flags; if (!iwl_mvm_has_new_rx_stats_api(mvm)) { if (iwl_mvm_has_new_rx_api(mvm)) expected_size = sizeof(struct iwl_notif_statistics_v11); else expected_size = sizeof(struct iwl_notif_statistics_v10); } else { expected_size = sizeof(struct iwl_notif_statistics); } if (WARN_ONCE(iwl_rx_packet_payload_len(pkt) != expected_size, "received invalid statistics size (%d)!\n", iwl_rx_packet_payload_len(pkt))) return; if (!iwl_mvm_has_new_rx_stats_api(mvm)) { struct iwl_notif_statistics_v11 *stats = (void *)&pkt->data; data.mac_id = stats->rx.general.mac_id; data.beacon_filter_average_energy = stats->general.common.beacon_filter_average_energy; mvm->rx_stats_v3 = stats->rx; 
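		/* cache the radio-time counters reported by the firmware */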
mvm->radio_stats.rx_time = le64_to_cpu(stats->general.common.rx_time); mvm->radio_stats.tx_time = le64_to_cpu(stats->general.common.tx_time); mvm->radio_stats.on_time_rf = le64_to_cpu(stats->general.common.on_time_rf); mvm->radio_stats.on_time_scan = le64_to_cpu(stats->general.common.on_time_scan); data.general = &stats->general; flags = stats->flag; } else { struct iwl_notif_statistics *stats = (void *)&pkt->data; data.mac_id = stats->rx.general.mac_id; data.beacon_filter_average_energy = stats->general.common.beacon_filter_average_energy; mvm->rx_stats = stats->rx; mvm->radio_stats.rx_time = le64_to_cpu(stats->general.common.rx_time); mvm->radio_stats.tx_time = le64_to_cpu(stats->general.common.tx_time); mvm->radio_stats.on_time_rf = le64_to_cpu(stats->general.common.on_time_rf); mvm->radio_stats.on_time_scan = le64_to_cpu(stats->general.common.on_time_scan); data.general = &stats->general; flags = stats->flag; } iwl_mvm_rx_stats_check_trigger(mvm, pkt); ieee80211_iterate_active_interfaces(mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_stat_iterator, &data); if (!iwl_mvm_has_new_rx_api(mvm)) return; if (!iwl_mvm_has_new_rx_stats_api(mvm)) { struct iwl_notif_statistics_v11 *v11 = (void *)&pkt->data; energy = (void *)&v11->load_stats.avg_energy; bytes = (void *)&v11->load_stats.byte_count; air_time = (void *)&v11->load_stats.air_time; } else { struct iwl_notif_statistics *stats = (void *)&pkt->data; energy = (void *)&stats->load_stats.avg_energy; bytes = (void *)&stats->load_stats.byte_count; air_time = (void *)&stats->load_stats.air_time; } rcu_read_lock(); for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) { struct iwl_mvm_sta *sta; if (!energy[i]) continue; sta = iwl_mvm_sta_from_staid_rcu(mvm, i); if (!sta) continue; sta->avg_energy = energy[i]; } rcu_read_unlock(); /* * Don't update in case the statistics are not cleared, since * we will end up counting twice the same airtime, once in TCM * request and once in statistics notification. 
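 * Only accumulate below when the firmware indicates it cleared its
 * counters after reporting (IWL_STATISTICS_REPLY_FLG_CLEAR).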
*/ if (!(le32_to_cpu(flags) & IWL_STATISTICS_REPLY_FLG_CLEAR)) return; spin_lock(&mvm->tcm.lock); for (i = 0; i < NUM_MAC_INDEX_DRIVER; i++) { struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[i]; u32 rx_bytes = le32_to_cpu(bytes[i]); u32 airtime = le32_to_cpu(air_time[i]); mdata->rx.airtime += airtime; mdata->uapsd_nonagg_detect.rx_bytes += rx_bytes; if (airtime) { /* re-init every time to store rate from FW */ ewma_rate_init(&mdata->uapsd_nonagg_detect.rate); ewma_rate_add(&mdata->uapsd_nonagg_detect.rate, rx_bytes * 8 / airtime); } } spin_unlock(&mvm->tcm.lock); } void iwl_mvm_rx_statistics(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { iwl_mvm_handle_rx_statistics(mvm, rxb_addr(rxb)); } void iwl_mvm_window_status_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_ba_window_status_notif *notif = (void *)pkt->data; int i; u32 pkt_len = iwl_rx_packet_payload_len(pkt); if (WARN_ONCE(pkt_len != sizeof(*notif), "Received window status notification of wrong size (%u)\n", pkt_len)) return; rcu_read_lock(); for (i = 0; i < BA_WINDOW_STREAMS_MAX; i++) { struct ieee80211_sta *sta; u8 sta_id, tid; u64 bitmap; u32 ssn; u16 ratid; u16 received_mpdu; ratid = le16_to_cpu(notif->ra_tid[i]); /* check that this TID is valid */ if (!(ratid & BA_WINDOW_STATUS_VALID_MSK)) continue; received_mpdu = le16_to_cpu(notif->mpdu_rx_count[i]); if (received_mpdu == 0) continue; tid = ratid & BA_WINDOW_STATUS_TID_MSK; /* get the station */ sta_id = (ratid & BA_WINDOW_STATUS_STA_ID_MSK) >> BA_WINDOW_STATUS_STA_ID_POS; sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); if (IS_ERR_OR_NULL(sta)) continue; bitmap = le64_to_cpu(notif->bitmap[i]); ssn = le32_to_cpu(notif->start_seq_num[i]); /* update mac80211 with the bitmap for the reordering buffer */ ieee80211_mark_rx_ba_filtered_frames(sta, tid, ssn, bitmap, received_mpdu); } rcu_read_unlock(); } backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c000066400000000000000000001734351351064634500265360ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *****************************************************************************/ #include #include #include "iwl-trans.h" #include "mvm.h" #include "fw-api.h" static void *iwl_mvm_skb_get_hdr(struct sk_buff *skb) { struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb); u8 *data = skb->data; /* Alignment concerns */ BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he) % 4); BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he_mu) % 4); BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_lsig) % 4); BUILD_BUG_ON(sizeof(struct ieee80211_vendor_radiotap) % 4); if (rx_status->flag & RX_FLAG_RADIOTAP_HE) data += sizeof(struct ieee80211_radiotap_he); if (rx_status->flag & RX_FLAG_RADIOTAP_HE_MU) data += sizeof(struct ieee80211_radiotap_he_mu); if (rx_status->flag & RX_FLAG_RADIOTAP_LSIG) data += sizeof(struct ieee80211_radiotap_lsig); if (rx_status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) { struct ieee80211_vendor_radiotap *radiotap = (void *)data; data += sizeof(*radiotap) + radiotap->len + radiotap->pad; } return data; } static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb, int queue, struct ieee80211_sta *sta) { struct iwl_mvm_sta *mvmsta; struct ieee80211_hdr *hdr = iwl_mvm_skb_get_hdr(skb); struct ieee80211_rx_status *stats = IEEE80211_SKB_RXCB(skb); struct iwl_mvm_key_pn *ptk_pn; int res; u8 tid, keyidx; u8 pn[IEEE80211_CCMP_PN_LEN]; u8 *extiv; /* do PN checking */ /* multicast and non-data only arrives on default queue */ if (!ieee80211_is_data(hdr->frame_control) || is_multicast_ether_addr(hdr->addr1)) return 0; /* do not check PN for open AP */ if (!(stats->flag & RX_FLAG_DECRYPTED)) return 0; /* * avoid checking for default queue - we don't want to replicate * all the logic that's necessary for checking the PN on fragmented * frames, leave that to mac80211 */ if (queue == 0) return 0; /* if we are here - this for sure is either CCMP or GCMP */ if (IS_ERR_OR_NULL(sta)) { IWL_ERR(mvm, "expected hw-decrypted unicast frame for station\n"); return -1; } mvmsta = iwl_mvm_sta_from_mac80211(sta); extiv = (u8 *)hdr + 
ieee80211_hdrlen(hdr->frame_control); keyidx = extiv[3] >> 6; ptk_pn = rcu_dereference(mvmsta->ptk_pn[keyidx]); if (!ptk_pn) return -1; if (ieee80211_is_data_qos(hdr->frame_control)) tid = ieee80211_get_tid(hdr); else tid = 0; /* we don't use HCCA/802.11 QoS TSPECs, so drop such frames */ if (tid >= IWL_MAX_TID_COUNT) return -1; /* load pn */ pn[0] = extiv[7]; pn[1] = extiv[6]; pn[2] = extiv[5]; pn[3] = extiv[4]; pn[4] = extiv[1]; pn[5] = extiv[0]; res = memcmp(pn, ptk_pn->q[queue].pn[tid], IEEE80211_CCMP_PN_LEN); if (res < 0) return -1; if (!res && !(stats->flag & RX_FLAG_ALLOW_SAME_PN)) return -1; memcpy(ptk_pn->q[queue].pn[tid], pn, IEEE80211_CCMP_PN_LEN); stats->flag |= RX_FLAG_PN_VALIDATED; return 0; } /* iwl_mvm_create_skb Adds the rxb to a new skb */ static int iwl_mvm_create_skb(struct iwl_mvm *mvm, struct sk_buff *skb, struct ieee80211_hdr *hdr, u16 len, u8 crypt_len, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_rx_mpdu_desc *desc = (void *)pkt->data; unsigned int headlen, fraglen, pad_len = 0; unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control); if (desc->mac_flags2 & IWL_RX_MPDU_MFLG2_PAD) { len -= 2; pad_len = 2; } /* If frame is small enough to fit in skb->head, pull it completely. * If not, only pull ieee80211_hdr (including crypto if present, and * an additional 8 bytes for SNAP/ethertype, see below) so that * splice() or TCP coalesce are more efficient. * * Since, in addition, ieee80211_data_to_8023() always pull in at * least 8 bytes (possibly more for mesh) we can do the same here * to save the cost of doing it later. That still doesn't pull in * the actual IP header since the typical case has a SNAP header. * If the latter changes (there are efforts in the standards group * to do so) we should revisit this and ieee80211_data_to_8023(). */ headlen = (len <= skb_tailroom(skb)) ? len : hdrlen + crypt_len + 8; /* The firmware may align the packet to DWORD. * The padding is inserted after the IV. * After copying the header + IV skip the padding if * present before copying packet data. */ hdrlen += crypt_len; if (WARN_ONCE(headlen < hdrlen, "invalid packet lengths (hdrlen=%d, len=%d, crypt_len=%d)\n", hdrlen, len, crypt_len)) { /* * We warn and trace because we want to be able to see * it in trace-cmd as well. 
*/ IWL_DEBUG_RX(mvm, "invalid packet lengths (hdrlen=%d, len=%d, crypt_len=%d)\n", hdrlen, len, crypt_len); return -EINVAL; } skb_put_data(skb, hdr, hdrlen); skb_put_data(skb, (u8 *)hdr + hdrlen + pad_len, headlen - hdrlen); fraglen = len - headlen; if (fraglen) { int offset = (void *)hdr + headlen + pad_len - rxb_addr(rxb) + rxb_offset(rxb); skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset, fraglen, rxb->truesize); } return 0; } static void iwl_mvm_add_rtap_sniffer_config(struct iwl_mvm *mvm, struct sk_buff *skb) { struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb); struct ieee80211_vendor_radiotap *radiotap; const int size = sizeof(*radiotap) + sizeof(__le16); if (!mvm->cur_aid) return; /* ensure alignment */ BUILD_BUG_ON((size + 2) % 4); radiotap = skb_put(skb, size + 2); radiotap->align = 1; /* Intel OUI */ radiotap->oui[0] = 0xf6; radiotap->oui[1] = 0x54; radiotap->oui[2] = 0x25; /* radiotap sniffer config sub-namespace */ radiotap->subns = 1; radiotap->present = 0x1; radiotap->len = size - sizeof(*radiotap); radiotap->pad = 2; /* fill the data now */ memcpy(radiotap->data, &mvm->cur_aid, sizeof(mvm->cur_aid)); /* and clear the padding */ memset(radiotap->data + sizeof(__le16), 0, radiotap->pad); rx_status->flag |= RX_FLAG_RADIOTAP_VENDOR_DATA; } /* iwl_mvm_pass_packet_to_mac80211 - passes the packet for mac80211 */ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm, struct napi_struct *napi, struct sk_buff *skb, int queue, struct ieee80211_sta *sta) { if (iwl_mvm_check_pn(mvm, skb, queue, sta)) kfree_skb(skb); else ieee80211_rx_napi(mvm->hw, sta, skb, napi); } static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm, struct ieee80211_rx_status *rx_status, u32 rate_n_flags, int energy_a, int energy_b) { int max_energy; u32 rate_flags = rate_n_flags; energy_a = energy_a ? -energy_a : S8_MIN; energy_b = energy_b ? -energy_b : S8_MIN; max_energy = max(energy_a, energy_b); IWL_DEBUG_STATS(mvm, "energy In A %d B %d, and max %d\n", energy_a, energy_b, max_energy); rx_status->signal = max_energy; rx_status->chains = (rate_flags & RATE_MCS_ANT_AB_MSK) >> RATE_MCS_ANT_POS; rx_status->chain_signal[0] = energy_a; rx_status->chain_signal[1] = energy_b; rx_status->chain_signal[2] = S8_MIN; } static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr, struct ieee80211_rx_status *stats, u16 phy_info, struct iwl_rx_mpdu_desc *desc, u32 pkt_flags, int queue, u8 *crypt_len) { u16 status = le16_to_cpu(desc->status); /* * Drop UNKNOWN frames in aggregation, unless in monitor mode * (where we don't have the keys). * We limit this to aggregation because in TKIP this is a valid * scenario, since we may not have the (correct) TTAK (phase 1 * key) in the firmware. 
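 * Such TKIP frames are passed up without RX_FLAG_DECRYPTED so that
 * mac80211 can decrypt them in software.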
*/ if (phy_info & IWL_RX_MPDU_PHY_AMPDU && (status & IWL_RX_MPDU_STATUS_SEC_MASK) == IWL_RX_MPDU_STATUS_SEC_UNKNOWN && !mvm->monitor_on) return -1; if (!ieee80211_has_protected(hdr->frame_control) || (status & IWL_RX_MPDU_STATUS_SEC_MASK) == IWL_RX_MPDU_STATUS_SEC_NONE) return 0; /* TODO: handle packets encrypted with unknown alg */ switch (status & IWL_RX_MPDU_STATUS_SEC_MASK) { case IWL_RX_MPDU_STATUS_SEC_CCM: case IWL_RX_MPDU_STATUS_SEC_GCM: BUILD_BUG_ON(IEEE80211_CCMP_PN_LEN != IEEE80211_GCMP_PN_LEN); /* alg is CCM: check MIC only */ if (!(status & IWL_RX_MPDU_STATUS_MIC_OK)) return -1; stats->flag |= RX_FLAG_DECRYPTED; if (pkt_flags & FH_RSCSR_RADA_EN) stats->flag |= RX_FLAG_MIC_STRIPPED; *crypt_len = IEEE80211_CCMP_HDR_LEN; return 0; case IWL_RX_MPDU_STATUS_SEC_TKIP: /* Don't drop the frame and decrypt it in SW */ if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_DEPRECATE_TTAK) && !(status & IWL_RX_MPDU_RES_STATUS_TTAK_OK)) return 0; if (mvm->trans->cfg->gen2 && !(status & RX_MPDU_RES_STATUS_MIC_OK)) stats->flag |= RX_FLAG_MMIC_ERROR; *crypt_len = IEEE80211_TKIP_IV_LEN; /* fall through */ case IWL_RX_MPDU_STATUS_SEC_WEP: if (!(status & IWL_RX_MPDU_STATUS_ICV_OK)) return -1; stats->flag |= RX_FLAG_DECRYPTED; if ((status & IWL_RX_MPDU_STATUS_SEC_MASK) == IWL_RX_MPDU_STATUS_SEC_WEP) *crypt_len = IEEE80211_WEP_IV_LEN; if (pkt_flags & FH_RSCSR_RADA_EN) { stats->flag |= RX_FLAG_ICV_STRIPPED; if (mvm->trans->cfg->gen2) stats->flag |= RX_FLAG_MMIC_STRIPPED; } return 0; case IWL_RX_MPDU_STATUS_SEC_EXT_ENC: if (!(status & IWL_RX_MPDU_STATUS_MIC_OK)) return -1; stats->flag |= RX_FLAG_DECRYPTED; return 0; default: /* Expected in monitor (not having the keys) */ if (!mvm->monitor_on) IWL_ERR(mvm, "Unhandled alg: 0x%x\n", status); } return 0; } static void iwl_mvm_rx_csum(struct ieee80211_sta *sta, struct sk_buff *skb, struct iwl_rx_mpdu_desc *desc) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif); u16 flags = le16_to_cpu(desc->l3l4_flags); u8 l3_prot = (u8)((flags & IWL_RX_L3L4_L3_PROTO_MASK) >> IWL_RX_L3_PROTO_POS); if (mvmvif->features & NETIF_F_RXCSUM && flags & IWL_RX_L3L4_TCP_UDP_CSUM_OK && (flags & IWL_RX_L3L4_IP_HDR_CSUM_OK || l3_prot == IWL_RX_L3_TYPE_IPV6 || l3_prot == IWL_RX_L3_TYPE_IPV6_FRAG)) skb->ip_summed = CHECKSUM_UNNECESSARY; } /* * returns true if a packet is a duplicate and should be dropped. 
* Updates AMSDU PN tracking info */ static bool iwl_mvm_is_dup(struct ieee80211_sta *sta, int queue, struct ieee80211_rx_status *rx_status, struct ieee80211_hdr *hdr, struct iwl_rx_mpdu_desc *desc) { struct iwl_mvm_sta *mvm_sta; struct iwl_mvm_rxq_dup_data *dup_data; u8 tid, sub_frame_idx; if (WARN_ON(IS_ERR_OR_NULL(sta))) return false; mvm_sta = iwl_mvm_sta_from_mac80211(sta); dup_data = &mvm_sta->dup_data[queue]; /* * Drop duplicate 802.11 retransmissions * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery") */ if (ieee80211_is_ctl(hdr->frame_control) || ieee80211_is_qos_nullfunc(hdr->frame_control) || is_multicast_ether_addr(hdr->addr1)) { rx_status->flag |= RX_FLAG_DUP_VALIDATED; return false; } if (ieee80211_is_data_qos(hdr->frame_control)) /* frame has qos control */ tid = ieee80211_get_tid(hdr); else tid = IWL_MAX_TID_COUNT; /* If this wasn't a part of an A-MSDU the sub-frame index will be 0 */ sub_frame_idx = desc->amsdu_info & IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK; if (unlikely(ieee80211_has_retry(hdr->frame_control) && dup_data->last_seq[tid] == hdr->seq_ctrl && dup_data->last_sub_frame[tid] >= sub_frame_idx)) return true; /* Allow same PN as the first subframe for following sub frames */ if (dup_data->last_seq[tid] == hdr->seq_ctrl && sub_frame_idx > dup_data->last_sub_frame[tid] && desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU) rx_status->flag |= RX_FLAG_ALLOW_SAME_PN; dup_data->last_seq[tid] = hdr->seq_ctrl; dup_data->last_sub_frame[tid] = sub_frame_idx; rx_status->flag |= RX_FLAG_DUP_VALIDATED; return false; } int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask, const u8 *data, u32 count, bool async) { u8 buf[sizeof(struct iwl_rxq_sync_cmd) + sizeof(struct iwl_mvm_rss_sync_notif)]; struct iwl_rxq_sync_cmd *cmd = (void *)buf; u32 data_size = sizeof(*cmd) + count; int ret; /* * size must be a multiple of DWORD * Ensure we don't overflow buf */ if (WARN_ON(count & 3 || count > sizeof(struct iwl_mvm_rss_sync_notif))) return -EINVAL; cmd->rxq_mask = cpu_to_le32(rxq_mask); cmd->count = cpu_to_le32(count); cmd->flags = 0; memcpy(cmd->payload, data, count); ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(DATA_PATH_GROUP, TRIGGER_RX_QUEUES_NOTIF_CMD), async ? CMD_ASYNC : 0, data_size, cmd); return ret; } /* * Returns true if sn2 - buffer_size < sn1 < sn2. * To be used only in order to compare reorder buffer head with NSSN. * We fully trust NSSN unless it is behind us due to reorder timeout. * Reorder timeout can only bring us up to buffer_size SNs ahead of NSSN. 
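 *
 * Example: with buffer_size 64 and sn2 = 100, sn1 = 90 satisfies the
 * predicate while sn1 = 20 does not (it is more than buffer_size
 * behind sn2, so it is treated as a stale NSSN rather than a valid
 * release point).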
*/ static bool iwl_mvm_is_sn_less(u16 sn1, u16 sn2, u16 buffer_size) { return ieee80211_sn_less(sn1, sn2) && !ieee80211_sn_less(sn1, sn2 - buffer_size); } static void iwl_mvm_sync_nssn(struct iwl_mvm *mvm, u8 baid, u16 nssn) { struct iwl_mvm_rss_sync_notif notif = { .metadata.type = IWL_MVM_RXQ_NSSN_SYNC, .metadata.sync = 0, .nssn_sync.baid = baid, .nssn_sync.nssn = nssn, }; iwl_mvm_sync_rx_queues_internal(mvm, (void *)¬if, sizeof(notif)); } #define RX_REORDER_BUF_TIMEOUT_MQ (HZ / 10) enum iwl_mvm_release_flags { IWL_MVM_RELEASE_SEND_RSS_SYNC = BIT(0), IWL_MVM_RELEASE_FROM_RSS_SYNC = BIT(1), }; static void iwl_mvm_release_frames(struct iwl_mvm *mvm, struct ieee80211_sta *sta, struct napi_struct *napi, struct iwl_mvm_baid_data *baid_data, struct iwl_mvm_reorder_buffer *reorder_buf, u16 nssn, u32 flags) { struct iwl_mvm_reorder_buf_entry *entries = &baid_data->entries[reorder_buf->queue * baid_data->entries_per_queue]; u16 ssn = reorder_buf->head_sn; lockdep_assert_held(&reorder_buf->lock); /* * We keep the NSSN not too far behind, if we are sync'ing it and it * is more than 2048 ahead of us, it must be behind us. Discard it. * This can happen if the queue that hit the 0 / 2048 seqno was lagging * behind and this queue already processed packets. The next if * would have caught cases where this queue would have processed less * than 64 packets, but it may have processed more than 64 packets. */ if ((flags & IWL_MVM_RELEASE_FROM_RSS_SYNC) && ieee80211_sn_less(nssn, ssn)) goto set_timer; /* ignore nssn smaller than head sn - this can happen due to timeout */ if (iwl_mvm_is_sn_less(nssn, ssn, reorder_buf->buf_size)) goto set_timer; while (iwl_mvm_is_sn_less(ssn, nssn, reorder_buf->buf_size)) { int index = ssn % reorder_buf->buf_size; struct sk_buff_head *skb_list = &entries[index].e.frames; struct sk_buff *skb; ssn = ieee80211_sn_inc(ssn); if ((flags & IWL_MVM_RELEASE_SEND_RSS_SYNC) && (ssn == 2048 || ssn == 0)) iwl_mvm_sync_nssn(mvm, baid_data->baid, ssn); /* * Empty the list. Will have more than one frame for A-MSDU. * Empty list is valid as well since nssn indicates frames were * received. 
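 * (the NSSN only advances past frames the firmware has already seen,
 * so an empty slot here cannot be a genuine hole)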
*/ while ((skb = __skb_dequeue(skb_list))) { iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, reorder_buf->queue, sta); reorder_buf->num_stored--; } } reorder_buf->head_sn = nssn; set_timer: if (reorder_buf->num_stored && !reorder_buf->removed) { u16 index = reorder_buf->head_sn % reorder_buf->buf_size; while (skb_queue_empty(&entries[index].e.frames)) index = (index + 1) % reorder_buf->buf_size; /* modify timer to match next frame's expiration time */ mod_timer(&reorder_buf->reorder_timer, entries[index].e.reorder_time + 1 + RX_REORDER_BUF_TIMEOUT_MQ); } else { del_timer(&reorder_buf->reorder_timer); } } void iwl_mvm_reorder_timer_expired(struct timer_list *t) { struct iwl_mvm_reorder_buffer *buf = from_timer(buf, t, reorder_timer); struct iwl_mvm_baid_data *baid_data = iwl_mvm_baid_data_from_reorder_buf(buf); struct iwl_mvm_reorder_buf_entry *entries = &baid_data->entries[buf->queue * baid_data->entries_per_queue]; int i; u16 sn = 0, index = 0; bool expired = false; bool cont = false; spin_lock(&buf->lock); if (!buf->num_stored || buf->removed) { spin_unlock(&buf->lock); return; } for (i = 0; i < buf->buf_size ; i++) { index = (buf->head_sn + i) % buf->buf_size; if (skb_queue_empty(&entries[index].e.frames)) { /* * If there is a hole and the next frame didn't expire * we want to break and not advance SN */ cont = false; continue; } if (!cont && !time_after(jiffies, entries[index].e.reorder_time + RX_REORDER_BUF_TIMEOUT_MQ)) break; expired = true; /* continue until next hole after this expired frames */ cont = true; sn = ieee80211_sn_add(buf->head_sn, i + 1); } if (expired) { struct ieee80211_sta *sta; struct iwl_mvm_sta *mvmsta; u8 sta_id = baid_data->sta_id; rcu_read_lock(); sta = rcu_dereference(buf->mvm->fw_id_to_mac_id[sta_id]); mvmsta = iwl_mvm_sta_from_mac80211(sta); /* SN is set to the last expired frame + 1 */ IWL_DEBUG_HT(buf->mvm, "Releasing expired frames for sta %u, sn %d\n", sta_id, sn); iwl_mvm_event_frame_timeout_callback(buf->mvm, mvmsta->vif, sta, baid_data->tid); iwl_mvm_release_frames(buf->mvm, sta, NULL, baid_data, buf, sn, IWL_MVM_RELEASE_SEND_RSS_SYNC); rcu_read_unlock(); } else { /* * If no frame expired and there are stored frames, index is now * pointing to the first unexpired frame - modify timer * accordingly to this frame. 
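 * (entries[index].e.reorder_time holds the arrival jiffies of that
 * frame, so the timer fires RX_REORDER_BUF_TIMEOUT_MQ after it was
 * stored)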
*/ mod_timer(&buf->reorder_timer, entries[index].e.reorder_time + 1 + RX_REORDER_BUF_TIMEOUT_MQ); } spin_unlock(&buf->lock); } static void iwl_mvm_del_ba(struct iwl_mvm *mvm, int queue, struct iwl_mvm_delba_data *data) { struct iwl_mvm_baid_data *ba_data; struct ieee80211_sta *sta; struct iwl_mvm_reorder_buffer *reorder_buf; u8 baid = data->baid; if (WARN_ONCE(baid >= IWL_MAX_BAID, "invalid BAID: %x\n", baid)) return; rcu_read_lock(); ba_data = rcu_dereference(mvm->baid_map[baid]); if (WARN_ON_ONCE(!ba_data)) goto out; sta = rcu_dereference(mvm->fw_id_to_mac_id[ba_data->sta_id]); if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) goto out; reorder_buf = &ba_data->reorder_buf[queue]; /* release all frames that are in the reorder buffer to the stack */ spin_lock_bh(&reorder_buf->lock); iwl_mvm_release_frames(mvm, sta, NULL, ba_data, reorder_buf, ieee80211_sn_add(reorder_buf->head_sn, reorder_buf->buf_size), 0); spin_unlock_bh(&reorder_buf->lock); del_timer_sync(&reorder_buf->reorder_timer); out: rcu_read_unlock(); } static void iwl_mvm_release_frames_from_notif(struct iwl_mvm *mvm, struct napi_struct *napi, u8 baid, u16 nssn, int queue, u32 flags) { struct ieee80211_sta *sta; struct iwl_mvm_reorder_buffer *reorder_buf; struct iwl_mvm_baid_data *ba_data; IWL_DEBUG_HT(mvm, "Frame release notification for BAID %u, NSSN %d\n", baid, nssn); if (WARN_ON_ONCE(baid == IWL_RX_REORDER_DATA_INVALID_BAID || baid >= ARRAY_SIZE(mvm->baid_map))) return; rcu_read_lock(); ba_data = rcu_dereference(mvm->baid_map[baid]); if (WARN_ON_ONCE(!ba_data)) goto out; sta = rcu_dereference(mvm->fw_id_to_mac_id[ba_data->sta_id]); if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) goto out; reorder_buf = &ba_data->reorder_buf[queue]; spin_lock_bh(&reorder_buf->lock); iwl_mvm_release_frames(mvm, sta, napi, ba_data, reorder_buf, nssn, flags); spin_unlock_bh(&reorder_buf->lock); out: rcu_read_unlock(); } static void iwl_mvm_nssn_sync(struct iwl_mvm *mvm, struct napi_struct *napi, int queue, const struct iwl_mvm_nssn_sync_data *data) { iwl_mvm_release_frames_from_notif(mvm, napi, data->baid, data->nssn, queue, IWL_MVM_RELEASE_FROM_RSS_SYNC); } void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb, int queue) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_rxq_sync_notification *notif; struct iwl_mvm_internal_rxq_notif *internal_notif; notif = (void *)pkt->data; internal_notif = (void *)notif->payload; if (internal_notif->sync && mvm->queue_sync_cookie != internal_notif->cookie) { WARN_ONCE(1, "Received expired RX queue sync message\n"); return; } switch (internal_notif->type) { case IWL_MVM_RXQ_EMPTY: break; case IWL_MVM_RXQ_NOTIF_DEL_BA: iwl_mvm_del_ba(mvm, queue, (void *)internal_notif->data); break; case IWL_MVM_RXQ_NSSN_SYNC: iwl_mvm_nssn_sync(mvm, napi, queue, (void *)internal_notif->data); break; default: WARN_ONCE(1, "Invalid identifier %d", internal_notif->type); } if (internal_notif->sync && !atomic_dec_return(&mvm->queue_sync_counter)) wake_up(&mvm->rx_sync_waitq); } /* * Returns true if the MPDU was buffered\dropped, false if it should be passed * to upper layer. 
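 *
 * Rough decision flow: frames outside any BA session (invalid BAID)
 * pass straight through; in-order frames with nothing stored are
 * released immediately; everything else is queued at sn % buf_size
 * until the NSSN or the reorder timer releases it.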
*/ static bool iwl_mvm_reorder(struct iwl_mvm *mvm, struct napi_struct *napi, int queue, struct ieee80211_sta *sta, struct sk_buff *skb, struct iwl_rx_mpdu_desc *desc) { struct ieee80211_hdr *hdr = iwl_mvm_skb_get_hdr(skb); struct iwl_mvm_sta *mvm_sta; struct iwl_mvm_baid_data *baid_data; struct iwl_mvm_reorder_buffer *buffer; struct sk_buff *tail; u32 reorder = le32_to_cpu(desc->reorder_data); bool amsdu = desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU; bool last_subframe = desc->amsdu_info & IWL_RX_MPDU_AMSDU_LAST_SUBFRAME; u8 tid = ieee80211_get_tid(hdr); u8 sub_frame_idx = desc->amsdu_info & IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK; struct iwl_mvm_reorder_buf_entry *entries; int index; u16 nssn, sn; u8 baid; baid = (reorder & IWL_RX_MPDU_REORDER_BAID_MASK) >> IWL_RX_MPDU_REORDER_BAID_SHIFT; /* * This also covers the case of receiving a Block Ack Request * outside a BA session; we'll pass it to mac80211 and that * then sends a delBA action frame. * This also covers pure monitor mode, in which case we won't * have any BA sessions. */ if (baid == IWL_RX_REORDER_DATA_INVALID_BAID) return false; /* no sta yet */ if (WARN_ONCE(IS_ERR_OR_NULL(sta), "Got valid BAID without a valid station assigned\n")) return false; mvm_sta = iwl_mvm_sta_from_mac80211(sta); /* not a data packet or a bar */ if (!ieee80211_is_back_req(hdr->frame_control) && (!ieee80211_is_data_qos(hdr->frame_control) || is_multicast_ether_addr(hdr->addr1))) return false; if (unlikely(!ieee80211_is_data_present(hdr->frame_control))) return false; baid_data = rcu_dereference(mvm->baid_map[baid]); if (!baid_data) { IWL_DEBUG_RX(mvm, "Got valid BAID but no baid allocated, bypass the re-ordering buffer. Baid %d reorder 0x%x\n", baid, reorder); return false; } if (WARN(tid != baid_data->tid || mvm_sta->sta_id != baid_data->sta_id, "baid 0x%x is mapped to sta:%d tid:%d, but was received for sta:%d tid:%d\n", baid, baid_data->sta_id, baid_data->tid, mvm_sta->sta_id, tid)) return false; nssn = reorder & IWL_RX_MPDU_REORDER_NSSN_MASK; sn = (reorder & IWL_RX_MPDU_REORDER_SN_MASK) >> IWL_RX_MPDU_REORDER_SN_SHIFT; buffer = &baid_data->reorder_buf[queue]; entries = &baid_data->entries[queue * baid_data->entries_per_queue]; spin_lock_bh(&buffer->lock); if (!buffer->valid) { if (reorder & IWL_RX_MPDU_REORDER_BA_OLD_SN) { spin_unlock_bh(&buffer->lock); return false; } buffer->valid = true; } if (ieee80211_is_back_req(hdr->frame_control)) { iwl_mvm_release_frames(mvm, sta, napi, baid_data, buffer, nssn, 0); goto drop; } /* * If there was a significant jump in the nssn - adjust. * If the SN is smaller than the NSSN it might need to first go into * the reorder buffer, in which case we just release up to it and the * rest of the function will take care of storing it and releasing up to * the nssn. * This should not happen. This queue has been lagging and it should * have been updated by a IWL_MVM_RXQ_NSSN_SYNC notification. Be nice * and update the other queues. */ if (!iwl_mvm_is_sn_less(nssn, buffer->head_sn + buffer->buf_size, buffer->buf_size) || !ieee80211_sn_less(sn, buffer->head_sn + buffer->buf_size)) { u16 min_sn = ieee80211_sn_less(sn, nssn) ? 
sn : nssn; iwl_mvm_release_frames(mvm, sta, napi, baid_data, buffer, min_sn, IWL_MVM_RELEASE_SEND_RSS_SYNC); } /* drop any outdated packets */ if (ieee80211_sn_less(sn, buffer->head_sn)) goto drop; /* release immediately if allowed by nssn and no stored frames */ if (!buffer->num_stored && ieee80211_sn_less(sn, nssn)) { if (iwl_mvm_is_sn_less(buffer->head_sn, nssn, buffer->buf_size) && (!amsdu || last_subframe)) { /* * If we crossed the 2048 or 0 SN, notify all the * queues. This is done in order to avoid having a * head_sn that lags behind for too long. When that * happens, we can get to a situation where the head_sn * is within the interval [nssn - buf_size : nssn] * which will make us think that the nssn is a packet * that we already freed because of the reordering * buffer and we will ignore it. So maintain the * head_sn somewhat updated across all the queues: * when it crosses 0 and 2048. */ if (sn == 2048 || sn == 0) iwl_mvm_sync_nssn(mvm, baid, sn); buffer->head_sn = nssn; } /* No need to update AMSDU last SN - we are moving the head */ spin_unlock_bh(&buffer->lock); return false; } /* * release immediately if there are no stored frames, and the sn is * equal to the head. * This can happen due to reorder timer, where NSSN is behind head_sn. * When we released everything, and we got the next frame in the * sequence, according to the NSSN we can't release immediately, * while technically there is no hole and we can move forward. */ if (!buffer->num_stored && sn == buffer->head_sn) { if (!amsdu || last_subframe) { if (sn == 2048 || sn == 0) iwl_mvm_sync_nssn(mvm, baid, sn); buffer->head_sn = ieee80211_sn_inc(buffer->head_sn); } /* No need to update AMSDU last SN - we are moving the head */ spin_unlock_bh(&buffer->lock); return false; } index = sn % buffer->buf_size; /* * Check if we already stored this frame. * As an AMSDU is either received as a whole or not at all, the logic is * simple: if we have frames in that position in the buffer and the last * frame that originated from an AMSDU had a different SN, then it is a * retransmission. If it is the same SN, then if the subframe index is * incrementing it is the same AMSDU - otherwise it is a retransmission. */ tail = skb_peek_tail(&entries[index].e.frames); if (tail && !amsdu) goto drop; else if (tail && (sn != buffer->last_amsdu || buffer->last_sub_index >= sub_frame_idx)) goto drop; /* put in reorder buffer */ __skb_queue_tail(&entries[index].e.frames, skb); buffer->num_stored++; entries[index].e.reorder_time = jiffies; if (amsdu) { buffer->last_amsdu = sn; buffer->last_sub_index = sub_frame_idx; } /* * We cannot trust the NSSN for AMSDU sub-frames that are not the last. * The reason is that the NSSN advances on the first sub-frame, and may * cause the reorder buffer to advance before all the sub-frames arrive. * Example: the reorder buffer contains SN 0 & 2, and we receive an AMSDU * with SN 1. The NSSN for the first sub-frame will be 3 with the result * of the driver releasing SN 0, 1, 2. When sub-frame 1 arrives - the * reorder buffer is already ahead and it will be dropped. * If the last sub-frame is not on this queue - we will get a frame * release notification with an up-to-date NSSN.
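 * This is why the release below is gated on (!amsdu || last_subframe):
 * the NSSN is only trusted once the last sub-frame of an A-MSDU has
 * been seen on this queue.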
*/ if (!amsdu || last_subframe) iwl_mvm_release_frames(mvm, sta, napi, baid_data, buffer, nssn, IWL_MVM_RELEASE_SEND_RSS_SYNC); spin_unlock_bh(&buffer->lock); return true; drop: kfree_skb(skb); spin_unlock_bh(&buffer->lock); return true; } static void iwl_mvm_agg_rx_received(struct iwl_mvm *mvm, u32 reorder_data, u8 baid) { unsigned long now = jiffies; unsigned long timeout; struct iwl_mvm_baid_data *data; rcu_read_lock(); data = rcu_dereference(mvm->baid_map[baid]); if (!data) { IWL_DEBUG_RX(mvm, "Got valid BAID but no baid allocated, bypass the re-ordering buffer. Baid %d reorder 0x%x\n", baid, reorder_data); goto out; } if (!data->timeout) goto out; timeout = data->timeout; /* * Do not update last rx all the time to avoid cache bouncing * between the rx queues. * Update it every timeout. Worst case is the session will * expire after ~ 2 * timeout, which doesn't matter that much. */ if (time_before(data->last_rx + TU_TO_JIFFIES(timeout), now)) /* Update is atomic */ data->last_rx = now; out: rcu_read_unlock(); } static void iwl_mvm_flip_address(u8 *addr) { int i; u8 mac_addr[ETH_ALEN]; for (i = 0; i < ETH_ALEN; i++) mac_addr[i] = addr[ETH_ALEN - i - 1]; ether_addr_copy(addr, mac_addr); } struct iwl_mvm_rx_phy_data { enum iwl_rx_phy_info_type info_type; __le32 d0, d1, d2, d3; __le16 d4; }; static void iwl_mvm_decode_he_mu_ext(struct iwl_mvm *mvm, struct iwl_mvm_rx_phy_data *phy_data, u32 rate_n_flags, struct ieee80211_radiotap_he_mu *he_mu) { u32 phy_data2 = le32_to_cpu(phy_data->d2); u32 phy_data3 = le32_to_cpu(phy_data->d3); u16 phy_data4 = le16_to_cpu(phy_data->d4); if (FIELD_GET(IWL_RX_PHY_DATA4_HE_MU_EXT_CH1_CRC_OK, phy_data4)) { he_mu->flags1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_RU_KNOWN | IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_CTR_26T_RU_KNOWN); he_mu->flags1 |= le16_encode_bits(FIELD_GET(IWL_RX_PHY_DATA4_HE_MU_EXT_CH1_CTR_RU, phy_data4), IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_CTR_26T_RU); he_mu->ru_ch1[0] = FIELD_GET(IWL_RX_PHY_DATA2_HE_MU_EXT_CH1_RU0, phy_data2); he_mu->ru_ch1[1] = FIELD_GET(IWL_RX_PHY_DATA3_HE_MU_EXT_CH1_RU1, phy_data3); he_mu->ru_ch1[2] = FIELD_GET(IWL_RX_PHY_DATA2_HE_MU_EXT_CH1_RU2, phy_data2); he_mu->ru_ch1[3] = FIELD_GET(IWL_RX_PHY_DATA3_HE_MU_EXT_CH1_RU3, phy_data3); } if (FIELD_GET(IWL_RX_PHY_DATA4_HE_MU_EXT_CH2_CRC_OK, phy_data4) && (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) != RATE_MCS_CHAN_WIDTH_20) { he_mu->flags1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH2_RU_KNOWN | IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH2_CTR_26T_RU_KNOWN); he_mu->flags2 |= le16_encode_bits(FIELD_GET(IWL_RX_PHY_DATA4_HE_MU_EXT_CH2_CTR_RU, phy_data4), IEEE80211_RADIOTAP_HE_MU_FLAGS2_CH2_CTR_26T_RU); he_mu->ru_ch2[0] = FIELD_GET(IWL_RX_PHY_DATA2_HE_MU_EXT_CH2_RU0, phy_data2); he_mu->ru_ch2[1] = FIELD_GET(IWL_RX_PHY_DATA3_HE_MU_EXT_CH2_RU1, phy_data3); he_mu->ru_ch2[2] = FIELD_GET(IWL_RX_PHY_DATA2_HE_MU_EXT_CH2_RU2, phy_data2); he_mu->ru_ch2[3] = FIELD_GET(IWL_RX_PHY_DATA3_HE_MU_EXT_CH2_RU3, phy_data3); } } static void iwl_mvm_decode_he_phy_ru_alloc(struct iwl_mvm_rx_phy_data *phy_data, u32 rate_n_flags, struct ieee80211_radiotap_he *he, struct ieee80211_radiotap_he_mu *he_mu, struct ieee80211_rx_status *rx_status) { /* * Unfortunately, we have to leave the mac80211 data * incorrect for the case that we receive an HE-MU * transmission and *don't* have the HE phy data (due * to the bits being used for TSF). This shouldn't * happen though, as management frames, where we need * the TSF/timers, are not transmitted in HE-MU.
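 *
 * As a reading aid: the ru index extracted below uses the 802.11ax RU
 * allocation numbering, which the switch statement translates to
 * radiotap/nl80211 values - indices 0-36 are 26-tone RUs, 37-52 are
 * 52-tone, 53-60 are 106-tone, 61-64 are 242-tone, 65-66 are 484-tone,
 * 67 is 996-tone and 68 is 2x996-tone.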
*/ u8 ru = le32_get_bits(phy_data->d1, IWL_RX_PHY_DATA1_HE_RU_ALLOC_MASK); u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK; u8 offs = 0; rx_status->bw = RATE_INFO_BW_HE_RU; he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN); switch (ru) { case 0 ... 36: rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_26; offs = ru; break; case 37 ... 52: rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_52; offs = ru - 37; break; case 53 ... 60: rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106; offs = ru - 53; break; case 61 ... 64: rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_242; offs = ru - 61; break; case 65 ... 66: rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_484; offs = ru - 65; break; case 67: rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_996; break; case 68: rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_2x996; break; } he->data2 |= le16_encode_bits(offs, IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET); he->data2 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_KNOWN | IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET_KNOWN); if (phy_data->d1 & cpu_to_le32(IWL_RX_PHY_DATA1_HE_RU_ALLOC_SEC80)) he->data2 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_SEC); #define CHECK_BW(bw) \ BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_ ## bw ## MHZ != \ RATE_MCS_CHAN_WIDTH_##bw >> RATE_MCS_CHAN_WIDTH_POS); \ BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW_ ## bw ## MHZ != \ RATE_MCS_CHAN_WIDTH_##bw >> RATE_MCS_CHAN_WIDTH_POS) CHECK_BW(20); CHECK_BW(40); CHECK_BW(80); CHECK_BW(160); if (he_mu) he_mu->flags2 |= le16_encode_bits(FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK, rate_n_flags), IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW); else if (he_type == RATE_MCS_HE_TYPE_TRIG) he->data6 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW_KNOWN) | le16_encode_bits(FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK, rate_n_flags), IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW); } static void iwl_mvm_decode_he_phy_data(struct iwl_mvm *mvm, struct iwl_mvm_rx_phy_data *phy_data, struct ieee80211_radiotap_he *he, struct ieee80211_radiotap_he_mu *he_mu, struct ieee80211_rx_status *rx_status, u32 rate_n_flags, int queue) { switch (phy_data->info_type) { case IWL_RX_PHY_INFO_TYPE_NONE: case IWL_RX_PHY_INFO_TYPE_CCK: case IWL_RX_PHY_INFO_TYPE_OFDM_LGCY: case IWL_RX_PHY_INFO_TYPE_HT: case IWL_RX_PHY_INFO_TYPE_VHT_SU: case IWL_RX_PHY_INFO_TYPE_VHT_MU: return; case IWL_RX_PHY_INFO_TYPE_HE_TB_EXT: he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE_KNOWN | IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE2_KNOWN | IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE3_KNOWN | IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE4_KNOWN); he->data4 |= le16_encode_bits(le32_get_bits(phy_data->d2, IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE1), IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE1); he->data4 |= le16_encode_bits(le32_get_bits(phy_data->d2, IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE2), IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE2); he->data4 |= le16_encode_bits(le32_get_bits(phy_data->d2, IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE3), IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE3); he->data4 |= le16_encode_bits(le32_get_bits(phy_data->d2, IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE4), IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE4); /* fall through */ case IWL_RX_PHY_INFO_TYPE_HE_SU: case IWL_RX_PHY_INFO_TYPE_HE_MU: case IWL_RX_PHY_INFO_TYPE_HE_MU_EXT: case IWL_RX_PHY_INFO_TYPE_HE_TB: /* HE common */ he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_LDPC_XSYMSEG_KNOWN | IEEE80211_RADIOTAP_HE_DATA1_DOPPLER_KNOWN | IEEE80211_RADIOTAP_HE_DATA1_BSS_COLOR_KNOWN); 
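	/*
	 * Note: le16_encode_bits(val, MASK), used throughout here, is
	 * equivalent to cpu_to_le16(FIELD_PREP(MASK, val)) - it shifts val
	 * into the field described by MASK and converts to little endian,
	 * e.g. le16_encode_bits(3, GENMASK(5, 4)) == cpu_to_le16(0x30).
	 */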
he->data2 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRE_FEC_PAD_KNOWN | IEEE80211_RADIOTAP_HE_DATA2_PE_DISAMBIG_KNOWN | IEEE80211_RADIOTAP_HE_DATA2_TXOP_KNOWN | IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN); he->data3 |= le16_encode_bits(le32_get_bits(phy_data->d0, IWL_RX_PHY_DATA0_HE_BSS_COLOR_MASK), IEEE80211_RADIOTAP_HE_DATA3_BSS_COLOR); if (phy_data->info_type != IWL_RX_PHY_INFO_TYPE_HE_TB && phy_data->info_type != IWL_RX_PHY_INFO_TYPE_HE_TB_EXT) { he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_UL_DL_KNOWN); he->data3 |= le16_encode_bits(le32_get_bits(phy_data->d0, IWL_RX_PHY_DATA0_HE_UPLINK), IEEE80211_RADIOTAP_HE_DATA3_UL_DL); } he->data3 |= le16_encode_bits(le32_get_bits(phy_data->d0, IWL_RX_PHY_DATA0_HE_LDPC_EXT_SYM), IEEE80211_RADIOTAP_HE_DATA3_LDPC_XSYMSEG); he->data5 |= le16_encode_bits(le32_get_bits(phy_data->d0, IWL_RX_PHY_DATA0_HE_PRE_FEC_PAD_MASK), IEEE80211_RADIOTAP_HE_DATA5_PRE_FEC_PAD); he->data5 |= le16_encode_bits(le32_get_bits(phy_data->d0, IWL_RX_PHY_DATA0_HE_PE_DISAMBIG), IEEE80211_RADIOTAP_HE_DATA5_PE_DISAMBIG); he->data5 |= le16_encode_bits(le32_get_bits(phy_data->d1, IWL_RX_PHY_DATA1_HE_LTF_NUM_MASK), IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS); he->data6 |= le16_encode_bits(le32_get_bits(phy_data->d0, IWL_RX_PHY_DATA0_HE_TXOP_DUR_MASK), IEEE80211_RADIOTAP_HE_DATA6_TXOP); he->data6 |= le16_encode_bits(le32_get_bits(phy_data->d0, IWL_RX_PHY_DATA0_HE_DOPPLER), IEEE80211_RADIOTAP_HE_DATA6_DOPPLER); break; } switch (phy_data->info_type) { case IWL_RX_PHY_INFO_TYPE_HE_MU_EXT: case IWL_RX_PHY_INFO_TYPE_HE_MU: case IWL_RX_PHY_INFO_TYPE_HE_SU: he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE_KNOWN); he->data4 |= le16_encode_bits(le32_get_bits(phy_data->d0, IWL_RX_PHY_DATA0_HE_SPATIAL_REUSE_MASK), IEEE80211_RADIOTAP_HE_DATA4_SU_MU_SPTL_REUSE); break; default: /* nothing here */ break; } switch (phy_data->info_type) { case IWL_RX_PHY_INFO_TYPE_HE_MU_EXT: he_mu->flags1 |= le16_encode_bits(le16_get_bits(phy_data->d4, IWL_RX_PHY_DATA4_HE_MU_EXT_SIGB_DCM), IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM); he_mu->flags1 |= le16_encode_bits(le16_get_bits(phy_data->d4, IWL_RX_PHY_DATA4_HE_MU_EXT_SIGB_MCS_MASK), IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS); he_mu->flags2 |= le16_encode_bits(le16_get_bits(phy_data->d4, IWL_RX_PHY_DATA4_HE_MU_EXT_PREAMBLE_PUNC_TYPE_MASK), IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW); iwl_mvm_decode_he_mu_ext(mvm, phy_data, rate_n_flags, he_mu); /* fall through */ case IWL_RX_PHY_INFO_TYPE_HE_MU: he_mu->flags2 |= le16_encode_bits(le32_get_bits(phy_data->d1, IWL_RX_PHY_DATA1_HE_MU_SIBG_SYM_OR_USER_NUM_MASK), IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_SYMS_USERS); he_mu->flags2 |= le16_encode_bits(le32_get_bits(phy_data->d1, IWL_RX_PHY_DATA1_HE_MU_SIGB_COMPRESSION), IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_COMP); /* fall through */ case IWL_RX_PHY_INFO_TYPE_HE_TB: case IWL_RX_PHY_INFO_TYPE_HE_TB_EXT: iwl_mvm_decode_he_phy_ru_alloc(phy_data, rate_n_flags, he, he_mu, rx_status); break; case IWL_RX_PHY_INFO_TYPE_HE_SU: he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BEAM_CHANGE_KNOWN); he->data3 |= le16_encode_bits(le32_get_bits(phy_data->d0, IWL_RX_PHY_DATA0_HE_BEAM_CHNG), IEEE80211_RADIOTAP_HE_DATA3_BEAM_CHANGE); break; default: /* nothing */ break; } } static void iwl_mvm_rx_he(struct iwl_mvm *mvm, struct sk_buff *skb, struct iwl_mvm_rx_phy_data *phy_data, u32 rate_n_flags, u16 phy_info, int queue) { struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb); struct ieee80211_radiotap_he *he = NULL; struct ieee80211_radiotap_he_mu 
*he_mu = NULL; u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK; u8 stbc, ltf; static const struct ieee80211_radiotap_he known = { .data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN | IEEE80211_RADIOTAP_HE_DATA1_DATA_DCM_KNOWN | IEEE80211_RADIOTAP_HE_DATA1_STBC_KNOWN | IEEE80211_RADIOTAP_HE_DATA1_CODING_KNOWN), .data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN | IEEE80211_RADIOTAP_HE_DATA2_TXBF_KNOWN), }; static const struct ieee80211_radiotap_he_mu mu_known = { .flags1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS_KNOWN | IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM_KNOWN | IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_SYMS_USERS_KNOWN | IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_COMP_KNOWN), .flags2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW_KNOWN | IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_KNOWN), }; he = skb_put_data(skb, &known, sizeof(known)); rx_status->flag |= RX_FLAG_RADIOTAP_HE; if (phy_data->info_type == IWL_RX_PHY_INFO_TYPE_HE_MU || phy_data->info_type == IWL_RX_PHY_INFO_TYPE_HE_MU_EXT) { he_mu = skb_put_data(skb, &mu_known, sizeof(mu_known)); rx_status->flag |= RX_FLAG_RADIOTAP_HE_MU; } /* report the AMPDU-EOF bit on single frames */ if (!queue && !(phy_info & IWL_RX_MPDU_PHY_AMPDU)) { rx_status->flag |= RX_FLAG_AMPDU_DETAILS; rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN; if (phy_data->d0 & cpu_to_le32(IWL_RX_PHY_DATA0_HE_DELIM_EOF)) rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT; } if (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD) iwl_mvm_decode_he_phy_data(mvm, phy_data, he, he_mu, rx_status, rate_n_flags, queue); /* update aggregation data for monitor sake on default queue */ if (!queue && (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD) && (phy_info & IWL_RX_MPDU_PHY_AMPDU)) { bool toggle_bit = phy_info & IWL_RX_MPDU_PHY_AMPDU_TOGGLE; /* toggle is switched whenever new aggregation starts */ if (toggle_bit != mvm->ampdu_toggle) { rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN; if (phy_data->d0 & cpu_to_le32(IWL_RX_PHY_DATA0_HE_DELIM_EOF)) rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT; } } if (he_type == RATE_MCS_HE_TYPE_EXT_SU && rate_n_flags & RATE_MCS_HE_106T_MSK) { rx_status->bw = RATE_INFO_BW_HE_RU; rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106; } /* actually data is filled in mac80211 */ if (he_type == RATE_MCS_HE_TYPE_SU || he_type == RATE_MCS_HE_TYPE_EXT_SU) he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN); stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >> RATE_MCS_STBC_POS; rx_status->nss = ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >> RATE_VHT_MCS_NSS_POS) + 1; rx_status->rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK; rx_status->encoding = RX_ENC_HE; rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT; if (rate_n_flags & RATE_MCS_BF_MSK) rx_status->enc_flags |= RX_ENC_FLAG_BF; rx_status->he_dcm = !!(rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK); #define CHECK_TYPE(F) \ BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA1_FORMAT_ ## F != \ (RATE_MCS_HE_TYPE_ ## F >> RATE_MCS_HE_TYPE_POS)) CHECK_TYPE(SU); CHECK_TYPE(EXT_SU); CHECK_TYPE(MU); CHECK_TYPE(TRIG); he->data1 |= cpu_to_le16(he_type >> RATE_MCS_HE_TYPE_POS); if (rate_n_flags & RATE_MCS_BF_MSK) he->data5 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA5_TXBF); switch ((rate_n_flags & RATE_MCS_HE_GI_LTF_MSK) >> RATE_MCS_HE_GI_LTF_POS) { case 0: if (he_type == RATE_MCS_HE_TYPE_TRIG) rx_status->he_gi = NL80211_RATE_INFO_HE_GI_1_6; else rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8; if (he_type == RATE_MCS_HE_TYPE_MU) ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X; else ltf = 
IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_1X; break; case 1: if (he_type == RATE_MCS_HE_TYPE_TRIG) rx_status->he_gi = NL80211_RATE_INFO_HE_GI_1_6; else rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8; ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_2X; break; case 2: if (he_type == RATE_MCS_HE_TYPE_TRIG) { rx_status->he_gi = NL80211_RATE_INFO_HE_GI_3_2; ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X; } else { rx_status->he_gi = NL80211_RATE_INFO_HE_GI_1_6; ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_2X; } break; case 3: if ((he_type == RATE_MCS_HE_TYPE_SU || he_type == RATE_MCS_HE_TYPE_EXT_SU) && rate_n_flags & RATE_MCS_SGI_MSK) rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8; else rx_status->he_gi = NL80211_RATE_INFO_HE_GI_3_2; ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X; break; } he->data5 |= le16_encode_bits(ltf, IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE); } static void iwl_mvm_decode_lsig(struct sk_buff *skb, struct iwl_mvm_rx_phy_data *phy_data) { struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb); struct ieee80211_radiotap_lsig *lsig; switch (phy_data->info_type) { case IWL_RX_PHY_INFO_TYPE_HT: case IWL_RX_PHY_INFO_TYPE_VHT_SU: case IWL_RX_PHY_INFO_TYPE_VHT_MU: case IWL_RX_PHY_INFO_TYPE_HE_TB_EXT: case IWL_RX_PHY_INFO_TYPE_HE_SU: case IWL_RX_PHY_INFO_TYPE_HE_MU: case IWL_RX_PHY_INFO_TYPE_HE_MU_EXT: case IWL_RX_PHY_INFO_TYPE_HE_TB: lsig = skb_put(skb, sizeof(*lsig)); lsig->data1 = cpu_to_le16(IEEE80211_RADIOTAP_LSIG_DATA1_LENGTH_KNOWN); lsig->data2 = le16_encode_bits(le32_get_bits(phy_data->d1, IWL_RX_PHY_DATA1_LSIG_LEN_MASK), IEEE80211_RADIOTAP_LSIG_DATA2_LENGTH); rx_status->flag |= RX_FLAG_RADIOTAP_LSIG; break; default: break; } } void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb, int queue) { struct ieee80211_rx_status *rx_status; struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_rx_mpdu_desc *desc = (void *)pkt->data; struct ieee80211_hdr *hdr; u32 len = le16_to_cpu(desc->mpdu_len); u32 rate_n_flags, gp2_on_air_rise; u16 phy_info = le16_to_cpu(desc->phy_info); struct ieee80211_sta *sta = NULL; struct sk_buff *skb; u8 crypt_len = 0, channel, energy_a, energy_b; size_t desc_size; struct iwl_mvm_rx_phy_data phy_data = { .d4 = desc->phy_data4, .info_type = IWL_RX_PHY_INFO_TYPE_NONE, }; if (unlikely(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))) return; if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) { rate_n_flags = le32_to_cpu(desc->v3.rate_n_flags); channel = desc->v3.channel; gp2_on_air_rise = le32_to_cpu(desc->v3.gp2_on_air_rise); energy_a = desc->v3.energy_a; energy_b = desc->v3.energy_b; desc_size = sizeof(*desc); phy_data.d0 = desc->v3.phy_data0; phy_data.d1 = desc->v3.phy_data1; phy_data.d2 = desc->v3.phy_data2; phy_data.d3 = desc->v3.phy_data3; } else { rate_n_flags = le32_to_cpu(desc->v1.rate_n_flags); channel = desc->v1.channel; gp2_on_air_rise = le32_to_cpu(desc->v1.gp2_on_air_rise); energy_a = desc->v1.energy_a; energy_b = desc->v1.energy_b; desc_size = IWL_RX_DESC_SIZE_V1; phy_data.d0 = desc->v1.phy_data0; phy_data.d1 = desc->v1.phy_data1; phy_data.d2 = desc->v1.phy_data2; phy_data.d3 = desc->v1.phy_data3; } if (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD) phy_data.info_type = le32_get_bits(phy_data.d1, IWL_RX_PHY_DATA1_INFO_TYPE_MASK); hdr = (void *)(pkt->data + desc_size); /* Dont use dev_alloc_skb(), we'll have enough headroom once * ieee80211_hdr pulled. 
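 * A 128 byte head is enough because only the 802.11 header (plus
 * crypto/pad bytes) is copied into the linear part; for larger frames
 * the payload is expected to be attached as a page fragment later, in
 * iwl_mvm_create_skb().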
*/ skb = alloc_skb(128, GFP_ATOMIC); if (!skb) { IWL_ERR(mvm, "alloc_skb failed\n"); return; } if (desc->mac_flags2 & IWL_RX_MPDU_MFLG2_PAD) { /* * If the device inserted padding it means that (it thought) * the 802.11 header wasn't a multiple of 4 bytes long. In * this case, reserve two bytes at the start of the SKB to * align the payload properly in case we end up copying it. */ skb_reserve(skb, 2); } rx_status = IEEE80211_SKB_RXCB(skb); /* This may be overridden by iwl_mvm_rx_he() to HE_RU */ switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) { case RATE_MCS_CHAN_WIDTH_20: break; case RATE_MCS_CHAN_WIDTH_40: rx_status->bw = RATE_INFO_BW_40; break; case RATE_MCS_CHAN_WIDTH_80: rx_status->bw = RATE_INFO_BW_80; break; case RATE_MCS_CHAN_WIDTH_160: rx_status->bw = RATE_INFO_BW_160; break; } if (rate_n_flags & RATE_MCS_HE_MSK) iwl_mvm_rx_he(mvm, skb, &phy_data, rate_n_flags, phy_info, queue); iwl_mvm_decode_lsig(skb, &phy_data); if (iwl_mvm_rx_crypto(mvm, hdr, rx_status, phy_info, desc, le32_to_cpu(pkt->len_n_flags), queue, &crypt_len)) { kfree_skb(skb); return; } /* * Keep packets with CRC errors (and with overrun) for monitor mode * (otherwise the firmware discards them) but mark them as bad. */ if (!(desc->status & cpu_to_le16(IWL_RX_MPDU_STATUS_CRC_OK)) || !(desc->status & cpu_to_le16(IWL_RX_MPDU_STATUS_OVERRUN_OK))) { IWL_DEBUG_RX(mvm, "Bad CRC or FIFO: 0x%08X.\n", le16_to_cpu(desc->status)); rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; } /* set the preamble flag if appropriate */ if (rate_n_flags & RATE_MCS_CCK_MSK && phy_info & IWL_RX_MPDU_PHY_SHORT_PREAMBLE) rx_status->enc_flags |= RX_ENC_FLAG_SHORTPRE; if (likely(!(phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD))) { u64 tsf_on_air_rise; if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) tsf_on_air_rise = le64_to_cpu(desc->v3.tsf_on_air_rise); else tsf_on_air_rise = le64_to_cpu(desc->v1.tsf_on_air_rise); rx_status->mactime = tsf_on_air_rise; /* TSF as indicated by the firmware is at INA time */ rx_status->flag |= RX_FLAG_MACTIME_PLCP_START; } rx_status->device_timestamp = gp2_on_air_rise; rx_status->band = channel > 14 ? NL80211_BAND_5GHZ : NL80211_BAND_2GHZ; rx_status->freq = ieee80211_channel_to_frequency(channel, rx_status->band); iwl_mvm_get_signal_strength(mvm, rx_status, rate_n_flags, energy_a, energy_b); /* update aggregation data for monitor sake on default queue */ if (!queue && (phy_info & IWL_RX_MPDU_PHY_AMPDU)) { bool toggle_bit = phy_info & IWL_RX_MPDU_PHY_AMPDU_TOGGLE; rx_status->flag |= RX_FLAG_AMPDU_DETAILS; /* * Toggle is switched whenever new aggregation starts. Make * sure ampdu_reference is never 0 so we can later use it to * see if the frame was really part of an A-MPDU or not. */ if (toggle_bit != mvm->ampdu_toggle) { mvm->ampdu_ref++; if (mvm->ampdu_ref == 0) mvm->ampdu_ref++; mvm->ampdu_toggle = toggle_bit; } rx_status->ampdu_reference = mvm->ampdu_ref; } if (unlikely(mvm->monitor_on)) iwl_mvm_add_rtap_sniffer_config(mvm, skb); rcu_read_lock(); if (desc->status & cpu_to_le16(IWL_RX_MPDU_STATUS_SRC_STA_FOUND)) { u8 id = desc->sta_id_flags & IWL_RX_MPDU_SIF_STA_ID_MASK; if (!WARN_ON_ONCE(id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))) { sta = rcu_dereference(mvm->fw_id_to_mac_id[id]); if (IS_ERR(sta)) sta = NULL; } } else if (!is_multicast_ether_addr(hdr->addr2)) { /* * This is fine since we prevent two stations with the same * address from being added. 
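 * Passing a NULL localaddr below makes ieee80211_find_sta_by_ifaddr()
 * match the transmitter address across all interfaces, which is safe
 * precisely because duplicate station addresses cannot exist.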
*/ sta = ieee80211_find_sta_by_ifaddr(mvm->hw, hdr->addr2, NULL); } if (sta) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct ieee80211_vif *tx_blocked_vif = rcu_dereference(mvm->csa_tx_blocked_vif); u8 baid = (u8)((le32_to_cpu(desc->reorder_data) & IWL_RX_MPDU_REORDER_BAID_MASK) >> IWL_RX_MPDU_REORDER_BAID_SHIFT); struct iwl_fw_dbg_trigger_tlv *trig; struct ieee80211_vif *vif = mvmsta->vif; if (!mvm->tcm.paused && len >= sizeof(*hdr) && !is_multicast_ether_addr(hdr->addr1) && ieee80211_is_data(hdr->frame_control) && time_after(jiffies, mvm->tcm.ts + MVM_TCM_PERIOD)) schedule_delayed_work(&mvm->tcm.work, 0); /* * We have tx blocked stations (with CS bit). If we heard * frames from a blocked station on a new channel we can * TX to it again. */ if (unlikely(tx_blocked_vif) && tx_blocked_vif == vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(tx_blocked_vif); if (mvmvif->csa_target_freq == rx_status->freq) iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, false); } rs_update_last_rssi(mvm, mvmsta, rx_status); trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif), FW_DBG_TRIGGER_RSSI); if (trig && ieee80211_is_beacon(hdr->frame_control)) { struct iwl_fw_dbg_trigger_low_rssi *rssi_trig; s32 rssi; rssi_trig = (void *)trig->data; rssi = le32_to_cpu(rssi_trig->rssi); if (rx_status->signal < rssi) iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, NULL); } if (ieee80211_is_data(hdr->frame_control)) iwl_mvm_rx_csum(sta, skb, desc); #ifdef CPTCFG_IWLMVM_TDLS_PEER_CACHE /* * these packets are from the AP or an existing TDLS peer. * In both cases this is an existing station. */ iwl_mvm_tdls_peer_cache_pkt(mvm, hdr, len, queue); #endif /* CPTCFG_IWLMVM_TDLS_PEER_CACHE */ if (iwl_mvm_is_dup(sta, queue, rx_status, hdr, desc)) { kfree_skb(skb); goto out; } /* * Our hardware de-aggregates AMSDUs but copies the mac header * as-is to the de-aggregated MPDUs. We need to turn off the * AMSDU bit in the QoS control ourselves. * In addition, HW reverses addr3 and addr4 - reverse it back.
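 * (Reversing is a byte flip: an addr3 received as 55:44:33:22:11:00 is
 * restored to 00:11:22:33:44:55 by iwl_mvm_flip_address(); as the code
 * below checks, this quirk only applies to the 9000 device family.)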
*/ if ((desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU) && !WARN_ON(!ieee80211_is_data_qos(hdr->frame_control))) { u8 *qc = ieee80211_get_qos_ctl(hdr); *qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT; if (mvm->trans->cfg->device_family == IWL_DEVICE_FAMILY_9000) { iwl_mvm_flip_address(hdr->addr3); if (ieee80211_has_a4(hdr->frame_control)) iwl_mvm_flip_address(hdr->addr4); } } if (baid != IWL_RX_REORDER_DATA_INVALID_BAID) { u32 reorder_data = le32_to_cpu(desc->reorder_data); iwl_mvm_agg_rx_received(mvm, reorder_data, baid); } } if (!(rate_n_flags & RATE_MCS_CCK_MSK) && rate_n_flags & RATE_MCS_SGI_MSK) rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; if (rate_n_flags & RATE_HT_MCS_GF_MSK) rx_status->enc_flags |= RX_ENC_FLAG_HT_GF; if (rate_n_flags & RATE_MCS_LDPC_MSK) rx_status->enc_flags |= RX_ENC_FLAG_LDPC; if (rate_n_flags & RATE_MCS_HT_MSK) { u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >> RATE_MCS_STBC_POS; rx_status->encoding = RX_ENC_HT; rx_status->rate_idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK; rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT; } else if (rate_n_flags & RATE_MCS_VHT_MSK) { u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >> RATE_MCS_STBC_POS; rx_status->nss = ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >> RATE_VHT_MCS_NSS_POS) + 1; rx_status->rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK; rx_status->encoding = RX_ENC_VHT; rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT; if (rate_n_flags & RATE_MCS_BF_MSK) rx_status->enc_flags |= RX_ENC_FLAG_BF; } else if (!(rate_n_flags & RATE_MCS_HE_MSK)) { int rate = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags, rx_status->band); if (WARN(rate < 0 || rate > 0xFF, "Invalid rate flags 0x%x, band %d,\n", rate_n_flags, rx_status->band)) { kfree_skb(skb); goto out; } rx_status->rate_idx = rate; } /* management stuff on default queue */ if (!queue) { if (unlikely((ieee80211_is_beacon(hdr->frame_control) || ieee80211_is_probe_resp(hdr->frame_control)) && mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_ENABLED)) mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_FOUND; if (unlikely(ieee80211_is_beacon(hdr->frame_control) || ieee80211_is_probe_resp(hdr->frame_control))) rx_status->boottime_ns = ktime_get_boot_ns(); } if (iwl_mvm_create_skb(mvm, skb, hdr, len, crypt_len, rxb)) { kfree_skb(skb); goto out; } if (!iwl_mvm_reorder(mvm, napi, queue, sta, skb, desc)) iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, sta); out: rcu_read_unlock(); } void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb, int queue) { struct ieee80211_rx_status *rx_status; struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_rx_no_data *desc = (void *)pkt->data; u32 rate_n_flags = le32_to_cpu(desc->rate); u32 gp2_on_air_rise = le32_to_cpu(desc->on_air_rise_time); u32 rssi = le32_to_cpu(desc->rssi); u32 info_type = le32_to_cpu(desc->info) & RX_NO_DATA_INFO_TYPE_MSK; u16 phy_info = IWL_RX_MPDU_PHY_TSF_OVERLOAD; struct ieee80211_sta *sta = NULL; struct sk_buff *skb; u8 channel, energy_a, energy_b; struct iwl_mvm_rx_phy_data phy_data = { .d0 = desc->phy_info[0], .info_type = IWL_RX_PHY_INFO_TYPE_NONE, }; if (unlikely(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))) return; energy_a = (rssi & RX_NO_DATA_CHAIN_A_MSK) >> RX_NO_DATA_CHAIN_A_POS; energy_b = (rssi & RX_NO_DATA_CHAIN_B_MSK) >> RX_NO_DATA_CHAIN_B_POS; channel = (rssi & RX_NO_DATA_CHANNEL_MSK) >> RX_NO_DATA_CHANNEL_POS; phy_data.info_type = le32_get_bits(desc->phy_info[1], IWL_RX_PHY_DATA1_INFO_TYPE_MASK); /* Dont use dev_alloc_skb(), we'll have 
enough headroom once * ieee80211_hdr pulled. */ skb = alloc_skb(128, GFP_ATOMIC); if (!skb) { IWL_ERR(mvm, "alloc_skb failed\n"); return; } rx_status = IEEE80211_SKB_RXCB(skb); /* 0-length PSDU */ rx_status->flag |= RX_FLAG_NO_PSDU; switch (info_type) { case RX_NO_DATA_INFO_TYPE_NDP: rx_status->zero_length_psdu_type = IEEE80211_RADIOTAP_ZERO_LEN_PSDU_SOUNDING; break; case RX_NO_DATA_INFO_TYPE_MU_UNMATCHED: case RX_NO_DATA_INFO_TYPE_HE_TB_UNMATCHED: rx_status->zero_length_psdu_type = IEEE80211_RADIOTAP_ZERO_LEN_PSDU_NOT_CAPTURED; break; default: rx_status->zero_length_psdu_type = IEEE80211_RADIOTAP_ZERO_LEN_PSDU_VENDOR; break; } /* This may be overridden by iwl_mvm_rx_he() to HE_RU */ switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) { case RATE_MCS_CHAN_WIDTH_20: break; case RATE_MCS_CHAN_WIDTH_40: rx_status->bw = RATE_INFO_BW_40; break; case RATE_MCS_CHAN_WIDTH_80: rx_status->bw = RATE_INFO_BW_80; break; case RATE_MCS_CHAN_WIDTH_160: rx_status->bw = RATE_INFO_BW_160; break; } if (rate_n_flags & RATE_MCS_HE_MSK) iwl_mvm_rx_he(mvm, skb, &phy_data, rate_n_flags, phy_info, queue); iwl_mvm_decode_lsig(skb, &phy_data); rx_status->device_timestamp = gp2_on_air_rise; rx_status->band = channel > 14 ? NL80211_BAND_5GHZ : NL80211_BAND_2GHZ; rx_status->freq = ieee80211_channel_to_frequency(channel, rx_status->band); iwl_mvm_get_signal_strength(mvm, rx_status, rate_n_flags, energy_a, energy_b); rcu_read_lock(); if (!(rate_n_flags & RATE_MCS_CCK_MSK) && rate_n_flags & RATE_MCS_SGI_MSK) rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; if (rate_n_flags & RATE_HT_MCS_GF_MSK) rx_status->enc_flags |= RX_ENC_FLAG_HT_GF; if (rate_n_flags & RATE_MCS_LDPC_MSK) rx_status->enc_flags |= RX_ENC_FLAG_LDPC; if (rate_n_flags & RATE_MCS_HT_MSK) { u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >> RATE_MCS_STBC_POS; rx_status->encoding = RX_ENC_HT; rx_status->rate_idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK; rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT; } else if (rate_n_flags & RATE_MCS_VHT_MSK) { u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >> RATE_MCS_STBC_POS; rx_status->rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK; rx_status->encoding = RX_ENC_VHT; rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT; if (rate_n_flags & RATE_MCS_BF_MSK) rx_status->enc_flags |= RX_ENC_FLAG_BF; /* * take the nss from the rx_vec since the rate_n_flags has * only 2 bits for the nss which gives a max of 4 ss but * there may be up to 8 spatial streams */ rx_status->nss = le32_get_bits(desc->rx_vec[0], RX_NO_DATA_RX_VEC0_VHT_NSTS_MSK) + 1; } else if (rate_n_flags & RATE_MCS_HE_MSK) { rx_status->nss = le32_get_bits(desc->rx_vec[0], RX_NO_DATA_RX_VEC0_HE_NSTS_MSK) + 1; } else { int rate = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags, rx_status->band); if (WARN(rate < 0 || rate > 0xFF, "Invalid rate flags 0x%x, band %d,\n", rate_n_flags, rx_status->band)) { kfree_skb(skb); goto out; } rx_status->rate_idx = rate; } ieee80211_rx_napi(mvm->hw, sta, skb, napi); out: rcu_read_unlock(); } void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb, int queue) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_frame_release *release = (void *)pkt->data; iwl_mvm_release_frames_from_notif(mvm, napi, release->baid, le16_to_cpu(release->nssn), queue, 0); } backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/mvm/scan.c000066400000000000000000002001711351064634500264570ustar00rootroot00000000000000/****************************************************************************** * 
* This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* *****************************************************************************/ #include <linux/etherdevice.h> #include <net/mac80211.h> #include "mvm.h" #include "fw/api/scan.h" #include "iwl-io.h" #define IWL_DENSE_EBS_SCAN_RATIO 5 #define IWL_SPARSE_EBS_SCAN_RATIO 1 #define IWL_SCAN_DWELL_ACTIVE 10 #define IWL_SCAN_DWELL_PASSIVE 110 #define IWL_SCAN_DWELL_FRAGMENTED 44 #define IWL_SCAN_DWELL_EXTENDED 90 #define IWL_SCAN_NUM_OF_FRAGS 3 #define IWL_SCAN_LAST_2_4_CHN 14 #define IWL_SCAN_BAND_5_2 0 #define IWL_SCAN_BAND_2_4 1 /* adaptive dwell max budget time [TU] for full scan */ #define IWL_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN 300 /* adaptive dwell max budget time [TU] for directed scan */ #define IWL_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN 100 /* adaptive dwell default high band APs number */ #define IWL_SCAN_ADWELL_DEFAULT_HB_N_APS 8 /* adaptive dwell default low band APs number */ #define IWL_SCAN_ADWELL_DEFAULT_LB_N_APS 2 /* adaptive dwell default APs number in social channels (1, 6, 11) */ #define IWL_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL 10 struct iwl_mvm_scan_timing_params { u32 suspend_time; u32 max_out_time; }; static struct iwl_mvm_scan_timing_params scan_timing[] = { [IWL_SCAN_TYPE_UNASSOC] = { .suspend_time = 0, .max_out_time = 0, }, [IWL_SCAN_TYPE_WILD] = { .suspend_time = 30, .max_out_time = 120, }, [IWL_SCAN_TYPE_MILD] = { .suspend_time = 120, .max_out_time = 120, }, [IWL_SCAN_TYPE_FRAGMENTED] = { .suspend_time = 95, .max_out_time = 44, }, [IWL_SCAN_TYPE_FAST_BALANCE] = { .suspend_time = 30, .max_out_time = 37, }, }; struct iwl_mvm_scan_params { /* For CDB this is low band scan type, for non-CDB - type. */ enum iwl_mvm_scan_type type; enum iwl_mvm_scan_type hb_type; u32 n_channels; u16 delay; int n_ssids; struct cfg80211_ssid *ssids; struct ieee80211_channel **channels; u32 flags; u8 *mac_addr; u8 *mac_addr_mask; bool no_cck; bool pass_all; int n_match_sets; struct iwl_scan_probe_req preq; struct cfg80211_match_set *match_sets; int n_scan_plans; struct cfg80211_sched_scan_plan *scan_plans; u32 measurement_dwell; }; static inline void *iwl_mvm_get_scan_req_umac_data(struct iwl_mvm *mvm) { struct iwl_scan_req_umac *cmd = mvm->scan_cmd; if (iwl_mvm_is_adaptive_dwell_v2_supported(mvm)) return (void *)&cmd->v8.data; if (iwl_mvm_is_adaptive_dwell_supported(mvm)) return (void *)&cmd->v7.data; if (iwl_mvm_cdb_scan_api(mvm)) return (void *)&cmd->v6.data; return (void *)&cmd->v1.data; } static inline struct iwl_scan_umac_chan_param * iwl_mvm_get_scan_req_umac_channel(struct iwl_mvm *mvm) { struct iwl_scan_req_umac *cmd = mvm->scan_cmd; if (iwl_mvm_is_adaptive_dwell_v2_supported(mvm)) return &cmd->v8.channel; if (iwl_mvm_is_adaptive_dwell_supported(mvm)) return &cmd->v7.channel; if (iwl_mvm_cdb_scan_api(mvm)) return &cmd->v6.channel; return &cmd->v1.channel; } static u8 iwl_mvm_scan_rx_ant(struct iwl_mvm *mvm) { if (mvm->scan_rx_ant != ANT_NONE) return mvm->scan_rx_ant; return iwl_mvm_get_valid_rx_ant(mvm); } static inline __le16 iwl_mvm_scan_rx_chain(struct iwl_mvm *mvm) { u16 rx_chain; u8 rx_ant; rx_ant = iwl_mvm_scan_rx_ant(mvm); rx_chain = rx_ant << PHY_RX_CHAIN_VALID_POS; rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_MIMO_SEL_POS; rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_SEL_POS; rx_chain |= 0x1 << PHY_RX_CHAIN_DRIVER_FORCE_POS; return cpu_to_le16(rx_chain); } static __le32 iwl_mvm_scan_rxon_flags(enum nl80211_band band) { if (band == NL80211_BAND_2GHZ) return cpu_to_le32(PHY_BAND_24); else return cpu_to_le32(PHY_BAND_5); } static inline __le32 iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum nl80211_band band, bool no_cck) { u32
tx_ant; iwl_mvm_toggle_tx_ant(mvm, &mvm->scan_last_antenna_idx); tx_ant = BIT(mvm->scan_last_antenna_idx) << RATE_MCS_ANT_POS; if (band == NL80211_BAND_2GHZ && !no_cck) return cpu_to_le32(IWL_RATE_1M_PLCP | RATE_MCS_CCK_MSK | tx_ant); else return cpu_to_le32(IWL_RATE_6M_PLCP | tx_ant); } static void iwl_mvm_scan_condition_iterator(void *data, u8 *mac, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int *global_cnt = data; if (vif->type != NL80211_IFTYPE_P2P_DEVICE && mvmvif->phy_ctxt && mvmvif->phy_ctxt->id < NUM_PHY_CTX) *global_cnt += 1; } static enum iwl_mvm_traffic_load iwl_mvm_get_traffic_load(struct iwl_mvm *mvm) { return mvm->tcm.result.global_load; } static enum iwl_mvm_traffic_load iwl_mvm_get_traffic_load_band(struct iwl_mvm *mvm, enum nl80211_band band) { return mvm->tcm.result.band_load[band]; } struct iwl_is_dcm_with_go_iterator_data { struct ieee80211_vif *current_vif; bool is_dcm_with_p2p_go; }; static void iwl_mvm_is_dcm_with_go_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif) { struct iwl_is_dcm_with_go_iterator_data *data = _data; struct iwl_mvm_vif *other_mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_vif *curr_mvmvif = iwl_mvm_vif_from_mac80211(data->current_vif); /* exclude the given vif */ if (vif == data->current_vif) return; if (vif->type == NL80211_IFTYPE_AP && vif->p2p && other_mvmvif->phy_ctxt && curr_mvmvif->phy_ctxt && other_mvmvif->phy_ctxt->id != curr_mvmvif->phy_ctxt->id) data->is_dcm_with_p2p_go = true; } static enum iwl_mvm_scan_type _iwl_mvm_get_scan_type(struct iwl_mvm *mvm, struct ieee80211_vif *vif, enum iwl_mvm_traffic_load load, bool low_latency) { int global_cnt = 0; ieee80211_iterate_active_interfaces_atomic(mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_scan_condition_iterator, &global_cnt); if (!global_cnt) return IWL_SCAN_TYPE_UNASSOC; if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_FRAGMENTED_SCAN)) { if ((load == IWL_MVM_TRAFFIC_HIGH || low_latency) && (!vif || vif->type != NL80211_IFTYPE_P2P_DEVICE)) return IWL_SCAN_TYPE_FRAGMENTED; /* in case of DCM with GO where BSS DTIM interval < 220msec * set all scan requests as fast-balance scan * */ if (vif && vif->type == NL80211_IFTYPE_STATION && vif->bss_conf.dtim_period < 220) { struct iwl_is_dcm_with_go_iterator_data data = { .current_vif = vif, .is_dcm_with_p2p_go = false, }; ieee80211_iterate_active_interfaces_atomic(mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_is_dcm_with_go_iterator, &data); if (data.is_dcm_with_p2p_go) return IWL_SCAN_TYPE_FAST_BALANCE; } } if (load >= IWL_MVM_TRAFFIC_MEDIUM || low_latency) return IWL_SCAN_TYPE_MILD; return IWL_SCAN_TYPE_WILD; } static enum iwl_mvm_scan_type iwl_mvm_get_scan_type(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { enum iwl_mvm_traffic_load load; bool low_latency; load = iwl_mvm_get_traffic_load(mvm); low_latency = iwl_mvm_low_latency(mvm); return _iwl_mvm_get_scan_type(mvm, vif, load, low_latency); } static enum iwl_mvm_scan_type iwl_mvm_get_scan_type_band(struct iwl_mvm *mvm, struct ieee80211_vif *vif, enum nl80211_band band) { enum iwl_mvm_traffic_load load; bool low_latency; load = iwl_mvm_get_traffic_load_band(mvm, band); low_latency = iwl_mvm_low_latency_band(mvm, band); return _iwl_mvm_get_scan_type(mvm, vif, load, low_latency); } static int iwl_mvm_get_measurement_dwell(struct iwl_mvm *mvm, struct cfg80211_scan_request *req, struct iwl_mvm_scan_params *params) { u32 duration = scan_timing[params->type].max_out_time; if (!req->duration) return 0; if 
(iwl_mvm_is_cdb_supported(mvm)) { u32 hb_time = scan_timing[params->hb_type].max_out_time; duration = min_t(u32, duration, hb_time); } if (req->duration_mandatory && req->duration > duration) { IWL_DEBUG_SCAN(mvm, "Measurement scan - too long dwell %hu (max out time %u)\n", req->duration, duration); return -EOPNOTSUPP; } return min_t(u32, (u32)req->duration, duration); } static inline bool iwl_mvm_rrm_scan_needed(struct iwl_mvm *mvm) { /* require rrm scan whenever the fw supports it */ return fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT); } static int iwl_mvm_max_scan_ie_fw_cmd_room(struct iwl_mvm *mvm) { int max_probe_len; max_probe_len = SCAN_OFFLOAD_PROBE_REQ_SIZE; /* we create the 802.11 header and SSID element */ max_probe_len -= 24 + 2; /* DS parameter set element is added on 2.4GHZ band if required */ if (iwl_mvm_rrm_scan_needed(mvm)) max_probe_len -= 3; return max_probe_len; } int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm) { int max_ie_len = iwl_mvm_max_scan_ie_fw_cmd_room(mvm); /* TODO: [BUG] This function should return the maximum allowed size of * scan IEs, however the LMAC scan api contains both 2GHZ and 5GHZ IEs * in the same command. So the correct implementation of this function * is just iwl_mvm_max_scan_ie_fw_cmd_room() / 2. Currently the scan * command has only 512 bytes and it would leave us with about 240 * bytes for scan IEs, which is clearly not enough. So meanwhile * we will report an incorrect value. This may result in a failure to * issue a scan in unified_scan_lmac and unified_sched_scan_lmac * functions with -ENOBUFS, if a large enough probe will be provided. */ return max_ie_len; } void iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_lmac_scan_complete_notif *notif = (void *)pkt->data; IWL_DEBUG_SCAN(mvm, "Scan offload iteration complete: status=0x%x scanned channels=%d\n", notif->status, notif->scanned_channels); if (mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_FOUND) { IWL_DEBUG_SCAN(mvm, "Pass all scheduled scan results found\n"); ieee80211_sched_scan_results(mvm->hw); mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_ENABLED; } } void iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { IWL_DEBUG_SCAN(mvm, "Scheduled scan results\n"); ieee80211_sched_scan_results(mvm->hw); } static const char *iwl_mvm_ebs_status_str(enum iwl_scan_ebs_status status) { switch (status) { case IWL_SCAN_EBS_SUCCESS: return "successful"; case IWL_SCAN_EBS_INACTIVE: return "inactive"; case IWL_SCAN_EBS_FAILED: case IWL_SCAN_EBS_CHAN_NOT_FOUND: default: return "failed"; } } void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_periodic_scan_complete *scan_notif = (void *)pkt->data; bool aborted = (scan_notif->status == IWL_SCAN_OFFLOAD_ABORTED); /* If this happens, the firmware has mistakenly sent an LMAC * notification during UMAC scans -- warn and ignore it. */ if (WARN_ON_ONCE(fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))) return; /* scan status must be locked for proper checking */ lockdep_assert_held(&mvm->mutex); /* We first check if we were stopping a scan, in which case we * just clear the stopping flag. Then we check if it was a * firmware initiated stop, in which case we need to inform * mac80211. 
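 * (via ieee80211_scan_completed() or ieee80211_sched_scan_stopped()).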
* Note that we can have a stopping and a running scan * simultaneously, but we can't have two different types of * scans stopping or running at the same time (since LMAC * doesn't support it). */ if (mvm->scan_status & IWL_MVM_SCAN_STOPPING_SCHED) { WARN_ON_ONCE(mvm->scan_status & IWL_MVM_SCAN_STOPPING_REGULAR); IWL_DEBUG_SCAN(mvm, "Scheduled scan %s, EBS status %s\n", aborted ? "aborted" : "completed", iwl_mvm_ebs_status_str(scan_notif->ebs_status)); IWL_DEBUG_SCAN(mvm, "Last line %d, Last iteration %d, Time after last iteration %d\n", scan_notif->last_schedule_line, scan_notif->last_schedule_iteration, __le32_to_cpu(scan_notif->time_after_last_iter)); mvm->scan_status &= ~IWL_MVM_SCAN_STOPPING_SCHED; } else if (mvm->scan_status & IWL_MVM_SCAN_STOPPING_REGULAR) { IWL_DEBUG_SCAN(mvm, "Regular scan %s, EBS status %s\n", aborted ? "aborted" : "completed", iwl_mvm_ebs_status_str(scan_notif->ebs_status)); mvm->scan_status &= ~IWL_MVM_SCAN_STOPPING_REGULAR; } else if (mvm->scan_status & IWL_MVM_SCAN_SCHED) { WARN_ON_ONCE(mvm->scan_status & IWL_MVM_SCAN_REGULAR); IWL_DEBUG_SCAN(mvm, "Scheduled scan %s, EBS status %s\n", aborted ? "aborted" : "completed", iwl_mvm_ebs_status_str(scan_notif->ebs_status)); IWL_DEBUG_SCAN(mvm, "Last line %d, Last iteration %d, Time after last iteration %d (FW)\n", scan_notif->last_schedule_line, scan_notif->last_schedule_iteration, __le32_to_cpu(scan_notif->time_after_last_iter)); mvm->scan_status &= ~IWL_MVM_SCAN_SCHED; ieee80211_sched_scan_stopped(mvm->hw); mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED; } else if (mvm->scan_status & IWL_MVM_SCAN_REGULAR) { struct cfg80211_scan_info info = { .aborted = aborted, }; IWL_DEBUG_SCAN(mvm, "Regular scan %s, EBS status %s (FW)\n", aborted ? "aborted" : "completed", iwl_mvm_ebs_status_str(scan_notif->ebs_status)); mvm->scan_status &= ~IWL_MVM_SCAN_REGULAR; ieee80211_scan_completed(mvm->hw, &info); cancel_delayed_work(&mvm->scan_timeout_dwork); iwl_mvm_resume_tcm(mvm); } else { IWL_ERR(mvm, "got scan complete notification but no scan is running\n"); } mvm->last_ebs_successful = scan_notif->ebs_status == IWL_SCAN_EBS_SUCCESS || scan_notif->ebs_status == IWL_SCAN_EBS_INACTIVE; } static int iwl_ssid_exist(u8 *ssid, u8 ssid_len, struct iwl_ssid_ie *ssid_list) { int i; for (i = 0; i < PROBE_OPTION_MAX; i++) { if (!ssid_list[i].len) break; if (ssid_list[i].len == ssid_len && !memcmp(ssid_list[i].ssid, ssid, ssid_len)) return i; } return -1; } /* We insert the SSIDs in an inverted order, because the FW will * invert it back. */ static void iwl_scan_build_ssids(struct iwl_mvm_scan_params *params, struct iwl_ssid_ie *ssids, u32 *ssid_bitmap) { int i, j; int index; /* * copy SSIDs from the match list. * iwl_config_sched_scan_profiles() uses the order of these ssids to * configure the match list.
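 *
 * Worked example (hypothetical values): with two non-empty match sets
 * and one scan SSID that is not in the match list, slots 0-1 hold the
 * match sets in inverted order, the scan SSID lands in slot 2 and
 * *ssid_bitmap becomes BIT(2); a scan SSID that duplicates a match set
 * entry would instead set the bit of that existing slot.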
*/ for (i = 0, j = params->n_match_sets - 1; j >= 0 && i < PROBE_OPTION_MAX; i++, j--) { /* skip empty SSID matchsets */ if (!params->match_sets[j].ssid.ssid_len) continue; ssids[i].id = WLAN_EID_SSID; ssids[i].len = params->match_sets[j].ssid.ssid_len; memcpy(ssids[i].ssid, params->match_sets[j].ssid.ssid, ssids[i].len); } /* add SSIDs from scan SSID list */ *ssid_bitmap = 0; for (j = params->n_ssids - 1; j >= 0 && i < PROBE_OPTION_MAX; i++, j--) { index = iwl_ssid_exist(params->ssids[j].ssid, params->ssids[j].ssid_len, ssids); if (index < 0) { ssids[i].id = WLAN_EID_SSID; ssids[i].len = params->ssids[j].ssid_len; memcpy(ssids[i].ssid, params->ssids[j].ssid, ssids[i].len); *ssid_bitmap |= BIT(i); } else { *ssid_bitmap |= BIT(index); } } } static int iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm, struct cfg80211_sched_scan_request *req) { struct iwl_scan_offload_profile *profile; struct iwl_scan_offload_profile_cfg *profile_cfg; struct iwl_scan_offload_blacklist *blacklist; struct iwl_host_cmd cmd = { .id = SCAN_OFFLOAD_UPDATE_PROFILES_CMD, .len[1] = sizeof(*profile_cfg), .dataflags[0] = IWL_HCMD_DFL_NOCOPY, .dataflags[1] = IWL_HCMD_DFL_NOCOPY, }; int blacklist_len; int i; int ret; if (WARN_ON(req->n_match_sets > IWL_SCAN_MAX_PROFILES)) return -EIO; if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_SHORT_BL) blacklist_len = IWL_SCAN_SHORT_BLACKLIST_LEN; else blacklist_len = IWL_SCAN_MAX_BLACKLIST_LEN; blacklist = kcalloc(blacklist_len, sizeof(*blacklist), GFP_KERNEL); if (!blacklist) return -ENOMEM; profile_cfg = kzalloc(sizeof(*profile_cfg), GFP_KERNEL); if (!profile_cfg) { ret = -ENOMEM; goto free_blacklist; } cmd.data[0] = blacklist; cmd.len[0] = sizeof(*blacklist) * blacklist_len; cmd.data[1] = profile_cfg; /* No blacklist configuration */ profile_cfg->num_profiles = req->n_match_sets; profile_cfg->active_clients = SCAN_CLIENT_SCHED_SCAN; profile_cfg->pass_match = SCAN_CLIENT_SCHED_SCAN; profile_cfg->match_notify = SCAN_CLIENT_SCHED_SCAN; if (!req->n_match_sets || !req->match_sets[0].ssid.ssid_len) profile_cfg->any_beacon_notify = SCAN_CLIENT_SCHED_SCAN; for (i = 0; i < req->n_match_sets; i++) { profile = &profile_cfg->profiles[i]; profile->ssid_index = i; /* Support any cipher and auth algorithm */ profile->unicast_cipher = 0xff; profile->auth_alg = 0xff; profile->network_type = IWL_NETWORK_TYPE_ANY; profile->band_selection = IWL_SCAN_OFFLOAD_SELECT_ANY; profile->client_bitmap = SCAN_CLIENT_SCHED_SCAN; } IWL_DEBUG_SCAN(mvm, "Sending scheduled scan profile config\n"); ret = iwl_mvm_send_cmd(mvm, &cmd); kfree(profile_cfg); free_blacklist: kfree(blacklist); return ret; } static bool iwl_mvm_scan_pass_all(struct iwl_mvm *mvm, struct cfg80211_sched_scan_request *req) { if (req->n_match_sets && req->match_sets[0].ssid.ssid_len) { IWL_DEBUG_SCAN(mvm, "Sending scheduled scan with filtering, n_match_sets %d\n", req->n_match_sets); mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED; return false; } IWL_DEBUG_SCAN(mvm, "Sending Scheduled scan without filtering\n"); mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_ENABLED; return true; } static int iwl_mvm_lmac_scan_abort(struct iwl_mvm *mvm) { int ret; struct iwl_host_cmd cmd = { .id = SCAN_OFFLOAD_ABORT_CMD, }; u32 status = CAN_ABORT_STATUS; ret = iwl_mvm_send_cmd_status(mvm, &cmd, &status); if (ret) return ret; if (status != CAN_ABORT_STATUS) { /* * The scan abort will return 1 for success or * 2 for "failure". 
A failure condition can be * due to simply not being in an active scan which * can occur if we send the scan abort before the * microcode has notified us that a scan is completed. */ IWL_DEBUG_SCAN(mvm, "SCAN OFFLOAD ABORT ret %d.\n", status); ret = -ENOENT; } return ret; } static void iwl_mvm_scan_fill_tx_cmd(struct iwl_mvm *mvm, struct iwl_scan_req_tx_cmd *tx_cmd, bool no_cck) { tx_cmd[0].tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL | TX_CMD_FLG_BT_DIS); tx_cmd[0].rate_n_flags = iwl_mvm_scan_rate_n_flags(mvm, NL80211_BAND_2GHZ, no_cck); tx_cmd[0].sta_id = mvm->aux_sta.sta_id; tx_cmd[1].tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL | TX_CMD_FLG_BT_DIS); tx_cmd[1].rate_n_flags = iwl_mvm_scan_rate_n_flags(mvm, NL80211_BAND_5GHZ, no_cck); tx_cmd[1].sta_id = mvm->aux_sta.sta_id; } static void iwl_mvm_lmac_scan_cfg_channels(struct iwl_mvm *mvm, struct ieee80211_channel **channels, int n_channels, u32 ssid_bitmap, struct iwl_scan_req_lmac *cmd) { struct iwl_scan_channel_cfg_lmac *channel_cfg = (void *)&cmd->data; int i; for (i = 0; i < n_channels; i++) { channel_cfg[i].channel_num = cpu_to_le16(channels[i]->hw_value); channel_cfg[i].iter_count = cpu_to_le16(1); channel_cfg[i].iter_interval = 0; channel_cfg[i].flags = cpu_to_le32(IWL_UNIFIED_SCAN_CHANNEL_PARTIAL | ssid_bitmap); } } static u8 *iwl_mvm_copy_and_insert_ds_elem(struct iwl_mvm *mvm, const u8 *ies, size_t len, u8 *const pos) { static const u8 before_ds_params[] = { WLAN_EID_SSID, WLAN_EID_SUPP_RATES, WLAN_EID_REQUEST, WLAN_EID_EXT_SUPP_RATES, }; size_t offs; u8 *newpos = pos; if (!iwl_mvm_rrm_scan_needed(mvm)) { memcpy(newpos, ies, len); return newpos + len; } offs = ieee80211_ie_split(ies, len, before_ds_params, ARRAY_SIZE(before_ds_params), 0); memcpy(newpos, ies, offs); newpos += offs; /* Add a placeholder for DS Parameter Set element */ *newpos++ = WLAN_EID_DS_PARAMS; *newpos++ = 1; *newpos++ = 0; memcpy(newpos, ies + offs, len - offs); newpos += len - offs; return newpos; } #define WFA_TPC_IE_LEN 9 static void iwl_mvm_add_tpc_report_ie(u8 *pos) { pos[0] = WLAN_EID_VENDOR_SPECIFIC; pos[1] = WFA_TPC_IE_LEN - 2; pos[2] = (WLAN_OUI_MICROSOFT >> 16) & 0xff; pos[3] = (WLAN_OUI_MICROSOFT >> 8) & 0xff; pos[4] = WLAN_OUI_MICROSOFT & 0xff; pos[5] = WLAN_OUI_TYPE_MICROSOFT_TPC; pos[6] = 0; /* pos[7] - tx power will be inserted by the FW */ pos[7] = 0; pos[8] = 0; } static void iwl_mvm_build_scan_probe(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_scan_ies *ies, struct iwl_mvm_scan_params *params) { struct ieee80211_mgmt *frame = (void *)params->preq.buf; u8 *pos, *newpos; const u8 *mac_addr = params->flags & NL80211_SCAN_FLAG_RANDOM_ADDR ? params->mac_addr : NULL; /* * Unfortunately, right now the offload scan doesn't support randomising * within the firmware, so until the firmware API is ready we implement * it in the driver. This means that the scan iterations won't really be * random, only when it's restarted, but at least that helps a bit. 
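 * get_random_mask_addr() keeps the bits of mac_addr that are set in
 * mac_addr_mask and randomises the rest, so e.g. a mask of
 * ff:ff:ff:00:00:00 preserves the OUI while scrambling the NIC part of
 * the source address.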
*/ if (mac_addr) get_random_mask_addr(frame->sa, mac_addr, params->mac_addr_mask); else memcpy(frame->sa, vif->addr, ETH_ALEN); frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ); eth_broadcast_addr(frame->da); eth_broadcast_addr(frame->bssid); frame->seq_ctrl = 0; pos = frame->u.probe_req.variable; *pos++ = WLAN_EID_SSID; *pos++ = 0; params->preq.mac_header.offset = 0; params->preq.mac_header.len = cpu_to_le16(24 + 2); /* Insert ds parameter set element on 2.4 GHz band */ newpos = iwl_mvm_copy_and_insert_ds_elem(mvm, ies->ies[NL80211_BAND_2GHZ], ies->len[NL80211_BAND_2GHZ], pos); params->preq.band_data[0].offset = cpu_to_le16(pos - params->preq.buf); params->preq.band_data[0].len = cpu_to_le16(newpos - pos); pos = newpos; memcpy(pos, ies->ies[NL80211_BAND_5GHZ], ies->len[NL80211_BAND_5GHZ]); params->preq.band_data[1].offset = cpu_to_le16(pos - params->preq.buf); params->preq.band_data[1].len = cpu_to_le16(ies->len[NL80211_BAND_5GHZ]); pos += ies->len[NL80211_BAND_5GHZ]; memcpy(pos, ies->common_ies, ies->common_ie_len); params->preq.common_data.offset = cpu_to_le16(pos - params->preq.buf); if (iwl_mvm_rrm_scan_needed(mvm) && !fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT)) { iwl_mvm_add_tpc_report_ie(pos + ies->common_ie_len); params->preq.common_data.len = cpu_to_le16(ies->common_ie_len + WFA_TPC_IE_LEN); } else { params->preq.common_data.len = cpu_to_le16(ies->common_ie_len); } } static void iwl_mvm_scan_lmac_dwell(struct iwl_mvm *mvm, struct iwl_scan_req_lmac *cmd, struct iwl_mvm_scan_params *params) { cmd->active_dwell = IWL_SCAN_DWELL_ACTIVE; cmd->passive_dwell = IWL_SCAN_DWELL_PASSIVE; cmd->fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED; cmd->extended_dwell = IWL_SCAN_DWELL_EXTENDED; cmd->max_out_time = cpu_to_le32(scan_timing[params->type].max_out_time); cmd->suspend_time = cpu_to_le32(scan_timing[params->type].suspend_time); cmd->scan_prio = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6); } static inline bool iwl_mvm_scan_fits(struct iwl_mvm *mvm, int n_ssids, struct ieee80211_scan_ies *ies, int n_channels) { return ((n_ssids <= PROBE_OPTION_MAX) && (n_channels <= mvm->fw->ucode_capa.n_scan_channels) & (ies->common_ie_len + ies->len[NL80211_BAND_2GHZ] + ies->len[NL80211_BAND_5GHZ] <= iwl_mvm_max_scan_ie_fw_cmd_room(mvm))); } static inline bool iwl_mvm_scan_use_ebs(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { const struct iwl_ucode_capabilities *capa = &mvm->fw->ucode_capa; bool low_latency; if (iwl_mvm_is_cdb_supported(mvm)) low_latency = iwl_mvm_low_latency_band(mvm, NL80211_BAND_5GHZ); else low_latency = iwl_mvm_low_latency(mvm); /* We can only use EBS if: * 1. the feature is supported; * 2. the last EBS was successful; * 3. if only single scan, the single scan EBS API is supported; * 4. it's not a p2p find operation. * 5. 
we are not in low latency mode, * or if fragmented ebs is supported by the FW */ return ((capa->flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT) && mvm->last_ebs_successful && IWL_MVM_ENABLE_EBS && vif->type != NL80211_IFTYPE_P2P_DEVICE && (!low_latency || iwl_mvm_is_frag_ebs_supported(mvm))); } static inline bool iwl_mvm_is_regular_scan(struct iwl_mvm_scan_params *params) { return params->n_scan_plans == 1 && params->scan_plans[0].iterations == 1; } static bool iwl_mvm_is_scan_fragmented(enum iwl_mvm_scan_type type) { return (type == IWL_SCAN_TYPE_FRAGMENTED || type == IWL_SCAN_TYPE_FAST_BALANCE); } static int iwl_mvm_scan_lmac_flags(struct iwl_mvm *mvm, struct iwl_mvm_scan_params *params, struct ieee80211_vif *vif) { int flags = 0; if (params->n_ssids == 0) flags |= IWL_MVM_LMAC_SCAN_FLAG_PASSIVE; if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0) flags |= IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION; if (iwl_mvm_is_scan_fragmented(params->type)) flags |= IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED; if (iwl_mvm_rrm_scan_needed(mvm) && fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT)) flags |= IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED; if (params->pass_all) flags |= IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL; else flags |= IWL_MVM_LMAC_SCAN_FLAG_MATCH; #ifdef CPTCFG_IWLWIFI_DEBUGFS if (mvm->scan_iter_notif_enabled) flags |= IWL_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE; #endif if (mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_ENABLED) flags |= IWL_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE; if (iwl_mvm_is_regular_scan(params) && vif->type != NL80211_IFTYPE_P2P_DEVICE && !iwl_mvm_is_scan_fragmented(params->type)) flags |= IWL_MVM_LMAC_SCAN_FLAG_EXTENDED_DWELL; return flags; } static void iwl_mvm_scan_set_legacy_probe_req(struct iwl_scan_probe_req_v1 *p_req, struct iwl_scan_probe_req *src_p_req) { int i; p_req->mac_header = src_p_req->mac_header; for (i = 0; i < SCAN_NUM_BAND_PROBE_DATA_V_1; i++) p_req->band_data[i] = src_p_req->band_data[i]; p_req->common_data = src_p_req->common_data; memcpy(p_req->buf, src_p_req->buf, sizeof(p_req->buf)); } static int iwl_mvm_scan_lmac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_mvm_scan_params *params) { struct iwl_scan_req_lmac *cmd = mvm->scan_cmd; struct iwl_scan_probe_req_v1 *preq = (void *)(cmd->data + sizeof(struct iwl_scan_channel_cfg_lmac) * mvm->fw->ucode_capa.n_scan_channels); u32 ssid_bitmap = 0; int i; lockdep_assert_held(&mvm->mutex); memset(cmd, 0, ksize(cmd)); if (WARN_ON(params->n_scan_plans > IWL_MAX_SCHED_SCAN_PLANS)) return -EINVAL; iwl_mvm_scan_lmac_dwell(mvm, cmd, params); cmd->rx_chain_select = iwl_mvm_scan_rx_chain(mvm); cmd->iter_num = cpu_to_le32(1); cmd->n_channels = (u8)params->n_channels; cmd->delay = cpu_to_le32(params->delay); cmd->scan_flags = cpu_to_le32(iwl_mvm_scan_lmac_flags(mvm, params, vif)); cmd->flags = iwl_mvm_scan_rxon_flags(params->channels[0]->band); cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP | MAC_FILTER_IN_BEACON); iwl_mvm_scan_fill_tx_cmd(mvm, cmd->tx_cmd, params->no_cck); iwl_scan_build_ssids(params, cmd->direct_scan, &ssid_bitmap); /* this API uses bits 1-20 instead of 0-19 */ ssid_bitmap <<= 1; for (i = 0; i < params->n_scan_plans; i++) { struct cfg80211_sched_scan_plan *scan_plan = &params->scan_plans[i]; cmd->schedule[i].delay = cpu_to_le16(scan_plan->interval); cmd->schedule[i].iterations = scan_plan->iterations; cmd->schedule[i].full_scan_mul = 1; } /* * If the number of iterations of the last scan plan is set to * zero, it should run infinitely. However, this is not always the case. 
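 * (The zero comes from cfg80211, where a last scan plan with
 * iterations == 0 means "run forever"; the fixup below translates it
 * to 0xff, the FW's effectively-infinite count. Hypothetical
 * illustration: plans {interval 10s, iterations 3} and
 * {interval 60s, iterations 0} are sent to the FW as 3 and 0xff
 * iterations respectively.)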
* For example, when regular scan is requested the driver sets one scan * plan with one iteration. */ if (!cmd->schedule[i - 1].iterations) cmd->schedule[i - 1].iterations = 0xff; if (iwl_mvm_scan_use_ebs(mvm, vif)) { cmd->channel_opt[0].flags = cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS | IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE | IWL_SCAN_CHANNEL_FLAG_CACHE_ADD); cmd->channel_opt[0].non_ebs_ratio = cpu_to_le16(IWL_DENSE_EBS_SCAN_RATIO); cmd->channel_opt[1].flags = cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS | IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE | IWL_SCAN_CHANNEL_FLAG_CACHE_ADD); cmd->channel_opt[1].non_ebs_ratio = cpu_to_le16(IWL_SPARSE_EBS_SCAN_RATIO); } iwl_mvm_lmac_scan_cfg_channels(mvm, params->channels, params->n_channels, ssid_bitmap, cmd); iwl_mvm_scan_set_legacy_probe_req(preq, &params->preq); return 0; } static int rate_to_scan_rate_flag(unsigned int rate) { static const int rate_to_scan_rate[IWL_RATE_COUNT] = { [IWL_RATE_1M_INDEX] = SCAN_CONFIG_RATE_1M, [IWL_RATE_2M_INDEX] = SCAN_CONFIG_RATE_2M, [IWL_RATE_5M_INDEX] = SCAN_CONFIG_RATE_5M, [IWL_RATE_11M_INDEX] = SCAN_CONFIG_RATE_11M, [IWL_RATE_6M_INDEX] = SCAN_CONFIG_RATE_6M, [IWL_RATE_9M_INDEX] = SCAN_CONFIG_RATE_9M, [IWL_RATE_12M_INDEX] = SCAN_CONFIG_RATE_12M, [IWL_RATE_18M_INDEX] = SCAN_CONFIG_RATE_18M, [IWL_RATE_24M_INDEX] = SCAN_CONFIG_RATE_24M, [IWL_RATE_36M_INDEX] = SCAN_CONFIG_RATE_36M, [IWL_RATE_48M_INDEX] = SCAN_CONFIG_RATE_48M, [IWL_RATE_54M_INDEX] = SCAN_CONFIG_RATE_54M, }; return rate_to_scan_rate[rate]; } static __le32 iwl_mvm_scan_config_rates(struct iwl_mvm *mvm) { struct ieee80211_supported_band *band; unsigned int rates = 0; int i; band = &mvm->nvm_data->bands[NL80211_BAND_2GHZ]; for (i = 0; i < band->n_bitrates; i++) rates |= rate_to_scan_rate_flag(band->bitrates[i].hw_value); band = &mvm->nvm_data->bands[NL80211_BAND_5GHZ]; for (i = 0; i < band->n_bitrates; i++) rates |= rate_to_scan_rate_flag(band->bitrates[i].hw_value); /* Set both basic rates and supported rates */ rates |= SCAN_CONFIG_SUPPORTED_RATE(rates); return cpu_to_le32(rates); } static void iwl_mvm_fill_scan_dwell(struct iwl_mvm *mvm, struct iwl_scan_dwell *dwell) { dwell->active = IWL_SCAN_DWELL_ACTIVE; dwell->passive = IWL_SCAN_DWELL_PASSIVE; dwell->fragmented = IWL_SCAN_DWELL_FRAGMENTED; dwell->extended = IWL_SCAN_DWELL_EXTENDED; } static void iwl_mvm_fill_channels(struct iwl_mvm *mvm, u8 *channels, u32 max_channels) { struct ieee80211_supported_band *band; int i, j = 0; band = &mvm->nvm_data->bands[NL80211_BAND_2GHZ]; for (i = 0; i < band->n_channels && j < max_channels; i++, j++) channels[j] = band->channels[i].hw_value; band = &mvm->nvm_data->bands[NL80211_BAND_5GHZ]; for (i = 0; i < band->n_channels && j < max_channels; i++, j++) channels[j] = band->channels[i].hw_value; } static void iwl_mvm_fill_scan_config_v1(struct iwl_mvm *mvm, void *config, u32 flags, u8 channel_flags, u32 max_channels) { enum iwl_mvm_scan_type type = iwl_mvm_get_scan_type(mvm, NULL); struct iwl_scan_config_v1 *cfg = config; cfg->flags = cpu_to_le32(flags); cfg->tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm)); cfg->rx_chains = cpu_to_le32(iwl_mvm_scan_rx_ant(mvm)); cfg->legacy_rates = iwl_mvm_scan_config_rates(mvm); cfg->out_of_channel_time = cpu_to_le32(scan_timing[type].max_out_time); cfg->suspend_time = cpu_to_le32(scan_timing[type].suspend_time); iwl_mvm_fill_scan_dwell(mvm, &cfg->dwell); memcpy(&cfg->mac_addr, &mvm->addresses[0].addr, ETH_ALEN); cfg->bcast_sta_id = mvm->aux_sta.sta_id; cfg->channel_flags = channel_flags; iwl_mvm_fill_channels(mvm, cfg->channel_array, 
max_channels); } static void iwl_mvm_fill_scan_config_v2(struct iwl_mvm *mvm, void *config, u32 flags, u8 channel_flags, u32 max_channels) { struct iwl_scan_config_v2 *cfg = config; cfg->flags = cpu_to_le32(flags); cfg->tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm)); cfg->rx_chains = cpu_to_le32(iwl_mvm_scan_rx_ant(mvm)); cfg->legacy_rates = iwl_mvm_scan_config_rates(mvm); if (iwl_mvm_is_cdb_supported(mvm)) { enum iwl_mvm_scan_type lb_type, hb_type; lb_type = iwl_mvm_get_scan_type_band(mvm, NULL, NL80211_BAND_2GHZ); hb_type = iwl_mvm_get_scan_type_band(mvm, NULL, NL80211_BAND_5GHZ); cfg->out_of_channel_time[SCAN_LB_LMAC_IDX] = cpu_to_le32(scan_timing[lb_type].max_out_time); cfg->suspend_time[SCAN_LB_LMAC_IDX] = cpu_to_le32(scan_timing[lb_type].suspend_time); cfg->out_of_channel_time[SCAN_HB_LMAC_IDX] = cpu_to_le32(scan_timing[hb_type].max_out_time); cfg->suspend_time[SCAN_HB_LMAC_IDX] = cpu_to_le32(scan_timing[hb_type].suspend_time); } else { enum iwl_mvm_scan_type type = iwl_mvm_get_scan_type(mvm, NULL); cfg->out_of_channel_time[SCAN_LB_LMAC_IDX] = cpu_to_le32(scan_timing[type].max_out_time); cfg->suspend_time[SCAN_LB_LMAC_IDX] = cpu_to_le32(scan_timing[type].suspend_time); } iwl_mvm_fill_scan_dwell(mvm, &cfg->dwell); memcpy(&cfg->mac_addr, &mvm->addresses[0].addr, ETH_ALEN); cfg->bcast_sta_id = mvm->aux_sta.sta_id; cfg->channel_flags = channel_flags; iwl_mvm_fill_channels(mvm, cfg->channel_array, max_channels); } static int iwl_mvm_legacy_config_scan(struct iwl_mvm *mvm) { void *cfg; int ret, cmd_size; struct iwl_host_cmd cmd = { .id = iwl_cmd_id(SCAN_CFG_CMD, IWL_ALWAYS_LONG_GROUP, 0), }; enum iwl_mvm_scan_type type; enum iwl_mvm_scan_type hb_type = IWL_SCAN_TYPE_NOT_SET; int num_channels = mvm->nvm_data->bands[NL80211_BAND_2GHZ].n_channels + mvm->nvm_data->bands[NL80211_BAND_5GHZ].n_channels; u32 flags; u8 channel_flags; if (WARN_ON(num_channels > mvm->fw->ucode_capa.n_scan_channels)) num_channels = mvm->fw->ucode_capa.n_scan_channels; if (iwl_mvm_is_cdb_supported(mvm)) { type = iwl_mvm_get_scan_type_band(mvm, NULL, NL80211_BAND_2GHZ); hb_type = iwl_mvm_get_scan_type_band(mvm, NULL, NL80211_BAND_5GHZ); if (type == mvm->scan_type && hb_type == mvm->hb_scan_type) return 0; } else { type = iwl_mvm_get_scan_type(mvm, NULL); if (type == mvm->scan_type) return 0; } if (iwl_mvm_cdb_scan_api(mvm)) cmd_size = sizeof(struct iwl_scan_config_v2); else cmd_size = sizeof(struct iwl_scan_config_v1); cmd_size += num_channels; cfg = kzalloc(cmd_size, GFP_KERNEL); if (!cfg) return -ENOMEM; flags = SCAN_CONFIG_FLAG_ACTIVATE | SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS | SCAN_CONFIG_FLAG_SET_TX_CHAINS | SCAN_CONFIG_FLAG_SET_RX_CHAINS | SCAN_CONFIG_FLAG_SET_AUX_STA_ID | SCAN_CONFIG_FLAG_SET_ALL_TIMES | SCAN_CONFIG_FLAG_SET_LEGACY_RATES | SCAN_CONFIG_FLAG_SET_MAC_ADDR | SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS | SCAN_CONFIG_N_CHANNELS(num_channels) | (iwl_mvm_is_scan_fragmented(type) ? SCAN_CONFIG_FLAG_SET_FRAGMENTED : SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED); channel_flags = IWL_CHANNEL_FLAG_EBS | IWL_CHANNEL_FLAG_ACCURATE_EBS | IWL_CHANNEL_FLAG_EBS_ADD | IWL_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE; /* * Check for fragmented scan on LMAC2 - high band. * LMAC1 - low band is checked above. */ if (iwl_mvm_cdb_scan_api(mvm)) { if (iwl_mvm_is_cdb_supported(mvm)) flags |= (iwl_mvm_is_scan_fragmented(hb_type)) ? 
SCAN_CONFIG_FLAG_SET_LMAC2_FRAGMENTED : SCAN_CONFIG_FLAG_CLEAR_LMAC2_FRAGMENTED; iwl_mvm_fill_scan_config_v2(mvm, cfg, flags, channel_flags, num_channels); } else { iwl_mvm_fill_scan_config_v1(mvm, cfg, flags, channel_flags, num_channels); } cmd.data[0] = cfg; cmd.len[0] = cmd_size; cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY; IWL_DEBUG_SCAN(mvm, "Sending UMAC scan config\n"); ret = iwl_mvm_send_cmd(mvm, &cmd); if (!ret) { mvm->scan_type = type; mvm->hb_scan_type = hb_type; } kfree(cfg); return ret; } int iwl_mvm_config_scan(struct iwl_mvm *mvm) { struct iwl_scan_config cfg; struct iwl_host_cmd cmd = { .id = iwl_cmd_id(SCAN_CFG_CMD, IWL_ALWAYS_LONG_GROUP, 0), .len[0] = sizeof(cfg), .data[0] = &cfg, .dataflags[0] = IWL_HCMD_DFL_NOCOPY, }; if (!iwl_mvm_is_reduced_config_scan_supported(mvm)) return iwl_mvm_legacy_config_scan(mvm); memset(&cfg, 0, sizeof(cfg)); cfg.bcast_sta_id = mvm->aux_sta.sta_id; cfg.tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm)); cfg.rx_chains = cpu_to_le32(iwl_mvm_scan_rx_ant(mvm)); IWL_DEBUG_SCAN(mvm, "Sending UMAC scan config\n"); return iwl_mvm_send_cmd(mvm, &cmd); } static int iwl_mvm_scan_uid_by_status(struct iwl_mvm *mvm, int status) { int i; for (i = 0; i < mvm->max_scans; i++) if (mvm->scan_uid_status[i] == status) return i; return -ENOENT; } static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm, struct iwl_scan_req_umac *cmd, struct iwl_mvm_scan_params *params) { struct iwl_mvm_scan_timing_params *timing, *hb_timing; u8 active_dwell, passive_dwell; timing = &scan_timing[params->type]; active_dwell = params->measurement_dwell ? params->measurement_dwell : IWL_SCAN_DWELL_ACTIVE; passive_dwell = params->measurement_dwell ? params->measurement_dwell : IWL_SCAN_DWELL_PASSIVE; if (iwl_mvm_is_adaptive_dwell_supported(mvm)) { cmd->v7.adwell_default_n_aps_social = IWL_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL; cmd->v7.adwell_default_n_aps = IWL_SCAN_ADWELL_DEFAULT_LB_N_APS; if (iwl_mvm_is_adwell_hb_ap_num_supported(mvm)) cmd->v9.adwell_default_hb_n_aps = IWL_SCAN_ADWELL_DEFAULT_HB_N_APS; /* if custom max budget was configured with debugfs */ if (IWL_MVM_ADWELL_MAX_BUDGET) cmd->v7.adwell_max_budget = cpu_to_le16(IWL_MVM_ADWELL_MAX_BUDGET); else if (params->ssids && params->ssids[0].ssid_len) cmd->v7.adwell_max_budget = cpu_to_le16(IWL_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN); else cmd->v7.adwell_max_budget = cpu_to_le16(IWL_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN); cmd->v7.scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6); cmd->v7.max_out_time[SCAN_LB_LMAC_IDX] = cpu_to_le32(timing->max_out_time); cmd->v7.suspend_time[SCAN_LB_LMAC_IDX] = cpu_to_le32(timing->suspend_time); if (iwl_mvm_is_cdb_supported(mvm)) { hb_timing = &scan_timing[params->hb_type]; cmd->v7.max_out_time[SCAN_HB_LMAC_IDX] = cpu_to_le32(hb_timing->max_out_time); cmd->v7.suspend_time[SCAN_HB_LMAC_IDX] = cpu_to_le32(hb_timing->suspend_time); } if (!iwl_mvm_is_adaptive_dwell_v2_supported(mvm)) { cmd->v7.active_dwell = active_dwell; cmd->v7.passive_dwell = passive_dwell; cmd->v7.fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED; } else { cmd->v8.active_dwell[SCAN_LB_LMAC_IDX] = active_dwell; cmd->v8.passive_dwell[SCAN_LB_LMAC_IDX] = passive_dwell; if (iwl_mvm_is_cdb_supported(mvm)) { cmd->v8.active_dwell[SCAN_HB_LMAC_IDX] = active_dwell; cmd->v8.passive_dwell[SCAN_HB_LMAC_IDX] = passive_dwell; } } } else { cmd->v1.extended_dwell = params->measurement_dwell ? 
params->measurement_dwell : IWL_SCAN_DWELL_EXTENDED; cmd->v1.active_dwell = active_dwell; cmd->v1.passive_dwell = passive_dwell; cmd->v1.fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED; if (iwl_mvm_is_cdb_supported(mvm)) { hb_timing = &scan_timing[params->hb_type]; cmd->v6.max_out_time[SCAN_HB_LMAC_IDX] = cpu_to_le32(hb_timing->max_out_time); cmd->v6.suspend_time[SCAN_HB_LMAC_IDX] = cpu_to_le32(hb_timing->suspend_time); } if (iwl_mvm_cdb_scan_api(mvm)) { cmd->v6.scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6); cmd->v6.max_out_time[SCAN_LB_LMAC_IDX] = cpu_to_le32(timing->max_out_time); cmd->v6.suspend_time[SCAN_LB_LMAC_IDX] = cpu_to_le32(timing->suspend_time); } else { cmd->v1.scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6); cmd->v1.max_out_time = cpu_to_le32(timing->max_out_time); cmd->v1.suspend_time = cpu_to_le32(timing->suspend_time); } } if (iwl_mvm_is_regular_scan(params)) cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6); else cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_2); } static void iwl_mvm_umac_scan_cfg_channels(struct iwl_mvm *mvm, struct ieee80211_channel **channels, int n_channels, u32 ssid_bitmap, struct iwl_scan_channel_cfg_umac *channel_cfg) { int i; for (i = 0; i < n_channels; i++) { channel_cfg[i].flags = cpu_to_le32(ssid_bitmap); channel_cfg[i].v1.channel_num = channels[i]->hw_value; if (iwl_mvm_is_scan_ext_chan_supported(mvm)) { channel_cfg[i].v2.band = channels[i]->hw_value <= IWL_SCAN_LAST_2_4_CHN ? IWL_SCAN_BAND_2_4 : IWL_SCAN_BAND_5_2; channel_cfg[i].v2.iter_count = 1; channel_cfg[i].v2.iter_interval = 0; } else { channel_cfg[i].v1.iter_count = 1; channel_cfg[i].v1.iter_interval = 0; } } } static u16 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm, struct iwl_mvm_scan_params *params, struct ieee80211_vif *vif) { u16 flags = 0; if (params->n_ssids == 0) flags = IWL_UMAC_SCAN_GEN_FLAGS_PASSIVE; if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0) flags |= IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT; if (iwl_mvm_is_scan_fragmented(params->type)) flags |= IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED; if (iwl_mvm_is_cdb_supported(mvm) && iwl_mvm_is_scan_fragmented(params->hb_type)) flags |= IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED; if (iwl_mvm_rrm_scan_needed(mvm) && fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT)) flags |= IWL_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED; if (params->pass_all) flags |= IWL_UMAC_SCAN_GEN_FLAGS_PASS_ALL; else flags |= IWL_UMAC_SCAN_GEN_FLAGS_MATCH; if (!iwl_mvm_is_regular_scan(params)) flags |= IWL_UMAC_SCAN_GEN_FLAGS_PERIODIC; if (params->measurement_dwell) flags |= IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE; #ifdef CPTCFG_IWLWIFI_DEBUGFS if (mvm->scan_iter_notif_enabled) flags |= IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE; #endif if (mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_ENABLED) flags |= IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE; if (iwl_mvm_is_adaptive_dwell_supported(mvm) && IWL_MVM_ADWELL_ENABLE && vif->type != NL80211_IFTYPE_P2P_DEVICE) flags |= IWL_UMAC_SCAN_GEN_FLAGS_ADAPTIVE_DWELL; /* * Extended dwell is relevant only for low band to start with, as it is * being used for social channels only (1, 6, 11), so we can check * only scan type on low band also for CDB. 
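 *
 * One hypothetical walk-through of the gating below: a one-shot scan
 * on a station vif, not fragmented, on firmware without adaptive dwell
 * and without OCE gets IWL_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL; once
 * the firmware advertises adaptive dwell or OCE, the bit is left
 * clear, since OCE reuses it for probe request deferral suppression.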
*/ if (iwl_mvm_is_regular_scan(params) && vif->type != NL80211_IFTYPE_P2P_DEVICE && !iwl_mvm_is_scan_fragmented(params->type) && !iwl_mvm_is_adaptive_dwell_supported(mvm) && !iwl_mvm_is_oce_supported(mvm)) flags |= IWL_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL; if (iwl_mvm_is_oce_supported(mvm)) { if ((params->flags & NL80211_SCAN_FLAG_OCE_PROBE_REQ_HIGH_TX_RATE)) flags |= IWL_UMAC_SCAN_GEN_FLAGS_PROB_REQ_HIGH_TX_RATE; /* Since IWL_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL and * NL80211_SCAN_FLAG_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION share * the same bit, we need to make sure that we use this bit here * only when IWL_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL cannot be * used. */ if ((params->flags & NL80211_SCAN_FLAG_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION) && !WARN_ON_ONCE(!iwl_mvm_is_adaptive_dwell_supported(mvm))) flags |= IWL_UMAC_SCAN_GEN_FLAGS_PROB_REQ_DEFER_SUPP; if ((params->flags & NL80211_SCAN_FLAG_FILS_MAX_CHANNEL_TIME)) flags |= IWL_UMAC_SCAN_GEN_FLAGS_MAX_CHNL_TIME; } return flags; } static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_mvm_scan_params *params, int type) { struct iwl_scan_req_umac *cmd = mvm->scan_cmd; struct iwl_scan_umac_chan_param *chan_param; void *cmd_data = iwl_mvm_get_scan_req_umac_data(mvm); void *sec_part = cmd_data + sizeof(struct iwl_scan_channel_cfg_umac) * mvm->fw->ucode_capa.n_scan_channels; struct iwl_scan_req_umac_tail_v2 *tail_v2 = (struct iwl_scan_req_umac_tail_v2 *)sec_part; struct iwl_scan_req_umac_tail_v1 *tail_v1; struct iwl_ssid_ie *direct_scan; int uid, i; u32 ssid_bitmap = 0; u8 channel_flags = 0; u16 gen_flags; struct iwl_mvm_vif *scan_vif = iwl_mvm_vif_from_mac80211(vif); chan_param = iwl_mvm_get_scan_req_umac_channel(mvm); lockdep_assert_held(&mvm->mutex); if (WARN_ON(params->n_scan_plans > IWL_MAX_SCHED_SCAN_PLANS)) return -EINVAL; uid = iwl_mvm_scan_uid_by_status(mvm, 0); if (uid < 0) return uid; memset(cmd, 0, ksize(cmd)); iwl_mvm_scan_umac_dwell(mvm, cmd, params); mvm->scan_uid_status[uid] = type; cmd->uid = cpu_to_le32(uid); gen_flags = iwl_mvm_scan_umac_flags(mvm, params, vif); cmd->general_flags = cpu_to_le16(gen_flags); if (iwl_mvm_is_adaptive_dwell_v2_supported(mvm)) { if (gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED) cmd->v8.num_of_fragments[SCAN_LB_LMAC_IDX] = IWL_SCAN_NUM_OF_FRAGS; if (gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED) cmd->v8.num_of_fragments[SCAN_HB_LMAC_IDX] = IWL_SCAN_NUM_OF_FRAGS; cmd->v8.general_flags2 = IWL_UMAC_SCAN_GEN_FLAGS2_ALLOW_CHNL_REORDER; } cmd->scan_start_mac_id = scan_vif->id; if (type == IWL_MVM_SCAN_SCHED || type == IWL_MVM_SCAN_NETDETECT) cmd->flags = cpu_to_le32(IWL_UMAC_SCAN_FLAG_PREEMPTIVE); if (iwl_mvm_scan_use_ebs(mvm, vif)) { channel_flags = IWL_SCAN_CHANNEL_FLAG_EBS | IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE | IWL_SCAN_CHANNEL_FLAG_CACHE_ADD; /* set fragmented ebs for fragmented scan on HB channels */ if (iwl_mvm_is_frag_ebs_supported(mvm)) { if (gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED || (!iwl_mvm_is_cdb_supported(mvm) && gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED)) channel_flags |= IWL_SCAN_CHANNEL_FLAG_EBS_FRAG; } } chan_param->flags = channel_flags; chan_param->count = params->n_channels; for (i = 0; i < params->n_scan_plans; i++) { struct cfg80211_sched_scan_plan *scan_plan = &params->scan_plans[i]; tail_v2->schedule[i].iter_count = scan_plan->iterations; tail_v2->schedule[i].interval = cpu_to_le16(scan_plan->interval); } /* * If the number of iterations of the last scan plan is set to * zero, it should run infinitely. 
However, this is not always the case. * For example, when regular scan is requested the driver sets one scan * plan with one iteration. */ if (!tail_v2->schedule[i - 1].iter_count) tail_v2->schedule[i - 1].iter_count = 0xff; tail_v2->delay = cpu_to_le16(params->delay); if (iwl_mvm_is_scan_ext_chan_supported(mvm)) { tail_v2->preq = params->preq; direct_scan = tail_v2->direct_scan; } else { tail_v1 = (struct iwl_scan_req_umac_tail_v1 *)sec_part; iwl_mvm_scan_set_legacy_probe_req(&tail_v1->preq, &params->preq); direct_scan = tail_v1->direct_scan; } iwl_scan_build_ssids(params, direct_scan, &ssid_bitmap); iwl_mvm_umac_scan_cfg_channels(mvm, params->channels, params->n_channels, ssid_bitmap, cmd_data); return 0; } static int iwl_mvm_num_scans(struct iwl_mvm *mvm) { return hweight32(mvm->scan_status & IWL_MVM_SCAN_MASK); } static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type) { bool unified_image = fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); /* This looks a bit arbitrary, but the idea is that if we run * out of possible simultaneous scans and the userspace is * trying to run a scan type that is already running, we * return -EBUSY. But if the userspace wants to start a * different type of scan, we stop the opposite type to make * space for the new request. The reason is backwards * compatibility with old wpa_supplicant that wouldn't stop a * scheduled scan before starting a normal scan. */ /* FW supports only a single periodic scan */ if ((type == IWL_MVM_SCAN_SCHED || type == IWL_MVM_SCAN_NETDETECT) && mvm->scan_status & (IWL_MVM_SCAN_SCHED | IWL_MVM_SCAN_NETDETECT)) return -EBUSY; if (iwl_mvm_num_scans(mvm) < mvm->max_scans) return 0; /* Use a switch, even though this is a bitmask, so that more * than one bit set will fall in default and we will warn. */ switch (type) { case IWL_MVM_SCAN_REGULAR: if (mvm->scan_status & IWL_MVM_SCAN_REGULAR_MASK) return -EBUSY; return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true); case IWL_MVM_SCAN_SCHED: if (mvm->scan_status & IWL_MVM_SCAN_SCHED_MASK) return -EBUSY; return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true); case IWL_MVM_SCAN_NETDETECT: /* For non-unified images, there's no need to stop * anything for net-detect since the firmware is * restarted anyway. This way, any sched scans that * were running will be restarted when we resume. */ if (!unified_image) return 0; /* If this is a unified image and we ran out of scans, * we need to stop something. Prefer stopping regular * scans, because the results are useless at this * point, and we should be able to keep running * another scheduled scan while suspended. */ if (mvm->scan_status & IWL_MVM_SCAN_REGULAR_MASK) return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true); if (mvm->scan_status & IWL_MVM_SCAN_SCHED_MASK) return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true); /* Something is wrong if no scan was running but we * ran out of scans. 
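 *
 * Illustrative walk-through (hypothetical state): with every scan slot
 * in use, a new regular scan returns -EBUSY if a regular scan is
 * already running, but stops a running sched scan to make room;
 * symmetrically, a new sched scan stops a running regular scan.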
*/ /* fall through */ default: WARN_ON(1); break; } return -EIO; } #define SCAN_TIMEOUT (CPTCFG_IWL_TIMEOUT_FACTOR * 20000) void iwl_mvm_scan_timeout_wk(struct work_struct *work) { struct delayed_work *delayed_work = to_delayed_work(work); struct iwl_mvm *mvm = container_of(delayed_work, struct iwl_mvm, scan_timeout_dwork); IWL_ERR(mvm, "regular scan timed out\n"); iwl_force_nmi(mvm->trans); } static void iwl_mvm_fill_scan_type(struct iwl_mvm *mvm, struct iwl_mvm_scan_params *params, struct ieee80211_vif *vif) { if (iwl_mvm_is_cdb_supported(mvm)) { params->type = iwl_mvm_get_scan_type_band(mvm, vif, NL80211_BAND_2GHZ); params->hb_type = iwl_mvm_get_scan_type_band(mvm, vif, NL80211_BAND_5GHZ); } else { params->type = iwl_mvm_get_scan_type(mvm, vif); } } int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_scan_request *req, struct ieee80211_scan_ies *ies) { struct iwl_host_cmd hcmd = { .len = { iwl_mvm_scan_size(mvm), }, .data = { mvm->scan_cmd, }, .dataflags = { IWL_HCMD_DFL_NOCOPY, }, }; struct iwl_mvm_scan_params params = {}; int ret; struct cfg80211_sched_scan_plan scan_plan = { .iterations = 1 }; lockdep_assert_held(&mvm->mutex); if (iwl_mvm_is_lar_supported(mvm) && !mvm->lar_regdom_set) { IWL_ERR(mvm, "scan while LAR regdomain is not set\n"); return -EBUSY; } ret = iwl_mvm_check_running_scans(mvm, IWL_MVM_SCAN_REGULAR); if (ret) return ret; /* we should have failed registration if scan_cmd was NULL */ if (WARN_ON(!mvm->scan_cmd)) return -ENOMEM; if (!iwl_mvm_scan_fits(mvm, req->n_ssids, ies, req->n_channels)) return -ENOBUFS; params.n_ssids = req->n_ssids; params.flags = req->flags; params.n_channels = req->n_channels; params.delay = 0; params.ssids = req->ssids; params.channels = req->channels; params.mac_addr = req->mac_addr; params.mac_addr_mask = req->mac_addr_mask; params.no_cck = req->no_cck; params.pass_all = true; params.n_match_sets = 0; params.match_sets = NULL; params.scan_plans = &scan_plan; params.n_scan_plans = 1; iwl_mvm_fill_scan_type(mvm, &params, vif); ret = iwl_mvm_get_measurement_dwell(mvm, req, &params); if (ret < 0) return ret; params.measurement_dwell = ret; iwl_mvm_build_scan_probe(mvm, vif, ies, &params); if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) { hcmd.id = iwl_cmd_id(SCAN_REQ_UMAC, IWL_ALWAYS_LONG_GROUP, 0); ret = iwl_mvm_scan_umac(mvm, vif, &params, IWL_MVM_SCAN_REGULAR); } else { hcmd.id = SCAN_OFFLOAD_REQUEST_CMD; ret = iwl_mvm_scan_lmac(mvm, vif, &params); } if (ret) return ret; iwl_mvm_pause_tcm(mvm, false); ret = iwl_mvm_send_cmd(mvm, &hcmd); if (ret) { /* If the scan failed, it usually means that the FW was unable * to allocate the time events. Warn on it, but maybe we * should try to send the command again with different params. */ IWL_ERR(mvm, "Scan failed! 
ret %d\n", ret); iwl_mvm_resume_tcm(mvm); return ret; } IWL_DEBUG_SCAN(mvm, "Scan request was sent successfully\n"); mvm->scan_status |= IWL_MVM_SCAN_REGULAR; mvm->scan_vif = iwl_mvm_vif_from_mac80211(vif); schedule_delayed_work(&mvm->scan_timeout_dwork, msecs_to_jiffies(SCAN_TIMEOUT)); return 0; } int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_sched_scan_request *req, struct ieee80211_scan_ies *ies, int type) { struct iwl_host_cmd hcmd = { .len = { iwl_mvm_scan_size(mvm), }, .data = { mvm->scan_cmd, }, .dataflags = { IWL_HCMD_DFL_NOCOPY, }, }; struct iwl_mvm_scan_params params = {}; int ret; lockdep_assert_held(&mvm->mutex); if (iwl_mvm_is_lar_supported(mvm) && !mvm->lar_regdom_set) { IWL_ERR(mvm, "sched-scan while LAR regdomain is not set\n"); return -EBUSY; } ret = iwl_mvm_check_running_scans(mvm, type); if (ret) return ret; /* we should have failed registration if scan_cmd was NULL */ if (WARN_ON(!mvm->scan_cmd)) return -ENOMEM; if (!iwl_mvm_scan_fits(mvm, req->n_ssids, ies, req->n_channels)) return -ENOBUFS; params.n_ssids = req->n_ssids; params.flags = req->flags; params.n_channels = req->n_channels; params.ssids = req->ssids; params.channels = req->channels; params.mac_addr = req->mac_addr; params.mac_addr_mask = req->mac_addr_mask; params.no_cck = false; params.pass_all = iwl_mvm_scan_pass_all(mvm, req); params.n_match_sets = req->n_match_sets; params.match_sets = req->match_sets; if (!req->n_scan_plans) return -EINVAL; params.n_scan_plans = req->n_scan_plans; params.scan_plans = req->scan_plans; iwl_mvm_fill_scan_type(mvm, &params, vif); /* In theory, LMAC scans can handle a 32-bit delay, but since * waiting for over 18 hours to start the scan is a bit silly * and to keep it aligned with UMAC scans (which only support * 16-bit delays), trim it down to 16-bits. */ if (req->delay > U16_MAX) { IWL_DEBUG_SCAN(mvm, "delay value is > 16-bits, set to max possible\n"); params.delay = U16_MAX; } else { params.delay = req->delay; } ret = iwl_mvm_config_sched_scan_profiles(mvm, req); if (ret) return ret; iwl_mvm_build_scan_probe(mvm, vif, ies, &params); if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) { hcmd.id = iwl_cmd_id(SCAN_REQ_UMAC, IWL_ALWAYS_LONG_GROUP, 0); ret = iwl_mvm_scan_umac(mvm, vif, &params, type); } else { hcmd.id = SCAN_OFFLOAD_REQUEST_CMD; ret = iwl_mvm_scan_lmac(mvm, vif, &params); } if (ret) return ret; ret = iwl_mvm_send_cmd(mvm, &hcmd); if (!ret) { IWL_DEBUG_SCAN(mvm, "Sched scan request was sent successfully\n"); mvm->scan_status |= type; } else { /* If the scan failed, it usually means that the FW was unable * to allocate the time events. Warn on it, but maybe we * should try to send the command again with different params. */ IWL_ERR(mvm, "Sched scan failed! 
ret %d\n", ret); } return ret; } void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_umac_scan_complete *notif = (void *)pkt->data; u32 uid = __le32_to_cpu(notif->uid); bool aborted = (notif->status == IWL_SCAN_OFFLOAD_ABORTED); if (WARN_ON(!(mvm->scan_uid_status[uid] & mvm->scan_status))) return; /* if the scan is already stopping, we don't need to notify mac80211 */ if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_REGULAR) { struct cfg80211_scan_info info = { .aborted = aborted, .scan_start_tsf = mvm->scan_start, }; memcpy(info.tsf_bssid, mvm->scan_vif->bssid, ETH_ALEN); ieee80211_scan_completed(mvm->hw, &info); mvm->scan_vif = NULL; cancel_delayed_work(&mvm->scan_timeout_dwork); iwl_mvm_resume_tcm(mvm); } else if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_SCHED) { ieee80211_sched_scan_stopped(mvm->hw); mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED; } mvm->scan_status &= ~mvm->scan_uid_status[uid]; IWL_DEBUG_SCAN(mvm, "Scan completed, uid %u type %u, status %s, EBS status %s\n", uid, mvm->scan_uid_status[uid], notif->status == IWL_SCAN_OFFLOAD_COMPLETED ? "completed" : "aborted", iwl_mvm_ebs_status_str(notif->ebs_status)); IWL_DEBUG_SCAN(mvm, "Last line %d, Last iteration %d, Time from last iteration %d\n", notif->last_schedule, notif->last_iter, __le32_to_cpu(notif->time_from_last_iter)); if (notif->ebs_status != IWL_SCAN_EBS_SUCCESS && notif->ebs_status != IWL_SCAN_EBS_INACTIVE) mvm->last_ebs_successful = false; mvm->scan_uid_status[uid] = 0; iwl_fw_dbg_apply_point(&mvm->fwrt, IWL_FW_INI_APPLY_SCAN_COMPLETE); } void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_umac_scan_iter_complete_notif *notif = (void *)pkt->data; mvm->scan_start = le64_to_cpu(notif->start_tsf); IWL_DEBUG_SCAN(mvm, "UMAC Scan iteration complete: status=0x%x scanned_channels=%d\n", notif->status, notif->scanned_channels); if (mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_FOUND) { IWL_DEBUG_SCAN(mvm, "Pass all scheduled scan results found\n"); ieee80211_sched_scan_results(mvm->hw); mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_ENABLED; } IWL_DEBUG_SCAN(mvm, "UMAC Scan iteration complete: scan started at %llu (TSF)\n", mvm->scan_start); } static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type) { struct iwl_umac_scan_abort cmd = {}; int uid, ret; lockdep_assert_held(&mvm->mutex); /* We should always get a valid index here, because we already * checked that this type of scan was running in the generic * code. 
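 *
 * Worked example (uid value hypothetical): a regular scan in slot 1
 * means mvm->scan_uid_status[1] == IWL_MVM_SCAN_REGULAR, so the lookup
 * below returns 1, and a successful abort command re-tags the slot:
 *
 *	mvm->scan_uid_status[1] =
 *		IWL_MVM_SCAN_REGULAR << IWL_MVM_SCAN_STOPPING_SHIFT;
 *
 * The completion notification later clears it back to 0.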
*/ uid = iwl_mvm_scan_uid_by_status(mvm, type); if (WARN_ON_ONCE(uid < 0)) return uid; cmd.uid = cpu_to_le32(uid); IWL_DEBUG_SCAN(mvm, "Sending scan abort, uid %u\n", uid); ret = iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(SCAN_ABORT_UMAC, IWL_ALWAYS_LONG_GROUP, 0), 0, sizeof(cmd), &cmd); if (!ret) mvm->scan_uid_status[uid] = type << IWL_MVM_SCAN_STOPPING_SHIFT; return ret; } static int iwl_mvm_scan_stop_wait(struct iwl_mvm *mvm, int type) { struct iwl_notification_wait wait_scan_done; static const u16 scan_done_notif[] = { SCAN_COMPLETE_UMAC, SCAN_OFFLOAD_COMPLETE, }; int ret; lockdep_assert_held(&mvm->mutex); iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_done, scan_done_notif, ARRAY_SIZE(scan_done_notif), NULL, NULL); IWL_DEBUG_SCAN(mvm, "Preparing to stop scan, type %x\n", type); if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) ret = iwl_mvm_umac_scan_abort(mvm, type); else ret = iwl_mvm_lmac_scan_abort(mvm); if (ret) { IWL_DEBUG_SCAN(mvm, "couldn't stop scan type %d\n", type); iwl_remove_notification(&mvm->notif_wait, &wait_scan_done); return ret; } return iwl_wait_notification(&mvm->notif_wait, &wait_scan_done, 1 * HZ); } int iwl_mvm_scan_size(struct iwl_mvm *mvm) { int base_size = IWL_SCAN_REQ_UMAC_SIZE_V1; int tail_size; if (iwl_mvm_is_adaptive_dwell_v2_supported(mvm)) base_size = IWL_SCAN_REQ_UMAC_SIZE_V8; else if (iwl_mvm_is_adaptive_dwell_supported(mvm)) base_size = IWL_SCAN_REQ_UMAC_SIZE_V7; else if (iwl_mvm_cdb_scan_api(mvm)) base_size = IWL_SCAN_REQ_UMAC_SIZE_V6; if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) { if (iwl_mvm_is_scan_ext_chan_supported(mvm)) tail_size = sizeof(struct iwl_scan_req_umac_tail_v2); else tail_size = sizeof(struct iwl_scan_req_umac_tail_v1); return base_size + sizeof(struct iwl_scan_channel_cfg_umac) * mvm->fw->ucode_capa.n_scan_channels + tail_size; } return sizeof(struct iwl_scan_req_lmac) + sizeof(struct iwl_scan_channel_cfg_lmac) * mvm->fw->ucode_capa.n_scan_channels + sizeof(struct iwl_scan_probe_req_v1); } /* * This function is used in nic restart flow, to inform mac80211 about scans * that was aborted by restart flow or by an assert. */ void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm) { if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) { int uid, i; uid = iwl_mvm_scan_uid_by_status(mvm, IWL_MVM_SCAN_REGULAR); if (uid >= 0) { struct cfg80211_scan_info info = { .aborted = true, }; ieee80211_scan_completed(mvm->hw, &info); mvm->scan_uid_status[uid] = 0; } uid = iwl_mvm_scan_uid_by_status(mvm, IWL_MVM_SCAN_SCHED); if (uid >= 0 && !mvm->fw_restart) { ieee80211_sched_scan_stopped(mvm->hw); mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED; mvm->scan_uid_status[uid] = 0; } /* We shouldn't have any UIDs still set. Loop over all the * UIDs to make sure there's nothing left there and warn if * any is found. */ for (i = 0; i < mvm->max_scans; i++) { if (WARN_ONCE(mvm->scan_uid_status[i], "UMAC scan UID %d status was not cleaned\n", i)) mvm->scan_uid_status[i] = 0; } } else { if (mvm->scan_status & IWL_MVM_SCAN_REGULAR) { struct cfg80211_scan_info info = { .aborted = true, }; ieee80211_scan_completed(mvm->hw, &info); } /* Sched scan will be restarted by mac80211 in * restart_hw, so do not report if FW is about to be * restarted. 
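 *
 * Assumed sequence, for illustration: FW assert -> mvm->fw_restart is
 * set -> this function leaves the sched scan alone -> mac80211
 * re-programs the scheduled scan from ieee80211_restart_hw() -> from
 * userspace's point of view the scan never stopped.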
*/ if ((mvm->scan_status & IWL_MVM_SCAN_SCHED) && !mvm->fw_restart) { ieee80211_sched_scan_stopped(mvm->hw); mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED; } } } int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify) { int ret; if (!(mvm->scan_status & type)) return 0; if (iwl_mvm_is_radio_killed(mvm)) { ret = 0; goto out; } ret = iwl_mvm_scan_stop_wait(mvm, type); if (!ret) mvm->scan_status |= type << IWL_MVM_SCAN_STOPPING_SHIFT; out: /* Clear the scan status so the next scan requests will * succeed and mark the scan as stopping, so that the Rx * handler doesn't do anything, as the scan was stopped from * above. */ mvm->scan_status &= ~type; if (type == IWL_MVM_SCAN_REGULAR) { cancel_delayed_work(&mvm->scan_timeout_dwork); if (notify) { struct cfg80211_scan_info info = { .aborted = true, }; ieee80211_scan_completed(mvm->hw, &info); } } else if (notify) { ieee80211_sched_scan_stopped(mvm->hw); mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED; } return ret; } backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/mvm/sf.c000066400000000000000000000234331351064634500261470ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright (C) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright (C) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #include "mvm.h" /* For counting bound interfaces */ struct iwl_mvm_active_iface_iterator_data { struct ieee80211_vif *ignore_vif; u8 sta_vif_ap_sta_id; enum iwl_sf_state sta_vif_state; u32 num_active_macs; }; /* * Count bound interfaces which are not p2p, besides data->ignore_vif. * data->station_vif will point to one bound vif of type station, if exists. */ static void iwl_mvm_bound_iface_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif) { struct iwl_mvm_active_iface_iterator_data *data = _data; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); if (vif == data->ignore_vif || !mvmvif->phy_ctxt || vif->type == NL80211_IFTYPE_P2P_DEVICE || vif->type == NL80211_IFTYPE_NAN) return; data->num_active_macs++; if (vif->type == NL80211_IFTYPE_STATION) { data->sta_vif_ap_sta_id = mvmvif->ap_sta_id; if (vif->bss_conf.assoc) data->sta_vif_state = SF_FULL_ON; else data->sta_vif_state = SF_INIT_OFF; } } /* * Aging and idle timeouts for the different possible scenarios * in default configuration */ static const __le32 sf_full_timeout_def[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES] = { { cpu_to_le32(SF_SINGLE_UNICAST_AGING_TIMER_DEF), cpu_to_le32(SF_SINGLE_UNICAST_IDLE_TIMER_DEF) }, { cpu_to_le32(SF_AGG_UNICAST_AGING_TIMER_DEF), cpu_to_le32(SF_AGG_UNICAST_IDLE_TIMER_DEF) }, { cpu_to_le32(SF_MCAST_AGING_TIMER_DEF), cpu_to_le32(SF_MCAST_IDLE_TIMER_DEF) }, { cpu_to_le32(SF_BA_AGING_TIMER_DEF), cpu_to_le32(SF_BA_IDLE_TIMER_DEF) }, { cpu_to_le32(SF_TX_RE_AGING_TIMER_DEF), cpu_to_le32(SF_TX_RE_IDLE_TIMER_DEF) }, }; /* * Aging and idle timeouts for the different possible scenarios * in single BSS MAC configuration. */ static const __le32 sf_full_timeout[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES] = { { cpu_to_le32(SF_SINGLE_UNICAST_AGING_TIMER), cpu_to_le32(SF_SINGLE_UNICAST_IDLE_TIMER) }, { cpu_to_le32(SF_AGG_UNICAST_AGING_TIMER), cpu_to_le32(SF_AGG_UNICAST_IDLE_TIMER) }, { cpu_to_le32(SF_MCAST_AGING_TIMER), cpu_to_le32(SF_MCAST_IDLE_TIMER) }, { cpu_to_le32(SF_BA_AGING_TIMER), cpu_to_le32(SF_BA_IDLE_TIMER) }, { cpu_to_le32(SF_TX_RE_AGING_TIMER), cpu_to_le32(SF_TX_RE_IDLE_TIMER) }, }; static void iwl_mvm_fill_sf_command(struct iwl_mvm *mvm, struct iwl_sf_cfg_cmd *sf_cmd, struct ieee80211_sta *sta) { int i, j, watermark; sf_cmd->watermark[SF_LONG_DELAY_ON] = cpu_to_le32(SF_W_MARK_SCAN); /* * If we are in association flow - check antenna configuration * capabilities of the AP station, and choose the watermark accordingly. */ if (sta) { if (sta->ht_cap.ht_supported || sta->vht_cap.vht_supported) { switch (sta->rx_nss) { case 1: watermark = SF_W_MARK_SISO; break; case 2: watermark = SF_W_MARK_MIMO2; break; default: watermark = SF_W_MARK_MIMO3; break; } } else { watermark = SF_W_MARK_LEGACY; } /* default watermark value for unassociated mode. 
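 * For instance, while the driver is still associating there is no AP
 * station yet, so this function runs with sta == NULL and falls back
 * to the MIMO2 watermark - presumably a middle-ground default until
 * the AP's real antenna configuration is known.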
*/ } else { watermark = SF_W_MARK_MIMO2; } sf_cmd->watermark[SF_FULL_ON] = cpu_to_le32(watermark); for (i = 0; i < SF_NUM_SCENARIO; i++) { for (j = 0; j < SF_NUM_TIMEOUT_TYPES; j++) { sf_cmd->long_delay_timeouts[i][j] = cpu_to_le32(SF_LONG_DELAY_AGING_TIMER); } } if (sta) { BUILD_BUG_ON(sizeof(sf_full_timeout) != sizeof(__le32) * SF_NUM_SCENARIO * SF_NUM_TIMEOUT_TYPES); memcpy(sf_cmd->full_on_timeouts, sf_full_timeout, sizeof(sf_full_timeout)); } else { BUILD_BUG_ON(sizeof(sf_full_timeout_def) != sizeof(__le32) * SF_NUM_SCENARIO * SF_NUM_TIMEOUT_TYPES); memcpy(sf_cmd->full_on_timeouts, sf_full_timeout_def, sizeof(sf_full_timeout_def)); } } static int iwl_mvm_sf_config(struct iwl_mvm *mvm, u8 sta_id, enum iwl_sf_state new_state) { struct iwl_sf_cfg_cmd sf_cmd = { .state = cpu_to_le32(new_state), }; struct ieee80211_sta *sta; int ret = 0; if (mvm->cfg->disable_dummy_notification) sf_cmd.state |= cpu_to_le32(SF_CFG_DUMMY_NOTIF_OFF); /* * If an associated AP sta changed its antenna configuration, the state * will remain FULL_ON but SF parameters need to be reconsidered. */ if (new_state != SF_FULL_ON && mvm->sf_state == new_state) return 0; switch (new_state) { case SF_UNINIT: iwl_mvm_fill_sf_command(mvm, &sf_cmd, NULL); break; case SF_FULL_ON: if (sta_id == IWL_MVM_INVALID_STA) { IWL_ERR(mvm, "No station: Cannot switch SF to FULL_ON\n"); return -EINVAL; } rcu_read_lock(); sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); if (IS_ERR_OR_NULL(sta)) { IWL_ERR(mvm, "Invalid station id\n"); rcu_read_unlock(); return -EINVAL; } iwl_mvm_fill_sf_command(mvm, &sf_cmd, sta); rcu_read_unlock(); break; case SF_INIT_OFF: iwl_mvm_fill_sf_command(mvm, &sf_cmd, NULL); break; default: WARN_ONCE(1, "Invalid state: %d. not sending Smart Fifo cmd\n", new_state); return -EINVAL; } ret = iwl_mvm_send_cmd_pdu(mvm, REPLY_SF_CFG_CMD, CMD_ASYNC, sizeof(sf_cmd), &sf_cmd); if (!ret) mvm->sf_state = new_state; return ret; } /* * Update Smart fifo: * Count bound interfaces that are not to be removed, ignoring p2p devices, * and set new state accordingly. */ int iwl_mvm_sf_update(struct iwl_mvm *mvm, struct ieee80211_vif *changed_vif, bool remove_vif) { enum iwl_sf_state new_state; u8 sta_id = IWL_MVM_INVALID_STA; struct iwl_mvm_vif *mvmvif = NULL; struct iwl_mvm_active_iface_iterator_data data = { .ignore_vif = changed_vif, .sta_vif_state = SF_UNINIT, .sta_vif_ap_sta_id = IWL_MVM_INVALID_STA, }; /* * Ignore the call if we are in HW Restart flow, or if the handled * vif is a p2p device. 
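 *
 * For reference, the state chosen below maps as follows (a summary of
 * the switch statement that follows, ignoring the remove_vif case,
 * which reuses the iterator's snapshot):
 *
 *	0 active MACs                     -> SF_INIT_OFF
 *	1 active MAC, assoc'd STA w/ DTIM -> SF_FULL_ON
 *	1 active MAC, STA not assoc'd     -> SF_INIT_OFF
 *	1 active MAC, non-STA vif         -> SF_UNINIT
 *	2+ active MACs                    -> SF_UNINIT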
*/ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) || (changed_vif && (changed_vif->type == NL80211_IFTYPE_P2P_DEVICE || changed_vif->type == NL80211_IFTYPE_NAN))) return 0; ieee80211_iterate_active_interfaces_atomic(mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_bound_iface_iterator, &data); /* If changed_vif exists and is not to be removed, add to the count */ if (changed_vif && !remove_vif) data.num_active_macs++; switch (data.num_active_macs) { case 0: /* If there are no active macs - change state to SF_INIT_OFF */ new_state = SF_INIT_OFF; break; case 1: if (remove_vif) { /* The one active mac left is of type station * and we filled the relevant data during iteration */ new_state = data.sta_vif_state; sta_id = data.sta_vif_ap_sta_id; } else { if (WARN_ON(!changed_vif)) return -EINVAL; if (changed_vif->type != NL80211_IFTYPE_STATION) { new_state = SF_UNINIT; } else if (changed_vif->bss_conf.assoc && changed_vif->bss_conf.dtim_period) { mvmvif = iwl_mvm_vif_from_mac80211(changed_vif); sta_id = mvmvif->ap_sta_id; new_state = SF_FULL_ON; } else { new_state = SF_INIT_OFF; } } break; default: /* If there are multiple active macs - change to SF_UNINIT */ new_state = SF_UNINIT; } return iwl_mvm_sf_config(mvm, sta_id, new_state); } backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/mvm/sta.c000066400000000000000000003244471351064634500263370ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #include <net/mac80211.h> #include "mvm.h" #include "sta.h" #include "rs.h" /* * New version of ADD_STA_sta command added new fields at the end of the * structure, so sending the size of the relevant API's structure is enough to * support both API versions. */ static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm) { if (iwl_mvm_has_new_rx_api(mvm) || fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) return sizeof(struct iwl_mvm_add_sta_cmd); else return sizeof(struct iwl_mvm_add_sta_cmd_v7); } static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm, enum nl80211_iftype iftype) { int sta_id; u32 reserved_ids = 0; BUILD_BUG_ON(IWL_MVM_STATION_COUNT > 32); WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)); lockdep_assert_held(&mvm->mutex); /* d0i3/d3 assumes the AP's sta_id (of sta vif) is 0. reserve it. */ if (iftype != NL80211_IFTYPE_STATION) reserved_ids = BIT(0); /* Don't take rcu_read_lock() since we are protected by mvm->mutex */ for (sta_id = 0; sta_id < ARRAY_SIZE(mvm->fw_id_to_mac_id); sta_id++) { if (BIT(sta_id) & reserved_ids) continue; if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], lockdep_is_held(&mvm->mutex))) return sta_id; } return IWL_MVM_INVALID_STA; } /* send station add/update command to firmware */ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta, bool update, unsigned int flags) { struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_add_sta_cmd add_sta_cmd = { .sta_id = mvm_sta->sta_id, .mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color), .add_modify = update ? 
1 : 0, .station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK | STA_FLG_MIMO_EN_MSK | STA_FLG_RTS_MIMO_PROT), .tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg), }; int ret; u32 status; u32 agg_size = 0, mpdu_dens = 0; if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) add_sta_cmd.station_type = mvm_sta->sta_type; if (!update || (flags & STA_MODIFY_QUEUES)) { memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN); if (!iwl_mvm_has_new_tx_api(mvm)) { add_sta_cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk); if (flags & STA_MODIFY_QUEUES) add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES; } else { WARN_ON(flags & STA_MODIFY_QUEUES); } } switch (sta->bandwidth) { case IEEE80211_STA_RX_BW_160: add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ); /* fall through */ case IEEE80211_STA_RX_BW_80: add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ); /* fall through */ case IEEE80211_STA_RX_BW_40: add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ); /* fall through */ case IEEE80211_STA_RX_BW_20: if (sta->ht_cap.ht_supported) add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_20MHZ); break; } switch (sta->rx_nss) { case 1: add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO); break; case 2: add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2); break; case 3 ... 8: add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3); break; } switch (sta->smps_mode) { case IEEE80211_SMPS_AUTOMATIC: case IEEE80211_SMPS_NUM_MODES: WARN_ON(1); break; case IEEE80211_SMPS_STATIC: /* override NSS */ add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK); add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO); break; case IEEE80211_SMPS_DYNAMIC: add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT); break; case IEEE80211_SMPS_OFF: /* nothing */ break; } if (sta->ht_cap.ht_supported) { add_sta_cmd.station_flags_msk |= cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK | STA_FLG_AGG_MPDU_DENS_MSK); mpdu_dens = sta->ht_cap.ampdu_density; } if (sta->vht_cap.vht_supported) { agg_size = sta->vht_cap.cap & IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK; agg_size >>= IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT; } else if (sta->ht_cap.ht_supported) { agg_size = sta->ht_cap.ampdu_factor; } add_sta_cmd.station_flags |= cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT); add_sta_cmd.station_flags |= cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT); if (mvm_sta->sta_state >= IEEE80211_STA_ASSOC) add_sta_cmd.assoc_id = cpu_to_le16(sta->aid); if (sta->wme) { add_sta_cmd.modify_mask |= STA_MODIFY_UAPSD_ACS; if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK) add_sta_cmd.uapsd_acs |= BIT(AC_BK); if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE) add_sta_cmd.uapsd_acs |= BIT(AC_BE); if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI) add_sta_cmd.uapsd_acs |= BIT(AC_VI); if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) add_sta_cmd.uapsd_acs |= BIT(AC_VO); add_sta_cmd.uapsd_acs |= add_sta_cmd.uapsd_acs << 4; add_sta_cmd.sp_length = sta->max_sp ? 
sta->max_sp * 2 : 128; } status = ADD_STA_SUCCESS; ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, iwl_mvm_add_sta_cmd_size(mvm), &add_sta_cmd, &status); if (ret) return ret; switch (status & IWL_ADD_STA_STATUS_MASK) { case ADD_STA_SUCCESS: IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n"); break; default: ret = -EIO; IWL_ERR(mvm, "ADD_STA failed\n"); break; } return ret; } static void iwl_mvm_rx_agg_session_expired(struct timer_list *t) { struct iwl_mvm_baid_data *data = from_timer(data, t, session_timer); struct iwl_mvm_baid_data __rcu **rcu_ptr = data->rcu_ptr; struct iwl_mvm_baid_data *ba_data; struct ieee80211_sta *sta; struct iwl_mvm_sta *mvm_sta; unsigned long timeout; rcu_read_lock(); ba_data = rcu_dereference(*rcu_ptr); if (WARN_ON(!ba_data)) goto unlock; if (!ba_data->timeout) goto unlock; timeout = ba_data->last_rx + TU_TO_JIFFIES(ba_data->timeout * 2); if (time_is_after_jiffies(timeout)) { mod_timer(&ba_data->session_timer, timeout); goto unlock; } /* Timer expired */ sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]); /* * sta should be valid unless the following happens: * The firmware asserts which triggers a reconfig flow, but * the reconfig fails before we set the pointer to sta into * the fw_id_to_mac_id pointer table. Mac80211 can't stop * A-MDPU and hence the timer continues to run. Then, the * timer expires and sta is NULL. */ if (!sta) goto unlock; mvm_sta = iwl_mvm_sta_from_mac80211(sta); ieee80211_rx_ba_timer_expired(mvm_sta->vif, sta->addr, ba_data->tid); unlock: rcu_read_unlock(); } /* Disable aggregations for a bitmap of TIDs for a given station */ static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue, unsigned long disable_agg_tids, bool remove_queue) { struct iwl_mvm_add_sta_cmd cmd = {}; struct ieee80211_sta *sta; struct iwl_mvm_sta *mvmsta; u32 status; u8 sta_id; if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) return -EINVAL; sta_id = mvm->queue_info[queue].ra_sta_id; rcu_read_lock(); sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) { rcu_read_unlock(); return -EINVAL; } mvmsta = iwl_mvm_sta_from_mac80211(sta); mvmsta->tid_disable_agg |= disable_agg_tids; cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color); cmd.sta_id = mvmsta->sta_id; cmd.add_modify = STA_MODE_MODIFY; cmd.modify_mask = STA_MODIFY_QUEUES; if (disable_agg_tids) cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX; if (remove_queue) cmd.modify_mask |= STA_MODIFY_QUEUE_REMOVAL; cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk); cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg); rcu_read_unlock(); /* Notify FW of queue removal from the STA queues */ status = ADD_STA_SUCCESS; return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, iwl_mvm_add_sta_cmd_size(mvm), &cmd, &status); } static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta, int queue, u8 tid, u8 flags) { struct iwl_scd_txq_cfg_cmd cmd = { .scd_queue = queue, .action = SCD_CFG_DISABLE_QUEUE, }; int ret; if (iwl_mvm_has_new_tx_api(mvm)) { iwl_trans_txq_free(mvm->trans, queue); return 0; } if (WARN_ON(mvm->queue_info[queue].tid_bitmap == 0)) return 0; mvm->queue_info[queue].tid_bitmap &= ~BIT(tid); cmd.action = mvm->queue_info[queue].tid_bitmap ? 
SCD_CFG_ENABLE_QUEUE : SCD_CFG_DISABLE_QUEUE; if (cmd.action == SCD_CFG_DISABLE_QUEUE) mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE; IWL_DEBUG_TX_QUEUES(mvm, "Disabling TXQ #%d tids=0x%x\n", queue, mvm->queue_info[queue].tid_bitmap); /* If the queue is still enabled - nothing left to do in this func */ if (cmd.action == SCD_CFG_ENABLE_QUEUE) return 0; cmd.sta_id = mvm->queue_info[queue].ra_sta_id; cmd.tid = mvm->queue_info[queue].txq_tid; /* Make sure queue info is correct even though we overwrite it */ WARN(mvm->queue_info[queue].tid_bitmap, "TXQ #%d info out-of-sync - tids=0x%x\n", queue, mvm->queue_info[queue].tid_bitmap); /* If we are here - the queue is freed and we can zero out these vals */ mvm->queue_info[queue].tid_bitmap = 0; if (sta) { struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_tid(sta, tid); mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE; } /* Regardless if this is a reserved TXQ for a STA - mark it as false */ mvm->queue_info[queue].reserved = false; iwl_trans_txq_disable(mvm->trans, queue, false); ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags, sizeof(struct iwl_scd_txq_cfg_cmd), &cmd); if (ret) IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n", queue, ret); return ret; } static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue) { struct ieee80211_sta *sta; struct iwl_mvm_sta *mvmsta; unsigned long tid_bitmap; unsigned long agg_tids = 0; u8 sta_id; int tid; lockdep_assert_held(&mvm->mutex); if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) return -EINVAL; sta_id = mvm->queue_info[queue].ra_sta_id; tid_bitmap = mvm->queue_info[queue].tid_bitmap; sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], lockdep_is_held(&mvm->mutex)); if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) return -EINVAL; mvmsta = iwl_mvm_sta_from_mac80211(sta); spin_lock_bh(&mvmsta->lock); for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) { if (mvmsta->tid_data[tid].state == IWL_AGG_ON) agg_tids |= BIT(tid); } spin_unlock_bh(&mvmsta->lock); return agg_tids; } /* * Remove a queue from a station's resources. * Note that this only marks as free. It DOESN'T delete a BA agreement, and * doesn't disable the queue */ static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue) { struct ieee80211_sta *sta; struct iwl_mvm_sta *mvmsta; unsigned long tid_bitmap; unsigned long disable_agg_tids = 0; u8 sta_id; int tid; lockdep_assert_held(&mvm->mutex); if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) return -EINVAL; sta_id = mvm->queue_info[queue].ra_sta_id; tid_bitmap = mvm->queue_info[queue].tid_bitmap; rcu_read_lock(); sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) { rcu_read_unlock(); return 0; } mvmsta = iwl_mvm_sta_from_mac80211(sta); spin_lock_bh(&mvmsta->lock); /* Unmap MAC queues and TIDs from this queue */ for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) { struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_tid(sta, tid); if (mvmsta->tid_data[tid].state == IWL_AGG_ON) disable_agg_tids |= BIT(tid); mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE; mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE; } mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */ spin_unlock_bh(&mvmsta->lock); rcu_read_unlock(); /* * The TX path may have been using this TXQ_ID from the tid_data, * so make sure it's no longer running so that we can safely reuse * this TXQ later. We've set all the TIDs to IWL_MVM_INVALID_QUEUE * above, but nothing guarantees we've stopped using them. 
Thus, * without this, we could get to iwl_mvm_disable_txq() and remove * the queue while still sending frames to it. */ synchronize_net(); return disable_agg_tids; } static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue, struct ieee80211_sta *old_sta, u8 new_sta_id) { struct iwl_mvm_sta *mvmsta; u8 sta_id, tid; unsigned long disable_agg_tids = 0; bool same_sta; int ret; lockdep_assert_held(&mvm->mutex); if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) return -EINVAL; sta_id = mvm->queue_info[queue].ra_sta_id; tid = mvm->queue_info[queue].txq_tid; same_sta = sta_id == new_sta_id; mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id); if (WARN_ON(!mvmsta)) return -EINVAL; disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue); /* Disable the queue */ if (disable_agg_tids) iwl_mvm_invalidate_sta_queue(mvm, queue, disable_agg_tids, false); ret = iwl_mvm_disable_txq(mvm, old_sta, queue, tid, 0); if (ret) { IWL_ERR(mvm, "Failed to free inactive queue %d (ret=%d)\n", queue, ret); return ret; } /* If TXQ is allocated to another STA, update removal in FW */ if (!same_sta) iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true); return 0; } static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm, unsigned long tfd_queue_mask, u8 ac) { int queue = 0; u8 ac_to_queue[IEEE80211_NUM_ACS]; int i; /* * This protects us against grabbing a queue that's being reconfigured * by the inactivity checker. */ lockdep_assert_held(&mvm->mutex); if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) return -EINVAL; memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue)); /* See what ACs the existing queues for this STA have */ for_each_set_bit(i, &tfd_queue_mask, IWL_MVM_DQA_MAX_DATA_QUEUE) { /* Only DATA queues can be shared */ if (i < IWL_MVM_DQA_MIN_DATA_QUEUE && i != IWL_MVM_DQA_BSS_CLIENT_QUEUE) continue; ac_to_queue[mvm->queue_info[i].mac80211_ac] = i; } /* * The queue to share is chosen only from DATA queues as follows (in * descending priority): * 1. An AC_BE queue * 2. Same AC queue * 3. Highest AC queue that is lower than new AC * 4. Any existing AC (there always is at least 1 DATA queue) */ /* Priority 1: An AC_BE queue */ if (ac_to_queue[IEEE80211_AC_BE] != IEEE80211_INVAL_HW_QUEUE) queue = ac_to_queue[IEEE80211_AC_BE]; /* Priority 2: Same AC queue */ else if (ac_to_queue[ac] != IEEE80211_INVAL_HW_QUEUE) queue = ac_to_queue[ac]; /* Priority 3a: If new AC is VO and VI exists - use VI */ else if (ac == IEEE80211_AC_VO && ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE) queue = ac_to_queue[IEEE80211_AC_VI]; /* Priority 3b: No BE so only AC less than the new one is BK */ else if (ac_to_queue[IEEE80211_AC_BK] != IEEE80211_INVAL_HW_QUEUE) queue = ac_to_queue[IEEE80211_AC_BK]; /* Priority 4a: No BE nor BK - use VI if exists */ else if (ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE) queue = ac_to_queue[IEEE80211_AC_VI]; /* Priority 4b: No BE, BK nor VI - use VO if exists */ else if (ac_to_queue[IEEE80211_AC_VO] != IEEE80211_INVAL_HW_QUEUE) queue = ac_to_queue[IEEE80211_AC_VO]; /* Make sure queue found (or not) is legal */ if (!iwl_mvm_is_dqa_data_queue(mvm, queue) && !iwl_mvm_is_dqa_mgmt_queue(mvm, queue) && (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)) { IWL_ERR(mvm, "No DATA queues available to share\n"); return -ENOSPC; } return queue; } /* * If a given queue has a higher AC than the TID stream that is being compared * to, the queue needs to be redirected to the lower AC.
This function does that * in such a case, otherwise - if no redirection required - it does nothing, * unless the %force param is true. */ static int iwl_mvm_redirect_queue(struct iwl_mvm *mvm, int queue, int tid, int ac, int ssn, unsigned int wdg_timeout, bool force, struct iwl_mvm_txq *txq) { struct iwl_scd_txq_cfg_cmd cmd = { .scd_queue = queue, .action = SCD_CFG_DISABLE_QUEUE, }; bool shared_queue; int ret; if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) return -EINVAL; /* * If the AC is lower than current one - FIFO needs to be redirected to * the lowest one of the streams in the queue. Check if this is needed * here. * Notice that the enum ieee80211_ac_numbers is "flipped", so BK is with * value 3 and VO with value 0, so to check if ac X is lower than ac Y * we need to check if the numerical value of X is LARGER than of Y. */ if (ac <= mvm->queue_info[queue].mac80211_ac && !force) { IWL_DEBUG_TX_QUEUES(mvm, "No redirection needed on TXQ #%d\n", queue); return 0; } cmd.sta_id = mvm->queue_info[queue].ra_sta_id; cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac]; cmd.tid = mvm->queue_info[queue].txq_tid; shared_queue = hweight16(mvm->queue_info[queue].tid_bitmap) > 1; IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n", queue, iwl_mvm_ac_to_tx_fifo[ac]); /* Stop the queue and wait for it to empty */ txq->stopped = true; ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue)); if (ret) { IWL_ERR(mvm, "Error draining queue %d before reconfig\n", queue); ret = -EIO; goto out; } /* Before redirecting the queue we need to de-activate it */ iwl_trans_txq_disable(mvm->trans, queue, false); ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd); if (ret) IWL_ERR(mvm, "Failed SCD disable TXQ %d (ret=%d)\n", queue, ret); /* Make sure the SCD wrptr is correctly set before reconfiguring */ iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout); /* Update the TID "owner" of the queue */ mvm->queue_info[queue].txq_tid = tid; /* TODO: Work-around SCD bug when moving back by multiples of 0x40 */ /* Redirect to lower AC */ iwl_mvm_reconfig_scd(mvm, queue, iwl_mvm_ac_to_tx_fifo[ac], cmd.sta_id, tid, IWL_FRAME_LIMIT, ssn); /* Update AC marking of the queue */ mvm->queue_info[queue].mac80211_ac = ac; /* * Mark queue as shared in transport if shared * Note this has to be done after queue enablement because enablement * can also set this value, and there is no indication there to shared * queues */ if (shared_queue) iwl_trans_txq_set_shared_mode(mvm->trans, queue, true); out: /* Continue using the queue */ txq->stopped = false; return ret; } static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, u8 minq, u8 maxq) { int i; lockdep_assert_held(&mvm->mutex); /* This should not be hit with new TX path */ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) return -ENOSPC; /* Start by looking for a free queue */ for (i = minq; i <= maxq; i++) if (mvm->queue_info[i].tid_bitmap == 0 && mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE) return i; return -ENOSPC; } static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, u8 sta_id, u8 tid, unsigned int timeout) { int queue, size = max_t(u32, IWL_DEFAULT_QUEUE_SIZE, mvm->trans->cfg->min_256_ba_txq_size); if (tid == IWL_MAX_TID_COUNT) { tid = IWL_MGMT_TID; size = max_t(u32, IWL_MGMT_QUEUE_SIZE, mvm->trans->cfg->min_txq_size); } queue = iwl_trans_txq_alloc(mvm->trans, cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE), sta_id, tid, SCD_QUEUE_CFG, size, timeout); if (queue < 0) { IWL_DEBUG_TX_QUEUES(mvm, "Failed allocating TXQ 
for sta %d tid %d, ret: %d\n", sta_id, tid, queue); return queue; } IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n", queue, sta_id, tid); return queue; } static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm, struct ieee80211_sta *sta, u8 ac, int tid) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_tid(sta, tid); unsigned int wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false); int queue = -1; lockdep_assert_held(&mvm->mutex); IWL_DEBUG_TX_QUEUES(mvm, "Allocating queue for sta %d on tid %d\n", mvmsta->sta_id, tid); queue = iwl_mvm_tvqm_enable_txq(mvm, mvmsta->sta_id, tid, wdg_timeout); if (queue < 0) return queue; mvmtxq->txq_id = queue; mvm->tvqm_info[queue].txq_tid = tid; mvm->tvqm_info[queue].sta_id = mvmsta->sta_id; IWL_DEBUG_TX_QUEUES(mvm, "Allocated queue is %d\n", queue); spin_lock_bh(&mvmsta->lock); mvmsta->tid_data[tid].txq_id = queue; spin_unlock_bh(&mvmsta->lock); return 0; } static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, struct ieee80211_sta *sta, int queue, u8 sta_id, u8 tid) { bool enable_queue = true; /* Make sure this TID isn't already enabled */ if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) { IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n", queue, tid); return false; } /* Update mappings and refcounts */ if (mvm->queue_info[queue].tid_bitmap) enable_queue = false; mvm->queue_info[queue].tid_bitmap |= BIT(tid); mvm->queue_info[queue].ra_sta_id = sta_id; if (enable_queue) { if (tid != IWL_MAX_TID_COUNT) mvm->queue_info[queue].mac80211_ac = tid_to_mac80211_ac[tid]; else mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO; mvm->queue_info[queue].txq_tid = tid; } if (sta) { struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_tid(sta, tid); mvmtxq->txq_id = queue; } IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d tids=0x%x\n", queue, mvm->queue_info[queue].tid_bitmap); return enable_queue; } static bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta, int queue, u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg, unsigned int wdg_timeout) { struct iwl_scd_txq_cfg_cmd cmd = { .scd_queue = queue, .action = SCD_CFG_ENABLE_QUEUE, .window = cfg->frame_limit, .sta_id = cfg->sta_id, .ssn = cpu_to_le16(ssn), .tx_fifo = cfg->fifo, .aggregate = cfg->aggregate, .tid = cfg->tid, }; bool inc_ssn; if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) return false; /* Send the enabling command if we need to */ if (!iwl_mvm_update_txq_mapping(mvm, sta, queue, cfg->sta_id, cfg->tid)) return false; inc_ssn = iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout); if (inc_ssn) le16_add_cpu(&cmd.ssn, 1); WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd), "Failed to configure queue %d on FIFO %d\n", queue, cfg->fifo); return inc_ssn; } static void iwl_mvm_change_queue_tid(struct iwl_mvm *mvm, int queue) { struct iwl_scd_txq_cfg_cmd cmd = { .scd_queue = queue, .action = SCD_CFG_UPDATE_QUEUE_TID, }; int tid; unsigned long tid_bitmap; int ret; lockdep_assert_held(&mvm->mutex); if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) return; tid_bitmap = mvm->queue_info[queue].tid_bitmap; if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue)) return; /* Find any TID for queue */ tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1); cmd.tid = tid; cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]]; ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd); if (ret) {
IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n", queue, ret); return; } mvm->queue_info[queue].txq_tid = tid; IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n", queue, tid); } static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue) { struct ieee80211_sta *sta; struct iwl_mvm_sta *mvmsta; u8 sta_id; int tid = -1; unsigned long tid_bitmap; unsigned int wdg_timeout; int ssn; int ret = true; /* queue sharing is disabled on new TX path */ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) return; lockdep_assert_held(&mvm->mutex); sta_id = mvm->queue_info[queue].ra_sta_id; tid_bitmap = mvm->queue_info[queue].tid_bitmap; /* Find TID for queue, and make sure it is the only one on the queue */ tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1); if (tid_bitmap != BIT(tid)) { IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n", queue, tid_bitmap); return; } IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue, tid); sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], lockdep_is_held(&mvm->mutex)); if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) return; mvmsta = iwl_mvm_sta_from_mac80211(sta); wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false); ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number); ret = iwl_mvm_redirect_queue(mvm, queue, tid, tid_to_mac80211_ac[tid], ssn, wdg_timeout, true, iwl_mvm_txq_from_tid(sta, tid)); if (ret) { IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue); return; } /* If aggs should be turned back on - do it */ if (mvmsta->tid_data[tid].state == IWL_AGG_ON) { struct iwl_mvm_add_sta_cmd cmd = {0}; mvmsta->tid_disable_agg &= ~BIT(tid); cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color); cmd.sta_id = mvmsta->sta_id; cmd.add_modify = STA_MODE_MODIFY; cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX; cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk); cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg); ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, iwl_mvm_add_sta_cmd_size(mvm), &cmd); if (!ret) { IWL_DEBUG_TX_QUEUES(mvm, "TXQ #%d is now aggregated again\n", queue); /* Mark queue intenally as aggregating again */ iwl_trans_txq_set_shared_mode(mvm->trans, queue, false); } } mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY; } /* * Remove inactive TIDs of a given queue. * If all queue TIDs are inactive - mark the queue as inactive * If only some the queue TIDs are inactive - unmap them from the queue * * Returns %true if all TIDs were removed and the queue could be reused. */ static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, int queue, unsigned long tid_bitmap, unsigned long *unshare_queues, unsigned long *changetid_queues) { int tid; lockdep_assert_held(&mvmsta->lock); lockdep_assert_held(&mvm->mutex); if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) return false; /* Go over all non-active TIDs, incl. IWL_MAX_TID_COUNT (for mgmt) */ for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) { /* If some TFDs are still queued - don't mark TID as inactive */ if (iwl_mvm_tid_queued(mvm, &mvmsta->tid_data[tid])) tid_bitmap &= ~BIT(tid); /* Don't mark as inactive any TID that has an active BA */ if (mvmsta->tid_data[tid].state != IWL_AGG_OFF) tid_bitmap &= ~BIT(tid); } /* If all TIDs in the queue are inactive - return it can be reused */ if (tid_bitmap == mvm->queue_info[queue].tid_bitmap) { IWL_DEBUG_TX_QUEUES(mvm, "Queue %d is inactive\n", queue); return true; } /* * If we are here, this is a shared queue and not all TIDs timed-out. 
* Remove the ones that did. */ for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) { u16 tid_bitmap; mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE; mvm->queue_info[queue].tid_bitmap &= ~BIT(tid); tid_bitmap = mvm->queue_info[queue].tid_bitmap; /* * We need to take into account a situation in which a TXQ was * allocated to TID x, and then turned shared by adding TIDs y * and z. If TID x becomes inactive and is removed from the TXQ, * ownership must be given to one of the remaining TIDs. * This is mainly because if TID x continues - a new queue can't * be allocated for it as long as it is an owner of another TXQ. * * Mark this queue in the right bitmap, we'll send the command * to the firmware later. */ if (!(tid_bitmap & BIT(mvm->queue_info[queue].txq_tid))) set_bit(queue, changetid_queues); IWL_DEBUG_TX_QUEUES(mvm, "Removing inactive TID %d from shared Q:%d\n", tid, queue); } IWL_DEBUG_TX_QUEUES(mvm, "TXQ #%d left with tid bitmap 0x%x\n", queue, mvm->queue_info[queue].tid_bitmap); /* * There may be different TIDs with the same mac queues, so make * sure all TIDs have existing corresponding mac queues enabled */ tid_bitmap = mvm->queue_info[queue].tid_bitmap; /* If the queue is marked as shared - "unshare" it */ if (hweight16(mvm->queue_info[queue].tid_bitmap) == 1 && mvm->queue_info[queue].status == IWL_MVM_QUEUE_SHARED) { IWL_DEBUG_TX_QUEUES(mvm, "Marking Q:%d for reconfig\n", queue); set_bit(queue, unshare_queues); } return false; } /* * Check for inactivity - this includes checking if any queue * can be unshared and finding one (and only one) that can be * reused. * This function is also invoked as a sort of clean-up task, * in which case @alloc_for_sta is IWL_MVM_INVALID_STA. * * Returns the queue number, or -ENOSPC. */ static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta) { unsigned long now = jiffies; unsigned long unshare_queues = 0; unsigned long changetid_queues = 0; int i, ret, free_queue = -ENOSPC; struct ieee80211_sta *queue_owner = NULL; lockdep_assert_held(&mvm->mutex); if (iwl_mvm_has_new_tx_api(mvm)) return -ENOSPC; rcu_read_lock(); /* we skip the CMD queue below by starting at 1 */ BUILD_BUG_ON(IWL_MVM_DQA_CMD_QUEUE != 0); for (i = 1; i < IWL_MAX_HW_QUEUES; i++) { struct ieee80211_sta *sta; struct iwl_mvm_sta *mvmsta; u8 sta_id; int tid; unsigned long inactive_tid_bitmap = 0; unsigned long queue_tid_bitmap; queue_tid_bitmap = mvm->queue_info[i].tid_bitmap; if (!queue_tid_bitmap) continue; /* If TXQ isn't in active use anyway - nothing to do here... */ if (mvm->queue_info[i].status != IWL_MVM_QUEUE_READY && mvm->queue_info[i].status != IWL_MVM_QUEUE_SHARED) continue; /* Check to see if there are inactive TIDs on this queue */ for_each_set_bit(tid, &queue_tid_bitmap, IWL_MAX_TID_COUNT + 1) { if (time_after(mvm->queue_info[i].last_frame_time[tid] + IWL_MVM_DQA_QUEUE_TIMEOUT, now)) continue; inactive_tid_bitmap |= BIT(tid); } /* If all TIDs are active - finish check on this queue */ if (!inactive_tid_bitmap) continue; /* * If we are here - the queue hadn't been served recently and is * in use */ sta_id = mvm->queue_info[i].ra_sta_id; sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); /* * If the STA doesn't exist anymore, it isn't an error. It could * be that it was removed since getting the queues, and in this * case it should've inactivated its queues anyway. 
*/ if (IS_ERR_OR_NULL(sta)) continue; mvmsta = iwl_mvm_sta_from_mac80211(sta); spin_lock_bh(&mvmsta->lock); ret = iwl_mvm_remove_inactive_tids(mvm, mvmsta, i, inactive_tid_bitmap, &unshare_queues, &changetid_queues); if (ret >= 0 && free_queue < 0) { queue_owner = sta; free_queue = ret; } /* only unlock sta lock - we still need the queue info lock */ spin_unlock_bh(&mvmsta->lock); } /* Reconfigure queues requiring reconfiguration */ for_each_set_bit(i, &unshare_queues, IWL_MAX_HW_QUEUES) iwl_mvm_unshare_queue(mvm, i); for_each_set_bit(i, &changetid_queues, IWL_MAX_HW_QUEUES) iwl_mvm_change_queue_tid(mvm, i); if (free_queue >= 0 && alloc_for_sta != IWL_MVM_INVALID_STA) { ret = iwl_mvm_free_inactive_queue(mvm, free_queue, queue_owner, alloc_for_sta); if (ret) { rcu_read_unlock(); return ret; } } rcu_read_unlock(); return free_queue; } static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, struct ieee80211_sta *sta, u8 ac, int tid) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_trans_txq_scd_cfg cfg = { .fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac), .sta_id = mvmsta->sta_id, .tid = tid, .frame_limit = IWL_FRAME_LIMIT, }; unsigned int wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false); int queue = -1; unsigned long disable_agg_tids = 0; enum iwl_mvm_agg_state queue_state; bool shared_queue = false, inc_ssn; int ssn; unsigned long tfd_queue_mask; int ret; lockdep_assert_held(&mvm->mutex); if (iwl_mvm_has_new_tx_api(mvm)) return iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid); spin_lock_bh(&mvmsta->lock); tfd_queue_mask = mvmsta->tfd_queue_msk; ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number); spin_unlock_bh(&mvmsta->lock); if (tid == IWL_MAX_TID_COUNT) { queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id, IWL_MVM_DQA_MIN_MGMT_QUEUE, IWL_MVM_DQA_MAX_MGMT_QUEUE); if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE) IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n", queue); /* If no such queue is found, we'll use a DATA queue instead */ } if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) && (mvm->queue_info[mvmsta->reserved_queue].status == IWL_MVM_QUEUE_RESERVED)) { queue = mvmsta->reserved_queue; mvm->queue_info[queue].reserved = true; IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue); } if (queue < 0) queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id, IWL_MVM_DQA_MIN_DATA_QUEUE, IWL_MVM_DQA_MAX_DATA_QUEUE); if (queue < 0) { /* try harder - perhaps kill an inactive queue */ queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id); } /* No free queue - we'll have to share */ if (queue <= 0) { queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac); if (queue > 0) { shared_queue = true; mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED; } } /* * Mark TXQ as ready, even though it hasn't been fully configured yet, * to make sure no one else takes it. * This will allow avoiding re-acquiring the lock at the end of the * configuration. On error we'll mark it back as free. */ if (queue > 0 && !shared_queue) mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY; /* This shouldn't happen - out of queues */ if (WARN_ON(queue <= 0)) { IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n", tid, cfg.sta_id); return queue; } /* * Actual en/disablement of aggregations is through the ADD_STA HCMD, * but for configuring the SCD to send A-MPDUs we need to mark the queue * as aggregatable.
* Mark all DATA queues as allowing to be aggregated at some point */ cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE || queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE); IWL_DEBUG_TX_QUEUES(mvm, "Allocating %squeue #%d to sta %d on tid %d\n", shared_queue ? "shared " : "", queue, mvmsta->sta_id, tid); if (shared_queue) { /* Disable any open aggs on this queue */ disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue); if (disable_agg_tids) { IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n", queue); iwl_mvm_invalidate_sta_queue(mvm, queue, disable_agg_tids, false); } } inc_ssn = iwl_mvm_enable_txq(mvm, sta, queue, ssn, &cfg, wdg_timeout); /* * Mark queue as shared in transport if shared * Note this has to be done after queue enablement because enablement * can also set this value, and there is no indication there to shared * queues */ if (shared_queue) iwl_trans_txq_set_shared_mode(mvm->trans, queue, true); spin_lock_bh(&mvmsta->lock); /* * This looks racy, but it is not. We have only one packet for * this ra/tid in our Tx path since we stop the Qdisc when we * need to allocate a new TFD queue. */ if (inc_ssn) { mvmsta->tid_data[tid].seq_number += 0x10; ssn = (ssn + 1) & IEEE80211_SCTL_SEQ; } mvmsta->tid_data[tid].txq_id = queue; mvmsta->tfd_queue_msk |= BIT(queue); queue_state = mvmsta->tid_data[tid].state; if (mvmsta->reserved_queue == queue) mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE; spin_unlock_bh(&mvmsta->lock); if (!shared_queue) { ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES); if (ret) goto out_err; /* If we need to re-enable aggregations... */ if (queue_state == IWL_AGG_ON) { ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true); if (ret) goto out_err; } } else { /* Redirect queue, if needed */ ret = iwl_mvm_redirect_queue(mvm, queue, tid, ac, ssn, wdg_timeout, false, iwl_mvm_txq_from_tid(sta, tid)); if (ret) goto out_err; } return 0; out_err: iwl_mvm_disable_txq(mvm, sta, queue, tid, 0); return ret; } static inline u8 iwl_mvm_tid_to_ac_queue(int tid) { if (tid == IWL_MAX_TID_COUNT) return IEEE80211_AC_VO; /* MGMT */ return tid_to_mac80211_ac[tid]; } void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk) { struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, add_stream_wk); mutex_lock(&mvm->mutex); iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA); while (!list_empty(&mvm->add_stream_txqs)) { struct iwl_mvm_txq *mvmtxq; struct ieee80211_txq *txq; u8 tid; mvmtxq = list_first_entry(&mvm->add_stream_txqs, struct iwl_mvm_txq, list); txq = container_of((void *)mvmtxq, struct ieee80211_txq, drv_priv); tid = txq->tid; if (tid == IEEE80211_NUM_TIDS) tid = IWL_MAX_TID_COUNT; iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid); list_del_init(&mvmtxq->list); local_bh_disable(); iwl_mvm_mac_itxq_xmit(mvm->hw, txq); local_bh_enable(); } mutex_unlock(&mvm->mutex); } static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm, struct ieee80211_sta *sta, enum nl80211_iftype vif_type) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); int queue; /* queue reserving is disabled on new TX path */ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) return 0; /* run the general cleanup/unsharing of queues */ iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA); /* Make sure we have free resources for this STA */ if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls && !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].tid_bitmap && (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status == IWL_MVM_QUEUE_FREE)) queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE; else queue = 
iwl_mvm_find_free_queue(mvm, mvmsta->sta_id, IWL_MVM_DQA_MIN_DATA_QUEUE, IWL_MVM_DQA_MAX_DATA_QUEUE); if (queue < 0) { /* try again - this time kick out a queue if needed */ queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id); if (queue < 0) { IWL_ERR(mvm, "No available queues for new station\n"); return -ENOSPC; } } mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED; mvmsta->reserved_queue = queue; IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n", queue, mvmsta->sta_id); return 0; } /* * In DQA mode, after a HW restart the queues should be allocated as before, in * order to avoid race conditions when there are shared queues. This function * does the re-mapping and queue allocation. * * Note that re-enabling aggregations isn't done in this function. */ static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm, struct ieee80211_sta *sta) { struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); unsigned int wdg = iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false); int i; struct iwl_trans_txq_scd_cfg cfg = { .sta_id = mvm_sta->sta_id, .frame_limit = IWL_FRAME_LIMIT, }; /* Make sure reserved queue is still marked as such (if allocated) */ if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) mvm->queue_info[mvm_sta->reserved_queue].status = IWL_MVM_QUEUE_RESERVED; for (i = 0; i <= IWL_MAX_TID_COUNT; i++) { struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i]; int txq_id = tid_data->txq_id; int ac; if (txq_id == IWL_MVM_INVALID_QUEUE) continue; ac = tid_to_mac80211_ac[i]; if (iwl_mvm_has_new_tx_api(mvm)) { IWL_DEBUG_TX_QUEUES(mvm, "Re-mapping sta %d tid %d\n", mvm_sta->sta_id, i); txq_id = iwl_mvm_tvqm_enable_txq(mvm, mvm_sta->sta_id, i, wdg); tid_data->txq_id = txq_id; /* * Since we don't set the seq number after reset, and HW * sets it now, FW reset will cause the seq num to start * at 0 again, so driver will need to update it * internally as well, so it keeps in sync with real val */ tid_data->seq_number = 0; } else { u16 seq = IEEE80211_SEQ_TO_SN(tid_data->seq_number); cfg.tid = i; cfg.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac); cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE || txq_id == IWL_MVM_DQA_BSS_CLIENT_QUEUE); IWL_DEBUG_TX_QUEUES(mvm, "Re-mapping sta %d tid %d to queue %d\n", mvm_sta->sta_id, i, txq_id); iwl_mvm_enable_txq(mvm, sta, txq_id, seq, &cfg, wdg); mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY; } } } static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta, const u8 *addr, u16 mac_id, u16 color) { struct iwl_mvm_add_sta_cmd cmd; int ret; u32 status = ADD_STA_SUCCESS; lockdep_assert_held(&mvm->mutex); memset(&cmd, 0, sizeof(cmd)); cmd.sta_id = sta->sta_id; cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id, color)); if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) cmd.station_type = sta->type; if (!iwl_mvm_has_new_tx_api(mvm)) cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk); cmd.tid_disable_tx = cpu_to_le16(0xffff); if (addr) memcpy(cmd.addr, addr, ETH_ALEN); ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, iwl_mvm_add_sta_cmd_size(mvm), &cmd, &status); if (ret) return ret; switch (status & IWL_ADD_STA_STATUS_MASK) { case ADD_STA_SUCCESS: IWL_DEBUG_INFO(mvm, "Internal station added.\n"); return 0; default: ret = -EIO; IWL_ERR(mvm, "Add internal station failed, status=0x%x\n", status); break; } return ret; } int iwl_mvm_add_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { struct iwl_mvm_vif *mvmvif = 
iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_rxq_dup_data *dup_data; int i, ret, sta_id; bool sta_update = false; unsigned int sta_flags = 0; lockdep_assert_held(&mvm->mutex); if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) sta_id = iwl_mvm_find_free_sta_id(mvm, ieee80211_vif_type_p2p(vif)); else sta_id = mvm_sta->sta_id; if (sta_id == IWL_MVM_INVALID_STA) return -ENOSPC; spin_lock_init(&mvm_sta->lock); /* if this is a HW restart re-alloc existing queues */ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { struct iwl_mvm_int_sta tmp_sta = { .sta_id = sta_id, .type = mvm_sta->sta_type, }; /* * First add an empty station since allocating * a queue requires a valid station */ ret = iwl_mvm_add_int_sta_common(mvm, &tmp_sta, sta->addr, mvmvif->id, mvmvif->color); if (ret) goto err; iwl_mvm_realloc_queues_after_restart(mvm, sta); sta_update = true; sta_flags = iwl_mvm_has_new_tx_api(mvm) ? 0 : STA_MODIFY_QUEUES; goto update_fw; } mvm_sta->sta_id = sta_id; mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color); mvm_sta->vif = vif; if (!mvm->trans->cfg->gen2) mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF; else mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF; mvm_sta->tx_protection = 0; mvm_sta->tt_tx_protection = false; mvm_sta->sta_type = sta->tdls ? IWL_STA_TDLS_LINK : IWL_STA_LINK; /* HW restart, don't assume the memory has been zeroed */ mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */ mvm_sta->tfd_queue_msk = 0; /* for HW restart - reset everything but the sequence number */ for (i = 0; i <= IWL_MAX_TID_COUNT; i++) { u16 seq = mvm_sta->tid_data[i].seq_number; memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i])); mvm_sta->tid_data[i].seq_number = seq; /* * Mark all queues for this STA as unallocated and defer TX * frames until the queue is allocated */ mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE; } for (i = 0; i < ARRAY_SIZE(sta->txq); i++) { struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(sta->txq[i]); mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE; INIT_LIST_HEAD(&mvmtxq->list); atomic_set(&mvmtxq->tx_request, 0); } mvm_sta->agg_tids = 0; if (iwl_mvm_has_new_rx_api(mvm) && !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { int q; dup_data = kcalloc(mvm->trans->num_rx_queues, sizeof(*dup_data), GFP_KERNEL); if (!dup_data) return -ENOMEM; /* * Initialize all the last_seq values to 0xffff which can never * compare equal to the frame's seq_ctrl in the check in * iwl_mvm_is_dup() since the lower 4 bits are the fragment * number and fragmented packets don't reach that function. * * This thus allows receiving a packet with seqno 0 and the * retry bit set as the very first packet on a new TID. 
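* (The memset() with 0xff bytes right below is what produces this 0xffff pattern in every 16-bit last_seq entry.)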
*/ for (q = 0; q < mvm->trans->num_rx_queues; q++) memset(dup_data[q].last_seq, 0xff, sizeof(dup_data[q].last_seq)); mvm_sta->dup_data = dup_data; } if (!iwl_mvm_has_new_tx_api(mvm)) { ret = iwl_mvm_reserve_sta_stream(mvm, sta, ieee80211_vif_type_p2p(vif)); if (ret) goto err; } /* * if rs is registered with mac80211, then "add station" will be handled * via the corresponding ops, otherwise need to notify rate scaling here */ if (iwl_mvm_has_tlc_offload(mvm)) iwl_mvm_rs_add_sta(mvm, mvm_sta); else spin_lock_init(&mvm_sta->lq_sta.rs_drv.pers.lock); iwl_mvm_toggle_tx_ant(mvm, &mvm_sta->tx_ant); update_fw: ret = iwl_mvm_sta_send_to_fw(mvm, sta, sta_update, sta_flags); if (ret) goto err; if (vif->type == NL80211_IFTYPE_STATION) { if (!sta->tdls) { WARN_ON(mvmvif->ap_sta_id != IWL_MVM_INVALID_STA); mvmvif->ap_sta_id = sta_id; } else { WARN_ON(mvmvif->ap_sta_id == IWL_MVM_INVALID_STA); } } rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta); return 0; err: return ret; } int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, bool drain) { struct iwl_mvm_add_sta_cmd cmd = {}; int ret; u32 status; lockdep_assert_held(&mvm->mutex); cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color); cmd.sta_id = mvmsta->sta_id; cmd.add_modify = STA_MODE_MODIFY; cmd.station_flags = drain ? cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0; cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW); status = ADD_STA_SUCCESS; ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, iwl_mvm_add_sta_cmd_size(mvm), &cmd, &status); if (ret) return ret; switch (status & IWL_ADD_STA_STATUS_MASK) { case ADD_STA_SUCCESS: IWL_DEBUG_INFO(mvm, "Frames for staid %d will be drained in fw\n", mvmsta->sta_id); break; default: ret = -EIO; IWL_ERR(mvm, "Couldn't drain frames for staid %d\n", mvmsta->sta_id); break; } return ret; } /* * Remove a station from the FW table. Before sending the command to remove * the station validate that the station is indeed known to the driver (sanity * only). */ static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id) { struct ieee80211_sta *sta; struct iwl_mvm_rm_sta_cmd rm_sta_cmd = { .sta_id = sta_id, }; int ret; sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], lockdep_is_held(&mvm->mutex)); /* Note: internal stations are marked as error values */ if (!sta) { IWL_ERR(mvm, "Invalid station id\n"); return -EINVAL; } ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0, sizeof(rm_sta_cmd), &rm_sta_cmd); if (ret) { IWL_ERR(mvm, "Failed to remove station. 
Id=%d\n", sta_id); return ret; } return 0; } static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); int i; lockdep_assert_held(&mvm->mutex); for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) { if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE) continue; iwl_mvm_disable_txq(mvm, sta, mvm_sta->tid_data[i].txq_id, i, 0); mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE; } for (i = 0; i < ARRAY_SIZE(sta->txq); i++) { struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(sta->txq[i]); mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE; } } int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvm_sta) { int i; for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) { u16 txq_id; int ret; spin_lock_bh(&mvm_sta->lock); txq_id = mvm_sta->tid_data[i].txq_id; spin_unlock_bh(&mvm_sta->lock); if (txq_id == IWL_MVM_INVALID_QUEUE) continue; ret = iwl_trans_wait_txq_empty(mvm->trans, txq_id); if (ret) return ret; } return 0; } int iwl_mvm_rm_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); u8 sta_id = mvm_sta->sta_id; int ret; lockdep_assert_held(&mvm->mutex); if (iwl_mvm_has_new_rx_api(mvm)) kfree(mvm_sta->dup_data); ret = iwl_mvm_drain_sta(mvm, mvm_sta, true); if (ret) return ret; /* flush its queues here since we are freeing mvm_sta */ ret = iwl_mvm_flush_sta(mvm, mvm_sta, false, 0); if (ret) return ret; if (iwl_mvm_has_new_tx_api(mvm)) { ret = iwl_mvm_wait_sta_queues_empty(mvm, mvm_sta); } else { u32 q_mask = mvm_sta->tfd_queue_msk; ret = iwl_trans_wait_tx_queues_empty(mvm->trans, q_mask); } if (ret) return ret; ret = iwl_mvm_drain_sta(mvm, mvm_sta, false); iwl_mvm_disable_sta_queues(mvm, vif, sta); /* If there is a TXQ still marked as reserved - free it */ if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) { u8 reserved_txq = mvm_sta->reserved_queue; enum iwl_mvm_queue_status *status; /* * If no traffic has gone through the reserved TXQ - it * is still marked as IWL_MVM_QUEUE_RESERVED, and * should be manually marked as free again */ status = &mvm->queue_info[reserved_txq].status; if (WARN((*status != IWL_MVM_QUEUE_RESERVED) && (*status != IWL_MVM_QUEUE_FREE), "sta_id %d reserved txq %d status %d", sta_id, reserved_txq, *status)) return -EINVAL; *status = IWL_MVM_QUEUE_FREE; } if (vif->type == NL80211_IFTYPE_STATION && mvmvif->ap_sta_id == sta_id) { /* if associated - we can't remove the AP STA now */ if (vif->bss_conf.assoc) return ret; /* unassoc - go ahead - remove the AP STA now */ mvmvif->ap_sta_id = IWL_MVM_INVALID_STA; } /* * This shouldn't happen - the TDLS channel switch should be canceled * before the STA is removed. */ if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == sta_id)) { mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA; cancel_delayed_work(&mvm->tdls_cs.dwork); } /* * Make sure that the tx response code sees the station as -EBUSY and * calls the drain worker. 
*/ spin_lock_bh(&mvm_sta->lock); spin_unlock_bh(&mvm_sta->lock); ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id); RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL); return ret; } int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u8 sta_id) { int ret = iwl_mvm_rm_sta_common(mvm, sta_id); lockdep_assert_held(&mvm->mutex); RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL); return ret; } int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta, u32 qmask, enum nl80211_iftype iftype, enum iwl_sta_type type) { if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) || sta->sta_id == IWL_MVM_INVALID_STA) { sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype); if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_INVALID_STA)) return -ENOSPC; } sta->tfd_queue_msk = qmask; sta->type = type; /* put a non-NULL value so iterating over the stations won't stop */ rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL)); return 0; } void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta) { RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL); memset(sta, 0, sizeof(struct iwl_mvm_int_sta)); sta->sta_id = IWL_MVM_INVALID_STA; } static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 *queue, u8 sta_id, u8 fifo) { unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ? mvm->cfg->base_params->wd_timeout : IWL_WATCHDOG_DISABLED; if (iwl_mvm_has_new_tx_api(mvm)) { int tvqm_queue = iwl_mvm_tvqm_enable_txq(mvm, sta_id, IWL_MAX_TID_COUNT, wdg_timeout); *queue = tvqm_queue; } else { struct iwl_trans_txq_scd_cfg cfg = { .fifo = fifo, .sta_id = sta_id, .tid = IWL_MAX_TID_COUNT, .aggregate = false, .frame_limit = IWL_FRAME_LIMIT, }; iwl_mvm_enable_txq(mvm, NULL, *queue, 0, &cfg, wdg_timeout); } } int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm) { int ret; lockdep_assert_held(&mvm->mutex); /* Allocate aux station and assign to it the aux queue */ ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue), NL80211_IFTYPE_UNSPECIFIED, IWL_STA_AUX_ACTIVITY); if (ret) return ret; /* Map Aux queue to fifo - needs to happen before adding Aux station */ if (!iwl_mvm_has_new_tx_api(mvm)) iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue, mvm->aux_sta.sta_id, IWL_MVM_TX_FIFO_MCAST); ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL, MAC_INDEX_AUX, 0); if (ret) { iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta); return ret; } /* * For 22000 firmware and on we cannot add queue to a station unknown * to firmware so enable queue here - after the station was added */ if (iwl_mvm_has_new_tx_api(mvm)) iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue, mvm->aux_sta.sta_id, IWL_MVM_TX_FIFO_MCAST); return 0; } int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int ret; lockdep_assert_held(&mvm->mutex); /* Map snif queue to fifo - must happen before adding snif station */ if (!iwl_mvm_has_new_tx_api(mvm)) iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue, mvm->snif_sta.sta_id, IWL_MVM_TX_FIFO_BE); ret = iwl_mvm_add_int_sta_common(mvm, &mvm->snif_sta, vif->addr, mvmvif->id, 0); if (ret) return ret; /* * For 22000 firmware and on we cannot add queue to a station unknown * to firmware so enable queue here - after the station was added */ if (iwl_mvm_has_new_tx_api(mvm)) iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue, mvm->snif_sta.sta_id, IWL_MVM_TX_FIFO_BE); return 0; } int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct 
ieee80211_vif *vif) { int ret; lockdep_assert_held(&mvm->mutex); iwl_mvm_disable_txq(mvm, NULL, mvm->snif_queue, IWL_MAX_TID_COUNT, 0); ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id); if (ret) IWL_WARN(mvm, "Failed sending remove station\n"); return ret; } void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm) { iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta); } void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm) { lockdep_assert_held(&mvm->mutex); iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta); } /* * Send the add station command for the vif's broadcast station. * Assumes that the station was already allocated. * * @mvm: the mvm component * @vif: the interface to which the broadcast station is added * @bsta: the broadcast station to add. */ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta; static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}; const u8 *baddr = _baddr; int queue; int ret; unsigned int wdg_timeout = iwl_mvm_get_wd_timeout(mvm, vif, false, false); struct iwl_trans_txq_scd_cfg cfg = { .fifo = IWL_MVM_TX_FIFO_VO, .sta_id = mvmvif->bcast_sta.sta_id, .tid = IWL_MAX_TID_COUNT, .aggregate = false, .frame_limit = IWL_FRAME_LIMIT, }; lockdep_assert_held(&mvm->mutex); if (!iwl_mvm_has_new_tx_api(mvm)) { if (vif->type == NL80211_IFTYPE_AP || vif->type == NL80211_IFTYPE_ADHOC) { queue = mvm->probe_queue; } else if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { queue = mvm->p2p_dev_queue; } else { WARN(1, "Missing required TXQ for adding bcast STA\n"); return -EINVAL; } bsta->tfd_queue_msk |= BIT(queue); iwl_mvm_enable_txq(mvm, NULL, queue, 0, &cfg, wdg_timeout); } if (vif->type == NL80211_IFTYPE_ADHOC) baddr = vif->bss_conf.bssid; if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_INVALID_STA)) return -ENOSPC; ret = iwl_mvm_add_int_sta_common(mvm, bsta, baddr, mvmvif->id, mvmvif->color); if (ret) return ret; /* * For 22000 firmware and on we cannot add queue to a station unknown * to firmware so enable queue here - after the station was added */ if (iwl_mvm_has_new_tx_api(mvm)) { queue = iwl_mvm_tvqm_enable_txq(mvm, bsta->sta_id, IWL_MAX_TID_COUNT, wdg_timeout); if (vif->type == NL80211_IFTYPE_AP || vif->type == NL80211_IFTYPE_ADHOC) mvm->probe_queue = queue; else if (vif->type == NL80211_IFTYPE_P2P_DEVICE) mvm->p2p_dev_queue = queue; } return 0; } static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int queue; lockdep_assert_held(&mvm->mutex); iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true, 0); switch (vif->type) { case NL80211_IFTYPE_AP: case NL80211_IFTYPE_ADHOC: queue = mvm->probe_queue; break; case NL80211_IFTYPE_P2P_DEVICE: queue = mvm->p2p_dev_queue; break; default: WARN(1, "Can't free bcast queue on vif type %d\n", vif->type); return; } iwl_mvm_disable_txq(mvm, NULL, queue, IWL_MAX_TID_COUNT, 0); if (iwl_mvm_has_new_tx_api(mvm)) return; WARN_ON(!(mvmvif->bcast_sta.tfd_queue_msk & BIT(queue))); mvmvif->bcast_sta.tfd_queue_msk &= ~BIT(queue); } /* Send the FW a request to remove the station from its internal data * structures, but DO NOT remove the entry from the local data structures. 
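* (The local entry is freed separately, via iwl_mvm_dealloc_bcast_sta().)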
*/ int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int ret; lockdep_assert_held(&mvm->mutex); iwl_mvm_free_bcast_sta_queues(mvm, vif); ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id); if (ret) IWL_WARN(mvm, "Failed sending remove station\n"); return ret; } int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); lockdep_assert_held(&mvm->mutex); return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, 0, ieee80211_vif_type_p2p(vif), IWL_STA_GENERAL_PURPOSE); } /* Allocate a new station entry for the broadcast station to the given vif, * and send it to the FW. * Note that each P2P mac should have its own broadcast station. * * @mvm: the mvm component * @vif: the interface to which the broadcast station is added * @bsta: the broadcast station to add. */ int iwl_mvm_add_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta; int ret; lockdep_assert_held(&mvm->mutex); ret = iwl_mvm_alloc_bcast_sta(mvm, vif); if (ret) return ret; ret = iwl_mvm_send_add_bcast_sta(mvm, vif); if (ret) iwl_mvm_dealloc_int_sta(mvm, bsta); return ret; } void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta); } /* * Send the FW a request to remove the station from its internal data * structures, and in addition remove it from the local data structure. */ int iwl_mvm_rm_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { int ret; lockdep_assert_held(&mvm->mutex); ret = iwl_mvm_send_rm_bcast_sta(mvm, vif); iwl_mvm_dealloc_bcast_sta(mvm, vif); return ret; } /* * Allocate a new station entry for the multicast station to the given vif, * and send it to the FW. * Note that each AP/GO mac should have its own multicast station. * * @mvm: the mvm component * @vif: the interface to which the multicast station is added */ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_int_sta *msta = &mvmvif->mcast_sta; static const u8 _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00}; const u8 *maddr = _maddr; struct iwl_trans_txq_scd_cfg cfg = { .fifo = vif->type == NL80211_IFTYPE_AP ? IWL_MVM_TX_FIFO_MCAST : IWL_MVM_TX_FIFO_BE, .sta_id = msta->sta_id, .tid = 0, .aggregate = false, .frame_limit = IWL_FRAME_LIMIT, }; unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif, false, false); int ret; lockdep_assert_held(&mvm->mutex); if (WARN_ON(vif->type != NL80211_IFTYPE_AP && vif->type != NL80211_IFTYPE_ADHOC)) return -ENOTSUPP; /* * In IBSS, ieee80211_check_queues() sets the cab_queue to be * invalid, so make sure we use the queue we want. * Note that this is done here as we want to avoid making DQA * changes in mac80211 layer. */ if (vif->type == NL80211_IFTYPE_ADHOC) mvmvif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE; /* * While in previous FWs we had to exclude cab queue from TFD queue * mask, now it is needed as any other queue. 
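* (This is why the cab queue is OR'ed into msta->tfd_queue_msk just below when the firmware advertises IWL_UCODE_TLV_API_STA_TYPE.)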
*/ if (!iwl_mvm_has_new_tx_api(mvm) && fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) { iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg, timeout); msta->tfd_queue_msk |= BIT(mvmvif->cab_queue); } ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr, mvmvif->id, mvmvif->color); if (ret) { iwl_mvm_dealloc_int_sta(mvm, msta); return ret; } /* * Enable cab queue after the ADD_STA command is sent. * This is needed for 22000 firmware which won't accept SCD_QUEUE_CFG * command with unknown station id, and for FW that doesn't support * station API since the cab queue is not included in the * tfd_queue_mask. */ if (iwl_mvm_has_new_tx_api(mvm)) { int queue = iwl_mvm_tvqm_enable_txq(mvm, msta->sta_id, 0, timeout); mvmvif->cab_queue = queue; } else if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg, timeout); return 0; } static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id, struct ieee80211_key_conf *keyconf, bool mcast) { union { struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1; struct iwl_mvm_add_sta_key_cmd cmd; } u = {}; bool new_api = fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_TKIP_MIC_KEYS); __le16 key_flags; int ret, size; u32 status; /* This is a valid situation for GTK removal */ if (sta_id == IWL_MVM_INVALID_STA) return 0; key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) & STA_KEY_FLG_KEYID_MSK); key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP); key_flags |= cpu_to_le16(STA_KEY_NOT_VALID); if (mcast) key_flags |= cpu_to_le16(STA_KEY_MULTICAST); /* * The fields assigned here are in the same location at the start * of the command, so we can do this union trick. */ u.cmd.common.key_flags = key_flags; u.cmd.common.key_offset = keyconf->hw_key_idx; u.cmd.common.sta_id = sta_id; size = new_api ? sizeof(u.cmd) : sizeof(u.cmd_v1); status = ADD_STA_SUCCESS; ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, &u.cmd, &status); switch (status) { case ADD_STA_SUCCESS: IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n"); break; default: ret = -EIO; IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n"); break; } return ret; } /* * Send the FW a request to remove the station from its internal data * structures, and in addition remove it from the local data structure. 
*/ int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int ret; lockdep_assert_held(&mvm->mutex); iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true, 0); iwl_mvm_disable_txq(mvm, NULL, mvmvif->cab_queue, 0, 0); ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id); if (ret) IWL_WARN(mvm, "Failed sending remove station\n"); return ret; } #define IWL_MAX_RX_BA_SESSIONS 16 static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid) { struct iwl_mvm_rss_sync_notif notif = { .metadata.type = IWL_MVM_RXQ_NOTIF_DEL_BA, .metadata.sync = 1, .delba.baid = baid, }; iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif, sizeof(notif)); } static void iwl_mvm_free_reorder(struct iwl_mvm *mvm, struct iwl_mvm_baid_data *data) { int i; iwl_mvm_sync_rxq_del_ba(mvm, data->baid); for (i = 0; i < mvm->trans->num_rx_queues; i++) { int j; struct iwl_mvm_reorder_buffer *reorder_buf = &data->reorder_buf[i]; struct iwl_mvm_reorder_buf_entry *entries = &data->entries[i * data->entries_per_queue]; spin_lock_bh(&reorder_buf->lock); if (likely(!reorder_buf->num_stored)) { spin_unlock_bh(&reorder_buf->lock); continue; } /* * This shouldn't happen in regular DELBA since the internal * delBA notification should trigger a release of all frames in * the reorder buffer. */ WARN_ON(1); for (j = 0; j < reorder_buf->buf_size; j++) __skb_queue_purge(&entries[j].e.frames); /* * Prevent timer re-arm. This prevents a very far fetched case * where we timed out on the notification. There may be prior * RX frames pending in the RX queue before the notification * that might get processed between now and the actual deletion * and we would re-arm the timer although we are deleting the * reorder buffer. */ reorder_buf->removed = true; spin_unlock_bh(&reorder_buf->lock); del_timer_sync(&reorder_buf->reorder_timer); } } static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm, struct iwl_mvm_baid_data *data, u16 ssn, u16 buf_size) { int i; for (i = 0; i < mvm->trans->num_rx_queues; i++) { struct iwl_mvm_reorder_buffer *reorder_buf = &data->reorder_buf[i]; struct iwl_mvm_reorder_buf_entry *entries = &data->entries[i * data->entries_per_queue]; int j; reorder_buf->num_stored = 0; reorder_buf->head_sn = ssn; reorder_buf->buf_size = buf_size; /* rx reorder timer */ timer_setup(&reorder_buf->reorder_timer, iwl_mvm_reorder_timer_expired, 0); spin_lock_init(&reorder_buf->lock); reorder_buf->mvm = mvm; reorder_buf->queue = i; reorder_buf->valid = false; for (j = 0; j < reorder_buf->buf_size; j++) __skb_queue_head_init(&entries[j].e.frames); } } int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, int tid, u16 ssn, bool start, u16 buf_size, u16 timeout) { struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_add_sta_cmd cmd = {}; struct iwl_mvm_baid_data *baid_data = NULL; int ret; u32 status; lockdep_assert_held(&mvm->mutex); if (start && mvm->rx_ba_sessions >= IWL_MAX_RX_BA_SESSIONS) { IWL_WARN(mvm, "Not enough RX BA SESSIONS\n"); return -ENOSPC; } if (iwl_mvm_has_new_rx_api(mvm) && start) { u16 reorder_buf_size = buf_size * sizeof(baid_data->entries[0]); /* sparse doesn't like the __align() so don't check */ #ifndef __CHECKER__ /* * The division below will be OK if either the cache line size * can be divided by the entry size (ALIGN will round up) or if * the entry size can be divided by the cache line size, in * which case the ALIGN() will do nothing. 
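* As a purely illustrative example: with 64-byte cache lines and 16-byte entries, the ALIGN() below rounds each queue's buffer up to a whole number of cache lines, while with 128-byte entries the size is already a multiple of the cache line size and is left untouched.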
*/ BUILD_BUG_ON(SMP_CACHE_BYTES % sizeof(baid_data->entries[0]) && sizeof(baid_data->entries[0]) % SMP_CACHE_BYTES); #endif /* * Upward align the reorder buffer size to fill an entire cache * line for each queue, to avoid sharing cache lines between * different queues. */ reorder_buf_size = ALIGN(reorder_buf_size, SMP_CACHE_BYTES); /* * Allocate here so if allocation fails we can bail out early * before starting the BA session in the firmware */ baid_data = kzalloc(sizeof(*baid_data) + mvm->trans->num_rx_queues * reorder_buf_size, GFP_KERNEL); if (!baid_data) return -ENOMEM; /* * This division is why we need the above BUILD_BUG_ON(), * if that doesn't hold then this will not be right. */ baid_data->entries_per_queue = reorder_buf_size / sizeof(baid_data->entries[0]); } cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color); cmd.sta_id = mvm_sta->sta_id; cmd.add_modify = STA_MODE_MODIFY; if (start) { cmd.add_immediate_ba_tid = (u8) tid; cmd.add_immediate_ba_ssn = cpu_to_le16(ssn); cmd.rx_ba_window = cpu_to_le16(buf_size); } else { cmd.remove_immediate_ba_tid = (u8) tid; } cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID : STA_MODIFY_REMOVE_BA_TID; status = ADD_STA_SUCCESS; ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, iwl_mvm_add_sta_cmd_size(mvm), &cmd, &status); if (ret) goto out_free; switch (status & IWL_ADD_STA_STATUS_MASK) { case ADD_STA_SUCCESS: IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n", start ? "start" : "stopp"); break; case ADD_STA_IMMEDIATE_BA_FAILURE: IWL_WARN(mvm, "RX BA Session refused by fw\n"); ret = -ENOSPC; break; default: ret = -EIO; IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n", start ? "start" : "stopp", status); break; } if (ret) goto out_free; if (start) { u8 baid; mvm->rx_ba_sessions++; if (!iwl_mvm_has_new_rx_api(mvm)) return 0; if (WARN_ON(!(status & IWL_ADD_STA_BAID_VALID_MASK))) { ret = -EINVAL; goto out_free; } baid = (u8)((status & IWL_ADD_STA_BAID_MASK) >> IWL_ADD_STA_BAID_SHIFT); baid_data->baid = baid; baid_data->timeout = timeout; baid_data->last_rx = jiffies; baid_data->rcu_ptr = &mvm->baid_map[baid]; timer_setup(&baid_data->session_timer, iwl_mvm_rx_agg_session_expired, 0); baid_data->mvm = mvm; baid_data->tid = tid; baid_data->sta_id = mvm_sta->sta_id; mvm_sta->tid_to_baid[tid] = baid; if (timeout) mod_timer(&baid_data->session_timer, TU_TO_EXP_TIME(timeout * 2)); iwl_mvm_init_reorder_buffer(mvm, baid_data, ssn, buf_size); /* * protect the BA data with RCU to cover a case where our * internal RX sync mechanism will timeout (not that it's * supposed to happen) and we will free the session data while * RX is being processed in parallel */ IWL_DEBUG_HT(mvm, "Sta %d(%d) is assigned to BAID %d\n", mvm_sta->sta_id, tid, baid); WARN_ON(rcu_access_pointer(mvm->baid_map[baid])); rcu_assign_pointer(mvm->baid_map[baid], baid_data); } else { u8 baid = mvm_sta->tid_to_baid[tid]; if (mvm->rx_ba_sessions > 0) /* check that restart flow didn't zero the counter */ mvm->rx_ba_sessions--; if (!iwl_mvm_has_new_rx_api(mvm)) return 0; if (WARN_ON(baid == IWL_RX_REORDER_DATA_INVALID_BAID)) return -EINVAL; baid_data = rcu_access_pointer(mvm->baid_map[baid]); if (WARN_ON(!baid_data)) return -EINVAL; /* synchronize all rx queues so we can safely delete */ iwl_mvm_free_reorder(mvm, baid_data); del_timer_sync(&baid_data->session_timer); RCU_INIT_POINTER(mvm->baid_map[baid], NULL); kfree_rcu(baid_data, rcu_head); IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid); } return 0; out_free: kfree(baid_data); return ret; } int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct 
ieee80211_sta *sta, int tid, u8 queue, bool start) { struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_add_sta_cmd cmd = {}; int ret; u32 status; lockdep_assert_held(&mvm->mutex); if (start) { mvm_sta->tfd_queue_msk |= BIT(queue); mvm_sta->tid_disable_agg &= ~BIT(tid); } else { /* In DQA-mode the queue isn't removed on agg termination */ mvm_sta->tid_disable_agg |= BIT(tid); } cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color); cmd.sta_id = mvm_sta->sta_id; cmd.add_modify = STA_MODE_MODIFY; if (!iwl_mvm_has_new_tx_api(mvm)) cmd.modify_mask = STA_MODIFY_QUEUES; cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX; cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk); cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg); status = ADD_STA_SUCCESS; ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, iwl_mvm_add_sta_cmd_size(mvm), &cmd, &status); if (ret) return ret; switch (status & IWL_ADD_STA_STATUS_MASK) { case ADD_STA_SUCCESS: break; default: ret = -EIO; IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n", start ? "start" : "stopp", status); break; } return ret; } const u8 tid_to_mac80211_ac[] = { IEEE80211_AC_BE, IEEE80211_AC_BK, IEEE80211_AC_BK, IEEE80211_AC_BE, IEEE80211_AC_VI, IEEE80211_AC_VI, IEEE80211_AC_VO, IEEE80211_AC_VO, IEEE80211_AC_VO, /* We treat MGMT as TID 8, which is set as AC_VO */ }; static const u8 tid_to_ucode_ac[] = { AC_BE, AC_BK, AC_BK, AC_BE, AC_VI, AC_VI, AC_VO, AC_VO, }; int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta, u16 tid, u16 *ssn) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_tid_data *tid_data; u16 normalized_ssn; u16 txq_id; int ret; if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT)) return -EINVAL; if (mvmsta->tid_data[tid].state != IWL_AGG_QUEUED && mvmsta->tid_data[tid].state != IWL_AGG_OFF) { IWL_ERR(mvm, "Start AGG when state is not IWL_AGG_QUEUED or IWL_AGG_OFF %d!\n", mvmsta->tid_data[tid].state); return -ENXIO; } lockdep_assert_held(&mvm->mutex); if (mvmsta->tid_data[tid].txq_id == IWL_MVM_INVALID_QUEUE && iwl_mvm_has_new_tx_api(mvm)) { u8 ac = tid_to_mac80211_ac[tid]; ret = iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid); if (ret) return ret; } spin_lock_bh(&mvmsta->lock); /* * Note the possible cases: * 1. An enabled TXQ - TXQ needs to become agg'ed * 2. 
The TXQ hasn't yet been enabled, so find a free one and mark * it as reserved */ txq_id = mvmsta->tid_data[tid].txq_id; if (txq_id == IWL_MVM_INVALID_QUEUE) { ret = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id, IWL_MVM_DQA_MIN_DATA_QUEUE, IWL_MVM_DQA_MAX_DATA_QUEUE); if (ret < 0) { IWL_ERR(mvm, "Failed to allocate agg queue\n"); goto out; } txq_id = ret; /* TXQ hasn't yet been enabled, so mark it only as reserved */ mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED; } else if (WARN_ON(txq_id >= IWL_MAX_HW_QUEUES)) { ret = -ENXIO; IWL_ERR(mvm, "tid_id %d out of range (0, %d)!\n", tid, IWL_MAX_HW_QUEUES - 1); goto out; } else if (unlikely(mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_SHARED)) { ret = -ENXIO; IWL_DEBUG_TX_QUEUES(mvm, "Can't start tid %d agg on shared queue!\n", tid); goto out; } IWL_DEBUG_TX_QUEUES(mvm, "AGG for tid %d will be on queue #%d\n", tid, txq_id); tid_data = &mvmsta->tid_data[tid]; tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number); tid_data->txq_id = txq_id; *ssn = tid_data->ssn; IWL_DEBUG_TX_QUEUES(mvm, "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n", mvmsta->sta_id, tid, txq_id, tid_data->ssn, tid_data->next_reclaimed); /* * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need * to align the wrap around of ssn so we compare relevant values. */ normalized_ssn = tid_data->ssn; if (mvm->trans->cfg->gen2) normalized_ssn &= 0xff; if (normalized_ssn == tid_data->next_reclaimed) { tid_data->state = IWL_AGG_STARTING; ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); } else { tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA; } ret = 0; out: spin_unlock_bh(&mvmsta->lock); return ret; } int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta, u16 tid, u16 buf_size, bool amsdu) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; unsigned int wdg_timeout = iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false); int queue, ret; bool alloc_queue = true; enum iwl_mvm_queue_status queue_status; u16 ssn; struct iwl_trans_txq_scd_cfg cfg = { .sta_id = mvmsta->sta_id, .tid = tid, .frame_limit = buf_size, .aggregate = true, }; /* * When FW supports TLC_OFFLOAD, it also implements Tx aggregation * manager, so this function should never be called in this case. */ if (WARN_ON_ONCE(iwl_mvm_has_tlc_offload(mvm))) return -EINVAL; BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE) != IWL_MAX_TID_COUNT); spin_lock_bh(&mvmsta->lock); ssn = tid_data->ssn; queue = tid_data->txq_id; tid_data->state = IWL_AGG_ON; mvmsta->agg_tids |= BIT(tid); tid_data->ssn = 0xffff; tid_data->amsdu_in_ampdu_allowed = amsdu; spin_unlock_bh(&mvmsta->lock); if (iwl_mvm_has_new_tx_api(mvm)) { /* * If there is no queue for this tid, iwl_mvm_sta_tx_agg_start() * would have failed, so if we are here there is no need to * allocate a queue. * However, if aggregation size is different than the default * size, the scheduler should be reconfigured. * We cannot do this with the new TX API, so return unsupported * for now, until it will be offloaded to firmware.. * Note that if SCD default value changes - this condition * should be updated as well. */ if (buf_size < IWL_FRAME_LIMIT) return -ENOTSUPP; ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true); if (ret) return -EIO; goto out; } cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]]; queue_status = mvm->queue_info[queue].status; /* Maybe there is no need to even alloc a queue... 
*/ if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY) alloc_queue = false; /* * Only reconfig the SCD for the queue if the window size has * changed from current (become smaller) */ if (!alloc_queue && buf_size < IWL_FRAME_LIMIT) { /* * If reconfiguring an existing queue, it first must be * drained */ ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue)); if (ret) { IWL_ERR(mvm, "Error draining queue before reconfig\n"); return ret; } ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo, mvmsta->sta_id, tid, buf_size, ssn); if (ret) { IWL_ERR(mvm, "Error reconfiguring TXQ #%d\n", queue); return ret; } } if (alloc_queue) iwl_mvm_enable_txq(mvm, sta, queue, ssn, &cfg, wdg_timeout); /* Send ADD_STA command to enable aggs only if the queue isn't shared */ if (queue_status != IWL_MVM_QUEUE_SHARED) { ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true); if (ret) return -EIO; } /* No need to mark as reserved */ mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY; out: /* * Even though in theory the peer could have different * aggregation reorder buffer sizes for different sessions, * our ucode doesn't allow for that and has a global limit * for each station. Therefore, use the minimum of all the * aggregation sessions and our default value. */ mvmsta->max_agg_bufsize = min(mvmsta->max_agg_bufsize, buf_size); mvmsta->lq_sta.rs_drv.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize; IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n", sta->addr, tid); return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.rs_drv.lq); } static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, struct iwl_mvm_tid_data *tid_data) { u16 txq_id = tid_data->txq_id; lockdep_assert_held(&mvm->mutex); if (iwl_mvm_has_new_tx_api(mvm)) return; /* * The TXQ is marked as reserved only if no traffic came through yet * This means no traffic has been sent on this TID (agg'd or not), so * we no longer have use for the queue. Since it hasn't even been * allocated through iwl_mvm_enable_txq, so we can just mark it back as * free. */ if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED) { mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE; tid_data->txq_id = IWL_MVM_INVALID_QUEUE; } } int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta, u16 tid) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; u16 txq_id; int err; /* * If mac80211 is cleaning its state, then say that we finished since * our state has been cleared anyway. */ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); return 0; } spin_lock_bh(&mvmsta->lock); txq_id = tid_data->txq_id; IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n", mvmsta->sta_id, tid, txq_id, tid_data->state); mvmsta->agg_tids &= ~BIT(tid); iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data); switch (tid_data->state) { case IWL_AGG_ON: tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number); IWL_DEBUG_TX_QUEUES(mvm, "ssn = %d, next_recl = %d\n", tid_data->ssn, tid_data->next_reclaimed); tid_data->ssn = 0xffff; tid_data->state = IWL_AGG_OFF; spin_unlock_bh(&mvmsta->lock); ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false); return 0; case IWL_AGG_STARTING: case IWL_EMPTYING_HW_QUEUE_ADDBA: /* * The agg session has been stopped before it was set up. 
This * can happen when the AddBA timer times out for example. */ /* No barriers since we are under mutex */ lockdep_assert_held(&mvm->mutex); ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); tid_data->state = IWL_AGG_OFF; err = 0; break; default: IWL_ERR(mvm, "Stopping AGG while state not ON or starting for %d on %d (%d)\n", mvmsta->sta_id, tid, tid_data->state); IWL_ERR(mvm, "\ttid_data->txq_id = %d\n", tid_data->txq_id); err = -EINVAL; } spin_unlock_bh(&mvmsta->lock); return err; } int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta, u16 tid) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; u16 txq_id; enum iwl_mvm_agg_state old_state; /* * First set the agg state to OFF to avoid calling * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty. */ spin_lock_bh(&mvmsta->lock); txq_id = tid_data->txq_id; IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n", mvmsta->sta_id, tid, txq_id, tid_data->state); old_state = tid_data->state; tid_data->state = IWL_AGG_OFF; mvmsta->agg_tids &= ~BIT(tid); spin_unlock_bh(&mvmsta->lock); iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data); if (old_state >= IWL_AGG_ON) { iwl_mvm_drain_sta(mvm, mvmsta, true); if (iwl_mvm_has_new_tx_api(mvm)) { if (iwl_mvm_flush_sta_tids(mvm, mvmsta->sta_id, BIT(tid), 0)) IWL_ERR(mvm, "Couldn't flush the AGG queue\n"); iwl_trans_wait_txq_empty(mvm->trans, txq_id); } else { if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), 0)) IWL_ERR(mvm, "Couldn't flush the AGG queue\n"); iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(txq_id)); } iwl_mvm_drain_sta(mvm, mvmsta, false); iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false); } return 0; } static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm) { int i, max = -1, max_offs = -1; lockdep_assert_held(&mvm->mutex); /* Pick the unused key offset with the highest 'deleted' * counter. Every time a key is deleted, all the counters * are incremented and the one that was just deleted is * reset to zero. Thus, the highest counter is the one * that was deleted longest ago. Pick that one. */ for (i = 0; i < STA_KEY_MAX_NUM; i++) { if (test_bit(i, mvm->fw_key_table)) continue; if (mvm->fw_key_deleted[i] > max) { max = mvm->fw_key_deleted[i]; max_offs = i; } } if (max_offs < 0) return STA_KEY_IDX_INVALID; return max_offs; } static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); if (sta) return iwl_mvm_sta_from_mac80211(sta); /* * The device expects GTKs for station interfaces to be * installed as GTKs for the AP station. If we have no * station ID, then use AP's station ID. */ if (vif->type == NL80211_IFTYPE_STATION && mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) { u8 sta_id = mvmvif->ap_sta_id; sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id], lockdep_is_held(&mvm->mutex)); /* * It is possible that the 'sta' parameter is NULL, * for example when a GTK is removed - the sta_id will then * be the AP ID, and no station was passed by mac80211. 
*/ if (IS_ERR_OR_NULL(sta)) return NULL; return iwl_mvm_sta_from_mac80211(sta); } return NULL; } static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm, u32 sta_id, struct ieee80211_key_conf *key, bool mcast, u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags, u8 key_offset, bool mfp) { union { struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1; struct iwl_mvm_add_sta_key_cmd cmd; } u = {}; __le16 key_flags; int ret; u32 status; u16 keyidx; u64 pn = 0; int i, size; bool new_api = fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_TKIP_MIC_KEYS); if (sta_id == IWL_MVM_INVALID_STA) return -EINVAL; keyidx = (key->keyidx << STA_KEY_FLG_KEYID_POS) & STA_KEY_FLG_KEYID_MSK; key_flags = cpu_to_le16(keyidx); key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP); switch (key->cipher) { case WLAN_CIPHER_SUITE_TKIP: key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP); if (new_api) { memcpy((void *)&u.cmd.tx_mic_key, &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY], IWL_MIC_KEY_SIZE); memcpy((void *)&u.cmd.rx_mic_key, &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY], IWL_MIC_KEY_SIZE); pn = atomic64_read(&key->tx_pn); } else { u.cmd_v1.tkip_rx_tsc_byte2 = tkip_iv32; for (i = 0; i < 5; i++) u.cmd_v1.tkip_rx_ttak[i] = cpu_to_le16(tkip_p1k[i]); } memcpy(u.cmd.common.key, key->key, key->keylen); break; case WLAN_CIPHER_SUITE_CCMP: key_flags |= cpu_to_le16(STA_KEY_FLG_CCM); memcpy(u.cmd.common.key, key->key, key->keylen); if (new_api) pn = atomic64_read(&key->tx_pn); break; case WLAN_CIPHER_SUITE_WEP104: key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES); /* fall through */ case WLAN_CIPHER_SUITE_WEP40: key_flags |= cpu_to_le16(STA_KEY_FLG_WEP); memcpy(u.cmd.common.key + 3, key->key, key->keylen); break; case WLAN_CIPHER_SUITE_GCMP_256: key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES); /* fall through */ case WLAN_CIPHER_SUITE_GCMP: key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP); memcpy(u.cmd.common.key, key->key, key->keylen); if (new_api) pn = atomic64_read(&key->tx_pn); break; default: key_flags |= cpu_to_le16(STA_KEY_FLG_EXT); memcpy(u.cmd.common.key, key->key, key->keylen); } if (mcast) key_flags |= cpu_to_le16(STA_KEY_MULTICAST); if (mfp) key_flags |= cpu_to_le16(STA_KEY_MFP); u.cmd.common.key_offset = key_offset; u.cmd.common.key_flags = key_flags; u.cmd.common.sta_id = sta_id; if (new_api) { u.cmd.transmit_seq_cnt = cpu_to_le64(pn); size = sizeof(u.cmd); } else { size = sizeof(u.cmd_v1); } status = ADD_STA_SUCCESS; if (cmd_flags & CMD_ASYNC) ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC, size, &u.cmd); else ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, &u.cmd, &status); switch (status) { case ADD_STA_SUCCESS: IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n"); break; default: ret = -EIO; IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n"); break; } return ret; } static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm, struct ieee80211_key_conf *keyconf, u8 sta_id, bool remove_key) { struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {}; /* verify the key details match the required command's expectations */ if (WARN_ON((keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) || (keyconf->keyidx != 4 && keyconf->keyidx != 5) || (keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC && keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_128 && keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_256))) return -EINVAL; if (WARN_ON(!iwl_mvm_has_new_rx_api(mvm) && keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC)) return -EINVAL; igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx); igtk_cmd.sta_id = cpu_to_le32(sta_id); if (remove_key) { igtk_cmd.ctrl_flags |= 
cpu_to_le32(STA_KEY_NOT_VALID); } else { struct ieee80211_key_seq seq; const u8 *pn; switch (keyconf->cipher) { case WLAN_CIPHER_SUITE_AES_CMAC: igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM); break; case WLAN_CIPHER_SUITE_BIP_GMAC_128: case WLAN_CIPHER_SUITE_BIP_GMAC_256: igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_GCMP); break; default: return -EINVAL; } memcpy(igtk_cmd.igtk, keyconf->key, keyconf->keylen); if (keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_KEY_32BYTES); ieee80211_get_key_rx_seq(keyconf, 0, &seq); pn = seq.aes_cmac.pn; igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) | ((u64) pn[4] << 8) | ((u64) pn[3] << 16) | ((u64) pn[2] << 24) | ((u64) pn[1] << 32) | ((u64) pn[0] << 40)); } IWL_DEBUG_INFO(mvm, "%s igtk for sta %u\n", remove_key ? "removing" : "installing", igtk_cmd.sta_id); if (!iwl_mvm_has_new_rx_api(mvm)) { struct iwl_mvm_mgmt_mcast_key_cmd_v1 igtk_cmd_v1 = { .ctrl_flags = igtk_cmd.ctrl_flags, .key_id = igtk_cmd.key_id, .sta_id = igtk_cmd.sta_id, .receive_seq_cnt = igtk_cmd.receive_seq_cnt }; memcpy(igtk_cmd_v1.igtk, igtk_cmd.igtk, ARRAY_SIZE(igtk_cmd_v1.igtk)); return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0, sizeof(igtk_cmd_v1), &igtk_cmd_v1); } return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0, sizeof(igtk_cmd), &igtk_cmd); } static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); if (sta) return sta->addr; if (vif->type == NL80211_IFTYPE_STATION && mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) { u8 sta_id = mvmvif->ap_sta_id; sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], lockdep_is_held(&mvm->mutex)); return sta->addr; } return NULL; } static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *keyconf, u8 key_offset, bool mcast) { int ret; const u8 *addr; struct ieee80211_key_seq seq; u16 p1k[5]; u32 sta_id; bool mfp = false; if (sta) { struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); sta_id = mvm_sta->sta_id; mfp = sta->mfp; } else if (vif->type == NL80211_IFTYPE_AP && !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); sta_id = mvmvif->mcast_sta.sta_id; } else { IWL_ERR(mvm, "Failed to find station id\n"); return -EINVAL; } switch (keyconf->cipher) { case WLAN_CIPHER_SUITE_TKIP: addr = iwl_mvm_get_mac_addr(mvm, vif, sta); /* get phase 1 key from mac80211 */ ieee80211_get_key_rx_seq(keyconf, 0, &seq); ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k); ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast, seq.tkip.iv32, p1k, 0, key_offset, mfp); break; case WLAN_CIPHER_SUITE_CCMP: case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast, 0, NULL, 0, key_offset, mfp); break; default: ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast, 0, NULL, 0, key_offset, mfp); } return ret; } int iwl_mvm_set_sta_key(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *keyconf, u8 key_offset) { bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE); struct iwl_mvm_sta *mvm_sta; u8 sta_id = IWL_MVM_INVALID_STA; int ret; static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0}; lockdep_assert_held(&mvm->mutex); if (vif->type != 
NL80211_IFTYPE_AP || keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) { /* Get the station id from the mvm local station table */ mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta); if (!mvm_sta) { IWL_ERR(mvm, "Failed to find station\n"); return -EINVAL; } sta_id = mvm_sta->sta_id; /* * It is possible that the 'sta' parameter is NULL, and thus * there is a need to retrieve the sta from the local station * table. */ if (!sta) { sta = rcu_dereference_protected( mvm->fw_id_to_mac_id[sta_id], lockdep_is_held(&mvm->mutex)); if (IS_ERR_OR_NULL(sta)) { IWL_ERR(mvm, "Invalid station id\n"); return -EINVAL; } } if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif)) return -EINVAL; } else { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); sta_id = mvmvif->mcast_sta.sta_id; } if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC || keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 || keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) { ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false); goto end; } /* If the key_offset is not pre-assigned, we need to find a * new offset to use. In normal cases, the offset is not * pre-assigned, but during HW_RESTART we want to reuse the * same indices, so we pass them when this function is called. * * In D3 entry, we need to hardcoded the indices (because the * firmware hardcodes the PTK offset to 0). In this case, we * need to make sure we don't overwrite the hw_key_idx in the * keyconf structure, because otherwise we cannot configure * the original ones back when resuming. */ if (key_offset == STA_KEY_IDX_INVALID) { key_offset = iwl_mvm_set_fw_key_idx(mvm); if (key_offset == STA_KEY_IDX_INVALID) return -ENOSPC; keyconf->hw_key_idx = key_offset; } ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast); if (ret) goto end; /* * For WEP, the same key is used for multicast and unicast. Upload it * again, using the same key offset, and now pointing the other one * to the same key slot (offset). * If this fails, remove the original as well. */ if ((keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 || keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) && sta) { ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, !mcast); if (ret) { __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast); goto end; } } __set_bit(key_offset, mvm->fw_key_table); end: IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n", keyconf->cipher, keyconf->keylen, keyconf->keyidx, sta ? 
sta->addr : zero_addr, ret); return ret; } int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *keyconf) { bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE); struct iwl_mvm_sta *mvm_sta; u8 sta_id = IWL_MVM_INVALID_STA; int ret, i; lockdep_assert_held(&mvm->mutex); /* Get the station from the mvm local station table */ mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta); if (mvm_sta) sta_id = mvm_sta->sta_id; else if (!sta && vif->type == NL80211_IFTYPE_AP && mcast) sta_id = iwl_mvm_vif_from_mac80211(vif)->mcast_sta.sta_id; IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n", keyconf->keyidx, sta_id); if (mvm_sta && (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC || keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 || keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)) return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true); if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) { IWL_ERR(mvm, "offset %d not used in fw key table.\n", keyconf->hw_key_idx); return -ENOENT; } /* track which key was deleted last */ for (i = 0; i < STA_KEY_MAX_NUM; i++) { if (mvm->fw_key_deleted[i] < U8_MAX) mvm->fw_key_deleted[i]++; } mvm->fw_key_deleted[keyconf->hw_key_idx] = 0; if (sta && !mvm_sta) { IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n"); return 0; } ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast); if (ret) return ret; /* delete WEP key twice to get rid of (now useless) offset */ if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 || keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast); return ret; } void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_key_conf *keyconf, struct ieee80211_sta *sta, u32 iv32, u16 *phase1key) { struct iwl_mvm_sta *mvm_sta; bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE); bool mfp = sta ? sta->mfp : false; rcu_read_lock(); mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta); if (WARN_ON_ONCE(!mvm_sta)) goto unlock; iwl_mvm_send_sta_key(mvm, mvm_sta->sta_id, keyconf, mcast, iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx, mfp); unlock: rcu_read_unlock(); } void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm, struct ieee80211_sta *sta) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_add_sta_cmd cmd = { .add_modify = STA_MODE_MODIFY, .sta_id = mvmsta->sta_id, .station_flags_msk = cpu_to_le32(STA_FLG_PS), .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color), }; int ret; ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, iwl_mvm_add_sta_cmd_size(mvm), &cmd); if (ret) IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret); } void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm, struct ieee80211_sta *sta, enum ieee80211_frame_release_type reason, u16 cnt, u16 tids, bool more_data, bool single_sta_queue) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_add_sta_cmd cmd = { .add_modify = STA_MODE_MODIFY, .sta_id = mvmsta->sta_id, .modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT, .sleep_tx_count = cpu_to_le16(cnt), .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color), }; int tid, ret; unsigned long _tids = tids; /* convert TIDs to ACs - we don't support TSPEC so that's OK * Note that this field is reserved and unused by firmware not * supporting GO uAPSD, so it's safe to always do this. 
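 *
 * Worked example (editor's illustration): tids == 0x05 selects TID 0
 * and TID 2; tid_to_ucode_ac[] above maps them to AC_BE and AC_BK, so
 * the loop below ends up with awake_acs = BIT(AC_BE) | BIT(AC_BK).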
*/ for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]); /* If we're releasing frames from aggregation or dqa queues then check * if all the queues that we're releasing frames from, combined, have: * - more frames than the service period, in which case more_data * needs to be set * - fewer than 'cnt' frames, in which case we need to adjust the * firmware command (but do that unconditionally) */ if (single_sta_queue) { int remaining = cnt; int sleep_tx_count; spin_lock_bh(&mvmsta->lock); for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) { struct iwl_mvm_tid_data *tid_data; u16 n_queued; tid_data = &mvmsta->tid_data[tid]; n_queued = iwl_mvm_tid_queued(mvm, tid_data); if (n_queued > remaining) { more_data = true; remaining = 0; break; } remaining -= n_queued; } sleep_tx_count = cnt - remaining; if (reason == IEEE80211_FRAME_RELEASE_UAPSD) mvmsta->sleep_tx_count = sleep_tx_count; spin_unlock_bh(&mvmsta->lock); cmd.sleep_tx_count = cpu_to_le16(sleep_tx_count); if (WARN_ON(cnt - remaining == 0)) { ieee80211_sta_eosp(sta); return; } } /* Note: this is ignored by firmware not supporting GO uAPSD */ if (more_data) cmd.sleep_state_flags |= STA_SLEEP_STATE_MOREDATA; if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) { mvmsta->next_status_eosp = true; cmd.sleep_state_flags |= STA_SLEEP_STATE_PS_POLL; } else { cmd.sleep_state_flags |= STA_SLEEP_STATE_UAPSD; } /* block the Tx queues until the FW updated the sleep Tx count */ iwl_trans_block_txq_ptrs(mvm->trans, true); ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC | CMD_WANT_ASYNC_CALLBACK, iwl_mvm_add_sta_cmd_size(mvm), &cmd); if (ret) IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret); } void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_mvm_eosp_notification *notif = (void *)pkt->data; struct ieee80211_sta *sta; u32 sta_id = le32_to_cpu(notif->sta_id); if (WARN_ON_ONCE(sta_id >= IWL_MVM_STATION_COUNT)) return; rcu_read_lock(); sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); if (!IS_ERR_OR_NULL(sta)) ieee80211_sta_eosp(sta); rcu_read_unlock(); } void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, bool disable) { struct iwl_mvm_add_sta_cmd cmd = { .add_modify = STA_MODE_MODIFY, .sta_id = mvmsta->sta_id, .station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0, .station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX), .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color), }; int ret; ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, iwl_mvm_add_sta_cmd_size(mvm), &cmd); if (ret) IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret); } void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm, struct ieee80211_sta *sta, bool disable) { struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); spin_lock_bh(&mvm_sta->lock); if (mvm_sta->disable_tx == disable) { spin_unlock_bh(&mvm_sta->lock); return; } mvm_sta->disable_tx = disable; /* Tell mac80211 to start/stop queuing tx for this station */ ieee80211_sta_block_awake(mvm->hw, sta, disable); iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable); spin_unlock_bh(&mvm_sta->lock); } static void iwl_mvm_int_sta_modify_disable_tx(struct iwl_mvm *mvm, struct iwl_mvm_vif *mvmvif, struct iwl_mvm_int_sta *sta, bool disable) { u32 id = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color); struct iwl_mvm_add_sta_cmd cmd = { .add_modify = STA_MODE_MODIFY, .sta_id = sta->sta_id, .station_flags = disable ? 
cpu_to_le32(STA_FLG_DISABLE_TX) : 0, .station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX), .mac_id_n_color = cpu_to_le32(id), }; int ret; ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, 0, iwl_mvm_add_sta_cmd_size(mvm), &cmd); if (ret) IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret); } void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm, struct iwl_mvm_vif *mvmvif, bool disable) { struct ieee80211_sta *sta; struct iwl_mvm_sta *mvm_sta; int i; lockdep_assert_held(&mvm->mutex); /* Block/unblock all the stations of the given mvmvif */ for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) { sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], lockdep_is_held(&mvm->mutex)); if (IS_ERR_OR_NULL(sta)) continue; mvm_sta = iwl_mvm_sta_from_mac80211(sta); if (mvm_sta->mac_id_n_color != FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)) continue; iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable); } if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) return; /* Need to block/unblock also multicast station */ if (mvmvif->mcast_sta.sta_id != IWL_MVM_INVALID_STA) iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif, &mvmvif->mcast_sta, disable); /* * Only unblock the broadcast station (FW blocks it for immediate * quiet, not the driver) */ if (!disable && mvmvif->bcast_sta.sta_id != IWL_MVM_INVALID_STA) iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif, &mvmvif->bcast_sta, disable); } void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_sta *mvmsta; rcu_read_lock(); mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id); if (!WARN_ON(!mvmsta)) iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true); rcu_read_unlock(); } u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data) { u16 sn = IEEE80211_SEQ_TO_SN(tid_data->seq_number); /* * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need * to align the wrap around of ssn so we compare relevant values. */ if (mvm->trans->cfg->gen2) sn &= 0xff; return ieee80211_sn_sub(sn, tid_data->next_reclaimed); } backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/mvm/sta.h000066400000000000000000000603001351064634500263250ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2016 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2016 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/
#ifndef __sta_h__ #define __sta_h__ #include <linux/spinlock.h> #include <net/mac80211.h> #include <linux/wait.h> #include "iwl-trans.h" /* for IWL_MAX_TID_COUNT */ #include "fw-api.h" /* IWL_MVM_STATION_COUNT */ #include "rs.h" struct iwl_mvm; struct iwl_mvm_vif;
/** * DOC: DQA - Dynamic Queue Allocation - introduction * * Dynamic Queue Allocation (AKA "DQA") is a feature implemented in the iwlwifi * driver to allow dynamic allocation of queues on-demand, rather than allocating * them statically ahead of time. Ideally, we would like to allocate one queue * per RA/TID, thus allowing an AP - for example - to send BE traffic to STA2 * even if it also needs to send traffic to a sleeping STA1, without being * blocked by the sleeping station. * * Although the queues in DQA mode are dynamically allocated, there are still * some queues that are statically allocated: * TXQ #0 - command queue * TXQ #1 - aux frames * TXQ #2 - P2P device frames * TXQ #3 - P2P GO/SoftAP GCAST/BCAST frames * TXQ #4 - BSS DATA frames queue * TXQ #5-8 - Non-QoS and MGMT frames queue pool * TXQ #9 - P2P GO/SoftAP probe responses * TXQ #10-31 - DATA frames queue pool * The queues are dynamically taken from either the MGMT frames queue pool or * the DATA frames one. See the %iwl_mvm_dqa_txq for more information on every * queue. * * When a frame for a previously unseen RA/TID comes in, it needs to be deferred * until a queue is allocated for it, and only then can it be TXed. Therefore, it * is placed into %iwl_mvm_tid_data.deferred_tx_frames, and a worker called * %mvm->add_stream_wk later allocates the queues and TXes the deferred frames. * * For convenience, MGMT is considered as if it has TID=8, and goes to the MGMT * queues in the pool. If there is no longer a free MGMT queue to allocate, a * queue will be allocated from the DATA pool instead.
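 *
 * A minimal sketch of how a dynamic DATA queue is taken from the pool
 * (editor's illustration, mirroring iwl_mvm_sta_tx_agg_start() in sta.c
 * above; not a verbatim excerpt):
 *
 *	txq_id = iwl_mvm_find_free_queue(mvm, sta_id,
 *					 IWL_MVM_DQA_MIN_DATA_QUEUE,
 *					 IWL_MVM_DQA_MAX_DATA_QUEUE);
 *	if (txq_id < 0)
 *		return txq_id;          (pool exhausted, addition fails)
 *	mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;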
Since QoS NDPs can create * a problem for aggregations, they too will use a MGMT queue. * * When adding a STA, a DATA queue is reserved for it so that it can TX from * it. If no such free queue exists for reserving, the STA addition will fail. * * If the DATA queue pool gets exhausted, no new STA will be accepted, and if a * new RA/TID comes in for an existing STA, one of the STA's queues will become * shared and will serve more than the single TID (but always for the same RA!). * * When a RA/TID needs to become aggregated, no new queue needs to be * allocated; the queue is only marked as aggregated via the ADD_STA command. Note, * however, that a shared queue cannot be aggregated, and only after the other * TIDs become inactive and are removed - only then can the queue be * reconfigured and become aggregated. * * When removing a station, its queues are returned to the pool for reuse. Here * we also need to make sure that we are synced with the worker thread that TXes * the deferred frames so we don't get into a situation where the queues are * removed and then the worker puts deferred frames onto the released queues or * tries to allocate new queues for a STA we don't need anymore. */
/** * DOC: station table - introduction * * The station table is a list of data structures that represent the stations. * In STA/P2P client mode, the driver will hold one station for the AP/GO. * In GO/AP mode, the driver will have as many stations as associated clients. * All these stations are reflected in the fw's station table. The driver * keeps the fw's station table up to date with the ADD_STA command. Stations * can be removed by the REMOVE_STA command. * * All the data related to a station is held in the structure %iwl_mvm_sta * which is embedded in mac80211's %ieee80211_sta (in the drv_priv area). * This data includes the index of the station in the fw, per tid information * (sequence numbers, Block-ack state machine, etc...). The stations are * created and deleted by the %sta_state callback from %ieee80211_ops. * * The driver holds a map: %fw_id_to_mac_id that allows fetching a * %ieee80211_sta (and the %iwl_mvm_sta embedded into it) based on a fw * station index. That way, the driver is able to get the tid-related data in * O(1) in time-sensitive paths (Tx / Tx response / BA notification). These * paths are triggered by the fw, and the driver needs to get a pointer to the * %ieee80211_sta structure. This map helps to get that pointer quickly. */
/** * DOC: station table - locking * * As stated before, the station is created / deleted by mac80211's %sta_state * callback from %ieee80211_ops which can sleep. The next paragraph explains * the locking of a single station, the following ones relate to the station * table. * * The station holds the sequence number per tid. So this data needs to be * accessed in the Tx path (which is softIRQ). It also holds the Block-Ack * information (the state machine and the logic that checks if the queues * were drained), so it also needs to be accessible from the Tx response flow. * In short, the station needs to be accessed from sleepable context as well as * from tasklets, so the station itself needs a spinlock. * * The writers of the %fw_id_to_mac_id map are serialized by the global mutex of * the mvm op_mode. This is possible since %sta_state can sleep. * The pointers in this map are RCU protected, hence we won't replace the * station while we have Tx / Tx response / BA notification running.
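 *
 * A minimal sketch of the RCU read-side lookup this enables (editor's
 * illustration; the same pattern appears in iwl_mvm_rx_eosp_notif() in
 * sta.c):
 *
 *	rcu_read_lock();
 *	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
 *	if (!IS_ERR_OR_NULL(sta))
 *		mvmsta = iwl_mvm_sta_from_mac80211(sta);
 *	rcu_read_unlock();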
* * If a station is deleted while it still has packets in its A-MPDU queues, * then the reclaim flow will notice that there is no station in the map for * sta_id and it will dump the responses. */
/** * DOC: station table - internal stations * * The FW needs a few internal stations that are not reflected in * mac80211, such as the broadcast station in AP / GO mode, or the AUX sta for * scanning and the P2P device (during the GO negotiation). * For these kinds of stations we have the %iwl_mvm_int_sta struct which holds the * data relevant for them from both %iwl_mvm_sta and %ieee80211_sta. * Usually the data for these stations is static, so no locking is required, * and no TID data is kept, as it is not needed either. * One thing to note is that these stations have an ID in the fw, but not * in mac80211. In order to "reserve" them a sta_id in %fw_id_to_mac_id, * we fill ERR_PTR(EINVAL) in this mapping, and any dereferencing of * pointers from this mapping needs to check that the value is not an error * or NULL. * * Currently there is only one auxiliary station for scanning, initialized * on init. */
/** * DOC: station table - AP Station in STA mode * * %iwl_mvm_vif includes the index of the AP station in the fw's STA table: * %ap_sta_id. To get the pointer to the corresponding %ieee80211_sta, * &fw_id_to_mac_id can be used. Due to the way the fw works, we must not remove * the AP station from the fw before setting the MAC context as unassociated. * Hence, %fw_id_to_mac_id[%ap_sta_id] will be NULLed when the AP station is * removed by mac80211, but the station won't be removed in the fw until the * VIF is set as unassociated. Then, %ap_sta_id will be invalidated. */
/** * DOC: station table - Drain vs. Flush * * Flush means that all the frames in the SCD queue are dumped regardless of the * station to which they were sent. We do that when we disassociate and before * we remove the STA of the AP. The flush can be done synchronously against the * fw. * Drain means that the fw will drop all the frames sent to a specific station. * This is useful when a client (if we are IBSS / GO or AP) disassociates. */
/** * DOC: station table - fw restart * * When the fw asserts, or we have any other issue that requires resetting the * driver, we require mac80211 to reconfigure the driver. Since the private * data of the stations is embedded in mac80211's %ieee80211_sta, that data will * not be zeroed and needs to be reinitialized manually. * %IWL_MVM_STATUS_IN_HW_RESTART is set during restart and hints to us * that we must not allocate a new sta_id but reuse the previous one. This * means that the stations being re-added after the reset will have the same * place in the fw as before the reset. We do need to zero the %fw_id_to_mac_id * map, since the stations aren't in the fw any more. Internal stations that * are not added by mac80211 will be re-added in the init flow that is called * after the restart: mac80211 calls %iwl_mvm_mac_start, which calls * %iwl_mvm_up. */
/** * DOC: AP mode - PS * * When a station is asleep, the fw will set it as "asleep". All frames on * shared queues (i.e. non-aggregation queues) to that station will be dropped * by the fw (%TX_STATUS_FAIL_DEST_PS failure code). * * AMPDUs are in a separate queue that is stopped by the fw. We just need to * let mac80211 know when there are frames in these queues so that it can * properly handle trigger frames.
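 *
 * Editor's sketch (hedged - see "AP support for powersaving clients" in
 * mac80211.h for the authoritative contract): the mac80211 entry point
 * for letting it know about driver-buffered frames is
 * ieee80211_sta_set_buffered(), e.g.
 *
 *	ieee80211_sta_set_buffered(sta, tid, true);	frames pending
 *	ieee80211_sta_set_buffered(sta, tid, false);	queue drained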
* * When a trigger frame is received, mac80211 tells the driver to send frames * from the AMPDU queues or sends frames to non-aggregation queues itself, * depending on which ACs are delivery-enabled and what TID has frames to * transmit. Note that mac80211 has all the knowledge since all the non-agg * frames are buffered / filtered, and the driver tells mac80211 about agg * frames). The driver needs to tell the fw to let frames out even if the * station is asleep. This is done by %iwl_mvm_sta_modify_sleep_tx_count. * * When we receive a frame from that station with PM bit unset, the driver * needs to let the fw know that this station isn't asleep any more. This is * done by %iwl_mvm_sta_modify_ps_wake in response to mac80211 signaling the * station's wakeup. * * For a GO, the Service Period might be cut short due to an absence period * of the GO. In this (and all other cases) the firmware notifies us with the * EOSP_NOTIFICATION, and we notify mac80211 of that. Further frames that we * already sent to the device will be rejected again. * * See also "AP support for powersaving clients" in mac80211.h. */ /** * enum iwl_mvm_agg_state * * The state machine of the BA agreement establishment / tear down. * These states relate to a specific RA / TID. * * @IWL_AGG_OFF: aggregation is not used * @IWL_AGG_QUEUED: aggregation start work has been queued * @IWL_AGG_STARTING: aggregation are starting (between start and oper) * @IWL_AGG_ON: aggregation session is up * @IWL_EMPTYING_HW_QUEUE_ADDBA: establishing a BA session - waiting for the * HW queue to be empty from packets for this RA /TID. * @IWL_EMPTYING_HW_QUEUE_DELBA: tearing down a BA session - waiting for the * HW queue to be empty from packets for this RA /TID. */ enum iwl_mvm_agg_state { IWL_AGG_OFF = 0, IWL_AGG_QUEUED, IWL_AGG_STARTING, IWL_AGG_ON, IWL_EMPTYING_HW_QUEUE_ADDBA, IWL_EMPTYING_HW_QUEUE_DELBA, }; /** * struct iwl_mvm_tid_data - holds the states for each RA / TID * @seq_number: the next WiFi sequence number to use * @next_reclaimed: the WiFi sequence number of the next packet to be acked. * This is basically (last acked packet++). * @rate_n_flags: Rate at which Tx was attempted. Holds the data between the * Tx response (TX_CMD), and the block ack notification (COMPRESSED_BA). * @lq_color: the color of the LQ command as it appears in tx response. * @amsdu_in_ampdu_allowed: true if A-MSDU in A-MPDU is allowed. * @state: state of the BA agreement establishment / tear down. * @txq_id: Tx queue used by the BA session / DQA * @ssn: the first packet to be sent in AGG HW queue in Tx AGG start flow, or * the first packet to be sent in legacy HW queue in Tx AGG stop flow. * Basically when next_reclaimed reaches ssn, we can tell mac80211 that * we are ready to finish the Tx AGG stop / start flow. 
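 *	(Editor's note: on 22000 HW, next_reclaimed is only 8 bits wide,
 *	so the flows above first normalize the ssn with "ssn & 0xff"
 *	before comparing - e.g. an ssn of 0x134 is compared as 0x34; see
 *	iwl_mvm_sta_tx_agg_start() and iwl_mvm_tid_queued() in sta.c.)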
* @tx_time: medium time consumed by this A-MPDU * @tpt_meas_start: time when the throughput measurements started; reset every HZ * @tx_count_last: number of frames transmitted during the last second * @tx_count: counts the number of frames transmitted since the last reset of * tpt_meas_start */ struct iwl_mvm_tid_data { u16 seq_number; u16 next_reclaimed; /* The rest is Tx AGG related */ u32 rate_n_flags; u8 lq_color; bool amsdu_in_ampdu_allowed; enum iwl_mvm_agg_state state; u16 txq_id; u16 ssn; u16 tx_time; unsigned long tpt_meas_start; u32 tx_count_last; u32 tx_count; };
struct iwl_mvm_key_pn { struct rcu_head rcu_head; struct { u8 pn[IWL_MAX_TID_COUNT][IEEE80211_CCMP_PN_LEN]; } ____cacheline_aligned_in_smp q[]; }; struct iwl_mvm_delba_data { u32 baid; } __packed; struct iwl_mvm_nssn_sync_data { u32 baid; u32 nssn; } __packed; struct iwl_mvm_rss_sync_notif { struct iwl_mvm_internal_rxq_notif metadata; union { struct iwl_mvm_delba_data delba; struct iwl_mvm_nssn_sync_data nssn_sync; }; } __packed;
/** * struct iwl_mvm_rxq_dup_data - per station per rx queue data * @last_seq: last sequence per tid for duplicate packet detection * @last_sub_frame: last subframe packet */ struct iwl_mvm_rxq_dup_data { __le16 last_seq[IWL_MAX_TID_COUNT + 1]; u8 last_sub_frame[IWL_MAX_TID_COUNT + 1]; } ____cacheline_aligned_in_smp;
/** * struct iwl_mvm_sta - representation of a station in the driver * @sta_id: the index of the station in the fw (will be replaced by id_n_color) * @tfd_queue_msk: the tfd queues used by the station * @mac_id_n_color: the MAC context this station is linked to * @tid_disable_agg: bitmap: if bit(tid) is set, the fw won't send ampdus for * tid. * @max_agg_bufsize: the maximal size of the AGG buffer for this station * @sta_type: station type * @sta_state: station state according to enum %ieee80211_sta_state * @bt_reduced_txpower: is reduced tx power enabled for this station * @next_status_eosp: the next reclaimed packet is a PS-Poll response and * we need to signal the EOSP * @lock: lock to protect the whole struct. Since %tid_data is accessed from the Tx * and from the Tx response flow, it needs a spinlock. * @tid_data: per tid data + mgmt. Look at %iwl_mvm_tid_data. * @tid_to_baid: a simple map of TID to baid * @lq_sta: holds rate scaling data, either for the case when RS is done in * the driver - %rs_drv or in the FW - %rs_fw. * @reserved_queue: the queue reserved for this STA for DQA purposes * Every STA is given one reserved queue to allow it to operate. If no * such queue can be guaranteed, the STA addition will fail. * @tx_protection: reference counter for controlling the Tx protection. * @tt_tx_protection: is thermal throttling enabling Tx protection? * @disable_tx: is tx to this STA disabled? * @amsdu_enabled: bitmap of TX AMSDU allowed TIDs. * In case TLC offload is not active it is either 0xFFFF or 0. * @max_amsdu_len: max AMSDU length * @orig_amsdu_len: used to save the original amsdu_len when it is changed via * debugfs. If it's set to 0, it means that it is not set via * debugfs. * @agg_tids: bitmap of tids whose status is operational aggregated (IWL_AGG_ON) * @sleep_tx_count: the number of frames that we told the firmware to let out * even when that station is asleep. This is useful in case the queue * gets empty before all the frames were sent, which can happen when * we are sending frames from an AMPDU queue and there was a hole in * the BA window. To be used for UAPSD only.
* @ptk_pn: per-queue PTK PN data structures * @dup_data: per queue duplicate packet detection data * @deferred_traffic_tid_map: indication bitmap of deferred traffic per-TID * @tx_ant: the index of the antenna to use for data tx to this station. Only * used during connection establishment (e.g. for the 4 way handshake * exchange). * * When mac80211 creates a station it reserves some space (hw->sta_data_size) * in the structure for use by driver. This structure is placed in that * space. * */ struct iwl_mvm_sta { u32 sta_id; u32 tfd_queue_msk; u32 mac_id_n_color; u16 tid_disable_agg; u16 max_agg_bufsize; enum iwl_sta_type sta_type; enum ieee80211_sta_state sta_state; bool bt_reduced_txpower; bool next_status_eosp; spinlock_t lock; struct iwl_mvm_tid_data tid_data[IWL_MAX_TID_COUNT + 1]; u8 tid_to_baid[IWL_MAX_TID_COUNT]; union { struct iwl_lq_sta_rs_fw rs_fw; struct iwl_lq_sta rs_drv; } lq_sta; struct ieee80211_vif *vif; struct iwl_mvm_key_pn __rcu *ptk_pn[4]; struct iwl_mvm_rxq_dup_data *dup_data; u8 reserved_queue; /* Temporary, until the new TLC will control the Tx protection */ s8 tx_protection; bool tt_tx_protection; bool disable_tx; u16 amsdu_enabled; u16 max_amsdu_len; u16 orig_amsdu_len; bool sleeping; u8 agg_tids; u8 sleep_tx_count; u8 avg_energy; u8 tx_ant; }; u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data); static inline struct iwl_mvm_sta * iwl_mvm_sta_from_mac80211(struct ieee80211_sta *sta) { return (void *)sta->drv_priv; } /** * struct iwl_mvm_int_sta - representation of an internal station (auxiliary or * broadcast) * @sta_id: the index of the station in the fw (will be replaced by id_n_color) * @type: station type * @tfd_queue_msk: the tfd queues used by the station */ struct iwl_mvm_int_sta { u32 sta_id; enum iwl_sta_type type; u32 tfd_queue_msk; }; /** * Send the STA info to the FW. * * @mvm: the iwl_mvm* to use * @sta: the STA * @update: this is true if the FW is being updated about a STA it already knows * about. Otherwise (if this is a new STA), this should be false. * @flags: if update==true, this marks what is being changed via ORs of values * from enum iwl_sta_modify_flag. Otherwise, this is ignored. 
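 *
 * For example (editor's sketch), telling the fw that only the Tx queues
 * of an already-known station changed:
 *
 *	ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);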
*/ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta, bool update, unsigned int flags); int iwl_mvm_add_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta); static inline int iwl_mvm_update_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { return iwl_mvm_sta_send_to_fw(mvm, sta, true, 0); } int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvm_sta); int iwl_mvm_rm_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta); int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u8 sta_id); int iwl_mvm_set_sta_key(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *keyconf, u8 key_offset); int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *keyconf); void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_key_conf *keyconf, struct ieee80211_sta *sta, u32 iv32, u16 *phase1key); void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); /* AMPDU */ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, int tid, u16 ssn, bool start, u16 buf_size, u16 timeout); int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta, u16 tid, u16 *ssn); int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta, u16 tid, u16 buf_size, bool amsdu); int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta, u16 tid); int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta, u16 tid); int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, int tid, u8 queue, bool start); int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm); void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm); int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_add_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_rm_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta, u32 qmask, enum nl80211_iftype iftype, enum iwl_sta_type type); void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta); int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm); void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm, struct ieee80211_sta *sta); void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm, struct ieee80211_sta *sta, enum ieee80211_frame_release_type reason, u16 cnt, u16 tids, bool more_data, bool single_sta_queue); int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, bool drain); void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, bool disable); void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm, 
struct ieee80211_sta *sta, bool disable); void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm, struct iwl_mvm_vif *mvmvif, bool disable); void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif); void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk); #endif /* __sta_h__ */ backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c000066400000000000000000000556761351064634500265230ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2014 Intel Mobile Communications GmbH * Copyright(c) 2017 Intel Deutschland GmbH * Copyright(C) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2014 Intel Mobile Communications GmbH * Copyright(c) 2017 Intel Deutschland GmbH * Copyright(C) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* *****************************************************************************/ #include <linux/etherdevice.h> #include "mvm.h" #include "time-event.h" #include "iwl-io.h" #include "iwl-prph.h" #define TU_TO_US(x) (x * 1024) #define TU_TO_MS(x) (TU_TO_US(x) / 1000) void iwl_mvm_teardown_tdls_peers(struct iwl_mvm *mvm) { struct ieee80211_sta *sta; struct iwl_mvm_sta *mvmsta; int i; lockdep_assert_held(&mvm->mutex); for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) { sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], lockdep_is_held(&mvm->mutex)); if (!sta || IS_ERR(sta) || !sta->tdls) continue; mvmsta = iwl_mvm_sta_from_mac80211(sta); ieee80211_tdls_oper_request(mvmsta->vif, sta->addr, NL80211_TDLS_TEARDOWN, WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED, GFP_KERNEL); } } int iwl_mvm_tdls_sta_count(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct ieee80211_sta *sta; struct iwl_mvm_sta *mvmsta; int count = 0; int i; lockdep_assert_held(&mvm->mutex); for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) { sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], lockdep_is_held(&mvm->mutex)); if (!sta || IS_ERR(sta) || !sta->tdls) continue; if (vif) { mvmsta = iwl_mvm_sta_from_mac80211(sta); if (mvmsta->vif != vif) continue; } count++; } return count; } static void iwl_mvm_tdls_config(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_rx_packet *pkt; struct iwl_tdls_config_res *resp; struct iwl_tdls_config_cmd tdls_cfg_cmd = {}; struct iwl_host_cmd cmd = { .id = TDLS_CONFIG_CMD, .flags = CMD_WANT_SKB, .data = { &tdls_cfg_cmd, }, .len = { sizeof(struct iwl_tdls_config_cmd), }, }; struct ieee80211_sta *sta; int ret, i, cnt; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); lockdep_assert_held(&mvm->mutex); tdls_cfg_cmd.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)); tdls_cfg_cmd.tx_to_ap_tid = IWL_MVM_TDLS_FW_TID; tdls_cfg_cmd.tx_to_ap_ssn = cpu_to_le16(0); /* not used for now */ /* for now the Tx cmd is empty and unused */ /* populate TDLS peer data */ cnt = 0; for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) { sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], lockdep_is_held(&mvm->mutex)); if (IS_ERR_OR_NULL(sta) || !sta->tdls) continue; tdls_cfg_cmd.sta_info[cnt].sta_id = i; tdls_cfg_cmd.sta_info[cnt].tx_to_peer_tid = IWL_MVM_TDLS_FW_TID; tdls_cfg_cmd.sta_info[cnt].tx_to_peer_ssn = cpu_to_le16(0); tdls_cfg_cmd.sta_info[cnt].is_initiator = cpu_to_le32(sta->tdls_initiator ? 1 : 0); cnt++; } tdls_cfg_cmd.tdls_peer_count = cnt; IWL_DEBUG_TDLS(mvm, "send TDLS config to FW for %d peers\n", cnt); ret = iwl_mvm_send_cmd(mvm, &cmd); if (WARN_ON_ONCE(ret)) return; pkt = cmd.resp_pkt; WARN_ON_ONCE(iwl_rx_packet_payload_len(pkt) != sizeof(*resp)); /* we don't really care about the response at this point */ iwl_free_resp(&cmd); } void iwl_mvm_recalc_tdls_state(struct iwl_mvm *mvm, struct ieee80211_vif *vif, bool sta_added) { int tdls_sta_cnt = iwl_mvm_tdls_sta_count(mvm, vif); /* when the first peer joins, send a power update first */ if (tdls_sta_cnt == 1 && sta_added) iwl_mvm_power_update_mac(mvm); /* Configure the FW with TDLS peer info only if TDLS channel switch * capability is set. * TDLS config data is used currently only in TDLS channel switch code.
* Supposed to serve also TDLS buffer station which is not implemented * yet in FW. */ if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH)) iwl_mvm_tdls_config(mvm, vif); /* when the last peer leaves, send a power update last */ if (tdls_sta_cnt == 0 && !sta_added) iwl_mvm_power_update_mac(mvm); } void iwl_mvm_mac_mgd_protect_tdls_discover(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); u32 duration = 2 * vif->bss_conf.dtim_period * vif->bss_conf.beacon_int; mutex_lock(&mvm->mutex); /* Protect the session to hear the TDLS setup response on the channel */ iwl_mvm_protect_session(mvm, vif, duration, duration, 100, true); mutex_unlock(&mvm->mutex); } static const char * iwl_mvm_tdls_cs_state_str(enum iwl_mvm_tdls_cs_state state) { switch (state) { case IWL_MVM_TDLS_SW_IDLE: return "IDLE"; case IWL_MVM_TDLS_SW_REQ_SENT: return "REQ SENT"; case IWL_MVM_TDLS_SW_RESP_RCVD: return "RESP RECEIVED"; case IWL_MVM_TDLS_SW_REQ_RCVD: return "REQ RECEIVED"; case IWL_MVM_TDLS_SW_ACTIVE: return "ACTIVE"; } return NULL; } static void iwl_mvm_tdls_update_cs_state(struct iwl_mvm *mvm, enum iwl_mvm_tdls_cs_state state) { if (mvm->tdls_cs.state == state) return; IWL_DEBUG_TDLS(mvm, "TDLS channel switch state: %s -> %s\n", iwl_mvm_tdls_cs_state_str(mvm->tdls_cs.state), iwl_mvm_tdls_cs_state_str(state)); mvm->tdls_cs.state = state; /* we only send requests to our switching peer - update sent time */ if (state == IWL_MVM_TDLS_SW_REQ_SENT) mvm->tdls_cs.peer.sent_timestamp = iwl_mvm_get_systime(mvm); if (state == IWL_MVM_TDLS_SW_IDLE) mvm->tdls_cs.cur_sta_id = IWL_MVM_INVALID_STA; } void iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_tdls_channel_switch_notif *notif = (void *)pkt->data; struct ieee80211_sta *sta; unsigned int delay; struct iwl_mvm_sta *mvmsta; struct ieee80211_vif *vif; u32 sta_id = le32_to_cpu(notif->sta_id); lockdep_assert_held(&mvm->mutex); /* can fail sometimes */ if (!le32_to_cpu(notif->status)) { iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE); return; } if (WARN_ON(sta_id >= IWL_MVM_STATION_COUNT)) return; sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], lockdep_is_held(&mvm->mutex)); /* the station may not be here, but if it is, it must be a TDLS peer */ if (IS_ERR_OR_NULL(sta) || WARN_ON(!sta->tdls)) return; mvmsta = iwl_mvm_sta_from_mac80211(sta); vif = mvmsta->vif; /* * Update state and possibly switch again after this is over (DTIM). * Also convert TU to msec.
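 * For example, with an illustrative beacon_int of 100 TU and dtim_period of 2, delay = TU_TO_MS(200) = (200 * 1024) / 1000 = 204 ms; the real values come from the BSS configuration.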
*/ delay = TU_TO_MS(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int); mod_delayed_work(system_wq, &mvm->tdls_cs.dwork, msecs_to_jiffies(delay)); iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_ACTIVE); } static int iwl_mvm_tdls_check_action(struct iwl_mvm *mvm, enum iwl_tdls_channel_switch_type type, const u8 *peer, bool peer_initiator, u32 timestamp) { bool same_peer = false; int ret = 0; /* get the existing peer if it's there */ if (mvm->tdls_cs.state != IWL_MVM_TDLS_SW_IDLE && mvm->tdls_cs.cur_sta_id != IWL_MVM_INVALID_STA) { struct ieee80211_sta *sta = rcu_dereference_protected( mvm->fw_id_to_mac_id[mvm->tdls_cs.cur_sta_id], lockdep_is_held(&mvm->mutex)); if (!IS_ERR_OR_NULL(sta)) same_peer = ether_addr_equal(peer, sta->addr); } switch (mvm->tdls_cs.state) { case IWL_MVM_TDLS_SW_IDLE: /* * might be spurious packet from the peer after the switch is * already done */ if (type == TDLS_MOVE_CH) ret = -EINVAL; break; case IWL_MVM_TDLS_SW_REQ_SENT: /* only allow requests from the same peer */ if (!same_peer) ret = -EBUSY; else if (type == TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH && !peer_initiator) /* * We received a ch-switch request while an outgoing * one is pending. Allow it if the peer is the link * initiator. */ ret = -EBUSY; else if (type == TDLS_SEND_CHAN_SW_REQ) /* wait for idle before sending another request */ ret = -EBUSY; else if (timestamp <= mvm->tdls_cs.peer.sent_timestamp) /* we got a stale response - ignore it */ ret = -EINVAL; break; case IWL_MVM_TDLS_SW_RESP_RCVD: /* * we are waiting for the FW to give an "active" notification, * so ignore requests in the meantime */ ret = -EBUSY; break; case IWL_MVM_TDLS_SW_REQ_RCVD: /* as above, allow the link initiator to proceed */ if (type == TDLS_SEND_CHAN_SW_REQ) { if (!same_peer) ret = -EBUSY; else if (peer_initiator) /* they are the initiator */ ret = -EBUSY; } else if (type == TDLS_MOVE_CH) { ret = -EINVAL; } break; case IWL_MVM_TDLS_SW_ACTIVE: /* * the only valid request when active is a request to return * to the base channel by the current off-channel peer */ if (type != TDLS_MOVE_CH || !same_peer) ret = -EBUSY; break; } if (ret) IWL_DEBUG_TDLS(mvm, "Invalid TDLS action %d state %d peer %pM same_peer %d initiator %d\n", type, mvm->tdls_cs.state, peer, same_peer, peer_initiator); return ret; } static int iwl_mvm_tdls_config_channel_switch(struct iwl_mvm *mvm, struct ieee80211_vif *vif, enum iwl_tdls_channel_switch_type type, const u8 *peer, bool peer_initiator, u8 oper_class, struct cfg80211_chan_def *chandef, u32 timestamp, u16 switch_time, u16 switch_timeout, struct sk_buff *skb, u32 ch_sw_tm_ie) { struct ieee80211_sta *sta; struct iwl_mvm_sta *mvmsta; struct ieee80211_tx_info *info; struct ieee80211_hdr *hdr; struct iwl_tdls_channel_switch_cmd cmd = {0}; struct iwl_tdls_channel_switch_cmd_tail *tail = iwl_mvm_chan_info_cmd_tail(mvm, &cmd.ci); u16 len = sizeof(cmd) - iwl_mvm_chan_info_padding(mvm); int ret; lockdep_assert_held(&mvm->mutex); ret = iwl_mvm_tdls_check_action(mvm, type, peer, peer_initiator, timestamp); if (ret) return ret; if (!skb || WARN_ON(skb->len > IWL_TDLS_CH_SW_FRAME_MAX_SIZE)) { ret = -EINVAL; goto out; } cmd.switch_type = type; tail->timing.frame_timestamp = cpu_to_le32(timestamp); tail->timing.switch_time = cpu_to_le32(switch_time); tail->timing.switch_timeout = cpu_to_le32(switch_timeout); rcu_read_lock(); sta = ieee80211_find_sta(vif, peer); if (!sta) { rcu_read_unlock(); ret = -ENOENT; goto out; } mvmsta = iwl_mvm_sta_from_mac80211(sta); cmd.peer_sta_id = cpu_to_le32(mvmsta->sta_id); if (!chandef) 
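/* No chandef was passed in; derive one below from the switch state: the stored peer target channel while our own request is pending, or the base channel when returning from an active switch (TDLS_MOVE_CH). */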
{ if (mvm->tdls_cs.state == IWL_MVM_TDLS_SW_REQ_SENT && mvm->tdls_cs.peer.chandef.chan) { /* actually moving to the channel */ chandef = &mvm->tdls_cs.peer.chandef; } else if (mvm->tdls_cs.state == IWL_MVM_TDLS_SW_ACTIVE && type == TDLS_MOVE_CH) { /* we need to return to base channel */ struct ieee80211_chanctx_conf *chanctx = rcu_dereference(vif->chanctx_conf); if (WARN_ON_ONCE(!chanctx)) { rcu_read_unlock(); goto out; } chandef = &chanctx->def; } } if (chandef) iwl_mvm_set_chan_info_chandef(mvm, &cmd.ci, chandef); /* keep quota calculation simple for now - 50% of DTIM for TDLS */ tail->timing.max_offchan_duration = cpu_to_le32(TU_TO_US(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int) / 2); /* Switch time is the first element in the switch-timing IE. */ tail->frame.switch_time_offset = cpu_to_le32(ch_sw_tm_ie + 2); info = IEEE80211_SKB_CB(skb); hdr = (void *)skb->data; if (info->control.hw_key) { if (info->control.hw_key->cipher != WLAN_CIPHER_SUITE_CCMP) { rcu_read_unlock(); ret = -EINVAL; goto out; } iwl_mvm_set_tx_cmd_ccmp(info, &tail->frame.tx_cmd); } iwl_mvm_set_tx_cmd(mvm, skb, &tail->frame.tx_cmd, info, mvmsta->sta_id); iwl_mvm_set_tx_cmd_rate(mvm, &tail->frame.tx_cmd, info, sta, hdr->frame_control); rcu_read_unlock(); memcpy(tail->frame.data, skb->data, skb->len); ret = iwl_mvm_send_cmd_pdu(mvm, TDLS_CHANNEL_SWITCH_CMD, 0, len, &cmd); if (ret) { IWL_ERR(mvm, "Failed to send TDLS_CHANNEL_SWITCH cmd: %d\n", ret); goto out; } /* channel switch has started, update state */ if (type != TDLS_MOVE_CH) { mvm->tdls_cs.cur_sta_id = mvmsta->sta_id; iwl_mvm_tdls_update_cs_state(mvm, type == TDLS_SEND_CHAN_SW_REQ ? IWL_MVM_TDLS_SW_REQ_SENT : IWL_MVM_TDLS_SW_REQ_RCVD); } else { iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_RESP_RCVD); } out: /* channel switch failed - we are idle */ if (ret) iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE); return ret; } void iwl_mvm_tdls_ch_switch_work(struct work_struct *work) { struct iwl_mvm *mvm; struct ieee80211_sta *sta; struct iwl_mvm_sta *mvmsta; struct ieee80211_vif *vif; unsigned int delay; int ret; mvm = container_of(work, struct iwl_mvm, tdls_cs.dwork.work); mutex_lock(&mvm->mutex); /* called after an active channel switch has finished or timed-out */ iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE); /* station might be gone, in that case do nothing */ if (mvm->tdls_cs.peer.sta_id == IWL_MVM_INVALID_STA) goto out; sta = rcu_dereference_protected( mvm->fw_id_to_mac_id[mvm->tdls_cs.peer.sta_id], lockdep_is_held(&mvm->mutex)); /* the station may not be here, but if it is, it must be a TDLS peer */ if (!sta || IS_ERR(sta) || WARN_ON(!sta->tdls)) goto out; mvmsta = iwl_mvm_sta_from_mac80211(sta); vif = mvmsta->vif; ret = iwl_mvm_tdls_config_channel_switch(mvm, vif, TDLS_SEND_CHAN_SW_REQ, sta->addr, mvm->tdls_cs.peer.initiator, mvm->tdls_cs.peer.op_class, &mvm->tdls_cs.peer.chandef, 0, 0, 0, mvm->tdls_cs.peer.skb, mvm->tdls_cs.peer.ch_sw_tm_ie); if (ret) IWL_ERR(mvm, "Not sending TDLS channel switch: %d\n", ret); /* retry after a DTIM if we failed sending now */ delay = TU_TO_MS(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int); schedule_delayed_work(&mvm->tdls_cs.dwork, msecs_to_jiffies(delay)); out: mutex_unlock(&mvm->mutex); } int iwl_mvm_tdls_channel_switch(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, u8 oper_class, struct cfg80211_chan_def *chandef, struct sk_buff *tmpl_skb, u32 ch_sw_tm_ie) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_sta *mvmsta; unsigned int delay; 
int ret; mutex_lock(&mvm->mutex); IWL_DEBUG_TDLS(mvm, "TDLS channel switch with %pM ch %d width %d\n", sta->addr, chandef->chan->center_freq, chandef->width); /* we only support a single peer for channel switching */ if (mvm->tdls_cs.peer.sta_id != IWL_MVM_INVALID_STA) { IWL_DEBUG_TDLS(mvm, "Existing peer. Can't start switch with %pM\n", sta->addr); ret = -EBUSY; goto out; } ret = iwl_mvm_tdls_config_channel_switch(mvm, vif, TDLS_SEND_CHAN_SW_REQ, sta->addr, sta->tdls_initiator, oper_class, chandef, 0, 0, 0, tmpl_skb, ch_sw_tm_ie); if (ret) goto out; /* * Mark the peer as "in tdls switch" for this vif. We only allow a * single such peer per vif. */ mvm->tdls_cs.peer.skb = skb_copy(tmpl_skb, GFP_KERNEL); if (!mvm->tdls_cs.peer.skb) { ret = -ENOMEM; goto out; } mvmsta = iwl_mvm_sta_from_mac80211(sta); mvm->tdls_cs.peer.sta_id = mvmsta->sta_id; mvm->tdls_cs.peer.chandef = *chandef; mvm->tdls_cs.peer.initiator = sta->tdls_initiator; mvm->tdls_cs.peer.op_class = oper_class; mvm->tdls_cs.peer.ch_sw_tm_ie = ch_sw_tm_ie; /* * Wait for 2 DTIM periods before attempting the next switch. The next * switch will be made sooner if the current one completes before that. */ delay = 2 * TU_TO_MS(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int); mod_delayed_work(system_wq, &mvm->tdls_cs.dwork, msecs_to_jiffies(delay)); out: mutex_unlock(&mvm->mutex); return ret; } void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct ieee80211_sta *cur_sta; bool wait_for_phy = false; mutex_lock(&mvm->mutex); IWL_DEBUG_TDLS(mvm, "TDLS cancel channel switch with %pM\n", sta->addr); /* we only support a single peer for channel switching */ if (mvm->tdls_cs.peer.sta_id == IWL_MVM_INVALID_STA) { IWL_DEBUG_TDLS(mvm, "No ch switch peer - %pM\n", sta->addr); goto out; } cur_sta = rcu_dereference_protected( mvm->fw_id_to_mac_id[mvm->tdls_cs.peer.sta_id], lockdep_is_held(&mvm->mutex)); /* make sure it's the same peer */ if (cur_sta != sta) goto out; /* * If we're currently in a switch because of the now canceled peer, * wait a DTIM here to make sure the phy is back on the base channel. * We can't otherwise force it. */ if (mvm->tdls_cs.cur_sta_id == mvm->tdls_cs.peer.sta_id && mvm->tdls_cs.state != IWL_MVM_TDLS_SW_IDLE) wait_for_phy = true; mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA; dev_kfree_skb(mvm->tdls_cs.peer.skb); mvm->tdls_cs.peer.skb = NULL; out: mutex_unlock(&mvm->mutex); /* make sure the phy is on the base channel */ if (wait_for_phy) msleep(TU_TO_MS(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int)); /* flush the channel switch state */ flush_delayed_work(&mvm->tdls_cs.dwork); IWL_DEBUG_TDLS(mvm, "TDLS ending channel switch with %pM\n", sta->addr); } void iwl_mvm_tdls_recv_channel_switch(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_tdls_ch_sw_params *params) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); enum iwl_tdls_channel_switch_type type; unsigned int delay; const char *action_str = params->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST ? 
"REQ" : "RESP"; mutex_lock(&mvm->mutex); IWL_DEBUG_TDLS(mvm, "Received TDLS ch switch action %s from %pM status %d\n", action_str, params->sta->addr, params->status); /* * we got a non-zero status from a peer we were switching to - move to * the idle state and retry again later */ if (params->action_code == WLAN_TDLS_CHANNEL_SWITCH_RESPONSE && params->status != 0 && mvm->tdls_cs.state == IWL_MVM_TDLS_SW_REQ_SENT && mvm->tdls_cs.cur_sta_id != IWL_MVM_INVALID_STA) { struct ieee80211_sta *cur_sta; /* make sure it's the same peer */ cur_sta = rcu_dereference_protected( mvm->fw_id_to_mac_id[mvm->tdls_cs.cur_sta_id], lockdep_is_held(&mvm->mutex)); if (cur_sta == params->sta) { iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE); goto retry; } } type = (params->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST) ? TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH : TDLS_MOVE_CH; iwl_mvm_tdls_config_channel_switch(mvm, vif, type, params->sta->addr, params->sta->tdls_initiator, 0, params->chandef, params->timestamp, params->switch_time, params->switch_timeout, params->tmpl_skb, params->ch_sw_tm_ie); retry: /* register a timeout in case we don't succeed in switching */ delay = vif->bss_conf.dtim_period * vif->bss_conf.beacon_int * 1024 / 1000; mod_delayed_work(system_wq, &mvm->tdls_cs.dwork, msecs_to_jiffies(delay)); mutex_unlock(&mvm->mutex); } #ifdef CPTCFG_IWLMVM_TDLS_PEER_CACHE void iwl_mvm_tdls_peer_cache_pkt(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr, u32 len, int rxq) { struct iwl_mvm_tdls_peer_counter *cnt; u8 *addr; /* * To reduce code runtime and complexity, we don't check the packet * arrived on the correct vif, or even if the current vif is a station. * While it is theoretically possible for a TDLS peer to also be * connected to us in the capacity of a AP/GO, this will not happen * in practice. */ if (list_empty(&mvm->tdls_peer_cache_list)) return; if (len < sizeof(*hdr) || !ieee80211_is_data(hdr->frame_control)) return; addr = rxq < 0 ? ieee80211_get_DA(hdr) : ieee80211_get_SA(hdr); /* we rely on the Rx and Tx path mutual atomicity for the counters */ rcu_read_lock(); list_for_each_entry_rcu(cnt, &mvm->tdls_peer_cache_list, list) if (ether_addr_equal(cnt->mac.addr, addr)) { if (rxq < 0) cnt->tx_bytes += len; else cnt->rx[rxq].bytes += len; break; } rcu_read_unlock(); } void iwl_mvm_tdls_peer_cache_clear(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_tdls_peer_counter *cnt, *tmp; /* * mvm->mutex is held or the HW is already unregistered, barring * vendor commands that can change the list. */ list_for_each_entry_safe(cnt, tmp, &mvm->tdls_peer_cache_list, list) { if (vif && cnt->vif != vif) continue; mvm->tdls_peer_cache_cnt--; list_del_rcu(&cnt->list); kfree_rcu(cnt, rcu_head); } } /* requires RCU read side lock taken */ struct iwl_mvm_tdls_peer_counter * iwl_mvm_tdls_peer_cache_find(struct iwl_mvm *mvm, const u8 *addr) { struct iwl_mvm_tdls_peer_counter *cnt; list_for_each_entry_rcu(cnt, &mvm->tdls_peer_cache_list, list) if (memcmp(addr, cnt->mac.addr, ETH_ALEN) == 0) break; if (&cnt->list == &mvm->tdls_peer_cache_list) return NULL; return cnt; } #endif /* CPTCFG_IWLMVM_TDLS_PEER_CACHE */ backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c000066400000000000000000000672761351064634500276310ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. 
* * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #include <linux/jiffies.h> #include <net/mac80211.h> #include "fw/notif-wait.h" #include "iwl-trans.h" #include "fw-api.h" #include "time-event.h" #include "mvm.h" #include "iwl-io.h" #include "iwl-prph.h" /* * For the high priority TE use a time event type that has similar priority to * the FW's action scan priority.
*/ #define IWL_MVM_ROC_TE_TYPE_NORMAL TE_P2P_DEVICE_DISCOVERABLE #define IWL_MVM_ROC_TE_TYPE_MGMT_TX TE_P2P_CLIENT_ASSOC void iwl_mvm_te_clear_data(struct iwl_mvm *mvm, struct iwl_mvm_time_event_data *te_data) { lockdep_assert_held(&mvm->time_event_lock); if (!te_data || !te_data->vif) return; list_del(&te_data->list); te_data->running = false; te_data->uid = 0; te_data->id = TE_MAX; te_data->vif = NULL; } void iwl_mvm_roc_done_wk(struct work_struct *wk) { struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, roc_done_wk); /* * Clear the ROC_RUNNING /ROC_AUX_RUNNING status bit. * This will cause the TX path to drop offchannel transmissions. * That would also be done by mac80211, but it is racy, in particular * in the case that the time event actually completed in the firmware * (which is handled in iwl_mvm_te_handle_notif). */ clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status); clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status); synchronize_net(); /* * Flush the offchannel queue -- this is called when the time * event finishes or is canceled, so that frames queued for it * won't get stuck on the queue and be transmitted in the next * time event. * We have to send the command asynchronously since this cannot * be under the mutex for locking reasons, but that's not an * issue as it will have to complete before the next command is * executed, and a new time event means a new command. */ iwl_mvm_flush_sta(mvm, &mvm->aux_sta, true, CMD_ASYNC); /* Do the same for the P2P device queue (STA) */ if (test_and_clear_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status)) { struct iwl_mvm_vif *mvmvif; /* * NB: access to this pointer would be racy, but the flush bit * can only be set when we had a P2P-Device VIF, and we have a * flush of this work in iwl_mvm_prepare_mac_removal() so it's * not really racy. */ if (!WARN_ON(!mvm->p2p_device_vif)) { mvmvif = iwl_mvm_vif_from_mac80211(mvm->p2p_device_vif); iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true, CMD_ASYNC); } } } static void iwl_mvm_roc_finished(struct iwl_mvm *mvm) { /* * Of course, our status bit is just as racy as mac80211, so in * addition, fire off the work struct which will drop all frames * from the hardware queues that made it through the race. First * it will of course synchronize the TX path to make sure that * any *new* TX will be rejected. */ schedule_work(&mvm->roc_done_wk); } static void iwl_mvm_csa_noa_start(struct iwl_mvm *mvm) { struct ieee80211_vif *csa_vif; rcu_read_lock(); csa_vif = rcu_dereference(mvm->csa_vif); if (!csa_vif || !csa_vif->csa_active) goto out_unlock; IWL_DEBUG_TE(mvm, "CSA NOA started\n"); /* * CSA NoA is started but we still have beacons to * transmit on the current channel. * So we just do nothing here and the switch * will be performed on the last TBTT. 
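 * (TBTT: Target Beacon Transmission Time.)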
*/ if (!ieee80211_csa_is_complete(csa_vif)) { IWL_WARN(mvm, "CSA NOA started too early\n"); goto out_unlock; } ieee80211_csa_finish(csa_vif); rcu_read_unlock(); RCU_INIT_POINTER(mvm->csa_vif, NULL); return; out_unlock: rcu_read_unlock(); } static bool iwl_mvm_te_check_disconnect(struct iwl_mvm *mvm, struct ieee80211_vif *vif, const char *errmsg) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); if (vif->type != NL80211_IFTYPE_STATION) return false; if (!mvmvif->csa_bcn_pending && vif->bss_conf.assoc && vif->bss_conf.dtim_period) return false; if (errmsg) IWL_ERR(mvm, "%s\n", errmsg); iwl_mvm_connection_loss(mvm, vif, errmsg); return true; } static void iwl_mvm_te_handle_notify_csa(struct iwl_mvm *mvm, struct iwl_mvm_time_event_data *te_data, struct iwl_time_event_notif *notif) { struct ieee80211_vif *vif = te_data->vif; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); if (!notif->status) IWL_DEBUG_TE(mvm, "CSA time event failed to start\n"); switch (te_data->vif->type) { case NL80211_IFTYPE_AP: if (!notif->status) mvmvif->csa_failed = true; iwl_mvm_csa_noa_start(mvm); break; case NL80211_IFTYPE_STATION: if (!notif->status) { iwl_mvm_connection_loss(mvm, vif, "CSA TE failed to start"); break; } iwl_mvm_csa_client_absent(mvm, te_data->vif); cancel_delayed_work(&mvmvif->csa_work); ieee80211_chswitch_done(te_data->vif, true); break; default: /* should never happen */ WARN_ON_ONCE(1); break; } /* we don't need it anymore */ iwl_mvm_te_clear_data(mvm, te_data); } static void iwl_mvm_te_check_trigger(struct iwl_mvm *mvm, struct iwl_time_event_notif *notif, struct iwl_mvm_time_event_data *te_data) { struct iwl_fw_dbg_trigger_tlv *trig; struct iwl_fw_dbg_trigger_time_event *te_trig; int i; trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(te_data->vif), FW_DBG_TRIGGER_TIME_EVENT); if (!trig) return; te_trig = (void *)trig->data; for (i = 0; i < ARRAY_SIZE(te_trig->time_events); i++) { u32 trig_te_id = le32_to_cpu(te_trig->time_events[i].id); u32 trig_action_bitmap = le32_to_cpu(te_trig->time_events[i].action_bitmap); u32 trig_status_bitmap = le32_to_cpu(te_trig->time_events[i].status_bitmap); if (trig_te_id != te_data->id || !(trig_action_bitmap & le32_to_cpu(notif->action)) || !(trig_status_bitmap & BIT(le32_to_cpu(notif->status)))) continue; iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, "Time event %d Action 0x%x received status: %d", te_data->id, le32_to_cpu(notif->action), le32_to_cpu(notif->status)); break; } } /* * Handles a FW notification for an event that is known to the driver. * * @mvm: the mvm component * @te_data: the time event data * @notif: the notification data corresponding to the time event data. */ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm, struct iwl_mvm_time_event_data *te_data, struct iwl_time_event_notif *notif) { lockdep_assert_held(&mvm->time_event_lock); IWL_DEBUG_TE(mvm, "Handle time event notif - UID = 0x%x action %d\n", le32_to_cpu(notif->unique_id), le32_to_cpu(notif->action)); iwl_mvm_te_check_trigger(mvm, notif, te_data); /* * The FW sends the start/end time event notifications even for events * that it fails to schedule. This is indicated in the status field of * the notification. This happens in cases that the scheduler cannot * find a schedule that can handle the event (for example requesting * P2P Device discoverability, while there are other higher priority * events in the system).
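 * Such a failure arrives with notif->status == 0 and is handled right below, possibly tearing down the connection via iwl_mvm_te_check_disconnect().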
*/ if (!le32_to_cpu(notif->status)) { const char *msg; if (notif->action & cpu_to_le32(TE_V2_NOTIF_HOST_EVENT_START)) msg = "Time Event start notification failure"; else msg = "Time Event end notification failure"; IWL_DEBUG_TE(mvm, "%s\n", msg); if (iwl_mvm_te_check_disconnect(mvm, te_data->vif, msg)) { iwl_mvm_te_clear_data(mvm, te_data); return; } } if (le32_to_cpu(notif->action) & TE_V2_NOTIF_HOST_EVENT_END) { IWL_DEBUG_TE(mvm, "TE ended - current time %lu, estimated end %lu\n", jiffies, te_data->end_jiffies); switch (te_data->vif->type) { case NL80211_IFTYPE_P2P_DEVICE: ieee80211_remain_on_channel_expired(mvm->hw); set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status); iwl_mvm_roc_finished(mvm); break; case NL80211_IFTYPE_STATION: /* * By now, we should have finished association * and know the dtim period. */ iwl_mvm_te_check_disconnect(mvm, te_data->vif, "No beacon heard and the time event is over already..."); break; default: break; } iwl_mvm_te_clear_data(mvm, te_data); } else if (le32_to_cpu(notif->action) & TE_V2_NOTIF_HOST_EVENT_START) { te_data->running = true; te_data->end_jiffies = TU_TO_EXP_TIME(te_data->duration); if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) { set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status); ieee80211_ready_on_channel(mvm->hw); } else if (te_data->id == TE_CHANNEL_SWITCH_PERIOD) { iwl_mvm_te_handle_notify_csa(mvm, te_data, notif); } } else { IWL_WARN(mvm, "Got TE with unknown action\n"); } } /* * Handle an Aux ROC time event */ static int iwl_mvm_aux_roc_te_handle_notif(struct iwl_mvm *mvm, struct iwl_time_event_notif *notif) { struct iwl_mvm_time_event_data *te_data, *tmp; bool aux_roc_te = false; list_for_each_entry_safe(te_data, tmp, &mvm->aux_roc_te_list, list) { if (le32_to_cpu(notif->unique_id) == te_data->uid) { aux_roc_te = true; break; } } if (!aux_roc_te) /* Not an Aux ROC time event */ return -EINVAL; iwl_mvm_te_check_trigger(mvm, notif, te_data); IWL_DEBUG_TE(mvm, "Aux ROC time event notification - UID = 0x%x action %d (error = %d)\n", le32_to_cpu(notif->unique_id), le32_to_cpu(notif->action), le32_to_cpu(notif->status)); if (!le32_to_cpu(notif->status) || le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_END) { /* End TE, notify mac80211 */ ieee80211_remain_on_channel_expired(mvm->hw); iwl_mvm_roc_finished(mvm); /* flush aux queue */ list_del(&te_data->list); /* remove from list */ te_data->running = false; te_data->vif = NULL; te_data->uid = 0; te_data->id = TE_MAX; } else if (le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_START) { set_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status); te_data->running = true; ieee80211_ready_on_channel(mvm->hw); /* Start TE */ } else { IWL_DEBUG_TE(mvm, "ERROR: Unknown Aux ROC Time Event (action = %d)\n", le32_to_cpu(notif->action)); return -EINVAL; } return 0; } /* * The Rx handler for time event notifications */ void iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_time_event_notif *notif = (void *)pkt->data; struct iwl_mvm_time_event_data *te_data, *tmp; IWL_DEBUG_TE(mvm, "Time event notification - UID = 0x%x action %d\n", le32_to_cpu(notif->unique_id), le32_to_cpu(notif->action)); spin_lock_bh(&mvm->time_event_lock); /* This time event is triggered for Aux ROC request */ if (!iwl_mvm_aux_roc_te_handle_notif(mvm, notif)) goto unlock; list_for_each_entry_safe(te_data, tmp, &mvm->time_event_list, list) { if (le32_to_cpu(notif->unique_id) == te_data->uid) iwl_mvm_te_handle_notif(mvm, te_data, notif); }
unlock: spin_unlock_bh(&mvm->time_event_lock); } static bool iwl_mvm_te_notif(struct iwl_notif_wait_data *notif_wait, struct iwl_rx_packet *pkt, void *data) { struct iwl_mvm *mvm = container_of(notif_wait, struct iwl_mvm, notif_wait); struct iwl_mvm_time_event_data *te_data = data; struct iwl_time_event_notif *resp; int resp_len = iwl_rx_packet_payload_len(pkt); if (WARN_ON(pkt->hdr.cmd != TIME_EVENT_NOTIFICATION)) return true; if (WARN_ON_ONCE(resp_len != sizeof(*resp))) { IWL_ERR(mvm, "Invalid TIME_EVENT_NOTIFICATION response\n"); return true; } resp = (void *)pkt->data; /* te_data->uid is already set in the TIME_EVENT_CMD response */ if (le32_to_cpu(resp->unique_id) != te_data->uid) return false; IWL_DEBUG_TE(mvm, "TIME_EVENT_NOTIFICATION response - UID = 0x%x\n", te_data->uid); if (!resp->status) IWL_ERR(mvm, "TIME_EVENT_NOTIFICATION received but not executed\n"); return true; } static bool iwl_mvm_time_event_response(struct iwl_notif_wait_data *notif_wait, struct iwl_rx_packet *pkt, void *data) { struct iwl_mvm *mvm = container_of(notif_wait, struct iwl_mvm, notif_wait); struct iwl_mvm_time_event_data *te_data = data; struct iwl_time_event_resp *resp; int resp_len = iwl_rx_packet_payload_len(pkt); if (WARN_ON(pkt->hdr.cmd != TIME_EVENT_CMD)) return true; if (WARN_ON_ONCE(resp_len != sizeof(*resp))) { IWL_ERR(mvm, "Invalid TIME_EVENT_CMD response\n"); return true; } resp = (void *)pkt->data; /* we should never get a response to another TIME_EVENT_CMD here */ if (WARN_ON_ONCE(le32_to_cpu(resp->id) != te_data->id)) return false; te_data->uid = le32_to_cpu(resp->unique_id); IWL_DEBUG_TE(mvm, "TIME_EVENT_CMD response - UID = 0x%x\n", te_data->uid); return true; } static int iwl_mvm_time_event_send_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_mvm_time_event_data *te_data, struct iwl_time_event_cmd *te_cmd) { static const u16 time_event_response[] = { TIME_EVENT_CMD }; struct iwl_notification_wait wait_time_event; int ret; lockdep_assert_held(&mvm->mutex); IWL_DEBUG_TE(mvm, "Add new TE, duration %d TU\n", le32_to_cpu(te_cmd->duration)); spin_lock_bh(&mvm->time_event_lock); if (WARN_ON(te_data->id != TE_MAX)) { spin_unlock_bh(&mvm->time_event_lock); return -EIO; } te_data->vif = vif; te_data->duration = le32_to_cpu(te_cmd->duration); te_data->id = le32_to_cpu(te_cmd->id); list_add_tail(&te_data->list, &mvm->time_event_list); spin_unlock_bh(&mvm->time_event_lock); /* * Use a notification wait, which really just processes the * command response and doesn't wait for anything, in order * to be able to process the response and get the UID inside * the RX path. Using CMD_WANT_SKB doesn't work because it * stores the buffer and then wakes up this thread, by which * time another notification (that the time event started) * might already be processed unsuccessfully. 
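 * The resulting pattern below is: iwl_init_notification_wait(), then iwl_mvm_send_cmd_pdu(TIME_EVENT_CMD), then iwl_wait_notification(), during which iwl_mvm_time_event_response() records the UID from the command response.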
*/ iwl_init_notification_wait(&mvm->notif_wait, &wait_time_event, time_event_response, ARRAY_SIZE(time_event_response), iwl_mvm_time_event_response, te_data); ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, 0, sizeof(*te_cmd), te_cmd); if (ret) { IWL_ERR(mvm, "Couldn't send TIME_EVENT_CMD: %d\n", ret); iwl_remove_notification(&mvm->notif_wait, &wait_time_event); goto out_clear_te; } /* No need to wait for anything, so just pass 1 (0 isn't valid) */ ret = iwl_wait_notification(&mvm->notif_wait, &wait_time_event, 1); /* should never fail */ WARN_ON_ONCE(ret); if (ret) { out_clear_te: spin_lock_bh(&mvm->time_event_lock); iwl_mvm_te_clear_data(mvm, te_data); spin_unlock_bh(&mvm->time_event_lock); } return ret; } void iwl_mvm_protect_session(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u32 duration, u32 min_duration, u32 max_delay, bool wait_for_notif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data; const u16 te_notif_response[] = { TIME_EVENT_NOTIFICATION }; struct iwl_notification_wait wait_te_notif; struct iwl_time_event_cmd time_cmd = {}; lockdep_assert_held(&mvm->mutex); if (te_data->running && time_after(te_data->end_jiffies, TU_TO_EXP_TIME(min_duration))) { IWL_DEBUG_TE(mvm, "We have enough time in the current TE: %u\n", jiffies_to_msecs(te_data->end_jiffies - jiffies)); return; } if (te_data->running) { IWL_DEBUG_TE(mvm, "extend 0x%x: only %u ms left\n", te_data->uid, jiffies_to_msecs(te_data->end_jiffies - jiffies)); /* * we don't have enough time * cancel the current TE and issue a new one * Of course it would be better to remove the old one only * when the new one is added, but we don't care if we are off * channel for a bit. All we need to do, is not to return * before we actually begin to be on the channel. */ iwl_mvm_stop_session_protection(mvm, vif); } time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD); time_cmd.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)); time_cmd.id = cpu_to_le32(TE_BSS_STA_AGGRESSIVE_ASSOC); time_cmd.apply_time = cpu_to_le32(0); time_cmd.max_frags = TE_V2_FRAG_NONE; time_cmd.max_delay = cpu_to_le32(max_delay); /* TODO: why do we need to interval = bi if it is not periodic? */ time_cmd.interval = cpu_to_le32(1); time_cmd.duration = cpu_to_le32(duration); time_cmd.repeat = 1; time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START | TE_V2_NOTIF_HOST_EVENT_END | TE_V2_START_IMMEDIATELY); if (!wait_for_notif) { iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd); return; } /* * Create notification_wait for the TIME_EVENT_NOTIFICATION to use * right after we send the time event */ iwl_init_notification_wait(&mvm->notif_wait, &wait_te_notif, te_notif_response, ARRAY_SIZE(te_notif_response), iwl_mvm_te_notif, te_data); /* If TE was sent OK - wait for the notification that started */ if (iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd)) { IWL_ERR(mvm, "Failed to add TE to protect session\n"); iwl_remove_notification(&mvm->notif_wait, &wait_te_notif); } else if (iwl_wait_notification(&mvm->notif_wait, &wait_te_notif, TU_TO_JIFFIES(max_delay))) { IWL_ERR(mvm, "Failed to protect session until TE\n"); } } static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm, struct iwl_mvm_time_event_data *te_data, u32 *uid) { u32 id; /* * It is possible that by the time we got to this point the time * event was already removed. 
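 * In that case te_data->id is already TE_MAX and we return false below, so the caller skips sending a removal command to the fw.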
*/ spin_lock_bh(&mvm->time_event_lock); /* Save time event uid before clearing its data */ *uid = te_data->uid; id = te_data->id; /* * The clear_data function handles time events that were already removed */ iwl_mvm_te_clear_data(mvm, te_data); spin_unlock_bh(&mvm->time_event_lock); /* * It is possible that by the time we try to remove it, the time event * has already ended and been removed. In such a case there is no need to * send a removal command. */ if (id == TE_MAX) { IWL_DEBUG_TE(mvm, "TE 0x%x has already ended\n", *uid); return false; } return true; } /* * Explicit request to remove an aux roc time event. The removal of a time * event needs to be synchronized with the flow of a time event's end * notification, which also removes the time event from the op mode * data structures. */ static void iwl_mvm_remove_aux_roc_te(struct iwl_mvm *mvm, struct iwl_mvm_vif *mvmvif, struct iwl_mvm_time_event_data *te_data) { struct iwl_hs20_roc_req aux_cmd = {}; u16 len = sizeof(aux_cmd) - iwl_mvm_chan_info_padding(mvm); u32 uid; int ret; if (!__iwl_mvm_remove_time_event(mvm, te_data, &uid)) return; aux_cmd.event_unique_id = cpu_to_le32(uid); aux_cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE); aux_cmd.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)); IWL_DEBUG_TE(mvm, "Removing BSS AUX ROC TE 0x%x\n", le32_to_cpu(aux_cmd.event_unique_id)); ret = iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0, len, &aux_cmd); if (WARN_ON(ret)) return; } /* * Explicit request to remove a time event. The removal of a time event needs to * be synchronized with the flow of a time event's end notification, which also * removes the time event from the op mode data structures. */ void iwl_mvm_remove_time_event(struct iwl_mvm *mvm, struct iwl_mvm_vif *mvmvif, struct iwl_mvm_time_event_data *te_data) { struct iwl_time_event_cmd time_cmd = {}; u32 uid; int ret; if (!__iwl_mvm_remove_time_event(mvm, te_data, &uid)) return; /* When we remove a TE, the UID is to be set in the id field */ time_cmd.id = cpu_to_le32(uid); time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE); time_cmd.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)); IWL_DEBUG_TE(mvm, "Removing TE 0x%x\n", le32_to_cpu(time_cmd.id)); ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, 0, sizeof(time_cmd), &time_cmd); if (WARN_ON(ret)) return; } void iwl_mvm_stop_session_protection(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data; u32 id; lockdep_assert_held(&mvm->mutex); spin_lock_bh(&mvm->time_event_lock); id = te_data->id; spin_unlock_bh(&mvm->time_event_lock); if (id != TE_BSS_STA_AGGRESSIVE_ASSOC) { IWL_DEBUG_TE(mvm, "don't remove TE with id=%u (not session protection)\n", id); return; } iwl_mvm_remove_time_event(mvm, mvmvif, te_data); } int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif, int duration, enum ieee80211_roc_type type) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data; struct iwl_time_event_cmd time_cmd = {}; lockdep_assert_held(&mvm->mutex); if (te_data->running) { IWL_WARN(mvm, "P2P_DEVICE remain on channel already running\n"); return -EBUSY; } time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD); time_cmd.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)); switch (type) { case IEEE80211_ROC_TYPE_NORMAL: time_cmd.id = cpu_to_le32(IWL_MVM_ROC_TE_TYPE_NORMAL);
break; case IEEE80211_ROC_TYPE_MGMT_TX: time_cmd.id = cpu_to_le32(IWL_MVM_ROC_TE_TYPE_MGMT_TX); break; default: WARN_ONCE(1, "Got an invalid ROC type\n"); return -EINVAL; } time_cmd.apply_time = cpu_to_le32(0); time_cmd.interval = cpu_to_le32(1); /* * The P2P Device TEs can have lower priority than other events * that are being scheduled by the driver/fw, and thus they might not be * scheduled. To improve the chances of them being scheduled, allow them * to be fragmented, and in addition allow them to be delayed. */ time_cmd.max_frags = min(MSEC_TO_TU(duration)/50, TE_V2_FRAG_ENDLESS); time_cmd.max_delay = cpu_to_le32(MSEC_TO_TU(duration/2)); time_cmd.duration = cpu_to_le32(MSEC_TO_TU(duration)); time_cmd.repeat = 1; time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START | TE_V2_NOTIF_HOST_EVENT_END | TE_V2_START_IMMEDIATELY); return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd); } static struct iwl_mvm_time_event_data *iwl_mvm_get_roc_te(struct iwl_mvm *mvm) { struct iwl_mvm_time_event_data *te_data; lockdep_assert_held(&mvm->mutex); spin_lock_bh(&mvm->time_event_lock); /* * Iterate over the list of time events and find the time event that is * associated with a P2P_DEVICE interface. * This assumes that a P2P_DEVICE interface can have only a single time * event at any given time and this time event corresponds to a ROC * request */ list_for_each_entry(te_data, &mvm->time_event_list, list) { if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) goto out; } /* There can only be at most one AUX ROC time event, we just use the * list to simplify/unify code. Remove it if it exists. */ te_data = list_first_entry_or_null(&mvm->aux_roc_te_list, struct iwl_mvm_time_event_data, list); out: spin_unlock_bh(&mvm->time_event_lock); return te_data; } void iwl_mvm_cleanup_roc_te(struct iwl_mvm *mvm) { struct iwl_mvm_time_event_data *te_data; u32 uid; te_data = iwl_mvm_get_roc_te(mvm); if (te_data) __iwl_mvm_remove_time_event(mvm, te_data, &uid); } void iwl_mvm_stop_roc(struct iwl_mvm *mvm) { struct iwl_mvm_vif *mvmvif; struct iwl_mvm_time_event_data *te_data; te_data = iwl_mvm_get_roc_te(mvm); if (!te_data) { IWL_WARN(mvm, "No remain on channel event\n"); return; } mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif); if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) { iwl_mvm_remove_time_event(mvm, mvmvif, te_data); set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status); } else { iwl_mvm_remove_aux_roc_te(mvm, mvmvif, te_data); } iwl_mvm_roc_finished(mvm); } int iwl_mvm_schedule_csa_period(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u32 duration, u32 apply_time) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data; struct iwl_time_event_cmd time_cmd = {}; lockdep_assert_held(&mvm->mutex); if (te_data->running) { u32 id; spin_lock_bh(&mvm->time_event_lock); id = te_data->id; spin_unlock_bh(&mvm->time_event_lock); if (id == TE_CHANNEL_SWITCH_PERIOD) { IWL_DEBUG_TE(mvm, "CS period is already scheduled\n"); return -EBUSY; } /* * Remove the session protection time event to allow the * channel switch. If we got here, we just heard a beacon so * the session protection is not needed anymore anyway.
*/ iwl_mvm_remove_time_event(mvm, mvmvif, te_data); } time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD); time_cmd.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)); time_cmd.id = cpu_to_le32(TE_CHANNEL_SWITCH_PERIOD); time_cmd.apply_time = cpu_to_le32(apply_time); time_cmd.max_frags = TE_V2_FRAG_NONE; time_cmd.duration = cpu_to_le32(duration); time_cmd.repeat = 1; time_cmd.interval = cpu_to_le32(1); time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START | TE_V2_ABSENCE); if (!apply_time) time_cmd.policy |= cpu_to_le16(TE_V2_START_IMMEDIATELY); return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd); } backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h000066400000000000000000000231011351064634500276110ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* *****************************************************************************/ #ifndef __time_event_h__ #define __time_event_h__ #include "fw-api.h" #include "mvm.h" /** * DOC: Time Events - what are they? * * Time Events are a fw feature that allows the driver to control the presence * of the device on the channel. Since the fw supports multiple channels * concurrently, the fw may choose to jump to another channel at any time. * In order to make sure that the fw is on a specific channel at a certain time * and for a certain duration, the driver needs to issue a time event. * * The simplest example is for BSS association. The driver issues a time event, * waits for it to start, and only then tells mac80211 that we can start the * association. This way, we make sure that the association will be done * smoothly and won't be interrupted by a channel switch decided within the fw. */ /** * DOC: The flow against the fw * * When the driver needs to make sure we are on a certain channel, at a certain * time and for a certain duration, it sends a Time Event. The flow against the * fw goes like this: * 1) Driver sends a TIME_EVENT_CMD to the fw * 2) Driver gets the response for that command. This response contains the * Unique ID (UID) of the event. * 3) The fw sends a notification when the event starts. * * Of course the API provides various options that cover the parameters * of the flow. * What is the duration of the event? * What is the start time of the event? * Is there an end-time for the event? * How much can the event be delayed? * Can the event be split? * If yes what is the maximal number of chunks? * etc... */ /** * DOC: Abstraction to the driver * * In order to simplify the use of time events for the rest of the driver, * we abstract the use of time events. This component provides the functions * needed by the driver. */ #define IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS 600 #define IWL_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS 400 /** * iwl_mvm_protect_session - start / extend the session protection. * @mvm: the mvm component * @vif: the virtual interface for which the session is issued * @duration: the duration of the session in TU. * @min_duration: will start a new session if the current session will end * in less than min_duration. * @max_delay: maximum delay before starting the time event (in TU) * @wait_for_notif: true if it is required that a time event notification be * waited for (that the time event has been scheduled before returning) * * This function can be used to start a session protection which means that the * fw will stay on the channel for %duration TU. This function * can block (sleep) until the session starts. This function can also be used * to extend a currently running session. * This function is meant to be used for BSS association for example, where we * want to make sure that the fw stays on the channel during the association. */ void iwl_mvm_protect_session(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u32 duration, u32 min_duration, u32 max_delay, bool wait_for_notif); /** * iwl_mvm_stop_session_protection - cancel the session protection. * @mvm: the mvm component * @vif: the virtual interface for which the session is issued * * This function cancels the session protection which is an act of good * citizenship. If it is not needed any more it should be canceled because * the other bindings wait for the medium during that time. * This function doesn't sleep.
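 * A minimal illustrative pairing with iwl_mvm_protect_session() (the durations and delay are made-up example values, not driver defaults): iwl_mvm_protect_session(mvm, vif, 600, 400, 500, false); ... perform the association ...; iwl_mvm_stop_session_protection(mvm, vif);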
*/ void iwl_mvm_stop_session_protection(struct iwl_mvm *mvm, struct ieee80211_vif *vif); /* * iwl_mvm_rx_time_event_notif - handles %TIME_EVENT_NOTIFICATION. */ void iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); /** * iwl_mvm_start_p2p_roc - start remain on channel for p2p device functionality * @mvm: the mvm component * @vif: the virtual interface for which the roc is requested. It is assumed * that the vif type is NL80211_IFTYPE_P2P_DEVICE * @duration: the requested duration in milliseconds for the fw to be on the * channel that is bound to the vif. * @type: the remain on channel request type * * This function can be used to issue a remain on channel session, * which means that the fw will stay on the channel for the requested %duration * milliseconds. The function is async, meaning that it only issues the ROC * request but does not wait for it to start. Once the FW is ready to serve the * ROC request, it will issue a notification to the driver that it is on the * requested channel. Once the FW completes the ROC request it will issue * another notification to the driver. */ int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif, int duration, enum ieee80211_roc_type type); /** * iwl_mvm_stop_roc - stop remain on channel functionality * @mvm: the mvm component * * This function can be used to cancel an ongoing ROC session. * The function is async, it will instruct the FW to stop serving the ROC * session, but will not wait for the actual stopping of the session. */ void iwl_mvm_stop_roc(struct iwl_mvm *mvm); /** * iwl_mvm_remove_time_event - general function to clean up a time event * @mvm: the mvm component * @mvmvif: the vif to which the time event belongs * @te_data: the time event data that corresponds to that time event * * This function can be used to cancel a time event regardless of its type. * It is useful for cleaning up running time events before removing an * interface. */ void iwl_mvm_remove_time_event(struct iwl_mvm *mvm, struct iwl_mvm_vif *mvmvif, struct iwl_mvm_time_event_data *te_data); /** * iwl_mvm_te_clear_data - remove time event from list * @mvm: the mvm component * @te_data: the time event data to remove * * This function is mostly internal, it is made available here only * for firmware restart purposes. */ void iwl_mvm_te_clear_data(struct iwl_mvm *mvm, struct iwl_mvm_time_event_data *te_data); void iwl_mvm_cleanup_roc_te(struct iwl_mvm *mvm); void iwl_mvm_roc_done_wk(struct work_struct *wk); /** * iwl_mvm_schedule_csa_period - request channel switch absence period * @mvm: the mvm component * @vif: the virtual interface for which the channel switch is issued * @duration: the duration of the NoA in TU. * @apply_time: NoA start time in GP2. * * This function is used to schedule NoA time event and is used to perform * the channel switch flow. */ int iwl_mvm_schedule_csa_period(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u32 duration, u32 apply_time); /** * iwl_mvm_te_scheduled - check if the fw received the TE cmd * @te_data: the time event data that corresponds to that time event * * This function returns true iff this TE is added to the fw.
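 * (i.e. the TIME_EVENT_CMD response has already assigned it a non-zero unique ID).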
*/ static inline bool iwl_mvm_te_scheduled(struct iwl_mvm_time_event_data *te_data) { if (!te_data) return false; return !!te_data->uid; } #endif /* __time_event_h__ */ backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/mvm/tt.c000066400000000000000000000554441351064634500261750ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2013 - 2014, 2019 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2012 - 2014, 2019 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2016 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 *
 *****************************************************************************/
#include <linux/sort.h>

#include "mvm.h"

#define IWL_MVM_TEMP_NOTIF_WAIT_TIMEOUT	HZ

void iwl_mvm_enter_ctkill(struct iwl_mvm *mvm)
{
	struct iwl_mvm_tt_mgmt *tt = &mvm->thermal_throttle;
	u32 duration = tt->params.ct_kill_duration;

	if (test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status))
		return;

	IWL_ERR(mvm, "Enter CT Kill\n");
	iwl_mvm_set_hw_ctkill_state(mvm, true);

	if (!iwl_mvm_is_tt_in_fw(mvm)) {
		tt->throttle = false;
		tt->dynamic_smps = false;
	}

	/* Don't schedule an exit work if we're in test mode, since
	 * the temperature will not change unless we manually set it
	 * again (or disable testing).
	 */
	if (!mvm->temperature_test)
		schedule_delayed_work(&tt->ct_kill_exit,
				      round_jiffies_relative(duration * HZ));
}

static void iwl_mvm_exit_ctkill(struct iwl_mvm *mvm)
{
	if (!test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status))
		return;

	IWL_ERR(mvm, "Exit CT Kill\n");
	iwl_mvm_set_hw_ctkill_state(mvm, false);
}

void iwl_mvm_tt_temp_changed(struct iwl_mvm *mvm, u32 temp)
{
	/* ignore the notification if we are in test mode */
	if (mvm->temperature_test)
		return;

	if (mvm->temperature == temp)
		return;

	mvm->temperature = temp;
	iwl_mvm_tt_handler(mvm);
}

static int iwl_mvm_temp_notif_parse(struct iwl_mvm *mvm,
				    struct iwl_rx_packet *pkt)
{
	struct iwl_dts_measurement_notif_v1 *notif_v1;
	int len = iwl_rx_packet_payload_len(pkt);
	int temp;

	/* we can use notif_v1 only, because v2 only adds an additional
	 * parameter, which is not used in this function.
	 */
	if (WARN_ON_ONCE(len < sizeof(*notif_v1))) {
		IWL_ERR(mvm, "Invalid DTS_MEASUREMENT_NOTIFICATION\n");
		return -EINVAL;
	}

	notif_v1 = (void *)pkt->data;

	temp = le32_to_cpu(notif_v1->temp);

	/* shouldn't be negative, but since it's s32, make sure it isn't */
	if (WARN_ON_ONCE(temp < 0))
		temp = 0;

	IWL_DEBUG_TEMP(mvm, "DTS_MEASUREMENT_NOTIFICATION - %d\n", temp);

	return temp;
}

static bool iwl_mvm_temp_notif_wait(struct iwl_notif_wait_data *notif_wait,
				    struct iwl_rx_packet *pkt, void *data)
{
	struct iwl_mvm *mvm =
		container_of(notif_wait, struct iwl_mvm, notif_wait);
	int *temp = data;
	int ret;

	ret = iwl_mvm_temp_notif_parse(mvm, pkt);
	if (ret < 0)
		return true;

	*temp = ret;

	return true;
}

void iwl_mvm_temp_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_dts_measurement_notif_v2 *notif_v2;
	int len = iwl_rx_packet_payload_len(pkt);
	int temp;
	u32 ths_crossed;

	/* the notification is handled synchronously in ctkill, so skip here */
	if (test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status))
		return;

	temp = iwl_mvm_temp_notif_parse(mvm, pkt);

	if (!iwl_mvm_is_tt_in_fw(mvm)) {
		if (temp >= 0)
			iwl_mvm_tt_temp_changed(mvm, temp);
		return;
	}

	if (WARN_ON_ONCE(len < sizeof(*notif_v2))) {
		IWL_ERR(mvm, "Invalid DTS_MEASUREMENT_NOTIFICATION\n");
		return;
	}

	notif_v2 = (void *)pkt->data;
	ths_crossed = le32_to_cpu(notif_v2->threshold_idx);

	/* 0xFF in ths_crossed means the notification is not related
	 * to a trip, so we can ignore it here.
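	 *
	 * For example (hypothetical values): a notification carrying
	 * temp = 105 and threshold_idx = 2 is forwarded to the thermal
	 * core through the trip slot saved in fw_trips_index[2], whereas
	 * threshold_idx = 0xFF is treated as purely informational and
	 * dropped right below.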
*/ if (ths_crossed == 0xFF) return; IWL_DEBUG_TEMP(mvm, "Temp = %d Threshold crossed = %d\n", temp, ths_crossed); #ifdef CONFIG_THERMAL if (WARN_ON(ths_crossed >= IWL_MAX_DTS_TRIPS)) return; if (mvm->tz_device.tzone) { struct iwl_mvm_thermal_device *tz_dev = &mvm->tz_device; thermal_notify_framework(tz_dev->tzone, tz_dev->fw_trips_index[ths_crossed]); } #endif /* CONFIG_THERMAL */ } void iwl_mvm_ct_kill_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct ct_kill_notif *notif; int len = iwl_rx_packet_payload_len(pkt); if (WARN_ON_ONCE(len != sizeof(*notif))) { IWL_ERR(mvm, "Invalid CT_KILL_NOTIFICATION\n"); return; } notif = (struct ct_kill_notif *)pkt->data; IWL_DEBUG_TEMP(mvm, "CT Kill notification temperature = %d\n", notif->temperature); iwl_mvm_enter_ctkill(mvm); } static int iwl_mvm_get_temp_cmd(struct iwl_mvm *mvm) { struct iwl_dts_measurement_cmd cmd = { .flags = cpu_to_le32(DTS_TRIGGER_CMD_FLAGS_TEMP), }; struct iwl_ext_dts_measurement_cmd extcmd = { .control_mode = cpu_to_le32(DTS_AUTOMATIC), }; u32 cmdid; cmdid = iwl_cmd_id(CMD_DTS_MEASUREMENT_TRIGGER_WIDE, PHY_OPS_GROUP, 0); if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE)) return iwl_mvm_send_cmd_pdu(mvm, cmdid, 0, sizeof(cmd), &cmd); return iwl_mvm_send_cmd_pdu(mvm, cmdid, 0, sizeof(extcmd), &extcmd); } int iwl_mvm_get_temp(struct iwl_mvm *mvm, s32 *temp) { struct iwl_notification_wait wait_temp_notif; static u16 temp_notif[] = { WIDE_ID(PHY_OPS_GROUP, DTS_MEASUREMENT_NOTIF_WIDE) }; int ret; lockdep_assert_held(&mvm->mutex); iwl_init_notification_wait(&mvm->notif_wait, &wait_temp_notif, temp_notif, ARRAY_SIZE(temp_notif), iwl_mvm_temp_notif_wait, temp); ret = iwl_mvm_get_temp_cmd(mvm); if (ret) { IWL_ERR(mvm, "Failed to get the temperature (err=%d)\n", ret); iwl_remove_notification(&mvm->notif_wait, &wait_temp_notif); return ret; } ret = iwl_wait_notification(&mvm->notif_wait, &wait_temp_notif, IWL_MVM_TEMP_NOTIF_WAIT_TIMEOUT); if (ret) IWL_ERR(mvm, "Getting the temperature timed out\n"); return ret; } static void check_exit_ctkill(struct work_struct *work) { struct iwl_mvm_tt_mgmt *tt; struct iwl_mvm *mvm; u32 duration; s32 temp; int ret; tt = container_of(work, struct iwl_mvm_tt_mgmt, ct_kill_exit.work); mvm = container_of(tt, struct iwl_mvm, thermal_throttle); if (iwl_mvm_is_tt_in_fw(mvm)) { iwl_mvm_exit_ctkill(mvm); return; } duration = tt->params.ct_kill_duration; mutex_lock(&mvm->mutex); if (__iwl_mvm_mac_start(mvm)) goto reschedule; ret = iwl_mvm_get_temp(mvm, &temp); __iwl_mvm_mac_stop(mvm); if (ret) goto reschedule; IWL_DEBUG_TEMP(mvm, "NIC temperature: %d\n", temp); if (temp <= tt->params.ct_kill_exit) { mutex_unlock(&mvm->mutex); iwl_mvm_exit_ctkill(mvm); return; } reschedule: mutex_unlock(&mvm->mutex); schedule_delayed_work(&mvm->thermal_throttle.ct_kill_exit, round_jiffies(duration * HZ)); } static void iwl_mvm_tt_smps_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif) { struct iwl_mvm *mvm = _data; enum ieee80211_smps_mode smps_mode; lockdep_assert_held(&mvm->mutex); if (mvm->thermal_throttle.dynamic_smps) smps_mode = IEEE80211_SMPS_DYNAMIC; else smps_mode = IEEE80211_SMPS_AUTOMATIC; if (vif->type != NL80211_IFTYPE_STATION) return; iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_TT, smps_mode); } static void iwl_mvm_tt_tx_protection(struct iwl_mvm *mvm, bool enable) { struct iwl_mvm_sta *mvmsta; int i, err; for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) { mvmsta = iwl_mvm_sta_from_staid_protected(mvm, i); if 
(!mvmsta) continue; if (enable == mvmsta->tt_tx_protection) continue; err = iwl_mvm_tx_protection(mvm, mvmsta, enable); if (err) { IWL_ERR(mvm, "Failed to %s Tx protection\n", enable ? "enable" : "disable"); } else { IWL_DEBUG_TEMP(mvm, "%s Tx protection\n", enable ? "Enable" : "Disable"); mvmsta->tt_tx_protection = enable; } } } void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff) { struct iwl_host_cmd cmd = { .id = REPLY_THERMAL_MNG_BACKOFF, .len = { sizeof(u32), }, .data = { &backoff, }, }; backoff = max(backoff, mvm->thermal_throttle.min_backoff); if (iwl_mvm_send_cmd(mvm, &cmd) == 0) { IWL_DEBUG_TEMP(mvm, "Set Thermal Tx backoff to: %u\n", backoff); mvm->thermal_throttle.tx_backoff = backoff; } else { IWL_ERR(mvm, "Failed to change Thermal Tx backoff\n"); } } void iwl_mvm_tt_handler(struct iwl_mvm *mvm) { struct iwl_tt_params *params = &mvm->thermal_throttle.params; struct iwl_mvm_tt_mgmt *tt = &mvm->thermal_throttle; s32 temperature = mvm->temperature; bool throttle_enable = false; int i; u32 tx_backoff; IWL_DEBUG_TEMP(mvm, "NIC temperature: %d\n", mvm->temperature); if (params->support_ct_kill && temperature >= params->ct_kill_entry) { iwl_mvm_enter_ctkill(mvm); return; } if (params->support_ct_kill && temperature <= params->ct_kill_exit) { iwl_mvm_exit_ctkill(mvm); return; } if (params->support_dynamic_smps) { if (!tt->dynamic_smps && temperature >= params->dynamic_smps_entry) { IWL_DEBUG_TEMP(mvm, "Enable dynamic SMPS\n"); tt->dynamic_smps = true; ieee80211_iterate_active_interfaces_atomic( mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_tt_smps_iterator, mvm); throttle_enable = true; } else if (tt->dynamic_smps && temperature <= params->dynamic_smps_exit) { IWL_DEBUG_TEMP(mvm, "Disable dynamic SMPS\n"); tt->dynamic_smps = false; ieee80211_iterate_active_interfaces_atomic( mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_tt_smps_iterator, mvm); } } if (params->support_tx_protection) { if (temperature >= params->tx_protection_entry) { iwl_mvm_tt_tx_protection(mvm, true); throttle_enable = true; } else if (temperature <= params->tx_protection_exit) { iwl_mvm_tt_tx_protection(mvm, false); } } if (params->support_tx_backoff) { tx_backoff = tt->min_backoff; for (i = 0; i < TT_TX_BACKOFF_SIZE; i++) { if (temperature < params->tx_backoff[i].temperature) break; tx_backoff = max(tt->min_backoff, params->tx_backoff[i].backoff); } if (tx_backoff != tt->min_backoff) throttle_enable = true; if (tt->tx_backoff != tx_backoff) iwl_mvm_tt_tx_backoff(mvm, tx_backoff); } if (!tt->throttle && throttle_enable) { IWL_WARN(mvm, "Due to high temperature thermal throttling initiated\n"); tt->throttle = true; } else if (tt->throttle && !tt->dynamic_smps && tt->tx_backoff == tt->min_backoff && temperature <= params->tx_protection_exit) { IWL_WARN(mvm, "Temperature is back to normal thermal throttling stopped\n"); tt->throttle = false; } } static const struct iwl_tt_params iwl_mvm_default_tt_params = { .ct_kill_entry = 118, .ct_kill_exit = 96, .ct_kill_duration = 5, .dynamic_smps_entry = 114, .dynamic_smps_exit = 110, .tx_protection_entry = 114, .tx_protection_exit = 108, .tx_backoff = { {.temperature = 112, .backoff = 200}, {.temperature = 113, .backoff = 600}, {.temperature = 114, .backoff = 1200}, {.temperature = 115, .backoff = 2000}, {.temperature = 116, .backoff = 4000}, {.temperature = 117, .backoff = 10000}, }, .support_ct_kill = true, .support_dynamic_smps = true, .support_tx_protection = true, .support_tx_backoff = true, }; /* budget in mWatt */ static const u32 iwl_mvm_cdev_budgets[] = { 2000, 
/* cooling state 0 */ 1800, /* cooling state 1 */ 1600, /* cooling state 2 */ 1400, /* cooling state 3 */ 1200, /* cooling state 4 */ 1000, /* cooling state 5 */ 900, /* cooling state 6 */ 800, /* cooling state 7 */ 700, /* cooling state 8 */ 650, /* cooling state 9 */ 600, /* cooling state 10 */ 550, /* cooling state 11 */ 500, /* cooling state 12 */ 450, /* cooling state 13 */ 400, /* cooling state 14 */ 350, /* cooling state 15 */ 300, /* cooling state 16 */ 250, /* cooling state 17 */ 200, /* cooling state 18 */ 150, /* cooling state 19 */ }; int iwl_mvm_ctdp_command(struct iwl_mvm *mvm, u32 op, u32 state) { struct iwl_mvm_ctdp_cmd cmd = { .operation = cpu_to_le32(op), .budget = cpu_to_le32(iwl_mvm_cdev_budgets[state]), .window_size = 0, }; int ret; u32 status; lockdep_assert_held(&mvm->mutex); status = 0; ret = iwl_mvm_send_cmd_pdu_status(mvm, WIDE_ID(PHY_OPS_GROUP, CTDP_CONFIG_CMD), sizeof(cmd), &cmd, &status); if (ret) { IWL_ERR(mvm, "cTDP command failed (err=%d)\n", ret); return ret; } switch (op) { case CTDP_CMD_OPERATION_START: #ifdef CONFIG_THERMAL mvm->cooling_dev.cur_state = state; #endif /* CONFIG_THERMAL */ break; case CTDP_CMD_OPERATION_REPORT: IWL_DEBUG_TEMP(mvm, "cTDP avg energy in mWatt = %d\n", status); /* when the function is called with CTDP_CMD_OPERATION_REPORT * option the function should return the average budget value * that is received from the FW. * The budget can't be less or equal to 0, so it's possible * to distinguish between error values and budgets. */ return status; case CTDP_CMD_OPERATION_STOP: IWL_DEBUG_TEMP(mvm, "cTDP stopped successfully\n"); break; } return 0; } #ifdef CONFIG_THERMAL static int compare_temps(const void *a, const void *b) { return ((s16)le16_to_cpu(*(__le16 *)a) - (s16)le16_to_cpu(*(__le16 *)b)); } int iwl_mvm_send_temp_report_ths_cmd(struct iwl_mvm *mvm) { struct temp_report_ths_cmd cmd = {0}; int ret, i, j, idx = 0; lockdep_assert_held(&mvm->mutex); if (!mvm->tz_device.tzone) return -EINVAL; /* The driver holds array of temperature trips that are unsorted * and uncompressed, the FW should get it compressed and sorted */ /* compress temp_trips to cmd array, remove uninitialized values*/ for (i = 0; i < IWL_MAX_DTS_TRIPS; i++) { if (mvm->tz_device.temp_trips[i] != S16_MIN) { cmd.thresholds[idx++] = cpu_to_le16(mvm->tz_device.temp_trips[i]); } } cmd.num_temps = cpu_to_le32(idx); if (!idx) goto send; /*sort cmd array*/ sort(cmd.thresholds, idx, sizeof(s16), compare_temps, NULL); /* we should save the indexes of trips because we sort * and compress the orginal array */ for (i = 0; i < idx; i++) { for (j = 0; j < IWL_MAX_DTS_TRIPS; j++) { if (le16_to_cpu(cmd.thresholds[i]) == mvm->tz_device.temp_trips[j]) mvm->tz_device.fw_trips_index[i] = j; } } send: ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(PHY_OPS_GROUP, TEMP_REPORTING_THRESHOLDS_CMD), 0, sizeof(cmd), &cmd); if (ret) IWL_ERR(mvm, "TEMP_REPORT_THS_CMD command failed (err=%d)\n", ret); return ret; } static int iwl_mvm_tzone_get_temp(struct thermal_zone_device *device, int *temperature) { struct iwl_mvm *mvm = (struct iwl_mvm *)device->devdata; int ret; int temp; mutex_lock(&mvm->mutex); if (!iwl_mvm_firmware_running(mvm) || mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) { ret = -ENODATA; goto out; } ret = iwl_mvm_get_temp(mvm, &temp); if (ret) goto out; *temperature = temp * 1000; out: mutex_unlock(&mvm->mutex); return ret; } static int iwl_mvm_tzone_get_trip_temp(struct thermal_zone_device *device, int trip, int *temp) { struct iwl_mvm *mvm = (struct iwl_mvm *)device->devdata; if (trip < 0 
|| trip >= IWL_MAX_DTS_TRIPS) return -EINVAL; *temp = mvm->tz_device.temp_trips[trip] * 1000; return 0; } static int iwl_mvm_tzone_get_trip_type(struct thermal_zone_device *device, int trip, enum thermal_trip_type *type) { if (trip < 0 || trip >= IWL_MAX_DTS_TRIPS) return -EINVAL; *type = THERMAL_TRIP_PASSIVE; return 0; } static int iwl_mvm_tzone_set_trip_temp(struct thermal_zone_device *device, int trip, int temp) { struct iwl_mvm *mvm = (struct iwl_mvm *)device->devdata; struct iwl_mvm_thermal_device *tzone; int i, ret; s16 temperature; mutex_lock(&mvm->mutex); if (!iwl_mvm_firmware_running(mvm) || mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) { ret = -EIO; goto out; } if (trip < 0 || trip >= IWL_MAX_DTS_TRIPS) { ret = -EINVAL; goto out; } if ((temp / 1000) > S16_MAX) { ret = -EINVAL; goto out; } temperature = (s16)(temp / 1000); tzone = &mvm->tz_device; if (!tzone) { ret = -EIO; goto out; } /* no updates*/ if (tzone->temp_trips[trip] == temperature) { ret = 0; goto out; } /* already existing temperature */ for (i = 0; i < IWL_MAX_DTS_TRIPS; i++) { if (tzone->temp_trips[i] == temperature) { ret = -EINVAL; goto out; } } tzone->temp_trips[trip] = temperature; ret = iwl_mvm_send_temp_report_ths_cmd(mvm); out: mutex_unlock(&mvm->mutex); return ret; } static struct thermal_zone_device_ops tzone_ops = { .get_temp = iwl_mvm_tzone_get_temp, .get_trip_temp = iwl_mvm_tzone_get_trip_temp, .get_trip_type = iwl_mvm_tzone_get_trip_type, .set_trip_temp = iwl_mvm_tzone_set_trip_temp, }; /* make all trips writable */ #define IWL_WRITABLE_TRIPS_MSK (BIT(IWL_MAX_DTS_TRIPS) - 1) static void iwl_mvm_thermal_zone_register(struct iwl_mvm *mvm) { int i; char name[] = "iwlwifi"; if (!iwl_mvm_is_tt_in_fw(mvm)) { mvm->tz_device.tzone = NULL; return; } BUILD_BUG_ON(ARRAY_SIZE(name) >= THERMAL_NAME_LENGTH); mvm->tz_device.tzone = thermal_zone_device_register(name, IWL_MAX_DTS_TRIPS, IWL_WRITABLE_TRIPS_MSK, mvm, &tzone_ops, NULL, 0, 0); if (IS_ERR(mvm->tz_device.tzone)) { IWL_DEBUG_TEMP(mvm, "Failed to register to thermal zone (err = %ld)\n", PTR_ERR(mvm->tz_device.tzone)); mvm->tz_device.tzone = NULL; return; } /* 0 is a valid temperature, * so initialize the array with S16_MIN which invalid temperature */ for (i = 0 ; i < IWL_MAX_DTS_TRIPS; i++) mvm->tz_device.temp_trips[i] = S16_MIN; } static int iwl_mvm_tcool_get_max_state(struct thermal_cooling_device *cdev, unsigned long *state) { *state = ARRAY_SIZE(iwl_mvm_cdev_budgets) - 1; return 0; } static int iwl_mvm_tcool_get_cur_state(struct thermal_cooling_device *cdev, unsigned long *state) { struct iwl_mvm *mvm = (struct iwl_mvm *)(cdev->devdata); *state = mvm->cooling_dev.cur_state; return 0; } static int iwl_mvm_tcool_set_cur_state(struct thermal_cooling_device *cdev, unsigned long new_state) { struct iwl_mvm *mvm = (struct iwl_mvm *)(cdev->devdata); int ret; mutex_lock(&mvm->mutex); if (!iwl_mvm_firmware_running(mvm) || mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) { ret = -EIO; goto unlock; } if (new_state >= ARRAY_SIZE(iwl_mvm_cdev_budgets)) { ret = -EINVAL; goto unlock; } ret = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_START, new_state); unlock: mutex_unlock(&mvm->mutex); return ret; } static const struct thermal_cooling_device_ops tcooling_ops = { .get_max_state = iwl_mvm_tcool_get_max_state, .get_cur_state = iwl_mvm_tcool_get_cur_state, .set_cur_state = iwl_mvm_tcool_set_cur_state, }; static void iwl_mvm_cooling_device_register(struct iwl_mvm *mvm) { char name[] = "iwlwifi"; if (!iwl_mvm_is_ctdp_supported(mvm)) return; BUILD_BUG_ON(ARRAY_SIZE(name) >= 
THERMAL_NAME_LENGTH); mvm->cooling_dev.cdev = thermal_cooling_device_register(name, mvm, &tcooling_ops); if (IS_ERR(mvm->cooling_dev.cdev)) { IWL_DEBUG_TEMP(mvm, "Failed to register to cooling device (err = %ld)\n", PTR_ERR(mvm->cooling_dev.cdev)); mvm->cooling_dev.cdev = NULL; return; } } static void iwl_mvm_thermal_zone_unregister(struct iwl_mvm *mvm) { if (!iwl_mvm_is_tt_in_fw(mvm) || !mvm->tz_device.tzone) return; IWL_DEBUG_TEMP(mvm, "Thermal zone device unregister\n"); if (mvm->tz_device.tzone) { thermal_zone_device_unregister(mvm->tz_device.tzone); mvm->tz_device.tzone = NULL; } } static void iwl_mvm_cooling_device_unregister(struct iwl_mvm *mvm) { if (!iwl_mvm_is_ctdp_supported(mvm) || !mvm->cooling_dev.cdev) return; IWL_DEBUG_TEMP(mvm, "Cooling device unregister\n"); if (mvm->cooling_dev.cdev) { thermal_cooling_device_unregister(mvm->cooling_dev.cdev); mvm->cooling_dev.cdev = NULL; } } #endif /* CONFIG_THERMAL */ void iwl_mvm_thermal_initialize(struct iwl_mvm *mvm, u32 min_backoff) { struct iwl_mvm_tt_mgmt *tt = &mvm->thermal_throttle; IWL_DEBUG_TEMP(mvm, "Initialize Thermal Throttling\n"); if (mvm->cfg->thermal_params) tt->params = *mvm->cfg->thermal_params; else tt->params = iwl_mvm_default_tt_params; tt->throttle = false; tt->dynamic_smps = false; tt->min_backoff = min_backoff; INIT_DELAYED_WORK(&tt->ct_kill_exit, check_exit_ctkill); #ifdef CONFIG_THERMAL iwl_mvm_cooling_device_register(mvm); iwl_mvm_thermal_zone_register(mvm); #endif mvm->init_status |= IWL_MVM_INIT_STATUS_THERMAL_INIT_COMPLETE; } void iwl_mvm_thermal_exit(struct iwl_mvm *mvm) { if (!(mvm->init_status & IWL_MVM_INIT_STATUS_THERMAL_INIT_COMPLETE)) return; cancel_delayed_work_sync(&mvm->thermal_throttle.ct_kill_exit); IWL_DEBUG_TEMP(mvm, "Exit Thermal Throttling\n"); #ifdef CONFIG_THERMAL iwl_mvm_cooling_device_unregister(mvm); iwl_mvm_thermal_zone_unregister(mvm); #endif mvm->init_status &= ~IWL_MVM_INIT_STATUS_THERMAL_INIT_COMPLETE; } backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/mvm/tx.c000066400000000000000000001703031351064634500261710ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. 
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/ieee80211.h>
#include <linux/etherdevice.h>
#include <linux/tcp.h>
#include <net/ip.h>
#include <net/ipv6.h>

#include "iwl-trans.h"
#include "iwl-eeprom-parse.h"
#include "mvm.h"
#include "sta.h"

static void iwl_mvm_bar_check_trigger(struct iwl_mvm *mvm, const u8 *addr,
				      u16 tid, u16 ssn)
{
	struct iwl_fw_dbg_trigger_tlv *trig;
	struct iwl_fw_dbg_trigger_ba *ba_trig;

	trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL, FW_DBG_TRIGGER_BA);
	if (!trig)
		return;

	ba_trig = (void *)trig->data;

	if (!(le16_to_cpu(ba_trig->tx_bar) & BIT(tid)))
		return;

	iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
				"BAR sent to %pM, tid %d, ssn %d",
				addr, tid, ssn);
}

#define OPT_HDR(type, skb, off) \
	(type *)(skb_network_header(skb) + (off))

static u16 iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb,
			   struct ieee80211_hdr *hdr,
			   struct ieee80211_tx_info *info,
			   u16 offload_assist)
{
#if IS_ENABLED(CONFIG_INET)
	u16 mh_len = ieee80211_hdrlen(hdr->frame_control);
	u8 protocol = 0;

	/*
	 * Do not compute checksum if already computed or if transport will
	 * compute it
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL || IWL_MVM_SW_TX_CSUM_OFFLOAD)
		goto out;

	/* We do not expect to be requested to csum stuff we do not support */
	if (WARN_ONCE(!(mvm->hw->netdev_features & IWL_TX_CSUM_NETIF_FLAGS) ||
		      (skb->protocol != htons(ETH_P_IP) &&
		       skb->protocol != htons(ETH_P_IPV6)),
		      "No support for requested checksum\n")) {
		skb_checksum_help(skb);
		goto out;
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		protocol = ip_hdr(skb)->protocol;
	} else {
#if IS_ENABLED(CONFIG_IPV6)
		struct ipv6hdr *ipv6h =
			(struct ipv6hdr *)skb_network_header(skb);
		unsigned int off = sizeof(*ipv6h);

		protocol = ipv6h->nexthdr;
		while (protocol != NEXTHDR_NONE && ipv6_ext_hdr(protocol)) {
			struct ipv6_opt_hdr *hp;

			/* only supported extension headers */
			if (protocol != NEXTHDR_ROUTING &&
			    protocol != NEXTHDR_HOP &&
			    protocol != NEXTHDR_DEST) {
				skb_checksum_help(skb);
				goto out;
			}

			hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
			protocol = hp->nexthdr;
			off += ipv6_optlen(hp);
		}
		/* if we get here - protocol now should be TCP/UDP */
#endif
	}

	if
(protocol != IPPROTO_TCP && protocol != IPPROTO_UDP) { WARN_ON_ONCE(1); skb_checksum_help(skb); goto out; } /* enable L4 csum */ offload_assist |= BIT(TX_CMD_OFFLD_L4_EN); /* * Set offset to IP header (snap). * We don't support tunneling so no need to take care of inner header. * Size is in words. */ offload_assist |= (4 << TX_CMD_OFFLD_IP_HDR); /* Do IPv4 csum for AMSDU only (no IP csum for Ipv6) */ if (skb->protocol == htons(ETH_P_IP) && (offload_assist & BIT(TX_CMD_OFFLD_AMSDU))) { ip_hdr(skb)->check = 0; offload_assist |= BIT(TX_CMD_OFFLD_L3_EN); } /* reset UDP/TCP header csum */ if (protocol == IPPROTO_TCP) tcp_hdr(skb)->check = 0; else udp_hdr(skb)->check = 0; /* * mac header len should include IV, size is in words unless * the IV is added by the firmware like in WEP. * In new Tx API, the IV is always added by the firmware. */ if (!iwl_mvm_has_new_tx_api(mvm) && info->control.hw_key && info->control.hw_key->cipher != WLAN_CIPHER_SUITE_WEP40 && info->control.hw_key->cipher != WLAN_CIPHER_SUITE_WEP104) mh_len += info->control.hw_key->iv_len; mh_len /= 2; offload_assist |= mh_len << TX_CMD_OFFLD_MH_SIZE; out: #endif return offload_assist; } /* * Sets most of the Tx cmd's fields */ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb, struct iwl_tx_cmd *tx_cmd, struct ieee80211_tx_info *info, u8 sta_id) { struct ieee80211_hdr *hdr = (void *)skb->data; __le16 fc = hdr->frame_control; u32 tx_flags = le32_to_cpu(tx_cmd->tx_flags); u32 len = skb->len + FCS_LEN; u16 offload_assist = 0; u8 ac; if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) || (ieee80211_is_probe_resp(fc) && !is_multicast_ether_addr(hdr->addr1))) tx_flags |= TX_CMD_FLG_ACK; else tx_flags &= ~TX_CMD_FLG_ACK; if (ieee80211_is_probe_resp(fc)) tx_flags |= TX_CMD_FLG_TSF; if (ieee80211_has_morefrags(fc)) tx_flags |= TX_CMD_FLG_MORE_FRAG; if (ieee80211_is_data_qos(fc)) { u8 *qc = ieee80211_get_qos_ctl(hdr); tx_cmd->tid_tspec = qc[0] & 0xf; tx_flags &= ~TX_CMD_FLG_SEQ_CTL; if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT) offload_assist |= BIT(TX_CMD_OFFLD_AMSDU); } else if (ieee80211_is_back_req(fc)) { struct ieee80211_bar *bar = (void *)skb->data; u16 control = le16_to_cpu(bar->control); u16 ssn = le16_to_cpu(bar->start_seq_num); tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR; tx_cmd->tid_tspec = (control & IEEE80211_BAR_CTRL_TID_INFO_MASK) >> IEEE80211_BAR_CTRL_TID_INFO_SHIFT; WARN_ON_ONCE(tx_cmd->tid_tspec >= IWL_MAX_TID_COUNT); iwl_mvm_bar_check_trigger(mvm, bar->ra, tx_cmd->tid_tspec, ssn); } else { if (ieee80211_is_data(fc)) tx_cmd->tid_tspec = IWL_TID_NON_QOS; else tx_cmd->tid_tspec = IWL_MAX_TID_COUNT; if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) tx_flags |= TX_CMD_FLG_SEQ_CTL; else tx_flags &= ~TX_CMD_FLG_SEQ_CTL; } /* Default to 0 (BE) when tid_spec is set to IWL_MAX_TID_COUNT */ if (tx_cmd->tid_tspec < IWL_MAX_TID_COUNT) ac = tid_to_mac80211_ac[tx_cmd->tid_tspec]; else ac = tid_to_mac80211_ac[0]; tx_flags |= iwl_mvm_bt_coex_tx_prio(mvm, hdr, info, ac) << TX_CMD_FLG_BT_PRIO_POS; if (ieee80211_is_mgmt(fc)) { if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc)) tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_ASSOC); else if (ieee80211_is_action(fc)) tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_NONE); else tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_MGMT); /* The spec allows Action frames in A-MPDU, we don't support * it */ WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU); } else if (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO) { tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_MGMT); } else { 
tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_NONE); } if (ieee80211_is_data(fc) && len > mvm->rts_threshold && !is_multicast_ether_addr(hdr->addr1)) tx_flags |= TX_CMD_FLG_PROT_REQUIRE; if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT) && ieee80211_action_contains_tpc(skb)) tx_flags |= TX_CMD_FLG_WRITE_TX_POWER; tx_cmd->tx_flags = cpu_to_le32(tx_flags); /* Total # bytes to be transmitted - PCIe code will adjust for A-MSDU */ tx_cmd->len = cpu_to_le16((u16)skb->len); tx_cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE); tx_cmd->sta_id = sta_id; /* padding is inserted later in transport */ if (ieee80211_hdrlen(fc) % 4 && !(offload_assist & BIT(TX_CMD_OFFLD_AMSDU))) offload_assist |= BIT(TX_CMD_OFFLD_PAD); tx_cmd->offload_assist |= cpu_to_le16(iwl_mvm_tx_csum(mvm, skb, hdr, info, offload_assist)); } static u32 iwl_mvm_get_tx_ant(struct iwl_mvm *mvm, struct ieee80211_tx_info *info, struct ieee80211_sta *sta, __le16 fc) { if (info->band == NL80211_BAND_2GHZ && !iwl_mvm_bt_coex_is_shared_ant_avail(mvm)) return mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS; if (sta && ieee80211_is_data(fc)) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); return BIT(mvmsta->tx_ant) << RATE_MCS_ANT_POS; } return BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS; } static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm, struct ieee80211_tx_info *info, struct ieee80211_sta *sta) { int rate_idx; u8 rate_plcp; u32 rate_flags = 0; /* HT rate doesn't make sense for a non data frame */ WARN_ONCE(info->control.rates[0].flags & IEEE80211_TX_RC_MCS, "Got an HT rate (flags:0x%x/mcs:%d) for a non data frame\n", info->control.rates[0].flags, info->control.rates[0].idx); rate_idx = info->control.rates[0].idx; /* if the rate isn't a well known legacy rate, take the lowest one */ if (rate_idx < 0 || rate_idx >= IWL_RATE_COUNT_LEGACY) rate_idx = rate_lowest_index( &mvm->nvm_data->bands[info->band], sta); /* For 5 GHZ band, remap mac80211 rate indices into driver indices */ if (info->band == NL80211_BAND_5GHZ) rate_idx += IWL_FIRST_OFDM_RATE; #ifdef CPTCFG_IWLWIFI_FORCE_OFDM_RATE /* Force OFDM on each TX packet */ rate_idx = IWL_FIRST_OFDM_RATE; #endif /* For 2.4 GHZ band, check that there is no need to remap */ BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0); /* Get PLCP rate for tx_cmd->rate_n_flags */ rate_plcp = iwl_mvm_mac80211_idx_to_hwrate(rate_idx); /* Set CCK flag as needed */ if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE)) rate_flags |= RATE_MCS_CCK_MSK; return (u32)rate_plcp | rate_flags; } static u32 iwl_mvm_get_tx_rate_n_flags(struct iwl_mvm *mvm, struct ieee80211_tx_info *info, struct ieee80211_sta *sta, __le16 fc) { return iwl_mvm_get_tx_rate(mvm, info, sta) | iwl_mvm_get_tx_ant(mvm, info, sta, fc); } /* * Sets the fields in the Tx cmd that are rate related */ void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd, struct ieee80211_tx_info *info, struct ieee80211_sta *sta, __le16 fc) { /* Set retry limit on RTS packets */ tx_cmd->rts_retry_limit = IWL_RTS_DFAULT_RETRY_LIMIT; /* Set retry limit on DATA packets and Probe Responses*/ if (ieee80211_is_probe_resp(fc)) { tx_cmd->data_retry_limit = IWL_MGMT_DFAULT_RETRY_LIMIT; tx_cmd->rts_retry_limit = min(tx_cmd->data_retry_limit, tx_cmd->rts_retry_limit); } else if (ieee80211_is_back_req(fc)) { tx_cmd->data_retry_limit = IWL_BAR_DFAULT_RETRY_LIMIT; } else { tx_cmd->data_retry_limit = IWL_DEFAULT_TX_RETRY; } /* * for data packets, rate info comes from the table inside the fw. 
This * table is controlled by LINK_QUALITY commands */ #ifndef CPTCFG_IWLWIFI_FORCE_OFDM_RATE if (ieee80211_is_data(fc) && sta) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); if (mvmsta->sta_state >= IEEE80211_STA_AUTHORIZED) { tx_cmd->initial_rate_index = 0; tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE); return; } } else if (ieee80211_is_back_req(fc)) { tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_ACK | TX_CMD_FLG_BAR); } #else if (ieee80211_is_back_req(fc)) tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_ACK | TX_CMD_FLG_BAR); #endif /* Set the rate in the TX cmd */ tx_cmd->rate_n_flags = cpu_to_le32(iwl_mvm_get_tx_rate_n_flags(mvm, info, sta, fc)); } static inline void iwl_mvm_set_tx_cmd_pn(struct ieee80211_tx_info *info, u8 *crypto_hdr) { struct ieee80211_key_conf *keyconf = info->control.hw_key; u64 pn; pn = atomic64_inc_return(&keyconf->tx_pn); crypto_hdr[0] = pn; crypto_hdr[2] = 0; crypto_hdr[3] = 0x20 | (keyconf->keyidx << 6); crypto_hdr[1] = pn >> 8; crypto_hdr[4] = pn >> 16; crypto_hdr[5] = pn >> 24; crypto_hdr[6] = pn >> 32; crypto_hdr[7] = pn >> 40; } /* * Sets the fields in the Tx cmd that are crypto related */ static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm, struct ieee80211_tx_info *info, struct iwl_tx_cmd *tx_cmd, struct sk_buff *skb_frag, int hdrlen) { struct ieee80211_key_conf *keyconf = info->control.hw_key; u8 *crypto_hdr = skb_frag->data + hdrlen; enum iwl_tx_cmd_sec_ctrl type = TX_CMD_SEC_CCM; u64 pn; switch (keyconf->cipher) { case WLAN_CIPHER_SUITE_CCMP: iwl_mvm_set_tx_cmd_ccmp(info, tx_cmd); iwl_mvm_set_tx_cmd_pn(info, crypto_hdr); break; case WLAN_CIPHER_SUITE_TKIP: tx_cmd->sec_ctl = TX_CMD_SEC_TKIP; pn = atomic64_inc_return(&keyconf->tx_pn); ieee80211_tkip_add_iv(crypto_hdr, keyconf, pn); ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key); break; case WLAN_CIPHER_SUITE_WEP104: tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128; /* fall through */ case WLAN_CIPHER_SUITE_WEP40: tx_cmd->sec_ctl |= TX_CMD_SEC_WEP | ((keyconf->keyidx << TX_CMD_SEC_WEP_KEY_IDX_POS) & TX_CMD_SEC_WEP_KEY_IDX_MSK); memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen); break; case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: type = TX_CMD_SEC_GCMP; /* Fall through */ case WLAN_CIPHER_SUITE_CCMP_256: /* TODO: Taking the key from the table might introduce a race * when PTK rekeying is done, having an old packets with a PN * based on the old key but the message encrypted with a new * one. * Need to handle this. */ tx_cmd->sec_ctl |= type | TX_CMD_SEC_KEY_FROM_TABLE; tx_cmd->key[0] = keyconf->hw_key_idx; iwl_mvm_set_tx_cmd_pn(info, crypto_hdr); break; default: tx_cmd->sec_ctl |= TX_CMD_SEC_EXT; } } /* * Allocates and sets the Tx cmd the driver data pointers in the skb */ static struct iwl_device_cmd * iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb, struct ieee80211_tx_info *info, int hdrlen, struct ieee80211_sta *sta, u8 sta_id) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct iwl_device_cmd *dev_cmd; struct iwl_tx_cmd *tx_cmd; dev_cmd = iwl_trans_alloc_tx_cmd(mvm->trans); if (unlikely(!dev_cmd)) return NULL; /* Make sure we zero enough of dev_cmd */ BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) > sizeof(*tx_cmd)); BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen3) > sizeof(*tx_cmd)); memset(dev_cmd, 0, sizeof(dev_cmd->hdr) + sizeof(*tx_cmd)); dev_cmd->hdr.cmd = TX_CMD; if (iwl_mvm_has_new_tx_api(mvm)) { u16 offload_assist = 0; u32 rate_n_flags = 0; u16 flags = 0; struct iwl_mvm_sta *mvmsta = sta ? 
iwl_mvm_sta_from_mac80211(sta) : NULL; if (ieee80211_is_data_qos(hdr->frame_control)) { u8 *qc = ieee80211_get_qos_ctl(hdr); if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT) offload_assist |= BIT(TX_CMD_OFFLD_AMSDU); } offload_assist = iwl_mvm_tx_csum(mvm, skb, hdr, info, offload_assist); /* padding is inserted later in transport */ if (ieee80211_hdrlen(hdr->frame_control) % 4 && !(offload_assist & BIT(TX_CMD_OFFLD_AMSDU))) offload_assist |= BIT(TX_CMD_OFFLD_PAD); if (!info->control.hw_key) flags |= IWL_TX_FLAGS_ENCRYPT_DIS; /* * For data packets rate info comes from the fw. Only * set rate/antenna during connection establishment or in case * no station is given. */ if (!sta || !ieee80211_is_data(hdr->frame_control) || mvmsta->sta_state < IEEE80211_STA_AUTHORIZED) { flags |= IWL_TX_FLAGS_CMD_RATE; rate_n_flags = iwl_mvm_get_tx_rate_n_flags(mvm, info, sta, hdr->frame_control); } if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) { struct iwl_tx_cmd_gen3 *cmd = (void *)dev_cmd->payload; cmd->offload_assist |= cpu_to_le32(offload_assist); /* Total # bytes to be transmitted */ cmd->len = cpu_to_le16((u16)skb->len); /* Copy MAC header from skb into command buffer */ memcpy(cmd->hdr, hdr, hdrlen); cmd->flags = cpu_to_le16(flags); cmd->rate_n_flags = cpu_to_le32(rate_n_flags); } else { struct iwl_tx_cmd_gen2 *cmd = (void *)dev_cmd->payload; cmd->offload_assist |= cpu_to_le16(offload_assist); /* Total # bytes to be transmitted */ cmd->len = cpu_to_le16((u16)skb->len); /* Copy MAC header from skb into command buffer */ memcpy(cmd->hdr, hdr, hdrlen); cmd->flags = cpu_to_le32(flags); cmd->rate_n_flags = cpu_to_le32(rate_n_flags); } goto out; } tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload; if (info->control.hw_key) iwl_mvm_set_tx_cmd_crypto(mvm, info, tx_cmd, skb, hdrlen); iwl_mvm_set_tx_cmd(mvm, skb, tx_cmd, info, sta_id); iwl_mvm_set_tx_cmd_rate(mvm, tx_cmd, info, sta, hdr->frame_control); /* Copy MAC header from skb into command buffer */ memcpy(tx_cmd->hdr, hdr, hdrlen); out: return dev_cmd; } static void iwl_mvm_skb_prepare_status(struct sk_buff *skb, struct iwl_device_cmd *cmd) { struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb); memset(&skb_info->status, 0, sizeof(skb_info->status)); memset(skb_info->driver_data, 0, sizeof(skb_info->driver_data)); skb_info->driver_data[1] = cmd; } static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm, struct ieee80211_tx_info *info, struct ieee80211_hdr *hdr) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(info->control.vif); __le16 fc = hdr->frame_control; switch (info->control.vif->type) { case NL80211_IFTYPE_AP: case NL80211_IFTYPE_ADHOC: /* * Non-bufferable frames use the broadcast station, thus they * use the probe queue. * Also take care of the case where we send a deauth to a * station that we don't have, or similarly an association * response (with non-success status) for a station we can't * accept. * Also, disassociate frames might happen, particular with * reason 7 ("Class 3 frame received from nonassociated STA"). 
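	 *
	 * Summarizing the checks below: non-bufferable management frames
	 * (and deauth/disassoc in particular) go to mvm->probe_queue,
	 * multicast data without the +HTC order bit goes to the vif's
	 * cab_queue, and any other frame on an AP/ADHOC vif falls back
	 * to the probe queue.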
*/ if (ieee80211_is_mgmt(fc) && (!ieee80211_is_bufferable_mmpdu(fc) || ieee80211_is_deauth(fc) || ieee80211_is_disassoc(fc))) return mvm->probe_queue; if (!ieee80211_has_order(fc) && !ieee80211_is_probe_req(fc) && is_multicast_ether_addr(hdr->addr1)) return mvmvif->cab_queue; WARN_ONCE(info->control.vif->type != NL80211_IFTYPE_ADHOC, "fc=0x%02x", le16_to_cpu(fc)); return mvm->probe_queue; case NL80211_IFTYPE_P2P_DEVICE: if (ieee80211_is_mgmt(fc)) return mvm->p2p_dev_queue; WARN_ON_ONCE(1); return mvm->p2p_dev_queue; default: WARN_ONCE(1, "Not a ctrl vif, no available queue\n"); return -1; } } static void iwl_mvm_probe_resp_set_noa(struct iwl_mvm *mvm, struct sk_buff *skb) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(info->control.vif); struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data; int base_len = (u8 *)mgmt->u.probe_resp.variable - (u8 *)mgmt; struct iwl_probe_resp_data *resp_data; u8 *ie, *pos; u8 match[] = { (WLAN_OUI_WFA >> 16) & 0xff, (WLAN_OUI_WFA >> 8) & 0xff, WLAN_OUI_WFA & 0xff, WLAN_OUI_TYPE_WFA_P2P, }; rcu_read_lock(); resp_data = rcu_dereference(mvmvif->probe_resp_data); if (!resp_data) goto out; if (!resp_data->notif.noa_active) goto out; ie = (u8 *)cfg80211_find_ie_match(WLAN_EID_VENDOR_SPECIFIC, mgmt->u.probe_resp.variable, skb->len - base_len, match, 4, 2); if (!ie) { IWL_DEBUG_TX(mvm, "probe resp doesn't have P2P IE\n"); goto out; } if (skb_tailroom(skb) < resp_data->noa_len) { if (pskb_expand_head(skb, 0, resp_data->noa_len, GFP_ATOMIC)) { IWL_ERR(mvm, "Failed to reallocate probe resp\n"); goto out; } } pos = skb_put(skb, resp_data->noa_len); *pos++ = WLAN_EID_VENDOR_SPECIFIC; /* Set length of IE body (not including ID and length itself) */ *pos++ = resp_data->noa_len - 2; *pos++ = (WLAN_OUI_WFA >> 16) & 0xff; *pos++ = (WLAN_OUI_WFA >> 8) & 0xff; *pos++ = WLAN_OUI_WFA & 0xff; *pos++ = WLAN_OUI_TYPE_WFA_P2P; memcpy(pos, &resp_data->notif.noa_attr, resp_data->noa_len - sizeof(struct ieee80211_vendor_ie)); out: rcu_read_unlock(); } int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct ieee80211_tx_info info; struct iwl_device_cmd *dev_cmd; u8 sta_id; int hdrlen = ieee80211_hdrlen(hdr->frame_control); __le16 fc = hdr->frame_control; bool offchannel = IEEE80211_SKB_CB(skb)->flags & IEEE80211_TX_CTL_TX_OFFCHAN; int queue = -1; if (IWL_MVM_NON_TRANSMITTING_AP && ieee80211_is_probe_resp(fc)) return -1; memcpy(&info, skb->cb, sizeof(info)); if (WARN_ON_ONCE(skb->len > IEEE80211_MAX_DATA_LEN + hdrlen)) return -1; if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_AMPDU)) return -1; if (info.control.vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(info.control.vif); if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE || info.control.vif->type == NL80211_IFTYPE_AP || info.control.vif->type == NL80211_IFTYPE_ADHOC) { if (!ieee80211_is_data(hdr->frame_control)) sta_id = mvmvif->bcast_sta.sta_id; else sta_id = mvmvif->mcast_sta.sta_id; queue = iwl_mvm_get_ctrl_vif_queue(mvm, &info, hdr); } else if (info.control.vif->type == NL80211_IFTYPE_MONITOR) { queue = mvm->snif_queue; sta_id = mvm->snif_sta.sta_id; } else if (info.control.vif->type == NL80211_IFTYPE_STATION && offchannel) { /* * IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets * that can be used in 2 different types of vifs, P2P & * STATION. * P2P uses the offchannel queue. 
* STATION (HS2.0) uses the auxiliary context of the FW, * and hence needs to be sent on the aux queue. */ sta_id = mvm->aux_sta.sta_id; queue = mvm->aux_queue; } } if (queue < 0) { IWL_ERR(mvm, "No queue was found. Dropping TX\n"); return -1; } if (unlikely(ieee80211_is_probe_resp(fc))) iwl_mvm_probe_resp_set_noa(mvm, skb); IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, queue); dev_cmd = iwl_mvm_set_tx_params(mvm, skb, &info, hdrlen, NULL, sta_id); if (!dev_cmd) return -1; /* From now on, we cannot access info->control */ iwl_mvm_skb_prepare_status(skb, dev_cmd); if (iwl_trans_tx(mvm->trans, skb, dev_cmd, queue)) { iwl_trans_free_tx_cmd(mvm->trans, dev_cmd); return -1; } return 0; } unsigned int iwl_mvm_max_amsdu_size(struct iwl_mvm *mvm, struct ieee80211_sta *sta, unsigned int tid) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); enum nl80211_band band = mvmsta->vif->bss_conf.chandef.chan->band; u8 ac = tid_to_mac80211_ac[tid]; unsigned int txf; int lmac = IWL_LMAC_24G_INDEX; if (iwl_mvm_is_cdb_supported(mvm) && band == NL80211_BAND_5GHZ) lmac = IWL_LMAC_5G_INDEX; /* For HE redirect to trigger based fifos */ if (sta->he_cap.has_he && !WARN_ON(!iwl_mvm_has_new_tx_api(mvm))) ac += 4; txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac); /* * Don't send an AMSDU that will be longer than the TXF. * Add a security margin of 256 for the TX command + headers. * We also want to have the start of the next packet inside the * fifo to be able to send bursts. */ return min_t(unsigned int, mvmsta->max_amsdu_len, mvm->fwrt.smem_cfg.lmac[lmac].txfifo_size[txf] - 256); } #ifdef CONFIG_INET static int iwl_mvm_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes, netdev_features_t netdev_flags, struct sk_buff_head *mpdus_skb) { struct sk_buff *tmp, *next; struct ieee80211_hdr *hdr = (void *)skb->data; char cb[sizeof(skb->cb)]; u16 i = 0; unsigned int tcp_payload_len; unsigned int mss = skb_shinfo(skb)->gso_size; bool ipv4 = (skb->protocol == htons(ETH_P_IP)); u16 ip_base_id = ipv4 ? ntohs(ip_hdr(skb)->id) : 0; skb_shinfo(skb)->gso_size = num_subframes * mss; memcpy(cb, skb->cb, sizeof(cb)); next = skb_gso_segment(skb, netdev_flags); skb_shinfo(skb)->gso_size = mss; if (WARN_ON_ONCE(IS_ERR(next))) return -EINVAL; else if (next) consume_skb(skb); while (next) { tmp = next; next = tmp->next; memcpy(tmp->cb, cb, sizeof(tmp->cb)); /* * Compute the length of all the data added for the A-MSDU. * This will be used to compute the length to write in the TX * command. We have: SNAP + IP + TCP for n -1 subframes and * ETH header for n subframes. 
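		 *
		 * A worked example with hypothetical numbers: for
		 * mss = 1460 and num_subframes = 2, gso_size was raised
		 * to 2 * 1460 above, so each segment returned by
		 * skb_gso_segment() carries at most 2920 bytes of TCP
		 * payload; tcp_payload_len below recomputes what this
		 * segment actually holds.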
*/ tcp_payload_len = skb_tail_pointer(tmp) - skb_transport_header(tmp) - tcp_hdrlen(tmp) + tmp->data_len; if (ipv4) ip_hdr(tmp)->id = htons(ip_base_id + i * num_subframes); if (tcp_payload_len > mss) { skb_shinfo(tmp)->gso_size = mss; } else { if (ieee80211_is_data_qos(hdr->frame_control)) { u8 *qc; if (ipv4) ip_send_check(ip_hdr(tmp)); qc = ieee80211_get_qos_ctl((void *)tmp->data); *qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT; } skb_shinfo(tmp)->gso_size = 0; } tmp->prev = NULL; tmp->next = NULL; __skb_queue_tail(mpdus_skb, tmp); i++; } return 0; } static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, struct ieee80211_tx_info *info, struct ieee80211_sta *sta, struct sk_buff_head *mpdus_skb) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct ieee80211_hdr *hdr = (void *)skb->data; unsigned int mss = skb_shinfo(skb)->gso_size; unsigned int num_subframes, tcp_payload_len, subf_len, max_amsdu_len; u16 snap_ip_tcp, pad; netdev_features_t netdev_flags = NETIF_F_CSUM_MASK | NETIF_F_SG; u8 tid; snap_ip_tcp = 8 + skb_transport_header(skb) - skb_network_header(skb) + tcp_hdrlen(skb); if (!mvmsta->max_amsdu_len || !ieee80211_is_data_qos(hdr->frame_control) || !mvmsta->amsdu_enabled) return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb); /* * Do not build AMSDU for IPv6 with extension headers. * ask stack to segment and checkum the generated MPDUs for us. */ if (skb->protocol == htons(ETH_P_IPV6) && ((struct ipv6hdr *)skb_network_header(skb))->nexthdr != IPPROTO_TCP) { netdev_flags &= ~NETIF_F_CSUM_MASK; return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb); } tid = ieee80211_get_tid(hdr); if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT)) return -EINVAL; /* * No need to lock amsdu_in_ampdu_allowed since it can't be modified * during an BA session. */ if (info->flags & IEEE80211_TX_CTL_AMPDU && !mvmsta->tid_data[tid].amsdu_in_ampdu_allowed) return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb); if (iwl_mvm_vif_low_latency(iwl_mvm_vif_from_mac80211(mvmsta->vif)) || !(mvmsta->amsdu_enabled & BIT(tid))) return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb); max_amsdu_len = iwl_mvm_max_amsdu_size(mvm, sta, tid); /* * Limit A-MSDU in A-MPDU to 4095 bytes when VHT is not * supported. This is a spec requirement (IEEE 802.11-2015 * section 8.7.3 NOTE 3). */ if (info->flags & IEEE80211_TX_CTL_AMPDU && !sta->vht_cap.vht_supported) max_amsdu_len = min_t(unsigned int, max_amsdu_len, 4095); /* Sub frame header + SNAP + IP header + TCP header + MSS */ subf_len = sizeof(struct ethhdr) + snap_ip_tcp + mss; pad = (4 - subf_len) & 0x3; /* * If we have N subframes in the A-MSDU, then the A-MSDU's size is * N * subf_len + (N - 1) * pad. 
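	 *
	 * Solving N * subf_len + (N - 1) * pad <= max_amsdu_len for the
	 * largest integer N gives
	 * N <= (max_amsdu_len + pad) / (subf_len + pad), which is exactly
	 * the integer division performed below.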
*/ num_subframes = (max_amsdu_len + pad) / (subf_len + pad); if (sta->max_amsdu_subframes && num_subframes > sta->max_amsdu_subframes) num_subframes = sta->max_amsdu_subframes; tcp_payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) - tcp_hdrlen(skb) + skb->data_len; /* * Make sure we have enough TBs for the A-MSDU: * 2 for each subframe * 1 more for each fragment * 1 more for the potential data in the header */ if ((num_subframes * 2 + skb_shinfo(skb)->nr_frags + 1) > mvm->trans->max_skb_frags) num_subframes = 1; if (num_subframes > 1) *ieee80211_get_qos_ctl(hdr) |= IEEE80211_QOS_CTL_A_MSDU_PRESENT; /* This skb fits in one single A-MSDU */ if (num_subframes * mss >= tcp_payload_len) { __skb_queue_tail(mpdus_skb, skb); return 0; } /* * Trick the segmentation function to make it * create SKBs that can fit into one A-MSDU. */ return iwl_mvm_tx_tso_segment(skb, num_subframes, netdev_flags, mpdus_skb); } #else /* CONFIG_INET */ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, struct ieee80211_tx_info *info, struct ieee80211_sta *sta, struct sk_buff_head *mpdus_skb) { /* Impossible to get TSO with CONFIG_INET */ WARN_ON(1); return -1; } #endif /* Check if there are any timed-out TIDs on a given shared TXQ */ static bool iwl_mvm_txq_should_update(struct iwl_mvm *mvm, int txq_id) { unsigned long queue_tid_bitmap = mvm->queue_info[txq_id].tid_bitmap; unsigned long now = jiffies; int tid; if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) return false; for_each_set_bit(tid, &queue_tid_bitmap, IWL_MAX_TID_COUNT + 1) { if (time_before(mvm->queue_info[txq_id].last_frame_time[tid] + IWL_MVM_DQA_QUEUE_TIMEOUT, now)) return true; } return false; } static void iwl_mvm_tx_airtime(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, int airtime) { int mac = mvmsta->mac_id_n_color & FW_CTXT_ID_MSK; struct iwl_mvm_tcm_mac *mdata; if (mac >= NUM_MAC_INDEX_DRIVER) return; mdata = &mvm->tcm.data[mac]; if (mvm->tcm.paused) return; if (time_after(jiffies, mvm->tcm.ts + MVM_TCM_PERIOD)) schedule_delayed_work(&mvm->tcm.work, 0); mdata->tx.airtime += airtime; } static int iwl_mvm_tx_pkt_queued(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, int tid) { u32 ac = tid_to_mac80211_ac[tid]; int mac = mvmsta->mac_id_n_color & FW_CTXT_ID_MSK; struct iwl_mvm_tcm_mac *mdata; if (mac >= NUM_MAC_INDEX_DRIVER) return -EINVAL; mdata = &mvm->tcm.data[mac]; mdata->tx.pkts[ac]++; return 0; } /* * Sets the fields in the Tx cmd that are crypto related. * * This function must be called with BHs disabled. 
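 *
 * In short, this is the per-MPDU transmit path: it builds the TX
 * command, assigns the QoS sequence number under mvmsta->lock and hands
 * the frame to the transport on the queue mapped for this RA/TID.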
*/ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, struct ieee80211_tx_info *info, struct ieee80211_sta *sta) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct iwl_mvm_sta *mvmsta; struct iwl_device_cmd *dev_cmd; __le16 fc; u16 seq_number = 0; u8 tid = IWL_MAX_TID_COUNT; u16 txq_id; bool is_ampdu = false; int hdrlen; mvmsta = iwl_mvm_sta_from_mac80211(sta); fc = hdr->frame_control; hdrlen = ieee80211_hdrlen(fc); if (IWL_MVM_NON_TRANSMITTING_AP && ieee80211_is_probe_resp(fc)) return -1; if (WARN_ON_ONCE(!mvmsta)) return -1; if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_INVALID_STA)) return -1; if (unlikely(ieee80211_is_probe_resp(fc))) iwl_mvm_probe_resp_set_noa(mvm, skb); dev_cmd = iwl_mvm_set_tx_params(mvm, skb, info, hdrlen, sta, mvmsta->sta_id); if (!dev_cmd) goto drop; /* * we handle that entirely ourselves -- for uAPSD the firmware * will always send a notification, and for PS-Poll responses * we'll notify mac80211 when getting frame status */ info->flags &= ~IEEE80211_TX_STATUS_EOSP; spin_lock(&mvmsta->lock); /* nullfunc frames should go to the MGMT queue regardless of QOS, * the condition of !ieee80211_is_qos_nullfunc(fc) keeps the default * assignment of MGMT TID */ if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) { tid = ieee80211_get_tid(hdr); if (WARN_ONCE(tid >= IWL_MAX_TID_COUNT, "Invalid TID %d", tid)) goto drop_unlock_sta; is_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU; if (WARN_ONCE(is_ampdu && mvmsta->tid_data[tid].state != IWL_AGG_ON, "Invalid internal agg state %d for TID %d", mvmsta->tid_data[tid].state, tid)) goto drop_unlock_sta; seq_number = mvmsta->tid_data[tid].seq_number; seq_number &= IEEE80211_SCTL_SEQ; if (!iwl_mvm_has_new_tx_api(mvm)) { struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload; hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG); hdr->seq_ctrl |= cpu_to_le16(seq_number); /* update the tx_cmd hdr as it was already copied */ tx_cmd->hdr->seq_ctrl = hdr->seq_ctrl; } } else if (ieee80211_is_data(fc) && !ieee80211_is_data_qos(fc)) { tid = IWL_TID_NON_QOS; } txq_id = mvmsta->tid_data[tid].txq_id; WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM); if (WARN_ONCE(txq_id == IWL_MVM_INVALID_QUEUE, "Invalid TXQ id")) { iwl_trans_free_tx_cmd(mvm->trans, dev_cmd); spin_unlock(&mvmsta->lock); return 0; } if (!iwl_mvm_has_new_tx_api(mvm)) { /* Keep track of the time of the last frame for this RA/TID */ mvm->queue_info[txq_id].last_frame_time[tid] = jiffies; /* * If we have timed-out TIDs - schedule the worker that will * reconfig the queues and update them * * Note that the no lock is taken here in order to not serialize * the TX flow. This isn't dangerous because scheduling * mvm->add_stream_wk can't ruin the state, and if we DON'T * schedule it due to some race condition then next TX we get * here we will. */ if (unlikely(mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_SHARED && iwl_mvm_txq_should_update(mvm, txq_id))) schedule_work(&mvm->add_stream_wk); } IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x len %d\n", mvmsta->sta_id, tid, txq_id, IEEE80211_SEQ_TO_SN(seq_number), skb->len); /* From now on, we cannot access info->control */ iwl_mvm_skb_prepare_status(skb, dev_cmd); if (iwl_trans_tx(mvm->trans, skb, dev_cmd, txq_id)) goto drop_unlock_sta; if (tid < IWL_MAX_TID_COUNT && !ieee80211_has_morefrags(fc)) mvmsta->tid_data[tid].seq_number = seq_number + 0x10; spin_unlock(&mvmsta->lock); if (iwl_mvm_tx_pkt_queued(mvm, mvmsta, tid == IWL_MAX_TID_COUNT ? 
0 : tid)) goto drop; return 0; drop_unlock_sta: iwl_trans_free_tx_cmd(mvm->trans, dev_cmd); spin_unlock(&mvmsta->lock); drop: IWL_DEBUG_TX(mvm, "TX to [%d|%d] dropped\n", mvmsta->sta_id, tid); return -1; } int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb, struct ieee80211_sta *sta) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct ieee80211_tx_info info; struct sk_buff_head mpdus_skbs; unsigned int payload_len; int ret; if (WARN_ON_ONCE(!mvmsta)) return -1; if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_INVALID_STA)) return -1; memcpy(&info, skb->cb, sizeof(info)); if (!skb_is_gso(skb)) return iwl_mvm_tx_mpdu(mvm, skb, &info, sta); payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) - tcp_hdrlen(skb) + skb->data_len; if (payload_len <= skb_shinfo(skb)->gso_size) return iwl_mvm_tx_mpdu(mvm, skb, &info, sta); __skb_queue_head_init(&mpdus_skbs); ret = iwl_mvm_tx_tso(mvm, skb, &info, sta, &mpdus_skbs); if (ret) return ret; if (WARN_ON(skb_queue_empty(&mpdus_skbs))) return ret; while (!skb_queue_empty(&mpdus_skbs)) { skb = __skb_dequeue(&mpdus_skbs); ret = iwl_mvm_tx_mpdu(mvm, skb, &info, sta); if (ret) { __skb_queue_purge(&mpdus_skbs); return ret; } } return 0; } static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm, struct ieee80211_sta *sta, u8 tid) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; struct ieee80211_vif *vif = mvmsta->vif; u16 normalized_ssn; lockdep_assert_held(&mvmsta->lock); if ((tid_data->state == IWL_AGG_ON || tid_data->state == IWL_EMPTYING_HW_QUEUE_DELBA) && iwl_mvm_tid_queued(mvm, tid_data) == 0) { /* * Now that this aggregation or DQA queue is empty tell * mac80211 so it knows we no longer have frames buffered for * the station on this TID (for the TIM bitmap calculation.) */ ieee80211_sta_set_buffered(sta, tid, false); } /* * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need * to align the wrap around of ssn so we compare relevant values. 
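	 *
	 * For example (hypothetical values): with ssn = 0x112 and
	 * next_reclaimed = 0x12 on gen2 hardware, masking ssn with 0xff
	 * yields 0x12, so the two indices can be compared despite the
	 * narrower next_reclaimed.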
*/ normalized_ssn = tid_data->ssn; if (mvm->trans->cfg->gen2) normalized_ssn &= 0xff; if (normalized_ssn != tid_data->next_reclaimed) return; switch (tid_data->state) { case IWL_EMPTYING_HW_QUEUE_ADDBA: IWL_DEBUG_TX_QUEUES(mvm, "Can continue addBA flow ssn = next_recl = %d\n", tid_data->next_reclaimed); tid_data->state = IWL_AGG_STARTING; ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); break; case IWL_EMPTYING_HW_QUEUE_DELBA: IWL_DEBUG_TX_QUEUES(mvm, "Can continue DELBA flow ssn = next_recl = %d\n", tid_data->next_reclaimed); tid_data->state = IWL_AGG_OFF; ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); break; default: break; } } #ifdef CPTCFG_IWLWIFI_DEBUG const char *iwl_mvm_get_tx_fail_reason(u32 status) { #define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x #define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x switch (status & TX_STATUS_MSK) { case TX_STATUS_SUCCESS: return "SUCCESS"; TX_STATUS_POSTPONE(DELAY); TX_STATUS_POSTPONE(FEW_BYTES); TX_STATUS_POSTPONE(BT_PRIO); TX_STATUS_POSTPONE(QUIET_PERIOD); TX_STATUS_POSTPONE(CALC_TTAK); TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY); TX_STATUS_FAIL(SHORT_LIMIT); TX_STATUS_FAIL(LONG_LIMIT); TX_STATUS_FAIL(UNDERRUN); TX_STATUS_FAIL(DRAIN_FLOW); TX_STATUS_FAIL(RFKILL_FLUSH); TX_STATUS_FAIL(LIFE_EXPIRE); TX_STATUS_FAIL(DEST_PS); TX_STATUS_FAIL(HOST_ABORTED); TX_STATUS_FAIL(BT_RETRY); TX_STATUS_FAIL(STA_INVALID); TX_STATUS_FAIL(FRAG_DROPPED); TX_STATUS_FAIL(TID_DISABLE); TX_STATUS_FAIL(FIFO_FLUSHED); TX_STATUS_FAIL(SMALL_CF_POLL); TX_STATUS_FAIL(FW_DROP); TX_STATUS_FAIL(STA_COLOR_MISMATCH); } return "UNKNOWN"; #undef TX_STATUS_FAIL #undef TX_STATUS_POSTPONE } #endif /* CPTCFG_IWLWIFI_DEBUG */ void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags, enum nl80211_band band, struct ieee80211_tx_rate *r) { if (rate_n_flags & RATE_HT_MCS_GF_MSK) r->flags |= IEEE80211_TX_RC_GREEN_FIELD; switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) { case RATE_MCS_CHAN_WIDTH_20: break; case RATE_MCS_CHAN_WIDTH_40: r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH; break; case RATE_MCS_CHAN_WIDTH_80: r->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH; break; case RATE_MCS_CHAN_WIDTH_160: r->flags |= IEEE80211_TX_RC_160_MHZ_WIDTH; break; } if (rate_n_flags & RATE_MCS_SGI_MSK) r->flags |= IEEE80211_TX_RC_SHORT_GI; if (rate_n_flags & RATE_MCS_HT_MSK) { r->flags |= IEEE80211_TX_RC_MCS; r->idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK; } else if (rate_n_flags & RATE_MCS_VHT_MSK) { ieee80211_rate_set_vht( r, rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK, ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >> RATE_VHT_MCS_NSS_POS) + 1); r->flags |= IEEE80211_TX_RC_VHT_MCS; } else { r->idx = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags, band); } } /** * translate ucode response to mac80211 tx status control values */ static void iwl_mvm_hwrate_to_tx_status(u32 rate_n_flags, struct ieee80211_tx_info *info) { struct ieee80211_tx_rate *r = &info->status.rates[0]; info->status.antenna = ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS); iwl_mvm_hwrate_to_tx_rate(rate_n_flags, info->band, r); } #ifdef CPTCFG_MAC80211_LATENCY_MEASUREMENTS static void iwl_mvm_tx_lat_add_ts_ack(struct sk_buff *skb) { s64 temp = ktime_to_ms(ktime_get()); s64 ts_1 = ktime_to_ns(skb->tstamp) >> 32; s64 diff = temp - ts_1; #if LINUX_VERSION_IS_LESS(4,10,0) skb->tstamp.tv64 += diff; #else skb->tstamp += diff; #endif } #endif static void iwl_mvm_tx_status_check_trigger(struct iwl_mvm *mvm, u32 status) { struct iwl_fw_dbg_trigger_tlv *trig; struct iwl_fw_dbg_trigger_tx_status *status_trig; int i; trig = 
iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL, FW_DBG_TRIGGER_TX_STATUS); if (!trig) return; status_trig = (void *)trig->data; for (i = 0; i < ARRAY_SIZE(status_trig->statuses); i++) { /* don't collect on status 0 */ if (!status_trig->statuses[i].status) break; if (status_trig->statuses[i].status != (status & TX_STATUS_MSK)) continue; iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, "Tx status %d was received", status & TX_STATUS_MSK); break; } } /** * iwl_mvm_get_scd_ssn - returns the SSN of the SCD * @tx_resp: the Tx response from the fw (agg or non-agg) * * When the fw sends an AMPDU, it fetches the MPDUs one after the other. Since * it can't know that everything will go well until the end of the AMPDU, it * can't know in advance the number of MPDUs that will be sent in the current * batch. This is why it writes the agg Tx response while it fetches the MPDUs. * Hence, it can't know in advance what the SSN of the SCD will be at the end * of the batch. This is why the SSN of the SCD is written at the end of the * whole struct at a variable offset. This function knows how to cope with the * variable offset and returns the SSN of the SCD. */ static inline u32 iwl_mvm_get_scd_ssn(struct iwl_mvm *mvm, struct iwl_mvm_tx_resp *tx_resp) { return le32_to_cpup((__le32 *)iwl_mvm_get_agg_status(mvm, tx_resp) + tx_resp->frame_count) & 0xfff; } static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt) { struct ieee80211_sta *sta; u16 sequence = le16_to_cpu(pkt->hdr.sequence); int txq_id = SEQ_TO_QUEUE(sequence); /* struct iwl_mvm_tx_resp_v3 is almost the same */ struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data; int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid); int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid); struct agg_tx_status *agg_status = iwl_mvm_get_agg_status(mvm, tx_resp); u32 status = le16_to_cpu(agg_status->status); u16 ssn = iwl_mvm_get_scd_ssn(mvm, tx_resp); struct sk_buff_head skbs; u8 skb_freed = 0; u8 lq_color; u16 next_reclaimed, seq_ctl; bool is_ndp = false; __skb_queue_head_init(&skbs); if (iwl_mvm_has_new_tx_api(mvm)) txq_id = le16_to_cpu(tx_resp->tx_queue); seq_ctl = le16_to_cpu(tx_resp->seq_ctl); /* we can free until ssn % q.n_bd not inclusive */ iwl_trans_reclaim(mvm->trans, txq_id, ssn, &skbs); while (!skb_queue_empty(&skbs)) { struct sk_buff *skb = __skb_dequeue(&skbs); struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_hdr *hdr = (void *)skb->data; bool flushed = false; #ifdef CPTCFG_MAC80211_LATENCY_MEASUREMENTS iwl_mvm_tx_lat_add_ts_ack(skb); #endif skb_freed++; iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]); memset(&info->status, 0, sizeof(info->status)); /* inform mac80211 about what happened with the frame */ switch (status & TX_STATUS_MSK) { case TX_STATUS_SUCCESS: case TX_STATUS_DIRECT_DONE: info->flags |= IEEE80211_TX_STAT_ACK; break; case TX_STATUS_FAIL_FIFO_FLUSHED: case TX_STATUS_FAIL_DRAIN_FLOW: flushed = true; break; case TX_STATUS_FAIL_DEST_PS: /* the FW should have stopped the queue and not * return this status */ WARN_ON(1); info->flags |= IEEE80211_TX_STAT_TX_FILTERED; break; default: break; } if ((status & TX_STATUS_MSK) != TX_STATUS_SUCCESS && ieee80211_is_mgmt(hdr->frame_control)) iwl_mvm_toggle_tx_ant(mvm, &mvm->mgmt_last_antenna_idx); /* * If we are freeing multiple frames, mark all the frames * but the first one as acked, since they were acknowledged * before * */ if (skb_freed > 1) info->flags |= IEEE80211_TX_STAT_ACK; iwl_mvm_tx_status_check_trigger(mvm, status); info->status.rates[0].count = 
tx_resp->failure_frame + 1; iwl_mvm_hwrate_to_tx_status(le32_to_cpu(tx_resp->initial_rate), info); info->status.status_driver_data[1] = (void *)(uintptr_t)le32_to_cpu(tx_resp->initial_rate); /* Single frame failure in an AMPDU queue => send BAR */ if (info->flags & IEEE80211_TX_CTL_AMPDU && !(info->flags & IEEE80211_TX_STAT_ACK) && !(info->flags & IEEE80211_TX_STAT_TX_FILTERED) && !flushed) info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK; info->flags &= ~IEEE80211_TX_CTL_AMPDU; /* W/A FW bug: seq_ctl is wrong upon failure / BAR frame */ if (ieee80211_is_back_req(hdr->frame_control)) seq_ctl = 0; else if (status != TX_STATUS_SUCCESS) seq_ctl = le16_to_cpu(hdr->seq_ctrl); if (unlikely(!seq_ctl)) { struct ieee80211_hdr *hdr = (void *)skb->data; /* * If it is an NDP, we can't update next_reclaimed since * its sequence control is 0. Note that for that same * reason, NDPs are never sent to A-MPDU'able queues * so that we can never have more than one freed frame * for a single Tx response (see WARN_ON below). */ if (ieee80211_is_qos_nullfunc(hdr->frame_control)) is_ndp = true; } /* * TODO: this is not accurate if we are freeing more than one * packet. */ info->status.tx_time = le16_to_cpu(tx_resp->wireless_media_time); BUILD_BUG_ON(ARRAY_SIZE(info->status.status_driver_data) < 1); lq_color = TX_RES_RATE_TABLE_COL_GET(tx_resp->tlc_info); info->status.status_driver_data[0] = RS_DRV_DATA_PACK(lq_color, tx_resp->reduced_tpc); #ifdef CPTCFG_IWLMVM_TDLS_PEER_CACHE if (info->flags & IEEE80211_TX_STAT_ACK) iwl_mvm_tdls_peer_cache_pkt(mvm, (void *)skb->data, skb->len, -1); #endif /* CPTCFG_IWLMVM_TDLS_PEER_CACHE */ ieee80211_tx_status(mvm->hw, skb); } /* This is an aggregation queue or might become one, so we use * the ssn since: ssn = wifi seq_num % 256. * The seq_ctl is the sequence control of the packet to which * this Tx response relates. But if there is a hole in the * bitmap of the BA we received, this Tx response may allow us to * reclaim the hole and all the subsequent packets that were * already acked. In that case, seq_ctl != ssn, and the next * packet to be reclaimed will be ssn and not seq_ctl. In that * case, several packets will be reclaimed even if * frame_count = 1. * * The ssn is the index (% 256) of the latest packet that has been * treated (acked / dropped) + 1. */ next_reclaimed = ssn; IWL_DEBUG_TX_REPLY(mvm, "TXQ %d status %s (0x%08x)\n", txq_id, iwl_mvm_get_tx_fail_reason(status), status); IWL_DEBUG_TX_REPLY(mvm, "\t\t\t\tinitial_rate 0x%x retries %d, idx=%d ssn=%d next_reclaimed=0x%x seq_ctl=0x%x\n", le32_to_cpu(tx_resp->initial_rate), tx_resp->failure_frame, SEQ_TO_INDEX(sequence), ssn, next_reclaimed, seq_ctl); rcu_read_lock(); sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); /* * sta can't be NULL, otherwise it'd mean that the sta has been freed in * the firmware while we still have packets for it in the Tx queues.
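* (The WARN_ON_ONCE just below exists to catch exactly that * driver/firmware inconsistency instead of papering over it.)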
*/ if (WARN_ON_ONCE(!sta)) goto out; if (!IS_ERR(sta)) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); iwl_mvm_tx_airtime(mvm, mvmsta, le16_to_cpu(tx_resp->wireless_media_time)); if ((status & TX_STATUS_MSK) != TX_STATUS_SUCCESS && mvmsta->sta_state < IEEE80211_STA_AUTHORIZED) iwl_mvm_toggle_tx_ant(mvm, &mvmsta->tx_ant); if (sta->wme && tid != IWL_MGMT_TID) { struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; bool send_eosp_ndp = false; spin_lock_bh(&mvmsta->lock); if (!is_ndp) { tid_data->next_reclaimed = next_reclaimed; IWL_DEBUG_TX_REPLY(mvm, "Next reclaimed packet:%d\n", next_reclaimed); } else { IWL_DEBUG_TX_REPLY(mvm, "NDP - don't update next_reclaimed\n"); } iwl_mvm_check_ratid_empty(mvm, sta, tid); if (mvmsta->sleep_tx_count) { mvmsta->sleep_tx_count--; if (mvmsta->sleep_tx_count && !iwl_mvm_tid_queued(mvm, tid_data)) { /* * The number of frames in the queue * dropped to 0 even if we sent less * frames than we thought we had on the * Tx queue. * This means we had holes in the BA * window that we just filled, ask * mac80211 to send EOSP since the * firmware won't know how to do that. * Send NDP and the firmware will send * EOSP notification that will trigger * a call to ieee80211_sta_eosp(). */ send_eosp_ndp = true; } } spin_unlock_bh(&mvmsta->lock); if (send_eosp_ndp) { iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, IEEE80211_FRAME_RELEASE_UAPSD, 1, tid, false, false); mvmsta->sleep_tx_count = 0; ieee80211_send_eosp_nullfunc(sta, tid); } } if (mvmsta->next_status_eosp) { mvmsta->next_status_eosp = false; ieee80211_sta_eosp(sta); } } out: rcu_read_unlock(); } #ifdef CPTCFG_IWLWIFI_DEBUG #define AGG_TX_STATE_(x) case AGG_TX_STATE_ ## x: return #x static const char *iwl_get_agg_tx_status(u16 status) { switch (status & AGG_TX_STATE_STATUS_MSK) { AGG_TX_STATE_(TRANSMITTED); AGG_TX_STATE_(UNDERRUN); AGG_TX_STATE_(BT_PRIO); AGG_TX_STATE_(FEW_BYTES); AGG_TX_STATE_(ABORT); AGG_TX_STATE_(TX_ON_AIR_DROP); AGG_TX_STATE_(LAST_SENT_TRY_CNT); AGG_TX_STATE_(LAST_SENT_BT_KILL); AGG_TX_STATE_(SCD_QUERY); AGG_TX_STATE_(TEST_BAD_CRC32); AGG_TX_STATE_(RESPONSE); AGG_TX_STATE_(DUMP_TX); AGG_TX_STATE_(DELAY_TX); } return "UNKNOWN"; } static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt) { struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data; struct agg_tx_status *frame_status = iwl_mvm_get_agg_status(mvm, tx_resp); int i; for (i = 0; i < tx_resp->frame_count; i++) { u16 fstatus = le16_to_cpu(frame_status[i].status); IWL_DEBUG_TX_REPLY(mvm, "status %s (0x%04x), try-count (%d) seq (0x%x)\n", iwl_get_agg_tx_status(fstatus), fstatus & AGG_TX_STATE_STATUS_MSK, (fstatus & AGG_TX_STATE_TRY_CNT_MSK) >> AGG_TX_STATE_TRY_CNT_POS, le16_to_cpu(frame_status[i].sequence)); } } #else static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt) {} #endif /* CPTCFG_IWLWIFI_DEBUG */ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt) { struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data; int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid); int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid); u16 sequence = le16_to_cpu(pkt->hdr.sequence); struct iwl_mvm_sta *mvmsta; int queue = SEQ_TO_QUEUE(sequence); struct ieee80211_sta *sta; if (WARN_ON_ONCE(queue < IWL_MVM_DQA_MIN_DATA_QUEUE && (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE))) return; iwl_mvm_rx_tx_cmd_agg_dbg(mvm, pkt); rcu_read_lock(); mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id); sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); if (WARN_ON_ONCE(!sta || 
!sta->wme)) { rcu_read_unlock(); return; } if (!WARN_ON_ONCE(!mvmsta)) { mvmsta->tid_data[tid].rate_n_flags = le32_to_cpu(tx_resp->initial_rate); mvmsta->tid_data[tid].tx_time = le16_to_cpu(tx_resp->wireless_media_time); mvmsta->tid_data[tid].lq_color = TX_RES_RATE_TABLE_COL_GET(tx_resp->tlc_info); iwl_mvm_tx_airtime(mvm, mvmsta, le16_to_cpu(tx_resp->wireless_media_time)); } rcu_read_unlock(); } void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data; if (tx_resp->frame_count == 1) iwl_mvm_rx_tx_cmd_single(mvm, pkt); else iwl_mvm_rx_tx_cmd_agg(mvm, pkt); } static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid, int txq, int index, struct ieee80211_tx_info *ba_info, u32 rate) { struct sk_buff_head reclaimed_skbs; struct iwl_mvm_tid_data *tid_data; struct ieee80211_sta *sta; struct iwl_mvm_sta *mvmsta; struct sk_buff *skb; int freed; if (WARN_ONCE(sta_id >= IWL_MVM_STATION_COUNT || tid > IWL_MAX_TID_COUNT, "sta_id %d tid %d", sta_id, tid)) return; rcu_read_lock(); sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); /* Reclaiming frames for a station that has been deleted ? */ if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) { rcu_read_unlock(); return; } mvmsta = iwl_mvm_sta_from_mac80211(sta); tid_data = &mvmsta->tid_data[tid]; if (tid_data->txq_id != txq) { IWL_ERR(mvm, "invalid BA notification: Q %d, tid %d\n", tid_data->txq_id, tid); rcu_read_unlock(); return; } __skb_queue_head_init(&reclaimed_skbs); /* * Release all TFDs before the SSN, i.e. all TFDs in front of * block-ack window (we assume that they've been successfully * transmitted ... if not, it's too late anyway). */ iwl_trans_reclaim(mvm->trans, txq, index, &reclaimed_skbs); spin_lock_bh(&mvmsta->lock); tid_data->next_reclaimed = index; iwl_mvm_check_ratid_empty(mvm, sta, tid); freed = 0; /* pack lq color from tid_data along the reduced txp */ ba_info->status.status_driver_data[0] = RS_DRV_DATA_PACK(tid_data->lq_color, ba_info->status.status_driver_data[0]); ba_info->status.status_driver_data[1] = (void *)(uintptr_t)rate; skb_queue_walk(&reclaimed_skbs, skb) { struct ieee80211_hdr *hdr = (void *)skb->data; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); #ifdef CPTCFG_MAC80211_LATENCY_MEASUREMENTS iwl_mvm_tx_lat_add_ts_ack(skb); #endif if (ieee80211_is_data_qos(hdr->frame_control)) freed++; else WARN_ON_ONCE(tid != IWL_MAX_TID_COUNT); iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]); memset(&info->status, 0, sizeof(info->status)); /* Packet was transmitted successfully, failures come as single * frames because before failing a frame the firmware transmits * it without aggregation at least once. */ info->flags |= IEEE80211_TX_STAT_ACK; #ifdef CPTCFG_IWLMVM_TDLS_PEER_CACHE iwl_mvm_tdls_peer_cache_pkt(mvm, hdr, skb->len, -1); #endif /* CPTCFG_IWLMVM_TDLS_PEER_CACHE */ /* this is the first skb we deliver in this batch */ /* put the rate scaling data there */ if (freed == 1) { info->flags |= IEEE80211_TX_STAT_AMPDU; memcpy(&info->status, &ba_info->status, sizeof(ba_info->status)); iwl_mvm_hwrate_to_tx_status(rate, info); } } spin_unlock_bh(&mvmsta->lock); /* We got a BA notif with 0 acked or scd_ssn didn't progress which is * possible (i.e. first MPDU in the aggregation wasn't acked) * Still it's important to update RS about sent vs. acked. 
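* In that case reclaimed_skbs stays empty and only the rate scaling * statistics are updated below.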
*/ if (skb_queue_empty(&reclaimed_skbs)) { struct ieee80211_chanctx_conf *chanctx_conf = NULL; if (mvmsta->vif) chanctx_conf = rcu_dereference(mvmsta->vif->chanctx_conf); if (WARN_ON_ONCE(!chanctx_conf)) goto out; ba_info->band = chanctx_conf->def.chan->band; iwl_mvm_hwrate_to_tx_status(rate, ba_info); if (!iwl_mvm_has_tlc_offload(mvm)) { IWL_DEBUG_TX_REPLY(mvm, "No reclaim. Update rs directly\n"); iwl_mvm_rs_tx_status(mvm, sta, tid, ba_info, false); } } out: rcu_read_unlock(); while (!skb_queue_empty(&reclaimed_skbs)) { skb = __skb_dequeue(&reclaimed_skbs); ieee80211_tx_status(mvm->hw, skb); } } void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); int sta_id, tid, txq, index; struct ieee80211_tx_info ba_info = {}; struct iwl_mvm_ba_notif *ba_notif; struct iwl_mvm_tid_data *tid_data; struct iwl_mvm_sta *mvmsta; ba_info.flags = IEEE80211_TX_STAT_AMPDU; if (iwl_mvm_has_new_tx_api(mvm)) { struct iwl_mvm_compressed_ba_notif *ba_res = (void *)pkt->data; u8 lq_color = TX_RES_RATE_TABLE_COL_GET(ba_res->tlc_rate_info); int i; sta_id = ba_res->sta_id; ba_info.status.ampdu_ack_len = (u8)le16_to_cpu(ba_res->done); ba_info.status.ampdu_len = (u8)le16_to_cpu(ba_res->txed); ba_info.status.tx_time = (u16)le32_to_cpu(ba_res->wireless_time); ba_info.status.status_driver_data[0] = (void *)(uintptr_t)ba_res->reduced_txp; if (!le16_to_cpu(ba_res->tfd_cnt)) goto out; rcu_read_lock(); mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id); if (!mvmsta) goto out_unlock; /* Free per TID */ for (i = 0; i < le16_to_cpu(ba_res->tfd_cnt); i++) { struct iwl_mvm_compressed_ba_tfd *ba_tfd = &ba_res->tfd[i]; tid = ba_tfd->tid; if (tid == IWL_MGMT_TID) tid = IWL_MAX_TID_COUNT; mvmsta->tid_data[i].lq_color = lq_color; iwl_mvm_tx_reclaim(mvm, sta_id, tid, (int)(le16_to_cpu(ba_tfd->q_num)), le16_to_cpu(ba_tfd->tfd_index), &ba_info, le32_to_cpu(ba_res->tx_rate)); } iwl_mvm_tx_airtime(mvm, mvmsta, le32_to_cpu(ba_res->wireless_time)); out_unlock: rcu_read_unlock(); out: IWL_DEBUG_TX_REPLY(mvm, "BA_NOTIFICATION Received from sta_id = %d, flags %x, sent:%d, acked:%d\n", sta_id, le32_to_cpu(ba_res->flags), le16_to_cpu(ba_res->txed), le16_to_cpu(ba_res->done)); return; } ba_notif = (void *)pkt->data; sta_id = ba_notif->sta_id; tid = ba_notif->tid; /* "flow" corresponds to Tx queue */ txq = le16_to_cpu(ba_notif->scd_flow); /* "ssn" is start of block-ack Tx window, corresponds to index * (in Tx queue's circular buffer) of first TFD/frame in window */ index = le16_to_cpu(ba_notif->scd_ssn); rcu_read_lock(); mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id); if (WARN_ON_ONCE(!mvmsta)) { rcu_read_unlock(); return; } tid_data = &mvmsta->tid_data[tid]; ba_info.status.ampdu_ack_len = ba_notif->txed_2_done; ba_info.status.ampdu_len = ba_notif->txed; ba_info.status.tx_time = tid_data->tx_time; ba_info.status.status_driver_data[0] = (void *)(uintptr_t)ba_notif->reduced_txp; rcu_read_unlock(); iwl_mvm_tx_reclaim(mvm, sta_id, tid, txq, index, &ba_info, tid_data->rate_n_flags); IWL_DEBUG_TX_REPLY(mvm, "BA_NOTIFICATION Received from %pM, sta_id = %d\n", ba_notif->sta_addr, ba_notif->sta_id); IWL_DEBUG_TX_REPLY(mvm, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = %d, scd_ssn = %d sent:%d, acked:%d\n", ba_notif->tid, le16_to_cpu(ba_notif->seq_ctl), le64_to_cpu(ba_notif->bitmap), txq, index, ba_notif->txed, ba_notif->txed_2_done); IWL_DEBUG_TX_REPLY(mvm, "reduced txp from ba notif %d\n", ba_notif->reduced_txp); } /* * Note that there are transports that buffer frames before they reach 
* the firmware. This means that after flush_tx_path is called, the * queue might not be empty. The race-free way to handle this is to: * 1) set the station as draining * 2) flush the Tx path * 3) wait for the transport queues to be empty */ int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, u32 flags) { int ret; struct iwl_tx_path_flush_cmd_v1 flush_cmd = { .queues_ctl = cpu_to_le32(tfd_msk), .flush_ctl = cpu_to_le16(DUMP_TX_FIFO_FLUSH), }; WARN_ON(iwl_mvm_has_new_tx_api(mvm)); ret = iwl_mvm_send_cmd_pdu(mvm, TXPATH_FLUSH, flags, sizeof(flush_cmd), &flush_cmd); if (ret) IWL_ERR(mvm, "Failed to send flush command (%d)\n", ret); return ret; } int iwl_mvm_flush_sta_tids(struct iwl_mvm *mvm, u32 sta_id, u16 tids, u32 flags) { int ret; struct iwl_tx_path_flush_cmd flush_cmd = { .sta_id = cpu_to_le32(sta_id), .tid_mask = cpu_to_le16(tids), }; WARN_ON(!iwl_mvm_has_new_tx_api(mvm)); ret = iwl_mvm_send_cmd_pdu(mvm, TXPATH_FLUSH, flags, sizeof(flush_cmd), &flush_cmd); if (ret) IWL_ERR(mvm, "Failed to send flush command (%d)\n", ret); return ret; } int iwl_mvm_flush_sta(struct iwl_mvm *mvm, void *sta, bool internal, u32 flags) { struct iwl_mvm_int_sta *int_sta = sta; struct iwl_mvm_sta *mvm_sta = sta; BUILD_BUG_ON(offsetof(struct iwl_mvm_int_sta, sta_id) != offsetof(struct iwl_mvm_sta, sta_id)); if (iwl_mvm_has_new_tx_api(mvm)) return iwl_mvm_flush_sta_tids(mvm, mvm_sta->sta_id, 0xff | BIT(IWL_MGMT_TID), flags); if (internal) return iwl_mvm_flush_tx_path(mvm, int_sta->tfd_queue_msk, flags); return iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, flags); } backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/mvm/utils.c000066400000000000000000001260011351064634500266720ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright (C) 2015 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright (C) 2015 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. 
* * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #include #include "iwl-debug.h" #include "iwl-io.h" #include "iwl-prph.h" #include "iwl-csr.h" #include "mvm.h" #include "fw/api/rs.h" /* * Will return 0 even if the cmd failed when RFKILL is asserted unless * CMD_WANT_SKB is set in cmd->flags. */ int iwl_mvm_send_cmd(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd) { int ret; #if defined(CPTCFG_IWLWIFI_DEBUGFS) && defined(CONFIG_PM_SLEEP) if (WARN_ON(mvm->d3_test_active)) return -EIO; #endif /* * Synchronous commands from this op-mode must hold * the mutex, this ensures we don't try to send two * (or more) synchronous commands at a time. */ if (!(cmd->flags & CMD_ASYNC)) lockdep_assert_held(&mvm->mutex); ret = iwl_trans_send_cmd(mvm->trans, cmd); /* * If the caller wants the SKB, then don't hide any problems, the * caller might access the response buffer which will be NULL if * the command failed. */ if (cmd->flags & CMD_WANT_SKB) return ret; /* Silently ignore failures if RFKILL is asserted */ if (!ret || ret == -ERFKILL) return 0; return ret; } int iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u32 id, u32 flags, u16 len, const void *data) { struct iwl_host_cmd cmd = { .id = id, .len = { len, }, .data = { data, }, .flags = flags, }; return iwl_mvm_send_cmd(mvm, &cmd); } /* * We assume that the caller set the status to the success value */ int iwl_mvm_send_cmd_status(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd, u32 *status) { struct iwl_rx_packet *pkt; struct iwl_cmd_response *resp; int ret, resp_len; lockdep_assert_held(&mvm->mutex); #if defined(CPTCFG_IWLWIFI_DEBUGFS) && defined(CONFIG_PM_SLEEP) if (WARN_ON(mvm->d3_test_active)) return -EIO; #endif /* * Only synchronous commands can wait for status, * we use WANT_SKB so the caller can't. */ if (WARN_ONCE(cmd->flags & (CMD_ASYNC | CMD_WANT_SKB), "cmd flags %x", cmd->flags)) return -EINVAL; cmd->flags |= CMD_WANT_SKB; ret = iwl_trans_send_cmd(mvm->trans, cmd); if (ret == -ERFKILL) { /* * The command failed because of RFKILL, don't update * the status, leave it as success and return 0. 
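* (This relies on the caller having initialized *status to the success * value, as noted in the comment above this function.)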
*/ return 0; } else if (ret) { return ret; } pkt = cmd->resp_pkt; resp_len = iwl_rx_packet_payload_len(pkt); if (WARN_ON_ONCE(resp_len != sizeof(*resp))) { ret = -EIO; goto out_free_resp; } resp = (void *)pkt->data; *status = le32_to_cpu(resp->status); out_free_resp: iwl_free_resp(cmd); return ret; } /* * We assume that the caller set the status to the success value */ int iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u32 id, u16 len, const void *data, u32 *status) { struct iwl_host_cmd cmd = { .id = id, .len = { len, }, .data = { data, }, }; return iwl_mvm_send_cmd_status(mvm, &cmd, status); } #define IWL_DECLARE_RATE_INFO(r) \ [IWL_RATE_##r##M_INDEX] = IWL_RATE_##r##M_PLCP /* * Translate from fw_rate_index (IWL_RATE_XXM_INDEX) to PLCP */ static const u8 fw_rate_idx_to_plcp[IWL_RATE_COUNT] = { IWL_DECLARE_RATE_INFO(1), IWL_DECLARE_RATE_INFO(2), IWL_DECLARE_RATE_INFO(5), IWL_DECLARE_RATE_INFO(11), IWL_DECLARE_RATE_INFO(6), IWL_DECLARE_RATE_INFO(9), IWL_DECLARE_RATE_INFO(12), IWL_DECLARE_RATE_INFO(18), IWL_DECLARE_RATE_INFO(24), IWL_DECLARE_RATE_INFO(36), IWL_DECLARE_RATE_INFO(48), IWL_DECLARE_RATE_INFO(54), }; int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags, enum nl80211_band band) { int rate = rate_n_flags & RATE_LEGACY_RATE_MSK; int idx; int band_offset = 0; /* Legacy rate format, search for match in table */ if (band == NL80211_BAND_5GHZ) band_offset = IWL_FIRST_OFDM_RATE; for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++) if (fw_rate_idx_to_plcp[idx] == rate) return idx - band_offset; return -1; } u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx) { /* Get PLCP rate for tx_cmd->rate_n_flags */ return fw_rate_idx_to_plcp[rate_idx]; } u8 iwl_mvm_mac80211_ac_to_ucode_ac(enum ieee80211_ac_numbers ac) { static const u8 mac80211_ac_to_ucode_ac[] = { AC_VO, AC_VI, AC_BE, AC_BK }; return mac80211_ac_to_ucode_ac[ac]; } void iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_error_resp *err_resp = (void *)pkt->data; IWL_ERR(mvm, "FW Error notification: type 0x%08X cmd_id 0x%02X\n", le32_to_cpu(err_resp->error_type), err_resp->cmd_id); IWL_ERR(mvm, "FW Error notification: seq 0x%04X service 0x%08X\n", le16_to_cpu(err_resp->bad_cmd_seq_num), le32_to_cpu(err_resp->error_service)); IWL_ERR(mvm, "FW Error notification: timestamp 0x%016llX\n", le64_to_cpu(err_resp->timestamp)); } /* * Returns the first antenna as ANT_[ABC], as defined in iwl-config.h. * The parameter should also be a combination of ANT_[ABC]. */ u8 first_antenna(u8 mask) { BUILD_BUG_ON(ANT_A != BIT(0)); /* using ffs is wrong if not */ if (WARN_ON_ONCE(!mask)) /* ffs will return 0 if mask is zeroed */ return BIT(0); return BIT(ffs(mask) - 1); } /* * Toggles between TX antennas to send the probe request on. * Receives the bitmask of valid TX antennas and the *index* used * for the last TX, and returns the next valid *index* to use. * In order to set it in the tx_cmd, the caller must use BIT(idx).
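* Typical (illustrative) usage, mirroring the probe request path: * idx = iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm), idx); * rate_flags = BIT(idx) << RATE_MCS_ANT_POS;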
*/ u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx) { u8 ind = last_idx; int i; for (i = 0; i < MAX_ANT_NUM; i++) { ind = (ind + 1) % MAX_ANT_NUM; if (valid & BIT(ind)) return ind; } WARN_ONCE(1, "Failed to toggle between antennas 0x%x", valid); return last_idx; } #define FW_SYSASSERT_CPU_MASK 0xf0000000 static const struct { const char *name; u8 num; } advanced_lookup[] = { { "NMI_INTERRUPT_WDG", 0x34 }, { "SYSASSERT", 0x35 }, { "UCODE_VERSION_MISMATCH", 0x37 }, { "BAD_COMMAND", 0x38 }, { "BAD_COMMAND", 0x39 }, { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C }, { "FATAL_ERROR", 0x3D }, { "NMI_TRM_HW_ERR", 0x46 }, { "NMI_INTERRUPT_TRM", 0x4C }, { "NMI_INTERRUPT_BREAK_POINT", 0x54 }, { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C }, { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 }, { "NMI_INTERRUPT_HOST", 0x66 }, { "NMI_INTERRUPT_LMAC_FATAL", 0x70 }, { "NMI_INTERRUPT_UMAC_FATAL", 0x71 }, { "NMI_INTERRUPT_OTHER_LMAC_FATAL", 0x73 }, { "NMI_INTERRUPT_ACTION_PT", 0x7C }, { "NMI_INTERRUPT_UNKNOWN", 0x84 }, { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 }, { "ADVANCED_SYSASSERT", 0 }, }; static const char *desc_lookup(u32 num) { int i; for (i = 0; i < ARRAY_SIZE(advanced_lookup) - 1; i++) if (advanced_lookup[i].num == (num & ~FW_SYSASSERT_CPU_MASK)) return advanced_lookup[i].name; /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */ return advanced_lookup[i].name; } /* * Note: This structure is read from the device with IO accesses, * and the reading already does the endian conversion. As it is * read with u32-sized accesses, any members with a different size * need to be ordered correctly though! */ struct iwl_error_event_table_v1 { u32 valid; /* (nonzero) valid, (0) log is empty */ u32 error_id; /* type of error */ u32 pc; /* program counter */ u32 blink1; /* branch link */ u32 blink2; /* branch link */ u32 ilink1; /* interrupt link */ u32 ilink2; /* interrupt link */ u32 data1; /* error-specific data */ u32 data2; /* error-specific data */ u32 data3; /* error-specific data */ u32 bcon_time; /* beacon timer */ u32 tsf_low; /* network timestamp function timer */ u32 tsf_hi; /* network timestamp function timer */ u32 gp1; /* GP1 timer register */ u32 gp2; /* GP2 timer register */ u32 gp3; /* GP3 timer register */ u32 ucode_ver; /* uCode version */ u32 hw_ver; /* HW Silicon version */ u32 brd_ver; /* HW board version */ u32 log_pc; /* log program counter */ u32 frame_ptr; /* frame pointer */ u32 stack_ptr; /* stack pointer */ u32 hcmd; /* last host command header */ u32 isr0; /* isr status register LMPM_NIC_ISR0: * rxtx_flag */ u32 isr1; /* isr status register LMPM_NIC_ISR1: * host_flag */ u32 isr2; /* isr status register LMPM_NIC_ISR2: * enc_flag */ u32 isr3; /* isr status register LMPM_NIC_ISR3: * time_flag */ u32 isr4; /* isr status register LMPM_NIC_ISR4: * wico interrupt */ u32 isr_pref; /* isr status register LMPM_NIC_PREF_STAT */ u32 wait_event; /* wait event() caller address */ u32 l2p_control; /* L2pControlField */ u32 l2p_duration; /* L2pDurationField */ u32 l2p_mhvalid; /* L2pMhValidBits */ u32 l2p_addr_match; /* L2pAddrMatchStat */ u32 lmpm_pmg_sel; /* indicate which clocks are turned on * (LMPM_PMG_SEL) */ u32 u_timestamp; /* indicate when the date and time of the * compilation */ u32 flow_handler; /* FH read/write pointers, RX credit */ } __packed /* LOG_ERROR_TABLE_API_S_VER_1 */; struct iwl_error_event_table { u32 valid; /* (nonzero) valid, (0) log is empty */ u32 error_id; /* type of error */ u32 trm_hw_status0; /* TRM HW status */ u32 trm_hw_status1; /* TRM HW status */ u32 
blink2; /* branch link */ u32 ilink1; /* interrupt link */ u32 ilink2; /* interrupt link */ u32 data1; /* error-specific data */ u32 data2; /* error-specific data */ u32 data3; /* error-specific data */ u32 bcon_time; /* beacon timer */ u32 tsf_low; /* network timestamp function timer */ u32 tsf_hi; /* network timestamp function timer */ u32 gp1; /* GP1 timer register */ u32 gp2; /* GP2 timer register */ u32 fw_rev_type; /* firmware revision type */ u32 major; /* uCode version major */ u32 minor; /* uCode version minor */ u32 hw_ver; /* HW Silicon version */ u32 brd_ver; /* HW board version */ u32 log_pc; /* log program counter */ u32 frame_ptr; /* frame pointer */ u32 stack_ptr; /* stack pointer */ u32 hcmd; /* last host command header */ u32 isr0; /* isr status register LMPM_NIC_ISR0: * rxtx_flag */ u32 isr1; /* isr status register LMPM_NIC_ISR1: * host_flag */ u32 isr2; /* isr status register LMPM_NIC_ISR2: * enc_flag */ u32 isr3; /* isr status register LMPM_NIC_ISR3: * time_flag */ u32 isr4; /* isr status register LMPM_NIC_ISR4: * wico interrupt */ u32 last_cmd_id; /* last HCMD id handled by the firmware */ u32 wait_event; /* wait event() caller address */ u32 l2p_control; /* L2pControlField */ u32 l2p_duration; /* L2pDurationField */ u32 l2p_mhvalid; /* L2pMhValidBits */ u32 l2p_addr_match; /* L2pAddrMatchStat */ u32 lmpm_pmg_sel; /* indicate which clocks are turned on * (LMPM_PMG_SEL) */ u32 u_timestamp; /* indicate when the date and time of the * compilation */ u32 flow_handler; /* FH read/write pointers, RX credit */ } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */; /* * UMAC error struct - relevant starting from family 8000 chip. * Note: This structure is read from the device with IO accesses, * and the reading already does the endian conversion. As it is * read with u32-sized accesses, any members with a different size * need to be ordered correctly though! 
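* (All members of the UMAC table below are u32, so no special ordering * is required there.)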
*/ struct iwl_umac_error_event_table { u32 valid; /* (nonzero) valid, (0) log is empty */ u32 error_id; /* type of error */ u32 blink1; /* branch link */ u32 blink2; /* branch link */ u32 ilink1; /* interrupt link */ u32 ilink2; /* interrupt link */ u32 data1; /* error-specific data */ u32 data2; /* error-specific data */ u32 data3; /* error-specific data */ u32 umac_major; u32 umac_minor; u32 frame_pointer; /* core register 27*/ u32 stack_pointer; /* core register 28 */ u32 cmd_header; /* latest host cmd sent to UMAC */ u32 nic_isr_pref; /* ISR status register */ } __packed; #define ERROR_START_OFFSET (1 * sizeof(u32)) #define ERROR_ELEM_SIZE (7 * sizeof(u32)) static void iwl_mvm_dump_umac_error_log(struct iwl_mvm *mvm) { struct iwl_trans *trans = mvm->trans; struct iwl_umac_error_event_table table; u32 base = mvm->trans->dbg.umac_error_event_table; if (!mvm->support_umac_log && !(mvm->trans->dbg.error_event_table_tlv_status & IWL_ERROR_EVENT_TABLE_UMAC)) return; iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table)); if (table.valid) mvm->fwrt.dump.umac_err_id = table.error_id; if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) { IWL_ERR(trans, "Start IWL Error Log Dump:\n"); IWL_ERR(trans, "Status: 0x%08lX, count: %d\n", mvm->status, table.valid); } IWL_ERR(mvm, "0x%08X | %s\n", table.error_id, desc_lookup(table.error_id)); IWL_ERR(mvm, "0x%08X | umac branchlink1\n", table.blink1); IWL_ERR(mvm, "0x%08X | umac branchlink2\n", table.blink2); IWL_ERR(mvm, "0x%08X | umac interruptlink1\n", table.ilink1); IWL_ERR(mvm, "0x%08X | umac interruptlink2\n", table.ilink2); IWL_ERR(mvm, "0x%08X | umac data1\n", table.data1); IWL_ERR(mvm, "0x%08X | umac data2\n", table.data2); IWL_ERR(mvm, "0x%08X | umac data3\n", table.data3); IWL_ERR(mvm, "0x%08X | umac major\n", table.umac_major); IWL_ERR(mvm, "0x%08X | umac minor\n", table.umac_minor); IWL_ERR(mvm, "0x%08X | frame pointer\n", table.frame_pointer); IWL_ERR(mvm, "0x%08X | stack pointer\n", table.stack_pointer); IWL_ERR(mvm, "0x%08X | last host cmd\n", table.cmd_header); IWL_ERR(mvm, "0x%08X | isr status reg\n", table.nic_isr_pref); } static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u8 lmac_num) { struct iwl_trans *trans = mvm->trans; struct iwl_error_event_table table; u32 val, base = mvm->trans->dbg.lmac_error_event_table[lmac_num]; if (mvm->fwrt.cur_fw_img == IWL_UCODE_INIT) { if (!base) base = mvm->fw->init_errlog_ptr; } else { if (!base) base = mvm->fw->inst_errlog_ptr; } if (base < 0x400000) { IWL_ERR(mvm, "Not valid error log pointer 0x%08X for %s uCode\n", base, (mvm->fwrt.cur_fw_img == IWL_UCODE_INIT) ? 
"Init" : "RT"); return; } /* check if there is a HW error */ val = iwl_trans_read_mem32(trans, base); if (((val & ~0xf) == 0xa5a5a5a0) || ((val & ~0xf) == 0x5a5a5a50)) { int err; IWL_ERR(trans, "HW error, resetting before reading\n"); /* reset the device */ iwl_trans_sw_reset(trans); err = iwl_finish_nic_init(trans); if (err) return; } iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table)); if (table.valid) mvm->fwrt.dump.lmac_err_id[lmac_num] = table.error_id; if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) { IWL_ERR(trans, "Start IWL Error Log Dump:\n"); IWL_ERR(trans, "Status: 0x%08lX, count: %d\n", mvm->status, table.valid); } /* Do not change this output - scripts rely on it */ IWL_ERR(mvm, "Loaded firmware version: %s\n", mvm->fw->fw_version); IWL_ERR(mvm, "0x%08X | %-28s\n", table.error_id, desc_lookup(table.error_id)); IWL_ERR(mvm, "0x%08X | trm_hw_status0\n", table.trm_hw_status0); IWL_ERR(mvm, "0x%08X | trm_hw_status1\n", table.trm_hw_status1); IWL_ERR(mvm, "0x%08X | branchlink2\n", table.blink2); IWL_ERR(mvm, "0x%08X | interruptlink1\n", table.ilink1); IWL_ERR(mvm, "0x%08X | interruptlink2\n", table.ilink2); IWL_ERR(mvm, "0x%08X | data1\n", table.data1); IWL_ERR(mvm, "0x%08X | data2\n", table.data2); IWL_ERR(mvm, "0x%08X | data3\n", table.data3); IWL_ERR(mvm, "0x%08X | beacon time\n", table.bcon_time); IWL_ERR(mvm, "0x%08X | tsf low\n", table.tsf_low); IWL_ERR(mvm, "0x%08X | tsf hi\n", table.tsf_hi); IWL_ERR(mvm, "0x%08X | time gp1\n", table.gp1); IWL_ERR(mvm, "0x%08X | time gp2\n", table.gp2); IWL_ERR(mvm, "0x%08X | uCode revision type\n", table.fw_rev_type); IWL_ERR(mvm, "0x%08X | uCode version major\n", table.major); IWL_ERR(mvm, "0x%08X | uCode version minor\n", table.minor); IWL_ERR(mvm, "0x%08X | hw version\n", table.hw_ver); IWL_ERR(mvm, "0x%08X | board version\n", table.brd_ver); IWL_ERR(mvm, "0x%08X | hcmd\n", table.hcmd); IWL_ERR(mvm, "0x%08X | isr0\n", table.isr0); IWL_ERR(mvm, "0x%08X | isr1\n", table.isr1); IWL_ERR(mvm, "0x%08X | isr2\n", table.isr2); IWL_ERR(mvm, "0x%08X | isr3\n", table.isr3); IWL_ERR(mvm, "0x%08X | isr4\n", table.isr4); IWL_ERR(mvm, "0x%08X | last cmd Id\n", table.last_cmd_id); IWL_ERR(mvm, "0x%08X | wait_event\n", table.wait_event); IWL_ERR(mvm, "0x%08X | l2p_control\n", table.l2p_control); IWL_ERR(mvm, "0x%08X | l2p_duration\n", table.l2p_duration); IWL_ERR(mvm, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid); IWL_ERR(mvm, "0x%08X | l2p_addr_match\n", table.l2p_addr_match); IWL_ERR(mvm, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel); IWL_ERR(mvm, "0x%08X | timestamp\n", table.u_timestamp); IWL_ERR(mvm, "0x%08X | flow_handler\n", table.flow_handler); } void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm) { if (!test_bit(STATUS_DEVICE_ENABLED, &mvm->trans->status)) { IWL_ERR(mvm, "DEVICE_ENABLED bit is not set. 
Aborting dump.\n"); return; } iwl_mvm_dump_lmac_error_log(mvm, 0); if (mvm->trans->dbg.lmac_error_event_table[1]) iwl_mvm_dump_lmac_error_log(mvm, 1); iwl_mvm_dump_umac_error_log(mvm); iwl_fw_error_print_fseq_regs(&mvm->fwrt); } int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id, int tid, int frame_limit, u16 ssn) { struct iwl_scd_txq_cfg_cmd cmd = { .scd_queue = queue, .action = SCD_CFG_ENABLE_QUEUE, .window = frame_limit, .sta_id = sta_id, .ssn = cpu_to_le16(ssn), .tx_fifo = fifo, .aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE || queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE), .tid = tid, }; int ret; if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) return -EINVAL; if (WARN(mvm->queue_info[queue].tid_bitmap == 0, "Trying to reconfig unallocated queue %d\n", queue)) return -ENXIO; IWL_DEBUG_TX_QUEUES(mvm, "Reconfig SCD for TXQ #%d\n", queue); ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd); WARN_ONCE(ret, "Failed to re-configure queue %d on FIFO %d, ret=%d\n", queue, fifo, ret); return ret; } /** * iwl_mvm_send_lq_cmd() - Send link quality command * @mvm: Driver data. * @lq: Link quality command to send. * * The link quality command is sent as the last step of station creation. * This is the special case in which init is set, and we call a callback in * that case to clear the state indicating that station creation is in * progress. */ int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq) { struct iwl_host_cmd cmd = { .id = LQ_CMD, .len = { sizeof(struct iwl_lq_cmd), }, .flags = CMD_ASYNC, .data = { lq, }, }; if (WARN_ON(lq->sta_id == IWL_MVM_INVALID_STA || iwl_mvm_has_tlc_offload(mvm))) return -EINVAL; return iwl_mvm_send_cmd(mvm, &cmd); } /** * iwl_mvm_update_smps - Get a request to change the SMPS mode * @mvm: Driver data. * @vif: The interface on which the SMPS request was made. * @req_type: The part of the driver that requested the change. * @smps_request: The requested SMPS mode. * * Get a request to change the SMPS mode, * and reconcile it with all the other requests in the driver. */ void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif, enum iwl_mvm_smps_type_request req_type, enum ieee80211_smps_mode smps_request) { struct iwl_mvm_vif *mvmvif; enum ieee80211_smps_mode smps_mode; int i; lockdep_assert_held(&mvm->mutex); /* SMPS is irrelevant for NICs that don't have at least 2 RX antennas */ if (num_of_ant(iwl_mvm_get_valid_rx_ant(mvm)) == 1) return; if (vif->type == NL80211_IFTYPE_AP) smps_mode = IEEE80211_SMPS_OFF; else smps_mode = IEEE80211_SMPS_AUTOMATIC; mvmvif = iwl_mvm_vif_from_mac80211(vif); mvmvif->smps_requests[req_type] = smps_request; for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) { if (mvmvif->smps_requests[i] == IEEE80211_SMPS_STATIC) { smps_mode = IEEE80211_SMPS_STATIC; break; } if (mvmvif->smps_requests[i] == IEEE80211_SMPS_DYNAMIC) smps_mode = IEEE80211_SMPS_DYNAMIC; } ieee80211_request_smps(vif, smps_mode); } int iwl_mvm_request_statistics(struct iwl_mvm *mvm, bool clear) { struct iwl_statistics_cmd scmd = { .flags = clear ?
cpu_to_le32(IWL_STATISTICS_FLG_CLEAR) : 0, }; struct iwl_host_cmd cmd = { .id = STATISTICS_CMD, .len[0] = sizeof(scmd), .data[0] = &scmd, .flags = CMD_WANT_SKB, }; int ret; ret = iwl_mvm_send_cmd(mvm, &cmd); if (ret) return ret; iwl_mvm_handle_rx_statistics(mvm, cmd.resp_pkt); iwl_free_resp(&cmd); if (clear) iwl_mvm_accu_radio_stats(mvm); return 0; } void iwl_mvm_accu_radio_stats(struct iwl_mvm *mvm) { mvm->accu_radio_stats.rx_time += mvm->radio_stats.rx_time; mvm->accu_radio_stats.tx_time += mvm->radio_stats.tx_time; mvm->accu_radio_stats.on_time_rf += mvm->radio_stats.on_time_rf; mvm->accu_radio_stats.on_time_scan += mvm->radio_stats.on_time_scan; } static void iwl_mvm_diversity_iter(void *_data, u8 *mac, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); bool *result = _data; int i; for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) { if (mvmvif->smps_requests[i] == IEEE80211_SMPS_STATIC || mvmvif->smps_requests[i] == IEEE80211_SMPS_DYNAMIC) *result = false; } } bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm) { bool result = true; lockdep_assert_held(&mvm->mutex); if (num_of_ant(iwl_mvm_get_valid_rx_ant(mvm)) == 1) return false; if (mvm->cfg->rx_with_siso_diversity) return false; ieee80211_iterate_active_interfaces_atomic( mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_diversity_iter, &result); return result; } void iwl_mvm_send_low_latency_cmd(struct iwl_mvm *mvm, bool low_latency, u16 mac_id) { struct iwl_mac_low_latency_cmd cmd = { .mac_id = cpu_to_le32(mac_id) }; if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA)) return; if (low_latency) { /* currently we don't care about the direction */ cmd.low_latency_rx = 1; cmd.low_latency_tx = 1; } if (iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(LOW_LATENCY_CMD, MAC_CONF_GROUP, 0), 0, sizeof(cmd), &cmd)) IWL_ERR(mvm, "Failed to send low latency command\n"); } int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif, bool low_latency, enum iwl_mvm_low_latency_cause cause) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int res; bool prev; lockdep_assert_held(&mvm->mutex); prev = iwl_mvm_vif_low_latency(mvmvif); iwl_mvm_vif_set_low_latency(mvmvif, low_latency, cause); low_latency = iwl_mvm_vif_low_latency(mvmvif); if (low_latency == prev) return 0; iwl_mvm_send_low_latency_cmd(mvm, low_latency, mvmvif->id); res = iwl_mvm_update_quotas(mvm, false, NULL); if (res) return res; iwl_mvm_bt_coex_vif_change(mvm); #ifdef CPTCFG_IWLMVM_VENDOR_CMDS iwl_mvm_send_tcm_event(mvm, vif); #endif return iwl_mvm_power_update_mac(mvm); } struct iwl_mvm_low_latency_iter { bool result; bool result_per_band[NUM_NL80211_BANDS]; }; static void iwl_mvm_ll_iter(void *_data, u8 *mac, struct ieee80211_vif *vif) { struct iwl_mvm_low_latency_iter *result = _data; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); enum nl80211_band band; if (iwl_mvm_vif_low_latency(mvmvif)) { result->result = true; if (!mvmvif->phy_ctxt) return; band = mvmvif->phy_ctxt->channel->band; result->result_per_band[band] = true; } } bool iwl_mvm_low_latency(struct iwl_mvm *mvm) { struct iwl_mvm_low_latency_iter data = {}; ieee80211_iterate_active_interfaces_atomic( mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_ll_iter, &data); return data.result; } bool iwl_mvm_low_latency_band(struct iwl_mvm *mvm, enum nl80211_band band) { struct iwl_mvm_low_latency_iter data = {}; ieee80211_iterate_active_interfaces_atomic( mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_ll_iter, &data); return 
data.result_per_band[band]; } struct iwl_bss_iter_data { struct ieee80211_vif *vif; bool error; }; static void iwl_mvm_bss_iface_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif) { struct iwl_bss_iter_data *data = _data; if (vif->type != NL80211_IFTYPE_STATION || vif->p2p) return; if (data->vif) { data->error = true; return; } data->vif = vif; } struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm) { struct iwl_bss_iter_data bss_iter_data = {}; ieee80211_iterate_active_interfaces_atomic( mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_bss_iface_iterator, &bss_iter_data); if (bss_iter_data.error) { IWL_ERR(mvm, "More than one managed interface active!\n"); return ERR_PTR(-EINVAL); } return bss_iter_data.vif; } struct iwl_sta_iter_data { bool assoc; }; static void iwl_mvm_sta_iface_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif) { struct iwl_sta_iter_data *data = _data; if (vif->type != NL80211_IFTYPE_STATION) return; if (vif->bss_conf.assoc) data->assoc = true; } bool iwl_mvm_is_vif_assoc(struct iwl_mvm *mvm) { struct iwl_sta_iter_data data = { .assoc = false, }; ieee80211_iterate_active_interfaces_atomic(mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_sta_iface_iterator, &data); return data.assoc; } unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm, struct ieee80211_vif *vif, bool tdls, bool cmd_q) { struct iwl_fw_dbg_trigger_tlv *trigger; struct iwl_fw_dbg_trigger_txq_timer *txq_timer; unsigned int default_timeout = cmd_q ? IWL_DEF_WD_TIMEOUT : mvm->cfg->base_params->wd_timeout; if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS)) { /* * We can't know when the station is asleep or awake, so we * must disable the queue hang detection. */ if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_STA_PM_NOTIF) && vif && vif->type == NL80211_IFTYPE_AP) return IWL_WATCHDOG_DISABLED; return iwlmvm_mod_params.tfd_q_hang_detect ? 
default_timeout : IWL_WATCHDOG_DISABLED; } trigger = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS); txq_timer = (void *)trigger->data; if (tdls) return le32_to_cpu(txq_timer->tdls); if (cmd_q) return le32_to_cpu(txq_timer->command_queue); if (WARN_ON(!vif)) return default_timeout; switch (ieee80211_vif_type_p2p(vif)) { case NL80211_IFTYPE_ADHOC: return le32_to_cpu(txq_timer->ibss); case NL80211_IFTYPE_STATION: return le32_to_cpu(txq_timer->bss); case NL80211_IFTYPE_AP: return le32_to_cpu(txq_timer->softap); case NL80211_IFTYPE_P2P_CLIENT: return le32_to_cpu(txq_timer->p2p_client); case NL80211_IFTYPE_P2P_GO: return le32_to_cpu(txq_timer->p2p_go); case NL80211_IFTYPE_P2P_DEVICE: return le32_to_cpu(txq_timer->p2p_device); case NL80211_IFTYPE_MONITOR: return default_timeout; default: WARN_ON(1); return mvm->cfg->base_params->wd_timeout; } } void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif, const char *errmsg) { struct iwl_fw_dbg_trigger_tlv *trig; struct iwl_fw_dbg_trigger_mlme *trig_mlme; trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif), FW_DBG_TRIGGER_MLME); if (!trig) goto out; trig_mlme = (void *)trig->data; if (trig_mlme->stop_connection_loss && --trig_mlme->stop_connection_loss) goto out; iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, "%s", errmsg); out: ieee80211_connection_loss(vif); } void iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm, struct ieee80211_vif *vif, const struct ieee80211_sta *sta, u16 tid) { struct iwl_fw_dbg_trigger_tlv *trig; struct iwl_fw_dbg_trigger_ba *ba_trig; trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif), FW_DBG_TRIGGER_BA); if (!trig) return; ba_trig = (void *)trig->data; if (!(le16_to_cpu(ba_trig->frame_timeout) & BIT(tid))) return; iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, "Frame from %pM timed out, tid %d", sta->addr, tid); } u8 iwl_mvm_tcm_load_percentage(u32 airtime, u32 elapsed) { if (!elapsed) return 0; return (100 * airtime / elapsed) / USEC_PER_MSEC; } static enum iwl_mvm_traffic_load iwl_mvm_tcm_load(struct iwl_mvm *mvm, u32 airtime, unsigned long elapsed) { u8 load = iwl_mvm_tcm_load_percentage(airtime, elapsed); if (load > IWL_MVM_TCM_LOAD_HIGH_THRESH) return IWL_MVM_TRAFFIC_HIGH; if (load > IWL_MVM_TCM_LOAD_MEDIUM_THRESH) return IWL_MVM_TRAFFIC_MEDIUM; return IWL_MVM_TRAFFIC_LOW; } struct iwl_mvm_tcm_iter_data { struct iwl_mvm *mvm; bool any_sent; }; static void iwl_mvm_tcm_iter(void *_data, u8 *mac, struct ieee80211_vif *vif) { struct iwl_mvm_tcm_iter_data *data = _data; struct iwl_mvm *mvm = data->mvm; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); bool low_latency, prev = mvmvif->low_latency & LOW_LATENCY_TRAFFIC; if (mvmvif->id >= NUM_MAC_INDEX_DRIVER) return; low_latency = mvm->tcm.result.low_latency[mvmvif->id]; if (!mvm->tcm.result.change[mvmvif->id] && prev == low_latency) { iwl_mvm_update_quotas(mvm, false, NULL); return; } if (prev != low_latency) { /* this sends traffic load and updates quota as well */ iwl_mvm_update_low_latency(mvm, vif, low_latency, LOW_LATENCY_TRAFFIC); } else { #ifdef CPTCFG_IWLMVM_VENDOR_CMDS iwl_mvm_send_tcm_event(mvm, vif); #endif iwl_mvm_update_quotas(mvm, false, NULL); } data->any_sent = true; } static void iwl_mvm_tcm_results(struct iwl_mvm *mvm) { struct iwl_mvm_tcm_iter_data data = { .mvm = mvm, .any_sent = false, }; mutex_lock(&mvm->mutex); ieee80211_iterate_active_interfaces( mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_tcm_iter, &data); #ifdef CPTCFG_IWLMVM_VENDOR_CMDS /* send global only */ if 
(mvm->tcm.result.global_change && !data.any_sent) iwl_mvm_send_tcm_event(mvm, NULL); #endif if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) iwl_mvm_config_scan(mvm); mutex_unlock(&mvm->mutex); } static void iwl_mvm_tcm_uapsd_nonagg_detected_wk(struct work_struct *wk) { struct iwl_mvm *mvm; struct iwl_mvm_vif *mvmvif; struct ieee80211_vif *vif; mvmvif = container_of(wk, struct iwl_mvm_vif, uapsd_nonagg_detected_wk.work); vif = container_of((void *)mvmvif, struct ieee80211_vif, drv_priv); mvm = mvmvif->mvm; if (mvm->tcm.data[mvmvif->id].opened_rx_ba_sessions) return; /* remember that this AP is broken */ memcpy(mvm->uapsd_noagg_bssids[mvm->uapsd_noagg_bssid_write_idx].addr, vif->bss_conf.bssid, ETH_ALEN); mvm->uapsd_noagg_bssid_write_idx++; if (mvm->uapsd_noagg_bssid_write_idx >= IWL_MVM_UAPSD_NOAGG_LIST_LEN) mvm->uapsd_noagg_bssid_write_idx = 0; iwl_mvm_connection_loss(mvm, vif, "AP isn't using AMPDU with uAPSD enabled"); } static void iwl_mvm_uapsd_agg_disconnect(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); if (vif->type != NL80211_IFTYPE_STATION) return; if (!vif->bss_conf.assoc) return; if (!mvmvif->queue_params[IEEE80211_AC_VO].uapsd && !mvmvif->queue_params[IEEE80211_AC_VI].uapsd && !mvmvif->queue_params[IEEE80211_AC_BE].uapsd && !mvmvif->queue_params[IEEE80211_AC_BK].uapsd) return; if (mvm->tcm.data[mvmvif->id].uapsd_nonagg_detect.detected) return; mvm->tcm.data[mvmvif->id].uapsd_nonagg_detect.detected = true; IWL_INFO(mvm, "detected AP should do aggregation but isn't, likely due to U-APSD\n"); schedule_delayed_work(&mvmvif->uapsd_nonagg_detected_wk, 15 * HZ); } static void iwl_mvm_check_uapsd_agg_expected_tpt(struct iwl_mvm *mvm, unsigned int elapsed, int mac) { u64 bytes = mvm->tcm.data[mac].uapsd_nonagg_detect.rx_bytes; u64 tpt; unsigned long rate; struct ieee80211_vif *vif; rate = ewma_rate_read(&mvm->tcm.data[mac].uapsd_nonagg_detect.rate); if (!rate || mvm->tcm.data[mac].opened_rx_ba_sessions || mvm->tcm.data[mac].uapsd_nonagg_detect.detected) return; if (iwl_mvm_has_new_rx_api(mvm)) { tpt = 8 * bytes; /* kbps */ do_div(tpt, elapsed); rate *= 1000; /* kbps */ if (tpt < 22 * rate / 100) return; } else { /* * the rate here is actually the threshold, in 100Kbps units, * so do the needed conversion from bytes to 100Kbps: * 100kb = bits / (100 * 1000), * 100kbps = 100kb / (msecs / 1000) == * (bits / (100 * 1000)) / (msecs / 1000) == * bits / (100 * msecs) */ tpt = (8 * bytes); do_div(tpt, elapsed * 100); if (tpt < rate) return; } rcu_read_lock(); vif = rcu_dereference(mvm->vif_id_to_mac[mac]); if (vif) iwl_mvm_uapsd_agg_disconnect(mvm, vif); rcu_read_unlock(); } static void iwl_mvm_tcm_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); u32 *band = _data; if (!mvmvif->phy_ctxt) return; band[mvmvif->id] = mvmvif->phy_ctxt->channel->band; } static unsigned long iwl_mvm_calc_tcm_stats(struct iwl_mvm *mvm, unsigned long ts, bool handle_uapsd) { unsigned int elapsed = jiffies_to_msecs(ts - mvm->tcm.ts); unsigned int uapsd_elapsed = jiffies_to_msecs(ts - mvm->tcm.uapsd_nonagg_ts); u32 total_airtime = 0; u32 band_airtime[NUM_NL80211_BANDS] = {0}; u32 band[NUM_MAC_INDEX_DRIVER] = {0}; int ac, mac, i; bool low_latency = false; enum iwl_mvm_traffic_load load, band_load; bool handle_ll = time_after(ts, mvm->tcm.ll_ts + MVM_LL_PERIOD); if (handle_ll) mvm->tcm.ll_ts = ts; if (handle_uapsd) mvm->tcm.uapsd_nonagg_ts = ts; mvm->tcm.result.elapsed 
= elapsed; ieee80211_iterate_active_interfaces_atomic(mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_tcm_iterator, &band); for (mac = 0; mac < NUM_MAC_INDEX_DRIVER; mac++) { struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac]; u32 vo_vi_pkts = 0; u32 airtime = mdata->rx.airtime + mdata->tx.airtime; total_airtime += airtime; band_airtime[band[mac]] += airtime; load = iwl_mvm_tcm_load(mvm, airtime, elapsed); mvm->tcm.result.change[mac] = load != mvm->tcm.result.load[mac]; mvm->tcm.result.load[mac] = load; mvm->tcm.result.airtime[mac] = airtime; for (ac = IEEE80211_AC_VO; ac <= IEEE80211_AC_VI; ac++) vo_vi_pkts += mdata->rx.pkts[ac] + mdata->tx.pkts[ac]; /* enable immediately with enough packets but defer disabling */ if (vo_vi_pkts > IWL_MVM_TCM_LOWLAT_ENABLE_THRESH) mvm->tcm.result.low_latency[mac] = true; else if (handle_ll) mvm->tcm.result.low_latency[mac] = false; if (handle_ll) { /* clear old data */ memset(&mdata->rx.pkts, 0, sizeof(mdata->rx.pkts)); memset(&mdata->tx.pkts, 0, sizeof(mdata->tx.pkts)); } low_latency |= mvm->tcm.result.low_latency[mac]; if (!mvm->tcm.result.low_latency[mac] && handle_uapsd) iwl_mvm_check_uapsd_agg_expected_tpt(mvm, uapsd_elapsed, mac); /* clear old data */ memset(&mdata->rx.airtime, 0, sizeof(mdata->rx.airtime)); memset(&mdata->tx.airtime, 0, sizeof(mdata->tx.airtime)); if (handle_uapsd) mdata->uapsd_nonagg_detect.rx_bytes = 0; } load = iwl_mvm_tcm_load(mvm, total_airtime, elapsed); mvm->tcm.result.global_change = load != mvm->tcm.result.global_load; mvm->tcm.result.global_load = load; for (i = 0; i < NUM_NL80211_BANDS; i++) { band_load = iwl_mvm_tcm_load(mvm, band_airtime[i], elapsed); mvm->tcm.result.band_load[i] = band_load; } /* * If the current load isn't low we need to force re-evaluation * in the TCM period, so that we can return to low load if there * was no traffic at all (and thus iwl_mvm_recalc_tcm didn't get * triggered by traffic). */ if (load != IWL_MVM_TRAFFIC_LOW) return MVM_TCM_PERIOD; /* * If low-latency is active we need to force re-evaluation after * (the longer) MVM_LL_PERIOD, so that we can disable low-latency * when there's no traffic at all. */ if (low_latency) return MVM_LL_PERIOD; /* * Otherwise, we don't need to run the work struct because we're * in the default "idle" state - traffic indication is low (which * also covers the "no traffic" case) and low-latency is disabled * so there's no state that may need to be disabled when there's * no traffic at all. * * Note that this has no impact on the regular scheduling of the * updates triggered by traffic - those happen whenever one of the * two timeouts expire (if there's traffic at all.) 
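* Returning 0 tells iwl_mvm_recalc_tcm() that no delayed work needs to * be scheduled.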
*/ return 0; } void iwl_mvm_recalc_tcm(struct iwl_mvm *mvm) { unsigned long ts = jiffies; bool handle_uapsd = time_after(ts, mvm->tcm.uapsd_nonagg_ts + msecs_to_jiffies(IWL_MVM_UAPSD_NONAGG_PERIOD)); spin_lock(&mvm->tcm.lock); if (mvm->tcm.paused || !time_after(ts, mvm->tcm.ts + MVM_TCM_PERIOD)) { spin_unlock(&mvm->tcm.lock); return; } spin_unlock(&mvm->tcm.lock); if (handle_uapsd && iwl_mvm_has_new_rx_api(mvm)) { mutex_lock(&mvm->mutex); if (iwl_mvm_request_statistics(mvm, true)) handle_uapsd = false; mutex_unlock(&mvm->mutex); } spin_lock(&mvm->tcm.lock); /* re-check if somebody else won the recheck race */ if (!mvm->tcm.paused && time_after(ts, mvm->tcm.ts + MVM_TCM_PERIOD)) { /* calculate statistics */ unsigned long work_delay = iwl_mvm_calc_tcm_stats(mvm, ts, handle_uapsd); /* the memset needs to be visible before the timestamp */ smp_mb(); mvm->tcm.ts = ts; if (work_delay) schedule_delayed_work(&mvm->tcm.work, work_delay); } spin_unlock(&mvm->tcm.lock); iwl_mvm_tcm_results(mvm); } void iwl_mvm_tcm_work(struct work_struct *work) { struct delayed_work *delayed_work = to_delayed_work(work); struct iwl_mvm *mvm = container_of(delayed_work, struct iwl_mvm, tcm.work); iwl_mvm_recalc_tcm(mvm); } void iwl_mvm_pause_tcm(struct iwl_mvm *mvm, bool with_cancel) { spin_lock_bh(&mvm->tcm.lock); mvm->tcm.paused = true; spin_unlock_bh(&mvm->tcm.lock); if (with_cancel) cancel_delayed_work_sync(&mvm->tcm.work); } void iwl_mvm_resume_tcm(struct iwl_mvm *mvm) { int mac; bool low_latency = false; spin_lock_bh(&mvm->tcm.lock); mvm->tcm.ts = jiffies; mvm->tcm.ll_ts = jiffies; for (mac = 0; mac < NUM_MAC_INDEX_DRIVER; mac++) { struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac]; memset(&mdata->rx.pkts, 0, sizeof(mdata->rx.pkts)); memset(&mdata->tx.pkts, 0, sizeof(mdata->tx.pkts)); memset(&mdata->rx.airtime, 0, sizeof(mdata->rx.airtime)); memset(&mdata->tx.airtime, 0, sizeof(mdata->tx.airtime)); if (mvm->tcm.result.low_latency[mac]) low_latency = true; } /* The TCM data needs to be reset before "paused" flag changes */ smp_mb(); mvm->tcm.paused = false; /* * if the current load is not low or low latency is active, force * re-evaluation to cover the case of no traffic. 
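* Without this, the load could remain stuck at its last non-low value * if all traffic stopped while the TCM was paused.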
*/ if (mvm->tcm.result.global_load > IWL_MVM_TRAFFIC_LOW) schedule_delayed_work(&mvm->tcm.work, MVM_TCM_PERIOD); else if (low_latency) schedule_delayed_work(&mvm->tcm.work, MVM_LL_PERIOD); spin_unlock_bh(&mvm->tcm.lock); } void iwl_mvm_tcm_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); INIT_DELAYED_WORK(&mvmvif->uapsd_nonagg_detected_wk, iwl_mvm_tcm_uapsd_nonagg_detected_wk); } void iwl_mvm_tcm_rm_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); cancel_delayed_work_sync(&mvmvif->uapsd_nonagg_detected_wk); } u32 iwl_mvm_get_systime(struct iwl_mvm *mvm) { u32 reg_addr = DEVICE_SYSTEM_TIME_REG; if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000 && mvm->trans->cfg->gp2_reg_addr) reg_addr = mvm->trans->cfg->gp2_reg_addr; return iwl_read_prph(mvm->trans, reg_addr); } void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, u32 *gp2, u64 *boottime) { bool ps_disabled; lockdep_assert_held(&mvm->mutex); /* Disable power save when reading GP2 */ ps_disabled = mvm->ps_disabled; if (!ps_disabled) { mvm->ps_disabled = true; iwl_mvm_power_update_device(mvm); } *gp2 = iwl_mvm_get_systime(mvm); *boottime = ktime_get_boot_ns(); if (!ps_disabled) { mvm->ps_disabled = ps_disabled; iwl_mvm_power_update_device(mvm); } } #ifdef CPTCFG_IWLMVM_VENDOR_CMDS int iwl_mvm_send_csi_cmd(struct iwl_mvm *mvm) { /* * Note: v1 and v2 are compatible, except for the * reserved value and the new flag, both of which * are ignored by older FW, and the additional * fields which we need to strip. */ struct iwl_channel_estimation_cfg cfg = { .flags = cpu_to_le32(mvm->csi_cfg.flags), .timer = cpu_to_le32(mvm->csi_cfg.timer), .count = cpu_to_le32(mvm->csi_cfg.count), .frame_types = cpu_to_le64(mvm->csi_cfg.frame_types), .rate_n_flags_val = cpu_to_le32(mvm->csi_cfg.rate_n_flags_val), .rate_n_flags_mask = cpu_to_le32(mvm->csi_cfg.rate_n_flags_mask), .min_time_between_collection = cpu_to_le32(mvm->csi_cfg.interval), .num_filter_addrs = cpu_to_le32(mvm->csi_cfg.num_filter_addrs), }; u32 id = iwl_cmd_id(CHEST_COLLECTOR_FILTER_CONFIG_CMD, DATA_PATH_GROUP, 0); unsigned int size = sizeof(cfg); int i; for (i = 0; i < mvm->csi_cfg.num_filter_addrs; i++) ether_addr_copy(cfg.filter_addrs[i].addr, mvm->csi_cfg.filter_addrs[i].addr); if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CSI_REPORTING_V2)) size = sizeof(struct iwl_channel_estimation_cfg_v1); return iwl_mvm_send_cmd_pdu(mvm, id, 0, size, &cfg); } #endif /* CPTCFG_IWLMVM_VENDOR_CMDS */ backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/mvm/vendor-cmd.c000066400000000000000000001241141351064634500275730ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright (C) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. 
* * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright (C) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* *****************************************************************************/ #include <linux/etherdevice.h> #include <net/netlink.h> #include <net/cfg80211.h> #include <net/mac80211.h> #include "mvm.h" #include "iwl-vendor-cmd.h" #include "iwl-io.h" #include "iwl-prph.h" static LIST_HEAD(device_list); static DEFINE_SPINLOCK(device_list_lock); static int iwl_mvm_netlink_notifier(struct notifier_block *nb, unsigned long state, void *_notify) { struct netlink_notify *notify = _notify; struct iwl_mvm *mvm; if (state != NETLINK_URELEASE || notify->protocol != NETLINK_GENERIC) return NOTIFY_DONE; spin_lock_bh(&device_list_lock); list_for_each_entry(mvm, &device_list, list) { if (mvm->csi_portid == netlink_notify_portid(notify)) mvm->csi_portid = 0; } spin_unlock_bh(&device_list_lock); return NOTIFY_OK; } static struct notifier_block iwl_mvm_netlink_notifier_block = { .notifier_call = iwl_mvm_netlink_notifier, }; void iwl_mvm_vendor_cmd_init(void) { WARN_ON(netlink_register_notifier(&iwl_mvm_netlink_notifier_block)); spin_lock_init(&device_list_lock); } void iwl_mvm_vendor_cmd_exit(void) { netlink_unregister_notifier(&iwl_mvm_netlink_notifier_block); } static const struct nla_policy iwl_mvm_vendor_attr_policy[NUM_IWL_MVM_VENDOR_ATTR] = { [IWL_MVM_VENDOR_ATTR_LOW_LATENCY] = { .type = NLA_FLAG }, [IWL_MVM_VENDOR_ATTR_COUNTRY] = { .type = NLA_STRING, .len = 2 }, [IWL_MVM_VENDOR_ATTR_FILTER_ARP_NA] = { .type = NLA_FLAG }, [IWL_MVM_VENDOR_ATTR_FILTER_GTK] = { .type = NLA_FLAG }, [IWL_MVM_VENDOR_ATTR_ADDR] = { .len = ETH_ALEN }, [IWL_MVM_VENDOR_ATTR_TXP_LIMIT_24] = { .type = NLA_U32 }, [IWL_MVM_VENDOR_ATTR_TXP_LIMIT_52L] = { .type = NLA_U32 }, [IWL_MVM_VENDOR_ATTR_TXP_LIMIT_52H] = { .type = NLA_U32 }, [IWL_MVM_VENDOR_ATTR_OPPPS_WA] = { .type = NLA_FLAG }, [IWL_MVM_VENDOR_ATTR_GSCAN_MAC_ADDR] = { .len = ETH_ALEN }, [IWL_MVM_VENDOR_ATTR_GSCAN_MAC_ADDR_MASK] = { .len = ETH_ALEN }, [IWL_MVM_VENDOR_ATTR_GSCAN_MAX_AP_PER_SCAN] = { .type = NLA_U32 }, [IWL_MVM_VENDOR_ATTR_GSCAN_REPORT_THRESHOLD] = { .type = NLA_U32 }, [IWL_MVM_VENDOR_ATTR_GSCAN_BUCKET_SPECS] = { .type = NLA_NESTED }, [IWL_MVM_VENDOR_ATTR_GSCAN_LOST_AP_SAMPLE_SIZE] = { .type = NLA_U8 }, [IWL_MVM_VENDOR_ATTR_GSCAN_AP_LIST] = { .type = NLA_NESTED }, [IWL_MVM_VENDOR_ATTR_GSCAN_RSSI_SAMPLE_SIZE] = { .type = NLA_U8 }, [IWL_MVM_VENDOR_ATTR_GSCAN_MIN_BREACHING] = { .type = NLA_U8 }, [IWL_MVM_VENDOR_ATTR_RXFILTER] = { .type = NLA_U32 }, [IWL_MVM_VENDOR_ATTR_RXFILTER_OP] = { .type = NLA_U32 }, [IWL_MVM_VENDOR_ATTR_DBG_COLLECT_TRIGGER] = { .type = NLA_STRING }, [IWL_MVM_VENDOR_ATTR_NAN_FAW_FREQ] = { .type = NLA_U32 }, [IWL_MVM_VENDOR_ATTR_NAN_FAW_SLOTS] = { .type = NLA_U8 }, [IWL_MVM_VENDOR_ATTR_GSCAN_REPORT_THRESHOLD_NUM] = { .type = NLA_U32 }, [IWL_MVM_VENDOR_ATTR_SAR_CHAIN_A_PROFILE] = { .type = NLA_U8 }, [IWL_MVM_VENDOR_ATTR_SAR_CHAIN_B_PROFILE] = { .type = NLA_U8 }, [IWL_MVM_VENDOR_ATTR_FIPS_TEST_VECTOR_HW_CCM] = { .type = NLA_NESTED }, [IWL_MVM_VENDOR_ATTR_FIPS_TEST_VECTOR_HW_GCM] = { .type = NLA_NESTED }, [IWL_MVM_VENDOR_ATTR_FIPS_TEST_VECTOR_HW_AES] = { .type = NLA_NESTED }, }; static struct nlattr **iwl_mvm_parse_vendor_data(const void *data, int data_len) { struct nlattr **tb; int err; if (!data) return ERR_PTR(-EINVAL); tb = kcalloc(MAX_IWL_MVM_VENDOR_ATTR + 1, sizeof(*tb), GFP_KERNEL); if (!tb) return ERR_PTR(-ENOMEM); err = nla_parse(tb, MAX_IWL_MVM_VENDOR_ATTR, data, data_len, iwl_mvm_vendor_attr_policy, NULL); if (err) { kfree(tb); return ERR_PTR(err); } return tb; } static int iwl_mvm_set_low_latency(struct wiphy *wiphy, struct wireless_dev *wdev, const void *data, int data_len) { struct ieee80211_hw *hw =
wiphy_to_ieee80211_hw(wiphy); struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct nlattr **tb; int err; struct ieee80211_vif *vif = wdev_to_ieee80211_vif(wdev); bool low_latency; if (!vif) return -ENODEV; if (data) { tb = iwl_mvm_parse_vendor_data(data, data_len); if (IS_ERR(tb)) return PTR_ERR(tb); low_latency = tb[IWL_MVM_VENDOR_ATTR_LOW_LATENCY]; kfree(tb); } else { low_latency = false; } mutex_lock(&mvm->mutex); err = iwl_mvm_update_low_latency(mvm, vif, low_latency, LOW_LATENCY_VCMD); mutex_unlock(&mvm->mutex); return err; } static int iwl_mvm_get_low_latency(struct wiphy *wiphy, struct wireless_dev *wdev, const void *data, int data_len) { struct ieee80211_vif *vif = wdev_to_ieee80211_vif(wdev); struct iwl_mvm_vif *mvmvif; struct sk_buff *skb; if (!vif) return -ENODEV; mvmvif = iwl_mvm_vif_from_mac80211(vif); skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, 100); if (!skb) return -ENOMEM; if (iwl_mvm_vif_low_latency(mvmvif) && nla_put_flag(skb, IWL_MVM_VENDOR_ATTR_LOW_LATENCY)) { kfree_skb(skb); return -ENOBUFS; } return cfg80211_vendor_cmd_reply(skb); } static int iwl_mvm_set_country(struct wiphy *wiphy, struct wireless_dev *wdev, const void *data, int data_len) { struct ieee80211_regdomain *regd; struct nlattr **tb; struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int retval; if (!iwl_mvm_is_lar_supported(mvm)) return -EOPNOTSUPP; tb = iwl_mvm_parse_vendor_data(data, data_len); if (IS_ERR(tb)) return PTR_ERR(tb); if (!tb[IWL_MVM_VENDOR_ATTR_COUNTRY]) { retval = -EINVAL; goto free; } mutex_lock(&mvm->mutex); /* set regdomain information to FW */ regd = iwl_mvm_get_regdomain(wiphy, nla_data(tb[IWL_MVM_VENDOR_ATTR_COUNTRY]), iwl_mvm_is_wifi_mcc_supported(mvm) ? MCC_SOURCE_3G_LTE_HOST : MCC_SOURCE_OLD_FW, NULL); if (IS_ERR_OR_NULL(regd)) { retval = -EIO; goto unlock; } retval = regulatory_set_wiphy_regd(wiphy, regd); kfree(regd); unlock: mutex_unlock(&mvm->mutex); free: kfree(tb); return retval; } static int iwl_vendor_frame_filter_cmd(struct wiphy *wiphy, struct wireless_dev *wdev, const void *data, int data_len) { struct nlattr **tb; struct ieee80211_vif *vif = wdev_to_ieee80211_vif(wdev); if (!vif) return -EINVAL; tb = iwl_mvm_parse_vendor_data(data, data_len); if (IS_ERR(tb)) return PTR_ERR(tb); vif->filter_grat_arp_unsol_na = tb[IWL_MVM_VENDOR_ATTR_FILTER_ARP_NA]; vif->filter_gtk = tb[IWL_MVM_VENDOR_ATTR_FILTER_GTK]; kfree(tb); return 0; } #ifdef CPTCFG_IWLMVM_TDLS_PEER_CACHE static int iwl_vendor_tdls_peer_cache_add(struct wiphy *wiphy, struct wireless_dev *wdev, const void *data, int data_len) { struct nlattr **tb; struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_tdls_peer_counter *cnt; u8 *addr; struct ieee80211_vif *vif = wdev_to_ieee80211_vif(wdev); int err; if (!vif) return -ENODEV; tb = iwl_mvm_parse_vendor_data(data, data_len); if (IS_ERR(tb)) return PTR_ERR(tb); if (vif->type != NL80211_IFTYPE_STATION || !tb[IWL_MVM_VENDOR_ATTR_ADDR]) { err = -EINVAL; goto free; } mutex_lock(&mvm->mutex); if (mvm->tdls_peer_cache_cnt >= IWL_MVM_TDLS_CNT_MAX_PEERS) { err = -ENOSPC; goto out_unlock; } addr = nla_data(tb[IWL_MVM_VENDOR_ATTR_ADDR]); rcu_read_lock(); cnt = iwl_mvm_tdls_peer_cache_find(mvm, addr); rcu_read_unlock(); if (cnt) { err = -EEXIST; goto out_unlock; } cnt = kzalloc(sizeof(*cnt) + sizeof(cnt->rx[0]) * mvm->trans->num_rx_queues, GFP_KERNEL); if (!cnt) { err = -ENOMEM; goto out_unlock; } IWL_DEBUG_TDLS(mvm, "Adding %pM to TDLS peer cache\n", 
addr); ether_addr_copy(cnt->mac.addr, addr); cnt->vif = vif; list_add_tail_rcu(&cnt->list, &mvm->tdls_peer_cache_list); mvm->tdls_peer_cache_cnt++; err = 0; out_unlock: mutex_unlock(&mvm->mutex); free: kfree(tb); return err; } static int iwl_vendor_tdls_peer_cache_del(struct wiphy *wiphy, struct wireless_dev *wdev, const void *data, int data_len) { struct nlattr **tb; struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_tdls_peer_counter *cnt; u8 *addr; int err; tb = iwl_mvm_parse_vendor_data(data, data_len); if (IS_ERR(tb)) return PTR_ERR(tb); if (!tb[IWL_MVM_VENDOR_ATTR_ADDR]) { err = -EINVAL; goto free; } addr = nla_data(tb[IWL_MVM_VENDOR_ATTR_ADDR]); mutex_lock(&mvm->mutex); rcu_read_lock(); cnt = iwl_mvm_tdls_peer_cache_find(mvm, addr); if (!cnt) { IWL_DEBUG_TDLS(mvm, "%pM not found in TDLS peer cache\n", addr); err = -ENOENT; goto out_unlock; } IWL_DEBUG_TDLS(mvm, "Removing %pM from TDLS peer cache\n", addr); mvm->tdls_peer_cache_cnt--; list_del_rcu(&cnt->list); kfree_rcu(cnt, rcu_head); err = 0; out_unlock: rcu_read_unlock(); mutex_unlock(&mvm->mutex); free: kfree(tb); return err; } static int iwl_vendor_tdls_peer_cache_query(struct wiphy *wiphy, struct wireless_dev *wdev, const void *data, int data_len) { struct nlattr **tb; struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_tdls_peer_counter *cnt; struct sk_buff *skb; u32 rx_bytes, tx_bytes; u8 *addr; int err; tb = iwl_mvm_parse_vendor_data(data, data_len); if (IS_ERR(tb)) return PTR_ERR(tb); if (!tb[IWL_MVM_VENDOR_ATTR_ADDR]) { kfree(tb); return -EINVAL; } addr = nla_data(tb[IWL_MVM_VENDOR_ATTR_ADDR]); /* we can free the tb, the addr pointer is still valid into the msg */ kfree(tb); rcu_read_lock(); cnt = iwl_mvm_tdls_peer_cache_find(mvm, addr); if (!cnt) { IWL_DEBUG_TDLS(mvm, "%pM not found in TDLS peer cache\n", addr); err = -ENOENT; } else { int q; tx_bytes = cnt->tx_bytes; rx_bytes = 0; for (q = 0; q < mvm->trans->num_rx_queues; q++) rx_bytes += cnt->rx[q].bytes; err = 0; } rcu_read_unlock(); if (err) return err; skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, 100); if (!skb) return -ENOMEM; if (nla_put_u32(skb, IWL_MVM_VENDOR_ATTR_TX_BYTES, tx_bytes) || nla_put_u32(skb, IWL_MVM_VENDOR_ATTR_RX_BYTES, rx_bytes)) { kfree_skb(skb); return -ENOBUFS; } return cfg80211_vendor_cmd_reply(skb); } #endif /* CPTCFG_IWLMVM_TDLS_PEER_CACHE */ static int iwl_vendor_set_nic_txpower_limit(struct wiphy *wiphy, struct wireless_dev *wdev, const void *data, int data_len) { struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); union { struct iwl_dev_tx_power_cmd_v4 v4; struct iwl_dev_tx_power_cmd v5; } cmd = { .v5.v3.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_DEVICE), .v5.v3.dev_24 = cpu_to_le16(IWL_DEV_MAX_TX_POWER), .v5.v3.dev_52_low = cpu_to_le16(IWL_DEV_MAX_TX_POWER), .v5.v3.dev_52_high = cpu_to_le16(IWL_DEV_MAX_TX_POWER), }; struct nlattr **tb; int len = sizeof(cmd); int err; tb = iwl_mvm_parse_vendor_data(data, data_len); if (IS_ERR(tb)) return PTR_ERR(tb); if (tb[IWL_MVM_VENDOR_ATTR_TXP_LIMIT_24]) { s32 txp = nla_get_u32(tb[IWL_MVM_VENDOR_ATTR_TXP_LIMIT_24]); if (txp < 0 || txp > IWL_DEV_MAX_TX_POWER) { err = -EINVAL; goto free; } cmd.v5.v3.dev_24 = cpu_to_le16(txp); } if (tb[IWL_MVM_VENDOR_ATTR_TXP_LIMIT_52L]) { s32 txp = nla_get_u32(tb[IWL_MVM_VENDOR_ATTR_TXP_LIMIT_52L]); if (txp < 0 || txp > IWL_DEV_MAX_TX_POWER) { err = -EINVAL; goto free; } 
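/* value was range-checked above; store the 5.2 GHz low band limit */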
cmd.v5.v3.dev_52_low = cpu_to_le16(txp); } if (tb[IWL_MVM_VENDOR_ATTR_TXP_LIMIT_52H]) { s32 txp = nla_get_u32(tb[IWL_MVM_VENDOR_ATTR_TXP_LIMIT_52H]); if (txp < 0 || txp > IWL_DEV_MAX_TX_POWER) { err = -EINVAL; goto free; } cmd.v5.v3.dev_52_high = cpu_to_le16(txp); } mvm->txp_cmd.v5 = cmd.v5; if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_REDUCE_TX_POWER)) len = sizeof(mvm->txp_cmd.v5); else if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TX_POWER_ACK)) len = sizeof(mvm->txp_cmd.v4); else len = sizeof(mvm->txp_cmd.v4.v3); mutex_lock(&mvm->mutex); err = iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd); mutex_unlock(&mvm->mutex); if (err) IWL_ERR(mvm, "failed to update device TX power: %d\n", err); err = 0; free: kfree(tb); return err; } #ifdef CPTCFG_IWLMVM_P2P_OPPPS_TEST_WA static int iwl_mvm_oppps_wa_update_quota(struct iwl_mvm *mvm, struct ieee80211_vif *vif, bool enable) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct ieee80211_p2p_noa_attr *noa = &vif->bss_conf.p2p_noa_attr; bool force_update = true; if (enable && noa->oppps_ctwindow & IEEE80211_P2P_OPPPS_ENABLE_BIT) mvm->p2p_opps_test_wa_vif = mvmvif; else mvm->p2p_opps_test_wa_vif = NULL; if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA)) { return -EOPNOTSUPP; } return iwl_mvm_update_quotas(mvm, force_update, NULL); } static int iwl_mvm_oppps_wa(struct wiphy *wiphy, struct wireless_dev *wdev, const void *data, int data_len) { struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct nlattr **tb; int err; struct ieee80211_vif *vif = wdev_to_ieee80211_vif(wdev); if (!vif) return -ENODEV; tb = iwl_mvm_parse_vendor_data(data, data_len); if (IS_ERR(tb)) return PTR_ERR(tb); mutex_lock(&mvm->mutex); if (vif->type == NL80211_IFTYPE_STATION && vif->p2p) { bool enable = !!tb[IWL_MVM_VENDOR_ATTR_OPPPS_WA]; err = iwl_mvm_oppps_wa_update_quota(mvm, vif, enable); } mutex_unlock(&mvm->mutex); kfree(tb); return err; } #endif void iwl_mvm_active_rx_filters(struct iwl_mvm *mvm) { int i, len, total = 0; struct iwl_mcast_filter_cmd *cmd; static const u8 ipv4mc[] = {0x01, 0x00, 0x5e}; static const u8 ipv6mc[] = {0x33, 0x33}; static const u8 ipv4_mdns[] = {0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb}; static const u8 ipv6_mdns[] = {0x33, 0x33, 0x00, 0x00, 0x00, 0xfb}; lockdep_assert_held(&mvm->mutex); if (mvm->rx_filters & IWL_MVM_VENDOR_RXFILTER_EINVAL) return; for (i = 0; i < mvm->mcast_filter_cmd->count; i++) { if (mvm->rx_filters & IWL_MVM_VENDOR_RXFILTER_MCAST4 && memcmp(&mvm->mcast_filter_cmd->addr_list[i * ETH_ALEN], ipv4mc, sizeof(ipv4mc)) == 0) total++; else if (memcmp(&mvm->mcast_filter_cmd->addr_list[i * ETH_ALEN], ipv4_mdns, sizeof(ipv4_mdns)) == 0) total++; else if (mvm->rx_filters & IWL_MVM_VENDOR_RXFILTER_MCAST6 && memcmp(&mvm->mcast_filter_cmd->addr_list[i * ETH_ALEN], ipv6mc, sizeof(ipv6mc)) == 0) total++; else if (memcmp(&mvm->mcast_filter_cmd->addr_list[i * ETH_ALEN], ipv6_mdns, sizeof(ipv6_mdns)) == 0) total++; } /* FW expects full words */ len = roundup(sizeof(*cmd) + total * ETH_ALEN, 4); cmd = kzalloc(len, GFP_KERNEL); if (!cmd) return; memcpy(cmd, mvm->mcast_filter_cmd, sizeof(*cmd)); cmd->count = 0; for (i = 0; i < mvm->mcast_filter_cmd->count; i++) { bool copy_filter = false; if (mvm->rx_filters & IWL_MVM_VENDOR_RXFILTER_MCAST4 && memcmp(&mvm->mcast_filter_cmd->addr_list[i * ETH_ALEN], ipv4mc, sizeof(ipv4mc)) == 0) copy_filter = true; else if (memcmp(&mvm->mcast_filter_cmd->addr_list[i * ETH_ALEN], ipv4_mdns, 
sizeof(ipv4_mdns)) == 0) copy_filter = true; else if (mvm->rx_filters & IWL_MVM_VENDOR_RXFILTER_MCAST6 && memcmp(&mvm->mcast_filter_cmd->addr_list[i * ETH_ALEN], ipv6mc, sizeof(ipv6mc)) == 0) copy_filter = true; else if (memcmp(&mvm->mcast_filter_cmd->addr_list[i * ETH_ALEN], ipv6_mdns, sizeof(ipv6_mdns)) == 0) copy_filter = true; if (!copy_filter) continue; ether_addr_copy(&cmd->addr_list[cmd->count * ETH_ALEN], &mvm->mcast_filter_cmd->addr_list[i * ETH_ALEN]); cmd->count++; } kfree(mvm->mcast_active_filter_cmd); mvm->mcast_active_filter_cmd = cmd; } static int iwl_mvm_vendor_rxfilter(struct wiphy *wiphy, struct wireless_dev *wdev, const void *data, int data_len) { struct nlattr **tb; struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); enum iwl_mvm_vendor_rxfilter_flags filter, rx_filters, old_rx_filters; enum iwl_mvm_vendor_rxfilter_op op; bool first_set; u32 mask; int err; tb = iwl_mvm_parse_vendor_data(data, data_len); if (IS_ERR(tb)) return PTR_ERR(tb); if (!tb[IWL_MVM_VENDOR_ATTR_RXFILTER]) { err = -EINVAL; goto free; } if (!tb[IWL_MVM_VENDOR_ATTR_RXFILTER_OP]) { err = -EINVAL; goto free; } filter = nla_get_u32(tb[IWL_MVM_VENDOR_ATTR_RXFILTER]); op = nla_get_u32(tb[IWL_MVM_VENDOR_ATTR_RXFILTER_OP]); if (filter != IWL_MVM_VENDOR_RXFILTER_UNICAST && filter != IWL_MVM_VENDOR_RXFILTER_BCAST && filter != IWL_MVM_VENDOR_RXFILTER_MCAST4 && filter != IWL_MVM_VENDOR_RXFILTER_MCAST6) { err = -EINVAL; goto free; } rx_filters = mvm->rx_filters & ~IWL_MVM_VENDOR_RXFILTER_EINVAL; switch (op) { case IWL_MVM_VENDOR_RXFILTER_OP_DROP: rx_filters &= ~filter; break; case IWL_MVM_VENDOR_RXFILTER_OP_PASS: rx_filters |= filter; break; default: err = -EINVAL; goto free; } first_set = mvm->rx_filters & IWL_MVM_VENDOR_RXFILTER_EINVAL; /* If first time set - clear EINVAL value */ mvm->rx_filters &= ~IWL_MVM_VENDOR_RXFILTER_EINVAL; err = 0; if (rx_filters == mvm->rx_filters && !first_set) goto free; mutex_lock(&mvm->mutex); old_rx_filters = mvm->rx_filters; mvm->rx_filters = rx_filters; mask = IWL_MVM_VENDOR_RXFILTER_MCAST4 | IWL_MVM_VENDOR_RXFILTER_MCAST6; if ((old_rx_filters & mask) != (rx_filters & mask) || first_set) { iwl_mvm_active_rx_filters(mvm); iwl_mvm_recalc_multicast(mvm); } mask = IWL_MVM_VENDOR_RXFILTER_BCAST; if ((old_rx_filters & mask) != (rx_filters & mask) || first_set) iwl_mvm_configure_bcast_filter(mvm); mutex_unlock(&mvm->mutex); free: kfree(tb); return err; } static int iwl_mvm_vendor_dbg_collect(struct wiphy *wiphy, struct wireless_dev *wdev, const void *data, int data_len) { struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct nlattr **tb; int err, len = 0; const char *trigger_desc; tb = iwl_mvm_parse_vendor_data(data, data_len); if (IS_ERR(tb)) return PTR_ERR(tb); if (!tb[IWL_MVM_VENDOR_ATTR_DBG_COLLECT_TRIGGER]) { err = -EINVAL; goto free; } trigger_desc = nla_data(tb[IWL_MVM_VENDOR_ATTR_DBG_COLLECT_TRIGGER]); len = nla_len(tb[IWL_MVM_VENDOR_ATTR_DBG_COLLECT_TRIGGER]); iwl_fw_dbg_collect(&mvm->fwrt, FW_DBG_TRIGGER_USER_EXTENDED, trigger_desc, len, NULL); err = 0; free: kfree(tb); return err; } static int iwl_mvm_vendor_nan_faw_conf(struct wiphy *wiphy, struct wireless_dev *wdev, const void *data, int data_len) { struct nlattr **tb; struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct cfg80211_chan_def def = {}; struct ieee80211_channel *chan; u32 freq; u8 slots; int err; tb = iwl_mvm_parse_vendor_data(data, data_len); if 
(IS_ERR(tb)) return PTR_ERR(tb); if (!tb[IWL_MVM_VENDOR_ATTR_NAN_FAW_SLOTS]) { err = -EINVAL; goto free; } if (!tb[IWL_MVM_VENDOR_ATTR_NAN_FAW_FREQ]) { err = -EINVAL; goto free; } freq = nla_get_u32(tb[IWL_MVM_VENDOR_ATTR_NAN_FAW_FREQ]); slots = nla_get_u8(tb[IWL_MVM_VENDOR_ATTR_NAN_FAW_SLOTS]); chan = ieee80211_get_channel(wiphy, freq); if (!chan) { err = -EINVAL; goto free; } cfg80211_chandef_create(&def, chan, NL80211_CHAN_NO_HT); if (!cfg80211_chandef_usable(wiphy, &def, IEEE80211_CHAN_DISABLED)) { err = -EINVAL; goto free; } err = iwl_mvm_nan_config_nan_faw_cmd(mvm, &def, slots); free: kfree(tb); return err; } #ifdef CONFIG_ACPI static int iwl_mvm_vendor_set_dynamic_txp_profile(struct wiphy *wiphy, struct wireless_dev *wdev, const void *data, int data_len) { struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct nlattr **tb; u8 chain_a, chain_b; int err; tb = iwl_mvm_parse_vendor_data(data, data_len); if (IS_ERR(tb)) return PTR_ERR(tb); if (!tb[IWL_MVM_VENDOR_ATTR_SAR_CHAIN_A_PROFILE] || !tb[IWL_MVM_VENDOR_ATTR_SAR_CHAIN_B_PROFILE]) { err = -EINVAL; goto free; } chain_a = nla_get_u8(tb[IWL_MVM_VENDOR_ATTR_SAR_CHAIN_A_PROFILE]); chain_b = nla_get_u8(tb[IWL_MVM_VENDOR_ATTR_SAR_CHAIN_B_PROFILE]); if (mvm->sar_chain_a_profile == chain_a && mvm->sar_chain_b_profile == chain_b) { err = 0; goto free; } mvm->sar_chain_a_profile = chain_a; mvm->sar_chain_b_profile = chain_b; err = iwl_mvm_sar_select_profile(mvm, chain_a, chain_b); free: kfree(tb); return err; } static int iwl_mvm_vendor_get_sar_profile_info(struct wiphy *wiphy, struct wireless_dev *wdev, const void *data, int data_len) { struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct sk_buff *skb; int i; u32 n_profiles = 0; for (i = 0; i < ACPI_SAR_PROFILE_NUM; i++) { if (mvm->sar_profiles[i].enabled) n_profiles++; } skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, 100); if (!skb) return -ENOMEM; if (nla_put_u8(skb, IWL_MVM_VENDOR_ATTR_SAR_ENABLED_PROFILE_NUM, n_profiles) || nla_put_u8(skb, IWL_MVM_VENDOR_ATTR_SAR_CHAIN_A_PROFILE, mvm->sar_chain_a_profile) || nla_put_u8(skb, IWL_MVM_VENDOR_ATTR_SAR_CHAIN_B_PROFILE, mvm->sar_chain_b_profile)) { kfree_skb(skb); return -ENOBUFS; } return cfg80211_vendor_cmd_reply(skb); } #define IWL_MVM_SAR_GEO_NUM_BANDS 2 static int iwl_mvm_vendor_get_geo_profile_info(struct wiphy *wiphy, struct wireless_dev *wdev, const void *data, int data_len) { struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct sk_buff *skb; struct nlattr *nl_profile; int i, tbl_idx; tbl_idx = iwl_mvm_get_sar_geo_profile(mvm); if (tbl_idx < 0) return tbl_idx; skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, 100); if (!skb) return -ENOMEM; nl_profile = nla_nest_start(skb, IWL_MVM_VENDOR_ATTR_SAR_GEO_PROFILE); if (!nl_profile) { kfree_skb(skb); return -ENOBUFS; } if (!tbl_idx) goto out; for (i = 0; i < IWL_MVM_SAR_GEO_NUM_BANDS; i++) { u8 *value; struct nlattr *nl_chain = nla_nest_start(skb, i + 1); int idx = i * ACPI_GEO_PER_CHAIN_SIZE; if (!nl_chain) { kfree_skb(skb); return -ENOBUFS; } value = &mvm->geo_profiles[tbl_idx - 1].values[idx]; nla_put_u8(skb, IWL_VENDOR_SAR_GEO_MAX_TXP, value[0]); nla_put_u8(skb, IWL_VENDOR_SAR_GEO_CHAIN_A_OFFSET, value[1]); nla_put_u8(skb, IWL_VENDOR_SAR_GEO_CHAIN_B_OFFSET, value[2]); nla_nest_end(skb, nl_chain); } out: nla_nest_end(skb, nl_profile); return cfg80211_vendor_cmd_reply(skb); } #endif static const struct nla_policy 
iwl_mvm_vendor_fips_hw_policy[NUM_IWL_VENDOR_FIPS_TEST_VECTOR_HW] = { [IWL_VENDOR_FIPS_TEST_VECTOR_HW_KEY] = { .type = NLA_BINARY }, [IWL_VENDOR_FIPS_TEST_VECTOR_HW_NONCE] = { .type = NLA_BINARY }, [IWL_VENDOR_FIPS_TEST_VECTOR_HW_AAD] = { .type = NLA_BINARY }, [IWL_VENDOR_FIPS_TEST_VECTOR_HW_PAYLOAD] = { .type = NLA_BINARY }, [IWL_VENDOR_FIPS_TEST_VECTOR_HW_FLAGS] = { .type = NLA_U8 }, }; static int iwl_mvm_vendor_validate_ccm_vector(struct nlattr **tb) { if (!tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_KEY] || !tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_NONCE] || !tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_AAD] || nla_len(tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_KEY]) != FIPS_KEY_LEN_128 || nla_len(tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_NONCE]) != FIPS_CCM_NONCE_LEN) return -EINVAL; return 0; } static int iwl_mvm_vendor_validate_gcm_vector(struct nlattr **tb) { if (!tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_KEY] || !tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_NONCE] || !tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_AAD] || (nla_len(tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_KEY]) != FIPS_KEY_LEN_128 && nla_len(tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_KEY]) != FIPS_KEY_LEN_256) || nla_len(tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_NONCE]) != FIPS_GCM_NONCE_LEN) return -EINVAL; return 0; } static int iwl_mvm_vendor_validate_aes_vector(struct nlattr **tb) { if (!tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_KEY] || (nla_len(tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_KEY]) != FIPS_KEY_LEN_128 && nla_len(tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_KEY]) != FIPS_KEY_LEN_256)) return -EINVAL; return 0; } /** * iwl_mvm_vendor_build_vector - build FIPS test vector for AES/CCM/GCM tests * * @cmd_buf: the command buffer is returned by this pointer in case of success. * @vector: test vector attributes. * @flags: specifies which encryption algorithm to use. One of * &IWL_FIPS_TEST_VECTOR_FLAGS_CCM, &IWL_FIPS_TEST_VECTOR_FLAGS_GCM and * &IWL_FIPS_TEST_VECTOR_FLAGS_AES. * * This function returns the length of the command buffer (in bytes) in case of * success, or a negative error code on failure. 
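 * On success, *cmd_buf is allocated with kzalloc() and ownership passes
 * to the caller, which must kfree() it (iwl_mvm_vendor_test_fips() does
 * this after the command has been sent).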
*/ static int iwl_mvm_vendor_build_vector(u8 **cmd_buf, struct nlattr *vector, u8 flags) { struct nlattr *tb[NUM_IWL_VENDOR_FIPS_TEST_VECTOR_HW]; struct iwl_fips_test_cmd *cmd; int err; int payload_len = 0; u8 *buf; err = nla_parse_nested(tb, MAX_IWL_VENDOR_FIPS_TEST_VECTOR_HW, vector, iwl_mvm_vendor_fips_hw_policy, NULL); if (err) return err; switch (flags) { case IWL_FIPS_TEST_VECTOR_FLAGS_CCM: err = iwl_mvm_vendor_validate_ccm_vector(tb); break; case IWL_FIPS_TEST_VECTOR_FLAGS_GCM: err = iwl_mvm_vendor_validate_gcm_vector(tb); break; case IWL_FIPS_TEST_VECTOR_FLAGS_AES: err = iwl_mvm_vendor_validate_aes_vector(tb); break; default: return -EINVAL; } if (err) return err; if (tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_AAD] && nla_len(tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_AAD]) > FIPS_MAX_AAD_LEN) return -EINVAL; if (tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_PAYLOAD]) payload_len = nla_len(tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_PAYLOAD]); buf = kzalloc(sizeof(*cmd) + payload_len, GFP_KERNEL); if (!buf) return -ENOMEM; cmd = (void *)buf; cmd->flags = cpu_to_le32(flags); memcpy(cmd->key, nla_data(tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_KEY]), nla_len(tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_KEY])); if (nla_len(tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_KEY]) == FIPS_KEY_LEN_256) cmd->flags |= cpu_to_le32(IWL_FIPS_TEST_VECTOR_FLAGS_KEY_256); if (tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_NONCE]) memcpy(cmd->nonce, nla_data(tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_NONCE]), nla_len(tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_NONCE])); if (tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_AAD]) { memcpy(cmd->aad, nla_data(tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_AAD]), nla_len(tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_AAD])); cmd->aad_len = cpu_to_le32(nla_len(tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_AAD])); } if (payload_len) { memcpy(cmd->payload, nla_data(tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_PAYLOAD]), payload_len); cmd->payload_len = cpu_to_le32(payload_len); } if (tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_FLAGS]) { u8 hw_flags = nla_get_u8(tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_FLAGS]); if (hw_flags & IWL_VENDOR_FIPS_TEST_VECTOR_FLAGS_ENCRYPT) cmd->flags |= cpu_to_le32(IWL_FIPS_TEST_VECTOR_FLAGS_ENC); } *cmd_buf = buf; return sizeof(*cmd) + payload_len; } static int iwl_mvm_vendor_test_fips_send_resp(struct wiphy *wiphy, struct iwl_fips_test_resp *resp) { struct sk_buff *skb; u32 resp_len = le32_to_cpu(resp->len); u32 *status = (void *)(resp->payload + resp_len); skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, sizeof(*resp)); if (!skb) return -ENOMEM; if ((*status) == IWL_FIPS_TEST_STATUS_SUCCESS && nla_put(skb, IWL_MVM_VENDOR_ATTR_FIPS_TEST_RESULT, resp_len, resp->payload)) { kfree_skb(skb); return -ENOBUFS; } return cfg80211_vendor_cmd_reply(skb); } static int iwl_mvm_vendor_test_fips(struct wiphy *wiphy, struct wireless_dev *wdev, const void *data, int data_len) { struct nlattr **tb; struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_host_cmd hcmd = { .id = iwl_cmd_id(FIPS_TEST_VECTOR_CMD, LEGACY_GROUP, 0), .flags = CMD_WANT_SKB, .dataflags = { IWL_HCMD_DFL_NOCOPY }, }; struct iwl_rx_packet *pkt; struct iwl_fips_test_resp *resp; struct nlattr *vector; u8 flags; u8 *buf = NULL; int ret; tb = iwl_mvm_parse_vendor_data(data, data_len); if (IS_ERR(tb)) return PTR_ERR(tb); if (tb[IWL_MVM_VENDOR_ATTR_FIPS_TEST_VECTOR_HW_CCM]) { vector = tb[IWL_MVM_VENDOR_ATTR_FIPS_TEST_VECTOR_HW_CCM]; flags = IWL_FIPS_TEST_VECTOR_FLAGS_CCM; } else if (tb[IWL_MVM_VENDOR_ATTR_FIPS_TEST_VECTOR_HW_GCM]) { vector = tb[IWL_MVM_VENDOR_ATTR_FIPS_TEST_VECTOR_HW_GCM]; flags = 
IWL_FIPS_TEST_VECTOR_FLAGS_GCM; } else if (tb[IWL_MVM_VENDOR_ATTR_FIPS_TEST_VECTOR_HW_AES]) { vector = tb[IWL_MVM_VENDOR_ATTR_FIPS_TEST_VECTOR_HW_AES]; flags = IWL_FIPS_TEST_VECTOR_FLAGS_AES; } else { ret = -EINVAL; goto free; } ret = iwl_mvm_vendor_build_vector(&buf, vector, flags); if (ret <= 0) goto free; hcmd.data[0] = buf; hcmd.len[0] = ret; mutex_lock(&mvm->mutex); ret = iwl_mvm_send_cmd(mvm, &hcmd); mutex_unlock(&mvm->mutex); if (ret) goto free; pkt = hcmd.resp_pkt; resp = (void *)pkt->data; iwl_mvm_vendor_test_fips_send_resp(wiphy, resp); iwl_free_resp(&hcmd); free: kfree(buf); kfree(tb); return ret; } static int iwl_mvm_vendor_csi_register(struct wiphy *wiphy, struct wireless_dev *wdev, const void *data, int data_len) { struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); mvm->csi_portid = cfg80211_vendor_cmd_get_sender(wiphy); return 0; } static const struct wiphy_vendor_command iwl_mvm_vendor_commands[] = { { .info = { .vendor_id = INTEL_OUI, .subcmd = IWL_MVM_VENDOR_CMD_SET_LOW_LATENCY, }, .flags = WIPHY_VENDOR_CMD_NEED_NETDEV | WIPHY_VENDOR_CMD_NEED_RUNNING, .doit = iwl_mvm_set_low_latency, }, { .info = { .vendor_id = INTEL_OUI, .subcmd = IWL_MVM_VENDOR_CMD_GET_LOW_LATENCY, }, .flags = WIPHY_VENDOR_CMD_NEED_NETDEV | WIPHY_VENDOR_CMD_NEED_RUNNING, .doit = iwl_mvm_get_low_latency, }, { .info = { .vendor_id = INTEL_OUI, .subcmd = IWL_MVM_VENDOR_CMD_SET_COUNTRY, }, .flags = WIPHY_VENDOR_CMD_NEED_NETDEV | WIPHY_VENDOR_CMD_NEED_RUNNING, .doit = iwl_mvm_set_country, }, { .info = { .vendor_id = INTEL_OUI, .subcmd = IWL_MVM_VENDOR_CMD_PROXY_FRAME_FILTERING, }, .flags = WIPHY_VENDOR_CMD_NEED_NETDEV | WIPHY_VENDOR_CMD_NEED_RUNNING, .doit = iwl_vendor_frame_filter_cmd, }, #ifdef CPTCFG_IWLMVM_TDLS_PEER_CACHE { .info = { .vendor_id = INTEL_OUI, .subcmd = IWL_MVM_VENDOR_CMD_TDLS_PEER_CACHE_ADD, }, .flags = WIPHY_VENDOR_CMD_NEED_NETDEV | WIPHY_VENDOR_CMD_NEED_RUNNING, .doit = iwl_vendor_tdls_peer_cache_add, }, { .info = { .vendor_id = INTEL_OUI, .subcmd = IWL_MVM_VENDOR_CMD_TDLS_PEER_CACHE_DEL, }, .flags = WIPHY_VENDOR_CMD_NEED_NETDEV | WIPHY_VENDOR_CMD_NEED_RUNNING, .doit = iwl_vendor_tdls_peer_cache_del, }, { .info = { .vendor_id = INTEL_OUI, .subcmd = IWL_MVM_VENDOR_CMD_TDLS_PEER_CACHE_QUERY, }, .flags = WIPHY_VENDOR_CMD_NEED_NETDEV | WIPHY_VENDOR_CMD_NEED_RUNNING, .doit = iwl_vendor_tdls_peer_cache_query, }, #endif /* CPTCFG_IWLMVM_TDLS_PEER_CACHE */ { .info = { .vendor_id = INTEL_OUI, .subcmd = IWL_MVM_VENDOR_CMD_SET_NIC_TXPOWER_LIMIT, }, .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_RUNNING, .doit = iwl_vendor_set_nic_txpower_limit, }, #ifdef CPTCFG_IWLMVM_P2P_OPPPS_TEST_WA { .info = { .vendor_id = INTEL_OUI, .subcmd = IWL_MVM_VENDOR_CMD_OPPPS_WA, }, .flags = WIPHY_VENDOR_CMD_NEED_NETDEV | WIPHY_VENDOR_CMD_NEED_RUNNING, .doit = iwl_mvm_oppps_wa, }, #endif { .info = { .vendor_id = INTEL_OUI, .subcmd = IWL_MVM_VENDOR_CMD_RXFILTER, }, .flags = WIPHY_VENDOR_CMD_NEED_NETDEV | WIPHY_VENDOR_CMD_NEED_RUNNING, .doit = iwl_mvm_vendor_rxfilter, }, { .info = { .vendor_id = INTEL_OUI, .subcmd = IWL_MVM_VENDOR_CMD_DBG_COLLECT, }, .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_RUNNING, .doit = iwl_mvm_vendor_dbg_collect, }, { .info = { .vendor_id = INTEL_OUI, .subcmd = IWL_MVM_VENDOR_CMD_NAN_FAW_CONF, }, .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_RUNNING, .doit = iwl_mvm_vendor_nan_faw_conf, }, #ifdef CONFIG_ACPI { .info = { .vendor_id = INTEL_OUI, .subcmd = IWL_MVM_VENDOR_CMD_SET_SAR_PROFILE, }, .flags = 
WIPHY_VENDOR_CMD_NEED_WDEV, .doit = iwl_mvm_vendor_set_dynamic_txp_profile, }, { .info = { .vendor_id = INTEL_OUI, .subcmd = IWL_MVM_VENDOR_CMD_GET_SAR_PROFILE_INFO, }, .flags = WIPHY_VENDOR_CMD_NEED_WDEV, .doit = iwl_mvm_vendor_get_sar_profile_info, }, { .info = { .vendor_id = INTEL_OUI, .subcmd = IWL_MVM_VENDOR_CMD_GET_SAR_GEO_PROFILE, }, .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_RUNNING, .doit = iwl_mvm_vendor_get_geo_profile_info, }, { .info = { .vendor_id = INTEL_OUI, .subcmd = IWL_MVM_VENDOR_CMD_TEST_FIPS, }, .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_RUNNING, .doit = iwl_mvm_vendor_test_fips, }, #endif { .info = { .vendor_id = INTEL_OUI, .subcmd = IWL_MVM_VENDOR_CMD_CSI_EVENT, }, .doit = iwl_mvm_vendor_csi_register, }, }; enum iwl_mvm_vendor_events_idx { IWL_MVM_VENDOR_EVENT_IDX_TCM, IWL_MVM_VENDOR_EVENT_IDX_CSI, NUM_IWL_MVM_VENDOR_EVENT_IDX }; static const struct nl80211_vendor_cmd_info iwl_mvm_vendor_events[NUM_IWL_MVM_VENDOR_EVENT_IDX] = { [IWL_MVM_VENDOR_EVENT_IDX_TCM] = { .vendor_id = INTEL_OUI, .subcmd = IWL_MVM_VENDOR_CMD_TCM_EVENT, }, [IWL_MVM_VENDOR_EVENT_IDX_CSI] = { .vendor_id = INTEL_OUI, .subcmd = IWL_MVM_VENDOR_CMD_CSI_EVENT, }, }; void iwl_mvm_vendor_cmds_register(struct iwl_mvm *mvm) { mvm->hw->wiphy->vendor_commands = iwl_mvm_vendor_commands; mvm->hw->wiphy->n_vendor_commands = ARRAY_SIZE(iwl_mvm_vendor_commands); mvm->hw->wiphy->vendor_events = iwl_mvm_vendor_events; mvm->hw->wiphy->n_vendor_events = ARRAY_SIZE(iwl_mvm_vendor_events); spin_lock_bh(&device_list_lock); list_add_tail(&mvm->list, &device_list); spin_unlock_bh(&device_list_lock); } void iwl_mvm_vendor_cmds_unregister(struct iwl_mvm *mvm) { spin_lock_bh(&device_list_lock); list_del(&mvm->list); spin_unlock_bh(&device_list_lock); } static enum iwl_mvm_vendor_load iwl_mvm_get_vendor_load(enum iwl_mvm_traffic_load load) { switch (load) { case IWL_MVM_TRAFFIC_HIGH: return IWL_MVM_VENDOR_LOAD_HIGH; case IWL_MVM_TRAFFIC_MEDIUM: return IWL_MVM_VENDOR_LOAD_MEDIUM; case IWL_MVM_TRAFFIC_LOW: return IWL_MVM_VENDOR_LOAD_LOW; default: break; } return IWL_MVM_VENDOR_LOAD_LOW; } void iwl_mvm_send_tcm_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct sk_buff *msg = cfg80211_vendor_event_alloc(mvm->hw->wiphy, ieee80211_vif_to_wdev(vif), 200, IWL_MVM_VENDOR_EVENT_IDX_TCM, GFP_ATOMIC); if (!msg) return; if (vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); if (nla_put(msg, IWL_MVM_VENDOR_ATTR_VIF_ADDR, ETH_ALEN, vif->addr) || nla_put_u8(msg, IWL_MVM_VENDOR_ATTR_VIF_LL, iwl_mvm_vif_low_latency(mvmvif)) || nla_put_u8(msg, IWL_MVM_VENDOR_ATTR_VIF_LOAD, mvm->tcm.result.load[mvmvif->id])) goto nla_put_failure; } if (nla_put_u8(msg, IWL_MVM_VENDOR_ATTR_LL, iwl_mvm_low_latency(mvm)) || nla_put_u8(msg, IWL_MVM_VENDOR_ATTR_LOAD, iwl_mvm_get_vendor_load(mvm->tcm.result.global_load))) goto nla_put_failure; cfg80211_vendor_event(msg, GFP_ATOMIC); return; nla_put_failure: kfree_skb(msg); } static void iwl_mvm_send_csi_event(struct iwl_mvm *mvm, void *hdr, unsigned int hdr_len, void **data, unsigned int *len) { unsigned int data_len = 0; struct sk_buff *msg; struct nlattr *dattr; u8 *pos; int i; if (!mvm->csi_portid) return; for (i = 0; len[i] && data[i]; i++) data_len += len[i]; msg = cfg80211_vendor_event_alloc_ucast(mvm->hw->wiphy, NULL, mvm->csi_portid, 100 + hdr_len + data_len, IWL_MVM_VENDOR_EVENT_IDX_CSI, GFP_KERNEL); if (!msg) return; if (nla_put(msg, IWL_MVM_VENDOR_ATTR_CSI_HDR, hdr_len, hdr)) goto nla_put_failure; dattr = nla_reserve(msg, 
IWL_MVM_VENDOR_ATTR_CSI_DATA, data_len); if (!dattr) goto nla_put_failure; pos = nla_data(dattr); for (i = 0; len[i] && data[i]; i++) { memcpy(pos, data[i], len[i]); pos += len[i]; } cfg80211_vendor_event(msg, GFP_KERNEL); return; nla_put_failure: kfree_skb(msg); } static void iwl_mvm_csi_free_pages(struct iwl_mvm *mvm) { int idx; for (idx = 0; idx < ARRAY_SIZE(mvm->csi_data_entries); idx++) { if (mvm->csi_data_entries[idx].page) { __free_pages(mvm->csi_data_entries[idx].page, mvm->csi_data_entries[idx].page_order); memset(&mvm->csi_data_entries[idx], 0, sizeof(mvm->csi_data_entries[idx])); } } } static void iwl_mvm_csi_steal(struct iwl_mvm *mvm, unsigned int idx, struct iwl_rx_cmd_buffer *rxb) { /* firmware is ... confused */ if (WARN_ON(mvm->csi_data_entries[idx].page)) { iwl_mvm_csi_free_pages(mvm); return; } mvm->csi_data_entries[idx].page = rxb_steal_page(rxb); mvm->csi_data_entries[idx].page_order = rxb->_rx_page_order; mvm->csi_data_entries[idx].offset = rxb->_offset; } void iwl_mvm_rx_csi_header(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { iwl_mvm_csi_steal(mvm, 0, rxb); } static void iwl_mvm_csi_complete(struct iwl_mvm *mvm) { struct iwl_rx_packet *hdr_pkt; struct iwl_csi_data_buffer *hdr_buf = &mvm->csi_data_entries[0]; void *data[IWL_CSI_MAX_EXPECTED_CHUNKS + 1] = {}; unsigned int len[IWL_CSI_MAX_EXPECTED_CHUNKS + 1] = {}; unsigned int csi_hdr_len; void *csi_hdr; int i; /* * Ensure we have the right # of entries, the local data/len * variables include a terminating entry, csi_data_entries * instead has one place for the header. */ BUILD_BUG_ON(ARRAY_SIZE(data) < ARRAY_SIZE(mvm->csi_data_entries)); BUILD_BUG_ON(ARRAY_SIZE(len) < ARRAY_SIZE(mvm->csi_data_entries)); /* need at least the header and first fragment */ if (WARN_ON(!hdr_buf->page || !mvm->csi_data_entries[1].page)) { iwl_mvm_csi_free_pages(mvm); return; } hdr_pkt = (void *)((unsigned long)page_address(hdr_buf->page) + hdr_buf->offset); csi_hdr = (void *)hdr_pkt->data; csi_hdr_len = iwl_rx_packet_payload_len(hdr_pkt); for (i = 1; i < ARRAY_SIZE(mvm->csi_data_entries); i++) { struct iwl_csi_data_buffer *buf = &mvm->csi_data_entries[i]; struct iwl_rx_packet *pkt; struct iwl_csi_chunk_notification *chunk; unsigned int chunk_len; if (!buf->page) break; pkt = (void *)((unsigned long)page_address(buf->page) + buf->offset); chunk = (void *)pkt->data; chunk_len = iwl_rx_packet_payload_len(pkt); if (sizeof(*chunk) + le32_to_cpu(chunk->size) > chunk_len) goto free; data[i - 1] = chunk->data; len[i - 1] = le32_to_cpu(chunk->size); } iwl_mvm_send_csi_event(mvm, csi_hdr, csi_hdr_len, data, len); free: iwl_mvm_csi_free_pages(mvm); } void iwl_mvm_rx_csi_chunk(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_csi_chunk_notification *chunk = (void *)pkt->data; int num; int idx; switch (mvm->cmd_ver.csi_notif) { case 1: num = le16_get_bits(chunk->ctl, IWL_CSI_CHUNK_CTL_NUM_MASK_VER_1); idx = le16_get_bits(chunk->ctl, IWL_CSI_CHUNK_CTL_IDX_MASK_VER_1) + 1; break; case 2: num = le16_get_bits(chunk->ctl, IWL_CSI_CHUNK_CTL_NUM_MASK_VER_2); idx = le16_get_bits(chunk->ctl, IWL_CSI_CHUNK_CTL_IDX_MASK_VER_2) + 1; break; default: WARN_ON(1); return; } /* -1 to account for the header we also store there */ if (WARN_ON_ONCE(idx >= ARRAY_SIZE(mvm->csi_data_entries) - 1)) return; iwl_mvm_csi_steal(mvm, idx, rxb); if (num == idx) iwl_mvm_csi_complete(mvm); } 
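/*
 * CSI flow summary (as implemented above): userspace opts in by sending
 * the IWL_MVM_VENDOR_CMD_CSI_EVENT vendor command, which makes
 * iwl_mvm_vendor_csi_register() record the sender's netlink port id in
 * mvm->csi_portid. Firmware CSI data then arrives as a header
 * notification plus numbered chunks; the pages are stolen from the RX
 * path, reassembled in iwl_mvm_csi_complete() and unicast to the
 * registered port id. The netlink release notifier at the top of this
 * file clears csi_portid when that listener goes away.
 */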
backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/pcie/000077500000000000000000000000001351064634500255075ustar00rootroot00000000000000backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c000066400000000000000000000167511351064634500304320ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * BSD LICENSE * * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* *****************************************************************************/ #include "iwl-trans.h" #include "iwl-fh.h" #include "iwl-context-info-gen3.h" #include "internal.h" #include "iwl-prph.h" int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans, const struct fw_img *fw) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_context_info_gen3 *ctxt_info_gen3; struct iwl_prph_scratch *prph_scratch; struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl; struct iwl_prph_info *prph_info; void *iml_img; u32 control_flags = 0; int ret; int cmdq_size = max_t(u32, IWL_CMD_QUEUE_SIZE, trans->cfg->min_txq_size); /* Allocate prph scratch */ prph_scratch = dma_alloc_coherent(trans->dev, sizeof(*prph_scratch), &trans_pcie->prph_scratch_dma_addr, GFP_KERNEL); if (!prph_scratch) return -ENOMEM; prph_sc_ctrl = &prph_scratch->ctrl_cfg; prph_sc_ctrl->version.version = 0; prph_sc_ctrl->version.mac_id = cpu_to_le16((u16)iwl_read32(trans, CSR_HW_REV)); prph_sc_ctrl->version.size = cpu_to_le16(sizeof(*prph_scratch) / 4); control_flags = IWL_PRPH_SCRATCH_RB_SIZE_4K | IWL_PRPH_SCRATCH_MTR_MODE | (IWL_PRPH_MTR_FORMAT_256B & IWL_PRPH_SCRATCH_MTR_FORMAT) | IWL_PRPH_SCRATCH_EARLY_DEBUG_EN | IWL_PRPH_SCRATCH_EDBG_DEST_DRAM; prph_sc_ctrl->control.control_flags = cpu_to_le32(control_flags); /* initialize RX default queue */ prph_sc_ctrl->rbd_cfg.free_rbd_addr = cpu_to_le64(trans_pcie->rxq->bd_dma); /* Configure debug, for integration */ if (!trans->dbg.ini_valid) iwl_pcie_alloc_fw_monitor(trans, 0); if (trans->dbg.num_blocks) { prph_sc_ctrl->hwm_cfg.hwm_base_addr = cpu_to_le64(trans->dbg.fw_mon[0].physical); prph_sc_ctrl->hwm_cfg.hwm_size = cpu_to_le32(trans->dbg.fw_mon[0].size); } /* allocate ucode sections in dram and set addresses */ ret = iwl_pcie_init_fw_sec(trans, fw, &prph_scratch->dram); if (ret) { dma_free_coherent(trans->dev, sizeof(*prph_scratch), prph_scratch, trans_pcie->prph_scratch_dma_addr); return ret; } /* Allocate prph information * currently we don't assign to the prph info anything, but it would get * assigned later */ prph_info = dma_alloc_coherent(trans->dev, sizeof(*prph_info), &trans_pcie->prph_info_dma_addr, GFP_KERNEL); if (!prph_info) return -ENOMEM; /* Allocate context info */ ctxt_info_gen3 = dma_alloc_coherent(trans->dev, sizeof(*ctxt_info_gen3), &trans_pcie->ctxt_info_dma_addr, GFP_KERNEL); if (!ctxt_info_gen3) return -ENOMEM; ctxt_info_gen3->prph_info_base_addr = cpu_to_le64(trans_pcie->prph_info_dma_addr); ctxt_info_gen3->prph_scratch_base_addr = cpu_to_le64(trans_pcie->prph_scratch_dma_addr); ctxt_info_gen3->prph_scratch_size = cpu_to_le32(sizeof(*prph_scratch)); ctxt_info_gen3->cr_head_idx_arr_base_addr = cpu_to_le64(trans_pcie->rxq->rb_stts_dma); ctxt_info_gen3->tr_tail_idx_arr_base_addr = cpu_to_le64(trans_pcie->rxq->tr_tail_dma); ctxt_info_gen3->cr_tail_idx_arr_base_addr = cpu_to_le64(trans_pcie->rxq->cr_tail_dma); ctxt_info_gen3->cr_idx_arr_size = cpu_to_le16(IWL_NUM_OF_COMPLETION_RINGS); ctxt_info_gen3->tr_idx_arr_size = cpu_to_le16(IWL_NUM_OF_TRANSFER_RINGS); ctxt_info_gen3->mtr_base_addr = cpu_to_le64(trans_pcie->txq[trans_pcie->cmd_queue]->dma_addr); ctxt_info_gen3->mcr_base_addr = cpu_to_le64(trans_pcie->rxq->used_bd_dma); ctxt_info_gen3->mtr_size = cpu_to_le16(TFD_QUEUE_CB_SIZE(cmdq_size)); ctxt_info_gen3->mcr_size = cpu_to_le16(RX_QUEUE_CB_SIZE(MQ_RX_TABLE_SIZE)); trans_pcie->ctxt_info_gen3 = ctxt_info_gen3; trans_pcie->prph_info = prph_info; trans_pcie->prph_scratch = prph_scratch; /* Allocate IML */ iml_img = 
dma_alloc_coherent(trans->dev, trans->iml_len, &trans_pcie->iml_dma_addr, GFP_KERNEL); if (!iml_img) return -ENOMEM; memcpy(iml_img, trans->iml, trans->iml_len); iwl_enable_fw_load_int_ctx_info(trans); /* kick FW self load */ iwl_write64(trans, CSR_CTXT_INFO_ADDR, trans_pcie->ctxt_info_dma_addr); iwl_write64(trans, CSR_IML_DATA_ADDR, trans_pcie->iml_dma_addr); iwl_write32(trans, CSR_IML_SIZE_ADDR, trans->iml_len); iwl_set_bit(trans, CSR_CTXT_INFO_BOOT_CTRL, CSR_AUTO_FUNC_BOOT_ENA); if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_AX210) iwl_write_umac_prph(trans, UREG_CPU_INIT_RUN, 1); else iwl_set_bit(trans, CSR_GP_CNTRL, CSR_AUTO_FUNC_INIT); return 0; } void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); if (!trans_pcie->ctxt_info_gen3) return; dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info_gen3), trans_pcie->ctxt_info_gen3, trans_pcie->ctxt_info_dma_addr); trans_pcie->ctxt_info_dma_addr = 0; trans_pcie->ctxt_info_gen3 = NULL; iwl_pcie_ctxt_info_free_fw_img(trans); dma_free_coherent(trans->dev, sizeof(*trans_pcie->prph_scratch), trans_pcie->prph_scratch, trans_pcie->prph_scratch_dma_addr); trans_pcie->prph_scratch_dma_addr = 0; trans_pcie->prph_scratch = NULL; dma_free_coherent(trans->dev, sizeof(*trans_pcie->prph_info), trans_pcie->prph_info, trans_pcie->prph_info_dma_addr); trans_pcie->prph_info_dma_addr = 0; trans_pcie->prph_info = NULL; } backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c000066400000000000000000000200211351064634500275610ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * BSD LICENSE * * Copyright(c) 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #include "iwl-trans.h" #include "iwl-fh.h" #include "iwl-context-info.h" #include "internal.h" #include "iwl-prph.h" void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans) { struct iwl_self_init_dram *dram = &trans->init_dram; int i; if (!dram->paging) { WARN_ON(dram->paging_cnt); return; } /* free paging*/ for (i = 0; i < dram->paging_cnt; i++) dma_free_coherent(trans->dev, dram->paging[i].size, dram->paging[i].block, dram->paging[i].physical); kfree(dram->paging); dram->paging_cnt = 0; dram->paging = NULL; } int iwl_pcie_init_fw_sec(struct iwl_trans *trans, const struct fw_img *fw, struct iwl_context_info_dram *ctxt_dram) { struct iwl_self_init_dram *dram = &trans->init_dram; int i, ret, lmac_cnt, umac_cnt, paging_cnt; if (WARN(dram->paging, "paging shouldn't already be initialized (%d pages)\n", dram->paging_cnt)) iwl_pcie_ctxt_info_free_paging(trans); lmac_cnt = iwl_pcie_get_num_sections(fw, 0); /* add 1 due to separator */ umac_cnt = iwl_pcie_get_num_sections(fw, lmac_cnt + 1); /* add 2 due to separators */ paging_cnt = iwl_pcie_get_num_sections(fw, lmac_cnt + umac_cnt + 2); dram->fw = kcalloc(umac_cnt + lmac_cnt, sizeof(*dram->fw), GFP_KERNEL); if (!dram->fw) return -ENOMEM; dram->paging = kcalloc(paging_cnt, sizeof(*dram->paging), GFP_KERNEL); if (!dram->paging) return -ENOMEM; /* initialize lmac sections */ for (i = 0; i < lmac_cnt; i++) { ret = iwl_pcie_ctxt_info_alloc_dma(trans, &fw->sec[i], &dram->fw[dram->fw_cnt]); if (ret) return ret; ctxt_dram->lmac_img[i] = cpu_to_le64(dram->fw[dram->fw_cnt].physical); dram->fw_cnt++; } /* initialize umac sections */ for (i = 0; i < umac_cnt; i++) { /* access FW with +1 to make up for lmac separator */ ret = iwl_pcie_ctxt_info_alloc_dma(trans, &fw->sec[dram->fw_cnt + 1], &dram->fw[dram->fw_cnt]); if (ret) return ret; ctxt_dram->umac_img[i] = cpu_to_le64(dram->fw[dram->fw_cnt].physical); dram->fw_cnt++; } /* * Initialize paging. * Paging memory isn't stored in dram->fw as the umac and lmac - it is * stored separately. * This is since the timing of its release is different - * while fw memory can be released on alive, the paging memory can be * freed only when the device goes down. * Given that, the logic here in accessing the fw image is a bit * different - fw_cnt isn't changing so loop counter is added to it. 
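 * For example, with 3 lmac and 2 umac sections, fw_cnt ends up at 5
 * after the loops above, so paging section i is read from
 * fw->sec[5 + i + 2] - the +2 skips the lmac and umac separators.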
*/ for (i = 0; i < paging_cnt; i++) { /* access FW with +2 to make up for lmac & umac separators */ int fw_idx = dram->fw_cnt + i + 2; ret = iwl_pcie_ctxt_info_alloc_dma(trans, &fw->sec[fw_idx], &dram->paging[i]); if (ret) return ret; ctxt_dram->virtual_img[i] = cpu_to_le64(dram->paging[i].physical); dram->paging_cnt++; } return 0; } int iwl_pcie_ctxt_info_init(struct iwl_trans *trans, const struct fw_img *fw) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_context_info *ctxt_info; struct iwl_context_info_rbd_cfg *rx_cfg; u32 control_flags = 0, rb_size; int ret; ctxt_info = dma_alloc_coherent(trans->dev, sizeof(*ctxt_info), &trans_pcie->ctxt_info_dma_addr, GFP_KERNEL); if (!ctxt_info) return -ENOMEM; ctxt_info->version.version = 0; ctxt_info->version.mac_id = cpu_to_le16((u16)iwl_read32(trans, CSR_HW_REV)); /* size is in DWs */ ctxt_info->version.size = cpu_to_le16(sizeof(*ctxt_info) / 4); switch (trans_pcie->rx_buf_size) { case IWL_AMSDU_2K: rb_size = IWL_CTXT_INFO_RB_SIZE_2K; break; case IWL_AMSDU_4K: rb_size = IWL_CTXT_INFO_RB_SIZE_4K; break; case IWL_AMSDU_8K: rb_size = IWL_CTXT_INFO_RB_SIZE_8K; break; case IWL_AMSDU_12K: rb_size = IWL_CTXT_INFO_RB_SIZE_12K; break; default: WARN_ON(1); rb_size = IWL_CTXT_INFO_RB_SIZE_4K; } BUILD_BUG_ON(RX_QUEUE_CB_SIZE(MQ_RX_TABLE_SIZE) > 0xF); control_flags = IWL_CTXT_INFO_TFD_FORMAT_LONG | (RX_QUEUE_CB_SIZE(MQ_RX_TABLE_SIZE) << IWL_CTXT_INFO_RB_CB_SIZE_POS) | (rb_size << IWL_CTXT_INFO_RB_SIZE_POS); ctxt_info->control.control_flags = cpu_to_le32(control_flags); /* initialize RX default queue */ rx_cfg = &ctxt_info->rbd_cfg; rx_cfg->free_rbd_addr = cpu_to_le64(trans_pcie->rxq->bd_dma); rx_cfg->used_rbd_addr = cpu_to_le64(trans_pcie->rxq->used_bd_dma); rx_cfg->status_wr_ptr = cpu_to_le64(trans_pcie->rxq->rb_stts_dma); /* initialize TX command queue */ ctxt_info->hcmd_cfg.cmd_queue_addr = cpu_to_le64(trans_pcie->txq[trans_pcie->cmd_queue]->dma_addr); ctxt_info->hcmd_cfg.cmd_queue_size = TFD_QUEUE_CB_SIZE(IWL_CMD_QUEUE_SIZE); /* allocate ucode sections in dram and set addresses */ ret = iwl_pcie_init_fw_sec(trans, fw, &ctxt_info->dram); if (ret) { dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info), ctxt_info, trans_pcie->ctxt_info_dma_addr); return ret; } trans_pcie->ctxt_info = ctxt_info; iwl_enable_fw_load_int_ctx_info(trans); /* Configure debug, if exists */ if (iwl_pcie_dbg_on(trans)) iwl_pcie_apply_destination(trans); /* kick FW self load */ iwl_write64(trans, CSR_CTXT_INFO_BA, trans_pcie->ctxt_info_dma_addr); iwl_write_prph(trans, UREG_CPU_INIT_RUN, 1); /* Context info will be released upon alive or failure to get one */ return 0; } void iwl_pcie_ctxt_info_free(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); if (!trans_pcie->ctxt_info) return; dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info), trans_pcie->ctxt_info, trans_pcie->ctxt_info_dma_addr); trans_pcie->ctxt_info_dma_addr = 0; trans_pcie->ctxt_info = NULL; iwl_pcie_ctxt_info_free_fw_img(trans); } backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/pcie/drv.c000066400000000000000000001400031351064634500264440ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. 
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016-2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * All rights reserved. * Copyright(c) 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 *
 *****************************************************************************/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/acpi.h>

#include "fw/acpi.h"

#include "iwl-trans.h"
#include "iwl-drv.h"
#include "internal.h"

#define IWL_PCI_DEVICE(dev, subdev, cfg) \
	.vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \
	.subvendor = PCI_ANY_ID, .subdevice = (subdev), \
	.driver_data = (kernel_ulong_t)&(cfg)

/* Hardware specific file defines the PCI IDs table for that hardware module */
static const struct pci_device_id iwl_hw_card_ids[] = {
#if IS_ENABLED(CPTCFG_IWLMVM)
/* 7260 Series */
	{IWL_PCI_DEVICE(0x08B1, 0x4070, iwl7260_2ac_cfg)},
	{IWL_PCI_DEVICE(0x08B1, 0x4072, iwl7260_2ac_cfg)},
	{IWL_PCI_DEVICE(0x08B1, 0x4170, iwl7260_2ac_cfg)},
	{IWL_PCI_DEVICE(0x08B1, 0x4C60, iwl7260_2ac_cfg)},
	{IWL_PCI_DEVICE(0x08B1, 0x4C70, iwl7260_2ac_cfg)},
	{IWL_PCI_DEVICE(0x08B1, 0x4060, iwl7260_2n_cfg)},
	{IWL_PCI_DEVICE(0x08B1, 0x406A, iwl7260_2n_cfg)},
	{IWL_PCI_DEVICE(0x08B1, 0x4160, iwl7260_2n_cfg)},
	{IWL_PCI_DEVICE(0x08B1, 0x4062, iwl7260_n_cfg)},
	{IWL_PCI_DEVICE(0x08B1, 0x4162, iwl7260_n_cfg)},
	{IWL_PCI_DEVICE(0x08B2, 0x4270, iwl7260_2ac_cfg)},
	{IWL_PCI_DEVICE(0x08B2, 0x4272, iwl7260_2ac_cfg)},
	{IWL_PCI_DEVICE(0x08B2, 0x4260, iwl7260_2n_cfg)},
	{IWL_PCI_DEVICE(0x08B2, 0x426A, iwl7260_2n_cfg)},
	{IWL_PCI_DEVICE(0x08B2, 0x4262, iwl7260_n_cfg)},
	{IWL_PCI_DEVICE(0x08B1, 0x4470, iwl7260_2ac_cfg)},
	{IWL_PCI_DEVICE(0x08B1, 0x4472, iwl7260_2ac_cfg)},
	{IWL_PCI_DEVICE(0x08B1, 0x4460, iwl7260_2n_cfg)},
	{IWL_PCI_DEVICE(0x08B1, 0x446A, iwl7260_2n_cfg)},
	{IWL_PCI_DEVICE(0x08B1, 0x4462, iwl7260_n_cfg)},
	{IWL_PCI_DEVICE(0x08B1, 0x4870, iwl7260_2ac_cfg)},
	{IWL_PCI_DEVICE(0x08B1, 0x486E, iwl7260_2ac_cfg)},
	{IWL_PCI_DEVICE(0x08B1, 0x4A70, iwl7260_2ac_cfg_high_temp)},
	{IWL_PCI_DEVICE(0x08B1, 0x4A6E, iwl7260_2ac_cfg_high_temp)},
	{IWL_PCI_DEVICE(0x08B1, 0x4A6C, iwl7260_2ac_cfg_high_temp)},
	{IWL_PCI_DEVICE(0x08B1, 0x4570, iwl7260_2ac_cfg)},
	{IWL_PCI_DEVICE(0x08B1, 0x4560, iwl7260_2n_cfg)},
	{IWL_PCI_DEVICE(0x08B2, 0x4370, iwl7260_2ac_cfg)},
	{IWL_PCI_DEVICE(0x08B2, 0x4360, iwl7260_2n_cfg)},
	{IWL_PCI_DEVICE(0x08B1, 0x5070, iwl7260_2ac_cfg)},
	{IWL_PCI_DEVICE(0x08B1, 0x5072, iwl7260_2ac_cfg)},
	{IWL_PCI_DEVICE(0x08B1, 0x5170, iwl7260_2ac_cfg)},
	{IWL_PCI_DEVICE(0x08B1, 0x5770, iwl7260_2ac_cfg)},
	{IWL_PCI_DEVICE(0x08B1, 0x4020, iwl7260_2n_cfg)},
	{IWL_PCI_DEVICE(0x08B1, 0x402A, iwl7260_2n_cfg)},
	{IWL_PCI_DEVICE(0x08B2, 0x4220, iwl7260_2n_cfg)},
	{IWL_PCI_DEVICE(0x08B1, 0x4420, iwl7260_2n_cfg)},
	{IWL_PCI_DEVICE(0x08B1, 0xC070, iwl7260_2ac_cfg)},
	{IWL_PCI_DEVICE(0x08B1, 0xC072, iwl7260_2ac_cfg)},
	{IWL_PCI_DEVICE(0x08B1, 0xC170, iwl7260_2ac_cfg)},
	{IWL_PCI_DEVICE(0x08B1, 0xC060, iwl7260_2n_cfg)},
	{IWL_PCI_DEVICE(0x08B1, 0xC06A, iwl7260_2n_cfg)},
	{IWL_PCI_DEVICE(0x08B1, 0xC160, iwl7260_2n_cfg)},
	{IWL_PCI_DEVICE(0x08B1, 0xC062, iwl7260_n_cfg)},
	{IWL_PCI_DEVICE(0x08B1, 0xC162, iwl7260_n_cfg)},
	{IWL_PCI_DEVICE(0x08B1, 0xC770, iwl7260_2ac_cfg)},
	{IWL_PCI_DEVICE(0x08B1, 0xC760, iwl7260_2n_cfg)},
	{IWL_PCI_DEVICE(0x08B2, 0xC270, iwl7260_2ac_cfg)},
	{IWL_PCI_DEVICE(0x08B1, 0xCC70, iwl7260_2ac_cfg)},
	{IWL_PCI_DEVICE(0x08B1, 0xCC60, iwl7260_2ac_cfg)},
	{IWL_PCI_DEVICE(0x08B2, 0xC272, iwl7260_2ac_cfg)},
	{IWL_PCI_DEVICE(0x08B2, 0xC260, iwl7260_2n_cfg)},
	{IWL_PCI_DEVICE(0x08B2, 0xC26A, iwl7260_n_cfg)},
	{IWL_PCI_DEVICE(0x08B2, 0xC262, iwl7260_n_cfg)},
	{IWL_PCI_DEVICE(0x08B1, 0xC470, iwl7260_2ac_cfg)},
	{IWL_PCI_DEVICE(0x08B1, 0xC472, iwl7260_2ac_cfg)},
	{IWL_PCI_DEVICE(0x08B1, 0xC460, iwl7260_2n_cfg)},
	{IWL_PCI_DEVICE(0x08B1, 0xC462, iwl7260_n_cfg)},
{IWL_PCI_DEVICE(0x08B1, 0xC570, iwl7260_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0xC560, iwl7260_2n_cfg)}, {IWL_PCI_DEVICE(0x08B2, 0xC370, iwl7260_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0xC360, iwl7260_2n_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0xC020, iwl7260_2n_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0xC02A, iwl7260_2n_cfg)}, {IWL_PCI_DEVICE(0x08B2, 0xC220, iwl7260_2n_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0xC420, iwl7260_2n_cfg)}, /* 3160 Series */ {IWL_PCI_DEVICE(0x08B3, 0x0070, iwl3160_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B3, 0x0072, iwl3160_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B3, 0x0170, iwl3160_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B3, 0x0172, iwl3160_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B3, 0x0060, iwl3160_2n_cfg)}, {IWL_PCI_DEVICE(0x08B3, 0x0062, iwl3160_n_cfg)}, {IWL_PCI_DEVICE(0x08B4, 0x0270, iwl3160_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B4, 0x0272, iwl3160_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B3, 0x0470, iwl3160_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B3, 0x0472, iwl3160_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B4, 0x0370, iwl3160_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B3, 0x8070, iwl3160_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B3, 0x8072, iwl3160_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B3, 0x8170, iwl3160_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B3, 0x8172, iwl3160_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B3, 0x8060, iwl3160_2n_cfg)}, {IWL_PCI_DEVICE(0x08B3, 0x8062, iwl3160_n_cfg)}, {IWL_PCI_DEVICE(0x08B4, 0x8270, iwl3160_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B4, 0x8370, iwl3160_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B4, 0x8272, iwl3160_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B3, 0x8470, iwl3160_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B3, 0x8570, iwl3160_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B3, 0x1070, iwl3160_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B3, 0x1170, iwl3160_2ac_cfg)}, /* 3165 Series */ {IWL_PCI_DEVICE(0x3165, 0x4010, iwl3165_2ac_cfg)}, {IWL_PCI_DEVICE(0x3165, 0x4012, iwl3165_2ac_cfg)}, {IWL_PCI_DEVICE(0x3166, 0x4212, iwl3165_2ac_cfg)}, {IWL_PCI_DEVICE(0x3165, 0x4410, iwl3165_2ac_cfg)}, {IWL_PCI_DEVICE(0x3165, 0x4510, iwl3165_2ac_cfg)}, {IWL_PCI_DEVICE(0x3165, 0x4110, iwl3165_2ac_cfg)}, {IWL_PCI_DEVICE(0x3166, 0x4310, iwl3165_2ac_cfg)}, {IWL_PCI_DEVICE(0x3166, 0x4210, iwl3165_2ac_cfg)}, {IWL_PCI_DEVICE(0x3165, 0x8010, iwl3165_2ac_cfg)}, {IWL_PCI_DEVICE(0x3165, 0x8110, iwl3165_2ac_cfg)}, /* 3168 Series */ {IWL_PCI_DEVICE(0x24FB, 0x2010, iwl3168_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FB, 0x2110, iwl3168_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FB, 0x2050, iwl3168_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FB, 0x2150, iwl3168_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FB, 0x0000, iwl3168_2ac_cfg)}, /* 7265 Series */ {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5110, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5100, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095B, 0x5310, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095B, 0x5302, iwl7265_n_cfg)}, {IWL_PCI_DEVICE(0x095B, 0x5210, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5C10, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5412, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5510, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5400, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x1010, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5000, iwl7265_2n_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x500A, iwl7265_2n_cfg)}, {IWL_PCI_DEVICE(0x095B, 0x5200, iwl7265_2n_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5002, iwl7265_n_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5102, iwl7265_n_cfg)}, {IWL_PCI_DEVICE(0x095B, 0x5202, iwl7265_n_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x9010, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x9012, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x900A, 
iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x9112, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095B, 0x9210, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095B, 0x9200, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x9510, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095B, 0x9310, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x9410, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5020, iwl7265_2n_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x502A, iwl7265_2n_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5420, iwl7265_2n_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5090, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5190, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5590, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095B, 0x5290, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5490, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5F10, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095B, 0x5212, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095B, 0x520A, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x9000, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x9400, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x9E10, iwl7265_2ac_cfg)}, /* 8000 Series */ {IWL_PCI_DEVICE(0x24F3, 0x0010, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x1010, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x10B0, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0130, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x1130, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0132, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x1132, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0110, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x01F0, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0012, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x1012, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x1110, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0050, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0250, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x1050, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0150, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x1150, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F4, 0x0030, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F4, 0x1030, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0xC010, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0xC110, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0xD010, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0xC050, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0xD050, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0xD0B0, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0xB0B0, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x8010, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x8110, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x9010, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x9110, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F4, 0x8030, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F4, 0x9030, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F4, 0xC030, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F4, 0xD030, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x8130, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x9130, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x8132, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x9132, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x8050, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x8150, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x9050, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x9150, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0004, iwl8260_2n_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0044, iwl8260_2n_cfg)}, {IWL_PCI_DEVICE(0x24F5, 0x0010, iwl4165_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F6, 0x0030, iwl4165_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0810, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0910, iwl8260_2ac_cfg)}, 
{IWL_PCI_DEVICE(0x24F3, 0x0850, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0950, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0930, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0000, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x4010, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x0010, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x0110, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x1110, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x1130, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x0130, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x1010, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x10D0, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x0050, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x0150, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x9010, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x8110, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x8050, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x8010, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x0810, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x9110, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x8130, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x0910, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x0930, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x0950, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x0850, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x1014, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x3E02, iwl8275_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x3E01, iwl8275_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x1012, iwl8275_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x0012, iwl8275_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x0014, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x9074, iwl8265_2ac_cfg)}, #endif /* IS_ENABLED(CPTCFG_IWLMVM) */ #if IS_ENABLED(CPTCFG_IWLMVM) || IS_ENABLED(CPTCFG_IWLFMAC) /* 9000 Series */ {IWL_PCI_DEVICE(0x02F0, 0x0030, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)}, {IWL_PCI_DEVICE(0x02F0, 0x0034, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)}, {IWL_PCI_DEVICE(0x02F0, 0x0038, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)}, {IWL_PCI_DEVICE(0x02F0, 0x003C, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)}, {IWL_PCI_DEVICE(0x02F0, 0x0060, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)}, {IWL_PCI_DEVICE(0x02F0, 0x0064, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)}, {IWL_PCI_DEVICE(0x02F0, 0x00A0, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)}, {IWL_PCI_DEVICE(0x02F0, 0x00A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)}, {IWL_PCI_DEVICE(0x02F0, 0x0230, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)}, {IWL_PCI_DEVICE(0x02F0, 0x0234, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)}, {IWL_PCI_DEVICE(0x02F0, 0x0238, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)}, {IWL_PCI_DEVICE(0x02F0, 0x023C, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)}, {IWL_PCI_DEVICE(0x02F0, 0x0260, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)}, {IWL_PCI_DEVICE(0x02F0, 0x0264, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)}, {IWL_PCI_DEVICE(0x02F0, 0x02A0, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)}, {IWL_PCI_DEVICE(0x02F0, 0x02A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)}, {IWL_PCI_DEVICE(0x02F0, 0x1551, iwl9560_killer_s_2ac_cfg_quz_a0_jf_b0_soc)}, {IWL_PCI_DEVICE(0x02F0, 0x1552, iwl9560_killer_i_2ac_cfg_quz_a0_jf_b0_soc)}, {IWL_PCI_DEVICE(0x02F0, 0x2030, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)}, {IWL_PCI_DEVICE(0x02F0, 0x2034, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)}, {IWL_PCI_DEVICE(0x02F0, 0x4030, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)}, {IWL_PCI_DEVICE(0x02F0, 0x4034, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)}, {IWL_PCI_DEVICE(0x02F0, 0x40A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)}, {IWL_PCI_DEVICE(0x02F0, 0x4234, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)}, {IWL_PCI_DEVICE(0x02F0, 0x42A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)}, {IWL_PCI_DEVICE(0x06F0, 0x0030, 
iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)}, {IWL_PCI_DEVICE(0x06F0, 0x0034, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)}, {IWL_PCI_DEVICE(0x06F0, 0x0038, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)}, {IWL_PCI_DEVICE(0x06F0, 0x003C, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)}, {IWL_PCI_DEVICE(0x06F0, 0x0060, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)}, {IWL_PCI_DEVICE(0x06F0, 0x0064, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)}, {IWL_PCI_DEVICE(0x06F0, 0x00A0, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)}, {IWL_PCI_DEVICE(0x06F0, 0x00A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)}, {IWL_PCI_DEVICE(0x06F0, 0x0230, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)}, {IWL_PCI_DEVICE(0x06F0, 0x0234, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)}, {IWL_PCI_DEVICE(0x06F0, 0x0238, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)}, {IWL_PCI_DEVICE(0x06F0, 0x023C, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)}, {IWL_PCI_DEVICE(0x06F0, 0x0260, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)}, {IWL_PCI_DEVICE(0x06F0, 0x0264, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)}, {IWL_PCI_DEVICE(0x06F0, 0x02A0, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)}, {IWL_PCI_DEVICE(0x06F0, 0x02A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)}, {IWL_PCI_DEVICE(0x06F0, 0x1551, iwl9560_killer_s_2ac_cfg_quz_a0_jf_b0_soc)}, {IWL_PCI_DEVICE(0x06F0, 0x1552, iwl9560_killer_i_2ac_cfg_quz_a0_jf_b0_soc)}, {IWL_PCI_DEVICE(0x06F0, 0x2030, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)}, {IWL_PCI_DEVICE(0x06F0, 0x2034, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)}, {IWL_PCI_DEVICE(0x06F0, 0x4030, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)}, {IWL_PCI_DEVICE(0x06F0, 0x4034, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)}, {IWL_PCI_DEVICE(0x06F0, 0x40A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)}, {IWL_PCI_DEVICE(0x06F0, 0x4234, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)}, {IWL_PCI_DEVICE(0x06F0, 0x42A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)}, {IWL_PCI_DEVICE(0x2526, 0x0010, iwl9260_2ac_160_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x0014, iwl9260_2ac_160_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x0018, iwl9260_2ac_160_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x001C, iwl9260_2ac_160_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x0030, iwl9560_2ac_160_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x0034, iwl9560_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x0038, iwl9560_2ac_160_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x003C, iwl9560_2ac_160_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x0060, iwl9460_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x0064, iwl9460_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x00A0, iwl9460_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x00A4, iwl9460_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x0210, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x0214, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x0230, iwl9560_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x0234, iwl9560_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x0238, iwl9560_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x023C, iwl9560_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x0260, iwl9460_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x0264, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2526, 0x02A0, iwl9460_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x02A4, iwl9460_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x1010, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x1030, iwl9560_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x1210, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x1410, iwl9270_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x1420, iwl9460_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2526, 0x1550, iwl9260_killer_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2526, 0x1552, iwl9560_killer_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2526, 0x1610, iwl9270_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x2030, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x2526, 0x2034, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x2526, 0x4010, iwl9260_2ac_160_cfg)}, 
{IWL_PCI_DEVICE(0x2526, 0x4018, iwl9260_2ac_160_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x401C, iwl9260_2ac_160_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x4030, iwl9560_2ac_160_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x4034, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9460_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x4234, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2526, 0x42A4, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2526, 0x6014, iwl9260_2ac_160_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x8014, iwl9260_2ac_160_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x8010, iwl9260_2ac_160_cfg)}, {IWL_PCI_DEVICE(0x2526, 0xA014, iwl9260_2ac_160_cfg)}, {IWL_PCI_DEVICE(0x271B, 0x0010, iwl9160_2ac_cfg)}, {IWL_PCI_DEVICE(0x271B, 0x0014, iwl9160_2ac_cfg)}, {IWL_PCI_DEVICE(0x271B, 0x0210, iwl9160_2ac_cfg)}, {IWL_PCI_DEVICE(0x271B, 0x0214, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x271C, 0x0214, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x2720, 0x0034, iwl9560_2ac_160_cfg)}, {IWL_PCI_DEVICE(0x2720, 0x0038, iwl9560_2ac_160_cfg)}, {IWL_PCI_DEVICE(0x2720, 0x003C, iwl9560_2ac_160_cfg)}, {IWL_PCI_DEVICE(0x2720, 0x0060, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2720, 0x0064, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2720, 0x00A0, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2720, 0x00A4, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2720, 0x0230, iwl9560_2ac_cfg)}, {IWL_PCI_DEVICE(0x2720, 0x0234, iwl9560_2ac_cfg)}, {IWL_PCI_DEVICE(0x2720, 0x0238, iwl9560_2ac_cfg)}, {IWL_PCI_DEVICE(0x2720, 0x023C, iwl9560_2ac_cfg)}, {IWL_PCI_DEVICE(0x2720, 0x0260, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2720, 0x0264, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2720, 0x02A0, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2720, 0x02A4, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2720, 0x1010, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x2720, 0x1030, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2720, 0x1210, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x2720, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2720, 0x1552, iwl9560_killer_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2720, 0x2030, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x2720, 0x2034, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x2720, 0x4030, iwl9560_2ac_160_cfg)}, {IWL_PCI_DEVICE(0x2720, 0x4034, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x2720, 0x40A4, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2720, 0x4234, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2720, 0x42A4, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x30DC, 0x0030, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x30DC, 0x0034, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x30DC, 0x0038, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x30DC, 0x003C, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x30DC, 0x0060, iwl9460_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x30DC, 0x0064, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x30DC, 0x00A0, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x30DC, 0x00A4, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x30DC, 0x0230, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x30DC, 0x0234, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x30DC, 0x0238, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x30DC, 0x023C, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x30DC, 0x0260, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x30DC, 0x0264, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x30DC, 0x02A0, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x30DC, 0x02A4, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x30DC, 0x1010, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x30DC, 0x1030, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x30DC, 0x1210, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x30DC, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x30DC, 0x1552, iwl9560_killer_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x30DC, 
0x2030, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x30DC, 0x2034, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x30DC, 0x4030, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x30DC, 0x4034, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x30DC, 0x40A4, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x30DC, 0x4234, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x30DC, 0x42A4, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x31DC, 0x0030, iwl9560_2ac_160_cfg_shared_clk)}, {IWL_PCI_DEVICE(0x31DC, 0x0034, iwl9560_2ac_cfg_shared_clk)}, {IWL_PCI_DEVICE(0x31DC, 0x0038, iwl9560_2ac_160_cfg_shared_clk)}, {IWL_PCI_DEVICE(0x31DC, 0x003C, iwl9560_2ac_160_cfg_shared_clk)}, {IWL_PCI_DEVICE(0x31DC, 0x0060, iwl9460_2ac_cfg_shared_clk)}, {IWL_PCI_DEVICE(0x31DC, 0x0064, iwl9461_2ac_cfg_shared_clk)}, {IWL_PCI_DEVICE(0x31DC, 0x00A0, iwl9462_2ac_cfg_shared_clk)}, {IWL_PCI_DEVICE(0x31DC, 0x00A4, iwl9462_2ac_cfg_shared_clk)}, {IWL_PCI_DEVICE(0x31DC, 0x0230, iwl9560_2ac_cfg_shared_clk)}, {IWL_PCI_DEVICE(0x31DC, 0x0234, iwl9560_2ac_cfg_shared_clk)}, {IWL_PCI_DEVICE(0x31DC, 0x0238, iwl9560_2ac_cfg_shared_clk)}, {IWL_PCI_DEVICE(0x31DC, 0x023C, iwl9560_2ac_cfg_shared_clk)}, {IWL_PCI_DEVICE(0x31DC, 0x0260, iwl9461_2ac_cfg_shared_clk)}, {IWL_PCI_DEVICE(0x31DC, 0x0264, iwl9461_2ac_cfg_shared_clk)}, {IWL_PCI_DEVICE(0x31DC, 0x02A0, iwl9462_2ac_cfg_shared_clk)}, {IWL_PCI_DEVICE(0x31DC, 0x02A4, iwl9462_2ac_cfg_shared_clk)}, {IWL_PCI_DEVICE(0x31DC, 0x1010, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x31DC, 0x1030, iwl9560_2ac_cfg_shared_clk)}, {IWL_PCI_DEVICE(0x31DC, 0x1210, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x31DC, 0x1551, iwl9560_killer_s_2ac_cfg_shared_clk)}, {IWL_PCI_DEVICE(0x31DC, 0x1552, iwl9560_killer_2ac_cfg_shared_clk)}, {IWL_PCI_DEVICE(0x31DC, 0x2030, iwl9560_2ac_160_cfg_shared_clk)}, {IWL_PCI_DEVICE(0x31DC, 0x2034, iwl9560_2ac_160_cfg_shared_clk)}, {IWL_PCI_DEVICE(0x31DC, 0x4030, iwl9560_2ac_160_cfg_shared_clk)}, {IWL_PCI_DEVICE(0x31DC, 0x4034, iwl9560_2ac_160_cfg_shared_clk)}, {IWL_PCI_DEVICE(0x31DC, 0x40A4, iwl9462_2ac_cfg_shared_clk)}, {IWL_PCI_DEVICE(0x31DC, 0x4234, iwl9560_2ac_cfg_shared_clk)}, {IWL_PCI_DEVICE(0x31DC, 0x42A4, iwl9462_2ac_cfg_shared_clk)}, {IWL_PCI_DEVICE(0x34F0, 0x0030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)}, {IWL_PCI_DEVICE(0x34F0, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)}, {IWL_PCI_DEVICE(0x34F0, 0x0038, iwl9560_2ac_160_cfg_qu_b0_jf_b0)}, {IWL_PCI_DEVICE(0x34F0, 0x003C, iwl9560_2ac_160_cfg_qu_b0_jf_b0)}, {IWL_PCI_DEVICE(0x34F0, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)}, {IWL_PCI_DEVICE(0x34F0, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)}, {IWL_PCI_DEVICE(0x34F0, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)}, {IWL_PCI_DEVICE(0x34F0, 0x00A4, iwl9462_2ac_cfg_qu_b0_jf_b0)}, {IWL_PCI_DEVICE(0x34F0, 0x0230, iwl9560_2ac_cfg_qu_b0_jf_b0)}, {IWL_PCI_DEVICE(0x34F0, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)}, {IWL_PCI_DEVICE(0x34F0, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)}, {IWL_PCI_DEVICE(0x34F0, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)}, {IWL_PCI_DEVICE(0x34F0, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)}, {IWL_PCI_DEVICE(0x34F0, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)}, {IWL_PCI_DEVICE(0x34F0, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)}, {IWL_PCI_DEVICE(0x34F0, 0x02A4, iwl9462_2ac_cfg_qu_b0_jf_b0)}, {IWL_PCI_DEVICE(0x34F0, 0x1551, killer1550s_2ac_cfg_qu_b0_jf_b0)}, {IWL_PCI_DEVICE(0x34F0, 0x1552, killer1550i_2ac_cfg_qu_b0_jf_b0)}, {IWL_PCI_DEVICE(0x34F0, 0x2030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)}, {IWL_PCI_DEVICE(0x34F0, 0x2034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)}, {IWL_PCI_DEVICE(0x34F0, 0x4030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)}, {IWL_PCI_DEVICE(0x34F0, 0x4034, 
iwl9560_2ac_160_cfg_qu_b0_jf_b0)}, {IWL_PCI_DEVICE(0x34F0, 0x40A4, iwl9462_2ac_cfg_qu_b0_jf_b0)}, {IWL_PCI_DEVICE(0x34F0, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)}, {IWL_PCI_DEVICE(0x34F0, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)}, {IWL_PCI_DEVICE(0x3DF0, 0x0030, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x3DF0, 0x0034, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x3DF0, 0x0038, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x3DF0, 0x003C, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x3DF0, 0x0060, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x3DF0, 0x0064, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x3DF0, 0x00A0, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x3DF0, 0x00A4, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x3DF0, 0x0230, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x3DF0, 0x0234, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x3DF0, 0x0238, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x3DF0, 0x023C, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x3DF0, 0x0260, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x3DF0, 0x0264, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x3DF0, 0x02A0, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x3DF0, 0x02A4, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x3DF0, 0x1010, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x3DF0, 0x1030, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x3DF0, 0x1210, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x3DF0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x3DF0, 0x1552, iwl9560_killer_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x3DF0, 0x2030, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x3DF0, 0x2034, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x3DF0, 0x4030, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x3DF0, 0x4034, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x3DF0, 0x40A4, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x3DF0, 0x4234, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x3DF0, 0x42A4, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x43F0, 0x0030, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x43F0, 0x0034, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x43F0, 0x0038, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x43F0, 0x003C, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x43F0, 0x0060, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x43F0, 0x0064, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x43F0, 0x00A0, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x43F0, 0x00A4, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x43F0, 0x0230, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x43F0, 0x0234, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x43F0, 0x0238, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x43F0, 0x023C, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x43F0, 0x0260, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x43F0, 0x0264, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x43F0, 0x02A0, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x43F0, 0x02A4, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x43F0, 0x1010, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x43F0, 0x1030, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x43F0, 0x1210, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x43F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x43F0, 0x1552, iwl9560_killer_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x43F0, 0x2030, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x43F0, 0x2034, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x43F0, 0x4030, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x43F0, 0x4034, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x43F0, 0x40A4, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x43F0, 0x4234, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x43F0, 0x42A4, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl9460_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9460_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x0030, iwl9560_2ac_160_cfg_soc)}, 
{IWL_PCI_DEVICE(0x9DF0, 0x0034, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x0038, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x003C, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x0060, iwl9460_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x0064, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x00A0, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x00A4, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x0210, iwl9460_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x0230, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x0234, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x0238, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x023C, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x0260, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x0264, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x02A0, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x02A4, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x0310, iwl9460_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x0410, iwl9460_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x0510, iwl9460_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x0610, iwl9460_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x0710, iwl9460_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9460_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x1010, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x9DF0, 0x1030, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x1210, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x9DF0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x1552, iwl9560_killer_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl9460_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x2030, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x2034, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl9460_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x4030, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x4034, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x40A4, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x4234, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x42A4, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA0F0, 0x0030, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0xA0F0, 0x0034, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA0F0, 0x0038, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0xA0F0, 0x003C, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0xA0F0, 0x0060, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA0F0, 0x0064, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA0F0, 0x00A0, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA0F0, 0x00A4, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA0F0, 0x0230, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA0F0, 0x0234, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA0F0, 0x0238, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA0F0, 0x023C, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA0F0, 0x0260, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA0F0, 0x0264, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA0F0, 0x02A0, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA0F0, 0x02A4, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA0F0, 0x1010, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0xA0F0, 0x1030, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA0F0, 0x1210, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0xA0F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA0F0, 0x1552, iwl9560_killer_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA0F0, 0x2030, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0xA0F0, 0x2034, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0xA0F0, 0x4030, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0xA0F0, 0x4034, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0xA0F0, 0x40A4, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA0F0, 0x4234, iwl9560_2ac_cfg_soc)}, 
{IWL_PCI_DEVICE(0xA0F0, 0x42A4, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA370, 0x0030, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0xA370, 0x0034, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA370, 0x0038, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0xA370, 0x003C, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0xA370, 0x0060, iwl9460_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA370, 0x0064, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA370, 0x00A0, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA370, 0x00A4, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA370, 0x0230, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA370, 0x0234, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA370, 0x0238, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA370, 0x023C, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA370, 0x0260, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA370, 0x0264, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA370, 0x02A0, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA370, 0x02A4, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA370, 0x1010, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0xA370, 0x1030, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA370, 0x1210, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0xA370, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA370, 0x1552, iwl9560_killer_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA370, 0x2030, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0xA370, 0x2034, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0xA370, 0x4030, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0xA370, 0x4034, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0xA370, 0x40A4, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA370, 0x4234, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA370, 0x42A4, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2720, 0x0030, iwl9560_2ac_cfg_qnj_jf_b0)}, /* 22000 Series */ /* TODO: temporary solution to support qnj hr b0 due to HW bug */ {IWL_PCI_DEVICE(0x2526, 0x0000, iwl22000_2ax_cfg_qnj_hr_b0)}, /* TODO: remove this entry */ {IWL_PCI_DEVICE(0x0000, 0x0000, iwl22000_2ac_cfg_hr_cdb)}, {IWL_PCI_DEVICE(0x02F0, 0x0070, iwl_ax201_cfg_quz_hr)}, {IWL_PCI_DEVICE(0x02F0, 0x0074, iwl_ax201_cfg_quz_hr)}, {IWL_PCI_DEVICE(0x02F0, 0x0078, iwl_ax201_cfg_quz_hr)}, {IWL_PCI_DEVICE(0x02F0, 0x007C, iwl_ax201_cfg_quz_hr)}, {IWL_PCI_DEVICE(0x02F0, 0x0244, iwl_ax101_cfg_quz_hr)}, {IWL_PCI_DEVICE(0x02F0, 0x0310, iwl_ax201_cfg_quz_hr)}, {IWL_PCI_DEVICE(0x02F0, 0x1651, iwl_ax1650s_cfg_quz_hr)}, {IWL_PCI_DEVICE(0x02F0, 0x1652, iwl_ax1650i_cfg_quz_hr)}, {IWL_PCI_DEVICE(0x02F0, 0x2074, iwl_ax201_cfg_quz_hr)}, {IWL_PCI_DEVICE(0x02F0, 0x4070, iwl_ax201_cfg_quz_hr)}, {IWL_PCI_DEVICE(0x02F0, 0x4244, iwl_ax101_cfg_quz_hr)}, {IWL_PCI_DEVICE(0x06F0, 0x0070, iwl_ax201_cfg_quz_hr)}, {IWL_PCI_DEVICE(0x06F0, 0x0074, iwl_ax201_cfg_quz_hr)}, {IWL_PCI_DEVICE(0x06F0, 0x0078, iwl_ax201_cfg_quz_hr)}, {IWL_PCI_DEVICE(0x06F0, 0x007C, iwl_ax201_cfg_quz_hr)}, {IWL_PCI_DEVICE(0x06F0, 0x0244, iwl_ax101_cfg_quz_hr)}, {IWL_PCI_DEVICE(0x06F0, 0x0310, iwl_ax201_cfg_quz_hr)}, {IWL_PCI_DEVICE(0x06F0, 0x1651, iwl_ax1650s_cfg_quz_hr)}, {IWL_PCI_DEVICE(0x06F0, 0x1652, iwl_ax1650i_cfg_quz_hr)}, {IWL_PCI_DEVICE(0x06F0, 0x2074, iwl_ax201_cfg_quz_hr)}, {IWL_PCI_DEVICE(0x06F0, 0x4070, iwl_ax201_cfg_quz_hr)}, {IWL_PCI_DEVICE(0x06F0, 0x4244, iwl_ax101_cfg_quz_hr)}, {IWL_PCI_DEVICE(0x2720, 0x0000, iwl_ax101_cfg_qu_hr)}, {IWL_PCI_DEVICE(0x2720, 0x0040, iwl_ax101_cfg_qu_hr)}, {IWL_PCI_DEVICE(0x2720, 0x0044, iwl_ax101_cfg_qu_hr)}, {IWL_PCI_DEVICE(0x2720, 0x0070, iwl_ax201_cfg_qu_hr)}, {IWL_PCI_DEVICE(0x2720, 0x0074, iwl_ax201_cfg_qu_hr)}, {IWL_PCI_DEVICE(0x2720, 0x0078, iwl_ax201_cfg_qu_hr)}, {IWL_PCI_DEVICE(0x2720, 0x007C, iwl_ax201_cfg_qu_hr)}, 
{IWL_PCI_DEVICE(0x2720, 0x0090, iwl22000_2ac_cfg_hr_cdb)}, {IWL_PCI_DEVICE(0x2720, 0x0244, iwl_ax101_cfg_qu_hr)}, {IWL_PCI_DEVICE(0x2720, 0x0310, iwl_ax201_cfg_qu_hr)}, {IWL_PCI_DEVICE(0x2720, 0x0A10, iwl_ax201_cfg_qu_hr)}, {IWL_PCI_DEVICE(0x2720, 0x1080, iwl_ax101_cfg_qu_hr)}, {IWL_PCI_DEVICE(0x2720, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0)}, {IWL_PCI_DEVICE(0x2720, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)}, {IWL_PCI_DEVICE(0x2720, 0x2074, iwl_ax201_cfg_qu_hr)}, {IWL_PCI_DEVICE(0x2720, 0x4070, iwl_ax201_cfg_qu_hr)}, {IWL_PCI_DEVICE(0x2720, 0x4244, iwl_ax101_cfg_qu_hr)}, {IWL_PCI_DEVICE(0x34F0, 0x0044, iwl_ax101_cfg_qu_hr)}, {IWL_PCI_DEVICE(0x34F0, 0x0070, iwl_ax201_cfg_qu_hr)}, {IWL_PCI_DEVICE(0x34F0, 0x0074, iwl_ax201_cfg_qu_hr)}, {IWL_PCI_DEVICE(0x34F0, 0x0078, iwl_ax201_cfg_qu_hr)}, {IWL_PCI_DEVICE(0x34F0, 0x007C, iwl_ax201_cfg_qu_hr)}, {IWL_PCI_DEVICE(0x34F0, 0x0244, iwl_ax101_cfg_qu_hr)}, {IWL_PCI_DEVICE(0x34F0, 0x0310, iwl_ax201_cfg_qu_hr)}, {IWL_PCI_DEVICE(0x34F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0)}, {IWL_PCI_DEVICE(0x34F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)}, {IWL_PCI_DEVICE(0x34F0, 0x2074, iwl_ax201_cfg_qu_hr)}, {IWL_PCI_DEVICE(0x34F0, 0x4070, iwl_ax201_cfg_qu_hr)}, {IWL_PCI_DEVICE(0x34F0, 0x4244, iwl_ax101_cfg_qu_hr)}, {IWL_PCI_DEVICE(0x43F0, 0x0044, iwl_ax101_cfg_qu_hr)}, {IWL_PCI_DEVICE(0x43F0, 0x0070, iwl_ax201_cfg_qu_hr)}, {IWL_PCI_DEVICE(0x43F0, 0x0074, iwl_ax201_cfg_qu_hr)}, {IWL_PCI_DEVICE(0x43F0, 0x0078, iwl_ax201_cfg_qu_hr)}, {IWL_PCI_DEVICE(0x43F0, 0x007C, iwl_ax201_cfg_qu_hr)}, {IWL_PCI_DEVICE(0x43F0, 0x0244, iwl_ax101_cfg_qu_hr)}, {IWL_PCI_DEVICE(0x43F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0)}, {IWL_PCI_DEVICE(0x43F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)}, {IWL_PCI_DEVICE(0x43F0, 0x2074, iwl_ax201_cfg_qu_hr)}, {IWL_PCI_DEVICE(0x43F0, 0x4070, iwl_ax201_cfg_qu_hr)}, {IWL_PCI_DEVICE(0x43F0, 0x4244, iwl_ax101_cfg_qu_hr)}, {IWL_PCI_DEVICE(0xA0F0, 0x0044, iwl_ax101_cfg_qu_hr)}, {IWL_PCI_DEVICE(0xA0F0, 0x0070, iwl_ax201_cfg_qu_hr)}, {IWL_PCI_DEVICE(0xA0F0, 0x0074, iwl_ax201_cfg_qu_hr)}, {IWL_PCI_DEVICE(0xA0F0, 0x0078, iwl_ax201_cfg_qu_hr)}, {IWL_PCI_DEVICE(0xA0F0, 0x007C, iwl_ax201_cfg_qu_hr)}, {IWL_PCI_DEVICE(0xA0F0, 0x0244, iwl_ax101_cfg_qu_hr)}, {IWL_PCI_DEVICE(0xA0F0, 0x0A10, iwl_ax201_cfg_qu_hr)}, {IWL_PCI_DEVICE(0xA0F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0)}, {IWL_PCI_DEVICE(0xA0F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)}, {IWL_PCI_DEVICE(0xA0F0, 0x2074, iwl_ax201_cfg_qu_hr)}, {IWL_PCI_DEVICE(0xA0F0, 0x4070, iwl_ax201_cfg_qu_hr)}, {IWL_PCI_DEVICE(0xA0F0, 0x4244, iwl_ax101_cfg_qu_hr)}, /* TODO: This is only for initial pre-production devices */ {IWL_PCI_DEVICE(0x2723, 0x0000, iwl_ax200_cfg_cc)}, {IWL_PCI_DEVICE(0x2723, 0x0080, iwl_ax200_cfg_cc)}, {IWL_PCI_DEVICE(0x2723, 0x0084, iwl_ax200_cfg_cc)}, {IWL_PCI_DEVICE(0x2723, 0x0088, iwl_ax200_cfg_cc)}, {IWL_PCI_DEVICE(0x2723, 0x008C, iwl_ax200_cfg_cc)}, {IWL_PCI_DEVICE(0x2723, 0x1653, killer1650w_2ax_cfg)}, {IWL_PCI_DEVICE(0x2723, 0x1654, killer1650x_2ax_cfg)}, {IWL_PCI_DEVICE(0x2723, 0x2080, iwl_ax200_cfg_cc)}, {IWL_PCI_DEVICE(0x2723, 0x4080, iwl_ax200_cfg_cc)}, {IWL_PCI_DEVICE(0x2723, 0x4088, iwl_ax200_cfg_cc)}, {IWL_PCI_DEVICE(0x2725, 0x0090, iwlax211_2ax_cfg_so_gf_a0)}, {IWL_PCI_DEVICE(0x2725, 0x0020, iwlax210_2ax_cfg_ty_gf_a0)}, {IWL_PCI_DEVICE(0x2725, 0x0310, iwlax210_2ax_cfg_ty_gf_a0)}, {IWL_PCI_DEVICE(0x2725, 0x0510, iwlax210_2ax_cfg_ty_gf_a0)}, {IWL_PCI_DEVICE(0x2725, 0x0A10, iwlax210_2ax_cfg_ty_gf_a0)}, {IWL_PCI_DEVICE(0x2725, 0x00B0, iwlax411_2ax_cfg_so_gf4_a0)}, {IWL_PCI_DEVICE(0x7A70, 
0x0090, iwlax211_2ax_cfg_so_gf_a0)}, {IWL_PCI_DEVICE(0x7A70, 0x0310, iwlax211_2ax_cfg_so_gf_a0)}, {IWL_PCI_DEVICE(0x7A70, 0x0510, iwlax211_2ax_cfg_so_gf_a0)}, {IWL_PCI_DEVICE(0x7A70, 0x0A10, iwlax211_2ax_cfg_so_gf_a0)}, {IWL_PCI_DEVICE(0x7AF0, 0x0090, iwlax211_2ax_cfg_so_gf_a0)}, {IWL_PCI_DEVICE(0x7AF0, 0x0310, iwlax211_2ax_cfg_so_gf_a0)}, {IWL_PCI_DEVICE(0x7AF0, 0x0510, iwlax211_2ax_cfg_so_gf_a0)}, {IWL_PCI_DEVICE(0x7AF0, 0x0A10, iwlax211_2ax_cfg_so_gf_a0)}, #endif /* CPTCFG_IWLMVM || CPTCFG_IWLFMAC */ {0} }; MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids); /* PCI registers */ #define PCI_CFG_RETRY_TIMEOUT 0x041 static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { const struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data); const struct iwl_cfg *cfg_7265d __maybe_unused = NULL; struct iwl_trans *iwl_trans; int ret; if (WARN_ONCE(!cfg->csr, "CSR addresses aren't configured\n")) return -EINVAL; iwl_trans = iwl_trans_pcie_alloc(pdev, ent, cfg); if (IS_ERR(iwl_trans)) return PTR_ERR(iwl_trans); #if IS_ENABLED(CPTCFG_IWLMVM) /* * special-case 7265D, it has the same PCI IDs. * * Note that because we already pass the cfg to the transport above, * all the parameters that the transport uses must, until that is * changed, be identical to the ones in the 7265D configuration. */ if (cfg == &iwl7265_2ac_cfg) cfg_7265d = &iwl7265d_2ac_cfg; else if (cfg == &iwl7265_2n_cfg) cfg_7265d = &iwl7265d_2n_cfg; else if (cfg == &iwl7265_n_cfg) cfg_7265d = &iwl7265d_n_cfg; if (cfg_7265d && (iwl_trans->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_7265D) { cfg = cfg_7265d; iwl_trans->cfg = cfg_7265d; } #endif #if IS_ENABLED(CPTCFG_IWLMVM) || IS_ENABLED(CPTCFG_IWLFMAC) if (iwl_trans->cfg->rf_id && cfg == &iwl22000_2ac_cfg_hr_cdb && iwl_trans->hw_rev != CSR_HW_REV_TYPE_HR_CDB) { u32 rf_id_chp = CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id); u32 jf_chp_id = CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_JF); u32 hr_chp_id = CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR); if (rf_id_chp == jf_chp_id) { if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QNJ) cfg = &iwl9560_2ac_cfg_qnj_jf_b0; else cfg = &iwl22000_2ac_cfg_jf; } else if (rf_id_chp == hr_chp_id) { if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QNJ) cfg = &iwl22000_2ax_cfg_qnj_hr_a0; else if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0) cfg = &iwl22000_2ax_cfg_qnj_hr_b0; else if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QU_B0) cfg = &iwl22000_2ax_cfg_qnj_hr_b0_f0; else cfg = &iwl22000_2ac_cfg_hr; } iwl_trans->cfg = cfg; } /* * This is a hack to switch from Qu B0 to Qu C0. We need to * do this for all cfgs that use Qu B0. All this code is in * urgent need for a refactor, but for now this is the easiest * thing to do to support Qu C-step. 
*/ if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QU_C0) { if (iwl_trans->cfg == &iwl_ax101_cfg_qu_hr) iwl_trans->cfg = &iwl_ax101_cfg_qu_c0_hr_b0; else if (iwl_trans->cfg == &iwl_ax201_cfg_qu_hr) iwl_trans->cfg = &iwl_ax201_cfg_qu_c0_hr_b0; else if (iwl_trans->cfg == &iwl9461_2ac_cfg_qu_b0_jf_b0) iwl_trans->cfg = &iwl9461_2ac_cfg_qu_c0_jf_b0; else if (iwl_trans->cfg == &iwl9462_2ac_cfg_qu_b0_jf_b0) iwl_trans->cfg = &iwl9462_2ac_cfg_qu_c0_jf_b0; else if (iwl_trans->cfg == &iwl9560_2ac_cfg_qu_b0_jf_b0) iwl_trans->cfg = &iwl9560_2ac_cfg_qu_c0_jf_b0; else if (iwl_trans->cfg == &iwl9560_2ac_160_cfg_qu_b0_jf_b0) iwl_trans->cfg = &iwl9560_2ac_160_cfg_qu_c0_jf_b0; } #endif pci_set_drvdata(pdev, iwl_trans); iwl_trans->drv = iwl_drv_start(iwl_trans); if (IS_ERR(iwl_trans->drv)) { ret = PTR_ERR(iwl_trans->drv); goto out_free_trans; } /* register transport layer debugfs here */ iwl_trans_pcie_dbgfs_register(iwl_trans); /* The PCI device starts with a reference taken and we are * supposed to release it here. But to simplify the * interaction with the opmode, we don't do it now, but let * the opmode release it when it's ready. */ return 0; out_free_trans: iwl_trans_pcie_free(iwl_trans); return ret; } static void iwl_pci_remove(struct pci_dev *pdev) { struct iwl_trans *trans = pci_get_drvdata(pdev); iwl_drv_stop(trans->drv); iwl_trans_pcie_free(trans); } #ifdef CONFIG_PM_SLEEP static int iwl_pci_suspend(struct device *device) { /* Before you put code here, think about WoWLAN. You cannot check here * whether WoWLAN is enabled or not, and your code will run even if * WoWLAN is enabled - don't kill the NIC, someone may need it in Sx. */ return 0; } static int iwl_pci_resume(struct device *device) { struct pci_dev *pdev = to_pci_dev(device); struct iwl_trans *trans = pci_get_drvdata(pdev); struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); /* Before you put code here, think about WoWLAN. You cannot check here * whether WoWLAN is enabled or not, and your code will run even if * WoWLAN is enabled - the NIC may be alive. */ /* * We disable the RETRY_TIMEOUT register (0x41) to keep * PCI Tx retries from interfering with C3 CPU state. */ pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00); if (!trans->op_mode) return 0; /* In WOWLAN, let iwl_trans_pcie_d3_resume do the rest of the work */ if (test_bit(STATUS_DEVICE_ENABLED, &trans->status)) return 0; /* reconfigure the MSI-X mapping to get the correct IRQ for rfkill */ iwl_pcie_conf_msix_hw(trans_pcie); /* * Enable rfkill interrupt (in order to keep track of the rfkill * status). Must be locked to avoid processing a possible rfkill * interrupt while in iwl_pcie_check_hw_rf_kill(). 
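	 *
	 * The rfkill switch state may well have changed while the system was
	 * suspended, so re-reading it here (rather than trusting the
	 * pre-suspend value) keeps the opmode's view of rfkill accurate.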
*/ mutex_lock(&trans_pcie->mutex); iwl_enable_rfkill_int(trans); iwl_pcie_check_hw_rf_kill(trans); mutex_unlock(&trans_pcie->mutex); return 0; } static const struct dev_pm_ops iwl_dev_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(iwl_pci_suspend, iwl_pci_resume) }; #define IWL_PM_OPS (&iwl_dev_pm_ops) #else /* CONFIG_PM_SLEEP */ #define IWL_PM_OPS NULL #endif /* CONFIG_PM_SLEEP */ static struct pci_driver iwl_pci_driver = { .name = DRV_NAME, .id_table = iwl_hw_card_ids, .probe = iwl_pci_probe, .remove = iwl_pci_remove, .driver.pm = IWL_PM_OPS, }; int __must_check iwl_pci_register_driver(void) { int ret; ret = pci_register_driver(&iwl_pci_driver); if (ret) pr_err("Unable to initialize PCI module\n"); return ret; } void iwl_pci_unregister_driver(void) { pci_unregister_driver(&iwl_pci_driver); } backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/pcie/internal.h000066400000000000000000001073531351064634500275050ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * The full GNU General Public License is included in this distribution in the * file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #ifndef __iwl_trans_int_pcie_h__ #define __iwl_trans_int_pcie_h__ #include #include #include #include #include #include #include #include "iwl-fh.h" #include "iwl-csr.h" #include "iwl-trans.h" #include "iwl-debug.h" #include "iwl-io.h" #include "iwl-op-mode.h" #include "iwl-drv.h" /* We need 2 entries for the TX command and header, and another one might * be needed for potential data in the SKB's head. The remaining ones can * be used for frags. */ #define IWL_PCIE_MAX_FRAGS(x) (x->max_tbs - 3) /* * RX related structures and functions */ #define RX_NUM_QUEUES 1 #define RX_POST_REQ_ALLOC 2 #define RX_CLAIM_REQ_ALLOC 8 #define RX_PENDING_WATERMARK 16 #define FIRST_RX_QUEUE 512 struct iwl_host_cmd; /*This file includes the declaration that are internal to the * trans_pcie layer */ /** * struct iwl_rx_mem_buffer * @page_dma: bus address of rxb page * @page: driver's pointer to the rxb page * @invalid: rxb is in driver ownership - not owned by HW * @vid: index of this rxb in the global table */ struct iwl_rx_mem_buffer { dma_addr_t page_dma; struct page *page; u16 vid; bool invalid; struct list_head list; }; /** * struct isr_statistics - interrupt statistics * */ struct isr_statistics { u32 hw; u32 sw; u32 err_code; u32 sch; u32 alive; u32 rfkill; u32 ctkill; u32 wakeup; u32 rx; u32 tx; u32 unhandled; }; /** * struct iwl_rx_transfer_desc - transfer descriptor * @addr: ptr to free buffer start address * @rbid: unique tag of the buffer * @reserved: reserved */ struct iwl_rx_transfer_desc { __le16 rbid; __le16 reserved[3]; __le64 addr; } __packed; #define IWL_RX_CD_FLAGS_FRAGMENTED BIT(0) /** * struct iwl_rx_completion_desc - completion descriptor * @reserved1: reserved * @rbid: unique tag of the received buffer * @flags: flags (0: fragmented, all others: reserved) * @reserved2: reserved */ struct iwl_rx_completion_desc { __le32 reserved1; __le16 rbid; u8 flags; u8 reserved2[25]; } __packed; /** * struct iwl_rxq - Rx queue * @id: queue index * @bd: driver's pointer to buffer of receive buffer descriptors (rbd). * Address size is 32 bit in pre-9000 devices and 64 bit in 9000 devices. 
* In 22560 devices it is a pointer to a list of iwl_rx_transfer_desc's * @bd_dma: bus address of buffer of receive buffer descriptors (rbd) * @ubd: driver's pointer to buffer of used receive buffer descriptors (rbd) * @ubd_dma: physical address of buffer of used receive buffer descriptors (rbd) * @tr_tail: driver's pointer to the transmission ring tail buffer * @tr_tail_dma: physical address of the buffer for the transmission ring tail * @cr_tail: driver's pointer to the completion ring tail buffer * @cr_tail_dma: physical address of the buffer for the completion ring tail * @read: Shared index to newest available Rx buffer * @write: Shared index to oldest written Rx packet * @free_count: Number of pre-allocated buffers in rx_free * @used_count: Number of RBDs handled to allocator to use for allocation * @write_actual: * @rx_free: list of RBDs with allocated RB ready for use * @rx_used: list of RBDs with no RB attached * @need_update: flag to indicate we need to update read/write index * @rb_stts: driver's pointer to receive buffer status * @rb_stts_dma: bus address of receive buffer status * @lock: * @queue: actual rx queue. Not used for multi-rx queue. * * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers */ struct iwl_rxq { int id; void *bd; dma_addr_t bd_dma; union { void *used_bd; __le32 *bd_32; struct iwl_rx_completion_desc *cd; }; dma_addr_t used_bd_dma; __le16 *tr_tail; dma_addr_t tr_tail_dma; __le16 *cr_tail; dma_addr_t cr_tail_dma; u32 read; u32 write; u32 free_count; u32 used_count; u32 write_actual; u32 queue_size; struct list_head rx_free; struct list_head rx_used; bool need_update; void *rb_stts; dma_addr_t rb_stts_dma; spinlock_t lock; struct napi_struct napi; struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE]; }; /** * struct iwl_rb_allocator - Rx allocator * @req_pending: number of requests the allcator had not processed yet * @req_ready: number of requests honored and ready for claiming * @rbd_allocated: RBDs with pages allocated and ready to be handled to * the queue. This is a list of &struct iwl_rx_mem_buffer * @rbd_empty: RBDs with no page attached for allocator use. 
This is a list * of &struct iwl_rx_mem_buffer * @lock: protects the rbd_allocated and rbd_empty lists * @alloc_wq: work queue for background calls * @rx_alloc: work struct for background calls */ struct iwl_rb_allocator { atomic_t req_pending; atomic_t req_ready; struct list_head rbd_allocated; struct list_head rbd_empty; spinlock_t lock; struct workqueue_struct *alloc_wq; struct work_struct rx_alloc; }; struct iwl_dma_ptr { dma_addr_t dma; void *addr; size_t size; }; /** * iwl_queue_inc_wrap - increment queue index, wrap back to beginning * @index -- current index */ static inline int iwl_queue_inc_wrap(struct iwl_trans *trans, int index) { return ++index & (trans->cfg->base_params->max_tfd_queue_size - 1); } /** * iwl_get_closed_rb_stts - get closed rb stts from different structs * @rxq - the rxq to get the rb stts from */ static inline __le16 iwl_get_closed_rb_stts(struct iwl_trans *trans, struct iwl_rxq *rxq) { if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) { __le16 *rb_stts = rxq->rb_stts; return READ_ONCE(*rb_stts); } else { struct iwl_rb_status *rb_stts = rxq->rb_stts; return READ_ONCE(rb_stts->closed_rb_num); } } /** * iwl_queue_dec_wrap - decrement queue index, wrap back to end * @index -- current index */ static inline int iwl_queue_dec_wrap(struct iwl_trans *trans, int index) { return --index & (trans->cfg->base_params->max_tfd_queue_size - 1); } struct iwl_cmd_meta { /* only for SYNC commands, iff the reply skb is wanted */ struct iwl_host_cmd *source; u32 flags; u32 tbs; }; /* * The FH will write back to the first TB only, so we need to copy some data * into the buffer regardless of whether it should be mapped or not. * This indicates how big the first TB must be to include the scratch buffer * and the assigned PN. * Since PN location is 8 bytes at offset 12, it's 20 now. * If we make it bigger then allocations will be bigger and copy slower, so * that's probably not useful. */ #define IWL_FIRST_TB_SIZE 20 #define IWL_FIRST_TB_SIZE_ALIGN ALIGN(IWL_FIRST_TB_SIZE, 64) struct iwl_pcie_txq_entry { struct iwl_device_cmd *cmd; struct sk_buff *skb; /* buffer to free after command completes */ const void *free_buf; struct iwl_cmd_meta meta; }; struct iwl_pcie_first_tb_buf { u8 buf[IWL_FIRST_TB_SIZE_ALIGN]; }; /** * struct iwl_txq - Tx Queue for DMA * @q: generic Rx/Tx queue descriptor * @tfds: transmit frame descriptors (DMA memory) * @first_tb_bufs: start of command headers, including scratch buffers, for * the writeback -- this is DMA memory and an array holding one buffer * for each command on the queue * @first_tb_dma: DMA address for the first_tb_bufs start * @entries: transmit entries (driver state) * @lock: queue lock * @stuck_timer: timer that fires if queue gets stuck * @trans_pcie: pointer back to transport (for timer) * @need_update: indicates need to update read/write index * @ampdu: true if this queue is an ampdu queue for an specific RA/TID * @wd_timeout: queue watchdog timeout (jiffies) - per queue * @frozen: tx stuck queue timer is frozen * @frozen_expiry_remainder: remember how long until the timer fires * @bc_tbl: byte count table of the queue (relevant only for gen2 transport) * @write_ptr: 1-st empty entry (index) host_w * @read_ptr: last used entry (index) host_r * @dma_addr: physical addr for BD's * @n_window: safe queue window * @id: queue id * @low_mark: low watermark, resume queue if free space more than this * @high_mark: high watermark, stop queue if free space less than this * * A Tx queue consists of circular buffer of BDs (a.k.a. 
/**
 * struct iwl_txq - Tx Queue for DMA
 * @q: generic Rx/Tx queue descriptor
 * @tfds: transmit frame descriptors (DMA memory)
 * @first_tb_bufs: start of command headers, including scratch buffers, for
 *	the writeback -- this is DMA memory and an array holding one buffer
 *	for each command on the queue
 * @first_tb_dma: DMA address for the first_tb_bufs start
 * @entries: transmit entries (driver state)
 * @lock: queue lock
 * @stuck_timer: timer that fires if queue gets stuck
 * @trans_pcie: pointer back to transport (for timer)
 * @need_update: indicates need to update read/write index
 * @ampdu: true if this queue is an ampdu queue for a specific RA/TID
 * @wd_timeout: queue watchdog timeout (jiffies) - per queue
 * @frozen: tx stuck queue timer is frozen
 * @frozen_expiry_remainder: remember how long until the timer fires
 * @bc_tbl: byte count table of the queue (relevant only for gen2 transport)
 * @write_ptr: 1-st empty entry (index) host_w
 * @read_ptr: last used entry (index) host_r
 * @dma_addr: physical addr for BD's
 * @n_window: safe queue window
 * @id: queue id
 * @low_mark: low watermark, resume queue if free space more than this
 * @high_mark: high watermark, stop queue if free space less than this
 *
 * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
 * descriptors) and required locking structures.
 *
 * Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware
 * always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256 (unless
 * there might be HW changes in the future). For the normal TX
 * queues, n_window, which is the size of the software queue data
 * is also 256; however, for the command queue, n_window is only
 * 32 since we don't need so many commands pending. Since the HW
 * still uses 256 BDs for DMA though, TFD_QUEUE_SIZE_MAX stays 256.
 * This means that we end up with the following:
 *  HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
 *  SW entries: | 0 | ... | 31 |
 * where N is a number between 0 and 7. This means that the SW
 * data is a window overlaid over the HW queue.
 */
struct iwl_txq {
	void *tfds;
	struct iwl_pcie_first_tb_buf *first_tb_bufs;
	dma_addr_t first_tb_dma;
	struct iwl_pcie_txq_entry *entries;
	spinlock_t lock;
	unsigned long frozen_expiry_remainder;
	struct timer_list stuck_timer;
	struct iwl_trans_pcie *trans_pcie;
	bool need_update;
	bool frozen;
	bool ampdu;
	int block;
	unsigned long wd_timeout;
	struct sk_buff_head overflow_q;
	struct iwl_dma_ptr bc_tbl;

	int write_ptr;
	int read_ptr;
	dma_addr_t dma_addr;
	int n_window;
	u32 id;
	int low_mark;
	int high_mark;

	bool overflow_tx;
};

static inline dma_addr_t
iwl_pcie_get_first_tb_dma(struct iwl_txq *txq, int idx)
{
	return txq->first_tb_dma +
	       sizeof(struct iwl_pcie_first_tb_buf) * idx;
}

struct iwl_tso_hdr_page {
	struct page *page;
	u8 *pos;
};

#ifdef CPTCFG_IWLWIFI_DEBUGFS
/**
 * enum iwl_fw_mon_dbgfs_state - the different states of the monitor_data
 * debugfs file
 *
 * @IWL_FW_MON_DBGFS_STATE_CLOSED: the file is closed.
 * @IWL_FW_MON_DBGFS_STATE_OPEN: the file is open.
 * @IWL_FW_MON_DBGFS_STATE_DISABLED: the file is disabled, once this state is
 *	set the file can no longer be used.
 */
enum iwl_fw_mon_dbgfs_state {
	IWL_FW_MON_DBGFS_STATE_CLOSED,
	IWL_FW_MON_DBGFS_STATE_OPEN,
	IWL_FW_MON_DBGFS_STATE_DISABLED,
};
#endif

/**
 * enum iwl_shared_irq_flags - level of sharing for irq
 * @IWL_SHARED_IRQ_NON_RX: interrupt vector serves non rx causes.
 * @IWL_SHARED_IRQ_FIRST_RSS: interrupt vector serves first RSS queue.
*/ enum iwl_shared_irq_flags { IWL_SHARED_IRQ_NON_RX = BIT(0), IWL_SHARED_IRQ_FIRST_RSS = BIT(1), }; /** * enum iwl_image_response_code - image response values * @IWL_IMAGE_RESP_DEF: the default value of the register * @IWL_IMAGE_RESP_SUCCESS: iml was read successfully * @IWL_IMAGE_RESP_FAIL: iml reading failed */ enum iwl_image_response_code { IWL_IMAGE_RESP_DEF = 0, IWL_IMAGE_RESP_SUCCESS = 1, IWL_IMAGE_RESP_FAIL = 2, }; /** * struct cont_rec: continuous recording data structure * @prev_wr_ptr: the last address that was read in monitor_data * debugfs file * @prev_wrap_cnt: the wrap count that was used during the last read in * monitor_data debugfs file * @state: the state of monitor_data debugfs file as described * in &iwl_fw_mon_dbgfs_state enum * @mutex: locked while reading from monitor_data debugfs file */ #ifdef CPTCFG_IWLWIFI_DEBUGFS struct cont_rec { u32 prev_wr_ptr; u32 prev_wrap_cnt; u8 state; /* Used to sync monitor_data debugfs file with driver unload flow */ struct mutex mutex; }; #endif /** * struct iwl_trans_pcie - PCIe transport specific data * @rxq: all the RX queue data * @rx_pool: initial pool of iwl_rx_mem_buffer for all the queues * @global_table: table mapping received VID from hw to rxb * @rba: allocator for RX replenishing * @ctxt_info: context information for FW self init * @ctxt_info_gen3: context information for gen3 devices * @prph_info: prph info for self init * @prph_scratch: prph scratch for self init * @ctxt_info_dma_addr: dma addr of context information * @prph_info_dma_addr: dma addr of prph info * @prph_scratch_dma_addr: dma addr of prph scratch * @ctxt_info_dma_addr: dma addr of context information * @init_dram: DRAM data of firmware image (including paging). * Context information addresses will be taken from here. * This is driver's local copy for keeping track of size and * count for allocating and freeing the memory. * @trans: pointer to the generic transport area * @scd_base_addr: scheduler sram base address in SRAM * @scd_bc_tbls: pointer to the byte count table of the scheduler * @kw: keep warm address * @pci_dev: basic pci-network driver stuff * @hw_base: pci hardware address support * @ucode_write_complete: indicates that the ucode has been copied. * @ucode_write_waitq: wait queue for uCode load * @cmd_queue - command queue number * @def_rx_queue - default rx queue number * @rx_buf_size: Rx buffer size * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes) * @scd_set_active: should the transport configure the SCD for HCMD queue * @sw_csum_tx: if true, then the transport will compute the csum of the TXed * frame. * @rx_page_order: page order for receive buffer size * @reg_lock: protect hw register access * @mutex: to protect stop_device / start_fw / start_hw * @cmd_in_flight: true when we have a host command in flight #ifdef CPTCFG_IWLWIFI_DEBUGFS * @fw_mon_data: fw continuous recording data #endif * @msix_entries: array of MSI-X entries * @msix_enabled: true if managed to enable MSI-X * @shared_vec_mask: the type of causes the shared vector handles * (see iwl_shared_irq_flags). 
* @alloc_vecs: the number of interrupt vectors allocated by the OS * @def_irq: default irq for non rx causes * @fh_init_mask: initial unmasked fh causes * @hw_init_mask: initial unmasked hw causes * @fh_mask: current unmasked fh causes * @hw_mask: current unmasked hw causes * @in_rescan: true if we have triggered a device rescan * @base_rb_stts: base virtual address of receive buffer status for all queues * @base_rb_stts_dma: base physical address of receive buffer status */ struct iwl_trans_pcie { struct iwl_rxq *rxq; struct iwl_rx_mem_buffer rx_pool[RX_POOL_SIZE]; struct iwl_rx_mem_buffer *global_table[RX_POOL_SIZE]; struct iwl_rb_allocator rba; union { struct iwl_context_info *ctxt_info; struct iwl_context_info_gen3 *ctxt_info_gen3; }; struct iwl_prph_info *prph_info; struct iwl_prph_scratch *prph_scratch; dma_addr_t ctxt_info_dma_addr; dma_addr_t prph_info_dma_addr; dma_addr_t prph_scratch_dma_addr; dma_addr_t iml_dma_addr; struct iwl_trans *trans; struct net_device napi_dev; struct __percpu iwl_tso_hdr_page *tso_hdr_page; /* INT ICT Table */ __le32 *ict_tbl; dma_addr_t ict_tbl_dma; int ict_index; bool use_ict; bool is_down, opmode_down; s8 debug_rfkill; struct isr_statistics isr_stats; spinlock_t irq_lock; struct mutex mutex; u32 inta_mask; u32 scd_base_addr; struct iwl_dma_ptr scd_bc_tbls; struct iwl_dma_ptr kw; struct iwl_txq *txq_memory; struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES]; unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)]; unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)]; /* PCI bus related data */ struct pci_dev *pci_dev; void __iomem *hw_base; bool ucode_write_complete; wait_queue_head_t ucode_write_waitq; wait_queue_head_t wait_command_queue; u8 page_offs, dev_cmd_offs; u8 cmd_queue; u8 def_rx_queue; u8 cmd_fifo; unsigned int cmd_q_wdg_timeout; u8 n_no_reclaim_cmds; u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS]; u8 max_tbs; u16 tfd_size; enum iwl_amsdu_size rx_buf_size; bool bc_table_dword; bool scd_set_active; bool sw_csum_tx; bool pcie_dbg_dumped_once; u32 rx_page_order; /*protect hw register */ spinlock_t reg_lock; bool cmd_hold_nic_awake; #ifdef CPTCFG_IWLWIFI_DEBUGFS struct cont_rec fw_mon_data; #endif struct msix_entry msix_entries[IWL_MAX_RX_HW_QUEUES]; bool msix_enabled; u8 shared_vec_mask; u32 alloc_vecs; u32 def_irq; u32 fh_init_mask; u32 hw_init_mask; u32 fh_mask; u32 hw_mask; cpumask_t affinity_mask[IWL_MAX_RX_HW_QUEUES]; bool in_rescan; void *base_rb_stts; dma_addr_t base_rb_stts_dma; }; static inline struct iwl_trans_pcie * IWL_TRANS_GET_PCIE_TRANS(struct iwl_trans *trans) { return (void *)trans->trans_specific; } static inline void iwl_pcie_clear_irq(struct iwl_trans *trans, struct msix_entry *entry) { /* * Before sending the interrupt the HW disables it to prevent * a nested interrupt. This is done by writing 1 to the corresponding * bit in the mask register. After handling the interrupt, it should be * re-enabled by clearing this bit. This register is defined as * write 1 clear (W1C) register, meaning that it's being clear * by writing 1 to the bit. 
*/ iwl_write32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(entry->entry)); } static inline struct iwl_trans * iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie) { return container_of((void *)trans_pcie, struct iwl_trans, trans_specific); } /* * Convention: trans API functions: iwl_trans_pcie_XXX * Other functions: iwl_pcie_XXX */ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, const struct pci_device_id *ent, const struct iwl_cfg *cfg); void iwl_trans_pcie_free(struct iwl_trans *trans); /***************************************************** * RX ******************************************************/ int _iwl_pcie_rx_init(struct iwl_trans *trans); int iwl_pcie_rx_init(struct iwl_trans *trans); int iwl_pcie_gen2_rx_init(struct iwl_trans *trans); irqreturn_t iwl_pcie_msix_isr(int irq, void *data); irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id); irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id); irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id); int iwl_pcie_rx_stop(struct iwl_trans *trans); void iwl_pcie_rx_free(struct iwl_trans *trans); void iwl_pcie_free_rbs_pool(struct iwl_trans *trans); void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq); int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget); void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority, struct iwl_rxq *rxq); int iwl_pcie_rx_alloc(struct iwl_trans *trans); /***************************************************** * ICT - interrupt handling ******************************************************/ irqreturn_t iwl_pcie_isr(int irq, void *data); int iwl_pcie_alloc_ict(struct iwl_trans *trans); void iwl_pcie_free_ict(struct iwl_trans *trans); void iwl_pcie_reset_ict(struct iwl_trans *trans); void iwl_pcie_disable_ict(struct iwl_trans *trans); /***************************************************** * TX / HCMD ******************************************************/ int iwl_pcie_tx_init(struct iwl_trans *trans); int iwl_pcie_gen2_tx_init(struct iwl_trans *trans, int txq_id, int queue_size); void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr); int iwl_pcie_tx_stop(struct iwl_trans *trans); void iwl_pcie_tx_free(struct iwl_trans *trans); bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int queue, u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg, unsigned int wdg_timeout); void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue, bool configure_scd); void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id, bool shared_mode); void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq); int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_device_cmd *dev_cmd, int txq_id); void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans); int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd); void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx); void iwl_pcie_gen2_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq); void iwl_pcie_hcmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb); void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, struct sk_buff_head *skbs); void iwl_trans_pcie_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr); void iwl_trans_pcie_tx_reset(struct iwl_trans *trans); void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie, struct iwl_txq *txq, u16 byte_cnt, int num_tbs); static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_trans *trans, void *_tfd, u8 idx) 
{ if (trans->cfg->use_tfh) { struct iwl_tfh_tfd *tfd = _tfd; struct iwl_tfh_tb *tb = &tfd->tbs[idx]; return le16_to_cpu(tb->tb_len); } else { struct iwl_tfd *tfd = _tfd; struct iwl_tfd_tb *tb = &tfd->tbs[idx]; return le16_to_cpu(tb->hi_n_len) >> 4; } } /***************************************************** * Error handling ******************************************************/ void iwl_pcie_dump_csr(struct iwl_trans *trans); /***************************************************** * Helpers ******************************************************/ static inline void _iwl_disable_interrupts(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); clear_bit(STATUS_INT_ENABLED, &trans->status); if (!trans_pcie->msix_enabled) { /* disable interrupts from uCode/NIC to host */ iwl_write32(trans, CSR_INT_MASK, 0x00000000); /* acknowledge/clear/reset any interrupts still pending * from uCode or flow handler (Rx/Tx DMA) */ iwl_write32(trans, CSR_INT, 0xffffffff); iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff); } else { /* disable all the interrupt we might use */ iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, trans_pcie->fh_init_mask); iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD, trans_pcie->hw_init_mask); } IWL_DEBUG_ISR(trans, "Disabled interrupts\n"); } #define IWL_NUM_OF_COMPLETION_RINGS 31 #define IWL_NUM_OF_TRANSFER_RINGS 527 static inline int iwl_pcie_get_num_sections(const struct fw_img *fw, int start) { int i = 0; while (start < fw->num_sec && fw->sec[start].offset != CPU1_CPU2_SEPARATOR_SECTION && fw->sec[start].offset != PAGING_SEPARATOR_SECTION) { start++; i++; } return i; } static inline int iwl_pcie_ctxt_info_alloc_dma(struct iwl_trans *trans, const struct fw_desc *sec, struct iwl_dram_data *dram) { dram->block = dma_alloc_coherent(trans->dev, sec->len, &dram->physical, GFP_KERNEL); if (!dram->block) return -ENOMEM; dram->size = sec->len; memcpy(dram->block, sec->data, sec->len); return 0; } static inline void iwl_pcie_ctxt_info_free_fw_img(struct iwl_trans *trans) { struct iwl_self_init_dram *dram = &trans->init_dram; int i; if (!dram->fw) { WARN_ON(dram->fw_cnt); return; } for (i = 0; i < dram->fw_cnt; i++) dma_free_coherent(trans->dev, dram->fw[i].size, dram->fw[i].block, dram->fw[i].physical); kfree(dram->fw); dram->fw_cnt = 0; dram->fw = NULL; } static inline void iwl_disable_interrupts(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); spin_lock(&trans_pcie->irq_lock); _iwl_disable_interrupts(trans); spin_unlock(&trans_pcie->irq_lock); } static inline void _iwl_enable_interrupts(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); IWL_DEBUG_ISR(trans, "Enabling interrupts\n"); set_bit(STATUS_INT_ENABLED, &trans->status); if (!trans_pcie->msix_enabled) { trans_pcie->inta_mask = CSR_INI_SET_MASK; iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask); } else { /* * fh/hw_mask keeps all the unmasked causes. * Unlike msi, in msix cause is enabled when it is unset. 
*/ trans_pcie->hw_mask = trans_pcie->hw_init_mask; trans_pcie->fh_mask = trans_pcie->fh_init_mask; iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~trans_pcie->fh_mask); iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD, ~trans_pcie->hw_mask); } } static inline void iwl_enable_interrupts(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); spin_lock(&trans_pcie->irq_lock); _iwl_enable_interrupts(trans); spin_unlock(&trans_pcie->irq_lock); } static inline void iwl_enable_hw_int_msk_msix(struct iwl_trans *trans, u32 msk) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD, ~msk); trans_pcie->hw_mask = msk; } static inline void iwl_enable_fh_int_msk_msix(struct iwl_trans *trans, u32 msk) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~msk); trans_pcie->fh_mask = msk; } static inline void iwl_enable_fw_load_int(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); IWL_DEBUG_ISR(trans, "Enabling FW load interrupt\n"); if (!trans_pcie->msix_enabled) { trans_pcie->inta_mask = CSR_INT_BIT_FH_TX; iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask); } else { iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD, trans_pcie->hw_init_mask); iwl_enable_fh_int_msk_msix(trans, MSIX_FH_INT_CAUSES_D2S_CH0_NUM); } } static inline void iwl_enable_fw_load_int_ctx_info(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); IWL_DEBUG_ISR(trans, "Enabling ALIVE interrupt only\n"); if (!trans_pcie->msix_enabled) { /* * When we'll receive the ALIVE interrupt, the ISR will call * iwl_enable_fw_load_int_ctx_info again to set the ALIVE * interrupt (which is not really needed anymore) but also the * RX interrupt which will allow us to receive the ALIVE * notification (which is Rx) and continue the flow. */ trans_pcie->inta_mask = CSR_INT_BIT_ALIVE | CSR_INT_BIT_FH_RX; iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask); } else { iwl_enable_hw_int_msk_msix(trans, MSIX_HW_INT_CAUSES_REG_ALIVE); /* * Leave all the FH causes enabled to get the ALIVE * notification. */ iwl_enable_fh_int_msk_msix(trans, trans_pcie->fh_init_mask); } } static inline u16 iwl_pcie_get_cmd_index(const struct iwl_txq *q, u32 index) { return index & (q->n_window - 1); } static inline void *iwl_pcie_get_tfd(struct iwl_trans *trans, struct iwl_txq *txq, int idx) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); if (trans->cfg->use_tfh) idx = iwl_pcie_get_cmd_index(txq, idx); return txq->tfds + trans_pcie->tfd_size * idx; } static inline const char *queue_name(struct device *dev, struct iwl_trans_pcie *trans_p, int i) { if (trans_p->shared_vec_mask) { int vec = trans_p->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 
			1 : 0;

		if (i == 0)
			return DRV_NAME ": shared IRQ";

		return devm_kasprintf(dev, GFP_KERNEL,
				      DRV_NAME ": queue %d", i + vec);
	}
	if (i == 0)
		return DRV_NAME ": default queue";

	if (i == trans_p->alloc_vecs - 1)
		return DRV_NAME ": exception";

	return devm_kasprintf(dev, GFP_KERNEL,
			      DRV_NAME ": queue %d", i);
}

static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n");
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INT_BIT_RF_KILL;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    trans_pcie->fh_init_mask);
		iwl_enable_hw_int_msk_msix(trans,
					   MSIX_HW_INT_CAUSES_REG_RF_KILL);
	}

	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_9000) {
		/*
		 * On 9000-series devices this bit isn't enabled by default,
		 * so when we power down the device we need to set the bit to
		 * allow it to wake up the PCI-E bus for RF-kill interrupts.
		 */
		iwl_set_bit(trans, CSR_GP_CNTRL,
			    CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN);
	}
}

void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans);

static inline void iwl_wake_queue(struct iwl_trans *trans,
				  struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (test_and_clear_bit(txq->id, trans_pcie->queue_stopped)) {
		IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->id);
		iwl_op_mode_queue_not_full(trans->op_mode, txq->id);
	}
}

static inline void iwl_stop_queue(struct iwl_trans *trans,
				  struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!test_and_set_bit(txq->id, trans_pcie->queue_stopped)) {
		iwl_op_mode_queue_full(trans->op_mode, txq->id);
		IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->id);
	} else {
		IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
				    txq->id);
	}
}

static inline bool iwl_queue_used(const struct iwl_txq *q, int i)
{
	int index = iwl_pcie_get_cmd_index(q, i);
	int r = iwl_pcie_get_cmd_index(q, q->read_ptr);
	int w = iwl_pcie_get_cmd_index(q, q->write_ptr);

	return w >= r ?
(index >= r && index < w) : !(index < r && index >= w); } static inline bool iwl_is_rfkill_set(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); lockdep_assert_held(&trans_pcie->mutex); if (trans_pcie->debug_rfkill == 1) return true; #ifdef CPTCFG_IWLWIFI_SUPPORT_DEBUG_OVERRIDES if (trans_pcie->debug_rfkill == -1 && trans->dbg_cfg.STARTUP_RFKILL) return true; #endif return !(iwl_read32(trans, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW); } static inline void __iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value) { u32 v; #ifdef CPTCFG_IWLWIFI_DEBUG WARN_ON_ONCE(value & ~mask); #endif v = iwl_read32(trans, reg); v &= ~mask; v |= value; iwl_write32(trans, reg, v); } static inline void __iwl_trans_pcie_clear_bit(struct iwl_trans *trans, u32 reg, u32 mask) { __iwl_trans_pcie_set_bits_mask(trans, reg, mask, 0); } static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans, u32 reg, u32 mask) { __iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask); } static inline bool iwl_pcie_dbg_on(struct iwl_trans *trans) { return (trans->dbg.dest_tlv || trans->dbg.ini_valid); } void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state); void iwl_trans_pcie_dump_regs(struct iwl_trans *trans); void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans); #ifdef CPTCFG_IWLWIFI_DEBUGFS void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans); #else static inline void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans) { } #endif void iwl_pcie_rx_allocator_work(struct work_struct *data); /* common functions that are used by gen2 transport */ int iwl_pcie_gen2_apm_init(struct iwl_trans *trans); void iwl_pcie_apm_config(struct iwl_trans *trans); int iwl_pcie_prepare_card_hw(struct iwl_trans *trans); void iwl_pcie_synchronize_irqs(struct iwl_trans *trans); bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans); void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans, bool was_in_rfkill); void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq); int iwl_queue_space(struct iwl_trans *trans, const struct iwl_txq *q); void iwl_pcie_apm_stop_master(struct iwl_trans *trans); void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie); int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num, bool cmd_queue); int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num, bool cmd_queue); int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr, size_t size); void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr); int iwl_trans_pcie_power_device_off(struct iwl_trans_pcie *trans_pcie); void iwl_pcie_apply_destination(struct iwl_trans *trans); void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie, struct sk_buff *skb); #ifdef CONFIG_INET struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len); #endif /* common functions that are used by gen3 transport */ void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power); /* transport gen 2 exported functions */ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans, const struct fw_img *fw, bool run_in_rfkill); void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr); void iwl_pcie_gen2_txq_free_memory(struct iwl_trans *trans, struct iwl_txq *txq); int iwl_trans_pcie_dyn_txq_alloc_dma(struct iwl_trans *trans, struct iwl_txq **intxq, int size, unsigned int timeout); int iwl_trans_pcie_txq_alloc_response(struct iwl_trans 
*trans, struct iwl_txq *txq, struct iwl_host_cmd *hcmd); int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans, __le16 flags, u8 sta_id, u8 tid, int cmd_id, int size, unsigned int timeout); void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue); int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_device_cmd *dev_cmd, int txq_id); int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd); void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans); void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans); void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id); void iwl_pcie_gen2_tx_free(struct iwl_trans *trans); void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans); #endif /* __iwl_trans_int_pcie_h__ */ backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/pcie/rx.c000066400000000000000000002043061351064634500263110ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * The full GNU General Public License is included in this distribution in the * file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
 * IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/gfp.h>

#include "iwl-prph.h"
#include "iwl-io.h"
#include "internal.h"
#include "iwl-op-mode.h"
#include "iwl-context-info-gen3.h"

/******************************************************************************
 *
 * RX path functions
 *
 ******************************************************************************/

/*
 * Rx theory of operation
 *
 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
 * each of which point to Receive Buffers to be filled by the NIC. These get
 * used not only for Rx frames, but for any command response or notification
 * from the NIC. The driver and NIC manage the Rx buffers by means
 * of indexes into the circular buffer.
 *
 * Rx Queue Indexes
 * The host/firmware share two index registers for managing the Rx buffers.
 *
 * The READ index maps to the first position that the firmware may be writing
 * to -- the driver can read up to (but not including) this position and get
 * good data.
 * The READ index is managed by the firmware once the card is enabled.
 *
 * The WRITE index maps to the last position the driver has read from -- the
 * position preceding WRITE is the last slot the firmware can place a packet.
 *
 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
 * WRITE = READ.
 *
 * During initialization, the host sets up the READ queue position to the first
 * INDEX position, and WRITE to the last (READ - 1 wrapped)
 *
 * When the firmware places a packet in a buffer, it will advance the READ index
 * and fire the RX interrupt. The driver can then query the READ index and
 * process as many packets as possible, moving the WRITE index forward as it
 * resets the Rx queue buffers with new memory.
 *
 * The management in the driver is as follows:
 * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free.
 *   When the interrupt handler is called, the request is processed.
 *   The page is either stolen - transferred to the upper layer
 *   or reused - added immediately to the iwl->rxq->rx_free list.
 * + When the page is stolen - the driver updates the matching queue's used
 *   count, detaches the RBD and transfers it to the queue used list.
 *   When there are two used RBDs - they are transferred to the allocator empty
 *   list. Work is then scheduled for the allocator to start allocating
 *   eight buffers.
 *   When there are another 6 used RBDs - they are transferred to the allocator
 *   empty list and the driver tries to claim the pre-allocated buffers and
 *   add them to iwl->rxq->rx_free. If it fails - it continues to claim them
 *   until ready.
 *   When there are 8+ buffers in the free list - either from allocation or from
 *   8 reused unstolen pages - restock is called to update the FW and indexes.
 * + In order to make sure the allocator always has RBDs to use for allocation
 *   the allocator has an initial pool the size of num_queues * (8 - 2) - the
 *   maximum missing RBDs per allocation request (request posted with 2
 *   empty RBDs, there is no guarantee when the other 6 RBDs are supplied).
 *   The queues supply the recycling of the rest of the RBDs.
 * + A received packet is processed and handed to the kernel network stack,
 *   detached from the iwl->rxq. The driver 'processed' index is updated.
 * + If there are no allocated buffers in iwl->rxq->rx_free,
 *   the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
 *   If there were enough free buffers and RX_STALLED is set it is cleared.
 *
 *
 * Driver sequence:
 *
 * iwl_rxq_alloc()            Allocates rx_free
 * iwl_pcie_rx_replenish()    Replenishes rx_free list from rx_used, and calls
 *                            iwl_pcie_rxq_restock.
 *                            Used only during initialization.
 * iwl_pcie_rxq_restock()     Moves available buffers from rx_free into Rx
 *                            queue, updates firmware pointers, and updates
 *                            the WRITE index.
 * iwl_pcie_rx_allocator()    Background work for allocating pages.
 *
 * -- enable interrupts --
 * ISR - iwl_rx()             Detach iwl_rx_mem_buffers from pool up to the
 *                            READ INDEX, detaching the SKB from the pool.
 *                            Moves the packet buffer from queue to rx_used.
 *                            Posts and claims requests to the allocator.
 *                            Calls iwl_pcie_rxq_restock to refill any empty
 *                            slots.
 *
 * RBD life-cycle:
 *
 * Init:
 * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue
 *
 * Regular Receive interrupt:
 * Page Stolen:
 * rxq.queue -> rxq.rx_used -> allocator.rbd_empty ->
 * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue
 * Page not Stolen:
 * rxq.queue -> rxq.rx_free -> rxq.queue
 * ...
 *
 */
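/*
 * Illustration (not part of the driver): with the index convention above
 * and a power-of-two queue size, the number of slots the driver may still
 * restock is (READ - WRITE - 1) masked to the queue size -- exactly what
 * iwl_rxq_space() below computes. A minimal sketch for a 256-entry queue:
 *
 *	u32 size = 256, read = 10;
 *	u32 space;
 *
 *	space = (read - 9 - 1) & (size - 1);	// WRITE == READ - 1: 0 slots,
 *						// everything already restocked
 *	space = (read - 10 - 1) & (size - 1);	// WRITE == READ: 255 slots,
 *						// nothing restocked yet
 */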
/*
 * iwl_rxq_space - Return number of free slots available in queue.
 */
static int iwl_rxq_space(const struct iwl_rxq *rxq)
{
	/* Make sure rx queue size is a power of 2 */
	WARN_ON(rxq->queue_size & (rxq->queue_size - 1));

	/*
	 * There can be up to (RX_QUEUE_SIZE - 1) free slots, to avoid ambiguity
	 * between empty and completely full queues.
	 * The following is equivalent to modulo by RX_QUEUE_SIZE and is well
	 * defined for negative dividends.
	 */
	return (rxq->read - rxq->write - 1) & (rxq->queue_size - 1);
}

/*
 * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
 */
static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
{
	return cpu_to_le32((u32)(dma_addr >> 8));
}

/*
 * iwl_pcie_rx_stop - stops the Rx DMA
 */
int iwl_pcie_rx_stop(struct iwl_trans *trans)
{
	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
		/* TODO: remove this for 22560 once fw does it */
		iwl_write_umac_prph(trans, RFH_RXF_DMA_CFG_GEN3, 0);
		return iwl_poll_umac_prph_bit(trans, RFH_GEN_STATUS_GEN3,
					      RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
	} else if (trans->cfg->mq_rx_supported) {
		iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0);
		return iwl_poll_prph_bit(trans, RFH_GEN_STATUS,
					 RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
	} else {
		iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
		return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
					   FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE,
					   1000);
	}
}
/*
 * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
 */
static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
				    struct iwl_rxq *rxq)
{
	u32 reg;

	lockdep_assert_held(&rxq->lock);

	/*
	 * explicitly wake up the NIC if:
	 * 1. shadow registers aren't enabled
	 * 2. there is a chance that the NIC is asleep
	 */
	if (!trans->cfg->base_params->shadow_reg_enable &&
	    test_bit(STATUS_TPOWER_PMI, &trans->status)) {
		reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			IWL_DEBUG_INFO(trans, "Rx queue requesting wakeup, GP1 = 0x%x\n",
				       reg);
			iwl_set_bit(trans, CSR_GP_CNTRL,
				    BIT(trans->cfg->csr->flag_mac_access_req));
			rxq->need_update = true;
			return;
		}
	}

	rxq->write_actual = round_down(rxq->write, 8);
	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_22560)
		iwl_write32(trans, HBUS_TARG_WRPTR,
			    (rxq->write_actual |
			     ((FIRST_RX_QUEUE + rxq->id) << 16)));
	else if (trans->cfg->mq_rx_supported)
		iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id),
			    rxq->write_actual);
	else
		iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
}

static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		if (!rxq->need_update)
			continue;
		spin_lock(&rxq->lock);
		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
		rxq->need_update = false;
		spin_unlock(&rxq->lock);
	}
}

static void iwl_pcie_restock_bd(struct iwl_trans *trans,
				struct iwl_rxq *rxq,
				struct iwl_rx_mem_buffer *rxb)
{
	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
		struct iwl_rx_transfer_desc *bd = rxq->bd;

		BUILD_BUG_ON(sizeof(*bd) != 2 * sizeof(u64));

		bd[rxq->write].addr = cpu_to_le64(rxb->page_dma);
		bd[rxq->write].rbid = cpu_to_le16(rxb->vid);
	} else {
		__le64 *bd = rxq->bd;

		bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid);
	}

	IWL_DEBUG_RX(trans, "Assigned virtual RB ID %u to queue %d index %d\n",
		     (u32)rxb->vid, rxq->id, rxq->write);
}

/*
 * iwl_pcie_rxmq_restock - restock implementation for multi-queue rx
 */
static void iwl_pcie_rxmq_restock(struct iwl_trans *trans,
				  struct iwl_rxq *rxq)
{
	struct iwl_rx_mem_buffer *rxb;

	/*
	 * If the device isn't enabled - no need to try to add buffers...
	 * This can happen when we stop the device and still have an interrupt
	 * pending. We stop the APM before we sync the interrupts because we
	 * have to (see comment there). On the other hand, since the APM is
	 * stopped, we cannot access the HW (in particular not prph).
	 * So don't try to restock if the APM has been already stopped.
	 */
	if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
		return;

	spin_lock(&rxq->lock);
	while (rxq->free_count) {
		/* Get next free Rx buffer, remove from free list */
		rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);
		rxb->invalid = false;
		/* first 12 bits are expected to be empty */
		WARN_ON(rxb->page_dma & DMA_BIT_MASK(12));
		/* Point to Rx buffer via next RBD in circular buffer */
		iwl_pcie_restock_bd(trans, rxq, rxb);
		rxq->write = (rxq->write + 1) & MQ_RX_TABLE_MASK;
		rxq->free_count--;
	}
	spin_unlock(&rxq->lock);

	/*
	 * If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8.
	 */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock(&rxq->lock);
		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
		spin_unlock(&rxq->lock);
	}
}
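/*
 * Illustration (not part of the driver): on multi-queue hardware before
 * the 22560 family, iwl_pcie_restock_bd() above packs the page DMA address
 * and the virtual buffer ID into a single __le64. The page is at least
 * 4 KiB aligned, so the low 12 bits are free to carry the VID that
 * iwl_pcie_get_rxb() later recovers. A minimal sketch of the packing,
 * assuming an aligned address:
 *
 *	dma_addr_t page_dma = 0xabcd000;	// bits 0..11 are zero
 *	u16 vid = 42;
 *	__le64 bd = cpu_to_le64(page_dma | vid);
 *	u16 back = le64_to_cpu(bd) & 0x0FFF;	// 42 again
 */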
/*
 * iwl_pcie_rxsq_restock - restock implementation for single queue rx
 */
static void iwl_pcie_rxsq_restock(struct iwl_trans *trans,
				  struct iwl_rxq *rxq)
{
	struct iwl_rx_mem_buffer *rxb;

	/*
	 * If the device isn't enabled - no need to try to add buffers...
	 * This can happen when we stop the device and still have an interrupt
	 * pending. We stop the APM before we sync the interrupts because we
	 * have to (see comment there). On the other hand, since the APM is
	 * stopped, we cannot access the HW (in particular not prph).
	 * So don't try to restock if the APM has been already stopped.
	 */
	if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
		return;

	spin_lock(&rxq->lock);
	while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
		__le32 *bd = (__le32 *)rxq->bd;
		/* The overwritten rxb must be a used one */
		rxb = rxq->queue[rxq->write];
		BUG_ON(rxb && rxb->page);

		/* Get next free Rx buffer, remove from free list */
		rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);
		rxb->invalid = false;

		/* Point to Rx buffer via next RBD in circular buffer */
		bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma);
		rxq->queue[rxq->write] = rxb;
		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
		rxq->free_count--;
	}
	spin_unlock(&rxq->lock);

	/* If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8.
	 */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock(&rxq->lock);
		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
		spin_unlock(&rxq->lock);
	}
}

/*
 * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool
 *
 * If there are slots in the RX queue that need to be restocked,
 * and we have free pre-allocated buffers, fill the ranks as much
 * as we can, pulling from rx_free.
 *
 * This moves the 'write' index forward to catch up with 'processed', and
 * also updates the memory address in the firmware to reference the new
 * target buffer.
 */
static
void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
	if (trans->cfg->mq_rx_supported)
		iwl_pcie_rxmq_restock(trans, rxq);
	else
		iwl_pcie_rxsq_restock(trans, rxq);
}

/*
 * iwl_pcie_rx_alloc_page - allocates and returns a page.
 *
 */
static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
					   gfp_t priority)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct page *page;
	gfp_t gfp_mask = priority;

	if (trans_pcie->rx_page_order > 0)
		gfp_mask |= __GFP_COMP;

	/* Alloc a new receive buffer */
	page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
	if (!page) {
		if (net_ratelimit())
			IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n",
				       trans_pcie->rx_page_order);
		/*
		 * Issue an error if we don't have enough pre-allocated
		 * buffers.
		 */
		if (!(gfp_mask & __GFP_NOWARN) && net_ratelimit())
			IWL_CRIT(trans, "Failed to alloc_pages\n");
		return NULL;
	}
	return page;
}

/*
 * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
 *
 * A used RBD is an Rx buffer that has been given to the stack. To use it again
 * a page must be allocated and the RBD must point to the page. This function
 * doesn't change the HW pointer but handles the list of pages that is used by
 * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
 * allocated buffers.
*/ void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority, struct iwl_rxq *rxq) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_rx_mem_buffer *rxb; struct page *page; while (1) { spin_lock(&rxq->lock); if (list_empty(&rxq->rx_used)) { spin_unlock(&rxq->lock); return; } spin_unlock(&rxq->lock); /* Alloc a new receive buffer */ page = iwl_pcie_rx_alloc_page(trans, priority); if (!page) return; spin_lock(&rxq->lock); if (list_empty(&rxq->rx_used)) { spin_unlock(&rxq->lock); __free_pages(page, trans_pcie->rx_page_order); return; } rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer, list); list_del(&rxb->list); spin_unlock(&rxq->lock); BUG_ON(rxb->page); rxb->page = page; /* Get physical address of the RB */ rxb->page_dma = dma_map_page(trans->dev, page, 0, PAGE_SIZE << trans_pcie->rx_page_order, DMA_FROM_DEVICE); if (dma_mapping_error(trans->dev, rxb->page_dma)) { rxb->page = NULL; spin_lock(&rxq->lock); list_add(&rxb->list, &rxq->rx_used); spin_unlock(&rxq->lock); __free_pages(page, trans_pcie->rx_page_order); return; } spin_lock(&rxq->lock); list_add_tail(&rxb->list, &rxq->rx_free); rxq->free_count++; spin_unlock(&rxq->lock); } } void iwl_pcie_free_rbs_pool(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int i; for (i = 0; i < RX_POOL_SIZE; i++) { if (!trans_pcie->rx_pool[i].page) continue; dma_unmap_page(trans->dev, trans_pcie->rx_pool[i].page_dma, PAGE_SIZE << trans_pcie->rx_page_order, DMA_FROM_DEVICE); __free_pages(trans_pcie->rx_pool[i].page, trans_pcie->rx_page_order); trans_pcie->rx_pool[i].page = NULL; } } /* * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues * * Allocates for each received request 8 pages * Called as a scheduled work item. */ static void iwl_pcie_rx_allocator(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_rb_allocator *rba = &trans_pcie->rba; struct list_head local_empty; int pending = atomic_read(&rba->req_pending); IWL_DEBUG_TPT(trans, "Pending allocation requests = %d\n", pending); /* If we were scheduled - there is at least one request */ spin_lock(&rba->lock); /* swap out the rba->rbd_empty to a local list */ list_replace_init(&rba->rbd_empty, &local_empty); spin_unlock(&rba->lock); while (pending) { int i; LIST_HEAD(local_allocated); gfp_t gfp_mask = GFP_KERNEL; /* Do not post a warning if there are only a few requests */ if (pending < RX_PENDING_WATERMARK) gfp_mask |= __GFP_NOWARN; for (i = 0; i < RX_CLAIM_REQ_ALLOC;) { struct iwl_rx_mem_buffer *rxb; struct page *page; /* List should never be empty - each reused RBD is * returned to the list, and initial pool covers any * possible gap between the time the page is allocated * to the time the RBD is added. 
			 */
			BUG_ON(list_empty(&local_empty));
			/* Get the first rxb from the rbd list */
			rxb = list_first_entry(&local_empty,
					       struct iwl_rx_mem_buffer, list);
			BUG_ON(rxb->page);

			/* Alloc a new receive buffer */
			page = iwl_pcie_rx_alloc_page(trans, gfp_mask);
			if (!page)
				continue;
			rxb->page = page;

			/* Get physical address of the RB */
			rxb->page_dma = dma_map_page(trans->dev, page, 0,
						     PAGE_SIZE <<
						     trans_pcie->rx_page_order,
						     DMA_FROM_DEVICE);
			if (dma_mapping_error(trans->dev, rxb->page_dma)) {
				rxb->page = NULL;
				__free_pages(page, trans_pcie->rx_page_order);
				continue;
			}

			/* move the allocated entry to the out list */
			list_move(&rxb->list, &local_allocated);
			i++;
		}

		atomic_dec(&rba->req_pending);
		pending--;

		if (!pending) {
			pending = atomic_read(&rba->req_pending);
			if (pending)
				IWL_DEBUG_TPT(trans,
					      "Got more pending allocation requests = %d\n",
					      pending);
		}

		spin_lock(&rba->lock);
		/* add the allocated rbds to the allocator allocated list */
		list_splice_tail(&local_allocated, &rba->rbd_allocated);
		/* get more empty RBDs for current pending requests */
		list_splice_tail_init(&rba->rbd_empty, &local_empty);
		spin_unlock(&rba->lock);

		atomic_inc(&rba->req_ready);
	}

	spin_lock(&rba->lock);
	/* return unused rbds to the allocator empty list */
	list_splice_tail(&local_empty, &rba->rbd_empty);
	spin_unlock(&rba->lock);

	IWL_DEBUG_TPT(trans, "%s, exit.\n", __func__);
}

/*
 * iwl_pcie_rx_allocator_get - returns the pre-allocated pages
 *
 * Called by queue when the queue posted allocation request and
 * has freed 8 RBDs in order to restock itself.
 * This function directly moves the allocated RBs to the queue's ownership
 * and updates the relevant counters.
 */
static void iwl_pcie_rx_allocator_get(struct iwl_trans *trans,
				      struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	int i;

	lockdep_assert_held(&rxq->lock);

	/*
	 * atomic_dec_if_positive returns req_ready - 1 for any scenario.
	 * If req_ready is 0 atomic_dec_if_positive will return -1 and this
	 * function will return early, as there are no ready requests.
	 * atomic_dec_if_positive will perform the *actual* decrement only if
	 * req_ready > 0, i.e. - there are ready requests and the function
	 * hands one request to the caller.
	 */
	if (atomic_dec_if_positive(&rba->req_ready) < 0)
		return;

	spin_lock(&rba->lock);
	for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) {
		/* Get next free Rx buffer, remove it from free list */
		struct iwl_rx_mem_buffer *rxb =
			list_first_entry(&rba->rbd_allocated,
					 struct iwl_rx_mem_buffer, list);

		list_move(&rxb->list, &rxq->rx_free);
	}
	spin_unlock(&rba->lock);

	rxq->used_count -= RX_CLAIM_REQ_ALLOC;
	rxq->free_count += RX_CLAIM_REQ_ALLOC;
}

void iwl_pcie_rx_allocator_work(struct work_struct *data)
{
	struct iwl_rb_allocator *rba_p =
		container_of(data, struct iwl_rb_allocator, rx_alloc);
	struct iwl_trans_pcie *trans_pcie =
		container_of(rba_p, struct iwl_trans_pcie, rba);

	iwl_pcie_rx_allocator(trans_pcie->trans);
}

static int iwl_pcie_free_bd_size(struct iwl_trans *trans, bool use_rx_td)
{
	struct iwl_rx_transfer_desc *rx_td;

	if (use_rx_td)
		return sizeof(*rx_td);
	else
		return trans->cfg->mq_rx_supported ?
sizeof(__le64) : sizeof(__le32); } static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans, struct iwl_rxq *rxq) { struct device *dev = trans->dev; bool use_rx_td = (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560); int free_size = iwl_pcie_free_bd_size(trans, use_rx_td); if (rxq->bd) dma_free_coherent(trans->dev, free_size * rxq->queue_size, rxq->bd, rxq->bd_dma); rxq->bd_dma = 0; rxq->bd = NULL; rxq->rb_stts_dma = 0; rxq->rb_stts = NULL; if (rxq->used_bd) dma_free_coherent(trans->dev, (use_rx_td ? sizeof(*rxq->cd) : sizeof(__le32)) * rxq->queue_size, rxq->used_bd, rxq->used_bd_dma); rxq->used_bd_dma = 0; rxq->used_bd = NULL; if (trans->cfg->device_family < IWL_DEVICE_FAMILY_22560) return; if (rxq->tr_tail) dma_free_coherent(dev, sizeof(__le16), rxq->tr_tail, rxq->tr_tail_dma); rxq->tr_tail_dma = 0; rxq->tr_tail = NULL; if (rxq->cr_tail) dma_free_coherent(dev, sizeof(__le16), rxq->cr_tail, rxq->cr_tail_dma); rxq->cr_tail_dma = 0; rxq->cr_tail = NULL; } static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans, struct iwl_rxq *rxq) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct device *dev = trans->dev; int i; int free_size; bool use_rx_td = (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560); size_t rb_stts_size = use_rx_td ? sizeof(__le16) : sizeof(struct iwl_rb_status); spin_lock_init(&rxq->lock); if (trans->cfg->mq_rx_supported) rxq->queue_size = MQ_RX_TABLE_SIZE; else rxq->queue_size = RX_QUEUE_SIZE; free_size = iwl_pcie_free_bd_size(trans, use_rx_td); /* * Allocate the circular buffer of Read Buffer Descriptors * (RBDs) */ rxq->bd = dma_alloc_coherent(dev, free_size * rxq->queue_size, &rxq->bd_dma, GFP_KERNEL); if (!rxq->bd) goto err; if (trans->cfg->mq_rx_supported) { rxq->used_bd = dma_alloc_coherent(dev, (use_rx_td ? sizeof(*rxq->cd) : sizeof(__le32)) * rxq->queue_size, &rxq->used_bd_dma, GFP_KERNEL); if (!rxq->used_bd) goto err; } rxq->rb_stts = trans_pcie->base_rb_stts + rxq->id * rb_stts_size; rxq->rb_stts_dma = trans_pcie->base_rb_stts_dma + rxq->id * rb_stts_size; if (!use_rx_td) return 0; /* Allocate the driver's pointer to TR tail */ rxq->tr_tail = dma_alloc_coherent(dev, sizeof(__le16), &rxq->tr_tail_dma, GFP_KERNEL); if (!rxq->tr_tail) goto err; /* Allocate the driver's pointer to CR tail */ rxq->cr_tail = dma_alloc_coherent(dev, sizeof(__le16), &rxq->cr_tail_dma, GFP_KERNEL); if (!rxq->cr_tail) goto err; /* * W/A 22560 device step Z0 must be non zero bug * TODO: remove this when stop supporting Z0 */ *rxq->cr_tail = cpu_to_le16(500); return 0; err: for (i = 0; i < trans->num_rx_queues; i++) { rxq = &trans_pcie->rxq[i]; iwl_pcie_free_rxq_dma(trans, rxq); } return -ENOMEM; } int iwl_pcie_rx_alloc(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_rb_allocator *rba = &trans_pcie->rba; int i, ret; size_t rb_stts_size = trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560 ? sizeof(__le16) : sizeof(struct iwl_rb_status); if (WARN_ON(trans_pcie->rxq)) return -EINVAL; trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq), GFP_KERNEL); if (!trans_pcie->rxq) return -ENOMEM; spin_lock_init(&rba->lock); /* * Allocate the driver's pointer to receive buffer status. * Allocate for all queues continuously (HW requirement). 
	 */
	trans_pcie->base_rb_stts =
			dma_alloc_coherent(trans->dev,
					   rb_stts_size * trans->num_rx_queues,
					   &trans_pcie->base_rb_stts_dma,
					   GFP_KERNEL);
	if (!trans_pcie->base_rb_stts) {
		ret = -ENOMEM;
		goto err;
	}

	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		rxq->id = i;
		ret = iwl_pcie_alloc_rxq_dma(trans, rxq);
		if (ret)
			goto err;
	}
	return 0;

err:
	if (trans_pcie->base_rb_stts) {
		dma_free_coherent(trans->dev,
				  rb_stts_size * trans->num_rx_queues,
				  trans_pcie->base_rb_stts,
				  trans_pcie->base_rb_stts_dma);
		trans_pcie->base_rb_stts = NULL;
		trans_pcie->base_rb_stts_dma = 0;
	}
	kfree(trans_pcie->rxq);

	return ret;
}

static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 rb_size;
	unsigned long flags;
	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */

	switch (trans_pcie->rx_buf_size) {
	case IWL_AMSDU_4K:
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
		break;
	case IWL_AMSDU_8K:
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
		break;
	case IWL_AMSDU_12K:
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K;
		break;
	default:
		WARN_ON(1);
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
	}

	if (!iwl_trans_grab_nic_access(trans, &flags))
		return;

	/* Stop Rx DMA */
	iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	/* reset and flush pointers */
	iwl_write32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	iwl_write32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	iwl_write32(trans, FH_RSCSR_CHNL0_RDPTR, 0);

	/* Reset driver's Rx queue write index */
	iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
		    (u32)(rxq->bd_dma >> 8));

	/* Tell device where in DRAM to update its Rx status */
	iwl_write32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
		    rxq->rb_stts_dma >> 4);

	/* Enable Rx DMA
	 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
	 *      the credit mechanism in 5000 HW RX FIFO
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k or 12k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
		    FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
		    FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
		    FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
		    rb_size |
		    (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
		    (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	iwl_trans_release_nic_access(trans, &flags);

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (trans->cfg->host_interrupt_operation_mode)
		iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE);
}
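/*
 * Illustration (not part of the driver): the legacy init above hands the
 * RBD ring base address to the FH shifted right by 8, the same encoding
 * iwl_pcie_dma_addr2rbd_ptr() uses -- the ring is 256-byte aligned, so no
 * information is lost. A minimal sketch:
 *
 *	dma_addr_t bd_dma = 0x12345600;			// 256-byte aligned
 *	__le32 ptr = cpu_to_le32((u32)(bd_dma >> 8));	// 0x123456
 *	// the device shifts left by 8 again when fetching the ring
 */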
static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 rb_size, enabled = 0;
	unsigned long flags;
	int i;

	switch (trans_pcie->rx_buf_size) {
	case IWL_AMSDU_2K:
		rb_size = RFH_RXF_DMA_RB_SIZE_2K;
		break;
	case IWL_AMSDU_4K:
		rb_size = RFH_RXF_DMA_RB_SIZE_4K;
		break;
	case IWL_AMSDU_8K:
		rb_size = RFH_RXF_DMA_RB_SIZE_8K;
		break;
	case IWL_AMSDU_12K:
		rb_size = RFH_RXF_DMA_RB_SIZE_12K;
		break;
	default:
		WARN_ON(1);
		rb_size = RFH_RXF_DMA_RB_SIZE_4K;
	}

	if (!iwl_trans_grab_nic_access(trans, &flags))
		return;

	/* Stop Rx DMA */
	iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG, 0);
	/* disable free and used rx queue operation */
	iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, 0);

	for (i = 0; i < trans->num_rx_queues; i++) {
		/* Tell device where to find RBD free table in DRAM */
		iwl_write_prph64_no_grab(trans,
					 RFH_Q_FRBDCB_BA_LSB(i),
					 trans_pcie->rxq[i].bd_dma);
		/* Tell device where to find RBD used table in DRAM */
		iwl_write_prph64_no_grab(trans,
					 RFH_Q_URBDCB_BA_LSB(i),
					 trans_pcie->rxq[i].used_bd_dma);
		/* Tell device where in DRAM to update its Rx status */
		iwl_write_prph64_no_grab(trans,
					 RFH_Q_URBD_STTS_WPTR_LSB(i),
					 trans_pcie->rxq[i].rb_stts_dma);
		/* Reset device index tables */
		iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_WIDX(i), 0);
		iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_RIDX(i), 0);
		iwl_write_prph_no_grab(trans, RFH_Q_URBDCB_WIDX(i), 0);

		enabled |= BIT(i) | BIT(i + 16);
	}

	/*
	 * Enable Rx DMA
	 * Rx buffer size 4 or 8k or 12k
	 * Min RB size 4 or 8
	 * Drop frames that exceed RB size
	 * 512 RBDs
	 */
	iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG,
			       RFH_DMA_EN_ENABLE_VAL | rb_size |
			       RFH_RXF_DMA_MIN_RB_4_8 |
			       RFH_RXF_DMA_DROP_TOO_LARGE_MASK |
			       RFH_RXF_DMA_RBDCB_SIZE_512);

	/*
	 * Activate DMA snooping.
	 * Set RX DMA chunk size to 64B for IOSF and 128B for PCIe
	 * Default queue is 0
	 */
	iwl_write_prph_no_grab(trans, RFH_GEN_CFG,
			       RFH_GEN_CFG_RFH_DMA_SNOOP |
			       RFH_GEN_CFG_VAL(DEFAULT_RXQ_NUM, 0) |
			       RFH_GEN_CFG_SERVICE_DMA_SNOOP |
			       RFH_GEN_CFG_VAL(RB_CHUNK_SIZE,
					       trans->cfg->integrated ?
					       RFH_GEN_CFG_RB_CHUNK_SIZE_64 :
					       RFH_GEN_CFG_RB_CHUNK_SIZE_128));
	/* Enable the relevant rx queues */
	iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, enabled);

	iwl_trans_release_nic_access(trans, &flags);

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
}

void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
{
	lockdep_assert_held(&rxq->lock);

	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);
	rxq->free_count = 0;
	rxq->used_count = 0;
}

int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
{
	WARN_ON(1);
	return 0;
}

int _iwl_pcie_rx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *def_rxq;
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	int i, err, queue_size, allocator_pool_size, num_alloc;

	if (!trans_pcie->rxq) {
		err = iwl_pcie_rx_alloc(trans);
		if (err)
			return err;
	}
	def_rxq = trans_pcie->rxq;

	cancel_work_sync(&rba->rx_alloc);

	spin_lock(&rba->lock);
	atomic_set(&rba->req_pending, 0);
	atomic_set(&rba->req_ready, 0);
	INIT_LIST_HEAD(&rba->rbd_allocated);
	INIT_LIST_HEAD(&rba->rbd_empty);
	spin_unlock(&rba->lock);

	/* free all first - we might be reconfigured for a different size */
	iwl_pcie_free_rbs_pool(trans);

	for (i = 0; i < RX_QUEUE_SIZE; i++)
		def_rxq->queue[i] = NULL;

	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		spin_lock(&rxq->lock);
		/*
		 * Set read write pointer to reflect that we have processed
		 * and used all buffers, but have not restocked the Rx queue
		 * with fresh buffers
		 */
		rxq->read = 0;
		rxq->write = 0;
		rxq->write_actual = 0;
		memset(rxq->rb_stts, 0,
		       (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) ?
		       sizeof(__le16) : sizeof(struct iwl_rb_status));

		iwl_pcie_rx_init_rxb_lists(rxq);

		if (!rxq->napi.poll)
			netif_napi_add(&trans_pcie->napi_dev, &rxq->napi,
				       iwl_pcie_dummy_napi_poll, 64);

		spin_unlock(&rxq->lock);
	}

	/* move the pool to the default queue and allocator ownerships */
	queue_size = trans->cfg->mq_rx_supported ?
MQ_RX_NUM_RBDS : RX_QUEUE_SIZE; allocator_pool_size = trans->num_rx_queues * (RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC); num_alloc = queue_size + allocator_pool_size; BUILD_BUG_ON(ARRAY_SIZE(trans_pcie->global_table) != ARRAY_SIZE(trans_pcie->rx_pool)); for (i = 0; i < num_alloc; i++) { struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i]; if (i < allocator_pool_size) list_add(&rxb->list, &rba->rbd_empty); else list_add(&rxb->list, &def_rxq->rx_used); trans_pcie->global_table[i] = rxb; rxb->vid = (u16)(i + 1); rxb->invalid = true; } iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL, def_rxq); return 0; } int iwl_pcie_rx_init(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int ret = _iwl_pcie_rx_init(trans); if (ret) return ret; if (trans->cfg->mq_rx_supported) iwl_pcie_rx_mq_hw_init(trans); else iwl_pcie_rx_hw_init(trans, trans_pcie->rxq); iwl_pcie_rxq_restock(trans, trans_pcie->rxq); spin_lock(&trans_pcie->rxq->lock); iwl_pcie_rxq_inc_wr_ptr(trans, trans_pcie->rxq); spin_unlock(&trans_pcie->rxq->lock); return 0; } int iwl_pcie_gen2_rx_init(struct iwl_trans *trans) { /* Set interrupt coalescing timer to default (2048 usecs) */ iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF); /* * We don't configure the RFH. * Restock will be done at alive, after firmware configured the RFH. */ return _iwl_pcie_rx_init(trans); } void iwl_pcie_rx_free(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_rb_allocator *rba = &trans_pcie->rba; int i; size_t rb_stts_size = trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560 ? sizeof(__le16) : sizeof(struct iwl_rb_status); /* * if rxq is NULL, it means that nothing has been allocated, * exit now */ if (!trans_pcie->rxq) { IWL_DEBUG_INFO(trans, "Free NULL rx context\n"); return; } cancel_work_sync(&rba->rx_alloc); iwl_pcie_free_rbs_pool(trans); if (trans_pcie->base_rb_stts) { dma_free_coherent(trans->dev, rb_stts_size * trans->num_rx_queues, trans_pcie->base_rb_stts, trans_pcie->base_rb_stts_dma); trans_pcie->base_rb_stts = NULL; trans_pcie->base_rb_stts_dma = 0; } for (i = 0; i < trans->num_rx_queues; i++) { struct iwl_rxq *rxq = &trans_pcie->rxq[i]; iwl_pcie_free_rxq_dma(trans, rxq); if (rxq->napi.poll) netif_napi_del(&rxq->napi); } kfree(trans_pcie->rxq); } static void iwl_pcie_rx_move_to_allocator(struct iwl_rxq *rxq, struct iwl_rb_allocator *rba) { spin_lock(&rba->lock); list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty); spin_unlock(&rba->lock); } /* * iwl_pcie_rx_reuse_rbd - Recycle used RBDs * * Called when a RBD can be reused. The RBD is transferred to the allocator. * When there are 2 empty RBDs - a request for allocation is posted */ static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans, struct iwl_rx_mem_buffer *rxb, struct iwl_rxq *rxq, bool emergency) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_rb_allocator *rba = &trans_pcie->rba; /* Move the RBD to the used list, will be moved to allocator in batches * before claiming or posting a request*/ list_add_tail(&rxb->list, &rxq->rx_used); if (unlikely(emergency)) return; /* Count the allocator owned RBDs */ rxq->used_count++; /* If we have RX_POST_REQ_ALLOC new released rx buffers - * issue a request for allocator. Modulo RX_CLAIM_REQ_ALLOC is * used for the case we failed to claim RX_CLAIM_REQ_ALLOC, * after but we still need to post another request. 
*/ if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) { /* Move the 2 RBDs to the allocator ownership. Allocator has another 6 from pool for the request completion*/ iwl_pcie_rx_move_to_allocator(rxq, rba); atomic_inc(&rba->req_pending); queue_work(rba->alloc_wq, &rba->rx_alloc); } } static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, struct iwl_rxq *rxq, struct iwl_rx_mem_buffer *rxb, bool emergency, int idx) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue]; bool page_stolen = false; int max_len = PAGE_SIZE << trans_pcie->rx_page_order; u32 offset = 0; if (WARN_ON(!rxb)) return; dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE); while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) { struct iwl_rx_packet *pkt; u16 sequence; bool reclaim; int index, cmd_index, len; struct iwl_rx_cmd_buffer rxcb = { ._offset = offset, ._rx_page_order = trans_pcie->rx_page_order, ._page = rxb->page, ._page_stolen = false, .truesize = max_len, }; pkt = rxb_addr(&rxcb); if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID)) { IWL_DEBUG_RX(trans, "Q %d: RB end marker at offset %d\n", rxq->id, offset); break; } WARN((le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >> FH_RSCSR_RXQ_POS != rxq->id, "frame on invalid queue - is on %d and indicates %d\n", rxq->id, (le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >> FH_RSCSR_RXQ_POS); IWL_DEBUG_RX(trans, "Q %d: cmd at offset %d: %s (%.2x.%2x, seq 0x%x)\n", rxq->id, offset, iwl_get_cmd_string(trans, iwl_cmd_id(pkt->hdr.cmd, pkt->hdr.group_id, 0)), pkt->hdr.group_id, pkt->hdr.cmd, le16_to_cpu(pkt->hdr.sequence)); len = iwl_rx_packet_len(pkt); len += sizeof(u32); /* account for status word */ trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len); trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len); /* Reclaim a command buffer only if this packet is a response * to a (driver-originated) command. * If the packet (e.g. Rx frame) originated from uCode, * there is no command buffer to reclaim. * Ucode should set SEQ_RX_FRAME bit if ucode-originated, * but apparently a few don't get set; catch them here. */ reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME); if (reclaim && !pkt->hdr.group_id) { int i; for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) { if (trans_pcie->no_reclaim_cmds[i] == pkt->hdr.cmd) { reclaim = false; break; } } } sequence = le16_to_cpu(pkt->hdr.sequence); index = SEQ_TO_INDEX(sequence); cmd_index = iwl_pcie_get_cmd_index(txq, index); if (rxq->id == trans_pcie->def_rx_queue) iwl_op_mode_rx(trans->op_mode, &rxq->napi, &rxcb); else iwl_op_mode_rx_rss(trans->op_mode, &rxq->napi, &rxcb, rxq->id); if (reclaim) { kzfree(txq->entries[cmd_index].free_buf); txq->entries[cmd_index].free_buf = NULL; } /* * After here, we should always check rxcb._page_stolen, * if it is true then one of the handlers took the page. */ if (reclaim) { /* Invoke any callbacks, transfer the buffer to caller, * and fire off the (possibly) blocking * iwl_trans_send_cmd() * as we reclaim the driver command queue */ if (!rxcb._page_stolen) iwl_pcie_hcmd_complete(trans, &rxcb); else IWL_WARN(trans, "Claim null rxb?\n"); } page_stolen |= rxcb._page_stolen; if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) break; offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN); } /* page was stolen from us -- free our reference */ if (page_stolen) { __free_pages(rxb->page, trans_pcie->rx_page_order); rxb->page = NULL; } /* Reuse the page if possible. 
For notification packets and * SKBs that fail to Rx correctly, add them back into the * rx_free list for reuse later. */ if (rxb->page != NULL) { rxb->page_dma = dma_map_page(trans->dev, rxb->page, 0, PAGE_SIZE << trans_pcie->rx_page_order, DMA_FROM_DEVICE); if (dma_mapping_error(trans->dev, rxb->page_dma)) { /* * free the page(s) as well to not break * the invariant that the items on the used * list have no page(s) */ __free_pages(rxb->page, trans_pcie->rx_page_order); rxb->page = NULL; iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency); } else { list_add_tail(&rxb->list, &rxq->rx_free); rxq->free_count++; } } else iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency); } static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans, struct iwl_rxq *rxq, int i) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_rx_mem_buffer *rxb; u16 vid; BUILD_BUG_ON(sizeof(struct iwl_rx_completion_desc) != 32); if (!trans->cfg->mq_rx_supported) { rxb = rxq->queue[i]; rxq->queue[i] = NULL; return rxb; } /* used_bd is a 32/16 bit but only 12 are used to retrieve the vid */ if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) vid = le16_to_cpu(rxq->cd[i].rbid) & 0x0FFF; else vid = le32_to_cpu(rxq->bd_32[i]) & 0x0FFF; if (!vid || vid > ARRAY_SIZE(trans_pcie->global_table)) goto out_err; rxb = trans_pcie->global_table[vid - 1]; if (rxb->invalid) goto out_err; IWL_DEBUG_RX(trans, "Got virtual RB ID %u\n", (u32)rxb->vid); rxb->invalid = true; return rxb; out_err: WARN(1, "Invalid rxb from HW %u\n", (u32)vid); iwl_force_nmi(trans); return NULL; } /* * iwl_pcie_rx_handle - Main entry function for receiving responses from fw */ static void iwl_pcie_rx_handle(struct iwl_trans *trans, int queue) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_rxq *rxq; u32 r, i, count = 0; bool emergency = false; if (WARN_ON_ONCE(!trans_pcie->rxq || !trans_pcie->rxq[queue].bd)) return; rxq = &trans_pcie->rxq[queue]; restart: spin_lock(&rxq->lock); /* uCode's read index (stored in shared DRAM) indicates the last Rx * buffer that the driver may process (last buffer filled by ucode). */ r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF; i = rxq->read; /* W/A 9000 device step A0 wrap-around bug */ r &= (rxq->queue_size - 1); /* Rx interrupt, but nothing sent from uCode */ if (i == r) IWL_DEBUG_RX(trans, "Q %d: HW = SW = %d\n", rxq->id, r); while (i != r) { struct iwl_rb_allocator *rba = &trans_pcie->rba; struct iwl_rx_mem_buffer *rxb; /* number of RBDs still waiting for page allocation */ u32 rb_pending_alloc = atomic_read(&trans_pcie->rba.req_pending) * RX_CLAIM_REQ_ALLOC; if (unlikely(rb_pending_alloc >= rxq->queue_size / 2 && !emergency)) { iwl_pcie_rx_move_to_allocator(rxq, rba); emergency = true; IWL_DEBUG_TPT(trans, "RX path is in emergency. Pending allocations %d\n", rb_pending_alloc); } IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i); rxb = iwl_pcie_get_rxb(trans, rxq, i); if (!rxb) goto out; iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency, i); i = (i + 1) & (rxq->queue_size - 1); /* * If we have RX_CLAIM_REQ_ALLOC released rx buffers - * try to claim the pre-allocated buffers from the allocator. * If not ready - will try to reclaim next time. 
* There is no need to reschedule work - allocator exits only * on success */ if (rxq->used_count >= RX_CLAIM_REQ_ALLOC) iwl_pcie_rx_allocator_get(trans, rxq); if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 && !emergency) { /* Add the remaining empty RBDs for allocator use */ iwl_pcie_rx_move_to_allocator(rxq, rba); } else if (emergency) { count++; if (count == 8) { count = 0; if (rb_pending_alloc < rxq->queue_size / 3) { IWL_DEBUG_TPT(trans, "RX path exited emergency. Pending allocations %d\n", rb_pending_alloc); emergency = false; } rxq->read = i; spin_unlock(&rxq->lock); iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq); iwl_pcie_rxq_restock(trans, rxq); goto restart; } } } out: /* Backtrack one entry */ rxq->read = i; /* update cr tail with the rxq read pointer */ if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) *rxq->cr_tail = cpu_to_le16(r); spin_unlock(&rxq->lock); /* * handle a case where in emergency there are some unallocated RBDs. * those RBDs are in the used list, but are not tracked by the queue's * used_count which counts allocator owned RBDs. * unallocated emergency RBDs must be allocated on exit, otherwise * when called again the function may not be in emergency mode and * they will be handed to the allocator with no tracking in the RBD * allocator counters, which will lead to them never being claimed back * by the queue. * by allocating them here, they are now in the queue free list, and * will be restocked by the next call of iwl_pcie_rxq_restock. */ if (unlikely(emergency && count)) iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq); if (rxq->napi.poll) napi_gro_flush(&rxq->napi, false); iwl_pcie_rxq_restock(trans, rxq); } static struct iwl_trans_pcie *iwl_pcie_get_trans_pcie(struct msix_entry *entry) { u8 queue = entry->entry; struct msix_entry *entries = entry - queue; return container_of(entries, struct iwl_trans_pcie, msix_entries[0]); } /* * iwl_pcie_rx_msix_handle - Main entry function for receiving responses from fw * This interrupt handler should be used with RSS queue only. */ irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id) { struct msix_entry *entry = dev_id; struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry); struct iwl_trans *trans = trans_pcie->trans; trace_iwlwifi_dev_irq_msix(trans->dev, entry, false, 0, 0); if (WARN_ON(entry->entry >= trans->num_rx_queues)) return IRQ_NONE; lock_map_acquire(&trans->sync_cmd_lockdep_map); local_bh_disable(); iwl_pcie_rx_handle(trans, entry->entry); local_bh_enable(); iwl_pcie_clear_irq(trans, entry); lock_map_release(&trans->sync_cmd_lockdep_map); return IRQ_HANDLED; } /* * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card */ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int i; /* W/A for WiFi/WiMAX coex and WiMAX own the RF */ if (trans->cfg->internal_wimax_coex && !trans->cfg->apmg_not_supported && (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) & APMS_CLK_VAL_MRB_FUNC_MODE) || (iwl_read_prph(trans, APMG_PS_CTRL_REG) & APMG_PS_CTRL_VAL_RESET_REQ))) { clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); iwl_op_mode_wimax_active(trans->op_mode); wake_up(&trans_pcie->wait_command_queue); return; } for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) { if (!trans_pcie->txq[i]) continue; del_timer(&trans_pcie->txq[i]->stuck_timer); } /* The STATUS_FW_ERROR bit is set in this function. This must happen * before we wake up the command caller, to ensure a proper cleanup. 
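* (iwl_trans_fw_error() below is what sets STATUS_FW_ERROR; only after that * is STATUS_SYNC_HCMD_ACTIVE cleared and the command waiter woken, so the * caller observes the error state when it wakes up.)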
*/ iwl_trans_fw_error(trans); clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); wake_up(&trans_pcie->wait_command_queue); } static u32 iwl_pcie_int_cause_non_ict(struct iwl_trans *trans) { u32 inta; lockdep_assert_held(&IWL_TRANS_GET_PCIE_TRANS(trans)->irq_lock); trace_iwlwifi_dev_irq(trans->dev); /* Discover which interrupts are active/pending */ inta = iwl_read32(trans, CSR_INT); /* the thread will service interrupts and re-enable them */ return inta; } /* a device (PCI-E) page is 4096 bytes long */ #define ICT_SHIFT 12 #define ICT_SIZE (1 << ICT_SHIFT) #define ICT_COUNT (ICT_SIZE / sizeof(u32)) /* Interrupt handler using the ICT table. With this, the driver stops * reading the INTA register to get the device's interrupts, since reading * that register is expensive. Instead, the device writes interrupts into the * ICT DRAM table and increments an index, then fires an interrupt to the * driver. The driver ORs all ICT table entries from the current index up to * the first entry with a 0 value - the result is the interrupt(s) to * service - then sets the entries back to 0 and updates the index. */ static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); u32 inta; u32 val = 0; u32 read; trace_iwlwifi_dev_irq(trans->dev); /* Ignore interrupt if there's nothing in NIC to service. * This may be due to IRQ shared with another device, * or due to sporadic interrupts thrown from our NIC. */ read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]); trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read); if (!read) return 0; /* * Collect all entries up to the first 0, starting from ict_index; * note we already read at ict_index. */ do { val |= read; IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n", trans_pcie->ict_index, read); trans_pcie->ict_tbl[trans_pcie->ict_index] = 0; trans_pcie->ict_index = ((trans_pcie->ict_index + 1) & (ICT_COUNT - 1)); read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]); trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read); } while (read); /* We should not get this value, just ignore it. */ if (val == 0xffffffff) val = 0; /* * This is a w/a for a h/w bug. The h/w bug may cause the Rx bit * (bit 15 before shifting it to 31) to clear when using interrupt * coalescing. Fortunately, bits 18 and 19 stay set when this happens, * so we use them to decide on the real state of the Rx bit. * In other words, bit 15 is set if bit 18 or bit 19 are set. */ if (val & 0xC0000) val |= 0x8000; inta = (0xff & val) | ((0xff00 & val) << 16); return inta; } void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct isr_statistics *isr_stats = &trans_pcie->isr_stats; bool hw_rfkill, prev, report; mutex_lock(&trans_pcie->mutex); prev = test_bit(STATUS_RFKILL_OPMODE, &trans->status); hw_rfkill = iwl_is_rfkill_set(trans); if (hw_rfkill) { set_bit(STATUS_RFKILL_OPMODE, &trans->status); set_bit(STATUS_RFKILL_HW, &trans->status); } if (trans_pcie->opmode_down) report = hw_rfkill; else report = test_bit(STATUS_RFKILL_OPMODE, &trans->status); IWL_WARN(trans, "RF_KILL bit toggled to %s.\n", hw_rfkill ?
"disable radio" : "enable radio"); isr_stats->rfkill++; if (prev != report) iwl_trans_pcie_rf_kill(trans, report); mutex_unlock(&trans_pcie->mutex); if (hw_rfkill) { if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status)) IWL_DEBUG_RF_KILL(trans, "Rfkill while SYNC HCMD in flight\n"); wake_up(&trans_pcie->wait_command_queue); } else { clear_bit(STATUS_RFKILL_HW, &trans->status); if (trans_pcie->opmode_down) clear_bit(STATUS_RFKILL_OPMODE, &trans->status); } } irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id) { struct iwl_trans *trans = dev_id; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct isr_statistics *isr_stats = &trans_pcie->isr_stats; u32 inta = 0; u32 handled = 0; lock_map_acquire(&trans->sync_cmd_lockdep_map); spin_lock(&trans_pcie->irq_lock); /* dram interrupt table not set yet, * use legacy interrupt. */ if (likely(trans_pcie->use_ict)) inta = iwl_pcie_int_cause_ict(trans); else inta = iwl_pcie_int_cause_non_ict(trans); if (iwl_have_debug_level(IWL_DL_ISR)) { IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled 0x%08x(sw), enabled(hw) 0x%08x, fh 0x%08x\n", inta, trans_pcie->inta_mask, iwl_read32(trans, CSR_INT_MASK), iwl_read32(trans, CSR_FH_INT_STATUS)); if (inta & (~trans_pcie->inta_mask)) IWL_DEBUG_ISR(trans, "We got a masked interrupt (0x%08x)\n", inta & (~trans_pcie->inta_mask)); } inta &= trans_pcie->inta_mask; /* * Ignore interrupt if there's nothing in NIC to service. * This may be due to IRQ shared with another device, * or due to sporadic interrupts thrown from our NIC. */ if (unlikely(!inta)) { IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n"); /* * Re-enable interrupts here since we don't * have anything to service */ if (test_bit(STATUS_INT_ENABLED, &trans->status)) _iwl_enable_interrupts(trans); spin_unlock(&trans_pcie->irq_lock); lock_map_release(&trans->sync_cmd_lockdep_map); return IRQ_NONE; } if (unlikely(inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0)) { /* * Hardware disappeared. It might have * already raised an interrupt. */ IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta); spin_unlock(&trans_pcie->irq_lock); goto out; } /* Ack/clear/reset pending uCode interrupts. * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS, */ /* There is a hardware bug in the interrupt mask function that some * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if * they are disabled in the CSR_INT_MASK register. Furthermore the * ICT interrupt handling mechanism has another bug that might cause * these unmasked interrupts fail to be detected. We workaround the * hardware bugs here by ACKing all the possible interrupts so that * interrupt coalescing can still be achieved. */ iwl_write32(trans, CSR_INT, inta | ~trans_pcie->inta_mask); if (iwl_have_debug_level(IWL_DL_ISR)) IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n", inta, iwl_read32(trans, CSR_INT_MASK)); spin_unlock(&trans_pcie->irq_lock); /* Now service all interrupt bits discovered above. */ if (inta & CSR_INT_BIT_HW_ERR) { IWL_ERR(trans, "Hardware error detected. 
Restarting.\n"); /* Tell the device to stop sending interrupts */ iwl_disable_interrupts(trans); isr_stats->hw++; iwl_pcie_irq_handle_error(trans); handled |= CSR_INT_BIT_HW_ERR; goto out; } /* NIC fires this, but we don't use it, redundant with WAKEUP */ if (inta & CSR_INT_BIT_SCD) { IWL_DEBUG_ISR(trans, "Scheduler finished to transmit the frame/frames.\n"); isr_stats->sch++; } /* Alive notification via Rx interrupt will do the real work */ if (inta & CSR_INT_BIT_ALIVE) { IWL_DEBUG_ISR(trans, "Alive interrupt\n"); isr_stats->alive++; if (trans->cfg->gen2) { /* * We can restock, since firmware configured * the RFH */ iwl_pcie_rxmq_restock(trans, trans_pcie->rxq); } handled |= CSR_INT_BIT_ALIVE; } /* Safely ignore these bits for debug checks below */ inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE); /* HW RF KILL switch toggled */ if (inta & CSR_INT_BIT_RF_KILL) { iwl_pcie_handle_rfkill_irq(trans); handled |= CSR_INT_BIT_RF_KILL; } /* Chip got too hot and stopped itself */ if (inta & CSR_INT_BIT_CT_KILL) { IWL_ERR(trans, "Microcode CT kill error detected.\n"); isr_stats->ctkill++; handled |= CSR_INT_BIT_CT_KILL; } /* Error detected by uCode */ if (inta & CSR_INT_BIT_SW_ERR) { IWL_ERR(trans, "Microcode SW error detected. " " Restarting 0x%X.\n", inta); isr_stats->sw++; iwl_pcie_irq_handle_error(trans); handled |= CSR_INT_BIT_SW_ERR; } /* uCode wakes up after power-down sleep */ if (inta & CSR_INT_BIT_WAKEUP) { IWL_DEBUG_ISR(trans, "Wakeup interrupt\n"); iwl_pcie_rxq_check_wrptr(trans); iwl_pcie_txq_check_wrptrs(trans); isr_stats->wakeup++; handled |= CSR_INT_BIT_WAKEUP; } /* All uCode command responses, including Tx command responses, * Rx "responses" (frame-received notification), and other * notifications from uCode come through here*/ if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX | CSR_INT_BIT_RX_PERIODIC)) { IWL_DEBUG_ISR(trans, "Rx interrupt\n"); if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) { handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX); iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_RX_MASK); } if (inta & CSR_INT_BIT_RX_PERIODIC) { handled |= CSR_INT_BIT_RX_PERIODIC; iwl_write32(trans, CSR_INT, CSR_INT_BIT_RX_PERIODIC); } /* Sending RX interrupt require many steps to be done in the * the device: * 1- write interrupt to current index in ICT table. * 2- dma RX frame. * 3- update RX shared data to indicate last write index. * 4- send interrupt. * This could lead to RX race, driver could receive RX interrupt * but the shared data changes does not reflect this; * periodic interrupt will detect any dangling Rx activity. */ /* Disable periodic interrupt; we use it as just a one-shot. */ iwl_write8(trans, CSR_INT_PERIODIC_REG, CSR_INT_PERIODIC_DIS); /* * Enable periodic interrupt in 8 msec only if we received * real RX interrupt (instead of just periodic int), to catch * any dangling Rx interrupt. If it was just the periodic * interrupt, there was no dangling Rx activity, and no need * to extend the periodic interrupt; one-shot is enough. 
*/ if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) iwl_write8(trans, CSR_INT_PERIODIC_REG, CSR_INT_PERIODIC_ENA); isr_stats->rx++; local_bh_disable(); iwl_pcie_rx_handle(trans, 0); local_bh_enable(); } /* This "Tx" DMA channel is used only for loading uCode */ if (inta & CSR_INT_BIT_FH_TX) { iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK); IWL_DEBUG_ISR(trans, "uCode load interrupt\n"); isr_stats->tx++; handled |= CSR_INT_BIT_FH_TX; /* Wake up uCode load routine, now that load is complete */ trans_pcie->ucode_write_complete = true; wake_up(&trans_pcie->ucode_write_waitq); } if (inta & ~handled) { IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled); isr_stats->unhandled++; } if (inta & ~(trans_pcie->inta_mask)) { IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n", inta & ~trans_pcie->inta_mask); } spin_lock(&trans_pcie->irq_lock); /* only re-enable all interrupts if they were disabled by irq */ if (test_bit(STATUS_INT_ENABLED, &trans->status)) _iwl_enable_interrupts(trans); /* we are loading the firmware, enable FH_TX interrupt only */ else if (handled & CSR_INT_BIT_FH_TX) iwl_enable_fw_load_int(trans); /* Re-enable RF_KILL if it occurred */ else if (handled & CSR_INT_BIT_RF_KILL) iwl_enable_rfkill_int(trans); /* Re-enable the ALIVE / Rx interrupt if it occurred */ else if (handled & (CSR_INT_BIT_ALIVE | CSR_INT_BIT_FH_RX)) iwl_enable_fw_load_int_ctx_info(trans); spin_unlock(&trans_pcie->irq_lock); out: lock_map_release(&trans->sync_cmd_lockdep_map); return IRQ_HANDLED; } /****************************************************************************** * * ICT functions * ******************************************************************************/ /* Free DRAM table */ void iwl_pcie_free_ict(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); if (trans_pcie->ict_tbl) { dma_free_coherent(trans->dev, ICT_SIZE, trans_pcie->ict_tbl, trans_pcie->ict_tbl_dma); trans_pcie->ict_tbl = NULL; trans_pcie->ict_tbl_dma = 0; } } /* * Allocate the shared DRAM table - an aligned memory block of ICT_SIZE. * Also reset all data related to ICT table interrupts. */ int iwl_pcie_alloc_ict(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); trans_pcie->ict_tbl = dma_alloc_coherent(trans->dev, ICT_SIZE, &trans_pcie->ict_tbl_dma, GFP_KERNEL); if (!trans_pcie->ict_tbl) return -ENOMEM; /* just an API sanity check ... it is guaranteed to be aligned */ if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) { iwl_pcie_free_ict(trans); return -EINVAL; } return 0; } /* Device is going up: inform it that we are using the ICT interrupt table, * and tell the driver to start using ICT interrupts.
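* Concretely, iwl_pcie_reset_ict() zeroes the table, programs its DMA address * (shifted right by ICT_SHIFT) together with the enable / wrap-check / * write-pointer bits into CSR_DRAM_INT_TBL_REG, and resets use_ict and * ict_index, all under irq_lock with interrupts disabled around the update.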
*/ void iwl_pcie_reset_ict(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); u32 val; if (!trans_pcie->ict_tbl) return; spin_lock(&trans_pcie->irq_lock); _iwl_disable_interrupts(trans); memset(trans_pcie->ict_tbl, 0, ICT_SIZE); val = trans_pcie->ict_tbl_dma >> ICT_SHIFT; val |= CSR_DRAM_INT_TBL_ENABLE | CSR_DRAM_INIT_TBL_WRAP_CHECK | CSR_DRAM_INIT_TBL_WRITE_POINTER; IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val); iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val); trans_pcie->use_ict = true; trans_pcie->ict_index = 0; iwl_write32(trans, CSR_INT, trans_pcie->inta_mask); _iwl_enable_interrupts(trans); spin_unlock(&trans_pcie->irq_lock); } /* Device is going down - disable ICT interrupt usage */ void iwl_pcie_disable_ict(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); spin_lock(&trans_pcie->irq_lock); trans_pcie->use_ict = false; spin_unlock(&trans_pcie->irq_lock); } irqreturn_t iwl_pcie_isr(int irq, void *data) { struct iwl_trans *trans = data; if (!trans) return IRQ_NONE; /* Disable (but don't clear!) interrupts here to avoid * back-to-back ISRs and sporadic interrupts from our NIC. * If we have something to service, the tasklet will re-enable ints. * If we *don't* have something, we'll re-enable before leaving here. */ iwl_write32(trans, CSR_INT_MASK, 0x00000000); return IRQ_WAKE_THREAD; } irqreturn_t iwl_pcie_msix_isr(int irq, void *data) { return IRQ_WAKE_THREAD; } irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id) { struct msix_entry *entry = dev_id; struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry); struct iwl_trans *trans = trans_pcie->trans; struct isr_statistics *isr_stats = &trans_pcie->isr_stats; u32 inta_fh, inta_hw; lock_map_acquire(&trans->sync_cmd_lockdep_map); spin_lock(&trans_pcie->irq_lock); inta_fh = iwl_read32(trans, CSR_MSIX_FH_INT_CAUSES_AD); inta_hw = iwl_read32(trans, CSR_MSIX_HW_INT_CAUSES_AD); /* * Clear the cause registers to avoid handling the same cause again.
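* (Writing the just-read values back to CSR_MSIX_FH_INT_CAUSES_AD and * CSR_MSIX_HW_INT_CAUSES_AD acks exactly the causes we are about to service; * causes that arrive afterwards stay pending for the next interrupt.)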
*/ iwl_write32(trans, CSR_MSIX_FH_INT_CAUSES_AD, inta_fh); iwl_write32(trans, CSR_MSIX_HW_INT_CAUSES_AD, inta_hw); spin_unlock(&trans_pcie->irq_lock); trace_iwlwifi_dev_irq_msix(trans->dev, entry, true, inta_fh, inta_hw); if (unlikely(!(inta_fh | inta_hw))) { IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n"); lock_map_release(&trans->sync_cmd_lockdep_map); return IRQ_NONE; } if (iwl_have_debug_level(IWL_DL_ISR)) { IWL_DEBUG_ISR(trans, "ISR inta_fh 0x%08x, enabled (sw) 0x%08x (hw) 0x%08x\n", inta_fh, trans_pcie->fh_mask, iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD)); if (inta_fh & ~trans_pcie->fh_mask) IWL_DEBUG_ISR(trans, "We got a masked interrupt (0x%08x)\n", inta_fh & ~trans_pcie->fh_mask); } inta_fh &= trans_pcie->fh_mask; if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX) && inta_fh & MSIX_FH_INT_CAUSES_Q0) { local_bh_disable(); iwl_pcie_rx_handle(trans, 0); local_bh_enable(); } if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS) && inta_fh & MSIX_FH_INT_CAUSES_Q1) { local_bh_disable(); iwl_pcie_rx_handle(trans, 1); local_bh_enable(); } /* This "Tx" DMA channel is used only for loading uCode */ if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM) { IWL_DEBUG_ISR(trans, "uCode load interrupt\n"); isr_stats->tx++; /* * Wake up uCode load routine, * now that load is complete */ trans_pcie->ucode_write_complete = true; wake_up(&trans_pcie->ucode_write_waitq); } /* Error detected by uCode */ if ((inta_fh & MSIX_FH_INT_CAUSES_FH_ERR) || (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR) || (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR_V2)) { IWL_ERR(trans, "Microcode SW error detected. Restarting 0x%X.\n", inta_fh); isr_stats->sw++; iwl_pcie_irq_handle_error(trans); } /* After checking FH register check HW register */ if (iwl_have_debug_level(IWL_DL_ISR)) { IWL_DEBUG_ISR(trans, "ISR inta_hw 0x%08x, enabled (sw) 0x%08x (hw) 0x%08x\n", inta_hw, trans_pcie->hw_mask, iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD)); if (inta_hw & ~trans_pcie->hw_mask) IWL_DEBUG_ISR(trans, "We got a masked interrupt 0x%08x\n", inta_hw & ~trans_pcie->hw_mask); } inta_hw &= trans_pcie->hw_mask; /* Alive notification via Rx interrupt will do the real work */ if (inta_hw & MSIX_HW_INT_CAUSES_REG_ALIVE) { IWL_DEBUG_ISR(trans, "Alive interrupt\n"); isr_stats->alive++; if (trans->cfg->gen2) { /* We can restock, since firmware configured the RFH */ iwl_pcie_rxmq_restock(trans, trans_pcie->rxq); } } if (trans->cfg->device_family == IWL_DEVICE_FAMILY_22560 && inta_hw & MSIX_HW_INT_CAUSES_REG_IPC) { /* Reflect IML transfer status */ int res = iwl_read32(trans, CSR_IML_RESP_ADDR); IWL_DEBUG_ISR(trans, "IML transfer status: %d\n", res); if (res == IWL_IMAGE_RESP_FAIL) { isr_stats->sw++; iwl_pcie_irq_handle_error(trans); } } else if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP) { /* uCode wakes up after power-down sleep */ IWL_DEBUG_ISR(trans, "Wakeup interrupt\n"); iwl_pcie_rxq_check_wrptr(trans); iwl_pcie_txq_check_wrptrs(trans); isr_stats->wakeup++; } if (inta_hw & MSIX_HW_INT_CAUSES_REG_IML) { /* Reflect IML transfer status */ int res = iwl_read32(trans, CSR_IML_RESP_ADDR); IWL_DEBUG_ISR(trans, "IML transfer status: %d\n", res); if (res == IWL_IMAGE_RESP_FAIL) { isr_stats->sw++; iwl_pcie_irq_handle_error(trans); } } /* Chip got too hot and stopped itself */ if (inta_hw & MSIX_HW_INT_CAUSES_REG_CT_KILL) { IWL_ERR(trans, "Microcode CT kill error detected.\n"); isr_stats->ctkill++; } /* HW RF KILL switch toggled */ if (inta_hw & MSIX_HW_INT_CAUSES_REG_RF_KILL) iwl_pcie_handle_rfkill_irq(trans); if (inta_hw & 
MSIX_HW_INT_CAUSES_REG_HW_ERR) { IWL_ERR(trans, "Hardware error detected. Restarting.\n"); isr_stats->hw++; trans->dbg.hw_error = true; iwl_pcie_irq_handle_error(trans); } iwl_pcie_clear_irq(trans, entry); lock_map_release(&trans->sync_cmd_lockdep_map); return IRQ_HANDLED; } backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c000066400000000000000000000257051351064634500276440ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * BSD LICENSE * * Copyright(c) 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #include "iwl-trans.h" #include "iwl-prph.h" #include "iwl-context-info.h" #include "iwl-context-info-gen3.h" #include "internal.h" #include "fw/dbg.h" /* * Start up NIC's basic functionality after it has been reset * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop()) * NOTE: This does not load uCode nor start the embedded processor */ int iwl_pcie_gen2_apm_init(struct iwl_trans *trans) { int ret = 0; IWL_DEBUG_INFO(trans, "Init card's basic functions\n"); /* * Use "set_bit" below rather than "write", to preserve any hardware * bits already set by default after reset. 
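* (Hence the iwl_set_bit() read-modify-write calls on CSR_GIO_CHICKEN_BITS, * CSR_DBG_HPET_MEM_REG and CSR_HW_IF_CONFIG_REG below, rather than plain * iwl_write32() stores.)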
*/ /* * Disable L0s without affecting L1; * don't wait for ICH L0s (ICH bug W/A) */ iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS, CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX); /* Set FH wait threshold to maximum (HW error during stress W/A) */ iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL); /* * Enable HAP INTA (interrupt from management bus) to * wake device's PCI Express link L1a -> L0s */ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A); iwl_pcie_apm_config(trans); ret = iwl_finish_nic_init(trans); if (ret) return ret; set_bit(STATUS_DEVICE_ENABLED, &trans->status); return 0; } static void iwl_pcie_gen2_apm_stop(struct iwl_trans *trans, bool op_mode_leave) { IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n"); if (op_mode_leave) { if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status)) iwl_pcie_gen2_apm_init(trans); /* inform ME that we are leaving */ iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG, CSR_RESET_LINK_PWR_MGMT_DISABLED); iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_PREPARE | CSR_HW_IF_CONFIG_REG_ENABLE_PME); mdelay(1); iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG, CSR_RESET_LINK_PWR_MGMT_DISABLED); mdelay(5); } clear_bit(STATUS_DEVICE_ENABLED, &trans->status); /* Stop device's DMA activity */ iwl_pcie_apm_stop_master(trans); iwl_trans_sw_reset(trans); /* * Clear "initialization complete" bit to move adapter from * D0A* (powered-up Active) --> D0U* (Uninitialized) state. */ iwl_clear_bit(trans, CSR_GP_CNTRL, BIT(trans->cfg->csr->flag_init_done)); } void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); lockdep_assert_held(&trans_pcie->mutex); if (trans_pcie->is_down) return; trans_pcie->is_down = true; /* tell the device to stop sending interrupts */ iwl_disable_interrupts(trans); /* device going down, stop using ICT table */ iwl_pcie_disable_ict(trans); /* * If a HW restart happens during firmware loading, * then the firmware loading might call this function * and later it might be called again due to the * restart. So don't process again if the device is * already dead. */ if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) { IWL_DEBUG_INFO(trans, "DEVICE_ENABLED bit was set and is now cleared\n"); iwl_pcie_gen2_tx_stop(trans); iwl_pcie_rx_stop(trans); } iwl_pcie_ctxt_info_free_paging(trans); if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) iwl_pcie_ctxt_info_gen3_free(trans); else iwl_pcie_ctxt_info_free(trans); /* Make sure (redundant) we've released our request to stay awake */ iwl_clear_bit(trans, CSR_GP_CNTRL, BIT(trans->cfg->csr->flag_mac_access_req)); /* Stop the device, and put it in low power state */ iwl_pcie_gen2_apm_stop(trans, false); iwl_trans_sw_reset(trans); /* * Upon stop, the IVAR table gets erased, so msi-x won't * work. This causes a bug in RF-KILL flows, since the interrupt * that enables radio won't fire on the correct irq, and the * driver won't be able to handle the interrupt. * Configure the IVAR table again after reset. */ iwl_pcie_conf_msix_hw(trans_pcie); /* * Upon stop, the APM issues an interrupt if HW RF kill is set. * This is a bug in certain versions of the hardware. * Certain devices also keep sending HW RF kill interrupt all * the time, unless the interrupt is ACKed even if the interrupt * should be masked. Re-ACK all the interrupts here.
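* (This is why iwl_disable_interrupts() is called again below even though it * already ran at the top of this function: the second call re-ACKs anything * the APM stop or MSI-X reconfiguration may have raised.)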
*/ iwl_disable_interrupts(trans); /* clear all status bits */ clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); clear_bit(STATUS_INT_ENABLED, &trans->status); clear_bit(STATUS_TPOWER_PMI, &trans->status); /* * Even if we stop the HW, we still want the RF kill * interrupt */ iwl_enable_rfkill_int(trans); /* re-take ownership to prevent other users from stealing the device */ iwl_pcie_prepare_card_hw(trans); } void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); bool was_in_rfkill; mutex_lock(&trans_pcie->mutex); trans_pcie->opmode_down = true; was_in_rfkill = test_bit(STATUS_RFKILL_OPMODE, &trans->status); _iwl_trans_pcie_gen2_stop_device(trans); iwl_trans_pcie_handle_stop_rfkill(trans, was_in_rfkill); mutex_unlock(&trans_pcie->mutex); } static int iwl_pcie_gen2_nic_init(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int queue_size = max_t(u32, IWL_CMD_QUEUE_SIZE, trans->cfg->min_txq_size); /* TODO: most of the logic can be removed in A0 - but not in Z0 */ spin_lock(&trans_pcie->irq_lock); iwl_pcie_gen2_apm_init(trans); spin_unlock(&trans_pcie->irq_lock); iwl_op_mode_nic_config(trans->op_mode); /* Allocate the RX queue, or reset if it is already allocated */ if (iwl_pcie_gen2_rx_init(trans)) return -ENOMEM; /* Allocate or reset and init all Tx and Command queues */ if (iwl_pcie_gen2_tx_init(trans, trans_pcie->cmd_queue, queue_size)) return -ENOMEM; /* enable shadow regs in HW */ iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF); IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n"); return 0; } void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); iwl_pcie_reset_ict(trans); /* make sure all queues are not stopped/used */ memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped)); memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used)); /* now that we got alive we can free the fw image & the context info; * paging memory cannot be freed, since FW will still use it */ iwl_pcie_ctxt_info_free(trans); /* * Re-enable all the interrupts, including the RF-Kill one, now that * the firmware is alive. */ iwl_enable_interrupts(trans); mutex_lock(&trans_pcie->mutex); iwl_pcie_check_hw_rf_kill(trans); mutex_unlock(&trans_pcie->mutex); } int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans, const struct fw_img *fw, bool run_in_rfkill) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); bool hw_rfkill; int ret; /* This may fail if AMT took ownership of the device */ if (iwl_pcie_prepare_card_hw(trans)) { IWL_WARN(trans, "Exit HW not ready\n"); ret = -EIO; goto out; } iwl_enable_rfkill_int(trans); iwl_write32(trans, CSR_INT, 0xFFFFFFFF); /* * We enabled the RF-Kill interrupt and the handler may very * well be running. Disable the interrupts to make sure no other * interrupt can be fired.
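* (iwl_pcie_synchronize_irqs() below waits for any handler already in flight * to finish before we take the mutex and sample the rfkill state.)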
*/ iwl_disable_interrupts(trans); /* Make sure it finished running */ iwl_pcie_synchronize_irqs(trans); mutex_lock(&trans_pcie->mutex); /* If platform's RF_KILL switch is NOT set to KILL */ hw_rfkill = iwl_pcie_check_hw_rf_kill(trans); if (hw_rfkill && !run_in_rfkill) { ret = -ERFKILL; goto out; } /* Someone called stop_device, don't try to start_fw */ if (trans_pcie->is_down) { IWL_WARN(trans, "Can't start_fw since the HW hasn't been started\n"); ret = -EIO; goto out; } /* make sure rfkill handshake bits are cleared */ iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); /* clear (again), then enable host interrupts */ iwl_write32(trans, CSR_INT, 0xFFFFFFFF); ret = iwl_pcie_gen2_nic_init(trans); if (ret) { IWL_ERR(trans, "Unable to init nic\n"); goto out; } if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) ret = iwl_pcie_ctxt_info_gen3_init(trans, fw); else ret = iwl_pcie_ctxt_info_init(trans, fw); if (ret) goto out; /* re-check RF-Kill state since we may have missed the interrupt */ hw_rfkill = iwl_pcie_check_hw_rf_kill(trans); if (hw_rfkill && !run_in_rfkill) ret = -ERFKILL; out: mutex_unlock(&trans_pcie->mutex); return ret; } backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/pcie/trans.c000066400000000000000000003240651351064634500270140ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2007 - 2015 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2005 - 2015 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #include #include #include #include #include #include #include #include #include #include #include "iwl-drv.h" #include "iwl-trans.h" #include "iwl-csr.h" #include "iwl-prph.h" #include "iwl-scd.h" #include "iwl-agn-hw.h" #include "fw/error-dump.h" #include "fw/dbg.h" #include "internal.h" #include "iwl-fh.h" #ifdef CPTCFG_IWLWIFI_DEVICE_TESTMODE #include "iwl-dnt-cfg.h" #endif /* extended range in FW SRAM */ #define IWL_FW_MEM_EXTENDED_START 0x40000 #define IWL_FW_MEM_EXTENDED_END 0x57FFF void iwl_trans_pcie_dump_regs(struct iwl_trans *trans) { #define PCI_DUMP_SIZE 352 #define PCI_MEM_DUMP_SIZE 64 #define PCI_PARENT_DUMP_SIZE 524 #define PREFIX_LEN 32 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct pci_dev *pdev = trans_pcie->pci_dev; u32 i, pos, alloc_size, *ptr, *buf; char *prefix; if (trans_pcie->pcie_dbg_dumped_once) return; /* Should be a multiple of 4 */ BUILD_BUG_ON(PCI_DUMP_SIZE > 4096 || PCI_DUMP_SIZE & 0x3); BUILD_BUG_ON(PCI_MEM_DUMP_SIZE > 4096 || PCI_MEM_DUMP_SIZE & 0x3); BUILD_BUG_ON(PCI_PARENT_DUMP_SIZE > 4096 || PCI_PARENT_DUMP_SIZE & 0x3); /* Alloc a max size buffer */ alloc_size = PCI_ERR_ROOT_ERR_SRC + 4 + PREFIX_LEN; alloc_size = max_t(u32, alloc_size, PCI_DUMP_SIZE + PREFIX_LEN); alloc_size = max_t(u32, alloc_size, PCI_MEM_DUMP_SIZE + PREFIX_LEN); alloc_size = max_t(u32, alloc_size, PCI_PARENT_DUMP_SIZE + PREFIX_LEN); buf = kmalloc(alloc_size, GFP_ATOMIC); if (!buf) return; prefix = (char *)buf + alloc_size - PREFIX_LEN; IWL_ERR(trans, "iwlwifi transaction failed, dumping registers\n"); /* Print wifi device registers */ sprintf(prefix, "iwlwifi %s: ", pci_name(pdev)); IWL_ERR(trans, "iwlwifi device config registers:\n"); for (i = 0, ptr = buf; i < PCI_DUMP_SIZE; i += 4, ptr++) if (pci_read_config_dword(pdev, i, ptr)) goto err_read; print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0); IWL_ERR(trans, "iwlwifi device memory mapped registers:\n"); for (i = 0, ptr = buf; i < PCI_MEM_DUMP_SIZE; i += 4, ptr++) *ptr = iwl_read32(trans, i); print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0); pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR); if (pos) { IWL_ERR(trans, "iwlwifi device AER capability structure:\n"); for (i = 0, ptr = buf; i < PCI_ERR_ROOT_COMMAND; i += 4, ptr++) if (pci_read_config_dword(pdev, pos + i, ptr)) goto err_read; print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0); } /* Print parent device registers next */ if (!pdev->bus->self) goto out; pdev = pdev->bus->self; sprintf(prefix, "iwlwifi %s: ", pci_name(pdev)); IWL_ERR(trans, "iwlwifi parent port (%s) config registers:\n", pci_name(pdev)); for (i = 0, ptr = buf; i < 
PCI_PARENT_DUMP_SIZE; i += 4, ptr++) if (pci_read_config_dword(pdev, i, ptr)) goto err_read; print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0); /* Print root port AER registers */ pos = 0; pdev = pcie_find_root_port(pdev); if (pdev) pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR); if (pos) { IWL_ERR(trans, "iwlwifi root port (%s) AER cap structure:\n", pci_name(pdev)); sprintf(prefix, "iwlwifi %s: ", pci_name(pdev)); for (i = 0, ptr = buf; i <= PCI_ERR_ROOT_ERR_SRC; i += 4, ptr++) if (pci_read_config_dword(pdev, pos + i, ptr)) goto err_read; print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0); } goto out; err_read: print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0); IWL_ERR(trans, "Read failed at 0x%X\n", i); out: trans_pcie->pcie_dbg_dumped_once = 1; kfree(buf); } static void iwl_trans_pcie_sw_reset(struct iwl_trans *trans) { /* Reset entire device - do controller reset (results in SHRD_HW_RST) */ iwl_set_bit(trans, trans->cfg->csr->addr_sw_reset, BIT(trans->cfg->csr->flag_sw_reset)); usleep_range(5000, 6000); } static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans) { int i; for (i = 0; i < trans->dbg.num_blocks; i++) { dma_free_coherent(trans->dev, trans->dbg.fw_mon[i].size, trans->dbg.fw_mon[i].block, trans->dbg.fw_mon[i].physical); trans->dbg.fw_mon[i].block = NULL; trans->dbg.fw_mon[i].physical = 0; trans->dbg.fw_mon[i].size = 0; trans->dbg.num_blocks--; } } static void iwl_pcie_alloc_fw_monitor_block(struct iwl_trans *trans, u8 max_power, u8 min_power) { void *cpu_addr = NULL; dma_addr_t phys = 0; u32 size = 0; u8 power; for (power = max_power; power >= min_power; power--) { size = BIT(power); cpu_addr = dma_alloc_coherent(trans->dev, size, &phys, GFP_KERNEL | __GFP_NOWARN); if (!cpu_addr) continue; IWL_INFO(trans, "Allocated 0x%08x bytes for firmware monitor.\n", size); break; } if (WARN_ON_ONCE(!cpu_addr)) return; if (power != max_power) IWL_ERR(trans, "Sorry - debug buffer is only %luK while you requested %luK\n", (unsigned long)BIT(power - 10), (unsigned long)BIT(max_power - 10)); trans->dbg.fw_mon[trans->dbg.num_blocks].block = cpu_addr; trans->dbg.fw_mon[trans->dbg.num_blocks].physical = phys; trans->dbg.fw_mon[trans->dbg.num_blocks].size = size; trans->dbg.num_blocks++; } void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power) { if (!max_power) { /* default max_power is maximum */ max_power = 26; } else { max_power += 11; } if (WARN(max_power > 26, "External buffer size for monitor is too big %d, check the FW TLV\n", max_power)) return; /* * This function allocates the default fw monitor.
* The optional additional ones will be allocated in runtime */ if (trans->dbg.num_blocks) return; iwl_pcie_alloc_fw_monitor_block(trans, max_power, 11); } static u32 iwl_trans_pcie_read_shr(struct iwl_trans *trans, u32 reg) { iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG, ((reg & 0x0000ffff) | (2 << 28))); return iwl_read32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG); } static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val) { iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG, val); iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG, ((reg & 0x0000ffff) | (3 << 28))); } static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux) { if (trans->cfg->apmg_not_supported) return; if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold)) iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_PWR_SRC_VAUX, ~APMG_PS_CTRL_MSK_PWR_SRC); else iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_PWR_SRC_VMAIN, ~APMG_PS_CTRL_MSK_PWR_SRC); } /* PCI registers */ #define PCI_CFG_RETRY_TIMEOUT 0x041 void iwl_pcie_apm_config(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); u16 lctl; u16 cap; /* * HW bug W/A for instability in PCIe bus L0S->L1 transition. * Check if BIOS (or OS) enabled L1-ASPM on this device. * If so (likely), disable L0S, so device moves directly L0->L1; * costs negligible amount of power savings. * If not (unlikely), enable L0S, so there is at least some * power savings, even without L1. */ pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl); if (lctl & PCI_EXP_LNKCTL_ASPM_L1) iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED); else iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED); trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S); pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap); trans->ltr_enabled = cap & PCI_EXP_DEVCTL2_LTR_EN; IWL_DEBUG_POWER(trans, "L1 %sabled - LTR %sabled\n", (lctl & PCI_EXP_LNKCTL_ASPM_L1) ? "En" : "Dis", trans->ltr_enabled ? "En" : "Dis"); } /* * Start up NIC's basic functionality after it has been reset * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop()) * NOTE: This does not load uCode nor start the embedded processor */ static int iwl_pcie_apm_init(struct iwl_trans *trans) { int ret; IWL_DEBUG_INFO(trans, "Init card's basic functions\n"); /* * Use "set_bit" below rather than "write", to preserve any hardware * bits already set by default after reset. 
*/ /* Disable L0S exit timer (platform NMI Work/Around) */ if (trans->cfg->device_family < IWL_DEVICE_FAMILY_8000) iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS, CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER); /* * Disable L0s without affecting L1; * don't wait for ICH L0s (ICH bug W/A) */ iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS, CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX); /* Set FH wait threshold to maximum (HW error during stress W/A) */ iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL); /* * Enable HAP INTA (interrupt from management bus) to * wake device's PCI Express link L1a -> L0s */ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A); iwl_pcie_apm_config(trans); /* Configure analog phase-lock-loop before activating to D0A */ if (trans->cfg->base_params->pll_cfg) iwl_set_bit(trans, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL); ret = iwl_finish_nic_init(trans); if (ret) return ret; if (trans->cfg->host_interrupt_operation_mode) { /* * This is a bit of an abuse - This is needed for 7260 / 3160 * only check host_interrupt_operation_mode even if this is * not related to host_interrupt_operation_mode. * * Enable the oscillator to count wake up time for L1 exit. This * consumes slightly more power (100uA) - but allows to be sure * that we wake up from L1 on time. * * This looks weird: read twice the same register, discard the * value, set a bit, and yet again, read that same register * just to discard the value. But that's the way the hardware * seems to like it. */ iwl_read_prph(trans, OSC_CLK); iwl_read_prph(trans, OSC_CLK); iwl_set_bits_prph(trans, OSC_CLK, OSC_CLK_FORCE_CONTROL); iwl_read_prph(trans, OSC_CLK); iwl_read_prph(trans, OSC_CLK); } /* * Enable DMA clock and wait for it to stabilize. * * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" * bits do not disable clocks. This preserves any hardware * bits already set by default in "CLK_CTRL_REG" after reset. */ if (!trans->cfg->apmg_not_supported) { iwl_write_prph(trans, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT); udelay(20); /* Disable L1-Active */ iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG, APMG_PCIDEV_STT_VAL_L1_ACT_DIS); /* Clear the interrupt in APMG if the NIC is in RFKILL */ iwl_write_prph(trans, APMG_RTC_INT_STT_REG, APMG_RTC_INT_STT_RFKILL); } set_bit(STATUS_DEVICE_ENABLED, &trans->status); return 0; } /* * Enable LP XTAL to avoid HW bug where device may consume much power if * FW is not loaded after device reset. LP XTAL is disabled by default * after device HW reset. Do it only if XTAL is fed by internal source. * Configure device's "persistence" mode to avoid resetting XTAL again when * SHRD_HW_RST occurs in S3. */ static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans) { int ret; u32 apmg_gp1_reg; u32 apmg_xtal_cfg_reg; u32 dl_cfg_reg; /* Force XTAL ON */ __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_XTAL_ON); iwl_trans_pcie_sw_reset(trans); ret = iwl_finish_nic_init(trans); if (WARN_ON(ret)) { /* Release XTAL ON request */ __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_XTAL_ON); return; } /* * Clear "disable persistence" to avoid LP XTAL resetting when * SHRD_HW_RST is applied in S3. */ iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG, APMG_PCIDEV_STT_VAL_PERSIST_DIS); /* * Force APMG XTAL to be active to prevent its disabling by HW * caused by APMG idle state. 
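* (The XTAL configuration lives in shared (SHR) space, so it is accessed * indirectly via iwl_trans_pcie_read_shr()/iwl_trans_pcie_write_shr() below * rather than with a plain CSR read/write.)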
*/ apmg_xtal_cfg_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_XTAL_CFG_REG); iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG, apmg_xtal_cfg_reg | SHR_APMG_XTAL_CFG_XTAL_ON_REQ); iwl_trans_pcie_sw_reset(trans); /* Enable LP XTAL by indirect access through CSR */ apmg_gp1_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_GP1_REG); iwl_trans_pcie_write_shr(trans, SHR_APMG_GP1_REG, apmg_gp1_reg | SHR_APMG_GP1_WF_XTAL_LP_EN | SHR_APMG_GP1_CHICKEN_BIT_SELECT); /* Clear delay line clock power up */ dl_cfg_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_DL_CFG_REG); iwl_trans_pcie_write_shr(trans, SHR_APMG_DL_CFG_REG, dl_cfg_reg & ~SHR_APMG_DL_CFG_DL_CLOCK_POWER_UP); /* * Enable persistence mode to avoid LP XTAL resetting when * SHRD_HW_RST is applied in S3. */ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_PERSIST_MODE); /* * Clear "initialization complete" bit to move adapter from * D0A* (powered-up Active) --> D0U* (Uninitialized) state. */ iwl_clear_bit(trans, CSR_GP_CNTRL, BIT(trans->cfg->csr->flag_init_done)); /* Activates XTAL resources monitor */ __iwl_trans_pcie_set_bit(trans, CSR_MONITOR_CFG_REG, CSR_MONITOR_XTAL_RESOURCES); /* Release XTAL ON request */ __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_XTAL_ON); udelay(10); /* Release APMG XTAL */ iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG, apmg_xtal_cfg_reg & ~SHR_APMG_XTAL_CFG_XTAL_ON_REQ); } void iwl_pcie_apm_stop_master(struct iwl_trans *trans) { int ret; /* stop device's busmaster DMA activity */ iwl_set_bit(trans, trans->cfg->csr->addr_sw_reset, BIT(trans->cfg->csr->flag_stop_master)); ret = iwl_poll_bit(trans, trans->cfg->csr->addr_sw_reset, BIT(trans->cfg->csr->flag_master_dis), BIT(trans->cfg->csr->flag_master_dis), 100); if (ret < 0) IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n"); IWL_DEBUG_INFO(trans, "stop master\n"); } static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave) { IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n"); if (op_mode_leave) { if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status)) iwl_pcie_apm_init(trans); /* inform ME that we are leaving */ if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG, APMG_PCIDEV_STT_VAL_WAKE_ME); else if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000) { iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG, CSR_RESET_LINK_PWR_MGMT_DISABLED); iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_PREPARE | CSR_HW_IF_CONFIG_REG_ENABLE_PME); mdelay(1); iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG, CSR_RESET_LINK_PWR_MGMT_DISABLED); } mdelay(5); } clear_bit(STATUS_DEVICE_ENABLED, &trans->status); /* Stop device's DMA activity */ iwl_pcie_apm_stop_master(trans); if (trans->cfg->lp_xtal_workaround) { iwl_pcie_apm_lp_xtal_enable(trans); return; } iwl_trans_pcie_sw_reset(trans); /* * Clear "initialization complete" bit to move adapter from * D0A* (powered-up Active) --> D0U* (Uninitialized) state. 
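* (Which CSR_GP_CNTRL bit this is depends on the device family, hence the * trans->cfg->csr->flag_init_done indirection below.)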
*/ iwl_clear_bit(trans, CSR_GP_CNTRL, BIT(trans->cfg->csr->flag_init_done)); } static int iwl_pcie_nic_init(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int ret; /* nic_init */ spin_lock(&trans_pcie->irq_lock); ret = iwl_pcie_apm_init(trans); spin_unlock(&trans_pcie->irq_lock); if (ret) return ret; iwl_pcie_set_pwr(trans, false); iwl_op_mode_nic_config(trans->op_mode); /* Allocate the RX queue, or reset if it is already allocated */ iwl_pcie_rx_init(trans); /* Allocate or reset and init all Tx and Command queues */ if (iwl_pcie_tx_init(trans)) return -ENOMEM; if (trans->cfg->base_params->shadow_reg_enable) { /* enable shadow regs in HW */ iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF); IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n"); } return 0; } #define HW_READY_TIMEOUT (50) /* Note: returns poll_bit return value, which is >= 0 if success */ static int iwl_pcie_set_hw_ready(struct iwl_trans *trans) { int ret; iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_BIT_NIC_READY); /* See if we got it */ ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_BIT_NIC_READY, CSR_HW_IF_CONFIG_REG_BIT_NIC_READY, HW_READY_TIMEOUT); if (ret >= 0) iwl_set_bit(trans, CSR_MBOX_SET_REG, CSR_MBOX_SET_REG_OS_ALIVE); IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : ""); return ret; } /* Note: returns standard 0/-ERROR code */ int iwl_pcie_prepare_card_hw(struct iwl_trans *trans) { int ret; int t = 0; int iter; IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n"); ret = iwl_pcie_set_hw_ready(trans); /* If the card is ready, exit 0 */ if (ret >= 0) return 0; iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG, CSR_RESET_LINK_PWR_MGMT_DISABLED); usleep_range(1000, 2000); for (iter = 0; iter < 10; iter++) { /* If HW is not ready, prepare the conditions to check again */ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_PREPARE); do { ret = iwl_pcie_set_hw_ready(trans); if (ret >= 0) return 0; usleep_range(200, 1000); t += 200; } while (t < 150000); msleep(25); } IWL_ERR(trans, "Couldn't prepare the card\n"); return ret; } /* * ucode */ static void iwl_pcie_load_firmware_chunk_fh(struct iwl_trans *trans, u32 dst_addr, dma_addr_t phy_addr, u32 byte_cnt) { iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL), FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE); iwl_write32(trans, FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL), dst_addr); iwl_write32(trans, FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL), phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK); iwl_write32(trans, FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL), (iwl_get_dma_hi_addr(phy_addr) << FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt); iwl_write32(trans, FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL), BIT(FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM) | BIT(FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX) | FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID); iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL), FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE | FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD); } static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans, u32 dst_addr, dma_addr_t phy_addr, u32 byte_cnt) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); unsigned long flags; int ret; trans_pcie->ucode_write_complete = false; if (!iwl_trans_grab_nic_access(trans, &flags)) return -EIO; iwl_pcie_load_firmware_chunk_fh(trans, dst_addr, phy_addr, byte_cnt); iwl_trans_release_nic_access(trans, &flags); ret = 
wait_event_timeout(trans_pcie->ucode_write_waitq, trans_pcie->ucode_write_complete, 5 * HZ); if (!ret) { IWL_ERR(trans, "Failed to load firmware chunk!\n"); iwl_trans_pcie_dump_regs(trans); return -ETIMEDOUT; } return 0; } static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num, const struct fw_desc *section) { u8 *v_addr; dma_addr_t p_addr; u32 offset, chunk_sz = min_t(u32, FH_MEM_TB_MAX_LENGTH, section->len); int ret = 0; IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n", section_num); v_addr = dma_alloc_coherent(trans->dev, chunk_sz, &p_addr, GFP_KERNEL | __GFP_NOWARN); if (!v_addr) { IWL_DEBUG_INFO(trans, "Falling back to small chunks of DMA\n"); chunk_sz = PAGE_SIZE; v_addr = dma_alloc_coherent(trans->dev, chunk_sz, &p_addr, GFP_KERNEL); if (!v_addr) return -ENOMEM; } for (offset = 0; offset < section->len; offset += chunk_sz) { u32 copy_size, dst_addr; bool extended_addr = false; copy_size = min_t(u32, chunk_sz, section->len - offset); dst_addr = section->offset + offset; if (dst_addr >= IWL_FW_MEM_EXTENDED_START && dst_addr <= IWL_FW_MEM_EXTENDED_END) extended_addr = true; if (extended_addr) iwl_set_bits_prph(trans, LMPM_CHICK, LMPM_CHICK_EXTENDED_ADDR_SPACE); memcpy(v_addr, (u8 *)section->data + offset, copy_size); ret = iwl_pcie_load_firmware_chunk(trans, dst_addr, p_addr, copy_size); if (extended_addr) iwl_clear_bits_prph(trans, LMPM_CHICK, LMPM_CHICK_EXTENDED_ADDR_SPACE); if (ret) { IWL_ERR(trans, "Could not load the [%d] uCode section\n", section_num); break; } } dma_free_coherent(trans->dev, chunk_sz, v_addr, p_addr); return ret; } #ifdef CPTCFG_IWLWIFI_SUPPORT_DEBUG_OVERRIDES static void iwl_pcie_override_secure_boot_cfg(struct iwl_trans *trans) { u32 val; if (!trans->dbg_cfg.secure_boot_cfg) return; /* Verify AUX address space is not locked */ val = iwl_read_prph(trans, PREG_AUX_BUS_WPROT_0); if (val & BIT((SB_CFG_OVERRIDE_ADDR - SB_CFG_BASE_OVERRIDE) >> 10)) { IWL_ERR(trans, "AUX address space is locked for override, (AUX val=0x%x)\n", val); return; } /* Modify secure boot cfg flags */ iwl_write_prph(trans, SB_MODIFY_CFG_FLAG, trans->dbg_cfg.secure_boot_cfg); /* take ownership on the AUX IF */ iwl_set_bits_prph(trans, WFPM_CTRL_REG, WFPM_AUX_CTL_AUX_IF_MAC_OWNER_MSK); /* indicate secure boot cfg override */ iwl_set_bits_prph(trans, SB_CFG_OVERRIDE_ADDR, SB_CFG_OVERRIDE_ENABLE); return; } #endif static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans, const struct fw_img *image, int cpu, int *first_ucode_section) { int shift_param; int i, ret = 0, sec_num = 0x1; u32 val, last_read_idx = 0; if (cpu == 1) { shift_param = 0; *first_ucode_section = 0; } else { shift_param = 16; (*first_ucode_section)++; } for (i = *first_ucode_section; i < image->num_sec; i++) { last_read_idx = i; /* * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between * CPU1 to CPU2. * PAGING_SEPARATOR_SECTION delimiter - separate between * CPU2 non paged to CPU2 paging sec. 
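 * As a purely hypothetical example of such an image layout:
 *   sec[0..2]: CPU1 sections
 *   sec[3]:    CPU1_CPU2_SEPARATOR_SECTION
 *   sec[4..5]: CPU2 non-paged sections
 *   sec[6]:    PAGING_SEPARATOR_SECTION
 *   sec[7..]:  CPU2 paging sections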
*/ if (!image->sec[i].data || image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION || image->sec[i].offset == PAGING_SEPARATOR_SECTION) { IWL_DEBUG_FW(trans, "Break since Data not valid or Empty section, sec = %d\n", i); break; } ret = iwl_pcie_load_section(trans, i, &image->sec[i]); if (ret) return ret; /* Notify ucode of loaded section number and status */ val = iwl_read_direct32(trans, FH_UCODE_LOAD_STATUS); val = val | (sec_num << shift_param); iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, val); sec_num = (sec_num << 1) | 0x1; } *first_ucode_section = last_read_idx; iwl_enable_interrupts(trans); if (trans->cfg->use_tfh) { if (cpu == 1) iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS, 0xFFFF); else iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS, 0xFFFFFFFF); } else { if (cpu == 1) iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, 0xFFFF); else iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, 0xFFFFFFFF); } return 0; } static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans, const struct fw_img *image, int cpu, int *first_ucode_section) { int i, ret = 0; u32 last_read_idx = 0; if (cpu == 1) *first_ucode_section = 0; else (*first_ucode_section)++; for (i = *first_ucode_section; i < image->num_sec; i++) { last_read_idx = i; /* * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between * CPU1 to CPU2. * PAGING_SEPARATOR_SECTION delimiter - separate between * CPU2 non paged to CPU2 paging sec. */ if (!image->sec[i].data || image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION || image->sec[i].offset == PAGING_SEPARATOR_SECTION) { IWL_DEBUG_FW(trans, "Break since Data not valid or Empty section, sec = %d\n", i); break; } ret = iwl_pcie_load_section(trans, i, &image->sec[i]); if (ret) return ret; } *first_ucode_section = last_read_idx; return 0; } void iwl_pcie_apply_destination(struct iwl_trans *trans) { const struct iwl_fw_dbg_dest_tlv_v1 *dest = trans->dbg.dest_tlv; int i; if (trans->dbg.ini_valid) { if (!trans->dbg.num_blocks) return; IWL_DEBUG_FW(trans, "WRT: applying DRAM buffer[0] destination\n"); iwl_write_umac_prph(trans, MON_BUFF_BASE_ADDR_VER2, trans->dbg.fw_mon[0].physical >> MON_BUFF_SHIFT_VER2); iwl_write_umac_prph(trans, MON_BUFF_END_ADDR_VER2, (trans->dbg.fw_mon[0].physical + trans->dbg.fw_mon[0].size - 256) >> MON_BUFF_SHIFT_VER2); return; } IWL_INFO(trans, "Applying debug destination %s\n", get_fw_dbg_mode_string(dest->monitor_mode)); if (dest->monitor_mode == EXTERNAL_MODE) iwl_pcie_alloc_fw_monitor(trans, dest->size_power); else IWL_WARN(trans, "PCI should have external buffer debug\n"); for (i = 0; i < trans->dbg.n_dest_reg; i++) { u32 addr = le32_to_cpu(dest->reg_ops[i].addr); u32 val = le32_to_cpu(dest->reg_ops[i].val); switch (dest->reg_ops[i].op) { case CSR_ASSIGN: iwl_write32(trans, addr, val); break; case CSR_SETBIT: iwl_set_bit(trans, addr, BIT(val)); break; case CSR_CLEARBIT: iwl_clear_bit(trans, addr, BIT(val)); break; case PRPH_ASSIGN: iwl_write_prph(trans, addr, val); break; case PRPH_SETBIT: iwl_set_bits_prph(trans, addr, BIT(val)); break; case PRPH_CLEARBIT: iwl_clear_bits_prph(trans, addr, BIT(val)); break; case PRPH_BLOCKBIT: if (iwl_read_prph(trans, addr) & BIT(val)) { IWL_ERR(trans, "BIT(%u) in address 0x%x is 1, stopping FW configuration\n", val, addr); goto monitor; } break; default: IWL_ERR(trans, "FW debug - unknown OP %d\n", dest->reg_ops[i].op); break; } } monitor: if (dest->monitor_mode == EXTERNAL_MODE && trans->dbg.fw_mon[0].size) { iwl_write_prph(trans, le32_to_cpu(dest->base_reg), trans->dbg.fw_mon[0].physical >> dest->base_shift); if 
	    (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000)
			iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
				       (trans->dbg.fw_mon[0].physical +
					trans->dbg.fw_mon[0].size - 256) >>
						dest->end_shift);
		else
			iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
				       (trans->dbg.fw_mon[0].physical +
					trans->dbg.fw_mon[0].size) >>
						dest->end_shift);
	}
}

static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
				     const struct fw_img *image)
{
	int ret = 0;
	int first_ucode_section;

	IWL_DEBUG_FW(trans, "working with %s CPU\n",
		     image->is_dual_cpus ? "Dual" : "Single");

	/* load to FW the binary non secured sections of CPU1 */
	ret = iwl_pcie_load_cpu_sections(trans, image, 1,
					 &first_ucode_section);
	if (ret)
		return ret;

	if (image->is_dual_cpus) {
		/* set CPU2 header address */
		iwl_write_prph(trans,
			       LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
			       LMPM_SECURE_CPU2_HDR_MEM_SPACE);

		/* load to FW the binary sections of CPU2 */
		ret = iwl_pcie_load_cpu_sections(trans, image, 2,
						 &first_ucode_section);
		if (ret)
			return ret;
	}

	/* supported for 7000 only for the moment */
	if (iwlwifi_mod_params.fw_monitor &&
	    trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
		iwl_pcie_alloc_fw_monitor(trans, 0);

		if (trans->dbg.fw_mon[0].size) {
			iwl_write_prph(trans, MON_BUFF_BASE_ADDR,
				       trans->dbg.fw_mon[0].physical >> 4);
			iwl_write_prph(trans, MON_BUFF_END_ADDR,
				       (trans->dbg.fw_mon[0].physical +
					trans->dbg.fw_mon[0].size) >> 4);
		}
	} else if (iwl_pcie_dbg_on(trans)) {
		iwl_pcie_apply_destination(trans);
	}

#ifdef CPTCFG_IWLWIFI_DEVICE_TESTMODE
	iwl_dnt_configure(trans, image);
#endif

	iwl_enable_interrupts(trans);

	/* release CPU reset */
	iwl_write32(trans, CSR_RESET, 0);

	return 0;
}

static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans,
					  const struct fw_img *image)
{
	int ret = 0;
	int first_ucode_section;

	IWL_DEBUG_FW(trans, "working with %s CPU\n",
		     image->is_dual_cpus ? "Dual" : "Single");

	if (iwl_pcie_dbg_on(trans))
		iwl_pcie_apply_destination(trans);

#ifdef CPTCFG_IWLWIFI_DEVICE_TESTMODE
	iwl_dnt_configure(trans, image);
#endif

#ifdef CPTCFG_IWLWIFI_SUPPORT_DEBUG_OVERRIDES
	iwl_pcie_override_secure_boot_cfg(trans);
#endif

	IWL_DEBUG_POWER(trans, "Original WFPM value = 0x%08X\n",
			iwl_read_prph(trans, WFPM_GP2));

	/*
	 * Set default value. On resume reading the values that were
	 * zeroed can provide debug data on the resume flow.
	 * This is for debugging only and has no functional impact.
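	 * (Hypothetical example: reading WFPM_GP2 back as 0x01010101 on
	 * resume suggests the register kept its content across suspend,
	 * while a partially zeroed value hints at where power was lost.)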
*/ iwl_write_prph(trans, WFPM_GP2, 0x01010101); /* configure the ucode to be ready to get the secured image */ /* release CPU reset */ iwl_write_prph(trans, RELEASE_CPU_RESET, RELEASE_CPU_RESET_BIT); /* load to FW the binary Secured sections of CPU1 */ ret = iwl_pcie_load_cpu_sections_8000(trans, image, 1, &first_ucode_section); if (ret) return ret; /* load to FW the binary sections of CPU2 */ return iwl_pcie_load_cpu_sections_8000(trans, image, 2, &first_ucode_section); } bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); bool hw_rfkill = iwl_is_rfkill_set(trans); bool prev = test_bit(STATUS_RFKILL_OPMODE, &trans->status); bool report; if (hw_rfkill) { set_bit(STATUS_RFKILL_HW, &trans->status); set_bit(STATUS_RFKILL_OPMODE, &trans->status); } else { clear_bit(STATUS_RFKILL_HW, &trans->status); if (trans_pcie->opmode_down) clear_bit(STATUS_RFKILL_OPMODE, &trans->status); } report = test_bit(STATUS_RFKILL_OPMODE, &trans->status); if (prev != report) iwl_trans_pcie_rf_kill(trans, report); return hw_rfkill; } struct iwl_causes_list { u32 cause_num; u32 mask_reg; u8 addr; }; static struct iwl_causes_list causes_list[] = { {MSIX_FH_INT_CAUSES_D2S_CH0_NUM, CSR_MSIX_FH_INT_MASK_AD, 0}, {MSIX_FH_INT_CAUSES_D2S_CH1_NUM, CSR_MSIX_FH_INT_MASK_AD, 0x1}, {MSIX_FH_INT_CAUSES_S2D, CSR_MSIX_FH_INT_MASK_AD, 0x3}, {MSIX_FH_INT_CAUSES_FH_ERR, CSR_MSIX_FH_INT_MASK_AD, 0x5}, {MSIX_HW_INT_CAUSES_REG_ALIVE, CSR_MSIX_HW_INT_MASK_AD, 0x10}, {MSIX_HW_INT_CAUSES_REG_WAKEUP, CSR_MSIX_HW_INT_MASK_AD, 0x11}, {MSIX_HW_INT_CAUSES_REG_IML, CSR_MSIX_HW_INT_MASK_AD, 0x12}, {MSIX_HW_INT_CAUSES_REG_CT_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x16}, {MSIX_HW_INT_CAUSES_REG_RF_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x17}, {MSIX_HW_INT_CAUSES_REG_PERIODIC, CSR_MSIX_HW_INT_MASK_AD, 0x18}, {MSIX_HW_INT_CAUSES_REG_SW_ERR, CSR_MSIX_HW_INT_MASK_AD, 0x29}, {MSIX_HW_INT_CAUSES_REG_SCD, CSR_MSIX_HW_INT_MASK_AD, 0x2A}, {MSIX_HW_INT_CAUSES_REG_FH_TX, CSR_MSIX_HW_INT_MASK_AD, 0x2B}, {MSIX_HW_INT_CAUSES_REG_HW_ERR, CSR_MSIX_HW_INT_MASK_AD, 0x2D}, {MSIX_HW_INT_CAUSES_REG_HAP, CSR_MSIX_HW_INT_MASK_AD, 0x2E}, }; static struct iwl_causes_list causes_list_v2[] = { {MSIX_FH_INT_CAUSES_D2S_CH0_NUM, CSR_MSIX_FH_INT_MASK_AD, 0}, {MSIX_FH_INT_CAUSES_D2S_CH1_NUM, CSR_MSIX_FH_INT_MASK_AD, 0x1}, {MSIX_FH_INT_CAUSES_S2D, CSR_MSIX_FH_INT_MASK_AD, 0x3}, {MSIX_FH_INT_CAUSES_FH_ERR, CSR_MSIX_FH_INT_MASK_AD, 0x5}, {MSIX_HW_INT_CAUSES_REG_ALIVE, CSR_MSIX_HW_INT_MASK_AD, 0x10}, {MSIX_HW_INT_CAUSES_REG_IPC, CSR_MSIX_HW_INT_MASK_AD, 0x11}, {MSIX_HW_INT_CAUSES_REG_SW_ERR_V2, CSR_MSIX_HW_INT_MASK_AD, 0x15}, {MSIX_HW_INT_CAUSES_REG_CT_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x16}, {MSIX_HW_INT_CAUSES_REG_RF_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x17}, {MSIX_HW_INT_CAUSES_REG_PERIODIC, CSR_MSIX_HW_INT_MASK_AD, 0x18}, {MSIX_HW_INT_CAUSES_REG_SCD, CSR_MSIX_HW_INT_MASK_AD, 0x2A}, {MSIX_HW_INT_CAUSES_REG_FH_TX, CSR_MSIX_HW_INT_MASK_AD, 0x2B}, {MSIX_HW_INT_CAUSES_REG_HW_ERR, CSR_MSIX_HW_INT_MASK_AD, 0x2D}, {MSIX_HW_INT_CAUSES_REG_HAP, CSR_MSIX_HW_INT_MASK_AD, 0x2E}, }; static void iwl_pcie_map_non_rx_causes(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int val = trans_pcie->def_irq | MSIX_NON_AUTO_CLEAR_CAUSE; int i, arr_size = (trans->cfg->device_family != IWL_DEVICE_FAMILY_22560) ? ARRAY_SIZE(causes_list) : ARRAY_SIZE(causes_list_v2); /* * Access all non RX causes and map them to the default irq. 
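 * (The default irq is the last allocated MSI-X vector when every
 * requested vector was granted, and vector 0 otherwise - see
 * iwl_pcie_set_interrupt_capa().)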
* In case we are missing at least one interrupt vector, * the first interrupt vector will serve non-RX and FBQ causes. */ for (i = 0; i < arr_size; i++) { struct iwl_causes_list *causes = (trans->cfg->device_family != IWL_DEVICE_FAMILY_22560) ? causes_list : causes_list_v2; iwl_write8(trans, CSR_MSIX_IVAR(causes[i].addr), val); iwl_clear_bit(trans, causes[i].mask_reg, causes[i].cause_num); } } static void iwl_pcie_map_rx_causes(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); u32 offset = trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0; u32 val, idx; /* * The first RX queue - fallback queue, which is designated for * management frame, command responses etc, is always mapped to the * first interrupt vector. The other RX queues are mapped to * the other (N - 2) interrupt vectors. */ val = BIT(MSIX_FH_INT_CAUSES_Q(0)); for (idx = 1; idx < trans->num_rx_queues; idx++) { iwl_write8(trans, CSR_MSIX_RX_IVAR(idx), MSIX_FH_INT_CAUSES_Q(idx - offset)); val |= BIT(MSIX_FH_INT_CAUSES_Q(idx)); } iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~val); val = MSIX_FH_INT_CAUSES_Q(0); if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX) val |= MSIX_NON_AUTO_CLEAR_CAUSE; iwl_write8(trans, CSR_MSIX_RX_IVAR(0), val); if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS) iwl_write8(trans, CSR_MSIX_RX_IVAR(1), val); } void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie) { struct iwl_trans *trans = trans_pcie->trans; if (!trans_pcie->msix_enabled) { if (trans->cfg->mq_rx_supported && test_bit(STATUS_DEVICE_ENABLED, &trans->status)) iwl_write_umac_prph(trans, UREG_CHICK, UREG_CHICK_MSI_ENABLE); return; } /* * The IVAR table needs to be configured again after reset, * but if the device is disabled, we can't write to * prph. */ if (test_bit(STATUS_DEVICE_ENABLED, &trans->status)) iwl_write_umac_prph(trans, UREG_CHICK, UREG_CHICK_MSIX_ENABLE); /* * Each cause from the causes list above and the RX causes is * represented as a byte in the IVAR table. The first nibble * represents the bound interrupt vector of the cause, the second * represents no auto clear for this cause. This will be set if its * interrupt vector is bound to serve other causes. */ iwl_pcie_map_rx_causes(trans); iwl_pcie_map_non_rx_causes(trans); } static void iwl_pcie_init_msix(struct iwl_trans_pcie *trans_pcie) { struct iwl_trans *trans = trans_pcie->trans; iwl_pcie_conf_msix_hw(trans_pcie); if (!trans_pcie->msix_enabled) return; trans_pcie->fh_init_mask = ~iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD); trans_pcie->fh_mask = trans_pcie->fh_init_mask; trans_pcie->hw_init_mask = ~iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD); trans_pcie->hw_mask = trans_pcie->hw_init_mask; } static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); lockdep_assert_held(&trans_pcie->mutex); if (trans_pcie->is_down) return; trans_pcie->is_down = true; /* tell the device to stop sending interrupts */ iwl_disable_interrupts(trans); /* device going down, Stop using ICT table */ iwl_pcie_disable_ict(trans); /* * If a HW restart happens during firmware loading, * then the firmware loading might call this function * and later it might be called again due to the * restart. So don't process again if the device is * already dead. 
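 * (This is why DEVICE_ENABLED is test-and-cleared below: a second
 * caller finds the bit already clear and skips the stop sequence.)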
	 */
	if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
		IWL_DEBUG_INFO(trans,
			       "DEVICE_ENABLED bit was set and is now cleared\n");
		iwl_pcie_tx_stop(trans);
		iwl_pcie_rx_stop(trans);

		/* Power-down device's busmaster DMA clocks */
		if (!trans->cfg->apmg_not_supported) {
			iwl_write_prph(trans, APMG_CLK_DIS_REG,
				       APMG_CLK_VAL_DMA_CLK_RQT);
			udelay(5);
		}
	}

	/* Make sure (redundant) we've released our request to stay awake */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      BIT(trans->cfg->csr->flag_mac_access_req));

	/* Stop the device, and put it in low power state */
	iwl_pcie_apm_stop(trans, false);

	iwl_trans_pcie_sw_reset(trans);

	/*
	 * Upon stop, the IVAR table gets erased, so msi-x won't
	 * work. This causes a bug in RF-KILL flows, since the interrupt
	 * that enables radio won't fire on the correct irq, and the
	 * driver won't be able to handle the interrupt.
	 * Configure the IVAR table again after reset.
	 */
	iwl_pcie_conf_msix_hw(trans_pcie);

	/*
	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * This is a bug in certain versions of the hardware.
	 * Certain devices also keep sending HW RF kill interrupt all
	 * the time, unless the interrupt is ACKed even if the interrupt
	 * should be masked. Re-ACK all the interrupts here.
	 */
	iwl_disable_interrupts(trans);

	/* clear all status bits */
	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
	clear_bit(STATUS_INT_ENABLED, &trans->status);
	clear_bit(STATUS_TPOWER_PMI, &trans->status);

	/*
	 * Even if we stop the HW, we still want the RF kill
	 * interrupt
	 */
	iwl_enable_rfkill_int(trans);

	/* re-take ownership to prevent other users from stealing the device */
	iwl_pcie_prepare_card_hw(trans);
}

void iwl_pcie_synchronize_irqs(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans_pcie->msix_enabled) {
		int i;

		for (i = 0; i < trans_pcie->alloc_vecs; i++)
			synchronize_irq(trans_pcie->msix_entries[i].vector);
	} else {
		synchronize_irq(trans_pcie->pci_dev->irq);
	}
}

static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
				   const struct fw_img *fw, bool run_in_rfkill)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool hw_rfkill;
	int ret;

	/* This may fail if AMT took ownership of the device */
	if (iwl_pcie_prepare_card_hw(trans)) {
		IWL_WARN(trans, "Exit HW not ready\n");
		ret = -EIO;
		goto out;
	}

	iwl_enable_rfkill_int(trans);

	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

	/*
	 * We enabled the RF-Kill interrupt and the handler may very
	 * well be running. Disable the interrupts to make sure no other
	 * interrupt can be fired.
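	 * (iwl_pcie_synchronize_irqs() below then waits for any handler
	 * instance that has already started to finish.)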
	 */
	iwl_disable_interrupts(trans);

	/* Make sure it finished running */
	iwl_pcie_synchronize_irqs(trans);

	mutex_lock(&trans_pcie->mutex);

	/* If platform's RF_KILL switch is NOT set to KILL */
	hw_rfkill = iwl_pcie_check_hw_rf_kill(trans);
	if (hw_rfkill && !run_in_rfkill) {
		ret = -ERFKILL;
		goto out;
	}

	/* Someone called stop_device, don't try to start_fw */
	if (trans_pcie->is_down) {
		IWL_WARN(trans,
			 "Can't start_fw since the HW hasn't been started\n");
		ret = -EIO;
		goto out;
	}

	/* make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

	ret = iwl_pcie_nic_init(trans);
	if (ret) {
		IWL_ERR(trans, "Unable to init nic\n");
		goto out;
	}

	/*
	 * Now, we load the firmware and don't want to be interrupted, even
	 * by the RF-Kill interrupt (hence mask all interrupts besides the
	 * FH_TX interrupt which is needed to load the firmware). If the
	 * RF-Kill switch is toggled, we will find out after having loaded
	 * the firmware and return the proper value to the caller.
	 */
	iwl_enable_fw_load_int(trans);

	/* really make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	/* Load the given image to the HW */
	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000)
		ret = iwl_pcie_load_given_ucode_8000(trans, fw);
	else
		ret = iwl_pcie_load_given_ucode(trans, fw);

	/* re-check RF-Kill state since we may have missed the interrupt */
	hw_rfkill = iwl_pcie_check_hw_rf_kill(trans);
	if (hw_rfkill && !run_in_rfkill)
		ret = -ERFKILL;

out:
	mutex_unlock(&trans_pcie->mutex);
	return ret;
}

static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
	iwl_pcie_reset_ict(trans);
	iwl_pcie_tx_start(trans, scd_addr);
}

void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
				       bool was_in_rfkill)
{
	bool hw_rfkill;

	/*
	 * Check again since the RF kill state may have changed while
	 * all the interrupts were disabled, in this case we couldn't
	 * receive the RF kill interrupt and update the state in the
	 * op_mode.
	 * Don't call the op_mode if the rfkill state hasn't changed.
	 * This allows the op_mode to call stop_device from the rfkill
	 * notification without endless recursion. Under very rare
	 * circumstances, we might have a small recursion if the rfkill
	 * state changed exactly now while we were called from stop_device.
	 * This is very unlikely but can happen and is supported.
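	 * (Concretely: stop_device -> rfkill notification -> op_mode calls
	 * stop_device again; on the second pass the state no longer
	 * changes, so the recursion terminates after one extra level.)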
*/ hw_rfkill = iwl_is_rfkill_set(trans); if (hw_rfkill) { set_bit(STATUS_RFKILL_HW, &trans->status); set_bit(STATUS_RFKILL_OPMODE, &trans->status); } else { clear_bit(STATUS_RFKILL_HW, &trans->status); clear_bit(STATUS_RFKILL_OPMODE, &trans->status); } if (hw_rfkill != was_in_rfkill) iwl_trans_pcie_rf_kill(trans, hw_rfkill); } static void iwl_trans_pcie_stop_device(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); bool was_in_rfkill; mutex_lock(&trans_pcie->mutex); trans_pcie->opmode_down = true; was_in_rfkill = test_bit(STATUS_RFKILL_OPMODE, &trans->status); _iwl_trans_pcie_stop_device(trans); iwl_trans_pcie_handle_stop_rfkill(trans, was_in_rfkill); mutex_unlock(&trans_pcie->mutex); } void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state) { struct iwl_trans_pcie __maybe_unused *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); lockdep_assert_held(&trans_pcie->mutex); IWL_WARN(trans, "reporting RF_KILL (radio %s)\n", state ? "disabled" : "enabled"); if (iwl_op_mode_hw_rf_kill(trans->op_mode, state)) { if (trans->cfg->gen2) _iwl_trans_pcie_gen2_stop_device(trans); else _iwl_trans_pcie_stop_device(trans); } } static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test, bool reset) { if (!reset) { /* Enable persistence mode to avoid reset */ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_PERSIST_MODE); } iwl_disable_interrupts(trans); /* * in testing mode, the host stays awake and the * hardware won't be reset (not even partially) */ if (test) return; iwl_pcie_disable_ict(trans); iwl_pcie_synchronize_irqs(trans); iwl_clear_bit(trans, CSR_GP_CNTRL, BIT(trans->cfg->csr->flag_mac_access_req)); iwl_clear_bit(trans, CSR_GP_CNTRL, BIT(trans->cfg->csr->flag_init_done)); if (reset) { /* * reset TX queues -- some of their registers reset during S3 * so if we don't reset everything here the D3 image would try * to execute some invalid memory upon resume */ iwl_trans_pcie_tx_reset(trans); } iwl_pcie_set_pwr(trans, true); } static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, enum iwl_d3_status *status, bool test, bool reset) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); u32 val; int ret; if (test) { iwl_enable_interrupts(trans); *status = IWL_D3_STATUS_ALIVE; return 0; } iwl_set_bit(trans, CSR_GP_CNTRL, BIT(trans->cfg->csr->flag_mac_access_req)); ret = iwl_finish_nic_init(trans); if (ret) return ret; /* * Reconfigure IVAR table in case of MSIX or reset ict table in * MSI mode since HW reset erased it. * Also enables interrupts - none will happen as * the device doesn't know we're waking it up, only when * the opmode actually tells it after this call. 
*/ iwl_pcie_conf_msix_hw(trans_pcie); if (!trans_pcie->msix_enabled) iwl_pcie_reset_ict(trans); iwl_enable_interrupts(trans); iwl_pcie_set_pwr(trans, false); if (!reset) { iwl_clear_bit(trans, CSR_GP_CNTRL, BIT(trans->cfg->csr->flag_mac_access_req)); } else { iwl_trans_pcie_tx_reset(trans); ret = iwl_pcie_rx_init(trans); if (ret) { IWL_ERR(trans, "Failed to resume the device (RX reset)\n"); return ret; } } IWL_DEBUG_POWER(trans, "WFPM value upon resume = 0x%08X\n", iwl_read_umac_prph(trans, WFPM_GP2)); val = iwl_read32(trans, CSR_RESET); if (val & CSR_RESET_REG_FLAG_NEVO_RESET) *status = IWL_D3_STATUS_RESET; else *status = IWL_D3_STATUS_ALIVE; return 0; } static void iwl_pcie_set_interrupt_capa(struct pci_dev *pdev, struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int max_irqs, num_irqs, i, ret; u16 pci_cmd; if (!trans->cfg->mq_rx_supported || iwlwifi_mod_params.disable_msix) goto enable_msi; max_irqs = min_t(u32, num_online_cpus() + 2, IWL_MAX_RX_HW_QUEUES); for (i = 0; i < max_irqs; i++) trans_pcie->msix_entries[i].entry = i; num_irqs = pci_enable_msix_range(pdev, trans_pcie->msix_entries, MSIX_MIN_INTERRUPT_VECTORS, max_irqs); if (num_irqs < 0) { IWL_DEBUG_INFO(trans, "Failed to enable msi-x mode (ret %d). Moving to msi mode.\n", num_irqs); goto enable_msi; } trans_pcie->def_irq = (num_irqs == max_irqs) ? num_irqs - 1 : 0; IWL_DEBUG_INFO(trans, "MSI-X enabled. %d interrupt vectors were allocated\n", num_irqs); /* * In case the OS provides fewer interrupts than requested, different * causes will share the same interrupt vector as follows: * One interrupt less: non rx causes shared with FBQ. * Two interrupts less: non rx causes shared with FBQ and RSS. * More than two interrupts: we will use fewer RSS queues. */ if (num_irqs <= max_irqs - 2) { trans_pcie->trans->num_rx_queues = num_irqs + 1; trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX | IWL_SHARED_IRQ_FIRST_RSS; } else if (num_irqs == max_irqs - 1) { trans_pcie->trans->num_rx_queues = num_irqs; trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX; } else { trans_pcie->trans->num_rx_queues = num_irqs - 1; } WARN_ON(trans_pcie->trans->num_rx_queues > IWL_MAX_RX_HW_QUEUES); trans_pcie->alloc_vecs = num_irqs; trans_pcie->msix_enabled = true; return; enable_msi: ret = pci_enable_msi(pdev); if (ret) { dev_err(&pdev->dev, "pci_enable_msi failed - %d\n", ret); /* enable rfkill interrupt: hw bug w/a */ pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd); if (pci_cmd & PCI_COMMAND_INTX_DISABLE) { pci_cmd &= ~PCI_COMMAND_INTX_DISABLE; pci_write_config_word(pdev, PCI_COMMAND, pci_cmd); } } } static void iwl_pcie_irq_set_affinity(struct iwl_trans *trans) { int iter_rx_q, i, ret, cpu, offset; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); i = trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 0 : 1; iter_rx_q = trans_pcie->trans->num_rx_queues - 1 + i; offset = 1 + i; for (; i < iter_rx_q ; i++) { /* * Get the cpu prior to the place to search * (i.e. return will be > i - 1). 
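 * For example, with num_rx_queues == 4, contiguous online CPUs and
 * no vector shared with RSS (i starts at 1, offset == 2), MSI-X
 * entries 1..3 end up pinned to CPUs 0..2 respectively.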
*/ cpu = cpumask_next(i - offset, cpu_online_mask); cpumask_set_cpu(cpu, &trans_pcie->affinity_mask[i]); ret = irq_set_affinity_hint(trans_pcie->msix_entries[i].vector, &trans_pcie->affinity_mask[i]); if (ret) IWL_ERR(trans_pcie->trans, "Failed to set affinity mask for IRQ %d\n", i); } } static int iwl_pcie_init_msix_handler(struct pci_dev *pdev, struct iwl_trans_pcie *trans_pcie) { int i; for (i = 0; i < trans_pcie->alloc_vecs; i++) { int ret; struct msix_entry *msix_entry; const char *qname = queue_name(&pdev->dev, trans_pcie, i); if (!qname) return -ENOMEM; msix_entry = &trans_pcie->msix_entries[i]; ret = devm_request_threaded_irq(&pdev->dev, msix_entry->vector, iwl_pcie_msix_isr, (i == trans_pcie->def_irq) ? iwl_pcie_irq_msix_handler : iwl_pcie_irq_rx_msix_handler, IRQF_SHARED, qname, msix_entry); if (ret) { IWL_ERR(trans_pcie->trans, "Error allocating IRQ %d\n", i); return ret; } } iwl_pcie_irq_set_affinity(trans_pcie->trans); return 0; } static int iwl_trans_pcie_clear_persistence_bit(struct iwl_trans *trans) { u32 hpm, wprot; switch (trans->cfg->device_family) { case IWL_DEVICE_FAMILY_9000: wprot = PREG_PRPH_WPROT_9000; break; case IWL_DEVICE_FAMILY_22000: wprot = PREG_PRPH_WPROT_22000; break; default: return 0; } hpm = iwl_read_umac_prph_no_grab(trans, HPM_DEBUG); if (hpm != 0xa5a5a5a0 && (hpm & PERSISTENCE_BIT)) { u32 wprot_val = iwl_read_umac_prph_no_grab(trans, wprot); if (wprot_val & PREG_WFPM_ACCESS) { IWL_ERR(trans, "Error, can not clear persistence bit\n"); return -EPERM; } iwl_write_umac_prph_no_grab(trans, HPM_DEBUG, hpm & ~PERSISTENCE_BIT); } return 0; } static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int err; lockdep_assert_held(&trans_pcie->mutex); err = iwl_pcie_prepare_card_hw(trans); if (err) { IWL_ERR(trans, "Error while preparing HW: %d\n", err); return err; } err = iwl_trans_pcie_clear_persistence_bit(trans); if (err) return err; iwl_trans_pcie_sw_reset(trans); err = iwl_pcie_apm_init(trans); if (err) return err; iwl_pcie_init_msix(trans_pcie); /* From now on, the op_mode will be kept updated about RF kill state */ iwl_enable_rfkill_int(trans); trans_pcie->opmode_down = false; /* Set is_down to false here so that...*/ trans_pcie->is_down = false; /* ...rfkill can call stop_device and set it false if needed */ iwl_pcie_check_hw_rf_kill(trans); return 0; } static int iwl_trans_pcie_start_hw(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int ret; mutex_lock(&trans_pcie->mutex); ret = _iwl_trans_pcie_start_hw(trans); mutex_unlock(&trans_pcie->mutex); return ret; } static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); mutex_lock(&trans_pcie->mutex); /* disable interrupts - don't enable HW RF kill interrupt */ iwl_disable_interrupts(trans); iwl_pcie_apm_stop(trans, true); iwl_disable_interrupts(trans); iwl_pcie_disable_ict(trans); mutex_unlock(&trans_pcie->mutex); iwl_pcie_synchronize_irqs(trans); } static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val) { writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs); } static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val) { writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs); } static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs) { return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs); } static u32 iwl_trans_pcie_prph_msk(struct 
iwl_trans *trans) { if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) return 0x00FFFFFF; else return 0x000FFFFF; } static u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg) { u32 mask = iwl_trans_pcie_prph_msk(trans); iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_RADDR, ((reg & mask) | (3 << 24))); return iwl_trans_pcie_read32(trans, HBUS_TARG_PRPH_RDAT); } static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr, u32 val) { u32 mask = iwl_trans_pcie_prph_msk(trans); iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WADDR, ((addr & mask) | (3 << 24))); iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val); } static void iwl_trans_pcie_configure(struct iwl_trans *trans, const struct iwl_trans_config *trans_cfg) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); trans_pcie->cmd_queue = trans_cfg->cmd_queue; trans_pcie->cmd_fifo = trans_cfg->cmd_fifo; trans_pcie->cmd_q_wdg_timeout = trans_cfg->cmd_q_wdg_timeout; if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS)) trans_pcie->n_no_reclaim_cmds = 0; else trans_pcie->n_no_reclaim_cmds = trans_cfg->n_no_reclaim_cmds; if (trans_pcie->n_no_reclaim_cmds) memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds, trans_pcie->n_no_reclaim_cmds * sizeof(u8)); trans_pcie->rx_buf_size = trans_cfg->rx_buf_size; trans_pcie->rx_page_order = iwl_trans_get_rb_size_order(trans_pcie->rx_buf_size); trans_pcie->bc_table_dword = trans_cfg->bc_table_dword; trans_pcie->scd_set_active = trans_cfg->scd_set_active; trans_pcie->sw_csum_tx = trans_cfg->sw_csum_tx; trans_pcie->page_offs = trans_cfg->cb_data_offs; trans_pcie->dev_cmd_offs = trans_cfg->cb_data_offs + sizeof(void *); trans->command_groups = trans_cfg->command_groups; trans->command_groups_size = trans_cfg->command_groups_size; /* Initialize NAPI here - it should be before registering to mac80211 * in the opmode but after the HW struct is allocated. * As this function may be called again in some corner cases don't * do anything if NAPI was already initialized. 
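 * (init_dummy_netdev() sets reg_state to NETREG_DUMMY, so this check
 * also keeps the netdev from being set up twice; the dummy netdev is
 * never registered, it only gives NAPI a struct net_device to hang
 * off.)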
*/ if (trans_pcie->napi_dev.reg_state != NETREG_DUMMY) init_dummy_netdev(&trans_pcie->napi_dev); } void iwl_trans_pcie_free(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int i; iwl_pcie_synchronize_irqs(trans); if (trans->cfg->gen2) iwl_pcie_gen2_tx_free(trans); else iwl_pcie_tx_free(trans); iwl_pcie_rx_free(trans); if (trans_pcie->rba.alloc_wq) { destroy_workqueue(trans_pcie->rba.alloc_wq); trans_pcie->rba.alloc_wq = NULL; } if (trans_pcie->msix_enabled) { for (i = 0; i < trans_pcie->alloc_vecs; i++) { irq_set_affinity_hint( trans_pcie->msix_entries[i].vector, NULL); } trans_pcie->msix_enabled = false; } else { iwl_pcie_free_ict(trans); } iwl_pcie_free_fw_monitor(trans); for_each_possible_cpu(i) { struct iwl_tso_hdr_page *p = per_cpu_ptr(trans_pcie->tso_hdr_page, i); if (p->page) __free_page(p->page); } free_percpu(trans_pcie->tso_hdr_page); mutex_destroy(&trans_pcie->mutex); iwl_trans_free(trans); } static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state) { if (state) set_bit(STATUS_TPOWER_PMI, &trans->status); else clear_bit(STATUS_TPOWER_PMI, &trans->status); } struct iwl_trans_pcie_removal { struct pci_dev *pdev; struct work_struct work; }; static void iwl_trans_pcie_removal_wk(struct work_struct *wk) { struct iwl_trans_pcie_removal *removal = container_of(wk, struct iwl_trans_pcie_removal, work); struct pci_dev *pdev = removal->pdev; #if LINUX_VERSION_IS_LESS(3,14,0) dev_err(&pdev->dev, "Device gone - can't remove on old kernels.\n"); #else static char *prop[] = {"EVENT=INACCESSIBLE", NULL}; dev_err(&pdev->dev, "Device gone - attempting removal\n"); kobject_uevent_env(&pdev->dev.kobj, KOBJ_CHANGE, prop); pci_lock_rescan_remove(); pci_dev_put(pdev); pci_stop_and_remove_bus_device(pdev); pci_unlock_rescan_remove(); #endif /* LINUX_VERSION_IS_LESS(3,14,0) */ kfree(removal); module_put(THIS_MODULE); } static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, unsigned long *flags) { int ret; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); spin_lock_irqsave(&trans_pcie->reg_lock, *flags); if (trans_pcie->cmd_hold_nic_awake) goto out; /* this bit wakes up the NIC */ __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, BIT(trans->cfg->csr->flag_mac_access_req)); if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000) udelay(2); /* * These bits say the device is running, and should keep running for * at least a short while (at least as long as MAC_ACCESS_REQ stays 1), * but they do not indicate that embedded SRAM is restored yet; * HW with volatile SRAM must save/restore contents to/from * host DRAM when sleeping/waking for power-saving. * Each direction takes approximately 1/4 millisecond; with this * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a * series of register accesses are expected (e.g. reading Event Log), * to keep device from sleeping. * * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that * SRAM is okay/restored. We don't check that here because this call * is just for hardware register access; but GP1 MAC_SLEEP * check is a good idea before accessing the SRAM of HW with * volatile SRAM (e.g. reading Event Log). * * 5000 series and later (including 1000 series) have non-volatile SRAM, * and do not save/restore SRAM when power cycling. 
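 * (With roughly 1/4 millisecond per direction, a full sleep/wake SRAM
 * round trip costs about half a millisecond - hence the generous
 * 15000 usec poll timeout below.)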
*/ ret = iwl_poll_bit(trans, CSR_GP_CNTRL, BIT(trans->cfg->csr->flag_val_mac_access_en), (BIT(trans->cfg->csr->flag_mac_clock_ready) | CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000); if (unlikely(ret < 0)) { u32 cntrl = iwl_read32(trans, CSR_GP_CNTRL); WARN_ONCE(1, "Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n", cntrl); iwl_trans_pcie_dump_regs(trans); if (iwlwifi_mod_params.remove_when_gone && cntrl == ~0U) { struct iwl_trans_pcie_removal *removal; if (test_bit(STATUS_TRANS_DEAD, &trans->status)) goto err; IWL_ERR(trans, "Device gone - scheduling removal!\n"); /* * get a module reference to avoid doing this * while unloading anyway and to avoid * scheduling a work with code that's being * removed. */ if (!try_module_get(THIS_MODULE)) { IWL_ERR(trans, "Module is being unloaded - abort\n"); goto err; } removal = kzalloc(sizeof(*removal), GFP_ATOMIC); if (!removal) { module_put(THIS_MODULE); goto err; } /* * we don't need to clear this flag, because * the trans will be freed and reallocated. */ set_bit(STATUS_TRANS_DEAD, &trans->status); removal->pdev = to_pci_dev(trans->dev); INIT_WORK(&removal->work, iwl_trans_pcie_removal_wk); pci_dev_get(removal->pdev); schedule_work(&removal->work); } else { iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI); } err: spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags); return false; } out: /* * Fool sparse by faking we release the lock - sparse will * track nic_access anyway. */ __release(&trans_pcie->reg_lock); return true; } static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans, unsigned long *flags) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); lockdep_assert_held(&trans_pcie->reg_lock); /* * Fool sparse by faking we acquiring the lock - sparse will * track nic_access anyway. */ __acquire(&trans_pcie->reg_lock); if (trans_pcie->cmd_hold_nic_awake) goto out; __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, BIT(trans->cfg->csr->flag_mac_access_req)); /* * Above we read the CSR_GP_CNTRL register, which will flush * any previous writes, but we need the write that clears the * MAC_ACCESS_REQ bit to be performed before any other writes * scheduled on different CPUs (after we drop reg_lock). */ out: spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags); } static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr, void *buf, int dwords) { unsigned long flags; int offs, ret = 0; u32 *vals = buf; if (iwl_trans_grab_nic_access(trans, &flags)) { iwl_write32(trans, HBUS_TARG_MEM_RADDR, addr); for (offs = 0; offs < dwords; offs++) vals[offs] = iwl_read32(trans, HBUS_TARG_MEM_RDAT); iwl_trans_release_nic_access(trans, &flags); } else { ret = -EBUSY; } return ret; } static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr, const void *buf, int dwords) { unsigned long flags; int offs, ret = 0; const u32 *vals = buf; if (iwl_trans_grab_nic_access(trans, &flags)) { iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr); for (offs = 0; offs < dwords; offs++) iwl_write32(trans, HBUS_TARG_MEM_WDAT, vals ? 
vals[offs] : 0); iwl_trans_release_nic_access(trans, &flags); } else { ret = -EBUSY; } return ret; } static void iwl_trans_pcie_freeze_txq_timer(struct iwl_trans *trans, unsigned long txqs, bool freeze) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int queue; for_each_set_bit(queue, &txqs, BITS_PER_LONG) { struct iwl_txq *txq = trans_pcie->txq[queue]; unsigned long now; spin_lock_bh(&txq->lock); now = jiffies; if (txq->frozen == freeze) goto next_queue; IWL_DEBUG_TX_QUEUES(trans, "%s TXQ %d\n", freeze ? "Freezing" : "Waking", queue); txq->frozen = freeze; if (txq->read_ptr == txq->write_ptr) goto next_queue; if (freeze) { if (unlikely(time_after(now, txq->stuck_timer.expires))) { /* * The timer should have fired, maybe it is * spinning right now on the lock. */ goto next_queue; } /* remember how long until the timer fires */ txq->frozen_expiry_remainder = txq->stuck_timer.expires - now; del_timer(&txq->stuck_timer); goto next_queue; } /* * Wake a non-empty queue -> arm timer with the * remainder before it froze */ mod_timer(&txq->stuck_timer, now + txq->frozen_expiry_remainder); next_queue: spin_unlock_bh(&txq->lock); } } static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int i; for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) { struct iwl_txq *txq = trans_pcie->txq[i]; if (i == trans_pcie->cmd_queue) continue; spin_lock_bh(&txq->lock); if (!block && !(WARN_ON_ONCE(!txq->block))) { txq->block--; if (!txq->block) { iwl_write32(trans, HBUS_TARG_WRPTR, txq->write_ptr | (i << 8)); } } else if (block) { txq->block++; } spin_unlock_bh(&txq->lock); } } #define IWL_FLUSH_WAIT_MS 2000 void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq) { u32 txq_id = txq->id; u32 status; bool active; u8 fifo; if (trans->cfg->use_tfh) { IWL_ERR(trans, "Queue %d is stuck %d %d\n", txq_id, txq->read_ptr, txq->write_ptr); /* TODO: access new SCD registers and dump them */ return; } status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id)); fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7; active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE)); IWL_ERR(trans, "Queue %d is %sactive on fifo %d and stuck for %u ms. SW [%d, %d] HW [%d, %d] FH TRB=0x0%x\n", txq_id, active ? 
"" : "in", fifo, jiffies_to_msecs(txq->wd_timeout), txq->read_ptr, txq->write_ptr, iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) & (trans->cfg->base_params->max_tfd_queue_size - 1), iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) & (trans->cfg->base_params->max_tfd_queue_size - 1), iwl_read_direct32(trans, FH_TX_TRB_REG(fifo))); } static int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue, struct iwl_trans_rxq_dma_data *data) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); if (queue >= trans->num_rx_queues || !trans_pcie->rxq) return -EINVAL; data->fr_bd_cb = trans_pcie->rxq[queue].bd_dma; data->urbd_stts_wrptr = trans_pcie->rxq[queue].rb_stts_dma; data->ur_bd_cb = trans_pcie->rxq[queue].used_bd_dma; data->fr_bd_wid = 0; return 0; } static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_txq *txq; unsigned long now = jiffies; bool overflow_tx; u8 wr_ptr; /* Make sure the NIC is still alive in the bus */ if (test_bit(STATUS_TRANS_DEAD, &trans->status)) return -ENODEV; if (!test_bit(txq_idx, trans_pcie->queue_used)) return -EINVAL; IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", txq_idx); txq = trans_pcie->txq[txq_idx]; spin_lock_bh(&txq->lock); overflow_tx = txq->overflow_tx || !skb_queue_empty(&txq->overflow_q); spin_unlock_bh(&txq->lock); wr_ptr = READ_ONCE(txq->write_ptr); while ((txq->read_ptr != READ_ONCE(txq->write_ptr) || overflow_tx) && !time_after(jiffies, now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) { u8 write_ptr = READ_ONCE(txq->write_ptr); /* * If write pointer moved during the wait, warn only * if the TX came from op mode. In case TX came from * trans layer (overflow TX) don't warn. */ if (WARN_ONCE(wr_ptr != write_ptr && !overflow_tx, "WR pointer moved while flushing %d -> %d\n", wr_ptr, write_ptr)) return -ETIMEDOUT; wr_ptr = write_ptr; usleep_range(1000, 2000); spin_lock_bh(&txq->lock); overflow_tx = txq->overflow_tx || !skb_queue_empty(&txq->overflow_q); spin_unlock_bh(&txq->lock); } if (txq->read_ptr != txq->write_ptr) { IWL_ERR(trans, "fail to flush all tx fifo queues Q %d\n", txq_idx); iwl_trans_pcie_log_scd_error(trans, txq); return -ETIMEDOUT; } IWL_DEBUG_TX_QUEUES(trans, "Queue %d is now empty.\n", txq_idx); return 0; } static int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int cnt; int ret = 0; /* waiting for all the tx frames complete might take a while */ for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) { if (cnt == trans_pcie->cmd_queue) continue; if (!test_bit(cnt, trans_pcie->queue_used)) continue; if (!(BIT(cnt) & txq_bm)) continue; ret = iwl_trans_pcie_wait_txq_empty(trans, cnt); if (ret) break; } return ret; } static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); unsigned long flags; spin_lock_irqsave(&trans_pcie->reg_lock, flags); __iwl_trans_pcie_set_bits_mask(trans, reg, mask, value); spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); } static const char *get_csr_string(int cmd) { #define IWL_CMD(x) case x: return #x switch (cmd) { IWL_CMD(CSR_HW_IF_CONFIG_REG); IWL_CMD(CSR_INT_COALESCING); IWL_CMD(CSR_INT); IWL_CMD(CSR_INT_MASK); IWL_CMD(CSR_FH_INT_STATUS); IWL_CMD(CSR_GPIO_IN); IWL_CMD(CSR_RESET); IWL_CMD(CSR_GP_CNTRL); IWL_CMD(CSR_HW_REV); IWL_CMD(CSR_EEPROM_REG); 
IWL_CMD(CSR_EEPROM_GP); IWL_CMD(CSR_OTP_GP_REG); IWL_CMD(CSR_GIO_REG); IWL_CMD(CSR_GP_UCODE_REG); IWL_CMD(CSR_GP_DRIVER_REG); IWL_CMD(CSR_UCODE_DRV_GP1); IWL_CMD(CSR_UCODE_DRV_GP2); IWL_CMD(CSR_LED_REG); IWL_CMD(CSR_DRAM_INT_TBL_REG); IWL_CMD(CSR_GIO_CHICKEN_BITS); IWL_CMD(CSR_ANA_PLL_CFG); IWL_CMD(CSR_HW_REV_WA_REG); IWL_CMD(CSR_MONITOR_STATUS_REG); IWL_CMD(CSR_DBG_HPET_MEM_REG); default: return "UNKNOWN"; } #undef IWL_CMD } void iwl_pcie_dump_csr(struct iwl_trans *trans) { int i; static const u32 csr_tbl[] = { CSR_HW_IF_CONFIG_REG, CSR_INT_COALESCING, CSR_INT, CSR_INT_MASK, CSR_FH_INT_STATUS, CSR_GPIO_IN, CSR_RESET, CSR_GP_CNTRL, CSR_HW_REV, CSR_EEPROM_REG, CSR_EEPROM_GP, CSR_OTP_GP_REG, CSR_GIO_REG, CSR_GP_UCODE_REG, CSR_GP_DRIVER_REG, CSR_UCODE_DRV_GP1, CSR_UCODE_DRV_GP2, CSR_LED_REG, CSR_DRAM_INT_TBL_REG, CSR_GIO_CHICKEN_BITS, CSR_ANA_PLL_CFG, CSR_MONITOR_STATUS_REG, CSR_HW_REV_WA_REG, CSR_DBG_HPET_MEM_REG }; IWL_ERR(trans, "CSR values:\n"); IWL_ERR(trans, "(2nd byte of CSR_INT_COALESCING is " "CSR_INT_PERIODIC_REG)\n"); for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) { IWL_ERR(trans, " %25s: 0X%08x\n", get_csr_string(csr_tbl[i]), iwl_read32(trans, csr_tbl[i])); } } #ifdef CPTCFG_IWLWIFI_DEBUGFS /* create and remove of files */ #define DEBUGFS_ADD_FILE(name, parent, mode) do { \ debugfs_create_file(#name, mode, parent, trans, \ &iwl_dbgfs_##name##_ops); \ } while (0) /* file operation */ #define DEBUGFS_READ_FILE_OPS(name) \ static const struct file_operations iwl_dbgfs_##name##_ops = { \ .read = iwl_dbgfs_##name##_read, \ .open = simple_open, \ .llseek = generic_file_llseek, \ }; #define DEBUGFS_WRITE_FILE_OPS(name) \ static const struct file_operations iwl_dbgfs_##name##_ops = { \ .write = iwl_dbgfs_##name##_write, \ .open = simple_open, \ .llseek = generic_file_llseek, \ }; #define DEBUGFS_READ_WRITE_FILE_OPS(name) \ static const struct file_operations iwl_dbgfs_##name##_ops = { \ .write = iwl_dbgfs_##name##_write, \ .read = iwl_dbgfs_##name##_read, \ .open = simple_open, \ .llseek = generic_file_llseek, \ }; static ssize_t iwl_dbgfs_tx_queue_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_trans *trans = file->private_data; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_txq *txq; char *buf; int pos = 0; int cnt; int ret; size_t bufsz; bufsz = sizeof(char) * 75 * trans->cfg->base_params->num_of_queues; if (!trans_pcie->txq_memory) return -EAGAIN; buf = kzalloc(bufsz, GFP_KERNEL); if (!buf) return -ENOMEM; for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) { txq = trans_pcie->txq[cnt]; pos += scnprintf(buf + pos, bufsz - pos, "hwq %.2d: read=%u write=%u use=%d stop=%d need_update=%d frozen=%d%s\n", cnt, txq->read_ptr, txq->write_ptr, !!test_bit(cnt, trans_pcie->queue_used), !!test_bit(cnt, trans_pcie->queue_stopped), txq->need_update, txq->frozen, (cnt == trans_pcie->cmd_queue ? 
" HCMD" : "")); } ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); kfree(buf); return ret; } static ssize_t iwl_dbgfs_rx_queue_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_trans *trans = file->private_data; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); char *buf; int pos = 0, i, ret; size_t bufsz = sizeof(buf); bufsz = sizeof(char) * 121 * trans->num_rx_queues; if (!trans_pcie->rxq) return -EAGAIN; buf = kzalloc(bufsz, GFP_KERNEL); if (!buf) return -ENOMEM; for (i = 0; i < trans->num_rx_queues && pos < bufsz; i++) { struct iwl_rxq *rxq = &trans_pcie->rxq[i]; pos += scnprintf(buf + pos, bufsz - pos, "queue#: %2d\n", i); pos += scnprintf(buf + pos, bufsz - pos, "\tread: %u\n", rxq->read); pos += scnprintf(buf + pos, bufsz - pos, "\twrite: %u\n", rxq->write); pos += scnprintf(buf + pos, bufsz - pos, "\twrite_actual: %u\n", rxq->write_actual); pos += scnprintf(buf + pos, bufsz - pos, "\tneed_update: %2d\n", rxq->need_update); pos += scnprintf(buf + pos, bufsz - pos, "\tfree_count: %u\n", rxq->free_count); if (rxq->rb_stts) { u32 r = __le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)); pos += scnprintf(buf + pos, bufsz - pos, "\tclosed_rb_num: %u\n", r & 0x0FFF); } else { pos += scnprintf(buf + pos, bufsz - pos, "\tclosed_rb_num: Not Allocated\n"); } } ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); kfree(buf); return ret; } static ssize_t iwl_dbgfs_interrupt_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_trans *trans = file->private_data; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct isr_statistics *isr_stats = &trans_pcie->isr_stats; int pos = 0; char *buf; int bufsz = 24 * 64; /* 24 items * 64 char per item */ ssize_t ret; buf = kzalloc(bufsz, GFP_KERNEL); if (!buf) return -ENOMEM; pos += scnprintf(buf + pos, bufsz - pos, "Interrupt Statistics Report:\n"); pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n", isr_stats->hw); pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n", isr_stats->sw); if (isr_stats->sw || isr_stats->hw) { pos += scnprintf(buf + pos, bufsz - pos, "\tLast Restarting Code: 0x%X\n", isr_stats->err_code); } #ifdef CPTCFG_IWLWIFI_DEBUG pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n", isr_stats->sch); pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n", isr_stats->alive); #endif pos += scnprintf(buf + pos, bufsz - pos, "HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill); pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n", isr_stats->ctkill); pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n", isr_stats->wakeup); pos += scnprintf(buf + pos, bufsz - pos, "Rx command responses:\t\t %u\n", isr_stats->rx); pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n", isr_stats->tx); pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n", isr_stats->unhandled); ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); kfree(buf); return ret; } static ssize_t iwl_dbgfs_interrupt_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_trans *trans = file->private_data; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct isr_statistics *isr_stats = &trans_pcie->isr_stats; u32 reset_flag; int ret; ret = kstrtou32_from_user(user_buf, count, 16, &reset_flag); if (ret) return ret; if (reset_flag == 0) memset(isr_stats, 0, 
sizeof(*isr_stats)); return count; } static ssize_t iwl_dbgfs_csr_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_trans *trans = file->private_data; iwl_pcie_dump_csr(trans); return count; } static ssize_t iwl_dbgfs_fh_reg_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_trans *trans = file->private_data; char *buf = NULL; ssize_t ret; ret = iwl_dump_fh(trans, &buf); if (ret < 0) return ret; if (!buf) return -EINVAL; ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret); kfree(buf); return ret; } static ssize_t iwl_dbgfs_rfkill_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_trans *trans = file->private_data; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); char buf[100]; int pos; pos = scnprintf(buf, sizeof(buf), "debug: %d\nhw: %d\n", trans_pcie->debug_rfkill, !(iwl_read32(trans, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)); return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } static ssize_t iwl_dbgfs_rfkill_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_trans *trans = file->private_data; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); bool new_value; int ret; ret = kstrtobool_from_user(user_buf, count, &new_value); if (ret) return ret; if (new_value == trans_pcie->debug_rfkill) return count; IWL_WARN(trans, "changing debug rfkill %d->%d\n", trans_pcie->debug_rfkill, new_value); trans_pcie->debug_rfkill = new_value; iwl_pcie_handle_rfkill_irq(trans); return count; } static int iwl_dbgfs_monitor_data_open(struct inode *inode, struct file *file) { struct iwl_trans *trans = inode->i_private; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); if (!trans->dbg.dest_tlv || trans->dbg.dest_tlv->monitor_mode != EXTERNAL_MODE) { IWL_ERR(trans, "Debug destination is not set to DRAM\n"); return -ENOENT; } if (trans_pcie->fw_mon_data.state != IWL_FW_MON_DBGFS_STATE_CLOSED) return -EBUSY; trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_OPEN; return simple_open(inode, file); } static int iwl_dbgfs_monitor_data_release(struct inode *inode, struct file *file) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(inode->i_private); if (trans_pcie->fw_mon_data.state == IWL_FW_MON_DBGFS_STATE_OPEN) trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_CLOSED; return 0; } static bool iwl_write_to_user_buf(char __user *user_buf, ssize_t count, void *buf, ssize_t *size, ssize_t *bytes_copied) { int buf_size_left = count - *bytes_copied; buf_size_left = buf_size_left - (buf_size_left % sizeof(u32)); if (*size > buf_size_left) *size = buf_size_left; *size -= copy_to_user(user_buf, buf, *size); *bytes_copied += *size; if (buf_size_left == *size) return true; return false; } static ssize_t iwl_dbgfs_monitor_data_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_trans *trans = file->private_data; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); void *cpu_addr = (void *)trans->dbg.fw_mon[0].block, *curr_buf; struct cont_rec *data = &trans_pcie->fw_mon_data; u32 write_ptr_addr, wrap_cnt_addr, write_ptr, wrap_cnt; ssize_t size, bytes_copied = 0; bool b_full; if (trans->dbg.dest_tlv) { write_ptr_addr = le32_to_cpu(trans->dbg.dest_tlv->write_ptr_reg); wrap_cnt_addr = le32_to_cpu(trans->dbg.dest_tlv->wrap_count); } else { write_ptr_addr = MON_BUFF_WRPTR; wrap_cnt_addr = 
MON_BUFF_CYCLE_CNT;
	}

	if (unlikely(!trans->dbg.rec_on))
		return 0;

	mutex_lock(&data->mutex);
	if (data->state == IWL_FW_MON_DBGFS_STATE_DISABLED) {
		mutex_unlock(&data->mutex);
		return 0;
	}

	/* write_ptr position in bytes rather than DW */
	write_ptr = iwl_read_prph(trans, write_ptr_addr) * sizeof(u32);
	wrap_cnt = iwl_read_prph(trans, wrap_cnt_addr);

	if (data->prev_wrap_cnt == wrap_cnt) {
		size = write_ptr - data->prev_wr_ptr;
		curr_buf = cpu_addr + data->prev_wr_ptr;
		b_full = iwl_write_to_user_buf(user_buf, count, curr_buf,
					       &size, &bytes_copied);
		data->prev_wr_ptr += size;

	} else if (data->prev_wrap_cnt == wrap_cnt - 1 &&
		   write_ptr < data->prev_wr_ptr) {
		size = trans->dbg.fw_mon[0].size - data->prev_wr_ptr;
		curr_buf = cpu_addr + data->prev_wr_ptr;
		b_full = iwl_write_to_user_buf(user_buf, count, curr_buf,
					       &size, &bytes_copied);
		data->prev_wr_ptr += size;

		if (!b_full) {
			size = write_ptr;
			b_full = iwl_write_to_user_buf(user_buf, count,
						       cpu_addr, &size,
						       &bytes_copied);
			data->prev_wr_ptr = size;
			data->prev_wrap_cnt++;
		}
	} else {
		if (data->prev_wrap_cnt == wrap_cnt - 1 &&
		    write_ptr > data->prev_wr_ptr)
			IWL_WARN(trans,
				 "write pointer passed previous write pointer, start copying from the beginning\n");
		else if (!unlikely(data->prev_wrap_cnt == 0 &&
				   data->prev_wr_ptr == 0))
			IWL_WARN(trans,
				 "monitor data is out of sync, start copying from the beginning\n");

		size = write_ptr;
		b_full = iwl_write_to_user_buf(user_buf, count, cpu_addr,
					       &size, &bytes_copied);
		data->prev_wr_ptr = size;
		data->prev_wrap_cnt = wrap_cnt;
	}

	mutex_unlock(&data->mutex);

	return bytes_copied;
}

DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
DEBUGFS_READ_FILE_OPS(fh_reg);
DEBUGFS_READ_FILE_OPS(rx_queue);
DEBUGFS_READ_FILE_OPS(tx_queue);
DEBUGFS_WRITE_FILE_OPS(csr);
DEBUGFS_READ_WRITE_FILE_OPS(rfkill);

static const struct file_operations iwl_dbgfs_monitor_data_ops = {
	.read = iwl_dbgfs_monitor_data_read,
	.open = iwl_dbgfs_monitor_data_open,
	.release = iwl_dbgfs_monitor_data_release,
};

/* Create the debugfs files and directories */
void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
{
	struct dentry *dir = trans->dbgfs_dir;

	DEBUGFS_ADD_FILE(rx_queue, dir, 0400);
	DEBUGFS_ADD_FILE(tx_queue, dir, 0400);
	DEBUGFS_ADD_FILE(interrupt, dir, 0600);
	DEBUGFS_ADD_FILE(csr, dir, 0200);
	DEBUGFS_ADD_FILE(fh_reg, dir, 0400);
	DEBUGFS_ADD_FILE(rfkill, dir, 0600);
	DEBUGFS_ADD_FILE(monitor_data, dir, 0400);
}

static void iwl_trans_pcie_debugfs_cleanup(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct cont_rec *data = &trans_pcie->fw_mon_data;

	mutex_lock(&data->mutex);
	data->state = IWL_FW_MON_DBGFS_STATE_DISABLED;
	mutex_unlock(&data->mutex);
}
#endif /*CPTCFG_IWLWIFI_DEBUGFS */

static u32 iwl_trans_pcie_get_cmdlen(struct iwl_trans *trans, void *tfd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 cmdlen = 0;
	int i;

	for (i = 0; i < trans_pcie->max_tbs; i++)
		cmdlen += iwl_pcie_tfd_tb_get_len(trans, tfd, i);

	return cmdlen;
}

static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
				   struct iwl_fw_error_dump_data **data,
				   int allocated_rb_nums)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
	/* Dump RBs is supported only for pre-9000 devices (1 queue) */
	struct iwl_rxq *rxq = &trans_pcie->rxq[0];
	u32 i, r, j, rb_len = 0;

	spin_lock(&rxq->lock);

	r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF;

	for (i = rxq->read, j = 0;
	     i != r && j < allocated_rb_nums;
	     i = (i + 1) & RX_QUEUE_MASK, j++) {
		struct
iwl_rx_mem_buffer *rxb = rxq->queue[i]; struct iwl_fw_error_dump_rb *rb; dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE); rb_len += sizeof(**data) + sizeof(*rb) + max_len; (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RB); (*data)->len = cpu_to_le32(sizeof(*rb) + max_len); rb = (void *)(*data)->data; rb->index = cpu_to_le32(i); memcpy(rb->data, page_address(rxb->page), max_len); /* remap the page for the free benefit */ rxb->page_dma = dma_map_page(trans->dev, rxb->page, 0, max_len, DMA_FROM_DEVICE); *data = iwl_fw_error_next_data(*data); } spin_unlock(&rxq->lock); return rb_len; } #define IWL_CSR_TO_DUMP (0x250) static u32 iwl_trans_pcie_dump_csr(struct iwl_trans *trans, struct iwl_fw_error_dump_data **data) { u32 csr_len = sizeof(**data) + IWL_CSR_TO_DUMP; __le32 *val; int i; (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_CSR); (*data)->len = cpu_to_le32(IWL_CSR_TO_DUMP); val = (void *)(*data)->data; for (i = 0; i < IWL_CSR_TO_DUMP; i += 4) *val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i)); *data = iwl_fw_error_next_data(*data); return csr_len; } static u32 iwl_trans_pcie_fh_regs_dump(struct iwl_trans *trans, struct iwl_fw_error_dump_data **data) { u32 fh_regs_len = FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND; unsigned long flags; __le32 *val; int i; if (!iwl_trans_grab_nic_access(trans, &flags)) return 0; (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FH_REGS); (*data)->len = cpu_to_le32(fh_regs_len); val = (void *)(*data)->data; if (!trans->cfg->gen2) for (i = FH_MEM_LOWER_BOUND; i < FH_MEM_UPPER_BOUND; i += sizeof(u32)) *val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i)); else for (i = iwl_umac_prph(trans, FH_MEM_LOWER_BOUND_GEN2); i < iwl_umac_prph(trans, FH_MEM_UPPER_BOUND_GEN2); i += sizeof(u32)) *val++ = cpu_to_le32(iwl_trans_pcie_read_prph(trans, i)); iwl_trans_release_nic_access(trans, &flags); *data = iwl_fw_error_next_data(*data); return sizeof(**data) + fh_regs_len; } static u32 iwl_trans_pci_dump_marbh_monitor(struct iwl_trans *trans, struct iwl_fw_error_dump_fw_mon *fw_mon_data, u32 monitor_len) { u32 buf_size_in_dwords = (monitor_len >> 2); u32 *buffer = (u32 *)fw_mon_data->data; unsigned long flags; u32 i; if (!iwl_trans_grab_nic_access(trans, &flags)) return 0; iwl_write_umac_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x1); for (i = 0; i < buf_size_in_dwords; i++) buffer[i] = iwl_read_umac_prph_no_grab(trans, MON_DMARB_RD_DATA_ADDR); iwl_write_umac_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x0); iwl_trans_release_nic_access(trans, &flags); return monitor_len; } static void iwl_trans_pcie_dump_pointers(struct iwl_trans *trans, struct iwl_fw_error_dump_fw_mon *fw_mon_data) { u32 base, base_high, write_ptr, write_ptr_val, wrap_cnt; if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { base = DBGC_CUR_DBGBUF_BASE_ADDR_LSB; base_high = DBGC_CUR_DBGBUF_BASE_ADDR_MSB; write_ptr = DBGC_CUR_DBGBUF_STATUS; wrap_cnt = DBGC_DBGBUF_WRAP_AROUND; } else if (trans->dbg.dest_tlv) { write_ptr = le32_to_cpu(trans->dbg.dest_tlv->write_ptr_reg); wrap_cnt = le32_to_cpu(trans->dbg.dest_tlv->wrap_count); base = le32_to_cpu(trans->dbg.dest_tlv->base_reg); } else { base = MON_BUFF_BASE_ADDR; write_ptr = MON_BUFF_WRPTR; wrap_cnt = MON_BUFF_CYCLE_CNT; } write_ptr_val = iwl_read_prph(trans, write_ptr); fw_mon_data->fw_mon_cycle_cnt = cpu_to_le32(iwl_read_prph(trans, wrap_cnt)); fw_mon_data->fw_mon_base_ptr = cpu_to_le32(iwl_read_prph(trans, base)); if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { fw_mon_data->fw_mon_base_high_ptr = 
cpu_to_le32(iwl_read_prph(trans, base_high)); write_ptr_val &= DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK; } fw_mon_data->fw_mon_wr_ptr = cpu_to_le32(write_ptr_val); } static u32 iwl_trans_pcie_dump_monitor(struct iwl_trans *trans, struct iwl_fw_error_dump_data **data, u32 monitor_len) { u32 len = 0; if (trans->dbg.dest_tlv || (trans->dbg.num_blocks && (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000 || trans->cfg->device_family >= IWL_DEVICE_FAMILY_AX210))) { struct iwl_fw_error_dump_fw_mon *fw_mon_data; (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR); fw_mon_data = (void *)(*data)->data; iwl_trans_pcie_dump_pointers(trans, fw_mon_data); len += sizeof(**data) + sizeof(*fw_mon_data); if (trans->dbg.num_blocks) { memcpy(fw_mon_data->data, trans->dbg.fw_mon[0].block, trans->dbg.fw_mon[0].size); monitor_len = trans->dbg.fw_mon[0].size; } else if (trans->dbg.dest_tlv->monitor_mode == SMEM_MODE) { u32 base = le32_to_cpu(fw_mon_data->fw_mon_base_ptr); /* * Update pointers to reflect actual values after * shifting */ if (trans->dbg.dest_tlv->version) { base = (iwl_read_prph(trans, base) & IWL_LDBG_M2S_BUF_BA_MSK) << trans->dbg.dest_tlv->base_shift; base *= IWL_M2S_UNIT_SIZE; base += trans->cfg->smem_offset; } else { base = iwl_read_prph(trans, base) << trans->dbg.dest_tlv->base_shift; } iwl_trans_read_mem(trans, base, fw_mon_data->data, monitor_len / sizeof(u32)); } else if (trans->dbg.dest_tlv->monitor_mode == MARBH_MODE) { monitor_len = iwl_trans_pci_dump_marbh_monitor(trans, fw_mon_data, monitor_len); } else { /* Didn't match anything - output no monitor data */ monitor_len = 0; } len += monitor_len; (*data)->len = cpu_to_le32(monitor_len + sizeof(*fw_mon_data)); } return len; } static int iwl_trans_get_fw_monitor_len(struct iwl_trans *trans, u32 *len) { if (trans->dbg.num_blocks) { *len += sizeof(struct iwl_fw_error_dump_data) + sizeof(struct iwl_fw_error_dump_fw_mon) + trans->dbg.fw_mon[0].size; return trans->dbg.fw_mon[0].size; } else if (trans->dbg.dest_tlv) { u32 base, end, cfg_reg, monitor_len; if (trans->dbg.dest_tlv->version == 1) { cfg_reg = le32_to_cpu(trans->dbg.dest_tlv->base_reg); cfg_reg = iwl_read_prph(trans, cfg_reg); base = (cfg_reg & IWL_LDBG_M2S_BUF_BA_MSK) << trans->dbg.dest_tlv->base_shift; base *= IWL_M2S_UNIT_SIZE; base += trans->cfg->smem_offset; monitor_len = (cfg_reg & IWL_LDBG_M2S_BUF_SIZE_MSK) >> trans->dbg.dest_tlv->end_shift; monitor_len *= IWL_M2S_UNIT_SIZE; } else { base = le32_to_cpu(trans->dbg.dest_tlv->base_reg); end = le32_to_cpu(trans->dbg.dest_tlv->end_reg); base = iwl_read_prph(trans, base) << trans->dbg.dest_tlv->base_shift; end = iwl_read_prph(trans, end) << trans->dbg.dest_tlv->end_shift; /* Make "end" point to the actual end */ if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000 || trans->dbg.dest_tlv->monitor_mode == MARBH_MODE) end += (1 << trans->dbg.dest_tlv->end_shift); monitor_len = end - base; } *len += sizeof(struct iwl_fw_error_dump_data) + sizeof(struct iwl_fw_error_dump_fw_mon) + monitor_len; return monitor_len; } return 0; } static struct iwl_trans_dump_data *iwl_trans_pcie_dump_data(struct iwl_trans *trans, u32 dump_mask) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_fw_error_dump_data *data; struct iwl_txq *cmdq = trans_pcie->txq[trans_pcie->cmd_queue]; struct iwl_fw_error_dump_txcmd *txcmd; struct iwl_trans_dump_data *dump_data; u32 len, num_rbs = 0, monitor_len = 0; int i, ptr; bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status) && !trans->cfg->mq_rx_supported && dump_mask & 
BIT(IWL_FW_ERROR_DUMP_RB); if (!dump_mask) return NULL; /* transport dump header */ len = sizeof(*dump_data); /* host commands */ if (dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD) && cmdq) len += sizeof(*data) + cmdq->n_window * (sizeof(*txcmd) + TFD_MAX_PAYLOAD_SIZE); /* FW monitor */ if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FW_MONITOR)) monitor_len = iwl_trans_get_fw_monitor_len(trans, &len); /* CSR registers */ if (dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR)) len += sizeof(*data) + IWL_CSR_TO_DUMP; /* FH registers */ if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS)) { if (trans->cfg->gen2) len += sizeof(*data) + (iwl_umac_prph(trans, FH_MEM_UPPER_BOUND_GEN2) - iwl_umac_prph(trans, FH_MEM_LOWER_BOUND_GEN2)); else len += sizeof(*data) + (FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND); } if (dump_rbs) { /* Dump RBs is supported only for pre-9000 devices (1 queue) */ struct iwl_rxq *rxq = &trans_pcie->rxq[0]; /* RBs */ num_rbs = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF; num_rbs = (num_rbs - rxq->read) & RX_QUEUE_MASK; len += num_rbs * (sizeof(*data) + sizeof(struct iwl_fw_error_dump_rb) + (PAGE_SIZE << trans_pcie->rx_page_order)); } /* Paged memory for gen2 HW */ if (trans->cfg->gen2 && dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING)) for (i = 0; i < trans->init_dram.paging_cnt; i++) len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_paging) + trans->init_dram.paging[i].size; dump_data = vzalloc(len); if (!dump_data) return NULL; len = 0; data = (void *)dump_data->data; if (dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD) && cmdq) { u16 tfd_size = trans_pcie->tfd_size; data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD); txcmd = (void *)data->data; spin_lock_bh(&cmdq->lock); ptr = cmdq->write_ptr; for (i = 0; i < cmdq->n_window; i++) { u8 idx = iwl_pcie_get_cmd_index(cmdq, ptr); u32 caplen, cmdlen; cmdlen = iwl_trans_pcie_get_cmdlen(trans, cmdq->tfds + tfd_size * ptr); caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen); if (cmdlen) { len += sizeof(*txcmd) + caplen; txcmd->cmdlen = cpu_to_le32(cmdlen); txcmd->caplen = cpu_to_le32(caplen); memcpy(txcmd->data, cmdq->entries[idx].cmd, caplen); txcmd = (void *)((u8 *)txcmd->data + caplen); } ptr = iwl_queue_dec_wrap(trans, ptr); } spin_unlock_bh(&cmdq->lock); data->len = cpu_to_le32(len); len += sizeof(*data); data = iwl_fw_error_next_data(data); } if (dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR)) len += iwl_trans_pcie_dump_csr(trans, &data); if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS)) len += iwl_trans_pcie_fh_regs_dump(trans, &data); if (dump_rbs) len += iwl_trans_pcie_dump_rbs(trans, &data, num_rbs); /* Paged memory for gen2 HW */ if (trans->cfg->gen2 && dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING)) { for (i = 0; i < trans->init_dram.paging_cnt; i++) { struct iwl_fw_error_dump_paging *paging; u32 page_len = trans->init_dram.paging[i].size; data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING); data->len = cpu_to_le32(sizeof(*paging) + page_len); paging = (void *)data->data; paging->index = cpu_to_le32(i); memcpy(paging->data, trans->init_dram.paging[i].block, page_len); data = iwl_fw_error_next_data(data); len += sizeof(*data) + sizeof(*paging) + page_len; } } if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FW_MONITOR)) len += iwl_trans_pcie_dump_monitor(trans, &data, monitor_len); dump_data->len = len; return dump_data; } #ifdef CONFIG_PM_SLEEP static int iwl_trans_pcie_suspend(struct iwl_trans *trans) { return 0; } static void iwl_trans_pcie_resume(struct iwl_trans *trans) { } #endif /* CONFIG_PM_SLEEP */ #define IWL_TRANS_COMMON_OPS \ .op_mode_leave = 
iwl_trans_pcie_op_mode_leave, \ .write8 = iwl_trans_pcie_write8, \ .write32 = iwl_trans_pcie_write32, \ .read32 = iwl_trans_pcie_read32, \ .read_prph = iwl_trans_pcie_read_prph, \ .write_prph = iwl_trans_pcie_write_prph, \ .read_mem = iwl_trans_pcie_read_mem, \ .write_mem = iwl_trans_pcie_write_mem, \ .configure = iwl_trans_pcie_configure, \ .set_pmi = iwl_trans_pcie_set_pmi, \ .sw_reset = iwl_trans_pcie_sw_reset, \ .grab_nic_access = iwl_trans_pcie_grab_nic_access, \ .release_nic_access = iwl_trans_pcie_release_nic_access, \ .set_bits_mask = iwl_trans_pcie_set_bits_mask, \ .dump_data = iwl_trans_pcie_dump_data, \ .d3_suspend = iwl_trans_pcie_d3_suspend, \ .d3_resume = iwl_trans_pcie_d3_resume, \ .sync_nmi = iwl_trans_pcie_sync_nmi #ifdef CONFIG_PM_SLEEP #define IWL_TRANS_PM_OPS \ .suspend = iwl_trans_pcie_suspend, \ .resume = iwl_trans_pcie_resume, #else #define IWL_TRANS_PM_OPS #endif /* CONFIG_PM_SLEEP */ static const struct iwl_trans_ops trans_ops_pcie = { IWL_TRANS_COMMON_OPS, IWL_TRANS_PM_OPS .start_hw = iwl_trans_pcie_start_hw, .fw_alive = iwl_trans_pcie_fw_alive, .start_fw = iwl_trans_pcie_start_fw, .stop_device = iwl_trans_pcie_stop_device, .send_cmd = iwl_trans_pcie_send_hcmd, .tx = iwl_trans_pcie_tx, .reclaim = iwl_trans_pcie_reclaim, .txq_disable = iwl_trans_pcie_txq_disable, .txq_enable = iwl_trans_pcie_txq_enable, .txq_set_shared_mode = iwl_trans_pcie_txq_set_shared_mode, .wait_tx_queues_empty = iwl_trans_pcie_wait_txqs_empty, .freeze_txq_timer = iwl_trans_pcie_freeze_txq_timer, .block_txq_ptrs = iwl_trans_pcie_block_txq_ptrs, #ifdef CPTCFG_IWLWIFI_DEBUGFS .debugfs_cleanup = iwl_trans_pcie_debugfs_cleanup, #endif }; static const struct iwl_trans_ops trans_ops_pcie_gen2 = { IWL_TRANS_COMMON_OPS, IWL_TRANS_PM_OPS .start_hw = iwl_trans_pcie_start_hw, .fw_alive = iwl_trans_pcie_gen2_fw_alive, .start_fw = iwl_trans_pcie_gen2_start_fw, .stop_device = iwl_trans_pcie_gen2_stop_device, .send_cmd = iwl_trans_pcie_gen2_send_hcmd, .tx = iwl_trans_pcie_gen2_tx, .reclaim = iwl_trans_pcie_reclaim, .set_q_ptrs = iwl_trans_pcie_set_q_ptrs, .txq_alloc = iwl_trans_pcie_dyn_txq_alloc, .txq_free = iwl_trans_pcie_dyn_txq_free, .wait_txq_empty = iwl_trans_pcie_wait_txq_empty, .rxq_dma_data = iwl_trans_pcie_rxq_dma_data, #ifdef CPTCFG_IWLWIFI_DEBUGFS .debugfs_cleanup = iwl_trans_pcie_debugfs_cleanup, #endif }; struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, const struct pci_device_id *ent, const struct iwl_cfg *cfg) { struct iwl_trans_pcie *trans_pcie; struct iwl_trans *trans; int ret, addr_size; ret = pcim_enable_device(pdev); if (ret) return ERR_PTR(ret); if (cfg->gen2) trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), &pdev->dev, cfg, &trans_ops_pcie_gen2); else trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), &pdev->dev, cfg, &trans_ops_pcie); if (!trans) return ERR_PTR(-ENOMEM); trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); trans_pcie->trans = trans; trans_pcie->opmode_down = true; spin_lock_init(&trans_pcie->irq_lock); spin_lock_init(&trans_pcie->reg_lock); mutex_init(&trans_pcie->mutex); init_waitqueue_head(&trans_pcie->ucode_write_waitq); trans_pcie->tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page); if (!trans_pcie->tso_hdr_page) { ret = -ENOMEM; goto out_no_pci; } trans_pcie->debug_rfkill = -1; if (!cfg->base_params->pcie_l1_allowed) { /* * W/A - seems to solve weird behavior. We need to remove this * if we don't want to stay in L1 all the time. This wastes a * lot of power. 
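 *
 * Note (illustrative, not from the original comment): PCIE_LINK_STATE_L0S,
 * _L1 and _CLKPM are the standard ASPM sub-states; masking all three via
 * pci_disable_link_state() below pins the link in the fully-active L0
 * state, trading idle power for avoiding the misbehavior described above.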
*/ pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM); } trans_pcie->def_rx_queue = 0; if (cfg->use_tfh) { addr_size = 64; trans_pcie->max_tbs = IWL_TFH_NUM_TBS; trans_pcie->tfd_size = sizeof(struct iwl_tfh_tfd); } else { addr_size = 36; trans_pcie->max_tbs = IWL_NUM_OF_TBS; trans_pcie->tfd_size = sizeof(struct iwl_tfd); } trans->max_skb_frags = IWL_PCIE_MAX_FRAGS(trans_pcie); pci_set_master(pdev); ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(addr_size)); if (!ret) ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(addr_size)); if (ret) { ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (!ret) ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); /* both attempts failed: */ if (ret) { dev_err(&pdev->dev, "No suitable DMA available\n"); goto out_no_pci; } } ret = pcim_iomap_regions_request_all(pdev, BIT(0), DRV_NAME); if (ret) { dev_err(&pdev->dev, "pcim_iomap_regions_request_all failed\n"); goto out_no_pci; } trans_pcie->hw_base = pcim_iomap_table(pdev)[0]; if (!trans_pcie->hw_base) { dev_err(&pdev->dev, "pcim_iomap_table failed\n"); ret = -ENODEV; goto out_no_pci; } /* We disable the RETRY_TIMEOUT register (0x41) to keep * PCI Tx retries from interfering with C3 CPU state */ pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00); trans_pcie->pci_dev = pdev; iwl_disable_interrupts(trans); trans->hw_rev = iwl_read32(trans, CSR_HW_REV); if (trans->hw_rev == 0xffffffff) { dev_err(&pdev->dev, "HW_REV=0xFFFFFFFF, PCI issues?\n"); ret = -EIO; goto out_no_pci; } /* * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have * changed, and now the revision step also includes bit 0-1 (no more * "dash" value). To keep hw_rev backwards compatible - we'll store it * in the old format. */ if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000) { unsigned long flags; trans->hw_rev = (trans->hw_rev & 0xfff0) | (CSR_HW_REV_STEP(trans->hw_rev << 2) << 2); ret = iwl_pcie_prepare_card_hw(trans); if (ret) { IWL_WARN(trans, "Exit HW not ready\n"); goto out_no_pci; } /* * in-order to recognize C step driver should read chip version * id located at the AUX bus MISC address space. 
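 *
 * The step value is a 4-bit field; the sequence below boils down to
 * (a sketch of the code that follows, not new logic):
 *
 *	reg = iwl_read_prph_no_grab(trans, CNVI_AUX_MISC_CHIP);
 *	step = (reg >> HW_STEP_LOCATION_BITS) & 0xF;	// 0x3 == C-step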
*/ ret = iwl_finish_nic_init(trans); if (ret) goto out_no_pci; if (iwl_trans_grab_nic_access(trans, &flags)) { u32 hw_step; hw_step = iwl_read_umac_prph_no_grab(trans, WFPM_CTRL_REG); hw_step |= ENABLE_WFPM; iwl_write_umac_prph_no_grab(trans, WFPM_CTRL_REG, hw_step); hw_step = iwl_read_prph_no_grab(trans, CNVI_AUX_MISC_CHIP); hw_step = (hw_step >> HW_STEP_LOCATION_BITS) & 0xF; if (hw_step == 0x3) trans->hw_rev = (trans->hw_rev & 0xFFFFFFF3) | (SILICON_C_STEP << 2); iwl_trans_release_nic_access(trans, &flags); } } IWL_DEBUG_INFO(trans, "HW REV: 0x%0x\n", trans->hw_rev); #if IS_ENABLED(CPTCFG_IWLMVM) || IS_ENABLED(CPTCFG_IWLFMAC) trans->hw_rf_id = iwl_read32(trans, CSR_HW_RF_ID); if (cfg == &iwlax210_2ax_cfg_so_hr_a0) { if (trans->hw_rev == CSR_HW_REV_TYPE_TY) { trans->cfg = &iwlax210_2ax_cfg_ty_gf_a0; } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) == CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_JF)) { trans->cfg = &iwlax210_2ax_cfg_so_jf_a0; } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) == CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_GF)) { trans->cfg = &iwlax211_2ax_cfg_so_gf_a0; } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) == CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_GF4)) { trans->cfg = &iwlax411_2ax_cfg_so_gf4_a0; } } else if (cfg == &iwl_ax101_cfg_qu_hr) { if ((CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) == CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) && trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0) || (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) == CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR1))) { trans->cfg = &iwl22000_2ax_cfg_qnj_hr_b0; } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) == CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) && trans->hw_rev == CSR_HW_REV_TYPE_QUZ) { trans->cfg = &iwl_ax101_cfg_quz_hr; } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) == CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR)) { trans->cfg = &iwl_ax101_cfg_qu_hr; } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) == CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_JF)) { trans->cfg = &iwl22000_2ax_cfg_jf; } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) == CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HRCDB)) { IWL_ERR(trans, "RF ID HRCDB is not supported\n"); ret = -EINVAL; goto out_no_pci; } else { IWL_ERR(trans, "Unrecognized RF ID 0x%08x\n", CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id)); ret = -EINVAL; goto out_no_pci; } } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) == CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) && ((trans->cfg != &iwl_ax200_cfg_cc && trans->cfg != &killer1650x_2ax_cfg && trans->cfg != &killer1650w_2ax_cfg && trans->cfg != &iwl_ax201_cfg_quz_hr) || trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0)) { u32 hw_status; hw_status = iwl_read_prph(trans, UMAG_GEN_HW_STATUS); if (CSR_HW_RF_STEP(trans->hw_rf_id) == SILICON_B_STEP) { if (hw_status & UMAG_GEN_HW_IS_FPGA) trans->cfg = &iwl22000_2ax_cfg_qnj_hr_b0_f0; else trans->cfg = &iwl22000_2ax_cfg_qnj_hr_b0; } else if ((hw_status & UMAG_GEN_HW_IS_FPGA) && CSR_HW_RF_STEP(trans->hw_rf_id) == SILICON_A_STEP) { trans->cfg = &iwl22000_2ax_cfg_qnj_hr_a0_f0; } else { /* * a step no FPGA */ trans->cfg = &iwl22000_2ac_cfg_hr; } } /* * The RF_ID is set to zero in blank OTP so read version * to extract the RF_ID. 
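 *
 * In short (mirroring the fallback below): the upper byte of SD_REG_VER
 * is compared against known values under NIC access, roughly:
 *
 *	ver = iwl_read_prph_no_grab(trans, SD_REG_VER) & 0xff00;
 *	// REG_VER_RF_ID_JF -> JF radio; anything else -> assume HR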
*/ if (trans->cfg->rf_id && !CSR_HW_RFID_TYPE(trans->hw_rf_id)) { unsigned long flags; if (iwl_trans_grab_nic_access(trans, &flags)) { u32 val; val = iwl_read_umac_prph_no_grab(trans, WFPM_CTRL_REG); val |= ENABLE_WFPM; iwl_write_umac_prph_no_grab(trans, WFPM_CTRL_REG, val); val = iwl_read_prph_no_grab(trans, SD_REG_VER); val &= 0xff00; switch (val) { case REG_VER_RF_ID_JF: trans->hw_rf_id = CSR_HW_RF_ID_TYPE_JF; break; /* TODO: get value for REG_VER_RF_ID_HR */ default: trans->hw_rf_id = CSR_HW_RF_ID_TYPE_HR; } iwl_trans_release_nic_access(trans, &flags); } } #endif iwl_pcie_set_interrupt_capa(pdev, trans); trans->hw_id = (pdev->device << 16) + pdev->subsystem_device; snprintf(trans->hw_id_str, sizeof(trans->hw_id_str), "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device); /* Initialize the wait queue for commands */ init_waitqueue_head(&trans_pcie->wait_command_queue); if (trans_pcie->msix_enabled) { ret = iwl_pcie_init_msix_handler(pdev, trans_pcie); if (ret) goto out_no_pci; } else { ret = iwl_pcie_alloc_ict(trans); if (ret) goto out_no_pci; ret = devm_request_threaded_irq(&pdev->dev, pdev->irq, iwl_pcie_isr, iwl_pcie_irq_handler, IRQF_SHARED, DRV_NAME, trans); if (ret) { IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq); goto out_free_ict; } trans_pcie->inta_mask = CSR_INI_SET_MASK; } trans_pcie->rba.alloc_wq = alloc_workqueue("rb_allocator", WQ_HIGHPRI | WQ_UNBOUND, 1); INIT_WORK(&trans_pcie->rba.rx_alloc, iwl_pcie_rx_allocator_work); #ifdef CPTCFG_IWLWIFI_DEBUGFS trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_CLOSED; mutex_init(&trans_pcie->fw_mon_data.mutex); #endif return trans; out_free_ict: iwl_pcie_free_ict(trans); out_no_pci: free_percpu(trans_pcie->tso_hdr_page); iwl_trans_free(trans); return ERR_PTR(ret); } void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); unsigned long timeout = jiffies + IWL_TRANS_NMI_TIMEOUT; bool interrupts_enabled = test_bit(STATUS_INT_ENABLED, &trans->status); u32 inta_addr, sw_err_bit; if (trans_pcie->msix_enabled) { inta_addr = CSR_MSIX_HW_INT_CAUSES_AD; sw_err_bit = MSIX_HW_INT_CAUSES_REG_SW_ERR; } else { inta_addr = CSR_INT; sw_err_bit = CSR_INT_BIT_SW_ERR; } /* if the interrupts were already disabled, there is no point in * calling iwl_disable_interrupts */ if (interrupts_enabled) iwl_disable_interrupts(trans); iwl_force_nmi(trans); while (time_after(timeout, jiffies)) { u32 inta_hw = iwl_read32(trans, inta_addr); /* Error detected by uCode */ if (inta_hw & sw_err_bit) { /* Clear causes register */ iwl_write32(trans, inta_addr, inta_hw & sw_err_bit); break; } mdelay(1); } /* enable interrupts only if there were already enabled before this * function to avoid a case were the driver enable interrupts before * proper configurations were made */ if (interrupts_enabled) iwl_enable_interrupts(trans); iwl_trans_fw_error(trans); } backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c000066400000000000000000001123451351064634500271450ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. 
* * GPL LICENSE SUMMARY * * Copyright(c) 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * BSD LICENSE * * Copyright(c) 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #include #include #include "iwl-debug.h" #include "iwl-csr.h" #include "iwl-io.h" #include "internal.h" #include "fw/api/tx.h" /* * iwl_pcie_gen2_tx_stop - Stop all Tx DMA channels */ void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int txq_id; /* * This function can be called before the op_mode disabled the * queues. This happens when we have an rfkill interrupt. * Since we stop Tx altogether - mark the queues as stopped. 
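 *
 * Clearing both bitmaps below makes every queue look stopped and
 * unused at once, so nothing new gets queued while the loop that
 * follows unmaps whatever is still in flight on each queue.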
*/ memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped)); memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used)); /* Unmap DMA from host system and free skb's */ for (txq_id = 0; txq_id < ARRAY_SIZE(trans_pcie->txq); txq_id++) { if (!trans_pcie->txq[txq_id]) continue; iwl_pcie_gen2_txq_unmap(trans, txq_id); } } /* * iwl_pcie_txq_update_byte_tbl - Set up entry in Tx byte-count array */ void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie, struct iwl_txq *txq, u16 byte_cnt, int num_tbs) { struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr; struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie); struct iwl_gen3_bc_tbl *scd_bc_tbl_gen3 = txq->bc_tbl.addr; int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr); u8 filled_tfd_size, num_fetch_chunks; u16 len = byte_cnt; __le16 bc_ent; if (trans_pcie->bc_table_dword) len = DIV_ROUND_UP(len, 4); if (WARN_ON(len > 0xFFF || idx >= txq->n_window)) return; filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) + num_tbs * sizeof(struct iwl_tfh_tb); /* * filled_tfd_size contains the number of filled bytes in the TFD. * Dividing it by 64 will give the number of chunks to fetch * to SRAM- 0 for one chunk, 1 for 2 and so on. * If, for example, TFD contains only 3 TBs then 32 bytes * of the TFD are used, and only one chunk of 64 bytes should * be fetched */ num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1; bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12)); if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) scd_bc_tbl_gen3->tfd_offset[idx] = bc_ent; else scd_bc_tbl->tfd_offset[idx] = bc_ent; } /* * iwl_pcie_gen2_txq_inc_wr_ptr - Send new write index to hardware */ void iwl_pcie_gen2_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq) { lockdep_assert_held(&txq->lock); IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq->id, txq->write_ptr); /* * if not in power-save mode, uCode will never sleep when we're * trying to tx (during RFKILL, we're not trying to tx). 
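 *
 * The doorbell write below packs the queue id into the upper half of
 * the word, roughly:
 *
 *	HBUS_TARG_WRPTR <- write_ptr | (txq_id << 16)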
*/ iwl_write32(trans, HBUS_TARG_WRPTR, txq->write_ptr | (txq->id << 16)); } static u8 iwl_pcie_gen2_get_num_tbs(struct iwl_trans *trans, struct iwl_tfh_tfd *tfd) { return le16_to_cpu(tfd->num_tbs) & 0x1f; } static void iwl_pcie_gen2_tfd_unmap(struct iwl_trans *trans, struct iwl_cmd_meta *meta, struct iwl_tfh_tfd *tfd) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int i, num_tbs; /* Sanity check on number of chunks */ num_tbs = iwl_pcie_gen2_get_num_tbs(trans, tfd); if (num_tbs > trans_pcie->max_tbs) { IWL_ERR(trans, "Too many chunks: %i\n", num_tbs); return; } /* first TB is never freed - it's the bidirectional DMA data */ for (i = 1; i < num_tbs; i++) { if (meta->tbs & BIT(i)) dma_unmap_page(trans->dev, le64_to_cpu(tfd->tbs[i].addr), le16_to_cpu(tfd->tbs[i].tb_len), DMA_TO_DEVICE); else dma_unmap_single(trans->dev, le64_to_cpu(tfd->tbs[i].addr), le16_to_cpu(tfd->tbs[i].tb_len), DMA_TO_DEVICE); } tfd->num_tbs = 0; } static void iwl_pcie_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq) { /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and * idx is bounded by n_window */ int idx = iwl_pcie_get_cmd_index(txq, txq->read_ptr); lockdep_assert_held(&txq->lock); iwl_pcie_gen2_tfd_unmap(trans, &txq->entries[idx].meta, iwl_pcie_get_tfd(trans, txq, idx)); /* free SKB */ if (txq->entries) { struct sk_buff *skb; skb = txq->entries[idx].skb; /* Can be called from irqs-disabled context * If skb is not NULL, it means that the whole queue is being * freed and that the queue is not empty - free the skb */ if (skb) { iwl_op_mode_free_skb(trans->op_mode, skb); txq->entries[idx].skb = NULL; } } } static int iwl_pcie_gen2_set_tb(struct iwl_trans *trans, struct iwl_tfh_tfd *tfd, dma_addr_t addr, u16 len) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int idx = iwl_pcie_gen2_get_num_tbs(trans, tfd); struct iwl_tfh_tb *tb; if (WARN_ON(idx >= IWL_TFH_NUM_TBS)) return -EINVAL; tb = &tfd->tbs[idx]; /* Each TFD can point to a maximum max_tbs Tx buffers */ if (le16_to_cpu(tfd->num_tbs) >= trans_pcie->max_tbs) { IWL_ERR(trans, "Error can not send more than %d chunks\n", trans_pcie->max_tbs); return -EINVAL; } put_unaligned_le64(addr, &tb->addr); tb->tb_len = cpu_to_le16(len); tfd->num_tbs = cpu_to_le16(idx + 1); return idx; } static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_tfh_tfd *tfd, int start_len, u8 hdr_len, struct iwl_device_cmd *dev_cmd) { #ifdef CONFIG_INET struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload; struct ieee80211_hdr *hdr = (void *)skb->data; unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room; unsigned int mss = skb_shinfo(skb)->gso_size; u16 length, iv_len, amsdu_pad; u8 *start_hdr; struct iwl_tso_hdr_page *hdr_page; struct page **page_ptr; struct tso_t tso; /* if the packet is protected, then it must be CCMP or GCMP */ iv_len = ieee80211_has_protected(hdr->frame_control) ? 
IEEE80211_CCMP_HDR_LEN : 0; trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr, start_len, 0); ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb); snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb); total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len - iv_len; amsdu_pad = 0; /* total amount of header we may need for this A-MSDU */ hdr_room = DIV_ROUND_UP(total_len, mss) * (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len; /* Our device supports 9 segments at most, it will fit in 1 page */ hdr_page = get_page_hdr(trans, hdr_room); if (!hdr_page) return -ENOMEM; get_page(hdr_page->page); start_hdr = hdr_page->pos; page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs); *page_ptr = hdr_page->page; memcpy(hdr_page->pos, skb->data + hdr_len, iv_len); hdr_page->pos += iv_len; /* * Pull the ieee80211 header + IV to be able to use TSO core, * we will restore it for the tx_status flow. */ skb_pull(skb, hdr_len + iv_len); /* * Remove the length of all the headers that we don't actually * have in the MPDU by themselves, but that we duplicate into * all the different MSDUs inside the A-MSDU. */ le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen); tso_start(skb, &tso); while (total_len) { /* this is the data left for this subframe */ unsigned int data_left = min_t(unsigned int, mss, total_len); struct sk_buff *csum_skb = NULL; unsigned int tb_len; dma_addr_t tb_phys; u8 *subf_hdrs_start = hdr_page->pos; total_len -= data_left; memset(hdr_page->pos, 0, amsdu_pad); hdr_page->pos += amsdu_pad; amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen + data_left)) & 0x3; ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr)); hdr_page->pos += ETH_ALEN; ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr)); hdr_page->pos += ETH_ALEN; length = snap_ip_tcp_hdrlen + data_left; *((__be16 *)hdr_page->pos) = cpu_to_be16(length); hdr_page->pos += sizeof(length); /* * This will copy the SNAP as well which will be considered * as MAC header. 
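 *
 * At this point the subframe header sits in hdr_page as
 * [pad][DA][SA][len], and tso_build_hdr() below appends the
 * SNAP/IP/TCP portion covering the data_left payload bytes of
 * this subframe.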
*/ tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len); hdr_page->pos += snap_ip_tcp_hdrlen; tb_len = hdr_page->pos - start_hdr; tb_phys = dma_map_single(trans->dev, start_hdr, tb_len, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(trans->dev, tb_phys))) { dev_kfree_skb(csum_skb); goto out_err; } iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len); trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr, tb_len); /* add this subframe's headers' length to the tx_cmd */ le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start); /* prepare the start_hdr for the next subframe */ start_hdr = hdr_page->pos; /* put the payload */ while (data_left) { tb_len = min_t(unsigned int, tso.size, data_left); tb_phys = dma_map_single(trans->dev, tso.data, tb_len, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(trans->dev, tb_phys))) { dev_kfree_skb(csum_skb); goto out_err; } iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len); trace_iwlwifi_dev_tx_tb(trans->dev, skb, tso.data, tb_len); data_left -= tb_len; tso_build_data(skb, &tso, tb_len); } } /* re -add the WiFi header and IV */ skb_push(skb, hdr_len + iv_len); return 0; out_err: #endif return -EINVAL; } static struct iwl_tfh_tfd *iwl_pcie_gen2_build_tx_amsdu(struct iwl_trans *trans, struct iwl_txq *txq, struct iwl_device_cmd *dev_cmd, struct sk_buff *skb, struct iwl_cmd_meta *out_meta, int hdr_len, int tx_cmd_len) { int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr); struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx); dma_addr_t tb_phys; int len; void *tb1_addr; tb_phys = iwl_pcie_get_first_tb_dma(txq, idx); iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE); /* * The second TB (tb1) points to the remainder of the TX command * and the 802.11 header - dword aligned size * (This calculation modifies the TX command, so do it before the * setup of the first TB) */ len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len - IWL_FIRST_TB_SIZE; /* do not align A-MSDU to dword as the subframe header aligns it */ /* map the data for TB1 */ tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE; tb_phys = dma_map_single(trans->dev, tb1_addr, len, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(trans->dev, tb_phys))) goto out_err; iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, len); if (iwl_pcie_gen2_build_amsdu(trans, skb, tfd, len + IWL_FIRST_TB_SIZE, hdr_len, dev_cmd)) goto out_err; /* building the A-MSDU might have changed this data, memcpy it now */ memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE); return tfd; out_err: iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd); return NULL; } static int iwl_pcie_gen2_tx_add_frags(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_tfh_tfd *tfd, struct iwl_cmd_meta *out_meta) { int i; for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; dma_addr_t tb_phys; int tb_idx; if (!skb_frag_size(frag)) continue; tb_phys = skb_frag_dma_map(trans->dev, frag, 0, skb_frag_size(frag), DMA_TO_DEVICE); if (unlikely(dma_mapping_error(trans->dev, tb_phys))) return -ENOMEM; tb_idx = iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, skb_frag_size(frag)); trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb_frag_address(frag), skb_frag_size(frag)); if (tb_idx < 0) return tb_idx; out_meta->tbs |= BIT(tb_idx); } return 0; } static struct iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans, struct iwl_txq *txq, struct iwl_device_cmd *dev_cmd, struct sk_buff *skb, struct iwl_cmd_meta *out_meta, int hdr_len, int tx_cmd_len, bool pad) { int idx = iwl_pcie_get_cmd_index(txq, 
txq->write_ptr); struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx); dma_addr_t tb_phys; int len, tb1_len, tb2_len; void *tb1_addr; struct sk_buff *frag; tb_phys = iwl_pcie_get_first_tb_dma(txq, idx); /* The first TB points to bi-directional DMA data */ memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE); iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE); /* * The second TB (tb1) points to the remainder of the TX command * and the 802.11 header - dword aligned size * (This calculation modifies the TX command, so do it before the * setup of the first TB) */ len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len - IWL_FIRST_TB_SIZE; if (pad) tb1_len = ALIGN(len, 4); else tb1_len = len; /* map the data for TB1 */ tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE; tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(trans->dev, tb_phys))) goto out_err; iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb1_len); trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, hdr_len); /* set up TFD's third entry to point to remainder of skb's head */ tb2_len = skb_headlen(skb) - hdr_len; if (tb2_len > 0) { tb_phys = dma_map_single(trans->dev, skb->data + hdr_len, tb2_len, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(trans->dev, tb_phys))) goto out_err; iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb2_len); trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb->data + hdr_len, tb2_len); } if (iwl_pcie_gen2_tx_add_frags(trans, skb, tfd, out_meta)) goto out_err; skb_walk_frags(skb, frag) { tb_phys = dma_map_single(trans->dev, frag->data, skb_headlen(frag), DMA_TO_DEVICE); if (unlikely(dma_mapping_error(trans->dev, tb_phys))) goto out_err; iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, skb_headlen(frag)); trace_iwlwifi_dev_tx_tb(trans->dev, skb, frag->data, skb_headlen(frag)); if (iwl_pcie_gen2_tx_add_frags(trans, frag, tfd, out_meta)) goto out_err; } return tfd; out_err: iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd); return NULL; } static struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq, struct iwl_device_cmd *dev_cmd, struct sk_buff *skb, struct iwl_cmd_meta *out_meta) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr); struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx); int len, hdr_len; bool amsdu; /* There must be data left over for TB1 or this code must be changed */ BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE); memset(tfd, 0, sizeof(*tfd)); if (trans->cfg->device_family < IWL_DEVICE_FAMILY_22560) len = sizeof(struct iwl_tx_cmd_gen2); else len = sizeof(struct iwl_tx_cmd_gen3); amsdu = ieee80211_is_data_qos(hdr->frame_control) && (*ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_A_MSDU_PRESENT); hdr_len = ieee80211_hdrlen(hdr->frame_control); /* * Only build A-MSDUs here if doing so by GSO, otherwise it may be * an A-MSDU for other reasons, e.g. NAN or an A-MSDU having been * built in the higher layers already. 
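 *
 * Put differently: the A-MSDU path is taken only when GSO supplied a
 * gso_size to segment by; a frame that merely carries the
 * A-MSDU-present QoS bit goes through the regular path, with the
 * dword padding of TB1 disabled (the !amsdu argument below).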
*/ if (amsdu && skb_shinfo(skb)->gso_size) return iwl_pcie_gen2_build_tx_amsdu(trans, txq, dev_cmd, skb, out_meta, hdr_len, len); return iwl_pcie_gen2_build_tx(trans, txq, dev_cmd, skb, out_meta, hdr_len, len, !amsdu); } int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_device_cmd *dev_cmd, int txq_id) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_cmd_meta *out_meta; struct iwl_txq *txq = trans_pcie->txq[txq_id]; u16 cmd_len; int idx; void *tfd; if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used), "TX on unused queue %d\n", txq_id)) return -EINVAL; if (skb_is_nonlinear(skb) && skb_shinfo(skb)->nr_frags > IWL_PCIE_MAX_FRAGS(trans_pcie) && __skb_linearize(skb)) return -ENOMEM; spin_lock(&txq->lock); if (iwl_queue_space(trans, txq) < txq->high_mark) { iwl_stop_queue(trans, txq); /* don't put the packet on the ring, if there is no room */ if (unlikely(iwl_queue_space(trans, txq) < 3)) { struct iwl_device_cmd **dev_cmd_ptr; dev_cmd_ptr = (void *)((u8 *)skb->cb + trans_pcie->dev_cmd_offs); *dev_cmd_ptr = dev_cmd; __skb_queue_tail(&txq->overflow_q, skb); spin_unlock(&txq->lock); return 0; } } idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr); /* Set up driver data for this TFD */ txq->entries[idx].skb = skb; txq->entries[idx].cmd = dev_cmd; dev_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) | INDEX_TO_SEQ(idx))); /* Set up first empty entry in queue's array of Tx/cmd buffers */ out_meta = &txq->entries[idx].meta; out_meta->flags = 0; tfd = iwl_pcie_gen2_build_tfd(trans, txq, dev_cmd, skb, out_meta); if (!tfd) { spin_unlock(&txq->lock); return -1; } if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) { struct iwl_tx_cmd_gen3 *tx_cmd_gen3 = (void *)dev_cmd->payload; cmd_len = le16_to_cpu(tx_cmd_gen3->len); } else { struct iwl_tx_cmd_gen2 *tx_cmd_gen2 = (void *)dev_cmd->payload; cmd_len = le16_to_cpu(tx_cmd_gen2->len); } /* Set up entry for this TFD in Tx byte-count array */ iwl_pcie_gen2_update_byte_tbl(trans_pcie, txq, cmd_len, iwl_pcie_gen2_get_num_tbs(trans, tfd)); /* start timer if queue currently empty */ if (txq->read_ptr == txq->write_ptr && txq->wd_timeout) mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout); /* Tell device the write index *just past* this latest filled TFD */ txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr); iwl_pcie_gen2_txq_inc_wr_ptr(trans, txq); /* * At this point the frame is "transmitted" successfully * and we will get a TX status notification eventually. */ spin_unlock(&txq->lock); return 0; } /*************** HOST COMMAND QUEUE FUNCTIONS *****/ /* * iwl_pcie_gen2_enqueue_hcmd - enqueue a uCode command * @priv: device private data point * @cmd: a pointer to the ucode command structure * * The function returns < 0 values to indicate the operation * failed. On success, it returns the index (>= 0) of command in the * command queue. 
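 *
 * Typical caller usage (cf. iwl_pcie_gen2_send_hcmd_sync()):
 *
 *	idx = iwl_pcie_gen2_enqueue_hcmd(trans, cmd);
 *	if (idx < 0)
 *		return idx;	// -EINVAL, -ENOMEM or -ENOSPC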
*/ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue]; struct iwl_device_cmd *out_cmd; struct iwl_cmd_meta *out_meta; unsigned long flags; void *dup_buf = NULL; dma_addr_t phys_addr; int i, cmd_pos, idx; u16 copy_size, cmd_size, tb0_size; bool had_nocopy = false; u8 group_id = iwl_cmd_groupid(cmd->id); const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD]; u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD]; struct iwl_tfh_tfd *tfd; copy_size = sizeof(struct iwl_cmd_header_wide); cmd_size = sizeof(struct iwl_cmd_header_wide); for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { cmddata[i] = cmd->data[i]; cmdlen[i] = cmd->len[i]; if (!cmd->len[i]) continue; /* need at least IWL_FIRST_TB_SIZE copied */ if (copy_size < IWL_FIRST_TB_SIZE) { int copy = IWL_FIRST_TB_SIZE - copy_size; if (copy > cmdlen[i]) copy = cmdlen[i]; cmdlen[i] -= copy; cmddata[i] += copy; copy_size += copy; } if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) { had_nocopy = true; if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) { idx = -EINVAL; goto free_dup_buf; } } else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) { /* * This is also a chunk that isn't copied * to the static buffer so set had_nocopy. */ had_nocopy = true; /* only allowed once */ if (WARN_ON(dup_buf)) { idx = -EINVAL; goto free_dup_buf; } dup_buf = kmemdup(cmddata[i], cmdlen[i], GFP_ATOMIC); if (!dup_buf) return -ENOMEM; } else { /* NOCOPY must not be followed by normal! */ if (WARN_ON(had_nocopy)) { idx = -EINVAL; goto free_dup_buf; } copy_size += cmdlen[i]; } cmd_size += cmd->len[i]; } /* * If any of the command structures end up being larger than the * TFD_MAX_PAYLOAD_SIZE and they aren't dynamically allocated into * separate TFDs, then we will need to increase the size of the buffers */ if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE, "Command %s (%#x) is too large (%d bytes)\n", iwl_get_cmd_string(trans, cmd->id), cmd->id, copy_size)) { idx = -EINVAL; goto free_dup_buf; } spin_lock_bh(&txq->lock); idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr); tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr); memset(tfd, 0, sizeof(*tfd)); if (iwl_queue_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 
2 : 1)) { spin_unlock_bh(&txq->lock); IWL_ERR(trans, "No space in command queue\n"); iwl_op_mode_cmd_queue_full(trans->op_mode); idx = -ENOSPC; goto free_dup_buf; } out_cmd = txq->entries[idx].cmd; out_meta = &txq->entries[idx].meta; /* re-initialize to NULL */ memset(out_meta, 0, sizeof(*out_meta)); if (cmd->flags & CMD_WANT_SKB) out_meta->source = cmd; /* set up the header */ out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id); out_cmd->hdr_wide.group_id = group_id; out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id); out_cmd->hdr_wide.length = cpu_to_le16(cmd_size - sizeof(struct iwl_cmd_header_wide)); out_cmd->hdr_wide.reserved = 0; out_cmd->hdr_wide.sequence = cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) | INDEX_TO_SEQ(txq->write_ptr)); cmd_pos = sizeof(struct iwl_cmd_header_wide); copy_size = sizeof(struct iwl_cmd_header_wide); /* and copy the data that needs to be copied */ for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { int copy; if (!cmd->len[i]) continue; /* copy everything if not nocopy/dup */ if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY | IWL_HCMD_DFL_DUP))) { copy = cmd->len[i]; memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy); cmd_pos += copy; copy_size += copy; continue; } /* * Otherwise we need at least IWL_FIRST_TB_SIZE copied * in total (for bi-directional DMA), but copy up to what * we can fit into the payload for debug dump purposes. */ copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]); memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy); cmd_pos += copy; /* However, treat copy_size the proper way, we need it below */ if (copy_size < IWL_FIRST_TB_SIZE) { copy = IWL_FIRST_TB_SIZE - copy_size; if (copy > cmd->len[i]) copy = cmd->len[i]; copy_size += copy; } } IWL_DEBUG_HC(trans, "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n", iwl_get_cmd_string(trans, cmd->id), group_id, out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence), cmd_size, txq->write_ptr, idx, trans_pcie->cmd_queue); /* start the TFD with the minimum copy bytes */ tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE); memcpy(&txq->first_tb_bufs[idx], out_cmd, tb0_size); iwl_pcie_gen2_set_tb(trans, tfd, iwl_pcie_get_first_tb_dma(txq, idx), tb0_size); /* map first command fragment, if any remains */ if (copy_size > tb0_size) { phys_addr = dma_map_single(trans->dev, (u8 *)out_cmd + tb0_size, copy_size - tb0_size, DMA_TO_DEVICE); if (dma_mapping_error(trans->dev, phys_addr)) { idx = -ENOMEM; iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd); goto out; } iwl_pcie_gen2_set_tb(trans, tfd, phys_addr, copy_size - tb0_size); } /* map the remaining (adjusted) nocopy/dup fragments */ for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { const void *data = cmddata[i]; if (!cmdlen[i]) continue; if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY | IWL_HCMD_DFL_DUP))) continue; if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) data = dup_buf; phys_addr = dma_map_single(trans->dev, (void *)data, cmdlen[i], DMA_TO_DEVICE); if (dma_mapping_error(trans->dev, phys_addr)) { idx = -ENOMEM; iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd); goto out; } iwl_pcie_gen2_set_tb(trans, tfd, phys_addr, cmdlen[i]); } BUILD_BUG_ON(IWL_TFH_NUM_TBS > sizeof(out_meta->tbs) * BITS_PER_BYTE); out_meta->flags = cmd->flags; if (WARN_ON_ONCE(txq->entries[idx].free_buf)) kzfree(txq->entries[idx].free_buf); txq->entries[idx].free_buf = dup_buf; trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide); /* start timer if queue currently empty */ if (txq->read_ptr == txq->write_ptr && txq->wd_timeout) mod_timer(&txq->stuck_timer, 
jiffies + txq->wd_timeout); spin_lock_irqsave(&trans_pcie->reg_lock, flags); /* Increment and update queue's write index */ txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr); iwl_pcie_gen2_txq_inc_wr_ptr(trans, txq); spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); out: spin_unlock_bh(&txq->lock); free_dup_buf: if (idx < 0) kfree(dup_buf); return idx; } #define HOST_COMPLETE_TIMEOUT (2 * HZ * CPTCFG_IWL_TIMEOUT_FACTOR) static int iwl_pcie_gen2_send_hcmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); const char *cmd_str = iwl_get_cmd_string(trans, cmd->id); struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue]; int cmd_idx; int ret; IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", cmd_str); if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status), "Command %s: a command is already active!\n", cmd_str)) return -EIO; IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", cmd_str); cmd_idx = iwl_pcie_gen2_enqueue_hcmd(trans, cmd); if (cmd_idx < 0) { ret = cmd_idx; clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n", cmd_str, ret); return ret; } ret = wait_event_timeout(trans_pcie->wait_command_queue, !test_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status), HOST_COMPLETE_TIMEOUT); if (!ret) { IWL_ERR(trans, "Error sending %s: time out after %dms.\n", cmd_str, jiffies_to_msecs(HOST_COMPLETE_TIMEOUT)); IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n", txq->read_ptr, txq->write_ptr); clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n", cmd_str); ret = -ETIMEDOUT; iwl_trans_pcie_sync_nmi(trans); goto cancel; } if (test_bit(STATUS_FW_ERROR, &trans->status)) { IWL_ERR(trans, "FW error in SYNC CMD %s\n", cmd_str); dump_stack(); ret = -EIO; goto cancel; } if (!(cmd->flags & CMD_SEND_IN_RFKILL) && test_bit(STATUS_RFKILL_OPMODE, &trans->status)) { IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n"); ret = -ERFKILL; goto cancel; } if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) { IWL_ERR(trans, "Error: Response NULL in '%s'\n", cmd_str); ret = -EIO; goto cancel; } return 0; cancel: if (cmd->flags & CMD_WANT_SKB) { /* * Cancel the CMD_WANT_SKB flag for the cmd in the * TX cmd queue. Otherwise in case the cmd comes * in later, it will possibly set an invalid * address (cmd->meta.source). */ txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB; } if (cmd->resp_pkt) { iwl_free_resp(cmd); cmd->resp_pkt = NULL; } return ret; } int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) { if (!(cmd->flags & CMD_SEND_IN_RFKILL) && test_bit(STATUS_RFKILL_OPMODE, &trans->status)) { IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n", cmd->id); return -ERFKILL; } if (cmd->flags & CMD_ASYNC) { int ret; /* An asynchronous command can not expect an SKB to be set. 
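 *
 * i.e. the only valid async shape is fire-and-forget, for instance
 * (illustrative command choice):
 *
 *	struct iwl_host_cmd cmd = { .id = ECHO_CMD, .flags = CMD_ASYNC };
 *	iwl_trans_send_cmd(trans, &cmd);	// no resp_pkt to read back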
*/ if (WARN_ON(cmd->flags & CMD_WANT_SKB)) return -EINVAL; ret = iwl_pcie_gen2_enqueue_hcmd(trans, cmd); if (ret < 0) { IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n", iwl_get_cmd_string(trans, cmd->id), ret); return ret; } return 0; } return iwl_pcie_gen2_send_hcmd_sync(trans, cmd); } /* * iwl_pcie_gen2_txq_unmap - Unmap any remaining DMA mappings and free skb's */ void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_txq *txq = trans_pcie->txq[txq_id]; spin_lock_bh(&txq->lock); while (txq->write_ptr != txq->read_ptr) { IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n", txq_id, txq->read_ptr); if (txq_id != trans_pcie->cmd_queue) { int idx = iwl_pcie_get_cmd_index(txq, txq->read_ptr); struct sk_buff *skb = txq->entries[idx].skb; if (WARN_ON_ONCE(!skb)) continue; iwl_pcie_free_tso_page(trans_pcie, skb); } iwl_pcie_gen2_free_tfd(trans, txq); txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr); } while (!skb_queue_empty(&txq->overflow_q)) { struct sk_buff *skb = __skb_dequeue(&txq->overflow_q); iwl_op_mode_free_skb(trans->op_mode, skb); } spin_unlock_bh(&txq->lock); /* just in case - this queue may have been stopped */ iwl_wake_queue(trans, txq); } void iwl_pcie_gen2_txq_free_memory(struct iwl_trans *trans, struct iwl_txq *txq) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct device *dev = trans->dev; /* De-alloc circular buffer of TFDs */ if (txq->tfds) { dma_free_coherent(dev, trans_pcie->tfd_size * txq->n_window, txq->tfds, txq->dma_addr); dma_free_coherent(dev, sizeof(*txq->first_tb_bufs) * txq->n_window, txq->first_tb_bufs, txq->first_tb_dma); } kfree(txq->entries); iwl_pcie_free_dma_ptr(trans, &txq->bc_tbl); kfree(txq); } /* * iwl_pcie_txq_free - Deallocate DMA queue. * @txq: Transmit queue to deallocate. * * Empty queue by removing and destroying all BD's. * Free all buffers. * 0-fill, but do not free "txq" descriptor structure. */ static void iwl_pcie_gen2_txq_free(struct iwl_trans *trans, int txq_id) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_txq *txq = trans_pcie->txq[txq_id]; int i; if (WARN_ON(!txq)) return; iwl_pcie_gen2_txq_unmap(trans, txq_id); /* De-alloc array of command/tx buffers */ if (txq_id == trans_pcie->cmd_queue) for (i = 0; i < txq->n_window; i++) { kzfree(txq->entries[i].cmd); kzfree(txq->entries[i].free_buf); } del_timer_sync(&txq->stuck_timer); iwl_pcie_gen2_txq_free_memory(trans, txq); trans_pcie->txq[txq_id] = NULL; clear_bit(txq_id, trans_pcie->queue_used); } int iwl_trans_pcie_dyn_txq_alloc_dma(struct iwl_trans *trans, struct iwl_txq **intxq, int size, unsigned int timeout) { int ret; struct iwl_txq *txq; txq = kzalloc(sizeof(*txq), GFP_KERNEL); if (!txq) return -ENOMEM; ret = iwl_pcie_alloc_dma_ptr(trans, &txq->bc_tbl, (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) ? 
sizeof(struct iwl_gen3_bc_tbl) : sizeof(struct iwlagn_scd_bc_tbl)); if (ret) { IWL_ERR(trans, "Scheduler BC Table allocation failed\n"); kfree(txq); return -ENOMEM; } ret = iwl_pcie_txq_alloc(trans, txq, size, false); if (ret) { IWL_ERR(trans, "Tx queue alloc failed\n"); goto error; } ret = iwl_pcie_txq_init(trans, txq, size, false); if (ret) { IWL_ERR(trans, "Tx queue init failed\n"); goto error; } txq->wd_timeout = msecs_to_jiffies(timeout); *intxq = txq; return 0; error: iwl_pcie_gen2_txq_free_memory(trans, txq); return ret; } int iwl_trans_pcie_txq_alloc_response(struct iwl_trans *trans, struct iwl_txq *txq, struct iwl_host_cmd *hcmd) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_tx_queue_cfg_rsp *rsp; int ret, qid; u32 wr_ptr; if (WARN_ON(iwl_rx_packet_payload_len(hcmd->resp_pkt) != sizeof(*rsp))) { ret = -EINVAL; goto error_free_resp; } rsp = (void *)hcmd->resp_pkt->data; qid = le16_to_cpu(rsp->queue_number); wr_ptr = le16_to_cpu(rsp->write_pointer); if (qid >= ARRAY_SIZE(trans_pcie->txq)) { WARN_ONCE(1, "queue index %d unsupported", qid); ret = -EIO; goto error_free_resp; } if (test_and_set_bit(qid, trans_pcie->queue_used)) { WARN_ONCE(1, "queue %d already used", qid); ret = -EIO; goto error_free_resp; } txq->id = qid; trans_pcie->txq[qid] = txq; wr_ptr &= (trans->cfg->base_params->max_tfd_queue_size - 1); /* Place first TFD at index corresponding to start sequence number */ txq->read_ptr = wr_ptr; txq->write_ptr = wr_ptr; IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid); iwl_free_resp(hcmd); return qid; error_free_resp: iwl_free_resp(hcmd); iwl_pcie_gen2_txq_free_memory(trans, txq); return ret; } int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans, __le16 flags, u8 sta_id, u8 tid, int cmd_id, int size, unsigned int timeout) { struct iwl_txq *txq = NULL; struct iwl_tx_queue_cfg_cmd cmd = { .flags = flags, .sta_id = sta_id, .tid = tid, }; struct iwl_host_cmd hcmd = { .id = cmd_id, .len = { sizeof(cmd) }, .data = { &cmd, }, .flags = CMD_WANT_SKB, }; int ret; ret = iwl_trans_pcie_dyn_txq_alloc_dma(trans, &txq, size, timeout); if (ret) return ret; cmd.tfdq_addr = cpu_to_le64(txq->dma_addr); cmd.byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma); cmd.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size)); ret = iwl_trans_send_cmd(trans, &hcmd); if (ret) goto error; return iwl_trans_pcie_txq_alloc_response(trans, txq, &hcmd); error: iwl_pcie_gen2_txq_free_memory(trans, txq); return ret; } void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); /* * Upon HW Rfkill - we stop the device, and then stop the queues * in the op_mode. Just for the sake of the simplicity of the op_mode, * allow the op_mode to call txq_disable after it already called * stop_device. 
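 *
 * test_and_clear_bit() below makes the duplicated call harmless: the
 * second invocation finds the bit already clear and only warns if the
 * device is, unexpectedly, still enabled.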
*/ if (!test_and_clear_bit(queue, trans_pcie->queue_used)) { WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status), "queue %d not used", queue); return; } iwl_pcie_gen2_txq_unmap(trans, queue); IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue); } void iwl_pcie_gen2_tx_free(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int i; memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used)); /* Free all TX queues */ for (i = 0; i < ARRAY_SIZE(trans_pcie->txq); i++) { if (!trans_pcie->txq[i]) continue; iwl_pcie_gen2_txq_free(trans, i); } } int iwl_pcie_gen2_tx_init(struct iwl_trans *trans, int txq_id, int queue_size) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_txq *queue; int ret; /* alloc and init the tx queue */ if (!trans_pcie->txq[txq_id]) { queue = kzalloc(sizeof(*queue), GFP_KERNEL); if (!queue) { IWL_ERR(trans, "Not enough memory for tx queue\n"); return -ENOMEM; } trans_pcie->txq[txq_id] = queue; ret = iwl_pcie_txq_alloc(trans, queue, queue_size, true); if (ret) { IWL_ERR(trans, "Tx %d queue init failed\n", txq_id); goto error; } } else { queue = trans_pcie->txq[txq_id]; } ret = iwl_pcie_txq_init(trans, queue, queue_size, (txq_id == trans_pcie->cmd_queue)); if (ret) { IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id); goto error; } trans_pcie->txq[txq_id]->id = txq_id; set_bit(txq_id, trans_pcie->queue_used); return 0; error: iwl_pcie_gen2_tx_free(trans); return ret; } backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/pcie/tx.c000066400000000000000000002142601351064634500263130ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * The full GNU General Public License is included in this distribution in the * file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. 
* * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #include <linux/etherdevice.h> #include <linux/ieee80211.h> #include <linux/slab.h> #include <linux/sched.h> #include <net/ip6_checksum.h> #include <net/tso.h> #include "iwl-debug.h" #include "iwl-csr.h" #include "iwl-prph.h" #include "iwl-io.h" #include "iwl-scd.h" #include "iwl-op-mode.h" #include "internal.h" #include "fw/api/tx.h" #define IWL_TX_CRC_SIZE 4 #define IWL_TX_DELIMITER_SIZE 4 /*************** DMA-QUEUE-GENERAL-FUNCTIONS ***** * DMA services * * Theory of operation * * A Tx or Rx queue resides in host DRAM, and consists of a circular buffer * of buffer descriptors, each of which points to one or more data buffers for * the device to read from or fill. Driver and device exchange status of each * queue via "read" and "write" pointers. The driver keeps a minimum of 2 empty * entries in each circular buffer, to protect against confusing empty and full * queue states. * * The device reads or writes the data in the queues via the device's several * DMA/FIFO channels. Each queue is mapped to a single DMA channel. * * For a Tx queue, there are low-mark and high-mark limits. If, after queuing * a packet for Tx, free space becomes < low mark, the Tx queue is stopped. When * reclaiming packets (on the 'tx done' IRQ), if free space becomes > high mark, * the Tx queue is resumed. * ***************************************************/ int iwl_queue_space(struct iwl_trans *trans, const struct iwl_txq *q) { unsigned int max; unsigned int used; /* * To avoid ambiguity between empty and completely full queues, there * should always be less than max_tfd_queue_size elements in the queue. * If q->n_window is smaller than max_tfd_queue_size, there is no need * to reserve any queue entries for this purpose. */ if (q->n_window < trans->cfg->base_params->max_tfd_queue_size) max = q->n_window; else max = trans->cfg->base_params->max_tfd_queue_size - 1; /* * max_tfd_queue_size is a power of 2, so the following is equivalent to * modulo by max_tfd_queue_size and is well defined. */ used = (q->write_ptr - q->read_ptr) & (trans->cfg->base_params->max_tfd_queue_size - 1); if (WARN_ON(used > max)) return 0; return max - used; } /* * iwl_queue_init - Initialize queue's high/low-water and read/write indexes */ static int iwl_queue_init(struct iwl_txq *q, int slots_num) { q->n_window = slots_num; /* slots_num must be power-of-two size, otherwise * iwl_pcie_get_cmd_index is broken. 
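 *
 * (A worked example, for illustration only: iwl_pcie_get_cmd_index()
 * masks with "index & (n_window - 1)", which equals "index % n_window"
 * only when n_window is a power of two. E.g. with n_window = 32,
 * index 35 wraps to 35 & 31 = 3 as expected, while with n_window = 24
 * the mask would give 35 & 23 = 3 instead of the correct 35 % 24 = 11.)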
*/ if (WARN_ON(!is_power_of_2(slots_num))) return -EINVAL; q->low_mark = q->n_window / 4; if (q->low_mark < 4) q->low_mark = 4; q->high_mark = q->n_window / 8; if (q->high_mark < 2) q->high_mark = 2; q->write_ptr = 0; q->read_ptr = 0; return 0; } int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr, size_t size) { if (WARN_ON(ptr->addr)) return -EINVAL; ptr->addr = dma_alloc_coherent(trans->dev, size, &ptr->dma, GFP_KERNEL); if (!ptr->addr) return -ENOMEM; ptr->size = size; return 0; } void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr) { if (unlikely(!ptr->addr)) return; dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma); memset(ptr, 0, sizeof(*ptr)); } static void iwl_pcie_txq_stuck_timer(struct timer_list *t) { struct iwl_txq *txq = from_timer(txq, t, stuck_timer); struct iwl_trans_pcie *trans_pcie = txq->trans_pcie; struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie); spin_lock(&txq->lock); /* check if triggered erroneously */ if (txq->read_ptr == txq->write_ptr) { spin_unlock(&txq->lock); return; } spin_unlock(&txq->lock); iwl_trans_pcie_log_scd_error(trans, txq); iwl_force_nmi(trans); } /* * iwl_pcie_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array */ static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans, struct iwl_txq *txq, u16 byte_cnt, int num_tbs) { struct iwlagn_scd_bc_tbl *scd_bc_tbl; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int write_ptr = txq->write_ptr; int txq_id = txq->id; u8 sec_ctl = 0; u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE; __le16 bc_ent; struct iwl_tx_cmd *tx_cmd = (void *)txq->entries[txq->write_ptr].cmd->payload; u8 sta_id = tx_cmd->sta_id; scd_bc_tbl = trans_pcie->scd_bc_tbls.addr; sec_ctl = tx_cmd->sec_ctl; switch (sec_ctl & TX_CMD_SEC_MSK) { case TX_CMD_SEC_CCM: len += IEEE80211_CCMP_MIC_LEN; break; case TX_CMD_SEC_TKIP: len += IEEE80211_TKIP_ICV_LEN; break; case TX_CMD_SEC_WEP: len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN; break; } if (trans_pcie->bc_table_dword) len = DIV_ROUND_UP(len, 4); if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX)) return; bc_ent = cpu_to_le16(len | (sta_id << 12)); scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent; if (write_ptr < TFD_QUEUE_SIZE_BC_DUP) scd_bc_tbl[txq_id]. tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent; } static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans, struct iwl_txq *txq) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr; int txq_id = txq->id; int read_ptr = txq->read_ptr; u8 sta_id = 0; __le16 bc_ent; struct iwl_tx_cmd *tx_cmd = (void *)txq->entries[read_ptr].cmd->payload; WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX); if (txq_id != trans_pcie->cmd_queue) sta_id = tx_cmd->sta_id; bc_ent = cpu_to_le16(1 | (sta_id << 12)); scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent; if (read_ptr < TFD_QUEUE_SIZE_BC_DUP) scd_bc_tbl[txq_id]. tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent; } /* * iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware */ static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); u32 reg = 0; int txq_id = txq->id; lockdep_assert_held(&txq->lock); /* * explicitly wake up the NIC if: * 1. shadow registers aren't enabled * 2. NIC is woken up for CMD regardless of shadow outside this function * 3. 
there is a chance that the NIC is asleep */ if (!trans->cfg->base_params->shadow_reg_enable && txq_id != trans_pcie->cmd_queue && test_bit(STATUS_TPOWER_PMI, &trans->status)) { /* * wake up nic if it's powered down ... * uCode will wake up, and interrupt us again, so next * time we'll skip this part. */ reg = iwl_read32(trans, CSR_UCODE_DRV_GP1); if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { IWL_DEBUG_INFO(trans, "Tx queue %d requesting wakeup, GP1 = 0x%x\n", txq_id, reg); iwl_set_bit(trans, CSR_GP_CNTRL, BIT(trans->cfg->csr->flag_mac_access_req)); txq->need_update = true; return; } } /* * if not in power-save mode, uCode will never sleep when we're * trying to tx (during RFKILL, we're not trying to tx). */ IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->write_ptr); if (!txq->block) iwl_write32(trans, HBUS_TARG_WRPTR, txq->write_ptr | (txq_id << 8)); } void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int i; for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) { struct iwl_txq *txq = trans_pcie->txq[i]; if (!test_bit(i, trans_pcie->queue_used)) continue; spin_lock_bh(&txq->lock); if (txq->need_update) { iwl_pcie_txq_inc_wr_ptr(trans, txq); txq->need_update = false; } spin_unlock_bh(&txq->lock); } } static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_trans *trans, void *_tfd, u8 idx) { if (trans->cfg->use_tfh) { struct iwl_tfh_tfd *tfd = _tfd; struct iwl_tfh_tb *tb = &tfd->tbs[idx]; return (dma_addr_t)(le64_to_cpu(tb->addr)); } else { struct iwl_tfd *tfd = _tfd; struct iwl_tfd_tb *tb = &tfd->tbs[idx]; dma_addr_t addr = get_unaligned_le32(&tb->lo); dma_addr_t hi_len; if (sizeof(dma_addr_t) <= sizeof(u32)) return addr; hi_len = le16_to_cpu(tb->hi_n_len) & 0xF; /* * shift by 16 twice to avoid warnings on 32-bit * (where this code never runs anyway due to the * if statement above) */ return addr | ((hi_len << 16) << 16); } } static inline void iwl_pcie_tfd_set_tb(struct iwl_trans *trans, void *tfd, u8 idx, dma_addr_t addr, u16 len) { struct iwl_tfd *tfd_fh = (void *)tfd; struct iwl_tfd_tb *tb = &tfd_fh->tbs[idx]; u16 hi_n_len = len << 4; put_unaligned_le32(addr, &tb->lo); hi_n_len |= iwl_get_dma_hi_addr(addr); tb->hi_n_len = cpu_to_le16(hi_n_len); tfd_fh->num_tbs = idx + 1; } static inline u8 iwl_pcie_tfd_get_num_tbs(struct iwl_trans *trans, void *_tfd) { if (trans->cfg->use_tfh) { struct iwl_tfh_tfd *tfd = _tfd; return le16_to_cpu(tfd->num_tbs) & 0x1f; } else { struct iwl_tfd *tfd = _tfd; return tfd->num_tbs & 0x1f; } } static void iwl_pcie_tfd_unmap(struct iwl_trans *trans, struct iwl_cmd_meta *meta, struct iwl_txq *txq, int index) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int i, num_tbs; void *tfd = iwl_pcie_get_tfd(trans, txq, index); /* Sanity check on number of chunks */ num_tbs = iwl_pcie_tfd_get_num_tbs(trans, tfd); if (num_tbs > trans_pcie->max_tbs) { IWL_ERR(trans, "Too many chunks: %i\n", num_tbs); /* @todo issue fatal error, it is quite a serious situation */ return; } /* first TB is never freed - it's the bidirectional DMA data */ for (i = 1; i < num_tbs; i++) { if (meta->tbs & BIT(i)) dma_unmap_page(trans->dev, iwl_pcie_tfd_tb_get_addr(trans, tfd, i), iwl_pcie_tfd_tb_get_len(trans, tfd, i), DMA_TO_DEVICE); else dma_unmap_single(trans->dev, iwl_pcie_tfd_tb_get_addr(trans, tfd, i), iwl_pcie_tfd_tb_get_len(trans, tfd, i), DMA_TO_DEVICE); } if (trans->cfg->use_tfh) { struct iwl_tfh_tfd *tfd_fh = (void *)tfd; tfd_fh->num_tbs = 0; } else { struct iwl_tfd *tfd_fh = 
(void *)tfd; tfd_fh->num_tbs = 0; } } /* * iwl_pcie_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr] * @trans - transport private data * @txq - tx queue * * Does NOT advance any TFD circular buffer read/write indexes * Does NOT free the TFD itself (which is within circular buffer) */ void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq) { /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and * idx is bounded by n_window */ int rd_ptr = txq->read_ptr; int idx = iwl_pcie_get_cmd_index(txq, rd_ptr); lockdep_assert_held(&txq->lock); /* We have only q->n_window txq->entries, but we use * TFD_QUEUE_SIZE_MAX tfds */ iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, txq, rd_ptr); /* free SKB */ if (txq->entries) { struct sk_buff *skb; skb = txq->entries[idx].skb; /* Can be called from irqs-disabled context. * If skb is not NULL, it means that the whole queue is being * freed and that the queue is not empty - free the skb */ if (skb) { iwl_op_mode_free_skb(trans->op_mode, skb); txq->entries[idx].skb = NULL; } } } static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq, dma_addr_t addr, u16 len, bool reset) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); void *tfd; u32 num_tbs; tfd = txq->tfds + trans_pcie->tfd_size * txq->write_ptr; if (reset) memset(tfd, 0, trans_pcie->tfd_size); num_tbs = iwl_pcie_tfd_get_num_tbs(trans, tfd); /* Each TFD can point to a maximum max_tbs Tx buffers */ if (num_tbs >= trans_pcie->max_tbs) { IWL_ERR(trans, "Error can not send more than %d chunks\n", trans_pcie->max_tbs); return -EINVAL; } if (WARN(addr & ~IWL_TX_DMA_MASK, "Unaligned address = %llx\n", (unsigned long long)addr)) return -EINVAL; iwl_pcie_tfd_set_tb(trans, tfd, num_tbs, addr, len); return num_tbs; } int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num, bool cmd_queue) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); size_t tfd_sz = trans_pcie->tfd_size * trans->cfg->base_params->max_tfd_queue_size; size_t tb0_buf_sz; int i; if (WARN_ON(txq->entries || txq->tfds)) return -EINVAL; if (trans->cfg->use_tfh) tfd_sz = trans_pcie->tfd_size * slots_num; timer_setup(&txq->stuck_timer, iwl_pcie_txq_stuck_timer, 0); txq->trans_pcie = trans_pcie; txq->n_window = slots_num; txq->entries = kcalloc(slots_num, sizeof(struct iwl_pcie_txq_entry), GFP_KERNEL); if (!txq->entries) goto error; if (cmd_queue) for (i = 0; i < slots_num; i++) { txq->entries[i].cmd = kmalloc(sizeof(struct iwl_device_cmd), GFP_KERNEL); if (!txq->entries[i].cmd) goto error; } /* Circular buffer of transmit frame descriptors (TFDs), * shared with device */ txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz, &txq->dma_addr, GFP_KERNEL); if (!txq->tfds) goto error; BUILD_BUG_ON(IWL_FIRST_TB_SIZE_ALIGN != sizeof(*txq->first_tb_bufs)); tb0_buf_sz = sizeof(*txq->first_tb_bufs) * slots_num; txq->first_tb_bufs = dma_alloc_coherent(trans->dev, tb0_buf_sz, &txq->first_tb_dma, GFP_KERNEL); if (!txq->first_tb_bufs) goto err_free_tfds; return 0; err_free_tfds: dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr); error: if (txq->entries && cmd_queue) for (i = 0; i < slots_num; i++) kfree(txq->entries[i].cmd); kfree(txq->entries); txq->entries = NULL; return -ENOMEM; } int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num, bool cmd_queue) { int ret; u32 tfd_queue_max_size = trans->cfg->base_params->max_tfd_queue_size; txq->need_update = false; /* 
max_tfd_queue_size must be power-of-two size, otherwise * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */ if (WARN_ONCE(tfd_queue_max_size & (tfd_queue_max_size - 1), "Max tfd queue size must be a power of two, but is %d", tfd_queue_max_size)) return -EINVAL; /* Initialize queue's high/low-water marks, and head/tail indexes */ ret = iwl_queue_init(txq, slots_num); if (ret) return ret; spin_lock_init(&txq->lock); if (cmd_queue) { static struct lock_class_key iwl_pcie_cmd_queue_lock_class; lockdep_set_class(&txq->lock, &iwl_pcie_cmd_queue_lock_class); } __skb_queue_head_init(&txq->overflow_q); return 0; } void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie, struct sk_buff *skb) { struct page **page_ptr; page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs); if (*page_ptr) { __free_page(*page_ptr); *page_ptr = NULL; } } static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); lockdep_assert_held(&trans_pcie->reg_lock); if (!trans->cfg->base_params->apmg_wake_up_wa) return; if (WARN_ON(!trans_pcie->cmd_hold_nic_awake)) return; trans_pcie->cmd_hold_nic_awake = false; __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, BIT(trans->cfg->csr->flag_mac_access_req)); } /* * iwl_pcie_txq_unmap - Unmap any remaining DMA mappings and free skb's */ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_txq *txq = trans_pcie->txq[txq_id]; spin_lock_bh(&txq->lock); while (txq->write_ptr != txq->read_ptr) { IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n", txq_id, txq->read_ptr); if (txq_id != trans_pcie->cmd_queue) { struct sk_buff *skb = txq->entries[txq->read_ptr].skb; if (WARN_ON_ONCE(!skb)) continue; iwl_pcie_free_tso_page(trans_pcie, skb); } iwl_pcie_txq_free_tfd(trans, txq); txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr); if (txq->read_ptr == txq->write_ptr) { unsigned long flags; spin_lock_irqsave(&trans_pcie->reg_lock, flags); if (txq_id == trans_pcie->cmd_queue) iwl_pcie_clear_cmd_in_flight(trans); spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); } } while (!skb_queue_empty(&txq->overflow_q)) { struct sk_buff *skb = __skb_dequeue(&txq->overflow_q); iwl_op_mode_free_skb(trans->op_mode, skb); } spin_unlock_bh(&txq->lock); /* just in case - this queue may have been stopped */ iwl_wake_queue(trans, txq); } /* * iwl_pcie_txq_free - Deallocate DMA queue. * @txq: Transmit queue to deallocate. * * Empty queue by removing and destroying all BD's. * Free all buffers. * 0-fill, but do not free "txq" descriptor structure. 
*/ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_txq *txq = trans_pcie->txq[txq_id]; struct device *dev = trans->dev; int i; if (WARN_ON(!txq)) return; iwl_pcie_txq_unmap(trans, txq_id); /* De-alloc array of command/tx buffers */ if (txq_id == trans_pcie->cmd_queue) for (i = 0; i < txq->n_window; i++) { kzfree(txq->entries[i].cmd); kzfree(txq->entries[i].free_buf); } /* De-alloc circular buffer of TFDs */ if (txq->tfds) { dma_free_coherent(dev, trans_pcie->tfd_size * trans->cfg->base_params->max_tfd_queue_size, txq->tfds, txq->dma_addr); txq->dma_addr = 0; txq->tfds = NULL; dma_free_coherent(dev, sizeof(*txq->first_tb_bufs) * txq->n_window, txq->first_tb_bufs, txq->first_tb_dma); } kfree(txq->entries); txq->entries = NULL; del_timer_sync(&txq->stuck_timer); /* 0-fill queue descriptor structure */ memset(txq, 0, sizeof(*txq)); } void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int nq = trans->cfg->base_params->num_of_queues; int chan; u32 reg_val; int clear_dwords = (SCD_TRANS_TBL_OFFSET_QUEUE(nq) - SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(u32); /* make sure all queues are not stopped/used */ memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped)); memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used)); trans_pcie->scd_base_addr = iwl_read_prph(trans, SCD_SRAM_BASE_ADDR); WARN_ON(scd_base_addr != 0 && scd_base_addr != trans_pcie->scd_base_addr); /* reset context data, TX status and translation data */ iwl_trans_write_mem(trans, trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_LOWER_BOUND, NULL, clear_dwords); iwl_write_prph(trans, SCD_DRAM_BASE_ADDR, trans_pcie->scd_bc_tbls.dma >> 10); /* The chain extension of the SCD doesn't work well. This feature is * enabled by default by the HW, so we need to disable it manually. 
*/ if (trans->cfg->base_params->scd_chain_ext_wa) iwl_write_prph(trans, SCD_CHAINEXT_EN, 0); iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue, trans_pcie->cmd_fifo, trans_pcie->cmd_q_wdg_timeout); /* Activate all Tx DMA/FIFO channels */ iwl_scd_activate_fifos(trans); /* Enable DMA channel */ for (chan = 0; chan < FH_TCSR_CHNL_NUM; chan++) iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan), FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE); /* Update FH chicken bits */ reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG); iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG, reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN); /* Enable L1-Active */ if (trans->cfg->device_family < IWL_DEVICE_FAMILY_8000) iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG, APMG_PCIDEV_STT_VAL_L1_ACT_DIS); } void iwl_trans_pcie_tx_reset(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int txq_id; /* * we should never get here in gen2 trans mode; return early to avoid * invalid accesses */ if (WARN_ON_ONCE(trans->cfg->gen2)) return; for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues; txq_id++) { struct iwl_txq *txq = trans_pcie->txq[txq_id]; if (trans->cfg->use_tfh) iwl_write_direct64(trans, FH_MEM_CBBC_QUEUE(trans, txq_id), txq->dma_addr); else iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(trans, txq_id), txq->dma_addr >> 8); iwl_pcie_txq_unmap(trans, txq_id); txq->read_ptr = 0; txq->write_ptr = 0; } /* Tell NIC where to find the "keep warm" buffer */ iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG, trans_pcie->kw.dma >> 4); /* * Send 0 as the scd_base_addr since the device may have been reset * while we were in WoWLAN, in which case SCD_SRAM_BASE_ADDR will * contain garbage. */ iwl_pcie_tx_start(trans, 0); } static void iwl_pcie_tx_stop_fh(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); unsigned long flags; int ch, ret; u32 mask = 0; spin_lock(&trans_pcie->irq_lock); if (!iwl_trans_grab_nic_access(trans, &flags)) goto out; /* Stop each Tx DMA channel */ for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) { iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0); mask |= FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch); } /* Wait for DMA channels to be idle */ ret = iwl_poll_bit(trans, FH_TSSR_TX_STATUS_REG, mask, mask, 5000); if (ret < 0) IWL_ERR(trans, "Failing on timeout while stopping DMA channel %d [0x%08x]\n", ch, iwl_read32(trans, FH_TSSR_TX_STATUS_REG)); iwl_trans_release_nic_access(trans, &flags); out: spin_unlock(&trans_pcie->irq_lock); } /* * iwl_pcie_tx_stop - Stop all Tx DMA channels */ int iwl_pcie_tx_stop(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int txq_id; /* Turn off all Tx DMA fifos */ iwl_scd_deactivate_fifos(trans); /* Turn off all Tx DMA channels */ iwl_pcie_tx_stop_fh(trans); /* * This function can be called before the op_mode disabled the * queues. This happens when we have an rfkill interrupt. * Since we stop Tx altogether - mark the queues as stopped. 
*/ memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped)); memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used)); /* This can happen: start_hw, stop_device */ if (!trans_pcie->txq_memory) return 0; /* Unmap DMA from host system and free skb's */ for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues; txq_id++) iwl_pcie_txq_unmap(trans, txq_id); return 0; } /* * iwl_pcie_tx_free - Free TXQ Context * * Destroy all TX DMA queues and structures */ void iwl_pcie_tx_free(struct iwl_trans *trans) { int txq_id; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used)); /* Tx queues */ if (trans_pcie->txq_memory) { for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues; txq_id++) { iwl_pcie_txq_free(trans, txq_id); trans_pcie->txq[txq_id] = NULL; } } kfree(trans_pcie->txq_memory); trans_pcie->txq_memory = NULL; iwl_pcie_free_dma_ptr(trans, &trans_pcie->kw); iwl_pcie_free_dma_ptr(trans, &trans_pcie->scd_bc_tbls); } /* * iwl_pcie_tx_alloc - allocate TX context * Allocate all Tx DMA structures and initialize them */ static int iwl_pcie_tx_alloc(struct iwl_trans *trans) { int ret; int txq_id, slots_num; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); u16 bc_tbls_size = trans->cfg->base_params->num_of_queues; bc_tbls_size *= (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) ? sizeof(struct iwl_gen3_bc_tbl) : sizeof(struct iwlagn_scd_bc_tbl); /* It is not allowed to alloc twice, so warn when this happens. * We cannot rely on the previous allocation, so free and fail */ if (WARN_ON(trans_pcie->txq_memory)) { ret = -EINVAL; goto error; } ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls, bc_tbls_size); if (ret) { IWL_ERR(trans, "Scheduler BC Table allocation failed\n"); goto error; } /* Alloc keep-warm buffer */ ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE); if (ret) { IWL_ERR(trans, "Keep Warm allocation failed\n"); goto error; } trans_pcie->txq_memory = kcalloc(trans->cfg->base_params->num_of_queues, sizeof(struct iwl_txq), GFP_KERNEL); if (!trans_pcie->txq_memory) { IWL_ERR(trans, "Not enough memory for txq\n"); ret = -ENOMEM; goto error; } /* Alloc and init all Tx queues, including the command queue (#4/#9) */ for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues; txq_id++) { bool cmd_queue = (txq_id == trans_pcie->cmd_queue); if (cmd_queue) slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE, trans->cfg->min_txq_size); else slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE, trans->cfg->min_256_ba_txq_size); trans_pcie->txq[txq_id] = &trans_pcie->txq_memory[txq_id]; ret = iwl_pcie_txq_alloc(trans, trans_pcie->txq[txq_id], slots_num, cmd_queue); if (ret) { IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id); goto error; } trans_pcie->txq[txq_id]->id = txq_id; } return 0; error: iwl_pcie_tx_free(trans); return ret; } int iwl_pcie_tx_init(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int ret; int txq_id, slots_num; bool alloc = false; if (!trans_pcie->txq_memory) { ret = iwl_pcie_tx_alloc(trans); if (ret) goto error; alloc = true; } spin_lock(&trans_pcie->irq_lock); /* Turn off all Tx DMA fifos */ iwl_scd_deactivate_fifos(trans); /* Tell NIC where to find the "keep warm" buffer */ iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG, trans_pcie->kw.dma >> 4); spin_unlock(&trans_pcie->irq_lock); /* Alloc and init all Tx queues, including the command queue (#4/#9) */ for (txq_id = 0; 
txq_id < trans->cfg->base_params->num_of_queues; txq_id++) { bool cmd_queue = (txq_id == trans_pcie->cmd_queue); if (cmd_queue) slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE, trans->cfg->min_txq_size); else slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE, trans->cfg->min_256_ba_txq_size); ret = iwl_pcie_txq_init(trans, trans_pcie->txq[txq_id], slots_num, cmd_queue); if (ret) { IWL_ERR(trans, "Tx %d queue init failed\n", txq_id); goto error; } /* * Tell nic where to find circular buffer of TFDs for a * given Tx queue, and enable the DMA channel used for that * queue. * Circular buffer (TFD queue in DRAM) physical base address */ iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(trans, txq_id), trans_pcie->txq[txq_id]->dma_addr >> 8); } iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE); if (trans->cfg->base_params->num_of_queues > 20) iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_ENABLE_31_QUEUES); return 0; error: /* Upon error, free only if we allocated something */ if (alloc) iwl_pcie_tx_free(trans); return ret; } static inline void iwl_pcie_txq_progress(struct iwl_txq *txq) { lockdep_assert_held(&txq->lock); if (!txq->wd_timeout) return; /* * station is asleep and we send data - that must * be uAPSD or PS-Poll. Don't rearm the timer. */ if (txq->frozen) return; /* * if empty delete timer, otherwise move timer forward * since we're making progress on this queue */ if (txq->read_ptr == txq->write_ptr) del_timer(&txq->stuck_timer); else mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout); } /* Frees buffers until index _not_ inclusive */ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, struct sk_buff_head *skbs) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_txq *txq = trans_pcie->txq[txq_id]; int tfd_num = iwl_pcie_get_cmd_index(txq, ssn); int read_ptr = iwl_pcie_get_cmd_index(txq, txq->read_ptr); int last_to_free; /* This function is not meant to release the cmd queue */ if (WARN_ON(txq_id == trans_pcie->cmd_queue)) return; spin_lock_bh(&txq->lock); if (!test_bit(txq_id, trans_pcie->queue_used)) { IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n", txq_id, ssn); goto out; } if (read_ptr == tfd_num) goto out; IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n", txq_id, txq->read_ptr, tfd_num, ssn); /* Since we free until index _not_ inclusive, the one before index is * the last we will free. That entry must be in use. */ last_to_free = iwl_queue_dec_wrap(trans, tfd_num); if (!iwl_queue_used(txq, last_to_free)) { IWL_ERR(trans, "%s: Read index for txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n", __func__, txq_id, last_to_free, trans->cfg->base_params->max_tfd_queue_size, txq->write_ptr, txq->read_ptr); goto out; } if (WARN_ON(!skb_queue_empty(skbs))) goto out; for (; read_ptr != tfd_num; txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr), read_ptr = iwl_pcie_get_cmd_index(txq, txq->read_ptr)) { struct sk_buff *skb = txq->entries[read_ptr].skb; if (WARN_ON_ONCE(!skb)) continue; iwl_pcie_free_tso_page(trans_pcie, skb); __skb_queue_tail(skbs, skb); txq->entries[read_ptr].skb = NULL; if (!trans->cfg->use_tfh) iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq); iwl_pcie_txq_free_tfd(trans, txq); } iwl_pcie_txq_progress(txq); if (iwl_queue_space(trans, txq) > txq->low_mark && test_bit(txq_id, trans_pcie->queue_stopped)) { struct sk_buff_head overflow_skbs; __skb_queue_head_init(&overflow_skbs); skb_queue_splice_init(&txq->overflow_q, &overflow_skbs); /* * We are going to transmit from the overflow queue. 
* Remember this state so that wait_for_txq_empty will know we * are adding more packets to the TFD queue. It cannot rely on * the state of &txq->overflow_q, as we just emptied it, but * haven't TXed the content yet. */ txq->overflow_tx = true; /* * This is tricky: we are in the reclaim path, which is non * re-entrant, so no one will try to access the txq data * from that path. We stopped tx, so no new tx can come in * either. Bottom line, we can unlock and re-lock * later. */ spin_unlock_bh(&txq->lock); while (!skb_queue_empty(&overflow_skbs)) { struct sk_buff *skb = __skb_dequeue(&overflow_skbs); struct iwl_device_cmd *dev_cmd_ptr; dev_cmd_ptr = *(void **)((u8 *)skb->cb + trans_pcie->dev_cmd_offs); /* * Note that we can very well be overflowing again. * In that case, iwl_queue_space will be small again * and we won't wake mac80211's queue. */ iwl_trans_tx(trans, skb, dev_cmd_ptr, txq_id); } if (iwl_queue_space(trans, txq) > txq->low_mark) iwl_wake_queue(trans, txq); spin_lock_bh(&txq->lock); txq->overflow_tx = false; } out: spin_unlock_bh(&txq->lock); } /* Set wr_ptr of specific device and txq */ void iwl_trans_pcie_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_txq *txq = trans_pcie->txq[txq_id]; lockdep_assert_held(&txq->lock); txq->write_ptr = ptr; txq->read_ptr = txq->write_ptr; } static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans, const struct iwl_host_cmd *cmd) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); const struct iwl_cfg *cfg = trans->cfg; int ret; lockdep_assert_held(&trans_pcie->reg_lock); /* Make sure the NIC is still alive in the bus */ if (test_bit(STATUS_TRANS_DEAD, &trans->status)) return -ENODEV; /* * wake up the NIC to make sure that the firmware will see the host * command - we will let the NIC sleep once all the host commands * returned. This needs to be done only on NICs that have * apmg_wake_up_wa set. */ if (cfg->base_params->apmg_wake_up_wa && !trans_pcie->cmd_hold_nic_awake) { __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, BIT(cfg->csr->flag_mac_access_req)); ret = iwl_poll_bit(trans, CSR_GP_CNTRL, BIT(cfg->csr->flag_val_mac_access_en), (BIT(cfg->csr->flag_mac_clock_ready) | CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000); if (ret < 0) { __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, BIT(cfg->csr->flag_mac_access_req)); IWL_ERR(trans, "Failed to wake NIC for hcmd\n"); return -EIO; } trans_pcie->cmd_hold_nic_awake = true; } return 0; } /* * iwl_pcie_cmdq_reclaim - Reclaim TX command queue entries already Tx'd * * When FW advances 'R' index, all entries between old and new 'R' index * need to be reclaimed. As a result, some free space forms. If there is * enough free space (> low mark), wake the stack that feeds us. 
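 *
 * (For illustration: the command queue normally has a single command in
 * flight, so the walk below is expected to free exactly one entry.
 * Freeing more than one means the firmware skipped a command, which is
 * reported as "HCMD skipped" and escalated with an NMI.)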
*/ void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_txq *txq = trans_pcie->txq[txq_id]; unsigned long flags; int nfreed = 0; u16 r; lockdep_assert_held(&txq->lock); idx = iwl_pcie_get_cmd_index(txq, idx); r = iwl_pcie_get_cmd_index(txq, txq->read_ptr); if (idx >= trans->cfg->base_params->max_tfd_queue_size || (!iwl_queue_used(txq, idx))) { WARN_ONCE(test_bit(txq_id, trans_pcie->queue_used), "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n", __func__, txq_id, idx, trans->cfg->base_params->max_tfd_queue_size, txq->write_ptr, txq->read_ptr); return; } for (idx = iwl_queue_inc_wrap(trans, idx); r != idx; r = iwl_queue_inc_wrap(trans, r)) { txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr); if (nfreed++ > 0) { IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n", idx, txq->write_ptr, r); iwl_force_nmi(trans); } } if (txq->read_ptr == txq->write_ptr) { spin_lock_irqsave(&trans_pcie->reg_lock, flags); iwl_pcie_clear_cmd_in_flight(trans); spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); } iwl_pcie_txq_progress(txq); } static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid, u16 txq_id) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); u32 tbl_dw_addr; u32 tbl_dw; u16 scd_q2ratid; scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK; tbl_dw_addr = trans_pcie->scd_base_addr + SCD_TRANS_TBL_OFFSET_QUEUE(txq_id); tbl_dw = iwl_trans_read_mem32(trans, tbl_dw_addr); if (txq_id & 0x1) tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF); else tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000); iwl_trans_write_mem32(trans, tbl_dw_addr, tbl_dw); return 0; } /* Receiver address (actually, Rx station's index into station table), * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */ #define BUILD_RAxTID(sta_id, tid) (((sta_id) << 4) + (tid)) bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg, unsigned int wdg_timeout) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_txq *txq = trans_pcie->txq[txq_id]; int fifo = -1; bool scd_bug = false; if (test_and_set_bit(txq_id, trans_pcie->queue_used)) WARN_ONCE(1, "queue %d already used - expect issues", txq_id); txq->wd_timeout = msecs_to_jiffies(wdg_timeout); if (cfg) { fifo = cfg->fifo; /* Disable the scheduler prior to configuring the cmd queue */ if (txq_id == trans_pcie->cmd_queue && trans_pcie->scd_set_active) iwl_scd_enable_set_active(trans, 0); /* Stop this Tx queue before configuring it */ iwl_scd_txq_set_inactive(trans, txq_id); /* Set this queue as a chain-building queue unless it is CMD */ if (txq_id != trans_pcie->cmd_queue) iwl_scd_txq_set_chain(trans, txq_id); if (cfg->aggregate) { u16 ra_tid = BUILD_RAxTID(cfg->sta_id, cfg->tid); /* Map receiver-address / traffic-ID to this queue */ iwl_pcie_txq_set_ratid_map(trans, ra_tid, txq_id); /* enable aggregations for the queue */ iwl_scd_txq_enable_agg(trans, txq_id); txq->ampdu = true; } else { /* * disable aggregations for the queue, this will also * make the ra_tid mapping configuration irrelevant * since it is now a non-AGG queue. */ iwl_scd_txq_disable_agg(trans, txq_id); ssn = txq->read_ptr; } } else { /* * If we need to move the SCD write pointer by steps of * 0x40, 0x80 or 0xc0, it gets stuck. Avoid this and let * the op_mode know by returning true later. 
* Do this only in case cfg is NULL since this trick can * be done only if we have DQA enabled which is true for mvm * only. And mvm never sets a cfg pointer. * This is really ugly, but this is the easiest way out for * this sad hardware issue. * This bug has been fixed on devices 9000 and up. */ scd_bug = !trans->cfg->mq_rx_supported && !((ssn - txq->write_ptr) & 0x3f) && (ssn != txq->write_ptr); if (scd_bug) ssn++; } /* Place first TFD at index corresponding to start sequence number. * Assumes that ssn_idx is valid (!= 0xFFF) */ txq->read_ptr = (ssn & 0xff); txq->write_ptr = (ssn & 0xff); iwl_write_direct32(trans, HBUS_TARG_WRPTR, (ssn & 0xff) | (txq_id << 8)); if (cfg) { u8 frame_limit = cfg->frame_limit; iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn); /* Set up Tx window size and frame limit for this queue */ iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr + SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0); iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr + SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32), SCD_QUEUE_CTX_REG2_VAL(WIN_SIZE, frame_limit) | SCD_QUEUE_CTX_REG2_VAL(FRAME_LIMIT, frame_limit)); /* Set up status area in SRAM, map to Tx DMA/FIFO, activate */ iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id), (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE) | (cfg->fifo << SCD_QUEUE_STTS_REG_POS_TXF) | (1 << SCD_QUEUE_STTS_REG_POS_WSL) | SCD_QUEUE_STTS_REG_MSK); /* enable the scheduler for this queue (only) */ if (txq_id == trans_pcie->cmd_queue && trans_pcie->scd_set_active) iwl_scd_enable_set_active(trans, BIT(txq_id)); IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d on FIFO %d WrPtr: %d\n", txq_id, fifo, ssn & 0xff); } else { IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d WrPtr: %d\n", txq_id, ssn & 0xff); } return scd_bug; } void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id, bool shared_mode) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_txq *txq = trans_pcie->txq[txq_id]; txq->ampdu = !shared_mode; } void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id, bool configure_scd) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); u32 stts_addr = trans_pcie->scd_base_addr + SCD_TX_STTS_QUEUE_OFFSET(txq_id); static const u32 zero_val[4] = {}; trans_pcie->txq[txq_id]->frozen_expiry_remainder = 0; trans_pcie->txq[txq_id]->frozen = false; /* * Upon HW Rfkill - we stop the device, and then stop the queues * in the op_mode. Just for the sake of the simplicity of the op_mode, * allow the op_mode to call txq_disable after it already called * stop_device. */ if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) { WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status), "queue %d not used", txq_id); return; } if (configure_scd) { iwl_scd_txq_set_inactive(trans, txq_id); iwl_trans_write_mem(trans, stts_addr, (void *)zero_val, ARRAY_SIZE(zero_val)); } iwl_pcie_txq_unmap(trans, txq_id); trans_pcie->txq[txq_id]->ampdu = false; IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id); } /*************** HOST COMMAND QUEUE FUNCTIONS *****/ /* * iwl_pcie_enqueue_hcmd - enqueue a uCode command * @trans: transport private data * @cmd: a pointer to the ucode command structure * * The function returns < 0 values to indicate the operation * failed. On success, it returns the index (>= 0) of the command in the * command queue. 
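 *
 * Illustrative call flow (a simplified sketch; ECHO_CMD stands in for
 * any real command id):
 *
 *	struct iwl_host_cmd cmd = { .id = ECHO_CMD };
 *	iwl_trans_send_cmd(trans, &cmd)
 *	    -> iwl_trans_pcie_send_hcmd()
 *	        -> iwl_pcie_send_hcmd_sync() or iwl_pcie_send_hcmd_async()
 *	            -> iwl_pcie_enqueue_hcmd()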
*/ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue]; struct iwl_device_cmd *out_cmd; struct iwl_cmd_meta *out_meta; unsigned long flags; void *dup_buf = NULL; dma_addr_t phys_addr; int idx; u16 copy_size, cmd_size, tb0_size; bool had_nocopy = false; u8 group_id = iwl_cmd_groupid(cmd->id); int i, ret; u32 cmd_pos; const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD]; u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD]; if (WARN(!trans->wide_cmd_header && group_id > IWL_ALWAYS_LONG_GROUP, "unsupported wide command %#x\n", cmd->id)) return -EINVAL; if (group_id != 0) { copy_size = sizeof(struct iwl_cmd_header_wide); cmd_size = sizeof(struct iwl_cmd_header_wide); } else { copy_size = sizeof(struct iwl_cmd_header); cmd_size = sizeof(struct iwl_cmd_header); } /* need one for the header if the first is NOCOPY */ BUILD_BUG_ON(IWL_MAX_CMD_TBS_PER_TFD > IWL_NUM_OF_TBS - 1); for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { cmddata[i] = cmd->data[i]; cmdlen[i] = cmd->len[i]; if (!cmd->len[i]) continue; /* need at least IWL_FIRST_TB_SIZE copied */ if (copy_size < IWL_FIRST_TB_SIZE) { int copy = IWL_FIRST_TB_SIZE - copy_size; if (copy > cmdlen[i]) copy = cmdlen[i]; cmdlen[i] -= copy; cmddata[i] += copy; copy_size += copy; } if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) { had_nocopy = true; if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) { idx = -EINVAL; goto free_dup_buf; } } else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) { /* * This is also a chunk that isn't copied * to the static buffer so set had_nocopy. */ had_nocopy = true; /* only allowed once */ if (WARN_ON(dup_buf)) { idx = -EINVAL; goto free_dup_buf; } dup_buf = kmemdup(cmddata[i], cmdlen[i], GFP_ATOMIC); if (!dup_buf) return -ENOMEM; } else { /* NOCOPY must not be followed by normal! */ if (WARN_ON(had_nocopy)) { idx = -EINVAL; goto free_dup_buf; } copy_size += cmdlen[i]; } cmd_size += cmd->len[i]; } /* * If any of the command structures end up being larger than * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically * allocated into separate TFDs, then we will need to * increase the size of the buffers. */ if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE, "Command %s (%#x) is too large (%d bytes)\n", iwl_get_cmd_string(trans, cmd->id), cmd->id, copy_size)) { idx = -EINVAL; goto free_dup_buf; } spin_lock_bh(&txq->lock); if (iwl_queue_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 
2 : 1)) { spin_unlock_bh(&txq->lock); IWL_ERR(trans, "No space in command queue\n"); iwl_op_mode_cmd_queue_full(trans->op_mode); idx = -ENOSPC; goto free_dup_buf; } idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr); out_cmd = txq->entries[idx].cmd; out_meta = &txq->entries[idx].meta; memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */ if (cmd->flags & CMD_WANT_SKB) out_meta->source = cmd; /* set up the header */ if (group_id != 0) { out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id); out_cmd->hdr_wide.group_id = group_id; out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id); out_cmd->hdr_wide.length = cpu_to_le16(cmd_size - sizeof(struct iwl_cmd_header_wide)); out_cmd->hdr_wide.reserved = 0; out_cmd->hdr_wide.sequence = cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) | INDEX_TO_SEQ(txq->write_ptr)); cmd_pos = sizeof(struct iwl_cmd_header_wide); copy_size = sizeof(struct iwl_cmd_header_wide); } else { out_cmd->hdr.cmd = iwl_cmd_opcode(cmd->id); out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) | INDEX_TO_SEQ(txq->write_ptr)); out_cmd->hdr.group_id = 0; cmd_pos = sizeof(struct iwl_cmd_header); copy_size = sizeof(struct iwl_cmd_header); } /* and copy the data that needs to be copied */ for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { int copy; if (!cmd->len[i]) continue; /* copy everything if not nocopy/dup */ if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY | IWL_HCMD_DFL_DUP))) { copy = cmd->len[i]; memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy); cmd_pos += copy; copy_size += copy; continue; } /* * Otherwise we need at least IWL_FIRST_TB_SIZE copied * in total (for bi-directional DMA), but copy up to what * we can fit into the payload for debug dump purposes. */ copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]); memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy); cmd_pos += copy; /* However, treat copy_size the proper way, we need it below */ if (copy_size < IWL_FIRST_TB_SIZE) { copy = IWL_FIRST_TB_SIZE - copy_size; if (copy > cmd->len[i]) copy = cmd->len[i]; copy_size += copy; } } IWL_DEBUG_HC(trans, "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n", iwl_get_cmd_string(trans, cmd->id), group_id, out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence), cmd_size, txq->write_ptr, idx, trans_pcie->cmd_queue); /* start the TFD with the minimum copy bytes */ tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE); memcpy(&txq->first_tb_bufs[idx], &out_cmd->hdr, tb0_size); iwl_pcie_txq_build_tfd(trans, txq, iwl_pcie_get_first_tb_dma(txq, idx), tb0_size, true); /* map first command fragment, if any remains */ if (copy_size > tb0_size) { phys_addr = dma_map_single(trans->dev, ((u8 *)&out_cmd->hdr) + tb0_size, copy_size - tb0_size, DMA_TO_DEVICE); if (dma_mapping_error(trans->dev, phys_addr)) { iwl_pcie_tfd_unmap(trans, out_meta, txq, txq->write_ptr); idx = -ENOMEM; goto out; } iwl_pcie_txq_build_tfd(trans, txq, phys_addr, copy_size - tb0_size, false); } /* map the remaining (adjusted) nocopy/dup fragments */ for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { const void *data = cmddata[i]; if (!cmdlen[i]) continue; if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY | IWL_HCMD_DFL_DUP))) continue; if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) data = dup_buf; phys_addr = dma_map_single(trans->dev, (void *)data, cmdlen[i], DMA_TO_DEVICE); if (dma_mapping_error(trans->dev, phys_addr)) { iwl_pcie_tfd_unmap(trans, out_meta, txq, txq->write_ptr); idx = -ENOMEM; goto out; } iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], false); } 
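	/* out_meta->tbs tracks the mapped TBs as a bitmask (one bit per TB);
	 * the build-time check below ensures the bitmask is wide enough for
	 * the largest TFD format.
	 */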
BUILD_BUG_ON(IWL_TFH_NUM_TBS > sizeof(out_meta->tbs) * BITS_PER_BYTE); out_meta->flags = cmd->flags; if (WARN_ON_ONCE(txq->entries[idx].free_buf)) kzfree(txq->entries[idx].free_buf); txq->entries[idx].free_buf = dup_buf; trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide); /* start timer if queue currently empty */ if (txq->read_ptr == txq->write_ptr && txq->wd_timeout) mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout); spin_lock_irqsave(&trans_pcie->reg_lock, flags); ret = iwl_pcie_set_cmd_in_flight(trans, cmd); if (ret < 0) { idx = ret; spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); goto out; } /* Increment and update queue's write index */ txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr); iwl_pcie_txq_inc_wr_ptr(trans, txq); spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); out: spin_unlock_bh(&txq->lock); free_dup_buf: if (idx < 0) kfree(dup_buf); return idx; } /* * iwl_pcie_hcmd_complete - Pull unused buffers off the queue and reclaim them * @rxb: Rx buffer to reclaim */ void iwl_pcie_hcmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); u16 sequence = le16_to_cpu(pkt->hdr.sequence); u8 group_id; u32 cmd_id; int txq_id = SEQ_TO_QUEUE(sequence); int index = SEQ_TO_INDEX(sequence); int cmd_index; struct iwl_device_cmd *cmd; struct iwl_cmd_meta *meta; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue]; /* If a Tx command is being handled and it isn't in the actual * command queue then a command routing bug has been introduced * in the queue management code. */ if (WARN(txq_id != trans_pcie->cmd_queue, "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n", txq_id, trans_pcie->cmd_queue, sequence, txq->read_ptr, txq->write_ptr)) { iwl_print_hex_error(trans, pkt, 32); return; } spin_lock_bh(&txq->lock); cmd_index = iwl_pcie_get_cmd_index(txq, index); cmd = txq->entries[cmd_index].cmd; meta = &txq->entries[cmd_index].meta; group_id = cmd->hdr.group_id; cmd_id = iwl_cmd_id(cmd->hdr.cmd, group_id, 0); iwl_pcie_tfd_unmap(trans, meta, txq, index); /* Input error checking is done when commands are added to queue. */ if (meta->flags & CMD_WANT_SKB) { struct page *p = rxb_steal_page(rxb); meta->source->resp_pkt = pkt; meta->source->_rx_page_addr = (unsigned long)page_address(p); meta->source->_rx_page_order = trans_pcie->rx_page_order; } if (meta->flags & CMD_WANT_ASYNC_CALLBACK) iwl_op_mode_async_cb(trans->op_mode, cmd); iwl_pcie_cmdq_reclaim(trans, txq_id, index); if (!(meta->flags & CMD_ASYNC)) { if (!test_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status)) { IWL_WARN(trans, "HCMD_ACTIVE already clear for command %s\n", iwl_get_cmd_string(trans, cmd_id)); } clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n", iwl_get_cmd_string(trans, cmd_id)); wake_up(&trans_pcie->wait_command_queue); } meta->flags = 0; spin_unlock_bh(&txq->lock); } #define HOST_COMPLETE_TIMEOUT (2 * HZ * CPTCFG_IWL_TIMEOUT_FACTOR) static int iwl_pcie_send_hcmd_async(struct iwl_trans *trans, struct iwl_host_cmd *cmd) { int ret; /* An asynchronous command cannot expect an SKB to be set. 
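 * (Rationale: by the time the response arrives, the asynchronous caller
 * has long since returned, so nobody would be left to consume the
 * response page; CMD_WANT_SKB is therefore only honoured on the
 * synchronous path.)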
*/ if (WARN_ON(cmd->flags & CMD_WANT_SKB)) return -EINVAL; ret = iwl_pcie_enqueue_hcmd(trans, cmd); if (ret < 0) { IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n", iwl_get_cmd_string(trans, cmd->id), ret); return ret; } return 0; } static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue]; int cmd_idx; int ret; IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", iwl_get_cmd_string(trans, cmd->id)); if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status), "Command %s: a command is already active!\n", iwl_get_cmd_string(trans, cmd->id))) return -EIO; IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", iwl_get_cmd_string(trans, cmd->id)); cmd_idx = iwl_pcie_enqueue_hcmd(trans, cmd); if (cmd_idx < 0) { ret = cmd_idx; clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n", iwl_get_cmd_string(trans, cmd->id), ret); return ret; } ret = wait_event_timeout(trans_pcie->wait_command_queue, !test_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status), HOST_COMPLETE_TIMEOUT); if (!ret) { IWL_ERR(trans, "Error sending %s: time out after %dms.\n", iwl_get_cmd_string(trans, cmd->id), jiffies_to_msecs(HOST_COMPLETE_TIMEOUT)); IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n", txq->read_ptr, txq->write_ptr); clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n", iwl_get_cmd_string(trans, cmd->id)); ret = -ETIMEDOUT; iwl_trans_pcie_sync_nmi(trans); goto cancel; } if (test_bit(STATUS_FW_ERROR, &trans->status)) { iwl_trans_pcie_dump_regs(trans); IWL_ERR(trans, "FW error in SYNC CMD %s\n", iwl_get_cmd_string(trans, cmd->id)); dump_stack(); ret = -EIO; goto cancel; } if (!(cmd->flags & CMD_SEND_IN_RFKILL) && test_bit(STATUS_RFKILL_OPMODE, &trans->status)) { IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n"); ret = -ERFKILL; goto cancel; } if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) { IWL_ERR(trans, "Error: Response NULL in '%s'\n", iwl_get_cmd_string(trans, cmd->id)); ret = -EIO; goto cancel; } return 0; cancel: if (cmd->flags & CMD_WANT_SKB) { /* * Cancel the CMD_WANT_SKB flag for the cmd in the * TX cmd queue. Otherwise in case the cmd comes * in later, it will possibly set an invalid * address (cmd->meta.source). 
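 * (With the flag cleared, iwl_pcie_hcmd_complete() simply will not
 * store the response through the stale out_meta->source pointer.)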
*/ txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB; } if (cmd->resp_pkt) { iwl_free_resp(cmd); cmd->resp_pkt = NULL; } return ret; } int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) { /* Make sure the NIC is still alive in the bus */ if (test_bit(STATUS_TRANS_DEAD, &trans->status)) return -ENODEV; if (!(cmd->flags & CMD_SEND_IN_RFKILL) && test_bit(STATUS_RFKILL_OPMODE, &trans->status)) { IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n", cmd->id); return -ERFKILL; } if (cmd->flags & CMD_ASYNC) return iwl_pcie_send_hcmd_async(trans, cmd); /* We still can fail on RFKILL that can be asserted while we wait */ return iwl_pcie_send_hcmd_sync(trans, cmd); } #ifdef CPTCFG_MAC80211_LATENCY_MEASUREMENTS static void iwl_trans_pci_tx_lat_add_ts_write(struct sk_buff *skb) { s64 temp = ktime_to_ms(ktime_get()); s64 ts_1 = ktime_to_ns(skb->tstamp) >> 32; s64 diff = temp - ts_1; #if LINUX_VERSION_IS_LESS(4,10,0) skb->tstamp.tv64 += diff << 16; #else skb->tstamp += diff << 16; #endif } #endif static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_txq *txq, u8 hdr_len, struct iwl_cmd_meta *out_meta) { u16 head_tb_len; int i; /* * Set up TFD's third entry to point directly to remainder * of skb's head, if any */ head_tb_len = skb_headlen(skb) - hdr_len; if (head_tb_len > 0) { dma_addr_t tb_phys = dma_map_single(trans->dev, skb->data + hdr_len, head_tb_len, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(trans->dev, tb_phys))) return -EINVAL; trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb->data + hdr_len, head_tb_len); iwl_pcie_txq_build_tfd(trans, txq, tb_phys, head_tb_len, false); } /* set up the remaining entries to point to the data */ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; dma_addr_t tb_phys; int tb_idx; if (!skb_frag_size(frag)) continue; tb_phys = skb_frag_dma_map(trans->dev, frag, 0, skb_frag_size(frag), DMA_TO_DEVICE); if (unlikely(dma_mapping_error(trans->dev, tb_phys))) return -EINVAL; trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb_frag_address(frag), skb_frag_size(frag)); tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys, skb_frag_size(frag), false); if (tb_idx < 0) return tb_idx; out_meta->tbs |= BIT(tb_idx); } return 0; } #ifdef CONFIG_INET struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_tso_hdr_page *p = this_cpu_ptr(trans_pcie->tso_hdr_page); if (!p->page) goto alloc; /* enough room on this page */ if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE) return p; /* We don't have enough room on this page, get a new one. 
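 * (Note: __free_page() only drops this allocator's reference; every skb
 * that used the page took its own reference via get_page(), so the page
 * stays alive until iwl_pcie_free_tso_page() runs for the last of them.)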
*/ __free_page(p->page); alloc: p->page = alloc_page(GFP_ATOMIC); if (!p->page) return NULL; p->pos = page_address(p->page); return p; } static void iwl_compute_pseudo_hdr_csum(void *iph, struct tcphdr *tcph, bool ipv6, unsigned int len) { if (ipv6) { struct ipv6hdr *iphv6 = iph; tcph->check = ~csum_ipv6_magic(&iphv6->saddr, &iphv6->daddr, len + tcph->doff * 4, IPPROTO_TCP, 0); } else { struct iphdr *iphv4 = iph; ip_send_check(iphv4); tcph->check = ~csum_tcpudp_magic(iphv4->saddr, iphv4->daddr, len + tcph->doff * 4, IPPROTO_TCP, 0); } } static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_txq *txq, u8 hdr_len, struct iwl_cmd_meta *out_meta, struct iwl_device_cmd *dev_cmd, u16 tb1_len) { struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload; struct iwl_trans_pcie *trans_pcie = txq->trans_pcie; struct ieee80211_hdr *hdr = (void *)skb->data; unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room; unsigned int mss = skb_shinfo(skb)->gso_size; u16 length, iv_len, amsdu_pad; u8 *start_hdr; struct iwl_tso_hdr_page *hdr_page; struct page **page_ptr; struct tso_t tso; /* if the packet is protected, then it must be CCMP or GCMP */ BUILD_BUG_ON(IEEE80211_CCMP_HDR_LEN != IEEE80211_GCMP_HDR_LEN); iv_len = ieee80211_has_protected(hdr->frame_control) ? IEEE80211_CCMP_HDR_LEN : 0; trace_iwlwifi_dev_tx(trans->dev, skb, iwl_pcie_get_tfd(trans, txq, txq->write_ptr), trans_pcie->tfd_size, &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 0); ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb); snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb); total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len - iv_len; amsdu_pad = 0; /* total amount of header we may need for this A-MSDU */ hdr_room = DIV_ROUND_UP(total_len, mss) * (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len; /* Our device supports 9 segments at most, it will fit in 1 page */ hdr_page = get_page_hdr(trans, hdr_room); if (!hdr_page) return -ENOMEM; get_page(hdr_page->page); start_hdr = hdr_page->pos; page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs); *page_ptr = hdr_page->page; memcpy(hdr_page->pos, skb->data + hdr_len, iv_len); hdr_page->pos += iv_len; /* * Pull the ieee80211 header + IV to be able to use TSO core, * we will restore it for the tx_status flow. */ skb_pull(skb, hdr_len + iv_len); /* * Remove the length of all the headers that we don't actually * have in the MPDU by themselves, but that we duplicate into * all the different MSDUs inside the A-MSDU. */ le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen); tso_start(skb, &tso); while (total_len) { /* this is the data left for this subframe */ unsigned int data_left = min_t(unsigned int, mss, total_len); struct sk_buff *csum_skb = NULL; unsigned int hdr_tb_len; dma_addr_t hdr_tb_phys; struct tcphdr *tcph; u8 *iph, *subf_hdrs_start = hdr_page->pos; total_len -= data_left; memset(hdr_page->pos, 0, amsdu_pad); hdr_page->pos += amsdu_pad; amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen + data_left)) & 0x3; ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr)); hdr_page->pos += ETH_ALEN; ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr)); hdr_page->pos += ETH_ALEN; length = snap_ip_tcp_hdrlen + data_left; *((__be16 *)hdr_page->pos) = cpu_to_be16(length); hdr_page->pos += sizeof(length); /* * This will copy the SNAP as well which will be considered * as MAC header. 
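 * (tso_build_hdr() copies everything from the SNAP header up to and
 * including the TCP header; the "+ 8" below then skips the 8-byte SNAP
 * header to locate the IP header.)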
*/ tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len); iph = hdr_page->pos + 8; tcph = (void *)(iph + ip_hdrlen); /* For testing on current hardware only */ if (trans_pcie->sw_csum_tx) { csum_skb = alloc_skb(data_left + tcp_hdrlen(skb), GFP_ATOMIC); if (!csum_skb) return -ENOMEM; iwl_compute_pseudo_hdr_csum(iph, tcph, skb->protocol == htons(ETH_P_IPV6), data_left); skb_put_data(csum_skb, tcph, tcp_hdrlen(skb)); skb_reset_transport_header(csum_skb); csum_skb->csum_start = (unsigned char *)tcp_hdr(csum_skb) - csum_skb->head; } hdr_page->pos += snap_ip_tcp_hdrlen; hdr_tb_len = hdr_page->pos - start_hdr; hdr_tb_phys = dma_map_single(trans->dev, start_hdr, hdr_tb_len, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(trans->dev, hdr_tb_phys))) { dev_kfree_skb(csum_skb); return -EINVAL; } iwl_pcie_txq_build_tfd(trans, txq, hdr_tb_phys, hdr_tb_len, false); trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr, hdr_tb_len); /* add this subframe's headers' length to the tx_cmd */ le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start); /* prepare the start_hdr for the next subframe */ start_hdr = hdr_page->pos; /* put the payload */ while (data_left) { unsigned int size = min_t(unsigned int, tso.size, data_left); dma_addr_t tb_phys; if (trans_pcie->sw_csum_tx) skb_put_data(csum_skb, tso.data, size); tb_phys = dma_map_single(trans->dev, tso.data, size, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(trans->dev, tb_phys))) { dev_kfree_skb(csum_skb); return -EINVAL; } iwl_pcie_txq_build_tfd(trans, txq, tb_phys, size, false); trace_iwlwifi_dev_tx_tb(trans->dev, skb, tso.data, size); data_left -= size; tso_build_data(skb, &tso, size); } /* For testing on early hardware only */ if (trans_pcie->sw_csum_tx) { __wsum csum; csum = skb_checksum(csum_skb, skb_checksum_start_offset(csum_skb), csum_skb->len - skb_checksum_start_offset(csum_skb), 0); dev_kfree_skb(csum_skb); dma_sync_single_for_cpu(trans->dev, hdr_tb_phys, hdr_tb_len, DMA_TO_DEVICE); tcph->check = csum_fold(csum); dma_sync_single_for_device(trans->dev, hdr_tb_phys, hdr_tb_len, DMA_TO_DEVICE); } } /* re-add the WiFi header and IV */ skb_push(skb, hdr_len + iv_len); return 0; } #else /* CONFIG_INET */ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_txq *txq, u8 hdr_len, struct iwl_cmd_meta *out_meta, struct iwl_device_cmd *dev_cmd, u16 tb1_len) { /* No A-MSDU without CONFIG_INET */ WARN_ON(1); return -1; } #endif /* CONFIG_INET */ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_device_cmd *dev_cmd, int txq_id) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct ieee80211_hdr *hdr; struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload; struct iwl_cmd_meta *out_meta; struct iwl_txq *txq; dma_addr_t tb0_phys, tb1_phys, scratch_phys; void *tb1_addr; void *tfd; u16 len, tb1_len; bool wait_write_ptr; __le16 fc; u8 hdr_len; u16 wifi_seq; bool amsdu; txq = trans_pcie->txq[txq_id]; if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used), "TX on unused queue %d\n", txq_id)) return -EINVAL; if (unlikely(trans_pcie->sw_csum_tx && skb->ip_summed == CHECKSUM_PARTIAL)) { int offs = skb_checksum_start_offset(skb); int csum_offs = offs + skb->csum_offset; __wsum csum; if (skb_ensure_writable(skb, csum_offs + sizeof(__sum16))) return -1; csum = skb_checksum(skb, offs, skb->len - offs, 0); *(__sum16 *)(skb->data + csum_offs) = csum_fold(csum); skb->ip_summed = CHECKSUM_UNNECESSARY; } if (skb_is_nonlinear(skb) && skb_shinfo(skb)->nr_frags > 
IWL_PCIE_MAX_FRAGS(trans_pcie) && __skb_linearize(skb)) return -ENOMEM; /* mac80211 always puts the full header into the SKB's head, * so there's no need to check if it's readable there */ hdr = (struct ieee80211_hdr *)skb->data; fc = hdr->frame_control; hdr_len = ieee80211_hdrlen(fc); spin_lock(&txq->lock); if (iwl_queue_space(trans, txq) < txq->high_mark) { iwl_stop_queue(trans, txq); /* don't put the packet on the ring if there is no room */ if (unlikely(iwl_queue_space(trans, txq) < 3)) { struct iwl_device_cmd **dev_cmd_ptr; dev_cmd_ptr = (void *)((u8 *)skb->cb + trans_pcie->dev_cmd_offs); *dev_cmd_ptr = dev_cmd; __skb_queue_tail(&txq->overflow_q, skb); spin_unlock(&txq->lock); return 0; } } /* In AGG mode, the index in the ring must correspond to the WiFi * sequence number. This is a HW requirement to help the SCD to parse * the BA. * Check here that the packets are in the right place on the ring. */ wifi_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)); WARN_ONCE(txq->ampdu && (wifi_seq & 0xff) != txq->write_ptr, "Q: %d WiFi Seq %d tfdNum %d", txq_id, wifi_seq, txq->write_ptr); /* Set up driver data for this TFD */ txq->entries[txq->write_ptr].skb = skb; txq->entries[txq->write_ptr].cmd = dev_cmd; dev_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) | INDEX_TO_SEQ(txq->write_ptr))); tb0_phys = iwl_pcie_get_first_tb_dma(txq, txq->write_ptr); scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) + offsetof(struct iwl_tx_cmd, scratch); tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys); tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys); /* Set up first empty entry in queue's array of Tx/cmd buffers */ out_meta = &txq->entries[txq->write_ptr].meta; out_meta->flags = 0; /* * The second TB (tb1) points to the remainder of the TX command * and the 802.11 header - dword aligned size * (This calculation modifies the TX command, so do it before the * setup of the first TB) */ len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) + hdr_len - IWL_FIRST_TB_SIZE; /* do not align A-MSDU to dword as the subframe header aligns it */ amsdu = ieee80211_is_data_qos(fc) && (*ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_A_MSDU_PRESENT); if (trans_pcie->sw_csum_tx || !amsdu) { tb1_len = ALIGN(len, 4); /* Tell NIC about any 2-byte padding after MAC header */ if (tb1_len != len) tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_MH_PAD); } else { tb1_len = len; } /* * The first TB points to bi-directional DMA data, we'll * memcpy the data into it later. */ iwl_pcie_txq_build_tfd(trans, txq, tb0_phys, IWL_FIRST_TB_SIZE, true); /* there must be data left over for TB1 or this code must be changed */ BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_FIRST_TB_SIZE); /* map the data for TB1 */ tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE; tb1_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(trans->dev, tb1_phys))) goto out_err; iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false); trace_iwlwifi_dev_tx(trans->dev, skb, iwl_pcie_get_tfd(trans, txq, txq->write_ptr), trans_pcie->tfd_size, &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, hdr_len); /* * If gso_size wasn't set, don't give the frame "amsdu treatment" * (adding subframes, etc.). * This can happen in some testing flows when the amsdu was already * pre-built, and we just need to send the resulting skb.
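 *
 * For orientation, this is the TFD layout assembled by this function
 * (a sketch derived from the code above, not an authoritative HW
 * description):
 *   TB0:  IWL_FIRST_TB_SIZE bytes of bi-directional data holding the
 *         start of the TX command (memcpy'd below, after the A-MSDU
 *         path may have modified it);
 *   TB1:  the rest of the TX command plus the 802.11 header, dword
 *         aligned unless this is an A-MSDU (the subframe header takes
 *         care of the alignment instead);
 *   TB2+: the frame payload, added by iwl_fill_data_tbs(_amsdu)().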
*/ if (amsdu && skb_shinfo(skb)->gso_size) { if (unlikely(iwl_fill_data_tbs_amsdu(trans, skb, txq, hdr_len, out_meta, dev_cmd, tb1_len))) goto out_err; } else { struct sk_buff *frag; if (unlikely(iwl_fill_data_tbs(trans, skb, txq, hdr_len, out_meta))) goto out_err; skb_walk_frags(skb, frag) { if (unlikely(iwl_fill_data_tbs(trans, frag, txq, 0, out_meta))) goto out_err; } } /* building the A-MSDU might have changed this data, so memcpy it now */ memcpy(&txq->first_tb_bufs[txq->write_ptr], dev_cmd, IWL_FIRST_TB_SIZE); tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr); /* Set up entry for this TFD in Tx byte-count array */ iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len), iwl_pcie_tfd_get_num_tbs(trans, tfd)); wait_write_ptr = ieee80211_has_morefrags(fc); /* start timer if queue currently empty */ if (txq->read_ptr == txq->write_ptr && txq->wd_timeout) { /* * If the TXQ is active, then set the timer, if not, * set the timer in remainder so that the timer will * be armed with the right value when the station will * wake up. */ if (!txq->frozen) mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout); else txq->frozen_expiry_remainder = txq->wd_timeout; } /* Tell device the write index *just past* this latest filled TFD */ txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr); if (!wait_write_ptr) iwl_pcie_txq_inc_wr_ptr(trans, txq); /* * At this point the frame is "transmitted" successfully * and we will get a TX status notification eventually. */ #ifdef CPTCFG_MAC80211_LATENCY_MEASUREMENTS iwl_trans_pci_tx_lat_add_ts_write(skb); #endif spin_unlock(&txq->lock); return 0; out_err: iwl_pcie_tfd_unmap(trans, out_meta, txq, txq->write_ptr); spin_unlock(&txq->lock); return -1; } backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/xvt/000077500000000000000000000000001351064634500254105ustar00rootroot00000000000000backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/xvt/Makefile000066400000000000000000000002651351064634500270530ustar00rootroot00000000000000obj-$(CPTCFG_IWLXVT) += iwlxvt.o iwlxvt-y += xvt.o user-infc.o utils.o fw.o nvm.o rx.o iwlxvt-$(CPTCFG_IWLWIFI_DEBUGFS) += debugfs.o ccflags-y += -D__CHECK_ENDIAN__ -I$(src)/../ backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/xvt/constants.h000066400000000000000000000065611351064634500276050ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2017 Intel Deutschland GmbH * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #ifndef __XVT_CONSTANTS_H #define __XVT_CONSTANTS_H #ifndef CPTCFG_IWLWIFI_SUPPORT_DEBUG_OVERRIDES #define IWL_XVT_DEFAULT_DBGM_MEM_POWER 0 #define IWL_XVT_DEFAULT_DBGM_LMAC_MASK 0 #define IWL_XVT_DEFAULT_DBGM_PRPH_MASK 0 #else /* CPTCFG_IWLWIFI_SUPPORT_DEBUG_OVERRIDES */ #define IWL_XVT_DEFAULT_DBGM_MEM_POWER (xvt->trans->dbg_cfg.XVT_DEFAULT_DBGM_MEM_POWER) #define IWL_XVT_DEFAULT_DBGM_LMAC_MASK (xvt->trans->dbg_cfg.XVT_DEFAULT_DBGM_LMAC_MASK) #define IWL_XVT_DEFAULT_DBGM_PRPH_MASK (xvt->trans->dbg_cfg.XVT_DEFAULT_DBGM_PRPH_MASK) #endif /* CPTCFG_IWLWIFI_SUPPORT_DEBUG_OVERRIDES */ #endif /* __XVT_CONSTANTS_H */ backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/xvt/debugfs.c000066400000000000000000000122111351064634500271700ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2017 Intel Deutschland GmbH * Copyright (C) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2017 Intel Deutschland GmbH * Copyright (C) 2018 - 2019 Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #include "xvt.h" #include "fw/dbg.h" #define XVT_DEBUGFS_WRITE_WRAPPER(name, buflen, argtype) \ static ssize_t _iwl_dbgfs_##name##_write(struct file *file, \ const char __user *user_buf, \ size_t count, loff_t *ppos) \ { \ argtype *arg = file->private_data; \ char buf[buflen] = {}; \ size_t buf_size = min(count, sizeof(buf) - 1); \ \ if (copy_from_user(buf, user_buf, buf_size)) \ return -EFAULT; \ \ return iwl_dbgfs_##name##_write(arg, buf, buf_size, ppos); \ } #define _XVT_DEBUGFS_WRITE_FILE_OPS(name, buflen, argtype) \ XVT_DEBUGFS_WRITE_WRAPPER(name, buflen, argtype) \ static const struct file_operations iwl_dbgfs_##name##_ops = { \ .write = _iwl_dbgfs_##name##_write, \ .open = simple_open, \ .llseek = generic_file_llseek, \ } #define XVT_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \ _XVT_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct iwl_xvt) #define XVT_DEBUGFS_ADD_FILE_ALIAS(alias, name, parent, mode) do { \ if (!debugfs_create_file(alias, mode, parent, xvt, \ &iwl_dbgfs_##name##_ops)) \ goto err; \ } while (0) #define XVT_DEBUGFS_ADD_FILE(name, parent, mode) \ XVT_DEBUGFS_ADD_FILE_ALIAS(#name, name, parent, mode) static ssize_t iwl_dbgfs_fw_dbg_collect_write(struct iwl_xvt *xvt, char *buf, size_t count, loff_t *ppos) { if (!(xvt->state == IWL_XVT_STATE_OPERATIONAL && xvt->fw_running)) return -EIO; if (count == 0) return 0; iwl_fw_dbg_collect(&xvt->fwrt, FW_DBG_TRIGGER_USER, buf, (count - 1), NULL); return count; } static ssize_t iwl_dbgfs_fw_restart_write(struct iwl_xvt *xvt, char *buf, size_t count, loff_t *ppos) { int __maybe_unused ret; if (!xvt->fw_running) return -EIO; mutex_lock(&xvt->mutex); /* Take the return value, though failure is expected, for compilation */ ret = iwl_xvt_send_cmd_pdu(xvt, REPLY_ERROR, 0, 0, NULL); mutex_unlock(&xvt->mutex); return count; } /* Device wide debugfs entries */ XVT_DEBUGFS_WRITE_FILE_OPS(fw_dbg_collect, 64); XVT_DEBUGFS_WRITE_FILE_OPS(fw_restart, 10); #ifdef CPTCFG_IWLWIFI_DEBUGFS int iwl_xvt_dbgfs_register(struct iwl_xvt *xvt, struct 
dentry *dbgfs_dir) { xvt->debugfs_dir = dbgfs_dir; XVT_DEBUGFS_ADD_FILE(fw_dbg_collect, xvt->debugfs_dir, S_IWUSR); XVT_DEBUGFS_ADD_FILE(fw_restart, xvt->debugfs_dir, S_IWUSR); return 0; err: IWL_ERR(xvt, "Can't create the xvt debugfs directory\n"); return -ENOMEM; } #endif /* CPTCFG_IWLWIFI_DEBUGFS */ backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/xvt/fw-api.h000066400000000000000000000112631351064634500267470ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2015 - 2017 Intel Deutschland GmbH * Copyright (C) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2015 - 2017 Intel Deutschland GmbH * Copyright (C) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* *****************************************************************************/ #ifndef __fw_api_h__ #define __fw_api_h__ #include "fw/api/rs.h" #include "fw/api/txq.h" #include "fw/api/location.h" #include "fw/api/alive.h" #include "fw/api/phy.h" #include "fw/api/nvm-reg.h" #include "fw/api/commands.h" #include "fw/api/tx.h" #include "fw/api/config.h" #include "fw/api/datapath.h" #define IWL_XVT_DEFAULT_TX_QUEUE 1 #define IWL_XVT_DEFAULT_TX_FIFO 3 #define IWL_XVT_TX_STA_ID_DEFAULT 0 #define XVT_LMAC_0_ID 0 #define XVT_LMAC_1_ID 1 enum { APMG_PD_SV_CMD = 0x43, /* ToF */ LOCATION_GROUP_NOTIFICATION = 0x11, NVM_COMMIT_COMPLETE_NOTIFICATION = 0xad, GET_SET_PHY_DB_CMD = 0x8f, /* BFE */ REPLY_HD_PARAMS_CMD = 0xa6, REPLY_RX_DSP_EXT_INFO = 0xc4, REPLY_DEBUG_XVT_CMD = 0xf3, }; struct xvt_alive_resp_ver2 { __le16 status; __le16 flags; u8 ucode_minor; u8 ucode_major; __le16 id; u8 api_minor; u8 api_major; u8 ver_subtype; u8 ver_type; u8 mac; u8 opt; __le16 reserved2; __le32 timestamp; __le32 error_event_table_ptr; /* SRAM address for error log */ __le32 log_event_table_ptr; /* SRAM address for LMAC event log */ __le32 cpu_register_ptr; __le32 dbgm_config_ptr; __le32 alive_counter_ptr; __le32 scd_base_ptr; /* SRAM address for SCD */ __le32 st_fwrd_addr; /* pointer to Store and forward */ __le32 st_fwrd_size; u8 umac_minor; /* UMAC version: minor */ u8 umac_major; /* UMAC version: major */ __le16 umac_id; /* UMAC version: id */ __le32 error_info_addr; /* SRAM address for UMAC error log */ __le32 dbg_print_buff_addr; } __packed; /* ALIVE_RES_API_S_VER_2 */ enum { XVT_DBG_GET_SVDROP_VER_OP = 0x01, }; struct xvt_debug_cmd { __le32 opcode; __le32 dw_num; }; /* DEBUG_XVT_CMD_API_S_VER_1 */ struct xvt_debug_res { __le32 dw_num; __le32 data[0]; }; /* DEBUG_XVT_RES_API_S_VER_1 */ #endif /* __fw_api_h__ */ backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/xvt/fw.c000066400000000000000000000272511351064634500261770ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2015 - 2017 Intel Deutschland GmbH * Copyright (C) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2015 - 2017 Intel Deutschland GmbH * Copyright (C) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. 
* * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #include "iwl-trans.h" #include "iwl-op-mode.h" #include "fw/img.h" #include "iwl-csr.h" #include "xvt.h" #include "iwl-dnt-cfg.h" #include "fw/dbg.h" #include "fw/testmode.h" #include "fw/api/power.h" #define XVT_UCODE_ALIVE_TIMEOUT (HZ * CPTCFG_IWL_TIMEOUT_FACTOR) struct iwl_xvt_alive_data { bool valid; u32 scd_base_addr; }; static int iwl_xvt_send_dqa_cmd(struct iwl_xvt *xvt) { struct iwl_dqa_enable_cmd dqa_cmd = { .cmd_queue = cpu_to_le32(IWL_MVM_DQA_CMD_QUEUE), }; u32 cmd_id = iwl_cmd_id(DQA_ENABLE_CMD, DATA_PATH_GROUP, 0); int ret; ret = iwl_xvt_send_cmd_pdu(xvt, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd); if (ret) IWL_ERR(xvt, "Failed to send DQA enabling command: %d\n", ret); else IWL_DEBUG_FW(xvt, "Working in DQA mode\n"); return ret; } static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait, struct iwl_rx_packet *pkt, void *data) { struct iwl_xvt *xvt = container_of(notif_wait, struct iwl_xvt, notif_wait); struct iwl_xvt_alive_data *alive_data = data; struct xvt_alive_resp_ver2 *palive2; struct mvm_alive_resp_v3 *palive3; struct mvm_alive_resp *palive4; struct iwl_lmac_alive *lmac1, *lmac2; struct iwl_umac_alive *umac; u32 rx_packet_payload_size = iwl_rx_packet_payload_len(pkt); u16 status, flags; u32 lmac_error_event_table, umac_error_event_table; xvt->support_umac_log = false; if (rx_packet_payload_size == sizeof(*palive2)) { palive2 = (void *)pkt->data; lmac_error_event_table = le32_to_cpu(palive2->error_event_table_ptr); alive_data->scd_base_addr = le32_to_cpu(palive2->scd_base_ptr); alive_data->valid = le16_to_cpu(palive2->status) == IWL_ALIVE_STATUS_OK; iwl_tm_set_fw_ver(xvt->trans, palive2->ucode_major, palive2->ucode_minor); umac_error_event_table = le32_to_cpu(palive2->error_info_addr); if (umac_error_event_table) xvt->support_umac_log = true; IWL_DEBUG_FW(xvt, "Alive VER2 ucode status 0x%04x revision 0x%01X " "0x%01X flags 0x%01X\n", le16_to_cpu(palive2->status), palive2->ver_type, palive2->ver_subtype, palive2->flags); IWL_DEBUG_FW(xvt, "UMAC version: Major - 0x%x, Minor - 0x%x\n", palive2->umac_major, palive2->umac_minor); } else { if (rx_packet_payload_size == sizeof(*palive3)) { palive3 = (void *)pkt->data; status = le16_to_cpu(palive3->status); flags = le16_to_cpu(palive3->flags); lmac1 = &palive3->lmac_data; umac = 
&palive3->umac_data; IWL_DEBUG_FW(xvt, "Alive VER3\n"); } else if (rx_packet_payload_size == sizeof(*palive4)) { __le32 lmac2_err_ptr; palive4 = (void *)pkt->data; status = le16_to_cpu(palive4->status); flags = le16_to_cpu(palive4->flags); lmac1 = &palive4->lmac_data[0]; lmac2 = &palive4->lmac_data[1]; umac = &palive4->umac_data; lmac2_err_ptr = lmac2->dbg_ptrs.error_event_table_ptr; xvt->trans->dbg.lmac_error_event_table[1] = le32_to_cpu(lmac2_err_ptr); IWL_DEBUG_FW(xvt, "Alive VER4 CDB\n"); } else { IWL_ERR(xvt, "unrecognized alive notification\n"); return false; } alive_data->valid = status == IWL_ALIVE_STATUS_OK; lmac_error_event_table = le32_to_cpu(lmac1->dbg_ptrs.error_event_table_ptr); alive_data->scd_base_addr = le32_to_cpu(lmac1->dbg_ptrs.scd_base_ptr); iwl_tm_set_fw_ver(xvt->trans, le32_to_cpu(lmac1->ucode_major), le32_to_cpu(lmac1->ucode_minor)); umac_error_event_table = le32_to_cpu(umac->dbg_ptrs.error_info_addr); if (umac_error_event_table) xvt->support_umac_log = true; IWL_DEBUG_FW(xvt, "status 0x%04x rev 0x%01X 0x%01X flags 0x%01X\n", status, lmac1->ver_type, lmac1->ver_subtype, flags); IWL_DEBUG_FW(xvt, "UMAC version: Major - 0x%x, Minor - 0x%x\n", umac->umac_major, umac->umac_minor); } iwl_fw_lmac1_set_alive_err_table(xvt->trans, lmac_error_event_table); if (xvt->support_umac_log) iwl_fw_umac_set_alive_err_table(xvt->trans, umac_error_event_table); return true; } static int iwl_xvt_load_ucode_wait_alive(struct iwl_xvt *xvt, enum iwl_ucode_type ucode_type) { struct iwl_notification_wait alive_wait; struct iwl_xvt_alive_data alive_data; const struct fw_img *fw; int ret; enum iwl_ucode_type old_type = xvt->fwrt.cur_fw_img; static const u16 alive_cmd[] = { MVM_ALIVE }; struct iwl_scd_txq_cfg_cmd cmd = { .scd_queue = IWL_XVT_DEFAULT_TX_QUEUE, .action = SCD_CFG_ENABLE_QUEUE, .window = IWL_FRAME_LIMIT, .sta_id = IWL_XVT_TX_STA_ID_DEFAULT, .ssn = 0, .tx_fifo = IWL_XVT_DEFAULT_TX_FIFO, .aggregate = false, .tid = IWL_MAX_TID_COUNT, }; iwl_fw_set_current_image(&xvt->fwrt, ucode_type); fw = iwl_get_ucode_image(xvt->fw, ucode_type); if (!fw) return -EINVAL; iwl_init_notification_wait(&xvt->notif_wait, &alive_wait, alive_cmd, ARRAY_SIZE(alive_cmd), iwl_alive_fn, &alive_data); ret = iwl_trans_start_fw_dbg(xvt->trans, fw, ucode_type == IWL_UCODE_INIT, (xvt->sw_stack_cfg.fw_dbg_flags & ~IWL_XVT_DBG_FLAGS_NO_DEFAULT_TXQ)); if (ret) { iwl_fw_set_current_image(&xvt->fwrt, old_type); iwl_remove_notification(&xvt->notif_wait, &alive_wait); return ret; } /* * Some things may run in the background now, but we * just wait for the ALIVE notification here. */ ret = iwl_wait_notification(&xvt->notif_wait, &alive_wait, XVT_UCODE_ALIVE_TIMEOUT); if (ret) { iwl_fw_set_current_image(&xvt->fwrt, old_type); return ret; } if (!alive_data.valid) { IWL_ERR(xvt, "Loaded ucode is not valid!\n"); iwl_fw_set_current_image(&xvt->fwrt, old_type); return -EIO; } /* fresh firmware was loaded */ xvt->fw_error = false; iwl_trans_fw_alive(xvt->trans, alive_data.scd_base_addr); ret = iwl_init_paging(&xvt->fwrt, ucode_type); if (ret) return ret; if (ucode_type == IWL_UCODE_REGULAR && fw_has_capa(&xvt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_DQA_SUPPORT)) { ret = iwl_xvt_send_dqa_cmd(xvt); if (ret) return ret; } /* * Starting from 22000, tx queue allocation must be done after add * station, so it is not part of the init flow.
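 *
 * In other words, two paths follow below (a summary of this code, not
 * extra behaviour): unified (22000+) firmware allocates the queue
 * later, once the station has been added, while older firmware enables
 * the default queue right here via iwl_trans_txq_enable_cfg() followed
 * by an SCD_QUEUE_CFG host command.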
*/ if (!iwl_xvt_is_unified_fw(xvt) && iwl_xvt_has_default_txq(xvt) && ucode_type != IWL_UCODE_INIT) { iwl_trans_txq_enable_cfg(xvt->trans, IWL_XVT_DEFAULT_TX_QUEUE, 0, NULL, 0); WARN(iwl_xvt_send_cmd_pdu(xvt, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd), "Failed to configure queue %d on FIFO %d\n", IWL_XVT_DEFAULT_TX_QUEUE, IWL_XVT_DEFAULT_TX_FIFO); xvt->tx_meta_data[XVT_LMAC_0_ID].queue = IWL_XVT_DEFAULT_TX_QUEUE; } xvt->fw_running = true; return 0; } static int iwl_xvt_send_extended_config(struct iwl_xvt *xvt) { /* * TODO: once WRT is implemented in xVT, the IWL_INIT_DEBUG_CFG * flag will not always be set */ struct iwl_init_extended_cfg_cmd ext_cfg = { .init_flags = cpu_to_le32(BIT(IWL_INIT_NVM) | BIT(IWL_INIT_DEBUG_CFG)), }; if (xvt->sw_stack_cfg.load_mask & IWL_XVT_LOAD_MASK_RUNTIME) ext_cfg.init_flags |= cpu_to_le32(BIT(IWL_INIT_PHY)); return iwl_xvt_send_cmd_pdu(xvt, WIDE_ID(SYSTEM_GROUP, INIT_EXTENDED_CFG_CMD), 0, sizeof(ext_cfg), &ext_cfg); } static int iwl_xvt_config_ltr(struct iwl_xvt *xvt) { struct iwl_ltr_config_cmd cmd = { .flags = cpu_to_le32(LTR_CFG_FLAG_FEATURE_ENABLE), }; if (!xvt->trans->ltr_enabled) return 0; return iwl_xvt_send_cmd_pdu(xvt, LTR_CONFIG, 0, sizeof(cmd), &cmd); } int iwl_xvt_run_fw(struct iwl_xvt *xvt, u32 ucode_type) { int ret; if (ucode_type >= IWL_UCODE_TYPE_MAX) return -EINVAL; lockdep_assert_held(&xvt->mutex); if (xvt->state != IWL_XVT_STATE_UNINITIALIZED) { if (xvt->fw_running) { xvt->fw_running = false; if (xvt->fwrt.cur_fw_img == IWL_UCODE_REGULAR) iwl_xvt_txq_disable(xvt); } iwl_fw_dbg_stop_sync(&xvt->fwrt); iwl_trans_stop_device(xvt->trans); } ret = iwl_trans_start_hw(xvt->trans); if (ret) { IWL_ERR(xvt, "Failed to start HW\n"); return ret; } iwl_trans_set_bits_mask(xvt->trans, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_BIT_MAC_SI, CSR_HW_IF_CONFIG_REG_BIT_MAC_SI); /* Will also start the device */ ret = iwl_xvt_load_ucode_wait_alive(xvt, ucode_type); if (ret) { IWL_ERR(xvt, "Failed to start ucode: %d\n", ret); iwl_fw_dbg_stop_sync(&xvt->fwrt); iwl_trans_stop_device(xvt->trans); return ret; } if (iwl_xvt_is_unified_fw(xvt)) { ret = iwl_xvt_send_extended_config(xvt); if (ret) { IWL_ERR(xvt, "Failed to send extended_config: %d\n", ret); iwl_fw_dbg_stop_sync(&xvt->fwrt); iwl_trans_stop_device(xvt->trans); return ret; } } iwl_dnt_start(xvt->trans); if (xvt->fwrt.cur_fw_img == IWL_UCODE_REGULAR && (!fw_has_capa(&xvt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SET_LTR_GEN2))) WARN_ON(iwl_xvt_config_ltr(xvt)); xvt->fwrt.dump.conf = FW_DBG_INVALID; /* if we have a destination, assume EARLY START */ if (xvt->fw->dbg.dest_tlv) xvt->fwrt.dump.conf = FW_DBG_START_FROM_ALIVE; iwl_fw_start_dbg_conf(&xvt->fwrt, FW_DBG_START_FROM_ALIVE); return ret; } backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/xvt/nvm.c000066400000000000000000000222221351064634500263540ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright (C) 2015 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation.
* * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright (C) 2015 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* *****************************************************************************/ #include <linux/firmware.h> #include "iwl-trans.h" #include "xvt.h" #include "iwl-eeprom-parse.h" #include "iwl-eeprom-read.h" #include "iwl-nvm-parse.h" #include "iwl-prph.h" #include "fw-api.h" /* Default NVM size to read */ #define IWL_NVM_DEFAULT_CHUNK_SIZE (2*1024) #define IWL_MAX_NVM_SECTION_SIZE 7000 #define NVM_WRITE_OPCODE 1 #define NVM_READ_OPCODE 0 enum wkp_nvm_offsets { /* NVM HW-Section offset (in words) definitions */ HW_ADDR = 0x15, }; enum ext_nvm_offsets { /* NVM HW-Section offset (in words) definitions */ MAC_ADDRESS_OVERRIDE_EXT_NVM = 1, }; /* * prepare the NVM host command w/ the pointers to the nvm buffer * and send it to fw */ static int iwl_nvm_write_chunk(struct iwl_xvt *xvt, u16 section, u16 offset, u16 length, const u8 *data) { struct iwl_nvm_access_cmd nvm_access_cmd = { .offset = cpu_to_le16(offset), .length = cpu_to_le16(length), .type = cpu_to_le16(section), .op_code = NVM_WRITE_OPCODE, }; struct iwl_host_cmd cmd = { .id = NVM_ACCESS_CMD, .len = { sizeof(struct iwl_nvm_access_cmd), length }, .flags = CMD_SEND_IN_RFKILL, .data = { &nvm_access_cmd, data }, /* data may come from vmalloc, so use _DUP */ .dataflags = { 0, IWL_HCMD_DFL_DUP }, }; return iwl_xvt_send_cmd(xvt, &cmd); } static int iwl_nvm_write_section(struct iwl_xvt *xvt, u16 section, const u8 *data, u16 length) { int offset = 0; /* copy data in chunks of 2k (and remainder if any) */ while (offset < length) { int chunk_size, ret; chunk_size = min(IWL_NVM_DEFAULT_CHUNK_SIZE, length - offset); ret = iwl_nvm_write_chunk(xvt, section, offset, chunk_size, data + offset); if (ret < 0) return ret; offset += chunk_size; } return 0; } #define MAX_NVM_FILE_LEN 16384 /* * HOW TO CREATE THE NVM FILE FORMAT: * ------------------------------ * 1. create hex file, format: * 3800 -> header * 0000 -> header * 5a40 -> data * * rev - 6 bit (word1) * len - 10 bit (word1) * id - 4 bit (word2) * rsv - 12 bit (word2) * * 2. flip 8bits with 8 bits per line to get the right NVM file format * * 3. create binary file from the hex file * * 4. save as "iNVM_xxx.bin" under /lib/firmware */ static int iwl_xvt_load_external_nvm(struct iwl_xvt *xvt) { int ret, section_size; u16 section_id; const struct firmware *fw_entry; const struct { __le16 word1; __le16 word2; u8 data[]; } *file_sec; const u8 *eof; const __le32 *dword_buff; const u8 *hw_addr; #define NVM_WORD1_LEN(x) (8 * (x & 0x03FF)) #define NVM_WORD2_ID(x) (x >> 12) #define EXT_NVM_WORD2_LEN(x) (2 * (((x) & 0xFF) << 8 | (x) >> 8)) #define EXT_NVM_WORD1_ID(x) ((x) >> 4) #define NVM_HEADER_0 (0x2A504C54) #define NVM_HEADER_1 (0x4E564D2A) #define NVM_HEADER_SIZE (4 * sizeof(u32)) /* * Obtain NVM image via request_firmware. Since we already used * request_firmware_nowait() for the firmware binary load and only * get here after that we assume the NVM request can be satisfied * synchronously. */ ret = request_firmware(&fw_entry, iwlwifi_mod_params.nvm_file, xvt->trans->dev); if (ret) { IWL_WARN(xvt, "WARNING: %s isn't available %d\n", iwlwifi_mod_params.nvm_file, ret); return 0; } IWL_INFO(xvt, "Loaded NVM file %s (%zu bytes)\n", iwlwifi_mod_params.nvm_file, fw_entry->size); if (fw_entry->size > MAX_NVM_FILE_LEN) { IWL_ERR(xvt, "NVM file too large\n"); ret = -EINVAL; goto out; } eof = fw_entry->data + fw_entry->size; dword_buff = (__le32 *)fw_entry->data; /* Some NVM files contain a header.
* The header is identified by its first 2 dwords, as follows: * dword[0] = 0x2A504C54 * dword[1] = 0x4E564D2A * * This header must be skipped when providing the NVM data to the FW. */ if (fw_entry->size > NVM_HEADER_SIZE && dword_buff[0] == cpu_to_le32(NVM_HEADER_0) && dword_buff[1] == cpu_to_le32(NVM_HEADER_1)) { file_sec = (void *)(fw_entry->data + NVM_HEADER_SIZE); IWL_INFO(xvt, "NVM Version %08X\n", le32_to_cpu(dword_buff[2])); IWL_INFO(xvt, "NVM Manufacturing date %08X\n", le32_to_cpu(dword_buff[3])); } else { file_sec = (void *)fw_entry->data; } while (true) { if (file_sec->data > eof) { IWL_ERR(xvt, "ERROR - NVM file too short for section header\n"); ret = -EINVAL; break; } /* check for EOF marker */ if (!file_sec->word1 && !file_sec->word2) { ret = 0; break; } if (xvt->trans->cfg->nvm_type != IWL_NVM_EXT) { section_size = 2 * NVM_WORD1_LEN(le16_to_cpu(file_sec->word1)); section_id = NVM_WORD2_ID(le16_to_cpu(file_sec->word2)); } else { section_size = 2 * EXT_NVM_WORD2_LEN( le16_to_cpu(file_sec->word2)); section_id = EXT_NVM_WORD1_ID( le16_to_cpu(file_sec->word1)); } if (section_size > IWL_MAX_NVM_SECTION_SIZE) { IWL_ERR(xvt, "ERROR - section too large (%d)\n", section_size); ret = -EINVAL; break; } if (!section_size) { IWL_ERR(xvt, "ERROR - section empty\n"); ret = -EINVAL; break; } if (file_sec->data + section_size > eof) { IWL_ERR(xvt, "ERROR - NVM file too short for section (%d bytes)\n", section_size); ret = -EINVAL; break; } if (section_id == xvt->cfg->nvm_hw_section_num) { hw_addr = (const u8 *)((const __le16 *)file_sec->data + HW_ADDR); /* The byte order is little endian 16 bit, i.e. bytes 123456 are stored as 214365 */ xvt->nvm_hw_addr[0] = hw_addr[1]; xvt->nvm_hw_addr[1] = hw_addr[0]; xvt->nvm_hw_addr[2] = hw_addr[3]; xvt->nvm_hw_addr[3] = hw_addr[2]; xvt->nvm_hw_addr[4] = hw_addr[5]; xvt->nvm_hw_addr[5] = hw_addr[4]; } if (section_id == NVM_SECTION_TYPE_MAC_OVERRIDE) { xvt->is_nvm_mac_override = true; hw_addr = (const u8 *)((const __le16 *)file_sec->data + MAC_ADDRESS_OVERRIDE_EXT_NVM); /* * Store the MAC address from MAO section. * No byte swapping is required in MAO section. */ memcpy(xvt->nvm_hw_addr, hw_addr, ETH_ALEN); } ret = iwl_nvm_write_section(xvt, section_id, file_sec->data, section_size); if (ret < 0) { IWL_ERR(xvt, "iwl_nvm_write_section failed: %d\n", ret); break; } /* advance to the next section */ file_sec = (void *)(file_sec->data + section_size); } out: release_firmware(fw_entry); return ret; } int iwl_xvt_nvm_init(struct iwl_xvt *xvt) { int ret; xvt->is_nvm_mac_override = false; /* load external NVM if configured */ if (iwlwifi_mod_params.nvm_file) { /* move to External NVM flow */ ret = iwl_xvt_load_external_nvm(xvt); if (ret) return ret; } return 0; } backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/xvt/rx.c000066400000000000000000000241571351064634500262140ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #include <linux/etherdevice.h> #include <linux/ieee80211.h> #include "xvt.h" #include "fw/api/rx.h" #include "fw/dbg.h" /* * Returns true if sn2 - buffer_size < sn1 < sn2. * To be used only in order to compare reorder buffer head with NSSN. * We fully trust NSSN unless it is behind us due to reorder timeout. * Reorder timeout can only bring us up to buffer_size SNs ahead of NSSN. */ static bool iwl_xvt_is_sn_less(u16 sn1, u16 sn2, u16 buffer_size) { return ieee80211_sn_less(sn1, sn2) && !ieee80211_sn_less(sn1, sn2 - buffer_size); } static void iwl_xvt_release_frames(struct iwl_xvt *xvt, struct iwl_xvt_reorder_buffer *reorder_buf, u16 nssn) { u16 ssn = reorder_buf->head_sn; lockdep_assert_held(&reorder_buf->lock); IWL_DEBUG_HT(xvt, "reorder: release nssn=%d\n", nssn); /* ignore nssn smaller than head sn - this can happen due to timeout */ if (iwl_xvt_is_sn_less(nssn, ssn, reorder_buf->buf_size)) return; while (iwl_xvt_is_sn_less(ssn, nssn, reorder_buf->buf_size)) { int index = ssn % reorder_buf->buf_size; u16 frames_count = reorder_buf->entries[index]; ssn = ieee80211_sn_inc(ssn); /* * Reset frame count. Will have more than one frame for A-MSDU. * entries=0 is valid as well since nssn indicates frames were * received.
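 *
 * A worked example of this release loop (illustrative numbers): with
 * head_sn = 10, buf_size = 64 and nssn = 13, the loop flushes slots
 * 10 % 64, 11 % 64 and 12 % 64 and head_sn becomes 13; an nssn of,
 * say, 9 would have been ignored by the check above because it is
 * behind the buffer head.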
*/ IWL_DEBUG_HT(xvt, "reorder: deliver index=0x%x\n", index); reorder_buf->entries[index] = 0; reorder_buf->num_stored -= frames_count; reorder_buf->stats.released += frames_count; } reorder_buf->head_sn = nssn; /* don't mess with reorder timer for now */ } void iwl_xvt_rx_frame_release(struct iwl_xvt *xvt, struct iwl_rx_packet *pkt) { struct iwl_frame_release *release = (void *)pkt->data; struct iwl_xvt_reorder_buffer *buffer; int baid = release->baid; IWL_DEBUG_HT(xvt, "Frame release notification for BAID %u, NSSN %d\n", baid, le16_to_cpu(release->nssn)); if (WARN_ON_ONCE(baid >= IWL_MAX_BAID)) return; buffer = &xvt->reorder_bufs[baid]; if (buffer->sta_id == IWL_XVT_INVALID_STA) return; spin_lock_bh(&buffer->lock); iwl_xvt_release_frames(xvt, buffer, le16_to_cpu(release->nssn)); spin_unlock_bh(&buffer->lock); } void iwl_xvt_destroy_reorder_buffer(struct iwl_xvt *xvt, struct iwl_xvt_reorder_buffer *buf) { if (buf->sta_id == IWL_XVT_INVALID_STA) return; spin_lock_bh(&buf->lock); iwl_xvt_release_frames(xvt, buf, ieee80211_sn_add(buf->head_sn, buf->buf_size)); buf->sta_id = IWL_XVT_INVALID_STA; spin_unlock_bh(&buf->lock); } static bool iwl_xvt_init_reorder_buffer(struct iwl_xvt_reorder_buffer *buf, u8 sta_id, u8 tid, u16 ssn, u8 buf_size) { int j; if (WARN_ON(buf_size > ARRAY_SIZE(buf->entries))) return false; buf->num_stored = 0; buf->head_sn = ssn; buf->buf_size = buf_size; spin_lock_init(&buf->lock); buf->queue = 0; buf->sta_id = sta_id; buf->tid = tid; for (j = 0; j < buf->buf_size; j++) buf->entries[j] = 0; memset(&buf->stats, 0, sizeof(buf->stats)); /* currently there's no need to mess with reorder timer */ return true; } bool iwl_xvt_reorder(struct iwl_xvt *xvt, struct iwl_rx_packet *pkt) { struct iwl_rx_mpdu_desc *desc = (void *)pkt->data; struct ieee80211_hdr *hdr; u32 reorder = le32_to_cpu(desc->reorder_data); struct iwl_xvt_reorder_buffer *buffer; u16 tail; bool last_subframe = desc->amsdu_info & IWL_RX_MPDU_AMSDU_LAST_SUBFRAME; u8 sub_frame_idx = desc->amsdu_info & IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK; bool amsdu = desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU; u16 nssn, sn, min_sn; int index; u8 baid; u8 sta_id = desc->sta_id_flags & IWL_RX_MPDU_SIF_STA_ID_MASK; u8 tid; baid = (reorder & IWL_RX_MPDU_REORDER_BAID_MASK) >> IWL_RX_MPDU_REORDER_BAID_SHIFT; if (baid >= IWL_MAX_BAID) return false; if (xvt->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) hdr = (void *)(pkt->data + sizeof(struct iwl_rx_mpdu_desc)); else hdr = (void *)(pkt->data + IWL_RX_DESC_SIZE_V1); /* not a data packet */ if (!ieee80211_is_data_qos(hdr->frame_control) || is_multicast_ether_addr(hdr->addr1)) return false; if (unlikely(!ieee80211_is_data_present(hdr->frame_control))) return false; nssn = reorder & IWL_RX_MPDU_REORDER_NSSN_MASK; sn = (reorder & IWL_RX_MPDU_REORDER_SN_MASK) >> IWL_RX_MPDU_REORDER_SN_SHIFT; min_sn = ieee80211_sn_less(sn, nssn) ? 
sn : nssn; tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK; /* Check if buffer needs to be initialized */ buffer = &xvt->reorder_bufs[baid]; if (buffer->sta_id == IWL_XVT_INVALID_STA) { /* don't initialize until first valid packet comes through */ if (reorder & IWL_RX_MPDU_REORDER_BA_OLD_SN) return false; if (!iwl_xvt_init_reorder_buffer(buffer, sta_id, tid, min_sn, IEEE80211_MAX_AMPDU_BUF_HT)) return false; } /* verify sta_id and tid match the reorder buffer params */ if (buffer->sta_id != sta_id || buffer->tid != tid) { /* TODO: add add_ba/del_ba notifications */ WARN(1, "sta_id/tid doesn't match saved baid params\n"); return false; } spin_lock_bh(&buffer->lock); /* * If there was a significant jump in the nssn - adjust. * If the SN is smaller than the NSSN it might need to first go into * the reorder buffer, in which case we just release up to it and the * rest of the function will take care of storing it and releasing up to * the nssn */ if (!iwl_xvt_is_sn_less(nssn, buffer->head_sn + buffer->buf_size, buffer->buf_size) || !ieee80211_sn_less(sn, buffer->head_sn + buffer->buf_size)) iwl_xvt_release_frames(xvt, buffer, min_sn); /* drop any outdated packets */ if (ieee80211_sn_less(sn, buffer->head_sn)) goto drop; /* release immediately if allowed by nssn and no stored frames */ if (!buffer->num_stored && ieee80211_sn_less(sn, nssn)) { if (iwl_xvt_is_sn_less(buffer->head_sn, nssn, buffer->buf_size) && (!amsdu || last_subframe)) buffer->head_sn = nssn; /* No need to update AMSDU last SN - we are moving the head */ spin_unlock_bh(&buffer->lock); buffer->stats.released++; buffer->stats.skipped++; return false; } index = sn % buffer->buf_size; /* * Check if we already stored this frame. * As an AMSDU is either received in its entirety or not at all, the * logic is simple: If we have frames in that position in the buffer * and the last frame that originated from an AMSDU had a different SN, * then it is a retransmission. If it is the same SN, then if the * subframe index is incrementing it is the same AMSDU - otherwise it * is a retransmission. */ tail = buffer->entries[index]; if (tail && !amsdu) goto drop; else if (tail && (sn != buffer->last_amsdu || buffer->last_sub_index >= sub_frame_idx)) goto drop; /* put in reorder buffer */ buffer->entries[index]++; buffer->num_stored++; if (amsdu) { buffer->last_amsdu = sn; buffer->last_sub_index = sub_frame_idx; } buffer->stats.reordered++; /* * We cannot trust NSSN for AMSDU sub-frames that are not the last. * The reason is that NSSN advances on the first sub-frame, and may * cause the reorder buffer to advance before all the sub-frames arrive. * Example: reorder buffer contains SN 0 & 2, and we receive AMSDU with * SN 1. NSSN for the first sub-frame will be 3, so the driver would * release SN 0, 1, 2. When sub-frame 1 arrives - reorder buffer is * already ahead and it will be dropped. * If the last sub-frame is not on this queue - we will get frame * release notification with up to date NSSN. */ if (!amsdu || last_subframe) iwl_xvt_release_frames(xvt, buffer, nssn); spin_unlock_bh(&buffer->lock); return true; drop: buffer->stats.dropped++; spin_unlock_bh(&buffer->lock); return true; } backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/xvt/user-infc.c000066400000000000000000001571771351064634500274670ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license.
* * GPL LICENSE SUMMARY * * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(C) 2015 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(C) 2015 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* *****************************************************************************/ #include #include #include #include #include #include #include #include #include "iwl-drv.h" #include "iwl-prph.h" #include "iwl-csr.h" #include "iwl-io.h" #include "iwl-trans.h" #include "iwl-op-mode.h" #include "iwl-phy-db.h" #include "xvt.h" #include "user-infc.h" #include "iwl-dnt-cfg.h" #include "iwl-dnt-dispatch.h" #include "iwl-trans.h" #include "fw/dbg.h" #define XVT_UCODE_CALIB_TIMEOUT (CPTCFG_IWL_TIMEOUT_FACTOR * HZ) #define XVT_SCU_BASE (0xe6a00000) #define XVT_SCU_SNUM1 (XVT_SCU_BASE + 0x300) #define XVT_SCU_SNUM2 (XVT_SCU_SNUM1 + 0x4) #define XVT_SCU_SNUM3 (XVT_SCU_SNUM2 + 0x4) #define XVT_MAX_TX_COUNT (ULLONG_MAX) #define XVT_LMAC_0_STA_ID (0) /* must be aligned with station id added in USC */ #define XVT_LMAC_1_STA_ID (3) /* must be aligned with station id added in USC */ #define XVT_STOP_TX (IEEE80211_SCTL_FRAG + 1) void iwl_xvt_send_user_rx_notif(struct iwl_xvt *xvt, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); void *data = pkt->data; u32 size = iwl_rx_packet_payload_len(pkt); IWL_DEBUG_INFO(xvt, "rx notification: group=0x%x, id=0x%x\n", pkt->hdr.group_id, pkt->hdr.cmd); switch (WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd)) { case WIDE_ID(LONG_GROUP, GET_SET_PHY_DB_CMD): iwl_xvt_user_send_notif(xvt, IWL_TM_USER_CMD_NOTIF_PHY_DB, data, size, GFP_ATOMIC); break; case DTS_MEASUREMENT_NOTIFICATION: case WIDE_ID(PHY_OPS_GROUP, DTS_MEASUREMENT_NOTIF_WIDE): iwl_xvt_user_send_notif(xvt, IWL_TM_USER_CMD_NOTIF_DTS_MEASUREMENTS, data, size, GFP_ATOMIC); break; case REPLY_RX_DSP_EXT_INFO: if (!xvt->rx_hdr_enabled) break; iwl_xvt_user_send_notif(xvt, IWL_TM_USER_CMD_NOTIF_RX_HDR, data, size, GFP_ATOMIC); break; case APMG_PD_SV_CMD: if (!xvt->apmg_pd_en) break; iwl_xvt_user_send_notif(xvt, IWL_TM_USER_CMD_NOTIF_APMG_PD, data, size, GFP_ATOMIC); break; case REPLY_RX_MPDU_CMD: if (!xvt->send_rx_mpdu) break; iwl_xvt_user_send_notif(xvt, IWL_TM_USER_CMD_NOTIF_UCODE_RX_PKT, data, size, GFP_ATOMIC); break; case NVM_COMMIT_COMPLETE_NOTIFICATION: iwl_xvt_user_send_notif(xvt, IWL_TM_USER_CMD_NOTIF_COMMIT_STATISTICS, data, size, GFP_ATOMIC); break; case REPLY_HD_PARAMS_CMD: iwl_xvt_user_send_notif(xvt, IWL_TM_USER_CMD_NOTIF_BFE, data, size, GFP_ATOMIC); break; case DEBUG_LOG_MSG: iwl_dnt_dispatch_collect_ucode_message(xvt->trans, rxb); break; case WIDE_ID(LOCATION_GROUP, TOF_MCSI_DEBUG_NOTIF): iwl_xvt_user_send_notif(xvt, IWL_TM_USER_CMD_NOTIF_LOC_MCSI, data, size, GFP_ATOMIC); break; case WIDE_ID(LOCATION_GROUP, TOF_RANGE_RESPONSE_NOTIF): iwl_xvt_user_send_notif(xvt, IWL_TM_USER_CMD_NOTIF_LOC_RANGE, data, size, GFP_ATOMIC); break; case WIDE_ID(XVT_GROUP, IQ_CALIB_CONFIG_NOTIF): iwl_xvt_user_send_notif(xvt, IWL_TM_USER_CMD_NOTIF_IQ_CALIB, data, size, GFP_ATOMIC); break; case WIDE_ID(PHY_OPS_GROUP, CT_KILL_NOTIFICATION): iwl_xvt_user_send_notif(xvt, IWL_TM_USER_CMD_NOTIF_CT_KILL, data, size, GFP_ATOMIC); break; case REPLY_RX_PHY_CMD: IWL_DEBUG_INFO(xvt, "REPLY_RX_PHY_CMD received but not handled\n"); break; case INIT_COMPLETE_NOTIF: IWL_DEBUG_INFO(xvt, "received INIT_COMPLETE_NOTIF\n"); break; case TX_CMD: if (xvt->send_tx_resp) iwl_xvt_user_send_notif(xvt, IWL_XVT_CMD_TX_CMD_RESP, data, size, GFP_ATOMIC); break; default: IWL_DEBUG_INFO(xvt, "xVT mode RX command 0x%x not handled\n", pkt->hdr.cmd); } } static void iwl_xvt_led_enable(struct iwl_xvt *xvt) { iwl_write32(xvt->trans, CSR_LED_REG, CSR_LED_REG_TURN_ON); } static void iwl_xvt_led_disable(struct iwl_xvt *xvt) { iwl_write32(xvt->trans, 
CSR_LED_REG, CSR_LED_REG_TURN_OFF); } static int iwl_xvt_sdio_io_toggle(struct iwl_xvt *xvt, struct iwl_tm_data *data_in, struct iwl_tm_data *data_out) { struct iwl_tm_sdio_io_toggle *sdio_io_toggle = data_in->data; return iwl_trans_test_mode_cmd(xvt->trans, sdio_io_toggle->enable); } /** * iwl_xvt_read_sv_drop - read SV drop version * @xvt: xvt data * Return: the SV drop (>= 0) or a negative error number */ static int iwl_xvt_read_sv_drop(struct iwl_xvt *xvt) { struct xvt_debug_cmd debug_cmd = { .opcode = cpu_to_le32(XVT_DBG_GET_SVDROP_VER_OP), .dw_num = 0, }; struct xvt_debug_res *debug_res; struct iwl_rx_packet *pkt; struct iwl_host_cmd host_cmd = { .id = REPLY_DEBUG_XVT_CMD, .data[0] = &debug_cmd, .len[0] = sizeof(debug_cmd), .dataflags[0] = IWL_HCMD_DFL_NOCOPY, .flags = CMD_WANT_SKB, }; int ret; if (xvt->state != IWL_XVT_STATE_OPERATIONAL) return 0; ret = iwl_xvt_send_cmd(xvt, &host_cmd); if (ret) return ret; /* Retrieve response packet */ pkt = host_cmd.resp_pkt; /* Get response data */ debug_res = (struct xvt_debug_res *)pkt->data; if (le32_to_cpu(debug_res->dw_num) < 1) { ret = -ENODATA; goto out; } ret = le32_to_cpu(debug_res->data[0]) & 0xFF; out: iwl_free_resp(&host_cmd); return ret; } static int iwl_xvt_get_dev_info(struct iwl_xvt *xvt, struct iwl_tm_data *data_in, struct iwl_tm_data *data_out) { struct iwl_tm_dev_info_req *dev_info_req; struct iwl_tm_dev_info *dev_info; const u8 driver_ver[] = BACKPORTS_GIT_TRACKED; int sv_step = 0x00; int dev_info_size; bool read_sv_drop = true; if (data_in) { dev_info_req = (struct iwl_tm_dev_info_req *)data_in->data; read_sv_drop = dev_info_req->read_sv ? true : false; } if (xvt->fwrt.cur_fw_img == IWL_UCODE_REGULAR && read_sv_drop) { sv_step = iwl_xvt_read_sv_drop(xvt); if (sv_step < 0) return sv_step; } dev_info_size = sizeof(struct iwl_tm_dev_info) + (strlen(driver_ver)+1)*sizeof(u8); dev_info = kzalloc(dev_info_size, GFP_KERNEL); if (!dev_info) return -ENOMEM; dev_info->dev_id = xvt->trans->hw_id; dev_info->fw_ver = xvt->fw->ucode_ver; dev_info->vendor_id = PCI_VENDOR_ID_INTEL; dev_info->build_ver = sv_step; /* * TODO: Silicon step is retrieved by reading * radio register 0x00. Simplifying implementation * by reading it in user space. */ dev_info->silicon_step = 0x00; strcpy(dev_info->driver_ver, driver_ver); data_out->data = dev_info; data_out->len = dev_info_size; return 0; } static int iwl_xvt_set_sw_config(struct iwl_xvt *xvt, struct iwl_tm_data *data_in) { struct iwl_xvt_sw_cfg_request *sw_cfg = (struct iwl_xvt_sw_cfg_request *)data_in->data; struct iwl_phy_cfg_cmd *fw_calib_cmd_cfg = xvt->sw_stack_cfg.fw_calib_cmd_cfg; __le32 cfg_mask = cpu_to_le32(sw_cfg->cfg_mask), fw_calib_event, fw_calib_flow, event_override, flow_override; int usr_idx, iwl_idx; if (data_in->len < sizeof(struct iwl_xvt_sw_cfg_request)) return -EINVAL; xvt->sw_stack_cfg.fw_dbg_flags = sw_cfg->dbg_flags; xvt->sw_stack_cfg.load_mask = sw_cfg->load_mask; xvt->sw_stack_cfg.calib_override_mask = sw_cfg->cfg_mask; for (usr_idx = 0; usr_idx < IWL_USER_FW_IMAGE_IDX_TYPE_MAX; usr_idx++) { switch (usr_idx) { case IWL_USER_FW_IMAGE_IDX_INIT: iwl_idx = IWL_UCODE_INIT; break; case IWL_USER_FW_IMAGE_IDX_REGULAR: iwl_idx = IWL_UCODE_REGULAR; break; case IWL_USER_FW_IMAGE_IDX_WOWLAN: iwl_idx = IWL_UCODE_WOWLAN; break; } /* TODO: Calculate PHY config according to device values */ fw_calib_cmd_cfg[iwl_idx].phy_cfg = cpu_to_le32(xvt->fw->phy_config); /* * If a cfg_mask bit is unset, take the default value * from the FW. Otherwise, take the value from sw_cfg. 
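 *
 * A worked example of the merge below (illustrative values): with
 * cfg_mask = 0x0000000f, a FW default of 0x12345678 and a user
 * override of 0x0000abcd, the result is
 * (~mask & default) | (mask & override) = 0x12345670 | 0x0000000d =
 * 0x1234567d.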
*/ fw_calib_event = xvt->fw->default_calib[iwl_idx].event_trigger; event_override = cpu_to_le32(sw_cfg->calib_ctrl[usr_idx].event_trigger); fw_calib_cmd_cfg[iwl_idx].calib_control.event_trigger = (~cfg_mask & fw_calib_event) | (cfg_mask & event_override); fw_calib_flow = xvt->fw->default_calib[iwl_idx].flow_trigger; flow_override = cpu_to_le32(sw_cfg->calib_ctrl[usr_idx].flow_trigger); fw_calib_cmd_cfg[iwl_idx].calib_control.flow_trigger = (~cfg_mask & fw_calib_flow) | (cfg_mask & flow_override); } return 0; } static int iwl_xvt_get_sw_config(struct iwl_xvt *xvt, struct iwl_tm_data *data_in, struct iwl_tm_data *data_out) { struct iwl_xvt_sw_cfg_request *get_cfg_req; struct iwl_xvt_sw_cfg_request *sw_cfg; struct iwl_phy_cfg_cmd *fw_calib_cmd_cfg = xvt->sw_stack_cfg.fw_calib_cmd_cfg; __le32 event_trigger, flow_trigger; int i, u; if (data_in->len < sizeof(struct iwl_xvt_sw_cfg_request)) return -EINVAL; get_cfg_req = data_in->data; sw_cfg = kzalloc(sizeof(*sw_cfg), GFP_KERNEL); if (!sw_cfg) return -ENOMEM; sw_cfg->load_mask = xvt->sw_stack_cfg.load_mask; sw_cfg->phy_config = xvt->fw->phy_config; sw_cfg->cfg_mask = xvt->sw_stack_cfg.calib_override_mask; sw_cfg->dbg_flags = xvt->sw_stack_cfg.fw_dbg_flags; for (i = 0; i < IWL_UCODE_TYPE_MAX; i++) { switch (i) { case IWL_UCODE_INIT: u = IWL_USER_FW_IMAGE_IDX_INIT; break; case IWL_UCODE_REGULAR: u = IWL_USER_FW_IMAGE_IDX_REGULAR; break; case IWL_UCODE_WOWLAN: u = IWL_USER_FW_IMAGE_IDX_WOWLAN; break; case IWL_UCODE_REGULAR_USNIFFER: continue; } if (get_cfg_req->get_calib_type == IWL_XVT_GET_CALIB_TYPE_DEF) { event_trigger = xvt->fw->default_calib[i].event_trigger; flow_trigger = xvt->fw->default_calib[i].flow_trigger; } else { event_trigger = fw_calib_cmd_cfg[i].calib_control.event_trigger; flow_trigger = fw_calib_cmd_cfg[i].calib_control.flow_trigger; } sw_cfg->calib_ctrl[u].event_trigger = le32_to_cpu(event_trigger); sw_cfg->calib_ctrl[u].flow_trigger = le32_to_cpu(flow_trigger); } data_out->data = sw_cfg; data_out->len = sizeof(*sw_cfg); return 0; } static int iwl_xvt_send_phy_cfg_cmd(struct iwl_xvt *xvt, u32 ucode_type) { struct iwl_phy_cfg_cmd *calib_cmd_cfg = &xvt->sw_stack_cfg.fw_calib_cmd_cfg[ucode_type]; int err; IWL_DEBUG_INFO(xvt, "Sending Phy CFG command: 0x%x\n", calib_cmd_cfg->phy_cfg); /* ESL workaround - calibration is not allowed */ if (CPTCFG_IWL_TIMEOUT_FACTOR > 20) { calib_cmd_cfg->calib_control.event_trigger = 0; calib_cmd_cfg->calib_control.flow_trigger = 0; } /* Sending calibration configuration control data */ err = iwl_xvt_send_cmd_pdu(xvt, PHY_CONFIGURATION_CMD, 0, sizeof(*calib_cmd_cfg), calib_cmd_cfg); if (err) IWL_ERR(xvt, "Error (%d) running INIT calibrations control\n", err); return err; } static int iwl_xvt_continue_init_unified(struct iwl_xvt *xvt) { struct iwl_nvm_access_complete_cmd nvm_complete = {}; struct iwl_notification_wait init_complete_wait; static const u16 init_complete[] = { INIT_COMPLETE_NOTIF }; int err; err = iwl_xvt_send_cmd_pdu(xvt, WIDE_ID(REGULATORY_AND_NVM_GROUP, NVM_ACCESS_COMPLETE), 0, sizeof(nvm_complete), &nvm_complete); if (err) goto init_error; xvt->state = IWL_XVT_STATE_OPERATIONAL; iwl_init_notification_wait(&xvt->notif_wait, &init_complete_wait, init_complete, sizeof(init_complete), NULL, NULL); err = iwl_xvt_send_phy_cfg_cmd(xvt, IWL_UCODE_REGULAR); if (err) { iwl_remove_notification(&xvt->notif_wait, &init_complete_wait); goto init_error; } err = iwl_wait_notification(&xvt->notif_wait, &init_complete_wait, XVT_UCODE_CALIB_TIMEOUT); if (err) goto init_error; return 0; init_error: 
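	/*
	 * The handshake above is the standard notification-wait pattern
	 * used throughout this driver; a sketch of the ordering contract
	 * only (send_trigger_command() is a placeholder, the real calls
	 * are the ones above):
	 *
	 *	iwl_init_notification_wait(&nw, &wait, cmds, n_cmds, fn, ctx);
	 *	err = send_trigger_command();
	 *	if (err)
	 *		iwl_remove_notification(&nw, &wait); // never leak it
	 *	else
	 *		err = iwl_wait_notification(&nw, &wait, timeout);
	 *
	 * The waiter is registered before the trigger command is sent, so a
	 * firmware response that arrives immediately cannot be missed.
	 */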
xvt->state = IWL_XVT_STATE_UNINITIALIZED; iwl_fw_dbg_stop_sync(&xvt->fwrt); iwl_trans_stop_device(xvt->trans); return err; } static int iwl_xvt_run_runtime_fw(struct iwl_xvt *xvt) { int err; err = iwl_xvt_run_fw(xvt, IWL_UCODE_REGULAR); if (err) goto fw_error; xvt->state = IWL_XVT_STATE_OPERATIONAL; if (iwl_xvt_is_unified_fw(xvt)) { err = iwl_xvt_nvm_init(xvt); if (err) { IWL_ERR(xvt, "Failed to read NVM: %d\n", err); return err; } return iwl_xvt_continue_init_unified(xvt); } /* Send phy db control command and then phy db calibration*/ err = iwl_send_phy_db_data(xvt->phy_db); if (err) goto phy_error; err = iwl_xvt_send_phy_cfg_cmd(xvt, IWL_UCODE_REGULAR); if (err) goto phy_error; return 0; phy_error: iwl_fw_dbg_stop_sync(&xvt->fwrt); iwl_trans_stop_device(xvt->trans); fw_error: xvt->state = IWL_XVT_STATE_UNINITIALIZED; return err; } static bool iwl_xvt_wait_phy_db_entry(struct iwl_notif_wait_data *notif_wait, struct iwl_rx_packet *pkt, void *data) { struct iwl_phy_db *phy_db = data; if (pkt->hdr.cmd != CALIB_RES_NOTIF_PHY_DB) { WARN_ON(pkt->hdr.cmd != INIT_COMPLETE_NOTIF); return true; } WARN_ON(iwl_phy_db_set_section(phy_db, pkt)); return false; } /* * iwl_xvt_start_op_mode starts FW according to load mask, * waits for alive notification from device, and sends it * to user. */ static int iwl_xvt_start_op_mode(struct iwl_xvt *xvt) { int err = 0; u32 ucode_type = IWL_UCODE_INIT; /* * If init FW and runtime FW are both enabled, * Runtime FW will be executed after "continue * initialization" is done. * If init FW is disabled and runtime FW is * enabled, run Runtime FW. If runtime fw is * disabled, do nothing. */ if (!(xvt->sw_stack_cfg.load_mask & IWL_XVT_LOAD_MASK_INIT)) { if (xvt->sw_stack_cfg.load_mask & IWL_XVT_LOAD_MASK_RUNTIME) { err = iwl_xvt_run_runtime_fw(xvt); } else { if (xvt->state != IWL_XVT_STATE_UNINITIALIZED) { xvt->fw_running = false; iwl_fw_dbg_stop_sync(&xvt->fwrt); iwl_trans_stop_device(xvt->trans); } err = iwl_trans_start_hw(xvt->trans); if (err) { IWL_ERR(xvt, "Failed to start HW\n"); } else { iwl_write32(xvt->trans, CSR_RESET, 0); xvt->state = IWL_XVT_STATE_NO_FW; } } return err; } /* when fw image is unified, only regular ucode is loaded. */ if (iwl_xvt_is_unified_fw(xvt)) ucode_type = IWL_UCODE_REGULAR; err = iwl_xvt_run_fw(xvt, ucode_type); if (err) return err; xvt->state = IWL_XVT_STATE_INIT_STARTED; err = iwl_xvt_nvm_init(xvt); if (err) IWL_ERR(xvt, "Failed to read NVM: %d\n", err); /* * The initialization flow is not yet complete. * User need to execute "Continue initialization" * flow in order to complete it. * * NOT sending ALIVE notification to user. User * knows that FW is alive when "start op mode" * returns without errors. */ return err; } static void iwl_xvt_stop_op_mode(struct iwl_xvt *xvt) { if (xvt->state == IWL_XVT_STATE_UNINITIALIZED) return; if (xvt->fw_running) { iwl_xvt_txq_disable(xvt); xvt->fw_running = false; } iwl_fw_dbg_stop_sync(&xvt->fwrt); iwl_trans_stop_device(xvt->trans); iwl_free_fw_paging(&xvt->fwrt); xvt->state = IWL_XVT_STATE_UNINITIALIZED; } /* * iwl_xvt_continue_init get phy calibrations data from * device and stores them. It also runs runtime FW if it * is marked in the load mask. 
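	 *
	 * Together with iwl_xvt_start_op_mode() above, the load_mask bits
	 * give this overall behaviour (a reading aid derived from the code,
	 * not an authoritative table):
	 *
	 *	INIT RUNTIME  behaviour
	 *	  0     0     stop FW if running, restart HW, state = NO_FW
	 *	  0     1     run runtime FW directly
	 *	  1     0     run init FW, wait for "continue initialization"
	 *	  1     1     run init FW, then runtime FW from here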
*/ static int iwl_xvt_continue_init(struct iwl_xvt *xvt) { struct iwl_notification_wait calib_wait; static const u16 init_complete[] = { INIT_COMPLETE_NOTIF, CALIB_RES_NOTIF_PHY_DB }; int err; if (xvt->state != IWL_XVT_STATE_INIT_STARTED) return -EINVAL; if (iwl_xvt_is_unified_fw(xvt)) return iwl_xvt_continue_init_unified(xvt); iwl_init_notification_wait(&xvt->notif_wait, &calib_wait, init_complete, ARRAY_SIZE(init_complete), iwl_xvt_wait_phy_db_entry, xvt->phy_db); err = iwl_xvt_send_phy_cfg_cmd(xvt, IWL_UCODE_INIT); if (err) { iwl_remove_notification(&xvt->notif_wait, &calib_wait); goto error; } /* * Waiting for the calibration complete notification * iwl_xvt_wait_phy_db_entry will store the calibrations */ err = iwl_wait_notification(&xvt->notif_wait, &calib_wait, XVT_UCODE_CALIB_TIMEOUT); if (err) goto error; xvt->state = IWL_XVT_STATE_OPERATIONAL; if (xvt->sw_stack_cfg.load_mask & IWL_XVT_LOAD_MASK_RUNTIME) /* Run runtime FW stops the device by itself if error occurs */ err = iwl_xvt_run_runtime_fw(xvt); goto cont_init_end; error: xvt->state = IWL_XVT_STATE_UNINITIALIZED; iwl_xvt_txq_disable(xvt); iwl_fw_dbg_stop_sync(&xvt->fwrt); iwl_trans_stop_device(xvt->trans); cont_init_end: return err; } static int iwl_xvt_get_phy_db(struct iwl_xvt *xvt, struct iwl_tm_data *data_in, struct iwl_tm_data *data_out) { struct iwl_xvt_phy_db_request *phy_db_req = (struct iwl_xvt_phy_db_request *)data_in->data; struct iwl_xvt_phy_db_request *phy_db_resp; u8 *phy_data; u16 phy_size; u32 resp_size; int err; if ((data_in->len < sizeof(struct iwl_xvt_phy_db_request)) || (phy_db_req->size != 0)) return -EINVAL; err = iwl_phy_db_get_section_data(xvt->phy_db, phy_db_req->type, &phy_data, &phy_size, phy_db_req->chg_id); if (err) return err; resp_size = sizeof(*phy_db_resp) + phy_size; phy_db_resp = kzalloc(resp_size, GFP_KERNEL); if (!phy_db_resp) return -ENOMEM; phy_db_resp->chg_id = phy_db_req->chg_id; phy_db_resp->type = phy_db_req->type; phy_db_resp->size = phy_size; memcpy(phy_db_resp->data, phy_data, phy_size); data_out->data = phy_db_resp; data_out->len = resp_size; return 0; } static struct iwl_device_cmd *iwl_xvt_init_tx_dev_cmd(struct iwl_xvt *xvt) { struct iwl_device_cmd *dev_cmd; dev_cmd = iwl_trans_alloc_tx_cmd(xvt->trans); if (unlikely(!dev_cmd)) return NULL; memset(dev_cmd, 0, sizeof(*dev_cmd)); dev_cmd->hdr.cmd = TX_CMD; return dev_cmd; } static u16 iwl_xvt_get_offload_assist(struct ieee80211_hdr *hdr) { int hdrlen = ieee80211_hdrlen(hdr->frame_control); u16 offload_assist = 0; bool amsdu; amsdu = ieee80211_is_data_qos(hdr->frame_control) && (*ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_A_MSDU_PRESENT); if (amsdu) offload_assist |= BIT(TX_CMD_OFFLD_AMSDU); /* * padding is inserted later in transport. * do not align A-MSDUs to dword, as the subframe header * aligns the SNAP header. 
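	 *
	 * For reference, common MAC header lengths and the resulting pad
	 * decision (illustrative values; ieee80211_hdrlen() is the source
	 * of truth):
	 *
	 *	hdrlen 24 (3-addr data):      24 % 4 == 0 -> no pad bit
	 *	hdrlen 26 (QoS data):         26 % 4 == 2 -> pad, unless A-MSDU
	 *	hdrlen 30 (4-addr data):      30 % 4 == 2 -> pad
	 *	hdrlen 32 (4-addr QoS data):  32 % 4 == 0 -> no pad bit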
*/ if (hdrlen % 4 && !amsdu) offload_assist |= BIT(TX_CMD_OFFLD_PAD); return offload_assist; } static struct iwl_device_cmd * iwl_xvt_set_tx_params_gen3(struct iwl_xvt *xvt, struct sk_buff *skb, u32 rate_flags, u32 tx_flags) { struct iwl_device_cmd *dev_cmd; struct iwl_tx_cmd_gen3 *cmd; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct iwl_xvt_skb_info *skb_info = (void *)skb->cb; u32 header_length = ieee80211_hdrlen(hdr->frame_control); dev_cmd = iwl_xvt_init_tx_dev_cmd(xvt); if (unlikely(!dev_cmd)) return NULL; cmd = (struct iwl_tx_cmd_gen3 *)dev_cmd->payload; cmd->offload_assist |= cpu_to_le32(iwl_xvt_get_offload_assist(hdr)); cmd->len = cpu_to_le16((u16)skb->len); cmd->flags = cpu_to_le16(tx_flags); if (ieee80211_has_morefrags(hdr->frame_control)) /* though this flag is not supported for gen3, it is used * here for silicon feedback tests. */ cmd->flags |= cpu_to_le16(TX_CMD_FLG_MORE_FRAG); cmd->rate_n_flags = cpu_to_le32(rate_flags); /* Copy MAC header from skb into command buffer */ memcpy(cmd->hdr, hdr, header_length); /* Saving device command address itself in the control buffer, to be * used when reclaiming the command. */ skb_info->dev_cmd = dev_cmd; return dev_cmd; } static struct iwl_device_cmd * iwl_xvt_set_tx_params_gen2(struct iwl_xvt *xvt, struct sk_buff *skb, u32 rate_flags, u32 flags) { struct iwl_device_cmd *dev_cmd; struct iwl_xvt_skb_info *skb_info = (void *)skb->cb; struct iwl_tx_cmd_gen2 *tx_cmd; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; u32 header_length = ieee80211_hdrlen(hdr->frame_control); dev_cmd = iwl_xvt_init_tx_dev_cmd(xvt); if (unlikely(!dev_cmd)) return NULL; tx_cmd = (struct iwl_tx_cmd_gen2 *)dev_cmd->payload; tx_cmd->len = cpu_to_le16((u16)skb->len); tx_cmd->offload_assist |= cpu_to_le16(iwl_xvt_get_offload_assist(hdr)); tx_cmd->flags = cpu_to_le32(flags); if (ieee80211_has_morefrags(hdr->frame_control)) /* though this flag is not supported for gen2, it is used * for silicon feedback tests. */ tx_cmd->flags |= cpu_to_le32(TX_CMD_FLG_MORE_FRAG); tx_cmd->rate_n_flags = cpu_to_le32(rate_flags); /* Copy MAC header from skb into command buffer */ memcpy(tx_cmd->hdr, hdr, header_length); /* Saving device command address itself in the * control buffer, to be used when reclaiming * the command. */ skb_info->dev_cmd = dev_cmd; return dev_cmd; } /* * Allocates and sets the Tx cmd the driver data pointers in the skb */ static struct iwl_device_cmd * iwl_xvt_set_mod_tx_params(struct iwl_xvt *xvt, struct sk_buff *skb, u8 sta_id, u32 rate_flags, u32 flags) { struct iwl_device_cmd *dev_cmd; struct iwl_xvt_skb_info *skb_info = (void *)skb->cb; struct iwl_tx_cmd *tx_cmd; dev_cmd = iwl_xvt_init_tx_dev_cmd(xvt); if (unlikely(!dev_cmd)) return NULL; tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload; tx_cmd->len = cpu_to_le16((u16)skb->len); tx_cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE); tx_cmd->sta_id = sta_id; tx_cmd->rate_n_flags = cpu_to_le32(rate_flags); tx_cmd->tx_flags = cpu_to_le32(flags); /* the skb should already hold the data */ memcpy(tx_cmd->hdr, skb->data, sizeof(struct ieee80211_hdr)); /* * Saving device command address itself in the * control buffer, to be used when reclaiming * the command. 
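	 *
	 * skb->cb is the 48-byte scratch area every sk_buff carries; the
	 * driver overlays struct iwl_xvt_skb_info on it. Sketch of the
	 * round trip (the overlay layout itself is defined in xvt.h):
	 *
	 *	struct iwl_xvt_skb_info *skb_info = (void *)skb->cb;
	 *	skb_info->dev_cmd = dev_cmd;	// stash on the TX path
	 *	...
	 *	// on reclaim or error:
	 *	iwl_trans_free_tx_cmd(xvt->trans, skb_info->dev_cmd);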
*/ skb_info->dev_cmd = dev_cmd; return dev_cmd; } static void iwl_xvt_set_seq_number(struct iwl_xvt *xvt, struct tx_meta_data *meta_tx, struct sk_buff *skb, u8 frag_num) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; u8 *qc, tid; if (!ieee80211_is_data_qos(hdr->frame_control) || is_multicast_ether_addr(hdr->addr1)) return; qc = ieee80211_get_qos_ctl(hdr); tid = *qc & IEEE80211_QOS_CTL_TID_MASK; if (WARN_ON(tid >= IWL_MAX_TID_COUNT)) tid = IWL_MAX_TID_COUNT - 1; /* frag_num is expected to be zero in case of no fragmentation */ hdr->seq_ctrl = cpu_to_le16(meta_tx->seq_num[tid] | (frag_num & IEEE80211_SCTL_FRAG)); if (!ieee80211_has_morefrags(hdr->frame_control)) meta_tx->seq_num[tid] += 0x10; } static int iwl_xvt_send_packet(struct iwl_xvt *xvt, struct iwl_tm_mod_tx_request *tx_req, u32 *status, struct tx_meta_data *meta_tx) { struct sk_buff *skb; struct iwl_device_cmd *dev_cmd; int time_remain, err = 0; u32 flags = 0; u32 rate_flags = tx_req->rate_flags; if (xvt->fw_error) { IWL_ERR(xvt, "FW Error while sending Tx\n"); *status = XVT_TX_DRIVER_ABORTED; return -ENODEV; } skb = alloc_skb(tx_req->len, GFP_KERNEL); if (!skb) { *status = XVT_TX_DRIVER_ABORTED; return -ENOMEM; } memcpy(skb_put(skb, tx_req->len), tx_req->data, tx_req->len); iwl_xvt_set_seq_number(xvt, meta_tx, skb, 0); flags = tx_req->no_ack ? 0 : TX_CMD_FLG_ACK; if (iwl_xvt_is_unified_fw(xvt)) { flags |= IWL_TX_FLAGS_CMD_RATE; if (xvt->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) dev_cmd = iwl_xvt_set_tx_params_gen3(xvt, skb, rate_flags, flags); else dev_cmd = iwl_xvt_set_tx_params_gen2(xvt, skb, rate_flags, flags); } else { dev_cmd = iwl_xvt_set_mod_tx_params(xvt, skb, tx_req->sta_id, tx_req->rate_flags, flags); } if (!dev_cmd) { kfree_skb(skb); *status = XVT_TX_DRIVER_ABORTED; return -ENOMEM; } if (tx_req->trigger_led) iwl_xvt_led_enable(xvt); /* wait until the tx queue isn't full */ time_remain = wait_event_interruptible_timeout(meta_tx->mod_tx_wq, !meta_tx->txq_full, HZ); if (time_remain <= 0) { /* This should really not happen */ WARN_ON_ONCE(meta_tx->txq_full); IWL_ERR(xvt, "Error while sending Tx\n"); *status = XVT_TX_DRIVER_QUEUE_FULL; err = -EIO; goto err; } if (xvt->fw_error) { WARN_ON_ONCE(meta_tx->txq_full); IWL_ERR(xvt, "FW Error while sending Tx\n"); *status = XVT_TX_DRIVER_ABORTED; err = -ENODEV; goto err; } /* Assume we have one Txing thread only: the queue is not full * any more - nobody could fill it up in the meantime since we * were blocked. 
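	 *
	 * The wait just above is the standard "sleep until condition"
	 * kernel pattern (restating the generic contract, nothing
	 * driver-specific):
	 *
	 *	ret = wait_event_interruptible_timeout(wq, !txq_full, HZ);
	 *	// ret > 0:  condition became true within the timeout
	 *	// ret == 0: timed out with the queue still full
	 *	// ret < 0:  interrupted by a signal (-ERESTARTSYS)
	 *
	 * The transport's stop/wake queue callbacks set txq_full and call
	 * wake_up() on the same waitqueue elsewhere in this driver.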
*/ local_bh_disable(); err = iwl_trans_tx(xvt->trans, skb, dev_cmd, meta_tx->queue); local_bh_enable(); if (err) { IWL_ERR(xvt, "Tx command failed (error %d)\n", err); *status = XVT_TX_DRIVER_ABORTED; goto err; } if (tx_req->trigger_led) iwl_xvt_led_disable(xvt); return err; err: iwl_trans_free_tx_cmd(xvt->trans, dev_cmd); kfree_skb(skb); return err; } static struct iwl_device_cmd * iwl_xvt_set_tx_params(struct iwl_xvt *xvt, struct sk_buff *skb, struct iwl_xvt_tx_start *tx_start, u8 packet_index) { struct iwl_device_cmd *dev_cmd; struct iwl_xvt_skb_info *skb_info = (void *)skb->cb; struct iwl_tx_cmd *tx_cmd; /* the skb should already hold the data */ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; u32 header_length = ieee80211_hdrlen(hdr->frame_control); dev_cmd = iwl_xvt_init_tx_dev_cmd(xvt); if (unlikely(!dev_cmd)) return NULL; tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload; /* let the fw manage the seq number for non-qos/multicast */ if (!ieee80211_is_data_qos(hdr->frame_control) || is_multicast_ether_addr(hdr->addr1)) tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_SEQ_CTL); tx_cmd->len = cpu_to_le16((u16)skb->len); tx_cmd->offload_assist |= cpu_to_le16(iwl_xvt_get_offload_assist(hdr)); tx_cmd->tx_flags |= cpu_to_le32(tx_start->tx_data.tx_flags); if (ieee80211_has_morefrags(hdr->frame_control)) tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_MORE_FRAG); tx_cmd->rate_n_flags = cpu_to_le32(tx_start->tx_data.rate_flags); tx_cmd->sta_id = tx_start->frames_data[packet_index].sta_id; tx_cmd->sec_ctl = tx_start->frames_data[packet_index].sec_ctl; tx_cmd->initial_rate_index = tx_start->tx_data.initial_rate_index; tx_cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE); tx_cmd->rts_retry_limit = tx_start->tx_data.rts_retry_limit; tx_cmd->data_retry_limit = tx_start->tx_data.data_retry_limit; tx_cmd->tid_tspec = tx_start->frames_data[packet_index].tid_tspec; memcpy(tx_cmd->key, tx_start->frames_data[packet_index].key, sizeof(tx_cmd->key)); memcpy(tx_cmd->hdr, hdr, header_length); /* * Saving device command address itself in the control buffer, * to be used when reclaiming the command. 
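	 *
	 * A related detail: iwl_xvt_set_seq_number() above packs the 802.11
	 * sequence-control field, fragment number in bits 0-3 and sequence
	 * number in bits 4-15:
	 *
	 *	hdr->seq_ctrl = seq_num | (frag_num & IEEE80211_SCTL_FRAG);
	 *	// next MSDU: seq_num += 0x10, i.e. +1 in the upper 12 bits
	 *
	 * e.g. sequence number 5, fragment 2 gives 0x0052; fragments of one
	 * MSDU share the sequence number, which only advances once the last
	 * fragment (no more-frags bit) has been sent.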
 */
	skb_info->dev_cmd = dev_cmd;

	return dev_cmd;
}

static struct sk_buff *iwl_xvt_set_skb(struct iwl_xvt *xvt,
				       struct ieee80211_hdr *hdr,
				       struct tx_payload *payload)
{
	struct sk_buff *skb;
	u32 header_size = ieee80211_hdrlen(hdr->frame_control);
	u32 payload_length = payload->length;
	u32 packet_length = payload_length + header_size;

	skb = alloc_skb(packet_length, GFP_KERNEL);
	if (!skb)
		return NULL;
	/* copy MAC header into skb */
	memcpy(skb_put(skb, header_size), hdr, header_size);
	/* copy frame payload into skb */
	memcpy(skb_put(skb, payload_length), payload->payload, payload_length);

	return skb;
}

static struct sk_buff *iwl_xvt_create_fragment_skb(struct iwl_xvt *xvt,
						   struct ieee80211_hdr *hdr,
						   struct tx_payload *payload,
						   u32 fragment_size,
						   u8 frag_num)
{
	struct sk_buff *skb;
	const __le16 morefrags = cpu_to_le16(IEEE80211_FCTL_MOREFRAGS);
	u32 header_size = ieee80211_hdrlen(hdr->frame_control);
	u32 skb_size, offset, payload_remain, payload_chunk_size;

	if (WARN(fragment_size <= header_size ||
		 !ieee80211_is_data_qos(hdr->frame_control),
		 "can't fragment, fragment_size too small or not qos data"))
		return NULL;

	payload_chunk_size = fragment_size - header_size;
	offset = payload_chunk_size * frag_num;
	if (WARN(offset >= payload->length, "invalid fragment number %d\n",
		 frag_num))
		return NULL;

	payload_remain = payload->length - offset;

	if (fragment_size < payload_remain + header_size) {
		skb_size = fragment_size;
		hdr->frame_control |= morefrags;
	} else {
		skb_size = payload_remain + header_size;
		hdr->frame_control &= ~morefrags;
		payload_chunk_size = payload_remain;
	}

	skb = alloc_skb(skb_size, GFP_KERNEL);
	if (!skb)
		return NULL;

	/* copy MAC header into skb */
	memcpy(skb_put(skb, header_size), hdr, header_size);
	/* copy frame payload into skb */
	memcpy(skb_put(skb, payload_chunk_size), &payload->payload[offset],
	       payload_chunk_size);

	return skb;
}

static struct sk_buff *iwl_xvt_get_skb(struct iwl_xvt *xvt,
				       struct ieee80211_hdr *hdr,
				       struct tx_payload *payload,
				       u32 fragment_size,
				       u8 frag_num)
{
	if (fragment_size == 0) /* no fragmentation */
		return iwl_xvt_set_skb(xvt, hdr, payload);

	return iwl_xvt_create_fragment_skb(xvt, hdr, payload, fragment_size,
					   frag_num);
}

static int iwl_xvt_transmit_packet(struct iwl_xvt *xvt,
				   struct sk_buff *skb,
				   struct iwl_xvt_tx_start *tx_start,
				   u8 packet_index,
				   u8 frag_num,
				   u32 *status)
{
	struct iwl_device_cmd *dev_cmd;
	int time_remain, err = 0;
	u8 queue = tx_start->frames_data[packet_index].queue;
	struct tx_queue_data *queue_data = &xvt->queue_data[queue];
	u32 rate_flags = tx_start->tx_data.rate_flags;
	u32 tx_flags = tx_start->tx_data.tx_flags;

	/* set tx number */
	iwl_xvt_set_seq_number(xvt, &xvt->tx_meta_data[XVT_LMAC_0_ID], skb,
			       frag_num);

	if (iwl_xvt_is_unified_fw(xvt)) {
		if (xvt->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
			dev_cmd = iwl_xvt_set_tx_params_gen3(xvt, skb,
							     rate_flags,
							     tx_flags);
		else
			dev_cmd = iwl_xvt_set_tx_params_gen2(xvt, skb,
							     rate_flags,
							     tx_flags);
	} else {
		dev_cmd = iwl_xvt_set_tx_params(xvt, skb, tx_start,
						packet_index);
	}
	if (!dev_cmd) {
		kfree_skb(skb);
		*status = XVT_TX_DRIVER_ABORTED;
		return -ENOMEM;
	}

	/* wait until the tx queue isn't full */
	time_remain = wait_event_interruptible_timeout(queue_data->tx_wq,
						       !queue_data->txq_full,
						       HZ);
	if (time_remain <= 0) {
		/* This should really not happen */
		WARN_ON_ONCE(queue_data->txq_full);
		IWL_ERR(xvt, "Error while sending Tx\n");
		*status = XVT_TX_DRIVER_QUEUE_FULL;
		err = -EIO;
		goto on_err;
	}

	if (xvt->fw_error) {
		WARN_ON_ONCE(queue_data->txq_full);
		IWL_ERR(xvt, "FW Error while sending packet\n");
		*status =
XVT_TX_DRIVER_ABORTED; err = -ENODEV; goto on_err; } /* Assume we have one Txing thread only: the queue is not full * any more - nobody could fill it up in the meantime since we * were blocked. */ local_bh_disable(); err = iwl_trans_tx(xvt->trans, skb, dev_cmd, queue); local_bh_enable(); if (err) { IWL_ERR(xvt, "Tx command failed (error %d)\n", err); *status = XVT_TX_DRIVER_ABORTED; goto on_err; } return 0; on_err: iwl_trans_free_tx_cmd(xvt->trans, dev_cmd); kfree_skb(skb); return err; } static int iwl_xvt_send_tx_done_notif(struct iwl_xvt *xvt, u32 status) { struct iwl_xvt_tx_done *done_notif; u32 i, j, done_notif_size, num_of_queues = 0; int err; for (i = 1; i < IWL_MAX_HW_QUEUES; i++) { if (xvt->queue_data[i].allocated_queue) num_of_queues++; } done_notif_size = sizeof(*done_notif) + num_of_queues * sizeof(struct iwl_xvt_post_tx_data); done_notif = kzalloc(done_notif_size, GFP_KERNEL); if (!done_notif) return -ENOMEM; done_notif->status = status; done_notif->num_of_queues = num_of_queues; for (i = 1, j = 0; i <= num_of_queues; i++) { if (!xvt->queue_data[i].allocated_queue) continue; done_notif->tx_data[j].num_of_packets = xvt->queue_data[i].tx_counter; done_notif->tx_data[j].queue = i; j++; } err = iwl_xvt_user_send_notif(xvt, IWL_XVT_CMD_ENHANCED_TX_DONE, (void *)done_notif, done_notif_size, GFP_ATOMIC); if (err) IWL_ERR(xvt, "Error %d sending tx_done notification\n", err); kfree(done_notif); return err; } static int iwl_xvt_start_tx_handler(void *data) { struct iwl_xvt_enhanced_tx_data *task_data = data; struct iwl_xvt_tx_start *tx_start = &task_data->tx_start_data; struct iwl_xvt *xvt = task_data->xvt; u8 num_of_frames; u32 status, packets_in_cycle = 0; int time_remain, err = 0, sent_packets = 0; u32 num_of_cycles = tx_start->num_of_cycles; u64 i, num_of_iterations; /* reset tx parameters */ xvt->num_of_tx_resp = 0; xvt->send_tx_resp = tx_start->send_tx_resp; status = 0; for (i = 0; i < IWL_MAX_HW_QUEUES; i++) xvt->queue_data[i].tx_counter = 0; num_of_frames = tx_start->num_of_different_frames; for (i = 0; i < num_of_frames; i++) packets_in_cycle += tx_start->frames_data[i].times; if (WARN(packets_in_cycle == 0, "invalid packets amount to send")) return -EINVAL; if (num_of_cycles == IWL_XVT_TX_MODULATED_INFINITE) num_of_cycles = XVT_MAX_TX_COUNT / packets_in_cycle; xvt->expected_tx_amount = packets_in_cycle * num_of_cycles; num_of_iterations = num_of_cycles * num_of_frames; for (i = 0; (i < num_of_iterations) && !kthread_should_stop(); i++) { u16 j, times; u8 frame_index, payload_idx, frag_idx, frag_num; struct ieee80211_hdr *hdr; struct sk_buff *skb; u8 frag_size = tx_start->tx_data.fragment_size; struct tx_payload *payload; u8 frag_array_size = ARRAY_SIZE(tx_start->tx_data.frag_num); frame_index = i % num_of_frames; payload_idx = tx_start->frames_data[frame_index].payload_index; payload = xvt->payloads[payload_idx]; hdr = (struct ieee80211_hdr *) tx_start->frames_data[frame_index].header; times = tx_start->frames_data[frame_index].times; for (j = 0; j < times; j++) { if (xvt->fw_error) { IWL_ERR(xvt, "FW Error during TX\n"); status = XVT_TX_DRIVER_ABORTED; err = -ENODEV; goto on_exit; } frag_idx = 0; while (frag_idx < frag_array_size) { frag_num = tx_start->tx_data.frag_num[frag_idx]; if (frag_num == XVT_STOP_TX || (frag_size == 0 && frag_idx > 0)) break; skb = iwl_xvt_get_skb(xvt, hdr, payload, frag_size, frag_num); if (!skb) { IWL_ERR(xvt, "skb is NULL\n"); status = XVT_TX_DRIVER_ABORTED; err = -ENOMEM; goto on_exit; } err = iwl_xvt_transmit_packet(xvt, skb, tx_start, 
frame_index, frag_num, &status); sent_packets++; if (err) { IWL_ERR(xvt, "stop due to err %d\n", err); goto on_exit; } ++frag_idx; } } } time_remain = wait_event_interruptible_timeout( xvt->tx_done_wq, xvt->num_of_tx_resp == sent_packets, 5 * HZ * CPTCFG_IWL_TIMEOUT_FACTOR); if (time_remain <= 0) { IWL_ERR(xvt, "Not all Tx messages were sent\n"); status = XVT_TX_DRIVER_TIMEOUT; } on_exit: err = iwl_xvt_send_tx_done_notif(xvt, status); xvt->is_enhanced_tx = false; kfree(data); for (i = 0; i < IWL_XVT_MAX_PAYLOADS_AMOUNT; i++) { kfree(xvt->payloads[i]); xvt->payloads[i] = NULL; } do_exit(err); } static int iwl_xvt_modulated_tx_handler(void *data) { u64 tx_count, max_tx; int time_remain, num_of_packets, err = 0; struct iwl_xvt *xvt; struct iwl_xvt_tx_mod_done *done_notif; u32 status = XVT_TX_DRIVER_SUCCESSFUL; struct iwl_xvt_tx_mod_task_data *task_data = (struct iwl_xvt_tx_mod_task_data *)data; struct tx_meta_data *xvt_tx; xvt = task_data->xvt; xvt_tx = &xvt->tx_meta_data[task_data->lmac_id]; xvt_tx->tx_task_operating = true; num_of_packets = task_data->tx_req.times; max_tx = (num_of_packets == IWL_XVT_TX_MODULATED_INFINITE) ? XVT_MAX_TX_COUNT : num_of_packets; xvt_tx->tot_tx = num_of_packets; xvt_tx->tx_counter = 0; for (tx_count = 0; (tx_count < max_tx) && (!kthread_should_stop()); tx_count++){ err = iwl_xvt_send_packet(xvt, &task_data->tx_req, &status, xvt_tx); if (err) { IWL_ERR(xvt, "stop send packets due to err %d\n", err); break; } } if (!err) { time_remain = wait_event_interruptible_timeout( xvt_tx->mod_tx_done_wq, xvt_tx->tx_counter == tx_count, 5 * HZ); if (time_remain <= 0) { IWL_ERR(xvt, "Not all Tx messages were sent\n"); xvt_tx->tx_task_operating = false; status = XVT_TX_DRIVER_TIMEOUT; } } done_notif = kmalloc(sizeof(*done_notif), GFP_KERNEL); if (!done_notif) { xvt_tx->tx_task_operating = false; kfree(data); return -ENOMEM; } done_notif->num_of_packets = xvt_tx->tx_counter; done_notif->status = status; done_notif->lmac_id = task_data->lmac_id; err = iwl_xvt_user_send_notif(xvt, IWL_XVT_CMD_SEND_MOD_TX_DONE, (void *)done_notif, sizeof(*done_notif), GFP_ATOMIC); if (err) { IWL_ERR(xvt, "Error %d sending tx_done notification\n", err); kfree(done_notif); } xvt_tx->tx_task_operating = false; kfree(data); do_exit(err); } static int iwl_xvt_modulated_tx_infinite_stop(struct iwl_xvt *xvt, struct iwl_tm_data *data_in) { int err = 0; u32 lmac_id = ((struct iwl_xvt_tx_mod_stop *)data_in->data)->lmac_id; struct tx_meta_data *xvt_tx = &xvt->tx_meta_data[lmac_id]; if (xvt_tx->tx_mod_thread && xvt_tx->tx_task_operating) { err = kthread_stop(xvt_tx->tx_mod_thread); xvt_tx->tx_mod_thread = NULL; } return err; } static inline int map_sta_to_lmac(struct iwl_xvt *xvt, u8 sta_id) { switch (sta_id) { case XVT_LMAC_0_STA_ID: return XVT_LMAC_0_ID; case XVT_LMAC_1_STA_ID: return XVT_LMAC_1_ID; default: IWL_ERR(xvt, "wrong sta id, can't match queue\n"); return -EINVAL; } } static int iwl_xvt_tx_queue_cfg(struct iwl_xvt *xvt, struct iwl_tm_data *data_in) { struct iwl_xvt_tx_queue_cfg *input = (struct iwl_xvt_tx_queue_cfg *)data_in->data; u8 sta_id = input->sta_id; int lmac_id = map_sta_to_lmac(xvt, sta_id); if (lmac_id < 0) return lmac_id; switch (input->operation) { case TX_QUEUE_CFG_ADD: return iwl_xvt_allocate_tx_queue(xvt, sta_id, lmac_id); case TX_QUEUE_CFG_REMOVE: iwl_xvt_free_tx_queue(xvt, lmac_id); break; default: IWL_ERR(xvt, "failed in tx config - wrong operation\n"); return -EINVAL; } return 0; } static int iwl_xvt_start_tx(struct iwl_xvt *xvt, struct iwl_xvt_driver_command_req *req) { struct 
iwl_xvt_enhanced_tx_data *task_data;

	if (WARN(xvt->is_enhanced_tx ||
		 xvt->tx_meta_data[XVT_LMAC_0_ID].tx_task_operating ||
		 xvt->tx_meta_data[XVT_LMAC_1_ID].tx_task_operating,
		 "TX is already in progress\n"))
		return -EINVAL;

	xvt->is_enhanced_tx = true;

	task_data = kzalloc(sizeof(*task_data), GFP_KERNEL);
	if (!task_data) {
		xvt->is_enhanced_tx = false;
		return -ENOMEM;
	}

	task_data->xvt = xvt;
	memcpy(&task_data->tx_start_data, req->input_data,
	       sizeof(struct iwl_xvt_tx_start));

	xvt->tx_task = kthread_run(iwl_xvt_start_tx_handler, task_data,
				   "start enhanced tx command");
	if (IS_ERR(xvt->tx_task)) {
		int err = PTR_ERR(xvt->tx_task);

		xvt->tx_task = NULL;
		xvt->is_enhanced_tx = false;
		kfree(task_data);
		return err;
	}

	return 0;
}

static int iwl_xvt_stop_tx(struct iwl_xvt *xvt)
{
	int err = 0;

	if (xvt->tx_task && xvt->is_enhanced_tx) {
		err = kthread_stop(xvt->tx_task);
		xvt->tx_task = NULL;
	}

	return err;
}

static int iwl_xvt_set_tx_payload(struct iwl_xvt *xvt,
				  struct iwl_xvt_driver_command_req *req)
{
	struct iwl_xvt_set_tx_payload *input =
		(struct iwl_xvt_set_tx_payload *)req->input_data;
	u32 size = sizeof(struct tx_payload) + input->length;
	struct tx_payload *payload_struct;

	if (WARN(input->index >= IWL_XVT_MAX_PAYLOADS_AMOUNT,
		 "invalid payload index\n"))
		return -EINVAL;

	/* First free payload in case index is already in use */
	kfree(xvt->payloads[input->index]);

	/* Allocate payload in xvt buffer */
	xvt->payloads[input->index] = kzalloc(size, GFP_KERNEL);
	if (!xvt->payloads[input->index])
		return -ENOMEM;

	payload_struct = xvt->payloads[input->index];
	payload_struct->length = input->length;
	memcpy(payload_struct->payload, input->payload, input->length);

	return 0;
}

static int iwl_xvt_modulated_tx(struct iwl_xvt *xvt,
				struct iwl_tm_data *data_in)
{
	u32 pkt_length = ((struct iwl_tm_mod_tx_request *)data_in->data)->len;
	u32 req_length = sizeof(struct iwl_tm_mod_tx_request) + pkt_length;
	u32 task_data_length =
		sizeof(struct iwl_xvt_tx_mod_task_data) + pkt_length;
	struct tx_meta_data *xvt_tx = &xvt->tx_meta_data[XVT_LMAC_0_ID];
	u8 sta_id;
	int lmac_id;
	struct iwl_xvt_tx_mod_task_data *task_data;
	int err;

	/* Verify this command was not called while tx is operating */
	if (WARN_ON(xvt->is_enhanced_tx))
		return -EINVAL;

	task_data = kzalloc(task_data_length, GFP_KERNEL);
	if (!task_data)
		return -ENOMEM;

	/*
	 * no need to check whether tx already operating on lmac, since check
	 * is already done in the USC
	 */
	task_data->xvt = xvt;
	memcpy(&task_data->tx_req, data_in->data, req_length);

	if (iwl_xvt_is_unified_fw(xvt)) {
		sta_id = task_data->tx_req.sta_id;
		lmac_id = map_sta_to_lmac(xvt, sta_id);
		if (lmac_id < 0) {
			err = lmac_id;
			goto out;
		}
		task_data->lmac_id = lmac_id;
		xvt_tx = &xvt->tx_meta_data[lmac_id];
		/* check if tx queue is allocated; if not - return */
		if (xvt_tx->queue < 0) {
			IWL_ERR(xvt, "failed in tx - queue is not allocated\n");
			err = -EIO;
			goto out;
		}
	}

	xvt_tx->tx_mod_thread = kthread_run(iwl_xvt_modulated_tx_handler,
					    task_data, "tx mod infinite");
	if (IS_ERR(xvt_tx->tx_mod_thread)) {
		err = PTR_ERR(xvt_tx->tx_mod_thread);
		xvt_tx->tx_mod_thread = NULL;
		xvt_tx->tx_task_operating = false;
		goto out;
	}

	return 0;
out:
	kfree(task_data);
	return err;
}

static int iwl_xvt_rx_hdrs_mode(struct iwl_xvt *xvt,
				struct iwl_tm_data *data_in)
{
	struct iwl_xvt_rx_hdrs_mode_request *rx_hdr = data_in->data;

	if (data_in->len < sizeof(struct iwl_xvt_rx_hdrs_mode_request))
		return -EINVAL;

	if (rx_hdr->mode)
		xvt->rx_hdr_enabled = true;
	else
		xvt->rx_hdr_enabled = false;

	return 0;
}

static int iwl_xvt_apmg_pd_mode(struct iwl_xvt *xvt,
				struct iwl_tm_data *data_in)
{
	struct iwl_xvt_apmg_pd_mode_request *apmg_pd = data_in->data;

	if (apmg_pd->mode)
		xvt->apmg_pd_en = true;
	else
		xvt->apmg_pd_en = false;

	return 0;
}

static int iwl_xvt_allocate_dma(struct iwl_xvt *xvt,
				struct iwl_tm_data *data_in,
				struct iwl_tm_data *data_out)
{
	struct iwl_xvt_alloc_dma *dma_req = data_in->data;
	struct iwl_xvt_alloc_dma *dma_res;

	if (data_in->len < sizeof(struct iwl_xvt_alloc_dma))
		return -EINVAL;

	if (xvt->dma_cpu_addr) {
		IWL_ERR(xvt, "XVT DMA already allocated\n");
		return -EBUSY;
	}

	xvt->dma_cpu_addr = dma_alloc_coherent(xvt->trans->dev, dma_req->size,
					       &xvt->dma_addr, GFP_KERNEL);
	if (!xvt->dma_cpu_addr)
		return -ENOMEM;

	dma_res = kmalloc(sizeof(*dma_res), GFP_KERNEL);
	if (!dma_res) {
		dma_free_coherent(xvt->trans->dev, dma_req->size,
				  xvt->dma_cpu_addr, xvt->dma_addr);
		xvt->dma_cpu_addr = NULL;
		xvt->dma_addr = 0;
		return -ENOMEM;
	}
	dma_res->size = dma_req->size;
	/* Casting to avoid compilation warnings when DMA address is 32bit */
	dma_res->addr = (u64)xvt->dma_addr;
	data_out->data = dma_res;
	data_out->len = sizeof(struct iwl_xvt_alloc_dma);
	xvt->dma_buffer_size = dma_req->size;

	return 0;
}

static int iwl_xvt_get_dma(struct iwl_xvt *xvt,
			   struct iwl_tm_data *data_in,
			   struct iwl_tm_data *data_out)
{
	struct iwl_xvt_get_dma *get_dma_resp;
	u32 resp_size;

	if (!xvt->dma_cpu_addr)
		return -ENOMEM;

	resp_size = sizeof(*get_dma_resp) + xvt->dma_buffer_size;
	get_dma_resp = kmalloc(resp_size, GFP_KERNEL);
	if (!get_dma_resp)
		return -ENOMEM;

	get_dma_resp->size = xvt->dma_buffer_size;
	memcpy(get_dma_resp->data, xvt->dma_cpu_addr, xvt->dma_buffer_size);
	data_out->data = get_dma_resp;
	data_out->len = resp_size;

	return 0;
}

static int iwl_xvt_free_dma(struct iwl_xvt *xvt,
			    struct iwl_tm_data *data_in)
{
	if (!xvt->dma_cpu_addr) {
		IWL_ERR(xvt, "XVT DMA was not allocated\n");
		return 0;
	}

	dma_free_coherent(xvt->trans->dev, xvt->dma_buffer_size,
			  xvt->dma_cpu_addr, xvt->dma_addr);

	xvt->dma_cpu_addr = NULL;
	xvt->dma_addr = 0;
	xvt->dma_buffer_size = 0;

	return 0;
}

static int iwl_xvt_get_chip_id(struct iwl_xvt *xvt,
			       struct iwl_tm_data *data_out)
{
	struct iwl_xvt_chip_id *chip_id;

	chip_id = kmalloc(sizeof(struct iwl_xvt_chip_id), GFP_KERNEL);
	if (!chip_id)
		return -ENOMEM;

	chip_id->registers[0] = ioread32((void __force __iomem *)XVT_SCU_SNUM1);
	chip_id->registers[1] = ioread32((void __force __iomem *)XVT_SCU_SNUM2);
	chip_id->registers[2] = ioread32((void __force __iomem *)XVT_SCU_SNUM3);

	data_out->data = chip_id;
	data_out->len = sizeof(struct iwl_xvt_chip_id);

	return 0;
}

static int iwl_xvt_get_mac_addr_info(struct iwl_xvt *xvt,
				     struct iwl_tm_data *data_out)
{
	struct iwl_xvt_mac_addr_info *mac_addr_info;
	u32 mac_addr0, mac_addr1;
	__u8 temp_mac_addr[ETH_ALEN];
	const u8 *hw_addr;

	mac_addr_info = kzalloc(sizeof(*mac_addr_info), GFP_KERNEL);
	if
(!mac_addr_info) return -ENOMEM; if (xvt->cfg->nvm_type != IWL_NVM_EXT) { memcpy(mac_addr_info->mac_addr, xvt->nvm_hw_addr, sizeof(mac_addr_info->mac_addr)); } else { /* MAC address in family 8000 */ if (xvt->is_nvm_mac_override) { memcpy(mac_addr_info->mac_addr, xvt->nvm_mac_addr, sizeof(mac_addr_info->mac_addr)); } else { /* read the mac address from WFMP registers */ mac_addr0 = iwl_read_umac_prph_no_grab(xvt->trans, WFMP_MAC_ADDR_0); mac_addr1 = iwl_read_umac_prph_no_grab(xvt->trans, WFMP_MAC_ADDR_1); hw_addr = (const u8 *)&mac_addr0; temp_mac_addr[0] = hw_addr[3]; temp_mac_addr[1] = hw_addr[2]; temp_mac_addr[2] = hw_addr[1]; temp_mac_addr[3] = hw_addr[0]; hw_addr = (const u8 *)&mac_addr1; temp_mac_addr[4] = hw_addr[1]; temp_mac_addr[5] = hw_addr[0]; memcpy(mac_addr_info->mac_addr, temp_mac_addr, sizeof(mac_addr_info->mac_addr)); } } data_out->data = mac_addr_info; data_out->len = sizeof(*mac_addr_info); return 0; } static int iwl_xvt_add_txq(struct iwl_xvt *xvt, struct iwl_scd_txq_cfg_cmd *cmd, u16 ssn, u16 flags, int size) { int queue_id = cmd->scd_queue, ret; if (iwl_xvt_is_unified_fw(xvt)) { /*TODO: add support for second lmac*/ queue_id = iwl_trans_txq_alloc(xvt->trans, cpu_to_le16(flags), cmd->sta_id, cmd->tid, SCD_QUEUE_CFG, size, 0); if (queue_id < 0) return queue_id; } else { iwl_trans_txq_enable_cfg(xvt->trans, queue_id, ssn, NULL, 0); ret = iwl_xvt_send_cmd_pdu(xvt, SCD_QUEUE_CFG, 0, sizeof(*cmd), cmd); if (ret) { IWL_ERR(xvt, "Failed to config queue %d on FIFO %d\n", cmd->scd_queue, cmd->tx_fifo); return ret; } } xvt->queue_data[queue_id].allocated_queue = true; init_waitqueue_head(&xvt->queue_data[queue_id].tx_wq); return queue_id; } static int iwl_xvt_remove_txq(struct iwl_xvt *xvt, struct iwl_scd_txq_cfg_cmd *cmd) { int ret = 0; if (iwl_xvt_is_unified_fw(xvt)) { struct iwl_tx_queue_cfg_cmd queue_cfg_cmd = { .flags = 0, .sta_id = cmd->sta_id, .tid = cmd->tid, }; ret = iwl_xvt_send_cmd_pdu(xvt, SCD_QUEUE_CFG, 0, sizeof(queue_cfg_cmd), &queue_cfg_cmd); iwl_trans_txq_free(xvt->trans, cmd->scd_queue); } else { iwl_trans_txq_disable(xvt->trans, cmd->scd_queue, false); ret = iwl_xvt_send_cmd_pdu(xvt, SCD_QUEUE_CFG, 0, sizeof(*cmd), cmd); } if (WARN(ret, "failed to send SCD_QUEUE_CFG")) return ret; xvt->queue_data[cmd->scd_queue].allocated_queue = false; return 0; } static int iwl_xvt_config_txq(struct iwl_xvt *xvt, struct iwl_xvt_driver_command_req *req, struct iwl_xvt_driver_command_resp *resp) { struct iwl_xvt_txq_config *conf = (struct iwl_xvt_txq_config *)req->input_data; int queue_id = conf->scd_queue, error; struct iwl_scd_txq_cfg_cmd cmd = { .sta_id = conf->sta_id, .tid = conf->tid, .scd_queue = conf->scd_queue, .action = conf->action, .aggregate = conf->aggregate, .tx_fifo = conf->tx_fifo, .window = conf->window, .ssn = cpu_to_le16(conf->ssn), }; struct iwl_xvt_txq_config_resp txq_resp = { .sta_id = conf->sta_id, .tid = conf->tid, }; if (req->max_out_length < sizeof(txq_resp)) return -ENOBUFS; if (conf->action == TX_QUEUE_CFG_REMOVE) { error = iwl_xvt_remove_txq(xvt, &cmd); if (WARN(error, "failed to remove queue")) return error; } else { queue_id = iwl_xvt_add_txq(xvt, &cmd, conf->ssn, conf->flags, conf->queue_size); if (queue_id < 0) return queue_id; } txq_resp.scd_queue = queue_id; memcpy(resp->resp_data, &txq_resp, sizeof(txq_resp)); resp->length = sizeof(txq_resp); return 0; } static int iwl_xvt_get_rx_agg_stats_cmd(struct iwl_xvt *xvt, struct iwl_xvt_driver_command_req *req, struct iwl_xvt_driver_command_resp *resp) { struct iwl_xvt_get_rx_agg_stats *params = 
(void *)req->input_data; struct iwl_xvt_get_rx_agg_stats_resp *stats_resp = (void *)resp->resp_data; struct iwl_xvt_reorder_buffer *buffer; int i; IWL_DEBUG_INFO(xvt, "get rx agg stats: sta_id=%d, tid=%d\n", params->sta_id, params->tid); if (req->max_out_length < sizeof(stats_resp)) return -ENOBUFS; for (i = 0; i < ARRAY_SIZE(xvt->reorder_bufs); i++) { buffer = &xvt->reorder_bufs[i]; if (buffer->sta_id != params->sta_id || buffer->tid != params->tid) continue; spin_lock_bh(&buffer->lock); stats_resp->dropped = buffer->stats.dropped; stats_resp->released = buffer->stats.released; stats_resp->skipped = buffer->stats.skipped; stats_resp->reordered = buffer->stats.reordered; /* clear statistics */ memset(&buffer->stats, 0, sizeof(buffer->stats)); spin_unlock_bh(&buffer->lock); break; } if (i == ARRAY_SIZE(xvt->reorder_bufs)) return -ENOENT; resp->length = sizeof(*stats_resp); return 0; } static void iwl_xvt_config_rx_mpdu(struct iwl_xvt *xvt, struct iwl_xvt_driver_command_req *req) { xvt->send_rx_mpdu = ((struct iwl_xvt_config_rx_mpdu_req *)req->input_data)->enable; } static int iwl_xvt_echo_notif(struct iwl_xvt *xvt) { return iwl_xvt_user_send_notif(xvt, IWL_XVT_CMD_ECHO_NOTIF, NULL, 0, GFP_KERNEL); } static int iwl_xvt_handle_driver_cmd(struct iwl_xvt *xvt, struct iwl_tm_data *data_in, struct iwl_tm_data *data_out) { struct iwl_xvt_driver_command_req *req = data_in->data; struct iwl_xvt_driver_command_resp *resp = NULL; __u32 cmd_id = req->command_id; int err = 0; IWL_DEBUG_INFO(xvt, "handle driver command 0x%X\n", cmd_id); if (req->max_out_length > 0) { resp = kzalloc(sizeof(*resp) + req->max_out_length, GFP_KERNEL); if (!resp) return -ENOMEM; } /* resp->length and resp->resp_data should be set in command handler */ switch (cmd_id) { case IWL_DRV_CMD_CONFIG_TX_QUEUE: err = iwl_xvt_config_txq(xvt, req, resp); break; case IWL_DRV_CMD_SET_TX_PAYLOAD: err = iwl_xvt_set_tx_payload(xvt, req); break; case IWL_DRV_CMD_TX_START: err = iwl_xvt_start_tx(xvt, req); break; case IWL_DRV_CMD_TX_STOP: err = iwl_xvt_stop_tx(xvt); break; case IWL_DRV_CMD_GET_RX_AGG_STATS: err = iwl_xvt_get_rx_agg_stats_cmd(xvt, req, resp); break; case IWL_DRV_CMD_CONFIG_RX_MPDU: iwl_xvt_config_rx_mpdu(xvt, req); break; case IWL_DRV_CMD_ECHO_NOTIF: err = iwl_xvt_echo_notif(xvt); break; default: IWL_ERR(xvt, "no command handler found for cmd_id[%u]\n", cmd_id); err = -EOPNOTSUPP; } if (err) goto out_free; if (req->max_out_length > 0) { if (WARN_ONCE(resp->length == 0, "response was not set correctly\n")) { err = -ENODATA; goto out_free; } resp->command_id = cmd_id; data_out->len = resp->length + sizeof(struct iwl_xvt_driver_command_resp); data_out->data = resp; return err; } out_free: kfree(resp); return err; } int iwl_xvt_user_cmd_execute(struct iwl_testmode *testmode, u32 cmd, struct iwl_tm_data *data_in, struct iwl_tm_data *data_out, bool *supported_cmd) { struct iwl_xvt *xvt = testmode->op_mode; int ret = 0; *supported_cmd = true; if (WARN_ON_ONCE(!xvt || !data_in)) return -EINVAL; IWL_DEBUG_INFO(xvt, "%s cmd=0x%X\n", __func__, cmd); mutex_lock(&xvt->mutex); switch (cmd) { /* Testmode custom cases */ case IWL_TM_USER_CMD_GET_DEVICE_INFO: ret = iwl_xvt_get_dev_info(xvt, data_in, data_out); break; case IWL_TM_USER_CMD_SV_IO_TOGGLE: ret = iwl_xvt_sdio_io_toggle(xvt, data_in, data_out); break; /* xVT cases */ case IWL_XVT_CMD_START: ret = iwl_xvt_start_op_mode(xvt); break; case IWL_XVT_CMD_STOP: iwl_xvt_stop_op_mode(xvt); break; case IWL_XVT_CMD_CONTINUE_INIT: ret = iwl_xvt_continue_init(xvt); break; case 
IWL_XVT_CMD_GET_PHY_DB_ENTRY: ret = iwl_xvt_get_phy_db(xvt, data_in, data_out); break; case IWL_XVT_CMD_SET_CONFIG: ret = iwl_xvt_set_sw_config(xvt, data_in); break; case IWL_XVT_CMD_GET_CONFIG: ret = iwl_xvt_get_sw_config(xvt, data_in, data_out); break; case IWL_XVT_CMD_MOD_TX: ret = iwl_xvt_modulated_tx(xvt, data_in); break; case IWL_XVT_CMD_RX_HDRS_MODE: ret = iwl_xvt_rx_hdrs_mode(xvt, data_in); break; case IWL_XVT_CMD_APMG_PD_MODE: ret = iwl_xvt_apmg_pd_mode(xvt, data_in); break; case IWL_XVT_CMD_ALLOC_DMA: ret = iwl_xvt_allocate_dma(xvt, data_in, data_out); break; case IWL_XVT_CMD_GET_DMA: ret = iwl_xvt_get_dma(xvt, data_in, data_out); break; case IWL_XVT_CMD_FREE_DMA: ret = iwl_xvt_free_dma(xvt, data_in); break; case IWL_XVT_CMD_GET_CHIP_ID: ret = iwl_xvt_get_chip_id(xvt, data_out); break; case IWL_XVT_CMD_GET_MAC_ADDR_INFO: ret = iwl_xvt_get_mac_addr_info(xvt, data_out); break; case IWL_XVT_CMD_MOD_TX_STOP: ret = iwl_xvt_modulated_tx_infinite_stop(xvt, data_in); break; case IWL_XVT_CMD_TX_QUEUE_CFG: ret = iwl_xvt_tx_queue_cfg(xvt, data_in); break; case IWL_XVT_CMD_DRIVER_CMD: ret = iwl_xvt_handle_driver_cmd(xvt, data_in, data_out); break; default: *supported_cmd = false; ret = -EOPNOTSUPP; IWL_DEBUG_INFO(xvt, "%s (cmd=0x%X) Not supported by xVT\n", __func__, cmd); break; } mutex_unlock(&xvt->mutex); if (ret && *supported_cmd) IWL_ERR(xvt, "%s (cmd=0x%X) ret=%d\n", __func__, cmd, ret); else IWL_DEBUG_INFO(xvt, "%s (cmd=0x%X) ended Ok\n", __func__, cmd); return ret; } backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/xvt/user-infc.h000066400000000000000000000072061351064634500274610ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #ifndef __user_infc_h__ #define __user_infc_h__ #include "iwl-tm-gnl.h" #include "iwl-tm-infc.h" #include "xvt.h" /* * iwl_xvt_user_send_notif masks the usage of iwl-tm-gnl * If there is a need in replacing the interface, it * should be done only here. */ static inline int iwl_xvt_user_send_notif(struct iwl_xvt *xvt, u32 cmd, void *data, u32 size, gfp_t flags) { int err; IWL_DEBUG_INFO(xvt, "send user notification: cmd=0x%x, size=%d\n", cmd, size); err = iwl_tm_gnl_send_msg(xvt->trans, cmd, false, data, size, flags); WARN_ONCE(err, "failed to send notification to user, err %d\n", err); return err; } void iwl_xvt_send_user_rx_notif(struct iwl_xvt *xvt, struct iwl_rx_cmd_buffer *rxb); int iwl_xvt_user_cmd_execute(struct iwl_testmode *testmode, u32 cmd, struct iwl_tm_data *data_in, struct iwl_tm_data *data_out, bool *supported_cmd); #endif backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/xvt/utils.c000066400000000000000000000252161351064634500267220ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. 
* * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #include "iwl-debug.h" #include "iwl-io.h" #include "fw-api.h" #include "xvt.h" #include "fw/dbg.h" int iwl_xvt_send_cmd(struct iwl_xvt *xvt, struct iwl_host_cmd *cmd) { /* * Synchronous commands from this op-mode must hold * the mutex, this ensures we don't try to send two * (or more) synchronous commands at a time. */ if (!(cmd->flags & CMD_ASYNC)) lockdep_assert_held(&xvt->mutex); return iwl_trans_send_cmd(xvt->trans, cmd); } int iwl_xvt_send_cmd_pdu(struct iwl_xvt *xvt, u32 id, u32 flags, u16 len, const void *data) { struct iwl_host_cmd cmd = { .id = id, .len = { len, }, .data = { data, }, .flags = flags, }; return iwl_xvt_send_cmd(xvt, &cmd); } static struct { char *name; u8 num; } advanced_lookup[] = { { "NMI_INTERRUPT_WDG", 0x34 }, { "SYSASSERT", 0x35 }, { "UCODE_VERSION_MISMATCH", 0x37 }, { "BAD_COMMAND", 0x38 }, { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C }, { "FATAL_ERROR", 0x3D }, { "NMI_TRM_HW_ERR", 0x46 }, { "NMI_INTERRUPT_TRM", 0x4C }, { "NMI_INTERRUPT_BREAK_POINT", 0x54 }, { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C }, { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 }, { "NMI_INTERRUPT_HOST", 0x66 }, { "NMI_INTERRUPT_ACTION_PT", 0x7C }, { "NMI_INTERRUPT_UNKNOWN", 0x84 }, { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 }, { "ADVANCED_SYSASSERT", 0 }, }; static const char *desc_lookup(u32 num) { int i; for (i = 0; i < ARRAY_SIZE(advanced_lookup) - 1; i++) if (advanced_lookup[i].num == num) return advanced_lookup[i].name; /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */ return advanced_lookup[i].name; } #define ERROR_START_OFFSET (1 * sizeof(u32)) #define ERROR_ELEM_SIZE (7 * sizeof(u32)) void iwl_xvt_get_nic_error_log_v1(struct iwl_xvt *xvt, struct iwl_error_event_table_v1 *table) { struct iwl_trans *trans = xvt->trans; u32 base = xvt->trans->dbg.lmac_error_event_table[0]; /* TODO: support CDB */ if (xvt->fwrt.cur_fw_img == IWL_UCODE_INIT) { if (!base) base = xvt->fw->init_errlog_ptr; } else { if (!base) base = xvt->fw->inst_errlog_ptr; } iwl_trans_read_mem_bytes(trans, base, table, sizeof(*table)); } void iwl_xvt_dump_nic_error_log_v1(struct iwl_xvt *xvt, struct iwl_error_event_table_v1 *table) { IWL_ERR(xvt, "0x%08X | %-28s\n", table->error_id, desc_lookup(table->error_id)); IWL_ERR(xvt, "0x%08X | uPc\n", table->pc); IWL_ERR(xvt, "0x%08X | 
branchlink1\n", table->blink1); IWL_ERR(xvt, "0x%08X | branchlink2\n", table->blink2); IWL_ERR(xvt, "0x%08X | interruptlink1\n", table->ilink1); IWL_ERR(xvt, "0x%08X | interruptlink2\n", table->ilink2); IWL_ERR(xvt, "0x%08X | data1\n", table->data1); IWL_ERR(xvt, "0x%08X | data2\n", table->data2); IWL_ERR(xvt, "0x%08X | data3\n", table->data3); IWL_ERR(xvt, "0x%08X | beacon time\n", table->bcon_time); IWL_ERR(xvt, "0x%08X | tsf low\n", table->tsf_low); IWL_ERR(xvt, "0x%08X | tsf hi\n", table->tsf_hi); IWL_ERR(xvt, "0x%08X | time gp1\n", table->gp1); IWL_ERR(xvt, "0x%08X | time gp2\n", table->gp2); IWL_ERR(xvt, "0x%08X | time gp3\n", table->gp3); IWL_ERR(xvt, "0x%08X | uCode version\n", table->ucode_ver); IWL_ERR(xvt, "0x%08X | hw version\n", table->hw_ver); IWL_ERR(xvt, "0x%08X | board version\n", table->brd_ver); IWL_ERR(xvt, "0x%08X | hcmd\n", table->hcmd); IWL_ERR(xvt, "0x%08X | isr0\n", table->isr0); IWL_ERR(xvt, "0x%08X | isr1\n", table->isr1); IWL_ERR(xvt, "0x%08X | isr2\n", table->isr2); IWL_ERR(xvt, "0x%08X | isr3\n", table->isr3); IWL_ERR(xvt, "0x%08X | isr4\n", table->isr4); IWL_ERR(xvt, "0x%08X | isr_pref\n", table->isr_pref); IWL_ERR(xvt, "0x%08X | wait_event\n", table->wait_event); IWL_ERR(xvt, "0x%08X | l2p_control\n", table->l2p_control); IWL_ERR(xvt, "0x%08X | l2p_duration\n", table->l2p_duration); IWL_ERR(xvt, "0x%08X | l2p_mhvalid\n", table->l2p_mhvalid); IWL_ERR(xvt, "0x%08X | l2p_addr_match\n", table->l2p_addr_match); IWL_ERR(xvt, "0x%08X | lmpm_pmg_sel\n", table->lmpm_pmg_sel); IWL_ERR(xvt, "0x%08X | timestamp\n", table->u_timestamp); IWL_ERR(xvt, "0x%08X | flow_handler\n", table->flow_handler); } void iwl_xvt_get_nic_error_log_v2(struct iwl_xvt *xvt, struct iwl_error_event_table_v2 *table) { struct iwl_trans *trans = xvt->trans; u32 base = xvt->trans->dbg.lmac_error_event_table[0]; /* TODO: support CDB */ if (xvt->fwrt.cur_fw_img == IWL_UCODE_INIT) { if (!base) base = xvt->fw->init_errlog_ptr; } else { if (!base) base = xvt->fw->inst_errlog_ptr; } iwl_trans_read_mem_bytes(trans, base, table, sizeof(*table)); } void iwl_xvt_dump_nic_error_log_v2(struct iwl_xvt *xvt, struct iwl_error_event_table_v2 *table) { IWL_ERR(xvt, "0x%08X | %-28s\n", table->error_id, desc_lookup(table->error_id)); IWL_ERR(xvt, "0x%08X | trm_hw_status0\n", table->trm_hw_status0); IWL_ERR(xvt, "0x%08X | trm_hw_status1\n", table->trm_hw_status1); IWL_ERR(xvt, "0x%08X | branchlink2\n", table->blink2); IWL_ERR(xvt, "0x%08X | interruptlink1\n", table->ilink1); IWL_ERR(xvt, "0x%08X | interruptlink2\n", table->ilink2); IWL_ERR(xvt, "0x%08X | data1\n", table->data1); IWL_ERR(xvt, "0x%08X | data2\n", table->data2); IWL_ERR(xvt, "0x%08X | data3\n", table->data3); IWL_ERR(xvt, "0x%08X | beacon time\n", table->bcon_time); IWL_ERR(xvt, "0x%08X | tsf low\n", table->tsf_low); IWL_ERR(xvt, "0x%08X | tsf hi\n", table->tsf_hi); IWL_ERR(xvt, "0x%08X | time gp1\n", table->gp1); IWL_ERR(xvt, "0x%08X | time gp2\n", table->gp2); IWL_ERR(xvt, "0x%08X | uCode revision type\n", table->fw_rev_type); IWL_ERR(xvt, "0x%08X | uCode version major\n", table->major); IWL_ERR(xvt, "0x%08X | uCode version minor\n", table->minor); IWL_ERR(xvt, "0x%08X | hw version\n", table->hw_ver); IWL_ERR(xvt, "0x%08X | board version\n", table->brd_ver); IWL_ERR(xvt, "0x%08X | hcmd\n", table->hcmd); IWL_ERR(xvt, "0x%08X | isr0\n", table->isr0); IWL_ERR(xvt, "0x%08X | isr1\n", table->isr1); IWL_ERR(xvt, "0x%08X | isr2\n", table->isr2); IWL_ERR(xvt, "0x%08X | isr3\n", table->isr3); IWL_ERR(xvt, "0x%08X | isr4\n", table->isr4); IWL_ERR(xvt, "0x%08X | 
last cmd Id\n", table->last_cmd_id); IWL_ERR(xvt, "0x%08X | wait_event\n", table->wait_event); IWL_ERR(xvt, "0x%08X | l2p_control\n", table->l2p_control); IWL_ERR(xvt, "0x%08X | l2p_duration\n", table->l2p_duration); IWL_ERR(xvt, "0x%08X | l2p_mhvalid\n", table->l2p_mhvalid); IWL_ERR(xvt, "0x%08X | l2p_addr_match\n", table->l2p_addr_match); IWL_ERR(xvt, "0x%08X | lmpm_pmg_sel\n", table->lmpm_pmg_sel); IWL_ERR(xvt, "0x%08X | timestamp\n", table->u_timestamp); IWL_ERR(xvt, "0x%08X | flow_handler\n", table->flow_handler); } void iwl_xvt_get_umac_error_log(struct iwl_xvt *xvt, struct iwl_umac_error_event_table *table) { struct iwl_trans *trans = xvt->trans; u32 base = xvt->trans->dbg.umac_error_event_table; if (base < trans->cfg->min_umac_error_event_table) { IWL_ERR(xvt, "Not valid error log pointer 0x%08X for %s uCode\n", base, (xvt->fwrt.cur_fw_img == IWL_UCODE_INIT) ? "Init" : "RT"); return; } iwl_trans_read_mem_bytes(trans, base, table, sizeof(*table)); } void iwl_xvt_dump_umac_error_log(struct iwl_xvt *xvt, struct iwl_umac_error_event_table *table) { IWL_ERR(xvt, "0x%08X | %s\n", table->error_id, desc_lookup(table->error_id)); IWL_ERR(xvt, "0x%08X | umac branchlink1\n", table->blink1); IWL_ERR(xvt, "0x%08X | umac branchlink2\n", table->blink2); IWL_ERR(xvt, "0x%08X | umac interruptlink1\n", table->ilink1); IWL_ERR(xvt, "0x%08X | umac interruptlink2\n", table->ilink2); IWL_ERR(xvt, "0x%08X | umac data1\n", table->data1); IWL_ERR(xvt, "0x%08X | umac data2\n", table->data2); IWL_ERR(xvt, "0x%08X | umac data3\n", table->data3); IWL_ERR(xvt, "0x%08X | umac major\n", table->umac_major); IWL_ERR(xvt, "0x%08X | umac minor\n", table->umac_minor); IWL_ERR(xvt, "0x%08X | frame pointer\n", table->frame_pointer); IWL_ERR(xvt, "0x%08X | stack pointer\n", table->stack_pointer); IWL_ERR(xvt, "0x%08X | last host cmd\n", table->cmd_header); IWL_ERR(xvt, "0x%08X | isr status reg\n", table->nic_isr_pref); } backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/xvt/xvt.c000066400000000000000000000561361351064634500264100ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2015 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #include #include #include "iwl-drv.h" #include "iwl-trans.h" #include "iwl-op-mode.h" #include "fw/img.h" #include "iwl-config.h" #include "iwl-phy-db.h" #include "iwl-csr.h" #include "xvt.h" #include "user-infc.h" #include "iwl-dnt-cfg.h" #include "iwl-dnt-dispatch.h" #include "iwl-io.h" #include "iwl-prph.h" #include "fw/dbg.h" #include "fw/api/rx.h" #define DRV_DESCRIPTION "Intel(R) xVT driver for Linux" MODULE_DESCRIPTION(DRV_DESCRIPTION); MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR); MODULE_LICENSE("GPL"); #define TX_QUEUE_CFG_TID (6) static const struct iwl_op_mode_ops iwl_xvt_ops; /* * module init and exit functions */ static int __init iwl_xvt_init(void) { return iwl_opmode_register("iwlxvt", &iwl_xvt_ops); } module_init(iwl_xvt_init); static void __exit iwl_xvt_exit(void) { iwl_opmode_deregister("iwlxvt"); } module_exit(iwl_xvt_exit); /* Please keep this array *SORTED* by hex value. * Access is done through binary search. * A warning will be triggered on violation. */ static const struct iwl_hcmd_names iwl_xvt_cmd_names[] = { HCMD_NAME(MVM_ALIVE), HCMD_NAME(INIT_COMPLETE_NOTIF), HCMD_NAME(TX_CMD), HCMD_NAME(SCD_QUEUE_CFG), HCMD_NAME(FW_PAGING_BLOCK_CMD), HCMD_NAME(PHY_CONFIGURATION_CMD), HCMD_NAME(CALIB_RES_NOTIF_PHY_DB), HCMD_NAME(NVM_ACCESS_CMD), HCMD_NAME(GET_SET_PHY_DB_CMD), HCMD_NAME(REPLY_HD_PARAMS_CMD), HCMD_NAME(NVM_COMMIT_COMPLETE_NOTIFICATION), HCMD_NAME(REPLY_RX_PHY_CMD), HCMD_NAME(REPLY_RX_MPDU_CMD), HCMD_NAME(FRAME_RELEASE), HCMD_NAME(REPLY_RX_DSP_EXT_INFO), HCMD_NAME(BA_NOTIF), HCMD_NAME(DTS_MEASUREMENT_NOTIFICATION), HCMD_NAME(REPLY_DEBUG_XVT_CMD), HCMD_NAME(LDBG_CONFIG_CMD), HCMD_NAME(DEBUG_LOG_MSG), }; /* Please keep this array *SORTED* by hex value. * Access is done through binary search. 
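 *
 * Illustrative note (an assumption based on the shared iwlwifi helpers,
 * not stated in this file): these tables back iwl_get_cmd_string(), which
 * bsearch()es them by command ID when printing a name, e.g.
 *
 *	iwl_get_cmd_string(xvt->trans, WIDE_ID(LONG_GROUP, LQ_CMD))
 *
 * so an out-of-order entry breaks the lookup and trips the sort check.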
*/ static const struct iwl_hcmd_names iwl_xvt_long_cmd_names[] = { HCMD_NAME(PHY_CONTEXT_CMD), HCMD_NAME(ADD_STA_KEY), HCMD_NAME(ADD_STA), HCMD_NAME(REMOVE_STA), HCMD_NAME(MAC_CONTEXT_CMD), HCMD_NAME(BINDING_CONTEXT_CMD), HCMD_NAME(LQ_CMD), HCMD_NAME(POWER_TABLE_CMD), HCMD_NAME(GET_SET_PHY_DB_CMD), HCMD_NAME(TX_ANT_CONFIGURATION_CMD), HCMD_NAME(REPLY_SF_CFG_CMD), }; /* Please keep this array *SORTED* by hex value. * Access is done through binary search. */ static const struct iwl_hcmd_names iwl_xvt_phy_names[] = { HCMD_NAME(CT_KILL_NOTIFICATION), HCMD_NAME(DTS_MEASUREMENT_NOTIF_WIDE), }; /* Please keep this array *SORTED* by hex value. * Access is done through binary search. */ static const struct iwl_hcmd_names iwl_xvt_data_path_names[] = { HCMD_NAME(DQA_ENABLE_CMD), }; /* Please keep this array *SORTED* by hex value. * Access is done through binary search. */ static const struct iwl_hcmd_names iwl_xvt_regulatory_and_nvm_names[] = { HCMD_NAME(NVM_ACCESS_COMPLETE), }; /* Please keep this array *SORTED* by hex value. * Access is done through binary search. */ static const struct iwl_hcmd_names iwl_xvt_location_names[] = { HCMD_NAME(LOCATION_GROUP_NOTIFICATION), HCMD_NAME(TOF_MCSI_DEBUG_NOTIF), HCMD_NAME(TOF_RANGE_RESPONSE_NOTIF), }; /* Please keep this array *SORTED* by hex value. * Access is done through binary search. */ static const struct iwl_hcmd_names iwl_xvt_system_names[] = { HCMD_NAME(INIT_EXTENDED_CFG_CMD), }; static const struct iwl_hcmd_names iwl_xvt_xvt_names[] = { HCMD_NAME(IQ_CALIB_CONFIG_NOTIF), }; static const struct iwl_hcmd_names iwl_xvt_debug_names[] = { HCMD_NAME(DBGC_SUSPEND_RESUME), }; static const struct iwl_hcmd_arr iwl_xvt_cmd_groups[] = { [LEGACY_GROUP] = HCMD_ARR(iwl_xvt_cmd_names), [LONG_GROUP] = HCMD_ARR(iwl_xvt_long_cmd_names), [SYSTEM_GROUP] = HCMD_ARR(iwl_xvt_system_names), [PHY_OPS_GROUP] = HCMD_ARR(iwl_xvt_phy_names), [DATA_PATH_GROUP] = HCMD_ARR(iwl_xvt_data_path_names), [LOCATION_GROUP] = HCMD_ARR(iwl_xvt_location_names), [REGULATORY_AND_NVM_GROUP] = HCMD_ARR(iwl_xvt_regulatory_and_nvm_names), [XVT_GROUP] = HCMD_ARR(iwl_xvt_xvt_names), [DEBUG_GROUP] = HCMD_ARR(iwl_xvt_debug_names), }; static int iwl_xvt_tm_send_hcmd(void *op_mode, struct iwl_host_cmd *host_cmd) { struct iwl_xvt *xvt = (struct iwl_xvt *)op_mode; if (WARN_ON_ONCE(!op_mode)) return -EINVAL; return iwl_xvt_send_cmd(xvt, host_cmd); } static struct iwl_op_mode *iwl_xvt_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, const struct iwl_fw *fw, struct dentry *dbgfs_dir) { struct iwl_op_mode *op_mode; struct iwl_xvt *xvt; struct iwl_trans_config trans_cfg = {}; static const u8 no_reclaim_cmds[] = { TX_CMD, }; u8 i; int err; op_mode = kzalloc(sizeof(struct iwl_op_mode) + sizeof(struct iwl_xvt), GFP_KERNEL); if (!op_mode) return NULL; op_mode->ops = &iwl_xvt_ops; xvt = IWL_OP_MODE_GET_XVT(op_mode); xvt->fw = fw; xvt->cfg = cfg; xvt->trans = trans; xvt->dev = trans->dev; iwl_fw_runtime_init(&xvt->fwrt, trans, fw, NULL, NULL, dbgfs_dir); mutex_init(&xvt->mutex); spin_lock_init(&xvt->notif_lock); /* * Populate the state variables that the * transport layer needs to know about. 
*/ trans_cfg.op_mode = op_mode; trans_cfg.no_reclaim_cmds = no_reclaim_cmds; trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds); trans_cfg.command_groups = iwl_xvt_cmd_groups; trans_cfg.command_groups_size = ARRAY_SIZE(iwl_xvt_cmd_groups); trans_cfg.cmd_queue = IWL_MVM_DQA_CMD_QUEUE; IWL_DEBUG_INFO(xvt, "dqa supported\n"); trans_cfg.cmd_fifo = IWL_MVM_TX_FIFO_CMD; trans_cfg.bc_table_dword = true; trans_cfg.scd_set_active = true; trans->wide_cmd_header = true; switch (iwlwifi_mod_params.amsdu_size) { case IWL_AMSDU_DEF: case IWL_AMSDU_4K: trans_cfg.rx_buf_size = IWL_AMSDU_4K; break; case IWL_AMSDU_8K: trans_cfg.rx_buf_size = IWL_AMSDU_8K; break; case IWL_AMSDU_12K: trans_cfg.rx_buf_size = IWL_AMSDU_12K; break; default: pr_err("%s: Unsupported amsdu_size: %d\n", KBUILD_MODNAME, iwlwifi_mod_params.amsdu_size); trans_cfg.rx_buf_size = IWL_AMSDU_4K; } /* the hardware splits the A-MSDU */ if (xvt->trans->cfg->mq_rx_supported) trans_cfg.rx_buf_size = IWL_AMSDU_4K; trans->rx_mpdu_cmd_hdr_size = (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) ? sizeof(struct iwl_rx_mpdu_desc) : IWL_RX_DESC_SIZE_V1; trans_cfg.cb_data_offs = offsetof(struct iwl_xvt_skb_info, trans); /* Configure transport layer */ iwl_trans_configure(xvt->trans, &trans_cfg); trans->command_groups = trans_cfg.command_groups; trans->command_groups_size = trans_cfg.command_groups_size; /* set up notification wait support */ iwl_notification_wait_init(&xvt->notif_wait); iwl_tm_init(trans, xvt->fw, &xvt->mutex, xvt); /* Init phy db */ xvt->phy_db = iwl_phy_db_init(xvt->trans); if (!xvt->phy_db) goto out_free; iwl_dnt_init(xvt->trans, dbgfs_dir); for (i = 0; i < NUM_OF_LMACS; i++) { init_waitqueue_head(&xvt->tx_meta_data[i].mod_tx_wq); init_waitqueue_head(&xvt->tx_meta_data[i].mod_tx_done_wq); xvt->tx_meta_data[i].queue = -1; xvt->tx_meta_data[i].tx_mod_thread = NULL; xvt->tx_meta_data[i].txq_full = false; }; for (i = 0; i < ARRAY_SIZE(xvt->reorder_bufs); i++) xvt->reorder_bufs[i].sta_id = IWL_XVT_INVALID_STA; memset(xvt->payloads, 0, sizeof(xvt->payloads)); xvt->tx_task = NULL; xvt->is_enhanced_tx = false; xvt->send_tx_resp = false; xvt->send_rx_mpdu = true; memset(xvt->queue_data, 0, sizeof(xvt->queue_data)); init_waitqueue_head(&xvt->tx_done_wq); trans->dbg.dest_tlv = xvt->fw->dbg.dest_tlv; trans->dbg.n_dest_reg = xvt->fw->dbg.n_dest_reg; memcpy(trans->dbg.conf_tlv, xvt->fw->dbg.conf_tlv, sizeof(trans->dbg.conf_tlv)); trans->dbg.trigger_tlv = xvt->fw->dbg.trigger_tlv; IWL_INFO(xvt, "Detected %s, REV=0x%X, xVT operation mode\n", xvt->cfg->name, xvt->trans->hw_rev); err = iwl_xvt_dbgfs_register(xvt, dbgfs_dir); if (err) IWL_ERR(xvt, "failed register xvt debugfs folder (%d)\n", err); return op_mode; out_free: kfree(op_mode); return NULL; } static void iwl_xvt_stop(struct iwl_op_mode *op_mode) { struct iwl_xvt *xvt = IWL_OP_MODE_GET_XVT(op_mode); int i; iwl_fw_cancel_timestamp(&xvt->fwrt); if (xvt->state != IWL_XVT_STATE_UNINITIALIZED) { if (xvt->fw_running) { iwl_xvt_txq_disable(xvt); xvt->fw_running = false; } iwl_fw_dbg_stop_sync(&xvt->fwrt); iwl_trans_stop_device(xvt->trans); } for (i = 0; i < ARRAY_SIZE(xvt->reorder_bufs); i++) { struct iwl_xvt_reorder_buffer *buffer; buffer = &xvt->reorder_bufs[i]; iwl_xvt_destroy_reorder_buffer(xvt, buffer); } iwl_phy_db_free(xvt->phy_db); xvt->phy_db = NULL; iwl_dnt_free(xvt->trans); kfree(op_mode); } static void iwl_xvt_reclaim_and_free(struct iwl_xvt *xvt, struct tx_meta_data *tx_data, u16 txq_id, u16 ssn) { struct sk_buff_head skbs; struct sk_buff *skb; struct iwl_xvt_skb_info 
*skb_info; __skb_queue_head_init(&skbs); iwl_trans_reclaim(xvt->trans, txq_id, ssn, &skbs); while (!skb_queue_empty(&skbs)) { skb = __skb_dequeue(&skbs); skb_info = (void *)skb->cb; if (xvt->is_enhanced_tx) { xvt->queue_data[txq_id].tx_counter++; xvt->num_of_tx_resp++; } else { tx_data->tx_counter++; } if (skb_info->dev_cmd) iwl_trans_free_tx_cmd(xvt->trans, skb_info->dev_cmd); kfree_skb(skb); } if (xvt->is_enhanced_tx && xvt->expected_tx_amount == xvt->num_of_tx_resp) wake_up_interruptible(&xvt->tx_done_wq); else if (tx_data->tot_tx == tx_data->tx_counter) wake_up_interruptible(&tx_data->mod_tx_done_wq); } static struct tx_meta_data * iwl_xvt_rx_get_tx_meta_data(struct iwl_xvt *xvt, u16 txq_id) { u8 lmac_id; /* * in case of enhanced_tx, tx_meta_data->queue is not * being set, so there's nothing to verify */ if (xvt->is_enhanced_tx) return &xvt->tx_meta_data[XVT_LMAC_0_ID]; if (!iwl_xvt_is_unified_fw(xvt)) { lmac_id = XVT_LMAC_0_ID; goto verify; } if (txq_id == xvt->tx_meta_data[XVT_LMAC_1_ID].queue) { lmac_id = XVT_LMAC_1_ID; goto verify; } lmac_id = XVT_LMAC_0_ID; verify: if (WARN(txq_id != xvt->tx_meta_data[lmac_id].queue, "got TX_CMD from unidentified queue: (lmac %d) %d %d\n", lmac_id, txq_id, xvt->tx_meta_data[lmac_id].queue)) return NULL; return &xvt->tx_meta_data[lmac_id]; } static void iwl_xvt_rx_tx_cmd_single(struct iwl_xvt *xvt, struct iwl_rx_packet *pkt) { /* struct iwl_mvm_tx_resp_v3 is almost the same */ struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data; int txq_id = SEQ_TO_QUEUE(le16_to_cpu(pkt->hdr.sequence)); u16 ssn = iwl_xvt_get_scd_ssn(xvt, tx_resp); struct tx_meta_data *tx_data; u16 status = le16_to_cpu(iwl_xvt_get_agg_status(xvt, tx_resp)->status) & TX_STATUS_MSK; tx_data = iwl_xvt_rx_get_tx_meta_data(xvt, txq_id); if (!tx_data) return; if (unlikely(status != TX_STATUS_SUCCESS)) IWL_WARN(xvt, "got error TX_RSP status %#x\n", status); iwl_xvt_reclaim_and_free(xvt, tx_data, txq_id, ssn); } static void iwl_xvt_rx_tx_cmd_handler(struct iwl_xvt *xvt, struct iwl_rx_packet *pkt) { struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data; if (tx_resp->frame_count == 1) iwl_xvt_rx_tx_cmd_single(xvt, pkt); /* for aggregations - we reclaim on BA_NOTIF */ } static void iwl_xvt_rx_ba_notif(struct iwl_xvt *xvt, struct iwl_rx_packet *pkt) { struct iwl_mvm_ba_notif *ba_notif; struct tx_meta_data *tx_data; u16 scd_flow; u16 scd_ssn; if (iwl_xvt_is_unified_fw(xvt)) { struct iwl_mvm_compressed_ba_notif *ba_res = (void *)pkt->data; u8 tid; u16 queue; u16 tfd_idx; if (!le16_to_cpu(ba_res->tfd_cnt)) goto out; /* * TODO: * When supporting multi TID aggregations - we need to move * next_reclaimed to be per TXQ and not per TID or handle it * in a different way. * This will go together with SN and AddBA offload and cannot * be handled properly for now. 
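 *
 * A rough sketch of per-TFD reclaim (illustrative only; the code below
 * currently assumes ra_tid_cnt == 1 and reclaims just tfd[0]):
 *
 *	for (i = 0; i < le16_to_cpu(ba_res->tfd_cnt); i++) {
 *		queue = le16_to_cpu(ba_res->tfd[i].q_num);
 *		tx_data = iwl_xvt_rx_get_tx_meta_data(xvt, queue);
 *		if (tx_data)
 *			iwl_xvt_reclaim_and_free(xvt, tx_data, queue,
 *					le16_to_cpu(ba_res->tfd[i].tfd_index));
 *	}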
*/ WARN_ON(le16_to_cpu(ba_res->ra_tid_cnt) != 1); tid = ba_res->ra_tid[0].tid; if (tid == IWL_MGMT_TID) tid = IWL_MAX_TID_COUNT; queue = le16_to_cpu(ba_res->tfd[0].q_num); tfd_idx = le16_to_cpu(ba_res->tfd[0].tfd_index); tx_data = iwl_xvt_rx_get_tx_meta_data(xvt, queue); if (!tx_data) return; iwl_xvt_reclaim_and_free(xvt, tx_data, queue, tfd_idx); out: IWL_DEBUG_TX_REPLY(xvt, "BA_NOTIFICATION Received from sta_id = %d, flags %x, sent:%d, acked:%d\n", ba_res->sta_id, le32_to_cpu(ba_res->flags), le16_to_cpu(ba_res->txed), le16_to_cpu(ba_res->done)); return; } ba_notif = (void *)pkt->data; scd_ssn = le16_to_cpu(ba_notif->scd_ssn); scd_flow = le16_to_cpu(ba_notif->scd_flow); tx_data = iwl_xvt_rx_get_tx_meta_data(xvt, scd_flow); if (!tx_data) return; iwl_xvt_reclaim_and_free(xvt, tx_data, scd_flow, scd_ssn); IWL_DEBUG_TX_REPLY(xvt, "ba_notif from %pM, sta_id = %d\n", ba_notif->sta_addr, ba_notif->sta_id); IWL_DEBUG_TX_REPLY(xvt, "tid %d, seq %d, bitmap 0x%llx, scd flow %d, ssn %d, sent %d, acked %d\n", ba_notif->tid, le16_to_cpu(ba_notif->seq_ctl), (unsigned long long)le64_to_cpu(ba_notif->bitmap), scd_flow, scd_ssn, ba_notif->txed, ba_notif->txed_2_done); } static void iwl_xvt_rx_dispatch(struct iwl_op_mode *op_mode, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb) { struct iwl_xvt *xvt = IWL_OP_MODE_GET_XVT(op_mode); struct iwl_rx_packet *pkt = rxb_addr(rxb); spin_lock(&xvt->notif_lock); iwl_notification_wait_notify(&xvt->notif_wait, pkt); IWL_DEBUG_INFO(xvt, "rx dispatch got notification\n"); switch (pkt->hdr.cmd) { case TX_CMD: iwl_xvt_rx_tx_cmd_handler(xvt, pkt); break; case BA_NOTIF: iwl_xvt_rx_ba_notif(xvt, pkt); break; case REPLY_RX_MPDU_CMD: iwl_xvt_reorder(xvt, pkt); break; case FRAME_RELEASE: iwl_xvt_rx_frame_release(xvt, pkt); } iwl_xvt_send_user_rx_notif(xvt, rxb); spin_unlock(&xvt->notif_lock); } static void iwl_xvt_nic_config(struct iwl_op_mode *op_mode) { struct iwl_xvt *xvt = IWL_OP_MODE_GET_XVT(op_mode); u8 radio_cfg_type, radio_cfg_step, radio_cfg_dash; u32 reg_val = 0; radio_cfg_type = (xvt->fw->phy_config & FW_PHY_CFG_RADIO_TYPE) >> FW_PHY_CFG_RADIO_TYPE_POS; radio_cfg_step = (xvt->fw->phy_config & FW_PHY_CFG_RADIO_STEP) >> FW_PHY_CFG_RADIO_STEP_POS; radio_cfg_dash = (xvt->fw->phy_config & FW_PHY_CFG_RADIO_DASH) >> FW_PHY_CFG_RADIO_DASH_POS; /* SKU control */ reg_val |= CSR_HW_REV_STEP(xvt->trans->hw_rev) << CSR_HW_IF_CONFIG_REG_POS_MAC_STEP; reg_val |= CSR_HW_REV_DASH(xvt->trans->hw_rev) << CSR_HW_IF_CONFIG_REG_POS_MAC_DASH; /* radio configuration */ reg_val |= radio_cfg_type << CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE; reg_val |= radio_cfg_step << CSR_HW_IF_CONFIG_REG_POS_PHY_STEP; reg_val |= radio_cfg_dash << CSR_HW_IF_CONFIG_REG_POS_PHY_DASH; WARN_ON((radio_cfg_type << CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE) & ~CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE); /* * TODO: Bits 7-8 of CSR in 8000 HW family and higher set the ADC * sampling, and shouldn't be set to any non-zero value. * The same is supposed to be true of the other HW, but unsetting * them (such as the 7260) causes automatic tests to fail on seemingly * unrelated errors. Need to further investigate this, but for now * we'll separate cases. 
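 *
 * Net effect of the split below: CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI is
 * OR'ed into reg_val only for families older than IWL_DEVICE_FAMILY_8000.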
*/ if (xvt->trans->cfg->device_family < IWL_DEVICE_FAMILY_8000) reg_val |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI; iwl_trans_set_bits_mask(xvt->trans, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH | CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP | CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE | CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP | CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH | CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI | CSR_HW_IF_CONFIG_REG_BIT_MAC_SI, reg_val); IWL_DEBUG_INFO(xvt, "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type, radio_cfg_step, radio_cfg_dash); /* * W/A : NIC is stuck in a reset state after Early PCIe power off * (PCIe power is lost before PERST# is asserted), causing ME FW * to lose ownership and not being able to obtain it back. */ if (!xvt->trans->cfg->apmg_not_supported) iwl_set_bits_mask_prph(xvt->trans, APMG_PS_CTRL_REG, APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS, ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS); } static void iwl_xvt_nic_error(struct iwl_op_mode *op_mode) { struct iwl_xvt *xvt = IWL_OP_MODE_GET_XVT(op_mode); void *p_table; void *p_table_umac = NULL; struct iwl_error_event_table_v2 table_v2; struct iwl_umac_error_event_table table_umac; int err, table_size; xvt->fw_error = true; wake_up_interruptible(&xvt->tx_meta_data[XVT_LMAC_0_ID].mod_tx_wq); iwl_xvt_get_nic_error_log_v2(xvt, &table_v2); iwl_xvt_dump_nic_error_log_v2(xvt, &table_v2); p_table = kmemdup(&table_v2, sizeof(table_v2), GFP_ATOMIC); table_size = sizeof(table_v2); if (xvt->support_umac_log || (xvt->trans->dbg.error_event_table_tlv_status & IWL_ERROR_EVENT_TABLE_UMAC)) { iwl_xvt_get_umac_error_log(xvt, &table_umac); iwl_xvt_dump_umac_error_log(xvt, &table_umac); p_table_umac = kmemdup(&table_umac, sizeof(table_umac), GFP_ATOMIC); } if (p_table) { err = iwl_xvt_user_send_notif(xvt, IWL_XVT_CMD_SEND_NIC_ERROR, (void *)p_table, table_size, GFP_ATOMIC); if (err) IWL_WARN(xvt, "Error %d sending NIC error notification\n", err); kfree(p_table); } if (p_table_umac) { err = iwl_xvt_user_send_notif(xvt, IWL_XVT_CMD_SEND_NIC_UMAC_ERROR, (void *)p_table_umac, sizeof(table_umac), GFP_ATOMIC); if (err) IWL_WARN(xvt, "Error %d sending NIC umac error notification\n", err); kfree(p_table_umac); } iwl_fw_error_collect(&xvt->fwrt); } static bool iwl_xvt_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state) { struct iwl_xvt *xvt = IWL_OP_MODE_GET_XVT(op_mode); u32 rfkill_state = state ? 
IWL_XVT_RFKILL_ON : IWL_XVT_RFKILL_OFF; int err; err = iwl_xvt_user_send_notif(xvt, IWL_XVT_CMD_SEND_RFKILL, &rfkill_state, sizeof(rfkill_state), GFP_ATOMIC); if (err) IWL_WARN(xvt, "Error %d sending RFKILL notification\n", err); return false; } static void iwl_xvt_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb) { struct iwl_xvt *xvt = IWL_OP_MODE_GET_XVT(op_mode); struct iwl_xvt_skb_info *skb_info = (void *)skb->cb; iwl_trans_free_tx_cmd(xvt->trans, skb_info->dev_cmd); kfree_skb(skb); } static void iwl_xvt_stop_sw_queue(struct iwl_op_mode *op_mode, int queue) { struct iwl_xvt *xvt = IWL_OP_MODE_GET_XVT(op_mode); u8 i; if (xvt->queue_data[queue].allocated_queue) { xvt->queue_data[queue].txq_full = true; } else { for (i = 0; i < NUM_OF_LMACS; i++) { if (queue == xvt->tx_meta_data[i].queue) { xvt->tx_meta_data[i].txq_full = true; break; } } } } static void iwl_xvt_wake_sw_queue(struct iwl_op_mode *op_mode, int queue) { struct iwl_xvt *xvt = IWL_OP_MODE_GET_XVT(op_mode); u8 i; if (xvt->queue_data[queue].allocated_queue) { xvt->queue_data[queue].txq_full = false; wake_up_interruptible(&xvt->queue_data[queue].tx_wq); } else { for (i = 0; i < NUM_OF_LMACS; i++) { if (queue == xvt->tx_meta_data[i].queue) { xvt->tx_meta_data[i].txq_full = false; wake_up_interruptible( &xvt->tx_meta_data[i].mod_tx_wq); break; } } } } static const struct iwl_op_mode_ops iwl_xvt_ops = { .start = iwl_xvt_start, .stop = iwl_xvt_stop, .rx = iwl_xvt_rx_dispatch, .nic_config = iwl_xvt_nic_config, .nic_error = iwl_xvt_nic_error, .hw_rf_kill = iwl_xvt_set_hw_rfkill_state, .free_skb = iwl_xvt_free_skb, .queue_full = iwl_xvt_stop_sw_queue, .queue_not_full = iwl_xvt_wake_sw_queue, .test_ops = { .send_hcmd = iwl_xvt_tm_send_hcmd, .cmd_exec = iwl_xvt_user_cmd_execute, }, }; void iwl_xvt_free_tx_queue(struct iwl_xvt *xvt, u8 lmac_id) { if (xvt->tx_meta_data[lmac_id].queue == -1) return; iwl_trans_txq_free(xvt->trans, xvt->tx_meta_data[lmac_id].queue); xvt->tx_meta_data[lmac_id].queue = -1; } int iwl_xvt_allocate_tx_queue(struct iwl_xvt *xvt, u8 sta_id, u8 lmac_id) { int ret, size = max_t(u32, IWL_DEFAULT_QUEUE_SIZE, xvt->trans->cfg->min_256_ba_txq_size); ret = iwl_trans_txq_alloc(xvt->trans, cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE), sta_id, TX_QUEUE_CFG_TID, SCD_QUEUE_CFG, size, 0); /* ret is positive when func returns the allocated queue number */ if (ret > 0) { xvt->tx_meta_data[lmac_id].queue = ret; ret = 0; } else { IWL_ERR(xvt, "failed to allocate queue\n"); } return ret; } void iwl_xvt_txq_disable(struct iwl_xvt *xvt) { if (!iwl_xvt_has_default_txq(xvt)) return; if (iwl_xvt_is_unified_fw(xvt)) { iwl_xvt_free_tx_queue(xvt, XVT_LMAC_0_ID); iwl_xvt_free_tx_queue(xvt, XVT_LMAC_1_ID); } else { iwl_trans_txq_disable(xvt->trans, IWL_XVT_DEFAULT_TX_QUEUE, true); } } backport-iwlwifi-dkms-7906/drivers/net/wireless/intel/iwlwifi/xvt/xvt.h000066400000000000000000000410501351064634500264020ustar00rootroot00000000000000/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2015 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. 
* * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2015 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* *****************************************************************************/ #ifndef __iwl_xvt_h__ #define __iwl_xvt_h__ #include #include #include "iwl-drv.h" #include "iwl-trans.h" #include "iwl-op-mode.h" #include "fw/img.h" #include "iwl-config.h" #include "fw-api.h" #include "fw/notif-wait.h" #include "constants.h" #include "fw/runtime.h" enum iwl_xvt_state { IWL_XVT_STATE_UNINITIALIZED = 0, IWL_XVT_STATE_NO_FW, IWL_XVT_STATE_INIT_STARTED, IWL_XVT_STATE_OPERATIONAL, }; #define IWL_XVT_LOAD_MASK_INIT BIT(0) #define IWL_XVT_LOAD_MASK_RUNTIME BIT(1) #define NUM_OF_LMACS (2) #define IWL_XVT_DBG_FLAGS_NO_DEFAULT_TXQ (BIT(2)) #define IWL_XVT_MAX_PAYLOADS_AMOUNT (16) #define IWL_MAX_BAID 32 #define IWL_XVT_INVALID_STA 0xFF /** * tx_meta_data - Holds data and member needed for tx * @tx_mod_thread: thread dedicated for tx traffic * @mod_tx_wq: send packets queue * @tx_task_operating: whether tx is active * @queue: FW queue ID for TX_CMD * @tx_counter: counts number of sent packets * @tot_tx: total number of packets required to sent * @mod_tx_done_wq: queue to wait on until all packets are sent and received * @txq_full: set to true when mod_tx_wq is full * @seq_num: sequence number of qos frames (per-tid) */ struct tx_meta_data { struct task_struct *tx_mod_thread; wait_queue_head_t mod_tx_wq; bool tx_task_operating; int queue; u64 tx_counter; u32 tot_tx; wait_queue_head_t mod_tx_done_wq; bool txq_full; u16 seq_num[IWL_MAX_TID_COUNT]; }; /* * struct iwl_xvt_reorder_statistics - reorder buffer statistics * @dropped: number of frames dropped (e.g. too old) * @released: total number of frames released (either in-order or * out of order (after passing the reorder buffer) * @skipped: number of frames skipped the reorder buffer (in-order) * @reordered: number of frames gone through the reorder buffer (unordered) */ struct iwl_xvt_reorder_statistics { u32 dropped; u32 released; u32 skipped; u32 reordered; }; /** * struct iwl_xvt_reorder_buffer - per ra/tid/queue reorder buffer * @head_sn: reorder window head sn * @num_stored: number of mpdus stored in the buffer * @buf_size: the reorder buffer size as set by the last addba request * @sta_id: sta id of this reorder buffer * @tid: tid of this reorder buffer * @queue: queue of this reorder buffer * @last_amsdu: track last ASMDU SN for duplication detection * @last_sub_index: track ASMDU sub frame index for duplication detection * @entries: number of pending frames (for each index) * @lock: protect reorder buffer internal state * @stats: reorder buffer statistics */ struct iwl_xvt_reorder_buffer { u16 head_sn; u16 num_stored; u8 buf_size; u8 sta_id; u8 tid; int queue; u16 last_amsdu; u8 last_sub_index; /* * we don't care about the actual frames, only their count. * avoid messing with reorder timer for that reason as well */ u16 entries[IEEE80211_MAX_AMPDU_BUF_HT]; spinlock_t lock; /* protect reorder buffer internal state */ struct iwl_xvt_reorder_statistics stats; }; /** * tx_queue_data - Holds tx data per tx queue * @tx_wq: TX sw queue * @tx_counter: Number of packets that were sent from this queue. 
Counts TX_RSP * @txq_full: Set to true when tx_wq is full * @allocated_queue: Whether queue is allocated */ struct tx_queue_data { wait_queue_head_t tx_wq; u64 tx_counter; bool txq_full; bool allocated_queue; }; /** * tx_payload - Holds tx payload * @length: Payload length in bytes * @payload: Payload buffer */ struct tx_payload { u16 length; u8 payload[]; }; /** * iwl_sw_stack_config - Holds active SW stack config as set from user space * @load_mask: Which FW are to be loaded during SW stack up * @iwl_phy_cfg_cmd: Which calibrations should be done */ struct iwl_sw_stack_config { u32 load_mask; u32 calib_override_mask; u32 fw_dbg_flags; struct iwl_phy_cfg_cmd fw_calib_cmd_cfg[IWL_UCODE_TYPE_MAX]; }; /* Note: This structure is read from the device with IO accesses, * and the reading already does the endian conversion. As it is * read with u32-sized accesses, any members with a different size * need to be ordered correctly though! */ struct iwl_error_event_table_v1 { u32 valid; /* (nonzero) valid, (0) log is empty */ u32 error_id; /* type of error */ u32 pc; /* program counter */ u32 blink1; /* branch link */ u32 blink2; /* branch link */ u32 ilink1; /* interrupt link */ u32 ilink2; /* interrupt link */ u32 data1; /* error-specific data */ u32 data2; /* error-specific data */ u32 data3; /* error-specific data */ u32 bcon_time; /* beacon timer */ u32 tsf_low; /* network timestamp function timer */ u32 tsf_hi; /* network timestamp function timer */ u32 gp1; /* GP1 timer register */ u32 gp2; /* GP2 timer register */ u32 gp3; /* GP3 timer register */ u32 ucode_ver; /* uCode version */ u32 hw_ver; /* HW Silicon version */ u32 brd_ver; /* HW board version */ u32 log_pc; /* log program counter */ u32 frame_ptr; /* frame pointer */ u32 stack_ptr; /* stack pointer */ u32 hcmd; /* last host command header */ u32 isr0; /* isr status register LMPM_NIC_ISR0: * rxtx_flag */ u32 isr1; /* isr status register LMPM_NIC_ISR1: * host_flag */ u32 isr2; /* isr status register LMPM_NIC_ISR2: * enc_flag */ u32 isr3; /* isr status register LMPM_NIC_ISR3: * time_flag */ u32 isr4; /* isr status register LMPM_NIC_ISR4: * wico interrupt */ u32 isr_pref; /* isr status register LMPM_NIC_PREF_STAT */ u32 wait_event; /* wait event() caller address */ u32 l2p_control; /* L2pControlField */ u32 l2p_duration; /* L2pDurationField */ u32 l2p_mhvalid; /* L2pMhValidBits */ u32 l2p_addr_match; /* L2pAddrMatchStat */ u32 lmpm_pmg_sel; /* indicate which clocks are turned on * (LMPM_PMG_SEL) */ u32 u_timestamp; /* indicate when the date and time of the * compilation */ u32 flow_handler; /* FH read/write pointers, RX credit */ } __packed; /* Note: This structure is read from the device with IO accesses, * and the reading already does the endian conversion. As it is * read with u32-sized accesses, any members with a different size * need to be ordered correctly though! 
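 *
 * Illustrative consequence of the u32-sized access (hypothetical members,
 * not part of this layout): two u16 fields would have to be declared
 * back-to-back,
 *
 *	u16 lo;
 *	u16 hi;
 *
 * so that the pair still fills exactly one u32 slot of the bulk read.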
*/ struct iwl_error_event_table_v2 { u32 valid; /* (nonzero) valid, (0) log is empty */ u32 error_id; /* type of error */ u32 trm_hw_status0; /* TRM HW status */ u32 trm_hw_status1; /* TRM HW status */ u32 blink2; /* branch link */ u32 ilink1; /* interrupt link */ u32 ilink2; /* interrupt link */ u32 data1; /* error-specific data */ u32 data2; /* error-specific data */ u32 data3; /* error-specific data */ u32 bcon_time; /* beacon timer */ u32 tsf_low; /* network timestamp function timer */ u32 tsf_hi; /* network timestamp function timer */ u32 gp1; /* GP1 timer register */ u32 gp2; /* GP2 timer register */ u32 fw_rev_type; /* firmware revision type */ u32 major; /* uCode version major */ u32 minor; /* uCode version minor */ u32 hw_ver; /* HW Silicon version */ u32 brd_ver; /* HW board version */ u32 log_pc; /* log program counter */ u32 frame_ptr; /* frame pointer */ u32 stack_ptr; /* stack pointer */ u32 hcmd; /* last host command header */ u32 isr0; /* isr status register LMPM_NIC_ISR0: * rxtx_flag */ u32 isr1; /* isr status register LMPM_NIC_ISR1: * host_flag */ u32 isr2; /* isr status register LMPM_NIC_ISR2: * enc_flag */ u32 isr3; /* isr status register LMPM_NIC_ISR3: * time_flag */ u32 isr4; /* isr status register LMPM_NIC_ISR4: * wico interrupt */ u32 last_cmd_id; /* last HCMD id handled by the firmware */ u32 wait_event; /* wait event() caller address */ u32 l2p_control; /* L2pControlField */ u32 l2p_duration; /* L2pDurationField */ u32 l2p_mhvalid; /* L2pMhValidBits */ u32 l2p_addr_match; /* L2pAddrMatchStat */ u32 lmpm_pmg_sel; /* indicate which clocks are turned on * (LMPM_PMG_SEL) */ u32 u_timestamp; /* indicate when the date and time of the * compilation */ u32 flow_handler; /* FH read/write pointers, RX credit */ } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */; /* UMAC error struct - relevant starting from family 8000 chip. * Note: This structure is read from the device with IO accesses, * and the reading already does the endian conversion. As it is * read with u32-sized accesses, any members with a different size * need to be ordered correctly though! 
struct iwl_umac_error_event_table { u32 valid; /* (nonzero) valid, (0) log is empty */ u32 error_id; /* type of error */ u32 blink1; /* branch link */ u32 blink2; /* branch link */ u32 ilink1; /* interrupt link */ u32 ilink2; /* interrupt link */ u32 data1; /* error-specific data */ u32 data2; /* error-specific data */ u32 data3; /* error-specific data */ u32 umac_major; u32 umac_minor; u32 frame_pointer; /* core register 27 */ u32 stack_pointer; /* core register 28 */ u32 cmd_header; /* latest host cmd sent to UMAC */ u32 nic_isr_pref; /* ISR status register */ } __packed; /** * struct iwl_xvt_skb_info - driver data per skb * @dev_cmd: a pointer to the iwl_dev_cmd associated with this skb * @trans: transport data */ struct iwl_xvt_skb_info { struct iwl_device_cmd *dev_cmd; void *trans[2]; }; /** * struct iwl_xvt - the xvt op_mode * * @trans: pointer to the transport layer * @cfg: pointer to the driver's configuration * @fw: a pointer to the fw object * @dev: pointer to struct device for printing purposes */ struct iwl_xvt { struct iwl_trans *trans; const struct iwl_cfg *cfg; struct iwl_phy_db *phy_db; const struct iwl_fw *fw; struct device *dev; struct dentry *debugfs_dir; struct mutex mutex; /* Protects access to xVT struct */ spinlock_t notif_lock; /* Protects notifications processing */ enum iwl_xvt_state state; bool fw_error; struct iwl_notif_wait_data notif_wait; bool fw_running; bool support_umac_log; struct iwl_sw_stack_config sw_stack_cfg; bool rx_hdr_enabled; bool apmg_pd_en; /* DMA buffer information */ u32 dma_buffer_size; u8 *dma_cpu_addr; dma_addr_t dma_addr; struct iwl_fw_runtime fwrt; bool is_nvm_mac_override; u8 nvm_hw_addr[ETH_ALEN]; u8 nvm_mac_addr[ETH_ALEN]; struct tx_meta_data tx_meta_data[NUM_OF_LMACS]; struct iwl_xvt_reorder_buffer reorder_bufs[IWL_MAX_BAID]; /* members for enhanced tx command */ struct tx_payload *payloads[IWL_XVT_MAX_PAYLOADS_AMOUNT]; struct task_struct *tx_task; bool is_enhanced_tx; bool send_tx_resp; bool send_rx_mpdu; u64 num_of_tx_resp; u64 expected_tx_amount; wait_queue_head_t tx_done_wq; struct tx_queue_data queue_data[IWL_MAX_HW_QUEUES]; }; #define IWL_OP_MODE_GET_XVT(_op_mode) \ ((struct iwl_xvt *)((_op_mode)->op_mode_specific)) /****************** * XVT Methods ******************/ /* Host Commands */ int __must_check iwl_xvt_send_cmd(struct iwl_xvt *xvt, struct iwl_host_cmd *cmd); int __must_check iwl_xvt_send_cmd_pdu(struct iwl_xvt *xvt, u32 id, u32 flags, u16 len, const void *data); /* Utils */ void iwl_xvt_get_nic_error_log_v1(struct iwl_xvt *xvt, struct iwl_error_event_table_v1 *table); void iwl_xvt_dump_nic_error_log_v1(struct iwl_xvt *xvt, struct iwl_error_event_table_v1 *table); void iwl_xvt_get_nic_error_log_v2(struct iwl_xvt *xvt, struct iwl_error_event_table_v2 *table); void iwl_xvt_dump_nic_error_log_v2(struct iwl_xvt *xvt, struct iwl_error_event_table_v2 *table); void iwl_xvt_get_umac_error_log(struct iwl_xvt *xvt, struct iwl_umac_error_event_table *table); void iwl_xvt_dump_umac_error_log(struct iwl_xvt *xvt, struct iwl_umac_error_event_table *table); /* User interface */ int iwl_xvt_user_cmd_execute(struct iwl_testmode *testmode, u32 cmd, struct iwl_tm_data *data_in, struct iwl_tm_data *data_out, bool *supported_cmd); /* FW */ int iwl_xvt_run_fw(struct iwl_xvt *xvt, u32 ucode_type); /* NVM */ int iwl_xvt_nvm_init(struct iwl_xvt *xvt); /* RX */ bool iwl_xvt_reorder(struct iwl_xvt *xvt, struct iwl_rx_packet *pkt); void iwl_xvt_rx_frame_release(struct iwl_xvt *xvt, struct iwl_rx_packet *pkt); void 
iwl_xvt_destroy_reorder_buffer(struct iwl_xvt *xvt, struct iwl_xvt_reorder_buffer *buf); /* Based on mvm function: iwl_mvm_has_new_tx_api */ static inline bool iwl_xvt_is_unified_fw(struct iwl_xvt *xvt) { /* TODO - replace with TLV once defined */ return xvt->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000; } static inline bool iwl_xvt_is_cdb_supported(struct iwl_xvt *xvt) { /* * TODO: * The issue of how to determine CDB APIs and usage is still not fully * defined. * There is a compilation for CDB and non-CDB FW, but there may * be also runtime check. * For now there is a TLV for checking compilation mode, but a * runtime check will also have to be here - once defined. */ return fw_has_capa(&xvt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CDB_SUPPORT); } static inline struct agg_tx_status* iwl_xvt_get_agg_status(struct iwl_xvt *xvt, struct iwl_mvm_tx_resp *tx_resp) { if (iwl_xvt_is_unified_fw(xvt)) return &((struct iwl_mvm_tx_resp *)tx_resp)->status; else return ((struct iwl_mvm_tx_resp_v3 *)tx_resp)->status; } static inline u32 iwl_xvt_get_scd_ssn(struct iwl_xvt *xvt, struct iwl_mvm_tx_resp *tx_resp) { return le32_to_cpup((__le32 *)iwl_xvt_get_agg_status(xvt, tx_resp) + tx_resp->frame_count) & 0xfff; } static inline bool iwl_xvt_has_default_txq(struct iwl_xvt *xvt) { return !(xvt->sw_stack_cfg.fw_dbg_flags & IWL_XVT_DBG_FLAGS_NO_DEFAULT_TXQ); } void iwl_xvt_free_tx_queue(struct iwl_xvt *xvt, u8 lmac_id); int iwl_xvt_allocate_tx_queue(struct iwl_xvt *xvt, u8 sta_id, u8 lmac_id); void iwl_xvt_txq_disable(struct iwl_xvt *xvt); /* XVT debugfs */ #ifdef CPTCFG_IWLWIFI_DEBUGFS int iwl_xvt_dbgfs_register(struct iwl_xvt *xvt, struct dentry *dbgfs_dir); #else static inline int iwl_xvt_dbgfs_register(struct iwl_xvt *xvt, struct dentry *dbgfs_dir) { return 0; } #endif /* CPTCFG_IWLWIFI_DEBUGFS */ #endif backport-iwlwifi-dkms-7906/drivers/net/wireless/mac80211_hwsim.c000066400000000000000000003245201351064634500245170ustar00rootroot00000000000000/* * mac80211_hwsim - software simulator of 802.11 radio(s) for mac80211 * Copyright (c) 2008, Jouni Malinen * Copyright (c) 2011, Javier Lopez * Copyright (c) 2016 - 2017 Intel Deutschland GmbH * Copyright (C) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. 
*/ /* * TODO: * - Add TSF sync and fix IBSS beacon transmission by adding * competition for "air time" at TBTT * - RX filtering based on filter configuration (data->rx_filter) */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "mac80211_hwsim.h" #define WARN_QUEUE 100 #define MAX_QUEUE 200 MODULE_AUTHOR("Jouni Malinen"); MODULE_DESCRIPTION("Software simulator of 802.11 radio(s) for mac80211"); MODULE_LICENSE("GPL"); static int radios = 2; module_param(radios, int, 0444); MODULE_PARM_DESC(radios, "Number of simulated radios"); static int channels = 1; module_param(channels, int, 0444); MODULE_PARM_DESC(channels, "Number of concurrent channels"); static bool paged_rx = false; module_param(paged_rx, bool, 0644); MODULE_PARM_DESC(paged_rx, "Use paged SKBs for RX instead of linear ones"); static bool rctbl = false; module_param(rctbl, bool, 0444); MODULE_PARM_DESC(rctbl, "Handle rate control table"); static bool support_p2p_device = true; module_param(support_p2p_device, bool, 0444); MODULE_PARM_DESC(support_p2p_device, "Support P2P-Device interface type"); /** * enum hwsim_regtest - the type of regulatory tests we offer * * These are the different values you can use for the regtest * module parameter. This is useful to help test world roaming * and the driver regulatory_hint() call and combinations of these. * If you want to do specific alpha2 regulatory domain tests simply * use the userspace regulatory request as that will be respected as * well without the need of this module parameter. This is designed * only for testing the driver regulatory request, world roaming * and all possible combinations. * * @HWSIM_REGTEST_DISABLED: No regulatory tests are performed, * this is the default value. * @HWSIM_REGTEST_DRIVER_REG_FOLLOW: Used for testing the driver regulatory * hint, only one driver regulatory hint will be sent as such the * secondary radios are expected to follow. * @HWSIM_REGTEST_DRIVER_REG_ALL: Used for testing the driver regulatory * request with all radios reporting the same regulatory domain. * @HWSIM_REGTEST_DIFF_COUNTRY: Used for testing the drivers calling * different regulatory domains requests. Expected behaviour is for * an intersection to occur but each device will still use their * respective regulatory requested domains. Subsequent radios will * use the resulting intersection. * @HWSIM_REGTEST_WORLD_ROAM: Used for testing the world roaming. We accomplish * this by using a custom beacon-capable regulatory domain for the first * radio. All other device world roam. * @HWSIM_REGTEST_CUSTOM_WORLD: Used for testing the custom world regulatory * domain requests. All radios will adhere to this custom world regulatory * domain. * @HWSIM_REGTEST_CUSTOM_WORLD_2: Used for testing 2 custom world regulatory * domain requests. The first radio will adhere to the first custom world * regulatory domain, the second one to the second custom world regulatory * domain. All other devices will world roam. * @HWSIM_REGTEST_STRICT_FOLLOW_: Used for testing strict regulatory domain * settings, only the first radio will send a regulatory domain request * and use strict settings. The rest of the radios are expected to follow. * @HWSIM_REGTEST_STRICT_ALL: Used for testing strict regulatory domain * settings. All radios will adhere to this. 
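 *   An illustrative invocation (value taken from the enum below):
 *   modprobe mac80211_hwsim radios=4 regtest=8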
* @HWSIM_REGTEST_STRICT_AND_DRIVER_REG: Used for testing strict regulatory * domain settings, combined with secondary driver regulatory domain * settings. The first radio will get a strict regulatory domain setting * using the first driver regulatory request and the second radio will use * non-strict settings using the second driver regulatory request. All * other devices should follow the intersection created between the * first two. * @HWSIM_REGTEST_ALL: Used for testing every possible mix. You will need * at least 6 radios for a complete test. We will test in this order: * 1 - driver custom world regulatory domain * 2 - second custom world regulatory domain * 3 - first driver regulatory domain request * 4 - second driver regulatory domain request * 5 - strict regulatory domain settings using the third driver regulatory * domain request * 6 and on - should follow the intersection of the 3rd, 4rth and 5th radio * regulatory requests. */ enum hwsim_regtest { HWSIM_REGTEST_DISABLED = 0, HWSIM_REGTEST_DRIVER_REG_FOLLOW = 1, HWSIM_REGTEST_DRIVER_REG_ALL = 2, HWSIM_REGTEST_DIFF_COUNTRY = 3, HWSIM_REGTEST_WORLD_ROAM = 4, HWSIM_REGTEST_CUSTOM_WORLD = 5, HWSIM_REGTEST_CUSTOM_WORLD_2 = 6, HWSIM_REGTEST_STRICT_FOLLOW = 7, HWSIM_REGTEST_STRICT_ALL = 8, HWSIM_REGTEST_STRICT_AND_DRIVER_REG = 9, HWSIM_REGTEST_ALL = 10, }; /* Set to one of the HWSIM_REGTEST_* values above */ static int regtest = HWSIM_REGTEST_DISABLED; module_param(regtest, int, 0444); MODULE_PARM_DESC(regtest, "The type of regulatory test we want to run"); static const char *hwsim_alpha2s[] = { "FI", "AL", "US", "DE", "JP", "AL", }; static const struct ieee80211_regdomain hwsim_world_regdom_custom_01 = { .n_reg_rules = 4, .alpha2 = "99", .reg_rules = { REG_RULE(2412-10, 2462+10, 40, 0, 20, 0), REG_RULE(2484-10, 2484+10, 40, 0, 20, 0), REG_RULE(5150-10, 5240+10, 40, 0, 30, 0), REG_RULE(5745-10, 5825+10, 40, 0, 30, 0), } }; static const struct ieee80211_regdomain hwsim_world_regdom_custom_02 = { .n_reg_rules = 2, .alpha2 = "99", .reg_rules = { REG_RULE(2412-10, 2462+10, 40, 0, 20, 0), REG_RULE(5725-10, 5850+10, 40, 0, 30, NL80211_RRF_NO_IR), } }; static const struct ieee80211_regdomain *hwsim_world_regdom_custom[] = { &hwsim_world_regdom_custom_01, &hwsim_world_regdom_custom_02, }; struct hwsim_vif_priv { u32 magic; u8 bssid[ETH_ALEN]; bool assoc; bool bcn_en; u16 aid; }; #define HWSIM_VIF_MAGIC 0x69537748 static inline void hwsim_check_magic(struct ieee80211_vif *vif) { struct hwsim_vif_priv *vp = (void *)vif->drv_priv; WARN(vp->magic != HWSIM_VIF_MAGIC, "Invalid VIF (%p) magic %#x, %pM, %d/%d\n", vif, vp->magic, vif->addr, vif->type, vif->p2p); } static inline void hwsim_set_magic(struct ieee80211_vif *vif) { struct hwsim_vif_priv *vp = (void *)vif->drv_priv; vp->magic = HWSIM_VIF_MAGIC; } static inline void hwsim_clear_magic(struct ieee80211_vif *vif) { struct hwsim_vif_priv *vp = (void *)vif->drv_priv; vp->magic = 0; } struct hwsim_sta_priv { u32 magic; }; #define HWSIM_STA_MAGIC 0x6d537749 static inline void hwsim_check_sta_magic(struct ieee80211_sta *sta) { struct hwsim_sta_priv *sp = (void *)sta->drv_priv; WARN_ON(sp->magic != HWSIM_STA_MAGIC); } static inline void hwsim_set_sta_magic(struct ieee80211_sta *sta) { struct hwsim_sta_priv *sp = (void *)sta->drv_priv; sp->magic = HWSIM_STA_MAGIC; } static inline void hwsim_clear_sta_magic(struct ieee80211_sta *sta) { struct hwsim_sta_priv *sp = (void *)sta->drv_priv; sp->magic = 0; } struct hwsim_chanctx_priv { u32 magic; }; #define HWSIM_CHANCTX_MAGIC 0x6d53774a static inline void 
hwsim_check_chanctx_magic(struct ieee80211_chanctx_conf *c) { struct hwsim_chanctx_priv *cp = (void *)c->drv_priv; WARN_ON(cp->magic != HWSIM_CHANCTX_MAGIC); } static inline void hwsim_set_chanctx_magic(struct ieee80211_chanctx_conf *c) { struct hwsim_chanctx_priv *cp = (void *)c->drv_priv; cp->magic = HWSIM_CHANCTX_MAGIC; } static inline void hwsim_clear_chanctx_magic(struct ieee80211_chanctx_conf *c) { struct hwsim_chanctx_priv *cp = (void *)c->drv_priv; cp->magic = 0; } static unsigned int hwsim_net_id; static DEFINE_IDA(hwsim_netgroup_ida); struct hwsim_net { int netgroup; u32 wmediumd; }; static inline int hwsim_net_get_netgroup(struct net *net) { struct hwsim_net *hwsim_net = net_generic(net, hwsim_net_id); return hwsim_net->netgroup; } static inline int hwsim_net_set_netgroup(struct net *net) { struct hwsim_net *hwsim_net = net_generic(net, hwsim_net_id); hwsim_net->netgroup = ida_simple_get(&hwsim_netgroup_ida, 0, 0, GFP_KERNEL); return hwsim_net->netgroup >= 0 ? 0 : -ENOMEM; } static inline u32 hwsim_net_get_wmediumd(struct net *net) { struct hwsim_net *hwsim_net = net_generic(net, hwsim_net_id); return hwsim_net->wmediumd; } static inline void hwsim_net_set_wmediumd(struct net *net, u32 portid) { struct hwsim_net *hwsim_net = net_generic(net, hwsim_net_id); hwsim_net->wmediumd = portid; } static struct class *hwsim_class; static struct net_device *hwsim_mon; /* global monitor netdev */ #define CHAN2G(_freq) { \ .band = NL80211_BAND_2GHZ, \ .center_freq = (_freq), \ .hw_value = (_freq), \ .max_power = 20, \ } #define CHAN5G(_freq) { \ .band = NL80211_BAND_5GHZ, \ .center_freq = (_freq), \ .hw_value = (_freq), \ .max_power = 20, \ } static const struct ieee80211_channel hwsim_channels_2ghz[] = { CHAN2G(2412), /* Channel 1 */ CHAN2G(2417), /* Channel 2 */ CHAN2G(2422), /* Channel 3 */ CHAN2G(2427), /* Channel 4 */ CHAN2G(2432), /* Channel 5 */ CHAN2G(2437), /* Channel 6 */ CHAN2G(2442), /* Channel 7 */ CHAN2G(2447), /* Channel 8 */ CHAN2G(2452), /* Channel 9 */ CHAN2G(2457), /* Channel 10 */ CHAN2G(2462), /* Channel 11 */ CHAN2G(2467), /* Channel 12 */ CHAN2G(2472), /* Channel 13 */ CHAN2G(2484), /* Channel 14 */ }; static const struct ieee80211_channel hwsim_channels_5ghz[] = { CHAN5G(5180), /* Channel 36 */ CHAN5G(5200), /* Channel 40 */ CHAN5G(5220), /* Channel 44 */ CHAN5G(5240), /* Channel 48 */ CHAN5G(5260), /* Channel 52 */ CHAN5G(5280), /* Channel 56 */ CHAN5G(5300), /* Channel 60 */ CHAN5G(5320), /* Channel 64 */ CHAN5G(5500), /* Channel 100 */ CHAN5G(5520), /* Channel 104 */ CHAN5G(5540), /* Channel 108 */ CHAN5G(5560), /* Channel 112 */ CHAN5G(5580), /* Channel 116 */ CHAN5G(5600), /* Channel 120 */ CHAN5G(5620), /* Channel 124 */ CHAN5G(5640), /* Channel 128 */ CHAN5G(5660), /* Channel 132 */ CHAN5G(5680), /* Channel 136 */ CHAN5G(5700), /* Channel 140 */ CHAN5G(5745), /* Channel 149 */ CHAN5G(5765), /* Channel 153 */ CHAN5G(5785), /* Channel 157 */ CHAN5G(5805), /* Channel 161 */ CHAN5G(5825), /* Channel 165 */ CHAN5G(5845), /* Channel 169 */ }; static const struct ieee80211_rate hwsim_rates[] = { { .bitrate = 10 }, { .bitrate = 20, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, { .bitrate = 55, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, { .bitrate = 110, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, { .bitrate = 60 }, { .bitrate = 90 }, { .bitrate = 120 }, { .bitrate = 180 }, { .bitrate = 240 }, { .bitrate = 360 }, { .bitrate = 480 }, { .bitrate = 540 } }; #define DEFAULT_RX_RSSI -50 static const u32 hwsim_ciphers[] = { WLAN_CIPHER_SUITE_WEP40, WLAN_CIPHER_SUITE_WEP104, 
WLAN_CIPHER_SUITE_TKIP, WLAN_CIPHER_SUITE_CCMP, WLAN_CIPHER_SUITE_CCMP_256, WLAN_CIPHER_SUITE_GCMP, WLAN_CIPHER_SUITE_GCMP_256, WLAN_CIPHER_SUITE_AES_CMAC, WLAN_CIPHER_SUITE_BIP_CMAC_256, WLAN_CIPHER_SUITE_BIP_GMAC_128, WLAN_CIPHER_SUITE_BIP_GMAC_256, }; #define OUI_QCA 0x001374 #define QCA_NL80211_SUBCMD_TEST 1 enum qca_nl80211_vendor_subcmds { QCA_WLAN_VENDOR_ATTR_TEST = 8, QCA_WLAN_VENDOR_ATTR_MAX = QCA_WLAN_VENDOR_ATTR_TEST }; static const struct nla_policy hwsim_vendor_test_policy[QCA_WLAN_VENDOR_ATTR_MAX + 1] = { [QCA_WLAN_VENDOR_ATTR_MAX] = { .type = NLA_U32 }, }; static int mac80211_hwsim_vendor_cmd_test(struct wiphy *wiphy, struct wireless_dev *wdev, const void *data, int data_len) { struct sk_buff *skb; struct nlattr *tb[QCA_WLAN_VENDOR_ATTR_MAX + 1]; int err; u32 val; err = nla_parse(tb, QCA_WLAN_VENDOR_ATTR_MAX, data, data_len, hwsim_vendor_test_policy, NULL); if (err) return err; if (!tb[QCA_WLAN_VENDOR_ATTR_TEST]) return -EINVAL; val = nla_get_u32(tb[QCA_WLAN_VENDOR_ATTR_TEST]); wiphy_dbg(wiphy, "%s: test=%u\n", __func__, val); /* Send a vendor event as a test. Note that this would not normally be * done within a command handler, but rather, based on some other * trigger. For simplicity, this command is used to trigger the event * here. * * event_idx = 0 (index in mac80211_hwsim_vendor_commands) */ skb = cfg80211_vendor_event_alloc(wiphy, wdev, 100, 0, GFP_KERNEL); if (skb) { /* skb_put() or nla_put() will fill up data within * NL80211_ATTR_VENDOR_DATA. */ /* Add vendor data */ nla_put_u32(skb, QCA_WLAN_VENDOR_ATTR_TEST, val + 1); /* Send the event - this will call nla_nest_end() */ cfg80211_vendor_event(skb, GFP_KERNEL); } /* Send a response to the command */ skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, 10); if (!skb) return -ENOMEM; /* skb_put() or nla_put() will fill up data within * NL80211_ATTR_VENDOR_DATA */ nla_put_u32(skb, QCA_WLAN_VENDOR_ATTR_TEST, val + 2); return cfg80211_vendor_cmd_reply(skb); } static struct wiphy_vendor_command mac80211_hwsim_vendor_commands[] = { { .info = { .vendor_id = OUI_QCA, .subcmd = QCA_NL80211_SUBCMD_TEST }, .flags = WIPHY_VENDOR_CMD_NEED_NETDEV, .doit = mac80211_hwsim_vendor_cmd_test, } }; /* Advertise support vendor specific events */ static const struct nl80211_vendor_cmd_info mac80211_hwsim_vendor_events[] = { { .vendor_id = OUI_QCA, .subcmd = 1 }, }; static spinlock_t hwsim_radio_lock; static LIST_HEAD(hwsim_radios); static struct rhashtable hwsim_radios_rht; static int hwsim_radio_idx; static int hwsim_radios_generation = 1; static struct platform_driver mac80211_hwsim_driver = { .driver = { .name = "mac80211_hwsim", }, }; struct mac80211_hwsim_data { struct list_head list; struct rhash_head rht; struct ieee80211_hw *hw; struct device *dev; struct ieee80211_supported_band bands[NUM_NL80211_BANDS]; struct ieee80211_channel channels_2ghz[ARRAY_SIZE(hwsim_channels_2ghz)]; struct ieee80211_channel channels_5ghz[ARRAY_SIZE(hwsim_channels_5ghz)]; struct ieee80211_rate rates[ARRAY_SIZE(hwsim_rates)]; struct ieee80211_iface_combination if_combination; struct ieee80211_iface_limit if_limits[3]; int n_if_limits; u32 ciphers[ARRAY_SIZE(hwsim_ciphers)]; struct mac_address addresses[2]; int channels, idx; bool use_chanctx; bool destroy_on_close; u32 portid; char alpha2[2]; const struct ieee80211_regdomain *regd; struct ieee80211_channel *tmp_chan; struct ieee80211_channel *roc_chan; u32 roc_duration; struct delayed_work roc_start; struct delayed_work roc_done; struct delayed_work hw_scan; struct cfg80211_scan_request *hw_scan_request; 
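	/* state for the deferred hw_scan work above: the vif the scan was
	 * issued on, a cursor into the request's channel list, and the
	 * (possibly randomized) source address used while scanning */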
struct ieee80211_vif *hw_scan_vif; int scan_chan_idx; u8 scan_addr[ETH_ALEN]; struct { struct ieee80211_channel *channel; unsigned long next_start, start, end; } survey_data[ARRAY_SIZE(hwsim_channels_2ghz) + ARRAY_SIZE(hwsim_channels_5ghz)]; struct ieee80211_channel *channel; u64 beacon_int /* beacon interval in us */; unsigned int rx_filter; bool started, idle, scanning; struct mutex mutex; struct tasklet_hrtimer beacon_timer; enum ps_mode { PS_DISABLED, PS_ENABLED, PS_AUTO_POLL, PS_MANUAL_POLL } ps; bool ps_poll_pending; struct dentry *debugfs; uintptr_t pending_cookie; struct sk_buff_head pending; /* packets pending */ /* * Only radios in the same group can communicate together (the * channel has to match too). Each bit represents a group. A * radio can be in more than one group. */ u64 group; /* group shared by radios created in the same netns */ int netgroup; /* wmediumd portid responsible for netgroup of this radio */ u32 wmediumd; /* difference between this hw's clock and the real clock, in usecs */ s64 tsf_offset; s64 bcn_delta; /* absolute beacon transmission time. Used to cover up "tx" delay. */ u64 abs_bcn_ts; /* Stats */ u64 tx_pkts; u64 rx_pkts; u64 tx_bytes; u64 rx_bytes; u64 tx_dropped; u64 tx_failed; /* RSSI in rx status of the receiver */ int rx_rssi; }; static const struct rhashtable_params hwsim_rht_params = { .nelem_hint = 2, .automatic_shrinking = true, .key_len = ETH_ALEN, .key_offset = offsetof(struct mac80211_hwsim_data, addresses[1]), .head_offset = offsetof(struct mac80211_hwsim_data, rht), }; struct hwsim_radiotap_hdr { struct ieee80211_radiotap_header hdr; __le64 rt_tsft; u8 rt_flags; u8 rt_rate; __le16 rt_channel; __le16 rt_chbitmask; } __packed; struct hwsim_radiotap_ack_hdr { struct ieee80211_radiotap_header hdr; u8 rt_flags; u8 pad; __le16 rt_channel; __le16 rt_chbitmask; } __packed; /* MAC80211_HWSIM netlink family */ static struct genl_family hwsim_genl_family; enum hwsim_multicast_groups { HWSIM_MCGRP_CONFIG, }; static __genl_const struct genl_multicast_group hwsim_mcgrps[] = { [HWSIM_MCGRP_CONFIG] = { .name = "config", }, }; /* MAC80211_HWSIM netlink policy */ static const struct nla_policy hwsim_genl_policy[HWSIM_ATTR_MAX + 1] = { [HWSIM_ATTR_ADDR_RECEIVER] = { .type = NLA_UNSPEC, .len = ETH_ALEN }, [HWSIM_ATTR_ADDR_TRANSMITTER] = { .type = NLA_UNSPEC, .len = ETH_ALEN }, [HWSIM_ATTR_FRAME] = { .type = NLA_BINARY, .len = IEEE80211_MAX_DATA_LEN }, [HWSIM_ATTR_FLAGS] = { .type = NLA_U32 }, [HWSIM_ATTR_RX_RATE] = { .type = NLA_U32 }, [HWSIM_ATTR_SIGNAL] = { .type = NLA_U32 }, [HWSIM_ATTR_TX_INFO] = { .type = NLA_UNSPEC, .len = IEEE80211_TX_MAX_RATES * sizeof(struct hwsim_tx_rate)}, [HWSIM_ATTR_COOKIE] = { .type = NLA_U64 }, [HWSIM_ATTR_CHANNELS] = { .type = NLA_U32 }, [HWSIM_ATTR_RADIO_ID] = { .type = NLA_U32 }, [HWSIM_ATTR_REG_HINT_ALPHA2] = { .type = NLA_STRING, .len = 2 }, [HWSIM_ATTR_REG_CUSTOM_REG] = { .type = NLA_U32 }, [HWSIM_ATTR_REG_STRICT_REG] = { .type = NLA_FLAG }, [HWSIM_ATTR_SUPPORT_P2P_DEVICE] = { .type = NLA_FLAG }, [HWSIM_ATTR_DESTROY_RADIO_ON_CLOSE] = { .type = NLA_FLAG }, [HWSIM_ATTR_RADIO_NAME] = { .type = NLA_STRING }, [HWSIM_ATTR_NO_VIF] = { .type = NLA_FLAG }, [HWSIM_ATTR_FREQ] = { .type = NLA_U32 }, [HWSIM_ATTR_PERM_ADDR] = { .type = NLA_UNSPEC, .len = ETH_ALEN }, [HWSIM_ATTR_IFTYPE_SUPPORT] = { .type = NLA_U32 }, [HWSIM_ATTR_CIPHER_SUPPORT] = { .type = NLA_BINARY }, }; static void mac80211_hwsim_tx_frame(struct ieee80211_hw *hw, struct sk_buff *skb, struct ieee80211_channel *chan); /* sysfs attributes */ static void 
hwsim_send_ps_poll(void *dat, u8 *mac, struct ieee80211_vif *vif) { struct mac80211_hwsim_data *data = dat; struct hwsim_vif_priv *vp = (void *)vif->drv_priv; struct sk_buff *skb; struct ieee80211_pspoll *pspoll; if (!vp->assoc) return; wiphy_dbg(data->hw->wiphy, "%s: send PS-Poll to %pM for aid %d\n", __func__, vp->bssid, vp->aid); skb = dev_alloc_skb(sizeof(*pspoll)); if (!skb) return; pspoll = skb_put(skb, sizeof(*pspoll)); pspoll->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_PSPOLL | IEEE80211_FCTL_PM); pspoll->aid = cpu_to_le16(0xc000 | vp->aid); memcpy(pspoll->bssid, vp->bssid, ETH_ALEN); memcpy(pspoll->ta, mac, ETH_ALEN); rcu_read_lock(); mac80211_hwsim_tx_frame(data->hw, skb, rcu_dereference(vif->chanctx_conf)->def.chan); rcu_read_unlock(); } static void hwsim_send_nullfunc(struct mac80211_hwsim_data *data, u8 *mac, struct ieee80211_vif *vif, int ps) { struct hwsim_vif_priv *vp = (void *)vif->drv_priv; struct sk_buff *skb; struct ieee80211_hdr *hdr; if (!vp->assoc) return; wiphy_dbg(data->hw->wiphy, "%s: send data::nullfunc to %pM ps=%d\n", __func__, vp->bssid, ps); skb = dev_alloc_skb(sizeof(*hdr)); if (!skb) return; hdr = skb_put(skb, sizeof(*hdr) - ETH_ALEN); hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC | IEEE80211_FCTL_TODS | (ps ? IEEE80211_FCTL_PM : 0)); hdr->duration_id = cpu_to_le16(0); memcpy(hdr->addr1, vp->bssid, ETH_ALEN); memcpy(hdr->addr2, mac, ETH_ALEN); memcpy(hdr->addr3, vp->bssid, ETH_ALEN); rcu_read_lock(); mac80211_hwsim_tx_frame(data->hw, skb, rcu_dereference(vif->chanctx_conf)->def.chan); rcu_read_unlock(); } static void hwsim_send_nullfunc_ps(void *dat, u8 *mac, struct ieee80211_vif *vif) { struct mac80211_hwsim_data *data = dat; hwsim_send_nullfunc(data, mac, vif, 1); } static void hwsim_send_nullfunc_no_ps(void *dat, u8 *mac, struct ieee80211_vif *vif) { struct mac80211_hwsim_data *data = dat; hwsim_send_nullfunc(data, mac, vif, 0); } static int hwsim_fops_ps_read(void *dat, u64 *val) { struct mac80211_hwsim_data *data = dat; *val = data->ps; return 0; } static int hwsim_fops_ps_write(void *dat, u64 val) { struct mac80211_hwsim_data *data = dat; enum ps_mode old_ps; if (val != PS_DISABLED && val != PS_ENABLED && val != PS_AUTO_POLL && val != PS_MANUAL_POLL) return -EINVAL; if (val == PS_MANUAL_POLL) { if (data->ps != PS_ENABLED) return -EINVAL; local_bh_disable(); ieee80211_iterate_active_interfaces_atomic( data->hw, IEEE80211_IFACE_ITER_NORMAL, hwsim_send_ps_poll, data); local_bh_enable(); return 0; } old_ps = data->ps; data->ps = val; local_bh_disable(); if (old_ps == PS_DISABLED && val != PS_DISABLED) { ieee80211_iterate_active_interfaces_atomic( data->hw, IEEE80211_IFACE_ITER_NORMAL, hwsim_send_nullfunc_ps, data); } else if (old_ps != PS_DISABLED && val == PS_DISABLED) { ieee80211_iterate_active_interfaces_atomic( data->hw, IEEE80211_IFACE_ITER_NORMAL, hwsim_send_nullfunc_no_ps, data); } local_bh_enable(); return 0; } DEFINE_SIMPLE_ATTRIBUTE(hwsim_fops_ps, hwsim_fops_ps_read, hwsim_fops_ps_write, "%llu\n"); static int hwsim_write_simulate_radar(void *dat, u64 val) { struct mac80211_hwsim_data *data = dat; ieee80211_radar_detected(data->hw); return 0; } DEFINE_SIMPLE_ATTRIBUTE(hwsim_simulate_radar, NULL, hwsim_write_simulate_radar, "%llu\n"); static int hwsim_fops_group_read(void *dat, u64 *val) { struct mac80211_hwsim_data *data = dat; *val = data->group; return 0; } static int hwsim_fops_group_write(void *dat, u64 val) { struct mac80211_hwsim_data *data = dat; data->group = val; return 0; } 
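/*
 * Illustrative usage of the "group" attribute (path assumes the usual
 * debugfs mount; radios default to group 1): two radios only hear each
 * other while their group bitmasks intersect, so
 *
 *	echo 2 > /sys/kernel/debug/ieee80211/phy1/hwsim/group
 *
 * moves phy1 into its own group and cuts it off from phy0.
 */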
DEFINE_SIMPLE_ATTRIBUTE(hwsim_fops_group, hwsim_fops_group_read, hwsim_fops_group_write, "%llx\n"); static int hwsim_fops_rx_rssi_read(void *dat, u64 *val) { struct mac80211_hwsim_data *data = dat; *val = data->rx_rssi; return 0; } static int hwsim_fops_rx_rssi_write(void *dat, u64 val) { struct mac80211_hwsim_data *data = dat; int rssi = (int)val; if (rssi >= 0 || rssi < -100) return -EINVAL; data->rx_rssi = rssi; return 0; } DEFINE_SIMPLE_ATTRIBUTE(hwsim_fops_rx_rssi, hwsim_fops_rx_rssi_read, hwsim_fops_rx_rssi_write, "%lld\n"); static netdev_tx_t hwsim_mon_xmit(struct sk_buff *skb, struct net_device *dev) { /* TODO: allow packet injection */ dev_kfree_skb(skb); return NETDEV_TX_OK; } static inline u64 mac80211_hwsim_get_tsf_raw(void) { return ktime_to_us(ktime_get_real()); } static __le64 __mac80211_hwsim_get_tsf(struct mac80211_hwsim_data *data) { u64 now = mac80211_hwsim_get_tsf_raw(); return cpu_to_le64(now + data->tsf_offset); } static u64 mac80211_hwsim_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct mac80211_hwsim_data *data = hw->priv; return le64_to_cpu(__mac80211_hwsim_get_tsf(data)); } static void mac80211_hwsim_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u64 tsf) { struct mac80211_hwsim_data *data = hw->priv; u64 now = mac80211_hwsim_get_tsf(hw, vif); u32 bcn_int = data->beacon_int; u64 delta = abs(tsf - now); /* adjust after beaconing with new timestamp at old TBTT */ if (tsf > now) { data->tsf_offset += delta; data->bcn_delta = do_div(delta, bcn_int); } else { data->tsf_offset -= delta; data->bcn_delta = -(s64)do_div(delta, bcn_int); } } static void mac80211_hwsim_monitor_rx(struct ieee80211_hw *hw, struct sk_buff *tx_skb, struct ieee80211_channel *chan) { struct mac80211_hwsim_data *data = hw->priv; struct sk_buff *skb; struct hwsim_radiotap_hdr *hdr; u16 flags; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_skb); struct ieee80211_rate *txrate = ieee80211_get_tx_rate(hw, info); if (WARN_ON(!txrate)) return; if (!netif_running(hwsim_mon)) return; skb = skb_copy_expand(tx_skb, sizeof(*hdr), 0, GFP_ATOMIC); if (skb == NULL) return; hdr = skb_push(skb, sizeof(*hdr)); hdr->hdr.it_version = PKTHDR_RADIOTAP_VERSION; hdr->hdr.it_pad = 0; hdr->hdr.it_len = cpu_to_le16(sizeof(*hdr)); hdr->hdr.it_present = cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) | (1 << IEEE80211_RADIOTAP_RATE) | (1 << IEEE80211_RADIOTAP_TSFT) | (1 << IEEE80211_RADIOTAP_CHANNEL)); hdr->rt_tsft = __mac80211_hwsim_get_tsf(data); hdr->rt_flags = 0; hdr->rt_rate = txrate->bitrate / 5; hdr->rt_channel = cpu_to_le16(chan->center_freq); flags = IEEE80211_CHAN_2GHZ; if (txrate->flags & IEEE80211_RATE_ERP_G) flags |= IEEE80211_CHAN_OFDM; else flags |= IEEE80211_CHAN_CCK; hdr->rt_chbitmask = cpu_to_le16(flags); skb->dev = hwsim_mon; skb_reset_mac_header(skb); skb->ip_summed = CHECKSUM_UNNECESSARY; skb->pkt_type = PACKET_OTHERHOST; skb->protocol = htons(ETH_P_802_2); memset(skb->cb, 0, sizeof(skb->cb)); netif_rx(skb); } static void mac80211_hwsim_monitor_ack(struct ieee80211_channel *chan, const u8 *addr) { struct sk_buff *skb; struct hwsim_radiotap_ack_hdr *hdr; u16 flags; struct ieee80211_hdr *hdr11; if (!netif_running(hwsim_mon)) return; skb = dev_alloc_skb(100); if (skb == NULL) return; hdr = skb_put(skb, sizeof(*hdr)); hdr->hdr.it_version = PKTHDR_RADIOTAP_VERSION; hdr->hdr.it_pad = 0; hdr->hdr.it_len = cpu_to_le16(sizeof(*hdr)); hdr->hdr.it_present = cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) | (1 << IEEE80211_RADIOTAP_CHANNEL)); hdr->rt_flags = 0; hdr->pad = 0; 
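	/*
	 * The explicit pad byte after the one-byte FLAGS field keeps the
	 * following 16-bit CHANNEL fields naturally aligned, as the
	 * radiotap format requires for each field.
	 */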
hdr->rt_channel = cpu_to_le16(chan->center_freq); flags = IEEE80211_CHAN_2GHZ; hdr->rt_chbitmask = cpu_to_le16(flags); hdr11 = skb_put(skb, 10); hdr11->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_ACK); hdr11->duration_id = cpu_to_le16(0); memcpy(hdr11->addr1, addr, ETH_ALEN); skb->dev = hwsim_mon; skb_reset_mac_header(skb); skb->ip_summed = CHECKSUM_UNNECESSARY; skb->pkt_type = PACKET_OTHERHOST; skb->protocol = htons(ETH_P_802_2); memset(skb->cb, 0, sizeof(skb->cb)); netif_rx(skb); } struct mac80211_hwsim_addr_match_data { u8 addr[ETH_ALEN]; bool ret; }; static void mac80211_hwsim_addr_iter(void *data, u8 *mac, struct ieee80211_vif *vif) { struct mac80211_hwsim_addr_match_data *md = data; if (memcmp(mac, md->addr, ETH_ALEN) == 0) md->ret = true; } static bool mac80211_hwsim_addr_match(struct mac80211_hwsim_data *data, const u8 *addr) { struct mac80211_hwsim_addr_match_data md = { .ret = false, }; if (data->scanning && memcmp(addr, data->scan_addr, ETH_ALEN) == 0) return true; memcpy(md.addr, addr, ETH_ALEN); ieee80211_iterate_active_interfaces_atomic(data->hw, IEEE80211_IFACE_ITER_NORMAL, mac80211_hwsim_addr_iter, &md); return md.ret; } static bool hwsim_ps_rx_ok(struct mac80211_hwsim_data *data, struct sk_buff *skb) { switch (data->ps) { case PS_DISABLED: return true; case PS_ENABLED: return false; case PS_AUTO_POLL: /* TODO: accept (some) Beacons by default and other frames only * if pending PS-Poll has been sent */ return true; case PS_MANUAL_POLL: /* Allow unicast frames to own address if there is a pending * PS-Poll */ if (data->ps_poll_pending && mac80211_hwsim_addr_match(data, skb->data + 4)) { data->ps_poll_pending = false; return true; } return false; } return true; } static int hwsim_unicast_netgroup(struct mac80211_hwsim_data *data, struct sk_buff *skb, int portid) { struct net *net; bool found = false; int res = -ENOENT; rcu_read_lock(); for_each_net_rcu(net) { if (data->netgroup == hwsim_net_get_netgroup(net)) { res = genlmsg_unicast(net, skb, portid); found = true; break; } } rcu_read_unlock(); if (!found) nlmsg_free(skb); return res; } static inline u16 trans_tx_rate_flags_ieee2hwsim(struct ieee80211_tx_rate *rate) { u16 result = 0; if (rate->flags & IEEE80211_TX_RC_USE_RTS_CTS) result |= MAC80211_HWSIM_TX_RC_USE_RTS_CTS; if (rate->flags & IEEE80211_TX_RC_USE_CTS_PROTECT) result |= MAC80211_HWSIM_TX_RC_USE_CTS_PROTECT; if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) result |= MAC80211_HWSIM_TX_RC_USE_SHORT_PREAMBLE; if (rate->flags & IEEE80211_TX_RC_MCS) result |= MAC80211_HWSIM_TX_RC_MCS; if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD) result |= MAC80211_HWSIM_TX_RC_GREEN_FIELD; if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) result |= MAC80211_HWSIM_TX_RC_40_MHZ_WIDTH; if (rate->flags & IEEE80211_TX_RC_DUP_DATA) result |= MAC80211_HWSIM_TX_RC_DUP_DATA; if (rate->flags & IEEE80211_TX_RC_SHORT_GI) result |= MAC80211_HWSIM_TX_RC_SHORT_GI; if (rate->flags & IEEE80211_TX_RC_VHT_MCS) result |= MAC80211_HWSIM_TX_RC_VHT_MCS; if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH) result |= MAC80211_HWSIM_TX_RC_80_MHZ_WIDTH; if (rate->flags & IEEE80211_TX_RC_160_MHZ_WIDTH) result |= MAC80211_HWSIM_TX_RC_160_MHZ_WIDTH; return result; } static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw, struct sk_buff *my_skb, int dst_portid) { struct sk_buff *skb; struct mac80211_hwsim_data *data = hw->priv; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) my_skb->data; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(my_skb); void *msg_head; unsigned int 
hwsim_flags = 0;
	int i;
	struct hwsim_tx_rate tx_attempts[IEEE80211_TX_MAX_RATES];
	struct hwsim_tx_rate_flag tx_attempts_flags[IEEE80211_TX_MAX_RATES];
	uintptr_t cookie;

	if (data->ps != PS_DISABLED)
		hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);

	/* If the queue contains MAX_QUEUE skb's, drop some */
	if (skb_queue_len(&data->pending) >= MAX_QUEUE) {
		/* Dropping until WARN_QUEUE level */
		while (skb_queue_len(&data->pending) >= WARN_QUEUE) {
			ieee80211_free_txskb(hw, skb_dequeue(&data->pending));
			data->tx_dropped++;
		}
	}

	skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_ATOMIC);
	if (skb == NULL)
		goto nla_put_failure;

	msg_head = genlmsg_put(skb, 0, 0, &hwsim_genl_family, 0,
			       HWSIM_CMD_FRAME);
	if (msg_head == NULL) {
		pr_debug("mac80211_hwsim: problem with msg_head\n");
		goto nla_put_failure;
	}

	if (nla_put(skb, HWSIM_ATTR_ADDR_TRANSMITTER,
		    ETH_ALEN, data->addresses[1].addr))
		goto nla_put_failure;

	/* We get the skb->data */
	if (nla_put(skb, HWSIM_ATTR_FRAME, my_skb->len, my_skb->data))
		goto nla_put_failure;

	/* We get the flags for this transmission, and we translate them to
	 * wmediumd flags
	 */
	if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)
		hwsim_flags |= HWSIM_TX_CTL_REQ_TX_STATUS;
	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		hwsim_flags |= HWSIM_TX_CTL_NO_ACK;
	if (nla_put_u32(skb, HWSIM_ATTR_FLAGS, hwsim_flags))
		goto nla_put_failure;

	if (nla_put_u32(skb, HWSIM_ATTR_FREQ, data->channel->center_freq))
		goto nla_put_failure;

	/* We get the tx control (rate and retries) info */
	for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
		tx_attempts[i].idx = info->status.rates[i].idx;
		tx_attempts_flags[i].idx = info->status.rates[i].idx;
		tx_attempts[i].count = info->status.rates[i].count;
		tx_attempts_flags[i].flags =
			trans_tx_rate_flags_ieee2hwsim(&info->status.rates[i]);
	}

	if (nla_put(skb, HWSIM_ATTR_TX_INFO,
		    sizeof(struct hwsim_tx_rate)*IEEE80211_TX_MAX_RATES,
		    tx_attempts))
		goto nla_put_failure;

	if (nla_put(skb, HWSIM_ATTR_TX_INFO_FLAGS,
		    sizeof(struct hwsim_tx_rate_flag) * IEEE80211_TX_MAX_RATES,
		    tx_attempts_flags))
		goto nla_put_failure;

	/* We create a cookie to identify this skb */
	data->pending_cookie++;
	cookie = data->pending_cookie;
	info->rate_driver_data[0] = (void *)cookie;
	if (nla_put_u64_64bit(skb, HWSIM_ATTR_COOKIE, cookie, HWSIM_ATTR_PAD))
		goto nla_put_failure;

	genlmsg_end(skb, msg_head);
	if (hwsim_unicast_netgroup(data, skb, dst_portid))
		goto err_free_txskb;

	/* Enqueue the packet */
	skb_queue_tail(&data->pending, my_skb);
	data->tx_pkts++;
	data->tx_bytes += my_skb->len;
	return;

nla_put_failure:
	nlmsg_free(skb);
err_free_txskb:
	pr_debug("mac80211_hwsim: error occurred in %s\n", __func__);
	ieee80211_free_txskb(hw, my_skb);
	data->tx_failed++;
}

static bool hwsim_chans_compat(struct ieee80211_channel *c1,
			       struct ieee80211_channel *c2)
{
	if (!c1 || !c2)
		return false;

	return c1->center_freq == c2->center_freq;
}

struct tx_iter_data {
	struct ieee80211_channel *channel;
	bool receive;
};

static void mac80211_hwsim_tx_iter(void *_data, u8 *addr,
				   struct ieee80211_vif *vif)
{
	struct tx_iter_data *data = _data;

	if (!vif->chanctx_conf)
		return;

	if (!hwsim_chans_compat(data->channel,
				rcu_dereference(vif->chanctx_conf)->def.chan))
		return;

	data->receive = true;
}

static void mac80211_hwsim_add_vendor_rtap(struct sk_buff *skb)
{
	/*
	 * To enable this code, #define the HWSIM_RADIOTAP_OUI,
	 * e.g. like this:
	 * #define HWSIM_RADIOTAP_OUI "\x02\x00\x00"
	 * (but you should use a valid OUI, not that)
	 *
	 * If anyone wants to 'donate' a radiotap OUI/subns code
	 * please send a patch removing this #ifdef and changing
	 * the values accordingly.
*/ #ifdef HWSIM_RADIOTAP_OUI struct ieee80211_vendor_radiotap *rtap; /* * Note that this code requires the headroom in the SKB * that was allocated earlier. */ rtap = skb_push(skb, sizeof(*rtap) + 8 + 4); rtap->oui[0] = HWSIM_RADIOTAP_OUI[0]; rtap->oui[1] = HWSIM_RADIOTAP_OUI[1]; rtap->oui[2] = HWSIM_RADIOTAP_OUI[2]; rtap->subns = 127; /* * Radiotap vendor namespaces can (and should) also be * split into fields by using the standard radiotap * presence bitmap mechanism. Use just BIT(0) here for * the presence bitmap. */ rtap->present = BIT(0); /* We have 8 bytes of (dummy) data */ rtap->len = 8; /* For testing, also require it to be aligned */ rtap->align = 8; /* And also test that padding works, 4 bytes */ rtap->pad = 4; /* push the data */ memcpy(rtap->data, "ABCDEFGH", 8); /* make sure to clear padding, mac80211 doesn't */ memset(rtap->data + 8, 0, 4); IEEE80211_SKB_RXCB(skb)->flag |= RX_FLAG_RADIOTAP_VENDOR_DATA; #endif } static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw, struct sk_buff *skb, struct ieee80211_channel *chan) { struct mac80211_hwsim_data *data = hw->priv, *data2; bool ack = false; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_rx_status rx_status; u64 now; memset(&rx_status, 0, sizeof(rx_status)); rx_status.flag |= RX_FLAG_MACTIME_START; rx_status.freq = chan->center_freq; rx_status.band = chan->band; if (info->control.rates[0].flags & IEEE80211_TX_RC_VHT_MCS) { rx_status.rate_idx = ieee80211_rate_get_vht_mcs(&info->control.rates[0]); rx_status.nss = ieee80211_rate_get_vht_nss(&info->control.rates[0]); rx_status.encoding = RX_ENC_VHT; } else { rx_status.rate_idx = info->control.rates[0].idx; if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS) rx_status.encoding = RX_ENC_HT; } if (info->control.rates[0].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) rx_status.bw = RATE_INFO_BW_40; else if (info->control.rates[0].flags & IEEE80211_TX_RC_80_MHZ_WIDTH) rx_status.bw = RATE_INFO_BW_80; else if (info->control.rates[0].flags & IEEE80211_TX_RC_160_MHZ_WIDTH) rx_status.bw = RATE_INFO_BW_160; else rx_status.bw = RATE_INFO_BW_20; if (info->control.rates[0].flags & IEEE80211_TX_RC_SHORT_GI) rx_status.enc_flags |= RX_ENC_FLAG_SHORT_GI; /* TODO: simulate optional packet loss */ rx_status.signal = data->rx_rssi; if (info->control.vif) rx_status.signal += info->control.vif->bss_conf.txpower; if (data->ps != PS_DISABLED) hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM); /* release the skb's source info */ skb_orphan(skb); skb_dst_drop(skb); skb->mark = 0; secpath_reset(skb); nf_reset(skb); /* * Get absolute mactime here so all HWs RX at the "same time", and * absolute TX time for beacon mactime so the timestamp matches. * Giving beacons a different mactime than non-beacons looks messy, but * it helps the Toffset be exact and a ~10us mactime discrepancy * probably doesn't really matter. 
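 * In other words: every receiver below stamps rx_status.mactime as
 * now + its own tsf_offset, and using the absolute beacon TX time
 * (abs_bcn_ts) as "now" for beacons/probe responses keeps that mactime
 * consistent with the timestamp written into the frame at beacon time.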
*/ if (ieee80211_is_beacon(hdr->frame_control) || ieee80211_is_probe_resp(hdr->frame_control)) now = data->abs_bcn_ts; else now = mac80211_hwsim_get_tsf_raw(); /* Copy skb to all enabled radios that are on the current frequency */ spin_lock(&hwsim_radio_lock); list_for_each_entry(data2, &hwsim_radios, list) { struct sk_buff *nskb; struct tx_iter_data tx_iter_data = { .receive = false, .channel = chan, }; if (data == data2) continue; if (!data2->started || (data2->idle && !data2->tmp_chan) || !hwsim_ps_rx_ok(data2, skb)) continue; if (!(data->group & data2->group)) continue; if (data->netgroup != data2->netgroup) continue; if (!hwsim_chans_compat(chan, data2->tmp_chan) && !hwsim_chans_compat(chan, data2->channel)) { ieee80211_iterate_active_interfaces_atomic( data2->hw, IEEE80211_IFACE_ITER_NORMAL, mac80211_hwsim_tx_iter, &tx_iter_data); if (!tx_iter_data.receive) continue; } /* * reserve some space for our vendor and the normal * radiotap header, since we're copying anyway */ if (skb->len < PAGE_SIZE && paged_rx) { struct page *page = alloc_page(GFP_ATOMIC); if (!page) continue; nskb = dev_alloc_skb(128); if (!nskb) { __free_page(page); continue; } memcpy(page_address(page), skb->data, skb->len); skb_add_rx_frag(nskb, 0, page, 0, skb->len, skb->len); } else { nskb = skb_copy(skb, GFP_ATOMIC); if (!nskb) continue; } if (mac80211_hwsim_addr_match(data2, hdr->addr1)) ack = true; rx_status.mactime = now + data2->tsf_offset; memcpy(IEEE80211_SKB_RXCB(nskb), &rx_status, sizeof(rx_status)); mac80211_hwsim_add_vendor_rtap(nskb); data2->rx_pkts++; data2->rx_bytes += nskb->len; ieee80211_rx_irqsafe(data2->hw, nskb); } spin_unlock(&hwsim_radio_lock); return ack; } static void mac80211_hwsim_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, struct sk_buff *skb) { struct mac80211_hwsim_data *data = hw->priv; struct ieee80211_tx_info *txi = IEEE80211_SKB_CB(skb); struct ieee80211_hdr *hdr = (void *)skb->data; struct ieee80211_chanctx_conf *chanctx_conf; struct ieee80211_channel *channel; bool ack; u32 _portid; if (WARN_ON(skb->len < 10)) { /* Should not happen; just a sanity check for addr1 use */ ieee80211_free_txskb(hw, skb); return; } if (!data->use_chanctx) { channel = data->channel; } else if (txi->hw_queue == 4) { channel = data->tmp_chan; } else { chanctx_conf = rcu_dereference(txi->control.vif->chanctx_conf); if (chanctx_conf) channel = chanctx_conf->def.chan; else channel = NULL; } if (WARN(!channel, "TX w/o channel - queue = %d\n", txi->hw_queue)) { ieee80211_free_txskb(hw, skb); return; } if (data->idle && !data->tmp_chan) { wiphy_dbg(hw->wiphy, "Trying to TX when idle - reject\n"); ieee80211_free_txskb(hw, skb); return; } if (txi->control.vif) hwsim_check_magic(txi->control.vif); if (control->sta) hwsim_check_sta_magic(control->sta); if (ieee80211_hw_check(hw, SUPPORTS_RC_TABLE)) ieee80211_get_tx_rates(txi->control.vif, control->sta, skb, txi->control.rates, ARRAY_SIZE(txi->control.rates)); if (skb->len >= 24 + 8 && ieee80211_is_probe_resp(hdr->frame_control)) { /* fake header transmission time */ struct ieee80211_mgmt *mgmt; struct ieee80211_rate *txrate; u64 ts; mgmt = (struct ieee80211_mgmt *)skb->data; txrate = ieee80211_get_tx_rate(hw, txi); ts = mac80211_hwsim_get_tsf_raw(); mgmt->u.probe_resp.timestamp = cpu_to_le64(ts + data->tsf_offset + 24 * 8 * 10 / txrate->bitrate); } mac80211_hwsim_monitor_rx(hw, skb, channel); /* wmediumd mode check */ _portid = READ_ONCE(data->wmediumd); if (_portid) return mac80211_hwsim_tx_frame_nl(hw, skb, _portid); /* NO wmediumd detected, 
perfect medium simulation */ data->tx_pkts++; data->tx_bytes += skb->len; ack = mac80211_hwsim_tx_frame_no_nl(hw, skb, channel); if (ack && skb->len >= 16) mac80211_hwsim_monitor_ack(channel, hdr->addr2); ieee80211_tx_info_clear_status(txi); /* frame was transmitted at most favorable rate at first attempt */ txi->control.rates[0].count = 1; txi->control.rates[1].idx = -1; if (!(txi->flags & IEEE80211_TX_CTL_NO_ACK) && ack) txi->flags |= IEEE80211_TX_STAT_ACK; ieee80211_tx_status_irqsafe(hw, skb); } static int mac80211_hwsim_start(struct ieee80211_hw *hw) { struct mac80211_hwsim_data *data = hw->priv; wiphy_dbg(hw->wiphy, "%s\n", __func__); data->started = true; return 0; } static void mac80211_hwsim_stop(struct ieee80211_hw *hw) { struct mac80211_hwsim_data *data = hw->priv; data->started = false; tasklet_hrtimer_cancel(&data->beacon_timer); wiphy_dbg(hw->wiphy, "%s\n", __func__); } static int mac80211_hwsim_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { wiphy_dbg(hw->wiphy, "%s (type=%d mac_addr=%pM)\n", __func__, ieee80211_vif_type_p2p(vif), vif->addr); hwsim_set_magic(vif); vif->cab_queue = 0; vif->hw_queue[IEEE80211_AC_VO] = 0; vif->hw_queue[IEEE80211_AC_VI] = 1; vif->hw_queue[IEEE80211_AC_BE] = 2; vif->hw_queue[IEEE80211_AC_BK] = 3; return 0; } static int mac80211_hwsim_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif, enum nl80211_iftype newtype, bool newp2p) { newtype = ieee80211_iftype_p2p(newtype, newp2p); wiphy_dbg(hw->wiphy, "%s (old type=%d, new type=%d, mac_addr=%pM)\n", __func__, ieee80211_vif_type_p2p(vif), newtype, vif->addr); hwsim_check_magic(vif); /* * interface may change from non-AP to AP in * which case this needs to be set up again */ vif->cab_queue = 0; return 0; } static void mac80211_hwsim_remove_interface( struct ieee80211_hw *hw, struct ieee80211_vif *vif) { wiphy_dbg(hw->wiphy, "%s (type=%d mac_addr=%pM)\n", __func__, ieee80211_vif_type_p2p(vif), vif->addr); hwsim_check_magic(vif); hwsim_clear_magic(vif); } static void mac80211_hwsim_tx_frame(struct ieee80211_hw *hw, struct sk_buff *skb, struct ieee80211_channel *chan) { struct mac80211_hwsim_data *data = hw->priv; u32 _pid = READ_ONCE(data->wmediumd); if (ieee80211_hw_check(hw, SUPPORTS_RC_TABLE)) { struct ieee80211_tx_info *txi = IEEE80211_SKB_CB(skb); ieee80211_get_tx_rates(txi->control.vif, NULL, skb, txi->control.rates, ARRAY_SIZE(txi->control.rates)); } mac80211_hwsim_monitor_rx(hw, skb, chan); if (_pid) return mac80211_hwsim_tx_frame_nl(hw, skb, _pid); mac80211_hwsim_tx_frame_no_nl(hw, skb, chan); dev_kfree_skb(skb); } static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac, struct ieee80211_vif *vif) { struct mac80211_hwsim_data *data = arg; struct ieee80211_hw *hw = data->hw; struct ieee80211_tx_info *info; struct ieee80211_rate *txrate; struct ieee80211_mgmt *mgmt; struct sk_buff *skb; hwsim_check_magic(vif); if (vif->type != NL80211_IFTYPE_AP && vif->type != NL80211_IFTYPE_MESH_POINT && vif->type != NL80211_IFTYPE_ADHOC) return; skb = ieee80211_beacon_get(hw, vif); if (skb == NULL) return; info = IEEE80211_SKB_CB(skb); if (ieee80211_hw_check(hw, SUPPORTS_RC_TABLE)) ieee80211_get_tx_rates(vif, NULL, skb, info->control.rates, ARRAY_SIZE(info->control.rates)); txrate = ieee80211_get_tx_rate(hw, info); mgmt = (struct ieee80211_mgmt *) skb->data; /* fake header transmission time */ data->abs_bcn_ts = mac80211_hwsim_get_tsf_raw(); mgmt->u.beacon.timestamp = cpu_to_le64(data->abs_bcn_ts + data->tsf_offset + 24 * 8 * 10 / txrate->bitrate); mac80211_hwsim_tx_frame(hw, 
skb, rcu_dereference(vif->chanctx_conf)->def.chan); if (vif->csa_active && ieee80211_csa_is_complete(vif)) ieee80211_csa_finish(vif); } static enum hrtimer_restart mac80211_hwsim_beacon(struct hrtimer *timer) { struct mac80211_hwsim_data *data = container_of(timer, struct mac80211_hwsim_data, beacon_timer.timer); struct ieee80211_hw *hw = data->hw; u64 bcn_int = data->beacon_int; ktime_t next_bcn; if (!data->started) goto out; ieee80211_iterate_active_interfaces_atomic( hw, IEEE80211_IFACE_ITER_NORMAL, mac80211_hwsim_beacon_tx, data); /* beacon at new TBTT + beacon interval */ if (data->bcn_delta) { bcn_int -= data->bcn_delta; data->bcn_delta = 0; } next_bcn = ktime_add(hrtimer_get_expires(timer), ns_to_ktime(bcn_int * 1000)); tasklet_hrtimer_start(&data->beacon_timer, next_bcn, HRTIMER_MODE_ABS); out: return HRTIMER_NORESTART; } static const char * const hwsim_chanwidths[] = { [NL80211_CHAN_WIDTH_20_NOHT] = "noht", [NL80211_CHAN_WIDTH_20] = "ht20", [NL80211_CHAN_WIDTH_40] = "ht40", [NL80211_CHAN_WIDTH_80] = "vht80", [NL80211_CHAN_WIDTH_80P80] = "vht80p80", [NL80211_CHAN_WIDTH_160] = "vht160", }; static int mac80211_hwsim_config(struct ieee80211_hw *hw, u32 changed) { struct mac80211_hwsim_data *data = hw->priv; struct ieee80211_conf *conf = &hw->conf; static const char *smps_modes[IEEE80211_SMPS_NUM_MODES] = { [IEEE80211_SMPS_AUTOMATIC] = "auto", [IEEE80211_SMPS_OFF] = "off", [IEEE80211_SMPS_STATIC] = "static", [IEEE80211_SMPS_DYNAMIC] = "dynamic", }; int idx; if (conf->chandef.chan) wiphy_dbg(hw->wiphy, "%s (freq=%d(%d - %d)/%s idle=%d ps=%d smps=%s)\n", __func__, conf->chandef.chan->center_freq, conf->chandef.center_freq1, conf->chandef.center_freq2, hwsim_chanwidths[conf->chandef.width], !!(conf->flags & IEEE80211_CONF_IDLE), !!(conf->flags & IEEE80211_CONF_PS), smps_modes[conf->smps_mode]); else wiphy_dbg(hw->wiphy, "%s (freq=0 idle=%d ps=%d smps=%s)\n", __func__, !!(conf->flags & IEEE80211_CONF_IDLE), !!(conf->flags & IEEE80211_CONF_PS), smps_modes[conf->smps_mode]); data->idle = !!(conf->flags & IEEE80211_CONF_IDLE); WARN_ON(conf->chandef.chan && data->use_chanctx); mutex_lock(&data->mutex); if (data->scanning && conf->chandef.chan) { for (idx = 0; idx < ARRAY_SIZE(data->survey_data); idx++) { if (data->survey_data[idx].channel == data->channel) { data->survey_data[idx].start = data->survey_data[idx].next_start; data->survey_data[idx].end = jiffies; break; } } data->channel = conf->chandef.chan; for (idx = 0; idx < ARRAY_SIZE(data->survey_data); idx++) { if (data->survey_data[idx].channel && data->survey_data[idx].channel != data->channel) continue; data->survey_data[idx].channel = data->channel; data->survey_data[idx].next_start = jiffies; break; } } else { data->channel = conf->chandef.chan; } mutex_unlock(&data->mutex); if (!data->started || !data->beacon_int) tasklet_hrtimer_cancel(&data->beacon_timer); else if (!hrtimer_is_queued(&data->beacon_timer.timer)) { u64 tsf = mac80211_hwsim_get_tsf(hw, NULL); u32 bcn_int = data->beacon_int; u64 until_tbtt = bcn_int - do_div(tsf, bcn_int); tasklet_hrtimer_start(&data->beacon_timer, ns_to_ktime(until_tbtt * 1000), HRTIMER_MODE_REL); } return 0; } static void mac80211_hwsim_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags, unsigned int *total_flags,u64 multicast) { struct mac80211_hwsim_data *data = hw->priv; wiphy_dbg(hw->wiphy, "%s\n", __func__); data->rx_filter = 0; if (*total_flags & FIF_ALLMULTI) data->rx_filter |= FIF_ALLMULTI; *total_flags = data->rx_filter; } static void mac80211_hwsim_bcn_en_iter(void *data, 
u8 *mac, struct ieee80211_vif *vif) { unsigned int *count = data; struct hwsim_vif_priv *vp = (void *)vif->drv_priv; if (vp->bcn_en) (*count)++; } static void mac80211_hwsim_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *info, u32 changed) { struct hwsim_vif_priv *vp = (void *)vif->drv_priv; struct mac80211_hwsim_data *data = hw->priv; hwsim_check_magic(vif); wiphy_dbg(hw->wiphy, "%s(changed=0x%x vif->addr=%pM)\n", __func__, changed, vif->addr); if (changed & BSS_CHANGED_BSSID) { wiphy_dbg(hw->wiphy, "%s: BSSID changed: %pM\n", __func__, info->bssid); memcpy(vp->bssid, info->bssid, ETH_ALEN); } if (changed & BSS_CHANGED_ASSOC) { wiphy_dbg(hw->wiphy, " ASSOC: assoc=%d aid=%d\n", info->assoc, info->aid); vp->assoc = info->assoc; vp->aid = info->aid; } if (changed & BSS_CHANGED_BEACON_ENABLED) { wiphy_dbg(hw->wiphy, " BCN EN: %d (BI=%u)\n", info->enable_beacon, info->beacon_int); vp->bcn_en = info->enable_beacon; if (data->started && !hrtimer_is_queued(&data->beacon_timer.timer) && info->enable_beacon) { u64 tsf, until_tbtt; u32 bcn_int; data->beacon_int = info->beacon_int * 1024; tsf = mac80211_hwsim_get_tsf(hw, vif); bcn_int = data->beacon_int; until_tbtt = bcn_int - do_div(tsf, bcn_int); tasklet_hrtimer_start(&data->beacon_timer, ns_to_ktime(until_tbtt * 1000), HRTIMER_MODE_REL); } else if (!info->enable_beacon) { unsigned int count = 0; ieee80211_iterate_active_interfaces_atomic( data->hw, IEEE80211_IFACE_ITER_NORMAL, mac80211_hwsim_bcn_en_iter, &count); wiphy_dbg(hw->wiphy, " beaconing vifs remaining: %u", count); if (count == 0) { tasklet_hrtimer_cancel(&data->beacon_timer); data->beacon_int = 0; } } } if (changed & BSS_CHANGED_ERP_CTS_PROT) { wiphy_dbg(hw->wiphy, " ERP_CTS_PROT: %d\n", info->use_cts_prot); } if (changed & BSS_CHANGED_ERP_PREAMBLE) { wiphy_dbg(hw->wiphy, " ERP_PREAMBLE: %d\n", info->use_short_preamble); } if (changed & BSS_CHANGED_ERP_SLOT) { wiphy_dbg(hw->wiphy, " ERP_SLOT: %d\n", info->use_short_slot); } if (changed & BSS_CHANGED_HT) { wiphy_dbg(hw->wiphy, " HT: op_mode=0x%x\n", info->ht_operation_mode); } if (changed & BSS_CHANGED_BASIC_RATES) { wiphy_dbg(hw->wiphy, " BASIC_RATES: 0x%llx\n", (unsigned long long) info->basic_rates); } if (changed & BSS_CHANGED_TXPOWER) wiphy_dbg(hw->wiphy, " TX Power: %d dBm\n", info->txpower); } static int mac80211_hwsim_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { hwsim_check_magic(vif); hwsim_set_sta_magic(sta); return 0; } static int mac80211_hwsim_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { hwsim_check_magic(vif); hwsim_clear_sta_magic(sta); return 0; } static void mac80211_hwsim_sta_notify(struct ieee80211_hw *hw, struct ieee80211_vif *vif, enum sta_notify_cmd cmd, struct ieee80211_sta *sta) { hwsim_check_magic(vif); switch (cmd) { case STA_NOTIFY_SLEEP: case STA_NOTIFY_AWAKE: /* TODO: make good use of these flags */ break; default: WARN(1, "Invalid sta notify: %d\n", cmd); break; } } static int mac80211_hwsim_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set) { hwsim_check_sta_magic(sta); return 0; } static int mac80211_hwsim_conf_tx( struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue, const struct ieee80211_tx_queue_params *params) { wiphy_dbg(hw->wiphy, "%s (queue=%d txop=%d cw_min=%d cw_max=%d aifs=%d)\n", __func__, queue, params->txop, params->cw_min, params->cw_max, params->aifs); return 0; } static int mac80211_hwsim_get_survey(struct ieee80211_hw *hw, 
int idx, struct survey_info *survey)
{
	struct mac80211_hwsim_data *hwsim = hw->priv;

	if (idx < 0 || idx >= ARRAY_SIZE(hwsim->survey_data))
		return -ENOENT;

	mutex_lock(&hwsim->mutex);
	survey->channel = hwsim->survey_data[idx].channel;
	if (!survey->channel) {
		mutex_unlock(&hwsim->mutex);
		return -ENOENT;
	}

	/*
	 * Magically conjured dummy values --- this is only OK for
	 * simulated hardware.
	 *
	 * A real driver that cannot determine the real channel noise MUST
	 * NOT report any, especially not magically conjured ones :-)
	 */
	survey->filled = SURVEY_INFO_NOISE_DBM |
			 SURVEY_INFO_TIME |
			 SURVEY_INFO_TIME_BUSY;
	survey->noise = -92;
	survey->time =
		jiffies_to_msecs(hwsim->survey_data[idx].end -
				 hwsim->survey_data[idx].start);
	/* report 12.5% of channel time is used */
	survey->time_busy = survey->time/8;

	mutex_unlock(&hwsim->mutex);

	return 0;
}

#ifdef CPTCFG_NL80211_TESTMODE
/*
 * This section contains example code for using netlink
 * attributes with the testmode command in nl80211.
 */

/* These enums need to be kept in sync with userspace */
enum hwsim_testmode_attr {
	__HWSIM_TM_ATTR_INVALID	= 0,
	HWSIM_TM_ATTR_CMD	= 1,
	HWSIM_TM_ATTR_PS	= 2,

	/* keep last */
	__HWSIM_TM_ATTR_AFTER_LAST,
	HWSIM_TM_ATTR_MAX	= __HWSIM_TM_ATTR_AFTER_LAST - 1
};

enum hwsim_testmode_cmd {
	HWSIM_TM_CMD_SET_PS		= 0,
	HWSIM_TM_CMD_GET_PS		= 1,
	HWSIM_TM_CMD_STOP_QUEUES	= 2,
	HWSIM_TM_CMD_WAKE_QUEUES	= 3,
};

static const struct nla_policy hwsim_testmode_policy[HWSIM_TM_ATTR_MAX + 1] = {
	[HWSIM_TM_ATTR_CMD] = { .type = NLA_U32 },
	[HWSIM_TM_ATTR_PS] = { .type = NLA_U32 },
};

static int mac80211_hwsim_testmode_cmd(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif,
				       void *data, int len)
{
	struct mac80211_hwsim_data *hwsim = hw->priv;
	struct nlattr *tb[HWSIM_TM_ATTR_MAX + 1];
	struct sk_buff *skb;
	int err, ps;

	err = nla_parse(tb, HWSIM_TM_ATTR_MAX, data, len,
			hwsim_testmode_policy, NULL);
	if (err)
		return err;

	if (!tb[HWSIM_TM_ATTR_CMD])
		return -EINVAL;

	switch (nla_get_u32(tb[HWSIM_TM_ATTR_CMD])) {
	case HWSIM_TM_CMD_SET_PS:
		if (!tb[HWSIM_TM_ATTR_PS])
			return -EINVAL;
		ps = nla_get_u32(tb[HWSIM_TM_ATTR_PS]);
		return hwsim_fops_ps_write(hwsim, ps);
	case HWSIM_TM_CMD_GET_PS:
		skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy,
						nla_total_size(sizeof(u32)));
		if (!skb)
			return -ENOMEM;
		if (nla_put_u32(skb, HWSIM_TM_ATTR_PS, hwsim->ps))
			goto nla_put_failure;
		return cfg80211_testmode_reply(skb);
	case HWSIM_TM_CMD_STOP_QUEUES:
		ieee80211_stop_queues(hw);
		return 0;
	case HWSIM_TM_CMD_WAKE_QUEUES:
		ieee80211_wake_queues(hw);
		return 0;
	default:
		return -EOPNOTSUPP;
	}

nla_put_failure:
	kfree_skb(skb);
	return -ENOBUFS;
}
#endif

static int mac80211_hwsim_ampdu_action(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif,
				       struct ieee80211_ampdu_params *params)
{
	struct ieee80211_sta *sta = params->sta;
	enum ieee80211_ampdu_mlme_action action = params->action;
	u16 tid = params->tid;

	switch (action) {
	case IEEE80211_AMPDU_TX_START:
		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		break;
	case IEEE80211_AMPDU_TX_STOP_CONT:
	case IEEE80211_AMPDU_TX_STOP_FLUSH:
	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		break;
	case IEEE80211_AMPDU_TX_OPERATIONAL:
		break;
	case IEEE80211_AMPDU_RX_START:
	case IEEE80211_AMPDU_RX_STOP:
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static void mac80211_hwsim_flush(struct ieee80211_hw *hw,
				 struct ieee80211_vif *vif,
				 u32 queues, bool drop)
{
	/* Not implemented, queues only on kernel side */
}

static void hw_scan_work(struct work_struct *work)
{
	struct mac80211_hwsim_data *hwsim =
		container_of(work, struct
mac80211_hwsim_data, hw_scan.work); struct cfg80211_scan_request *req = hwsim->hw_scan_request; int dwell, i; mutex_lock(&hwsim->mutex); if (hwsim->scan_chan_idx >= req->n_channels) { struct cfg80211_scan_info info = { .aborted = false, }; wiphy_dbg(hwsim->hw->wiphy, "hw scan complete\n"); ieee80211_scan_completed(hwsim->hw, &info); hwsim->hw_scan_request = NULL; hwsim->hw_scan_vif = NULL; hwsim->tmp_chan = NULL; mutex_unlock(&hwsim->mutex); return; } wiphy_dbg(hwsim->hw->wiphy, "hw scan %d MHz\n", req->channels[hwsim->scan_chan_idx]->center_freq); hwsim->tmp_chan = req->channels[hwsim->scan_chan_idx]; if (hwsim->tmp_chan->flags & (IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_RADAR) || !req->n_ssids) { dwell = 120; } else { dwell = 30; /* send probes */ for (i = 0; i < req->n_ssids; i++) { struct sk_buff *probe; struct ieee80211_mgmt *mgmt; probe = ieee80211_probereq_get(hwsim->hw, hwsim->scan_addr, req->ssids[i].ssid, req->ssids[i].ssid_len, req->ie_len); if (!probe) continue; mgmt = (struct ieee80211_mgmt *) probe->data; memcpy(mgmt->da, req->bssid, ETH_ALEN); memcpy(mgmt->bssid, req->bssid, ETH_ALEN); if (req->ie_len) skb_put_data(probe, req->ie, req->ie_len); local_bh_disable(); mac80211_hwsim_tx_frame(hwsim->hw, probe, hwsim->tmp_chan); local_bh_enable(); } } ieee80211_queue_delayed_work(hwsim->hw, &hwsim->hw_scan, msecs_to_jiffies(dwell)); hwsim->survey_data[hwsim->scan_chan_idx].channel = hwsim->tmp_chan; hwsim->survey_data[hwsim->scan_chan_idx].start = jiffies; hwsim->survey_data[hwsim->scan_chan_idx].end = jiffies + msecs_to_jiffies(dwell); hwsim->scan_chan_idx++; mutex_unlock(&hwsim->mutex); } static int mac80211_hwsim_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_scan_request *hw_req) { struct mac80211_hwsim_data *hwsim = hw->priv; struct cfg80211_scan_request *req = &hw_req->req; mutex_lock(&hwsim->mutex); if (WARN_ON(hwsim->tmp_chan || hwsim->hw_scan_request)) { mutex_unlock(&hwsim->mutex); return -EBUSY; } hwsim->hw_scan_request = req; hwsim->hw_scan_vif = vif; hwsim->scan_chan_idx = 0; if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) get_random_mask_addr(hwsim->scan_addr, hw_req->req.mac_addr, hw_req->req.mac_addr_mask); else memcpy(hwsim->scan_addr, vif->addr, ETH_ALEN); memset(hwsim->survey_data, 0, sizeof(hwsim->survey_data)); mutex_unlock(&hwsim->mutex); wiphy_dbg(hw->wiphy, "hwsim hw_scan request\n"); ieee80211_queue_delayed_work(hwsim->hw, &hwsim->hw_scan, 0); return 0; } static void mac80211_hwsim_cancel_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct mac80211_hwsim_data *hwsim = hw->priv; struct cfg80211_scan_info info = { .aborted = true, }; wiphy_dbg(hw->wiphy, "hwsim cancel_hw_scan\n"); cancel_delayed_work_sync(&hwsim->hw_scan); mutex_lock(&hwsim->mutex); ieee80211_scan_completed(hwsim->hw, &info); hwsim->tmp_chan = NULL; hwsim->hw_scan_request = NULL; hwsim->hw_scan_vif = NULL; mutex_unlock(&hwsim->mutex); } static void mac80211_hwsim_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, const u8 *mac_addr) { struct mac80211_hwsim_data *hwsim = hw->priv; mutex_lock(&hwsim->mutex); if (hwsim->scanning) { pr_debug("two hwsim sw_scans detected!\n"); goto out; } pr_debug("hwsim sw_scan request, prepping stuff\n"); memcpy(hwsim->scan_addr, mac_addr, ETH_ALEN); hwsim->scanning = true; memset(hwsim->survey_data, 0, sizeof(hwsim->survey_data)); out: mutex_unlock(&hwsim->mutex); } static void mac80211_hwsim_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct mac80211_hwsim_data *hwsim = hw->priv; 
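	/*
	 * scanning and scan_addr were set up in mac80211_hwsim_sw_scan();
	 * clearing them below (under the mutex) stops
	 * mac80211_hwsim_addr_match() from accepting frames sent to the
	 * (possibly randomized) scan address.
	 */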
mutex_lock(&hwsim->mutex); pr_debug("hwsim sw_scan_complete\n"); hwsim->scanning = false; eth_zero_addr(hwsim->scan_addr); mutex_unlock(&hwsim->mutex); } static void hw_roc_start(struct work_struct *work) { struct mac80211_hwsim_data *hwsim = container_of(work, struct mac80211_hwsim_data, roc_start.work); mutex_lock(&hwsim->mutex); wiphy_dbg(hwsim->hw->wiphy, "hwsim ROC begins\n"); hwsim->tmp_chan = hwsim->roc_chan; ieee80211_ready_on_channel(hwsim->hw); ieee80211_queue_delayed_work(hwsim->hw, &hwsim->roc_done, msecs_to_jiffies(hwsim->roc_duration)); mutex_unlock(&hwsim->mutex); } static void hw_roc_done(struct work_struct *work) { struct mac80211_hwsim_data *hwsim = container_of(work, struct mac80211_hwsim_data, roc_done.work); mutex_lock(&hwsim->mutex); ieee80211_remain_on_channel_expired(hwsim->hw); hwsim->tmp_chan = NULL; mutex_unlock(&hwsim->mutex); wiphy_dbg(hwsim->hw->wiphy, "hwsim ROC expired\n"); } static int mac80211_hwsim_roc(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_channel *chan, int duration, enum ieee80211_roc_type type) { struct mac80211_hwsim_data *hwsim = hw->priv; mutex_lock(&hwsim->mutex); if (WARN_ON(hwsim->tmp_chan || hwsim->hw_scan_request)) { mutex_unlock(&hwsim->mutex); return -EBUSY; } hwsim->roc_chan = chan; hwsim->roc_duration = duration; mutex_unlock(&hwsim->mutex); wiphy_dbg(hw->wiphy, "hwsim ROC (%d MHz, %d ms)\n", chan->center_freq, duration); ieee80211_queue_delayed_work(hw, &hwsim->roc_start, HZ/50); return 0; } static int mac80211_hwsim_croc(struct ieee80211_hw *hw) { struct mac80211_hwsim_data *hwsim = hw->priv; cancel_delayed_work_sync(&hwsim->roc_start); cancel_delayed_work_sync(&hwsim->roc_done); mutex_lock(&hwsim->mutex); hwsim->tmp_chan = NULL; mutex_unlock(&hwsim->mutex); wiphy_dbg(hw->wiphy, "hwsim ROC canceled\n"); return 0; } static int mac80211_hwsim_add_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *ctx) { hwsim_set_chanctx_magic(ctx); wiphy_dbg(hw->wiphy, "add channel context control: %d MHz/width: %d/cfreqs:%d/%d MHz\n", ctx->def.chan->center_freq, ctx->def.width, ctx->def.center_freq1, ctx->def.center_freq2); return 0; } static void mac80211_hwsim_remove_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *ctx) { wiphy_dbg(hw->wiphy, "remove channel context control: %d MHz/width: %d/cfreqs:%d/%d MHz\n", ctx->def.chan->center_freq, ctx->def.width, ctx->def.center_freq1, ctx->def.center_freq2); hwsim_check_chanctx_magic(ctx); hwsim_clear_chanctx_magic(ctx); } static void mac80211_hwsim_change_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *ctx, u32 changed) { hwsim_check_chanctx_magic(ctx); wiphy_dbg(hw->wiphy, "change channel context control: %d MHz/width: %d/cfreqs:%d/%d MHz\n", ctx->def.chan->center_freq, ctx->def.width, ctx->def.center_freq1, ctx->def.center_freq2); } static int mac80211_hwsim_assign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_chanctx_conf *ctx) { hwsim_check_magic(vif); hwsim_check_chanctx_magic(ctx); return 0; } static void mac80211_hwsim_unassign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_chanctx_conf *ctx) { hwsim_check_magic(vif); hwsim_check_chanctx_magic(ctx); } static const char mac80211_hwsim_gstrings_stats[][ETH_GSTRING_LEN] = { "tx_pkts_nic", "tx_bytes_nic", "rx_pkts_nic", "rx_bytes_nic", "d_tx_dropped", "d_tx_failed", "d_ps_mode", "d_group", }; #define MAC80211_HWSIM_SSTATS_LEN ARRAY_SIZE(mac80211_hwsim_gstrings_stats) static void mac80211_hwsim_get_et_strings(struct 
ieee80211_hw *hw, struct ieee80211_vif *vif, u32 sset, u8 *data) { if (sset == ETH_SS_STATS) memcpy(data, *mac80211_hwsim_gstrings_stats, sizeof(mac80211_hwsim_gstrings_stats)); } static int mac80211_hwsim_get_et_sset_count(struct ieee80211_hw *hw, struct ieee80211_vif *vif, int sset) { if (sset == ETH_SS_STATS) return MAC80211_HWSIM_SSTATS_LEN; return 0; } static void mac80211_hwsim_get_et_stats(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ethtool_stats *stats, u64 *data) { struct mac80211_hwsim_data *ar = hw->priv; int i = 0; data[i++] = ar->tx_pkts; data[i++] = ar->tx_bytes; data[i++] = ar->rx_pkts; data[i++] = ar->rx_bytes; data[i++] = ar->tx_dropped; data[i++] = ar->tx_failed; data[i++] = ar->ps; data[i++] = ar->group; WARN_ON(i != MAC80211_HWSIM_SSTATS_LEN); } #define HWSIM_COMMON_OPS \ .tx = mac80211_hwsim_tx, \ .start = mac80211_hwsim_start, \ .stop = mac80211_hwsim_stop, \ .add_interface = mac80211_hwsim_add_interface, \ .change_interface = mac80211_hwsim_change_interface, \ .remove_interface = mac80211_hwsim_remove_interface, \ .config = mac80211_hwsim_config, \ .configure_filter = mac80211_hwsim_configure_filter, \ .bss_info_changed = mac80211_hwsim_bss_info_changed, \ .sta_add = mac80211_hwsim_sta_add, \ .sta_remove = mac80211_hwsim_sta_remove, \ .sta_notify = mac80211_hwsim_sta_notify, \ .set_tim = mac80211_hwsim_set_tim, \ .conf_tx = mac80211_hwsim_conf_tx, \ .get_survey = mac80211_hwsim_get_survey, \ CFG80211_TESTMODE_CMD(mac80211_hwsim_testmode_cmd) \ .ampdu_action = mac80211_hwsim_ampdu_action, \ .flush = mac80211_hwsim_flush, \ .get_tsf = mac80211_hwsim_get_tsf, \ .set_tsf = mac80211_hwsim_set_tsf, \ .get_et_sset_count = mac80211_hwsim_get_et_sset_count, \ .get_et_stats = mac80211_hwsim_get_et_stats, \ .get_et_strings = mac80211_hwsim_get_et_strings, static const struct ieee80211_ops mac80211_hwsim_ops = { HWSIM_COMMON_OPS .sw_scan_start = mac80211_hwsim_sw_scan, .sw_scan_complete = mac80211_hwsim_sw_scan_complete, }; static const struct ieee80211_ops mac80211_hwsim_mchan_ops = { HWSIM_COMMON_OPS .hw_scan = mac80211_hwsim_hw_scan, .cancel_hw_scan = mac80211_hwsim_cancel_hw_scan, .sw_scan_start = NULL, .sw_scan_complete = NULL, .remain_on_channel = mac80211_hwsim_roc, .cancel_remain_on_channel = mac80211_hwsim_croc, .add_chanctx = mac80211_hwsim_add_chanctx, .remove_chanctx = mac80211_hwsim_remove_chanctx, .change_chanctx = mac80211_hwsim_change_chanctx, .assign_vif_chanctx = mac80211_hwsim_assign_vif_chanctx, .unassign_vif_chanctx = mac80211_hwsim_unassign_vif_chanctx, }; struct hwsim_new_radio_params { unsigned int channels; const char *reg_alpha2; const struct ieee80211_regdomain *regd; bool reg_strict; bool p2p_device; bool use_chanctx; bool destroy_on_close; const char *hwname; bool no_vif; const u8 *perm_addr; u32 iftypes; u32 *ciphers; u8 n_ciphers; }; static void hwsim_mcast_config_msg(struct sk_buff *mcast_skb, struct genl_info *info) { if (info) genl_notify(&hwsim_genl_family, mcast_skb, info, HWSIM_MCGRP_CONFIG, GFP_KERNEL); else genlmsg_multicast(&hwsim_genl_family, mcast_skb, 0, HWSIM_MCGRP_CONFIG, GFP_KERNEL); } static int append_radio_msg(struct sk_buff *skb, int id, struct hwsim_new_radio_params *param) { int ret; ret = nla_put_u32(skb, HWSIM_ATTR_RADIO_ID, id); if (ret < 0) return ret; if (param->channels) { ret = nla_put_u32(skb, HWSIM_ATTR_CHANNELS, param->channels); if (ret < 0) return ret; } if (param->reg_alpha2) { ret = nla_put(skb, HWSIM_ATTR_REG_HINT_ALPHA2, 2, param->reg_alpha2); if (ret < 0) return ret; } if (param->regd) { int i; 
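		/*
		 * A custom regdomain is identified to userspace by its
		 * index in hwsim_world_regdom_custom[] rather than by
		 * value, so a listener must share the same table order.
		 */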
for (i = 0; i < ARRAY_SIZE(hwsim_world_regdom_custom); i++) { if (hwsim_world_regdom_custom[i] != param->regd) continue; ret = nla_put_u32(skb, HWSIM_ATTR_REG_CUSTOM_REG, i); if (ret < 0) return ret; break; } } if (param->reg_strict) { ret = nla_put_flag(skb, HWSIM_ATTR_REG_STRICT_REG); if (ret < 0) return ret; } if (param->p2p_device) { ret = nla_put_flag(skb, HWSIM_ATTR_SUPPORT_P2P_DEVICE); if (ret < 0) return ret; } if (param->use_chanctx) { ret = nla_put_flag(skb, HWSIM_ATTR_USE_CHANCTX); if (ret < 0) return ret; } if (param->hwname) { ret = nla_put(skb, HWSIM_ATTR_RADIO_NAME, strlen(param->hwname), param->hwname); if (ret < 0) return ret; } return 0; } static void hwsim_mcast_new_radio(int id, struct genl_info *info, struct hwsim_new_radio_params *param) { struct sk_buff *mcast_skb; void *data; mcast_skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!mcast_skb) return; data = genlmsg_put(mcast_skb, 0, 0, &hwsim_genl_family, 0, HWSIM_CMD_NEW_RADIO); if (!data) goto out_err; if (append_radio_msg(mcast_skb, id, param) < 0) goto out_err; genlmsg_end(mcast_skb, data); hwsim_mcast_config_msg(mcast_skb, info); return; out_err: nlmsg_free(mcast_skb); } static const struct ieee80211_sband_iftype_data he_capa_2ghz = { /* TODO: should we support other types, e.g., P2P?*/ .types_mask = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP), .he_cap = { .has_he = true, .he_cap_elem = { .mac_cap_info[0] = IEEE80211_HE_MAC_CAP0_HTC_HE, .mac_cap_info[1] = IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US | IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8, .mac_cap_info[2] = IEEE80211_HE_MAC_CAP2_BSR | IEEE80211_HE_MAC_CAP2_MU_CASCADING | IEEE80211_HE_MAC_CAP2_ACK_EN, .mac_cap_info[3] = IEEE80211_HE_MAC_CAP3_OMI_CONTROL | IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_VHT_2, .mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU, .phy_cap_info[1] = IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK | IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A | IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD | IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS, .phy_cap_info[2] = IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US | IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ | IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ | IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO | IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO, /* Leave all the other PHY capability bytes unset, as * DCM, beam forming, RU and PPE threshold information * are not supported */ }, .he_mcs_nss_supp = { .rx_mcs_80 = cpu_to_le16(0xfffa), .tx_mcs_80 = cpu_to_le16(0xfffa), .rx_mcs_160 = cpu_to_le16(0xffff), .tx_mcs_160 = cpu_to_le16(0xffff), .rx_mcs_80p80 = cpu_to_le16(0xffff), .tx_mcs_80p80 = cpu_to_le16(0xffff), }, }, }; static const struct ieee80211_sband_iftype_data he_capa_5ghz = { /* TODO: should we support other types, e.g., P2P?*/ .types_mask = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP), .he_cap = { .has_he = true, .he_cap_elem = { .mac_cap_info[0] = IEEE80211_HE_MAC_CAP0_HTC_HE, .mac_cap_info[1] = IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US | IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8, .mac_cap_info[2] = IEEE80211_HE_MAC_CAP2_BSR | IEEE80211_HE_MAC_CAP2_MU_CASCADING | IEEE80211_HE_MAC_CAP2_ACK_EN, .mac_cap_info[3] = IEEE80211_HE_MAC_CAP3_OMI_CONTROL | IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_VHT_2, .mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU, .phy_cap_info[0] = IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G | IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G | IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G, .phy_cap_info[1] = 
IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK |
				IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A |
				IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD |
				IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS,
			.phy_cap_info[2] =
				IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US |
				IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ |
				IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ |
				IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO |
				IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO,

			/* Leave all the other PHY capability bytes unset, as
			 * DCM, beam forming, RU and PPE threshold information
			 * are not supported
			 */
		},
		.he_mcs_nss_supp = {
			.rx_mcs_80 = cpu_to_le16(0xfffa),
			.tx_mcs_80 = cpu_to_le16(0xfffa),
			.rx_mcs_160 = cpu_to_le16(0xfffa),
			.tx_mcs_160 = cpu_to_le16(0xfffa),
			.rx_mcs_80p80 = cpu_to_le16(0xfffa),
			.tx_mcs_80p80 = cpu_to_le16(0xfffa),
		},
	},
};

static void mac80211_hswim_he_capab(struct ieee80211_supported_band *sband)
{
	if (sband->band == NL80211_BAND_2GHZ)
		sband->iftype_data =
			(struct ieee80211_sband_iftype_data *)&he_capa_2ghz;
	else if (sband->band == NL80211_BAND_5GHZ)
		sband->iftype_data =
			(struct ieee80211_sband_iftype_data *)&he_capa_5ghz;
	else
		return;

	sband->n_iftype_data = 1;
}

#ifdef CPTCFG_MAC80211_MESH
#define HWSIM_MESH_BIT BIT(NL80211_IFTYPE_MESH_POINT)
#else
#define HWSIM_MESH_BIT 0
#endif

#define HWSIM_DEFAULT_IF_LIMIT \
	(BIT(NL80211_IFTYPE_STATION) | \
	 BIT(NL80211_IFTYPE_P2P_CLIENT) | \
	 BIT(NL80211_IFTYPE_AP) | \
	 BIT(NL80211_IFTYPE_P2P_GO) | \
	 HWSIM_MESH_BIT)

#define HWSIM_IFTYPE_SUPPORT_MASK \
	(BIT(NL80211_IFTYPE_STATION) | \
	 BIT(NL80211_IFTYPE_AP) | \
	 BIT(NL80211_IFTYPE_P2P_CLIENT) | \
	 BIT(NL80211_IFTYPE_P2P_GO) | \
	 BIT(NL80211_IFTYPE_ADHOC) | \
	 BIT(NL80211_IFTYPE_MESH_POINT))

static int mac80211_hwsim_new_radio(struct genl_info *info,
				    struct hwsim_new_radio_params *param)
{
	int err;
	u8 addr[ETH_ALEN];
	struct mac80211_hwsim_data *data;
	struct ieee80211_hw *hw;
	enum nl80211_band band;
	const struct ieee80211_ops *ops = &mac80211_hwsim_ops;
	struct net *net;
	int idx;
	int n_limits = 0;

	if (WARN_ON(param->channels > 1 && !param->use_chanctx))
		return -EINVAL;

	spin_lock_bh(&hwsim_radio_lock);
	idx = hwsim_radio_idx++;
	spin_unlock_bh(&hwsim_radio_lock);

	if (param->use_chanctx)
		ops = &mac80211_hwsim_mchan_ops;
	hw = ieee80211_alloc_hw_nm(sizeof(*data), ops, param->hwname);
	if (!hw) {
		pr_debug("mac80211_hwsim: ieee80211_alloc_hw failed\n");
		err = -ENOMEM;
		goto failed;
	}

	/* ieee80211_alloc_hw_nm may have used a default name */
	param->hwname = wiphy_name(hw->wiphy);

	if (info)
		net = genl_info_net(info);
	else
		net = &init_net;
	wiphy_net_set(hw->wiphy, net);

	data = hw->priv;
	data->hw = hw;

	data->dev = device_create(hwsim_class, NULL, 0, hw, "hwsim%d", idx);
	if (IS_ERR(data->dev)) {
		printk(KERN_DEBUG
		       "mac80211_hwsim: device_create failed (%ld)\n",
		       PTR_ERR(data->dev));
		err = -ENOMEM;
		goto failed_drvdata;
	}
	data->dev->driver = &mac80211_hwsim_driver.driver;
	err = device_bind_driver(data->dev);
	if (err != 0) {
		pr_debug("mac80211_hwsim: device_bind_driver failed (%d)\n",
			 err);
		goto failed_bind;
	}

	skb_queue_head_init(&data->pending);

	SET_IEEE80211_DEV(hw, data->dev);
	if (!param->perm_addr) {
		eth_zero_addr(addr);
		addr[0] = 0x02;
		addr[3] = idx >> 8;
		addr[4] = idx;
		memcpy(data->addresses[0].addr, addr, ETH_ALEN);
		/* Why is a second address needed here?
*/ memcpy(data->addresses[1].addr, addr, ETH_ALEN); data->addresses[1].addr[0] |= 0x40; hw->wiphy->n_addresses = 2; hw->wiphy->addresses = data->addresses; /* possible address clash is checked at hash table insertion */ } else { memcpy(data->addresses[0].addr, param->perm_addr, ETH_ALEN); /* compatibility with automatically generated mac addr */ memcpy(data->addresses[1].addr, param->perm_addr, ETH_ALEN); hw->wiphy->n_addresses = 2; hw->wiphy->addresses = data->addresses; } data->channels = param->channels; data->use_chanctx = param->use_chanctx; data->idx = idx; data->destroy_on_close = param->destroy_on_close; if (info) data->portid = genl_info_snd_portid(info); /* setup interface limits, only on interface types we support */ if (param->iftypes & BIT(NL80211_IFTYPE_ADHOC)) { data->if_limits[n_limits].max = 1; data->if_limits[n_limits].types = BIT(NL80211_IFTYPE_ADHOC); n_limits++; } if (param->iftypes & HWSIM_DEFAULT_IF_LIMIT) { data->if_limits[n_limits].max = 2048; /* * For this case, we may only support a subset of * HWSIM_DEFAULT_IF_LIMIT, therefore we only want to add the * bits that both param->iftype & HWSIM_DEFAULT_IF_LIMIT have. */ data->if_limits[n_limits].types = HWSIM_DEFAULT_IF_LIMIT & param->iftypes; n_limits++; } if (param->iftypes & BIT(NL80211_IFTYPE_P2P_DEVICE)) { data->if_limits[n_limits].max = 1; data->if_limits[n_limits].types = BIT(NL80211_IFTYPE_P2P_DEVICE); n_limits++; } if (data->use_chanctx) { hw->wiphy->max_scan_ssids = 255; hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN; hw->wiphy->max_remain_on_channel_duration = 1000; data->if_combination.radar_detect_widths = 0; data->if_combination.num_different_channels = data->channels; } else { data->if_combination.num_different_channels = 1; data->if_combination.radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | BIT(NL80211_CHAN_WIDTH_20) | BIT(NL80211_CHAN_WIDTH_40) | BIT(NL80211_CHAN_WIDTH_80) | BIT(NL80211_CHAN_WIDTH_160); } data->if_combination.n_limits = n_limits; data->if_combination.max_interfaces = 2048; data->if_combination.limits = data->if_limits; hw->wiphy->iface_combinations = &data->if_combination; hw->wiphy->n_iface_combinations = 1; if (param->ciphers) { int ciphers_len = param->n_ciphers * sizeof(data->ciphers[0]); if (WARN_ON_ONCE(ciphers_len > sizeof(data->ciphers))) ciphers_len = sizeof(data->ciphers); memcpy(data->ciphers, param->ciphers, ciphers_len); hw->wiphy->cipher_suites = data->ciphers; hw->wiphy->n_cipher_suites = ciphers_len / sizeof(data->ciphers[0]); } data->rx_rssi = DEFAULT_RX_RSSI; INIT_DELAYED_WORK(&data->roc_start, hw_roc_start); INIT_DELAYED_WORK(&data->roc_done, hw_roc_done); INIT_DELAYED_WORK(&data->hw_scan, hw_scan_work); hw->queues = 5; hw->offchannel_tx_hw_queue = 4; ieee80211_hw_set(hw, SUPPORT_FAST_XMIT); ieee80211_hw_set(hw, CHANCTX_STA_CSA); ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES); ieee80211_hw_set(hw, QUEUE_CONTROL); ieee80211_hw_set(hw, WANT_MONITOR_VIF); ieee80211_hw_set(hw, AMPDU_AGGREGATION); ieee80211_hw_set(hw, MFP_CAPABLE); ieee80211_hw_set(hw, SIGNAL_DBM); ieee80211_hw_set(hw, SUPPORTS_PS); ieee80211_hw_set(hw, TDLS_WIDER_BW); if (rctbl) ieee80211_hw_set(hw, SUPPORTS_RC_TABLE); hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS | WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL | WIPHY_FLAG_AP_UAPSD | WIPHY_FLAG_HAS_CHANNEL_SWITCH; hw->wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR | NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE | NL80211_FEATURE_STATIC_SMPS | NL80211_FEATURE_DYNAMIC_SMPS | NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR; wiphy_ext_feature_set(hw->wiphy, 
NL80211_EXT_FEATURE_VHT_IBSS); hw->wiphy->interface_modes = param->iftypes; /* ask mac80211 to reserve space for magic */ hw->vif_data_size = sizeof(struct hwsim_vif_priv); hw->sta_data_size = sizeof(struct hwsim_sta_priv); hw->chanctx_data_size = sizeof(struct hwsim_chanctx_priv); memcpy(data->channels_2ghz, hwsim_channels_2ghz, sizeof(hwsim_channels_2ghz)); memcpy(data->channels_5ghz, hwsim_channels_5ghz, sizeof(hwsim_channels_5ghz)); memcpy(data->rates, hwsim_rates, sizeof(hwsim_rates)); for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) { struct ieee80211_supported_band *sband = &data->bands[band]; sband->band = band; switch (band) { case NL80211_BAND_2GHZ: sband->channels = data->channels_2ghz; sband->n_channels = ARRAY_SIZE(hwsim_channels_2ghz); sband->bitrates = data->rates; sband->n_bitrates = ARRAY_SIZE(hwsim_rates); break; case NL80211_BAND_5GHZ: sband->channels = data->channels_5ghz; sband->n_channels = ARRAY_SIZE(hwsim_channels_5ghz); sband->bitrates = data->rates + 4; sband->n_bitrates = ARRAY_SIZE(hwsim_rates) - 4; sband->vht_cap.vht_supported = true; sband->vht_cap.cap = IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 | IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ | IEEE80211_VHT_CAP_RXLDPC | IEEE80211_VHT_CAP_SHORT_GI_80 | IEEE80211_VHT_CAP_SHORT_GI_160 | IEEE80211_VHT_CAP_TXSTBC | IEEE80211_VHT_CAP_RXSTBC_4 | IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK; sband->vht_cap.vht_mcs.rx_mcs_map = cpu_to_le16(IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 | IEEE80211_VHT_MCS_SUPPORT_0_9 << 2 | IEEE80211_VHT_MCS_SUPPORT_0_9 << 4 | IEEE80211_VHT_MCS_SUPPORT_0_9 << 6 | IEEE80211_VHT_MCS_SUPPORT_0_9 << 8 | IEEE80211_VHT_MCS_SUPPORT_0_9 << 10 | IEEE80211_VHT_MCS_SUPPORT_0_9 << 12 | IEEE80211_VHT_MCS_SUPPORT_0_9 << 14); sband->vht_cap.vht_mcs.tx_mcs_map = sband->vht_cap.vht_mcs.rx_mcs_map; break; default: continue; } sband->ht_cap.ht_supported = true; sband->ht_cap.cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 | IEEE80211_HT_CAP_GRN_FLD | IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_DSSSCCK40; sband->ht_cap.ampdu_factor = 0x3; sband->ht_cap.ampdu_density = 0x6; memset(&sband->ht_cap.mcs, 0, sizeof(sband->ht_cap.mcs)); sband->ht_cap.mcs.rx_mask[0] = 0xff; sband->ht_cap.mcs.rx_mask[1] = 0xff; sband->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED; mac80211_hswim_he_capab(sband); hw->wiphy->bands[band] = sband; } /* By default all radios belong to the first group */ data->group = 1; mutex_init(&data->mutex); data->netgroup = hwsim_net_get_netgroup(net); data->wmediumd = hwsim_net_get_wmediumd(net); /* Enable frame retransmissions for lossy channels */ hw->max_rates = 4; hw->max_rate_tries = 11; hw->wiphy->vendor_commands = mac80211_hwsim_vendor_commands; hw->wiphy->n_vendor_commands = ARRAY_SIZE(mac80211_hwsim_vendor_commands); hw->wiphy->vendor_events = mac80211_hwsim_vendor_events; hw->wiphy->n_vendor_events = ARRAY_SIZE(mac80211_hwsim_vendor_events); if (param->reg_strict) hw->wiphy->regulatory_flags |= REGULATORY_STRICT_REG; if (param->regd) { data->regd = param->regd; hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG; wiphy_apply_custom_regulatory(hw->wiphy, param->regd); /* give the regulatory workqueue a chance to run */ schedule_timeout_interruptible(1); } if (param->no_vif) ieee80211_hw_set(hw, NO_AUTO_VIF); wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST); tasklet_hrtimer_init(&data->beacon_timer, mac80211_hwsim_beacon, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); err = ieee80211_register_hw(hw); if (err < 0) { pr_debug("mac80211_hwsim: 
ieee80211_register_hw failed (%d)\n", err); goto failed_hw; } wiphy_dbg(hw->wiphy, "hwaddr %pM registered\n", hw->wiphy->perm_addr); if (param->reg_alpha2) { data->alpha2[0] = param->reg_alpha2[0]; data->alpha2[1] = param->reg_alpha2[1]; regulatory_hint(hw->wiphy, param->reg_alpha2); } data->debugfs = debugfs_create_dir("hwsim", hw->wiphy->debugfsdir); debugfs_create_file("ps", 0666, data->debugfs, data, &hwsim_fops_ps); debugfs_create_file("group", 0666, data->debugfs, data, &hwsim_fops_group); debugfs_create_file("rx_rssi", 0666, data->debugfs, data, &hwsim_fops_rx_rssi); if (!data->use_chanctx) debugfs_create_file("dfs_simulate_radar", 0222, data->debugfs, data, &hwsim_simulate_radar); spin_lock_bh(&hwsim_radio_lock); err = rhashtable_insert_fast(&hwsim_radios_rht, &data->rht, hwsim_rht_params); if (err < 0) { if (info) { GENL_SET_ERR_MSG(info, "perm addr already present"); NL_SET_BAD_ATTR(genl_info_extack(info), info->attrs[HWSIM_ATTR_PERM_ADDR]); } spin_unlock_bh(&hwsim_radio_lock); goto failed_final_insert; } list_add_tail(&data->list, &hwsim_radios); hwsim_radios_generation++; spin_unlock_bh(&hwsim_radio_lock); hwsim_mcast_new_radio(idx, info, param); return idx; failed_final_insert: debugfs_remove_recursive(data->debugfs); ieee80211_unregister_hw(data->hw); failed_hw: device_release_driver(data->dev); failed_bind: device_unregister(data->dev); failed_drvdata: ieee80211_free_hw(hw); failed: return err; } static void hwsim_mcast_del_radio(int id, const char *hwname, struct genl_info *info) { struct sk_buff *skb; void *data; int ret; skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!skb) return; data = genlmsg_put(skb, 0, 0, &hwsim_genl_family, 0, HWSIM_CMD_DEL_RADIO); if (!data) goto error; ret = nla_put_u32(skb, HWSIM_ATTR_RADIO_ID, id); if (ret < 0) goto error; ret = nla_put(skb, HWSIM_ATTR_RADIO_NAME, strlen(hwname), hwname); if (ret < 0) goto error; genlmsg_end(skb, data); hwsim_mcast_config_msg(skb, info); return; error: nlmsg_free(skb); } static void mac80211_hwsim_del_radio(struct mac80211_hwsim_data *data, const char *hwname, struct genl_info *info) { hwsim_mcast_del_radio(data->idx, hwname, info); debugfs_remove_recursive(data->debugfs); ieee80211_unregister_hw(data->hw); device_release_driver(data->dev); device_unregister(data->dev); ieee80211_free_hw(data->hw); } static int mac80211_hwsim_get_radio(struct sk_buff *skb, struct mac80211_hwsim_data *data, u32 portid, u32 seq, struct netlink_callback *cb, int flags) { void *hdr; struct hwsim_new_radio_params param = { }; int res = -EMSGSIZE; hdr = genlmsg_put(skb, portid, seq, &hwsim_genl_family, flags, HWSIM_CMD_GET_RADIO); if (!hdr) return -EMSGSIZE; if (cb) genl_dump_check_consistent(cb, hdr); if (data->alpha2[0] && data->alpha2[1]) param.reg_alpha2 = data->alpha2; param.reg_strict = !!(data->hw->wiphy->regulatory_flags & REGULATORY_STRICT_REG); param.p2p_device = !!(data->hw->wiphy->interface_modes & BIT(NL80211_IFTYPE_P2P_DEVICE)); param.use_chanctx = data->use_chanctx; param.regd = data->regd; param.channels = data->channels; param.hwname = wiphy_name(data->hw->wiphy); res = append_radio_msg(skb, data->idx, ¶m); if (res < 0) goto out_err; genlmsg_end(skb, hdr); return 0; out_err: genlmsg_cancel(skb, hdr); return res; } static void mac80211_hwsim_free(void) { struct mac80211_hwsim_data *data; spin_lock_bh(&hwsim_radio_lock); while ((data = list_first_entry_or_null(&hwsim_radios, struct mac80211_hwsim_data, list))) { list_del(&data->list); spin_unlock_bh(&hwsim_radio_lock); mac80211_hwsim_del_radio(data, 
wiphy_name(data->hw->wiphy), NULL); spin_lock_bh(&hwsim_radio_lock); } spin_unlock_bh(&hwsim_radio_lock); class_destroy(hwsim_class); } static const struct net_device_ops hwsim_netdev_ops = { .ndo_start_xmit = hwsim_mon_xmit, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, }; static void hwsim_mon_setup(struct net_device *dev) { dev->netdev_ops = &hwsim_netdev_ops; netdev_set_def_destructor(dev); ether_setup(dev); #if LINUX_VERSION_IS_GEQ(4,3,0) dev->priv_flags |= IFF_NO_QUEUE; #else dev->tx_queue_len = 0; #endif dev->type = ARPHRD_IEEE80211_RADIOTAP; eth_zero_addr(dev->dev_addr); dev->dev_addr[0] = 0x12; } static struct mac80211_hwsim_data *get_hwsim_data_ref_from_addr(const u8 *addr) { return rhashtable_lookup_fast(&hwsim_radios_rht, addr, hwsim_rht_params); } static void hwsim_register_wmediumd(struct net *net, u32 portid) { struct mac80211_hwsim_data *data; hwsim_net_set_wmediumd(net, portid); spin_lock_bh(&hwsim_radio_lock); list_for_each_entry(data, &hwsim_radios, list) { if (data->netgroup == hwsim_net_get_netgroup(net)) data->wmediumd = portid; } spin_unlock_bh(&hwsim_radio_lock); } static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2, struct genl_info *info) { struct ieee80211_hdr *hdr; struct mac80211_hwsim_data *data2; struct ieee80211_tx_info *txi; struct hwsim_tx_rate *tx_attempts; u64 ret_skb_cookie; struct sk_buff *skb, *tmp; const u8 *src; unsigned int hwsim_flags; int i; bool found = false; if (!info->attrs[HWSIM_ATTR_ADDR_TRANSMITTER] || !info->attrs[HWSIM_ATTR_FLAGS] || !info->attrs[HWSIM_ATTR_COOKIE] || !info->attrs[HWSIM_ATTR_SIGNAL] || !info->attrs[HWSIM_ATTR_TX_INFO]) goto out; src = (void *)nla_data(info->attrs[HWSIM_ATTR_ADDR_TRANSMITTER]); hwsim_flags = nla_get_u32(info->attrs[HWSIM_ATTR_FLAGS]); ret_skb_cookie = nla_get_u64(info->attrs[HWSIM_ATTR_COOKIE]); data2 = get_hwsim_data_ref_from_addr(src); if (!data2) goto out; if (hwsim_net_get_netgroup(genl_info_net(info)) != data2->netgroup) goto out; if (genl_info_snd_portid(info) != data2->wmediumd) goto out; /* look for the skb matching the cookie passed back from user */ skb_queue_walk_safe(&data2->pending, skb, tmp) { u64 skb_cookie; txi = IEEE80211_SKB_CB(skb); skb_cookie = (u64)(uintptr_t)txi->rate_driver_data[0]; if (skb_cookie == ret_skb_cookie) { skb_unlink(skb, &data2->pending); found = true; break; } } /* not found */ if (!found) goto out; /* Tx info received because the frame was broadcasted on user space, so we get all the necessary info: tx attempts and skb control buff */ tx_attempts = (struct hwsim_tx_rate *)nla_data( info->attrs[HWSIM_ATTR_TX_INFO]); /* now send back TX status */ txi = IEEE80211_SKB_CB(skb); ieee80211_tx_info_clear_status(txi); for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) { txi->status.rates[i].idx = tx_attempts[i].idx; txi->status.rates[i].count = tx_attempts[i].count; } txi->status.ack_signal = nla_get_u32(info->attrs[HWSIM_ATTR_SIGNAL]); if (!(hwsim_flags & HWSIM_TX_CTL_NO_ACK) && (hwsim_flags & HWSIM_TX_STAT_ACK)) { if (skb->len >= 16) { hdr = (struct ieee80211_hdr *) skb->data; mac80211_hwsim_monitor_ack(data2->channel, hdr->addr2); } txi->flags |= IEEE80211_TX_STAT_ACK; } ieee80211_tx_status_irqsafe(data2->hw, skb); return 0; out: return -EINVAL; } static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2, struct genl_info *info) { struct mac80211_hwsim_data *data2; struct ieee80211_rx_status rx_status; const u8 *dst; int frame_data_len; void *frame_data; struct sk_buff *skb = NULL; if (!info->attrs[HWSIM_ATTR_ADDR_RECEIVER] || 
!info->attrs[HWSIM_ATTR_FRAME] || !info->attrs[HWSIM_ATTR_RX_RATE] || !info->attrs[HWSIM_ATTR_SIGNAL]) goto out; dst = (void *)nla_data(info->attrs[HWSIM_ATTR_ADDR_RECEIVER]); frame_data_len = nla_len(info->attrs[HWSIM_ATTR_FRAME]); frame_data = (void *)nla_data(info->attrs[HWSIM_ATTR_FRAME]); /* Allocate new skb here */ skb = alloc_skb(frame_data_len, GFP_KERNEL); if (skb == NULL) goto err; if (frame_data_len > IEEE80211_MAX_DATA_LEN) goto err; /* Copy the data */ skb_put_data(skb, frame_data, frame_data_len); data2 = get_hwsim_data_ref_from_addr(dst); if (!data2) goto out; if (hwsim_net_get_netgroup(genl_info_net(info)) != data2->netgroup) goto out; if (genl_info_snd_portid(info) != data2->wmediumd) goto out; /* check if radio is configured properly */ if (data2->idle || !data2->started) goto out; /* A frame is received from user space */ memset(&rx_status, 0, sizeof(rx_status)); if (info->attrs[HWSIM_ATTR_FREQ]) { /* throw away off-channel packets, but allow both the temporary * ("hw" scan/remain-on-channel) and regular channel, since the * internal datapath also allows this */ mutex_lock(&data2->mutex); rx_status.freq = nla_get_u32(info->attrs[HWSIM_ATTR_FREQ]); if (rx_status.freq != data2->channel->center_freq && (!data2->tmp_chan || rx_status.freq != data2->tmp_chan->center_freq)) { mutex_unlock(&data2->mutex); goto out; } mutex_unlock(&data2->mutex); } else { rx_status.freq = data2->channel->center_freq; } rx_status.band = data2->channel->band; rx_status.rate_idx = nla_get_u32(info->attrs[HWSIM_ATTR_RX_RATE]); rx_status.signal = nla_get_u32(info->attrs[HWSIM_ATTR_SIGNAL]); memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status)); data2->rx_pkts++; data2->rx_bytes += skb->len; ieee80211_rx_irqsafe(data2->hw, skb); return 0; err: pr_debug("mac80211_hwsim: error occurred in %s\n", __func__); out: dev_kfree_skb(skb); return -EINVAL; } static int hwsim_register_received_nl(struct sk_buff *skb_2, struct genl_info *info) { struct net *net = genl_info_net(info); struct mac80211_hwsim_data *data; int chans = 1; spin_lock_bh(&hwsim_radio_lock); list_for_each_entry(data, &hwsim_radios, list) chans = max(chans, data->channels); spin_unlock_bh(&hwsim_radio_lock); /* In the future we should revise the userspace API and allow it * to set a flag that it does support multi-channel, then we can * let this pass conditionally on the flag. * For current userspace, prohibit it since it won't work right. 
*/ if (chans > 1) return -EOPNOTSUPP; if (hwsim_net_get_wmediumd(net)) return -EBUSY; hwsim_register_wmediumd(net, genl_info_snd_portid(info)); pr_debug("mac80211_hwsim: received a REGISTER, " "switching to wmediumd mode with pid %d\n", genl_info_snd_portid(info)); return 0; } /* ensures ciphers only include ciphers listed in 'hwsim_ciphers' array */ static bool hwsim_known_ciphers(const u32 *ciphers, int n_ciphers) { int i; for (i = 0; i < n_ciphers; i++) { int j; int found = 0; for (j = 0; j < ARRAY_SIZE(hwsim_ciphers); j++) { if (ciphers[i] == hwsim_ciphers[j]) { found = 1; break; } } if (!found) return false; } return true; } static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info) { struct hwsim_new_radio_params param = { 0 }; const char *hwname = NULL; int ret; param.reg_strict = info->attrs[HWSIM_ATTR_REG_STRICT_REG]; param.p2p_device = info->attrs[HWSIM_ATTR_SUPPORT_P2P_DEVICE]; param.channels = channels; param.destroy_on_close = info->attrs[HWSIM_ATTR_DESTROY_RADIO_ON_CLOSE]; if (info->attrs[HWSIM_ATTR_CHANNELS]) param.channels = nla_get_u32(info->attrs[HWSIM_ATTR_CHANNELS]); if (param.channels < 1) { GENL_SET_ERR_MSG(info, "must have at least one channel"); return -EINVAL; } if (param.channels > CFG80211_MAX_NUM_DIFFERENT_CHANNELS) { GENL_SET_ERR_MSG(info, "too many channels specified"); return -EINVAL; } if (info->attrs[HWSIM_ATTR_NO_VIF]) param.no_vif = true; if (info->attrs[HWSIM_ATTR_USE_CHANCTX]) param.use_chanctx = true; else param.use_chanctx = (param.channels > 1); if (info->attrs[HWSIM_ATTR_REG_HINT_ALPHA2]) param.reg_alpha2 = nla_data(info->attrs[HWSIM_ATTR_REG_HINT_ALPHA2]); if (info->attrs[HWSIM_ATTR_REG_CUSTOM_REG]) { u32 idx = nla_get_u32(info->attrs[HWSIM_ATTR_REG_CUSTOM_REG]); if (idx >= ARRAY_SIZE(hwsim_world_regdom_custom)) return -EINVAL; idx = array_index_nospec(idx, ARRAY_SIZE(hwsim_world_regdom_custom)); param.regd = hwsim_world_regdom_custom[idx]; } if (info->attrs[HWSIM_ATTR_PERM_ADDR]) { if (!is_valid_ether_addr( nla_data(info->attrs[HWSIM_ATTR_PERM_ADDR]))) { GENL_SET_ERR_MSG(info,"MAC is no valid source addr"); NL_SET_BAD_ATTR(genl_info_extack(info), info->attrs[HWSIM_ATTR_PERM_ADDR]); return -EINVAL; } param.perm_addr = nla_data(info->attrs[HWSIM_ATTR_PERM_ADDR]); } if (info->attrs[HWSIM_ATTR_IFTYPE_SUPPORT]) { param.iftypes = nla_get_u32(info->attrs[HWSIM_ATTR_IFTYPE_SUPPORT]); if (param.iftypes & ~HWSIM_IFTYPE_SUPPORT_MASK) { NL_SET_ERR_MSG_ATTR(genl_info_extack(info), info->attrs[HWSIM_ATTR_IFTYPE_SUPPORT], "cannot support more iftypes than kernel"); return -EINVAL; } } else { param.iftypes = HWSIM_IFTYPE_SUPPORT_MASK; } /* ensure both flag and iftype support is honored */ if (param.p2p_device || param.iftypes & BIT(NL80211_IFTYPE_P2P_DEVICE)) { param.iftypes |= BIT(NL80211_IFTYPE_P2P_DEVICE); param.p2p_device = true; } if (info->attrs[HWSIM_ATTR_CIPHER_SUPPORT]) { u32 len = nla_len(info->attrs[HWSIM_ATTR_CIPHER_SUPPORT]); param.ciphers = nla_data(info->attrs[HWSIM_ATTR_CIPHER_SUPPORT]); if (len % sizeof(u32)) { NL_SET_ERR_MSG_ATTR(genl_info_extack(info), info->attrs[HWSIM_ATTR_CIPHER_SUPPORT], "bad cipher list length"); return -EINVAL; } param.n_ciphers = len / sizeof(u32); if (param.n_ciphers > ARRAY_SIZE(hwsim_ciphers)) { NL_SET_ERR_MSG_ATTR(genl_info_extack(info), info->attrs[HWSIM_ATTR_CIPHER_SUPPORT], "too many ciphers specified"); return -EINVAL; } if (!hwsim_known_ciphers(param.ciphers, param.n_ciphers)) { NL_SET_ERR_MSG_ATTR(genl_info_extack(info), info->attrs[HWSIM_ATTR_CIPHER_SUPPORT], "unsupported ciphers specified"); 
return -EINVAL; } } if (info->attrs[HWSIM_ATTR_RADIO_NAME]) { hwname = kasprintf(GFP_KERNEL, "%.*s", nla_len(info->attrs[HWSIM_ATTR_RADIO_NAME]), (char *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME])); if (!hwname) return -ENOMEM; param.hwname = hwname; } ret = mac80211_hwsim_new_radio(info, ¶m); kfree(hwname); return ret; } static int hwsim_del_radio_nl(struct sk_buff *msg, struct genl_info *info) { struct mac80211_hwsim_data *data; s64 idx = -1; const char *hwname = NULL; if (info->attrs[HWSIM_ATTR_RADIO_ID]) { idx = nla_get_u32(info->attrs[HWSIM_ATTR_RADIO_ID]); } else if (info->attrs[HWSIM_ATTR_RADIO_NAME]) { hwname = kasprintf(GFP_KERNEL, "%.*s", nla_len(info->attrs[HWSIM_ATTR_RADIO_NAME]), (char *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME])); if (!hwname) return -ENOMEM; } else return -EINVAL; spin_lock_bh(&hwsim_radio_lock); list_for_each_entry(data, &hwsim_radios, list) { if (idx >= 0) { if (data->idx != idx) continue; } else { if (!hwname || strcmp(hwname, wiphy_name(data->hw->wiphy))) continue; } if (!net_eq(wiphy_net(data->hw->wiphy), genl_info_net(info))) continue; list_del(&data->list); rhashtable_remove_fast(&hwsim_radios_rht, &data->rht, hwsim_rht_params); hwsim_radios_generation++; spin_unlock_bh(&hwsim_radio_lock); mac80211_hwsim_del_radio(data, wiphy_name(data->hw->wiphy), info); kfree(hwname); return 0; } spin_unlock_bh(&hwsim_radio_lock); kfree(hwname); return -ENODEV; } static int hwsim_get_radio_nl(struct sk_buff *msg, struct genl_info *info) { struct mac80211_hwsim_data *data; struct sk_buff *skb; int idx, res = -ENODEV; if (!info->attrs[HWSIM_ATTR_RADIO_ID]) return -EINVAL; idx = nla_get_u32(info->attrs[HWSIM_ATTR_RADIO_ID]); spin_lock_bh(&hwsim_radio_lock); list_for_each_entry(data, &hwsim_radios, list) { if (data->idx != idx) continue; if (!net_eq(wiphy_net(data->hw->wiphy), genl_info_net(info))) continue; skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); if (!skb) { res = -ENOMEM; goto out_err; } res = mac80211_hwsim_get_radio(skb, data, genl_info_snd_portid(info), info->snd_seq, NULL, 0); if (res < 0) { nlmsg_free(skb); goto out_err; } genlmsg_reply(skb, info); break; } out_err: spin_unlock_bh(&hwsim_radio_lock); return res; } static int hwsim_dump_radio_nl(struct sk_buff *skb, struct netlink_callback *cb) { int last_idx = cb->args[0] - 1; struct mac80211_hwsim_data *data = NULL; int res = 0; void *hdr; spin_lock_bh(&hwsim_radio_lock); cb->seq = hwsim_radios_generation; if (last_idx >= hwsim_radio_idx-1) goto done; list_for_each_entry(data, &hwsim_radios, list) { if (data->idx <= last_idx) continue; if (!net_eq(wiphy_net(data->hw->wiphy), sock_net(skb->sk))) continue; res = mac80211_hwsim_get_radio(skb, data, NETLINK_CB_PORTID(cb->skb), cb->nlh->nlmsg_seq, cb, NLM_F_MULTI); if (res < 0) break; last_idx = data->idx; } cb->args[0] = last_idx + 1; /* list changed, but no new element sent, set interrupted flag */ if (skb->len == 0 && cb->prev_seq && cb->seq != cb->prev_seq) { hdr = genlmsg_put(skb, NETLINK_CB_PORTID(cb->skb), cb->nlh->nlmsg_seq, &hwsim_genl_family, NLM_F_MULTI, HWSIM_CMD_GET_RADIO); if (!hdr) res = -EMSGSIZE; genl_dump_check_consistent(cb, hdr); genlmsg_end(skb, hdr); } done: spin_unlock_bh(&hwsim_radio_lock); return res ?: skb->len; } /* Generic Netlink operations array */ static __genl_const struct genl_ops hwsim_ops[] = { { .cmd = HWSIM_CMD_REGISTER, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = hwsim_register_received_nl, .flags = GENL_UNS_ADMIN_PERM, }, { .cmd = HWSIM_CMD_FRAME, 
#if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = hwsim_cloned_frame_received_nl, }, { .cmd = HWSIM_CMD_TX_INFO_FRAME, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = hwsim_tx_info_frame_received_nl, }, { .cmd = HWSIM_CMD_NEW_RADIO, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = hwsim_new_radio_nl, .flags = GENL_UNS_ADMIN_PERM, }, { .cmd = HWSIM_CMD_DEL_RADIO, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = hwsim_del_radio_nl, .flags = GENL_UNS_ADMIN_PERM, }, { .cmd = HWSIM_CMD_GET_RADIO, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = hwsim_get_radio_nl, .dumpit = hwsim_dump_radio_nl, }, }; static struct genl_family hwsim_genl_family __genl_ro_after_init = { .name = "MAC80211_HWSIM", .version = 1, .maxattr = HWSIM_ATTR_MAX, .policy = hwsim_genl_policy, .netnsok = true, .module = THIS_MODULE, .ops = hwsim_ops, .n_ops = ARRAY_SIZE(hwsim_ops), .mcgrps = hwsim_mcgrps, .n_mcgrps = ARRAY_SIZE(hwsim_mcgrps), }; static void remove_user_radios(u32 portid) { struct mac80211_hwsim_data *entry, *tmp; LIST_HEAD(list); spin_lock_bh(&hwsim_radio_lock); list_for_each_entry_safe(entry, tmp, &hwsim_radios, list) { if (entry->destroy_on_close && entry->portid == portid) { list_move(&entry->list, &list); rhashtable_remove_fast(&hwsim_radios_rht, &entry->rht, hwsim_rht_params); hwsim_radios_generation++; } } spin_unlock_bh(&hwsim_radio_lock); list_for_each_entry_safe(entry, tmp, &list, list) { list_del(&entry->list); mac80211_hwsim_del_radio(entry, wiphy_name(entry->hw->wiphy), NULL); } } static int mac80211_hwsim_netlink_notify(struct notifier_block *nb, unsigned long state, void *_notify) { struct netlink_notify *notify = _notify; if (state != NETLINK_URELEASE) return NOTIFY_DONE; remove_user_radios(netlink_notify_portid(notify)); if (netlink_notify_portid(notify) == hwsim_net_get_wmediumd(notify->net)) { printk(KERN_INFO "mac80211_hwsim: wmediumd released netlink" " socket, switching to perfect channel medium\n"); hwsim_register_wmediumd(notify->net, 0); } return NOTIFY_DONE; } static struct notifier_block hwsim_netlink_notifier = { .notifier_call = mac80211_hwsim_netlink_notify, }; static int __init hwsim_init_netlink(void) { int rc; printk(KERN_INFO "mac80211_hwsim: initializing netlink\n"); rc = genl_register_family(&hwsim_genl_family); if (rc) goto failure; rc = netlink_register_notifier(&hwsim_netlink_notifier); if (rc) { genl_unregister_family(&hwsim_genl_family); goto failure; } return 0; failure: pr_debug("mac80211_hwsim: error occurred in %s\n", __func__); return -EINVAL; } static __net_init int hwsim_init_net(struct net *net) { return hwsim_net_set_netgroup(net); } static void __net_exit hwsim_exit_net(struct net *net) { struct mac80211_hwsim_data *data, *tmp; LIST_HEAD(list); spin_lock_bh(&hwsim_radio_lock); list_for_each_entry_safe(data, tmp, &hwsim_radios, list) { if (!net_eq(wiphy_net(data->hw->wiphy), net)) continue; /* Radios created in init_net are returned to init_net. 
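* Such radios are skipped here and remain registered.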
*/ if (data->netgroup == hwsim_net_get_netgroup(&init_net)) continue; list_move(&data->list, &list); rhashtable_remove_fast(&hwsim_radios_rht, &data->rht, hwsim_rht_params); hwsim_radios_generation++; } spin_unlock_bh(&hwsim_radio_lock); list_for_each_entry_safe(data, tmp, &list, list) { list_del(&data->list); mac80211_hwsim_del_radio(data, wiphy_name(data->hw->wiphy), NULL); } ida_simple_remove(&hwsim_netgroup_ida, hwsim_net_get_netgroup(net)); } static struct pernet_operations hwsim_net_ops = { .init = hwsim_init_net, .exit = hwsim_exit_net, .id = &hwsim_net_id, .size = sizeof(struct hwsim_net), }; static void hwsim_exit_netlink(void) { /* unregister the notifier */ netlink_unregister_notifier(&hwsim_netlink_notifier); /* unregister the family */ genl_unregister_family(&hwsim_genl_family); } static int __init init_mac80211_hwsim(void) { int i, err; if (radios < 0 || radios > 100) return -EINVAL; if (channels < 1) return -EINVAL; spin_lock_init(&hwsim_radio_lock); err = rhashtable_init(&hwsim_radios_rht, &hwsim_rht_params); if (err) return err; err = register_pernet_device(&hwsim_net_ops); if (err) goto out_free_rht; err = platform_driver_register(&mac80211_hwsim_driver); if (err) goto out_unregister_pernet; err = hwsim_init_netlink(); if (err) goto out_unregister_driver; hwsim_class = class_create(THIS_MODULE, "mac80211_hwsim"); if (IS_ERR(hwsim_class)) { err = PTR_ERR(hwsim_class); goto out_exit_netlink; } for (i = 0; i < radios; i++) { struct hwsim_new_radio_params param = { 0 }; param.channels = channels; switch (regtest) { case HWSIM_REGTEST_DIFF_COUNTRY: if (i < ARRAY_SIZE(hwsim_alpha2s)) param.reg_alpha2 = hwsim_alpha2s[i]; break; case HWSIM_REGTEST_DRIVER_REG_FOLLOW: if (!i) param.reg_alpha2 = hwsim_alpha2s[0]; break; case HWSIM_REGTEST_STRICT_ALL: param.reg_strict = true; case HWSIM_REGTEST_DRIVER_REG_ALL: param.reg_alpha2 = hwsim_alpha2s[0]; break; case HWSIM_REGTEST_WORLD_ROAM: if (i == 0) param.regd = &hwsim_world_regdom_custom_01; break; case HWSIM_REGTEST_CUSTOM_WORLD: param.regd = &hwsim_world_regdom_custom_01; break; case HWSIM_REGTEST_CUSTOM_WORLD_2: if (i == 0) param.regd = &hwsim_world_regdom_custom_01; else if (i == 1) param.regd = &hwsim_world_regdom_custom_02; break; case HWSIM_REGTEST_STRICT_FOLLOW: if (i == 0) { param.reg_strict = true; param.reg_alpha2 = hwsim_alpha2s[0]; } break; case HWSIM_REGTEST_STRICT_AND_DRIVER_REG: if (i == 0) { param.reg_strict = true; param.reg_alpha2 = hwsim_alpha2s[0]; } else if (i == 1) { param.reg_alpha2 = hwsim_alpha2s[1]; } break; case HWSIM_REGTEST_ALL: switch (i) { case 0: param.regd = &hwsim_world_regdom_custom_01; break; case 1: param.regd = &hwsim_world_regdom_custom_02; break; case 2: param.reg_alpha2 = hwsim_alpha2s[0]; break; case 3: param.reg_alpha2 = hwsim_alpha2s[1]; break; case 4: param.reg_strict = true; param.reg_alpha2 = hwsim_alpha2s[2]; break; } break; default: break; } param.p2p_device = support_p2p_device; param.use_chanctx = channels > 1; param.iftypes = HWSIM_IFTYPE_SUPPORT_MASK; if (param.p2p_device) param.iftypes |= BIT(NL80211_IFTYPE_P2P_DEVICE); err = mac80211_hwsim_new_radio(NULL, ¶m); if (err < 0) goto out_free_radios; } hwsim_mon = alloc_netdev(0, "hwsim%d", NET_NAME_UNKNOWN, hwsim_mon_setup); if (hwsim_mon == NULL) { err = -ENOMEM; goto out_free_radios; } rtnl_lock(); err = dev_alloc_name(hwsim_mon, hwsim_mon->name); if (err < 0) { rtnl_unlock(); goto out_free_radios; } err = register_netdevice(hwsim_mon); if (err < 0) { rtnl_unlock(); goto out_free_mon; } rtnl_unlock(); return 0; out_free_mon: 
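/* Error unwind: each label below releases one setup step and falls * through to undo the earlier ones, so teardown happens in reverse * order of initialization. */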
free_netdev(hwsim_mon); out_free_radios: mac80211_hwsim_free(); out_exit_netlink: hwsim_exit_netlink(); out_unregister_driver: platform_driver_unregister(&mac80211_hwsim_driver); out_unregister_pernet: unregister_pernet_device(&hwsim_net_ops); out_free_rht: rhashtable_destroy(&hwsim_radios_rht); return err; } module_init(init_mac80211_hwsim); static void __exit exit_mac80211_hwsim(void) { pr_debug("mac80211_hwsim: unregister radios\n"); hwsim_exit_netlink(); mac80211_hwsim_free(); rhashtable_destroy(&hwsim_radios_rht); unregister_netdev(hwsim_mon); platform_driver_unregister(&mac80211_hwsim_driver); unregister_pernet_device(&hwsim_net_ops); } module_exit(exit_mac80211_hwsim); backport-iwlwifi-dkms-7906/drivers/net/wireless/mac80211_hwsim.h000066400000000000000000000223001351064634500245130ustar00rootroot00000000000000/* * mac80211_hwsim - software simulator of 802.11 radio(s) for mac80211 * Copyright (c) 2008, Jouni Malinen * Copyright (c) 2011, Javier Lopez * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #ifndef __MAC80211_HWSIM_H #define __MAC80211_HWSIM_H /** * enum hwsim_tx_control_flags - flags to describe transmission info/status * * These flags are used to give the wmediumd extra information in order to * modify its behavior for each frame * * @HWSIM_TX_CTL_REQ_TX_STATUS: require TX status callback for this frame. * @HWSIM_TX_CTL_NO_ACK: tell the wmediumd not to wait for an ack * @HWSIM_TX_STAT_ACK: Frame was acknowledged * */ enum hwsim_tx_control_flags { HWSIM_TX_CTL_REQ_TX_STATUS = BIT(0), HWSIM_TX_CTL_NO_ACK = BIT(1), HWSIM_TX_STAT_ACK = BIT(2), }; /** * DOC: Frame transmission/registration support * * Frame transmission and registration support exists to allow userspace * entities such as wmediumd to receive and process all broadcasted * frames from a mac80211_hwsim radio device. * * This allows user space applications to decide if the frame should be * dropped or not and to implement a wireless medium simulator at user space. * * Registration is done by sending a register message to the driver; the * registration is automatically removed if the user application doesn't * respond to sent frames. * Once registered the user application has to take responsibility for * broadcasting the frames to all listening mac80211_hwsim radio * interfaces. * * For more technical details, see the corresponding command descriptions * below. */ /** * enum hwsim_commands - supported hwsim commands * * @HWSIM_CMD_UNSPEC: unspecified command to catch errors * * @HWSIM_CMD_REGISTER: request to register and receive all broadcasted * frames by any mac80211_hwsim radio device. 
* @HWSIM_CMD_FRAME: send/receive a broadcasted frame from/to kernel/user * space, uses: * %HWSIM_ATTR_ADDR_TRANSMITTER, %HWSIM_ATTR_ADDR_RECEIVER, * %HWSIM_ATTR_FRAME, %HWSIM_ATTR_FLAGS, %HWSIM_ATTR_RX_RATE, * %HWSIM_ATTR_SIGNAL, %HWSIM_ATTR_COOKIE, %HWSIM_ATTR_FREQ (optional) * @HWSIM_CMD_TX_INFO_FRAME: Transmission info report from user space to * kernel, uses: * %HWSIM_ATTR_ADDR_TRANSMITTER, %HWSIM_ATTR_FLAGS, * %HWSIM_ATTR_TX_INFO, %HWSIM_ATTR_TX_INFO_FLAGS, * %HWSIM_ATTR_SIGNAL, %HWSIM_ATTR_COOKIE * @HWSIM_CMD_NEW_RADIO: create a new radio with the given parameters, * returns the radio ID (>= 0) or negative on errors, if successful * then multicast the result, uses optional parameters: * %HWSIM_ATTR_REG_STRICT_REG, %HWSIM_ATTR_SUPPORT_P2P_DEVICE, * %HWSIM_ATTR_DESTROY_RADIO_ON_CLOSE, %HWSIM_ATTR_CHANNELS, * %HWSIM_ATTR_NO_VIF, %HWSIM_ATTR_RADIO_NAME, %HWSIM_ATTR_USE_CHANCTX, * %HWSIM_ATTR_REG_HINT_ALPHA2, %HWSIM_ATTR_REG_CUSTOM_REG, * %HWSIM_ATTR_PERM_ADDR * @HWSIM_CMD_DEL_RADIO: destroy a radio, reply is multicasted * @HWSIM_CMD_GET_RADIO: fetch information about existing radios, uses: * %HWSIM_ATTR_RADIO_ID * @__HWSIM_CMD_MAX: enum limit */ enum { HWSIM_CMD_UNSPEC, HWSIM_CMD_REGISTER, HWSIM_CMD_FRAME, HWSIM_CMD_TX_INFO_FRAME, HWSIM_CMD_NEW_RADIO, HWSIM_CMD_DEL_RADIO, HWSIM_CMD_GET_RADIO, __HWSIM_CMD_MAX, }; #define HWSIM_CMD_MAX (__HWSIM_CMD_MAX - 1) #define HWSIM_CMD_CREATE_RADIO HWSIM_CMD_NEW_RADIO #define HWSIM_CMD_DESTROY_RADIO HWSIM_CMD_DEL_RADIO /** * enum hwsim_attrs - hwsim netlink attributes * * @HWSIM_ATTR_UNSPEC: unspecified attribute to catch errors * * @HWSIM_ATTR_ADDR_RECEIVER: MAC address of the radio device that * the frame is broadcasted to * @HWSIM_ATTR_ADDR_TRANSMITTER: MAC address of the radio device that * the frame was broadcasted from * @HWSIM_ATTR_FRAME: Data array * @HWSIM_ATTR_FLAGS: mac80211 transmission flags, used to process the * frame properly at user space * @HWSIM_ATTR_RX_RATE: estimated rx rate index for this frame at user space * @HWSIM_ATTR_SIGNAL: estimated RX signal for this frame at user space * @HWSIM_ATTR_TX_INFO: ieee80211_tx_rate array * @HWSIM_ATTR_COOKIE: sk_buff cookie to identify the frame * @HWSIM_ATTR_CHANNELS: u32 attribute used with the %HWSIM_CMD_CREATE_RADIO * command giving the number of channels supported by the new radio * @HWSIM_ATTR_RADIO_ID: u32 attribute used with %HWSIM_CMD_DESTROY_RADIO * only to destroy a radio * @HWSIM_ATTR_REG_HINT_ALPHA2: alpha2 for regulatory driver hint * (nla string, length 2) * @HWSIM_ATTR_REG_CUSTOM_REG: custom regulatory domain index (u32 attribute) * @HWSIM_ATTR_REG_STRICT_REG: request REGULATORY_STRICT_REG (flag attribute) * @HWSIM_ATTR_SUPPORT_P2P_DEVICE: support P2P Device virtual interface (flag) * @HWSIM_ATTR_USE_CHANCTX: used with the %HWSIM_CMD_CREATE_RADIO * command to force use of channel contexts even when only a * single channel is supported * @HWSIM_ATTR_DESTROY_RADIO_ON_CLOSE: used with the %HWSIM_CMD_CREATE_RADIO * command to force radio removal when the process that created the radio dies * @HWSIM_ATTR_RADIO_NAME: Name of radio, e.g. phy666 * @HWSIM_ATTR_NO_VIF: Do not create vif (wlanX) when creating radio. * @HWSIM_ATTR_FREQ: Frequency at which packet is transmitted or received. 
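* @HWSIM_ATTR_PAD: padding attribute used to align 64-bit netlink * attributes; carries no data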
* @HWSIM_ATTR_TX_INFO_FLAGS: additional flags for corresponding * rates of %HWSIM_ATTR_TX_INFO * @HWSIM_ATTR_PERM_ADDR: permanent mac address of new radio * @HWSIM_ATTR_IFTYPE_SUPPORT: u32 attribute of supported interface types bits * @HWSIM_ATTR_CIPHER_SUPPORT: u32 array of supported cipher types * @__HWSIM_ATTR_MAX: enum limit */ enum { HWSIM_ATTR_UNSPEC, HWSIM_ATTR_ADDR_RECEIVER, HWSIM_ATTR_ADDR_TRANSMITTER, HWSIM_ATTR_FRAME, HWSIM_ATTR_FLAGS, HWSIM_ATTR_RX_RATE, HWSIM_ATTR_SIGNAL, HWSIM_ATTR_TX_INFO, HWSIM_ATTR_COOKIE, HWSIM_ATTR_CHANNELS, HWSIM_ATTR_RADIO_ID, HWSIM_ATTR_REG_HINT_ALPHA2, HWSIM_ATTR_REG_CUSTOM_REG, HWSIM_ATTR_REG_STRICT_REG, HWSIM_ATTR_SUPPORT_P2P_DEVICE, HWSIM_ATTR_USE_CHANCTX, HWSIM_ATTR_DESTROY_RADIO_ON_CLOSE, HWSIM_ATTR_RADIO_NAME, HWSIM_ATTR_NO_VIF, HWSIM_ATTR_FREQ, HWSIM_ATTR_PAD, HWSIM_ATTR_TX_INFO_FLAGS, HWSIM_ATTR_PERM_ADDR, HWSIM_ATTR_IFTYPE_SUPPORT, HWSIM_ATTR_CIPHER_SUPPORT, __HWSIM_ATTR_MAX, }; #define HWSIM_ATTR_MAX (__HWSIM_ATTR_MAX - 1) /** * struct hwsim_tx_rate - rate selection/status * * @idx: rate index to attempt to send with * @count: number of tries in this rate before going to the next rate * * A value of -1 for @idx indicates an invalid rate and, if used * in an array of retry rates, that no more rates should be tried. * * When used for transmit status reporting, the driver should * always report the rate and number of retries used. * */ struct hwsim_tx_rate { s8 idx; u8 count; } __packed; /** * enum hwsim_tx_rate_flags - per-rate flags set by the rate control algorithm. * Inspired by structure mac80211_rate_control_flags. New flags may be * appended, but old flags not deleted, to keep compatibility for * userspace. * * These flags are set by the Rate control algorithm for each rate during tx, * in the @flags member of struct ieee80211_tx_rate. * * @MAC80211_HWSIM_TX_RC_USE_RTS_CTS: Use RTS/CTS exchange for this rate. * @MAC80211_HWSIM_TX_RC_USE_CTS_PROTECT: CTS-to-self protection is required. * This is set if the current BSS requires ERP protection. * @MAC80211_HWSIM_TX_RC_USE_SHORT_PREAMBLE: Use short preamble. * @MAC80211_HWSIM_TX_RC_MCS: HT rate. * @MAC80211_HWSIM_TX_RC_VHT_MCS: VHT MCS rate, in this case the idx field is * split into a higher 4 bits (Nss) and lower 4 bits (MCS number) * @MAC80211_HWSIM_TX_RC_GREEN_FIELD: Indicates whether this rate should be used * in Greenfield mode. * @MAC80211_HWSIM_TX_RC_40_MHZ_WIDTH: Indicates if the Channel Width should be * 40 MHz. * @MAC80211_HWSIM_TX_RC_80_MHZ_WIDTH: Indicates 80 MHz transmission * @MAC80211_HWSIM_TX_RC_160_MHZ_WIDTH: Indicates 160 MHz transmission * (80+80 isn't supported yet) * @MAC80211_HWSIM_TX_RC_DUP_DATA: The frame should be transmitted on both of * the adjacent 20 MHz channels, if the current channel type is * NL80211_CHAN_HT40MINUS or NL80211_CHAN_HT40PLUS. * @MAC80211_HWSIM_TX_RC_SHORT_GI: Short Guard interval should be used for this * rate. 
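* * The flags for each rate are carried in %HWSIM_ATTR_TX_INFO_FLAGS as an * array of &struct hwsim_tx_rate_flag entries, matching the rates in * %HWSIM_ATTR_TX_INFO by index.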
*/ enum hwsim_tx_rate_flags { MAC80211_HWSIM_TX_RC_USE_RTS_CTS = BIT(0), MAC80211_HWSIM_TX_RC_USE_CTS_PROTECT = BIT(1), MAC80211_HWSIM_TX_RC_USE_SHORT_PREAMBLE = BIT(2), /* rate index is an HT/VHT MCS instead of an index */ MAC80211_HWSIM_TX_RC_MCS = BIT(3), MAC80211_HWSIM_TX_RC_GREEN_FIELD = BIT(4), MAC80211_HWSIM_TX_RC_40_MHZ_WIDTH = BIT(5), MAC80211_HWSIM_TX_RC_DUP_DATA = BIT(6), MAC80211_HWSIM_TX_RC_SHORT_GI = BIT(7), MAC80211_HWSIM_TX_RC_VHT_MCS = BIT(8), MAC80211_HWSIM_TX_RC_80_MHZ_WIDTH = BIT(9), MAC80211_HWSIM_TX_RC_160_MHZ_WIDTH = BIT(10), }; /** * struct hwsim_tx_rate_flag - per-rate flags * * @idx: rate index the flags apply to * @flags: the %MAC80211_HWSIM_TX_RC_* flags for this rate, * see &enum hwsim_tx_rate_flags * * A value of -1 for @idx indicates an invalid rate and, if used * in an array of retry rates, that no more rates should be tried. * */ struct hwsim_tx_rate_flag { s8 idx; u16 flags; } __packed; #endif /* __MAC80211_HWSIM_H */ backport-iwlwifi-dkms-7906/include/000077500000000000000000000000001351064634500173245ustar00rootroot00000000000000backport-iwlwifi-dkms-7906/include/crypto/000077500000000000000000000000001351064634500206445ustar00rootroot00000000000000backport-iwlwifi-dkms-7906/include/crypto/backport-pkcs7.h000066400000000000000000000023301351064634500236470ustar00rootroot00000000000000/* PKCS#7 crypto data parser * * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public Licence * as published by the Free Software Foundation; either version * 2 of the Licence, or (at your option) any later version. */ #ifndef _CRYPTO_PKCS7_H #define _CRYPTO_PKCS7_H #include #include struct key; struct pkcs7_message; /* * pkcs7_parser.c */ extern struct pkcs7_message *pkcs7_parse_message(const void *data, size_t datalen); extern void pkcs7_free_message(struct pkcs7_message *pkcs7); extern int pkcs7_get_content_data(const struct pkcs7_message *pkcs7, const void **_data, size_t *_datalen, size_t *_headerlen); /* * pkcs7_trust.c */ extern int pkcs7_validate_trust(struct pkcs7_message *pkcs7, struct key *trust_keyring); /* * pkcs7_verify.c */ extern int pkcs7_verify(struct pkcs7_message *pkcs7, enum key_being_used_for usage); extern int pkcs7_supply_detached_data(struct pkcs7_message *pkcs7, const void *data, size_t datalen); #endif /* _CRYPTO_PKCS7_H */ backport-iwlwifi-dkms-7906/include/crypto/backport-public_key.h000066400000000000000000000027351351064634500247530ustar00rootroot00000000000000/* Asymmetric public-key algorithm definitions * * See Documentation/crypto/asymmetric-keys.txt * * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public Licence * as published by the Free Software Foundation; either version * 2 of the Licence, or (at your option) any later version. */ #ifndef _LINUX_PUBLIC_KEY_H #define _LINUX_PUBLIC_KEY_H #include /* * Cryptographic data for the public-key subtype of the asymmetric key type. * * Note that this may include the private part of the key as well as the public * part. 
*/ struct public_key { void *key; u32 keylen; bool key_is_private; const char *id_type; const char *pkey_algo; }; extern void public_key_free(struct public_key *key); /* * Public key cryptography signature data */ struct public_key_signature { struct asymmetric_key_id *auth_ids[2]; u8 *s; /* Signature */ u32 s_size; /* Number of bytes in signature */ u8 *digest; u8 digest_size; /* Number of bytes in digest */ const char *pkey_algo; const char *hash_algo; const char *encoding; }; extern void public_key_signature_free(struct public_key_signature *sig); extern struct asymmetric_key_subtype public_key_subtype; int public_key_verify_signature(const struct public_key *pkey, const struct public_key_signature *sig); #endif /* _LINUX_PUBLIC_KEY_H */ backport-iwlwifi-dkms-7906/include/crypto/pkcs7.h000066400000000000000000000007611351064634500220500ustar00rootroot00000000000000/* Automatically created during backport process */ #ifndef CPTCFG_BPAUTO_PKCS7 #include_next #else #define pkcs7_verify LINUX_BACKPORT(pkcs7_verify) #define pkcs7_get_content_data LINUX_BACKPORT(pkcs7_get_content_data) #define pkcs7_parse_message LINUX_BACKPORT(pkcs7_parse_message) #define pkcs7_free_message LINUX_BACKPORT(pkcs7_free_message) #define pkcs7_validate_trust LINUX_BACKPORT(pkcs7_validate_trust) #include #endif /* CPTCFG_BPAUTO_PKCS7 */ backport-iwlwifi-dkms-7906/include/crypto/public_key.h000066400000000000000000000003151351064634500231420ustar00rootroot00000000000000/* Automatically created during backport process */ #ifndef CPTCFG_BPAUTO_PUBLIC_KEY #include_next #else #include #endif /* CPTCFG_BPAUTO_PUBLIC_KEY */ backport-iwlwifi-dkms-7906/include/linux/000077500000000000000000000000001351064634500204635ustar00rootroot00000000000000backport-iwlwifi-dkms-7906/include/linux/asn1.h000066400000000000000000000037671351064634500215130ustar00rootroot00000000000000/* ASN.1 BER/DER/CER encoding definitions * * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public Licence * as published by the Free Software Foundation; either version * 2 of the Licence, or (at your option) any later version. 
*/ #ifndef _LINUX_ASN1_H #define _LINUX_ASN1_H /* Class */ enum asn1_class { ASN1_UNIV = 0, /* Universal */ ASN1_APPL = 1, /* Application */ ASN1_CONT = 2, /* Context */ ASN1_PRIV = 3 /* Private */ }; #define ASN1_CLASS_BITS 0xc0 enum asn1_method { ASN1_PRIM = 0, /* Primitive */ ASN1_CONS = 1 /* Constructed */ }; #define ASN1_CONS_BIT 0x20 /* Tag */ enum asn1_tag { ASN1_EOC = 0, /* End Of Contents or N/A */ ASN1_BOOL = 1, /* Boolean */ ASN1_INT = 2, /* Integer */ ASN1_BTS = 3, /* Bit String */ ASN1_OTS = 4, /* Octet String */ ASN1_NULL = 5, /* Null */ ASN1_OID = 6, /* Object Identifier */ ASN1_ODE = 7, /* Object Description */ ASN1_EXT = 8, /* External */ ASN1_REAL = 9, /* Real float */ ASN1_ENUM = 10, /* Enumerated */ ASN1_EPDV = 11, /* Embedded PDV */ ASN1_UTF8STR = 12, /* UTF8 String */ ASN1_RELOID = 13, /* Relative OID */ /* 14 - Reserved */ /* 15 - Reserved */ ASN1_SEQ = 16, /* Sequence and Sequence of */ ASN1_SET = 17, /* Set and Set of */ ASN1_NUMSTR = 18, /* Numerical String */ ASN1_PRNSTR = 19, /* Printable String */ ASN1_TEXSTR = 20, /* T61 String / Teletext String */ ASN1_VIDSTR = 21, /* Videotex String */ ASN1_IA5STR = 22, /* IA5 String */ ASN1_UNITIM = 23, /* Universal Time */ ASN1_GENTIM = 24, /* General Time */ ASN1_GRASTR = 25, /* Graphic String */ ASN1_VISSTR = 26, /* Visible String */ ASN1_GENSTR = 27, /* General String */ ASN1_UNISTR = 28, /* Universal String */ ASN1_CHRSTR = 29, /* Character String */ ASN1_BMPSTR = 30, /* BMP String */ ASN1_LONG_TAG = 31 /* Long form tag */ }; #define ASN1_INDEFINITE_LENGTH 0x80 #endif /* _LINUX_ASN1_H */ backport-iwlwifi-dkms-7906/include/linux/asn1_ber_bytecode.h000066400000000000000000000053371351064634500242140ustar00rootroot00000000000000/* ASN.1 BER/DER/CER parsing state machine internal definitions * * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public Licence * as published by the Free Software Foundation; either version * 2 of the Licence, or (at your option) any later version. */ #ifndef _LINUX_ASN1_BER_BYTECODE_H #define _LINUX_ASN1_BER_BYTECODE_H #ifdef __KERNEL__ #include #endif #include typedef int (*asn1_action_t)(void *context, size_t hdrlen, /* In case of ANY type */ unsigned char tag, /* In case of ANY type */ const void *value, size_t vlen); struct asn1_decoder { const unsigned char *machine; size_t machlen; const asn1_action_t *actions; }; enum asn1_opcode { /* The tag-matching ops come first and the odd-numbered slots * are for OR_SKIP ops. 
*/ #define ASN1_OP_MATCH__SKIP 0x01 #define ASN1_OP_MATCH__ACT 0x02 #define ASN1_OP_MATCH__JUMP 0x04 #define ASN1_OP_MATCH__ANY 0x08 #define ASN1_OP_MATCH__COND 0x10 ASN1_OP_MATCH = 0x00, ASN1_OP_MATCH_OR_SKIP = 0x01, ASN1_OP_MATCH_ACT = 0x02, ASN1_OP_MATCH_ACT_OR_SKIP = 0x03, ASN1_OP_MATCH_JUMP = 0x04, ASN1_OP_MATCH_JUMP_OR_SKIP = 0x05, ASN1_OP_MATCH_ANY = 0x08, ASN1_OP_MATCH_ANY_OR_SKIP = 0x09, ASN1_OP_MATCH_ANY_ACT = 0x0a, ASN1_OP_MATCH_ANY_ACT_OR_SKIP = 0x0b, /* Everything before here matches unconditionally */ ASN1_OP_COND_MATCH_OR_SKIP = 0x11, ASN1_OP_COND_MATCH_ACT_OR_SKIP = 0x13, ASN1_OP_COND_MATCH_JUMP_OR_SKIP = 0x15, ASN1_OP_COND_MATCH_ANY = 0x18, ASN1_OP_COND_MATCH_ANY_OR_SKIP = 0x19, ASN1_OP_COND_MATCH_ANY_ACT = 0x1a, ASN1_OP_COND_MATCH_ANY_ACT_OR_SKIP = 0x1b, /* Everything before here will want a tag from the data */ #define ASN1_OP__MATCHES_TAG ASN1_OP_COND_MATCH_ANY_ACT_OR_SKIP /* These are here to help fill up space */ ASN1_OP_COND_FAIL = 0x1c, ASN1_OP_COMPLETE = 0x1d, ASN1_OP_ACT = 0x1e, ASN1_OP_MAYBE_ACT = 0x1f, /* The following eight have bit 0 -> SET, 1 -> OF, 2 -> ACT */ ASN1_OP_END_SEQ = 0x20, ASN1_OP_END_SET = 0x21, ASN1_OP_END_SEQ_OF = 0x22, ASN1_OP_END_SET_OF = 0x23, ASN1_OP_END_SEQ_ACT = 0x24, ASN1_OP_END_SET_ACT = 0x25, ASN1_OP_END_SEQ_OF_ACT = 0x26, ASN1_OP_END_SET_OF_ACT = 0x27, #define ASN1_OP_END__SET 0x01 #define ASN1_OP_END__OF 0x02 #define ASN1_OP_END__ACT 0x04 ASN1_OP_RETURN = 0x28, ASN1_OP__NR }; #define _tag(CLASS, CP, TAG) ((ASN1_##CLASS << 6) | (ASN1_##CP << 5) | ASN1_##TAG) #define _tagn(CLASS, CP, TAG) ((ASN1_##CLASS << 6) | (ASN1_##CP << 5) | TAG) #define _jump_target(N) (N) #define _action(N) (N) #endif /* _LINUX_ASN1_BER_BYTECODE_H */ backport-iwlwifi-dkms-7906/include/linux/asn1_decoder.h000066400000000000000000000004451351064634500231660ustar00rootroot00000000000000/* Automatically created during backport process */ #ifndef CPTCFG_BPAUTO_ASN1_DECODER #include_next #else #undef asn1_ber_decoder #define asn1_ber_decoder LINUX_BACKPORT(asn1_ber_decoder) #include #endif /* CPTCFG_BPAUTO_ASN1_DECODER */ backport-iwlwifi-dkms-7906/include/linux/average.h000066400000000000000000000046611351064634500222550ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_AVERAGE_H #define _LINUX_AVERAGE_H #include #include #include /* * Exponentially weighted moving average (EWMA) * * This implements a fixed-precision EWMA algorithm, with both the * precision and fall-off coefficient determined at compile-time * and built into the generated helper funtions. * * The first argument to the macro is the name that will be used * for the struct and helper functions. * * The second argument, the precision, expresses how many bits are * used for the fractional part of the fixed-precision values. * * The third argument, the weight reciprocal, determines how the * new values will be weighed vs. the old state, new values will * get weight 1/weight_rcp and old values 1-1/weight_rcp. Note * that this parameter must be a power of two for efficiency. */ #define DECLARE_EWMA(name, _precision, _weight_rcp) \ struct ewma_##name { \ unsigned long internal; \ }; \ static inline void ewma_##name##_init(struct ewma_##name *e) \ { \ BUILD_BUG_ON(!__builtin_constant_p(_precision)); \ BUILD_BUG_ON(!__builtin_constant_p(_weight_rcp)); \ /* \ * Even if you want to feed it just 0/1 you should have \ * some bits for the non-fractional part... 
\ */ \ BUILD_BUG_ON((_precision) > 30); \ BUILD_BUG_ON_NOT_POWER_OF_2(_weight_rcp); \ e->internal = 0; \ } \ static inline unsigned long \ ewma_##name##_read(struct ewma_##name *e) \ { \ BUILD_BUG_ON(!__builtin_constant_p(_precision)); \ BUILD_BUG_ON(!__builtin_constant_p(_weight_rcp)); \ BUILD_BUG_ON((_precision) > 30); \ BUILD_BUG_ON_NOT_POWER_OF_2(_weight_rcp); \ return e->internal >> (_precision); \ } \ static inline void ewma_##name##_add(struct ewma_##name *e, \ unsigned long val) \ { \ unsigned long internal = READ_ONCE(e->internal); \ unsigned long weight_rcp = ilog2(_weight_rcp); \ unsigned long precision = _precision; \ \ BUILD_BUG_ON(!__builtin_constant_p(_precision)); \ BUILD_BUG_ON(!__builtin_constant_p(_weight_rcp)); \ BUILD_BUG_ON((_precision) > 30); \ BUILD_BUG_ON_NOT_POWER_OF_2(_weight_rcp); \ \ WRITE_ONCE(e->internal, internal ? \ (((internal << weight_rcp) - internal) + \ (val << precision)) >> weight_rcp : \ (val << precision)); \ } #endif /* _LINUX_AVERAGE_H */ backport-iwlwifi-dkms-7906/include/linux/backport-asn1_decoder.h000066400000000000000000000012431351064634500247660ustar00rootroot00000000000000/* ASN.1 decoder * * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public Licence * as published by the Free Software Foundation; either version * 2 of the Licence, or (at your option) any later version. */ #ifndef _LINUX_ASN1_DECODER_H #define _LINUX_ASN1_DECODER_H #include struct asn1_decoder; extern int asn1_ber_decoder(const struct asn1_decoder *decoder, void *context, const unsigned char *data, size_t datalen); #endif /* _LINUX_ASN1_DECODER_H */ backport-iwlwifi-dkms-7906/include/linux/backport-devcoredump.h000066400000000000000000000054711351064634500247630ustar00rootroot00000000000000/* * This file is provided under the GPLv2 license. * * GPL LICENSE SUMMARY * * Copyright(c) 2015 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. */ #ifndef __DEVCOREDUMP_H #define __DEVCOREDUMP_H #include #include #include #include #include /* * _devcd_free_sgtable - free all the memory of the given scatterlist table * (i.e. 
both pages and scatterlist instances) * NOTE: if two tables allocated and chained using the sg_chain function then * this function should be called only once on the first table * @table: pointer to sg_table to free */ static inline void _devcd_free_sgtable(struct scatterlist *table) { int i; struct page *page; struct scatterlist *iter; struct scatterlist *delete_iter; /* free pages */ iter = table; for_each_sg(table, iter, sg_nents(table), i) { page = sg_page(iter); if (page) __free_page(page); } /* then free all chained tables */ iter = table; delete_iter = table; /* always points on a head of a table */ while (!sg_is_last(iter)) { iter++; if (sg_is_chain(iter)) { iter = sg_chain_ptr(iter); kfree(delete_iter); delete_iter = iter; } } /* free the last table */ kfree(delete_iter); } #ifdef CPTCFG_BPAUTO_WANT_DEV_COREDUMP void dev_coredumpv(struct device *dev, void *data, size_t datalen, gfp_t gfp); void dev_coredumpm(struct device *dev, struct module *owner, void *data, size_t datalen, gfp_t gfp, ssize_t (*read)(char *buffer, loff_t offset, size_t count, void *data, size_t datalen), void (*free)(void *data)); void dev_coredumpsg(struct device *dev, struct scatterlist *table, size_t datalen, gfp_t gfp); #else static inline void dev_coredumpv(struct device *dev, void *data, size_t datalen, gfp_t gfp) { vfree(data); } static inline void dev_coredumpm(struct device *dev, struct module *owner, void *data, size_t datalen, gfp_t gfp, ssize_t (*read)(char *buffer, loff_t offset, size_t count, void *data, size_t datalen), void (*free)(void *data)) { free(data); } static inline void dev_coredumpsg(struct device *dev, struct scatterlist *table, size_t datalen, gfp_t gfp) { _devcd_free_sgtable(table); } #endif /* CPTCFG_BPAUTO_WANT_DEV_COREDUMP */ #endif /* __DEVCOREDUMP_H */ backport-iwlwifi-dkms-7906/include/linux/backport-oid_registry.h000066400000000000000000000075761351064634500251610ustar00rootroot00000000000000/* ASN.1 Object identifier (OID) registry * * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public Licence * as published by the Free Software Foundation; either version * 2 of the Licence, or (at your option) any later version. */ #ifndef _LINUX_OID_REGISTRY_H #define _LINUX_OID_REGISTRY_H #include /* * OIDs are turned into these values if possible, or OID__NR if not held here. * * NOTE! Do not mess with the format of each line as this is read by * build_OID_registry.pl to generate the data for look_up_OID(). 
*/ enum OID { OID_id_dsa_with_sha1, /* 1.2.840.10030.4.3 */ OID_id_dsa, /* 1.2.840.10040.4.1 */ OID_id_ecdsa_with_sha1, /* 1.2.840.10045.4.1 */ OID_id_ecPublicKey, /* 1.2.840.10045.2.1 */ /* PKCS#1 {iso(1) member-body(2) us(840) rsadsi(113549) pkcs(1) pkcs-1(1)} */ OID_rsaEncryption, /* 1.2.840.113549.1.1.1 */ OID_md2WithRSAEncryption, /* 1.2.840.113549.1.1.2 */ OID_md3WithRSAEncryption, /* 1.2.840.113549.1.1.3 */ OID_md4WithRSAEncryption, /* 1.2.840.113549.1.1.4 */ OID_sha1WithRSAEncryption, /* 1.2.840.113549.1.1.5 */ OID_sha256WithRSAEncryption, /* 1.2.840.113549.1.1.11 */ OID_sha384WithRSAEncryption, /* 1.2.840.113549.1.1.12 */ OID_sha512WithRSAEncryption, /* 1.2.840.113549.1.1.13 */ OID_sha224WithRSAEncryption, /* 1.2.840.113549.1.1.14 */ /* PKCS#7 {iso(1) member-body(2) us(840) rsadsi(113549) pkcs(1) pkcs-7(7)} */ OID_data, /* 1.2.840.113549.1.7.1 */ OID_signed_data, /* 1.2.840.113549.1.7.2 */ /* PKCS#9 {iso(1) member-body(2) us(840) rsadsi(113549) pkcs(1) pkcs-9(9)} */ OID_email_address, /* 1.2.840.113549.1.9.1 */ OID_contentType, /* 1.2.840.113549.1.9.3 */ OID_messageDigest, /* 1.2.840.113549.1.9.4 */ OID_signingTime, /* 1.2.840.113549.1.9.5 */ OID_smimeCapabilites, /* 1.2.840.113549.1.9.15 */ OID_smimeAuthenticatedAttrs, /* 1.2.840.113549.1.9.16.2.11 */ /* {iso(1) member-body(2) us(840) rsadsi(113549) digestAlgorithm(2)} */ OID_md2, /* 1.2.840.113549.2.2 */ OID_md4, /* 1.2.840.113549.2.4 */ OID_md5, /* 1.2.840.113549.2.5 */ /* Microsoft Authenticode & Software Publishing */ OID_msIndirectData, /* 1.3.6.1.4.1.311.2.1.4 */ OID_msStatementType, /* 1.3.6.1.4.1.311.2.1.11 */ OID_msSpOpusInfo, /* 1.3.6.1.4.1.311.2.1.12 */ OID_msPeImageDataObjId, /* 1.3.6.1.4.1.311.2.1.15 */ OID_msIndividualSPKeyPurpose, /* 1.3.6.1.4.1.311.2.1.21 */ OID_msOutlookExpress, /* 1.3.6.1.4.1.311.16.4 */ OID_certAuthInfoAccess, /* 1.3.6.1.5.5.7.1.1 */ OID_sha1, /* 1.3.14.3.2.26 */ OID_sha256, /* 2.16.840.1.101.3.4.2.1 */ OID_sha384, /* 2.16.840.1.101.3.4.2.2 */ OID_sha512, /* 2.16.840.1.101.3.4.2.3 */ OID_sha224, /* 2.16.840.1.101.3.4.2.4 */ /* Distinguished Name attribute IDs [RFC 2256] */ OID_commonName, /* 2.5.4.3 */ OID_surname, /* 2.5.4.4 */ OID_countryName, /* 2.5.4.6 */ OID_locality, /* 2.5.4.7 */ OID_stateOrProvinceName, /* 2.5.4.8 */ OID_organizationName, /* 2.5.4.10 */ OID_organizationUnitName, /* 2.5.4.11 */ OID_title, /* 2.5.4.12 */ OID_description, /* 2.5.4.13 */ OID_name, /* 2.5.4.41 */ OID_givenName, /* 2.5.4.42 */ OID_initials, /* 2.5.4.43 */ OID_generationalQualifier, /* 2.5.4.44 */ /* Certificate extension IDs */ OID_subjectKeyIdentifier, /* 2.5.29.14 */ OID_keyUsage, /* 2.5.29.15 */ OID_subjectAltName, /* 2.5.29.17 */ OID_issuerAltName, /* 2.5.29.18 */ OID_basicConstraints, /* 2.5.29.19 */ OID_crlDistributionPoints, /* 2.5.29.31 */ OID_certPolicies, /* 2.5.29.32 */ OID_authorityKeyIdentifier, /* 2.5.29.35 */ OID_extKeyUsage, /* 2.5.29.37 */ OID__NR }; extern enum OID look_up_OID(const void *data, size_t datasize); extern int sprint_oid(const void *, size_t, char *, size_t); extern int sprint_OID(enum OID, char *, size_t); #endif /* _LINUX_OID_REGISTRY_H */ backport-iwlwifi-dkms-7906/include/linux/backport-refcount.h000066400000000000000000000062361351064634500242730ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_REFCOUNT_H #define _LINUX_REFCOUNT_H #include #include #include struct mutex; /** * struct refcount_t - variant of atomic_t specialized for reference counts * @refs: atomic_t counter field * * The counter saturates at UINT_MAX and will not move once * 
there. This avoids wrapping the counter and causing 'spurious' * use-after-free bugs. */ typedef struct refcount_struct { atomic_t refs; } refcount_t; #define REFCOUNT_INIT(n) { .refs = ATOMIC_INIT(n), } /** * refcount_set - set a refcount's value * @r: the refcount * @n: value to which the refcount will be set */ static inline void refcount_set(refcount_t *r, unsigned int n) { atomic_set(&r->refs, n); } /** * refcount_read - get a refcount's value * @r: the refcount * * Return: the refcount's value */ static inline unsigned int refcount_read(const refcount_t *r) { return atomic_read(&r->refs); } extern __must_check bool refcount_add_not_zero_checked(unsigned int i, refcount_t *r); extern void refcount_add_checked(unsigned int i, refcount_t *r); extern __must_check bool refcount_inc_not_zero_checked(refcount_t *r); extern void refcount_inc_checked(refcount_t *r); extern __must_check bool refcount_sub_and_test_checked(unsigned int i, refcount_t *r); extern __must_check bool refcount_dec_and_test_checked(refcount_t *r); extern void refcount_dec_checked(refcount_t *r); #ifdef CONFIG_REFCOUNT_FULL #define refcount_add_not_zero refcount_add_not_zero_checked #define refcount_add refcount_add_checked #define refcount_inc_not_zero refcount_inc_not_zero_checked #define refcount_inc refcount_inc_checked #define refcount_sub_and_test refcount_sub_and_test_checked #define refcount_dec_and_test refcount_dec_and_test_checked #define refcount_dec refcount_dec_checked #else # ifdef CONFIG_ARCH_HAS_REFCOUNT # include # else static inline __must_check bool refcount_add_not_zero(unsigned int i, refcount_t *r) { return atomic_add_unless(&r->refs, i, 0); } static inline void refcount_add(unsigned int i, refcount_t *r) { atomic_add(i, &r->refs); } static inline __must_check bool refcount_inc_not_zero(refcount_t *r) { return atomic_add_unless(&r->refs, 1, 0); } static inline void refcount_inc(refcount_t *r) { atomic_inc(&r->refs); } static inline __must_check bool refcount_sub_and_test(unsigned int i, refcount_t *r) { return atomic_sub_and_test(i, &r->refs); } static inline __must_check bool refcount_dec_and_test(refcount_t *r) { return atomic_dec_and_test(&r->refs); } static inline void refcount_dec(refcount_t *r) { atomic_dec(&r->refs); } # endif /* !CONFIG_ARCH_HAS_REFCOUNT */ #endif /* CONFIG_REFCOUNT_FULL */ extern __must_check bool refcount_dec_if_one(refcount_t *r); extern __must_check bool refcount_dec_not_one(refcount_t *r); extern __must_check bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock); extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock); extern __must_check bool refcount_dec_and_lock_irqsave(refcount_t *r, spinlock_t *lock, unsigned long *flags); #endif /* _LINUX_REFCOUNT_H */ backport-iwlwifi-dkms-7906/include/linux/backport-rhashtable-types.h000066400000000000000000000070511351064634500257210ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0 */ /* * Resizable, Scalable, Concurrent Hash Table * * Simple structures that might be needed in include * files. 
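* They are split out from rhashtable.h so that other headers can embed * these structures without pulling in the full rhashtable implementation.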
*/ #ifndef _LINUX_RHASHTABLE_TYPES_H #define _LINUX_RHASHTABLE_TYPES_H #include #include #include #include struct rhash_head { struct rhash_head __rcu *next; }; struct rhlist_head { struct rhash_head rhead; struct rhlist_head __rcu *next; }; struct bucket_table; /** * struct rhashtable_compare_arg - Key for the function rhashtable_compare * @ht: Hash table * @key: Key to compare against */ struct rhashtable_compare_arg { struct rhashtable *ht; const void *key; }; typedef u32 (*rht_hashfn_t)(const void *data, u32 len, u32 seed); typedef u32 (*rht_obj_hashfn_t)(const void *data, u32 len, u32 seed); typedef int (*rht_obj_cmpfn_t)(struct rhashtable_compare_arg *arg, const void *obj); /** * struct rhashtable_params - Hash table construction parameters * @nelem_hint: Hint on number of elements, should be 75% of desired size * @key_len: Length of key * @key_offset: Offset of key in struct to be hashed * @head_offset: Offset of rhash_head in struct to be hashed * @max_size: Maximum size while expanding * @min_size: Minimum size while shrinking * @locks_mul: Number of bucket locks to allocate per cpu (default: 32) * @automatic_shrinking: Enable automatic shrinking of tables * @hashfn: Hash function (default: jhash2 if !(key_len % 4), or jhash) * @obj_hashfn: Function to hash object * @obj_cmpfn: Function to compare key with object */ struct rhashtable_params { u16 nelem_hint; u16 key_len; u16 key_offset; u16 head_offset; unsigned int max_size; u16 min_size; bool automatic_shrinking; u8 locks_mul; rht_hashfn_t hashfn; rht_obj_hashfn_t obj_hashfn; rht_obj_cmpfn_t obj_cmpfn; }; /** * struct rhashtable - Hash table handle * @tbl: Bucket table * @key_len: Key length for hashfn * @max_elems: Maximum number of elements in table * @p: Configuration parameters * @rhlist: True if this is an rhltable * @run_work: Deferred worker to expand/shrink asynchronously * @mutex: Mutex to protect current/future table swapping * @lock: Spin lock to protect walker list * @nelems: Number of elements in table */ struct rhashtable { struct bucket_table __rcu *tbl; unsigned int key_len; unsigned int max_elems; struct rhashtable_params p; bool rhlist; struct work_struct run_work; struct mutex mutex; spinlock_t lock; atomic_t nelems; }; /** * struct rhltable - Hash table with duplicate objects in a list * @ht: Underlying rhtable */ struct rhltable { struct rhashtable ht; }; /** * struct rhashtable_walker - Hash table walker * @list: List entry on list of walkers * @tbl: The table that we were walking over */ struct rhashtable_walker { struct list_head list; struct bucket_table *tbl; }; /** * struct rhashtable_iter - Hash table iterator * @ht: Table to iterate through * @p: Current pointer * @list: Current hash list pointer * @walker: Associated rhashtable walker * @slot: Current slot * @skip: Number of entries to skip in slot */ struct rhashtable_iter { struct rhashtable *ht; struct rhash_head *p; struct rhlist_head *list; struct rhashtable_walker walker; unsigned int slot; unsigned int skip; bool end_of_table; }; int rhashtable_init(struct rhashtable *ht, const struct rhashtable_params *params); int rhltable_init(struct rhltable *hlt, const struct rhashtable_params *params); #endif /* _LINUX_RHASHTABLE_TYPES_H */ backport-iwlwifi-dkms-7906/include/linux/backport-rhashtable.h000066400000000000000000001052431351064634500245610ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0 */ /* * Resizable, Scalable, Concurrent Hash Table * * Copyright (c) 2015-2016 Herbert Xu * Copyright (c) 2014-2015 Thomas Graf * Copyright 
(c) 2008-2014 Patrick McHardy * * Code partially derived from nft_hash * Rewritten with rehash code from br_multicast plus single list * pointer as suggested by Josh Triplett * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #ifndef _LINUX_RHASHTABLE_H #define _LINUX_RHASHTABLE_H #include #include #include #include #include #include #include /* * The end of the chain is marked with a special nulls marks which has * the least significant bit set. */ /* Maximum chain length before rehash * * The maximum (not average) chain length grows with the size of the hash * table, at a rate of (log N)/(log log N). * * The value of 16 is selected so that even if the hash table grew to * 2^32 you would not expect the maximum chain length to exceed it * unless we are under attack (or extremely unlucky). * * As this limit is only to detect attacks, we don't need to set it to a * lower value as you'd need the chain length to vastly exceed 16 to have * any real effect on the system. */ #define RHT_ELASTICITY 16u /** * struct bucket_table - Table of hash buckets * @size: Number of hash buckets * @nest: Number of bits of first-level nested table. * @rehash: Current bucket being rehashed * @hash_rnd: Random seed to fold into hash * @locks_mask: Mask to apply before accessing locks[] * @locks: Array of spinlocks protecting individual buckets * @walkers: List of active walkers * @rcu: RCU structure for freeing the table * @future_tbl: Table under construction during rehashing * @ntbl: Nested table used when out of memory. * @buckets: size * hash buckets */ struct bucket_table { unsigned int size; unsigned int nest; unsigned int rehash; u32 hash_rnd; unsigned int locks_mask; spinlock_t *locks; struct list_head walkers; struct rcu_head rcu; struct bucket_table __rcu *future_tbl; struct rhash_head __rcu *buckets[] ____cacheline_aligned_in_smp; }; /* * NULLS_MARKER() expects a hash value with the low * bits mostly likely to be significant, and it discards * the msb. * We git it an address, in which the bottom 2 bits are * always 0, and the msb might be significant. * So we shift the address down one bit to align with * expectations and avoid losing a significant bit. */ #define RHT_NULLS_MARKER(ptr) \ ((void *)NULLS_MARKER(((unsigned long) (ptr)) >> 1)) #define INIT_RHT_NULLS_HEAD(ptr) \ ((ptr) = RHT_NULLS_MARKER(&(ptr))) static inline bool rht_is_a_nulls(const struct rhash_head *ptr) { return ((unsigned long) ptr & 1); } static inline void *rht_obj(const struct rhashtable *ht, const struct rhash_head *he) { return (char *)he - ht->p.head_offset; } static inline unsigned int rht_bucket_index(const struct bucket_table *tbl, unsigned int hash) { return hash & (tbl->size - 1); } static inline unsigned int rht_key_get_hash(struct rhashtable *ht, const void *key, const struct rhashtable_params params, unsigned int hash_rnd) { unsigned int hash; /* params must be equal to ht->p if it isn't constant. 
*/ if (!__builtin_constant_p(params.key_len)) hash = ht->p.hashfn(key, ht->key_len, hash_rnd); else if (params.key_len) { unsigned int key_len = params.key_len; if (params.hashfn) hash = params.hashfn(key, key_len, hash_rnd); else if (key_len & (sizeof(u32) - 1)) hash = jhash(key, key_len, hash_rnd); else hash = jhash2(key, key_len / sizeof(u32), hash_rnd); } else { unsigned int key_len = ht->p.key_len; if (params.hashfn) hash = params.hashfn(key, key_len, hash_rnd); else hash = jhash(key, key_len, hash_rnd); } return hash; } static inline unsigned int rht_key_hashfn( struct rhashtable *ht, const struct bucket_table *tbl, const void *key, const struct rhashtable_params params) { unsigned int hash = rht_key_get_hash(ht, key, params, tbl->hash_rnd); return rht_bucket_index(tbl, hash); } static inline unsigned int rht_head_hashfn( struct rhashtable *ht, const struct bucket_table *tbl, const struct rhash_head *he, const struct rhashtable_params params) { const char *ptr = rht_obj(ht, he); return likely(params.obj_hashfn) ? rht_bucket_index(tbl, params.obj_hashfn(ptr, params.key_len ?: ht->p.key_len, tbl->hash_rnd)) : rht_key_hashfn(ht, tbl, ptr + params.key_offset, params); } /** * rht_grow_above_75 - returns true if nelems > 0.75 * table-size * @ht: hash table * @tbl: current table */ static inline bool rht_grow_above_75(const struct rhashtable *ht, const struct bucket_table *tbl) { /* Expand table when exceeding 75% load */ return atomic_read(&ht->nelems) > (tbl->size / 4 * 3) && (!ht->p.max_size || tbl->size < ht->p.max_size); } /** * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size * @ht: hash table * @tbl: current table */ static inline bool rht_shrink_below_30(const struct rhashtable *ht, const struct bucket_table *tbl) { /* Shrink table beneath 30% load */ return atomic_read(&ht->nelems) < (tbl->size * 3 / 10) && tbl->size > ht->p.min_size; } /** * rht_grow_above_100 - returns true if nelems > table-size * @ht: hash table * @tbl: current table */ static inline bool rht_grow_above_100(const struct rhashtable *ht, const struct bucket_table *tbl) { return atomic_read(&ht->nelems) > tbl->size && (!ht->p.max_size || tbl->size < ht->p.max_size); } /** * rht_grow_above_max - returns true if table is above maximum * @ht: hash table * @tbl: current table */ static inline bool rht_grow_above_max(const struct rhashtable *ht, const struct bucket_table *tbl) { return atomic_read(&ht->nelems) >= ht->max_elems; } /* The bucket lock is selected based on the hash and protects mutations * on a group of hash buckets. * * A maximum of tbl->size/2 bucket locks is allocated. This ensures that * a single lock always covers both buckets which may both contain * entries which link to the same bucket of the old table during resizing. * This allows the locking to be simplified, as locking the bucket in both * tables during a resize always guarantees protection. * * IMPORTANT: When holding the bucket lock of both the old and new table * during expansions and shrinking, the old bucket lock must always be * acquired first.
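* * As a worked example of the mapping done by rht_bucket_lock() below: with * locks_mask = 31, hashes 5 and 37 both select locks[5] (37 & 31 == 5), so * those two buckets are serialized by the same spinlock.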
*/ static inline spinlock_t *rht_bucket_lock(const struct bucket_table *tbl, unsigned int hash) { return &tbl->locks[hash & tbl->locks_mask]; } #ifdef CONFIG_PROVE_LOCKING int lockdep_rht_mutex_is_held(struct rhashtable *ht); int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash); #else static inline int lockdep_rht_mutex_is_held(struct rhashtable *ht) { return 1; } static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash) { return 1; } #endif /* CONFIG_PROVE_LOCKING */ void *rhashtable_insert_slow(struct rhashtable *ht, const void *key, struct rhash_head *obj); void rhashtable_walk_enter(struct rhashtable *ht, struct rhashtable_iter *iter); void rhashtable_walk_exit(struct rhashtable_iter *iter); int rhashtable_walk_start_check(struct rhashtable_iter *iter) __acquires(RCU); static inline void rhashtable_walk_start(struct rhashtable_iter *iter) { (void)rhashtable_walk_start_check(iter); } void *rhashtable_walk_next(struct rhashtable_iter *iter); void *rhashtable_walk_peek(struct rhashtable_iter *iter); void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU); void rhashtable_free_and_destroy(struct rhashtable *ht, void (*free_fn)(void *ptr, void *arg), void *arg); void rhashtable_destroy(struct rhashtable *ht); struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl, unsigned int hash); struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash); #define rht_dereference(p, ht) \ rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht)) #define rht_dereference_rcu(p, ht) \ rcu_dereference_check(p, lockdep_rht_mutex_is_held(ht)) #define rht_dereference_bucket(p, tbl, hash) \ rcu_dereference_protected(p, lockdep_rht_bucket_is_held(tbl, hash)) #define rht_dereference_bucket_rcu(p, tbl, hash) \ rcu_dereference_check(p, lockdep_rht_bucket_is_held(tbl, hash)) #define rht_entry(tpos, pos, member) \ ({ tpos = container_of(pos, typeof(*tpos), member); 1; }) static inline struct rhash_head __rcu *const *rht_bucket( const struct bucket_table *tbl, unsigned int hash) { return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) : &tbl->buckets[hash]; } static inline struct rhash_head __rcu **rht_bucket_var( struct bucket_table *tbl, unsigned int hash) { return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) : &tbl->buckets[hash]; } static inline struct rhash_head __rcu **rht_bucket_insert( struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash) { return unlikely(tbl->nest) ? rht_bucket_nested_insert(ht, tbl, hash) : &tbl->buckets[hash]; } /** * rht_for_each_continue - continue iterating over hash chain * @pos: the &struct rhash_head to use as a loop cursor. * @head: the previous &struct rhash_head to continue from * @tbl: the &struct bucket_table * @hash: the hash value / bucket index */ #define rht_for_each_continue(pos, head, tbl, hash) \ for (pos = rht_dereference_bucket(head, tbl, hash); \ !rht_is_a_nulls(pos); \ pos = rht_dereference_bucket((pos)->next, tbl, hash)) /** * rht_for_each - iterate over hash chain * @pos: the &struct rhash_head to use as a loop cursor. * @tbl: the &struct bucket_table * @hash: the hash value / bucket index */ #define rht_for_each(pos, tbl, hash) \ rht_for_each_continue(pos, *rht_bucket(tbl, hash), tbl, hash) /** * rht_for_each_entry_continue - continue iterating over hash chain * @tpos: the type * to use as a loop cursor. * @pos: the &struct rhash_head to use as a loop cursor. 
* @head: the previous &struct rhash_head to continue from * @tbl: the &struct bucket_table * @hash: the hash value / bucket index * @member: name of the &struct rhash_head within the hashable struct. */ #define rht_for_each_entry_continue(tpos, pos, head, tbl, hash, member) \ for (pos = rht_dereference_bucket(head, tbl, hash); \ (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \ pos = rht_dereference_bucket((pos)->next, tbl, hash)) /** * rht_for_each_entry - iterate over hash chain of given type * @tpos: the type * to use as a loop cursor. * @pos: the &struct rhash_head to use as a loop cursor. * @tbl: the &struct bucket_table * @hash: the hash value / bucket index * @member: name of the &struct rhash_head within the hashable struct. */ #define rht_for_each_entry(tpos, pos, tbl, hash, member) \ rht_for_each_entry_continue(tpos, pos, *rht_bucket(tbl, hash), \ tbl, hash, member) /** * rht_for_each_entry_safe - safely iterate over hash chain of given type * @tpos: the type * to use as a loop cursor. * @pos: the &struct rhash_head to use as a loop cursor. * @next: the &struct rhash_head to use as next in loop cursor. * @tbl: the &struct bucket_table * @hash: the hash value / bucket index * @member: name of the &struct rhash_head within the hashable struct. * * This hash chain list-traversal primitive allows for the looped code to * remove the loop cursor from the list. */ #define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member) \ for (pos = rht_dereference_bucket(*rht_bucket(tbl, hash), tbl, hash), \ next = !rht_is_a_nulls(pos) ? \ rht_dereference_bucket(pos->next, tbl, hash) : NULL; \ (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \ pos = next, \ next = !rht_is_a_nulls(pos) ? \ rht_dereference_bucket(pos->next, tbl, hash) : NULL) /** * rht_for_each_rcu_continue - continue iterating over rcu hash chain * @pos: the &struct rhash_head to use as a loop cursor. * @head: the previous &struct rhash_head to continue from * @tbl: the &struct bucket_table * @hash: the hash value / bucket index * * This hash chain list-traversal primitive may safely run concurrently with * the _rcu mutation primitives such as rhashtable_insert() as long as the * traversal is guarded by rcu_read_lock(). */ #define rht_for_each_rcu_continue(pos, head, tbl, hash) \ for (({barrier(); }), \ pos = rht_dereference_bucket_rcu(head, tbl, hash); \ !rht_is_a_nulls(pos); \ pos = rcu_dereference_raw(pos->next)) /** * rht_for_each_rcu - iterate over rcu hash chain * @pos: the &struct rhash_head to use as a loop cursor. * @tbl: the &struct bucket_table * @hash: the hash value / bucket index * * This hash chain list-traversal primitive may safely run concurrently with * the _rcu mutation primitives such as rhashtable_insert() as long as the * traversal is guarded by rcu_read_lock(). */ #define rht_for_each_rcu(pos, tbl, hash) \ rht_for_each_rcu_continue(pos, *rht_bucket(tbl, hash), tbl, hash) /** * rht_for_each_entry_rcu_continue - continue iterating over rcu hash chain * @tpos: the type * to use as a loop cursor. * @pos: the &struct rhash_head to use as a loop cursor. * @head: the previous &struct rhash_head to continue from * @tbl: the &struct bucket_table * @hash: the hash value / bucket index * @member: name of the &struct rhash_head within the hashable struct. * * This hash chain list-traversal primitive may safely run concurrently with * the _rcu mutation primitives such as rhashtable_insert() as long as the * traversal is guarded by rcu_read_lock(). 
*/ #define rht_for_each_entry_rcu_continue(tpos, pos, head, tbl, hash, member) \ for (({barrier(); }), \ pos = rht_dereference_bucket_rcu(head, tbl, hash); \ (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \ pos = rht_dereference_bucket_rcu(pos->next, tbl, hash)) /** * rht_for_each_entry_rcu - iterate over rcu hash chain of given type * @tpos: the type * to use as a loop cursor. * @pos: the &struct rhash_head to use as a loop cursor. * @tbl: the &struct bucket_table * @hash: the hash value / bucket index * @member: name of the &struct rhash_head within the hashable struct. * * This hash chain list-traversal primitive may safely run concurrently with * the _rcu mutation primitives such as rhashtable_insert() as long as the * traversal is guarded by rcu_read_lock(). */ #define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member) \ rht_for_each_entry_rcu_continue(tpos, pos, *rht_bucket(tbl, hash), \ tbl, hash, member) /** * rhl_for_each_rcu - iterate over rcu hash table list * @pos: the &struct rhlist_head to use as a loop cursor. * @list: the head of the list * * This hash chain list-traversal primitive should be used on the * list returned by rhltable_lookup. */ #define rhl_for_each_rcu(pos, list) \ for (pos = list; pos; pos = rcu_dereference_raw(pos->next)) /** * rhl_for_each_entry_rcu - iterate over rcu hash table list of given type * @tpos: the type * to use as a loop cursor. * @pos: the &struct rhlist_head to use as a loop cursor. * @list: the head of the list * @member: name of the &struct rhlist_head within the hashable struct. * * This hash chain list-traversal primitive should be used on the * list returned by rhltable_lookup. */ #define rhl_for_each_entry_rcu(tpos, pos, list, member) \ for (pos = list; pos && rht_entry(tpos, pos, member); \ pos = rcu_dereference_raw(pos->next)) static inline int rhashtable_compare(struct rhashtable_compare_arg *arg, const void *obj) { struct rhashtable *ht = arg->ht; const char *ptr = obj; return memcmp(ptr + ht->p.key_offset, arg->key, ht->p.key_len); } /* Internal function, do not use. */ static inline struct rhash_head *__rhashtable_lookup( struct rhashtable *ht, const void *key, const struct rhashtable_params params) { struct rhashtable_compare_arg arg = { .ht = ht, .key = key, }; struct rhash_head __rcu * const *head; struct bucket_table *tbl; struct rhash_head *he; unsigned int hash; tbl = rht_dereference_rcu(ht->tbl, ht); restart: hash = rht_key_hashfn(ht, tbl, key, params); head = rht_bucket(tbl, hash); do { rht_for_each_rcu_continue(he, *head, tbl, hash) { if (params.obj_cmpfn ? params.obj_cmpfn(&arg, rht_obj(ht, he)) : rhashtable_compare(&arg, rht_obj(ht, he))) continue; return he; } /* An object might have been moved to a different hash chain, * while we walk along it - better check and retry. */ } while (he != RHT_NULLS_MARKER(head)); /* Ensure we see any new tables. */ smp_rmb(); tbl = rht_dereference_rcu(tbl->future_tbl, ht); if (unlikely(tbl)) goto restart; return NULL; } /** * rhashtable_lookup - search hash table * @ht: hash table * @key: the pointer to the key * @params: hash table parameters * * Computes the hash value for the key and traverses the bucket chain looking * for an entry with an identical key. The first matching entry is returned. * * This must only be called under the RCU read lock. * * Returns the first entry on which the compare function returned true.
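* * Usage sketch (illustrative only; my_ht, my_key and my_params are * hypothetical names): rcu_read_lock(); obj = rhashtable_lookup(&my_ht, &my_key, my_params); if (obj) ...; rcu_read_unlock();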
*/ static inline void *rhashtable_lookup( struct rhashtable *ht, const void *key, const struct rhashtable_params params) { struct rhash_head *he = __rhashtable_lookup(ht, key, params); return he ? rht_obj(ht, he) : NULL; } /** * rhashtable_lookup_fast - search hash table, without RCU read lock * @ht: hash table * @key: the pointer to the key * @params: hash table parameters * * Computes the hash value for the key and traverses the bucket chain looking * for an entry with an identical key. The first matching entry is returned. * * Only use this function when you have other mechanisms guaranteeing * that the object won't go away after the RCU read lock is released. * * Returns the first entry on which the compare function returned true. */ static inline void *rhashtable_lookup_fast( struct rhashtable *ht, const void *key, const struct rhashtable_params params) { void *obj; rcu_read_lock(); obj = rhashtable_lookup(ht, key, params); rcu_read_unlock(); return obj; } /** * rhltable_lookup - search hash list table * @hlt: hash list table * @key: the pointer to the key * @params: hash table parameters * * Computes the hash value for the key and traverses the bucket chain looking * for an entry with an identical key. All matching entries are returned * in a list. * * This must only be called under the RCU read lock. * * Returns the list of entries that match the given key. */ static inline struct rhlist_head *rhltable_lookup( struct rhltable *hlt, const void *key, const struct rhashtable_params params) { struct rhash_head *he = __rhashtable_lookup(&hlt->ht, key, params); return he ? container_of(he, struct rhlist_head, rhead) : NULL; } /* Internal function, please use rhashtable_insert_fast() instead. This * function returns the existing element already in the hash table if there * is a clash, otherwise it returns an error via ERR_PTR(). */ static inline void *__rhashtable_insert_fast( struct rhashtable *ht, const void *key, struct rhash_head *obj, const struct rhashtable_params params, bool rhlist) { struct rhashtable_compare_arg arg = { .ht = ht, .key = key, }; struct rhash_head __rcu **pprev; struct bucket_table *tbl; struct rhash_head *head; spinlock_t *lock; unsigned int hash; int elasticity; void *data; rcu_read_lock(); tbl = rht_dereference_rcu(ht->tbl, ht); hash = rht_head_hashfn(ht, tbl, obj, params); lock = rht_bucket_lock(tbl, hash); spin_lock_bh(lock); if (unlikely(rcu_access_pointer(tbl->future_tbl))) { slow_path: spin_unlock_bh(lock); rcu_read_unlock(); return rhashtable_insert_slow(ht, key, obj); } elasticity = RHT_ELASTICITY; pprev = rht_bucket_insert(ht, tbl, hash); data = ERR_PTR(-ENOMEM); if (!pprev) goto out; rht_for_each_continue(head, *pprev, tbl, hash) { struct rhlist_head *plist; struct rhlist_head *list; elasticity--; if (!key || (params.obj_cmpfn ?
params.obj_cmpfn(&arg, rht_obj(ht, head)) : rhashtable_compare(&arg, rht_obj(ht, head)))) { pprev = &head->next; continue; } data = rht_obj(ht, head); if (!rhlist) goto out; list = container_of(obj, struct rhlist_head, rhead); plist = container_of(head, struct rhlist_head, rhead); RCU_INIT_POINTER(list->next, plist); head = rht_dereference_bucket(head->next, tbl, hash); RCU_INIT_POINTER(list->rhead.next, head); rcu_assign_pointer(*pprev, obj); goto good; } if (elasticity <= 0) goto slow_path; data = ERR_PTR(-E2BIG); if (unlikely(rht_grow_above_max(ht, tbl))) goto out; if (unlikely(rht_grow_above_100(ht, tbl))) goto slow_path; head = rht_dereference_bucket(*pprev, tbl, hash); RCU_INIT_POINTER(obj->next, head); if (rhlist) { struct rhlist_head *list; list = container_of(obj, struct rhlist_head, rhead); RCU_INIT_POINTER(list->next, NULL); } rcu_assign_pointer(*pprev, obj); atomic_inc(&ht->nelems); if (rht_grow_above_75(ht, tbl)) schedule_work(&ht->run_work); good: data = NULL; out: spin_unlock_bh(lock); rcu_read_unlock(); return data; } /** * rhashtable_insert_fast - insert object into hash table * @ht: hash table * @obj: pointer to hash head inside object * @params: hash table parameters * * Will take a per bucket spinlock to protect against mutual mutations * on the same bucket. Multiple insertions may occur in parallel unless * they map to the same bucket lock. * * It is safe to call this function from atomic context. * * Will trigger an automatic deferred table resizing if residency in the * table grows beyond 70%. */ static inline int rhashtable_insert_fast( struct rhashtable *ht, struct rhash_head *obj, const struct rhashtable_params params) { void *ret; ret = __rhashtable_insert_fast(ht, NULL, obj, params, false); if (IS_ERR(ret)) return PTR_ERR(ret); return ret == NULL ? 0 : -EEXIST; } /** * rhltable_insert_key - insert object into hash list table * @hlt: hash list table * @key: the pointer to the key * @list: pointer to hash list head inside object * @params: hash table parameters * * Will take a per bucket spinlock to protect against mutual mutations * on the same bucket. Multiple insertions may occur in parallel unless * they map to the same bucket lock. * * It is safe to call this function from atomic context. * * Will trigger an automatic deferred table resizing if residency in the * table grows beyond 70%. */ static inline int rhltable_insert_key( struct rhltable *hlt, const void *key, struct rhlist_head *list, const struct rhashtable_params params) { return PTR_ERR(__rhashtable_insert_fast(&hlt->ht, key, &list->rhead, params, true)); } /** * rhltable_insert - insert object into hash list table * @hlt: hash list table * @list: pointer to hash list head inside object * @params: hash table parameters * * Will take a per bucket spinlock to protect against mutual mutations * on the same bucket. Multiple insertions may occur in parallel unless * they map to the same bucket lock. * * It is safe to call this function from atomic context. * * Will trigger an automatic deferred table resizing if residency in the * table grows beyond 70%. 
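* * Usage sketch (illustrative; obj is a hypothetical object embedding a * struct rhlist_head member named list): err = rhltable_insert(&hlt, &obj->list, params); a non-zero err comes from the slow path, e.g. -ENOMEM.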
*/ static inline int rhltable_insert( struct rhltable *hlt, struct rhlist_head *list, const struct rhashtable_params params) { const char *key = rht_obj(&hlt->ht, &list->rhead); key += params.key_offset; return rhltable_insert_key(hlt, key, list, params); } /** * rhashtable_lookup_insert_fast - lookup and insert object into hash table * @ht: hash table * @obj: pointer to hash head inside object * @params: hash table parameters * * Locks down the bucket chain in both the old and new table if a resize * is in progress to ensure that writers can't remove from the old table * and can't insert to the new table during the atomic operation of search * and insertion. Searches for duplicates in both the old and new table if * a resize is in progress. * * This lookup function may only be used for fixed key hash table (key_len * parameter set). It will BUG() if used inappropriately. * * It is safe to call this function from atomic context. * * Will trigger an automatic deferred table resizing if residency in the * table grows beyond 70%. */ static inline int rhashtable_lookup_insert_fast( struct rhashtable *ht, struct rhash_head *obj, const struct rhashtable_params params) { const char *key = rht_obj(ht, obj); void *ret; BUG_ON(ht->p.obj_hashfn); ret = __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj, params, false); if (IS_ERR(ret)) return PTR_ERR(ret); return ret == NULL ? 0 : -EEXIST; } /** * rhashtable_lookup_get_insert_fast - lookup and insert object into hash table * @ht: hash table * @obj: pointer to hash head inside object * @params: hash table parameters * * Just like rhashtable_lookup_insert_fast(), but this function returns the * object if it exists, NULL if it did not and the insertion was successful, * and an ERR_PTR otherwise. */ static inline void *rhashtable_lookup_get_insert_fast( struct rhashtable *ht, struct rhash_head *obj, const struct rhashtable_params params) { const char *key = rht_obj(ht, obj); BUG_ON(ht->p.obj_hashfn); return __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj, params, false); } /** * rhashtable_lookup_insert_key - search and insert object to hash table * with explicit key * @ht: hash table * @key: key * @obj: pointer to hash head inside object * @params: hash table parameters * * Locks down the bucket chain in both the old and new table if a resize * is in progress to ensure that writers can't remove from the old table * and can't insert to the new table during the atomic operation of search * and insertion. Searches for duplicates in both the old and new table if * a resize is in progress. * * Lookups may occur in parallel with hashtable mutations and resizing. * * Will trigger an automatic deferred table resizing if residency in the * table grows beyond 70%. * * Returns zero on success. */ static inline int rhashtable_lookup_insert_key( struct rhashtable *ht, const void *key, struct rhash_head *obj, const struct rhashtable_params params) { void *ret; BUG_ON(!ht->p.obj_hashfn || !key); ret = __rhashtable_insert_fast(ht, key, obj, params, false); if (IS_ERR(ret)) return PTR_ERR(ret); return ret == NULL ? 0 : -EEXIST; } /** * rhashtable_lookup_get_insert_key - lookup and insert object into hash table * @ht: hash table * @obj: pointer to hash head inside object * @params: hash table parameters * @data: pointer to element data already in hashes * * Just like rhashtable_lookup_insert_key(), but this function returns the * object if it exists, NULL if it does not and the insertion was successful, * and an ERR_PTR otherwise. 
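* * Usage sketch (illustrative only; obj with members key and node is * hypothetical): old = rhashtable_lookup_get_insert_key(&ht, &obj->key, &obj->node, params); IS_ERR(old) means the insertion failed, a non-NULL old means an entry with the same key already existed and obj was not inserted, NULL means obj was inserted.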
*/ static inline void *rhashtable_lookup_get_insert_key( struct rhashtable *ht, const void *key, struct rhash_head *obj, const struct rhashtable_params params) { BUG_ON(!ht->p.obj_hashfn || !key); return __rhashtable_insert_fast(ht, key, obj, params, false); } /* Internal function, please use rhashtable_remove_fast() instead */ static inline int __rhashtable_remove_fast_one( struct rhashtable *ht, struct bucket_table *tbl, struct rhash_head *obj, const struct rhashtable_params params, bool rhlist) { struct rhash_head __rcu **pprev; struct rhash_head *he; spinlock_t *lock; unsigned int hash; int err = -ENOENT; hash = rht_head_hashfn(ht, tbl, obj, params); lock = rht_bucket_lock(tbl, hash); spin_lock_bh(lock); pprev = rht_bucket_var(tbl, hash); rht_for_each_continue(he, *pprev, tbl, hash) { struct rhlist_head *list; list = container_of(he, struct rhlist_head, rhead); if (he != obj) { struct rhlist_head __rcu **lpprev; pprev = &he->next; if (!rhlist) continue; do { lpprev = &list->next; list = rht_dereference_bucket(list->next, tbl, hash); } while (list && obj != &list->rhead); if (!list) continue; list = rht_dereference_bucket(list->next, tbl, hash); RCU_INIT_POINTER(*lpprev, list); err = 0; break; } obj = rht_dereference_bucket(obj->next, tbl, hash); err = 1; if (rhlist) { list = rht_dereference_bucket(list->next, tbl, hash); if (list) { RCU_INIT_POINTER(list->rhead.next, obj); obj = &list->rhead; err = 0; } } rcu_assign_pointer(*pprev, obj); break; } spin_unlock_bh(lock); if (err > 0) { atomic_dec(&ht->nelems); if (unlikely(ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl))) schedule_work(&ht->run_work); err = 0; } return err; } /* Internal function, please use rhashtable_remove_fast() instead */ static inline int __rhashtable_remove_fast( struct rhashtable *ht, struct rhash_head *obj, const struct rhashtable_params params, bool rhlist) { struct bucket_table *tbl; int err; rcu_read_lock(); tbl = rht_dereference_rcu(ht->tbl, ht); /* Because we have already taken (and released) the bucket * lock in old_tbl, if we find that future_tbl is not yet * visible then that guarantees the entry to still be in * the old tbl if it exists. */ while ((err = __rhashtable_remove_fast_one(ht, tbl, obj, params, rhlist)) && (tbl = rht_dereference_rcu(tbl->future_tbl, ht))) ; rcu_read_unlock(); return err; } /** * rhashtable_remove_fast - remove object from hash table * @ht: hash table * @obj: pointer to hash head inside object * @params: hash table parameters * * Since the hash chain is singly linked, the removal operation needs to * walk the bucket chain upon removal. The removal operation is thus * considerably slow if the hash table is not correctly sized. * * Will automatically shrink the table if permitted when residency drops * below 30%. * * Returns zero on success, -ENOENT if the entry could not be found. */ static inline int rhashtable_remove_fast( struct rhashtable *ht, struct rhash_head *obj, const struct rhashtable_params params) { return __rhashtable_remove_fast(ht, obj, params, false); } /** * rhltable_remove - remove object from hash list table * @hlt: hash list table * @list: pointer to hash list head inside object * @params: hash table parameters * * Since the hash chain is singly linked, the removal operation needs to * walk the bucket chain upon removal. The removal operation is thus * considerably slow if the hash table is not correctly sized.
* * Will automatically shrink the table if permitted when residency drops * below 30% * * Returns zero on success, -ENOENT if the entry could not be found. */ static inline int rhltable_remove( struct rhltable *hlt, struct rhlist_head *list, const struct rhashtable_params params) { return __rhashtable_remove_fast(&hlt->ht, &list->rhead, params, true); } /* Internal function, please use rhashtable_replace_fast() instead */ static inline int __rhashtable_replace_fast( struct rhashtable *ht, struct bucket_table *tbl, struct rhash_head *obj_old, struct rhash_head *obj_new, const struct rhashtable_params params) { struct rhash_head __rcu **pprev; struct rhash_head *he; spinlock_t *lock; unsigned int hash; int err = -ENOENT; /* Minimally, the old and new objects must have same hash * (which should mean identifiers are the same). */ hash = rht_head_hashfn(ht, tbl, obj_old, params); if (hash != rht_head_hashfn(ht, tbl, obj_new, params)) return -EINVAL; lock = rht_bucket_lock(tbl, hash); spin_lock_bh(lock); pprev = rht_bucket_var(tbl, hash); rht_for_each_continue(he, *pprev, tbl, hash) { if (he != obj_old) { pprev = &he->next; continue; } rcu_assign_pointer(obj_new->next, obj_old->next); rcu_assign_pointer(*pprev, obj_new); err = 0; break; } spin_unlock_bh(lock); return err; } /** * rhashtable_replace_fast - replace an object in hash table * @ht: hash table * @obj_old: pointer to hash head inside object being replaced * @obj_new: pointer to hash head inside object which is new * @params: hash table parameters * * Replacing an object doesn't affect the number of elements in the hash table * or bucket, so we don't need to worry about shrinking or expanding the * table here. * * Returns zero on success, -ENOENT if the entry could not be found, * -EINVAL if hash is not the same for the old and new objects. */ static inline int rhashtable_replace_fast( struct rhashtable *ht, struct rhash_head *obj_old, struct rhash_head *obj_new, const struct rhashtable_params params) { struct bucket_table *tbl; int err; rcu_read_lock(); tbl = rht_dereference_rcu(ht->tbl, ht); /* Because we have already taken (and released) the bucket * lock in old_tbl, if we find that future_tbl is not yet * visible then that guarantees the entry to still be in * the old tbl if it exists. */ while ((err = __rhashtable_replace_fast(ht, tbl, obj_old, obj_new, params)) && (tbl = rht_dereference_rcu(tbl->future_tbl, ht))) ; rcu_read_unlock(); return err; } /* Obsolete function, do not use in new code. */ static inline int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter, gfp_t gfp) { rhashtable_walk_enter(ht, iter); return 0; } /** * rhltable_walk_enter - Initialise an iterator * @hlt: Table to walk over * @iter: Hash table Iterator * * This function prepares a hash table walk. * * Note that if you restart a walk after rhashtable_walk_stop you * may see the same object twice. Also, you may miss objects if * there are removals in between rhashtable_walk_stop and the next * call to rhashtable_walk_start. * * For a completely stable walk you should construct your own data * structure outside the hash table. * * This function may be called from any process context, including * non-preemptable context, but cannot be called from softirq or * hardirq context. * * You must call rhashtable_walk_exit after this function returns. 
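* * Typical iteration pattern (illustrative sketch): rhltable_walk_enter(&hlt, &iter); rhashtable_walk_start(&iter); while ((obj = rhashtable_walk_next(&iter)) != NULL) { if (IS_ERR(obj)) continue; use obj; } rhashtable_walk_stop(&iter); rhashtable_walk_exit(&iter); where an ERR_PTR(-EAGAIN) return from rhashtable_walk_next() marks a table resize.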
*/ static inline void rhltable_walk_enter(struct rhltable *hlt, struct rhashtable_iter *iter) { return rhashtable_walk_enter(&hlt->ht, iter); } /** * rhltable_free_and_destroy - free elements and destroy hash list table * @hlt: the hash list table to destroy * @free_fn: callback to release resources of element * @arg: pointer passed to free_fn * * See documentation for rhashtable_free_and_destroy. */ static inline void rhltable_free_and_destroy(struct rhltable *hlt, void (*free_fn)(void *ptr, void *arg), void *arg) { return rhashtable_free_and_destroy(&hlt->ht, free_fn, arg); } static inline void rhltable_destroy(struct rhltable *hlt) { return rhltable_free_and_destroy(hlt, NULL, NULL); } #endif /* _LINUX_RHASHTABLE_H */ backport-iwlwifi-dkms-7906/include/linux/bitfield.h000066400000000000000000000113071351064634500224200ustar00rootroot00000000000000/* * Copyright (C) 2014 Felix Fietkau * Copyright (C) 2004 - 2009 Ivo van Doorn * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #ifndef _LINUX_BITFIELD_H #define _LINUX_BITFIELD_H #include <linux/build_bug.h> #include <asm/byteorder.h> /* * Bitfield access macros * * FIELD_{GET,PREP} macros take as first parameter shifted mask * from which they extract the base mask and shift amount. * Mask must be a compile-time constant. * * Example: * * #define REG_FIELD_A GENMASK(6, 0) * #define REG_FIELD_B BIT(7) * #define REG_FIELD_C GENMASK(15, 8) * #define REG_FIELD_D GENMASK(31, 16) * * Get: * a = FIELD_GET(REG_FIELD_A, reg); * b = FIELD_GET(REG_FIELD_B, reg); * * Set: * reg = FIELD_PREP(REG_FIELD_A, 1) | * FIELD_PREP(REG_FIELD_B, 0) | * FIELD_PREP(REG_FIELD_C, c) | * FIELD_PREP(REG_FIELD_D, 0x40); * * Modify: * reg &= ~REG_FIELD_C; * reg |= FIELD_PREP(REG_FIELD_C, c); */ #define __bf_shf(x) (__builtin_ffsll(x) - 1) #define __BF_FIELD_CHECK(_mask, _reg, _val, _pfx) \ ({ \ BUILD_BUG_ON_MSG(!__builtin_constant_p(_mask), \ _pfx "mask is not constant"); \ BUILD_BUG_ON_MSG((_mask) == 0, _pfx "mask is zero"); \ BUILD_BUG_ON_MSG(__builtin_constant_p(_val) ? \ ~((_mask) >> __bf_shf(_mask)) & (_val) : 0, \ _pfx "value too large for the field"); \ BUILD_BUG_ON_MSG((_mask) > (typeof(_reg))~0ull, \ _pfx "type of reg too small for mask"); \ __BUILD_BUG_ON_NOT_POWER_OF_2((_mask) + \ (1ULL << __bf_shf(_mask))); \ }) /** * FIELD_FIT() - check if value fits in the field * @_mask: shifted mask defining the field's length and position * @_val: value to test against the field * * Return: true if @_val can fit inside @_mask, false if @_val is too big. */ #define FIELD_FIT(_mask, _val) \ ({ \ __BF_FIELD_CHECK(_mask, 0ULL, _val, "FIELD_FIT: "); \ !((((typeof(_mask))_val) << __bf_shf(_mask)) & ~(_mask)); \ }) /** * FIELD_PREP() - prepare a bitfield element * @_mask: shifted mask defining the field's length and position * @_val: value to put in the field * * FIELD_PREP() masks and shifts up the value. The result should * be combined with other fields of the bitfield using logical OR.
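* * For example, using the REG_FIELD_* masks from the comment at the top of * this file: reg = FIELD_PREP(REG_FIELD_C, 0x23) | FIELD_PREP(REG_FIELD_B, 1); places 0x23 into bits 15-8 and sets bit 7, i.e. reg == 0x2380.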
*/ #define FIELD_PREP(_mask, _val) \ ({ \ __BF_FIELD_CHECK(_mask, 0ULL, _val, "FIELD_PREP: "); \ ((typeof(_mask))(_val) << __bf_shf(_mask)) & (_mask); \ }) /** * FIELD_GET() - extract a bitfield element * @_mask: shifted mask defining the field's length and position * @_reg: value of entire bitfield * * FIELD_GET() extracts the field specified by @_mask from the * bitfield passed in as @_reg by masking and shifting it down. */ #define FIELD_GET(_mask, _reg) \ ({ \ __BF_FIELD_CHECK(_mask, _reg, 0U, "FIELD_GET: "); \ (typeof(_mask))(((_reg) & (_mask)) >> __bf_shf(_mask)); \ }) extern void __compiletime_error("value doesn't fit into mask") __field_overflow(void); extern void __compiletime_error("bad bitfield mask") __bad_mask(void); static __always_inline u64 field_multiplier(u64 field) { if ((field | (field - 1)) & ((field | (field - 1)) + 1)) __bad_mask(); return field & -field; } static __always_inline u64 field_mask(u64 field) { return field / field_multiplier(field); } #define ____MAKE_OP(type,base,to,from) \ static __always_inline __##type type##_encode_bits(base v, base field) \ { \ if (__builtin_constant_p(v) && (v & ~field_mask(field))) \ __field_overflow(); \ return to((v & field_mask(field)) * field_multiplier(field)); \ } \ static __always_inline __##type type##_replace_bits(__##type old, \ base val, base field) \ { \ return (old & ~to(field)) | type##_encode_bits(val, field); \ } \ static __always_inline void type##p_replace_bits(__##type *p, \ base val, base field) \ { \ *p = (*p & ~to(field)) | type##_encode_bits(val, field); \ } \ static __always_inline base type##_get_bits(__##type v, base field) \ { \ return (from(v) & field)/field_multiplier(field); \ } #define __MAKE_OP(size) \ ____MAKE_OP(le##size,u##size,cpu_to_le##size,le##size##_to_cpu) \ ____MAKE_OP(be##size,u##size,cpu_to_be##size,be##size##_to_cpu) \ ____MAKE_OP(u##size,u##size,,) ____MAKE_OP(u8,u8,,) __MAKE_OP(16) __MAKE_OP(32) __MAKE_OP(64) #undef __MAKE_OP #undef ____MAKE_OP #endif backport-iwlwifi-dkms-7906/include/linux/devcoredump.h000066400000000000000000000007471351064634500231610ustar00rootroot00000000000000/* Automatically created during backport process */ #ifndef CPTCFG_BPAUTO_BUILD_WANT_DEV_COREDUMP #include_next #include #else #undef dev_coredumpv #define dev_coredumpv LINUX_BACKPORT(dev_coredumpv) #undef dev_coredumpm #define dev_coredumpm LINUX_BACKPORT(dev_coredumpm) #undef dev_coredumpsg #define dev_coredumpsg LINUX_BACKPORT(dev_coredumpsg) #include #endif /* CPTCFG_BPAUTO_BUILD_WANT_DEV_COREDUMP */ backport-iwlwifi-dkms-7906/include/linux/ieee80211.h000066400000000000000000003223521351064634500221460ustar00rootroot00000000000000/* * IEEE 802.11 defines * * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen * * Copyright (c) 2002-2003, Jouni Malinen * Copyright (c) 2005, Devicescape Software, Inc. * Copyright (c) 2006, Michael Wu * Copyright (c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright (c) 2016 - 2017 Intel Deutschland GmbH * Copyright (c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. 
*/ #ifndef LINUX_IEEE80211_H #define LINUX_IEEE80211_H #include <linux/types.h> #include <linux/if_ether.h> #include <linux/etherdevice.h> #include <asm/byteorder.h> #include <asm/unaligned.h> /* * DS bit usage * * TA = transmitter address * RA = receiver address * DA = destination address * SA = source address * * ToDS FromDS A1(RA) A2(TA) A3 A4 Use * ----------------------------------------------------------------- * 0 0 DA SA BSSID - IBSS/DLS * 0 1 DA BSSID SA - AP -> STA * 1 0 BSSID SA DA - AP <- STA * 1 1 RA TA DA SA unspecified (WDS) */ #define FCS_LEN 4 #define IEEE80211_FCTL_VERS 0x0003 #define IEEE80211_FCTL_FTYPE 0x000c #define IEEE80211_FCTL_STYPE 0x00f0 #define IEEE80211_FCTL_TODS 0x0100 #define IEEE80211_FCTL_FROMDS 0x0200 #define IEEE80211_FCTL_MOREFRAGS 0x0400 #define IEEE80211_FCTL_RETRY 0x0800 #define IEEE80211_FCTL_PM 0x1000 #define IEEE80211_FCTL_MOREDATA 0x2000 #define IEEE80211_FCTL_PROTECTED 0x4000 #define IEEE80211_FCTL_ORDER 0x8000 #define IEEE80211_FCTL_CTL_EXT 0x0f00 #define IEEE80211_SCTL_FRAG 0x000F #define IEEE80211_SCTL_SEQ 0xFFF0 #define IEEE80211_FTYPE_MGMT 0x0000 #define IEEE80211_FTYPE_CTL 0x0004 #define IEEE80211_FTYPE_DATA 0x0008 #define IEEE80211_FTYPE_EXT 0x000c /* management */ #define IEEE80211_STYPE_ASSOC_REQ 0x0000 #define IEEE80211_STYPE_ASSOC_RESP 0x0010 #define IEEE80211_STYPE_REASSOC_REQ 0x0020 #define IEEE80211_STYPE_REASSOC_RESP 0x0030 #define IEEE80211_STYPE_PROBE_REQ 0x0040 #define IEEE80211_STYPE_PROBE_RESP 0x0050 #define IEEE80211_STYPE_BEACON 0x0080 #define IEEE80211_STYPE_ATIM 0x0090 #define IEEE80211_STYPE_DISASSOC 0x00A0 #define IEEE80211_STYPE_AUTH 0x00B0 #define IEEE80211_STYPE_DEAUTH 0x00C0 #define IEEE80211_STYPE_ACTION 0x00D0 /* control */ #define IEEE80211_STYPE_CTL_EXT 0x0060 #define IEEE80211_STYPE_BACK_REQ 0x0080 #define IEEE80211_STYPE_BACK 0x0090 #define IEEE80211_STYPE_PSPOLL 0x00A0 #define IEEE80211_STYPE_RTS 0x00B0 #define IEEE80211_STYPE_CTS 0x00C0 #define IEEE80211_STYPE_ACK 0x00D0 #define IEEE80211_STYPE_CFEND 0x00E0 #define IEEE80211_STYPE_CFENDACK 0x00F0 /* data */ #define IEEE80211_STYPE_DATA 0x0000 #define IEEE80211_STYPE_DATA_CFACK 0x0010 #define IEEE80211_STYPE_DATA_CFPOLL 0x0020 #define IEEE80211_STYPE_DATA_CFACKPOLL 0x0030 #define IEEE80211_STYPE_NULLFUNC 0x0040 #define IEEE80211_STYPE_CFACK 0x0050 #define IEEE80211_STYPE_CFPOLL 0x0060 #define IEEE80211_STYPE_CFACKPOLL 0x0070 #define IEEE80211_STYPE_QOS_DATA 0x0080 #define IEEE80211_STYPE_QOS_DATA_CFACK 0x0090 #define IEEE80211_STYPE_QOS_DATA_CFPOLL 0x00A0 #define IEEE80211_STYPE_QOS_DATA_CFACKPOLL 0x00B0 #define IEEE80211_STYPE_QOS_NULLFUNC 0x00C0 #define IEEE80211_STYPE_QOS_CFACK 0x00D0 #define IEEE80211_STYPE_QOS_CFPOLL 0x00E0 #define IEEE80211_STYPE_QOS_CFACKPOLL 0x00F0 /* extension, added by 802.11ad */ #define IEEE80211_STYPE_DMG_BEACON 0x0000 /* control extension - for IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CTL_EXT */ #define IEEE80211_CTL_EXT_POLL 0x2000 #define IEEE80211_CTL_EXT_SPR 0x3000 #define IEEE80211_CTL_EXT_GRANT 0x4000 #define IEEE80211_CTL_EXT_DMG_CTS 0x5000 #define IEEE80211_CTL_EXT_DMG_DTS 0x6000 #define IEEE80211_CTL_EXT_SSW 0x8000 #define IEEE80211_CTL_EXT_SSW_FBACK 0x9000 #define IEEE80211_CTL_EXT_SSW_ACK 0xa000 #define IEEE80211_SN_MASK ((IEEE80211_SCTL_SEQ) >> 4) #define IEEE80211_MAX_SN IEEE80211_SN_MASK #define IEEE80211_SN_MODULO (IEEE80211_MAX_SN + 1) static inline bool ieee80211_sn_less(u16 sn1, u16 sn2) { return ((sn1 - sn2) & IEEE80211_SN_MASK) > (IEEE80211_SN_MODULO >> 1); } static inline u16 ieee80211_sn_add(u16 sn1, u16 sn2) { return (sn1 + sn2) & IEEE80211_SN_MASK; } static inline u16 ieee80211_sn_inc(u16 sn) {
return ieee80211_sn_add(sn, 1); } static inline u16 ieee80211_sn_sub(u16 sn1, u16 sn2) { return (sn1 - sn2) & IEEE80211_SN_MASK; } #define IEEE80211_SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4) #define IEEE80211_SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ) /* miscellaneous IEEE 802.11 constants */ #define IEEE80211_MAX_FRAG_THRESHOLD 2352 #define IEEE80211_MAX_RTS_THRESHOLD 2353 #define IEEE80211_MAX_AID 2007 #define IEEE80211_MAX_TIM_LEN 251 #define IEEE80211_MAX_MESH_PEERINGS 63 /* Maximum size for the MA-UNITDATA primitive, 802.11 standard section 6.2.1.1.2. 802.11e clarifies the figure in section 7.1.2. The frame body is up to 2304 octets long (maximum MSDU size) plus any crypt overhead. */ #define IEEE80211_MAX_DATA_LEN 2304 /* 802.11ad extends maximum MSDU size for DMG (freq > 40Ghz) networks * to 7920 bytes, see 8.2.3 General frame format */ #define IEEE80211_MAX_DATA_LEN_DMG 7920 /* 30 byte 4 addr hdr, 2 byte QoS, 2304 byte MSDU, 12 byte crypt, 4 byte FCS */ #define IEEE80211_MAX_FRAME_LEN 2352 /* Maximal size of an A-MSDU that can be transported in a HT BA session */ #define IEEE80211_MAX_MPDU_LEN_HT_BA 4095 /* Maximal size of an A-MSDU */ #define IEEE80211_MAX_MPDU_LEN_HT_3839 3839 #define IEEE80211_MAX_MPDU_LEN_HT_7935 7935 #define IEEE80211_MAX_MPDU_LEN_VHT_3895 3895 #define IEEE80211_MAX_MPDU_LEN_VHT_7991 7991 #define IEEE80211_MAX_MPDU_LEN_VHT_11454 11454 #define IEEE80211_MAX_SSID_LEN 32 #define IEEE80211_MAX_MESH_ID_LEN 32 #define IEEE80211_FIRST_TSPEC_TSID 8 #define IEEE80211_NUM_TIDS 16 /* number of user priorities 802.11 uses */ #define IEEE80211_NUM_UPS 8 /* number of ACs */ #define IEEE80211_NUM_ACS 4 #define IEEE80211_QOS_CTL_LEN 2 /* 1d tag mask */ #define IEEE80211_QOS_CTL_TAG1D_MASK 0x0007 /* TID mask */ #define IEEE80211_QOS_CTL_TID_MASK 0x000f /* EOSP */ #define IEEE80211_QOS_CTL_EOSP 0x0010 /* ACK policy */ #define IEEE80211_QOS_CTL_ACK_POLICY_NORMAL 0x0000 #define IEEE80211_QOS_CTL_ACK_POLICY_NOACK 0x0020 #define IEEE80211_QOS_CTL_ACK_POLICY_NO_EXPL 0x0040 #define IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK 0x0060 #define IEEE80211_QOS_CTL_ACK_POLICY_MASK 0x0060 /* A-MSDU 802.11n */ #define IEEE80211_QOS_CTL_A_MSDU_PRESENT 0x0080 /* Mesh Control 802.11s */ #define IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT 0x0100 /* Mesh Power Save Level */ #define IEEE80211_QOS_CTL_MESH_PS_LEVEL 0x0200 /* Mesh Receiver Service Period Initiated */ #define IEEE80211_QOS_CTL_RSPI 0x0400 /* U-APSD queue for WMM IEs sent by AP */ #define IEEE80211_WMM_IE_AP_QOSINFO_UAPSD (1<<7) #define IEEE80211_WMM_IE_AP_QOSINFO_PARAM_SET_CNT_MASK 0x0f /* U-APSD queues for WMM IEs sent by STA */ #define IEEE80211_WMM_IE_STA_QOSINFO_AC_VO (1<<0) #define IEEE80211_WMM_IE_STA_QOSINFO_AC_VI (1<<1) #define IEEE80211_WMM_IE_STA_QOSINFO_AC_BK (1<<2) #define IEEE80211_WMM_IE_STA_QOSINFO_AC_BE (1<<3) #define IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK 0x0f /* U-APSD max SP length for WMM IEs sent by STA */ #define IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL 0x00 #define IEEE80211_WMM_IE_STA_QOSINFO_SP_2 0x01 #define IEEE80211_WMM_IE_STA_QOSINFO_SP_4 0x02 #define IEEE80211_WMM_IE_STA_QOSINFO_SP_6 0x03 #define IEEE80211_WMM_IE_STA_QOSINFO_SP_MASK 0x03 #define IEEE80211_WMM_IE_STA_QOSINFO_SP_SHIFT 5 #define IEEE80211_HT_CTL_LEN 4 struct ieee80211_hdr { __le16 frame_control; __le16 duration_id; u8 addr1[ETH_ALEN]; u8 addr2[ETH_ALEN]; u8 addr3[ETH_ALEN]; __le16 seq_ctrl; u8 addr4[ETH_ALEN]; } __packed __aligned(2); struct ieee80211_hdr_3addr { __le16 frame_control; __le16 duration_id; u8 addr1[ETH_ALEN]; u8 addr2[ETH_ALEN]; 
u8 addr3[ETH_ALEN]; __le16 seq_ctrl; } __packed __aligned(2); struct ieee80211_qos_hdr { __le16 frame_control; __le16 duration_id; u8 addr1[ETH_ALEN]; u8 addr2[ETH_ALEN]; u8 addr3[ETH_ALEN]; __le16 seq_ctrl; __le16 qos_ctrl; } __packed __aligned(2); /** * ieee80211_has_tods - check if IEEE80211_FCTL_TODS is set * @fc: frame control bytes in little-endian byteorder */ static inline bool ieee80211_has_tods(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_TODS)) != 0; } /** * ieee80211_has_fromds - check if IEEE80211_FCTL_FROMDS is set * @fc: frame control bytes in little-endian byteorder */ static inline bool ieee80211_has_fromds(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_FROMDS)) != 0; } /** * ieee80211_has_a4 - check if IEEE80211_FCTL_TODS and IEEE80211_FCTL_FROMDS are set * @fc: frame control bytes in little-endian byteorder */ static inline bool ieee80211_has_a4(__le16 fc) { __le16 tmp = cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS); return (fc & tmp) == tmp; } /** * ieee80211_has_morefrags - check if IEEE80211_FCTL_MOREFRAGS is set * @fc: frame control bytes in little-endian byteorder */ static inline bool ieee80211_has_morefrags(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_MOREFRAGS)) != 0; } /** * ieee80211_has_retry - check if IEEE80211_FCTL_RETRY is set * @fc: frame control bytes in little-endian byteorder */ static inline bool ieee80211_has_retry(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_RETRY)) != 0; } /** * ieee80211_has_pm - check if IEEE80211_FCTL_PM is set * @fc: frame control bytes in little-endian byteorder */ static inline bool ieee80211_has_pm(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_PM)) != 0; } /** * ieee80211_has_moredata - check if IEEE80211_FCTL_MOREDATA is set * @fc: frame control bytes in little-endian byteorder */ static inline bool ieee80211_has_moredata(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_MOREDATA)) != 0; } /** * ieee80211_has_protected - check if IEEE80211_FCTL_PROTECTED is set * @fc: frame control bytes in little-endian byteorder */ static inline bool ieee80211_has_protected(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_PROTECTED)) != 0; } /** * ieee80211_has_order - check if IEEE80211_FCTL_ORDER is set * @fc: frame control bytes in little-endian byteorder */ static inline bool ieee80211_has_order(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_ORDER)) != 0; } /** * ieee80211_is_mgmt - check if type is IEEE80211_FTYPE_MGMT * @fc: frame control bytes in little-endian byteorder */ static inline bool ieee80211_is_mgmt(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE)) == cpu_to_le16(IEEE80211_FTYPE_MGMT); } /** * ieee80211_is_ctl - check if type is IEEE80211_FTYPE_CTL * @fc: frame control bytes in little-endian byteorder */ static inline bool ieee80211_is_ctl(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE)) == cpu_to_le16(IEEE80211_FTYPE_CTL); } /** * ieee80211_is_data - check if type is IEEE80211_FTYPE_DATA * @fc: frame control bytes in little-endian byteorder */ static inline bool ieee80211_is_data(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE)) == cpu_to_le16(IEEE80211_FTYPE_DATA); } /** * ieee80211_is_data_qos - check if type is IEEE80211_FTYPE_DATA and IEEE80211_STYPE_QOS_DATA is set * @fc: frame control bytes in little-endian byteorder */ static inline bool ieee80211_is_data_qos(__le16 fc) { /* * mask with QOS_DATA rather than IEEE80211_FCTL_STYPE as we just need * to check the one bit */ return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | 
IEEE80211_STYPE_QOS_DATA)) == cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA); } /** * ieee80211_is_data_present - check if type is IEEE80211_FTYPE_DATA and has data * @fc: frame control bytes in little-endian byteorder */ static inline bool ieee80211_is_data_present(__le16 fc) { /* * mask with 0x40 and test that that bit is clear to only return true * for the data-containing substypes. */ return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | 0x40)) == cpu_to_le16(IEEE80211_FTYPE_DATA); } /** * ieee80211_is_assoc_req - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ASSOC_REQ * @fc: frame control bytes in little-endian byteorder */ static inline bool ieee80211_is_assoc_req(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ASSOC_REQ); } /** * ieee80211_is_assoc_resp - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ASSOC_RESP * @fc: frame control bytes in little-endian byteorder */ static inline bool ieee80211_is_assoc_resp(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ASSOC_RESP); } /** * ieee80211_is_reassoc_req - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_REASSOC_REQ * @fc: frame control bytes in little-endian byteorder */ static inline bool ieee80211_is_reassoc_req(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_REASSOC_REQ); } /** * ieee80211_is_reassoc_resp - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_REASSOC_RESP * @fc: frame control bytes in little-endian byteorder */ static inline bool ieee80211_is_reassoc_resp(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_REASSOC_RESP); } /** * ieee80211_is_probe_req - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_PROBE_REQ * @fc: frame control bytes in little-endian byteorder */ static inline bool ieee80211_is_probe_req(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_REQ); } /** * ieee80211_is_probe_resp - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_PROBE_RESP * @fc: frame control bytes in little-endian byteorder */ static inline bool ieee80211_is_probe_resp(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_RESP); } /** * ieee80211_is_beacon - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_BEACON * @fc: frame control bytes in little-endian byteorder */ static inline bool ieee80211_is_beacon(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_BEACON); } /** * ieee80211_is_atim - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ATIM * @fc: frame control bytes in little-endian byteorder */ static inline bool ieee80211_is_atim(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ATIM); } /** * ieee80211_is_disassoc - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_DISASSOC * @fc: frame control bytes in little-endian byteorder */ static inline bool ieee80211_is_disassoc(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == cpu_to_le16(IEEE80211_FTYPE_MGMT | 
IEEE80211_STYPE_DISASSOC); } /** * ieee80211_is_auth - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_AUTH * @fc: frame control bytes in little-endian byteorder */ static inline bool ieee80211_is_auth(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_AUTH); } /** * ieee80211_is_deauth - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_DEAUTH * @fc: frame control bytes in little-endian byteorder */ static inline bool ieee80211_is_deauth(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_DEAUTH); } /** * ieee80211_is_action - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ACTION * @fc: frame control bytes in little-endian byteorder */ static inline bool ieee80211_is_action(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ACTION); } /** * ieee80211_is_back_req - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_BACK_REQ * @fc: frame control bytes in little-endian byteorder */ static inline bool ieee80211_is_back_req(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_BACK_REQ); } /** * ieee80211_is_back - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_BACK * @fc: frame control bytes in little-endian byteorder */ static inline bool ieee80211_is_back(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_BACK); } /** * ieee80211_is_pspoll - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_PSPOLL * @fc: frame control bytes in little-endian byteorder */ static inline bool ieee80211_is_pspoll(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_PSPOLL); } /** * ieee80211_is_rts - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_RTS * @fc: frame control bytes in little-endian byteorder */ static inline bool ieee80211_is_rts(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_RTS); } /** * ieee80211_is_cts - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_CTS * @fc: frame control bytes in little-endian byteorder */ static inline bool ieee80211_is_cts(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CTS); } /** * ieee80211_is_ack - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_ACK * @fc: frame control bytes in little-endian byteorder */ static inline bool ieee80211_is_ack(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_ACK); } /** * ieee80211_is_cfend - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_CFEND * @fc: frame control bytes in little-endian byteorder */ static inline bool ieee80211_is_cfend(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CFEND); } /** * ieee80211_is_cfendack - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_CFENDACK * @fc: frame control bytes in little-endian byteorder */ static inline bool ieee80211_is_cfendack(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == 
cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CFENDACK); } /** * ieee80211_is_nullfunc - check if frame is a regular (non-QoS) nullfunc frame * @fc: frame control bytes in little-endian byteorder */ static inline bool ieee80211_is_nullfunc(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC); } /** * ieee80211_is_qos_nullfunc - check if frame is a QoS nullfunc frame * @fc: frame control bytes in little-endian byteorder */ static inline bool ieee80211_is_qos_nullfunc(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_NULLFUNC); } /** * ieee80211_is_bufferable_mmpdu - check if frame is bufferable MMPDU * @fc: frame control field in little-endian byteorder */ static inline bool ieee80211_is_bufferable_mmpdu(__le16 fc) { /* IEEE 802.11-2012, definition of "bufferable management frame"; * note that this ignores the IBSS special case. */ return ieee80211_is_mgmt(fc) && (ieee80211_is_action(fc) || ieee80211_is_disassoc(fc) || ieee80211_is_deauth(fc)); } /** * ieee80211_is_first_frag - check if IEEE80211_SCTL_FRAG is not set * @seq_ctrl: frame sequence control bytes in little-endian byteorder */ static inline bool ieee80211_is_first_frag(__le16 seq_ctrl) { return (seq_ctrl & cpu_to_le16(IEEE80211_SCTL_FRAG)) == 0; } /** * ieee80211_is_frag - check if a frame is a fragment * @hdr: 802.11 header of the frame */ static inline bool ieee80211_is_frag(struct ieee80211_hdr *hdr) { return ieee80211_has_morefrags(hdr->frame_control) || hdr->seq_ctrl & cpu_to_le16(IEEE80211_SCTL_FRAG); } struct ieee80211s_hdr { u8 flags; u8 ttl; __le32 seqnum; u8 eaddr1[ETH_ALEN]; u8 eaddr2[ETH_ALEN]; } __packed __aligned(2); /* Mesh flags */ #define MESH_FLAGS_AE_A4 0x1 #define MESH_FLAGS_AE_A5_A6 0x2 #define MESH_FLAGS_AE 0x3 #define MESH_FLAGS_PS_DEEP 0x4 /** * enum ieee80211_preq_flags - mesh PREQ element flags * * @IEEE80211_PREQ_PROACTIVE_PREP_FLAG: proactive PREP subfield */ enum ieee80211_preq_flags { IEEE80211_PREQ_PROACTIVE_PREP_FLAG = 1<<2, }; /** * enum ieee80211_preq_target_flags - mesh PREQ element per target flags * * @IEEE80211_PREQ_TO_FLAG: target only subfield * @IEEE80211_PREQ_USN_FLAG: unknown target HWMP sequence number subfield */ enum ieee80211_preq_target_flags { IEEE80211_PREQ_TO_FLAG = 1<<0, IEEE80211_PREQ_USN_FLAG = 1<<2, }; /** * struct ieee80211_quiet_ie * * This structure refers to "Quiet information element" */ struct ieee80211_quiet_ie { u8 count; u8 period; __le16 duration; __le16 offset; } __packed; /** * struct ieee80211_msrment_ie * * This structure refers to "Measurement Request/Report information element" */ struct ieee80211_msrment_ie { u8 token; u8 mode; u8 type; u8 request[0]; } __packed; /** * struct ieee80211_channel_sw_ie * * This structure refers to "Channel Switch Announcement information element" */ struct ieee80211_channel_sw_ie { u8 mode; u8 new_ch_num; u8 count; } __packed; /** * struct ieee80211_ext_chansw_ie * * This structure represents the "Extended Channel Switch Announcement element" */ struct ieee80211_ext_chansw_ie { u8 mode; u8 new_operating_class; u8 new_ch_num; u8 count; } __packed; /** * struct ieee80211_sec_chan_offs_ie - secondary channel offset IE * @sec_chan_offs: secondary channel offset, uses IEEE80211_HT_PARAM_CHA_SEC_* * values here * This structure represents the "Secondary Channel Offset element" */ struct ieee80211_sec_chan_offs_ie { u8 sec_chan_offs; } __packed; 
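/* * Illustrative sketch (elem_data and elem_len are hypothetical outputs of * element parsing): the body of a Channel Switch Announcement element maps * directly onto struct ieee80211_channel_sw_ie defined above: const struct ieee80211_channel_sw_ie *csa = (const void *)elem_data; if (elem_len >= sizeof(*csa)) pr_debug("CSA to channel %d in %d beacons\n", csa->new_ch_num, csa->count); */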
/** * struct ieee80211_mesh_chansw_params_ie - mesh channel switch parameters IE * * This structure represents the "Mesh Channel Switch Parameters element" */ struct ieee80211_mesh_chansw_params_ie { u8 mesh_ttl; u8 mesh_flags; __le16 mesh_reason; __le16 mesh_pre_value; } __packed; /** * struct ieee80211_wide_bw_chansw_ie - wide bandwidth channel switch IE */ struct ieee80211_wide_bw_chansw_ie { u8 new_channel_width; u8 new_center_freq_seg0, new_center_freq_seg1; } __packed; /** * struct ieee80211_tim_ie * * This structure refers to "Traffic Indication Map information element" */ struct ieee80211_tim_ie { u8 dtim_count; u8 dtim_period; u8 bitmap_ctrl; /* variable size: 1 - 251 bytes */ u8 virtual_map[1]; } __packed; /** * struct ieee80211_meshconf_ie * * This structure refers to "Mesh Configuration information element" */ struct ieee80211_meshconf_ie { u8 meshconf_psel; u8 meshconf_pmetric; u8 meshconf_congest; u8 meshconf_synch; u8 meshconf_auth; u8 meshconf_form; u8 meshconf_cap; } __packed; /** * enum mesh_config_capab_flags - Mesh Configuration IE capability field flags * * @IEEE80211_MESHCONF_CAPAB_ACCEPT_PLINKS: STA is willing to establish * additional mesh peerings with other mesh STAs * @IEEE80211_MESHCONF_CAPAB_FORWARDING: the STA forwards MSDUs * @IEEE80211_MESHCONF_CAPAB_TBTT_ADJUSTING: TBTT adjustment procedure * is ongoing * @IEEE80211_MESHCONF_CAPAB_POWER_SAVE_LEVEL: STA is in deep sleep mode or has * neighbors in deep sleep mode */ enum mesh_config_capab_flags { IEEE80211_MESHCONF_CAPAB_ACCEPT_PLINKS = 0x01, IEEE80211_MESHCONF_CAPAB_FORWARDING = 0x08, IEEE80211_MESHCONF_CAPAB_TBTT_ADJUSTING = 0x20, IEEE80211_MESHCONF_CAPAB_POWER_SAVE_LEVEL = 0x40, }; #define IEEE80211_MESHCONF_FORM_CONNECTED_TO_GATE 0x1 /** * mesh channel switch parameters element's flag indicator * */ #define WLAN_EID_CHAN_SWITCH_PARAM_TX_RESTRICT BIT(0) #define WLAN_EID_CHAN_SWITCH_PARAM_INITIATOR BIT(1) #define WLAN_EID_CHAN_SWITCH_PARAM_REASON BIT(2) /** * struct ieee80211_rann_ie * * This structure refers to "Root Announcement information element" */ struct ieee80211_rann_ie { u8 rann_flags; u8 rann_hopcount; u8 rann_ttl; u8 rann_addr[ETH_ALEN]; __le32 rann_seq; __le32 rann_interval; __le32 rann_metric; } __packed; enum ieee80211_rann_flags { RANN_FLAG_IS_GATE = 1 << 0, }; enum ieee80211_ht_chanwidth_values { IEEE80211_HT_CHANWIDTH_20MHZ = 0, IEEE80211_HT_CHANWIDTH_ANY = 1, }; /** * enum ieee80211_vht_opmode_bits - VHT operating mode field bits * @IEEE80211_OPMODE_NOTIF_CHANWIDTH_MASK: channel width mask * @IEEE80211_OPMODE_NOTIF_CHANWIDTH_20MHZ: 20 MHz channel width * @IEEE80211_OPMODE_NOTIF_CHANWIDTH_40MHZ: 40 MHz channel width * @IEEE80211_OPMODE_NOTIF_CHANWIDTH_80MHZ: 80 MHz channel width * @IEEE80211_OPMODE_NOTIF_CHANWIDTH_160MHZ: 160 MHz or 80+80 MHz channel width * @IEEE80211_OPMODE_NOTIF_RX_NSS_MASK: number of spatial streams mask * (the NSS value is the value of this field + 1) * @IEEE80211_OPMODE_NOTIF_RX_NSS_SHIFT: number of spatial streams shift * @IEEE80211_OPMODE_NOTIF_RX_NSS_TYPE_BF: indicates streams in SU-MIMO PPDU * using a beamforming steering matrix */ enum ieee80211_vht_opmode_bits { IEEE80211_OPMODE_NOTIF_CHANWIDTH_MASK = 3, IEEE80211_OPMODE_NOTIF_CHANWIDTH_20MHZ = 0, IEEE80211_OPMODE_NOTIF_CHANWIDTH_40MHZ = 1, IEEE80211_OPMODE_NOTIF_CHANWIDTH_80MHZ = 2, IEEE80211_OPMODE_NOTIF_CHANWIDTH_160MHZ = 3, IEEE80211_OPMODE_NOTIF_RX_NSS_MASK = 0x70, IEEE80211_OPMODE_NOTIF_RX_NSS_SHIFT = 4, IEEE80211_OPMODE_NOTIF_RX_NSS_TYPE_BF = 0x80, }; #define WLAN_SA_QUERY_TR_ID_LEN 2 #define WLAN_MEMBERSHIP_LEN
8 #define WLAN_USER_POSITION_LEN 16 /** * struct ieee80211_tpc_report_ie * * This structure refers to "TPC Report element" */ struct ieee80211_tpc_report_ie { u8 tx_power; u8 link_margin; } __packed; struct ieee80211_mgmt { __le16 frame_control; __le16 duration; u8 da[ETH_ALEN]; u8 sa[ETH_ALEN]; u8 bssid[ETH_ALEN]; __le16 seq_ctrl; union { struct { __le16 auth_alg; __le16 auth_transaction; __le16 status_code; /* possibly followed by Challenge text */ u8 variable[0]; } __packed auth; struct { __le16 reason_code; } __packed deauth; struct { __le16 capab_info; __le16 listen_interval; /* followed by SSID and Supported rates */ u8 variable[0]; } __packed assoc_req; struct { __le16 capab_info; __le16 status_code; __le16 aid; /* followed by Supported rates */ u8 variable[0]; } __packed assoc_resp, reassoc_resp; struct { __le16 capab_info; __le16 listen_interval; u8 current_ap[ETH_ALEN]; /* followed by SSID and Supported rates */ u8 variable[0]; } __packed reassoc_req; struct { __le16 reason_code; } __packed disassoc; struct { __le64 timestamp; __le16 beacon_int; __le16 capab_info; /* followed by some of SSID, Supported rates, * FH Params, DS Params, CF Params, IBSS Params, TIM */ u8 variable[0]; } __packed beacon; struct { /* only variable items: SSID, Supported rates */ u8 variable[0]; } __packed probe_req; struct { __le64 timestamp; __le16 beacon_int; __le16 capab_info; /* followed by some of SSID, Supported rates, * FH Params, DS Params, CF Params, IBSS Params */ u8 variable[0]; } __packed probe_resp; struct { u8 category; union { struct { u8 action_code; u8 dialog_token; u8 status_code; u8 variable[0]; } __packed wme_action; struct{ u8 action_code; u8 variable[0]; } __packed chan_switch; struct{ u8 action_code; struct ieee80211_ext_chansw_ie data; u8 variable[0]; } __packed ext_chan_switch; struct{ u8 action_code; u8 dialog_token; u8 element_id; u8 length; struct ieee80211_msrment_ie msr_elem; } __packed measurement; struct{ u8 action_code; u8 dialog_token; __le16 capab; __le16 timeout; __le16 start_seq_num; } __packed addba_req; struct{ u8 action_code; u8 dialog_token; __le16 status; __le16 capab; __le16 timeout; } __packed addba_resp; struct{ u8 action_code; __le16 params; __le16 reason_code; } __packed delba; struct { u8 action_code; u8 variable[0]; } __packed self_prot; struct{ u8 action_code; u8 variable[0]; } __packed mesh_action; struct { u8 action; u8 trans_id[WLAN_SA_QUERY_TR_ID_LEN]; } __packed sa_query; struct { u8 action; u8 smps_control; } __packed ht_smps; struct { u8 action_code; u8 chanwidth; } __packed ht_notify_cw; struct { u8 action_code; u8 dialog_token; __le16 capability; u8 variable[0]; } __packed tdls_discover_resp; struct { u8 action_code; u8 operating_mode; } __packed vht_opmode_notif; struct { u8 action_code; u8 membership[WLAN_MEMBERSHIP_LEN]; u8 position[WLAN_USER_POSITION_LEN]; } __packed vht_group_notif; struct { u8 action_code; u8 dialog_token; u8 tpc_elem_id; u8 tpc_elem_length; struct ieee80211_tpc_report_ie tpc; } __packed tpc_report; struct { u8 action_code; u8 dialog_token; u8 follow_up; u8 tod[6]; u8 toa[6]; __le16 tod_error; __le16 toa_error; u8 variable[0]; } __packed ftm; } u; } __packed action; } u; } __packed __aligned(2); /* Supported rates membership selectors */ #define BSS_MEMBERSHIP_SELECTOR_HT_PHY 127 #define BSS_MEMBERSHIP_SELECTOR_VHT_PHY 126 /* mgmt header + 1 byte category code */ #define IEEE80211_MIN_ACTION_SIZE offsetof(struct ieee80211_mgmt, u.action.u) /* Management MIC information element (IEEE 802.11w) */ struct ieee80211_mmie { u8 
element_id; u8 length; __le16 key_id; u8 sequence_number[6]; u8 mic[8]; } __packed; /* Management MIC information element (IEEE 802.11w) for GMAC and CMAC-256 */ struct ieee80211_mmie_16 { u8 element_id; u8 length; __le16 key_id; u8 sequence_number[6]; u8 mic[16]; } __packed; struct ieee80211_vendor_ie { u8 element_id; u8 len; u8 oui[3]; u8 oui_type; } __packed; struct ieee80211_wmm_ac_param { u8 aci_aifsn; /* AIFSN, ACM, ACI */ u8 cw; /* ECWmin, ECWmax (CW = 2^ECW - 1) */ __le16 txop_limit; } __packed; struct ieee80211_wmm_param_ie { u8 element_id; /* Element ID: 221 (0xdd); */ u8 len; /* Length: 24 */ /* required fields for WMM version 1 */ u8 oui[3]; /* 00:50:f2 */ u8 oui_type; /* 2 */ u8 oui_subtype; /* 1 */ u8 version; /* 1 for WMM version 1.0 */ u8 qos_info; /* AP/STA specific QoS info */ u8 reserved; /* 0 */ /* AC_BE, AC_BK, AC_VI, AC_VO */ struct ieee80211_wmm_ac_param ac[4]; } __packed; /* Control frames */ struct ieee80211_rts { __le16 frame_control; __le16 duration; u8 ra[ETH_ALEN]; u8 ta[ETH_ALEN]; } __packed __aligned(2); struct ieee80211_cts { __le16 frame_control; __le16 duration; u8 ra[ETH_ALEN]; } __packed __aligned(2); struct ieee80211_pspoll { __le16 frame_control; __le16 aid; u8 bssid[ETH_ALEN]; u8 ta[ETH_ALEN]; } __packed __aligned(2); /* TDLS */ /* Channel switch timing */ struct ieee80211_ch_switch_timing { __le16 switch_time; __le16 switch_timeout; } __packed; /* Link-id information element */ struct ieee80211_tdls_lnkie { u8 ie_type; /* Link Identifier IE */ u8 ie_len; u8 bssid[ETH_ALEN]; u8 init_sta[ETH_ALEN]; u8 resp_sta[ETH_ALEN]; } __packed; struct ieee80211_tdls_data { u8 da[ETH_ALEN]; u8 sa[ETH_ALEN]; __be16 ether_type; u8 payload_type; u8 category; u8 action_code; union { struct { u8 dialog_token; __le16 capability; u8 variable[0]; } __packed setup_req; struct { __le16 status_code; u8 dialog_token; __le16 capability; u8 variable[0]; } __packed setup_resp; struct { __le16 status_code; u8 dialog_token; u8 variable[0]; } __packed setup_cfm; struct { __le16 reason_code; u8 variable[0]; } __packed teardown; struct { u8 dialog_token; u8 variable[0]; } __packed discover_req; struct { u8 target_channel; u8 oper_class; u8 variable[0]; } __packed chan_switch_req; struct { __le16 status_code; u8 variable[0]; } __packed chan_switch_resp; } u; } __packed; /* * Peer-to-Peer IE attribute related definitions. */ /** * enum ieee80211_p2p_attr_id - identifies type of peer-to-peer attribute. 
*/ enum ieee80211_p2p_attr_id { IEEE80211_P2P_ATTR_STATUS = 0, IEEE80211_P2P_ATTR_MINOR_REASON, IEEE80211_P2P_ATTR_CAPABILITY, IEEE80211_P2P_ATTR_DEVICE_ID, IEEE80211_P2P_ATTR_GO_INTENT, IEEE80211_P2P_ATTR_GO_CONFIG_TIMEOUT, IEEE80211_P2P_ATTR_LISTEN_CHANNEL, IEEE80211_P2P_ATTR_GROUP_BSSID, IEEE80211_P2P_ATTR_EXT_LISTEN_TIMING, IEEE80211_P2P_ATTR_INTENDED_IFACE_ADDR, IEEE80211_P2P_ATTR_MANAGABILITY, IEEE80211_P2P_ATTR_CHANNEL_LIST, IEEE80211_P2P_ATTR_ABSENCE_NOTICE, IEEE80211_P2P_ATTR_DEVICE_INFO, IEEE80211_P2P_ATTR_GROUP_INFO, IEEE80211_P2P_ATTR_GROUP_ID, IEEE80211_P2P_ATTR_INTERFACE, IEEE80211_P2P_ATTR_OPER_CHANNEL, IEEE80211_P2P_ATTR_INVITE_FLAGS, /* 19 - 220: Reserved */ IEEE80211_P2P_ATTR_VENDOR_SPECIFIC = 221, IEEE80211_P2P_ATTR_MAX }; /* Notice of Absence attribute - described in P2P spec 4.1.14 */ /* Typical max value used here */ #define IEEE80211_P2P_NOA_DESC_MAX 4 struct ieee80211_p2p_noa_desc { u8 count; __le32 duration; __le32 interval; __le32 start_time; } __packed; struct ieee80211_p2p_noa_attr { u8 index; u8 oppps_ctwindow; struct ieee80211_p2p_noa_desc desc[IEEE80211_P2P_NOA_DESC_MAX]; } __packed; #define IEEE80211_P2P_OPPPS_ENABLE_BIT BIT(7) #define IEEE80211_P2P_OPPPS_CTWINDOW_MASK 0x7F /** * struct ieee80211_bar - HT Block Ack Request * * This structure refers to "HT BlockAckReq" as * described in 802.11n draft section 7.2.1.7.1 */ struct ieee80211_bar { __le16 frame_control; __le16 duration; __u8 ra[ETH_ALEN]; __u8 ta[ETH_ALEN]; __le16 control; __le16 start_seq_num; } __packed; /* 802.11 BAR control masks */ #define IEEE80211_BAR_CTRL_ACK_POLICY_NORMAL 0x0000 #define IEEE80211_BAR_CTRL_MULTI_TID 0x0002 #define IEEE80211_BAR_CTRL_CBMTID_COMPRESSED_BA 0x0004 #define IEEE80211_BAR_CTRL_TID_INFO_MASK 0xf000 #define IEEE80211_BAR_CTRL_TID_INFO_SHIFT 12 #define IEEE80211_HT_MCS_MASK_LEN 10 /** * struct ieee80211_mcs_info - MCS information * @rx_mask: RX mask * @rx_highest: highest supported RX rate. If set represents * the highest supported RX data rate in units of 1 Mbps. * If this field is 0 this value should not be used to * consider the highest RX data rate supported. 
* @tx_params: TX parameters */ struct ieee80211_mcs_info { u8 rx_mask[IEEE80211_HT_MCS_MASK_LEN]; __le16 rx_highest; u8 tx_params; u8 reserved[3]; } __packed; /* 802.11n HT capability MCS set */ #define IEEE80211_HT_MCS_RX_HIGHEST_MASK 0x3ff #define IEEE80211_HT_MCS_TX_DEFINED 0x01 #define IEEE80211_HT_MCS_TX_RX_DIFF 0x02 /* value 0 == 1 stream etc */ #define IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK 0x0C #define IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT 2 #define IEEE80211_HT_MCS_TX_MAX_STREAMS 4 #define IEEE80211_HT_MCS_TX_UNEQUAL_MODULATION 0x10 /* * 802.11n D5.0 20.3.5 / 20.6 says: * - indices 0 to 7 and 32 are single spatial stream * - 8 to 31 are multiple spatial streams using equal modulation * [8..15 for two streams, 16..23 for three and 24..31 for four] * - remainder are multiple spatial streams using unequal modulation */ #define IEEE80211_HT_MCS_UNEQUAL_MODULATION_START 33 #define IEEE80211_HT_MCS_UNEQUAL_MODULATION_START_BYTE \ (IEEE80211_HT_MCS_UNEQUAL_MODULATION_START / 8) /** * struct ieee80211_ht_cap - HT capabilities * * This structure is the "HT capabilities element" as * described in 802.11n D5.0 7.3.2.57 */ struct ieee80211_ht_cap { __le16 cap_info; u8 ampdu_params_info; /* 16 bytes MCS information */ struct ieee80211_mcs_info mcs; __le16 extended_ht_cap_info; __le32 tx_BF_cap_info; u8 antenna_selection_info; } __packed; /* 802.11n HT capabilities masks (for cap_info) */ #define IEEE80211_HT_CAP_LDPC_CODING 0x0001 #define IEEE80211_HT_CAP_SUP_WIDTH_20_40 0x0002 #define IEEE80211_HT_CAP_SM_PS 0x000C #define IEEE80211_HT_CAP_SM_PS_SHIFT 2 #define IEEE80211_HT_CAP_GRN_FLD 0x0010 #define IEEE80211_HT_CAP_SGI_20 0x0020 #define IEEE80211_HT_CAP_SGI_40 0x0040 #define IEEE80211_HT_CAP_TX_STBC 0x0080 #define IEEE80211_HT_CAP_RX_STBC 0x0300 #define IEEE80211_HT_CAP_RX_STBC_SHIFT 8 #define IEEE80211_HT_CAP_DELAY_BA 0x0400 #define IEEE80211_HT_CAP_MAX_AMSDU 0x0800 #define IEEE80211_HT_CAP_DSSSCCK40 0x1000 #define IEEE80211_HT_CAP_RESERVED 0x2000 #define IEEE80211_HT_CAP_40MHZ_INTOLERANT 0x4000 #define IEEE80211_HT_CAP_LSIG_TXOP_PROT 0x8000 /* 802.11n HT extended capabilities masks (for extended_ht_cap_info) */ #define IEEE80211_HT_EXT_CAP_PCO 0x0001 #define IEEE80211_HT_EXT_CAP_PCO_TIME 0x0006 #define IEEE80211_HT_EXT_CAP_PCO_TIME_SHIFT 1 #define IEEE80211_HT_EXT_CAP_MCS_FB 0x0300 #define IEEE80211_HT_EXT_CAP_MCS_FB_SHIFT 8 #define IEEE80211_HT_EXT_CAP_HTC_SUP 0x0400 #define IEEE80211_HT_EXT_CAP_RD_RESPONDER 0x0800 /* 802.11n HT capability AMPDU settings (for ampdu_params_info) */ #define IEEE80211_HT_AMPDU_PARM_FACTOR 0x03 #define IEEE80211_HT_AMPDU_PARM_DENSITY 0x1C #define IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT 2 /* * Maximum length of AMPDU that the STA can receive in high-throughput (HT). * Length = 2 ^ (13 + max_ampdu_length_exp) - 1 (octets) */ enum ieee80211_max_ampdu_length_exp { IEEE80211_HT_MAX_AMPDU_8K = 0, IEEE80211_HT_MAX_AMPDU_16K = 1, IEEE80211_HT_MAX_AMPDU_32K = 2, IEEE80211_HT_MAX_AMPDU_64K = 3 };
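/*
 * Illustrative sketch, not part of the original header: the formula above
 * in numbers. IEEE80211_HT_MAX_AMPDU_64K is exponent 3, so the largest HT
 * A-MPDU is 2^(13 + 3) - 1 = 65535 octets. The helper name is hypothetical;
 * the literal 13 matches the constant in the formula above.
 */
static inline u32 example_max_ampdu_len(enum ieee80211_max_ampdu_length_exp exp)
{
	return (1U << (13 + exp)) - 1;	/* octets */
}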
/* * Maximum length of AMPDU that the STA can receive in VHT. * Length = 2 ^ (13 + max_ampdu_length_exp) - 1 (octets) */ enum ieee80211_vht_max_ampdu_length_exp { IEEE80211_VHT_MAX_AMPDU_8K = 0, IEEE80211_VHT_MAX_AMPDU_16K = 1, IEEE80211_VHT_MAX_AMPDU_32K = 2, IEEE80211_VHT_MAX_AMPDU_64K = 3, IEEE80211_VHT_MAX_AMPDU_128K = 4, IEEE80211_VHT_MAX_AMPDU_256K = 5, IEEE80211_VHT_MAX_AMPDU_512K = 6, IEEE80211_VHT_MAX_AMPDU_1024K = 7 }; #define IEEE80211_HT_MAX_AMPDU_FACTOR 13 /* Minimum MPDU start spacing */ enum ieee80211_min_mpdu_spacing { IEEE80211_HT_MPDU_DENSITY_NONE = 0, /* No restriction */ IEEE80211_HT_MPDU_DENSITY_0_25 = 1, /* 1/4 usec */ IEEE80211_HT_MPDU_DENSITY_0_5 = 2, /* 1/2 usec */ IEEE80211_HT_MPDU_DENSITY_1 = 3, /* 1 usec */ IEEE80211_HT_MPDU_DENSITY_2 = 4, /* 2 usec */ IEEE80211_HT_MPDU_DENSITY_4 = 5, /* 4 usec */ IEEE80211_HT_MPDU_DENSITY_8 = 6, /* 8 usec */ IEEE80211_HT_MPDU_DENSITY_16 = 7 /* 16 usec */ }; /** * struct ieee80211_ht_operation - HT operation IE * * This structure is the "HT operation element" as * described in 802.11n-2009 7.3.2.57 */ struct ieee80211_ht_operation { u8 primary_chan; u8 ht_param; __le16 operation_mode; __le16 stbc_param; u8 basic_set[16]; } __packed; /* for ht_param */ #define IEEE80211_HT_PARAM_CHA_SEC_OFFSET 0x03 #define IEEE80211_HT_PARAM_CHA_SEC_NONE 0x00 #define IEEE80211_HT_PARAM_CHA_SEC_ABOVE 0x01 #define IEEE80211_HT_PARAM_CHA_SEC_BELOW 0x03 #define IEEE80211_HT_PARAM_CHAN_WIDTH_ANY 0x04 #define IEEE80211_HT_PARAM_RIFS_MODE 0x08 /* for operation_mode */ #define IEEE80211_HT_OP_MODE_PROTECTION 0x0003 #define IEEE80211_HT_OP_MODE_PROTECTION_NONE 0 #define IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER 1 #define IEEE80211_HT_OP_MODE_PROTECTION_20MHZ 2 #define IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED 3 #define IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT 0x0004 #define IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT 0x0010 #define IEEE80211_HT_OP_MODE_CCFS2_SHIFT 5 #define IEEE80211_HT_OP_MODE_CCFS2_MASK 0x1fe0 /* for stbc_param */ #define IEEE80211_HT_STBC_PARAM_DUAL_BEACON 0x0040 #define IEEE80211_HT_STBC_PARAM_DUAL_CTS_PROT 0x0080 #define IEEE80211_HT_STBC_PARAM_STBC_BEACON 0x0100 #define IEEE80211_HT_STBC_PARAM_LSIG_TXOP_FULLPROT 0x0200 #define IEEE80211_HT_STBC_PARAM_PCO_ACTIVE 0x0400 #define IEEE80211_HT_STBC_PARAM_PCO_PHASE 0x0800 /* block-ack parameters */ #define IEEE80211_ADDBA_PARAM_AMSDU_MASK 0x0001 #define IEEE80211_ADDBA_PARAM_POLICY_MASK 0x0002 #define IEEE80211_ADDBA_PARAM_TID_MASK 0x003C #define IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK 0xFFC0 #define IEEE80211_DELBA_PARAM_TID_MASK 0xF000 #define IEEE80211_DELBA_PARAM_INITIATOR_MASK 0x0800 /* * A-MPDU buffer sizes * According to HT, the size varies from 8 to 64 frames. * HE adds the ability to have up to 256 frames. */ #define IEEE80211_MIN_AMPDU_BUF 0x8 #define IEEE80211_MAX_AMPDU_BUF_HT 0x40 #define IEEE80211_MAX_AMPDU_BUF 0x100 /* Spatial Multiplexing Power Save Modes (for capability) */ #define WLAN_HT_CAP_SM_PS_STATIC 0 #define WLAN_HT_CAP_SM_PS_DYNAMIC 1 #define WLAN_HT_CAP_SM_PS_INVALID 2 #define WLAN_HT_CAP_SM_PS_DISABLED 3 /* for SM power control field lower two bits */ #define WLAN_HT_SMPS_CONTROL_DISABLED 0 #define WLAN_HT_SMPS_CONTROL_STATIC 1 #define WLAN_HT_SMPS_CONTROL_DYNAMIC 3 /** * struct ieee80211_vht_mcs_info - VHT MCS information * @rx_mcs_map: RX MCS map 2 bits for each stream, total 8 streams * @rx_highest: Indicates highest long GI VHT PPDU data rate * STA can receive. Rate expressed in units of 1 Mbps. * If this field is 0 this value should not be used to * consider the highest RX data rate supported.
* The top 3 bits of this field indicate the Maximum NSTS total * (a beamformee capability). * @tx_mcs_map: TX MCS map 2 bits for each stream, total 8 streams * @tx_highest: Indicates highest long GI VHT PPDU data rate * STA can transmit. Rate expressed in units of 1 Mbps. * If this field is 0 this value should not be used to * consider the highest TX data rate supported. * The top 2 bits of this field are reserved, the * 3rd bit from the top indicates VHT Extended NSS BW * Capability. */ struct ieee80211_vht_mcs_info { __le16 rx_mcs_map; __le16 rx_highest; __le16 tx_mcs_map; __le16 tx_highest; } __packed; /* for rx_highest */ #define IEEE80211_VHT_MAX_NSTS_TOTAL_SHIFT 13 #define IEEE80211_VHT_MAX_NSTS_TOTAL_MASK (7 << IEEE80211_VHT_MAX_NSTS_TOTAL_SHIFT) /* for tx_highest */ #define IEEE80211_VHT_EXT_NSS_BW_CAPABLE (1 << 13) /** * enum ieee80211_vht_mcs_support - VHT MCS support definitions * @IEEE80211_VHT_MCS_SUPPORT_0_7: MCSes 0-7 are supported for the * number of streams * @IEEE80211_VHT_MCS_SUPPORT_0_8: MCSes 0-8 are supported * @IEEE80211_VHT_MCS_SUPPORT_0_9: MCSes 0-9 are supported * @IEEE80211_VHT_MCS_NOT_SUPPORTED: This number of streams isn't supported * * These definitions are used in each 2-bit subfield of the @rx_mcs_map * and @tx_mcs_map fields of &struct ieee80211_vht_mcs_info, which are * both split into 8 subfields by number of streams. These values indicate * which MCSes are supported for the number of streams the value appears * for. */ enum ieee80211_vht_mcs_support { IEEE80211_VHT_MCS_SUPPORT_0_7 = 0, IEEE80211_VHT_MCS_SUPPORT_0_8 = 1, IEEE80211_VHT_MCS_SUPPORT_0_9 = 2, IEEE80211_VHT_MCS_NOT_SUPPORTED = 3, }; /** * struct ieee80211_vht_cap - VHT capabilities * * This structure is the "VHT capabilities element" as * described in 802.11ac D3.0 8.4.2.160 * @vht_cap_info: VHT capability info * @supp_mcs: VHT MCS supported rates */ struct ieee80211_vht_cap { __le32 vht_cap_info; struct ieee80211_vht_mcs_info supp_mcs; } __packed; /** * enum ieee80211_vht_chanwidth - VHT channel width * @IEEE80211_VHT_CHANWIDTH_USE_HT: use the HT operation IE to * determine the channel width (20 or 40 MHz) * @IEEE80211_VHT_CHANWIDTH_80MHZ: 80 MHz bandwidth * @IEEE80211_VHT_CHANWIDTH_160MHZ: 160 MHz bandwidth * @IEEE80211_VHT_CHANWIDTH_80P80MHZ: 80+80 MHz bandwidth */ enum ieee80211_vht_chanwidth { IEEE80211_VHT_CHANWIDTH_USE_HT = 0, IEEE80211_VHT_CHANWIDTH_80MHZ = 1, IEEE80211_VHT_CHANWIDTH_160MHZ = 2, IEEE80211_VHT_CHANWIDTH_80P80MHZ = 3, }; /** * struct ieee80211_vht_operation - VHT operation IE * * This structure is the "VHT operation element" as * described in 802.11ac D3.0 8.4.2.161 * @chan_width: Operating channel width * @center_freq_seg0_idx: center freq segment 0 index * @center_freq_seg1_idx: center freq segment 1 index * @basic_mcs_set: VHT Basic MCS rate set */ struct ieee80211_vht_operation { u8 chan_width; u8 center_freq_seg0_idx; u8 center_freq_seg1_idx; __le16 basic_mcs_set; } __packed; /** * struct ieee80211_he_cap_elem - HE capabilities element * * This structure is the "HE capabilities element" fixed fields as * described in P802.11ax_D4.0 section 9.4.2.242.2 and 9.4.2.242.3 */ struct ieee80211_he_cap_elem { u8 mac_cap_info[6]; u8 phy_cap_info[11]; } __packed; #define IEEE80211_TX_RX_MCS_NSS_DESC_MAX_LEN 5
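/*
 * Illustrative sketch, not part of the original header: rx_mcs_map and
 * tx_mcs_map pack one 2-bit ieee80211_vht_mcs_support value per
 * spatial-stream count, streams 1..8 from the least significant bits up.
 * The hypothetical helper extracts the subfield for @nss (1..8) streams.
 */
static inline u8 example_vht_mcs_for_nss(__le16 mcs_map, int nss)
{
	return (le16_to_cpu(mcs_map) >> (2 * (nss - 1))) & 0x3;
}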
/** * enum ieee80211_he_mcs_support - HE MCS support definitions * @IEEE80211_HE_MCS_SUPPORT_0_7: MCSes 0-7 are supported for the * number of streams * @IEEE80211_HE_MCS_SUPPORT_0_9: MCSes 0-9 are supported * @IEEE80211_HE_MCS_SUPPORT_0_11: MCSes 0-11 are supported * @IEEE80211_HE_MCS_NOT_SUPPORTED: This number of streams isn't supported * * These definitions are used in each 2-bit subfield of the rx_mcs_* * and tx_mcs_* fields of &struct ieee80211_he_mcs_nss_supp, which are * both split into 8 subfields by number of streams. These values indicate * which MCSes are supported for the number of streams the value appears * for. */ enum ieee80211_he_mcs_support { IEEE80211_HE_MCS_SUPPORT_0_7 = 0, IEEE80211_HE_MCS_SUPPORT_0_9 = 1, IEEE80211_HE_MCS_SUPPORT_0_11 = 2, IEEE80211_HE_MCS_NOT_SUPPORTED = 3, }; /** * struct ieee80211_he_mcs_nss_supp - HE Tx/Rx HE MCS NSS Support Field * * This structure holds the data required for the Tx/Rx HE MCS NSS Support Field * described in P802.11ax_D2.0 section 9.4.2.237.4 * * @rx_mcs_80: Rx MCS map 2 bits for each stream, total 8 streams, for channel * widths less than 80MHz. * @tx_mcs_80: Tx MCS map 2 bits for each stream, total 8 streams, for channel * widths less than 80MHz. * @rx_mcs_160: Rx MCS map 2 bits for each stream, total 8 streams, for channel * width 160MHz. * @tx_mcs_160: Tx MCS map 2 bits for each stream, total 8 streams, for channel * width 160MHz. * @rx_mcs_80p80: Rx MCS map 2 bits for each stream, total 8 streams, for * channel width 80p80MHz. * @tx_mcs_80p80: Tx MCS map 2 bits for each stream, total 8 streams, for * channel width 80p80MHz. */ struct ieee80211_he_mcs_nss_supp { __le16 rx_mcs_80; __le16 tx_mcs_80; __le16 rx_mcs_160; __le16 tx_mcs_160; __le16 rx_mcs_80p80; __le16 tx_mcs_80p80; } __packed; /** * struct ieee80211_he_operation - HE operation element * * This structure is the "HE operation element" fields as * described in P802.11ax_D4.0 section 9.4.2.243 */ struct ieee80211_he_operation { __le32 he_oper_params; __le16 he_mcs_nss_set; /* Optional 0,1,3,4,5,7 or 8 bytes: depends on @he_oper_params */ u8 optional[0]; } __packed; /** * struct ieee80211_he_mu_edca_param_ac_rec - MU AC Parameter Record field * * This structure is the "MU AC Parameter Record" fields as * described in P802.11ax_D4.0 section 9.4.2.245 */ struct ieee80211_he_mu_edca_param_ac_rec { u8 aifsn; u8 ecw_min_max; u8 mu_edca_timer; } __packed; /** * struct ieee80211_mu_edca_param_set - MU EDCA Parameter Set element * * This structure is the "MU EDCA Parameter Set element" fields as * described in P802.11ax_D4.0 section 9.4.2.245 */ struct ieee80211_mu_edca_param_set { u8 mu_qos_info; struct ieee80211_he_mu_edca_param_ac_rec ac_be; struct ieee80211_he_mu_edca_param_ac_rec ac_bk; struct ieee80211_he_mu_edca_param_ac_rec ac_vi; struct ieee80211_he_mu_edca_param_ac_rec ac_vo; } __packed; /* 802.11ac VHT Capabilities */ #define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895 0x00000000 #define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 0x00000001 #define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 0x00000002 #define IEEE80211_VHT_CAP_MAX_MPDU_MASK 0x00000003 #define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ 0x00000004 #define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ 0x00000008 #define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK 0x0000000C #define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_SHIFT 2 #define IEEE80211_VHT_CAP_RXLDPC 0x00000010 #define IEEE80211_VHT_CAP_SHORT_GI_80 0x00000020 #define IEEE80211_VHT_CAP_SHORT_GI_160 0x00000040 #define IEEE80211_VHT_CAP_TXSTBC 0x00000080 #define IEEE80211_VHT_CAP_RXSTBC_1 0x00000100 #define IEEE80211_VHT_CAP_RXSTBC_2 0x00000200 #define IEEE80211_VHT_CAP_RXSTBC_3 0x00000300 #define IEEE80211_VHT_CAP_RXSTBC_4 0x00000400 #define IEEE80211_VHT_CAP_RXSTBC_MASK 0x00000700 #define IEEE80211_VHT_CAP_RXSTBC_SHIFT 8
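/*
 * Illustrative sketch, not part of the original header: multi-bit subfields
 * of vht_cap_info are recovered with the matching *_MASK/*_SHIFT pair
 * defined above. The hypothetical helper returns how many spatial streams
 * the sender supports for RX STBC (0 means RX STBC is not supported).
 */
static inline u8 example_vht_rx_stbc_streams(const struct ieee80211_vht_cap *cap)
{
	u32 info = le32_to_cpu(cap->vht_cap_info);

	return (info & IEEE80211_VHT_CAP_RXSTBC_MASK) >>
	       IEEE80211_VHT_CAP_RXSTBC_SHIFT;
}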
#define IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE 0x00000800 #define IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE 0x00001000 #define IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT 13 #define IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK \ (7 << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT) #define IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT 16 #define IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK \ (7 << IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT) #define IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE 0x00080000 #define IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE 0x00100000 #define IEEE80211_VHT_CAP_VHT_TXOP_PS 0x00200000 #define IEEE80211_VHT_CAP_HTC_VHT 0x00400000 #define IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT 23 #define IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK \ (7 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT) #define IEEE80211_VHT_CAP_VHT_LINK_ADAPTATION_VHT_UNSOL_MFB 0x08000000 #define IEEE80211_VHT_CAP_VHT_LINK_ADAPTATION_VHT_MRQ_MFB 0x0c000000 #define IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN 0x10000000 #define IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN 0x20000000 #define IEEE80211_VHT_CAP_EXT_NSS_BW_SHIFT 30 #define IEEE80211_VHT_CAP_EXT_NSS_BW_MASK 0xc0000000 /** * ieee80211_get_vht_max_nss - return max NSS for a given bandwidth/MCS * @cap: VHT capabilities of the peer * @bw: bandwidth to use * @mcs: MCS index to use * @ext_nss_bw_capable: indicates whether or not the local transmitter * (rate scaling algorithm) can deal with the new logic * (dot11VHTExtendedNSSBWCapable) * * Due to the VHT Extended NSS Bandwidth Support, the maximum NSS can * vary for a given BW/MCS. This function parses the data. * * Note: This function is exported by cfg80211. */ int ieee80211_get_vht_max_nss(struct ieee80211_vht_cap *cap, enum ieee80211_vht_chanwidth bw, int mcs, bool ext_nss_bw_capable); /* 802.11ax HE MAC capabilities */ #define IEEE80211_HE_MAC_CAP0_HTC_HE 0x01 #define IEEE80211_HE_MAC_CAP0_TWT_REQ 0x02 #define IEEE80211_HE_MAC_CAP0_TWT_RES 0x04 #define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_NOT_SUPP 0x00 #define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_LEVEL_1 0x08 #define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_LEVEL_2 0x10 #define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_LEVEL_3 0x18 #define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_MASK 0x18 #define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_1 0x00 #define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_2 0x20 #define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_4 0x40 #define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_8 0x60 #define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_16 0x80 #define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_32 0xa0 #define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_64 0xc0 #define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_UNLIMITED 0xe0 #define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_MASK 0xe0 #define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_UNLIMITED 0x00 #define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_128 0x01 #define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_256 0x02 #define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_512 0x03 #define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_MASK 0x03 #define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_0US 0x00 #define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_8US 0x04 #define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US 0x08 #define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_MASK 0x0c #define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_1 0x00 #define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_2 0x10 #define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_3 0x20 #define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_4 0x30 #define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_5 0x40 #define 
IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_6 0x50 #define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_7 0x60 #define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8 0x70 #define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_MASK 0x70 /* Link adaptation is split between byte HE_MAC_CAP1 and * HE_MAC_CAP2. It should be set only if IEEE80211_HE_MAC_CAP0_HTC_HE * is set, in which case the following values apply: * 0 = No feedback. * 1 = reserved. * 2 = Unsolicited feedback. * 3 = both */ #define IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION 0x80 #define IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION 0x01 #define IEEE80211_HE_MAC_CAP2_ALL_ACK 0x02 #define IEEE80211_HE_MAC_CAP2_TRS 0x04 #define IEEE80211_HE_MAC_CAP2_BSR 0x08 #define IEEE80211_HE_MAC_CAP2_BCAST_TWT 0x10 #define IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP 0x20 #define IEEE80211_HE_MAC_CAP2_MU_CASCADING 0x40 #define IEEE80211_HE_MAC_CAP2_ACK_EN 0x80 #define IEEE80211_HE_MAC_CAP3_OMI_CONTROL 0x02 #define IEEE80211_HE_MAC_CAP3_OFDMA_RA 0x04 /* The maximum length of an A-MPDU is defined by the combination of the Maximum * A-MPDU Length Exponent field in the HT capabilities, VHT capabilities and the * same field in the HE capabilities. */ #define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_USE_VHT 0x00 #define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_VHT_1 0x08 #define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_VHT_2 0x10 #define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_RESERVED 0x18 #define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK 0x18 #define IEEE80211_HE_MAC_CAP3_AMSDU_FRAG 0x20 #define IEEE80211_HE_MAC_CAP3_FLEX_TWT_SCHED 0x40 #define IEEE80211_HE_MAC_CAP3_RX_CTRL_FRAME_TO_MULTIBSS 0x80 #define IEEE80211_HE_MAC_CAP4_BSRP_BQRP_A_MPDU_AGG 0x01 #define IEEE80211_HE_MAC_CAP4_QTP 0x02 #define IEEE80211_HE_MAC_CAP4_BQR 0x04 #define IEEE80211_HE_MAC_CAP4_SRP_RESP 0x08 #define IEEE80211_HE_MAC_CAP4_NDP_FB_REP 0x10 #define IEEE80211_HE_MAC_CAP4_OPS 0x20 #define IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU 0x40 /* Multi TID agg TX is split between byte #4 and #5 * The value is a combination of B39,B40,B41 */ #define IEEE80211_HE_MAC_CAP4_MULTI_TID_AGG_TX_QOS_B39 0x80 #define IEEE80211_HE_MAC_CAP5_MULTI_TID_AGG_TX_QOS_B40 0x01 #define IEEE80211_HE_MAC_CAP5_MULTI_TID_AGG_TX_QOS_B41 0x02 #define IEEE80211_HE_MAC_CAP5_SUBCHAN_SELECVITE_TRANSMISSION 0x04 #define IEEE80211_HE_MAC_CAP5_UL_2x996_TONE_RU 0x08 #define IEEE80211_HE_MAC_CAP5_OM_CTRL_UL_MU_DATA_DIS_RX 0x10 #define IEEE80211_HE_MAC_CAP5_HE_DYNAMIC_SM_PS 0x20 #define IEEE80211_HE_MAC_CAP5_PUNCTURED_SOUNDING 0x40 #define IEEE80211_HE_MAC_CAP5_HT_VHT_TRIG_FRAME_RX 0x80 /* 802.11ax HE PHY capabilities */ #define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G 0x02 #define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G 0x04 #define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G 0x08 #define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G 0x10 #define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_2G 0x20 #define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_5G 0x40 #define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK 0xfe #define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_80MHZ_ONLY_SECOND_20MHZ 0x01 #define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_80MHZ_ONLY_SECOND_40MHZ 0x02 #define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_160MHZ_ONLY_SECOND_20MHZ 0x04 #define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_160MHZ_ONLY_SECOND_40MHZ 0x08 #define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK 0x0f #define IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A 0x10 #define IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD 0x20 #define
IEEE80211_HE_PHY_CAP1_HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US 0x40 /* Midamble RX/TX Max NSTS is split between byte #2 and byte #3 */ #define IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS 0x80 #define IEEE80211_HE_PHY_CAP2_MIDAMBLE_RX_TX_MAX_NSTS 0x01 #define IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US 0x02 #define IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ 0x04 #define IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ 0x08 #define IEEE80211_HE_PHY_CAP2_DOPPLER_TX 0x10 #define IEEE80211_HE_PHY_CAP2_DOPPLER_RX 0x20 /* Note that the meaning of UL MU below is different between an AP and a non-AP * sta, where in the AP case it indicates support for Rx and in the non-AP sta * case it indicates support for Tx. */ #define IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO 0x40 #define IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO 0x80 #define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_NO_DCM 0x00 #define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_BPSK 0x01 #define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_QPSK 0x02 #define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_16_QAM 0x03 #define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK 0x03 #define IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_1 0x00 #define IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_2 0x04 #define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_NO_DCM 0x00 #define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_BPSK 0x08 #define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_QPSK 0x10 #define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_16_QAM 0x18 #define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_MASK 0x18 #define IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_1 0x00 #define IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_2 0x20 #define IEEE80211_HE_PHY_CAP3_RX_HE_MU_PPDU_FROM_NON_AP_STA 0x40 #define IEEE80211_HE_PHY_CAP3_SU_BEAMFORMER 0x80 #define IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE 0x01 #define IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER 0x02 /* Minimal allowed value of Max STS under 80MHz is 3 */ #define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_4 0x0c #define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_5 0x10 #define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_6 0x14 #define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_7 0x18 #define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_8 0x1c #define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_MASK 0x1c /* Minimal allowed value of Max STS above 80MHz is 3 */ #define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_4 0x60 #define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_5 0x80 #define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_6 0xa0 #define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_7 0xc0 #define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_8 0xe0 #define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_MASK 0xe0 #define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_1 0x00 #define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_2 0x01 #define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_3 0x02 #define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_4 0x03 #define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_5 0x04 #define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_6 0x05 #define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_7 0x06 #define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_8 0x07 #define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK 0x07 #define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_1 0x00 #define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2 0x08 #define 
IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_3 0x10 #define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_4 0x18 #define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_5 0x20 #define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_6 0x28 #define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_7 0x30 #define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_8 0x38 #define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_MASK 0x38 #define IEEE80211_HE_PHY_CAP5_NG16_SU_FEEDBACK 0x40 #define IEEE80211_HE_PHY_CAP5_NG16_MU_FEEDBACK 0x80 #define IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_42_SU 0x01 #define IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU 0x02 #define IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMER_FB 0x04 #define IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMER_FB 0x08 #define IEEE80211_HE_PHY_CAP6_TRIG_CQI_FB 0x10 #define IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE 0x20 #define IEEE80211_HE_PHY_CAP6_PARTIAL_BANDWIDTH_DL_MUMIMO 0x40 #define IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT 0x80 #define IEEE80211_HE_PHY_CAP7_SRP_BASED_SR 0x01 #define IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_AR 0x02 #define IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI 0x04 #define IEEE80211_HE_PHY_CAP7_MAX_NC_1 0x08 #define IEEE80211_HE_PHY_CAP7_MAX_NC_2 0x10 #define IEEE80211_HE_PHY_CAP7_MAX_NC_3 0x18 #define IEEE80211_HE_PHY_CAP7_MAX_NC_4 0x20 #define IEEE80211_HE_PHY_CAP7_MAX_NC_5 0x28 #define IEEE80211_HE_PHY_CAP7_MAX_NC_6 0x30 #define IEEE80211_HE_PHY_CAP7_MAX_NC_7 0x38 #define IEEE80211_HE_PHY_CAP7_MAX_NC_MASK 0x38 #define IEEE80211_HE_PHY_CAP7_STBC_TX_ABOVE_80MHZ 0x40 #define IEEE80211_HE_PHY_CAP7_STBC_RX_ABOVE_80MHZ 0x80 #define IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI 0x01 #define IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G 0x02 #define IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU 0x04 #define IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU 0x08 #define IEEE80211_HE_PHY_CAP8_HE_ER_SU_1XLTF_AND_08_US_GI 0x10 #define IEEE80211_HE_PHY_CAP8_MIDAMBLE_RX_TX_2X_AND_1XLTF 0x20 #define IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_242 0x00 #define IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_484 0x40 #define IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_996 0x80 #define IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_2x996 0xc0 #define IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_MASK 0xc0 #define IEEE80211_HE_PHY_CAP9_LONGER_THAN_16_SIGB_OFDM_SYM 0x01 #define IEEE80211_HE_PHY_CAP9_NON_TRIGGERED_CQI_FEEDBACK 0x02 #define IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU 0x04 #define IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU 0x08 #define IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB 0x10 #define IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB 0x20 #define IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_0US 0x00 #define IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_8US 0x40 #define IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_16US 0x80 #define IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_RESERVED 0xc0 #define IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_MASK 0xc0 /* 802.11ax HE TX/RX MCS NSS Support */ #define IEEE80211_TX_RX_MCS_NSS_SUPP_HIGHEST_MCS_POS (3) #define IEEE80211_TX_RX_MCS_NSS_SUPP_TX_BITMAP_POS (6) #define IEEE80211_TX_RX_MCS_NSS_SUPP_RX_BITMAP_POS (11) #define IEEE80211_TX_RX_MCS_NSS_SUPP_TX_BITMAP_MASK 0x07c0 #define IEEE80211_TX_RX_MCS_NSS_SUPP_RX_BITMAP_MASK 0xf800 /* TX/RX HE MCS Support field Highest MCS subfield encoding */ enum ieee80211_he_highest_mcs_supported_subfield_enc { HIGHEST_MCS_SUPPORTED_MCS7 = 0, HIGHEST_MCS_SUPPORTED_MCS8, 
HIGHEST_MCS_SUPPORTED_MCS9, HIGHEST_MCS_SUPPORTED_MCS10, HIGHEST_MCS_SUPPORTED_MCS11, }; /* Calculate 802.11ax HE capabilities IE Tx/Rx HE MCS NSS Support Field size */ static inline u8 ieee80211_he_mcs_nss_size(const struct ieee80211_he_cap_elem *he_cap) { u8 count = 4; if (he_cap->phy_cap_info[0] & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G) count += 4; if (he_cap->phy_cap_info[0] & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G) count += 4; return count; } /* 802.11ax HE PPE Thresholds */ #define IEEE80211_PPE_THRES_NSS_SUPPORT_2NSS (1) #define IEEE80211_PPE_THRES_NSS_POS (0) #define IEEE80211_PPE_THRES_NSS_MASK (7) #define IEEE80211_PPE_THRES_RU_INDEX_BITMASK_2x966_AND_966_RU \ (BIT(5) | BIT(6)) #define IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK 0x78 #define IEEE80211_PPE_THRES_RU_INDEX_BITMASK_POS (3) #define IEEE80211_PPE_THRES_INFO_PPET_SIZE (3) /* * Calculate 802.11ax HE capabilities IE PPE field size * Input: Header byte of ppe_thres (first byte), and HE capa IE's PHY cap u8* */ static inline u8 ieee80211_he_ppe_size(u8 ppe_thres_hdr, const u8 *phy_cap_info) { u8 n; if ((phy_cap_info[6] & IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) == 0) return 0; n = hweight8(ppe_thres_hdr & IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK); n *= (1 + ((ppe_thres_hdr & IEEE80211_PPE_THRES_NSS_MASK) >> IEEE80211_PPE_THRES_NSS_POS)); /* * Each pair is 6 bits, and we need to add the 7 "header" bits to the * total size. */ n = (n * IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2) + 7; n = DIV_ROUND_UP(n, 8); return n; } /* HE Operation defines */ #define IEEE80211_HE_OPERATION_DFLT_PE_DURATION_MASK 0x00000003 #define IEEE80211_HE_OPERATION_TWT_REQUIRED 0x00000008 #define IEEE80211_HE_OPERATION_RTS_THRESHOLD_MASK 0x00003ff0 #define IEEE80211_HE_OPERATION_RTS_THRESHOLD_OFFSET 4 #define IEEE80211_HE_OPERATION_VHT_OPER_INFO 0x00004000 #define IEEE80211_HE_OPERATION_CO_HOSTED_BSS 0x00008000 #define IEEE80211_HE_OPERATION_ER_SU_DISABLE 0x00010000 #define IEEE80211_HE_OPERATION_6GHZ_OP_INFO 0x00020000 #define IEEE80211_HE_OPERATION_BSS_COLOR_MASK 0x3f000000 #define IEEE80211_HE_OPERATION_BSS_COLOR_OFFSET 24 #define IEEE80211_HE_OPERATION_PARTIAL_BSS_COLOR 0x40000000 #define IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED 0x80000000
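/*
 * Illustrative sketch, not part of the original header: fixed-width
 * subfields of he_oper_params follow the same mask/offset pattern as the
 * VHT capability fields. The hypothetical helper below pulls the BSS color
 * out of a parsed HE Operation element.
 */
static inline u8 example_he_bss_color(const struct ieee80211_he_operation *he_oper)
{
	u32 params = le32_to_cpu(he_oper->he_oper_params);

	return (params & IEEE80211_HE_OPERATION_BSS_COLOR_MASK) >>
	       IEEE80211_HE_OPERATION_BSS_COLOR_OFFSET;
}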
/* * ieee80211_he_oper_size - calculate 802.11ax HE Operation IE size * @he_oper_ie: byte data of the HE Operation IE, starting from the byte * after the ext ID byte. It is assumed that he_oper_ie has at least * sizeof(struct ieee80211_he_operation) bytes, checked already in * ieee802_11_parse_elems_crc() * @return the actual size of the IE data (not including header), or 0 on error */ static inline u8 ieee80211_he_oper_size(const u8 *he_oper_ie) { struct ieee80211_he_operation *he_oper = (void *)he_oper_ie; u8 oper_len = sizeof(struct ieee80211_he_operation); u32 he_oper_params; /* Make sure the input is not NULL */ if (!he_oper_ie) return 0; /* Calc required length */ he_oper_params = le32_to_cpu(he_oper->he_oper_params); if (he_oper_params & IEEE80211_HE_OPERATION_VHT_OPER_INFO) oper_len += 3; if (he_oper_params & IEEE80211_HE_OPERATION_CO_HOSTED_BSS) oper_len++; if (he_oper_params & IEEE80211_HE_OPERATION_6GHZ_OP_INFO) oper_len += 4; /* Add the first byte (extension ID) to the total length */ oper_len++; return oper_len; } /* Authentication algorithms */ #define WLAN_AUTH_OPEN 0 #define WLAN_AUTH_SHARED_KEY 1 #define WLAN_AUTH_FT 2 #define WLAN_AUTH_SAE 3 #define WLAN_AUTH_FILS_SK 4 #define WLAN_AUTH_FILS_SK_PFS 5 #define WLAN_AUTH_FILS_PK 6 #define WLAN_AUTH_LEAP 128 #define WLAN_AUTH_CHALLENGE_LEN 128 #define WLAN_CAPABILITY_ESS (1<<0) #define WLAN_CAPABILITY_IBSS (1<<1) /* * A mesh STA sets the ESS and IBSS capability bits to zero. * However, this holds true for p2p probe responses (in the p2p_find * phase) as well. */ #define WLAN_CAPABILITY_IS_STA_BSS(cap) \ (!((cap) & (WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_IBSS))) #define WLAN_CAPABILITY_CF_POLLABLE (1<<2) #define WLAN_CAPABILITY_CF_POLL_REQUEST (1<<3) #define WLAN_CAPABILITY_PRIVACY (1<<4) #define WLAN_CAPABILITY_SHORT_PREAMBLE (1<<5) #define WLAN_CAPABILITY_PBCC (1<<6) #define WLAN_CAPABILITY_CHANNEL_AGILITY (1<<7) /* 802.11h */ #define WLAN_CAPABILITY_SPECTRUM_MGMT (1<<8) #define WLAN_CAPABILITY_QOS (1<<9) #define WLAN_CAPABILITY_SHORT_SLOT_TIME (1<<10) #define WLAN_CAPABILITY_APSD (1<<11) #define WLAN_CAPABILITY_RADIO_MEASURE (1<<12) #define WLAN_CAPABILITY_DSSS_OFDM (1<<13) #define WLAN_CAPABILITY_DEL_BACK (1<<14) #define WLAN_CAPABILITY_IMM_BACK (1<<15) /* DMG (60 GHz) 802.11ad */ /* type - bits 0..1 */ #define WLAN_CAPABILITY_DMG_TYPE_MASK (3<<0) #define WLAN_CAPABILITY_DMG_TYPE_IBSS (1<<0) /* Tx by: STA */ #define WLAN_CAPABILITY_DMG_TYPE_PBSS (2<<0) /* Tx by: PCP */ #define WLAN_CAPABILITY_DMG_TYPE_AP (3<<0) /* Tx by: AP */ #define WLAN_CAPABILITY_DMG_CBAP_ONLY (1<<2) #define WLAN_CAPABILITY_DMG_CBAP_SOURCE (1<<3) #define WLAN_CAPABILITY_DMG_PRIVACY (1<<4) #define WLAN_CAPABILITY_DMG_ECPAC (1<<5) #define WLAN_CAPABILITY_DMG_SPECTRUM_MGMT (1<<8) #define WLAN_CAPABILITY_DMG_RADIO_MEASURE (1<<12) /* measurement */ #define IEEE80211_SPCT_MSR_RPRT_MODE_LATE (1<<0) #define IEEE80211_SPCT_MSR_RPRT_MODE_INCAPABLE (1<<1) #define IEEE80211_SPCT_MSR_RPRT_MODE_REFUSED (1<<2) #define IEEE80211_SPCT_MSR_RPRT_TYPE_BASIC 0 #define IEEE80211_SPCT_MSR_RPRT_TYPE_CCA 1 #define IEEE80211_SPCT_MSR_RPRT_TYPE_RPI 2 #define IEEE80211_SPCT_MSR_RPRT_TYPE_LCI 8 #define IEEE80211_SPCT_MSR_RPRT_TYPE_CIVIC 11 /* 802.11g ERP information element */ #define WLAN_ERP_NON_ERP_PRESENT (1<<0) #define WLAN_ERP_USE_PROTECTION (1<<1) #define WLAN_ERP_BARKER_PREAMBLE (1<<2) /* WLAN_ERP_BARKER_PREAMBLE values */ enum { WLAN_ERP_PREAMBLE_SHORT = 0, WLAN_ERP_PREAMBLE_LONG = 1, }; /* Band ID, 802.11ad #8.4.1.45 */ enum { IEEE80211_BANDID_TV_WS = 0, /* TV white spaces */ IEEE80211_BANDID_SUB1 = 1, /* Sub-1 GHz (excluding TV white spaces) */ IEEE80211_BANDID_2G = 2, /* 2.4 GHz */ IEEE80211_BANDID_3G = 3, /* 3.6 GHz */
IEEE80211_BANDID_5G = 4, /* 4.9 and 5 GHz */ IEEE80211_BANDID_60G = 5, /* 60 GHz */ }; /* Status codes */ enum ieee80211_statuscode { WLAN_STATUS_SUCCESS = 0, WLAN_STATUS_UNSPECIFIED_FAILURE = 1, WLAN_STATUS_CAPS_UNSUPPORTED = 10, WLAN_STATUS_REASSOC_NO_ASSOC = 11, WLAN_STATUS_ASSOC_DENIED_UNSPEC = 12, WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG = 13, WLAN_STATUS_UNKNOWN_AUTH_TRANSACTION = 14, WLAN_STATUS_CHALLENGE_FAIL = 15, WLAN_STATUS_AUTH_TIMEOUT = 16, WLAN_STATUS_AP_UNABLE_TO_HANDLE_NEW_STA = 17, WLAN_STATUS_ASSOC_DENIED_RATES = 18, /* 802.11b */ WLAN_STATUS_ASSOC_DENIED_NOSHORTPREAMBLE = 19, WLAN_STATUS_ASSOC_DENIED_NOPBCC = 20, WLAN_STATUS_ASSOC_DENIED_NOAGILITY = 21, /* 802.11h */ WLAN_STATUS_ASSOC_DENIED_NOSPECTRUM = 22, WLAN_STATUS_ASSOC_REJECTED_BAD_POWER = 23, WLAN_STATUS_ASSOC_REJECTED_BAD_SUPP_CHAN = 24, /* 802.11g */ WLAN_STATUS_ASSOC_DENIED_NOSHORTTIME = 25, WLAN_STATUS_ASSOC_DENIED_NODSSSOFDM = 26, /* 802.11w */ WLAN_STATUS_ASSOC_REJECTED_TEMPORARILY = 30, WLAN_STATUS_ROBUST_MGMT_FRAME_POLICY_VIOLATION = 31, /* 802.11i */ WLAN_STATUS_INVALID_IE = 40, WLAN_STATUS_INVALID_GROUP_CIPHER = 41, WLAN_STATUS_INVALID_PAIRWISE_CIPHER = 42, WLAN_STATUS_INVALID_AKMP = 43, WLAN_STATUS_UNSUPP_RSN_VERSION = 44, WLAN_STATUS_INVALID_RSN_IE_CAP = 45, WLAN_STATUS_CIPHER_SUITE_REJECTED = 46, /* 802.11e */ WLAN_STATUS_UNSPECIFIED_QOS = 32, WLAN_STATUS_ASSOC_DENIED_NOBANDWIDTH = 33, WLAN_STATUS_ASSOC_DENIED_LOWACK = 34, WLAN_STATUS_ASSOC_DENIED_UNSUPP_QOS = 35, WLAN_STATUS_REQUEST_DECLINED = 37, WLAN_STATUS_INVALID_QOS_PARAM = 38, WLAN_STATUS_CHANGE_TSPEC = 39, WLAN_STATUS_WAIT_TS_DELAY = 47, WLAN_STATUS_NO_DIRECT_LINK = 48, WLAN_STATUS_STA_NOT_PRESENT = 49, WLAN_STATUS_STA_NOT_QSTA = 50, /* 802.11s */ WLAN_STATUS_ANTI_CLOG_REQUIRED = 76, WLAN_STATUS_FCG_NOT_SUPP = 78, WLAN_STATUS_STA_NO_TBTT = 78, /* 802.11ad */ WLAN_STATUS_REJECTED_WITH_SUGGESTED_CHANGES = 39, WLAN_STATUS_REJECTED_FOR_DELAY_PERIOD = 47, WLAN_STATUS_REJECT_WITH_SCHEDULE = 83, WLAN_STATUS_PENDING_ADMITTING_FST_SESSION = 86, WLAN_STATUS_PERFORMING_FST_NOW = 87, WLAN_STATUS_PENDING_GAP_IN_BA_WINDOW = 88, WLAN_STATUS_REJECT_U_PID_SETTING = 89, WLAN_STATUS_REJECT_DSE_BAND = 96, WLAN_STATUS_DENIED_WITH_SUGGESTED_BAND_AND_CHANNEL = 99, WLAN_STATUS_DENIED_DUE_TO_SPECTRUM_MANAGEMENT = 103, /* 802.11ai */ WLAN_STATUS_FILS_AUTHENTICATION_FAILURE = 108, WLAN_STATUS_UNKNOWN_AUTHENTICATION_SERVER = 109, }; /* Reason codes */ enum ieee80211_reasoncode { WLAN_REASON_UNSPECIFIED = 1, WLAN_REASON_PREV_AUTH_NOT_VALID = 2, WLAN_REASON_DEAUTH_LEAVING = 3, WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY = 4, WLAN_REASON_DISASSOC_AP_BUSY = 5, WLAN_REASON_CLASS2_FRAME_FROM_NONAUTH_STA = 6, WLAN_REASON_CLASS3_FRAME_FROM_NONASSOC_STA = 7, WLAN_REASON_DISASSOC_STA_HAS_LEFT = 8, WLAN_REASON_STA_REQ_ASSOC_WITHOUT_AUTH = 9, /* 802.11h */ WLAN_REASON_DISASSOC_BAD_POWER = 10, WLAN_REASON_DISASSOC_BAD_SUPP_CHAN = 11, /* 802.11i */ WLAN_REASON_INVALID_IE = 13, WLAN_REASON_MIC_FAILURE = 14, WLAN_REASON_4WAY_HANDSHAKE_TIMEOUT = 15, WLAN_REASON_GROUP_KEY_HANDSHAKE_TIMEOUT = 16, WLAN_REASON_IE_DIFFERENT = 17, WLAN_REASON_INVALID_GROUP_CIPHER = 18, WLAN_REASON_INVALID_PAIRWISE_CIPHER = 19, WLAN_REASON_INVALID_AKMP = 20, WLAN_REASON_UNSUPP_RSN_VERSION = 21, WLAN_REASON_INVALID_RSN_IE_CAP = 22, WLAN_REASON_IEEE8021X_FAILED = 23, WLAN_REASON_CIPHER_SUITE_REJECTED = 24, /* TDLS (802.11z) */ WLAN_REASON_TDLS_TEARDOWN_UNREACHABLE = 25, WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED = 26, /* 802.11e */ WLAN_REASON_DISASSOC_UNSPECIFIED_QOS = 32, WLAN_REASON_DISASSOC_QAP_NO_BANDWIDTH = 33, 
WLAN_REASON_DISASSOC_LOW_ACK = 34, WLAN_REASON_DISASSOC_QAP_EXCEED_TXOP = 35, WLAN_REASON_QSTA_LEAVE_QBSS = 36, WLAN_REASON_QSTA_NOT_USE = 37, WLAN_REASON_QSTA_REQUIRE_SETUP = 38, WLAN_REASON_QSTA_TIMEOUT = 39, WLAN_REASON_QSTA_CIPHER_NOT_SUPP = 45, /* 802.11s */ WLAN_REASON_MESH_PEER_CANCELED = 52, WLAN_REASON_MESH_MAX_PEERS = 53, WLAN_REASON_MESH_CONFIG = 54, WLAN_REASON_MESH_CLOSE = 55, WLAN_REASON_MESH_MAX_RETRIES = 56, WLAN_REASON_MESH_CONFIRM_TIMEOUT = 57, WLAN_REASON_MESH_INVALID_GTK = 58, WLAN_REASON_MESH_INCONSISTENT_PARAM = 59, WLAN_REASON_MESH_INVALID_SECURITY = 60, WLAN_REASON_MESH_PATH_ERROR = 61, WLAN_REASON_MESH_PATH_NOFORWARD = 62, WLAN_REASON_MESH_PATH_DEST_UNREACHABLE = 63, WLAN_REASON_MAC_EXISTS_IN_MBSS = 64, WLAN_REASON_MESH_CHAN_REGULATORY = 65, WLAN_REASON_MESH_CHAN = 66, }; /* Information Element IDs */ enum ieee80211_eid { WLAN_EID_SSID = 0, WLAN_EID_SUPP_RATES = 1, WLAN_EID_FH_PARAMS = 2, /* reserved now */ WLAN_EID_DS_PARAMS = 3, WLAN_EID_CF_PARAMS = 4, WLAN_EID_TIM = 5, WLAN_EID_IBSS_PARAMS = 6, WLAN_EID_COUNTRY = 7, /* 8, 9 reserved */ WLAN_EID_REQUEST = 10, WLAN_EID_QBSS_LOAD = 11, WLAN_EID_EDCA_PARAM_SET = 12, WLAN_EID_TSPEC = 13, WLAN_EID_TCLAS = 14, WLAN_EID_SCHEDULE = 15, WLAN_EID_CHALLENGE = 16, /* 17-31 reserved for challenge text extension */ WLAN_EID_PWR_CONSTRAINT = 32, WLAN_EID_PWR_CAPABILITY = 33, WLAN_EID_TPC_REQUEST = 34, WLAN_EID_TPC_REPORT = 35, WLAN_EID_SUPPORTED_CHANNELS = 36, WLAN_EID_CHANNEL_SWITCH = 37, WLAN_EID_MEASURE_REQUEST = 38, WLAN_EID_MEASURE_REPORT = 39, WLAN_EID_QUIET = 40, WLAN_EID_IBSS_DFS = 41, WLAN_EID_ERP_INFO = 42, WLAN_EID_TS_DELAY = 43, WLAN_EID_TCLAS_PROCESSING = 44, WLAN_EID_HT_CAPABILITY = 45, WLAN_EID_QOS_CAPA = 46, /* 47 reserved for Broadcom */ WLAN_EID_RSN = 48, WLAN_EID_802_15_COEX = 49, WLAN_EID_EXT_SUPP_RATES = 50, WLAN_EID_AP_CHAN_REPORT = 51, WLAN_EID_NEIGHBOR_REPORT = 52, WLAN_EID_RCPI = 53, WLAN_EID_MOBILITY_DOMAIN = 54, WLAN_EID_FAST_BSS_TRANSITION = 55, WLAN_EID_TIMEOUT_INTERVAL = 56, WLAN_EID_RIC_DATA = 57, WLAN_EID_DSE_REGISTERED_LOCATION = 58, WLAN_EID_SUPPORTED_REGULATORY_CLASSES = 59, WLAN_EID_EXT_CHANSWITCH_ANN = 60, WLAN_EID_HT_OPERATION = 61, WLAN_EID_SECONDARY_CHANNEL_OFFSET = 62, WLAN_EID_BSS_AVG_ACCESS_DELAY = 63, WLAN_EID_ANTENNA_INFO = 64, WLAN_EID_RSNI = 65, WLAN_EID_MEASUREMENT_PILOT_TX_INFO = 66, WLAN_EID_BSS_AVAILABLE_CAPACITY = 67, WLAN_EID_BSS_AC_ACCESS_DELAY = 68, WLAN_EID_TIME_ADVERTISEMENT = 69, WLAN_EID_RRM_ENABLED_CAPABILITIES = 70, WLAN_EID_MULTIPLE_BSSID = 71, WLAN_EID_BSS_COEX_2040 = 72, WLAN_EID_BSS_INTOLERANT_CHL_REPORT = 73, WLAN_EID_OVERLAP_BSS_SCAN_PARAM = 74, WLAN_EID_RIC_DESCRIPTOR = 75, WLAN_EID_MMIE = 76, WLAN_EID_ASSOC_COMEBACK_TIME = 77, WLAN_EID_EVENT_REQUEST = 78, WLAN_EID_EVENT_REPORT = 79, WLAN_EID_DIAGNOSTIC_REQUEST = 80, WLAN_EID_DIAGNOSTIC_REPORT = 81, WLAN_EID_LOCATION_PARAMS = 82, WLAN_EID_NON_TX_BSSID_CAP = 83, WLAN_EID_SSID_LIST = 84, WLAN_EID_MULTI_BSSID_IDX = 85, WLAN_EID_FMS_DESCRIPTOR = 86, WLAN_EID_FMS_REQUEST = 87, WLAN_EID_FMS_RESPONSE = 88, WLAN_EID_QOS_TRAFFIC_CAPA = 89, WLAN_EID_BSS_MAX_IDLE_PERIOD = 90, WLAN_EID_TSF_REQUEST = 91, WLAN_EID_TSF_RESPOSNE = 92, WLAN_EID_WNM_SLEEP_MODE = 93, WLAN_EID_TIM_BCAST_REQ = 94, WLAN_EID_TIM_BCAST_RESP = 95, WLAN_EID_COLL_IF_REPORT = 96, WLAN_EID_CHANNEL_USAGE = 97, WLAN_EID_TIME_ZONE = 98, WLAN_EID_DMS_REQUEST = 99, WLAN_EID_DMS_RESPONSE = 100, WLAN_EID_LINK_ID = 101, WLAN_EID_WAKEUP_SCHEDUL = 102, /* 103 reserved */ WLAN_EID_CHAN_SWITCH_TIMING = 104, WLAN_EID_PTI_CONTROL = 105, WLAN_EID_PU_BUFFER_STATUS = 106, 
WLAN_EID_INTERWORKING = 107, WLAN_EID_ADVERTISEMENT_PROTOCOL = 108, WLAN_EID_EXPEDITED_BW_REQ = 109, WLAN_EID_QOS_MAP_SET = 110, WLAN_EID_ROAMING_CONSORTIUM = 111, WLAN_EID_EMERGENCY_ALERT = 112, WLAN_EID_MESH_CONFIG = 113, WLAN_EID_MESH_ID = 114, WLAN_EID_LINK_METRIC_REPORT = 115, WLAN_EID_CONGESTION_NOTIFICATION = 116, WLAN_EID_PEER_MGMT = 117, WLAN_EID_CHAN_SWITCH_PARAM = 118, WLAN_EID_MESH_AWAKE_WINDOW = 119, WLAN_EID_BEACON_TIMING = 120, WLAN_EID_MCCAOP_SETUP_REQ = 121, WLAN_EID_MCCAOP_SETUP_RESP = 122, WLAN_EID_MCCAOP_ADVERT = 123, WLAN_EID_MCCAOP_TEARDOWN = 124, WLAN_EID_GANN = 125, WLAN_EID_RANN = 126, WLAN_EID_EXT_CAPABILITY = 127, /* 128, 129 reserved for Agere */ WLAN_EID_PREQ = 130, WLAN_EID_PREP = 131, WLAN_EID_PERR = 132, /* 133-136 reserved for Cisco */ WLAN_EID_PXU = 137, WLAN_EID_PXUC = 138, WLAN_EID_AUTH_MESH_PEER_EXCH = 139, WLAN_EID_MIC = 140, WLAN_EID_DESTINATION_URI = 141, WLAN_EID_UAPSD_COEX = 142, WLAN_EID_WAKEUP_SCHEDULE = 143, WLAN_EID_EXT_SCHEDULE = 144, WLAN_EID_STA_AVAILABILITY = 145, WLAN_EID_DMG_TSPEC = 146, WLAN_EID_DMG_AT = 147, WLAN_EID_DMG_CAP = 148, /* 149 reserved for Cisco */ WLAN_EID_CISCO_VENDOR_SPECIFIC = 150, WLAN_EID_DMG_OPERATION = 151, WLAN_EID_DMG_BSS_PARAM_CHANGE = 152, WLAN_EID_DMG_BEAM_REFINEMENT = 153, WLAN_EID_CHANNEL_MEASURE_FEEDBACK = 154, /* 155-156 reserved for Cisco */ WLAN_EID_AWAKE_WINDOW = 157, WLAN_EID_MULTI_BAND = 158, WLAN_EID_ADDBA_EXT = 159, WLAN_EID_NEXT_PCP_LIST = 160, WLAN_EID_PCP_HANDOVER = 161, WLAN_EID_DMG_LINK_MARGIN = 162, WLAN_EID_SWITCHING_STREAM = 163, WLAN_EID_SESSION_TRANSITION = 164, WLAN_EID_DYN_TONE_PAIRING_REPORT = 165, WLAN_EID_CLUSTER_REPORT = 166, WLAN_EID_RELAY_CAP = 167, WLAN_EID_RELAY_XFER_PARAM_SET = 168, WLAN_EID_BEAM_LINK_MAINT = 169, WLAN_EID_MULTIPLE_MAC_ADDR = 170, WLAN_EID_U_PID = 171, WLAN_EID_DMG_LINK_ADAPT_ACK = 172, /* 173 reserved for Symbol */ WLAN_EID_MCCAOP_ADV_OVERVIEW = 174, WLAN_EID_QUIET_PERIOD_REQ = 175, /* 176 reserved for Symbol */ WLAN_EID_QUIET_PERIOD_RESP = 177, /* 178-179 reserved for Symbol */ /* 180 reserved for ISO/IEC 20011 */ WLAN_EID_EPAC_POLICY = 182, WLAN_EID_CLISTER_TIME_OFF = 183, WLAN_EID_INTER_AC_PRIO = 184, WLAN_EID_SCS_DESCRIPTOR = 185, WLAN_EID_QLOAD_REPORT = 186, WLAN_EID_HCCA_TXOP_UPDATE_COUNT = 187, WLAN_EID_HL_STREAM_ID = 188, WLAN_EID_GCR_GROUP_ADDR = 189, WLAN_EID_ANTENNA_SECTOR_ID_PATTERN = 190, WLAN_EID_VHT_CAPABILITY = 191, WLAN_EID_VHT_OPERATION = 192, WLAN_EID_EXTENDED_BSS_LOAD = 193, WLAN_EID_WIDE_BW_CHANNEL_SWITCH = 194, WLAN_EID_VHT_TX_POWER_ENVELOPE = 195, WLAN_EID_CHANNEL_SWITCH_WRAPPER = 196, WLAN_EID_AID = 197, WLAN_EID_QUIET_CHANNEL = 198, WLAN_EID_OPMODE_NOTIF = 199, WLAN_EID_VENDOR_SPECIFIC = 221, WLAN_EID_QOS_PARAMETER = 222, WLAN_EID_CAG_NUMBER = 237, WLAN_EID_AP_CSN = 239, WLAN_EID_FILS_INDICATION = 240, WLAN_EID_DILS = 241, WLAN_EID_FRAGMENT = 242, WLAN_EID_EXTENSION = 255 }; /* Element ID Extensions for Element ID 255 */ enum ieee80211_eid_ext { WLAN_EID_EXT_ASSOC_DELAY_INFO = 1, WLAN_EID_EXT_FILS_REQ_PARAMS = 2, WLAN_EID_EXT_FILS_KEY_CONFIRM = 3, WLAN_EID_EXT_FILS_SESSION = 4, WLAN_EID_EXT_FILS_HLP_CONTAINER = 5, WLAN_EID_EXT_FILS_IP_ADDR_ASSIGN = 6, WLAN_EID_EXT_KEY_DELIVERY = 7, WLAN_EID_EXT_FILS_WRAPPED_DATA = 8, WLAN_EID_EXT_FILS_PUBLIC_KEY = 12, WLAN_EID_EXT_FILS_NONCE = 13, WLAN_EID_EXT_FUTURE_CHAN_GUIDANCE = 14, WLAN_EID_EXT_HE_CAPABILITY = 35, WLAN_EID_EXT_HE_OPERATION = 36, WLAN_EID_EXT_UORA = 37, WLAN_EID_EXT_HE_MU_EDCA = 38, WLAN_EID_EXT_MAX_CHANNEL_SWITCH_TIME = 52, WLAN_EID_EXT_MULTIPLE_BSSID_CONFIGURATION = 55, 
WLAN_EID_EXT_NON_INHERITANCE = 56, }; /* Action category code */ enum ieee80211_category { WLAN_CATEGORY_SPECTRUM_MGMT = 0, WLAN_CATEGORY_QOS = 1, WLAN_CATEGORY_DLS = 2, WLAN_CATEGORY_BACK = 3, WLAN_CATEGORY_PUBLIC = 4, WLAN_CATEGORY_RADIO_MEASUREMENT = 5, WLAN_CATEGORY_HT = 7, WLAN_CATEGORY_SA_QUERY = 8, WLAN_CATEGORY_PROTECTED_DUAL_OF_ACTION = 9, WLAN_CATEGORY_WNM = 10, WLAN_CATEGORY_WNM_UNPROTECTED = 11, WLAN_CATEGORY_TDLS = 12, WLAN_CATEGORY_MESH_ACTION = 13, WLAN_CATEGORY_MULTIHOP_ACTION = 14, WLAN_CATEGORY_SELF_PROTECTED = 15, WLAN_CATEGORY_DMG = 16, WLAN_CATEGORY_WMM = 17, WLAN_CATEGORY_FST = 18, WLAN_CATEGORY_UNPROT_DMG = 20, WLAN_CATEGORY_VHT = 21, WLAN_CATEGORY_VENDOR_SPECIFIC_PROTECTED = 126, WLAN_CATEGORY_VENDOR_SPECIFIC = 127, }; /* SPECTRUM_MGMT action code */ enum ieee80211_spectrum_mgmt_actioncode { WLAN_ACTION_SPCT_MSR_REQ = 0, WLAN_ACTION_SPCT_MSR_RPRT = 1, WLAN_ACTION_SPCT_TPC_REQ = 2, WLAN_ACTION_SPCT_TPC_RPRT = 3, WLAN_ACTION_SPCT_CHL_SWITCH = 4, }; /* HT action codes */ enum ieee80211_ht_actioncode { WLAN_HT_ACTION_NOTIFY_CHANWIDTH = 0, WLAN_HT_ACTION_SMPS = 1, WLAN_HT_ACTION_PSMP = 2, WLAN_HT_ACTION_PCO_PHASE = 3, WLAN_HT_ACTION_CSI = 4, WLAN_HT_ACTION_NONCOMPRESSED_BF = 5, WLAN_HT_ACTION_COMPRESSED_BF = 6, WLAN_HT_ACTION_ASEL_IDX_FEEDBACK = 7, }; /* VHT action codes */ enum ieee80211_vht_actioncode { WLAN_VHT_ACTION_COMPRESSED_BF = 0, WLAN_VHT_ACTION_GROUPID_MGMT = 1, WLAN_VHT_ACTION_OPMODE_NOTIF = 2, }; /* Self Protected Action codes */ enum ieee80211_self_protected_actioncode { WLAN_SP_RESERVED = 0, WLAN_SP_MESH_PEERING_OPEN = 1, WLAN_SP_MESH_PEERING_CONFIRM = 2, WLAN_SP_MESH_PEERING_CLOSE = 3, WLAN_SP_MGK_INFORM = 4, WLAN_SP_MGK_ACK = 5, }; /* Mesh action codes */ enum ieee80211_mesh_actioncode { WLAN_MESH_ACTION_LINK_METRIC_REPORT, WLAN_MESH_ACTION_HWMP_PATH_SELECTION, WLAN_MESH_ACTION_GATE_ANNOUNCEMENT, WLAN_MESH_ACTION_CONGESTION_CONTROL_NOTIFICATION, WLAN_MESH_ACTION_MCCA_SETUP_REQUEST, WLAN_MESH_ACTION_MCCA_SETUP_REPLY, WLAN_MESH_ACTION_MCCA_ADVERTISEMENT_REQUEST, WLAN_MESH_ACTION_MCCA_ADVERTISEMENT, WLAN_MESH_ACTION_MCCA_TEARDOWN, WLAN_MESH_ACTION_TBTT_ADJUSTMENT_REQUEST, WLAN_MESH_ACTION_TBTT_ADJUSTMENT_RESPONSE, }; /* Security key length */ enum ieee80211_key_len { WLAN_KEY_LEN_WEP40 = 5, WLAN_KEY_LEN_WEP104 = 13, WLAN_KEY_LEN_CCMP = 16, WLAN_KEY_LEN_CCMP_256 = 32, WLAN_KEY_LEN_TKIP = 32, WLAN_KEY_LEN_AES_CMAC = 16, WLAN_KEY_LEN_SMS4 = 32, WLAN_KEY_LEN_GCMP = 16, WLAN_KEY_LEN_GCMP_256 = 32, WLAN_KEY_LEN_BIP_CMAC_256 = 32, WLAN_KEY_LEN_BIP_GMAC_128 = 16, WLAN_KEY_LEN_BIP_GMAC_256 = 32, }; #define IEEE80211_WEP_IV_LEN 4 #define IEEE80211_WEP_ICV_LEN 4 #define IEEE80211_CCMP_HDR_LEN 8 #define IEEE80211_CCMP_MIC_LEN 8 #define IEEE80211_CCMP_PN_LEN 6 #define IEEE80211_CCMP_256_HDR_LEN 8 #define IEEE80211_CCMP_256_MIC_LEN 16 #define IEEE80211_CCMP_256_PN_LEN 6 #define IEEE80211_TKIP_IV_LEN 8 #define IEEE80211_TKIP_ICV_LEN 4 #define IEEE80211_CMAC_PN_LEN 6 #define IEEE80211_GMAC_PN_LEN 6 #define IEEE80211_GCMP_HDR_LEN 8 #define IEEE80211_GCMP_MIC_LEN 16 #define IEEE80211_GCMP_PN_LEN 6 #define FILS_NONCE_LEN 16 #define FILS_MAX_KEK_LEN 64 #define FILS_ERP_MAX_USERNAME_LEN 16 #define FILS_ERP_MAX_REALM_LEN 253 #define FILS_ERP_MAX_RRK_LEN 64 #define PMK_MAX_LEN 64 /* Public action codes (IEEE Std 802.11-2016, 9.6.8.1, Table 9-307) */ enum ieee80211_pub_actioncode { WLAN_PUB_ACTION_20_40_BSS_COEX = 0, WLAN_PUB_ACTION_DSE_ENABLEMENT = 1, WLAN_PUB_ACTION_DSE_DEENABLEMENT = 2, WLAN_PUB_ACTION_DSE_REG_LOC_ANN = 3, WLAN_PUB_ACTION_EXT_CHANSW_ANN = 4, 
WLAN_PUB_ACTION_DSE_MSMT_REQ = 5, WLAN_PUB_ACTION_DSE_MSMT_RESP = 6, WLAN_PUB_ACTION_MSMT_PILOT = 7, WLAN_PUB_ACTION_DSE_PC = 8, WLAN_PUB_ACTION_VENDOR_SPECIFIC = 9, WLAN_PUB_ACTION_GAS_INITIAL_REQ = 10, WLAN_PUB_ACTION_GAS_INITIAL_RESP = 11, WLAN_PUB_ACTION_GAS_COMEBACK_REQ = 12, WLAN_PUB_ACTION_GAS_COMEBACK_RESP = 13, WLAN_PUB_ACTION_TDLS_DISCOVER_RES = 14, WLAN_PUB_ACTION_LOC_TRACK_NOTI = 15, WLAN_PUB_ACTION_QAB_REQUEST_FRAME = 16, WLAN_PUB_ACTION_QAB_RESPONSE_FRAME = 17, WLAN_PUB_ACTION_QMF_POLICY = 18, WLAN_PUB_ACTION_QMF_POLICY_CHANGE = 19, WLAN_PUB_ACTION_QLOAD_REQUEST = 20, WLAN_PUB_ACTION_QLOAD_REPORT = 21, WLAN_PUB_ACTION_HCCA_TXOP_ADVERT = 22, WLAN_PUB_ACTION_HCCA_TXOP_RESPONSE = 23, WLAN_PUB_ACTION_PUBLIC_KEY = 24, WLAN_PUB_ACTION_CHANNEL_AVAIL_QUERY = 25, WLAN_PUB_ACTION_CHANNEL_SCHEDULE_MGMT = 26, WLAN_PUB_ACTION_CONTACT_VERI_SIGNAL = 27, WLAN_PUB_ACTION_GDD_ENABLEMENT_REQ = 28, WLAN_PUB_ACTION_GDD_ENABLEMENT_RESP = 29, WLAN_PUB_ACTION_NETWORK_CHANNEL_CONTROL = 30, WLAN_PUB_ACTION_WHITE_SPACE_MAP_ANN = 31, WLAN_PUB_ACTION_FTM_REQUEST = 32, WLAN_PUB_ACTION_FTM = 33, WLAN_PUB_ACTION_FILS_DISCOVERY = 34, }; /* TDLS action codes */ enum ieee80211_tdls_actioncode { WLAN_TDLS_SETUP_REQUEST = 0, WLAN_TDLS_SETUP_RESPONSE = 1, WLAN_TDLS_SETUP_CONFIRM = 2, WLAN_TDLS_TEARDOWN = 3, WLAN_TDLS_PEER_TRAFFIC_INDICATION = 4, WLAN_TDLS_CHANNEL_SWITCH_REQUEST = 5, WLAN_TDLS_CHANNEL_SWITCH_RESPONSE = 6, WLAN_TDLS_PEER_PSM_REQUEST = 7, WLAN_TDLS_PEER_PSM_RESPONSE = 8, WLAN_TDLS_PEER_TRAFFIC_RESPONSE = 9, WLAN_TDLS_DISCOVERY_REQUEST = 10, }; /* Extended Channel Switching capability to be set in the 1st byte of * the @WLAN_EID_EXT_CAPABILITY information element */ #define WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING BIT(2) /* Multiple BSSID capability is set in the 6th bit of 3rd byte of the * @WLAN_EID_EXT_CAPABILITY information element */ #define WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT BIT(6) /* TDLS capabilities in the 4th byte of @WLAN_EID_EXT_CAPABILITY */ #define WLAN_EXT_CAPA4_TDLS_BUFFER_STA BIT(4) #define WLAN_EXT_CAPA4_TDLS_PEER_PSM BIT(5) #define WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH BIT(6) /* Interworking capabilities are set in 7th bit of 4th byte of the * @WLAN_EID_EXT_CAPABILITY information element */ #define WLAN_EXT_CAPA4_INTERWORKING_ENABLED BIT(7) /* * TDLS capabilities to be enabled in the 5th byte of the * @WLAN_EID_EXT_CAPABILITY information element */ #define WLAN_EXT_CAPA5_TDLS_ENABLED BIT(5) #define WLAN_EXT_CAPA5_TDLS_PROHIBITED BIT(6) #define WLAN_EXT_CAPA5_TDLS_CH_SW_PROHIBITED BIT(7) #define WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED BIT(5) #define WLAN_EXT_CAPA8_OPMODE_NOTIF BIT(6) /* Defines the maximal number of MSDUs in an A-MSDU. */ #define WLAN_EXT_CAPA8_MAX_MSDU_IN_AMSDU_LSB BIT(7) #define WLAN_EXT_CAPA9_MAX_MSDU_IN_AMSDU_MSB BIT(0) /* * Fine Timing Measurement Initiator - bit 71 of @WLAN_EID_EXT_CAPABILITY * information element */ #define WLAN_EXT_CAPA9_FTM_INITIATOR BIT(7) /* Defines support for TWT Requester and TWT Responder */ #define WLAN_EXT_CAPA10_TWT_REQUESTER_SUPPORT BIT(5) #define WLAN_EXT_CAPA10_TWT_RESPONDER_SUPPORT BIT(6) /* * When set, indicates that the AP is able to tolerate 26-tone RU UL * OFDMA transmissions using HE TB PPDU from OBSS (i.e. it will not falsely * classify the 26-tone RU UL OFDMA transmissions as radar pulses).
*/ #define WLAN_EXT_CAPA10_OBSS_NARROW_BW_RU_TOLERANCE_SUPPORT BIT(7) /* Defines support for enhanced multi-bssid advertisement */ #define WLAN_EXT_CAPA11_EMA_SUPPORT BIT(1) /* TDLS specific payload type in the LLC/SNAP header */ #define WLAN_TDLS_SNAP_RFTYPE 0x2 /* BSS Coex IE information field bits */ #define WLAN_BSS_COEX_INFORMATION_REQUEST BIT(0) /** * enum ieee80211_mesh_sync_method - mesh synchronization method identifier * * @IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET: the default synchronization method * @IEEE80211_SYNC_METHOD_VENDOR: a vendor specific synchronization method * that will be specified in a vendor specific information element */ enum ieee80211_mesh_sync_method { IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET = 1, IEEE80211_SYNC_METHOD_VENDOR = 255, }; /** * enum ieee80211_mesh_path_protocol - mesh path selection protocol identifier * * @IEEE80211_PATH_PROTOCOL_HWMP: the default path selection protocol * @IEEE80211_PATH_PROTOCOL_VENDOR: a vendor specific protocol that will * be specified in a vendor specific information element */ enum ieee80211_mesh_path_protocol { IEEE80211_PATH_PROTOCOL_HWMP = 1, IEEE80211_PATH_PROTOCOL_VENDOR = 255, }; /** * enum ieee80211_mesh_path_metric - mesh path selection metric identifier * * @IEEE80211_PATH_METRIC_AIRTIME: the default path selection metric * @IEEE80211_PATH_METRIC_VENDOR: a vendor specific metric that will be * specified in a vendor specific information element */ enum ieee80211_mesh_path_metric { IEEE80211_PATH_METRIC_AIRTIME = 1, IEEE80211_PATH_METRIC_VENDOR = 255, }; /** * enum ieee80211_root_mode_identifier - root mesh STA mode identifier * * These attributes are used by dot11MeshHWMPRootMode to set root mesh STA mode * * @IEEE80211_ROOTMODE_NO_ROOT: the mesh STA is not a root mesh STA (default) * @IEEE80211_ROOTMODE_ROOT: the mesh STA is a root mesh STA if greater than * this value * @IEEE80211_PROACTIVE_PREQ_NO_PREP: the mesh STA is a root mesh STA that * supports the proactive PREQ with proactive PREP subfield set to 0 * @IEEE80211_PROACTIVE_PREQ_WITH_PREP: the mesh STA is a root mesh STA that * supports the proactive PREQ with proactive PREP subfield set to 1 * @IEEE80211_PROACTIVE_RANN: the mesh STA is a root mesh STA that supports * the proactive RANN */ enum ieee80211_root_mode_identifier { IEEE80211_ROOTMODE_NO_ROOT = 0, IEEE80211_ROOTMODE_ROOT = 1, IEEE80211_PROACTIVE_PREQ_NO_PREP = 2, IEEE80211_PROACTIVE_PREQ_WITH_PREP = 3, IEEE80211_PROACTIVE_RANN = 4, }; /* * IEEE 802.11-2007 7.3.2.9 Country information element * * Minimum length is 8 octets, i.e. len must be evenly * divisible by 2 */ /* Although the spec says 8 I'm seeing 6 in practice */ #define IEEE80211_COUNTRY_IE_MIN_LEN 6 /* The Country String field of the element shall be 3 octets in length */ #define IEEE80211_COUNTRY_STRING_LEN 3 /* * For regulatory extension stuff see IEEE 802.11-2007 * Annex I (page 1141) and Annex J (page 1147). Also * review 7.3.2.9. * * When dot11RegulatoryClassesRequired is true and the * first_channel/reg_extension_id is >= 201 then the IE * consists of the 'ext' struct represented below: * * - Regulatory extension ID - when generating IE this just needs * to be monotonically increasing for each triplet passed in * the IE * - Regulatory class - index into set of rules * - Coverage class - index into air propagation time (Table 7-27), * in microseconds, you can compute the air propagation time from * the index by multiplying by 3, so index 10 yields a propagation * of 30 us. Valid values are 0-31, values 32-255 are not defined * yet.
A value of 0 indicates air propagation of <= 1 us. * * See also Table I.2 for Emission limit sets and Table * I.3 for Behavior limit sets. Table J.1 indicates how to map * a reg_class to an emission limit set and behavior limit set. */ #define IEEE80211_COUNTRY_EXTENSION_ID 201 /* * Channel numbers in the IE must be monotonically increasing * if dot11RegulatoryClassesRequired is not true. * * If dot11RegulatoryClassesRequired is true consecutive * subband triplets following a regulatory triplet shall * have monotonically increasing first_channel number fields. * * Channel numbers shall not overlap. * * Note that max_power is signed. */ struct ieee80211_country_ie_triplet { union { struct { u8 first_channel; u8 num_channels; s8 max_power; } __packed chans; struct { u8 reg_extension_id; u8 reg_class; u8 coverage_class; } __packed ext; }; } __packed; enum ieee80211_timeout_interval_type { WLAN_TIMEOUT_REASSOC_DEADLINE = 1 /* 802.11r */, WLAN_TIMEOUT_KEY_LIFETIME = 2 /* 802.11r */, WLAN_TIMEOUT_ASSOC_COMEBACK = 3 /* 802.11w */, }; /** * struct ieee80211_timeout_interval_ie - Timeout Interval element * @type: type, see &enum ieee80211_timeout_interval_type * @value: timeout interval value */ struct ieee80211_timeout_interval_ie { u8 type; __le32 value; } __packed; /** * enum ieee80211_idle_options - BSS idle options * @WLAN_IDLE_OPTIONS_PROTECTED_KEEP_ALIVE: the station should send an RSN * protected frame to the AP to reset the idle timer at the AP for * the station. */ enum ieee80211_idle_options { WLAN_IDLE_OPTIONS_PROTECTED_KEEP_ALIVE = BIT(0), }; /** * struct ieee80211_bss_max_idle_period_ie * * This structure refers to "BSS Max idle period element" * * @max_idle_period: indicates the time period during which a station can * refrain from transmitting frames to its associated AP without being * disassociated. In units of 1000 TUs. * @idle_options: indicates the options associated with the BSS idle capability * as specified in &enum ieee80211_idle_options. */ struct ieee80211_bss_max_idle_period_ie { __le16 max_idle_period; u8 idle_options; } __packed; /* BACK action code */ enum ieee80211_back_actioncode { WLAN_ACTION_ADDBA_REQ = 0, WLAN_ACTION_ADDBA_RESP = 1, WLAN_ACTION_DELBA = 2, }; /* BACK (block-ack) parties */ enum ieee80211_back_parties { WLAN_BACK_RECIPIENT = 0, WLAN_BACK_INITIATOR = 1, }; /* SA Query action */ enum ieee80211_sa_query_action { WLAN_ACTION_SA_QUERY_REQUEST = 0, WLAN_ACTION_SA_QUERY_RESPONSE = 1, }; /** * struct ieee80211_bssid_index * * This structure refers to "Multiple BSSID-index element" * * @bssid_index: BSSID index * @dtim_period: optional, overrides transmitted BSS dtim period * @dtim_count: optional, overrides transmitted BSS dtim count */ struct ieee80211_bssid_index { u8 bssid_index; u8 dtim_period; u8 dtim_count; }; /** * struct ieee80211_multiple_bssid_configuration * * This structure refers to "Multiple BSSID Configuration element" * * @bssid_count: total number of active BSSIDs in the set * @profile_periodicity: the least number of beacon frames that need to be * received in order to discover all the nontransmitted BSSIDs in the set.
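 * For example (an illustrative reading, not part of the original header):
 * a @profile_periodicity of 3 means a scanning station has to collect at
 * least three consecutive Beacon frames before it can have seen every
 * nontransmitted BSSID profile in the set.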
*/ struct ieee80211_multiple_bssid_configuration { u8 bssid_count; u8 profile_periodicity; }; #define SUITE(oui, id) (((oui) << 8) | (id)) /* cipher suite selectors */ #define WLAN_CIPHER_SUITE_USE_GROUP SUITE(0x000FAC, 0) #define WLAN_CIPHER_SUITE_WEP40 SUITE(0x000FAC, 1) #define WLAN_CIPHER_SUITE_TKIP SUITE(0x000FAC, 2) /* reserved: SUITE(0x000FAC, 3) */ #define WLAN_CIPHER_SUITE_CCMP SUITE(0x000FAC, 4) #define WLAN_CIPHER_SUITE_WEP104 SUITE(0x000FAC, 5) #define WLAN_CIPHER_SUITE_AES_CMAC SUITE(0x000FAC, 6) #define WLAN_CIPHER_SUITE_GCMP SUITE(0x000FAC, 8) #define WLAN_CIPHER_SUITE_GCMP_256 SUITE(0x000FAC, 9) #define WLAN_CIPHER_SUITE_CCMP_256 SUITE(0x000FAC, 10) #define WLAN_CIPHER_SUITE_BIP_GMAC_128 SUITE(0x000FAC, 11) #define WLAN_CIPHER_SUITE_BIP_GMAC_256 SUITE(0x000FAC, 12) #define WLAN_CIPHER_SUITE_BIP_CMAC_256 SUITE(0x000FAC, 13) #define WLAN_CIPHER_SUITE_SMS4 SUITE(0x001472, 1) /* AKM suite selectors */ #define WLAN_AKM_SUITE_8021X SUITE(0x000FAC, 1) #define WLAN_AKM_SUITE_PSK SUITE(0x000FAC, 2) #define WLAN_AKM_SUITE_FT_8021X SUITE(0x000FAC, 3) #define WLAN_AKM_SUITE_FT_PSK SUITE(0x000FAC, 4) #define WLAN_AKM_SUITE_8021X_SHA256 SUITE(0x000FAC, 5) #define WLAN_AKM_SUITE_PSK_SHA256 SUITE(0x000FAC, 6) #define WLAN_AKM_SUITE_TDLS SUITE(0x000FAC, 7) #define WLAN_AKM_SUITE_SAE SUITE(0x000FAC, 8) #define WLAN_AKM_SUITE_FT_OVER_SAE SUITE(0x000FAC, 9) #define WLAN_AKM_SUITE_8021X_SUITE_B SUITE(0x000FAC, 11) #define WLAN_AKM_SUITE_8021X_SUITE_B_192 SUITE(0x000FAC, 12) #define WLAN_AKM_SUITE_FILS_SHA256 SUITE(0x000FAC, 14) #define WLAN_AKM_SUITE_FILS_SHA384 SUITE(0x000FAC, 15) #define WLAN_AKM_SUITE_FT_FILS_SHA256 SUITE(0x000FAC, 16) #define WLAN_AKM_SUITE_FT_FILS_SHA384 SUITE(0x000FAC, 17) #define WLAN_MAX_KEY_LEN 32 #define WLAN_PMK_NAME_LEN 16 #define WLAN_PMKID_LEN 16 #define WLAN_PMK_LEN_EAP_LEAP 16 #define WLAN_PMK_LEN 32 #define WLAN_PMK_LEN_SUITE_B_192 48 #define WLAN_OUI_WFA 0x506f9a #define WLAN_OUI_TYPE_WFA_P2P 9 #define WLAN_OUI_MICROSOFT 0x0050f2 #define WLAN_OUI_TYPE_MICROSOFT_WPA 1 #define WLAN_OUI_TYPE_MICROSOFT_WMM 2 #define WLAN_OUI_TYPE_MICROSOFT_WPS 4 #define WLAN_OUI_TYPE_MICROSOFT_TPC 8 /* * WMM/802.11e Tspec Element */ #define IEEE80211_WMM_IE_TSPEC_TID_MASK 0x0F #define IEEE80211_WMM_IE_TSPEC_TID_SHIFT 1 enum ieee80211_tspec_status_code { IEEE80211_TSPEC_STATUS_ADMISS_ACCEPTED = 0, IEEE80211_TSPEC_STATUS_ADDTS_INVAL_PARAMS = 0x1, }; struct ieee80211_tspec_ie { u8 element_id; u8 len; u8 oui[3]; u8 oui_type; u8 oui_subtype; u8 version; __le16 tsinfo; u8 tsinfo_resvd; __le16 nominal_msdu; __le16 max_msdu; __le32 min_service_int; __le32 max_service_int; __le32 inactivity_int; __le32 suspension_int; __le32 service_start_time; __le32 min_data_rate; __le32 mean_data_rate; __le32 peak_data_rate; __le32 max_burst_size; __le32 delay_bound; __le32 min_phy_rate; __le16 sba; __le16 medium_time; } __packed; /** * ieee80211_get_qos_ctl - get pointer to qos control bytes * @hdr: the frame * * The qos ctrl bytes come after the frame_control, duration, seq_num * and 3 or 4 addresses of length ETH_ALEN. 
* 3 addr: 2 + 2 + 2 + 3*6 = 24 * 4 addr: 2 + 2 + 2 + 4*6 = 30 */ static inline u8 *ieee80211_get_qos_ctl(struct ieee80211_hdr *hdr) { if (ieee80211_has_a4(hdr->frame_control)) return (u8 *)hdr + 30; else return (u8 *)hdr + 24; } /** * ieee80211_get_tid - get qos TID * @hdr: the frame */ static inline u8 ieee80211_get_tid(struct ieee80211_hdr *hdr) { u8 *qc = ieee80211_get_qos_ctl(hdr); return qc[0] & IEEE80211_QOS_CTL_TID_MASK; } /** * ieee80211_get_SA - get pointer to SA * @hdr: the frame * * Given an 802.11 frame, this function returns a pointer * to the source address (SA). It does not verify that the * header is long enough to contain the address, and the * header must be long enough to contain the frame control * field. */ static inline u8 *ieee80211_get_SA(struct ieee80211_hdr *hdr) { if (ieee80211_has_a4(hdr->frame_control)) return hdr->addr4; if (ieee80211_has_fromds(hdr->frame_control)) return hdr->addr3; return hdr->addr2; } /** * ieee80211_get_DA - get pointer to DA * @hdr: the frame * * Given an 802.11 frame, this function returns a pointer * to the destination address (DA). It does not verify that * the header is long enough to contain the address, and the * header must be long enough to contain the frame control * field. */ static inline u8 *ieee80211_get_DA(struct ieee80211_hdr *hdr) { if (ieee80211_has_tods(hdr->frame_control)) return hdr->addr3; else return hdr->addr1; } /** * _ieee80211_is_robust_mgmt_frame - check if frame is a robust management frame * @hdr: the frame (buffer must include at least the first octet of payload) */ static inline bool _ieee80211_is_robust_mgmt_frame(struct ieee80211_hdr *hdr) { if (ieee80211_is_disassoc(hdr->frame_control) || ieee80211_is_deauth(hdr->frame_control)) return true; if (ieee80211_is_action(hdr->frame_control)) { u8 *category; /* * Action frames, excluding Public Action frames, are Robust * Management Frames. However, if we are looking at a Protected * frame, skip the check since the data may be encrypted and * the frame has already been found to be a Robust Management * Frame (by the other end).
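	 * For example, a Protected frame carrying an SA Query action is
	 * reported as robust here without reading its (possibly
	 * encrypted) category octet.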
*/ if (ieee80211_has_protected(hdr->frame_control)) return true; category = ((u8 *) hdr) + 24; return *category != WLAN_CATEGORY_PUBLIC && *category != WLAN_CATEGORY_HT && *category != WLAN_CATEGORY_WNM_UNPROTECTED && *category != WLAN_CATEGORY_SELF_PROTECTED && *category != WLAN_CATEGORY_UNPROT_DMG && *category != WLAN_CATEGORY_VHT && *category != WLAN_CATEGORY_VENDOR_SPECIFIC; } return false; } /** * ieee80211_is_robust_mgmt_frame - check if skb contains a robust mgmt frame * @skb: the skb containing the frame, length will be checked */ static inline bool ieee80211_is_robust_mgmt_frame(struct sk_buff *skb) { if (skb->len < IEEE80211_MIN_ACTION_SIZE) return false; return _ieee80211_is_robust_mgmt_frame((void *)skb->data); } /** * ieee80211_is_public_action - check if frame is a public action frame * @hdr: the frame * @len: length of the frame */ static inline bool ieee80211_is_public_action(struct ieee80211_hdr *hdr, size_t len) { struct ieee80211_mgmt *mgmt = (void *)hdr; if (len < IEEE80211_MIN_ACTION_SIZE) return false; if (!ieee80211_is_action(hdr->frame_control)) return false; return mgmt->u.action.category == WLAN_CATEGORY_PUBLIC; } /** * _ieee80211_is_group_privacy_action - check if frame is a group addressed * privacy action frame * @hdr: the frame */ static inline bool _ieee80211_is_group_privacy_action(struct ieee80211_hdr *hdr) { struct ieee80211_mgmt *mgmt = (void *)hdr; if (!ieee80211_is_action(hdr->frame_control) || !is_multicast_ether_addr(hdr->addr1)) return false; return mgmt->u.action.category == WLAN_CATEGORY_MESH_ACTION || mgmt->u.action.category == WLAN_CATEGORY_MULTIHOP_ACTION; } /** * ieee80211_is_group_privacy_action - check if frame is a group addressed * privacy action frame * @skb: the skb containing the frame, length will be checked */ static inline bool ieee80211_is_group_privacy_action(struct sk_buff *skb) { if (skb->len < IEEE80211_MIN_ACTION_SIZE) return false; return _ieee80211_is_group_privacy_action((void *)skb->data); } /** * ieee80211_tu_to_usec - convert time units (TU) to microseconds * @tu: the TUs */ static inline unsigned long ieee80211_tu_to_usec(unsigned long tu) { return 1024 * tu; } /** * ieee80211_check_tim - check if AID bit is set in TIM * @tim: the TIM IE * @tim_len: length of the TIM IE * @aid: the AID to look for */ static inline bool ieee80211_check_tim(const struct ieee80211_tim_ie *tim, u8 tim_len, u16 aid) { u8 mask; u8 index, indexn1, indexn2; if (unlikely(!tim || tim_len < sizeof(*tim))) return false; aid &= 0x3fff; index = aid / 8; mask = 1 << (aid & 7); indexn1 = tim->bitmap_ctrl & 0xfe; indexn2 = tim_len + indexn1 - 4; if (index < indexn1 || index > indexn2) return false; index -= indexn1; return !!(tim->virtual_map[index] & mask); } /** * ieee80211_get_tdls_action - get tdls packet action (or -1, if not tdls packet) * @skb: the skb containing the frame, length will not be checked * @hdr_size: the size of the ieee80211_hdr that starts at skb->data * * This function assumes the frame is a data frame, and that the network header * is in the correct place. 
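 *
 * An illustrative usage sketch (not part of the original header; "skb"
 * and "hdr_size" are assumed to come from the caller's RX path, and
 * handle_tdls_setup() is hypothetical):
 *
 *	int action = ieee80211_get_tdls_action(skb, hdr_size);
 *
 *	if (action == WLAN_TDLS_SETUP_REQUEST)
 *		handle_tdls_setup(skb);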
*/ static inline int ieee80211_get_tdls_action(struct sk_buff *skb, u32 hdr_size) { if (!skb_is_nonlinear(skb) && skb->len > (skb_network_offset(skb) + 2)) { /* Point to where the indication of TDLS should start */ const u8 *tdls_data = skb_network_header(skb) - 2; if (get_unaligned_be16(tdls_data) == ETH_P_TDLS && tdls_data[2] == WLAN_TDLS_SNAP_RFTYPE && tdls_data[3] == WLAN_CATEGORY_TDLS) return tdls_data[4]; } return -1; } /* convert time units */ #define TU_TO_JIFFIES(x) (usecs_to_jiffies((x) * 1024)) #define TU_TO_EXP_TIME(x) (jiffies + TU_TO_JIFFIES(x)) /** * ieee80211_action_contains_tpc - checks if the frame contains TPC element * @skb: the skb containing the frame, length will be checked * * This function checks if it's either a TPC report action frame or a Link * Measurement report action frame as defined in IEEE Std. 802.11-2012 8.5.2.5 * and 8.5.7.5 respectively. */ static inline bool ieee80211_action_contains_tpc(struct sk_buff *skb) { struct ieee80211_mgmt *mgmt = (void *)skb->data; if (!ieee80211_is_action(mgmt->frame_control)) return false; if (skb->len < IEEE80211_MIN_ACTION_SIZE + sizeof(mgmt->u.action.u.tpc_report)) return false; /* * TPC report - check that: * category = 0 (Spectrum Management) or 5 (Radio Measurement) * spectrum management action = 3 (TPC/Link Measurement report) * TPC report EID = 35 * TPC report element length = 2 * * The spectrum management's tpc_report struct is used here both for * parsing tpc_report and radio measurement's link measurement report * frame, since the relevant part is identical in both frames. */ if (mgmt->u.action.category != WLAN_CATEGORY_SPECTRUM_MGMT && mgmt->u.action.category != WLAN_CATEGORY_RADIO_MEASUREMENT) return false; /* both spectrum mgmt and link measurement have same action code */ if (mgmt->u.action.u.tpc_report.action_code != WLAN_ACTION_SPCT_TPC_RPRT) return false; if (mgmt->u.action.u.tpc_report.tpc_elem_id != WLAN_EID_TPC_REPORT || mgmt->u.action.u.tpc_report.tpc_elem_length != sizeof(struct ieee80211_tpc_report_ie)) return false; return true; } struct element { u8 id; u8 datalen; u8 data[]; }; /* element iteration helpers */ #define for_each_element(element, _data, _datalen) \ for (element = (void *)(_data); \ (u8 *)(_data) + (_datalen) - (u8 *)element >= \ sizeof(*element) && \ (u8 *)(_data) + (_datalen) - (u8 *)element >= \ sizeof(*element) + element->datalen; \ element = (void *)(element->data + element->datalen)) #define for_each_element_id(element, _id, data, datalen) \ for_each_element(element, data, datalen) \ if (element->id == (_id)) #define for_each_element_extid(element, extid, _data, _datalen) \ for_each_element(element, _data, _datalen) \ if (element->id == WLAN_EID_EXTENSION && \ element->datalen > 0 && \ element->data[0] == (extid)) #define for_each_subelement(sub, element) \ for_each_element(sub, (element)->data, (element)->datalen) #define for_each_subelement_id(sub, id, element) \ for_each_element_id(sub, id, (element)->data, (element)->datalen) #define for_each_subelement_extid(sub, extid, element) \ for_each_element_extid(sub, extid, (element)->data, (element)->datalen) /** * for_each_element_completed - determine if element parsing consumed all data * @element: element pointer after for_each_element() or friends * @data: same data pointer as passed to for_each_element() or friends * @datalen: same data length as passed to for_each_element() or friends * * This function returns %true if all the data was parsed or considered * while walking the elements.
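 * For example (an illustrative sketch, not part of the original header;
 * "ies", "ies_len" and parse_one() are assumed to come from the caller):
 *
 *	const struct element *elem;
 *
 *	for_each_element(elem, ies, ies_len)
 *		parse_one(elem);
 *	if (!for_each_element_completed(elem, ies, ies_len))
 *		return -EINVAL;	(the IE buffer was malformed)
 *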
Only use this if your for_each_element() * loop cannot be broken out of, otherwise it always returns %false. * * If some data was malformed, this returns %false since the last parsed * element will not fill the whole remaining data. */ static inline bool for_each_element_completed(const struct element *element, const void *data, size_t datalen) { return (u8 *)element == (u8 *)data + datalen; } #endif /* LINUX_IEEE80211_H */ backport-iwlwifi-dkms-7906/include/linux/mpls.h000066400000000000000000000006121351064634500216060ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_MPLS_H #define _LINUX_MPLS_H #include <uapi/linux/mpls.h> #define MPLS_TTL_MASK (MPLS_LS_TTL_MASK >> MPLS_LS_TTL_SHIFT) #define MPLS_BOS_MASK (MPLS_LS_S_MASK >> MPLS_LS_S_SHIFT) #define MPLS_TC_MASK (MPLS_LS_TC_MASK >> MPLS_LS_TC_SHIFT) #define MPLS_LABEL_MASK (MPLS_LS_LABEL_MASK >> MPLS_LS_LABEL_SHIFT) #endif /* _LINUX_MPLS_H */ backport-iwlwifi-dkms-7906/include/linux/oid_registry.h000066400000000000000000000006721351064634500233440ustar00rootroot00000000000000/* Automatically created during backport process */ #ifndef CPTCFG_BPAUTO_BUILD_SYSTEM_DATA_VERIFICATION #include_next <linux/oid_registry.h> #else #undef look_up_OID #define look_up_OID LINUX_BACKPORT(look_up_OID) #undef sprint_oid #define sprint_oid LINUX_BACKPORT(sprint_oid) #undef sprint_OID #define sprint_OID LINUX_BACKPORT(sprint_OID) #include <linux/backport-oid_registry.h> #endif /* CPTCFG_BPAUTO_BUILD_SYSTEM_DATA_VERIFICATION */ backport-iwlwifi-dkms-7906/include/linux/overflow.h000066400000000000000000000223611351064634500225030ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0 OR MIT */ #ifndef __LINUX_OVERFLOW_H #define __LINUX_OVERFLOW_H #include <linux/compiler.h> /* * In the fallback code below, we need to compute the minimum and * maximum values representable in a given type. These macros may also * be useful elsewhere, so we provide them outside the * COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW block. * * It would seem more obvious to do something like * * #define type_min(T) (T)(is_signed_type(T) ? (T)1 << (8*sizeof(T)-1) : 0) * #define type_max(T) (T)(is_signed_type(T) ? ((T)1 << (8*sizeof(T)-1)) - 1 : ~(T)0) * * Unfortunately, the middle expressions, strictly speaking, have * undefined behaviour, and at least some versions of gcc warn about * the type_max expression (but not if -fsanitize=undefined is in * effect; in that case, the warning is deferred to runtime...). * * The slightly excessive casting in type_min is to make sure the * macros also produce sensible values for the exotic type _Bool. [The * overflow checkers only almost work for _Bool, but that's * a-feature-not-a-bug, since people shouldn't be doing arithmetic on * _Bools. Besides, the gcc builtins don't allow _Bool* as third * argument.] * * Idea stolen from * https://mail-index.netbsd.org/tech-misc/2007/02/05/0000.html - * credit to Christian Biere. */ #define is_signed_type(type) (((type)(-1)) < (type)1) #define __type_half_max(type) ((type)1 << (8*sizeof(type) - 1 - is_signed_type(type))) #define type_max(T) ((T)((__type_half_max(T) - 1) + __type_half_max(T))) #define type_min(T) ((T)((T)-type_max(T)-(T)1)) #ifdef COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW /* * For simplicity and code hygiene, the fallback code below insists on * a, b and *d having the same type (similar to the min() and max() * macros), whereas gcc's type-generic overflow checkers accept * different types. Hence we don't just make check_add_overflow an * alias for __builtin_add_overflow, but add type checks similar to * below.
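 *
 * An illustrative sketch of the intended use (not part of the original
 * header; "a" and "b" are assumed to be size_t values from the caller):
 *
 *	size_t total;
 *
 *	if (check_add_overflow(a, b, &total))
 *		return -EOVERFLOW;
 *
 * check_add_overflow() returns true on overflow and stores the
 * (possibly wrapped) sum in "total" either way.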
*/ #define check_add_overflow(a, b, d) ({ \ typeof(a) __a = (a); \ typeof(b) __b = (b); \ typeof(d) __d = (d); \ (void) (&__a == &__b); \ (void) (&__a == __d); \ __builtin_add_overflow(__a, __b, __d); \ }) #define check_sub_overflow(a, b, d) ({ \ typeof(a) __a = (a); \ typeof(b) __b = (b); \ typeof(d) __d = (d); \ (void) (&__a == &__b); \ (void) (&__a == __d); \ __builtin_sub_overflow(__a, __b, __d); \ }) #define check_mul_overflow(a, b, d) ({ \ typeof(a) __a = (a); \ typeof(b) __b = (b); \ typeof(d) __d = (d); \ (void) (&__a == &__b); \ (void) (&__a == __d); \ __builtin_mul_overflow(__a, __b, __d); \ }) #else /* Checking for unsigned overflow is relatively easy without causing UB. */ #define __unsigned_add_overflow(a, b, d) ({ \ typeof(a) __a = (a); \ typeof(b) __b = (b); \ typeof(d) __d = (d); \ (void) (&__a == &__b); \ (void) (&__a == __d); \ *__d = __a + __b; \ *__d < __a; \ }) #define __unsigned_sub_overflow(a, b, d) ({ \ typeof(a) __a = (a); \ typeof(b) __b = (b); \ typeof(d) __d = (d); \ (void) (&__a == &__b); \ (void) (&__a == __d); \ *__d = __a - __b; \ __a < __b; \ }) /* * If one of a or b is a compile-time constant, this avoids a division. */ #define __unsigned_mul_overflow(a, b, d) ({ \ typeof(a) __a = (a); \ typeof(b) __b = (b); \ typeof(d) __d = (d); \ (void) (&__a == &__b); \ (void) (&__a == __d); \ *__d = __a * __b; \ __builtin_constant_p(__b) ? \ __b > 0 && __a > type_max(typeof(__a)) / __b : \ __a > 0 && __b > type_max(typeof(__b)) / __a; \ }) /* * For signed types, detecting overflow is much harder, especially if * we want to avoid UB. But the interface of these macros is such that * we must provide a result in *d, and in fact we must produce the * result promised by gcc's builtins, which is simply the possibly * wrapped-around value. Fortunately, we can just formally do the * operations in the widest relevant unsigned type (u64) and then * truncate the result - gcc is smart enough to generate the same code * with and without the (u64) casts. */ /* * Adding two signed integers can overflow only if they have the same * sign, and overflow has happened iff the result has the opposite * sign. */ #define __signed_add_overflow(a, b, d) ({ \ typeof(a) __a = (a); \ typeof(b) __b = (b); \ typeof(d) __d = (d); \ (void) (&__a == &__b); \ (void) (&__a == __d); \ *__d = (u64)__a + (u64)__b; \ (((~(__a ^ __b)) & (*__d ^ __a)) \ & type_min(typeof(__a))) != 0; \ }) /* * Subtraction is similar, except that overflow can now happen only * when the signs are opposite. In this case, overflow has happened if * the result has the opposite sign of a. */ #define __signed_sub_overflow(a, b, d) ({ \ typeof(a) __a = (a); \ typeof(b) __b = (b); \ typeof(d) __d = (d); \ (void) (&__a == &__b); \ (void) (&__a == __d); \ *__d = (u64)__a - (u64)__b; \ ((((__a ^ __b)) & (*__d ^ __a)) \ & type_min(typeof(__a))) != 0; \ }) /* * Signed multiplication is rather hard. gcc always follows C99, so * division is truncated towards 0. This means that we can write the * overflow check like this: * * (a > 0 && (b > MAX/a || b < MIN/a)) || * (a < -1 && (b > MIN/a || b < MAX/a) || * (a == -1 && b == MIN) * * The redundant casts of -1 are to silence an annoying -Wtype-limits * (included in -Wextra) warning: When the type is u8 or u16, the * __b_c_e in check_mul_overflow obviously selects * __unsigned_mul_overflow, but unfortunately gcc still parses this * code and warns about the limited range of __b. 
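 *
 * As a worked instance (illustrative, not part of the original header):
 * for s8 operands with __a = 16 and __b = 8, __tmax/__b = 127/8 = 15,
 * so __a > 15 flags overflow; indeed 16 * 8 = 128 wraps to -128 in s8.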
*/ #define __signed_mul_overflow(a, b, d) ({ \ typeof(a) __a = (a); \ typeof(b) __b = (b); \ typeof(d) __d = (d); \ typeof(a) __tmax = type_max(typeof(a)); \ typeof(a) __tmin = type_min(typeof(a)); \ (void) (&__a == &__b); \ (void) (&__a == __d); \ *__d = (u64)__a * (u64)__b; \ (__b > 0 && (__a > __tmax/__b || __a < __tmin/__b)) || \ (__b < (typeof(__b))-1 && (__a > __tmin/__b || __a < __tmax/__b)) || \ (__b == (typeof(__b))-1 && __a == __tmin); \ }) #define check_add_overflow(a, b, d) \ __builtin_choose_expr(is_signed_type(typeof(a)), \ __signed_add_overflow(a, b, d), \ __unsigned_add_overflow(a, b, d)) #define check_sub_overflow(a, b, d) \ __builtin_choose_expr(is_signed_type(typeof(a)), \ __signed_sub_overflow(a, b, d), \ __unsigned_sub_overflow(a, b, d)) #define check_mul_overflow(a, b, d) \ __builtin_choose_expr(is_signed_type(typeof(a)), \ __signed_mul_overflow(a, b, d), \ __unsigned_mul_overflow(a, b, d)) #endif /* COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW */ /** check_shl_overflow() - Calculate a left-shifted value and check overflow * * @a: Value to be shifted * @s: How many bits left to shift * @d: Pointer to where to store the result * * Computes *@d = (@a << @s) * * Returns true if '*d' cannot hold the result or when 'a << s' doesn't * make sense. Example conditions: * - 'a << s' causes bits to be lost when stored in *d. * - 's' is garbage (e.g. negative) or so large that the result of * 'a << s' is guaranteed to be 0. * - 'a' is negative. * - 'a << s' sets the sign bit, if any, in '*d'. * * '*d' will hold the results of the attempted shift, but is not * considered "safe for use" if false is returned. */ #define check_shl_overflow(a, s, d) ({ \ typeof(a) _a = a; \ typeof(s) _s = s; \ typeof(d) _d = d; \ u64 _a_full = _a; \ unsigned int _to_shift = \ _s >= 0 && _s < 8 * sizeof(*d) ? _s : 0; \ *_d = (_a_full << _to_shift); \ (_to_shift != _s || *_d < 0 || _a < 0 || \ (*_d >> _to_shift) != _a); \ }) /** * array_size() - Calculate size of 2-dimensional array. * * @a: dimension one * @b: dimension two * * Calculates size of 2-dimensional array: @a * @b. * * Returns: number of bytes needed to represent the array or SIZE_MAX on * overflow. */ static inline __must_check size_t array_size(size_t a, size_t b) { size_t bytes; if (check_mul_overflow(a, b, &bytes)) return SIZE_MAX; return bytes; } /** * array3_size() - Calculate size of 3-dimensional array. * * @a: dimension one * @b: dimension two * @c: dimension three * * Calculates size of 3-dimensional array: @a * @b * @c. * * Returns: number of bytes needed to represent the array or SIZE_MAX on * overflow. */ static inline __must_check size_t array3_size(size_t a, size_t b, size_t c) { size_t bytes; if (check_mul_overflow(a, b, &bytes)) return SIZE_MAX; if (check_mul_overflow(bytes, c, &bytes)) return SIZE_MAX; return bytes; } static inline __must_check size_t __ab_c_size(size_t n, size_t size, size_t c) { size_t bytes; if (check_mul_overflow(n, size, &bytes)) return SIZE_MAX; if (check_add_overflow(bytes, c, &bytes)) return SIZE_MAX; return bytes; } /** * struct_size() - Calculate size of structure with trailing array. * @p: Pointer to the structure. * @member: Name of the array member. * @n: Number of elements in the array. * * Calculates size of memory needed for structure @p followed by an * array of @n @member elements. * * Return: number of bytes needed or SIZE_MAX on overflow. 
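 *
 * An illustrative sketch (not part of the original header): with a
 * hypothetical "struct foo { int n; u32 items[]; } *p", a caller can
 * allocate the header and trailing array in one step:
 *
 *	p = kmalloc(struct_size(p, items, count), GFP_KERNEL);
 *
 * On overflow this evaluates to SIZE_MAX, so the allocation fails
 * instead of silently wrapping.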
*/ #define struct_size(p, member, n) \ __ab_c_size(n, \ sizeof(*(p)->member) + __must_be_array((p)->member),\ sizeof(*(p))) #endif /* __LINUX_OVERFLOW_H */ backport-iwlwifi-dkms-7906/include/linux/pci_ids.h000066400000000000000000003603341351064634500222570ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0 */ /* * PCI Class, Vendor and Device IDs * * Please keep sorted. * * Do not add new entries to this file unless the definitions * are shared between multiple drivers. */ #ifndef _LINUX_PCI_IDS_H #define _LINUX_PCI_IDS_H /* Device classes and subclasses */ #define PCI_CLASS_NOT_DEFINED 0x0000 #define PCI_CLASS_NOT_DEFINED_VGA 0x0001 #define PCI_BASE_CLASS_STORAGE 0x01 #define PCI_CLASS_STORAGE_SCSI 0x0100 #define PCI_CLASS_STORAGE_IDE 0x0101 #define PCI_CLASS_STORAGE_FLOPPY 0x0102 #define PCI_CLASS_STORAGE_IPI 0x0103 #define PCI_CLASS_STORAGE_RAID 0x0104 #define PCI_CLASS_STORAGE_SATA 0x0106 #define PCI_CLASS_STORAGE_SATA_AHCI 0x010601 #define PCI_CLASS_STORAGE_SAS 0x0107 #define PCI_CLASS_STORAGE_EXPRESS 0x010802 #define PCI_CLASS_STORAGE_OTHER 0x0180 #define PCI_BASE_CLASS_NETWORK 0x02 #define PCI_CLASS_NETWORK_ETHERNET 0x0200 #define PCI_CLASS_NETWORK_TOKEN_RING 0x0201 #define PCI_CLASS_NETWORK_FDDI 0x0202 #define PCI_CLASS_NETWORK_ATM 0x0203 #define PCI_CLASS_NETWORK_OTHER 0x0280 #define PCI_BASE_CLASS_DISPLAY 0x03 #define PCI_CLASS_DISPLAY_VGA 0x0300 #define PCI_CLASS_DISPLAY_XGA 0x0301 #define PCI_CLASS_DISPLAY_3D 0x0302 #define PCI_CLASS_DISPLAY_OTHER 0x0380 #define PCI_BASE_CLASS_MULTIMEDIA 0x04 #define PCI_CLASS_MULTIMEDIA_VIDEO 0x0400 #define PCI_CLASS_MULTIMEDIA_AUDIO 0x0401 #define PCI_CLASS_MULTIMEDIA_PHONE 0x0402 #define PCI_CLASS_MULTIMEDIA_HD_AUDIO 0x0403 #define PCI_CLASS_MULTIMEDIA_OTHER 0x0480 #define PCI_BASE_CLASS_MEMORY 0x05 #define PCI_CLASS_MEMORY_RAM 0x0500 #define PCI_CLASS_MEMORY_FLASH 0x0501 #define PCI_CLASS_MEMORY_OTHER 0x0580 #define PCI_BASE_CLASS_BRIDGE 0x06 #define PCI_CLASS_BRIDGE_HOST 0x0600 #define PCI_CLASS_BRIDGE_ISA 0x0601 #define PCI_CLASS_BRIDGE_EISA 0x0602 #define PCI_CLASS_BRIDGE_MC 0x0603 #define PCI_CLASS_BRIDGE_PCI 0x0604 #define PCI_CLASS_BRIDGE_PCMCIA 0x0605 #define PCI_CLASS_BRIDGE_NUBUS 0x0606 #define PCI_CLASS_BRIDGE_CARDBUS 0x0607 #define PCI_CLASS_BRIDGE_RACEWAY 0x0608 #define PCI_CLASS_BRIDGE_OTHER 0x0680 #define PCI_BASE_CLASS_COMMUNICATION 0x07 #define PCI_CLASS_COMMUNICATION_SERIAL 0x0700 #define PCI_CLASS_COMMUNICATION_PARALLEL 0x0701 #define PCI_CLASS_COMMUNICATION_MULTISERIAL 0x0702 #define PCI_CLASS_COMMUNICATION_MODEM 0x0703 #define PCI_CLASS_COMMUNICATION_OTHER 0x0780 #define PCI_BASE_CLASS_SYSTEM 0x08 #define PCI_CLASS_SYSTEM_PIC 0x0800 #define PCI_CLASS_SYSTEM_PIC_IOAPIC 0x080010 #define PCI_CLASS_SYSTEM_PIC_IOXAPIC 0x080020 #define PCI_CLASS_SYSTEM_DMA 0x0801 #define PCI_CLASS_SYSTEM_TIMER 0x0802 #define PCI_CLASS_SYSTEM_RTC 0x0803 #define PCI_CLASS_SYSTEM_PCI_HOTPLUG 0x0804 #define PCI_CLASS_SYSTEM_SDHCI 0x0805 #define PCI_CLASS_SYSTEM_OTHER 0x0880 #define PCI_BASE_CLASS_INPUT 0x09 #define PCI_CLASS_INPUT_KEYBOARD 0x0900 #define PCI_CLASS_INPUT_PEN 0x0901 #define PCI_CLASS_INPUT_MOUSE 0x0902 #define PCI_CLASS_INPUT_SCANNER 0x0903 #define PCI_CLASS_INPUT_GAMEPORT 0x0904 #define PCI_CLASS_INPUT_OTHER 0x0980 #define PCI_BASE_CLASS_DOCKING 0x0a #define PCI_CLASS_DOCKING_GENERIC 0x0a00 #define PCI_CLASS_DOCKING_OTHER 0x0a80 #define PCI_BASE_CLASS_PROCESSOR 0x0b #define PCI_CLASS_PROCESSOR_386 0x0b00 #define PCI_CLASS_PROCESSOR_486 0x0b01 #define PCI_CLASS_PROCESSOR_PENTIUM 0x0b02 #define PCI_CLASS_PROCESSOR_ALPHA 
0x0b10 #define PCI_CLASS_PROCESSOR_POWERPC 0x0b20 #define PCI_CLASS_PROCESSOR_MIPS 0x0b30 #define PCI_CLASS_PROCESSOR_CO 0x0b40 #define PCI_BASE_CLASS_SERIAL 0x0c #define PCI_CLASS_SERIAL_FIREWIRE 0x0c00 #define PCI_CLASS_SERIAL_FIREWIRE_OHCI 0x0c0010 #define PCI_CLASS_SERIAL_ACCESS 0x0c01 #define PCI_CLASS_SERIAL_SSA 0x0c02 #define PCI_CLASS_SERIAL_USB 0x0c03 #define PCI_CLASS_SERIAL_USB_UHCI 0x0c0300 #define PCI_CLASS_SERIAL_USB_OHCI 0x0c0310 #define PCI_CLASS_SERIAL_USB_EHCI 0x0c0320 #define PCI_CLASS_SERIAL_USB_XHCI 0x0c0330 #define PCI_CLASS_SERIAL_USB_DEVICE 0x0c03fe #define PCI_CLASS_SERIAL_FIBER 0x0c04 #define PCI_CLASS_SERIAL_SMBUS 0x0c05 #define PCI_CLASS_SERIAL_IPMI 0x0c07 #define PCI_CLASS_SERIAL_IPMI_SMIC 0x0c0700 #define PCI_CLASS_SERIAL_IPMI_KCS 0x0c0701 #define PCI_CLASS_SERIAL_IPMI_BT 0x0c0702 #define PCI_BASE_CLASS_WIRELESS 0x0d #define PCI_CLASS_WIRELESS_RF_CONTROLLER 0x0d10 #define PCI_CLASS_WIRELESS_WHCI 0x0d1010 #define PCI_BASE_CLASS_INTELLIGENT 0x0e #define PCI_CLASS_INTELLIGENT_I2O 0x0e00 #define PCI_BASE_CLASS_SATELLITE 0x0f #define PCI_CLASS_SATELLITE_TV 0x0f00 #define PCI_CLASS_SATELLITE_AUDIO 0x0f01 #define PCI_CLASS_SATELLITE_VOICE 0x0f03 #define PCI_CLASS_SATELLITE_DATA 0x0f04 #define PCI_BASE_CLASS_CRYPT 0x10 #define PCI_CLASS_CRYPT_NETWORK 0x1000 #define PCI_CLASS_CRYPT_ENTERTAINMENT 0x1001 #define PCI_CLASS_CRYPT_OTHER 0x1080 #define PCI_BASE_CLASS_SIGNAL_PROCESSING 0x11 #define PCI_CLASS_SP_DPIO 0x1100 #define PCI_CLASS_SP_OTHER 0x1180 #define PCI_CLASS_OTHERS 0xff /* Vendors and devices. Sort key: vendor first, device next. */ #define PCI_VENDOR_ID_TTTECH 0x0357 #define PCI_DEVICE_ID_TTTECH_MC322 0x000a #define PCI_VENDOR_ID_DYNALINK 0x0675 #define PCI_DEVICE_ID_DYNALINK_IS64PH 0x1702 #define PCI_VENDOR_ID_UBIQUITI 0x0777 #define PCI_VENDOR_ID_BERKOM 0x0871 #define PCI_DEVICE_ID_BERKOM_A1T 0xffa1 #define PCI_DEVICE_ID_BERKOM_T_CONCEPT 0xffa2 #define PCI_DEVICE_ID_BERKOM_A4T 0xffa4 #define PCI_DEVICE_ID_BERKOM_SCITEL_QUADRO 0xffa8 #define PCI_VENDOR_ID_COMPAQ 0x0e11 #define PCI_DEVICE_ID_COMPAQ_TOKENRING 0x0508 #define PCI_DEVICE_ID_COMPAQ_TACHYON 0xa0fc #define PCI_DEVICE_ID_COMPAQ_SMART2P 0xae10 #define PCI_DEVICE_ID_COMPAQ_NETEL100 0xae32 #define PCI_DEVICE_ID_COMPAQ_NETEL10 0xae34 #define PCI_DEVICE_ID_COMPAQ_TRIFLEX_IDE 0xae33 #define PCI_DEVICE_ID_COMPAQ_NETFLEX3I 0xae35 #define PCI_DEVICE_ID_COMPAQ_NETEL100D 0xae40 #define PCI_DEVICE_ID_COMPAQ_NETEL100PI 0xae43 #define PCI_DEVICE_ID_COMPAQ_NETEL100I 0xb011 #define PCI_DEVICE_ID_COMPAQ_CISS 0xb060 #define PCI_DEVICE_ID_COMPAQ_CISSB 0xb178 #define PCI_DEVICE_ID_COMPAQ_CISSC 0x46 #define PCI_DEVICE_ID_COMPAQ_THUNDER 0xf130 #define PCI_DEVICE_ID_COMPAQ_NETFLEX3B 0xf150 #define PCI_VENDOR_ID_NCR 0x1000 #define PCI_VENDOR_ID_LSI_LOGIC 0x1000 #define PCI_DEVICE_ID_NCR_53C810 0x0001 #define PCI_DEVICE_ID_NCR_53C820 0x0002 #define PCI_DEVICE_ID_NCR_53C825 0x0003 #define PCI_DEVICE_ID_NCR_53C815 0x0004 #define PCI_DEVICE_ID_LSI_53C810AP 0x0005 #define PCI_DEVICE_ID_NCR_53C860 0x0006 #define PCI_DEVICE_ID_LSI_53C1510 0x000a #define PCI_DEVICE_ID_NCR_53C896 0x000b #define PCI_DEVICE_ID_NCR_53C895 0x000c #define PCI_DEVICE_ID_NCR_53C885 0x000d #define PCI_DEVICE_ID_NCR_53C875 0x000f #define PCI_DEVICE_ID_NCR_53C1510 0x0010 #define PCI_DEVICE_ID_LSI_53C895A 0x0012 #define PCI_DEVICE_ID_LSI_53C875A 0x0013 #define PCI_DEVICE_ID_LSI_53C1010_33 0x0020 #define PCI_DEVICE_ID_LSI_53C1010_66 0x0021 #define PCI_DEVICE_ID_LSI_53C1030 0x0030 #define PCI_DEVICE_ID_LSI_1030_53C1035 0x0032 #define PCI_DEVICE_ID_LSI_53C1035 
0x0040 #define PCI_DEVICE_ID_NCR_53C875J 0x008f #define PCI_DEVICE_ID_LSI_FC909 0x0621 #define PCI_DEVICE_ID_LSI_FC929 0x0622 #define PCI_DEVICE_ID_LSI_FC929_LAN 0x0623 #define PCI_DEVICE_ID_LSI_FC919 0x0624 #define PCI_DEVICE_ID_LSI_FC919_LAN 0x0625 #define PCI_DEVICE_ID_LSI_FC929X 0x0626 #define PCI_DEVICE_ID_LSI_FC939X 0x0642 #define PCI_DEVICE_ID_LSI_FC949X 0x0640 #define PCI_DEVICE_ID_LSI_FC949ES 0x0646 #define PCI_DEVICE_ID_LSI_FC919X 0x0628 #define PCI_DEVICE_ID_NCR_YELLOWFIN 0x0701 #define PCI_DEVICE_ID_LSI_61C102 0x0901 #define PCI_DEVICE_ID_LSI_63C815 0x1000 #define PCI_DEVICE_ID_LSI_SAS1064 0x0050 #define PCI_DEVICE_ID_LSI_SAS1064R 0x0411 #define PCI_DEVICE_ID_LSI_SAS1066 0x005E #define PCI_DEVICE_ID_LSI_SAS1068 0x0054 #define PCI_DEVICE_ID_LSI_SAS1064A 0x005C #define PCI_DEVICE_ID_LSI_SAS1064E 0x0056 #define PCI_DEVICE_ID_LSI_SAS1066E 0x005A #define PCI_DEVICE_ID_LSI_SAS1068E 0x0058 #define PCI_DEVICE_ID_LSI_SAS1078 0x0060 #define PCI_VENDOR_ID_ATI 0x1002 /* Mach64 */ #define PCI_DEVICE_ID_ATI_68800 0x4158 #define PCI_DEVICE_ID_ATI_215CT222 0x4354 #define PCI_DEVICE_ID_ATI_210888CX 0x4358 #define PCI_DEVICE_ID_ATI_215ET222 0x4554 /* Mach64 / Rage */ #define PCI_DEVICE_ID_ATI_215GB 0x4742 #define PCI_DEVICE_ID_ATI_215GD 0x4744 #define PCI_DEVICE_ID_ATI_215GI 0x4749 #define PCI_DEVICE_ID_ATI_215GP 0x4750 #define PCI_DEVICE_ID_ATI_215GQ 0x4751 #define PCI_DEVICE_ID_ATI_215XL 0x4752 #define PCI_DEVICE_ID_ATI_215GT 0x4754 #define PCI_DEVICE_ID_ATI_215GTB 0x4755 #define PCI_DEVICE_ID_ATI_215_IV 0x4756 #define PCI_DEVICE_ID_ATI_215_IW 0x4757 #define PCI_DEVICE_ID_ATI_215_IZ 0x475A #define PCI_DEVICE_ID_ATI_210888GX 0x4758 #define PCI_DEVICE_ID_ATI_215_LB 0x4c42 #define PCI_DEVICE_ID_ATI_215_LD 0x4c44 #define PCI_DEVICE_ID_ATI_215_LG 0x4c47 #define PCI_DEVICE_ID_ATI_215_LI 0x4c49 #define PCI_DEVICE_ID_ATI_215_LM 0x4c4D #define PCI_DEVICE_ID_ATI_215_LN 0x4c4E #define PCI_DEVICE_ID_ATI_215_LR 0x4c52 #define PCI_DEVICE_ID_ATI_215_LS 0x4c53 #define PCI_DEVICE_ID_ATI_264_LT 0x4c54 /* Mach64 VT */ #define PCI_DEVICE_ID_ATI_264VT 0x5654 #define PCI_DEVICE_ID_ATI_264VU 0x5655 #define PCI_DEVICE_ID_ATI_264VV 0x5656 /* Rage128 GL */ #define PCI_DEVICE_ID_ATI_RAGE128_RE 0x5245 #define PCI_DEVICE_ID_ATI_RAGE128_RF 0x5246 #define PCI_DEVICE_ID_ATI_RAGE128_RG 0x5247 /* Rage128 VR */ #define PCI_DEVICE_ID_ATI_RAGE128_RK 0x524b #define PCI_DEVICE_ID_ATI_RAGE128_RL 0x524c #define PCI_DEVICE_ID_ATI_RAGE128_SE 0x5345 #define PCI_DEVICE_ID_ATI_RAGE128_SF 0x5346 #define PCI_DEVICE_ID_ATI_RAGE128_SG 0x5347 #define PCI_DEVICE_ID_ATI_RAGE128_SH 0x5348 #define PCI_DEVICE_ID_ATI_RAGE128_SK 0x534b #define PCI_DEVICE_ID_ATI_RAGE128_SL 0x534c #define PCI_DEVICE_ID_ATI_RAGE128_SM 0x534d #define PCI_DEVICE_ID_ATI_RAGE128_SN 0x534e /* Rage128 Ultra */ #define PCI_DEVICE_ID_ATI_RAGE128_TF 0x5446 #define PCI_DEVICE_ID_ATI_RAGE128_TL 0x544c #define PCI_DEVICE_ID_ATI_RAGE128_TR 0x5452 #define PCI_DEVICE_ID_ATI_RAGE128_TS 0x5453 #define PCI_DEVICE_ID_ATI_RAGE128_TT 0x5454 #define PCI_DEVICE_ID_ATI_RAGE128_TU 0x5455 /* Rage128 M3 */ #define PCI_DEVICE_ID_ATI_RAGE128_LE 0x4c45 #define PCI_DEVICE_ID_ATI_RAGE128_LF 0x4c46 /* Rage128 M4 */ #define PCI_DEVICE_ID_ATI_RAGE128_MF 0x4d46 #define PCI_DEVICE_ID_ATI_RAGE128_ML 0x4d4c /* Rage128 Pro GL */ #define PCI_DEVICE_ID_ATI_RAGE128_PA 0x5041 #define PCI_DEVICE_ID_ATI_RAGE128_PB 0x5042 #define PCI_DEVICE_ID_ATI_RAGE128_PC 0x5043 #define PCI_DEVICE_ID_ATI_RAGE128_PD 0x5044 #define PCI_DEVICE_ID_ATI_RAGE128_PE 0x5045 #define PCI_DEVICE_ID_ATI_RAGE128_PF 0x5046 /* Rage128 Pro VR */ 
#define PCI_DEVICE_ID_ATI_RAGE128_PG 0x5047 #define PCI_DEVICE_ID_ATI_RAGE128_PH 0x5048 #define PCI_DEVICE_ID_ATI_RAGE128_PI 0x5049 #define PCI_DEVICE_ID_ATI_RAGE128_PJ 0x504A #define PCI_DEVICE_ID_ATI_RAGE128_PK 0x504B #define PCI_DEVICE_ID_ATI_RAGE128_PL 0x504C #define PCI_DEVICE_ID_ATI_RAGE128_PM 0x504D #define PCI_DEVICE_ID_ATI_RAGE128_PN 0x504E #define PCI_DEVICE_ID_ATI_RAGE128_PO 0x504F #define PCI_DEVICE_ID_ATI_RAGE128_PP 0x5050 #define PCI_DEVICE_ID_ATI_RAGE128_PQ 0x5051 #define PCI_DEVICE_ID_ATI_RAGE128_PR 0x5052 #define PCI_DEVICE_ID_ATI_RAGE128_PS 0x5053 #define PCI_DEVICE_ID_ATI_RAGE128_PT 0x5054 #define PCI_DEVICE_ID_ATI_RAGE128_PU 0x5055 #define PCI_DEVICE_ID_ATI_RAGE128_PV 0x5056 #define PCI_DEVICE_ID_ATI_RAGE128_PW 0x5057 #define PCI_DEVICE_ID_ATI_RAGE128_PX 0x5058 /* Rage128 M4 */ /* Radeon R100 */ #define PCI_DEVICE_ID_ATI_RADEON_QD 0x5144 #define PCI_DEVICE_ID_ATI_RADEON_QE 0x5145 #define PCI_DEVICE_ID_ATI_RADEON_QF 0x5146 #define PCI_DEVICE_ID_ATI_RADEON_QG 0x5147 /* Radeon RV100 (VE) */ #define PCI_DEVICE_ID_ATI_RADEON_QY 0x5159 #define PCI_DEVICE_ID_ATI_RADEON_QZ 0x515a /* Radeon R200 (8500) */ #define PCI_DEVICE_ID_ATI_RADEON_QL 0x514c #define PCI_DEVICE_ID_ATI_RADEON_QN 0x514e #define PCI_DEVICE_ID_ATI_RADEON_QO 0x514f #define PCI_DEVICE_ID_ATI_RADEON_Ql 0x516c #define PCI_DEVICE_ID_ATI_RADEON_BB 0x4242 /* Radeon R200 (9100) */ #define PCI_DEVICE_ID_ATI_RADEON_QM 0x514d /* Radeon RV200 (7500) */ #define PCI_DEVICE_ID_ATI_RADEON_QW 0x5157 #define PCI_DEVICE_ID_ATI_RADEON_QX 0x5158 /* Radeon NV-100 */ /* Radeon RV250 (9000) */ #define PCI_DEVICE_ID_ATI_RADEON_Id 0x4964 #define PCI_DEVICE_ID_ATI_RADEON_Ie 0x4965 #define PCI_DEVICE_ID_ATI_RADEON_If 0x4966 #define PCI_DEVICE_ID_ATI_RADEON_Ig 0x4967 /* Radeon RV280 (9200) */ #define PCI_DEVICE_ID_ATI_RADEON_Ya 0x5961 #define PCI_DEVICE_ID_ATI_RADEON_Yd 0x5964 /* Radeon R300 (9500) */ /* Radeon R300 (9700) */ #define PCI_DEVICE_ID_ATI_RADEON_ND 0x4e44 #define PCI_DEVICE_ID_ATI_RADEON_NE 0x4e45 #define PCI_DEVICE_ID_ATI_RADEON_NF 0x4e46 #define PCI_DEVICE_ID_ATI_RADEON_NG 0x4e47 /* Radeon R350 (9800) */ /* Radeon RV350 (9600) */ /* Radeon M6 */ #define PCI_DEVICE_ID_ATI_RADEON_LY 0x4c59 #define PCI_DEVICE_ID_ATI_RADEON_LZ 0x4c5a /* Radeon M7 */ #define PCI_DEVICE_ID_ATI_RADEON_LW 0x4c57 #define PCI_DEVICE_ID_ATI_RADEON_LX 0x4c58 /* Radeon M9 */ #define PCI_DEVICE_ID_ATI_RADEON_Ld 0x4c64 #define PCI_DEVICE_ID_ATI_RADEON_Le 0x4c65 #define PCI_DEVICE_ID_ATI_RADEON_Lf 0x4c66 #define PCI_DEVICE_ID_ATI_RADEON_Lg 0x4c67 /* Radeon */ /* RadeonIGP */ #define PCI_DEVICE_ID_ATI_RS100 0xcab0 #define PCI_DEVICE_ID_ATI_RS200 0xcab2 #define PCI_DEVICE_ID_ATI_RS200_B 0xcbb2 #define PCI_DEVICE_ID_ATI_RS250 0xcab3 #define PCI_DEVICE_ID_ATI_RS300_100 0x5830 #define PCI_DEVICE_ID_ATI_RS300_133 0x5831 #define PCI_DEVICE_ID_ATI_RS300_166 0x5832 #define PCI_DEVICE_ID_ATI_RS300_200 0x5833 #define PCI_DEVICE_ID_ATI_RS350_100 0x7830 #define PCI_DEVICE_ID_ATI_RS350_133 0x7831 #define PCI_DEVICE_ID_ATI_RS350_166 0x7832 #define PCI_DEVICE_ID_ATI_RS350_200 0x7833 #define PCI_DEVICE_ID_ATI_RS400_100 0x5a30 #define PCI_DEVICE_ID_ATI_RS400_133 0x5a31 #define PCI_DEVICE_ID_ATI_RS400_166 0x5a32 #define PCI_DEVICE_ID_ATI_RS400_200 0x5a33 #define PCI_DEVICE_ID_ATI_RS480 0x5950 /* ATI IXP Chipset */ #define PCI_DEVICE_ID_ATI_IXP200_IDE 0x4349 #define PCI_DEVICE_ID_ATI_IXP200_SMBUS 0x4353 #define PCI_DEVICE_ID_ATI_IXP300_SMBUS 0x4363 #define PCI_DEVICE_ID_ATI_IXP300_IDE 0x4369 #define PCI_DEVICE_ID_ATI_IXP300_SATA 0x436e #define PCI_DEVICE_ID_ATI_IXP400_SMBUS 
0x4372 #define PCI_DEVICE_ID_ATI_IXP400_IDE 0x4376 #define PCI_DEVICE_ID_ATI_IXP400_SATA 0x4379 #define PCI_DEVICE_ID_ATI_IXP400_SATA2 0x437a #define PCI_DEVICE_ID_ATI_IXP600_SATA 0x4380 #define PCI_DEVICE_ID_ATI_SBX00_SMBUS 0x4385 #define PCI_DEVICE_ID_ATI_IXP600_IDE 0x438c #define PCI_DEVICE_ID_ATI_IXP700_SATA 0x4390 #define PCI_DEVICE_ID_ATI_IXP700_IDE 0x439c #define PCI_VENDOR_ID_VLSI 0x1004 #define PCI_DEVICE_ID_VLSI_82C592 0x0005 #define PCI_DEVICE_ID_VLSI_82C593 0x0006 #define PCI_DEVICE_ID_VLSI_82C594 0x0007 #define PCI_DEVICE_ID_VLSI_82C597 0x0009 #define PCI_DEVICE_ID_VLSI_82C541 0x000c #define PCI_DEVICE_ID_VLSI_82C543 0x000d #define PCI_DEVICE_ID_VLSI_82C532 0x0101 #define PCI_DEVICE_ID_VLSI_82C534 0x0102 #define PCI_DEVICE_ID_VLSI_82C535 0x0104 #define PCI_DEVICE_ID_VLSI_82C147 0x0105 #define PCI_DEVICE_ID_VLSI_VAS96011 0x0702 /* AMD RD890 Chipset */ #define PCI_DEVICE_ID_RD890_IOMMU 0x5a23 #define PCI_VENDOR_ID_ADL 0x1005 #define PCI_DEVICE_ID_ADL_2301 0x2301 #define PCI_VENDOR_ID_NS 0x100b #define PCI_DEVICE_ID_NS_87415 0x0002 #define PCI_DEVICE_ID_NS_87560_LIO 0x000e #define PCI_DEVICE_ID_NS_87560_USB 0x0012 #define PCI_DEVICE_ID_NS_83815 0x0020 #define PCI_DEVICE_ID_NS_83820 0x0022 #define PCI_DEVICE_ID_NS_CS5535_ISA 0x002b #define PCI_DEVICE_ID_NS_CS5535_IDE 0x002d #define PCI_DEVICE_ID_NS_CS5535_AUDIO 0x002e #define PCI_DEVICE_ID_NS_CS5535_USB 0x002f #define PCI_DEVICE_ID_NS_GX_VIDEO 0x0030 #define PCI_DEVICE_ID_NS_SATURN 0x0035 #define PCI_DEVICE_ID_NS_SCx200_BRIDGE 0x0500 #define PCI_DEVICE_ID_NS_SCx200_SMI 0x0501 #define PCI_DEVICE_ID_NS_SCx200_IDE 0x0502 #define PCI_DEVICE_ID_NS_SCx200_AUDIO 0x0503 #define PCI_DEVICE_ID_NS_SCx200_VIDEO 0x0504 #define PCI_DEVICE_ID_NS_SCx200_XBUS 0x0505 #define PCI_DEVICE_ID_NS_SC1100_BRIDGE 0x0510 #define PCI_DEVICE_ID_NS_SC1100_SMI 0x0511 #define PCI_DEVICE_ID_NS_SC1100_XBUS 0x0515 #define PCI_DEVICE_ID_NS_87410 0xd001 #define PCI_DEVICE_ID_NS_GX_HOST_BRIDGE 0x0028 #define PCI_VENDOR_ID_TSENG 0x100c #define PCI_DEVICE_ID_TSENG_W32P_2 0x3202 #define PCI_DEVICE_ID_TSENG_W32P_b 0x3205 #define PCI_DEVICE_ID_TSENG_W32P_c 0x3206 #define PCI_DEVICE_ID_TSENG_W32P_d 0x3207 #define PCI_DEVICE_ID_TSENG_ET6000 0x3208 #define PCI_VENDOR_ID_WEITEK 0x100e #define PCI_DEVICE_ID_WEITEK_P9000 0x9001 #define PCI_DEVICE_ID_WEITEK_P9100 0x9100 #define PCI_VENDOR_ID_DEC 0x1011 #define PCI_DEVICE_ID_DEC_BRD 0x0001 #define PCI_DEVICE_ID_DEC_TULIP 0x0002 #define PCI_DEVICE_ID_DEC_TGA 0x0004 #define PCI_DEVICE_ID_DEC_TULIP_FAST 0x0009 #define PCI_DEVICE_ID_DEC_TGA2 0x000D #define PCI_DEVICE_ID_DEC_FDDI 0x000F #define PCI_DEVICE_ID_DEC_TULIP_PLUS 0x0014 #define PCI_DEVICE_ID_DEC_21142 0x0019 #define PCI_DEVICE_ID_DEC_21052 0x0021 #define PCI_DEVICE_ID_DEC_21150 0x0022 #define PCI_DEVICE_ID_DEC_21152 0x0024 #define PCI_DEVICE_ID_DEC_21153 0x0025 #define PCI_DEVICE_ID_DEC_21154 0x0026 #define PCI_DEVICE_ID_DEC_21285 0x1065 #define PCI_DEVICE_ID_COMPAQ_42XX 0x0046 #define PCI_VENDOR_ID_CIRRUS 0x1013 #define PCI_DEVICE_ID_CIRRUS_7548 0x0038 #define PCI_DEVICE_ID_CIRRUS_5430 0x00a0 #define PCI_DEVICE_ID_CIRRUS_5434_4 0x00a4 #define PCI_DEVICE_ID_CIRRUS_5434_8 0x00a8 #define PCI_DEVICE_ID_CIRRUS_5436 0x00ac #define PCI_DEVICE_ID_CIRRUS_5446 0x00b8 #define PCI_DEVICE_ID_CIRRUS_5480 0x00bc #define PCI_DEVICE_ID_CIRRUS_5462 0x00d0 #define PCI_DEVICE_ID_CIRRUS_5464 0x00d4 #define PCI_DEVICE_ID_CIRRUS_5465 0x00d6 #define PCI_DEVICE_ID_CIRRUS_6729 0x1100 #define PCI_DEVICE_ID_CIRRUS_6832 0x1110 #define PCI_DEVICE_ID_CIRRUS_7543 0x1202 #define PCI_DEVICE_ID_CIRRUS_4610 
0x6001 #define PCI_DEVICE_ID_CIRRUS_4612 0x6003 #define PCI_DEVICE_ID_CIRRUS_4615 0x6004 #define PCI_VENDOR_ID_IBM 0x1014 #define PCI_DEVICE_ID_IBM_TR 0x0018 #define PCI_DEVICE_ID_IBM_TR_WAKE 0x003e #define PCI_DEVICE_ID_IBM_CPC710_PCI64 0x00fc #define PCI_DEVICE_ID_IBM_SNIPE 0x0180 #define PCI_DEVICE_ID_IBM_CITRINE 0x028C #define PCI_DEVICE_ID_IBM_GEMSTONE 0xB166 #define PCI_DEVICE_ID_IBM_OBSIDIAN 0x02BD #define PCI_DEVICE_ID_IBM_ICOM_DEV_ID_1 0x0031 #define PCI_DEVICE_ID_IBM_ICOM_DEV_ID_2 0x0219 #define PCI_DEVICE_ID_IBM_ICOM_V2_TWO_PORTS_RVX 0x021A #define PCI_DEVICE_ID_IBM_ICOM_V2_ONE_PORT_RVX_ONE_PORT_MDM 0x0251 #define PCI_DEVICE_ID_IBM_ICOM_V2_ONE_PORT_RVX_ONE_PORT_MDM_PCIE 0x0361 #define PCI_DEVICE_ID_IBM_ICOM_FOUR_PORT_MODEL 0x252 #define PCI_SUBVENDOR_ID_IBM 0x1014 #define PCI_SUBDEVICE_ID_IBM_SATURN_SERIAL_ONE_PORT 0x03d4 #define PCI_VENDOR_ID_UNISYS 0x1018 #define PCI_DEVICE_ID_UNISYS_DMA_DIRECTOR 0x001C #define PCI_VENDOR_ID_COMPEX2 0x101a /* pci.ids says "AT&T GIS (NCR)" */ #define PCI_DEVICE_ID_COMPEX2_100VG 0x0005 #define PCI_VENDOR_ID_WD 0x101c #define PCI_DEVICE_ID_WD_90C 0xc24a #define PCI_VENDOR_ID_AMI 0x101e #define PCI_DEVICE_ID_AMI_MEGARAID3 0x1960 #define PCI_DEVICE_ID_AMI_MEGARAID 0x9010 #define PCI_DEVICE_ID_AMI_MEGARAID2 0x9060 #define PCI_VENDOR_ID_AMD 0x1022 #define PCI_DEVICE_ID_AMD_K8_NB 0x1100 #define PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP 0x1101 #define PCI_DEVICE_ID_AMD_K8_NB_MEMCTL 0x1102 #define PCI_DEVICE_ID_AMD_K8_NB_MISC 0x1103 #define PCI_DEVICE_ID_AMD_10H_NB_HT 0x1200 #define PCI_DEVICE_ID_AMD_10H_NB_MAP 0x1201 #define PCI_DEVICE_ID_AMD_10H_NB_DRAM 0x1202 #define PCI_DEVICE_ID_AMD_10H_NB_MISC 0x1203 #define PCI_DEVICE_ID_AMD_10H_NB_LINK 0x1204 #define PCI_DEVICE_ID_AMD_11H_NB_HT 0x1300 #define PCI_DEVICE_ID_AMD_11H_NB_MAP 0x1301 #define PCI_DEVICE_ID_AMD_11H_NB_DRAM 0x1302 #define PCI_DEVICE_ID_AMD_11H_NB_MISC 0x1303 #define PCI_DEVICE_ID_AMD_11H_NB_LINK 0x1304 #define PCI_DEVICE_ID_AMD_15H_M10H_F3 0x1403 #define PCI_DEVICE_ID_AMD_15H_M30H_NB_F3 0x141d #define PCI_DEVICE_ID_AMD_15H_M30H_NB_F4 0x141e #define PCI_DEVICE_ID_AMD_15H_M60H_NB_F3 0x1573 #define PCI_DEVICE_ID_AMD_15H_M60H_NB_F4 0x1574 #define PCI_DEVICE_ID_AMD_15H_NB_F0 0x1600 #define PCI_DEVICE_ID_AMD_15H_NB_F1 0x1601 #define PCI_DEVICE_ID_AMD_15H_NB_F2 0x1602 #define PCI_DEVICE_ID_AMD_15H_NB_F3 0x1603 #define PCI_DEVICE_ID_AMD_15H_NB_F4 0x1604 #define PCI_DEVICE_ID_AMD_15H_NB_F5 0x1605 #define PCI_DEVICE_ID_AMD_16H_NB_F3 0x1533 #define PCI_DEVICE_ID_AMD_16H_NB_F4 0x1534 #define PCI_DEVICE_ID_AMD_16H_M30H_NB_F3 0x1583 #define PCI_DEVICE_ID_AMD_16H_M30H_NB_F4 0x1584 #define PCI_DEVICE_ID_AMD_17H_DF_F3 0x1463 #define PCI_DEVICE_ID_AMD_17H_M10H_DF_F3 0x15eb #define PCI_DEVICE_ID_AMD_17H_M30H_DF_F3 0x1493 #define PCI_DEVICE_ID_AMD_CNB17H_F3 0x1703 #define PCI_DEVICE_ID_AMD_LANCE 0x2000 #define PCI_DEVICE_ID_AMD_LANCE_HOME 0x2001 #define PCI_DEVICE_ID_AMD_SCSI 0x2020 #define PCI_DEVICE_ID_AMD_SERENADE 0x36c0 #define PCI_DEVICE_ID_AMD_FE_GATE_7006 0x7006 #define PCI_DEVICE_ID_AMD_FE_GATE_7007 0x7007 #define PCI_DEVICE_ID_AMD_FE_GATE_700C 0x700C #define PCI_DEVICE_ID_AMD_FE_GATE_700E 0x700E #define PCI_DEVICE_ID_AMD_COBRA_7401 0x7401 #define PCI_DEVICE_ID_AMD_VIPER_7409 0x7409 #define PCI_DEVICE_ID_AMD_VIPER_740B 0x740B #define PCI_DEVICE_ID_AMD_VIPER_7410 0x7410 #define PCI_DEVICE_ID_AMD_VIPER_7411 0x7411 #define PCI_DEVICE_ID_AMD_VIPER_7413 0x7413 #define PCI_DEVICE_ID_AMD_VIPER_7440 0x7440 #define PCI_DEVICE_ID_AMD_OPUS_7441 0x7441 #define PCI_DEVICE_ID_AMD_OPUS_7443 0x7443 #define 
PCI_DEVICE_ID_AMD_VIPER_7443 0x7443 #define PCI_DEVICE_ID_AMD_OPUS_7445 0x7445 #define PCI_DEVICE_ID_AMD_GOLAM_7450 0x7450 #define PCI_DEVICE_ID_AMD_8111_PCI 0x7460 #define PCI_DEVICE_ID_AMD_8111_LPC 0x7468 #define PCI_DEVICE_ID_AMD_8111_IDE 0x7469 #define PCI_DEVICE_ID_AMD_8111_SMBUS2 0x746a #define PCI_DEVICE_ID_AMD_8111_SMBUS 0x746b #define PCI_DEVICE_ID_AMD_8111_AUDIO 0x746d #define PCI_DEVICE_ID_AMD_8151_0 0x7454 #define PCI_DEVICE_ID_AMD_8131_BRIDGE 0x7450 #define PCI_DEVICE_ID_AMD_8131_APIC 0x7451 #define PCI_DEVICE_ID_AMD_8132_BRIDGE 0x7458 #define PCI_DEVICE_ID_AMD_NL_USB 0x7912 #define PCI_DEVICE_ID_AMD_CS5535_IDE 0x208F #define PCI_DEVICE_ID_AMD_CS5536_ISA 0x2090 #define PCI_DEVICE_ID_AMD_CS5536_FLASH 0x2091 #define PCI_DEVICE_ID_AMD_CS5536_AUDIO 0x2093 #define PCI_DEVICE_ID_AMD_CS5536_OHC 0x2094 #define PCI_DEVICE_ID_AMD_CS5536_EHC 0x2095 #define PCI_DEVICE_ID_AMD_CS5536_UDC 0x2096 #define PCI_DEVICE_ID_AMD_CS5536_UOC 0x2097 #define PCI_DEVICE_ID_AMD_CS5536_DEV_IDE 0x2092 #define PCI_DEVICE_ID_AMD_CS5536_IDE 0x209A #define PCI_DEVICE_ID_AMD_LX_VIDEO 0x2081 #define PCI_DEVICE_ID_AMD_LX_AES 0x2082 #define PCI_DEVICE_ID_AMD_HUDSON2_SATA_IDE 0x7800 #define PCI_DEVICE_ID_AMD_HUDSON2_SMBUS 0x780b #define PCI_DEVICE_ID_AMD_HUDSON2_IDE 0x780c #define PCI_DEVICE_ID_AMD_KERNCZ_SMBUS 0x790b #define PCI_VENDOR_ID_TRIDENT 0x1023 #define PCI_DEVICE_ID_TRIDENT_4DWAVE_DX 0x2000 #define PCI_DEVICE_ID_TRIDENT_4DWAVE_NX 0x2001 #define PCI_DEVICE_ID_TRIDENT_9320 0x9320 #define PCI_DEVICE_ID_TRIDENT_9388 0x9388 #define PCI_DEVICE_ID_TRIDENT_9397 0x9397 #define PCI_DEVICE_ID_TRIDENT_939A 0x939A #define PCI_DEVICE_ID_TRIDENT_9520 0x9520 #define PCI_DEVICE_ID_TRIDENT_9525 0x9525 #define PCI_DEVICE_ID_TRIDENT_9420 0x9420 #define PCI_DEVICE_ID_TRIDENT_9440 0x9440 #define PCI_DEVICE_ID_TRIDENT_9660 0x9660 #define PCI_DEVICE_ID_TRIDENT_9750 0x9750 #define PCI_DEVICE_ID_TRIDENT_9850 0x9850 #define PCI_DEVICE_ID_TRIDENT_9880 0x9880 #define PCI_DEVICE_ID_TRIDENT_8400 0x8400 #define PCI_DEVICE_ID_TRIDENT_8420 0x8420 #define PCI_DEVICE_ID_TRIDENT_8500 0x8500 #define PCI_VENDOR_ID_AI 0x1025 #define PCI_DEVICE_ID_AI_M1435 0x1435 #define PCI_VENDOR_ID_DELL 0x1028 #define PCI_DEVICE_ID_DELL_RACIII 0x0008 #define PCI_DEVICE_ID_DELL_RAC4 0x0012 #define PCI_DEVICE_ID_DELL_PERC5 0x0015 #define PCI_VENDOR_ID_MATROX 0x102B #define PCI_DEVICE_ID_MATROX_MGA_2 0x0518 #define PCI_DEVICE_ID_MATROX_MIL 0x0519 #define PCI_DEVICE_ID_MATROX_MYS 0x051A #define PCI_DEVICE_ID_MATROX_MIL_2 0x051b #define PCI_DEVICE_ID_MATROX_MYS_AGP 0x051e #define PCI_DEVICE_ID_MATROX_MIL_2_AGP 0x051f #define PCI_DEVICE_ID_MATROX_MGA_IMP 0x0d10 #define PCI_DEVICE_ID_MATROX_G100_MM 0x1000 #define PCI_DEVICE_ID_MATROX_G100_AGP 0x1001 #define PCI_DEVICE_ID_MATROX_G200_PCI 0x0520 #define PCI_DEVICE_ID_MATROX_G200_AGP 0x0521 #define PCI_DEVICE_ID_MATROX_G400 0x0525 #define PCI_DEVICE_ID_MATROX_G200EV_PCI 0x0530 #define PCI_DEVICE_ID_MATROX_G550 0x2527 #define PCI_DEVICE_ID_MATROX_VIA 0x4536 #define PCI_VENDOR_ID_MOBILITY_ELECTRONICS 0x14f2 #define PCI_VENDOR_ID_CT 0x102c #define PCI_DEVICE_ID_CT_69000 0x00c0 #define PCI_DEVICE_ID_CT_65545 0x00d8 #define PCI_DEVICE_ID_CT_65548 0x00dc #define PCI_DEVICE_ID_CT_65550 0x00e0 #define PCI_DEVICE_ID_CT_65554 0x00e4 #define PCI_DEVICE_ID_CT_65555 0x00e5 #define PCI_VENDOR_ID_MIRO 0x1031 #define PCI_DEVICE_ID_MIRO_36050 0x5601 #define PCI_DEVICE_ID_MIRO_DC10PLUS 0x7efe #define PCI_DEVICE_ID_MIRO_DC30PLUS 0xd801 #define PCI_VENDOR_ID_NEC 0x1033 #define PCI_DEVICE_ID_NEC_CBUS_1 0x0001 /* PCI-Cbus Bridge */ #define 
PCI_DEVICE_ID_NEC_LOCAL 0x0002 /* Local Bridge */ #define PCI_DEVICE_ID_NEC_ATM 0x0003 /* ATM LAN Controller */ #define PCI_DEVICE_ID_NEC_R4000 0x0004 /* R4000 Bridge */ #define PCI_DEVICE_ID_NEC_486 0x0005 /* 486 Like Peripheral Bus Bridge */ #define PCI_DEVICE_ID_NEC_ACCEL_1 0x0006 /* Graphic Accelerator */ #define PCI_DEVICE_ID_NEC_UXBUS 0x0007 /* UX-Bus Bridge */ #define PCI_DEVICE_ID_NEC_ACCEL_2 0x0008 /* Graphic Accelerator */ #define PCI_DEVICE_ID_NEC_GRAPH 0x0009 /* PCI-CoreGraph Bridge */ #define PCI_DEVICE_ID_NEC_VL 0x0016 /* PCI-VL Bridge */ #define PCI_DEVICE_ID_NEC_STARALPHA2 0x002c /* STAR ALPHA2 */ #define PCI_DEVICE_ID_NEC_CBUS_2 0x002d /* PCI-Cbus Bridge */ #define PCI_DEVICE_ID_NEC_USB 0x0035 /* PCI-USB Host */ #define PCI_DEVICE_ID_NEC_CBUS_3 0x003b #define PCI_DEVICE_ID_NEC_NAPCCARD 0x003e #define PCI_DEVICE_ID_NEC_PCX2 0x0046 /* PowerVR */ #define PCI_DEVICE_ID_NEC_VRC5476 0x009b #define PCI_DEVICE_ID_NEC_VRC4173 0x00a5 #define PCI_DEVICE_ID_NEC_VRC5477_AC97 0x00a6 #define PCI_DEVICE_ID_NEC_PC9821CS01 0x800c /* PC-9821-CS01 */ #define PCI_DEVICE_ID_NEC_PC9821NRB06 0x800d /* PC-9821NR-B06 */ #define PCI_VENDOR_ID_FD 0x1036 #define PCI_DEVICE_ID_FD_36C70 0x0000 #define PCI_VENDOR_ID_SI 0x1039 #define PCI_DEVICE_ID_SI_5591_AGP 0x0001 #define PCI_DEVICE_ID_SI_6202 0x0002 #define PCI_DEVICE_ID_SI_503 0x0008 #define PCI_DEVICE_ID_SI_ACPI 0x0009 #define PCI_DEVICE_ID_SI_SMBUS 0x0016 #define PCI_DEVICE_ID_SI_LPC 0x0018 #define PCI_DEVICE_ID_SI_5597_VGA 0x0200 #define PCI_DEVICE_ID_SI_6205 0x0205 #define PCI_DEVICE_ID_SI_501 0x0406 #define PCI_DEVICE_ID_SI_496 0x0496 #define PCI_DEVICE_ID_SI_300 0x0300 #define PCI_DEVICE_ID_SI_315H 0x0310 #define PCI_DEVICE_ID_SI_315 0x0315 #define PCI_DEVICE_ID_SI_315PRO 0x0325 #define PCI_DEVICE_ID_SI_530 0x0530 #define PCI_DEVICE_ID_SI_540 0x0540 #define PCI_DEVICE_ID_SI_550 0x0550 #define PCI_DEVICE_ID_SI_540_VGA 0x5300 #define PCI_DEVICE_ID_SI_550_VGA 0x5315 #define PCI_DEVICE_ID_SI_620 0x0620 #define PCI_DEVICE_ID_SI_630 0x0630 #define PCI_DEVICE_ID_SI_633 0x0633 #define PCI_DEVICE_ID_SI_635 0x0635 #define PCI_DEVICE_ID_SI_640 0x0640 #define PCI_DEVICE_ID_SI_645 0x0645 #define PCI_DEVICE_ID_SI_646 0x0646 #define PCI_DEVICE_ID_SI_648 0x0648 #define PCI_DEVICE_ID_SI_650 0x0650 #define PCI_DEVICE_ID_SI_651 0x0651 #define PCI_DEVICE_ID_SI_655 0x0655 #define PCI_DEVICE_ID_SI_661 0x0661 #define PCI_DEVICE_ID_SI_730 0x0730 #define PCI_DEVICE_ID_SI_733 0x0733 #define PCI_DEVICE_ID_SI_630_VGA 0x6300 #define PCI_DEVICE_ID_SI_735 0x0735 #define PCI_DEVICE_ID_SI_740 0x0740 #define PCI_DEVICE_ID_SI_741 0x0741 #define PCI_DEVICE_ID_SI_745 0x0745 #define PCI_DEVICE_ID_SI_746 0x0746 #define PCI_DEVICE_ID_SI_755 0x0755 #define PCI_DEVICE_ID_SI_760 0x0760 #define PCI_DEVICE_ID_SI_900 0x0900 #define PCI_DEVICE_ID_SI_961 0x0961 #define PCI_DEVICE_ID_SI_962 0x0962 #define PCI_DEVICE_ID_SI_963 0x0963 #define PCI_DEVICE_ID_SI_965 0x0965 #define PCI_DEVICE_ID_SI_966 0x0966 #define PCI_DEVICE_ID_SI_968 0x0968 #define PCI_DEVICE_ID_SI_1180 0x1180 #define PCI_DEVICE_ID_SI_5511 0x5511 #define PCI_DEVICE_ID_SI_5513 0x5513 #define PCI_DEVICE_ID_SI_5517 0x5517 #define PCI_DEVICE_ID_SI_5518 0x5518 #define PCI_DEVICE_ID_SI_5571 0x5571 #define PCI_DEVICE_ID_SI_5581 0x5581 #define PCI_DEVICE_ID_SI_5582 0x5582 #define PCI_DEVICE_ID_SI_5591 0x5591 #define PCI_DEVICE_ID_SI_5596 0x5596 #define PCI_DEVICE_ID_SI_5597 0x5597 #define PCI_DEVICE_ID_SI_5598 0x5598 #define PCI_DEVICE_ID_SI_5600 0x5600 #define PCI_DEVICE_ID_SI_7012 0x7012 #define PCI_DEVICE_ID_SI_7013 0x7013 #define 
PCI_DEVICE_ID_SI_7016 0x7016 #define PCI_DEVICE_ID_SI_7018 0x7018 #define PCI_VENDOR_ID_HP 0x103c #define PCI_VENDOR_ID_HP_3PAR 0x1590 #define PCI_DEVICE_ID_HP_VISUALIZE_EG 0x1005 #define PCI_DEVICE_ID_HP_VISUALIZE_FX6 0x1006 #define PCI_DEVICE_ID_HP_VISUALIZE_FX4 0x1008 #define PCI_DEVICE_ID_HP_VISUALIZE_FX2 0x100a #define PCI_DEVICE_ID_HP_TACHYON 0x1028 #define PCI_DEVICE_ID_HP_TACHLITE 0x1029 #define PCI_DEVICE_ID_HP_J2585A 0x1030 #define PCI_DEVICE_ID_HP_J2585B 0x1031 #define PCI_DEVICE_ID_HP_J2973A 0x1040 #define PCI_DEVICE_ID_HP_J2970A 0x1042 #define PCI_DEVICE_ID_HP_DIVA 0x1048 #define PCI_DEVICE_ID_HP_DIVA_TOSCA1 0x1049 #define PCI_DEVICE_ID_HP_DIVA_TOSCA2 0x104A #define PCI_DEVICE_ID_HP_DIVA_MAESTRO 0x104B #define PCI_DEVICE_ID_HP_REO_IOC 0x10f1 #define PCI_DEVICE_ID_HP_VISUALIZE_FXE 0x108b #define PCI_DEVICE_ID_HP_DIVA_HALFDOME 0x1223 #define PCI_DEVICE_ID_HP_DIVA_KEYSTONE 0x1226 #define PCI_DEVICE_ID_HP_DIVA_POWERBAR 0x1227 #define PCI_DEVICE_ID_HP_ZX1_IOC 0x122a #define PCI_DEVICE_ID_HP_PCIX_LBA 0x122e #define PCI_DEVICE_ID_HP_SX1000_IOC 0x127c #define PCI_DEVICE_ID_HP_DIVA_EVEREST 0x1282 #define PCI_DEVICE_ID_HP_DIVA_AUX 0x1290 #define PCI_DEVICE_ID_HP_DIVA_RMP3 0x1301 #define PCI_DEVICE_ID_HP_DIVA_HURRICANE 0x132a #define PCI_DEVICE_ID_HP_CISSA 0x3220 #define PCI_DEVICE_ID_HP_CISSC 0x3230 #define PCI_DEVICE_ID_HP_CISSD 0x3238 #define PCI_DEVICE_ID_HP_CISSE 0x323a #define PCI_DEVICE_ID_HP_CISSF 0x323b #define PCI_DEVICE_ID_HP_CISSH 0x323c #define PCI_DEVICE_ID_HP_CISSI 0x3239 #define PCI_DEVICE_ID_HP_ZX2_IOC 0x4031 #define PCI_VENDOR_ID_PCTECH 0x1042 #define PCI_DEVICE_ID_PCTECH_RZ1000 0x1000 #define PCI_DEVICE_ID_PCTECH_RZ1001 0x1001 #define PCI_DEVICE_ID_PCTECH_SAMURAI_IDE 0x3020 #define PCI_VENDOR_ID_ASUSTEK 0x1043 #define PCI_DEVICE_ID_ASUSTEK_0675 0x0675 #define PCI_VENDOR_ID_DPT 0x1044 #define PCI_DEVICE_ID_DPT 0xa400 #define PCI_VENDOR_ID_OPTI 0x1045 #define PCI_DEVICE_ID_OPTI_82C558 0xc558 #define PCI_DEVICE_ID_OPTI_82C621 0xc621 #define PCI_DEVICE_ID_OPTI_82C700 0xc700 #define PCI_DEVICE_ID_OPTI_82C825 0xd568 #define PCI_VENDOR_ID_ELSA 0x1048 #define PCI_DEVICE_ID_ELSA_MICROLINK 0x1000 #define PCI_DEVICE_ID_ELSA_QS3000 0x3000 #define PCI_VENDOR_ID_STMICRO 0x104A #define PCI_DEVICE_ID_STMICRO_USB_HOST 0xCC00 #define PCI_DEVICE_ID_STMICRO_USB_OHCI 0xCC01 #define PCI_DEVICE_ID_STMICRO_USB_OTG 0xCC02 #define PCI_DEVICE_ID_STMICRO_UART_HWFC 0xCC03 #define PCI_DEVICE_ID_STMICRO_UART_NO_HWFC 0xCC04 #define PCI_DEVICE_ID_STMICRO_SOC_DMA 0xCC05 #define PCI_DEVICE_ID_STMICRO_SATA 0xCC06 #define PCI_DEVICE_ID_STMICRO_I2C 0xCC07 #define PCI_DEVICE_ID_STMICRO_SPI_HS 0xCC08 #define PCI_DEVICE_ID_STMICRO_MAC 0xCC09 #define PCI_DEVICE_ID_STMICRO_SDIO_EMMC 0xCC0A #define PCI_DEVICE_ID_STMICRO_SDIO 0xCC0B #define PCI_DEVICE_ID_STMICRO_GPIO 0xCC0C #define PCI_DEVICE_ID_STMICRO_VIP 0xCC0D #define PCI_DEVICE_ID_STMICRO_AUDIO_ROUTER_DMA 0xCC0E #define PCI_DEVICE_ID_STMICRO_AUDIO_ROUTER_SRCS 0xCC0F #define PCI_DEVICE_ID_STMICRO_AUDIO_ROUTER_MSPS 0xCC10 #define PCI_DEVICE_ID_STMICRO_CAN 0xCC11 #define PCI_DEVICE_ID_STMICRO_MLB 0xCC12 #define PCI_DEVICE_ID_STMICRO_DBP 0xCC13 #define PCI_DEVICE_ID_STMICRO_SATA_PHY 0xCC14 #define PCI_DEVICE_ID_STMICRO_ESRAM 0xCC15 #define PCI_DEVICE_ID_STMICRO_VIC 0xCC16 #define PCI_VENDOR_ID_BUSLOGIC 0x104B #define PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER_NC 0x0140 #define PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER 0x1040 #define PCI_DEVICE_ID_BUSLOGIC_FLASHPOINT 0x8130 #define PCI_VENDOR_ID_TI 0x104c #define PCI_DEVICE_ID_TI_TVP4020 0x3d07 #define PCI_DEVICE_ID_TI_4450 
0x8011 #define PCI_DEVICE_ID_TI_XX21_XX11 0x8031 #define PCI_DEVICE_ID_TI_XX21_XX11_FM 0x8033 #define PCI_DEVICE_ID_TI_XX21_XX11_SD 0x8034 #define PCI_DEVICE_ID_TI_X515 0x8036 #define PCI_DEVICE_ID_TI_XX12 0x8039 #define PCI_DEVICE_ID_TI_XX12_FM 0x803b #define PCI_DEVICE_ID_TI_XIO2000A 0x8231 #define PCI_DEVICE_ID_TI_1130 0xac12 #define PCI_DEVICE_ID_TI_1031 0xac13 #define PCI_DEVICE_ID_TI_1131 0xac15 #define PCI_DEVICE_ID_TI_1250 0xac16 #define PCI_DEVICE_ID_TI_1220 0xac17 #define PCI_DEVICE_ID_TI_1221 0xac19 #define PCI_DEVICE_ID_TI_1210 0xac1a #define PCI_DEVICE_ID_TI_1450 0xac1b #define PCI_DEVICE_ID_TI_1225 0xac1c #define PCI_DEVICE_ID_TI_1251A 0xac1d #define PCI_DEVICE_ID_TI_1211 0xac1e #define PCI_DEVICE_ID_TI_1251B 0xac1f #define PCI_DEVICE_ID_TI_4410 0xac41 #define PCI_DEVICE_ID_TI_4451 0xac42 #define PCI_DEVICE_ID_TI_4510 0xac44 #define PCI_DEVICE_ID_TI_4520 0xac46 #define PCI_DEVICE_ID_TI_7510 0xac47 #define PCI_DEVICE_ID_TI_7610 0xac48 #define PCI_DEVICE_ID_TI_7410 0xac49 #define PCI_DEVICE_ID_TI_1410 0xac50 #define PCI_DEVICE_ID_TI_1420 0xac51 #define PCI_DEVICE_ID_TI_1451A 0xac52 #define PCI_DEVICE_ID_TI_1620 0xac54 #define PCI_DEVICE_ID_TI_1520 0xac55 #define PCI_DEVICE_ID_TI_1510 0xac56 #define PCI_DEVICE_ID_TI_X620 0xac8d #define PCI_DEVICE_ID_TI_X420 0xac8e #define PCI_DEVICE_ID_TI_XX20_FM 0xac8f #define PCI_DEVICE_ID_TI_DRA74x 0xb500 #define PCI_DEVICE_ID_TI_DRA72x 0xb501 #define PCI_VENDOR_ID_SONY 0x104d /* Winbond have two vendor IDs! See 0x10ad as well */ #define PCI_VENDOR_ID_WINBOND2 0x1050 #define PCI_DEVICE_ID_WINBOND2_89C940F 0x5a5a #define PCI_DEVICE_ID_WINBOND2_6692 0x6692 #define PCI_VENDOR_ID_ANIGMA 0x1051 #define PCI_DEVICE_ID_ANIGMA_MC145575 0x0100 #define PCI_VENDOR_ID_EFAR 0x1055 #define PCI_DEVICE_ID_EFAR_SLC90E66_1 0x9130 #define PCI_DEVICE_ID_EFAR_SLC90E66_3 0x9463 #define PCI_VENDOR_ID_MOTOROLA 0x1057 #define PCI_DEVICE_ID_MOTOROLA_MPC105 0x0001 #define PCI_DEVICE_ID_MOTOROLA_MPC106 0x0002 #define PCI_DEVICE_ID_MOTOROLA_MPC107 0x0004 #define PCI_DEVICE_ID_MOTOROLA_RAVEN 0x4801 #define PCI_DEVICE_ID_MOTOROLA_FALCON 0x4802 #define PCI_DEVICE_ID_MOTOROLA_HAWK 0x4803 #define PCI_DEVICE_ID_MOTOROLA_HARRIER 0x480b #define PCI_DEVICE_ID_MOTOROLA_MPC5200 0x5803 #define PCI_DEVICE_ID_MOTOROLA_MPC5200B 0x5809 #define PCI_VENDOR_ID_PROMISE 0x105a #define PCI_DEVICE_ID_PROMISE_20265 0x0d30 #define PCI_DEVICE_ID_PROMISE_20267 0x4d30 #define PCI_DEVICE_ID_PROMISE_20246 0x4d33 #define PCI_DEVICE_ID_PROMISE_20262 0x4d38 #define PCI_DEVICE_ID_PROMISE_20263 0x0D38 #define PCI_DEVICE_ID_PROMISE_20268 0x4d68 #define PCI_DEVICE_ID_PROMISE_20269 0x4d69 #define PCI_DEVICE_ID_PROMISE_20270 0x6268 #define PCI_DEVICE_ID_PROMISE_20271 0x6269 #define PCI_DEVICE_ID_PROMISE_20275 0x1275 #define PCI_DEVICE_ID_PROMISE_20276 0x5275 #define PCI_DEVICE_ID_PROMISE_20277 0x7275 #define PCI_VENDOR_ID_FOXCONN 0x105b #define PCI_VENDOR_ID_UMC 0x1060 #define PCI_DEVICE_ID_UMC_UM8673F 0x0101 #define PCI_DEVICE_ID_UMC_UM8886BF 0x673a #define PCI_DEVICE_ID_UMC_UM8886A 0x886a #define PCI_VENDOR_ID_PICOPOWER 0x1066 #define PCI_DEVICE_ID_PICOPOWER_PT86C523 0x0002 #define PCI_DEVICE_ID_PICOPOWER_PT86C523BBP 0x8002 #define PCI_VENDOR_ID_MYLEX 0x1069 #define PCI_DEVICE_ID_MYLEX_DAC960_P 0x0001 #define PCI_DEVICE_ID_MYLEX_DAC960_PD 0x0002 #define PCI_DEVICE_ID_MYLEX_DAC960_PG 0x0010 #define PCI_DEVICE_ID_MYLEX_DAC960_LA 0x0020 #define PCI_DEVICE_ID_MYLEX_DAC960_LP 0x0050 #define PCI_DEVICE_ID_MYLEX_DAC960_BA 0xBA56 #define PCI_DEVICE_ID_MYLEX_DAC960_GEM 0xB166 #define PCI_VENDOR_ID_APPLE 0x106b 
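/*
 * A minimal usage sketch (assumes the standard Linux PCI driver API from
 * <linux/pci.h> and <linux/module.h>; the example_* names below are
 * hypothetical and not part of this header): drivers consume the vendor and
 * device constants defined here in a struct pci_device_id match table, e.g.
 *
 *	static const struct pci_device_id example_pci_ids[] = {
 *		{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMAC) },
 *		{ }	-- terminating all-zero entry
 *	};
 *	MODULE_DEVICE_TABLE(pci, example_pci_ids);
 *
 * The PCI core matches enumerated devices against such a table and invokes
 * the registered driver's probe callback for each hit; MODULE_DEVICE_TABLE
 * also exports the IDs so userspace can autoload the module.
 */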
#define PCI_DEVICE_ID_APPLE_BANDIT 0x0001 #define PCI_DEVICE_ID_APPLE_HYDRA 0x000e #define PCI_DEVICE_ID_APPLE_UNI_N_FW 0x0018 #define PCI_DEVICE_ID_APPLE_UNI_N_AGP 0x0020 #define PCI_DEVICE_ID_APPLE_UNI_N_GMAC 0x0021 #define PCI_DEVICE_ID_APPLE_UNI_N_GMACP 0x0024 #define PCI_DEVICE_ID_APPLE_UNI_N_AGP_P 0x0027 #define PCI_DEVICE_ID_APPLE_UNI_N_AGP15 0x002d #define PCI_DEVICE_ID_APPLE_UNI_N_PCI15 0x002e #define PCI_DEVICE_ID_APPLE_UNI_N_GMAC2 0x0032 #define PCI_DEVICE_ID_APPLE_UNI_N_ATA 0x0033 #define PCI_DEVICE_ID_APPLE_UNI_N_AGP2 0x0034 #define PCI_DEVICE_ID_APPLE_IPID_ATA100 0x003b #define PCI_DEVICE_ID_APPLE_K2_ATA100 0x0043 #define PCI_DEVICE_ID_APPLE_U3_AGP 0x004b #define PCI_DEVICE_ID_APPLE_K2_GMAC 0x004c #define PCI_DEVICE_ID_APPLE_SH_ATA 0x0050 #define PCI_DEVICE_ID_APPLE_SH_SUNGEM 0x0051 #define PCI_DEVICE_ID_APPLE_U3L_AGP 0x0058 #define PCI_DEVICE_ID_APPLE_U3H_AGP 0x0059 #define PCI_DEVICE_ID_APPLE_U4_PCIE 0x005b #define PCI_DEVICE_ID_APPLE_IPID2_AGP 0x0066 #define PCI_DEVICE_ID_APPLE_IPID2_ATA 0x0069 #define PCI_DEVICE_ID_APPLE_IPID2_FW 0x006a #define PCI_DEVICE_ID_APPLE_IPID2_GMAC 0x006b #define PCI_DEVICE_ID_APPLE_TIGON3 0x1645 #define PCI_VENDOR_ID_YAMAHA 0x1073 #define PCI_DEVICE_ID_YAMAHA_724 0x0004 #define PCI_DEVICE_ID_YAMAHA_724F 0x000d #define PCI_DEVICE_ID_YAMAHA_740 0x000a #define PCI_DEVICE_ID_YAMAHA_740C 0x000c #define PCI_DEVICE_ID_YAMAHA_744 0x0010 #define PCI_DEVICE_ID_YAMAHA_754 0x0012 #define PCI_VENDOR_ID_QLOGIC 0x1077 #define PCI_DEVICE_ID_QLOGIC_ISP10160 0x1016 #define PCI_DEVICE_ID_QLOGIC_ISP1020 0x1020 #define PCI_DEVICE_ID_QLOGIC_ISP1080 0x1080 #define PCI_DEVICE_ID_QLOGIC_ISP12160 0x1216 #define PCI_DEVICE_ID_QLOGIC_ISP1240 0x1240 #define PCI_DEVICE_ID_QLOGIC_ISP1280 0x1280 #define PCI_DEVICE_ID_QLOGIC_ISP2100 0x2100 #define PCI_DEVICE_ID_QLOGIC_ISP2200 0x2200 #define PCI_DEVICE_ID_QLOGIC_ISP2300 0x2300 #define PCI_DEVICE_ID_QLOGIC_ISP2312 0x2312 #define PCI_DEVICE_ID_QLOGIC_ISP2322 0x2322 #define PCI_DEVICE_ID_QLOGIC_ISP6312 0x6312 #define PCI_DEVICE_ID_QLOGIC_ISP6322 0x6322 #define PCI_DEVICE_ID_QLOGIC_ISP2422 0x2422 #define PCI_DEVICE_ID_QLOGIC_ISP2432 0x2432 #define PCI_DEVICE_ID_QLOGIC_ISP2512 0x2512 #define PCI_DEVICE_ID_QLOGIC_ISP2522 0x2522 #define PCI_DEVICE_ID_QLOGIC_ISP5422 0x5422 #define PCI_DEVICE_ID_QLOGIC_ISP5432 0x5432 #define PCI_VENDOR_ID_CYRIX 0x1078 #define PCI_DEVICE_ID_CYRIX_5510 0x0000 #define PCI_DEVICE_ID_CYRIX_PCI_MASTER 0x0001 #define PCI_DEVICE_ID_CYRIX_5520 0x0002 #define PCI_DEVICE_ID_CYRIX_5530_LEGACY 0x0100 #define PCI_DEVICE_ID_CYRIX_5530_IDE 0x0102 #define PCI_DEVICE_ID_CYRIX_5530_AUDIO 0x0103 #define PCI_DEVICE_ID_CYRIX_5530_VIDEO 0x0104 #define PCI_VENDOR_ID_CONTAQ 0x1080 #define PCI_DEVICE_ID_CONTAQ_82C693 0xc693 #define PCI_VENDOR_ID_OLICOM 0x108d #define PCI_DEVICE_ID_OLICOM_OC2325 0x0012 #define PCI_DEVICE_ID_OLICOM_OC2183 0x0013 #define PCI_DEVICE_ID_OLICOM_OC2326 0x0014 #define PCI_VENDOR_ID_SUN 0x108e #define PCI_DEVICE_ID_SUN_EBUS 0x1000 #define PCI_DEVICE_ID_SUN_HAPPYMEAL 0x1001 #define PCI_DEVICE_ID_SUN_RIO_EBUS 0x1100 #define PCI_DEVICE_ID_SUN_RIO_GEM 0x1101 #define PCI_DEVICE_ID_SUN_RIO_1394 0x1102 #define PCI_DEVICE_ID_SUN_RIO_USB 0x1103 #define PCI_DEVICE_ID_SUN_GEM 0x2bad #define PCI_DEVICE_ID_SUN_SIMBA 0x5000 #define PCI_DEVICE_ID_SUN_PBM 0x8000 #define PCI_DEVICE_ID_SUN_SCHIZO 0x8001 #define PCI_DEVICE_ID_SUN_SABRE 0xa000 #define PCI_DEVICE_ID_SUN_HUMMINGBIRD 0xa001 #define PCI_DEVICE_ID_SUN_TOMATILLO 0xa801 #define PCI_DEVICE_ID_SUN_CASSINI 0xabba #define PCI_VENDOR_ID_NI 0x1093 #define 
PCI_DEVICE_ID_NI_PCI2322 0xd130 #define PCI_DEVICE_ID_NI_PCI2324 0xd140 #define PCI_DEVICE_ID_NI_PCI2328 0xd150 #define PCI_DEVICE_ID_NI_PXI8422_2322 0xd190 #define PCI_DEVICE_ID_NI_PXI8422_2324 0xd1a0 #define PCI_DEVICE_ID_NI_PXI8420_2322 0xd1d0 #define PCI_DEVICE_ID_NI_PXI8420_2324 0xd1e0 #define PCI_DEVICE_ID_NI_PXI8420_2328 0xd1f0 #define PCI_DEVICE_ID_NI_PXI8420_23216 0xd1f1 #define PCI_DEVICE_ID_NI_PCI2322I 0xd250 #define PCI_DEVICE_ID_NI_PCI2324I 0xd270 #define PCI_DEVICE_ID_NI_PCI23216 0xd2b0 #define PCI_DEVICE_ID_NI_PXI8430_2322 0x7080 #define PCI_DEVICE_ID_NI_PCI8430_2322 0x70db #define PCI_DEVICE_ID_NI_PXI8430_2324 0x70dd #define PCI_DEVICE_ID_NI_PCI8430_2324 0x70df #define PCI_DEVICE_ID_NI_PXI8430_2328 0x70e2 #define PCI_DEVICE_ID_NI_PCI8430_2328 0x70e4 #define PCI_DEVICE_ID_NI_PXI8430_23216 0x70e6 #define PCI_DEVICE_ID_NI_PCI8430_23216 0x70e7 #define PCI_DEVICE_ID_NI_PXI8432_2322 0x70e8 #define PCI_DEVICE_ID_NI_PCI8432_2322 0x70ea #define PCI_DEVICE_ID_NI_PXI8432_2324 0x70ec #define PCI_DEVICE_ID_NI_PCI8432_2324 0x70ee #define PCI_VENDOR_ID_CMD 0x1095 #define PCI_DEVICE_ID_CMD_643 0x0643 #define PCI_DEVICE_ID_CMD_646 0x0646 #define PCI_DEVICE_ID_CMD_648 0x0648 #define PCI_DEVICE_ID_CMD_649 0x0649 #define PCI_DEVICE_ID_SII_680 0x0680 #define PCI_DEVICE_ID_SII_3112 0x3112 #define PCI_DEVICE_ID_SII_1210SA 0x0240 #define PCI_VENDOR_ID_BROOKTREE 0x109e #define PCI_DEVICE_ID_BROOKTREE_878 0x0878 #define PCI_DEVICE_ID_BROOKTREE_879 0x0879 #define PCI_VENDOR_ID_SGI 0x10a9 #define PCI_DEVICE_ID_SGI_IOC3 0x0003 #define PCI_DEVICE_ID_SGI_LITHIUM 0x1002 #define PCI_DEVICE_ID_SGI_IOC4 0x100a #define PCI_VENDOR_ID_WINBOND 0x10ad #define PCI_DEVICE_ID_WINBOND_82C105 0x0105 #define PCI_DEVICE_ID_WINBOND_83C553 0x0565 #define PCI_VENDOR_ID_PLX 0x10b5 #define PCI_DEVICE_ID_PLX_R685 0x1030 #define PCI_DEVICE_ID_PLX_ROMULUS 0x106a #define PCI_DEVICE_ID_PLX_SPCOM800 0x1076 #define PCI_DEVICE_ID_PLX_1077 0x1077 #define PCI_DEVICE_ID_PLX_SPCOM200 0x1103 #define PCI_DEVICE_ID_PLX_DJINN_ITOO 0x1151 #define PCI_DEVICE_ID_PLX_R753 0x1152 #define PCI_DEVICE_ID_PLX_OLITEC 0x1187 #define PCI_DEVICE_ID_PLX_PCI200SYN 0x3196 #define PCI_DEVICE_ID_PLX_9030 0x9030 #define PCI_DEVICE_ID_PLX_9050 0x9050 #define PCI_DEVICE_ID_PLX_9056 0x9056 #define PCI_DEVICE_ID_PLX_9080 0x9080 #define PCI_DEVICE_ID_PLX_GTEK_SERIAL2 0xa001 #define PCI_VENDOR_ID_MADGE 0x10b6 #define PCI_DEVICE_ID_MADGE_MK2 0x0002 #define PCI_VENDOR_ID_3COM 0x10b7 #define PCI_DEVICE_ID_3COM_3C985 0x0001 #define PCI_DEVICE_ID_3COM_3C940 0x1700 #define PCI_DEVICE_ID_3COM_3C339 0x3390 #define PCI_DEVICE_ID_3COM_3C359 0x3590 #define PCI_DEVICE_ID_3COM_3C940B 0x80eb #define PCI_DEVICE_ID_3COM_3CR990 0x9900 #define PCI_DEVICE_ID_3COM_3CR990_TX_95 0x9902 #define PCI_DEVICE_ID_3COM_3CR990_TX_97 0x9903 #define PCI_DEVICE_ID_3COM_3CR990B 0x9904 #define PCI_DEVICE_ID_3COM_3CR990_FX 0x9905 #define PCI_DEVICE_ID_3COM_3CR990SVR95 0x9908 #define PCI_DEVICE_ID_3COM_3CR990SVR97 0x9909 #define PCI_DEVICE_ID_3COM_3CR990SVR 0x990a #define PCI_VENDOR_ID_AL 0x10b9 #define PCI_DEVICE_ID_AL_M1533 0x1533 #define PCI_DEVICE_ID_AL_M1535 0x1535 #define PCI_DEVICE_ID_AL_M1541 0x1541 #define PCI_DEVICE_ID_AL_M1563 0x1563 #define PCI_DEVICE_ID_AL_M1621 0x1621 #define PCI_DEVICE_ID_AL_M1631 0x1631 #define PCI_DEVICE_ID_AL_M1632 0x1632 #define PCI_DEVICE_ID_AL_M1641 0x1641 #define PCI_DEVICE_ID_AL_M1644 0x1644 #define PCI_DEVICE_ID_AL_M1647 0x1647 #define PCI_DEVICE_ID_AL_M1651 0x1651 #define PCI_DEVICE_ID_AL_M1671 0x1671 #define PCI_DEVICE_ID_AL_M1681 0x1681 #define 
PCI_DEVICE_ID_AL_M1683 0x1683 #define PCI_DEVICE_ID_AL_M1689 0x1689 #define PCI_DEVICE_ID_AL_M5219 0x5219 #define PCI_DEVICE_ID_AL_M5228 0x5228 #define PCI_DEVICE_ID_AL_M5229 0x5229 #define PCI_DEVICE_ID_AL_M5451 0x5451 #define PCI_DEVICE_ID_AL_M7101 0x7101 #define PCI_VENDOR_ID_NEOMAGIC 0x10c8 #define PCI_DEVICE_ID_NEOMAGIC_NM256AV_AUDIO 0x8005 #define PCI_DEVICE_ID_NEOMAGIC_NM256ZX_AUDIO 0x8006 #define PCI_DEVICE_ID_NEOMAGIC_NM256XL_PLUS_AUDIO 0x8016 #define PCI_VENDOR_ID_TCONRAD 0x10da #define PCI_DEVICE_ID_TCONRAD_TOKENRING 0x0508 #define PCI_VENDOR_ID_NVIDIA 0x10de #define PCI_DEVICE_ID_NVIDIA_TNT 0x0020 #define PCI_DEVICE_ID_NVIDIA_TNT2 0x0028 #define PCI_DEVICE_ID_NVIDIA_UTNT2 0x0029 #define PCI_DEVICE_ID_NVIDIA_TNT_UNKNOWN 0x002a #define PCI_DEVICE_ID_NVIDIA_VTNT2 0x002C #define PCI_DEVICE_ID_NVIDIA_UVTNT2 0x002D #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SMBUS 0x0034 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_IDE 0x0035 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA 0x0036 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2 0x003e #define PCI_DEVICE_ID_NVIDIA_GEFORCE_6800_ULTRA 0x0040 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_6800 0x0041 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_6800_LE 0x0042 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_6800_GT 0x0045 #define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_4000 0x004E #define PCI_DEVICE_ID_NVIDIA_NFORCE4_SMBUS 0x0052 #define PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_IDE 0x0053 #define PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA 0x0054 #define PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2 0x0055 #define PCI_DEVICE_ID_NVIDIA_CK804_AUDIO 0x0059 #define PCI_DEVICE_ID_NVIDIA_CK804_PCIE 0x005d #define PCI_DEVICE_ID_NVIDIA_NFORCE2_SMBUS 0x0064 #define PCI_DEVICE_ID_NVIDIA_NFORCE2_IDE 0x0065 #define PCI_DEVICE_ID_NVIDIA_MCP2_MODEM 0x0069 #define PCI_DEVICE_ID_NVIDIA_MCP2_AUDIO 0x006a #define PCI_DEVICE_ID_NVIDIA_NFORCE2S_SMBUS 0x0084 #define PCI_DEVICE_ID_NVIDIA_NFORCE2S_IDE 0x0085 #define PCI_DEVICE_ID_NVIDIA_MCP2S_MODEM 0x0089 #define PCI_DEVICE_ID_NVIDIA_CK8_AUDIO 0x008a #define PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA 0x008e #define PCI_DEVICE_ID_NVIDIA_GEFORCE_7800_GT 0x0090 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_7800_GTX 0x0091 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_GO_7800 0x0098 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_GO_7800_GTX 0x0099 #define PCI_DEVICE_ID_NVIDIA_ITNT2 0x00A0 #define PCI_DEVICE_ID_GEFORCE_6800A 0x00c1 #define PCI_DEVICE_ID_GEFORCE_6800A_LE 0x00c2 #define PCI_DEVICE_ID_GEFORCE_GO_6800 0x00c8 #define PCI_DEVICE_ID_GEFORCE_GO_6800_ULTRA 0x00c9 #define PCI_DEVICE_ID_QUADRO_FX_GO1400 0x00cc #define PCI_DEVICE_ID_QUADRO_FX_1400 0x00ce #define PCI_DEVICE_ID_NVIDIA_NFORCE3 0x00d1 #define PCI_DEVICE_ID_NVIDIA_NFORCE3_SMBUS 0x00d4 #define PCI_DEVICE_ID_NVIDIA_NFORCE3_IDE 0x00d5 #define PCI_DEVICE_ID_NVIDIA_MCP3_MODEM 0x00d9 #define PCI_DEVICE_ID_NVIDIA_MCP3_AUDIO 0x00da #define PCI_DEVICE_ID_NVIDIA_NFORCE3S 0x00e1 #define PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA 0x00e3 #define PCI_DEVICE_ID_NVIDIA_NFORCE3S_SMBUS 0x00e4 #define PCI_DEVICE_ID_NVIDIA_NFORCE3S_IDE 0x00e5 #define PCI_DEVICE_ID_NVIDIA_CK8S_AUDIO 0x00ea #define PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2 0x00ee #define PCIE_DEVICE_ID_NVIDIA_GEFORCE_6800_ALT1 0x00f0 #define PCIE_DEVICE_ID_NVIDIA_GEFORCE_6600_ALT1 0x00f1 #define PCIE_DEVICE_ID_NVIDIA_GEFORCE_6600_ALT2 0x00f2 #define PCIE_DEVICE_ID_NVIDIA_GEFORCE_6200_ALT1 0x00f3 #define PCIE_DEVICE_ID_NVIDIA_GEFORCE_6800_GT 0x00f9 #define PCIE_DEVICE_ID_NVIDIA_QUADRO_NVS280 0x00fd #define PCI_DEVICE_ID_NVIDIA_GEFORCE_SDR 0x0100 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_DDR 0x0101 #define 
PCI_DEVICE_ID_NVIDIA_QUADRO 0x0103 #define PCI_DEVICE_ID_NVIDIA_GEFORCE2_MX 0x0110 #define PCI_DEVICE_ID_NVIDIA_GEFORCE2_MX2 0x0111 #define PCI_DEVICE_ID_NVIDIA_GEFORCE2_GO 0x0112 #define PCI_DEVICE_ID_NVIDIA_QUADRO2_MXR 0x0113 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_6600_GT 0x0140 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_6600 0x0141 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_6610_XL 0x0145 #define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_540 0x014E #define PCI_DEVICE_ID_NVIDIA_GEFORCE_6200 0x014F #define PCI_DEVICE_ID_NVIDIA_GEFORCE2_GTS 0x0150 #define PCI_DEVICE_ID_NVIDIA_GEFORCE2_GTS2 0x0151 #define PCI_DEVICE_ID_NVIDIA_GEFORCE2_ULTRA 0x0152 #define PCI_DEVICE_ID_NVIDIA_QUADRO2_PRO 0x0153 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_6200_TURBOCACHE 0x0161 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_GO_6200 0x0164 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_GO_6250 0x0166 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_GO_6200_1 0x0167 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_GO_6250_1 0x0168 #define PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_460 0x0170 #define PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_440 0x0171 #define PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_420 0x0172 #define PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_440_SE 0x0173 #define PCI_DEVICE_ID_NVIDIA_GEFORCE4_440_GO 0x0174 #define PCI_DEVICE_ID_NVIDIA_GEFORCE4_420_GO 0x0175 #define PCI_DEVICE_ID_NVIDIA_GEFORCE4_420_GO_M32 0x0176 #define PCI_DEVICE_ID_NVIDIA_GEFORCE4_460_GO 0x0177 #define PCI_DEVICE_ID_NVIDIA_QUADRO4_500XGL 0x0178 #define PCI_DEVICE_ID_NVIDIA_GEFORCE4_440_GO_M64 0x0179 #define PCI_DEVICE_ID_NVIDIA_QUADRO4_200 0x017A #define PCI_DEVICE_ID_NVIDIA_QUADRO4_550XGL 0x017B #define PCI_DEVICE_ID_NVIDIA_QUADRO4_500_GOGL 0x017C #define PCI_DEVICE_ID_NVIDIA_GEFORCE4_410_GO_M16 0x017D #define PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_440_8X 0x0181 #define PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_440SE_8X 0x0182 #define PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_420_8X 0x0183 #define PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_4000 0x0185 #define PCI_DEVICE_ID_NVIDIA_GEFORCE4_448_GO 0x0186 #define PCI_DEVICE_ID_NVIDIA_GEFORCE4_488_GO 0x0187 #define PCI_DEVICE_ID_NVIDIA_QUADRO4_580_XGL 0x0188 #define PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_MAC 0x0189 #define PCI_DEVICE_ID_NVIDIA_QUADRO4_280_NVS 0x018A #define PCI_DEVICE_ID_NVIDIA_QUADRO4_380_XGL 0x018B #define PCI_DEVICE_ID_NVIDIA_IGEFORCE2 0x01a0 #define PCI_DEVICE_ID_NVIDIA_NFORCE 0x01a4 #define PCI_DEVICE_ID_NVIDIA_MCP1_AUDIO 0x01b1 #define PCI_DEVICE_ID_NVIDIA_NFORCE_SMBUS 0x01b4 #define PCI_DEVICE_ID_NVIDIA_NFORCE_IDE 0x01bc #define PCI_DEVICE_ID_NVIDIA_MCP1_MODEM 0x01c1 #define PCI_DEVICE_ID_NVIDIA_NFORCE2 0x01e0 #define PCI_DEVICE_ID_NVIDIA_GEFORCE3 0x0200 #define PCI_DEVICE_ID_NVIDIA_GEFORCE3_1 0x0201 #define PCI_DEVICE_ID_NVIDIA_GEFORCE3_2 0x0202 #define PCI_DEVICE_ID_NVIDIA_QUADRO_DDC 0x0203 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_6800B 0x0211 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_6800B_LE 0x0212 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_6800B_GT 0x0215 #define PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4600 0x0250 #define PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4400 0x0251 #define PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4200 0x0253 #define PCI_DEVICE_ID_NVIDIA_QUADRO4_900XGL 0x0258 #define PCI_DEVICE_ID_NVIDIA_QUADRO4_750XGL 0x0259 #define PCI_DEVICE_ID_NVIDIA_QUADRO4_700XGL 0x025B #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SMBUS 0x0264 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_IDE 0x0265 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA 0x0266 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2 0x0267 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SMBUS 0x0368 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_IDE 0x036E #define 
PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA 0x037E #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2 0x037F #define PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4800 0x0280 #define PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4800_8X 0x0281 #define PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4800SE 0x0282 #define PCI_DEVICE_ID_NVIDIA_GEFORCE4_4200_GO 0x0286 #define PCI_DEVICE_ID_NVIDIA_QUADRO4_980_XGL 0x0288 #define PCI_DEVICE_ID_NVIDIA_QUADRO4_780_XGL 0x0289 #define PCI_DEVICE_ID_NVIDIA_QUADRO4_700_GOGL 0x028C #define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5800_ULTRA 0x0301 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5800 0x0302 #define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_2000 0x0308 #define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_1000 0x0309 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5600_ULTRA 0x0311 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5600 0x0312 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5600SE 0x0314 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5600 0x031A #define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5650 0x031B #define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_GO700 0x031C #define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5200 0x0320 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5200_ULTRA 0x0321 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5200_1 0x0322 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5200SE 0x0323 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5200 0x0324 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5250 0x0325 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5500 0x0326 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5100 0x0327 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5250_32 0x0328 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO_5200 0x0329 #define PCI_DEVICE_ID_NVIDIA_QUADRO_NVS_280_PCI 0x032A #define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_500 0x032B #define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5300 0x032C #define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5100 0x032D #define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5900_ULTRA 0x0330 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5900 0x0331 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5900XT 0x0332 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5950_ULTRA 0x0333 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5900ZT 0x0334 #define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_3000 0x0338 #define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_700 0x033F #define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5700_ULTRA 0x0341 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5700 0x0342 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5700LE 0x0343 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5700VE 0x0344 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5700_1 0x0347 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5700_2 0x0348 #define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_GO1000 0x034C #define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_1100 0x034E #define PCI_DEVICE_ID_NVIDIA_MCP55_BRIDGE_V0 0x0360 #define PCI_DEVICE_ID_NVIDIA_MCP55_BRIDGE_V4 0x0364 #define PCI_DEVICE_ID_NVIDIA_NVENET_15 0x0373 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA 0x03E7 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SMBUS 0x03EB #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_IDE 0x03EC #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2 0x03F6 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3 0x03F7 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP65_SMBUS 0x0446 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP65_IDE 0x0448 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP67_SMBUS 0x0542 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP67_IDE 0x0560 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_IDE 0x056C #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP78S_SMBUS 0x0752 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP77_IDE 0x0759 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_SMBUS 0x07D8 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP79_SMBUS 0x0AA2 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP89_SATA 0x0D85 #define 
PCI_VENDOR_ID_IMS 0x10e0 #define PCI_DEVICE_ID_IMS_TT128 0x9128 #define PCI_DEVICE_ID_IMS_TT3D 0x9135 #define PCI_VENDOR_ID_AMCC 0x10e8 #define PCI_VENDOR_ID_AMPERE 0x1def #define PCI_VENDOR_ID_INTERG 0x10ea #define PCI_DEVICE_ID_INTERG_1682 0x1682 #define PCI_DEVICE_ID_INTERG_2000 0x2000 #define PCI_DEVICE_ID_INTERG_2010 0x2010 #define PCI_DEVICE_ID_INTERG_5000 0x5000 #define PCI_DEVICE_ID_INTERG_5050 0x5050 #define PCI_VENDOR_ID_REALTEK 0x10ec #define PCI_DEVICE_ID_REALTEK_8139 0x8139 #define PCI_VENDOR_ID_XILINX 0x10ee #define PCI_DEVICE_ID_RME_DIGI96 0x3fc0 #define PCI_DEVICE_ID_RME_DIGI96_8 0x3fc1 #define PCI_DEVICE_ID_RME_DIGI96_8_PRO 0x3fc2 #define PCI_DEVICE_ID_RME_DIGI96_8_PAD_OR_PST 0x3fc3 #define PCI_DEVICE_ID_XILINX_HAMMERFALL_DSP 0x3fc5 #define PCI_DEVICE_ID_XILINX_HAMMERFALL_DSP_MADI 0x3fc6 #define PCI_VENDOR_ID_INIT 0x1101 #define PCI_VENDOR_ID_CREATIVE 0x1102 /* duplicate: ECTIVA */ #define PCI_DEVICE_ID_CREATIVE_EMU10K1 0x0002 #define PCI_DEVICE_ID_CREATIVE_20K1 0x0005 #define PCI_DEVICE_ID_CREATIVE_20K2 0x000b #define PCI_SUBDEVICE_ID_CREATIVE_SB0760 0x0024 #define PCI_SUBDEVICE_ID_CREATIVE_SB08801 0x0041 #define PCI_SUBDEVICE_ID_CREATIVE_SB08802 0x0042 #define PCI_SUBDEVICE_ID_CREATIVE_SB08803 0x0043 #define PCI_SUBDEVICE_ID_CREATIVE_SB1270 0x0062 #define PCI_SUBDEVICE_ID_CREATIVE_HENDRIX 0x6000 #define PCI_VENDOR_ID_ECTIVA 0x1102 /* duplicate: CREATIVE */ #define PCI_DEVICE_ID_ECTIVA_EV1938 0x8938 #define PCI_VENDOR_ID_TTI 0x1103 #define PCI_DEVICE_ID_TTI_HPT343 0x0003 #define PCI_DEVICE_ID_TTI_HPT366 0x0004 #define PCI_DEVICE_ID_TTI_HPT372 0x0005 #define PCI_DEVICE_ID_TTI_HPT302 0x0006 #define PCI_DEVICE_ID_TTI_HPT371 0x0007 #define PCI_DEVICE_ID_TTI_HPT374 0x0008 #define PCI_DEVICE_ID_TTI_HPT372N 0x0009 /* apparently a 372N variant? 
*/ #define PCI_VENDOR_ID_SIGMA 0x1105 #define PCI_VENDOR_ID_VIA 0x1106 #define PCI_DEVICE_ID_VIA_8763_0 0x0198 #define PCI_DEVICE_ID_VIA_8380_0 0x0204 #define PCI_DEVICE_ID_VIA_3238_0 0x0238 #define PCI_DEVICE_ID_VIA_PT880 0x0258 #define PCI_DEVICE_ID_VIA_PT880ULTRA 0x0308 #define PCI_DEVICE_ID_VIA_PX8X0_0 0x0259 #define PCI_DEVICE_ID_VIA_3269_0 0x0269 #define PCI_DEVICE_ID_VIA_K8T800PRO_0 0x0282 #define PCI_DEVICE_ID_VIA_3296_0 0x0296 #define PCI_DEVICE_ID_VIA_8363_0 0x0305 #define PCI_DEVICE_ID_VIA_P4M800CE 0x0314 #define PCI_DEVICE_ID_VIA_P4M890 0x0327 #define PCI_DEVICE_ID_VIA_VT3324 0x0324 #define PCI_DEVICE_ID_VIA_VT3336 0x0336 #define PCI_DEVICE_ID_VIA_VT3351 0x0351 #define PCI_DEVICE_ID_VIA_VT3364 0x0364 #define PCI_DEVICE_ID_VIA_8371_0 0x0391 #define PCI_DEVICE_ID_VIA_6415 0x0415 #define PCI_DEVICE_ID_VIA_8501_0 0x0501 #define PCI_DEVICE_ID_VIA_82C561 0x0561 #define PCI_DEVICE_ID_VIA_82C586_1 0x0571 #define PCI_DEVICE_ID_VIA_82C576 0x0576 #define PCI_DEVICE_ID_VIA_82C586_0 0x0586 #define PCI_DEVICE_ID_VIA_82C596 0x0596 #define PCI_DEVICE_ID_VIA_82C597_0 0x0597 #define PCI_DEVICE_ID_VIA_82C598_0 0x0598 #define PCI_DEVICE_ID_VIA_8601_0 0x0601 #define PCI_DEVICE_ID_VIA_8605_0 0x0605 #define PCI_DEVICE_ID_VIA_82C686 0x0686 #define PCI_DEVICE_ID_VIA_82C691_0 0x0691 #define PCI_DEVICE_ID_VIA_82C576_1 0x1571 #define PCI_DEVICE_ID_VIA_82C586_2 0x3038 #define PCI_DEVICE_ID_VIA_82C586_3 0x3040 #define PCI_DEVICE_ID_VIA_82C596_3 0x3050 #define PCI_DEVICE_ID_VIA_82C596B_3 0x3051 #define PCI_DEVICE_ID_VIA_82C686_4 0x3057 #define PCI_DEVICE_ID_VIA_82C686_5 0x3058 #define PCI_DEVICE_ID_VIA_8233_5 0x3059 #define PCI_DEVICE_ID_VIA_8233_0 0x3074 #define PCI_DEVICE_ID_VIA_8633_0 0x3091 #define PCI_DEVICE_ID_VIA_8367_0 0x3099 #define PCI_DEVICE_ID_VIA_8653_0 0x3101 #define PCI_DEVICE_ID_VIA_8622 0x3102 #define PCI_DEVICE_ID_VIA_8235_USB_2 0x3104 #define PCI_DEVICE_ID_VIA_8233C_0 0x3109 #define PCI_DEVICE_ID_VIA_8361 0x3112 #define PCI_DEVICE_ID_VIA_XM266 0x3116 #define PCI_DEVICE_ID_VIA_612X 0x3119 #define PCI_DEVICE_ID_VIA_862X_0 0x3123 #define PCI_DEVICE_ID_VIA_8753_0 0x3128 #define PCI_DEVICE_ID_VIA_8233A 0x3147 #define PCI_DEVICE_ID_VIA_8703_51_0 0x3148 #define PCI_DEVICE_ID_VIA_8237_SATA 0x3149 #define PCI_DEVICE_ID_VIA_XN266 0x3156 #define PCI_DEVICE_ID_VIA_6410 0x3164 #define PCI_DEVICE_ID_VIA_8754C_0 0x3168 #define PCI_DEVICE_ID_VIA_8235 0x3177 #define PCI_DEVICE_ID_VIA_8385_0 0x3188 #define PCI_DEVICE_ID_VIA_8377_0 0x3189 #define PCI_DEVICE_ID_VIA_8378_0 0x3205 #define PCI_DEVICE_ID_VIA_8783_0 0x3208 #define PCI_DEVICE_ID_VIA_8237 0x3227 #define PCI_DEVICE_ID_VIA_8251 0x3287 #define PCI_DEVICE_ID_VIA_8261 0x3402 #define PCI_DEVICE_ID_VIA_8237A 0x3337 #define PCI_DEVICE_ID_VIA_8237S 0x3372 #define PCI_DEVICE_ID_VIA_SATA_EIDE 0x5324 #define PCI_DEVICE_ID_VIA_8231 0x8231 #define PCI_DEVICE_ID_VIA_8231_4 0x8235 #define PCI_DEVICE_ID_VIA_8365_1 0x8305 #define PCI_DEVICE_ID_VIA_CX700 0x8324 #define PCI_DEVICE_ID_VIA_CX700_IDE 0x0581 #define PCI_DEVICE_ID_VIA_VX800 0x8353 #define PCI_DEVICE_ID_VIA_VX855 0x8409 #define PCI_DEVICE_ID_VIA_VX900 0x8410 #define PCI_DEVICE_ID_VIA_8371_1 0x8391 #define PCI_DEVICE_ID_VIA_82C598_1 0x8598 #define PCI_DEVICE_ID_VIA_838X_1 0xB188 #define PCI_DEVICE_ID_VIA_83_87XX_1 0xB198 #define PCI_DEVICE_ID_VIA_VX855_IDE 0xC409 #define PCI_DEVICE_ID_VIA_ANON 0xFFFF #define PCI_VENDOR_ID_SIEMENS 0x110A #define PCI_DEVICE_ID_SIEMENS_DSCC4 0x2102 #define PCI_VENDOR_ID_VORTEX 0x1119 #define PCI_DEVICE_ID_VORTEX_GDT60x0 0x0000 #define PCI_DEVICE_ID_VORTEX_GDT6000B 0x0001 #define 
PCI_DEVICE_ID_VORTEX_GDT6x10 0x0002 #define PCI_DEVICE_ID_VORTEX_GDT6x20 0x0003 #define PCI_DEVICE_ID_VORTEX_GDT6530 0x0004 #define PCI_DEVICE_ID_VORTEX_GDT6550 0x0005 #define PCI_DEVICE_ID_VORTEX_GDT6x17 0x0006 #define PCI_DEVICE_ID_VORTEX_GDT6x27 0x0007 #define PCI_DEVICE_ID_VORTEX_GDT6537 0x0008 #define PCI_DEVICE_ID_VORTEX_GDT6557 0x0009 #define PCI_DEVICE_ID_VORTEX_GDT6x15 0x000a #define PCI_DEVICE_ID_VORTEX_GDT6x25 0x000b #define PCI_DEVICE_ID_VORTEX_GDT6535 0x000c #define PCI_DEVICE_ID_VORTEX_GDT6555 0x000d #define PCI_DEVICE_ID_VORTEX_GDT6x17RP 0x0100 #define PCI_DEVICE_ID_VORTEX_GDT6x27RP 0x0101 #define PCI_DEVICE_ID_VORTEX_GDT6537RP 0x0102 #define PCI_DEVICE_ID_VORTEX_GDT6557RP 0x0103 #define PCI_DEVICE_ID_VORTEX_GDT6x11RP 0x0104 #define PCI_DEVICE_ID_VORTEX_GDT6x21RP 0x0105 #define PCI_VENDOR_ID_EF 0x111a #define PCI_DEVICE_ID_EF_ATM_FPGA 0x0000 #define PCI_DEVICE_ID_EF_ATM_ASIC 0x0002 #define PCI_DEVICE_ID_EF_ATM_LANAI2 0x0003 #define PCI_DEVICE_ID_EF_ATM_LANAIHB 0x0005 #define PCI_VENDOR_ID_IDT 0x111d #define PCI_DEVICE_ID_IDT_IDT77201 0x0001 #define PCI_VENDOR_ID_FORE 0x1127 #define PCI_DEVICE_ID_FORE_PCA200E 0x0300 #define PCI_VENDOR_ID_PHILIPS 0x1131 #define PCI_DEVICE_ID_PHILIPS_SAA7146 0x7146 #define PCI_DEVICE_ID_PHILIPS_SAA9730 0x9730 #define PCI_VENDOR_ID_EICON 0x1133 #define PCI_DEVICE_ID_EICON_DIVA20 0xe002 #define PCI_DEVICE_ID_EICON_DIVA20_U 0xe004 #define PCI_DEVICE_ID_EICON_DIVA201 0xe005 #define PCI_DEVICE_ID_EICON_DIVA202 0xe00b #define PCI_DEVICE_ID_EICON_MAESTRA 0xe010 #define PCI_DEVICE_ID_EICON_MAESTRAQ 0xe012 #define PCI_DEVICE_ID_EICON_MAESTRAQ_U 0xe013 #define PCI_DEVICE_ID_EICON_MAESTRAP 0xe014 #define PCI_VENDOR_ID_CISCO 0x1137 #define PCI_VENDOR_ID_ZIATECH 0x1138 #define PCI_DEVICE_ID_ZIATECH_5550_HC 0x5550 #define PCI_VENDOR_ID_SYSKONNECT 0x1148 #define PCI_DEVICE_ID_SYSKONNECT_TR 0x4200 #define PCI_DEVICE_ID_SYSKONNECT_GE 0x4300 #define PCI_DEVICE_ID_SYSKONNECT_YU 0x4320 #define PCI_DEVICE_ID_SYSKONNECT_9DXX 0x4400 #define PCI_DEVICE_ID_SYSKONNECT_9MXX 0x4500 #define PCI_VENDOR_ID_DIGI 0x114f #define PCI_DEVICE_ID_DIGI_DF_M_IOM2_E 0x0070 #define PCI_DEVICE_ID_DIGI_DF_M_E 0x0071 #define PCI_DEVICE_ID_DIGI_DF_M_IOM2_A 0x0072 #define PCI_DEVICE_ID_DIGI_DF_M_A 0x0073 #define PCI_DEVICE_ID_DIGI_NEO_8 0x00B1 #define PCI_DEVICE_ID_NEO_2DB9 0x00C8 #define PCI_DEVICE_ID_NEO_2DB9PRI 0x00C9 #define PCI_DEVICE_ID_NEO_2RJ45 0x00CA #define PCI_DEVICE_ID_NEO_2RJ45PRI 0x00CB #define PCIE_DEVICE_ID_NEO_4_IBM 0x00F4 #define PCI_VENDOR_ID_XIRCOM 0x115d #define PCI_DEVICE_ID_XIRCOM_RBM56G 0x0101 #define PCI_DEVICE_ID_XIRCOM_X3201_MDM 0x0103 #define PCI_VENDOR_ID_SERVERWORKS 0x1166 #define PCI_DEVICE_ID_SERVERWORKS_HE 0x0008 #define PCI_DEVICE_ID_SERVERWORKS_LE 0x0009 #define PCI_DEVICE_ID_SERVERWORKS_GCNB_LE 0x0017 #define PCI_DEVICE_ID_SERVERWORKS_HT1000_PXB 0x0036 #define PCI_DEVICE_ID_SERVERWORKS_EPB 0x0103 #define PCI_DEVICE_ID_SERVERWORKS_HT2000_PCIE 0x0132 #define PCI_DEVICE_ID_SERVERWORKS_OSB4 0x0200 #define PCI_DEVICE_ID_SERVERWORKS_CSB5 0x0201 #define PCI_DEVICE_ID_SERVERWORKS_CSB6 0x0203 #define PCI_DEVICE_ID_SERVERWORKS_HT1000SB 0x0205 #define PCI_DEVICE_ID_SERVERWORKS_OSB4IDE 0x0211 #define PCI_DEVICE_ID_SERVERWORKS_CSB5IDE 0x0212 #define PCI_DEVICE_ID_SERVERWORKS_CSB6IDE 0x0213 #define PCI_DEVICE_ID_SERVERWORKS_HT1000IDE 0x0214 #define PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2 0x0217 #define PCI_DEVICE_ID_SERVERWORKS_CSB6LPC 0x0227 #define PCI_DEVICE_ID_SERVERWORKS_HT1100LD 0x0408 #define PCI_VENDOR_ID_ALTERA 0x1172 #define PCI_VENDOR_ID_SBE 0x1176 #define 
PCI_DEVICE_ID_SBE_WANXL100 0x0301 #define PCI_DEVICE_ID_SBE_WANXL200 0x0302 #define PCI_DEVICE_ID_SBE_WANXL400 0x0104 #define PCI_SUBDEVICE_ID_SBE_T3E3 0x0009 #define PCI_SUBDEVICE_ID_SBE_2T3E3_P0 0x0901 #define PCI_SUBDEVICE_ID_SBE_2T3E3_P1 0x0902 #define PCI_VENDOR_ID_TOSHIBA 0x1179 #define PCI_DEVICE_ID_TOSHIBA_PICCOLO_1 0x0101 #define PCI_DEVICE_ID_TOSHIBA_PICCOLO_2 0x0102 #define PCI_DEVICE_ID_TOSHIBA_PICCOLO_3 0x0103 #define PCI_DEVICE_ID_TOSHIBA_PICCOLO_5 0x0105 #define PCI_DEVICE_ID_TOSHIBA_TOPIC95 0x060a #define PCI_DEVICE_ID_TOSHIBA_TOPIC97 0x060f #define PCI_DEVICE_ID_TOSHIBA_TOPIC100 0x0617 #define PCI_VENDOR_ID_TOSHIBA_2 0x102f #define PCI_DEVICE_ID_TOSHIBA_TC35815CF 0x0030 #define PCI_DEVICE_ID_TOSHIBA_TC35815_NWU 0x0031 #define PCI_DEVICE_ID_TOSHIBA_TC35815_TX4939 0x0032 #define PCI_DEVICE_ID_TOSHIBA_TC86C001_IDE 0x0105 #define PCI_DEVICE_ID_TOSHIBA_TC86C001_MISC 0x0108 #define PCI_DEVICE_ID_TOSHIBA_SPIDER_NET 0x01b3 #define PCI_VENDOR_ID_ATTO 0x117c #define PCI_VENDOR_ID_RICOH 0x1180 #define PCI_DEVICE_ID_RICOH_RL5C465 0x0465 #define PCI_DEVICE_ID_RICOH_RL5C466 0x0466 #define PCI_DEVICE_ID_RICOH_RL5C475 0x0475 #define PCI_DEVICE_ID_RICOH_RL5C476 0x0476 #define PCI_DEVICE_ID_RICOH_RL5C478 0x0478 #define PCI_DEVICE_ID_RICOH_R5C822 0x0822 #define PCI_DEVICE_ID_RICOH_R5CE822 0xe822 #define PCI_DEVICE_ID_RICOH_R5CE823 0xe823 #define PCI_DEVICE_ID_RICOH_R5C832 0x0832 #define PCI_DEVICE_ID_RICOH_R5C843 0x0843 #define PCI_VENDOR_ID_DLINK 0x1186 #define PCI_DEVICE_ID_DLINK_DGE510T 0x4c00 #define PCI_VENDOR_ID_ARTOP 0x1191 #define PCI_DEVICE_ID_ARTOP_ATP850UF 0x0005 #define PCI_DEVICE_ID_ARTOP_ATP860 0x0006 #define PCI_DEVICE_ID_ARTOP_ATP860R 0x0007 #define PCI_DEVICE_ID_ARTOP_ATP865 0x0008 #define PCI_DEVICE_ID_ARTOP_ATP865R 0x0009 #define PCI_DEVICE_ID_ARTOP_ATP867A 0x000A #define PCI_DEVICE_ID_ARTOP_ATP867B 0x000B #define PCI_DEVICE_ID_ARTOP_AEC7610 0x8002 #define PCI_DEVICE_ID_ARTOP_AEC7612UW 0x8010 #define PCI_DEVICE_ID_ARTOP_AEC7612U 0x8020 #define PCI_DEVICE_ID_ARTOP_AEC7612S 0x8030 #define PCI_DEVICE_ID_ARTOP_AEC7612D 0x8040 #define PCI_DEVICE_ID_ARTOP_AEC7612SUW 0x8050 #define PCI_DEVICE_ID_ARTOP_8060 0x8060 #define PCI_VENDOR_ID_ZEITNET 0x1193 #define PCI_DEVICE_ID_ZEITNET_1221 0x0001 #define PCI_DEVICE_ID_ZEITNET_1225 0x0002 #define PCI_VENDOR_ID_FUJITSU_ME 0x119e #define PCI_DEVICE_ID_FUJITSU_FS155 0x0001 #define PCI_DEVICE_ID_FUJITSU_FS50 0x0003 #define PCI_SUBVENDOR_ID_KEYSPAN 0x11a9 #define PCI_SUBDEVICE_ID_KEYSPAN_SX2 0x5334 #define PCI_VENDOR_ID_MARVELL 0x11ab #define PCI_VENDOR_ID_MARVELL_EXT 0x1b4b #define PCI_DEVICE_ID_MARVELL_GT64111 0x4146 #define PCI_DEVICE_ID_MARVELL_GT64260 0x6430 #define PCI_DEVICE_ID_MARVELL_MV64360 0x6460 #define PCI_DEVICE_ID_MARVELL_MV64460 0x6480 #define PCI_DEVICE_ID_MARVELL_88ALP01_NAND 0x4100 #define PCI_DEVICE_ID_MARVELL_88ALP01_SD 0x4101 #define PCI_DEVICE_ID_MARVELL_88ALP01_CCIC 0x4102 #define PCI_VENDOR_ID_V3 0x11b0 #define PCI_DEVICE_ID_V3_V960 0x0001 #define PCI_DEVICE_ID_V3_V351 0x0002 #define PCI_VENDOR_ID_ATT 0x11c1 #define PCI_DEVICE_ID_ATT_VENUS_MODEM 0x480 #define PCI_VENDOR_ID_SPECIALIX 0x11cb #define PCI_SUBDEVICE_ID_SPECIALIX_SPEED4 0xa004 #define PCI_VENDOR_ID_ANALOG_DEVICES 0x11d4 #define PCI_DEVICE_ID_AD1889JS 0x1889 #define PCI_DEVICE_ID_SEGA_BBA 0x1234 #define PCI_VENDOR_ID_ZORAN 0x11de #define PCI_DEVICE_ID_ZORAN_36057 0x6057 #define PCI_DEVICE_ID_ZORAN_36120 0x6120 #define PCI_VENDOR_ID_COMPEX 0x11f6 #define PCI_DEVICE_ID_COMPEX_ENET100VG4 0x0112 #define PCI_VENDOR_ID_PMC_Sierra 0x11f8 #define 
PCI_VENDOR_ID_MICROSEMI 0x11f8 #define PCI_VENDOR_ID_RP 0x11fe #define PCI_DEVICE_ID_RP32INTF 0x0001 #define PCI_DEVICE_ID_RP8INTF 0x0002 #define PCI_DEVICE_ID_RP16INTF 0x0003 #define PCI_DEVICE_ID_RP4QUAD 0x0004 #define PCI_DEVICE_ID_RP8OCTA 0x0005 #define PCI_DEVICE_ID_RP8J 0x0006 #define PCI_DEVICE_ID_RP4J 0x0007 #define PCI_DEVICE_ID_RP8SNI 0x0008 #define PCI_DEVICE_ID_RP16SNI 0x0009 #define PCI_DEVICE_ID_RPP4 0x000A #define PCI_DEVICE_ID_RPP8 0x000B #define PCI_DEVICE_ID_RP4M 0x000D #define PCI_DEVICE_ID_RP2_232 0x000E #define PCI_DEVICE_ID_RP2_422 0x000F #define PCI_DEVICE_ID_URP32INTF 0x0801 #define PCI_DEVICE_ID_URP8INTF 0x0802 #define PCI_DEVICE_ID_URP16INTF 0x0803 #define PCI_DEVICE_ID_URP8OCTA 0x0805 #define PCI_DEVICE_ID_UPCI_RM3_8PORT 0x080C #define PCI_DEVICE_ID_UPCI_RM3_4PORT 0x080D #define PCI_DEVICE_ID_CRP16INTF 0x0903 #define PCI_VENDOR_ID_CYCLADES 0x120e #define PCI_DEVICE_ID_CYCLOM_Y_Lo 0x0100 #define PCI_DEVICE_ID_CYCLOM_Y_Hi 0x0101 #define PCI_DEVICE_ID_CYCLOM_4Y_Lo 0x0102 #define PCI_DEVICE_ID_CYCLOM_4Y_Hi 0x0103 #define PCI_DEVICE_ID_CYCLOM_8Y_Lo 0x0104 #define PCI_DEVICE_ID_CYCLOM_8Y_Hi 0x0105 #define PCI_DEVICE_ID_CYCLOM_Z_Lo 0x0200 #define PCI_DEVICE_ID_CYCLOM_Z_Hi 0x0201 #define PCI_DEVICE_ID_PC300_RX_2 0x0300 #define PCI_DEVICE_ID_PC300_RX_1 0x0301 #define PCI_DEVICE_ID_PC300_TE_2 0x0310 #define PCI_DEVICE_ID_PC300_TE_1 0x0311 #define PCI_DEVICE_ID_PC300_TE_M_2 0x0320 #define PCI_DEVICE_ID_PC300_TE_M_1 0x0321 #define PCI_VENDOR_ID_ESSENTIAL 0x120f #define PCI_DEVICE_ID_ESSENTIAL_ROADRUNNER 0x0001 #define PCI_VENDOR_ID_O2 0x1217 #define PCI_DEVICE_ID_O2_6729 0x6729 #define PCI_DEVICE_ID_O2_6730 0x673a #define PCI_DEVICE_ID_O2_6832 0x6832 #define PCI_DEVICE_ID_O2_6836 0x6836 #define PCI_DEVICE_ID_O2_6812 0x6872 #define PCI_DEVICE_ID_O2_6933 0x6933 #define PCI_DEVICE_ID_O2_8120 0x8120 #define PCI_DEVICE_ID_O2_8220 0x8220 #define PCI_DEVICE_ID_O2_8221 0x8221 #define PCI_DEVICE_ID_O2_8320 0x8320 #define PCI_DEVICE_ID_O2_8321 0x8321 #define PCI_VENDOR_ID_3DFX 0x121a #define PCI_DEVICE_ID_3DFX_VOODOO 0x0001 #define PCI_DEVICE_ID_3DFX_VOODOO2 0x0002 #define PCI_DEVICE_ID_3DFX_BANSHEE 0x0003 #define PCI_DEVICE_ID_3DFX_VOODOO3 0x0005 #define PCI_DEVICE_ID_3DFX_VOODOO5 0x0009 #define PCI_VENDOR_ID_AVM 0x1244 #define PCI_DEVICE_ID_AVM_B1 0x0700 #define PCI_DEVICE_ID_AVM_C4 0x0800 #define PCI_DEVICE_ID_AVM_A1 0x0a00 #define PCI_DEVICE_ID_AVM_A1_V2 0x0e00 #define PCI_DEVICE_ID_AVM_C2 0x1100 #define PCI_DEVICE_ID_AVM_T1 0x1200 #define PCI_VENDOR_ID_STALLION 0x124d /* Allied Telesyn */ #define PCI_VENDOR_ID_AT 0x1259 #define PCI_SUBDEVICE_ID_AT_2700FX 0x2701 #define PCI_SUBDEVICE_ID_AT_2701FX 0x2703 #define PCI_VENDOR_ID_ESS 0x125d #define PCI_DEVICE_ID_ESS_ESS1968 0x1968 #define PCI_DEVICE_ID_ESS_ESS1978 0x1978 #define PCI_DEVICE_ID_ESS_ALLEGRO_1 0x1988 #define PCI_DEVICE_ID_ESS_ALLEGRO 0x1989 #define PCI_DEVICE_ID_ESS_CANYON3D_2LE 0x1990 #define PCI_DEVICE_ID_ESS_CANYON3D_2 0x1992 #define PCI_DEVICE_ID_ESS_MAESTRO3 0x1998 #define PCI_DEVICE_ID_ESS_MAESTRO3_1 0x1999 #define PCI_DEVICE_ID_ESS_MAESTRO3_HW 0x199a #define PCI_DEVICE_ID_ESS_MAESTRO3_2 0x199b #define PCI_VENDOR_ID_SATSAGEM 0x1267 #define PCI_DEVICE_ID_SATSAGEM_NICCY 0x1016 #define PCI_VENDOR_ID_ENSONIQ 0x1274 #define PCI_DEVICE_ID_ENSONIQ_CT5880 0x5880 #define PCI_DEVICE_ID_ENSONIQ_ES1370 0x5000 #define PCI_DEVICE_ID_ENSONIQ_ES1371 0x1371 #define PCI_VENDOR_ID_TRANSMETA 0x1279 #define PCI_DEVICE_ID_EFFICEON 0x0060 #define PCI_VENDOR_ID_ROCKWELL 0x127A #define PCI_VENDOR_ID_ITE 0x1283 #define PCI_DEVICE_ID_ITE_8172 
0x8172 #define PCI_DEVICE_ID_ITE_8211 0x8211 #define PCI_DEVICE_ID_ITE_8212 0x8212 #define PCI_DEVICE_ID_ITE_8213 0x8213 #define PCI_DEVICE_ID_ITE_8152 0x8152 #define PCI_DEVICE_ID_ITE_8872 0x8872 #define PCI_DEVICE_ID_ITE_IT8330G_0 0xe886 /* formerly Platform Tech */ #define PCI_DEVICE_ID_ESS_ESS0100 0x0100 #define PCI_VENDOR_ID_ALTEON 0x12ae #define PCI_SUBVENDOR_ID_CONNECT_TECH 0x12c4 #define PCI_SUBDEVICE_ID_CONNECT_TECH_BH8_232 0x0001 #define PCI_SUBDEVICE_ID_CONNECT_TECH_BH4_232 0x0002 #define PCI_SUBDEVICE_ID_CONNECT_TECH_BH2_232 0x0003 #define PCI_SUBDEVICE_ID_CONNECT_TECH_BH8_485 0x0004 #define PCI_SUBDEVICE_ID_CONNECT_TECH_BH8_485_4_4 0x0005 #define PCI_SUBDEVICE_ID_CONNECT_TECH_BH4_485 0x0006 #define PCI_SUBDEVICE_ID_CONNECT_TECH_BH4_485_2_2 0x0007 #define PCI_SUBDEVICE_ID_CONNECT_TECH_BH2_485 0x0008 #define PCI_SUBDEVICE_ID_CONNECT_TECH_BH8_485_2_6 0x0009 #define PCI_SUBDEVICE_ID_CONNECT_TECH_BH081101V1 0x000A #define PCI_SUBDEVICE_ID_CONNECT_TECH_BH041101V1 0x000B #define PCI_SUBDEVICE_ID_CONNECT_TECH_BH2_20MHZ 0x000C #define PCI_SUBDEVICE_ID_CONNECT_TECH_BH2_PTM 0x000D #define PCI_SUBDEVICE_ID_CONNECT_TECH_NT960PCI 0x0100 #define PCI_SUBDEVICE_ID_CONNECT_TECH_TITAN_2 0x0201 #define PCI_SUBDEVICE_ID_CONNECT_TECH_TITAN_4 0x0202 #define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_2_232 0x0300 #define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4_232 0x0301 #define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_8_232 0x0302 #define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_1_1 0x0310 #define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_2_2 0x0311 #define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4_4 0x0312 #define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_2 0x0320 #define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4 0x0321 #define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_8 0x0322 #define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_2_485 0x0330 #define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4_485 0x0331 #define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_8_485 0x0332 #define PCI_VENDOR_ID_NVIDIA_SGS 0x12d2 #define PCI_DEVICE_ID_NVIDIA_SGS_RIVA128 0x0018 #define PCI_SUBVENDOR_ID_CHASE_PCIFAST 0x12E0 #define PCI_SUBDEVICE_ID_CHASE_PCIFAST4 0x0031 #define PCI_SUBDEVICE_ID_CHASE_PCIFAST8 0x0021 #define PCI_SUBDEVICE_ID_CHASE_PCIFAST16 0x0011 #define PCI_SUBDEVICE_ID_CHASE_PCIFAST16FMC 0x0041 #define PCI_SUBVENDOR_ID_CHASE_PCIRAS 0x124D #define PCI_SUBDEVICE_ID_CHASE_PCIRAS4 0xF001 #define PCI_SUBDEVICE_ID_CHASE_PCIRAS8 0xF010 #define PCI_VENDOR_ID_AUREAL 0x12eb #define PCI_DEVICE_ID_AUREAL_VORTEX_1 0x0001 #define PCI_DEVICE_ID_AUREAL_VORTEX_2 0x0002 #define PCI_DEVICE_ID_AUREAL_ADVANTAGE 0x0003 #define PCI_VENDOR_ID_ELECTRONICDESIGNGMBH 0x12f8 #define PCI_DEVICE_ID_LML_33R10 0x8a02 #define PCI_VENDOR_ID_ESDGMBH 0x12fe #define PCI_DEVICE_ID_ESDGMBH_CPCIASIO4 0x0111 #define PCI_VENDOR_ID_CB 0x1307 /* Measurement Computing */ #define PCI_VENDOR_ID_SIIG 0x131f #define PCI_SUBVENDOR_ID_SIIG 0x131f #define PCI_DEVICE_ID_SIIG_1S_10x_550 0x1000 #define PCI_DEVICE_ID_SIIG_1S_10x_650 0x1001 #define PCI_DEVICE_ID_SIIG_1S_10x_850 0x1002 #define PCI_DEVICE_ID_SIIG_1S1P_10x_550 0x1010 #define PCI_DEVICE_ID_SIIG_1S1P_10x_650 0x1011 #define PCI_DEVICE_ID_SIIG_1S1P_10x_850 0x1012 #define PCI_DEVICE_ID_SIIG_1P_10x 0x1020 #define PCI_DEVICE_ID_SIIG_2P_10x 0x1021 #define PCI_DEVICE_ID_SIIG_2S_10x_550 0x1030 #define PCI_DEVICE_ID_SIIG_2S_10x_650 0x1031 #define PCI_DEVICE_ID_SIIG_2S_10x_850 0x1032 #define PCI_DEVICE_ID_SIIG_2S1P_10x_550 0x1034 #define PCI_DEVICE_ID_SIIG_2S1P_10x_650 0x1035 #define PCI_DEVICE_ID_SIIG_2S1P_10x_850 0x1036 #define 
PCI_DEVICE_ID_SIIG_4S_10x_550 0x1050 #define PCI_DEVICE_ID_SIIG_4S_10x_650 0x1051 #define PCI_DEVICE_ID_SIIG_4S_10x_850 0x1052 #define PCI_DEVICE_ID_SIIG_1S_20x_550 0x2000 #define PCI_DEVICE_ID_SIIG_1S_20x_650 0x2001 #define PCI_DEVICE_ID_SIIG_1S_20x_850 0x2002 #define PCI_DEVICE_ID_SIIG_1P_20x 0x2020 #define PCI_DEVICE_ID_SIIG_2P_20x 0x2021 #define PCI_DEVICE_ID_SIIG_2S_20x_550 0x2030 #define PCI_DEVICE_ID_SIIG_2S_20x_650 0x2031 #define PCI_DEVICE_ID_SIIG_2S_20x_850 0x2032 #define PCI_DEVICE_ID_SIIG_2P1S_20x_550 0x2040 #define PCI_DEVICE_ID_SIIG_2P1S_20x_650 0x2041 #define PCI_DEVICE_ID_SIIG_2P1S_20x_850 0x2042 #define PCI_DEVICE_ID_SIIG_1S1P_20x_550 0x2010 #define PCI_DEVICE_ID_SIIG_1S1P_20x_650 0x2011 #define PCI_DEVICE_ID_SIIG_1S1P_20x_850 0x2012 #define PCI_DEVICE_ID_SIIG_4S_20x_550 0x2050 #define PCI_DEVICE_ID_SIIG_4S_20x_650 0x2051 #define PCI_DEVICE_ID_SIIG_4S_20x_850 0x2052 #define PCI_DEVICE_ID_SIIG_2S1P_20x_550 0x2060 #define PCI_DEVICE_ID_SIIG_2S1P_20x_650 0x2061 #define PCI_DEVICE_ID_SIIG_2S1P_20x_850 0x2062 #define PCI_DEVICE_ID_SIIG_8S_20x_550 0x2080 #define PCI_DEVICE_ID_SIIG_8S_20x_650 0x2081 #define PCI_DEVICE_ID_SIIG_8S_20x_850 0x2082 #define PCI_SUBDEVICE_ID_SIIG_QUARTET_SERIAL 0x2050 #define PCI_VENDOR_ID_RADISYS 0x1331 #define PCI_VENDOR_ID_MICRO_MEMORY 0x1332 #define PCI_DEVICE_ID_MICRO_MEMORY_5415CN 0x5415 #define PCI_DEVICE_ID_MICRO_MEMORY_5425CN 0x5425 #define PCI_DEVICE_ID_MICRO_MEMORY_6155 0x6155 #define PCI_VENDOR_ID_DOMEX 0x134a #define PCI_DEVICE_ID_DOMEX_DMX3191D 0x0001 #define PCI_VENDOR_ID_INTASHIELD 0x135a #define PCI_DEVICE_ID_INTASHIELD_IS200 0x0d80 #define PCI_DEVICE_ID_INTASHIELD_IS400 0x0dc0 #define PCI_VENDOR_ID_QUATECH 0x135C #define PCI_DEVICE_ID_QUATECH_QSC100 0x0010 #define PCI_DEVICE_ID_QUATECH_DSC100 0x0020 #define PCI_DEVICE_ID_QUATECH_DSC200 0x0030 #define PCI_DEVICE_ID_QUATECH_QSC200 0x0040 #define PCI_DEVICE_ID_QUATECH_ESC100D 0x0050 #define PCI_DEVICE_ID_QUATECH_ESC100M 0x0060 #define PCI_DEVICE_ID_QUATECH_QSCP100 0x0120 #define PCI_DEVICE_ID_QUATECH_DSCP100 0x0130 #define PCI_DEVICE_ID_QUATECH_QSCP200 0x0140 #define PCI_DEVICE_ID_QUATECH_DSCP200 0x0150 #define PCI_DEVICE_ID_QUATECH_QSCLP100 0x0170 #define PCI_DEVICE_ID_QUATECH_DSCLP100 0x0180 #define PCI_DEVICE_ID_QUATECH_DSC100E 0x0181 #define PCI_DEVICE_ID_QUATECH_SSCLP100 0x0190 #define PCI_DEVICE_ID_QUATECH_QSCLP200 0x01A0 #define PCI_DEVICE_ID_QUATECH_DSCLP200 0x01B0 #define PCI_DEVICE_ID_QUATECH_DSC200E 0x01B1 #define PCI_DEVICE_ID_QUATECH_SSCLP200 0x01C0 #define PCI_DEVICE_ID_QUATECH_ESCLP100 0x01E0 #define PCI_DEVICE_ID_QUATECH_SPPXP_100 0x0278 #define PCI_VENDOR_ID_SEALEVEL 0x135e #define PCI_DEVICE_ID_SEALEVEL_U530 0x7101 #define PCI_DEVICE_ID_SEALEVEL_UCOMM2 0x7201 #define PCI_DEVICE_ID_SEALEVEL_UCOMM422 0x7402 #define PCI_DEVICE_ID_SEALEVEL_UCOMM232 0x7202 #define PCI_DEVICE_ID_SEALEVEL_COMM4 0x7401 #define PCI_DEVICE_ID_SEALEVEL_COMM8 0x7801 #define PCI_DEVICE_ID_SEALEVEL_7803 0x7803 #define PCI_DEVICE_ID_SEALEVEL_UCOMM8 0x7804 #define PCI_VENDOR_ID_HYPERCOPE 0x1365 #define PCI_DEVICE_ID_HYPERCOPE_PLX 0x9050 #define PCI_SUBDEVICE_ID_HYPERCOPE_OLD_ERGO 0x0104 #define PCI_SUBDEVICE_ID_HYPERCOPE_ERGO 0x0106 #define PCI_SUBDEVICE_ID_HYPERCOPE_METRO 0x0107 #define PCI_SUBDEVICE_ID_HYPERCOPE_CHAMP2 0x0108 #define PCI_VENDOR_ID_DIGIGRAM 0x1369 #define PCI_SUBDEVICE_ID_DIGIGRAM_LX6464ES_SERIAL_SUBSYSTEM 0xc001 #define PCI_SUBDEVICE_ID_DIGIGRAM_LX6464ES_CAE_SERIAL_SUBSYSTEM 0xc002 #define PCI_VENDOR_ID_KAWASAKI 0x136b #define PCI_DEVICE_ID_MCHIP_KL5A72002 0xff01 #define 
PCI_VENDOR_ID_CNET 0x1371 #define PCI_DEVICE_ID_CNET_GIGACARD 0x434e #define PCI_VENDOR_ID_LMC 0x1376 #define PCI_DEVICE_ID_LMC_HSSI 0x0003 #define PCI_DEVICE_ID_LMC_DS3 0x0004 #define PCI_DEVICE_ID_LMC_SSI 0x0005 #define PCI_DEVICE_ID_LMC_T1 0x0006 #define PCI_VENDOR_ID_NETGEAR 0x1385 #define PCI_DEVICE_ID_NETGEAR_GA620 0x620a #define PCI_VENDOR_ID_APPLICOM 0x1389 #define PCI_DEVICE_ID_APPLICOM_PCIGENERIC 0x0001 #define PCI_DEVICE_ID_APPLICOM_PCI2000IBS_CAN 0x0002 #define PCI_DEVICE_ID_APPLICOM_PCI2000PFB 0x0003 #define PCI_VENDOR_ID_MOXA 0x1393 #define PCI_DEVICE_ID_MOXA_RC7000 0x0001 #define PCI_DEVICE_ID_MOXA_CP102 0x1020 #define PCI_DEVICE_ID_MOXA_CP102UL 0x1021 #define PCI_DEVICE_ID_MOXA_CP102U 0x1022 #define PCI_DEVICE_ID_MOXA_C104 0x1040 #define PCI_DEVICE_ID_MOXA_CP104U 0x1041 #define PCI_DEVICE_ID_MOXA_CP104JU 0x1042 #define PCI_DEVICE_ID_MOXA_CP104EL 0x1043 #define PCI_DEVICE_ID_MOXA_CT114 0x1140 #define PCI_DEVICE_ID_MOXA_CP114 0x1141 #define PCI_DEVICE_ID_MOXA_CP118U 0x1180 #define PCI_DEVICE_ID_MOXA_CP118EL 0x1181 #define PCI_DEVICE_ID_MOXA_CP132 0x1320 #define PCI_DEVICE_ID_MOXA_CP132U 0x1321 #define PCI_DEVICE_ID_MOXA_CP134U 0x1340 #define PCI_DEVICE_ID_MOXA_C168 0x1680 #define PCI_DEVICE_ID_MOXA_CP168U 0x1681 #define PCI_DEVICE_ID_MOXA_CP168EL 0x1682 #define PCI_DEVICE_ID_MOXA_CP204J 0x2040 #define PCI_DEVICE_ID_MOXA_C218 0x2180 #define PCI_DEVICE_ID_MOXA_C320 0x3200 #define PCI_VENDOR_ID_CCD 0x1397 #define PCI_DEVICE_ID_CCD_HFC4S 0x08B4 #define PCI_SUBDEVICE_ID_CCD_PMX2S 0x1234 #define PCI_DEVICE_ID_CCD_HFC8S 0x16B8 #define PCI_DEVICE_ID_CCD_2BD0 0x2bd0 #define PCI_DEVICE_ID_CCD_HFCE1 0x30B1 #define PCI_SUBDEVICE_ID_CCD_SPD4S 0x3136 #define PCI_SUBDEVICE_ID_CCD_SPDE1 0x3137 #define PCI_DEVICE_ID_CCD_B000 0xb000 #define PCI_DEVICE_ID_CCD_B006 0xb006 #define PCI_DEVICE_ID_CCD_B007 0xb007 #define PCI_DEVICE_ID_CCD_B008 0xb008 #define PCI_DEVICE_ID_CCD_B009 0xb009 #define PCI_DEVICE_ID_CCD_B00A 0xb00a #define PCI_DEVICE_ID_CCD_B00B 0xb00b #define PCI_DEVICE_ID_CCD_B00C 0xb00c #define PCI_DEVICE_ID_CCD_B100 0xb100 #define PCI_SUBDEVICE_ID_CCD_IOB4ST 0xB520 #define PCI_SUBDEVICE_ID_CCD_IOB8STR 0xB521 #define PCI_SUBDEVICE_ID_CCD_IOB8ST 0xB522 #define PCI_SUBDEVICE_ID_CCD_IOB1E1 0xB523 #define PCI_SUBDEVICE_ID_CCD_SWYX4S 0xB540 #define PCI_SUBDEVICE_ID_CCD_JH4S20 0xB550 #define PCI_SUBDEVICE_ID_CCD_IOB8ST_1 0xB552 #define PCI_SUBDEVICE_ID_CCD_JHSE1 0xB553 #define PCI_SUBDEVICE_ID_CCD_JH8S 0xB55B #define PCI_SUBDEVICE_ID_CCD_BN4S 0xB560 #define PCI_SUBDEVICE_ID_CCD_BN8S 0xB562 #define PCI_SUBDEVICE_ID_CCD_BNE1 0xB563 #define PCI_SUBDEVICE_ID_CCD_BNE1D 0xB564 #define PCI_SUBDEVICE_ID_CCD_BNE1DP 0xB565 #define PCI_SUBDEVICE_ID_CCD_BN2S 0xB566 #define PCI_SUBDEVICE_ID_CCD_BN1SM 0xB567 #define PCI_SUBDEVICE_ID_CCD_BN4SM 0xB568 #define PCI_SUBDEVICE_ID_CCD_BN2SM 0xB569 #define PCI_SUBDEVICE_ID_CCD_BNE1M 0xB56A #define PCI_SUBDEVICE_ID_CCD_BN8SP 0xB56B #define PCI_SUBDEVICE_ID_CCD_HFC4S 0xB620 #define PCI_SUBDEVICE_ID_CCD_HFC8S 0xB622 #define PCI_DEVICE_ID_CCD_B700 0xb700 #define PCI_DEVICE_ID_CCD_B701 0xb701 #define PCI_SUBDEVICE_ID_CCD_HFCE1 0xC523 #define PCI_SUBDEVICE_ID_CCD_OV2S 0xE884 #define PCI_SUBDEVICE_ID_CCD_OV4S 0xE888 #define PCI_SUBDEVICE_ID_CCD_OV8S 0xE998 #define PCI_VENDOR_ID_EXAR 0x13a8 #define PCI_DEVICE_ID_EXAR_XR17C152 0x0152 #define PCI_DEVICE_ID_EXAR_XR17C154 0x0154 #define PCI_DEVICE_ID_EXAR_XR17C158 0x0158 #define PCI_DEVICE_ID_EXAR_XR17V352 0x0352 #define PCI_DEVICE_ID_EXAR_XR17V354 0x0354 #define PCI_DEVICE_ID_EXAR_XR17V358 0x0358 #define 
PCI_VENDOR_ID_MICROGATE 0x13c0 #define PCI_DEVICE_ID_MICROGATE_USC 0x0010 #define PCI_DEVICE_ID_MICROGATE_SCA 0x0030 #define PCI_VENDOR_ID_3WARE 0x13C1 #define PCI_DEVICE_ID_3WARE_1000 0x1000 #define PCI_DEVICE_ID_3WARE_7000 0x1001 #define PCI_DEVICE_ID_3WARE_9000 0x1002 #define PCI_VENDOR_ID_IOMEGA 0x13ca #define PCI_DEVICE_ID_IOMEGA_BUZ 0x4231 #define PCI_VENDOR_ID_ABOCOM 0x13D1 #define PCI_DEVICE_ID_ABOCOM_2BD1 0x2BD1 #define PCI_VENDOR_ID_SUNDANCE 0x13f0 #define PCI_VENDOR_ID_CMEDIA 0x13f6 #define PCI_DEVICE_ID_CMEDIA_CM8338A 0x0100 #define PCI_DEVICE_ID_CMEDIA_CM8338B 0x0101 #define PCI_DEVICE_ID_CMEDIA_CM8738 0x0111 #define PCI_DEVICE_ID_CMEDIA_CM8738B 0x0112 #define PCI_VENDOR_ID_ADVANTECH 0x13fe #define PCI_VENDOR_ID_MEILHAUS 0x1402 #define PCI_VENDOR_ID_LAVA 0x1407 #define PCI_DEVICE_ID_LAVA_DSERIAL 0x0100 /* 2x 16550 */ #define PCI_DEVICE_ID_LAVA_QUATRO_A 0x0101 /* 2x 16550, half of 4 port */ #define PCI_DEVICE_ID_LAVA_QUATRO_B 0x0102 /* 2x 16550, half of 4 port */ #define PCI_DEVICE_ID_LAVA_QUATTRO_A 0x0120 /* 2x 16550A, half of 4 port */ #define PCI_DEVICE_ID_LAVA_QUATTRO_B 0x0121 /* 2x 16550A, half of 4 port */ #define PCI_DEVICE_ID_LAVA_OCTO_A 0x0180 /* 4x 16550A, half of 8 port */ #define PCI_DEVICE_ID_LAVA_OCTO_B 0x0181 /* 4x 16550A, half of 8 port */ #define PCI_DEVICE_ID_LAVA_PORT_PLUS 0x0200 /* 2x 16650 */ #define PCI_DEVICE_ID_LAVA_QUAD_A 0x0201 /* 2x 16650, half of 4 port */ #define PCI_DEVICE_ID_LAVA_QUAD_B 0x0202 /* 2x 16650, half of 4 port */ #define PCI_DEVICE_ID_LAVA_SSERIAL 0x0500 /* 1x 16550 */ #define PCI_DEVICE_ID_LAVA_PORT_650 0x0600 /* 1x 16650 */ #define PCI_DEVICE_ID_LAVA_PARALLEL 0x8000 #define PCI_DEVICE_ID_LAVA_DUAL_PAR_A 0x8002 /* The Lava Dual Parallel is */ #define PCI_DEVICE_ID_LAVA_DUAL_PAR_B 0x8003 /* two PCI devices on a card */ #define PCI_DEVICE_ID_LAVA_BOCA_IOPPAR 0x8800 #define PCI_VENDOR_ID_TIMEDIA 0x1409 #define PCI_DEVICE_ID_TIMEDIA_1889 0x7168 #define PCI_VENDOR_ID_ICE 0x1412 #define PCI_DEVICE_ID_ICE_1712 0x1712 #define PCI_DEVICE_ID_VT1724 0x1724 #define PCI_VENDOR_ID_OXSEMI 0x1415 #define PCI_DEVICE_ID_OXSEMI_12PCI840 0x8403 #define PCI_DEVICE_ID_OXSEMI_PCIe840 0xC000 #define PCI_DEVICE_ID_OXSEMI_PCIe840_G 0xC004 #define PCI_DEVICE_ID_OXSEMI_PCIe952_0 0xC100 #define PCI_DEVICE_ID_OXSEMI_PCIe952_0_G 0xC104 #define PCI_DEVICE_ID_OXSEMI_PCIe952_1 0xC110 #define PCI_DEVICE_ID_OXSEMI_PCIe952_1_G 0xC114 #define PCI_DEVICE_ID_OXSEMI_PCIe952_1_U 0xC118 #define PCI_DEVICE_ID_OXSEMI_PCIe952_1_GU 0xC11C #define PCI_DEVICE_ID_OXSEMI_16PCI954 0x9501 #define PCI_DEVICE_ID_OXSEMI_C950 0x950B #define PCI_DEVICE_ID_OXSEMI_16PCI95N 0x9511 #define PCI_DEVICE_ID_OXSEMI_16PCI954PP 0x9513 #define PCI_DEVICE_ID_OXSEMI_16PCI952 0x9521 #define PCI_DEVICE_ID_OXSEMI_16PCI952PP 0x9523 #define PCI_SUBDEVICE_ID_OXSEMI_C950 0x0001 #define PCI_VENDOR_ID_CHELSIO 0x1425 #define PCI_VENDOR_ID_ADLINK 0x144a #define PCI_VENDOR_ID_SAMSUNG 0x144d #define PCI_VENDOR_ID_GIGABYTE 0x1458 #define PCI_VENDOR_ID_AMBIT 0x1468 #define PCI_VENDOR_ID_MYRICOM 0x14c1 #define PCI_VENDOR_ID_MEDIATEK 0x14c3 #define PCI_VENDOR_ID_TITAN 0x14D2 #define PCI_DEVICE_ID_TITAN_010L 0x8001 #define PCI_DEVICE_ID_TITAN_100L 0x8010 #define PCI_DEVICE_ID_TITAN_110L 0x8011 #define PCI_DEVICE_ID_TITAN_200L 0x8020 #define PCI_DEVICE_ID_TITAN_210L 0x8021 #define PCI_DEVICE_ID_TITAN_400L 0x8040 #define PCI_DEVICE_ID_TITAN_800L 0x8080 #define PCI_DEVICE_ID_TITAN_100 0xA001 #define PCI_DEVICE_ID_TITAN_200 0xA005 #define PCI_DEVICE_ID_TITAN_400 0xA003 #define PCI_DEVICE_ID_TITAN_800B 0xA004 #define 
PCI_VENDOR_ID_PANACOM 0x14d4 #define PCI_DEVICE_ID_PANACOM_QUADMODEM 0x0400 #define PCI_DEVICE_ID_PANACOM_DUALMODEM 0x0402 #define PCI_VENDOR_ID_SIPACKETS 0x14d9 #define PCI_DEVICE_ID_SP1011 0x0010 #define PCI_VENDOR_ID_AFAVLAB 0x14db #define PCI_DEVICE_ID_AFAVLAB_P028 0x2180 #define PCI_DEVICE_ID_AFAVLAB_P030 0x2182 #define PCI_SUBDEVICE_ID_AFAVLAB_P061 0x2150 #define PCI_VENDOR_ID_AMPLICON 0x14dc #define PCI_VENDOR_ID_BCM_GVC 0x14a4 #define PCI_VENDOR_ID_BROADCOM 0x14e4 #define PCI_DEVICE_ID_TIGON3_5752 0x1600 #define PCI_DEVICE_ID_TIGON3_5752M 0x1601 #define PCI_DEVICE_ID_NX2_5709 0x1639 #define PCI_DEVICE_ID_NX2_5709S 0x163a #define PCI_DEVICE_ID_TIGON3_5700 0x1644 #define PCI_DEVICE_ID_TIGON3_5701 0x1645 #define PCI_DEVICE_ID_TIGON3_5702 0x1646 #define PCI_DEVICE_ID_TIGON3_5703 0x1647 #define PCI_DEVICE_ID_TIGON3_5704 0x1648 #define PCI_DEVICE_ID_TIGON3_5704S_2 0x1649 #define PCI_DEVICE_ID_NX2_5706 0x164a #define PCI_DEVICE_ID_NX2_5708 0x164c #define PCI_DEVICE_ID_TIGON3_5702FE 0x164d #define PCI_DEVICE_ID_NX2_57710 0x164e #define PCI_DEVICE_ID_NX2_57711 0x164f #define PCI_DEVICE_ID_NX2_57711E 0x1650 #define PCI_DEVICE_ID_TIGON3_5705 0x1653 #define PCI_DEVICE_ID_TIGON3_5705_2 0x1654 #define PCI_DEVICE_ID_TIGON3_5719 0x1657 #define PCI_DEVICE_ID_TIGON3_5721 0x1659 #define PCI_DEVICE_ID_TIGON3_5722 0x165a #define PCI_DEVICE_ID_TIGON3_5723 0x165b #define PCI_DEVICE_ID_TIGON3_5705M 0x165d #define PCI_DEVICE_ID_TIGON3_5705M_2 0x165e #define PCI_DEVICE_ID_NX2_57712 0x1662 #define PCI_DEVICE_ID_NX2_57712E 0x1663 #define PCI_DEVICE_ID_NX2_57712_MF 0x1663 #define PCI_DEVICE_ID_TIGON3_5714 0x1668 #define PCI_DEVICE_ID_TIGON3_5714S 0x1669 #define PCI_DEVICE_ID_TIGON3_5780 0x166a #define PCI_DEVICE_ID_TIGON3_5780S 0x166b #define PCI_DEVICE_ID_TIGON3_5705F 0x166e #define PCI_DEVICE_ID_NX2_57712_VF 0x166f #define PCI_DEVICE_ID_TIGON3_5754M 0x1672 #define PCI_DEVICE_ID_TIGON3_5755M 0x1673 #define PCI_DEVICE_ID_TIGON3_5756 0x1674 #define PCI_DEVICE_ID_TIGON3_5750 0x1676 #define PCI_DEVICE_ID_TIGON3_5751 0x1677 #define PCI_DEVICE_ID_TIGON3_5715 0x1678 #define PCI_DEVICE_ID_TIGON3_5715S 0x1679 #define PCI_DEVICE_ID_TIGON3_5754 0x167a #define PCI_DEVICE_ID_TIGON3_5755 0x167b #define PCI_DEVICE_ID_TIGON3_5751M 0x167d #define PCI_DEVICE_ID_TIGON3_5751F 0x167e #define PCI_DEVICE_ID_TIGON3_5787F 0x167f #define PCI_DEVICE_ID_TIGON3_5761E 0x1680 #define PCI_DEVICE_ID_TIGON3_5761 0x1681 #define PCI_DEVICE_ID_TIGON3_5764 0x1684 #define PCI_DEVICE_ID_NX2_57800 0x168a #define PCI_DEVICE_ID_NX2_57840 0x168d #define PCI_DEVICE_ID_NX2_57810 0x168e #define PCI_DEVICE_ID_TIGON3_5787M 0x1693 #define PCI_DEVICE_ID_TIGON3_5782 0x1696 #define PCI_DEVICE_ID_TIGON3_5784 0x1698 #define PCI_DEVICE_ID_TIGON3_5786 0x169a #define PCI_DEVICE_ID_TIGON3_5787 0x169b #define PCI_DEVICE_ID_TIGON3_5788 0x169c #define PCI_DEVICE_ID_TIGON3_5789 0x169d #define PCI_DEVICE_ID_NX2_57840_4_10 0x16a1 #define PCI_DEVICE_ID_NX2_57840_2_20 0x16a2 #define PCI_DEVICE_ID_NX2_57840_MF 0x16a4 #define PCI_DEVICE_ID_NX2_57800_MF 0x16a5 #define PCI_DEVICE_ID_TIGON3_5702X 0x16a6 #define PCI_DEVICE_ID_TIGON3_5703X 0x16a7 #define PCI_DEVICE_ID_TIGON3_5704S 0x16a8 #define PCI_DEVICE_ID_NX2_57800_VF 0x16a9 #define PCI_DEVICE_ID_NX2_5706S 0x16aa #define PCI_DEVICE_ID_NX2_5708S 0x16ac #define PCI_DEVICE_ID_NX2_57840_VF 0x16ad #define PCI_DEVICE_ID_NX2_57810_MF 0x16ae #define PCI_DEVICE_ID_NX2_57810_VF 0x16af #define PCI_DEVICE_ID_TIGON3_5702A3 0x16c6 #define PCI_DEVICE_ID_TIGON3_5703A3 0x16c7 #define PCI_DEVICE_ID_TIGON3_5781 0x16dd #define 
PCI_DEVICE_ID_TIGON3_5753 0x16f7 #define PCI_DEVICE_ID_TIGON3_5753M 0x16fd #define PCI_DEVICE_ID_TIGON3_5753F 0x16fe #define PCI_DEVICE_ID_TIGON3_5901 0x170d #define PCI_DEVICE_ID_BCM4401B1 0x170c #define PCI_DEVICE_ID_TIGON3_5901_2 0x170e #define PCI_DEVICE_ID_TIGON3_5906 0x1712 #define PCI_DEVICE_ID_TIGON3_5906M 0x1713 #define PCI_DEVICE_ID_BCM4401 0x4401 #define PCI_DEVICE_ID_BCM4401B0 0x4402 #define PCI_VENDOR_ID_TOPIC 0x151f #define PCI_DEVICE_ID_TOPIC_TP560 0x0000 #define PCI_VENDOR_ID_MAINPINE 0x1522 #define PCI_DEVICE_ID_MAINPINE_PBRIDGE 0x0100 #define PCI_VENDOR_ID_ENE 0x1524 #define PCI_DEVICE_ID_ENE_CB710_FLASH 0x0510 #define PCI_DEVICE_ID_ENE_CB712_SD 0x0550 #define PCI_DEVICE_ID_ENE_CB712_SD_2 0x0551 #define PCI_DEVICE_ID_ENE_CB714_SD 0x0750 #define PCI_DEVICE_ID_ENE_CB714_SD_2 0x0751 #define PCI_DEVICE_ID_ENE_1211 0x1211 #define PCI_DEVICE_ID_ENE_1225 0x1225 #define PCI_DEVICE_ID_ENE_1410 0x1410 #define PCI_DEVICE_ID_ENE_710 0x1411 #define PCI_DEVICE_ID_ENE_712 0x1412 #define PCI_DEVICE_ID_ENE_1420 0x1420 #define PCI_DEVICE_ID_ENE_720 0x1421 #define PCI_DEVICE_ID_ENE_722 0x1422 #define PCI_SUBVENDOR_ID_PERLE 0x155f #define PCI_SUBDEVICE_ID_PCI_RAS4 0xf001 #define PCI_SUBDEVICE_ID_PCI_RAS8 0xf010 #define PCI_VENDOR_ID_SYBA 0x1592 #define PCI_DEVICE_ID_SYBA_2P_EPP 0x0782 #define PCI_DEVICE_ID_SYBA_1P_ECP 0x0783 #define PCI_VENDOR_ID_MORETON 0x15aa #define PCI_DEVICE_ID_RASTEL_2PORT 0x2000 #define PCI_VENDOR_ID_VMWARE 0x15ad #define PCI_DEVICE_ID_VMWARE_VMXNET3 0x07b0 #define PCI_VENDOR_ID_ZOLTRIX 0x15b0 #define PCI_DEVICE_ID_ZOLTRIX_2BD0 0x2bd0 #define PCI_VENDOR_ID_MELLANOX 0x15b3 #define PCI_DEVICE_ID_MELLANOX_CONNECTX3 0x1003 #define PCI_DEVICE_ID_MELLANOX_CONNECTX3_PRO 0x1007 #define PCI_DEVICE_ID_MELLANOX_CONNECTIB 0x1011 #define PCI_DEVICE_ID_MELLANOX_CONNECTX4 0x1013 #define PCI_DEVICE_ID_MELLANOX_CONNECTX4_LX 0x1015 #define PCI_DEVICE_ID_MELLANOX_TAVOR 0x5a44 #define PCI_DEVICE_ID_MELLANOX_TAVOR_BRIDGE 0x5a46 #define PCI_DEVICE_ID_MELLANOX_SINAI_OLD 0x5e8c #define PCI_DEVICE_ID_MELLANOX_SINAI 0x6274 #define PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT 0x6278 #define PCI_DEVICE_ID_MELLANOX_ARBEL 0x6282 #define PCI_DEVICE_ID_MELLANOX_HERMON_SDR 0x6340 #define PCI_DEVICE_ID_MELLANOX_HERMON_DDR 0x634a #define PCI_DEVICE_ID_MELLANOX_HERMON_QDR 0x6354 #define PCI_DEVICE_ID_MELLANOX_HERMON_EN 0x6368 #define PCI_DEVICE_ID_MELLANOX_CONNECTX_EN 0x6372 #define PCI_DEVICE_ID_MELLANOX_HERMON_DDR_GEN2 0x6732 #define PCI_DEVICE_ID_MELLANOX_HERMON_QDR_GEN2 0x673c #define PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_5_GEN2 0x6746 #define PCI_DEVICE_ID_MELLANOX_HERMON_EN_GEN2 0x6750 #define PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_T_GEN2 0x675a #define PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_GEN2 0x6764 #define PCI_DEVICE_ID_MELLANOX_CONNECTX2 0x676e #define PCI_VENDOR_ID_DFI 0x15bd #define PCI_VENDOR_ID_QUICKNET 0x15e2 #define PCI_DEVICE_ID_QUICKNET_XJ 0x0500 /* * ADDI-DATA GmbH communication cards */ #define PCI_VENDOR_ID_ADDIDATA 0x15B8 #define PCI_DEVICE_ID_ADDIDATA_APCI7500 0x7000 #define PCI_DEVICE_ID_ADDIDATA_APCI7420 0x7001 #define PCI_DEVICE_ID_ADDIDATA_APCI7300 0x7002 #define PCI_DEVICE_ID_ADDIDATA_APCI7500_2 0x7009 #define PCI_DEVICE_ID_ADDIDATA_APCI7420_2 0x700A #define PCI_DEVICE_ID_ADDIDATA_APCI7300_2 0x700B #define PCI_DEVICE_ID_ADDIDATA_APCI7500_3 0x700C #define PCI_DEVICE_ID_ADDIDATA_APCI7420_3 0x700D #define PCI_DEVICE_ID_ADDIDATA_APCI7300_3 0x700E #define PCI_DEVICE_ID_ADDIDATA_APCI7800_3 0x700F #define PCI_DEVICE_ID_ADDIDATA_APCIe7300 0x7010 #define PCI_DEVICE_ID_ADDIDATA_APCIe7420 0x7011 #define 
PCI_DEVICE_ID_ADDIDATA_APCIe7500 0x7012 #define PCI_DEVICE_ID_ADDIDATA_APCIe7800 0x7013 #define PCI_VENDOR_ID_PDC 0x15e9 #define PCI_VENDOR_ID_FARSITE 0x1619 #define PCI_DEVICE_ID_FARSITE_T2P 0x0400 #define PCI_DEVICE_ID_FARSITE_T4P 0x0440 #define PCI_DEVICE_ID_FARSITE_T1U 0x0610 #define PCI_DEVICE_ID_FARSITE_T2U 0x0620 #define PCI_DEVICE_ID_FARSITE_T4U 0x0640 #define PCI_DEVICE_ID_FARSITE_TE1 0x1610 #define PCI_DEVICE_ID_FARSITE_TE1C 0x1612 #define PCI_VENDOR_ID_ARIMA 0x161f #define PCI_VENDOR_ID_BROCADE 0x1657 #define PCI_DEVICE_ID_BROCADE_CT 0x0014 #define PCI_DEVICE_ID_BROCADE_FC_8G1P 0x0017 #define PCI_DEVICE_ID_BROCADE_CT_FC 0x0021 #define PCI_VENDOR_ID_SIBYTE 0x166d #define PCI_DEVICE_ID_BCM1250_PCI 0x0001 #define PCI_DEVICE_ID_BCM1250_HT 0x0002 #define PCI_VENDOR_ID_ATHEROS 0x168c #define PCI_VENDOR_ID_NETCELL 0x169c #define PCI_DEVICE_ID_REVOLUTION 0x0044 #define PCI_VENDOR_ID_CENATEK 0x16CA #define PCI_DEVICE_ID_CENATEK_IDE 0x0001 #define PCI_VENDOR_ID_SYNOPSYS 0x16c3 #define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3 0xabcd #define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3_AXI 0xabce #define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB31 0xabcf #define PCI_VENDOR_ID_USR 0x16ec #define PCI_VENDOR_ID_VITESSE 0x1725 #define PCI_DEVICE_ID_VITESSE_VSC7174 0x7174 #define PCI_VENDOR_ID_LINKSYS 0x1737 #define PCI_DEVICE_ID_LINKSYS_EG1064 0x1064 #define PCI_VENDOR_ID_ALTIMA 0x173b #define PCI_DEVICE_ID_ALTIMA_AC1000 0x03e8 #define PCI_DEVICE_ID_ALTIMA_AC1001 0x03e9 #define PCI_DEVICE_ID_ALTIMA_AC9100 0x03ea #define PCI_DEVICE_ID_ALTIMA_AC1003 0x03eb #define PCI_VENDOR_ID_CAVIUM 0x177d #define PCI_VENDOR_ID_TECHWELL 0x1797 #define PCI_DEVICE_ID_TECHWELL_6800 0x6800 #define PCI_DEVICE_ID_TECHWELL_6801 0x6801 #define PCI_DEVICE_ID_TECHWELL_6804 0x6804 #define PCI_DEVICE_ID_TECHWELL_6816_1 0x6810 #define PCI_DEVICE_ID_TECHWELL_6816_2 0x6811 #define PCI_DEVICE_ID_TECHWELL_6816_3 0x6812 #define PCI_DEVICE_ID_TECHWELL_6816_4 0x6813 #define PCI_VENDOR_ID_BELKIN 0x1799 #define PCI_DEVICE_ID_BELKIN_F5D7010V7 0x701f #define PCI_VENDOR_ID_RDC 0x17f3 #define PCI_DEVICE_ID_RDC_R6020 0x6020 #define PCI_DEVICE_ID_RDC_R6030 0x6030 #define PCI_DEVICE_ID_RDC_R6040 0x6040 #define PCI_DEVICE_ID_RDC_R6060 0x6060 #define PCI_DEVICE_ID_RDC_R6061 0x6061 #define PCI_DEVICE_ID_RDC_D1010 0x1010 #define PCI_VENDOR_ID_LENOVO 0x17aa #define PCI_VENDOR_ID_QCOM 0x17cb #define PCI_VENDOR_ID_CDNS 0x17cd #define PCI_VENDOR_ID_ARECA 0x17d3 #define PCI_DEVICE_ID_ARECA_1110 0x1110 #define PCI_DEVICE_ID_ARECA_1120 0x1120 #define PCI_DEVICE_ID_ARECA_1130 0x1130 #define PCI_DEVICE_ID_ARECA_1160 0x1160 #define PCI_DEVICE_ID_ARECA_1170 0x1170 #define PCI_DEVICE_ID_ARECA_1200 0x1200 #define PCI_DEVICE_ID_ARECA_1201 0x1201 #define PCI_DEVICE_ID_ARECA_1202 0x1202 #define PCI_DEVICE_ID_ARECA_1210 0x1210 #define PCI_DEVICE_ID_ARECA_1220 0x1220 #define PCI_DEVICE_ID_ARECA_1230 0x1230 #define PCI_DEVICE_ID_ARECA_1260 0x1260 #define PCI_DEVICE_ID_ARECA_1270 0x1270 #define PCI_DEVICE_ID_ARECA_1280 0x1280 #define PCI_DEVICE_ID_ARECA_1380 0x1380 #define PCI_DEVICE_ID_ARECA_1381 0x1381 #define PCI_DEVICE_ID_ARECA_1680 0x1680 #define PCI_DEVICE_ID_ARECA_1681 0x1681 #define PCI_VENDOR_ID_S2IO 0x17d5 #define PCI_DEVICE_ID_S2IO_WIN 0x5731 #define PCI_DEVICE_ID_S2IO_UNI 0x5831 #define PCI_DEVICE_ID_HERC_WIN 0x5732 #define PCI_DEVICE_ID_HERC_UNI 0x5832 #define PCI_VENDOR_ID_SITECOM 0x182d #define PCI_DEVICE_ID_SITECOM_DC105V2 0x3069 #define PCI_VENDOR_ID_TOPSPIN 0x1867 #define PCI_VENDOR_ID_COMMTECH 0x18f7 #define PCI_VENDOR_ID_SILAN 0x1904 #define PCI_VENDOR_ID_RENESAS 0x1912 
#define PCI_DEVICE_ID_RENESAS_SH7781 0x0001 #define PCI_DEVICE_ID_RENESAS_SH7780 0x0002 #define PCI_DEVICE_ID_RENESAS_SH7763 0x0004 #define PCI_DEVICE_ID_RENESAS_SH7785 0x0007 #define PCI_DEVICE_ID_RENESAS_SH7786 0x0010 #define PCI_VENDOR_ID_SOLARFLARE 0x1924 #define PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0 0x0703 #define PCI_DEVICE_ID_SOLARFLARE_SFC4000A_1 0x6703 #define PCI_DEVICE_ID_SOLARFLARE_SFC4000B 0x0710 #define PCI_VENDOR_ID_TDI 0x192E #define PCI_DEVICE_ID_TDI_EHCI 0x0101 #define PCI_VENDOR_ID_FREESCALE 0x1957 #define PCI_DEVICE_ID_MPC8308 0xc006 #define PCI_DEVICE_ID_MPC8315E 0x00b4 #define PCI_DEVICE_ID_MPC8315 0x00b5 #define PCI_DEVICE_ID_MPC8314E 0x00b6 #define PCI_DEVICE_ID_MPC8314 0x00b7 #define PCI_DEVICE_ID_MPC8378E 0x00c4 #define PCI_DEVICE_ID_MPC8378 0x00c5 #define PCI_DEVICE_ID_MPC8377E 0x00c6 #define PCI_DEVICE_ID_MPC8377 0x00c7 #define PCI_DEVICE_ID_MPC8548E 0x0012 #define PCI_DEVICE_ID_MPC8548 0x0013 #define PCI_DEVICE_ID_MPC8543E 0x0014 #define PCI_DEVICE_ID_MPC8543 0x0015 #define PCI_DEVICE_ID_MPC8547E 0x0018 #define PCI_DEVICE_ID_MPC8545E 0x0019 #define PCI_DEVICE_ID_MPC8545 0x001a #define PCI_DEVICE_ID_MPC8569E 0x0061 #define PCI_DEVICE_ID_MPC8569 0x0060 #define PCI_DEVICE_ID_MPC8568E 0x0020 #define PCI_DEVICE_ID_MPC8568 0x0021 #define PCI_DEVICE_ID_MPC8567E 0x0022 #define PCI_DEVICE_ID_MPC8567 0x0023 #define PCI_DEVICE_ID_MPC8533E 0x0030 #define PCI_DEVICE_ID_MPC8533 0x0031 #define PCI_DEVICE_ID_MPC8544E 0x0032 #define PCI_DEVICE_ID_MPC8544 0x0033 #define PCI_DEVICE_ID_MPC8572E 0x0040 #define PCI_DEVICE_ID_MPC8572 0x0041 #define PCI_DEVICE_ID_MPC8536E 0x0050 #define PCI_DEVICE_ID_MPC8536 0x0051 #define PCI_DEVICE_ID_P2020E 0x0070 #define PCI_DEVICE_ID_P2020 0x0071 #define PCI_DEVICE_ID_P2010E 0x0078 #define PCI_DEVICE_ID_P2010 0x0079 #define PCI_DEVICE_ID_P1020E 0x0100 #define PCI_DEVICE_ID_P1020 0x0101 #define PCI_DEVICE_ID_P1021E 0x0102 #define PCI_DEVICE_ID_P1021 0x0103 #define PCI_DEVICE_ID_P1011E 0x0108 #define PCI_DEVICE_ID_P1011 0x0109 #define PCI_DEVICE_ID_P1022E 0x0110 #define PCI_DEVICE_ID_P1022 0x0111 #define PCI_DEVICE_ID_P1013E 0x0118 #define PCI_DEVICE_ID_P1013 0x0119 #define PCI_DEVICE_ID_P4080E 0x0400 #define PCI_DEVICE_ID_P4080 0x0401 #define PCI_DEVICE_ID_P4040E 0x0408 #define PCI_DEVICE_ID_P4040 0x0409 #define PCI_DEVICE_ID_P2040E 0x0410 #define PCI_DEVICE_ID_P2040 0x0411 #define PCI_DEVICE_ID_P3041E 0x041E #define PCI_DEVICE_ID_P3041 0x041F #define PCI_DEVICE_ID_P5020E 0x0420 #define PCI_DEVICE_ID_P5020 0x0421 #define PCI_DEVICE_ID_P5010E 0x0428 #define PCI_DEVICE_ID_P5010 0x0429 #define PCI_DEVICE_ID_MPC8641 0x7010 #define PCI_DEVICE_ID_MPC8641D 0x7011 #define PCI_DEVICE_ID_MPC8610 0x7018 #define PCI_VENDOR_ID_PASEMI 0x1959 #define PCI_VENDOR_ID_ATTANSIC 0x1969 #define PCI_DEVICE_ID_ATTANSIC_L1 0x1048 #define PCI_DEVICE_ID_ATTANSIC_L2 0x2048 #define PCI_VENDOR_ID_JMICRON 0x197B #define PCI_DEVICE_ID_JMICRON_JMB360 0x2360 #define PCI_DEVICE_ID_JMICRON_JMB361 0x2361 #define PCI_DEVICE_ID_JMICRON_JMB362 0x2362 #define PCI_DEVICE_ID_JMICRON_JMB363 0x2363 #define PCI_DEVICE_ID_JMICRON_JMB364 0x2364 #define PCI_DEVICE_ID_JMICRON_JMB365 0x2365 #define PCI_DEVICE_ID_JMICRON_JMB366 0x2366 #define PCI_DEVICE_ID_JMICRON_JMB368 0x2368 #define PCI_DEVICE_ID_JMICRON_JMB369 0x2369 #define PCI_DEVICE_ID_JMICRON_JMB38X_SD 0x2381 #define PCI_DEVICE_ID_JMICRON_JMB38X_MMC 0x2382 #define PCI_DEVICE_ID_JMICRON_JMB38X_MS 0x2383 #define PCI_DEVICE_ID_JMICRON_JMB385_MS 0x2388 #define PCI_DEVICE_ID_JMICRON_JMB388_SD 0x2391 #define PCI_DEVICE_ID_JMICRON_JMB388_ESD 0x2392 
#define PCI_DEVICE_ID_JMICRON_JMB390_MS 0x2393 #define PCI_VENDOR_ID_KORENIX 0x1982 #define PCI_DEVICE_ID_KORENIX_JETCARDF0 0x1600 #define PCI_DEVICE_ID_KORENIX_JETCARDF1 0x16ff #define PCI_DEVICE_ID_KORENIX_JETCARDF2 0x1700 #define PCI_DEVICE_ID_KORENIX_JETCARDF3 0x17ff #define PCI_VENDOR_ID_HUAWEI 0x19e5 #define PCI_VENDOR_ID_NETRONOME 0x19ee #define PCI_DEVICE_ID_NETRONOME_NFP4000 0x4000 #define PCI_DEVICE_ID_NETRONOME_NFP5000 0x5000 #define PCI_DEVICE_ID_NETRONOME_NFP6000 0x6000 #define PCI_DEVICE_ID_NETRONOME_NFP6000_VF 0x6003 #define PCI_VENDOR_ID_QMI 0x1a32 #define PCI_VENDOR_ID_AZWAVE 0x1a3b #define PCI_VENDOR_ID_REDHAT_QUMRANET 0x1af4 #define PCI_SUBVENDOR_ID_REDHAT_QUMRANET 0x1af4 #define PCI_SUBDEVICE_ID_QEMU 0x1100 #define PCI_VENDOR_ID_ASMEDIA 0x1b21 #define PCI_VENDOR_ID_CIRCUITCO 0x1cc8 #define PCI_SUBSYSTEM_ID_CIRCUITCO_MINNOWBOARD 0x0001 #define PCI_VENDOR_ID_AMAZON 0x1d0f #define PCI_VENDOR_ID_HYGON 0x1d94 #define PCI_VENDOR_ID_TEKRAM 0x1de1 #define PCI_DEVICE_ID_TEKRAM_DC290 0xdc29 #define PCI_VENDOR_ID_TEHUTI 0x1fc9 #define PCI_DEVICE_ID_TEHUTI_3009 0x3009 #define PCI_DEVICE_ID_TEHUTI_3010 0x3010 #define PCI_DEVICE_ID_TEHUTI_3014 0x3014 #define PCI_VENDOR_ID_SUNIX 0x1fd4 #define PCI_DEVICE_ID_SUNIX_1999 0x1999 #define PCI_VENDOR_ID_HINT 0x3388 #define PCI_DEVICE_ID_HINT_VXPROII_IDE 0x8013 #define PCI_VENDOR_ID_3DLABS 0x3d3d #define PCI_DEVICE_ID_3DLABS_PERMEDIA2 0x0007 #define PCI_DEVICE_ID_3DLABS_PERMEDIA2V 0x0009 #define PCI_VENDOR_ID_NETXEN 0x4040 #define PCI_DEVICE_ID_NX2031_10GXSR 0x0001 #define PCI_DEVICE_ID_NX2031_10GCX4 0x0002 #define PCI_DEVICE_ID_NX2031_4GCU 0x0003 #define PCI_DEVICE_ID_NX2031_IMEZ 0x0004 #define PCI_DEVICE_ID_NX2031_HMEZ 0x0005 #define PCI_DEVICE_ID_NX2031_XG_MGMT 0x0024 #define PCI_DEVICE_ID_NX2031_XG_MGMT2 0x0025 #define PCI_DEVICE_ID_NX3031 0x0100 #define PCI_VENDOR_ID_AKS 0x416c #define PCI_DEVICE_ID_AKS_ALADDINCARD 0x0100 #define PCI_VENDOR_ID_ACCESSIO 0x494f #define PCI_DEVICE_ID_ACCESSIO_WDG_CSM 0x22c0 #define PCI_VENDOR_ID_S3 0x5333 #define PCI_DEVICE_ID_S3_TRIO 0x8811 #define PCI_DEVICE_ID_S3_868 0x8880 #define PCI_DEVICE_ID_S3_968 0x88f0 #define PCI_DEVICE_ID_S3_SAVAGE4 0x8a25 #define PCI_DEVICE_ID_S3_PROSAVAGE8 0x8d04 #define PCI_DEVICE_ID_S3_SONICVIBES 0xca00 #define PCI_VENDOR_ID_DUNORD 0x5544 #define PCI_DEVICE_ID_DUNORD_I3000 0x0001 #define PCI_VENDOR_ID_DCI 0x6666 #define PCI_DEVICE_ID_DCI_PCCOM4 0x0001 #define PCI_DEVICE_ID_DCI_PCCOM8 0x0002 #define PCI_DEVICE_ID_DCI_PCCOM2 0x0004 #define PCI_VENDOR_ID_INTEL 0x8086 #define PCI_DEVICE_ID_INTEL_EESSC 0x0008 #define PCI_DEVICE_ID_INTEL_PXHD_0 0x0320 #define PCI_DEVICE_ID_INTEL_PXHD_1 0x0321 #define PCI_DEVICE_ID_INTEL_PXH_0 0x0329 #define PCI_DEVICE_ID_INTEL_PXH_1 0x032A #define PCI_DEVICE_ID_INTEL_PXHV 0x032C #define PCI_DEVICE_ID_INTEL_80332_0 0x0330 #define PCI_DEVICE_ID_INTEL_80332_1 0x0332 #define PCI_DEVICE_ID_INTEL_80333_0 0x0370 #define PCI_DEVICE_ID_INTEL_80333_1 0x0372 #define PCI_DEVICE_ID_INTEL_82375 0x0482 #define PCI_DEVICE_ID_INTEL_82424 0x0483 #define PCI_DEVICE_ID_INTEL_82378 0x0484 #define PCI_DEVICE_ID_INTEL_MRST_SD0 0x0807 #define PCI_DEVICE_ID_INTEL_MRST_SD1 0x0808 #define PCI_DEVICE_ID_INTEL_MFD_SD 0x0820 #define PCI_DEVICE_ID_INTEL_MFD_SDIO1 0x0821 #define PCI_DEVICE_ID_INTEL_MFD_SDIO2 0x0822 #define PCI_DEVICE_ID_INTEL_MFD_EMMC0 0x0823 #define PCI_DEVICE_ID_INTEL_MFD_EMMC1 0x0824 #define PCI_DEVICE_ID_INTEL_MRST_SD2 0x084F #define PCI_DEVICE_ID_INTEL_QUARK_X1000_ILB 0x095E #define PCI_DEVICE_ID_INTEL_I960 0x0960 #define PCI_DEVICE_ID_INTEL_I960RM 0x0962 
#define PCI_DEVICE_ID_INTEL_CENTERTON_ILB 0x0c60 #define PCI_DEVICE_ID_INTEL_8257X_SOL 0x1062 #define PCI_DEVICE_ID_INTEL_82573E_SOL 0x1085 #define PCI_DEVICE_ID_INTEL_82573L_SOL 0x108F #define PCI_DEVICE_ID_INTEL_82815_MC 0x1130 #define PCI_DEVICE_ID_INTEL_82815_CGC 0x1132 #define PCI_DEVICE_ID_INTEL_82092AA_0 0x1221 #define PCI_DEVICE_ID_INTEL_7505_0 0x2550 #define PCI_DEVICE_ID_INTEL_7205_0 0x255d #define PCI_DEVICE_ID_INTEL_82437 0x122d #define PCI_DEVICE_ID_INTEL_82371FB_0 0x122e #define PCI_DEVICE_ID_INTEL_82371FB_1 0x1230 #define PCI_DEVICE_ID_INTEL_82371MX 0x1234 #define PCI_DEVICE_ID_INTEL_82441 0x1237 #define PCI_DEVICE_ID_INTEL_82380FB 0x124b #define PCI_DEVICE_ID_INTEL_82439 0x1250 #define PCI_DEVICE_ID_INTEL_LIGHT_RIDGE 0x1513 /* Tbt 1 Gen 1 */ #define PCI_DEVICE_ID_INTEL_EAGLE_RIDGE 0x151a #define PCI_DEVICE_ID_INTEL_LIGHT_PEAK 0x151b #define PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C 0x1547 /* Tbt 1 Gen 2 */ #define PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C 0x1548 #define PCI_DEVICE_ID_INTEL_PORT_RIDGE 0x1549 #define PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_NHI 0x1566 /* Tbt 1 Gen 3 */ #define PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_BRIDGE 0x1567 #define PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_NHI 0x1568 #define PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_BRIDGE 0x1569 #define PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI 0x156a /* Thunderbolt 2 */ #define PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE 0x156b #define PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI 0x156c #define PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE 0x156d #define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_NHI 0x1575 /* Thunderbolt 3 */ #define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE 0x1576 #define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_NHI 0x1577 #define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE 0x1578 #define PCI_DEVICE_ID_INTEL_80960_RP 0x1960 #define PCI_DEVICE_ID_INTEL_82840_HB 0x1a21 #define PCI_DEVICE_ID_INTEL_82845_HB 0x1a30 #define PCI_DEVICE_ID_INTEL_IOAT 0x1a38 #define PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MIN 0x1c41 #define PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MAX 0x1c5f #define PCI_DEVICE_ID_INTEL_PATSBURG_LPC_0 0x1d40 #define PCI_DEVICE_ID_INTEL_PATSBURG_LPC_1 0x1d41 #define PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI 0x1e31 #define PCI_DEVICE_ID_INTEL_PANTHERPOINT_LPC_MIN 0x1e40 #define PCI_DEVICE_ID_INTEL_PANTHERPOINT_LPC_MAX 0x1e5f #define PCI_DEVICE_ID_INTEL_VMD_201D 0x201d #define PCI_DEVICE_ID_INTEL_DH89XXCC_LPC_MIN 0x2310 #define PCI_DEVICE_ID_INTEL_DH89XXCC_LPC_MAX 0x231f #define PCI_DEVICE_ID_INTEL_82801AA_0 0x2410 #define PCI_DEVICE_ID_INTEL_82801AA_1 0x2411 #define PCI_DEVICE_ID_INTEL_82801AA_3 0x2413 #define PCI_DEVICE_ID_INTEL_82801AA_5 0x2415 #define PCI_DEVICE_ID_INTEL_82801AA_6 0x2416 #define PCI_DEVICE_ID_INTEL_82801AA_8 0x2418 #define PCI_DEVICE_ID_INTEL_82801AB_0 0x2420 #define PCI_DEVICE_ID_INTEL_82801AB_1 0x2421 #define PCI_DEVICE_ID_INTEL_82801AB_3 0x2423 #define PCI_DEVICE_ID_INTEL_82801AB_5 0x2425 #define PCI_DEVICE_ID_INTEL_82801AB_6 0x2426 #define PCI_DEVICE_ID_INTEL_82801AB_8 0x2428 #define PCI_DEVICE_ID_INTEL_82801BA_0 0x2440 #define PCI_DEVICE_ID_INTEL_82801BA_2 0x2443 #define PCI_DEVICE_ID_INTEL_82801BA_4 0x2445 #define PCI_DEVICE_ID_INTEL_82801BA_6 0x2448 #define PCI_DEVICE_ID_INTEL_82801BA_8 0x244a #define PCI_DEVICE_ID_INTEL_82801BA_9 0x244b #define PCI_DEVICE_ID_INTEL_82801BA_10 0x244c #define PCI_DEVICE_ID_INTEL_82801BA_11 0x244e #define PCI_DEVICE_ID_INTEL_82801E_0 0x2450 #define PCI_DEVICE_ID_INTEL_82801E_11 0x245b #define PCI_DEVICE_ID_INTEL_82801CA_0 0x2480 #define PCI_DEVICE_ID_INTEL_82801CA_3 0x2483 #define 
PCI_DEVICE_ID_INTEL_82801CA_5 0x2485 #define PCI_DEVICE_ID_INTEL_82801CA_6 0x2486 #define PCI_DEVICE_ID_INTEL_82801CA_10 0x248a #define PCI_DEVICE_ID_INTEL_82801CA_11 0x248b #define PCI_DEVICE_ID_INTEL_82801CA_12 0x248c #define PCI_DEVICE_ID_INTEL_82801DB_0 0x24c0 #define PCI_DEVICE_ID_INTEL_82801DB_1 0x24c1 #define PCI_DEVICE_ID_INTEL_82801DB_2 0x24c2 #define PCI_DEVICE_ID_INTEL_82801DB_3 0x24c3 #define PCI_DEVICE_ID_INTEL_82801DB_5 0x24c5 #define PCI_DEVICE_ID_INTEL_82801DB_6 0x24c6 #define PCI_DEVICE_ID_INTEL_82801DB_9 0x24c9 #define PCI_DEVICE_ID_INTEL_82801DB_10 0x24ca #define PCI_DEVICE_ID_INTEL_82801DB_11 0x24cb #define PCI_DEVICE_ID_INTEL_82801DB_12 0x24cc #define PCI_DEVICE_ID_INTEL_82801EB_0 0x24d0 #define PCI_DEVICE_ID_INTEL_82801EB_1 0x24d1 #define PCI_DEVICE_ID_INTEL_82801EB_3 0x24d3 #define PCI_DEVICE_ID_INTEL_82801EB_5 0x24d5 #define PCI_DEVICE_ID_INTEL_82801EB_6 0x24d6 #define PCI_DEVICE_ID_INTEL_82801EB_11 0x24db #define PCI_DEVICE_ID_INTEL_82801EB_12 0x24dc #define PCI_DEVICE_ID_INTEL_82801EB_13 0x24dd #define PCI_DEVICE_ID_INTEL_ESB_1 0x25a1 #define PCI_DEVICE_ID_INTEL_ESB_2 0x25a2 #define PCI_DEVICE_ID_INTEL_ESB_4 0x25a4 #define PCI_DEVICE_ID_INTEL_ESB_5 0x25a6 #define PCI_DEVICE_ID_INTEL_ESB_9 0x25ab #define PCI_DEVICE_ID_INTEL_ESB_10 0x25ac #define PCI_DEVICE_ID_INTEL_82820_HB 0x2500 #define PCI_DEVICE_ID_INTEL_82820_UP_HB 0x2501 #define PCI_DEVICE_ID_INTEL_82850_HB 0x2530 #define PCI_DEVICE_ID_INTEL_82860_HB 0x2531 #define PCI_DEVICE_ID_INTEL_E7501_MCH 0x254c #define PCI_DEVICE_ID_INTEL_82845G_HB 0x2560 #define PCI_DEVICE_ID_INTEL_82845G_IG 0x2562 #define PCI_DEVICE_ID_INTEL_82865_HB 0x2570 #define PCI_DEVICE_ID_INTEL_82865_IG 0x2572 #define PCI_DEVICE_ID_INTEL_82875_HB 0x2578 #define PCI_DEVICE_ID_INTEL_82915G_HB 0x2580 #define PCI_DEVICE_ID_INTEL_82915G_IG 0x2582 #define PCI_DEVICE_ID_INTEL_82915GM_HB 0x2590 #define PCI_DEVICE_ID_INTEL_82915GM_IG 0x2592 #define PCI_DEVICE_ID_INTEL_5000_ERR 0x25F0 #define PCI_DEVICE_ID_INTEL_5000_FBD0 0x25F5 #define PCI_DEVICE_ID_INTEL_5000_FBD1 0x25F6 #define PCI_DEVICE_ID_INTEL_82945G_HB 0x2770 #define PCI_DEVICE_ID_INTEL_82945G_IG 0x2772 #define PCI_DEVICE_ID_INTEL_3000_HB 0x2778 #define PCI_DEVICE_ID_INTEL_82945GM_HB 0x27A0 #define PCI_DEVICE_ID_INTEL_82945GM_IG 0x27A2 #define PCI_DEVICE_ID_INTEL_ICH6_0 0x2640 #define PCI_DEVICE_ID_INTEL_ICH6_1 0x2641 #define PCI_DEVICE_ID_INTEL_ICH6_2 0x2642 #define PCI_DEVICE_ID_INTEL_ICH6_16 0x266a #define PCI_DEVICE_ID_INTEL_ICH6_17 0x266d #define PCI_DEVICE_ID_INTEL_ICH6_18 0x266e #define PCI_DEVICE_ID_INTEL_ICH6_19 0x266f #define PCI_DEVICE_ID_INTEL_ESB2_0 0x2670 #define PCI_DEVICE_ID_INTEL_ESB2_14 0x2698 #define PCI_DEVICE_ID_INTEL_ESB2_17 0x269b #define PCI_DEVICE_ID_INTEL_ESB2_18 0x269e #define PCI_DEVICE_ID_INTEL_ICH7_0 0x27b8 #define PCI_DEVICE_ID_INTEL_ICH7_1 0x27b9 #define PCI_DEVICE_ID_INTEL_ICH7_30 0x27b0 #define PCI_DEVICE_ID_INTEL_TGP_LPC 0x27bc #define PCI_DEVICE_ID_INTEL_ICH7_31 0x27bd #define PCI_DEVICE_ID_INTEL_ICH7_17 0x27da #define PCI_DEVICE_ID_INTEL_ICH7_19 0x27dd #define PCI_DEVICE_ID_INTEL_ICH7_20 0x27de #define PCI_DEVICE_ID_INTEL_ICH7_21 0x27df #define PCI_DEVICE_ID_INTEL_ICH8_0 0x2810 #define PCI_DEVICE_ID_INTEL_ICH8_1 0x2811 #define PCI_DEVICE_ID_INTEL_ICH8_2 0x2812 #define PCI_DEVICE_ID_INTEL_ICH8_3 0x2814 #define PCI_DEVICE_ID_INTEL_ICH8_4 0x2815 #define PCI_DEVICE_ID_INTEL_ICH8_5 0x283e #define PCI_DEVICE_ID_INTEL_ICH8_6 0x2850 #define PCI_DEVICE_ID_INTEL_VMD_28C0 0x28c0 #define PCI_DEVICE_ID_INTEL_ICH9_0 0x2910 #define PCI_DEVICE_ID_INTEL_ICH9_1 0x2917 #define 
PCI_DEVICE_ID_INTEL_ICH9_2 0x2912 #define PCI_DEVICE_ID_INTEL_ICH9_3 0x2913 #define PCI_DEVICE_ID_INTEL_ICH9_4 0x2914 #define PCI_DEVICE_ID_INTEL_ICH9_5 0x2919 #define PCI_DEVICE_ID_INTEL_ICH9_6 0x2930 #define PCI_DEVICE_ID_INTEL_ICH9_7 0x2916 #define PCI_DEVICE_ID_INTEL_ICH9_8 0x2918 #define PCI_DEVICE_ID_INTEL_I7_MCR 0x2c18 #define PCI_DEVICE_ID_INTEL_I7_MC_TAD 0x2c19 #define PCI_DEVICE_ID_INTEL_I7_MC_RAS 0x2c1a #define PCI_DEVICE_ID_INTEL_I7_MC_TEST 0x2c1c #define PCI_DEVICE_ID_INTEL_I7_MC_CH0_CTRL 0x2c20 #define PCI_DEVICE_ID_INTEL_I7_MC_CH0_ADDR 0x2c21 #define PCI_DEVICE_ID_INTEL_I7_MC_CH0_RANK 0x2c22 #define PCI_DEVICE_ID_INTEL_I7_MC_CH0_TC 0x2c23 #define PCI_DEVICE_ID_INTEL_I7_MC_CH1_CTRL 0x2c28 #define PCI_DEVICE_ID_INTEL_I7_MC_CH1_ADDR 0x2c29 #define PCI_DEVICE_ID_INTEL_I7_MC_CH1_RANK 0x2c2a #define PCI_DEVICE_ID_INTEL_I7_MC_CH1_TC 0x2c2b #define PCI_DEVICE_ID_INTEL_I7_MC_CH2_CTRL 0x2c30 #define PCI_DEVICE_ID_INTEL_I7_MC_CH2_ADDR 0x2c31 #define PCI_DEVICE_ID_INTEL_I7_MC_CH2_RANK 0x2c32 #define PCI_DEVICE_ID_INTEL_I7_MC_CH2_TC 0x2c33 #define PCI_DEVICE_ID_INTEL_I7_NONCORE 0x2c41 #define PCI_DEVICE_ID_INTEL_I7_NONCORE_ALT 0x2c40 #define PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE 0x2c50 #define PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_ALT 0x2c51 #define PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_REV2 0x2c70 #define PCI_DEVICE_ID_INTEL_LYNNFIELD_SAD 0x2c81 #define PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0 0x2c90 #define PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_PHY0 0x2c91 #define PCI_DEVICE_ID_INTEL_LYNNFIELD_MCR 0x2c98 #define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TAD 0x2c99 #define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TEST 0x2c9C #define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_CTRL 0x2ca0 #define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_ADDR 0x2ca1 #define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_RANK 0x2ca2 #define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_TC 0x2ca3 #define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_CTRL 0x2ca8 #define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_ADDR 0x2ca9 #define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_RANK 0x2caa #define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_TC 0x2cab #define PCI_DEVICE_ID_INTEL_LYNNFIELD_MCR_REV2 0x2d98 #define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TAD_REV2 0x2d99 #define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_RAS_REV2 0x2d9a #define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TEST_REV2 0x2d9c #define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_CTRL_REV2 0x2da0 #define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_ADDR_REV2 0x2da1 #define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_RANK_REV2 0x2da2 #define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_TC_REV2 0x2da3 #define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_CTRL_REV2 0x2da8 #define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_ADDR_REV2 0x2da9 #define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_RANK_REV2 0x2daa #define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_TC_REV2 0x2dab #define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_CTRL_REV2 0x2db0 #define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_ADDR_REV2 0x2db1 #define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_RANK_REV2 0x2db2 #define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_TC_REV2 0x2db3 #define PCI_DEVICE_ID_INTEL_82855PM_HB 0x3340 #define PCI_DEVICE_ID_INTEL_IOAT_TBG4 0x3429 #define PCI_DEVICE_ID_INTEL_IOAT_TBG5 0x342a #define PCI_DEVICE_ID_INTEL_IOAT_TBG6 0x342b #define PCI_DEVICE_ID_INTEL_IOAT_TBG7 0x342c #define PCI_DEVICE_ID_INTEL_X58_HUB_MGMT 0x342e #define PCI_DEVICE_ID_INTEL_IOAT_TBG0 0x3430 #define PCI_DEVICE_ID_INTEL_IOAT_TBG1 0x3431 #define PCI_DEVICE_ID_INTEL_IOAT_TBG2 0x3432 #define PCI_DEVICE_ID_INTEL_IOAT_TBG3 0x3433 #define PCI_DEVICE_ID_INTEL_82830_HB 0x3575 
#define PCI_DEVICE_ID_INTEL_82830_CGC 0x3577 #define PCI_DEVICE_ID_INTEL_82854_HB 0x358c #define PCI_DEVICE_ID_INTEL_82854_IG 0x358e #define PCI_DEVICE_ID_INTEL_82855GM_HB 0x3580 #define PCI_DEVICE_ID_INTEL_82855GM_IG 0x3582 #define PCI_DEVICE_ID_INTEL_E7520_MCH 0x3590 #define PCI_DEVICE_ID_INTEL_E7320_MCH 0x3592 #define PCI_DEVICE_ID_INTEL_MCH_PA 0x3595 #define PCI_DEVICE_ID_INTEL_MCH_PA1 0x3596 #define PCI_DEVICE_ID_INTEL_MCH_PB 0x3597 #define PCI_DEVICE_ID_INTEL_MCH_PB1 0x3598 #define PCI_DEVICE_ID_INTEL_MCH_PC 0x3599 #define PCI_DEVICE_ID_INTEL_MCH_PC1 0x359a #define PCI_DEVICE_ID_INTEL_E7525_MCH 0x359e #define PCI_DEVICE_ID_INTEL_I7300_MCH_ERR 0x360c #define PCI_DEVICE_ID_INTEL_I7300_MCH_FB0 0x360f #define PCI_DEVICE_ID_INTEL_I7300_MCH_FB1 0x3610 #define PCI_DEVICE_ID_INTEL_IOAT_CNB 0x360b #define PCI_DEVICE_ID_INTEL_FBD_CNB 0x360c #define PCI_DEVICE_ID_INTEL_IOAT_JSF0 0x3710 #define PCI_DEVICE_ID_INTEL_IOAT_JSF1 0x3711 #define PCI_DEVICE_ID_INTEL_IOAT_JSF2 0x3712 #define PCI_DEVICE_ID_INTEL_IOAT_JSF3 0x3713 #define PCI_DEVICE_ID_INTEL_IOAT_JSF4 0x3714 #define PCI_DEVICE_ID_INTEL_IOAT_JSF5 0x3715 #define PCI_DEVICE_ID_INTEL_IOAT_JSF6 0x3716 #define PCI_DEVICE_ID_INTEL_IOAT_JSF7 0x3717 #define PCI_DEVICE_ID_INTEL_IOAT_JSF8 0x3718 #define PCI_DEVICE_ID_INTEL_IOAT_JSF9 0x3719 #define PCI_DEVICE_ID_INTEL_ICH10_0 0x3a14 #define PCI_DEVICE_ID_INTEL_ICH10_1 0x3a16 #define PCI_DEVICE_ID_INTEL_ICH10_2 0x3a18 #define PCI_DEVICE_ID_INTEL_ICH10_3 0x3a1a #define PCI_DEVICE_ID_INTEL_ICH10_4 0x3a30 #define PCI_DEVICE_ID_INTEL_ICH10_5 0x3a60 #define PCI_DEVICE_ID_INTEL_5_3400_SERIES_LPC_MIN 0x3b00 #define PCI_DEVICE_ID_INTEL_5_3400_SERIES_LPC_MAX 0x3b1f #define PCI_DEVICE_ID_INTEL_IOAT_SNB0 0x3c20 #define PCI_DEVICE_ID_INTEL_IOAT_SNB1 0x3c21 #define PCI_DEVICE_ID_INTEL_IOAT_SNB2 0x3c22 #define PCI_DEVICE_ID_INTEL_IOAT_SNB3 0x3c23 #define PCI_DEVICE_ID_INTEL_IOAT_SNB4 0x3c24 #define PCI_DEVICE_ID_INTEL_IOAT_SNB5 0x3c25 #define PCI_DEVICE_ID_INTEL_IOAT_SNB6 0x3c26 #define PCI_DEVICE_ID_INTEL_IOAT_SNB7 0x3c27 #define PCI_DEVICE_ID_INTEL_IOAT_SNB8 0x3c2e #define PCI_DEVICE_ID_INTEL_IOAT_SNB9 0x3c2f #define PCI_DEVICE_ID_INTEL_UNC_HA 0x3c46 #define PCI_DEVICE_ID_INTEL_UNC_IMC0 0x3cb0 #define PCI_DEVICE_ID_INTEL_UNC_IMC1 0x3cb1 #define PCI_DEVICE_ID_INTEL_UNC_IMC2 0x3cb4 #define PCI_DEVICE_ID_INTEL_UNC_IMC3 0x3cb5 #define PCI_DEVICE_ID_INTEL_UNC_QPI0 0x3c41 #define PCI_DEVICE_ID_INTEL_UNC_QPI1 0x3c42 #define PCI_DEVICE_ID_INTEL_UNC_R2PCIE 0x3c43 #define PCI_DEVICE_ID_INTEL_UNC_R3QPI0 0x3c44 #define PCI_DEVICE_ID_INTEL_UNC_R3QPI1 0x3c45 #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_RAS 0x3c71 /* 15.1 */ #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR0 0x3c72 /* 16.2 */ #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR1 0x3c73 /* 16.3 */ #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR2 0x3c76 /* 16.6 */ #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR3 0x3c77 /* 16.7 */ #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0 0x3ca0 /* 14.0 */ #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA 0x3ca8 /* 15.0 */ #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0 0x3caa /* 15.2 */ #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD1 0x3cab /* 15.3 */ #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2 0x3cac /* 15.4 */ #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3 0x3cad /* 15.5 */ #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO 0x3cb8 /* 17.0 */ #define PCI_DEVICE_ID_INTEL_JAKETOWN_UBOX 0x3ce0 #define PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0 0x3cf4 /* 12.6 */ #define PCI_DEVICE_ID_INTEL_SBRIDGE_BR 0x3cf5 /* 13.6 */ #define PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1 0x3cf6 /* 12.7 */ #define 
PCI_DEVICE_ID_INTEL_IOAT_SNB 0x402f #define PCI_DEVICE_ID_INTEL_5100_16 0x65f0 #define PCI_DEVICE_ID_INTEL_5100_19 0x65f3 #define PCI_DEVICE_ID_INTEL_5100_21 0x65f5 #define PCI_DEVICE_ID_INTEL_5100_22 0x65f6 #define PCI_DEVICE_ID_INTEL_5400_ERR 0x4030 #define PCI_DEVICE_ID_INTEL_5400_FBD0 0x4035 #define PCI_DEVICE_ID_INTEL_5400_FBD1 0x4036 #define PCI_DEVICE_ID_INTEL_IOAT_SCNB 0x65ff #define PCI_DEVICE_ID_INTEL_EP80579_0 0x5031 #define PCI_DEVICE_ID_INTEL_EP80579_1 0x5032 #define PCI_DEVICE_ID_INTEL_82371SB_0 0x7000 #define PCI_DEVICE_ID_INTEL_82371SB_1 0x7010 #define PCI_DEVICE_ID_INTEL_82371SB_2 0x7020 #define PCI_DEVICE_ID_INTEL_82437VX 0x7030 #define PCI_DEVICE_ID_INTEL_82439TX 0x7100 #define PCI_DEVICE_ID_INTEL_82371AB_0 0x7110 #define PCI_DEVICE_ID_INTEL_82371AB 0x7111 #define PCI_DEVICE_ID_INTEL_82371AB_2 0x7112 #define PCI_DEVICE_ID_INTEL_82371AB_3 0x7113 #define PCI_DEVICE_ID_INTEL_82810_MC1 0x7120 #define PCI_DEVICE_ID_INTEL_82810_IG1 0x7121 #define PCI_DEVICE_ID_INTEL_82810_MC3 0x7122 #define PCI_DEVICE_ID_INTEL_82810_IG3 0x7123 #define PCI_DEVICE_ID_INTEL_82810E_MC 0x7124 #define PCI_DEVICE_ID_INTEL_82810E_IG 0x7125 #define PCI_DEVICE_ID_INTEL_82443LX_0 0x7180 #define PCI_DEVICE_ID_INTEL_82443LX_1 0x7181 #define PCI_DEVICE_ID_INTEL_82443BX_0 0x7190 #define PCI_DEVICE_ID_INTEL_82443BX_1 0x7191 #define PCI_DEVICE_ID_INTEL_82443BX_2 0x7192 #define PCI_DEVICE_ID_INTEL_440MX 0x7195 #define PCI_DEVICE_ID_INTEL_440MX_6 0x7196 #define PCI_DEVICE_ID_INTEL_82443MX_0 0x7198 #define PCI_DEVICE_ID_INTEL_82443MX_1 0x7199 #define PCI_DEVICE_ID_INTEL_82443MX_3 0x719b #define PCI_DEVICE_ID_INTEL_82443GX_0 0x71a0 #define PCI_DEVICE_ID_INTEL_82443GX_2 0x71a2 #define PCI_DEVICE_ID_INTEL_82372FB_1 0x7601 #define PCI_DEVICE_ID_INTEL_SCH_LPC 0x8119 #define PCI_DEVICE_ID_INTEL_SCH_IDE 0x811a #define PCI_DEVICE_ID_INTEL_E6XX_CU 0x8183 #define PCI_DEVICE_ID_INTEL_ITC_LPC 0x8186 #define PCI_DEVICE_ID_INTEL_82454GX 0x84c4 #define PCI_DEVICE_ID_INTEL_82450GX 0x84c5 #define PCI_DEVICE_ID_INTEL_82451NX 0x84ca #define PCI_DEVICE_ID_INTEL_82454NX 0x84cb #define PCI_DEVICE_ID_INTEL_84460GX 0x84ea #define PCI_DEVICE_ID_INTEL_IXP4XX 0x8500 #define PCI_DEVICE_ID_INTEL_IXP2800 0x9004 #define PCI_DEVICE_ID_INTEL_S21152BB 0xb152 #define PCI_VENDOR_ID_SCALEMP 0x8686 #define PCI_DEVICE_ID_SCALEMP_VSMP_CTL 0x1010 #define PCI_VENDOR_ID_COMPUTONE 0x8e0e #define PCI_DEVICE_ID_COMPUTONE_PG 0x0302 #define PCI_SUBVENDOR_ID_COMPUTONE 0x8e0e #define PCI_SUBDEVICE_ID_COMPUTONE_PG4 0x0001 #define PCI_SUBDEVICE_ID_COMPUTONE_PG8 0x0002 #define PCI_SUBDEVICE_ID_COMPUTONE_PG6 0x0003 #define PCI_VENDOR_ID_KTI 0x8e2e #define PCI_VENDOR_ID_ADAPTEC 0x9004 #define PCI_DEVICE_ID_ADAPTEC_7810 0x1078 #define PCI_DEVICE_ID_ADAPTEC_7821 0x2178 #define PCI_DEVICE_ID_ADAPTEC_38602 0x3860 #define PCI_DEVICE_ID_ADAPTEC_7850 0x5078 #define PCI_DEVICE_ID_ADAPTEC_7855 0x5578 #define PCI_DEVICE_ID_ADAPTEC_3860 0x6038 #define PCI_DEVICE_ID_ADAPTEC_1480A 0x6075 #define PCI_DEVICE_ID_ADAPTEC_7860 0x6078 #define PCI_DEVICE_ID_ADAPTEC_7861 0x6178 #define PCI_DEVICE_ID_ADAPTEC_7870 0x7078 #define PCI_DEVICE_ID_ADAPTEC_7871 0x7178 #define PCI_DEVICE_ID_ADAPTEC_7872 0x7278 #define PCI_DEVICE_ID_ADAPTEC_7873 0x7378 #define PCI_DEVICE_ID_ADAPTEC_7874 0x7478 #define PCI_DEVICE_ID_ADAPTEC_7895 0x7895 #define PCI_DEVICE_ID_ADAPTEC_7880 0x8078 #define PCI_DEVICE_ID_ADAPTEC_7881 0x8178 #define PCI_DEVICE_ID_ADAPTEC_7882 0x8278 #define PCI_DEVICE_ID_ADAPTEC_7883 0x8378 #define PCI_DEVICE_ID_ADAPTEC_7884 0x8478 #define PCI_DEVICE_ID_ADAPTEC_7885 0x8578 #define 
PCI_DEVICE_ID_ADAPTEC_7886 0x8678 #define PCI_DEVICE_ID_ADAPTEC_7887 0x8778 #define PCI_DEVICE_ID_ADAPTEC_7888 0x8878 #define PCI_VENDOR_ID_ADAPTEC2 0x9005 #define PCI_DEVICE_ID_ADAPTEC2_2940U2 0x0010 #define PCI_DEVICE_ID_ADAPTEC2_2930U2 0x0011 #define PCI_DEVICE_ID_ADAPTEC2_7890B 0x0013 #define PCI_DEVICE_ID_ADAPTEC2_7890 0x001f #define PCI_DEVICE_ID_ADAPTEC2_3940U2 0x0050 #define PCI_DEVICE_ID_ADAPTEC2_3950U2D 0x0051 #define PCI_DEVICE_ID_ADAPTEC2_7896 0x005f #define PCI_DEVICE_ID_ADAPTEC2_7892A 0x0080 #define PCI_DEVICE_ID_ADAPTEC2_7892B 0x0081 #define PCI_DEVICE_ID_ADAPTEC2_7892D 0x0083 #define PCI_DEVICE_ID_ADAPTEC2_7892P 0x008f #define PCI_DEVICE_ID_ADAPTEC2_7899A 0x00c0 #define PCI_DEVICE_ID_ADAPTEC2_7899B 0x00c1 #define PCI_DEVICE_ID_ADAPTEC2_7899D 0x00c3 #define PCI_DEVICE_ID_ADAPTEC2_7899P 0x00cf #define PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN 0x0500 #define PCI_DEVICE_ID_ADAPTEC2_SCAMP 0x0503 #define PCI_VENDOR_ID_HOLTEK 0x9412 #define PCI_DEVICE_ID_HOLTEK_6565 0x6565 #define PCI_VENDOR_ID_NETMOS 0x9710 #define PCI_DEVICE_ID_NETMOS_9705 0x9705 #define PCI_DEVICE_ID_NETMOS_9715 0x9715 #define PCI_DEVICE_ID_NETMOS_9735 0x9735 #define PCI_DEVICE_ID_NETMOS_9745 0x9745 #define PCI_DEVICE_ID_NETMOS_9755 0x9755 #define PCI_DEVICE_ID_NETMOS_9805 0x9805 #define PCI_DEVICE_ID_NETMOS_9815 0x9815 #define PCI_DEVICE_ID_NETMOS_9835 0x9835 #define PCI_DEVICE_ID_NETMOS_9845 0x9845 #define PCI_DEVICE_ID_NETMOS_9855 0x9855 #define PCI_DEVICE_ID_NETMOS_9865 0x9865 #define PCI_DEVICE_ID_NETMOS_9900 0x9900 #define PCI_DEVICE_ID_NETMOS_9901 0x9901 #define PCI_DEVICE_ID_NETMOS_9904 0x9904 #define PCI_DEVICE_ID_NETMOS_9912 0x9912 #define PCI_DEVICE_ID_NETMOS_9922 0x9922 #define PCI_VENDOR_ID_3COM_2 0xa727 #define PCI_VENDOR_ID_DIGIUM 0xd161 #define PCI_DEVICE_ID_DIGIUM_HFC4S 0xb410 #define PCI_SUBVENDOR_ID_EXSYS 0xd84d #define PCI_SUBDEVICE_ID_EXSYS_4014 0x4014 #define PCI_SUBDEVICE_ID_EXSYS_4055 0x4055 #define PCI_VENDOR_ID_TIGERJET 0xe159 #define PCI_DEVICE_ID_TIGERJET_300 0x0001 #define PCI_DEVICE_ID_TIGERJET_100 0x0002 #define PCI_VENDOR_ID_XILINX_RME 0xea60 #define PCI_DEVICE_ID_RME_DIGI32 0x9896 #define PCI_DEVICE_ID_RME_DIGI32_PRO 0x9897 #define PCI_DEVICE_ID_RME_DIGI32_8 0x9898 #define PCI_VENDOR_ID_XEN 0x5853 #define PCI_DEVICE_ID_XEN_PLATFORM 0x0001 #define PCI_VENDOR_ID_OCZ 0x1b85 #define PCI_VENDOR_ID_NCUBE 0x10ff #endif /* _LINUX_PCI_IDS_H */
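/*
 * [Editor's illustrative sketch - not part of the archived pci_ids.h]
 * The constants above are consumed by drivers in their PCI match tables.
 * A minimal example of that pattern; the "my_wifi" name and the device ID
 * 0x2526 are hypothetical placeholders:
 */
#include <linux/module.h>
#include <linux/pci.h>

static const struct pci_device_id my_wifi_pci_ids[] = {
	/* match one specific vendor/device pair, any subsystem */
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2526) },
	/* match every device of one vendor */
	{ PCI_VDEVICE(MELLANOX, PCI_ANY_ID) },
	{ }	/* terminating all-zero entry, required */
};
MODULE_DEVICE_TABLE(pci, my_wifi_pci_ids);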
backport-iwlwifi-dkms-7906/include/linux/refcount.h000066400000000000000000000027071351064634500224670ustar00rootroot00000000000000/* Automatically created during backport process */ #ifndef CPTCFG_BPAUTO_REFCOUNT #include_next <linux/refcount.h> #else #undef refcount_add_not_zero_checked #define refcount_add_not_zero_checked LINUX_BACKPORT(refcount_add_not_zero_checked) #undef refcount_add_checked #define refcount_add_checked LINUX_BACKPORT(refcount_add_checked) #undef refcount_inc_not_zero_checked #define refcount_inc_not_zero_checked LINUX_BACKPORT(refcount_inc_not_zero_checked) #undef refcount_inc_checked #define refcount_inc_checked LINUX_BACKPORT(refcount_inc_checked) #undef refcount_sub_and_test_checked #define refcount_sub_and_test_checked LINUX_BACKPORT(refcount_sub_and_test_checked) #undef refcount_dec_and_test_checked #define refcount_dec_and_test_checked LINUX_BACKPORT(refcount_dec_and_test_checked) #undef refcount_dec_checked #define refcount_dec_checked LINUX_BACKPORT(refcount_dec_checked) #undef refcount_dec_if_one #define refcount_dec_if_one LINUX_BACKPORT(refcount_dec_if_one) #undef refcount_dec_not_one #define refcount_dec_not_one LINUX_BACKPORT(refcount_dec_not_one) #undef refcount_dec_and_mutex_lock #define refcount_dec_and_mutex_lock LINUX_BACKPORT(refcount_dec_and_mutex_lock) #undef refcount_dec_and_lock #define refcount_dec_and_lock LINUX_BACKPORT(refcount_dec_and_lock) #undef refcount_dec_and_lock_irqsave #define refcount_dec_and_lock_irqsave LINUX_BACKPORT(refcount_dec_and_lock_irqsave) #include <linux/backport-refcount.h> #endif /* CPTCFG_BPAUTO_REFCOUNT */
backport-iwlwifi-dkms-7906/include/linux/rhashtable-types.h000066400000000000000000000032061351064634500241140ustar00rootroot00000000000000/* Automatically created during backport process */ #ifndef CPTCFG_BPAUTO_RHASHTABLE #include_next <linux/rhashtable-types.h> #else #undef lockdep_rht_mutex_is_held #define lockdep_rht_mutex_is_held LINUX_BACKPORT(lockdep_rht_mutex_is_held) #undef lockdep_rht_bucket_is_held #define lockdep_rht_bucket_is_held LINUX_BACKPORT(lockdep_rht_bucket_is_held) #undef rhashtable_insert_slow #define rhashtable_insert_slow LINUX_BACKPORT(rhashtable_insert_slow) #undef rhashtable_walk_enter #define rhashtable_walk_enter LINUX_BACKPORT(rhashtable_walk_enter) #undef rhashtable_walk_exit #define rhashtable_walk_exit LINUX_BACKPORT(rhashtable_walk_exit) #undef rhashtable_walk_start_check #define rhashtable_walk_start_check LINUX_BACKPORT(rhashtable_walk_start_check) #undef rhashtable_walk_next #define rhashtable_walk_next LINUX_BACKPORT(rhashtable_walk_next) #undef rhashtable_walk_peek #define rhashtable_walk_peek LINUX_BACKPORT(rhashtable_walk_peek) #undef rhashtable_walk_stop #define rhashtable_walk_stop LINUX_BACKPORT(rhashtable_walk_stop) #undef rhashtable_init #define rhashtable_init LINUX_BACKPORT(rhashtable_init) #undef rhltable_init #define rhltable_init LINUX_BACKPORT(rhltable_init) #undef rhashtable_free_and_destroy #define rhashtable_free_and_destroy LINUX_BACKPORT(rhashtable_free_and_destroy) #undef rhashtable_destroy #define rhashtable_destroy LINUX_BACKPORT(rhashtable_destroy) #undef rht_bucket_nested #define rht_bucket_nested LINUX_BACKPORT(rht_bucket_nested) #undef rht_bucket_nested_insert #define rht_bucket_nested_insert LINUX_BACKPORT(rht_bucket_nested_insert) #include <linux/backport-rhashtable-types.h> #endif /* CPTCFG_BPAUTO_RHASHTABLE */
backport-iwlwifi-dkms-7906/include/linux/rhashtable.h000066400000000000000000000031721351064634500227540ustar00rootroot00000000000000/* Automatically created during backport process */ #ifndef CPTCFG_BPAUTO_RHASHTABLE #include_next <linux/rhashtable.h> #else #undef lockdep_rht_mutex_is_held #define lockdep_rht_mutex_is_held LINUX_BACKPORT(lockdep_rht_mutex_is_held) #undef lockdep_rht_bucket_is_held #define lockdep_rht_bucket_is_held LINUX_BACKPORT(lockdep_rht_bucket_is_held) #undef rhashtable_insert_slow #define rhashtable_insert_slow LINUX_BACKPORT(rhashtable_insert_slow) #undef rhashtable_walk_enter #define rhashtable_walk_enter LINUX_BACKPORT(rhashtable_walk_enter) #undef rhashtable_walk_exit #define rhashtable_walk_exit LINUX_BACKPORT(rhashtable_walk_exit) #undef rhashtable_walk_start_check #define rhashtable_walk_start_check LINUX_BACKPORT(rhashtable_walk_start_check) #undef rhashtable_walk_next #define rhashtable_walk_next LINUX_BACKPORT(rhashtable_walk_next) #undef rhashtable_walk_peek #define rhashtable_walk_peek LINUX_BACKPORT(rhashtable_walk_peek) #undef rhashtable_walk_stop #define rhashtable_walk_stop LINUX_BACKPORT(rhashtable_walk_stop) #undef rhashtable_init #define rhashtable_init LINUX_BACKPORT(rhashtable_init) #undef rhltable_init #define rhltable_init LINUX_BACKPORT(rhltable_init) #undef rhashtable_free_and_destroy #define rhashtable_free_and_destroy LINUX_BACKPORT(rhashtable_free_and_destroy) #undef rhashtable_destroy #define rhashtable_destroy LINUX_BACKPORT(rhashtable_destroy) #undef rht_bucket_nested #define rht_bucket_nested LINUX_BACKPORT(rht_bucket_nested) #undef rht_bucket_nested_insert #define rht_bucket_nested_insert LINUX_BACKPORT(rht_bucket_nested_insert) #include <linux/backport-rhashtable.h> #endif /* CPTCFG_BPAUTO_RHASHTABLE */
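/*
 * [Editor's illustrative sketch - not part of the archive] The wrapper
 * headers above rename each backported symbol so that it cannot clash with
 * the symbol of the same name exported by the running kernel.  The renaming
 * macro is roughly the following (its real home is the backport-include/
 * tree; treat this definition as an assumption, not a verbatim copy):
 */
#define LINUX_BACKPORT(__sym) backport_ ##__sym

/*
 * With a wrapper header in effect, a driver call written as
 * refcount_dec_and_lock(&r, &lock) expands to
 * backport_refcount_dec_and_lock(&r, &lock), so it binds to the copy
 * compiled into the compat module rather than to the kernel's own symbol.
 */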
backport-iwlwifi-dkms-7906/include/net/000077500000000000000000000000001351064634500201125ustar00rootroot00000000000000backport-iwlwifi-dkms-7906/include/net/cfg80211-wext.h000066400000000000000000000037201351064634500224050ustar00rootroot00000000000000#ifndef __NET_CFG80211_WEXT_H #define __NET_CFG80211_WEXT_H /* * 802.11 device and configuration interface -- wext handlers * * Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/wireless.h> #include <net/iw_handler.h> #include <net/cfg80211.h> /* * Temporary wext handlers & helper functions * * These are used only by drivers that aren't yet fully * converted to cfg80211. */ int cfg80211_wext_giwname(struct net_device *dev, struct iw_request_info *info, char *name, char *extra); int cfg80211_wext_siwmode(struct net_device *dev, struct iw_request_info *info, u32 *mode, char *extra); int cfg80211_wext_giwmode(struct net_device *dev, struct iw_request_info *info, u32 *mode, char *extra); int cfg80211_wext_siwscan(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra); int cfg80211_wext_giwscan(struct net_device *dev, struct iw_request_info *info, struct iw_point *data, char *extra); int cfg80211_wext_giwrange(struct net_device *dev, struct iw_request_info *info, struct iw_point *data, char *extra); int cfg80211_wext_siwrts(struct net_device *dev, struct iw_request_info *info, struct iw_param *rts, char *extra); int cfg80211_wext_giwrts(struct net_device *dev, struct iw_request_info *info, struct iw_param *rts, char *extra); int cfg80211_wext_siwfrag(struct net_device *dev, struct iw_request_info *info, struct iw_param *frag, char *extra); int cfg80211_wext_giwfrag(struct net_device *dev, struct iw_request_info *info, struct iw_param *frag, char *extra); int cfg80211_wext_giwretry(struct net_device *dev, struct iw_request_info *info, struct iw_param *retry, char *extra); #endif /* __NET_CFG80211_WEXT_H */
backport-iwlwifi-dkms-7906/include/net/cfg80211.h000066400000000000000000010016211351064634500214170ustar00rootroot00000000000000#ifndef __NET_CFG80211_H #define __NET_CFG80211_H /* * 802.11 device and configuration interface * * Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net> * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright 2015-2017 Intel Deutschland GmbH * Copyright (C) 2018-2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/netdevice.h> #include <linux/debugfs.h> #include <linux/list.h> #include <linux/bug.h> #include <linux/netlink.h> #include <linux/skbuff.h> #include <linux/nl80211.h> #include <linux/if_ether.h> #include <linux/ieee80211.h> #include <linux/net.h> #include <net/regulatory.h> /** * DOC: Introduction * * cfg80211 is the configuration API for 802.11 devices in Linux. It bridges * userspace and drivers, and offers some utility functionality associated * with 802.11. cfg80211 must, directly or indirectly via mac80211, be used * by all modern wireless drivers in Linux, so that they offer a consistent * API through nl80211.
For backward compatibility, cfg80211 also offers * wireless extensions to userspace, but hides them from drivers completely. * * Additionally, cfg80211 contains code to help enforce regulatory spectrum * use restrictions. */ /** * DOC: Device registration * * In order for a driver to use cfg80211, it must register the hardware device * with cfg80211. This happens through a number of hardware capability structs * described below. * * The fundamental structure for each device is the 'wiphy', of which each * instance describes a physical wireless device connected to the system. Each * such wiphy can have zero, one, or many virtual interfaces associated with * it, which need to be identified as such by pointing the network interface's * @ieee80211_ptr pointer to a &struct wireless_dev which further describes * the wireless part of the interface; normally this struct is embedded in the * network interface's private data area. Drivers can optionally allow creating * or destroying virtual interfaces on the fly, but without at least one * interface, or the ability to create one, the wireless device isn't useful. * * Each wiphy structure contains device capability information, and also has * a pointer to the various operations the driver offers. The definitions and * structures here describe these capabilities in detail. */ struct wiphy; /* * wireless hardware capability structures */ /** * enum ieee80211_channel_flags - channel flags * * Channel flags set by the regulatory control code. * * @IEEE80211_CHAN_DISABLED: This channel is disabled. * @IEEE80211_CHAN_NO_IR: do not initiate radiation, this includes * sending probe requests or beaconing. * @IEEE80211_CHAN_RADAR: Radar detection is required on this channel. * @IEEE80211_CHAN_NO_HT40PLUS: extension channel above this channel * is not permitted. * @IEEE80211_CHAN_NO_HT40MINUS: extension channel below this channel * is not permitted. * @IEEE80211_CHAN_NO_OFDM: OFDM is not allowed on this channel. * @IEEE80211_CHAN_NO_80MHZ: If the driver supports 80 MHz on the band, * this flag indicates that an 80 MHz channel cannot use this * channel as the control or any of the secondary channels. * This may be due to the driver or due to regulatory bandwidth * restrictions. * @IEEE80211_CHAN_NO_160MHZ: If the driver supports 160 MHz on the band, * this flag indicates that a 160 MHz channel cannot use this * channel as the control or any of the secondary channels. * This may be due to the driver or due to regulatory bandwidth * restrictions. * @IEEE80211_CHAN_INDOOR_ONLY: see %NL80211_FREQUENCY_ATTR_INDOOR_ONLY * @IEEE80211_CHAN_IR_CONCURRENT: see %NL80211_FREQUENCY_ATTR_IR_CONCURRENT * @IEEE80211_CHAN_NO_20MHZ: 20 MHz bandwidth is not permitted * on this channel. * @IEEE80211_CHAN_NO_10MHZ: 10 MHz bandwidth is not permitted * on this channel.
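 *
 * (Editor's illustrative sketch, not part of the original kerneldoc: a
 * caller that wants to start actively transmitting on a channel is
 * expected to honour these flags, along the lines of the hypothetical
 * helper below.)
 *
 *	static bool my_can_initiate_tx(struct ieee80211_channel *chan)
 *	{
 *		if (chan->flags & (IEEE80211_CHAN_DISABLED |
 *				   IEEE80211_CHAN_NO_IR))
 *			return false;
 *		if (chan->flags & IEEE80211_CHAN_RADAR)
 *			return chan->dfs_state == NL80211_DFS_AVAILABLE;
 *		return true;
 *	}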
* */ enum ieee80211_channel_flags { IEEE80211_CHAN_DISABLED = 1<<0, IEEE80211_CHAN_NO_IR = 1<<1, /* hole at 1<<2 */ IEEE80211_CHAN_RADAR = 1<<3, IEEE80211_CHAN_NO_HT40PLUS = 1<<4, IEEE80211_CHAN_NO_HT40MINUS = 1<<5, IEEE80211_CHAN_NO_OFDM = 1<<6, IEEE80211_CHAN_NO_80MHZ = 1<<7, IEEE80211_CHAN_NO_160MHZ = 1<<8, IEEE80211_CHAN_INDOOR_ONLY = 1<<9, IEEE80211_CHAN_IR_CONCURRENT = 1<<10, IEEE80211_CHAN_NO_20MHZ = 1<<11, IEEE80211_CHAN_NO_10MHZ = 1<<12, }; #define IEEE80211_CHAN_NO_HT40 \ (IEEE80211_CHAN_NO_HT40PLUS | IEEE80211_CHAN_NO_HT40MINUS) #define IEEE80211_DFS_MIN_CAC_TIME_MS 60000 #define IEEE80211_DFS_MIN_NOP_TIME_MS (30 * 60 * 1000) /** * struct ieee80211_channel - channel definition * * This structure describes a single channel for use * with cfg80211. * * @center_freq: center frequency in MHz * @hw_value: hardware-specific value for the channel * @flags: channel flags from &enum ieee80211_channel_flags. * @orig_flags: channel flags at registration time, used by regulatory * code to support devices with additional restrictions * @band: band this channel belongs to. * @max_antenna_gain: maximum antenna gain in dBi * @max_power: maximum transmission power (in dBm) * @max_reg_power: maximum regulatory transmission power (in dBm) * @beacon_found: helper to regulatory code to indicate when a beacon * has been found on this channel. Use regulatory_hint_found_beacon() * to enable this, this is useful only on 5 GHz band. * @orig_mag: internal use * @orig_mpwr: internal use * @dfs_state: current state of this channel. Only relevant if radar is required * on this channel. * @dfs_state_entered: timestamp (jiffies) when the dfs state was entered. * @dfs_cac_ms: DFS CAC time in milliseconds, this is valid for DFS channels. */ struct ieee80211_channel { enum nl80211_band band; u32 center_freq; u16 hw_value; u32 flags; int max_antenna_gain; int max_power; int max_reg_power; bool beacon_found; u32 orig_flags; int orig_mag, orig_mpwr; enum nl80211_dfs_state dfs_state; unsigned long dfs_state_entered; unsigned int dfs_cac_ms; }; /** * enum ieee80211_rate_flags - rate flags * * Hardware/specification flags for rates. These are structured * in a way that allows using the same bitrate structure for * different bands/PHY modes. * * @IEEE80211_RATE_SHORT_PREAMBLE: Hardware can send with short * preamble on this bitrate; only relevant in 2.4GHz band and * with CCK rates. * @IEEE80211_RATE_MANDATORY_A: This bitrate is a mandatory rate * when used with 802.11a (on the 5 GHz band); filled by the * core code when registering the wiphy. * @IEEE80211_RATE_MANDATORY_B: This bitrate is a mandatory rate * when used with 802.11b (on the 2.4 GHz band); filled by the * core code when registering the wiphy. * @IEEE80211_RATE_MANDATORY_G: This bitrate is a mandatory rate * when used with 802.11g (on the 2.4 GHz band); filled by the * core code when registering the wiphy. * @IEEE80211_RATE_ERP_G: This is an ERP rate in 802.11g mode. 
* @IEEE80211_RATE_SUPPORTS_5MHZ: Rate can be used in 5 MHz mode * @IEEE80211_RATE_SUPPORTS_10MHZ: Rate can be used in 10 MHz mode */ enum ieee80211_rate_flags { IEEE80211_RATE_SHORT_PREAMBLE = 1<<0, IEEE80211_RATE_MANDATORY_A = 1<<1, IEEE80211_RATE_MANDATORY_B = 1<<2, IEEE80211_RATE_MANDATORY_G = 1<<3, IEEE80211_RATE_ERP_G = 1<<4, IEEE80211_RATE_SUPPORTS_5MHZ = 1<<5, IEEE80211_RATE_SUPPORTS_10MHZ = 1<<6, }; /** * enum ieee80211_bss_type - BSS type filter * * @IEEE80211_BSS_TYPE_ESS: Infrastructure BSS * @IEEE80211_BSS_TYPE_PBSS: Personal BSS * @IEEE80211_BSS_TYPE_IBSS: Independent BSS * @IEEE80211_BSS_TYPE_MBSS: Mesh BSS * @IEEE80211_BSS_TYPE_ANY: Wildcard value for matching any BSS type */ enum ieee80211_bss_type { IEEE80211_BSS_TYPE_ESS, IEEE80211_BSS_TYPE_PBSS, IEEE80211_BSS_TYPE_IBSS, IEEE80211_BSS_TYPE_MBSS, IEEE80211_BSS_TYPE_ANY }; /** * enum ieee80211_privacy - BSS privacy filter * * @IEEE80211_PRIVACY_ON: privacy bit set * @IEEE80211_PRIVACY_OFF: privacy bit clear * @IEEE80211_PRIVACY_ANY: Wildcard value for matching any privacy setting */ enum ieee80211_privacy { IEEE80211_PRIVACY_ON, IEEE80211_PRIVACY_OFF, IEEE80211_PRIVACY_ANY }; #define IEEE80211_PRIVACY(x) \ ((x) ? IEEE80211_PRIVACY_ON : IEEE80211_PRIVACY_OFF) /** * struct ieee80211_rate - bitrate definition * * This structure describes a bitrate that an 802.11 PHY can * operate with. The two values @hw_value and @hw_value_short * are only for driver use when pointers to this structure are * passed around. * * @flags: rate-specific flags * @bitrate: bitrate in units of 100 Kbps * @hw_value: driver/hardware value for this rate * @hw_value_short: driver/hardware value for this rate when * short preamble is used */ struct ieee80211_rate { u32 flags; u16 bitrate; u16 hw_value, hw_value_short; }; /** * struct ieee80211_sta_ht_cap - STA's HT capabilities * * This structure describes most essential parameters needed * to describe 802.11n HT capabilities for an STA. * * @ht_supported: is HT supported by the STA * @cap: HT capabilities map as described in 802.11n spec * @ampdu_factor: Maximum A-MPDU length factor * @ampdu_density: Minimum A-MPDU spacing * @mcs: Supported MCS rates */ struct ieee80211_sta_ht_cap { u16 cap; /* use IEEE80211_HT_CAP_ */ bool ht_supported; u8 ampdu_factor; u8 ampdu_density; struct ieee80211_mcs_info mcs; }; /** * struct ieee80211_sta_vht_cap - STA's VHT capabilities * * This structure describes most essential parameters needed * to describe 802.11ac VHT capabilities for an STA. * * @vht_supported: is VHT supported by the STA * @cap: VHT capabilities map as described in 802.11ac spec * @vht_mcs: Supported VHT MCS rates */ struct ieee80211_sta_vht_cap { bool vht_supported; u32 cap; /* use IEEE80211_VHT_CAP_ */ struct ieee80211_vht_mcs_info vht_mcs; }; #define IEEE80211_HE_PPE_THRES_MAX_LEN 25 /** * struct ieee80211_sta_he_cap - STA's HE capabilities * * This structure describes most essential parameters needed * to describe 802.11ax HE capabilities for a STA. * * @has_he: true iff HE data is valid. * @he_cap_elem: Fixed portion of the HE capabilities element. * @he_mcs_nss_supp: The supported NSS/MCS combinations. * @ppe_thres: Holds the PPE Thresholds data. */ struct ieee80211_sta_he_cap { bool has_he; struct ieee80211_he_cap_elem he_cap_elem; struct ieee80211_he_mcs_nss_supp he_mcs_nss_supp; u8 ppe_thres[IEEE80211_HE_PPE_THRES_MAX_LEN]; }; /** * struct ieee80211_sband_iftype_data * * This structure encapsulates sband data that is relevant for the * interface types defined in @types_mask. 
Each type in the * @types_mask must be unique across all instances of iftype_data. * * @types_mask: interface types mask * @he_cap: holds the HE capabilities */ struct ieee80211_sband_iftype_data { u16 types_mask; struct ieee80211_sta_he_cap he_cap; }; /** * struct ieee80211_supported_band - frequency band definition * * This structure describes a frequency band a wiphy * is able to operate in. * * @channels: Array of channels the hardware can operate in * in this band. * @band: the band this structure represents * @n_channels: Number of channels in @channels * @bitrates: Array of bitrates the hardware can operate with * in this band. Must be sorted to give a valid "supported * rates" IE, i.e. CCK rates first, then OFDM. * @n_bitrates: Number of bitrates in @bitrates * @ht_cap: HT capabilities in this band * @vht_cap: VHT capabilities in this band * @n_iftype_data: number of iftype data entries * @iftype_data: interface type data entries. Note that the bits in * @types_mask inside this structure cannot overlap (i.e. only * one occurrence of each type is allowed across all instances of * iftype_data). */ struct ieee80211_supported_band { struct ieee80211_channel *channels; struct ieee80211_rate *bitrates; enum nl80211_band band; int n_channels; int n_bitrates; struct ieee80211_sta_ht_cap ht_cap; struct ieee80211_sta_vht_cap vht_cap; u16 n_iftype_data; const struct ieee80211_sband_iftype_data *iftype_data; }; /** * ieee80211_get_sband_iftype_data - return sband data for a given iftype * @sband: the sband to search for the iftype on * @iftype: enum nl80211_iftype * * Return: pointer to struct ieee80211_sband_iftype_data, or NULL if none found */ static inline const struct ieee80211_sband_iftype_data * ieee80211_get_sband_iftype_data(const struct ieee80211_supported_band *sband, u8 iftype) { int i; if (WARN_ON(iftype >= NL80211_IFTYPE_MAX)) return NULL; for (i = 0; i < sband->n_iftype_data; i++) { const struct ieee80211_sband_iftype_data *data = &sband->iftype_data[i]; if (data->types_mask & BIT(iftype)) return data; } return NULL; } /** * ieee80211_get_he_sta_cap - return HE capabilities for an sband's STA * @sband: the sband to search for the STA on * * Return: pointer to the struct ieee80211_sta_he_cap, or NULL if none found */ static inline const struct ieee80211_sta_he_cap * ieee80211_get_he_sta_cap(const struct ieee80211_supported_band *sband) { const struct ieee80211_sband_iftype_data *data = ieee80211_get_sband_iftype_data(sband, NL80211_IFTYPE_STATION); if (data && data->he_cap.has_he) return &data->he_cap; return NULL; }
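/*
 * [Editor's illustrative sketch - not part of the original header] A typical
 * use of the helper above; the "my_" name is a hypothetical placeholder:
 */
static inline bool
my_band_supports_he(const struct ieee80211_supported_band *sband)
{
	const struct ieee80211_sta_he_cap *he_cap =
		ieee80211_get_he_sta_cap(sband);

	/* NULL means no STA iftype entry with valid HE data in this band */
	return he_cap != NULL;
}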
*/ #ifdef CONFIG_OF void wiphy_read_of_freq_limits(struct wiphy *wiphy); #else /* CONFIG_OF */ static inline void wiphy_read_of_freq_limits(struct wiphy *wiphy) { } #endif /* !CONFIG_OF */ /* * Wireless hardware/device configuration structures and methods */ /** * DOC: Actions and configuration * * Each wireless device and each virtual interface offer a set of configuration * operations and other actions that are invoked by userspace. Each of these * actions is described in the operations structure, and the parameters these * operations use are described separately. * * Additionally, some operations are asynchronous and expect to get status * information via some functions that drivers need to call. * * Scanning and BSS list handling with its associated functionality is described * in a separate chapter. */ #define VHT_MUMIMO_GROUPS_DATA_LEN (WLAN_MEMBERSHIP_LEN +\ WLAN_USER_POSITION_LEN) /** * struct vif_params - describes virtual interface parameters * @flags: monitor interface flags, unchanged if 0, otherwise * %MONITOR_FLAG_CHANGED will be set * @use_4addr: use 4-address frames * @macaddr: address to use for this virtual interface. * If this parameter is set to zero address the driver may * determine the address as needed. * This feature is only fully supported by drivers that enable the * %NL80211_FEATURE_MAC_ON_CREATE flag. Others may support creating * only p2p devices with specified MAC. * @vht_mumimo_groups: MU-MIMO groupID, used for monitoring MU-MIMO packets * belonging to that MU-MIMO groupID; %NULL if not changed * @vht_mumimo_follow_addr: MU-MIMO follow address, used for monitoring * MU-MIMO packets going to the specified station; %NULL if not changed */ struct vif_params { u32 flags; int use_4addr; u8 macaddr[ETH_ALEN]; const u8 *vht_mumimo_groups; const u8 *vht_mumimo_follow_addr; }; /** * struct key_params - key information * * Information about a key * * @key: key material * @key_len: length of key material * @cipher: cipher suite selector * @seq: sequence counter (IV/PN) for TKIP and CCMP keys, only used * with the get_key() callback, must be in little endian, * length given by @seq_len. * @seq_len: length of @seq. */ struct key_params { const u8 *key; const u8 *seq; int key_len; int seq_len; u32 cipher; }; /** * struct cfg80211_chan_def - channel definition * @chan: the (control) channel * @width: channel width * @center_freq1: center frequency of first segment * @center_freq2: center frequency of second segment * (only with 80+80 MHz) */ struct cfg80211_chan_def { struct ieee80211_channel *chan; enum nl80211_chan_width width; u32 center_freq1; u32 center_freq2; }; /** * cfg80211_get_chandef_type - return old channel type from chandef * @chandef: the channel definition * * Return: The old channel type (NOHT, HT20, HT40+/-) from a given * chandef, which must have a bandwidth allowing this conversion.
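 *
 * Illustrative round-trip sketch (the chan pointer is assumed to be a
 * valid control channel with a usable secondary channel above it):
 *
 *	struct cfg80211_chan_def chandef;
 *
 *	cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_HT40PLUS);
 *	WARN_ON(cfg80211_get_chandef_type(&chandef) != NL80211_CHAN_HT40PLUS);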
*/ static inline enum nl80211_channel_type cfg80211_get_chandef_type(const struct cfg80211_chan_def *chandef) { switch (chandef->width) { case NL80211_CHAN_WIDTH_20_NOHT: return NL80211_CHAN_NO_HT; case NL80211_CHAN_WIDTH_20: return NL80211_CHAN_HT20; case NL80211_CHAN_WIDTH_40: if (chandef->center_freq1 > chandef->chan->center_freq) return NL80211_CHAN_HT40PLUS; return NL80211_CHAN_HT40MINUS; default: WARN_ON(1); return NL80211_CHAN_NO_HT; } } /** * cfg80211_chandef_create - create channel definition using channel type * @chandef: the channel definition struct to fill * @channel: the control channel * @chantype: the channel type * * Given a channel type, create a channel definition. */ void cfg80211_chandef_create(struct cfg80211_chan_def *chandef, struct ieee80211_channel *channel, enum nl80211_channel_type chantype); /** * cfg80211_chandef_identical - check if two channel definitions are identical * @chandef1: first channel definition * @chandef2: second channel definition * * Return: %true if the channels defined by the channel definitions are * identical, %false otherwise. */ static inline bool cfg80211_chandef_identical(const struct cfg80211_chan_def *chandef1, const struct cfg80211_chan_def *chandef2) { return (chandef1->chan == chandef2->chan && chandef1->width == chandef2->width && chandef1->center_freq1 == chandef2->center_freq1 && chandef1->center_freq2 == chandef2->center_freq2); } /** * cfg80211_chandef_compatible - check if two channel definitions are compatible * @chandef1: first channel definition * @chandef2: second channel definition * * Return: %NULL if the given channel definitions are incompatible, * chandef1 or chandef2 otherwise. */ const struct cfg80211_chan_def * cfg80211_chandef_compatible(const struct cfg80211_chan_def *chandef1, const struct cfg80211_chan_def *chandef2); /** * cfg80211_chandef_valid - check if a channel definition is valid * @chandef: the channel definition to check * Return: %true if the channel definition is valid. %false otherwise. */ bool cfg80211_chandef_valid(const struct cfg80211_chan_def *chandef); /** * cfg80211_chandef_usable - check if secondary channels can be used * @wiphy: the wiphy to validate against * @chandef: the channel definition to check * @prohibited_flags: the regulatory channel flags that must not be set * Return: %true if secondary channels are usable. %false otherwise. */ bool cfg80211_chandef_usable(struct wiphy *wiphy, const struct cfg80211_chan_def *chandef, u32 prohibited_flags); /** * cfg80211_chandef_dfs_required - checks if radar detection is required * @wiphy: the wiphy to validate against * @chandef: the channel definition to check * @iftype: the interface type as specified in &enum nl80211_iftype * Returns: * 1 if radar detection is required, 0 if it is not, < 0 on error */ int cfg80211_chandef_dfs_required(struct wiphy *wiphy, const struct cfg80211_chan_def *chandef, enum nl80211_iftype iftype); /** * ieee80211_chandef_rate_flags - returns rate flags for a channel * * In some channel types, not all rates may be used - for example CCK * rates may not be used in 5/10 MHz channels. 
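 *
 * A typical (illustrative) pattern when walking an sband's bitrates;
 * the sband, chandef, rate_flags, i and n_usable variables are assumed:
 *
 *	rate_flags = ieee80211_chandef_rate_flags(chandef);
 *	for (i = 0; i < sband->n_bitrates; i++)
 *		if ((sband->bitrates[i].flags & rate_flags) == rate_flags)
 *			n_usable++;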
* * @chandef: channel definition for the channel * * Returns: rate flags which apply for this channel */ static inline enum ieee80211_rate_flags ieee80211_chandef_rate_flags(struct cfg80211_chan_def *chandef) { switch (chandef->width) { case NL80211_CHAN_WIDTH_5: return IEEE80211_RATE_SUPPORTS_5MHZ; case NL80211_CHAN_WIDTH_10: return IEEE80211_RATE_SUPPORTS_10MHZ; default: break; } return 0; } /** * ieee80211_chandef_max_power - maximum transmission power for the chandef * * In some regulations, the transmit power may depend on the configured channel * bandwidth which may be defined as dBm/MHz. This function returns the actual * max_power for non-standard (narrower than 20 MHz) channels. * * @chandef: channel definition for the channel * * Returns: maximum allowed transmission power in dBm for the chandef */ static inline int ieee80211_chandef_max_power(struct cfg80211_chan_def *chandef) { switch (chandef->width) { case NL80211_CHAN_WIDTH_5: return min(chandef->chan->max_reg_power - 6, chandef->chan->max_power); case NL80211_CHAN_WIDTH_10: return min(chandef->chan->max_reg_power - 3, chandef->chan->max_power); default: break; } return chandef->chan->max_power; } /** * enum survey_info_flags - survey information flags * * @SURVEY_INFO_NOISE_DBM: noise (in dBm) was filled in * @SURVEY_INFO_IN_USE: channel is currently being used * @SURVEY_INFO_TIME: active time (in ms) was filled in * @SURVEY_INFO_TIME_BUSY: busy time was filled in * @SURVEY_INFO_TIME_EXT_BUSY: extension channel busy time was filled in * @SURVEY_INFO_TIME_RX: receive time was filled in * @SURVEY_INFO_TIME_TX: transmit time was filled in * @SURVEY_INFO_TIME_SCAN: scan time was filled in * * Used by the driver to indicate which info in &struct survey_info * it has filled in during the get_survey(). */ enum survey_info_flags { SURVEY_INFO_NOISE_DBM = BIT(0), SURVEY_INFO_IN_USE = BIT(1), SURVEY_INFO_TIME = BIT(2), SURVEY_INFO_TIME_BUSY = BIT(3), SURVEY_INFO_TIME_EXT_BUSY = BIT(4), SURVEY_INFO_TIME_RX = BIT(5), SURVEY_INFO_TIME_TX = BIT(6), SURVEY_INFO_TIME_SCAN = BIT(7), }; /** * struct survey_info - channel survey response * * @channel: the channel this survey record reports, may be %NULL for a single * record to report global statistics * @filled: bitflag of flags from &enum survey_info_flags * @noise: channel noise in dBm. This and all following fields are * optional * @time: amount of time in ms the radio was turned on (on the channel) * @time_busy: amount of time the primary channel was sensed busy * @time_ext_busy: amount of time the extension channel was sensed busy * @time_rx: amount of time the radio spent receiving data * @time_tx: amount of time the radio spent transmitting data * @time_scan: amount of time the radio spent for scanning * * Used by dump_survey() to report back per-channel survey information. * * This structure can later be expanded with things like * channel duty cycle etc.
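 *
 * A driver's get_survey() might fill it like this (values made up for
 * illustration; chan is an assumed channel pointer):
 *
 *	survey->channel = chan;
 *	survey->filled = SURVEY_INFO_NOISE_DBM | SURVEY_INFO_IN_USE;
 *	survey->noise = -92;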
*/ struct survey_info { struct ieee80211_channel *channel; u64 time; u64 time_busy; u64 time_ext_busy; u64 time_rx; u64 time_tx; u64 time_scan; u32 filled; s8 noise; }; #define CFG80211_MAX_WEP_KEYS 4 /** * struct cfg80211_crypto_settings - Crypto settings * @wpa_versions: indicates which, if any, WPA versions are enabled * (from enum nl80211_wpa_versions) * @n_ciphers_group: number of supported group ciphers * @ciphers_group: group key cipher suites * @n_ciphers_pairwise: number of AP supported unicast ciphers * @ciphers_pairwise: unicast key cipher suites * @n_akm_suites: number of AKM suites * @akm_suites: AKM suites * @control_port: Whether user space controls IEEE 802.1X port, i.e., * sets/clears %NL80211_STA_FLAG_AUTHORIZED. If true, the driver is * required to assume that the port is unauthorized until authorized by * user space. Otherwise, port is marked authorized by default. * @control_port_ethertype: the control port protocol that should be * allowed through even on unauthorized ports * @control_port_no_encrypt: TRUE to prevent encryption of control port * protocol frames. * @control_port_over_nl80211: TRUE if userspace expects to exchange control * port frames over NL80211 instead of the network interface. * @wep_keys: static WEP keys, if not NULL points to an array of * CFG80211_MAX_WEP_KEYS WEP keys * @wep_tx_key: key index (0..3) of the default TX static WEP key * @psk: PSK (for devices supporting 4-way-handshake offload) */ struct cfg80211_crypto_settings { u32 wpa_versions; int n_ciphers_group; u32 ciphers_group[NL80211_MAX_NR_CIPHER_SUITES]; int n_ciphers_pairwise; u32 ciphers_pairwise[NL80211_MAX_NR_CIPHER_SUITES]; int n_akm_suites; u32 akm_suites[NL80211_MAX_NR_AKM_SUITES]; bool control_port; __be16 control_port_ethertype; bool control_port_no_encrypt; bool control_port_over_nl80211; struct key_params *wep_keys; int wep_tx_key; const u8 *psk; }; /** * struct cfg80211_beacon_data - beacon data * @head: head portion of beacon (before TIM IE) * or %NULL if not changed * @tail: tail portion of beacon (after TIM IE) * or %NULL if not changed * @head_len: length of @head * @tail_len: length of @tail * @beacon_ies: extra information element(s) to add into Beacon frames or %NULL * @beacon_ies_len: length of beacon_ies in octets * @proberesp_ies: extra information element(s) to add into Probe Response * frames or %NULL * @proberesp_ies_len: length of proberesp_ies in octets * @assocresp_ies: extra information element(s) to add into (Re)Association * Response frames or %NULL * @assocresp_ies_len: length of assocresp_ies in octets * @probe_resp_len: length of probe response template (@probe_resp) * @probe_resp: probe response template (AP mode only) * @ftm_responder: enable FTM responder functionality; -1 for no change * (which also implies no change in LCI/civic location data) * @lci: Measurement Report element content, starting with Measurement Token * (measurement type 8) * @civicloc: Measurement Report element content, starting with Measurement * Token (measurement type 11) * @lci_len: LCI data length * @civicloc_len: Civic location data length */ struct cfg80211_beacon_data { const u8 *head, *tail; const u8 *beacon_ies; const u8 *proberesp_ies; const u8 *assocresp_ies; const u8 *probe_resp; const u8 *lci; const u8 *civicloc; s8 ftm_responder; size_t head_len, tail_len; size_t beacon_ies_len; size_t proberesp_ies_len; size_t assocresp_ies_len; size_t probe_resp_len; size_t lci_len; size_t civicloc_len; }; struct mac_address { u8 addr[ETH_ALEN]; }; /** * struct 
cfg80211_acl_data - Access control list data * * @acl_policy: ACL policy to be applied on the station's * entry specified by mac_addr * @n_acl_entries: Number of MAC address entries passed * @mac_addrs: List of MAC addresses of stations to be used for ACL */ struct cfg80211_acl_data { enum nl80211_acl_policy acl_policy; int n_acl_entries; /* Keep it last */ struct mac_address mac_addrs[]; }; /* * cfg80211_bitrate_mask - masks for bitrate control */ struct cfg80211_bitrate_mask { struct { u32 legacy; u8 ht_mcs[IEEE80211_HT_MCS_MASK_LEN]; u16 vht_mcs[NL80211_VHT_NSS_MAX]; enum nl80211_txrate_gi gi; } control[NUM_NL80211_BANDS]; }; /** * struct cfg80211_ap_settings - AP configuration * * Used to configure an AP interface. * * @chandef: defines the channel to use * @beacon: beacon data * @beacon_interval: beacon interval * @dtim_period: DTIM period * @ssid: SSID to be used in the BSS (note: may be %NULL if not provided from * user space) * @ssid_len: length of @ssid * @hidden_ssid: whether to hide the SSID in Beacon/Probe Response frames * @crypto: crypto settings * @privacy: the BSS uses privacy * @auth_type: Authentication type (algorithm) * @smps_mode: SMPS mode * @inactivity_timeout: time in seconds to determine station's inactivity. * @p2p_ctwindow: P2P CT Window * @p2p_opp_ps: P2P opportunistic PS * @acl: ACL configuration used by drivers which have support for * MAC address based access control * @pbss: If set, start as a PCP instead of AP. Relevant for DMG * networks. * @beacon_rate: bitrate to be used for beacons * @ht_cap: HT capabilities (or %NULL if HT isn't enabled) * @vht_cap: VHT capabilities (or %NULL if VHT isn't enabled) * @he_cap: HE capabilities (or %NULL if HE isn't enabled) * @ht_required: stations must support HT * @vht_required: stations must support VHT */ struct cfg80211_ap_settings { struct cfg80211_chan_def chandef; struct cfg80211_beacon_data beacon; int beacon_interval, dtim_period; const u8 *ssid; size_t ssid_len; enum nl80211_hidden_ssid hidden_ssid; struct cfg80211_crypto_settings crypto; bool privacy; enum nl80211_auth_type auth_type; enum nl80211_smps_mode smps_mode; int inactivity_timeout; u8 p2p_ctwindow; bool p2p_opp_ps; const struct cfg80211_acl_data *acl; bool pbss; struct cfg80211_bitrate_mask beacon_rate; const struct ieee80211_ht_cap *ht_cap; const struct ieee80211_vht_cap *vht_cap; const struct ieee80211_he_cap_elem *he_cap; bool ht_required, vht_required; }; /** * struct cfg80211_csa_settings - channel switch settings * * Used for channel switch * * @chandef: defines the channel to use after the switch * @beacon_csa: beacon data while performing the switch * @counter_offsets_beacon: offsets of the counters within the beacon (tail) * @counter_offsets_presp: offsets of the counters within the probe response * @n_counter_offsets_beacon: number of csa counters in the beacon (tail) * @n_counter_offsets_presp: number of csa counters in the probe response * @beacon_after: beacon data to be used on the new channel * @radar_required: whether radar detection is required on the new channel * @block_tx: whether transmissions should be blocked while changing * @count: number of beacons until switch */ struct cfg80211_csa_settings { struct cfg80211_chan_def chandef; struct cfg80211_beacon_data beacon_csa; const u16 *counter_offsets_beacon; const u16 *counter_offsets_presp; unsigned int n_counter_offsets_beacon; unsigned int n_counter_offsets_presp; struct cfg80211_beacon_data beacon_after; bool radar_required; bool block_tx; u8 count; }; #define
CFG80211_MAX_NUM_DIFFERENT_CHANNELS 10 /** * struct iface_combination_params - input parameters for interface combinations * * Used to pass interface combination parameters * * @num_different_channels: the number of different channels we want * to use for verification * @radar_detect: a bitmap where each bit corresponds to a channel * width where radar detection is needed, as in the definition of * &struct ieee80211_iface_combination.@radar_detect_widths * @iftype_num: array with the number of interfaces of each interface * type. The index is the interface type as specified in &enum * nl80211_iftype. * @new_beacon_int: set this to the beacon interval of a new interface * that's not operating yet, if such is to be checked as part of * the verification */ struct iface_combination_params { int num_different_channels; u8 radar_detect; int iftype_num[NUM_NL80211_IFTYPES]; u32 new_beacon_int; }; /** * enum station_parameters_apply_mask - station parameter values to apply * @STATION_PARAM_APPLY_UAPSD: apply new uAPSD parameters (uapsd_queues, max_sp) * @STATION_PARAM_APPLY_CAPABILITY: apply new capability * @STATION_PARAM_APPLY_PLINK_STATE: apply new plink state * * Not all station parameters have in-band "no change" signalling, * for those that don't, these flags are used. */ enum station_parameters_apply_mask { STATION_PARAM_APPLY_UAPSD = BIT(0), STATION_PARAM_APPLY_CAPABILITY = BIT(1), STATION_PARAM_APPLY_PLINK_STATE = BIT(2), }; /** * struct station_parameters - station parameters * * Used to create a new station or change an existing one. * * @vlan: vlan interface station should belong to * @supported_rates: supported rates in IEEE 802.11 format * (or NULL for no change) * @supported_rates_len: number of supported rates * @sta_flags_mask: station flags that changed * (bitmask of BIT(%NL80211_STA_FLAG_...)) * @sta_flags_set: station flags values * (bitmask of BIT(%NL80211_STA_FLAG_...)) * @listen_interval: listen interval or -1 for no change * @aid: AID or zero for no change * @peer_aid: mesh peer AID or zero for no change * @plink_action: plink action to take * @plink_state: set the peer link state for a station * @ht_capa: HT capabilities of station * @vht_capa: VHT capabilities of station * @uapsd_queues: bitmap of queues configured for uapsd. same format * as the AC bitmap in the QoS info field * @max_sp: max Service Period.
same format as the MAX_SP in the * QoS info field (but already shifted down) * @sta_modify_mask: bitmap indicating which parameters changed * (for those that don't have a natural "no change" value), * see &enum station_parameters_apply_mask * @local_pm: local link-specific mesh power save mode (no change when set * to unknown) * @capability: station capability * @ext_capab: extended capabilities of the station * @ext_capab_len: number of extended capabilities * @supported_channels: supported channels in IEEE 802.11 format * @supported_channels_len: number of supported channels * @supported_oper_classes: supported oper classes in IEEE 802.11 format * @supported_oper_classes_len: number of supported operating classes * @opmode_notif: operating mode field from Operating Mode Notification * @opmode_notif_used: whether the operating mode field is used * @support_p2p_ps: whether the station supports the P2P PS mechanism * @he_capa: HE capabilities of station * @he_capa_len: the length of the HE capabilities */ struct station_parameters { const u8 *supported_rates; struct net_device *vlan; u32 sta_flags_mask, sta_flags_set; u32 sta_modify_mask; int listen_interval; u16 aid; u16 peer_aid; u8 supported_rates_len; u8 plink_action; u8 plink_state; const struct ieee80211_ht_cap *ht_capa; const struct ieee80211_vht_cap *vht_capa; u8 uapsd_queues; u8 max_sp; enum nl80211_mesh_power_mode local_pm; u16 capability; const u8 *ext_capab; u8 ext_capab_len; const u8 *supported_channels; u8 supported_channels_len; const u8 *supported_oper_classes; u8 supported_oper_classes_len; u8 opmode_notif; bool opmode_notif_used; int support_p2p_ps; const struct ieee80211_he_cap_elem *he_capa; u8 he_capa_len; }; /** * struct station_del_parameters - station deletion parameters * * Used to delete a station entry (or all stations).
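 *
 * For instance, deauthenticating all stations at once could be expressed
 * as (illustrative only; WLAN_REASON_DEAUTH_LEAVING is a standard reason
 * code from ieee80211.h):
 *
 *	struct station_del_parameters del = {
 *		.mac = NULL,
 *		.subtype = 12,
 *		.reason_code = WLAN_REASON_DEAUTH_LEAVING,
 *	};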
* * @mac: MAC address of the station to remove or NULL to remove all stations * @subtype: Management frame subtype to use for indicating removal * (10 = Disassociation, 12 = Deauthentication) * @reason_code: Reason code for the Disassociation/Deauthentication frame */ struct station_del_parameters { const u8 *mac; u8 subtype; u16 reason_code; }; /** * enum cfg80211_station_type - the type of station being modified * @CFG80211_STA_AP_CLIENT: client of an AP interface * @CFG80211_STA_AP_CLIENT_UNASSOC: client of an AP interface that is still * unassociated (updating properties for this type of client is permitted) * @CFG80211_STA_AP_MLME_CLIENT: client of an AP interface that has * the AP MLME in the device * @CFG80211_STA_AP_STA: AP station on managed interface * @CFG80211_STA_IBSS: IBSS station * @CFG80211_STA_TDLS_PEER_SETUP: TDLS peer on managed interface (dummy entry * while TDLS setup is in progress, it moves out of this state when * being marked authorized; use this only if TDLS with external setup is * supported/used) * @CFG80211_STA_TDLS_PEER_ACTIVE: TDLS peer on managed interface (active * entry that is operating, has been marked authorized by userspace) * @CFG80211_STA_MESH_PEER_KERNEL: peer on mesh interface (kernel managed) * @CFG80211_STA_MESH_PEER_USER: peer on mesh interface (user managed) */ enum cfg80211_station_type { CFG80211_STA_AP_CLIENT, CFG80211_STA_AP_CLIENT_UNASSOC, CFG80211_STA_AP_MLME_CLIENT, CFG80211_STA_AP_STA, CFG80211_STA_IBSS, CFG80211_STA_TDLS_PEER_SETUP, CFG80211_STA_TDLS_PEER_ACTIVE, CFG80211_STA_MESH_PEER_KERNEL, CFG80211_STA_MESH_PEER_USER, }; /** * cfg80211_check_station_change - validate parameter changes * @wiphy: the wiphy this operates on * @params: the new parameters for a station * @statype: the type of station being modified * * Utility function for the @change_station driver method. Call this function * with the appropriate station type after looking up the station (and checking * that it exists). It will verify whether the station change is acceptable, and * if not will return an error code. Note that it may modify the parameters for * backward compatibility reasons, so don't use them before calling this. */ int cfg80211_check_station_change(struct wiphy *wiphy, struct station_parameters *params, enum cfg80211_station_type statype); /** * enum rate_info_flags - bitrate info flags * * Used by the driver to indicate the specific rate transmission * type for 802.11n transmissions. * * @RATE_INFO_FLAGS_MCS: mcs field filled with HT MCS * @RATE_INFO_FLAGS_VHT_MCS: mcs field filled with VHT MCS * @RATE_INFO_FLAGS_SHORT_GI: 400ns guard interval * @RATE_INFO_FLAGS_60G: 60GHz MCS * @RATE_INFO_FLAGS_HE_MCS: HE MCS information */ enum rate_info_flags { RATE_INFO_FLAGS_MCS = BIT(0), RATE_INFO_FLAGS_VHT_MCS = BIT(1), RATE_INFO_FLAGS_SHORT_GI = BIT(2), RATE_INFO_FLAGS_60G = BIT(3), RATE_INFO_FLAGS_HE_MCS = BIT(4), }; /** * enum rate_info_bw - rate bandwidth information * * Used by the driver to indicate the rate bandwidth.
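 *
 * For example, a 2-stream VHT MCS 9 rate on an 80 MHz channel could be
 * reported as (illustrative values):
 *
 *	struct rate_info ri = {
 *		.flags = RATE_INFO_FLAGS_VHT_MCS | RATE_INFO_FLAGS_SHORT_GI,
 *		.mcs = 9,
 *		.nss = 2,
 *		.bw = RATE_INFO_BW_80,
 *	};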
* * @RATE_INFO_BW_5: 5 MHz bandwidth * @RATE_INFO_BW_10: 10 MHz bandwidth * @RATE_INFO_BW_20: 20 MHz bandwidth * @RATE_INFO_BW_40: 40 MHz bandwidth * @RATE_INFO_BW_80: 80 MHz bandwidth * @RATE_INFO_BW_160: 160 MHz bandwidth * @RATE_INFO_BW_HE_RU: bandwidth determined by HE RU allocation */ enum rate_info_bw { RATE_INFO_BW_20 = 0, RATE_INFO_BW_5, RATE_INFO_BW_10, RATE_INFO_BW_40, RATE_INFO_BW_80, RATE_INFO_BW_160, RATE_INFO_BW_HE_RU, }; /** * struct rate_info - bitrate information * * Information about a receiving or transmitting bitrate * * @flags: bitflag of flags from &enum rate_info_flags * @mcs: mcs index if struct describes an HT/VHT/HE rate * @legacy: bitrate in 100kbit/s for 802.11abg * @nss: number of streams (VHT & HE only) * @bw: bandwidth (from &enum rate_info_bw) * @he_gi: HE guard interval (from &enum nl80211_he_gi) * @he_dcm: HE DCM value * @he_ru_alloc: HE RU allocation (from &enum nl80211_he_ru_alloc, * only valid if bw is %RATE_INFO_BW_HE_RU) */ struct rate_info { u8 flags; u8 mcs; u16 legacy; u8 nss; u8 bw; u8 he_gi; u8 he_dcm; u8 he_ru_alloc; }; /** * enum bss_param_flags - BSS parameter flags * * Used by the driver to indicate which BSS parameters are in effect * for the attached station's BSS. * * @BSS_PARAM_FLAGS_CTS_PROT: whether CTS protection is enabled * @BSS_PARAM_FLAGS_SHORT_PREAMBLE: whether short preamble is enabled * @BSS_PARAM_FLAGS_SHORT_SLOT_TIME: whether short slot time is enabled */ enum bss_param_flags { BSS_PARAM_FLAGS_CTS_PROT = 1<<0, BSS_PARAM_FLAGS_SHORT_PREAMBLE = 1<<1, BSS_PARAM_FLAGS_SHORT_SLOT_TIME = 1<<2, }; /** * struct sta_bss_parameters - BSS parameters for the attached station * * Information about the currently associated BSS * * @flags: bitflag of flags from &enum bss_param_flags * @dtim_period: DTIM period for the BSS * @beacon_interval: beacon interval */ struct sta_bss_parameters { u8 flags; u8 dtim_period; u16 beacon_interval; }; /** * struct cfg80211_txq_stats - TXQ statistics for this TID * @filled: bitmap of flags using the bits of &enum nl80211_txq_stats to * indicate the relevant values in this struct are filled * @backlog_bytes: total number of bytes currently backlogged * @backlog_packets: total number of packets currently backlogged * @flows: number of new flows seen * @drops: total number of packets dropped * @ecn_marks: total number of packets marked with ECN CE * @overlimit: number of drops due to queue space overflow * @overmemory: number of drops due to memory limit overflow * @collisions: number of hash collisions * @tx_bytes: total number of bytes dequeued * @tx_packets: total number of packets dequeued * @max_flows: maximum number of flows supported */ struct cfg80211_txq_stats { u32 filled; u32 backlog_bytes; u32 backlog_packets; u32 flows; u32 drops; u32 ecn_marks; u32 overlimit; u32 overmemory; u32 collisions; u32 tx_bytes; u32 tx_packets; u32 max_flows; }; /** * struct cfg80211_tid_stats - per-TID statistics * @filled: bitmap of flags using the bits of &enum nl80211_tid_stats to * indicate the relevant values in this struct are filled * @rx_msdu: number of received MSDUs * @tx_msdu: number of (attempted) transmitted MSDUs * @tx_msdu_retries: number of retries (not counting the first) for * transmitted MSDUs * @tx_msdu_failed: number of failed transmitted MSDUs * @txq_stats: TXQ statistics */ struct cfg80211_tid_stats { u32 filled; u64 rx_msdu; u64 tx_msdu; u64 tx_msdu_retries; u64 tx_msdu_failed; struct cfg80211_txq_stats txq_stats; }; #define IEEE80211_MAX_CHAINS 4 /** * struct station_info - station
information * * Station information filled by driver for get_station() and dump_station. * * @filled: bitflag of flags using the bits of &enum nl80211_sta_info to * indicate the relevant values in this struct for them * @connected_time: time (in secs) since the station last connected * @inactive_time: time since last station activity (tx/rx) in milliseconds * @rx_bytes: bytes (size of MPDUs) received from this station * @tx_bytes: bytes (size of MPDUs) transmitted to this station * @llid: mesh local link id * @plid: mesh peer link id * @plink_state: mesh peer link state * @signal: The signal strength, type depends on the wiphy's signal_type. * For CFG80211_SIGNAL_TYPE_MBM, value is expressed in _dBm_. * @signal_avg: Average signal strength, type depends on the wiphy's signal_type. * For CFG80211_SIGNAL_TYPE_MBM, value is expressed in _dBm_. * @chains: bitmask for filled values in @chain_signal, @chain_signal_avg * @chain_signal: per-chain signal strength of last received packet in dBm * @chain_signal_avg: per-chain signal strength average in dBm * @txrate: current unicast bitrate from this station * @rxrate: current unicast bitrate to this station * @rx_packets: packets (MSDUs & MMPDUs) received from this station * @tx_packets: packets (MSDUs & MMPDUs) transmitted to this station * @tx_retries: cumulative retry counts (MPDUs) * @tx_failed: number of failed transmissions (MPDUs) (retries exceeded, no ACK) * @rx_dropped_misc: Dropped for unspecified reasons. * @bss_param: current BSS parameters * @generation: generation number for nl80211 dumps. * This number should increase every time the list of stations * changes, i.e. when a station is added or removed, so that * userspace can tell whether it got a consistent snapshot. * @assoc_req_ies: IEs from (Re)Association Request. * This is used only when in AP mode with drivers that do not use * user space MLME/SME implementation. The information is provided for * the cfg80211_new_sta() calls to notify user space of the IEs. * @assoc_req_ies_len: Length of assoc_req_ies buffer in octets. * @sta_flags: station flags mask & values * @beacon_loss_count: Number of times beacon loss event has triggered. * @t_offset: Time offset of the station relative to this host. * @local_pm: local mesh STA power save mode * @peer_pm: peer mesh STA power save mode * @nonpeer_pm: non-peer mesh STA power save mode * @expected_throughput: expected throughput in kbps (including 802.11 headers) * towards this station. * @rx_beacon: number of beacons received from this peer * @rx_beacon_signal_avg: signal strength average (in dBm) for beacons received * from this peer * @connected_to_gate: true if mesh STA has a path to mesh gate * @rx_duration: aggregate PPDU duration (usecs) for all the frames from a peer * @pertid: per-TID statistics, see &struct cfg80211_tid_stats, using the last * (IEEE80211_NUM_TIDS) index for MSDUs not encapsulated in QoS-MPDUs. * Note that this doesn't use the @filled bit, but is used if non-NULL. * @ack_signal: signal strength (in dBm) of the last ACK frame. * @avg_ack_signal: average RSSI of the ACK packets for the MSDUs that have * been sent. * @rx_mpdu_count: number of MPDUs received from this station * @fcs_err_count: number of packets (MPDUs) received from this station with * an FCS error. This counter should be incremented only when TA of the * received packet with an FCS error matches the peer MAC address.
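 *
 * When only some values are reported, the driver sets the matching bits
 * in @filled, e.g. (illustrative values):
 *
 *	sinfo->filled = BIT_ULL(NL80211_STA_INFO_SIGNAL) |
 *			BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
 *	sinfo->signal = -56;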
*/ struct station_info { u64 filled; u32 connected_time; u32 inactive_time; u64 rx_bytes; u64 tx_bytes; u16 llid; u16 plid; u8 plink_state; s8 signal; s8 signal_avg; u8 chains; s8 chain_signal[IEEE80211_MAX_CHAINS]; s8 chain_signal_avg[IEEE80211_MAX_CHAINS]; struct rate_info txrate; struct rate_info rxrate; u32 rx_packets; u32 tx_packets; u32 tx_retries; u32 tx_failed; u32 rx_dropped_misc; struct sta_bss_parameters bss_param; struct nl80211_sta_flag_update sta_flags; int generation; const u8 *assoc_req_ies; size_t assoc_req_ies_len; u32 beacon_loss_count; s64 t_offset; enum nl80211_mesh_power_mode local_pm; enum nl80211_mesh_power_mode peer_pm; enum nl80211_mesh_power_mode nonpeer_pm; u32 expected_throughput; u64 rx_beacon; u64 rx_duration; u8 rx_beacon_signal_avg; u8 connected_to_gate; struct cfg80211_tid_stats *pertid; s8 ack_signal; s8 avg_ack_signal; u32 rx_mpdu_count; u32 fcs_err_count; }; #if IS_ENABLED(CPTCFG_CFG80211) /** * cfg80211_get_station - retrieve information about a given station * @dev: the device where the station is supposed to be connected to * @mac_addr: the mac address of the station of interest * @sinfo: pointer to the structure to fill with the information * * Returns 0 on success and sinfo is filled with the available information; * otherwise returns a negative error code and the content of sinfo has to be * considered undefined. */ int cfg80211_get_station(struct net_device *dev, const u8 *mac_addr, struct station_info *sinfo); #else static inline int cfg80211_get_station(struct net_device *dev, const u8 *mac_addr, struct station_info *sinfo) { return -ENOENT; } #endif /** * enum monitor_flags - monitor flags * * Monitor interface configuration flags. Note that these must be the bits * according to the nl80211 flags. * * @MONITOR_FLAG_CHANGED: set if the flags were changed * @MONITOR_FLAG_FCSFAIL: pass frames with bad FCS * @MONITOR_FLAG_PLCPFAIL: pass frames with bad PLCP * @MONITOR_FLAG_CONTROL: pass control frames * @MONITOR_FLAG_OTHER_BSS: disable BSSID filtering * @MONITOR_FLAG_COOK_FRAMES: report frames after processing * @MONITOR_FLAG_ACTIVE: active monitor, ACKs frames on its MAC address */ enum monitor_flags { MONITOR_FLAG_CHANGED = 1<<__NL80211_MNTR_FLAG_INVALID, MONITOR_FLAG_FCSFAIL = 1<<NL80211_MNTR_FLAG_FCSFAIL, MONITOR_FLAG_PLCPFAIL = 1<<NL80211_MNTR_FLAG_PLCPFAIL, MONITOR_FLAG_CONTROL = 1<<NL80211_MNTR_FLAG_CONTROL, MONITOR_FLAG_OTHER_BSS = 1<<NL80211_MNTR_FLAG_OTHER_BSS, MONITOR_FLAG_COOK_FRAMES = 1<<NL80211_MNTR_FLAG_COOK_FRAMES, MONITOR_FLAG_ACTIVE = 1<<NL80211_MNTR_FLAG_ACTIVE, }; /** * struct cfg80211_bss - BSS description * * This structure describes a BSS (which may also be a mesh network) * for use in scan results and similar. * * @channel: channel this BSS is on * @scan_width: width of the control channel * @bssid: BSSID of the BSS * @beacon_interval: the beacon interval as from the frame * @capability: the capability field in host byte order * @ies: the information elements (Note that there is no guarantee that these * are well-formed!); this is a pointer to either the beacon_ies or * proberesp_ies depending on whether a Probe Response frame has been * received * @beacon_ies: the information elements from the last Beacon frame * @proberesp_ies: the information elements from the last Probe Response frame * @hidden_beacon_bss: in case this BSS struct represents a probe response from * a BSS that hides the SSID in its beacon, this points to the BSS struct * that holds the beacon data. @beacon_ies is still valid, of course, and * points to the same data as hidden_beacon_bss->beacon_ies in that case. * @transmitted_bss: pointer to the transmitted BSS, if this is a * non-transmitted one (multiple BSSID support) * @nontrans_list: list of non-transmitted BSSes, if this is a transmitted one * (multiple BSSID support) * @signal: signal strength value (type depends on the wiphy's signal_type) * @chains: bitmask for filled values in @chain_signal. * @chain_signal: per-chain signal strength of last received BSS in dBm. * @bssid_index: index in the multiple BSS set * @max_bssid_indicator: max number of members in the BSS set * @priv: private area for driver use, has at least wiphy->bss_priv_size bytes */ struct cfg80211_bss { struct ieee80211_channel *channel; enum nl80211_bss_scan_width scan_width; const struct cfg80211_bss_ies __rcu *ies; const struct cfg80211_bss_ies __rcu *beacon_ies; const struct cfg80211_bss_ies __rcu *proberesp_ies; struct cfg80211_bss *hidden_beacon_bss; struct cfg80211_bss *transmitted_bss; struct list_head nontrans_list; s32 signal; u16 beacon_interval; u16 capability; u8 bssid[ETH_ALEN]; u8 chains; s8 chain_signal[IEEE80211_MAX_CHAINS]; u8 bssid_index; u8 max_bssid_indicator; u8 priv[0] __aligned(sizeof(void *)); }; /** * ieee80211_bss_get_elem - find element with given ID * @bss: the bss to search * @id: the element ID * * Note that the return value is an RCU-protected pointer, so * rcu_read_lock() must be held when calling this function. * Return: %NULL if not found.
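 *
 * Usage sketch (the ssid destination buffer is assumed):
 *
 *	rcu_read_lock();
 *	elem = ieee80211_bss_get_elem(bss, WLAN_EID_SSID);
 *	if (elem && elem->datalen <= IEEE80211_MAX_SSID_LEN)
 *		memcpy(ssid, elem->data, elem->datalen);
 *	rcu_read_unlock();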
*/ const struct element *ieee80211_bss_get_elem(struct cfg80211_bss *bss, u8 id); /** * ieee80211_bss_get_ie - find IE with given ID * @bss: the bss to search * @id: the element ID * * Note that the return value is an RCU-protected pointer, so * rcu_read_lock() must be held when calling this function. * Return: %NULL if not found. */ static inline const u8 *ieee80211_bss_get_ie(struct cfg80211_bss *bss, u8 id) { return (void *)ieee80211_bss_get_elem(bss, id); } /** * struct cfg80211_auth_request - Authentication request data * * This structure provides information needed to complete IEEE 802.11 * authentication. * * @bss: The BSS to authenticate with, the callee must obtain a reference * to it if it needs to keep it. * @auth_type: Authentication type (algorithm) * @ie: Extra IEs to add to Authentication frame or %NULL * @ie_len: Length of ie buffer in octets * @key_len: length of WEP key for shared key authentication * @key_idx: index of WEP key for shared key authentication * @key: WEP key for shared key authentication * @auth_data: Fields and elements in Authentication frames. This contains * the authentication frame body (non-IE and IE data), excluding the * Authentication algorithm number, i.e., starting at the Authentication * transaction sequence number field. * @auth_data_len: Length of auth_data buffer in octets */ struct cfg80211_auth_request { struct cfg80211_bss *bss; const u8 *ie; size_t ie_len; enum nl80211_auth_type auth_type; const u8 *key; u8 key_len, key_idx; const u8 *auth_data; size_t auth_data_len; }; /** * enum cfg80211_assoc_req_flags - Over-ride default behaviour in association. * * @ASSOC_REQ_DISABLE_HT: Disable HT (802.11n) * @ASSOC_REQ_DISABLE_VHT: Disable VHT * @ASSOC_REQ_USE_RRM: Declare RRM capability in this association * @CONNECT_REQ_EXTERNAL_AUTH_SUPPORT: User space indicates external * authentication capability. Drivers can offload authentication to * userspace if this flag is set. Only applicable for cfg80211_connect() * request (connect callback). */ enum cfg80211_assoc_req_flags { ASSOC_REQ_DISABLE_HT = BIT(0), ASSOC_REQ_DISABLE_VHT = BIT(1), ASSOC_REQ_USE_RRM = BIT(2), CONNECT_REQ_EXTERNAL_AUTH_SUPPORT = BIT(3), }; /** * struct cfg80211_assoc_request - (Re)Association request data * * This structure provides information needed to complete IEEE 802.11 * (re)association. * @bss: The BSS to associate with. If the call is successful the driver is * given a reference that it must give back to cfg80211_send_rx_assoc() * or to cfg80211_assoc_timeout(). To ensure proper refcounting, new * association requests while already associating must be rejected. * @ie: Extra IEs to add to (Re)Association Request frame or %NULL * @ie_len: Length of ie buffer in octets * @use_mfp: Use management frame protection (IEEE 802.11w) in this association * @crypto: crypto settings * @prev_bssid: previous BSSID, if not %NULL use reassociate frame. This is used * to indicate a request to reassociate within the ESS instead of a request * to do the initial association with the ESS. When included, this is set to * the BSSID of the current association, i.e., to the value that is * included in the Current AP address field of the Reassociation Request * frame. * @flags: See &enum cfg80211_assoc_req_flags * @ht_capa: HT Capabilities over-rides. Values set in ht_capa_mask * will be used in ht_capa. Unsupported values will be ignored. * @ht_capa_mask: The bits of ht_capa which are to be used.
* @vht_capa: VHT capability override * @vht_capa_mask: VHT capability mask indicating which fields to use * @fils_kek: FILS KEK for protecting (Re)Association Request/Response frame or * %NULL if FILS is not used. * @fils_kek_len: Length of fils_kek in octets * @fils_nonces: FILS nonces (part of AAD) for protecting (Re)Association * Request/Response frame or %NULL if FILS is not used. This field starts * with 16 octets of STA Nonce followed by 16 octets of AP Nonce. */ struct cfg80211_assoc_request { struct cfg80211_bss *bss; const u8 *ie, *prev_bssid; size_t ie_len; struct cfg80211_crypto_settings crypto; bool use_mfp; u32 flags; struct ieee80211_ht_cap ht_capa; struct ieee80211_ht_cap ht_capa_mask; struct ieee80211_vht_cap vht_capa, vht_capa_mask; const u8 *fils_kek; size_t fils_kek_len; const u8 *fils_nonces; }; /** * struct cfg80211_deauth_request - Deauthentication request data * * This structure provides information needed to complete IEEE 802.11 * deauthentication. * * @bssid: the BSSID of the BSS to deauthenticate from * @ie: Extra IEs to add to Deauthentication frame or %NULL * @ie_len: Length of ie buffer in octets * @reason_code: The reason code for the deauthentication * @local_state_change: if set, change local state only and * do not send a Deauthentication frame */ struct cfg80211_deauth_request { const u8 *bssid; const u8 *ie; size_t ie_len; u16 reason_code; bool local_state_change; }; /** * struct cfg80211_disassoc_request - Disassociation request data * * This structure provides information needed to complete IEEE 802.11 * disassociation. * * @bss: the BSS to disassociate from * @ie: Extra IEs to add to Disassociation frame or %NULL * @ie_len: Length of ie buffer in octets * @reason_code: The reason code for the disassociation * @local_state_change: This is a request for a local state only, i.e., no * Disassociation frame is to be transmitted. */ struct cfg80211_disassoc_request { struct cfg80211_bss *bss; const u8 *ie; size_t ie_len; u16 reason_code; bool local_state_change; }; /** * struct cfg80211_ibss_params - IBSS parameters * * This structure defines the IBSS parameters for the join_ibss() * method. * * @ssid: The SSID, will always be non-null. * @ssid_len: The length of the SSID, will always be non-zero. * @bssid: Fixed BSSID requested, may be %NULL, if set do not * search for IBSSs with a different BSSID. * @chandef: defines the channel to use if no other IBSS to join can be found * @channel_fixed: The channel should be fixed -- do not search for * IBSSs to join on other channels. * @ie: information element(s) to include in the beacon * @ie_len: length of @ie * @beacon_interval: beacon interval to use * @privacy: this is a protected network, keys will be configured * after joining * @control_port: whether user space controls IEEE 802.1X port, i.e., * sets/clears %NL80211_STA_FLAG_AUTHORIZED. If true, the driver is * required to assume that the port is unauthorized until authorized by * user space. Otherwise, port is marked authorized by default. * @control_port_over_nl80211: TRUE if userspace expects to exchange control * port frames over NL80211 instead of the network interface. * @userspace_handles_dfs: whether user space controls DFS operation, i.e. * changes the channel when a radar is detected. This is required * to operate on DFS channels. * @basic_rates: bitmap of basic rates to use when creating the IBSS * @mcast_rate: per-band multicast rate index + 1 (0: disabled) * @ht_capa: HT Capabilities over-rides. Values set in ht_capa_mask * will be used in ht_capa.
Unsupported values will be ignored. * @ht_capa_mask: The bits of ht_capa which are to be used. * @wep_keys: static WEP keys, if not NULL points to an array of * CFG80211_MAX_WEP_KEYS WEP keys * @wep_tx_key: key index (0..3) of the default TX static WEP key */ struct cfg80211_ibss_params { const u8 *ssid; const u8 *bssid; struct cfg80211_chan_def chandef; const u8 *ie; u8 ssid_len, ie_len; u16 beacon_interval; u32 basic_rates; bool channel_fixed; bool privacy; bool control_port; bool control_port_over_nl80211; bool userspace_handles_dfs; int mcast_rate[NUM_NL80211_BANDS]; struct ieee80211_ht_cap ht_capa; struct ieee80211_ht_cap ht_capa_mask; struct key_params *wep_keys; int wep_tx_key; }; /** * struct cfg80211_bss_selection - connection parameters for BSS selection. * * @behaviour: requested BSS selection behaviour. * @param: parameters for the requested behaviour. * @band_pref: preferred band for %NL80211_BSS_SELECT_ATTR_BAND_PREF. * @adjust: parameters for %NL80211_BSS_SELECT_ATTR_RSSI_ADJUST. */ struct cfg80211_bss_selection { enum nl80211_bss_select_attr behaviour; union { enum nl80211_band band_pref; struct cfg80211_bss_select_adjust adjust; } param; }; /** * struct cfg80211_connect_params - Connection parameters * * This structure provides information needed to complete IEEE 802.11 * authentication and association. * * @channel: The channel to use or %NULL if not specified (auto-select based * on scan results) * @channel_hint: The channel of the recommended BSS for initial connection or * %NULL if not specified * @bssid: The AP BSSID or %NULL if not specified (auto-select based on scan * results) * @bssid_hint: The recommended AP BSSID for initial connection to the BSS or * %NULL if not specified. Unlike the @bssid parameter, the driver is * allowed to ignore this @bssid_hint if it has knowledge of a better BSS * to use. * @ssid: SSID * @ssid_len: Length of ssid in octets * @auth_type: Authentication type (algorithm) * @ie: IEs for association request * @ie_len: Length of @ie in octets * @privacy: indicates whether privacy-enabled APs should be used * @mfp: indicate whether management frame protection is used * @crypto: crypto settings * @key_len: length of WEP key for shared key authentication * @key_idx: index of WEP key for shared key authentication * @key: WEP key for shared key authentication * @flags: See &enum cfg80211_assoc_req_flags * @bg_scan_period: Background scan period in seconds * or -1 to indicate that default value is to be used. * @ht_capa: HT Capabilities over-rides. Values set in ht_capa_mask * will be used in ht_capa. Unsupported values will be ignored. * @ht_capa_mask: The bits of ht_capa which are to be used. * @vht_capa: VHT Capability overrides * @vht_capa_mask: The bits of vht_capa which are to be used. * @pbss: if set, connect to a PCP instead of AP. Valid for DMG * networks. * @bss_select: criteria to be used for BSS selection. * @prev_bssid: previous BSSID, if not %NULL use reassociate frame. This is used * to indicate a request to reassociate within the ESS instead of a request * to do the initial association with the ESS. When included, this is set to * the BSSID of the current association, i.e., to the value that is * included in the Current AP address field of the Reassociation Request * frame. * @fils_erp_username: EAP re-authentication protocol (ERP) username part of the * NAI or %NULL if not specified. This is used to construct FILS wrapped * data IE. * @fils_erp_username_len: Length of @fils_erp_username in octets.
* @fils_erp_realm: EAP re-authentication protocol (ERP) realm part of NAI or * %NULL if not specified. This specifies the domain name of ER server and * is used to construct FILS wrapped data IE. * @fils_erp_realm_len: Length of @fils_erp_realm in octets. * @fils_erp_next_seq_num: The next sequence number to use in the FILS ERP * messages. This is also used to construct FILS wrapped data IE. * @fils_erp_rrk: ERP re-authentication Root Key (rRK) used to derive additional * keys in FILS or %NULL if not specified. * @fils_erp_rrk_len: Length of @fils_erp_rrk in octets. * @want_1x: indicates user-space supports and wants to use 802.1X driver * offload of 4-way handshake. */ struct cfg80211_connect_params { struct ieee80211_channel *channel; struct ieee80211_channel *channel_hint; const u8 *bssid; const u8 *bssid_hint; const u8 *ssid; size_t ssid_len; enum nl80211_auth_type auth_type; const u8 *ie; size_t ie_len; bool privacy; enum nl80211_mfp mfp; struct cfg80211_crypto_settings crypto; const u8 *key; u8 key_len, key_idx; u32 flags; int bg_scan_period; struct ieee80211_ht_cap ht_capa; struct ieee80211_ht_cap ht_capa_mask; struct ieee80211_vht_cap vht_capa; struct ieee80211_vht_cap vht_capa_mask; bool pbss; struct cfg80211_bss_selection bss_select; const u8 *prev_bssid; const u8 *fils_erp_username; size_t fils_erp_username_len; const u8 *fils_erp_realm; size_t fils_erp_realm_len; u16 fils_erp_next_seq_num; const u8 *fils_erp_rrk; size_t fils_erp_rrk_len; bool want_1x; }; /** * enum cfg80211_connect_params_changed - Connection parameters being updated * * This enum provides information of all connect parameters that * have to be updated as part of update_connect_params() call. * * @UPDATE_ASSOC_IES: Indicates whether association request IEs are updated * @UPDATE_FILS_ERP_INFO: Indicates that FILS connection parameters (realm, * username, erp sequence number and rrk) are updated * @UPDATE_AUTH_TYPE: Indicates that authentication type is updated */ enum cfg80211_connect_params_changed { UPDATE_ASSOC_IES = BIT(0), UPDATE_FILS_ERP_INFO = BIT(1), UPDATE_AUTH_TYPE = BIT(2), }; /** * enum wiphy_params_flags - set_wiphy_params bitfield values * @WIPHY_PARAM_RETRY_SHORT: wiphy->retry_short has changed * @WIPHY_PARAM_RETRY_LONG: wiphy->retry_long has changed * @WIPHY_PARAM_FRAG_THRESHOLD: wiphy->frag_threshold has changed * @WIPHY_PARAM_RTS_THRESHOLD: wiphy->rts_threshold has changed * @WIPHY_PARAM_COVERAGE_CLASS: coverage class changed * @WIPHY_PARAM_DYN_ACK: dynack has been enabled * @WIPHY_PARAM_TXQ_LIMIT: TXQ packet limit has been changed * @WIPHY_PARAM_TXQ_MEMORY_LIMIT: TXQ memory limit has been changed * @WIPHY_PARAM_TXQ_QUANTUM: TXQ scheduler quantum */ enum wiphy_params_flags { WIPHY_PARAM_RETRY_SHORT = 1 << 0, WIPHY_PARAM_RETRY_LONG = 1 << 1, WIPHY_PARAM_FRAG_THRESHOLD = 1 << 2, WIPHY_PARAM_RTS_THRESHOLD = 1 << 3, WIPHY_PARAM_COVERAGE_CLASS = 1 << 4, WIPHY_PARAM_DYN_ACK = 1 << 5, WIPHY_PARAM_TXQ_LIMIT = 1 << 6, WIPHY_PARAM_TXQ_MEMORY_LIMIT = 1 << 7, WIPHY_PARAM_TXQ_QUANTUM = 1 << 8, }; /** * struct cfg80211_pmksa - PMK Security Association * * This structure is passed to the set/del_pmksa() method for PMKSA * caching. * * @bssid: The AP's BSSID (may be %NULL). * @pmkid: The identifier to refer a PMKSA. * @pmk: The PMK for the PMKSA identified by @pmkid. This is used for key * derivation by a FILS STA. Otherwise, %NULL. * @pmk_len: Length of the @pmk. The length of @pmk can differ depending on * the hash algorithm used to generate this. 
* @ssid: SSID to specify the ESS within which a PMKSA is valid when using FILS * cache identifier (may be %NULL). * @ssid_len: Length of the @ssid in octets. * @cache_id: 2-octet cache identifier advertised by a FILS AP identifying the * scope of PMKSA. This is valid only if @ssid_len is non-zero (may be * %NULL). */ struct cfg80211_pmksa { const u8 *bssid; const u8 *pmkid; const u8 *pmk; size_t pmk_len; const u8 *ssid; size_t ssid_len; const u8 *cache_id; }; /** * struct cfg80211_pkt_pattern - packet pattern * @mask: bitmask where to match pattern and where to ignore bytes, * one bit per byte, in same format as nl80211 * @pattern: bytes to match where bitmask is 1 * @pattern_len: length of pattern (in bytes) * @pkt_offset: packet offset (in bytes) * * Internal note: @mask and @pattern are allocated in one chunk of * memory, free @mask only! */ struct cfg80211_pkt_pattern { const u8 *mask, *pattern; int pattern_len; int pkt_offset; }; /** * struct cfg80211_wowlan_tcp - TCP connection parameters * * @sock: (internal) socket for source port allocation * @src: source IP address * @dst: destination IP address * @dst_mac: destination MAC address * @src_port: source port * @dst_port: destination port * @payload_len: data payload length * @payload: data payload buffer * @payload_seq: payload sequence stamping configuration * @data_interval: interval at which to send data packets * @wake_len: wakeup payload match length * @wake_data: wakeup payload match data * @wake_mask: wakeup payload match mask * @tokens_size: length of the tokens buffer * @payload_tok: payload token usage configuration */ struct cfg80211_wowlan_tcp { struct socket *sock; __be32 src, dst; u16 src_port, dst_port; u8 dst_mac[ETH_ALEN]; int payload_len; const u8 *payload; struct nl80211_wowlan_tcp_data_seq payload_seq; u32 data_interval; u32 wake_len; const u8 *wake_data, *wake_mask; u32 tokens_size; /* must be last, variable member */ struct nl80211_wowlan_tcp_data_token payload_tok; }; /** * struct cfg80211_wowlan - Wake on Wireless-LAN support info * * This structure defines the enabled WoWLAN triggers for the device. * @any: wake up on any activity -- special trigger if device continues * operating as normal during suspend * @disconnect: wake up if getting disconnected * @magic_pkt: wake up on receiving magic packet * @patterns: wake up on receiving packet matching a pattern * @n_patterns: number of patterns * @gtk_rekey_failure: wake up on GTK rekey failure * @eap_identity_req: wake up on EAP identity request packet * @four_way_handshake: wake up on 4-way handshake * @rfkill_release: wake up when rfkill is released * @tcp: TCP connection establishment/wakeup parameters, see nl80211.h. * NULL if not configured. * @nd_config: configuration for the scan to be used for net detect wake. */ struct cfg80211_wowlan { bool any, disconnect, magic_pkt, gtk_rekey_failure, eap_identity_req, four_way_handshake, rfkill_release; struct cfg80211_pkt_pattern *patterns; struct cfg80211_wowlan_tcp *tcp; int n_patterns; struct cfg80211_sched_scan_request *nd_config; }; /** * struct cfg80211_coalesce_rules - Coalesce rule parameters * * This structure defines a coalesce rule for the device. * @delay: maximum coalescing delay in msecs. * @condition: condition for packet coalescence. * see &enum nl80211_coalesce_condition.
* @patterns: array of packet patterns * @n_patterns: number of patterns */ struct cfg80211_coalesce_rules { int delay; enum nl80211_coalesce_condition condition; struct cfg80211_pkt_pattern *patterns; int n_patterns; }; /** * struct cfg80211_coalesce - Packet coalescing settings * * This structure defines coalescing settings. * @rules: array of coalesce rules * @n_rules: number of rules */ struct cfg80211_coalesce { struct cfg80211_coalesce_rules *rules; int n_rules; }; /** * struct cfg80211_wowlan_nd_match - information about the match * * @ssid: SSID of the match that triggered the wake up * @n_channels: Number of channels where the match occurred. This * value may be zero if the driver can't report the channels. * @channels: center frequencies of the channels where a match * occurred (in MHz) */ struct cfg80211_wowlan_nd_match { struct cfg80211_ssid ssid; int n_channels; u32 channels[]; }; /** * struct cfg80211_wowlan_nd_info - net detect wake up information * * @n_matches: Number of match information instances provided in * @matches. This value may be zero if the driver can't provide * match information. * @matches: Array of pointers to matches containing information about * the matches that triggered the wake up. */ struct cfg80211_wowlan_nd_info { int n_matches; struct cfg80211_wowlan_nd_match *matches[]; }; /** * struct cfg80211_wowlan_wakeup - wakeup report * @disconnect: woke up by getting disconnected * @magic_pkt: woke up by receiving magic packet * @gtk_rekey_failure: woke up by GTK rekey failure * @eap_identity_req: woke up by EAP identity request packet * @four_way_handshake: woke up by 4-way handshake * @rfkill_release: woke up by rfkill being released * @pattern_idx: pattern that caused wakeup, -1 if not due to pattern * @packet_present_len: copied wakeup packet data * @packet_len: original wakeup packet length * @packet: The packet causing the wakeup, if any. * @packet_80211: indicates the format of @packet: for pattern match, magic * packet and other data frame triggers an 802.3 frame is reported, while * for disconnect due to deauthentication an 802.11 frame is reported. * This flag indicates which one it is.
* @tcp_match: TCP wakeup packet received * @tcp_connlost: TCP connection lost or failed to establish * @tcp_nomoretokens: TCP data ran out of tokens * @net_detect: if not %NULL, woke up because of net detect */ struct cfg80211_wowlan_wakeup { bool disconnect, magic_pkt, gtk_rekey_failure, eap_identity_req, four_way_handshake, rfkill_release, packet_80211, tcp_match, tcp_connlost, tcp_nomoretokens; s32 pattern_idx; u32 packet_present_len, packet_len; const void *packet; struct cfg80211_wowlan_nd_info *net_detect; }; /** * struct cfg80211_gtk_rekey_data - rekey data * @kek: key encryption key (NL80211_KEK_LEN bytes) * @kck: key confirmation key (NL80211_KCK_LEN bytes) * @replay_ctr: replay counter (NL80211_REPLAY_CTR_LEN bytes) */ struct cfg80211_gtk_rekey_data { const u8 *kek, *kck, *replay_ctr; }; /** * struct cfg80211_update_ft_ies_params - FT IE Information * * This structure provides information needed to update the fast transition IE * * @md: The Mobility Domain ID, 2 Octet value * @ie: Fast Transition IEs * @ie_len: Length of @ie in octets */ struct cfg80211_update_ft_ies_params { u16 md; const u8 *ie; size_t ie_len; }; /** * struct cfg80211_mgmt_tx_params - mgmt tx parameters * * This structure provides information needed to transmit a mgmt frame * * @chan: channel to use * @offchan: indicates whether off channel operation is required * @wait: duration for ROC * @buf: buffer to transmit * @len: buffer length * @no_cck: don't use cck rates for this frame * @dont_wait_for_ack: tells the low level not to wait for an ack * @n_csa_offsets: length of csa_offsets array * @csa_offsets: array of all the csa offsets in the frame */ struct cfg80211_mgmt_tx_params { struct ieee80211_channel *chan; bool offchan; unsigned int wait; const u8 *buf; size_t len; bool no_cck; bool dont_wait_for_ack; int n_csa_offsets; const u16 *csa_offsets; }; /** * struct cfg80211_dscp_exception - DSCP exception * * @dscp: DSCP value that does not adhere to the user priority range definition * @up: user priority value to which the corresponding DSCP value belongs */ struct cfg80211_dscp_exception { u8 dscp; u8 up; }; /** * struct cfg80211_dscp_range - DSCP range definition for user priority * * @low: lowest DSCP value of this user priority range, inclusive * @high: highest DSCP value of this user priority range, inclusive */ struct cfg80211_dscp_range { u8 low; u8 high; }; /* QoS Map Set element length defined in IEEE Std 802.11-2012, 8.4.2.97 */ #define IEEE80211_QOS_MAP_MAX_EX 21 #define IEEE80211_QOS_MAP_LEN_MIN 16 #define IEEE80211_QOS_MAP_LEN_MAX \ (IEEE80211_QOS_MAP_LEN_MIN + 2 * IEEE80211_QOS_MAP_MAX_EX) /** * struct cfg80211_qos_map - QoS Map Information * * This struct defines the Interworking QoS map setting for DSCP values * * @num_des: number of DSCP exceptions (0..21) * @dscp_exception: optionally up to maximum of 21 DSCP exceptions from * the user priority DSCP range definition * @up: DSCP range definition for a particular user priority */ struct cfg80211_qos_map { u8 num_des; struct cfg80211_dscp_exception dscp_exception[IEEE80211_QOS_MAP_MAX_EX]; struct cfg80211_dscp_range up[8]; }; /** * struct cfg80211_nan_conf - NAN configuration * * This struct defines NAN configuration parameters * * @master_pref: master preference (1 - 255) * @bands: operating bands, a bitmap of &enum nl80211_band values. * For instance, for NL80211_BAND_2GHZ, bit 0 would be set * (i.e. BIT(NL80211_BAND_2GHZ)).
* @cdw_2g: Committed DW on 2.4GHz * @cdw_5g: Committed DW on 5.2GHz */ struct cfg80211_nan_conf { u8 master_pref; u8 bands; u8 cdw_2g; u8 cdw_5g; }; /** * enum cfg80211_nan_conf_changes - indicates changed fields in NAN * configuration * * @CFG80211_NAN_CONF_CHANGED_PREF: master preference * @CFG80211_NAN_CONF_CHANGED_BANDS: operating bands * @CFG80211_NAN_CONF_CHANGED_CDW_2G: committed DW on 2.4GHz * @CFG80211_NAN_CONF_CHANGED_CDW_5G: committed DW on 5.2GHz */ enum cfg80211_nan_conf_changes { CFG80211_NAN_CONF_CHANGED_PREF = BIT(0), CFG80211_NAN_CONF_CHANGED_BANDS = BIT(1), CFG80211_NAN_CONF_CHANGED_CDW_2G = BIT(2), CFG80211_NAN_CONF_CHANGED_CDW_5G = BIT(3), }; /** * struct cfg80211_nan_func_filter - a NAN function Rx / Tx filter * * @filter: the content of the filter * @len: the length of the filter */ struct cfg80211_nan_func_filter { const u8 *filter; u8 len; }; /** * struct cfg80211_nan_sec_ctx_id - NAN security context ID * * @type: the type of the security context ID as specified by * &enum nl80211_nan_sec_ctx_type. * @len: the length of the @data field in bytes. * @data: the security context ID data. */ struct cfg80211_nan_sec_ctx_id { enum nl80211_nan_sec_ctx_type type; u16 len; const u8 *data; }; /** * struct cfg80211_nan_sec - NAN security configuration * * @cipher_suite_ids: cipher suites IDs supported by the service. A bitmap of * &enum nl80211_nan_cs_ids. * @n_ctx_ids: number of security context IDs in @ctx_ids array. * @ctx_ids: an array of security context IDs. */ struct cfg80211_nan_sec { u32 cipher_suite_ids; u8 n_ctx_ids; struct cfg80211_nan_sec_ctx_id *ctx_ids; }; /** * struct cfg80211_nan_func - a NAN function * * @type: &enum nl80211_nan_function_type * @service_id: the service ID of the function * @publish_type: &nl80211_nan_publish_type * @close_range: if true, the range should be limited. Threshold is * implementation specific. * @publish_bcast: if true, the solicited publish should be broadcasted * @subscribe_active: if true, the subscribe is active * @followup_id: the instance ID for follow up * @followup_reqid: the requestor instance ID for follow up * @followup_dest: MAC address of the recipient of the follow up * @ttl: time to live counter in DW. * @serv_spec_info: Service Specific Info * @serv_spec_info_len: Service Specific Info length * @srf_include: if true, SRF is inclusive * @srf_bf: Bloom Filter * @srf_bf_len: Bloom Filter length * @srf_bf_idx: Bloom Filter index * @srf_macs: SRF MAC addresses * @srf_num_macs: number of MAC addresses in SRF * @rx_filters: rx filters that are matched with corresponding peer's tx_filter * @tx_filters: filters that should be transmitted in the SDF. * @num_rx_filters: length of &rx_filters. * @num_tx_filters: length of &tx_filters. * @instance_id: driver allocated id of the function. * @cookie: unique NAN function identifier. * @sec: security configuration for the service. * @ndp_required: 1 if NAN data path is required for the service, 0 otherwise. * The NDP type is specified by @ndp_type. * @ndp_type: the type of NDP required for the service as specified by * &enum nl80211_nan_ndp_type. Only valid if @ndp_required is 1, ignored * otherwise. * @fsd_required: 1 if further service discovery is required for the service, 0 * otherwise. The further service discovery method is specified by * @fsd_method. * @fsd_method: the further service discovery method to be used for the service * as specified by &enum nl80211_nan_fsd. Only valid if @fsd_required is * set, ignored otherwise. 
* @range_limit_ingress: the ingress range limit for the service in centimeters. * Zero if no range limit is set. * @range_limit_egress: the egress range limit for the service in centimeters. * Zero if no range limit is set. * @awake_dw_interval: specifies the interval between two discovery windows in * which the device shall be awake to transmit or receive SDFs. The * interval will be 2^dw_interval * 512 TUs. Valid values are 0 - 4. */ struct cfg80211_nan_func { enum nl80211_nan_function_type type; u8 service_id[NL80211_NAN_FUNC_SERVICE_ID_LEN]; u8 publish_type; bool close_range; bool publish_bcast; bool subscribe_active; u8 followup_id; u8 followup_reqid; struct mac_address followup_dest; u32 ttl; const u8 *serv_spec_info; u16 serv_spec_info_len; bool srf_include; const u8 *srf_bf; u8 srf_bf_len; u8 srf_bf_idx; struct mac_address *srf_macs; int srf_num_macs; struct cfg80211_nan_func_filter *rx_filters; struct cfg80211_nan_func_filter *tx_filters; u8 num_tx_filters; u8 num_rx_filters; u8 instance_id; u64 cookie; struct cfg80211_nan_sec sec; u8 ndp_required; enum nl80211_nan_ndp_type ndp_type; u8 fsd_required; enum nl80211_nan_fsd fsd_method; u16 range_limit_ingress; u16 range_limit_egress; u8 awake_dw_interval; }; /** * struct cfg80211_pmk_conf - PMK configuration * * @aa: authenticator address * @pmk_len: PMK length in bytes. * @pmk: the PMK material * @pmk_r0_name: PMK-R0 Name. NULL if not applicable (i.e., the PMK * is not PMK-R0). When pmk_r0_name is not NULL, the pmk field * holds PMK-R0. */ struct cfg80211_pmk_conf { const u8 *aa; u8 pmk_len; const u8 *pmk; const u8 *pmk_r0_name; }; /** * struct cfg80211_external_auth_params - Trigger External authentication. * * Commonly used across the external auth request and event interfaces. * * @action: action type / trigger for external authentication. Only significant * for the authentication request event interface (driver to user space). * @bssid: BSSID of the peer with which the authentication has * to happen. Used by both the authentication request event and * authentication response command interface. * @ssid: SSID of the AP. Used by both the authentication request event and * authentication response command interface. * @key_mgmt_suite: AKM suite of the respective authentication. Used by the * authentication request event interface. * @status: status code, %WLAN_STATUS_SUCCESS for successful authentication, * use %WLAN_STATUS_UNSPECIFIED_FAILURE if user space cannot give you * the real status code for failures. Used only for the authentication * response command interface (user space to driver). * @pmk_len: Length of PMK if present. 
* @pmk: Derived PMK * @pmkid: PMKID of the derived PMK */ struct cfg80211_external_auth_params { enum nl80211_external_auth_action action; u8 bssid[ETH_ALEN] __aligned(2); struct cfg80211_ssid ssid; unsigned int key_mgmt_suite; u16 status; int pmk_len; const u8 *pmk; const u8 *pmkid; }; /** * struct cfg80211_ftm_responder_stats - FTM responder statistics * * @filled: bitflag of flags using the bits of &enum nl80211_ftm_stats to * indicate the relevant values in this struct for them * @success_num: number of FTM sessions in which all frames were successfully * answered * @partial_num: number of FTM sessions in which part of the frames were * successfully answered * @failed_num: number of failed FTM sessions * @asap_num: number of ASAP FTM sessions * @non_asap_num: number of non-ASAP FTM sessions * @total_duration_ms: total session durations - gives an indication * of how much time the responder was busy * @unknown_triggers_num: number of unknown FTM triggers - triggers from * initiators that didn't successfully finish the negotiation phase with * the responder * @reschedule_requests_num: number of FTM reschedule requests - initiator asks * for a new scheduling although it already has a scheduled FTM slot * @out_of_window_triggers_num: total FTM triggers out of scheduled window */ struct cfg80211_ftm_responder_stats { u32 filled; u32 success_num; u32 partial_num; u32 failed_num; u32 asap_num; u32 non_asap_num; u64 total_duration_ms; u32 unknown_triggers_num; u32 reschedule_requests_num; u32 out_of_window_triggers_num; }; /** * struct cfg80211_pmsr_ftm_result - FTM result * @failure_reason: if this measurement failed (PMSR status is * %NL80211_PMSR_STATUS_FAILURE), this gives a more precise * reason than just "failure" * @burst_index: if reporting partial results, this is the index * in [0 ..
num_bursts-1] of the burst that's being reported * @num_ftmr_attempts: number of FTM request frames transmitted * @num_ftmr_successes: number of FTM request frames acked * @busy_retry_time: if failure_reason is %NL80211_PMSR_FTM_FAILURE_PEER_BUSY, * fill this to indicate in how many seconds a retry is deemed possible * by the responder * @num_bursts_exp: actual number of bursts exponent negotiated * @burst_duration: actual burst duration negotiated * @ftms_per_burst: actual FTMs per burst negotiated * @lci_len: length of LCI information (if present) * @civicloc_len: length of civic location information (if present) * @lci: LCI data (may be %NULL) * @civicloc: civic location data (may be %NULL) * @rssi_avg: average RSSI over FTM action frames reported * @rssi_spread: spread of the RSSI over FTM action frames reported * @tx_rate: bitrate for transmitted FTM action frame response * @rx_rate: bitrate of received FTM action frame * @rtt_avg: average of RTTs measured (must have either this or @dist_avg) * @rtt_variance: variance of RTTs measured (note that standard deviation is * the square root of the variance) * @rtt_spread: spread of the RTTs measured * @dist_avg: average of distances (mm) measured * (must have either this or @rtt_avg) * @dist_variance: variance of distances measured (see also @rtt_variance) * @dist_spread: spread of distances measured (see also @rtt_spread) * @num_ftmr_attempts_valid: @num_ftmr_attempts is valid * @num_ftmr_successes_valid: @num_ftmr_successes is valid * @rssi_avg_valid: @rssi_avg is valid * @rssi_spread_valid: @rssi_spread is valid * @tx_rate_valid: @tx_rate is valid * @rx_rate_valid: @rx_rate is valid * @rtt_avg_valid: @rtt_avg is valid * @rtt_variance_valid: @rtt_variance is valid * @rtt_spread_valid: @rtt_spread is valid * @dist_avg_valid: @dist_avg is valid * @dist_variance_valid: @dist_variance is valid * @dist_spread_valid: @dist_spread is valid */ struct cfg80211_pmsr_ftm_result { const u8 *lci; const u8 *civicloc; unsigned int lci_len; unsigned int civicloc_len; enum nl80211_peer_measurement_ftm_failure_reasons failure_reason; u32 num_ftmr_attempts, num_ftmr_successes; s16 burst_index; u8 busy_retry_time; u8 num_bursts_exp; u8 burst_duration; u8 ftms_per_burst; s32 rssi_avg; s32 rssi_spread; struct rate_info tx_rate, rx_rate; s64 rtt_avg; s64 rtt_variance; s64 rtt_spread; s64 dist_avg; s64 dist_variance; s64 dist_spread; u16 num_ftmr_attempts_valid:1, num_ftmr_successes_valid:1, rssi_avg_valid:1, rssi_spread_valid:1, tx_rate_valid:1, rx_rate_valid:1, rtt_avg_valid:1, rtt_variance_valid:1, rtt_spread_valid:1, dist_avg_valid:1, dist_variance_valid:1, dist_spread_valid:1; }; /** * struct cfg80211_pmsr_result - peer measurement result * @addr: address of the peer * @host_time: host time (use ktime_get_boottime(), adjusted to the time when * the measurement was made) * @ap_tsf: AP's TSF at measurement time * @status: status of the measurement * @final: if reporting partial results, mark this as the last one; if not * reporting partial results always set this flag * @ap_tsf_valid: indicates the @ap_tsf value is valid * @type: type of the measurement reported, note that we only support reporting * one type at a time, but you can report multiple results separately and * they're all aggregated for userspace.
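 *
 * As a rough sketch of how a driver might report a completed FTM
 * measurement (peer_addr, req, wdev and measured_rtt are assumed to come
 * from driver context):
 *
 * .. code-block:: c
 *
 *	struct cfg80211_pmsr_result res = {
 *		.status = NL80211_PMSR_STATUS_SUCCESS,
 *		.type = NL80211_PMSR_TYPE_FTM,
 *		.final = 1,
 *		.host_time = ktime_to_ns(ktime_get_boottime()),
 *	};
 *
 *	memcpy(res.addr, peer_addr, ETH_ALEN);
 *	res.ftm.rtt_avg = measured_rtt;
 *	res.ftm.rtt_avg_valid = 1;
 *	cfg80211_pmsr_report(wdev, req, &res, GFP_KERNEL);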
*/ struct cfg80211_pmsr_result { u64 host_time, ap_tsf; enum nl80211_peer_measurement_status status; u8 addr[ETH_ALEN]; u8 final:1, ap_tsf_valid:1; enum nl80211_peer_measurement_type type; union { struct cfg80211_pmsr_ftm_result ftm; }; }; /** * struct cfg80211_pmsr_ftm_request_peer - FTM request data * @requested: indicates FTM is requested * @preamble: frame preamble to use * @burst_period: burst period to use * @asap: indicates to use ASAP mode * @num_bursts_exp: number of bursts exponent * @burst_duration: burst duration * @ftms_per_burst: number of FTMs per burst * @ftmr_retries: number of retries for FTM request * @request_lci: request LCI information * @request_civicloc: request civic location information * * See also nl80211 for the respective attribute documentation. */ struct cfg80211_pmsr_ftm_request_peer { enum nl80211_preamble preamble; u16 burst_period; u8 requested:1, asap:1, request_lci:1, request_civicloc:1; u8 num_bursts_exp; u8 burst_duration; u8 ftms_per_burst; u8 ftmr_retries; }; /** * struct cfg80211_pmsr_request_peer - peer data for a peer measurement request * @addr: MAC address * @chandef: channel to use * @report_ap_tsf: report the associated AP's TSF * @ftm: FTM data, see &struct cfg80211_pmsr_ftm_request_peer */ struct cfg80211_pmsr_request_peer { u8 addr[ETH_ALEN]; struct cfg80211_chan_def chandef; u8 report_ap_tsf:1; struct cfg80211_pmsr_ftm_request_peer ftm; }; /** * struct cfg80211_pmsr_request - peer measurement request * @cookie: cookie, set by cfg80211 * @nl_portid: netlink portid - used by cfg80211 * @drv_data: driver data for this request, if required for aborting, * not otherwise freed or anything by cfg80211 * @mac_addr: MAC address used for (randomised) request * @mac_addr_mask: MAC address mask used for randomisation, bits that * are 0 in the mask should be randomised, bits that are 1 should * be taken from the @mac_addr * @list: used by cfg80211 to hold on to the request * @timeout: timeout (in milliseconds) for the whole operation, if * zero it means there's no timeout * @n_peers: number of peers to do measurements with * @peers: per-peer measurement request data */ struct cfg80211_pmsr_request { u64 cookie; void *drv_data; u32 n_peers; u32 nl_portid; u32 timeout; u8 mac_addr[ETH_ALEN] __aligned(2); u8 mac_addr_mask[ETH_ALEN] __aligned(2); struct list_head list; struct cfg80211_pmsr_request_peer peers[]; }; /** * struct cfg80211_ops - backend description for wireless configuration * * This struct is registered by fullmac card drivers and/or wireless stacks * in order to handle configuration requests on their interfaces. * * All callbacks except where otherwise noted should return 0 * on success or a negative error code. * * All operations are currently invoked under rtnl for consistency with the * wireless extensions but this is subject to reevaluation as soon as this * code is used more widely and we have a first user without wext. * * @suspend: wiphy device needs to be suspended. The variable @wow will * be %NULL or contain the enabled Wake-on-Wireless triggers that are * configured for the device. * @resume: wiphy device needs to be resumed * @set_wakeup: Called when WoWLAN is enabled/disabled, use this callback * to call device_set_wakeup_enable() to enable/disable wakeup from * the device. * * @add_virtual_intf: create a new virtual interface with the given name, * must set the struct wireless_dev's iftype. Beware: You must create * the new netdev in the wiphy's network namespace! Returns the struct * wireless_dev, or an ERR_PTR. 
For P2P device wdevs, the driver must * also set the address member in the wdev. * * @del_virtual_intf: remove the virtual interface * * @change_virtual_intf: change type/configuration of virtual interface, * keep the struct wireless_dev's iftype updated. * * @add_key: add a key with the given parameters. @mac_addr will be %NULL * when adding a group key. * * @get_key: get information about the key with the given parameters. * @mac_addr will be %NULL when requesting information for a group * key. All pointers given to the @callback function need not be valid * after it returns. This function should return an error if it is * not possible to retrieve the key, -ENOENT if it doesn't exist. * * @del_key: remove a key given the @mac_addr (%NULL for a group key) * and @key_index, return -ENOENT if the key doesn't exist. * * @set_default_key: set the default key on an interface * * @set_default_mgmt_key: set the default management frame key on an interface * * @set_rekey_data: give the data necessary for GTK rekeying to the driver * * @start_ap: Start acting in AP mode defined by the parameters. * @change_beacon: Change the beacon parameters for an access point mode * interface. This should reject the call when AP mode wasn't started. * @stop_ap: Stop being an AP, including stopping beaconing. * * @add_station: Add a new station. * @del_station: Remove a station * @change_station: Modify a given station. Note that flags changes are not * fully validated in cfg80211, in particular the auth/assoc/authorized flags * might come to the driver in invalid combinations -- make sure to check * them, also against the existing state! Drivers must call * cfg80211_check_station_change() to validate the information. * @get_station: get station information for the station identified by @mac * @dump_station: dump station callback -- resume dump at index @idx * * @add_mpath: add a fixed mesh path * @del_mpath: delete a given mesh path * @change_mpath: change a given mesh path * @get_mpath: get a mesh path for the given parameters * @dump_mpath: dump mesh path callback -- resume dump at index @idx * @get_mpp: get a mesh proxy path for the given parameters * @dump_mpp: dump mesh proxy path callback -- resume dump at index @idx * @join_mesh: join the mesh network with the specified parameters * (invoked with the wireless_dev mutex held) * @leave_mesh: leave the current mesh network * (invoked with the wireless_dev mutex held) * * @get_mesh_config: Get the current mesh configuration * * @update_mesh_config: Update mesh parameters on a running mesh. * The mask is a bitfield which tells us which parameters to * set, and which to leave alone. * * @change_bss: Modify parameters for a given BSS. * * @set_txq_params: Set TX queue parameters * * @libertas_set_mesh_channel: Only for backward compatibility for libertas, * as it doesn't implement join_mesh and needs to set the channel to * join the mesh instead. * * @set_monitor_channel: Set the monitor mode channel for the device. If other * interfaces are active this callback should reject the configuration. * If no interfaces are active or the device is down, the channel should * be stored for when a monitor interface becomes active. * * @scan: Request to do a scan. If returning zero, the scan request is given * to the driver, and will be valid until passed to cfg80211_scan_done(). * For scan results, call cfg80211_inform_bss(); you can call this outside * the scan/scan_done bracket too. * @abort_scan: Tell the driver to abort an ongoing scan.
The driver shall * indicate the status of the scan through cfg80211_scan_done(). * * @auth: Request to authenticate with the specified peer * (invoked with the wireless_dev mutex held) * @assoc: Request to (re)associate with the specified peer * (invoked with the wireless_dev mutex held) * @deauth: Request to deauthenticate from the specified peer * (invoked with the wireless_dev mutex held) * @disassoc: Request to disassociate from the specified peer * (invoked with the wireless_dev mutex held) * * @connect: Connect to the ESS with the specified parameters. When connected, * call cfg80211_connect_result()/cfg80211_connect_bss() with status code * %WLAN_STATUS_SUCCESS. If the connection fails for some reason, call * cfg80211_connect_result()/cfg80211_connect_bss() with the status code * from the AP or cfg80211_connect_timeout() if no frame with status code * was received. * The driver is allowed to roam to other BSSes within the ESS when the * other BSS matches the connect parameters. When such roaming is initiated * by the driver, the driver is expected to verify that the target matches * the configured security parameters and to use Reassociation Request * frame instead of Association Request frame. * The connect function can also be used to request the driver to perform a * specific roam when connected to an ESS. In that case, the prev_bssid * parameter is set to the BSSID of the currently associated BSS as an * indication of requesting reassociation. * In both the driver-initiated and new connect() call initiated roaming * cases, the result of roaming is indicated with a call to * cfg80211_roamed(). (invoked with the wireless_dev mutex held) * @update_connect_params: Update the connect parameters while connected to a * BSS. The updated parameters can be used by driver/firmware for * subsequent BSS selection (roaming) decisions and to form the * Authentication/(Re)Association Request frames. This call does not * request an immediate disassociation or reassociation with the current * BSS, i.e., this impacts only subsequent (re)associations. The bits in * changed are defined in &enum cfg80211_connect_params_changed. * (invoked with the wireless_dev mutex held) * @disconnect: Disconnect from the BSS/ESS or stop connection attempts if * connection is in progress. Once done, call cfg80211_disconnected() in * case connection was already established (invoked with the * wireless_dev mutex held), otherwise call cfg80211_connect_timeout(). * * @join_ibss: Join the specified IBSS (or create if necessary). Once done, call * cfg80211_ibss_joined(), also call that function when changing BSSID due * to a merge. * (invoked with the wireless_dev mutex held) * @leave_ibss: Leave the IBSS. * (invoked with the wireless_dev mutex held) * * @set_mcast_rate: Set the specified multicast rate (only if vif is in ADHOC or * MESH mode) * * @set_wiphy_params: Notify that wiphy parameters have changed; * @changed bitfield (see &enum wiphy_params_flags) describes which values * have changed. The actual parameter values are available in * struct wiphy. If returning an error, no value should be changed. * * @set_tx_power: set the transmit power according to the parameters, * the power passed is in mBm, to get dBm use MBM_TO_DBM(). The * wdev may be %NULL if power was set for the wiphy, and will * always be %NULL unless the driver supports per-vif TX power * (as advertised by the nl80211 feature flag.) 
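 *	A handler might look roughly like this (a sketch; my_set_tx_power()
 *	and the hardware programming step are illustrative only):
 *
 *	.. code-block:: c
 *
 *		static int my_set_tx_power(struct wiphy *wiphy,
 *					   struct wireless_dev *wdev,
 *					   enum nl80211_tx_power_setting type,
 *					   int mbm)
 *		{
 *			int dbm = MBM_TO_DBM(mbm);
 *
 *			/* program dbm into the hardware here */
 *			return 0;
 *		}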
* @get_tx_power: store the current TX power into the dbm variable; * return 0 if successful * * @set_wds_peer: set the WDS peer for a WDS interface * * @rfkill_poll: polls the hw rfkill line, use cfg80211 reporting * functions to adjust rfkill hw state * * @dump_survey: get site survey information. * * @remain_on_channel: Request the driver to remain awake on the specified * channel for the specified duration to complete an off-channel * operation (e.g., public action frame exchange). When the driver is * ready on the requested channel, it must indicate this with an event * notification by calling cfg80211_ready_on_channel(). * @cancel_remain_on_channel: Cancel an on-going remain-on-channel operation. * This allows the operation to be terminated prior to timeout based on * the duration value. * @mgmt_tx: Transmit a management frame. * @mgmt_tx_cancel_wait: Cancel the wait time from transmitting a management * frame on another channel * * @testmode_cmd: run a test mode command; @wdev may be %NULL * @testmode_dump: Implement a test mode dump. The cb->args[2] and up may be * used by the function, but 0 and 1 must not be touched. Additionally, * returning error codes other than -ENOBUFS and -ENOENT will terminate the * dump and return to userspace with an error, so be careful. If any data * was passed in from userspace then the data/len arguments will be present * and point to the data contained in %NL80211_ATTR_TESTDATA. * * @set_bitrate_mask: set the bitrate mask configuration * * @set_pmksa: Cache a PMKID for a BSSID. This is mostly useful for fullmac * devices running firmwares capable of generating the (re) association * RSN IE. It allows for faster roaming between WPA2 BSSIDs. * @del_pmksa: Delete a cached PMKID. * @flush_pmksa: Flush all cached PMKIDs. * @set_power_mgmt: Configure WLAN power management. A timeout value of -1 * allows the driver to adjust the dynamic ps timeout value. * @set_cqm_rssi_config: Configure connection quality monitor RSSI threshold. * After configuration, the driver should (soon) send an event indicating * the current level is above/below the configured threshold; this may * need some care when the configuration is changed (without first being * disabled.) * @set_cqm_rssi_range_config: Configure two RSSI thresholds in the * connection quality monitor. An event is to be sent only when the * signal level is found to be outside the two values. The driver should * set %NL80211_EXT_FEATURE_CQM_RSSI_LIST if this method is implemented. * If it is provided then there's no point providing @set_cqm_rssi_config. * @set_cqm_txe_config: Configure connection quality monitor TX error * thresholds. * @sched_scan_start: Tell the driver to start a scheduled scan. * @sched_scan_stop: Tell the driver to stop an ongoing scheduled scan with * given request id. This call must stop the scheduled scan and be ready * for starting a new one before it returns, i.e. @sched_scan_start may be * called immediately after that again and should not fail in that case. * The driver should not call cfg80211_sched_scan_stopped() for a requested * stop (when this method returns 0). * * @mgmt_frame_register: Notify driver that a management frame type was * registered. The callback is allowed to sleep. * * @set_antenna: Set antenna configuration (tx_ant, rx_ant) on the device. * Parameters are bitmaps of allowed antennas to use for TX/RX. Drivers may * reject TX/RX mask combinations they cannot support by returning -EINVAL * (also see nl80211.h @NL80211_ATTR_WIPHY_ANTENNA_TX).
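 *	For example, a driver might validate the requested masks against what
 *	the hardware actually has (a sketch):
 *
 *	.. code-block:: c
 *
 *		if (tx_ant & ~wiphy->available_antennas_tx ||
 *		    rx_ant & ~wiphy->available_antennas_rx)
 *			return -EINVAL;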
* * @get_antenna: Get current antenna configuration from device (tx_ant, rx_ant). * * @tdls_mgmt: Transmit a TDLS management frame. * @tdls_oper: Perform a high-level TDLS operation (e.g. TDLS link setup). * * @probe_client: probe an associated client, must return a cookie that it * later passes to cfg80211_probe_status(). * * @set_noack_map: Set the NoAck Map for the TIDs. * * @get_channel: Get the current operating channel for the virtual interface. * For monitor interfaces, it should return %NULL unless there's a single * current monitoring channel. * * @start_p2p_device: Start the given P2P device. * @stop_p2p_device: Stop the given P2P device. * * @set_mac_acl: Sets MAC address control list in AP and P2P GO mode. * Parameters include ACL policy, an array of MAC addresses of stations * and the number of MAC addresses. If there is already a list in the driver * this new list replaces the existing one. The driver has to clear its ACL * when the number of MAC address entries passed is 0. Drivers which * advertise the support for MAC based ACL have to implement this callback. * * @start_radar_detection: Start radar detection in the driver. * * @update_ft_ies: Provide updated Fast BSS Transition information to the * driver. If the SME is in the driver/firmware, this information can be * used in building Authentication and Reassociation Request frames. * * @crit_proto_start: Indicates a critical protocol needs more link reliability * for a given duration (milliseconds). The protocol is provided so the * driver can take the most appropriate actions. * @crit_proto_stop: Indicates critical protocol no longer needs increased link * reliability. This operation can not fail. * @set_coalesce: Set coalesce parameters. * * @channel_switch: initiate channel-switch procedure (with CSA). Driver is * responsible for verifying if the switch is possible. Since this is * inherently tricky, the driver may decide to disconnect an interface later * with cfg80211_stop_iface(). This doesn't mean the driver can accept * everything. It should do its best to verify requests and reject them * as soon as possible. * * @set_qos_map: Set QoS mapping information to the driver * * @set_ap_chanwidth: Set the AP (including P2P GO) mode channel width for the * given interface. This is used e.g. for dynamic HT 20/40 MHz channel width * changes during the lifetime of the BSS. * * @add_tx_ts: validate (if admitted_time is 0) or add a TX TS to the device * with the given parameters; action frame exchange has been handled by * userspace so this just has to modify the TX path to take the TS into * account. * If the admitted time is 0 just validate the parameters to make sure * the session can be created at all; it is valid to just always return * success for that but that may result in inefficient behaviour (handshake * with the peer followed by immediate teardown when the addition is later * rejected) * @del_tx_ts: remove an existing TX TS * * @join_ocb: join the OCB network with the specified parameters * (invoked with the wireless_dev mutex held) * @leave_ocb: leave the current OCB network * (invoked with the wireless_dev mutex held) * * @tdls_channel_switch: Start channel-switching with a TDLS peer. The driver * is responsible for continually initiating channel-switching operations * and returning to the base channel for communication with the AP. * @tdls_cancel_channel_switch: Stop channel-switching with a TDLS peer. Both * peers must be on the base channel when the call completes. * @start_nan: Start the NAN interface.
* @stop_nan: Stop the NAN interface. * @add_nan_func: Add a NAN function. Returns negative value on failure. * On success @nan_func ownership is transferred to the driver and * it may access it outside of the scope of this function. The driver * should free the @nan_func when no longer needed by calling * cfg80211_free_nan_func(). * On success the driver should assign an instance_id in the * provided @nan_func. * @del_nan_func: Delete a NAN function. * @nan_change_conf: changes NAN configuration. The changed parameters must * be specified in @changes (using &enum cfg80211_nan_conf_changes); * All other parameters must be ignored. * * @set_multicast_to_unicast: configure multicast to unicast conversion for BSS * * @get_txq_stats: Get TXQ stats for interface or phy. If wdev is %NULL, this * function should return phy stats, and interface stats otherwise. * * @set_pmk: configure the PMK to be used for offloaded 802.1X 4-Way handshake. * If not deleted through @del_pmk the PMK remains valid until disconnect * upon which the driver should clear it. * (invoked with the wireless_dev mutex held) * @del_pmk: delete the previously configured PMK for the given authenticator. * (invoked with the wireless_dev mutex held) * * @external_auth: indicates result of offloaded authentication processing from * user space * * @tx_control_port: TX a control port frame (EAPoL). The noencrypt parameter * tells the driver that the frame should not be encrypted. * * @get_ftm_responder_stats: Retrieve FTM responder statistics, if available. * Statistics should be cumulative, currently no way to reset is provided. * @start_pmsr: start peer measurement (e.g. FTM) * @abort_pmsr: abort peer measurement */ struct cfg80211_ops { int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow); int (*resume)(struct wiphy *wiphy); void (*set_wakeup)(struct wiphy *wiphy, bool enabled); struct wireless_dev * (*add_virtual_intf)(struct wiphy *wiphy, const char *name, unsigned char name_assign_type, enum nl80211_iftype type, struct vif_params *params); int (*del_virtual_intf)(struct wiphy *wiphy, struct wireless_dev *wdev); int (*change_virtual_intf)(struct wiphy *wiphy, struct net_device *dev, enum nl80211_iftype type, struct vif_params *params); int (*add_key)(struct wiphy *wiphy, struct net_device *netdev, u8 key_index, bool pairwise, const u8 *mac_addr, struct key_params *params); int (*get_key)(struct wiphy *wiphy, struct net_device *netdev, u8 key_index, bool pairwise, const u8 *mac_addr, void *cookie, void (*callback)(void *cookie, struct key_params*)); int (*del_key)(struct wiphy *wiphy, struct net_device *netdev, u8 key_index, bool pairwise, const u8 *mac_addr); int (*set_default_key)(struct wiphy *wiphy, struct net_device *netdev, u8 key_index, bool unicast, bool multicast); int (*set_default_mgmt_key)(struct wiphy *wiphy, struct net_device *netdev, u8 key_index); int (*start_ap)(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_ap_settings *settings); int (*change_beacon)(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_beacon_data *info); int (*stop_ap)(struct wiphy *wiphy, struct net_device *dev); int (*add_station)(struct wiphy *wiphy, struct net_device *dev, const u8 *mac, struct station_parameters *params); int (*del_station)(struct wiphy *wiphy, struct net_device *dev, struct station_del_parameters *params); int (*change_station)(struct wiphy *wiphy, struct net_device *dev, const u8 *mac, struct station_parameters *params); int (*get_station)(struct wiphy *wiphy, struct net_device *dev, 
const u8 *mac, struct station_info *sinfo); int (*dump_station)(struct wiphy *wiphy, struct net_device *dev, int idx, u8 *mac, struct station_info *sinfo); int (*add_mpath)(struct wiphy *wiphy, struct net_device *dev, const u8 *dst, const u8 *next_hop); int (*del_mpath)(struct wiphy *wiphy, struct net_device *dev, const u8 *dst); int (*change_mpath)(struct wiphy *wiphy, struct net_device *dev, const u8 *dst, const u8 *next_hop); int (*get_mpath)(struct wiphy *wiphy, struct net_device *dev, u8 *dst, u8 *next_hop, struct mpath_info *pinfo); int (*dump_mpath)(struct wiphy *wiphy, struct net_device *dev, int idx, u8 *dst, u8 *next_hop, struct mpath_info *pinfo); int (*get_mpp)(struct wiphy *wiphy, struct net_device *dev, u8 *dst, u8 *mpp, struct mpath_info *pinfo); int (*dump_mpp)(struct wiphy *wiphy, struct net_device *dev, int idx, u8 *dst, u8 *mpp, struct mpath_info *pinfo); int (*get_mesh_config)(struct wiphy *wiphy, struct net_device *dev, struct mesh_config *conf); int (*update_mesh_config)(struct wiphy *wiphy, struct net_device *dev, u32 mask, const struct mesh_config *nconf); int (*join_mesh)(struct wiphy *wiphy, struct net_device *dev, const struct mesh_config *conf, const struct mesh_setup *setup); int (*leave_mesh)(struct wiphy *wiphy, struct net_device *dev); int (*join_ocb)(struct wiphy *wiphy, struct net_device *dev, struct ocb_setup *setup); int (*leave_ocb)(struct wiphy *wiphy, struct net_device *dev); int (*change_bss)(struct wiphy *wiphy, struct net_device *dev, struct bss_parameters *params); int (*set_txq_params)(struct wiphy *wiphy, struct net_device *dev, struct ieee80211_txq_params *params); int (*libertas_set_mesh_channel)(struct wiphy *wiphy, struct net_device *dev, struct ieee80211_channel *chan); int (*set_monitor_channel)(struct wiphy *wiphy, struct cfg80211_chan_def *chandef); int (*scan)(struct wiphy *wiphy, struct cfg80211_scan_request *request); void (*abort_scan)(struct wiphy *wiphy, struct wireless_dev *wdev); int (*auth)(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_auth_request *req); int (*assoc)(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_assoc_request *req); int (*deauth)(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_deauth_request *req); int (*disassoc)(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_disassoc_request *req); int (*connect)(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_connect_params *sme); int (*update_connect_params)(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_connect_params *sme, u32 changed); int (*disconnect)(struct wiphy *wiphy, struct net_device *dev, u16 reason_code); int (*join_ibss)(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_ibss_params *params); int (*leave_ibss)(struct wiphy *wiphy, struct net_device *dev); int (*set_mcast_rate)(struct wiphy *wiphy, struct net_device *dev, int rate[NUM_NL80211_BANDS]); int (*set_wiphy_params)(struct wiphy *wiphy, u32 changed); int (*set_tx_power)(struct wiphy *wiphy, struct wireless_dev *wdev, enum nl80211_tx_power_setting type, int mbm); int (*get_tx_power)(struct wiphy *wiphy, struct wireless_dev *wdev, int *dbm); int (*set_wds_peer)(struct wiphy *wiphy, struct net_device *dev, const u8 *addr); void (*rfkill_poll)(struct wiphy *wiphy); #ifdef CPTCFG_NL80211_TESTMODE int (*testmode_cmd)(struct wiphy *wiphy, struct wireless_dev *wdev, void *data, int len); int (*testmode_dump)(struct wiphy *wiphy, struct sk_buff *skb, struct netlink_callback *cb, void *data, int len); #endif int 
(*set_bitrate_mask)(struct wiphy *wiphy, struct net_device *dev, const u8 *peer, const struct cfg80211_bitrate_mask *mask); int (*dump_survey)(struct wiphy *wiphy, struct net_device *netdev, int idx, struct survey_info *info); int (*set_pmksa)(struct wiphy *wiphy, struct net_device *netdev, struct cfg80211_pmksa *pmksa); int (*del_pmksa)(struct wiphy *wiphy, struct net_device *netdev, struct cfg80211_pmksa *pmksa); int (*flush_pmksa)(struct wiphy *wiphy, struct net_device *netdev); int (*remain_on_channel)(struct wiphy *wiphy, struct wireless_dev *wdev, struct ieee80211_channel *chan, unsigned int duration, u64 *cookie); int (*cancel_remain_on_channel)(struct wiphy *wiphy, struct wireless_dev *wdev, u64 cookie); int (*mgmt_tx)(struct wiphy *wiphy, struct wireless_dev *wdev, struct cfg80211_mgmt_tx_params *params, u64 *cookie); int (*mgmt_tx_cancel_wait)(struct wiphy *wiphy, struct wireless_dev *wdev, u64 cookie); int (*set_power_mgmt)(struct wiphy *wiphy, struct net_device *dev, bool enabled, int timeout); int (*set_cqm_rssi_config)(struct wiphy *wiphy, struct net_device *dev, s32 rssi_thold, u32 rssi_hyst); int (*set_cqm_rssi_range_config)(struct wiphy *wiphy, struct net_device *dev, s32 rssi_low, s32 rssi_high); int (*set_cqm_txe_config)(struct wiphy *wiphy, struct net_device *dev, u32 rate, u32 pkts, u32 intvl); void (*mgmt_frame_register)(struct wiphy *wiphy, struct wireless_dev *wdev, u16 frame_type, bool reg); int (*set_antenna)(struct wiphy *wiphy, u32 tx_ant, u32 rx_ant); int (*get_antenna)(struct wiphy *wiphy, u32 *tx_ant, u32 *rx_ant); int (*sched_scan_start)(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_sched_scan_request *request); int (*sched_scan_stop)(struct wiphy *wiphy, struct net_device *dev, u64 reqid); int (*set_rekey_data)(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_gtk_rekey_data *data); int (*tdls_mgmt)(struct wiphy *wiphy, struct net_device *dev, const u8 *peer, u8 action_code, u8 dialog_token, u16 status_code, u32 peer_capability, bool initiator, const u8 *buf, size_t len); int (*tdls_oper)(struct wiphy *wiphy, struct net_device *dev, const u8 *peer, enum nl80211_tdls_operation oper); int (*probe_client)(struct wiphy *wiphy, struct net_device *dev, const u8 *peer, u64 *cookie); int (*set_noack_map)(struct wiphy *wiphy, struct net_device *dev, u16 noack_map); int (*get_channel)(struct wiphy *wiphy, struct wireless_dev *wdev, struct cfg80211_chan_def *chandef); int (*start_p2p_device)(struct wiphy *wiphy, struct wireless_dev *wdev); void (*stop_p2p_device)(struct wiphy *wiphy, struct wireless_dev *wdev); int (*set_mac_acl)(struct wiphy *wiphy, struct net_device *dev, const struct cfg80211_acl_data *params); int (*start_radar_detection)(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_chan_def *chandef, u32 cac_time_ms); int (*update_ft_ies)(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_update_ft_ies_params *ftie); int (*crit_proto_start)(struct wiphy *wiphy, struct wireless_dev *wdev, enum nl80211_crit_proto_id protocol, u16 duration); void (*crit_proto_stop)(struct wiphy *wiphy, struct wireless_dev *wdev); int (*set_coalesce)(struct wiphy *wiphy, struct cfg80211_coalesce *coalesce); int (*channel_switch)(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_csa_settings *params); int (*set_qos_map)(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_qos_map *qos_map); int (*set_ap_chanwidth)(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_chan_def *chandef); int 
(*add_tx_ts)(struct wiphy *wiphy, struct net_device *dev, u8 tsid, const u8 *peer, u8 user_prio, u16 admitted_time); int (*del_tx_ts)(struct wiphy *wiphy, struct net_device *dev, u8 tsid, const u8 *peer); int (*tdls_channel_switch)(struct wiphy *wiphy, struct net_device *dev, const u8 *addr, u8 oper_class, struct cfg80211_chan_def *chandef); void (*tdls_cancel_channel_switch)(struct wiphy *wiphy, struct net_device *dev, const u8 *addr); int (*start_nan)(struct wiphy *wiphy, struct wireless_dev *wdev, struct cfg80211_nan_conf *conf); void (*stop_nan)(struct wiphy *wiphy, struct wireless_dev *wdev); int (*add_nan_func)(struct wiphy *wiphy, struct wireless_dev *wdev, struct cfg80211_nan_func *nan_func); void (*del_nan_func)(struct wiphy *wiphy, struct wireless_dev *wdev, u64 cookie); int (*nan_change_conf)(struct wiphy *wiphy, struct wireless_dev *wdev, struct cfg80211_nan_conf *conf, u32 changes); int (*set_multicast_to_unicast)(struct wiphy *wiphy, struct net_device *dev, const bool enabled); int (*get_txq_stats)(struct wiphy *wiphy, struct wireless_dev *wdev, struct cfg80211_txq_stats *txqstats); int (*set_pmk)(struct wiphy *wiphy, struct net_device *dev, const struct cfg80211_pmk_conf *conf); int (*del_pmk)(struct wiphy *wiphy, struct net_device *dev, const u8 *aa); int (*external_auth)(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_external_auth_params *params); int (*tx_control_port)(struct wiphy *wiphy, struct net_device *dev, const u8 *buf, size_t len, const u8 *dest, const __be16 proto, const bool noencrypt); int (*get_ftm_responder_stats)(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_ftm_responder_stats *ftm_stats); int (*start_pmsr)(struct wiphy *wiphy, struct wireless_dev *wdev, struct cfg80211_pmsr_request *request); void (*abort_pmsr)(struct wiphy *wiphy, struct wireless_dev *wdev, struct cfg80211_pmsr_request *request); }; /* * wireless hardware and networking interfaces structures * and registration/helper functions */ /** * enum wiphy_flags - wiphy capability flags * * @WIPHY_FLAG_NETNS_OK: if not set, do not allow changing the netns of this * wiphy at all * @WIPHY_FLAG_PS_ON_BY_DEFAULT: if set to true, powersave will be enabled * by default -- this flag will be set depending on the kernel's default * on wiphy_new(), but can be changed by the driver if it has a good * reason to override the default * @WIPHY_FLAG_4ADDR_AP: supports 4addr mode even on AP (with a single station * on a VLAN interface) * @WIPHY_FLAG_4ADDR_STATION: supports 4addr mode even as a station * @WIPHY_FLAG_CONTROL_PORT_PROTOCOL: This device supports setting the * control port protocol ethertype. The device also honours the * control_port_no_encrypt flag. * @WIPHY_FLAG_IBSS_RSN: The device supports IBSS RSN. * @WIPHY_FLAG_MESH_AUTH: The device supports mesh authentication by routing * auth frames to userspace. See @NL80211_MESH_SETUP_USERSPACE_AUTH. * @WIPHY_FLAG_SUPPORTS_FW_ROAM: The device supports roaming feature in the * firmware. * @WIPHY_FLAG_AP_UAPSD: The device supports uapsd on AP. * @WIPHY_FLAG_SUPPORTS_TDLS: The device supports TDLS (802.11z) operation. * @WIPHY_FLAG_TDLS_EXTERNAL_SETUP: The device does not handle TDLS (802.11z) * link setup/discovery operations internally. Setup, discovery and * teardown packets should be sent through the @NL80211_CMD_TDLS_MGMT * command. When this flag is not set, @NL80211_CMD_TDLS_OPER should be * used for asking the driver/firmware to perform a TDLS operation. 
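 *
 * Drivers advertise these capabilities by OR-ing the relevant flags into
 * &wiphy.flags before registration, e.g. (a sketch):
 *
 * .. code-block:: c
 *
 *	wiphy->flags |= WIPHY_FLAG_NETNS_OK |
 *			WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;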
* @WIPHY_FLAG_HAVE_AP_SME: device integrates AP SME * @WIPHY_FLAG_REPORTS_OBSS: the device will report beacons from other BSSes * when there are virtual interfaces in AP mode by calling * cfg80211_report_obss_beacon(). * @WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD: When operating as an AP, the device * responds to probe-requests in hardware. * @WIPHY_FLAG_OFFCHAN_TX: Device supports direct off-channel TX. * @WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL: Device supports remain-on-channel call. * @WIPHY_FLAG_SUPPORTS_5_10_MHZ: Device supports 5 MHz and 10 MHz channels. * @WIPHY_FLAG_HAS_CHANNEL_SWITCH: Device supports channel switch in * beaconing mode (AP, IBSS, Mesh, ...). * @WIPHY_FLAG_HAS_STATIC_WEP: The device supports static WEP key installation * before connection. */ enum wiphy_flags { /* use hole at 0 */ /* use hole at 1 */ /* use hole at 2 */ WIPHY_FLAG_NETNS_OK = BIT(3), WIPHY_FLAG_PS_ON_BY_DEFAULT = BIT(4), WIPHY_FLAG_4ADDR_AP = BIT(5), WIPHY_FLAG_4ADDR_STATION = BIT(6), WIPHY_FLAG_CONTROL_PORT_PROTOCOL = BIT(7), WIPHY_FLAG_IBSS_RSN = BIT(8), WIPHY_FLAG_MESH_AUTH = BIT(10), /* use hole at 11 */ /* use hole at 12 */ WIPHY_FLAG_SUPPORTS_FW_ROAM = BIT(13), WIPHY_FLAG_AP_UAPSD = BIT(14), WIPHY_FLAG_SUPPORTS_TDLS = BIT(15), WIPHY_FLAG_TDLS_EXTERNAL_SETUP = BIT(16), WIPHY_FLAG_HAVE_AP_SME = BIT(17), WIPHY_FLAG_REPORTS_OBSS = BIT(18), WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD = BIT(19), WIPHY_FLAG_OFFCHAN_TX = BIT(20), WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL = BIT(21), WIPHY_FLAG_SUPPORTS_5_10_MHZ = BIT(22), WIPHY_FLAG_HAS_CHANNEL_SWITCH = BIT(23), WIPHY_FLAG_HAS_STATIC_WEP = BIT(24), }; /** * struct ieee80211_iface_limit - limit on certain interface types * @max: maximum number of interfaces of these types * @types: interface types (bits) */ struct ieee80211_iface_limit { u16 max; u16 types; }; /** * struct ieee80211_iface_combination - possible interface combination * * With this structure the driver can describe which interface * combinations it supports concurrently. * * Examples: * * 1. Allow #STA <= 1, #AP <= 1, matching BI, channels = 1, 2 total: * * .. code-block:: c * * struct ieee80211_iface_limit limits1[] = { * { .max = 1, .types = BIT(NL80211_IFTYPE_STATION), }, * { .max = 1, .types = BIT(NL80211_IFTYPE_AP), }, * }; * struct ieee80211_iface_combination combination1 = { * .limits = limits1, * .n_limits = ARRAY_SIZE(limits1), * .max_interfaces = 2, * .beacon_int_infra_match = true, * }; * * * 2. Allow #{AP, P2P-GO} <= 8, channels = 1, 8 total: * * .. code-block:: c * * struct ieee80211_iface_limit limits2[] = { * { .max = 8, .types = BIT(NL80211_IFTYPE_AP) | * BIT(NL80211_IFTYPE_P2P_GO), }, * }; * struct ieee80211_iface_combination combination2 = { * .limits = limits2, * .n_limits = ARRAY_SIZE(limits2), * .max_interfaces = 8, * .num_different_channels = 1, * }; * * * 3. Allow #STA <= 1, #{P2P-client,P2P-GO} <= 3 on two channels, 4 total. * * This allows for an infrastructure connection and three P2P connections. * * ..
code-block:: c * * struct ieee80211_iface_limit limits3[] = { * { .max = 1, .types = BIT(NL80211_IFTYPE_STATION), }, * { .max = 3, .types = BIT(NL80211_IFTYPE_P2P_GO) | * BIT(NL80211_IFTYPE_P2P_CLIENT), }, * }; * struct ieee80211_iface_combination combination3 = { * .limits = limits3, * .n_limits = ARRAY_SIZE(limits3), * .max_interfaces = 4, * .num_different_channels = 2, * }; * */ struct ieee80211_iface_combination { /** * @limits: * limits for the given interface types */ const struct ieee80211_iface_limit *limits; /** * @num_different_channels: * can use up to this many different channels */ u32 num_different_channels; /** * @max_interfaces: * maximum number of interfaces in total allowed in this group */ u16 max_interfaces; /** * @n_limits: * number of limitations */ u8 n_limits; /** * @beacon_int_infra_match: * In this combination, the beacon intervals between infrastructure * and AP types must match. This is required only in special cases. */ bool beacon_int_infra_match; /** * @radar_detect_widths: * bitmap of channel widths supported for radar detection */ u8 radar_detect_widths; /** * @radar_detect_regions: * bitmap of regions supported for radar detection */ u8 radar_detect_regions; /** * @beacon_int_min_gcd: * This interface combination supports different beacon intervals. * * = 0 * all beacon intervals for different interfaces must be the same. * > 0 * any beacon interval for an interface that is part of this * combination is allowed, AND the GCD of all beacon intervals from * beaconing interfaces of this combination must be greater than or * equal to this value. */ u32 beacon_int_min_gcd; }; struct ieee80211_txrx_stypes { u16 tx, rx; }; /** * enum wiphy_wowlan_support_flags - WoWLAN support flags * @WIPHY_WOWLAN_ANY: supports wakeup for the special "any" * trigger that keeps the device operating as-is and * wakes up the host on any activity, for example a * received packet that passed filtering; note that the * packet should be preserved in that case * @WIPHY_WOWLAN_MAGIC_PKT: supports wakeup on magic packet * (see nl80211.h) * @WIPHY_WOWLAN_DISCONNECT: supports wakeup on disconnect * @WIPHY_WOWLAN_SUPPORTS_GTK_REKEY: supports GTK rekeying while asleep * @WIPHY_WOWLAN_GTK_REKEY_FAILURE: supports wakeup on GTK rekey failure * @WIPHY_WOWLAN_EAP_IDENTITY_REQ: supports wakeup on EAP identity request * @WIPHY_WOWLAN_4WAY_HANDSHAKE: supports wakeup on 4-way handshake failure * @WIPHY_WOWLAN_RFKILL_RELEASE: supports wakeup on RF-kill release * @WIPHY_WOWLAN_NET_DETECT: supports wakeup on network detection */ enum wiphy_wowlan_support_flags { WIPHY_WOWLAN_ANY = BIT(0), WIPHY_WOWLAN_MAGIC_PKT = BIT(1), WIPHY_WOWLAN_DISCONNECT = BIT(2), WIPHY_WOWLAN_SUPPORTS_GTK_REKEY = BIT(3), WIPHY_WOWLAN_GTK_REKEY_FAILURE = BIT(4), WIPHY_WOWLAN_EAP_IDENTITY_REQ = BIT(5), WIPHY_WOWLAN_4WAY_HANDSHAKE = BIT(6), WIPHY_WOWLAN_RFKILL_RELEASE = BIT(7), WIPHY_WOWLAN_NET_DETECT = BIT(8), }; struct wiphy_wowlan_tcp_support { const struct nl80211_wowlan_tcp_data_token_feature *tok; u32 data_payload_max; u32 data_interval_max; u32 wake_payload_max; bool seq; }; /** * struct wiphy_wowlan_support - WoWLAN support data * @flags: see &enum wiphy_wowlan_support_flags * @n_patterns: number of supported wakeup patterns * (see nl80211.h for the pattern definition) * @pattern_max_len: maximum length of each pattern * @pattern_min_len: minimum length of each pattern * @max_pkt_offset: maximum Rx packet offset * @max_nd_match_sets: maximum number of matchsets for net-detect, * similar, but not necessarily identical, to max_match_sets for * scheduled scans.
* See &struct cfg80211_sched_scan_request.@match_sets for more * details. * @tcp: TCP wakeup support information */ struct wiphy_wowlan_support { u32 flags; int n_patterns; int pattern_max_len; int pattern_min_len; int max_pkt_offset; int max_nd_match_sets; const struct wiphy_wowlan_tcp_support *tcp; }; /** * struct wiphy_coalesce_support - coalesce support data * @n_rules: maximum number of coalesce rules * @max_delay: maximum supported coalescing delay in msecs * @n_patterns: number of supported patterns in a rule * (see nl80211.h for the pattern definition) * @pattern_max_len: maximum length of each pattern * @pattern_min_len: minimum length of each pattern * @max_pkt_offset: maximum Rx packet offset */ struct wiphy_coalesce_support { int n_rules; int max_delay; int n_patterns; int pattern_max_len; int pattern_min_len; int max_pkt_offset; }; /** * enum wiphy_vendor_command_flags - validation flags for vendor commands * @WIPHY_VENDOR_CMD_NEED_WDEV: vendor command requires wdev * @WIPHY_VENDOR_CMD_NEED_NETDEV: vendor command requires netdev * @WIPHY_VENDOR_CMD_NEED_RUNNING: interface/wdev must be up & running * (must be combined with %_WDEV or %_NETDEV) */ enum wiphy_vendor_command_flags { WIPHY_VENDOR_CMD_NEED_WDEV = BIT(0), WIPHY_VENDOR_CMD_NEED_NETDEV = BIT(1), WIPHY_VENDOR_CMD_NEED_RUNNING = BIT(2), }; /** * enum wiphy_opmode_flag - Station's ht/vht operation mode information flags * * @STA_OPMODE_MAX_BW_CHANGED: Max Bandwidth changed * @STA_OPMODE_SMPS_MODE_CHANGED: SMPS mode changed * @STA_OPMODE_N_SS_CHANGED: max N_SS (number of spatial streams) changed * */ enum wiphy_opmode_flag { STA_OPMODE_MAX_BW_CHANGED = BIT(0), STA_OPMODE_SMPS_MODE_CHANGED = BIT(1), STA_OPMODE_N_SS_CHANGED = BIT(2), }; /** * struct sta_opmode_info - Station's ht/vht operation mode information * @changed: contains value from &enum wiphy_opmode_flag * @smps_mode: New SMPS mode value from &enum nl80211_smps_mode of a station * @bw: new max bandwidth value from &enum nl80211_chan_width of a station * @rx_nss: new rx_nss value of a station */ struct sta_opmode_info { u32 changed; enum nl80211_smps_mode smps_mode; enum nl80211_chan_width bw; u8 rx_nss; }; /** * struct wiphy_vendor_command - vendor command definition * @info: vendor command identifying information, as used in nl80211 * @flags: flags, see &enum wiphy_vendor_command_flags * @doit: callback for the operation, note that wdev is %NULL if the * flags didn't ask for a wdev and non-%NULL otherwise; the data * pointer may be %NULL if userspace provided no data at all * @dumpit: dump callback, for transferring bigger/multiple items. The * @storage points to cb->args[5], i.e. is preserved over the multiple * dumpit calls. * It's recommended not to have the same sub command with both @doit and * @dumpit, so that userspace can assume certain ones are used with get * requests and others with dump requests.
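 *
 * A minimal definition might look like this (a sketch; the OUI, sub-command
 * number and my_vendor_doit() handler are made up for illustration):
 *
 * .. code-block:: c
 *
 *	static const struct wiphy_vendor_command my_vendor_cmds[] = {
 *		{
 *			.info = { .vendor_id = 0x001234, .subcmd = 1 },
 *			.flags = WIPHY_VENDOR_CMD_NEED_WDEV |
 *				 WIPHY_VENDOR_CMD_NEED_RUNNING,
 *			.doit = my_vendor_doit,
 *		},
 *	};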
*/ struct wiphy_vendor_command { struct nl80211_vendor_cmd_info info; u32 flags; int (*doit)(struct wiphy *wiphy, struct wireless_dev *wdev, const void *data, int data_len); int (*dumpit)(struct wiphy *wiphy, struct wireless_dev *wdev, struct sk_buff *skb, const void *data, int data_len, unsigned long *storage); }; /** * struct wiphy_iftype_ext_capab - extended capabilities per interface type * @iftype: interface type * @extended_capabilities: extended capabilities supported by the driver, * additional capabilities might be supported by userspace; these are the * 802.11 extended capabilities ("Extended Capabilities element") and are * in the same format as in the information element. See IEEE Std * 802.11-2012 8.4.2.29 for the defined fields. * @extended_capabilities_mask: mask of the valid values * @extended_capabilities_len: length of the extended capabilities */ struct wiphy_iftype_ext_capab { enum nl80211_iftype iftype; const u8 *extended_capabilities; const u8 *extended_capabilities_mask; u8 extended_capabilities_len; }; /** * struct cfg80211_pmsr_capabilities - cfg80211 peer measurement capabilities * @max_peers: maximum number of peers in a single measurement * @report_ap_tsf: can report assoc AP's TSF for radio resource measurement * @randomize_mac_addr: can randomize MAC address for measurement * @ftm.supported: FTM measurement is supported * @ftm.asap: ASAP-mode is supported * @ftm.non_asap: non-ASAP-mode is supported * @ftm.request_lci: can request LCI data * @ftm.request_civicloc: can request civic location data * @ftm.preambles: bitmap of preambles supported (&enum nl80211_preamble) * @ftm.bandwidths: bitmap of bandwidths supported (&enum nl80211_chan_width) * @ftm.max_bursts_exponent: maximum burst exponent supported * (set to -1 if not limited; note that setting this will necessarily * forbid using the value 15 to let the responder pick) * @ftm.max_ftms_per_burst: maximum FTMs per burst supported (set to 0 if * not limited) */ struct cfg80211_pmsr_capabilities { unsigned int max_peers; u8 report_ap_tsf:1, randomize_mac_addr:1; struct { u32 preambles; u32 bandwidths; s8 max_bursts_exponent; u8 max_ftms_per_burst; u8 supported:1, asap:1, non_asap:1, request_lci:1, request_civicloc:1; } ftm; }; /** * struct wiphy - wireless hardware description * @reg_notifier: the driver's regulatory notification callback, * note that if your driver uses wiphy_apply_custom_regulatory() * the reg_notifier's request can be passed as NULL * @regd: the driver's regulatory domain, if one was requested via * the regulatory_hint() API. This can be used by the driver * on the reg_notifier() if it chooses to ignore future * regulatory domain changes caused by other drivers. * @signal_type: signal type reported in &struct cfg80211_bss. * @cipher_suites: supported cipher suites * @n_cipher_suites: number of supported cipher suites * @retry_short: Retry limit for short frames (dot11ShortRetryLimit) * @retry_long: Retry limit for long frames (dot11LongRetryLimit) * @frag_threshold: Fragmentation threshold (dot11FragmentationThreshold); * -1 = fragmentation disabled, only odd values >= 256 used * @rts_threshold: RTS threshold (dot11RTSThreshold); -1 = RTS/CTS disabled * @_net: the network namespace this wiphy currently lives in * @perm_addr: permanent MAC address of this device * @addr_mask: If the device supports multiple MAC addresses by masking, * set this to a mask with variable bits set to 1, e.g. if the last * four bits are variable then set it to 00-00-00-00-00-0f. 
The actual * variable bits shall be determined by the interfaces added, with * interfaces not matching the mask being rejected when brought up. * @n_addresses: number of addresses in @addresses. * @addresses: If the device has more than one address, set this pointer * to a list of addresses (6 bytes each). The first one will be used * by default for perm_addr. In this case, the mask should be set to * all-zeroes. It is then assumed that the device can handle * the same number of arbitrary MAC addresses. * @registered: protects ->resume and ->suspend sysfs callbacks against * unregistering the hardware; also helps synchronize suspend/resume * with wiphy unregister * @debugfsdir: debugfs directory used for this wiphy, will be renamed * automatically on wiphy renames * @dev: (virtual) struct device for this wiphy * @wext: wireless extension handlers * @priv: driver private data (sized according to wiphy_new() parameter) * @interface_modes: bitmask of interface types valid for this wiphy, * must be set by driver * @iface_combinations: Valid interface combinations array, should not * list single interface types. * @n_iface_combinations: number of entries in @iface_combinations array. * @software_iftypes: bitmask of software interface types, these are not * subject to any restrictions since they are purely managed in SW. * @flags: wiphy flags, see &enum wiphy_flags * @regulatory_flags: wiphy regulatory flags, see * &enum ieee80211_regulatory_flags * @features: features advertised to nl80211, see &enum nl80211_feature_flags. * @ext_features: extended features advertised to nl80211, see * &enum nl80211_ext_feature_index. * @bss_priv_size: each BSS struct has private data allocated with it, * this variable determines its size * @max_scan_ssids: maximum number of SSIDs the device can scan for in * any given scan * @max_sched_scan_reqs: maximum number of scheduled scan requests that * the device can run concurrently. * @max_sched_scan_ssids: maximum number of SSIDs the device can scan * for in any given scheduled scan * @max_match_sets: maximum number of match sets the device can handle * when performing a scheduled scan, 0 if filtering is not * supported. * @max_scan_ie_len: maximum length of user-controlled IEs device can * add to probe request frames transmitted during a scan, must not * include fixed IEs like supported rates * @max_sched_scan_ie_len: same as max_scan_ie_len, but for scheduled * scans * @max_sched_scan_plans: maximum number of scan plans (scan interval and number * of iterations) for scheduled scan supported by the device. * @max_sched_scan_plan_interval: maximum interval (in seconds) for a * single scan plan supported by the device. * @max_sched_scan_plan_iterations: maximum number of iterations for a single * scan plan supported by the device. * @coverage_class: current coverage class * @fw_version: firmware version for ethtool reporting * @hw_version: hardware version for ethtool reporting * @max_num_pmkids: maximum number of PMKIDs supported by device * @privid: a pointer that drivers can use to identify if an arbitrary * wiphy is theirs, e.g. in global notifiers * @bands: information about bands/channels supported by this device * * @mgmt_stypes: bitmasks of frame subtypes that can be subscribed to or * transmitted through nl80211, points to an array indexed by interface * type * * @available_antennas_tx: bitmap of antennas which are available to be * configured as TX antennas.
Antenna configuration commands will be * rejected unless this or @available_antennas_rx is set. * * @available_antennas_rx: bitmap of antennas which are available to be * configured as RX antennas. Antenna configuration commands will be * rejected unless this or @available_antennas_tx is set. * * @probe_resp_offload: * Bitmap of supported protocols for probe response offloading. * See &enum nl80211_probe_resp_offload_support_attr. Only valid * when the wiphy flag @WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD is set. * * @max_remain_on_channel_duration: Maximum time a remain-on-channel operation * may request, if implemented. * * @wowlan: WoWLAN support information * @wowlan_config: current WoWLAN configuration; this should usually not be * used since access to it is necessarily racy, use the parameter passed * to the suspend() operation instead. * * @ap_sme_capa: AP SME capabilities, flags from &enum nl80211_ap_sme_features. * @ht_capa_mod_mask: Specify what ht_cap values can be over-ridden. * If null, then none can be over-ridden. * @vht_capa_mod_mask: Specify what VHT capabilities can be over-ridden. * If null, then none can be over-ridden. * * @wdev_list: the list of associated (virtual) interfaces; this list must * not be modified by the driver, but can be read with RTNL/RCU protection. * * @max_acl_mac_addrs: Maximum number of MAC addresses that the device * supports for ACL. * * @extended_capabilities: extended capabilities supported by the driver, * additional capabilities might be supported by userspace; these are * the 802.11 extended capabilities ("Extended Capabilities element") * and are in the same format as in the information element. See * 802.11-2012 8.4.2.29 for the defined fields. These are the default * extended capabilities to be used if the capabilities are not specified * for a specific interface type in iftype_ext_capab. * @extended_capabilities_mask: mask of the valid values * @extended_capabilities_len: length of the extended capabilities * @iftype_ext_capab: array of extended capabilities per interface type * @num_iftype_ext_capab: number of interface types for which extended * capabilities are specified separately. * @coalesce: packet coalescing support information * * @vendor_commands: array of vendor commands supported by the hardware * @n_vendor_commands: number of vendor commands * @vendor_events: array of vendor events supported by the hardware * @n_vendor_events: number of vendor events * * @max_ap_assoc_sta: maximum number of associated stations supported in AP mode * (including P2P GO) or 0 to indicate no such limit is advertised. The * driver is allowed to advertise a theoretical limit that it can reach in * some cases, but may not always reach. * * @max_num_csa_counters: Number of supported csa_counters in beacons * and probe responses. This value should be set if the driver * wishes to limit the number of csa counters. Default (0) means * infinite. * @bss_select_support: bitmask indicating the BSS selection criteria supported * by the driver in the .connect() callback. The bit position maps to the * attribute indices defined in &enum nl80211_bss_select_attr. * * @nan_supported_bands: bands supported by the device in NAN mode, a * bitmap of &enum nl80211_band values. For instance, for * NL80211_BAND_2GHZ, bit 0 would be set * (i.e. BIT(NL80211_BAND_2GHZ)). 
* * @txq_limit: configuration of internal TX queue frame limit * @txq_memory_limit: configuration internal TX queue memory limit * @txq_quantum: configuration of internal TX queue scheduler quantum * * @support_mbssid: can HW support association with nontransmitted AP * @support_only_he_mbssid: don't parse MBSSID elements if it is not * HE AP, in order to avoid compatibility issues. * @support_mbssid must be set for this to have any effect. * * @pmsr_capa: peer measurement capabilities */ struct wiphy { /* assign these fields before you register the wiphy */ #define WIPHY_COMPAT_PAD_SIZE 2048 u8 padding[WIPHY_COMPAT_PAD_SIZE]; /* permanent MAC address(es) */ u8 perm_addr[ETH_ALEN]; u8 addr_mask[ETH_ALEN]; struct mac_address *addresses; const struct ieee80211_txrx_stypes *mgmt_stypes; const struct ieee80211_iface_combination *iface_combinations; int n_iface_combinations; u16 software_iftypes; u16 n_addresses; /* Supported interface modes, OR together BIT(NL80211_IFTYPE_...) */ u16 interface_modes; u16 max_acl_mac_addrs; u32 flags, regulatory_flags, features; u8 ext_features[DIV_ROUND_UP(NUM_NL80211_EXT_FEATURES, 8)]; u32 ap_sme_capa; enum cfg80211_signal_type signal_type; int bss_priv_size; u8 max_scan_ssids; u8 max_sched_scan_reqs; u8 max_sched_scan_ssids; u8 max_match_sets; u16 max_scan_ie_len; u16 max_sched_scan_ie_len; u32 max_sched_scan_plans; u32 max_sched_scan_plan_interval; u32 max_sched_scan_plan_iterations; int n_cipher_suites; const u32 *cipher_suites; u8 retry_short; u8 retry_long; u32 frag_threshold; u32 rts_threshold; u8 coverage_class; char fw_version[ETHTOOL_FWVERS_LEN]; u32 hw_version; #ifdef CONFIG_PM const struct wiphy_wowlan_support *wowlan; struct cfg80211_wowlan *wowlan_config; #endif u16 max_remain_on_channel_duration; u8 max_num_pmkids; u32 available_antennas_tx; u32 available_antennas_rx; /* * Bitmap of supported protocols for probe response offloading * see &enum nl80211_probe_resp_offload_support_attr. Only valid * when the wiphy flag @WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD is set. */ u32 probe_resp_offload; const u8 *extended_capabilities, *extended_capabilities_mask; u8 extended_capabilities_len; const struct wiphy_iftype_ext_capab *iftype_ext_capab; unsigned int num_iftype_ext_capab; /* If multiple wiphys are registered and you're handed e.g. * a regular netdev with assigned ieee80211_ptr, you won't * know whether it points to a wiphy your driver has registered * or not. Assign this to something global to your driver to * help determine whether you own this wiphy or not. 
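 * One illustrative (not mandated) pattern is to compare against the
 * address of a static object in the driver, e.g.:
 *
 *	static int mydrv_wiphy_privid;	(hypothetical name)
 *	...
 *	wiphy->privid = &mydrv_wiphy_privid;
 *	...
 *	if (wiphy->privid == &mydrv_wiphy_privid)
 *		... this wiphy belongs to this driver ...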
*/ const void *privid; struct ieee80211_supported_band *bands[NUM_NL80211_BANDS]; /* Lets us get back the wiphy on the callback */ void (*reg_notifier)(struct wiphy *wiphy, struct regulatory_request *request); /* fields below are read-only, assigned by cfg80211 */ const struct ieee80211_regdomain __rcu *regd; /* the item in /sys/class/ieee80211/ points to this, * you need to use set_wiphy_dev() (see below) */ struct device dev; /* protects ->resume, ->suspend sysfs callbacks against unregister hw */ bool registered; /* dir in debugfs: ieee80211/ */ struct dentry *debugfsdir; const struct ieee80211_ht_cap *ht_capa_mod_mask; const struct ieee80211_vht_cap *vht_capa_mod_mask; struct list_head wdev_list; /* the network namespace this phy lives in currently */ possible_net_t _net; #ifdef CPTCFG_CFG80211_WEXT const struct iw_handler_def *wext; #endif const struct wiphy_coalesce_support *coalesce; const struct wiphy_vendor_command *vendor_commands; const struct nl80211_vendor_cmd_info *vendor_events; int n_vendor_commands, n_vendor_events; u16 max_ap_assoc_sta; u8 max_num_csa_counters; u32 bss_select_support; u8 nan_supported_bands; u32 txq_limit; u32 txq_memory_limit; u32 txq_quantum; u8 support_mbssid:1, support_only_he_mbssid:1; const struct cfg80211_pmsr_capabilities *pmsr_capa; char priv[0] __aligned(NETDEV_ALIGN); }; static inline struct net *wiphy_net(struct wiphy *wiphy) { return possible_read_pnet(&wiphy->_net); } static inline void wiphy_net_set(struct wiphy *wiphy, struct net *net) { possible_write_pnet(&wiphy->_net, net); } /** * wiphy_priv - return priv from wiphy * * @wiphy: the wiphy whose priv pointer to return * Return: The priv of @wiphy. */ static inline void *wiphy_priv(struct wiphy *wiphy) { BUG_ON(!wiphy); return &wiphy->priv; } /** * priv_to_wiphy - return the wiphy containing the priv * * @priv: a pointer previously returned by wiphy_priv * Return: The wiphy of @priv. */ static inline struct wiphy *priv_to_wiphy(void *priv) { BUG_ON(!priv); return container_of(priv, struct wiphy, priv); } /** * set_wiphy_dev - set device pointer for wiphy * * @wiphy: The wiphy whose device to bind * @dev: The device to parent it to */ static inline void set_wiphy_dev(struct wiphy *wiphy, struct device *dev) { wiphy->dev.parent = dev; } /** * wiphy_dev - get wiphy dev pointer * * @wiphy: The wiphy whose device struct to look up * Return: The dev of @wiphy. */ static inline struct device *wiphy_dev(struct wiphy *wiphy) { return wiphy->dev.parent; } /** * wiphy_name - get wiphy name * * @wiphy: The wiphy whose name to return * Return: The name of @wiphy. */ static inline const char *wiphy_name(const struct wiphy *wiphy) { return dev_name(&wiphy->dev); } /** * wiphy_new_nm - create a new wiphy for use with cfg80211 * * @ops: The configuration operations for this device * @sizeof_priv: The size of the private area to allocate * @requested_name: Request a particular name. * NULL is a valid value and means to use the default phy%d naming. * * Create a new wiphy and associate the given operations with it. * @sizeof_priv bytes are allocated for private use. * * Return: A pointer to the new wiphy. This pointer must be * assigned to each netdev's ieee80211_ptr for proper operation.
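 *
 * A minimal allocation sketch (driver names here are hypothetical),
 * showing how the private area is typically retrieved with wiphy_priv():
 *
 *	struct mydrv_priv *priv;
 *	struct wiphy *wiphy;
 *
 *	wiphy = wiphy_new_nm(&mydrv_cfg80211_ops, sizeof(*priv), NULL);
 *	if (!wiphy)
 *		return -ENOMEM;
 *	priv = wiphy_priv(wiphy);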
*/ struct wiphy *wiphy_new_nm(const struct cfg80211_ops *ops, int sizeof_priv, const char *requested_name); /** * wiphy_new - create a new wiphy for use with cfg80211 * * @ops: The configuration operations for this device * @sizeof_priv: The size of the private area to allocate * * Create a new wiphy and associate the given operations with it. * @sizeof_priv bytes are allocated for private use. * * Return: A pointer to the new wiphy. This pointer must be * assigned to each netdev's ieee80211_ptr for proper operation. */ static inline struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv) { return wiphy_new_nm(ops, sizeof_priv, NULL); } /** * wiphy_register - register a wiphy with cfg80211 * * @wiphy: The wiphy to register. * * Return: A non-negative wiphy index or a negative error code. */ int wiphy_register(struct wiphy *wiphy); /** * wiphy_unregister - deregister a wiphy from cfg80211 * * @wiphy: The wiphy to unregister. * * After this call, no more requests can be made with this priv * pointer, but the call may sleep to wait for an outstanding * request that is being handled. */ void wiphy_unregister(struct wiphy *wiphy); /** * wiphy_free - free wiphy * * @wiphy: The wiphy to free */ void wiphy_free(struct wiphy *wiphy); /* internal structs */ struct cfg80211_conn; struct cfg80211_internal_bss; struct cfg80211_cached_keys; struct cfg80211_cqm_config; /** * struct wireless_dev - wireless device state * * For netdevs, this structure must be allocated by the driver * that uses the ieee80211_ptr field in struct net_device (this * is intentional so it can be allocated along with the netdev.) * It need not be registered then as netdev registration will * be intercepted by cfg80211 to see the new wireless device. * * For non-netdev uses, it must also be allocated by the driver * in response to the cfg80211 callbacks that require it, as * there's no netdev registration in that case it may not be * allocated outside of callback operations that return it. * * @wiphy: pointer to hardware description * @iftype: interface type * @list: (private) Used to collect the interfaces * @netdev: (private) Used to reference back to the netdev, may be %NULL * @identifier: (private) Identifier used in nl80211 to identify this * wireless device if it has no netdev * @current_bss: (private) Used by the internal configuration code * @chandef: (private) Used by the internal configuration code to track * the user-set channel definition. 
* @preset_chandef: (private) Used by the internal configuration code to * track the channel to be used for AP later * @bssid: (private) Used by the internal configuration code * @ssid: (private) Used by the internal configuration code * @ssid_len: (private) Used by the internal configuration code * @mesh_id_len: (private) Used by the internal configuration code * @mesh_id_up_len: (private) Used by the internal configuration code * @wext: (private) Used by the internal wireless extensions compat code * @use_4addr: indicates 4addr mode is used on this interface, must be * set by driver (if supported) on add_interface BEFORE registering the * netdev and may otherwise be used by driver read-only, will be updated * by cfg80211 on change_interface * @mgmt_registrations: list of registrations for management frames * @mgmt_registrations_lock: lock for the list * @mtx: mutex used to lock data in this struct, may be used by drivers * and some API functions require it held * @beacon_interval: beacon interval used on this device for transmitting * beacons, 0 when not valid * @address: The address for this device, valid only if @netdev is %NULL * @is_running: true if this is a non-netdev device that has been started, e.g. * the P2P Device. * @cac_started: true if DFS channel availability check has been started * @cac_start_time: timestamp (jiffies) when the dfs state was entered. * @cac_time_ms: CAC time in ms * @ps: powersave mode is enabled * @ps_timeout: dynamic powersave timeout * @ap_unexpected_nlportid: (private) netlink port ID of application * registered for unexpected class 3 frames (AP mode) * @conn: (private) cfg80211 software SME connection state machine data * @connect_keys: (private) keys to set after connection is established * @conn_bss_type: connecting/connected BSS type * @conn_owner_nlportid: (private) connection owner socket port ID * @disconnect_wk: (private) auto-disconnect work * @disconnect_bssid: (private) the BSSID to use for auto-disconnect * @ibss_fixed: (private) IBSS is using fixed BSSID * @ibss_dfs_possible: (private) IBSS may change to a DFS channel * @event_list: (private) list for internal event processing * @event_lock: (private) lock for event list * @owner_nlportid: (private) owner socket port ID * @nl_owner_dead: (private) owner socket went away * @cqm_config: (private) nl80211 RSSI monitor state * @pmsr_list: (private) peer measurement requests * @pmsr_lock: (private) peer measurements requests/results lock * @pmsr_free_wk: (private) peer measurements cleanup work */ struct wireless_dev { struct wiphy *wiphy; enum nl80211_iftype iftype; /* the remainder of this struct should be private to cfg80211 */ struct list_head list; struct net_device *netdev; u32 identifier; struct list_head mgmt_registrations; spinlock_t mgmt_registrations_lock; struct mutex mtx; bool use_4addr, is_running; u8 address[ETH_ALEN] __aligned(sizeof(u16)); /* currently used for IBSS and SME - might be rearranged later */ u8 ssid[IEEE80211_MAX_SSID_LEN]; u8 ssid_len, mesh_id_len, mesh_id_up_len; struct cfg80211_conn *conn; struct cfg80211_cached_keys *connect_keys; enum ieee80211_bss_type conn_bss_type; u32 conn_owner_nlportid; struct work_struct disconnect_wk; u8 disconnect_bssid[ETH_ALEN]; struct list_head event_list; spinlock_t event_lock; struct cfg80211_internal_bss *current_bss; /* associated / joined */ struct cfg80211_chan_def preset_chandef; struct cfg80211_chan_def chandef; bool ibss_fixed; bool ibss_dfs_possible; bool ps; int ps_timeout; int beacon_interval; u32 ap_unexpected_nlportid;
u32 owner_nlportid; bool nl_owner_dead; bool cac_started; unsigned long cac_start_time; unsigned int cac_time_ms; #ifdef CPTCFG_CFG80211_WEXT /* wext data */ struct { struct cfg80211_ibss_params ibss; struct cfg80211_connect_params connect; struct cfg80211_cached_keys *keys; const u8 *ie; size_t ie_len; u8 bssid[ETH_ALEN], prev_bssid[ETH_ALEN]; u8 ssid[IEEE80211_MAX_SSID_LEN]; s8 default_key, default_mgmt_key; bool prev_bssid_valid; } wext; #endif struct cfg80211_cqm_config *cqm_config; struct list_head pmsr_list; spinlock_t pmsr_lock; struct work_struct pmsr_free_wk; }; static inline u8 *wdev_address(struct wireless_dev *wdev) { if (wdev->netdev) return wdev->netdev->dev_addr; return wdev->address; } static inline bool wdev_running(struct wireless_dev *wdev) { if (wdev->netdev) return netif_running(wdev->netdev); return wdev->is_running; } /** * wdev_priv - return wiphy priv from wireless_dev * * @wdev: The wireless device whose wiphy's priv pointer to return * Return: The wiphy priv of @wdev. */ static inline void *wdev_priv(struct wireless_dev *wdev) { BUG_ON(!wdev); return wiphy_priv(wdev->wiphy); } /** * DOC: Utility functions * * cfg80211 offers a number of utility functions that can be useful. */ /** * ieee80211_channel_to_frequency - convert channel number to frequency * @chan: channel number * @band: band, necessary due to channel number overlap * Return: The corresponding frequency (in MHz), or 0 if the conversion failed. */ int ieee80211_channel_to_frequency(int chan, enum nl80211_band band); /** * ieee80211_frequency_to_channel - convert frequency to channel number * @freq: center frequency * Return: The corresponding channel, or 0 if the conversion failed. */ int ieee80211_frequency_to_channel(int freq); /** * ieee80211_get_channel - get channel struct from wiphy for specified frequency * * @wiphy: the struct wiphy to get the channel for * @freq: the center frequency of the channel * * Return: The channel struct from @wiphy at @freq. */ struct ieee80211_channel *ieee80211_get_channel(struct wiphy *wiphy, int freq); /** * ieee80211_get_response_rate - get basic rate for a given rate * * @sband: the band to look for rates in * @basic_rates: bitmap of basic rates * @bitrate: the bitrate for which to find the basic rate * * Return: The basic rate corresponding to a given bitrate, that * is the next lower bitrate contained in the basic rate map, * which is, for this function, given as a bitmap of indices of * rates in the band's bitrate table. */ struct ieee80211_rate * ieee80211_get_response_rate(struct ieee80211_supported_band *sband, u32 basic_rates, int bitrate); /** * ieee80211_mandatory_rates - get mandatory rates for a given band * @sband: the band to look for rates in * @scan_width: width of the control channel * * This function returns a bitmap of the mandatory rates for the given * band, bits are set according to the rate position in the bitrates array. 
*/ u32 ieee80211_mandatory_rates(struct ieee80211_supported_band *sband, enum nl80211_bss_scan_width scan_width); /* * Radiotap parsing functions -- for controlled injection support * * Implemented in net/wireless/radiotap.c * Documentation in Documentation/networking/radiotap-headers.txt */ struct radiotap_align_size { uint8_t align:4, size:4; }; struct ieee80211_radiotap_namespace { const struct radiotap_align_size *align_size; int n_bits; uint32_t oui; uint8_t subns; }; struct ieee80211_radiotap_vendor_namespaces { const struct ieee80211_radiotap_namespace *ns; int n_ns; }; /** * struct ieee80211_radiotap_iterator - tracks walk thru present radiotap args * @this_arg_index: index of current arg, valid after each successful call * to ieee80211_radiotap_iterator_next() * @this_arg: pointer to current radiotap arg; it is valid after each * call to ieee80211_radiotap_iterator_next() but also after * ieee80211_radiotap_iterator_init() where it will point to * the beginning of the actual data portion * @this_arg_size: length of the current arg, for convenience * @current_namespace: pointer to the current namespace definition * (or internally %NULL if the current namespace is unknown) * @is_radiotap_ns: indicates whether the current namespace is the default * radiotap namespace or not * * @_rtheader: pointer to the radiotap header we are walking through * @_max_length: length of radiotap header in cpu byte ordering * @_arg_index: next argument index * @_arg: next argument pointer * @_next_bitmap: internal pointer to next present u32 * @_bitmap_shifter: internal shifter for curr u32 bitmap, b0 set == arg present * @_vns: vendor namespace definitions * @_next_ns_data: beginning of the next namespace's data * @_reset_on_ext: internal; reset the arg index to 0 when going to the * next bitmap word * * Describes the radiotap parser state. Fields prefixed with an underscore * must not be used by users of the parser, only by the parser internally. */ struct ieee80211_radiotap_iterator { struct ieee80211_radiotap_header *_rtheader; const struct ieee80211_radiotap_vendor_namespaces *_vns; const struct ieee80211_radiotap_namespace *current_namespace; unsigned char *_arg, *_next_ns_data; __le32 *_next_bitmap; unsigned char *this_arg; int this_arg_index; int this_arg_size; int is_radiotap_ns; int _max_length; int _arg_index; uint32_t _bitmap_shifter; int _reset_on_ext; }; int ieee80211_radiotap_iterator_init(struct ieee80211_radiotap_iterator *iterator, struct ieee80211_radiotap_header *radiotap_header, int max_length, const struct ieee80211_radiotap_vendor_namespaces *vns); int ieee80211_radiotap_iterator_next(struct ieee80211_radiotap_iterator *iterator); extern const unsigned char rfc1042_header[6]; extern const unsigned char bridge_tunnel_header[6]; /** * ieee80211_get_hdrlen_from_skb - get header length from data * * @skb: the frame * * Given an skb with a raw 802.11 header at the data pointer this function * returns the 802.11 header length. * * Return: The 802.11 header length in bytes (not including encryption * headers). Or 0 if the data in the sk_buff is too short to contain a valid * 802.11 header. */ unsigned int ieee80211_get_hdrlen_from_skb(const struct sk_buff *skb); /** * ieee80211_hdrlen - get header length in bytes from frame control * @fc: frame control field in little-endian format * Return: The header length in bytes. 
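 *
 * For example, to skip past the 802.11 header of a received frame
 * (a sketch only; the caller is responsible for length checks):
 *
 *	struct ieee80211_hdr *hdr = (void *)skb->data;
 *	unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);
 *	u8 *payload = skb->data + hdrlen;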
*/ unsigned int __attribute_const__ ieee80211_hdrlen(__le16 fc); /** * ieee80211_get_mesh_hdrlen - get mesh extension header length * @meshhdr: the mesh extension header, only the flags field * (first byte) will be accessed * Return: The length of the extension header, which is always at * least 6 bytes and at most 18 if address 5 and 6 are present. */ unsigned int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr); /** * DOC: Data path helpers * * In addition to generic utilities, cfg80211 also offers * functions that help implement the data path for devices * that do not do the 802.11/802.3 conversion on the device. */ /** * ieee80211_data_to_8023_exthdr - convert an 802.11 data frame to 802.3 * @skb: the 802.11 data frame * @ehdr: pointer to a &struct ethhdr that will get the header, instead * of it being pushed into the SKB * @addr: the device MAC address * @iftype: the virtual interface type * @data_offset: offset of payload after the 802.11 header * Return: 0 on success. Non-zero on error. */ int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr, const u8 *addr, enum nl80211_iftype iftype, u8 data_offset); /** * ieee80211_data_to_8023 - convert an 802.11 data frame to 802.3 * @skb: the 802.11 data frame * @addr: the device MAC address * @iftype: the virtual interface type * Return: 0 on success. Non-zero on error. */ static inline int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr, enum nl80211_iftype iftype) { return ieee80211_data_to_8023_exthdr(skb, NULL, addr, iftype, 0); } /** * ieee80211_amsdu_to_8023s - decode an IEEE 802.11n A-MSDU frame * * Decode an IEEE 802.11 A-MSDU and convert it to a list of 802.3 frames. * The @list will be empty if the decode fails. The @skb must be fully * header-less before being passed in here; it is freed in this function. * * @skb: The input A-MSDU frame without any headers. * @list: The output list of 802.3 frames. It must be allocated and * initialized by the caller. * @addr: The device MAC address. * @iftype: The device interface type. * @extra_headroom: The hardware extra headroom for SKBs in the @list. * @check_da: DA to check in the inner ethernet header, or NULL * @check_sa: SA to check in the inner ethernet header, or NULL */ void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list, const u8 *addr, enum nl80211_iftype iftype, const unsigned int extra_headroom, const u8 *check_da, const u8 *check_sa); /** * cfg80211_classify8021d - determine the 802.1p/1d tag for a data frame * @skb: the data frame * @qos_map: Interworking QoS mapping or %NULL if not in use * Return: The 802.1p/1d tag. */ unsigned int cfg80211_classify8021d(struct sk_buff *skb, struct cfg80211_qos_map *qos_map); /** * cfg80211_find_elem_match - match information element and byte array in data * * @eid: element ID * @ies: data consisting of IEs * @len: length of data * @match: byte array to match * @match_len: number of bytes in the match array * @match_offset: offset in the IE data where the byte array should match. * Note the difference to cfg80211_find_ie_match() which considers * the offset to start from the element ID byte, but here we take * the data portion instead. * * Return: %NULL if the element ID could not be found or if * the element is invalid (claims to be longer than the given * data) or if the byte array doesn't match; otherwise return the * requested element struct.
* * Note: There are no checks on the element length other than * having to fit into the given data and being large enough for the * byte array to match. */ const struct element * cfg80211_find_elem_match(u8 eid, const u8 *ies, unsigned int len, const u8 *match, unsigned int match_len, unsigned int match_offset); /** * cfg80211_find_ie_match - match information element and byte array in data * * @eid: element ID * @ies: data consisting of IEs * @len: length of data * @match: byte array to match * @match_len: number of bytes in the match array * @match_offset: offset in the IE where the byte array should match. * If match_len is zero, this must also be set to zero. * Otherwise this must be set to 2 or more, because the first * byte is the element id, which is already compared to eid, and * the second byte is the IE length. * * Return: %NULL if the element ID could not be found or if * the element is invalid (claims to be longer than the given * data) or if the byte array doesn't match, or a pointer to the first * byte of the requested element, that is the byte containing the * element ID. * * Note: There are no checks on the element length other than * having to fit into the given data and being large enough for the * byte array to match. */ static inline const u8 * cfg80211_find_ie_match(u8 eid, const u8 *ies, unsigned int len, const u8 *match, unsigned int match_len, unsigned int match_offset) { /* match_offset can't be smaller than 2, unless match_len is * zero, in which case match_offset must be zero as well. */ if (WARN_ON((match_len && match_offset < 2) || (!match_len && match_offset))) return NULL; return (void *)cfg80211_find_elem_match(eid, ies, len, match, match_len, match_offset ? match_offset - 2 : 0); } /** * cfg80211_find_elem - find information element in data * * @eid: element ID * @ies: data consisting of IEs * @len: length of data * * Return: %NULL if the element ID could not be found or if * the element is invalid (claims to be longer than the given * data) or if the byte array doesn't match; otherwise return the * requested element struct. * * Note: There are no checks on the element length other than * having to fit into the given data. */ static inline const struct element * cfg80211_find_elem(u8 eid, const u8 *ies, int len) { return cfg80211_find_elem_match(eid, ies, len, NULL, 0, 0); } /** * cfg80211_find_ie - find information element in data * * @eid: element ID * @ies: data consisting of IEs * @len: length of data * * Return: %NULL if the element ID could not be found or if * the element is invalid (claims to be longer than the given * data), or a pointer to the first byte of the requested * element, that is the byte containing the element ID. * * Note: There are no checks on the element length other than * having to fit into the given data. */ static inline const u8 *cfg80211_find_ie(u8 eid, const u8 *ies, int len) { return cfg80211_find_ie_match(eid, ies, len, NULL, 0, 0); } /** * cfg80211_find_ext_elem - find information element with EID Extension in data * * @ext_eid: element ID Extension * @ies: data consisting of IEs * @len: length of data * * Return: %NULL if the extended element could not be found or if * the element is invalid (claims to be longer than the given * data) or if the byte array doesn't match; otherwise return the * requested element struct. * * Note: There are no checks on the element length other than * having to fit into the given data.
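 *
 * For instance, looking up the HE capabilities element in a frame's IE
 * buffer could look like this (an illustrative sketch, assuming
 * WLAN_EID_EXT_HE_CAPABILITY is defined in this tree's headers):
 *
 *	const struct element *elem;
 *
 *	elem = cfg80211_find_ext_elem(WLAN_EID_EXT_HE_CAPABILITY,
 *				      ies, ies_len);
 *	if (elem)
 *		... elem->data[0] is the element ID extension octet ...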
*/ static inline const struct element * cfg80211_find_ext_elem(u8 ext_eid, const u8 *ies, int len) { return cfg80211_find_elem_match(WLAN_EID_EXTENSION, ies, len, &ext_eid, 1, 0); } /** * cfg80211_find_ext_ie - find information element with EID Extension in data * * @ext_eid: element ID Extension * @ies: data consisting of IEs * @len: length of data * * Return: %NULL if the extended element ID could not be found or if * the element is invalid (claims to be longer than the given * data), or a pointer to the first byte of the requested * element, that is the byte containing the element ID. * * Note: There are no checks on the element length other than * having to fit into the given data. */ static inline const u8 *cfg80211_find_ext_ie(u8 ext_eid, const u8 *ies, int len) { return cfg80211_find_ie_match(WLAN_EID_EXTENSION, ies, len, &ext_eid, 1, 2); } /** * cfg80211_find_vendor_elem - find vendor specific information element in data * * @oui: vendor OUI * @oui_type: vendor-specific OUI type (must be < 0xff), negative means any * @ies: data consisting of IEs * @len: length of data * * Return: %NULL if the vendor specific element ID could not be found or if the * element is invalid (claims to be longer than the given data); otherwise * return the element structure for the requested element. * * Note: There are no checks on the element length other than having to fit into * the given data. */ const struct element *cfg80211_find_vendor_elem(unsigned int oui, int oui_type, const u8 *ies, unsigned int len); /** * cfg80211_find_vendor_ie - find vendor specific information element in data * * @oui: vendor OUI * @oui_type: vendor-specific OUI type (must be < 0xff), negative means any * @ies: data consisting of IEs * @len: length of data * * Return: %NULL if the vendor specific element ID could not be found or if the * element is invalid (claims to be longer than the given data), or a pointer to * the first byte of the requested element, that is the byte containing the * element ID. * * Note: There are no checks on the element length other than having to fit into * the given data. */ static inline const u8 * cfg80211_find_vendor_ie(unsigned int oui, int oui_type, const u8 *ies, unsigned int len) { return (void *)cfg80211_find_vendor_elem(oui, oui_type, ies, len); } /** * cfg80211_send_layer2_update - send layer 2 update frame * * @dev: network device * @addr: STA MAC address * * Wireless drivers can use this function to update forwarding tables in bridge * devices upon STA association. */ void cfg80211_send_layer2_update(struct net_device *dev, const u8 *addr); /** * DOC: Regulatory enforcement infrastructure * * TODO */ /** * regulatory_hint - driver hint to the wireless core a regulatory domain * @wiphy: the wireless device giving the hint (used only for reporting * conflicts) * @alpha2: the ISO/IEC 3166 alpha2 the driver claims its regulatory domain * should be in. If @rd is set this should be NULL. Note that if you * set this to NULL you should still set rd->alpha2 to some accepted * alpha2. * * Wireless drivers can use this function to hint to the wireless core * what it believes should be the current regulatory domain by * giving it an ISO/IEC 3166 alpha2 country code it knows its regulatory * domain should be in or by providing a completely built regulatory domain. * If the driver provides an ISO/IEC 3166 alpha2 userspace will be queried * for a regulatory domain structure for the respective country. * * The wiphy must have been registered to cfg80211 prior to this call.
* For cfg80211 drivers this means you must first use wiphy_register(), * for mac80211 drivers you must first use ieee80211_register_hw(). * * Drivers should check the return value; it's possible to get * -ENOMEM. * * Return: 0 on success. -ENOMEM. */ int regulatory_hint(struct wiphy *wiphy, const char *alpha2); /** * regulatory_set_wiphy_regd - set regdom info for self-managed drivers * @wiphy: the wireless device we want to process the regulatory domain on * @rd: the regulatory domain information to use for this wiphy * * Set the regulatory domain information for self-managed wiphys, only they * may use this function. See %REGULATORY_WIPHY_SELF_MANAGED for more * information. * * Return: 0 on success. -EINVAL, -EPERM */ int regulatory_set_wiphy_regd(struct wiphy *wiphy, struct ieee80211_regdomain *rd); /** * regulatory_set_wiphy_regd_sync_rtnl - set regdom for self-managed drivers * @wiphy: the wireless device we want to process the regulatory domain on * @rd: the regulatory domain information to use for this wiphy * * This function requires the RTNL to be held and applies the new regdomain * synchronously to this wiphy. For more details see * regulatory_set_wiphy_regd(). * * Return: 0 on success. -EINVAL, -EPERM */ int regulatory_set_wiphy_regd_sync_rtnl(struct wiphy *wiphy, struct ieee80211_regdomain *rd); /** * wiphy_apply_custom_regulatory - apply a custom driver regulatory domain * @wiphy: the wireless device we want to process the regulatory domain on * @regd: the custom regulatory domain to use for this wiphy * * Drivers can sometimes have custom regulatory domains which do not apply * to a specific country. Drivers can use this to apply such custom regulatory * domains. This routine must be called prior to wiphy registration. The * custom regulatory domain will be trusted completely and as such previous * default channel settings will be disregarded. If no rule is found for a * channel on the regulatory domain the channel will be disabled. * Drivers using this for a wiphy should also set the wiphy flag * REGULATORY_CUSTOM_REG or cfg80211 will set it for the wiphy * that called this helper. */ void wiphy_apply_custom_regulatory(struct wiphy *wiphy, const struct ieee80211_regdomain *regd); /** * freq_reg_info - get regulatory information for the given frequency * @wiphy: the wiphy for which we want to process this rule * @center_freq: Frequency in KHz for which we want regulatory information * * Use this function to get the regulatory rule for a specific frequency on * a given wireless device. If the device has a specific regulatory domain * it wants to follow we respect that unless a country IE has been received * and processed already. * * Return: A valid pointer, or, when an error occurs, for example if no rule * can be found, the return value is encoded using ERR_PTR(). Use IS_ERR() to * check and PTR_ERR() to obtain the numeric return value. The numeric return * value will be -ERANGE if we determine the given center_freq does not even * have a regulatory rule for a frequency range in the center_freq's band. * See freq_in_rule_band() for our current definition of a band -- this is * purely subjective and right now it's 802.11 specific. */ const struct ieee80211_reg_rule *freq_reg_info(struct wiphy *wiphy, u32 center_freq); /** * reg_initiator_name - map regulatory request initiator enum to name * @initiator: the regulatory request initiator * * You can use this to map the regulatory request initiator enum to a * proper string representation.
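 *
 * For instance, a driver's reg_notifier() callback might log who
 * triggered a regulatory change (an illustrative sketch; the function
 * name is hypothetical):
 *
 *	static void mydrv_reg_notifier(struct wiphy *wiphy,
 *				       struct regulatory_request *request)
 *	{
 *		wiphy_info(wiphy, "regdom %c%c set by %s\n",
 *			   request->alpha2[0], request->alpha2[1],
 *			   reg_initiator_name(request->initiator));
 *	}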
*/ const char *reg_initiator_name(enum nl80211_reg_initiator initiator); /** * DOC: Internal regulatory db functions * */ /** * reg_query_regdb_wmm - Query internal regulatory db for wmm rule * Regulatory self-managed drivers can use it to proactively query * wmm rule limitations. * * @alpha2: the ISO/IEC 3166 alpha2 wmm rule to be queried. * @freq: the frequency (in MHz) to be queried. * @rule: pointer to store the wmm rule from the regulatory db. * * Self-managed wireless drivers can use this function to query * the internal regulatory database to check whether the given * ISO/IEC 3166 alpha2 country and freq have wmm rule limitations. * * Drivers should check the return value; it's possible to get * -ENODATA. * * Return: 0 on success. -ENODATA. */ int reg_query_regdb_wmm(char *alpha2, int freq, struct ieee80211_reg_rule *rule); /* * callbacks for asynchronous cfg80211 methods, notification * functions and BSS handling helpers */ /** * cfg80211_scan_done - notify that scan finished * * @request: the corresponding scan request * @info: information about the completed scan */ void cfg80211_scan_done(struct cfg80211_scan_request *request, struct cfg80211_scan_info *info); /** * cfg80211_sched_scan_results - notify that new scan results are available * * @wiphy: the wiphy which got scheduled scan results * @reqid: identifier for the related scheduled scan request */ void cfg80211_sched_scan_results(struct wiphy *wiphy, u64 reqid); /** * cfg80211_sched_scan_stopped - notify that the scheduled scan has stopped * * @wiphy: the wiphy on which the scheduled scan stopped * @reqid: identifier for the related scheduled scan request * * The driver can call this function to inform cfg80211 that the * scheduled scan had to be stopped, for whatever reason. The driver * is then called back via the sched_scan_stop operation when done. */ void cfg80211_sched_scan_stopped(struct wiphy *wiphy, u64 reqid); /** * cfg80211_sched_scan_stopped_rtnl - notify that the scheduled scan has stopped * * @wiphy: the wiphy on which the scheduled scan stopped * @reqid: identifier for the related scheduled scan request * * The driver can call this function to inform cfg80211 that the * scheduled scan had to be stopped, for whatever reason. The driver * is then called back via the sched_scan_stop operation when done. * This function should be called with rtnl locked. */ void cfg80211_sched_scan_stopped_rtnl(struct wiphy *wiphy, u64 reqid); /** * cfg80211_inform_bss_frame_data - inform cfg80211 of a received BSS frame * @wiphy: the wiphy reporting the BSS * @data: the BSS metadata * @mgmt: the management frame (probe response or beacon) * @len: length of the management frame * @gfp: context flags * * This informs cfg80211 that BSS information was found and * the BSS should be updated/added. * * Return: A referenced struct, must be released with cfg80211_put_bss()! * Or %NULL on error.
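 *
 * A minimal usage sketch from a driver's scan/RX path (values are
 * illustrative):
 *
 *	struct cfg80211_inform_bss data = {
 *		.chan = rx_channel,
 *		.scan_width = NL80211_BSS_CHAN_WIDTH_20,
 *		.signal = signal,
 *	};
 *	struct cfg80211_bss *bss;
 *
 *	bss = cfg80211_inform_bss_frame_data(wiphy, &data, mgmt, len,
 *					     GFP_ATOMIC);
 *	if (bss)
 *		cfg80211_put_bss(wiphy, bss);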
*/ struct cfg80211_bss * __must_check cfg80211_inform_bss_frame_data(struct wiphy *wiphy, struct cfg80211_inform_bss *data, struct ieee80211_mgmt *mgmt, size_t len, gfp_t gfp); static inline struct cfg80211_bss * __must_check cfg80211_inform_bss_width_frame(struct wiphy *wiphy, struct ieee80211_channel *rx_channel, enum nl80211_bss_scan_width scan_width, struct ieee80211_mgmt *mgmt, size_t len, s32 signal, gfp_t gfp) { struct cfg80211_inform_bss data = { .chan = rx_channel, .scan_width = scan_width, .signal = signal, }; return cfg80211_inform_bss_frame_data(wiphy, &data, mgmt, len, gfp); } static inline struct cfg80211_bss * __must_check cfg80211_inform_bss_frame(struct wiphy *wiphy, struct ieee80211_channel *rx_channel, struct ieee80211_mgmt *mgmt, size_t len, s32 signal, gfp_t gfp) { struct cfg80211_inform_bss data = { .chan = rx_channel, .scan_width = NL80211_BSS_CHAN_WIDTH_20, .signal = signal, }; return cfg80211_inform_bss_frame_data(wiphy, &data, mgmt, len, gfp); } /** * cfg80211_gen_new_bssid - generate a nontransmitted BSSID for multi-BSSID * @bssid: transmitter BSSID * @max_bssid: max BSSID indicator, taken from Multiple BSSID element * @mbssid_index: BSSID index, taken from Multiple BSSID index element * @new_bssid: calculated nontransmitted BSSID */ static inline void cfg80211_gen_new_bssid(const u8 *bssid, u8 max_bssid, u8 mbssid_index, u8 *new_bssid) { u64 bssid_u64 = ether_addr_to_u64(bssid); u64 mask = GENMASK_ULL(max_bssid - 1, 0); u64 new_bssid_u64; new_bssid_u64 = bssid_u64 & ~mask; new_bssid_u64 |= ((bssid_u64 & mask) + mbssid_index) & mask; u64_to_ether_addr(new_bssid_u64, new_bssid); } /** * cfg80211_is_element_inherited - returns if element ID should be inherited * @element: element to check * @non_inherit_element: non inheritance element */ bool cfg80211_is_element_inherited(const struct element *element, const struct element *non_inherit_element); /** * cfg80211_merge_profile - merges a MBSSID profile if it is split between IEs * @ie: ies * @ielen: length of IEs * @mbssid_elem: current MBSSID element * @sub_elem: current MBSSID subelement (profile) * @merged_ie: location of the merged profile * @max_copy_len: max merged profile length */ size_t cfg80211_merge_profile(const u8 *ie, size_t ielen, const struct element *mbssid_elem, const struct element *sub_elem, u8 **merged_ie, size_t max_copy_len); /** * enum cfg80211_bss_frame_type - frame type that the BSS data came from * @CFG80211_BSS_FTYPE_UNKNOWN: driver doesn't know whether the data is * from a beacon or probe response * @CFG80211_BSS_FTYPE_BEACON: data comes from a beacon * @CFG80211_BSS_FTYPE_PRESP: data comes from a probe response */ enum cfg80211_bss_frame_type { CFG80211_BSS_FTYPE_UNKNOWN, CFG80211_BSS_FTYPE_BEACON, CFG80211_BSS_FTYPE_PRESP, }; /** * cfg80211_inform_bss_data - inform cfg80211 of a new BSS * * @wiphy: the wiphy reporting the BSS * @data: the BSS metadata * @ftype: frame type (if known) * @bssid: the BSSID of the BSS * @tsf: the TSF sent by the peer in the beacon/probe response (or 0) * @capability: the capability field sent by the peer * @beacon_interval: the beacon interval announced by the peer * @ie: additional IEs sent by the peer * @ielen: length of the additional IEs * @gfp: context flags * * This informs cfg80211 that BSS information was found and * the BSS should be updated/added. * * Return: A referenced struct, must be released with cfg80211_put_bss()! * Or %NULL on error. 
*/ struct cfg80211_bss * __must_check cfg80211_inform_bss_data(struct wiphy *wiphy, struct cfg80211_inform_bss *data, enum cfg80211_bss_frame_type ftype, const u8 *bssid, u64 tsf, u16 capability, u16 beacon_interval, const u8 *ie, size_t ielen, gfp_t gfp); static inline struct cfg80211_bss * __must_check cfg80211_inform_bss_width(struct wiphy *wiphy, struct ieee80211_channel *rx_channel, enum nl80211_bss_scan_width scan_width, enum cfg80211_bss_frame_type ftype, const u8 *bssid, u64 tsf, u16 capability, u16 beacon_interval, const u8 *ie, size_t ielen, s32 signal, gfp_t gfp) { struct cfg80211_inform_bss data = { .chan = rx_channel, .scan_width = scan_width, .signal = signal, }; return cfg80211_inform_bss_data(wiphy, &data, ftype, bssid, tsf, capability, beacon_interval, ie, ielen, gfp); } static inline struct cfg80211_bss * __must_check cfg80211_inform_bss(struct wiphy *wiphy, struct ieee80211_channel *rx_channel, enum cfg80211_bss_frame_type ftype, const u8 *bssid, u64 tsf, u16 capability, u16 beacon_interval, const u8 *ie, size_t ielen, s32 signal, gfp_t gfp) { struct cfg80211_inform_bss data = { .chan = rx_channel, .scan_width = NL80211_BSS_CHAN_WIDTH_20, .signal = signal, }; return cfg80211_inform_bss_data(wiphy, &data, ftype, bssid, tsf, capability, beacon_interval, ie, ielen, gfp); } /** * cfg80211_get_bss - get a BSS reference * @wiphy: the wiphy this BSS struct belongs to * @channel: the channel to search on (or %NULL) * @bssid: the desired BSSID (or %NULL) * @ssid: the desired SSID (or %NULL) * @ssid_len: length of the SSID (or 0) * @bss_type: type of BSS, see &enum ieee80211_bss_type * @privacy: privacy filter, see &enum ieee80211_privacy */ struct cfg80211_bss *cfg80211_get_bss(struct wiphy *wiphy, struct ieee80211_channel *channel, const u8 *bssid, const u8 *ssid, size_t ssid_len, enum ieee80211_bss_type bss_type, enum ieee80211_privacy privacy); static inline struct cfg80211_bss * cfg80211_get_ibss(struct wiphy *wiphy, struct ieee80211_channel *channel, const u8 *ssid, size_t ssid_len) { return cfg80211_get_bss(wiphy, channel, NULL, ssid, ssid_len, IEEE80211_BSS_TYPE_IBSS, IEEE80211_PRIVACY_ANY); } /** * cfg80211_ref_bss - reference BSS struct * @wiphy: the wiphy this BSS struct belongs to * @bss: the BSS struct to reference * * Increments the refcount of the given BSS struct. */ void cfg80211_ref_bss(struct wiphy *wiphy, struct cfg80211_bss *bss); /** * cfg80211_put_bss - unref BSS struct * @wiphy: the wiphy this BSS struct belongs to * @bss: the BSS struct * * Decrements the refcount of the given BSS struct. */ void cfg80211_put_bss(struct wiphy *wiphy, struct cfg80211_bss *bss); /** * cfg80211_unlink_bss - unlink BSS from internal data structures * @wiphy: the wiphy * @bss: the bss to remove * * This function removes the given BSS from the internal data structures * thereby making it no longer show up in scan results etc. Use this * function when you detect a BSS is gone. Normally BSSes will also time * out, so it is not necessary to use this function at all. */ void cfg80211_unlink_bss(struct wiphy *wiphy, struct cfg80211_bss *bss); /** * cfg80211_bss_iter - iterate all BSS entries * * This function iterates over the BSS entries associated with the given wiphy * and calls the callback for the iterated BSS. The iterator function is not * allowed to call functions that might modify the internal state of the BSS DB. 
* * @wiphy: the wiphy * @chandef: if given, the iterator function will be called only if the channel * of the currently iterated BSS is a subset of the given channel. * @iter: the iterator function to call * @iter_data: an argument to the iterator function */ void cfg80211_bss_iter(struct wiphy *wiphy, struct cfg80211_chan_def *chandef, void (*iter)(struct wiphy *wiphy, struct cfg80211_bss *bss, void *data), void *iter_data); static inline enum nl80211_bss_scan_width cfg80211_chandef_to_scan_width(const struct cfg80211_chan_def *chandef) { switch (chandef->width) { case NL80211_CHAN_WIDTH_5: return NL80211_BSS_CHAN_WIDTH_5; case NL80211_CHAN_WIDTH_10: return NL80211_BSS_CHAN_WIDTH_10; default: return NL80211_BSS_CHAN_WIDTH_20; } } /** * cfg80211_rx_mlme_mgmt - notification of processed MLME management frame * @dev: network device * @buf: authentication frame (header + body) * @len: length of the frame data * * This function is called whenever an authentication, disassociation or * deauthentication frame has been received and processed in station mode. * After being asked to authenticate via cfg80211_ops::auth() the driver must * call either this function or cfg80211_auth_timeout(). * After being asked to associate via cfg80211_ops::assoc() the driver must * call either this function or cfg80211_auth_timeout(). * While connected, the driver must call this for received and processed * disassociation and deauthentication frames. If the frame couldn't be used * because it was unprotected, the driver must call the function * cfg80211_rx_unprot_mlme_mgmt() instead. * * This function may sleep. The caller must hold the corresponding wdev's mutex. */ void cfg80211_rx_mlme_mgmt(struct net_device *dev, const u8 *buf, size_t len); /** * cfg80211_auth_timeout - notification of timed out authentication * @dev: network device * @addr: The MAC address of the device with which the authentication timed out * * This function may sleep. The caller must hold the corresponding wdev's * mutex. */ void cfg80211_auth_timeout(struct net_device *dev, const u8 *addr); /** * cfg80211_rx_assoc_resp - notification of processed association response * @dev: network device * @bss: the BSS that association was requested with, ownership of the pointer * moves to cfg80211 in this call * @buf: (Re)Association Response frame (header + body) * @len: length of the frame data * @uapsd_queues: bitmap of queues configured for uapsd. Same format * as the AC bitmap in the QoS info field * @req_ies: information elements from the (Re)Association Request frame * @req_ies_len: length of req_ies data * * After being asked to associate via cfg80211_ops::assoc() the driver must * call either this function or cfg80211_auth_timeout(). * * This function may sleep. The caller must hold the corresponding wdev's mutex. */ void cfg80211_rx_assoc_resp(struct net_device *dev, struct cfg80211_bss *bss, const u8 *buf, size_t len, int uapsd_queues, const u8 *req_ies, size_t req_ies_len); /** * cfg80211_assoc_timeout - notification of timed out association * @dev: network device * @bss: The BSS entry with which association timed out. * * This function may sleep. The caller must hold the corresponding wdev's mutex. */ void cfg80211_assoc_timeout(struct net_device *dev, struct cfg80211_bss *bss); /** * cfg80211_abandon_assoc - notify cfg80211 of abandoned association attempt * @dev: network device * @bss: The BSS entry with which association was abandoned.
* * Call this whenever - for reasons reported through other API, like deauth RX - * an association attempt was abandoned. * This function may sleep. The caller must hold the corresponding wdev's mutex. */ void cfg80211_abandon_assoc(struct net_device *dev, struct cfg80211_bss *bss); /** * cfg80211_tx_mlme_mgmt - notification of transmitted deauth/disassoc frame * @dev: network device * @buf: 802.11 frame (header + body) * @len: length of the frame data * * This function is called whenever deauthentication has been processed in * station mode. This includes both received deauthentication frames and * locally generated ones. This function may sleep. The caller must hold the * corresponding wdev's mutex. */ void cfg80211_tx_mlme_mgmt(struct net_device *dev, const u8 *buf, size_t len); /** * cfg80211_rx_unprot_mlme_mgmt - notification of unprotected mlme mgmt frame * @dev: network device * @buf: deauthentication frame (header + body) * @len: length of the frame data * * This function is called whenever a received deauthentication or disassociation * frame has been dropped in station mode because of MFP being used but the * frame was not protected. This function may sleep. */ void cfg80211_rx_unprot_mlme_mgmt(struct net_device *dev, const u8 *buf, size_t len); /** * cfg80211_michael_mic_failure - notification of Michael MIC failure (TKIP) * @dev: network device * @addr: The source MAC address of the frame * @key_type: The key type that the received frame used * @key_id: Key identifier (0..3). Can be -1 if missing. * @tsc: The TSC value of the frame that generated the MIC failure (6 octets) * @gfp: allocation flags * * This function is called whenever the local MAC detects a MIC failure in a * received frame. This matches with MLME-MICHAELMICFAILURE.indication() * primitive. */ void cfg80211_michael_mic_failure(struct net_device *dev, const u8 *addr, enum nl80211_key_type key_type, int key_id, const u8 *tsc, gfp_t gfp); /** * cfg80211_ibss_joined - notify cfg80211 that device joined an IBSS * * @dev: network device * @bssid: the BSSID of the IBSS joined * @channel: the channel of the IBSS joined * @gfp: allocation flags * * This function notifies cfg80211 that the device joined an IBSS or * switched to a different BSSID. Before this function can be called, * either a beacon has to have been received from the IBSS, or one of * the cfg80211_inform_bss{,_frame} functions must have been called * with the locally generated beacon -- this guarantees that there is * always a scan result for this IBSS. cfg80211 will handle the rest. */ void cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid, struct ieee80211_channel *channel, gfp_t gfp); /** * cfg80211_notify_new_peer_candidate - notify cfg80211 of a new mesh peer candidate * * @dev: network device * @macaddr: the MAC address of the new candidate * @ie: information elements advertised by the peer candidate * @ie_len: length of the information elements buffer * @sig_dbm: signal level in dBm * @gfp: allocation flags * * This function notifies cfg80211 that the mesh peer candidate has been * detected, most likely via a beacon or, less likely, via a probe response. * cfg80211 then sends a notification to userspace. */ void cfg80211_notify_new_peer_candidate(struct net_device *dev, const u8 *macaddr, const u8 *ie, u8 ie_len, int sig_dbm, gfp_t gfp); /** * DOC: RFkill integration * * RFkill integration in cfg80211 is almost invisible to drivers, * as cfg80211 automatically registers an rfkill instance for each * wireless device it knows about.
Soft kill is also translated * into disconnecting and turning all interfaces off, drivers are * expected to turn off the device when all interfaces are down. * * However, devices may have a hard RFkill line, in which case they * also need to interact with the rfkill subsystem, via cfg80211. * They can do this with a few helper functions documented here. */ /** * wiphy_rfkill_set_hw_state - notify cfg80211 about hw block state * @wiphy: the wiphy * @blocked: block status */ void wiphy_rfkill_set_hw_state(struct wiphy *wiphy, bool blocked); /** * wiphy_rfkill_start_polling - start polling rfkill * @wiphy: the wiphy */ void wiphy_rfkill_start_polling(struct wiphy *wiphy); /** * wiphy_rfkill_stop_polling - stop polling rfkill * @wiphy: the wiphy */ void wiphy_rfkill_stop_polling(struct wiphy *wiphy); /** * DOC: Vendor commands * * Occasionally, there are special protocol or firmware features that * can't be implemented very openly. For this and similar cases, the * vendor command functionality allows implementing the features with * (typically closed-source) userspace and firmware, using nl80211 as * the configuration mechanism. * * A driver supporting vendor commands must register them as an array * in struct wiphy, with handlers for each one, each command has an * OUI and sub command ID to identify it. * * Note that this feature should not be (ab)used to implement protocol * features that could openly be shared across drivers. In particular, * it must never be required to use vendor commands to implement any * "normal" functionality that higher-level userspace like connection * managers etc. need. */ struct sk_buff *__cfg80211_alloc_reply_skb(struct wiphy *wiphy, enum nl80211_commands cmd, enum nl80211_attrs attr, int approxlen); struct sk_buff *__cfg80211_alloc_event_skb(struct wiphy *wiphy, struct wireless_dev *wdev, enum nl80211_commands cmd, enum nl80211_attrs attr, unsigned int portid, int vendor_event_idx, int approxlen, gfp_t gfp); void __cfg80211_send_event_skb(struct sk_buff *skb, gfp_t gfp); /** * cfg80211_vendor_cmd_alloc_reply_skb - allocate vendor command reply * @wiphy: the wiphy * @approxlen: an upper bound of the length of the data that will * be put into the skb * * This function allocates and pre-fills an skb for a reply to * a vendor command. Since it is intended for a reply, calling * it outside of a vendor command's doit() operation is invalid. * * The returned skb is pre-filled with some identifying data in * a way that any data that is put into the skb (with skb_put(), * nla_put() or similar) will end up being within the * %NL80211_ATTR_VENDOR_DATA attribute, so all that needs to be done * with the skb is adding data for the corresponding userspace tool * which can then read that data out of the vendor data attribute. * You must not modify the skb in any other way. * * When done, call cfg80211_vendor_cmd_reply() with the skb and return * its error code as the result of the doit() operation. * * Return: An allocated and pre-filled skb. %NULL if any errors happen. */ static inline struct sk_buff * cfg80211_vendor_cmd_alloc_reply_skb(struct wiphy *wiphy, int approxlen) { return __cfg80211_alloc_reply_skb(wiphy, NL80211_CMD_VENDOR, NL80211_ATTR_VENDOR_DATA, approxlen); } /** * cfg80211_vendor_cmd_reply - send the reply skb * @skb: The skb, must have been allocated with * cfg80211_vendor_cmd_alloc_reply_skb() * * Since calling this function will usually be the last thing * before returning from the vendor command doit() you should * return the error code. 
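 * A typical doit() handler therefore ends along these lines (a sketch;
 * the attribute ID is a hypothetical driver-private one):
 *
 *	struct sk_buff *skb;
 *
 *	skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, 100);
 *	if (!skb)
 *		return -ENOMEM;
 *	if (nla_put_u32(skb, MYDRV_VENDOR_ATTR_FOO, val)) {
 *		kfree_skb(skb);
 *		return -ENOBUFS;
 *	}
 *	return cfg80211_vendor_cmd_reply(skb);
 *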
Note that this function consumes the * skb regardless of the return value. * * Return: An error code or 0 on success. */ int cfg80211_vendor_cmd_reply(struct sk_buff *skb); /** * cfg80211_vendor_cmd_get_sender * @wiphy: the wiphy * * Return the current netlink port ID in a vendor command handler. * Valid to call only there. */ unsigned int cfg80211_vendor_cmd_get_sender(struct wiphy *wiphy); /** * cfg80211_vendor_event_alloc - allocate vendor-specific event skb * @wiphy: the wiphy * @wdev: the wireless device * @event_idx: index of the vendor event in the wiphy's vendor_events * @approxlen: an upper bound of the length of the data that will * be put into the skb * @gfp: allocation flags * * This function allocates and pre-fills an skb for an event on the * vendor-specific multicast group. * * If wdev != NULL, both the ifindex and identifier of the specified * wireless device are added to the event message before the vendor data * attribute. * * When done filling the skb, call cfg80211_vendor_event() with the * skb to send the event. * * Return: An allocated and pre-filled skb. %NULL if any errors happen. */ static inline struct sk_buff * cfg80211_vendor_event_alloc(struct wiphy *wiphy, struct wireless_dev *wdev, int approxlen, int event_idx, gfp_t gfp) { return __cfg80211_alloc_event_skb(wiphy, wdev, NL80211_CMD_VENDOR, NL80211_ATTR_VENDOR_DATA, 0, event_idx, approxlen, gfp); } /** * cfg80211_vendor_event_alloc_ucast - alloc unicast vendor-specific event skb * @wiphy: the wiphy * @wdev: the wireless device * @event_idx: index of the vendor event in the wiphy's vendor_events * @portid: port ID of the receiver * @approxlen: an upper bound of the length of the data that will * be put into the skb * @gfp: allocation flags * * This function allocates and pre-fills an skb for an event to send to * a specific (userland) socket. This socket would previously have been * obtained by cfg80211_vendor_cmd_get_sender(), and the caller MUST take * care to register a netlink notifier to see when the socket closes. * * If wdev != NULL, both the ifindex and identifier of the specified * wireless device are added to the event message before the vendor data * attribute. * * When done filling the skb, call cfg80211_vendor_event() with the * skb to send the event. * * Return: An allocated and pre-filled skb. %NULL if any errors happen. */ static inline struct sk_buff * cfg80211_vendor_event_alloc_ucast(struct wiphy *wiphy, struct wireless_dev *wdev, unsigned int portid, int approxlen, int event_idx, gfp_t gfp) { return __cfg80211_alloc_event_skb(wiphy, wdev, NL80211_CMD_VENDOR, NL80211_ATTR_VENDOR_DATA, portid, event_idx, approxlen, gfp); } /** * cfg80211_vendor_event - send the event * @skb: The skb, must have been allocated with cfg80211_vendor_event_alloc() * @gfp: allocation flags * * This function sends the given @skb, which must have been allocated * by cfg80211_vendor_event_alloc(), as an event. It always consumes it. */ static inline void cfg80211_vendor_event(struct sk_buff *skb, gfp_t gfp) { __cfg80211_send_event_skb(skb, gfp); } #ifdef CPTCFG_NL80211_TESTMODE /** * DOC: Test mode * * Test mode is a set of utility functions to allow drivers to * interact with driver-specific tools to aid, for instance, * factory programming. * * This chapter describes how drivers interact with it, for more * information see the nl80211 book's chapter on it. 
*/ /** * cfg80211_testmode_alloc_reply_skb - allocate testmode reply * @wiphy: the wiphy * @approxlen: an upper bound of the length of the data that will * be put into the skb * * This function allocates and pre-fills an skb for a reply to * the testmode command. Since it is intended for a reply, calling * it outside of the @testmode_cmd operation is invalid. * * The returned skb is pre-filled with the wiphy index and set up in * a way that any data that is put into the skb (with skb_put(), * nla_put() or similar) will end up being within the * %NL80211_ATTR_TESTDATA attribute, so all that needs to be done * with the skb is adding data for the corresponding userspace tool * which can then read that data out of the testdata attribute. You * must not modify the skb in any other way. * * When done, call cfg80211_testmode_reply() with the skb and return * its error code as the result of the @testmode_cmd operation. * * Return: An allocated and pre-filled skb. %NULL if any errors happen. */ static inline struct sk_buff * cfg80211_testmode_alloc_reply_skb(struct wiphy *wiphy, int approxlen) { return __cfg80211_alloc_reply_skb(wiphy, NL80211_CMD_TESTMODE, NL80211_ATTR_TESTDATA, approxlen); } /** * cfg80211_testmode_reply - send the reply skb * @skb: The skb, must have been allocated with * cfg80211_testmode_alloc_reply_skb() * * Since calling this function will usually be the last thing * before returning from the @testmode_cmd you should return * the error code. Note that this function consumes the skb * regardless of the return value. * * Return: An error code or 0 on success. */ static inline int cfg80211_testmode_reply(struct sk_buff *skb) { return cfg80211_vendor_cmd_reply(skb); } /** * cfg80211_testmode_alloc_event_skb - allocate testmode event * @wiphy: the wiphy * @approxlen: an upper bound of the length of the data that will * be put into the skb * @gfp: allocation flags * * This function allocates and pre-fills an skb for an event on the * testmode multicast group. * * The returned skb is set up in the same way as with * cfg80211_testmode_alloc_reply_skb() but prepared for an event. As * there, you should simply add data to it that will then end up in the * %NL80211_ATTR_TESTDATA attribute. Again, you must not modify the skb * in any other way. * * When done filling the skb, call cfg80211_testmode_event() with the * skb to send the event. * * Return: An allocated and pre-filled skb. %NULL if any errors happen. */ static inline struct sk_buff * cfg80211_testmode_alloc_event_skb(struct wiphy *wiphy, int approxlen, gfp_t gfp) { return __cfg80211_alloc_event_skb(wiphy, NULL, NL80211_CMD_TESTMODE, NL80211_ATTR_TESTDATA, 0, -1, approxlen, gfp); } /** * cfg80211_testmode_event - send the event * @skb: The skb, must have been allocated with * cfg80211_testmode_alloc_event_skb() * @gfp: allocation flags * * This function sends the given @skb, which must have been allocated * by cfg80211_testmode_alloc_event_skb(), as an event. It always * consumes it. 
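 *
 * Sketch of a driver emitting such an event (the attribute ID is a
 * hypothetical driver-private one):
 *
 *	struct sk_buff *skb;
 *
 *	skb = cfg80211_testmode_alloc_event_skb(wiphy, 100, GFP_ATOMIC);
 *	if (!skb)
 *		return;
 *	if (nla_put_u32(skb, MYDRV_TM_ATTR_EVENT, evt)) {
 *		kfree_skb(skb);
 *		return;
 *	}
 *	cfg80211_testmode_event(skb, GFP_ATOMIC);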
*/ static inline void cfg80211_testmode_event(struct sk_buff *skb, gfp_t gfp) { __cfg80211_send_event_skb(skb, gfp); } #define CFG80211_TESTMODE_CMD(cmd) .testmode_cmd = (cmd), #define CFG80211_TESTMODE_DUMP(cmd) .testmode_dump = (cmd), #else #define CFG80211_TESTMODE_CMD(cmd) #define CFG80211_TESTMODE_DUMP(cmd) #endif /** * struct cfg80211_fils_resp_params - FILS connection response params * @kek: KEK derived from a successful FILS connection (may be %NULL) * @kek_len: Length of @kek in octets * @update_erp_next_seq_num: Boolean value to specify whether the value in * @erp_next_seq_num is valid. * @erp_next_seq_num: The next sequence number to use in ERP message in * FILS Authentication. This value should be specified irrespective of the * status for a FILS connection. * @pmk: A new PMK if derived from a successful FILS connection (may be %NULL). * @pmk_len: Length of @pmk in octets * @pmkid: A new PMKID if derived from a successful FILS connection or the PMKID * used for this FILS connection (may be %NULL). */ struct cfg80211_fils_resp_params { const u8 *kek; size_t kek_len; bool update_erp_next_seq_num; u16 erp_next_seq_num; const u8 *pmk; size_t pmk_len; const u8 *pmkid; }; /** * struct cfg80211_connect_resp_params - Connection response params * @status: Status code, %WLAN_STATUS_SUCCESS for successful connection, use * %WLAN_STATUS_UNSPECIFIED_FAILURE if your device cannot give you * the real status code for failures. If this call is used to report a * failure due to a timeout (e.g., not receiving an Authentication frame * from the AP) instead of an explicit rejection by the AP, -1 is used to * indicate that this is a failure, but without a status code. * @timeout_reason is used to report the reason for the timeout in that * case. * @bssid: The BSSID of the AP (may be %NULL) * @bss: Entry of bss to which the STA got connected, can be obtained through * cfg80211_get_bss() (may be %NULL). Only one parameter among @bssid and * @bss needs to be specified. * @req_ie: Association request IEs (may be %NULL) * @req_ie_len: Association request IEs length * @resp_ie: Association response IEs (may be %NULL) * @resp_ie_len: Association response IEs length * @fils: FILS connection response parameters. * @timeout_reason: Reason for connection timeout. This is used when the * connection fails due to a timeout instead of an explicit rejection from * the AP. %NL80211_TIMEOUT_UNSPECIFIED is used when the timeout reason is * not known. This value is used only if @status < 0 to indicate that the * failure is due to a timeout and not due to explicit rejection by the AP. * This value is ignored in other cases (@status >= 0). */ struct cfg80211_connect_resp_params { int status; const u8 *bssid; struct cfg80211_bss *bss; const u8 *req_ie; size_t req_ie_len; const u8 *resp_ie; size_t resp_ie_len; struct cfg80211_fils_resp_params fils; enum nl80211_timeout_reason timeout_reason; }; /** * cfg80211_connect_done - notify cfg80211 of connection result * * @dev: network device * @params: connection response parameters * @gfp: allocation flags * * It should be called by the underlying driver once execution of the connection * request from connect() has been completed. This is similar to * cfg80211_connect_bss(), but takes a structure pointer for connection response * parameters. Only one of the functions among cfg80211_connect_bss(), * cfg80211_connect_result(), cfg80211_connect_timeout(), * and cfg80211_connect_done() should be called.
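 *
 * A sketch of reporting a successful association with this helper
 * (the IE buffers and their lengths are illustrative):
 *
 *	struct cfg80211_connect_resp_params params = {
 *		.status = WLAN_STATUS_SUCCESS,
 *		.bssid = bssid,
 *		.req_ie = assoc_req_ies,
 *		.req_ie_len = assoc_req_ies_len,
 *		.resp_ie = assoc_resp_ies,
 *		.resp_ie_len = assoc_resp_ies_len,
 *	};
 *
 *	cfg80211_connect_done(dev, &params, GFP_KERNEL);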
*/ void cfg80211_connect_done(struct net_device *dev, struct cfg80211_connect_resp_params *params, gfp_t gfp); /** * cfg80211_connect_bss - notify cfg80211 of connection result * * @dev: network device * @bssid: the BSSID of the AP * @bss: entry of bss to which the STA got connected, can be obtained * through cfg80211_get_bss() (may be %NULL) * @req_ie: association request IEs (may be %NULL) * @req_ie_len: association request IEs length * @resp_ie: association response IEs (may be %NULL) * @resp_ie_len: assoc response IEs length * @status: status code, %WLAN_STATUS_SUCCESS for successful connection, use * %WLAN_STATUS_UNSPECIFIED_FAILURE if your device cannot give you * the real status code for failures. If this call is used to report a * failure due to a timeout (e.g., not receiving an Authentication frame * from the AP) instead of an explicit rejection by the AP, -1 is used to * indicate that this is a failure, but without a status code. * @timeout_reason is used to report the reason for the timeout in that * case. * @gfp: allocation flags * @timeout_reason: reason for connection timeout. This is used when the * connection fails due to a timeout instead of an explicit rejection from * the AP. %NL80211_TIMEOUT_UNSPECIFIED is used when the timeout reason is * not known. This value is used only if @status < 0 to indicate that the * failure is due to a timeout and not due to explicit rejection by the AP. * This value is ignored in other cases (@status >= 0). * * It should be called by the underlying driver once execution of the connection * request from connect() has been completed. This is similar to * cfg80211_connect_result(), but with the option of identifying the exact bss * entry for the connection. Only one of the functions among * cfg80211_connect_bss(), cfg80211_connect_result(), * cfg80211_connect_timeout(), and cfg80211_connect_done() should be called. */ static inline void cfg80211_connect_bss(struct net_device *dev, const u8 *bssid, struct cfg80211_bss *bss, const u8 *req_ie, size_t req_ie_len, const u8 *resp_ie, size_t resp_ie_len, int status, gfp_t gfp, enum nl80211_timeout_reason timeout_reason) { struct cfg80211_connect_resp_params params; memset(&params, 0, sizeof(params)); params.status = status; params.bssid = bssid; params.bss = bss; params.req_ie = req_ie; params.req_ie_len = req_ie_len; params.resp_ie = resp_ie; params.resp_ie_len = resp_ie_len; params.timeout_reason = timeout_reason; cfg80211_connect_done(dev, &params, gfp); } /** * cfg80211_connect_result - notify cfg80211 of connection result * * @dev: network device * @bssid: the BSSID of the AP * @req_ie: association request IEs (may be %NULL) * @req_ie_len: association request IEs length * @resp_ie: association response IEs (may be %NULL) * @resp_ie_len: assoc response IEs length * @status: status code, %WLAN_STATUS_SUCCESS for successful connection, use * %WLAN_STATUS_UNSPECIFIED_FAILURE if your device cannot give you * the real status code for failures. * @gfp: allocation flags * * It should be called by the underlying driver once execution of the connection * request from connect() has been completed. This is similar to * cfg80211_connect_bss() which allows the exact bss entry to be specified. Only * one of the functions among cfg80211_connect_bss(), cfg80211_connect_result(), * cfg80211_connect_timeout(), and cfg80211_connect_done() should be called.
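 *
 * For example, a driver that has nothing but the failure itself to
 * report might do (sketch):
 *
 *	cfg80211_connect_result(dev, bssid, NULL, 0, NULL, 0,
 *				WLAN_STATUS_UNSPECIFIED_FAILURE,
 *				GFP_KERNEL);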
*/ static inline void cfg80211_connect_result(struct net_device *dev, const u8 *bssid, const u8 *req_ie, size_t req_ie_len, const u8 *resp_ie, size_t resp_ie_len, u16 status, gfp_t gfp) { cfg80211_connect_bss(dev, bssid, NULL, req_ie, req_ie_len, resp_ie, resp_ie_len, status, gfp, NL80211_TIMEOUT_UNSPECIFIED); } /** * cfg80211_connect_timeout - notify cfg80211 of connection timeout * * @dev: network device * @bssid: the BSSID of the AP * @req_ie: association request IEs (may be %NULL) * @req_ie_len: association request IEs length * @gfp: allocation flags * @timeout_reason: reason for connection timeout. * * It should be called by the underlying driver whenever connect() has failed * in a sequence where no explicit authentication/association rejection was * received from the AP. This could happen, e.g., due to not being able to send * out the Authentication or Association Request frame or timing out while * waiting for the response. Only one of the functions among * cfg80211_connect_bss(), cfg80211_connect_result(), * cfg80211_connect_timeout(), and cfg80211_connect_done() should be called. */ static inline void cfg80211_connect_timeout(struct net_device *dev, const u8 *bssid, const u8 *req_ie, size_t req_ie_len, gfp_t gfp, enum nl80211_timeout_reason timeout_reason) { cfg80211_connect_bss(dev, bssid, NULL, req_ie, req_ie_len, NULL, 0, -1, gfp, timeout_reason); } /** * struct cfg80211_roam_info - driver initiated roaming information * * @channel: the channel of the new AP * @bss: entry of the bss to which the STA roamed (may be %NULL if @bssid is set) * @bssid: the BSSID of the new AP (may be %NULL if @bss is set) * @req_ie: association request IEs (may be %NULL) * @req_ie_len: association request IEs length * @resp_ie: association response IEs (may be %NULL) * @resp_ie_len: assoc response IEs length * @fils: FILS related roaming information. */ struct cfg80211_roam_info { struct ieee80211_channel *channel; struct cfg80211_bss *bss; const u8 *bssid; const u8 *req_ie; size_t req_ie_len; const u8 *resp_ie; size_t resp_ie_len; struct cfg80211_fils_resp_params fils; }; /** * cfg80211_roamed - notify cfg80211 of roaming * * @dev: network device * @info: information about the new BSS. &struct cfg80211_roam_info. * @gfp: allocation flags * * This function may be called with the driver passing either the BSSID of the * new AP or passing the bss entry to avoid a race in timeout of the bss entry. * It should be called by the underlying driver whenever it roamed from one AP * to another while connected. Drivers which have roaming implemented in * firmware should pass the bss entry to avoid a race in bss entry timeout where * the bss entry of the new AP is seen in the driver, but gets timed out by the * time it is accessed in __cfg80211_roamed() due to delay in scheduling * rdev->event_work. In case of any failures, the reference is released * either in cfg80211_roamed() or in __cfg80211_roamed(). Otherwise, it will be * released while disconnecting from the current bss. */ void cfg80211_roamed(struct net_device *dev, struct cfg80211_roam_info *info, gfp_t gfp); /** * cfg80211_port_authorized - notify cfg80211 of successful security association * * @dev: network device * @bssid: the BSSID of the AP * @gfp: allocation flags * * This function should be called by a driver that supports 4 way handshake * offload after a security association was successfully established (i.e., * the 4 way handshake was completed successfully).
The call to this function * should be preceded with a call to cfg80211_connect_result(), * cfg80211_connect_done(), cfg80211_connect_bss() or cfg80211_roamed() to * indicate the 802.11 association. */ void cfg80211_port_authorized(struct net_device *dev, const u8 *bssid, gfp_t gfp); /** * cfg80211_disconnected - notify cfg80211 that connection was dropped * * @dev: network device * @ie: information elements of the deauth/disassoc frame (may be %NULL) * @ie_len: length of IEs * @reason: reason code for the disconnection, set it to 0 if unknown * @locally_generated: disconnection was requested locally * @gfp: allocation flags * * After it calls this function, the driver should enter an idle state * and not try to connect to any AP any more. */ void cfg80211_disconnected(struct net_device *dev, u16 reason, const u8 *ie, size_t ie_len, bool locally_generated, gfp_t gfp); /** * cfg80211_ready_on_channel - notification of remain_on_channel start * @wdev: wireless device * @cookie: the request cookie * @chan: The current channel (from remain_on_channel request) * @duration: Duration in milliseconds that the driver intends to remain on the * channel * @gfp: allocation flags */ void cfg80211_ready_on_channel(struct wireless_dev *wdev, u64 cookie, struct ieee80211_channel *chan, unsigned int duration, gfp_t gfp); /** * cfg80211_remain_on_channel_expired - remain_on_channel duration expired * @wdev: wireless device * @cookie: the request cookie * @chan: The current channel (from remain_on_channel request) * @gfp: allocation flags */ void cfg80211_remain_on_channel_expired(struct wireless_dev *wdev, u64 cookie, struct ieee80211_channel *chan, gfp_t gfp); /** * cfg80211_sinfo_alloc_tid_stats - allocate per-tid statistics. * * @sinfo: the station information * @gfp: allocation flags */ int cfg80211_sinfo_alloc_tid_stats(struct station_info *sinfo, gfp_t gfp); /** * cfg80211_sinfo_release_content - release contents of station info * @sinfo: the station information * * Releases any potentially allocated sub-information of the station * information, but not the struct itself (since it's typically on * the stack.) */ static inline void cfg80211_sinfo_release_content(struct station_info *sinfo) { kfree(sinfo->pertid); } /** * cfg80211_new_sta - notify userspace about station * * @dev: the netdev * @mac_addr: the station's address * @sinfo: the station information * @gfp: allocation flags */ void cfg80211_new_sta(struct net_device *dev, const u8 *mac_addr, struct station_info *sinfo, gfp_t gfp); /** * cfg80211_del_sta_sinfo - notify userspace about deletion of a station * @dev: the netdev * @mac_addr: the station's address * @sinfo: the station information/statistics * @gfp: allocation flags */ void cfg80211_del_sta_sinfo(struct net_device *dev, const u8 *mac_addr, struct station_info *sinfo, gfp_t gfp); /** * cfg80211_del_sta - notify userspace about deletion of a station * * @dev: the netdev * @mac_addr: the station's address * @gfp: allocation flags */ static inline void cfg80211_del_sta(struct net_device *dev, const u8 *mac_addr, gfp_t gfp) { cfg80211_del_sta_sinfo(dev, mac_addr, NULL, gfp); } /** * cfg80211_conn_failed - connection request failed notification * * @dev: the netdev * @mac_addr: the station's address * @reason: the reason for connection failure * @gfp: allocation flags * * This function is called whenever a station tries to connect to an AP * but could not, because the AP rejected the connection * for some reason.
* * The reason for connection failure can be any of the values from * &enum nl80211_connect_failed_reason. */ void cfg80211_conn_failed(struct net_device *dev, const u8 *mac_addr, enum nl80211_connect_failed_reason reason, gfp_t gfp); /** * cfg80211_rx_mgmt - notification of received, unprocessed management frame * @wdev: wireless device receiving the frame * @freq: Frequency on which the frame was received in MHz * @sig_dbm: signal strength in dBm, or 0 if unknown * @buf: Management frame (header + body) * @len: length of the frame data * @flags: flags, as defined in enum nl80211_rxmgmt_flags * * This function is called whenever an Action frame is received for a station * mode interface, but is not processed in the kernel. * * Return: %true if a user space application has registered for this frame. * For action frames, that makes it responsible for rejecting unrecognized * action frames; %false otherwise, in which case for action frames the * driver is responsible for rejecting the frame. */ bool cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq, int sig_dbm, const u8 *buf, size_t len, u32 flags); /** * cfg80211_mgmt_tx_status - notification of TX status for management frame * @wdev: wireless device the frame was sent on * @cookie: Cookie returned by cfg80211_ops::mgmt_tx() * @buf: Management frame (header + body) * @len: length of the frame data * @ack: Whether frame was acknowledged * @gfp: context flags * * This function is called whenever a management frame was requested to be * transmitted with cfg80211_ops::mgmt_tx() to report the TX status of the * transmission attempt. */ void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, u64 cookie, const u8 *buf, size_t len, bool ack, gfp_t gfp); /** * cfg80211_rx_control_port - notification about a received control port frame * @dev: The device the frame matched to * @skb: The skb with the control port frame. It is assumed that the skb * is 802.3 formatted (with 802.3 header). The skb can be non-linear. * This function does not take ownership of the skb, so the caller is * responsible for any cleanup. The caller must also ensure that * skb->protocol is set appropriately. * @unencrypted: Whether the frame was received unencrypted * * This function is used to inform userspace about a received control port * frame. It should only be used if userspace indicated it wants to receive * control port frames over nl80211. * * The frame is the data portion of the 802.3 or 802.11 data frame with all * network layer headers removed (e.g. the raw EAPoL frame). * * Return: %true if the frame was passed to userspace */ bool cfg80211_rx_control_port(struct net_device *dev, struct sk_buff *skb, bool unencrypted); /** * cfg80211_cqm_rssi_notify - connection quality monitoring rssi event * @dev: network device * @rssi_event: the triggered RSSI event * @rssi_level: new RSSI level value or 0 if not available * @gfp: context flags * * This function is called when a configured connection quality monitoring * RSSI threshold event occurs.
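 *
 * For example, a driver whose firmware signalled that the RSSI dropped
 * below the configured threshold might forward it as (sketch; -70 is
 * an illustrative level in dBm):
 *
 *	cfg80211_cqm_rssi_notify(dev,
 *				 NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW,
 *				 -70, GFP_KERNEL);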
*/ void cfg80211_cqm_rssi_notify(struct net_device *dev, enum nl80211_cqm_rssi_threshold_event rssi_event, s32 rssi_level, gfp_t gfp); /** * cfg80211_cqm_pktloss_notify - notify userspace about packet loss to peer * @dev: network device * @peer: peer's MAC address * @num_packets: how many packets were lost -- should be a fixed threshold * but probably no less than maybe 50, or maybe a throughput dependent * threshold (to account for temporary interference) * @gfp: context flags */ void cfg80211_cqm_pktloss_notify(struct net_device *dev, const u8 *peer, u32 num_packets, gfp_t gfp); /** * cfg80211_cqm_txe_notify - TX error rate event * @dev: network device * @peer: peer's MAC address * @num_packets: how many packets were lost * @rate: % of packets which failed transmission * @intvl: interval (in s) over which the TX failure threshold was breached. * @gfp: context flags * * Notify userspace when the configured percentage of TX failures over the * number of packets in a given interval is exceeded. */ void cfg80211_cqm_txe_notify(struct net_device *dev, const u8 *peer, u32 num_packets, u32 rate, u32 intvl, gfp_t gfp); /** * cfg80211_cqm_beacon_loss_notify - beacon loss event * @dev: network device * @gfp: context flags * * Notify userspace about beacon loss from the connected AP. */ void cfg80211_cqm_beacon_loss_notify(struct net_device *dev, gfp_t gfp); /** * cfg80211_radar_event - radar detection event * @wiphy: the wiphy * @chandef: chandef for the current channel * @gfp: context flags * * This function is called when a radar is detected on the current channel. */ void cfg80211_radar_event(struct wiphy *wiphy, struct cfg80211_chan_def *chandef, gfp_t gfp); /** * cfg80211_sta_opmode_change_notify - STA's ht/vht operation mode change event * @dev: network device * @mac: MAC address of the station whose opmode was modified * @sta_opmode: station's current opmode value * @gfp: context flags * * Drivers should call this function when a station's opmode is modified via an * action frame. */ void cfg80211_sta_opmode_change_notify(struct net_device *dev, const u8 *mac, struct sta_opmode_info *sta_opmode, gfp_t gfp); /** * cfg80211_cac_event - Channel availability check (CAC) event * @netdev: network device * @chandef: chandef for the current channel * @event: type of event * @gfp: context flags * * This function is called when a Channel availability check (CAC) is finished * or aborted. This must be called to notify the completion of a CAC process, * also by full-MAC drivers. */ void cfg80211_cac_event(struct net_device *netdev, const struct cfg80211_chan_def *chandef, enum nl80211_radar_event event, gfp_t gfp); /** * cfg80211_gtk_rekey_notify - notify userspace about driver rekeying * @dev: network device * @bssid: BSSID of AP (to avoid races) * @replay_ctr: new replay counter * @gfp: allocation flags */ void cfg80211_gtk_rekey_notify(struct net_device *dev, const u8 *bssid, const u8 *replay_ctr, gfp_t gfp); /** * cfg80211_pmksa_candidate_notify - notify about PMKSA caching candidate * @dev: network device * @index: candidate index (the smaller the index, the higher the priority) * @bssid: BSSID of AP * @preauth: Whether AP advertises support for RSN pre-authentication * @gfp: allocation flags */ void cfg80211_pmksa_candidate_notify(struct net_device *dev, int index, const u8 *bssid, bool preauth, gfp_t gfp); /** * cfg80211_rx_spurious_frame - inform userspace about a spurious frame * @dev: The device the frame matched to * @addr: the transmitter address * @gfp: context flags * * This function is used in AP mode (only!)
to inform userspace that * a spurious class 3 frame was received, to be able to deauth the * sender. * Return: %true if the frame was passed to userspace (or this failed * for a reason other than not having a subscription.) */ bool cfg80211_rx_spurious_frame(struct net_device *dev, const u8 *addr, gfp_t gfp); /** * cfg80211_rx_unexpected_4addr_frame - inform about unexpected WDS frame * @dev: The device the frame matched to * @addr: the transmitter address * @gfp: context flags * * This function is used in AP mode (only!) to inform userspace that * an associated station sent a 4addr frame but that wasn't expected. * It is allowed and desirable to send this event only once for each * station to avoid event flooding. * Return: %true if the frame was passed to userspace (or this failed * for a reason other than not having a subscription.) */ bool cfg80211_rx_unexpected_4addr_frame(struct net_device *dev, const u8 *addr, gfp_t gfp); /** * cfg80211_probe_status - notify userspace about probe status * @dev: the device the probe was sent on * @addr: the address of the peer * @cookie: the cookie filled in @probe_client previously * @acked: indicates whether probe was acked or not * @ack_signal: signal strength (in dBm) of the ACK frame. * @is_valid_ack_signal: indicates the ack_signal is valid or not. * @gfp: allocation flags */ void cfg80211_probe_status(struct net_device *dev, const u8 *addr, u64 cookie, bool acked, s32 ack_signal, bool is_valid_ack_signal, gfp_t gfp); /** * cfg80211_report_obss_beacon - report beacon from other APs * @wiphy: The wiphy that received the beacon * @frame: the frame * @len: length of the frame * @freq: frequency the frame was received on * @sig_dbm: signal strength in dBm, or 0 if unknown * * Use this function to report to userspace when a beacon was * received. It is not useful to call this when there is no * netdev that is in AP/GO mode. */ void cfg80211_report_obss_beacon(struct wiphy *wiphy, const u8 *frame, size_t len, int freq, int sig_dbm); /** * cfg80211_reg_can_beacon - check if beaconing is allowed * @wiphy: the wiphy * @chandef: the channel definition * @iftype: interface type * * Return: %true if there is no secondary channel or the secondary channel(s) * can be used for beaconing (i.e. is not a radar channel etc.) */ bool cfg80211_reg_can_beacon(struct wiphy *wiphy, struct cfg80211_chan_def *chandef, enum nl80211_iftype iftype); /** * cfg80211_reg_can_beacon_relax - check if beaconing is allowed with relaxation * @wiphy: the wiphy * @chandef: the channel definition * @iftype: interface type * * Return: %true if there is no secondary channel or the secondary channel(s) * can be used for beaconing (i.e. is not a radar channel etc.). This version * also checks if IR-relaxation conditions apply, to allow beaconing under * more permissive conditions. * * Requires the RTNL to be held. */ bool cfg80211_reg_can_beacon_relax(struct wiphy *wiphy, struct cfg80211_chan_def *chandef, enum nl80211_iftype iftype); /* * cfg80211_ch_switch_notify - update wdev channel and notify userspace * @dev: the device which switched channels * @chandef: the new channel definition * * Caller must acquire wdev_lock, therefore must only be called from sleepable * driver context! 
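 *
 * A sketch of reporting a completed switch to an HT20 channel, assuming
 * @chan points at the new &struct ieee80211_channel and the wdev lock
 * is already held as required:
 *
 *	struct cfg80211_chan_def chandef;
 *
 *	cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_HT20);
 *	cfg80211_ch_switch_notify(dev, &chandef);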
*/ void cfg80211_ch_switch_notify(struct net_device *dev, struct cfg80211_chan_def *chandef); /* * cfg80211_ch_switch_started_notify - notify channel switch start * @dev: the device on which the channel switch started * @chandef: the future channel definition * @count: the number of TBTTs until the channel switch happens * * Inform userspace about the channel switch that has just * started, so that it can take appropriate actions (e.g. starting * channel switch on other vifs), if necessary. */ void cfg80211_ch_switch_started_notify(struct net_device *dev, struct cfg80211_chan_def *chandef, u8 count); /** * ieee80211_operating_class_to_band - convert operating class to band * * @operating_class: the operating class to convert * @band: band pointer to fill * * Returns %true if the conversion was successful, %false otherwise. */ bool ieee80211_operating_class_to_band(u8 operating_class, enum nl80211_band *band); /** * ieee80211_chandef_to_operating_class - convert chandef to operating class * * @chandef: the chandef to convert * @op_class: a pointer to the resulting operating class * * Returns %true if the conversion was successful, %false otherwise. */ bool ieee80211_chandef_to_operating_class(struct cfg80211_chan_def *chandef, u8 *op_class); /* * cfg80211_tdls_oper_request - request userspace to perform TDLS operation * @dev: the device on which the operation is requested * @peer: the MAC address of the peer device * @oper: the requested TDLS operation (NL80211_TDLS_SETUP or * NL80211_TDLS_TEARDOWN) * @reason_code: the reason code for teardown request * @gfp: allocation flags * * This function is used to request userspace to perform a TDLS operation that * requires knowledge of keys, i.e., link setup or teardown when the AP * connection uses encryption. This is an optional mechanism for the driver to use * if it can automatically determine when a TDLS link could be useful (e.g., * based on traffic and signal strength for a peer). */ void cfg80211_tdls_oper_request(struct net_device *dev, const u8 *peer, enum nl80211_tdls_operation oper, u16 reason_code, gfp_t gfp); /* * cfg80211_calculate_bitrate - calculate actual bitrate (in 100Kbps units) * @rate: given rate_info to calculate bitrate from * * return 0 if MCS index >= 32 */ u32 cfg80211_calculate_bitrate(struct rate_info *rate); /** * cfg80211_unregister_wdev - remove the given wdev * @wdev: struct wireless_dev to remove * * Call this function only for wdevs that have no netdev assigned, * e.g. P2P Devices. It removes the device from the list so that * it can no longer be used. It is necessary to call this function * even when cfg80211 requests the removal of the interface by * calling the del_virtual_intf() callback. The function must also * be called when the driver wishes to unregister the wdev, e.g. * when the device is unbound from the driver. * * Requires the RTNL to be held.
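 *
 * A typical teardown sequence for such a wdev might look like this
 * (sketch; assumes the driver allocated the wdev itself):
 *
 *	rtnl_lock();
 *	cfg80211_unregister_wdev(wdev);
 *	rtnl_unlock();
 *	kfree(wdev);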
*/ void cfg80211_unregister_wdev(struct wireless_dev *wdev); /** * struct cfg80211_ft_event_params - FT Information Elements * @ies: FT IEs * @ies_len: length of the FT IE in bytes * @target_ap: target AP's MAC address * @ric_ies: RIC IE * @ric_ies_len: length of the RIC IE in bytes */ struct cfg80211_ft_event_params { const u8 *ies; size_t ies_len; const u8 *target_ap; const u8 *ric_ies; size_t ric_ies_len; }; /** * cfg80211_ft_event - notify userspace about FT IE and RIC IE * @netdev: network device * @ft_event: IE information */ void cfg80211_ft_event(struct net_device *netdev, struct cfg80211_ft_event_params *ft_event); /** * cfg80211_get_p2p_attr - find and copy a P2P attribute from IE buffer * @ies: the input IE buffer * @len: the input length * @attr: the attribute ID to find * @buf: output buffer, can be %NULL if the data isn't needed, e.g. * if the function is only called to get the needed buffer size * @bufsize: size of the output buffer * * The function finds a given P2P attribute in the (vendor) IEs and * copies its contents to the given buffer. * * Return: A negative error code (-%EILSEQ or -%ENOENT) if the data is * malformed or the attribute can't be found (respectively), or the * length of the found attribute (which can be zero). */ int cfg80211_get_p2p_attr(const u8 *ies, unsigned int len, enum ieee80211_p2p_attr_id attr, u8 *buf, unsigned int bufsize); /** * ieee80211_ie_split_ric - split an IE buffer according to ordering (with RIC) * @ies: the IE buffer * @ielen: the length of the IE buffer * @ids: an array with element IDs that are allowed before * the split. A WLAN_EID_EXTENSION value means that the next * EID in the list is a sub-element of the EXTENSION IE. * @n_ids: the size of the element ID array * @after_ric: array IE types that come after the RIC element * @n_after_ric: size of the @after_ric array * @offset: offset where to start splitting in the buffer * * This function splits an IE buffer by updating the @offset * variable to point to the location where the buffer should be * split. * * It assumes that the given IE buffer is well-formed, this * has to be guaranteed by the caller! * * It also assumes that the IEs in the buffer are ordered * correctly, if not the result of using this function will not * be ordered correctly either, i.e. it does no reordering. * * The function returns the offset where the next part of the * buffer starts, which may be @ielen if the entire (remainder) * of the buffer should be used. */ size_t ieee80211_ie_split_ric(const u8 *ies, size_t ielen, const u8 *ids, int n_ids, const u8 *after_ric, int n_after_ric, size_t offset); /** * ieee80211_ie_split - split an IE buffer according to ordering * @ies: the IE buffer * @ielen: the length of the IE buffer * @ids: an array with element IDs that are allowed before * the split. A WLAN_EID_EXTENSION value means that the next * EID in the list is a sub-element of the EXTENSION IE. * @n_ids: the size of the element ID array * @offset: offset where to start splitting in the buffer * * This function splits an IE buffer by updating the @offset * variable to point to the location where the buffer should be * split. * * It assumes that the given IE buffer is well-formed, this * has to be guaranteed by the caller! * * It also assumes that the IEs in the buffer are ordered * correctly, if not the result of using this function will not * be ordered correctly either, i.e. it does no reordering.
* * The function returns the offset where the next part of the * buffer starts, which may be @ielen if the entire (remainder) * of the buffer should be used. */ static inline size_t ieee80211_ie_split(const u8 *ies, size_t ielen, const u8 *ids, int n_ids, size_t offset) { return ieee80211_ie_split_ric(ies, ielen, ids, n_ids, NULL, 0, offset); } /** * cfg80211_report_wowlan_wakeup - report wakeup from WoWLAN * @wdev: the wireless device reporting the wakeup * @wakeup: the wakeup report * @gfp: allocation flags * * This function reports that the given device woke up. If it * caused the wakeup, report the reason(s), otherwise you may * pass %NULL as the @wakeup parameter to advertise that something * else caused the wakeup. */ void cfg80211_report_wowlan_wakeup(struct wireless_dev *wdev, struct cfg80211_wowlan_wakeup *wakeup, gfp_t gfp); /** * cfg80211_crit_proto_stopped() - indicate critical protocol stopped by driver. * * @wdev: the wireless device for which critical protocol is stopped. * @gfp: allocation flags * * This function can be called by the driver to indicate it has reverted * operation back to normal. One reason could be that the duration given * by .crit_proto_start() has expired. */ void cfg80211_crit_proto_stopped(struct wireless_dev *wdev, gfp_t gfp); /** * ieee80211_get_num_supported_channels - get number of channels device has * @wiphy: the wiphy * * Return: the number of channels supported by the device. */ unsigned int ieee80211_get_num_supported_channels(struct wiphy *wiphy); /** * cfg80211_check_combinations - check interface combinations * * @wiphy: the wiphy * @params: the interface combinations parameter * * This function can be called by the driver to check whether a * combination of interfaces and their types are allowed according to * the interface combinations. */ int cfg80211_check_combinations(struct wiphy *wiphy, struct iface_combination_params *params); /** * cfg80211_iter_combinations - iterate over matching combinations * * @wiphy: the wiphy * @params: the interface combinations parameter * @iter: function to call for each matching combination * @data: pointer to pass to iter function * * This function can be called by the driver to check what possible * combinations it fits in at a given moment, e.g. for channel switching * purposes. */ int cfg80211_iter_combinations(struct wiphy *wiphy, struct iface_combination_params *params, void (*iter)(const struct ieee80211_iface_combination *c, void *data), void *data); /* * cfg80211_stop_iface - trigger interface disconnection * * @wiphy: the wiphy * @wdev: wireless device * @gfp: context flags * * Trigger interface to be stopped as if AP was stopped, IBSS/mesh left, STA * disconnected. * * Note: This doesn't need any locks and is asynchronous. */ void cfg80211_stop_iface(struct wiphy *wiphy, struct wireless_dev *wdev, gfp_t gfp); /** * cfg80211_shutdown_all_interfaces - shut down all interfaces for a wiphy * @wiphy: the wiphy to shut down * * This function shuts down all interfaces belonging to this wiphy by * calling dev_close() (and treating non-netdev interfaces as needed). * It shouldn't really be used unless there are some fatal device errors * that really can't be recovered in any other way. * * Callers must hold the RTNL and be able to deal with callbacks into * the driver while the function is running. */ void cfg80211_shutdown_all_interfaces(struct wiphy *wiphy); /** * wiphy_ext_feature_set - set the extended feature flag * * @wiphy: the wiphy to modify. * @ftidx: extended feature bit index. 
* * The extended features are flagged in multiple bytes (see * &struct wiphy.@ext_features) */ static inline void wiphy_ext_feature_set(struct wiphy *wiphy, enum nl80211_ext_feature_index ftidx) { u8 *ft_byte; ft_byte = &wiphy->ext_features[ftidx / 8]; *ft_byte |= BIT(ftidx % 8); } /** * wiphy_ext_feature_isset - check the extended feature flag * * @wiphy: the wiphy to modify. * @ftidx: extended feature bit index. * * The extended features are flagged in multiple bytes (see * &struct wiphy.@ext_features) */ static inline bool wiphy_ext_feature_isset(struct wiphy *wiphy, enum nl80211_ext_feature_index ftidx) { u8 ft_byte; ft_byte = wiphy->ext_features[ftidx / 8]; return (ft_byte & BIT(ftidx % 8)) != 0; } /** * cfg80211_free_nan_func - free NAN function * @f: NAN function that should be freed * * Frees the NAN function and all its allocated members. */ void cfg80211_free_nan_func(struct cfg80211_nan_func *f); /** * struct cfg80211_nan_match_params - NAN match parameters * @type: the type of the function that triggered a match. If it is * %NL80211_NAN_FUNC_SUBSCRIBE it means that we replied to a subscriber. * If it is %NL80211_NAN_FUNC_PUBLISH, it means that we got a discovery * result. * If it is %NL80211_NAN_FUNC_FOLLOW_UP, we received a follow up. * @inst_id: the local instance id * @peer_inst_id: the instance id of the peer's function * @addr: the MAC address of the peer * @info_len: the length of @info * @info: the Service Specific Info from the peer (if any) * @cookie: unique identifier of the corresponding function * @sec: security configuration for the service. Only valid if @type is * %NL80211_NAN_FUNC_PUBLISH. * @device_attrs: device related attributes from the service discovery frame. * (e.g. Device capability attribute, NAN availability attribute and NDC * attribute). * @device_attrs_len: length of @device_attrs field in bytes. * @ndp_required: 1 if NAN data path is required for the service, 0 otherwise. * The NDP type is specified by @ndp_type. Only valid if @type is * %NL80211_NAN_FUNC_PUBLISH. * @ndp_type: the type of NDP required for the service as specified by * &enum nl80211_nan_ndp_type. Only valid if @ndp_required is 1, ignored * otherwise. * @fsd_required: 1 if further service discovery is required for the service, 0 * otherwise. The further service discovery method is specified by * @fsd_method. Only valid if @type is %NL80211_NAN_FUNC_PUBLISH. * @fsd_method: the further service discovery method to be used for the service * as specified by &enum nl80211_nan_fsd. Only valid if @fsd_required is * set, ignored otherwise. * @range_limit_ingress: the ingress range limit for the service in centimeters. * Zero if no range limit is set. Only valid if @type is * %NL80211_NAN_FUNC_PUBLISH. * @range_limit_egress: the egress range limit for the service in centimeters. * Zero if no range limit is set. Only valid if @type is * %NL80211_NAN_FUNC_PUBLISH. */ struct cfg80211_nan_match_params { enum nl80211_nan_function_type type; u8 inst_id; u8 peer_inst_id; const u8 *addr; u16 info_len; const u8 *info; u64 cookie; struct cfg80211_nan_sec sec; const u8 *device_attrs; u32 device_attrs_len; u8 ndp_required; enum nl80211_nan_ndp_type ndp_type; u8 fsd_required; enum nl80211_nan_fsd fsd_method; u16 range_limit_ingress; u16 range_limit_egress; }; /** * cfg80211_nan_match - report a match for a NAN function. * @wdev: the wireless device reporting the match * @match: match notification parameters * @gfp: allocation flags * * This function reports that a NAN function had a match.
This * can be a subscribe that had a match or a solicited publish that * was sent. It can also be a follow up that was received. */ void cfg80211_nan_match(struct wireless_dev *wdev, struct cfg80211_nan_match_params *match, gfp_t gfp); /** * cfg80211_nan_func_terminated - notify about NAN function termination. * * @wdev: the wireless device reporting the termination * @inst_id: the local instance id * @reason: termination reason (one of the NL80211_NAN_FUNC_TERM_REASON_*) * @cookie: unique NAN function identifier * @gfp: allocation flags * * This function reports that a NAN function was terminated. */ void cfg80211_nan_func_terminated(struct wireless_dev *wdev, u8 inst_id, enum nl80211_nan_func_term_reason reason, u64 cookie, gfp_t gfp); /* ethtool helper */ void cfg80211_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info); /** * cfg80211_external_auth_request - userspace request for authentication * @netdev: network device * @params: External authentication parameters * @gfp: allocation flags * Returns: 0 on success, < 0 on error */ int cfg80211_external_auth_request(struct net_device *netdev, struct cfg80211_external_auth_params *params, gfp_t gfp); /** * cfg80211_pmsr_report - report peer measurement result data * @wdev: the wireless device reporting the measurement * @req: the original measurement request * @result: the result data * @gfp: allocation flags */ void cfg80211_pmsr_report(struct wireless_dev *wdev, struct cfg80211_pmsr_request *req, struct cfg80211_pmsr_result *result, gfp_t gfp); /** * cfg80211_pmsr_complete - report peer measurement completed * @wdev: the wireless device reporting the measurement * @req: the original measurement request * @gfp: allocation flags * * Report that the entire measurement completed, after this * the request pointer will no longer be valid. */ void cfg80211_pmsr_complete(struct wireless_dev *wdev, struct cfg80211_pmsr_request *req, gfp_t gfp); /* Logging, debugging and troubleshooting/diagnostic helpers. */ /* wiphy_printk helpers, similar to dev_printk */ #define wiphy_printk(level, wiphy, format, args...) \ dev_printk(level, &(wiphy)->dev, format, ##args) #define wiphy_emerg(wiphy, format, args...) \ dev_emerg(&(wiphy)->dev, format, ##args) #define wiphy_alert(wiphy, format, args...) \ dev_alert(&(wiphy)->dev, format, ##args) #define wiphy_crit(wiphy, format, args...) \ dev_crit(&(wiphy)->dev, format, ##args) #define wiphy_err(wiphy, format, args...) \ dev_err(&(wiphy)->dev, format, ##args) #define wiphy_warn(wiphy, format, args...) \ dev_warn(&(wiphy)->dev, format, ##args) #define wiphy_notice(wiphy, format, args...) \ dev_notice(&(wiphy)->dev, format, ##args) #define wiphy_info(wiphy, format, args...) \ dev_info(&(wiphy)->dev, format, ##args) #define wiphy_debug(wiphy, format, args...) \ wiphy_printk(KERN_DEBUG, wiphy, format, ##args) #define wiphy_dbg(wiphy, format, args...) \ dev_dbg(&(wiphy)->dev, format, ##args) #if defined(VERBOSE_DEBUG) #define wiphy_vdbg wiphy_dbg #else #define wiphy_vdbg(wiphy, format, args...) \ ({ \ if (0) \ wiphy_printk(KERN_DEBUG, wiphy, format, ##args); \ 0; \ }) #endif /* * wiphy_WARN() acts like wiphy_printk(), but with the key difference * of using a WARN/WARN_ON to get the message out, including the * file/line information and a backtrace. */ #define wiphy_WARN(wiphy, format, args...)
\ WARN(1, "wiphy: %s\n" format, wiphy_name(wiphy), ##args); #endif /* __NET_CFG80211_H */ backport-iwlwifi-dkms-7906/include/net/codel.h000066400000000000000000000132261351064634500213550ustar00rootroot00000000000000#ifndef __NET_SCHED_CODEL_H #define __NET_SCHED_CODEL_H /* * Codel - The Controlled-Delay Active Queue Management algorithm * * Copyright (C) 2011-2012 Kathleen Nichols * Copyright (C) 2011-2012 Van Jacobson * Copyright (C) 2012 Michael D. Taht * Copyright (C) 2012,2015 Eric Dumazet * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The names of the authors may not be used to endorse or promote products * derived from this software without specific prior written permission. * * Alternatively, provided that this notice is retained in full, this * software may be distributed under the terms of the GNU General * Public License ("GPL") version 2, in which case the provisions of the * GPL apply INSTEAD OF those given above. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH * DAMAGE. * */ #include <linux/types.h> #include <linux/ktime.h> #include <linux/skbuff.h> #include <net/pkt_sched.h> #include <net/inet_ecn.h> /* Controlling Queue Delay (CoDel) algorithm * ========================================= * Source : Kathleen Nichols and Van Jacobson * http://queue.acm.org/detail.cfm?id=2209336 * * Implemented on linux by Dave Taht and Eric Dumazet */ /* CoDel uses a 1024 nsec clock, encoded in u32 * This gives a range of 2199 seconds, because of signed compares */ typedef u32 codel_time_t; typedef s32 codel_tdiff_t; #define CODEL_SHIFT 10 #define MS2TIME(a) ((a * NSEC_PER_MSEC) >> CODEL_SHIFT) static inline codel_time_t codel_get_time(void) { u64 ns = ktime_get_ns(); return ns >> CODEL_SHIFT; } /* Dealing with timer wrapping, according to RFC 1982, as described on Wikipedia: * https://en.wikipedia.org/wiki/Serial_number_arithmetic#General_Solution * codel_time_after(a,b) returns true if the time a is after time b.
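 * For example, with a == 1 (0x00000001) and b == 0xffffffff the
 * subtraction wraps: (s32)(a - b) == 2 > 0, so a is correctly treated
 * as being after b even though the counter overflowed in between.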
*/ #define codel_time_after(a, b) \ (typecheck(codel_time_t, a) && \ typecheck(codel_time_t, b) && \ ((s32)((a) - (b)) > 0)) #define codel_time_before(a, b) codel_time_after(b, a) #define codel_time_after_eq(a, b) \ (typecheck(codel_time_t, a) && \ typecheck(codel_time_t, b) && \ ((s32)((a) - (b)) >= 0)) #define codel_time_before_eq(a, b) codel_time_after_eq(b, a) static inline u32 codel_time_to_us(codel_time_t val) { u64 valns = ((u64)val << CODEL_SHIFT); do_div(valns, NSEC_PER_USEC); return (u32)valns; } /** * struct codel_params - contains codel parameters * @target: target queue size (in time units) * @ce_threshold: threshold for marking packets with ECN CE * @interval: width of moving time window * @mtu: device mtu, or minimal queue backlog in bytes. * @ecn: is Explicit Congestion Notification enabled */ struct codel_params { codel_time_t target; codel_time_t ce_threshold; codel_time_t interval; u32 mtu; bool ecn; }; /** * struct codel_vars - contains codel variables * @count: how many drops we've done since the last time we * entered dropping state * @lastcount: count at entry to dropping state * @dropping: set to true if in dropping state * @rec_inv_sqrt: reciprocal value of sqrt(count) >> 1 * @first_above_time: when we went (or will go) continuously above target * for interval * @drop_next: time to drop next packet, or when we dropped last * @ldelay: sojourn time of last dequeued packet */ struct codel_vars { u32 count; u32 lastcount; bool dropping; u16 rec_inv_sqrt; codel_time_t first_above_time; codel_time_t drop_next; codel_time_t ldelay; }; #define REC_INV_SQRT_BITS (8 * sizeof(u16)) /* or sizeof_in_bits(rec_inv_sqrt) */ /* needed shift to get a Q0.32 number from rec_inv_sqrt */ #define REC_INV_SQRT_SHIFT (32 - REC_INV_SQRT_BITS) /** * struct codel_stats - contains codel shared variables and stats * @maxpacket: largest packet we've seen so far * @drop_count: temp count of dropped packets in dequeue() * @drop_len: bytes of dropped packets in dequeue() * ecn_mark: number of packets we ECN marked instead of dropping * ce_mark: number of packets CE marked because sojourn time was above ce_threshold */ struct codel_stats { u32 maxpacket; u32 drop_count; u32 drop_len; u32 ecn_mark; u32 ce_mark; }; #define CODEL_DISABLED_THRESHOLD INT_MAX typedef u32 (*codel_skb_len_t)(const struct sk_buff *skb); typedef codel_time_t (*codel_skb_time_t)(const struct sk_buff *skb); typedef void (*codel_skb_drop_t)(struct sk_buff *skb, void *ctx); typedef struct sk_buff * (*codel_skb_dequeue_t)(struct codel_vars *vars, void *ctx); #endif backport-iwlwifi-dkms-7906/include/net/codel_impl.h000066400000000000000000000177471351064634500224120ustar00rootroot00000000000000#ifndef __NET_SCHED_CODEL_IMPL_H #define __NET_SCHED_CODEL_IMPL_H /* * Codel - The Controlled-Delay Active Queue Management algorithm * * Copyright (C) 2011-2012 Kathleen Nichols * Copyright (C) 2011-2012 Van Jacobson * Copyright (C) 2012 Michael D. Taht * Copyright (C) 2012,2015 Eric Dumazet * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. 
The names of the authors may not be used to endorse or promote products * derived from this software without specific prior written permission. * * Alternatively, provided that this notice is retained in full, this * software may be distributed under the terms of the GNU General * Public License ("GPL") version 2, in which case the provisions of the * GPL apply INSTEAD OF those given above. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH * DAMAGE. * */ /* Controlling Queue Delay (CoDel) algorithm * ========================================= * Source : Kathleen Nichols and Van Jacobson * http://queue.acm.org/detail.cfm?id=2209336 * * Implemented on linux by Dave Taht and Eric Dumazet */ static void codel_params_init(struct codel_params *params) { params->interval = MS2TIME(100); params->target = MS2TIME(5); params->ce_threshold = CODEL_DISABLED_THRESHOLD; params->ecn = false; } static void codel_vars_init(struct codel_vars *vars) { memset(vars, 0, sizeof(*vars)); } static void codel_stats_init(struct codel_stats *stats) { stats->maxpacket = 0; } /* * http://en.wikipedia.org/wiki/Methods_of_computing_square_roots#Iterative_methods_for_reciprocal_square_roots * new_invsqrt = (invsqrt / 2) * (3 - count * invsqrt^2) * * Here, invsqrt is a fixed point number (< 1.0), 32bit mantissa, aka Q0.32 */ static void codel_Newton_step(struct codel_vars *vars) { u32 invsqrt = ((u32)vars->rec_inv_sqrt) << REC_INV_SQRT_SHIFT; u32 invsqrt2 = ((u64)invsqrt * invsqrt) >> 32; u64 val = (3LL << 32) - ((u64)vars->count * invsqrt2); val >>= 2; /* avoid overflow in following multiply */ val = (val * invsqrt) >> (32 - 2 + 1); vars->rec_inv_sqrt = val >> REC_INV_SQRT_SHIFT; } /* * CoDel control_law is t + interval/sqrt(count) * We maintain in rec_inv_sqrt the reciprocal value of sqrt(count) to avoid * both sqrt() and divide operation. */ static codel_time_t codel_control_law(codel_time_t t, codel_time_t interval, u32 rec_inv_sqrt) { return t + reciprocal_scale(interval, rec_inv_sqrt << REC_INV_SQRT_SHIFT); } static bool codel_should_drop(const struct sk_buff *skb, void *ctx, struct codel_vars *vars, struct codel_params *params, struct codel_stats *stats, codel_skb_len_t skb_len_func, codel_skb_time_t skb_time_func, u32 *backlog, codel_time_t now) { bool ok_to_drop; u32 skb_len; if (!skb) { vars->first_above_time = 0; return false; } skb_len = skb_len_func(skb); vars->ldelay = now - skb_time_func(skb); if (unlikely(skb_len > stats->maxpacket)) stats->maxpacket = skb_len; if (codel_time_before(vars->ldelay, params->target) || *backlog <= params->mtu) { /* went below - stay below for at least interval */ vars->first_above_time = 0; return false; } ok_to_drop = false; if (vars->first_above_time == 0) { /* just went above from below. 
If we stay above * for at least interval we'll say it's ok to drop */ vars->first_above_time = now + params->interval; } else if (codel_time_after(now, vars->first_above_time)) { ok_to_drop = true; } return ok_to_drop; } static struct sk_buff *codel_dequeue(void *ctx, u32 *backlog, struct codel_params *params, struct codel_vars *vars, struct codel_stats *stats, codel_skb_len_t skb_len_func, codel_skb_time_t skb_time_func, codel_skb_drop_t drop_func, codel_skb_dequeue_t dequeue_func) { struct sk_buff *skb = dequeue_func(vars, ctx); codel_time_t now; bool drop; if (!skb) { vars->dropping = false; return skb; } now = codel_get_time(); drop = codel_should_drop(skb, ctx, vars, params, stats, skb_len_func, skb_time_func, backlog, now); if (vars->dropping) { if (!drop) { /* sojourn time below target - leave dropping state */ vars->dropping = false; } else if (codel_time_after_eq(now, vars->drop_next)) { /* It's time for the next drop. Drop the current * packet and dequeue the next. The dequeue might * take us out of dropping state. * If not, schedule the next drop. * A large backlog might result in drop rates so high * that the next drop should happen now, * hence the while loop. */ while (vars->dropping && codel_time_after_eq(now, vars->drop_next)) { vars->count++; /* dont care of possible wrap * since there is no more divide */ codel_Newton_step(vars); if (params->ecn && INET_ECN_set_ce(skb)) { stats->ecn_mark++; vars->drop_next = codel_control_law(vars->drop_next, params->interval, vars->rec_inv_sqrt); goto end; } stats->drop_len += skb_len_func(skb); drop_func(skb, ctx); stats->drop_count++; skb = dequeue_func(vars, ctx); if (!codel_should_drop(skb, ctx, vars, params, stats, skb_len_func, skb_time_func, backlog, now)) { /* leave dropping state */ vars->dropping = false; } else { /* and schedule the next drop */ vars->drop_next = codel_control_law(vars->drop_next, params->interval, vars->rec_inv_sqrt); } } } } else if (drop) { u32 delta; if (params->ecn && INET_ECN_set_ce(skb)) { stats->ecn_mark++; } else { stats->drop_len += skb_len_func(skb); drop_func(skb, ctx); stats->drop_count++; skb = dequeue_func(vars, ctx); drop = codel_should_drop(skb, ctx, vars, params, stats, skb_len_func, skb_time_func, backlog, now); } vars->dropping = true; /* if min went above target close to when we last went below it * assume that the drop rate that controlled the queue on the * last cycle is a good starting point to control it now. */ delta = vars->count - vars->lastcount; if (delta > 1 && codel_time_before(now - vars->drop_next, 16 * params->interval)) { vars->count = delta; /* we dont care if rec_inv_sqrt approximation * is not very precise : * Next Newton steps will correct it quadratically. */ codel_Newton_step(vars); } else { vars->count = 1; vars->rec_inv_sqrt = ~0U >> REC_INV_SQRT_SHIFT; } vars->lastcount = vars->count; vars->drop_next = codel_control_law(now, params->interval, vars->rec_inv_sqrt); } end: if (skb && codel_time_after(vars->ldelay, params->ce_threshold) && INET_ECN_set_ce(skb)) stats->ce_mark++; return skb; } #endif backport-iwlwifi-dkms-7906/include/net/codel_qdisc.h000066400000000000000000000056351351064634500225450ustar00rootroot00000000000000#ifndef __NET_SCHED_CODEL_QDISC_H #define __NET_SCHED_CODEL_QDISC_H /* * Codel - The Controlled-Delay Active Queue Management algorithm * * Copyright (C) 2011-2012 Kathleen Nichols * Copyright (C) 2011-2012 Van Jacobson * Copyright (C) 2012 Michael D. 
Taht * Copyright (C) 2012,2015 Eric Dumazet * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The names of the authors may not be used to endorse or promote products * derived from this software without specific prior written permission. * * Alternatively, provided that this notice is retained in full, this * software may be distributed under the terms of the GNU General * Public License ("GPL") version 2, in which case the provisions of the * GPL apply INSTEAD OF those given above. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH * DAMAGE. * */ /* Controlling Queue Delay (CoDel) algorithm * ========================================= * Source : Kathleen Nichols and Van Jacobson * http://queue.acm.org/detail.cfm?id=2209336 * * Implemented on linux by Dave Taht and Eric Dumazet */ /* Qdiscs using codel plugin must use codel_skb_cb in their own cb[] */ struct codel_skb_cb { codel_time_t enqueue_time; unsigned int mem_usage; }; static struct codel_skb_cb *get_codel_cb(const struct sk_buff *skb) { qdisc_cb_private_validate(skb, sizeof(struct codel_skb_cb)); return (struct codel_skb_cb *)qdisc_skb_cb(skb)->data; } static codel_time_t codel_get_enqueue_time(const struct sk_buff *skb) { return get_codel_cb(skb)->enqueue_time; } static void codel_set_enqueue_time(struct sk_buff *skb) { get_codel_cb(skb)->enqueue_time = codel_get_time(); } #endif backport-iwlwifi-dkms-7906/include/net/fq.h000066400000000000000000000052441351064634500206760ustar00rootroot00000000000000/* * Copyright (c) 2016 Qualcomm Atheros, Inc * * GPL v2 * * Based on net/sched/sch_fq_codel.c */ #ifndef __NET_SCHED_FQ_H #define __NET_SCHED_FQ_H struct fq_tin; /** * struct fq_flow - per traffic flow queue * * @tin: owner of this flow. Used to manage collisions, i.e. when a packet * hashes to an index which points to a flow that is already owned by a * different tin the packet is destined to. In such case the implementer * must provide a fallback flow * @flowchain: can be linked to fq_tin's new_flows or old_flows. Used for DRR++ * (deficit round robin) based round robin queuing similar to the one * found in net/sched/sch_fq_codel.c * @backlogchain: can be linked to other fq_flow and fq. 
Used to keep track of * fat flows and efficient head-dropping if packet limit is reached * @queue: sk_buff queue to hold packets * @backlog: number of bytes pending in the queue. The number of packets can be * found in @queue.qlen * @deficit: used for DRR++ */ struct fq_flow { struct fq_tin *tin; struct list_head flowchain; struct list_head backlogchain; struct sk_buff_head queue; u32 backlog; int deficit; }; /** * struct fq_tin - a logical container of fq_flows * * Used to group fq_flows into a logical aggregate. DRR++ scheme is used to * pull interleaved packets out of the associated flows. * * @new_flows: linked list of fq_flow * @old_flows: linked list of fq_flow */ struct fq_tin { struct list_head new_flows; struct list_head old_flows; u32 backlog_bytes; u32 backlog_packets; u32 overlimit; u32 collisions; u32 flows; u32 tx_bytes; u32 tx_packets; }; /** * struct fq - main container for fair queuing purposes * * @backlogs: linked to fq_flows. Used to maintain fat flows for efficient * head-dropping when @backlog reaches @limit * @limit: max number of packets that can be queued across all flows * @backlog: number of packets queued across all flows */ struct fq { struct fq_flow *flows; struct list_head backlogs; spinlock_t lock; u32 flows_cnt; u32 perturbation; u32 limit; u32 memory_limit; u32 memory_usage; u32 quantum; u32 backlog; u32 overlimit; u32 overmemory; u32 collisions; }; typedef struct sk_buff *fq_tin_dequeue_t(struct fq *, struct fq_tin *, struct fq_flow *flow); typedef void fq_skb_free_t(struct fq *, struct fq_tin *, struct fq_flow *, struct sk_buff *); /* Return %true to filter (drop) the frame. */ typedef bool fq_skb_filter_t(struct fq *, struct fq_tin *, struct fq_flow *, struct sk_buff *, void *); typedef struct fq_flow *fq_flow_get_default_t(struct fq *, struct fq_tin *, int idx, struct sk_buff *); #endif backport-iwlwifi-dkms-7906/include/net/fq_impl.h000066400000000000000000000155761351064634500217300ustar00rootroot00000000000000/* * Copyright (c) 2016 Qualcomm Atheros, Inc * * GPL v2 * * Based on net/sched/sch_fq_codel.c */ #ifndef __NET_SCHED_FQ_IMPL_H #define __NET_SCHED_FQ_IMPL_H #include /* functions that are embedded into includer */ static void fq_adjust_removal(struct fq *fq, struct fq_flow *flow, struct sk_buff *skb) { struct fq_tin *tin = flow->tin; tin->backlog_bytes -= skb->len; tin->backlog_packets--; flow->backlog -= skb->len; fq->backlog--; fq->memory_usage -= skb->truesize; } static void fq_rejigger_backlog(struct fq *fq, struct fq_flow *flow) { struct fq_flow *i; if (flow->backlog == 0) { list_del_init(&flow->backlogchain); } else { i = flow; list_for_each_entry_continue(i, &fq->backlogs, backlogchain) if (i->backlog < flow->backlog) break; list_move_tail(&flow->backlogchain, &i->backlogchain); } } static struct sk_buff *fq_flow_dequeue(struct fq *fq, struct fq_flow *flow) { struct sk_buff *skb; lockdep_assert_held(&fq->lock); skb = __skb_dequeue(&flow->queue); if (!skb) return NULL; fq_adjust_removal(fq, flow, skb); fq_rejigger_backlog(fq, flow); return skb; } static struct sk_buff *fq_tin_dequeue(struct fq *fq, struct fq_tin *tin, fq_tin_dequeue_t dequeue_func) { struct fq_flow *flow; struct list_head *head; struct sk_buff *skb; lockdep_assert_held(&fq->lock); begin: head = &tin->new_flows; if (list_empty(head)) { head = &tin->old_flows; if (list_empty(head)) return NULL; } flow = list_first_entry(head, struct fq_flow, flowchain); if (flow->deficit <= 0) { flow->deficit += fq->quantum; list_move_tail(&flow->flowchain, &tin->old_flows); goto begin; } 
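	/*
	 * DRR++ note (added for clarity): reaching this point means the
	 * head flow has a positive deficit and is allowed to transmit.
	 * The deficit is charged with skb->len below; once it drops to
	 * zero or below, the branch above refills it with one quantum
	 * and rotates the flow onto old_flows.
	 */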
skb = dequeue_func(fq, tin, flow); if (!skb) { /* force a pass through old_flows to prevent starvation */ if ((head == &tin->new_flows) && !list_empty(&tin->old_flows)) { list_move_tail(&flow->flowchain, &tin->old_flows); } else { list_del_init(&flow->flowchain); flow->tin = NULL; } goto begin; } flow->deficit -= skb->len; tin->tx_bytes += skb->len; tin->tx_packets++; return skb; } static struct fq_flow *fq_flow_classify(struct fq *fq, struct fq_tin *tin, struct sk_buff *skb, fq_flow_get_default_t get_default_func) { struct fq_flow *flow; u32 hash; u32 idx; lockdep_assert_held(&fq->lock); hash = skb_get_hash_perturb(skb, fq->perturbation); idx = reciprocal_scale(hash, fq->flows_cnt); flow = &fq->flows[idx]; if (flow->tin && flow->tin != tin) { flow = get_default_func(fq, tin, idx, skb); tin->collisions++; fq->collisions++; } if (!flow->tin) tin->flows++; return flow; } static void fq_recalc_backlog(struct fq *fq, struct fq_tin *tin, struct fq_flow *flow) { struct fq_flow *i; if (list_empty(&flow->backlogchain)) list_add_tail(&flow->backlogchain, &fq->backlogs); i = flow; list_for_each_entry_continue_reverse(i, &fq->backlogs, backlogchain) if (i->backlog > flow->backlog) break; list_move(&flow->backlogchain, &i->backlogchain); } static void fq_tin_enqueue(struct fq *fq, struct fq_tin *tin, struct sk_buff *skb, fq_skb_free_t free_func, fq_flow_get_default_t get_default_func) { struct fq_flow *flow; bool oom; lockdep_assert_held(&fq->lock); flow = fq_flow_classify(fq, tin, skb, get_default_func); flow->tin = tin; flow->backlog += skb->len; tin->backlog_bytes += skb->len; tin->backlog_packets++; fq->memory_usage += skb->truesize; fq->backlog++; fq_recalc_backlog(fq, tin, flow); if (list_empty(&flow->flowchain)) { flow->deficit = fq->quantum; list_add_tail(&flow->flowchain, &tin->new_flows); } __skb_queue_tail(&flow->queue, skb); oom = (fq->memory_usage > fq->memory_limit); while (fq->backlog > fq->limit || oom) { flow = list_first_entry_or_null(&fq->backlogs, struct fq_flow, backlogchain); if (!flow) return; skb = fq_flow_dequeue(fq, flow); if (!skb) return; free_func(fq, flow->tin, flow, skb); flow->tin->overlimit++; fq->overlimit++; if (oom) { fq->overmemory++; oom = (fq->memory_usage > fq->memory_limit); } } } static void fq_flow_filter(struct fq *fq, struct fq_flow *flow, fq_skb_filter_t filter_func, void *filter_data, fq_skb_free_t free_func) { struct fq_tin *tin = flow->tin; struct sk_buff *skb, *tmp; lockdep_assert_held(&fq->lock); skb_queue_walk_safe(&flow->queue, skb, tmp) { if (!filter_func(fq, tin, flow, skb, filter_data)) continue; __skb_unlink(skb, &flow->queue); fq_adjust_removal(fq, flow, skb); free_func(fq, tin, flow, skb); } fq_rejigger_backlog(fq, flow); } static void fq_tin_filter(struct fq *fq, struct fq_tin *tin, fq_skb_filter_t filter_func, void *filter_data, fq_skb_free_t free_func) { struct fq_flow *flow; lockdep_assert_held(&fq->lock); list_for_each_entry(flow, &tin->new_flows, flowchain) fq_flow_filter(fq, flow, filter_func, filter_data, free_func); list_for_each_entry(flow, &tin->old_flows, flowchain) fq_flow_filter(fq, flow, filter_func, filter_data, free_func); } static void fq_flow_reset(struct fq *fq, struct fq_flow *flow, fq_skb_free_t free_func) { struct sk_buff *skb; while ((skb = fq_flow_dequeue(fq, flow))) free_func(fq, flow->tin, flow, skb); if (!list_empty(&flow->flowchain)) list_del_init(&flow->flowchain); if (!list_empty(&flow->backlogchain)) list_del_init(&flow->backlogchain); flow->tin = NULL; WARN_ON_ONCE(flow->backlog); } static void 
fq_tin_reset(struct fq *fq, struct fq_tin *tin, fq_skb_free_t free_func) { struct list_head *head; struct fq_flow *flow; for (;;) { head = &tin->new_flows; if (list_empty(head)) { head = &tin->old_flows; if (list_empty(head)) break; } flow = list_first_entry(head, struct fq_flow, flowchain); fq_flow_reset(fq, flow, free_func); } WARN_ON_ONCE(tin->backlog_bytes); WARN_ON_ONCE(tin->backlog_packets); } static void fq_flow_init(struct fq_flow *flow) { INIT_LIST_HEAD(&flow->flowchain); INIT_LIST_HEAD(&flow->backlogchain); __skb_queue_head_init(&flow->queue); } static void fq_tin_init(struct fq_tin *tin) { INIT_LIST_HEAD(&tin->new_flows); INIT_LIST_HEAD(&tin->old_flows); } static int fq_init(struct fq *fq, int flows_cnt) { int i; memset(fq, 0, sizeof(fq[0])); INIT_LIST_HEAD(&fq->backlogs); spin_lock_init(&fq->lock); fq->flows_cnt = max_t(u32, flows_cnt, 1); fq->perturbation = prandom_u32(); fq->quantum = 300; fq->limit = 8192; fq->memory_limit = 16 << 20; /* 16 MBytes */ fq->flows = kcalloc(fq->flows_cnt, sizeof(fq->flows[0]), GFP_KERNEL); if (!fq->flows) return -ENOMEM; for (i = 0; i < fq->flows_cnt; i++) fq_flow_init(&fq->flows[i]); return 0; } static void fq_reset(struct fq *fq, fq_skb_free_t free_func) { int i; for (i = 0; i < fq->flows_cnt; i++) fq_flow_reset(fq, &fq->flows[i], free_func); kfree(fq->flows); fq->flows = NULL; } #endif backport-iwlwifi-dkms-7906/include/net/ieee80211_radiotap.h000066400000000000000000000320471351064634500234570ustar00rootroot00000000000000/* * Copyright (c) 2017 Intel Deutschland GmbH * Copyright (c) 2018-2019 Intel Corporation * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #ifndef __RADIOTAP_H #define __RADIOTAP_H #include <linux/kernel.h> #include <asm/unaligned.h> /** * struct ieee80211_radiotap_header - base radiotap header */ struct ieee80211_radiotap_header { /** * @it_version: radiotap version, always 0 */ uint8_t it_version; /** * @it_pad: padding (or alignment) */ uint8_t it_pad; /** * @it_len: overall radiotap header length */ __le16 it_len; /** * @it_present: (first) present word */ __le32 it_present; } __packed; /* version is always 0 */ #define PKTHDR_RADIOTAP_VERSION 0 /* see the radiotap website for the descriptions */ enum ieee80211_radiotap_presence { IEEE80211_RADIOTAP_TSFT = 0, IEEE80211_RADIOTAP_FLAGS = 1, IEEE80211_RADIOTAP_RATE = 2, IEEE80211_RADIOTAP_CHANNEL = 3, IEEE80211_RADIOTAP_FHSS = 4, IEEE80211_RADIOTAP_DBM_ANTSIGNAL = 5, IEEE80211_RADIOTAP_DBM_ANTNOISE = 6, IEEE80211_RADIOTAP_LOCK_QUALITY = 7, IEEE80211_RADIOTAP_TX_ATTENUATION = 8, IEEE80211_RADIOTAP_DB_TX_ATTENUATION = 9, IEEE80211_RADIOTAP_DBM_TX_POWER = 10, IEEE80211_RADIOTAP_ANTENNA = 11, IEEE80211_RADIOTAP_DB_ANTSIGNAL = 12, IEEE80211_RADIOTAP_DB_ANTNOISE = 13, IEEE80211_RADIOTAP_RX_FLAGS = 14, IEEE80211_RADIOTAP_TX_FLAGS = 15, IEEE80211_RADIOTAP_RTS_RETRIES = 16, IEEE80211_RADIOTAP_DATA_RETRIES = 17, /* 18 is XChannel, but it's not defined yet */ IEEE80211_RADIOTAP_MCS = 19, IEEE80211_RADIOTAP_AMPDU_STATUS = 20, IEEE80211_RADIOTAP_VHT = 21, IEEE80211_RADIOTAP_TIMESTAMP = 22, IEEE80211_RADIOTAP_HE = 23, IEEE80211_RADIOTAP_HE_MU = 24, IEEE80211_RADIOTAP_ZERO_LEN_PSDU = 26, IEEE80211_RADIOTAP_LSIG = 27, /* valid in every it_present bitmap, even vendor namespaces */ IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE = 29, IEEE80211_RADIOTAP_VENDOR_NAMESPACE = 30, IEEE80211_RADIOTAP_EXT = 31 }; /* for IEEE80211_RADIOTAP_FLAGS */ enum ieee80211_radiotap_flags { IEEE80211_RADIOTAP_F_CFP = 0x01, IEEE80211_RADIOTAP_F_SHORTPRE = 0x02, IEEE80211_RADIOTAP_F_WEP = 0x04, IEEE80211_RADIOTAP_F_FRAG = 0x08, IEEE80211_RADIOTAP_F_FCS = 0x10, IEEE80211_RADIOTAP_F_DATAPAD = 0x20, IEEE80211_RADIOTAP_F_BADFCS = 0x40, }; /* for IEEE80211_RADIOTAP_CHANNEL */ enum ieee80211_radiotap_channel_flags { IEEE80211_CHAN_CCK = 0x0020, IEEE80211_CHAN_OFDM = 0x0040, IEEE80211_CHAN_2GHZ = 0x0080, IEEE80211_CHAN_5GHZ = 0x0100, IEEE80211_CHAN_DYN = 0x0400, IEEE80211_CHAN_HALF = 0x4000, IEEE80211_CHAN_QUARTER = 0x8000, }; /* for IEEE80211_RADIOTAP_RX_FLAGS */ enum ieee80211_radiotap_rx_flags { IEEE80211_RADIOTAP_F_RX_BADPLCP = 0x0002, }; /* for IEEE80211_RADIOTAP_TX_FLAGS */ enum ieee80211_radiotap_tx_flags { IEEE80211_RADIOTAP_F_TX_FAIL = 0x0001, IEEE80211_RADIOTAP_F_TX_CTS = 0x0002, IEEE80211_RADIOTAP_F_TX_RTS = 0x0004, IEEE80211_RADIOTAP_F_TX_NOACK = 0x0008, }; /* for IEEE80211_RADIOTAP_MCS "have" flags */ enum ieee80211_radiotap_mcs_have { IEEE80211_RADIOTAP_MCS_HAVE_BW = 0x01, IEEE80211_RADIOTAP_MCS_HAVE_MCS = 0x02, IEEE80211_RADIOTAP_MCS_HAVE_GI = 0x04, IEEE80211_RADIOTAP_MCS_HAVE_FMT = 0x08, IEEE80211_RADIOTAP_MCS_HAVE_FEC = 0x10, IEEE80211_RADIOTAP_MCS_HAVE_STBC = 0x20, }; enum ieee80211_radiotap_mcs_flags { IEEE80211_RADIOTAP_MCS_BW_MASK = 0x03, IEEE80211_RADIOTAP_MCS_BW_20 = 0, IEEE80211_RADIOTAP_MCS_BW_40 = 1, IEEE80211_RADIOTAP_MCS_BW_20L = 2, IEEE80211_RADIOTAP_MCS_BW_20U = 3, IEEE80211_RADIOTAP_MCS_SGI = 0x04, IEEE80211_RADIOTAP_MCS_FMT_GF = 0x08, IEEE80211_RADIOTAP_MCS_FEC_LDPC = 0x10, IEEE80211_RADIOTAP_MCS_STBC_MASK = 0x60, IEEE80211_RADIOTAP_MCS_STBC_1 = 1, IEEE80211_RADIOTAP_MCS_STBC_2 = 2, IEEE80211_RADIOTAP_MCS_STBC_3 = 3, IEEE80211_RADIOTAP_MCS_STBC_SHIFT = 5, }; /* for IEEE80211_RADIOTAP_AMPDU_STATUS */ enum
ieee80211_radiotap_ampdu_flags { IEEE80211_RADIOTAP_AMPDU_REPORT_ZEROLEN = 0x0001, IEEE80211_RADIOTAP_AMPDU_IS_ZEROLEN = 0x0002, IEEE80211_RADIOTAP_AMPDU_LAST_KNOWN = 0x0004, IEEE80211_RADIOTAP_AMPDU_IS_LAST = 0x0008, IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_ERR = 0x0010, IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_KNOWN = 0x0020, IEEE80211_RADIOTAP_AMPDU_EOF = 0x0040, IEEE80211_RADIOTAP_AMPDU_EOF_KNOWN = 0x0080, }; /* for IEEE80211_RADIOTAP_VHT */ enum ieee80211_radiotap_vht_known { IEEE80211_RADIOTAP_VHT_KNOWN_STBC = 0x0001, IEEE80211_RADIOTAP_VHT_KNOWN_TXOP_PS_NA = 0x0002, IEEE80211_RADIOTAP_VHT_KNOWN_GI = 0x0004, IEEE80211_RADIOTAP_VHT_KNOWN_SGI_NSYM_DIS = 0x0008, IEEE80211_RADIOTAP_VHT_KNOWN_LDPC_EXTRA_OFDM_SYM = 0x0010, IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED = 0x0020, IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH = 0x0040, IEEE80211_RADIOTAP_VHT_KNOWN_GROUP_ID = 0x0080, IEEE80211_RADIOTAP_VHT_KNOWN_PARTIAL_AID = 0x0100, }; enum ieee80211_radiotap_vht_flags { IEEE80211_RADIOTAP_VHT_FLAG_STBC = 0x01, IEEE80211_RADIOTAP_VHT_FLAG_TXOP_PS_NA = 0x02, IEEE80211_RADIOTAP_VHT_FLAG_SGI = 0x04, IEEE80211_RADIOTAP_VHT_FLAG_SGI_NSYM_M10_9 = 0x08, IEEE80211_RADIOTAP_VHT_FLAG_LDPC_EXTRA_OFDM_SYM = 0x10, IEEE80211_RADIOTAP_VHT_FLAG_BEAMFORMED = 0x20, }; enum ieee80211_radiotap_vht_coding { IEEE80211_RADIOTAP_CODING_LDPC_USER0 = 0x01, IEEE80211_RADIOTAP_CODING_LDPC_USER1 = 0x02, IEEE80211_RADIOTAP_CODING_LDPC_USER2 = 0x04, IEEE80211_RADIOTAP_CODING_LDPC_USER3 = 0x08, }; /* for IEEE80211_RADIOTAP_TIMESTAMP */ enum ieee80211_radiotap_timestamp_unit_spos { IEEE80211_RADIOTAP_TIMESTAMP_UNIT_MASK = 0x000F, IEEE80211_RADIOTAP_TIMESTAMP_UNIT_MS = 0x0000, IEEE80211_RADIOTAP_TIMESTAMP_UNIT_US = 0x0001, IEEE80211_RADIOTAP_TIMESTAMP_UNIT_NS = 0x0003, IEEE80211_RADIOTAP_TIMESTAMP_SPOS_MASK = 0x00F0, IEEE80211_RADIOTAP_TIMESTAMP_SPOS_BEGIN_MDPU = 0x0000, IEEE80211_RADIOTAP_TIMESTAMP_SPOS_PLCP_SIG_ACQ = 0x0010, IEEE80211_RADIOTAP_TIMESTAMP_SPOS_EO_PPDU = 0x0020, IEEE80211_RADIOTAP_TIMESTAMP_SPOS_EO_MPDU = 0x0030, IEEE80211_RADIOTAP_TIMESTAMP_SPOS_UNKNOWN = 0x00F0, }; enum ieee80211_radiotap_timestamp_flags { IEEE80211_RADIOTAP_TIMESTAMP_FLAG_64BIT = 0x00, IEEE80211_RADIOTAP_TIMESTAMP_FLAG_32BIT = 0x01, IEEE80211_RADIOTAP_TIMESTAMP_FLAG_ACCURACY = 0x02, }; struct ieee80211_radiotap_he { __le16 data1, data2, data3, data4, data5, data6; }; enum ieee80211_radiotap_he_bits { IEEE80211_RADIOTAP_HE_DATA1_FORMAT_MASK = 3, IEEE80211_RADIOTAP_HE_DATA1_FORMAT_SU = 0, IEEE80211_RADIOTAP_HE_DATA1_FORMAT_EXT_SU = 1, IEEE80211_RADIOTAP_HE_DATA1_FORMAT_MU = 2, IEEE80211_RADIOTAP_HE_DATA1_FORMAT_TRIG = 3, IEEE80211_RADIOTAP_HE_DATA1_BSS_COLOR_KNOWN = 0x0004, IEEE80211_RADIOTAP_HE_DATA1_BEAM_CHANGE_KNOWN = 0x0008, IEEE80211_RADIOTAP_HE_DATA1_UL_DL_KNOWN = 0x0010, IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN = 0x0020, IEEE80211_RADIOTAP_HE_DATA1_DATA_DCM_KNOWN = 0x0040, IEEE80211_RADIOTAP_HE_DATA1_CODING_KNOWN = 0x0080, IEEE80211_RADIOTAP_HE_DATA1_LDPC_XSYMSEG_KNOWN = 0x0100, IEEE80211_RADIOTAP_HE_DATA1_STBC_KNOWN = 0x0200, IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE_KNOWN = 0x0400, IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE2_KNOWN = 0x0800, IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE3_KNOWN = 0x1000, IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE4_KNOWN = 0x2000, IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN = 0x4000, IEEE80211_RADIOTAP_HE_DATA1_DOPPLER_KNOWN = 0x8000, IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_KNOWN = 0x0001, IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN = 0x0002, IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN = 0x0004, IEEE80211_RADIOTAP_HE_DATA2_PRE_FEC_PAD_KNOWN = 
0x0008, IEEE80211_RADIOTAP_HE_DATA2_TXBF_KNOWN = 0x0010, IEEE80211_RADIOTAP_HE_DATA2_PE_DISAMBIG_KNOWN = 0x0020, IEEE80211_RADIOTAP_HE_DATA2_TXOP_KNOWN = 0x0040, IEEE80211_RADIOTAP_HE_DATA2_MIDAMBLE_KNOWN = 0x0080, IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET = 0x3f00, IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET_KNOWN = 0x4000, IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_SEC = 0x8000, IEEE80211_RADIOTAP_HE_DATA3_BSS_COLOR = 0x003f, IEEE80211_RADIOTAP_HE_DATA3_BEAM_CHANGE = 0x0040, IEEE80211_RADIOTAP_HE_DATA3_UL_DL = 0x0080, IEEE80211_RADIOTAP_HE_DATA3_DATA_MCS = 0x0f00, IEEE80211_RADIOTAP_HE_DATA3_DATA_DCM = 0x1000, IEEE80211_RADIOTAP_HE_DATA3_CODING = 0x2000, IEEE80211_RADIOTAP_HE_DATA3_LDPC_XSYMSEG = 0x4000, IEEE80211_RADIOTAP_HE_DATA3_STBC = 0x8000, IEEE80211_RADIOTAP_HE_DATA4_SU_MU_SPTL_REUSE = 0x000f, IEEE80211_RADIOTAP_HE_DATA4_MU_STA_ID = 0x7ff0, IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE1 = 0x000f, IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE2 = 0x00f0, IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE3 = 0x0f00, IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE4 = 0xf000, IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC = 0x000f, IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_20MHZ = 0, IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_40MHZ = 1, IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_80MHZ = 2, IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_160MHZ = 3, IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_26T = 4, IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_52T = 5, IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_106T = 6, IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_242T = 7, IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_484T = 8, IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_996T = 9, IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_2x996T = 10, IEEE80211_RADIOTAP_HE_DATA5_GI = 0x0030, IEEE80211_RADIOTAP_HE_DATA5_GI_0_8 = 0, IEEE80211_RADIOTAP_HE_DATA5_GI_1_6 = 1, IEEE80211_RADIOTAP_HE_DATA5_GI_3_2 = 2, IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE = 0x00c0, IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_UNKNOWN = 0, IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_1X = 1, IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_2X = 2, IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X = 3, IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS = 0x0700, IEEE80211_RADIOTAP_HE_DATA5_PRE_FEC_PAD = 0x3000, IEEE80211_RADIOTAP_HE_DATA5_TXBF = 0x4000, IEEE80211_RADIOTAP_HE_DATA5_PE_DISAMBIG = 0x8000, IEEE80211_RADIOTAP_HE_DATA6_NSTS = 0x000f, IEEE80211_RADIOTAP_HE_DATA6_DOPPLER = 0x0010, IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW_KNOWN = 0x0020, IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW = 0x00c0, IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW_20MHZ = 0, IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW_40MHZ = 1, IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW_80MHZ = 2, IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW_160MHZ = 3, IEEE80211_RADIOTAP_HE_DATA6_TXOP = 0x7f00, IEEE80211_RADIOTAP_HE_DATA6_MIDAMBLE_PDCTY = 0x8000, }; struct ieee80211_radiotap_he_mu { __le16 flags1, flags2; u8 ru_ch1[4]; u8 ru_ch2[4]; }; enum ieee80211_radiotap_he_mu_bits { IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS = 0x000f, IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS_KNOWN = 0x0010, IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM = 0x0020, IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM_KNOWN = 0x0040, IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH2_CTR_26T_RU_KNOWN = 0x0080, IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_RU_KNOWN = 0x0100, IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH2_RU_KNOWN = 0x0200, IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_CTR_26T_RU_KNOWN = 0x1000, IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_CTR_26T_RU = 0x2000, IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_COMP_KNOWN = 0x4000, 
IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_SYMS_USERS_KNOWN = 0x8000, IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW = 0x0003, IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_20MHZ = 0x0000, IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_40MHZ = 0x0001, IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_80MHZ = 0x0002, IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_160MHZ = 0x0003, IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_KNOWN = 0x0004, IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_COMP = 0x0008, IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_SYMS_USERS = 0x00f0, IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW = 0x0300, IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW_KNOWN= 0x0400, IEEE80211_RADIOTAP_HE_MU_FLAGS2_CH2_CTR_26T_RU = 0x0800, }; enum ieee80211_radiotap_lsig_data1 { IEEE80211_RADIOTAP_LSIG_DATA1_RATE_KNOWN = 0x0001, IEEE80211_RADIOTAP_LSIG_DATA1_LENGTH_KNOWN = 0x0002, }; enum ieee80211_radiotap_lsig_data2 { IEEE80211_RADIOTAP_LSIG_DATA2_RATE = 0x000f, IEEE80211_RADIOTAP_LSIG_DATA2_LENGTH = 0xfff0, }; struct ieee80211_radiotap_lsig { __le16 data1, data2; }; enum ieee80211_radiotap_zero_len_psdu_type { IEEE80211_RADIOTAP_ZERO_LEN_PSDU_SOUNDING = 0, IEEE80211_RADIOTAP_ZERO_LEN_PSDU_NOT_CAPTURED = 1, IEEE80211_RADIOTAP_ZERO_LEN_PSDU_VENDOR = 0xff, }; /** * ieee80211_get_radiotap_len - get radiotap header length */ static inline u16 ieee80211_get_radiotap_len(const char *data) { struct ieee80211_radiotap_header *hdr = (void *)data; return get_unaligned_le16(&hdr->it_len); } #endif /* __RADIOTAP_H */ backport-iwlwifi-dkms-7906/include/net/mac80211.h000066400000000000000000007513241351064634500214330ustar00rootroot00000000000000/* * mac80211 <-> driver interface * * Copyright 2002-2005, Devicescape Software, Inc. * Copyright 2006-2007 Jiri Benc * Copyright 2007-2010 Johannes Berg * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015 - 2017 Intel Deutschland GmbH * Copyright (C) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #ifndef MAC80211_H #define MAC80211_H #include #include #include #include #include #include #include #include #include /** * DOC: Introduction * * mac80211 is the Linux stack for 802.11 hardware that implements * only partial functionality in hard- or firmware. This document * defines the interface between mac80211 and low-level hardware * drivers. */ /** * DOC: Calling mac80211 from interrupts * * Only ieee80211_tx_status_irqsafe() and ieee80211_rx_irqsafe() can be * called in hardware interrupt context. The low-level driver must not call any * other functions in hardware interrupt context. If there is a need for such * call, the low-level driver should first ACK the interrupt and perform the * IEEE 802.11 code call after this, e.g. from a scheduled workqueue or even * tasklet function. * * NOTE: If the driver opts to use the _irqsafe() functions, it may not also * use the non-IRQ-safe functions! */ /** * DOC: Warning * * If you're reading this document and not the header file itself, it will * be incomplete because not all documentation has been converted yet. */ /** * DOC: Frame format * * As a general rule, when frames are passed between mac80211 and the driver, * they start with the IEEE 802.11 header and include the same octets that are * sent over the air except for the FCS which should be calculated by the * hardware. 
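 * For example, a data frame handed to the driver for transmission starts
 * with the 802.11 MAC header (frame control, duration, addresses,
 * sequence control) followed by the frame body, but not the FCS; the
 * hardware is expected to append it.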
* * There are, however, various exceptions to this rule for advanced features: * * The first exception is for hardware encryption and decryption offload * where the IV/ICV may or may not be generated in hardware. * * Secondly, when the hardware handles fragmentation, the frame handed to * the driver from mac80211 is the MSDU, not the MPDU. */ /** * DOC: mac80211 workqueue * * mac80211 provides its own workqueue for drivers and internal mac80211 use. * The workqueue is a single-threaded workqueue and can only be accessed by * helpers for sanity checking. Drivers must ensure that all work added onto the * mac80211 workqueue is cancelled in the driver stop() callback. * * mac80211 will flush the workqueue upon interface removal and during * suspend. * * All work performed on the mac80211 workqueue must not acquire the RTNL lock. * */ /** * DOC: mac80211 software tx queueing * * mac80211 provides an optional intermediate queueing implementation designed * to allow the driver to keep hardware queues short and provide some fairness * between different stations/interfaces. * In this model, the driver pulls data frames from the mac80211 queue instead * of letting mac80211 push them via drv_tx(). * Other frames (e.g. control or management) are still pushed using drv_tx(). * * Drivers indicate that they use this model by implementing the .wake_tx_queue * driver operation. * * Intermediate queues (struct ieee80211_txq) are kept per-sta per-tid, with * another per-sta for non-data/non-mgmt and bufferable management frames, and * a single per-vif queue for multicast data frames. * * The driver is expected to initialize its private per-queue data for stations * and interfaces in the .add_interface and .sta_add ops. * * The driver can't access the queue directly. To dequeue a frame, it calls * ieee80211_tx_dequeue(). Whenever mac80211 adds a new frame to a queue, it * calls the .wake_tx_queue driver op. * * For AP powersave TIM handling, the driver only needs to indicate if it has * buffered packets in the driver-specific data structures by calling * ieee80211_sta_set_buffered(). For frames buffered in the ieee80211_txq * struct, mac80211 sets the appropriate TIM PVB bits and calls * .release_buffered_frames(). * In that callback the driver is therefore expected to release its own * buffered frames and afterwards also frames from the ieee80211_txq (obtained * via the usual ieee80211_tx_dequeue). */ struct device; /** * enum ieee80211_max_queues - maximum number of queues * * @IEEE80211_MAX_QUEUES: Maximum number of regular device queues. * @IEEE80211_MAX_QUEUE_MAP: bitmap with maximum queues set */ enum ieee80211_max_queues { IEEE80211_MAX_QUEUES = 16, IEEE80211_MAX_QUEUE_MAP = BIT(IEEE80211_MAX_QUEUES) - 1, }; #define IEEE80211_INVAL_HW_QUEUE 0xff /** * enum ieee80211_ac_numbers - AC numbers as used in mac80211 * @IEEE80211_AC_VO: voice * @IEEE80211_AC_VI: video * @IEEE80211_AC_BE: best effort * @IEEE80211_AC_BK: background */ enum ieee80211_ac_numbers { IEEE80211_AC_VO = 0, IEEE80211_AC_VI = 1, IEEE80211_AC_BE = 2, IEEE80211_AC_BK = 3, }; /** * struct ieee80211_tx_queue_params - transmit queue configuration * * The information provided in this structure is required for QoS * transmit queue configuration. Cf. IEEE 802.11 7.3.2.29.
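 * For example, with the 802.11 EDCA defaults a best-effort (AC_BE) queue
 * would typically be configured as aifs = 3, cw_min = 15, cw_max = 1023
 * and txop = 0 (TXOP disabled).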
* * @aifs: arbitration interframe space [0..255] * @cw_min: minimum contention window [a value of the form * 2^n-1 in the range 1..32767] * @cw_max: maximum contention window [like @cw_min] * @txop: maximum burst time in units of 32 usecs, 0 meaning disabled * @acm: is mandatory admission control required for the access category * @uapsd: is U-APSD mode enabled for the queue * @mu_edca: is the MU EDCA configured * @mu_edca_param_rec: MU EDCA Parameter Record for HE */ struct ieee80211_tx_queue_params { u16 txop; u16 cw_min; u16 cw_max; u8 aifs; bool acm; bool uapsd; bool mu_edca; struct ieee80211_he_mu_edca_param_ac_rec mu_edca_param_rec; }; struct ieee80211_low_level_stats { unsigned int dot11ACKFailureCount; unsigned int dot11RTSFailureCount; unsigned int dot11FCSErrorCount; unsigned int dot11RTSSuccessCount; }; /** * enum ieee80211_chanctx_change - change flag for channel context * @IEEE80211_CHANCTX_CHANGE_WIDTH: The channel width changed * @IEEE80211_CHANCTX_CHANGE_RX_CHAINS: The number of RX chains changed * @IEEE80211_CHANCTX_CHANGE_RADAR: radar detection flag changed * @IEEE80211_CHANCTX_CHANGE_CHANNEL: switched to another operating channel, * this is used only with channel switching with CSA * @IEEE80211_CHANCTX_CHANGE_MIN_WIDTH: The min required channel width changed */ enum ieee80211_chanctx_change { IEEE80211_CHANCTX_CHANGE_WIDTH = BIT(0), IEEE80211_CHANCTX_CHANGE_RX_CHAINS = BIT(1), IEEE80211_CHANCTX_CHANGE_RADAR = BIT(2), IEEE80211_CHANCTX_CHANGE_CHANNEL = BIT(3), IEEE80211_CHANCTX_CHANGE_MIN_WIDTH = BIT(4), }; /** * struct ieee80211_chanctx_conf - channel context that vifs may be tuned to * * This is the driver-visible part. The ieee80211_chanctx * that contains it is visible in mac80211 only. * * @def: the channel definition * @min_def: the minimum channel definition currently required. * @rx_chains_static: The number of RX chains that must always be * active on the channel to receive MIMO transmissions * @rx_chains_dynamic: The number of RX chains that must be enabled * after RTS/CTS handshake to receive SMPS MIMO transmissions; * this will always be >= @rx_chains_static. * @radar_enabled: whether radar detection is enabled on this channel. * @drv_priv: data area for driver use, will always be aligned to * sizeof(void *), size is determined in hw information. */ struct ieee80211_chanctx_conf { struct cfg80211_chan_def def; struct cfg80211_chan_def min_def; u8 rx_chains_static, rx_chains_dynamic; bool radar_enabled; u8 drv_priv[0] __aligned(sizeof(void *)); }; /** * enum ieee80211_chanctx_switch_mode - channel context switch mode * @CHANCTX_SWMODE_REASSIGN_VIF: Both old and new contexts already * exist (and will continue to exist), but the virtual interface * needs to be switched from one to the other. * @CHANCTX_SWMODE_SWAP_CONTEXTS: The old context exists but will cease * to exist with this call, the new context doesn't exist but * will be active after this call, the virtual interface switches * from the old to the new (note that the driver may of course * implement this as an on-the-fly chandef switch of the existing * hardware context, but the mac80211 pointer for the old context * will cease to exist and only the new one will later be used * for changes/removal.) */ enum ieee80211_chanctx_switch_mode { CHANCTX_SWMODE_REASSIGN_VIF, CHANCTX_SWMODE_SWAP_CONTEXTS, }; /** * struct ieee80211_vif_chanctx_switch - vif chanctx switch information * * This structure is used to pass information about a vif that * needs to switch from one chanctx to another.
The * &ieee80211_chanctx_switch_mode defines how the switch should be * done. * * @vif: the vif that should be switched from old_ctx to new_ctx * @old_ctx: the old context to which the vif was assigned * @new_ctx: the new context to which the vif must be assigned */ struct ieee80211_vif_chanctx_switch { struct ieee80211_vif *vif; struct ieee80211_chanctx_conf *old_ctx; struct ieee80211_chanctx_conf *new_ctx; }; /** * enum ieee80211_bss_change - BSS change notification flags * * These flags are used with the bss_info_changed() callback * to indicate which BSS parameter changed. * * @BSS_CHANGED_ASSOC: association status changed (associated/disassociated), * also implies a change in the AID. * @BSS_CHANGED_ERP_CTS_PROT: CTS protection changed * @BSS_CHANGED_ERP_PREAMBLE: preamble changed * @BSS_CHANGED_ERP_SLOT: slot timing changed * @BSS_CHANGED_HT: 802.11n parameters changed * @BSS_CHANGED_BASIC_RATES: Basic rateset changed * @BSS_CHANGED_BEACON_INT: Beacon interval changed * @BSS_CHANGED_BSSID: BSSID changed, for whatever * reason (IBSS and managed mode) * @BSS_CHANGED_BEACON: Beacon data changed, retrieve * new beacon (beaconing modes) * @BSS_CHANGED_BEACON_ENABLED: Beaconing should be * enabled/disabled (beaconing modes) * @BSS_CHANGED_CQM: Connection quality monitor config changed * @BSS_CHANGED_IBSS: IBSS join status changed * @BSS_CHANGED_ARP_FILTER: Hardware ARP filter address list or state changed. * @BSS_CHANGED_QOS: QoS for this association was enabled/disabled. Note * that it is only ever disabled for station mode. * @BSS_CHANGED_IDLE: Idle changed for this BSS/interface. * @BSS_CHANGED_SSID: SSID changed for this BSS (AP and IBSS mode) * @BSS_CHANGED_AP_PROBE_RESP: Probe Response changed for this BSS (AP mode) * @BSS_CHANGED_PS: PS changed for this BSS (STA mode) * @BSS_CHANGED_TXPOWER: TX power setting changed for this interface * @BSS_CHANGED_P2P_PS: P2P powersave settings (CTWindow, opportunistic PS) * changed * @BSS_CHANGED_BEACON_INFO: Data from the AP's beacon became available: * currently dtim_period only is under consideration. * @BSS_CHANGED_BANDWIDTH: The bandwidth used by this interface changed, * note that this is only called when it changes after the channel * context had been assigned. * @BSS_CHANGED_OCB: OCB join status changed * @BSS_CHANGED_MU_GROUPS: VHT MU-MIMO group id or user position changed * @BSS_CHANGED_KEEP_ALIVE: keep alive options (idle period or protected * keep alive) changed. * @BSS_CHANGED_MCAST_RATE: Multicast Rate setting changed for this interface * @BSS_CHANGED_FTM_RESPONDER: fine timing measurement request responder * functionality changed for this BSS (AP mode).
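 *
 * A driver receives these as the 'changed' bitmask argument of its
 * bss_info_changed() callback and only needs to act on the bits that
 * are set; see the sketch at the end of the &struct ieee80211_bss_conf
 * documentation below.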
* */ enum ieee80211_bss_change { BSS_CHANGED_ASSOC = 1<<0, BSS_CHANGED_ERP_CTS_PROT = 1<<1, BSS_CHANGED_ERP_PREAMBLE = 1<<2, BSS_CHANGED_ERP_SLOT = 1<<3, BSS_CHANGED_HT = 1<<4, BSS_CHANGED_BASIC_RATES = 1<<5, BSS_CHANGED_BEACON_INT = 1<<6, BSS_CHANGED_BSSID = 1<<7, BSS_CHANGED_BEACON = 1<<8, BSS_CHANGED_BEACON_ENABLED = 1<<9, BSS_CHANGED_CQM = 1<<10, BSS_CHANGED_IBSS = 1<<11, BSS_CHANGED_ARP_FILTER = 1<<12, BSS_CHANGED_QOS = 1<<13, BSS_CHANGED_IDLE = 1<<14, BSS_CHANGED_SSID = 1<<15, BSS_CHANGED_AP_PROBE_RESP = 1<<16, BSS_CHANGED_PS = 1<<17, BSS_CHANGED_TXPOWER = 1<<18, BSS_CHANGED_P2P_PS = 1<<19, BSS_CHANGED_BEACON_INFO = 1<<20, BSS_CHANGED_BANDWIDTH = 1<<21, BSS_CHANGED_OCB = 1<<22, BSS_CHANGED_MU_GROUPS = 1<<23, BSS_CHANGED_KEEP_ALIVE = 1<<24, BSS_CHANGED_MCAST_RATE = 1<<25, BSS_CHANGED_FTM_RESPONDER = 1<<26, /* when adding here, make sure to change ieee80211_reconfig */ }; /* * The maximum number of IPv4 addresses listed for ARP filtering. If the number * of addresses for an interface increases beyond this value, hardware ARP * filtering will be disabled. */ #define IEEE80211_BSS_ARP_ADDR_LIST_LEN 4 /** * enum ieee80211_event_type - event to be notified to the low level driver * @RSSI_EVENT: AP's rssi crossed a threshold set by the driver. * @MLME_EVENT: event related to MLME * @BAR_RX_EVENT: a BAR was received * @BA_FRAME_TIMEOUT: Frames were released from the reordering buffer because * they timed out. This won't be called for each frame released, but only * once each time the timeout triggers. * @TX_LATENCY_EVENT: A Tx packet latency crossed the configured threshold */ enum ieee80211_event_type { RSSI_EVENT, MLME_EVENT, BAR_RX_EVENT, BA_FRAME_TIMEOUT, #ifdef CPTCFG_MAC80211_LATENCY_MEASUREMENTS TX_LATENCY_EVENT, #endif /* CPTCFG_MAC80211_LATENCY_MEASUREMENTS */ }; /** * enum ieee80211_rssi_event_data - relevant when event type is %RSSI_EVENT * @RSSI_EVENT_HIGH: AP's rssi went above the threshold set by the driver. * @RSSI_EVENT_LOW: AP's rssi went below the threshold set by the driver. */ enum ieee80211_rssi_event_data { RSSI_EVENT_HIGH, RSSI_EVENT_LOW, }; /** * struct ieee80211_rssi_event - data attached to an %RSSI_EVENT * @data: See &enum ieee80211_rssi_event_data */ struct ieee80211_rssi_event { enum ieee80211_rssi_event_data data; }; /** * enum ieee80211_mlme_event_data - relevant when event type is %MLME_EVENT * @AUTH_EVENT: the MLME operation is authentication * @ASSOC_EVENT: the MLME operation is association * @DEAUTH_RX_EVENT: deauth received. * @DEAUTH_TX_EVENT: deauth sent. */ enum ieee80211_mlme_event_data { AUTH_EVENT, ASSOC_EVENT, DEAUTH_RX_EVENT, DEAUTH_TX_EVENT, }; /** * enum ieee80211_mlme_event_status - relevant when event type is %MLME_EVENT * @MLME_SUCCESS: the MLME operation completed successfully. * @MLME_DENIED: the MLME operation was denied by the peer. * @MLME_TIMEOUT: the MLME operation timed out.
*/ enum ieee80211_mlme_event_status { MLME_SUCCESS, MLME_DENIED, MLME_TIMEOUT, }; /** * struct ieee80211_mlme_event - data attached to an %MLME_EVENT * @data: See &enum ieee80211_mlme_event_data * @status: See &enum ieee80211_mlme_event_status * @reason: the reason code if applicable */ struct ieee80211_mlme_event { enum ieee80211_mlme_event_data data; enum ieee80211_mlme_event_status status; u16 reason; }; /** * struct ieee80211_ba_event - data attached for BlockAck related events * @sta: pointer to the &ieee80211_sta to which this event relates * @tid: the tid * @ssn: the starting sequence number (for %BAR_RX_EVENT) */ struct ieee80211_ba_event { struct ieee80211_sta *sta; u16 tid; u16 ssn; }; #ifdef CPTCFG_MAC80211_LATENCY_MEASUREMENTS /* * enum ieee80211_tx_latency_iface - ifaces that can have a latency threshold */ enum ieee80211_tx_latency_iface { IEEE80211_TX_LATENCY_BSS, IEEE80211_TX_LATENCY_P2P, }; /* * enum ieee80211_tx_latency_rec_mode - usniffer logs recording mode * @IEEE80211_TX_LATENCY_INT_BUF: internal buffer to save usniffer logs * @IEEE80211_TX_LATENCY_EXT_BUF: external buffer to save usniffer logs */ enum ieee80211_tx_latency_rec_mode { IEEE80211_TX_LATENCY_INT_BUF, IEEE80211_TX_LATENCY_EXT_BUF }; /* * struct ieee80211_tx_latency_event - data attached to a tx latency event * * @mode: recording mode (internal buffer or continuous recording) * @monitor_collec_wind: the size of the window to collect the logs * @seq: packet sequence * @pkt_start: start time of triggering pkt * @pkt_end: end time of triggering pkt * @msrmnt: the tx latency of the pkt * @tid: tid of the pkt * @event_time: the kernel time, in ms, in which the latency event occurred */ struct ieee80211_tx_latency_event { u16 mode; u32 monitor_collec_wind; u16 seq; u32 pkt_start; u32 pkt_end; u32 msrmnt; u16 tid; u32 event_time; }; #endif /* CPTCFG_MAC80211_LATENCY_MEASUREMENTS */ /** * struct ieee80211_event - event to be sent to the driver * @type: The event itself. See &enum ieee80211_event_type. * @rssi: relevant if &type is %RSSI_EVENT * @mlme: relevant if &type is %AUTH_EVENT * @ba: relevant if &type is %BAR_RX_EVENT or %BA_FRAME_TIMEOUT * @tx_lat: relevant if &type is %TX_LATENCY_EVENT * @u: union holding the fields above */ struct ieee80211_event { enum ieee80211_event_type type; union { struct ieee80211_rssi_event rssi; struct ieee80211_mlme_event mlme; struct ieee80211_ba_event ba; #ifdef CPTCFG_MAC80211_LATENCY_MEASUREMENTS struct ieee80211_tx_latency_event tx_lat; #endif /* CPTCFG_MAC80211_LATENCY_MEASUREMENTS */ } u; }; /** * struct ieee80211_mu_group_data - STA's VHT MU-MIMO group data * * This structure describes the group id data of VHT MU-MIMO * * @membership: 64 bits array - a bit is set if station is member of the group * @position: 2 bits per group id indicating the position in the group */ struct ieee80211_mu_group_data { u8 membership[WLAN_MEMBERSHIP_LEN]; u8 position[WLAN_USER_POSITION_LEN]; }; /** * struct ieee80211_ftm_responder_params - FTM responder parameters * * @lci: LCI subelement content * @civicloc: CIVIC location subelement content * @lci_len: LCI data length * @civicloc_len: Civic data length */ struct ieee80211_ftm_responder_params { const u8 *lci; const u8 *civicloc; size_t lci_len; size_t civicloc_len; }; /** * struct ieee80211_bss_conf - holds the BSS's changing parameters * * This structure keeps information about a BSS (and an association * to that BSS) that can change during the lifetime of the BSS.
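 * Drivers can read the current values through the bss_conf member
 * embedded in their &struct ieee80211_vif; changes are signalled via the
 * bss_info_changed() callback together with a bitmask of changed flags.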
* * @bss_color: 6-bit value to mark inter-BSS frame, if BSS supports HE * @htc_trig_based_pkt_ext: default PE in 4us units, if BSS supports HE * @multi_sta_back_32bit: supports BA bitmap of 32-bits in Multi-STA BACK * @uora_exists: is the UORA element advertised by AP * @ack_enabled: indicates support to receive a multi-TID that solicits either * ACK, BACK or both * @uora_ocw_range: UORA element's OCW Range field * @frame_time_rts_th: HE duration RTS threshold, in units of 32us * @he_support: does this BSS support HE * @twt_requester: does this BSS support TWT requester (relevant for managed * mode only, set if the AP advertises TWT responder role) * @assoc: association status * @ibss_joined: indicates whether this station is part of an IBSS * or not * @ibss_creator: indicates if a new IBSS network is being created * @aid: association ID number, valid only when @assoc is true * @use_cts_prot: use CTS protection * @use_short_preamble: use 802.11b short preamble * @use_short_slot: use short slot time (only relevant for ERP) * @dtim_period: num of beacons before the next DTIM, for beaconing, * valid in station mode only if after the driver was notified * with the %BSS_CHANGED_BEACON_INFO flag, will be non-zero then. * @sync_tsf: last beacon's/probe response's TSF timestamp (could be old * as it may have been received during scanning long ago). If the * HW flag %IEEE80211_HW_TIMING_BEACON_ONLY is set, then this can * only come from a beacon, but might not become valid until after * association when a beacon is received (which is notified with the * %BSS_CHANGED_DTIM flag.). See also sync_dtim_count important notice. * @sync_device_ts: the device timestamp corresponding to the sync_tsf, * the driver/device can use this to calculate synchronisation * (see @sync_tsf). See also sync_dtim_count important notice. * @sync_dtim_count: Only valid when %IEEE80211_HW_TIMING_BEACON_ONLY * is requested, see @sync_tsf/@sync_device_ts. * IMPORTANT: These three sync_* parameters would possibly be out of sync * by the time the driver will use them. The synchronized view is currently * guaranteed only in certain callbacks. * @beacon_int: beacon interval * @assoc_capability: capabilities taken from assoc resp * @basic_rates: bitmap of basic rates, each bit stands for an * index into the rate table configured by the driver in * the current band. * @beacon_rate: associated AP's beacon TX rate * @mcast_rate: per-band multicast rate index + 1 (0: disabled) * @bssid: The BSSID for this BSS * @enable_beacon: whether beaconing should be enabled or not * @chandef: Channel definition for this BSS -- the hardware might be * configured a higher bandwidth than this BSS uses, for example. * @mu_group: VHT MU-MIMO group membership data * @ht_operation_mode: HT operation mode like in &struct ieee80211_ht_operation. * This field is only valid when the channel is a wide HT/VHT channel. * Note that with TDLS this can be the case (channel is HT, protection must * be used from this field) even when the BSS association isn't using HT. * @cqm_rssi_thold: Connection quality monitor RSSI threshold, a zero value * implies disabled. As with the cfg80211 callback, a change here should * cause an event to be sent indicating where the current value is in * relation to the newly configured threshold. * @cqm_rssi_low: Connection quality monitor RSSI lower threshold, a zero value * implies disabled. This is an alternative mechanism to the single * threshold event and can't be enabled simultaneously with it. 
* @cqm_rssi_high: Connection quality monitor RSSI upper threshold. * @cqm_rssi_hyst: Connection quality monitor RSSI hysteresis * @arp_addr_list: List of IPv4 addresses for hardware ARP filtering. The hardware * may filter ARP queries targeted for other addresses than listed here. * The driver must allow ARP queries targeted for all addresses listed here * to pass through. An empty list implies no ARP queries need to pass. * @arp_addr_cnt: Number of addresses currently on the list. Note that this * may be larger than %IEEE80211_BSS_ARP_ADDR_LIST_LEN (the arp_addr_list * array size), it's up to the driver what to do in that case. * @qos: This is a QoS-enabled BSS. * @idle: This interface is idle. There's also a global idle flag in the * hardware config which may be more appropriate depending on what * your driver/device needs to do. * @ps: power-save mode (STA only). This flag is NOT affected by * offchannel/dynamic_ps operations. * @ssid: The SSID of the current vif. Valid in AP and IBSS mode. * @ssid_len: Length of SSID given in @ssid. * @hidden_ssid: The SSID of the current vif is hidden. Only valid in AP-mode. * @txpower: TX power in dBm * @txpower_type: TX power adjustment used to control per packet Transmit * Power Control (TPC) in lower driver for the current vif. In particular * TPC is enabled if value passed in %txpower_type is * NL80211_TX_POWER_LIMITED (allow using less than specified from * userspace), whereas TPC is disabled if %txpower_type is set to * NL80211_TX_POWER_FIXED (use value configured from userspace) * @p2p_noa_attr: P2P NoA attribute for P2P powersave * @allow_p2p_go_ps: indication for AP or P2P GO interface, whether it's allowed * to use P2P PS mechanism or not. AP/P2P GO is not allowed to use P2P PS * if it has associated clients without P2P PS support. * @max_idle_period: the time period during which the station can refrain from * transmitting frames to its associated AP without being disassociated. * In units of 1000 TUs. Zero value indicates that the AP did not include * a (valid) BSS Max Idle Period Element. * @protected_keep_alive: if set, indicates that the station should send an RSN * protected frame to the AP to reset the idle timer at the AP for the * station. * @ftm_responder: whether to enable or disable fine timing measurement FTM * responder functionality. * @ftmr_params: configurable lci/civic parameters when enabling FTM responder. * @nontransmitted: this BSS is a nontransmitted BSS profile * @transmitter_bssid: the address of transmitter AP * @bssid_index: index inside the multiple BSSID set * @bssid_indicator: 2^bssid_indicator is the maximum number of APs in set * @ema_ap: AP supports enhancements of discovery and advertisement of * nontransmitted BSSIDs * @profile_periodicity: the least number of beacon frames that need to be received * in order to discover all the nontransmitted BSSIDs in the set.
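 *
 * As an illustration, a minimal sketch (not a definitive implementation)
 * of a driver reacting to an association change in its bss_info_changed()
 * callback might read the new state directly from this structure::
 *
 *	if (changed & BSS_CHANGED_ASSOC && vif->bss_conf.assoc)
 *		mydrv_set_aid(dev, vif->bss_conf.aid);
 *
 * where mydrv_set_aid() and dev are hypothetical driver-private names.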
*/ struct ieee80211_bss_conf { const u8 *bssid; u8 bss_color; u8 htc_trig_based_pkt_ext; bool multi_sta_back_32bit; bool uora_exists; bool ack_enabled; u8 uora_ocw_range; u16 frame_time_rts_th; bool he_support; bool twt_requester; /* association related data */ bool assoc, ibss_joined; bool ibss_creator; u16 aid; /* erp related data */ bool use_cts_prot; bool use_short_preamble; bool use_short_slot; bool enable_beacon; u8 dtim_period; u16 beacon_int; u16 assoc_capability; u64 sync_tsf; u32 sync_device_ts; u8 sync_dtim_count; u32 basic_rates; struct ieee80211_rate *beacon_rate; int mcast_rate[NUM_NL80211_BANDS]; u16 ht_operation_mode; s32 cqm_rssi_thold; u32 cqm_rssi_hyst; s32 cqm_rssi_low; s32 cqm_rssi_high; struct cfg80211_chan_def chandef; struct ieee80211_mu_group_data mu_group; __be32 arp_addr_list[IEEE80211_BSS_ARP_ADDR_LIST_LEN]; int arp_addr_cnt; bool qos; bool idle; bool ps; u8 ssid[IEEE80211_MAX_SSID_LEN]; size_t ssid_len; bool hidden_ssid; int txpower; enum nl80211_tx_power_setting txpower_type; struct ieee80211_p2p_noa_attr p2p_noa_attr; bool allow_p2p_go_ps; u16 max_idle_period; bool protected_keep_alive; bool ftm_responder; struct ieee80211_ftm_responder_params *ftmr_params; /* Multiple BSSID data */ bool nontransmitted; u8 transmitter_bssid[ETH_ALEN]; u8 bssid_index; u8 bssid_indicator; bool ema_ap; u8 profile_periodicity; }; /** * enum mac80211_tx_info_flags - flags to describe transmission information/status * * These flags are used with the @flags member of &ieee80211_tx_info. * * @IEEE80211_TX_CTL_REQ_TX_STATUS: require TX status callback for this frame. * @IEEE80211_TX_CTL_ASSIGN_SEQ: The driver has to assign a sequence * number to this frame, taking care of not overwriting the fragment * number and increasing the sequence number only when the * IEEE80211_TX_CTL_FIRST_FRAGMENT flag is set. mac80211 will properly * assign sequence numbers to QoS-data frames but cannot do so correctly * for non-QoS-data and management frames because beacons need them from * that counter as well and mac80211 cannot guarantee proper sequencing. * If this flag is set, the driver should instruct the hardware to * assign a sequence number to the frame or assign one itself. Cf. IEEE * 802.11-2007 7.1.3.4.1 paragraph 3. This flag will always be set for * beacons and always be clear for frames without a sequence number field. * @IEEE80211_TX_CTL_NO_ACK: tell the low level not to wait for an ack * @IEEE80211_TX_CTL_CLEAR_PS_FILT: clear powersave filter for destination * station * @IEEE80211_TX_CTL_FIRST_FRAGMENT: this is a first fragment of the frame * @IEEE80211_TX_CTL_SEND_AFTER_DTIM: send this frame after DTIM beacon * @IEEE80211_TX_CTL_AMPDU: this frame should be sent as part of an A-MPDU * @IEEE80211_TX_CTL_INJECTED: Frame was injected, internal to mac80211. * @IEEE80211_TX_STAT_TX_FILTERED: The frame was not transmitted * because the destination STA was in powersave mode. Note that to * avoid race conditions, the filter must be set by the hardware or * firmware upon receiving a frame that indicates that the station * went to sleep (must be done on device to filter frames already on * the queue) and may only be unset after mac80211 gives the OK for * that by setting the IEEE80211_TX_CTL_CLEAR_PS_FILT (see above), * since only then is it guaranteed that no more frames are in the * hardware queue. * @IEEE80211_TX_STAT_ACK: Frame was acknowledged * @IEEE80211_TX_STAT_AMPDU: The frame was aggregated, so status * is for the whole aggregation. 
* @IEEE80211_TX_STAT_AMPDU_NO_BACK: no block ack was returned, * so consider using block ack request (BAR). * @IEEE80211_TX_CTL_RATE_CTRL_PROBE: internal to mac80211, can be * set by rate control algorithms to indicate probe rate, will * be cleared for fragmented frames (except on the last fragment) * @IEEE80211_TX_INTFL_OFFCHAN_TX_OK: Internal to mac80211. Used to indicate * that a frame can be transmitted while the queues are stopped for * off-channel operation. * @IEEE80211_TX_INTFL_NEED_TXPROCESSING: completely internal to mac80211, * used to indicate that a pending frame requires TX processing before * it can be sent out. * @IEEE80211_TX_INTFL_RETRIED: completely internal to mac80211, * used to indicate that a frame was already retried due to PS * @IEEE80211_TX_INTFL_DONT_ENCRYPT: completely internal to mac80211, * used to indicate frame should not be encrypted * @IEEE80211_TX_CTL_NO_PS_BUFFER: This frame is a response to a poll * frame (PS-Poll or uAPSD) or a non-bufferable MMPDU and must * be sent although the station is in powersave mode. * @IEEE80211_TX_CTL_MORE_FRAMES: More frames will be passed to the * transmit function after the current frame, this can be used * by drivers to kick the DMA queue only if unset or when the * queue gets full. * @IEEE80211_TX_INTFL_RETRANSMISSION: This frame is being retransmitted * after TX status because the destination was asleep, it must not * be modified again (no seqno assignment, crypto, etc.) * @IEEE80211_TX_INTFL_MLME_CONN_TX: This frame was transmitted by the MLME * code for connection establishment, this indicates that its status * should kick the MLME state machine. * @IEEE80211_TX_INTFL_NL80211_FRAME_TX: Frame was requested through nl80211 * MLME command (internal to mac80211 to figure out whether to send TX * status to user space) * @IEEE80211_TX_CTL_LDPC: tells the driver to use LDPC for this frame * @IEEE80211_TX_CTL_STBC: Enables Space-Time Block Coding (STBC) for this * frame and selects the maximum number of streams that it can use. * @IEEE80211_TX_CTL_TX_OFFCHAN: Marks this packet to be transmitted on * the off-channel channel when a remain-on-channel offload is done * in hardware -- normal packets still flow and are expected to be * handled properly by the device. * @IEEE80211_TX_INTFL_TKIP_MIC_FAILURE: Marks this packet to be used for TKIP * testing. It will be sent out with incorrect Michael MIC key to allow * TKIP countermeasures to be tested. * @IEEE80211_TX_CTL_NO_CCK_RATE: This frame will be sent at non CCK rate. * This flag is actually used for management frame especially for P2P * frames not being sent at CCK rate in 2GHz band. * @IEEE80211_TX_STATUS_EOSP: This packet marks the end of service period, * when its status is reported the service period ends. For frames in * an SP that mac80211 transmits, it is already set; for driver frames * the driver may set this flag. It is also used to do the same for * PS-Poll responses. * @IEEE80211_TX_CTL_USE_MINRATE: This frame will be sent at lowest rate. * This flag is used to send nullfunc frame at minimum rate when * the nullfunc is used for connection monitoring purpose. * @IEEE80211_TX_CTL_DONTFRAG: Don't fragment this packet even if it * would be fragmented by size (this is optional, only used for * monitor injection). * @IEEE80211_TX_STAT_NOACK_TRANSMITTED: A frame that was marked with * IEEE80211_TX_CTL_NO_ACK has been successfully transmitted without * any errors (like issues specific to the driver/HW). 
* This flag must not be set for frames that don't request no-ack * behaviour with IEEE80211_TX_CTL_NO_ACK. * * Note: If you have to add new flags to the enumeration, then don't * forget to update %IEEE80211_TX_TEMPORARY_FLAGS when necessary. */ enum mac80211_tx_info_flags { IEEE80211_TX_CTL_REQ_TX_STATUS = BIT(0), IEEE80211_TX_CTL_ASSIGN_SEQ = BIT(1), IEEE80211_TX_CTL_NO_ACK = BIT(2), IEEE80211_TX_CTL_CLEAR_PS_FILT = BIT(3), IEEE80211_TX_CTL_FIRST_FRAGMENT = BIT(4), IEEE80211_TX_CTL_SEND_AFTER_DTIM = BIT(5), IEEE80211_TX_CTL_AMPDU = BIT(6), IEEE80211_TX_CTL_INJECTED = BIT(7), IEEE80211_TX_STAT_TX_FILTERED = BIT(8), IEEE80211_TX_STAT_ACK = BIT(9), IEEE80211_TX_STAT_AMPDU = BIT(10), IEEE80211_TX_STAT_AMPDU_NO_BACK = BIT(11), IEEE80211_TX_CTL_RATE_CTRL_PROBE = BIT(12), IEEE80211_TX_INTFL_OFFCHAN_TX_OK = BIT(13), IEEE80211_TX_INTFL_NEED_TXPROCESSING = BIT(14), IEEE80211_TX_INTFL_RETRIED = BIT(15), IEEE80211_TX_INTFL_DONT_ENCRYPT = BIT(16), IEEE80211_TX_CTL_NO_PS_BUFFER = BIT(17), IEEE80211_TX_CTL_MORE_FRAMES = BIT(18), IEEE80211_TX_INTFL_RETRANSMISSION = BIT(19), IEEE80211_TX_INTFL_MLME_CONN_TX = BIT(20), IEEE80211_TX_INTFL_NL80211_FRAME_TX = BIT(21), IEEE80211_TX_CTL_LDPC = BIT(22), IEEE80211_TX_CTL_STBC = BIT(23) | BIT(24), IEEE80211_TX_CTL_TX_OFFCHAN = BIT(25), IEEE80211_TX_INTFL_TKIP_MIC_FAILURE = BIT(26), IEEE80211_TX_CTL_NO_CCK_RATE = BIT(27), IEEE80211_TX_STATUS_EOSP = BIT(28), IEEE80211_TX_CTL_USE_MINRATE = BIT(29), IEEE80211_TX_CTL_DONTFRAG = BIT(30), IEEE80211_TX_STAT_NOACK_TRANSMITTED = BIT(31), }; #define IEEE80211_TX_CTL_STBC_SHIFT 23 /** * enum mac80211_tx_control_flags - flags to describe transmit control * * @IEEE80211_TX_CTRL_PORT_CTRL_PROTO: this frame is a port control * protocol frame (e.g. EAP) * @IEEE80211_TX_CTRL_PS_RESPONSE: This frame is a response to a poll * frame (PS-Poll or uAPSD). * @IEEE80211_TX_CTRL_RATE_INJECT: This frame is injected with rate information * @IEEE80211_TX_CTRL_AMSDU: This frame is an A-MSDU frame * @IEEE80211_TX_CTRL_FAST_XMIT: This frame is going through the fast_xmit path * * These flags are used in tx_info->control.flags. */ enum mac80211_tx_control_flags { IEEE80211_TX_CTRL_PORT_CTRL_PROTO = BIT(0), IEEE80211_TX_CTRL_PS_RESPONSE = BIT(1), IEEE80211_TX_CTRL_RATE_INJECT = BIT(2), IEEE80211_TX_CTRL_AMSDU = BIT(3), IEEE80211_TX_CTRL_FAST_XMIT = BIT(4), }; /* * This definition is used as a mask to clear all temporary flags, which are * set by the tx handlers for each transmission attempt by the mac80211 stack. */ #define IEEE80211_TX_TEMPORARY_FLAGS (IEEE80211_TX_CTL_NO_ACK | \ IEEE80211_TX_CTL_CLEAR_PS_FILT | IEEE80211_TX_CTL_FIRST_FRAGMENT | \ IEEE80211_TX_CTL_SEND_AFTER_DTIM | IEEE80211_TX_CTL_AMPDU | \ IEEE80211_TX_STAT_TX_FILTERED | IEEE80211_TX_STAT_ACK | \ IEEE80211_TX_STAT_AMPDU | IEEE80211_TX_STAT_AMPDU_NO_BACK | \ IEEE80211_TX_CTL_RATE_CTRL_PROBE | IEEE80211_TX_CTL_NO_PS_BUFFER | \ IEEE80211_TX_CTL_MORE_FRAMES | IEEE80211_TX_CTL_LDPC | \ IEEE80211_TX_CTL_STBC | IEEE80211_TX_STATUS_EOSP) /** * enum mac80211_rate_control_flags - per-rate flags set by the * Rate Control algorithm. * * These flags are set by the Rate control algorithm for each rate during tx, * in the @flags member of struct ieee80211_tx_rate. * * @IEEE80211_TX_RC_USE_RTS_CTS: Use RTS/CTS exchange for this rate. * @IEEE80211_TX_RC_USE_CTS_PROTECT: CTS-to-self protection is required. * This is set if the current BSS requires ERP protection. * @IEEE80211_TX_RC_USE_SHORT_PREAMBLE: Use short preamble. * @IEEE80211_TX_RC_MCS: HT rate. 
* @IEEE80211_TX_RC_VHT_MCS: VHT MCS rate, in this case the idx field is split * into a higher 4 bits (Nss) and lower 4 bits (MCS number) * @IEEE80211_TX_RC_GREEN_FIELD: Indicates whether this rate should be used in * Greenfield mode. * @IEEE80211_TX_RC_40_MHZ_WIDTH: Indicates if the Channel Width should be 40 MHz. * @IEEE80211_TX_RC_80_MHZ_WIDTH: Indicates 80 MHz transmission * @IEEE80211_TX_RC_160_MHZ_WIDTH: Indicates 160 MHz transmission * (80+80 isn't supported yet) * @IEEE80211_TX_RC_DUP_DATA: The frame should be transmitted on both of the * adjacent 20 MHz channels, if the current channel type is * NL80211_CHAN_HT40MINUS or NL80211_CHAN_HT40PLUS. * @IEEE80211_TX_RC_SHORT_GI: Short Guard interval should be used for this rate. */ enum mac80211_rate_control_flags { IEEE80211_TX_RC_USE_RTS_CTS = BIT(0), IEEE80211_TX_RC_USE_CTS_PROTECT = BIT(1), IEEE80211_TX_RC_USE_SHORT_PREAMBLE = BIT(2), /* rate index is an HT/VHT MCS instead of an index */ IEEE80211_TX_RC_MCS = BIT(3), IEEE80211_TX_RC_GREEN_FIELD = BIT(4), IEEE80211_TX_RC_40_MHZ_WIDTH = BIT(5), IEEE80211_TX_RC_DUP_DATA = BIT(6), IEEE80211_TX_RC_SHORT_GI = BIT(7), IEEE80211_TX_RC_VHT_MCS = BIT(8), IEEE80211_TX_RC_80_MHZ_WIDTH = BIT(9), IEEE80211_TX_RC_160_MHZ_WIDTH = BIT(10), }; /* there are 40 bytes if you don't need the rateset to be kept */ #define IEEE80211_TX_INFO_DRIVER_DATA_SIZE 40 /* if you do need the rateset, then you have less space */ #define IEEE80211_TX_INFO_RATE_DRIVER_DATA_SIZE 24 /* maximum number of rate stages */ #define IEEE80211_TX_MAX_RATES 4 /* maximum number of rate table entries */ #define IEEE80211_TX_RATE_TABLE_SIZE 4 /** * struct ieee80211_tx_rate - rate selection/status * * @idx: rate index to attempt to send with * @flags: rate control flags (&enum mac80211_rate_control_flags) * @count: number of tries in this rate before going to the next rate * * A value of -1 for @idx indicates an invalid rate and, if used * in an array of retry rates, that no more rates should be tried. * * When used for transmit status reporting, the driver should * always report the rate along with the flags it used. * * &struct ieee80211_tx_info contains an array of these structs * in the control information, and it will be filled by the rate * control algorithm according to what should be sent. For example, * if this array contains, in the format { <idx>, <count> } the * information:: * * { 3, 2 }, { 2, 2 }, { 1, 4 }, { -1, 0 }, { -1, 0 } * * then this means that the frame should be transmitted * up to twice at rate 3, up to twice at rate 2, and up to four * times at rate 1 if it doesn't get acknowledged. Say it gets * acknowledged by the peer after the fifth attempt, the status * information should then contain:: * * { 3, 2 }, { 2, 2 }, { 1, 1 }, { -1, 0 } ... * * since it was transmitted twice at rate 3, twice at rate 2 * and once at rate 1 after which we received an acknowledgement.
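 *
 * Because the array is terminated by idx = -1, a minimal sketch of a
 * driver-side loop counting transmission attempts could look like::
 *
 *	int i, tries = 0;
 *
 *	for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
 *		if (info->status.rates[i].idx < 0)
 *			break;
 *		tries += info->status.rates[i].count;
 *	}
 *
 * where 'info' is the &struct ieee80211_tx_info of the reported frame.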
 */
struct ieee80211_tx_rate {
	s8 idx;
	u16 count:5,
	    flags:11;
} __packed;

#define IEEE80211_MAX_TX_RETRY		31

static inline void ieee80211_rate_set_vht(struct ieee80211_tx_rate *rate,
					  u8 mcs, u8 nss)
{
	WARN_ON(mcs & ~0xF);
	WARN_ON((nss - 1) & ~0x7);
	rate->idx = ((nss - 1) << 4) | mcs;
}

static inline u8
ieee80211_rate_get_vht_mcs(const struct ieee80211_tx_rate *rate)
{
	return rate->idx & 0xF;
}

static inline u8
ieee80211_rate_get_vht_nss(const struct ieee80211_tx_rate *rate)
{
	return (rate->idx >> 4) + 1;
}

/**
 * struct ieee80211_tx_info - skb transmit information
 *
 * This structure is placed in skb->cb for three uses:
 *  (1) mac80211 TX control - mac80211 tells the driver what to do
 *  (2) driver internal use (if applicable)
 *  (3) TX status information - driver tells mac80211 what happened
 *
 * @flags: transmit info flags, defined above
 * @band: the band to transmit on (use for checking for races)
 * @hw_queue: HW queue to put the frame on, skb_get_queue_mapping() gives the AC
 * @ack_frame_id: internal frame ID for TX status, used internally
 * @control: union for control data
 * @status: union for status data
 * @driver_data: array of driver_data pointers
 * @ampdu_ack_len: number of acked aggregated frames.
 *    relevant only if IEEE80211_TX_STAT_AMPDU was set.
 * @ampdu_len: number of aggregated frames.
 *    relevant only if IEEE80211_TX_STAT_AMPDU was set.
 * @ack_signal: signal strength of the ACK frame
 */
struct ieee80211_tx_info {
	/* common information */
	u32 flags;
	u8 band;
	u8 hw_queue;
	u16 ack_frame_id;

	union {
		struct {
			union {
				/* rate control */
				struct {
					struct ieee80211_tx_rate rates[
						IEEE80211_TX_MAX_RATES];
					s8 rts_cts_rate_idx;
					u8 use_rts:1;
					u8 use_cts_prot:1;
					u8 short_preamble:1;
					u8 skip_table:1;
					/* 2 bytes free */
				};
				/* only needed before rate control */
				unsigned long jiffies;
			};
			/* NB: vif can be NULL for injected frames */
			struct ieee80211_vif *vif;
			struct ieee80211_key_conf *hw_key;
			u32 flags;
			codel_time_t enqueue_time;
		} control;
		struct {
			u64 cookie;
		} ack;
		struct {
			struct ieee80211_tx_rate rates[IEEE80211_TX_MAX_RATES];
			s32 ack_signal;
			u8 ampdu_ack_len;
			u8 ampdu_len;
			u8 antenna;
			u16 tx_time;
			bool is_valid_ack_signal;
			void *status_driver_data[19 / sizeof(void *)];
		} status;
		struct {
			struct ieee80211_tx_rate driver_rates[
				IEEE80211_TX_MAX_RATES];
			u8 pad[4];

			void *rate_driver_data[
				IEEE80211_TX_INFO_RATE_DRIVER_DATA_SIZE / sizeof(void *)];
		};
		void *driver_data[
			IEEE80211_TX_INFO_DRIVER_DATA_SIZE / sizeof(void *)];
	};
};

/**
 * struct ieee80211_tx_status - extended tx status info for rate control
 *
 * @sta: Station that the packet was transmitted for
 * @info: Basic tx status information
 * @skb: Packet skb (can be NULL if not provided by the driver)
 */
struct ieee80211_tx_status {
	struct ieee80211_sta *sta;
	struct ieee80211_tx_info *info;
	struct sk_buff *skb;
};

/**
 * struct ieee80211_scan_ies - descriptors for different blocks of IEs
 *
 * This structure is used to point to different blocks of IEs in HW scan
 * and scheduled scan. These blocks contain the IEs passed by userspace
 * and the ones generated by mac80211.
 *
 * @ies: pointers to band-specific IEs.
 * @len: lengths of band-specific IEs.
 * @common_ies: IEs for all bands (especially vendor specific ones)
 * @common_ie_len: length of the common_ies
 */
struct ieee80211_scan_ies {
	const u8 *ies[NUM_NL80211_BANDS];
	size_t len[NUM_NL80211_BANDS];
	const u8 *common_ies;
	size_t common_ie_len;
};

static inline struct ieee80211_tx_info *IEEE80211_SKB_CB(struct sk_buff *skb)
{
	return (struct ieee80211_tx_info *)skb->cb;
}

static inline struct ieee80211_rx_status *IEEE80211_SKB_RXCB(struct sk_buff *skb)
{
	return (struct ieee80211_rx_status *)skb->cb;
}

/**
 * ieee80211_tx_info_clear_status - clear TX status
 *
 * @info: The &struct ieee80211_tx_info to be cleared.
 *
 * When the driver passes an skb back to mac80211, it must report
 * a number of things in TX status. This function clears everything
 * in the TX status but the rate control information (it does clear
 * the count since you need to fill that in anyway).
 *
 * NOTE: You can only use this function if you do NOT use
 * info->driver_data! Use info->rate_driver_data
 * instead if the smaller space it provides is sufficient.
 */
static inline void
ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
{
	int i;

	BUILD_BUG_ON(offsetof(struct ieee80211_tx_info, status.rates) !=
		     offsetof(struct ieee80211_tx_info, control.rates));
	BUILD_BUG_ON(offsetof(struct ieee80211_tx_info, status.rates) !=
		     offsetof(struct ieee80211_tx_info, driver_rates));
	BUILD_BUG_ON(offsetof(struct ieee80211_tx_info, status.rates) != 8);
	/* clear the rate counts */
	for (i = 0; i < IEEE80211_TX_MAX_RATES; i++)
		info->status.rates[i].count = 0;

	BUILD_BUG_ON(
	    offsetof(struct ieee80211_tx_info, status.ack_signal) != 20);
	memset(&info->status.ampdu_ack_len, 0,
	       sizeof(struct ieee80211_tx_info) -
	       offsetof(struct ieee80211_tx_info, status.ampdu_ack_len));
}

/**
 * enum mac80211_rx_flags - receive flags
 *
 * These flags are used with the @flag member of &struct ieee80211_rx_status.
 * @RX_FLAG_MMIC_ERROR: Michael MIC error was reported on this frame.
 *    Use together with %RX_FLAG_MMIC_STRIPPED.
 * @RX_FLAG_DECRYPTED: This frame was decrypted in hardware.
 * @RX_FLAG_MMIC_STRIPPED: the Michael MIC is stripped off this frame,
 *    verification has been done by the hardware.
 * @RX_FLAG_IV_STRIPPED: The IV and ICV are stripped from this frame.
 *    If this flag is set, the stack cannot do any replay detection
 *    hence the driver or hardware will have to do that.
 * @RX_FLAG_PN_VALIDATED: Currently only valid for CCMP/GCMP frames, this
 *    flag indicates that the PN was verified for replay protection.
 *    Note that this flag is also currently only supported when a frame
 *    is also decrypted (i.e. @RX_FLAG_DECRYPTED must be set)
 * @RX_FLAG_DUP_VALIDATED: The driver should set this flag if it did
 *    de-duplication by itself.
 * @RX_FLAG_FAILED_FCS_CRC: Set this flag if the FCS check failed on
 *    the frame.
 * @RX_FLAG_FAILED_PLCP_CRC: Set this flag if the PLCP check failed on
 *    the frame.
 * @RX_FLAG_MACTIME_START: The timestamp passed in the RX status (@mactime
 *    field) is valid and contains the time the first symbol of the MPDU
 *    was received. This is useful in monitor mode and for proper IBSS
 *    merging.
 * @RX_FLAG_MACTIME_END: The timestamp passed in the RX status (@mactime
 *    field) is valid and contains the time the last symbol of the MPDU
 *    (including FCS) was received.
 * @RX_FLAG_MACTIME_PLCP_START: The timestamp passed in the RX status (@mactime
 *    field) is valid and contains the time the SYNC preamble was received.
 * @RX_FLAG_NO_SIGNAL_VAL: The signal strength value is not present.
 *    Valid only for data frames (mainly A-MPDU)
 * @RX_FLAG_AMPDU_DETAILS: A-MPDU details are known, in particular the reference
 *    number (@ampdu_reference) must be populated and be a distinct number for
 *    each A-MPDU
 * @RX_FLAG_AMPDU_LAST_KNOWN: last subframe is known, should be set on all
 *    subframes of a single A-MPDU
 * @RX_FLAG_AMPDU_IS_LAST: this subframe is the last subframe of the A-MPDU
 * @RX_FLAG_AMPDU_DELIM_CRC_ERROR: A delimiter CRC error has been detected
 *    on this subframe
 * @RX_FLAG_AMPDU_DELIM_CRC_KNOWN: The delimiter CRC field is known (the CRC
 *    is stored in the @ampdu_delimiter_crc field)
 * @RX_FLAG_MIC_STRIPPED: The MIC was stripped from this packet. Decryption was
 *    done by the hardware
 * @RX_FLAG_ONLY_MONITOR: Report frame only to monitor interfaces without
 *    processing it in any regular way.
 *    This is useful if drivers offload some frames but still want to report
 *    them for sniffing purposes.
 * @RX_FLAG_SKIP_MONITOR: Process and report frame to all interfaces except
 *    monitor interfaces.
 *    This is useful if drivers offload some frames but still want to report
 *    them for sniffing purposes.
 * @RX_FLAG_AMSDU_MORE: Some drivers may prefer to report separate A-MSDU
 *    subframes instead of one huge frame for performance reasons.
 *    All but the last MSDU from an A-MSDU should have this flag set. E.g.
 *    if an A-MSDU has 3 frames, the first 2 must have the flag set, while
 *    the 3rd (last) one must not have this flag set. The flag is used to
 *    deal with retransmission/duplication recovery properly since A-MSDU
 *    subframes share the same sequence number. Reported subframes can be
 *    either regular MSDUs or single A-MSDUs. Subframes must not be
 *    interleaved with other frames.
 * @RX_FLAG_RADIOTAP_VENDOR_DATA: This frame contains vendor-specific
 *    radiotap data in the skb->data (before the frame) as described by
 *    the &struct ieee80211_vendor_radiotap.
 * @RX_FLAG_ALLOW_SAME_PN: Allow the same PN as the same packet before.
 *    This is used for A-MSDU subframes which can have the same PN as
 *    the first subframe.
 * @RX_FLAG_ICV_STRIPPED: The ICV is stripped from this frame. CRC checking must
 *    be done in the hardware.
 * @RX_FLAG_AMPDU_EOF_BIT: Value of the EOF bit in the A-MPDU delimiter for this
 *    frame
 * @RX_FLAG_AMPDU_EOF_BIT_KNOWN: The EOF value is known
 * @RX_FLAG_RADIOTAP_HE: HE radiotap data is present
 *    (&struct ieee80211_radiotap_he, mac80211 will fill in
 *
 *     - DATA3_DATA_MCS
 *     - DATA3_DATA_DCM
 *     - DATA3_CODING
 *     - DATA5_GI
 *     - DATA5_DATA_BW_RU_ALLOC
 *     - DATA6_NSTS
 *     - DATA3_STBC
 *
 *    from the RX info data, so leave those zeroed when building this data)
 * @RX_FLAG_RADIOTAP_HE_MU: HE MU radiotap data is present
 *    (&struct ieee80211_radiotap_he_mu)
 * @RX_FLAG_RADIOTAP_LSIG: L-SIG radiotap data is present
 * @RX_FLAG_NO_PSDU: use the frame only for radiotap reporting, with
 *    the "0-length PSDU" field included there. The value for it is
 *    in &struct ieee80211_rx_status. Note that if this value isn't
 *    known the frame shouldn't be reported.
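 *
 * Example (an illustrative sketch, not part of the original documentation;
 * "status" stands for a hypothetical &struct ieee80211_rx_status being
 * filled in a driver's RX path for a hardware-decrypted A-MPDU subframe,
 * and "ampdu_ref" for a driver-maintained reference counter)::
 *
 *   status->flag |= RX_FLAG_DECRYPTED |     // hardware decrypted the frame
 *                   RX_FLAG_PN_VALIDATED |  // and checked the PN for replays
 *                   RX_FLAG_AMPDU_DETAILS;  // @ampdu_reference is populated
 *   status->ampdu_reference = ampdu_ref;    // same value for every subframe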
*/ enum mac80211_rx_flags { RX_FLAG_MMIC_ERROR = BIT(0), RX_FLAG_DECRYPTED = BIT(1), RX_FLAG_MACTIME_PLCP_START = BIT(2), RX_FLAG_MMIC_STRIPPED = BIT(3), RX_FLAG_IV_STRIPPED = BIT(4), RX_FLAG_FAILED_FCS_CRC = BIT(5), RX_FLAG_FAILED_PLCP_CRC = BIT(6), RX_FLAG_MACTIME_START = BIT(7), RX_FLAG_NO_SIGNAL_VAL = BIT(8), RX_FLAG_AMPDU_DETAILS = BIT(9), RX_FLAG_PN_VALIDATED = BIT(10), RX_FLAG_DUP_VALIDATED = BIT(11), RX_FLAG_AMPDU_LAST_KNOWN = BIT(12), RX_FLAG_AMPDU_IS_LAST = BIT(13), RX_FLAG_AMPDU_DELIM_CRC_ERROR = BIT(14), RX_FLAG_AMPDU_DELIM_CRC_KNOWN = BIT(15), RX_FLAG_MACTIME_END = BIT(16), RX_FLAG_ONLY_MONITOR = BIT(17), RX_FLAG_SKIP_MONITOR = BIT(18), RX_FLAG_AMSDU_MORE = BIT(19), RX_FLAG_RADIOTAP_VENDOR_DATA = BIT(20), RX_FLAG_MIC_STRIPPED = BIT(21), RX_FLAG_ALLOW_SAME_PN = BIT(22), RX_FLAG_ICV_STRIPPED = BIT(23), RX_FLAG_AMPDU_EOF_BIT = BIT(24), RX_FLAG_AMPDU_EOF_BIT_KNOWN = BIT(25), RX_FLAG_RADIOTAP_HE = BIT(26), RX_FLAG_RADIOTAP_HE_MU = BIT(27), RX_FLAG_RADIOTAP_LSIG = BIT(28), RX_FLAG_NO_PSDU = BIT(29), }; /** * enum mac80211_rx_encoding_flags - MCS & bandwidth flags * * @RX_ENC_FLAG_SHORTPRE: Short preamble was used for this frame * @RX_ENC_FLAG_40MHZ: HT40 (40 MHz) was used * @RX_ENC_FLAG_SHORT_GI: Short guard interval was used * @RX_ENC_FLAG_HT_GF: This frame was received in a HT-greenfield transmission, * if the driver fills this value it should add * %IEEE80211_RADIOTAP_MCS_HAVE_FMT * to hw.radiotap_mcs_details to advertise that fact * @RX_ENC_FLAG_LDPC: LDPC was used * @RX_ENC_FLAG_STBC_MASK: STBC 2 bit bitmask. 1 - Nss=1, 2 - Nss=2, 3 - Nss=3 * @RX_ENC_FLAG_BF: packet was beamformed */ enum mac80211_rx_encoding_flags { RX_ENC_FLAG_SHORTPRE = BIT(0), RX_ENC_FLAG_40MHZ = BIT(1), RX_ENC_FLAG_SHORT_GI = BIT(2), RX_ENC_FLAG_HT_GF = BIT(3), RX_ENC_FLAG_STBC_MASK = BIT(4) | BIT(5), RX_ENC_FLAG_LDPC = BIT(6), RX_ENC_FLAG_BF = BIT(7), }; #define RX_ENC_FLAG_STBC_SHIFT 4 enum mac80211_rx_encoding { RX_ENC_LEGACY = 0, RX_ENC_HT, RX_ENC_VHT, RX_ENC_HE, }; /** * struct ieee80211_rx_status - receive status * * The low-level driver should provide this information (the subset * supported by hardware) to the 802.11 code with each received * frame, in the skb's control buffer (cb). * * @mactime: value in microseconds of the 64-bit Time Synchronization Function * (TSF) timer when the first data symbol (MPDU) arrived at the hardware. * @boottime_ns: CLOCK_BOOTTIME timestamp the frame was received at, this is * needed only for beacons and probe responses that update the scan cache. * @device_timestamp: arbitrary timestamp for the device, mac80211 doesn't use * it but can store it and pass it back to the driver for synchronisation * @band: the active band when this frame was received * @freq: frequency the radio was tuned to when receiving this frame, in MHz * This field must be set for management frames, but isn't strictly needed * for data (other) frames - for those it only affects radiotap reporting. * @signal: signal strength when receiving this frame, either in dBm, in dB or * unspecified depending on the hardware capabilities flags * @IEEE80211_HW_SIGNAL_* * @chains: bitmask of receive chains for which separate signal strength * values were filled. 
* @chain_signal: per-chain signal strength, in dBm (unlike @signal, doesn't * support dB or unspecified units) * @antenna: antenna used * @rate_idx: index of data rate into band's supported rates or MCS index if * HT or VHT is used (%RX_FLAG_HT/%RX_FLAG_VHT) * @nss: number of streams (VHT and HE only) * @flag: %RX_FLAG_\* * @encoding: &enum mac80211_rx_encoding * @bw: &enum rate_info_bw * @enc_flags: uses bits from &enum mac80211_rx_encoding_flags * @he_ru: HE RU, from &enum nl80211_he_ru_alloc * @he_gi: HE GI, from &enum nl80211_he_gi * @he_dcm: HE DCM value * @rx_flags: internal RX flags for mac80211 * @ampdu_reference: A-MPDU reference number, must be a different value for * each A-MPDU but the same for each subframe within one A-MPDU * @ampdu_delimiter_crc: A-MPDU delimiter CRC * @zero_length_psdu_type: radiotap type of the 0-length PSDU */ struct ieee80211_rx_status { u64 mactime; u64 boottime_ns; u32 device_timestamp; u32 ampdu_reference; u32 flag; u16 freq; u8 enc_flags; u8 encoding:2, bw:3, he_ru:3; u8 he_gi:2, he_dcm:1; u8 rate_idx; u8 nss; u8 rx_flags; u8 band; u8 antenna; s8 signal; u8 chains; s8 chain_signal[IEEE80211_MAX_CHAINS]; u8 ampdu_delimiter_crc; u8 zero_length_psdu_type; }; /** * struct ieee80211_vendor_radiotap - vendor radiotap data information * @present: presence bitmap for this vendor namespace * (this could be extended in the future if any vendor needs more * bits, the radiotap spec does allow for that) * @align: radiotap vendor namespace alignment. This defines the needed * alignment for the @data field below, not for the vendor namespace * description itself (which has a fixed 2-byte alignment) * Must be a power of two, and be set to at least 1! * @oui: radiotap vendor namespace OUI * @subns: radiotap vendor sub namespace * @len: radiotap vendor sub namespace skip length, if alignment is done * then that's added to this, i.e. this is only the length of the * @data field. * @pad: number of bytes of padding after the @data, this exists so that * the skb data alignment can be preserved even if the data has odd * length * @data: the actual vendor namespace data * * This struct, including the vendor data, goes into the skb->data before * the 802.11 header. It's split up in mac80211 using the align/oui/subns * data. */ struct ieee80211_vendor_radiotap { u32 present; u8 align; u8 oui[3]; u8 subns; u8 pad; u16 len; u8 data[]; } __packed; /** * enum ieee80211_conf_flags - configuration flags * * Flags to define PHY configuration options * * @IEEE80211_CONF_MONITOR: there's a monitor interface present -- use this * to determine for example whether to calculate timestamps for packets * or not, do not use instead of filter flags! * @IEEE80211_CONF_PS: Enable 802.11 power save mode (managed mode only). * This is the power save mode defined by IEEE 802.11-2007 section 11.2, * meaning that the hardware still wakes up for beacons, is able to * transmit frames and receive the possible acknowledgment frames. * Not to be confused with hardware specific wakeup/sleep states, * driver is responsible for that. See the section "Powersave support" * for more. * @IEEE80211_CONF_IDLE: The device is running, but idle; if the flag is set * the driver should be prepared to handle configuration requests but * may turn the device off as much as possible. Typically, this flag will * be set when an interface is set UP but not associated or scanning, but * it can also be unset in that case when monitor interfaces are active. 
* @IEEE80211_CONF_OFFCHANNEL: The device is currently not on its main * operating channel. */ enum ieee80211_conf_flags { IEEE80211_CONF_MONITOR = (1<<0), IEEE80211_CONF_PS = (1<<1), IEEE80211_CONF_IDLE = (1<<2), IEEE80211_CONF_OFFCHANNEL = (1<<3), }; /** * enum ieee80211_conf_changed - denotes which configuration changed * * @IEEE80211_CONF_CHANGE_LISTEN_INTERVAL: the listen interval changed * @IEEE80211_CONF_CHANGE_MONITOR: the monitor flag changed * @IEEE80211_CONF_CHANGE_PS: the PS flag or dynamic PS timeout changed * @IEEE80211_CONF_CHANGE_POWER: the TX power changed * @IEEE80211_CONF_CHANGE_CHANNEL: the channel/channel_type changed * @IEEE80211_CONF_CHANGE_RETRY_LIMITS: retry limits changed * @IEEE80211_CONF_CHANGE_IDLE: Idle flag changed * @IEEE80211_CONF_CHANGE_SMPS: Spatial multiplexing powersave mode changed * Note that this is only valid if channel contexts are not used, * otherwise each channel context has the number of chains listed. */ enum ieee80211_conf_changed { IEEE80211_CONF_CHANGE_SMPS = BIT(1), IEEE80211_CONF_CHANGE_LISTEN_INTERVAL = BIT(2), IEEE80211_CONF_CHANGE_MONITOR = BIT(3), IEEE80211_CONF_CHANGE_PS = BIT(4), IEEE80211_CONF_CHANGE_POWER = BIT(5), IEEE80211_CONF_CHANGE_CHANNEL = BIT(6), IEEE80211_CONF_CHANGE_RETRY_LIMITS = BIT(7), IEEE80211_CONF_CHANGE_IDLE = BIT(8), }; /** * enum ieee80211_smps_mode - spatial multiplexing power save mode * * @IEEE80211_SMPS_AUTOMATIC: automatic * @IEEE80211_SMPS_OFF: off * @IEEE80211_SMPS_STATIC: static * @IEEE80211_SMPS_DYNAMIC: dynamic * @IEEE80211_SMPS_NUM_MODES: internal, don't use */ enum ieee80211_smps_mode { IEEE80211_SMPS_AUTOMATIC, IEEE80211_SMPS_OFF, IEEE80211_SMPS_STATIC, IEEE80211_SMPS_DYNAMIC, /* keep last */ IEEE80211_SMPS_NUM_MODES, }; /** * struct ieee80211_conf - configuration of the device * * This struct indicates how the driver shall configure the hardware. * * @flags: configuration flags defined above * * @listen_interval: listen interval in units of beacon interval * @ps_dtim_period: The DTIM period of the AP we're connected to, for use * in power saving. Power saving will not be enabled until a beacon * has been received and the DTIM period is known. * @dynamic_ps_timeout: The dynamic powersave timeout (in ms), see the * powersave documentation below. This variable is valid only when * the CONF_PS flag is set. * * @power_level: requested transmit power (in dBm), backward compatibility * value only that is set to the minimum of all interfaces * * @chandef: the channel definition to tune to * @radar_enabled: whether radar detection is enabled * * @long_frame_max_tx_count: Maximum number of transmissions for a "long" frame * (a frame not RTS protected), called "dot11LongRetryLimit" in 802.11, * but actually means the number of transmissions not the number of retries * @short_frame_max_tx_count: Maximum number of transmissions for a "short" * frame, called "dot11ShortRetryLimit" in 802.11, but actually means the * number of transmissions not the number of retries * * @smps_mode: spatial multiplexing powersave mode; note that * %IEEE80211_SMPS_STATIC is used when the device is not * configured for an HT channel. * Note that this is only valid if channel contexts are not used, * otherwise each channel context has the number of chains listed. 
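 *
 * Example (an illustrative sketch, not part of the original documentation;
 * it assumes a hypothetical driver config callback that receives a "changed"
 * bitmap of &enum ieee80211_conf_changed values, with my_hw_*() helpers
 * standing in for device-specific code)::
 *
 *   if (changed & IEEE80211_CONF_CHANGE_PS) {
 *           if (hw->conf.flags & IEEE80211_CONF_PS)
 *                   my_hw_enter_ps(hw);  // hypothetical helper
 *           else
 *                   my_hw_leave_ps(hw);  // hypothetical helper
 *   }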
*/ struct ieee80211_conf { u32 flags; int power_level, dynamic_ps_timeout; u16 listen_interval; u8 ps_dtim_period; u8 long_frame_max_tx_count, short_frame_max_tx_count; struct cfg80211_chan_def chandef; bool radar_enabled; enum ieee80211_smps_mode smps_mode; }; /** * struct ieee80211_channel_switch - holds the channel switch data * * The information provided in this structure is required for channel switch * operation. * * @timestamp: value in microseconds of the 64-bit Time Synchronization * Function (TSF) timer when the frame containing the channel switch * announcement was received. This is simply the rx.mactime parameter * the driver passed into mac80211. * @device_timestamp: arbitrary timestamp for the device, this is the * rx.device_timestamp parameter the driver passed to mac80211. * @block_tx: Indicates whether transmission must be blocked before the * scheduled channel switch, as indicated by the AP. * @chandef: the new channel to switch to * @count: the number of TBTT's until the channel switch event * @delay: maximum delay between the time the AP transmitted the last beacon in * current channel and the expected time of the first beacon in the new * channel, expressed in TU. */ struct ieee80211_channel_switch { u64 timestamp; u32 device_timestamp; bool block_tx; struct cfg80211_chan_def chandef; u8 count; u32 delay; }; /** * enum ieee80211_vif_flags - virtual interface flags * * @IEEE80211_VIF_BEACON_FILTER: the device performs beacon filtering * on this virtual interface to avoid unnecessary CPU wakeups * @IEEE80211_VIF_SUPPORTS_CQM_RSSI: the device can do connection quality * monitoring on this virtual interface -- i.e. it can monitor * connection quality related parameters, such as the RSSI level and * provide notifications if configured trigger levels are reached. * @IEEE80211_VIF_SUPPORTS_UAPSD: The device can do U-APSD for this * interface. This flag should be set during interface addition, * but may be set/cleared as late as authentication to an AP. It is * only valid for managed/station mode interfaces. * @IEEE80211_VIF_GET_NOA_UPDATE: request to handle NOA attributes * and send P2P_PS notification to the driver if NOA changed, even * this is not pure P2P vif. */ enum ieee80211_vif_flags { IEEE80211_VIF_BEACON_FILTER = BIT(0), IEEE80211_VIF_SUPPORTS_CQM_RSSI = BIT(1), IEEE80211_VIF_SUPPORTS_UAPSD = BIT(2), IEEE80211_VIF_GET_NOA_UPDATE = BIT(3), }; /** * struct ieee80211_vif - per-interface data * * Data in this structure is continually present for driver * use during the life of a virtual interface. * * @type: type of this virtual interface * @bss_conf: BSS configuration for this interface, either our own * or the BSS we're associated to * @addr: address of this interface * @p2p: indicates whether this AP or STA interface is a p2p * interface, i.e. a GO or p2p-sta respectively * @csa_active: marks whether a channel switch is going on. Internally it is * write-protected by sdata_lock and local->mtx so holding either is fine * for read access. 
 * @mu_mimo_owner: indicates interface owns MU-MIMO capability
 * @driver_flags: flags/capabilities the driver has for this interface,
 *    these need to be set (or cleared) when the interface is added
 *    or, if supported by the driver, the interface type is changed
 *    at runtime, mac80211 will never touch this field
 * @filter_grat_arp_unsol_na: User space configuration whether to filter
 *    gratuitous ARP & unsolicited NA packets
 * @filter_gtk: User space configuration whether to filter GTK packets
 * @hw_queue: hardware queue for each AC
 * @cab_queue: content-after-beacon (DTIM beacon really) queue, AP mode only
 * @chanctx_conf: The channel context this interface is assigned to, or %NULL
 *    when it is not assigned. This pointer is RCU-protected due to the TX
 *    path needing to access it; even though the netdev carrier will always
 *    be off when it is %NULL there can still be races and packets could be
 *    processed after it switches back to %NULL.
 * @debugfs_dir: debugfs dentry, can be used by drivers to create own per
 *    interface debug files. Note that it will be NULL for the virtual
 *    monitor interface (if that is requested.)
 * @probe_req_reg: probe requests should be reported to mac80211 for this
 *    interface.
 * @drv_priv: data area for driver use, will always be aligned to
 *    sizeof(void \*).
 * @txq: the multicast data TX queue (if driver uses the TXQ abstraction)
 * @txqs_stopped: per AC flag to indicate that intermediate TXQs are stopped,
 *    protected by fq->lock.
 */
struct ieee80211_vif {
	enum nl80211_iftype type;
	struct ieee80211_bss_conf bss_conf;
	u8 addr[ETH_ALEN] __aligned(2);
	bool p2p;
	bool csa_active;
	bool mu_mimo_owner;

	u8 cab_queue;
	u8 hw_queue[IEEE80211_NUM_ACS];

	struct ieee80211_txq *txq;

	struct ieee80211_chanctx_conf __rcu *chanctx_conf;

	u32 driver_flags;

#ifdef CPTCFG_IWLMVM_VENDOR_CMDS
	bool filter_grat_arp_unsol_na;
	bool filter_gtk;
#endif

#ifdef CPTCFG_MAC80211_DEBUGFS
	struct dentry *debugfs_dir;
#endif
	unsigned int probe_req_reg;

	bool txqs_stopped[IEEE80211_NUM_ACS];

	/* must be last */
	u8 drv_priv[0] __aligned(sizeof(void *));
};

static inline bool ieee80211_vif_is_mesh(struct ieee80211_vif *vif)
{
#ifdef CPTCFG_MAC80211_MESH
	return vif->type == NL80211_IFTYPE_MESH_POINT;
#endif
	return false;
}

/**
 * wdev_to_ieee80211_vif - return a vif struct from a wdev
 * @wdev: the wdev to get the vif for
 *
 * This can be used by mac80211 drivers with direct cfg80211 APIs
 * (like the vendor commands) that get a wdev.
 *
 * Note that this function may return %NULL if the given wdev isn't
 * associated with a vif that the driver knows about (e.g. monitor
 * or AP_VLAN interfaces.)
 */
struct ieee80211_vif *wdev_to_ieee80211_vif(struct wireless_dev *wdev);

/**
 * ieee80211_vif_to_wdev - return a wdev struct from a vif
 * @vif: the vif to get the wdev for
 *
 * This can be used by mac80211 drivers with direct cfg80211 APIs
 * (like the vendor commands) that need to get the wdev for a vif.
 *
 * Note that this function may return %NULL if the given vif isn't
 * associated with a wdev that the driver knows about (e.g. monitor
 * or AP_VLAN interfaces.)
 */
struct wireless_dev *ieee80211_vif_to_wdev(struct ieee80211_vif *vif);

/**
 * enum ieee80211_key_flags - key flags
 *
 * These flags are used for communication about keys between the driver
 * and mac80211, with the @flags parameter of &struct ieee80211_key_conf.
 *
 * @IEEE80211_KEY_FLAG_GENERATE_IV: This flag should be set by the
 *    driver to indicate that it requires IV generation for this
 *    particular key.
 *    Setting this flag does not necessarily mean that SKBs
 *    will have sufficient tailroom for ICV or MIC.
 * @IEEE80211_KEY_FLAG_GENERATE_MMIC: This flag should be set by
 *    the driver for a TKIP key if it requires Michael MIC
 *    generation in software.
 * @IEEE80211_KEY_FLAG_PAIRWISE: Set by mac80211, this flag indicates
 *    that the key is pairwise rather than a shared key.
 * @IEEE80211_KEY_FLAG_SW_MGMT_TX: This flag should be set by the driver for a
 *    CCMP/GCMP key if it requires CCMP/GCMP encryption of management frames
 *    (MFP) to be done in software.
 * @IEEE80211_KEY_FLAG_PUT_IV_SPACE: This flag should be set by the driver
 *    if space should be prepared for the IV, but the IV
 *    itself should not be generated. Do not set together with
 *    @IEEE80211_KEY_FLAG_GENERATE_IV on the same key. Setting this flag does
 *    not necessarily mean that SKBs will have sufficient tailroom for ICV or
 *    MIC.
 * @IEEE80211_KEY_FLAG_RX_MGMT: This key will be used to decrypt received
 *    management frames. The flag can help drivers that have a hardware
 *    crypto implementation that doesn't deal with management frames
 *    properly by allowing them to not upload the keys to hardware and
 *    fall back to software crypto. Note that this flag deals only with
 *    RX, if your crypto engine can't deal with TX you can also set the
 *    %IEEE80211_KEY_FLAG_SW_MGMT_TX flag to encrypt such frames in SW.
 * @IEEE80211_KEY_FLAG_GENERATE_IV_MGMT: This flag should be set by the
 *    driver for a CCMP/GCMP key to indicate that it requires IV generation
 *    only for management frames (MFP).
 * @IEEE80211_KEY_FLAG_RESERVE_TAILROOM: This flag should be set by the
 *    driver for a key to indicate that sufficient tailroom must always
 *    be reserved for ICV or MIC, even when HW encryption is enabled.
 * @IEEE80211_KEY_FLAG_PUT_MIC_SPACE: This flag should be set by the driver for
 *    a TKIP key if it only requires MIC space. Do not set together with
 *    @IEEE80211_KEY_FLAG_GENERATE_MMIC on the same key.
 */
enum ieee80211_key_flags {
	IEEE80211_KEY_FLAG_GENERATE_IV_MGMT	= BIT(0),
	IEEE80211_KEY_FLAG_GENERATE_IV		= BIT(1),
	IEEE80211_KEY_FLAG_GENERATE_MMIC	= BIT(2),
	IEEE80211_KEY_FLAG_PAIRWISE		= BIT(3),
	IEEE80211_KEY_FLAG_SW_MGMT_TX		= BIT(4),
	IEEE80211_KEY_FLAG_PUT_IV_SPACE		= BIT(5),
	IEEE80211_KEY_FLAG_RX_MGMT		= BIT(6),
	IEEE80211_KEY_FLAG_RESERVE_TAILROOM	= BIT(7),
	IEEE80211_KEY_FLAG_PUT_MIC_SPACE	= BIT(8),
};

/**
 * struct ieee80211_key_conf - key information
 *
 * This key information is given by mac80211 to the driver by
 * the set_key() callback in &struct ieee80211_ops.
 *
 * @hw_key_idx: To be set by the driver, this is the key index the driver
 *    wants to be given when a frame is transmitted and needs to be
 *    encrypted in hardware.
 * @cipher: The key's cipher suite selector.
 * @tx_pn: PN used for TX keys, may be used by the driver as well if it
 *    needs to do software PN assignment by itself (e.g. due to TSO)
 * @flags: key flags, see &enum ieee80211_key_flags.
 * @keyidx: the key index (0-3)
 * @keylen: key material length
 * @key: key material.
For ALG_TKIP the key is encoded as a 256-bit (32 byte) * data block: * - Temporal Encryption Key (128 bits) * - Temporal Authenticator Tx MIC Key (64 bits) * - Temporal Authenticator Rx MIC Key (64 bits) * @icv_len: The ICV length for this key type * @iv_len: The IV length for this key type */ struct ieee80211_key_conf { atomic64_t tx_pn; u32 cipher; u8 icv_len; u8 iv_len; u8 hw_key_idx; s8 keyidx; u16 flags; u8 keylen; u8 key[0]; }; #define IEEE80211_MAX_PN_LEN 16 #define TKIP_PN_TO_IV16(pn) ((u16)(pn & 0xffff)) #define TKIP_PN_TO_IV32(pn) ((u32)((pn >> 16) & 0xffffffff)) /** * struct ieee80211_key_seq - key sequence counter * * @tkip: TKIP data, containing IV32 and IV16 in host byte order * @ccmp: PN data, most significant byte first (big endian, * reverse order than in packet) * @aes_cmac: PN data, most significant byte first (big endian, * reverse order than in packet) * @aes_gmac: PN data, most significant byte first (big endian, * reverse order than in packet) * @gcmp: PN data, most significant byte first (big endian, * reverse order than in packet) * @hw: data for HW-only (e.g. cipher scheme) keys */ struct ieee80211_key_seq { union { struct { u32 iv32; u16 iv16; } tkip; struct { u8 pn[6]; } ccmp; struct { u8 pn[6]; } aes_cmac; struct { u8 pn[6]; } aes_gmac; struct { u8 pn[6]; } gcmp; struct { u8 seq[IEEE80211_MAX_PN_LEN]; u8 seq_len; } hw; }; }; /** * struct ieee80211_cipher_scheme - cipher scheme * * This structure contains a cipher scheme information defining * the secure packet crypto handling. * * @cipher: a cipher suite selector * @iftype: a cipher iftype bit mask indicating an allowed cipher usage * @hdr_len: a length of a security header used the cipher * @pn_len: a length of a packet number in the security header * @pn_off: an offset of pn from the beginning of the security header * @key_idx_off: an offset of key index byte in the security header * @key_idx_mask: a bit mask of key_idx bits * @key_idx_shift: a bit shift needed to get key_idx * key_idx value calculation: * (sec_header_base[key_idx_off] & key_idx_mask) >> key_idx_shift * @mic_len: a mic length in bytes */ struct ieee80211_cipher_scheme { u32 cipher; u16 iftype; u8 hdr_len; u8 pn_len; u8 pn_off; u8 key_idx_off; u8 key_idx_mask; u8 key_idx_shift; u8 mic_len; }; /** * enum set_key_cmd - key command * * Used with the set_key() callback in &struct ieee80211_ops, this * indicates whether a key is being removed or added. * * @SET_KEY: a key is set * @DISABLE_KEY: a key must be disabled */ enum set_key_cmd { SET_KEY, DISABLE_KEY, }; /** * enum ieee80211_sta_state - station state * * @IEEE80211_STA_NOTEXIST: station doesn't exist at all, * this is a special state for add/remove transitions * @IEEE80211_STA_NONE: station exists without special state * @IEEE80211_STA_AUTH: station is authenticated * @IEEE80211_STA_ASSOC: station is associated * @IEEE80211_STA_AUTHORIZED: station is authorized (802.1X) */ enum ieee80211_sta_state { /* NOTE: These need to be ordered correctly! */ IEEE80211_STA_NOTEXIST, IEEE80211_STA_NONE, IEEE80211_STA_AUTH, IEEE80211_STA_ASSOC, IEEE80211_STA_AUTHORIZED, }; /** * enum ieee80211_sta_rx_bandwidth - station RX bandwidth * @IEEE80211_STA_RX_BW_20: station can only receive 20 MHz * @IEEE80211_STA_RX_BW_40: station can receive up to 40 MHz * @IEEE80211_STA_RX_BW_80: station can receive up to 80 MHz * @IEEE80211_STA_RX_BW_160: station can receive up to 160 MHz * (including 80+80 MHz) * * Implementation note: 20 must be zero to be initialized * correctly, the values must be sorted. 
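 *
 * Because the values are sorted, they can be compared directly; an
 * illustrative sketch (not part of the original documentation)::
 *
 *   bool wide = sta->bandwidth >= IEEE80211_STA_RX_BW_80;  // 80 MHz or wider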
*/ enum ieee80211_sta_rx_bandwidth { IEEE80211_STA_RX_BW_20 = 0, IEEE80211_STA_RX_BW_40, IEEE80211_STA_RX_BW_80, IEEE80211_STA_RX_BW_160, }; /** * struct ieee80211_sta_rates - station rate selection table * * @rcu_head: RCU head used for freeing the table on update * @rate: transmit rates/flags to be used by default. * Overriding entries per-packet is possible by using cb tx control. */ struct ieee80211_sta_rates { struct rcu_head rcu_head; struct { s8 idx; u8 count; u8 count_cts; u8 count_rts; u16 flags; } rate[IEEE80211_TX_RATE_TABLE_SIZE]; }; /** * struct ieee80211_sta - station table entry * * A station table entry represents a station we are possibly * communicating with. Since stations are RCU-managed in * mac80211, any ieee80211_sta pointer you get access to must * either be protected by rcu_read_lock() explicitly or implicitly, * or you must take good care to not use such a pointer after a * call to your sta_remove callback that removed it. * * @addr: MAC address * @aid: AID we assigned to the station if we're an AP * @supp_rates: Bitmap of supported rates (per band) * @ht_cap: HT capabilities of this STA; restricted to our own capabilities * @vht_cap: VHT capabilities of this STA; restricted to our own capabilities * @he_cap: HE capabilities of this STA * @max_rx_aggregation_subframes: maximal amount of frames in a single AMPDU * that this station is allowed to transmit to us. * Can be modified by driver. * @wme: indicates whether the STA supports QoS/WME (if local devices does, * otherwise always false) * @drv_priv: data area for driver use, will always be aligned to * sizeof(void \*), size is determined in hw information. * @uapsd_queues: bitmap of queues configured for uapsd. Only valid * if wme is supported. The bits order is like in * IEEE80211_WMM_IE_STA_QOSINFO_AC_*. * @max_sp: max Service Period. Only valid if wme is supported. * @bandwidth: current bandwidth the station can receive with * @rx_nss: in HT/VHT, the maximum number of spatial streams the * station can receive at the moment, changed by operating mode * notifications and capabilities. The value is only valid after * the station moves to associated state. * @smps_mode: current SMPS mode (off, static or dynamic) * @rates: rate control selection table * @tdls: indicates whether the STA is a TDLS peer * @tdls_initiator: indicates the STA is an initiator of the TDLS link. Only * valid if the STA is a TDLS peer in the first place. * @mfp: indicates whether the STA uses management frame protection or not. * @max_amsdu_subframes: indicates the maximal number of MSDUs in a single * A-MSDU. Taken from the Extended Capabilities element. 0 means * unlimited. * @support_p2p_ps: indicates whether the STA supports P2P PS mechanism or not. * @max_rc_amsdu_len: Maximum A-MSDU size in bytes recommended by rate control. 
 * @max_tid_amsdu_len: Maximum A-MSDU size in bytes for this TID
 * @txq: per-TID data TX queues (if driver uses the TXQ abstraction); note that
 *    the last entry (%IEEE80211_NUM_TIDS) is used for non-data frames
 */
struct ieee80211_sta {
	u32 supp_rates[NUM_NL80211_BANDS];
	u8 addr[ETH_ALEN];
	u16 aid;
	struct ieee80211_sta_ht_cap ht_cap;
	struct ieee80211_sta_vht_cap vht_cap;
	struct ieee80211_sta_he_cap he_cap;
	u16 max_rx_aggregation_subframes;
	bool wme;
	u8 uapsd_queues;
	u8 max_sp;
	u8 rx_nss;
	enum ieee80211_sta_rx_bandwidth bandwidth;
	enum ieee80211_smps_mode smps_mode;
	struct ieee80211_sta_rates __rcu *rates;
	bool tdls;
	bool tdls_initiator;
	bool mfp;
	u8 max_amsdu_subframes;

	/**
	 * @max_amsdu_len:
	 * indicates the maximal length of an A-MSDU in bytes.
	 * This field is always valid for packets with a VHT preamble.
	 * For packets with an HT preamble, additional limits apply:
	 *
	 * * If the skb is transmitted as part of a BA agreement, the
	 *   A-MSDU maximal size is min(max_amsdu_len, 4065) bytes.
	 * * If the skb is not part of a BA agreement, the A-MSDU maximal
	 *   size is min(max_amsdu_len, 7935) bytes.
	 *
	 * Both additional HT limits must be enforced by the low level
	 * driver. This is defined by the spec (IEEE 802.11-2012 section
	 * 8.3.2.2 NOTE 2).
	 */
	u16 max_amsdu_len;
	bool support_p2p_ps;
	u16 max_rc_amsdu_len;
	u16 max_tid_amsdu_len[IEEE80211_NUM_TIDS];

	struct ieee80211_txq *txq[IEEE80211_NUM_TIDS + 1];

	/* must be last */
	u8 drv_priv[0] __aligned(sizeof(void *));
};

/**
 * enum sta_notify_cmd - sta notify command
 *
 * Used with the sta_notify() callback in &struct ieee80211_ops, this
 * indicates if an associated station made a power state transition.
 *
 * @STA_NOTIFY_SLEEP: a station is now sleeping
 * @STA_NOTIFY_AWAKE: a sleeping station woke up
 */
enum sta_notify_cmd {
	STA_NOTIFY_SLEEP, STA_NOTIFY_AWAKE,
};

/**
 * struct ieee80211_tx_control - TX control data
 *
 * @sta: station table entry, this sta pointer may be NULL and
 *    it is not allowed to copy the pointer, due to RCU.
 */
struct ieee80211_tx_control {
	struct ieee80211_sta *sta;
};

/**
 * struct ieee80211_txq - Software intermediate tx queue
 *
 * @vif: &struct ieee80211_vif pointer from the add_interface callback.
 * @sta: station table entry, %NULL for per-vif queue
 * @tid: the TID for this queue (unused for per-vif queue),
 *    %IEEE80211_NUM_TIDS for non-data (if enabled)
 * @ac: the AC for this queue
 * @drv_priv: driver private area, sized by hw->txq_data_size
 *
 * The driver can obtain packets from this queue by calling
 * ieee80211_tx_dequeue().
 */
struct ieee80211_txq {
	struct ieee80211_vif *vif;
	struct ieee80211_sta *sta;
	u8 tid;
	u8 ac;

	/* must be last */
	u8 drv_priv[0] __aligned(sizeof(void *));
};

/**
 * enum ieee80211_hw_flags - hardware flags
 *
 * These flags are used to indicate hardware capabilities to
 * the stack. Generally, flags here should have their meaning
 * done in a way that the simplest hardware doesn't need setting
 * any particular flags. There are some exceptions to this rule,
 * however, so you are advised to review these flags carefully.
 *
 * @IEEE80211_HW_HAS_RATE_CONTROL:
 *    The hardware or firmware includes rate control, and cannot be
 *    controlled by the stack. As such, no rate control algorithm
 *    should be instantiated, and the TX rate reported to userspace
 *    will be taken from the TX status instead of the rate control
 *    algorithm.
 *    Note that this requires that the driver implement a number of
 *    callbacks so it has the correct information, it needs to have
 *    the @set_rts_threshold callback and must look at the BSS config
 *    @use_cts_prot for G/N protection, @use_short_slot for slot
 *    timing in 2.4 GHz and @use_short_preamble for preambles for
 *    CCK frames.
 *
 * @IEEE80211_HW_RX_INCLUDES_FCS:
 *    Indicates that received frames passed to the stack include
 *    the FCS at the end.
 *
 * @IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING:
 *    Some wireless LAN chipsets buffer broadcast/multicast frames
 *    for power saving stations in the hardware/firmware and others
 *    rely on the host system for such buffering. This option is used
 *    to configure the IEEE 802.11 upper layer to buffer broadcast and
 *    multicast frames when there are power saving stations so that
 *    the driver can fetch them with ieee80211_get_buffered_bc().
 *
 * @IEEE80211_HW_SIGNAL_UNSPEC:
 *    Hardware can provide signal values but we don't know its units. We
 *    expect values between 0 and @max_signal.
 *    If possible please provide dB or dBm instead.
 *
 * @IEEE80211_HW_SIGNAL_DBM:
 *    Hardware gives signal values in dBm, decibel difference from
 *    one milliwatt. This is the preferred method since it is standardized
 *    between different devices. @max_signal does not need to be set.
 *
 * @IEEE80211_HW_SPECTRUM_MGMT:
 *    Hardware supports spectrum management defined in 802.11h
 *    Measurement, Channel Switch, Quieting, TPC
 *
 * @IEEE80211_HW_AMPDU_AGGREGATION:
 *    Hardware supports 11n A-MPDU aggregation.
 *
 * @IEEE80211_HW_SUPPORTS_PS:
 *    Hardware has power save support (i.e. can go to sleep).
 *
 * @IEEE80211_HW_PS_NULLFUNC_STACK:
 *    Hardware requires nullfunc frame handling in stack, implies
 *    stack support for dynamic PS.
 *
 * @IEEE80211_HW_SUPPORTS_DYNAMIC_PS:
 *    Hardware has support for dynamic PS.
 *
 * @IEEE80211_HW_MFP_CAPABLE:
 *    Hardware supports management frame protection (MFP, IEEE 802.11w).
 *
 * @IEEE80211_HW_REPORTS_TX_ACK_STATUS:
 *    Hardware can provide ack status reports of Tx frames to
 *    the stack.
 *
 * @IEEE80211_HW_CONNECTION_MONITOR:
 *    The hardware performs its own connection monitoring, including
 *    periodic keep-alives to the AP and probing the AP on beacon loss.
 *
 * @IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC:
 *    This device needs to get data from beacon before association (i.e.
 *    dtim_period).
 *
 * @IEEE80211_HW_SUPPORTS_PER_STA_GTK: The device's crypto engine supports
 *    per-station GTKs as used by IBSS RSN or during fast transition. If
 *    the device doesn't support per-station GTKs, but can be asked not
 *    to decrypt group addressed frames, then IBSS RSN support is still
 *    possible but software crypto will be used. Advertise the wiphy flag
 *    only in that case.
 *
 * @IEEE80211_HW_AP_LINK_PS: When operating in AP mode the device
 *    autonomously manages the PS status of connected stations. When
 *    this flag is set mac80211 will not trigger PS mode for connected
 *    stations based on the PM bit of incoming frames.
 *    Use ieee80211_start_ps()/ieee80211_end_ps() to manually configure
 *    the PS mode of connected stations.
 *
 * @IEEE80211_HW_TX_AMPDU_SETUP_IN_HW: The device handles TX A-MPDU session
 *    setup strictly in HW. mac80211 should not attempt to do this in
 *    software.
 *
 * @IEEE80211_HW_WANT_MONITOR_VIF: The driver would like to be informed of
 *    a virtual monitor interface when monitor interfaces are the only
 *    active interfaces.
 *
 * @IEEE80211_HW_NO_AUTO_VIF: The driver would like for no wlanX to
 *    be created. It is expected user-space will create vifs as
 *    desired (and thus have them named as desired).
 *
 * @IEEE80211_HW_SW_CRYPTO_CONTROL: The driver wants to control which of the
 *    crypto algorithms can be done in software - so don't automatically
 *    try to fall back to it if hardware crypto fails, but do so only if
 *    the driver returns 1. This also forces the driver to advertise its
 *    supported cipher suites.
 *
 * @IEEE80211_HW_SUPPORT_FAST_XMIT: The driver/hardware supports fast-xmit,
 *    this currently requires only the ability to calculate the duration
 *    for frames.
 *
 * @IEEE80211_HW_QUEUE_CONTROL: The driver wants to control per-interface
 *    queue mapping in order to use different queues (not just one per AC)
 *    for different virtual interfaces. See the doc section on HW queue
 *    control for more details.
 *
 * @IEEE80211_HW_SUPPORTS_RC_TABLE: The driver supports using a rate
 *    selection table provided by the rate control algorithm.
 *
 * @IEEE80211_HW_P2P_DEV_ADDR_FOR_INTF: Use the P2P Device address for any
 *    P2P Interface. This will be honoured even if more than one interface
 *    is supported.
 *
 * @IEEE80211_HW_TIMING_BEACON_ONLY: Use sync timing from beacon frames
 *    only, to allow getting TBTT of a DTIM beacon.
 *
 * @IEEE80211_HW_SUPPORTS_HT_CCK_RATES: Hardware supports mixing HT/CCK rates
 *    and can cope with CCK rates in an aggregation session (e.g. by not
 *    using aggregation for such frames.)
 *
 * @IEEE80211_HW_CHANCTX_STA_CSA: Support 802.11h based channel-switch (CSA)
 *    for a single active channel while using channel contexts. When support
 *    is not enabled the default action is to disconnect when getting the
 *    CSA frame.
 *
 * @IEEE80211_HW_SUPPORTS_CLONED_SKBS: The driver will never modify the payload
 *    or tailroom of TX skbs without copying them first.
 *
 * @IEEE80211_HW_SINGLE_SCAN_ON_ALL_BANDS: The HW supports scanning on all bands
 *    in one command, mac80211 doesn't have to run separate scans per band.
 *
 * @IEEE80211_HW_TDLS_WIDER_BW: The device/driver supports wider bandwidth
 *    than the BSS bandwidth for a TDLS link on the base channel.
 *
 * @IEEE80211_HW_SUPPORTS_AMSDU_IN_AMPDU: The driver supports receiving A-MSDUs
 *    within A-MPDU.
 *
 * @IEEE80211_HW_BEACON_TX_STATUS: The device/driver provides TX status
 *    for sent beacons.
 *
 * @IEEE80211_HW_NEEDS_UNIQUE_STA_ADDR: Hardware (or driver) requires that each
 *    station has a unique address, i.e. each station entry can be identified
 *    by just its MAC address; this prevents, for example, the same station
 *    from connecting to two virtual AP interfaces at the same time.
 *
 * @IEEE80211_HW_SUPPORTS_REORDERING_BUFFER: Hardware (or driver) manages the
 *    reordering buffer internally, guaranteeing mac80211 receives frames in
 *    order and does not need to manage its own reorder buffer or BA session
 *    timeout.
 *
 * @IEEE80211_HW_USES_RSS: The device uses RSS and thus requires parallel RX,
 *    which implies using per-CPU station statistics.
 *
 * @IEEE80211_HW_TX_AMSDU: Hardware (or driver) supports software aggregated
 *    A-MSDU frames. Requires software tx queueing and fast-xmit support.
 *    When not using minstrel/minstrel_ht rate control, the driver must
 *    limit the maximum A-MSDU size based on the current tx rate by setting
 *    max_rc_amsdu_len in struct ieee80211_sta.
 *
 * @IEEE80211_HW_TX_FRAG_LIST: Hardware (or driver) supports sending frag_list
 *    skbs, needed for zero-copy software A-MSDU.
 *
 * @IEEE80211_HW_REPORTS_LOW_ACK: The driver (or firmware) reports low ack event
 *    by ieee80211_report_low_ack() based on its own algorithm.
For such * drivers, mac80211 packet loss mechanism will not be triggered and driver * is completely depending on firmware event for station kickout. * * @IEEE80211_HW_SUPPORTS_TX_FRAG: Hardware does fragmentation by itself. * The stack will not do fragmentation. * The callback for @set_frag_threshold should be set as well. * * @IEEE80211_HW_SUPPORTS_TDLS_BUFFER_STA: Hardware supports buffer STA on * TDLS links. * * @IEEE80211_HW_DEAUTH_NEED_MGD_TX_PREP: The driver requires the * mgd_prepare_tx() callback to be called before transmission of a * deauthentication frame in case the association was completed but no * beacon was heard. This is required in multi-channel scenarios, where the * virtual interface might not be given air time for the transmission of * the frame, as it is not synced with the AP/P2P GO yet, and thus the * deauthentication frame might not be transmitted. * * @IEEE80211_HW_DOESNT_SUPPORT_QOS_NDP: The driver (or firmware) doesn't * support QoS NDP for AP probing - that's most likely a driver bug. * * @IEEE80211_HW_BUFF_MMPDU_TXQ: use the TXQ for bufferable MMPDUs, this of * course requires the driver to use TXQs to start with. * * @IEEE80211_HW_SUPPORTS_VHT_EXT_NSS_BW: (Hardware) rate control supports VHT * extended NSS BW (dot11VHTExtendedNSSBWCapable). This flag will be set if * the selected rate control algorithm sets %RATE_CTRL_CAPA_VHT_EXT_NSS_BW * but if the rate control is built-in then it must be set by the driver. * See also the documentation for that flag. * * @IEEE80211_HW_STA_MMPDU_TXQ: use the extra non-TID per-station TXQ for all * MMPDUs on station interfaces. This of course requires the driver to use * TXQs to start with. * * @IEEE80211_HW_SUPPORTS_MULTI_BSSID: Hardware supports multi BSSID * * @IEEE80211_HW_SUPPORTS_ONLY_HE_MULTI_BSSID: Hardware supports multi BSSID * only for HE APs. Applies if @IEEE80211_HW_SUPPORTS_MULTI_BSSID is set. 
 *
 * @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays
 */
enum ieee80211_hw_flags {
	IEEE80211_HW_HAS_RATE_CONTROL,
	IEEE80211_HW_RX_INCLUDES_FCS,
	IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING,
	IEEE80211_HW_SIGNAL_UNSPEC,
	IEEE80211_HW_SIGNAL_DBM,
	IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC,
	IEEE80211_HW_SPECTRUM_MGMT,
	IEEE80211_HW_AMPDU_AGGREGATION,
	IEEE80211_HW_SUPPORTS_PS,
	IEEE80211_HW_PS_NULLFUNC_STACK,
	IEEE80211_HW_SUPPORTS_DYNAMIC_PS,
	IEEE80211_HW_MFP_CAPABLE,
	IEEE80211_HW_WANT_MONITOR_VIF,
	IEEE80211_HW_NO_AUTO_VIF,
	IEEE80211_HW_SW_CRYPTO_CONTROL,
	IEEE80211_HW_SUPPORT_FAST_XMIT,
	IEEE80211_HW_REPORTS_TX_ACK_STATUS,
	IEEE80211_HW_CONNECTION_MONITOR,
	IEEE80211_HW_QUEUE_CONTROL,
	IEEE80211_HW_SUPPORTS_PER_STA_GTK,
	IEEE80211_HW_AP_LINK_PS,
	IEEE80211_HW_TX_AMPDU_SETUP_IN_HW,
	IEEE80211_HW_SUPPORTS_RC_TABLE,
	IEEE80211_HW_P2P_DEV_ADDR_FOR_INTF,
	IEEE80211_HW_TIMING_BEACON_ONLY,
	IEEE80211_HW_SUPPORTS_HT_CCK_RATES,
	IEEE80211_HW_CHANCTX_STA_CSA,
	IEEE80211_HW_SUPPORTS_CLONED_SKBS,
	IEEE80211_HW_SINGLE_SCAN_ON_ALL_BANDS,
	IEEE80211_HW_TDLS_WIDER_BW,
	IEEE80211_HW_SUPPORTS_AMSDU_IN_AMPDU,
	IEEE80211_HW_BEACON_TX_STATUS,
	IEEE80211_HW_NEEDS_UNIQUE_STA_ADDR,
	IEEE80211_HW_SUPPORTS_REORDERING_BUFFER,
	IEEE80211_HW_USES_RSS,
	IEEE80211_HW_TX_AMSDU,
	IEEE80211_HW_TX_FRAG_LIST,
	IEEE80211_HW_REPORTS_LOW_ACK,
	IEEE80211_HW_SUPPORTS_TX_FRAG,
	IEEE80211_HW_SUPPORTS_TDLS_BUFFER_STA,
	IEEE80211_HW_DEAUTH_NEED_MGD_TX_PREP,
	IEEE80211_HW_DOESNT_SUPPORT_QOS_NDP,
	IEEE80211_HW_BUFF_MMPDU_TXQ,
	IEEE80211_HW_SUPPORTS_VHT_EXT_NSS_BW,
	IEEE80211_HW_STA_MMPDU_TXQ,
	IEEE80211_HW_SUPPORTS_MULTI_BSSID,
	IEEE80211_HW_SUPPORTS_ONLY_HE_MULTI_BSSID,

	/* keep last, obviously */
	NUM_IEEE80211_HW_FLAGS
};

/*
 * struct ieee80211_tx_thrshld_md - tx packet metadata that crosses a thrshld
 *
 * @mode: recording mode (internal buffer or continuous recording)
 * @monitor_collec_wind: the size of the window to collect the logs
 * @seq: packet sequence
 * @pkt_start: start time of triggering pkt
 * @pkt_end: end time of triggering pkt
 * @msrmnt: the tx latency of the pkt
 * @tid: tid of the pkt
 */
struct ieee80211_tx_thrshld_md {
	u8 mode;
	u16 monitor_collec_wind;
	u16 seq;
	u32 pkt_start;
	u32 pkt_end;
	u32 msrmnt;
	u16 tid;
};

/**
 * struct ieee80211_hw - hardware information and state
 *
 * This structure contains the configuration and hardware
 * information for an 802.11 PHY.
 *
 * @wiphy: This points to the &struct wiphy allocated for this
 *    802.11 PHY. You must fill in the @perm_addr and @dev
 *    members of this structure using SET_IEEE80211_DEV()
 *    and SET_IEEE80211_PERM_ADDR(). Additionally, all supported
 *    bands (with channels, bitrates) are registered here.
 *
 * @conf: &struct ieee80211_conf, device configuration, don't use.
 *
 * @priv: pointer to private area that was allocated for driver use
 *    along with this structure.
 *
 * @flags: hardware flags, see &enum ieee80211_hw_flags.
 *
 * @extra_tx_headroom: headroom to reserve in each transmit skb
 *    for use by the driver (e.g. for transmit headers.)
 *
 * @extra_beacon_tailroom: tailroom to reserve in each beacon tx skb.
 *    Can be used by drivers to add extra IEs.
 *
 * @max_signal: Maximum value for signal (rssi) in RX information, used
 *    only when @IEEE80211_HW_SIGNAL_UNSPEC or @IEEE80211_HW_SIGNAL_DB
 *
 * @max_listen_interval: max listen interval in units of beacon interval
 *    that HW supports
 *
 * @queues: number of available hardware transmit queues for
 *    data packets. WMM/QoS requires at least four, these
 *    queues need to have configurable access parameters.
 *
 * @rate_control_algorithm: rate control algorithm for this hardware.
 *    If unset (NULL), the default algorithm will be used. Must be
 *    set before calling ieee80211_register_hw().
 *
 * @vif_data_size: size (in bytes) of the drv_priv data area
 *    within &struct ieee80211_vif.
 * @sta_data_size: size (in bytes) of the drv_priv data area
 *    within &struct ieee80211_sta.
 * @chanctx_data_size: size (in bytes) of the drv_priv data area
 *    within &struct ieee80211_chanctx_conf.
 * @txq_data_size: size (in bytes) of the drv_priv data area
 *    within @struct ieee80211_txq.
 *
 * @max_rates: maximum number of alternate rate retry stages the hw
 *    can handle.
 * @max_report_rates: maximum number of alternate rate retry stages
 *    the hw can report back.
 * @max_rate_tries: maximum number of tries for each stage
 *
 * @max_rx_aggregation_subframes: maximum buffer size (number of
 *    sub-frames) to be used for A-MPDU block ack receiver
 *    aggregation.
 *    This is only relevant if the device has restrictions on the
 *    number of subframes, if it relies on mac80211 to do reordering
 *    it shouldn't be set.
 *
 * @max_tx_aggregation_subframes: maximum number of subframes in an
 *    aggregate an HT/HE device will transmit. In HT AddBA we'll
 *    advertise a constant value of 64 as some older APs crash if
 *    the window size is smaller (an example is LinkSys WRT120N
 *    with FW v1.0.07 build 002 Jun 18 2012).
 *    For AddBA to HE capable peers this value will be used.
 *
 * @max_tx_fragments: maximum number of tx buffers per (A)-MSDU, sum
 *    of 1 + skb_shinfo(skb)->nr_frags for each skb in the frag_list.
 *
 * @offchannel_tx_hw_queue: HW queue ID to use for offchannel TX
 *    (if %IEEE80211_HW_QUEUE_CONTROL is set)
 *
 * @radiotap_mcs_details: lists which MCS information the HW can
 *    report, by default it is set to _MCS, _GI and _BW but doesn't
 *    include _FMT. Use %IEEE80211_RADIOTAP_MCS_HAVE_\* values, only
 *    adding _BW is supported today.
 *
 * @radiotap_vht_details: lists which VHT MCS information the HW reports,
 *    the default is _GI | _BANDWIDTH.
 *    Use the %IEEE80211_RADIOTAP_VHT_KNOWN_\* values.
 *
 * @radiotap_he: HE radiotap validity flags
 *
 * @radiotap_timestamp: Information for the radiotap timestamp field; if the
 *    'units_pos' member is set to a non-negative value it must be set to
 *    a combination of a IEEE80211_RADIOTAP_TIMESTAMP_UNIT_* and a
 *    IEEE80211_RADIOTAP_TIMESTAMP_SPOS_* value, and then the timestamp
 *    field will be added and populated from the &struct ieee80211_rx_status
 *    device_timestamp. If the 'accuracy' member is non-negative, it's put
 *    into the accuracy radiotap field and the accuracy known flag is set.
 *
 * @netdev_features: netdev features to be set in each netdev created
 *    from this HW. Note that not all features are usable with mac80211,
 *    other features will be rejected during HW registration.
 *
 * @uapsd_queues: This bitmap is included in (re)association frame to indicate
 *    for each access category if it is uAPSD trigger-enabled and delivery-
 *    enabled. Use IEEE80211_WMM_IE_STA_QOSINFO_AC_* to set this bitmap.
 *    Each bit corresponds to different AC. Value '1' in specific bit means
 *    that corresponding AC is both trigger- and delivery-enabled. '0' means
 *    neither enabled.
 *
 * @uapsd_max_sp_len: maximum number of total buffered frames the WMM AP may
 *    deliver to a WMM STA during any Service Period triggered by the WMM STA.
 *    Use IEEE80211_WMM_IE_STA_QOSINFO_SP_* for correct values.
 *
 * @n_cipher_schemes: the size of the array of cipher scheme definitions
 *    pointed to by @cipher_schemes.
 * @cipher_schemes: a pointer to an array of cipher scheme definitions
 *    supported by HW.
 * @max_nan_de_entries: maximum number of NAN DE functions supported by the
 *    device.
 *
 * @tx_sk_pacing_shift: Pacing shift to set on TCP sockets when frames from
 *    them are encountered. The default should typically not be changed,
 *    unless the driver has good reasons for needing more buffers.
 */
struct ieee80211_hw {
	struct ieee80211_conf conf;
	struct wiphy *wiphy;
	const char *rate_control_algorithm;
	void *priv;
	unsigned long flags[BITS_TO_LONGS(NUM_IEEE80211_HW_FLAGS)];
	unsigned int extra_tx_headroom;
	unsigned int extra_beacon_tailroom;
	int vif_data_size;
	int sta_data_size;
	int chanctx_data_size;
	int txq_data_size;
	u16 queues;
	u16 max_listen_interval;
	s8 max_signal;
	u8 max_rates;
	u8 max_report_rates;
	u8 max_rate_tries;
	u16 max_rx_aggregation_subframes;
	u16 max_tx_aggregation_subframes;
	u8 max_tx_fragments;
	u8 offchannel_tx_hw_queue;
	u8 radiotap_mcs_details;
	u16 radiotap_vht_details;
	struct {
		int units_pos;
		s16 accuracy;
	} radiotap_timestamp;
	netdev_features_t netdev_features;
	u8 uapsd_queues;
	u8 uapsd_max_sp_len;
	u8 n_cipher_schemes;
	const struct ieee80211_cipher_scheme *cipher_schemes;
	u8 max_nan_de_entries;
	u8 tx_sk_pacing_shift;
};

static inline bool _ieee80211_hw_check(struct ieee80211_hw *hw,
				       enum ieee80211_hw_flags flg)
{
	return test_bit(flg, hw->flags);
}
#define ieee80211_hw_check(hw, flg)	_ieee80211_hw_check(hw, IEEE80211_HW_##flg)

static inline void _ieee80211_hw_set(struct ieee80211_hw *hw,
				     enum ieee80211_hw_flags flg)
{
	return __set_bit(flg, hw->flags);
}
#define ieee80211_hw_set(hw, flg)	_ieee80211_hw_set(hw, IEEE80211_HW_##flg)

/**
 * struct ieee80211_scan_request - hw scan request
 *
 * @ies: pointers to different parts of the IEs (in req.ie)
 * @req: cfg80211 request.
 */
struct ieee80211_scan_request {
	struct ieee80211_scan_ies ies;

	/* Keep last */
	struct cfg80211_scan_request req;
};

/**
 * struct ieee80211_tdls_ch_sw_params - TDLS channel switch parameters
 *
 * @sta: peer this TDLS channel-switch request/response came from
 * @chandef: channel referenced in a TDLS channel-switch request
 * @action_code: see &enum ieee80211_tdls_actioncode
 * @status: channel-switch response status
 * @timestamp: time at which the frame was received
 * @switch_time: switch-timing parameter received in the frame
 * @switch_timeout: switch-timing parameter received in the frame
 * @tmpl_skb: TDLS switch-channel response template
 * @ch_sw_tm_ie: offset of the channel-switch timing IE inside @tmpl_skb
 */
struct ieee80211_tdls_ch_sw_params {
	struct ieee80211_sta *sta;
	struct cfg80211_chan_def *chandef;
	u8 action_code;
	u32 status;
	u32 timestamp;
	u16 switch_time;
	u16 switch_timeout;
	struct sk_buff *tmpl_skb;
	u32 ch_sw_tm_ie;
};

/**
 * wiphy_to_ieee80211_hw - return a mac80211 driver hw struct from a wiphy
 *
 * @wiphy: the &struct wiphy which we want to query
 *
 * mac80211 drivers can use this to get to their respective
 * &struct ieee80211_hw. Drivers wishing to get to their own private
 * structure can then access it via hw->priv. Note that mac80211 drivers should
 * not use wiphy_priv() to try to get their private driver structure as this
 * is already used internally by mac80211.
 *
 * Return: The mac80211 driver hw struct of @wiphy.
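 *
 * Example (an illustrative sketch, not part of the original documentation;
 * "struct my_driver_data" is a hypothetical private structure)::
 *
 *   struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
 *   struct my_driver_data *data = hw->priv;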
*/ struct ieee80211_hw *wiphy_to_ieee80211_hw(struct wiphy *wiphy); /** * SET_IEEE80211_DEV - set device for 802.11 hardware * * @hw: the &struct ieee80211_hw to set the device for * @dev: the &struct device of this 802.11 device */ static inline void SET_IEEE80211_DEV(struct ieee80211_hw *hw, struct device *dev) { set_wiphy_dev(hw->wiphy, dev); } /** * SET_IEEE80211_PERM_ADDR - set the permanent MAC address for 802.11 hardware * * @hw: the &struct ieee80211_hw to set the MAC address for * @addr: the address to set */ static inline void SET_IEEE80211_PERM_ADDR(struct ieee80211_hw *hw, const u8 *addr) { memcpy(hw->wiphy->perm_addr, addr, ETH_ALEN); } static inline struct ieee80211_rate * ieee80211_get_tx_rate(const struct ieee80211_hw *hw, const struct ieee80211_tx_info *c) { if (WARN_ON_ONCE(c->control.rates[0].idx < 0)) return NULL; return &hw->wiphy->bands[c->band]->bitrates[c->control.rates[0].idx]; } static inline struct ieee80211_rate * ieee80211_get_rts_cts_rate(const struct ieee80211_hw *hw, const struct ieee80211_tx_info *c) { if (c->control.rts_cts_rate_idx < 0) return NULL; return &hw->wiphy->bands[c->band]->bitrates[c->control.rts_cts_rate_idx]; } static inline struct ieee80211_rate * ieee80211_get_alt_retry_rate(const struct ieee80211_hw *hw, const struct ieee80211_tx_info *c, int idx) { if (c->control.rates[idx + 1].idx < 0) return NULL; return &hw->wiphy->bands[c->band]->bitrates[c->control.rates[idx + 1].idx]; } /** * ieee80211_free_txskb - free TX skb * @hw: the hardware * @skb: the skb * * Free a transmit skb. Use this function when some failure * to transmit happened and thus status cannot be reported. */ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb); /** * DOC: Hardware crypto acceleration * * mac80211 is capable of taking advantage of many hardware * acceleration designs for encryption and decryption operations. * * The set_key() callback in the &struct ieee80211_ops for a given * device is called to enable hardware acceleration of encryption and * decryption. The callback takes a @sta parameter that will be NULL * for default keys or keys used for transmission only, or point to * the station information for the peer for individual keys. * Multiple transmission keys with the same key index may be used when * VLANs are configured for an access point. * * When transmitting, the TX control data will use the @hw_key_idx * selected by the driver by modifying the &struct ieee80211_key_conf * pointed to by the @key parameter to the set_key() function. * * The set_key() call for the %SET_KEY command should return 0 if * the key is now in use, -%EOPNOTSUPP or -%ENOSPC if it couldn't be * added; if you return 0 then hw_key_idx must be assigned to the * hardware key index; you are free to use the full u8 range. * * Note that in the case that the @IEEE80211_HW_SW_CRYPTO_CONTROL flag is * set, mac80211 will not automatically fall back to software crypto if * enabling hardware crypto failed. The set_key() call may also return the * value 1 to permit this specific key/algorithm to be done in software. * * When the cmd is %DISABLE_KEY then it must succeed. * * Note that it is permissible to not decrypt a frame even if a key * for it has been uploaded to hardware; the stack will not make any * decision based on whether a key has been uploaded or not but rather * based on the receive flags.
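 *
 * As an illustrative sketch only (my_set_key, my_install_key and
 * my_remove_key are hypothetical driver helpers, not mac80211 API),
 * a set_key() implementation following the rules above might look
 * like this:
 *
 *	static int my_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
 *			      struct ieee80211_vif *vif,
 *			      struct ieee80211_sta *sta,
 *			      struct ieee80211_key_conf *key)
 *	{
 *		switch (cmd) {
 *		case SET_KEY:
 *			if (key->cipher != WLAN_CIPHER_SUITE_CCMP)
 *				return -EOPNOTSUPP;
 *			key->hw_key_idx = my_install_key(hw->priv, key);
 *			return 0;
 *		case DISABLE_KEY:
 *			my_remove_key(hw->priv, key->hw_key_idx);
 *			return 0;
 *		}
 *		return -EOPNOTSUPP;
 *	}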
* * The &struct ieee80211_key_conf structure pointed to by the @key * parameter is guaranteed to be valid until another call to set_key() * removes it, but it can only be used as a cookie to differentiate * keys. * * In TKIP some HW needs to be provided a phase 1 key, for RX decryption * acceleration (i.e. iwlwifi). Those drivers should provide an update_tkip_key * handler. * The update_tkip_key() call updates the driver with the new phase 1 key. * This happens every time the iv16 wraps around (every 65536 packets). The * set_key() call will happen only once for each key (unless the AP did * rekeying); it will not include a valid phase 1 key. The valid phase 1 key is * provided by update_tkip_key only. The trigger that makes mac80211 call this * handler is software decryption with wrap around of iv16. * * The set_default_unicast_key() call updates the default WEP key index * configured to the hardware for WEP encryption type. This is required * for devices that support offload of data packets (e.g. ARP responses). * * Mac80211 drivers should set the @NL80211_EXT_FEATURE_CAN_REPLACE_PTK0 flag * when they are able to replace in-use PTK keys according to the following * requirements: * 1) They do not hand over frames decrypted with the old key to mac80211 once the call to set_key() with command %DISABLE_KEY has been completed when also setting @IEEE80211_KEY_FLAG_GENERATE_IV for any key, 2) either drop or continue to use the old key for any outgoing frames queued at the time of the key deletion (including re-transmits), 3) never send out a frame queued prior to the set_key() %SET_KEY command encrypted with the new key and 4) never send out a frame unencrypted when it should be encrypted. Mac80211 will not queue any new frames for a deleted key to the driver. */ /** * DOC: Powersave support * * mac80211 has support for various powersave implementations. * * First, it can support hardware that handles all powersaving by itself; * such hardware should simply set the %IEEE80211_HW_SUPPORTS_PS hardware * flag. In that case, it will be told about the desired powersave mode * with the %IEEE80211_CONF_PS flag depending on the association status. * The hardware must take care of sending nullfunc frames when necessary, * i.e. when entering and leaving powersave mode. The hardware is required * to look at the AID in beacons and signal to the AP that it woke up when * it finds traffic directed to it. * * %IEEE80211_CONF_PS flag enabled means that the powersave mode defined in * IEEE 802.11-2007 section 11.2 is enabled. This is not to be confused * with hardware wakeup and sleep states. The driver is responsible for waking * up the hardware before issuing commands to the hardware and putting it * back to sleep at appropriate times. * * When PS is enabled, hardware needs to wake up for beacons and receive the * buffered multicast/broadcast frames after the beacon. Also it must be * possible to send frames and receive the acknowledgment frame. * * Other hardware designs cannot send nullfunc frames by themselves and also * need software support for parsing the TIM bitmap. This is also supported * by mac80211 by combining the %IEEE80211_HW_SUPPORTS_PS and * %IEEE80211_HW_PS_NULLFUNC_STACK flags. The hardware is of course still * required to pass up beacons. The hardware is still required to handle * waking up for multicast traffic; if it cannot the driver must handle that * as best as it can, mac80211 is too slow to do that.
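 *
 * As a minimal sketch (my_config and my_set_ps are hypothetical driver
 * functions), a driver advertising both flags would handle the PS
 * configuration roughly like this:
 *
 *	ieee80211_hw_set(hw, SUPPORTS_PS);
 *	ieee80211_hw_set(hw, PS_NULLFUNC_STACK);
 *
 *	static int my_config(struct ieee80211_hw *hw, u32 changed)
 *	{
 *		if (changed & IEEE80211_CONF_CHANGE_PS)
 *			my_set_ps(hw->priv,
 *				  hw->conf.flags & IEEE80211_CONF_PS);
 *		return 0;
 *	}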
* * Dynamic powersave is an extension to normal powersave in which the * hardware stays awake for a user-specified period of time after sending a * frame so that reply frames need not be buffered and therefore delayed to * the next wakeup. It's a compromise between getting good enough latency when * there's data traffic and still saving significant power in idle * periods. * * Dynamic powersave is simply supported by mac80211 enabling and disabling * PS based on traffic. The driver only needs to set the %IEEE80211_HW_SUPPORTS_PS * flag and mac80211 will handle everything automatically. Additionally, * hardware having support for the dynamic PS feature may set the * %IEEE80211_HW_SUPPORTS_DYNAMIC_PS flag to indicate that it can support * dynamic PS mode itself. The driver needs to look at the * @dynamic_ps_timeout hardware configuration value and use that value * whenever %IEEE80211_CONF_PS is set. In this case mac80211 will disable the * dynamic PS feature in the stack and will just keep %IEEE80211_CONF_PS * enabled whenever the user has enabled powersave. * * The driver advertises U-APSD client support by enabling the * %IEEE80211_VIF_SUPPORTS_UAPSD flag. The mode is configured through the * uapsd parameter in the conf_tx() operation. Hardware needs to send the QoS * Nullfunc frames and stay awake until the service period has ended. To * utilize U-APSD, dynamic powersave is disabled for the VoIP AC and all frames * from that AC are transmitted with powersave enabled. * * Note: U-APSD client mode is not yet supported with * %IEEE80211_HW_PS_NULLFUNC_STACK. */ /** * DOC: Beacon filter support * * Some hardware has beacon filter support to reduce host CPU wakeups, * which will reduce system power consumption. It usually works so that * the firmware creates a checksum of the beacon but omits all constantly * changing elements (TSF, TIM etc). Whenever the checksum changes the * beacon is forwarded to the host, otherwise it will be just dropped. That * way the host will only receive beacons where some relevant information * (for example ERP protection or WMM settings) has changed. * * Beacon filter support is advertised with the %IEEE80211_VIF_BEACON_FILTER * interface capability. The driver needs to enable beacon filter support * whenever power save is enabled, that is %IEEE80211_CONF_PS is set. When * power save is enabled, the stack will not check for beacon loss and the * driver needs to notify about loss of beacons with ieee80211_beacon_loss(). * * The time (or number of beacons missed) until the firmware notifies the * driver of a beacon loss event (which in turn causes the driver to call * ieee80211_beacon_loss()) should be configurable and will be controlled * by mac80211 and the roaming algorithm in the future. * * Since there may be constantly changing information elements that nothing * in the software stack cares about, we will, in the future, have mac80211 * tell the driver which information elements are interesting in the sense * that we want to see changes in them. This will include * * - a list of information element IDs * - a list of OUIs for the vendor information element * * Ideally, the hardware would filter out any beacons without changes in the * requested elements, but if it cannot support that it may, at the expense * of some efficiency, filter out only a subset. For example, if the device * doesn't support checking for OUIs it should pass up all changes in all * vendor information elements.
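 *
 * As a sketch of the driver side (my_priv and my_fw_beacon_lost_notif
 * are hypothetical; the firmware notification path is device-specific),
 * a beacon-loss event from the firmware would simply be forwarded to
 * mac80211:
 *
 *	static void my_fw_beacon_lost_notif(struct my_priv *priv)
 *	{
 *		ieee80211_beacon_loss(priv->vif);
 *	}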
* * Note that change, for the sake of simplification, also includes information * elements appearing or disappearing from the beacon. * * Some hardware supports an "ignore list" instead; just make sure nothing * that was requested is on the ignore list, and include commonly changing * information element IDs in the ignore list, for example 11 (BSS load) and * the various vendor-assigned IEs with unknown contents (128, 129, 133-136, * 149, 150, 155, 156, 173, 176, 178, 179, 219); for forward compatibility * it could also include some currently unused IDs. * * * In addition to these capabilities, hardware should support notifying the * host of changes in the beacon RSSI. This is relevant to implement roaming * when no traffic is flowing (when traffic is flowing we see the RSSI of * the received data packets). This can consist of notifying the host when * the RSSI changes significantly or when it drops below or rises above * configurable thresholds. In the future these thresholds will also be * configured by mac80211 (which gets them from userspace) to implement * them as the roaming algorithm requires. * * If the hardware cannot implement this, the driver should ask it to * periodically pass beacon frames to the host so that software can do the * signal strength threshold checking. */ /** * DOC: Spatial multiplexing power save * * SMPS (Spatial multiplexing power save) is a mechanism to conserve * power in an 802.11n implementation. For details on the mechanism * and rationale, please refer to 802.11 (as amended by 802.11n-2009) * "11.2.3 SM power save". * * The mac80211 implementation is capable of sending action frames * to update the AP about the station's SMPS mode, and will instruct * the driver to enter the specific mode. It will also announce the * requested SMPS mode during the association handshake. Hardware * support for this feature is required, and can be indicated by * hardware flags. * * The default mode will be "automatic", which nl80211/cfg80211 * defines to be dynamic SMPS in (regular) powersave, and SMPS * turned off otherwise. * * To support this feature, the driver must set the appropriate * hardware support flags, and handle the SMPS flag passed to the config() * operation. It will then be instructed through this mechanism to * enter the requested SMPS mode while associated to an HT AP. */ /** * DOC: Frame filtering * * mac80211 needs to see many management frames for proper * operation, and users may want to see many more frames when * in monitor mode. However, for best CPU usage and power consumption, * having as few frames as possible percolate through the stack is * desirable. Hence, the hardware should filter as much as possible. * * To achieve this, mac80211 uses filter flags (see below) to tell * the driver's configure_filter() function which frames should be * passed to mac80211 and which should be filtered out. * * Before configure_filter() is invoked, the prepare_multicast() * callback is invoked with the parameters @mc_count and @mc_list * for the combined multicast address list of all virtual interfaces. * Its use is optional, and it returns a u64 that is passed to * configure_filter(). Additionally, configure_filter() has the * arguments @changed_flags telling which flags were changed and * @total_flags with the new flag states. * * If your device has no multicast address filters your driver will * need to check both the %FIF_ALLMULTI flag and the @mc_count * parameter to see whether multicast frames should be accepted * or dropped.
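 *
 * As an illustrative sketch following the rules spelled out just below
 * (my_apply_rx_filter is a hypothetical driver helper), a driver
 * supporting only a few filter flags might implement:
 *
 *	static void my_configure_filter(struct ieee80211_hw *hw,
 *					unsigned int changed_flags,
 *					unsigned int *total_flags,
 *					u64 multicast)
 *	{
 *		*total_flags &= FIF_ALLMULTI | FIF_FCSFAIL |
 *				FIF_BCN_PRBRESP_PROMISC;
 *		my_apply_rx_filter(hw->priv, *total_flags, multicast);
 *	}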
* * All unsupported flags in @total_flags must be cleared. * Hardware does not support a flag if it is incapable of _passing_ * the frame to the stack. Otherwise the driver must ignore * the flag, but not clear it. * You must _only_ clear the flag (announce no support for the * flag to mac80211) if you are not able to pass the packet type * to the stack (so the hardware always filters it). * So for example, you should clear @FIF_CONTROL, if your hardware * always filters control frames. If your hardware always passes * control frames to the kernel and is incapable of filtering them, * you do _not_ clear the @FIF_CONTROL flag. * This rule applies to all other FIF flags as well. */ /** * DOC: AP support for powersaving clients * * In order to implement AP and P2P GO modes, mac80211 has support for * client powersaving, both "legacy" PS (PS-Poll/null data) and uAPSD. * There currently is no support for sAPSD. * * There is one assumption that mac80211 makes, namely that a client * will not poll with PS-Poll and trigger with uAPSD at the same time. * Both are supported, and both can be used by the same client, but * they can't be used concurrently by the same client. This simplifies * the driver code. * * The first thing to keep in mind is that there is a flag for complete * driver implementation: %IEEE80211_HW_AP_LINK_PS. If this flag is set, * mac80211 expects the driver to handle most of the state machine for * powersaving clients and will ignore the PM bit in incoming frames. * Drivers then use ieee80211_sta_ps_transition() to inform mac80211 of * stations' powersave transitions. In this mode, mac80211 also doesn't * handle PS-Poll/uAPSD. * * In the mode without %IEEE80211_HW_AP_LINK_PS, mac80211 will check the * PM bit in incoming frames for client powersave transitions. When a * station goes to sleep, we will stop transmitting to it. There is, * however, a race condition: a station might go to sleep while there is * data buffered on hardware queues. If the device has support for this, * it will reject frames, and the driver should give the frames back to * mac80211 with the %IEEE80211_TX_STAT_TX_FILTERED flag set which will * cause mac80211 to retry the frame when the station wakes up. The * driver is also notified of powersave transitions by calling its * @sta_notify callback. * * When the station is asleep, it has three choices: it can wake up, * it can PS-Poll, or it can possibly start a uAPSD service period. * Waking up is implemented by simply transmitting all buffered (and * filtered) frames to the station. This is the easiest case. When * the station sends a PS-Poll or a uAPSD trigger frame, mac80211 * will inform the driver of this with the @allow_buffered_frames * callback; this callback is optional. mac80211 will then transmit * the frames as usual and set the %IEEE80211_TX_CTL_NO_PS_BUFFER * on each frame. The last frame in the service period (or the only * response to a PS-Poll) also has %IEEE80211_TX_STATUS_EOSP set to * indicate that it ends the service period; as this frame must have * a TX status report it also sets %IEEE80211_TX_CTL_REQ_TX_STATUS. * When TX status is reported for this frame, the service period is * marked as having ended and a new one can be started by the peer. * * Additionally, non-bufferable MMPDUs can also be transmitted by * mac80211 with the %IEEE80211_TX_CTL_NO_PS_BUFFER set in them.
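 *
 * For a driver that sets %IEEE80211_HW_AP_LINK_PS as described above,
 * a sketch of forwarding a device-detected powersave transition
 * (my_priv and the notification path are hypothetical):
 *
 *	static void my_sta_ps_notif(struct my_priv *priv,
 *				    struct ieee80211_sta *sta, bool sleeping)
 *	{
 *		ieee80211_sta_ps_transition(sta, sleeping);
 *	}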
* * Another race condition can happen on some devices like iwlwifi * when there are frames queued for the station and it wakes up * or polls; the frames that are already queued could end up being * transmitted first instead, causing reordering and/or wrong * processing of the EOSP. The cause is that allowing frames to be * transmitted to a certain station is out-of-band communication to * the device. To allow this problem to be solved, the driver can * call ieee80211_sta_block_awake() if frames are buffered when it * is notified that the station went to sleep. When all these frames * have been filtered (see above), it must call the function again * to indicate that the station is no longer blocked. * * If the driver buffers frames in the driver for aggregation in any * way, it must use the ieee80211_sta_set_buffered() call when it is * notified of the station going to sleep to inform mac80211 of any * TIDs that have frames buffered. Note that when a station wakes up * this information is reset (hence the requirement to call it when * informed of the station going to sleep). Then, when a service * period starts for any reason, @release_buffered_frames is called * with the number of frames to be released and which TIDs they are * to come from. In this case, the driver is responsible for setting * the EOSP (for uAPSD) and MORE_DATA bits in the released frames; * to help, the @more_data parameter is passed to tell the driver if * there is more data on other TIDs -- the TIDs to release frames * from are ignored since mac80211 doesn't know how many frames the * buffers for those TIDs contain. * * If the driver also implements GO mode, where absence periods may * shorten service periods (or abort PS-Poll responses), it must * filter those response frames except in the case of frames that * are buffered in the driver -- those must remain buffered to avoid * reordering. Because it is possible that no frames are released * in this case, the driver must call ieee80211_sta_eosp() * to indicate to mac80211 that the service period ended anyway. * * Finally, if frames from multiple TIDs are released from mac80211 * but the driver might reorder them, it must clear & set the flags * appropriately (only the last frame may have %IEEE80211_TX_STATUS_EOSP) * and also take care of the EOSP and MORE_DATA bits in the frame. * The driver may also use ieee80211_sta_eosp() in this case. * * Note that if the driver ever buffers frames other than QoS-data * frames, it must take care to never send a non-QoS-data frame as * the last frame in a service period, adding a QoS-nulldata frame * after a non-QoS-data frame if needed. */ /** * DOC: HW queue control * * Before HW queue control was introduced, mac80211 only had a single static * assignment of per-interface AC software queues to hardware queues. This * was problematic for a few reasons: * 1) off-channel transmissions might get stuck behind other frames * 2) multiple virtual interfaces couldn't be handled correctly * 3) after-DTIM frames could get stuck behind other frames * * To solve this, hardware typically uses multiple different queues for all * the different usages, and this needs to be propagated into mac80211 so it * won't have the same problem with the software queues. * * Therefore, mac80211 now offers the %IEEE80211_HW_QUEUE_CONTROL capability * flag that tells it that the driver implements its own queue control. To do * so, the driver will set up the various queues in each &struct ieee80211_vif * and the offchannel queue in &struct ieee80211_hw.
In response, mac80211 will * use those queue IDs in the hw_queue field of &struct ieee80211_tx_info and * if necessary will queue the frame on the right software queue that mirrors * the hardware queue. * Additionally, the driver then has to use these HW queue IDs for the queue * management functions (ieee80211_stop_queue() et al.) * * The driver is free to set up the queue mappings as needed; multiple virtual * interfaces may map to the same hardware queues if needed. The setup has to * happen during add_interface or change_interface callbacks. For example, a * driver supporting station+station and station+AP modes might decide to have * 10 hardware queues to handle different scenarios: * * 4 AC HW queues for 1st vif: 0, 1, 2, 3 * 4 AC HW queues for 2nd vif: 4, 5, 6, 7 * after-DTIM queue for AP: 8 * off-channel queue: 9 * * It would then set up the hardware like this: * hw.offchannel_tx_hw_queue = 9 * * and the first virtual interface that is added as follows: * vif.hw_queue[IEEE80211_AC_VO] = 0 * vif.hw_queue[IEEE80211_AC_VI] = 1 * vif.hw_queue[IEEE80211_AC_BE] = 2 * vif.hw_queue[IEEE80211_AC_BK] = 3 * vif.cab_queue = 8 // if AP mode, otherwise %IEEE80211_INVAL_HW_QUEUE * and the second virtual interface with 4-7. * * If queue 6 gets full, for example, mac80211 would only stop the second * virtual interface's BE queue since virtual interface queues are per AC. * * Note that the vif.cab_queue value should be set to %IEEE80211_INVAL_HW_QUEUE * whenever the queue is not used (i.e. the interface is not in AP mode) if the * queue could potentially be shared since mac80211 will look at cab_queue when * a queue is stopped/woken even if the interface is not in AP mode. */ /** * enum ieee80211_filter_flags - hardware filter flags * * These flags determine what the filter in hardware should be * programmed to let through and what should not be passed to the * stack. It is always safe to pass more frames than requested, * but this has a negative impact on power consumption. * * @FIF_ALLMULTI: pass all multicast frames, this is used if requested * by the user or if the hardware is not capable of filtering by * multicast address. * * @FIF_FCSFAIL: pass frames with failed FCS (but you need to set the * %RX_FLAG_FAILED_FCS_CRC for them) * * @FIF_PLCPFAIL: pass frames with failed PLCP CRC (but you need to set * the %RX_FLAG_FAILED_PLCP_CRC for them) * * @FIF_BCN_PRBRESP_PROMISC: This flag is set during scanning to indicate * to the hardware that it should not filter beacons or probe responses * by BSSID. Filtering them can greatly reduce the amount of processing * mac80211 needs to do and the amount of CPU wakeups, so you should * honour this flag if possible. * * @FIF_CONTROL: pass control frames (except for PS Poll) addressed to this * station * * @FIF_OTHER_BSS: pass frames destined to other BSSes * * @FIF_PSPOLL: pass PS Poll frames * * @FIF_PROBE_REQ: pass probe request frames */ enum ieee80211_filter_flags { FIF_ALLMULTI = 1<<1, FIF_FCSFAIL = 1<<2, FIF_PLCPFAIL = 1<<3, FIF_BCN_PRBRESP_PROMISC = 1<<4, FIF_CONTROL = 1<<5, FIF_OTHER_BSS = 1<<6, FIF_PSPOLL = 1<<7, FIF_PROBE_REQ = 1<<8, }; /** * enum ieee80211_ampdu_mlme_action - A-MPDU actions * * These flags are used with the ampdu_action() callback in * &struct ieee80211_ops to indicate which action is needed.
* * Note that drivers MUST be able to deal with a TX aggregation * session being stopped even before they OK'ed starting it by * calling ieee80211_start_tx_ba_cb_irqsafe, because the peer * might receive the addBA frame and send a delBA right away! * * @IEEE80211_AMPDU_RX_START: start RX aggregation * @IEEE80211_AMPDU_RX_STOP: stop RX aggregation * @IEEE80211_AMPDU_TX_START: start TX aggregation * @IEEE80211_AMPDU_TX_OPERATIONAL: TX aggregation has become operational * @IEEE80211_AMPDU_TX_STOP_CONT: stop TX aggregation but continue transmitting * queued packets, now unaggregated. After all packets are transmitted the * driver has to call ieee80211_stop_tx_ba_cb_irqsafe(). * @IEEE80211_AMPDU_TX_STOP_FLUSH: stop TX aggregation and flush all packets, * called when the station is removed. There's no need or reason to call * ieee80211_stop_tx_ba_cb_irqsafe() in this case as mac80211 assumes the * session is gone and removes the station. * @IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: called when TX aggregation is stopped * but the driver hasn't called ieee80211_stop_tx_ba_cb_irqsafe() yet and * now the connection is dropped and the station will be removed. Drivers * should clean up and drop remaining packets when this is called. */ enum ieee80211_ampdu_mlme_action { IEEE80211_AMPDU_RX_START, IEEE80211_AMPDU_RX_STOP, IEEE80211_AMPDU_TX_START, IEEE80211_AMPDU_TX_STOP_CONT, IEEE80211_AMPDU_TX_STOP_FLUSH, IEEE80211_AMPDU_TX_STOP_FLUSH_CONT, IEEE80211_AMPDU_TX_OPERATIONAL, }; /** * struct ieee80211_ampdu_params - AMPDU action parameters * * @action: the ampdu action, value from %ieee80211_ampdu_mlme_action. * @sta: peer of this AMPDU session * @tid: tid of the BA session * @ssn: start sequence number of the session. TX/RX_STOP can pass 0. When * action is set to %IEEE80211_AMPDU_RX_START the driver passes back the * actual ssn value used to start the session and writes the value here. * @buf_size: reorder buffer size (number of subframes). Valid only when the * action is set to %IEEE80211_AMPDU_RX_START or * %IEEE80211_AMPDU_TX_OPERATIONAL * @amsdu: indicates the peer's ability to receive A-MSDU within A-MPDU. * valid when the action is set to %IEEE80211_AMPDU_TX_OPERATIONAL * @timeout: BA session timeout. Valid only when the action is set to * %IEEE80211_AMPDU_RX_START */ struct ieee80211_ampdu_params { enum ieee80211_ampdu_mlme_action action; struct ieee80211_sta *sta; u16 tid; u16 ssn; u16 buf_size; bool amsdu; u16 timeout; }; /** * enum ieee80211_frame_release_type - frame release reason * @IEEE80211_FRAME_RELEASE_PSPOLL: frame released for PS-Poll * @IEEE80211_FRAME_RELEASE_UAPSD: frame(s) released due to * frame received on trigger-enabled AC */ enum ieee80211_frame_release_type { IEEE80211_FRAME_RELEASE_PSPOLL, IEEE80211_FRAME_RELEASE_UAPSD, }; /** * enum ieee80211_rate_control_changed - flags to indicate what changed * * @IEEE80211_RC_BW_CHANGED: The bandwidth that can be used to transmit * to this station changed. The actual bandwidth is in the station * information -- for HT20/40 the IEEE80211_HT_CAP_SUP_WIDTH_20_40 * flag changes, for HT and VHT the bandwidth field changes. * @IEEE80211_RC_SMPS_CHANGED: The SMPS state of the station changed. * @IEEE80211_RC_SUPP_RATES_CHANGED: The supported rate set of this peer * changed (in IBSS mode) due to discovering more information about * the peer. 
* @IEEE80211_RC_NSS_CHANGED: N_SS (number of spatial streams) was changed * by the peer */ enum ieee80211_rate_control_changed { IEEE80211_RC_BW_CHANGED = BIT(0), IEEE80211_RC_SMPS_CHANGED = BIT(1), IEEE80211_RC_SUPP_RATES_CHANGED = BIT(2), IEEE80211_RC_NSS_CHANGED = BIT(3), }; /** * enum ieee80211_roc_type - remain on channel type * * With the support for multi channel contexts and multi channel operations, * remain on channel operations might be limited/deferred/aborted by other * flows/operations which have higher priority (and vice versa). * Specifying the ROC type can be used by devices to prioritize the ROC * operations compared to other operations/flows. * * @IEEE80211_ROC_TYPE_NORMAL: There are no special requirements for this ROC. * @IEEE80211_ROC_TYPE_MGMT_TX: The remain on channel request is required * for sending management frames offchannel. */ enum ieee80211_roc_type { IEEE80211_ROC_TYPE_NORMAL = 0, IEEE80211_ROC_TYPE_MGMT_TX, }; /** * enum ieee80211_reconfig_type - reconfig type * * This enum is used by the reconfig_complete() callback to indicate what * reconfiguration type was completed. * * @IEEE80211_RECONFIG_TYPE_RESTART: hw restart type * (also due to resume() callback returning 1) * @IEEE80211_RECONFIG_TYPE_SUSPEND: suspend type (regardless * of wowlan configuration) */ enum ieee80211_reconfig_type { IEEE80211_RECONFIG_TYPE_RESTART, IEEE80211_RECONFIG_TYPE_SUSPEND, }; /** * struct ieee80211_ops - callbacks from mac80211 to the driver * * This structure contains various callbacks that the driver may * handle or, in some cases, must handle, for example to configure * the hardware to a new channel or to transmit a frame. * * @tx: Handler that the 802.11 module calls for each transmitted frame. * skb contains the buffer starting from the IEEE 802.11 header. * The low-level driver should send the frame out based on * configuration in the TX control data. This handler should, * preferably, never fail and stop queues appropriately. * Must be atomic. * * @start: Called before the first netdevice attached to the hardware * is enabled. This should turn on the hardware and must turn on * frame reception (for possibly enabled monitor interfaces.) * Returns zero or negative error codes; the error codes may be seen * in userspace. * When the device is started it should not have a MAC address * to avoid acknowledging frames before a non-monitor device * is added. * Must be implemented and can sleep. * * @stop: Called after the last netdevice attached to the hardware * is disabled. This should turn off the hardware (at least * it must turn off frame reception.) * May be called right after add_interface if that rejects * an interface. If you added any work onto the mac80211 workqueue * you should ensure to cancel it on this callback. * Must be implemented and can sleep. * * @suspend: Suspend the device; mac80211 itself will quiesce before and * stop transmitting and doing any other configuration, and then * ask the device to suspend. This is only invoked when WoWLAN is * configured, otherwise the device is deconfigured completely and * reconfigured at resume time. * The driver may also impose special conditions under which it * wants to use the "normal" suspend (deconfigure), say if it only * supports WoWLAN when the device is associated. In this case, it * must return 1 from this function. * * @resume: If WoWLAN was configured, this indicates that mac80211 is * now resuming its operation, after this the device must be fully * functional again.
If this returns an error, the only way out is * to also unregister the device. If it returns 1, then mac80211 * will also go through the regular complete restart on resume. * * @set_wakeup: Enable or disable wakeup when WoWLAN configuration is * modified. The reason is that device_set_wakeup_enable() is * supposed to be called when the configuration changes, not only * in suspend(). * * @add_interface: Called when a netdevice attached to the hardware is * enabled. Because it is not called for monitor mode devices, @start * and @stop must be implemented. * The driver should perform any initialization it needs before * the device can be enabled. The initial configuration for the * interface is given in the conf parameter. * The callback may refuse to add an interface by returning a * negative error code (which will be seen in userspace.) * Must be implemented and can sleep. * * @change_interface: Called when a netdevice changes type. This callback * is optional, but only if it is supported can interface types be * switched while the interface is UP. The callback may sleep. * Note that while an interface is being switched, it will not be * found by the interface iteration callbacks. * * @remove_interface: Notifies a driver that an interface is going down. * The @stop callback is called after this if it is the last interface * and no monitor interfaces are present. * When all interfaces are removed, the MAC address in the hardware * must be cleared so the device no longer acknowledges packets, * the mac_addr member of the conf structure is, however, set to the * MAC address of the device going away. * Hence, this callback must be implemented. It can sleep. * * @config: Handler for configuration requests. IEEE 802.11 code calls this * function to change hardware configuration, e.g., channel. * This function should never fail but returns a negative error code * if it does. The callback can sleep. * * @bss_info_changed: Handler for configuration requests related to BSS * parameters that may vary during BSS's lifespan, and may affect low * level driver (e.g. assoc/disassoc status, erp parameters). * This function should not be used if no BSS has been set, unless * for association indication. The @changed parameter indicates which * of the bss parameters has changed when a call is made. The callback * can sleep. * * @prepare_multicast: Prepare for multicast filter configuration. * This callback is optional, and its return value is passed * to configure_filter(). This callback must be atomic. * * @configure_filter: Configure the device's RX filter. * See the section "Frame filtering" for more information. * This callback must be implemented and can sleep. * * @config_iface_filter: Configure the interface's RX filter. * This callback is optional and is used to configure which frames * should be passed to mac80211. The filter_flags is the combination * of FIF_* flags. The changed_flags is a bit mask that indicates * which flags are changed. * This callback can sleep. * * @set_tim: Set TIM bit. mac80211 calls this function when a TIM bit * must be set or cleared for a given STA. Must be atomic. * * @set_key: See the section "Hardware crypto acceleration" * This callback is only called between add_interface and * remove_interface calls, i.e. while the given virtual interface * is enabled. * Returns a negative error code if the key can't be added. * The callback can sleep. * * @update_tkip_key: See the section "Hardware crypto acceleration" * This callback will be called in the context of Rx. 
Called for drivers * which set IEEE80211_KEY_FLAG_TKIP_REQ_RX_P1_KEY. * The callback must be atomic. * * @set_rekey_data: If the device supports GTK rekeying, for example while the * host is suspended, it can assign this callback to retrieve the data * necessary to do GTK rekeying; this is the KEK, KCK and replay counter. * After rekeying was done it should (for example during resume) notify * userspace of the new replay counter using ieee80211_gtk_rekey_notify(). * * @set_default_unicast_key: Set the default (unicast) key index, useful for * WEP when the device sends data packets autonomously, e.g. for ARP * offloading. The index can be 0-3, or -1 for unsetting it. * * @hw_scan: Ask the hardware to service the scan request, no need to start * the scan state machine in the stack. The scan must honour the channel * configuration done by the regulatory agent in the wiphy's * registered bands. The hardware (or the driver) needs to make sure * that power save is disabled. * The @req ie/ie_len members are rewritten by mac80211 to contain the * entire IEs after the SSID, so that drivers need not look at these * at all but just send them after the SSID -- mac80211 includes the * (extended) supported rates and HT information (where applicable). * When the scan finishes, ieee80211_scan_completed() must be called; * note that it also must be called when the scan cannot finish due to * any error unless this callback returned a negative error code. * This callback is also allowed to return the special return value 1, * this indicates that hardware scan isn't desirable right now and a * software scan should be done instead. A driver wishing to use this * capability must ensure its (hardware) scan capabilities aren't * advertised as more capable than mac80211's software scan is. * The callback can sleep. * * @cancel_hw_scan: Ask the low-level driver to cancel the active hw scan. * The driver should ask the hardware to cancel the scan (if possible), * but the scan will be completed only after the driver calls * ieee80211_scan_completed(). * This callback is needed for wowlan, to prevent enqueueing a new * scan_work after the low-level driver was already suspended. * The callback can sleep. * * @sched_scan_start: Ask the hardware to start scanning repeatedly at * specific intervals. The driver must call the * ieee80211_sched_scan_results() function whenever it finds results. * This process will continue until sched_scan_stop is called. * * @sched_scan_stop: Tell the hardware to stop an ongoing scheduled scan. * In this case, ieee80211_sched_scan_stopped() must not be called. * * @sw_scan_start: Notifier function that is called just before a software scan * is started. Can be NULL, if the driver doesn't need this notification. * The mac_addr parameter allows supporting NL80211_SCAN_FLAG_RANDOM_ADDR, * the driver may set the NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR flag if it * can use this parameter. The callback can sleep. * * @sw_scan_complete: Notifier function that is called just after a * software scan finished. Can be NULL, if the driver doesn't need * this notification. * The callback can sleep. * * @get_stats: Return low-level statistics. * Returns zero if statistics are available. * The callback can sleep. * * @get_key_seq: If your device implements encryption in hardware and does * IV/PN assignment then this callback should be provided to read the * IV/PN for the given key from hardware. * The callback must be atomic. * * @set_frag_threshold: Configuration of fragmentation threshold.
Assign this * if the device does fragmentation by itself. Note that to prevent the * stack from doing fragmentation IEEE80211_HW_SUPPORTS_TX_FRAG * should be set as well. * The callback can sleep. * * @set_rts_threshold: Configuration of RTS threshold (if device needs it) * The callback can sleep. * * @sta_add: Notifies low level driver about addition of an associated station, * AP, IBSS/WDS/mesh peer etc. This callback can sleep. * * @sta_remove: Notifies low level driver about removal of an associated * station, AP, IBSS/WDS/mesh peer etc. Note that after the callback * returns it isn't safe to use the pointer, not even RCU protected; * no RCU grace period is guaranteed between returning here and freeing * the station. See @sta_pre_rcu_remove if needed. * This callback can sleep. * * @sta_add_debugfs: Drivers can use this callback to add debugfs files * when a station is added to mac80211's station list. This callback * should be within a CPTCFG_MAC80211_DEBUGFS conditional. This * callback can sleep. * * @sta_notify: Notifies low level driver about power state transition of an * associated station, AP, IBSS/WDS/mesh peer etc. For a VIF operating * in AP mode, this callback will not be called when the flag * %IEEE80211_HW_AP_LINK_PS is set. Must be atomic. * * @sta_state: Notifies low level driver about state transition of a * station (which can be the AP, a client, IBSS/WDS/mesh peer etc.) * This callback is mutually exclusive with @sta_add/@sta_remove. * It must not fail for down transitions but may fail for transitions * up the list of states. Also note that after the callback returns it * isn't safe to use the pointer, not even RCU protected - no RCU grace * period is guaranteed between returning here and freeing the station. * See @sta_pre_rcu_remove if needed. * The callback can sleep. * * @sta_pre_rcu_remove: Notify driver about station removal before RCU * synchronisation. This is useful if a driver needs to have station * pointers protected using RCU, it can then use this call to clear * the pointers instead of waiting for an RCU grace period to elapse * in @sta_state. * The callback can sleep. * * @sta_rc_update: Notifies the driver of changes to the bitrates that can be * used to transmit to the station. The changes are advertised with bits * from &enum ieee80211_rate_control_changed and the values are reflected * in the station data. This callback should only be used when the driver * uses hardware rate control (%IEEE80211_HW_HAS_RATE_CONTROL) since * otherwise the rate control algorithm is notified directly. * Must be atomic. * @sta_rate_tbl_update: Notifies the driver that the rate table changed. This * is only used if the configured rate control algorithm actually uses * the new rate table API, and is therefore optional. Must be atomic. * * @sta_statistics: Get statistics for this station. For example with beacon * filtering, the statistics kept by mac80211 might not be accurate, so * let the driver pre-fill the statistics. The driver can fill most of * the values (indicating which by setting the filled bitmap), but not * all of them make sense - see the source for which ones are possible. * Statistics that the driver doesn't fill will be filled by mac80211. * The callback can sleep. * * @conf_tx: Configure TX queue parameters (EDCF (aifs, cw_min, cw_max), * bursting) for a hardware TX queue. * Returns a negative error code on failure. * The callback can sleep. * * @get_tsf: Get the current TSF timer value from firmware/hardware. 
Currently, * this is only used for IBSS mode BSSID merging and debugging. Is not a * required function. * The callback can sleep. * * @set_tsf: Set the TSF timer to the specified value in the firmware/hardware. * Currently, this is only used for IBSS mode debugging. Is not a * required function. * The callback can sleep. * * @offset_tsf: Offset the TSF timer by the specified value in the * firmware/hardware. Preferred to set_tsf as it avoids delay between * calling set_tsf() and hardware getting programmed, which will show up * as TSF delay. Is not a required function. * The callback can sleep. * * @reset_tsf: Reset the TSF timer and allow firmware/hardware to synchronize * with other STAs in the IBSS. This is only used in IBSS mode. This * function is optional if the firmware/hardware takes full care of * TSF synchronization. * The callback can sleep. * * @tx_last_beacon: Determine whether the last IBSS beacon was sent by us. * This is needed only for IBSS mode and the result of this function is * used to determine whether to reply to Probe Requests. * Returns non-zero if this device sent the last beacon. * The callback can sleep. * * @get_survey: Return per-channel survey information * * @rfkill_poll: Poll rfkill hardware state. If you need this, you also * need to set wiphy->rfkill_poll to %true before registration, * and need to call wiphy_rfkill_set_hw_state() in the callback. * The callback can sleep. * * @set_coverage_class: Set slot time for given coverage class as specified * in IEEE 802.11-2007 section 17.3.8.6 and modify ACK timeout * accordingly; a coverage class of -1 enables the ACK timeout * estimation algorithm (dynack). To disable dynack, set a valid value * for the coverage class. This callback is not required and may sleep. * * @testmode_cmd: Implement a cfg80211 test mode command. The passed @vif may * be %NULL. The callback can sleep. * @testmode_dump: Implement a cfg80211 test mode dump. The callback can sleep. * * @flush: Flush all pending frames from the hardware queue, making sure * that the hardware queues are empty. The @queues parameter is a bitmap * of queues to flush, which is useful if different virtual interfaces * use different hardware queues; it may also indicate all queues. * If the parameter @drop is set to %true, pending frames may be dropped. * Note that vif can be NULL. * The callback can sleep. * * @channel_switch: Drivers that need (or want) to offload the channel * switch operation for CSAs received from the AP may implement this * callback. They must then call ieee80211_chswitch_done() to indicate * completion of the channel switch. * * @set_antenna: Set antenna configuration (tx_ant, rx_ant) on the device. * Parameters are bitmaps of allowed antennas to use for TX/RX. Drivers may * reject TX/RX mask combinations they cannot support by returning -EINVAL * (also see nl80211.h @NL80211_ATTR_WIPHY_ANTENNA_TX). * * @get_antenna: Get current antenna configuration from device (tx_ant, rx_ant). * * @remain_on_channel: Starts an off-channel period on the given channel, must * call back to ieee80211_ready_on_channel() when on that channel. Note * that normal channel traffic is not stopped as this is intended for hw * offload. Frames to transmit on the off-channel channel are transmitted * normally except for the %IEEE80211_TX_CTL_TX_OFFCHAN flag. When the * duration (which will always be non-zero) expires, the driver must call * ieee80211_remain_on_channel_expired().
* Note that this callback may be called while the device is in IDLE and * must be accepted in this case. * This callback may sleep. * @cancel_remain_on_channel: Requests that an ongoing off-channel period is * aborted before it expires. This callback may sleep. * * @set_ringparam: Set tx and rx ring sizes. * * @get_ringparam: Get tx and rx ring current and maximum sizes. * * @tx_frames_pending: Check if there is any pending frame in the hardware * queues before entering power save. * * @set_bitrate_mask: Set a mask of rates to be used for rate control selection * when transmitting a frame. Currently only legacy rates are handled. * The callback can sleep. * @event_callback: Notify driver about any event in mac80211. See * &enum ieee80211_event_type for the different types. * The callback must be atomic. * * @release_buffered_frames: Release buffered frames according to the given * parameters. In the case where the driver buffers some frames for * sleeping stations mac80211 will use this callback to tell the driver * to release some frames, either for PS-poll or uAPSD. * Note that if the @more_data parameter is %false the driver must check * if there are more frames on the given TIDs, and if there are more than * the frames being released then it must still set the more-data bit in * the frame. If the @more_data parameter is %true, then of course the * more-data bit must always be set. * The @tids parameter tells the driver which TIDs to release frames * from, for PS-poll it will always have only a single bit set. * In the case this is used for a PS-poll initiated release, the * @num_frames parameter will always be 1 so code can be shared. In * this case the driver must also set %IEEE80211_TX_STATUS_EOSP flag * on the TX status (and must report TX status) so that the PS-poll * period is properly ended. This is used to avoid sending multiple * responses for a retried PS-poll frame. * In the case this is used for uAPSD, the @num_frames parameter may be * bigger than one, but the driver may send fewer frames (it must send * at least one, however). In this case it is also responsible for * setting the EOSP flag in the QoS header of the frames. Also, when the * service period ends, the driver must set %IEEE80211_TX_STATUS_EOSP * on the last frame in the SP. Alternatively, it may call the function * ieee80211_sta_eosp() to inform mac80211 of the end of the SP. * This callback must be atomic. * @allow_buffered_frames: Prepare device to allow the given number of frames * to go out to the given station. The frames will be sent by mac80211 * via the usual TX path after this call. The TX information for frames * released will also have the %IEEE80211_TX_CTL_NO_PS_BUFFER flag set * and the last one will also have %IEEE80211_TX_STATUS_EOSP set. In case * frames from multiple TIDs are released and the driver might reorder * them between the TIDs, it must set the %IEEE80211_TX_STATUS_EOSP flag * on the last frame and clear it on all others and also handle the EOSP * bit in the QoS header correctly. Alternatively, it can also call the * ieee80211_sta_eosp() function. * The @tids parameter is a bitmap and tells the driver which TIDs the * frames will be on; it will at most have two bits set. * This callback must be atomic. * * @get_et_sset_count: Ethtool API to get string-set count. * * @get_et_stats: Ethtool API to get a set of u64 stats. * * @get_et_strings: Ethtool API to get a set of strings to describe stats * and perhaps other supported types of ethtool data-sets. 
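 *
 * As a sketch of the release flow described in @release_buffered_frames
 * above (my_tx_release is a hypothetical helper returning the number of
 * frames it actually queued; if none could be released, the service
 * period is ended explicitly):
 *
 *	static void my_release_buffered_frames(struct ieee80211_hw *hw,
 *				struct ieee80211_sta *sta, u16 tids,
 *				int num_frames,
 *				enum ieee80211_frame_release_type reason,
 *				bool more_data)
 *	{
 *		if (!my_tx_release(hw->priv, sta, tids, num_frames,
 *				   reason, more_data))
 *			ieee80211_sta_eosp(sta);
 *	}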
* * @mgd_prepare_tx: Prepare for transmitting a management frame for association * before associated. In multi-channel scenarios, a virtual interface is * bound to a channel before it is associated, but as it isn't associated * yet it need not necessarily be given airtime, in particular since any * transmission to a P2P GO needs to be synchronized against the GO's * powersave state. mac80211 will call this function before transmitting a * management frame prior to having successfully associated to allow the * driver to give it channel time for the transmission, to get a response * and to be able to synchronize with the GO. * For drivers that set %IEEE80211_HW_DEAUTH_NEED_MGD_TX_PREP, mac80211 * would also call this function before transmitting a deauthentication * frame in case that no beacon was heard from the AP/P2P GO. * The callback will be called before each transmission and upon return * mac80211 will transmit the frame right away. * If duration is greater than zero, mac80211 hints to the driver the * duration for which the operation is requested. * The callback is optional and can (should!) sleep. * * @mgd_protect_tdls_discover: Protect a TDLS discovery session. After sending * a TDLS discovery-request, we expect a reply to arrive on the AP's * channel. We must stay on the channel (no PSM, scan, etc.), since a TDLS * setup-response is a direct packet not buffered by the AP. * mac80211 will call this function just before the transmission of a TDLS * discovery-request. The recommended period of protection is at least * 2 * (DTIM period). * The callback is optional and can sleep. * * @add_chanctx: Notifies device driver about new channel context creation. * This callback may sleep. * @remove_chanctx: Notifies device driver about channel context destruction. * This callback may sleep. * @change_chanctx: Notifies device driver about channel context changes that * may happen when combining different virtual interfaces on the same * channel context with different settings * This callback may sleep. * @assign_vif_chanctx: Notifies device driver about channel context being bound * to vif. Possible use is for hw queue remapping. * This callback may sleep. * @unassign_vif_chanctx: Notifies device driver about channel context being * unbound from vif. * This callback may sleep. * @switch_vif_chanctx: switch a number of vifs from one chanctx to * another, as specified in the list of * @ieee80211_vif_chanctx_switch passed to the driver, according * to the mode defined in &ieee80211_chanctx_switch_mode. * This callback may sleep. * * @start_ap: Start operation on the AP interface, this is called after all the * information in bss_conf is set and beacon can be retrieved. A channel * context is bound before this is called. Note that if the driver uses * software scan or ROC, this (and @stop_ap) isn't called when the AP is * just "paused" for scanning/ROC, which is indicated by the beacon being * disabled/enabled via @bss_info_changed. * @stop_ap: Stop operation on the AP interface. * * @reconfig_complete: Called after a call to ieee80211_restart_hw() and * during resume, when the reconfiguration has completed. * This can help the driver implement the reconfiguration step (and * indicate mac80211 is ready to receive frames). * This callback may sleep. * * @ipv6_addr_change: IPv6 address assignment on the given interface changed. * Currently, this is only called for managed or P2P client interfaces. * This callback is optional; it must not sleep. 
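 *
 * As an illustrative sketch of the channel context callbacks described
 * above (my_ctx and my_bind_vif_to_phy_ctx are hypothetical), a driver
 * typically keeps its own state in the chanctx drv_priv area:
 *
 *	static int my_assign_vif_chanctx(struct ieee80211_hw *hw,
 *					 struct ieee80211_vif *vif,
 *					 struct ieee80211_chanctx_conf *ctx)
 *	{
 *		struct my_ctx *mctx = (void *)ctx->drv_priv;
 *
 *		return my_bind_vif_to_phy_ctx(hw->priv, vif, mctx);
 *	}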
* * @channel_switch_beacon: Starts a channel switch to a new channel. * Beacons are modified to include CSA or ECSA IEs before calling this * function. The corresponding count fields in these IEs must be * decremented, and when they reach 1 the driver must call * ieee80211_csa_finish(). Drivers which use ieee80211_beacon_get() * get the csa counter decremented by mac80211, but must check if it is * 1 using ieee80211_csa_is_complete() after the beacon has been * transmitted and then call ieee80211_csa_finish(). * If the CSA count starts as zero or 1, this function will not be called, * since there won't be any time to beacon before the switch anyway. * @pre_channel_switch: This is an optional callback that is called * before a channel switch procedure is started (i.e. when a STA * gets a CSA or a userspace initiated channel-switch), allowing * the driver to prepare for the channel switch. * @post_channel_switch: This is an optional callback that is called * after a channel switch procedure is completed, allowing the * driver to go back to a normal configuration. * @abort_channel_switch: This is an optional callback that is called * when the channel switch procedure was aborted, allowing the * driver to go back to a normal configuration. * @channel_switch_rx_beacon: This is an optional callback that is called * when a channel switch procedure is in progress and an additional beacon * with a CSA IE was received, allowing the driver to track changes in the * count. * @join_ibss: Join an IBSS (on an IBSS interface); this is called after all * information in bss_conf is set up and the beacon can be retrieved. A * channel context is bound before this is called. * @leave_ibss: Leave the IBSS again. * * @get_expected_throughput: extract the expected throughput towards the * specified station. The returned value is expressed in Kbps. It returns 0 * if the RC algorithm does not have proper data to provide. * * @get_txpower: get current maximum tx power (in dBm) based on configuration * and hardware limits. * * @tdls_channel_switch: Start channel-switching with a TDLS peer. The driver * is responsible for continually initiating channel-switching operations * and returning to the base channel for communication with the AP. The * driver receives a channel-switch request template and the location of * the switch-timing IE within the template as part of the invocation. * The template is valid only within the call, and the driver can * optionally copy the skb for further re-use. * @tdls_cancel_channel_switch: Stop channel-switching with a TDLS peer. Both * peers must be on the base channel when the call completes. * @tdls_recv_channel_switch: a TDLS channel-switch related frame (request or * response) has been received from a remote peer. The driver gets * parameters parsed from the incoming frame and may use them to continue * an ongoing channel-switch operation. In addition, a channel-switch * response template is provided, together with the location of the * switch-timing IE within the template. The skb can only be used within * the function call. * * @wake_tx_queue: Called when new packets have been added to the queue. * @sync_rx_queues: Process all pending frames in RSS queues. This is a * synchronization which is needed in case the driver has frames pending * in its RSS queues that were received prior to the control path action * currently taken (e.g. disassociation) but are not yet processed. * * @start_nan: join an existing NAN cluster, or create a new one. * @stop_nan: leave the NAN cluster.
* @nan_change_conf: change NAN configuration. The data in cfg80211_nan_conf * contains full new configuration and changes specify which parameters * are changed with respect to the last NAN config. * The driver gets both full configuration and the changed parameters since * some devices may need the full configuration while others need only the * changed parameters. * @add_nan_func: Add a NAN function. Returns 0 on success. The data in * cfg80211_nan_func must not be referenced outside the scope of * this call. * @del_nan_func: Remove a NAN function. The driver must call * ieee80211_nan_func_terminated() with * NL80211_NAN_FUNC_TERM_REASON_USER_REQUEST reason code upon removal. * @can_aggregate_in_amsdu: Called in order to determine if HW supports * aggregating two specific frames in the same A-MSDU. The relation * between the skbs should be symmetric and transitive. Note that while * skb is always a real frame, head may or may not be an A-MSDU. * @get_ftm_responder_stats: Retrieve FTM responder statistics, if available. * Statistics should be cumulative, currently no way to reset is provided. * * @start_pmsr: start peer measurement (e.g. FTM) (this call can sleep) * @abort_pmsr: abort peer measurement (this call can sleep) */ struct ieee80211_ops { void (*tx)(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, struct sk_buff *skb); int (*start)(struct ieee80211_hw *hw); void (*stop)(struct ieee80211_hw *hw); #ifdef CONFIG_PM int (*suspend)(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan); int (*resume)(struct ieee80211_hw *hw); void (*set_wakeup)(struct ieee80211_hw *hw, bool enabled); #endif int (*add_interface)(struct ieee80211_hw *hw, struct ieee80211_vif *vif); int (*change_interface)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, enum nl80211_iftype new_type, bool p2p); void (*remove_interface)(struct ieee80211_hw *hw, struct ieee80211_vif *vif); int (*config)(struct ieee80211_hw *hw, u32 changed); void (*bss_info_changed)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *info, u32 changed); int (*start_ap)(struct ieee80211_hw *hw, struct ieee80211_vif *vif); void (*stop_ap)(struct ieee80211_hw *hw, struct ieee80211_vif *vif); u64 (*prepare_multicast)(struct ieee80211_hw *hw, struct netdev_hw_addr_list *mc_list); void (*configure_filter)(struct ieee80211_hw *hw, unsigned int changed_flags, unsigned int *total_flags, u64 multicast); void (*config_iface_filter)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, unsigned int filter_flags, unsigned int changed_flags); int (*set_tim)(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set); int (*set_key)(struct ieee80211_hw *hw, enum set_key_cmd cmd, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *key); void (*update_tkip_key)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_key_conf *conf, struct ieee80211_sta *sta, u32 iv32, u16 *phase1key); void (*set_rekey_data)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct cfg80211_gtk_rekey_data *data); void (*set_default_unicast_key)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, int idx); int (*hw_scan)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_scan_request *req); void (*cancel_hw_scan)(struct ieee80211_hw *hw, struct ieee80211_vif *vif); int (*sched_scan_start)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct cfg80211_sched_scan_request *req, struct ieee80211_scan_ies *ies); int (*sched_scan_stop)(struct ieee80211_hw *hw, struct 
ieee80211_vif *vif); void (*sw_scan_start)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, const u8 *mac_addr); void (*sw_scan_complete)(struct ieee80211_hw *hw, struct ieee80211_vif *vif); int (*get_stats)(struct ieee80211_hw *hw, struct ieee80211_low_level_stats *stats); void (*get_key_seq)(struct ieee80211_hw *hw, struct ieee80211_key_conf *key, struct ieee80211_key_seq *seq); int (*set_frag_threshold)(struct ieee80211_hw *hw, u32 value); int (*set_rts_threshold)(struct ieee80211_hw *hw, u32 value); int (*sta_add)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta); int (*sta_remove)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta); #ifdef CPTCFG_MAC80211_DEBUGFS void (*sta_add_debugfs)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct dentry *dir); #endif void (*sta_notify)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, enum sta_notify_cmd, struct ieee80211_sta *sta); int (*sta_state)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, enum ieee80211_sta_state old_state, enum ieee80211_sta_state new_state); void (*sta_pre_rcu_remove)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta); void (*sta_rc_update)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, u32 changed); void (*sta_rate_tbl_update)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta); void (*sta_statistics)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct station_info *sinfo); int (*conf_tx)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 ac, const struct ieee80211_tx_queue_params *params); u64 (*get_tsf)(struct ieee80211_hw *hw, struct ieee80211_vif *vif); void (*set_tsf)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u64 tsf); void (*offset_tsf)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, s64 offset); void (*reset_tsf)(struct ieee80211_hw *hw, struct ieee80211_vif *vif); int (*tx_last_beacon)(struct ieee80211_hw *hw); /** * @ampdu_action: * Perform a certain A-MPDU action. * The RA/TID combination determines the destination and TID we want * the ampdu action to be performed for. The action is defined through * ieee80211_ampdu_mlme_action. * When the action is set to %IEEE80211_AMPDU_TX_OPERATIONAL the driver * may neither send aggregates containing more subframes than @buf_size * nor send aggregates in a way that lost frames would exceed the * buffer size. If just limiting the aggregate size, this would be * possible with a buf_size of 8: * * - ``TX: 1.....7`` * - ``RX: 2....7`` (lost frame #1) * - ``TX: 8..1...`` * * which is invalid since #1 was now re-transmitted well past the * buffer size of 8. Correct ways to retransmit #1 would be: * * - ``TX: 1 or`` * - ``TX: 18 or`` * - ``TX: 81`` * * Even ``189`` would be wrong since 1 could be lost again. * * Returns a negative error code on failure. * The callback can sleep. 
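 *
 * As a rough sketch, a driver's handler might dispatch on the requested
 * action like this (hypothetical foo_* helpers, not part of mac80211)::
 *
 *	static int foo_ampdu_action(struct ieee80211_hw *hw,
 *				    struct ieee80211_vif *vif,
 *				    struct ieee80211_ampdu_params *params)
 *	{
 *		switch (params->action) {
 *		case IEEE80211_AMPDU_RX_START:
 *			return foo_hw_rx_agg_start(hw, params->sta,
 *						   params->tid, params->ssn);
 *		case IEEE80211_AMPDU_RX_STOP:
 *			return foo_hw_rx_agg_stop(hw, params->sta,
 *						  params->tid);
 *		case IEEE80211_AMPDU_TX_START:
 *			// prepare the hardware, then tell mac80211
 *			ieee80211_start_tx_ba_cb_irqsafe(vif,
 *							 params->sta->addr,
 *							 params->tid);
 *			return 0;
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}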
*/ int (*ampdu_action)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_ampdu_params *params); int (*get_survey)(struct ieee80211_hw *hw, int idx, struct survey_info *survey); void (*rfkill_poll)(struct ieee80211_hw *hw); void (*set_coverage_class)(struct ieee80211_hw *hw, s16 coverage_class); #ifdef CPTCFG_NL80211_TESTMODE int (*testmode_cmd)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, void *data, int len); int (*testmode_dump)(struct ieee80211_hw *hw, struct sk_buff *skb, struct netlink_callback *cb, void *data, int len); #endif void (*flush)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u32 queues, bool drop); void (*channel_switch)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_channel_switch *ch_switch); int (*set_antenna)(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant); int (*get_antenna)(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant); int (*remain_on_channel)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_channel *chan, int duration, enum ieee80211_roc_type type); int (*cancel_remain_on_channel)(struct ieee80211_hw *hw); int (*set_ringparam)(struct ieee80211_hw *hw, u32 tx, u32 rx); void (*get_ringparam)(struct ieee80211_hw *hw, u32 *tx, u32 *tx_max, u32 *rx, u32 *rx_max); bool (*tx_frames_pending)(struct ieee80211_hw *hw); int (*set_bitrate_mask)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, const struct cfg80211_bitrate_mask *mask); void (*event_callback)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, const struct ieee80211_event *event); void (*allow_buffered_frames)(struct ieee80211_hw *hw, struct ieee80211_sta *sta, u16 tids, int num_frames, enum ieee80211_frame_release_type reason, bool more_data); void (*release_buffered_frames)(struct ieee80211_hw *hw, struct ieee80211_sta *sta, u16 tids, int num_frames, enum ieee80211_frame_release_type reason, bool more_data); int (*get_et_sset_count)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, int sset); void (*get_et_stats)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ethtool_stats *stats, u64 *data); void (*get_et_strings)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u32 sset, u8 *data); void (*mgd_prepare_tx)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 duration); void (*mgd_protect_tdls_discover)(struct ieee80211_hw *hw, struct ieee80211_vif *vif); int (*add_chanctx)(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *ctx); void (*remove_chanctx)(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *ctx); void (*change_chanctx)(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *ctx, u32 changed); int (*assign_vif_chanctx)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_chanctx_conf *ctx); void (*unassign_vif_chanctx)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_chanctx_conf *ctx); int (*switch_vif_chanctx)(struct ieee80211_hw *hw, struct ieee80211_vif_chanctx_switch *vifs, int n_vifs, enum ieee80211_chanctx_switch_mode mode); void (*reconfig_complete)(struct ieee80211_hw *hw, enum ieee80211_reconfig_type reconfig_type); #if IS_ENABLED(CONFIG_IPV6) void (*ipv6_addr_change)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct inet6_dev *idev); #endif void (*channel_switch_beacon)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct cfg80211_chan_def *chandef); int (*pre_channel_switch)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_channel_switch *ch_switch); int (*post_channel_switch)(struct ieee80211_hw *hw, struct 
				    ieee80211_vif *vif);
	void (*abort_channel_switch)(struct ieee80211_hw *hw,
				     struct ieee80211_vif *vif);
	void (*channel_switch_rx_beacon)(struct ieee80211_hw *hw,
					 struct ieee80211_vif *vif,
					 struct ieee80211_channel_switch *ch_switch);
	int (*join_ibss)(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
	void (*leave_ibss)(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
	u32 (*get_expected_throughput)(struct ieee80211_hw *hw,
				       struct ieee80211_sta *sta);
	int (*get_txpower)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			   int *dbm);
	int (*tdls_channel_switch)(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif,
				   struct ieee80211_sta *sta, u8 oper_class,
				   struct cfg80211_chan_def *chandef,
				   struct sk_buff *tmpl_skb, u32 ch_sw_tm_ie);
	void (*tdls_cancel_channel_switch)(struct ieee80211_hw *hw,
					   struct ieee80211_vif *vif,
					   struct ieee80211_sta *sta);
	void (*tdls_recv_channel_switch)(struct ieee80211_hw *hw,
					 struct ieee80211_vif *vif,
					 struct ieee80211_tdls_ch_sw_params *params);
	void (*wake_tx_queue)(struct ieee80211_hw *hw,
			      struct ieee80211_txq *txq);
	void (*sync_rx_queues)(struct ieee80211_hw *hw);
	int (*start_nan)(struct ieee80211_hw *hw,
			 struct ieee80211_vif *vif,
			 struct cfg80211_nan_conf *conf);
	int (*stop_nan)(struct ieee80211_hw *hw,
			struct ieee80211_vif *vif);
	int (*nan_change_conf)(struct ieee80211_hw *hw,
			       struct ieee80211_vif *vif,
			       struct cfg80211_nan_conf *conf, u32 changes);
	int (*add_nan_func)(struct ieee80211_hw *hw,
			    struct ieee80211_vif *vif,
			    const struct cfg80211_nan_func *nan_func);
	void (*del_nan_func)(struct ieee80211_hw *hw,
			     struct ieee80211_vif *vif,
			     u8 instance_id);
	bool (*can_aggregate_in_amsdu)(struct ieee80211_hw *hw,
				       struct sk_buff *head,
				       struct sk_buff *skb);
	int (*get_ftm_responder_stats)(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif,
				       struct cfg80211_ftm_responder_stats *ftm_stats);
	int (*start_pmsr)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			  struct cfg80211_pmsr_request *request);
	void (*abort_pmsr)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			   struct cfg80211_pmsr_request *request);
};

/**
 * ieee80211_alloc_hw_nm - Allocate a new hardware device
 *
 * This must be called once for each hardware device. The returned pointer
 * must be used to refer to this device when calling other functions.
 * mac80211 allocates a private data area for the driver pointed to by
 * @priv in &struct ieee80211_hw, the size of this area is given as
 * @priv_data_len.
 *
 * @priv_data_len: length of private data
 * @ops: callbacks for this device
 * @requested_name: Requested name for this device.
 *	NULL is a valid value, and means use the default naming (phy%d)
 *
 * Return: A pointer to the new hardware device, or %NULL on error.
 */
struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
					   const struct ieee80211_ops *ops,
					   const char *requested_name);

/**
 * ieee80211_alloc_hw - Allocate a new hardware device
 *
 * This must be called once for each hardware device. The returned pointer
 * must be used to refer to this device when calling other functions.
 * mac80211 allocates a private data area for the driver pointed to by
 * @priv in &struct ieee80211_hw, the size of this area is given as
 * @priv_data_len.
 *
 * @priv_data_len: length of private data
 * @ops: callbacks for this device
 *
 * Return: A pointer to the new hardware device, or %NULL on error.
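 *
 * A minimal probe-path sketch (hypothetical foo_* names, most error
 * handling elided)::
 *
 *	struct foo_priv *priv;
 *	struct ieee80211_hw *hw;
 *	int err;
 *
 *	hw = ieee80211_alloc_hw(sizeof(*priv), &foo_ops);
 *	if (!hw)
 *		return -ENOMEM;
 *	priv = hw->priv;
 *	// ... fill the contained wiphy's information, then:
 *	err = ieee80211_register_hw(hw);
 *	if (err) {
 *		ieee80211_free_hw(hw);
 *		return err;
 *	}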
 */
static inline
struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
					const struct ieee80211_ops *ops)
{
	return ieee80211_alloc_hw_nm(priv_data_len, ops, NULL);
}

/**
 * ieee80211_register_hw - Register hardware device
 *
 * You must call this function before any other functions in
 * mac80211. Note that before a hardware can be registered, you
 * need to fill the contained wiphy's information.
 *
 * @hw: the device to register as returned by ieee80211_alloc_hw()
 *
 * Return: 0 on success. An error code otherwise.
 */
int ieee80211_register_hw(struct ieee80211_hw *hw);

/**
 * struct ieee80211_tpt_blink - throughput blink description
 * @throughput: throughput in Kbit/sec
 * @blink_time: blink time in milliseconds
 *	(full cycle, i.e. one off + one on period)
 */
struct ieee80211_tpt_blink {
	int throughput;
	int blink_time;
};

/**
 * enum ieee80211_tpt_led_trigger_flags - throughput trigger flags
 * @IEEE80211_TPT_LEDTRIG_FL_RADIO: enable blinking with radio
 * @IEEE80211_TPT_LEDTRIG_FL_WORK: enable blinking when working
 * @IEEE80211_TPT_LEDTRIG_FL_CONNECTED: enable blinking when at least one
 *	interface is connected in some way, including being an AP
 */
enum ieee80211_tpt_led_trigger_flags {
	IEEE80211_TPT_LEDTRIG_FL_RADIO		= BIT(0),
	IEEE80211_TPT_LEDTRIG_FL_WORK		= BIT(1),
	IEEE80211_TPT_LEDTRIG_FL_CONNECTED	= BIT(2),
};

#ifdef CPTCFG_MAC80211_LEDS
const char *__ieee80211_get_tx_led_name(struct ieee80211_hw *hw);
const char *__ieee80211_get_rx_led_name(struct ieee80211_hw *hw);
const char *__ieee80211_get_assoc_led_name(struct ieee80211_hw *hw);
const char *__ieee80211_get_radio_led_name(struct ieee80211_hw *hw);
const char *
__ieee80211_create_tpt_led_trigger(struct ieee80211_hw *hw,
				   unsigned int flags,
				   const struct ieee80211_tpt_blink *blink_table,
				   unsigned int blink_table_len);
#endif
/**
 * ieee80211_get_tx_led_name - get name of TX LED
 *
 * mac80211 creates a transmit LED trigger for each wireless hardware
 * that can be used to drive LEDs if your driver registers a LED device.
 * This function returns the name (or %NULL if not configured for LEDs)
 * of the trigger so you can automatically link the LED device.
 *
 * @hw: the hardware to get the LED trigger name for
 *
 * Return: The name of the LED trigger. %NULL if not configured for LEDs.
 */
static inline const char *ieee80211_get_tx_led_name(struct ieee80211_hw *hw)
{
#ifdef CPTCFG_MAC80211_LEDS
	return __ieee80211_get_tx_led_name(hw);
#else
	return NULL;
#endif
}

/**
 * ieee80211_get_rx_led_name - get name of RX LED
 *
 * mac80211 creates a receive LED trigger for each wireless hardware
 * that can be used to drive LEDs if your driver registers a LED device.
 * This function returns the name (or %NULL if not configured for LEDs)
 * of the trigger so you can automatically link the LED device.
 *
 * @hw: the hardware to get the LED trigger name for
 *
 * Return: The name of the LED trigger. %NULL if not configured for LEDs.
 */
static inline const char *ieee80211_get_rx_led_name(struct ieee80211_hw *hw)
{
#ifdef CPTCFG_MAC80211_LEDS
	return __ieee80211_get_rx_led_name(hw);
#else
	return NULL;
#endif
}

/**
 * ieee80211_get_assoc_led_name - get name of association LED
 *
 * mac80211 creates an association LED trigger for each wireless hardware
 * that can be used to drive LEDs if your driver registers a LED device.
 * This function returns the name (or %NULL if not configured for LEDs)
 * of the trigger so you can automatically link the LED device.
 *
 * @hw: the hardware to get the LED trigger name for
 *
 * Return: The name of the LED trigger. %NULL if not configured for LEDs.
 */
static inline const char *ieee80211_get_assoc_led_name(struct ieee80211_hw *hw)
{
#ifdef CPTCFG_MAC80211_LEDS
	return __ieee80211_get_assoc_led_name(hw);
#else
	return NULL;
#endif
}

/**
 * ieee80211_get_radio_led_name - get name of radio LED
 *
 * mac80211 creates a radio change LED trigger for each wireless hardware
 * that can be used to drive LEDs if your driver registers a LED device.
 * This function returns the name (or %NULL if not configured for LEDs)
 * of the trigger so you can automatically link the LED device.
 *
 * @hw: the hardware to get the LED trigger name for
 *
 * Return: The name of the LED trigger. %NULL if not configured for LEDs.
 */
static inline const char *ieee80211_get_radio_led_name(struct ieee80211_hw *hw)
{
#ifdef CPTCFG_MAC80211_LEDS
	return __ieee80211_get_radio_led_name(hw);
#else
	return NULL;
#endif
}

/**
 * ieee80211_create_tpt_led_trigger - create throughput LED trigger
 * @hw: the hardware to create the trigger for
 * @flags: trigger flags, see &enum ieee80211_tpt_led_trigger_flags
 * @blink_table: the blink table -- needs to be ordered by throughput
 * @blink_table_len: size of the blink table
 *
 * Return: %NULL (in case of error, or if no LED triggers are
 * configured) or the name of the new trigger.
 *
 * Note: This function must be called before ieee80211_register_hw().
 */
static inline const char *
ieee80211_create_tpt_led_trigger(struct ieee80211_hw *hw, unsigned int flags,
				 const struct ieee80211_tpt_blink *blink_table,
				 unsigned int blink_table_len)
{
#ifdef CPTCFG_MAC80211_LEDS
	return __ieee80211_create_tpt_led_trigger(hw, flags, blink_table,
						  blink_table_len);
#else
	return NULL;
#endif
}

/**
 * ieee80211_unregister_hw - Unregister a hardware device
 *
 * This function instructs mac80211 to free allocated resources
 * and unregister netdevices from the networking subsystem.
 *
 * @hw: the hardware to unregister
 */
void ieee80211_unregister_hw(struct ieee80211_hw *hw);

/**
 * ieee80211_free_hw - free hardware descriptor
 *
 * This function frees everything that was allocated, including the
 * private data for the driver. You must call ieee80211_unregister_hw()
 * before calling this function.
 *
 * @hw: the hardware to free
 */
void ieee80211_free_hw(struct ieee80211_hw *hw);

/**
 * ieee80211_restart_hw - restart hardware completely
 *
 * Call this function when the hardware was restarted for some reason
 * (hardware error, ...) and the driver is unable to restore its state
 * by itself. mac80211 assumes that at this point the driver/hardware
 * is completely uninitialised and stopped; it starts the process by
 * calling the ->start() operation. The driver will need to reset all
 * internal state that it has prior to calling this function.
 *
 * @hw: the hardware to restart
 */
void ieee80211_restart_hw(struct ieee80211_hw *hw);

/**
 * ieee80211_rx_napi - receive frame from NAPI context
 *
 * Use this function to hand received frames to mac80211. The receive
 * buffer in @skb must start with an IEEE 802.11 header. If a paged @skb
 * is used, the driver is recommended to put the ieee80211 header of the
 * frame on the linear part of the @skb to avoid memory allocation and/or
 * memcpy by the stack.
 *
 * This function may not be called in IRQ context. Calls to this function
 * for a single hardware must be synchronized against each other. Calls to
 * this function, ieee80211_rx_ni() and ieee80211_rx_irqsafe() may not be
 * mixed for a single hardware. Must not run concurrently with
 * ieee80211_tx_status() or ieee80211_tx_status_ni().
 *
 * This function must be called with BHs disabled.
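 *
 * A minimal RX-path sketch from a NAPI poll routine, which runs in
 * softirq context with BHs already disabled (hypothetical foo_* names)::
 *
 *	static int foo_napi_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct foo_priv *priv =
 *			container_of(napi, struct foo_priv, napi);
 *		struct sk_buff *skb;
 *		int done = 0;
 *
 *		while (done < budget &&
 *		       (skb = foo_get_rx_frame(priv))) {
 *			ieee80211_rx_napi(priv->hw, NULL, skb, napi);
 *			done++;
 *		}
 *		if (done < budget)
 *			napi_complete_done(napi, done);
 *		return done;
 *	}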
 *
 * @hw: the hardware this frame came in on
 * @sta: the station the frame was received from, or %NULL
 * @skb: the buffer to receive, owned by mac80211 after this call
 * @napi: the NAPI context
 */
void ieee80211_rx_napi(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
		       struct sk_buff *skb, struct napi_struct *napi);

/**
 * ieee80211_rx - receive frame
 *
 * Use this function to hand received frames to mac80211. The receive
 * buffer in @skb must start with an IEEE 802.11 header. If a paged @skb
 * is used, the driver is recommended to put the ieee80211 header of the
 * frame on the linear part of the @skb to avoid memory allocation and/or
 * memcpy by the stack.
 *
 * This function may not be called in IRQ context. Calls to this function
 * for a single hardware must be synchronized against each other. Calls to
 * this function, ieee80211_rx_ni() and ieee80211_rx_irqsafe() may not be
 * mixed for a single hardware. Must not run concurrently with
 * ieee80211_tx_status() or ieee80211_tx_status_ni().
 *
 * In process context, use ieee80211_rx_ni() instead.
 *
 * @hw: the hardware this frame came in on
 * @skb: the buffer to receive, owned by mac80211 after this call
 */
static inline void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	ieee80211_rx_napi(hw, NULL, skb, NULL);
}

/**
 * ieee80211_rx_irqsafe - receive frame
 *
 * Like ieee80211_rx() but can be called in IRQ context
 * (internally defers to a tasklet.)
 *
 * Calls to this function, ieee80211_rx() or ieee80211_rx_ni() may not
 * be mixed for a single hardware. Must not run concurrently with
 * ieee80211_tx_status() or ieee80211_tx_status_ni().
 *
 * @hw: the hardware this frame came in on
 * @skb: the buffer to receive, owned by mac80211 after this call
 */
void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb);

/**
 * ieee80211_rx_ni - receive frame (in process context)
 *
 * Like ieee80211_rx() but can be called in process context
 * (internally disables bottom halves).
 *
 * Calls to this function, ieee80211_rx() and ieee80211_rx_irqsafe() may
 * not be mixed for a single hardware. Must not run concurrently with
 * ieee80211_tx_status() or ieee80211_tx_status_ni().
 *
 * @hw: the hardware this frame came in on
 * @skb: the buffer to receive, owned by mac80211 after this call
 */
static inline void ieee80211_rx_ni(struct ieee80211_hw *hw,
				   struct sk_buff *skb)
{
	local_bh_disable();
	ieee80211_rx(hw, skb);
	local_bh_enable();
}

/**
 * ieee80211_sta_ps_transition - PS transition for connected sta
 *
 * When operating in AP mode with the %IEEE80211_HW_AP_LINK_PS
 * flag set, use this function to inform mac80211 about a connected station
 * entering/leaving PS mode.
 *
 * This function may not be called in IRQ context or with softirqs enabled.
 *
 * Calls to this function for a single hardware must be synchronized against
 * each other.
 *
 * @sta: currently connected sta
 * @start: start or stop PS
 *
 * Return: 0 on success. -EINVAL when the requested PS mode is already set.
 */
int ieee80211_sta_ps_transition(struct ieee80211_sta *sta, bool start);

/**
 * ieee80211_sta_ps_transition_ni - PS transition for connected sta
 *                                  (in process context)
 *
 * Like ieee80211_sta_ps_transition() but can be called in process context
 * (internally disables bottom halves). Concurrent call restriction still
 * applies.
 *
 * @sta: currently connected sta
 * @start: start or stop PS
 *
 * Return: Like ieee80211_sta_ps_transition().
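 *
 * A minimal sketch, assuming the driver detects PM-bit changes in its
 * RX path with %IEEE80211_HW_AP_LINK_PS set (hypothetical foo_dbg(),
 * and @sta/@sleeping derived from the received frame)::
 *
 *	int ret = ieee80211_sta_ps_transition_ni(sta, sleeping);
 *
 *	if (ret == -EINVAL)
 *		foo_dbg("station already in requested PS state\n");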
*/ static inline int ieee80211_sta_ps_transition_ni(struct ieee80211_sta *sta, bool start) { int ret; local_bh_disable(); ret = ieee80211_sta_ps_transition(sta, start); local_bh_enable(); return ret; } /** * ieee80211_sta_pspoll - PS-Poll frame received * @sta: currently connected station * * When operating in AP mode with the %IEEE80211_HW_AP_LINK_PS flag set, * use this function to inform mac80211 that a PS-Poll frame from a * connected station was received. * This must be used in conjunction with ieee80211_sta_ps_transition() * and possibly ieee80211_sta_uapsd_trigger(); calls to all three must * be serialized. */ void ieee80211_sta_pspoll(struct ieee80211_sta *sta); /** * ieee80211_sta_uapsd_trigger - (potential) U-APSD trigger frame received * @sta: currently connected station * @tid: TID of the received (potential) trigger frame * * When operating in AP mode with the %IEEE80211_HW_AP_LINK_PS flag set, * use this function to inform mac80211 that a (potential) trigger frame * from a connected station was received. * This must be used in conjunction with ieee80211_sta_ps_transition() * and possibly ieee80211_sta_pspoll(); calls to all three must be * serialized. * %IEEE80211_NUM_TIDS can be passed as the tid if the tid is unknown. * In this case, mac80211 will not check that this tid maps to an AC * that is trigger enabled and assume that the caller did the proper * checks. */ void ieee80211_sta_uapsd_trigger(struct ieee80211_sta *sta, u8 tid); /* * The TX headroom reserved by mac80211 for its own tx_status functions. * This is enough for the radiotap header. */ #define IEEE80211_TX_STATUS_HEADROOM ALIGN(14, 4) /** * ieee80211_sta_set_buffered - inform mac80211 about driver-buffered frames * @sta: &struct ieee80211_sta pointer for the sleeping station * @tid: the TID that has buffered frames * @buffered: indicates whether or not frames are buffered for this TID * * If a driver buffers frames for a powersave station instead of passing * them back to mac80211 for retransmission, the station may still need * to be told that there are buffered frames via the TIM bit. * * This function informs mac80211 whether or not there are frames that are * buffered in the driver for a given TID; mac80211 can then use this data * to set the TIM bit (NOTE: This may call back into the driver's set_tim * call! Beware of the locking!) * * If all frames are released to the station (due to PS-poll or uAPSD) * then the driver needs to inform mac80211 that there no longer are * frames buffered. However, when the station wakes up mac80211 assumes * that all buffered frames will be transmitted and clears this data, * drivers need to make sure they inform mac80211 about all buffered * frames on the sleep transition (sta_notify() with %STA_NOTIFY_SLEEP). * * Note that technically mac80211 only needs to know this per AC, not per * TID, but since driver buffering will inevitably happen per TID (since * it is related to aggregation) it is easier to make mac80211 map the * TID to the AC as required instead of keeping track in all drivers that * use this API. */ void ieee80211_sta_set_buffered(struct ieee80211_sta *sta, u8 tid, bool buffered); /** * ieee80211_get_tx_rates - get the selected transmit rates for a packet * * Call this function in a driver with per-packet rate selection support * to combine the rate info in the packet tx info with the most recent * rate selection table for the station entry. * * @vif: &struct ieee80211_vif pointer from the add_interface callback. 
 * @sta: the receiver station to which this packet is sent.
 * @skb: the frame to be transmitted.
 * @dest: buffer for extracted rate/retry information
 * @max_rates: maximum number of rates to fetch
 */
void ieee80211_get_tx_rates(struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta,
			    struct sk_buff *skb,
			    struct ieee80211_tx_rate *dest,
			    int max_rates);

/**
 * ieee80211_sta_set_expected_throughput - set the expected tpt for a station
 *
 * Call this function to notify mac80211 about a change in expected throughput
 * to a station. A driver for a device that does rate control in firmware can
 * call this function when the expected throughput estimate towards a station
 * changes. The information is used to tune the CoDel AQM applied to traffic
 * going towards that station (which can otherwise be too aggressive and cause
 * slow stations to starve).
 *
 * @pubsta: the station to set throughput for.
 * @thr: the current expected throughput in kbps.
 */
void ieee80211_sta_set_expected_throughput(struct ieee80211_sta *pubsta,
					   u32 thr);

/**
 * ieee80211_tx_rate_update - transmit rate update callback
 *
 * Drivers should call this function with a non-NULL @pubsta.
 * This function can be used by drivers that do not have a provision
 * for updating the tx rate in the data path.
 *
 * @hw: the hardware the frame was transmitted by
 * @pubsta: the station to update the tx rate for.
 * @info: tx status information
 */
void ieee80211_tx_rate_update(struct ieee80211_hw *hw,
			      struct ieee80211_sta *pubsta,
			      struct ieee80211_tx_info *info);

/**
 * ieee80211_tx_status - transmit status callback
 *
 * Call this function for all transmitted frames after they have been
 * transmitted. It is permissible to not call this function for
 * multicast frames but this can affect statistics.
 *
 * This function may not be called in IRQ context. Calls to this function
 * for a single hardware must be synchronized against each other. Calls
 * to this function, ieee80211_tx_status_ni() and ieee80211_tx_status_irqsafe()
 * may not be mixed for a single hardware. Must not run concurrently with
 * ieee80211_rx() or ieee80211_rx_ni().
 *
 * @hw: the hardware the frame was transmitted by
 * @skb: the frame that was transmitted, owned by mac80211 after this call
 */
void ieee80211_tx_status(struct ieee80211_hw *hw,
			 struct sk_buff *skb);

#ifdef CPTCFG_MAC80211_LATENCY_MEASUREMENTS
/**
 * ieee80211_tx_lat_thrshld_cfg - tx latency threshold configure
 *
 * Call this function to configure the latency threshold on tx packets
 * that triggers debug data collection as part of the WRT mechanism.
 *
 * @hw: the hardware the firmware was uploaded to.
 * @thrshld: the threshold for the latency of tx packets.
 * @tid: the TID that the latency was collected on.
 * @window: window before collecting the data.
 * @mode: internal buffer or continuous recording (recording with MIPI)
 * @iface: the interface we are checking on.
 */
void ieee80211_tx_lat_thrshld_cfg(struct ieee80211_hw *hw, u32 thrshld,
				  u16 tid, u16 window, u16 mode, u32 iface);
#endif /* CPTCFG_MAC80211_LATENCY_MEASUREMENTS */

/**
 * ieee80211_tx_status_ext - extended transmit status callback
 *
 * This function can be used as a replacement for ieee80211_tx_status
 * in drivers that may want to provide extra information that does not
 * fit into &struct ieee80211_tx_info.
 *
 * Calls to this function for a single hardware must be synchronized
 * against each other. Calls to this function, ieee80211_tx_status_ni()
 * and ieee80211_tx_status_irqsafe() may not be mixed for a single hardware.
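 *
 * A minimal completion-path sketch (hypothetical driver variables; note
 * that ieee80211_tx_status_noskb() below wraps exactly this pattern
 * without the skb)::
 *
 *	struct ieee80211_tx_status status = {
 *		.sta = sta,	// may be NULL for multicast
 *		.info = IEEE80211_SKB_CB(skb),
 *		.skb = skb,
 *	};
 *
 *	ieee80211_tx_status_ext(hw, &status);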
* * @hw: the hardware the frame was transmitted by * @status: tx status information */ void ieee80211_tx_status_ext(struct ieee80211_hw *hw, struct ieee80211_tx_status *status); /** * ieee80211_tx_status_noskb - transmit status callback without skb * * This function can be used as a replacement for ieee80211_tx_status * in drivers that cannot reliably map tx status information back to * specific skbs. * * Calls to this function for a single hardware must be synchronized * against each other. Calls to this function, ieee80211_tx_status_ni() * and ieee80211_tx_status_irqsafe() may not be mixed for a single hardware. * * @hw: the hardware the frame was transmitted by * @sta: the receiver station to which this packet is sent * (NULL for multicast packets) * @info: tx status information */ static inline void ieee80211_tx_status_noskb(struct ieee80211_hw *hw, struct ieee80211_sta *sta, struct ieee80211_tx_info *info) { struct ieee80211_tx_status status = { .sta = sta, .info = info, }; ieee80211_tx_status_ext(hw, &status); } /** * ieee80211_tx_status_ni - transmit status callback (in process context) * * Like ieee80211_tx_status() but can be called in process context. * * Calls to this function, ieee80211_tx_status() and * ieee80211_tx_status_irqsafe() may not be mixed * for a single hardware. * * @hw: the hardware the frame was transmitted by * @skb: the frame that was transmitted, owned by mac80211 after this call */ static inline void ieee80211_tx_status_ni(struct ieee80211_hw *hw, struct sk_buff *skb) { local_bh_disable(); ieee80211_tx_status(hw, skb); local_bh_enable(); } /** * ieee80211_tx_status_irqsafe - IRQ-safe transmit status callback * * Like ieee80211_tx_status() but can be called in IRQ context * (internally defers to a tasklet.) * * Calls to this function, ieee80211_tx_status() and * ieee80211_tx_status_ni() may not be mixed for a single hardware. * * @hw: the hardware the frame was transmitted by * @skb: the frame that was transmitted, owned by mac80211 after this call */ void ieee80211_tx_status_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb); /** * ieee80211_report_low_ack - report non-responding station * * When operating in AP-mode, call this function to report a non-responding * connected STA. * * @sta: the non-responding connected sta * @num_packets: number of packets sent to @sta without a response */ void ieee80211_report_low_ack(struct ieee80211_sta *sta, u32 num_packets); #define IEEE80211_MAX_CSA_COUNTERS_NUM 2 /** * struct ieee80211_mutable_offsets - mutable beacon offsets * @tim_offset: position of TIM element * @tim_length: size of TIM element * @csa_counter_offs: array of IEEE80211_MAX_CSA_COUNTERS_NUM offsets * to CSA counters. This array can contain zero values which * should be ignored. */ struct ieee80211_mutable_offsets { u16 tim_offset; u16 tim_length; u16 csa_counter_offs[IEEE80211_MAX_CSA_COUNTERS_NUM]; }; /** * ieee80211_beacon_get_template - beacon template generation function * @hw: pointer obtained from ieee80211_alloc_hw(). * @vif: &struct ieee80211_vif pointer from the add_interface callback. * @offs: &struct ieee80211_mutable_offsets pointer to struct that will * receive the offsets that may be updated by the driver. * * If the driver implements beaconing modes, it must use this function to * obtain the beacon template. 
 *
 * This function should be used if the beacon frames are generated by the
 * device, and then the driver must use the returned beacon as the template.
 * The driver or the device is responsible for updating the DTIM and, when
 * applicable, the CSA count.
 *
 * The driver is responsible for freeing the returned skb.
 *
 * Return: The beacon template. %NULL on error.
 */
struct sk_buff *
ieee80211_beacon_get_template(struct ieee80211_hw *hw,
			      struct ieee80211_vif *vif,
			      struct ieee80211_mutable_offsets *offs);

/**
 * ieee80211_beacon_get_tim - beacon generation function
 * @hw: pointer obtained from ieee80211_alloc_hw().
 * @vif: &struct ieee80211_vif pointer from the add_interface callback.
 * @tim_offset: pointer to variable that will receive the TIM IE offset.
 *	Set to 0 if invalid (in non-AP modes).
 * @tim_length: pointer to variable that will receive the TIM IE length,
 *	(including the ID and length bytes!).
 *	Set to 0 if invalid (in non-AP modes).
 *
 * If the driver implements beaconing modes, it must use this function to
 * obtain the beacon frame.
 *
 * If the beacon frames are generated by the host system (i.e., not in
 * hardware/firmware), the driver uses this function to get each beacon
 * frame from mac80211 -- it is responsible for calling this function exactly
 * once before the beacon is needed (e.g. based on hardware interrupt).
 *
 * The driver is responsible for freeing the returned skb.
 *
 * Return: The beacon template. %NULL on error.
 */
struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
					 struct ieee80211_vif *vif,
					 u16 *tim_offset, u16 *tim_length);

/**
 * ieee80211_beacon_get - beacon generation function
 * @hw: pointer obtained from ieee80211_alloc_hw().
 * @vif: &struct ieee80211_vif pointer from the add_interface callback.
 *
 * See ieee80211_beacon_get_tim().
 *
 * Return: See ieee80211_beacon_get_tim().
 */
static inline struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
						   struct ieee80211_vif *vif)
{
	return ieee80211_beacon_get_tim(hw, vif, NULL, NULL);
}

/**
 * ieee80211_csa_update_counter - request mac80211 to decrement the csa counter
 * @vif: &struct ieee80211_vif pointer from the add_interface callback.
 *
 * The csa counter should be updated after each beacon transmission.
 * This function is called implicitly when
 * ieee80211_beacon_get/ieee80211_beacon_get_tim are called, however if the
 * beacon frames are generated by the device, the driver should call this
 * function after each beacon transmission to sync mac80211's csa counters.
 *
 * Return: new csa counter value
 */
u8 ieee80211_csa_update_counter(struct ieee80211_vif *vif);

/**
 * ieee80211_csa_set_counter - request mac80211 to set csa counter
 * @vif: &struct ieee80211_vif pointer from the add_interface callback.
 * @counter: the new value for the counter
 *
 * The csa counter can be changed by the device; this API should be
 * used by the device driver to update the csa counter in mac80211.
 *
 * It should never be used together with ieee80211_csa_update_counter(),
 * as it will cause a race condition around the counter value.
 */
void ieee80211_csa_set_counter(struct ieee80211_vif *vif, u8 counter);

/**
 * ieee80211_csa_finish - notify mac80211 about channel switch
 * @vif: &struct ieee80211_vif pointer from the add_interface callback.
 *
 * After a channel switch announcement was scheduled and the counter in this
 * announcement hits 1, this function must be called by the driver to
 * notify mac80211 that the channel can be changed.
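 *
 * A minimal sketch for device-generated beacons (hypothetical foo_*
 * beacon-completion hook), tying the counter update to the switch
 * notification::
 *
 *	static void foo_beacon_tx_done(struct ieee80211_vif *vif)
 *	{
 *		if (ieee80211_csa_update_counter(vif) == 1)
 *			ieee80211_csa_finish(vif);
 *	}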
 */
void ieee80211_csa_finish(struct ieee80211_vif *vif);

/**
 * ieee80211_csa_is_complete - find out if counters reached 1
 * @vif: &struct ieee80211_vif pointer from the add_interface callback.
 *
 * This function returns whether the channel switch counters have reached 1.
 */
bool ieee80211_csa_is_complete(struct ieee80211_vif *vif);

/**
 * ieee80211_proberesp_get - retrieve a Probe Response template
 * @hw: pointer obtained from ieee80211_alloc_hw().
 * @vif: &struct ieee80211_vif pointer from the add_interface callback.
 *
 * Creates a Probe Response template which can, for example, be uploaded to
 * hardware. The destination address should be set by the caller.
 *
 * Can only be called in AP mode.
 *
 * Return: The Probe Response template. %NULL on error.
 */
struct sk_buff *ieee80211_proberesp_get(struct ieee80211_hw *hw,
					struct ieee80211_vif *vif);

/**
 * ieee80211_pspoll_get - retrieve a PS Poll template
 * @hw: pointer obtained from ieee80211_alloc_hw().
 * @vif: &struct ieee80211_vif pointer from the add_interface callback.
 *
 * Creates a PS Poll template which can, for example, be uploaded to
 * hardware. The template must be updated after association so that the
 * correct AID, BSSID and MAC address are used.
 *
 * Note: Caller (or hardware) is responsible for setting the
 * &IEEE80211_FCTL_PM bit.
 *
 * Return: The PS Poll template. %NULL on error.
 */
struct sk_buff *ieee80211_pspoll_get(struct ieee80211_hw *hw,
				     struct ieee80211_vif *vif);

/**
 * ieee80211_nullfunc_get - retrieve a nullfunc template
 * @hw: pointer obtained from ieee80211_alloc_hw().
 * @vif: &struct ieee80211_vif pointer from the add_interface callback.
 * @qos_ok: QoS NDP is acceptable to the caller, this should be set
 *	if at all possible
 *
 * Creates a Nullfunc template which can, for example, be uploaded to
 * hardware. The template must be updated after association so that the
 * correct BSSID and address are used.
 *
 * If @qos_ok is set and the association is to an AP with QoS/WMM, the
 * returned packet will be a QoS NDP.
 *
 * Note: Caller (or hardware) is responsible for setting the
 * &IEEE80211_FCTL_PM bit as well as Duration and Sequence Control fields.
 *
 * Return: The nullfunc template. %NULL on error.
 */
struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif,
				       bool qos_ok);

/**
 * ieee80211_probereq_get - retrieve a Probe Request template
 * @hw: pointer obtained from ieee80211_alloc_hw().
 * @src_addr: source MAC address
 * @ssid: SSID buffer
 * @ssid_len: length of SSID
 * @tailroom: tailroom to reserve at end of SKB for IEs
 *
 * Creates a Probe Request template which can, for example, be uploaded to
 * hardware.
 *
 * Return: The Probe Request template. %NULL on error.
 */
struct sk_buff *ieee80211_probereq_get(struct ieee80211_hw *hw,
				       const u8 *src_addr,
				       const u8 *ssid, size_t ssid_len,
				       size_t tailroom);

/**
 * ieee80211_rts_get - RTS frame generation function
 * @hw: pointer obtained from ieee80211_alloc_hw().
 * @vif: &struct ieee80211_vif pointer from the add_interface callback.
 * @frame: pointer to the frame that is going to be protected by the RTS.
 * @frame_len: the frame length (in octets).
 * @frame_txctl: &struct ieee80211_tx_info of the frame.
 * @rts: The buffer where to store the RTS frame.
 *
 * If the RTS frames are generated by the host system (i.e., not in
 * hardware/firmware), the low-level driver uses this function to receive
 * the next RTS frame from the 802.11 code. The low-level driver is
 * responsible for calling this function before an RTS frame is needed.
 */
void ieee80211_rts_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		       const void *frame, size_t frame_len,
		       const struct ieee80211_tx_info *frame_txctl,
		       struct ieee80211_rts *rts);

/**
 * ieee80211_rts_duration - Get the duration field for an RTS frame
 * @hw: pointer obtained from ieee80211_alloc_hw().
 * @vif: &struct ieee80211_vif pointer from the add_interface callback.
 * @frame_len: the length of the frame that is going to be protected by the RTS.
 * @frame_txctl: &struct ieee80211_tx_info of the frame.
 *
 * If the RTS is generated in firmware, but the host system must provide
 * the duration field, the low-level driver uses this function to receive
 * the duration field value in little-endian byteorder.
 *
 * Return: The duration.
 */
__le16 ieee80211_rts_duration(struct ieee80211_hw *hw,
			      struct ieee80211_vif *vif, size_t frame_len,
			      const struct ieee80211_tx_info *frame_txctl);

/**
 * ieee80211_ctstoself_get - CTS-to-self frame generation function
 * @hw: pointer obtained from ieee80211_alloc_hw().
 * @vif: &struct ieee80211_vif pointer from the add_interface callback.
 * @frame: pointer to the frame that is going to be protected by the CTS-to-self.
 * @frame_len: the frame length (in octets).
 * @frame_txctl: &struct ieee80211_tx_info of the frame.
 * @cts: The buffer where to store the CTS-to-self frame.
 *
 * If the CTS-to-self frames are generated by the host system (i.e., not in
 * hardware/firmware), the low-level driver uses this function to receive
 * the next CTS-to-self frame from the 802.11 code. The low-level driver is
 * responsible for calling this function before a CTS-to-self frame is needed.
 */
void ieee80211_ctstoself_get(struct ieee80211_hw *hw,
			     struct ieee80211_vif *vif,
			     const void *frame, size_t frame_len,
			     const struct ieee80211_tx_info *frame_txctl,
			     struct ieee80211_cts *cts);

/**
 * ieee80211_ctstoself_duration - Get the duration field for a CTS-to-self frame
 * @hw: pointer obtained from ieee80211_alloc_hw().
 * @vif: &struct ieee80211_vif pointer from the add_interface callback.
 * @frame_len: the length of the frame that is going to be protected by the CTS-to-self.
 * @frame_txctl: &struct ieee80211_tx_info of the frame.
 *
 * If the CTS-to-self is generated in firmware, but the host system must provide
 * the duration field, the low-level driver uses this function to receive
 * the duration field value in little-endian byteorder.
 *
 * Return: The duration.
 */
__le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw,
				    struct ieee80211_vif *vif,
				    size_t frame_len,
				    const struct ieee80211_tx_info *frame_txctl);

/**
 * ieee80211_generic_frame_duration - Calculate the duration field for a frame
 * @hw: pointer obtained from ieee80211_alloc_hw().
 * @vif: &struct ieee80211_vif pointer from the add_interface callback.
 * @band: the band to calculate the frame duration on
 * @frame_len: the length of the frame.
 * @rate: the rate at which the frame is going to be transmitted.
 *
 * Calculate the duration field of some generic frame, given its
 * length and transmission rate (in 100kbps).
 *
 * Return: The duration.
 */
__le16 ieee80211_generic_frame_duration(struct ieee80211_hw *hw,
					struct ieee80211_vif *vif,
					enum nl80211_band band,
					size_t frame_len,
					struct ieee80211_rate *rate);

/**
 * ieee80211_get_buffered_bc - accessing buffered broadcast and multicast frames
 * @hw: pointer as obtained from ieee80211_alloc_hw().
 * @vif: &struct ieee80211_vif pointer from the add_interface callback.
 *
 * Function for accessing buffered broadcast and multicast frames. If
 * hardware/firmware does not implement buffering of broadcast/multicast
 * frames when power saving is used, 802.11 code buffers them in the host
 * memory. The low-level driver uses this function to fetch the next buffered
 * frame. In most cases, this is used when generating a beacon frame.
 *
 * Return: A pointer to the next buffered skb or NULL if no more buffered
 * frames are available.
 *
 * Note: buffered frames are returned only after the DTIM beacon frame was
 * generated with ieee80211_beacon_get() and the low-level driver must thus
 * call ieee80211_beacon_get() first. ieee80211_get_buffered_bc() returns
 * NULL if the previously generated beacon was not DTIM, so the low-level
 * driver does not need to check for DTIM beacons separately and should be
 * able to use common code for all beacons.
 */
struct sk_buff *
ieee80211_get_buffered_bc(struct ieee80211_hw *hw, struct ieee80211_vif *vif);

/**
 * ieee80211_get_tkip_p1k_iv - get a TKIP phase 1 key for IV32
 *
 * This function returns the TKIP phase 1 key for the given IV32.
 *
 * @keyconf: the parameter passed with the set key
 * @iv32: IV32 to get the P1K for
 * @p1k: a buffer to which the key will be written, as 5 u16 values
 */
void ieee80211_get_tkip_p1k_iv(struct ieee80211_key_conf *keyconf,
			       u32 iv32, u16 *p1k);

/**
 * ieee80211_get_tkip_p1k - get a TKIP phase 1 key
 *
 * This function returns the TKIP phase 1 key for the IV32 taken
 * from the given packet.
 *
 * @keyconf: the parameter passed with the set key
 * @skb: the packet to take the IV32 value from that will be encrypted
 *	with this P1K
 * @p1k: a buffer to which the key will be written, as 5 u16 values
 */
static inline void ieee80211_get_tkip_p1k(struct ieee80211_key_conf *keyconf,
					  struct sk_buff *skb, u16 *p1k)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	const u8 *data = (u8 *)hdr + ieee80211_hdrlen(hdr->frame_control);
	u32 iv32 = get_unaligned_le32(&data[4]);

	ieee80211_get_tkip_p1k_iv(keyconf, iv32, p1k);
}

/**
 * ieee80211_get_tkip_rx_p1k - get a TKIP phase 1 key for RX
 *
 * This function returns the TKIP phase 1 key for the given IV32
 * and transmitter address.
 *
 * @keyconf: the parameter passed with the set key
 * @ta: TA that will be used with the key
 * @iv32: IV32 to get the P1K for
 * @p1k: a buffer to which the key will be written, as 5 u16 values
 */
void ieee80211_get_tkip_rx_p1k(struct ieee80211_key_conf *keyconf,
			       const u8 *ta, u32 iv32, u16 *p1k);

/**
 * ieee80211_get_tkip_p2k - get a TKIP phase 2 key
 *
 * This function computes the TKIP RC4 key for the IV values
 * in the packet.
 *
 * @keyconf: the parameter passed with the set key
 * @skb: the packet to take the IV32/IV16 values from that will be
 *	encrypted with this key
 * @p2k: a buffer to which the key will be written, 16 bytes
 */
void ieee80211_get_tkip_p2k(struct ieee80211_key_conf *keyconf,
			    struct sk_buff *skb, u8 *p2k);

/**
 * ieee80211_tkip_add_iv - write TKIP IV and Ext. IV to pos
 *
 * @pos: start of crypto header
 * @keyconf: the parameter passed with the set key
 * @pn: PN to add
 *
 * Returns: pointer to the octet following IVs (i.e. beginning of
 * the packet payload)
 *
 * This function writes the TKIP IV value to pos (which should
 * point to the crypto header)
 */
u8 *ieee80211_tkip_add_iv(u8 *pos, struct ieee80211_key_conf *keyconf, u64 pn);

/**
 * ieee80211_get_key_rx_seq - get key RX sequence counter
 *
 * @keyconf: the parameter passed with the set key
 * @tid: The TID, or -1 for the management frame value (CCMP/GCMP only);
 *	the value on TID 0 is also used for non-QoS frames. For
 *	CMAC, only TID 0 is valid.
* @seq: buffer to receive the sequence data * * This function allows a driver to retrieve the current RX IV/PNs * for the given key. It must not be called if IV checking is done * by the device and not by mac80211. * * Note that this function may only be called when no RX processing * can be done concurrently. */ void ieee80211_get_key_rx_seq(struct ieee80211_key_conf *keyconf, int tid, struct ieee80211_key_seq *seq); /** * ieee80211_set_key_rx_seq - set key RX sequence counter * * @keyconf: the parameter passed with the set key * @tid: The TID, or -1 for the management frame value (CCMP/GCMP only); * the value on TID 0 is also used for non-QoS frames. For * CMAC, only TID 0 is valid. * @seq: new sequence data * * This function allows a driver to set the current RX IV/PNs for the * given key. This is useful when resuming from WoWLAN sleep and GTK * rekey may have been done while suspended. It should not be called * if IV checking is done by the device and not by mac80211. * * Note that this function may only be called when no RX processing * can be done concurrently. */ void ieee80211_set_key_rx_seq(struct ieee80211_key_conf *keyconf, int tid, struct ieee80211_key_seq *seq); /** * ieee80211_remove_key - remove the given key * @keyconf: the parameter passed with the set key * * Remove the given key. If the key was uploaded to the hardware at the * time this function is called, it is not deleted in the hardware but * instead assumed to have been removed already. * * Note that due to locking considerations this function can (currently) * only be called during key iteration (ieee80211_iter_keys().) */ void ieee80211_remove_key(struct ieee80211_key_conf *keyconf); /** * ieee80211_gtk_rekey_add - add a GTK key from rekeying during WoWLAN * @vif: the virtual interface to add the key on * @keyconf: new key data * * When GTK rekeying was done while the system was suspended, (a) new * key(s) will be available. These will be needed by mac80211 for proper * RX processing, so this function allows setting them. * * The function returns the newly allocated key structure, which will * have similar contents to the passed key configuration but point to * mac80211-owned memory. In case of errors, the function returns an * ERR_PTR(), use IS_ERR() etc. * * Note that this function assumes the key isn't added to hardware * acceleration, so no TX will be done with the key. Since it's a GTK * on managed (station) networks, this is true anyway. If the driver * calls this function from the resume callback and subsequently uses * the return code 1 to reconfigure the device, this key will be part * of the reconfiguration. * * Note that the driver should also call ieee80211_set_key_rx_seq() * for the new key for each TID to set up sequence counters properly. * * IMPORTANT: If this replaces a key that is present in the hardware, * then it will attempt to remove it during this call. In many cases * this isn't what you want, so call ieee80211_remove_key() first for * the key that's being replaced. 
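 *
 * A minimal resume-path sketch (hypothetical variables; error handling
 * elided, and see the ieee80211_remove_key() locking note above)::
 *
 *	struct ieee80211_key_conf *new_key;
 *
 *	ieee80211_remove_key(old_key);
 *	new_key = ieee80211_gtk_rekey_add(vif, &new_conf);
 *	if (IS_ERR(new_key))
 *		return PTR_ERR(new_key);
 *	// then call ieee80211_set_key_rx_seq() for each TID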
*/ struct ieee80211_key_conf * ieee80211_gtk_rekey_add(struct ieee80211_vif *vif, struct ieee80211_key_conf *keyconf); /** * ieee80211_gtk_rekey_notify - notify userspace supplicant of rekeying * @vif: virtual interface the rekeying was done on * @bssid: The BSSID of the AP, for checking association * @replay_ctr: the new replay counter after GTK rekeying * @gfp: allocation flags */ void ieee80211_gtk_rekey_notify(struct ieee80211_vif *vif, const u8 *bssid, const u8 *replay_ctr, gfp_t gfp); /** * ieee80211_wake_queue - wake specific queue * @hw: pointer as obtained from ieee80211_alloc_hw(). * @queue: queue number (counted from zero). * * Drivers should use this function instead of netif_wake_queue. */ void ieee80211_wake_queue(struct ieee80211_hw *hw, int queue); /** * ieee80211_stop_queue - stop specific queue * @hw: pointer as obtained from ieee80211_alloc_hw(). * @queue: queue number (counted from zero). * * Drivers should use this function instead of netif_stop_queue. */ void ieee80211_stop_queue(struct ieee80211_hw *hw, int queue); /** * ieee80211_queue_stopped - test status of the queue * @hw: pointer as obtained from ieee80211_alloc_hw(). * @queue: queue number (counted from zero). * * Drivers should use this function instead of netif_stop_queue. * * Return: %true if the queue is stopped. %false otherwise. */ int ieee80211_queue_stopped(struct ieee80211_hw *hw, int queue); /** * ieee80211_stop_queues - stop all queues * @hw: pointer as obtained from ieee80211_alloc_hw(). * * Drivers should use this function instead of netif_stop_queue. */ void ieee80211_stop_queues(struct ieee80211_hw *hw); /** * ieee80211_wake_queues - wake all queues * @hw: pointer as obtained from ieee80211_alloc_hw(). * * Drivers should use this function instead of netif_wake_queue. */ void ieee80211_wake_queues(struct ieee80211_hw *hw); /** * ieee80211_scan_completed - completed hardware scan * * When hardware scan offload is used (i.e. the hw_scan() callback is * assigned) this function needs to be called by the driver to notify * mac80211 that the scan finished. This function can be called from * any context, including hardirq context. * * @hw: the hardware that finished the scan * @info: information about the completed scan */ void ieee80211_scan_completed(struct ieee80211_hw *hw, struct cfg80211_scan_info *info); /** * ieee80211_sched_scan_results - got results from scheduled scan * * When a scheduled scan is running, this function needs to be called by the * driver whenever there are new scan results available. * * @hw: the hardware that is performing scheduled scans */ void ieee80211_sched_scan_results(struct ieee80211_hw *hw); /** * ieee80211_sched_scan_stopped - inform that the scheduled scan has stopped * * When a scheduled scan is running, this function can be called by * the driver if it needs to stop the scan to perform another task. * Usual scenarios are drivers that cannot continue the scheduled scan * while associating, for instance. * * @hw: the hardware that is performing scheduled scans */ void ieee80211_sched_scan_stopped(struct ieee80211_hw *hw); /** * enum ieee80211_interface_iteration_flags - interface iteration flags * @IEEE80211_IFACE_ITER_NORMAL: Iterate over all interfaces that have * been added to the driver; However, note that during hardware * reconfiguration (after restart_hw) it will iterate over a new * interface and over all the existing interfaces even if they * haven't been re-added to the driver yet. 
 * @IEEE80211_IFACE_ITER_RESUME_ALL: During resume, iterate over all
 *	interfaces, even if they haven't been re-added to the driver yet.
 * @IEEE80211_IFACE_ITER_ACTIVE: Iterate only active interfaces (netdev is up).
 */
enum ieee80211_interface_iteration_flags {
	IEEE80211_IFACE_ITER_NORMAL	= 0,
	IEEE80211_IFACE_ITER_RESUME_ALL	= BIT(0),
	IEEE80211_IFACE_ITER_ACTIVE	= BIT(1),
};

/**
 * ieee80211_iterate_interfaces - iterate interfaces
 *
 * This function iterates over the interfaces associated with a given
 * hardware and calls the callback for them. This includes active as well as
 * inactive interfaces. This function allows the iterator function to sleep.
 * Will iterate over a new interface during add_interface().
 *
 * @hw: the hardware struct of which the interfaces should be iterated over
 * @iter_flags: iteration flags, see &enum ieee80211_interface_iteration_flags
 * @iterator: the iterator function to call
 * @data: first argument of the iterator function
 */
void ieee80211_iterate_interfaces(struct ieee80211_hw *hw, u32 iter_flags,
				  void (*iterator)(void *data, u8 *mac,
						   struct ieee80211_vif *vif),
				  void *data);

/**
 * ieee80211_iterate_active_interfaces - iterate active interfaces
 *
 * This function iterates over the interfaces associated with a given
 * hardware that are currently active and calls the callback for them.
 * This function allows the iterator function to sleep; when the iterator
 * function must be atomic, @ieee80211_iterate_active_interfaces_atomic
 * can be used.
 * Does not iterate over a new interface during add_interface().
 *
 * @hw: the hardware struct of which the interfaces should be iterated over
 * @iter_flags: iteration flags, see &enum ieee80211_interface_iteration_flags
 * @iterator: the iterator function to call
 * @data: first argument of the iterator function
 */
static inline void
ieee80211_iterate_active_interfaces(struct ieee80211_hw *hw, u32 iter_flags,
				    void (*iterator)(void *data, u8 *mac,
						     struct ieee80211_vif *vif),
				    void *data)
{
	ieee80211_iterate_interfaces(hw,
				     iter_flags | IEEE80211_IFACE_ITER_ACTIVE,
				     iterator, data);
}

/**
 * ieee80211_iterate_active_interfaces_atomic - iterate active interfaces
 *
 * This function iterates over the interfaces associated with a given
 * hardware that are currently active and calls the callback for them.
 * This function requires the iterator callback function to be atomic;
 * if that is not desired, use @ieee80211_iterate_active_interfaces instead.
 * Does not iterate over a new interface during add_interface().
 *
 * @hw: the hardware struct of which the interfaces should be iterated over
 * @iter_flags: iteration flags, see &enum ieee80211_interface_iteration_flags
 * @iterator: the iterator function to call, cannot sleep
 * @data: first argument of the iterator function
 */
void ieee80211_iterate_active_interfaces_atomic(struct ieee80211_hw *hw,
						u32 iter_flags,
						void (*iterator)(void *data,
						    u8 *mac,
						    struct ieee80211_vif *vif),
						void *data);

/**
 * ieee80211_iterate_active_interfaces_rtnl - iterate active interfaces
 *
 * This function iterates over the interfaces associated with a given
 * hardware that are currently active and calls the callback for them.
 * This version can only be used while holding the RTNL.
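 *
 * A minimal iteration sketch (hypothetical foo_* names), run while
 * holding the RTNL::
 *
 *	static void foo_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
 *	{
 *		struct foo_priv *priv = data;
 *
 *		if (vif->type == NL80211_IFTYPE_STATION)
 *			foo_refresh_filters(priv, vif);
 *	}
 *
 *	ieee80211_iterate_active_interfaces_rtnl(hw,
 *						 IEEE80211_IFACE_ITER_NORMAL,
 *						 foo_iter, priv);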
 *
 * @hw: the hardware struct of which the interfaces should be iterated over
 * @iter_flags: iteration flags, see &enum ieee80211_interface_iteration_flags
 * @iterator: the iterator function to call, cannot sleep
 * @data: first argument of the iterator function
 */
void ieee80211_iterate_active_interfaces_rtnl(struct ieee80211_hw *hw,
					      u32 iter_flags,
					      void (*iterator)(void *data,
						  u8 *mac,
						  struct ieee80211_vif *vif),
					      void *data);

/**
 * ieee80211_iterate_stations_atomic - iterate stations
 *
 * This function iterates over all stations associated with a given
 * hardware that are currently uploaded to the driver and calls the callback
 * function for them.
 * This function requires the iterator callback function to be atomic.
 *
 * @hw: the hardware struct of which the interfaces should be iterated over
 * @iterator: the iterator function to call, cannot sleep
 * @data: first argument of the iterator function
 */
void ieee80211_iterate_stations_atomic(struct ieee80211_hw *hw,
				       void (*iterator)(void *data,
						struct ieee80211_sta *sta),
				       void *data);

/**
 * ieee80211_queue_work - add work onto the mac80211 workqueue
 *
 * Drivers and mac80211 use this to add work onto the mac80211 workqueue.
 * This helper ensures drivers are not queueing work when they should not be.
 *
 * @hw: the hardware struct for the interface we are adding work for
 * @work: the work we want to add onto the mac80211 workqueue
 */
void ieee80211_queue_work(struct ieee80211_hw *hw, struct work_struct *work);

/**
 * ieee80211_queue_delayed_work - add work onto the mac80211 workqueue
 *
 * Drivers and mac80211 use this to queue delayed work onto the mac80211
 * workqueue.
 *
 * @hw: the hardware struct for the interface we are adding work for
 * @dwork: delayable work to queue onto the mac80211 workqueue
 * @delay: number of jiffies to wait before queueing
 */
void ieee80211_queue_delayed_work(struct ieee80211_hw *hw,
				  struct delayed_work *dwork,
				  unsigned long delay);

/**
 * ieee80211_start_tx_ba_session - Start a tx Block Ack session.
 * @sta: the station for which to start a BA session
 * @tid: the TID to BA on.
 * @timeout: session timeout value (in TUs)
 *
 * Return: success if addBA request was sent, failure otherwise
 *
 * Although mac80211/low level driver/user space application can estimate
 * the need to start aggregation on a certain RA/TID, the session level
 * will be managed by mac80211.
 */
int ieee80211_start_tx_ba_session(struct ieee80211_sta *sta, u16 tid,
				  u16 timeout);

/**
 * ieee80211_start_tx_ba_cb_irqsafe - low level driver ready to aggregate.
 * @vif: &struct ieee80211_vif pointer from the add_interface callback
 * @ra: receiver address of the BA session recipient.
 * @tid: the TID to BA on.
 *
 * This function must be called by low level driver once it has
 * finished with preparations for the BA session. It can be called
 * from any context.
 */
void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif, const u8 *ra,
				      u16 tid);

/**
 * ieee80211_stop_tx_ba_session - Stop a Block Ack session.
 * @sta: the station whose BA session to stop
 * @tid: the TID to stop BA.
 *
 * Return: negative error if the TID is invalid, or no aggregation active
 *
 * Although mac80211/low level driver/user space application can estimate
 * the need to stop aggregation on a certain RA/TID, the session level
 * will be managed by mac80211.
 */
int ieee80211_stop_tx_ba_session(struct ieee80211_sta *sta, u16 tid);

/**
 * ieee80211_stop_tx_ba_cb_irqsafe - low level driver ready to stop aggregate.
* @vif: &struct ieee80211_vif pointer from the add_interface callback * @ra: receiver address of the BA session recipient. * @tid: the desired TID to BA on. * * This function must be called by the low level driver once it has * finished with preparations for the BA session tear down. It * can be called from any context. */ void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif, const u8 *ra, u16 tid); /** * ieee80211_find_sta - find a station * * @vif: virtual interface to look for station on * @addr: station's address * * Return: The station, if found. %NULL otherwise. * * Note: This function must be called under RCU lock and the * resulting pointer is only valid under RCU lock as well. */ struct ieee80211_sta *ieee80211_find_sta(struct ieee80211_vif *vif, const u8 *addr); /** * ieee80211_find_sta_by_ifaddr - find a station on hardware * * @hw: pointer as obtained from ieee80211_alloc_hw() * @addr: remote station's address * @localaddr: local address (vif->sdata->vif.addr). Use NULL for 'any'. * * Return: The station, if found. %NULL otherwise. * * Note: This function must be called under RCU lock and the * resulting pointer is only valid under RCU lock as well. * * NOTE: You may pass NULL for localaddr, but then you will just get * the first STA that matches the remote address 'addr'. * We can have multiple STA entries associated with multiple * logical stations (e.g. consider a station connecting to another * BSSID on the same AP hardware without disconnecting first). * In this case, the result of this method with localaddr NULL * is not reliable. * * DO NOT USE THIS FUNCTION with localaddr NULL if at all possible. */ struct ieee80211_sta *ieee80211_find_sta_by_ifaddr(struct ieee80211_hw *hw, const u8 *addr, const u8 *localaddr); /** * ieee80211_sta_block_awake - block station from waking up * @hw: the hardware * @pubsta: the station * @block: whether to block or unblock * * Some devices require that all frames that are on the queues * for a specific station that went to sleep are flushed before * a poll response or frames after the station woke up can be * delivered to it. Note that such frames must be rejected * by the driver as filtered, with the appropriate status flag. * * This function allows implementing this mode in a race-free * manner. * * To do this, a driver must keep track of the number of frames * still enqueued for a specific station. If this number is not * zero when the station goes to sleep, the driver must call * this function to force mac80211 to consider the station to * be asleep regardless of the station's actual state. Once the * number of outstanding frames reaches zero, the driver must * call this function again to unblock the station. That will * cause mac80211 to be able to send ps-poll responses, and if * the station was polled in the meantime, frames will also * be sent out as a result of this. Additionally, the driver * will be notified that the station woke up some time after * it is unblocked, regardless of whether the station actually * woke up while blocked or not. */ void ieee80211_sta_block_awake(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta, bool block); /** * ieee80211_sta_eosp - notify mac80211 about end of SP * @pubsta: the station * * When a device transmits frames in a way that it can't tell * mac80211 in the TX status about the EOSP, it must clear the * %IEEE80211_TX_STATUS_EOSP bit and call this function instead. * This applies for PS-Poll as well as uAPSD.
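 *
 * Illustrative sketch (not part of the original header): a driver that
 * cannot report EOSP in the TX status might, in its status path, do:
 *
 *	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 *
 *	info->flags &= ~IEEE80211_TX_STATUS_EOSP;
 *	ieee80211_tx_status(hw, skb);
 *	ieee80211_sta_eosp(pubsta);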
* * Note that just like with _tx_status() and _rx() drivers must * not mix calls to irqsafe/non-irqsafe versions, this function * must not be mixed with those either. Use all irqsafe, or * all non-irqsafe; don't mix! * * NB: the _irqsafe version of this function doesn't exist, no * driver needs it right now. Don't call this function if * you'd need the _irqsafe version, look at the git history * and restore the _irqsafe version! */ void ieee80211_sta_eosp(struct ieee80211_sta *pubsta); /** * ieee80211_send_eosp_nullfunc - ask mac80211 to send NDP with EOSP * @pubsta: the station * @tid: the tid of the NDP * * Sometimes the device understands that it needs to close * the Service Period unexpectedly. This can happen when * sending frames that are filling holes in the BA window. * In this case, the device can ask mac80211 to send a * Nullfunc frame with EOSP set. When that happens, the * driver must have called ieee80211_sta_set_buffered() to * let mac80211 know that there are no buffered frames any * more, otherwise mac80211 will get the more_data bit wrong. * The low level driver must have made sure that the frame * will be sent despite the station being in power-save. * Mac80211 won't call allow_buffered_frames(). * Note that calling this function doesn't exempt the driver * from closing the EOSP properly; it will still have to call * ieee80211_sta_eosp() when the NDP is sent. */ void ieee80211_send_eosp_nullfunc(struct ieee80211_sta *pubsta, int tid); /** * ieee80211_iter_keys - iterate keys programmed into the device * @hw: pointer obtained from ieee80211_alloc_hw() * @vif: virtual interface to iterate, may be %NULL for all * @iter: iterator function that will be called for each key * @iter_data: custom data to pass to the iterator function * * This function can be used to iterate all the keys known to * mac80211, even those that weren't previously programmed into * the device. This is intended for use in WoWLAN if the device * needs reprogramming of the keys during suspend. Note that due * to locking reasons, it is also only safe to call this at a few * spots since it must hold the RTNL and be able to sleep. * * The order in which the keys are iterated matches the order * in which they were originally installed and handed to the * set_key callback. */ void ieee80211_iter_keys(struct ieee80211_hw *hw, struct ieee80211_vif *vif, void (*iter)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *key, void *data), void *iter_data); /** * ieee80211_iter_keys_rcu - iterate keys programmed into the device * @hw: pointer obtained from ieee80211_alloc_hw() * @vif: virtual interface to iterate, may be %NULL for all * @iter: iterator function that will be called for each key * @iter_data: custom data to pass to the iterator function * * This function can be used to iterate all the keys known to * mac80211, even those that weren't previously programmed into * the device. Note that due to locking reasons, keys of a station * that is being removed will be skipped. * * This function requires being called in an RCU critical section, * and thus iter must be atomic. */ void ieee80211_iter_keys_rcu(struct ieee80211_hw *hw, struct ieee80211_vif *vif, void (*iter)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *key, void *data), void *iter_data); /** * ieee80211_iter_chan_contexts_atomic - iterate channel contexts * @hw: pointer obtained from ieee80211_alloc_hw().
* @iter: iterator function * @iter_data: data passed to iterator function * * Iterate all active channel contexts. This function is atomic and * doesn't acquire any locks internally that might be held in other * places while calling into the driver. * * The iterator will not find a context that's being added (during * the driver callback to add it) but will find it while it's being * removed. * * Note that during hardware restart, all contexts that existed * before the restart are considered already present so will be * found while iterating, whether they've been re-added already * or not. */ void ieee80211_iter_chan_contexts_atomic( struct ieee80211_hw *hw, void (*iter)(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *chanctx_conf, void *data), void *iter_data); /** * ieee80211_ap_probereq_get - retrieve a Probe Request template * @hw: pointer obtained from ieee80211_alloc_hw(). * @vif: &struct ieee80211_vif pointer from the add_interface callback. * * Creates a Probe Request template which can, for example, be uploaded to * hardware. The template is filled with BSSID, SSID and supported rate * information. This function must only be called from within the * .bss_info_changed callback function and only in managed mode. The function * is only useful when the interface is associated, otherwise it will return * %NULL. * * Return: The Probe Request template. %NULL on error. */ struct sk_buff *ieee80211_ap_probereq_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif); /** * ieee80211_beacon_loss - inform that the hardware does not receive beacons * * @vif: &struct ieee80211_vif pointer from the add_interface callback. * * When beacon filtering is enabled with %IEEE80211_VIF_BEACON_FILTER and * %IEEE80211_CONF_PS is set, the driver needs to use this function to inform * mac80211 whenever the hardware is not receiving beacons. */ void ieee80211_beacon_loss(struct ieee80211_vif *vif); /** * ieee80211_connection_loss - inform that the hardware has lost the connection to the AP * * @vif: &struct ieee80211_vif pointer from the add_interface callback. * * When beacon filtering is enabled with %IEEE80211_VIF_BEACON_FILTER, and * %IEEE80211_CONF_PS and %IEEE80211_HW_CONNECTION_MONITOR are set, the driver * needs to inform mac80211 if the connection to the AP has been lost. * The function may also be called if the connection needs to be terminated * for some other reason, even if %IEEE80211_HW_CONNECTION_MONITOR isn't set. * * This function will cause an immediate change to disassociated state, * without connection recovery attempts. */ void ieee80211_connection_loss(struct ieee80211_vif *vif); /** * ieee80211_resume_disconnect - disconnect from AP after resume * * @vif: &struct ieee80211_vif pointer from the add_interface callback. * * Instructs mac80211 to disconnect from the AP after resume. * Drivers can use this after WoWLAN if they know that the * connection cannot be kept up, for example because keys were * used while the device was asleep but the replay counters or * similar cannot be retrieved from the device during resume. * * Note that due to implementation issues, if the driver uses * the reconfiguration functionality during resume the interface * will still be added as associated first during resume and then * disconnect normally later. * * This function can only be called from the resume callback and * the driver must not be holding any of its own locks while it * calls this function, or at least not any locks it needs in the * key configuration paths (if it supports HW crypto).
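 *
 * Illustrative sketch (not part of the original header; the ops structure
 * and key-state flag are hypothetical driver code):
 *
 *	static int drv_resume(struct ieee80211_hw *hw)
 *	{
 *		struct drv_priv *priv = hw->priv;
 *
 *		if (priv->keys_lost)
 *			ieee80211_resume_disconnect(priv->vif);
 *
 *		return 0;
 *	}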
*/ void ieee80211_resume_disconnect(struct ieee80211_vif *vif); /** * ieee80211_cqm_rssi_notify - inform that a configured connection quality monitoring * RSSI threshold was triggered * * @vif: &struct ieee80211_vif pointer from the add_interface callback. * @rssi_event: the RSSI trigger event type * @rssi_level: new RSSI level value or 0 if not available * @gfp: context flags * * When %IEEE80211_VIF_SUPPORTS_CQM_RSSI is set, and connection quality * monitoring is configured with an RSSI threshold, the driver will inform * mac80211 whenever the RSSI level reaches the threshold. */ void ieee80211_cqm_rssi_notify(struct ieee80211_vif *vif, enum nl80211_cqm_rssi_threshold_event rssi_event, s32 rssi_level, gfp_t gfp); /** * ieee80211_cqm_beacon_loss_notify - inform CQM of beacon loss * * @vif: &struct ieee80211_vif pointer from the add_interface callback. * @gfp: context flags */ void ieee80211_cqm_beacon_loss_notify(struct ieee80211_vif *vif, gfp_t gfp); /** * ieee80211_radar_detected - inform that a radar was detected * * @hw: pointer as obtained from ieee80211_alloc_hw() */ void ieee80211_radar_detected(struct ieee80211_hw *hw); /** * ieee80211_chswitch_done - Complete channel switch process * @vif: &struct ieee80211_vif pointer from the add_interface callback. * @success: make the channel switch successful or not * * Complete the channel switch post-process: set the new operational channel * and wake up the suspended queues. */ void ieee80211_chswitch_done(struct ieee80211_vif *vif, bool success); /** * ieee80211_request_smps - request SM PS transition * @vif: &struct ieee80211_vif pointer from the add_interface callback. * @smps_mode: new SM PS mode * * This allows the driver to request an SM PS transition in managed * mode. This is useful when the driver has more information than * the stack about possible interference, for example from Bluetooth. */ void ieee80211_request_smps(struct ieee80211_vif *vif, enum ieee80211_smps_mode smps_mode); /** * ieee80211_ready_on_channel - notification of remain-on-channel start * @hw: pointer as obtained from ieee80211_alloc_hw() */ void ieee80211_ready_on_channel(struct ieee80211_hw *hw); /** * ieee80211_remain_on_channel_expired - remain_on_channel duration expired * @hw: pointer as obtained from ieee80211_alloc_hw() */ void ieee80211_remain_on_channel_expired(struct ieee80211_hw *hw); /** * ieee80211_stop_rx_ba_session - callback to stop existing BA sessions * * In order not to harm system performance and user experience, the device * may request not to allow any RX BA session and tear down existing RX BA * sessions based on system constraints such as periodic BT activity that needs * to limit WLAN activity (e.g. SCO or A2DP). * In such cases, the intention is to limit the duration of the RX PPDU and * therefore prevent the peer device from using A-MPDU aggregation. * * @vif: &struct ieee80211_vif pointer from the add_interface callback. * @ba_rx_bitmap: bitmap of open RX BA sessions per TID * @addr: pointer to the BSSID MAC address */ void ieee80211_stop_rx_ba_session(struct ieee80211_vif *vif, u16 ba_rx_bitmap, const u8 *addr); /** * ieee80211_mark_rx_ba_filtered_frames - move RX BA window and mark filtered * @pubsta: station struct * @tid: the session's TID * @ssn: starting sequence number of the bitmap, all frames before this are * assumed to be out of the window after the call * @filtered: bitmap of filtered frames, BIT(0) is the @ssn entry etc.
* @received_mpdus: number of received mpdus in firmware * * This function moves the BA window and releases all frames before @ssn, and * marks frames marked in the bitmap as having been filtered. Afterwards, it * checks if any frames in the window starting from @ssn can now be released * (in case they were only waiting for frames that were filtered.) */ void ieee80211_mark_rx_ba_filtered_frames(struct ieee80211_sta *pubsta, u8 tid, u16 ssn, u64 filtered, u16 received_mpdus); /** * ieee80211_send_bar - send a BlockAckReq frame * * Can be used to flush pending frames from the peer's aggregation reorder * buffer. * * @vif: &struct ieee80211_vif pointer from the add_interface callback. * @ra: the peer's destination address * @tid: the TID of the aggregation session * @ssn: the new starting sequence number for the receiver */ void ieee80211_send_bar(struct ieee80211_vif *vif, u8 *ra, u16 tid, u16 ssn); /** * ieee80211_manage_rx_ba_offl - helper to queue an RX BA work * @vif: &struct ieee80211_vif pointer from the add_interface callback * @addr: station mac address * @tid: the rx tid */ void ieee80211_manage_rx_ba_offl(struct ieee80211_vif *vif, const u8 *addr, unsigned int tid); /** * ieee80211_start_rx_ba_session_offl - start a Rx BA session * * Some device drivers may offload part of the Rx aggregation flow including * AddBa/DelBa negotiation but may otherwise be incapable of full Rx * reordering. * * Create structures responsible for reordering so device drivers may call here * when they complete AddBa negotiation. * * @vif: &struct ieee80211_vif pointer from the add_interface callback * @addr: station mac address * @tid: the rx tid */ static inline void ieee80211_start_rx_ba_session_offl(struct ieee80211_vif *vif, const u8 *addr, u16 tid) { if (WARN_ON(tid >= IEEE80211_NUM_TIDS)) return; ieee80211_manage_rx_ba_offl(vif, addr, tid); } /** * ieee80211_stop_rx_ba_session_offl - stop a Rx BA session * * Some device drivers may offload part of the Rx aggregation flow including * AddBa/DelBa negotiation but may otherwise be incapable of full Rx * reordering. * * Destroy structures responsible for reordering so device drivers may call here * when they complete DelBa negotiation. * * @vif: &struct ieee80211_vif pointer from the add_interface callback * @addr: station mac address * @tid: the rx tid */ static inline void ieee80211_stop_rx_ba_session_offl(struct ieee80211_vif *vif, const u8 *addr, u16 tid) { if (WARN_ON(tid >= IEEE80211_NUM_TIDS)) return; ieee80211_manage_rx_ba_offl(vif, addr, tid + IEEE80211_NUM_TIDS); } /** * ieee80211_rx_ba_timer_expired - stop a Rx BA session due to timeout * * Some device drivers do not offload AddBa/DelBa negotiation, but handle rx * buffer reordering internally, and therefore also handle the session timer. * * Trigger the timeout flow, which sends a DelBa. * * @vif: &struct ieee80211_vif pointer from the add_interface callback * @addr: station mac address * @tid: the rx tid */ void ieee80211_rx_ba_timer_expired(struct ieee80211_vif *vif, const u8 *addr, unsigned int tid); /* Rate control API */ /** * struct ieee80211_tx_rate_control - rate control information for/from RC algo * * @hw: The hardware the algorithm is invoked for. * @sband: The band this frame is being transmitted on.
* @bss_conf: the current BSS configuration * @skb: the skb that will be transmitted, the control information in it needs * to be filled in * @reported_rate: The rate control algorithm can fill this in to indicate * which rate should be reported to userspace as the current rate and * used for rate calculations in the mesh network. * @rts: whether RTS will be used for this frame because it is longer than the * RTS threshold * @short_preamble: whether mac80211 will request short-preamble transmission * if the selected rate supports it * @rate_idx_mask: user-requested (legacy) rate mask * @rate_idx_mcs_mask: user-requested MCS rate mask (NULL if not in use) * @bss: whether this frame is sent out in AP or IBSS mode */ struct ieee80211_tx_rate_control { struct ieee80211_hw *hw; struct ieee80211_supported_band *sband; struct ieee80211_bss_conf *bss_conf; struct sk_buff *skb; struct ieee80211_tx_rate reported_rate; bool rts, short_preamble; u32 rate_idx_mask; u8 *rate_idx_mcs_mask; bool bss; }; /** * enum rate_control_capabilities - rate control capabilities */ enum rate_control_capabilities { /** * @RATE_CTRL_CAPA_VHT_EXT_NSS_BW: * Support for extended NSS BW support (dot11VHTExtendedNSSCapable) * Note that this is only looked at if the minimum number of chains * that the AP uses is < the number of TX chains the hardware has, * otherwise the NSS difference doesn't bother us. */ RATE_CTRL_CAPA_VHT_EXT_NSS_BW = BIT(0), }; struct rate_control_ops { unsigned long capa; const char *name; void *(*alloc)(struct ieee80211_hw *hw, struct dentry *debugfsdir); void (*free)(void *priv); void *(*alloc_sta)(void *priv, struct ieee80211_sta *sta, gfp_t gfp); void (*rate_init)(void *priv, struct ieee80211_supported_band *sband, struct cfg80211_chan_def *chandef, struct ieee80211_sta *sta, void *priv_sta); void (*rate_update)(void *priv, struct ieee80211_supported_band *sband, struct cfg80211_chan_def *chandef, struct ieee80211_sta *sta, void *priv_sta, u32 changed); void (*free_sta)(void *priv, struct ieee80211_sta *sta, void *priv_sta); void (*tx_status_ext)(void *priv, struct ieee80211_supported_band *sband, void *priv_sta, struct ieee80211_tx_status *st); void (*tx_status)(void *priv, struct ieee80211_supported_band *sband, struct ieee80211_sta *sta, void *priv_sta, struct sk_buff *skb); void (*get_rate)(void *priv, struct ieee80211_sta *sta, void *priv_sta, struct ieee80211_tx_rate_control *txrc); void (*add_sta_debugfs)(void *priv, void *priv_sta, struct dentry *dir); void (*remove_sta_debugfs)(void *priv, void *priv_sta); u32 (*get_expected_throughput)(void *priv_sta); }; static inline int rate_supported(struct ieee80211_sta *sta, enum nl80211_band band, int index) { return (sta == NULL || sta->supp_rates[band] & BIT(index)); } static inline s8 rate_lowest_index(struct ieee80211_supported_band *sband, struct ieee80211_sta *sta) { int i; for (i = 0; i < sband->n_bitrates; i++) if (rate_supported(sta, sband->band, i)) return i; /* warn when we cannot find a rate. */ WARN_ON_ONCE(1); /* and return 0 (the lowest index) */ return 0; } static inline bool rate_usable_index_exists(struct ieee80211_supported_band *sband, struct ieee80211_sta *sta) { unsigned int i; for (i = 0; i < sband->n_bitrates; i++) if (rate_supported(sta, sband->band, i)) return true; return false; } /** * rate_control_set_rates - pass the sta rate selection to mac80211/driver * * When not doing a rate control probe to test rates, rate control should pass * its rate selection to mac80211. 
If the driver supports receiving a station * rate table, it will use it to ensure that frames are always sent based on * the most recent rate control module decision. * * @hw: pointer as obtained from ieee80211_alloc_hw() * @pubsta: &struct ieee80211_sta pointer to the target destination. * @rates: new tx rate set to be used for this station. */ int rate_control_set_rates(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta, struct ieee80211_sta_rates *rates); int ieee80211_rate_control_register(const struct rate_control_ops *ops); void ieee80211_rate_control_unregister(const struct rate_control_ops *ops); static inline bool conf_is_ht20(struct ieee80211_conf *conf) { return conf->chandef.width == NL80211_CHAN_WIDTH_20; } static inline bool conf_is_ht40_minus(struct ieee80211_conf *conf) { return conf->chandef.width == NL80211_CHAN_WIDTH_40 && conf->chandef.center_freq1 < conf->chandef.chan->center_freq; } static inline bool conf_is_ht40_plus(struct ieee80211_conf *conf) { return conf->chandef.width == NL80211_CHAN_WIDTH_40 && conf->chandef.center_freq1 > conf->chandef.chan->center_freq; } static inline bool conf_is_ht40(struct ieee80211_conf *conf) { return conf->chandef.width == NL80211_CHAN_WIDTH_40; } static inline bool conf_is_ht(struct ieee80211_conf *conf) { return (conf->chandef.width != NL80211_CHAN_WIDTH_5) && (conf->chandef.width != NL80211_CHAN_WIDTH_10) && (conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT); } static inline enum nl80211_iftype ieee80211_iftype_p2p(enum nl80211_iftype type, bool p2p) { if (p2p) { switch (type) { case NL80211_IFTYPE_STATION: return NL80211_IFTYPE_P2P_CLIENT; case NL80211_IFTYPE_AP: return NL80211_IFTYPE_P2P_GO; default: break; } } return type; } static inline enum nl80211_iftype ieee80211_vif_type_p2p(struct ieee80211_vif *vif) { return ieee80211_iftype_p2p(vif->type, vif->p2p); } /** * ieee80211_update_mu_groups - set the VHT MU-MIMO group data * * @vif: the specified virtual interface * @membership: 64-bit array - a bit is set if the station is a member of the group * @position: 2 bits per group id indicating the position in the group * * Note: This function assumes that the given vif is valid and the position and * membership data is of the correct size and are in the same byte order as the * matching GroupId management frame. * Calls to this function need to be serialized with the RX path. */ void ieee80211_update_mu_groups(struct ieee80211_vif *vif, const u8 *membership, const u8 *position); void ieee80211_enable_rssi_reports(struct ieee80211_vif *vif, int rssi_min_thold, int rssi_max_thold); void ieee80211_disable_rssi_reports(struct ieee80211_vif *vif); /** * ieee80211_ave_rssi - report the average RSSI for the specified interface * * @vif: the specified virtual interface * * Note: This function assumes that the given vif is valid. * * Return: The average RSSI value for the requested interface, or 0 if not * applicable. */ int ieee80211_ave_rssi(struct ieee80211_vif *vif); /** * ieee80211_report_wowlan_wakeup - report WoWLAN wakeup * @vif: virtual interface * @wakeup: wakeup reason(s) * @gfp: allocation flags * * See cfg80211_report_wowlan_wakeup().
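 *
 * Illustrative sketch (not part of the original header): reporting a
 * magic-packet wakeup from the driver's resume path:
 *
 *	struct cfg80211_wowlan_wakeup wakeup = {
 *		.magic_pkt = true,
 *	};
 *
 *	ieee80211_report_wowlan_wakeup(vif, &wakeup, GFP_KERNEL);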
*/ void ieee80211_report_wowlan_wakeup(struct ieee80211_vif *vif, struct cfg80211_wowlan_wakeup *wakeup, gfp_t gfp); /** * ieee80211_tx_prepare_skb - prepare an 802.11 skb for transmission * @hw: pointer as obtained from ieee80211_alloc_hw() * @vif: virtual interface * @skb: frame to be sent from within the driver * @band: the band to transmit on * @sta: optional pointer to get the station to send the frame to * * Note: must be called under RCU lock */ bool ieee80211_tx_prepare_skb(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct sk_buff *skb, int band, struct ieee80211_sta **sta); /** * struct ieee80211_noa_data - holds temporary data for tracking P2P NoA state * * @next_tsf: TSF timestamp of the next absent state change * @has_next_tsf: next absent state change event pending * * @absent: descriptor bitmask, set if GO is currently absent * * private: * * @count: count fields from the NoA descriptors * @desc: adjusted data from the NoA */ struct ieee80211_noa_data { u32 next_tsf; bool has_next_tsf; u8 absent; u8 count[IEEE80211_P2P_NOA_DESC_MAX]; struct { u32 start; u32 duration; u32 interval; } desc[IEEE80211_P2P_NOA_DESC_MAX]; }; /** * ieee80211_parse_p2p_noa - initialize NoA tracking data from P2P IE * * @attr: P2P NoA IE * @data: NoA tracking data * @tsf: current TSF timestamp * * Return: number of successfully parsed descriptors */ int ieee80211_parse_p2p_noa(const struct ieee80211_p2p_noa_attr *attr, struct ieee80211_noa_data *data, u32 tsf); /** * ieee80211_update_p2p_noa - get next pending P2P GO absent state change * * @data: NoA tracking data * @tsf: current TSF timestamp */ void ieee80211_update_p2p_noa(struct ieee80211_noa_data *data, u32 tsf); /** * ieee80211_tdls_oper - request userspace to perform a TDLS operation * @vif: virtual interface * @peer: the peer's destination address * @oper: the requested TDLS operation * @reason_code: reason code for the operation, valid for TDLS teardown * @gfp: allocation flags * * See cfg80211_tdls_oper_request(). */ void ieee80211_tdls_oper_request(struct ieee80211_vif *vif, const u8 *peer, enum nl80211_tdls_operation oper, u16 reason_code, gfp_t gfp); /** * ieee80211_reserve_tid - request to reserve a specific TID * * There is sometimes a need (such as in TDLS) for blocking the driver from * using a specific TID so that the FW can use it for certain operations such * as sending PTI requests. To make sure that the driver doesn't use that TID, * this function must be called as it flushes out packets on this TID and marks * it as blocked, so that any transmit for the station on this TID will be * redirected to the alternative TID in the same AC. * * Note that this function blocks and may call back into the driver, so it * should be called without driver locks held. Also note this function should * only be called from the driver's @sta_state callback. * * @sta: the station to reserve the TID for * @tid: the TID to reserve * * Returns: 0 on success, else on failure */ int ieee80211_reserve_tid(struct ieee80211_sta *sta, u8 tid); /** * ieee80211_unreserve_tid - request to unreserve a specific TID * * Once there is no longer any need for reserving a certain TID, this function * should be called, and no longer will packets have their TID modified for * preventing use of this TID in the driver. * * Note that this function blocks and acquires a lock, so it should be called * without driver locks held. Also note this function should only be called * from the driver's @sta_state callback. 
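 *
 * Illustrative sketch (not part of the original header): pairing with
 * ieee80211_reserve_tid(), e.g. around a TDLS operation:
 *
 *	ret = ieee80211_reserve_tid(sta, tid);
 *	if (ret)
 *		return ret;
 *	... use the reserved TID for PTI requests ...
 *	ieee80211_unreserve_tid(sta, tid);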
* * @sta: the station * @tid: the TID to unreserve */ void ieee80211_unreserve_tid(struct ieee80211_sta *sta, u8 tid); /** * ieee80211_tx_dequeue - dequeue a packet from a software tx queue * * @hw: pointer as obtained from ieee80211_alloc_hw() * @txq: pointer obtained from station or virtual interface * * Returns the skb if successful, %NULL if no frame was available. * * Note that this must be called in an rcu_read_lock() critical section, * which can only be released after the SKB was handled. Some pointers in * skb->cb, e.g. the key pointer, are protected by RCU and thus the * critical section must persist not just for the duration of this call * but for the duration of the frame handling. * However, also note that while in the wake_tx_queue() method, * rcu_read_lock() is already held. */ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw, struct ieee80211_txq *txq); /** * ieee80211_txq_get_depth - get pending frame/byte count of given txq * * The values are not guaranteed to be coherent with regard to each other, i.e. * txq state can change halfway through this function and the caller may end up * with "new" frame_cnt and "old" byte_cnt or vice-versa. * * @txq: pointer obtained from station or virtual interface * @frame_cnt: pointer to store frame count * @byte_cnt: pointer to store byte count */ void ieee80211_txq_get_depth(struct ieee80211_txq *txq, unsigned long *frame_cnt, unsigned long *byte_cnt); /** * ieee80211_nan_func_terminated - notify about NAN function termination. * * This function is used to notify mac80211 about NAN function termination. * Note that this function can't be called from hard irq. * * @vif: &struct ieee80211_vif pointer from the add_interface callback. * @inst_id: the local instance id * @reason: termination reason (one of the NL80211_NAN_FUNC_TERM_REASON_*) * @gfp: allocation flags */ void ieee80211_nan_func_terminated(struct ieee80211_vif *vif, u8 inst_id, enum nl80211_nan_func_term_reason reason, gfp_t gfp); /** * ieee80211_nan_func_match - notify about NAN function match event. * * This function is used to notify mac80211 about NAN function match. The * cookie inside the match struct will be assigned by mac80211. * Note that this function can't be called from hard irq. * * @vif: &struct ieee80211_vif pointer from the add_interface callback. * @match: match event information * @gfp: allocation flags */ void ieee80211_nan_func_match(struct ieee80211_vif *vif, struct cfg80211_nan_match_params *match, gfp_t gfp); #endif /* MAC80211_H */ backport-iwlwifi-dkms-7906/include/net/regulatory.h000066400000000000000000000257701351064634500224670ustar00rootroot00000000000000#ifndef __NET_REGULATORY_H #define __NET_REGULATORY_H /* * regulatory support structures * * Copyright 2008-2009 Luis R. Rodriguez * Copyright (C) 2018 Intel Corporation * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/ #include /** * enum environment_cap - Environment parsed from country IE * @ENVIRON_ANY: indicates country IE applies to both indoor and * outdoor operation. * @ENVIRON_INDOOR: indicates country IE applies only to indoor operation * @ENVIRON_OUTDOOR: indicates country IE applies only to outdoor operation */ enum environment_cap { ENVIRON_ANY, ENVIRON_INDOOR, ENVIRON_OUTDOOR, }; /** * struct regulatory_request - used to keep track of regulatory requests * * @rcu_head: RCU head struct used to free the request * @wiphy_idx: this is set if this request's initiator is * %REGDOM_SET_BY_COUNTRY_IE or %REGDOM_SET_BY_DRIVER. This * can be used by the wireless core to deal with conflicts * and potentially inform users of which devices specifically * caused the conflicts. * @initiator: indicates who sent this request, could be any * of those set in nl80211_reg_initiator (%NL80211_REGDOM_SET_BY_*) * @alpha2: the ISO / IEC 3166 alpha2 country code of the requested * regulatory domain. We have a few special codes: * 00 - World regulatory domain * 99 - built by driver but a specific alpha2 cannot be determined * 98 - result of an intersection between two regulatory domains * 97 - regulatory domain has not yet been configured * @dfs_region: If CRDA responded with a regulatory domain that requires * DFS master operation on a known DFS region (NL80211_DFS_*), * dfs_region represents that region. Drivers can use this and the * @alpha2 to adjust their device's DFS parameters as required. * @user_reg_hint_type: if the @initiator was of type * %NL80211_REGDOM_SET_BY_USER, this classifies the type * of hint passed. This could be any of the %NL80211_USER_REG_HINT_* * types. * @intersect: indicates whether the wireless core should intersect * the requested regulatory domain with the presently set regulatory * domain. * @processed: indicates whether or not this request has already been * processed. When the last request is processed it means that the * current regulatory domain set on cfg80211 is updated from * CRDA and can be used by other regulatory requests. When * the last request is not yet processed, we must yield until it * is processed before processing any new requests. * @is_indoor: indicates if the device is operating in an indoor environment * or not * @country_ie_checksum: checksum of the last processed and accepted * country IE * @country_ie_env: lets us know if the AP is telling us we are outdoor, * indoor, or if it doesn't matter * @list: used to insert into the reg_requests_list linked list */ struct regulatory_request { struct rcu_head rcu_head; int wiphy_idx; enum nl80211_reg_initiator initiator; enum nl80211_user_reg_hint_type user_reg_hint_type; char alpha2[3]; enum nl80211_dfs_regions dfs_region; bool intersect; bool processed; bool is_indoor; enum environment_cap country_ie_env; struct list_head list; }; /** * enum ieee80211_regulatory_flags - device regulatory flags * * @REGULATORY_CUSTOM_REG: tells us the driver for this device * has its own custom regulatory domain and cannot identify the * ISO / IEC 3166 alpha2 it belongs to. When this is enabled * we will disregard the first regulatory hint (when the * initiator is %REGDOM_SET_BY_CORE). Drivers that use * wiphy_apply_custom_regulatory() should have this flag set * or the regulatory core will set it for the wiphy.
* If you use regulatory_hint() *after* using * wiphy_apply_custom_regulatory() the wireless core will * clear the REGULATORY_CUSTOM_REG for your wiphy as it would be * implied that the device somehow gained knowledge of its region. * @REGULATORY_STRICT_REG: tells us that the wiphy for this device * has a regulatory domain that it wishes to be considered as the * superset for regulatory rules. After this device gets its regulatory * domain programmed, further regulatory hints shall only be considered * for this device to enhance regulatory compliance, forcing the * device to only possibly use subsets of the original regulatory * rules. For example, if channels 13 and 14 are disabled by this * device's regulatory domain, no user-specified regulatory hint which * has these channels enabled would enable them for this wiphy; * the device's original regulatory domain will be trusted as the * base. You can program the superset of regulatory rules for this * wiphy with regulatory_hint() for cards programmed with an * ISO3166-alpha2 country code. Wiphys that use regulatory_hint() * will have their wiphy->regd programmed once the regulatory * domain is set, and all other regulatory hints will be ignored * until their own regulatory domain gets programmed. * @REGULATORY_DISABLE_BEACON_HINTS: enable this if your driver needs to * ensure that passive scan flags and beaconing flags are not lifted by * cfg80211 due to regulatory beacon hints. For more information on beacon * hints read the documentation for regulatory_hint_found_beacon() * @REGULATORY_COUNTRY_IE_FOLLOW_POWER: for devices that have a preference * that even though they may have programmed their own custom power * setting prior to wiphy registration, they want to ensure their channel * power settings are updated for this connection with the power settings * derived from the regulatory domain. The regulatory domain used will be * based on the ISO3166-alpha2 from country IE provided through * regulatory_hint_country_ie() * @REGULATORY_COUNTRY_IE_IGNORE: for devices that have a preference to ignore * all country IE information processed by the regulatory core. This will * override %REGULATORY_COUNTRY_IE_FOLLOW_POWER as all country IEs will * be ignored. * @REGULATORY_ENABLE_RELAX_NO_IR: for devices that wish to allow the * NO_IR relaxation, which enables transmissions on channels on which * otherwise initiating radiation is not allowed. This will enable the * relaxations enabled under the CFG80211_REG_RELAX_NO_IR configuration * option * @REGULATORY_IGNORE_STALE_KICKOFF: the regulatory core will _not_ make sure * all interfaces on this wiphy reside on allowed channels. If this flag * is not set, upon a regdomain change, the interfaces are given a grace * period (currently 60 seconds) to disconnect or move to an allowed * channel. Interfaces on forbidden channels are forcibly disconnected. * Currently these types of interfaces are supported for enforcement: * NL80211_IFTYPE_ADHOC, NL80211_IFTYPE_STATION, NL80211_IFTYPE_AP, * NL80211_IFTYPE_AP_VLAN, NL80211_IFTYPE_MONITOR, * NL80211_IFTYPE_P2P_CLIENT, NL80211_IFTYPE_P2P_GO, * NL80211_IFTYPE_P2P_DEVICE. The flag will be set by default if a device * includes any modes unsupported for enforcement checking. * @REGULATORY_WIPHY_SELF_MANAGED: for devices that employ wiphy-specific * regdom management. These devices will ignore all regdom changes not * originating from their own wiphy.
* A self-managed wiphy only employs regulatory information obtained from * the FW and driver and does not use other cfg80211 sources like * beacon-hints, country-code IEs and hints from other devices on the same * system. Conversely, a self-managed wiphy does not share its regulatory * hints with other devices in the system. If a system contains several * devices, one or more of which are self-managed, there might be * contradictory regulatory settings between them. Usage of this flag is * generally discouraged. Only use it if the FW/driver is incompatible * with non-locally originated hints. * This flag is incompatible with the flags: %REGULATORY_CUSTOM_REG, * %REGULATORY_STRICT_REG, %REGULATORY_COUNTRY_IE_FOLLOW_POWER, * %REGULATORY_COUNTRY_IE_IGNORE and %REGULATORY_DISABLE_BEACON_HINTS. * Mixing any of the above flags with this flag will result in a failure * to register the wiphy. This flag implies * %REGULATORY_DISABLE_BEACON_HINTS and %REGULATORY_COUNTRY_IE_IGNORE. */ enum ieee80211_regulatory_flags { REGULATORY_CUSTOM_REG = BIT(0), REGULATORY_STRICT_REG = BIT(1), REGULATORY_DISABLE_BEACON_HINTS = BIT(2), REGULATORY_COUNTRY_IE_FOLLOW_POWER = BIT(3), REGULATORY_COUNTRY_IE_IGNORE = BIT(4), REGULATORY_ENABLE_RELAX_NO_IR = BIT(5), REGULATORY_IGNORE_STALE_KICKOFF = BIT(6), REGULATORY_WIPHY_SELF_MANAGED = BIT(7), }; struct ieee80211_freq_range { u32 start_freq_khz; u32 end_freq_khz; u32 max_bandwidth_khz; }; struct ieee80211_power_rule { u32 max_antenna_gain; u32 max_eirp; }; /** * struct ieee80211_wmm_ac - used to store per-AC WMM regulatory limitations * * The information provided in this structure is required for QoS * transmit queue configuration. Cf. IEEE 802.11 7.3.2.29. * * @cw_min: minimum contention window [a value of the form * 2^n-1 in the range 1..32767] * @cw_max: maximum contention window [like @cw_min] * @cot: maximum burst time in units of 32 usecs, 0 meaning disabled * @aifsn: arbitration interframe space [0..255] * */ struct ieee80211_wmm_ac { u16 cw_min; u16 cw_max; u16 cot; u8 aifsn; }; struct ieee80211_wmm_rule { struct ieee80211_wmm_ac client[IEEE80211_NUM_ACS]; struct ieee80211_wmm_ac ap[IEEE80211_NUM_ACS]; }; struct ieee80211_reg_rule { struct ieee80211_freq_range freq_range; struct ieee80211_power_rule power_rule; struct ieee80211_wmm_rule wmm_rule; u32 flags; u32 dfs_cac_ms; bool has_wmm; }; struct ieee80211_regdomain { struct rcu_head rcu_head; u32 n_reg_rules; char alpha2[3]; enum nl80211_dfs_regions dfs_region; struct ieee80211_reg_rule reg_rules[]; }; #define MHZ_TO_KHZ(freq) ((freq) * 1000) #define KHZ_TO_MHZ(freq) ((freq) / 1000) #define DBI_TO_MBI(gain) ((gain) * 100) #define MBI_TO_DBI(gain) ((gain) / 100) #define DBM_TO_MBM(gain) ((gain) * 100) #define MBM_TO_DBM(gain) ((gain) / 100) #define REG_RULE_EXT(start, end, bw, gain, eirp, dfs_cac, reg_flags) \ { \ .freq_range.start_freq_khz = MHZ_TO_KHZ(start), \ .freq_range.end_freq_khz = MHZ_TO_KHZ(end), \ .freq_range.max_bandwidth_khz = MHZ_TO_KHZ(bw), \ .power_rule.max_antenna_gain = DBI_TO_MBI(gain), \ .power_rule.max_eirp = DBM_TO_MBM(eirp), \ .flags = reg_flags, \ .dfs_cac_ms = dfs_cac, \ } #define REG_RULE(start, end, bw, gain, eirp, reg_flags) \ REG_RULE_EXT(start, end, bw, gain, eirp, 0, reg_flags) #endif
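/*
 * Illustrative sketch (not part of the original header): a driver-defined
 * custom regulatory domain built with the REG_RULE() helper above; the
 * identifier and channel/power values are hypothetical.
 *
 *	static const struct ieee80211_regdomain example_regdom = {
 *		.n_reg_rules = 2,
 *		.alpha2 = "99",
 *		.reg_rules = {
 *			REG_RULE(2412 - 10, 2472 + 10, 40, 0, 20, 0),
 *			REG_RULE(5180 - 10, 5240 + 10, 80, 0, 23,
 *				 NL80211_RRF_NO_IR),
 *		},
 *	};
 *
 * A driver would typically set REGULATORY_CUSTOM_REG in
 * wiphy->regulatory_flags and apply this with
 * wiphy_apply_custom_regulatory(wiphy, &example_regdom).
 */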
backport-iwlwifi-dkms-7906/include/uapi/000077500000000000000000000000001351064634500202625ustar00rootroot00000000000000backport-iwlwifi-dkms-7906/include/uapi/linux/000077500000000000000000000000001351064634500214215ustar00rootroot00000000000000backport-iwlwifi-dkms-7906/include/uapi/linux/mpls.h000066400000000000000000000044151351064634500225510ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI_MPLS_H #define _UAPI_MPLS_H #include #include /* Reference: RFC 5462, RFC 3032 * * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Label | TC |S| TTL | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * Label: Label Value, 20 bits * TC: Traffic Class field, 3 bits * S: Bottom of Stack, 1 bit * TTL: Time to Live, 8 bits */ struct mpls_label { __be32 entry; }; #define MPLS_LS_LABEL_MASK 0xFFFFF000 #define MPLS_LS_LABEL_SHIFT 12 #define MPLS_LS_TC_MASK 0x00000E00 #define MPLS_LS_TC_SHIFT 9 #define MPLS_LS_S_MASK 0x00000100 #define MPLS_LS_S_SHIFT 8 #define MPLS_LS_TTL_MASK 0x000000FF #define MPLS_LS_TTL_SHIFT 0 /* Reserved labels */ #define MPLS_LABEL_IPV4NULL 0 /* RFC3032 */ #define MPLS_LABEL_RTALERT 1 /* RFC3032 */ #define MPLS_LABEL_IPV6NULL 2 /* RFC3032 */ #define MPLS_LABEL_IMPLNULL 3 /* RFC3032 */ #define MPLS_LABEL_ENTROPY 7 /* RFC6790 */ #define MPLS_LABEL_GAL 13 /* RFC5586 */ #define MPLS_LABEL_OAMALERT 14 /* RFC3429 */ #define MPLS_LABEL_EXTENSION 15 /* RFC7274 */ #define MPLS_LABEL_FIRST_UNRESERVED 16 /* RFC3032 */ /* These are embedded into IFLA_STATS_AF_SPEC: * [IFLA_STATS_AF_SPEC] * -> [AF_MPLS] * -> [MPLS_STATS_xxx] * * Attributes: * [MPLS_STATS_LINK] = { * struct mpls_link_stats * } */ enum { MPLS_STATS_UNSPEC, /* also used as 64bit pad attribute */ MPLS_STATS_LINK, __MPLS_STATS_MAX, }; #define MPLS_STATS_MAX (__MPLS_STATS_MAX - 1) struct mpls_link_stats { __u64 rx_packets; /* total packets received */ __u64 tx_packets; /* total packets transmitted */ __u64 rx_bytes; /* total bytes received */ __u64 tx_bytes; /* total bytes transmitted */ __u64 rx_errors; /* bad packets received */ __u64 tx_errors; /* packet transmit problems */ __u64 rx_dropped; /* packet dropped on receive */ __u64 tx_dropped; /* packet dropped on transmit */ __u64 rx_noroute; /* no route for packet dest */ }; #endif /* _UAPI_MPLS_H */ backport-iwlwifi-dkms-7906/include/uapi/linux/nl80211.h000066400000000000000000010355651351064634500226160ustar00rootroot00000000000000#ifndef __LINUX_NL80211_H #define __LINUX_NL80211_H /* * 802.11 netlink interface public header * * Copyright 2006-2010 Johannes Berg * Copyright 2008 Michael Wu * Copyright 2008 Luis Carlos Cobo * Copyright 2008 Michael Buesch * Copyright 2008, 2009 Luis R. Rodriguez * Copyright 2008 Jouni Malinen * Copyright 2008 Colin McCabe * Copyright 2015-2017 Intel Deutschland GmbH * Copyright (C) 2018-2019 Intel Corporation * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. * */ /* * This header file defines the userspace API to the wireless stack. Please * be careful not to break things - i.e. don't move anything around or so * unless you can demonstrate that it breaks neither API nor ABI. * * Additions to the API should be accompanied by actual implementations in * an upstream driver, so that example implementations exist in case there * are ever concerns about the precise semantics of the API or changes are * needed, and to ensure that code for dead (no longer implemented) API * can actually be identified and removed. * Nonetheless, semantics should also be documented carefully in this file. */ #include <linux/types.h> #define NL80211_GENL_NAME "nl80211" #define NL80211_MULTICAST_GROUP_CONFIG "config" #define NL80211_MULTICAST_GROUP_SCAN "scan" #define NL80211_MULTICAST_GROUP_REG "regulatory" #define NL80211_MULTICAST_GROUP_MLME "mlme" #define NL80211_MULTICAST_GROUP_VENDOR "vendor" #define NL80211_MULTICAST_GROUP_NAN "nan" #define NL80211_MULTICAST_GROUP_TESTMODE "testmode" /** * DOC: Station handling * * Stations are added per interface, but a special case exists with VLAN * interfaces. When a station is bound to an AP interface, it may be moved * into a VLAN identified by a VLAN interface index (%NL80211_ATTR_STA_VLAN). * The station is still assumed to belong to the AP interface it was added * to. * * Station handling varies per interface type and depending on the driver's * capabilities. * * For drivers supporting TDLS with external setup (WIPHY_FLAG_SUPPORTS_TDLS * and WIPHY_FLAG_TDLS_EXTERNAL_SETUP), the station lifetime is as follows: * - a setup station entry is added, not yet authorized, without any rate * or capability information, this just exists to avoid race conditions * - when the TDLS setup is done, a single NL80211_CMD_SET_STATION is valid * to add rate and capability information to the station and at the same * time mark it authorized. * - %NL80211_TDLS_ENABLE_LINK is then used * - after this, the only valid operation is to remove it by tearing down * the TDLS link (%NL80211_TDLS_DISABLE_LINK) * * TODO: need more info for other interface types */ /** * DOC: Frame transmission/registration support * * Frame transmission and registration support exists to allow userspace * management entities such as wpa_supplicant to react to management frames * that are not being handled by the kernel. This includes, for example, * certain classes of action frames that cannot be handled in the kernel * for various reasons. * * Frame registration is done on a per-interface basis and registrations * cannot be removed other than by closing the socket. It is possible to * specify a registration filter to register, for example, only for a * certain type of action frame. In particular with action frames, those * that userspace registers for will not be returned as unhandled by the * driver, so that the registered application has to take responsibility * for doing that. * * The type of frame that can be registered for is also dependent on the * driver and interface type. The frame types are advertised in wiphy * attributes so applications know what to expect.
* * NOTE: When an interface changes type while registrations are active, * these registrations are ignored until the interface type is * changed again. This means that changing the interface type can * lead to a situation that couldn't otherwise be produced, but * any such registrations will be dormant in the sense that they * will not be serviced, i.e. they will not receive any frames. * * Frame transmission allows userspace to send for example the required * responses to action frames. It is subject to some sanity checking, * but many frames can be transmitted. When a frame was transmitted, its * status is indicated to the sending socket. * * For more technical details, see the corresponding command descriptions * below. */ /** * DOC: Virtual interface / concurrency capabilities * * Some devices are able to operate with virtual MACs; they can have * more than one virtual interface. The capability handling for this * is a bit complex though, as there may be a number of restrictions * on the types of concurrency that are supported. * * To start with, each device supports the interface types listed in * the %NL80211_ATTR_SUPPORTED_IFTYPES attribute, but by listing the * types there no concurrency is implied. * * Once concurrency is desired, more attributes must be observed: * To start with, since some interface types are purely managed in * software, like the AP-VLAN type in mac80211 for example, there's * an additional list of these; they can be added at any time and * are only restricted by some semantic restrictions (e.g. AP-VLAN * cannot be added without a corresponding AP interface). This list * is exported in the %NL80211_ATTR_SOFTWARE_IFTYPES attribute. * * Further, the list of supported combinations is exported. This is * in the %NL80211_ATTR_INTERFACE_COMBINATIONS attribute. Basically, * it exports a list of "groups", and at any point in time the * interfaces that are currently active must fall into any one of * the advertised groups. Within each group, there are restrictions * on the number of interfaces of different types that are supported * and also the number of different channels, along with potentially * some other restrictions. See &enum nl80211_if_combination_attrs. * * All together, these attributes define the concurrency of virtual * interfaces that a given device supports. */ /** * DOC: packet coalesce support * * In most cases, a host that receives IPv4 and IPv6 multicast/broadcast * packets does not do anything with these packets. Therefore the * reception of these unwanted packets causes unnecessary processing * and power consumption. * * The packet coalesce feature helps to reduce the number of received * interrupts to the host by buffering these packets in firmware/hardware for * some predefined time. A receive interrupt will be generated when one of the * following events occurs: * a) Expiration of hardware timer whose expiration time is set to maximum * coalescing delay of matching coalesce rule. * b) The coalescing buffer in hardware reaches its limit. * c) A packet doesn't match any of the configured coalesce rules. * * The user needs to configure the following parameters for creating a * coalesce rule: * a) Maximum coalescing delay * b) List of packet patterns which need to be matched * c) Condition for coalescence: pattern 'match' or 'no match' * Multiple such rules can be created.
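 *
 * Illustrative sketch (not part of the original header): in kernel terms a
 * single rule maps onto struct cfg80211_coalesce_rules from cfg80211.h; a
 * rule with a 500 ms delay coalescing matching packets might look like
 * (pattern setup omitted):
 *
 *	struct cfg80211_coalesce_rules rule = {
 *		.delay = 500,
 *		.condition = NL80211_COALESCE_CONDITION_MATCH,
 *	};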
*/ /** * DOC: WPA/WPA2 EAPOL handshake offload * * By setting the @NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_PSK flag, drivers * can indicate they support offloading EAPOL handshakes for WPA/WPA2 * preshared key authentication. In %NL80211_CMD_CONNECT the preshared * key should be specified using %NL80211_ATTR_PMK. Drivers supporting * this offload may reject the %NL80211_CMD_CONNECT when no preshared * key material is provided, for example when that driver does not * support setting the temporal keys through %NL80211_CMD_NEW_KEY. * * Similarly, the @NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_1X flag can be * set by drivers indicating offload support of the PTK/GTK EAPOL * handshakes during 802.1X authentication. In order to use the offload * the %NL80211_CMD_CONNECT should have %NL80211_ATTR_WANT_1X_4WAY_HS * attribute flag. Drivers supporting this offload may reject the * %NL80211_CMD_CONNECT when the attribute flag is not present. * * For 802.1X the PMK or PMK-R0 are set by providing %NL80211_ATTR_PMK * using %NL80211_CMD_SET_PMK. For offloaded FT support also * %NL80211_ATTR_PMKR0_NAME must be provided. */ /** * DOC: FILS shared key authentication offload * * FILS shared key authentication offload can be advertised by drivers by * setting the @NL80211_EXT_FEATURE_FILS_SK_OFFLOAD flag. The drivers that support * FILS shared key authentication offload should be able to construct the * authentication and association frames for FILS shared key authentication and * eventually do a key derivation as per IEEE 802.11ai. The below additional * parameters should be given to the driver in %NL80211_CMD_CONNECT and/or in * %NL80211_CMD_UPDATE_CONNECT_PARAMS. * %NL80211_ATTR_FILS_ERP_USERNAME - used to construct keyname_nai * %NL80211_ATTR_FILS_ERP_REALM - used to construct keyname_nai * %NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM - used to construct the ERP message * %NL80211_ATTR_FILS_ERP_RRK - used to generate the rIK and rMSK * rIK should be used to generate an authentication tag on the ERP message and * rMSK should be used to derive a PMKSA. * rIK and rMSK should be generated, and keyname_nai and the sequence number * should be used, as specified in IETF RFC 6696. * * When FILS shared key authentication is completed, the driver needs to provide the * below additional parameters to userspace, which can be either after setting * up a connection or after roaming. * %NL80211_ATTR_FILS_KEK - used for key renewal * %NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM - used in further EAP-RP exchanges * %NL80211_ATTR_PMKID - used to identify the PMKSA used/generated * %NL80211_ATTR_PMK - used to update PMKSA cache in userspace * The PMKSA can be maintained in userspace persistently so that it can be used * later, even after reboots or Wi-Fi off/on cycles. * * %NL80211_ATTR_FILS_CACHE_ID is the cache identifier advertised by a FILS * capable AP supporting PMK caching. It specifies the scope within which the * PMKSAs are cached in an ESS. %NL80211_CMD_SET_PMKSA and * %NL80211_CMD_DEL_PMKSA are enhanced to allow support for PMKSA caching based * on FILS cache identifier. Additionally %NL80211_ATTR_PMK is used with * %NL80211_CMD_SET_PMKSA to specify the PMK corresponding to a PMKSA for the driver to * use in a FILS shared key connection with PMKSA caching. */ /** * enum nl80211_commands - supported nl80211 commands * * @NL80211_CMD_UNSPEC: unspecified command to catch errors * * @NL80211_CMD_GET_WIPHY: request information about a wiphy or dump request * to get a list of all present wiphys.
* @NL80211_CMD_SET_WIPHY: set wiphy parameters, needs %NL80211_ATTR_WIPHY or * %NL80211_ATTR_IFINDEX; can be used to set %NL80211_ATTR_WIPHY_NAME, * %NL80211_ATTR_WIPHY_TXQ_PARAMS, %NL80211_ATTR_WIPHY_FREQ (and the * attributes determining the channel width; this is used for setting * monitor mode channel), %NL80211_ATTR_WIPHY_RETRY_SHORT, * %NL80211_ATTR_WIPHY_RETRY_LONG, %NL80211_ATTR_WIPHY_FRAG_THRESHOLD, * and/or %NL80211_ATTR_WIPHY_RTS_THRESHOLD. * However, for setting the channel, see %NL80211_CMD_SET_CHANNEL * instead, the support here is for backward compatibility only. * @NL80211_CMD_NEW_WIPHY: Newly created wiphy, response to get request * or rename notification. Has attributes %NL80211_ATTR_WIPHY and * %NL80211_ATTR_WIPHY_NAME. * @NL80211_CMD_DEL_WIPHY: Wiphy deleted. Has attributes * %NL80211_ATTR_WIPHY and %NL80211_ATTR_WIPHY_NAME. * * @NL80211_CMD_GET_INTERFACE: Request an interface's configuration; * either a dump request for all interfaces or a specific get with a * single %NL80211_ATTR_IFINDEX is supported. * @NL80211_CMD_SET_INTERFACE: Set type of a virtual interface, requires * %NL80211_ATTR_IFINDEX and %NL80211_ATTR_IFTYPE. * @NL80211_CMD_NEW_INTERFACE: Newly created virtual interface or response * to %NL80211_CMD_GET_INTERFACE. Has %NL80211_ATTR_IFINDEX, * %NL80211_ATTR_WIPHY and %NL80211_ATTR_IFTYPE attributes. Can also * be sent from userspace to request creation of a new virtual interface, * then requires attributes %NL80211_ATTR_WIPHY, %NL80211_ATTR_IFTYPE and * %NL80211_ATTR_IFNAME. * @NL80211_CMD_DEL_INTERFACE: Virtual interface was deleted, has attributes * %NL80211_ATTR_IFINDEX and %NL80211_ATTR_WIPHY. Can also be sent from * userspace to request deletion of a virtual interface, then requires * attribute %NL80211_ATTR_IFINDEX. * * @NL80211_CMD_GET_KEY: Get sequence counter information for a key specified * by %NL80211_ATTR_KEY_IDX and/or %NL80211_ATTR_MAC. * @NL80211_CMD_SET_KEY: Set key attributes %NL80211_ATTR_KEY_DEFAULT, * %NL80211_ATTR_KEY_DEFAULT_MGMT, or %NL80211_ATTR_KEY_THRESHOLD. * @NL80211_CMD_NEW_KEY: add a key with given %NL80211_ATTR_KEY_DATA, * %NL80211_ATTR_KEY_IDX, %NL80211_ATTR_MAC, %NL80211_ATTR_KEY_CIPHER, * and %NL80211_ATTR_KEY_SEQ attributes. * @NL80211_CMD_DEL_KEY: delete a key identified by %NL80211_ATTR_KEY_IDX * or %NL80211_ATTR_MAC. * * @NL80211_CMD_GET_BEACON: (not used) * @NL80211_CMD_SET_BEACON: change the beacon on an access point interface * using the %NL80211_ATTR_BEACON_HEAD and %NL80211_ATTR_BEACON_TAIL * attributes. For drivers that generate the beacon and probe responses * internally, the following attributes must be provided: %NL80211_ATTR_IE, * %NL80211_ATTR_IE_PROBE_RESP and %NL80211_ATTR_IE_ASSOC_RESP. * @NL80211_CMD_START_AP: Start AP operation on an AP interface, parameters * are like for %NL80211_CMD_SET_BEACON, and additionally parameters that * do not change are used, these include %NL80211_ATTR_BEACON_INTERVAL, * %NL80211_ATTR_DTIM_PERIOD, %NL80211_ATTR_SSID, * %NL80211_ATTR_HIDDEN_SSID, %NL80211_ATTR_CIPHERS_PAIRWISE, * %NL80211_ATTR_CIPHER_GROUP, %NL80211_ATTR_WPA_VERSIONS, * %NL80211_ATTR_AKM_SUITES, %NL80211_ATTR_PRIVACY, * %NL80211_ATTR_AUTH_TYPE, %NL80211_ATTR_INACTIVITY_TIMEOUT, * %NL80211_ATTR_ACL_POLICY and %NL80211_ATTR_MAC_ADDRS. * The channel to use can be set on the interface or be given using the * %NL80211_ATTR_WIPHY_FREQ and the attributes determining channel width. 
* @NL80211_CMD_NEW_BEACON: old alias for %NL80211_CMD_START_AP * @NL80211_CMD_STOP_AP: Stop AP operation on the given interface * @NL80211_CMD_DEL_BEACON: old alias for %NL80211_CMD_STOP_AP * * @NL80211_CMD_GET_STATION: Get station attributes for station identified by * %NL80211_ATTR_MAC on the interface identified by %NL80211_ATTR_IFINDEX. * @NL80211_CMD_SET_STATION: Set station attributes for station identified by * %NL80211_ATTR_MAC on the interface identified by %NL80211_ATTR_IFINDEX. * @NL80211_CMD_NEW_STATION: Add a station with given attributes to * the interface identified by %NL80211_ATTR_IFINDEX. * @NL80211_CMD_DEL_STATION: Remove a station identified by %NL80211_ATTR_MAC * or, if no MAC address given, all stations, on the interface identified * by %NL80211_ATTR_IFINDEX. %NL80211_ATTR_MGMT_SUBTYPE and * %NL80211_ATTR_REASON_CODE can optionally be used to specify which type * of disconnection indication should be sent to the station * (Deauthentication or Disassociation frame and reason code for that * frame). * * @NL80211_CMD_GET_MPATH: Get mesh path attributes for mesh path to * destination %NL80211_ATTR_MAC on the interface identified by * %NL80211_ATTR_IFINDEX. * @NL80211_CMD_SET_MPATH: Set mesh path attributes for mesh path to * destination %NL80211_ATTR_MAC on the interface identified by * %NL80211_ATTR_IFINDEX. * @NL80211_CMD_NEW_MPATH: Create a new mesh path for the destination given by * %NL80211_ATTR_MAC via %NL80211_ATTR_MPATH_NEXT_HOP. * @NL80211_CMD_DEL_MPATH: Delete a mesh path to the destination given by * %NL80211_ATTR_MAC. * @NL80211_CMD_NEW_PATH: Add a mesh path with given attributes to * the interface identified by %NL80211_ATTR_IFINDEX. * @NL80211_CMD_DEL_PATH: Remove a mesh path identified by %NL80211_ATTR_MAC * or, if no MAC address given, all mesh paths, on the interface identified * by %NL80211_ATTR_IFINDEX. * @NL80211_CMD_SET_BSS: Set BSS attributes for BSS identified by * %NL80211_ATTR_IFINDEX. * * @NL80211_CMD_GET_REG: ask the wireless core to send us its currently set * regulatory domain. If %NL80211_ATTR_WIPHY is specified and the device * has a private regulatory domain, it will be returned. Otherwise, the * global regdomain will be returned. * A device will have a private regulatory domain if it uses the * regulatory_hint() API. Even when a private regdomain is used the channel * information will still be amended according to further hints from * the regulatory core to help with compliance. A dump version of this API * is now available which returns the global regdomain as well as * all private regdomains of present wiphys (for those that have it). * If a wiphy is self-managed (%NL80211_ATTR_WIPHY_SELF_MANAGED_REG), then * its private regdomain is the only valid one for it. The regulatory * core is not used to help with compliance in this case. * @NL80211_CMD_SET_REG: Set current regulatory domain. CRDA sends this command * after being queried by the kernel. CRDA replies by sending a regulatory * domain structure which consists of %NL80211_ATTR_REG_ALPHA set to our * current alpha2 if it found a match. It also provides * NL80211_ATTR_REG_RULE_FLAGS, and a set of regulatory rules. Each * regulatory rule is a nested set of attributes given by * %NL80211_ATTR_REG_RULE_FREQ_[START|END] and * %NL80211_ATTR_FREQ_RANGE_MAX_BW with an attached power rule given by * %NL80211_ATTR_REG_RULE_POWER_MAX_ANT_GAIN and * %NL80211_ATTR_REG_RULE_POWER_MAX_EIRP.
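Hypothetical libnl-3 sketch of a %NL80211_CMD_GET_STATION dump as described above; each known station arrives as a separate reply delivered to the socket's valid-message callback. The function name is an assumption of this sketch.

#include <errno.h>
#include <netlink/genl/genl.h>
#include <linux/nl80211.h>

/* Dump all stations on an interface: NL80211_CMD_GET_STATION with
 * NLM_F_DUMP and only NL80211_ATTR_IFINDEX set. */
static int dump_stations(struct nl_sock *sk, int family, int ifindex)
{
    struct nl_msg *msg = nlmsg_alloc();
    int err;

    if (!msg)
        return -ENOMEM;
    genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0,
                NLM_F_DUMP, NL80211_CMD_GET_STATION, 0);
    nla_put_u32(msg, NL80211_ATTR_IFINDEX, ifindex);
    err = nl_send_auto(sk, msg);
    nlmsg_free(msg);
    if (err < 0)
        return err;
    return nl_recvmsgs_default(sk); /* replies hit the registered callbacks */
}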
* @NL80211_CMD_REQ_SET_REG: ask the wireless core to set the regulatory domain * to the specified ISO/IEC 3166-1 alpha2 country code. The core will * store this as a valid request and then query userspace for it. * * @NL80211_CMD_GET_MESH_CONFIG: Get mesh networking properties for the * interface identified by %NL80211_ATTR_IFINDEX * * @NL80211_CMD_SET_MESH_CONFIG: Set mesh networking properties for the * interface identified by %NL80211_ATTR_IFINDEX * * @NL80211_CMD_SET_MGMT_EXTRA_IE: Set extra IEs for management frames. The * interface is identified with %NL80211_ATTR_IFINDEX and the management * frame subtype with %NL80211_ATTR_MGMT_SUBTYPE. The extra IE data to be * added to the end of the specified management frame is specified with * %NL80211_ATTR_IE. If the command succeeds, the requested data will be * added to all specified management frames generated by * kernel/firmware/driver. * Note: This command has been removed and it is only reserved at this * point to avoid re-using existing command number. The functionality this * command was planned for has been provided with cleaner design with the * option to specify additional IEs in NL80211_CMD_TRIGGER_SCAN, * NL80211_CMD_AUTHENTICATE, NL80211_CMD_ASSOCIATE, * NL80211_CMD_DEAUTHENTICATE, and NL80211_CMD_DISASSOCIATE. * * @NL80211_CMD_GET_SCAN: get scan results * @NL80211_CMD_TRIGGER_SCAN: trigger a new scan with the given parameters * %NL80211_ATTR_TX_NO_CCK_RATE is used to decide whether to send the * probe requests at CCK rate or not. %NL80211_ATTR_BSSID can be used to * specify a BSSID to scan for; if not included, the wildcard BSSID will * be used. * @NL80211_CMD_NEW_SCAN_RESULTS: scan notification (as a reply to * NL80211_CMD_GET_SCAN and on the "scan" multicast group) * @NL80211_CMD_SCAN_ABORTED: scan was aborted, for unspecified reasons, * partial scan results may be available * * @NL80211_CMD_START_SCHED_SCAN: start a scheduled scan at certain * intervals and certain number of cycles, as specified by * %NL80211_ATTR_SCHED_SCAN_PLANS. If %NL80211_ATTR_SCHED_SCAN_PLANS is * not specified and only %NL80211_ATTR_SCHED_SCAN_INTERVAL is specified, * scheduled scan will run in an infinite loop with the specified interval. * These attributes are mutually exclusive, * i.e. NL80211_ATTR_SCHED_SCAN_INTERVAL must not be passed if * NL80211_ATTR_SCHED_SCAN_PLANS is defined. * If for some reason scheduled scan is aborted by the driver, all scan * plans are canceled (including scan plans that did not start yet). * Like with normal scans, if SSIDs (%NL80211_ATTR_SCAN_SSIDS) * are passed, they are used in the probe requests. For * broadcast, a broadcast SSID must be passed (i.e. an empty * string). If no SSID is passed, no probe requests are sent and * a passive scan is performed. %NL80211_ATTR_SCAN_FREQUENCIES, * if passed, define which channels should be scanned; if not * passed, all channels allowed for the current regulatory domain * are used. Extra IEs can also be passed from the userspace by * using the %NL80211_ATTR_IE attribute. The first cycle of the * scheduled scan can be delayed by %NL80211_ATTR_SCHED_SCAN_DELAY * if supplied. If the device supports multiple concurrent scheduled * scans, it will allow such when the caller provides the flag attribute * %NL80211_ATTR_SCHED_SCAN_MULTI to indicate user-space support for it. * @NL80211_CMD_STOP_SCHED_SCAN: stop a scheduled scan. Returns -ENOENT if * scheduled scan is not running. The caller may assume that as soon * as the call returns, it is safe to start a new scheduled scan again.
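A sketch of %NL80211_CMD_TRIGGER_SCAN with a nested %NL80211_ATTR_SCAN_SSIDS list (one directed SSID plus the zero-length wildcard), again assuming libnl-3; the nested attribute types are simple indices, and error handling is elided.

#include <errno.h>
#include <string.h>
#include <netlink/genl/genl.h>
#include <linux/nl80211.h>

/* Trigger an active scan probing for one SSID and the wildcard SSID. */
static int trigger_scan(struct nl_sock *sk, int family, int ifindex,
                        const char *ssid)
{
    struct nl_msg *msg = nlmsg_alloc();
    struct nlattr *ssids;
    int err;

    if (!msg)
        return -ENOMEM;
    genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
                NL80211_CMD_TRIGGER_SCAN, 0);
    nla_put_u32(msg, NL80211_ATTR_IFINDEX, ifindex);
    ssids = nla_nest_start(msg, NL80211_ATTR_SCAN_SSIDS);
    nla_put(msg, 1, strlen(ssid), ssid); /* directed probe request */
    nla_put(msg, 2, 0, "");              /* wildcard (broadcast) SSID */
    nla_nest_end(msg, ssids);
    err = nl_send_auto(sk, msg);
    nlmsg_free(msg);
    return err < 0 ? err : 0; /* NEW_SCAN_RESULTS arrives on "scan" group */
}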
* @NL80211_CMD_SCHED_SCAN_RESULTS: indicates that there are scheduled scan * results available. * @NL80211_CMD_SCHED_SCAN_STOPPED: indicates that the scheduled scan has * stopped. The driver may issue this event at any time during a * scheduled scan. One reason for stopping the scan is if the hardware * does not support starting an association or a normal scan while running * a scheduled scan. This event is also sent when the * %NL80211_CMD_STOP_SCHED_SCAN command is received or when the interface * is brought down while a scheduled scan was running. * * @NL80211_CMD_GET_SURVEY: get survey results, e.g. channel occupation * or noise level * @NL80211_CMD_NEW_SURVEY_RESULTS: survey data notification (as a reply to * NL80211_CMD_GET_SURVEY and on the "scan" multicast group) * * @NL80211_CMD_SET_PMKSA: Add a PMKSA cache entry using %NL80211_ATTR_MAC * (for the BSSID), %NL80211_ATTR_PMKID, and optionally %NL80211_ATTR_PMK * (PMK is used for PTKSA derivation in case of FILS shared key offload) or * using %NL80211_ATTR_SSID, %NL80211_ATTR_FILS_CACHE_ID, * %NL80211_ATTR_PMKID, and %NL80211_ATTR_PMK in case of FILS * authentication where %NL80211_ATTR_FILS_CACHE_ID is the identifier * advertised by a FILS capable AP identifying the scope of PMKSA in an * ESS. * @NL80211_CMD_DEL_PMKSA: Delete a PMKSA cache entry, using %NL80211_ATTR_MAC * (for the BSSID) and %NL80211_ATTR_PMKID or using %NL80211_ATTR_SSID, * %NL80211_ATTR_FILS_CACHE_ID, and %NL80211_ATTR_PMKID in case of FILS * authentication. * @NL80211_CMD_FLUSH_PMKSA: Flush all PMKSA cache entries. * * @NL80211_CMD_REG_CHANGE: indicates to userspace the regulatory domain * has been changed and provides details of the request information * that caused the change such as who initiated the regulatory request * (%NL80211_ATTR_REG_INITIATOR), the wiphy_idx * (%NL80211_ATTR_REG_ALPHA2) from which the request was made if * the initiator was %NL80211_REGDOM_SET_BY_COUNTRY_IE or * %NL80211_REGDOM_SET_BY_DRIVER, the type of regulatory domain * set (%NL80211_ATTR_REG_TYPE), if the type of regulatory domain is * %NL80211_REG_TYPE_COUNTRY the alpha2 to which we have moved on * to (%NL80211_ATTR_REG_ALPHA2). * @NL80211_CMD_REG_BEACON_HINT: indicates to userspace that an AP beacon * has been found while world roaming thus enabling active scan or * any mode of operation that initiates TX (beacons) on a channel * where we would not have been able to do either before. As an example * if you are world roaming (regulatory domain set to world or if your * driver is using a custom world roaming regulatory domain) and while * doing a passive scan on the 5 GHz band you find an AP there (if not * on a DFS channel) you will now be able to actively scan for that AP * or use AP mode on your card on that same channel. Note that this will * never be used for channels 1-11 on the 2 GHz band as they are always * enabled world wide. This beacon hint is only sent if your device had * either disabled active scanning or beaconing on a channel. We send to * userspace the wiphy on which we removed a restriction from * (%NL80211_ATTR_WIPHY) and the channel on which this occurred * before (%NL80211_ATTR_FREQ_BEFORE) and after (%NL80211_ATTR_FREQ_AFTER) * the beacon hint was processed. * * @NL80211_CMD_AUTHENTICATE: authentication request and notification. * This command is used both as a command (request to authenticate) and * as an event on the "mlme" multicast group indicating completion of the * authentication process.
* When used as a command, %NL80211_ATTR_IFINDEX is used to identify the * interface. %NL80211_ATTR_MAC is used to specify PeerSTAAddress (and * BSSID in case of station mode). %NL80211_ATTR_SSID is used to specify * the SSID (mainly for association, but is included in authentication * request, too, to help BSS selection). %NL80211_ATTR_WIPHY_FREQ is used * to specify the frequency of the channel in MHz. %NL80211_ATTR_AUTH_TYPE * is used to specify the authentication type. %NL80211_ATTR_IE is used to * define IEs (VendorSpecificInfo, but also including RSN IE and FT IEs) * to be added to the frame. * When used as an event, this reports reception of an Authentication * frame in station and IBSS modes when the local MLME processed the * frame, i.e., it was for the local STA and was received in correct * state. This is similar to MLME-AUTHENTICATE.confirm primitive in the * MLME SAP interface (kernel providing MLME, userspace SME). The * included %NL80211_ATTR_FRAME attribute contains the management frame * (including both the header and frame body, but not FCS). This event is * also used to indicate if the authentication attempt timed out. In that * case the %NL80211_ATTR_FRAME attribute is replaced with a * %NL80211_ATTR_TIMED_OUT flag (and %NL80211_ATTR_MAC to indicate which * pending authentication timed out). * @NL80211_CMD_ASSOCIATE: association request and notification; like * NL80211_CMD_AUTHENTICATE but for Association and Reassociation * (similar to MLME-ASSOCIATE.request, MLME-REASSOCIATE.request, * MLME-ASSOCIATE.confirm or MLME-REASSOCIATE.confirm primitives). The * %NL80211_ATTR_PREV_BSSID attribute is used to specify whether the * request is for the initial association to an ESS (that attribute not * included) or for reassociation within the ESS (that attribute is * included). * @NL80211_CMD_DEAUTHENTICATE: deauthentication request and notification; like * NL80211_CMD_AUTHENTICATE but for Deauthentication frames (similar to * MLME-DEAUTHENTICATION.request and MLME-DEAUTHENTICATE.indication * primitives). * @NL80211_CMD_DISASSOCIATE: disassociation request and notification; like * NL80211_CMD_AUTHENTICATE but for Disassociation frames (similar to * MLME-DISASSOCIATE.request and MLME-DISASSOCIATE.indication primitives). * * @NL80211_CMD_MICHAEL_MIC_FAILURE: notification of a locally detected Michael * MIC (part of TKIP) failure; sent on the "mlme" multicast group; the * event includes %NL80211_ATTR_MAC to describe the source MAC address of * the frame with invalid MIC, %NL80211_ATTR_KEY_TYPE to show the key * type, %NL80211_ATTR_KEY_IDX to indicate the key identifier, and * %NL80211_ATTR_KEY_SEQ to indicate the TSC value of the frame; this * event matches with MLME-MICHAELMICFAILURE.indication() primitive * * @NL80211_CMD_JOIN_IBSS: Join a new IBSS -- given at least an SSID and a * FREQ attribute (for the initial frequency if no peer can be found) * and optionally a MAC (as BSSID) and FREQ_FIXED attribute if those * should be fixed rather than automatically determined. Can only be * executed on a network interface that is UP, and fixed BSSID/FREQ * may be rejected. Another optional parameter is the beacon interval, * given in the %NL80211_ATTR_BEACON_INTERVAL attribute, which if not * given defaults to 100 TU (102.4ms). * @NL80211_CMD_LEAVE_IBSS: Leave the IBSS -- no special arguments, the IBSS is * determined by the network interface.
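A hedged example of %NL80211_CMD_JOIN_IBSS with the mandatory SSID and initial frequency attributes described above; the "my-ibss" name and the 2412 MHz channel are placeholders, and the BSSID is left for the kernel to choose.

#include <errno.h>
#include <netlink/genl/genl.h>
#include <linux/nl80211.h>

/* Join (or create) an IBSS; the interface must be UP. */
static int join_ibss(struct nl_sock *sk, int family, int ifindex)
{
    struct nl_msg *msg = nlmsg_alloc();
    int err;

    if (!msg)
        return -ENOMEM;
    genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
                NL80211_CMD_JOIN_IBSS, 0);
    nla_put_u32(msg, NL80211_ATTR_IFINDEX, ifindex);
    nla_put(msg, NL80211_ATTR_SSID, 7, "my-ibss");
    nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, 2412); /* initial frequency */
    err = nl_send_auto(sk, msg);
    nlmsg_free(msg);
    return err < 0 ? err : 0;
}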
* * @NL80211_CMD_TESTMODE: testmode command, takes a wiphy (or ifindex) attribute * to identify the device, and the TESTDATA blob attribute to pass through * to the driver. * * @NL80211_CMD_CONNECT: connection request and notification; this command * requests to connect to a specified network but without separating * auth and assoc steps. For this, you need to specify the SSID in a * %NL80211_ATTR_SSID attribute, and can optionally specify the association * IEs in %NL80211_ATTR_IE, %NL80211_ATTR_AUTH_TYPE, %NL80211_ATTR_USE_MFP, * %NL80211_ATTR_MAC, %NL80211_ATTR_WIPHY_FREQ, %NL80211_ATTR_CONTROL_PORT, * %NL80211_ATTR_CONTROL_PORT_ETHERTYPE, * %NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT, * %NL80211_ATTR_CONTROL_PORT_OVER_NL80211, %NL80211_ATTR_MAC_HINT, and * %NL80211_ATTR_WIPHY_FREQ_HINT. * If included, %NL80211_ATTR_MAC and %NL80211_ATTR_WIPHY_FREQ are * restrictions on BSS selection, i.e., they effectively prevent roaming * within the ESS. %NL80211_ATTR_MAC_HINT and %NL80211_ATTR_WIPHY_FREQ_HINT * can be included to provide a recommendation of the initial BSS while * allowing the driver to roam to other BSSes within the ESS and also to * ignore this recommendation if the indicated BSS is not ideal. Only one * set of BSSID,frequency parameters is used (i.e., either the enforcing * %NL80211_ATTR_MAC,%NL80211_ATTR_WIPHY_FREQ or the less strict * %NL80211_ATTR_MAC_HINT and %NL80211_ATTR_WIPHY_FREQ_HINT). * %NL80211_ATTR_PREV_BSSID can be used to request a reassociation within * the ESS in case the device is already associated and an association with * a different BSS is desired. * The background scan period can optionally be * specified in %NL80211_ATTR_BG_SCAN_PERIOD; * if it is not specified, the driver's default background scan * configuration is used, and if the period value is 0, background scan * is disabled. * This attribute is ignored if the driver does not support roam scan. * It is also sent as an event, with the BSSID and response IEs when the * connection is established or failed to be established. This can be * determined by the %NL80211_ATTR_STATUS_CODE attribute (0 = success, * non-zero = failure). If %NL80211_ATTR_TIMED_OUT is included in the * event, the connection attempt failed due to not being able to initiate * authentication/association or not receiving a response from the AP. * Non-zero %NL80211_ATTR_STATUS_CODE value is indicated in that case as * well to remain backwards compatible. * When establishing a security association, drivers that support 4 way * handshake offload should send %NL80211_CMD_PORT_AUTHORIZED event when * the 4 way handshake is completed successfully. * @NL80211_CMD_ROAM: Notification indicating the card/driver roamed by itself. * When a security association was established with the new AP (e.g. if * the FT protocol was used for roaming or the driver completed the 4 way * handshake), this event should be followed by an * %NL80211_CMD_PORT_AUTHORIZED event. * @NL80211_CMD_DISCONNECT: drop a given connection; also used to notify * userspace that a connection was dropped by the AP or due to other * reasons, for this the %NL80211_ATTR_DISCONNECTED_BY_AP and * %NL80211_ATTR_REASON_CODE attributes are used. * * @NL80211_CMD_SET_WIPHY_NETNS: Set a wiphy's netns. Note that all devices * associated with this wiphy must be down and will follow. * * @NL80211_CMD_REMAIN_ON_CHANNEL: Request to remain awake on the specified * channel for the specified amount of time.
This can be used to do * off-channel operations like transmit a Public Action frame and wait for * a response while being associated to an AP on another channel. * %NL80211_ATTR_IFINDEX is used to specify which interface (and thus * radio) is used. %NL80211_ATTR_WIPHY_FREQ is used to specify the * frequency for the operation. * %NL80211_ATTR_DURATION is used to specify the duration in milliseconds * to remain on the channel. This command is also used as an event to * notify when the requested duration starts (it may take a while for the * driver to schedule this time due to other concurrent needs for the * radio). * When called, this operation returns a cookie (%NL80211_ATTR_COOKIE) * that will be included with any events pertaining to this request; * the cookie is also used to cancel the request. * @NL80211_CMD_CANCEL_REMAIN_ON_CHANNEL: This command can be used to cancel a * pending remain-on-channel duration if the desired operation has been * completed prior to expiration of the originally requested duration. * %NL80211_ATTR_WIPHY or %NL80211_ATTR_IFINDEX is used to specify the * radio. The %NL80211_ATTR_COOKIE attribute must be given as well to * uniquely identify the request. * This command is also used as an event to notify when a requested * remain-on-channel duration has expired. * * @NL80211_CMD_SET_TX_BITRATE_MASK: Set the mask of rates to be used in TX * rate selection. %NL80211_ATTR_IFINDEX is used to specify the interface * and @NL80211_ATTR_TX_RATES the set of allowed rates. * * @NL80211_CMD_REGISTER_FRAME: Register for receiving certain mgmt frames * (via @NL80211_CMD_FRAME) for processing in userspace. This command * requires an interface index, a frame type attribute (optional for * backward compatibility reasons, if not given assumes action frames) * and a match attribute containing the first few bytes of the frame * that should match, e.g. a single byte for only a category match or * four bytes for vendor frames including the OUI. The registration * cannot be dropped, but is removed automatically when the netlink * socket is closed. Multiple registrations can be made. * @NL80211_CMD_REGISTER_ACTION: Alias for @NL80211_CMD_REGISTER_FRAME for * backward compatibility * @NL80211_CMD_FRAME: Management frame TX request and RX notification. This * command is used both as a request to transmit a management frame and * as an event indicating reception of a frame that was not processed in * kernel code, but is for us (i.e., which may need to be processed in a * user space application). %NL80211_ATTR_FRAME is used to specify the * frame contents (including header). %NL80211_ATTR_WIPHY_FREQ is used * to indicate on which channel the frame is to be transmitted or was * received. If this channel is not the current channel (remain-on-channel * or the operational channel), the device will switch to the given channel * and transmit the frame, optionally waiting for a response for the time * specified using %NL80211_ATTR_DURATION. When called, this operation * returns a cookie (%NL80211_ATTR_COOKIE) that will be included with the * TX status event pertaining to the TX request. * %NL80211_ATTR_TX_NO_CCK_RATE is used to decide whether to send the * management frames at CCK rate or not in the 2 GHz band. * %NL80211_ATTR_CSA_C_OFFSETS_TX is an array of offsets to CSA * counters which will be updated to the current value. This attribute * is used during CSA period.
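A sketch of %NL80211_CMD_REGISTER_FRAME subscribing to Action frames with a one-byte category match, following the rules above; libnl-3 is assumed, and 0x00d0 is the management/Action frame type. The registration lasts as long as the netlink socket stays open.

#include <errno.h>
#include <stdint.h>
#include <netlink/genl/genl.h>
#include <linux/nl80211.h>

/* Ask to receive Action frames whose category octet equals "category". */
static int register_action_frames(struct nl_sock *sk, int family,
                                  int ifindex, uint8_t category)
{
    struct nl_msg *msg = nlmsg_alloc();
    int err;

    if (!msg)
        return -ENOMEM;
    genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
                NL80211_CMD_REGISTER_FRAME, 0);
    nla_put_u32(msg, NL80211_ATTR_IFINDEX, ifindex);
    nla_put_u16(msg, NL80211_ATTR_FRAME_TYPE, 0x00d0);   /* mgmt/Action */
    nla_put(msg, NL80211_ATTR_FRAME_MATCH, 1, &category); /* first byte */
    err = nl_send_auto(sk, msg);
    nlmsg_free(msg);
    return err < 0 ? err : 0; /* matching frames arrive as NL80211_CMD_FRAME */
}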
* @NL80211_CMD_FRAME_WAIT_CANCEL: When an off-channel TX was requested, this * command may be used with the corresponding cookie to cancel the wait * time if it is known that it is no longer necessary. * @NL80211_CMD_ACTION: Alias for @NL80211_CMD_FRAME for backward compatibility. * @NL80211_CMD_FRAME_TX_STATUS: Report TX status of a management frame * transmitted with %NL80211_CMD_FRAME. %NL80211_ATTR_COOKIE identifies * the TX command and %NL80211_ATTR_FRAME includes the contents of the * frame. %NL80211_ATTR_ACK flag is included if the recipient acknowledged * the frame. * @NL80211_CMD_ACTION_TX_STATUS: Alias for @NL80211_CMD_FRAME_TX_STATUS for * backward compatibility. * * @NL80211_CMD_SET_POWER_SAVE: Set powersave, using %NL80211_ATTR_PS_STATE * @NL80211_CMD_GET_POWER_SAVE: Get powersave status in %NL80211_ATTR_PS_STATE * * @NL80211_CMD_SET_CQM: Connection quality monitor configuration. This command * is used to configure connection quality monitoring notification trigger * levels. * @NL80211_CMD_NOTIFY_CQM: Connection quality monitor notification. This * command is used as an event to indicate that a trigger level was * reached. * @NL80211_CMD_SET_CHANNEL: Set the channel (using %NL80211_ATTR_WIPHY_FREQ * and the attributes determining channel width) the given interface * (identified by %NL80211_ATTR_IFINDEX) shall operate on. * In case multiple channels are supported by the device, the mechanism * with which it switches channels is implementation-defined. * When a monitor interface is given, it can only switch channel while * no other interfaces are operating to avoid disturbing the operation * of any other interfaces, and other interfaces will again take * precedence when they are used. * * @NL80211_CMD_SET_WDS_PEER: Set the MAC address of the peer on a WDS interface. * * @NL80211_CMD_SET_MULTICAST_TO_UNICAST: Configure if this AP should perform * multicast to unicast conversion. When enabled, all multicast packets * with ethertype ARP, IPv4 or IPv6 (possibly within an 802.1Q header) * will be sent out to each station once with the destination (multicast) * MAC address replaced by the station's MAC address. Note that this may * break certain expectations of the receiver, e.g. the ability to drop * unicast IP packets encapsulated in multicast L2 frames, or the ability * to not send destination unreachable messages in such cases. * This can only be toggled per BSS. Configure this on an interface of * type %NL80211_IFTYPE_AP. It applies to all its VLAN interfaces * (%NL80211_IFTYPE_AP_VLAN), except for those in 4addr (WDS) mode. * If %NL80211_ATTR_MULTICAST_TO_UNICAST_ENABLED is not present with this * command, the feature is disabled. * * @NL80211_CMD_JOIN_MESH: Join a mesh. The mesh ID must be given, and initial * mesh config parameters may be given. * @NL80211_CMD_LEAVE_MESH: Leave the mesh network -- no special arguments, the * network is determined by the network interface. * * @NL80211_CMD_UNPROT_DEAUTHENTICATE: Unprotected deauthentication frame * notification. This event is used to indicate that an unprotected * deauthentication frame was dropped when MFP is in use. * @NL80211_CMD_UNPROT_DISASSOCIATE: Unprotected disassociation frame * notification. This event is used to indicate that an unprotected * disassociation frame was dropped when MFP is in use. * * @NL80211_CMD_NEW_PEER_CANDIDATE: Notification on the reception of a * beacon or probe response from a compatible mesh peer.
This is only * sent while no station information (sta_info) exists for the new peer * candidate and when @NL80211_MESH_SETUP_USERSPACE_AUTH, * @NL80211_MESH_SETUP_USERSPACE_AMPE, or * @NL80211_MESH_SETUP_USERSPACE_MPM is set. On reception of this * notification, userspace may decide to create a new station * (@NL80211_CMD_NEW_STATION). To stop this notification from * reoccurring, the userspace authentication daemon may want to create the * new station with the AUTHENTICATED flag unset and maybe change it later * depending on the authentication result. * * @NL80211_CMD_GET_WOWLAN: get Wake-on-Wireless-LAN (WoWLAN) settings. * @NL80211_CMD_SET_WOWLAN: set Wake-on-Wireless-LAN (WoWLAN) settings. * Since wireless is more complex than wired ethernet, it supports * various triggers. These triggers can be configured through this * command with the %NL80211_ATTR_WOWLAN_TRIGGERS attribute. For * more background information, see * http://wireless.kernel.org/en/users/Documentation/WoWLAN. * The @NL80211_CMD_SET_WOWLAN command can also be used as a notification * from the driver reporting the wakeup reason. In this case, the * @NL80211_ATTR_WOWLAN_TRIGGERS attribute will contain the reason * for the wakeup, if it was caused by wireless. If it is not present * in the wakeup notification, the wireless device didn't cause the * wakeup but reports that it was woken up. * * @NL80211_CMD_SET_REKEY_OFFLOAD: This command is used to give the driver * the necessary information for supporting GTK rekey offload. This * feature is typically used during WoWLAN. The configuration data * is contained in %NL80211_ATTR_REKEY_DATA (which is nested and * contains the data in sub-attributes). After rekeying happened, * this command may also be sent by the driver as an MLME event to * inform userspace of the new replay counter. * * @NL80211_CMD_PMKSA_CANDIDATE: This is used as an event to inform userspace * of PMKSA caching candidates. * * @NL80211_CMD_TDLS_OPER: Perform a high-level TDLS command (e.g. link setup). * In addition, this can be used as an event to request userspace to take * actions on TDLS links (set up a new link or tear down an existing one). * In such events, %NL80211_ATTR_TDLS_OPERATION indicates the requested * operation, %NL80211_ATTR_MAC contains the peer MAC address, and * %NL80211_ATTR_REASON_CODE the reason code to be used (only with * %NL80211_TDLS_TEARDOWN). * @NL80211_CMD_TDLS_MGMT: Send a TDLS management frame. The * %NL80211_ATTR_TDLS_ACTION attribute determines the type of frame to be * sent. Public Action codes (802.11-2012 8.1.5.1) will be sent as * 802.11 management frames, while TDLS action codes (802.11-2012 * 8.5.13.1) will be encapsulated and sent as data frames. The currently * supported Public Action code is %WLAN_PUB_ACTION_TDLS_DISCOVER_RES * and the currently supported TDLS action codes are given in * &enum ieee80211_tdls_actioncode. * * @NL80211_CMD_UNEXPECTED_FRAME: Used by an application controlling an AP * (or GO) interface (i.e. hostapd) to ask for unexpected frames to * implement sending deauth to stations that send unexpected class 3 * frames. Also used as the event sent by the kernel when such a frame * is received. * For the event, the %NL80211_ATTR_MAC attribute carries the TA and * other attributes like the interface index are present. * If used as the command it must have an interface index and you can * only unsubscribe from the event by closing the socket. Subscription * is also for %NL80211_CMD_UNEXPECTED_4ADDR_FRAME events.
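An illustrative %NL80211_CMD_SET_WOWLAN use arming only the disconnect trigger, with the nesting following the %NL80211_ATTR_WOWLAN_TRIGGERS description above; targeting the wiphy by index and the function name are assumptions of this libnl-3 sketch.

#include <errno.h>
#include <stdint.h>
#include <netlink/genl/genl.h>
#include <linux/nl80211.h>

/* Wake the host on disconnect while in WoWLAN; all other triggers off. */
static int enable_wowlan_disconnect(struct nl_sock *sk, int family,
                                    uint32_t wiphy)
{
    struct nl_msg *msg = nlmsg_alloc();
    struct nlattr *trig;
    int err;

    if (!msg)
        return -ENOMEM;
    genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
                NL80211_CMD_SET_WOWLAN, 0);
    nla_put_u32(msg, NL80211_ATTR_WIPHY, wiphy);
    trig = nla_nest_start(msg, NL80211_ATTR_WOWLAN_TRIGGERS);
    nla_put_flag(msg, NL80211_WOWLAN_TRIG_DISCONNECT); /* flag sub-attribute */
    nla_nest_end(msg, trig);
    err = nl_send_auto(sk, msg);
    nlmsg_free(msg);
    return err < 0 ? err : 0;
}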
* * @NL80211_CMD_UNEXPECTED_4ADDR_FRAME: Sent as an event indicating that the * associated station identified by %NL80211_ATTR_MAC sent a 4addr frame * and wasn't already in a 4-addr VLAN. The event will be sent similarly * to the %NL80211_CMD_UNEXPECTED_FRAME event, to the same listener. * * @NL80211_CMD_PROBE_CLIENT: Probe an associated station on an AP interface * by sending a null data frame to it and reporting when the frame is * acknowledged. This is used to allow timing out inactive clients. Uses * %NL80211_ATTR_IFINDEX and %NL80211_ATTR_MAC. The command returns a * direct reply with an %NL80211_ATTR_COOKIE that is later used to match * up the event with the request. The event includes the same data and * has %NL80211_ATTR_ACK set if the frame was ACKed. * * @NL80211_CMD_REGISTER_BEACONS: Register this socket to receive beacons from * other BSSes when any interfaces are in AP mode. This helps implement * OLBC handling in hostapd. Beacons are reported in %NL80211_CMD_FRAME * messages. Note that per PHY only one application may register. * * @NL80211_CMD_SET_NOACK_MAP: sets a bitmap for the individual TIDs whether * No Acknowledgement Policy should be applied. * * @NL80211_CMD_CH_SWITCH_NOTIFY: An AP or GO may decide to switch channels * independently of the userspace SME, send this event indicating * %NL80211_ATTR_IFINDEX is now on %NL80211_ATTR_WIPHY_FREQ and the * attributes determining channel width. This indication may also be * sent when a remotely-initiated switch (e.g., when a STA receives a CSA * from the remote AP) is completed. * * @NL80211_CMD_CH_SWITCH_STARTED_NOTIFY: Notify that a channel switch * has been started on an interface, regardless of the initiator * (i.e. whether it was requested from a remote device or * initiated on our own). It indicates that * %NL80211_ATTR_IFINDEX will be on %NL80211_ATTR_WIPHY_FREQ * after %NL80211_ATTR_CH_SWITCH_COUNT TBTTs. The userspace may * decide to react to this indication by requesting other * interfaces to change channel as well. * * @NL80211_CMD_START_P2P_DEVICE: Start the given P2P Device, identified by * its %NL80211_ATTR_WDEV identifier. It must have been created with * %NL80211_CMD_NEW_INTERFACE previously. After it has been started, the * P2P Device can be used for P2P operations, e.g. remain-on-channel and * public action frame TX. * @NL80211_CMD_STOP_P2P_DEVICE: Stop the given P2P Device, identified by * its %NL80211_ATTR_WDEV identifier. * * @NL80211_CMD_CONN_FAILED: connection request to an AP failed; used to * notify userspace that AP has rejected the connection request from a * station, due to particular reason. %NL80211_ATTR_CONN_FAILED_REASON * is used for this. * * @NL80211_CMD_SET_MCAST_RATE: Change the rate used to send multicast frames * for IBSS or MESH vif. * * @NL80211_CMD_SET_MAC_ACL: sets ACL for MAC address based access control. * This is to be used with the drivers advertising the support of MAC * address based access control. List of MAC addresses is passed in * %NL80211_ATTR_MAC_ADDRS and ACL policy is passed in * %NL80211_ATTR_ACL_POLICY. Driver will enable ACL with this list, if it * is not already done. The new list will replace any existing list. Driver * will clear its ACL when the list of MAC addresses passed is empty. This * command is used in AP/P2P GO mode. Driver has to make sure to clear its * ACL list during %NL80211_CMD_STOP_AP. * * @NL80211_CMD_RADAR_DETECT: Start a Channel availability check (CAC).
Once * the channel availability check (CAC) has finished or was aborted, or a * radar was detected, userspace will be notified with this event. This * command is also used to notify userspace about radars while operating * on this channel. * %NL80211_ATTR_RADAR_EVENT is used to inform about the type of the * event. * * @NL80211_CMD_GET_PROTOCOL_FEATURES: Get global nl80211 protocol features, * i.e. features for the nl80211 protocol rather than device features. * Returns the features in the %NL80211_ATTR_PROTOCOL_FEATURES bitmap. * * @NL80211_CMD_UPDATE_FT_IES: Pass down the most up-to-date Fast Transition * Information Element to the WLAN driver * * @NL80211_CMD_FT_EVENT: Send a Fast transition event from the WLAN driver * to the supplicant. This will carry the target AP's MAC address along * with the relevant Information Elements. This event is used to report * received FT IEs (MDIE, FTIE, RSN IE, TIE, RICIE). * * @NL80211_CMD_CRIT_PROTOCOL_START: Indicates user-space will start running * a critical protocol that needs more reliability in the connection to * complete. * * @NL80211_CMD_CRIT_PROTOCOL_STOP: Indicates the connection reliability can * return to normal. * * @NL80211_CMD_GET_COALESCE: Get currently supported coalesce rules. * @NL80211_CMD_SET_COALESCE: Configure coalesce rules or clear existing rules. * * @NL80211_CMD_CHANNEL_SWITCH: Perform a channel switch by announcing the * new channel information (Channel Switch Announcement - CSA) * in the beacon for some time (as defined in the * %NL80211_ATTR_CH_SWITCH_COUNT parameter) and then change to the * new channel. Userspace provides the new channel information (using * %NL80211_ATTR_WIPHY_FREQ and the attributes determining channel * width). %NL80211_ATTR_CH_SWITCH_BLOCK_TX may be supplied to inform * other stations that transmission must be blocked until the channel * switch is complete. * * @NL80211_CMD_VENDOR: Vendor-specified command/event. The command is specified * by the %NL80211_ATTR_VENDOR_ID attribute and a sub-command in * %NL80211_ATTR_VENDOR_SUBCMD. Parameter(s) can be transported in * %NL80211_ATTR_VENDOR_DATA. * For feature advertisement, the %NL80211_ATTR_VENDOR_DATA attribute is * used in the wiphy data as a nested attribute containing descriptions * (&struct nl80211_vendor_cmd_info) of the supported vendor commands. * This may also be sent as an event with the same attributes. * * @NL80211_CMD_SET_QOS_MAP: Set Interworking QoS mapping for IP DSCP values. * The QoS mapping information is included in %NL80211_ATTR_QOS_MAP. If * that attribute is not included, QoS mapping is disabled. Since this * QoS mapping is relevant for IP packets, it is only valid during an * association. This is cleared on disassociation and AP restart. * * @NL80211_CMD_ADD_TX_TS: Ask the kernel to add a traffic stream for the given * %NL80211_ATTR_TSID and %NL80211_ATTR_MAC with %NL80211_ATTR_USER_PRIO * and %NL80211_ATTR_ADMITTED_TIME parameters. * Note that the action frame handshake with the AP shall be handled by * userspace via the normal management RX/TX framework; this only sets * up the TX TS in the driver/device. * If the admitted time attribute is not added then the request just checks * if a subsequent setup could be successful; the intent is to use this to * avoid setting up a session with the AP when local restrictions would * make that impossible. However, the subsequent "real" setup may still * fail even if the check was successful.
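Since %NL80211_CMD_VENDOR is fully driver-specific, here is a generic, hedged libnl-3 sketch of issuing one; the OUI, sub-command number and payload are entirely up to the driver in question, and everything here except the nl80211 attribute names is an illustrative assumption.

#include <errno.h>
#include <stdint.h>
#include <netlink/genl/genl.h>
#include <linux/nl80211.h>

/* Send an opaque vendor command; the reply/event format is vendor-defined. */
static int send_vendor_cmd(struct nl_sock *sk, int family, int ifindex,
                           uint32_t oui, uint32_t subcmd,
                           const void *data, int len)
{
    struct nl_msg *msg = nlmsg_alloc();
    int err;

    if (!msg)
        return -ENOMEM;
    genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
                NL80211_CMD_VENDOR, 0);
    nla_put_u32(msg, NL80211_ATTR_IFINDEX, ifindex);
    nla_put_u32(msg, NL80211_ATTR_VENDOR_ID, oui);
    nla_put_u32(msg, NL80211_ATTR_VENDOR_SUBCMD, subcmd);
    if (len)
        nla_put(msg, NL80211_ATTR_VENDOR_DATA, len, data);
    err = nl_send_auto(sk, msg);
    nlmsg_free(msg);
    return err < 0 ? err : 0;
}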
* @NL80211_CMD_DEL_TX_TS: Remove an existing TS with the %NL80211_ATTR_TSID * and %NL80211_ATTR_MAC parameters. It isn't necessary to call this * before removing a station entry entirely, or before disassociating * or similar; cleanup will happen in the driver/device in this case. * * @NL80211_CMD_GET_MPP: Get mesh path attributes for mesh proxy path to * destination %NL80211_ATTR_MAC on the interface identified by * %NL80211_ATTR_IFINDEX. * * @NL80211_CMD_JOIN_OCB: Join the OCB network. The center frequency and * bandwidth of a channel must be given. * @NL80211_CMD_LEAVE_OCB: Leave the OCB network -- no special arguments, the * network is determined by the network interface. * * @NL80211_CMD_TDLS_CHANNEL_SWITCH: Start channel-switching with a TDLS peer, * identified by the %NL80211_ATTR_MAC parameter. A target channel is * provided via %NL80211_ATTR_WIPHY_FREQ and other attributes determining * channel width/type. The target operating class is given via * %NL80211_ATTR_OPER_CLASS. * The driver is responsible for continually initiating channel-switching * operations and returning to the base channel for communication with the * AP. * @NL80211_CMD_TDLS_CANCEL_CHANNEL_SWITCH: Stop channel-switching with a TDLS * peer given by %NL80211_ATTR_MAC. Both peers must be on the base channel * when this command completes. * * @NL80211_CMD_WIPHY_REG_CHANGE: Similar to %NL80211_CMD_REG_CHANGE, but used * as an event to indicate changes for devices with wiphy-specific regdom * management. * * @NL80211_CMD_ABORT_SCAN: Stop an ongoing scan. Returns -ENOENT if a scan is * not running. The driver indicates the status of the scan through * cfg80211_scan_done(). * * @NL80211_CMD_START_NAN: Start NAN operation, identified by its * %NL80211_ATTR_WDEV interface. This interface must have been * previously created with %NL80211_CMD_NEW_INTERFACE. After it * has been started, the NAN interface will create or join a * cluster. This command must have a valid * %NL80211_ATTR_NAN_MASTER_PREF attribute and optional * %NL80211_ATTR_BANDS, %NL80211_ATTR_NAN_CDW_2G and * %NL80211_ATTR_NAN_CDW_5G attributes. If %NL80211_ATTR_BANDS is * omitted or set to 0, it means don't-care and the device will * decide what to use. After this command NAN functions can be * added. * @NL80211_CMD_STOP_NAN: Stop the NAN operation, identified by * its %NL80211_ATTR_WDEV interface. * @NL80211_CMD_ADD_NAN_FUNCTION: Add a NAN function. The function is defined * with %NL80211_ATTR_NAN_FUNC nested attribute. When called, this * operation returns the strictly positive and unique instance id * (%NL80211_ATTR_NAN_FUNC_INST_ID) and a cookie (%NL80211_ATTR_COOKIE) * of the function upon success. * Since instance IDs can be re-used, this cookie is the right * way to identify the function. This will avoid races when a termination * event is handled by the user space after it has already added a new * function that got the same instance id from the kernel as the one * which just terminated. * This cookie may be used in NAN events even before the command * returns, so userspace shouldn't process NAN events until it processes * the response to this command. * Look at %NL80211_ATTR_SOCKET_OWNER as well. * @NL80211_CMD_DEL_NAN_FUNCTION: Delete a NAN function by cookie. * This command is also used as a notification sent when a NAN function is * terminated. This will contain a %NL80211_ATTR_NAN_FUNC_INST_ID * and %NL80211_ATTR_COOKIE attributes. * @NL80211_CMD_CHANGE_NAN_CONFIG: Change current NAN * configuration.
NAN must be operational (%NL80211_CMD_START_NAN * was executed). It must contain at least one of the following * attributes: %NL80211_ATTR_NAN_MASTER_PREF, * %NL80211_ATTR_BANDS, %NL80211_ATTR_NAN_CDW_2G, %NL80211_ATTR_NAN_CDW_5G. * If %NL80211_ATTR_BANDS is present but set to zero, the configuration is * changed to don't-care (i.e. the device can decide what to do). * @NL80211_CMD_NAN_MATCH: Notification sent when a match is reported. * This will contain a %NL80211_ATTR_NAN_MATCH nested attribute and * %NL80211_ATTR_COOKIE. * * @NL80211_CMD_UPDATE_CONNECT_PARAMS: Update one or more connect parameters * for subsequent roaming cases if the driver or firmware uses internal * BSS selection. This command can be issued only while connected and it * does not result in a change for the current association. Currently, * only the %NL80211_ATTR_IE data is used and updated with this command. * * @NL80211_CMD_SET_PMK: For offloaded 4-Way handshake, set the PMK or PMK-R0 * for the given authenticator address (specified with %NL80211_ATTR_MAC). * When %NL80211_ATTR_PMKR0_NAME is set, %NL80211_ATTR_PMK specifies the * PMK-R0, otherwise it specifies the PMK. * @NL80211_CMD_DEL_PMK: For offloaded 4-Way handshake, delete the previously * configured PMK for the authenticator address identified by * %NL80211_ATTR_MAC. * @NL80211_CMD_PORT_AUTHORIZED: An event that indicates that the 4 way * handshake was completed successfully by the driver. The BSSID is * specified with %NL80211_ATTR_MAC. Drivers that support 4 way handshake * offload should send this event after indicating 802.11 association with * %NL80211_CMD_CONNECT or %NL80211_CMD_ROAM. If the 4 way handshake failed * %NL80211_CMD_DISCONNECT should be indicated instead. * * @NL80211_CMD_CONTROL_PORT_FRAME: Control Port (e.g. PAE) frame TX request * and RX notification. This command is used both as a request to transmit * a control port frame and as a notification that a control port frame * has been received. %NL80211_ATTR_FRAME is used to specify the * frame contents. The frame is the raw EAPoL data, without ethernet or * 802.11 headers. * When used as an event indication %NL80211_ATTR_CONTROL_PORT_ETHERTYPE, * %NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT and %NL80211_ATTR_MAC are added * indicating the protocol type of the received frame; whether the frame * was received unencrypted and the MAC address of the peer respectively. * * @NL80211_CMD_RELOAD_REGDB: Request that the regdb firmware file is reloaded. * * @NL80211_CMD_EXTERNAL_AUTH: This interface is exclusively defined for host * drivers that do not define separate commands for authentication and * association, but rely on user space for the authentication to happen. * This interface acts both as the event request (driver to user space) * to trigger the authentication and command response (userspace to * driver) to indicate the authentication status. * * User space uses the %NL80211_CMD_CONNECT command to the host driver to * trigger a connection. The host driver selects a BSS and further uses * this interface to offload only the authentication part to the user * space. Authentication frames are passed between the driver and user * space through the %NL80211_CMD_FRAME interface. Host driver proceeds * further with the association after getting successful authentication * status. User space indicates the authentication status through * %NL80211_ATTR_STATUS_CODE attribute in %NL80211_CMD_EXTERNAL_AUTH * command interface.
In case of success, user space also includes the * derived PMK and PMKID through %NL80211_ATTR_PMK and * %NL80211_ATTR_PMKID. * * Host driver reports this status on an authentication failure to the * user space through the connect result as the user space would have * initiated the connection through the connect request. * * @NL80211_CMD_STA_OPMODE_CHANGED: An event that notifies of a station's * HT or VHT opmode change, using any of the %NL80211_ATTR_SMPS_MODE, * %NL80211_ATTR_CHANNEL_WIDTH, and %NL80211_ATTR_NSS attributes, with the * station's address specified in %NL80211_ATTR_MAC. * * @NL80211_CMD_GET_FTM_RESPONDER_STATS: Retrieve FTM responder statistics, in * the %NL80211_ATTR_FTM_RESPONDER_STATS attribute. * * @NL80211_CMD_PEER_MEASUREMENT_START: start a (set of) peer measurement(s) * with the given parameters, which are encapsulated in the nested * %NL80211_ATTR_PEER_MEASUREMENTS attribute. Optionally, MAC address * randomization may be enabled and configured by specifying the * %NL80211_ATTR_MAC and %NL80211_ATTR_MAC_MASK attributes. * If a timeout is requested, use the %NL80211_ATTR_TIMEOUT attribute. * A u64 cookie for further %NL80211_ATTR_COOKIE use is returned in * the netlink extended ack message. * * To cancel a measurement, close the socket that requested it. * * Measurement results are reported to the socket that requested the * measurement using @NL80211_CMD_PEER_MEASUREMENT_RESULT when they * become available, so applications must ensure a large enough socket * buffer size. * * Depending on driver support it may or may not be possible to start * multiple concurrent measurements. * @NL80211_CMD_PEER_MEASUREMENT_RESULT: This command number is used for the * result notification from the driver to the requesting socket. * @NL80211_CMD_PEER_MEASUREMENT_COMPLETE: Notification only, indicating that * the measurement completed, using the measurement cookie * (%NL80211_ATTR_COOKIE). * * @NL80211_CMD_NOTIFY_RADAR: Notify the kernel that a radar signal was * detected and reported by a neighboring device on the channel * indicated by %NL80211_ATTR_WIPHY_FREQ and other attributes * determining the width and type. * * @NL80211_CMD_MAX: highest used command number * @__NL80211_CMD_AFTER_LAST: internal use */ enum nl80211_commands { /* don't change the order or add anything between, this is ABI!
*/ NL80211_CMD_UNSPEC, NL80211_CMD_GET_WIPHY, /* can dump */ NL80211_CMD_SET_WIPHY, NL80211_CMD_NEW_WIPHY, NL80211_CMD_DEL_WIPHY, NL80211_CMD_GET_INTERFACE, /* can dump */ NL80211_CMD_SET_INTERFACE, NL80211_CMD_NEW_INTERFACE, NL80211_CMD_DEL_INTERFACE, NL80211_CMD_GET_KEY, NL80211_CMD_SET_KEY, NL80211_CMD_NEW_KEY, NL80211_CMD_DEL_KEY, NL80211_CMD_GET_BEACON, NL80211_CMD_SET_BEACON, NL80211_CMD_START_AP, NL80211_CMD_NEW_BEACON = NL80211_CMD_START_AP, NL80211_CMD_STOP_AP, NL80211_CMD_DEL_BEACON = NL80211_CMD_STOP_AP, NL80211_CMD_GET_STATION, NL80211_CMD_SET_STATION, NL80211_CMD_NEW_STATION, NL80211_CMD_DEL_STATION, NL80211_CMD_GET_MPATH, NL80211_CMD_SET_MPATH, NL80211_CMD_NEW_MPATH, NL80211_CMD_DEL_MPATH, NL80211_CMD_SET_BSS, NL80211_CMD_SET_REG, NL80211_CMD_REQ_SET_REG, NL80211_CMD_GET_MESH_CONFIG, NL80211_CMD_SET_MESH_CONFIG, NL80211_CMD_SET_MGMT_EXTRA_IE /* reserved; not used */, NL80211_CMD_GET_REG, NL80211_CMD_GET_SCAN, NL80211_CMD_TRIGGER_SCAN, NL80211_CMD_NEW_SCAN_RESULTS, NL80211_CMD_SCAN_ABORTED, NL80211_CMD_REG_CHANGE, NL80211_CMD_AUTHENTICATE, NL80211_CMD_ASSOCIATE, NL80211_CMD_DEAUTHENTICATE, NL80211_CMD_DISASSOCIATE, NL80211_CMD_MICHAEL_MIC_FAILURE, NL80211_CMD_REG_BEACON_HINT, NL80211_CMD_JOIN_IBSS, NL80211_CMD_LEAVE_IBSS, NL80211_CMD_TESTMODE, NL80211_CMD_CONNECT, NL80211_CMD_ROAM, NL80211_CMD_DISCONNECT, NL80211_CMD_SET_WIPHY_NETNS, NL80211_CMD_GET_SURVEY, NL80211_CMD_NEW_SURVEY_RESULTS, NL80211_CMD_SET_PMKSA, NL80211_CMD_DEL_PMKSA, NL80211_CMD_FLUSH_PMKSA, NL80211_CMD_REMAIN_ON_CHANNEL, NL80211_CMD_CANCEL_REMAIN_ON_CHANNEL, NL80211_CMD_SET_TX_BITRATE_MASK, NL80211_CMD_REGISTER_FRAME, NL80211_CMD_REGISTER_ACTION = NL80211_CMD_REGISTER_FRAME, NL80211_CMD_FRAME, NL80211_CMD_ACTION = NL80211_CMD_FRAME, NL80211_CMD_FRAME_TX_STATUS, NL80211_CMD_ACTION_TX_STATUS = NL80211_CMD_FRAME_TX_STATUS, NL80211_CMD_SET_POWER_SAVE, NL80211_CMD_GET_POWER_SAVE, NL80211_CMD_SET_CQM, NL80211_CMD_NOTIFY_CQM, NL80211_CMD_SET_CHANNEL, NL80211_CMD_SET_WDS_PEER, NL80211_CMD_FRAME_WAIT_CANCEL, NL80211_CMD_JOIN_MESH, NL80211_CMD_LEAVE_MESH, NL80211_CMD_UNPROT_DEAUTHENTICATE, NL80211_CMD_UNPROT_DISASSOCIATE, NL80211_CMD_NEW_PEER_CANDIDATE, NL80211_CMD_GET_WOWLAN, NL80211_CMD_SET_WOWLAN, NL80211_CMD_START_SCHED_SCAN, NL80211_CMD_STOP_SCHED_SCAN, NL80211_CMD_SCHED_SCAN_RESULTS, NL80211_CMD_SCHED_SCAN_STOPPED, NL80211_CMD_SET_REKEY_OFFLOAD, NL80211_CMD_PMKSA_CANDIDATE, NL80211_CMD_TDLS_OPER, NL80211_CMD_TDLS_MGMT, NL80211_CMD_UNEXPECTED_FRAME, NL80211_CMD_PROBE_CLIENT, NL80211_CMD_REGISTER_BEACONS, NL80211_CMD_UNEXPECTED_4ADDR_FRAME, NL80211_CMD_SET_NOACK_MAP, NL80211_CMD_CH_SWITCH_NOTIFY, NL80211_CMD_START_P2P_DEVICE, NL80211_CMD_STOP_P2P_DEVICE, NL80211_CMD_CONN_FAILED, NL80211_CMD_SET_MCAST_RATE, NL80211_CMD_SET_MAC_ACL, NL80211_CMD_RADAR_DETECT, NL80211_CMD_GET_PROTOCOL_FEATURES, NL80211_CMD_UPDATE_FT_IES, NL80211_CMD_FT_EVENT, NL80211_CMD_CRIT_PROTOCOL_START, NL80211_CMD_CRIT_PROTOCOL_STOP, NL80211_CMD_GET_COALESCE, NL80211_CMD_SET_COALESCE, NL80211_CMD_CHANNEL_SWITCH, NL80211_CMD_VENDOR, NL80211_CMD_SET_QOS_MAP, NL80211_CMD_ADD_TX_TS, NL80211_CMD_DEL_TX_TS, NL80211_CMD_GET_MPP, NL80211_CMD_JOIN_OCB, NL80211_CMD_LEAVE_OCB, NL80211_CMD_CH_SWITCH_STARTED_NOTIFY, NL80211_CMD_TDLS_CHANNEL_SWITCH, NL80211_CMD_TDLS_CANCEL_CHANNEL_SWITCH, NL80211_CMD_WIPHY_REG_CHANGE, NL80211_CMD_ABORT_SCAN, NL80211_CMD_START_NAN, NL80211_CMD_STOP_NAN, NL80211_CMD_ADD_NAN_FUNCTION, NL80211_CMD_DEL_NAN_FUNCTION, NL80211_CMD_CHANGE_NAN_CONFIG, NL80211_CMD_NAN_MATCH, NL80211_CMD_SET_MULTICAST_TO_UNICAST, 
NL80211_CMD_UPDATE_CONNECT_PARAMS, NL80211_CMD_SET_PMK, NL80211_CMD_DEL_PMK, NL80211_CMD_PORT_AUTHORIZED, NL80211_CMD_RELOAD_REGDB, NL80211_CMD_EXTERNAL_AUTH, NL80211_CMD_STA_OPMODE_CHANGED, NL80211_CMD_CONTROL_PORT_FRAME, NL80211_CMD_GET_FTM_RESPONDER_STATS, NL80211_CMD_PEER_MEASUREMENT_START, NL80211_CMD_PEER_MEASUREMENT_RESULT, NL80211_CMD_PEER_MEASUREMENT_COMPLETE, NL80211_CMD_NOTIFY_RADAR, /* let this always be before all commands we haven't upstreamed yet */ __NL80211_CMD_NONUPSTREAM_START, /* add new commands above here */ /* used to define NL80211_CMD_MAX below */ __NL80211_CMD_AFTER_LAST, NL80211_CMD_MAX = __NL80211_CMD_AFTER_LAST - 1 }; /* * Allow user space programs to use #ifdef on new commands by defining them * here */ #define NL80211_CMD_SET_BSS NL80211_CMD_SET_BSS #define NL80211_CMD_SET_MGMT_EXTRA_IE NL80211_CMD_SET_MGMT_EXTRA_IE #define NL80211_CMD_REG_CHANGE NL80211_CMD_REG_CHANGE #define NL80211_CMD_AUTHENTICATE NL80211_CMD_AUTHENTICATE #define NL80211_CMD_ASSOCIATE NL80211_CMD_ASSOCIATE #define NL80211_CMD_DEAUTHENTICATE NL80211_CMD_DEAUTHENTICATE #define NL80211_CMD_DISASSOCIATE NL80211_CMD_DISASSOCIATE #define NL80211_CMD_REG_BEACON_HINT NL80211_CMD_REG_BEACON_HINT #define NL80211_ATTR_FEATURE_FLAGS NL80211_ATTR_FEATURE_FLAGS /* source-level API compatibility */ #define NL80211_CMD_GET_MESH_PARAMS NL80211_CMD_GET_MESH_CONFIG #define NL80211_CMD_SET_MESH_PARAMS NL80211_CMD_SET_MESH_CONFIG #define NL80211_MESH_SETUP_VENDOR_PATH_SEL_IE NL80211_MESH_SETUP_IE /** * enum nl80211_attrs - nl80211 netlink attributes * * @NL80211_ATTR_UNSPEC: unspecified attribute to catch errors * * @NL80211_ATTR_WIPHY: index of wiphy to operate on, cf. * /sys/class/ieee80211/<phyname>/index * @NL80211_ATTR_WIPHY_NAME: wiphy name (used for renaming) * @NL80211_ATTR_WIPHY_TXQ_PARAMS: a nested array of TX queue parameters * @NL80211_ATTR_WIPHY_FREQ: frequency of the selected channel in MHz, * defines the channel together with the (deprecated) * %NL80211_ATTR_WIPHY_CHANNEL_TYPE attribute or the attributes * %NL80211_ATTR_CHANNEL_WIDTH and if needed %NL80211_ATTR_CENTER_FREQ1 * and %NL80211_ATTR_CENTER_FREQ2 * @NL80211_ATTR_CHANNEL_WIDTH: u32 attribute containing one of the values * of &enum nl80211_chan_width, describing the channel width. See the * documentation of the enum for more information. * @NL80211_ATTR_CENTER_FREQ1: Center frequency of the first part of the * channel, used for anything but 20 MHz bandwidth * @NL80211_ATTR_CENTER_FREQ2: Center frequency of the second part of the * channel, used only for 80+80 MHz bandwidth * @NL80211_ATTR_WIPHY_CHANNEL_TYPE: included with NL80211_ATTR_WIPHY_FREQ * if HT20 or HT40 are to be used (i.e., HT disabled if not included): * NL80211_CHAN_NO_HT = HT not allowed (i.e., same as not including * this attribute) * NL80211_CHAN_HT20 = HT20 only * NL80211_CHAN_HT40MINUS = secondary channel is below the primary channel * NL80211_CHAN_HT40PLUS = secondary channel is above the primary channel * This attribute is now deprecated.
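To tie the frequency/width attributes together, a sketch of putting a monitor interface on a fixed channel with %NL80211_CMD_SET_CHANNEL, avoiding the deprecated %NL80211_ATTR_WIPHY_CHANNEL_TYPE; libnl-3 is assumed and the 5180 MHz frequency is a placeholder.

#include <errno.h>
#include <netlink/genl/genl.h>
#include <linux/nl80211.h>

/* Set a 20 MHz non-HT channel on the given (monitor) interface. */
static int set_monitor_channel(struct nl_sock *sk, int family, int ifindex)
{
    struct nl_msg *msg = nlmsg_alloc();
    int err;

    if (!msg)
        return -ENOMEM;
    genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
                NL80211_CMD_SET_CHANNEL, 0);
    nla_put_u32(msg, NL80211_ATTR_IFINDEX, ifindex);
    nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, 5180);   /* channel 36 */
    nla_put_u32(msg, NL80211_ATTR_CHANNEL_WIDTH, NL80211_CHAN_WIDTH_20_NOHT);
    err = nl_send_auto(sk, msg);
    nlmsg_free(msg);
    return err < 0 ? err : 0;
}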
* @NL80211_ATTR_WIPHY_RETRY_SHORT: TX retry limit for frames whose length is * less than or equal to the RTS threshold; allowed range: 1..255; * dot11ShortRetryLimit; u8 * @NL80211_ATTR_WIPHY_RETRY_LONG: TX retry limit for frames whose length is * greater than the RTS threshold; allowed range: 1..255; * dot11LongRetryLimit; u8 * @NL80211_ATTR_WIPHY_FRAG_THRESHOLD: fragmentation threshold, i.e., maximum * length in octets for frames; allowed range: 256..8000, disable * fragmentation with (u32)-1; dot11FragmentationThreshold; u32 * @NL80211_ATTR_WIPHY_RTS_THRESHOLD: RTS threshold (TX frames with length * larger than or equal to this use RTS/CTS handshake); allowed range: * 0..65536, disable with (u32)-1; dot11RTSThreshold; u32 * @NL80211_ATTR_WIPHY_COVERAGE_CLASS: Coverage Class as defined by IEEE 802.11 * section 7.3.2.9; dot11CoverageClass; u8 * * @NL80211_ATTR_IFINDEX: network interface index of the device to operate on * @NL80211_ATTR_IFNAME: network interface name * @NL80211_ATTR_IFTYPE: type of virtual interface, see &enum nl80211_iftype * * @NL80211_ATTR_WDEV: wireless device identifier, used for pseudo-devices * that don't have a netdev (u64) * * @NL80211_ATTR_MAC: MAC address (various uses) * * @NL80211_ATTR_KEY_DATA: (temporal) key data; for TKIP this consists of * 16 bytes encryption key followed by 8 bytes each for TX and RX MIC * keys * @NL80211_ATTR_KEY_IDX: key ID (u8, 0-3) * @NL80211_ATTR_KEY_CIPHER: key cipher suite (u32, as defined by IEEE 802.11 * section 7.3.2.25.1, e.g. 0x000FAC04) * @NL80211_ATTR_KEY_SEQ: transmit key sequence number (IV/PN) for TKIP and * CCMP keys, each six bytes in little endian * @NL80211_ATTR_KEY_DEFAULT: Flag attribute indicating the key is default key * @NL80211_ATTR_KEY_DEFAULT_MGMT: Flag attribute indicating the key is the * default management key * * @NL80211_ATTR_BEACON_INTERVAL: beacon interval in TU * @NL80211_ATTR_DTIM_PERIOD: DTIM period for beaconing * @NL80211_ATTR_BEACON_HEAD: portion of the beacon before the TIM IE * @NL80211_ATTR_BEACON_TAIL: portion of the beacon after the TIM IE * * @NL80211_ATTR_STA_AID: Association ID for the station (u16) * @NL80211_ATTR_STA_FLAGS: flags, nested element with NLA_FLAG attributes of * &enum nl80211_sta_flags (deprecated, use %NL80211_ATTR_STA_FLAGS2) * @NL80211_ATTR_STA_LISTEN_INTERVAL: listen interval as defined by * IEEE 802.11 7.3.1.6 (u16). * @NL80211_ATTR_STA_SUPPORTED_RATES: supported rates, array of supported * rates as defined by IEEE 802.11 7.3.2.2 but without the length * restriction (at most %NL80211_MAX_SUPP_RATES). * @NL80211_ATTR_STA_VLAN: interface index of VLAN interface to move station * to, or the AP interface the station was originally added to. * @NL80211_ATTR_STA_INFO: information about a station, part of station info * given for %NL80211_CMD_GET_STATION, nested attribute containing * as much info as possible, see &enum nl80211_sta_info. * * @NL80211_ATTR_WIPHY_BANDS: Information about the operating bands, * consisting of a nested array. * * @NL80211_ATTR_MESH_ID: mesh id (1-32 bytes). * @NL80211_ATTR_STA_PLINK_ACTION: action to perform on the mesh peer link * (see &enum nl80211_plink_action). * @NL80211_ATTR_MPATH_NEXT_HOP: MAC address of the next hop for a mesh path. * @NL80211_ATTR_MPATH_INFO: information about a mesh_path, part of mesh path * info given for %NL80211_CMD_GET_MPATH, nested attribute described at * &enum nl80211_mpath_info. * * @NL80211_ATTR_MNTR_FLAGS: flags, nested element with NLA_FLAG attributes of * &enum nl80211_mntr_flags.
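A sketch of %NL80211_CMD_SET_WIPHY applying the retry/fragmentation/RTS attributes just described; the particular values are arbitrary examples within the documented ranges, and libnl-3 plus the earlier socket setup is assumed.

#include <errno.h>
#include <stdint.h>
#include <netlink/genl/genl.h>
#include <linux/nl80211.h>

/* Tune wiphy-global parameters; (u32)-1 disables the RTS threshold. */
static int tune_wiphy(struct nl_sock *sk, int family, int ifindex)
{
    struct nl_msg *msg = nlmsg_alloc();
    int err;

    if (!msg)
        return -ENOMEM;
    genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
                NL80211_CMD_SET_WIPHY, 0);
    nla_put_u32(msg, NL80211_ATTR_IFINDEX, ifindex);
    nla_put_u8(msg, NL80211_ATTR_WIPHY_RETRY_SHORT, 7);        /* 1..255 */
    nla_put_u32(msg, NL80211_ATTR_WIPHY_FRAG_THRESHOLD, 2346); /* 256..8000 */
    nla_put_u32(msg, NL80211_ATTR_WIPHY_RTS_THRESHOLD, (uint32_t)-1);
    err = nl_send_auto(sk, msg);
    nlmsg_free(msg);
    return err < 0 ? err : 0;
}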
* * @NL80211_ATTR_REG_ALPHA2: an ISO-3166-alpha2 country code for which the * current regulatory domain should be set to or is already set to. * For example, 'CR', for Costa Rica. This attribute is used by the kernel * to query the CRDA to retrieve one regulatory domain. This attribute can * also be used by userspace to query the kernel for the currently set * regulatory domain. We chose an alpha2 as that is also used by the * IEEE-802.11 country information element to identify a country. * Users can also simply ask the wireless core to set regulatory domain * to a specific alpha2. * @NL80211_ATTR_REG_RULES: a nested array of regulatory domain regulatory * rules. * * @NL80211_ATTR_BSS_CTS_PROT: whether CTS protection is enabled (u8, 0 or 1) * @NL80211_ATTR_BSS_SHORT_PREAMBLE: whether short preamble is enabled * (u8, 0 or 1) * @NL80211_ATTR_BSS_SHORT_SLOT_TIME: whether short slot time enabled * (u8, 0 or 1) * @NL80211_ATTR_BSS_BASIC_RATES: basic rates, array of basic * rates in format defined by IEEE 802.11 7.3.2.2 but without the length * restriction (at most %NL80211_MAX_SUPP_RATES). * * @NL80211_ATTR_HT_CAPABILITY: HT Capability information element (from * association request when used with NL80211_CMD_NEW_STATION) * * @NL80211_ATTR_SUPPORTED_IFTYPES: nested attribute containing all * supported interface types, each a flag attribute with the number * of the interface mode. * * @NL80211_ATTR_MGMT_SUBTYPE: Management frame subtype for * %NL80211_CMD_SET_MGMT_EXTRA_IE. * * @NL80211_ATTR_IE: Information element(s) data (used, e.g., with * %NL80211_CMD_SET_MGMT_EXTRA_IE). * * @NL80211_ATTR_MAX_NUM_SCAN_SSIDS: number of SSIDs you can scan with * a single scan request, a wiphy attribute. * @NL80211_ATTR_MAX_NUM_SCHED_SCAN_SSIDS: number of SSIDs you can * scan with a single scheduled scan request, a wiphy attribute. * @NL80211_ATTR_MAX_SCAN_IE_LEN: maximum length of information elements * that can be added to a scan request * @NL80211_ATTR_MAX_SCHED_SCAN_IE_LEN: maximum length of information * elements that can be added to a scheduled scan request * @NL80211_ATTR_MAX_MATCH_SETS: maximum number of sets that can be * used with @NL80211_ATTR_SCHED_SCAN_MATCH, a wiphy attribute. * * @NL80211_ATTR_SCAN_FREQUENCIES: nested attribute with frequencies (in MHz) * @NL80211_ATTR_SCAN_SSIDS: nested attribute with SSIDs, leave out for passive * scanning and include a zero-length SSID (wildcard) for wildcard scan * @NL80211_ATTR_BSS: scan result BSS * * @NL80211_ATTR_REG_INITIATOR: indicates who requested the regulatory domain * currently in effect. This could be any of the %NL80211_REGDOM_SET_BY_* * @NL80211_ATTR_REG_TYPE: indicates the type of the regulatory domain currently * set. This can be one of the nl80211_reg_type (%NL80211_REGDOM_TYPE_*) * * @NL80211_ATTR_SUPPORTED_COMMANDS: wiphy attribute that specifies * an array of command numbers (i.e. a mapping index to command number) * that the driver for the given wiphy supports. 
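On the receive side, a hypothetical libnl-3 callback that walks %NL80211_ATTR_SUPPORTED_COMMANDS from a %NL80211_CMD_GET_WIPHY reply; install it with nl_socket_modify_cb(sk, NL_CB_VALID, NL_CB_CUSTOM, print_supported_cmds, NULL) before receiving.

#include <stdio.h>
#include <netlink/genl/genl.h>
#include <linux/nl80211.h>

/* Print each command number the wiphy advertises as supported. */
static int print_supported_cmds(struct nl_msg *msg, void *arg)
{
    struct genlmsghdr *gnlh = nlmsg_data(nlmsg_hdr(msg));
    struct nlattr *attrs[NL80211_ATTR_MAX + 1], *cmd;
    int rem;

    nla_parse(attrs, NL80211_ATTR_MAX, genlmsg_attrdata(gnlh, 0),
              genlmsg_attrlen(gnlh, 0), NULL);
    if (!attrs[NL80211_ATTR_SUPPORTED_COMMANDS])
        return NL_SKIP;
    nla_for_each_nested(cmd, attrs[NL80211_ATTR_SUPPORTED_COMMANDS], rem)
        printf("supported command: %u\n", nla_get_u32(cmd));
    return NL_SKIP;
}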
* * @NL80211_ATTR_FRAME: frame data (binary attribute), including frame header * and body, but not FCS; used, e.g., with NL80211_CMD_AUTHENTICATE and * NL80211_CMD_ASSOCIATE events * @NL80211_ATTR_SSID: SSID (binary attribute, 0..32 octets) * @NL80211_ATTR_AUTH_TYPE: AuthenticationType, see &enum nl80211_auth_type, * represented as a u32 * @NL80211_ATTR_REASON_CODE: ReasonCode for %NL80211_CMD_DEAUTHENTICATE and * %NL80211_CMD_DISASSOCIATE, u16 * * @NL80211_ATTR_KEY_TYPE: Key Type, see &enum nl80211_key_type, represented as * a u32 * * @NL80211_ATTR_FREQ_BEFORE: A channel which has suffered a regulatory change * due to considerations from a beacon hint. This attribute reflects * the state of the channel _before_ the beacon hint processing. This * attribute consists of a nested attribute containing * NL80211_FREQUENCY_ATTR_* * @NL80211_ATTR_FREQ_AFTER: A channel which has suffered a regulatory change * due to considerations from a beacon hint. This attribute reflects * the state of the channel _after_ the beacon hint processing. This * attribute consists of a nested attribute containing * NL80211_FREQUENCY_ATTR_* * * @NL80211_ATTR_CIPHER_SUITES: a set of u32 values indicating the supported * cipher suites * * @NL80211_ATTR_FREQ_FIXED: a flag indicating the IBSS should not try to look * for other networks on different channels * * @NL80211_ATTR_TIMED_OUT: a flag indicating that an operation timed out; this * is used, e.g., with %NL80211_CMD_AUTHENTICATE event * * @NL80211_ATTR_USE_MFP: Whether management frame protection (IEEE 802.11w) is * used for the association (&enum nl80211_mfp, represented as a u32); * this attribute can be used with %NL80211_CMD_ASSOCIATE and * %NL80211_CMD_CONNECT requests. %NL80211_MFP_OPTIONAL is not allowed for * %NL80211_CMD_ASSOCIATE since user space SME is expected and hence it * must have decided whether to use management frame protection or not. * Setting %NL80211_MFP_OPTIONAL with a %NL80211_CMD_CONNECT request will * let the driver (or the firmware) decide whether to use MFP or not. * * @NL80211_ATTR_STA_FLAGS2: Attribute containing a * &struct nl80211_sta_flag_update. * * @NL80211_ATTR_CONTROL_PORT: A flag indicating whether user space controls * IEEE 802.1X port, i.e., sets/clears %NL80211_STA_FLAG_AUTHORIZED, in * station mode. If the flag is included in %NL80211_CMD_ASSOCIATE * request, the driver will assume that the port is unauthorized until * authorized by user space. Otherwise, port is marked authorized by * default in station mode. * @NL80211_ATTR_CONTROL_PORT_ETHERTYPE: A 16-bit value indicating the * ethertype that will be used for key negotiation. It can be * specified with the associate and connect commands. If it is not * specified, the value defaults to 0x888E (PAE, 802.1X). This * attribute is also used as a flag in the wiphy information to * indicate that protocols other than PAE are supported. * @NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT: When included along with * %NL80211_ATTR_CONTROL_PORT_ETHERTYPE, indicates that the custom * ethertype frames used for key negotiation must not be encrypted. * @NL80211_ATTR_CONTROL_PORT_OVER_NL80211: A flag indicating whether control * port frames (e.g. of type given in %NL80211_ATTR_CONTROL_PORT_ETHERTYPE) * will be sent directly to the network interface or sent via the NL80211 * socket. If this attribute is missing, then legacy behavior of sending * control port frames directly to the network interface is used.
If the * flag is included, then control port frames are sent over NL80211 instead, * using %NL80211_CMD_CONTROL_PORT_FRAME. If control port routing over NL80211 is * to be used then userspace must also use the %NL80211_ATTR_SOCKET_OWNER * flag. * * @NL80211_ATTR_TESTDATA: Testmode data blob, passed through to the driver. * We recommend using nested, driver-specific attributes within this. * * @NL80211_ATTR_DISCONNECTED_BY_AP: A flag indicating that the DISCONNECT * event was due to the AP disconnecting the station, and not due to * a local disconnect request. * @NL80211_ATTR_STATUS_CODE: StatusCode for the %NL80211_CMD_CONNECT * event (u16) * @NL80211_ATTR_PRIVACY: Flag attribute, used with connect(), indicating * that protected APs should be used. This is also used with NEW_BEACON to * indicate that the BSS is to use protection. * * @NL80211_ATTR_CIPHER_SUITES_PAIRWISE: Used with CONNECT, ASSOCIATE, and * NEW_BEACON to indicate which unicast key ciphers will be used with * the connection (an array of u32). * @NL80211_ATTR_CIPHER_SUITES_GROUP: Used with CONNECT to indicate that the * connection will use APs that use one of the specified cipher suites as * the group cipher suite. (an array of u32). * Also used with ASSOCIATE to indicate the group cipher suite that will be * used for the association. It is also used with NEW_BEACON to indicate * which group key cipher will be used by the AP. (u32). * @NL80211_ATTR_WPA_VERSIONS: Used with CONNECT, ASSOCIATE, and NEW_BEACON to * indicate which WPA version(s) the AP we want to associate with is using * (a u32 with flags from &enum nl80211_wpa_versions). * @NL80211_ATTR_AKM_SUITES: Used with CONNECT, ASSOCIATE, and NEW_BEACON to * indicate which key management algorithm(s) to use (an array of u32). * * @NL80211_ATTR_REQ_IE: (Re)association request information elements as * sent out by the card, for ROAM and successful CONNECT events. * @NL80211_ATTR_RESP_IE: (Re)association response information elements as * sent by peer, for ROAM and successful CONNECT events. * * @NL80211_ATTR_PREV_BSSID: previous BSSID, to be used in ASSOCIATE and CONNECT * commands to specify a request to reassociate within an ESS, i.e., to use * Reassociate Request frame (with the value of this attribute in the * Current AP address field) instead of Association Request frame which is * used for the initial association to an ESS. * * @NL80211_ATTR_KEY: key information in a nested attribute with * %NL80211_KEY_* sub-attributes * @NL80211_ATTR_KEYS: array of keys for static WEP keys for connect() * and join_ibss(), key information is in a nested attribute each * with %NL80211_KEY_* sub-attributes * * @NL80211_ATTR_PID: Process ID of a network namespace. * * @NL80211_ATTR_GENERATION: Used to indicate consistent snapshots for * dumps. This number increases whenever the object list being * dumped changes, and as such userspace can verify that it has * obtained a complete and consistent snapshot by verifying that * all dump messages contain the same generation number. If it * changed then the list changed and the dump should be repeated * completely from scratch. * * @NL80211_ATTR_4ADDR: Use 4-address frames on a virtual interface * * @NL80211_ATTR_SURVEY_INFO: survey information about a channel, part of * the survey response for %NL80211_CMD_GET_SURVEY, nested attribute * containing as much info as possible, see &enum survey_info. * * @NL80211_ATTR_PMKID: PMK material for PMKSA caching. * @NL80211_ATTR_MAX_NUM_PMKIDS: maximum number of PMKIDs a firmware can * cache, a wiphy attribute.
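 *
 * Editorial aside, not part of the original header: a sketch of a WPA2-PSK
 * connect request combining several of the attributes above. The SSID
 * "example", the CCMP suite selector 0x000fac04, the PSK AKM selector
 * 0x000fac02 and the ifindex variable are illustrative assumptions:
 *
 *	uint32_t cipher = 0x000fac04;
 *	uint32_t akm = 0x000fac02;
 *
 *	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
 *		    NL80211_CMD_CONNECT, 0);
 *	nla_put_u32(msg, NL80211_ATTR_IFINDEX, ifindex);
 *	nla_put(msg, NL80211_ATTR_SSID, 7, "example");
 *	nla_put_flag(msg, NL80211_ATTR_PRIVACY);
 *	nla_put_u32(msg, NL80211_ATTR_WPA_VERSIONS, NL80211_WPA_VERSION_2);
 *	nla_put(msg, NL80211_ATTR_CIPHER_SUITES_PAIRWISE,
 *		sizeof(cipher), &cipher);
 *	nla_put_u32(msg, NL80211_ATTR_CIPHER_SUITES_GROUP, cipher);
 *	nla_put(msg, NL80211_ATTR_AKM_SUITES, sizeof(akm), &akm);
 *	nl_send_auto(sk, msg);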
* * @NL80211_ATTR_DURATION: Duration of an operation in milliseconds, u32. * @NL80211_ATTR_MAX_REMAIN_ON_CHANNEL_DURATION: Device attribute that * specifies the maximum duration that can be requested with the * remain-on-channel operation, in milliseconds, u32. * * @NL80211_ATTR_COOKIE: Generic 64-bit cookie to identify objects. * * @NL80211_ATTR_TX_RATES: Nested set of attributes * (enum nl80211_tx_rate_attributes) describing TX rates per band. The * enum nl80211_band value is used as the index (nla_type() of the nested * data). If a band is not included, it will be configured to allow all * rates based on negotiated supported rates information. This attribute * is used with %NL80211_CMD_SET_TX_BITRATE_MASK and with starting AP, * and joining mesh networks (not IBSS yet). In the latter case, it must * specify just a single bitrate, which is to be used for the beacon. * The driver must also specify support for this with the extended * features NL80211_EXT_FEATURE_BEACON_RATE_LEGACY, * NL80211_EXT_FEATURE_BEACON_RATE_HT and * NL80211_EXT_FEATURE_BEACON_RATE_VHT. * * @NL80211_ATTR_FRAME_MATCH: A binary attribute which typically must contain * at least one byte, currently used with @NL80211_CMD_REGISTER_FRAME. * @NL80211_ATTR_FRAME_TYPE: A u16 indicating the frame type/subtype for the * @NL80211_CMD_REGISTER_FRAME command. * @NL80211_ATTR_TX_FRAME_TYPES: wiphy capability attribute, which is a * nested attribute of %NL80211_ATTR_FRAME_TYPE attributes, containing * information about which frame types can be transmitted with * %NL80211_CMD_FRAME. * @NL80211_ATTR_RX_FRAME_TYPES: wiphy capability attribute, which is a * nested attribute of %NL80211_ATTR_FRAME_TYPE attributes, containing * information about which frame types can be registered for RX. * * @NL80211_ATTR_ACK: Flag attribute indicating that the frame was * acknowledged by the recipient. * * @NL80211_ATTR_PS_STATE: powersave state, using &enum nl80211_ps_state values. * * @NL80211_ATTR_CQM: connection quality monitor configuration in a * nested attribute with %NL80211_ATTR_CQM_* sub-attributes. * * @NL80211_ATTR_LOCAL_STATE_CHANGE: Flag attribute to indicate that a command * is requesting a local authentication/association state change without * invoking actual management frame exchange. This can be used with * NL80211_CMD_AUTHENTICATE, NL80211_CMD_DEAUTHENTICATE, * NL80211_CMD_DISASSOCIATE. * * @NL80211_ATTR_AP_ISOLATE: (AP mode) Do not forward traffic between stations * connected to this BSS. * * @NL80211_ATTR_WIPHY_TX_POWER_SETTING: Transmit power setting type. See * &enum nl80211_tx_power_setting for possible values. * @NL80211_ATTR_WIPHY_TX_POWER_LEVEL: Transmit power level in signed mBm units. * This is used in association with @NL80211_ATTR_WIPHY_TX_POWER_SETTING * for non-automatic settings. * * @NL80211_ATTR_SUPPORT_IBSS_RSN: The device supports IBSS RSN, which mostly * means support for per-station GTKs. * * @NL80211_ATTR_WIPHY_ANTENNA_TX: Bitmap of allowed antennas for transmitting. * This can be used to mask out antennas which are not attached or should * not be used for transmitting. If an antenna is not selected in this * bitmap the hardware is not allowed to transmit on this antenna. * * Each bit represents one antenna, starting with antenna 1 at the first * bit.
Depending on which antennas are selected in the bitmap, 802.11n * drivers can derive which chainmasks to use (if all antennas belonging to * a particular chain are disabled this chain should be disabled) and if * a chain has diversity antennas whether diversity should be used or not. * HT capabilities (STBC, TX Beamforming, Antenna selection) can be * derived from the available chains after applying the antenna mask. * Non-802.11n drivers can derive whether to use diversity or not. * Drivers may reject configurations or RX/TX mask combinations they cannot * support by returning -EINVAL. * * @NL80211_ATTR_WIPHY_ANTENNA_RX: Bitmap of allowed antennas for receiving. * This can be used to mask out antennas which are not attached or should * not be used for receiving. If an antenna is not selected in this bitmap * the hardware should not be configured to receive on this antenna. * For a more detailed description see @NL80211_ATTR_WIPHY_ANTENNA_TX. * * @NL80211_ATTR_WIPHY_ANTENNA_AVAIL_TX: Bitmap of antennas which are available * for configuration as TX antennas via the above parameters. * * @NL80211_ATTR_WIPHY_ANTENNA_AVAIL_RX: Bitmap of antennas which are available * for configuration as RX antennas via the above parameters. * * @NL80211_ATTR_MCAST_RATE: Multicast tx rate (in 100 kbps) for IBSS * * @NL80211_ATTR_OFFCHANNEL_TX_OK: For management frame TX, the frame may be * transmitted on another channel when the channel given doesn't match * the current channel. If the current channel doesn't match and this * flag isn't set, the frame will be rejected. This is also used as an * nl80211 capability flag. * * @NL80211_ATTR_BSS_HT_OPMODE: HT operation mode (u16) * * @NL80211_ATTR_KEY_DEFAULT_TYPES: A nested attribute containing flags * attributes, specifying what a key should be set as default. * See &enum nl80211_key_default_types. * * @NL80211_ATTR_MESH_SETUP: Optional mesh setup parameters. These cannot be * changed once the mesh is active. * @NL80211_ATTR_MESH_CONFIG: Mesh configuration parameters, a nested attribute * containing attributes from &enum nl80211_meshconf_params. * @NL80211_ATTR_SUPPORT_MESH_AUTH: Currently, this means the underlying driver * allows auth frames in a mesh to be passed to userspace for processing via * the @NL80211_MESH_SETUP_USERSPACE_AUTH flag. * @NL80211_ATTR_STA_PLINK_STATE: The state of a mesh peer link as defined in * &enum nl80211_plink_state. Used when userspace is driving the peer link * management state machine. @NL80211_MESH_SETUP_USERSPACE_AMPE or * @NL80211_MESH_SETUP_USERSPACE_MPM must be enabled. * * @NL80211_ATTR_WOWLAN_TRIGGERS_SUPPORTED: indicates, as part of the wiphy * capabilities, the supported WoWLAN triggers * @NL80211_ATTR_WOWLAN_TRIGGERS: used by %NL80211_CMD_SET_WOWLAN to * indicate which WoW triggers should be enabled. This is also * used by %NL80211_CMD_GET_WOWLAN to get the currently enabled WoWLAN * triggers. * * @NL80211_ATTR_SCHED_SCAN_INTERVAL: Interval between scheduled scan * cycles, in msecs. * * @NL80211_ATTR_SCHED_SCAN_MATCH: Nested attribute with one or more * sets of attributes to match during scheduled scans. Only BSSs * that match any of the sets will be reported. These are * pass-thru filter rules. * For a match to succeed, the BSS must match all attributes of a * set. Since not every hardware supports matching all types of * attributes, there is no guarantee that the reported BSSs are * fully complying with the match sets and userspace needs to be * able to ignore them by itself.
* Thus, the implementation is somewhat hardware-dependent, but * this is only an optimization and the userspace application * needs to handle all the non-filtered results anyway. * If the match attributes don't make sense when combined with * the values passed in @NL80211_ATTR_SCAN_SSIDS (e.g. if an SSID * is included in the probe request, but the match attributes * will never let it go through), -EINVAL may be returned. * If omitted, no filtering is done. * * @NL80211_ATTR_INTERFACE_COMBINATIONS: Nested attribute listing the supported * interface combinations. In each nested item, it contains attributes * defined in &enum nl80211_if_combination_attrs. * @NL80211_ATTR_SOFTWARE_IFTYPES: Nested attribute (just like * %NL80211_ATTR_SUPPORTED_IFTYPES) containing the interface types that * are managed in software: interfaces of these types aren't subject to * any restrictions in their number or combinations. * * @NL80211_ATTR_REKEY_DATA: nested attribute containing the information * necessary for GTK rekeying in the device, see &enum nl80211_rekey_data. * * @NL80211_ATTR_SCAN_SUPP_RATES: rates to be advertised as supported in scan, * nested array attribute containing an entry for each band, with the entry * being a list of supported rates as defined by IEEE 802.11 7.3.2.2 but * without the length restriction (at most %NL80211_MAX_SUPP_RATES). * * @NL80211_ATTR_HIDDEN_SSID: indicates whether SSID is to be hidden from Beacon * and Probe Response (when responding to a wildcard Probe Request); see * &enum nl80211_hidden_ssid, represented as a u32 * * @NL80211_ATTR_IE_PROBE_RESP: Information element(s) for Probe Response frame. * This is used with %NL80211_CMD_NEW_BEACON and %NL80211_CMD_SET_BEACON to * provide extra IEs (e.g., WPS/P2P IE) into Probe Response frames when the * driver (or firmware) replies to Probe Request frames. * @NL80211_ATTR_IE_ASSOC_RESP: Information element(s) for (Re)Association * Response frames. This is used with %NL80211_CMD_NEW_BEACON and * %NL80211_CMD_SET_BEACON to provide extra IEs (e.g., WPS/P2P IE) into * (Re)Association Response frames when the driver (or firmware) replies to * (Re)Association Request frames. * * @NL80211_ATTR_STA_WME: Nested attribute containing the wme configuration * of the station, see &enum nl80211_sta_wme_attr. * @NL80211_ATTR_SUPPORT_AP_UAPSD: the device supports uapsd when working * as AP. * * @NL80211_ATTR_ROAM_SUPPORT: Indicates whether the firmware is capable of * roaming to another AP in the same ESS if the signal level is low. * * @NL80211_ATTR_PMKSA_CANDIDATE: Nested attribute containing the PMKSA caching * candidate information, see &enum nl80211_pmksa_candidate_attr. * * @NL80211_ATTR_TX_NO_CCK_RATE: Indicates whether to use CCK rate or not * for management frame transmission. To avoid P2P probe/action * frames being transmitted at CCK rate in the 2 GHz band, user space * applications use this attribute. * This attribute is used with %NL80211_CMD_TRIGGER_SCAN and * %NL80211_CMD_FRAME commands. * * @NL80211_ATTR_TDLS_ACTION: Low level TDLS action code (e.g. link setup * request, link setup confirm, link teardown, etc.). Values are * described in the TDLS (802.11z) specification. * @NL80211_ATTR_TDLS_DIALOG_TOKEN: Non-zero token for uniquely identifying a * TDLS conversation between two devices. * @NL80211_ATTR_TDLS_OPERATION: High level TDLS operation; see * &enum nl80211_tdls_operation, represented as a u8. * @NL80211_ATTR_TDLS_SUPPORT: A flag indicating the device can operate * as a TDLS peer sta.
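 *
 * Editorial aside, not part of the original header: when the driver handles
 * TDLS link setup internally (see %NL80211_ATTR_TDLS_EXTERNAL_SETUP just
 * below), userspace requests it with %NL80211_CMD_TDLS_OPER. Illustrative
 * libnl-3 fragment; "peer" is a hypothetical 6-byte MAC address array:
 *
 *	#include <linux/if_ether.h>
 *
 *	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
 *		    NL80211_CMD_TDLS_OPER, 0);
 *	nla_put_u32(msg, NL80211_ATTR_IFINDEX, ifindex);
 *	nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, peer);
 *	nla_put_u8(msg, NL80211_ATTR_TDLS_OPERATION, NL80211_TDLS_SETUP);
 *	nl_send_auto(sk, msg);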
* @NL80211_ATTR_TDLS_EXTERNAL_SETUP: The TDLS discovery/setup and teardown * procedures should be performed by sending TDLS packets via * %NL80211_CMD_TDLS_MGMT. Otherwise %NL80211_CMD_TDLS_OPER should be * used for asking the driver to perform a TDLS operation. * * @NL80211_ATTR_DEVICE_AP_SME: This u32 attribute may be listed for devices * that have AP support to indicate that they have the AP SME integrated * with support for the features listed in this attribute, see * &enum nl80211_ap_sme_features. * * @NL80211_ATTR_DONT_WAIT_FOR_ACK: Used with %NL80211_CMD_FRAME, this tells * the driver to not wait for an acknowledgement. Note that due to this, * it will also not give a status callback nor return a cookie. This is * mostly useful for probe responses to save airtime. * * @NL80211_ATTR_FEATURE_FLAGS: This u32 attribute contains flags from * &enum nl80211_feature_flags and is advertised in wiphy information. * @NL80211_ATTR_PROBE_RESP_OFFLOAD: Indicates that the HW responds to probe * requests while operating in AP-mode. * This attribute holds a bitmap of the supported protocols for * offloading (see &enum nl80211_probe_resp_offload_support_attr). * * @NL80211_ATTR_PROBE_RESP: Probe Response template data. Contains the entire * probe-response frame. The DA field in the 802.11 header is zero-ed out, * to be filled by the FW. * @NL80211_ATTR_DISABLE_HT: Force HT capable interfaces to disable * this feature. Currently, only supported in mac80211 drivers. * @NL80211_ATTR_HT_CAPABILITY_MASK: Specify which bits of the * ATTR_HT_CAPABILITY to which attention should be paid. * Currently, only mac80211 NICs support this feature. * The values that may be configured are: * MCS rates, MAX-AMSDU, HT-20-40 and HT_CAP_SGI_40, * AMPDU density and AMPDU factor. * All values are treated as suggestions and may be ignored * by the driver as required. The actual values may be seen in * the station debugfs ht_caps file. * * @NL80211_ATTR_DFS_REGION: region for regulatory rules which this country * abides by when initiating radiation on DFS channels. A country maps * to one DFS region. * * @NL80211_ATTR_NOACK_MAP: This u16 bitmap contains the No Ack Policy of * up to 16 TIDs. * * @NL80211_ATTR_INACTIVITY_TIMEOUT: timeout value in seconds, this can be * used by drivers that have MLME in firmware and do not have support * to report per-station tx/rx activity, to free up the station entry from * the list. This needs to be used when the driver advertises the * capability to timeout the stations. * * @NL80211_ATTR_RX_SIGNAL_DBM: signal strength in dBm (as a 32-bit int); * this attribute is (depending on the driver capabilities) added to * received frames indicated with %NL80211_CMD_FRAME. * * @NL80211_ATTR_BG_SCAN_PERIOD: Background scan period in seconds * or 0 to disable background scan. * * @NL80211_ATTR_USER_REG_HINT_TYPE: type of regulatory hint passed from * userspace. If unset it is assumed the hint comes directly from * a user. If set, it could specify exactly what type of source * was used to provide the hint. For the different types of * allowed user regulatory hints see nl80211_user_reg_hint_type. * * @NL80211_ATTR_CONN_FAILED_REASON: The reason for which AP has rejected * the connection request from a station. nl80211_connect_failed_reason * enum has different reasons of connection failure. * * @NL80211_ATTR_AUTH_DATA: Fields and elements in Authentication frames.
* This contains the authentication frame body (non-IE and IE data), * excluding the Authentication algorithm number, i.e., starting at the * Authentication transaction sequence number field. It is used with * authentication algorithms that need special fields to be added into * the frames (SAE and FILS). Currently, only the SAE cases use the * initial two fields (Authentication transaction sequence number and * Status code). However, those fields are included in the attribute data * for all authentication algorithms to keep the attribute definition * consistent. * * @NL80211_ATTR_VHT_CAPABILITY: VHT Capability information element (from * association request when used with NL80211_CMD_NEW_STATION) * * @NL80211_ATTR_SCAN_FLAGS: scan request control flags (u32) * * @NL80211_ATTR_P2P_CTWINDOW: P2P GO Client Traffic Window (u8), used with * the START_AP and SET_BSS commands * @NL80211_ATTR_P2P_OPPPS: P2P GO opportunistic PS (u8), used with the * START_AP and SET_BSS commands. This can have the values 0 or 1; * if not given in START_AP 0 is assumed, if not given in SET_BSS * no change is made. * * @NL80211_ATTR_LOCAL_MESH_POWER_MODE: local mesh STA link-specific power mode * defined in &enum nl80211_mesh_power_mode. * * @NL80211_ATTR_ACL_POLICY: ACL policy, see &enum nl80211_acl_policy, * carried in a u32 attribute * * @NL80211_ATTR_MAC_ADDRS: Array of nested MAC addresses, used for * MAC ACL. * * @NL80211_ATTR_MAC_ACL_MAX: u32 attribute to advertise the maximum * number of MAC addresses that a device can support for MAC * ACL. * * @NL80211_ATTR_RADAR_EVENT: Type of radar event for notification to userspace, * contains a value of enum nl80211_radar_event (u32). * * @NL80211_ATTR_EXT_CAPA: 802.11 extended capabilities that the kernel driver * has and handles. The format is the same as the IE contents. See * 802.11-2012 8.4.2.29 for more information. * @NL80211_ATTR_EXT_CAPA_MASK: Extended capabilities that the kernel driver * has set in the %NL80211_ATTR_EXT_CAPA value, for multibit fields. * * @NL80211_ATTR_STA_CAPABILITY: Station capabilities (u16) are advertised to * the driver, e.g., to enable TDLS power save (PU-APSD). * * @NL80211_ATTR_STA_EXT_CAPABILITY: Station extended capabilities are * advertised to the driver, e.g., to enable TDLS off channel operations * and PU-APSD. * * @NL80211_ATTR_PROTOCOL_FEATURES: global nl80211 feature flags, see * &enum nl80211_protocol_features, the attribute is a u32. * * @NL80211_ATTR_SPLIT_WIPHY_DUMP: flag attribute, userspace supports * receiving the data for a single wiphy split across multiple * messages, given with wiphy dump message * * @NL80211_ATTR_MDID: Mobility Domain Identifier * * @NL80211_ATTR_IE_RIC: Resource Information Container Information * Element * * @NL80211_ATTR_CRIT_PROT_ID: critical protocol identifier requiring increased * reliability, see &enum nl80211_crit_proto_id (u16). * @NL80211_ATTR_MAX_CRIT_PROT_DURATION: duration in milliseconds in which * the connection should have increased reliability (u16). * * @NL80211_ATTR_PEER_AID: Association ID for the peer TDLS station (u16). * This is similar to @NL80211_ATTR_STA_AID but with the difference of being * allowed to be used with the first @NL80211_CMD_SET_STATION command to * update a TDLS peer STA entry. * * @NL80211_ATTR_COALESCE_RULE: Coalesce rule information. * * @NL80211_ATTR_CH_SWITCH_COUNT: u32 attribute specifying the number of TBTTs * until the channel switch event.
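 *
 * Editorial aside, not part of the original header: a fragment showing the
 * ACL attributes above with %NL80211_CMD_SET_MAC_ACL, installing a
 * deny-unless-listed ACL with a single allowed station ("mac1" is a
 * hypothetical 6-byte array, and the ifindex is assumed to refer to an
 * AP interface):
 *
 *	struct nlattr *acl;
 *
 *	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
 *		    NL80211_CMD_SET_MAC_ACL, 0);
 *	nla_put_u32(msg, NL80211_ATTR_IFINDEX, ifindex);
 *	nla_put_u32(msg, NL80211_ATTR_ACL_POLICY,
 *		    NL80211_ACL_POLICY_DENY_UNLESS_LISTED);
 *	acl = nla_nest_start(msg, NL80211_ATTR_MAC_ADDRS);
 *	nla_put(msg, 1, ETH_ALEN, mac1);
 *	nla_nest_end(msg, acl);
 *	nl_send_auto(sk, msg);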
* @NL80211_ATTR_CH_SWITCH_BLOCK_TX: flag attribute specifying that transmission * must be blocked on the current channel (before the channel switch * operation). * @NL80211_ATTR_CSA_IES: Nested set of attributes containing the IE information * for the time while performing a channel switch. * @NL80211_ATTR_CSA_C_OFF_BEACON: An array of offsets (u16) to the channel * switch counters in the beacon's tail (%NL80211_ATTR_BEACON_TAIL). * @NL80211_ATTR_CSA_C_OFF_PRESP: An array of offsets (u16) to the channel * switch counters in the probe response (%NL80211_ATTR_PROBE_RESP). * * @NL80211_ATTR_RXMGMT_FLAGS: flags for nl80211_send_mgmt(), u32. * As specified in the &enum nl80211_rxmgmt_flags. * * @NL80211_ATTR_STA_SUPPORTED_CHANNELS: array of supported channels. * * @NL80211_ATTR_STA_SUPPORTED_OPER_CLASSES: array of supported * operating classes. * * @NL80211_ATTR_HANDLE_DFS: A flag indicating whether user space * controls DFS operation in IBSS mode. If the flag is included in * %NL80211_CMD_JOIN_IBSS request, the driver will allow use of DFS * channels and reports radar events to userspace. Userspace is required * to react to radar events, e.g. initiate a channel switch or leave the * IBSS network. * * @NL80211_ATTR_SUPPORT_5_MHZ: A flag indicating that the device supports * 5 MHz channel bandwidth. * @NL80211_ATTR_SUPPORT_10_MHZ: A flag indicating that the device supports * 10 MHz channel bandwidth. * * @NL80211_ATTR_OPMODE_NOTIF: Operating mode field from Operating Mode * Notification Element based on association request when used with * %NL80211_CMD_NEW_STATION or %NL80211_CMD_SET_STATION (only when * %NL80211_FEATURE_FULL_AP_CLIENT_STATE is supported, or with TDLS); * u8 attribute. * * @NL80211_ATTR_VENDOR_ID: The vendor ID, either a 24-bit OUI or, if * %NL80211_VENDOR_ID_IS_LINUX is set, a special Linux ID (not used yet) * @NL80211_ATTR_VENDOR_SUBCMD: vendor sub-command * @NL80211_ATTR_VENDOR_DATA: data for the vendor command, if any; this * attribute is also used for vendor command feature advertisement * @NL80211_ATTR_VENDOR_EVENTS: used for event list advertising in the wiphy * info, containing a nested array of possible events * * @NL80211_ATTR_QOS_MAP: IP DSCP mapping for Interworking QoS mapping. This * data is in the format defined for the payload of the QoS Map Set element * in IEEE Std 802.11-2012, 8.4.2.97. * * @NL80211_ATTR_MAC_HINT: MAC address recommendation as initial BSS * @NL80211_ATTR_WIPHY_FREQ_HINT: frequency of the recommended initial BSS * * @NL80211_ATTR_MAX_AP_ASSOC_STA: Device attribute that indicates how many * associated stations are supported in AP mode (including P2P GO); u32. * Since drivers may not have a fixed limit on the maximum number (e.g., * other concurrent operations may affect this), drivers are allowed to * advertise values that cannot always be met. In such cases, an attempt * to add a new station entry with @NL80211_CMD_NEW_STATION may fail. * * @NL80211_ATTR_CSA_C_OFFSETS_TX: An array of csa counter offsets (u16) which * should be updated when the frame is transmitted. * @NL80211_ATTR_MAX_CSA_COUNTERS: u8 attribute used to advertise the maximum * supported number of csa counters. * * @NL80211_ATTR_TDLS_PEER_CAPABILITY: flags for TDLS peer capabilities, u32. * As specified in the &enum nl80211_tdls_peer_capability. * * @NL80211_ATTR_SOCKET_OWNER: Flag attribute, if set during interface * creation then the new interface will be owned by the netlink socket * that created it and will be destroyed when the socket is closed.
* If set during scheduled scan start then the new scan req will be * owned by the netlink socket that created it and the scheduled scan will * be stopped when the socket is closed. * If set during configuration of regulatory indoor operation then the * regulatory indoor configuration would be owned by the netlink socket * that configured the indoor setting, and the indoor operation would be * cleared when the socket is closed. * If set during NAN interface creation, the interface will be destroyed * if the socket is closed just like any other interface. Moreover, NAN * notifications will be sent in unicast to that socket. Without this * attribute, the notifications will be sent to the %NL80211_MCGRP_NAN * multicast group. * If set during %NL80211_CMD_ASSOCIATE or %NL80211_CMD_CONNECT the * station will deauthenticate when the socket is closed. * If set during %NL80211_CMD_JOIN_IBSS the IBSS will be automatically * torn down when the socket is closed. * If set during %NL80211_CMD_JOIN_MESH the mesh setup will be * automatically torn down when the socket is closed. * If set during %NL80211_CMD_START_AP the AP will be automatically * disabled when the socket is closed. * * @NL80211_ATTR_TDLS_INITIATOR: flag attribute indicating the current end is * the TDLS link initiator. * * @NL80211_ATTR_USE_RRM: flag for indicating whether the current connection * shall support Radio Resource Measurements (11k). This attribute can be * used with %NL80211_CMD_ASSOCIATE and %NL80211_CMD_CONNECT requests. * User space applications are expected to use this flag only if the * underlying device supports these minimal RRM features: * %NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES, * %NL80211_FEATURE_QUIET, * Or, if global RRM is supported, see: * %NL80211_EXT_FEATURE_RRM * If this flag is used, the driver must add the Power Capabilities IE to the * association request. In addition, it must also set the RRM capability * flag in the association request's Capability Info field. * * @NL80211_ATTR_WIPHY_DYN_ACK: flag attribute used to enable ACK timeout * estimation algorithm (dynack). In order to activate dynack * %NL80211_FEATURE_ACKTO_ESTIMATION feature flag must be set by lower * drivers to indicate dynack capability. Dynack is automatically disabled * when a valid value is set for the coverage class. * * @NL80211_ATTR_TSID: a TSID value (u8 attribute) * @NL80211_ATTR_USER_PRIO: user priority value (u8 attribute) * @NL80211_ATTR_ADMITTED_TIME: admitted time in units of 32 microseconds * (per second) (u16 attribute) * * @NL80211_ATTR_SMPS_MODE: SMPS mode to use (ap mode). see * &enum nl80211_smps_mode. * * @NL80211_ATTR_OPER_CLASS: operating class * * @NL80211_ATTR_MAC_MASK: MAC address mask * * @NL80211_ATTR_WIPHY_SELF_MANAGED_REG: flag attribute indicating this device * is self-managing its regulatory information and any regulatory domain * obtained from it is coming from the device's wiphy and not the global * cfg80211 regdomain. * * @NL80211_ATTR_EXT_FEATURES: extended feature flags contained in a byte * array. The feature flags are identified by their bit index (see &enum * nl80211_ext_feature_index). The bit index is ordered starting at the * least-significant bit of the first byte in the array, i.e. bit index 0 * is located at bit 0 of byte 0; bit index 25 would be located at bit 1 * of byte 3 (u8 array). A test helper for this encoding is sketched * further below. * * @NL80211_ATTR_SURVEY_RADIO_STATS: Request overall radio statistics to be * returned along with other survey data.
If set, @NL80211_CMD_GET_SURVEY * may return a survey entry without a channel indicating global radio * statistics (only some values are valid and make sense.) * For devices that don't return such an entry even then, the information * should be contained in the result as the sum of the respective counters * over all channels. * * @NL80211_ATTR_SCHED_SCAN_DELAY: delay before the first cycle of a * scheduled scan is started. Or the delay before a WoWLAN * net-detect scan is started, counting from the moment the * system is suspended. This value is a u32, in seconds. * @NL80211_ATTR_REG_INDOOR: flag attribute, if set indicates that the device * is operating in an indoor environment. * * @NL80211_ATTR_MAX_NUM_SCHED_SCAN_PLANS: maximum number of scan plans for * scheduled scan supported by the device (u32), a wiphy attribute. * @NL80211_ATTR_MAX_SCAN_PLAN_INTERVAL: maximum interval (in seconds) for * a scan plan (u32), a wiphy attribute. * @NL80211_ATTR_MAX_SCAN_PLAN_ITERATIONS: maximum number of iterations in * a scan plan (u32), a wiphy attribute. * @NL80211_ATTR_SCHED_SCAN_PLANS: a list of scan plans for scheduled scan. * Each scan plan defines the number of scan iterations and the interval * between scans. The last scan plan will always run infinitely, * thus it must not specify the number of iterations, only the interval * between scans. The scan plans are executed sequentially. * Each scan plan is a nested attribute of &enum nl80211_sched_scan_plan. * @NL80211_ATTR_PBSS: flag attribute. If set it means operate * in a PBSS. Specified in %NL80211_CMD_CONNECT to request * connecting to a PCP, and in %NL80211_CMD_START_AP to start * a PCP instead of AP. Relevant for DMG networks only. * @NL80211_ATTR_BSS_SELECT: nested attribute for driver supporting the * BSS selection feature. When used with %NL80211_CMD_GET_WIPHY it contains * attributes according to &enum nl80211_bss_select_attr to indicate what * BSS selection behaviours are supported. When used with %NL80211_CMD_CONNECT * it contains the behaviour-specific attribute containing the parameters for * BSS selection to be done by driver and/or firmware. * * @NL80211_ATTR_STA_SUPPORT_P2P_PS: whether P2P PS mechanism supported * or not. u8, one of the values of &enum nl80211_sta_p2p_ps_status * * @NL80211_ATTR_PAD: attribute used for padding for 64-bit alignment * * @NL80211_ATTR_IFTYPE_EXT_CAPA: Nested attribute of the following attributes: * %NL80211_ATTR_IFTYPE, %NL80211_ATTR_EXT_CAPA, * %NL80211_ATTR_EXT_CAPA_MASK, to specify the extended capabilities per * interface type. * * @NL80211_ATTR_MU_MIMO_GROUP_DATA: array of 24 bytes that defines a MU-MIMO * groupID for monitor mode. * The first 8 bytes are a mask that defines the membership in each * group (there are 64 groups, group 0 and 63 are reserved), * each bit represents a group and set to 1 for being a member in * that group and 0 for not being a member. * The remaining 16 bytes define the position in each group: 2 bits for * each group. * (smaller group numbers represented on most significant bits and bigger * group numbers on least significant bits.) * This attribute is used only if all interfaces are in monitor mode. * Set this attribute in order to monitor packets using the given MU-MIMO * groupID data. * To turn off that feature, set all the bits of the groupID to zero. * @NL80211_ATTR_MU_MIMO_FOLLOW_MAC_ADDR: mac address for the sniffer to follow * when using MU-MIMO air sniffer. * To turn that feature off, set an invalid MAC address * (e.g.
FF:FF:FF:FF:FF:FF) * * @NL80211_ATTR_SCAN_START_TIME_TSF: The time at which the scan was actually * started (u64). The time is the TSF of the BSS the interface that * requested the scan is connected to (if available, otherwise this * attribute must not be included). * @NL80211_ATTR_SCAN_START_TIME_TSF_BSSID: The BSS according to which * %NL80211_ATTR_SCAN_START_TIME_TSF is set. * @NL80211_ATTR_MEASUREMENT_DURATION: measurement duration in TUs (u16). If * %NL80211_ATTR_MEASUREMENT_DURATION_MANDATORY is not set, this is the * maximum measurement duration allowed. This attribute is used with * measurement requests. It can also be used with %NL80211_CMD_TRIGGER_SCAN * if the scan is used for beacon report radio measurement. * @NL80211_ATTR_MEASUREMENT_DURATION_MANDATORY: flag attribute that indicates * that the duration specified with %NL80211_ATTR_MEASUREMENT_DURATION is * mandatory. If this flag is not set, the duration is the maximum duration * and the actual measurement duration may be shorter. * * @NL80211_ATTR_MESH_PEER_AID: Association ID for the mesh peer (u16). This is * used to pull the stored data for mesh peer in power save state. * * @NL80211_ATTR_NAN_MASTER_PREF: the master preference to be used by * %NL80211_CMD_START_NAN and optionally with * %NL80211_CMD_CHANGE_NAN_CONFIG. Its type is u8 and it can't be 0. * Also, values 1 and 255 are reserved for certification purposes and * should not be used during a normal device operation. * @NL80211_ATTR_BANDS: operating bands configuration. This is a u32 * bitmask of BIT(NL80211_BAND_*) as described in %enum * nl80211_band. For instance, for NL80211_BAND_2GHZ, bit 0 * would be set. This attribute is used with * %NL80211_CMD_START_NAN and %NL80211_CMD_CHANGE_NAN_CONFIG, and * it is optional. If no bands are set, it means don't-care and * the device will decide what to use. * @NL80211_ATTR_NAN_FUNC: a function that can be added to NAN. See * &enum nl80211_nan_func_attributes for description of this nested * attribute. * @NL80211_ATTR_NAN_MATCH: used to report a match. This is a nested attribute. * See &enum nl80211_nan_match_attributes. * @NL80211_ATTR_FILS_KEK: KEK for FILS (Re)Association Request/Response frame * protection. * @NL80211_ATTR_FILS_NONCES: Nonces (part of AAD) for FILS (Re)Association * Request/Response frame protection. This attribute contains the 16 octet * STA Nonce followed by 16 octets of AP Nonce. * * @NL80211_ATTR_MULTICAST_TO_UNICAST_ENABLED: Indicates whether or not multicast * packets should be sent out as unicast to all stations (flag attribute). * * @NL80211_ATTR_BSSID: The BSSID of the AP. Note that %NL80211_ATTR_MAC is also * used in various commands/events for specifying the BSSID. * * @NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI: Relative RSSI threshold by which * other BSSs have to be better or slightly worse than the current * connected BSS so that they get reported to user space. * This will give an opportunity to userspace to consider connecting to * other matching BSSs which have better or slightly worse RSSI than * the current connected BSS by using an offloaded operation to avoid * unnecessary wakeups. * * @NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST: When present the RSSI level for BSSs in * the specified band is to be adjusted before doing * %NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI based comparison to figure out * better BSSs. The attribute value is a packed structure * value as specified by &struct nl80211_bss_select_rssi_adjust. * * @NL80211_ATTR_TIMEOUT_REASON: The reason for which an operation timed out.
* u32 attribute with an &enum nl80211_timeout_reason value. This is used, * e.g., with %NL80211_CMD_CONNECT event. * * @NL80211_ATTR_FILS_ERP_USERNAME: EAP Re-authentication Protocol (ERP) * username part of NAI used to refer keys rRK and rIK. This is used with * %NL80211_CMD_CONNECT. * * @NL80211_ATTR_FILS_ERP_REALM: EAP Re-authentication Protocol (ERP) realm part * of NAI specifying the domain name of the ER server. This is used with * %NL80211_CMD_CONNECT. * * @NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM: Unsigned 16-bit ERP next sequence number * to use in ERP messages. This is used in generating the FILS wrapped data * for FILS authentication and is used with %NL80211_CMD_CONNECT. * * @NL80211_ATTR_FILS_ERP_RRK: ERP re-authentication Root Key (rRK) for the * NAI specified by %NL80211_ATTR_FILS_ERP_USERNAME and * %NL80211_ATTR_FILS_ERP_REALM. This is used for generating rIK and rMSK * from successful FILS authentication and is used with * %NL80211_CMD_CONNECT. * * @NL80211_ATTR_FILS_CACHE_ID: A 2-octet identifier advertised by a FILS AP * identifying the scope of PMKSAs. This is used with * @NL80211_CMD_SET_PMKSA and @NL80211_CMD_DEL_PMKSA. * * @NL80211_ATTR_PMK: attribute for passing PMK key material. Used with * %NL80211_CMD_SET_PMKSA for the PMKSA identified by %NL80211_ATTR_PMKID. * For %NL80211_CMD_CONNECT it is used to provide PSK for offloading 4-way * handshake for WPA/WPA2-PSK networks. For 802.1X authentication it is * used with %NL80211_CMD_SET_PMK. For offloaded FT support this attribute * specifies the PMK-R0 if NL80211_ATTR_PMKR0_NAME is included as well. * * @NL80211_ATTR_SCHED_SCAN_MULTI: flag attribute which user-space shall use to * indicate that it supports multiple active scheduled scan requests. * @NL80211_ATTR_SCHED_SCAN_MAX_REQS: indicates maximum number of scheduled * scan requests that may be active for the device (u32). * * @NL80211_ATTR_WANT_1X_4WAY_HS: flag attribute which user-space can include * in %NL80211_CMD_CONNECT to indicate that for 802.1X authentication it * wants to use the supported offload of the 4-way handshake. * @NL80211_ATTR_PMKR0_NAME: PMK-R0 Name for offloaded FT. * @NL80211_ATTR_PORT_AUTHORIZED: (reserved) * * @NL80211_ATTR_EXTERNAL_AUTH_ACTION: Identify the requested external * authentication operation (u32 attribute with an * &enum nl80211_external_auth_action value). This is used with the * %NL80211_CMD_EXTERNAL_AUTH request event. * @NL80211_ATTR_EXTERNAL_AUTH_SUPPORT: Flag attribute indicating that the user * space supports external authentication. This attribute shall be used * only with the %NL80211_CMD_CONNECT request. The driver may offload * authentication processing to user space if this capability is indicated * in NL80211_CMD_CONNECT requests from the user space. * * @NL80211_ATTR_NSS: Station's New/updated RX_NSS value notified using this * u8 attribute. This is used with %NL80211_CMD_STA_OPMODE_CHANGED. * * @NL80211_ATTR_TXQ_STATS: TXQ statistics (nested attribute, see &enum * nl80211_txq_stats) * @NL80211_ATTR_TXQ_LIMIT: Total packet limit for the TXQ queues for this phy. * The smaller of this and the memory limit is enforced. * @NL80211_ATTR_TXQ_MEMORY_LIMIT: Total memory limit (in bytes) for the * TXQ queues for this phy. The smaller of this and the packet limit is * enforced. * @NL80211_ATTR_TXQ_QUANTUM: TXQ scheduler quantum (bytes). Number of bytes * a flow is assigned on each round of the DRR scheduler.
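 *
 * Editorial aside, not part of the original header: a minimal helper for
 * the %NL80211_ATTR_EXT_FEATURES byte-array encoding referenced earlier
 * (bit index 0 at bit 0 of byte 0):
 *
 *	#include <stdbool.h>
 *
 *	static bool ext_feature_isset(const unsigned char *ext_features,
 *				      int len, int feature_index)
 *	{
 *		if (feature_index / 8 >= len)
 *			return false;
 *		return ext_features[feature_index / 8] &
 *		       (1 << (feature_index % 8));
 *	}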
* @NL80211_ATTR_HE_CAPABILITY: HE Capability information element (from * association request when used with NL80211_CMD_NEW_STATION). Can be set * only if %NL80211_STA_FLAG_WME is set. * * @NL80211_ATTR_FTM_RESPONDER: nested attribute which user-space can include * in %NL80211_CMD_START_AP or %NL80211_CMD_SET_BEACON for fine timing * measurement (FTM) responder functionality and containing the relevant * parameters, see &enum nl80211_ftm_responder_attr * * @NL80211_ATTR_FTM_RESPONDER_STATS: Nested attribute with FTM responder * statistics, see &enum nl80211_ftm_responder_stats. * * @NL80211_ATTR_NAN_CDW_2G: defines the NAN device committed DW on 2.4GHz. * This is a u8, where valid values are 1..5. The device must wake for at * least every 2^(val-1) DW on 2.4GHz. When using %NL80211_CMD_START_NAN, * if the attribute is not specified, the default committed DW is 1. * @NL80211_ATTR_NAN_CDW_5G: defines the NAN device committed DW on 5.2GHz. * This is a u8, where valid values are 0..5. If the value is 0, then there * are no wake ups on 5.2GHz DWs. When using %NL80211_CMD_START_NAN, * if the attribute is not specified, the default committed DW is 1. * * @NL80211_ATTR_TIMEOUT: Timeout for the given operation in milliseconds (u32); * if the attribute is not given, no timeout is requested. Note that 0 is an * invalid value. * * @NL80211_ATTR_PEER_MEASUREMENTS: peer measurements request (and result) * data, uses nested attributes specified in * &enum nl80211_peer_measurement_attrs. * This is also used for capability advertisement in the wiphy information, * with the appropriate sub-attributes. * * @NUM_NL80211_ATTR: total number of nl80211_attrs available * @NL80211_ATTR_MAX: highest attribute number currently defined * @__NL80211_ATTR_AFTER_LAST: internal use */ enum nl80211_attrs { /* don't change the order or add anything between, this is ABI!
*/ NL80211_ATTR_UNSPEC, NL80211_ATTR_WIPHY, NL80211_ATTR_WIPHY_NAME, NL80211_ATTR_IFINDEX, NL80211_ATTR_IFNAME, NL80211_ATTR_IFTYPE, NL80211_ATTR_MAC, NL80211_ATTR_KEY_DATA, NL80211_ATTR_KEY_IDX, NL80211_ATTR_KEY_CIPHER, NL80211_ATTR_KEY_SEQ, NL80211_ATTR_KEY_DEFAULT, NL80211_ATTR_BEACON_INTERVAL, NL80211_ATTR_DTIM_PERIOD, NL80211_ATTR_BEACON_HEAD, NL80211_ATTR_BEACON_TAIL, NL80211_ATTR_STA_AID, NL80211_ATTR_STA_FLAGS, NL80211_ATTR_STA_LISTEN_INTERVAL, NL80211_ATTR_STA_SUPPORTED_RATES, NL80211_ATTR_STA_VLAN, NL80211_ATTR_STA_INFO, NL80211_ATTR_WIPHY_BANDS, NL80211_ATTR_MNTR_FLAGS, NL80211_ATTR_MESH_ID, NL80211_ATTR_STA_PLINK_ACTION, NL80211_ATTR_MPATH_NEXT_HOP, NL80211_ATTR_MPATH_INFO, NL80211_ATTR_BSS_CTS_PROT, NL80211_ATTR_BSS_SHORT_PREAMBLE, NL80211_ATTR_BSS_SHORT_SLOT_TIME, NL80211_ATTR_HT_CAPABILITY, NL80211_ATTR_SUPPORTED_IFTYPES, NL80211_ATTR_REG_ALPHA2, NL80211_ATTR_REG_RULES, NL80211_ATTR_MESH_CONFIG, NL80211_ATTR_BSS_BASIC_RATES, NL80211_ATTR_WIPHY_TXQ_PARAMS, NL80211_ATTR_WIPHY_FREQ, NL80211_ATTR_WIPHY_CHANNEL_TYPE, NL80211_ATTR_KEY_DEFAULT_MGMT, NL80211_ATTR_MGMT_SUBTYPE, NL80211_ATTR_IE, NL80211_ATTR_MAX_NUM_SCAN_SSIDS, NL80211_ATTR_SCAN_FREQUENCIES, NL80211_ATTR_SCAN_SSIDS, NL80211_ATTR_GENERATION, /* replaces old SCAN_GENERATION */ NL80211_ATTR_BSS, NL80211_ATTR_REG_INITIATOR, NL80211_ATTR_REG_TYPE, NL80211_ATTR_SUPPORTED_COMMANDS, NL80211_ATTR_FRAME, NL80211_ATTR_SSID, NL80211_ATTR_AUTH_TYPE, NL80211_ATTR_REASON_CODE, NL80211_ATTR_KEY_TYPE, NL80211_ATTR_MAX_SCAN_IE_LEN, NL80211_ATTR_CIPHER_SUITES, NL80211_ATTR_FREQ_BEFORE, NL80211_ATTR_FREQ_AFTER, NL80211_ATTR_FREQ_FIXED, NL80211_ATTR_WIPHY_RETRY_SHORT, NL80211_ATTR_WIPHY_RETRY_LONG, NL80211_ATTR_WIPHY_FRAG_THRESHOLD, NL80211_ATTR_WIPHY_RTS_THRESHOLD, NL80211_ATTR_TIMED_OUT, NL80211_ATTR_USE_MFP, NL80211_ATTR_STA_FLAGS2, NL80211_ATTR_CONTROL_PORT, NL80211_ATTR_TESTDATA, NL80211_ATTR_PRIVACY, NL80211_ATTR_DISCONNECTED_BY_AP, NL80211_ATTR_STATUS_CODE, NL80211_ATTR_CIPHER_SUITES_PAIRWISE, NL80211_ATTR_CIPHER_SUITES_GROUP, NL80211_ATTR_WPA_VERSIONS, NL80211_ATTR_AKM_SUITES, NL80211_ATTR_REQ_IE, NL80211_ATTR_RESP_IE, NL80211_ATTR_PREV_BSSID, NL80211_ATTR_KEY, NL80211_ATTR_KEYS, NL80211_ATTR_PID, NL80211_ATTR_4ADDR, NL80211_ATTR_SURVEY_INFO, NL80211_ATTR_PMKID, NL80211_ATTR_MAX_NUM_PMKIDS, NL80211_ATTR_DURATION, NL80211_ATTR_COOKIE, NL80211_ATTR_WIPHY_COVERAGE_CLASS, NL80211_ATTR_TX_RATES, NL80211_ATTR_FRAME_MATCH, NL80211_ATTR_ACK, NL80211_ATTR_PS_STATE, NL80211_ATTR_CQM, NL80211_ATTR_LOCAL_STATE_CHANGE, NL80211_ATTR_AP_ISOLATE, NL80211_ATTR_WIPHY_TX_POWER_SETTING, NL80211_ATTR_WIPHY_TX_POWER_LEVEL, NL80211_ATTR_TX_FRAME_TYPES, NL80211_ATTR_RX_FRAME_TYPES, NL80211_ATTR_FRAME_TYPE, NL80211_ATTR_CONTROL_PORT_ETHERTYPE, NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT, NL80211_ATTR_SUPPORT_IBSS_RSN, NL80211_ATTR_WIPHY_ANTENNA_TX, NL80211_ATTR_WIPHY_ANTENNA_RX, NL80211_ATTR_MCAST_RATE, NL80211_ATTR_OFFCHANNEL_TX_OK, NL80211_ATTR_BSS_HT_OPMODE, NL80211_ATTR_KEY_DEFAULT_TYPES, NL80211_ATTR_MAX_REMAIN_ON_CHANNEL_DURATION, NL80211_ATTR_MESH_SETUP, NL80211_ATTR_WIPHY_ANTENNA_AVAIL_TX, NL80211_ATTR_WIPHY_ANTENNA_AVAIL_RX, NL80211_ATTR_SUPPORT_MESH_AUTH, NL80211_ATTR_STA_PLINK_STATE, NL80211_ATTR_WOWLAN_TRIGGERS, NL80211_ATTR_WOWLAN_TRIGGERS_SUPPORTED, NL80211_ATTR_SCHED_SCAN_INTERVAL, NL80211_ATTR_INTERFACE_COMBINATIONS, NL80211_ATTR_SOFTWARE_IFTYPES, NL80211_ATTR_REKEY_DATA, NL80211_ATTR_MAX_NUM_SCHED_SCAN_SSIDS, NL80211_ATTR_MAX_SCHED_SCAN_IE_LEN, NL80211_ATTR_SCAN_SUPP_RATES, NL80211_ATTR_HIDDEN_SSID, NL80211_ATTR_IE_PROBE_RESP, 
NL80211_ATTR_IE_ASSOC_RESP, NL80211_ATTR_STA_WME, NL80211_ATTR_SUPPORT_AP_UAPSD, NL80211_ATTR_ROAM_SUPPORT, NL80211_ATTR_SCHED_SCAN_MATCH, NL80211_ATTR_MAX_MATCH_SETS, NL80211_ATTR_PMKSA_CANDIDATE, NL80211_ATTR_TX_NO_CCK_RATE, NL80211_ATTR_TDLS_ACTION, NL80211_ATTR_TDLS_DIALOG_TOKEN, NL80211_ATTR_TDLS_OPERATION, NL80211_ATTR_TDLS_SUPPORT, NL80211_ATTR_TDLS_EXTERNAL_SETUP, NL80211_ATTR_DEVICE_AP_SME, NL80211_ATTR_DONT_WAIT_FOR_ACK, NL80211_ATTR_FEATURE_FLAGS, NL80211_ATTR_PROBE_RESP_OFFLOAD, NL80211_ATTR_PROBE_RESP, NL80211_ATTR_DFS_REGION, NL80211_ATTR_DISABLE_HT, NL80211_ATTR_HT_CAPABILITY_MASK, NL80211_ATTR_NOACK_MAP, NL80211_ATTR_INACTIVITY_TIMEOUT, NL80211_ATTR_RX_SIGNAL_DBM, NL80211_ATTR_BG_SCAN_PERIOD, NL80211_ATTR_WDEV, NL80211_ATTR_USER_REG_HINT_TYPE, NL80211_ATTR_CONN_FAILED_REASON, NL80211_ATTR_AUTH_DATA, NL80211_ATTR_VHT_CAPABILITY, NL80211_ATTR_SCAN_FLAGS, NL80211_ATTR_CHANNEL_WIDTH, NL80211_ATTR_CENTER_FREQ1, NL80211_ATTR_CENTER_FREQ2, NL80211_ATTR_P2P_CTWINDOW, NL80211_ATTR_P2P_OPPPS, NL80211_ATTR_LOCAL_MESH_POWER_MODE, NL80211_ATTR_ACL_POLICY, NL80211_ATTR_MAC_ADDRS, NL80211_ATTR_MAC_ACL_MAX, NL80211_ATTR_RADAR_EVENT, NL80211_ATTR_EXT_CAPA, NL80211_ATTR_EXT_CAPA_MASK, NL80211_ATTR_STA_CAPABILITY, NL80211_ATTR_STA_EXT_CAPABILITY, NL80211_ATTR_PROTOCOL_FEATURES, NL80211_ATTR_SPLIT_WIPHY_DUMP, NL80211_ATTR_DISABLE_VHT, NL80211_ATTR_VHT_CAPABILITY_MASK, NL80211_ATTR_MDID, NL80211_ATTR_IE_RIC, NL80211_ATTR_CRIT_PROT_ID, NL80211_ATTR_MAX_CRIT_PROT_DURATION, NL80211_ATTR_PEER_AID, NL80211_ATTR_COALESCE_RULE, NL80211_ATTR_CH_SWITCH_COUNT, NL80211_ATTR_CH_SWITCH_BLOCK_TX, NL80211_ATTR_CSA_IES, NL80211_ATTR_CSA_C_OFF_BEACON, NL80211_ATTR_CSA_C_OFF_PRESP, NL80211_ATTR_RXMGMT_FLAGS, NL80211_ATTR_STA_SUPPORTED_CHANNELS, NL80211_ATTR_STA_SUPPORTED_OPER_CLASSES, NL80211_ATTR_HANDLE_DFS, NL80211_ATTR_SUPPORT_5_MHZ, NL80211_ATTR_SUPPORT_10_MHZ, NL80211_ATTR_OPMODE_NOTIF, NL80211_ATTR_VENDOR_ID, NL80211_ATTR_VENDOR_SUBCMD, NL80211_ATTR_VENDOR_DATA, NL80211_ATTR_VENDOR_EVENTS, NL80211_ATTR_QOS_MAP, NL80211_ATTR_MAC_HINT, NL80211_ATTR_WIPHY_FREQ_HINT, NL80211_ATTR_MAX_AP_ASSOC_STA, NL80211_ATTR_TDLS_PEER_CAPABILITY, NL80211_ATTR_SOCKET_OWNER, NL80211_ATTR_CSA_C_OFFSETS_TX, NL80211_ATTR_MAX_CSA_COUNTERS, NL80211_ATTR_TDLS_INITIATOR, NL80211_ATTR_USE_RRM, NL80211_ATTR_WIPHY_DYN_ACK, NL80211_ATTR_TSID, NL80211_ATTR_USER_PRIO, NL80211_ATTR_ADMITTED_TIME, NL80211_ATTR_SMPS_MODE, NL80211_ATTR_OPER_CLASS, NL80211_ATTR_MAC_MASK, NL80211_ATTR_WIPHY_SELF_MANAGED_REG, NL80211_ATTR_EXT_FEATURES, NL80211_ATTR_SURVEY_RADIO_STATS, NL80211_ATTR_NETNS_FD, NL80211_ATTR_SCHED_SCAN_DELAY, NL80211_ATTR_REG_INDOOR, NL80211_ATTR_MAX_NUM_SCHED_SCAN_PLANS, NL80211_ATTR_MAX_SCAN_PLAN_INTERVAL, NL80211_ATTR_MAX_SCAN_PLAN_ITERATIONS, NL80211_ATTR_SCHED_SCAN_PLANS, NL80211_ATTR_PBSS, NL80211_ATTR_BSS_SELECT, NL80211_ATTR_STA_SUPPORT_P2P_PS, NL80211_ATTR_PAD, NL80211_ATTR_IFTYPE_EXT_CAPA, NL80211_ATTR_MU_MIMO_GROUP_DATA, NL80211_ATTR_MU_MIMO_FOLLOW_MAC_ADDR, NL80211_ATTR_SCAN_START_TIME_TSF, NL80211_ATTR_SCAN_START_TIME_TSF_BSSID, NL80211_ATTR_MEASUREMENT_DURATION, NL80211_ATTR_MEASUREMENT_DURATION_MANDATORY, NL80211_ATTR_MESH_PEER_AID, NL80211_ATTR_NAN_MASTER_PREF, NL80211_ATTR_BANDS, NL80211_ATTR_NAN_FUNC, NL80211_ATTR_NAN_MATCH, NL80211_ATTR_FILS_KEK, NL80211_ATTR_FILS_NONCES, NL80211_ATTR_MULTICAST_TO_UNICAST_ENABLED, NL80211_ATTR_BSSID, NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI, NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST, NL80211_ATTR_TIMEOUT_REASON, NL80211_ATTR_FILS_ERP_USERNAME, NL80211_ATTR_FILS_ERP_REALM, 
NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM, NL80211_ATTR_FILS_ERP_RRK, NL80211_ATTR_FILS_CACHE_ID, NL80211_ATTR_PMK, NL80211_ATTR_SCHED_SCAN_MULTI, NL80211_ATTR_SCHED_SCAN_MAX_REQS, NL80211_ATTR_WANT_1X_4WAY_HS, NL80211_ATTR_PMKR0_NAME, NL80211_ATTR_PORT_AUTHORIZED, NL80211_ATTR_EXTERNAL_AUTH_ACTION, NL80211_ATTR_EXTERNAL_AUTH_SUPPORT, NL80211_ATTR_NSS, NL80211_ATTR_ACK_SIGNAL, NL80211_ATTR_CONTROL_PORT_OVER_NL80211, NL80211_ATTR_TXQ_STATS, NL80211_ATTR_TXQ_LIMIT, NL80211_ATTR_TXQ_MEMORY_LIMIT, NL80211_ATTR_TXQ_QUANTUM, NL80211_ATTR_HE_CAPABILITY, NL80211_ATTR_FTM_RESPONDER, NL80211_ATTR_FTM_RESPONDER_STATS, NL80211_ATTR_TIMEOUT, NL80211_ATTR_PEER_MEASUREMENTS, NL80211_ATTR_NAN_CDW_2G, NL80211_ATTR_NAN_CDW_5G, /* add attributes here, update the policy in nl80211.c */ __NL80211_ATTR_AFTER_LAST, NUM_NL80211_ATTR = __NL80211_ATTR_AFTER_LAST, NL80211_ATTR_MAX = __NL80211_ATTR_AFTER_LAST - 1 }; /* source-level API compatibility */ #define NL80211_ATTR_SCAN_GENERATION NL80211_ATTR_GENERATION #define NL80211_ATTR_MESH_PARAMS NL80211_ATTR_MESH_CONFIG #define NL80211_ATTR_IFACE_SOCKET_OWNER NL80211_ATTR_SOCKET_OWNER #define NL80211_ATTR_SAE_DATA NL80211_ATTR_AUTH_DATA /* * Allow user space programs to use #ifdef on new attributes by defining them * here */ #define NL80211_CMD_CONNECT NL80211_CMD_CONNECT #define NL80211_ATTR_HT_CAPABILITY NL80211_ATTR_HT_CAPABILITY #define NL80211_ATTR_BSS_BASIC_RATES NL80211_ATTR_BSS_BASIC_RATES #define NL80211_ATTR_WIPHY_TXQ_PARAMS NL80211_ATTR_WIPHY_TXQ_PARAMS #define NL80211_ATTR_WIPHY_FREQ NL80211_ATTR_WIPHY_FREQ #define NL80211_ATTR_WIPHY_CHANNEL_TYPE NL80211_ATTR_WIPHY_CHANNEL_TYPE #define NL80211_ATTR_MGMT_SUBTYPE NL80211_ATTR_MGMT_SUBTYPE #define NL80211_ATTR_IE NL80211_ATTR_IE #define NL80211_ATTR_REG_INITIATOR NL80211_ATTR_REG_INITIATOR #define NL80211_ATTR_REG_TYPE NL80211_ATTR_REG_TYPE #define NL80211_ATTR_FRAME NL80211_ATTR_FRAME #define NL80211_ATTR_SSID NL80211_ATTR_SSID #define NL80211_ATTR_AUTH_TYPE NL80211_ATTR_AUTH_TYPE #define NL80211_ATTR_REASON_CODE NL80211_ATTR_REASON_CODE #define NL80211_ATTR_CIPHER_SUITES_PAIRWISE NL80211_ATTR_CIPHER_SUITES_PAIRWISE #define NL80211_ATTR_CIPHER_SUITE_GROUP NL80211_ATTR_CIPHER_SUITES_GROUP #define NL80211_ATTR_WPA_VERSIONS NL80211_ATTR_WPA_VERSIONS #define NL80211_ATTR_AKM_SUITES NL80211_ATTR_AKM_SUITES #define NL80211_ATTR_KEY NL80211_ATTR_KEY #define NL80211_ATTR_KEYS NL80211_ATTR_KEYS #define NL80211_ATTR_FEATURE_FLAGS NL80211_ATTR_FEATURE_FLAGS #define NL80211_WIPHY_NAME_MAXLEN 64 #define NL80211_MAX_SUPP_RATES 32 #define NL80211_MAX_SUPP_HT_RATES 77 #define NL80211_MAX_SUPP_REG_RULES 128 #define NL80211_TKIP_DATA_OFFSET_ENCR_KEY 0 #define NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY 16 #define NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY 24 #define NL80211_HT_CAPABILITY_LEN 26 #define NL80211_VHT_CAPABILITY_LEN 12 #define NL80211_HE_MIN_CAPABILITY_LEN 16 #define NL80211_HE_MAX_CAPABILITY_LEN 51 #define NL80211_MAX_NR_CIPHER_SUITES 5 #define NL80211_MAX_NR_AKM_SUITES 2 #define NL80211_MAX_NR_NAN_SEC_CTX_IDS 5 #define NL80211_MIN_REMAIN_ON_CHANNEL_TIME 10 /* default RSSI threshold for scan results if none specified. 
*/ #define NL80211_SCAN_RSSI_THOLD_OFF -300 #define NL80211_CQM_TXE_MAX_INTVL 1800 /** * enum nl80211_iftype - (virtual) interface types * * @NL80211_IFTYPE_UNSPECIFIED: unspecified type, driver decides * @NL80211_IFTYPE_ADHOC: independent BSS member * @NL80211_IFTYPE_STATION: managed BSS member * @NL80211_IFTYPE_AP: access point * @NL80211_IFTYPE_AP_VLAN: VLAN interface for access points; VLAN interfaces * are a bit special in that they must always be tied to a pre-existing * AP type interface. * @NL80211_IFTYPE_WDS: wireless distribution interface * @NL80211_IFTYPE_MONITOR: monitor interface receiving all frames * @NL80211_IFTYPE_MESH_POINT: mesh point * @NL80211_IFTYPE_P2P_CLIENT: P2P client * @NL80211_IFTYPE_P2P_GO: P2P group owner * @NL80211_IFTYPE_P2P_DEVICE: P2P device interface type, this is not a netdev * and therefore can't be created in the normal ways, use the * %NL80211_CMD_START_P2P_DEVICE and %NL80211_CMD_STOP_P2P_DEVICE * commands to create and destroy one * @NL80211_IFTYPE_OCB: Outside Context of a BSS * This mode corresponds to the MIB variable dot11OCBActivated=true * @NL80211_IFTYPE_NAN: NAN device interface type (not a netdev) * @NL80211_IFTYPE_NAN_DATA: NAN data interface * @NL80211_IFTYPE_MAX: highest interface type number currently defined * @NUM_NL80211_IFTYPES: number of defined interface types * * These values are used with the %NL80211_ATTR_IFTYPE attribute * to set the type of an interface. * */ enum nl80211_iftype { NL80211_IFTYPE_UNSPECIFIED, NL80211_IFTYPE_ADHOC, NL80211_IFTYPE_STATION, NL80211_IFTYPE_AP, NL80211_IFTYPE_AP_VLAN, NL80211_IFTYPE_WDS, NL80211_IFTYPE_MONITOR, NL80211_IFTYPE_MESH_POINT, NL80211_IFTYPE_P2P_CLIENT, NL80211_IFTYPE_P2P_GO, NL80211_IFTYPE_P2P_DEVICE, NL80211_IFTYPE_OCB, NL80211_IFTYPE_NAN, NL80211_IFTYPE_NAN_DATA, /* keep last */ NUM_NL80211_IFTYPES, NL80211_IFTYPE_MAX = NUM_NL80211_IFTYPES - 1 }; /** * enum nl80211_sta_flags - station flags * * Station flags. When a station is added to an AP interface, it is * assumed to be already associated (and hence authenticated.) * * @__NL80211_STA_FLAG_INVALID: attribute number 0 is reserved * @NL80211_STA_FLAG_AUTHORIZED: station is authorized (802.1X) * @NL80211_STA_FLAG_SHORT_PREAMBLE: station is capable of receiving frames * with short Barker preamble * @NL80211_STA_FLAG_WME: station is WME/QoS capable * @NL80211_STA_FLAG_MFP: station uses management frame protection * @NL80211_STA_FLAG_AUTHENTICATED: station is authenticated * @NL80211_STA_FLAG_TDLS_PEER: station is a TDLS peer -- this flag should * only be used in managed mode (even in the flags mask). Note that the * flag can't be changed, it is only valid while adding a station, and * attempts to change it will silently be ignored (rather than rejected * as errors.)
* @NL80211_STA_FLAG_ASSOCIATED: station is associated; used with drivers * that support %NL80211_FEATURE_FULL_AP_CLIENT_STATE to transition a * previously added station into associated state * @NL80211_STA_FLAG_MAX: highest station flag number currently defined * @__NL80211_STA_FLAG_AFTER_LAST: internal use */ enum nl80211_sta_flags { __NL80211_STA_FLAG_INVALID, NL80211_STA_FLAG_AUTHORIZED, NL80211_STA_FLAG_SHORT_PREAMBLE, NL80211_STA_FLAG_WME, NL80211_STA_FLAG_MFP, NL80211_STA_FLAG_AUTHENTICATED, NL80211_STA_FLAG_TDLS_PEER, NL80211_STA_FLAG_ASSOCIATED, /* keep last */ __NL80211_STA_FLAG_AFTER_LAST, NL80211_STA_FLAG_MAX = __NL80211_STA_FLAG_AFTER_LAST - 1 }; /** * enum nl80211_sta_p2p_ps_status - station support of P2P PS * * @NL80211_P2P_PS_UNSUPPORTED: station doesn't support P2P PS mechanism * @NL80211_P2P_PS_SUPPORTED: station supports P2P PS mechanism * @NUM_NL80211_P2P_PS_STATUS: number of values */ enum nl80211_sta_p2p_ps_status { NL80211_P2P_PS_UNSUPPORTED = 0, NL80211_P2P_PS_SUPPORTED, NUM_NL80211_P2P_PS_STATUS, }; #define NL80211_STA_FLAG_MAX_OLD_API NL80211_STA_FLAG_TDLS_PEER /** * struct nl80211_sta_flag_update - station flags mask/set * @mask: mask of station flags to set * @set: which values to set them to * * Both mask and set contain bits as per &enum nl80211_sta_flags. */ struct nl80211_sta_flag_update { __u32 mask; __u32 set; } __attribute__((packed)); /** * enum nl80211_he_gi - HE guard interval * @NL80211_RATE_INFO_HE_GI_0_8: 0.8 usec * @NL80211_RATE_INFO_HE_GI_1_6: 1.6 usec * @NL80211_RATE_INFO_HE_GI_3_2: 3.2 usec */ enum nl80211_he_gi { NL80211_RATE_INFO_HE_GI_0_8, NL80211_RATE_INFO_HE_GI_1_6, NL80211_RATE_INFO_HE_GI_3_2, }; /** * enum nl80211_he_ru_alloc - HE RU allocation values * @NL80211_RATE_INFO_HE_RU_ALLOC_26: 26-tone RU allocation * @NL80211_RATE_INFO_HE_RU_ALLOC_52: 52-tone RU allocation * @NL80211_RATE_INFO_HE_RU_ALLOC_106: 106-tone RU allocation * @NL80211_RATE_INFO_HE_RU_ALLOC_242: 242-tone RU allocation * @NL80211_RATE_INFO_HE_RU_ALLOC_484: 484-tone RU allocation * @NL80211_RATE_INFO_HE_RU_ALLOC_996: 996-tone RU allocation * @NL80211_RATE_INFO_HE_RU_ALLOC_2x996: 2x996-tone RU allocation */ enum nl80211_he_ru_alloc { NL80211_RATE_INFO_HE_RU_ALLOC_26, NL80211_RATE_INFO_HE_RU_ALLOC_52, NL80211_RATE_INFO_HE_RU_ALLOC_106, NL80211_RATE_INFO_HE_RU_ALLOC_242, NL80211_RATE_INFO_HE_RU_ALLOC_484, NL80211_RATE_INFO_HE_RU_ALLOC_996, NL80211_RATE_INFO_HE_RU_ALLOC_2x996, }; /** * enum nl80211_rate_info - bitrate information * * These attribute types are used with %NL80211_STA_INFO_TXRATE * when getting information about the bitrate of a station. * There are 2 attributes for bitrate, a legacy one that represents * a 16-bit value, and a new one that represents a 32-bit value. * If the rate value fits into 16 bits, both attributes are reported * with the same value. If the rate is too high to fit into 16 bits * (>6.5535 Gbps) only the 32-bit attribute is included. * User space tools are encouraged to use the 32-bit attribute and fall * back to the 16-bit one for compatibility with older kernels, as * sketched in the aside below.
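 *
 * Editorial aside, not part of the original header: the 32-bit-first,
 * 16-bit-fallback rule above can be implemented as a small helper, given
 * an already-parsed array of rate attributes:
 *
 *	#include <stdint.h>
 *
 *	static uint32_t rate_in_100kbps(struct nlattr *tb[])
 *	{
 *		if (tb[NL80211_RATE_INFO_BITRATE32])
 *			return nla_get_u32(tb[NL80211_RATE_INFO_BITRATE32]);
 *		if (tb[NL80211_RATE_INFO_BITRATE])
 *			return nla_get_u16(tb[NL80211_RATE_INFO_BITRATE]);
 *		return 0;
 *	}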
* * @__NL80211_RATE_INFO_INVALID: attribute number 0 is reserved * @NL80211_RATE_INFO_BITRATE: total bitrate (u16, 100kbit/s) * @NL80211_RATE_INFO_MCS: mcs index for 802.11n (u8) * @NL80211_RATE_INFO_40_MHZ_WIDTH: 40 MHz dualchannel bitrate * @NL80211_RATE_INFO_SHORT_GI: 400ns guard interval * @NL80211_RATE_INFO_BITRATE32: total bitrate (u32, 100kbit/s) * @NL80211_RATE_INFO_MAX: highest rate_info number currently defined * @NL80211_RATE_INFO_VHT_MCS: MCS index for VHT (u8) * @NL80211_RATE_INFO_VHT_NSS: number of streams in VHT (u8) * @NL80211_RATE_INFO_80_MHZ_WIDTH: 80 MHz VHT rate * @NL80211_RATE_INFO_80P80_MHZ_WIDTH: unused - 80+80 is treated the * same as 160 for purposes of the bitrates * @NL80211_RATE_INFO_160_MHZ_WIDTH: 160 MHz VHT rate * @NL80211_RATE_INFO_10_MHZ_WIDTH: 10 MHz width - note that this is * a legacy rate and will be reported as the actual bitrate, i.e. * half the base (20 MHz) rate * @NL80211_RATE_INFO_5_MHZ_WIDTH: 5 MHz width - note that this is * a legacy rate and will be reported as the actual bitrate, i.e. * a quarter of the base (20 MHz) rate * @NL80211_RATE_INFO_HE_MCS: HE MCS index (u8, 0-11) * @NL80211_RATE_INFO_HE_NSS: HE NSS value (u8, 1-8) * @NL80211_RATE_INFO_HE_GI: HE guard interval identifier * (u8, see &enum nl80211_he_gi) * @NL80211_RATE_INFO_HE_DCM: HE DCM value (u8, 0/1) * @NL80211_RATE_INFO_HE_RU_ALLOC: HE RU allocation, if not present then * non-OFDMA was used (u8, see &enum nl80211_he_ru_alloc) * @__NL80211_RATE_INFO_AFTER_LAST: internal use */ enum nl80211_rate_info { __NL80211_RATE_INFO_INVALID, NL80211_RATE_INFO_BITRATE, NL80211_RATE_INFO_MCS, NL80211_RATE_INFO_40_MHZ_WIDTH, NL80211_RATE_INFO_SHORT_GI, NL80211_RATE_INFO_BITRATE32, NL80211_RATE_INFO_VHT_MCS, NL80211_RATE_INFO_VHT_NSS, NL80211_RATE_INFO_80_MHZ_WIDTH, NL80211_RATE_INFO_80P80_MHZ_WIDTH, NL80211_RATE_INFO_160_MHZ_WIDTH, NL80211_RATE_INFO_10_MHZ_WIDTH, NL80211_RATE_INFO_5_MHZ_WIDTH, NL80211_RATE_INFO_HE_MCS, NL80211_RATE_INFO_HE_NSS, NL80211_RATE_INFO_HE_GI, NL80211_RATE_INFO_HE_DCM, NL80211_RATE_INFO_HE_RU_ALLOC, /* keep last */ __NL80211_RATE_INFO_AFTER_LAST, NL80211_RATE_INFO_MAX = __NL80211_RATE_INFO_AFTER_LAST - 1 }; /** * enum nl80211_sta_bss_param - BSS information collected by STA * * These attribute types are used with %NL80211_STA_INFO_BSS_PARAM * when getting information about the BSS parameters of a station. * * @__NL80211_STA_BSS_PARAM_INVALID: attribute number 0 is reserved * @NL80211_STA_BSS_PARAM_CTS_PROT: whether CTS protection is enabled (flag) * @NL80211_STA_BSS_PARAM_SHORT_PREAMBLE: whether short preamble is enabled * (flag) * @NL80211_STA_BSS_PARAM_SHORT_SLOT_TIME: whether short slot time is enabled * (flag) * @NL80211_STA_BSS_PARAM_DTIM_PERIOD: DTIM period for beaconing (u8) * @NL80211_STA_BSS_PARAM_BEACON_INTERVAL: Beacon interval (u16) * @NL80211_STA_BSS_PARAM_MAX: highest sta_bss_param number currently defined * @__NL80211_STA_BSS_PARAM_AFTER_LAST: internal use */ enum nl80211_sta_bss_param { __NL80211_STA_BSS_PARAM_INVALID, NL80211_STA_BSS_PARAM_CTS_PROT, NL80211_STA_BSS_PARAM_SHORT_PREAMBLE, NL80211_STA_BSS_PARAM_SHORT_SLOT_TIME, NL80211_STA_BSS_PARAM_DTIM_PERIOD, NL80211_STA_BSS_PARAM_BEACON_INTERVAL, /* keep last */ __NL80211_STA_BSS_PARAM_AFTER_LAST, NL80211_STA_BSS_PARAM_MAX = __NL80211_STA_BSS_PARAM_AFTER_LAST - 1 }; /** * enum nl80211_sta_info - station information * * These attribute types are used with %NL80211_ATTR_STA_INFO * when getting information about a station.
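*
* A minimal illustrative sketch (not part of this header), assuming a
* libnl-3 dump callback for %NL80211_CMD_GET_STATION in which attrs[]
* already holds the parsed top-level attributes, and <stdio.h> plus
* <netlink/attr.h> are available:
*
*	struct nlattr *sinfo[NL80211_STA_INFO_MAX + 1];
*
*	if (attrs[NL80211_ATTR_STA_INFO] &&
*	    nla_parse_nested(sinfo, NL80211_STA_INFO_MAX,
*			     attrs[NL80211_ATTR_STA_INFO], NULL) == 0 &&
*	    sinfo[NL80211_STA_INFO_SIGNAL])
*		printf("signal: %d dBm\n",	// signal is a signed dBm value
*		       (int)(__s8)nla_get_u8(sinfo[NL80211_STA_INFO_SIGNAL]));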
* * @__NL80211_STA_INFO_INVALID: attribute number 0 is reserved * @NL80211_STA_INFO_INACTIVE_TIME: time since last activity (u32, msecs) * @NL80211_STA_INFO_RX_BYTES: total received bytes (MPDU length) * (u32, from this station) * @NL80211_STA_INFO_TX_BYTES: total transmitted bytes (MPDU length) * (u32, to this station) * @NL80211_STA_INFO_RX_BYTES64: total received bytes (MPDU length) * (u64, from this station) * @NL80211_STA_INFO_TX_BYTES64: total transmitted bytes (MPDU length) * (u64, to this station) * @NL80211_STA_INFO_SIGNAL: signal strength of last received PPDU (u8, dBm) * @NL80211_STA_INFO_TX_BITRATE: current unicast tx rate, nested attribute * containing info as possible, see &enum nl80211_rate_info * @NL80211_STA_INFO_RX_PACKETS: total received packets (MSDUs and MMPDUs) * (u32, from this station) * @NL80211_STA_INFO_TX_PACKETS: total transmitted packets (MSDUs and MMPDUs) * (u32, to this station) * @NL80211_STA_INFO_TX_RETRIES: total retries (MPDUs) (u32, to this station) * @NL80211_STA_INFO_TX_FAILED: total failed packets (MPDUs) * (u32, to this station) * @NL80211_STA_INFO_SIGNAL_AVG: signal strength average (u8, dBm) * @NL80211_STA_INFO_LLID: the station's mesh LLID * @NL80211_STA_INFO_PLID: the station's mesh PLID * @NL80211_STA_INFO_PLINK_STATE: peer link state for the station * (see &enum nl80211_plink_state) * @NL80211_STA_INFO_RX_BITRATE: last unicast data frame rx rate, nested * attribute, like NL80211_STA_INFO_TX_BITRATE. * @NL80211_STA_INFO_BSS_PARAM: current station's view of BSS, nested attribute * containing info as possible, see &enum nl80211_sta_bss_param * @NL80211_STA_INFO_CONNECTED_TIME: time since the station last connected * @NL80211_STA_INFO_STA_FLAGS: Contains a struct nl80211_sta_flag_update. * @NL80211_STA_INFO_BEACON_LOSS: count of times beacon loss was detected (u32) * @NL80211_STA_INFO_T_OFFSET: timing offset with respect to this STA (s64) * @NL80211_STA_INFO_LOCAL_PM: local mesh STA link-specific power mode * @NL80211_STA_INFO_PEER_PM: peer mesh STA link-specific power mode * @NL80211_STA_INFO_NONPEER_PM: neighbor mesh STA power save mode towards * non-peer STA * @NL80211_STA_INFO_CHAIN_SIGNAL: per-chain signal strength of last PPDU * Contains a nested array of signal strength attributes (u8, dBm) * @NL80211_STA_INFO_CHAIN_SIGNAL_AVG: per-chain signal strength average * Same format as NL80211_STA_INFO_CHAIN_SIGNAL. * @NL80211_STA_INFO_EXPECTED_THROUGHPUT: expected throughput considering also the * 802.11 header (u32, kbps) * @NL80211_STA_INFO_RX_DROP_MISC: RX packets dropped for unspecified reasons * (u64) * @NL80211_STA_INFO_BEACON_RX: number of beacons received from this peer (u64) * @NL80211_STA_INFO_BEACON_SIGNAL_AVG: signal strength average * for beacons only (u8, dBm) * @NL80211_STA_INFO_TID_STATS: per-TID statistics (see &enum nl80211_tid_stats) * This is a nested attribute where each inner attribute number is the * TID+1 and the special TID 16 (i.e. value 17) is used for non-QoS frames; * each one of those is again nested with &enum nl80211_tid_stats * attributes carrying the actual values.
* @NL80211_STA_INFO_RX_DURATION: aggregate PPDU duration for all frames * received from the station (u64, usec) * @NL80211_STA_INFO_PAD: attribute used for padding for 64-bit alignment * @NL80211_STA_INFO_ACK_SIGNAL: signal strength of the last ACK frame (u8, dBm) * @NL80211_STA_INFO_ACK_SIGNAL_AVG: avg signal strength of ACK frames (s8, dBm) * @NL80211_STA_INFO_RX_MPDUS: total number of received packets (MPDUs) * (u32, from this station) * @NL80211_STA_INFO_FCS_ERROR_COUNT: total number of packets (MPDUs) received * with an FCS error (u32, from this station). This count may not include * some packets with an FCS error due to TA corruption. Hence this counter * might not be fully accurate. * @NL80211_STA_INFO_CONNECTED_TO_GATE: set to true if STA has a path to a * mesh gate (u8, 0 or 1) * @__NL80211_STA_INFO_AFTER_LAST: internal * @NL80211_STA_INFO_MAX: highest possible station info attribute */ enum nl80211_sta_info { __NL80211_STA_INFO_INVALID, NL80211_STA_INFO_INACTIVE_TIME, NL80211_STA_INFO_RX_BYTES, NL80211_STA_INFO_TX_BYTES, NL80211_STA_INFO_LLID, NL80211_STA_INFO_PLID, NL80211_STA_INFO_PLINK_STATE, NL80211_STA_INFO_SIGNAL, NL80211_STA_INFO_TX_BITRATE, NL80211_STA_INFO_RX_PACKETS, NL80211_STA_INFO_TX_PACKETS, NL80211_STA_INFO_TX_RETRIES, NL80211_STA_INFO_TX_FAILED, NL80211_STA_INFO_SIGNAL_AVG, NL80211_STA_INFO_RX_BITRATE, NL80211_STA_INFO_BSS_PARAM, NL80211_STA_INFO_CONNECTED_TIME, NL80211_STA_INFO_STA_FLAGS, NL80211_STA_INFO_BEACON_LOSS, NL80211_STA_INFO_T_OFFSET, NL80211_STA_INFO_LOCAL_PM, NL80211_STA_INFO_PEER_PM, NL80211_STA_INFO_NONPEER_PM, NL80211_STA_INFO_RX_BYTES64, NL80211_STA_INFO_TX_BYTES64, NL80211_STA_INFO_CHAIN_SIGNAL, NL80211_STA_INFO_CHAIN_SIGNAL_AVG, NL80211_STA_INFO_EXPECTED_THROUGHPUT, NL80211_STA_INFO_RX_DROP_MISC, NL80211_STA_INFO_BEACON_RX, NL80211_STA_INFO_BEACON_SIGNAL_AVG, NL80211_STA_INFO_TID_STATS, NL80211_STA_INFO_RX_DURATION, NL80211_STA_INFO_PAD, NL80211_STA_INFO_ACK_SIGNAL, NL80211_STA_INFO_ACK_SIGNAL_AVG, NL80211_STA_INFO_RX_MPDUS, NL80211_STA_INFO_FCS_ERROR_COUNT, NL80211_STA_INFO_CONNECTED_TO_GATE, /* keep last */ __NL80211_STA_INFO_AFTER_LAST, NL80211_STA_INFO_MAX = __NL80211_STA_INFO_AFTER_LAST - 1 }; /* we renamed this - stay compatible */ #define NL80211_STA_INFO_DATA_ACK_SIGNAL_AVG NL80211_STA_INFO_ACK_SIGNAL_AVG /** * enum nl80211_tid_stats - per TID statistics attributes * @__NL80211_TID_STATS_INVALID: attribute number 0 is reserved * @NL80211_TID_STATS_RX_MSDU: number of MSDUs received (u64) * @NL80211_TID_STATS_TX_MSDU: number of MSDUs transmitted (or * attempted to transmit; u64) * @NL80211_TID_STATS_TX_MSDU_RETRIES: number of retries for * transmitted MSDUs (not counting the first attempt; u64) * @NL80211_TID_STATS_TX_MSDU_FAILED: number of failed transmitted * MSDUs (u64) * @NL80211_TID_STATS_PAD: attribute used for padding for 64-bit alignment * @NL80211_TID_STATS_TXQ_STATS: TXQ stats (nested attribute) * @NUM_NL80211_TID_STATS: number of attributes here * @NL80211_TID_STATS_MAX: highest numbered attribute here */ enum nl80211_tid_stats { __NL80211_TID_STATS_INVALID, NL80211_TID_STATS_RX_MSDU, NL80211_TID_STATS_TX_MSDU, NL80211_TID_STATS_TX_MSDU_RETRIES, NL80211_TID_STATS_TX_MSDU_FAILED, NL80211_TID_STATS_PAD, NL80211_TID_STATS_TXQ_STATS, /* keep last */ NUM_NL80211_TID_STATS, NL80211_TID_STATS_MAX = NUM_NL80211_TID_STATS - 1 }; /** * enum nl80211_txq_stats - per TXQ statistics attributes * @__NL80211_TXQ_STATS_INVALID: attribute number 0 is reserved * @NUM_NL80211_TXQ_STATS: number of attributes here * @NL80211_TXQ_STATS_BACKLOG_BYTES:
number of bytes currently backlogged * @NL80211_TXQ_STATS_BACKLOG_PACKETS: number of packets currently * backlogged * @NL80211_TXQ_STATS_FLOWS: total number of new flows seen * @NL80211_TXQ_STATS_DROPS: total number of packet drops * @NL80211_TXQ_STATS_ECN_MARKS: total number of packet ECN marks * @NL80211_TXQ_STATS_OVERLIMIT: number of drops due to queue space overflow * @NL80211_TXQ_STATS_OVERMEMORY: number of drops due to memory limit overflow * (only for per-phy stats) * @NL80211_TXQ_STATS_COLLISIONS: number of hash collisions * @NL80211_TXQ_STATS_TX_BYTES: total number of bytes dequeued from TXQ * @NL80211_TXQ_STATS_TX_PACKETS: total number of packets dequeued from TXQ * @NL80211_TXQ_STATS_MAX_FLOWS: number of flow buckets for PHY * @NL80211_TXQ_STATS_MAX: highest numbered attribute here */ enum nl80211_txq_stats { __NL80211_TXQ_STATS_INVALID, NL80211_TXQ_STATS_BACKLOG_BYTES, NL80211_TXQ_STATS_BACKLOG_PACKETS, NL80211_TXQ_STATS_FLOWS, NL80211_TXQ_STATS_DROPS, NL80211_TXQ_STATS_ECN_MARKS, NL80211_TXQ_STATS_OVERLIMIT, NL80211_TXQ_STATS_OVERMEMORY, NL80211_TXQ_STATS_COLLISIONS, NL80211_TXQ_STATS_TX_BYTES, NL80211_TXQ_STATS_TX_PACKETS, NL80211_TXQ_STATS_MAX_FLOWS, /* keep last */ NUM_NL80211_TXQ_STATS, NL80211_TXQ_STATS_MAX = NUM_NL80211_TXQ_STATS - 1 }; /** * enum nl80211_mpath_flags - nl80211 mesh path flags * * @NL80211_MPATH_FLAG_ACTIVE: the mesh path is active * @NL80211_MPATH_FLAG_RESOLVING: the mesh path discovery process is running * @NL80211_MPATH_FLAG_SN_VALID: the mesh path contains a valid SN * @NL80211_MPATH_FLAG_FIXED: the mesh path has been manually set * @NL80211_MPATH_FLAG_RESOLVED: the mesh path discovery process succeeded */ enum nl80211_mpath_flags { NL80211_MPATH_FLAG_ACTIVE = 1<<0, NL80211_MPATH_FLAG_RESOLVING = 1<<1, NL80211_MPATH_FLAG_SN_VALID = 1<<2, NL80211_MPATH_FLAG_FIXED = 1<<3, NL80211_MPATH_FLAG_RESOLVED = 1<<4, }; /** * enum nl80211_mpath_info - mesh path information * * These attribute types are used with %NL80211_ATTR_MPATH_INFO when getting * information about a mesh path. 
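*
* A minimal illustrative sketch (not part of this header), assuming
* minfo[] was filled from %NL80211_ATTR_MPATH_INFO with libnl-3's
* nla_parse_nested(), and assuming the flags are carried as a u8 here:
*
*	if (minfo[NL80211_MPATH_INFO_FLAGS]) {
*		__u8 flags = nla_get_u8(minfo[NL80211_MPATH_INFO_FLAGS]);
*
*		if (flags & NL80211_MPATH_FLAG_ACTIVE)	// bit test, flags are a bitmask
*			printf("mesh path is active\n");
*	}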
* * @__NL80211_MPATH_INFO_INVALID: attribute number 0 is reserved * @NL80211_MPATH_INFO_FRAME_QLEN: number of queued frames for this destination * @NL80211_MPATH_INFO_SN: destination sequence number * @NL80211_MPATH_INFO_METRIC: metric (cost) of this mesh path * @NL80211_MPATH_INFO_EXPTIME: expiration time for the path, in msec from now * @NL80211_MPATH_INFO_FLAGS: mesh path flags, enumerated in * &enum nl80211_mpath_flags * @NL80211_MPATH_INFO_DISCOVERY_TIMEOUT: total path discovery timeout, in msec * @NL80211_MPATH_INFO_DISCOVERY_RETRIES: mesh path discovery retries * @NL80211_MPATH_INFO_MAX: highest mesh path information attribute number * currently defined * @__NL80211_MPATH_INFO_AFTER_LAST: internal use */ enum nl80211_mpath_info { __NL80211_MPATH_INFO_INVALID, NL80211_MPATH_INFO_FRAME_QLEN, NL80211_MPATH_INFO_SN, NL80211_MPATH_INFO_METRIC, NL80211_MPATH_INFO_EXPTIME, NL80211_MPATH_INFO_FLAGS, NL80211_MPATH_INFO_DISCOVERY_TIMEOUT, NL80211_MPATH_INFO_DISCOVERY_RETRIES, /* keep last */ __NL80211_MPATH_INFO_AFTER_LAST, NL80211_MPATH_INFO_MAX = __NL80211_MPATH_INFO_AFTER_LAST - 1 }; /** * enum nl80211_band_iftype_attr - Interface type data attributes * * @__NL80211_BAND_IFTYPE_ATTR_INVALID: attribute number 0 is reserved * @NL80211_BAND_IFTYPE_ATTR_IFTYPES: nested attribute containing a flag attribute * for each interface type that supports the band data * @NL80211_BAND_IFTYPE_ATTR_HE_CAP_MAC: HE MAC capabilities as in HE * capabilities IE * @NL80211_BAND_IFTYPE_ATTR_HE_CAP_PHY: HE PHY capabilities as in HE * capabilities IE * @NL80211_BAND_IFTYPE_ATTR_HE_CAP_MCS_SET: HE supported NSS/MCS as in HE * capabilities IE * @NL80211_BAND_IFTYPE_ATTR_HE_CAP_PPE: HE PPE thresholds information as * defined in HE capabilities IE * @NL80211_BAND_IFTYPE_ATTR_MAX: highest band HE capability attribute currently * defined * @__NL80211_BAND_IFTYPE_ATTR_AFTER_LAST: internal use */ enum nl80211_band_iftype_attr { __NL80211_BAND_IFTYPE_ATTR_INVALID, NL80211_BAND_IFTYPE_ATTR_IFTYPES, NL80211_BAND_IFTYPE_ATTR_HE_CAP_MAC, NL80211_BAND_IFTYPE_ATTR_HE_CAP_PHY, NL80211_BAND_IFTYPE_ATTR_HE_CAP_MCS_SET, NL80211_BAND_IFTYPE_ATTR_HE_CAP_PPE, /* keep last */ __NL80211_BAND_IFTYPE_ATTR_AFTER_LAST, NL80211_BAND_IFTYPE_ATTR_MAX = __NL80211_BAND_IFTYPE_ATTR_AFTER_LAST - 1 }; /** * enum nl80211_band_attr - band attributes * @__NL80211_BAND_ATTR_INVALID: attribute number 0 is reserved * @NL80211_BAND_ATTR_FREQS: supported frequencies in this band, * an array of nested frequency attributes * @NL80211_BAND_ATTR_RATES: supported bitrates in this band, * an array of nested bitrate attributes * @NL80211_BAND_ATTR_HT_MCS_SET: 16-byte attribute containing the MCS set as * defined in 802.11n * @NL80211_BAND_ATTR_HT_CAPA: HT capabilities, as in the HT information IE * @NL80211_BAND_ATTR_HT_AMPDU_FACTOR: A-MPDU factor, as in 11n * @NL80211_BAND_ATTR_HT_AMPDU_DENSITY: A-MPDU density, as in 11n * @NL80211_BAND_ATTR_VHT_MCS_SET: 32-byte attribute containing the MCS set as * defined in 802.11ac * @NL80211_BAND_ATTR_VHT_CAPA: VHT capabilities, as in the VHT information IE * @NL80211_BAND_ATTR_IFTYPE_DATA: nested array attribute, with each entry using * attributes from &enum nl80211_band_iftype_attr * @NL80211_BAND_ATTR_MAX: highest band attribute currently defined * @__NL80211_BAND_ATTR_AFTER_LAST: internal use */ enum nl80211_band_attr { __NL80211_BAND_ATTR_INVALID, NL80211_BAND_ATTR_FREQS, NL80211_BAND_ATTR_RATES, NL80211_BAND_ATTR_HT_MCS_SET, NL80211_BAND_ATTR_HT_CAPA, NL80211_BAND_ATTR_HT_AMPDU_FACTOR,
NL80211_BAND_ATTR_HT_AMPDU_DENSITY, NL80211_BAND_ATTR_VHT_MCS_SET, NL80211_BAND_ATTR_VHT_CAPA, NL80211_BAND_ATTR_IFTYPE_DATA, /* keep last */ __NL80211_BAND_ATTR_AFTER_LAST, NL80211_BAND_ATTR_MAX = __NL80211_BAND_ATTR_AFTER_LAST - 1 }; #define NL80211_BAND_ATTR_HT_CAPA NL80211_BAND_ATTR_HT_CAPA /** * enum nl80211_wmm_rule - regulatory wmm rule * * @__NL80211_WMMR_INVALID: attribute number 0 is reserved * @NL80211_WMMR_CW_MIN: Minimum contention window slot. * @NL80211_WMMR_CW_MAX: Maximum contention window slot. * @NL80211_WMMR_AIFSN: Arbitration Inter Frame Space. * @NL80211_WMMR_TXOP: Maximum allowed tx operation time. * @NL80211_WMMR_MAX: highest possible wmm rule. * @__NL80211_WMMR_LAST: Internal use. */ enum nl80211_wmm_rule { __NL80211_WMMR_INVALID, NL80211_WMMR_CW_MIN, NL80211_WMMR_CW_MAX, NL80211_WMMR_AIFSN, NL80211_WMMR_TXOP, /* keep last */ __NL80211_WMMR_LAST, NL80211_WMMR_MAX = __NL80211_WMMR_LAST - 1 }; /** * enum nl80211_frequency_attr - frequency attributes * @__NL80211_FREQUENCY_ATTR_INVALID: attribute number 0 is reserved * @NL80211_FREQUENCY_ATTR_FREQ: Frequency in MHz * @NL80211_FREQUENCY_ATTR_DISABLED: Channel is disabled in current * regulatory domain. * @NL80211_FREQUENCY_ATTR_NO_IR: no mechanisms that initiate radiation * are permitted on this channel, this includes sending probe * requests, or modes of operation that require beaconing. * @NL80211_FREQUENCY_ATTR_RADAR: Radar detection is mandatory * on this channel in current regulatory domain. * @NL80211_FREQUENCY_ATTR_MAX_TX_POWER: Maximum transmission power in mBm * (100 * dBm). * @NL80211_FREQUENCY_ATTR_DFS_STATE: current state for DFS * (enum nl80211_dfs_state) * @NL80211_FREQUENCY_ATTR_DFS_TIME: time in milliseconds for how long * this channel is in this DFS state. * @NL80211_FREQUENCY_ATTR_NO_HT40_MINUS: HT40- isn't possible with this * channel as the control channel * @NL80211_FREQUENCY_ATTR_NO_HT40_PLUS: HT40+ isn't possible with this * channel as the control channel * @NL80211_FREQUENCY_ATTR_NO_80MHZ: any 80 MHz channel using this channel * as the primary or any of the secondary channels isn't possible, * this includes 80+80 channels * @NL80211_FREQUENCY_ATTR_NO_160MHZ: any 160 MHz (but not 80+80) channel * using this channel as the primary or any of the secondary channels * isn't possible * @NL80211_FREQUENCY_ATTR_DFS_CAC_TIME: DFS CAC time in milliseconds. * @NL80211_FREQUENCY_ATTR_INDOOR_ONLY: Only indoor use is permitted on this * channel. A channel that has the INDOOR_ONLY attribute can only be * used when there is a clear assessment that the device is operating in * indoor surroundings, i.e., it is connected to AC power (and not * through portable DC inverters) or is under the control of a master * that is acting as an AP and is connected to AC power. * @NL80211_FREQUENCY_ATTR_IR_CONCURRENT: IR operation is allowed on this * channel if it's connected concurrently to a BSS on the same channel on * the 2 GHz band or to a channel in the same UNII band (on the 5 GHz * band), and IEEE80211_CHAN_RADAR is not set. Instantiating a GO or TDLS * off-channel on a channel that has the IR_CONCURRENT attribute set can be * done when there is a clear assessment that the device is operating under * the guidance of an authorized master, i.e., setting up a GO or TDLS * off-channel while the device is also connected to an AP with DFS and * radar detection on the UNII band (it is up to user-space, i.e., * wpa_supplicant to perform the required verifications).
Using this * attribute for IR is disallowed for master interfaces (IBSS, AP). * @NL80211_FREQUENCY_ATTR_NO_20MHZ: 20 MHz operation is not allowed * on this channel in current regulatory domain. * @NL80211_FREQUENCY_ATTR_NO_10MHZ: 10 MHz operation is not allowed * on this channel in current regulatory domain. * @NL80211_FREQUENCY_ATTR_WMM: this channel has wmm limitations. * This is a nested attribute that contains the wmm limitation per AC. * (see &enum nl80211_wmm_rule) * @NL80211_FREQUENCY_ATTR_MAX: highest frequency attribute number * currently defined * @__NL80211_FREQUENCY_ATTR_AFTER_LAST: internal use * * See https://apps.fcc.gov/eas/comments/GetPublishedDocument.html?id=327&tn=528122 * for more information on the FCC description of the relaxations allowed * by NL80211_FREQUENCY_ATTR_INDOOR_ONLY and * NL80211_FREQUENCY_ATTR_IR_CONCURRENT. */ enum nl80211_frequency_attr { __NL80211_FREQUENCY_ATTR_INVALID, NL80211_FREQUENCY_ATTR_FREQ, NL80211_FREQUENCY_ATTR_DISABLED, NL80211_FREQUENCY_ATTR_NO_IR, __NL80211_FREQUENCY_ATTR_NO_IBSS, NL80211_FREQUENCY_ATTR_RADAR, NL80211_FREQUENCY_ATTR_MAX_TX_POWER, NL80211_FREQUENCY_ATTR_DFS_STATE, NL80211_FREQUENCY_ATTR_DFS_TIME, NL80211_FREQUENCY_ATTR_NO_HT40_MINUS, NL80211_FREQUENCY_ATTR_NO_HT40_PLUS, NL80211_FREQUENCY_ATTR_NO_80MHZ, NL80211_FREQUENCY_ATTR_NO_160MHZ, NL80211_FREQUENCY_ATTR_DFS_CAC_TIME, NL80211_FREQUENCY_ATTR_INDOOR_ONLY, NL80211_FREQUENCY_ATTR_IR_CONCURRENT, NL80211_FREQUENCY_ATTR_NO_20MHZ, NL80211_FREQUENCY_ATTR_NO_10MHZ, NL80211_FREQUENCY_ATTR_WMM, /* keep last */ __NL80211_FREQUENCY_ATTR_AFTER_LAST, NL80211_FREQUENCY_ATTR_MAX = __NL80211_FREQUENCY_ATTR_AFTER_LAST - 1 }; #define NL80211_FREQUENCY_ATTR_MAX_TX_POWER NL80211_FREQUENCY_ATTR_MAX_TX_POWER #define NL80211_FREQUENCY_ATTR_PASSIVE_SCAN NL80211_FREQUENCY_ATTR_NO_IR #define NL80211_FREQUENCY_ATTR_NO_IBSS NL80211_FREQUENCY_ATTR_NO_IR #define NL80211_FREQUENCY_ATTR_NO_IR NL80211_FREQUENCY_ATTR_NO_IR #define NL80211_FREQUENCY_ATTR_GO_CONCURRENT \ NL80211_FREQUENCY_ATTR_IR_CONCURRENT /** * enum nl80211_bitrate_attr - bitrate attributes * @__NL80211_BITRATE_ATTR_INVALID: attribute number 0 is reserved * @NL80211_BITRATE_ATTR_RATE: Bitrate in units of 100 kbps * @NL80211_BITRATE_ATTR_2GHZ_SHORTPREAMBLE: Short preamble supported * in 2.4 GHz band. * @NL80211_BITRATE_ATTR_MAX: highest bitrate attribute number * currently defined * @__NL80211_BITRATE_ATTR_AFTER_LAST: internal use */ enum nl80211_bitrate_attr { __NL80211_BITRATE_ATTR_INVALID, NL80211_BITRATE_ATTR_RATE, NL80211_BITRATE_ATTR_2GHZ_SHORTPREAMBLE, /* keep last */ __NL80211_BITRATE_ATTR_AFTER_LAST, NL80211_BITRATE_ATTR_MAX = __NL80211_BITRATE_ATTR_AFTER_LAST - 1 }; /** * enum nl80211_initiator - Indicates the initiator of a reg domain request * @NL80211_REGDOM_SET_BY_CORE: Core queried CRDA for a dynamic world * regulatory domain. * @NL80211_REGDOM_SET_BY_USER: User asked the wireless core to set the * regulatory domain. * @NL80211_REGDOM_SET_BY_DRIVER: a wireless driver has hinted to the * wireless core that it thinks it knows the regulatory domain we should be in. * @NL80211_REGDOM_SET_BY_COUNTRY_IE: the wireless core has received an * 802.11 country information element with regulatory information it * thinks we should consider. cfg80211 only processes the country * code from the IE, and relies on the regulatory domain information * structure passed by userspace (CRDA) from our wireless-regdb.
* If a channel is enabled but the country code indicates it should * be disabled we disable the channel and re-enable it upon disassociation. */ enum nl80211_reg_initiator { NL80211_REGDOM_SET_BY_CORE, NL80211_REGDOM_SET_BY_USER, NL80211_REGDOM_SET_BY_DRIVER, NL80211_REGDOM_SET_BY_COUNTRY_IE, }; /** * enum nl80211_reg_type - specifies the type of regulatory domain * @NL80211_REGDOM_TYPE_COUNTRY: the regulatory domain set is one that pertains * to a specific country. When this is set you can count on the * ISO / IEC 3166 alpha2 country code being valid. * @NL80211_REGDOM_TYPE_WORLD: the regulatory domain set is the world regulatory * domain. * @NL80211_REGDOM_TYPE_CUSTOM_WORLD: the regulatory domain set is a custom * driver specific world regulatory domain. These do not apply system-wide * and are only applicable to the individual devices which have requested * them to be applied. * @NL80211_REGDOM_TYPE_INTERSECTION: the regulatory domain set is the product * of an intersection between two regulatory domains -- the previously * set regulatory domain on the system and the last accepted regulatory * domain request to be processed. */ enum nl80211_reg_type { NL80211_REGDOM_TYPE_COUNTRY, NL80211_REGDOM_TYPE_WORLD, NL80211_REGDOM_TYPE_CUSTOM_WORLD, NL80211_REGDOM_TYPE_INTERSECTION, }; /** * enum nl80211_reg_rule_attr - regulatory rule attributes * @__NL80211_REG_RULE_ATTR_INVALID: attribute number 0 is reserved * @NL80211_ATTR_REG_RULE_FLAGS: a set of flags which specify additional * considerations for a given frequency range. These are the * &enum nl80211_reg_rule_flags. * @NL80211_ATTR_FREQ_RANGE_START: starting frequency for the regulatory * rule in KHz. This is not a center frequency but an actual regulatory * band edge. * @NL80211_ATTR_FREQ_RANGE_END: ending frequency for the regulatory rule * in KHz. This is not a center frequency but an actual regulatory * band edge. * @NL80211_ATTR_FREQ_RANGE_MAX_BW: maximum allowed bandwidth for this * frequency range, in KHz. * @NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN: the maximum allowed antenna gain * for a given frequency range. The value is in mBi (100 * dBi). * If you don't have one then don't send this. * @NL80211_ATTR_POWER_RULE_MAX_EIRP: the maximum allowed EIRP for * a given frequency range. The value is in mBm (100 * dBm). * @NL80211_ATTR_DFS_CAC_TIME: DFS CAC time in milliseconds. * If not present or 0 default CAC time will be used. * @NL80211_REG_RULE_ATTR_MAX: highest regulatory rule attribute number * currently defined * @__NL80211_REG_RULE_ATTR_AFTER_LAST: internal use */ enum nl80211_reg_rule_attr { __NL80211_REG_RULE_ATTR_INVALID, NL80211_ATTR_REG_RULE_FLAGS, NL80211_ATTR_FREQ_RANGE_START, NL80211_ATTR_FREQ_RANGE_END, NL80211_ATTR_FREQ_RANGE_MAX_BW, NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN, NL80211_ATTR_POWER_RULE_MAX_EIRP, NL80211_ATTR_DFS_CAC_TIME, /* keep last */ __NL80211_REG_RULE_ATTR_AFTER_LAST, NL80211_REG_RULE_ATTR_MAX = __NL80211_REG_RULE_ATTR_AFTER_LAST - 1 }; /** * enum nl80211_sched_scan_match_attr - scheduled scan match attributes * @__NL80211_SCHED_SCAN_MATCH_ATTR_INVALID: attribute number 0 is reserved * @NL80211_SCHED_SCAN_MATCH_ATTR_SSID: SSID to be used for matching, * only report BSS with matching SSID. * (This cannot be used together with BSSID.) * @NL80211_SCHED_SCAN_MATCH_ATTR_RSSI: RSSI threshold (in dBm) for reporting a * BSS in scan results. Filtering is turned off if not specified.
Note that * if this attribute is in a match set of its own, then it is treated as * the default value for all matchsets with an SSID, rather than being a * matchset of its own without an RSSI filter. This is due to problems with * how this API was implemented in the past. Also, due to the same problem, * the only way to create a matchset with only an RSSI filter (with this * attribute) is if there's only a single matchset with the RSSI attribute. * @NL80211_SCHED_SCAN_MATCH_ATTR_RELATIVE_RSSI: Flag indicating whether * %NL80211_SCHED_SCAN_MATCH_ATTR_RSSI is to be used as absolute RSSI or * relative to current bss's RSSI. * @NL80211_SCHED_SCAN_MATCH_ATTR_RSSI_ADJUST: When present the RSSI level for * BSS-es in the specified band is to be adjusted before doing * RSSI-based BSS selection. The attribute value is a packed structure * value as specified by &struct nl80211_bss_select_rssi_adjust. * @NL80211_SCHED_SCAN_MATCH_ATTR_BSSID: BSSID to be used for matching * (this cannot be used together with SSID). * @NL80211_SCHED_SCAN_MATCH_ATTR_MAX: highest scheduled scan filter * attribute number currently defined * @__NL80211_SCHED_SCAN_MATCH_ATTR_AFTER_LAST: internal use */ enum nl80211_sched_scan_match_attr { __NL80211_SCHED_SCAN_MATCH_ATTR_INVALID, NL80211_SCHED_SCAN_MATCH_ATTR_SSID, NL80211_SCHED_SCAN_MATCH_ATTR_RSSI, NL80211_SCHED_SCAN_MATCH_ATTR_RELATIVE_RSSI, NL80211_SCHED_SCAN_MATCH_ATTR_RSSI_ADJUST, NL80211_SCHED_SCAN_MATCH_ATTR_BSSID, /* keep last */ __NL80211_SCHED_SCAN_MATCH_ATTR_AFTER_LAST, NL80211_SCHED_SCAN_MATCH_ATTR_MAX = __NL80211_SCHED_SCAN_MATCH_ATTR_AFTER_LAST - 1 }; /* only for backward compatibility */ #define NL80211_ATTR_SCHED_SCAN_MATCH_SSID NL80211_SCHED_SCAN_MATCH_ATTR_SSID /** * enum nl80211_reg_rule_flags - regulatory rule flags * * @NL80211_RRF_NO_OFDM: OFDM modulation not allowed * @NL80211_RRF_NO_CCK: CCK modulation not allowed * @NL80211_RRF_NO_INDOOR: indoor operation not allowed * @NL80211_RRF_NO_OUTDOOR: outdoor operation not allowed * @NL80211_RRF_DFS: DFS support is required to be used * @NL80211_RRF_PTP_ONLY: this is only for Point To Point links * @NL80211_RRF_PTMP_ONLY: this is only for Point To Multi Point links * @NL80211_RRF_NO_IR: no mechanisms that initiate radiation are allowed, * this includes probe requests or modes of operation that require * beaconing. * @NL80211_RRF_AUTO_BW: maximum available bandwidth should be calculated * based on contiguous rules and wider channels will be allowed to cross * multiple contiguous/overlapping frequency ranges.
* @NL80211_RRF_IR_CONCURRENT: See %NL80211_FREQUENCY_ATTR_IR_CONCURRENT * @NL80211_RRF_NO_HT40MINUS: channels can't be used in HT40- operation * @NL80211_RRF_NO_HT40PLUS: channels can't be used in HT40+ operation * @NL80211_RRF_NO_80MHZ: 80MHz operation not allowed * @NL80211_RRF_NO_160MHZ: 160MHz operation not allowed */ enum nl80211_reg_rule_flags { NL80211_RRF_NO_OFDM = 1<<0, NL80211_RRF_NO_CCK = 1<<1, NL80211_RRF_NO_INDOOR = 1<<2, NL80211_RRF_NO_OUTDOOR = 1<<3, NL80211_RRF_DFS = 1<<4, NL80211_RRF_PTP_ONLY = 1<<5, NL80211_RRF_PTMP_ONLY = 1<<6, NL80211_RRF_NO_IR = 1<<7, __NL80211_RRF_NO_IBSS = 1<<8, NL80211_RRF_AUTO_BW = 1<<11, NL80211_RRF_IR_CONCURRENT = 1<<12, NL80211_RRF_NO_HT40MINUS = 1<<13, NL80211_RRF_NO_HT40PLUS = 1<<14, NL80211_RRF_NO_80MHZ = 1<<15, NL80211_RRF_NO_160MHZ = 1<<16, }; #define NL80211_RRF_PASSIVE_SCAN NL80211_RRF_NO_IR #define NL80211_RRF_NO_IBSS NL80211_RRF_NO_IR #define NL80211_RRF_NO_IR NL80211_RRF_NO_IR #define NL80211_RRF_NO_HT40 (NL80211_RRF_NO_HT40MINUS |\ NL80211_RRF_NO_HT40PLUS) #define NL80211_RRF_GO_CONCURRENT NL80211_RRF_IR_CONCURRENT /* For backport compatibility with older userspace */ #define NL80211_RRF_NO_IR_ALL (NL80211_RRF_NO_IR | __NL80211_RRF_NO_IBSS) /** * enum nl80211_dfs_regions - regulatory DFS regions * * @NL80211_DFS_UNSET: Country has no DFS master region specified * @NL80211_DFS_FCC: Country follows DFS master rules from FCC * @NL80211_DFS_ETSI: Country follows DFS master rules from ETSI * @NL80211_DFS_JP: Country follows DFS master rules from JP/MKK/Telec */ enum nl80211_dfs_regions { NL80211_DFS_UNSET = 0, NL80211_DFS_FCC = 1, NL80211_DFS_ETSI = 2, NL80211_DFS_JP = 3, }; /** * enum nl80211_user_reg_hint_type - type of user regulatory hint * * @NL80211_USER_REG_HINT_USER: a user sent the hint. This is always * assumed if the attribute is not set. * @NL80211_USER_REG_HINT_CELL_BASE: the hint comes from a cellular * base station. Device drivers that have been tested to properly * support this type of hint can enable these hints * by setting the NL80211_FEATURE_CELL_BASE_REG_HINTS feature * capability on the struct wiphy. The wireless core will * ignore all cell base station hints until at least one device * present has been registered with the wireless core that * has listed NL80211_FEATURE_CELL_BASE_REG_HINTS as a * supported feature. * @NL80211_USER_REG_HINT_INDOOR: a user sent a hint indicating that the * platform is operating in an indoor environment. */ enum nl80211_user_reg_hint_type { NL80211_USER_REG_HINT_USER = 0, NL80211_USER_REG_HINT_CELL_BASE = 1, NL80211_USER_REG_HINT_INDOOR = 2, }; /** * enum nl80211_survey_info - survey information * * These attribute types are used with %NL80211_ATTR_SURVEY_INFO * when getting information about a survey.
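*
* A minimal illustrative sketch (not part of this header): the fraction
* of time a channel was busy follows directly from the TIME and
* TIME_BUSY attributes. Assuming sinfo[] was filled from
* %NL80211_ATTR_SURVEY_INFO with libnl-3's nla_parse_nested():
*
*	if (sinfo[NL80211_SURVEY_INFO_TIME] &&
*	    sinfo[NL80211_SURVEY_INFO_TIME_BUSY]) {
*		__u64 on = nla_get_u64(sinfo[NL80211_SURVEY_INFO_TIME]);
*		__u64 busy = nla_get_u64(sinfo[NL80211_SURVEY_INFO_TIME_BUSY]);
*
*		if (on)	// both counters are in ms, so the units cancel
*			printf("busy: %llu%%\n",
*			       (unsigned long long)(100 * busy / on));
*	}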
* * @__NL80211_SURVEY_INFO_INVALID: attribute number 0 is reserved * @NL80211_SURVEY_INFO_FREQUENCY: center frequency of channel * @NL80211_SURVEY_INFO_NOISE: noise level of channel (u8, dBm) * @NL80211_SURVEY_INFO_IN_USE: channel is currently being used * @NL80211_SURVEY_INFO_TIME: amount of time (in ms) that the radio * was turned on (on channel or globally) * @NL80211_SURVEY_INFO_TIME_BUSY: amount of the time the primary * channel was sensed busy (either due to activity or energy detect) * @NL80211_SURVEY_INFO_TIME_EXT_BUSY: amount of time the extension * channel was sensed busy * @NL80211_SURVEY_INFO_TIME_RX: amount of time the radio spent * receiving data (on channel or globally) * @NL80211_SURVEY_INFO_TIME_TX: amount of time the radio spent * transmitting data (on channel or globally) * @NL80211_SURVEY_INFO_TIME_SCAN: time the radio spent for scan * (on this channel or globally) * @NL80211_SURVEY_INFO_PAD: attribute used for padding for 64-bit alignment * @NL80211_SURVEY_INFO_MAX: highest survey info attribute number * currently defined * @__NL80211_SURVEY_INFO_AFTER_LAST: internal use */ enum nl80211_survey_info { __NL80211_SURVEY_INFO_INVALID, NL80211_SURVEY_INFO_FREQUENCY, NL80211_SURVEY_INFO_NOISE, NL80211_SURVEY_INFO_IN_USE, NL80211_SURVEY_INFO_TIME, NL80211_SURVEY_INFO_TIME_BUSY, NL80211_SURVEY_INFO_TIME_EXT_BUSY, NL80211_SURVEY_INFO_TIME_RX, NL80211_SURVEY_INFO_TIME_TX, NL80211_SURVEY_INFO_TIME_SCAN, NL80211_SURVEY_INFO_PAD, /* keep last */ __NL80211_SURVEY_INFO_AFTER_LAST, NL80211_SURVEY_INFO_MAX = __NL80211_SURVEY_INFO_AFTER_LAST - 1 }; /* keep old names for compatibility */ #define NL80211_SURVEY_INFO_CHANNEL_TIME NL80211_SURVEY_INFO_TIME #define NL80211_SURVEY_INFO_CHANNEL_TIME_BUSY NL80211_SURVEY_INFO_TIME_BUSY #define NL80211_SURVEY_INFO_CHANNEL_TIME_EXT_BUSY NL80211_SURVEY_INFO_TIME_EXT_BUSY #define NL80211_SURVEY_INFO_CHANNEL_TIME_RX NL80211_SURVEY_INFO_TIME_RX #define NL80211_SURVEY_INFO_CHANNEL_TIME_TX NL80211_SURVEY_INFO_TIME_TX /** * enum nl80211_mntr_flags - monitor configuration flags * * Monitor configuration flags. * * @__NL80211_MNTR_FLAG_INVALID: reserved * * @NL80211_MNTR_FLAG_FCSFAIL: pass frames with bad FCS * @NL80211_MNTR_FLAG_PLCPFAIL: pass frames with bad PLCP * @NL80211_MNTR_FLAG_CONTROL: pass control frames * @NL80211_MNTR_FLAG_OTHER_BSS: disable BSSID filtering * @NL80211_MNTR_FLAG_COOK_FRAMES: report frames after processing; * overrides all other flags. * @NL80211_MNTR_FLAG_ACTIVE: use the configured MAC address * and ACK incoming unicast packets. * * @__NL80211_MNTR_FLAG_AFTER_LAST: internal use * @NL80211_MNTR_FLAG_MAX: highest possible monitor flag */ enum nl80211_mntr_flags { __NL80211_MNTR_FLAG_INVALID, NL80211_MNTR_FLAG_FCSFAIL, NL80211_MNTR_FLAG_PLCPFAIL, NL80211_MNTR_FLAG_CONTROL, NL80211_MNTR_FLAG_OTHER_BSS, NL80211_MNTR_FLAG_COOK_FRAMES, NL80211_MNTR_FLAG_ACTIVE, /* keep last */ __NL80211_MNTR_FLAG_AFTER_LAST, NL80211_MNTR_FLAG_MAX = __NL80211_MNTR_FLAG_AFTER_LAST - 1 }; /** * enum nl80211_mesh_power_mode - mesh power save modes * * @NL80211_MESH_POWER_UNKNOWN: The mesh power mode of the mesh STA is * not known or has not been set yet. * @NL80211_MESH_POWER_ACTIVE: Active mesh power mode. The mesh STA is * in Awake state all the time. * @NL80211_MESH_POWER_LIGHT_SLEEP: Light sleep mode. The mesh STA will * alternate between Active and Doze states, but will wake up for * neighbor's beacons. * @NL80211_MESH_POWER_DEEP_SLEEP: Deep sleep mode.
The mesh STA will * alternate between Active and Doze states, but may not wake up * for neighbor's beacons. * * @__NL80211_MESH_POWER_AFTER_LAST - internal use * @NL80211_MESH_POWER_MAX - highest possible power save level */ enum nl80211_mesh_power_mode { NL80211_MESH_POWER_UNKNOWN, NL80211_MESH_POWER_ACTIVE, NL80211_MESH_POWER_LIGHT_SLEEP, NL80211_MESH_POWER_DEEP_SLEEP, __NL80211_MESH_POWER_AFTER_LAST, NL80211_MESH_POWER_MAX = __NL80211_MESH_POWER_AFTER_LAST - 1 }; /** * enum nl80211_meshconf_params - mesh configuration parameters * * Mesh configuration parameters. These can be changed while the mesh is * active. * * @__NL80211_MESHCONF_INVALID: internal use * * @NL80211_MESHCONF_RETRY_TIMEOUT: specifies the initial retry timeout in * millisecond units, used by the Peer Link Open message * * @NL80211_MESHCONF_CONFIRM_TIMEOUT: specifies the initial confirm timeout, in * millisecond units, used by the peer link management to close a peer link * * @NL80211_MESHCONF_HOLDING_TIMEOUT: specifies the holding timeout, in * millisecond units * * @NL80211_MESHCONF_MAX_PEER_LINKS: maximum number of peer links allowed * on this mesh interface * * @NL80211_MESHCONF_MAX_RETRIES: specifies the maximum number of peer link * open retries that can be sent to establish a new peer link instance in a * mesh * * @NL80211_MESHCONF_TTL: specifies the value of TTL field set at a source mesh * point. * * @NL80211_MESHCONF_AUTO_OPEN_PLINKS: whether we should automatically open * peer links when we detect compatible mesh peers. Disabled if * @NL80211_MESH_SETUP_USERSPACE_MPM or @NL80211_MESH_SETUP_USERSPACE_AMPE are * set. * * @NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES: the number of action frames * containing a PREQ that an MP can send to a particular destination (path * target) * * @NL80211_MESHCONF_PATH_REFRESH_TIME: how frequently to refresh mesh paths * (in milliseconds) * * @NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT: minimum length of time to wait * until giving up on a path discovery (in milliseconds) * * @NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT: The time (in TUs) for which mesh * points receiving a PREQ shall consider the forwarding information from * the root to be valid. (TU = time unit) * * @NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL: The minimum interval of time (in * TUs) during which an MP can send only one action frame containing a PREQ * reference element * * @NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME: The interval of time (in TUs) * that it takes for an HWMP information element to propagate across the * mesh * * @NL80211_MESHCONF_HWMP_ROOTMODE: whether root mode is enabled or not * * @NL80211_MESHCONF_ELEMENT_TTL: specifies the value of TTL field set at a * source mesh point for path selection elements. * * @NL80211_MESHCONF_HWMP_RANN_INTERVAL: The interval of time (in TUs) between * transmissions of root announcements. * * @NL80211_MESHCONF_GATE_ANNOUNCEMENTS: Advertise that this mesh station has * access to a broader network beyond the MBSS. This is done via Root * Announcement frames. * * @NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL: The minimum interval of time (in * TUs) during which a mesh STA can send only one Action frame containing a * PERR element. * * @NL80211_MESHCONF_FORWARDING: set Mesh STA as forwarding or non-forwarding * entity (default is TRUE - forwarding entity) * * @NL80211_MESHCONF_RSSI_THRESHOLD: RSSI threshold in dBm. This specifies the * threshold for average signal strength of candidate station to establish * a peer link.
* * @NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR: maximum number of neighbors * to synchronize to for 11s default synchronization method * (see 11C.12.2.2) * * @NL80211_MESHCONF_HT_OPMODE: set mesh HT protection mode. * * @NL80211_MESHCONF_ATTR_MAX: highest possible mesh configuration attribute * * @NL80211_MESHCONF_HWMP_PATH_TO_ROOT_TIMEOUT: The time (in TUs) for * which mesh STAs receiving a proactive PREQ shall consider the forwarding * information to the root mesh STA to be valid. * * @NL80211_MESHCONF_HWMP_ROOT_INTERVAL: The interval of time (in TUs) between * transmissions of proactive PREQs. * * @NL80211_MESHCONF_HWMP_CONFIRMATION_INTERVAL: The minimum interval of time * (in TUs) during which a mesh STA can send only one Action frame * containing a PREQ element for root path confirmation. * * @NL80211_MESHCONF_POWER_MODE: Default mesh power mode for new peer links. * type &enum nl80211_mesh_power_mode (u32) * * @NL80211_MESHCONF_AWAKE_WINDOW: awake window duration (in TUs) * * @NL80211_MESHCONF_PLINK_TIMEOUT: If no tx activity is seen from a STA we've * established peering with for longer than this time (in seconds), then * remove it from the STA's list of peers. You may set this to 0 to disable * the removal of the STA. Default is 30 minutes. * * @NL80211_MESHCONF_CONNECTED_TO_GATE: If set to true then this mesh STA * will advertise that it is connected to a gate in the mesh formation * field. If left unset then the mesh formation field will only * advertise such if there is an active root mesh path. * * @__NL80211_MESHCONF_ATTR_AFTER_LAST: internal use */ enum nl80211_meshconf_params { __NL80211_MESHCONF_INVALID, NL80211_MESHCONF_RETRY_TIMEOUT, NL80211_MESHCONF_CONFIRM_TIMEOUT, NL80211_MESHCONF_HOLDING_TIMEOUT, NL80211_MESHCONF_MAX_PEER_LINKS, NL80211_MESHCONF_MAX_RETRIES, NL80211_MESHCONF_TTL, NL80211_MESHCONF_AUTO_OPEN_PLINKS, NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES, NL80211_MESHCONF_PATH_REFRESH_TIME, NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT, NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT, NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL, NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME, NL80211_MESHCONF_HWMP_ROOTMODE, NL80211_MESHCONF_ELEMENT_TTL, NL80211_MESHCONF_HWMP_RANN_INTERVAL, NL80211_MESHCONF_GATE_ANNOUNCEMENTS, NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL, NL80211_MESHCONF_FORWARDING, NL80211_MESHCONF_RSSI_THRESHOLD, NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR, NL80211_MESHCONF_HT_OPMODE, NL80211_MESHCONF_HWMP_PATH_TO_ROOT_TIMEOUT, NL80211_MESHCONF_HWMP_ROOT_INTERVAL, NL80211_MESHCONF_HWMP_CONFIRMATION_INTERVAL, NL80211_MESHCONF_POWER_MODE, NL80211_MESHCONF_AWAKE_WINDOW, NL80211_MESHCONF_PLINK_TIMEOUT, NL80211_MESHCONF_CONNECTED_TO_GATE, /* keep last */ __NL80211_MESHCONF_ATTR_AFTER_LAST, NL80211_MESHCONF_ATTR_MAX = __NL80211_MESHCONF_ATTR_AFTER_LAST - 1 }; /** * enum nl80211_mesh_setup_params - mesh setup parameters * * Mesh setup parameters. These are used to start/join a mesh and cannot be * changed while the mesh is active. * * @__NL80211_MESH_SETUP_INVALID: Internal use * * @NL80211_MESH_SETUP_ENABLE_VENDOR_PATH_SEL: Enable this option to use a * vendor specific path selection algorithm or disable it to use the * default HWMP. * * @NL80211_MESH_SETUP_ENABLE_VENDOR_METRIC: Enable this option to use a * vendor specific path metric or disable it to use the default Airtime * metric.
* * @NL80211_MESH_SETUP_IE: Information elements for this mesh, for instance, a * robust security network ie, or a vendor specific information element * that vendors will use to identify the path selection methods and * metrics in use. * * @NL80211_MESH_SETUP_USERSPACE_AUTH: Enable this option if an authentication * daemon will be authenticating mesh candidates. * * @NL80211_MESH_SETUP_USERSPACE_AMPE: Enable this option if an authentication * daemon will be securing peer link frames. AMPE is a secured version of * Mesh Peering Management (MPM) and is implemented with the assistance of * a userspace daemon. When this flag is set, the kernel will send peer * management frames to a userspace daemon that will implement AMPE * functionality (security capabilities selection, key confirmation, and * key management). When the flag is unset (default), the kernel can * autonomously complete (unsecured) mesh peering without the need of a * userspace daemon. * * @NL80211_MESH_SETUP_ENABLE_VENDOR_SYNC: Enable this option to use a * vendor specific synchronization method or disable it to use the default * neighbor offset synchronization * * @NL80211_MESH_SETUP_USERSPACE_MPM: Enable this option if userspace will * implement an MPM which handles peer allocation and state. * * @NL80211_MESH_SETUP_AUTH_PROTOCOL: Inform the kernel of the authentication * method (u8, as defined in IEEE 8.4.2.100.6, e.g. 0x1 for SAE). * Default is no authentication method required. * * @NL80211_MESH_SETUP_ATTR_MAX: highest possible mesh setup attribute number * * @__NL80211_MESH_SETUP_ATTR_AFTER_LAST: Internal use */ enum nl80211_mesh_setup_params { __NL80211_MESH_SETUP_INVALID, NL80211_MESH_SETUP_ENABLE_VENDOR_PATH_SEL, NL80211_MESH_SETUP_ENABLE_VENDOR_METRIC, NL80211_MESH_SETUP_IE, NL80211_MESH_SETUP_USERSPACE_AUTH, NL80211_MESH_SETUP_USERSPACE_AMPE, NL80211_MESH_SETUP_ENABLE_VENDOR_SYNC, NL80211_MESH_SETUP_USERSPACE_MPM, NL80211_MESH_SETUP_AUTH_PROTOCOL, /* keep last */ __NL80211_MESH_SETUP_ATTR_AFTER_LAST, NL80211_MESH_SETUP_ATTR_MAX = __NL80211_MESH_SETUP_ATTR_AFTER_LAST - 1 }; /** * enum nl80211_txq_attr - TX queue parameter attributes * @__NL80211_TXQ_ATTR_INVALID: Attribute number 0 is reserved * @NL80211_TXQ_ATTR_AC: AC identifier (NL80211_AC_*) * @NL80211_TXQ_ATTR_TXOP: Maximum burst time in units of 32 usecs, 0 meaning * disabled * @NL80211_TXQ_ATTR_CWMIN: Minimum contention window [a value of the form * 2^n-1 in the range 1..32767] * @NL80211_TXQ_ATTR_CWMAX: Maximum contention window [a value of the form * 2^n-1 in the range 1..32767] * @NL80211_TXQ_ATTR_AIFS: Arbitration interframe space [0..255] * @__NL80211_TXQ_ATTR_AFTER_LAST: Internal * @NL80211_TXQ_ATTR_MAX: Maximum TXQ attribute number */ enum nl80211_txq_attr { __NL80211_TXQ_ATTR_INVALID, NL80211_TXQ_ATTR_AC, NL80211_TXQ_ATTR_TXOP, NL80211_TXQ_ATTR_CWMIN, NL80211_TXQ_ATTR_CWMAX, NL80211_TXQ_ATTR_AIFS, /* keep last */ __NL80211_TXQ_ATTR_AFTER_LAST, NL80211_TXQ_ATTR_MAX = __NL80211_TXQ_ATTR_AFTER_LAST - 1 }; enum nl80211_ac { NL80211_AC_VO, NL80211_AC_VI, NL80211_AC_BE, NL80211_AC_BK, NL80211_NUM_ACS }; /* backward compat */ #define NL80211_TXQ_ATTR_QUEUE NL80211_TXQ_ATTR_AC #define NL80211_TXQ_Q_VO NL80211_AC_VO #define NL80211_TXQ_Q_VI NL80211_AC_VI #define NL80211_TXQ_Q_BE NL80211_AC_BE #define NL80211_TXQ_Q_BK NL80211_AC_BK /** * enum nl80211_channel_type - channel type * @NL80211_CHAN_NO_HT: 20 MHz, non-HT channel * @NL80211_CHAN_HT20: 20 MHz HT channel * @NL80211_CHAN_HT40MINUS: HT40 channel, secondary channel * below the control channel * 
@NL80211_CHAN_HT40PLUS: HT40 channel, secondary channel * above the control channel */ enum nl80211_channel_type { NL80211_CHAN_NO_HT, NL80211_CHAN_HT20, NL80211_CHAN_HT40MINUS, NL80211_CHAN_HT40PLUS }; /** * enum nl80211_chan_width - channel width definitions * * These values are used with the %NL80211_ATTR_CHANNEL_WIDTH * attribute. * * @NL80211_CHAN_WIDTH_20_NOHT: 20 MHz, non-HT channel * @NL80211_CHAN_WIDTH_20: 20 MHz HT channel * @NL80211_CHAN_WIDTH_40: 40 MHz channel, the %NL80211_ATTR_CENTER_FREQ1 * attribute must be provided as well * @NL80211_CHAN_WIDTH_80: 80 MHz channel, the %NL80211_ATTR_CENTER_FREQ1 * attribute must be provided as well * @NL80211_CHAN_WIDTH_80P80: 80+80 MHz channel, the %NL80211_ATTR_CENTER_FREQ1 * and %NL80211_ATTR_CENTER_FREQ2 attributes must be provided as well * @NL80211_CHAN_WIDTH_160: 160 MHz channel, the %NL80211_ATTR_CENTER_FREQ1 * attribute must be provided as well * @NL80211_CHAN_WIDTH_5: 5 MHz OFDM channel * @NL80211_CHAN_WIDTH_10: 10 MHz OFDM channel */ enum nl80211_chan_width { NL80211_CHAN_WIDTH_20_NOHT, NL80211_CHAN_WIDTH_20, NL80211_CHAN_WIDTH_40, NL80211_CHAN_WIDTH_80, NL80211_CHAN_WIDTH_80P80, NL80211_CHAN_WIDTH_160, NL80211_CHAN_WIDTH_5, NL80211_CHAN_WIDTH_10, }; /** * enum nl80211_bss_scan_width - control channel width for a BSS * * These values are used with the %NL80211_BSS_CHAN_WIDTH attribute. * * @NL80211_BSS_CHAN_WIDTH_20: control channel is 20 MHz wide or compatible * @NL80211_BSS_CHAN_WIDTH_10: control channel is 10 MHz wide * @NL80211_BSS_CHAN_WIDTH_5: control channel is 5 MHz wide */ enum nl80211_bss_scan_width { NL80211_BSS_CHAN_WIDTH_20, NL80211_BSS_CHAN_WIDTH_10, NL80211_BSS_CHAN_WIDTH_5, }; /** * enum nl80211_bss - netlink attributes for a BSS * * @__NL80211_BSS_INVALID: invalid * @NL80211_BSS_BSSID: BSSID of the BSS (6 octets) * @NL80211_BSS_FREQUENCY: frequency in MHz (u32) * @NL80211_BSS_TSF: TSF of the received probe response/beacon (u64) * (if @NL80211_BSS_PRESP_DATA is present then this is known to be * from a probe response, otherwise it may be from the same beacon * that the NL80211_BSS_BEACON_TSF will be from) * @NL80211_BSS_BEACON_INTERVAL: beacon interval of the (I)BSS (u16) * @NL80211_BSS_CAPABILITY: capability field (CPU order, u16) * @NL80211_BSS_INFORMATION_ELEMENTS: binary attribute containing the * raw information elements from the probe response/beacon (bin); * if the %NL80211_BSS_BEACON_IES attribute is present and the data is * different then the IEs here are from a Probe Response frame; otherwise * they are from a Beacon frame. * However, if the driver does not indicate the source of the IEs, these * IEs may be from either frame subtype. * If present, the @NL80211_BSS_PRESP_DATA attribute indicates that the * data here is known to be from a probe response, without any heuristics. 
* @NL80211_BSS_SIGNAL_MBM: signal strength of probe response/beacon * in mBm (100 * dBm) (s32) * @NL80211_BSS_SIGNAL_UNSPEC: signal strength of the probe response/beacon * in unspecified units, scaled to 0..100 (u8) * @NL80211_BSS_STATUS: status, if this BSS is "used" * @NL80211_BSS_SEEN_MS_AGO: age of this BSS entry in ms * @NL80211_BSS_BEACON_IES: binary attribute containing the raw information * elements from a Beacon frame (bin); not present if no Beacon frame has * yet been received * @NL80211_BSS_CHAN_WIDTH: channel width of the control channel * (u32, enum nl80211_bss_scan_width) * @NL80211_BSS_BEACON_TSF: TSF of the last received beacon (u64) * (not present if no beacon frame has been received yet) * @NL80211_BSS_PRESP_DATA: the data in @NL80211_BSS_INFORMATION_ELEMENTS and * @NL80211_BSS_TSF is known to be from a probe response (flag attribute) * @NL80211_BSS_LAST_SEEN_BOOTTIME: CLOCK_BOOTTIME timestamp when this entry * was last updated by a received frame. The value is expected to be * accurate to about 10ms. (u64, nanoseconds) * @NL80211_BSS_PAD: attribute used for padding for 64-bit alignment * @NL80211_BSS_PARENT_TSF: the time at the start of reception of the first * octet of the timestamp field of the last beacon/probe received for * this BSS. The time is the TSF of the BSS specified by * @NL80211_BSS_PARENT_BSSID. (u64). * @NL80211_BSS_PARENT_BSSID: the BSS according to which @NL80211_BSS_PARENT_TSF * is set. * @NL80211_BSS_CHAIN_SIGNAL: per-chain signal strength of last BSS update. * Contains a nested array of signal strength attributes (u8, dBm), * using the nesting index as the antenna number. * @__NL80211_BSS_AFTER_LAST: internal * @NL80211_BSS_MAX: highest BSS attribute */ enum nl80211_bss { __NL80211_BSS_INVALID, NL80211_BSS_BSSID, NL80211_BSS_FREQUENCY, NL80211_BSS_TSF, NL80211_BSS_BEACON_INTERVAL, NL80211_BSS_CAPABILITY, NL80211_BSS_INFORMATION_ELEMENTS, NL80211_BSS_SIGNAL_MBM, NL80211_BSS_SIGNAL_UNSPEC, NL80211_BSS_STATUS, NL80211_BSS_SEEN_MS_AGO, NL80211_BSS_BEACON_IES, NL80211_BSS_CHAN_WIDTH, NL80211_BSS_BEACON_TSF, NL80211_BSS_PRESP_DATA, NL80211_BSS_LAST_SEEN_BOOTTIME, NL80211_BSS_PAD, NL80211_BSS_PARENT_TSF, NL80211_BSS_PARENT_BSSID, NL80211_BSS_CHAIN_SIGNAL, /* keep last */ __NL80211_BSS_AFTER_LAST, NL80211_BSS_MAX = __NL80211_BSS_AFTER_LAST - 1 }; /** * enum nl80211_bss_status - BSS "status" * @NL80211_BSS_STATUS_AUTHENTICATED: Authenticated with this BSS. * Note that this is no longer used since cfg80211 no longer * keeps track of whether or not authentication was done with * a given BSS. * @NL80211_BSS_STATUS_ASSOCIATED: Associated with this BSS. * @NL80211_BSS_STATUS_IBSS_JOINED: Joined to this IBSS. * * The BSS status is a BSS attribute in scan dumps, which * indicates the status the interface has wrt. this BSS. 
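*
* A minimal illustrative sketch (not part of this header), assuming
* bss[] was filled from %NL80211_ATTR_BSS in a scan-dump callback with
* libnl-3's nla_parse_nested():
*
*	if (bss[NL80211_BSS_STATUS] &&
*	    nla_get_u32(bss[NL80211_BSS_STATUS]) ==
*					NL80211_BSS_STATUS_ASSOCIATED)
*		printf("currently associated to this BSS\n");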
*/ enum nl80211_bss_status { NL80211_BSS_STATUS_AUTHENTICATED, NL80211_BSS_STATUS_ASSOCIATED, NL80211_BSS_STATUS_IBSS_JOINED, }; /** * enum nl80211_auth_type - AuthenticationType * * @NL80211_AUTHTYPE_OPEN_SYSTEM: Open System authentication * @NL80211_AUTHTYPE_SHARED_KEY: Shared Key authentication (WEP only) * @NL80211_AUTHTYPE_FT: Fast BSS Transition (IEEE 802.11r) * @NL80211_AUTHTYPE_NETWORK_EAP: Network EAP (some Cisco APs and mainly LEAP) * @NL80211_AUTHTYPE_SAE: Simultaneous authentication of equals * @NL80211_AUTHTYPE_FILS_SK: Fast Initial Link Setup shared key * @NL80211_AUTHTYPE_FILS_SK_PFS: Fast Initial Link Setup shared key with PFS * @NL80211_AUTHTYPE_FILS_PK: Fast Initial Link Setup public key * @__NL80211_AUTHTYPE_NUM: internal * @NL80211_AUTHTYPE_MAX: maximum valid auth algorithm * @NL80211_AUTHTYPE_AUTOMATIC: determine automatically (if necessary by * trying multiple times); this is invalid in netlink -- leave out * the attribute for this on CONNECT commands. */ enum nl80211_auth_type { NL80211_AUTHTYPE_OPEN_SYSTEM, NL80211_AUTHTYPE_SHARED_KEY, NL80211_AUTHTYPE_FT, NL80211_AUTHTYPE_NETWORK_EAP, NL80211_AUTHTYPE_SAE, NL80211_AUTHTYPE_FILS_SK, NL80211_AUTHTYPE_FILS_SK_PFS, NL80211_AUTHTYPE_FILS_PK, /* keep last */ __NL80211_AUTHTYPE_NUM, NL80211_AUTHTYPE_MAX = __NL80211_AUTHTYPE_NUM - 1, NL80211_AUTHTYPE_AUTOMATIC }; /** * enum nl80211_key_type - Key Type * @NL80211_KEYTYPE_GROUP: Group (broadcast/multicast) key * @NL80211_KEYTYPE_PAIRWISE: Pairwise (unicast/individual) key * @NL80211_KEYTYPE_PEERKEY: PeerKey (DLS) * @NUM_NL80211_KEYTYPES: number of defined key types */ enum nl80211_key_type { NL80211_KEYTYPE_GROUP, NL80211_KEYTYPE_PAIRWISE, NL80211_KEYTYPE_PEERKEY, NUM_NL80211_KEYTYPES }; /** * enum nl80211_mfp - Management frame protection state * @NL80211_MFP_NO: Management frame protection not used * @NL80211_MFP_REQUIRED: Management frame protection required * @NL80211_MFP_OPTIONAL: Management frame protection is optional */ enum nl80211_mfp { NL80211_MFP_NO, NL80211_MFP_REQUIRED, NL80211_MFP_OPTIONAL, }; enum nl80211_wpa_versions { NL80211_WPA_VERSION_1 = 1 << 0, NL80211_WPA_VERSION_2 = 1 << 1, }; /** * enum nl80211_key_default_types - key default types * @__NL80211_KEY_DEFAULT_TYPE_INVALID: invalid * @NL80211_KEY_DEFAULT_TYPE_UNICAST: key should be used as default * unicast key * @NL80211_KEY_DEFAULT_TYPE_MULTICAST: key should be used as default * multicast key * @NUM_NL80211_KEY_DEFAULT_TYPES: number of default types */ enum nl80211_key_default_types { __NL80211_KEY_DEFAULT_TYPE_INVALID, NL80211_KEY_DEFAULT_TYPE_UNICAST, NL80211_KEY_DEFAULT_TYPE_MULTICAST, NUM_NL80211_KEY_DEFAULT_TYPES }; /** * enum nl80211_key_attributes - key attributes * @__NL80211_KEY_INVALID: invalid * @NL80211_KEY_DATA: (temporal) key data; for TKIP this consists of * 16 bytes encryption key followed by 8 bytes each for TX and RX MIC * keys * @NL80211_KEY_IDX: key ID (u8, 0-3) * @NL80211_KEY_CIPHER: key cipher suite (u32, as defined by IEEE 802.11 * section 7.3.2.25.1, e.g. 
0x000FAC04) * @NL80211_KEY_SEQ: transmit key sequence number (IV/PN) for TKIP and * CCMP keys, each six bytes in little endian * @NL80211_KEY_DEFAULT: flag indicating default key * @NL80211_KEY_DEFAULT_MGMT: flag indicating default management key * @NL80211_KEY_TYPE: the key type from enum nl80211_key_type, if not * specified the default depends on whether a MAC address was * given with the command using the key or not (u32) * @NL80211_KEY_DEFAULT_TYPES: A nested attribute containing flags * attributes, specifying what a key should be set as default as. * See &enum nl80211_key_default_types. * @__NL80211_KEY_AFTER_LAST: internal * @NL80211_KEY_MAX: highest key attribute */ enum nl80211_key_attributes { __NL80211_KEY_INVALID, NL80211_KEY_DATA, NL80211_KEY_IDX, NL80211_KEY_CIPHER, NL80211_KEY_SEQ, NL80211_KEY_DEFAULT, NL80211_KEY_DEFAULT_MGMT, NL80211_KEY_TYPE, NL80211_KEY_DEFAULT_TYPES, /* keep last */ __NL80211_KEY_AFTER_LAST, NL80211_KEY_MAX = __NL80211_KEY_AFTER_LAST - 1 }; /** * enum nl80211_tx_rate_attributes - TX rate set attributes * @__NL80211_TXRATE_INVALID: invalid * @NL80211_TXRATE_LEGACY: Legacy (non-MCS) rates allowed for TX rate selection * in an array of rates as defined in IEEE 802.11 7.3.2.2 (u8 values with * 1 = 500 kbps) but without the IE length restriction (at most * %NL80211_MAX_SUPP_RATES in a single array). * @NL80211_TXRATE_HT: HT (MCS) rates allowed for TX rate selection * in an array of MCS numbers. * @NL80211_TXRATE_VHT: VHT rates allowed for TX rate selection, * see &struct nl80211_txrate_vht * @NL80211_TXRATE_GI: configure GI, see &enum nl80211_txrate_gi * @__NL80211_TXRATE_AFTER_LAST: internal * @NL80211_TXRATE_MAX: highest TX rate attribute */ enum nl80211_tx_rate_attributes { __NL80211_TXRATE_INVALID, NL80211_TXRATE_LEGACY, NL80211_TXRATE_HT, NL80211_TXRATE_VHT, NL80211_TXRATE_GI, /* keep last */ __NL80211_TXRATE_AFTER_LAST, NL80211_TXRATE_MAX = __NL80211_TXRATE_AFTER_LAST - 1 }; #define NL80211_TXRATE_MCS NL80211_TXRATE_HT #define NL80211_VHT_NSS_MAX 8 /** * struct nl80211_txrate_vht - VHT MCS/NSS txrate bitmap * @mcs: MCS bitmap table for each NSS (array index 0 for 1 stream, etc.) */ struct nl80211_txrate_vht { __u16 mcs[NL80211_VHT_NSS_MAX]; }; enum nl80211_txrate_gi { NL80211_TXRATE_DEFAULT_GI, NL80211_TXRATE_FORCE_SGI, NL80211_TXRATE_FORCE_LGI, }; /** * enum nl80211_band - Frequency band * @NL80211_BAND_2GHZ: 2.4 GHz ISM band * @NL80211_BAND_5GHZ: around 5 GHz band (4.9 - 5.7 GHz) * @NL80211_BAND_60GHZ: around 60 GHz band (58.32 - 69.12 GHz) * @NUM_NL80211_BANDS: number of bands, avoid using this in userspace * since newer kernel versions may support more bands */ enum nl80211_band { NL80211_BAND_2GHZ, NL80211_BAND_5GHZ, NL80211_BAND_60GHZ, NUM_NL80211_BANDS, }; /** * enum nl80211_ps_state - powersave state * @NL80211_PS_DISABLED: powersave is disabled * @NL80211_PS_ENABLED: powersave is enabled */ enum nl80211_ps_state { NL80211_PS_DISABLED, NL80211_PS_ENABLED, }; /** * enum nl80211_attr_cqm - connection quality monitor attributes * @__NL80211_ATTR_CQM_INVALID: invalid * @NL80211_ATTR_CQM_RSSI_THOLD: RSSI threshold in dBm. This value specifies * the threshold for the RSSI level at which an event will be sent. Zero * to disable. Alternatively, if %NL80211_EXT_FEATURE_CQM_RSSI_LIST is * set, multiple values can be supplied as a low-to-high sorted array of * threshold values in dBm. Events will be sent when the RSSI value * crosses any of the thresholds. * @NL80211_ATTR_CQM_RSSI_HYST: RSSI hysteresis in dBm. 
This value specifies * the minimum amount the RSSI level must change after an event before a * new event may be issued (to reduce effects of RSSI oscillation). * @NL80211_ATTR_CQM_RSSI_THRESHOLD_EVENT: RSSI threshold event * @NL80211_ATTR_CQM_PKT_LOSS_EVENT: a u32 value indicating that this many * consecutive packets were not acknowledged by the peer * @NL80211_ATTR_CQM_TXE_RATE: TX error rate in %. Minimum % of TX failures * during the given %NL80211_ATTR_CQM_TXE_INTVL before an * %NL80211_CMD_NOTIFY_CQM with reported %NL80211_ATTR_CQM_TXE_RATE and * %NL80211_ATTR_CQM_TXE_PKTS is generated. * @NL80211_ATTR_CQM_TXE_PKTS: number of attempted packets in a given * %NL80211_ATTR_CQM_TXE_INTVL before %NL80211_ATTR_CQM_TXE_RATE is * checked. * @NL80211_ATTR_CQM_TXE_INTVL: interval in seconds. Specifies the periodic * interval in which %NL80211_ATTR_CQM_TXE_PKTS and * %NL80211_ATTR_CQM_TXE_RATE must be satisfied before generating an * %NL80211_CMD_NOTIFY_CQM. Set to 0 to turn off TX error reporting. * @NL80211_ATTR_CQM_BEACON_LOSS_EVENT: flag attribute that's set in a beacon * loss event * @NL80211_ATTR_CQM_RSSI_LEVEL: the RSSI value in dBm that triggered the * RSSI threshold event. * @__NL80211_ATTR_CQM_AFTER_LAST: internal * @NL80211_ATTR_CQM_MAX: highest CQM attribute */ enum nl80211_attr_cqm { __NL80211_ATTR_CQM_INVALID, NL80211_ATTR_CQM_RSSI_THOLD, NL80211_ATTR_CQM_RSSI_HYST, NL80211_ATTR_CQM_RSSI_THRESHOLD_EVENT, NL80211_ATTR_CQM_PKT_LOSS_EVENT, NL80211_ATTR_CQM_TXE_RATE, NL80211_ATTR_CQM_TXE_PKTS, NL80211_ATTR_CQM_TXE_INTVL, NL80211_ATTR_CQM_BEACON_LOSS_EVENT, NL80211_ATTR_CQM_RSSI_LEVEL, /* keep last */ __NL80211_ATTR_CQM_AFTER_LAST, NL80211_ATTR_CQM_MAX = __NL80211_ATTR_CQM_AFTER_LAST - 1 }; /** * enum nl80211_cqm_rssi_threshold_event - RSSI threshold event * @NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW: The RSSI level is lower than the * configured threshold * @NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH: The RSSI is higher than the * configured threshold * @NL80211_CQM_RSSI_BEACON_LOSS_EVENT: (reserved, never sent) */ enum nl80211_cqm_rssi_threshold_event { NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW, NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH, NL80211_CQM_RSSI_BEACON_LOSS_EVENT, }; /** * enum nl80211_tx_power_setting - TX power adjustment * @NL80211_TX_POWER_AUTOMATIC: automatically determine transmit power * @NL80211_TX_POWER_LIMITED: limit TX power by the mBm parameter * @NL80211_TX_POWER_FIXED: fix TX power to the mBm parameter */ enum nl80211_tx_power_setting { NL80211_TX_POWER_AUTOMATIC, NL80211_TX_POWER_LIMITED, NL80211_TX_POWER_FIXED, }; /** * enum nl80211_packet_pattern_attr - packet pattern attribute * @__NL80211_PKTPAT_INVALID: invalid number for nested attribute * @NL80211_PKTPAT_PATTERN: the pattern, values where the mask has * a zero bit are ignored * @NL80211_PKTPAT_MASK: pattern mask, must be long enough to have * a bit for each byte in the pattern. The lowest-order bit corresponds * to the first byte of the pattern, but the bytes of the pattern are * in a little-endian-like format, i.e. the 9th byte of the pattern * corresponds to the lowest-order bit in the second byte of the mask. * For example: The match 00:xx:00:00:xx:00:00:00:00:xx:xx:xx (where * xx indicates "don't care") would be represented by a pattern of * twelve zero bytes, and a mask of "0xed,0x01". * Note that the pattern matching is done as though frames were not * 802.11 frames but 802.3 frames, i.e. the frame is fully unpacked * first (including SNAP header unpacking) and then matched. 
* @NL80211_PKTPAT_OFFSET: packet offset, pattern is matched after * these fixed number of bytes of received packet * @NUM_NL80211_PKTPAT: number of attributes * @MAX_NL80211_PKTPAT: max attribute number */ enum nl80211_packet_pattern_attr { __NL80211_PKTPAT_INVALID, NL80211_PKTPAT_MASK, NL80211_PKTPAT_PATTERN, NL80211_PKTPAT_OFFSET, NUM_NL80211_PKTPAT, MAX_NL80211_PKTPAT = NUM_NL80211_PKTPAT - 1, }; /** * struct nl80211_pattern_support - packet pattern support information * @max_patterns: maximum number of patterns supported * @min_pattern_len: minimum length of each pattern * @max_pattern_len: maximum length of each pattern * @max_pkt_offset: maximum Rx packet offset * * This struct is carried in %NL80211_WOWLAN_TRIG_PKT_PATTERN when * that is part of %NL80211_ATTR_WOWLAN_TRIGGERS_SUPPORTED or in * %NL80211_ATTR_COALESCE_RULE_PKT_PATTERN when that is part of * %NL80211_ATTR_COALESCE_RULE in the capability information given * by the kernel to userspace. */ struct nl80211_pattern_support { __u32 max_patterns; __u32 min_pattern_len; __u32 max_pattern_len; __u32 max_pkt_offset; } __attribute__((packed)); /* only for backward compatibility */ #define __NL80211_WOWLAN_PKTPAT_INVALID __NL80211_PKTPAT_INVALID #define NL80211_WOWLAN_PKTPAT_MASK NL80211_PKTPAT_MASK #define NL80211_WOWLAN_PKTPAT_PATTERN NL80211_PKTPAT_PATTERN #define NL80211_WOWLAN_PKTPAT_OFFSET NL80211_PKTPAT_OFFSET #define NUM_NL80211_WOWLAN_PKTPAT NUM_NL80211_PKTPAT #define MAX_NL80211_WOWLAN_PKTPAT MAX_NL80211_PKTPAT #define nl80211_wowlan_pattern_support nl80211_pattern_support /** * enum nl80211_wowlan_triggers - WoWLAN trigger definitions * @__NL80211_WOWLAN_TRIG_INVALID: invalid number for nested attributes * @NL80211_WOWLAN_TRIG_ANY: wake up on any activity, do not really put * the chip into a special state -- works best with chips that have * support for low-power operation already (flag) * Note that this mode is incompatible with all of the others, if * any others are even supported by the device. * @NL80211_WOWLAN_TRIG_DISCONNECT: wake up on disconnect, the way disconnect * is detected is implementation-specific (flag) * @NL80211_WOWLAN_TRIG_MAGIC_PKT: wake up on magic packet (6x 0xff, followed * by 16 repetitions of MAC addr, anywhere in payload) (flag) * @NL80211_WOWLAN_TRIG_PKT_PATTERN: wake up on the specified packet patterns * which are passed in an array of nested attributes, each nested attribute * defining a pattern with attributes from &struct nl80211_wowlan_trig_pkt_pattern. * Each pattern defines a wakeup packet. Packet offset is associated with * each pattern which is used while matching the pattern. The matching is * done on the MSDU, i.e. as though the packet was an 802.3 packet, so the * pattern matching is done after the packet is converted to the MSDU. * * In %NL80211_ATTR_WOWLAN_TRIGGERS_SUPPORTED, it is a binary attribute * carrying a &struct nl80211_pattern_support. * * When reporting wakeup, it is a u32 attribute containing the 0-based * index of the pattern that caused the wakeup, in the patterns passed * to the kernel when configuring. 
* @NL80211_WOWLAN_TRIG_GTK_REKEY_SUPPORTED: Not a real trigger, and cannot be * used when setting, used only to indicate that GTK rekeying is supported * by the device (flag) * @NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE: wake up on GTK rekey failure (if * done by the device) (flag) * @NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST: wake up on EAP Identity Request * packet (flag) * @NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE: wake up on 4-way handshake (flag) * @NL80211_WOWLAN_TRIG_RFKILL_RELEASE: wake up when rfkill is released * (on devices that have rfkill in the device) (flag) * @NL80211_WOWLAN_TRIG_WAKEUP_PKT_80211: For wakeup reporting only, contains * the 802.11 packet that caused the wakeup, e.g. a deauth frame. The frame * may be truncated, the @NL80211_WOWLAN_TRIG_WAKEUP_PKT_80211_LEN * attribute contains the original length. * @NL80211_WOWLAN_TRIG_WAKEUP_PKT_80211_LEN: Original length of the 802.11 * packet, may be bigger than the @NL80211_WOWLAN_TRIG_WAKEUP_PKT_80211 * attribute if the packet was truncated somewhere. * @NL80211_WOWLAN_TRIG_WAKEUP_PKT_8023: For wakeup reporting only, contains the * 802.3 packet that caused the wakeup, e.g. a magic packet. The frame may * be truncated, the @NL80211_WOWLAN_TRIG_WAKEUP_PKT_8023_LEN attribute * contains the original length. * @NL80211_WOWLAN_TRIG_WAKEUP_PKT_8023_LEN: Original length of the 802.3 * packet, may be bigger than the @NL80211_WOWLAN_TRIG_WAKEUP_PKT_8023 * attribute if the packet was truncated somewhere. * @NL80211_WOWLAN_TRIG_TCP_CONNECTION: TCP connection wake, see DOC section * "TCP connection wakeup" for more details. This is a nested attribute * containing the exact information for establishing and keeping alive * the TCP connection. * @NL80211_WOWLAN_TRIG_WAKEUP_TCP_MATCH: For wakeup reporting only, the * wakeup packet was received on the TCP connection * @NL80211_WOWLAN_TRIG_WAKEUP_TCP_CONNLOST: For wakeup reporting only, the * TCP connection was lost or failed to be established * @NL80211_WOWLAN_TRIG_WAKEUP_TCP_NOMORETOKENS: For wakeup reporting only, * the TCP connection ran out of tokens to use for data to send to the * service * @NL80211_WOWLAN_TRIG_NET_DETECT: wake up when a configured network * is detected. This is a nested attribute that contains the * same attributes used with @NL80211_CMD_START_SCHED_SCAN. It * specifies how the scan is performed (e.g. the interval, the * channels to scan and the initial delay) as well as the scan * results that will trigger a wake (i.e. the matchsets). This * attribute is also sent in a response to * @NL80211_CMD_GET_WIPHY, indicating the number of match sets * supported by the driver (u32). * @NL80211_WOWLAN_TRIG_NET_DETECT_RESULTS: nested attribute * containing an array with information about what triggered the * wake up. If no elements are present in the array, it means * that the information is not available. If more than one * element is present, it means that more than one match * occurred. * Each element in the array is a nested attribute that contains * one optional %NL80211_ATTR_SSID attribute and one optional * %NL80211_ATTR_SCAN_FREQUENCIES attribute. At least one of * these attributes must be present. If * %NL80211_ATTR_SCAN_FREQUENCIES contains more than one * frequency, it means that the match occurred in more than one * channel. * @NUM_NL80211_WOWLAN_TRIG: number of wake on wireless triggers * @MAX_NL80211_WOWLAN_TRIG: highest wowlan trigger attribute number * * These nested attributes are used to configure the wakeup triggers and * to report the wakeup reason(s). 
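 *
 * For illustration only (not part of the UAPI): a minimal userspace sketch
 * assuming libnl-3 genl helpers and a hypothetical wiphy index of 0, with
 * all error handling omitted, that enables magic-packet and disconnect
 * wakeup:
 *
 *	struct nl_sock *sk = nl_socket_alloc();
 *	struct nl_msg *msg = nlmsg_alloc();
 *	struct nlattr *trig;
 *
 *	genl_connect(sk);
 *	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ,
 *		    genl_ctrl_resolve(sk, "nl80211"), 0, 0,
 *		    NL80211_CMD_SET_WOWLAN, 0);
 *	nla_put_u32(msg, NL80211_ATTR_WIPHY, 0);
 *	trig = nla_nest_start(msg, NL80211_ATTR_WOWLAN_TRIGGERS);
 *	nla_put_flag(msg, NL80211_WOWLAN_TRIG_MAGIC_PKT);
 *	nla_put_flag(msg, NL80211_WOWLAN_TRIG_DISCONNECT);
 *	nla_nest_end(msg, trig);
 *	nl_send_auto(sk, msg);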
*/ enum nl80211_wowlan_triggers { __NL80211_WOWLAN_TRIG_INVALID, NL80211_WOWLAN_TRIG_ANY, NL80211_WOWLAN_TRIG_DISCONNECT, NL80211_WOWLAN_TRIG_MAGIC_PKT, NL80211_WOWLAN_TRIG_PKT_PATTERN, NL80211_WOWLAN_TRIG_GTK_REKEY_SUPPORTED, NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE, NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST, NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE, NL80211_WOWLAN_TRIG_RFKILL_RELEASE, NL80211_WOWLAN_TRIG_WAKEUP_PKT_80211, NL80211_WOWLAN_TRIG_WAKEUP_PKT_80211_LEN, NL80211_WOWLAN_TRIG_WAKEUP_PKT_8023, NL80211_WOWLAN_TRIG_WAKEUP_PKT_8023_LEN, NL80211_WOWLAN_TRIG_TCP_CONNECTION, NL80211_WOWLAN_TRIG_WAKEUP_TCP_MATCH, NL80211_WOWLAN_TRIG_WAKEUP_TCP_CONNLOST, NL80211_WOWLAN_TRIG_WAKEUP_TCP_NOMORETOKENS, NL80211_WOWLAN_TRIG_NET_DETECT, NL80211_WOWLAN_TRIG_NET_DETECT_RESULTS, /* keep last */ NUM_NL80211_WOWLAN_TRIG, MAX_NL80211_WOWLAN_TRIG = NUM_NL80211_WOWLAN_TRIG - 1 }; /** * DOC: TCP connection wakeup * * Some devices can establish a TCP connection in order to be woken up by a * packet coming in from outside their network segment, or behind NAT. If * configured, the device will establish a TCP connection to the given * service, and periodically send data to that service. The first data * packet is usually transmitted after SYN/ACK, also ACKing the SYN/ACK. * The data packets can optionally include a (little endian) sequence * number (in the TCP payload!) that is generated by the device, and, also * optionally, a token from a list of tokens. This serves as a keep-alive * with the service, and for NATed connections, etc. * * During this keep-alive period, the server doesn't send any data to the * client. When receiving data, it is compared against the wakeup pattern * (and mask) and if it matches, the host is woken up. Similarly, if the * connection breaks or cannot be established to start with, the host is * also woken up. * * Developer's note: ARP offload is required for this, otherwise TCP * response packets might not go through correctly. */ /** * struct nl80211_wowlan_tcp_data_seq - WoWLAN TCP data sequence * @start: starting value * @offset: offset of sequence number in packet * @len: length of the sequence value to write, 1 through 4 * * Note: don't confuse with the TCP sequence number(s), this is for the * keepalive packet payload. The actual value is written into the packet * in little endian. 
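 *
 * As a sketch of the byte layout only (an illustrative helper, not part of
 * the UAPI), serializing a counter value into the keepalive payload
 * according to this struct would look like:
 *
 *	static void put_keepalive_seq(__u8 *payload, __u32 val,
 *				      const struct nl80211_wowlan_tcp_data_seq *s)
 *	{
 *		unsigned int i;
 *
 *		for (i = 0; i < s->len; i++)
 *			payload[s->offset + i] = (val >> (8 * i)) & 0xff;
 *	}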
*/ struct nl80211_wowlan_tcp_data_seq { __u32 start, offset, len; }; /** * struct nl80211_wowlan_tcp_data_token - WoWLAN TCP data token config * @offset: offset of token in packet * @len: length of each token * @token_stream: stream of data to be used for the tokens, the length must * be a multiple of @len for this to make sense */ struct nl80211_wowlan_tcp_data_token { __u32 offset, len; __u8 token_stream[]; }; /** * struct nl80211_wowlan_tcp_data_token_feature - data token features * @min_len: minimum token length * @max_len: maximum token length * @bufsize: total available token buffer size (max size of @token_stream) */ struct nl80211_wowlan_tcp_data_token_feature { __u32 min_len, max_len, bufsize; }; /** * enum nl80211_wowlan_tcp_attrs - WoWLAN TCP connection parameters * @__NL80211_WOWLAN_TCP_INVALID: invalid number for nested attributes * @NL80211_WOWLAN_TCP_SRC_IPV4: source IPv4 address (in network byte order) * @NL80211_WOWLAN_TCP_DST_IPV4: destination IPv4 address * (in network byte order) * @NL80211_WOWLAN_TCP_DST_MAC: destination MAC address, this is given because * route lookup when configured might be invalid by the time we suspend, * and doing a route lookup when suspending is no longer possible as it * might require ARP querying. * @NL80211_WOWLAN_TCP_SRC_PORT: source port (u16); optional, if not given a * socket and port will be allocated * @NL80211_WOWLAN_TCP_DST_PORT: destination port (u16) * @NL80211_WOWLAN_TCP_DATA_PAYLOAD: data packet payload, at least one byte. * For feature advertising, a u32 attribute holding the maximum length * of the data payload. * @NL80211_WOWLAN_TCP_DATA_PAYLOAD_SEQ: data packet sequence configuration * (if desired), a &struct nl80211_wowlan_tcp_data_seq. For feature * advertising it is just a flag * @NL80211_WOWLAN_TCP_DATA_PAYLOAD_TOKEN: data packet token configuration, * see &struct nl80211_wowlan_tcp_data_token and for advertising see * &struct nl80211_wowlan_tcp_data_token_feature. * @NL80211_WOWLAN_TCP_DATA_INTERVAL: data interval in seconds, maximum * interval in feature advertising (u32) * @NL80211_WOWLAN_TCP_WAKE_PAYLOAD: wake packet payload, for advertising a * u32 attribute holding the maximum length * @NL80211_WOWLAN_TCP_WAKE_MASK: Wake packet payload mask, not used for * feature advertising. The mask works like @NL80211_PKTPAT_MASK * but on the TCP payload only. * @NUM_NL80211_WOWLAN_TCP: number of TCP attributes * @MAX_NL80211_WOWLAN_TCP: highest attribute number */ enum nl80211_wowlan_tcp_attrs { __NL80211_WOWLAN_TCP_INVALID, NL80211_WOWLAN_TCP_SRC_IPV4, NL80211_WOWLAN_TCP_DST_IPV4, NL80211_WOWLAN_TCP_DST_MAC, NL80211_WOWLAN_TCP_SRC_PORT, NL80211_WOWLAN_TCP_DST_PORT, NL80211_WOWLAN_TCP_DATA_PAYLOAD, NL80211_WOWLAN_TCP_DATA_PAYLOAD_SEQ, NL80211_WOWLAN_TCP_DATA_PAYLOAD_TOKEN, NL80211_WOWLAN_TCP_DATA_INTERVAL, NL80211_WOWLAN_TCP_WAKE_PAYLOAD, NL80211_WOWLAN_TCP_WAKE_MASK, /* keep last */ NUM_NL80211_WOWLAN_TCP, MAX_NL80211_WOWLAN_TCP = NUM_NL80211_WOWLAN_TCP - 1 }; /** * struct nl80211_coalesce_rule_support - coalesce rule support information * @max_rules: maximum number of rules supported * @pat: packet pattern support information * @max_delay: maximum supported coalescing delay in msecs * * This struct is carried in %NL80211_ATTR_COALESCE_RULE in the * capability information given by the kernel to userspace. 
*/ struct nl80211_coalesce_rule_support { __u32 max_rules; struct nl80211_pattern_support pat; __u32 max_delay; } __attribute__((packed)); /** * enum nl80211_attr_coalesce_rule - coalesce rule attribute * @__NL80211_COALESCE_RULE_INVALID: invalid number for nested attribute * @NL80211_ATTR_COALESCE_RULE_DELAY: delay in msecs used for packet coalescing * @NL80211_ATTR_COALESCE_RULE_CONDITION: condition for packet coalescence, * see &enum nl80211_coalesce_condition. * @NL80211_ATTR_COALESCE_RULE_PKT_PATTERN: packet offset, pattern is matched * after these fixed number of bytes of received packet * @NUM_NL80211_ATTR_COALESCE_RULE: number of attributes * @NL80211_ATTR_COALESCE_RULE_MAX: max attribute number */ enum nl80211_attr_coalesce_rule { __NL80211_COALESCE_RULE_INVALID, NL80211_ATTR_COALESCE_RULE_DELAY, NL80211_ATTR_COALESCE_RULE_CONDITION, NL80211_ATTR_COALESCE_RULE_PKT_PATTERN, /* keep last */ NUM_NL80211_ATTR_COALESCE_RULE, NL80211_ATTR_COALESCE_RULE_MAX = NUM_NL80211_ATTR_COALESCE_RULE - 1 }; /** * enum nl80211_coalesce_condition - coalesce rule conditions * @NL80211_COALESCE_CONDITION_MATCH: coalesce Rx packets when patterns * in a rule are matched. * @NL80211_COALESCE_CONDITION_NO_MATCH: coalesce Rx packets when patterns * in a rule are not matched. */ enum nl80211_coalesce_condition { NL80211_COALESCE_CONDITION_MATCH, NL80211_COALESCE_CONDITION_NO_MATCH }; /** * enum nl80211_iface_limit_attrs - limit attributes * @NL80211_IFACE_LIMIT_UNSPEC: (reserved) * @NL80211_IFACE_LIMIT_MAX: maximum number of interfaces that * can be chosen from this set of interface types (u32) * @NL80211_IFACE_LIMIT_TYPES: nested attribute containing a * flag attribute for each interface type in this set * @NUM_NL80211_IFACE_LIMIT: number of attributes * @MAX_NL80211_IFACE_LIMIT: highest attribute number */ enum nl80211_iface_limit_attrs { NL80211_IFACE_LIMIT_UNSPEC, NL80211_IFACE_LIMIT_MAX, NL80211_IFACE_LIMIT_TYPES, /* keep last */ NUM_NL80211_IFACE_LIMIT, MAX_NL80211_IFACE_LIMIT = NUM_NL80211_IFACE_LIMIT - 1 }; /** * enum nl80211_if_combination_attrs -- interface combination attributes * * @NL80211_IFACE_COMB_UNSPEC: (reserved) * @NL80211_IFACE_COMB_LIMITS: Nested attributes containing the limits * for given interface types, see &enum nl80211_iface_limit_attrs. * @NL80211_IFACE_COMB_MAXNUM: u32 attribute giving the total number of * interfaces that can be created in this group. This number doesn't * apply to interfaces purely managed in software, which are listed * in a separate attribute %NL80211_ATTR_INTERFACES_SOFTWARE. * @NL80211_IFACE_COMB_STA_AP_BI_MATCH: flag attribute specifying that * beacon intervals within this group must be all the same even for * infrastructure and AP/GO combinations, i.e. the GO(s) must adopt * the infrastructure network's beacon interval. * @NL80211_IFACE_COMB_NUM_CHANNELS: u32 attribute specifying how many * different channels may be used within this group. * @NL80211_IFACE_COMB_RADAR_DETECT_WIDTHS: u32 attribute containing the bitmap * of supported channel widths for radar detection. * @NL80211_IFACE_COMB_RADAR_DETECT_REGIONS: u32 attribute containing the bitmap * of supported regulatory regions for radar detection. * @NL80211_IFACE_COMB_BI_MIN_GCD: u32 attribute specifying the minimum GCD of * different beacon intervals supported by all the interface combinations * in this group (if not present, all beacon intervals must be identical). 
* @NUM_NL80211_IFACE_COMB: number of attributes * @MAX_NL80211_IFACE_COMB: highest attribute number * * Examples: * limits = [ #{STA} <= 1, #{AP} <= 1 ], matching BI, channels = 1, max = 2 * => allows an AP and a STA that must match BIs * * numbers = [ #{AP, P2P-GO} <= 8 ], BI min gcd, channels = 1, max = 8, * => allows 8 of AP/GO that can have BI gcd >= min gcd * * numbers = [ #{STA} <= 2 ], channels = 2, max = 2 * => allows two STAs on different channels * * numbers = [ #{STA} <= 1, #{P2P-client,P2P-GO} <= 3 ], max = 4 * => allows a STA plus three P2P interfaces * * The list of these four possibilities could completely be contained * within the %NL80211_ATTR_INTERFACE_COMBINATIONS attribute to indicate * that any of these groups must match. * * "Combinations" of just a single interface will not be listed here, * a single interface of any valid interface type is assumed to always * be possible by itself. This means that implicitly, for each valid * interface type, the following group always exists: * numbers = [ #{} <= 1 ], channels = 1, max = 1 */ enum nl80211_if_combination_attrs { NL80211_IFACE_COMB_UNSPEC, NL80211_IFACE_COMB_LIMITS, NL80211_IFACE_COMB_MAXNUM, NL80211_IFACE_COMB_STA_AP_BI_MATCH, NL80211_IFACE_COMB_NUM_CHANNELS, NL80211_IFACE_COMB_RADAR_DETECT_WIDTHS, NL80211_IFACE_COMB_RADAR_DETECT_REGIONS, NL80211_IFACE_COMB_BI_MIN_GCD, /* keep last */ NUM_NL80211_IFACE_COMB, MAX_NL80211_IFACE_COMB = NUM_NL80211_IFACE_COMB - 1 }; /** * enum nl80211_plink_state - state of a mesh peer link finite state machine * * @NL80211_PLINK_LISTEN: initial state, considered the implicit * state of non-existent mesh peer links * @NL80211_PLINK_OPN_SNT: mesh plink open frame has been sent to * this mesh peer * @NL80211_PLINK_OPN_RCVD: mesh plink open frame has been received * from this mesh peer * @NL80211_PLINK_CNF_RCVD: mesh plink confirm frame has been * received from this mesh peer * @NL80211_PLINK_ESTAB: mesh peer link is established * @NL80211_PLINK_HOLDING: mesh peer link is being closed or cancelled * @NL80211_PLINK_BLOCKED: all frames transmitted from this mesh * plink are discarded * @NUM_NL80211_PLINK_STATES: number of peer link states * @MAX_NL80211_PLINK_STATES: highest numerical value of plink states */ enum nl80211_plink_state { NL80211_PLINK_LISTEN, NL80211_PLINK_OPN_SNT, NL80211_PLINK_OPN_RCVD, NL80211_PLINK_CNF_RCVD, NL80211_PLINK_ESTAB, NL80211_PLINK_HOLDING, NL80211_PLINK_BLOCKED, /* keep last */ NUM_NL80211_PLINK_STATES, MAX_NL80211_PLINK_STATES = NUM_NL80211_PLINK_STATES - 1 }; /** * enum nl80211_plink_action - actions to perform on mesh peers * * @NL80211_PLINK_ACTION_NO_ACTION: perform no action * @NL80211_PLINK_ACTION_OPEN: start mesh peer link establishment * @NL80211_PLINK_ACTION_BLOCK: block traffic from this mesh peer * @NUM_NL80211_PLINK_ACTIONS: number of possible actions */ enum plink_actions { NL80211_PLINK_ACTION_NO_ACTION, NL80211_PLINK_ACTION_OPEN, NL80211_PLINK_ACTION_BLOCK, NUM_NL80211_PLINK_ACTIONS, }; #define NL80211_KCK_LEN 16 #define NL80211_KEK_LEN 16 #define NL80211_REPLAY_CTR_LEN 8 /** * enum nl80211_rekey_data - attributes for GTK rekey offload * @__NL80211_REKEY_DATA_INVALID: invalid number for nested attributes * @NL80211_REKEY_DATA_KEK: key encryption key (binary) * @NL80211_REKEY_DATA_KCK: key confirmation key (binary) * @NL80211_REKEY_DATA_REPLAY_CTR: replay counter (binary) * @NUM_NL80211_REKEY_DATA: number of rekey attributes (internal) * @MAX_NL80211_REKEY_DATA: highest rekey attribute (internal) */ enum nl80211_rekey_data { 
__NL80211_REKEY_DATA_INVALID, NL80211_REKEY_DATA_KEK, NL80211_REKEY_DATA_KCK, NL80211_REKEY_DATA_REPLAY_CTR, /* keep last */ NUM_NL80211_REKEY_DATA, MAX_NL80211_REKEY_DATA = NUM_NL80211_REKEY_DATA - 1 }; /** * enum nl80211_hidden_ssid - values for %NL80211_ATTR_HIDDEN_SSID * @NL80211_HIDDEN_SSID_NOT_IN_USE: do not hide SSID (i.e., broadcast it in * Beacon frames) * @NL80211_HIDDEN_SSID_ZERO_LEN: hide SSID by using zero-length SSID element * in Beacon frames * @NL80211_HIDDEN_SSID_ZERO_CONTENTS: hide SSID by using correct length of SSID * element in Beacon frames but zero out each byte in the SSID */ enum nl80211_hidden_ssid { NL80211_HIDDEN_SSID_NOT_IN_USE, NL80211_HIDDEN_SSID_ZERO_LEN, NL80211_HIDDEN_SSID_ZERO_CONTENTS }; /** * enum nl80211_sta_wme_attr - station WME attributes * @__NL80211_STA_WME_INVALID: invalid number for nested attribute * @NL80211_STA_WME_UAPSD_QUEUES: bitmap of uapsd queues. The format * is the same as the AC bitmap in the QoS info field. * @NL80211_STA_WME_MAX_SP: max service period. The format is the same * as the MAX_SP field in the QoS info field (but already shifted down). * @__NL80211_STA_WME_AFTER_LAST: internal * @NL80211_STA_WME_MAX: highest station WME attribute */ enum nl80211_sta_wme_attr { __NL80211_STA_WME_INVALID, NL80211_STA_WME_UAPSD_QUEUES, NL80211_STA_WME_MAX_SP, /* keep last */ __NL80211_STA_WME_AFTER_LAST, NL80211_STA_WME_MAX = __NL80211_STA_WME_AFTER_LAST - 1 }; /** * enum nl80211_pmksa_candidate_attr - attributes for PMKSA caching candidates * @__NL80211_PMKSA_CANDIDATE_INVALID: invalid number for nested attributes * @NL80211_PMKSA_CANDIDATE_INDEX: candidate index (u32; the smaller, the higher * priority) * @NL80211_PMKSA_CANDIDATE_BSSID: candidate BSSID (6 octets) * @NL80211_PMKSA_CANDIDATE_PREAUTH: RSN pre-authentication supported (flag) * @NUM_NL80211_PMKSA_CANDIDATE: number of PMKSA caching candidate attributes * (internal) * @MAX_NL80211_PMKSA_CANDIDATE: highest PMKSA caching candidate attribute * (internal) */ enum nl80211_pmksa_candidate_attr { __NL80211_PMKSA_CANDIDATE_INVALID, NL80211_PMKSA_CANDIDATE_INDEX, NL80211_PMKSA_CANDIDATE_BSSID, NL80211_PMKSA_CANDIDATE_PREAUTH, /* keep last */ NUM_NL80211_PMKSA_CANDIDATE, MAX_NL80211_PMKSA_CANDIDATE = NUM_NL80211_PMKSA_CANDIDATE - 1 }; /** * enum nl80211_tdls_operation - values for %NL80211_ATTR_TDLS_OPERATION * @NL80211_TDLS_DISCOVERY_REQ: Send a TDLS discovery request * @NL80211_TDLS_SETUP: Setup TDLS link * @NL80211_TDLS_TEARDOWN: Teardown a TDLS link which is already established * @NL80211_TDLS_ENABLE_LINK: Enable TDLS link * @NL80211_TDLS_DISABLE_LINK: Disable TDLS link */ enum nl80211_tdls_operation { NL80211_TDLS_DISCOVERY_REQ, NL80211_TDLS_SETUP, NL80211_TDLS_TEARDOWN, NL80211_TDLS_ENABLE_LINK, NL80211_TDLS_DISABLE_LINK, }; /* * enum nl80211_ap_sme_features - device-integrated AP features * Reserved for future use, no bits are defined in * NL80211_ATTR_DEVICE_AP_SME yet. enum nl80211_ap_sme_features { }; */ /** * enum nl80211_feature_flags - device/driver features * @NL80211_FEATURE_SK_TX_STATUS: This driver supports reflecting back * TX status to the socket error queue when requested with the * socket option. * @NL80211_FEATURE_HT_IBSS: This driver supports IBSS with HT datarates. * @NL80211_FEATURE_INACTIVITY_TIMER: This driver takes care of freeing up * the connected inactive stations in AP mode. * @NL80211_FEATURE_CELL_BASE_REG_HINTS: This driver has been tested * to work properly to support receiving regulatory hints from * cellular base stations. 
* @NL80211_FEATURE_P2P_DEVICE_NEEDS_CHANNEL: (no longer available, only * here to reserve the value for API/ABI compatibility) * @NL80211_FEATURE_SAE: This driver supports simultaneous authentication of * equals (SAE) with user space SME (NL80211_CMD_AUTHENTICATE) in station * mode * @NL80211_FEATURE_LOW_PRIORITY_SCAN: This driver supports low priority scan * @NL80211_FEATURE_SCAN_FLUSH: Scan flush is supported * @NL80211_FEATURE_AP_SCAN: Support scanning using an AP vif * @NL80211_FEATURE_VIF_TXPOWER: The driver supports per-vif TX power setting * @NL80211_FEATURE_NEED_OBSS_SCAN: The driver expects userspace to perform * OBSS scans and generate 20/40 BSS coex reports. This flag is used only * for drivers implementing the CONNECT API, for AUTH/ASSOC it is implied. * @NL80211_FEATURE_P2P_GO_CTWIN: P2P GO implementation supports CT Window * setting * @NL80211_FEATURE_P2P_GO_OPPPS: P2P GO implementation supports opportunistic * powersave * @NL80211_FEATURE_FULL_AP_CLIENT_STATE: The driver supports full state * transitions for AP clients. Without this flag (and if the driver * doesn't have the AP SME in the device) the driver supports adding * stations only when they're associated and adds them in associated * state (to later be transitioned into authorized), with this flag * they should be added before even sending the authentication reply * and then transitioned into authenticated, associated and authorized * states using station flags. * Note that even for drivers that support this, the default is to add * stations in authenticated/associated state, so to add unauthenticated * stations the authenticated/associated bits have to be set in the mask. * @NL80211_FEATURE_ADVERTISE_CHAN_LIMITS: cfg80211 advertises channel limits * (HT40, VHT 80/160 MHz) if this flag is set * @NL80211_FEATURE_USERSPACE_MPM: This driver supports a userspace Mesh * Peering Management entity which may be implemented by registering for * beacons or NL80211_CMD_NEW_PEER_CANDIDATE events. The mesh beacon is * still generated by the driver. * @NL80211_FEATURE_ACTIVE_MONITOR: This driver supports an active monitor * interface. An active monitor interface behaves like a normal monitor * interface, but gets added to the driver. It ensures that incoming * unicast packets directed at the configured interface address get ACKed. * @NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE: This driver supports dynamic * channel bandwidth change (e.g., HT 20 <-> 40 MHz channel) during the * lifetime of a BSS. * @NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES: This device adds a DS Parameter * Set IE to probe requests. * @NL80211_FEATURE_WFA_TPC_IE_IN_PROBES: This device adds a WFA TPC Report IE * to probe requests. * @NL80211_FEATURE_QUIET: This device, in client mode, supports Quiet Period * requests sent to it by an AP. * @NL80211_FEATURE_TX_POWER_INSERTION: This device is capable of inserting the * current tx power value into the TPC Report IE in the spectrum * management TPC Report action frame, and in the Radio Measurement Link * Measurement Report action frame. * @NL80211_FEATURE_ACKTO_ESTIMATION: This driver supports dynamic ACK timeout * estimation (dynack). %NL80211_ATTR_WIPHY_DYN_ACK flag attribute is used * to enable dynack. * @NL80211_FEATURE_STATIC_SMPS: Device supports static spatial * multiplexing powersave, i.e. can turn off all but one chain * even on HT connections that should be using more chains. * @NL80211_FEATURE_DYNAMIC_SMPS: Device supports dynamic spatial * multiplexing powersave, i.e. 
can turn off all but one chain * and then wake the rest up as required after, for example, * rts/cts handshake. * @NL80211_FEATURE_SUPPORTS_WMM_ADMISSION: the device supports setting up WMM * TSPEC sessions (TID aka TSID 0-7) with the %NL80211_CMD_ADD_TX_TS * command. Standard IEEE 802.11 TSPEC setup is not yet supported, it * needs to be able to handle Block-Ack agreements and other things. * @NL80211_FEATURE_MAC_ON_CREATE: Device supports configuring * the vif's MAC address upon creation. * See 'macaddr' field in the vif_params (cfg80211.h). * @NL80211_FEATURE_TDLS_CHANNEL_SWITCH: Driver supports channel switching when * operating as a TDLS peer. * @NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR: This device/driver supports using a * random MAC address during scan (if the device is unassociated); the * %NL80211_SCAN_FLAG_RANDOM_ADDR flag may be set for scans and the MAC * address mask/value will be used. * @NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR: This device/driver supports * using a random MAC address for every scan iteration during scheduled * scan (while not associated), the %NL80211_SCAN_FLAG_RANDOM_ADDR may * be set for scheduled scan and the MAC address mask/value will be used. * @NL80211_FEATURE_ND_RANDOM_MAC_ADDR: This device/driver supports using a * random MAC address for every scan iteration during "net detect", i.e. * scan in unassociated WoWLAN, the %NL80211_SCAN_FLAG_RANDOM_ADDR may * be set for scheduled scan and the MAC address mask/value will be used. */ enum nl80211_feature_flags { NL80211_FEATURE_SK_TX_STATUS = 1 << 0, NL80211_FEATURE_HT_IBSS = 1 << 1, NL80211_FEATURE_INACTIVITY_TIMER = 1 << 2, NL80211_FEATURE_CELL_BASE_REG_HINTS = 1 << 3, NL80211_FEATURE_P2P_DEVICE_NEEDS_CHANNEL = 1 << 4, NL80211_FEATURE_SAE = 1 << 5, NL80211_FEATURE_LOW_PRIORITY_SCAN = 1 << 6, NL80211_FEATURE_SCAN_FLUSH = 1 << 7, NL80211_FEATURE_AP_SCAN = 1 << 8, NL80211_FEATURE_VIF_TXPOWER = 1 << 9, NL80211_FEATURE_NEED_OBSS_SCAN = 1 << 10, NL80211_FEATURE_P2P_GO_CTWIN = 1 << 11, NL80211_FEATURE_P2P_GO_OPPPS = 1 << 12, /* bit 13 is reserved */ NL80211_FEATURE_ADVERTISE_CHAN_LIMITS = 1 << 14, NL80211_FEATURE_FULL_AP_CLIENT_STATE = 1 << 15, NL80211_FEATURE_USERSPACE_MPM = 1 << 16, NL80211_FEATURE_ACTIVE_MONITOR = 1 << 17, NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE = 1 << 18, NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES = 1 << 19, NL80211_FEATURE_WFA_TPC_IE_IN_PROBES = 1 << 20, NL80211_FEATURE_QUIET = 1 << 21, NL80211_FEATURE_TX_POWER_INSERTION = 1 << 22, NL80211_FEATURE_ACKTO_ESTIMATION = 1 << 23, NL80211_FEATURE_STATIC_SMPS = 1 << 24, NL80211_FEATURE_DYNAMIC_SMPS = 1 << 25, NL80211_FEATURE_SUPPORTS_WMM_ADMISSION = 1 << 26, NL80211_FEATURE_MAC_ON_CREATE = 1 << 27, NL80211_FEATURE_TDLS_CHANNEL_SWITCH = 1 << 28, NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR = 1 << 29, NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR = 1 << 30, NL80211_FEATURE_ND_RANDOM_MAC_ADDR = 1 << 31, }; /** * enum nl80211_ext_feature_index - bit index of extended features. * @NL80211_EXT_FEATURE_VHT_IBSS: This driver supports IBSS with VHT datarates. * @NL80211_EXT_FEATURE_RRM: This driver supports RRM. When featured, user * can request to use RRM (see %NL80211_ATTR_USE_RRM) with * %NL80211_CMD_ASSOCIATE and %NL80211_CMD_CONNECT requests, which will set * the ASSOC_REQ_USE_RRM flag in the association request even if * NL80211_FEATURE_QUIET is not advertised. 
* @NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER: This device supports MU-MIMO air * sniffer which means that it can be configured to hear packets from * certain groups which can be configured by the * %NL80211_ATTR_MU_MIMO_GROUP_DATA attribute, * or can be configured to follow a station by configuring the * %NL80211_ATTR_MU_MIMO_FOLLOW_MAC_ADDR attribute. * @NL80211_EXT_FEATURE_SCAN_START_TIME: This driver includes the actual * time the scan started in scan results event. The time is the TSF of * the BSS that the interface that requested the scan is connected to * (if available). * @NL80211_EXT_FEATURE_BSS_PARENT_TSF: Per BSS, this driver reports the * time the last beacon/probe was received. The time is the TSF of the * BSS that the interface that requested the scan is connected to * (if available). * @NL80211_EXT_FEATURE_SET_SCAN_DWELL: This driver supports configuration of * channel dwell time. * @NL80211_EXT_FEATURE_BEACON_RATE_LEGACY: Driver supports beacon rate * configuration (AP/mesh), supporting a legacy (non HT/VHT) rate. * @NL80211_EXT_FEATURE_BEACON_RATE_HT: Driver supports beacon rate * configuration (AP/mesh) with HT rates. * @NL80211_EXT_FEATURE_BEACON_RATE_VHT: Driver supports beacon rate * configuration (AP/mesh) with VHT rates. * @NL80211_EXT_FEATURE_FILS_STA: This driver supports Fast Initial Link Setup * with user space SME (NL80211_CMD_AUTHENTICATE) in station mode. * @NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA: This driver supports randomized TA * in @NL80211_CMD_FRAME while not associated. * @NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA_CONNECTED: This driver supports * randomized TA in @NL80211_CMD_FRAME while associated. * @NL80211_EXT_FEATURE_SCHED_SCAN_RELATIVE_RSSI: The driver supports sched_scan * for reporting BSSs with better RSSI than the current connected BSS * (%NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI). * @NL80211_EXT_FEATURE_CQM_RSSI_LIST: With this driver the * %NL80211_ATTR_CQM_RSSI_THOLD attribute accepts a list of zero or more * RSSI threshold values to monitor rather than exactly one threshold. * @NL80211_EXT_FEATURE_FILS_SK_OFFLOAD: Driver SME supports FILS shared key * authentication with %NL80211_CMD_CONNECT. * @NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_PSK: Device wants to do 4-way * handshake with PSK in station mode (PSK is passed as part of the connect * and associate commands), doing it in the host might not be supported. * @NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_1X: Device wants to do 4-way * handshake with 802.1X in station mode (will pass EAP frames to the host * and accept the set_pmk/del_pmk commands), doing it in the host might not * be supported. * @NL80211_EXT_FEATURE_FILS_MAX_CHANNEL_TIME: Driver is capable of overriding * the max channel attribute in the FILS request params IE with the * actual dwell time. * @NL80211_EXT_FEATURE_ACCEPT_BCAST_PROBE_RESP: Driver accepts broadcast probe * response * @NL80211_EXT_FEATURE_OCE_PROBE_REQ_HIGH_TX_RATE: Driver supports sending * the first probe request in each channel at rate of at least 5.5Mbps. * @NL80211_EXT_FEATURE_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION: Driver supports * probe request tx deferral and suppression * @NL80211_EXT_FEATURE_MFP_OPTIONAL: Driver supports the %NL80211_MFP_OPTIONAL * value in %NL80211_ATTR_USE_MFP. * @NL80211_EXT_FEATURE_LOW_SPAN_SCAN: Driver supports low span scan. * @NL80211_EXT_FEATURE_LOW_POWER_SCAN: Driver supports low power scan. * @NL80211_EXT_FEATURE_HIGH_ACCURACY_SCAN: Driver supports high accuracy scan. 
* @NL80211_EXT_FEATURE_DFS_OFFLOAD: HW/driver will offload DFS actions. * Device or driver will do all DFS-related actions by itself, * informing user-space about CAC progress, radar detection event, * channel change triggered by radar detection event. * No need to start CAC from user-space, no need to react to * "radar detected" event. * @NL80211_EXT_FEATURE_CONTROL_PORT_OVER_NL80211: Driver supports sending and * receiving control port frames over nl80211 instead of the netdevice. * @NL80211_EXT_FEATURE_ACK_SIGNAL_SUPPORT: This driver/device supports * (average) ACK signal strength reporting. * @NL80211_EXT_FEATURE_TXQS: Driver supports FQ-CoDel-enabled intermediate * TXQs. * @NL80211_EXT_FEATURE_SCAN_RANDOM_SN: Driver/device supports randomizing the * SN in probe request frames if requested by %NL80211_SCAN_FLAG_RANDOM_SN. * @NL80211_EXT_FEATURE_SCAN_MIN_PREQ_CONTENT: Driver/device can omit all data * except for supported rates from the probe request content if requested * by the %NL80211_SCAN_FLAG_MIN_PREQ_CONTENT flag. * @NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER: Driver supports enabling fine * timing measurement responder role. * * @NL80211_EXT_FEATURE_CAN_REPLACE_PTK0: Driver/device confirm that they are * able to rekey an in-use key correctly. Userspace must not rekey PTK keys * if this flag is not set. Ignoring this can leak clear text packets and/or * freeze the connection. * * @NUM_NL80211_EXT_FEATURES: number of extended features. * @MAX_NL80211_EXT_FEATURES: highest extended feature index. */ enum nl80211_ext_feature_index { NL80211_EXT_FEATURE_VHT_IBSS, NL80211_EXT_FEATURE_RRM, NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER, NL80211_EXT_FEATURE_SCAN_START_TIME, NL80211_EXT_FEATURE_BSS_PARENT_TSF, NL80211_EXT_FEATURE_SET_SCAN_DWELL, NL80211_EXT_FEATURE_BEACON_RATE_LEGACY, NL80211_EXT_FEATURE_BEACON_RATE_HT, NL80211_EXT_FEATURE_BEACON_RATE_VHT, NL80211_EXT_FEATURE_FILS_STA, NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA, NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA_CONNECTED, NL80211_EXT_FEATURE_SCHED_SCAN_RELATIVE_RSSI, NL80211_EXT_FEATURE_CQM_RSSI_LIST, NL80211_EXT_FEATURE_FILS_SK_OFFLOAD, NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_PSK, NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_1X, NL80211_EXT_FEATURE_FILS_MAX_CHANNEL_TIME, NL80211_EXT_FEATURE_ACCEPT_BCAST_PROBE_RESP, NL80211_EXT_FEATURE_OCE_PROBE_REQ_HIGH_TX_RATE, NL80211_EXT_FEATURE_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION, NL80211_EXT_FEATURE_MFP_OPTIONAL, NL80211_EXT_FEATURE_LOW_SPAN_SCAN, NL80211_EXT_FEATURE_LOW_POWER_SCAN, NL80211_EXT_FEATURE_HIGH_ACCURACY_SCAN, NL80211_EXT_FEATURE_DFS_OFFLOAD, NL80211_EXT_FEATURE_CONTROL_PORT_OVER_NL80211, NL80211_EXT_FEATURE_ACK_SIGNAL_SUPPORT, /* we renamed this - stay compatible */ NL80211_EXT_FEATURE_DATA_ACK_SIGNAL_SUPPORT = NL80211_EXT_FEATURE_ACK_SIGNAL_SUPPORT, NL80211_EXT_FEATURE_TXQS, NL80211_EXT_FEATURE_SCAN_RANDOM_SN, NL80211_EXT_FEATURE_SCAN_MIN_PREQ_CONTENT, NL80211_EXT_FEATURE_CAN_REPLACE_PTK0, NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER, /* add new features before the definition below */ NUM_NL80211_EXT_FEATURES, MAX_NL80211_EXT_FEATURES = NUM_NL80211_EXT_FEATURES - 1 }; /** * enum nl80211_probe_resp_offload_support_attr - optional supported * protocols for probe-response offloading by the driver/FW. * To be used with the %NL80211_ATTR_PROBE_RESP_OFFLOAD attribute. * Each enum value represents a bit in the bitmap of supported * protocols. Typically a subset of probe-requests belonging to a * supported protocol will be excluded from offload and uploaded * to the host. 
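 *
 * For example (illustrative only, assuming a libnl-3 parsed attribute
 * table tb[]), userspace can test the advertised bitmap against the bit
 * values listed below:
 *
 *	__u32 protos = nla_get_u32(tb[NL80211_ATTR_PROBE_RESP_OFFLOAD]);
 *
 *	if (protos & NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2)
 *		printf("WPS 2.0 probe responses offloaded\n");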
* * @NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS: Support for WPS ver. 1 * @NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2: Support for WPS ver. 2 * @NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P: Support for P2P * @NL80211_PROBE_RESP_OFFLOAD_SUPPORT_80211U: Support for 802.11u */ enum nl80211_probe_resp_offload_support_attr { NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS = 1<<0, NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 = 1<<1, NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P = 1<<2, NL80211_PROBE_RESP_OFFLOAD_SUPPORT_80211U = 1<<3, }; /** * enum nl80211_connect_failed_reason - connection request failed reasons * @NL80211_CONN_FAIL_MAX_CLIENTS: Maximum number of clients that can be * handled by the AP is reached. * @NL80211_CONN_FAIL_BLOCKED_CLIENT: Connection request is rejected due to ACL. */ enum nl80211_connect_failed_reason { NL80211_CONN_FAIL_MAX_CLIENTS, NL80211_CONN_FAIL_BLOCKED_CLIENT, }; /** * enum nl80211_timeout_reason - timeout reasons * * @NL80211_TIMEOUT_UNSPECIFIED: Timeout reason unspecified. * @NL80211_TIMEOUT_SCAN: Scan (AP discovery) timed out. * @NL80211_TIMEOUT_AUTH: Authentication timed out. * @NL80211_TIMEOUT_ASSOC: Association timed out. */ enum nl80211_timeout_reason { NL80211_TIMEOUT_UNSPECIFIED, NL80211_TIMEOUT_SCAN, NL80211_TIMEOUT_AUTH, NL80211_TIMEOUT_ASSOC, }; /** * enum nl80211_scan_flags - scan request control flags * * Scan request control flags are used to control the handling * of NL80211_CMD_TRIGGER_SCAN and NL80211_CMD_START_SCHED_SCAN * requests. * * NL80211_SCAN_FLAG_LOW_SPAN, NL80211_SCAN_FLAG_LOW_POWER, and * NL80211_SCAN_FLAG_HIGH_ACCURACY flags are exclusive of each other, i.e., only * one of them can be used in the request. * * @NL80211_SCAN_FLAG_LOW_PRIORITY: scan request has low priority * @NL80211_SCAN_FLAG_FLUSH: flush cache before scanning * @NL80211_SCAN_FLAG_AP: force a scan even if the interface is configured * as AP and the beaconing has already been configured. This attribute is * dangerous because it will destroy station performance as a lot of frames * will be lost while scanning off-channel, therefore it must be used only * when really needed * @NL80211_SCAN_FLAG_RANDOM_ADDR: use a random MAC address for this scan (or * for scheduled scan: a different one for every scan iteration). When the * flag is set, depending on device capabilities the @NL80211_ATTR_MAC and * @NL80211_ATTR_MAC_MASK attributes may also be given in which case only * the masked bits will be preserved from the MAC address and the remainder * randomised. If the attributes are not given full randomisation (46 bits, * locally administered 1, multicast 0) is assumed. * This flag must not be requested when the feature isn't supported, check * the nl80211 feature flags for the device. * @NL80211_SCAN_FLAG_FILS_MAX_CHANNEL_TIME: fill the dwell time in the FILS * request parameters IE in the probe request * @NL80211_SCAN_FLAG_ACCEPT_BCAST_PROBE_RESP: accept broadcast probe responses * @NL80211_SCAN_FLAG_OCE_PROBE_REQ_HIGH_TX_RATE: send probe request frames at * rate of at least 5.5M. In case a non-OCE AP is discovered in the channel, * only the first probe req in the channel will be sent in high rate. * @NL80211_SCAN_FLAG_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION: allow probe request * tx deferral (dot11FILSProbeDelay shall be set to 15ms) * and suppression (if it has received a broadcast Probe Response frame, * Beacon frame or FILS Discovery frame from an AP that the STA considers * a suitable candidate for (re-)association - suitable in terms of * SSID and/or RSSI). 
* @NL80211_SCAN_FLAG_LOW_SPAN: Span corresponds to the total time taken to * accomplish the scan. Thus, this flag asks the driver to perform the * scan request over a shorter span/duration. How this is accomplished * is specific to the driver implementation. Scan accuracy may get * impacted with this flag. * @NL80211_SCAN_FLAG_LOW_POWER: This flag asks the scan to consume as * little power as possible. Drivers can resort to their specific means to * optimize the power. Scan accuracy may get impacted with this flag. * @NL80211_SCAN_FLAG_HIGH_ACCURACY: Accuracy here refers to the extent of scan * results obtained. Thus the HIGH_ACCURACY scan flag aims to get the maximum * possible scan results. This flag hints the driver to use the best * possible scan configuration to improve the accuracy in scanning. * Latency and power use may get impacted with this flag. * @NL80211_SCAN_FLAG_RANDOM_SN: randomize the sequence number in probe * request frames from this scan to avoid correlation/tracking being * possible. * @NL80211_SCAN_FLAG_MIN_PREQ_CONTENT: minimize probe request content to * only have supported rates and no additional capabilities (unless * added by userspace explicitly). */ enum nl80211_scan_flags { NL80211_SCAN_FLAG_LOW_PRIORITY = 1<<0, NL80211_SCAN_FLAG_FLUSH = 1<<1, NL80211_SCAN_FLAG_AP = 1<<2, NL80211_SCAN_FLAG_RANDOM_ADDR = 1<<3, NL80211_SCAN_FLAG_FILS_MAX_CHANNEL_TIME = 1<<4, NL80211_SCAN_FLAG_ACCEPT_BCAST_PROBE_RESP = 1<<5, NL80211_SCAN_FLAG_OCE_PROBE_REQ_HIGH_TX_RATE = 1<<6, NL80211_SCAN_FLAG_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION = 1<<7, NL80211_SCAN_FLAG_LOW_SPAN = 1<<8, NL80211_SCAN_FLAG_LOW_POWER = 1<<9, NL80211_SCAN_FLAG_HIGH_ACCURACY = 1<<10, NL80211_SCAN_FLAG_RANDOM_SN = 1<<11, NL80211_SCAN_FLAG_MIN_PREQ_CONTENT = 1<<12, }; /** * enum nl80211_acl_policy - access control policy * * Access control policy is applied on a MAC list set by * %NL80211_CMD_START_AP and %NL80211_CMD_SET_MAC_ACL, to * be used with %NL80211_ATTR_ACL_POLICY. * * @NL80211_ACL_POLICY_ACCEPT_UNLESS_LISTED: Deny stations which are * listed in ACL, i.e. allow all the stations which are not listed * in ACL to authenticate. * @NL80211_ACL_POLICY_DENY_UNLESS_LISTED: Allow the stations which are listed * in ACL, i.e. deny all the stations which are not listed in ACL. */ enum nl80211_acl_policy { NL80211_ACL_POLICY_ACCEPT_UNLESS_LISTED, NL80211_ACL_POLICY_DENY_UNLESS_LISTED, }; /** * enum nl80211_smps_mode - SMPS mode * * Requested SMPS mode (for AP mode) * * @NL80211_SMPS_OFF: SMPS off (use all antennas). * @NL80211_SMPS_STATIC: static SMPS (use a single antenna) * @NL80211_SMPS_DYNAMIC: dynamic SMPS (start with a single antenna and * turn on other antennas after CTS/RTS). */ enum nl80211_smps_mode { NL80211_SMPS_OFF, NL80211_SMPS_STATIC, NL80211_SMPS_DYNAMIC, __NL80211_SMPS_AFTER_LAST, NL80211_SMPS_MAX = __NL80211_SMPS_AFTER_LAST - 1 }; /** * enum nl80211_radar_event - type of radar event for DFS operation * * Type of event to be used with NL80211_ATTR_RADAR_EVENT to inform userspace * about detected radars or success of the channel available check (CAC) * * @NL80211_RADAR_DETECTED: A radar pattern has been detected. The channel is * now unusable. * @NL80211_RADAR_CAC_FINISHED: Channel Availability Check has been finished, * the channel is now available. * @NL80211_RADAR_CAC_ABORTED: Channel Availability Check has been aborted, no * change to the channel status. * @NL80211_RADAR_NOP_FINISHED: The Non-Occupancy Period for this channel is * over, channel becomes usable. 
* @NL80211_RADAR_PRE_CAC_EXPIRED: Channel Availability Check done on this * non-operating channel is expired and no longer valid. New CAC must * be done on this channel before starting the operation. This is not * applicable for ETSI dfs domain where pre-CAC is valid forever. * @NL80211_RADAR_CAC_STARTED: Channel Availability Check has been started, * should be generated by HW if NL80211_EXT_FEATURE_DFS_OFFLOAD is enabled. */ enum nl80211_radar_event { NL80211_RADAR_DETECTED, NL80211_RADAR_CAC_FINISHED, NL80211_RADAR_CAC_ABORTED, NL80211_RADAR_NOP_FINISHED, NL80211_RADAR_PRE_CAC_EXPIRED, NL80211_RADAR_CAC_STARTED, }; /** * enum nl80211_dfs_state - DFS states for channels * * Channel states used by the DFS code. * * @NL80211_DFS_USABLE: The channel can be used, but channel availability * check (CAC) must be performed before using it for AP or IBSS. * @NL80211_DFS_UNAVAILABLE: A radar has been detected on this channel, it * is therefore marked as not available. * @NL80211_DFS_AVAILABLE: The channel has been CAC checked and is available. */ enum nl80211_dfs_state { NL80211_DFS_USABLE, NL80211_DFS_UNAVAILABLE, NL80211_DFS_AVAILABLE, }; /** * enum nl80211_protocol_features - nl80211 protocol features * @NL80211_PROTOCOL_FEATURE_SPLIT_WIPHY_DUMP: nl80211 supports splitting * wiphy dumps (if requested by the application with the attribute * %NL80211_ATTR_SPLIT_WIPHY_DUMP). Also supported is filtering the * wiphy dump by %NL80211_ATTR_WIPHY, %NL80211_ATTR_IFINDEX or * %NL80211_ATTR_WDEV. */ enum nl80211_protocol_features { NL80211_PROTOCOL_FEATURE_SPLIT_WIPHY_DUMP = 1 << 0, }; /** * enum nl80211_crit_proto_id - nl80211 critical protocol identifiers * * @NL80211_CRIT_PROTO_UNSPEC: protocol unspecified. * @NL80211_CRIT_PROTO_DHCP: BOOTP or DHCPv6 protocol. * @NL80211_CRIT_PROTO_EAPOL: EAPOL protocol. * @NL80211_CRIT_PROTO_APIPA: APIPA protocol. * @NUM_NL80211_CRIT_PROTO: must be kept last. */ enum nl80211_crit_proto_id { NL80211_CRIT_PROTO_UNSPEC, NL80211_CRIT_PROTO_DHCP, NL80211_CRIT_PROTO_EAPOL, NL80211_CRIT_PROTO_APIPA, /* add other protocols before this one */ NUM_NL80211_CRIT_PROTO }; /* maximum duration for critical protocol measures */ #define NL80211_CRIT_PROTO_MAX_DURATION 5000 /* msec */ /** * enum nl80211_rxmgmt_flags - flags for received management frame. * * Used by cfg80211_rx_mgmt() * * @NL80211_RXMGMT_FLAG_ANSWERED: frame was answered by device/driver. */ enum nl80211_rxmgmt_flags { NL80211_RXMGMT_FLAG_ANSWERED = 1 << 0, }; /* * If this flag is unset, the lower 24 bits are an OUI, if set * a Linux nl80211 vendor ID is used (no such IDs are allocated * yet, so that's not valid so far) */ #define NL80211_VENDOR_ID_IS_LINUX 0x80000000 /** * struct nl80211_vendor_cmd_info - vendor command data * @vendor_id: If the %NL80211_VENDOR_ID_IS_LINUX flag is clear, then the * value is a 24-bit OUI; if it is set then a separately allocated ID * may be used, but no such IDs are allocated yet. New IDs should be * added to this file when needed. * @subcmd: sub-command ID for the command */ struct nl80211_vendor_cmd_info { __u32 vendor_id; __u32 subcmd; }; /** * enum nl80211_tdls_peer_capability - TDLS peer flags. * * Used by tdls_mgmt() to determine which conditional elements need * to be added to TDLS Setup frames. * * @NL80211_TDLS_PEER_HT: TDLS peer is HT capable. * @NL80211_TDLS_PEER_VHT: TDLS peer is VHT capable. * @NL80211_TDLS_PEER_WMM: TDLS peer is WMM capable. 
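 *
 * Illustrative use (a sketch only; libnl-3 message setup for
 * %NL80211_CMD_TDLS_MGMT omitted, using the %NL80211_ATTR_TDLS_PEER_CAPABILITY
 * attribute defined earlier in this file):
 *
 *	__u32 peer_caps = NL80211_TDLS_PEER_HT | NL80211_TDLS_PEER_WMM;
 *
 *	nla_put_u32(msg, NL80211_ATTR_TDLS_PEER_CAPABILITY, peer_caps);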
*/ enum nl80211_tdls_peer_capability { NL80211_TDLS_PEER_HT = 1<<0, NL80211_TDLS_PEER_VHT = 1<<1, NL80211_TDLS_PEER_WMM = 1<<2, }; /** * enum nl80211_sched_scan_plan - scanning plan for scheduled scan * @__NL80211_SCHED_SCAN_PLAN_INVALID: attribute number 0 is reserved * @NL80211_SCHED_SCAN_PLAN_INTERVAL: interval between scan iterations. In * seconds (u32). * @NL80211_SCHED_SCAN_PLAN_ITERATIONS: number of scan iterations in this * scan plan (u32). The last scan plan must not specify this attribute * because it will run infinitely. A value of zero is invalid as it will * make the scan plan meaningless. * @NL80211_SCHED_SCAN_PLAN_MAX: highest scheduled scan plan attribute number * currently defined * @__NL80211_SCHED_SCAN_PLAN_AFTER_LAST: internal use */ enum nl80211_sched_scan_plan { __NL80211_SCHED_SCAN_PLAN_INVALID, NL80211_SCHED_SCAN_PLAN_INTERVAL, NL80211_SCHED_SCAN_PLAN_ITERATIONS, /* keep last */ __NL80211_SCHED_SCAN_PLAN_AFTER_LAST, NL80211_SCHED_SCAN_PLAN_MAX = __NL80211_SCHED_SCAN_PLAN_AFTER_LAST - 1 }; /** * struct nl80211_bss_select_rssi_adjust - RSSI adjustment parameters. * * @band: band of BSS that must match for RSSI value adjustment. The value * of this field is according to &enum nl80211_band. * @delta: value used to adjust the RSSI value of matching BSS in dB. */ struct nl80211_bss_select_rssi_adjust { __u8 band; __s8 delta; } __attribute__((packed)); /** * enum nl80211_bss_select_attr - attributes for bss selection. * * @__NL80211_BSS_SELECT_ATTR_INVALID: reserved. * @NL80211_BSS_SELECT_ATTR_RSSI: Flag indicating only RSSI-based BSS selection * is requested. * @NL80211_BSS_SELECT_ATTR_BAND_PREF: attribute indicating BSS * selection should be done such that the specified band is preferred. * When there are multiple BSS-es in the preferred band, the driver * shall use RSSI-based BSS selection as a second step. The value of * this attribute is according to &enum nl80211_band (u32). * @NL80211_BSS_SELECT_ATTR_RSSI_ADJUST: When present the RSSI level for * BSS-es in the specified band is to be adjusted before doing * RSSI-based BSS selection. The attribute value is a packed structure * value as specified by &struct nl80211_bss_select_rssi_adjust. * @NL80211_BSS_SELECT_ATTR_MAX: highest bss select attribute number. * @__NL80211_BSS_SELECT_ATTR_AFTER_LAST: internal use. * * One and only one of these attributes is found within %NL80211_ATTR_BSS_SELECT * for %NL80211_CMD_CONNECT. It specifies the required BSS selection behaviour * which the driver shall use. 
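 *
 * For illustration only (a libnl-3 sketch; the connect message setup and
 * error handling are omitted), preferring the 5 GHz band could be
 * expressed as:
 *
 *	struct nlattr *bss_select;
 *
 *	bss_select = nla_nest_start(msg, NL80211_ATTR_BSS_SELECT);
 *	nla_put_u32(msg, NL80211_BSS_SELECT_ATTR_BAND_PREF, NL80211_BAND_5GHZ);
 *	nla_nest_end(msg, bss_select);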
*/ enum nl80211_bss_select_attr { __NL80211_BSS_SELECT_ATTR_INVALID, NL80211_BSS_SELECT_ATTR_RSSI, NL80211_BSS_SELECT_ATTR_BAND_PREF, NL80211_BSS_SELECT_ATTR_RSSI_ADJUST, /* keep last */ __NL80211_BSS_SELECT_ATTR_AFTER_LAST, NL80211_BSS_SELECT_ATTR_MAX = __NL80211_BSS_SELECT_ATTR_AFTER_LAST - 1 }; /** * enum nl80211_nan_function_type - NAN function type * * Defines the function type of a NAN function * * @NL80211_NAN_FUNC_PUBLISH: function is publish * @NL80211_NAN_FUNC_SUBSCRIBE: function is subscribe * @NL80211_NAN_FUNC_FOLLOW_UP: function is follow-up */ enum nl80211_nan_function_type { NL80211_NAN_FUNC_PUBLISH, NL80211_NAN_FUNC_SUBSCRIBE, NL80211_NAN_FUNC_FOLLOW_UP, /* keep last */ __NL80211_NAN_FUNC_TYPE_AFTER_LAST, NL80211_NAN_FUNC_MAX_TYPE = __NL80211_NAN_FUNC_TYPE_AFTER_LAST - 1, }; /** * enum nl80211_nan_publish_type - NAN publish tx type * * Defines how to send publish Service Discovery Frames * * @NL80211_NAN_SOLICITED_PUBLISH: publish function is solicited * @NL80211_NAN_UNSOLICITED_PUBLISH: publish function is unsolicited */ enum nl80211_nan_publish_type { NL80211_NAN_SOLICITED_PUBLISH = 1 << 0, NL80211_NAN_UNSOLICITED_PUBLISH = 1 << 1, }; /** * enum nl80211_nan_func_term_reason - NAN functions termination reason * * Defines termination reasons of a NAN function * * @NL80211_NAN_FUNC_TERM_REASON_USER_REQUEST: requested by user * @NL80211_NAN_FUNC_TERM_REASON_TTL_EXPIRED: timeout * @NL80211_NAN_FUNC_TERM_REASON_ERROR: errored */ enum nl80211_nan_func_term_reason { NL80211_NAN_FUNC_TERM_REASON_USER_REQUEST, NL80211_NAN_FUNC_TERM_REASON_TTL_EXPIRED, NL80211_NAN_FUNC_TERM_REASON_ERROR, }; #define NL80211_NAN_FUNC_SERVICE_ID_LEN 6 #define NL80211_NAN_FUNC_SERVICE_SPEC_INFO_MAX_LEN 0xff #define NL80211_NAN_FUNC_SRF_MAX_LEN 0xff /** * enum nl80211_nan_sec_ctx_type - NAN security context identifier type * @NL80211_NAN_SEC_CTX_TYPE_PMKID: a 16 octet PMKID identifying the PMK used * for setting up the secure data path. */ enum nl80211_nan_sec_ctx_type { NL80211_NAN_SEC_CTX_TYPE_PMKID, }; /** * enum nl80211_nan_cs_ids - NAN cipher suite identifiers * * Defines cipher suite identifiers for NAN security association * * @NL80211_NAN_CS_ID_SK_CCM_128: uses CCMP-128 for frame encryption, SHA-256 * for the hash function used in PRF, and HMAC-SHA-256 for KDF. * @NL80211_NAN_CS_ID_SK_GCM_256: uses GCMP-256 for frame encryption, SHA-384 * for the hash function used in PRF, and HMAC-SHA-384 for KDF. */ enum nl80211_nan_cs_ids { NL80211_NAN_CS_ID_SK_CCM_128 = 1 << 0, NL80211_NAN_CS_ID_SK_GCM_256 = 1 << 1, }; /** * enum nl80211_nan_sec_ctx_id - NAN security context identifier * @_NL80211_NAN_SEC_CTX_INVALID: invalid * @NL80211_NAN_SEC_CTX_ID_TYPE: the type of the identifier as specified in * &enum nl80211_nan_sec_ctx_type. * @NL80211_NAN_SEC_CTX_ID_DATA: the security context identifier data. * * @NUM_NL80211_NAN_SEC_CTX: internal * @NL80211_NAN_SEC_CTX_MAX: highest NAN security context attribute */ enum nl80211_nan_sec_ctx_id { _NL80211_NAN_SEC_CTX_INVALID, NL80211_NAN_SEC_CTX_ID_TYPE, NL80211_NAN_SEC_CTX_ID_DATA, /* keep last */ NUM_NL80211_NAN_SEC_CTX, NL80211_NAN_SEC_CTX_MAX = NUM_NL80211_NAN_SEC_CTX - 1 }; /** * enum nl80211_nan_fsd - NAN further service discovery method * @NL80211_NAN_FSD_FOLLOW_UP: follow up is used for further service discovery. * @NL80211_NAN_FSD_GAS: GAS is used for further service discovery. 
*/ enum nl80211_nan_fsd { NL80211_NAN_FSD_FOLLOW_UP, NL80211_NAN_FSD_GAS, }; /** * enum nl80211_nan_ndp_type - NAN data path type * @NL80211_NAN_NDP_TYPE_UNICAST: unicast data path is required for the service * @NL80211_NAN_NDP_TYPE_MCAST_ONE_TO_MANY: NAN multicast service group (NMSG) * of type one to many is required for the service. * @NL80211_NAN_NDP_TYPE_MCAST_MANY_TO_MANY: NAN multicast service group (NMSG) * of type many to many is required for the service. */ enum nl80211_nan_ndp_type { NL80211_NAN_NDP_TYPE_UNICAST, NL80211_NAN_NDP_TYPE_MCAST_ONE_TO_MANY, NL80211_NAN_NDP_TYPE_MCAST_MANY_TO_MANY, }; #define NL80211_NAN_FUNC_MAX_DW_INTERVAL 4 /** * enum nl80211_nan_func_attributes - NAN function attributes * @__NL80211_NAN_FUNC_INVALID: invalid * @NL80211_NAN_FUNC_TYPE: &enum nl80211_nan_function_type (u8). * @NL80211_NAN_FUNC_SERVICE_ID: 6 bytes of the service ID hash as * specified in NAN spec. This is a binary attribute. * @NL80211_NAN_FUNC_PUBLISH_TYPE: relevant if the function's type is * publish. Defines the transmission type for the publish Service Discovery * Frame, see &enum nl80211_nan_publish_type. Its type is u8. * @NL80211_NAN_FUNC_PUBLISH_BCAST: relevant if the function is a solicited * publish. Should the solicited publish Service Discovery Frame be sent to * the NAN Broadcast address. This is a flag. * @NL80211_NAN_FUNC_SUBSCRIBE_ACTIVE: relevant if the function's type is * subscribe. Is the subscribe active. This is a flag. * @NL80211_NAN_FUNC_FOLLOW_UP_ID: relevant if the function's type is follow up. * The instance ID for the follow up Service Discovery Frame. This is u8. * @NL80211_NAN_FUNC_FOLLOW_UP_REQ_ID: relevant if the function's type * is follow up. This is a u8. * The requestor instance ID for the follow up Service Discovery Frame. * @NL80211_NAN_FUNC_FOLLOW_UP_DEST: the MAC address of the recipient of the * follow up Service Discovery Frame. This is a binary attribute. * @NL80211_NAN_FUNC_CLOSE_RANGE: is this function limited for devices in a * close range. The range itself (RSSI) is defined by the device. * This is a flag. * @NL80211_NAN_FUNC_TTL: strictly positive number of DWs this function should * stay active. If not present infinite TTL is assumed. This is a u32. * @NL80211_NAN_FUNC_SERVICE_INFO: array of bytes describing the service * specific info. This is a binary attribute. * @NL80211_NAN_FUNC_SRF: Service Receive Filter. This is a nested attribute. * See &enum nl80211_nan_srf_attributes. * @NL80211_NAN_FUNC_RX_MATCH_FILTER: Receive Matching filter. This is a nested * attribute. It is a list of binary values. * @NL80211_NAN_FUNC_TX_MATCH_FILTER: Transmit Matching filter. This is a * nested attribute. It is a list of binary values. * @NL80211_NAN_FUNC_INSTANCE_ID: The instance ID of the function. * Its type is u8 and it cannot be 0. * @NL80211_NAN_FUNC_TERM_REASON: NAN function termination reason. * See &enum nl80211_nan_func_term_reason. * @NL80211_NAN_FUNC_SECURITY_CIPHER_SUITES: relevant if the function's type is * publish. This is a bitmap specifying the cipher suites identifiers of * the cipher suites supported by the service. See &enum nl80211_nan_cs_ids * (u32). * @NL80211_NAN_FUNC_SECURITY_CTX_IDS: relevant if the function's type is * publish and %NL80211_NAN_FUNC_SECURITY_CIPHER_SUITES is set. This is a * set of security context identifiers that may be used to set up a secured * data path for the service, each one is a nested attribute, * see &enum nl80211_nan_sec_ctx_id. * @NL80211_NAN_FUNC_FSD: relevant if the function's type is publish. 
If further * service discovery is required for the service, specifies the further * service discovery method to be used, as specified by * &enum nl80211_nan_fsd (u32). * @NL80211_NAN_FUNC_NDP_TYPE: relevant if the function's type is publish. If * NAN data path is required for the service, specifies the required NDP * type as specified by &enum nl80211_nan_ndp_type (u32). * @NL80211_NAN_FUNC_RANGE_LIMIT_INGRESS: specifies the ingress range limit for * the service in centimeters (u16). * @NL80211_NAN_FUNC_RANGE_LIMIT_EGRESS: specifies the egress range limit for * the service in centimeters (u16). * @NL80211_NAN_FUNC_AWAKE_DW_INTERVAL: specifies the interval between two * discovery windows in which the device shall be awake to transmit or * receive SDFs. The interval will be 2^dw_interval * 512 TUs. Valid values * are 0 - 4. * * @NUM_NL80211_NAN_FUNC_ATTR: internal * @NL80211_NAN_FUNC_ATTR_MAX: highest NAN function attribute */ enum nl80211_nan_func_attributes { __NL80211_NAN_FUNC_INVALID, NL80211_NAN_FUNC_TYPE, NL80211_NAN_FUNC_SERVICE_ID, NL80211_NAN_FUNC_PUBLISH_TYPE, NL80211_NAN_FUNC_PUBLISH_BCAST, NL80211_NAN_FUNC_SUBSCRIBE_ACTIVE, NL80211_NAN_FUNC_FOLLOW_UP_ID, NL80211_NAN_FUNC_FOLLOW_UP_REQ_ID, NL80211_NAN_FUNC_FOLLOW_UP_DEST, NL80211_NAN_FUNC_CLOSE_RANGE, NL80211_NAN_FUNC_TTL, NL80211_NAN_FUNC_SERVICE_INFO, NL80211_NAN_FUNC_SRF, NL80211_NAN_FUNC_RX_MATCH_FILTER, NL80211_NAN_FUNC_TX_MATCH_FILTER, NL80211_NAN_FUNC_INSTANCE_ID, NL80211_NAN_FUNC_TERM_REASON, NL80211_NAN_FUNC_SECURITY_CIPHER_SUITES, NL80211_NAN_FUNC_SECURITY_CTX_IDS, NL80211_NAN_FUNC_FSD, NL80211_NAN_FUNC_NDP_TYPE, NL80211_NAN_FUNC_RANGE_LIMIT_INGRESS, NL80211_NAN_FUNC_RANGE_LIMIT_EGRESS, NL80211_NAN_FUNC_AWAKE_DW_INTERVAL, /* keep last */ NUM_NL80211_NAN_FUNC_ATTR, NL80211_NAN_FUNC_ATTR_MAX = NUM_NL80211_NAN_FUNC_ATTR - 1 }; /** * enum nl80211_nan_srf_attributes - NAN Service Response filter attributes * @__NL80211_NAN_SRF_INVALID: invalid * @NL80211_NAN_SRF_INCLUDE: present if the include bit of the SRF is set. * This is a flag. * @NL80211_NAN_SRF_BF: Bloom Filter. Present if and only if * %NL80211_NAN_SRF_MAC_ADDRS isn't present. This attribute is binary. * @NL80211_NAN_SRF_BF_IDX: index of the Bloom Filter. Mandatory if * %NL80211_NAN_SRF_BF is present. This is a u8. * @NL80211_NAN_SRF_MAC_ADDRS: list of MAC addresses for the SRF. Present if * and only if %NL80211_NAN_SRF_BF isn't present. This is a nested * attribute. Each nested attribute is a MAC address. * @NUM_NL80211_NAN_SRF_ATTR: internal * @NL80211_NAN_SRF_ATTR_MAX: highest NAN SRF attribute */ enum nl80211_nan_srf_attributes { __NL80211_NAN_SRF_INVALID, NL80211_NAN_SRF_INCLUDE, NL80211_NAN_SRF_BF, NL80211_NAN_SRF_BF_IDX, NL80211_NAN_SRF_MAC_ADDRS, /* keep last */ NUM_NL80211_NAN_SRF_ATTR, NL80211_NAN_SRF_ATTR_MAX = NUM_NL80211_NAN_SRF_ATTR - 1, }; /** * enum nl80211_nan_match_attributes - NAN match attributes * @__NL80211_NAN_MATCH_INVALID: invalid * @NL80211_NAN_MATCH_FUNC_LOCAL: the local function that had the * match. This is a nested attribute. * See &enum nl80211_nan_func_attributes. * @NL80211_NAN_MATCH_FUNC_PEER: the peer function * that caused the match. This is a nested attribute. * See &enum nl80211_nan_func_attributes. * @NL80211_NAN_MATCH_DEVICE_ATTRS: attributes from the service discovery frame * with information about the peer device (e.g. device capabilities, * availability attribute etc).
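*
* A minimal sketch of parsing such a match event, assuming libnl's
* nla_parse_nested() and a caller-provided tb[] array (handle_match()
* is a hypothetical consumer):
*
*	struct nlattr *tb[NL80211_NAN_MATCH_ATTR_MAX + 1];
*
*	nla_parse_nested(tb, NL80211_NAN_MATCH_ATTR_MAX, match, NULL);
*	if (tb[NL80211_NAN_MATCH_FUNC_LOCAL] && tb[NL80211_NAN_MATCH_FUNC_PEER])
*		handle_match(tb[NL80211_NAN_MATCH_FUNC_LOCAL],
*			     tb[NL80211_NAN_MATCH_FUNC_PEER]);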
* * @NUM_NL80211_NAN_MATCH_ATTR: internal * @NL80211_NAN_MATCH_ATTR_MAX: highest NAN match attribute */ enum nl80211_nan_match_attributes { __NL80211_NAN_MATCH_INVALID, NL80211_NAN_MATCH_FUNC_LOCAL, NL80211_NAN_MATCH_FUNC_PEER, NL80211_NAN_MATCH_DEVICE_ATTRS, /* keep last */ NUM_NL80211_NAN_MATCH_ATTR, NL80211_NAN_MATCH_ATTR_MAX = NUM_NL80211_NAN_MATCH_ATTR - 1 }; /** * enum nl80211_external_auth_action - Action to perform with external * authentication request. Used by NL80211_ATTR_EXTERNAL_AUTH_ACTION. * @NL80211_EXTERNAL_AUTH_START: Start the authentication. * @NL80211_EXTERNAL_AUTH_ABORT: Abort the ongoing authentication. */ enum nl80211_external_auth_action { NL80211_EXTERNAL_AUTH_START, NL80211_EXTERNAL_AUTH_ABORT, }; /** * enum nl80211_ftm_responder_attributes - fine timing measurement * responder attributes * @__NL80211_FTM_RESP_ATTR_INVALID: Invalid * @NL80211_FTM_RESP_ATTR_ENABLED: FTM responder is enabled * @NL80211_FTM_RESP_ATTR_LCI: The content of Measurement Report Element * (9.4.2.22 in 802.11-2016) with type 8 - LCI (9.4.2.22.10), * i.e. starting with the measurement token * @NL80211_FTM_RESP_ATTR_CIVICLOC: The content of Measurement Report Element * (9.4.2.22 in 802.11-2016) with type 11 - Civic (Section 9.4.2.22.13), * i.e. starting with the measurement token * @__NL80211_FTM_RESP_ATTR_LAST: Internal * @NL80211_FTM_RESP_ATTR_MAX: highest FTM responder attribute. */ enum nl80211_ftm_responder_attributes { __NL80211_FTM_RESP_ATTR_INVALID, NL80211_FTM_RESP_ATTR_ENABLED, NL80211_FTM_RESP_ATTR_LCI, NL80211_FTM_RESP_ATTR_CIVICLOC, /* keep last */ __NL80211_FTM_RESP_ATTR_LAST, NL80211_FTM_RESP_ATTR_MAX = __NL80211_FTM_RESP_ATTR_LAST - 1, }; /** * enum nl80211_ftm_responder_stats - FTM responder statistics * * These attribute types are used with %NL80211_ATTR_FTM_RESPONDER_STATS * when getting FTM responder statistics.
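*
* For example, once the attributes have been parsed into a tb[] array
* (libnl assumed; check attribute presence before reading in real code),
* responder health can be gauged as:
*
*	u32 ok = nla_get_u32(tb[NL80211_FTM_STATS_SUCCESS_NUM]);
*	u32 failed = nla_get_u32(tb[NL80211_FTM_STATS_FAILED_NUM]);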
* * @__NL80211_FTM_STATS_INVALID: attribute number 0 is reserved * @NL80211_FTM_STATS_SUCCESS_NUM: number of FTM sessions in which all frames * were successfully answered (u32) * @NL80211_FTM_STATS_PARTIAL_NUM: number of FTM sessions in which part of the * frames were successfully answered (u32) * @NL80211_FTM_STATS_FAILED_NUM: number of failed FTM sessions (u32) * @NL80211_FTM_STATS_ASAP_NUM: number of ASAP sessions (u32) * @NL80211_FTM_STATS_NON_ASAP_NUM: number of non-ASAP sessions (u32) * @NL80211_FTM_STATS_TOTAL_DURATION_MSEC: total sessions durations - gives an * indication of how much time the responder was busy (u64, msec) * @NL80211_FTM_STATS_UNKNOWN_TRIGGERS_NUM: number of unknown FTM triggers - * triggers from initiators that didn't finish successfully the negotiation * phase with the responder (u32) * @NL80211_FTM_STATS_RESCHEDULE_REQUESTS_NUM: number of FTM reschedule requests * - initiator asks for a new scheduling although it already has scheduled * FTM slot (u32) * @NL80211_FTM_STATS_OUT_OF_WINDOW_TRIGGERS_NUM: number of FTM triggers out of * scheduled window (u32) * @NL80211_FTM_STATS_PAD: used for padding, ignore * @__NL80211_FTM_STATS_AFTER_LAST: Internal * @NL80211_FTM_STATS_MAX: highest possible FTM responder stats attribute */ enum nl80211_ftm_responder_stats { __NL80211_FTM_STATS_INVALID, NL80211_FTM_STATS_SUCCESS_NUM, NL80211_FTM_STATS_PARTIAL_NUM, NL80211_FTM_STATS_FAILED_NUM, NL80211_FTM_STATS_ASAP_NUM, NL80211_FTM_STATS_NON_ASAP_NUM, NL80211_FTM_STATS_TOTAL_DURATION_MSEC, NL80211_FTM_STATS_UNKNOWN_TRIGGERS_NUM, NL80211_FTM_STATS_RESCHEDULE_REQUESTS_NUM, NL80211_FTM_STATS_OUT_OF_WINDOW_TRIGGERS_NUM, NL80211_FTM_STATS_PAD, /* keep last */ __NL80211_FTM_STATS_AFTER_LAST, NL80211_FTM_STATS_MAX = __NL80211_FTM_STATS_AFTER_LAST - 1 }; /** * enum nl80211_preamble - frame preamble types * @NL80211_PREAMBLE_LEGACY: legacy (HR/DSSS, OFDM, ERP PHY) preamble * @NL80211_PREAMBLE_HT: HT preamble * @NL80211_PREAMBLE_VHT: VHT preamble * @NL80211_PREAMBLE_DMG: DMG preamble */ enum nl80211_preamble { NL80211_PREAMBLE_LEGACY, NL80211_PREAMBLE_HT, NL80211_PREAMBLE_VHT, NL80211_PREAMBLE_DMG, }; /** * enum nl80211_peer_measurement_type - peer measurement types * @NL80211_PMSR_TYPE_INVALID: invalid/unused, needed as we use * these numbers also for attributes * * @NL80211_PMSR_TYPE_FTM: flight time measurement * * @NUM_NL80211_PMSR_TYPES: internal * @NL80211_PMSR_TYPE_MAX: highest type number */ enum nl80211_peer_measurement_type { NL80211_PMSR_TYPE_INVALID, NL80211_PMSR_TYPE_FTM, NUM_NL80211_PMSR_TYPES, NL80211_PMSR_TYPE_MAX = NUM_NL80211_PMSR_TYPES - 1 }; /** * enum nl80211_peer_measurement_status - peer measurement status * @NL80211_PMSR_STATUS_SUCCESS: measurement completed successfully * @NL80211_PMSR_STATUS_REFUSED: measurement was locally refused * @NL80211_PMSR_STATUS_TIMEOUT: measurement timed out * @NL80211_PMSR_STATUS_FAILURE: measurement failed, a type-dependent * reason may be available in the response data */ enum nl80211_peer_measurement_status { NL80211_PMSR_STATUS_SUCCESS, NL80211_PMSR_STATUS_REFUSED, NL80211_PMSR_STATUS_TIMEOUT, NL80211_PMSR_STATUS_FAILURE, }; /** * enum nl80211_peer_measurement_req - peer measurement request attributes * @__NL80211_PMSR_REQ_ATTR_INVALID: invalid * * @NL80211_PMSR_REQ_ATTR_DATA: This is a nested attribute with measurement * type-specific request data inside. The attributes used are from the * enums named nl80211_peer_measurement_<type>_req.
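* For %NL80211_PMSR_TYPE_FTM, for example, the nested data carries
* NL80211_PMSR_FTM_REQ_ATTR_* attributes; a minimal libnl sketch,
* assuming msg is the request message being built:
*
*	data = nla_nest_start(msg, NL80211_PMSR_REQ_ATTR_DATA);
*	nla_put_flag(msg, NL80211_PMSR_FTM_REQ_ATTR_ASAP);
*	nla_nest_end(msg, data);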
* @NL80211_PMSR_REQ_ATTR_GET_AP_TSF: include AP TSF timestamp, if supported * (flag attribute) * * @NUM_NL80211_PMSR_REQ_ATTRS: internal * @NL80211_PMSR_REQ_ATTR_MAX: highest attribute number */ enum nl80211_peer_measurement_req { __NL80211_PMSR_REQ_ATTR_INVALID, NL80211_PMSR_REQ_ATTR_DATA, NL80211_PMSR_REQ_ATTR_GET_AP_TSF, /* keep last */ NUM_NL80211_PMSR_REQ_ATTRS, NL80211_PMSR_REQ_ATTR_MAX = NUM_NL80211_PMSR_REQ_ATTRS - 1 }; /** * enum nl80211_peer_measurement_resp - peer measurement response attributes * @__NL80211_PMSR_RESP_ATTR_INVALID: invalid * * @NL80211_PMSR_RESP_ATTR_DATA: This is a nested attribute with measurement * type-specific results inside. The attributes used are from the enums * named nl80211_peer_measurement_<type>_resp. * @NL80211_PMSR_RESP_ATTR_STATUS: u32 value with the measurement status * (using values from &enum nl80211_peer_measurement_status.) * @NL80211_PMSR_RESP_ATTR_HOST_TIME: host time (%CLOCK_BOOTTIME) when the * result was measured; this value is not expected to be accurate to * more than 20ms. (u64, nanoseconds) * @NL80211_PMSR_RESP_ATTR_AP_TSF: TSF of the AP that the interface * doing the measurement is connected to when the result was measured. * This shall be accurately reported if supported and requested * (u64, usec) * @NL80211_PMSR_RESP_ATTR_FINAL: If results are sent to the host partially * (e.g. with FTM per-burst data) this flag will be cleared on all but * the last result; if all results are combined it's set on the single * result. * @NL80211_PMSR_RESP_ATTR_PAD: padding for 64-bit attributes, ignore * * @NUM_NL80211_PMSR_RESP_ATTRS: internal * @NL80211_PMSR_RESP_ATTR_MAX: highest attribute number */ enum nl80211_peer_measurement_resp { __NL80211_PMSR_RESP_ATTR_INVALID, NL80211_PMSR_RESP_ATTR_DATA, NL80211_PMSR_RESP_ATTR_STATUS, NL80211_PMSR_RESP_ATTR_HOST_TIME, NL80211_PMSR_RESP_ATTR_AP_TSF, NL80211_PMSR_RESP_ATTR_FINAL, NL80211_PMSR_RESP_ATTR_PAD, /* keep last */ NUM_NL80211_PMSR_RESP_ATTRS, NL80211_PMSR_RESP_ATTR_MAX = NUM_NL80211_PMSR_RESP_ATTRS - 1 }; /** * enum nl80211_peer_measurement_peer_attrs - peer attributes for measurement * @__NL80211_PMSR_PEER_ATTR_INVALID: invalid * * @NL80211_PMSR_PEER_ATTR_ADDR: peer's MAC address * @NL80211_PMSR_PEER_ATTR_CHAN: channel definition, nested, using top-level * attributes like %NL80211_ATTR_WIPHY_FREQ etc. * @NL80211_PMSR_PEER_ATTR_REQ: This is a nested attribute indexed by * measurement type, with attributes from the * &enum nl80211_peer_measurement_req inside. * @NL80211_PMSR_PEER_ATTR_RESP: This is a nested attribute indexed by * measurement type, with attributes from the * &enum nl80211_peer_measurement_resp inside.
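*
* A minimal sketch of building a single FTM peer entry, assuming libnl
* and a six-byte mac buffer (error handling omitted):
*
*	peer = nla_nest_start(msg, 1);
*	nla_put(msg, NL80211_PMSR_PEER_ATTR_ADDR, 6, mac);
*	req = nla_nest_start(msg, NL80211_PMSR_PEER_ATTR_REQ);
*	ftm = nla_nest_start(msg, NL80211_PMSR_TYPE_FTM);
*	nla_put_flag(msg, NL80211_PMSR_REQ_ATTR_GET_AP_TSF);
*	nla_nest_end(msg, ftm);
*	nla_nest_end(msg, req);
*	nla_nest_end(msg, peer);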
* * @NUM_NL80211_PMSR_PEER_ATTRS: internal * @NL80211_PMSR_PEER_ATTR_MAX: highest attribute number */ enum nl80211_peer_measurement_peer_attrs { __NL80211_PMSR_PEER_ATTR_INVALID, NL80211_PMSR_PEER_ATTR_ADDR, NL80211_PMSR_PEER_ATTR_CHAN, NL80211_PMSR_PEER_ATTR_REQ, NL80211_PMSR_PEER_ATTR_RESP, /* keep last */ NUM_NL80211_PMSR_PEER_ATTRS, NL80211_PMSR_PEER_ATTR_MAX = NUM_NL80211_PMSR_PEER_ATTRS - 1, }; /** * enum nl80211_peer_measurement_attrs - peer measurement attributes * @__NL80211_PMSR_ATTR_INVALID: invalid * * @NL80211_PMSR_ATTR_MAX_PEERS: u32 attribute used for capability * advertisement only, indicates the maximum number of peers * measurements can be done with in a single request * @NL80211_PMSR_ATTR_REPORT_AP_TSF: flag attribute in capability * indicating that the connected AP's TSF can be reported in * measurement results * @NL80211_PMSR_ATTR_RANDOMIZE_MAC_ADDR: flag attribute in capability * indicating that MAC address randomization is supported. * @NL80211_PMSR_ATTR_TYPE_CAPA: capabilities reported by the device, * this contains a nesting indexed by measurement type, and * type-specific capabilities inside, which are from the enums * named nl80211_peer_measurement_<type>_capa. * @NL80211_PMSR_ATTR_PEERS: nested attribute, the nesting index is * meaningless, just a list of peers to measure with, with the * sub-attributes taken from * &enum nl80211_peer_measurement_peer_attrs. * * @NUM_NL80211_PMSR_ATTR: internal * @NL80211_PMSR_ATTR_MAX: highest attribute number */ enum nl80211_peer_measurement_attrs { __NL80211_PMSR_ATTR_INVALID, NL80211_PMSR_ATTR_MAX_PEERS, NL80211_PMSR_ATTR_REPORT_AP_TSF, NL80211_PMSR_ATTR_RANDOMIZE_MAC_ADDR, NL80211_PMSR_ATTR_TYPE_CAPA, NL80211_PMSR_ATTR_PEERS, /* keep last */ NUM_NL80211_PMSR_ATTR, NL80211_PMSR_ATTR_MAX = NUM_NL80211_PMSR_ATTR - 1 }; /** * enum nl80211_peer_measurement_ftm_capa - FTM capabilities * @__NL80211_PMSR_FTM_CAPA_ATTR_INVALID: invalid * * @NL80211_PMSR_FTM_CAPA_ATTR_ASAP: flag attribute indicating ASAP mode * is supported * @NL80211_PMSR_FTM_CAPA_ATTR_NON_ASAP: flag attribute indicating non-ASAP * mode is supported * @NL80211_PMSR_FTM_CAPA_ATTR_REQ_LCI: flag attribute indicating if LCI * data can be requested during the measurement * @NL80211_PMSR_FTM_CAPA_ATTR_REQ_CIVICLOC: flag attribute indicating if civic * location data can be requested during the measurement * @NL80211_PMSR_FTM_CAPA_ATTR_PREAMBLES: u32 bitmap attribute of bits * from &enum nl80211_preamble. * @NL80211_PMSR_FTM_CAPA_ATTR_BANDWIDTHS: bitmap of values from * &enum nl80211_chan_width indicating the supported channel * bandwidths for FTM. Note that a higher channel bandwidth may be * configured to allow for other measurement types with different * bandwidth requirements in the same measurement.
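* For example, with the capability attributes parsed into tb[] (libnl
* assumed), DMG preamble support can be tested as:
*
*	preambles = nla_get_u32(tb[NL80211_PMSR_FTM_CAPA_ATTR_PREAMBLES]);
*	dmg_supported = preambles & (1U << NL80211_PREAMBLE_DMG);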
* @NL80211_PMSR_FTM_CAPA_ATTR_MAX_BURSTS_EXPONENT: u32 attribute indicating * the maximum bursts exponent that can be used (if not present anything * is valid) * @NL80211_PMSR_FTM_CAPA_ATTR_MAX_FTMS_PER_BURST: u32 attribute indicating * the maximum FTMs per burst (if not present anything is valid) * * @NUM_NL80211_PMSR_FTM_CAPA_ATTR: internal * @NL80211_PMSR_FTM_CAPA_ATTR_MAX: highest attribute number */ enum nl80211_peer_measurement_ftm_capa { __NL80211_PMSR_FTM_CAPA_ATTR_INVALID, NL80211_PMSR_FTM_CAPA_ATTR_ASAP, NL80211_PMSR_FTM_CAPA_ATTR_NON_ASAP, NL80211_PMSR_FTM_CAPA_ATTR_REQ_LCI, NL80211_PMSR_FTM_CAPA_ATTR_REQ_CIVICLOC, NL80211_PMSR_FTM_CAPA_ATTR_PREAMBLES, NL80211_PMSR_FTM_CAPA_ATTR_BANDWIDTHS, NL80211_PMSR_FTM_CAPA_ATTR_MAX_BURSTS_EXPONENT, NL80211_PMSR_FTM_CAPA_ATTR_MAX_FTMS_PER_BURST, /* keep last */ NUM_NL80211_PMSR_FTM_CAPA_ATTR, NL80211_PMSR_FTM_CAPA_ATTR_MAX = NUM_NL80211_PMSR_FTM_CAPA_ATTR - 1 }; /** * enum nl80211_peer_measurement_ftm_req - FTM request attributes * @__NL80211_PMSR_FTM_REQ_ATTR_INVALID: invalid * * @NL80211_PMSR_FTM_REQ_ATTR_ASAP: ASAP mode requested (flag) * @NL80211_PMSR_FTM_REQ_ATTR_PREAMBLE: preamble type (see * &enum nl80211_preamble), optional for DMG (u32) * @NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP: number of bursts exponent as in * 802.11-2016 9.4.2.168 "Fine Timing Measurement Parameters element" * (u8, 0-15, optional with default 15 i.e. "no preference") * @NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD: interval between bursts in units * of 100ms (u16, optional with default 0) * @NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION: burst duration, as in 802.11-2016 * Table 9-257 "Burst Duration field encoding" (u8, 0-15, optional with * default 15 i.e. "no preference") * @NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST: number of successful FTM frames * requested per burst * (u8, 0-31, optional with default 0 i.e. 
"no preference") * @NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES: number of FTMR frame retries * (u8, default 3) * @NL80211_PMSR_FTM_REQ_ATTR_REQUEST_LCI: request LCI data (flag) * @NL80211_PMSR_FTM_REQ_ATTR_REQUEST_CIVICLOC: request civic location data * (flag) * * @NUM_NL80211_PMSR_FTM_REQ_ATTR: internal * @NL80211_PMSR_FTM_REQ_ATTR_MAX: highest attribute number */ enum nl80211_peer_measurement_ftm_req { __NL80211_PMSR_FTM_REQ_ATTR_INVALID, NL80211_PMSR_FTM_REQ_ATTR_ASAP, NL80211_PMSR_FTM_REQ_ATTR_PREAMBLE, NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP, NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD, NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION, NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST, NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES, NL80211_PMSR_FTM_REQ_ATTR_REQUEST_LCI, NL80211_PMSR_FTM_REQ_ATTR_REQUEST_CIVICLOC, /* keep last */ NUM_NL80211_PMSR_FTM_REQ_ATTR, NL80211_PMSR_FTM_REQ_ATTR_MAX = NUM_NL80211_PMSR_FTM_REQ_ATTR - 1 }; /** * enum nl80211_peer_measurement_ftm_failure_reasons - FTM failure reasons * @NL80211_PMSR_FTM_FAILURE_UNSPECIFIED: unspecified failure, not used * @NL80211_PMSR_FTM_FAILURE_NO_RESPONSE: no response from the FTM responder * @NL80211_PMSR_FTM_FAILURE_REJECTED: FTM responder rejected measurement * @NL80211_PMSR_FTM_FAILURE_WRONG_CHANNEL: we already know the peer is * on a different channel, so can't measure (if we didn't know, we'd * try and get no response) * @NL80211_PMSR_FTM_FAILURE_PEER_NOT_CAPABLE: peer can't actually do FTM * @NL80211_PMSR_FTM_FAILURE_INVALID_TIMESTAMP: invalid T1/T4 timestamps * received * @NL80211_PMSR_FTM_FAILURE_PEER_BUSY: peer reports busy, you may retry * later (see %NL80211_PMSR_FTM_RESP_ATTR_BUSY_RETRY_TIME) * @NL80211_PMSR_FTM_FAILURE_BAD_CHANGED_PARAMS: parameters were changed * by the peer and are no longer supported */ enum nl80211_peer_measurement_ftm_failure_reasons { NL80211_PMSR_FTM_FAILURE_UNSPECIFIED, NL80211_PMSR_FTM_FAILURE_NO_RESPONSE, NL80211_PMSR_FTM_FAILURE_REJECTED, NL80211_PMSR_FTM_FAILURE_WRONG_CHANNEL, NL80211_PMSR_FTM_FAILURE_PEER_NOT_CAPABLE, NL80211_PMSR_FTM_FAILURE_INVALID_TIMESTAMP, NL80211_PMSR_FTM_FAILURE_PEER_BUSY, NL80211_PMSR_FTM_FAILURE_BAD_CHANGED_PARAMS, }; /** * enum nl80211_peer_measurement_ftm_resp - FTM response attributes * @__NL80211_PMSR_FTM_RESP_ATTR_INVALID: invalid * * @NL80211_PMSR_FTM_RESP_ATTR_FAIL_REASON: FTM-specific failure reason * (u32, optional) * @NL80211_PMSR_FTM_RESP_ATTR_BURST_INDEX: optional, if bursts are reported * as separate results then it will be the burst index 0...(N-1) and * the top level will indicate partial results (u32) * @NL80211_PMSR_FTM_RESP_ATTR_NUM_FTMR_ATTEMPTS: number of FTM Request frames * transmitted (u32, optional) * @NL80211_PMSR_FTM_RESP_ATTR_NUM_FTMR_SUCCESSES: number of FTM Request frames * that were acknowleged (u32, optional) * @NL80211_PMSR_FTM_RESP_ATTR_BUSY_RETRY_TIME: retry time received from the * busy peer (u32, seconds) * @NL80211_PMSR_FTM_RESP_ATTR_NUM_BURSTS_EXP: actual number of bursts exponent * used by the responder (similar to request, u8) * @NL80211_PMSR_FTM_RESP_ATTR_BURST_DURATION: actual burst duration used by * the responder (similar to request, u8) * @NL80211_PMSR_FTM_RESP_ATTR_FTMS_PER_BURST: actual FTMs per burst used * by the responder (similar to request, u8) * @NL80211_PMSR_FTM_RESP_ATTR_RSSI_AVG: average RSSI across all FTM action * frames (optional, s32, 1/2 dBm) * @NL80211_PMSR_FTM_RESP_ATTR_RSSI_SPREAD: RSSI spread across all FTM action * frames (optional, s32, 1/2 dBm) * @NL80211_PMSR_FTM_RESP_ATTR_TX_RATE: bitrate we used for the response 
to the * FTM action frame (optional, nested, using &enum nl80211_rate_info * attributes) * @NL80211_PMSR_FTM_RESP_ATTR_RX_RATE: bitrate the responder used for the FTM * action frame (optional, nested, using &enum nl80211_rate_info attrs) * @NL80211_PMSR_FTM_RESP_ATTR_RTT_AVG: average RTT (s64, picoseconds, optional * but one of RTT/DIST must be present) * @NL80211_PMSR_FTM_RESP_ATTR_RTT_VARIANCE: RTT variance (u64, ps^2, note that * standard deviation is the square root of variance, optional) * @NL80211_PMSR_FTM_RESP_ATTR_RTT_SPREAD: RTT spread (u64, picoseconds, * optional) * @NL80211_PMSR_FTM_RESP_ATTR_DIST_AVG: average distance (s64, mm, optional * but one of RTT/DIST must be present) * @NL80211_PMSR_FTM_RESP_ATTR_DIST_VARIANCE: distance variance (u64, mm^2, note * that standard deviation is the square root of variance, optional) * @NL80211_PMSR_FTM_RESP_ATTR_DIST_SPREAD: distance spread (u64, mm, optional) * @NL80211_PMSR_FTM_RESP_ATTR_LCI: LCI data from peer (binary, optional); * this is the contents of the Measurement Report Element (802.11-2016 * 9.4.2.22.1) starting with the Measurement Token, with Measurement * Type 8. * @NL80211_PMSR_FTM_RESP_ATTR_CIVICLOC: civic location data from peer * (binary, optional); * this is the contents of the Measurement Report Element (802.11-2016 * 9.4.2.22.1) starting with the Measurement Token, with Measurement * Type 11. * @NL80211_PMSR_FTM_RESP_ATTR_PAD: ignore, for u64/s64 padding only * * @NUM_NL80211_PMSR_FTM_RESP_ATTR: internal * @NL80211_PMSR_FTM_RESP_ATTR_MAX: highest attribute number */ enum nl80211_peer_measurement_ftm_resp { __NL80211_PMSR_FTM_RESP_ATTR_INVALID, NL80211_PMSR_FTM_RESP_ATTR_FAIL_REASON, NL80211_PMSR_FTM_RESP_ATTR_BURST_INDEX, NL80211_PMSR_FTM_RESP_ATTR_NUM_FTMR_ATTEMPTS, NL80211_PMSR_FTM_RESP_ATTR_NUM_FTMR_SUCCESSES, NL80211_PMSR_FTM_RESP_ATTR_BUSY_RETRY_TIME, NL80211_PMSR_FTM_RESP_ATTR_NUM_BURSTS_EXP, NL80211_PMSR_FTM_RESP_ATTR_BURST_DURATION, NL80211_PMSR_FTM_RESP_ATTR_FTMS_PER_BURST, NL80211_PMSR_FTM_RESP_ATTR_RSSI_AVG, NL80211_PMSR_FTM_RESP_ATTR_RSSI_SPREAD, NL80211_PMSR_FTM_RESP_ATTR_TX_RATE, NL80211_PMSR_FTM_RESP_ATTR_RX_RATE, NL80211_PMSR_FTM_RESP_ATTR_RTT_AVG, NL80211_PMSR_FTM_RESP_ATTR_RTT_VARIANCE, NL80211_PMSR_FTM_RESP_ATTR_RTT_SPREAD, NL80211_PMSR_FTM_RESP_ATTR_DIST_AVG, NL80211_PMSR_FTM_RESP_ATTR_DIST_VARIANCE, NL80211_PMSR_FTM_RESP_ATTR_DIST_SPREAD, NL80211_PMSR_FTM_RESP_ATTR_LCI, NL80211_PMSR_FTM_RESP_ATTR_CIVICLOC, NL80211_PMSR_FTM_RESP_ATTR_PAD, /* keep last */ NUM_NL80211_PMSR_FTM_RESP_ATTR, NL80211_PMSR_FTM_RESP_ATTR_MAX = NUM_NL80211_PMSR_FTM_RESP_ATTR - 1 }; #endif /* __LINUX_NL80211_H */ backport-iwlwifi-dkms-7906/include/uapi/linux/pci_regs.h000066400000000000000000001537351351064634500234030ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ /* * pci_regs.h * * PCI standard defines * Copyright 1994, Drew Eckhardt * Copyright 1997--1999 Martin Mares * * For more information, please consult the following manuals (look at * http://www.pcisig.com/ for how to get them): * * PCI BIOS Specification * PCI Local Bus Specification * PCI to PCI Bridge Specification * PCI System Design Guide * * For HyperTransport information, please consult the following manuals * from http://www.hypertransport.org * * The HyperTransport I/O Link Specification */ #ifndef LINUX_PCI_REGS_H #define LINUX_PCI_REGS_H /* * Conventional PCI and PCI-X Mode 1 devices have 256 bytes of * configuration space. PCI-X Mode 2 and PCIe devices have 4096 bytes of * configuration space. 
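*
* For example, the extended capability list of a PCIe device starts at
* offset 0x100 and must stay below PCI_CFG_SPACE_EXP_SIZE; a walk with
* the kernel's config accessors (see the PCI_EXT_CAP_* helpers further
* down) might look like:
*
*	int pos = 0x100;
*
*	while (pos && pos < PCI_CFG_SPACE_EXP_SIZE) {
*		pci_read_config_dword(dev, pos, &header);
*		if (PCI_EXT_CAP_ID(header) == PCI_EXT_CAP_ID_DSN)
*			break;
*		pos = PCI_EXT_CAP_NEXT(header);
*	}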
*/ #define PCI_CFG_SPACE_SIZE 256 #define PCI_CFG_SPACE_EXP_SIZE 4096 /* * Under PCI, each device has 256 bytes of configuration address space, * of which the first 64 bytes are standardized as follows: */ #define PCI_STD_HEADER_SIZEOF 64 #define PCI_VENDOR_ID 0x00 /* 16 bits */ #define PCI_DEVICE_ID 0x02 /* 16 bits */ #define PCI_COMMAND 0x04 /* 16 bits */ #define PCI_COMMAND_IO 0x1 /* Enable response in I/O space */ #define PCI_COMMAND_MEMORY 0x2 /* Enable response in Memory space */ #define PCI_COMMAND_MASTER 0x4 /* Enable bus mastering */ #define PCI_COMMAND_SPECIAL 0x8 /* Enable response to special cycles */ #define PCI_COMMAND_INVALIDATE 0x10 /* Use memory write and invalidate */ #define PCI_COMMAND_VGA_PALETTE 0x20 /* Enable palette snooping */ #define PCI_COMMAND_PARITY 0x40 /* Enable parity checking */ #define PCI_COMMAND_WAIT 0x80 /* Enable address/data stepping */ #define PCI_COMMAND_SERR 0x100 /* Enable SERR */ #define PCI_COMMAND_FAST_BACK 0x200 /* Enable back-to-back writes */ #define PCI_COMMAND_INTX_DISABLE 0x400 /* INTx Emulation Disable */ #define PCI_STATUS 0x06 /* 16 bits */ #define PCI_STATUS_IMM_READY 0x01 /* Immediate Readiness */ #define PCI_STATUS_INTERRUPT 0x08 /* Interrupt status */ #define PCI_STATUS_CAP_LIST 0x10 /* Support Capability List */ #define PCI_STATUS_66MHZ 0x20 /* Support 66 MHz PCI 2.1 bus */ #define PCI_STATUS_UDF 0x40 /* Support User Definable Features [obsolete] */ #define PCI_STATUS_FAST_BACK 0x80 /* Accept fast-back to back */ #define PCI_STATUS_PARITY 0x100 /* Detected parity error */ #define PCI_STATUS_DEVSEL_MASK 0x600 /* DEVSEL timing */ #define PCI_STATUS_DEVSEL_FAST 0x000 #define PCI_STATUS_DEVSEL_MEDIUM 0x200 #define PCI_STATUS_DEVSEL_SLOW 0x400 #define PCI_STATUS_SIG_TARGET_ABORT 0x800 /* Set on target abort */ #define PCI_STATUS_REC_TARGET_ABORT 0x1000 /* Master ack of " */ #define PCI_STATUS_REC_MASTER_ABORT 0x2000 /* Set on master abort */ #define PCI_STATUS_SIG_SYSTEM_ERROR 0x4000 /* Set when we drive SERR */ #define PCI_STATUS_DETECTED_PARITY 0x8000 /* Set on parity error */ #define PCI_CLASS_REVISION 0x08 /* High 24 bits are class, low 8 revision */ #define PCI_REVISION_ID 0x08 /* Revision ID */ #define PCI_CLASS_PROG 0x09 /* Reg. Level Programming Interface */ #define PCI_CLASS_DEVICE 0x0a /* Device class */ #define PCI_CACHE_LINE_SIZE 0x0c /* 8 bits */ #define PCI_LATENCY_TIMER 0x0d /* 8 bits */ #define PCI_HEADER_TYPE 0x0e /* 8 bits */ #define PCI_HEADER_TYPE_NORMAL 0 #define PCI_HEADER_TYPE_BRIDGE 1 #define PCI_HEADER_TYPE_CARDBUS 2 #define PCI_BIST 0x0f /* 8 bits */ #define PCI_BIST_CODE_MASK 0x0f /* Return result */ #define PCI_BIST_START 0x40 /* 1 to start BIST, 2 secs or less */ #define PCI_BIST_CAPABLE 0x80 /* 1 if BIST capable */ /* * Base addresses specify locations in memory or I/O space. * Decoded size can be determined by writing a value of * 0xffffffff to the register, and reading it back. Only * 1 bits are decoded. 
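*
* A minimal sizing sketch using the kernel's config accessors (a memory
* BAR is assumed; real code must also quiesce the device while the BAR
* holds the all-ones probe value):
*
*	u32 orig, sz, size;
*
*	pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, &orig);
*	pci_write_config_dword(dev, PCI_BASE_ADDRESS_0, 0xffffffff);
*	pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, &sz);
*	pci_write_config_dword(dev, PCI_BASE_ADDRESS_0, orig);
*	sz &= PCI_BASE_ADDRESS_MEM_MASK;
*	size = ~sz + 1;		/* lowest decoded bit gives the size */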
*/ #define PCI_BASE_ADDRESS_0 0x10 /* 32 bits */ #define PCI_BASE_ADDRESS_1 0x14 /* 32 bits [htype 0,1 only] */ #define PCI_BASE_ADDRESS_2 0x18 /* 32 bits [htype 0 only] */ #define PCI_BASE_ADDRESS_3 0x1c /* 32 bits */ #define PCI_BASE_ADDRESS_4 0x20 /* 32 bits */ #define PCI_BASE_ADDRESS_5 0x24 /* 32 bits */ #define PCI_BASE_ADDRESS_SPACE 0x01 /* 0 = memory, 1 = I/O */ #define PCI_BASE_ADDRESS_SPACE_IO 0x01 #define PCI_BASE_ADDRESS_SPACE_MEMORY 0x00 #define PCI_BASE_ADDRESS_MEM_TYPE_MASK 0x06 #define PCI_BASE_ADDRESS_MEM_TYPE_32 0x00 /* 32 bit address */ #define PCI_BASE_ADDRESS_MEM_TYPE_1M 0x02 /* Below 1M [obsolete] */ #define PCI_BASE_ADDRESS_MEM_TYPE_64 0x04 /* 64 bit address */ #define PCI_BASE_ADDRESS_MEM_PREFETCH 0x08 /* prefetchable? */ #define PCI_BASE_ADDRESS_MEM_MASK (~0x0fUL) #define PCI_BASE_ADDRESS_IO_MASK (~0x03UL) /* bit 1 is reserved if address_space = 1 */ /* Header type 0 (normal devices) */ #define PCI_CARDBUS_CIS 0x28 #define PCI_SUBSYSTEM_VENDOR_ID 0x2c #define PCI_SUBSYSTEM_ID 0x2e #define PCI_ROM_ADDRESS 0x30 /* Bits 31..11 are address, 10..1 reserved */ #define PCI_ROM_ADDRESS_ENABLE 0x01 #define PCI_ROM_ADDRESS_MASK (~0x7ffU) #define PCI_CAPABILITY_LIST 0x34 /* Offset of first capability list entry */ /* 0x35-0x3b are reserved */ #define PCI_INTERRUPT_LINE 0x3c /* 8 bits */ #define PCI_INTERRUPT_PIN 0x3d /* 8 bits */ #define PCI_MIN_GNT 0x3e /* 8 bits */ #define PCI_MAX_LAT 0x3f /* 8 bits */ /* Header type 1 (PCI-to-PCI bridges) */ #define PCI_PRIMARY_BUS 0x18 /* Primary bus number */ #define PCI_SECONDARY_BUS 0x19 /* Secondary bus number */ #define PCI_SUBORDINATE_BUS 0x1a /* Highest bus number behind the bridge */ #define PCI_SEC_LATENCY_TIMER 0x1b /* Latency timer for secondary interface */ #define PCI_IO_BASE 0x1c /* I/O range behind the bridge */ #define PCI_IO_LIMIT 0x1d #define PCI_IO_RANGE_TYPE_MASK 0x0fUL /* I/O bridging type */ #define PCI_IO_RANGE_TYPE_16 0x00 #define PCI_IO_RANGE_TYPE_32 0x01 #define PCI_IO_RANGE_MASK (~0x0fUL) /* Standard 4K I/O windows */ #define PCI_IO_1K_RANGE_MASK (~0x03UL) /* Intel 1K I/O windows */ #define PCI_SEC_STATUS 0x1e /* Secondary status register, only bit 14 used */ #define PCI_MEMORY_BASE 0x20 /* Memory range behind */ #define PCI_MEMORY_LIMIT 0x22 #define PCI_MEMORY_RANGE_TYPE_MASK 0x0fUL #define PCI_MEMORY_RANGE_MASK (~0x0fUL) #define PCI_PREF_MEMORY_BASE 0x24 /* Prefetchable memory range behind */ #define PCI_PREF_MEMORY_LIMIT 0x26 #define PCI_PREF_RANGE_TYPE_MASK 0x0fUL #define PCI_PREF_RANGE_TYPE_32 0x00 #define PCI_PREF_RANGE_TYPE_64 0x01 #define PCI_PREF_RANGE_MASK (~0x0fUL) #define PCI_PREF_BASE_UPPER32 0x28 /* Upper half of prefetchable memory range */ #define PCI_PREF_LIMIT_UPPER32 0x2c #define PCI_IO_BASE_UPPER16 0x30 /* Upper half of I/O addresses */ #define PCI_IO_LIMIT_UPPER16 0x32 /* 0x34 same as for htype 0 */ /* 0x35-0x3b is reserved */ #define PCI_ROM_ADDRESS1 0x38 /* Same as PCI_ROM_ADDRESS, but for htype 1 */ /* 0x3c-0x3d are same as for htype 0 */ #define PCI_BRIDGE_CONTROL 0x3e #define PCI_BRIDGE_CTL_PARITY 0x01 /* Enable parity detection on secondary interface */ #define PCI_BRIDGE_CTL_SERR 0x02 /* The same for SERR forwarding */ #define PCI_BRIDGE_CTL_ISA 0x04 /* Enable ISA mode */ #define PCI_BRIDGE_CTL_VGA 0x08 /* Forward VGA addresses */ #define PCI_BRIDGE_CTL_MASTER_ABORT 0x20 /* Report master aborts */ #define PCI_BRIDGE_CTL_BUS_RESET 0x40 /* Secondary bus reset */ #define PCI_BRIDGE_CTL_FAST_BACK 0x80 /* Fast Back2Back enabled on secondary interface */ /* Header type 2 (CardBus bridges) 
*/ #define PCI_CB_CAPABILITY_LIST 0x14 /* 0x15 reserved */ #define PCI_CB_SEC_STATUS 0x16 /* Secondary status */ #define PCI_CB_PRIMARY_BUS 0x18 /* PCI bus number */ #define PCI_CB_CARD_BUS 0x19 /* CardBus bus number */ #define PCI_CB_SUBORDINATE_BUS 0x1a /* Subordinate bus number */ #define PCI_CB_LATENCY_TIMER 0x1b /* CardBus latency timer */ #define PCI_CB_MEMORY_BASE_0 0x1c #define PCI_CB_MEMORY_LIMIT_0 0x20 #define PCI_CB_MEMORY_BASE_1 0x24 #define PCI_CB_MEMORY_LIMIT_1 0x28 #define PCI_CB_IO_BASE_0 0x2c #define PCI_CB_IO_BASE_0_HI 0x2e #define PCI_CB_IO_LIMIT_0 0x30 #define PCI_CB_IO_LIMIT_0_HI 0x32 #define PCI_CB_IO_BASE_1 0x34 #define PCI_CB_IO_BASE_1_HI 0x36 #define PCI_CB_IO_LIMIT_1 0x38 #define PCI_CB_IO_LIMIT_1_HI 0x3a #define PCI_CB_IO_RANGE_MASK (~0x03UL) /* 0x3c-0x3d are same as for htype 0 */ #define PCI_CB_BRIDGE_CONTROL 0x3e #define PCI_CB_BRIDGE_CTL_PARITY 0x01 /* Similar to standard bridge control register */ #define PCI_CB_BRIDGE_CTL_SERR 0x02 #define PCI_CB_BRIDGE_CTL_ISA 0x04 #define PCI_CB_BRIDGE_CTL_VGA 0x08 #define PCI_CB_BRIDGE_CTL_MASTER_ABORT 0x20 #define PCI_CB_BRIDGE_CTL_CB_RESET 0x40 /* CardBus reset */ #define PCI_CB_BRIDGE_CTL_16BIT_INT 0x80 /* Enable interrupt for 16-bit cards */ #define PCI_CB_BRIDGE_CTL_PREFETCH_MEM0 0x100 /* Prefetch enable for both memory regions */ #define PCI_CB_BRIDGE_CTL_PREFETCH_MEM1 0x200 #define PCI_CB_BRIDGE_CTL_POST_WRITES 0x400 #define PCI_CB_SUBSYSTEM_VENDOR_ID 0x40 #define PCI_CB_SUBSYSTEM_ID 0x42 #define PCI_CB_LEGACY_MODE_BASE 0x44 /* 16-bit PC Card legacy mode base address (ExCa) */ /* 0x48-0x7f reserved */ /* Capability lists */ #define PCI_CAP_LIST_ID 0 /* Capability ID */ #define PCI_CAP_ID_PM 0x01 /* Power Management */ #define PCI_CAP_ID_AGP 0x02 /* Accelerated Graphics Port */ #define PCI_CAP_ID_VPD 0x03 /* Vital Product Data */ #define PCI_CAP_ID_SLOTID 0x04 /* Slot Identification */ #define PCI_CAP_ID_MSI 0x05 /* Message Signalled Interrupts */ #define PCI_CAP_ID_CHSWP 0x06 /* CompactPCI HotSwap */ #define PCI_CAP_ID_PCIX 0x07 /* PCI-X */ #define PCI_CAP_ID_HT 0x08 /* HyperTransport */ #define PCI_CAP_ID_VNDR 0x09 /* Vendor-Specific */ #define PCI_CAP_ID_DBG 0x0A /* Debug port */ #define PCI_CAP_ID_CCRC 0x0B /* CompactPCI Central Resource Control */ #define PCI_CAP_ID_SHPC 0x0C /* PCI Standard Hot-Plug Controller */ #define PCI_CAP_ID_SSVID 0x0D /* Bridge subsystem vendor/device ID */ #define PCI_CAP_ID_AGP3 0x0E /* AGP Target PCI-PCI bridge */ #define PCI_CAP_ID_SECDEV 0x0F /* Secure Device */ #define PCI_CAP_ID_EXP 0x10 /* PCI Express */ #define PCI_CAP_ID_MSIX 0x11 /* MSI-X */ #define PCI_CAP_ID_SATA 0x12 /* SATA Data/Index Conf. 
*/ #define PCI_CAP_ID_AF 0x13 /* PCI Advanced Features */ #define PCI_CAP_ID_EA 0x14 /* PCI Enhanced Allocation */ #define PCI_CAP_ID_MAX PCI_CAP_ID_EA #define PCI_CAP_LIST_NEXT 1 /* Next capability in the list */ #define PCI_CAP_FLAGS 2 /* Capability defined flags (16 bits) */ #define PCI_CAP_SIZEOF 4 /* Power Management Registers */ #define PCI_PM_PMC 2 /* PM Capabilities Register */ #define PCI_PM_CAP_VER_MASK 0x0007 /* Version */ #define PCI_PM_CAP_PME_CLOCK 0x0008 /* PME clock required */ #define PCI_PM_CAP_RESERVED 0x0010 /* Reserved field */ #define PCI_PM_CAP_DSI 0x0020 /* Device specific initialization */ #define PCI_PM_CAP_AUX_POWER 0x01C0 /* Auxiliary power support mask */ #define PCI_PM_CAP_D1 0x0200 /* D1 power state support */ #define PCI_PM_CAP_D2 0x0400 /* D2 power state support */ #define PCI_PM_CAP_PME 0x0800 /* PME pin supported */ #define PCI_PM_CAP_PME_MASK 0xF800 /* PME Mask of all supported states */ #define PCI_PM_CAP_PME_D0 0x0800 /* PME# from D0 */ #define PCI_PM_CAP_PME_D1 0x1000 /* PME# from D1 */ #define PCI_PM_CAP_PME_D2 0x2000 /* PME# from D2 */ #define PCI_PM_CAP_PME_D3 0x4000 /* PME# from D3 (hot) */ #define PCI_PM_CAP_PME_D3cold 0x8000 /* PME# from D3 (cold) */ #define PCI_PM_CAP_PME_SHIFT 11 /* Start of the PME Mask in PMC */ #define PCI_PM_CTRL 4 /* PM control and status register */ #define PCI_PM_CTRL_STATE_MASK 0x0003 /* Current power state (D0 to D3) */ #define PCI_PM_CTRL_NO_SOFT_RESET 0x0008 /* No reset for D3hot->D0 */ #define PCI_PM_CTRL_PME_ENABLE 0x0100 /* PME pin enable */ #define PCI_PM_CTRL_DATA_SEL_MASK 0x1e00 /* Data select (??) */ #define PCI_PM_CTRL_DATA_SCALE_MASK 0x6000 /* Data scale (??) */ #define PCI_PM_CTRL_PME_STATUS 0x8000 /* PME pin status */ #define PCI_PM_PPB_EXTENSIONS 6 /* PPB support extensions (??) */ #define PCI_PM_PPB_B2_B3 0x40 /* Stop clock when in D3hot (??) */ #define PCI_PM_BPCC_ENABLE 0x80 /* Bus power/clock control enable (??) */ #define PCI_PM_DATA_REGISTER 7 /* (??) */ #define PCI_PM_SIZEOF 8 /* AGP registers */ #define PCI_AGP_VERSION 2 /* BCD version number */ #define PCI_AGP_RFU 3 /* Rest of capability flags */ #define PCI_AGP_STATUS 4 /* Status register */ #define PCI_AGP_STATUS_RQ_MASK 0xff000000 /* Maximum number of requests - 1 */ #define PCI_AGP_STATUS_SBA 0x0200 /* Sideband addressing supported */ #define PCI_AGP_STATUS_64BIT 0x0020 /* 64-bit addressing supported */ #define PCI_AGP_STATUS_FW 0x0010 /* FW transfers supported */ #define PCI_AGP_STATUS_RATE4 0x0004 /* 4x transfer rate supported */ #define PCI_AGP_STATUS_RATE2 0x0002 /* 2x transfer rate supported */ #define PCI_AGP_STATUS_RATE1 0x0001 /* 1x transfer rate supported */ #define PCI_AGP_COMMAND 8 /* Control register */ #define PCI_AGP_COMMAND_RQ_MASK 0xff000000 /* Master: Maximum number of requests */ #define PCI_AGP_COMMAND_SBA 0x0200 /* Sideband addressing enabled */ #define PCI_AGP_COMMAND_AGP 0x0100 /* Allow processing of AGP transactions */ #define PCI_AGP_COMMAND_64BIT 0x0020 /* Allow processing of 64-bit addresses */ #define PCI_AGP_COMMAND_FW 0x0010 /* Force FW transfers */ #define PCI_AGP_COMMAND_RATE4 0x0004 /* Use 4x rate */ #define PCI_AGP_COMMAND_RATE2 0x0002 /* Use 2x rate */ #define PCI_AGP_COMMAND_RATE1 0x0001 /* Use 1x rate */ #define PCI_AGP_SIZEOF 12 /* Vital Product Data */ #define PCI_VPD_ADDR 2 /* Address to access (15 bits!) 
*/ #define PCI_VPD_ADDR_MASK 0x7fff /* Address mask */ #define PCI_VPD_ADDR_F 0x8000 /* Write 0, 1 indicates completion */ #define PCI_VPD_DATA 4 /* 32-bits of data returned here */ #define PCI_CAP_VPD_SIZEOF 8 /* Slot Identification */ #define PCI_SID_ESR 2 /* Expansion Slot Register */ #define PCI_SID_ESR_NSLOTS 0x1f /* Number of expansion slots available */ #define PCI_SID_ESR_FIC 0x20 /* First In Chassis Flag */ #define PCI_SID_CHASSIS_NR 3 /* Chassis Number */ /* Message Signalled Interrupts registers */ #define PCI_MSI_FLAGS 2 /* Message Control */ #define PCI_MSI_FLAGS_ENABLE 0x0001 /* MSI feature enabled */ #define PCI_MSI_FLAGS_QMASK 0x000e /* Maximum queue size available */ #define PCI_MSI_FLAGS_QSIZE 0x0070 /* Message queue size configured */ #define PCI_MSI_FLAGS_64BIT 0x0080 /* 64-bit addresses allowed */ #define PCI_MSI_FLAGS_MASKBIT 0x0100 /* Per-vector masking capable */ #define PCI_MSI_RFU 3 /* Rest of capability flags */ #define PCI_MSI_ADDRESS_LO 4 /* Lower 32 bits */ #define PCI_MSI_ADDRESS_HI 8 /* Upper 32 bits (if PCI_MSI_FLAGS_64BIT set) */ #define PCI_MSI_DATA_32 8 /* 16 bits of data for 32-bit devices */ #define PCI_MSI_MASK_32 12 /* Mask bits register for 32-bit devices */ #define PCI_MSI_PENDING_32 16 /* Pending intrs for 32-bit devices */ #define PCI_MSI_DATA_64 12 /* 16 bits of data for 64-bit devices */ #define PCI_MSI_MASK_64 16 /* Mask bits register for 64-bit devices */ #define PCI_MSI_PENDING_64 20 /* Pending intrs for 64-bit devices */ /* MSI-X registers */ #define PCI_MSIX_FLAGS 2 /* Message Control */ #define PCI_MSIX_FLAGS_QSIZE 0x07FF /* Table size */ #define PCI_MSIX_FLAGS_MASKALL 0x4000 /* Mask all vectors for this function */ #define PCI_MSIX_FLAGS_ENABLE 0x8000 /* MSI-X enable */ #define PCI_MSIX_TABLE 4 /* Table offset */ #define PCI_MSIX_TABLE_BIR 0x00000007 /* BAR index */ #define PCI_MSIX_TABLE_OFFSET 0xfffffff8 /* Offset into specified BAR */ #define PCI_MSIX_PBA 8 /* Pending Bit Array offset */ #define PCI_MSIX_PBA_BIR 0x00000007 /* BAR index */ #define PCI_MSIX_PBA_OFFSET 0xfffffff8 /* Offset into specified BAR */ #define PCI_MSIX_FLAGS_BIRMASK PCI_MSIX_PBA_BIR /* deprecated */ #define PCI_CAP_MSIX_SIZEOF 12 /* size of MSIX registers */ /* MSI-X Table entry format */ #define PCI_MSIX_ENTRY_SIZE 16 #define PCI_MSIX_ENTRY_LOWER_ADDR 0 #define PCI_MSIX_ENTRY_UPPER_ADDR 4 #define PCI_MSIX_ENTRY_DATA 8 #define PCI_MSIX_ENTRY_VECTOR_CTRL 12 #define PCI_MSIX_ENTRY_CTRL_MASKBIT 1 /* CompactPCI Hotswap Register */ #define PCI_CHSWP_CSR 2 /* Control and Status Register */ #define PCI_CHSWP_DHA 0x01 /* Device Hiding Arm */ #define PCI_CHSWP_EIM 0x02 /* ENUM# Signal Mask */ #define PCI_CHSWP_PIE 0x04 /* Pending Insert or Extract */ #define PCI_CHSWP_LOO 0x08 /* LED On / Off */ #define PCI_CHSWP_PI 0x30 /* Programming Interface */ #define PCI_CHSWP_EXT 0x40 /* ENUM# status - extraction */ #define PCI_CHSWP_INS 0x80 /* ENUM# status - insertion */ /* PCI Advanced Feature registers */ #define PCI_AF_LENGTH 2 #define PCI_AF_CAP 3 #define PCI_AF_CAP_TP 0x01 #define PCI_AF_CAP_FLR 0x02 #define PCI_AF_CTRL 4 #define PCI_AF_CTRL_FLR 0x01 #define PCI_AF_STATUS 5 #define PCI_AF_STATUS_TP 0x01 #define PCI_CAP_AF_SIZEOF 6 /* size of AF registers */ /* PCI Enhanced Allocation registers */ #define PCI_EA_NUM_ENT 2 /* Number of Capability Entries */ #define PCI_EA_NUM_ENT_MASK 0x3f /* Num Entries Mask */ #define PCI_EA_FIRST_ENT 4 /* First EA Entry in List */ #define PCI_EA_FIRST_ENT_BRIDGE 8 /* First EA Entry for Bridges */ #define PCI_EA_ES 0x00000007 /* Entry Size 
*/ #define PCI_EA_BEI 0x000000f0 /* BAR Equivalent Indicator */ /* 0-5 map to BARs 0-5 respectively */ #define PCI_EA_BEI_BAR0 0 #define PCI_EA_BEI_BAR5 5 #define PCI_EA_BEI_BRIDGE 6 /* Resource behind bridge */ #define PCI_EA_BEI_ENI 7 /* Equivalent Not Indicated */ #define PCI_EA_BEI_ROM 8 /* Expansion ROM */ /* 9-14 map to VF BARs 0-5 respectively */ #define PCI_EA_BEI_VF_BAR0 9 #define PCI_EA_BEI_VF_BAR5 14 #define PCI_EA_BEI_RESERVED 15 /* Reserved - Treat like ENI */ #define PCI_EA_PP 0x0000ff00 /* Primary Properties */ #define PCI_EA_SP 0x00ff0000 /* Secondary Properties */ #define PCI_EA_P_MEM 0x00 /* Non-Prefetch Memory */ #define PCI_EA_P_MEM_PREFETCH 0x01 /* Prefetchable Memory */ #define PCI_EA_P_IO 0x02 /* I/O Space */ #define PCI_EA_P_VF_MEM_PREFETCH 0x03 /* VF Prefetchable Memory */ #define PCI_EA_P_VF_MEM 0x04 /* VF Non-Prefetch Memory */ #define PCI_EA_P_BRIDGE_MEM 0x05 /* Bridge Non-Prefetch Memory */ #define PCI_EA_P_BRIDGE_MEM_PREFETCH 0x06 /* Bridge Prefetchable Memory */ #define PCI_EA_P_BRIDGE_IO 0x07 /* Bridge I/O Space */ /* 0x08-0xfc reserved */ #define PCI_EA_P_MEM_RESERVED 0xfd /* Reserved Memory */ #define PCI_EA_P_IO_RESERVED 0xfe /* Reserved I/O Space */ #define PCI_EA_P_UNAVAILABLE 0xff /* Entry Unavailable */ #define PCI_EA_WRITABLE 0x40000000 /* Writable: 1 = RW, 0 = HwInit */ #define PCI_EA_ENABLE 0x80000000 /* Enable for this entry */ #define PCI_EA_BASE 4 /* Base Address Offset */ #define PCI_EA_MAX_OFFSET 8 /* MaxOffset (resource length) */ /* bit 0 is reserved */ #define PCI_EA_IS_64 0x00000002 /* 64-bit field flag */ #define PCI_EA_FIELD_MASK 0xfffffffc /* For Base & Max Offset */ /* PCI-X registers (Type 0 (non-bridge) devices) */ #define PCI_X_CMD 2 /* Modes & Features */ #define PCI_X_CMD_DPERR_E 0x0001 /* Data Parity Error Recovery Enable */ #define PCI_X_CMD_ERO 0x0002 /* Enable Relaxed Ordering */ #define PCI_X_CMD_READ_512 0x0000 /* 512 byte maximum read byte count */ #define PCI_X_CMD_READ_1K 0x0004 /* 1Kbyte maximum read byte count */ #define PCI_X_CMD_READ_2K 0x0008 /* 2Kbyte maximum read byte count */ #define PCI_X_CMD_READ_4K 0x000c /* 4Kbyte maximum read byte count */ #define PCI_X_CMD_MAX_READ 0x000c /* Max Memory Read Byte Count */ /* Max # of outstanding split transactions */ #define PCI_X_CMD_SPLIT_1 0x0000 /* Max 1 */ #define PCI_X_CMD_SPLIT_2 0x0010 /* Max 2 */ #define PCI_X_CMD_SPLIT_3 0x0020 /* Max 3 */ #define PCI_X_CMD_SPLIT_4 0x0030 /* Max 4 */ #define PCI_X_CMD_SPLIT_8 0x0040 /* Max 8 */ #define PCI_X_CMD_SPLIT_12 0x0050 /* Max 12 */ #define PCI_X_CMD_SPLIT_16 0x0060 /* Max 16 */ #define PCI_X_CMD_SPLIT_32 0x0070 /* Max 32 */ #define PCI_X_CMD_MAX_SPLIT 0x0070 /* Max Outstanding Split Transactions */ #define PCI_X_CMD_VERSION(x) (((x) >> 12) & 3) /* Version */ #define PCI_X_STATUS 4 /* PCI-X capabilities */ #define PCI_X_STATUS_DEVFN 0x000000ff /* A copy of devfn */ #define PCI_X_STATUS_BUS 0x0000ff00 /* A copy of bus nr */ #define PCI_X_STATUS_64BIT 0x00010000 /* 64-bit device */ #define PCI_X_STATUS_133MHZ 0x00020000 /* 133 MHz capable */ #define PCI_X_STATUS_SPL_DISC 0x00040000 /* Split Completion Discarded */ #define PCI_X_STATUS_UNX_SPL 0x00080000 /* Unexpected Split Completion */ #define PCI_X_STATUS_COMPLEX 0x00100000 /* Device Complexity */ #define PCI_X_STATUS_MAX_READ 0x00600000 /* Designed Max Memory Read Count */ #define PCI_X_STATUS_MAX_SPLIT 0x03800000 /* Designed Max Outstanding Split Transactions */ #define PCI_X_STATUS_MAX_CUM 0x1c000000 /* Designed Max Cumulative Read Size */ #define PCI_X_STATUS_SPL_ERR 
0x20000000 /* Rcvd Split Completion Error Msg */ #define PCI_X_STATUS_266MHZ 0x40000000 /* 266 MHz capable */ #define PCI_X_STATUS_533MHZ 0x80000000 /* 533 MHz capable */ #define PCI_X_ECC_CSR 8 /* ECC control and status */ #define PCI_CAP_PCIX_SIZEOF_V0 8 /* size of registers for Version 0 */ #define PCI_CAP_PCIX_SIZEOF_V1 24 /* size for Version 1 */ #define PCI_CAP_PCIX_SIZEOF_V2 PCI_CAP_PCIX_SIZEOF_V1 /* Same for v2 */ /* PCI-X registers (Type 1 (bridge) devices) */ #define PCI_X_BRIDGE_SSTATUS 2 /* Secondary Status */ #define PCI_X_SSTATUS_64BIT 0x0001 /* Secondary AD interface is 64 bits */ #define PCI_X_SSTATUS_133MHZ 0x0002 /* 133 MHz capable */ #define PCI_X_SSTATUS_FREQ 0x03c0 /* Secondary Bus Mode and Frequency */ #define PCI_X_SSTATUS_VERS 0x3000 /* PCI-X Capability Version */ #define PCI_X_SSTATUS_V1 0x1000 /* Mode 2, not Mode 1 */ #define PCI_X_SSTATUS_V2 0x2000 /* Mode 1 or Modes 1 and 2 */ #define PCI_X_SSTATUS_266MHZ 0x4000 /* 266 MHz capable */ #define PCI_X_SSTATUS_533MHZ 0x8000 /* 533 MHz capable */ #define PCI_X_BRIDGE_STATUS 4 /* Bridge Status */ /* PCI Bridge Subsystem ID registers */ #define PCI_SSVID_VENDOR_ID 4 /* PCI Bridge subsystem vendor ID */ #define PCI_SSVID_DEVICE_ID 6 /* PCI Bridge subsystem device ID */ /* PCI Express capability registers */ #define PCI_EXP_FLAGS 2 /* Capabilities register */ #define PCI_EXP_FLAGS_VERS 0x000f /* Capability version */ #define PCI_EXP_FLAGS_TYPE 0x00f0 /* Device/Port type */ #define PCI_EXP_TYPE_ENDPOINT 0x0 /* Express Endpoint */ #define PCI_EXP_TYPE_LEG_END 0x1 /* Legacy Endpoint */ #define PCI_EXP_TYPE_ROOT_PORT 0x4 /* Root Port */ #define PCI_EXP_TYPE_UPSTREAM 0x5 /* Upstream Port */ #define PCI_EXP_TYPE_DOWNSTREAM 0x6 /* Downstream Port */ #define PCI_EXP_TYPE_PCI_BRIDGE 0x7 /* PCIe to PCI/PCI-X Bridge */ #define PCI_EXP_TYPE_PCIE_BRIDGE 0x8 /* PCI/PCI-X to PCIe Bridge */ #define PCI_EXP_TYPE_RC_END 0x9 /* Root Complex Integrated Endpoint */ #define PCI_EXP_TYPE_RC_EC 0xa /* Root Complex Event Collector */ #define PCI_EXP_FLAGS_SLOT 0x0100 /* Slot implemented */ #define PCI_EXP_FLAGS_IRQ 0x3e00 /* Interrupt message number */ #define PCI_EXP_DEVCAP 4 /* Device capabilities */ #define PCI_EXP_DEVCAP_PAYLOAD 0x00000007 /* Max_Payload_Size */ #define PCI_EXP_DEVCAP_PHANTOM 0x00000018 /* Phantom functions */ #define PCI_EXP_DEVCAP_EXT_TAG 0x00000020 /* Extended tags */ #define PCI_EXP_DEVCAP_L0S 0x000001c0 /* L0s Acceptable Latency */ #define PCI_EXP_DEVCAP_L1 0x00000e00 /* L1 Acceptable Latency */ #define PCI_EXP_DEVCAP_ATN_BUT 0x00001000 /* Attention Button Present */ #define PCI_EXP_DEVCAP_ATN_IND 0x00002000 /* Attention Indicator Present */ #define PCI_EXP_DEVCAP_PWR_IND 0x00004000 /* Power Indicator Present */ #define PCI_EXP_DEVCAP_RBER 0x00008000 /* Role-Based Error Reporting */ #define PCI_EXP_DEVCAP_PWR_VAL 0x03fc0000 /* Slot Power Limit Value */ #define PCI_EXP_DEVCAP_PWR_SCL 0x0c000000 /* Slot Power Limit Scale */ #define PCI_EXP_DEVCAP_FLR 0x10000000 /* Function Level Reset */ #define PCI_EXP_DEVCTL 8 /* Device Control */ #define PCI_EXP_DEVCTL_CERE 0x0001 /* Correctable Error Reporting En. */ #define PCI_EXP_DEVCTL_NFERE 0x0002 /* Non-Fatal Error Reporting Enable */ #define PCI_EXP_DEVCTL_FERE 0x0004 /* Fatal Error Reporting Enable */ #define PCI_EXP_DEVCTL_URRE 0x0008 /* Unsupported Request Reporting En. 
*/ #define PCI_EXP_DEVCTL_RELAX_EN 0x0010 /* Enable relaxed ordering */ #define PCI_EXP_DEVCTL_PAYLOAD 0x00e0 /* Max_Payload_Size */ #define PCI_EXP_DEVCTL_EXT_TAG 0x0100 /* Extended Tag Field Enable */ #define PCI_EXP_DEVCTL_PHANTOM 0x0200 /* Phantom Functions Enable */ #define PCI_EXP_DEVCTL_AUX_PME 0x0400 /* Auxiliary Power PM Enable */ #define PCI_EXP_DEVCTL_NOSNOOP_EN 0x0800 /* Enable No Snoop */ #define PCI_EXP_DEVCTL_READRQ 0x7000 /* Max_Read_Request_Size */ #define PCI_EXP_DEVCTL_READRQ_128B 0x0000 /* 128 Bytes */ #define PCI_EXP_DEVCTL_READRQ_256B 0x1000 /* 256 Bytes */ #define PCI_EXP_DEVCTL_READRQ_512B 0x2000 /* 512 Bytes */ #define PCI_EXP_DEVCTL_READRQ_1024B 0x3000 /* 1024 Bytes */ #define PCI_EXP_DEVCTL_READRQ_2048B 0x4000 /* 2048 Bytes */ #define PCI_EXP_DEVCTL_READRQ_4096B 0x5000 /* 4096 Bytes */ #define PCI_EXP_DEVCTL_BCR_FLR 0x8000 /* Bridge Configuration Retry / FLR */ #define PCI_EXP_DEVSTA 10 /* Device Status */ #define PCI_EXP_DEVSTA_CED 0x0001 /* Correctable Error Detected */ #define PCI_EXP_DEVSTA_NFED 0x0002 /* Non-Fatal Error Detected */ #define PCI_EXP_DEVSTA_FED 0x0004 /* Fatal Error Detected */ #define PCI_EXP_DEVSTA_URD 0x0008 /* Unsupported Request Detected */ #define PCI_EXP_DEVSTA_AUXPD 0x0010 /* AUX Power Detected */ #define PCI_EXP_DEVSTA_TRPND 0x0020 /* Transactions Pending */ #define PCI_CAP_EXP_RC_ENDPOINT_SIZEOF_V1 12 /* v1 endpoints without link end here */ #define PCI_EXP_LNKCAP 12 /* Link Capabilities */ #define PCI_EXP_LNKCAP_SLS 0x0000000f /* Supported Link Speeds */ #define PCI_EXP_LNKCAP_SLS_2_5GB 0x00000001 /* LNKCAP2 SLS Vector bit 0 */ #define PCI_EXP_LNKCAP_SLS_5_0GB 0x00000002 /* LNKCAP2 SLS Vector bit 1 */ #define PCI_EXP_LNKCAP_SLS_8_0GB 0x00000003 /* LNKCAP2 SLS Vector bit 2 */ #define PCI_EXP_LNKCAP_SLS_16_0GB 0x00000004 /* LNKCAP2 SLS Vector bit 3 */ #define PCI_EXP_LNKCAP_MLW 0x000003f0 /* Maximum Link Width */ #define PCI_EXP_LNKCAP_ASPMS 0x00000c00 /* ASPM Support */ #define PCI_EXP_LNKCAP_L0SEL 0x00007000 /* L0s Exit Latency */ #define PCI_EXP_LNKCAP_L1EL 0x00038000 /* L1 Exit Latency */ #define PCI_EXP_LNKCAP_CLKPM 0x00040000 /* Clock Power Management */ #define PCI_EXP_LNKCAP_SDERC 0x00080000 /* Surprise Down Error Reporting Capable */ #define PCI_EXP_LNKCAP_DLLLARC 0x00100000 /* Data Link Layer Link Active Reporting Capable */ #define PCI_EXP_LNKCAP_LBNC 0x00200000 /* Link Bandwidth Notification Capability */ #define PCI_EXP_LNKCAP_PN 0xff000000 /* Port Number */ #define PCI_EXP_LNKCTL 16 /* Link Control */ #define PCI_EXP_LNKCTL_ASPMC 0x0003 /* ASPM Control */ #define PCI_EXP_LNKCTL_ASPM_L0S 0x0001 /* L0s Enable */ #define PCI_EXP_LNKCTL_ASPM_L1 0x0002 /* L1 Enable */ #define PCI_EXP_LNKCTL_RCB 0x0008 /* Read Completion Boundary */ #define PCI_EXP_LNKCTL_LD 0x0010 /* Link Disable */ #define PCI_EXP_LNKCTL_RL 0x0020 /* Retrain Link */ #define PCI_EXP_LNKCTL_CCC 0x0040 /* Common Clock Configuration */ #define PCI_EXP_LNKCTL_ES 0x0080 /* Extended Synch */ #define PCI_EXP_LNKCTL_CLKREQ_EN 0x0100 /* Enable clkreq */ #define PCI_EXP_LNKCTL_HAWD 0x0200 /* Hardware Autonomous Width Disable */ #define PCI_EXP_LNKCTL_LBMIE 0x0400 /* Link Bandwidth Management Interrupt Enable */ #define PCI_EXP_LNKCTL_LABIE 0x0800 /* Link Autonomous Bandwidth Interrupt Enable */ #define PCI_EXP_LNKSTA 18 /* Link Status */ #define PCI_EXP_LNKSTA_CLS 0x000f /* Current Link Speed */ #define PCI_EXP_LNKSTA_CLS_2_5GB 0x0001 /* Current Link Speed 2.5GT/s */ #define PCI_EXP_LNKSTA_CLS_5_0GB 0x0002 /* Current Link Speed 5.0GT/s */ #define 
PCI_EXP_LNKSTA_CLS_8_0GB 0x0003 /* Current Link Speed 8.0GT/s */ #define PCI_EXP_LNKSTA_CLS_16_0GB 0x0004 /* Current Link Speed 16.0GT/s */ #define PCI_EXP_LNKSTA_NLW 0x03f0 /* Negotiated Link Width */ #define PCI_EXP_LNKSTA_NLW_X1 0x0010 /* Current Link Width x1 */ #define PCI_EXP_LNKSTA_NLW_X2 0x0020 /* Current Link Width x2 */ #define PCI_EXP_LNKSTA_NLW_X4 0x0040 /* Current Link Width x4 */ #define PCI_EXP_LNKSTA_NLW_X8 0x0080 /* Current Link Width x8 */ #define PCI_EXP_LNKSTA_NLW_SHIFT 4 /* start of NLW mask in link status */ #define PCI_EXP_LNKSTA_LT 0x0800 /* Link Training */ #define PCI_EXP_LNKSTA_SLC 0x1000 /* Slot Clock Configuration */ #define PCI_EXP_LNKSTA_DLLLA 0x2000 /* Data Link Layer Link Active */ #define PCI_EXP_LNKSTA_LBMS 0x4000 /* Link Bandwidth Management Status */ #define PCI_EXP_LNKSTA_LABS 0x8000 /* Link Autonomous Bandwidth Status */ #define PCI_CAP_EXP_ENDPOINT_SIZEOF_V1 20 /* v1 endpoints with link end here */ #define PCI_EXP_SLTCAP 20 /* Slot Capabilities */ #define PCI_EXP_SLTCAP_ABP 0x00000001 /* Attention Button Present */ #define PCI_EXP_SLTCAP_PCP 0x00000002 /* Power Controller Present */ #define PCI_EXP_SLTCAP_MRLSP 0x00000004 /* MRL Sensor Present */ #define PCI_EXP_SLTCAP_AIP 0x00000008 /* Attention Indicator Present */ #define PCI_EXP_SLTCAP_PIP 0x00000010 /* Power Indicator Present */ #define PCI_EXP_SLTCAP_HPS 0x00000020 /* Hot-Plug Surprise */ #define PCI_EXP_SLTCAP_HPC 0x00000040 /* Hot-Plug Capable */ #define PCI_EXP_SLTCAP_SPLV 0x00007f80 /* Slot Power Limit Value */ #define PCI_EXP_SLTCAP_SPLS 0x00018000 /* Slot Power Limit Scale */ #define PCI_EXP_SLTCAP_EIP 0x00020000 /* Electromechanical Interlock Present */ #define PCI_EXP_SLTCAP_NCCS 0x00040000 /* No Command Completed Support */ #define PCI_EXP_SLTCAP_PSN 0xfff80000 /* Physical Slot Number */ #define PCI_EXP_SLTCTL 24 /* Slot Control */ #define PCI_EXP_SLTCTL_ABPE 0x0001 /* Attention Button Pressed Enable */ #define PCI_EXP_SLTCTL_PFDE 0x0002 /* Power Fault Detected Enable */ #define PCI_EXP_SLTCTL_MRLSCE 0x0004 /* MRL Sensor Changed Enable */ #define PCI_EXP_SLTCTL_PDCE 0x0008 /* Presence Detect Changed Enable */ #define PCI_EXP_SLTCTL_CCIE 0x0010 /* Command Completed Interrupt Enable */ #define PCI_EXP_SLTCTL_HPIE 0x0020 /* Hot-Plug Interrupt Enable */ #define PCI_EXP_SLTCTL_AIC 0x00c0 /* Attention Indicator Control */ #define PCI_EXP_SLTCTL_ATTN_IND_ON 0x0040 /* Attention Indicator on */ #define PCI_EXP_SLTCTL_ATTN_IND_BLINK 0x0080 /* Attention Indicator blinking */ #define PCI_EXP_SLTCTL_ATTN_IND_OFF 0x00c0 /* Attention Indicator off */ #define PCI_EXP_SLTCTL_PIC 0x0300 /* Power Indicator Control */ #define PCI_EXP_SLTCTL_PWR_IND_ON 0x0100 /* Power Indicator on */ #define PCI_EXP_SLTCTL_PWR_IND_BLINK 0x0200 /* Power Indicator blinking */ #define PCI_EXP_SLTCTL_PWR_IND_OFF 0x0300 /* Power Indicator off */ #define PCI_EXP_SLTCTL_PCC 0x0400 /* Power Controller Control */ #define PCI_EXP_SLTCTL_PWR_ON 0x0000 /* Power On */ #define PCI_EXP_SLTCTL_PWR_OFF 0x0400 /* Power Off */ #define PCI_EXP_SLTCTL_EIC 0x0800 /* Electromechanical Interlock Control */ #define PCI_EXP_SLTCTL_DLLSCE 0x1000 /* Data Link Layer State Changed Enable */ #define PCI_EXP_SLTSTA 26 /* Slot Status */ #define PCI_EXP_SLTSTA_ABP 0x0001 /* Attention Button Pressed */ #define PCI_EXP_SLTSTA_PFD 0x0002 /* Power Fault Detected */ #define PCI_EXP_SLTSTA_MRLSC 0x0004 /* MRL Sensor Changed */ #define PCI_EXP_SLTSTA_PDC 0x0008 /* Presence Detect Changed */ #define PCI_EXP_SLTSTA_CC 0x0010 /* Command Completed */ #define 
PCI_EXP_SLTSTA_MRLSS 0x0020 /* MRL Sensor State */ #define PCI_EXP_SLTSTA_PDS 0x0040 /* Presence Detect State */ #define PCI_EXP_SLTSTA_EIS 0x0080 /* Electromechanical Interlock Status */ #define PCI_EXP_SLTSTA_DLLSC 0x0100 /* Data Link Layer State Changed */ #define PCI_EXP_RTCTL 28 /* Root Control */ #define PCI_EXP_RTCTL_SECEE 0x0001 /* System Error on Correctable Error */ #define PCI_EXP_RTCTL_SENFEE 0x0002 /* System Error on Non-Fatal Error */ #define PCI_EXP_RTCTL_SEFEE 0x0004 /* System Error on Fatal Error */ #define PCI_EXP_RTCTL_PMEIE 0x0008 /* PME Interrupt Enable */ #define PCI_EXP_RTCTL_CRSSVE 0x0010 /* CRS Software Visibility Enable */ #define PCI_EXP_RTCAP 30 /* Root Capabilities */ #define PCI_EXP_RTCAP_CRSVIS 0x0001 /* CRS Software Visibility capability */ #define PCI_EXP_RTSTA 32 /* Root Status */ #define PCI_EXP_RTSTA_PME 0x00010000 /* PME status */ #define PCI_EXP_RTSTA_PENDING 0x00020000 /* PME pending */ /* * The Device Capabilities 2, Device Status 2, Device Control 2, * Link Capabilities 2, Link Status 2, Link Control 2, * Slot Capabilities 2, Slot Status 2, and Slot Control 2 registers * are only present on devices with PCIe Capability version 2. * Use pcie_capability_read_word() and similar interfaces to use them * safely. */ #define PCI_EXP_DEVCAP2 36 /* Device Capabilities 2 */ #define PCI_EXP_DEVCAP2_COMP_TMOUT_DIS 0x00000010 /* Completion Timeout Disable supported */ #define PCI_EXP_DEVCAP2_ARI 0x00000020 /* Alternative Routing-ID */ #define PCI_EXP_DEVCAP2_ATOMIC_ROUTE 0x00000040 /* Atomic Op routing */ #define PCI_EXP_DEVCAP2_ATOMIC_COMP32 0x00000080 /* 32b AtomicOp completion */ #define PCI_EXP_DEVCAP2_ATOMIC_COMP64 0x00000100 /* 64b AtomicOp completion */ #define PCI_EXP_DEVCAP2_ATOMIC_COMP128 0x00000200 /* 128b AtomicOp completion */ #define PCI_EXP_DEVCAP2_LTR 0x00000800 /* Latency tolerance reporting */ #define PCI_EXP_DEVCAP2_OBFF_MASK 0x000c0000 /* OBFF support mechanism */ #define PCI_EXP_DEVCAP2_OBFF_MSG 0x00040000 /* New message signaling */ #define PCI_EXP_DEVCAP2_OBFF_WAKE 0x00080000 /* Re-use WAKE# for OBFF */ #define PCI_EXP_DEVCAP2_EE_PREFIX 0x00200000 /* End-End TLP Prefix */ #define PCI_EXP_DEVCTL2 40 /* Device Control 2 */ #define PCI_EXP_DEVCTL2_COMP_TIMEOUT 0x000f /* Completion Timeout Value */ #define PCI_EXP_DEVCTL2_COMP_TMOUT_DIS 0x0010 /* Completion Timeout Disable */ #define PCI_EXP_DEVCTL2_ARI 0x0020 /* Alternative Routing-ID */ #define PCI_EXP_DEVCTL2_ATOMIC_REQ 0x0040 /* Set Atomic requests */ #define PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK 0x0080 /* Block atomic egress */ #define PCI_EXP_DEVCTL2_IDO_REQ_EN 0x0100 /* Allow IDO for requests */ #define PCI_EXP_DEVCTL2_IDO_CMP_EN 0x0200 /* Allow IDO for completions */ #define PCI_EXP_DEVCTL2_LTR_EN 0x0400 /* Enable LTR mechanism */ #define PCI_EXP_DEVCTL2_OBFF_MSGA_EN 0x2000 /* Enable OBFF Message type A */ #define PCI_EXP_DEVCTL2_OBFF_MSGB_EN 0x4000 /* Enable OBFF Message type B */ #define PCI_EXP_DEVCTL2_OBFF_WAKE_EN 0x6000 /* OBFF using WAKE# signaling */ #define PCI_EXP_DEVSTA2 42 /* Device Status 2 */ #define PCI_CAP_EXP_RC_ENDPOINT_SIZEOF_V2 44 /* v2 endpoints without link end here */ #define PCI_EXP_LNKCAP2 44 /* Link Capabilities 2 */ #define PCI_EXP_LNKCAP2_SLS_2_5GB 0x00000002 /* Supported Speed 2.5GT/s */ #define PCI_EXP_LNKCAP2_SLS_5_0GB 0x00000004 /* Supported Speed 5GT/s */ #define PCI_EXP_LNKCAP2_SLS_8_0GB 0x00000008 /* Supported Speed 8GT/s */ #define PCI_EXP_LNKCAP2_SLS_16_0GB 0x00000010 /* Supported Speed 16GT/s */ #define PCI_EXP_LNKCAP2_CROSSLINK 0x00000100 /* 
Crosslink supported */ #define PCI_EXP_LNKCTL2 48 /* Link Control 2 */ #define PCI_EXP_LNKCTL2_TLS 0x000f #define PCI_EXP_LNKCTL2_TLS_2_5GT 0x0001 /* Supported Speed 2.5GT/s */ #define PCI_EXP_LNKCTL2_TLS_5_0GT 0x0002 /* Supported Speed 5GT/s */ #define PCI_EXP_LNKCTL2_TLS_8_0GT 0x0003 /* Supported Speed 8GT/s */ #define PCI_EXP_LNKCTL2_TLS_16_0GT 0x0004 /* Supported Speed 16GT/s */ #define PCI_EXP_LNKSTA2 50 /* Link Status 2 */ #define PCI_CAP_EXP_ENDPOINT_SIZEOF_V2 52 /* v2 endpoints with link end here */ #define PCI_EXP_SLTCAP2 52 /* Slot Capabilities 2 */ #define PCI_EXP_SLTCTL2 56 /* Slot Control 2 */ #define PCI_EXP_SLTSTA2 58 /* Slot Status 2 */ /* Extended Capabilities (PCI-X 2.0 and Express) */ #define PCI_EXT_CAP_ID(header) (header & 0x0000ffff) #define PCI_EXT_CAP_VER(header) ((header >> 16) & 0xf) #define PCI_EXT_CAP_NEXT(header) ((header >> 20) & 0xffc) #define PCI_EXT_CAP_ID_ERR 0x01 /* Advanced Error Reporting */ #define PCI_EXT_CAP_ID_VC 0x02 /* Virtual Channel Capability */ #define PCI_EXT_CAP_ID_DSN 0x03 /* Device Serial Number */ #define PCI_EXT_CAP_ID_PWR 0x04 /* Power Budgeting */ #define PCI_EXT_CAP_ID_RCLD 0x05 /* Root Complex Link Declaration */ #define PCI_EXT_CAP_ID_RCILC 0x06 /* Root Complex Internal Link Control */ #define PCI_EXT_CAP_ID_RCEC 0x07 /* Root Complex Event Collector */ #define PCI_EXT_CAP_ID_MFVC 0x08 /* Multi-Function VC Capability */ #define PCI_EXT_CAP_ID_VC9 0x09 /* same as _VC */ #define PCI_EXT_CAP_ID_RCRB 0x0A /* Root Complex RB? */ #define PCI_EXT_CAP_ID_VNDR 0x0B /* Vendor-Specific */ #define PCI_EXT_CAP_ID_CAC 0x0C /* Config Access - obsolete */ #define PCI_EXT_CAP_ID_ACS 0x0D /* Access Control Services */ #define PCI_EXT_CAP_ID_ARI 0x0E /* Alternate Routing ID */ #define PCI_EXT_CAP_ID_ATS 0x0F /* Address Translation Services */ #define PCI_EXT_CAP_ID_SRIOV 0x10 /* Single Root I/O Virtualization */ #define PCI_EXT_CAP_ID_MRIOV 0x11 /* Multi Root I/O Virtualization */ #define PCI_EXT_CAP_ID_MCAST 0x12 /* Multicast */ #define PCI_EXT_CAP_ID_PRI 0x13 /* Page Request Interface */ #define PCI_EXT_CAP_ID_AMD_XXX 0x14 /* Reserved for AMD */ #define PCI_EXT_CAP_ID_REBAR 0x15 /* Resizable BAR */ #define PCI_EXT_CAP_ID_DPA 0x16 /* Dynamic Power Allocation */ #define PCI_EXT_CAP_ID_TPH 0x17 /* TPH Requester */ #define PCI_EXT_CAP_ID_LTR 0x18 /* Latency Tolerance Reporting */ #define PCI_EXT_CAP_ID_SECPCI 0x19 /* Secondary PCIe Capability */ #define PCI_EXT_CAP_ID_PMUX 0x1A /* Protocol Multiplexing */ #define PCI_EXT_CAP_ID_PASID 0x1B /* Process Address Space ID */ #define PCI_EXT_CAP_ID_DPC 0x1D /* Downstream Port Containment */ #define PCI_EXT_CAP_ID_L1SS 0x1E /* L1 PM Substates */ #define PCI_EXT_CAP_ID_PTM 0x1F /* Precision Time Measurement */ #define PCI_EXT_CAP_ID_MAX PCI_EXT_CAP_ID_PTM #define PCI_EXT_CAP_DSN_SIZEOF 12 #define PCI_EXT_CAP_MCAST_ENDPOINT_SIZEOF 40 /* Advanced Error Reporting */ #define PCI_ERR_UNCOR_STATUS 4 /* Uncorrectable Error Status */ #define PCI_ERR_UNC_UND 0x00000001 /* Undefined */ #define PCI_ERR_UNC_DLP 0x00000010 /* Data Link Protocol */ #define PCI_ERR_UNC_SURPDN 0x00000020 /* Surprise Down */ #define PCI_ERR_UNC_POISON_TLP 0x00001000 /* Poisoned TLP */ #define PCI_ERR_UNC_FCP 0x00002000 /* Flow Control Protocol */ #define PCI_ERR_UNC_COMP_TIME 0x00004000 /* Completion Timeout */ #define PCI_ERR_UNC_COMP_ABORT 0x00008000 /* Completer Abort */ #define PCI_ERR_UNC_UNX_COMP 0x00010000 /* Unexpected Completion */ #define PCI_ERR_UNC_RX_OVER 0x00020000 /* Receiver Overflow */ #define PCI_ERR_UNC_MALF_TLP 0x00040000 /* 
Malformed TLP */ #define PCI_ERR_UNC_ECRC 0x00080000 /* ECRC Error Status */ #define PCI_ERR_UNC_UNSUP 0x00100000 /* Unsupported Request */ #define PCI_ERR_UNC_ACSV 0x00200000 /* ACS Violation */ #define PCI_ERR_UNC_INTN 0x00400000 /* internal error */ #define PCI_ERR_UNC_MCBTLP 0x00800000 /* MC blocked TLP */ #define PCI_ERR_UNC_ATOMEG 0x01000000 /* Atomic egress blocked */ #define PCI_ERR_UNC_TLPPRE 0x02000000 /* TLP prefix blocked */ #define PCI_ERR_UNCOR_MASK 8 /* Uncorrectable Error Mask */ /* Same bits as above */ #define PCI_ERR_UNCOR_SEVER 12 /* Uncorrectable Error Severity */ /* Same bits as above */ #define PCI_ERR_COR_STATUS 16 /* Correctable Error Status */ #define PCI_ERR_COR_RCVR 0x00000001 /* Receiver Error Status */ #define PCI_ERR_COR_BAD_TLP 0x00000040 /* Bad TLP Status */ #define PCI_ERR_COR_BAD_DLLP 0x00000080 /* Bad DLLP Status */ #define PCI_ERR_COR_REP_ROLL 0x00000100 /* REPLAY_NUM Rollover */ #define PCI_ERR_COR_REP_TIMER 0x00001000 /* Replay Timer Timeout */ #define PCI_ERR_COR_ADV_NFAT 0x00002000 /* Advisory Non-Fatal */ #define PCI_ERR_COR_INTERNAL 0x00004000 /* Corrected Internal */ #define PCI_ERR_COR_LOG_OVER 0x00008000 /* Header Log Overflow */ #define PCI_ERR_COR_MASK 20 /* Correctable Error Mask */ /* Same bits as above */ #define PCI_ERR_CAP 24 /* Advanced Error Capabilities */ #define PCI_ERR_CAP_FEP(x) ((x) & 31) /* First Error Pointer */ #define PCI_ERR_CAP_ECRC_GENC 0x00000020 /* ECRC Generation Capable */ #define PCI_ERR_CAP_ECRC_GENE 0x00000040 /* ECRC Generation Enable */ #define PCI_ERR_CAP_ECRC_CHKC 0x00000080 /* ECRC Check Capable */ #define PCI_ERR_CAP_ECRC_CHKE 0x00000100 /* ECRC Check Enable */ #define PCI_ERR_HEADER_LOG 28 /* Header Log Register (16 bytes) */ #define PCI_ERR_ROOT_COMMAND 44 /* Root Error Command */ #define PCI_ERR_ROOT_CMD_COR_EN 0x00000001 /* Correctable Err Reporting Enable */ #define PCI_ERR_ROOT_CMD_NONFATAL_EN 0x00000002 /* Non-Fatal Err Reporting Enable */ #define PCI_ERR_ROOT_CMD_FATAL_EN 0x00000004 /* Fatal Err Reporting Enable */ #define PCI_ERR_ROOT_STATUS 48 #define PCI_ERR_ROOT_COR_RCV 0x00000001 /* ERR_COR Received */ #define PCI_ERR_ROOT_MULTI_COR_RCV 0x00000002 /* Multiple ERR_COR */ #define PCI_ERR_ROOT_UNCOR_RCV 0x00000004 /* ERR_FATAL/NONFATAL */ #define PCI_ERR_ROOT_MULTI_UNCOR_RCV 0x00000008 /* Multiple FATAL/NONFATAL */ #define PCI_ERR_ROOT_FIRST_FATAL 0x00000010 /* First UNC is Fatal */ #define PCI_ERR_ROOT_NONFATAL_RCV 0x00000020 /* Non-Fatal Received */ #define PCI_ERR_ROOT_FATAL_RCV 0x00000040 /* Fatal Received */ #define PCI_ERR_ROOT_AER_IRQ 0xf8000000 /* Advanced Error Interrupt Message Number */ #define PCI_ERR_ROOT_ERR_SRC 52 /* Error Source Identification */ /* Virtual Channel */ #define PCI_VC_PORT_CAP1 4 #define PCI_VC_CAP1_EVCC 0x00000007 /* extended VC count */ #define PCI_VC_CAP1_LPEVCC 0x00000070 /* low prio extended VC count */ #define PCI_VC_CAP1_ARB_SIZE 0x00000c00 #define PCI_VC_PORT_CAP2 8 #define PCI_VC_CAP2_32_PHASE 0x00000002 #define PCI_VC_CAP2_64_PHASE 0x00000004 #define PCI_VC_CAP2_128_PHASE 0x00000008 #define PCI_VC_CAP2_ARB_OFF 0xff000000 #define PCI_VC_PORT_CTRL 12 #define PCI_VC_PORT_CTRL_LOAD_TABLE 0x00000001 #define PCI_VC_PORT_STATUS 14 #define PCI_VC_PORT_STATUS_TABLE 0x00000001 #define PCI_VC_RES_CAP 16 #define PCI_VC_RES_CAP_32_PHASE 0x00000002 #define PCI_VC_RES_CAP_64_PHASE 0x00000004 #define PCI_VC_RES_CAP_128_PHASE 0x00000008 #define PCI_VC_RES_CAP_128_PHASE_TB 0x00000010 #define PCI_VC_RES_CAP_256_PHASE 0x00000020 #define PCI_VC_RES_CAP_ARB_OFF 0xff000000 #define 
PCI_VC_RES_CTRL 20 #define PCI_VC_RES_CTRL_LOAD_TABLE 0x00010000 #define PCI_VC_RES_CTRL_ARB_SELECT 0x000e0000 #define PCI_VC_RES_CTRL_ID 0x07000000 #define PCI_VC_RES_CTRL_ENABLE 0x80000000 #define PCI_VC_RES_STATUS 26 #define PCI_VC_RES_STATUS_TABLE 0x00000001 #define PCI_VC_RES_STATUS_NEGO 0x00000002 #define PCI_CAP_VC_BASE_SIZEOF 0x10 #define PCI_CAP_VC_PER_VC_SIZEOF 0x0C /* Power Budgeting */ #define PCI_PWR_DSR 4 /* Data Select Register */ #define PCI_PWR_DATA 8 /* Data Register */ #define PCI_PWR_DATA_BASE(x) ((x) & 0xff) /* Base Power */ #define PCI_PWR_DATA_SCALE(x) (((x) >> 8) & 3) /* Data Scale */ #define PCI_PWR_DATA_PM_SUB(x) (((x) >> 10) & 7) /* PM Sub State */ #define PCI_PWR_DATA_PM_STATE(x) (((x) >> 13) & 3) /* PM State */ #define PCI_PWR_DATA_TYPE(x) (((x) >> 15) & 7) /* Type */ #define PCI_PWR_DATA_RAIL(x) (((x) >> 18) & 7) /* Power Rail */ #define PCI_PWR_CAP 12 /* Capability */ #define PCI_PWR_CAP_BUDGET(x) ((x) & 1) /* Included in system budget */ #define PCI_EXT_CAP_PWR_SIZEOF 16 /* Vendor-Specific (VSEC, PCI_EXT_CAP_ID_VNDR) */ #define PCI_VNDR_HEADER 4 /* Vendor-Specific Header */ #define PCI_VNDR_HEADER_ID(x) ((x) & 0xffff) #define PCI_VNDR_HEADER_REV(x) (((x) >> 16) & 0xf) #define PCI_VNDR_HEADER_LEN(x) (((x) >> 20) & 0xfff) /* * HyperTransport sub capability types * * Unfortunately there are both 3 bit and 5 bit capability types defined * in the HT spec, catering for that is a little messy. You probably don't * want to use these directly, just use pci_find_ht_capability() and it * will do the right thing for you. */ #define HT_3BIT_CAP_MASK 0xE0 #define HT_CAPTYPE_SLAVE 0x00 /* Slave/Primary link configuration */ #define HT_CAPTYPE_HOST 0x20 /* Host/Secondary link configuration */ #define HT_5BIT_CAP_MASK 0xF8 #define HT_CAPTYPE_IRQ 0x80 /* IRQ Configuration */ #define HT_CAPTYPE_REMAPPING_40 0xA0 /* 40 bit address remapping */ #define HT_CAPTYPE_REMAPPING_64 0xA2 /* 64 bit address remapping */ #define HT_CAPTYPE_UNITID_CLUMP 0x90 /* Unit ID clumping */ #define HT_CAPTYPE_EXTCONF 0x98 /* Extended Configuration Space Access */ #define HT_CAPTYPE_MSI_MAPPING 0xA8 /* MSI Mapping Capability */ #define HT_MSI_FLAGS 0x02 /* Offset to flags */ #define HT_MSI_FLAGS_ENABLE 0x1 /* Mapping enable */ #define HT_MSI_FLAGS_FIXED 0x2 /* Fixed mapping only */ #define HT_MSI_FIXED_ADDR 0x00000000FEE00000ULL /* Fixed addr */ #define HT_MSI_ADDR_LO 0x04 /* Offset to low addr bits */ #define HT_MSI_ADDR_LO_MASK 0xFFF00000 /* Low address bit mask */ #define HT_MSI_ADDR_HI 0x08 /* Offset to high addr bits */ #define HT_CAPTYPE_DIRECT_ROUTE 0xB0 /* Direct routing configuration */ #define HT_CAPTYPE_VCSET 0xB8 /* Virtual Channel configuration */ #define HT_CAPTYPE_ERROR_RETRY 0xC0 /* Retry on error configuration */ #define HT_CAPTYPE_GEN3 0xD0 /* Generation 3 HyperTransport configuration */ #define HT_CAPTYPE_PM 0xE0 /* HyperTransport power management configuration */ #define HT_CAP_SIZEOF_LONG 28 /* slave & primary */ #define HT_CAP_SIZEOF_SHORT 24 /* host & secondary */ /* Alternative Routing-ID Interpretation */ #define PCI_ARI_CAP 0x04 /* ARI Capability Register */ #define PCI_ARI_CAP_MFVC 0x0001 /* MFVC Function Groups Capability */ #define PCI_ARI_CAP_ACS 0x0002 /* ACS Function Groups Capability */ #define PCI_ARI_CAP_NFN(x) (((x) >> 8) & 0xff) /* Next Function Number */ #define PCI_ARI_CTRL 0x06 /* ARI Control Register */ #define PCI_ARI_CTRL_MFVC 0x0001 /* MFVC Function Groups Enable */ #define PCI_ARI_CTRL_ACS 0x0002 /* ACS Function Groups Enable */ #define PCI_ARI_CTRL_FG(x) 
(((x) >> 4) & 7) /* Function Group */ #define PCI_EXT_CAP_ARI_SIZEOF 8 /* Address Translation Service */ #define PCI_ATS_CAP 0x04 /* ATS Capability Register */ #define PCI_ATS_CAP_QDEP(x) ((x) & 0x1f) /* Invalidate Queue Depth */ #define PCI_ATS_MAX_QDEP 32 /* Max Invalidate Queue Depth */ #define PCI_ATS_CTRL 0x06 /* ATS Control Register */ #define PCI_ATS_CTRL_ENABLE 0x8000 /* ATS Enable */ #define PCI_ATS_CTRL_STU(x) ((x) & 0x1f) /* Smallest Translation Unit */ #define PCI_ATS_MIN_STU 12 /* shift of minimum STU block */ #define PCI_EXT_CAP_ATS_SIZEOF 8 /* Page Request Interface */ #define PCI_PRI_CTRL 0x04 /* PRI control register */ #define PCI_PRI_CTRL_ENABLE 0x01 /* Enable */ #define PCI_PRI_CTRL_RESET 0x02 /* Reset */ #define PCI_PRI_STATUS 0x06 /* PRI status register */ #define PCI_PRI_STATUS_RF 0x001 /* Response Failure */ #define PCI_PRI_STATUS_UPRGI 0x002 /* Unexpected PRG index */ #define PCI_PRI_STATUS_STOPPED 0x100 /* PRI Stopped */ #define PCI_PRI_MAX_REQ 0x08 /* PRI max reqs supported */ #define PCI_PRI_ALLOC_REQ 0x0c /* PRI max reqs allowed */ #define PCI_EXT_CAP_PRI_SIZEOF 16 /* Process Address Space ID */ #define PCI_PASID_CAP 0x04 /* PASID feature register */ #define PCI_PASID_CAP_EXEC 0x02 /* Exec permissions Supported */ #define PCI_PASID_CAP_PRIV 0x04 /* Privilege Mode Supported */ #define PCI_PASID_CTRL 0x06 /* PASID control register */ #define PCI_PASID_CTRL_ENABLE 0x01 /* Enable bit */ #define PCI_PASID_CTRL_EXEC 0x02 /* Exec permissions Enable */ #define PCI_PASID_CTRL_PRIV 0x04 /* Privilege Mode Enable */ #define PCI_EXT_CAP_PASID_SIZEOF 8 /* Single Root I/O Virtualization */ #define PCI_SRIOV_CAP 0x04 /* SR-IOV Capabilities */ #define PCI_SRIOV_CAP_VFM 0x01 /* VF Migration Capable */ #define PCI_SRIOV_CAP_INTR(x) ((x) >> 21) /* Interrupt Message Number */ #define PCI_SRIOV_CTRL 0x08 /* SR-IOV Control */ #define PCI_SRIOV_CTRL_VFE 0x01 /* VF Enable */ #define PCI_SRIOV_CTRL_VFM 0x02 /* VF Migration Enable */ #define PCI_SRIOV_CTRL_INTR 0x04 /* VF Migration Interrupt Enable */ #define PCI_SRIOV_CTRL_MSE 0x08 /* VF Memory Space Enable */ #define PCI_SRIOV_CTRL_ARI 0x10 /* ARI Capable Hierarchy */ #define PCI_SRIOV_STATUS 0x0a /* SR-IOV Status */ #define PCI_SRIOV_STATUS_VFM 0x01 /* VF Migration Status */ #define PCI_SRIOV_INITIAL_VF 0x0c /* Initial VFs */ #define PCI_SRIOV_TOTAL_VF 0x0e /* Total VFs */ #define PCI_SRIOV_NUM_VF 0x10 /* Number of VFs */ #define PCI_SRIOV_FUNC_LINK 0x12 /* Function Dependency Link */ #define PCI_SRIOV_VF_OFFSET 0x14 /* First VF Offset */ #define PCI_SRIOV_VF_STRIDE 0x16 /* Following VF Stride */ #define PCI_SRIOV_VF_DID 0x1a /* VF Device ID */ #define PCI_SRIOV_SUP_PGSIZE 0x1c /* Supported Page Sizes */ #define PCI_SRIOV_SYS_PGSIZE 0x20 /* System Page Size */ #define PCI_SRIOV_BAR 0x24 /* VF BAR0 */ #define PCI_SRIOV_NUM_BARS 6 /* Number of VF BARs */ #define PCI_SRIOV_VFM 0x3c /* VF Migration State Array Offset*/ #define PCI_SRIOV_VFM_BIR(x) ((x) & 7) /* State BIR */ #define PCI_SRIOV_VFM_OFFSET(x) ((x) & ~7) /* State Offset */ #define PCI_SRIOV_VFM_UA 0x0 /* Inactive.Unavailable */ #define PCI_SRIOV_VFM_MI 0x1 /* Dormant.MigrateIn */ #define PCI_SRIOV_VFM_MO 0x2 /* Active.MigrateOut */ #define PCI_SRIOV_VFM_AV 0x3 /* Active.Available */ #define PCI_EXT_CAP_SRIOV_SIZEOF 64 #define PCI_LTR_MAX_SNOOP_LAT 0x4 #define PCI_LTR_MAX_NOSNOOP_LAT 0x6 #define PCI_LTR_VALUE_MASK 0x000003ff #define PCI_LTR_SCALE_MASK 0x00001c00 #define PCI_LTR_SCALE_SHIFT 10 #define PCI_EXT_CAP_LTR_SIZEOF 8 /* Access Control Service */ #define PCI_ACS_CAP 0x04 
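/*
 * Illustrative sketch, not part of the original header: decoding an LTR
 * latency register with the masks above.  Each scale step multiplies the
 * 10-bit value field by 32, i.e. latency_ns = value << (5 * scale); scale
 * codes above 5 are reserved and not checked here.
 */
static inline unsigned long my_ltr_to_ns(unsigned int reg)
{
	unsigned long value = reg & PCI_LTR_VALUE_MASK;
	unsigned int scale = (reg & PCI_LTR_SCALE_MASK) >> PCI_LTR_SCALE_SHIFT;

	return value << (5 * scale);
}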
/* ACS Capability Register */ #define PCI_ACS_SV 0x01 /* Source Validation */ #define PCI_ACS_TB 0x02 /* Translation Blocking */ #define PCI_ACS_RR 0x04 /* P2P Request Redirect */ #define PCI_ACS_CR 0x08 /* P2P Completion Redirect */ #define PCI_ACS_UF 0x10 /* Upstream Forwarding */ #define PCI_ACS_EC 0x20 /* P2P Egress Control */ #define PCI_ACS_DT 0x40 /* Direct Translated P2P */ #define PCI_ACS_EGRESS_BITS 0x05 /* ACS Egress Control Vector Size */ #define PCI_ACS_CTRL 0x06 /* ACS Control Register */ #define PCI_ACS_EGRESS_CTL_V 0x08 /* ACS Egress Control Vector */ #define PCI_VSEC_HDR 4 /* extended cap - vendor-specific */ #define PCI_VSEC_HDR_LEN_SHIFT 20 /* shift for length field */ /* SATA capability */ #define PCI_SATA_REGS 4 /* SATA REGs specifier */ #define PCI_SATA_REGS_MASK 0xF /* location - BAR#/inline */ #define PCI_SATA_REGS_INLINE 0xF /* REGS in config space */ #define PCI_SATA_SIZEOF_SHORT 8 #define PCI_SATA_SIZEOF_LONG 16 /* Resizable BARs */ #define PCI_REBAR_CAP 4 /* capability register */ #define PCI_REBAR_CAP_SIZES 0x00FFFFF0 /* supported BAR sizes */ #define PCI_REBAR_CTRL 8 /* control register */ #define PCI_REBAR_CTRL_BAR_IDX 0x00000007 /* BAR index */ #define PCI_REBAR_CTRL_NBAR_MASK 0x000000E0 /* # of resizable BARs */ #define PCI_REBAR_CTRL_NBAR_SHIFT 5 /* shift for # of BARs */ #define PCI_REBAR_CTRL_BAR_SIZE 0x00001F00 /* BAR size */ #define PCI_REBAR_CTRL_BAR_SHIFT 8 /* shift for BAR size */ /* Dynamic Power Allocation */ #define PCI_DPA_CAP 4 /* capability register */ #define PCI_DPA_CAP_SUBSTATE_MASK 0x1F /* # substates - 1 */ #define PCI_DPA_BASE_SIZEOF 16 /* size with 0 substates */ /* TPH Requester */ #define PCI_TPH_CAP 4 /* capability register */ #define PCI_TPH_CAP_LOC_MASK 0x600 /* location mask */ #define PCI_TPH_LOC_NONE 0x000 /* no location */ #define PCI_TPH_LOC_CAP 0x200 /* in capability */ #define PCI_TPH_LOC_MSIX 0x400 /* in MSI-X */ #define PCI_TPH_CAP_ST_MASK 0x07FF0000 /* st table mask */ #define PCI_TPH_CAP_ST_SHIFT 16 /* st table shift */ #define PCI_TPH_BASE_SIZEOF 12 /* size with no st table */ /* Downstream Port Containment */ #define PCI_EXP_DPC_CAP 4 /* DPC Capability */ #define PCI_EXP_DPC_IRQ 0x001F /* Interrupt Message Number */ #define PCI_EXP_DPC_CAP_RP_EXT 0x0020 /* Root Port Extensions */ #define PCI_EXP_DPC_CAP_POISONED_TLP 0x0040 /* Poisoned TLP Egress Blocking Supported */ #define PCI_EXP_DPC_CAP_SW_TRIGGER 0x0080 /* Software Triggering Supported */ #define PCI_EXP_DPC_RP_PIO_LOG_SIZE 0x0F00 /* RP PIO Log Size */ #define PCI_EXP_DPC_CAP_DL_ACTIVE 0x1000 /* ERR_COR signal on DL_Active supported */ #define PCI_EXP_DPC_CTL 6 /* DPC control */ #define PCI_EXP_DPC_CTL_EN_FATAL 0x0001 /* Enable trigger on ERR_FATAL message */ #define PCI_EXP_DPC_CTL_EN_NONFATAL 0x0002 /* Enable trigger on ERR_NONFATAL message */ #define PCI_EXP_DPC_CTL_INT_EN 0x0008 /* DPC Interrupt Enable */ #define PCI_EXP_DPC_STATUS 8 /* DPC Status */ #define PCI_EXP_DPC_STATUS_TRIGGER 0x0001 /* Trigger Status */ #define PCI_EXP_DPC_STATUS_TRIGGER_RSN 0x0006 /* Trigger Reason */ #define PCI_EXP_DPC_STATUS_INTERRUPT 0x0008 /* Interrupt Status */ #define PCI_EXP_DPC_RP_BUSY 0x0010 /* Root Port Busy */ #define PCI_EXP_DPC_STATUS_TRIGGER_RSN_EXT 0x0060 /* Trig Reason Extension */ #define PCI_EXP_DPC_SOURCE_ID 10 /* DPC Source Identifier */ #define PCI_EXP_DPC_RP_PIO_STATUS 0x0C /* RP PIO Status */ #define PCI_EXP_DPC_RP_PIO_MASK 0x10 /* RP PIO Mask */ #define PCI_EXP_DPC_RP_PIO_SEVERITY 0x14 /* RP PIO Severity */ #define PCI_EXP_DPC_RP_PIO_SYSERROR 0x18 /* RP PIO 
SysError */ #define PCI_EXP_DPC_RP_PIO_EXCEPTION 0x1C /* RP PIO Exception */ #define PCI_EXP_DPC_RP_PIO_HEADER_LOG 0x20 /* RP PIO Header Log */ #define PCI_EXP_DPC_RP_PIO_IMPSPEC_LOG 0x30 /* RP PIO ImpSpec Log */ #define PCI_EXP_DPC_RP_PIO_TLPPREFIX_LOG 0x34 /* RP PIO TLP Prefix Log */ /* Precision Time Measurement */ #define PCI_PTM_CAP 0x04 /* PTM Capability */ #define PCI_PTM_CAP_REQ 0x00000001 /* Requester capable */ #define PCI_PTM_CAP_ROOT 0x00000004 /* Root capable */ #define PCI_PTM_GRANULARITY_MASK 0x0000FF00 /* Clock granularity */ #define PCI_PTM_CTRL 0x08 /* PTM Control */ #define PCI_PTM_CTRL_ENABLE 0x00000001 /* PTM enable */ #define PCI_PTM_CTRL_ROOT 0x00000002 /* Root select */ /* ASPM L1 PM Substates */ #define PCI_L1SS_CAP 0x04 /* Capabilities Register */ #define PCI_L1SS_CAP_PCIPM_L1_2 0x00000001 /* PCI-PM L1.2 Supported */ #define PCI_L1SS_CAP_PCIPM_L1_1 0x00000002 /* PCI-PM L1.1 Supported */ #define PCI_L1SS_CAP_ASPM_L1_2 0x00000004 /* ASPM L1.2 Supported */ #define PCI_L1SS_CAP_ASPM_L1_1 0x00000008 /* ASPM L1.1 Supported */ #define PCI_L1SS_CAP_L1_PM_SS 0x00000010 /* L1 PM Substates Supported */ #define PCI_L1SS_CAP_CM_RESTORE_TIME 0x0000ff00 /* Port Common_Mode_Restore_Time */ #define PCI_L1SS_CAP_P_PWR_ON_SCALE 0x00030000 /* Port T_POWER_ON scale */ #define PCI_L1SS_CAP_P_PWR_ON_VALUE 0x00f80000 /* Port T_POWER_ON value */ #define PCI_L1SS_CTL1 0x08 /* Control 1 Register */ #define PCI_L1SS_CTL1_PCIPM_L1_2 0x00000001 /* PCI-PM L1.2 Enable */ #define PCI_L1SS_CTL1_PCIPM_L1_1 0x00000002 /* PCI-PM L1.1 Enable */ #define PCI_L1SS_CTL1_ASPM_L1_2 0x00000004 /* ASPM L1.2 Enable */ #define PCI_L1SS_CTL1_ASPM_L1_1 0x00000008 /* ASPM L1.1 Enable */ #define PCI_L1SS_CTL1_L1SS_MASK 0x0000000f #define PCI_L1SS_CTL1_CM_RESTORE_TIME 0x0000ff00 /* Common_Mode_Restore_Time */ #define PCI_L1SS_CTL1_LTR_L12_TH_VALUE 0x03ff0000 /* LTR_L1.2_THRESHOLD_Value */ #define PCI_L1SS_CTL1_LTR_L12_TH_SCALE 0xe0000000 /* LTR_L1.2_THRESHOLD_Scale */ #define PCI_L1SS_CTL2 0x0c /* Control 2 Register */ #endif /* LINUX_PCI_REGS_H */ backport-iwlwifi-dkms-7906/intc-scripts/000077500000000000000000000000001351064634500203235ustar00rootroot00000000000000backport-iwlwifi-dkms-7906/intc-scripts/mv-compat-mod.py000077500000000000000000000066541351064634500233730ustar00rootroot00000000000000#!/usr/bin/env python ############################################################################ # # This file is provided under a dual BSD/GPLv2 license. When using or # redistributing this file, you may do so under either license. # # GPL LICENSE SUMMARY # # Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved. # # This program is free software; you can redistribute it and/or modify # it under the terms of version 2 of the GNU General Public License as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, # USA # # The full GNU General Public License is included in this distribution # in the file called COPYING. # # Contact Information: # Intel Linux Wireless # Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 # # BSD LICENSE # # Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materials provided with the # distribution. # * Neither the name Intel Corporation nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ############################################################################# import sys, os, shutil if __name__ == "__main__": if len(sys.argv) < 3: raise Exception("Usage: %s ") toplevel_mod = sys.argv[2] basename = os.path.basename(sys.argv[1]) # don't change the name of the toplevel module - the kernel loads it # directly if basename.find(toplevel_mod) != -1: new_name = basename else: new_name = "compat_" + os.path.basename(sys.argv[1]) shutil.move(sys.argv[1], os.path.dirname(sys.argv[1]) + "/" + new_name) print "module copied to %s" % new_name sys.exit(0) backport-iwlwifi-dkms-7906/intc-scripts/ren-compat-aliases.py000077500000000000000000000072361351064634500243740ustar00rootroot00000000000000#!/usr/bin/env python ############################################################################ # # This file is provided under a dual BSD/GPLv2 license. When using or # redistributing this file, you may do so under either license. # # GPL LICENSE SUMMARY # # Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved. # # This program is free software; you can redistribute it and/or modify # it under the terms of version 2 of the GNU General Public License as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, # USA # # The full GNU General Public License is included in this distribution # in the file called COPYING. # # Contact Information: # Intel Linux Wireless # Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 # # BSD LICENSE # # Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materials provided with the # distribution. # * Neither the name Intel Corporation nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ############################################################################# import sys, os def compat_name(modname): return "compat_" + modname def ren_compat_aliases(filename, modname): fd = open(filename, "rb") data = fd.read() fd.close() res_lines = list() for line in data.split("\n"): # rename the modules built by compat items = line.split() if len(items) > 0 and items[-1] == modname: items[-1] = compat_name(modname) res_lines.append(" ".join(items)) else: res_lines.append(line) res_data = "\n".join(res_lines) fd = open(filename, "wb") fd.write(res_data) fd.close() print "compat aliases successfully resolved in %s!" % filename if __name__ == "__main__": if len(sys.argv) < 3: raise Exception("Usage: %s " % sys.argv[0]) ren_compat_aliases(sys.argv[1], sys.argv[2]) sys.exit(0) backport-iwlwifi-dkms-7906/intc-scripts/ren-compat-deps.py000077500000000000000000000100641351064634500236770ustar00rootroot00000000000000#!/usr/bin/env python ############################################################################ # # This file is provided under a dual BSD/GPLv2 license. When using or # redistributing this file, you may do so under either license. # # GPL LICENSE SUMMARY # # Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved. # # This program is free software; you can redistribute it and/or modify # it under the terms of version 2 of the GNU General Public License as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. 
# # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, # USA # # The full GNU General Public License is included in this distribution # in the file called COPYING. # # Contact Information: # Intel Linux Wireless # Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 # # BSD LICENSE # # Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materials provided with the # distribution. # * Neither the name Intel Corporation nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ############################################################################# import sys, os # this works for files of the form "a.ko:" as well, so we can use this # for the dependencies part def compat_name(filename, toplevel_mod): # we can't change the name of the toplevel module, as iwlwifi loads # it explicitly using load_module() if os.path.basename(filename).find(toplevel_mod) != -1: return filename return os.path.dirname(filename) + "/compat_" + os.path.basename(filename) def ren_compat_deps(filename, compat_prefix, toplevel_mod): fd = open(filename, "rb") data = fd.read() fd.close() res_lines = list() for line in data.split("\n"): # rename the modules built by compat, delete all other lines if line.find(compat_prefix) == 0: res_line = " ".join([compat_name(item, toplevel_mod) for item in line.split()]) res_lines.append(res_line) res_data = "\n".join(res_lines) res_data += "\n" fd = open(filename, "wb") fd.write(res_data) fd.close() print "compat deps successfully resolved in %s!" 
% filename if __name__ == "__main__": if len(sys.argv) < 4: raise Exception("Usage: %s " % sys.argv[0]) ren_compat_deps(sys.argv[1], sys.argv[2], sys.argv[3]) sys.exit(0) backport-iwlwifi-dkms-7906/kconf/000077500000000000000000000000001351064634500170015ustar00rootroot00000000000000backport-iwlwifi-dkms-7906/kconf/Makefile000066400000000000000000000010331351064634500204360ustar00rootroot00000000000000CFLAGS=-Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer LXDIALOG := lxdialog/checklist.o lxdialog/inputbox.o lxdialog/menubox.o lxdialog/textbox.o lxdialog/util.o lxdialog/yesno.o conf: conf.o zconf.tab.o mconf_CFLAGS := $(shell ./lxdialog/check-lxdialog.sh -ccflags) -DLOCALE mconf_LDFLAGS := $(shell ./lxdialog/check-lxdialog.sh -ldflags $(CC)) mconf: CFLAGS += $(mconf_CFLAGS) mconf: mconf.o zconf.tab.o $(LXDIALOG) $(CC) -o mconf $^ $(mconf_LDFLAGS) .PHONY: clean clean: @rm -f mconf conf *.o lxdialog/*.o backport-iwlwifi-dkms-7906/kconf/conf.c000066400000000000000000000373731351064634500201070ustar00rootroot00000000000000/* * Copyright (C) 2002 Roman Zippel * Released under the terms of the GNU GPL v2.0. */ #include #include #include #include #include #include #include #include #include #include #include #include #include "lkc.h" static void conf(struct menu *menu); static void check_conf(struct menu *menu); static void xfgets(char *str, int size, FILE *in); enum input_mode { oldaskconfig, silentoldconfig, oldconfig, allnoconfig, allyesconfig, allmodconfig, alldefconfig, randconfig, defconfig, savedefconfig, listnewconfig, olddefconfig, } input_mode = oldaskconfig; static int indent = 1; static int tty_stdio; static int valid_stdin = 1; static int sync_kconfig; static int conf_cnt; static char line[PATH_MAX]; static struct menu *rootEntry; static void print_help(struct menu *menu) { struct gstr help = str_new(); menu_get_ext_help(menu, &help); printf("\n%s\n", str_get(&help)); str_free(&help); } static void strip(char *str) { char *p = str; int l; while ((isspace(*p))) p++; l = strlen(p); if (p != str) memmove(str, p, l + 1); if (!l) return; p = str + l - 1; while ((isspace(*p))) *p-- = 0; } static void check_stdin(void) { if (!valid_stdin) { printf(_("aborted!\n\n")); printf(_("Console input/output is redirected. 
")); printf(_("Run 'make oldconfig' to update configuration.\n\n")); exit(1); } } static int conf_askvalue(struct symbol *sym, const char *def) { enum symbol_type type = sym_get_type(sym); if (!sym_has_value(sym)) printf(_("(NEW) ")); line[0] = '\n'; line[1] = 0; if (!sym_is_changable(sym)) { printf("%s\n", def); line[0] = '\n'; line[1] = 0; return 0; } switch (input_mode) { case oldconfig: case silentoldconfig: if (sym_has_value(sym)) { printf("%s\n", def); return 0; } check_stdin(); /* fall through */ case oldaskconfig: fflush(stdout); xfgets(line, sizeof(line), stdin); if (!tty_stdio) printf("\n"); return 1; default: break; } switch (type) { case S_INT: case S_HEX: case S_STRING: printf("%s\n", def); return 1; default: ; } printf("%s", line); return 1; } static int conf_string(struct menu *menu) { struct symbol *sym = menu->sym; const char *def; while (1) { printf("%*s%s ", indent - 1, "", _(menu->prompt->text)); printf("(%s) ", sym->name); def = sym_get_string_value(sym); if (sym_get_string_value(sym)) printf("[%s] ", def); if (!conf_askvalue(sym, def)) return 0; switch (line[0]) { case '\n': break; case '?': /* print help */ if (line[1] == '\n') { print_help(menu); def = NULL; break; } /* fall through */ default: line[strlen(line)-1] = 0; def = line; } if (def && sym_set_string_value(sym, def)) return 0; } } static int conf_sym(struct menu *menu) { struct symbol *sym = menu->sym; tristate oldval, newval; while (1) { printf("%*s%s ", indent - 1, "", _(menu->prompt->text)); if (sym->name) printf("(%s) ", sym->name); putchar('['); oldval = sym_get_tristate_value(sym); switch (oldval) { case no: putchar('N'); break; case mod: putchar('M'); break; case yes: putchar('Y'); break; } if (oldval != no && sym_tristate_within_range(sym, no)) printf("/n"); if (oldval != mod && sym_tristate_within_range(sym, mod)) printf("/m"); if (oldval != yes && sym_tristate_within_range(sym, yes)) printf("/y"); if (menu_has_help(menu)) printf("/?"); printf("] "); if (!conf_askvalue(sym, sym_get_string_value(sym))) return 0; strip(line); switch (line[0]) { case 'n': case 'N': newval = no; if (!line[1] || !strcmp(&line[1], "o")) break; continue; case 'm': case 'M': newval = mod; if (!line[1]) break; continue; case 'y': case 'Y': newval = yes; if (!line[1] || !strcmp(&line[1], "es")) break; continue; case 0: newval = oldval; break; case '?': goto help; default: continue; } if (sym_set_tristate_value(sym, newval)) return 0; help: print_help(menu); } } static int conf_choice(struct menu *menu) { struct symbol *sym, *def_sym; struct menu *child; bool is_new; sym = menu->sym; is_new = !sym_has_value(sym); if (sym_is_changable(sym)) { conf_sym(menu); sym_calc_value(sym); switch (sym_get_tristate_value(sym)) { case no: return 1; case mod: return 0; case yes: break; } } else { switch (sym_get_tristate_value(sym)) { case no: return 1; case mod: printf("%*s%s\n", indent - 1, "", _(menu_get_prompt(menu))); return 0; case yes: break; } } while (1) { int cnt, def; printf("%*s%s\n", indent - 1, "", _(menu_get_prompt(menu))); def_sym = sym_get_choice_value(sym); cnt = def = 0; line[0] = 0; for (child = menu->list; child; child = child->next) { if (!menu_is_visible(child)) continue; if (!child->sym) { printf("%*c %s\n", indent, '*', _(menu_get_prompt(child))); continue; } cnt++; if (child->sym == def_sym) { def = cnt; printf("%*c", indent, '>'); } else printf("%*c", indent, ' '); printf(" %d. 
%s", cnt, _(menu_get_prompt(child))); if (child->sym->name) printf(" (%s)", child->sym->name); if (!sym_has_value(child->sym)) printf(_(" (NEW)")); printf("\n"); } printf(_("%*schoice"), indent - 1, ""); if (cnt == 1) { printf("[1]: 1\n"); goto conf_childs; } printf("[1-%d", cnt); if (menu_has_help(menu)) printf("?"); printf("]: "); switch (input_mode) { case oldconfig: case silentoldconfig: if (!is_new) { cnt = def; printf("%d\n", cnt); break; } check_stdin(); /* fall through */ case oldaskconfig: fflush(stdout); xfgets(line, sizeof(line), stdin); strip(line); if (line[0] == '?') { print_help(menu); continue; } if (!line[0]) cnt = def; else if (isdigit(line[0])) cnt = atoi(line); else continue; break; default: break; } conf_childs: for (child = menu->list; child; child = child->next) { if (!child->sym || !menu_is_visible(child)) continue; if (!--cnt) break; } if (!child) continue; if (line[0] && line[strlen(line) - 1] == '?') { print_help(child); continue; } sym_set_choice_value(sym, child->sym); for (child = child->list; child; child = child->next) { indent += 2; conf(child); indent -= 2; } return 1; } } static void conf(struct menu *menu) { struct symbol *sym; struct property *prop; struct menu *child; if (!menu_is_visible(menu)) return; sym = menu->sym; prop = menu->prompt; if (prop) { const char *prompt; switch (prop->type) { case P_MENU: if ((input_mode == silentoldconfig || input_mode == listnewconfig || input_mode == olddefconfig) && rootEntry != menu) { check_conf(menu); return; } /* fall through */ case P_COMMENT: prompt = menu_get_prompt(menu); if (prompt) printf("%*c\n%*c %s\n%*c\n", indent, '*', indent, '*', _(prompt), indent, '*'); default: ; } } if (!sym) goto conf_childs; if (sym_is_choice(sym)) { conf_choice(menu); if (sym->curr.tri != mod) return; goto conf_childs; } switch (sym->type) { case S_INT: case S_HEX: case S_STRING: conf_string(menu); break; default: conf_sym(menu); break; } conf_childs: if (sym) indent += 2; for (child = menu->list; child; child = child->next) conf(child); if (sym) indent -= 2; } static void check_conf(struct menu *menu) { struct symbol *sym; struct menu *child; if (!menu_is_visible(menu)) return; sym = menu->sym; if (sym && !sym_has_value(sym)) { if (sym_is_changable(sym) || (sym_is_choice(sym) && sym_get_tristate_value(sym) == yes)) { if (input_mode == listnewconfig) { if (sym->name && !sym_is_choice_value(sym)) { printf("%s%s\n", CONFIG_, sym->name); } } else if (input_mode != olddefconfig) { if (!conf_cnt++) printf(_("*\n* Restart config...\n*\n")); rootEntry = menu_get_parent_menu(menu); conf(rootEntry); } } } for (child = menu->list; child; child = child->next) check_conf(child); } static struct option long_opts[] = { {"oldaskconfig", no_argument, NULL, oldaskconfig}, {"oldconfig", no_argument, NULL, oldconfig}, {"silentoldconfig", no_argument, NULL, silentoldconfig}, {"defconfig", optional_argument, NULL, defconfig}, {"savedefconfig", required_argument, NULL, savedefconfig}, {"allnoconfig", no_argument, NULL, allnoconfig}, {"allyesconfig", no_argument, NULL, allyesconfig}, {"allmodconfig", no_argument, NULL, allmodconfig}, {"alldefconfig", no_argument, NULL, alldefconfig}, {"randconfig", no_argument, NULL, randconfig}, {"listnewconfig", no_argument, NULL, listnewconfig}, {"olddefconfig", no_argument, NULL, olddefconfig}, /* * oldnoconfig is an alias of olddefconfig, because people already * are dependent on its behavior(sets new symbols to their default * value but not 'n') with the counter-intuitive name. 
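 */

	/*
	 * (Illustrative note, not in the original source: every entry's
	 * val member in this table is an input_mode enumerator, which is
	 * why main() can simply cast getopt_long()'s return value back
	 * with "input_mode = (enum input_mode)opt".)
	 */

	/*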
*/ {"oldnoconfig", no_argument, NULL, olddefconfig}, {NULL, 0, NULL, 0} }; static void conf_usage(const char *progname) { printf("Usage: %s [-s] [option] <kconfig-file>\n", progname); printf("[option] is _one_ of the following:\n"); printf(" --listnewconfig List new options\n"); printf(" --oldaskconfig Start a new configuration using a line-oriented program\n"); printf(" --oldconfig Update a configuration using a provided .config as base\n"); printf(" --silentoldconfig Same as oldconfig, but quietly, additionally update deps\n"); printf(" --olddefconfig Same as silentoldconfig but sets new symbols to their default value\n"); printf(" --oldnoconfig An alias of olddefconfig\n"); printf(" --defconfig <file> New config with default defined in <file>\n"); printf(" --savedefconfig <file> Save the minimal current configuration to <file>\n"); printf(" --allnoconfig New config where all options are answered with no\n"); printf(" --allyesconfig New config where all options are answered with yes\n"); printf(" --allmodconfig New config where all options are answered with mod\n"); printf(" --alldefconfig New config with all symbols set to default\n"); printf(" --randconfig New config with random answer to all options\n"); } int main(int ac, char **av) { const char *progname = av[0]; int opt; const char *name, *defconfig_file = NULL /* gcc uninit */; struct stat tmpstat; setlocale(LC_ALL, ""); bindtextdomain(PACKAGE, LOCALEDIR); textdomain(PACKAGE); tty_stdio = isatty(0) && isatty(1) && isatty(2); while ((opt = getopt_long(ac, av, "s", long_opts, NULL)) != -1) { if (opt == 's') { conf_set_message_callback(NULL); continue; } input_mode = (enum input_mode)opt; switch (opt) { case silentoldconfig: sync_kconfig = 1; break; case defconfig: case savedefconfig: defconfig_file = optarg; break; case randconfig: { struct timeval now; unsigned int seed; char *seed_env; /* * Use microseconds derived seed, * compensate for systems where it may be zero */ gettimeofday(&now, NULL); seed = (unsigned int)((now.tv_sec + 1) * (now.tv_usec + 1)); seed_env = getenv("KCONFIG_SEED"); if( seed_env && *seed_env ) { char *endp; int tmp = (int)strtol(seed_env, &endp, 0); if (*endp == '\0') { seed = tmp; } } fprintf( stderr, "KCONFIG_SEED=0x%X\n", seed ); srand(seed); break; } case oldaskconfig: case oldconfig: case allnoconfig: case allyesconfig: case allmodconfig: case alldefconfig: case listnewconfig: case olddefconfig: break; case '?': conf_usage(progname); exit(1); break; } } if (ac == optind) { printf(_("%s: Kconfig file missing\n"), av[0]); conf_usage(progname); exit(1); } name = av[optind]; conf_parse(name); //zconfdump(stdout); if (sync_kconfig) { name = conf_get_configname(); if (stat(name, &tmpstat)) { fprintf(stderr, _("***\n" "*** Configuration file \"%s\" not found!\n" "***\n" "*** Please run some configurator (e.g. 
\"make oldconfig\" or\n" "*** \"make menuconfig\" or \"make xconfig\").\n" "***\n"), name); exit(1); } } switch (input_mode) { case defconfig: if (!defconfig_file) defconfig_file = conf_get_default_confname(); if (conf_read(defconfig_file)) { printf(_("***\n" "*** Can't find default configuration \"%s\"!\n" "***\n"), defconfig_file); exit(1); } break; case savedefconfig: case silentoldconfig: case oldaskconfig: case oldconfig: case listnewconfig: case olddefconfig: conf_read(NULL); break; case allnoconfig: case allyesconfig: case allmodconfig: case alldefconfig: case randconfig: name = getenv("KCONFIG_ALLCONFIG"); if (!name) break; if ((strcmp(name, "") != 0) && (strcmp(name, "1") != 0)) { if (conf_read_simple(name, S_DEF_USER)) { fprintf(stderr, _("*** Can't read seed configuration \"%s\"!\n"), name); exit(1); } break; } switch (input_mode) { case allnoconfig: name = "allno.config"; break; case allyesconfig: name = "allyes.config"; break; case allmodconfig: name = "allmod.config"; break; case alldefconfig: name = "alldef.config"; break; case randconfig: name = "allrandom.config"; break; default: break; } if (conf_read_simple(name, S_DEF_USER) && conf_read_simple("all.config", S_DEF_USER)) { fprintf(stderr, _("*** KCONFIG_ALLCONFIG set, but no \"%s\" or \"all.config\" file found\n"), name); exit(1); } break; default: break; } if (sync_kconfig) { if (conf_get_changed()) { name = getenv("KCONFIG_NOSILENTUPDATE"); if (name && *name) { fprintf(stderr, _("\n*** The configuration requires explicit update.\n\n")); return 1; } } valid_stdin = tty_stdio; } switch (input_mode) { case allnoconfig: conf_set_all_new_symbols(def_no); break; case allyesconfig: conf_set_all_new_symbols(def_yes); break; case allmodconfig: conf_set_all_new_symbols(def_mod); break; case alldefconfig: conf_set_all_new_symbols(def_default); break; case randconfig: /* Really nothing to do in this loop */ while (conf_set_all_new_symbols(def_random)) ; break; case defconfig: conf_set_all_new_symbols(def_default); break; case savedefconfig: break; case oldaskconfig: rootEntry = &rootmenu; conf(&rootmenu); input_mode = silentoldconfig; /* fall through */ case oldconfig: case listnewconfig: case olddefconfig: case silentoldconfig: /* Update until a loop caused no more changes */ do { conf_cnt = 0; check_conf(&rootmenu); } while (conf_cnt && (input_mode != listnewconfig && input_mode != olddefconfig)); break; } if (sync_kconfig) { /* silentoldconfig is used during the build so we shall update autoconf. * All other commands are only used to generate a config. */ if (conf_get_changed() && conf_write(NULL)) { fprintf(stderr, _("\n*** Error during writing of the configuration.\n\n")); exit(1); } if (conf_write_autoconf()) { fprintf(stderr, _("\n*** Error during update of the configuration.\n\n")); return 1; } } else if (input_mode == savedefconfig) { if (conf_write_defconfig(defconfig_file)) { fprintf(stderr, _("\n*** Error while saving defconfig to: %s\n\n"), defconfig_file); return 1; } } else if (input_mode != listnewconfig) { if (conf_write(NULL)) { fprintf(stderr, _("\n*** Error during writing of the configuration.\n\n")); exit(1); } } return 0; } /* * Helper function to facilitate fgets() by Jean Sacren. */
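/*
 * Illustrative sketch, not part of the original conf.c: the read-then-trim
 * pattern the prompting code above uses -- xfgets() pulls in one answer
 * line and strip() removes surrounding blanks before the first character
 * is inspected.  "my_read_answer" is a made-up helper, not a real symbol.
 */
static int my_read_answer(char *buf, int size)
{
	xfgets(buf, size, stdin);
	strip(buf);
	return buf[0];	/* e.g. 'y', 'n', 'm', '?', or 0 for an empty line */
}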
*/ void xfgets(char *str, int size, FILE *in) { if (fgets(str, size, in) == NULL) fprintf(stderr, "\nError in reading or end of file.\n"); } backport-iwlwifi-dkms-7906/kconf/confdata.c000066400000000000000000000632311351064634500207310ustar00rootroot00000000000000/* * Copyright (C) 2002 Roman Zippel * Released under the terms of the GNU GPL v2.0. */ #include #include #include #include #include #include #include #include #include #include #include "lkc.h" struct conf_printer { void (*print_symbol)(FILE *, struct symbol *, const char *, void *); void (*print_comment)(FILE *, const char *, void *); }; static void conf_warning(const char *fmt, ...) __attribute__ ((format (printf, 1, 2))); static void conf_message(const char *fmt, ...) __attribute__ ((format (printf, 1, 2))); static const char *conf_filename; static int conf_lineno, conf_warnings, conf_unsaved; const char conf_defname[] = "arch/$ARCH/defconfig"; static void conf_warning(const char *fmt, ...) { va_list ap; va_start(ap, fmt); fprintf(stderr, "%s:%d:warning: ", conf_filename, conf_lineno); vfprintf(stderr, fmt, ap); fprintf(stderr, "\n"); va_end(ap); conf_warnings++; } static void conf_default_message_callback(const char *fmt, va_list ap) { printf("#\n# "); vprintf(fmt, ap); printf("\n#\n"); } static void (*conf_message_callback) (const char *fmt, va_list ap) = conf_default_message_callback; void conf_set_message_callback(void (*fn) (const char *fmt, va_list ap)) { conf_message_callback = fn; } static void conf_message(const char *fmt, ...) { va_list ap; va_start(ap, fmt); if (conf_message_callback) conf_message_callback(fmt, ap); va_end(ap); } const char *conf_get_configname(void) { char *name = getenv("KCONFIG_CONFIG"); return name ? name : ".config"; } const char *conf_get_autoconfig_name(void) { char *name = getenv("KCONFIG_AUTOCONFIG"); return name ? 
name : "include/config/auto.conf"; } static char *conf_expand_value(const char *in) { struct symbol *sym; const char *src; static char res_value[SYMBOL_MAXLENGTH]; char *dst, name[SYMBOL_MAXLENGTH]; res_value[0] = 0; dst = name; while ((src = strchr(in, '$'))) { strncat(res_value, in, src - in); src++; dst = name; while (isalnum(*src) || *src == '_') *dst++ = *src++; *dst = 0; sym = sym_lookup(name, 0); sym_calc_value(sym); strcat(res_value, sym_get_string_value(sym)); in = src; } strcat(res_value, in); return res_value; } char *conf_get_default_confname(void) { struct stat buf; static char fullname[PATH_MAX+1]; char *env, *name; name = conf_expand_value(conf_defname); env = getenv(SRCTREE); if (env) { sprintf(fullname, "%s/%s", env, name); if (!stat(fullname, &buf)) return fullname; } return name; } static int conf_set_sym_val(struct symbol *sym, int def, int def_flags, char *p) { char *p2; switch (sym->type) { case S_TRISTATE: if (p[0] == 'm') { sym->def[def].tri = mod; sym->flags |= def_flags; break; } /* fall through */ case S_BOOLEAN: if (p[0] == 'y') { sym->def[def].tri = yes; sym->flags |= def_flags; break; } if (p[0] == 'n') { sym->def[def].tri = no; sym->flags |= def_flags; break; } if (def != S_DEF_AUTO) conf_warning("symbol value '%s' invalid for %s", p, sym->name); return 1; case S_OTHER: if (*p != '"') { for (p2 = p; *p2 && !isspace(*p2); p2++) ; sym->type = S_STRING; goto done; } /* fall through */ case S_STRING: if (*p++ != '"') break; for (p2 = p; (p2 = strpbrk(p2, "\"\\")); p2++) { if (*p2 == '"') { *p2 = 0; break; } memmove(p2, p2 + 1, strlen(p2)); } if (!p2) { if (def != S_DEF_AUTO) conf_warning("invalid string found"); return 1; } /* fall through */ case S_INT: case S_HEX: done: if (sym_string_valid(sym, p)) { sym->def[def].val = strdup(p); sym->flags |= def_flags; } else { if (def != S_DEF_AUTO) conf_warning("symbol value '%s' invalid for %s", p, sym->name); return 1; } break; default: ; } return 0; } #define LINE_GROWTH 16 static int add_byte(int c, char **lineptr, size_t slen, size_t *n) { char *nline; size_t new_size = slen + 1; if (new_size > *n) { new_size += LINE_GROWTH - 1; new_size *= 2; nline = realloc(*lineptr, new_size); if (!nline) return -1; *lineptr = nline; *n = new_size; } (*lineptr)[slen] = c; return 0; } static ssize_t compat_getline(char **lineptr, size_t *n, FILE *stream) { char *line = *lineptr; size_t slen = 0; for (;;) { int c = getc(stream); switch (c) { case '\n': if (add_byte(c, &line, slen, n) < 0) goto e_out; slen++; /* fall through */ case EOF: if (add_byte('\0', &line, slen, n) < 0) goto e_out; *lineptr = line; if (slen == 0) return -1; return slen; default: if (add_byte(c, &line, slen, n) < 0) goto e_out; slen++; } } e_out: line[slen-1] = '\0'; *lineptr = line; return -1; } int conf_read_simple(const char *name, int def) { FILE *in = NULL; char *line = NULL; size_t line_asize = 0; char *p, *p2; struct symbol *sym; int i, def_flags; if (name) { in = zconf_fopen(name); } else { struct property *prop; name = conf_get_configname(); in = zconf_fopen(name); if (in) goto load; sym_add_change_count(1); if (!sym_defconfig_list) return 1; for_all_defaults(sym_defconfig_list, prop) { if (expr_calc_value(prop->visible.expr) == no || prop->expr->type != E_SYMBOL) continue; name = conf_expand_value(prop->expr->left.sym->name); in = zconf_fopen(name); if (in) { conf_message(_("using defaults found in %s"), name); goto load; } } } if (!in) return 1; load: conf_filename = name; conf_lineno = 0; conf_warnings = 0; conf_unsaved = 0; def_flags = SYMBOL_DEF 
<< def; for_all_symbols(i, sym) { sym->flags |= SYMBOL_CHANGED; sym->flags &= ~(def_flags|SYMBOL_VALID); if (sym_is_choice(sym)) sym->flags |= def_flags; switch (sym->type) { case S_INT: case S_HEX: case S_STRING: if (sym->def[def].val) free(sym->def[def].val); /* fall through */ default: sym->def[def].val = NULL; sym->def[def].tri = no; } } while (compat_getline(&line, &line_asize, in) != -1) { conf_lineno++; sym = NULL; if (line[0] == '#') { if (memcmp(line + 2, CONFIG_, strlen(CONFIG_))) continue; p = strchr(line + 2 + strlen(CONFIG_), ' '); if (!p) continue; *p++ = 0; if (strncmp(p, "is not set", 10)) continue; if (def == S_DEF_USER) { sym = sym_find(line + 2 + strlen(CONFIG_)); if (!sym) { sym_add_change_count(1); goto setsym; } } else { sym = sym_lookup(line + 2 + strlen(CONFIG_), 0); if (sym->type == S_UNKNOWN) sym->type = S_BOOLEAN; } if (sym->flags & def_flags) { conf_warning("override: reassigning to symbol %s", sym->name); } switch (sym->type) { case S_BOOLEAN: case S_TRISTATE: sym->def[def].tri = no; sym->flags |= def_flags; break; default: ; } } else if (memcmp(line, CONFIG_, strlen(CONFIG_)) == 0) { p = strchr(line + strlen(CONFIG_), '='); if (!p) continue; *p++ = 0; p2 = strchr(p, '\n'); if (p2) { *p2-- = 0; if (*p2 == '\r') *p2 = 0; } if (def == S_DEF_USER) { sym = sym_find(line + strlen(CONFIG_)); if (!sym) { sym_add_change_count(1); goto setsym; } } else { sym = sym_lookup(line + strlen(CONFIG_), 0); if (sym->type == S_UNKNOWN) sym->type = S_OTHER; } if (sym->flags & def_flags) { conf_warning("override: reassigning to symbol %s", sym->name); } if (conf_set_sym_val(sym, def, def_flags, p)) continue; } else { if (line[0] != '\r' && line[0] != '\n') conf_warning("unexpected data: %.*s", (int)strcspn(line, "\r\n"), line); continue; } setsym: if (sym && sym_is_choice_value(sym)) { struct symbol *cs = prop_get_symbol(sym_get_choice_prop(sym)); switch (sym->def[def].tri) { case no: break; case mod: if (cs->def[def].tri == yes) { conf_warning("%s creates inconsistent choice state", sym->name); cs->flags &= ~def_flags; } break; case yes: if (cs->def[def].tri != no) conf_warning("override: %s changes choice state", sym->name); cs->def[def].val = sym; break; } cs->def[def].tri = EXPR_OR(cs->def[def].tri, sym->def[def].tri); } } free(line); fclose(in); return 0; } int conf_read(const char *name) { struct symbol *sym; int i; sym_set_change_count(0); if (conf_read_simple(name, S_DEF_USER)) { sym_calc_value(modules_sym); return 1; } sym_calc_value(modules_sym); for_all_symbols(i, sym) { sym_calc_value(sym); if (sym_is_choice(sym) || (sym->flags & SYMBOL_AUTO)) continue; if (sym_has_value(sym) && (sym->flags & SYMBOL_WRITE)) { /* check that calculated value agrees with saved value */ switch (sym->type) { case S_BOOLEAN: case S_TRISTATE: if (sym->def[S_DEF_USER].tri != sym_get_tristate_value(sym)) break; if (!sym_is_choice(sym)) continue; /* fall through */ default: if (!strcmp(sym->curr.val, sym->def[S_DEF_USER].val)) continue; break; } } else if (!sym_has_value(sym) && !(sym->flags & SYMBOL_WRITE)) /* no previous value and not saved */ continue; conf_unsaved++; /* maybe print value in verbose mode... */ } for_all_symbols(i, sym) { if (sym_has_value(sym) && !sym_is_choice_value(sym)) { /* Reset values of generates values, so they'll appear * as new, if they should become visible, but that * doesn't quite work if the Kconfig and the saved * configuration disagree. 
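 */

	/*
	 * Illustrative note, not in the original source: conf_read_simple()
	 * above accepts exactly two line shapes from a .config file, e.g.
	 *
	 *	CONFIG_IWLWIFI=m
	 *	# CONFIG_IWLMVM is not set
	 *
	 * Other '#' lines are skipped silently, and any other non-blank
	 * line triggers the "unexpected data" warning.
	 */

	/*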
*/ if (sym->visible == no && !conf_unsaved) sym->flags &= ~SYMBOL_DEF_USER; switch (sym->type) { case S_STRING: case S_INT: case S_HEX: /* Reset a string value if it's out of range */ if (sym_string_within_range(sym, sym->def[S_DEF_USER].val)) break; sym->flags &= ~(SYMBOL_VALID|SYMBOL_DEF_USER); conf_unsaved++; break; default: break; } } } sym_add_change_count(conf_warnings || conf_unsaved); return 0; } /* * Kconfig configuration printer * * This printer is used when generating the resulting configuration after * kconfig invocation and `defconfig' files. Unset symbol might be omitted by * passing a non-NULL argument to the printer. * */ static void kconfig_print_symbol(FILE *fp, struct symbol *sym, const char *value, void *arg) { switch (sym->type) { case S_BOOLEAN: case S_TRISTATE: if (*value == 'n') { bool skip_unset = (arg != NULL); if (!skip_unset) fprintf(fp, "# %s%s is not set\n", CONFIG_, sym->name); return; } break; default: break; } fprintf(fp, "%s%s=%s\n", CONFIG_, sym->name, value); } static void kconfig_print_comment(FILE *fp, const char *value, void *arg) { const char *p = value; size_t l; for (;;) { l = strcspn(p, "\n"); fprintf(fp, "#"); if (l) { fprintf(fp, " "); xfwrite(p, l, 1, fp); p += l; } fprintf(fp, "\n"); if (*p++ == '\0') break; } } static struct conf_printer kconfig_printer_cb = { .print_symbol = kconfig_print_symbol, .print_comment = kconfig_print_comment, }; /* * Header printer * * This printer is used when generating the `include/generated/autoconf.h' file. */ static void header_print_symbol(FILE *fp, struct symbol *sym, const char *value, void *arg) { switch (sym->type) { case S_BOOLEAN: case S_TRISTATE: { const char *suffix = ""; switch (*value) { case 'n': break; case 'm': suffix = "_MODULE"; /* fall through */ default: fprintf(fp, "#define %s%s%s 1\n", CONFIG_, sym->name, suffix); } break; } case S_HEX: { const char *prefix = ""; if (value[0] != '0' || (value[1] != 'x' && value[1] != 'X')) prefix = "0x"; fprintf(fp, "#define %s%s %s%s\n", CONFIG_, sym->name, prefix, value); break; } case S_STRING: case S_INT: fprintf(fp, "#define %s%s %s\n", CONFIG_, sym->name, value); break; default: break; } } static void header_print_comment(FILE *fp, const char *value, void *arg) { const char *p = value; size_t l; fprintf(fp, "/*\n"); for (;;) { l = strcspn(p, "\n"); fprintf(fp, " *"); if (l) { fprintf(fp, " "); xfwrite(p, l, 1, fp); p += l; } fprintf(fp, "\n"); if (*p++ == '\0') break; } fprintf(fp, " */\n"); } static struct conf_printer header_printer_cb = { .print_symbol = header_print_symbol, .print_comment = header_print_comment, }; /* * Tristate printer * * This printer is used when generating the `include/config/tristate.conf' file. 
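 */

/*
 * Illustrative sketch, not part of the original confdata.c: what the three
 * printers in this file emit for one tristate symbol FOO currently set to
 * 'm', following kconfig_print_symbol(), header_print_symbol() and
 * tristate_print_symbol().  The FILE pointers are placeholders for the
 * real .config, autoconf.h and tristate.conf streams.
 */
static void my_print_foo_as_module(FILE *conf, FILE *hdr, FILE *tri)
{
	fprintf(conf, "CONFIG_FOO=m\n");               /* .config / auto.conf */
	fprintf(hdr, "#define CONFIG_FOO_MODULE 1\n"); /* autoconf.h */
	fprintf(tri, "CONFIG_FOO=M\n");                /* tristate.conf */
}

/*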
*/ static void tristate_print_symbol(FILE *fp, struct symbol *sym, const char *value, void *arg) { if (sym->type == S_TRISTATE && *value != 'n') fprintf(fp, "%s%s=%c\n", CONFIG_, sym->name, (char)toupper(*value)); } static struct conf_printer tristate_printer_cb = { .print_symbol = tristate_print_symbol, .print_comment = kconfig_print_comment, }; static void conf_write_symbol(FILE *fp, struct symbol *sym, struct conf_printer *printer, void *printer_arg) { const char *str; switch (sym->type) { case S_OTHER: case S_UNKNOWN: break; case S_STRING: str = sym_get_string_value(sym); str = sym_escape_string_value(str); printer->print_symbol(fp, sym, str, printer_arg); free((void *)str); break; default: str = sym_get_string_value(sym); printer->print_symbol(fp, sym, str, printer_arg); } } static void conf_write_heading(FILE *fp, struct conf_printer *printer, void *printer_arg) { char buf[256]; snprintf(buf, sizeof(buf), "\n" "Automatically generated file; DO NOT EDIT.\n" "%s\n", rootmenu.prompt->text); printer->print_comment(fp, buf, printer_arg); } /* * Write out a minimal config. * All values that has default values are skipped as this is redundant. */ int conf_write_defconfig(const char *filename) { struct symbol *sym; struct menu *menu; FILE *out; out = fopen(filename, "w"); if (!out) return 1; sym_clear_all_valid(); /* Traverse all menus to find all relevant symbols */ menu = rootmenu.list; while (menu != NULL) { sym = menu->sym; if (sym == NULL) { if (!menu_is_visible(menu)) goto next_menu; } else if (!sym_is_choice(sym)) { sym_calc_value(sym); if (!(sym->flags & SYMBOL_WRITE)) goto next_menu; sym->flags &= ~SYMBOL_WRITE; /* If we cannot change the symbol - skip */ if (!sym_is_changable(sym)) goto next_menu; /* If symbol equals to default value - skip */ if (strcmp(sym_get_string_value(sym), sym_get_string_default(sym)) == 0) goto next_menu; /* * If symbol is a choice value and equals to the * default for a choice - skip. * But only if value is bool and equal to "y" and * choice is not "optional". 
* (If choice is "optional" then all values can be "n") */ if (sym_is_choice_value(sym)) { struct symbol *cs; struct symbol *ds; cs = prop_get_symbol(sym_get_choice_prop(sym)); ds = sym_choice_default(cs); if (!sym_is_optional(cs) && sym == ds) { if ((sym->type == S_BOOLEAN) && sym_get_tristate_value(sym) == yes) goto next_menu; } } conf_write_symbol(out, sym, &kconfig_printer_cb, NULL); } next_menu: if (menu->list != NULL) { menu = menu->list; } else if (menu->next != NULL) { menu = menu->next; } else { while ((menu = menu->parent)) { if (menu->next != NULL) { menu = menu->next; break; } } } } fclose(out); return 0; } int conf_write(const char *name) { FILE *out; struct symbol *sym; struct menu *menu; const char *basename; const char *str; char dirname[PATH_MAX+1], tmpname[PATH_MAX+1], newname[PATH_MAX+1]; char *env; dirname[0] = 0; if (name && name[0]) { struct stat st; char *slash; if (!stat(name, &st) && S_ISDIR(st.st_mode)) { strcpy(dirname, name); strcat(dirname, "/"); basename = conf_get_configname(); } else if ((slash = strrchr(name, '/'))) { int size = slash - name + 1; memcpy(dirname, name, size); dirname[size] = 0; if (slash[1]) basename = slash + 1; else basename = conf_get_configname(); } else basename = name; } else basename = conf_get_configname(); sprintf(newname, "%s%s", dirname, basename); env = getenv("KCONFIG_OVERWRITECONFIG"); if (!env || !*env) { sprintf(tmpname, "%s.tmpconfig.%d", dirname, (int)getpid()); out = fopen(tmpname, "w"); } else { *tmpname = 0; out = fopen(newname, "w"); } if (!out) return 1; conf_write_heading(out, &kconfig_printer_cb, NULL); if (!conf_get_changed()) sym_clear_all_valid(); menu = rootmenu.list; while (menu) { sym = menu->sym; if (!sym) { if (!menu_is_visible(menu)) goto next; str = menu_get_prompt(menu); fprintf(out, "\n" "#\n" "# %s\n" "#\n", str); } else if (!(sym->flags & SYMBOL_CHOICE)) { sym_calc_value(sym); if (!(sym->flags & SYMBOL_WRITE)) goto next; sym->flags &= ~SYMBOL_WRITE; conf_write_symbol(out, sym, &kconfig_printer_cb, NULL); } next: if (menu->list) { menu = menu->list; continue; } if (menu->next) menu = menu->next; else while ((menu = menu->parent)) { if (menu->next) { menu = menu->next; break; } } } fclose(out); if (*tmpname) { strcat(dirname, basename); strcat(dirname, ".old"); rename(newname, dirname); if (rename(tmpname, newname)) return 1; } conf_message(_("configuration written to %s"), newname); sym_set_change_count(0); return 0; } static int conf_split_config(void) { const char *name; char path[PATH_MAX+1]; char *s, *d, c; struct symbol *sym; struct stat sb; int res, i, fd; name = conf_get_autoconfig_name(); conf_read_simple(name, S_DEF_AUTO); sym_calc_value(modules_sym); if (chdir("include/config")) return 1; res = 0; for_all_symbols(i, sym) { sym_calc_value(sym); if ((sym->flags & SYMBOL_AUTO) || !sym->name) continue; if (sym->flags & SYMBOL_WRITE) { if (sym->flags & SYMBOL_DEF_AUTO) { /* * symbol has old and new value, * so compare them... */ switch (sym->type) { case S_BOOLEAN: case S_TRISTATE: if (sym_get_tristate_value(sym) == sym->def[S_DEF_AUTO].tri) continue; break; case S_STRING: case S_HEX: case S_INT: if (!strcmp(sym_get_string_value(sym), sym->def[S_DEF_AUTO].val)) continue; break; default: break; } } else { /* * If there is no old value, only 'no' (unset) * is allowed as new value. */ switch (sym->type) { case S_BOOLEAN: case S_TRISTATE: if (sym_get_tristate_value(sym) == no) continue; break; default: break; } } } else if (!(sym->flags & SYMBOL_DEF_AUTO)) /* There is neither an old nor a new value. 
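 */

		/*
		 * Illustrative note, not in the original source: the mangling
		 * loop below writes one lower-case touch file per changed
		 * symbol under include/config/, turning '_' into '/', e.g.
		 * IWLWIFI_DEBUG -> include/config/iwlwifi/debug.h, which
		 * kbuild uses for fine-grained dependency tracking.
		 */

		/*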
*/ continue; /* else * There is an old value, but no new value ('no' (unset) * isn't saved in auto.conf, so the old value is always * different from 'no'). */ /* Replace all '_' and append ".h" */ s = sym->name; d = path; while ((c = *s++)) { c = tolower(c); *d++ = (c == '_') ? '/' : c; } strcpy(d, ".h"); /* Assume directory path already exists. */ fd = open(path, O_WRONLY | O_CREAT | O_TRUNC, 0644); if (fd == -1) { if (errno != ENOENT) { res = 1; break; } /* * Create directory components, * unless they exist already. */ d = path; while ((d = strchr(d, '/'))) { *d = 0; if (stat(path, &sb) && mkdir(path, 0755)) { res = 1; goto out; } *d++ = '/'; } /* Try it again. */ fd = open(path, O_WRONLY | O_CREAT | O_TRUNC, 0644); if (fd == -1) { res = 1; break; } } close(fd); } out: if (chdir("../..")) return 1; return res; } int conf_write_autoconf(void) { struct symbol *sym; const char *name; FILE *out, *tristate, *out_h; int i; sym_clear_all_valid(); file_write_dep("include/config/auto.conf.cmd"); if (conf_split_config()) return 1; out = fopen(".tmpconfig", "w"); if (!out) return 1; tristate = fopen(".tmpconfig_tristate", "w"); if (!tristate) { fclose(out); return 1; } out_h = fopen(".tmpconfig.h", "w"); if (!out_h) { fclose(out); fclose(tristate); return 1; } conf_write_heading(out, &kconfig_printer_cb, NULL); conf_write_heading(tristate, &tristate_printer_cb, NULL); conf_write_heading(out_h, &header_printer_cb, NULL); for_all_symbols(i, sym) { sym_calc_value(sym); if (!(sym->flags & SYMBOL_WRITE) || !sym->name) continue; /* write symbol to auto.conf, tristate and header files */ conf_write_symbol(out, sym, &kconfig_printer_cb, (void *)1); conf_write_symbol(tristate, sym, &tristate_printer_cb, (void *)1); conf_write_symbol(out_h, sym, &header_printer_cb, NULL); } fclose(out); fclose(tristate); fclose(out_h); name = getenv("KCONFIG_AUTOHEADER"); if (!name) name = "include/generated/autoconf.h"; if (rename(".tmpconfig.h", name)) return 1; name = getenv("KCONFIG_TRISTATE"); if (!name) name = "include/config/tristate.conf"; if (rename(".tmpconfig_tristate", name)) return 1; name = conf_get_autoconfig_name(); /* * This must be the last step, kbuild has a dependency on auto.conf * and this marks the successful completion of the previous steps. */ if (rename(".tmpconfig", name)) return 1; return 0; } static int sym_change_count; static void (*conf_changed_callback)(void); void sym_set_change_count(int count) { int _sym_change_count = sym_change_count; sym_change_count = count; if (conf_changed_callback && (bool)_sym_change_count != (bool)count) conf_changed_callback(); } void sym_add_change_count(int count) { sym_set_change_count(count + sym_change_count); } bool conf_get_changed(void) { return sym_change_count; } void conf_set_changed_callback(void (*fn)(void)) { conf_changed_callback = fn; } static bool randomize_choice_values(struct symbol *csym) { struct property *prop; struct symbol *sym; struct expr *e; int cnt, def; /* * If choice is mod then we may have more items selected * and if no then no-one. * In both cases stop. 
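 * (For example, a tristate choice whose value is 'm' may legitimately have several values set to 'm' at the same time, so picking exactly one winner at random would be wrong; only a choice forced to 'y' selects a single value.)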
*/ if (csym->curr.tri != yes) return false; prop = sym_get_choice_prop(csym); /* count entries in choice block */ cnt = 0; expr_list_for_each_sym(prop->expr, e, sym) cnt++; /* * find a random value and set it to yes, * set the rest to no so we have only one set */ def = (rand() % cnt); cnt = 0; expr_list_for_each_sym(prop->expr, e, sym) { if (def == cnt++) { sym->def[S_DEF_USER].tri = yes; csym->def[S_DEF_USER].val = sym; } else { sym->def[S_DEF_USER].tri = no; } sym->flags |= SYMBOL_DEF_USER; /* clear VALID to get value calculated */ sym->flags &= ~SYMBOL_VALID; } csym->flags |= SYMBOL_DEF_USER; /* clear VALID to get value calculated */ csym->flags &= ~(SYMBOL_VALID); return true; } void set_all_choice_values(struct symbol *csym) { struct property *prop; struct symbol *sym; struct expr *e; prop = sym_get_choice_prop(csym); /* * Set all non-assigned choice values to no */ expr_list_for_each_sym(prop->expr, e, sym) { if (!sym_has_value(sym)) sym->def[S_DEF_USER].tri = no; } csym->flags |= SYMBOL_DEF_USER; /* clear VALID to get value calculated */ csym->flags &= ~(SYMBOL_VALID | SYMBOL_NEED_SET_CHOICE_VALUES); } bool conf_set_all_new_symbols(enum conf_def_mode mode) { struct symbol *sym, *csym; int i, cnt, pby, pty, ptm; /* pby: probability of boolean = y * pty: probability of tristate = y * ptm: probability of tristate = m */ pby = 50; pty = ptm = 33; /* can't go as the default in switch-case * below, otherwise gcc whines about * -Wmaybe-uninitialized */ if (mode == def_random) { int n, p[3]; char *env = getenv("KCONFIG_PROBABILITY"); n = 0; while( env && *env ) { char *endp; int tmp = strtol( env, &endp, 10 ); if( tmp >= 0 && tmp <= 100 ) { p[n++] = tmp; } else { errno = ERANGE; perror( "KCONFIG_PROBABILITY" ); exit( 1 ); } env = (*endp == ':') ? endp+1 : endp; if( n >=3 ) { break; } } switch( n ) { case 1: pby = p[0]; ptm = pby/2; pty = pby-ptm; break; case 2: pty = p[0]; ptm = p[1]; pby = pty + ptm; break; case 3: pby = p[0]; pty = p[1]; ptm = p[2]; break; } if( pty+ptm > 100 ) { errno = ERANGE; perror( "KCONFIG_PROBABILITY" ); exit( 1 ); } } bool has_changed = false; for_all_symbols(i, sym) { if (sym_has_value(sym) || (sym->flags & SYMBOL_VALID)) continue; switch (sym_get_type(sym)) { case S_BOOLEAN: case S_TRISTATE: has_changed = true; switch (mode) { case def_yes: sym->def[S_DEF_USER].tri = yes; break; case def_mod: sym->def[S_DEF_USER].tri = mod; break; case def_no: if (sym->flags & SYMBOL_ALLNOCONFIG_Y) sym->def[S_DEF_USER].tri = yes; else sym->def[S_DEF_USER].tri = no; break; case def_random: sym->def[S_DEF_USER].tri = no; cnt = rand() % 100; if (sym->type == S_TRISTATE) { if (cnt < pty) sym->def[S_DEF_USER].tri = yes; else if (cnt < (pty+ptm)) sym->def[S_DEF_USER].tri = mod; } else if (cnt < pby) sym->def[S_DEF_USER].tri = yes; break; default: continue; } if (!(sym_is_choice(sym) && mode == def_random)) sym->flags |= SYMBOL_DEF_USER; break; default: break; } } sym_clear_all_valid(); /* * We have different types of choice blocks. * If curr.tri equals mod then several choice symbols * may be selected in one block; in this case we do nothing. * If curr.tri equals yes then only one symbol can be * selected in a choice block and we set it to yes, * and the rest to no.
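 * (Note that the non-random path below only marks the affected symbols with SYMBOL_NEED_SET_CHOICE_VALUES; the actual assignment happens in the second loop, via set_all_choice_values() after sym_calc_value() has recomputed each choice.)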
*/ if (mode != def_random) { for_all_symbols(i, csym) { if ((sym_is_choice(csym) && !sym_has_value(csym)) || sym_is_choice_value(csym)) csym->flags |= SYMBOL_NEED_SET_CHOICE_VALUES; } } for_all_symbols(i, csym) { if (sym_has_value(csym) || !sym_is_choice(csym)) continue; sym_calc_value(csym); if (mode == def_random) has_changed = randomize_choice_values(csym); else { set_all_choice_values(csym); has_changed = true; } } return has_changed; } backport-iwlwifi-dkms-7906/kconf/expr.c000066400000000000000000000657141351064634500201400ustar00rootroot00000000000000/* * Copyright (C) 2002 Roman Zippel * Released under the terms of the GNU GPL v2.0. */ #include #include #include #include "lkc.h" #define DEBUG_EXPR 0 static int expr_eq(struct expr *e1, struct expr *e2); static struct expr *expr_eliminate_yn(struct expr *e); struct expr *expr_alloc_symbol(struct symbol *sym) { struct expr *e = xcalloc(1, sizeof(*e)); e->type = E_SYMBOL; e->left.sym = sym; return e; } struct expr *expr_alloc_one(enum expr_type type, struct expr *ce) { struct expr *e = xcalloc(1, sizeof(*e)); e->type = type; e->left.expr = ce; return e; } struct expr *expr_alloc_two(enum expr_type type, struct expr *e1, struct expr *e2) { struct expr *e = xcalloc(1, sizeof(*e)); e->type = type; e->left.expr = e1; e->right.expr = e2; return e; } struct expr *expr_alloc_comp(enum expr_type type, struct symbol *s1, struct symbol *s2) { struct expr *e = xcalloc(1, sizeof(*e)); e->type = type; e->left.sym = s1; e->right.sym = s2; return e; } struct expr *expr_alloc_and(struct expr *e1, struct expr *e2) { if (!e1) return e2; return e2 ? expr_alloc_two(E_AND, e1, e2) : e1; } struct expr *expr_alloc_or(struct expr *e1, struct expr *e2) { if (!e1) return e2; return e2 ? expr_alloc_two(E_OR, e1, e2) : e1; } struct expr *expr_copy(const struct expr *org) { struct expr *e; if (!org) return NULL; e = xmalloc(sizeof(*org)); memcpy(e, org, sizeof(*org)); switch (org->type) { case E_SYMBOL: e->left = org->left; break; case E_NOT: e->left.expr = expr_copy(org->left.expr); break; case E_EQUAL: case E_GEQ: case E_GTH: case E_LEQ: case E_LTH: case E_UNEQUAL: e->left.sym = org->left.sym; e->right.sym = org->right.sym; break; case E_AND: case E_OR: case E_LIST: e->left.expr = expr_copy(org->left.expr); e->right.expr = expr_copy(org->right.expr); break; default: printf("can't copy type %d\n", e->type); free(e); e = NULL; break; } return e; } void expr_free(struct expr *e) { if (!e) return; switch (e->type) { case E_SYMBOL: break; case E_NOT: expr_free(e->left.expr); return; case E_EQUAL: case E_GEQ: case E_GTH: case E_LEQ: case E_LTH: case E_UNEQUAL: break; case E_OR: case E_AND: expr_free(e->left.expr); expr_free(e->right.expr); break; default: printf("how to free type %d?\n", e->type); break; } free(e); } static int trans_count; #define e1 (*ep1) #define e2 (*ep2) static void __expr_eliminate_eq(enum expr_type type, struct expr **ep1, struct expr **ep2) { if (e1->type == type) { __expr_eliminate_eq(type, &e1->left.expr, &e2); __expr_eliminate_eq(type, &e1->right.expr, &e2); return; } if (e2->type == type) { __expr_eliminate_eq(type, &e1, &e2->left.expr); __expr_eliminate_eq(type, &e1, &e2->right.expr); return; } if (e1->type == E_SYMBOL && e2->type == E_SYMBOL && e1->left.sym == e2->left.sym && (e1->left.sym == &symbol_yes || e1->left.sym == &symbol_no)) return; if (!expr_eq(e1, e2)) return; trans_count++; expr_free(e1); expr_free(e2); switch (type) { case E_OR: e1 = expr_alloc_symbol(&symbol_no); e2 = expr_alloc_symbol(&symbol_no); break; case E_AND: e1 = 
expr_alloc_symbol(&symbol_yes); e2 = expr_alloc_symbol(&symbol_yes); break; default: ; } } void expr_eliminate_eq(struct expr **ep1, struct expr **ep2) { if (!e1 || !e2) return; switch (e1->type) { case E_OR: case E_AND: __expr_eliminate_eq(e1->type, ep1, ep2); default: ; } if (e1->type != e2->type) switch (e2->type) { case E_OR: case E_AND: __expr_eliminate_eq(e2->type, ep1, ep2); default: ; } e1 = expr_eliminate_yn(e1); e2 = expr_eliminate_yn(e2); } #undef e1 #undef e2 static int expr_eq(struct expr *e1, struct expr *e2) { int res, old_count; if (e1->type != e2->type) return 0; switch (e1->type) { case E_EQUAL: case E_GEQ: case E_GTH: case E_LEQ: case E_LTH: case E_UNEQUAL: return e1->left.sym == e2->left.sym && e1->right.sym == e2->right.sym; case E_SYMBOL: return e1->left.sym == e2->left.sym; case E_NOT: return expr_eq(e1->left.expr, e2->left.expr); case E_AND: case E_OR: e1 = expr_copy(e1); e2 = expr_copy(e2); old_count = trans_count; expr_eliminate_eq(&e1, &e2); res = (e1->type == E_SYMBOL && e2->type == E_SYMBOL && e1->left.sym == e2->left.sym); expr_free(e1); expr_free(e2); trans_count = old_count; return res; case E_LIST: case E_RANGE: case E_NONE: /* panic */; } if (DEBUG_EXPR) { expr_fprint(e1, stdout); printf(" = "); expr_fprint(e2, stdout); printf(" ?\n"); } return 0; } static struct expr *expr_eliminate_yn(struct expr *e) { struct expr *tmp; if (e) switch (e->type) { case E_AND: e->left.expr = expr_eliminate_yn(e->left.expr); e->right.expr = expr_eliminate_yn(e->right.expr); if (e->left.expr->type == E_SYMBOL) { if (e->left.expr->left.sym == &symbol_no) { expr_free(e->left.expr); expr_free(e->right.expr); e->type = E_SYMBOL; e->left.sym = &symbol_no; e->right.expr = NULL; return e; } else if (e->left.expr->left.sym == &symbol_yes) { free(e->left.expr); tmp = e->right.expr; *e = *(e->right.expr); free(tmp); return e; } } if (e->right.expr->type == E_SYMBOL) { if (e->right.expr->left.sym == &symbol_no) { expr_free(e->left.expr); expr_free(e->right.expr); e->type = E_SYMBOL; e->left.sym = &symbol_no; e->right.expr = NULL; return e; } else if (e->right.expr->left.sym == &symbol_yes) { free(e->right.expr); tmp = e->left.expr; *e = *(e->left.expr); free(tmp); return e; } } break; case E_OR: e->left.expr = expr_eliminate_yn(e->left.expr); e->right.expr = expr_eliminate_yn(e->right.expr); if (e->left.expr->type == E_SYMBOL) { if (e->left.expr->left.sym == &symbol_no) { free(e->left.expr); tmp = e->right.expr; *e = *(e->right.expr); free(tmp); return e; } else if (e->left.expr->left.sym == &symbol_yes) { expr_free(e->left.expr); expr_free(e->right.expr); e->type = E_SYMBOL; e->left.sym = &symbol_yes; e->right.expr = NULL; return e; } } if (e->right.expr->type == E_SYMBOL) { if (e->right.expr->left.sym == &symbol_no) { free(e->right.expr); tmp = e->left.expr; *e = *(e->left.expr); free(tmp); return e; } else if (e->right.expr->left.sym == &symbol_yes) { expr_free(e->left.expr); expr_free(e->right.expr); e->type = E_SYMBOL; e->left.sym = &symbol_yes; e->right.expr = NULL; return e; } } break; default: ; } return e; } /* * bool FOO!=n => FOO */ struct expr *expr_trans_bool(struct expr *e) { if (!e) return NULL; switch (e->type) { case E_AND: case E_OR: case E_NOT: e->left.expr = expr_trans_bool(e->left.expr); e->right.expr = expr_trans_bool(e->right.expr); break; case E_UNEQUAL: // FOO!=n -> FOO if (e->left.sym->type == S_TRISTATE) { if (e->right.sym == &symbol_no) { e->type = E_SYMBOL; e->right.sym = NULL; } } break; default: ; } return e; } /* * e1 || e2 -> ? 
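 * Try to fold two leaf expressions over the same symbol into a single comparison, e.g. for a tristate symbol (a='y') || (a='m') becomes (a!='n'). Returns the joined expression, or NULL if the pair cannot be combined.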
*/ static struct expr *expr_join_or(struct expr *e1, struct expr *e2) { struct expr *tmp; struct symbol *sym1, *sym2; if (expr_eq(e1, e2)) return expr_copy(e1); if (e1->type != E_EQUAL && e1->type != E_UNEQUAL && e1->type != E_SYMBOL && e1->type != E_NOT) return NULL; if (e2->type != E_EQUAL && e2->type != E_UNEQUAL && e2->type != E_SYMBOL && e2->type != E_NOT) return NULL; if (e1->type == E_NOT) { tmp = e1->left.expr; if (tmp->type != E_EQUAL && tmp->type != E_UNEQUAL && tmp->type != E_SYMBOL) return NULL; sym1 = tmp->left.sym; } else sym1 = e1->left.sym; if (e2->type == E_NOT) { if (e2->left.expr->type != E_SYMBOL) return NULL; sym2 = e2->left.expr->left.sym; } else sym2 = e2->left.sym; if (sym1 != sym2) return NULL; if (sym1->type != S_BOOLEAN && sym1->type != S_TRISTATE) return NULL; if (sym1->type == S_TRISTATE) { if (e1->type == E_EQUAL && e2->type == E_EQUAL && ((e1->right.sym == &symbol_yes && e2->right.sym == &symbol_mod) || (e1->right.sym == &symbol_mod && e2->right.sym == &symbol_yes))) { // (a='y') || (a='m') -> (a!='n') return expr_alloc_comp(E_UNEQUAL, sym1, &symbol_no); } if (e1->type == E_EQUAL && e2->type == E_EQUAL && ((e1->right.sym == &symbol_yes && e2->right.sym == &symbol_no) || (e1->right.sym == &symbol_no && e2->right.sym == &symbol_yes))) { // (a='y') || (a='n') -> (a!='m') return expr_alloc_comp(E_UNEQUAL, sym1, &symbol_mod); } if (e1->type == E_EQUAL && e2->type == E_EQUAL && ((e1->right.sym == &symbol_mod && e2->right.sym == &symbol_no) || (e1->right.sym == &symbol_no && e2->right.sym == &symbol_mod))) { // (a='m') || (a='n') -> (a!='y') return expr_alloc_comp(E_UNEQUAL, sym1, &symbol_yes); } } if (sym1->type == S_BOOLEAN && sym1 == sym2) { if ((e1->type == E_NOT && e1->left.expr->type == E_SYMBOL && e2->type == E_SYMBOL) || (e2->type == E_NOT && e2->left.expr->type == E_SYMBOL && e1->type == E_SYMBOL)) return expr_alloc_symbol(&symbol_yes); } if (DEBUG_EXPR) { printf("optimize ("); expr_fprint(e1, stdout); printf(") || ("); expr_fprint(e2, stdout); printf(")?\n"); } return NULL; } static struct expr *expr_join_and(struct expr *e1, struct expr *e2) { struct expr *tmp; struct symbol *sym1, *sym2; if (expr_eq(e1, e2)) return expr_copy(e1); if (e1->type != E_EQUAL && e1->type != E_UNEQUAL && e1->type != E_SYMBOL && e1->type != E_NOT) return NULL; if (e2->type != E_EQUAL && e2->type != E_UNEQUAL && e2->type != E_SYMBOL && e2->type != E_NOT) return NULL; if (e1->type == E_NOT) { tmp = e1->left.expr; if (tmp->type != E_EQUAL && tmp->type != E_UNEQUAL && tmp->type != E_SYMBOL) return NULL; sym1 = tmp->left.sym; } else sym1 = e1->left.sym; if (e2->type == E_NOT) { if (e2->left.expr->type != E_SYMBOL) return NULL; sym2 = e2->left.expr->left.sym; } else sym2 = e2->left.sym; if (sym1 != sym2) return NULL; if (sym1->type != S_BOOLEAN && sym1->type != S_TRISTATE) return NULL; if ((e1->type == E_SYMBOL && e2->type == E_EQUAL && e2->right.sym == &symbol_yes) || (e2->type == E_SYMBOL && e1->type == E_EQUAL && e1->right.sym == &symbol_yes)) // (a) && (a='y') -> (a='y') return expr_alloc_comp(E_EQUAL, sym1, &symbol_yes); if ((e1->type == E_SYMBOL && e2->type == E_UNEQUAL && e2->right.sym == &symbol_no) || (e2->type == E_SYMBOL && e1->type == E_UNEQUAL && e1->right.sym == &symbol_no)) // (a) && (a!='n') -> (a) return expr_alloc_symbol(sym1); if ((e1->type == E_SYMBOL && e2->type == E_UNEQUAL && e2->right.sym == &symbol_mod) || (e2->type == E_SYMBOL && e1->type == E_UNEQUAL && e1->right.sym == &symbol_mod)) // (a) && (a!='m') -> (a='y') return expr_alloc_comp(E_EQUAL, sym1, 
&symbol_yes); if (sym1->type == S_TRISTATE) { if (e1->type == E_EQUAL && e2->type == E_UNEQUAL) { // (a='b') && (a!='c') -> 'b'='c' ? 'n' : a='b' sym2 = e1->right.sym; if ((e2->right.sym->flags & SYMBOL_CONST) && (sym2->flags & SYMBOL_CONST)) return sym2 != e2->right.sym ? expr_alloc_comp(E_EQUAL, sym1, sym2) : expr_alloc_symbol(&symbol_no); } if (e1->type == E_UNEQUAL && e2->type == E_EQUAL) { // (a='b') && (a!='c') -> 'b'='c' ? 'n' : a='b' sym2 = e2->right.sym; if ((e1->right.sym->flags & SYMBOL_CONST) && (sym2->flags & SYMBOL_CONST)) return sym2 != e1->right.sym ? expr_alloc_comp(E_EQUAL, sym1, sym2) : expr_alloc_symbol(&symbol_no); } if (e1->type == E_UNEQUAL && e2->type == E_UNEQUAL && ((e1->right.sym == &symbol_yes && e2->right.sym == &symbol_no) || (e1->right.sym == &symbol_no && e2->right.sym == &symbol_yes))) // (a!='y') && (a!='n') -> (a='m') return expr_alloc_comp(E_EQUAL, sym1, &symbol_mod); if (e1->type == E_UNEQUAL && e2->type == E_UNEQUAL && ((e1->right.sym == &symbol_yes && e2->right.sym == &symbol_mod) || (e1->right.sym == &symbol_mod && e2->right.sym == &symbol_yes))) // (a!='y') && (a!='m') -> (a='n') return expr_alloc_comp(E_EQUAL, sym1, &symbol_no); if (e1->type == E_UNEQUAL && e2->type == E_UNEQUAL && ((e1->right.sym == &symbol_mod && e2->right.sym == &symbol_no) || (e1->right.sym == &symbol_no && e2->right.sym == &symbol_mod))) // (a!='m') && (a!='n') -> (a='m') return expr_alloc_comp(E_EQUAL, sym1, &symbol_yes); if ((e1->type == E_SYMBOL && e2->type == E_EQUAL && e2->right.sym == &symbol_mod) || (e2->type == E_SYMBOL && e1->type == E_EQUAL && e1->right.sym == &symbol_mod) || (e1->type == E_SYMBOL && e2->type == E_UNEQUAL && e2->right.sym == &symbol_yes) || (e2->type == E_SYMBOL && e1->type == E_UNEQUAL && e1->right.sym == &symbol_yes)) return NULL; } if (DEBUG_EXPR) { printf("optimize ("); expr_fprint(e1, stdout); printf(") && ("); expr_fprint(e2, stdout); printf(")?\n"); } return NULL; } static void expr_eliminate_dups1(enum expr_type type, struct expr **ep1, struct expr **ep2) { #define e1 (*ep1) #define e2 (*ep2) struct expr *tmp; if (e1->type == type) { expr_eliminate_dups1(type, &e1->left.expr, &e2); expr_eliminate_dups1(type, &e1->right.expr, &e2); return; } if (e2->type == type) { expr_eliminate_dups1(type, &e1, &e2->left.expr); expr_eliminate_dups1(type, &e1, &e2->right.expr); return; } if (e1 == e2) return; switch (e1->type) { case E_OR: case E_AND: expr_eliminate_dups1(e1->type, &e1, &e1); default: ; } switch (type) { case E_OR: tmp = expr_join_or(e1, e2); if (tmp) { expr_free(e1); expr_free(e2); e1 = expr_alloc_symbol(&symbol_no); e2 = tmp; trans_count++; } break; case E_AND: tmp = expr_join_and(e1, e2); if (tmp) { expr_free(e1); expr_free(e2); e1 = expr_alloc_symbol(&symbol_yes); e2 = tmp; trans_count++; } break; default: ; } #undef e1 #undef e2 } struct expr *expr_eliminate_dups(struct expr *e) { int oldcount; if (!e) return e; oldcount = trans_count; while (1) { trans_count = 0; switch (e->type) { case E_OR: case E_AND: expr_eliminate_dups1(e->type, &e, &e); default: ; } if (!trans_count) break; e = expr_eliminate_yn(e); } trans_count = oldcount; return e; } struct expr *expr_transform(struct expr *e) { struct expr *tmp; if (!e) return NULL; switch (e->type) { case E_EQUAL: case E_GEQ: case E_GTH: case E_LEQ: case E_LTH: case E_UNEQUAL: case E_SYMBOL: case E_LIST: break; default: e->left.expr = expr_transform(e->left.expr); e->right.expr = expr_transform(e->right.expr); } switch (e->type) { case E_EQUAL: if (e->left.sym->type != S_BOOLEAN) break; if 
(e->right.sym == &symbol_no) { e->type = E_NOT; e->left.expr = expr_alloc_symbol(e->left.sym); e->right.sym = NULL; break; } if (e->right.sym == &symbol_mod) { printf("boolean symbol %s tested for 'm'? test forced to 'n'\n", e->left.sym->name); e->type = E_SYMBOL; e->left.sym = &symbol_no; e->right.sym = NULL; break; } if (e->right.sym == &symbol_yes) { e->type = E_SYMBOL; e->right.sym = NULL; break; } break; case E_UNEQUAL: if (e->left.sym->type != S_BOOLEAN) break; if (e->right.sym == &symbol_no) { e->type = E_SYMBOL; e->right.sym = NULL; break; } if (e->right.sym == &symbol_mod) { printf("boolean symbol %s tested for 'm'? test forced to 'y'\n", e->left.sym->name); e->type = E_SYMBOL; e->left.sym = &symbol_yes; e->right.sym = NULL; break; } if (e->right.sym == &symbol_yes) { e->type = E_NOT; e->left.expr = expr_alloc_symbol(e->left.sym); e->right.sym = NULL; break; } break; case E_NOT: switch (e->left.expr->type) { case E_NOT: // !!a -> a tmp = e->left.expr->left.expr; free(e->left.expr); free(e); e = tmp; e = expr_transform(e); break; case E_EQUAL: case E_UNEQUAL: // !a='x' -> a!='x' tmp = e->left.expr; free(e); e = tmp; e->type = e->type == E_EQUAL ? E_UNEQUAL : E_EQUAL; break; case E_LEQ: case E_GEQ: // !a<='x' -> a>'x' tmp = e->left.expr; free(e); e = tmp; e->type = e->type == E_LEQ ? E_GTH : E_LTH; break; case E_LTH: case E_GTH: // !a<'x' -> a>='x' tmp = e->left.expr; free(e); e = tmp; e->type = e->type == E_LTH ? E_GEQ : E_LEQ; break; case E_OR: // !(a || b) -> !a && !b tmp = e->left.expr; e->type = E_AND; e->right.expr = expr_alloc_one(E_NOT, tmp->right.expr); tmp->type = E_NOT; tmp->right.expr = NULL; e = expr_transform(e); break; case E_AND: // !(a && b) -> !a || !b tmp = e->left.expr; e->type = E_OR; e->right.expr = expr_alloc_one(E_NOT, tmp->right.expr); tmp->type = E_NOT; tmp->right.expr = NULL; e = expr_transform(e); break; case E_SYMBOL: if (e->left.expr->left.sym == &symbol_yes) { // !'y' -> 'n' tmp = e->left.expr; free(e); e = tmp; e->type = E_SYMBOL; e->left.sym = &symbol_no; break; } if (e->left.expr->left.sym == &symbol_mod) { // !'m' -> 'm' tmp = e->left.expr; free(e); e = tmp; e->type = E_SYMBOL; e->left.sym = &symbol_mod; break; } if (e->left.expr->left.sym == &symbol_no) { // !'n' -> 'y' tmp = e->left.expr; free(e); e = tmp; e->type = E_SYMBOL; e->left.sym = &symbol_yes; break; } break; default: ; } break; default: ; } return e; } int expr_contains_symbol(struct expr *dep, struct symbol *sym) { if (!dep) return 0; switch (dep->type) { case E_AND: case E_OR: return expr_contains_symbol(dep->left.expr, sym) || expr_contains_symbol(dep->right.expr, sym); case E_SYMBOL: return dep->left.sym == sym; case E_EQUAL: case E_GEQ: case E_GTH: case E_LEQ: case E_LTH: case E_UNEQUAL: return dep->left.sym == sym || dep->right.sym == sym; case E_NOT: return expr_contains_symbol(dep->left.expr, sym); default: ; } return 0; } bool expr_depends_symbol(struct expr *dep, struct symbol *sym) { if (!dep) return false; switch (dep->type) { case E_AND: return expr_depends_symbol(dep->left.expr, sym) || expr_depends_symbol(dep->right.expr, sym); case E_SYMBOL: return dep->left.sym == sym; case E_EQUAL: if (dep->left.sym == sym) { if (dep->right.sym == &symbol_yes || dep->right.sym == &symbol_mod) return true; } break; case E_UNEQUAL: if (dep->left.sym == sym) { if (dep->right.sym == &symbol_no) return true; } break; default: ; } return false; } struct expr *expr_trans_compare(struct expr *e, enum expr_type type, struct symbol *sym) { struct expr *e1, *e2; if (!e) { e = 
expr_alloc_symbol(sym); if (type == E_UNEQUAL) e = expr_alloc_one(E_NOT, e); return e; } switch (e->type) { case E_AND: e1 = expr_trans_compare(e->left.expr, E_EQUAL, sym); e2 = expr_trans_compare(e->right.expr, E_EQUAL, sym); if (sym == &symbol_yes) e = expr_alloc_two(E_AND, e1, e2); if (sym == &symbol_no) e = expr_alloc_two(E_OR, e1, e2); if (type == E_UNEQUAL) e = expr_alloc_one(E_NOT, e); return e; case E_OR: e1 = expr_trans_compare(e->left.expr, E_EQUAL, sym); e2 = expr_trans_compare(e->right.expr, E_EQUAL, sym); if (sym == &symbol_yes) e = expr_alloc_two(E_OR, e1, e2); if (sym == &symbol_no) e = expr_alloc_two(E_AND, e1, e2); if (type == E_UNEQUAL) e = expr_alloc_one(E_NOT, e); return e; case E_NOT: return expr_trans_compare(e->left.expr, type == E_EQUAL ? E_UNEQUAL : E_EQUAL, sym); case E_UNEQUAL: case E_LTH: case E_LEQ: case E_GTH: case E_GEQ: case E_EQUAL: if (type == E_EQUAL) { if (sym == &symbol_yes) return expr_copy(e); if (sym == &symbol_mod) return expr_alloc_symbol(&symbol_no); if (sym == &symbol_no) return expr_alloc_one(E_NOT, expr_copy(e)); } else { if (sym == &symbol_yes) return expr_alloc_one(E_NOT, expr_copy(e)); if (sym == &symbol_mod) return expr_alloc_symbol(&symbol_yes); if (sym == &symbol_no) return expr_copy(e); } break; case E_SYMBOL: return expr_alloc_comp(type, e->left.sym, sym); case E_LIST: case E_RANGE: case E_NONE: /* panic */; } return NULL; } enum string_value_kind { k_string, k_signed, k_unsigned, k_invalid }; union string_value { unsigned long long u; signed long long s; }; static enum string_value_kind expr_parse_string(const char *str, enum symbol_type type, union string_value *val) { char *tail; enum string_value_kind kind; errno = 0; switch (type) { case S_BOOLEAN: case S_TRISTATE: return k_string; case S_INT: val->s = strtoll(str, &tail, 10); kind = k_signed; break; case S_HEX: val->u = strtoull(str, &tail, 16); kind = k_unsigned; break; case S_STRING: case S_UNKNOWN: val->s = strtoll(str, &tail, 0); kind = k_signed; break; default: return k_invalid; } return !errno && !*tail && tail > str && isxdigit(tail[-1]) ? 
kind : k_string; } tristate expr_calc_value(struct expr *e) { tristate val1, val2; const char *str1, *str2; enum string_value_kind k1 = k_string, k2 = k_string; union string_value lval = {}, rval = {}; int res; if (!e) return yes; switch (e->type) { case E_SYMBOL: sym_calc_value(e->left.sym); return e->left.sym->curr.tri; case E_AND: val1 = expr_calc_value(e->left.expr); val2 = expr_calc_value(e->right.expr); return EXPR_AND(val1, val2); case E_OR: val1 = expr_calc_value(e->left.expr); val2 = expr_calc_value(e->right.expr); return EXPR_OR(val1, val2); case E_NOT: val1 = expr_calc_value(e->left.expr); return EXPR_NOT(val1); case E_EQUAL: case E_GEQ: case E_GTH: case E_LEQ: case E_LTH: case E_UNEQUAL: break; default: printf("expr_calc_value: %d?\n", e->type); return no; } sym_calc_value(e->left.sym); sym_calc_value(e->right.sym); str1 = sym_get_string_value(e->left.sym); str2 = sym_get_string_value(e->right.sym); if (e->left.sym->type != S_STRING || e->right.sym->type != S_STRING) { k1 = expr_parse_string(str1, e->left.sym->type, &lval); k2 = expr_parse_string(str2, e->right.sym->type, &rval); } if (k1 == k_string || k2 == k_string) res = strcmp(str1, str2); else if (k1 == k_invalid || k2 == k_invalid) { if (e->type != E_EQUAL && e->type != E_UNEQUAL) { printf("Cannot compare \"%s\" and \"%s\"\n", str1, str2); return no; } res = strcmp(str1, str2); } else if (k1 == k_unsigned || k2 == k_unsigned) res = (lval.u > rval.u) - (lval.u < rval.u); else /* if (k1 == k_signed && k2 == k_signed) */ res = (lval.s > rval.s) - (lval.s < rval.s); switch(e->type) { case E_EQUAL: return res ? no : yes; case E_GEQ: return res >= 0 ? yes : no; case E_GTH: return res > 0 ? yes : no; case E_LEQ: return res <= 0 ? yes : no; case E_LTH: return res < 0 ? yes : no; case E_UNEQUAL: return res ? yes : no; default: printf("expr_calc_value: relation %d?\n", e->type); return no; } } static int expr_compare_type(enum expr_type t1, enum expr_type t2) { if (t1 == t2) return 0; switch (t1) { case E_LEQ: case E_LTH: case E_GEQ: case E_GTH: if (t2 == E_EQUAL || t2 == E_UNEQUAL) return 1; case E_EQUAL: case E_UNEQUAL: if (t2 == E_NOT) return 1; case E_NOT: if (t2 == E_AND) return 1; case E_AND: if (t2 == E_OR) return 1; case E_OR: if (t2 == E_LIST) return 1; case E_LIST: if (t2 == 0) return 1; default: return -1; } printf("[%dgt%d?]", t1, t2); return 0; } static inline struct expr * expr_get_leftmost_symbol(const struct expr *e) { if (e == NULL) return NULL; while (e->type != E_SYMBOL) e = e->left.expr; return expr_copy(e); } /* * Given expression `e1' and `e2', returns the leaf of the longest * sub-expression of `e1' not containing 'e2. */ struct expr *expr_simplify_unmet_dep(struct expr *e1, struct expr *e2) { struct expr *ret; switch (e1->type) { case E_OR: return expr_alloc_and( expr_simplify_unmet_dep(e1->left.expr, e2), expr_simplify_unmet_dep(e1->right.expr, e2)); case E_AND: { struct expr *e; e = expr_alloc_and(expr_copy(e1), expr_copy(e2)); e = expr_eliminate_dups(e); ret = (!expr_eq(e, e1)) ? 
e1 : NULL; expr_free(e); break; } default: ret = e1; break; } return expr_get_leftmost_symbol(ret); } void expr_print(struct expr *e, void (*fn)(void *, struct symbol *, const char *), void *data, int prevtoken) { if (!e) { fn(data, NULL, "y"); return; } if (expr_compare_type(prevtoken, e->type) > 0) fn(data, NULL, "("); switch (e->type) { case E_SYMBOL: if (e->left.sym->name) fn(data, e->left.sym, e->left.sym->name); else fn(data, NULL, "<choice>"); break; case E_NOT: fn(data, NULL, "!"); expr_print(e->left.expr, fn, data, E_NOT); break; case E_EQUAL: if (e->left.sym->name) fn(data, e->left.sym, e->left.sym->name); else fn(data, NULL, "<choice>"); fn(data, NULL, "="); fn(data, e->right.sym, e->right.sym->name); break; case E_LEQ: case E_LTH: if (e->left.sym->name) fn(data, e->left.sym, e->left.sym->name); else fn(data, NULL, "<choice>"); fn(data, NULL, e->type == E_LEQ ? "<=" : "<"); fn(data, e->right.sym, e->right.sym->name); break; case E_GEQ: case E_GTH: if (e->left.sym->name) fn(data, e->left.sym, e->left.sym->name); else fn(data, NULL, "<choice>"); fn(data, NULL, e->type == E_GEQ ? ">=" : ">"); fn(data, e->right.sym, e->right.sym->name); break; case E_UNEQUAL: if (e->left.sym->name) fn(data, e->left.sym, e->left.sym->name); else fn(data, NULL, "<choice>"); fn(data, NULL, "!="); fn(data, e->right.sym, e->right.sym->name); break; case E_OR: expr_print(e->left.expr, fn, data, E_OR); fn(data, NULL, " || "); expr_print(e->right.expr, fn, data, E_OR); break; case E_AND: expr_print(e->left.expr, fn, data, E_AND); fn(data, NULL, " && "); expr_print(e->right.expr, fn, data, E_AND); break; case E_LIST: fn(data, e->right.sym, e->right.sym->name); if (e->left.expr) { fn(data, NULL, " ^ "); expr_print(e->left.expr, fn, data, E_LIST); } break; case E_RANGE: fn(data, NULL, "["); fn(data, e->left.sym, e->left.sym->name); fn(data, NULL, " "); fn(data, e->right.sym, e->right.sym->name); fn(data, NULL, "]"); break; default: { char buf[32]; sprintf(buf, "<unknown type %d>", e->type); fn(data, NULL, buf); break; } } if (expr_compare_type(prevtoken, e->type) > 0) fn(data, NULL, ")"); } static void expr_print_file_helper(void *data, struct symbol *sym, const char *str) { xfwrite(str, strlen(str), 1, data); } void expr_fprint(struct expr *e, FILE *out) { expr_print(e, expr_print_file_helper, out, E_NONE); } static void expr_print_gstr_helper(void *data, struct symbol *sym, const char *str) { struct gstr *gs = (struct gstr*)data; const char *sym_str = NULL; if (sym) sym_str = sym_get_string_value(sym); if (gs->max_width) { unsigned extra_length = strlen(str); const char *last_cr = strrchr(gs->s, '\n'); unsigned last_line_length; if (sym_str) extra_length += 4 + strlen(sym_str); if (!last_cr) last_cr = gs->s; last_line_length = strlen(gs->s) - (last_cr - gs->s); if ((last_line_length + extra_length) > gs->max_width) str_append(gs, "\\\n"); } str_append(gs, str); if (sym && sym->type != S_UNKNOWN) str_printf(gs, " [=%s]", sym_str); } void expr_gstr_print(struct expr *e, struct gstr *gs) { expr_print(e, expr_print_gstr_helper, gs, E_NONE); } backport-iwlwifi-dkms-7906/kconf/expr.h000066400000000000000000000161071351064634500201350ustar00rootroot00000000000000/* * Copyright (C) 2002 Roman Zippel * Released under the terms of the GNU GPL v2.0.
*/ #ifndef EXPR_H #define EXPR_H #ifdef __cplusplus extern "C" { #endif #include <assert.h> #include <stdio.h> #include "list.h" #ifndef __cplusplus #include <stdbool.h> #endif struct file { struct file *next; struct file *parent; const char *name; int lineno; }; typedef enum tristate { no, mod, yes } tristate; enum expr_type { E_NONE, E_OR, E_AND, E_NOT, E_EQUAL, E_UNEQUAL, E_LTH, E_LEQ, E_GTH, E_GEQ, E_LIST, E_SYMBOL, E_RANGE }; union expr_data { struct expr *expr; struct symbol *sym; }; struct expr { enum expr_type type; union expr_data left, right; }; #define EXPR_OR(dep1, dep2) (((dep1)>(dep2))?(dep1):(dep2)) #define EXPR_AND(dep1, dep2) (((dep1)<(dep2))?(dep1):(dep2)) #define EXPR_NOT(dep) (2-(dep)) #define expr_list_for_each_sym(l, e, s) \ for (e = (l); e && (s = e->right.sym); e = e->left.expr) struct expr_value { struct expr *expr; tristate tri; }; struct symbol_value { void *val; tristate tri; }; enum symbol_type { S_UNKNOWN, S_BOOLEAN, S_TRISTATE, S_INT, S_HEX, S_STRING, S_OTHER }; /* enum values are used as index to symbol.def[] */ enum { S_DEF_USER, /* main user value */ S_DEF_AUTO, /* values read from auto.conf */ S_DEF_DEF3, /* Reserved for UI usage */ S_DEF_DEF4, /* Reserved for UI usage */ S_DEF_COUNT }; struct symbol { struct symbol *next; char *name; enum symbol_type type; struct symbol_value curr; struct symbol_value def[S_DEF_COUNT]; tristate visible; int flags; struct property *prop; struct expr_value dir_dep; struct expr_value rev_dep; struct expr_value implied; }; #define for_all_symbols(i, sym) for (i = 0; i < SYMBOL_HASHSIZE; i++) for (sym = symbol_hash[i]; sym; sym = sym->next) if (sym->type != S_OTHER) #define SYMBOL_CONST 0x0001 /* symbol is const */ #define SYMBOL_CHECK 0x0008 /* used during dependency checking */ #define SYMBOL_CHOICE 0x0010 /* start of a choice block (null name) */ #define SYMBOL_CHOICEVAL 0x0020 /* used as a value in a choice block */ #define SYMBOL_VALID 0x0080 /* set when symbol.curr is calculated */ #define SYMBOL_OPTIONAL 0x0100 /* choice is optional - values can be 'n' */ #define SYMBOL_WRITE 0x0200 /* write symbol to file (KCONFIG_CONFIG) */ #define SYMBOL_CHANGED 0x0400 /* ? */ #define SYMBOL_AUTO 0x1000 /* value from environment variable */ #define SYMBOL_CHECKED 0x2000 /* used during dependency checking */ #define SYMBOL_WARNED 0x8000 /* warning has been issued */ /* Set when symbol.def[] is used */ #define SYMBOL_DEF 0x10000 /* First bit of SYMBOL_DEF */ #define SYMBOL_DEF_USER 0x10000 /* symbol.def[S_DEF_USER] is valid */ #define SYMBOL_DEF_AUTO 0x20000 /* symbol.def[S_DEF_AUTO] is valid */ #define SYMBOL_DEF3 0x40000 /* symbol.def[S_DEF_3] is valid */ #define SYMBOL_DEF4 0x80000 /* symbol.def[S_DEF_4] is valid */ /* choice values need to be set before calculating this symbol value */ #define SYMBOL_NEED_SET_CHOICE_VALUES 0x100000 /* Set symbol to y if allnoconfig; used for symbols that hide others */ #define SYMBOL_ALLNOCONFIG_Y 0x200000 #define SYMBOL_MAXLENGTH 256 #define SYMBOL_HASHSIZE 9973 /* A property represents the config options that can be associated * with a config "symbol".
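 * Each such line of a config entry becomes one property chained off the owning symbol via sym->prop, so iterating that list visits every prompt, default, select and range in definition order.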
* Sample: * config FOO * default y * prompt "foo prompt" * select BAR * config BAZ * int "BAZ Value" * range 1..255 */ enum prop_type { P_UNKNOWN, P_PROMPT, /* prompt "foo prompt" or "BAZ Value" */ P_COMMENT, /* text associated with a comment */ P_MENU, /* prompt associated with a menuconfig option */ P_DEFAULT, /* default y */ P_CHOICE, /* choice value */ P_SELECT, /* select BAR */ P_IMPLY, /* imply BAR */ P_RANGE, /* range 7..100 (for a symbol) */ P_ENV, /* value from environment variable */ P_SYMBOL, /* where a symbol is defined */ }; struct property { struct property *next; /* next property - null if last */ struct symbol *sym; /* the symbol for which the property is associated */ enum prop_type type; /* type of property */ const char *text; /* the prompt value - P_PROMPT, P_MENU, P_COMMENT */ struct expr_value visible; struct expr *expr; /* the optional conditional part of the property */ struct menu *menu; /* the menu the property are associated with * valid for: P_SELECT, P_RANGE, P_CHOICE, * P_PROMPT, P_DEFAULT, P_MENU, P_COMMENT */ struct file *file; /* what file was this property defined */ int lineno; /* what lineno was this property defined */ }; #define for_all_properties(sym, st, tok) \ for (st = sym->prop; st; st = st->next) \ if (st->type == (tok)) #define for_all_defaults(sym, st) for_all_properties(sym, st, P_DEFAULT) #define for_all_choices(sym, st) for_all_properties(sym, st, P_CHOICE) #define for_all_prompts(sym, st) \ for (st = sym->prop; st; st = st->next) \ if (st->text) struct menu { struct menu *next; struct menu *parent; struct menu *list; struct symbol *sym; struct property *prompt; struct expr *visibility; struct expr *dep; unsigned int flags; char *help; struct file *file; int lineno; void *data; }; #define MENU_CHANGED 0x0001 #define MENU_ROOT 0x0002 struct jump_key { struct list_head entries; size_t offset; struct menu *target; int index; }; #define JUMP_NB 9 extern struct file *file_list; extern struct file *current_file; struct file *lookup_file(const char *name); extern struct symbol symbol_yes, symbol_no, symbol_mod; extern struct symbol *modules_sym; extern struct symbol *sym_defconfig_list; extern int cdebug; struct expr *expr_alloc_symbol(struct symbol *sym); struct expr *expr_alloc_one(enum expr_type type, struct expr *ce); struct expr *expr_alloc_two(enum expr_type type, struct expr *e1, struct expr *e2); struct expr *expr_alloc_comp(enum expr_type type, struct symbol *s1, struct symbol *s2); struct expr *expr_alloc_and(struct expr *e1, struct expr *e2); struct expr *expr_alloc_or(struct expr *e1, struct expr *e2); struct expr *expr_copy(const struct expr *org); void expr_free(struct expr *e); void expr_eliminate_eq(struct expr **ep1, struct expr **ep2); tristate expr_calc_value(struct expr *e); struct expr *expr_trans_bool(struct expr *e); struct expr *expr_eliminate_dups(struct expr *e); struct expr *expr_transform(struct expr *e); int expr_contains_symbol(struct expr *dep, struct symbol *sym); bool expr_depends_symbol(struct expr *dep, struct symbol *sym); struct expr *expr_trans_compare(struct expr *e, enum expr_type type, struct symbol *sym); struct expr *expr_simplify_unmet_dep(struct expr *e1, struct expr *e2); void expr_fprint(struct expr *e, FILE *out); struct gstr; /* forward */ void expr_gstr_print(struct expr *e, struct gstr *gs); static inline int expr_is_yes(struct expr *e) { return !e || (e->type == E_SYMBOL && e->left.sym == &symbol_yes); } static inline int expr_is_no(struct expr *e) { return e && (e->type == E_SYMBOL && e->left.sym 
== &symbol_no); } #ifdef __cplusplus } #endif #endif /* EXPR_H */ backport-iwlwifi-dkms-7906/kconf/list.h000066400000000000000000000071761351064634500201400ustar00rootroot00000000000000#ifndef LIST_H #define LIST_H /* * Copied from include/linux/... */ #undef offsetof #define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER) /** * container_of - cast a member of a structure out to the containing structure * @ptr: the pointer to the member. * @type: the type of the container struct this is embedded in. * @member: the name of the member within the struct. * */ #define container_of(ptr, type, member) ({ \ const typeof( ((type *)0)->member ) *__mptr = (ptr); \ (type *)( (char *)__mptr - offsetof(type,member) );}) struct list_head { struct list_head *next, *prev; }; #define LIST_HEAD_INIT(name) { &(name), &(name) } #define LIST_HEAD(name) \ struct list_head name = LIST_HEAD_INIT(name) /** * list_entry - get the struct for this entry * @ptr: the &struct list_head pointer. * @type: the type of the struct this is embedded in. * @member: the name of the list_head within the struct. */ #define list_entry(ptr, type, member) \ container_of(ptr, type, member) /** * list_for_each_entry - iterate over list of given type * @pos: the type * to use as a loop cursor. * @head: the head for your list. * @member: the name of the list_head within the struct. */ #define list_for_each_entry(pos, head, member) \ for (pos = list_entry((head)->next, typeof(*pos), member); \ &pos->member != (head); \ pos = list_entry(pos->member.next, typeof(*pos), member)) /** * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry * @pos: the type * to use as a loop cursor. * @n: another type * to use as temporary storage * @head: the head for your list. * @member: the name of the list_head within the struct. */ #define list_for_each_entry_safe(pos, n, head, member) \ for (pos = list_entry((head)->next, typeof(*pos), member), \ n = list_entry(pos->member.next, typeof(*pos), member); \ &pos->member != (head); \ pos = n, n = list_entry(n->member.next, typeof(*n), member)) /** * list_empty - tests whether a list is empty * @head: the list to test. */ static inline int list_empty(const struct list_head *head) { return head->next == head; } /* * Insert a new entry between two known consecutive entries. * * This is only for internal list manipulation where we know * the prev/next entries already! */ static inline void __list_add(struct list_head *_new, struct list_head *prev, struct list_head *next) { next->prev = _new; _new->next = next; _new->prev = prev; prev->next = _new; } /** * list_add_tail - add a new entry * @new: new entry to be added * @head: list head to add it before * * Insert a new entry before the specified head. * This is useful for implementing queues. */ static inline void list_add_tail(struct list_head *_new, struct list_head *head) { __list_add(_new, head->prev, head); } /* * Delete a list entry by making the prev/next entries * point to each other. * * This is only for internal list manipulation where we know * the prev/next entries already! */ static inline void __list_del(struct list_head *prev, struct list_head *next) { next->prev = prev; prev->next = next; } #define LIST_POISON1 ((void *) 0x00100100) #define LIST_POISON2 ((void *) 0x00200200) /** * list_del - deletes entry from list. * @entry: the element to delete from the list. * Note: list_empty() on entry does not return true after this, the entry is * in an undefined state. 
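 * (next and prev are set to the LIST_POISON1/LIST_POISON2 addresses defined above, so a stale iterator faults at a recognizable address instead of silently corrupting the list.)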
*/ static inline void list_del(struct list_head *entry) { __list_del(entry->prev, entry->next); entry->next = (struct list_head*)LIST_POISON1; entry->prev = (struct list_head*)LIST_POISON2; } #endif backport-iwlwifi-dkms-7906/kconf/lkc.h000066400000000000000000000107121351064634500177240ustar00rootroot00000000000000/* * Copyright (C) 2002 Roman Zippel * Released under the terms of the GNU GPL v2.0. */ #ifndef LKC_H #define LKC_H #include "expr.h" #ifndef KBUILD_NO_NLS # include #else static inline const char *gettext(const char *txt) { return txt; } static inline void textdomain(const char *domainname) {} static inline void bindtextdomain(const char *name, const char *dir) {} static inline char *bind_textdomain_codeset(const char *dn, char *c) { return c; } #endif #ifdef __cplusplus extern "C" { #endif #include "lkc_proto.h" #define SRCTREE "srctree" #ifndef PACKAGE #define PACKAGE "linux" #endif #define LOCALEDIR "/usr/share/locale" #define _(text) gettext(text) #define N_(text) (text) #ifndef CONFIG_ #define CONFIG_ "CONFIG_" #endif static inline const char *CONFIG_prefix(void) { return getenv( "CONFIG_" ) ?: CONFIG_; } #undef CONFIG_ #define CONFIG_ CONFIG_prefix() #define TF_COMMAND 0x0001 #define TF_PARAM 0x0002 #define TF_OPTION 0x0004 enum conf_def_mode { def_default, def_yes, def_mod, def_no, def_random }; #define T_OPT_MODULES 1 #define T_OPT_DEFCONFIG_LIST 2 #define T_OPT_ENV 3 #define T_OPT_ALLNOCONFIG_Y 4 struct kconf_id { int name; int token; unsigned int flags; enum symbol_type stype; }; void zconfdump(FILE *out); void zconf_starthelp(void); FILE *zconf_fopen(const char *name); void zconf_initscan(const char *name); void zconf_nextfile(const char *name); int zconf_lineno(void); const char *zconf_curname(void); /* confdata.c */ const char *conf_get_configname(void); const char *conf_get_autoconfig_name(void); char *conf_get_default_confname(void); void sym_set_change_count(int count); void sym_add_change_count(int count); bool conf_set_all_new_symbols(enum conf_def_mode mode); void set_all_choice_values(struct symbol *csym); /* confdata.c and expr.c */ static inline void xfwrite(const void *str, size_t len, size_t count, FILE *out) { assert(len != 0); if (fwrite(str, len, count, out) != count) fprintf(stderr, "Error in writing or end of file.\n"); } /* menu.c */ void _menu_init(void); void menu_warn(struct menu *menu, const char *fmt, ...); struct menu *menu_add_menu(void); void menu_end_menu(void); void menu_add_entry(struct symbol *sym); void menu_end_entry(void); void menu_add_dep(struct expr *dep); void menu_add_visibility(struct expr *dep); struct property *menu_add_prompt(enum prop_type type, char *prompt, struct expr *dep); void menu_add_expr(enum prop_type type, struct expr *expr, struct expr *dep); void menu_add_symbol(enum prop_type type, struct symbol *sym, struct expr *dep); void menu_add_option(int token, char *arg); void menu_finalize(struct menu *parent); void menu_set_type(int type); /* util.c */ struct file *file_lookup(const char *name); int file_write_dep(const char *name); void *xmalloc(size_t size); void *xcalloc(size_t nmemb, size_t size); struct gstr { size_t len; char *s; /* * when max_width is not zero long lines in string s (if any) get * wrapped not to exceed the max_width value */ int max_width; }; struct gstr str_new(void); void str_free(struct gstr *gs); void str_append(struct gstr *gs, const char *s); void str_printf(struct gstr *gs, const char *fmt, ...); const char *str_get(struct gstr *gs); /* symbol.c */ extern struct expr *sym_env_list; void 
sym_init(void); void sym_clear_all_valid(void); struct symbol *sym_choice_default(struct symbol *sym); const char *sym_get_string_default(struct symbol *sym); struct symbol *sym_check_deps(struct symbol *sym); struct property *prop_alloc(enum prop_type type, struct symbol *sym); struct symbol *prop_get_symbol(struct property *prop); struct property *sym_get_env_prop(struct symbol *sym); static inline tristate sym_get_tristate_value(struct symbol *sym) { return sym->curr.tri; } static inline struct symbol *sym_get_choice_value(struct symbol *sym) { return (struct symbol *)sym->curr.val; } static inline bool sym_set_choice_value(struct symbol *ch, struct symbol *chval) { return sym_set_tristate_value(chval, yes); } static inline bool sym_is_choice(struct symbol *sym) { return sym->flags & SYMBOL_CHOICE ? true : false; } static inline bool sym_is_choice_value(struct symbol *sym) { return sym->flags & SYMBOL_CHOICEVAL ? true : false; } static inline bool sym_is_optional(struct symbol *sym) { return sym->flags & SYMBOL_OPTIONAL ? true : false; } static inline bool sym_has_value(struct symbol *sym) { return sym->flags & SYMBOL_DEF_USER ? true : false; } #ifdef __cplusplus } #endif #endif /* LKC_H */ backport-iwlwifi-dkms-7906/kconf/lkc_proto.h000066400000000000000000000041521351064634500211500ustar00rootroot00000000000000#include /* confdata.c */ void conf_parse(const char *name); int conf_read(const char *name); int conf_read_simple(const char *name, int); int conf_write_defconfig(const char *name); int conf_write(const char *name); int conf_write_autoconf(void); bool conf_get_changed(void); void conf_set_changed_callback(void (*fn)(void)); void conf_set_message_callback(void (*fn)(const char *fmt, va_list ap)); /* menu.c */ extern struct menu rootmenu; bool menu_is_empty(struct menu *menu); bool menu_is_visible(struct menu *menu); bool menu_has_prompt(struct menu *menu); const char * menu_get_prompt(struct menu *menu); struct menu * menu_get_root_menu(struct menu *menu); struct menu * menu_get_parent_menu(struct menu *menu); bool menu_has_help(struct menu *menu); const char * menu_get_help(struct menu *menu); struct gstr get_relations_str(struct symbol **sym_arr, struct list_head *head); void menu_get_ext_help(struct menu *menu, struct gstr *help); /* symbol.c */ extern struct symbol * symbol_hash[SYMBOL_HASHSIZE]; struct symbol * sym_lookup(const char *name, int flags); struct symbol * sym_find(const char *name); const char * sym_expand_string_value(const char *in); const char * sym_escape_string_value(const char *in); struct symbol ** sym_re_search(const char *pattern); const char * sym_type_name(enum symbol_type type); void sym_calc_value(struct symbol *sym); enum symbol_type sym_get_type(struct symbol *sym); bool sym_tristate_within_range(struct symbol *sym,tristate tri); bool sym_set_tristate_value(struct symbol *sym,tristate tri); tristate sym_toggle_tristate_value(struct symbol *sym); bool sym_string_valid(struct symbol *sym, const char *newval); bool sym_string_within_range(struct symbol *sym, const char *str); bool sym_set_string_value(struct symbol *sym, const char *newval); bool sym_is_changable(struct symbol *sym); struct property * sym_get_choice_prop(struct symbol *sym); const char * sym_get_string_value(struct symbol *sym); const char * prop_get_type_name(enum prop_type type); /* expr.c */ void expr_print(struct expr *e, void (*fn)(void *, struct symbol *, const char *), void *data, int prevtoken); 
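/*
 * A minimal sketch, added for illustration and not part of this tree: it
 * shows how the interfaces declared in lkc_proto.h fit together. Parse a
 * Kconfig tree, load the current .config, then emit a minimal defconfig.
 * It assumes it is linked against the kconf objects (the zconf parser,
 * confdata, symbol, menu, ...); the file name handling and error
 * reporting are only illustrative.
 */
#include <stdio.h>
#include "lkc.h"

int main(int argc, char **argv)
{
	if (argc < 2) {
		fprintf(stderr, "usage: %s Kconfig [defconfig-out]\n", argv[0]);
		return 1;
	}
	conf_parse(argv[1]);	/* build the symbol and menu trees */
	if (conf_read(NULL))	/* NULL: read $KCONFIG_CONFIG / .config */
		return 1;
	/* write only the symbols that differ from their defaults */
	return conf_write_defconfig(argc > 2 ? argv[2] : "defconfig") ? 1 : 0;
}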
backport-iwlwifi-dkms-7906/kconf/lxdialog/000077500000000000000000000000001351064634500206045ustar00rootroot00000000000000backport-iwlwifi-dkms-7906/kconf/lxdialog/check-lxdialog.sh000077500000000000000000000040451351064634500240240ustar00rootroot00000000000000#!/bin/sh # Check ncurses compatibility # What library to link ldflags() { pkg-config --libs ncursesw 2>/dev/null && exit pkg-config --libs ncurses 2>/dev/null && exit for ext in so a dll.a dylib ; do for lib in ncursesw ncurses curses ; do $cc -print-file-name=lib${lib}.${ext} | grep -q / if [ $? -eq 0 ]; then echo "-l${lib}" exit fi done done exit 1 } # Where is ncurses.h? ccflags() { if pkg-config --cflags ncursesw 2>/dev/null; then echo '-DCURSES_LOC="" -DNCURSES_WIDECHAR=1' elif pkg-config --cflags ncurses 2>/dev/null; then echo '-DCURSES_LOC=""' elif [ -f /usr/include/ncursesw/curses.h ]; then echo '-I/usr/include/ncursesw -DCURSES_LOC=""' echo ' -DNCURSES_WIDECHAR=1' elif [ -f /usr/include/ncurses/ncurses.h ]; then echo '-I/usr/include/ncurses -DCURSES_LOC=""' elif [ -f /usr/include/ncurses/curses.h ]; then echo '-I/usr/include/ncurses -DCURSES_LOC=""' elif [ -f /usr/include/ncurses.h ]; then echo '-DCURSES_LOC=""' else echo '-DCURSES_LOC=""' fi } # Temp file, try to clean up after us tmp=.lxdialog.tmp trap "rm -f $tmp" 0 1 2 3 15 # Check if we can link to ncurses check() { $cc -x c - -o $tmp 2>/dev/null <<'EOF' #include CURSES_LOC main() {} EOF if [ $? != 0 ]; then echo " *** Unable to find the ncurses libraries or the" 1>&2 echo " *** required header files." 1>&2 echo " *** 'make menuconfig' requires the ncurses libraries." 1>&2 echo " *** " 1>&2 echo " *** Install ncurses (ncurses-devel) and try again." 1>&2 echo " *** " 1>&2 exit 1 fi } usage() { printf "Usage: $0 [-check compiler options|-ccflags|-ldflags compiler options]\n" } if [ $# -eq 0 ]; then usage exit 1 fi cc="" case "$1" in "-check") shift cc="$@" check ;; "-ccflags") ccflags ;; "-ldflags") shift cc="$@" ldflags ;; "*") usage exit 1 ;; esac backport-iwlwifi-dkms-7906/kconf/lxdialog/checklist.c000066400000000000000000000203661351064634500227300ustar00rootroot00000000000000/* * checklist.c -- implements the checklist box * * ORIGINAL AUTHOR: Savio Lam (lam836@cs.cuhk.hk) * Stuart Herbert - S.Herbert@sheffield.ac.uk: radiolist extension * Alessandro Rubini - rubini@ipvvis.unipv.it: merged the two * MODIFIED FOR LINUX KERNEL CONFIG BY: William Roadcap (roadcap@cfw.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ #include "dialog.h" static int list_width, check_x, item_x; /* * Print list item */ static void print_item(WINDOW * win, int choice, int selected) { int i; char *list_item = malloc(list_width + 1); strncpy(list_item, item_str(), list_width - item_x); list_item[list_width - item_x] = '\0'; /* Clear 'residue' of last item */ wattrset(win, dlg.menubox.atr); wmove(win, choice, 0); for (i = 0; i < list_width; i++) waddch(win, ' '); wmove(win, choice, check_x); wattrset(win, selected ? dlg.check_selected.atr : dlg.check.atr); if (!item_is_tag(':')) wprintw(win, "(%c)", item_is_tag('X') ? 'X' : ' '); wattrset(win, selected ? dlg.tag_selected.atr : dlg.tag.atr); mvwaddch(win, choice, item_x, list_item[0]); wattrset(win, selected ? dlg.item_selected.atr : dlg.item.atr); waddstr(win, list_item + 1); if (selected) { wmove(win, choice, check_x + 1); wrefresh(win); } free(list_item); } /* * Print the scroll indicators. */ static void print_arrows(WINDOW * win, int choice, int item_no, int scroll, int y, int x, int height) { wmove(win, y, x); if (scroll > 0) { wattrset(win, dlg.uarrow.atr); waddch(win, ACS_UARROW); waddstr(win, "(-)"); } else { wattrset(win, dlg.menubox.atr); waddch(win, ACS_HLINE); waddch(win, ACS_HLINE); waddch(win, ACS_HLINE); waddch(win, ACS_HLINE); } y = y + height + 1; wmove(win, y, x); if ((height < item_no) && (scroll + choice < item_no - 1)) { wattrset(win, dlg.darrow.atr); waddch(win, ACS_DARROW); waddstr(win, "(+)"); } else { wattrset(win, dlg.menubox_border.atr); waddch(win, ACS_HLINE); waddch(win, ACS_HLINE); waddch(win, ACS_HLINE); waddch(win, ACS_HLINE); } } /* * Display the termination buttons */ static void print_buttons(WINDOW * dialog, int height, int width, int selected) { int x = width / 2 - 11; int y = height - 2; print_button(dialog, gettext("Select"), y, x, selected == 0); print_button(dialog, gettext(" Help "), y, x + 14, selected == 1); wmove(dialog, y, x + 1 + 14 * selected); wrefresh(dialog); } /* * Display a dialog box with a list of options that can be turned on or off * in the style of radiolist (only one option turned on at a time). 
*/ int dialog_checklist(const char *title, const char *prompt, int height, int width, int list_height) { int i, x, y, box_x, box_y; int key = 0, button = 0, choice = 0, scroll = 0, max_choice; WINDOW *dialog, *list; /* which item to highlight */ item_foreach() { if (item_is_tag('X')) choice = item_n(); if (item_is_selected()) { choice = item_n(); break; } } do_resize: if (getmaxy(stdscr) < (height + CHECKLIST_HEIGTH_MIN)) return -ERRDISPLAYTOOSMALL; if (getmaxx(stdscr) < (width + CHECKLIST_WIDTH_MIN)) return -ERRDISPLAYTOOSMALL; max_choice = MIN(list_height, item_count()); /* center dialog box on screen */ x = (getmaxx(stdscr) - width) / 2; y = (getmaxy(stdscr) - height) / 2; draw_shadow(stdscr, y, x, height, width); dialog = newwin(height, width, y, x); keypad(dialog, TRUE); draw_box(dialog, 0, 0, height, width, dlg.dialog.atr, dlg.border.atr); wattrset(dialog, dlg.border.atr); mvwaddch(dialog, height - 3, 0, ACS_LTEE); for (i = 0; i < width - 2; i++) waddch(dialog, ACS_HLINE); wattrset(dialog, dlg.dialog.atr); waddch(dialog, ACS_RTEE); print_title(dialog, title, width); wattrset(dialog, dlg.dialog.atr); print_autowrap(dialog, prompt, width - 2, 1, 3); list_width = width - 6; box_y = height - list_height - 5; box_x = (width - list_width) / 2 - 1; /* create new window for the list */ list = subwin(dialog, list_height, list_width, y + box_y + 1, x + box_x + 1); keypad(list, TRUE); /* draw a box around the list items */ draw_box(dialog, box_y, box_x, list_height + 2, list_width + 2, dlg.menubox_border.atr, dlg.menubox.atr); /* Find length of longest item in order to center checklist */ check_x = 0; item_foreach() check_x = MAX(check_x, strlen(item_str()) + 4); check_x = MIN(check_x, list_width); check_x = (list_width - check_x) / 2; item_x = check_x + 4; if (choice >= list_height) { scroll = choice - list_height + 1; choice -= scroll; } /* Print the list */ for (i = 0; i < max_choice; i++) { item_set(scroll + i); print_item(list, i, i == choice); } print_arrows(dialog, choice, item_count(), scroll, box_y, box_x + check_x + 5, list_height); print_buttons(dialog, height, width, 0); wnoutrefresh(dialog); wnoutrefresh(list); doupdate(); while (key != KEY_ESC) { key = wgetch(dialog); for (i = 0; i < max_choice; i++) { item_set(i + scroll); if (toupper(key) == toupper(item_str()[0])) break; } if (i < max_choice || key == KEY_UP || key == KEY_DOWN || key == '+' || key == '-') { if (key == KEY_UP || key == '-') { if (!choice) { if (!scroll) continue; /* Scroll list down */ if (list_height > 1) { /* De-highlight current first item */ item_set(scroll); print_item(list, 0, FALSE); scrollok(list, TRUE); wscrl(list, -1); scrollok(list, FALSE); } scroll--; item_set(scroll); print_item(list, 0, TRUE); print_arrows(dialog, choice, item_count(), scroll, box_y, box_x + check_x + 5, list_height); wnoutrefresh(dialog); wrefresh(list); continue; /* wait for another key press */ } else i = choice - 1; } else if (key == KEY_DOWN || key == '+') { if (choice == max_choice - 1) { if (scroll + choice >= item_count() - 1) continue; /* Scroll list up */ if (list_height > 1) { /* De-highlight current last item before scrolling up */ item_set(scroll + max_choice - 1); print_item(list, max_choice - 1, FALSE); scrollok(list, TRUE); wscrl(list, 1); scrollok(list, FALSE); } scroll++; item_set(scroll + max_choice - 1); print_item(list, max_choice - 1, TRUE); print_arrows(dialog, choice, item_count(), scroll, box_y, box_x + check_x + 5, list_height); wnoutrefresh(dialog); wrefresh(list); continue; /* wait for another key press 
*/ } else i = choice + 1; } if (i != choice) { /* De-highlight current item */ item_set(scroll + choice); print_item(list, choice, FALSE); /* Highlight new item */ choice = i; item_set(scroll + choice); print_item(list, choice, TRUE); wnoutrefresh(dialog); wrefresh(list); } continue; /* wait for another key press */ } switch (key) { case 'H': case 'h': case '?': button = 1; /* fall-through */ case 'S': case 's': case ' ': case '\n': item_foreach() item_set_selected(0); item_set(scroll + choice); item_set_selected(1); delwin(list); delwin(dialog); return button; case TAB: case KEY_LEFT: case KEY_RIGHT: button = ((key == KEY_LEFT ? --button : ++button) < 0) ? 1 : (button > 1 ? 0 : button); print_buttons(dialog, height, width, button); wrefresh(dialog); break; case 'X': case 'x': key = KEY_ESC; break; case KEY_ESC: key = on_key_esc(dialog); break; case KEY_RESIZE: delwin(list); delwin(dialog); on_key_resize(); goto do_resize; } /* Now, update everything... */ doupdate(); } delwin(list); delwin(dialog); return key; /* ESC pressed */ } backport-iwlwifi-dkms-7906/kconf/lxdialog/dialog.h000066400000000000000000000167171351064634500222300ustar00rootroot00000000000000/* * dialog.h -- common declarations for all dialog modules * * AUTHOR: Savio Lam (lam836@cs.cuhk.hk) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include #include #include #include #include #include #include #ifndef KBUILD_NO_NLS # include #else # define gettext(Msgid) ((const char *) (Msgid)) #endif #ifdef __sun__ #define CURS_MACROS #endif #include CURSES_LOC /* * Colors in ncurses 1.9.9e do not work properly since foreground and * background colors are OR'd rather than separately masked. This version * of dialog was hacked to work with ncurses 1.9.9e, making it incompatible * with standard curses. The simplest fix (to make this work with standard * curses) uses the wbkgdset() function, not used in the original hack. * Turn it off if we're building with 1.9.9e, since it just confuses things. */ #if defined(NCURSES_VERSION) && defined(_NEED_WRAP) && !defined(GCC_PRINTFLIKE) #define OLD_NCURSES 1 #undef wbkgdset #define wbkgdset(w,p) /*nothing */ #else #define OLD_NCURSES 0 #endif #define TR(params) _tracef params #define KEY_ESC 27 #define TAB 9 #define MAX_LEN 2048 #define BUF_SIZE (10*1024) #define MIN(x,y) (x < y ? x : y) #define MAX(x,y) (x > y ? 
x : y) #ifndef ACS_ULCORNER #define ACS_ULCORNER '+' #endif #ifndef ACS_LLCORNER #define ACS_LLCORNER '+' #endif #ifndef ACS_URCORNER #define ACS_URCORNER '+' #endif #ifndef ACS_LRCORNER #define ACS_LRCORNER '+' #endif #ifndef ACS_HLINE #define ACS_HLINE '-' #endif #ifndef ACS_VLINE #define ACS_VLINE '|' #endif #ifndef ACS_LTEE #define ACS_LTEE '+' #endif #ifndef ACS_RTEE #define ACS_RTEE '+' #endif #ifndef ACS_UARROW #define ACS_UARROW '^' #endif #ifndef ACS_DARROW #define ACS_DARROW 'v' #endif /* error return codes */ #define ERRDISPLAYTOOSMALL (KEY_MAX + 1) /* * Color definitions */ struct dialog_color { chtype atr; /* Color attribute */ int fg; /* foreground */ int bg; /* background */ int hl; /* highlight this item */ }; struct subtitle_list { struct subtitle_list *next; const char *text; }; struct dialog_info { const char *backtitle; struct subtitle_list *subtitles; struct dialog_color screen; struct dialog_color shadow; struct dialog_color dialog; struct dialog_color title; struct dialog_color border; struct dialog_color button_active; struct dialog_color button_inactive; struct dialog_color button_key_active; struct dialog_color button_key_inactive; struct dialog_color button_label_active; struct dialog_color button_label_inactive; struct dialog_color inputbox; struct dialog_color inputbox_border; struct dialog_color searchbox; struct dialog_color searchbox_title; struct dialog_color searchbox_border; struct dialog_color position_indicator; struct dialog_color menubox; struct dialog_color menubox_border; struct dialog_color item; struct dialog_color item_selected; struct dialog_color tag; struct dialog_color tag_selected; struct dialog_color tag_key; struct dialog_color tag_key_selected; struct dialog_color check; struct dialog_color check_selected; struct dialog_color uarrow; struct dialog_color darrow; }; /* * Global variables */ extern struct dialog_info dlg; extern char dialog_input_result[]; extern int saved_x, saved_y; /* Needed in signal handler in mconf.c */ /* * Function prototypes */ /* item list as used by checklist and menubox */ void item_reset(void); void item_make(const char *fmt, ...); void item_add_str(const char *fmt, ...); void item_set_tag(char tag); void item_set_data(void *p); void item_set_selected(int val); int item_activate_selected(void); void *item_data(void); char item_tag(void); /* item list manipulation for lxdialog use */ #define MAXITEMSTR 200 struct dialog_item { char str[MAXITEMSTR]; /* prompt displayed */ char tag; void *data; /* pointer to menu item - used by menubox+checklist */ int selected; /* Set to 1 by dialog_*() function if selected. */ }; /* list of lialog_items */ struct dialog_list { struct dialog_item node; struct dialog_list *next; }; extern struct dialog_list *item_cur; extern struct dialog_list item_nil; extern struct dialog_list *item_head; int item_count(void); void item_set(int n); int item_n(void); const char *item_str(void); int item_is_selected(void); int item_is_tag(char tag); #define item_foreach() \ for (item_cur = item_head ? 
item_head: item_cur; \ item_cur && (item_cur != &item_nil); item_cur = item_cur->next) /* generic key handlers */ int on_key_esc(WINDOW *win); int on_key_resize(void); /* minimum (re)size values */ #define CHECKLIST_HEIGTH_MIN 6 /* For dialog_checklist() */ #define CHECKLIST_WIDTH_MIN 6 #define INPUTBOX_HEIGTH_MIN 2 /* For dialog_inputbox() */ #define INPUTBOX_WIDTH_MIN 2 #define MENUBOX_HEIGTH_MIN 15 /* For dialog_menu() */ #define MENUBOX_WIDTH_MIN 65 #define TEXTBOX_HEIGTH_MIN 8 /* For dialog_textbox() */ #define TEXTBOX_WIDTH_MIN 8 #define YESNO_HEIGTH_MIN 4 /* For dialog_yesno() */ #define YESNO_WIDTH_MIN 4 #define WINDOW_HEIGTH_MIN 19 /* For init_dialog() */ #define WINDOW_WIDTH_MIN 80 int init_dialog(const char *backtitle); void set_dialog_backtitle(const char *backtitle); void set_dialog_subtitles(struct subtitle_list *subtitles); void end_dialog(int x, int y); void attr_clear(WINDOW * win, int height, int width, chtype attr); void dialog_clear(void); void print_autowrap(WINDOW * win, const char *prompt, int width, int y, int x); void print_button(WINDOW * win, const char *label, int y, int x, int selected); void print_title(WINDOW *dialog, const char *title, int width); void draw_box(WINDOW * win, int y, int x, int height, int width, chtype box, chtype border); void draw_shadow(WINDOW * win, int y, int x, int height, int width); int first_alpha(const char *string, const char *exempt); int dialog_yesno(const char *title, const char *prompt, int height, int width); int dialog_msgbox(const char *title, const char *prompt, int height, int width, int pause); typedef void (*update_text_fn)(char *buf, size_t start, size_t end, void *_data); int dialog_textbox(const char *title, char *tbuf, int initial_height, int initial_width, int *keys, int *_vscroll, int *_hscroll, update_text_fn update_text, void *data); int dialog_menu(const char *title, const char *prompt, const void *selected, int *s_scroll); int dialog_checklist(const char *title, const char *prompt, int height, int width, int list_height); int dialog_inputbox(const char *title, const char *prompt, int height, int width, const char *init); /* * This is the base for fictitious keys, which activate * the buttons. * * Mouse-generated keys are the following: * -- the first 32 are used as numbers, in addition to '0'-'9' * -- the lowercase are used to signal mouse-enter events (M_EVENT + 'o') * -- uppercase chars are used to invoke the button (M_EVENT + 'O') */ #define M_EVENT (KEY_MAX+1) backport-iwlwifi-dkms-7906/kconf/lxdialog/inputbox.c000066400000000000000000000160001351064634500226150ustar00rootroot00000000000000/* * inputbox.c -- implements the input box * * ORIGINAL AUTHOR: Savio Lam (lam836@cs.cuhk.hk) * MODIFIED FOR LINUX KERNEL CONFIG BY: William Roadcap (roadcap@cfw.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
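 *
 * Usage sketch (hypothetical caller, not part of the original file;
 * assumes init_dialog() has already been called):
 *
 *	int ret = dialog_inputbox("Name", "Enter a value:", 10, 50, "default");
 *	if (ret == 0)
 *		use_value(dialog_input_result);	// <Ok>; use_value() is made up
 *	else if (ret == 1)
 *		show_help();			// <Help>; show_help() is made up
 *	// ret == KEY_ESC means the user aborted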
*/ #include "dialog.h" char dialog_input_result[MAX_LEN + 1]; /* * Print the termination buttons */ static void print_buttons(WINDOW * dialog, int height, int width, int selected) { int x = width / 2 - 11; int y = height - 2; print_button(dialog, gettext(" Ok "), y, x, selected == 0); print_button(dialog, gettext(" Help "), y, x + 14, selected == 1); wmove(dialog, y, x + 1 + 14 * selected); wrefresh(dialog); } /* * Display a dialog box for inputing a string */ int dialog_inputbox(const char *title, const char *prompt, int height, int width, const char *init) { int i, x, y, box_y, box_x, box_width; int input_x = 0, key = 0, button = -1; int show_x, len, pos; char *instr = dialog_input_result; WINDOW *dialog; if (!init) instr[0] = '\0'; else strcpy(instr, init); do_resize: if (getmaxy(stdscr) <= (height - INPUTBOX_HEIGTH_MIN)) return -ERRDISPLAYTOOSMALL; if (getmaxx(stdscr) <= (width - INPUTBOX_WIDTH_MIN)) return -ERRDISPLAYTOOSMALL; /* center dialog box on screen */ x = (getmaxx(stdscr) - width) / 2; y = (getmaxy(stdscr) - height) / 2; draw_shadow(stdscr, y, x, height, width); dialog = newwin(height, width, y, x); keypad(dialog, TRUE); draw_box(dialog, 0, 0, height, width, dlg.dialog.atr, dlg.border.atr); wattrset(dialog, dlg.border.atr); mvwaddch(dialog, height - 3, 0, ACS_LTEE); for (i = 0; i < width - 2; i++) waddch(dialog, ACS_HLINE); wattrset(dialog, dlg.dialog.atr); waddch(dialog, ACS_RTEE); print_title(dialog, title, width); wattrset(dialog, dlg.dialog.atr); print_autowrap(dialog, prompt, width - 2, 1, 3); /* Draw the input field box */ box_width = width - 6; getyx(dialog, y, x); box_y = y + 2; box_x = (width - box_width) / 2; draw_box(dialog, y + 1, box_x - 1, 3, box_width + 2, dlg.dialog.atr, dlg.border.atr); print_buttons(dialog, height, width, 0); /* Set up the initial value */ wmove(dialog, box_y, box_x); wattrset(dialog, dlg.inputbox.atr); len = strlen(instr); pos = len; if (len >= box_width) { show_x = len - box_width + 1; input_x = box_width - 1; for (i = 0; i < box_width - 1; i++) waddch(dialog, instr[show_x + i]); } else { show_x = 0; input_x = len; waddstr(dialog, instr); } wmove(dialog, box_y, box_x + input_x); wrefresh(dialog); while (key != KEY_ESC) { key = wgetch(dialog); if (button == -1) { /* Input box selected */ switch (key) { case TAB: case KEY_UP: case KEY_DOWN: break; case KEY_BACKSPACE: case 127: if (pos) { wattrset(dialog, dlg.inputbox.atr); if (input_x == 0) { show_x--; } else input_x--; if (pos < len) { for (i = pos - 1; i < len; i++) { instr[i] = instr[i+1]; } } pos--; len--; instr[len] = '\0'; wmove(dialog, box_y, box_x); for (i = 0; i < box_width; i++) { if (!instr[show_x + i]) { waddch(dialog, ' '); break; } waddch(dialog, instr[show_x + i]); } wmove(dialog, box_y, input_x + box_x); wrefresh(dialog); } continue; case KEY_LEFT: if (pos > 0) { if (input_x > 0) { wmove(dialog, box_y, --input_x + box_x); } else if (input_x == 0) { show_x--; wmove(dialog, box_y, box_x); for (i = 0; i < box_width; i++) { if (!instr[show_x + i]) { waddch(dialog, ' '); break; } waddch(dialog, instr[show_x + i]); } wmove(dialog, box_y, box_x); } pos--; } continue; case KEY_RIGHT: if (pos < len) { if (input_x < box_width - 1) { wmove(dialog, box_y, ++input_x + box_x); } else if (input_x == box_width - 1) { show_x++; wmove(dialog, box_y, box_x); for (i = 0; i < box_width; i++) { if (!instr[show_x + i]) { waddch(dialog, ' '); break; } waddch(dialog, instr[show_x + i]); } wmove(dialog, box_y, input_x + box_x); } pos++; } continue; default: if (key < 0x100 && isprint(key)) { if (len < 
MAX_LEN) { wattrset(dialog, dlg.inputbox.atr); if (pos < len) { for (i = len; i > pos; i--) instr[i] = instr[i-1]; instr[pos] = key; } else { instr[len] = key; } pos++; len++; instr[len] = '\0'; if (input_x == box_width - 1) { show_x++; } else { input_x++; } wmove(dialog, box_y, box_x); for (i = 0; i < box_width; i++) { if (!instr[show_x + i]) { waddch(dialog, ' '); break; } waddch(dialog, instr[show_x + i]); } wmove(dialog, box_y, input_x + box_x); wrefresh(dialog); } else flash(); /* Alarm user about overflow */ continue; } } } switch (key) { case 'O': case 'o': delwin(dialog); return 0; case 'H': case 'h': delwin(dialog); return 1; case KEY_UP: case KEY_LEFT: switch (button) { case -1: button = 1; /* Indicates "Help" button is selected */ print_buttons(dialog, height, width, 1); break; case 0: button = -1; /* Indicates input box is selected */ print_buttons(dialog, height, width, 0); wmove(dialog, box_y, box_x + input_x); wrefresh(dialog); break; case 1: button = 0; /* Indicates "OK" button is selected */ print_buttons(dialog, height, width, 0); break; } break; case TAB: case KEY_DOWN: case KEY_RIGHT: switch (button) { case -1: button = 0; /* Indicates "OK" button is selected */ print_buttons(dialog, height, width, 0); break; case 0: button = 1; /* Indicates "Help" button is selected */ print_buttons(dialog, height, width, 1); break; case 1: button = -1; /* Indicates input box is selected */ print_buttons(dialog, height, width, 0); wmove(dialog, box_y, box_x + input_x); wrefresh(dialog); break; } break; case ' ': case '\n': delwin(dialog); return (button == -1 ? 0 : button); case 'X': case 'x': key = KEY_ESC; break; case KEY_ESC: key = on_key_esc(dialog); break; case KEY_RESIZE: delwin(dialog); on_key_resize(); goto do_resize; } } delwin(dialog); return KEY_ESC; /* ESC pressed */ } backport-iwlwifi-dkms-7906/kconf/lxdialog/menubox.c000066400000000000000000000257511351064634500224370ustar00rootroot00000000000000/* * menubox.c -- implements the menu box * * ORIGINAL AUTHOR: Savio Lam (lam836@cs.cuhk.hk) * MODIFIED FOR LINUX KERNEL CONFIG BY: William Roadcap (roadcapw@cfw.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * Changes by Clifford Wolf (god@clifford.at) * * [ 1998-06-13 ] * * *) A bugfix for the Page-Down problem * * *) Formerly when I used Page Down and Page Up, the cursor would be set * to the first position in the menu box. Now lxdialog is a bit * smarter and works more like other menu systems (just have a look at * it). * * *) Formerly if I selected something my scrolling would be broken because * lxdialog is re-invoked by the Menuconfig shell script, can't * remember the last scrolling position, and just sets it so that the * cursor is at the bottom of the box. Now it writes the temporary file * lxdialog.scrltmp which contains this information. 
The file is * deleted by lxdialog if the user leaves a submenu or enters a new * one, but it would be nice if Menuconfig could make another "rm -f" * just to be sure. Just try it out - you will recognise a difference! * * [ 1998-06-14 ] * * *) Now lxdialog is crash-safe against broken "lxdialog.scrltmp" files * and menus change their size on the fly. * * *) If for some reason the last scrolling position is not saved by * lxdialog, it sets the scrolling so that the selected item is in the * middle of the menu box, not at the bottom. * * 02 January 1999, Michael Elizabeth Chastain (mec@shout.net) * Reset 'scroll' to 0 if the value from lxdialog.scrltmp is bogus. * This fixes a bug in Menuconfig where using ' ' to descend into menus * would leave mis-synchronized lxdialog.scrltmp files lying around, * fscanf would read in 'scroll', and eventually that value would get used. */ #include "dialog.h" static int menu_width, item_x; /* * Print menu item */ static void do_print_item(WINDOW * win, const char *item, int line_y, int selected, int hotkey) { int j; char *menu_item = malloc(menu_width + 1); strncpy(menu_item, item, menu_width - item_x); menu_item[menu_width - item_x] = '\0'; j = first_alpha(menu_item, "YyNnMmHh"); /* Clear 'residue' of last item */ wattrset(win, dlg.menubox.atr); wmove(win, line_y, 0); #if OLD_NCURSES { int i; for (i = 0; i < menu_width; i++) waddch(win, ' '); } #else wclrtoeol(win); #endif wattrset(win, selected ? dlg.item_selected.atr : dlg.item.atr); mvwaddstr(win, line_y, item_x, menu_item); if (hotkey) { wattrset(win, selected ? dlg.tag_key_selected.atr : dlg.tag_key.atr); mvwaddch(win, line_y, item_x + j, menu_item[j]); } if (selected) { wmove(win, line_y, item_x + 1); } free(menu_item); wrefresh(win); } #define print_item(index, choice, selected) \ do { \ item_set(index); \ do_print_item(menu, item_str(), choice, selected, !item_is_tag(':')); \ } while (0) /* * Print the scroll indicators. */ static void print_arrows(WINDOW * win, int item_no, int scroll, int y, int x, int height) { int cur_y, cur_x; getyx(win, cur_y, cur_x); wmove(win, y, x); if (scroll > 0) { wattrset(win, dlg.uarrow.atr); waddch(win, ACS_UARROW); waddstr(win, "(-)"); } else { wattrset(win, dlg.menubox.atr); waddch(win, ACS_HLINE); waddch(win, ACS_HLINE); waddch(win, ACS_HLINE); waddch(win, ACS_HLINE); } y = y + height + 1; wmove(win, y, x); wrefresh(win); if ((height < item_no) && (scroll + height < item_no)) { wattrset(win, dlg.darrow.atr); waddch(win, ACS_DARROW); waddstr(win, "(+)"); } else { wattrset(win, dlg.menubox_border.atr); waddch(win, ACS_HLINE); waddch(win, ACS_HLINE); waddch(win, ACS_HLINE); waddch(win, ACS_HLINE); } wmove(win, cur_y, cur_x); wrefresh(win); } /* * Display the termination buttons. 
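 * The five buttons start at x = width / 2 - 28 and are placed at a fixed
 * 12-column pitch, so the row spans roughly 56 columns; this always fits
 * because dialog_menu() enforces MENUBOX_WIDTH_MIN (65).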
*/ static void print_buttons(WINDOW * win, int height, int width, int selected) { int x = width / 2 - 28; int y = height - 2; print_button(win, gettext("Select"), y, x, selected == 0); print_button(win, gettext(" Exit "), y, x + 12, selected == 1); print_button(win, gettext(" Help "), y, x + 24, selected == 2); print_button(win, gettext(" Save "), y, x + 36, selected == 3); print_button(win, gettext(" Load "), y, x + 48, selected == 4); wmove(win, y, x + 1 + 12 * selected); wrefresh(win); } /* scroll up n lines (n may be negative) */ static void do_scroll(WINDOW *win, int *scroll, int n) { /* Scroll menu up */ scrollok(win, TRUE); wscrl(win, n); scrollok(win, FALSE); *scroll = *scroll + n; wrefresh(win); } /* * Display a menu for choosing among a number of options */ int dialog_menu(const char *title, const char *prompt, const void *selected, int *s_scroll) { int i, j, x, y, box_x, box_y; int height, width, menu_height; int key = 0, button = 0, scroll = 0, choice = 0; int first_item = 0, max_choice; WINDOW *dialog, *menu; do_resize: height = getmaxy(stdscr); width = getmaxx(stdscr); if (height < MENUBOX_HEIGTH_MIN || width < MENUBOX_WIDTH_MIN) return -ERRDISPLAYTOOSMALL; height -= 4; width -= 5; menu_height = height - 10; max_choice = MIN(menu_height, item_count()); /* center dialog box on screen */ x = (getmaxx(stdscr) - width) / 2; y = (getmaxy(stdscr) - height) / 2; draw_shadow(stdscr, y, x, height, width); dialog = newwin(height, width, y, x); keypad(dialog, TRUE); draw_box(dialog, 0, 0, height, width, dlg.dialog.atr, dlg.border.atr); wattrset(dialog, dlg.border.atr); mvwaddch(dialog, height - 3, 0, ACS_LTEE); for (i = 0; i < width - 2; i++) waddch(dialog, ACS_HLINE); wattrset(dialog, dlg.dialog.atr); wbkgdset(dialog, dlg.dialog.atr & A_COLOR); waddch(dialog, ACS_RTEE); print_title(dialog, title, width); wattrset(dialog, dlg.dialog.atr); print_autowrap(dialog, prompt, width - 2, 1, 3); menu_width = width - 6; box_y = height - menu_height - 5; box_x = (width - menu_width) / 2 - 1; /* create new window for the menu */ menu = subwin(dialog, menu_height, menu_width, y + box_y + 1, x + box_x + 1); keypad(menu, TRUE); /* draw a box around the menu items */ draw_box(dialog, box_y, box_x, menu_height + 2, menu_width + 2, dlg.menubox_border.atr, dlg.menubox.atr); if (menu_width >= 80) item_x = (menu_width - 70) / 2; else item_x = 4; /* Set choice to default item */ item_foreach() if (selected && (selected == item_data())) choice = item_n(); /* get the saved scroll info */ scroll = *s_scroll; if ((scroll <= choice) && (scroll + max_choice > choice) && (scroll >= 0) && (scroll + max_choice <= item_count())) { first_item = scroll; choice = choice - scroll; } else { scroll = 0; } if ((choice >= max_choice)) { if (choice >= item_count() - max_choice / 2) scroll = first_item = item_count() - max_choice; else scroll = first_item = choice - max_choice / 2; choice = choice - scroll; } /* Print the menu */ for (i = 0; i < max_choice; i++) { print_item(first_item + i, i, i == choice); } wnoutrefresh(menu); print_arrows(dialog, item_count(), scroll, box_y, box_x + item_x + 1, menu_height); print_buttons(dialog, height, width, 0); wmove(menu, choice, item_x + 1); wrefresh(menu); while (key != KEY_ESC) { key = wgetch(menu); if (key < 256 && isalpha(key)) key = tolower(key); if (strchr("ynmh", key)) i = max_choice; else { for (i = choice + 1; i < max_choice; i++) { item_set(scroll + i); j = first_alpha(item_str(), "YyNnMmHh"); if (key == tolower(item_str()[j])) break; } if (i == max_choice) for (i = 0; i < 
max_choice; i++) { item_set(scroll + i); j = first_alpha(item_str(), "YyNnMmHh"); if (key == tolower(item_str()[j])) break; } } if (item_count() != 0 && (i < max_choice || key == KEY_UP || key == KEY_DOWN || key == '-' || key == '+' || key == KEY_PPAGE || key == KEY_NPAGE)) { /* Remove highligt of current item */ print_item(scroll + choice, choice, FALSE); if (key == KEY_UP || key == '-') { if (choice < 2 && scroll) { /* Scroll menu down */ do_scroll(menu, &scroll, -1); print_item(scroll, 0, FALSE); } else choice = MAX(choice - 1, 0); } else if (key == KEY_DOWN || key == '+') { print_item(scroll+choice, choice, FALSE); if ((choice > max_choice - 3) && (scroll + max_choice < item_count())) { /* Scroll menu up */ do_scroll(menu, &scroll, 1); print_item(scroll+max_choice - 1, max_choice - 1, FALSE); } else choice = MIN(choice + 1, max_choice - 1); } else if (key == KEY_PPAGE) { scrollok(menu, TRUE); for (i = 0; (i < max_choice); i++) { if (scroll > 0) { do_scroll(menu, &scroll, -1); print_item(scroll, 0, FALSE); } else { if (choice > 0) choice--; } } } else if (key == KEY_NPAGE) { for (i = 0; (i < max_choice); i++) { if (scroll + max_choice < item_count()) { do_scroll(menu, &scroll, 1); print_item(scroll+max_choice-1, max_choice - 1, FALSE); } else { if (choice + 1 < max_choice) choice++; } } } else choice = i; print_item(scroll + choice, choice, TRUE); print_arrows(dialog, item_count(), scroll, box_y, box_x + item_x + 1, menu_height); wnoutrefresh(dialog); wrefresh(menu); continue; /* wait for another key press */ } switch (key) { case KEY_LEFT: case TAB: case KEY_RIGHT: button = ((key == KEY_LEFT ? --button : ++button) < 0) ? 4 : (button > 4 ? 0 : button); print_buttons(dialog, height, width, button); wrefresh(menu); break; case ' ': case 's': case 'y': case 'n': case 'm': case '/': case 'h': case '?': case 'z': case '\n': /* save scroll info */ *s_scroll = scroll; delwin(menu); delwin(dialog); item_set(scroll + choice); item_set_selected(1); switch (key) { case 'h': case '?': return 2; case 's': case 'y': return 5; case 'n': return 6; case 'm': return 7; case ' ': return 8; case '/': return 9; case 'z': return 10; case '\n': return button; } return 0; case 'e': case 'x': key = KEY_ESC; break; case KEY_ESC: key = on_key_esc(menu); break; case KEY_RESIZE: on_key_resize(); delwin(menu); delwin(dialog); goto do_resize; } } delwin(menu); delwin(dialog); return key; /* ESC pressed */ } backport-iwlwifi-dkms-7906/kconf/lxdialog/textbox.c000066400000000000000000000217661351064634500224610ustar00rootroot00000000000000/* * textbox.c -- implements the text box * * ORIGINAL AUTHOR: Savio Lam (lam836@cs.cuhk.hk) * MODIFIED FOR LINUX KERNEL CONFIG BY: William Roadcap (roadcap@cfw.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
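 *
 * Usage sketch (hypothetical caller, not part of the original file):
 *
 *	int keys[] = { 0 };		// no extra exit keys
 *	int vscroll = 0, hscroll = 0;
 *	dialog_textbox("Help", help_text, 0, 0, keys, &vscroll, &hscroll,
 *		       NULL, NULL);
 *	// help_text is a writable NUL-terminated buffer; initial sizes of 0
 *	// let the box grow to the screen, and a NULL update_text callback
 *	// means the whole text is already resident in the buffer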
*/ #include "dialog.h" static void back_lines(int n); static void print_page(WINDOW *win, int height, int width, update_text_fn update_text, void *data); static void print_line(WINDOW *win, int row, int width); static char *get_line(void); static void print_position(WINDOW * win); static int hscroll; static int begin_reached, end_reached, page_length; static char *buf; static char *page; /* * refresh window content */ static void refresh_text_box(WINDOW *dialog, WINDOW *box, int boxh, int boxw, int cur_y, int cur_x, update_text_fn update_text, void *data) { print_page(box, boxh, boxw, update_text, data); print_position(dialog); wmove(dialog, cur_y, cur_x); /* Restore cursor position */ wrefresh(dialog); } /* * Display text from a file in a dialog box. * * keys is a null-terminated array * update_text() may not add or remove any '\n' or '\0' in tbuf */ int dialog_textbox(const char *title, char *tbuf, int initial_height, int initial_width, int *keys, int *_vscroll, int *_hscroll, update_text_fn update_text, void *data) { int i, x, y, cur_x, cur_y, key = 0; int height, width, boxh, boxw; WINDOW *dialog, *box; bool done = false; begin_reached = 1; end_reached = 0; page_length = 0; hscroll = 0; buf = tbuf; page = buf; /* page is pointer to start of page to be displayed */ if (_vscroll && *_vscroll) { begin_reached = 0; for (i = 0; i < *_vscroll; i++) get_line(); } if (_hscroll) hscroll = *_hscroll; do_resize: getmaxyx(stdscr, height, width); if (height < TEXTBOX_HEIGTH_MIN || width < TEXTBOX_WIDTH_MIN) return -ERRDISPLAYTOOSMALL; if (initial_height != 0) height = initial_height; else if (height > 4) height -= 4; else height = 0; if (initial_width != 0) width = initial_width; else if (width > 5) width -= 5; else width = 0; /* center dialog box on screen */ x = (getmaxx(stdscr) - width) / 2; y = (getmaxy(stdscr) - height) / 2; draw_shadow(stdscr, y, x, height, width); dialog = newwin(height, width, y, x); keypad(dialog, TRUE); /* Create window for box region, used for scrolling text */ boxh = height - 4; boxw = width - 2; box = subwin(dialog, boxh, boxw, y + 1, x + 1); wattrset(box, dlg.dialog.atr); wbkgdset(box, dlg.dialog.atr & A_COLOR); keypad(box, TRUE); /* register the new window, along with its borders */ draw_box(dialog, 0, 0, height, width, dlg.dialog.atr, dlg.border.atr); wattrset(dialog, dlg.border.atr); mvwaddch(dialog, height - 3, 0, ACS_LTEE); for (i = 0; i < width - 2; i++) waddch(dialog, ACS_HLINE); wattrset(dialog, dlg.dialog.atr); wbkgdset(dialog, dlg.dialog.atr & A_COLOR); waddch(dialog, ACS_RTEE); print_title(dialog, title, width); print_button(dialog, gettext(" Exit "), height - 2, width / 2 - 4, TRUE); wnoutrefresh(dialog); getyx(dialog, cur_y, cur_x); /* Save cursor position */ /* Print first page of text */ attr_clear(box, boxh, boxw, dlg.dialog.atr); refresh_text_box(dialog, box, boxh, boxw, cur_y, cur_x, update_text, data); while (!done) { key = wgetch(dialog); switch (key) { case 'E': /* Exit */ case 'e': case 'X': case 'x': case 'q': case '\n': done = true; break; case 'g': /* First page */ case KEY_HOME: if (!begin_reached) { begin_reached = 1; page = buf; refresh_text_box(dialog, box, boxh, boxw, cur_y, cur_x, update_text, data); } break; case 'G': /* Last page */ case KEY_END: end_reached = 1; /* point to last char in buf */ page = buf + strlen(buf); back_lines(boxh); refresh_text_box(dialog, box, boxh, boxw, cur_y, cur_x, update_text, data); break; case 'K': /* Previous line */ case 'k': case KEY_UP: if (begin_reached) break; back_lines(page_length + 1); 
refresh_text_box(dialog, box, boxh, boxw, cur_y, cur_x, update_text, data); break; case 'B': /* Previous page */ case 'b': case 'u': case KEY_PPAGE: if (begin_reached) break; back_lines(page_length + boxh); refresh_text_box(dialog, box, boxh, boxw, cur_y, cur_x, update_text, data); break; case 'J': /* Next line */ case 'j': case KEY_DOWN: if (end_reached) break; back_lines(page_length - 1); refresh_text_box(dialog, box, boxh, boxw, cur_y, cur_x, update_text, data); break; case KEY_NPAGE: /* Next page */ case ' ': case 'd': if (end_reached) break; begin_reached = 0; refresh_text_box(dialog, box, boxh, boxw, cur_y, cur_x, update_text, data); break; case '0': /* Beginning of line */ case 'H': /* Scroll left */ case 'h': case KEY_LEFT: if (hscroll <= 0) break; if (key == '0') hscroll = 0; else hscroll--; /* Reprint current page to scroll horizontally */ back_lines(page_length); refresh_text_box(dialog, box, boxh, boxw, cur_y, cur_x, update_text, data); break; case 'L': /* Scroll right */ case 'l': case KEY_RIGHT: if (hscroll >= MAX_LEN) break; hscroll++; /* Reprint current page to scroll horizontally */ back_lines(page_length); refresh_text_box(dialog, box, boxh, boxw, cur_y, cur_x, update_text, data); break; case KEY_ESC: if (on_key_esc(dialog) == KEY_ESC) done = true; break; case KEY_RESIZE: back_lines(height); delwin(box); delwin(dialog); on_key_resize(); goto do_resize; default: for (i = 0; keys[i]; i++) { if (key == keys[i]) { done = true; break; } } } } delwin(box); delwin(dialog); if (_vscroll) { const char *s; s = buf; *_vscroll = 0; back_lines(page_length); while (s < page && (s = strchr(s, '\n'))) { (*_vscroll)++; s++; } } if (_hscroll) *_hscroll = hscroll; return key; } /* * Go back 'n' lines in text. Called by dialog_textbox(). * 'page' will be updated to point to the desired line in 'buf'. */ static void back_lines(int n) { int i; begin_reached = 0; /* Go back 'n' lines */ for (i = 0; i < n; i++) { if (*page == '\0') { if (end_reached) { end_reached = 0; continue; } } if (page == buf) { begin_reached = 1; return; } page--; do { if (page == buf) { begin_reached = 1; return; } page--; } while (*page != '\n'); page++; } } /* * Print a new page of text. */ static void print_page(WINDOW *win, int height, int width, update_text_fn update_text, void *data) { int i, passed_end = 0; if (update_text) { char *end; for (i = 0; i < height; i++) get_line(); end = page; back_lines(height); update_text(buf, page - buf, end - buf, data); } page_length = 0; for (i = 0; i < height; i++) { print_line(win, i, width); if (!passed_end) page_length++; if (end_reached && !passed_end) passed_end = 1; } wnoutrefresh(win); } /* * Print a new line of text. */ static void print_line(WINDOW * win, int row, int width) { char *line; line = get_line(); line += MIN(strlen(line), hscroll); /* Scroll horizontally */ wmove(win, row, 0); /* move cursor to correct line */ waddch(win, ' '); waddnstr(win, line, MIN(strlen(line), width - 2)); /* Clear 'residue' of previous line */ #if OLD_NCURSES { int x = getcurx(win); int i; for (i = 0; i < width - x; i++) waddch(win, ' '); } #else wclrtoeol(win); #endif } /* * Return current line of text. Called by dialog_textbox() and print_line(). * 'page' should point to start of current line before calling, and will be * updated to point to start of next line. 
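 *
 * For example, with buf = "aa\nbb\ncc" and page at the first 'b',
 * get_line() returns "bb" and advances page to the first 'c'; on the
 * final "cc" it sets end_reached and leaves page on the trailing '\0'.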
*/ static char *get_line(void) { int i = 0; static char line[MAX_LEN + 1]; end_reached = 0; while (*page != '\n') { if (*page == '\0') { end_reached = 1; break; } else if (i < MAX_LEN) line[i++] = *(page++); else { /* Truncate lines longer than MAX_LEN characters */ if (i == MAX_LEN) line[i++] = '\0'; page++; } } if (i <= MAX_LEN) line[i] = '\0'; if (!end_reached) page++; /* move past '\n' */ return line; } /* * Print current position */ static void print_position(WINDOW * win) { int percent; wattrset(win, dlg.position_indicator.atr); wbkgdset(win, dlg.position_indicator.atr & A_COLOR); percent = (page - buf) * 100 / strlen(buf); wmove(win, getmaxy(win) - 3, getmaxx(win) - 9); wprintw(win, "(%3d%%)", percent); } backport-iwlwifi-dkms-7906/kconf/lxdialog/util.c000066400000000000000000000436211351064634500217330ustar00rootroot00000000000000/* * util.c * * ORIGINAL AUTHOR: Savio Lam (lam836@cs.cuhk.hk) * MODIFIED FOR LINUX KERNEL CONFIG BY: William Roadcap (roadcap@cfw.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include #include "dialog.h" /* Needed in signal handler in mconf.c */ int saved_x, saved_y; struct dialog_info dlg; static void set_mono_theme(void) { dlg.screen.atr = A_NORMAL; dlg.shadow.atr = A_NORMAL; dlg.dialog.atr = A_NORMAL; dlg.title.atr = A_BOLD; dlg.border.atr = A_NORMAL; dlg.button_active.atr = A_REVERSE; dlg.button_inactive.atr = A_DIM; dlg.button_key_active.atr = A_REVERSE; dlg.button_key_inactive.atr = A_BOLD; dlg.button_label_active.atr = A_REVERSE; dlg.button_label_inactive.atr = A_NORMAL; dlg.inputbox.atr = A_NORMAL; dlg.inputbox_border.atr = A_NORMAL; dlg.searchbox.atr = A_NORMAL; dlg.searchbox_title.atr = A_BOLD; dlg.searchbox_border.atr = A_NORMAL; dlg.position_indicator.atr = A_BOLD; dlg.menubox.atr = A_NORMAL; dlg.menubox_border.atr = A_NORMAL; dlg.item.atr = A_NORMAL; dlg.item_selected.atr = A_REVERSE; dlg.tag.atr = A_BOLD; dlg.tag_selected.atr = A_REVERSE; dlg.tag_key.atr = A_BOLD; dlg.tag_key_selected.atr = A_REVERSE; dlg.check.atr = A_BOLD; dlg.check_selected.atr = A_REVERSE; dlg.uarrow.atr = A_BOLD; dlg.darrow.atr = A_BOLD; } #define DLG_COLOR(dialog, f, b, h) \ do { \ dlg.dialog.fg = (f); \ dlg.dialog.bg = (b); \ dlg.dialog.hl = (h); \ } while (0) static void set_classic_theme(void) { DLG_COLOR(screen, COLOR_CYAN, COLOR_BLUE, true); DLG_COLOR(shadow, COLOR_BLACK, COLOR_BLACK, true); DLG_COLOR(dialog, COLOR_BLACK, COLOR_WHITE, false); DLG_COLOR(title, COLOR_YELLOW, COLOR_WHITE, true); DLG_COLOR(border, COLOR_WHITE, COLOR_WHITE, true); DLG_COLOR(button_active, COLOR_WHITE, COLOR_BLUE, true); DLG_COLOR(button_inactive, COLOR_BLACK, COLOR_WHITE, false); DLG_COLOR(button_key_active, COLOR_WHITE, COLOR_BLUE, true); DLG_COLOR(button_key_inactive, COLOR_RED, COLOR_WHITE, false); DLG_COLOR(button_label_active, COLOR_YELLOW, COLOR_BLUE, true); DLG_COLOR(button_label_inactive, COLOR_BLACK, COLOR_WHITE, true); DLG_COLOR(inputbox, 
COLOR_BLACK, COLOR_WHITE, false); DLG_COLOR(inputbox_border, COLOR_BLACK, COLOR_WHITE, false); DLG_COLOR(searchbox, COLOR_BLACK, COLOR_WHITE, false); DLG_COLOR(searchbox_title, COLOR_YELLOW, COLOR_WHITE, true); DLG_COLOR(searchbox_border, COLOR_WHITE, COLOR_WHITE, true); DLG_COLOR(position_indicator, COLOR_YELLOW, COLOR_WHITE, true); DLG_COLOR(menubox, COLOR_BLACK, COLOR_WHITE, false); DLG_COLOR(menubox_border, COLOR_WHITE, COLOR_WHITE, true); DLG_COLOR(item, COLOR_BLACK, COLOR_WHITE, false); DLG_COLOR(item_selected, COLOR_WHITE, COLOR_BLUE, true); DLG_COLOR(tag, COLOR_YELLOW, COLOR_WHITE, true); DLG_COLOR(tag_selected, COLOR_YELLOW, COLOR_BLUE, true); DLG_COLOR(tag_key, COLOR_YELLOW, COLOR_WHITE, true); DLG_COLOR(tag_key_selected, COLOR_YELLOW, COLOR_BLUE, true); DLG_COLOR(check, COLOR_BLACK, COLOR_WHITE, false); DLG_COLOR(check_selected, COLOR_WHITE, COLOR_BLUE, true); DLG_COLOR(uarrow, COLOR_GREEN, COLOR_WHITE, true); DLG_COLOR(darrow, COLOR_GREEN, COLOR_WHITE, true); } static void set_blackbg_theme(void) { DLG_COLOR(screen, COLOR_RED, COLOR_BLACK, true); DLG_COLOR(shadow, COLOR_BLACK, COLOR_BLACK, false); DLG_COLOR(dialog, COLOR_WHITE, COLOR_BLACK, false); DLG_COLOR(title, COLOR_RED, COLOR_BLACK, false); DLG_COLOR(border, COLOR_BLACK, COLOR_BLACK, true); DLG_COLOR(button_active, COLOR_YELLOW, COLOR_RED, false); DLG_COLOR(button_inactive, COLOR_YELLOW, COLOR_BLACK, false); DLG_COLOR(button_key_active, COLOR_YELLOW, COLOR_RED, true); DLG_COLOR(button_key_inactive, COLOR_RED, COLOR_BLACK, false); DLG_COLOR(button_label_active, COLOR_WHITE, COLOR_RED, false); DLG_COLOR(button_label_inactive, COLOR_BLACK, COLOR_BLACK, true); DLG_COLOR(inputbox, COLOR_YELLOW, COLOR_BLACK, false); DLG_COLOR(inputbox_border, COLOR_YELLOW, COLOR_BLACK, false); DLG_COLOR(searchbox, COLOR_YELLOW, COLOR_BLACK, false); DLG_COLOR(searchbox_title, COLOR_YELLOW, COLOR_BLACK, true); DLG_COLOR(searchbox_border, COLOR_BLACK, COLOR_BLACK, true); DLG_COLOR(position_indicator, COLOR_RED, COLOR_BLACK, false); DLG_COLOR(menubox, COLOR_YELLOW, COLOR_BLACK, false); DLG_COLOR(menubox_border, COLOR_BLACK, COLOR_BLACK, true); DLG_COLOR(item, COLOR_WHITE, COLOR_BLACK, false); DLG_COLOR(item_selected, COLOR_WHITE, COLOR_RED, false); DLG_COLOR(tag, COLOR_RED, COLOR_BLACK, false); DLG_COLOR(tag_selected, COLOR_YELLOW, COLOR_RED, true); DLG_COLOR(tag_key, COLOR_RED, COLOR_BLACK, false); DLG_COLOR(tag_key_selected, COLOR_YELLOW, COLOR_RED, true); DLG_COLOR(check, COLOR_YELLOW, COLOR_BLACK, false); DLG_COLOR(check_selected, COLOR_YELLOW, COLOR_RED, true); DLG_COLOR(uarrow, COLOR_RED, COLOR_BLACK, false); DLG_COLOR(darrow, COLOR_RED, COLOR_BLACK, false); } static void set_bluetitle_theme(void) { set_classic_theme(); DLG_COLOR(title, COLOR_BLUE, COLOR_WHITE, true); DLG_COLOR(button_key_active, COLOR_YELLOW, COLOR_BLUE, true); DLG_COLOR(button_label_active, COLOR_WHITE, COLOR_BLUE, true); DLG_COLOR(searchbox_title, COLOR_BLUE, COLOR_WHITE, true); DLG_COLOR(position_indicator, COLOR_BLUE, COLOR_WHITE, true); DLG_COLOR(tag, COLOR_BLUE, COLOR_WHITE, true); DLG_COLOR(tag_key, COLOR_BLUE, COLOR_WHITE, true); } /* * Select color theme */ static int set_theme(const char *theme) { int use_color = 1; if (!theme) set_bluetitle_theme(); else if (strcmp(theme, "classic") == 0) set_classic_theme(); else if (strcmp(theme, "bluetitle") == 0) set_bluetitle_theme(); else if (strcmp(theme, "blackbg") == 0) set_blackbg_theme(); else if (strcmp(theme, "mono") == 0) use_color = 0; return use_color; } static void init_one_color(struct dialog_color *color) { 
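/* hand out ncurses color pairs sequentially (pairs are numbered from 1) and cache the resulting attribute, OR-ing in A_BOLD when the entry is highlighted */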
static int pair = 0; pair++; init_pair(pair, color->fg, color->bg); if (color->hl) color->atr = A_BOLD | COLOR_PAIR(pair); else color->atr = COLOR_PAIR(pair); } static void init_dialog_colors(void) { init_one_color(&dlg.screen); init_one_color(&dlg.shadow); init_one_color(&dlg.dialog); init_one_color(&dlg.title); init_one_color(&dlg.border); init_one_color(&dlg.button_active); init_one_color(&dlg.button_inactive); init_one_color(&dlg.button_key_active); init_one_color(&dlg.button_key_inactive); init_one_color(&dlg.button_label_active); init_one_color(&dlg.button_label_inactive); init_one_color(&dlg.inputbox); init_one_color(&dlg.inputbox_border); init_one_color(&dlg.searchbox); init_one_color(&dlg.searchbox_title); init_one_color(&dlg.searchbox_border); init_one_color(&dlg.position_indicator); init_one_color(&dlg.menubox); init_one_color(&dlg.menubox_border); init_one_color(&dlg.item); init_one_color(&dlg.item_selected); init_one_color(&dlg.tag); init_one_color(&dlg.tag_selected); init_one_color(&dlg.tag_key); init_one_color(&dlg.tag_key_selected); init_one_color(&dlg.check); init_one_color(&dlg.check_selected); init_one_color(&dlg.uarrow); init_one_color(&dlg.darrow); } /* * Setup for color display */ static void color_setup(const char *theme) { int use_color; use_color = set_theme(theme); if (use_color && has_colors()) { start_color(); init_dialog_colors(); } else set_mono_theme(); } /* * Set window to attribute 'attr' */ void attr_clear(WINDOW * win, int height, int width, chtype attr) { int i, j; wattrset(win, attr); for (i = 0; i < height; i++) { wmove(win, i, 0); for (j = 0; j < width; j++) waddch(win, ' '); } touchwin(win); } void dialog_clear(void) { int lines, columns; lines = getmaxy(stdscr); columns = getmaxx(stdscr); attr_clear(stdscr, lines, columns, dlg.screen.atr); /* Display background title if it exists ... - SLH */ if (dlg.backtitle != NULL) { int i, len = 0, skip = 0; struct subtitle_list *pos; wattrset(stdscr, dlg.screen.atr); mvwaddstr(stdscr, 0, 1, (char *)dlg.backtitle); for (pos = dlg.subtitles; pos != NULL; pos = pos->next) { /* 3 is for the arrow and spaces */ len += strlen(pos->text) + 3; } wmove(stdscr, 1, 1); if (len > columns - 2) { const char *ellipsis = "[...] "; waddstr(stdscr, ellipsis); skip = len - (columns - 2 - strlen(ellipsis)); } for (pos = dlg.subtitles; pos != NULL; pos = pos->next) { if (skip == 0) waddch(stdscr, ACS_RARROW); else skip--; if (skip == 0) waddch(stdscr, ' '); else skip--; if (skip < strlen(pos->text)) { waddstr(stdscr, pos->text + skip); skip = 0; } else skip -= strlen(pos->text); if (skip == 0) waddch(stdscr, ' '); else skip--; } for (i = len + 1; i < columns - 1; i++) waddch(stdscr, ACS_HLINE); } wnoutrefresh(stdscr); } /* * Do some initialization for dialog */ int init_dialog(const char *backtitle) { int height, width; initscr(); /* Init curses */ /* Get current cursor position for signal handler in mconf.c */ getyx(stdscr, saved_y, saved_x); getmaxyx(stdscr, height, width); if (height < WINDOW_HEIGTH_MIN || width < WINDOW_WIDTH_MIN) { endwin(); return -ERRDISPLAYTOOSMALL; } dlg.backtitle = backtitle; color_setup(getenv("MENUCONFIG_COLOR")); keypad(stdscr, TRUE); cbreak(); noecho(); dialog_clear(); return 0; } void set_dialog_backtitle(const char *backtitle) { dlg.backtitle = backtitle; } void set_dialog_subtitles(struct subtitle_list *subtitles) { dlg.subtitles = subtitles; } /* * End using dialog functions. 
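 *
 * Typical lifecycle (a hedged sketch of a hypothetical caller; mconf.c
 * follows the same pattern):
 *
 *	if (init_dialog("Main title") != 0)
 *		return;				// display too small
 *	...any number of dialog_*() calls...
 *	end_dialog(saved_x, saved_y);		// restore cursor, leave curses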
*/ void end_dialog(int x, int y) { /* move cursor back to original position */ move(y, x); refresh(); endwin(); } /* Print the title of the dialog. Center the title and truncate * title if wider than dialog (- 2 chars). **/ void print_title(WINDOW *dialog, const char *title, int width) { if (title) { int tlen = MIN(width - 2, strlen(title)); wattrset(dialog, dlg.title.atr); mvwaddch(dialog, 0, (width - tlen) / 2 - 1, ' '); mvwaddnstr(dialog, 0, (width - tlen)/2, title, tlen); waddch(dialog, ' '); } } /* * Print a string of text in a window, automatically wrap around to the * next line if the string is too long to fit on one line. Newline * characters '\n' are properly processed. We start on a new line * if there is no room for at least 4 nonblanks following a double-space. */ void print_autowrap(WINDOW * win, const char *prompt, int width, int y, int x) { int newl, cur_x, cur_y; int prompt_len, room, wlen; char tempstr[MAX_LEN + 1], *word, *sp, *sp2, *newline_separator = 0; strcpy(tempstr, prompt); prompt_len = strlen(tempstr); if (prompt_len <= width - x * 2) { /* If prompt is short */ wmove(win, y, (width - prompt_len) / 2); waddstr(win, tempstr); } else { cur_x = x; cur_y = y; newl = 1; word = tempstr; while (word && *word) { sp = strpbrk(word, "\n "); if (sp && *sp == '\n') newline_separator = sp; if (sp) *sp++ = 0; /* Wrap to next line if either the word does not fit, or it is the first word of a new sentence, and it is short, and the next word does not fit. */ room = width - cur_x; wlen = strlen(word); if (wlen > room || (newl && wlen < 4 && sp && wlen + 1 + strlen(sp) > room && (!(sp2 = strpbrk(sp, "\n ")) || wlen + 1 + (sp2 - sp) > room))) { cur_y++; cur_x = x; } wmove(win, cur_y, cur_x); waddstr(win, word); getyx(win, cur_y, cur_x); /* Move to the next line if the word separator was a newline */ if (newline_separator) { cur_y++; cur_x = x; newline_separator = 0; } else cur_x++; if (sp && *sp == ' ') { cur_x++; /* double space */ while (*++sp == ' ') ; newl = 1; } else newl = 0; word = sp; } } } /* * Print a button */ void print_button(WINDOW * win, const char *label, int y, int x, int selected) { int i, temp; wmove(win, y, x); wattrset(win, selected ? dlg.button_active.atr : dlg.button_inactive.atr); waddstr(win, "<"); temp = strspn(label, " "); label += temp; wattrset(win, selected ? dlg.button_label_active.atr : dlg.button_label_inactive.atr); for (i = 0; i < temp; i++) waddch(win, ' '); wattrset(win, selected ? dlg.button_key_active.atr : dlg.button_key_inactive.atr); waddch(win, label[0]); wattrset(win, selected ? dlg.button_label_active.atr : dlg.button_label_inactive.atr); waddstr(win, (char *)label + 1); wattrset(win, selected ?
dlg.button_active.atr : dlg.button_inactive.atr); waddstr(win, ">"); wmove(win, y, x + temp + 1); } /* * Draw a rectangular box with line drawing characters */ void draw_box(WINDOW * win, int y, int x, int height, int width, chtype box, chtype border) { int i, j; wattrset(win, 0); for (i = 0; i < height; i++) { wmove(win, y + i, x); for (j = 0; j < width; j++) if (!i && !j) waddch(win, border | ACS_ULCORNER); else if (i == height - 1 && !j) waddch(win, border | ACS_LLCORNER); else if (!i && j == width - 1) waddch(win, box | ACS_URCORNER); else if (i == height - 1 && j == width - 1) waddch(win, box | ACS_LRCORNER); else if (!i) waddch(win, border | ACS_HLINE); else if (i == height - 1) waddch(win, box | ACS_HLINE); else if (!j) waddch(win, border | ACS_VLINE); else if (j == width - 1) waddch(win, box | ACS_VLINE); else waddch(win, box | ' '); } } /* * Draw shadows along the right and bottom edge to give a more 3D look * to the boxes */ void draw_shadow(WINDOW * win, int y, int x, int height, int width) { int i; if (has_colors()) { /* Does the terminal support color? */ wattrset(win, dlg.shadow.atr); wmove(win, y + height, x + 2); for (i = 0; i < width; i++) waddch(win, winch(win) & A_CHARTEXT); for (i = y + 1; i < y + height + 1; i++) { wmove(win, i, x + width); waddch(win, winch(win) & A_CHARTEXT); waddch(win, winch(win) & A_CHARTEXT); } wnoutrefresh(win); } } /* * Return the position of the first alphabetic character in a string. */ int first_alpha(const char *string, const char *exempt) { int i, in_paren = 0, c; for (i = 0; i < strlen(string); i++) { c = tolower(string[i]); if (strchr("<[(", c)) ++in_paren; if (strchr(">])", c) && in_paren > 0) --in_paren; if ((!in_paren) && isalpha(c) && strchr(exempt, c) == 0) return i; } return 0; } /* * ncurses uses ESC to detect escaped char sequences. This results in * a small timeout before ESC is actually delivered to the application. * lxdialog suggests <ESC> <ESC>, which is correctly translated to two * times esc. But then we need to ignore the second esc to avoid stepping * out one menu too much. Filter away all escaped key sequences since * keypad(FALSE) turns off ncurses support for escape sequences - and that's * needed to make notimeout() do as expected. */ int on_key_esc(WINDOW *win) { int key; int key2; int key3; nodelay(win, TRUE); keypad(win, FALSE); key = wgetch(win); key2 = wgetch(win); do { key3 = wgetch(win); } while (key3 != ERR); nodelay(win, FALSE); keypad(win, TRUE); if (key == KEY_ESC && key2 == ERR) return KEY_ESC; else if (key != ERR && key != KEY_ESC && key2 == ERR) ungetch(key); return -1; } /* redraw screen in new size */ int on_key_resize(void) { dialog_clear(); return KEY_RESIZE; } struct dialog_list *item_cur; struct dialog_list item_nil; struct dialog_list *item_head; void item_reset(void) { struct dialog_list *p, *next; for (p = item_head; p; p = next) { next = p->next; free(p); } item_head = NULL; item_cur = &item_nil; } void item_make(const char *fmt, ...) { va_list ap; struct dialog_list *p = malloc(sizeof(*p)); if (item_head) item_cur->next = p; else item_head = p; item_cur = p; memset(p, 0, sizeof(*p)); va_start(ap, fmt); vsnprintf(item_cur->node.str, sizeof(item_cur->node.str), fmt, ap); va_end(ap); } void item_add_str(const char *fmt, ...)
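/* Append printf-style formatted text to the prompt of the current item; output is clamped to the MAXITEMSTR-sized node buffer and kept NUL-terminated. */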
{ va_list ap; size_t avail; avail = sizeof(item_cur->node.str) - strlen(item_cur->node.str); va_start(ap, fmt); vsnprintf(item_cur->node.str + strlen(item_cur->node.str), avail, fmt, ap); item_cur->node.str[sizeof(item_cur->node.str) - 1] = '\0'; va_end(ap); } void item_set_tag(char tag) { item_cur->node.tag = tag; } void item_set_data(void *ptr) { item_cur->node.data = ptr; } void item_set_selected(int val) { item_cur->node.selected = val; } int item_activate_selected(void) { item_foreach() if (item_is_selected()) return 1; return 0; } void *item_data(void) { return item_cur->node.data; } char item_tag(void) { return item_cur->node.tag; } int item_count(void) { int n = 0; struct dialog_list *p; for (p = item_head; p; p = p->next) n++; return n; } void item_set(int n) { int i = 0; item_foreach() if (i++ == n) return; } int item_n(void) { int n = 0; struct dialog_list *p; for (p = item_head; p; p = p->next) { if (p == item_cur) return n; n++; } return 0; } const char *item_str(void) { return item_cur->node.str; } int item_is_selected(void) { return (item_cur->node.selected != 0); } int item_is_tag(char tag) { return (item_cur->node.tag == tag); } backport-iwlwifi-dkms-7906/kconf/lxdialog/yesno.c000066400000000000000000000056121351064634500221110ustar00rootroot00000000000000/* * yesno.c -- implements the yes/no box * * ORIGINAL AUTHOR: Savio Lam (lam836@cs.cuhk.hk) * MODIFIED FOR LINUX KERNEL CONFIG BY: William Roadcap (roadcap@cfw.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
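 *
 * Usage sketch (hypothetical caller, not part of the original file):
 *
 *	int ret = dialog_yesno("Exit", "Save the new configuration?", 6, 60);
 *	// 0 = <Yes>, 1 = <No>, KEY_ESC = user aborted,
 *	// -ERRDISPLAYTOOSMALL = terminal too small for a 6x60 box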
*/ #include "dialog.h" /* * Display termination buttons */ static void print_buttons(WINDOW * dialog, int height, int width, int selected) { int x = width / 2 - 10; int y = height - 2; print_button(dialog, gettext(" Yes "), y, x, selected == 0); print_button(dialog, gettext(" No "), y, x + 13, selected == 1); wmove(dialog, y, x + 1 + 13 * selected); wrefresh(dialog); } /* * Display a dialog box with two buttons - Yes and No */ int dialog_yesno(const char *title, const char *prompt, int height, int width) { int i, x, y, key = 0, button = 0; WINDOW *dialog; do_resize: if (getmaxy(stdscr) < (height + YESNO_HEIGTH_MIN)) return -ERRDISPLAYTOOSMALL; if (getmaxx(stdscr) < (width + YESNO_WIDTH_MIN)) return -ERRDISPLAYTOOSMALL; /* center dialog box on screen */ x = (getmaxx(stdscr) - width) / 2; y = (getmaxy(stdscr) - height) / 2; draw_shadow(stdscr, y, x, height, width); dialog = newwin(height, width, y, x); keypad(dialog, TRUE); draw_box(dialog, 0, 0, height, width, dlg.dialog.atr, dlg.border.atr); wattrset(dialog, dlg.border.atr); mvwaddch(dialog, height - 3, 0, ACS_LTEE); for (i = 0; i < width - 2; i++) waddch(dialog, ACS_HLINE); wattrset(dialog, dlg.dialog.atr); waddch(dialog, ACS_RTEE); print_title(dialog, title, width); wattrset(dialog, dlg.dialog.atr); print_autowrap(dialog, prompt, width - 2, 1, 3); print_buttons(dialog, height, width, 0); while (key != KEY_ESC) { key = wgetch(dialog); switch (key) { case 'Y': case 'y': delwin(dialog); return 0; case 'N': case 'n': delwin(dialog); return 1; case TAB: case KEY_LEFT: case KEY_RIGHT: button = ((key == KEY_LEFT ? --button : ++button) < 0) ? 1 : (button > 1 ? 0 : button); print_buttons(dialog, height, width, button); wrefresh(dialog); break; case ' ': case '\n': delwin(dialog); return button; case KEY_ESC: key = on_key_esc(dialog); break; case KEY_RESIZE: delwin(dialog); on_key_resize(); goto do_resize; } } delwin(dialog); return key; /* ESC pressed */ } backport-iwlwifi-dkms-7906/kconf/mconf.c000066400000000000000000000673621351064634500202650ustar00rootroot00000000000000/* * Copyright (C) 2002 Roman Zippel * Released under the terms of the GNU GPL v2.0. * * Introduced single menu mode (show all sub-menus in one large tree). * 2002-11-06 Petr Baudis * * i18n, 2005, Arnaldo Carvalho de Melo */ #include #include #include #include #include #include #include #include #include #include #include "lkc.h" #include "lxdialog/dialog.h" static const char mconf_readme[] = N_( "Overview\n" "--------\n" "This interface lets you select features and parameters for the build.\n" "Features can either be built-in, modularized, or ignored. Parameters\n" "must be entered in as decimal or hexadecimal numbers or text.\n" "\n" "Menu items beginning with following braces represent features that\n" " [ ] can be built in or removed\n" " < > can be built in, modularized or removed\n" " { } can be built in or modularized (selected by other feature)\n" " - - are selected by other feature,\n" "while *, M or whitespace inside braces means to build in, build as\n" "a module or to exclude the feature respectively.\n" "\n" "To change any of these features, highlight it with the cursor\n" "keys and press to build it in, to make it a module or\n" " to remove it. You may also press the to cycle\n" "through the available options (i.e. 
Y->N->M->Y).\n" "\n" "Some additional keyboard hints:\n" "\n" "Menus\n" "----------\n" "o Use the Up/Down arrow keys (cursor keys) to highlight the item you\n" " wish to change or the submenu you wish to select and press .\n" " Submenus are designated by \"--->\", empty ones by \"----\".\n" "\n" " Shortcut: Press the option's highlighted letter (hotkey).\n" " Pressing a hotkey more than once will sequence\n" " through all visible items which use that hotkey.\n" "\n" " You may also use the and keys to scroll\n" " unseen options into view.\n" "\n" "o To exit a menu use the cursor keys to highlight the button\n" " and press .\n" "\n" " Shortcut: Press or or if there is no hotkey\n" " using those letters. You may press a single , but\n" " there is a delayed response which you may find annoying.\n" "\n" " Also, the and cursor keys will cycle between and\n" " \n" "\n" "\n" "Data Entry\n" "-----------\n" "o Enter the requested information and press \n" " If you are entering hexadecimal values, it is not necessary to\n" " add the '0x' prefix to the entry.\n" "\n" "o For help, use the or cursor keys to highlight the help option\n" " and press . You can try as well.\n" "\n" "\n" "Text Box (Help Window)\n" "--------\n" "o Use the cursor keys to scroll up/down/left/right. The VI editor\n" " keys h,j,k,l function here as do , , and for\n" " those who are familiar with less and lynx.\n" "\n" "o Press , , , or to exit.\n" "\n" "\n" "Alternate Configuration Files\n" "-----------------------------\n" "Menuconfig supports the use of alternate configuration files for\n" "those who, for various reasons, find it necessary to switch\n" "between different configurations.\n" "\n" "The button will let you save the current configuration to\n" "a file of your choosing. Use the button to load a previously\n" "saved alternate configuration.\n" "\n" "Even if you don't use alternate configuration files, but you find\n" "during a Menuconfig session that you have completely messed up your\n" "settings, you may use the button to restore your previously\n" "saved settings from \".config\" without restarting Menuconfig.\n" "\n" "Other information\n" "-----------------\n" "If you use Menuconfig in an XTERM window, make sure you have your\n" "$TERM variable set to point to an xterm definition which supports\n" "color. Otherwise, Menuconfig will look rather bad. Menuconfig will\n" "not display correctly in an RXVT window because rxvt displays only one\n" "intensity of color, bright.\n" "\n" "Menuconfig will display larger menus on screens or xterms which are\n" "set to display more than the standard 25 row by 80 column geometry.\n" "In order for this to work, the \"stty size\" command must be able to\n" "display the screen's current row and column geometry. I STRONGLY\n" "RECOMMEND that you make sure you do NOT have the shell variables\n" "LINES and COLUMNS exported into your environment. Some distributions\n" "export those variables via /etc/profile. Some ncurses programs can\n" "become confused when those variables (LINES & COLUMNS) don't reflect\n" "the true screen size.\n" "\n" "Optional personality available\n" "------------------------------\n" "If you prefer to have all of the options listed in a single menu,\n" "rather than the default multimenu hierarchy, run the menuconfig with\n" "MENUCONFIG_MODE environment variable set to single_menu. 
Example:\n" "\n" "make MENUCONFIG_MODE=single_menu menuconfig\n" "\n" " will then unroll the appropriate category, or enfold it if it\n" "is already unrolled.\n" "\n" "Note that this mode can eventually be a little more CPU expensive\n" "(especially with a larger number of unrolled categories) than the\n" "default mode.\n" "\n" "Different color themes available\n" "--------------------------------\n" "It is possible to select different color themes using the variable\n" "MENUCONFIG_COLOR. To select a theme use:\n" "\n" "make MENUCONFIG_COLOR= menuconfig\n" "\n" "Available themes are\n" " mono => selects colors suitable for monochrome displays\n" " blackbg => selects a color scheme with black background\n" " classic => theme with blue background. The classic look\n" " bluetitle => an LCD friendly version of classic. (default)\n" "\n"), menu_instructions[] = N_( "Arrow keys navigate the menu. " " selects submenus ---> (or empty submenus ----). " "Highlighted letters are hotkeys. " "Pressing includes, excludes, modularizes features. " "Press to exit, for Help, for Search. " "Legend: [*] built-in [ ] excluded module < > module capable"), radiolist_instructions[] = N_( "Use the arrow keys to navigate this window or " "press the hotkey of the item you wish to select " "followed by the . " "Press for additional information about this option."), inputbox_instructions_int[] = N_( "Please enter a decimal value. " "Fractions will not be accepted. " "Use the key to move from the input field to the buttons below it."), inputbox_instructions_hex[] = N_( "Please enter a hexadecimal value. " "Use the key to move from the input field to the buttons below it."), inputbox_instructions_string[] = N_( "Please enter a string value. " "Use the key to move from the input field to the buttons below it."), setmod_text[] = N_( "This feature depends on another which has been configured as a module.\n" "As a result, this feature will be built as a module."), load_config_text[] = N_( "Enter the name of the configuration file you wish to load. " "Accept the name shown to restore the configuration you " "last retrieved. Leave blank to abort."), load_config_help[] = N_( "\n" "For various reasons, one may wish to keep several different\n" "configurations available on a single machine.\n" "\n" "If you have saved a previous configuration in a file other than the\n" "default one, entering its name here will allow you to modify that\n" "configuration.\n" "\n" "If you are uncertain, then you have probably never used alternate\n" "configuration files. You should therefore leave this blank to abort.\n"), save_config_text[] = N_( "Enter a filename to which this configuration should be saved " "as an alternate. 
Leave blank to abort."), save_config_help[] = N_( "\n" "For various reasons, one may wish to keep different configurations\n" "available on a single machine.\n" "\n" "Entering a file name here will allow you to later retrieve, modify\n" "and use the current configuration as an alternate to whatever\n" "configuration options you have selected at that time.\n" "\n" "If you are uncertain what all this means then you should probably\n" "leave this blank.\n"), search_help[] = N_( "\n" "Search for symbols and display their relations.\n" "Regular expressions are allowed.\n" "Example: search for \"^FOO\"\n" "Result:\n" "-----------------------------------------------------------------\n" "Symbol: FOO [=m]\n" "Type : tristate\n" "Prompt: Foo bus is used to drive the bar HW\n" " Location:\n" " -> Bus options (PCI, PCMCIA, EISA, ISA)\n" " -> PCI support (PCI [=y])\n" "(1) -> PCI access mode ( [=y])\n" " Defined at drivers/pci/Kconfig:47\n" " Depends on: X86_LOCAL_APIC && X86_IO_APIC || IA64\n" " Selects: LIBCRC32\n" " Selected by: BAR [=n]\n" "-----------------------------------------------------------------\n" "o The line 'Type:' shows the type of the configuration option for\n" " this symbol (boolean, tristate, string, ...)\n" "o The line 'Prompt:' shows the text used in the menu structure for\n" " this symbol\n" "o The 'Defined at' line tells at what file / line number the symbol\n" " is defined\n" "o The 'Depends on:' line tells what symbols need to be defined for\n" " this symbol to be visible in the menu (selectable)\n" "o The 'Location:' lines tells where in the menu structure this symbol\n" " is located\n" " A location followed by a [=y] indicates that this is a\n" " selectable menu item - and the current value is displayed inside\n" " brackets.\n" " Press the key in the (#) prefix to jump directly to that\n" " location. 
You will be returned to the current search results\n" " after exiting this new menu.\n" "o The 'Selects:' line tells what symbols will be automatically\n" " selected if this symbol is selected (y or m)\n" "o The 'Selected by' line tells what symbol has selected this symbol\n" "\n" "Only relevant lines are shown.\n" "\n\n" "Search examples:\n" "Examples: USB => find all symbols containing USB\n" " ^USB => find all symbols starting with USB\n" " USB$ => find all symbols ending with USB\n" "\n"); static int indent; static struct menu *current_menu; static int child_count; static int single_menu_mode; static int show_all_options; static int save_and_exit; static int silent; static void conf(struct menu *menu, struct menu *active_menu); static void conf_choice(struct menu *menu); static void conf_string(struct menu *menu); static void conf_load(void); static void conf_save(void); static int show_textbox_ext(const char *title, char *text, int r, int c, int *keys, int *vscroll, int *hscroll, update_text_fn update_text, void *data); static void show_textbox(const char *title, const char *text, int r, int c); static void show_helptext(const char *title, const char *text); static void show_help(struct menu *menu); static char filename[PATH_MAX+1]; static void set_config_filename(const char *config_filename) { static char menu_backtitle[PATH_MAX+128]; int size; size = snprintf(menu_backtitle, sizeof(menu_backtitle), "%s - %s", config_filename, rootmenu.prompt->text); if (size >= sizeof(menu_backtitle)) menu_backtitle[sizeof(menu_backtitle)-1] = '\0'; set_dialog_backtitle(menu_backtitle); size = snprintf(filename, sizeof(filename), "%s", config_filename); if (size >= sizeof(filename)) filename[sizeof(filename)-1] = '\0'; } struct subtitle_part { struct list_head entries; const char *text; }; static LIST_HEAD(trail); static struct subtitle_list *subtitles; static void set_subtitle(void) { struct subtitle_part *sp; struct subtitle_list *pos, *tmp; for (pos = subtitles; pos != NULL; pos = tmp) { tmp = pos->next; free(pos); } subtitles = NULL; list_for_each_entry(sp, &trail, entries) { if (sp->text) { if (pos) { pos->next = xcalloc(1, sizeof(*pos)); pos = pos->next; } else { subtitles = pos = xcalloc(1, sizeof(*pos)); } pos->text = sp->text; } } set_dialog_subtitles(subtitles); } static void reset_subtitle(void) { struct subtitle_list *pos, *tmp; for (pos = subtitles; pos != NULL; pos = tmp) { tmp = pos->next; free(pos); } subtitles = NULL; set_dialog_subtitles(subtitles); } struct search_data { struct list_head *head; struct menu **targets; int *keys; }; static void update_text(char *buf, size_t start, size_t end, void *_data) { struct search_data *data = _data; struct jump_key *pos; int k = 0; list_for_each_entry(pos, data->head, entries) { if (pos->offset >= start && pos->offset < end) { char header[4]; if (k < JUMP_NB) { int key = '0' + (pos->index % JUMP_NB) + 1; sprintf(header, "(%c)", key); data->keys[k] = key; data->targets[k] = pos->target; k++; } else { sprintf(header, " "); } memcpy(buf + pos->offset, header, sizeof(header) - 1); } } data->keys[k] = 0; } static void search_conf(void) { struct symbol **sym_arr; struct gstr res; struct gstr title; char *dialog_input; int dres, vscroll = 0, hscroll = 0; bool again; struct gstr sttext; struct subtitle_part stpart; title = str_new(); str_printf( &title, _("Enter (sub)string or regexp to search for " "(with or without \"%s\")"), CONFIG_); again: dialog_clear(); dres = dialog_inputbox(_("Search Configuration Parameter"), str_get(&title), 10, 75, ""); 
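	/*
	 * dialog_inputbox() reports 0 when the user confirmed the input,
	 * 1 when help was requested, and anything else (e.g. KEY_ESC) to
	 * abort -- hence the three arms of the switch below.
	 */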
switch (dres) { case 0: break; case 1: show_helptext(_("Search Configuration"), search_help); goto again; default: str_free(&title); return; } /* strip the prefix if necessary */ dialog_input = dialog_input_result; if (strncasecmp(dialog_input_result, CONFIG_, strlen(CONFIG_)) == 0) dialog_input += strlen(CONFIG_); sttext = str_new(); str_printf(&sttext, "Search (%s)", dialog_input_result); stpart.text = str_get(&sttext); list_add_tail(&stpart.entries, &trail); sym_arr = sym_re_search(dialog_input); do { LIST_HEAD(head); struct menu *targets[JUMP_NB]; int keys[JUMP_NB + 1], i; struct search_data data = { .head = &head, .targets = targets, .keys = keys, }; struct jump_key *pos, *tmp; res = get_relations_str(sym_arr, &head); set_subtitle(); dres = show_textbox_ext(_("Search Results"), (char *) str_get(&res), 0, 0, keys, &vscroll, &hscroll, &update_text, (void *) &data); again = false; for (i = 0; i < JUMP_NB && keys[i]; i++) if (dres == keys[i]) { conf(targets[i]->parent, targets[i]); again = true; } str_free(&res); list_for_each_entry_safe(pos, tmp, &head, entries) free(pos); } while (again); free(sym_arr); str_free(&title); list_del(trail.prev); str_free(&sttext); } static void build_conf(struct menu *menu) { struct symbol *sym; struct property *prop; struct menu *child; int type, tmp, doint = 2; tristate val; char ch; bool visible; /* * note: menu_is_visible() has side effect that it will * recalc the value of the symbol. */ visible = menu_is_visible(menu); if (show_all_options && !menu_has_prompt(menu)) return; else if (!show_all_options && !visible) return; sym = menu->sym; prop = menu->prompt; if (!sym) { if (prop && menu != current_menu) { const char *prompt = menu_get_prompt(menu); switch (prop->type) { case P_MENU: child_count++; prompt = _(prompt); if (single_menu_mode) { item_make("%s%*c%s", menu->data ? "-->" : "++>", indent + 1, ' ', prompt); } else item_make(" %*c%s %s", indent + 1, ' ', prompt, menu_is_empty(menu) ? "----" : "--->"); item_set_tag('m'); item_set_data(menu); if (single_menu_mode && menu->data) goto conf_childs; return; case P_COMMENT: if (prompt) { child_count++; item_make(" %*c*** %s ***", indent + 1, ' ', _(prompt)); item_set_tag(':'); item_set_data(menu); } break; default: if (prompt) { child_count++; item_make("---%*c%s", indent + 1, ' ', _(prompt)); item_set_tag(':'); item_set_data(menu); } } } else doint = 0; goto conf_childs; } type = sym_get_type(sym); if (sym_is_choice(sym)) { struct symbol *def_sym = sym_get_choice_value(sym); struct menu *def_menu = NULL; child_count++; for (child = menu->list; child; child = child->next) { if (menu_is_visible(child) && child->sym == def_sym) def_menu = child; } val = sym_get_tristate_value(sym); if (sym_is_changable(sym)) { switch (type) { case S_BOOLEAN: item_make("[%c]", val == no ? ' ' : '*'); break; case S_TRISTATE: switch (val) { case yes: ch = '*'; break; case mod: ch = 'M'; break; default: ch = ' '; break; } item_make("<%c>", ch); break; } item_set_tag('t'); item_set_data(menu); } else { item_make(" "); item_set_tag(def_menu ? 
't' : ':'); item_set_data(menu); } item_add_str("%*c%s", indent + 1, ' ', _(menu_get_prompt(menu))); if (val == yes) { if (def_menu) { item_add_str(" (%s)", _(menu_get_prompt(def_menu))); item_add_str(" --->"); if (def_menu->list) { indent += 2; build_conf(def_menu); indent -= 2; } } return; } } else { if (menu == current_menu) { item_make("---%*c%s", indent + 1, ' ', _(menu_get_prompt(menu))); item_set_tag(':'); item_set_data(menu); goto conf_childs; } child_count++; val = sym_get_tristate_value(sym); if (sym_is_choice_value(sym) && val == yes) { item_make(" "); item_set_tag(':'); item_set_data(menu); } else { switch (type) { case S_BOOLEAN: if (sym_is_changable(sym)) item_make("[%c]", val == no ? ' ' : '*'); else item_make("-%c-", val == no ? ' ' : '*'); item_set_tag('t'); item_set_data(menu); break; case S_TRISTATE: switch (val) { case yes: ch = '*'; break; case mod: ch = 'M'; break; default: ch = ' '; break; } if (sym_is_changable(sym)) { if (sym->rev_dep.tri == mod) item_make("{%c}", ch); else item_make("<%c>", ch); } else item_make("-%c-", ch); item_set_tag('t'); item_set_data(menu); break; default: tmp = 2 + strlen(sym_get_string_value(sym)); /* () = 2 */ item_make("(%s)", sym_get_string_value(sym)); tmp = indent - tmp + 4; if (tmp < 0) tmp = 0; item_add_str("%*c%s%s", tmp, ' ', _(menu_get_prompt(menu)), (sym_has_value(sym) || !sym_is_changable(sym)) ? "" : _(" (NEW)")); item_set_tag('s'); item_set_data(menu); goto conf_childs; } } item_add_str("%*c%s%s", indent + 1, ' ', _(menu_get_prompt(menu)), (sym_has_value(sym) || !sym_is_changable(sym)) ? "" : _(" (NEW)")); if (menu->prompt->type == P_MENU) { item_add_str(" %s", menu_is_empty(menu) ? "----" : "--->"); return; } } conf_childs: indent += doint; for (child = menu->list; child; child = child->next) build_conf(child); indent -= doint; } static void conf(struct menu *menu, struct menu *active_menu) { struct menu *submenu; const char *prompt = menu_get_prompt(menu); struct subtitle_part stpart; struct symbol *sym; int res; int s_scroll = 0; if (menu != &rootmenu) stpart.text = menu_get_prompt(menu); else stpart.text = NULL; list_add_tail(&stpart.entries, &trail); while (1) { item_reset(); current_menu = menu; build_conf(menu); if (!child_count) break; set_subtitle(); dialog_clear(); res = dialog_menu(prompt ? 
_(prompt) : _("Main Menu"), _(menu_instructions), active_menu, &s_scroll); if (res == 1 || res == KEY_ESC || res == -ERRDISPLAYTOOSMALL) break; if (item_count() != 0) { if (!item_activate_selected()) continue; if (!item_tag()) continue; } submenu = item_data(); active_menu = item_data(); if (submenu) sym = submenu->sym; else sym = NULL; switch (res) { case 0: switch (item_tag()) { case 'm': if (single_menu_mode) submenu->data = (void *) (long) !submenu->data; else conf(submenu, NULL); break; case 't': if (sym_is_choice(sym) && sym_get_tristate_value(sym) == yes) conf_choice(submenu); else if (submenu->prompt->type == P_MENU) conf(submenu, NULL); break; case 's': conf_string(submenu); break; } break; case 2: if (sym) show_help(submenu); else { reset_subtitle(); show_helptext(_("README"), _(mconf_readme)); } break; case 3: reset_subtitle(); conf_save(); break; case 4: reset_subtitle(); conf_load(); break; case 5: if (item_is_tag('t')) { if (sym_set_tristate_value(sym, yes)) break; if (sym_set_tristate_value(sym, mod)) show_textbox(NULL, setmod_text, 6, 74); } break; case 6: if (item_is_tag('t')) sym_set_tristate_value(sym, no); break; case 7: if (item_is_tag('t')) sym_set_tristate_value(sym, mod); break; case 8: if (item_is_tag('t')) sym_toggle_tristate_value(sym); else if (item_is_tag('m')) conf(submenu, NULL); break; case 9: search_conf(); break; case 10: show_all_options = !show_all_options; break; } } list_del(trail.prev); } static int show_textbox_ext(const char *title, char *text, int r, int c, int *keys, int *vscroll, int *hscroll, update_text_fn update_text, void *data) { dialog_clear(); return dialog_textbox(title, text, r, c, keys, vscroll, hscroll, update_text, data); } static void show_textbox(const char *title, const char *text, int r, int c) { show_textbox_ext(title, (char *) text, r, c, (int []) {0}, NULL, NULL, NULL, NULL); } static void show_helptext(const char *title, const char *text) { show_textbox(title, text, 0, 0); } static void conf_message_callback(const char *fmt, va_list ap) { char buf[PATH_MAX+1]; vsnprintf(buf, sizeof(buf), fmt, ap); if (save_and_exit) { if (!silent) printf("%s", buf); } else { show_textbox(NULL, buf, 6, 60); } } static void show_help(struct menu *menu) { struct gstr help = str_new(); help.max_width = getmaxx(stdscr) - 10; menu_get_ext_help(menu, &help); show_helptext(_(menu_get_prompt(menu)), str_get(&help)); str_free(&help); } static void conf_choice(struct menu *menu) { const char *prompt = _(menu_get_prompt(menu)); struct menu *child; struct symbol *active; active = sym_get_choice_value(menu->sym); while (1) { int res; int selected; item_reset(); current_menu = menu; for (child = menu->list; child; child = child->next) { if (!menu_is_visible(child)) continue; if (child->sym) item_make("%s", _(menu_get_prompt(child))); else { item_make("*** %s ***", _(menu_get_prompt(child))); item_set_tag(':'); } item_set_data(child); if (child->sym == active) item_set_selected(1); if (child->sym == sym_get_choice_value(menu->sym)) item_set_tag('X'); } dialog_clear(); res = dialog_checklist(prompt ? 
_(prompt) : _("Main Menu"), _(radiolist_instructions), MENUBOX_HEIGTH_MIN, MENUBOX_WIDTH_MIN, CHECKLIST_HEIGTH_MIN); selected = item_activate_selected(); switch (res) { case 0: if (selected) { child = item_data(); if (!child->sym) break; sym_set_tristate_value(child->sym, yes); } return; case 1: if (selected) { child = item_data(); show_help(child); active = child->sym; } else show_help(menu); break; case KEY_ESC: return; case -ERRDISPLAYTOOSMALL: return; } } } static void conf_string(struct menu *menu) { const char *prompt = menu_get_prompt(menu); while (1) { int res; const char *heading; switch (sym_get_type(menu->sym)) { case S_INT: heading = _(inputbox_instructions_int); break; case S_HEX: heading = _(inputbox_instructions_hex); break; case S_STRING: heading = _(inputbox_instructions_string); break; default: heading = _("Internal mconf error!"); } dialog_clear(); res = dialog_inputbox(prompt ? _(prompt) : _("Main Menu"), heading, 10, 75, sym_get_string_value(menu->sym)); switch (res) { case 0: if (sym_set_string_value(menu->sym, dialog_input_result)) return; show_textbox(NULL, _("You have made an invalid entry."), 5, 43); break; case 1: show_help(menu); break; case KEY_ESC: return; } } } static void conf_load(void) { while (1) { int res; dialog_clear(); res = dialog_inputbox(NULL, load_config_text, 11, 55, filename); switch(res) { case 0: if (!dialog_input_result[0]) return; if (!conf_read(dialog_input_result)) { set_config_filename(dialog_input_result); sym_set_change_count(1); return; } show_textbox(NULL, _("File does not exist!"), 5, 38); break; case 1: show_helptext(_("Load Alternate Configuration"), load_config_help); break; case KEY_ESC: return; } } } static void conf_save(void) { while (1) { int res; dialog_clear(); res = dialog_inputbox(NULL, save_config_text, 11, 55, filename); switch(res) { case 0: if (!dialog_input_result[0]) return; if (!conf_write(dialog_input_result)) { set_config_filename(dialog_input_result); return; } show_textbox(NULL, _("Can't create file! Probably a nonexistent directory."), 5, 60); break; case 1: show_helptext(_("Save Alternate Configuration"), save_config_help); break; case KEY_ESC: return; } } } static int handle_exit(void) { int res; save_and_exit = 1; reset_subtitle(); dialog_clear(); if (conf_get_changed()) res = dialog_yesno(NULL, _("Do you wish to save your new configuration?\n" "(Press to continue kernel configuration.)"), 6, 60); else res = -1; end_dialog(saved_x, saved_y); switch (res) { case 0: if (conf_write(filename)) { fprintf(stderr, _("\n\n" "Error while writing of the configuration.\n" "Your configuration changes were NOT saved." "\n\n")); return 1; } /* fall through */ case -1: if (!silent) printf(_("\n\n" "*** End of the configuration.\n" "*** Execute 'make' to start the build or try 'make help'." "\n\n")); res = 0; break; default: if (!silent) fprintf(stderr, _("\n\n" "Your configuration changes were NOT saved." 
"\n\n")); if (res != KEY_ESC) res = 0; } return res; } static void sig_handler(int signo) { exit(handle_exit()); } int main(int ac, char **av) { char *mode; int res; setlocale(LC_ALL, ""); bindtextdomain(PACKAGE, LOCALEDIR); textdomain(PACKAGE); signal(SIGINT, sig_handler); if (ac > 1 && strcmp(av[1], "-s") == 0) { silent = 1; /* Silence conf_read() until the real callback is set up */ conf_set_message_callback(NULL); av++; } conf_parse(av[1]); conf_read(NULL); mode = getenv("MENUCONFIG_MODE"); if (mode) { if (!strcasecmp(mode, "single_menu")) single_menu_mode = 1; } if (init_dialog(NULL)) { fprintf(stderr, N_("Your display is too small to run Menuconfig!\n")); fprintf(stderr, N_("It must be at least 19 lines by 80 columns.\n")); return 1; } set_config_filename(conf_get_configname()); conf_set_message_callback(conf_message_callback); do { conf(&rootmenu, NULL); res = handle_exit(); } while (res == KEY_ESC); return res; } backport-iwlwifi-dkms-7906/kconf/menu.c000066400000000000000000000430041351064634500201120ustar00rootroot00000000000000/* * Copyright (C) 2002 Roman Zippel * Released under the terms of the GNU GPL v2.0. */ #include #include #include #include #include "lkc.h" static const char nohelp_text[] = "There is no help available for this option."; struct menu rootmenu; static struct menu **last_entry_ptr; struct file *file_list; struct file *current_file; void menu_warn(struct menu *menu, const char *fmt, ...) { va_list ap; va_start(ap, fmt); fprintf(stderr, "%s:%d:warning: ", menu->file->name, menu->lineno); vfprintf(stderr, fmt, ap); fprintf(stderr, "\n"); va_end(ap); } static void prop_warn(struct property *prop, const char *fmt, ...) { va_list ap; va_start(ap, fmt); fprintf(stderr, "%s:%d:warning: ", prop->file->name, prop->lineno); vfprintf(stderr, fmt, ap); fprintf(stderr, "\n"); va_end(ap); } void _menu_init(void) { current_entry = current_menu = &rootmenu; last_entry_ptr = &rootmenu.list; } void menu_add_entry(struct symbol *sym) { struct menu *menu; menu = xmalloc(sizeof(*menu)); memset(menu, 0, sizeof(*menu)); menu->sym = sym; menu->parent = current_menu; menu->file = current_file; menu->lineno = zconf_lineno(); *last_entry_ptr = menu; last_entry_ptr = &menu->next; current_entry = menu; if (sym) menu_add_symbol(P_SYMBOL, sym, NULL); } void menu_end_entry(void) { } struct menu *menu_add_menu(void) { menu_end_entry(); last_entry_ptr = ¤t_entry->list; return current_menu = current_entry; } void menu_end_menu(void) { last_entry_ptr = ¤t_menu->next; current_menu = current_menu->parent; } static struct expr *menu_check_dep(struct expr *e) { if (!e) return e; switch (e->type) { case E_NOT: e->left.expr = menu_check_dep(e->left.expr); break; case E_OR: case E_AND: e->left.expr = menu_check_dep(e->left.expr); e->right.expr = menu_check_dep(e->right.expr); break; case E_SYMBOL: /* change 'm' into 'm' && MODULES */ if (e->left.sym == &symbol_mod) return expr_alloc_and(e, expr_alloc_symbol(modules_sym)); break; default: break; } return e; } void menu_add_dep(struct expr *dep) { current_entry->dep = expr_alloc_and(current_entry->dep, menu_check_dep(dep)); } void menu_set_type(int type) { struct symbol *sym = current_entry->sym; if (sym->type == type) return; if (sym->type == S_UNKNOWN) { sym->type = type; return; } menu_warn(current_entry, "ignoring type redefinition of '%s' from '%s' to '%s'", sym->name ? 
sym->name : "", sym_type_name(sym->type), sym_type_name(type)); } static struct property *menu_add_prop(enum prop_type type, char *prompt, struct expr *expr, struct expr *dep) { struct property *prop = prop_alloc(type, current_entry->sym); prop->menu = current_entry; prop->expr = expr; prop->visible.expr = menu_check_dep(dep); if (prompt) { if (isspace(*prompt)) { prop_warn(prop, "leading whitespace ignored"); while (isspace(*prompt)) prompt++; } if (current_entry->prompt && current_entry != &rootmenu) prop_warn(prop, "prompt redefined"); /* Apply all upper menus' visibilities to actual prompts. */ if(type == P_PROMPT) { struct menu *menu = current_entry; while ((menu = menu->parent) != NULL) { struct expr *dup_expr; if (!menu->visibility) continue; /* * Do not add a reference to the * menu's visibility expression but * use a copy of it. Otherwise the * expression reduction functions * will modify expressions that have * multiple references which can * cause unwanted side effects. */ dup_expr = expr_copy(menu->visibility); prop->visible.expr = expr_alloc_and(prop->visible.expr, dup_expr); } } current_entry->prompt = prop; } prop->text = prompt; return prop; } struct property *menu_add_prompt(enum prop_type type, char *prompt, struct expr *dep) { return menu_add_prop(type, prompt, NULL, dep); } void menu_add_visibility(struct expr *expr) { current_entry->visibility = expr_alloc_and(current_entry->visibility, expr); } void menu_add_expr(enum prop_type type, struct expr *expr, struct expr *dep) { menu_add_prop(type, NULL, expr, dep); } void menu_add_symbol(enum prop_type type, struct symbol *sym, struct expr *dep) { menu_add_prop(type, NULL, expr_alloc_symbol(sym), dep); } void menu_add_option(int token, char *arg) { switch (token) { case T_OPT_MODULES: if (modules_sym) zconf_error("symbol '%s' redefines option 'modules'" " already defined by symbol '%s'", current_entry->sym->name, modules_sym->name ); modules_sym = current_entry->sym; break; case T_OPT_DEFCONFIG_LIST: if (!sym_defconfig_list) sym_defconfig_list = current_entry->sym; else if (sym_defconfig_list != current_entry->sym) zconf_error("trying to redefine defconfig symbol"); break; case T_OPT_ENV: prop_add_env(arg); break; case T_OPT_ALLNOCONFIG_Y: current_entry->sym->flags |= SYMBOL_ALLNOCONFIG_Y; break; } } static int menu_validate_number(struct symbol *sym, struct symbol *sym2) { return sym2->type == S_INT || sym2->type == S_HEX || (sym2->type == S_UNKNOWN && sym_string_valid(sym, sym2->name)); } static void sym_check_prop(struct symbol *sym) { struct property *prop; struct symbol *sym2; char *use; for (prop = sym->prop; prop; prop = prop->next) { switch (prop->type) { case P_DEFAULT: if ((sym->type == S_STRING || sym->type == S_INT || sym->type == S_HEX) && prop->expr->type != E_SYMBOL) prop_warn(prop, "default for config symbol '%s'" " must be a single symbol", sym->name); if (prop->expr->type != E_SYMBOL) break; sym2 = prop_get_symbol(prop); if (sym->type == S_HEX || sym->type == S_INT) { if (!menu_validate_number(sym, sym2)) prop_warn(prop, "'%s': number is invalid", sym->name); } break; case P_SELECT: case P_IMPLY: use = prop->type == P_SELECT ? "select" : "imply"; sym2 = prop_get_symbol(prop); if (sym->type != S_BOOLEAN && sym->type != S_TRISTATE) prop_warn(prop, "config symbol '%s' uses %s, but is " "not boolean or tristate", sym->name, use); else if (sym2->type != S_UNKNOWN && sym2->type != S_BOOLEAN && sym2->type != S_TRISTATE) prop_warn(prop, "'%s' has wrong type. 
'%s' only " "accept arguments of boolean and " "tristate type", sym2->name, use); break; case P_RANGE: if (sym->type != S_INT && sym->type != S_HEX) prop_warn(prop, "range is only allowed " "for int or hex symbols"); if (!menu_validate_number(sym, prop->expr->left.sym) || !menu_validate_number(sym, prop->expr->right.sym)) prop_warn(prop, "range is invalid"); break; default: ; } } } void menu_finalize(struct menu *parent) { struct menu *menu, *last_menu; struct symbol *sym; struct property *prop; struct expr *parentdep, *basedep, *dep, *dep2, **ep; sym = parent->sym; if (parent->list) { if (sym && sym_is_choice(sym)) { if (sym->type == S_UNKNOWN) { /* find the first choice value to find out choice type */ current_entry = parent; for (menu = parent->list; menu; menu = menu->next) { if (menu->sym && menu->sym->type != S_UNKNOWN) { menu_set_type(menu->sym->type); break; } } } /* set the type of the remaining choice values */ for (menu = parent->list; menu; menu = menu->next) { current_entry = menu; if (menu->sym && menu->sym->type == S_UNKNOWN) menu_set_type(sym->type); } parentdep = expr_alloc_symbol(sym); } else if (parent->prompt) parentdep = parent->prompt->visible.expr; else parentdep = parent->dep; for (menu = parent->list; menu; menu = menu->next) { basedep = expr_transform(menu->dep); basedep = expr_alloc_and(expr_copy(parentdep), basedep); basedep = expr_eliminate_dups(basedep); menu->dep = basedep; if (menu->sym) prop = menu->sym->prop; else prop = menu->prompt; for (; prop; prop = prop->next) { if (prop->menu != menu) continue; dep = expr_transform(prop->visible.expr); dep = expr_alloc_and(expr_copy(basedep), dep); dep = expr_eliminate_dups(dep); if (menu->sym && menu->sym->type != S_TRISTATE) dep = expr_trans_bool(dep); prop->visible.expr = dep; if (prop->type == P_SELECT) { struct symbol *es = prop_get_symbol(prop); es->rev_dep.expr = expr_alloc_or(es->rev_dep.expr, expr_alloc_and(expr_alloc_symbol(menu->sym), expr_copy(dep))); } else if (prop->type == P_IMPLY) { struct symbol *es = prop_get_symbol(prop); es->implied.expr = expr_alloc_or(es->implied.expr, expr_alloc_and(expr_alloc_symbol(menu->sym), expr_copy(dep))); } } } for (menu = parent->list; menu; menu = menu->next) menu_finalize(menu); } else if (sym) { basedep = parent->prompt ? parent->prompt->visible.expr : NULL; basedep = expr_trans_compare(basedep, E_UNEQUAL, &symbol_no); basedep = expr_eliminate_dups(expr_transform(basedep)); last_menu = NULL; for (menu = parent->next; menu; menu = menu->next) { dep = menu->prompt ? 
menu->prompt->visible.expr : menu->dep; if (!expr_contains_symbol(dep, sym)) break; if (expr_depends_symbol(dep, sym)) goto next; dep = expr_trans_compare(dep, E_UNEQUAL, &symbol_no); dep = expr_eliminate_dups(expr_transform(dep)); dep2 = expr_copy(basedep); expr_eliminate_eq(&dep, &dep2); expr_free(dep); if (!expr_is_yes(dep2)) { expr_free(dep2); break; } expr_free(dep2); next: menu_finalize(menu); menu->parent = parent; last_menu = menu; } if (last_menu) { parent->list = parent->next; parent->next = last_menu->next; last_menu->next = NULL; } sym->dir_dep.expr = expr_alloc_or(sym->dir_dep.expr, parent->dep); } for (menu = parent->list; menu; menu = menu->next) { if (sym && sym_is_choice(sym) && menu->sym && !sym_is_choice_value(menu->sym)) { current_entry = menu; menu->sym->flags |= SYMBOL_CHOICEVAL; if (!menu->prompt) menu_warn(menu, "choice value must have a prompt"); for (prop = menu->sym->prop; prop; prop = prop->next) { if (prop->type == P_DEFAULT) prop_warn(prop, "defaults for choice " "values not supported"); if (prop->menu == menu) continue; if (prop->type == P_PROMPT && prop->menu->parent->sym != sym) prop_warn(prop, "choice value used outside its choice group"); } /* Non-tristate choice values of tristate choices must * depend on the choice being set to Y. The choice * values' dependencies were propagated to their * properties above, so the change here must be re- * propagated. */ if (sym->type == S_TRISTATE && menu->sym->type != S_TRISTATE) { basedep = expr_alloc_comp(E_EQUAL, sym, &symbol_yes); menu->dep = expr_alloc_and(basedep, menu->dep); for (prop = menu->sym->prop; prop; prop = prop->next) { if (prop->menu != menu) continue; prop->visible.expr = expr_alloc_and(expr_copy(basedep), prop->visible.expr); } } menu_add_symbol(P_CHOICE, sym, NULL); prop = sym_get_choice_prop(sym); for (ep = &prop->expr; *ep; ep = &(*ep)->left.expr) ; *ep = expr_alloc_one(E_LIST, NULL); (*ep)->right.sym = menu->sym; } if (menu->list && (!menu->prompt || !menu->prompt->text)) { for (last_menu = menu->list; ; last_menu = last_menu->next) { last_menu->parent = parent; if (!last_menu->next) break; } last_menu->next = menu->next; menu->next = menu->list; menu->list = NULL; } } if (sym && !(sym->flags & SYMBOL_WARNED)) { if (sym->type == S_UNKNOWN) menu_warn(parent, "config symbol defined without type"); if (sym_is_choice(sym) && !parent->prompt) menu_warn(parent, "choice must have a prompt"); /* Check properties connected to this symbol */ sym_check_prop(sym); sym->flags |= SYMBOL_WARNED; } if (sym && !sym_is_optional(sym) && parent->prompt) { sym->rev_dep.expr = expr_alloc_or(sym->rev_dep.expr, expr_alloc_and(parent->prompt->visible.expr, expr_alloc_symbol(&symbol_mod))); } } bool menu_has_prompt(struct menu *menu) { if (!menu->prompt) return false; return true; } /* * Determine if a menu is empty. * A menu is considered empty if it contains no or only * invisible entries. 
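 * (In other words, none of its children passes menu_is_visible().)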
*/ bool menu_is_empty(struct menu *menu) { struct menu *child; for (child = menu->list; child; child = child->next) { if (menu_is_visible(child)) return(false); } return(true); } bool menu_is_visible(struct menu *menu) { struct menu *child; struct symbol *sym; tristate visible; if (!menu->prompt) return false; if (menu->visibility) { if (expr_calc_value(menu->visibility) == no) return false; } sym = menu->sym; if (sym) { sym_calc_value(sym); visible = menu->prompt->visible.tri; } else visible = menu->prompt->visible.tri = expr_calc_value(menu->prompt->visible.expr); if (visible != no) return true; if (!sym || sym_get_tristate_value(menu->sym) == no) return false; for (child = menu->list; child; child = child->next) { if (menu_is_visible(child)) { if (sym) sym->flags |= SYMBOL_DEF_USER; return true; } } return false; } const char *menu_get_prompt(struct menu *menu) { if (menu->prompt) return menu->prompt->text; else if (menu->sym) return menu->sym->name; return NULL; } struct menu *menu_get_root_menu(struct menu *menu) { return &rootmenu; } struct menu *menu_get_parent_menu(struct menu *menu) { enum prop_type type; for (; menu != &rootmenu; menu = menu->parent) { type = menu->prompt ? menu->prompt->type : 0; if (type == P_MENU) break; } return menu; } bool menu_has_help(struct menu *menu) { return menu->help != NULL; } const char *menu_get_help(struct menu *menu) { if (menu->help) return menu->help; else return ""; } static void get_prompt_str(struct gstr *r, struct property *prop, struct list_head *head) { int i, j; struct menu *submenu[8], *menu, *location = NULL; struct jump_key *jump = NULL; str_printf(r, _("Prompt: %s\n"), _(prop->text)); menu = prop->menu->parent; for (i = 0; menu != &rootmenu && i < 8; menu = menu->parent) { bool accessible = menu_is_visible(menu); submenu[i++] = menu; if (location == NULL && accessible) location = menu; } if (head && location) { jump = xmalloc(sizeof(struct jump_key)); if (menu_is_visible(prop->menu)) { /* * There is not enough room to put the hint at the * beginning of the "Prompt" line. Put the hint on the * last "Location" line even when it would belong on * the former. */ jump->target = prop->menu; } else jump->target = location; if (list_empty(head)) jump->index = 0; else jump->index = list_entry(head->prev, struct jump_key, entries)->index + 1; list_add_tail(&jump->entries, head); } if (i > 0) { str_printf(r, _(" Location:\n")); for (j = 4; --i >= 0; j += 2) { menu = submenu[i]; if (jump && menu == location) jump->offset = strlen(r->s); str_printf(r, "%*c-> %s", j, ' ', _(menu_get_prompt(menu))); if (menu->sym) { str_printf(r, " (%s [=%s])", menu->sym->name ? 
menu->sym->name : _(""), sym_get_string_value(menu->sym)); } str_append(r, "\n"); } } } /* * get property of type P_SYMBOL */ static struct property *get_symbol_prop(struct symbol *sym) { struct property *prop = NULL; for_all_properties(sym, prop, P_SYMBOL) break; return prop; } static void get_symbol_props_str(struct gstr *r, struct symbol *sym, enum prop_type tok, const char *prefix) { bool hit = false; struct property *prop; for_all_properties(sym, prop, tok) { if (!hit) { str_append(r, prefix); hit = true; } else str_printf(r, " && "); expr_gstr_print(prop->expr, r); } if (hit) str_append(r, "\n"); } /* * head is optional and may be NULL */ static void get_symbol_str(struct gstr *r, struct symbol *sym, struct list_head *head) { struct property *prop; if (sym && sym->name) { str_printf(r, "Symbol: %s [=%s]\n", sym->name, sym_get_string_value(sym)); str_printf(r, "Type : %s\n", sym_type_name(sym->type)); if (sym->type == S_INT || sym->type == S_HEX) { prop = sym_get_range_prop(sym); if (prop) { str_printf(r, "Range : "); expr_gstr_print(prop->expr, r); str_append(r, "\n"); } } } for_all_prompts(sym, prop) get_prompt_str(r, prop, head); prop = get_symbol_prop(sym); if (prop) { str_printf(r, _(" Defined at %s:%d\n"), prop->menu->file->name, prop->menu->lineno); if (!expr_is_yes(prop->visible.expr)) { str_append(r, _(" Depends on: ")); expr_gstr_print(prop->visible.expr, r); str_append(r, "\n"); } } get_symbol_props_str(r, sym, P_SELECT, _(" Selects: ")); if (sym->rev_dep.expr) { str_append(r, _(" Selected by: ")); expr_gstr_print(sym->rev_dep.expr, r); str_append(r, "\n"); } get_symbol_props_str(r, sym, P_IMPLY, _(" Implies: ")); if (sym->implied.expr) { str_append(r, _(" Implied by: ")); expr_gstr_print(sym->implied.expr, r); str_append(r, "\n"); } str_append(r, "\n\n"); } struct gstr get_relations_str(struct symbol **sym_arr, struct list_head *head) { struct symbol *sym; struct gstr res = str_new(); int i; for (i = 0; sym_arr && (sym = sym_arr[i]); i++) get_symbol_str(&res, sym, head); if (!i) str_append(&res, _("No matches found.\n")); return res; } void menu_get_ext_help(struct menu *menu, struct gstr *help) { struct symbol *sym = menu->sym; const char *help_text = nohelp_text; if (menu_has_help(menu)) { if (sym->name) str_printf(help, "%s%s:\n\n", CONFIG_, sym->name); help_text = menu_get_help(menu); } str_printf(help, "%s\n", _(help_text)); if (sym) get_symbol_str(help, sym, NULL); } backport-iwlwifi-dkms-7906/kconf/symbol.c000066400000000000000000000745631351064634500204710ustar00rootroot00000000000000/* * Copyright (C) 2002 Roman Zippel * Released under the terms of the GNU GPL v2.0. 
*/ #include #include #include #include #include #include "lkc.h" struct symbol symbol_yes = { .name = "y", .curr = { "y", yes }, .flags = SYMBOL_CONST|SYMBOL_VALID, }, symbol_mod = { .name = "m", .curr = { "m", mod }, .flags = SYMBOL_CONST|SYMBOL_VALID, }, symbol_no = { .name = "n", .curr = { "n", no }, .flags = SYMBOL_CONST|SYMBOL_VALID, }, symbol_empty = { .name = "", .curr = { "", no }, .flags = SYMBOL_VALID, }; struct symbol *sym_defconfig_list; struct symbol *modules_sym; tristate modules_val; struct expr *sym_env_list; static void sym_add_default(struct symbol *sym, const char *def) { struct property *prop = prop_alloc(P_DEFAULT, sym); prop->expr = expr_alloc_symbol(sym_lookup(def, SYMBOL_CONST)); } void sym_init(void) { struct symbol *sym; struct utsname uts; static bool inited = false; if (inited) return; inited = true; uname(&uts); sym = sym_lookup("UNAME_RELEASE", 0); sym->type = S_STRING; sym->flags |= SYMBOL_AUTO; sym_add_default(sym, uts.release); } enum symbol_type sym_get_type(struct symbol *sym) { enum symbol_type type = sym->type; if (type == S_TRISTATE) { if (sym_is_choice_value(sym) && sym->visible == yes) type = S_BOOLEAN; else if (modules_val == no) type = S_BOOLEAN; } return type; } const char *sym_type_name(enum symbol_type type) { switch (type) { case S_BOOLEAN: return "boolean"; case S_TRISTATE: return "tristate"; case S_INT: return "integer"; case S_HEX: return "hex"; case S_STRING: return "string"; case S_UNKNOWN: return "unknown"; case S_OTHER: break; } return "???"; } struct property *sym_get_choice_prop(struct symbol *sym) { struct property *prop; for_all_choices(sym, prop) return prop; return NULL; } struct property *sym_get_env_prop(struct symbol *sym) { struct property *prop; for_all_properties(sym, prop, P_ENV) return prop; return NULL; } static struct property *sym_get_default_prop(struct symbol *sym) { struct property *prop; for_all_defaults(sym, prop) { prop->visible.tri = expr_calc_value(prop->visible.expr); if (prop->visible.tri != no) return prop; } return NULL; } static struct property *sym_get_range_prop(struct symbol *sym) { struct property *prop; for_all_properties(sym, prop, P_RANGE) { prop->visible.tri = expr_calc_value(prop->visible.expr); if (prop->visible.tri != no) return prop; } return NULL; } static long long sym_get_range_val(struct symbol *sym, int base) { sym_calc_value(sym); switch (sym->type) { case S_INT: base = 10; break; case S_HEX: base = 16; break; default: break; } return strtoll(sym->curr.val, NULL, base); } static void sym_validate_range(struct symbol *sym) { struct property *prop; int base; long long val, val2; char str[64]; switch (sym->type) { case S_INT: base = 10; break; case S_HEX: base = 16; break; default: return; } prop = sym_get_range_prop(sym); if (!prop) return; val = strtoll(sym->curr.val, NULL, base); val2 = sym_get_range_val(prop->expr->left.sym, base); if (val >= val2) { val2 = sym_get_range_val(prop->expr->right.sym, base); if (val <= val2) return; } if (sym->type == S_INT) sprintf(str, "%lld", val2); else sprintf(str, "0x%llx", val2); sym->curr.val = strdup(str); } static void sym_set_changed(struct symbol *sym) { struct property *prop; sym->flags |= SYMBOL_CHANGED; for (prop = sym->prop; prop; prop = prop->next) { if (prop->menu) prop->menu->flags |= MENU_CHANGED; } } static void sym_set_all_changed(void) { struct symbol *sym; int i; for_all_symbols(i, sym) sym_set_changed(sym); } static void sym_calc_visibility(struct symbol *sym) { struct property *prop; struct symbol *choice_sym = NULL; tristate tri; /* 
any prompt visible? */ tri = no; if (sym_is_choice_value(sym)) choice_sym = prop_get_symbol(sym_get_choice_prop(sym)); for_all_prompts(sym, prop) { prop->visible.tri = expr_calc_value(prop->visible.expr); /* * Tristate choice_values with visibility 'mod' are * not visible if the corresponding choice's value is * 'yes'. */ if (choice_sym && sym->type == S_TRISTATE && prop->visible.tri == mod && choice_sym->curr.tri == yes) prop->visible.tri = no; tri = EXPR_OR(tri, prop->visible.tri); } if (tri == mod && (sym->type != S_TRISTATE || modules_val == no)) tri = yes; if (sym->visible != tri) { sym->visible = tri; sym_set_changed(sym); } if (sym_is_choice_value(sym)) return; /* defaulting to "yes" if no explicit "depends on" are given */ tri = yes; if (sym->dir_dep.expr) tri = expr_calc_value(sym->dir_dep.expr); if (tri == mod) tri = yes; if (sym->dir_dep.tri != tri) { sym->dir_dep.tri = tri; sym_set_changed(sym); } tri = no; if (sym->rev_dep.expr) tri = expr_calc_value(sym->rev_dep.expr); if (tri == mod && sym_get_type(sym) == S_BOOLEAN) tri = yes; if (sym->rev_dep.tri != tri) { sym->rev_dep.tri = tri; sym_set_changed(sym); } tri = no; if (sym->implied.expr && sym->dir_dep.tri != no) tri = expr_calc_value(sym->implied.expr); if (tri == mod && sym_get_type(sym) == S_BOOLEAN) tri = yes; if (sym->implied.tri != tri) { sym->implied.tri = tri; sym_set_changed(sym); } } /* * Find the default symbol for a choice. * First try the default values for the choice symbol * Next locate the first visible choice value * Return NULL if none was found */ struct symbol *sym_choice_default(struct symbol *sym) { struct symbol *def_sym; struct property *prop; struct expr *e; /* any of the defaults visible? */ for_all_defaults(sym, prop) { prop->visible.tri = expr_calc_value(prop->visible.expr); if (prop->visible.tri == no) continue; def_sym = prop_get_symbol(prop); if (def_sym->visible != no) return def_sym; } /* just get the first visible value */ prop = sym_get_choice_prop(sym); expr_list_for_each_sym(prop->expr, e, def_sym) if (def_sym->visible != no) return def_sym; /* failed to locate any defaults */ return NULL; } static struct symbol *sym_calc_choice(struct symbol *sym) { struct symbol *def_sym; struct property *prop; struct expr *e; int flags; /* first calculate all choice values' visibilities */ flags = sym->flags; prop = sym_get_choice_prop(sym); expr_list_for_each_sym(prop->expr, e, def_sym) { sym_calc_visibility(def_sym); if (def_sym->visible != no) flags &= def_sym->flags; } sym->flags &= flags | ~SYMBOL_DEF_USER; /* is the user choice visible? */ def_sym = sym->def[S_DEF_USER].val; if (def_sym && def_sym->visible != no) return def_sym; def_sym = sym_choice_default(sym); if (def_sym == NULL) /* no choice? 
reset tristate value */ sym->curr.tri = no; return def_sym; } void sym_calc_value(struct symbol *sym) { struct symbol_value newval, oldval; struct property *prop; struct expr *e; if (!sym) return; if (sym->flags & SYMBOL_VALID) return; if (sym_is_choice_value(sym) && sym->flags & SYMBOL_NEED_SET_CHOICE_VALUES) { sym->flags &= ~SYMBOL_NEED_SET_CHOICE_VALUES; prop = sym_get_choice_prop(sym); sym_calc_value(prop_get_symbol(prop)); } sym->flags |= SYMBOL_VALID; oldval = sym->curr; switch (sym->type) { case S_INT: case S_HEX: case S_STRING: newval = symbol_empty.curr; break; case S_BOOLEAN: case S_TRISTATE: newval = symbol_no.curr; break; default: sym->curr.val = sym->name; sym->curr.tri = no; return; } if (!sym_is_choice_value(sym)) sym->flags &= ~SYMBOL_WRITE; sym_calc_visibility(sym); /* set default if recursively called */ sym->curr = newval; switch (sym_get_type(sym)) { case S_BOOLEAN: case S_TRISTATE: if (sym_is_choice_value(sym) && sym->visible == yes) { prop = sym_get_choice_prop(sym); newval.tri = (prop_get_symbol(prop)->curr.val == sym) ? yes : no; } else { if (sym->visible != no) { /* if the symbol is visible use the user value * if available, otherwise try the default value */ sym->flags |= SYMBOL_WRITE; if (sym_has_value(sym)) { newval.tri = EXPR_AND(sym->def[S_DEF_USER].tri, sym->visible); goto calc_newval; } } if (sym->rev_dep.tri != no) sym->flags |= SYMBOL_WRITE; if (!sym_is_choice(sym)) { prop = sym_get_default_prop(sym); if (prop) { sym->flags |= SYMBOL_WRITE; newval.tri = EXPR_AND(expr_calc_value(prop->expr), prop->visible.tri); } if (sym->implied.tri != no) { sym->flags |= SYMBOL_WRITE; newval.tri = EXPR_OR(newval.tri, sym->implied.tri); } } calc_newval: if (sym->dir_dep.tri == no && sym->rev_dep.tri != no) { struct expr *e; e = expr_simplify_unmet_dep(sym->rev_dep.expr, sym->dir_dep.expr); fprintf(stderr, "warning: ("); expr_fprint(e, stderr); fprintf(stderr, ") selects %s which has unmet direct dependencies (", sym->name); expr_fprint(sym->dir_dep.expr, stderr); fprintf(stderr, ")\n"); expr_free(e); } newval.tri = EXPR_OR(newval.tri, sym->rev_dep.tri); } if (newval.tri == mod && (sym_get_type(sym) == S_BOOLEAN || sym->implied.tri == yes)) newval.tri = yes; break; case S_STRING: case S_HEX: case S_INT: if (sym->visible != no) { sym->flags |= SYMBOL_WRITE; if (sym_has_value(sym)) { newval.val = sym->def[S_DEF_USER].val; break; } } prop = sym_get_default_prop(sym); if (prop) { struct symbol *ds = prop_get_symbol(prop); if (ds) { sym->flags |= SYMBOL_WRITE; sym_calc_value(ds); newval.val = ds->curr.val; } } break; default: ; } sym->curr = newval; if (sym_is_choice(sym) && newval.tri == yes) sym->curr.val = sym_calc_choice(sym); sym_validate_range(sym); if (memcmp(&oldval, &sym->curr, sizeof(oldval))) { sym_set_changed(sym); if (modules_sym == sym) { sym_set_all_changed(); modules_val = modules_sym->curr.tri; } } if (sym_is_choice(sym)) { struct symbol *choice_sym; prop = sym_get_choice_prop(sym); expr_list_for_each_sym(prop->expr, e, choice_sym) { if ((sym->flags & SYMBOL_WRITE) && choice_sym->visible != no) choice_sym->flags |= SYMBOL_WRITE; if (sym->flags & SYMBOL_CHANGED) sym_set_changed(choice_sym); } } if (sym->flags & SYMBOL_AUTO) sym->flags &= ~SYMBOL_WRITE; if (sym->flags & SYMBOL_NEED_SET_CHOICE_VALUES) set_all_choice_values(sym); } void sym_clear_all_valid(void) { struct symbol *sym; int i; for_all_symbols(i, sym) sym->flags &= ~SYMBOL_VALID; sym_add_change_count(1); sym_calc_value(modules_sym); } bool sym_tristate_within_range(struct symbol *sym, tristate val) { 
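	/*
	 * A tristate value is only settable when it lies between the lower
	 * bound forced by reverse dependencies (rev_dep, i.e. "select")
	 * and the upper bound given by the symbol's current visibility.
	 */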
int type = sym_get_type(sym); if (sym->visible == no) return false; if (type != S_BOOLEAN && type != S_TRISTATE) return false; if (type == S_BOOLEAN && val == mod) return false; if (sym->visible <= sym->rev_dep.tri) return false; if (sym->implied.tri == yes && val == mod) return false; if (sym_is_choice_value(sym) && sym->visible == yes) return val == yes; return val >= sym->rev_dep.tri && val <= sym->visible; } bool sym_set_tristate_value(struct symbol *sym, tristate val) { tristate oldval = sym_get_tristate_value(sym); if (oldval != val && !sym_tristate_within_range(sym, val)) return false; if (!(sym->flags & SYMBOL_DEF_USER)) { sym->flags |= SYMBOL_DEF_USER; sym_set_changed(sym); } /* * setting a choice value also resets the new flag of the choice * symbol and all other choice values. */ if (sym_is_choice_value(sym) && val == yes) { struct symbol *cs = prop_get_symbol(sym_get_choice_prop(sym)); struct property *prop; struct expr *e; cs->def[S_DEF_USER].val = sym; cs->flags |= SYMBOL_DEF_USER; prop = sym_get_choice_prop(cs); for (e = prop->expr; e; e = e->left.expr) { if (e->right.sym->visible != no) e->right.sym->flags |= SYMBOL_DEF_USER; } } sym->def[S_DEF_USER].tri = val; if (oldval != val) sym_clear_all_valid(); return true; } tristate sym_toggle_tristate_value(struct symbol *sym) { tristate oldval, newval; oldval = newval = sym_get_tristate_value(sym); do { switch (newval) { case no: newval = mod; break; case mod: newval = yes; break; case yes: newval = no; break; } if (sym_set_tristate_value(sym, newval)) break; } while (oldval != newval); return newval; } bool sym_string_valid(struct symbol *sym, const char *str) { signed char ch; switch (sym->type) { case S_STRING: return true; case S_INT: ch = *str++; if (ch == '-') ch = *str++; if (!isdigit(ch)) return false; if (ch == '0' && *str != 0) return false; while ((ch = *str++)) { if (!isdigit(ch)) return false; } return true; case S_HEX: if (str[0] == '0' && (str[1] == 'x' || str[1] == 'X')) str += 2; ch = *str++; do { if (!isxdigit(ch)) return false; } while ((ch = *str++)); return true; case S_BOOLEAN: case S_TRISTATE: switch (str[0]) { case 'y': case 'Y': case 'm': case 'M': case 'n': case 'N': return true; } return false; default: return false; } } bool sym_string_within_range(struct symbol *sym, const char *str) { struct property *prop; long long val; switch (sym->type) { case S_STRING: return sym_string_valid(sym, str); case S_INT: if (!sym_string_valid(sym, str)) return false; prop = sym_get_range_prop(sym); if (!prop) return true; val = strtoll(str, NULL, 10); return val >= sym_get_range_val(prop->expr->left.sym, 10) && val <= sym_get_range_val(prop->expr->right.sym, 10); case S_HEX: if (!sym_string_valid(sym, str)) return false; prop = sym_get_range_prop(sym); if (!prop) return true; val = strtoll(str, NULL, 16); return val >= sym_get_range_val(prop->expr->left.sym, 16) && val <= sym_get_range_val(prop->expr->right.sym, 16); case S_BOOLEAN: case S_TRISTATE: switch (str[0]) { case 'y': case 'Y': return sym_tristate_within_range(sym, yes); case 'm': case 'M': return sym_tristate_within_range(sym, mod); case 'n': case 'N': return sym_tristate_within_range(sym, no); } return false; default: return false; } } bool sym_set_string_value(struct symbol *sym, const char *newval) { const char *oldval; char *val; int size; switch (sym->type) { case S_BOOLEAN: case S_TRISTATE: switch (newval[0]) { case 'y': case 'Y': return sym_set_tristate_value(sym, yes); case 'm': case 'M': return sym_set_tristate_value(sym, mod); case 'n': case 'N': 
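			/* y/m/n strings reuse the tristate setter */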
return sym_set_tristate_value(sym, no); } return false; default: ; } if (!sym_string_within_range(sym, newval)) return false; if (!(sym->flags & SYMBOL_DEF_USER)) { sym->flags |= SYMBOL_DEF_USER; sym_set_changed(sym); } oldval = sym->def[S_DEF_USER].val; size = strlen(newval) + 1; if (sym->type == S_HEX && (newval[0] != '0' || (newval[1] != 'x' && newval[1] != 'X'))) { size += 2; sym->def[S_DEF_USER].val = val = xmalloc(size); *val++ = '0'; *val++ = 'x'; } else if (!oldval || strcmp(oldval, newval)) sym->def[S_DEF_USER].val = val = xmalloc(size); else return true; strcpy(val, newval); free((void *)oldval); sym_clear_all_valid(); return true; } /* * Find the default value associated to a symbol. * For tristate symbol handle the modules=n case * in which case "m" becomes "y". * If the symbol does not have any default then fallback * to the fixed default values. */ const char *sym_get_string_default(struct symbol *sym) { struct property *prop; struct symbol *ds; const char *str; tristate val; sym_calc_visibility(sym); sym_calc_value(modules_sym); val = symbol_no.curr.tri; str = symbol_empty.curr.val; /* If symbol has a default value look it up */ prop = sym_get_default_prop(sym); if (prop != NULL) { switch (sym->type) { case S_BOOLEAN: case S_TRISTATE: /* The visibility may limit the value from yes => mod */ val = EXPR_AND(expr_calc_value(prop->expr), prop->visible.tri); break; default: /* * The following fails to handle the situation * where a default value is further limited by * the valid range. */ ds = prop_get_symbol(prop); if (ds != NULL) { sym_calc_value(ds); str = (const char *)ds->curr.val; } } } /* Handle select statements */ val = EXPR_OR(val, sym->rev_dep.tri); /* transpose mod to yes if modules are not enabled */ if (val == mod) if (!sym_is_choice_value(sym) && modules_sym->curr.tri == no) val = yes; /* transpose mod to yes if type is bool */ if (sym->type == S_BOOLEAN && val == mod) val = yes; /* adjust the default value if this symbol is implied by another */ if (val < sym->implied.tri) val = sym->implied.tri; switch (sym->type) { case S_BOOLEAN: case S_TRISTATE: switch (val) { case no: return "n"; case mod: return "m"; case yes: return "y"; } case S_INT: case S_HEX: return str; case S_STRING: return str; case S_OTHER: case S_UNKNOWN: break; } return ""; } const char *sym_get_string_value(struct symbol *sym) { tristate val; switch (sym->type) { case S_BOOLEAN: case S_TRISTATE: val = sym_get_tristate_value(sym); switch (val) { case no: return "n"; case mod: sym_calc_value(modules_sym); return (modules_sym->curr.tri == no) ? "n" : "m"; case yes: return "y"; } break; default: ; } return (const char *)sym->curr.val; } bool sym_is_changable(struct symbol *sym) { return sym->visible > sym->rev_dep.tri; } static unsigned strhash(const char *s) { /* fnv32 hash */ unsigned hash = 2166136261U; for (; *s; s++) hash = (hash ^ *s) * 0x01000193; return hash; } struct symbol *sym_lookup(const char *name, int flags) { struct symbol *symbol; char *new_name; int hash; if (name) { if (name[0] && !name[1]) { switch (name[0]) { case 'y': return &symbol_yes; case 'm': return &symbol_mod; case 'n': return &symbol_no; } } hash = strhash(name) % SYMBOL_HASHSIZE; for (symbol = symbol_hash[hash]; symbol; symbol = symbol->next) { if (symbol->name && !strcmp(symbol->name, name) && (flags ? 
symbol->flags & flags : !(symbol->flags & (SYMBOL_CONST|SYMBOL_CHOICE)))) return symbol; } new_name = strdup(name); } else { new_name = NULL; hash = 0; } symbol = xmalloc(sizeof(*symbol)); memset(symbol, 0, sizeof(*symbol)); symbol->name = new_name; symbol->type = S_UNKNOWN; symbol->flags |= flags; symbol->next = symbol_hash[hash]; symbol_hash[hash] = symbol; return symbol; } struct symbol *sym_find(const char *name) { struct symbol *symbol = NULL; int hash = 0; if (!name) return NULL; if (name[0] && !name[1]) { switch (name[0]) { case 'y': return &symbol_yes; case 'm': return &symbol_mod; case 'n': return &symbol_no; } } hash = strhash(name) % SYMBOL_HASHSIZE; for (symbol = symbol_hash[hash]; symbol; symbol = symbol->next) { if (symbol->name && !strcmp(symbol->name, name) && !(symbol->flags & SYMBOL_CONST)) break; } return symbol; } /* * Expand symbol's names embedded in the string given in argument. Symbols' * name to be expanded shall be prefixed by a '$'. Unknown symbol expands to * the empty string. */ const char *sym_expand_string_value(const char *in) { const char *src; char *res; size_t reslen; reslen = strlen(in) + 1; res = xmalloc(reslen); res[0] = '\0'; while ((src = strchr(in, '$'))) { char *p, name[SYMBOL_MAXLENGTH]; const char *symval = ""; struct symbol *sym; size_t newlen; strncat(res, in, src - in); src++; p = name; while (isalnum(*src) || *src == '_') *p++ = *src++; *p = '\0'; sym = sym_find(name); if (sym != NULL) { sym_calc_value(sym); symval = sym_get_string_value(sym); } newlen = strlen(res) + strlen(symval) + strlen(src) + 1; if (newlen > reslen) { reslen = newlen; res = realloc(res, reslen); } strcat(res, symval); in = src; } strcat(res, in); return res; } const char *sym_escape_string_value(const char *in) { const char *p; size_t reslen; char *res; size_t l; reslen = strlen(in) + strlen("\"\"") + 1; p = in; for (;;) { l = strcspn(p, "\"\\"); p += l; if (p[0] == '\0') break; reslen++; p++; } res = xmalloc(reslen); res[0] = '\0'; strcat(res, "\""); p = in; for (;;) { l = strcspn(p, "\"\\"); strncat(res, p, l); p += l; if (p[0] == '\0') break; strcat(res, "\\"); strncat(res, p++, 1); } strcat(res, "\""); return res; } struct sym_match { struct symbol *sym; off_t so, eo; }; /* Compare matched symbols as thus: * - first, symbols that match exactly * - then, alphabetical sort */ static int sym_rel_comp(const void *sym1, const void *sym2) { const struct sym_match *s1 = sym1; const struct sym_match *s2 = sym2; int exact1, exact2; /* Exact match: * - if matched length on symbol s1 is the length of that symbol, * then this symbol should come first; * - if matched length on symbol s2 is the length of that symbol, * then this symbol should come first. * Note: since the search can be a regexp, both symbols may match * exactly; if this is the case, we can't decide which comes first, * and we fallback to sorting alphabetically. 
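 * A negative return sorts sym1 before sym2, per the qsort() comparator
 * contract.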
*/ exact1 = (s1->eo - s1->so) == strlen(s1->sym->name); exact2 = (s2->eo - s2->so) == strlen(s2->sym->name); if (exact1 && !exact2) return -1; if (!exact1 && exact2) return 1; /* As a fallback, sort symbols alphabetically */ return strcmp(s1->sym->name, s2->sym->name); } struct symbol **sym_re_search(const char *pattern) { struct symbol *sym, **sym_arr = NULL; struct sym_match *sym_match_arr = NULL; int i, cnt, size; regex_t re; regmatch_t match[1]; cnt = size = 0; /* Skip if empty */ if (strlen(pattern) == 0) return NULL; if (regcomp(&re, pattern, REG_EXTENDED|REG_ICASE)) return NULL; for_all_symbols(i, sym) { if (sym->flags & SYMBOL_CONST || !sym->name) continue; if (regexec(&re, sym->name, 1, match, 0)) continue; if (cnt >= size) { void *tmp; size += 16; tmp = realloc(sym_match_arr, size * sizeof(struct sym_match)); if (!tmp) goto sym_re_search_free; sym_match_arr = tmp; } sym_calc_value(sym); /* As regexec returned 0, we know we have a match, so * we can use match[0].rm_[se]o without further checks */ sym_match_arr[cnt].so = match[0].rm_so; sym_match_arr[cnt].eo = match[0].rm_eo; sym_match_arr[cnt++].sym = sym; } if (sym_match_arr) { qsort(sym_match_arr, cnt, sizeof(struct sym_match), sym_rel_comp); sym_arr = malloc((cnt+1) * sizeof(struct symbol)); if (!sym_arr) goto sym_re_search_free; for (i = 0; i < cnt; i++) sym_arr[i] = sym_match_arr[i].sym; sym_arr[cnt] = NULL; } sym_re_search_free: /* sym_match_arr can be NULL if no match, but free(NULL) is OK */ free(sym_match_arr); regfree(&re); return sym_arr; } /* * When we check for recursive dependencies we use a stack to save * current state so we can print out relevant info to user. * The entries are located on the call stack so no need to free memory. * Note insert() remove() must always match to properly clear the stack. */ static struct dep_stack { struct dep_stack *prev, *next; struct symbol *sym; struct property *prop; struct expr *expr; } *check_top; static void dep_stack_insert(struct dep_stack *stack, struct symbol *sym) { memset(stack, 0, sizeof(*stack)); if (check_top) check_top->next = stack; stack->prev = check_top; stack->sym = sym; check_top = stack; } static void dep_stack_remove(void) { check_top = check_top->prev; if (check_top) check_top->next = NULL; } /* * Called when we have detected a recursive dependency. * check_top point to the top of the stact so we use * the ->prev pointer to locate the bottom of the stack. */ static void sym_check_print_recursive(struct symbol *last_sym) { struct dep_stack *stack; struct symbol *sym, *next_sym; struct menu *menu = NULL; struct property *prop; struct dep_stack cv_stack; if (sym_is_choice_value(last_sym)) { dep_stack_insert(&cv_stack, last_sym); last_sym = prop_get_symbol(sym_get_choice_prop(last_sym)); } for (stack = check_top; stack != NULL; stack = stack->prev) if (stack->sym == last_sym) break; if (!stack) { fprintf(stderr, "unexpected recursive dependency error\n"); return; } for (; stack; stack = stack->next) { sym = stack->sym; next_sym = stack->next ? 
stack->next->sym : last_sym; prop = stack->prop; if (prop == NULL) prop = stack->sym->prop; /* for choice values find the menu entry (used below) */ if (sym_is_choice(sym) || sym_is_choice_value(sym)) { for (prop = sym->prop; prop; prop = prop->next) { menu = prop->menu; if (prop->menu) break; } } if (stack->sym == last_sym) fprintf(stderr, "%s:%d:error: recursive dependency detected!\n", prop->file->name, prop->lineno); fprintf(stderr, "For a resolution refer to Documentation/kbuild/kconfig-language.txt\n"); fprintf(stderr, "subsection \"Kconfig recursive dependency limitations\"\n"); if (stack->expr) { fprintf(stderr, "%s:%d:\tsymbol %s %s value contains %s\n", prop->file->name, prop->lineno, sym->name ? sym->name : "", prop_get_type_name(prop->type), next_sym->name ? next_sym->name : ""); } else if (stack->prop) { fprintf(stderr, "%s:%d:\tsymbol %s depends on %s\n", prop->file->name, prop->lineno, sym->name ? sym->name : "", next_sym->name ? next_sym->name : ""); } else if (sym_is_choice(sym)) { fprintf(stderr, "%s:%d:\tchoice %s contains symbol %s\n", menu->file->name, menu->lineno, sym->name ? sym->name : "", next_sym->name ? next_sym->name : ""); } else if (sym_is_choice_value(sym)) { fprintf(stderr, "%s:%d:\tsymbol %s is part of choice %s\n", menu->file->name, menu->lineno, sym->name ? sym->name : "", next_sym->name ? next_sym->name : ""); } else { fprintf(stderr, "%s:%d:\tsymbol %s is selected by %s\n", prop->file->name, prop->lineno, sym->name ? sym->name : "", next_sym->name ? next_sym->name : ""); } } if (check_top == &cv_stack) dep_stack_remove(); } static struct symbol *sym_check_expr_deps(struct expr *e) { struct symbol *sym; if (!e) return NULL; switch (e->type) { case E_OR: case E_AND: sym = sym_check_expr_deps(e->left.expr); if (sym) return sym; return sym_check_expr_deps(e->right.expr); case E_NOT: return sym_check_expr_deps(e->left.expr); case E_EQUAL: case E_GEQ: case E_GTH: case E_LEQ: case E_LTH: case E_UNEQUAL: sym = sym_check_deps(e->left.sym); if (sym) return sym; return sym_check_deps(e->right.sym); case E_SYMBOL: return sym_check_deps(e->left.sym); default: break; } printf("Oops! 
How to check %d?\n", e->type); return NULL; } /* return NULL when dependencies are OK */ static struct symbol *sym_check_sym_deps(struct symbol *sym) { struct symbol *sym2; struct property *prop; struct dep_stack stack; dep_stack_insert(&stack, sym); sym2 = sym_check_expr_deps(sym->rev_dep.expr); if (sym2) goto out; for (prop = sym->prop; prop; prop = prop->next) { if (prop->type == P_CHOICE || prop->type == P_SELECT) continue; stack.prop = prop; sym2 = sym_check_expr_deps(prop->visible.expr); if (sym2) break; if (prop->type != P_DEFAULT || sym_is_choice(sym)) continue; stack.expr = prop->expr; sym2 = sym_check_expr_deps(prop->expr); if (sym2) break; stack.expr = NULL; } out: dep_stack_remove(); return sym2; } static struct symbol *sym_check_choice_deps(struct symbol *choice) { struct symbol *sym, *sym2; struct property *prop; struct expr *e; struct dep_stack stack; dep_stack_insert(&stack, choice); prop = sym_get_choice_prop(choice); expr_list_for_each_sym(prop->expr, e, sym) sym->flags |= (SYMBOL_CHECK | SYMBOL_CHECKED); choice->flags |= (SYMBOL_CHECK | SYMBOL_CHECKED); sym2 = sym_check_sym_deps(choice); choice->flags &= ~SYMBOL_CHECK; if (sym2) goto out; expr_list_for_each_sym(prop->expr, e, sym) { sym2 = sym_check_sym_deps(sym); if (sym2) break; } out: expr_list_for_each_sym(prop->expr, e, sym) sym->flags &= ~SYMBOL_CHECK; if (sym2 && sym_is_choice_value(sym2) && prop_get_symbol(sym_get_choice_prop(sym2)) == choice) sym2 = choice; dep_stack_remove(); return sym2; } struct symbol *sym_check_deps(struct symbol *sym) { struct symbol *sym2; struct property *prop; if (sym->flags & SYMBOL_CHECK) { sym_check_print_recursive(sym); return sym; } if (sym->flags & SYMBOL_CHECKED) return NULL; if (sym_is_choice_value(sym)) { struct dep_stack stack; /* for choice groups start the check with main choice symbol */ dep_stack_insert(&stack, sym); prop = sym_get_choice_prop(sym); sym2 = sym_check_deps(prop_get_symbol(prop)); dep_stack_remove(); } else if (sym_is_choice(sym)) { sym2 = sym_check_choice_deps(sym); } else { sym->flags |= (SYMBOL_CHECK | SYMBOL_CHECKED); sym2 = sym_check_sym_deps(sym); sym->flags &= ~SYMBOL_CHECK; } if (sym2 && sym2 == sym) sym2 = NULL; return sym2; } struct property *prop_alloc(enum prop_type type, struct symbol *sym) { struct property *prop; struct property **propp; prop = xmalloc(sizeof(*prop)); memset(prop, 0, sizeof(*prop)); prop->type = type; prop->sym = sym; prop->file = current_file; prop->lineno = zconf_lineno(); /* append property to the prop list of symbol */ if (sym) { for (propp = &sym->prop; *propp; propp = &(*propp)->next) ; *propp = prop; } return prop; } struct symbol *prop_get_symbol(struct property *prop) { if (prop->expr && (prop->expr->type == E_SYMBOL || prop->expr->type == E_LIST)) return prop->expr->left.sym; return NULL; } const char *prop_get_type_name(enum prop_type type) { switch (type) { case P_PROMPT: return "prompt"; case P_ENV: return "env"; case P_COMMENT: return "comment"; case P_MENU: return "menu"; case P_DEFAULT: return "default"; case P_CHOICE: return "choice"; case P_SELECT: return "select"; case P_IMPLY: return "imply"; case P_RANGE: return "range"; case P_SYMBOL: return "symbol"; case P_UNKNOWN: break; } return "unknown"; } static void prop_add_env(const char *env) { struct symbol *sym, *sym2; struct property *prop; char *p; sym = current_entry->sym; sym->flags |= SYMBOL_AUTO; for_all_properties(sym, prop, P_ENV) { sym2 = prop_get_symbol(prop); if (strcmp(sym2->name, env)) menu_warn(current_entry, "redefining environment symbol from 
%s", sym2->name); return; } prop = prop_alloc(P_ENV, sym); prop->expr = expr_alloc_symbol(sym_lookup(env, SYMBOL_CONST)); sym_env_list = expr_alloc_one(E_LIST, sym_env_list); sym_env_list->right.sym = sym; p = getenv(env); if (p) sym_add_default(sym, p); else menu_warn(current_entry, "environment variable %s undefined", env); } backport-iwlwifi-dkms-7906/kconf/util.c000066400000000000000000000056431351064634500201320ustar00rootroot00000000000000/* * Copyright (C) 2002-2005 Roman Zippel * Copyright (C) 2002-2005 Sam Ravnborg * * Released under the terms of the GNU GPL v2.0. */ #include #include #include #include "lkc.h" /* file already present in list? If not add it */ struct file *file_lookup(const char *name) { struct file *file; const char *file_name = sym_expand_string_value(name); for (file = file_list; file; file = file->next) { if (!strcmp(name, file->name)) { free((void *)file_name); return file; } } file = xmalloc(sizeof(*file)); memset(file, 0, sizeof(*file)); file->name = file_name; file->next = file_list; file_list = file; return file; } /* write a dependency file as used by kbuild to track dependencies */ int file_write_dep(const char *name) { struct symbol *sym, *env_sym; struct expr *e; struct file *file; FILE *out; if (!name) name = ".kconfig.d"; out = fopen("..config.tmp", "w"); if (!out) return 1; fprintf(out, "deps_config := \\\n"); for (file = file_list; file; file = file->next) { if (file->next) fprintf(out, "\t%s \\\n", file->name); else fprintf(out, "\t%s\n", file->name); } fprintf(out, "\n%s: \\\n" "\t$(deps_config)\n\n", conf_get_autoconfig_name()); expr_list_for_each_sym(sym_env_list, e, sym) { struct property *prop; const char *value; prop = sym_get_env_prop(sym); env_sym = prop_get_symbol(prop); if (!env_sym) continue; value = getenv(env_sym->name); if (!value) value = ""; fprintf(out, "ifneq \"$(%s)\" \"%s\"\n", env_sym->name, value); fprintf(out, "%s: FORCE\n", conf_get_autoconfig_name()); fprintf(out, "endif\n"); } fprintf(out, "\n$(deps_config): ;\n"); fclose(out); rename("..config.tmp", name); return 0; } /* Allocate initial growable string */ struct gstr str_new(void) { struct gstr gs; gs.s = xmalloc(sizeof(char) * 64); gs.len = 64; gs.max_width = 0; strcpy(gs.s, "\0"); return gs; } /* Free storage for growable string */ void str_free(struct gstr *gs) { if (gs->s) free(gs->s); gs->s = NULL; gs->len = 0; } /* Append to growable string */ void str_append(struct gstr *gs, const char *s) { size_t l; if (s) { l = strlen(gs->s) + strlen(s) + 1; if (l > gs->len) { gs->s = realloc(gs->s, l); gs->len = l; } strcat(gs->s, s); } } /* Append printf formatted string to growable string */ void str_printf(struct gstr *gs, const char *fmt, ...) { va_list ap; char s[10000]; /* big enough... */ va_start(ap, fmt); vsnprintf(s, sizeof(s), fmt, ap); str_append(gs, s); va_end(ap); } /* Retrieve value of growable string */ const char *str_get(struct gstr *gs) { return gs->s; } void *xmalloc(size_t size) { void *p = malloc(size); if (p) return p; fprintf(stderr, "Out of memory.\n"); exit(1); } void *xcalloc(size_t nmemb, size_t size) { void *p = calloc(nmemb, size); if (p) return p; fprintf(stderr, "Out of memory.\n"); exit(1); } backport-iwlwifi-dkms-7906/kconf/zconf.hash.c000066400000000000000000000305601351064634500212120ustar00rootroot00000000000000/* ANSI-C code produced by gperf version 3.0.4 */ /* Command-line: gperf -t --output-file scripts/kconfig/zconf.hash.c_shipped -a -C -E -g -k '1,3,$' -p -t scripts/kconfig/zconf.gperf */ #if !((' ' == 32) && ('!' 
== 33) && ('"' == 34) && ('#' == 35) \ && ('%' == 37) && ('&' == 38) && ('\'' == 39) && ('(' == 40) \ && (')' == 41) && ('*' == 42) && ('+' == 43) && (',' == 44) \ && ('-' == 45) && ('.' == 46) && ('/' == 47) && ('0' == 48) \ && ('1' == 49) && ('2' == 50) && ('3' == 51) && ('4' == 52) \ && ('5' == 53) && ('6' == 54) && ('7' == 55) && ('8' == 56) \ && ('9' == 57) && (':' == 58) && (';' == 59) && ('<' == 60) \ && ('=' == 61) && ('>' == 62) && ('?' == 63) && ('A' == 65) \ && ('B' == 66) && ('C' == 67) && ('D' == 68) && ('E' == 69) \ && ('F' == 70) && ('G' == 71) && ('H' == 72) && ('I' == 73) \ && ('J' == 74) && ('K' == 75) && ('L' == 76) && ('M' == 77) \ && ('N' == 78) && ('O' == 79) && ('P' == 80) && ('Q' == 81) \ && ('R' == 82) && ('S' == 83) && ('T' == 84) && ('U' == 85) \ && ('V' == 86) && ('W' == 87) && ('X' == 88) && ('Y' == 89) \ && ('Z' == 90) && ('[' == 91) && ('\\' == 92) && (']' == 93) \ && ('^' == 94) && ('_' == 95) && ('a' == 97) && ('b' == 98) \ && ('c' == 99) && ('d' == 100) && ('e' == 101) && ('f' == 102) \ && ('g' == 103) && ('h' == 104) && ('i' == 105) && ('j' == 106) \ && ('k' == 107) && ('l' == 108) && ('m' == 109) && ('n' == 110) \ && ('o' == 111) && ('p' == 112) && ('q' == 113) && ('r' == 114) \ && ('s' == 115) && ('t' == 116) && ('u' == 117) && ('v' == 118) \ && ('w' == 119) && ('x' == 120) && ('y' == 121) && ('z' == 122) \ && ('{' == 123) && ('|' == 124) && ('}' == 125) && ('~' == 126)) /* The character set is not based on ISO-646. */ #error "gperf generated tables don't work with this execution character set. Please report a bug to ." #endif #line 10 "scripts/kconfig/zconf.gperf" struct kconf_id; static const struct kconf_id *kconf_id_lookup(register const char *str, register unsigned int len); /* maximum key range = 71, duplicates = 0 */ #ifdef __GNUC__ __inline #else #ifdef __cplusplus inline #endif #endif static unsigned int kconf_id_hash (register const char *str, register unsigned int len) { static const unsigned char asso_values[] = { 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 0, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 10, 25, 25, 0, 0, 0, 5, 0, 0, 73, 73, 5, 0, 10, 5, 45, 73, 20, 20, 0, 15, 15, 73, 20, 0, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73 }; register int hval = len; switch (hval) { default: hval += asso_values[(unsigned char)str[2]]; /*FALLTHROUGH*/ case 2: case 1: hval += asso_values[(unsigned char)str[0]]; break; } return hval + asso_values[(unsigned char)str[len - 1]]; } struct kconf_id_strings_t { char kconf_id_strings_str2[sizeof("if")]; char kconf_id_strings_str3[sizeof("int")]; char kconf_id_strings_str5[sizeof("endif")]; char kconf_id_strings_str7[sizeof("default")]; char kconf_id_strings_str8[sizeof("tristate")]; char kconf_id_strings_str9[sizeof("endchoice")]; char 
kconf_id_strings_str10[sizeof("---help---")]; char kconf_id_strings_str12[sizeof("def_tristate")]; char kconf_id_strings_str13[sizeof("def_bool")]; char kconf_id_strings_str14[sizeof("defconfig_list")]; char kconf_id_strings_str17[sizeof("on")]; char kconf_id_strings_str18[sizeof("optional")]; char kconf_id_strings_str21[sizeof("option")]; char kconf_id_strings_str22[sizeof("endmenu")]; char kconf_id_strings_str23[sizeof("mainmenu")]; char kconf_id_strings_str25[sizeof("menuconfig")]; char kconf_id_strings_str27[sizeof("modules")]; char kconf_id_strings_str28[sizeof("allnoconfig_y")]; char kconf_id_strings_str29[sizeof("menu")]; char kconf_id_strings_str31[sizeof("select")]; char kconf_id_strings_str32[sizeof("comment")]; char kconf_id_strings_str33[sizeof("env")]; char kconf_id_strings_str35[sizeof("range")]; char kconf_id_strings_str36[sizeof("choice")]; char kconf_id_strings_str39[sizeof("bool")]; char kconf_id_strings_str41[sizeof("source")]; char kconf_id_strings_str42[sizeof("visible")]; char kconf_id_strings_str43[sizeof("hex")]; char kconf_id_strings_str46[sizeof("config")]; char kconf_id_strings_str47[sizeof("boolean")]; char kconf_id_strings_str50[sizeof("imply")]; char kconf_id_strings_str51[sizeof("string")]; char kconf_id_strings_str54[sizeof("help")]; char kconf_id_strings_str56[sizeof("prompt")]; char kconf_id_strings_str72[sizeof("depends")]; }; static const struct kconf_id_strings_t kconf_id_strings_contents = { "if", "int", "endif", "default", "tristate", "endchoice", "---help---", "def_tristate", "def_bool", "defconfig_list", "on", "optional", "option", "endmenu", "mainmenu", "menuconfig", "modules", "allnoconfig_y", "menu", "select", "comment", "env", "range", "choice", "bool", "source", "visible", "hex", "config", "boolean", "imply", "string", "help", "prompt", "depends" }; #define kconf_id_strings ((const char *) &kconf_id_strings_contents) #ifdef __GNUC__ __inline #if defined __GNUC_STDC_INLINE__ || defined __GNUC_GNU_INLINE__ __attribute__ ((__gnu_inline__)) #endif #endif const struct kconf_id * kconf_id_lookup (register const char *str, register unsigned int len) { enum { TOTAL_KEYWORDS = 35, MIN_WORD_LENGTH = 2, MAX_WORD_LENGTH = 14, MIN_HASH_VALUE = 2, MAX_HASH_VALUE = 72 }; static const struct kconf_id wordlist[] = { {-1}, {-1}, #line 26 "scripts/kconfig/zconf.gperf" {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str2, T_IF, TF_COMMAND|TF_PARAM}, #line 37 "scripts/kconfig/zconf.gperf" {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str3, T_TYPE, TF_COMMAND, S_INT}, {-1}, #line 27 "scripts/kconfig/zconf.gperf" {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str5, T_ENDIF, TF_COMMAND}, {-1}, #line 30 "scripts/kconfig/zconf.gperf" {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str7, T_DEFAULT, TF_COMMAND, S_UNKNOWN}, #line 32 "scripts/kconfig/zconf.gperf" {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str8, T_TYPE, TF_COMMAND, S_TRISTATE}, #line 20 "scripts/kconfig/zconf.gperf" {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str9, T_ENDCHOICE, TF_COMMAND}, #line 25 "scripts/kconfig/zconf.gperf" {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str10, T_HELP, TF_COMMAND}, {-1}, #line 33 "scripts/kconfig/zconf.gperf" {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str12, T_DEFAULT, TF_COMMAND, S_TRISTATE}, #line 36 "scripts/kconfig/zconf.gperf" {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str13, T_DEFAULT, TF_COMMAND, 
S_BOOLEAN}, #line 47 "scripts/kconfig/zconf.gperf" {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str14, T_OPT_DEFCONFIG_LIST,TF_OPTION}, {-1}, {-1}, #line 45 "scripts/kconfig/zconf.gperf" {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str17, T_ON, TF_PARAM}, #line 29 "scripts/kconfig/zconf.gperf" {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str18, T_OPTIONAL, TF_COMMAND}, {-1}, {-1}, #line 44 "scripts/kconfig/zconf.gperf" {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str21, T_OPTION, TF_COMMAND}, #line 17 "scripts/kconfig/zconf.gperf" {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str22, T_ENDMENU, TF_COMMAND}, #line 15 "scripts/kconfig/zconf.gperf" {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str23, T_MAINMENU, TF_COMMAND}, {-1}, #line 23 "scripts/kconfig/zconf.gperf" {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str25, T_MENUCONFIG, TF_COMMAND}, {-1}, #line 46 "scripts/kconfig/zconf.gperf" {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str27, T_OPT_MODULES, TF_OPTION}, #line 49 "scripts/kconfig/zconf.gperf" {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str28, T_OPT_ALLNOCONFIG_Y,TF_OPTION}, #line 16 "scripts/kconfig/zconf.gperf" {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str29, T_MENU, TF_COMMAND}, {-1}, #line 40 "scripts/kconfig/zconf.gperf" {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str31, T_SELECT, TF_COMMAND}, #line 21 "scripts/kconfig/zconf.gperf" {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str32, T_COMMENT, TF_COMMAND}, #line 48 "scripts/kconfig/zconf.gperf" {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str33, T_OPT_ENV, TF_OPTION}, {-1}, #line 42 "scripts/kconfig/zconf.gperf" {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str35, T_RANGE, TF_COMMAND}, #line 19 "scripts/kconfig/zconf.gperf" {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str36, T_CHOICE, TF_COMMAND}, {-1}, {-1}, #line 34 "scripts/kconfig/zconf.gperf" {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str39, T_TYPE, TF_COMMAND, S_BOOLEAN}, {-1}, #line 18 "scripts/kconfig/zconf.gperf" {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str41, T_SOURCE, TF_COMMAND}, #line 43 "scripts/kconfig/zconf.gperf" {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str42, T_VISIBLE, TF_COMMAND}, #line 38 "scripts/kconfig/zconf.gperf" {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str43, T_TYPE, TF_COMMAND, S_HEX}, {-1}, {-1}, #line 22 "scripts/kconfig/zconf.gperf" {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str46, T_CONFIG, TF_COMMAND}, #line 35 "scripts/kconfig/zconf.gperf" {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str47, T_TYPE, TF_COMMAND, S_BOOLEAN}, {-1}, {-1}, #line 41 "scripts/kconfig/zconf.gperf" {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str50, T_IMPLY, TF_COMMAND}, #line 39 "scripts/kconfig/zconf.gperf" {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str51, T_TYPE, TF_COMMAND, S_STRING}, {-1}, {-1}, #line 24 "scripts/kconfig/zconf.gperf" {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str54, T_HELP, TF_COMMAND}, {-1}, #line 31 "scripts/kconfig/zconf.gperf" {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str56, T_PROMPT, TF_COMMAND}, {-1}, {-1}, {-1}, {-1}, {-1}, {-1}, {-1}, {-1}, {-1}, 
{-1}, {-1}, {-1}, {-1}, {-1}, {-1}, #line 28 "scripts/kconfig/zconf.gperf" {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str72, T_DEPENDS, TF_COMMAND} }; if (len <= MAX_WORD_LENGTH && len >= MIN_WORD_LENGTH) { register int key = kconf_id_hash (str, len); if (key <= MAX_HASH_VALUE && key >= 0) { register int o = wordlist[key].name; if (o >= 0) { register const char *s = o + kconf_id_strings; if (*str == *s && !strncmp (str + 1, s + 1, len - 1) && s[len] == '\0') return &wordlist[key]; } } } return 0; } #line 50 "scripts/kconfig/zconf.gperf" backport-iwlwifi-dkms-7906/kconf/zconf.lex.c000066400000000000000000001647361351064634500210660ustar00rootroot00000000000000 #line 3 "scripts/kconfig/zconf.lex.c_shipped" #define YY_INT_ALIGNED short int /* A lexical scanner generated by flex */ #define yy_create_buffer zconf_create_buffer #define yy_delete_buffer zconf_delete_buffer #define yy_flex_debug zconf_flex_debug #define yy_init_buffer zconf_init_buffer #define yy_flush_buffer zconf_flush_buffer #define yy_load_buffer_state zconf_load_buffer_state #define yy_switch_to_buffer zconf_switch_to_buffer #define yyin zconfin #define yyleng zconfleng #define yylex zconflex #define yylineno zconflineno #define yyout zconfout #define yyrestart zconfrestart #define yytext zconftext #define yywrap zconfwrap #define yyalloc zconfalloc #define yyrealloc zconfrealloc #define yyfree zconffree #define FLEX_SCANNER #define YY_FLEX_MAJOR_VERSION 2 #define YY_FLEX_MINOR_VERSION 5 #define YY_FLEX_SUBMINOR_VERSION 35 #if YY_FLEX_SUBMINOR_VERSION > 0 #define FLEX_BETA #endif /* First, we deal with platform-specific or compiler-specific issues. */ /* begin standard C headers. */ #include <stdio.h> #include <string.h> #include <errno.h> #include <stdlib.h> /* end standard C headers. */ /* flex integer type definitions */ #ifndef FLEXINT_H #define FLEXINT_H /* C99 systems have <inttypes.h>. Non-C99 systems may or may not. */ #if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 says to define __STDC_LIMIT_MACROS before including stdint.h, * if you want the limit (max/min) macros for int types. */ #ifndef __STDC_LIMIT_MACROS #define __STDC_LIMIT_MACROS 1 #endif #include <inttypes.h> typedef int8_t flex_int8_t; typedef uint8_t flex_uint8_t; typedef int16_t flex_int16_t; typedef uint16_t flex_uint16_t; typedef int32_t flex_int32_t; typedef uint32_t flex_uint32_t; #else typedef signed char flex_int8_t; typedef short int flex_int16_t; typedef int flex_int32_t; typedef unsigned char flex_uint8_t; typedef unsigned short int flex_uint16_t; typedef unsigned int flex_uint32_t; /* Limits of integral types. */ #ifndef INT8_MIN #define INT8_MIN (-128) #endif #ifndef INT16_MIN #define INT16_MIN (-32767-1) #endif #ifndef INT32_MIN #define INT32_MIN (-2147483647-1) #endif #ifndef INT8_MAX #define INT8_MAX (127) #endif #ifndef INT16_MAX #define INT16_MAX (32767) #endif #ifndef INT32_MAX #define INT32_MAX (2147483647) #endif #ifndef UINT8_MAX #define UINT8_MAX (255U) #endif #ifndef UINT16_MAX #define UINT16_MAX (65535U) #endif #ifndef UINT32_MAX #define UINT32_MAX (4294967295U) #endif #endif /* ! C99 */ #endif /* ! FLEXINT_H */ #ifdef __cplusplus /* The "const" storage-class-modifier is valid. */ #define YY_USE_CONST #else /* ! __cplusplus */ /* C99 requires __STDC__ to be defined as 1. */ #if defined (__STDC__) #define YY_USE_CONST #endif /* defined (__STDC__) */ #endif /* ! __cplusplus */ #ifdef YY_USE_CONST #define yyconst const #else #define yyconst #endif /* Returned upon end-of-file.
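* For this scanner that only happens once every include file has been popped: the EOF actions further down return T_EOL once per file via zconf_endfile() and call yyterminate() (which yields YY_NULL) only when current_file is exhausted. (Editorial note summarizing the EOF rules later in this file.)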
*/ #define YY_NULL 0 /* Promotes a possibly negative, possibly signed char to an unsigned * integer for use as an array index. If the signed char is negative, * we want to instead treat it as an 8-bit unsigned char, hence the * double cast. */ #define YY_SC_TO_UI(c) ((unsigned int) (unsigned char) c) /* Enter a start condition. This macro really ought to take a parameter, * but we do it the disgusting crufty way forced on us by the ()-less * definition of BEGIN. */ #define BEGIN (yy_start) = 1 + 2 * /* Translate the current start state into a value that can be later handed * to BEGIN to return to the state. The YYSTATE alias is for lex * compatibility. */ #define YY_START (((yy_start) - 1) / 2) #define YYSTATE YY_START /* Action number for EOF rule of a given start state. */ #define YY_STATE_EOF(state) (YY_END_OF_BUFFER + state + 1) /* Special action meaning "start processing a new file". */ #define YY_NEW_FILE zconfrestart(zconfin ) #define YY_END_OF_BUFFER_CHAR 0 /* Size of default input buffer. */ #ifndef YY_BUF_SIZE #ifdef __ia64__ /* On IA-64, the buffer size is 16k, not 8k. * Moreover, YY_BUF_SIZE is 2*YY_READ_BUF_SIZE in the general case. * Ditto for the __ia64__ case accordingly. */ #define YY_BUF_SIZE 32768 #else #define YY_BUF_SIZE 16384 #endif /* __ia64__ */ #endif /* The state buf must be large enough to hold one state per character in the main buffer. */ #define YY_STATE_BUF_SIZE ((YY_BUF_SIZE + 2) * sizeof(yy_state_type)) #ifndef YY_TYPEDEF_YY_BUFFER_STATE #define YY_TYPEDEF_YY_BUFFER_STATE typedef struct yy_buffer_state *YY_BUFFER_STATE; #endif extern int zconfleng; extern FILE *zconfin, *zconfout; #define EOB_ACT_CONTINUE_SCAN 0 #define EOB_ACT_END_OF_FILE 1 #define EOB_ACT_LAST_MATCH 2 #define YY_LESS_LINENO(n) /* Return all but the first "n" matched characters back to the input stream. */ #define yyless(n) \ do \ { \ /* Undo effects of setting up zconftext. */ \ int yyless_macro_arg = (n); \ YY_LESS_LINENO(yyless_macro_arg);\ *yy_cp = (yy_hold_char); \ YY_RESTORE_YY_MORE_OFFSET \ (yy_c_buf_p) = yy_cp = yy_bp + yyless_macro_arg - YY_MORE_ADJ; \ YY_DO_BEFORE_ACTION; /* set up zconftext again */ \ } \ while ( 0 ) #define unput(c) yyunput( c, (yytext_ptr) ) #ifndef YY_TYPEDEF_YY_SIZE_T #define YY_TYPEDEF_YY_SIZE_T typedef size_t yy_size_t; #endif #ifndef YY_STRUCT_YY_BUFFER_STATE #define YY_STRUCT_YY_BUFFER_STATE struct yy_buffer_state { FILE *yy_input_file; char *yy_ch_buf; /* input buffer */ char *yy_buf_pos; /* current position in input buffer */ /* Size of input buffer in bytes, not including room for EOB * characters. */ yy_size_t yy_buf_size; /* Number of characters read into yy_ch_buf, not including EOB * characters. */ int yy_n_chars; /* Whether we "own" the buffer - i.e., we know we created it, * and can realloc() it to grow it, and should free() it to * delete it. */ int yy_is_our_buffer; /* Whether this is an "interactive" input source; if so, and * if we're using stdio for input, then we want to use getc() * instead of fread(), to make sure we stop fetching input after * each newline. */ int yy_is_interactive; /* Whether we're considered to be at the beginning of a line. * If so, '^' rules will be active on the next match, otherwise * not. */ int yy_at_bol; int yy_bs_lineno; /**< The line count. */ int yy_bs_column; /**< The column count. */ /* Whether to try to fill the input buffer when we reach the * end of it. 
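* (Editorial note, based on the flex skeleton rather than this file alone: buffers attached to a FILE by zconf_create_buffer() set this flag, while in-memory buffers made by zconf_scan_buffer()/zconf_scan_string() leave it clear, so scanning simply ends at the end of the supplied text.)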
*/ int yy_fill_buffer; int yy_buffer_status; #define YY_BUFFER_NEW 0 #define YY_BUFFER_NORMAL 1 /* When an EOF's been seen but there's still some text to process * then we mark the buffer as YY_EOF_PENDING, to indicate that we * shouldn't try reading from the input source any more. We might * still have a bunch of tokens to match, though, because of * possible backing-up. * * When we actually see the EOF, we change the status to "new" * (via zconfrestart()), so that the user can continue scanning by * just pointing zconfin at a new input file. */ #define YY_BUFFER_EOF_PENDING 2 }; #endif /* !YY_STRUCT_YY_BUFFER_STATE */ /* Stack of input buffers. */ static size_t yy_buffer_stack_top = 0; /**< index of top of stack. */ static size_t yy_buffer_stack_max = 0; /**< capacity of stack. */ static YY_BUFFER_STATE * yy_buffer_stack = 0; /**< Stack as an array. */ /* We provide macros for accessing buffer states in case in the * future we want to put the buffer states in a more general * "scanner state". * * Returns the top of the stack, or NULL. */ #define YY_CURRENT_BUFFER ( (yy_buffer_stack) \ ? (yy_buffer_stack)[(yy_buffer_stack_top)] \ : NULL) /* Same as previous macro, but useful when we know that the buffer stack is not * NULL or when we need an lvalue. For internal use only. */ #define YY_CURRENT_BUFFER_LVALUE (yy_buffer_stack)[(yy_buffer_stack_top)] /* yy_hold_char holds the character lost when zconftext is formed. */ static char yy_hold_char; static int yy_n_chars; /* number of characters read into yy_ch_buf */ int zconfleng; /* Points to current character in buffer. */ static char *yy_c_buf_p = (char *) 0; static int yy_init = 0; /* whether we need to initialize */ static int yy_start = 0; /* start state number */ /* Flag which is used to allow zconfwrap()'s to do buffer switches * instead of setting up a fresh zconfin. A bit of a hack ... */ static int yy_did_buffer_switch_on_eof; void zconfrestart (FILE *input_file ); void zconf_switch_to_buffer (YY_BUFFER_STATE new_buffer ); YY_BUFFER_STATE zconf_create_buffer (FILE *file,int size ); void zconf_delete_buffer (YY_BUFFER_STATE b ); void zconf_flush_buffer (YY_BUFFER_STATE b ); void zconfpush_buffer_state (YY_BUFFER_STATE new_buffer ); void zconfpop_buffer_state (void ); static void zconfensure_buffer_stack (void ); static void zconf_load_buffer_state (void ); static void zconf_init_buffer (YY_BUFFER_STATE b,FILE *file ); #define YY_FLUSH_BUFFER zconf_flush_buffer(YY_CURRENT_BUFFER ) YY_BUFFER_STATE zconf_scan_buffer (char *base,yy_size_t size ); YY_BUFFER_STATE zconf_scan_string (yyconst char *yy_str ); YY_BUFFER_STATE zconf_scan_bytes (yyconst char *bytes,int len ); void *zconfalloc (yy_size_t ); void *zconfrealloc (void *,yy_size_t ); void zconffree (void * ); #define yy_new_buffer zconf_create_buffer #define yy_set_interactive(is_interactive) \ { \ if ( ! YY_CURRENT_BUFFER ){ \ zconfensure_buffer_stack (); \ YY_CURRENT_BUFFER_LVALUE = \ zconf_create_buffer(zconfin,YY_BUF_SIZE ); \ } \ YY_CURRENT_BUFFER_LVALUE->yy_is_interactive = is_interactive; \ } #define yy_set_bol(at_bol) \ { \ if ( ! 
YY_CURRENT_BUFFER ){\ zconfensure_buffer_stack (); \ YY_CURRENT_BUFFER_LVALUE = \ zconf_create_buffer(zconfin,YY_BUF_SIZE ); \ } \ YY_CURRENT_BUFFER_LVALUE->yy_at_bol = at_bol; \ } #define YY_AT_BOL() (YY_CURRENT_BUFFER_LVALUE->yy_at_bol) /* Begin user sect3 */ #define zconfwrap(n) 1 #define YY_SKIP_YYWRAP typedef unsigned char YY_CHAR; FILE *zconfin = (FILE *) 0, *zconfout = (FILE *) 0; typedef int yy_state_type; extern int zconflineno; int zconflineno = 1; extern char *zconftext; #define yytext_ptr zconftext static yyconst flex_int16_t yy_nxt[][18] = { { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 11, 12, 13, 14, 12, 12, 15, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12 }, { 11, 12, 13, 14, 12, 12, 15, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12 }, { 11, 16, 16, 17, 16, 16, 16, 16, 16, 16, 16, 18, 16, 16, 16, 16, 16, 16 }, { 11, 16, 16, 17, 16, 16, 16, 16, 16, 16, 16, 18, 16, 16, 16, 16, 16, 16 }, { 11, 19, 20, 21, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19 }, { 11, 19, 20, 21, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19 }, { 11, 22, 22, 23, 22, 24, 22, 22, 24, 22, 22, 22, 22, 22, 22, 22, 25, 22 }, { 11, 22, 22, 23, 22, 24, 22, 22, 24, 22, 22, 22, 22, 22, 22, 22, 25, 22 }, { 11, 26, 27, 28, 29, 30, 31, 32, 30, 33, 34, 35, 35, 36, 37, 38, 39, 40 }, { 11, 26, 27, 28, 29, 30, 31, 32, 30, 33, 34, 35, 35, 36, 37, 38, 39, 40 }, { -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11 }, { 11, -12, -12, -12, -12, -12, -12, -12, -12, -12, -12, -12, -12, -12, -12, -12, -12, -12 }, { 11, -13, 41, 42, -13, -13, 43, -13, -13, -13, -13, -13, -13, -13, -13, -13, -13, -13 }, { 11, -14, -14, -14, -14, -14, -14, -14, -14, -14, -14, -14, -14, -14, -14, -14, -14, -14 }, { 11, 44, 44, 45, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44 }, { 11, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16 }, { 11, -17, -17, -17, -17, -17, -17, -17, -17, -17, -17, -17, -17, -17, -17, -17, -17, -17 }, { 11, -18, -18, -18, -18, -18, -18, -18, -18, -18, -18, 46, -18, -18, -18, -18, -18, -18 }, { 11, 47, 47, -19, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47 }, { 11, -20, 48, 49, -20, -20, -20, -20, -20, -20, -20, -20, -20, -20, -20, -20, -20, -20 }, { 11, 50, -21, -21, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50 }, { 11, 51, 51, 52, 51, -22, 51, 51, -22, 51, 51, 51, 51, 51, 51, 51, -22, 51 }, { 11, -23, -23, -23, -23, -23, -23, -23, -23, -23, -23, -23, -23, -23, -23, -23, -23, -23 }, { 11, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24 }, { 11, 53, 53, 54, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53 }, { 11, -26, -26, -26, -26, -26, -26, -26, -26, -26, -26, -26, -26, -26, -26, -26, -26, -26 }, { 11, -27, 55, -27, -27, -27, -27, -27, -27, -27, -27, -27, -27, -27, -27, -27, -27, -27 }, { 11, -28, -28, -28, -28, -28, -28, -28, -28, -28, -28, -28, -28, -28, -28, -28, -28, -28 }, { 11, -29, -29, -29, -29, -29, -29, -29, -29, -29, -29, -29, -29, -29, 56, -29, -29, -29 }, { 11, -30, -30, -30, -30, -30, -30, -30, -30, -30, -30, -30, -30, -30, -30, -30, -30, -30 }, { 11, 57, 57, -31, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57 }, { 11, -32, -32, -32, -32, -32, -32, 58, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32 }, { 11, -33, -33, -33, -33, -33, -33, -33, -33, -33, -33, -33, -33, -33, -33, -33, -33, -33 }, { 11, -34, -34, -34, -34, -34, -34, -34, -34, -34, -34, -34, -34, -34, -34, -34, -34, -34 }, { 11, -35, -35, -35, -35, -35, -35, -35, -35, 
-35, -35, 59, 59, -35, -35, -35, -35, -35 }, { 11, -36, -36, -36, -36, -36, -36, -36, -36, -36, -36, -36, -36, -36, 60, -36, -36, -36 }, { 11, -37, -37, -37, -37, -37, -37, -37, -37, -37, -37, -37, -37, -37, -37, -37, -37, -37 }, { 11, -38, -38, -38, -38, -38, -38, -38, -38, -38, -38, -38, -38, -38, 61, -38, -38, -38 }, { 11, -39, -39, 62, -39, -39, -39, -39, -39, -39, -39, -39, -39, -39, -39, -39, -39, -39 }, { 11, -40, -40, -40, -40, -40, -40, -40, -40, -40, -40, -40, -40, -40, -40, -40, -40, 63 }, { 11, -41, 41, 42, -41, -41, 43, -41, -41, -41, -41, -41, -41, -41, -41, -41, -41, -41 }, { 11, -42, -42, -42, -42, -42, -42, -42, -42, -42, -42, -42, -42, -42, -42, -42, -42, -42 }, { 11, 44, 44, 45, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44 }, { 11, 44, 44, 45, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44 }, { 11, -45, -45, -45, -45, -45, -45, -45, -45, -45, -45, -45, -45, -45, -45, -45, -45, -45 }, { 11, -46, -46, -46, -46, -46, -46, -46, -46, -46, -46, 46, -46, -46, -46, -46, -46, -46 }, { 11, 47, 47, -47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47 }, { 11, -48, 48, 49, -48, -48, -48, -48, -48, -48, -48, -48, -48, -48, -48, -48, -48, -48 }, { 11, 50, -49, -49, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50 }, { 11, -50, -50, -50, -50, -50, -50, -50, -50, -50, -50, -50, -50, -50, -50, -50, -50, -50 }, { 11, 51, 51, 52, 51, -51, 51, 51, -51, 51, 51, 51, 51, 51, 51, 51, -51, 51 }, { 11, -52, -52, -52, -52, -52, -52, -52, -52, -52, -52, -52, -52, -52, -52, -52, -52, -52 }, { 11, -53, -53, 54, -53, -53, -53, -53, -53, -53, -53, -53, -53, -53, -53, -53, -53, -53 }, { 11, -54, -54, -54, -54, -54, -54, -54, -54, -54, -54, -54, -54, -54, -54, -54, -54, -54 }, { 11, -55, 55, -55, -55, -55, -55, -55, -55, -55, -55, -55, -55, -55, -55, -55, -55, -55 }, { 11, -56, -56, -56, -56, -56, -56, -56, -56, -56, -56, -56, -56, -56, -56, -56, -56, -56 }, { 11, 57, 57, -57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57 }, { 11, -58, -58, -58, -58, -58, -58, -58, -58, -58, -58, -58, -58, -58, -58, -58, -58, -58 }, { 11, -59, -59, -59, -59, -59, -59, -59, -59, -59, -59, 59, 59, -59, -59, -59, -59, -59 }, { 11, -60, -60, -60, -60, -60, -60, -60, -60, -60, -60, -60, -60, -60, -60, -60, -60, -60 }, { 11, -61, -61, -61, -61, -61, -61, -61, -61, -61, -61, -61, -61, -61, -61, -61, -61, -61 }, { 11, -62, -62, -62, -62, -62, -62, -62, -62, -62, -62, -62, -62, -62, -62, -62, -62, -62 }, { 11, -63, -63, -63, -63, -63, -63, -63, -63, -63, -63, -63, -63, -63, -63, -63, -63, -63 }, } ; static yy_state_type yy_get_previous_state (void ); static yy_state_type yy_try_NUL_trans (yy_state_type current_state ); static int yy_get_next_buffer (void ); static void yy_fatal_error (yyconst char msg[] ); /* Done after the current pattern has been matched and before the * corresponding action - sets up zconftext. */ #define YY_DO_BEFORE_ACTION \ (yytext_ptr) = yy_bp; \ zconfleng = (size_t) (yy_cp - yy_bp); \ (yy_hold_char) = *yy_cp; \ *yy_cp = '\0'; \ (yy_c_buf_p) = yy_cp; #define YY_NUM_RULES 37 #define YY_END_OF_BUFFER 38 /* This struct is not used in this scanner, but its presence is necessary. 
*/ struct yy_trans_info { flex_int32_t yy_verify; flex_int32_t yy_nxt; }; static yyconst flex_int16_t yy_accept[64] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 38, 5, 4, 2, 3, 7, 8, 6, 36, 33, 35, 28, 32, 31, 30, 26, 25, 21, 13, 20, 23, 26, 11, 12, 22, 18, 14, 19, 26, 26, 4, 2, 3, 3, 1, 6, 36, 33, 35, 34, 28, 27, 30, 29, 25, 15, 23, 9, 22, 16, 17, 24, 10 } ; static yyconst flex_int32_t yy_ec[256] = { 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 4, 5, 6, 1, 1, 7, 8, 9, 10, 1, 1, 1, 11, 12, 12, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 1, 1, 13, 14, 15, 1, 1, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 1, 16, 1, 1, 11, 1, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 1, 17, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 } ; extern int zconf_flex_debug; int zconf_flex_debug = 0; /* The intent behind this definition is that it'll catch * any uses of REJECT which flex missed. */ #define REJECT reject_used_but_not_detected #define yymore() yymore_used_but_not_detected #define YY_MORE_ADJ 0 #define YY_RESTORE_YY_MORE_OFFSET char *zconftext; #define YY_NO_INPUT 1 /* * Copyright (C) 2002 Roman Zippel * Released under the terms of the GNU GPL v2.0. */ #include #include #include #include #include #include "lkc.h" #define START_STRSIZE 16 static struct { struct file *file; int lineno; } current_pos; static char *text; static int text_size, text_asize; struct buffer { struct buffer *parent; YY_BUFFER_STATE state; }; struct buffer *current_buf; static int last_ts, first_ts; static void zconf_endhelp(void); static void zconf_endfile(void); static void new_string(void) { text = xmalloc(START_STRSIZE); text_asize = START_STRSIZE; text_size = 0; *text = 0; } static void append_string(const char *str, int size) { int new_size = text_size + size + 1; if (new_size > text_asize) { new_size += START_STRSIZE - 1; new_size &= -START_STRSIZE; text = realloc(text, new_size); text_asize = new_size; } memcpy(text + text_size, str, size); text_size += size; text[text_size] = 0; } static void alloc_string(const char *str, int size) { text = xmalloc(size + 1); memcpy(text, str, size); text[size] = 0; } static void warn_ignored_character(char chr) { fprintf(stderr, "%s:%d:warning: ignoring unsupported character '%c'\n", zconf_curname(), zconf_lineno(), chr); } #define INITIAL 0 #define COMMAND 1 #define HELP 2 #define STRING 3 #define PARAM 4 #ifndef YY_NO_UNISTD_H /* Special case for "unistd.h", since it is non-ANSI. We include it way * down here because we want the user's section 1 to have been scanned first. * The user has a chance to override it with an option. */ #include #endif #ifndef YY_EXTRA_TYPE #define YY_EXTRA_TYPE void * #endif static int yy_init_globals (void ); /* Accessor methods to globals. These are made visible to non-reentrant scanners for convenience. 
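* Hypothetical usage sketch (editorial note, not part of the generated scanner): a driver could call zconfset_in(fopen("Kconfig", "r")) before the first zconflex() call and then use zconfget_text() and zconfget_lineno() to inspect each returned token and its position.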
*/ int zconflex_destroy (void ); int zconfget_debug (void ); void zconfset_debug (int debug_flag ); YY_EXTRA_TYPE zconfget_extra (void ); void zconfset_extra (YY_EXTRA_TYPE user_defined ); FILE *zconfget_in (void ); void zconfset_in (FILE * in_str ); FILE *zconfget_out (void ); void zconfset_out (FILE * out_str ); int zconfget_leng (void ); char *zconfget_text (void ); int zconfget_lineno (void ); void zconfset_lineno (int line_number ); /* Macros after this point can all be overridden by user definitions in * section 1. */ #ifndef YY_SKIP_YYWRAP #ifdef __cplusplus extern "C" int zconfwrap (void ); #else extern int zconfwrap (void ); #endif #endif static void yyunput (int c,char *buf_ptr ); #ifndef yytext_ptr static void yy_flex_strncpy (char *,yyconst char *,int ); #endif #ifdef YY_NEED_STRLEN static int yy_flex_strlen (yyconst char * ); #endif #ifndef YY_NO_INPUT #ifdef __cplusplus static int yyinput (void ); #else static int input (void ); #endif #endif /* Amount of stuff to slurp up with each read. */ #ifndef YY_READ_BUF_SIZE #ifdef __ia64__ /* On IA-64, the buffer size is 16k, not 8k */ #define YY_READ_BUF_SIZE 16384 #else #define YY_READ_BUF_SIZE 8192 #endif /* __ia64__ */ #endif /* Copy whatever the last rule matched to the standard output. */ #ifndef ECHO /* This used to be an fputs(), but since the string might contain NUL's, * we now use fwrite(). */ #define ECHO do { if (fwrite( zconftext, zconfleng, 1, zconfout )) {} } while (0) #endif /* Gets input and stuffs it into "buf". number of characters read, or YY_NULL, * is returned in "result". */ #ifndef YY_INPUT #define YY_INPUT(buf,result,max_size) \ errno=0; \ while ( (result = read( fileno(zconfin), (char *) buf, max_size )) < 0 ) \ { \ if( errno != EINTR) \ { \ YY_FATAL_ERROR( "input in flex scanner failed" ); \ break; \ } \ errno=0; \ clearerr(zconfin); \ }\ \ #endif /* No semi-colon after return; correct usage is to write "yyterminate();" - * we don't want an extra ';' after the "return" because that will cause * some compilers to complain about unreachable statements. */ #ifndef yyterminate #define yyterminate() return YY_NULL #endif /* Number of entries by which start-condition stack grows. */ #ifndef YY_START_STACK_INCR #define YY_START_STACK_INCR 25 #endif /* Report a fatal error. */ #ifndef YY_FATAL_ERROR #define YY_FATAL_ERROR(msg) yy_fatal_error( msg ) #endif /* end tables serialization structures and prototypes */ /* Default declaration of generated scanner - a define so the user can * easily add parameters. */ #ifndef YY_DECL #define YY_DECL_IS_OURS 1 extern int zconflex (void); #define YY_DECL int zconflex (void) #endif /* !YY_DECL */ /* Code executed at the beginning of each rule, after zconftext and zconfleng * have been set up. */ #ifndef YY_USER_ACTION #define YY_USER_ACTION #endif /* Code executed at the end of each rule. */ #ifndef YY_BREAK #define YY_BREAK break; #endif #define YY_RULE_SETUP \ YY_USER_ACTION /** The main scanner function which does all the work. */ YY_DECL { register yy_state_type yy_current_state; register char *yy_cp, *yy_bp; register int yy_act; int str = 0; int ts, i; if ( !(yy_init) ) { (yy_init) = 1; #ifdef YY_USER_INIT YY_USER_INIT; #endif if ( ! (yy_start) ) (yy_start) = 1; /* first start state */ if ( ! zconfin ) zconfin = stdin; if ( ! zconfout ) zconfout = stdout; if ( ! 
YY_CURRENT_BUFFER ) { zconfensure_buffer_stack (); YY_CURRENT_BUFFER_LVALUE = zconf_create_buffer(zconfin,YY_BUF_SIZE ); } zconf_load_buffer_state( ); } while ( 1 ) /* loops until end-of-file is reached */ { yy_cp = (yy_c_buf_p); /* Support of zconftext. */ *yy_cp = (yy_hold_char); /* yy_bp points to the position in yy_ch_buf of the start of * the current run. */ yy_bp = yy_cp; yy_current_state = (yy_start); yy_match: while ( (yy_current_state = yy_nxt[yy_current_state][ yy_ec[YY_SC_TO_UI(*yy_cp)] ]) > 0 ) ++yy_cp; yy_current_state = -yy_current_state; yy_find_action: yy_act = yy_accept[yy_current_state]; YY_DO_BEFORE_ACTION; do_action: /* This label is used only to access EOF actions. */ switch ( yy_act ) { /* beginning of action switch */ case 1: /* rule 1 can match eol */ case 2: /* rule 2 can match eol */ YY_RULE_SETUP { current_file->lineno++; return T_EOL; } YY_BREAK case 3: YY_RULE_SETUP YY_BREAK case 4: YY_RULE_SETUP { BEGIN(COMMAND); } YY_BREAK case 5: YY_RULE_SETUP { unput(zconftext[0]); BEGIN(COMMAND); } YY_BREAK case 6: YY_RULE_SETUP { const struct kconf_id *id = kconf_id_lookup(zconftext, zconfleng); BEGIN(PARAM); current_pos.file = current_file; current_pos.lineno = current_file->lineno; if (id && id->flags & TF_COMMAND) { zconflval.id = id; return id->token; } alloc_string(zconftext, zconfleng); zconflval.string = text; return T_WORD; } YY_BREAK case 7: YY_RULE_SETUP warn_ignored_character(*zconftext); YY_BREAK case 8: /* rule 8 can match eol */ YY_RULE_SETUP { BEGIN(INITIAL); current_file->lineno++; return T_EOL; } YY_BREAK case 9: YY_RULE_SETUP return T_AND; YY_BREAK case 10: YY_RULE_SETUP return T_OR; YY_BREAK case 11: YY_RULE_SETUP return T_OPEN_PAREN; YY_BREAK case 12: YY_RULE_SETUP return T_CLOSE_PAREN; YY_BREAK case 13: YY_RULE_SETUP return T_NOT; YY_BREAK case 14: YY_RULE_SETUP return T_EQUAL; YY_BREAK case 15: YY_RULE_SETUP return T_UNEQUAL; YY_BREAK case 16: YY_RULE_SETUP return T_LESS_EQUAL; YY_BREAK case 17: YY_RULE_SETUP return T_GREATER_EQUAL; YY_BREAK case 18: YY_RULE_SETUP return T_LESS; YY_BREAK case 19: YY_RULE_SETUP return T_GREATER; YY_BREAK case 20: YY_RULE_SETUP { str = zconftext[0]; new_string(); BEGIN(STRING); } YY_BREAK case 21: /* rule 21 can match eol */ YY_RULE_SETUP BEGIN(INITIAL); current_file->lineno++; return T_EOL; YY_BREAK case 22: YY_RULE_SETUP { const struct kconf_id *id = kconf_id_lookup(zconftext, zconfleng); if (id && id->flags & TF_PARAM) { zconflval.id = id; return id->token; } alloc_string(zconftext, zconfleng); zconflval.string = text; return T_WORD; } YY_BREAK case 23: YY_RULE_SETUP /* comment */ YY_BREAK case 24: /* rule 24 can match eol */ YY_RULE_SETUP current_file->lineno++; YY_BREAK case 25: YY_RULE_SETUP YY_BREAK case 26: YY_RULE_SETUP warn_ignored_character(*zconftext); YY_BREAK case YY_STATE_EOF(PARAM): { BEGIN(INITIAL); } YY_BREAK case 27: /* rule 27 can match eol */ *yy_cp = (yy_hold_char); /* undo effects of setting up zconftext */ (yy_c_buf_p) = yy_cp -= 1; YY_DO_BEFORE_ACTION; /* set up zconftext again */ YY_RULE_SETUP { append_string(zconftext, zconfleng); zconflval.string = text; return T_WORD_QUOTE; } YY_BREAK case 28: YY_RULE_SETUP { append_string(zconftext, zconfleng); } YY_BREAK case 29: /* rule 29 can match eol */ *yy_cp = (yy_hold_char); /* undo effects of setting up zconftext */ (yy_c_buf_p) = yy_cp -= 1; YY_DO_BEFORE_ACTION; /* set up zconftext again */ YY_RULE_SETUP { append_string(zconftext + 1, zconfleng - 1); zconflval.string = text; return T_WORD_QUOTE; } YY_BREAK case 30: YY_RULE_SETUP { 
append_string(zconftext + 1, zconfleng - 1); } YY_BREAK case 31: YY_RULE_SETUP { if (str == zconftext[0]) { BEGIN(PARAM); zconflval.string = text; return T_WORD_QUOTE; } else append_string(zconftext, 1); } YY_BREAK case 32: /* rule 32 can match eol */ YY_RULE_SETUP { printf("%s:%d:warning: multi-line strings not supported\n", zconf_curname(), zconf_lineno()); current_file->lineno++; BEGIN(INITIAL); return T_EOL; } YY_BREAK case YY_STATE_EOF(STRING): { BEGIN(INITIAL); } YY_BREAK case 33: YY_RULE_SETUP { ts = 0; for (i = 0; i < zconfleng; i++) { if (zconftext[i] == '\t') ts = (ts & ~7) + 8; else ts++; } last_ts = ts; if (first_ts) { if (ts < first_ts) { zconf_endhelp(); return T_HELPTEXT; } ts -= first_ts; while (ts > 8) { append_string(" ", 8); ts -= 8; } append_string(" ", ts); } } YY_BREAK case 34: /* rule 34 can match eol */ *yy_cp = (yy_hold_char); /* undo effects of setting up zconftext */ (yy_c_buf_p) = yy_cp -= 1; YY_DO_BEFORE_ACTION; /* set up zconftext again */ YY_RULE_SETUP { current_file->lineno++; zconf_endhelp(); return T_HELPTEXT; } YY_BREAK case 35: /* rule 35 can match eol */ YY_RULE_SETUP { current_file->lineno++; append_string("\n", 1); } YY_BREAK case 36: YY_RULE_SETUP { while (zconfleng) { if ((zconftext[zconfleng-1] != ' ') && (zconftext[zconfleng-1] != '\t')) break; zconfleng--; } append_string(zconftext, zconfleng); if (!first_ts) first_ts = last_ts; } YY_BREAK case YY_STATE_EOF(HELP): { zconf_endhelp(); return T_HELPTEXT; } YY_BREAK case YY_STATE_EOF(INITIAL): case YY_STATE_EOF(COMMAND): { if (current_file) { zconf_endfile(); return T_EOL; } fclose(zconfin); yyterminate(); } YY_BREAK case 37: YY_RULE_SETUP YY_FATAL_ERROR( "flex scanner jammed" ); YY_BREAK case YY_END_OF_BUFFER: { /* Amount of text matched not including the EOB char. */ int yy_amount_of_matched_text = (int) (yy_cp - (yytext_ptr)) - 1; /* Undo the effects of YY_DO_BEFORE_ACTION. */ *yy_cp = (yy_hold_char); YY_RESTORE_YY_MORE_OFFSET if ( YY_CURRENT_BUFFER_LVALUE->yy_buffer_status == YY_BUFFER_NEW ) { /* We're scanning a new file or input source. It's * possible that this happened because the user * just pointed zconfin at a new source and called * zconflex(). If so, then we have to assure * consistency between YY_CURRENT_BUFFER and our * globals. Here is the right place to do so, because * this is the first action (other than possibly a * back-up) that will match for the new input source. */ (yy_n_chars) = YY_CURRENT_BUFFER_LVALUE->yy_n_chars; YY_CURRENT_BUFFER_LVALUE->yy_input_file = zconfin; YY_CURRENT_BUFFER_LVALUE->yy_buffer_status = YY_BUFFER_NORMAL; } /* Note that here we test for yy_c_buf_p "<=" to the position * of the first EOB in the buffer, since yy_c_buf_p will * already have been incremented past the NUL character * (since all states make transitions on EOB to the * end-of-buffer state). Contrast this with the test * in input(). */ if ( (yy_c_buf_p) <= &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars)] ) { /* This was really a NUL. */ yy_state_type yy_next_state; (yy_c_buf_p) = (yytext_ptr) + yy_amount_of_matched_text; yy_current_state = yy_get_previous_state( ); /* Okay, we're now positioned to make the NUL * transition. We couldn't have * yy_get_previous_state() go ahead and do it * for us because it doesn't know how to deal * with the possibility of jamming (and we don't * want to build jamming into it because then it * will run more slowly). */ yy_next_state = yy_try_NUL_trans( yy_current_state ); yy_bp = (yytext_ptr) + YY_MORE_ADJ; if ( yy_next_state ) { /* Consume the NUL. 
*/ yy_cp = ++(yy_c_buf_p); yy_current_state = yy_next_state; goto yy_match; } else { yy_cp = (yy_c_buf_p); goto yy_find_action; } } else switch ( yy_get_next_buffer( ) ) { case EOB_ACT_END_OF_FILE: { (yy_did_buffer_switch_on_eof) = 0; if ( zconfwrap( ) ) { /* Note: because we've taken care in * yy_get_next_buffer() to have set up * zconftext, we can now set up * yy_c_buf_p so that if some total * hoser (like flex itself) wants to * call the scanner after we return the * YY_NULL, it'll still work - another * YY_NULL will get returned. */ (yy_c_buf_p) = (yytext_ptr) + YY_MORE_ADJ; yy_act = YY_STATE_EOF(YY_START); goto do_action; } else { if ( ! (yy_did_buffer_switch_on_eof) ) YY_NEW_FILE; } break; } case EOB_ACT_CONTINUE_SCAN: (yy_c_buf_p) = (yytext_ptr) + yy_amount_of_matched_text; yy_current_state = yy_get_previous_state( ); yy_cp = (yy_c_buf_p); yy_bp = (yytext_ptr) + YY_MORE_ADJ; goto yy_match; case EOB_ACT_LAST_MATCH: (yy_c_buf_p) = &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars)]; yy_current_state = yy_get_previous_state( ); yy_cp = (yy_c_buf_p); yy_bp = (yytext_ptr) + YY_MORE_ADJ; goto yy_find_action; } break; } default: YY_FATAL_ERROR( "fatal flex scanner internal error--no action found" ); } /* end of action switch */ } /* end of scanning one token */ } /* end of zconflex */ /* yy_get_next_buffer - try to read in a new buffer * * Returns a code representing an action: * EOB_ACT_LAST_MATCH - * EOB_ACT_CONTINUE_SCAN - continue scanning from current position * EOB_ACT_END_OF_FILE - end of file */ static int yy_get_next_buffer (void) { register char *dest = YY_CURRENT_BUFFER_LVALUE->yy_ch_buf; register char *source = (yytext_ptr); register int number_to_move, i; int ret_val; if ( (yy_c_buf_p) > &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars) + 1] ) YY_FATAL_ERROR( "fatal flex scanner internal error--end of buffer missed" ); if ( YY_CURRENT_BUFFER_LVALUE->yy_fill_buffer == 0 ) { /* Don't try to fill the buffer, so this is an EOF. */ if ( (yy_c_buf_p) - (yytext_ptr) - YY_MORE_ADJ == 1 ) { /* We matched a single character, the EOB, so * treat this as a final EOF. */ return EOB_ACT_END_OF_FILE; } else { /* We matched some text prior to the EOB, first * process it. */ return EOB_ACT_LAST_MATCH; } } /* Try to read more data. */ /* First move last chars to start of buffer. */ number_to_move = (int) ((yy_c_buf_p) - (yytext_ptr)) - 1; for ( i = 0; i < number_to_move; ++i ) *(dest++) = *(source++); if ( YY_CURRENT_BUFFER_LVALUE->yy_buffer_status == YY_BUFFER_EOF_PENDING ) /* don't do the read, it's not guaranteed to return an EOF, * just force an EOF */ YY_CURRENT_BUFFER_LVALUE->yy_n_chars = (yy_n_chars) = 0; else { int num_to_read = YY_CURRENT_BUFFER_LVALUE->yy_buf_size - number_to_move - 1; while ( num_to_read <= 0 ) { /* Not enough room in the buffer - grow it. */ /* just a shorter name for the current buffer */ YY_BUFFER_STATE b = YY_CURRENT_BUFFER; int yy_c_buf_p_offset = (int) ((yy_c_buf_p) - b->yy_ch_buf); if ( b->yy_is_our_buffer ) { int new_size = b->yy_buf_size * 2; if ( new_size <= 0 ) b->yy_buf_size += b->yy_buf_size / 8; else b->yy_buf_size *= 2; b->yy_ch_buf = (char *) /* Include room in for 2 EOB chars. */ zconfrealloc((void *) b->yy_ch_buf,b->yy_buf_size + 2 ); } else /* Can't grow it, we don't own it. */ b->yy_ch_buf = 0; if ( ! 
b->yy_ch_buf ) YY_FATAL_ERROR( "fatal error - scanner input buffer overflow" ); (yy_c_buf_p) = &b->yy_ch_buf[yy_c_buf_p_offset]; num_to_read = YY_CURRENT_BUFFER_LVALUE->yy_buf_size - number_to_move - 1; } if ( num_to_read > YY_READ_BUF_SIZE ) num_to_read = YY_READ_BUF_SIZE; /* Read in more data. */ YY_INPUT( (&YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[number_to_move]), (yy_n_chars), (size_t) num_to_read ); YY_CURRENT_BUFFER_LVALUE->yy_n_chars = (yy_n_chars); } if ( (yy_n_chars) == 0 ) { if ( number_to_move == YY_MORE_ADJ ) { ret_val = EOB_ACT_END_OF_FILE; zconfrestart(zconfin ); } else { ret_val = EOB_ACT_LAST_MATCH; YY_CURRENT_BUFFER_LVALUE->yy_buffer_status = YY_BUFFER_EOF_PENDING; } } else ret_val = EOB_ACT_CONTINUE_SCAN; if ((yy_size_t) ((yy_n_chars) + number_to_move) > YY_CURRENT_BUFFER_LVALUE->yy_buf_size) { /* Extend the array by 50%, plus the number we really need. */ yy_size_t new_size = (yy_n_chars) + number_to_move + ((yy_n_chars) >> 1); YY_CURRENT_BUFFER_LVALUE->yy_ch_buf = (char *) zconfrealloc((void *) YY_CURRENT_BUFFER_LVALUE->yy_ch_buf,new_size ); if ( ! YY_CURRENT_BUFFER_LVALUE->yy_ch_buf ) YY_FATAL_ERROR( "out of dynamic memory in yy_get_next_buffer()" ); } (yy_n_chars) += number_to_move; YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars)] = YY_END_OF_BUFFER_CHAR; YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars) + 1] = YY_END_OF_BUFFER_CHAR; (yytext_ptr) = &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[0]; return ret_val; } /* yy_get_previous_state - get the state just before the EOB char was reached */ static yy_state_type yy_get_previous_state (void) { register yy_state_type yy_current_state; register char *yy_cp; yy_current_state = (yy_start); for ( yy_cp = (yytext_ptr) + YY_MORE_ADJ; yy_cp < (yy_c_buf_p); ++yy_cp ) { yy_current_state = yy_nxt[yy_current_state][(*yy_cp ? yy_ec[YY_SC_TO_UI(*yy_cp)] : 1)]; } return yy_current_state; } /* yy_try_NUL_trans - try to make a transition on the NUL character * * synopsis * next_state = yy_try_NUL_trans( current_state ); */ static yy_state_type yy_try_NUL_trans (yy_state_type yy_current_state ) { register int yy_is_jam; yy_current_state = yy_nxt[yy_current_state][1]; yy_is_jam = (yy_current_state <= 0); return yy_is_jam ? 0 : yy_current_state; } static void yyunput (int c, register char * yy_bp ) { register char *yy_cp; yy_cp = (yy_c_buf_p); /* undo effects of setting up zconftext */ *yy_cp = (yy_hold_char); if ( yy_cp < YY_CURRENT_BUFFER_LVALUE->yy_ch_buf + 2 ) { /* need to shift things up to make room */ /* +2 for EOB chars. */ register int number_to_move = (yy_n_chars) + 2; register char *dest = &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[ YY_CURRENT_BUFFER_LVALUE->yy_buf_size + 2]; register char *source = &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[number_to_move]; while ( source > YY_CURRENT_BUFFER_LVALUE->yy_ch_buf ) *--dest = *--source; yy_cp += (int) (dest - source); yy_bp += (int) (dest - source); YY_CURRENT_BUFFER_LVALUE->yy_n_chars = (yy_n_chars) = YY_CURRENT_BUFFER_LVALUE->yy_buf_size; if ( yy_cp < YY_CURRENT_BUFFER_LVALUE->yy_ch_buf + 2 ) YY_FATAL_ERROR( "flex scanner push-back overflow" ); } *--yy_cp = (char) c; (yytext_ptr) = yy_bp; (yy_hold_char) = *yy_cp; (yy_c_buf_p) = yy_cp; } #ifndef YY_NO_INPUT #ifdef __cplusplus static int yyinput (void) #else static int input (void) #endif { int c; *(yy_c_buf_p) = (yy_hold_char); if ( *(yy_c_buf_p) == YY_END_OF_BUFFER_CHAR ) { /* yy_c_buf_p now points to the character we want to return. * If this occurs *before* the EOB characters, then it's a * valid NUL; if not, then we've hit the end of the buffer. 
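* Worked example (editorial note): with yy_n_chars == 10, a NUL at offset 4 of yy_ch_buf is genuine input, while a NUL at offset 10 is the first of the two YY_END_OF_BUFFER_CHAR sentinels appended by yy_get_next_buffer(), meaning more input must be fetched before a character can be returned.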
*/ if ( (yy_c_buf_p) < &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars)] ) /* This was really a NUL. */ *(yy_c_buf_p) = '\0'; else { /* need more input */ int offset = (yy_c_buf_p) - (yytext_ptr); ++(yy_c_buf_p); switch ( yy_get_next_buffer( ) ) { case EOB_ACT_LAST_MATCH: /* This happens because yy_g_n_b() * sees that we've accumulated a * token and flags that we need to * try matching the token before * proceeding. But for input(), * there's no matching to consider. * So convert the EOB_ACT_LAST_MATCH * to EOB_ACT_END_OF_FILE. */ /* Reset buffer status. */ zconfrestart(zconfin ); /*FALLTHROUGH*/ case EOB_ACT_END_OF_FILE: { if ( zconfwrap( ) ) return EOF; if ( ! (yy_did_buffer_switch_on_eof) ) YY_NEW_FILE; #ifdef __cplusplus return yyinput(); #else return input(); #endif } case EOB_ACT_CONTINUE_SCAN: (yy_c_buf_p) = (yytext_ptr) + offset; break; } } } c = *(unsigned char *) (yy_c_buf_p); /* cast for 8-bit char's */ *(yy_c_buf_p) = '\0'; /* preserve zconftext */ (yy_hold_char) = *++(yy_c_buf_p); return c; } #endif /* ifndef YY_NO_INPUT */ /** Immediately switch to a different input stream. * @param input_file A readable stream. * * @note This function does not reset the start condition to @c INITIAL . */ void zconfrestart (FILE * input_file ) { if ( ! YY_CURRENT_BUFFER ){ zconfensure_buffer_stack (); YY_CURRENT_BUFFER_LVALUE = zconf_create_buffer(zconfin,YY_BUF_SIZE ); } zconf_init_buffer(YY_CURRENT_BUFFER,input_file ); zconf_load_buffer_state( ); } /** Switch to a different input buffer. * @param new_buffer The new input buffer. * */ void zconf_switch_to_buffer (YY_BUFFER_STATE new_buffer ) { /* TODO. We should be able to replace this entire function body * with * zconfpop_buffer_state(); * zconfpush_buffer_state(new_buffer); */ zconfensure_buffer_stack (); if ( YY_CURRENT_BUFFER == new_buffer ) return; if ( YY_CURRENT_BUFFER ) { /* Flush out information for old buffer. */ *(yy_c_buf_p) = (yy_hold_char); YY_CURRENT_BUFFER_LVALUE->yy_buf_pos = (yy_c_buf_p); YY_CURRENT_BUFFER_LVALUE->yy_n_chars = (yy_n_chars); } YY_CURRENT_BUFFER_LVALUE = new_buffer; zconf_load_buffer_state( ); /* We don't actually know whether we did this switch during * EOF (zconfwrap()) processing, but the only time this flag * is looked at is after zconfwrap() is called, so it's safe * to go ahead and always set it. */ (yy_did_buffer_switch_on_eof) = 1; } static void zconf_load_buffer_state (void) { (yy_n_chars) = YY_CURRENT_BUFFER_LVALUE->yy_n_chars; (yytext_ptr) = (yy_c_buf_p) = YY_CURRENT_BUFFER_LVALUE->yy_buf_pos; zconfin = YY_CURRENT_BUFFER_LVALUE->yy_input_file; (yy_hold_char) = *(yy_c_buf_p); } /** Allocate and initialize an input buffer state. * @param file A readable stream. * @param size The character buffer size in bytes. When in doubt, use @c YY_BUF_SIZE. * * @return the allocated buffer state. */ YY_BUFFER_STATE zconf_create_buffer (FILE * file, int size ) { YY_BUFFER_STATE b; b = (YY_BUFFER_STATE) zconfalloc(sizeof( struct yy_buffer_state ) ); if ( ! b ) YY_FATAL_ERROR( "out of dynamic memory in zconf_create_buffer()" ); b->yy_buf_size = size; /* yy_ch_buf has to be 2 characters longer than the size given because * we need to put in 2 end-of-buffer characters. */ b->yy_ch_buf = (char *) zconfalloc(b->yy_buf_size + 2 ); if ( ! b->yy_ch_buf ) YY_FATAL_ERROR( "out of dynamic memory in zconf_create_buffer()" ); b->yy_is_our_buffer = 1; zconf_init_buffer(b,file ); return b; } /** Destroy the buffer. * @param b a buffer created with zconf_create_buffer() * */ void zconf_delete_buffer (YY_BUFFER_STATE b ) { if ( ! 
b ) return; if ( b == YY_CURRENT_BUFFER ) /* Not sure if we should pop here. */ YY_CURRENT_BUFFER_LVALUE = (YY_BUFFER_STATE) 0; if ( b->yy_is_our_buffer ) zconffree((void *) b->yy_ch_buf ); zconffree((void *) b ); } /* Initializes or reinitializes a buffer. * This function is sometimes called more than once on the same buffer, * such as during a zconfrestart() or at EOF. */ static void zconf_init_buffer (YY_BUFFER_STATE b, FILE * file ) { int oerrno = errno; zconf_flush_buffer(b ); b->yy_input_file = file; b->yy_fill_buffer = 1; /* If b is the current buffer, then zconf_init_buffer was _probably_ * called from zconfrestart() or through yy_get_next_buffer. * In that case, we don't want to reset the lineno or column. */ if (b != YY_CURRENT_BUFFER){ b->yy_bs_lineno = 1; b->yy_bs_column = 0; } b->yy_is_interactive = 0; errno = oerrno; } /** Discard all buffered characters. On the next scan, YY_INPUT will be called. * @param b the buffer state to be flushed, usually @c YY_CURRENT_BUFFER. * */ void zconf_flush_buffer (YY_BUFFER_STATE b ) { if ( ! b ) return; b->yy_n_chars = 0; /* We always need two end-of-buffer characters. The first causes * a transition to the end-of-buffer state. The second causes * a jam in that state. */ b->yy_ch_buf[0] = YY_END_OF_BUFFER_CHAR; b->yy_ch_buf[1] = YY_END_OF_BUFFER_CHAR; b->yy_buf_pos = &b->yy_ch_buf[0]; b->yy_at_bol = 1; b->yy_buffer_status = YY_BUFFER_NEW; if ( b == YY_CURRENT_BUFFER ) zconf_load_buffer_state( ); } /** Pushes the new state onto the stack. The new state becomes * the current state. This function will allocate the stack * if necessary. * @param new_buffer The new state. * */ void zconfpush_buffer_state (YY_BUFFER_STATE new_buffer ) { if (new_buffer == NULL) return; zconfensure_buffer_stack(); /* This block is copied from zconf_switch_to_buffer. */ if ( YY_CURRENT_BUFFER ) { /* Flush out information for old buffer. */ *(yy_c_buf_p) = (yy_hold_char); YY_CURRENT_BUFFER_LVALUE->yy_buf_pos = (yy_c_buf_p); YY_CURRENT_BUFFER_LVALUE->yy_n_chars = (yy_n_chars); } /* Only push if top exists. Otherwise, replace top. */ if (YY_CURRENT_BUFFER) (yy_buffer_stack_top)++; YY_CURRENT_BUFFER_LVALUE = new_buffer; /* copied from zconf_switch_to_buffer. */ zconf_load_buffer_state( ); (yy_did_buffer_switch_on_eof) = 1; } /** Removes and deletes the top of the stack, if present. * The next element becomes the new top. * */ void zconfpop_buffer_state (void) { if (!YY_CURRENT_BUFFER) return; zconf_delete_buffer(YY_CURRENT_BUFFER ); YY_CURRENT_BUFFER_LVALUE = NULL; if ((yy_buffer_stack_top) > 0) --(yy_buffer_stack_top); if (YY_CURRENT_BUFFER) { zconf_load_buffer_state( ); (yy_did_buffer_switch_on_eof) = 1; } } /* Allocates the stack if it does not exist. * Guarantees space for at least one push. */ static void zconfensure_buffer_stack (void) { int num_to_alloc; if (!(yy_buffer_stack)) { /* First allocation is just for one element, since we don't know if this * scanner will even need a stack; the stack grows below by a fixed * increment whenever a push actually needs more room. */ num_to_alloc = 1; (yy_buffer_stack) = (struct yy_buffer_state**)zconfalloc (num_to_alloc * sizeof(struct yy_buffer_state*) ); if ( ! (yy_buffer_stack) ) YY_FATAL_ERROR( "out of dynamic memory in zconfensure_buffer_stack()" ); memset((yy_buffer_stack), 0, num_to_alloc * sizeof(struct yy_buffer_state*)); (yy_buffer_stack_max) = num_to_alloc; (yy_buffer_stack_top) = 0; return; } if ((yy_buffer_stack_top) >= ((yy_buffer_stack_max)) - 1){ /* Increase the buffer to prepare for a possible push.
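 * (The stack grows by a small fixed increment, grow_size below, rather than doubling: it only deepens by one entry per pushed buffer, so large jumps are never needed.)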
*/ int grow_size = 8 /* arbitrary grow size */; num_to_alloc = (yy_buffer_stack_max) + grow_size; (yy_buffer_stack) = (struct yy_buffer_state**)zconfrealloc ((yy_buffer_stack), num_to_alloc * sizeof(struct yy_buffer_state*) ); if ( ! (yy_buffer_stack) ) YY_FATAL_ERROR( "out of dynamic memory in zconfensure_buffer_stack()" ); /* zero only the new slots.*/ memset((yy_buffer_stack) + (yy_buffer_stack_max), 0, grow_size * sizeof(struct yy_buffer_state*)); (yy_buffer_stack_max) = num_to_alloc; } } /** Setup the input buffer state to scan directly from a user-specified character buffer. * @param base the character buffer * @param size the size in bytes of the character buffer * * @return the newly allocated buffer state object. */ YY_BUFFER_STATE zconf_scan_buffer (char * base, yy_size_t size ) { YY_BUFFER_STATE b; if ( size < 2 || base[size-2] != YY_END_OF_BUFFER_CHAR || base[size-1] != YY_END_OF_BUFFER_CHAR ) /* They forgot to leave room for the EOB's. */ return 0; b = (YY_BUFFER_STATE) zconfalloc(sizeof( struct yy_buffer_state ) ); if ( ! b ) YY_FATAL_ERROR( "out of dynamic memory in zconf_scan_buffer()" ); b->yy_buf_size = size - 2; /* "- 2" to take care of EOB's */ b->yy_buf_pos = b->yy_ch_buf = base; b->yy_is_our_buffer = 0; b->yy_input_file = 0; b->yy_n_chars = b->yy_buf_size; b->yy_is_interactive = 0; b->yy_at_bol = 1; b->yy_fill_buffer = 0; b->yy_buffer_status = YY_BUFFER_NEW; zconf_switch_to_buffer(b ); return b; } /** Setup the input buffer state to scan a string. The next call to zconflex() will * scan from a @e copy of @a str. * @param yystr a NUL-terminated string to scan * * @return the newly allocated buffer state object. * @note If you want to scan bytes that may contain NUL values, then use * zconf_scan_bytes() instead. */ YY_BUFFER_STATE zconf_scan_string (yyconst char * yystr ) { return zconf_scan_bytes(yystr,strlen(yystr) ); } /** Setup the input buffer state to scan the given bytes. The next call to zconflex() will * scan from a @e copy of @a bytes. * @param yybytes the byte buffer to scan * @param _yybytes_len the number of bytes in the buffer pointed to by @a bytes. * * @return the newly allocated buffer state object. */ YY_BUFFER_STATE zconf_scan_bytes (yyconst char * yybytes, int _yybytes_len ) { YY_BUFFER_STATE b; char *buf; yy_size_t n; int i; /* Get memory for full buffer, including space for trailing EOB's. */ n = _yybytes_len + 2; buf = (char *) zconfalloc(n ); if ( ! buf ) YY_FATAL_ERROR( "out of dynamic memory in zconf_scan_bytes()" ); for ( i = 0; i < _yybytes_len; ++i ) buf[i] = yybytes[i]; buf[_yybytes_len] = buf[_yybytes_len+1] = YY_END_OF_BUFFER_CHAR; b = zconf_scan_buffer(buf,n ); if ( ! b ) YY_FATAL_ERROR( "bad buffer in zconf_scan_bytes()" ); /* It's okay to grow etc. this buffer, and we should throw it * away when we're done. */ b->yy_is_our_buffer = 1; return b; } #ifndef YY_EXIT_FAILURE #define YY_EXIT_FAILURE 2 #endif static void yy_fatal_error (yyconst char* msg ) { (void) fprintf( stderr, "%s\n", msg ); exit( YY_EXIT_FAILURE ); } /* Redefine yyless() so it works in section 3 code. */ #undef yyless #define yyless(n) \ do \ { \ /* Undo effects of setting up zconftext. */ \ int yyless_macro_arg = (n); \ YY_LESS_LINENO(yyless_macro_arg);\ zconftext[zconfleng] = (yy_hold_char); \ (yy_c_buf_p) = zconftext + yyless_macro_arg; \ (yy_hold_char) = *(yy_c_buf_p); \ *(yy_c_buf_p) = '\0'; \ zconfleng = yyless_macro_arg; \ } \ while ( 0 ) /* Accessor methods (get/set functions) to struct members. */ /** Get the current line number. 
* */ int zconfget_lineno (void) { return zconflineno; } /** Get the input stream. * */ FILE *zconfget_in (void) { return zconfin; } /** Get the output stream. * */ FILE *zconfget_out (void) { return zconfout; } /** Get the length of the current token. * */ int zconfget_leng (void) { return zconfleng; } /** Get the current token. * */ char *zconfget_text (void) { return zconftext; } /** Set the current line number. * @param line_number * */ void zconfset_lineno (int line_number ) { zconflineno = line_number; } /** Set the input stream. This does not discard the current * input buffer. * @param in_str A readable stream. * * @see zconf_switch_to_buffer */ void zconfset_in (FILE * in_str ) { zconfin = in_str ; } void zconfset_out (FILE * out_str ) { zconfout = out_str ; } int zconfget_debug (void) { return zconf_flex_debug; } void zconfset_debug (int bdebug ) { zconf_flex_debug = bdebug ; } static int yy_init_globals (void) { /* Initialization is the same as for the non-reentrant scanner. * This function is called from zconflex_destroy(), so don't allocate here. */ (yy_buffer_stack) = 0; (yy_buffer_stack_top) = 0; (yy_buffer_stack_max) = 0; (yy_c_buf_p) = (char *) 0; (yy_init) = 0; (yy_start) = 0; /* Defined in main.c */ #ifdef YY_STDINIT zconfin = stdin; zconfout = stdout; #else zconfin = (FILE *) 0; zconfout = (FILE *) 0; #endif /* For future reference: Set errno on error, since we are called by * zconflex_init() */ return 0; } /* zconflex_destroy is for both reentrant and non-reentrant scanners. */ int zconflex_destroy (void) { /* Pop the buffer stack, destroying each element. */ while(YY_CURRENT_BUFFER){ zconf_delete_buffer(YY_CURRENT_BUFFER ); YY_CURRENT_BUFFER_LVALUE = NULL; zconfpop_buffer_state(); } /* Destroy the stack itself. */ zconffree((yy_buffer_stack) ); (yy_buffer_stack) = NULL; /* Reset the globals. This is important in a non-reentrant scanner so the next time * zconflex() is called, initialization will occur. */ yy_init_globals( ); return 0; } /* * Internal utility routines. */ #ifndef yytext_ptr static void yy_flex_strncpy (char* s1, yyconst char * s2, int n ) { register int i; for ( i = 0; i < n; ++i ) s1[i] = s2[i]; } #endif #ifdef YY_NEED_STRLEN static int yy_flex_strlen (yyconst char * s ) { register int n; for ( n = 0; s[n]; ++n ) ; return n; } #endif void *zconfalloc (yy_size_t size ) { return (void *) malloc( size ); } void *zconfrealloc (void * ptr, yy_size_t size ) { /* The cast to (char *) in the following accommodates both * implementations that use char* generic pointers, and those * that use void* generic pointers. It works with the latter * because both ANSI C and C++ allow castless assignment from * any pointer type to void*, and deal with argument conversions * as though doing an assignment. */ return (void *) realloc( (char *) ptr, size ); } void zconffree (void * ptr ) { free( (char *) ptr ); /* see zconfrealloc() for (char *) cast */ } #define YYTABLES_NAME "yytables" void zconf_starthelp(void) { new_string(); last_ts = first_ts = 0; BEGIN(HELP); } static void zconf_endhelp(void) { zconflval.string = text; BEGIN(INITIAL); } /* * Try to open specified file with following names: * ./name * $(backport_srctree)/name * The latter is used when srctree is separate from objtree * when compiling the kernel. * Return NULL if file is not found. 
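 * Illustration (hypothetical file name): zconf_fopen("Kconfig") tries "./Kconfig" first and, for a relative name, falls back to "$(backport_srctree)/Kconfig" via getenv(SRCTREE).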
*/ FILE *zconf_fopen(const char *name) { char *env, fullname[PATH_MAX+1]; FILE *f; f = fopen(name, "r"); if (!f && name != NULL && name[0] != '/') { env = getenv(SRCTREE); if (env) { sprintf(fullname, "%s/%s", env, name); f = fopen(fullname, "r"); } } return f; } void zconf_initscan(const char *name) { zconfin = zconf_fopen(name); if (!zconfin) { printf("can't find file %s\n", name); exit(1); } current_buf = xmalloc(sizeof(*current_buf)); memset(current_buf, 0, sizeof(*current_buf)); current_file = file_lookup(name); current_file->lineno = 1; } void zconf_nextfile(const char *name) { struct file *iter; struct file *file = file_lookup(name); struct buffer *buf = xmalloc(sizeof(*buf)); memset(buf, 0, sizeof(*buf)); current_buf->state = YY_CURRENT_BUFFER; zconfin = zconf_fopen(file->name); if (!zconfin) { printf("%s:%d: can't open file \"%s\"\n", zconf_curname(), zconf_lineno(), file->name); exit(1); } zconf_switch_to_buffer(zconf_create_buffer(zconfin,YY_BUF_SIZE)); buf->parent = current_buf; current_buf = buf; for (iter = current_file->parent; iter; iter = iter->parent ) { if (!strcmp(current_file->name,iter->name) ) { printf("%s:%d: recursive inclusion detected. " "Inclusion path:\n current file : '%s'\n", zconf_curname(), zconf_lineno(), zconf_curname()); iter = current_file->parent; while (iter && \ strcmp(iter->name,current_file->name)) { printf(" included from: '%s:%d'\n", iter->name, iter->lineno-1); iter = iter->parent; } if (iter) printf(" included from: '%s:%d'\n", iter->name, iter->lineno+1); exit(1); } } file->lineno = 1; file->parent = current_file; current_file = file; } static void zconf_endfile(void) { struct buffer *parent; current_file = current_file->parent; parent = current_buf->parent; if (parent) { fclose(zconfin); zconf_delete_buffer(YY_CURRENT_BUFFER); zconf_switch_to_buffer(parent->state); } free(current_buf); current_buf = parent; } int zconf_lineno(void) { return current_pos.lineno; } const char *zconf_curname(void) { return current_pos.file ? current_pos.file->name : ""; } backport-iwlwifi-dkms-7906/kconf/zconf.tab.c000066400000000000000000002143561351064634500210440ustar00rootroot00000000000000/* A Bison parser, made by GNU Bison 3.0.4. */ /* Bison implementation for Yacc-like parsers in C Copyright (C) 1984, 1989-1990, 2000-2015 Free Software Foundation, Inc. This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see . */ /* As a special exception, you may create a larger work that contains part or all of the Bison parser skeleton and distribute that work under terms of your choice, so long as that work isn't itself a parser generator using the skeleton or a modified version thereof as a parser skeleton. Alternatively, if you modify or redistribute the parser skeleton itself, you may (at your option) remove this special exception, which will cause the skeleton and the resulting Bison output files to be licensed under the GNU General Public License without this special exception. 
This special exception was added by the Free Software Foundation in version 2.2 of Bison. */ /* C LALR(1) parser skeleton written by Richard Stallman, by simplifying the original so-called "semantic" parser. */ /* All symbols defined below should begin with yy or YY, to avoid infringing on user name space. This should be done even for local variables, as they might otherwise be expanded by user macros. There are some unavoidable exceptions within include files to define necessary library symbols; they are noted "INFRINGES ON USER NAME SPACE" below. */ /* Identify Bison output. */ #define YYBISON 1 /* Bison version. */ #define YYBISON_VERSION "3.0.4" /* Skeleton name. */ #define YYSKELETON_NAME "yacc.c" /* Pure parsers. */ #define YYPURE 0 /* Push parsers. */ #define YYPUSH 0 /* Pull parsers. */ #define YYPULL 1 /* Substitute the variable and function names. */ #define yyparse zconfparse #define yylex zconflex #define yyerror zconferror #define yydebug zconfdebug #define yynerrs zconfnerrs #define yylval zconflval #define yychar zconfchar /* Copy the first part of user declarations. */ /* * Copyright (C) 2002 Roman Zippel * Released under the terms of the GNU GPL v2.0. */ #include <ctype.h> #include <stdarg.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <stdbool.h> #include "lkc.h" #define printd(mask, fmt...) if (cdebug & (mask)) printf(fmt) #define PRINTD 0x0001 #define DEBUG_PARSE 0x0002 int cdebug = PRINTD; extern int zconflex(void); static void zconfprint(const char *err, ...); static void zconf_error(const char *err, ...); static void zconferror(const char *err); static bool zconf_endtoken(const struct kconf_id *id, int starttoken, int endtoken); struct symbol *symbol_hash[SYMBOL_HASHSIZE]; static struct menu *current_menu, *current_entry; # ifndef YY_NULLPTR # if defined __cplusplus && 201103L <= __cplusplus # define YY_NULLPTR nullptr # else # define YY_NULLPTR 0 # endif # endif /* Enabling verbose error messages. */ #ifdef YYERROR_VERBOSE # undef YYERROR_VERBOSE # define YYERROR_VERBOSE 1 #else # define YYERROR_VERBOSE 0 #endif /* Debug traces. */ #ifndef YYDEBUG # define YYDEBUG 1 #endif #if YYDEBUG extern int zconfdebug; #endif /* Token type. */ #ifndef YYTOKENTYPE # define YYTOKENTYPE enum yytokentype { T_MAINMENU = 258, T_MENU = 259, T_ENDMENU = 260, T_SOURCE = 261, T_CHOICE = 262, T_ENDCHOICE = 263, T_COMMENT = 264, T_CONFIG = 265, T_MENUCONFIG = 266, T_HELP = 267, T_HELPTEXT = 268, T_IF = 269, T_ENDIF = 270, T_DEPENDS = 271, T_OPTIONAL = 272, T_PROMPT = 273, T_TYPE = 274, T_DEFAULT = 275, T_SELECT = 276, T_IMPLY = 277, T_RANGE = 278, T_VISIBLE = 279, T_OPTION = 280, T_ON = 281, T_WORD = 282, T_WORD_QUOTE = 283, T_UNEQUAL = 284, T_LESS = 285, T_LESS_EQUAL = 286, T_GREATER = 287, T_GREATER_EQUAL = 288, T_CLOSE_PAREN = 289, T_OPEN_PAREN = 290, T_EOL = 291, T_OR = 292, T_AND = 293, T_EQUAL = 294, T_NOT = 295 }; #endif /* Value type. */ #if ! defined YYSTYPE && ! defined YYSTYPE_IS_DECLARED union YYSTYPE { char *string; struct file *file; struct symbol *symbol; struct expr *expr; struct menu *menu; const struct kconf_id *id; }; typedef union YYSTYPE YYSTYPE; # define YYSTYPE_IS_TRIVIAL 1 # define YYSTYPE_IS_DECLARED 1 #endif extern YYSTYPE zconflval; int zconfparse (void); /* Copy the second part of user declarations. */ /* Include zconf.hash.c here so it can see the token constants.
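 * (zconf.hash.c supplies the kconf_id keyword table behind kconf_id_lookup() and kconf_id_strings, mapping Kconfig keywords to the T_* token values declared above.)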
*/ #include "zconf.hash.c" #ifdef short # undef short #endif #ifdef YYTYPE_UINT8 typedef YYTYPE_UINT8 yytype_uint8; #else typedef unsigned char yytype_uint8; #endif #ifdef YYTYPE_INT8 typedef YYTYPE_INT8 yytype_int8; #else typedef signed char yytype_int8; #endif #ifdef YYTYPE_UINT16 typedef YYTYPE_UINT16 yytype_uint16; #else typedef unsigned short int yytype_uint16; #endif #ifdef YYTYPE_INT16 typedef YYTYPE_INT16 yytype_int16; #else typedef short int yytype_int16; #endif #ifndef YYSIZE_T # ifdef __SIZE_TYPE__ # define YYSIZE_T __SIZE_TYPE__ # elif defined size_t # define YYSIZE_T size_t # elif ! defined YYSIZE_T # include <stddef.h> /* INFRINGES ON USER NAME SPACE */ # define YYSIZE_T size_t # else # define YYSIZE_T unsigned int # endif #endif #define YYSIZE_MAXIMUM ((YYSIZE_T) -1) #ifndef YY_ # if defined YYENABLE_NLS && YYENABLE_NLS # if ENABLE_NLS # include <libintl.h> /* INFRINGES ON USER NAME SPACE */ # define YY_(Msgid) dgettext ("bison-runtime", Msgid) # endif # endif # ifndef YY_ # define YY_(Msgid) Msgid # endif #endif #ifndef YY_ATTRIBUTE # if (defined __GNUC__ \ && (2 < __GNUC__ || (__GNUC__ == 2 && 96 <= __GNUC_MINOR__))) \ || defined __SUNPRO_C && 0x5110 <= __SUNPRO_C # define YY_ATTRIBUTE(Spec) __attribute__(Spec) # else # define YY_ATTRIBUTE(Spec) /* empty */ # endif #endif #ifndef YY_ATTRIBUTE_PURE # define YY_ATTRIBUTE_PURE YY_ATTRIBUTE ((__pure__)) #endif #ifndef YY_ATTRIBUTE_UNUSED # define YY_ATTRIBUTE_UNUSED YY_ATTRIBUTE ((__unused__)) #endif #if !defined _Noreturn \ && (!defined __STDC_VERSION__ || __STDC_VERSION__ < 201112) # if defined _MSC_VER && 1200 <= _MSC_VER # define _Noreturn __declspec (noreturn) # else # define _Noreturn YY_ATTRIBUTE ((__noreturn__)) # endif #endif /* Suppress unused-variable warnings by "using" E. */ #if ! defined lint || defined __GNUC__ # define YYUSE(E) ((void) (E)) #else # define YYUSE(E) /* empty */ #endif #if defined __GNUC__ && 407 <= __GNUC__ * 100 + __GNUC_MINOR__ /* Suppress an incorrect diagnostic about yylval being uninitialized. */ # define YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN \ _Pragma ("GCC diagnostic push") \ _Pragma ("GCC diagnostic ignored \"-Wuninitialized\"")\ _Pragma ("GCC diagnostic ignored \"-Wmaybe-uninitialized\"") # define YY_IGNORE_MAYBE_UNINITIALIZED_END \ _Pragma ("GCC diagnostic pop") #else # define YY_INITIAL_VALUE(Value) Value #endif #ifndef YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN # define YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN # define YY_IGNORE_MAYBE_UNINITIALIZED_END #endif #ifndef YY_INITIAL_VALUE # define YY_INITIAL_VALUE(Value) /* Nothing. */ #endif #if ! defined yyoverflow || YYERROR_VERBOSE /* The parser invokes alloca or malloc; define the necessary symbols. */ # ifdef YYSTACK_USE_ALLOCA # if YYSTACK_USE_ALLOCA # ifdef __GNUC__ # define YYSTACK_ALLOC __builtin_alloca # elif defined __BUILTIN_VA_ARG_INCR # include <alloca.h> /* INFRINGES ON USER NAME SPACE */ # elif defined _AIX # define YYSTACK_ALLOC __alloca # elif defined _MSC_VER # include <malloc.h> /* INFRINGES ON USER NAME SPACE */ # define alloca _alloca # else # define YYSTACK_ALLOC alloca # if ! defined _ALLOCA_H && ! defined EXIT_SUCCESS # include <stdlib.h> /* INFRINGES ON USER NAME SPACE */ /* Use EXIT_SUCCESS as a witness for stdlib.h. */ # ifndef EXIT_SUCCESS # define EXIT_SUCCESS 0 # endif # endif # endif # endif # endif # ifdef YYSTACK_ALLOC /* Pacify GCC's 'empty if-body' warning. */ # define YYSTACK_FREE(Ptr) do { /* empty */; } while (0) # ifndef YYSTACK_ALLOC_MAXIMUM /* The OS might guarantee only one guard page at the bottom of the stack, and a page size can be as small as 4096 bytes.
So we cannot safely invoke alloca (N) if N exceeds 4096. Use a slightly smaller number to allow for a few compiler-allocated temporary stack slots. */ # define YYSTACK_ALLOC_MAXIMUM 4032 /* reasonable circa 2006 */ # endif # else # define YYSTACK_ALLOC YYMALLOC # define YYSTACK_FREE YYFREE # ifndef YYSTACK_ALLOC_MAXIMUM # define YYSTACK_ALLOC_MAXIMUM YYSIZE_MAXIMUM # endif # if (defined __cplusplus && ! defined EXIT_SUCCESS \ && ! ((defined YYMALLOC || defined malloc) \ && (defined YYFREE || defined free))) # include <stdlib.h> /* INFRINGES ON USER NAME SPACE */ # ifndef EXIT_SUCCESS # define EXIT_SUCCESS 0 # endif # endif # ifndef YYMALLOC # define YYMALLOC malloc # if ! defined malloc && ! defined EXIT_SUCCESS void *malloc (YYSIZE_T); /* INFRINGES ON USER NAME SPACE */ # endif # endif # ifndef YYFREE # define YYFREE free # if ! defined free && ! defined EXIT_SUCCESS void free (void *); /* INFRINGES ON USER NAME SPACE */ # endif # endif # endif #endif /* ! defined yyoverflow || YYERROR_VERBOSE */ #if (! defined yyoverflow \ && (! defined __cplusplus \ || (defined YYSTYPE_IS_TRIVIAL && YYSTYPE_IS_TRIVIAL))) /* A type that is properly aligned for any stack member. */ union yyalloc { yytype_int16 yyss_alloc; YYSTYPE yyvs_alloc; }; /* The size of the maximum gap between one aligned stack and the next. */ # define YYSTACK_GAP_MAXIMUM (sizeof (union yyalloc) - 1) /* The size of an array large enough to hold all stacks, each with N elements. */ # define YYSTACK_BYTES(N) \ ((N) * (sizeof (yytype_int16) + sizeof (YYSTYPE)) \ + YYSTACK_GAP_MAXIMUM) # define YYCOPY_NEEDED 1 /* Relocate STACK from its old location to the new one. The local variables YYSIZE and YYSTACKSIZE give the old and new number of elements in the stack, and YYPTR gives the new location of the stack. Advance YYPTR to a properly aligned location for the next stack. */ # define YYSTACK_RELOCATE(Stack_alloc, Stack) \ do \ { \ YYSIZE_T yynewbytes; \ YYCOPY (&yyptr->Stack_alloc, Stack, yysize); \ Stack = &yyptr->Stack_alloc; \ yynewbytes = yystacksize * sizeof (*Stack) + YYSTACK_GAP_MAXIMUM; \ yyptr += yynewbytes / sizeof (*yyptr); \ } \ while (0) #endif #if defined YYCOPY_NEEDED && YYCOPY_NEEDED /* Copy COUNT objects from SRC to DST. The source and destination do not overlap. */ # ifndef YYCOPY # if defined __GNUC__ && 1 < __GNUC__ # define YYCOPY(Dst, Src, Count) \ __builtin_memcpy (Dst, Src, (Count) * sizeof (*(Src))) # else # define YYCOPY(Dst, Src, Count) \ do \ { \ YYSIZE_T yyi; \ for (yyi = 0; yyi < (Count); yyi++) \ (Dst)[yyi] = (Src)[yyi]; \ } \ while (0) # endif # endif #endif /* !YYCOPY_NEEDED */ /* YYFINAL -- State number of the termination state. */ #define YYFINAL 11 /* YYLAST -- Last index in YYTABLE. */ #define YYLAST 301 /* YYNTOKENS -- Number of terminals. */ #define YYNTOKENS 41 /* YYNNTS -- Number of nonterminals. */ #define YYNNTS 50 /* YYNRULES -- Number of rules. */ #define YYNRULES 124 /* YYNSTATES -- Number of states. */ #define YYNSTATES 204 /* YYTRANSLATE[YYX] -- Symbol number corresponding to YYX as returned by yylex, with out-of-bounds checking. */ #define YYUNDEFTOK 2 #define YYMAXUTOK 295 #define YYTRANSLATE(YYX) \ ((unsigned int) (YYX) <= YYMAXUTOK ? yytranslate[YYX] : YYUNDEFTOK) /* YYTRANSLATE[TOKEN-NUM] -- Symbol number corresponding to TOKEN-NUM as returned by yylex, without out-of-bounds checking.
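 * (Worked example from the tables below: zconflex() reports T_MAINMENU as token 258; yytranslate[258] is 3, whose yytname entry is "T_MAINMENU".)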
*/ static const yytype_uint8 yytranslate[] = { 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40 }; #if YYDEBUG /* YYRLINE[YYN] -- Source line where rule number YYN was defined. */ static const yytype_uint16 yyrline[] = { 0, 109, 109, 109, 111, 111, 113, 115, 116, 117, 118, 119, 120, 124, 128, 128, 128, 128, 128, 128, 128, 128, 128, 132, 133, 134, 135, 136, 137, 141, 142, 148, 156, 162, 170, 180, 182, 183, 184, 185, 186, 187, 190, 198, 204, 214, 220, 226, 232, 235, 237, 248, 249, 254, 263, 268, 276, 279, 281, 282, 283, 284, 285, 288, 294, 305, 311, 321, 323, 328, 336, 344, 347, 349, 350, 351, 356, 363, 370, 375, 383, 386, 388, 389, 390, 393, 401, 408, 415, 421, 428, 430, 431, 432, 435, 443, 445, 446, 449, 456, 458, 463, 464, 467, 468, 469, 473, 474, 477, 478, 481, 482, 483, 484, 485, 486, 487, 488, 489, 490, 491, 494, 495, 498, 499 }; #endif #if YYDEBUG || YYERROR_VERBOSE || 0 /* YYTNAME[SYMBOL-NUM] -- String name of the symbol SYMBOL-NUM. First, the terminals, then, starting at YYNTOKENS, nonterminals. */ static const char *const yytname[] = { "$end", "error", "$undefined", "T_MAINMENU", "T_MENU", "T_ENDMENU", "T_SOURCE", "T_CHOICE", "T_ENDCHOICE", "T_COMMENT", "T_CONFIG", "T_MENUCONFIG", "T_HELP", "T_HELPTEXT", "T_IF", "T_ENDIF", "T_DEPENDS", "T_OPTIONAL", "T_PROMPT", "T_TYPE", "T_DEFAULT", "T_SELECT", "T_IMPLY", "T_RANGE", "T_VISIBLE", "T_OPTION", "T_ON", "T_WORD", "T_WORD_QUOTE", "T_UNEQUAL", "T_LESS", "T_LESS_EQUAL", "T_GREATER", "T_GREATER_EQUAL", "T_CLOSE_PAREN", "T_OPEN_PAREN", "T_EOL", "T_OR", "T_AND", "T_EQUAL", "T_NOT", "$accept", "input", "start", "stmt_list", "option_name", "common_stmt", "option_error", "config_entry_start", "config_stmt", "menuconfig_entry_start", "menuconfig_stmt", "config_option_list", "config_option", "symbol_option", "symbol_option_list", "symbol_option_arg", "choice", "choice_entry", "choice_end", "choice_stmt", "choice_option_list", "choice_option", "choice_block", "if_entry", "if_end", "if_stmt", "if_block", "mainmenu_stmt", "menu", "menu_entry", "menu_end", "menu_stmt", "menu_block", "source_stmt", "comment", "comment_stmt", "help_start", "help", "depends_list", "depends", "visibility_list", "visible", "prompt_stmt_opt", "prompt", "end", "nl", "if_expr", "expr", "symbol", "word_opt", YY_NULLPTR }; #endif # ifdef YYPRINT /* YYTOKNUM[NUM] -- (External) token number corresponding to the (internal) symbol number NUM (which must be that of a token). 
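 * (For real tokens this is the inverse of yytranslate: yytoknum[3] == 258, the external number of T_MAINMENU.)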
*/ static const yytype_uint16 yytoknum[] = { 0, 256, 257, 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, 289, 290, 291, 292, 293, 294, 295 }; # endif #define YYPACT_NINF -92 #define yypact_value_is_default(Yystate) \ (!!((Yystate) == (-92))) #define YYTABLE_NINF -88 #define yytable_value_is_error(Yytable_value) \ 0 /* YYPACT[STATE-NUM] -- Index in YYTABLE of the portion describing STATE-NUM. */ static const yytype_int16 yypact[] = { 17, 41, -92, 15, -92, 150, -92, 19, -92, -92, -13, -92, 28, 41, 38, 41, 50, 47, 41, 79, 82, 44, 76, -92, -92, -92, -92, -92, -92, -92, -92, -92, 118, -92, 129, -92, -92, -92, -92, -92, -92, -92, -92, -92, -92, -92, -92, -92, -92, -92, -92, -92, 184, -92, -92, 107, -92, 111, -92, 113, -92, 116, -92, 139, 140, 151, -92, -92, 44, 44, 142, 256, -92, 160, 173, 27, 117, 80, 51, 255, -15, 255, 217, -92, -92, -92, -92, -92, -92, -8, -92, 44, 44, 107, 87, 87, 87, 87, 87, 87, -92, -92, 174, 176, 187, 41, 41, 44, 188, 189, 87, -92, 213, -92, -92, -92, -92, 206, -92, -92, 193, 41, 41, 203, -92, -92, -92, -92, -92, -92, -92, -92, -92, -92, -92, -92, -92, 229, -92, 241, -92, -92, -92, -92, -92, -92, -92, -92, -92, -92, 216, -92, -92, -92, -92, -92, -92, -92, -92, -92, 44, 229, 222, 229, 64, 229, 229, 87, 31, 231, -92, -92, 229, 236, 229, 44, -92, 145, 242, -92, -92, 243, 244, 245, 229, 251, -92, -92, 247, -92, 257, 125, -92, -92, -92, -92, -92, 260, 41, -92, -92, -92, -92, -92 }; /* YYDEFACT[STATE-NUM] -- Default reduction number in state STATE-NUM. Performed when YYTABLE does not specify something else to do. Zero means the default is an error. */ static const yytype_uint8 yydefact[] = { 6, 0, 106, 0, 3, 0, 6, 6, 101, 102, 0, 1, 0, 0, 0, 0, 123, 0, 0, 0, 0, 0, 0, 14, 19, 15, 16, 21, 17, 18, 20, 22, 0, 23, 0, 7, 35, 26, 35, 27, 57, 67, 8, 72, 24, 95, 81, 9, 28, 90, 25, 10, 0, 107, 2, 76, 13, 0, 103, 0, 124, 0, 104, 0, 0, 0, 121, 122, 0, 0, 0, 110, 105, 0, 0, 0, 0, 0, 0, 0, 90, 0, 0, 77, 85, 53, 86, 31, 33, 0, 118, 0, 0, 69, 0, 0, 0, 0, 0, 0, 11, 12, 0, 0, 0, 0, 99, 0, 0, 0, 0, 49, 0, 41, 40, 36, 37, 0, 39, 38, 0, 0, 99, 0, 61, 62, 58, 60, 59, 68, 56, 55, 73, 75, 71, 74, 70, 108, 97, 0, 96, 82, 84, 80, 83, 79, 92, 93, 91, 117, 119, 120, 116, 111, 112, 113, 114, 115, 30, 88, 0, 108, 0, 108, 108, 108, 108, 0, 0, 0, 89, 65, 108, 0, 108, 0, 98, 0, 0, 42, 100, 0, 0, 0, 108, 51, 48, 29, 0, 64, 0, 109, 94, 43, 44, 45, 46, 0, 0, 50, 63, 66, 47, 52 }; /* YYPGOTO[NTERM-NUM]. */ static const yytype_int16 yypgoto[] = { -92, -92, 285, 291, -92, 32, -66, -92, -92, -92, -92, 261, -92, -92, -92, -92, -92, -92, -92, 1, -92, -92, -92, -92, -92, -92, -92, -92, -92, -92, -92, 24, -92, -92, -92, -92, -92, 221, 220, -64, -92, -92, 179, -1, 67, 0, 110, -67, -91, -92 }; /* YYDEFGOTO[NTERM-NUM]. */ static const yytype_int16 yydefgoto[] = { -1, 3, 4, 5, 34, 35, 114, 36, 37, 38, 39, 75, 115, 116, 168, 199, 40, 41, 130, 42, 77, 126, 78, 43, 134, 44, 79, 6, 45, 46, 143, 47, 81, 48, 49, 50, 117, 118, 82, 119, 80, 140, 162, 163, 51, 7, 176, 70, 71, 61 }; /* YYTABLE[YYPACT[STATE-NUM]] -- What to do in state STATE-NUM. If positive, shift that token. If negative, reduce the rule whose number is the opposite. If YYTABLE_NINF, syntax error. 
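 * (Example: an entry of -32 means "reduce by rule 32"; the companion yycheck array is consulted first so that states sharing compressed table rows are not confused.)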
*/ static const yytype_int16 yytable[] = { 10, 89, 90, 152, 153, 154, 155, 156, 157, 137, 55, 125, 57, 128, 59, 11, 147, 63, 148, 167, 1, 138, 1, 2, 150, 151, 149, -32, 102, 91, 92, -32, -32, -32, -32, -32, -32, -32, -32, 103, 164, -32, -32, 104, -32, 105, 106, 107, 108, 109, 110, -32, 111, 2, 112, 53, 14, 15, 185, 17, 18, 19, 20, 113, 56, 21, 22, 186, 8, 9, 93, 66, 67, 147, 58, 148, 184, 60, 175, 68, 133, 102, 142, 62, 69, -54, -54, 33, -54, -54, -54, -54, 103, 177, -54, -54, 104, 120, 121, 122, 123, 91, 92, 135, 161, 144, 64, 112, 191, 65, 129, 132, 72, 141, 66, 67, 124, -34, 102, 73, 172, -34, -34, -34, -34, -34, -34, -34, -34, 103, 74, -34, -34, 104, -34, 105, 106, 107, 108, 109, 110, -34, 111, 53, 112, 131, 136, 83, 145, 84, -5, 12, 85, 113, 13, 14, 15, 16, 17, 18, 19, 20, 91, 92, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 86, 87, 32, 2, 91, 92, 192, 91, 92, -4, 12, 33, 88, 13, 14, 15, 16, 17, 18, 19, 20, 100, 203, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 101, 158, 32, 159, 160, 169, 165, 166, -87, 102, 170, 33, -87, -87, -87, -87, -87, -87, -87, -87, 171, 174, -87, -87, 104, -87, -87, -87, -87, -87, -87, -87, -87, 102, 175, 112, -78, -78, -78, -78, -78, -78, -78, -78, 146, 92, -78, -78, 104, 179, 13, 14, 15, 16, 17, 18, 19, 20, 187, 112, 21, 22, 178, 189, 180, 181, 182, 183, 146, 193, 194, 195, 196, 188, 200, 190, 94, 95, 96, 97, 98, 198, 33, 54, 201, 197, 99, 202, 52, 127, 76, 139, 173 }; static const yytype_uint8 yycheck[] = { 1, 68, 69, 94, 95, 96, 97, 98, 99, 24, 10, 77, 13, 77, 15, 0, 82, 18, 82, 110, 3, 36, 3, 36, 91, 92, 34, 0, 1, 37, 38, 4, 5, 6, 7, 8, 9, 10, 11, 12, 107, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 36, 27, 36, 5, 6, 27, 8, 9, 10, 11, 36, 36, 14, 15, 36, 27, 28, 70, 27, 28, 139, 36, 139, 167, 27, 14, 35, 79, 1, 81, 36, 40, 5, 6, 36, 8, 9, 10, 11, 12, 160, 14, 15, 16, 17, 18, 19, 20, 37, 38, 79, 105, 81, 27, 27, 175, 27, 78, 79, 36, 81, 27, 28, 36, 0, 1, 1, 121, 4, 5, 6, 7, 8, 9, 10, 11, 12, 1, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 36, 27, 78, 79, 36, 81, 36, 0, 1, 36, 36, 4, 5, 6, 7, 8, 9, 10, 11, 37, 38, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 36, 36, 27, 36, 37, 38, 36, 37, 38, 0, 1, 36, 36, 4, 5, 6, 7, 8, 9, 10, 11, 36, 198, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 36, 36, 27, 36, 26, 1, 27, 27, 0, 1, 13, 36, 4, 5, 6, 7, 8, 9, 10, 11, 36, 27, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 1, 14, 27, 4, 5, 6, 7, 8, 9, 10, 11, 36, 38, 14, 15, 16, 36, 4, 5, 6, 7, 8, 9, 10, 11, 36, 27, 14, 15, 161, 36, 163, 164, 165, 166, 36, 36, 36, 36, 36, 172, 36, 174, 29, 30, 31, 32, 33, 39, 36, 7, 36, 184, 39, 36, 6, 77, 38, 80, 122 }; /* YYSTOS[STATE-NUM] -- The (internal number of the) accessing symbol of state STATE-NUM. 
*/ static const yytype_uint8 yystos[] = { 0, 3, 36, 42, 43, 44, 68, 86, 27, 28, 84, 0, 1, 4, 5, 6, 7, 8, 9, 10, 11, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 27, 36, 45, 46, 48, 49, 50, 51, 57, 58, 60, 64, 66, 69, 70, 72, 74, 75, 76, 85, 44, 36, 43, 86, 36, 84, 36, 84, 27, 90, 36, 84, 27, 27, 27, 28, 35, 40, 88, 89, 36, 1, 1, 52, 52, 61, 63, 67, 81, 73, 79, 36, 36, 36, 36, 36, 36, 88, 88, 37, 38, 86, 29, 30, 31, 32, 33, 39, 36, 36, 1, 12, 16, 18, 19, 20, 21, 22, 23, 25, 27, 36, 47, 53, 54, 77, 78, 80, 17, 18, 19, 20, 36, 47, 62, 78, 80, 46, 59, 85, 46, 60, 65, 72, 85, 24, 36, 79, 82, 46, 60, 71, 72, 85, 36, 47, 80, 34, 88, 88, 89, 89, 89, 89, 89, 89, 36, 36, 26, 84, 83, 84, 88, 27, 27, 89, 55, 1, 13, 36, 84, 83, 27, 14, 87, 88, 87, 36, 87, 87, 87, 87, 89, 27, 36, 36, 87, 36, 87, 88, 36, 36, 36, 36, 36, 87, 39, 56, 36, 36, 36, 84 }; /* YYR1[YYN] -- Symbol number of symbol that rule YYN derives. */ static const yytype_uint8 yyr1[] = { 0, 41, 42, 42, 43, 43, 44, 44, 44, 44, 44, 44, 44, 44, 45, 45, 45, 45, 45, 45, 45, 45, 45, 46, 46, 46, 46, 46, 46, 47, 47, 48, 49, 50, 51, 52, 52, 52, 52, 52, 52, 52, 53, 53, 53, 53, 53, 53, 54, 55, 55, 56, 56, 57, 58, 59, 60, 61, 61, 61, 61, 61, 61, 62, 62, 62, 62, 63, 63, 64, 65, 66, 67, 67, 67, 67, 68, 69, 70, 71, 72, 73, 73, 73, 73, 74, 75, 76, 77, 78, 79, 79, 79, 79, 80, 81, 81, 81, 82, 83, 83, 84, 84, 85, 85, 85, 86, 86, 87, 87, 88, 88, 88, 88, 88, 88, 88, 88, 88, 88, 88, 89, 89, 90, 90 }; /* YYR2[YYN] -- Number of symbols on the right hand side of rule YYN. */ static const yytype_uint8 yyr2[] = { 0, 2, 2, 1, 2, 1, 0, 2, 2, 2, 2, 4, 4, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 2, 3, 2, 3, 2, 0, 2, 2, 2, 2, 2, 2, 3, 4, 4, 4, 4, 5, 3, 0, 3, 0, 2, 3, 2, 1, 3, 0, 2, 2, 2, 2, 2, 4, 3, 2, 4, 0, 2, 3, 1, 3, 0, 2, 2, 2, 3, 3, 3, 1, 3, 0, 2, 2, 2, 3, 3, 2, 2, 2, 0, 2, 2, 2, 4, 0, 2, 2, 2, 0, 2, 1, 1, 2, 2, 2, 1, 2, 0, 2, 1, 3, 3, 3, 3, 3, 3, 3, 2, 3, 3, 1, 1, 0, 1 }; #define yyerrok (yyerrstatus = 0) #define yyclearin (yychar = YYEMPTY) #define YYEMPTY (-2) #define YYEOF 0 #define YYACCEPT goto yyacceptlab #define YYABORT goto yyabortlab #define YYERROR goto yyerrorlab #define YYRECOVERING() (!!yyerrstatus) #define YYBACKUP(Token, Value) \ do \ if (yychar == YYEMPTY) \ { \ yychar = (Token); \ yylval = (Value); \ YYPOPSTACK (yylen); \ yystate = *yyssp; \ goto yybackup; \ } \ else \ { \ yyerror (YY_("syntax error: cannot back up")); \ YYERROR; \ } \ while (0) /* Error token number */ #define YYTERROR 1 #define YYERRCODE 256 /* Enable debugging if requested. */ #if YYDEBUG # ifndef YYFPRINTF # include /* INFRINGES ON USER NAME SPACE */ # define YYFPRINTF fprintf # endif # define YYDPRINTF(Args) \ do { \ if (yydebug) \ YYFPRINTF Args; \ } while (0) /* This macro is provided for backward compatibility. */ #ifndef YY_LOCATION_PRINT # define YY_LOCATION_PRINT(File, Loc) ((void) 0) #endif # define YY_SYMBOL_PRINT(Title, Type, Value, Location) \ do { \ if (yydebug) \ { \ YYFPRINTF (stderr, "%s ", Title); \ yy_symbol_print (stderr, \ Type, Value); \ YYFPRINTF (stderr, "\n"); \ } \ } while (0) /*----------------------------------------. | Print this symbol's value on YYOUTPUT. | `----------------------------------------*/ static void yy_symbol_value_print (FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep) { FILE *yyo = yyoutput; YYUSE (yyo); if (!yyvaluep) return; # ifdef YYPRINT if (yytype < YYNTOKENS) YYPRINT (yyoutput, yytoknum[yytype], *yyvaluep); # endif YYUSE (yytype); } /*--------------------------------. | Print this symbol on YYOUTPUT. 
| `--------------------------------*/ static void yy_symbol_print (FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep) { YYFPRINTF (yyoutput, "%s %s (", yytype < YYNTOKENS ? "token" : "nterm", yytname[yytype]); yy_symbol_value_print (yyoutput, yytype, yyvaluep); YYFPRINTF (yyoutput, ")"); } /*------------------------------------------------------------------. | yy_stack_print -- Print the state stack from its BOTTOM up to its | | TOP (included). | `------------------------------------------------------------------*/ static void yy_stack_print (yytype_int16 *yybottom, yytype_int16 *yytop) { YYFPRINTF (stderr, "Stack now"); for (; yybottom <= yytop; yybottom++) { int yybot = *yybottom; YYFPRINTF (stderr, " %d", yybot); } YYFPRINTF (stderr, "\n"); } # define YY_STACK_PRINT(Bottom, Top) \ do { \ if (yydebug) \ yy_stack_print ((Bottom), (Top)); \ } while (0) /*------------------------------------------------. | Report that the YYRULE is going to be reduced. | `------------------------------------------------*/ static void yy_reduce_print (yytype_int16 *yyssp, YYSTYPE *yyvsp, int yyrule) { unsigned long int yylno = yyrline[yyrule]; int yynrhs = yyr2[yyrule]; int yyi; YYFPRINTF (stderr, "Reducing stack by rule %d (line %lu):\n", yyrule - 1, yylno); /* The symbols being reduced. */ for (yyi = 0; yyi < yynrhs; yyi++) { YYFPRINTF (stderr, " $%d = ", yyi + 1); yy_symbol_print (stderr, yystos[yyssp[yyi + 1 - yynrhs]], &(yyvsp[(yyi + 1) - (yynrhs)]) ); YYFPRINTF (stderr, "\n"); } } # define YY_REDUCE_PRINT(Rule) \ do { \ if (yydebug) \ yy_reduce_print (yyssp, yyvsp, Rule); \ } while (0) /* Nonzero means print parse trace. It is left uninitialized so that multiple parsers can coexist. */ int yydebug; #else /* !YYDEBUG */ # define YYDPRINTF(Args) # define YY_SYMBOL_PRINT(Title, Type, Value, Location) # define YY_STACK_PRINT(Bottom, Top) # define YY_REDUCE_PRINT(Rule) #endif /* !YYDEBUG */ /* YYINITDEPTH -- initial size of the parser's stacks. */ #ifndef YYINITDEPTH # define YYINITDEPTH 200 #endif /* YYMAXDEPTH -- maximum size the stacks can grow to (effective only if the built-in stack extension method is used). Do not make this value too large; the results are undefined if YYSTACK_ALLOC_MAXIMUM < YYSTACK_BYTES (YYMAXDEPTH) evaluated with infinite-precision integer arithmetic. */ #ifndef YYMAXDEPTH # define YYMAXDEPTH 10000 #endif #if YYERROR_VERBOSE # ifndef yystrlen # if defined __GLIBC__ && defined _STRING_H # define yystrlen strlen # else /* Return the length of YYSTR. */ static YYSIZE_T yystrlen (const char *yystr) { YYSIZE_T yylen; for (yylen = 0; yystr[yylen]; yylen++) continue; return yylen; } # endif # endif # ifndef yystpcpy # if defined __GLIBC__ && defined _STRING_H && defined _GNU_SOURCE # define yystpcpy stpcpy # else /* Copy YYSRC to YYDEST, returning the address of the terminating '\0' in YYDEST. */ static char * yystpcpy (char *yydest, const char *yysrc) { char *yyd = yydest; const char *yys = yysrc; while ((*yyd++ = *yys++) != '\0') continue; return yyd - 1; } # endif # endif # ifndef yytnamerr /* Copy to YYRES the contents of YYSTR after stripping away unnecessary quotes and backslashes, so that it's suitable for yyerror. The heuristic is that double-quoting is unnecessary unless the string contains an apostrophe, a comma, or backslash (other than backslash-backslash). YYSTR is taken from yytname. If YYRES is null, do not copy; instead, return the length of what the result would have been. 
*/ static YYSIZE_T yytnamerr (char *yyres, const char *yystr) { if (*yystr == '"') { YYSIZE_T yyn = 0; char const *yyp = yystr; for (;;) switch (*++yyp) { case '\'': case ',': goto do_not_strip_quotes; case '\\': if (*++yyp != '\\') goto do_not_strip_quotes; /* Fall through. */ default: if (yyres) yyres[yyn] = *yyp; yyn++; break; case '"': if (yyres) yyres[yyn] = '\0'; return yyn; } do_not_strip_quotes: ; } if (! yyres) return yystrlen (yystr); return yystpcpy (yyres, yystr) - yyres; } # endif /* Copy into *YYMSG, which is of size *YYMSG_ALLOC, an error message about the unexpected token YYTOKEN for the state stack whose top is YYSSP. Return 0 if *YYMSG was successfully written. Return 1 if *YYMSG is not large enough to hold the message. In that case, also set *YYMSG_ALLOC to the required number of bytes. Return 2 if the required number of bytes is too large to store. */ static int yysyntax_error (YYSIZE_T *yymsg_alloc, char **yymsg, yytype_int16 *yyssp, int yytoken) { YYSIZE_T yysize0 = yytnamerr (YY_NULLPTR, yytname[yytoken]); YYSIZE_T yysize = yysize0; enum { YYERROR_VERBOSE_ARGS_MAXIMUM = 5 }; /* Internationalized format string. */ const char *yyformat = YY_NULLPTR; /* Arguments of yyformat. */ char const *yyarg[YYERROR_VERBOSE_ARGS_MAXIMUM]; /* Number of reported tokens (one for the "unexpected", one per "expected"). */ int yycount = 0; /* There are many possibilities here to consider: - If this state is a consistent state with a default action, then the only way this function was invoked is if the default action is an error action. In that case, don't check for expected tokens because there are none. - The only way there can be no lookahead present (in yychar) is if this state is a consistent state with a default action. Thus, detecting the absence of a lookahead is sufficient to determine that there is no unexpected or expected token to report. In that case, just report a simple "syntax error". - Don't assume there isn't a lookahead just because this state is a consistent state with a default action. There might have been a previous inconsistent state, consistent state with a non-default action, or user semantic action that manipulated yychar. - Of course, the expected token list depends on states to have correct lookahead information, and it depends on the parser not to perform extra reductions after fetching a lookahead from the scanner and before detecting a syntax error. Thus, state merging (from LALR or IELR) and default reductions corrupt the expected token list. However, the list is correct for canonical LR with one exception: it will still contain any token that will not be accepted due to an error action in a later state. */ if (yytoken != YYEMPTY) { int yyn = yypact[*yyssp]; yyarg[yycount++] = yytname[yytoken]; if (!yypact_value_is_default (yyn)) { /* Start YYX at -YYN if negative to avoid negative indexes in YYCHECK. In other words, skip the first -YYN actions for this state because they are default actions. */ int yyxbegin = yyn < 0 ? -yyn : 0; /* Stay within bounds of both yycheck and yytname. */ int yychecklim = YYLAST - yyn + 1; int yyxend = yychecklim < YYNTOKENS ? yychecklim : YYNTOKENS; int yyx; for (yyx = yyxbegin; yyx < yyxend; ++yyx) if (yycheck[yyx + yyn] == yyx && yyx != YYTERROR && !yytable_value_is_error (yytable[yyx + yyn])) { if (yycount == YYERROR_VERBOSE_ARGS_MAXIMUM) { yycount = 1; yysize = yysize0; break; } yyarg[yycount++] = yytname[yyx]; { YYSIZE_T yysize1 = yysize + yytnamerr (YY_NULLPTR, yytname[yyx]); if (! 
(yysize <= yysize1 && yysize1 <= YYSTACK_ALLOC_MAXIMUM)) return 2; yysize = yysize1; } } } } switch (yycount) { # define YYCASE_(N, S) \ case N: \ yyformat = S; \ break YYCASE_(0, YY_("syntax error")); YYCASE_(1, YY_("syntax error, unexpected %s")); YYCASE_(2, YY_("syntax error, unexpected %s, expecting %s")); YYCASE_(3, YY_("syntax error, unexpected %s, expecting %s or %s")); YYCASE_(4, YY_("syntax error, unexpected %s, expecting %s or %s or %s")); YYCASE_(5, YY_("syntax error, unexpected %s, expecting %s or %s or %s or %s")); # undef YYCASE_ } { YYSIZE_T yysize1 = yysize + yystrlen (yyformat); if (! (yysize <= yysize1 && yysize1 <= YYSTACK_ALLOC_MAXIMUM)) return 2; yysize = yysize1; } if (*yymsg_alloc < yysize) { *yymsg_alloc = 2 * yysize; if (! (yysize <= *yymsg_alloc && *yymsg_alloc <= YYSTACK_ALLOC_MAXIMUM)) *yymsg_alloc = YYSTACK_ALLOC_MAXIMUM; return 1; } /* Avoid sprintf, as that infringes on the user's name space. Don't have undefined behavior even if the translation produced a string with the wrong number of "%s"s. */ { char *yyp = *yymsg; int yyi = 0; while ((*yyp = *yyformat) != '\0') if (*yyp == '%' && yyformat[1] == 's' && yyi < yycount) { yyp += yytnamerr (yyp, yyarg[yyi++]); yyformat += 2; } else { yyp++; yyformat++; } } return 0; } #endif /* YYERROR_VERBOSE */ /*-----------------------------------------------. | Release the memory associated to this symbol. | `-----------------------------------------------*/ static void yydestruct (const char *yymsg, int yytype, YYSTYPE *yyvaluep) { YYUSE (yyvaluep); if (!yymsg) yymsg = "Deleting"; YY_SYMBOL_PRINT (yymsg, yytype, yyvaluep, yylocationp); YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN switch (yytype) { case 58: /* choice_entry */ { fprintf(stderr, "%s:%d: missing end statement for this entry\n", ((*yyvaluep).menu)->file->name, ((*yyvaluep).menu)->lineno); if (current_menu == ((*yyvaluep).menu)) menu_end_menu(); } break; case 64: /* if_entry */ { fprintf(stderr, "%s:%d: missing end statement for this entry\n", ((*yyvaluep).menu)->file->name, ((*yyvaluep).menu)->lineno); if (current_menu == ((*yyvaluep).menu)) menu_end_menu(); } break; case 70: /* menu_entry */ { fprintf(stderr, "%s:%d: missing end statement for this entry\n", ((*yyvaluep).menu)->file->name, ((*yyvaluep).menu)->lineno); if (current_menu == ((*yyvaluep).menu)) menu_end_menu(); } break; default: break; } YY_IGNORE_MAYBE_UNINITIALIZED_END } /* The lookahead symbol. */ int yychar; /* The semantic value of the lookahead symbol. */ YYSTYPE yylval; /* Number of syntax errors so far. */ int yynerrs; /*----------. | yyparse. | `----------*/ int yyparse (void) { int yystate; /* Number of tokens to shift before error messages enabled. */ int yyerrstatus; /* The stacks and their tools: 'yyss': related to states. 'yyvs': related to semantic values. Refer to the stacks through separate pointers, to allow yyoverflow to reallocate them elsewhere. */ /* The state stack. */ yytype_int16 yyssa[YYINITDEPTH]; yytype_int16 *yyss; yytype_int16 *yyssp; /* The semantic value stack. */ YYSTYPE yyvsa[YYINITDEPTH]; YYSTYPE *yyvs; YYSTYPE *yyvsp; YYSIZE_T yystacksize; int yyn; int yyresult; /* Lookahead token as an internal (translated) token number. */ int yytoken = 0; /* The variables used to return semantic value and location from the action routines. */ YYSTYPE yyval; #if YYERROR_VERBOSE /* Buffer for error messages, and its allocated size. 
*/ char yymsgbuf[128]; char *yymsg = yymsgbuf; YYSIZE_T yymsg_alloc = sizeof yymsgbuf; #endif #define YYPOPSTACK(N) (yyvsp -= (N), yyssp -= (N)) /* The number of symbols on the RHS of the reduced rule. Keep to zero when no symbol should be popped. */ int yylen = 0; yyssp = yyss = yyssa; yyvsp = yyvs = yyvsa; yystacksize = YYINITDEPTH; YYDPRINTF ((stderr, "Starting parse\n")); yystate = 0; yyerrstatus = 0; yynerrs = 0; yychar = YYEMPTY; /* Cause a token to be read. */ goto yysetstate; /*------------------------------------------------------------. | yynewstate -- Push a new state, which is found in yystate. | `------------------------------------------------------------*/ yynewstate: /* In all cases, when you get here, the value and location stacks have just been pushed. So pushing a state here evens the stacks. */ yyssp++; yysetstate: *yyssp = yystate; if (yyss + yystacksize - 1 <= yyssp) { /* Get the current used size of the three stacks, in elements. */ YYSIZE_T yysize = yyssp - yyss + 1; #ifdef yyoverflow { /* Give user a chance to reallocate the stack. Use copies of these so that the &'s don't force the real ones into memory. */ YYSTYPE *yyvs1 = yyvs; yytype_int16 *yyss1 = yyss; /* Each stack pointer address is followed by the size of the data in use in that stack, in bytes. This used to be a conditional around just the two extra args, but that might be undefined if yyoverflow is a macro. */ yyoverflow (YY_("memory exhausted"), &yyss1, yysize * sizeof (*yyssp), &yyvs1, yysize * sizeof (*yyvsp), &yystacksize); yyss = yyss1; yyvs = yyvs1; } #else /* no yyoverflow */ # ifndef YYSTACK_RELOCATE goto yyexhaustedlab; # else /* Extend the stack our own way. */ if (YYMAXDEPTH <= yystacksize) goto yyexhaustedlab; yystacksize *= 2; if (YYMAXDEPTH < yystacksize) yystacksize = YYMAXDEPTH; { yytype_int16 *yyss1 = yyss; union yyalloc *yyptr = (union yyalloc *) YYSTACK_ALLOC (YYSTACK_BYTES (yystacksize)); if (! yyptr) goto yyexhaustedlab; YYSTACK_RELOCATE (yyss_alloc, yyss); YYSTACK_RELOCATE (yyvs_alloc, yyvs); # undef YYSTACK_RELOCATE if (yyss1 != yyssa) YYSTACK_FREE (yyss1); } # endif #endif /* no yyoverflow */ yyssp = yyss + yysize - 1; yyvsp = yyvs + yysize - 1; YYDPRINTF ((stderr, "Stack size increased to %lu\n", (unsigned long int) yystacksize)); if (yyss + yystacksize - 1 <= yyssp) YYABORT; } YYDPRINTF ((stderr, "Entering state %d\n", yystate)); if (yystate == YYFINAL) YYACCEPT; goto yybackup; /*-----------. | yybackup. | `-----------*/ yybackup: /* Do appropriate processing given the current state. Read a lookahead token if we need one and don't already have one. */ /* First try to decide what to do without reference to lookahead token. */ yyn = yypact[yystate]; if (yypact_value_is_default (yyn)) goto yydefault; /* Not known => get a lookahead token if don't already have one. */ /* YYCHAR is either YYEMPTY or YYEOF or a valid lookahead symbol. */ if (yychar == YYEMPTY) { YYDPRINTF ((stderr, "Reading a token: ")); yychar = yylex (); } if (yychar <= YYEOF) { yychar = yytoken = YYEOF; YYDPRINTF ((stderr, "Now at end of input.\n")); } else { yytoken = YYTRANSLATE (yychar); YY_SYMBOL_PRINT ("Next token is", yytoken, &yylval, &yylloc); } /* If the proper action on seeing token YYTOKEN is to reduce or to detect an error, take that action. 
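 * (yyn holds yypact[yystate]; adding the translated token indexes the compressed yytable, and the yycheck test just below rejects slots that belong to a different state sharing this table region.)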
*/ yyn += yytoken; if (yyn < 0 || YYLAST < yyn || yycheck[yyn] != yytoken) goto yydefault; yyn = yytable[yyn]; if (yyn <= 0) { if (yytable_value_is_error (yyn)) goto yyerrlab; yyn = -yyn; goto yyreduce; } /* Count tokens shifted since error; after three, turn off error status. */ if (yyerrstatus) yyerrstatus--; /* Shift the lookahead token. */ YY_SYMBOL_PRINT ("Shifting", yytoken, &yylval, &yylloc); /* Discard the shifted token. */ yychar = YYEMPTY; yystate = yyn; YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN *++yyvsp = yylval; YY_IGNORE_MAYBE_UNINITIALIZED_END goto yynewstate; /*-----------------------------------------------------------. | yydefault -- do the default action for the current state. | `-----------------------------------------------------------*/ yydefault: yyn = yydefact[yystate]; if (yyn == 0) goto yyerrlab; goto yyreduce; /*-----------------------------. | yyreduce -- Do a reduction. | `-----------------------------*/ yyreduce: /* yyn is the number of a rule to reduce with. */ yylen = yyr2[yyn]; /* If YYLEN is nonzero, implement the default value of the action: '$$ = $1'. Otherwise, the following line sets YYVAL to garbage. This behavior is undocumented and Bison users should not rely upon it. Assigning to YYVAL unconditionally makes the parser a bit smaller, and it avoids a GCC warning that YYVAL may be used uninitialized. */ yyval = yyvsp[1-yylen]; YY_REDUCE_PRINT (yyn); switch (yyn) { case 10: { zconf_error("unexpected end statement"); } break; case 11: { zconf_error("unknown statement \"%s\"", (yyvsp[-2].string)); } break; case 12: { zconf_error("unexpected option \"%s\"", kconf_id_strings + (yyvsp[-2].id)->name); } break; case 13: { zconf_error("invalid statement"); } break; case 29: { zconf_error("unknown option \"%s\"", (yyvsp[-2].string)); } break; case 30: { zconf_error("invalid option"); } break; case 31: { struct symbol *sym = sym_lookup((yyvsp[-1].string), 0); sym->flags |= SYMBOL_OPTIONAL; menu_add_entry(sym); printd(DEBUG_PARSE, "%s:%d:config %s\n", zconf_curname(), zconf_lineno(), (yyvsp[-1].string)); } break; case 32: { menu_end_entry(); printd(DEBUG_PARSE, "%s:%d:endconfig\n", zconf_curname(), zconf_lineno()); } break; case 33: { struct symbol *sym = sym_lookup((yyvsp[-1].string), 0); sym->flags |= SYMBOL_OPTIONAL; menu_add_entry(sym); printd(DEBUG_PARSE, "%s:%d:menuconfig %s\n", zconf_curname(), zconf_lineno(), (yyvsp[-1].string)); } break; case 34: { if (current_entry->prompt) current_entry->prompt->type = P_MENU; else zconfprint("warning: menuconfig statement without prompt"); menu_end_entry(); printd(DEBUG_PARSE, "%s:%d:endconfig\n", zconf_curname(), zconf_lineno()); } break; case 42: { menu_set_type((yyvsp[-2].id)->stype); printd(DEBUG_PARSE, "%s:%d:type(%u)\n", zconf_curname(), zconf_lineno(), (yyvsp[-2].id)->stype); } break; case 43: { menu_add_prompt(P_PROMPT, (yyvsp[-2].string), (yyvsp[-1].expr)); printd(DEBUG_PARSE, "%s:%d:prompt\n", zconf_curname(), zconf_lineno()); } break; case 44: { menu_add_expr(P_DEFAULT, (yyvsp[-2].expr), (yyvsp[-1].expr)); if ((yyvsp[-3].id)->stype != S_UNKNOWN) menu_set_type((yyvsp[-3].id)->stype); printd(DEBUG_PARSE, "%s:%d:default(%u)\n", zconf_curname(), zconf_lineno(), (yyvsp[-3].id)->stype); } break; case 45: { menu_add_symbol(P_SELECT, sym_lookup((yyvsp[-2].string), 0), (yyvsp[-1].expr)); printd(DEBUG_PARSE, "%s:%d:select\n", zconf_curname(), zconf_lineno()); } break; case 46: { menu_add_symbol(P_IMPLY, sym_lookup((yyvsp[-2].string), 0), (yyvsp[-1].expr)); printd(DEBUG_PARSE, "%s:%d:imply\n", zconf_curname(), 
zconf_lineno()); } break; case 47: { menu_add_expr(P_RANGE, expr_alloc_comp(E_RANGE,(yyvsp[-3].symbol), (yyvsp[-2].symbol)), (yyvsp[-1].expr)); printd(DEBUG_PARSE, "%s:%d:range\n", zconf_curname(), zconf_lineno()); } break; case 50: { const struct kconf_id *id = kconf_id_lookup((yyvsp[-1].string), strlen((yyvsp[-1].string))); if (id && id->flags & TF_OPTION) menu_add_option(id->token, (yyvsp[0].string)); else zconfprint("warning: ignoring unknown option %s", (yyvsp[-1].string)); free((yyvsp[-1].string)); } break; case 51: { (yyval.string) = NULL; } break; case 52: { (yyval.string) = (yyvsp[0].string); } break; case 53: { struct symbol *sym = sym_lookup((yyvsp[-1].string), SYMBOL_CHOICE); sym->flags |= SYMBOL_AUTO; menu_add_entry(sym); menu_add_expr(P_CHOICE, NULL, NULL); printd(DEBUG_PARSE, "%s:%d:choice\n", zconf_curname(), zconf_lineno()); } break; case 54: { (yyval.menu) = menu_add_menu(); } break; case 55: { if (zconf_endtoken((yyvsp[0].id), T_CHOICE, T_ENDCHOICE)) { menu_end_menu(); printd(DEBUG_PARSE, "%s:%d:endchoice\n", zconf_curname(), zconf_lineno()); } } break; case 63: { menu_add_prompt(P_PROMPT, (yyvsp[-2].string), (yyvsp[-1].expr)); printd(DEBUG_PARSE, "%s:%d:prompt\n", zconf_curname(), zconf_lineno()); } break; case 64: { if ((yyvsp[-2].id)->stype == S_BOOLEAN || (yyvsp[-2].id)->stype == S_TRISTATE) { menu_set_type((yyvsp[-2].id)->stype); printd(DEBUG_PARSE, "%s:%d:type(%u)\n", zconf_curname(), zconf_lineno(), (yyvsp[-2].id)->stype); } else YYERROR; } break; case 65: { current_entry->sym->flags |= SYMBOL_OPTIONAL; printd(DEBUG_PARSE, "%s:%d:optional\n", zconf_curname(), zconf_lineno()); } break; case 66: { if ((yyvsp[-3].id)->stype == S_UNKNOWN) { menu_add_symbol(P_DEFAULT, sym_lookup((yyvsp[-2].string), 0), (yyvsp[-1].expr)); printd(DEBUG_PARSE, "%s:%d:default\n", zconf_curname(), zconf_lineno()); } else YYERROR; } break; case 69: { printd(DEBUG_PARSE, "%s:%d:if\n", zconf_curname(), zconf_lineno()); menu_add_entry(NULL); menu_add_dep((yyvsp[-1].expr)); (yyval.menu) = menu_add_menu(); } break; case 70: { if (zconf_endtoken((yyvsp[0].id), T_IF, T_ENDIF)) { menu_end_menu(); printd(DEBUG_PARSE, "%s:%d:endif\n", zconf_curname(), zconf_lineno()); } } break; case 76: { menu_add_prompt(P_MENU, (yyvsp[-1].string), NULL); } break; case 77: { menu_add_entry(NULL); menu_add_prompt(P_MENU, (yyvsp[-1].string), NULL); printd(DEBUG_PARSE, "%s:%d:menu\n", zconf_curname(), zconf_lineno()); } break; case 78: { (yyval.menu) = menu_add_menu(); } break; case 79: { if (zconf_endtoken((yyvsp[0].id), T_MENU, T_ENDMENU)) { menu_end_menu(); printd(DEBUG_PARSE, "%s:%d:endmenu\n", zconf_curname(), zconf_lineno()); } } break; case 85: { printd(DEBUG_PARSE, "%s:%d:source %s\n", zconf_curname(), zconf_lineno(), (yyvsp[-1].string)); zconf_nextfile((yyvsp[-1].string)); } break; case 86: { menu_add_entry(NULL); menu_add_prompt(P_COMMENT, (yyvsp[-1].string), NULL); printd(DEBUG_PARSE, "%s:%d:comment\n", zconf_curname(), zconf_lineno()); } break; case 87: { menu_end_entry(); } break; case 88: { printd(DEBUG_PARSE, "%s:%d:help\n", zconf_curname(), zconf_lineno()); zconf_starthelp(); } break; case 89: { current_entry->help = (yyvsp[0].string); } break; case 94: { menu_add_dep((yyvsp[-1].expr)); printd(DEBUG_PARSE, "%s:%d:depends on\n", zconf_curname(), zconf_lineno()); } break; case 98: { menu_add_visibility((yyvsp[0].expr)); } break; case 100: { menu_add_prompt(P_PROMPT, (yyvsp[-1].string), (yyvsp[0].expr)); } break; case 103: { (yyval.id) = (yyvsp[-1].id); } break; case 104: { (yyval.id) = (yyvsp[-1].id); } 
break; case 105: { (yyval.id) = (yyvsp[-1].id); } break; case 108: { (yyval.expr) = NULL; } break; case 109: { (yyval.expr) = (yyvsp[0].expr); } break; case 110: { (yyval.expr) = expr_alloc_symbol((yyvsp[0].symbol)); } break; case 111: { (yyval.expr) = expr_alloc_comp(E_LTH, (yyvsp[-2].symbol), (yyvsp[0].symbol)); } break; case 112: { (yyval.expr) = expr_alloc_comp(E_LEQ, (yyvsp[-2].symbol), (yyvsp[0].symbol)); } break; case 113: { (yyval.expr) = expr_alloc_comp(E_GTH, (yyvsp[-2].symbol), (yyvsp[0].symbol)); } break; case 114: { (yyval.expr) = expr_alloc_comp(E_GEQ, (yyvsp[-2].symbol), (yyvsp[0].symbol)); } break; case 115: { (yyval.expr) = expr_alloc_comp(E_EQUAL, (yyvsp[-2].symbol), (yyvsp[0].symbol)); } break; case 116: { (yyval.expr) = expr_alloc_comp(E_UNEQUAL, (yyvsp[-2].symbol), (yyvsp[0].symbol)); } break; case 117: { (yyval.expr) = (yyvsp[-1].expr); } break; case 118: { (yyval.expr) = expr_alloc_one(E_NOT, (yyvsp[0].expr)); } break; case 119: { (yyval.expr) = expr_alloc_two(E_OR, (yyvsp[-2].expr), (yyvsp[0].expr)); } break; case 120: { (yyval.expr) = expr_alloc_two(E_AND, (yyvsp[-2].expr), (yyvsp[0].expr)); } break; case 121: { (yyval.symbol) = sym_lookup((yyvsp[0].string), 0); free((yyvsp[0].string)); } break; case 122: { (yyval.symbol) = sym_lookup((yyvsp[0].string), SYMBOL_CONST); free((yyvsp[0].string)); } break; case 123: { (yyval.string) = NULL; } break; default: break; } /* User semantic actions sometimes alter yychar, and that requires that yytoken be updated with the new translation. We take the approach of translating immediately before every use of yytoken. One alternative is translating here after every semantic action, but that translation would be missed if the semantic action invokes YYABORT, YYACCEPT, or YYERROR immediately after altering yychar or if it invokes YYBACKUP. In the case of YYABORT or YYACCEPT, an incorrect destructor might then be invoked immediately. In the case of YYERROR or YYBACKUP, subsequent parser actions might lead to an incorrect destructor call or verbose syntax error message before the lookahead is translated. */ YY_SYMBOL_PRINT ("-> $$ =", yyr1[yyn], &yyval, &yyloc); YYPOPSTACK (yylen); yylen = 0; YY_STACK_PRINT (yyss, yyssp); *++yyvsp = yyval; /* Now 'shift' the result of the reduction. Determine what state that goes to, based on the state we popped back to and the rule number reduced by. */ yyn = yyr1[yyn]; yystate = yypgoto[yyn - YYNTOKENS] + *yyssp; if (0 <= yystate && yystate <= YYLAST && yycheck[yystate] == *yyssp) yystate = yytable[yystate]; else yystate = yydefgoto[yyn - YYNTOKENS]; goto yynewstate; /*--------------------------------------. | yyerrlab -- here on detecting error. | `--------------------------------------*/ yyerrlab: /* Make sure we have latest lookahead translation. See comments at user semantic actions for why this is necessary. */ yytoken = yychar == YYEMPTY ? YYEMPTY : YYTRANSLATE (yychar); /* If not already recovering from an error, report this error. */ if (!yyerrstatus) { ++yynerrs; #if ! 
YYERROR_VERBOSE yyerror (YY_("syntax error")); #else # define YYSYNTAX_ERROR yysyntax_error (&yymsg_alloc, &yymsg, \ yyssp, yytoken) { char const *yymsgp = YY_("syntax error"); int yysyntax_error_status; yysyntax_error_status = YYSYNTAX_ERROR; if (yysyntax_error_status == 0) yymsgp = yymsg; else if (yysyntax_error_status == 1) { if (yymsg != yymsgbuf) YYSTACK_FREE (yymsg); yymsg = (char *) YYSTACK_ALLOC (yymsg_alloc); if (!yymsg) { yymsg = yymsgbuf; yymsg_alloc = sizeof yymsgbuf; yysyntax_error_status = 2; } else { yysyntax_error_status = YYSYNTAX_ERROR; yymsgp = yymsg; } } yyerror (yymsgp); if (yysyntax_error_status == 2) goto yyexhaustedlab; } # undef YYSYNTAX_ERROR #endif } if (yyerrstatus == 3) { /* If just tried and failed to reuse lookahead token after an error, discard it. */ if (yychar <= YYEOF) { /* Return failure if at end of input. */ if (yychar == YYEOF) YYABORT; } else { yydestruct ("Error: discarding", yytoken, &yylval); yychar = YYEMPTY; } } /* Else will try to reuse lookahead token after shifting the error token. */ goto yyerrlab1; /*---------------------------------------------------. | yyerrorlab -- error raised explicitly by YYERROR. | `---------------------------------------------------*/ yyerrorlab: /* Pacify compilers like GCC when the user code never invokes YYERROR and the label yyerrorlab therefore never appears in user code. */ if (/*CONSTCOND*/ 0) goto yyerrorlab; /* Do not reclaim the symbols of the rule whose action triggered this YYERROR. */ YYPOPSTACK (yylen); yylen = 0; YY_STACK_PRINT (yyss, yyssp); yystate = *yyssp; goto yyerrlab1; /*-------------------------------------------------------------. | yyerrlab1 -- common code for both syntax error and YYERROR. | `-------------------------------------------------------------*/ yyerrlab1: yyerrstatus = 3; /* Each real token shifted decrements this. */ for (;;) { yyn = yypact[yystate]; if (!yypact_value_is_default (yyn)) { yyn += YYTERROR; if (0 <= yyn && yyn <= YYLAST && yycheck[yyn] == YYTERROR) { yyn = yytable[yyn]; if (0 < yyn) break; } } /* Pop the current state because it cannot handle the error token. */ if (yyssp == yyss) YYABORT; yydestruct ("Error: popping", yystos[yystate], yyvsp); YYPOPSTACK (1); yystate = *yyssp; YY_STACK_PRINT (yyss, yyssp); } YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN *++yyvsp = yylval; YY_IGNORE_MAYBE_UNINITIALIZED_END /* Shift the error token. */ YY_SYMBOL_PRINT ("Shifting", yystos[yyn], yyvsp, yylsp); yystate = yyn; goto yynewstate; /*-------------------------------------. | yyacceptlab -- YYACCEPT comes here. | `-------------------------------------*/ yyacceptlab: yyresult = 0; goto yyreturn; /*-----------------------------------. | yyabortlab -- YYABORT comes here. | `-----------------------------------*/ yyabortlab: yyresult = 1; goto yyreturn; #if !defined yyoverflow || YYERROR_VERBOSE /*-------------------------------------------------. | yyexhaustedlab -- memory exhaustion comes here. | `-------------------------------------------------*/ yyexhaustedlab: yyerror (YY_("memory exhausted")); yyresult = 2; /* Fall through. */ #endif yyreturn: if (yychar != YYEMPTY) { /* Make sure we have latest lookahead translation. See comments at user semantic actions for why this is necessary. */ yytoken = YYTRANSLATE (yychar); yydestruct ("Cleanup: discarding lookahead", yytoken, &yylval); } /* Do not reclaim the symbols of the rule whose action triggered this YYABORT or YYACCEPT. 
*/ YYPOPSTACK (yylen); YY_STACK_PRINT (yyss, yyssp); while (yyssp != yyss) { yydestruct ("Cleanup: popping", yystos[*yyssp], yyvsp); YYPOPSTACK (1); } #ifndef yyoverflow if (yyss != yyssa) YYSTACK_FREE (yyss); #endif #if YYERROR_VERBOSE if (yymsg != yymsgbuf) YYSTACK_FREE (yymsg); #endif return yyresult; } void conf_parse(const char *name) { struct symbol *sym; int i; zconf_initscan(name); sym_init(); _menu_init(); rootmenu.prompt = menu_add_prompt(P_MENU, "Linux Kernel Configuration", NULL); if (getenv("ZCONF_DEBUG")) zconfdebug = 1; zconfparse(); if (zconfnerrs) exit(1); if (!modules_sym) modules_sym = sym_find( "n" ); rootmenu.prompt->text = _(rootmenu.prompt->text); rootmenu.prompt->text = sym_expand_string_value(rootmenu.prompt->text); menu_finalize(&rootmenu); for_all_symbols(i, sym) { if (sym_check_deps(sym)) zconfnerrs++; } if (zconfnerrs) exit(1); sym_set_change_count(1); } static const char *zconf_tokenname(int token) { switch (token) { case T_MENU: return "menu"; case T_ENDMENU: return "endmenu"; case T_CHOICE: return "choice"; case T_ENDCHOICE: return "endchoice"; case T_IF: return "if"; case T_ENDIF: return "endif"; case T_DEPENDS: return "depends"; case T_VISIBLE: return "visible"; } return ""; } static bool zconf_endtoken(const struct kconf_id *id, int starttoken, int endtoken) { if (id->token != endtoken) { zconf_error("unexpected '%s' within %s block", kconf_id_strings + id->name, zconf_tokenname(starttoken)); zconfnerrs++; return false; } if (current_menu->file != current_file) { zconf_error("'%s' in different file than '%s'", kconf_id_strings + id->name, zconf_tokenname(starttoken)); fprintf(stderr, "%s:%d: location of the '%s'\n", current_menu->file->name, current_menu->lineno, zconf_tokenname(starttoken)); zconfnerrs++; return false; } return true; } static void zconfprint(const char *err, ...) { va_list ap; fprintf(stderr, "%s:%d: ", zconf_curname(), zconf_lineno()); va_start(ap, err); vfprintf(stderr, err, ap); va_end(ap); fprintf(stderr, "\n"); } static void zconf_error(const char *err, ...) 
{ va_list ap; zconfnerrs++; fprintf(stderr, "%s:%d: ", zconf_curname(), zconf_lineno()); va_start(ap, err); vfprintf(stderr, err, ap); va_end(ap); fprintf(stderr, "\n"); } static void zconferror(const char *err) { fprintf(stderr, "%s:%d: %s\n", zconf_curname(), zconf_lineno() + 1, err); } static void print_quoted_string(FILE *out, const char *str) { const char *p; int len; putc('"', out); while ((p = strchr(str, '"'))) { len = p - str; if (len) fprintf(out, "%.*s", len, str); fputs("\\\"", out); str = p + 1; } fputs(str, out); putc('"', out); } static void print_symbol(FILE *out, struct menu *menu) { struct symbol *sym = menu->sym; struct property *prop; if (sym_is_choice(sym)) fprintf(out, "\nchoice\n"); else fprintf(out, "\nconfig %s\n", sym->name); switch (sym->type) { case S_BOOLEAN: fputs(" boolean\n", out); break; case S_TRISTATE: fputs(" tristate\n", out); break; case S_STRING: fputs(" string\n", out); break; case S_INT: fputs(" integer\n", out); break; case S_HEX: fputs(" hex\n", out); break; default: fputs(" ???\n", out); break; } for (prop = sym->prop; prop; prop = prop->next) { if (prop->menu != menu) continue; switch (prop->type) { case P_PROMPT: fputs(" prompt ", out); print_quoted_string(out, prop->text); if (!expr_is_yes(prop->visible.expr)) { fputs(" if ", out); expr_fprint(prop->visible.expr, out); } fputc('\n', out); break; case P_DEFAULT: fputs( " default ", out); expr_fprint(prop->expr, out); if (!expr_is_yes(prop->visible.expr)) { fputs(" if ", out); expr_fprint(prop->visible.expr, out); } fputc('\n', out); break; case P_CHOICE: fputs(" #choice value\n", out); break; case P_SELECT: fputs( " select ", out); expr_fprint(prop->expr, out); fputc('\n', out); break; case P_IMPLY: fputs( " imply ", out); expr_fprint(prop->expr, out); fputc('\n', out); break; case P_RANGE: fputs( " range ", out); expr_fprint(prop->expr, out); fputc('\n', out); break; case P_MENU: fputs( " menu ", out); print_quoted_string(out, prop->text); fputc('\n', out); break; default: fprintf(out, " unknown prop %d!\n", prop->type); break; } } if (menu->help) { int len = strlen(menu->help); while (menu->help[--len] == '\n') menu->help[len] = 0; fprintf(out, " help\n%s\n", menu->help); } } void zconfdump(FILE *out) { struct property *prop; struct symbol *sym; struct menu *menu; menu = rootmenu.list; while (menu) { if ((sym = menu->sym)) print_symbol(out, menu); else if ((prop = menu->prompt)) { switch (prop->type) { case P_COMMENT: fputs("\ncomment ", out); print_quoted_string(out, prop->text); fputs("\n", out); break; case P_MENU: fputs("\nmenu ", out); print_quoted_string(out, prop->text); fputs("\n", out); break; default: ; } if (!expr_is_yes(prop->visible.expr)) { fputs(" depends ", out); expr_fprint(prop->visible.expr, out); fputc('\n', out); } } if (menu->list) menu = menu->list; else if (menu->next) menu = menu->next; else while ((menu = menu->parent)) { if (menu->prompt && menu->prompt->type == P_MENU) fputs("\nendmenu\n", out); if (menu->next) { menu = menu->next; break; } } } } #include "zconf.lex.c" #include "util.c" #include "confdata.c" #include "expr.c" #include "symbol.c" #include "menu.c" backport-iwlwifi-dkms-7906/local-symbols000066400000000000000000000042141351064634500204050ustar00rootroot00000000000000BACKPORT_DIR= BACKPORTS_VERSION= BACKPORTED_KERNEL_VERSION= BACKPORTED_KERNEL_NAME= WIRELESS= NET_CORE= EXPERT= BP_MODULES= BPAUTO_BUILD_CORDIC= BPAUTO_CORDIC= BPAUTO_MII= BPAUTO_BUILD_LEDS= BPAUTO_NEW_LEDS= BPAUTO_LEDS_CLASS= BPAUTO_LEDS_TRIGGERS= BPAUTO_USERSEL_BUILD_ALL= 
BPAUTO_WANT_DEV_COREDUMP= BPAUTO_BUILD_WANT_DEV_COREDUMP= BPAUTO_RHASHTABLE= BPAUTO_BUCKET_LOCKS= BPAUTO_REFCOUNT= BPAUTO_SYSTEM_DATA_VERIFICATION= BPAUTO_BUILD_SYSTEM_DATA_VERIFICATION= BPAUTO_PUBLIC_KEY= BPAUTO_ASN1_DECODER= BPAUTO_PKCS7= CFG80211= NL80211_TESTMODE= CFG80211_DEVELOPER_WARNINGS= CFG80211_CERTIFICATION_ONUS= CFG80211_REQUIRE_SIGNED_REGDB= CFG80211_USE_KERNEL_REGDB_KEYS= CFG80211_EXTRA_REGDB_KEYDIR= CFG80211_REG_CELLULAR_HINTS= CFG80211_REG_RELAX_NO_IR= CFG80211_DEFAULT_PS= CFG80211_DEBUGFS= CFG80211_CRDA_SUPPORT= CFG80211_WEXT= CFG80211_WEXT_EXPORT= LIB80211= LIB80211_CRYPT_WEP= LIB80211_CRYPT_CCMP= LIB80211_CRYPT_TKIP= LIB80211_DEBUG= MAC80211= MAC80211_HAS_RC= MAC80211_RC_MINSTREL= MAC80211_RC_DEFAULT_MINSTREL= MAC80211_RC_DEFAULT= MAC80211_MESH= MAC80211_LEDS= MAC80211_DEBUGFS= MAC80211_MESSAGE_TRACING= MAC80211_DEBUG_MENU= MAC80211_NOINLINE= MAC80211_VERBOSE_DEBUG= MAC80211_MLME_DEBUG= MAC80211_STA_DEBUG= MAC80211_HT_DEBUG= MAC80211_OCB_DEBUG= MAC80211_IBSS_DEBUG= MAC80211_PS_DEBUG= MAC80211_MPL_DEBUG= MAC80211_MPATH_DEBUG= MAC80211_MHWMP_DEBUG= MAC80211_MESH_SYNC_DEBUG= MAC80211_MESH_CSA_DEBUG= MAC80211_MESH_PS_DEBUG= MAC80211_TDLS_DEBUG= MAC80211_DEBUG_COUNTERS= MAC80211_LATENCY_MEASUREMENTS= MAC80211_STA_HASH_MAX_SIZE= IWL_TIMEOUT_FACTOR= WLAN= WIRELESS_WDS= PCMCIA_RAYCS= PCMCIA_WL3501= MAC80211_HWSIM= USB_NET_RNDIS_WLAN= VIRT_WIFI= WLAN_VENDOR_INTEL= IWLWIFI= IWLWIFI_LEDS= IWLMVM= IWLWIFI_OPMODE_MODULAR= IWLWIFI_BCAST_FILTERING= IWLWIFI_DEBUG= IWLWIFI_DEBUGFS= IWLWIFI_DEVICE_TRACING= IWLWIFI_DEVICE_TESTMODE= IWLXVT= IWLWIFI_DBGM= IWLWIFI_NUM_CHANNELS= IWLWIFI_SUPPORT_DEBUG_OVERRIDES= IWLWIFI_THERMAL_DEBUGFS= IWLWIFI_FORCE_OFDM_RATE= IWLWIFI_BCAST_FILTERING= IWLMVM_DISABLE_P2P_MIMO= IWLMVM_TDLS_PEER_CACHE= IWLMVM_P2P_OPPPS_TEST_WA= IWLMVM_VENDOR_CMDS= IWLWIFI_DISALLOW_OLDER_FW= IWLWIFI_PCIE_FAKE_RXQS= IWLWIFI_NUM_STA_INTERFACES= REJECT_NONUPSTREAM_NL80211= backport-iwlwifi-dkms-7906/net/000077500000000000000000000000001351064634500164675ustar00rootroot00000000000000backport-iwlwifi-dkms-7906/net/Kconfig000066400000000000000000000344001351064634500177730ustar00rootroot00000000000000# # Network configuration # menuconfig NET bool "Networking support" select NLATTR select GENERIC_NET_UTILS select BPF ---help--- Unless you really know what you are doing, you should say Y here. The reason is that some programs need kernel networking support even when running on a stand-alone machine that isn't connected to any other computer. If you are upgrading from an older kernel, you should consider updating your networking tools too because changes in the kernel and the tools often go hand in hand. The tools are contained in the package net-tools, the location and version number of which are given in . For a general introduction to Linux networking, it is highly recommended to read the NET-HOWTO, available from . if NET config WANT_COMPAT_NETLINK_MESSAGES bool help This option can be selected by other options that need compat netlink messages. config COMPAT_NETLINK_MESSAGES def_bool y depends on COMPAT depends on WEXT_CORE || WANT_COMPAT_NETLINK_MESSAGES help This option makes it possible to send different netlink messages to tasks depending on whether the task is a compat task or not. To achieve this, you need to set skb_shinfo(skb)->frag_list to the compat skb before sending the skb, the netlink code will sort out which message to actually pass to the task. Newly written code should NEVER need this option but do compat-independent messages instead! 
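# The COMPAT_NETLINK_MESSAGES help above states the contract rather than an
# API: the sender builds both message layouts and chains the compat one off
# the native skb's frag_list. A minimal C sketch of that pattern follows;
# send_both() and the two pre-built skbs are assumed names for illustration,
# not code from this tree, but skb_shinfo()->frag_list is the hand-off point
# the help text documents:
#
#	/* msg: native-layout skb; compat_msg: compat-layout skb */
#	static int send_both(struct sock *nlsk, struct sk_buff *msg,
#			     struct sk_buff *compat_msg, u32 portid)
#	{
#		/* the netlink code picks the frag_list payload for compat tasks */
#		skb_shinfo(msg)->frag_list = compat_msg;
#		return nlmsg_unicast(nlsk, msg, portid);
#	}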
config NET_INGRESS bool config NET_EGRESS bool config SKB_EXTENSIONS bool menu "Networking options" source "net/packet/Kconfig" source "net/unix/Kconfig" source "net/tls/Kconfig" source "net/xfrm/Kconfig" source "net/iucv/Kconfig" source "net/smc/Kconfig" source "net/xdp/Kconfig" config INET bool "TCP/IP networking" select CRYPTO select CRYPTO_AES ---help--- These are the protocols used on the Internet and on most local Ethernets. It is highly recommended to say Y here (this will enlarge your kernel by about 400 KB), since some programs (e.g. the X window system) use TCP/IP even if your machine is not connected to any other computer. You will get the so-called loopback device which allows you to ping yourself (great fun, that!). For an excellent introduction to Linux networking, please read the Linux Networking HOWTO, available from . If you say Y here and also to "/proc file system support" and "Sysctl support" below, you can change various aspects of the behavior of the TCP/IP code by writing to the (virtual) files in /proc/sys/net/ipv4/*; the options are explained in the file . Short answer: say Y. if INET source "net/ipv4/Kconfig" source "net/ipv6/Kconfig" source "net/netlabel/Kconfig" endif # if INET config NETWORK_SECMARK bool "Security Marking" help This enables security marking of network packets, similar to nfmark, but designated for security purposes. If you are unsure how to answer this question, answer N. config NET_PTP_CLASSIFY def_bool n config NETWORK_PHY_TIMESTAMPING bool "Timestamping in PHY devices" select NET_PTP_CLASSIFY help This allows timestamping of network packets by PHYs with hardware timestamping capabilities. This option adds some overhead in the transmit and receive paths. If you are unsure how to answer this question, answer N. menuconfig NETFILTER bool "Network packet filtering framework (Netfilter)" ---help--- Netfilter is a framework for filtering and mangling network packets that pass through your Linux box. The most common use of packet filtering is to run your Linux box as a firewall protecting a local network from the Internet. The type of firewall provided by this kernel support is called a "packet filter", which means that it can reject individual network packets based on type, source, destination etc. The other kind of firewall, a "proxy-based" one, is more secure but more intrusive and more bothersome to set up; it inspects the network traffic much more closely, modifies it and has knowledge about the higher level protocols, which a packet filter lacks. Moreover, proxy-based firewalls often require changes to the programs running on the local clients. Proxy-based firewalls don't need support by the kernel, but they are often combined with a packet filter, which only works if you say Y here. You should also say Y here if you intend to use your Linux box as the gateway to the Internet for a local network of machines without globally valid IP addresses. This is called "masquerading": if one of the computers on your local network wants to send something to the outside, your box can "masquerade" as that computer, i.e. it forwards the traffic to the intended outside destination, but modifies the packets to make it look like they came from the firewall box itself. It works both ways: if the outside host replies, the Linux box will silently forward the traffic to the correct local computer. This way, the computers on your local net are completely invisible to the outside world, even though they can reach the outside and can receive replies. 
It is even possible to run globally visible servers from within a masqueraded local network using a mechanism called portforwarding. Masquerading is also often called NAT (Network Address Translation). Another use of Netfilter is in transparent proxying: if a machine on the local network tries to connect to an outside host, your Linux box can transparently forward the traffic to a local server, typically a caching proxy server. Yet another use of Netfilter is building a bridging firewall. Using a bridge with Network packet filtering enabled makes iptables "see" the bridged traffic. For filtering on the lower network and Ethernet protocols over the bridge, use ebtables (under bridge netfilter configuration). Various modules exist for netfilter which replace the previous masquerading (ipmasqadm), packet filtering (ipchains), transparent proxying, and portforwarding mechanisms. Please see under "iptables" for the location of these packages. if NETFILTER config NETFILTER_ADVANCED bool "Advanced netfilter configuration" depends on NETFILTER default y help If you say Y here you can select between all the netfilter modules. If you say N the more unusual ones will not be shown and the basic ones needed by most people will default to 'M'. If unsure, say Y. config BRIDGE_NETFILTER tristate "Bridged IP/ARP packets filtering" depends on BRIDGE depends on NETFILTER && INET depends on NETFILTER_ADVANCED select NETFILTER_FAMILY_BRIDGE select SKB_EXTENSIONS default m ---help--- Enabling this option will let arptables resp. iptables see bridged ARP resp. IP traffic. If you want a bridging firewall, you probably want this option enabled. Enabling or disabling this option doesn't enable or disable ebtables. If unsure, say N. source "net/netfilter/Kconfig" source "net/ipv4/netfilter/Kconfig" source "net/ipv6/netfilter/Kconfig" source "net/decnet/netfilter/Kconfig" source "net/bridge/netfilter/Kconfig" endif source "net/bpfilter/Kconfig" source "net/dccp/Kconfig" source "net/sctp/Kconfig" source "net/rds/Kconfig" source "net/tipc/Kconfig" source "net/atm/Kconfig" source "net/l2tp/Kconfig" source "net/802/Kconfig" source "net/bridge/Kconfig" source "net/dsa/Kconfig" source "net/8021q/Kconfig" source "net/decnet/Kconfig" source "net/llc/Kconfig" source "drivers/net/appletalk/Kconfig" source "net/x25/Kconfig" source "net/lapb/Kconfig" source "net/phonet/Kconfig" source "net/6lowpan/Kconfig" source "net/ieee802154/Kconfig" source "net/mac802154/Kconfig" source "net/sched/Kconfig" source "net/dcb/Kconfig" source "net/dns_resolver/Kconfig" source "net/batman-adv/Kconfig" source "net/openvswitch/Kconfig" source "net/vmw_vsock/Kconfig" source "net/netlink/Kconfig" source "net/mpls/Kconfig" source "net/nsh/Kconfig" source "net/hsr/Kconfig" source "net/switchdev/Kconfig" source "net/l3mdev/Kconfig" source "net/qrtr/Kconfig" source "net/ncsi/Kconfig" config RPS bool depends on SMP && SYSFS default y config RFS_ACCEL bool depends on RPS select CPU_RMAP default y config XPS bool depends on SMP default y config HWBM bool config CGROUP_NET_PRIO bool "Network priority cgroup" depends on CGROUPS select SOCK_CGROUP_DATA ---help--- Cgroup subsystem for use in assigning processes to network priorities on a per-interface basis. config CGROUP_NET_CLASSID bool "Network classid cgroup" depends on CGROUPS select SOCK_CGROUP_DATA ---help--- Cgroup subsystem for use as general purpose socket classid marker that is being used in cls_cgroup and for netfilter matching. 
config NET_RX_BUSY_POLL bool default y config BQL bool depends on SYSFS select DQL default y config BPF_JIT bool "enable BPF Just In Time compiler" depends on HAVE_CBPF_JIT || HAVE_EBPF_JIT depends on MODULES ---help--- Berkeley Packet Filter filtering capabilities are normally handled by an interpreter. This option allows kernel to generate a native code when filter is loaded in memory. This should speedup packet sniffing (libpcap/tcpdump). Note, admin should enable this feature changing: /proc/sys/net/core/bpf_jit_enable /proc/sys/net/core/bpf_jit_harden (optional) /proc/sys/net/core/bpf_jit_kallsyms (optional) config BPF_STREAM_PARSER bool "enable BPF STREAM_PARSER" depends on INET depends on BPF_SYSCALL depends on CGROUP_BPF select STREAM_PARSER select NET_SOCK_MSG ---help--- Enabling this allows a stream parser to be used with BPF_MAP_TYPE_SOCKMAP. BPF_MAP_TYPE_SOCKMAP provides a map type to use with network sockets. It can be used to enforce socket policy, implement socket redirects, etc. config NET_FLOW_LIMIT bool depends on RPS default y ---help--- The network stack has to drop packets when a receive processing CPU's backlog reaches netdev_max_backlog. If a few out of many active flows generate the vast majority of load, drop their traffic earlier to maintain capacity for the other flows. This feature provides servers with many clients some protection against DoS by a single (spoofed) flow that greatly exceeds average workload. menu "Network testing" config NET_PKTGEN tristate "Packet Generator (USE WITH CAUTION)" depends on INET && PROC_FS ---help--- This module will inject preconfigured packets, at a configurable rate, out of a given interface. It is used for network interface stress testing and performance analysis. If you don't understand what was just said, you don't need it: say N. Documentation on how to use the packet generator can be found at . To compile this code as a module, choose M here: the module will be called pktgen. config NET_DROP_MONITOR tristate "Network packet drop alerting service" depends on INET && TRACEPOINTS ---help--- This feature provides an alerting service to userspace in the event that packets are discarded in the network stack. Alerts are broadcast via netlink socket to any listening user space process. If you don't need network drop alerts, or if you are ok just checking the various proc files and other utilities for drop statistics, say N here. endmenu endmenu source "net/ax25/Kconfig" source "net/can/Kconfig" source "net/bluetooth/Kconfig" source "net/rxrpc/Kconfig" source "net/kcm/Kconfig" source "net/strparser/Kconfig" config FIB_RULES bool menuconfig WIRELESS bool "Wireless" depends on !S390 default y if WIRELESS source "net/wireless/Kconfig" source "net/mac80211/Kconfig" endif # WIRELESS source "net/wimax/Kconfig" source "net/rfkill/Kconfig" source "net/9p/Kconfig" source "net/caif/Kconfig" source "net/ceph/Kconfig" source "net/nfc/Kconfig" source "net/psample/Kconfig" source "net/ife/Kconfig" config LWTUNNEL bool "Network light weight tunnels" ---help--- This feature provides an infrastructure to support light weight tunnels like mpls. There is no netdevice associated with a light weight tunnel endpoint. Tunnel encapsulation parameters are stored with light weight tunnel state associated with fib routes. config LWTUNNEL_BPF bool "Execute BPF program as route nexthop action" depends on LWTUNNEL default y if LWTUNNEL=y ---help--- Allows to run BPF programs as a nexthop action following a route lookup for incoming and outgoing packets. 
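# The BPF_JIT help above notes that the admin must still flip runtime knobs
# after enabling the option. A small userspace C sketch of that step; the
# helper name is ours, but the /proc/sys paths are the ones quoted in the
# help text:
#
#	#include <fcntl.h>
#	#include <unistd.h>
#
#	/* write "1" to one of the bpf_jit_* sysctls named above */
#	static int set_knob(const char *path)
#	{
#		int fd = open(path, O_WRONLY);
#		ssize_t n;
#
#		if (fd < 0)
#			return -1;
#		n = write(fd, "1", 1);
#		close(fd);
#		return n == 1 ? 0 : -1;
#	}
#
#	/* e.g. set_knob("/proc/sys/net/core/bpf_jit_enable"); */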
config DST_CACHE bool default n config GRO_CELLS bool default n config SOCK_VALIDATE_XMIT bool config NET_SOCK_MSG bool default n help The NET_SOCK_MSG provides a framework for plain sockets (e.g. TCP) or ULPs (upper layer modules, e.g. TLS) to process L7 application data with the help of BPF programs. config NET_DEVLINK tristate "Network physical/parent device Netlink interface" help Network physical/parent device Netlink interface provides infrastructure to support access to physical chip-wide config and monitoring. config MAY_USE_DEVLINK tristate default m if NET_DEVLINK=m default y if NET_DEVLINK=y || NET_DEVLINK=n help Drivers using the devlink infrastructure should have a dependency on MAY_USE_DEVLINK to ensure they do not cause link errors when devlink is a loadable module and the driver using it is built-in. config PAGE_POOL bool config FAILOVER tristate "Generic failover module" help The failover module provides a generic interface for paravirtual drivers to register a netdev and a set of ops with a failover instance. The ops are used as event handlers that get called to handle netdev register/unregister/link change/name change events on slave pci ethernet devices with the same mac address as the failover netdev. This enables paravirtual drivers to use a VF as an accelerated low latency datapath. It also allows live migration of VMs with direct attached VFs by failing over to the paravirtual datapath when the VF is unplugged. endif # if NET # Used by archs to tell that they support BPF JIT compiler plus which flavour. # Only one of the two can be selected for a specific arch since eBPF JIT supersedes # the cBPF JIT. # Classic BPF JIT (cBPF) config HAVE_CBPF_JIT bool # Extended BPF JIT (eBPF) config HAVE_EBPF_JIT bool backport-iwlwifi-dkms-7906/net/Makefile000066400000000000000000000050011351064634500201230ustar00rootroot00000000000000# SPDX-License-Identifier: GPL-2.0 # # Makefile for the linux networking. # # 2 Sep 2000, Christoph Hellwig # Rewritten to use lists instead of if-statements. 
# obj-$(CONFIG_NET) := socket.o core/ tmp-$(CONFIG_COMPAT) := compat.o obj-$(CONFIG_NET) += $(tmp-y) # LLC has to be linked before the files in net/802/ obj-$(CONFIG_LLC) += llc/ obj-$(CONFIG_NET) += ethernet/ 802/ sched/ netlink/ bpf/ obj-$(CONFIG_NETFILTER) += netfilter/ obj-$(CONFIG_INET) += ipv4/ obj-$(CONFIG_TLS) += tls/ obj-$(CONFIG_XFRM) += xfrm/ obj-$(CONFIG_UNIX) += unix/ obj-$(CONFIG_NET) += ipv6/ obj-$(CONFIG_BPFILTER) += bpfilter/ obj-$(CONFIG_PACKET) += packet/ obj-$(CONFIG_NET_KEY) += key/ obj-$(CONFIG_BRIDGE) += bridge/ obj-$(CONFIG_NET_DSA) += dsa/ obj-$(CONFIG_ATALK) += appletalk/ obj-$(CONFIG_X25) += x25/ obj-$(CONFIG_LAPB) += lapb/ obj-$(CONFIG_NETROM) += netrom/ obj-$(CONFIG_ROSE) += rose/ obj-$(CONFIG_AX25) += ax25/ obj-$(CONFIG_CAN) += can/ obj-$(CONFIG_BT) += bluetooth/ obj-$(CONFIG_SUNRPC) += sunrpc/ obj-$(CONFIG_AF_RXRPC) += rxrpc/ obj-$(CONFIG_AF_KCM) += kcm/ obj-$(CONFIG_STREAM_PARSER) += strparser/ obj-$(CONFIG_ATM) += atm/ obj-$(CONFIG_L2TP) += l2tp/ obj-$(CONFIG_DECNET) += decnet/ obj-$(CONFIG_PHONET) += phonet/ ifneq ($(CONFIG_VLAN_8021Q),) obj-y += 8021q/ endif obj-$(CONFIG_IP_DCCP) += dccp/ obj-$(CONFIG_IP_SCTP) += sctp/ obj-$(CONFIG_RDS) += rds/ obj-$(CPTCFG_WIRELESS) += wireless/ obj-$(CPTCFG_MAC80211) += mac80211/ obj-$(CONFIG_TIPC) += tipc/ obj-$(CONFIG_NETLABEL) += netlabel/ obj-$(CONFIG_IUCV) += iucv/ obj-$(CONFIG_SMC) += smc/ obj-$(CONFIG_RFKILL) += rfkill/ obj-$(CONFIG_NET_9P) += 9p/ obj-$(CONFIG_CAIF) += caif/ ifneq ($(CONFIG_DCB),) obj-y += dcb/ endif obj-$(CONFIG_6LOWPAN) += 6lowpan/ obj-$(CONFIG_IEEE802154) += ieee802154/ obj-$(CONFIG_MAC802154) += mac802154/ ifeq ($(CONFIG_NET),y) obj-$(CONFIG_SYSCTL) += sysctl_net.o endif obj-$(CONFIG_WIMAX) += wimax/ obj-$(CONFIG_DNS_RESOLVER) += dns_resolver/ obj-$(CONFIG_CEPH_LIB) += ceph/ obj-$(CONFIG_BATMAN_ADV) += batman-adv/ obj-$(CONFIG_NFC) += nfc/ obj-$(CONFIG_PSAMPLE) += psample/ obj-$(CONFIG_NET_IFE) += ife/ obj-$(CONFIG_OPENVSWITCH) += openvswitch/ obj-$(CONFIG_VSOCKETS) += vmw_vsock/ obj-$(CONFIG_MPLS) += mpls/ obj-$(CONFIG_NET_NSH) += nsh/ obj-$(CONFIG_HSR) += hsr/ ifneq ($(CONFIG_NET_SWITCHDEV),) obj-y += switchdev/ endif ifneq ($(CONFIG_NET_L3_MASTER_DEV),) obj-y += l3mdev/ endif obj-$(CONFIG_QRTR) += qrtr/ obj-$(CONFIG_NET_NCSI) += ncsi/ obj-$(CONFIG_XDP_SOCKETS) += xdp/ backport-iwlwifi-dkms-7906/net/mac80211/000077500000000000000000000000001351064634500176235ustar00rootroot00000000000000backport-iwlwifi-dkms-7906/net/mac80211/Kconfig000066400000000000000000000226271351064634500211370ustar00rootroot00000000000000config MAC80211 tristate "Generic IEEE 802.11 Networking Stack (mac80211)" depends on m depends on CFG80211 depends on CRYPTO depends on CRYPTO_ARC4 depends on CRYPTO_AES depends on CRYPTO_CCM depends on CRYPTO_GCM depends on CRYPTO_CMAC depends on CRC32 ---help--- This option enables the hardware independent IEEE 802.11 networking stack. comment "CFG80211 needs to be enabled for MAC80211" depends on CFG80211=n if MAC80211 != n config MAC80211_HAS_RC bool config MAC80211_RC_MINSTREL bool "Minstrel" if EXPERT select MAC80211_HAS_RC default y ---help--- This option enables the 'minstrel' TX rate control algorithm choice prompt "Default rate control algorithm" depends on MAC80211_HAS_RC default MAC80211_RC_DEFAULT_MINSTREL ---help--- This option selects the default rate control algorithm mac80211 will use. Note that this default can still be overridden through the ieee80211_default_rc_algo module parameter if different algorithms are available. 
config MAC80211_RC_DEFAULT_MINSTREL bool "Minstrel" depends on MAC80211_RC_MINSTREL ---help--- Select Minstrel as the default rate control algorithm. endchoice config MAC80211_RC_DEFAULT string default "minstrel_ht" if MAC80211_RC_DEFAULT_MINSTREL default "" endif comment "Some wireless drivers require a rate control algorithm" depends on MAC80211 && MAC80211_HAS_RC=n config MAC80211_MESH bool "Enable mac80211 mesh networking support" depends on MAC80211 ---help--- Select this option to enable 802.11 mesh operation in mac80211 drivers that support it. 802.11 mesh connects multiple stations over (possibly multi-hop) wireless links to form a single logical LAN. config MAC80211_LEDS bool "Enable LED triggers" depends on MAC80211 depends on LEDS_CLASS select BPAUTO_LEDS_TRIGGERS ---help--- This option enables a few LED triggers for different packet receive/transmit events. config MAC80211_DEBUGFS bool "Export mac80211 internals in DebugFS" depends on MAC80211 && DEBUG_FS ---help--- Select this to see extensive information about the internal state of mac80211 in debugfs. Say N unless you know you need this. config MAC80211_MESSAGE_TRACING bool "Trace all mac80211 debug messages" depends on MAC80211 ---help--- Select this option to have mac80211 register the mac80211_msg trace subsystem with tracepoints to collect all debugging messages, independent of printing them into the kernel log. The overhead in this option is that all the messages need to be present in the binary and formatted at runtime for tracing. menuconfig MAC80211_DEBUG_MENU bool "Select mac80211 debugging features" depends on MAC80211 ---help--- This option collects various mac80211 debug settings. config MAC80211_NOINLINE bool "Do not inline TX/RX handlers" depends on MAC80211_DEBUG_MENU ---help--- This option affects code generation in mac80211, when selected some functions are marked "noinline" to allow easier debugging of problems in the transmit and receive paths. This option increases code size a bit and inserts a lot of function calls in the code, but is otherwise safe to enable. If unsure, say N unless you expect to be finding problems in mac80211. config MAC80211_VERBOSE_DEBUG bool "Verbose debugging output" depends on MAC80211_DEBUG_MENU ---help--- Selecting this option causes mac80211 to print out many debugging messages. It should not be selected on production systems as some of the messages are remotely triggerable. Do not select this option. config MAC80211_MLME_DEBUG bool "Verbose managed MLME output" depends on MAC80211_DEBUG_MENU ---help--- Selecting this option causes mac80211 to print out debugging messages for the managed-mode MLME. It should not be selected on production systems as some of the messages are remotely triggerable. Do not select this option. config MAC80211_STA_DEBUG bool "Verbose station debugging" depends on MAC80211_DEBUG_MENU ---help--- Selecting this option causes mac80211 to print out debugging messages for station addition/removal. Do not select this option. config MAC80211_HT_DEBUG bool "Verbose HT debugging" depends on MAC80211_DEBUG_MENU ---help--- This option enables 802.11n High Throughput features debug tracing output. It should not be selected on production systems as some of the messages are remotely triggerable. Do not select this option. config MAC80211_OCB_DEBUG bool "Verbose OCB debugging" depends on MAC80211_DEBUG_MENU ---help--- Selecting this option causes mac80211 to print out very verbose OCB debugging messages. 
It should not be selected on production systems as those messages are remotely triggerable. Do not select this option. config MAC80211_IBSS_DEBUG bool "Verbose IBSS debugging" depends on MAC80211_DEBUG_MENU ---help--- Selecting this option causes mac80211 to print out very verbose IBSS debugging messages. It should not be selected on production systems as those messages are remotely triggerable. Do not select this option. config MAC80211_PS_DEBUG bool "Verbose powersave mode debugging" depends on MAC80211_DEBUG_MENU ---help--- Selecting this option causes mac80211 to print out very verbose power save mode debugging messages (when mac80211 is an AP and has power saving stations.) It should not be selected on production systems as those messages are remotely triggerable. Do not select this option. config MAC80211_MPL_DEBUG bool "Verbose mesh peer link debugging" depends on MAC80211_DEBUG_MENU depends on MAC80211_MESH ---help--- Selecting this option causes mac80211 to print out very verbose mesh peer link debugging messages (when mac80211 is taking part in a mesh network). It should not be selected on production systems as those messages are remotely triggerable. Do not select this option. config MAC80211_MPATH_DEBUG bool "Verbose mesh path debugging" depends on MAC80211_DEBUG_MENU depends on MAC80211_MESH ---help--- Selecting this option causes mac80211 to print out very verbose mesh path selection debugging messages (when mac80211 is taking part in a mesh network). It should not be selected on production systems as those messages are remotely triggerable. Do not select this option. config MAC80211_MHWMP_DEBUG bool "Verbose mesh HWMP routing debugging" depends on MAC80211_DEBUG_MENU depends on MAC80211_MESH ---help--- Selecting this option causes mac80211 to print out very verbose mesh routing (HWMP) debugging messages (when mac80211 is taking part in a mesh network). It should not be selected on production systems as those messages are remotely triggerable. Do not select this option. config MAC80211_MESH_SYNC_DEBUG bool "Verbose mesh synchronization debugging" depends on MAC80211_DEBUG_MENU depends on MAC80211_MESH ---help--- Selecting this option causes mac80211 to print out very verbose mesh synchronization debugging messages (when mac80211 is taking part in a mesh network). Do not select this option. config MAC80211_MESH_CSA_DEBUG bool "Verbose mesh channel switch debugging" depends on MAC80211_DEBUG_MENU depends on MAC80211_MESH ---help--- Selecting this option causes mac80211 to print out very verbose mesh channel switch debugging messages (when mac80211 is taking part in a mesh network). Do not select this option. config MAC80211_MESH_PS_DEBUG bool "Verbose mesh powersave debugging" depends on MAC80211_DEBUG_MENU depends on MAC80211_MESH ---help--- Selecting this option causes mac80211 to print out very verbose mesh powersave debugging messages (when mac80211 is taking part in a mesh network). Do not select this option. config MAC80211_TDLS_DEBUG bool "Verbose TDLS debugging" depends on MAC80211_DEBUG_MENU ---help--- Selecting this option causes mac80211 to print out very verbose TDLS selection debugging messages (when mac80211 is a TDLS STA). It should not be selected on production systems as those messages are remotely triggerable. Do not select this option. 
config MAC80211_DEBUG_COUNTERS bool "Extra statistics for TX/RX debugging" depends on MAC80211_DEBUG_MENU depends on MAC80211_DEBUGFS ---help--- Selecting this option causes mac80211 to keep additional and very verbose statistics about TX and RX handler use as well as a few selected dot11 counters. These will be exposed in debugfs. Note that some of the counters are not concurrency safe and may thus not always be accurate. If unsure, say N. config MAC80211_STA_HASH_MAX_SIZE int "Station hash table maximum size" if MAC80211_DEBUG_MENU default 0 ---help--- Setting this option to a low value (e.g. 4) allows testing the hash table with collisions relatively deterministically (just connect more stations than the number selected here.) If unsure, leave the default of 0. config MAC80211_LATENCY_MEASUREMENTS bool "Enable latency measurements in mac80211" depends on MAC80211_DEBUGFS default y ---help--- Selecting this option causes mac80211 to keep additional and very verbose statistics about TX. The purpose is to measure the latency of the TX and classify the packets into categories based on the time they have been waiting from the time they entered mac80211 until they have been freed in mac80211. If unsure, say N. config IWL_TIMEOUT_FACTOR int "Factor to multiply timeouts by" range 1 2500 default 1 backport-iwlwifi-dkms-7906/net/mac80211/Makefile000066400000000000000000000021641351064634500212660ustar00rootroot00000000000000# SPDX-License-Identifier: GPL-2.0 obj-$(CPTCFG_MAC80211) += mac80211.o # mac80211 objects mac80211-y := \ main.o status.o \ driver-ops.o \ sta_info.o \ wep.o \ aead_api.o \ wpa.o \ scan.o offchannel.o \ ht.o agg-tx.o agg-rx.o \ vht.o \ he.o \ ibss.o \ iface.o \ rate.o \ michael.o \ tkip.o \ aes_cmac.o \ aes_gmac.o \ fils_aead.o \ cfg.o \ ethtool.o \ rx.o \ spectmgmt.o \ tx.o \ key.o \ util.o \ wme.o \ chan.o \ trace.o mlme.o \ tdls.o \ ocb.o mac80211-$(CPTCFG_MAC80211_LEDS) += led.o mac80211-$(CPTCFG_MAC80211_DEBUGFS) += \ debugfs.o \ debugfs_sta.o \ debugfs_netdev.o \ debugfs_key.o mac80211-$(CPTCFG_MAC80211_MESH) += \ mesh.o \ mesh_pathtbl.o \ mesh_plink.o \ mesh_hwmp.o \ mesh_sync.o \ mesh_ps.o mac80211-$(CONFIG_PM) += pm.o CFLAGS_trace.o := -I$(src) subdir-ccflags-y += $(call cc-option,-Wimplicit-fallthrough) rc80211_minstrel-y := \ rc80211_minstrel.o \ rc80211_minstrel_ht.o rc80211_minstrel-$(CPTCFG_MAC80211_DEBUGFS) += \ rc80211_minstrel_debugfs.o \ rc80211_minstrel_ht_debugfs.o mac80211-$(CPTCFG_MAC80211_RC_MINSTREL) += $(rc80211_minstrel-y) ccflags-y += -DDEBUG backport-iwlwifi-dkms-7906/net/mac80211/aead_api.c000066400000000000000000000053051351064634500215150ustar00rootroot00000000000000/* * Copyright 2003-2004, Instant802 Networks, Inc. * Copyright 2005-2006, Devicescape Software, Inc. * Copyright 2014-2015, Qualcomm Atheros, Inc. * * Rewrite: Copyright (C) 2013 Linaro Ltd * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. 
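 *
 * Usage sketch (added for illustration; the variable names are assumed,
 * the signatures are the ones defined below): a cipher wrapper first
 * allocates a transform, then runs frames through it:
 *
 *	tfm = aead_key_setup_encrypt("ccm(aes)", key, key_len, mic_len);
 *	if (!IS_ERR(tfm))
 *		ret = aead_encrypt(tfm, b_0, aad, aad_len, data, data_len, mic);
 *
 * b_0 and aad are the per-cipher nonce and associated-data blocks; see
 * the aes_ccm.h and aes_gcm.h wrappers later in this directory.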
*/ #include <crypto/aead.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/types.h> #include <linux/scatterlist.h> #include "aead_api.h" int aead_encrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad, size_t aad_len, u8 *data, size_t data_len, u8 *mic) { size_t mic_len = crypto_aead_authsize(tfm); struct scatterlist sg[3]; struct aead_request *aead_req; int reqsize = sizeof(*aead_req) + crypto_aead_reqsize(tfm); u8 *__aad; aead_req = kzalloc(reqsize + aad_len, GFP_ATOMIC); if (!aead_req) return -ENOMEM; __aad = (u8 *)aead_req + reqsize; memcpy(__aad, aad, aad_len); sg_init_table(sg, 3); sg_set_buf(&sg[0], __aad, aad_len); sg_set_buf(&sg[1], data, data_len); sg_set_buf(&sg[2], mic, mic_len); aead_request_set_tfm(aead_req, tfm); aead_request_set_crypt(aead_req, sg, sg, data_len, b_0); aead_request_set_ad(aead_req, sg[0].length); crypto_aead_encrypt(aead_req); kzfree(aead_req); return 0; } int aead_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad, size_t aad_len, u8 *data, size_t data_len, u8 *mic) { size_t mic_len = crypto_aead_authsize(tfm); struct scatterlist sg[3]; struct aead_request *aead_req; int reqsize = sizeof(*aead_req) + crypto_aead_reqsize(tfm); u8 *__aad; int err; if (data_len == 0) return -EINVAL; aead_req = kzalloc(reqsize + aad_len, GFP_ATOMIC); if (!aead_req) return -ENOMEM; __aad = (u8 *)aead_req + reqsize; memcpy(__aad, aad, aad_len); sg_init_table(sg, 3); sg_set_buf(&sg[0], __aad, aad_len); sg_set_buf(&sg[1], data, data_len); sg_set_buf(&sg[2], mic, mic_len); aead_request_set_tfm(aead_req, tfm); aead_request_set_crypt(aead_req, sg, sg, data_len + mic_len, b_0); aead_request_set_ad(aead_req, sg[0].length); err = crypto_aead_decrypt(aead_req); kzfree(aead_req); return err; } struct crypto_aead * aead_key_setup_encrypt(const char *alg, const u8 key[], size_t key_len, size_t mic_len) { struct crypto_aead *tfm; int err; tfm = crypto_alloc_aead(alg, 0, CRYPTO_ALG_ASYNC); if (IS_ERR(tfm)) return tfm; err = crypto_aead_setkey(tfm, key, key_len); if (err) goto free_aead; err = crypto_aead_setauthsize(tfm, mic_len); if (err) goto free_aead; return tfm; free_aead: crypto_free_aead(tfm); return ERR_PTR(err); } void aead_key_free(struct crypto_aead *tfm) { crypto_free_aead(tfm); } backport-iwlwifi-dkms-7906/net/mac80211/aead_api.h000066400000000000000000000013171351064634500215210ustar00rootroot00000000000000/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #ifndef _AEAD_API_H #define _AEAD_API_H #include <crypto/aead.h> #include <linux/crypto.h> struct crypto_aead * aead_key_setup_encrypt(const char *alg, const u8 key[], size_t key_len, size_t mic_len); int aead_encrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad, size_t aad_len, u8 *data, size_t data_len, u8 *mic); int aead_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad, size_t aad_len, u8 *data, size_t data_len, u8 *mic); void aead_key_free(struct crypto_aead *tfm); #endif /* _AEAD_API_H */ backport-iwlwifi-dkms-7906/net/mac80211/aes_ccm.h000066400000000000000000000022261351064634500213700ustar00rootroot00000000000000/* * Copyright 2003-2004, Instant802 Networks, Inc. * Copyright 2006, Devicescape Software, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation.
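 *
 * (Usage note, added for illustration: the inline wrappers below fix the
 * algorithm to "ccm(aes)" and expect the AAD buffer to start with its
 * length as a big-endian u16, with the real AAD at aad + 2; callers set
 * up b_0 and aad accordingly, e.g. in wpa.c, before invoking
 * ieee80211_aes_ccm_encrypt()/_decrypt().)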
*/ #ifndef AES_CCM_H #define AES_CCM_H #include "aead_api.h" #define CCM_AAD_LEN 32 static inline struct crypto_aead * ieee80211_aes_key_setup_encrypt(const u8 key[], size_t key_len, size_t mic_len) { return aead_key_setup_encrypt("ccm(aes)", key, key_len, mic_len); } static inline int ieee80211_aes_ccm_encrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad, u8 *data, size_t data_len, u8 *mic) { return aead_encrypt(tfm, b_0, aad + 2, be16_to_cpup((__be16 *)aad), data, data_len, mic); } static inline int ieee80211_aes_ccm_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad, u8 *data, size_t data_len, u8 *mic) { return aead_decrypt(tfm, b_0, aad + 2, be16_to_cpup((__be16 *)aad), data, data_len, mic); } static inline void ieee80211_aes_key_free(struct crypto_aead *tfm) { return aead_key_free(tfm); } #endif /* AES_CCM_H */ backport-iwlwifi-dkms-7906/net/mac80211/aes_cmac.c000066400000000000000000000033551351064634500215300ustar00rootroot00000000000000/* * AES-128-CMAC with TLen 16 for IEEE 802.11w BIP * Copyright 2008, Jouni Malinen * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include #include #include #include #include #include #include #include "key.h" #include "aes_cmac.h" #define CMAC_TLEN 8 /* CMAC TLen = 64 bits (8 octets) */ #define CMAC_TLEN_256 16 /* CMAC TLen = 128 bits (16 octets) */ #define AAD_LEN 20 static const u8 zero[CMAC_TLEN_256]; void ieee80211_aes_cmac(struct crypto_shash *tfm, const u8 *aad, const u8 *data, size_t data_len, u8 *mic) { SHASH_DESC_ON_STACK(desc, tfm); u8 out[AES_BLOCK_SIZE]; desc->tfm = tfm; crypto_shash_init(desc); crypto_shash_update(desc, aad, AAD_LEN); crypto_shash_update(desc, data, data_len - CMAC_TLEN); crypto_shash_finup(desc, zero, CMAC_TLEN, out); memcpy(mic, out, CMAC_TLEN); } void ieee80211_aes_cmac_256(struct crypto_shash *tfm, const u8 *aad, const u8 *data, size_t data_len, u8 *mic) { SHASH_DESC_ON_STACK(desc, tfm); desc->tfm = tfm; crypto_shash_init(desc); crypto_shash_update(desc, aad, AAD_LEN); crypto_shash_update(desc, data, data_len - CMAC_TLEN_256); crypto_shash_finup(desc, zero, CMAC_TLEN_256, mic); } struct crypto_shash *ieee80211_aes_cmac_key_setup(const u8 key[], size_t key_len) { struct crypto_shash *tfm; tfm = crypto_alloc_shash("cmac(aes)", 0, 0); if (!IS_ERR(tfm)) crypto_shash_setkey(tfm, key, key_len); return tfm; } void ieee80211_aes_cmac_key_free(struct crypto_shash *tfm) { crypto_free_shash(tfm); } backport-iwlwifi-dkms-7906/net/mac80211/aes_cmac.h000066400000000000000000000013421351064634500215270ustar00rootroot00000000000000/* * Copyright 2008, Jouni Malinen * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #ifndef AES_CMAC_H #define AES_CMAC_H #include #include struct crypto_shash *ieee80211_aes_cmac_key_setup(const u8 key[], size_t key_len); void ieee80211_aes_cmac(struct crypto_shash *tfm, const u8 *aad, const u8 *data, size_t data_len, u8 *mic); void ieee80211_aes_cmac_256(struct crypto_shash *tfm, const u8 *aad, const u8 *data, size_t data_len, u8 *mic); void ieee80211_aes_cmac_key_free(struct crypto_shash *tfm); #endif /* AES_CMAC_H */ backport-iwlwifi-dkms-7906/net/mac80211/aes_gcm.h000066400000000000000000000022071351064634500213730ustar00rootroot00000000000000/* * Copyright 2014-2015, Qualcomm Atheros, Inc. 
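 * (Usage note, added for illustration: as in aes_ccm.h, the AAD buffer
 * handed to the inline helpers below starts with a big-endian u16 length,
 * and j_0 is the GCM counter block the caller derives from the PN.)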
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #ifndef AES_GCM_H #define AES_GCM_H #include "aead_api.h" #define GCM_AAD_LEN 32 static inline int ieee80211_aes_gcm_encrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad, u8 *data, size_t data_len, u8 *mic) { return aead_encrypt(tfm, j_0, aad + 2, be16_to_cpup((__be16 *)aad), data, data_len, mic); } static inline int ieee80211_aes_gcm_decrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad, u8 *data, size_t data_len, u8 *mic) { return aead_decrypt(tfm, j_0, aad + 2, be16_to_cpup((__be16 *)aad), data, data_len, mic); } static inline struct crypto_aead * ieee80211_aes_gcm_key_setup_encrypt(const u8 key[], size_t key_len) { return aead_key_setup_encrypt("gcm(aes)", key, key_len, IEEE80211_GCMP_MIC_LEN); } static inline void ieee80211_aes_gcm_key_free(struct crypto_aead *tfm) { return aead_key_free(tfm); } #endif /* AES_GCM_H */ backport-iwlwifi-dkms-7906/net/mac80211/aes_gmac.c000066400000000000000000000040161351064634500215270ustar00rootroot00000000000000/* * AES-GMAC for IEEE 802.11 BIP-GMAC-128 and BIP-GMAC-256 * Copyright 2015, Qualcomm Atheros, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include #include #include #include #include #include #include "key.h" #include "aes_gmac.h" int ieee80211_aes_gmac(struct crypto_aead *tfm, const u8 *aad, u8 *nonce, const u8 *data, size_t data_len, u8 *mic) { struct scatterlist sg[4]; u8 *zero, *__aad, iv[AES_BLOCK_SIZE]; struct aead_request *aead_req; int reqsize = sizeof(*aead_req) + crypto_aead_reqsize(tfm); if (data_len < GMAC_MIC_LEN) return -EINVAL; aead_req = kzalloc(reqsize + GMAC_MIC_LEN + GMAC_AAD_LEN, GFP_ATOMIC); if (!aead_req) return -ENOMEM; zero = (u8 *)aead_req + reqsize; __aad = zero + GMAC_MIC_LEN; memcpy(__aad, aad, GMAC_AAD_LEN); sg_init_table(sg, 4); sg_set_buf(&sg[0], __aad, GMAC_AAD_LEN); sg_set_buf(&sg[1], data, data_len - GMAC_MIC_LEN); sg_set_buf(&sg[2], zero, GMAC_MIC_LEN); sg_set_buf(&sg[3], mic, GMAC_MIC_LEN); memcpy(iv, nonce, GMAC_NONCE_LEN); memset(iv + GMAC_NONCE_LEN, 0, sizeof(iv) - GMAC_NONCE_LEN); iv[AES_BLOCK_SIZE - 1] = 0x01; aead_request_set_tfm(aead_req, tfm); aead_request_set_crypt(aead_req, sg, sg, 0, iv); aead_request_set_ad(aead_req, GMAC_AAD_LEN + data_len); crypto_aead_encrypt(aead_req); kzfree(aead_req); return 0; } struct crypto_aead *ieee80211_aes_gmac_key_setup(const u8 key[], size_t key_len) { struct crypto_aead *tfm; int err; tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC); if (IS_ERR(tfm)) return tfm; err = crypto_aead_setkey(tfm, key, key_len); if (!err) err = crypto_aead_setauthsize(tfm, GMAC_MIC_LEN); if (!err) return tfm; crypto_free_aead(tfm); return ERR_PTR(err); } void ieee80211_aes_gmac_key_free(struct crypto_aead *tfm) { crypto_free_aead(tfm); } backport-iwlwifi-dkms-7906/net/mac80211/aes_gmac.h000066400000000000000000000012501351064634500215310ustar00rootroot00000000000000/* * Copyright 2015, Qualcomm Atheros, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. 
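 *
 * (Usage note, added for illustration, not part of the original header:
 * BIP-GMAC only computes a MIC, so the caller hands ieee80211_aes_gmac()
 * the frame together with a GMAC_NONCE_LEN-byte nonce (built in mac80211
 * from the transmitter address and the IPN) and receives GMAC_MIC_LEN
 * bytes back in *mic; the declarations below give the exact signature.)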
*/ #ifndef AES_GMAC_H #define AES_GMAC_H #include <linux/crypto.h> #define GMAC_AAD_LEN 20 #define GMAC_MIC_LEN 16 #define GMAC_NONCE_LEN 12 struct crypto_aead *ieee80211_aes_gmac_key_setup(const u8 key[], size_t key_len); int ieee80211_aes_gmac(struct crypto_aead *tfm, const u8 *aad, u8 *nonce, const u8 *data, size_t data_len, u8 *mic); void ieee80211_aes_gmac_key_free(struct crypto_aead *tfm); #endif /* AES_GMAC_H */ backport-iwlwifi-dkms-7906/net/mac80211/agg-rx.c000066400000000000000000000355501351064634500211640ustar00rootroot00000000000000/* * HT handling * * Copyright 2003, Jouni Malinen <jkmaline@cc.hut.fi> * Copyright 2002-2005, Instant802 Networks, Inc. * Copyright 2005-2006, Devicescape Software, Inc. * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> * Copyright 2007, Michael Wu <flamingice@sourmilk.net> * Copyright 2007-2010, Intel Corporation * Copyright(c) 2015-2017 Intel Deutschland GmbH * Copyright (C) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ /** * DOC: RX A-MPDU aggregation * * Aggregation on the RX side requires only implementing the * @ampdu_action callback that is invoked to start/stop any * block-ack sessions for RX aggregation. * * When RX aggregation is started by the peer, the driver is * notified via @ampdu_action function, with the * %IEEE80211_AMPDU_RX_START action, and may reject the request * in which case a negative response is sent to the peer, if it * accepts it a positive response is sent. * * While the session is active, the device/driver are required * to de-aggregate frames and pass them up one by one to mac80211, * which will handle the reorder buffer. * * When the aggregation session is stopped again by the peer or * ourselves, the driver's @ampdu_action function will be called * with the action %IEEE80211_AMPDU_RX_STOP. In this case, the * call must not fail. */ #include <linux/ieee80211.h> #include <linux/slab.h> #include <linux/export.h> #include <net/mac80211.h> #include "ieee80211_i.h" #include "driver-ops.h" static void ieee80211_free_tid_rx(struct rcu_head *h) { struct tid_ampdu_rx *tid_rx = container_of(h, struct tid_ampdu_rx, rcu_head); int i; for (i = 0; i < tid_rx->buf_size; i++) __skb_queue_purge(&tid_rx->reorder_buf[i]); kfree(tid_rx->reorder_buf); kfree(tid_rx->reorder_time); kfree(tid_rx); } void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid, u16 initiator, u16 reason, bool tx) { struct ieee80211_local *local = sta->local; struct tid_ampdu_rx *tid_rx; struct ieee80211_ampdu_params params = { .sta = &sta->sta, .action = IEEE80211_AMPDU_RX_STOP, .tid = tid, .amsdu = false, .timeout = 0, .ssn = 0, }; lockdep_assert_held(&sta->ampdu_mlme.mtx); tid_rx = rcu_dereference_protected(sta->ampdu_mlme.tid_rx[tid], lockdep_is_held(&sta->ampdu_mlme.mtx)); if (!test_bit(tid, sta->ampdu_mlme.agg_session_valid)) return; RCU_INIT_POINTER(sta->ampdu_mlme.tid_rx[tid], NULL); __clear_bit(tid, sta->ampdu_mlme.agg_session_valid); ht_dbg(sta->sdata, "Rx BA session stop requested for %pM tid %u %s reason: %d\n", sta->sta.addr, tid, initiator == WLAN_BACK_RECIPIENT ?
"recipient" : "initiator", (int)reason); if (drv_ampdu_action(local, sta->sdata, ¶ms)) sdata_info(sta->sdata, "HW problem - can not stop rx aggregation for %pM tid %d\n", sta->sta.addr, tid); /* check if this is a self generated aggregation halt */ if (initiator == WLAN_BACK_RECIPIENT && tx) ieee80211_send_delba(sta->sdata, sta->sta.addr, tid, WLAN_BACK_RECIPIENT, reason); /* * return here in case tid_rx is not assigned - which will happen if * IEEE80211_HW_SUPPORTS_REORDERING_BUFFER is set. */ if (!tid_rx) return; del_timer_sync(&tid_rx->session_timer); /* make sure ieee80211_sta_reorder_release() doesn't re-arm the timer */ spin_lock_bh(&tid_rx->reorder_lock); tid_rx->removed = true; spin_unlock_bh(&tid_rx->reorder_lock); del_timer_sync(&tid_rx->reorder_timer); call_rcu(&tid_rx->rcu_head, ieee80211_free_tid_rx); } void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid, u16 initiator, u16 reason, bool tx) { mutex_lock(&sta->ampdu_mlme.mtx); ___ieee80211_stop_rx_ba_session(sta, tid, initiator, reason, tx); mutex_unlock(&sta->ampdu_mlme.mtx); } void ieee80211_stop_rx_ba_session(struct ieee80211_vif *vif, u16 ba_rx_bitmap, const u8 *addr) { struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); struct sta_info *sta; int i; rcu_read_lock(); sta = sta_info_get_bss(sdata, addr); if (!sta) { rcu_read_unlock(); return; } for (i = 0; i < IEEE80211_NUM_TIDS; i++) if (ba_rx_bitmap & BIT(i)) set_bit(i, sta->ampdu_mlme.tid_rx_stop_requested); ieee80211_queue_work(&sta->local->hw, &sta->ampdu_mlme.work); rcu_read_unlock(); } EXPORT_SYMBOL(ieee80211_stop_rx_ba_session); /* * After accepting the AddBA Request we activated a timer, * resetting it after each frame that arrives from the originator. */ static void sta_rx_agg_session_timer_expired(struct timer_list *t) { struct tid_ampdu_rx *tid_rx = from_timer(tid_rx, t, session_timer); struct sta_info *sta = tid_rx->sta; u8 tid = tid_rx->tid; unsigned long timeout; timeout = tid_rx->last_rx + TU_TO_JIFFIES(tid_rx->timeout); if (time_is_after_jiffies(timeout)) { mod_timer(&tid_rx->session_timer, timeout); return; } ht_dbg(sta->sdata, "RX session timer expired on %pM tid %d\n", sta->sta.addr, tid); set_bit(tid, sta->ampdu_mlme.tid_rx_timer_expired); ieee80211_queue_work(&sta->local->hw, &sta->ampdu_mlme.work); } static void sta_rx_agg_reorder_timer_expired(struct timer_list *t) { struct tid_ampdu_rx *tid_rx = from_timer(tid_rx, t, reorder_timer); rcu_read_lock(); ieee80211_release_reorder_timeout(tid_rx->sta, tid_rx->tid); rcu_read_unlock(); } static void ieee80211_send_addba_resp(struct ieee80211_sub_if_data *sdata, u8 *da, u16 tid, u8 dialog_token, u16 status, u16 policy, u16 buf_size, u16 timeout) { struct ieee80211_local *local = sdata->local; struct sk_buff *skb; struct ieee80211_mgmt *mgmt; bool amsdu = ieee80211_hw_check(&local->hw, SUPPORTS_AMSDU_IN_AMPDU); u16 capab; skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom); if (!skb) return; skb_reserve(skb, local->hw.extra_tx_headroom); mgmt = skb_put_zero(skb, 24); memcpy(mgmt->da, da, ETH_ALEN); memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); if (sdata->vif.type == NL80211_IFTYPE_AP || sdata->vif.type == NL80211_IFTYPE_AP_VLAN || sdata->vif.type == NL80211_IFTYPE_MESH_POINT) memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN); else if (sdata->vif.type == NL80211_IFTYPE_STATION) memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN); else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) memcpy(mgmt->bssid, sdata->u.ibss.bssid, ETH_ALEN); mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | 
IEEE80211_STYPE_ACTION); skb_put(skb, 1 + sizeof(mgmt->u.action.u.addba_resp)); mgmt->u.action.category = WLAN_CATEGORY_BACK; mgmt->u.action.u.addba_resp.action_code = WLAN_ACTION_ADDBA_RESP; mgmt->u.action.u.addba_resp.dialog_token = dialog_token; capab = (u16)(amsdu << 0); /* bit 0 A-MSDU support */ capab |= (u16)(policy << 1); /* bit 1 aggregation policy */ capab |= (u16)(tid << 2); /* bit 5:2 TID number */ capab |= (u16)(buf_size << 6); /* bit 15:6 max size of aggregation */ mgmt->u.action.u.addba_resp.capab = cpu_to_le16(capab); mgmt->u.action.u.addba_resp.timeout = cpu_to_le16(timeout); mgmt->u.action.u.addba_resp.status = cpu_to_le16(status); ieee80211_tx_skb(sdata, skb); } void ___ieee80211_start_rx_ba_session(struct sta_info *sta, u8 dialog_token, u16 timeout, u16 start_seq_num, u16 ba_policy, u16 tid, u16 buf_size, bool tx, bool auto_seq) { struct ieee80211_local *local = sta->sdata->local; struct tid_ampdu_rx *tid_agg_rx; struct ieee80211_ampdu_params params = { .sta = &sta->sta, .action = IEEE80211_AMPDU_RX_START, .tid = tid, .amsdu = false, .timeout = timeout, .ssn = start_seq_num, }; int i, ret = -EOPNOTSUPP; u16 status = WLAN_STATUS_REQUEST_DECLINED; u16 max_buf_size; if (tid >= IEEE80211_FIRST_TSPEC_TSID) { ht_dbg(sta->sdata, "STA %pM requests BA session on unsupported tid %d\n", sta->sta.addr, tid); goto end; } if (!sta->sta.ht_cap.ht_supported) { ht_dbg(sta->sdata, "STA %pM erroneously requests BA session on tid %d w/o QoS\n", sta->sta.addr, tid); /* send a response anyway, it's an error case if we get here */ goto end; } if (test_sta_flag(sta, WLAN_STA_BLOCK_BA)) { ht_dbg(sta->sdata, "Suspend in progress - Denying ADDBA request (%pM tid %d)\n", sta->sta.addr, tid); goto end; } if (sta->sta.he_cap.has_he) max_buf_size = IEEE80211_MAX_AMPDU_BUF; else max_buf_size = IEEE80211_MAX_AMPDU_BUF_HT; /* sanity check for incoming parameters: * check if configuration can support the BA policy * and if buffer size does not exceed the max value */ /* XXX: check own ht delayed BA capability?? */ if (((ba_policy != 1) && (!(sta->sta.ht_cap.cap & IEEE80211_HT_CAP_DELAY_BA))) || (buf_size > max_buf_size)) { status = WLAN_STATUS_INVALID_QOS_PARAM; ht_dbg_ratelimited(sta->sdata, "AddBA Req with bad params from %pM on tid %u. policy %d, buffer size %d\n", sta->sta.addr, tid, ba_policy, buf_size); goto end; } /* determine default buffer size */ if (buf_size == 0) buf_size = max_buf_size; /* make sure the size doesn't exceed the maximum supported by the hw */ if (buf_size > sta->sta.max_rx_aggregation_subframes) buf_size = sta->sta.max_rx_aggregation_subframes; params.buf_size = buf_size; ht_dbg(sta->sdata, "AddBA Req buf_size=%d for %pM\n", buf_size, sta->sta.addr); /* examine state machine */ lockdep_assert_held(&sta->ampdu_mlme.mtx); if (test_bit(tid, sta->ampdu_mlme.agg_session_valid)) { if (sta->ampdu_mlme.tid_rx_token[tid] == dialog_token) { struct tid_ampdu_rx *tid_rx; ht_dbg_ratelimited(sta->sdata, "updated AddBA Req from %pM on tid %u\n", sta->sta.addr, tid); /* We have no API to update the timeout value in the * driver so reject the timeout update if the timeout * changed. If it did not change, i.e., no real update, * just reply with success.
*/ rcu_read_lock(); tid_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]); if (tid_rx && tid_rx->timeout == timeout) status = WLAN_STATUS_SUCCESS; else status = WLAN_STATUS_REQUEST_DECLINED; rcu_read_unlock(); goto end; } ht_dbg_ratelimited(sta->sdata, "unexpected AddBA Req from %pM on tid %u\n", sta->sta.addr, tid); /* delete existing Rx BA session on the same tid */ ___ieee80211_stop_rx_ba_session(sta, tid, WLAN_BACK_RECIPIENT, WLAN_STATUS_UNSPECIFIED_QOS, false); } if (ieee80211_hw_check(&local->hw, SUPPORTS_REORDERING_BUFFER)) { ret = drv_ampdu_action(local, sta->sdata, ¶ms); ht_dbg(sta->sdata, "Rx A-MPDU request on %pM tid %d result %d\n", sta->sta.addr, tid, ret); if (!ret) status = WLAN_STATUS_SUCCESS; goto end; } /* prepare A-MPDU MLME for Rx aggregation */ tid_agg_rx = kzalloc(sizeof(*tid_agg_rx), GFP_KERNEL); if (!tid_agg_rx) goto end; spin_lock_init(&tid_agg_rx->reorder_lock); /* rx timer */ timer_setup(&tid_agg_rx->session_timer, sta_rx_agg_session_timer_expired, TIMER_DEFERRABLE); /* rx reorder timer */ timer_setup(&tid_agg_rx->reorder_timer, sta_rx_agg_reorder_timer_expired, 0); /* prepare reordering buffer */ tid_agg_rx->reorder_buf = kcalloc(buf_size, sizeof(struct sk_buff_head), GFP_KERNEL); tid_agg_rx->reorder_time = kcalloc(buf_size, sizeof(unsigned long), GFP_KERNEL); if (!tid_agg_rx->reorder_buf || !tid_agg_rx->reorder_time) { kfree(tid_agg_rx->reorder_buf); kfree(tid_agg_rx->reorder_time); kfree(tid_agg_rx); goto end; } for (i = 0; i < buf_size; i++) __skb_queue_head_init(&tid_agg_rx->reorder_buf[i]); ret = drv_ampdu_action(local, sta->sdata, ¶ms); ht_dbg(sta->sdata, "Rx A-MPDU request on %pM tid %d result %d\n", sta->sta.addr, tid, ret); if (ret) { kfree(tid_agg_rx->reorder_buf); kfree(tid_agg_rx->reorder_time); kfree(tid_agg_rx); goto end; } /* update data */ tid_agg_rx->ssn = start_seq_num; tid_agg_rx->head_seq_num = start_seq_num; tid_agg_rx->buf_size = buf_size; tid_agg_rx->timeout = timeout; tid_agg_rx->stored_mpdu_num = 0; tid_agg_rx->auto_seq = auto_seq; tid_agg_rx->started = false; tid_agg_rx->reorder_buf_filtered = 0; tid_agg_rx->tid = tid; tid_agg_rx->sta = sta; status = WLAN_STATUS_SUCCESS; /* activate it for RX */ rcu_assign_pointer(sta->ampdu_mlme.tid_rx[tid], tid_agg_rx); if (timeout) { mod_timer(&tid_agg_rx->session_timer, TU_TO_EXP_TIME(timeout)); tid_agg_rx->last_rx = jiffies; } end: if (status == WLAN_STATUS_SUCCESS) { __set_bit(tid, sta->ampdu_mlme.agg_session_valid); __clear_bit(tid, sta->ampdu_mlme.unexpected_agg); sta->ampdu_mlme.tid_rx_token[tid] = dialog_token; } if (tx) ieee80211_send_addba_resp(sta->sdata, sta->sta.addr, tid, dialog_token, status, 1, buf_size, timeout); } static void __ieee80211_start_rx_ba_session(struct sta_info *sta, u8 dialog_token, u16 timeout, u16 start_seq_num, u16 ba_policy, u16 tid, u16 buf_size, bool tx, bool auto_seq) { mutex_lock(&sta->ampdu_mlme.mtx); ___ieee80211_start_rx_ba_session(sta, dialog_token, timeout, start_seq_num, ba_policy, tid, buf_size, tx, auto_seq); mutex_unlock(&sta->ampdu_mlme.mtx); } void ieee80211_process_addba_request(struct ieee80211_local *local, struct sta_info *sta, struct ieee80211_mgmt *mgmt, size_t len) { u16 capab, tid, timeout, ba_policy, buf_size, start_seq_num; u8 dialog_token; /* extract session parameters from addba request frame */ dialog_token = mgmt->u.action.u.addba_req.dialog_token; timeout = le16_to_cpu(mgmt->u.action.u.addba_req.timeout); start_seq_num = le16_to_cpu(mgmt->u.action.u.addba_req.start_seq_num) >> 4; capab = le16_to_cpu(mgmt->u.action.u.addba_req.capab); 
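/* * (Descriptive note: the Block Ack Parameter Set is a packed u16; the * decoding below mirrors the encoding in ieee80211_send_addba_resp() * above -- bit 0 is A-MSDU support, bit 1 the aggregation policy, * bits 5:2 the TID and bits 15:6 the buffer size.) */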
ba_policy = (capab & IEEE80211_ADDBA_PARAM_POLICY_MASK) >> 1; tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2; buf_size = (capab & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> 6; __ieee80211_start_rx_ba_session(sta, dialog_token, timeout, start_seq_num, ba_policy, tid, buf_size, true, false); } void ieee80211_manage_rx_ba_offl(struct ieee80211_vif *vif, const u8 *addr, unsigned int tid) { struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); struct ieee80211_local *local = sdata->local; struct sta_info *sta; rcu_read_lock(); sta = sta_info_get_bss(sdata, addr); if (!sta) goto unlock; set_bit(tid, sta->ampdu_mlme.tid_rx_manage_offl); ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work); unlock: rcu_read_unlock(); } EXPORT_SYMBOL(ieee80211_manage_rx_ba_offl); void ieee80211_rx_ba_timer_expired(struct ieee80211_vif *vif, const u8 *addr, unsigned int tid) { struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); struct ieee80211_local *local = sdata->local; struct sta_info *sta; rcu_read_lock(); sta = sta_info_get_bss(sdata, addr); if (!sta) goto unlock; set_bit(tid, sta->ampdu_mlme.tid_rx_timer_expired); ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work); unlock: rcu_read_unlock(); } EXPORT_SYMBOL(ieee80211_rx_ba_timer_expired); backport-iwlwifi-dkms-7906/net/mac80211/agg-tx.c000066400000000000000000000711761351064634500211660ustar00rootroot00000000000000/* * HT handling * * Copyright 2003, Jouni Malinen * Copyright 2002-2005, Instant802 Networks, Inc. * Copyright 2005-2006, Devicescape Software, Inc. * Copyright 2006-2007 Jiri Benc * Copyright 2007, Michael Wu * Copyright 2007-2010, Intel Corporation * Copyright(c) 2015-2017 Intel Deutschland GmbH * Copyright (C) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/ieee80211.h> #include <linux/slab.h> #include <linux/export.h> #include <net/mac80211.h> #include "ieee80211_i.h" #include "driver-ops.h" #include "wme.h" /** * DOC: TX A-MPDU aggregation * * Aggregation on the TX side requires setting the hardware flag * %IEEE80211_HW_AMPDU_AGGREGATION. The driver will then be handed * packets with a flag indicating A-MPDU aggregation. The driver * or device is responsible for actually aggregating the frames, * as well as deciding how many and which to aggregate. * * When TX aggregation is started by some subsystem (usually the rate * control algorithm would be appropriate) by calling the * ieee80211_start_tx_ba_session() function, the driver will be * notified via its @ampdu_action function, with the * %IEEE80211_AMPDU_TX_START action. * * In response to that, the driver is later required to call the * ieee80211_start_tx_ba_cb_irqsafe() function, which will really * start the aggregation session after the peer has also responded. * If the peer responds negatively, the session will be stopped * again right away. Note that it is possible for the aggregation * session to be stopped before the driver has indicated that it * is done setting it up, in which case it must not indicate the * setup completion. * * Also note that, since we also need to wait for a response from * the peer, the driver is notified of the completion of the * handshake by the %IEEE80211_AMPDU_TX_OPERATIONAL action to the * @ampdu_action callback.
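* * As an illustration only (not part of this driver; "my_ampdu_action" * is a made-up name, and a real driver would also program its hardware * in each case), the TX side of an @ampdu_action callback could look * roughly like: * * static int my_ampdu_action(struct ieee80211_hw *hw, * struct ieee80211_vif *vif, * struct ieee80211_ampdu_params *params) * { * switch (params->action) { * case IEEE80211_AMPDU_TX_START: * ieee80211_start_tx_ba_cb_irqsafe(vif, params->sta->addr, * params->tid); * return 0; * case IEEE80211_AMPDU_TX_OPERATIONAL: * return 0; * case IEEE80211_AMPDU_TX_STOP_CONT: * case IEEE80211_AMPDU_TX_STOP_FLUSH: * case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: * ieee80211_stop_tx_ba_cb_irqsafe(vif, params->sta->addr, * params->tid); * return 0; * default: * return -EOPNOTSUPP; * } * }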
* * Similarly, when the aggregation session is stopped by the peer * or something calling ieee80211_stop_tx_ba_session(), the driver's * @ampdu_action function will be called with the action * %IEEE80211_AMPDU_TX_STOP. In this case, the call must not fail, * and the driver must later call ieee80211_stop_tx_ba_cb_irqsafe(). * Note that the sta can get destroyed before the BA tear down is * complete. */ static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata, const u8 *da, u16 tid, u8 dialog_token, u16 start_seq_num, u16 agg_size, u16 timeout) { struct ieee80211_local *local = sdata->local; struct sk_buff *skb; struct ieee80211_mgmt *mgmt; u16 capab; skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom); if (!skb) return; skb_reserve(skb, local->hw.extra_tx_headroom); mgmt = skb_put_zero(skb, 24); memcpy(mgmt->da, da, ETH_ALEN); memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); if (sdata->vif.type == NL80211_IFTYPE_AP || sdata->vif.type == NL80211_IFTYPE_AP_VLAN || sdata->vif.type == NL80211_IFTYPE_MESH_POINT) memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN); else if (sdata->vif.type == NL80211_IFTYPE_STATION) memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN); else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) memcpy(mgmt->bssid, sdata->u.ibss.bssid, ETH_ALEN); mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ACTION); skb_put(skb, 1 + sizeof(mgmt->u.action.u.addba_req)); mgmt->u.action.category = WLAN_CATEGORY_BACK; mgmt->u.action.u.addba_req.action_code = WLAN_ACTION_ADDBA_REQ; mgmt->u.action.u.addba_req.dialog_token = dialog_token; capab = (u16)(1 << 0); /* bit 0 A-MSDU support */ capab |= (u16)(1 << 1); /* bit 1 aggregation policy */ capab |= (u16)(tid << 2); /* bit 5:2 TID number */ capab |= (u16)(agg_size << 6); /* bit 15:6 max size of aggregation */ mgmt->u.action.u.addba_req.capab = cpu_to_le16(capab); mgmt->u.action.u.addba_req.timeout = cpu_to_le16(timeout); mgmt->u.action.u.addba_req.start_seq_num = cpu_to_le16(start_seq_num << 4); ieee80211_tx_skb(sdata, skb); } void ieee80211_send_bar(struct ieee80211_vif *vif, u8 *ra, u16 tid, u16 ssn) { struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); struct ieee80211_local *local = sdata->local; struct sk_buff *skb; struct ieee80211_bar *bar; u16 bar_control = 0; skb = dev_alloc_skb(sizeof(*bar) + local->hw.extra_tx_headroom); if (!skb) return; skb_reserve(skb, local->hw.extra_tx_headroom); bar = skb_put_zero(skb, sizeof(*bar)); bar->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_BACK_REQ); memcpy(bar->ra, ra, ETH_ALEN); memcpy(bar->ta, sdata->vif.addr, ETH_ALEN); bar_control |= (u16)IEEE80211_BAR_CTRL_ACK_POLICY_NORMAL; bar_control |= (u16)IEEE80211_BAR_CTRL_CBMTID_COMPRESSED_BA; bar_control |= (u16)(tid << IEEE80211_BAR_CTRL_TID_INFO_SHIFT); bar->control = cpu_to_le16(bar_control); bar->start_seq_num = cpu_to_le16(ssn); IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT | IEEE80211_TX_CTL_REQ_TX_STATUS; ieee80211_tx_skb_tid(sdata, skb, tid); } EXPORT_SYMBOL(ieee80211_send_bar); void ieee80211_assign_tid_tx(struct sta_info *sta, int tid, struct tid_ampdu_tx *tid_tx) { lockdep_assert_held(&sta->ampdu_mlme.mtx); lockdep_assert_held(&sta->lock); rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], tid_tx); } /* * When multiple aggregation sessions on multiple stations * are being created/destroyed simultaneously, we need to * refcount the global queue stop caused by that in order * to not get into a situation where one of the aggregation * setup or teardown
re-enables queues before the other is * ready to handle that. * * These two functions take care of this issue by keeping * a global "agg_queue_stop" refcount. */ static void __acquires(agg_queue) ieee80211_stop_queue_agg(struct ieee80211_sub_if_data *sdata, int tid) { int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)]; /* we do refcounting here, so don't use the queue reason refcounting */ if (atomic_inc_return(&sdata->local->agg_queue_stop[queue]) == 1) ieee80211_stop_queue_by_reason( &sdata->local->hw, queue, IEEE80211_QUEUE_STOP_REASON_AGGREGATION, false); __acquire(agg_queue); } static void __releases(agg_queue) ieee80211_wake_queue_agg(struct ieee80211_sub_if_data *sdata, int tid) { int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)]; if (atomic_dec_return(&sdata->local->agg_queue_stop[queue]) == 0) ieee80211_wake_queue_by_reason( &sdata->local->hw, queue, IEEE80211_QUEUE_STOP_REASON_AGGREGATION, false); __release(agg_queue); } static void ieee80211_agg_stop_txq(struct sta_info *sta, int tid) { struct ieee80211_txq *txq = sta->sta.txq[tid]; struct ieee80211_sub_if_data *sdata; struct fq *fq; struct txq_info *txqi; if (!txq) return; txqi = to_txq_info(txq); sdata = vif_to_sdata(txq->vif); fq = &sdata->local->fq; /* Lock here to protect against further seqno updates on dequeue */ spin_lock_bh(&fq->lock); set_bit(IEEE80211_TXQ_STOP, &txqi->flags); spin_unlock_bh(&fq->lock); } static void ieee80211_agg_start_txq(struct sta_info *sta, int tid, bool enable) { struct ieee80211_txq *txq = sta->sta.txq[tid]; struct txq_info *txqi; if (!txq) return; txqi = to_txq_info(txq); if (enable) set_bit(IEEE80211_TXQ_AMPDU, &txqi->flags); else clear_bit(IEEE80211_TXQ_AMPDU, &txqi->flags); clear_bit(IEEE80211_TXQ_STOP, &txqi->flags); local_bh_disable(); rcu_read_lock(); drv_wake_tx_queue(sta->sdata->local, txqi); rcu_read_unlock(); local_bh_enable(); } /* * splice packets from the STA's pending to the local pending, * requires a call to ieee80211_agg_splice_finish later */ static void __acquires(agg_queue) ieee80211_agg_splice_packets(struct ieee80211_sub_if_data *sdata, struct tid_ampdu_tx *tid_tx, u16 tid) { struct ieee80211_local *local = sdata->local; int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)]; unsigned long flags; ieee80211_stop_queue_agg(sdata, tid); if (WARN(!tid_tx, "TID %d gone but expected when splicing aggregates from the pending queue\n", tid)) return; if (!skb_queue_empty(&tid_tx->pending)) { spin_lock_irqsave(&local->queue_stop_reason_lock, flags); /* copy over remaining packets */ skb_queue_splice_tail_init(&tid_tx->pending, &local->pending[queue]); spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); } } static void __releases(agg_queue) ieee80211_agg_splice_finish(struct ieee80211_sub_if_data *sdata, u16 tid) { ieee80211_wake_queue_agg(sdata, tid); } static void ieee80211_remove_tid_tx(struct sta_info *sta, int tid) { struct tid_ampdu_tx *tid_tx; lockdep_assert_held(&sta->ampdu_mlme.mtx); lockdep_assert_held(&sta->lock); tid_tx = rcu_dereference_protected_tid_tx(sta, tid); /* * When we get here, the TX path will not be lockless any more wrt. * aggregation, since the OPERATIONAL bit has long been cleared. * Thus it will block on getting the lock, if it occurs. So if we * stop the queue now, we will not get any more packets, and any * that might be being processed will wait for us here, thereby * guaranteeing that no packets go to the tid_tx pending queue any * more. 
*/ ieee80211_agg_splice_packets(sta->sdata, tid_tx, tid); /* future packets must not find the tid_tx struct any more */ ieee80211_assign_tid_tx(sta, tid, NULL); ieee80211_agg_splice_finish(sta->sdata, tid); ieee80211_agg_start_txq(sta, tid, false); kfree_rcu(tid_tx, rcu_head); } int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, enum ieee80211_agg_stop_reason reason) { struct ieee80211_local *local = sta->local; struct tid_ampdu_tx *tid_tx; struct ieee80211_ampdu_params params = { .sta = &sta->sta, .tid = tid, .buf_size = 0, .amsdu = false, .timeout = 0, .ssn = 0, }; int ret; lockdep_assert_held(&sta->ampdu_mlme.mtx); switch (reason) { case AGG_STOP_DECLINED: case AGG_STOP_LOCAL_REQUEST: case AGG_STOP_PEER_REQUEST: params.action = IEEE80211_AMPDU_TX_STOP_CONT; break; case AGG_STOP_DESTROY_STA: params.action = IEEE80211_AMPDU_TX_STOP_FLUSH; break; default: WARN_ON_ONCE(1); return -EINVAL; } spin_lock_bh(&sta->lock); /* free struct pending for start, if present */ tid_tx = sta->ampdu_mlme.tid_start_tx[tid]; kfree(tid_tx); sta->ampdu_mlme.tid_start_tx[tid] = NULL; tid_tx = rcu_dereference_protected_tid_tx(sta, tid); if (!tid_tx) { spin_unlock_bh(&sta->lock); return -ENOENT; } /* * if we're already stopping ignore any new requests to stop * unless we're destroying it in which case notify the driver */ if (test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) { spin_unlock_bh(&sta->lock); if (reason != AGG_STOP_DESTROY_STA) return -EALREADY; params.action = IEEE80211_AMPDU_TX_STOP_FLUSH_CONT; ret = drv_ampdu_action(local, sta->sdata, ¶ms); WARN_ON_ONCE(ret); return 0; } if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) { /* not even started yet! */ ieee80211_assign_tid_tx(sta, tid, NULL); spin_unlock_bh(&sta->lock); kfree_rcu(tid_tx, rcu_head); return 0; } set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state); ieee80211_agg_stop_txq(sta, tid); spin_unlock_bh(&sta->lock); ht_dbg(sta->sdata, "Tx BA session stop requested for %pM tid %u\n", sta->sta.addr, tid); del_timer_sync(&tid_tx->addba_resp_timer); del_timer_sync(&tid_tx->session_timer); /* * After this packets are no longer handed right through * to the driver but are put onto tid_tx->pending instead, * with locking to ensure proper access. */ clear_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state); /* * There might be a few packets being processed right now (on * another CPU) that have already gotten past the aggregation * check when it was still OPERATIONAL and consequently have * IEEE80211_TX_CTL_AMPDU set. In that case, this code might * call into the driver at the same time or even before the * TX paths calls into it, which could confuse the driver. * * Wait for all currently running TX paths to finish before * telling the driver. New packets will not go through since * the aggregation session is no longer OPERATIONAL. */ if (!local->in_reconfig) synchronize_net(); tid_tx->stop_initiator = reason == AGG_STOP_PEER_REQUEST ? WLAN_BACK_RECIPIENT : WLAN_BACK_INITIATOR; tid_tx->tx_stop = reason == AGG_STOP_LOCAL_REQUEST; ret = drv_ampdu_action(local, sta->sdata, ¶ms); /* HW shall not deny going back to legacy */ if (WARN_ON(ret)) { /* * We may have pending packets get stuck in this case... * Not bothering with a workaround for now. */ } /* * In the case of AGG_STOP_DESTROY_STA, the driver won't * necessarily call ieee80211_stop_tx_ba_cb(), so this may * seem like we can leave the tid_tx data pending forever. * This is true, in a way, but "forever" is only until the * station struct is actually destroyed. 
In the meantime, * leaving it around ensures that we don't transmit packets * to the driver on this TID which might confuse it. */ return 0; } /* * After sending add Block Ack request we activated a timer until * add Block Ack response will arrive from the recipient. * If this timer expires sta_addba_resp_timer_expired will be executed. */ static void sta_addba_resp_timer_expired(struct timer_list *t) { struct tid_ampdu_tx *tid_tx = from_timer(tid_tx, t, addba_resp_timer); struct sta_info *sta = tid_tx->sta; u8 tid = tid_tx->tid; /* check if the TID waits for addBA response */ if (test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state)) { ht_dbg(sta->sdata, "timer expired on %pM tid %d not expecting addBA response\n", sta->sta.addr, tid); return; } ht_dbg(sta->sdata, "addBA response timer expired on %pM tid %d\n", sta->sta.addr, tid); ieee80211_stop_tx_ba_session(&sta->sta, tid); } void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid) { struct tid_ampdu_tx *tid_tx; struct ieee80211_local *local = sta->local; struct ieee80211_sub_if_data *sdata = sta->sdata; struct ieee80211_ampdu_params params = { .sta = &sta->sta, .action = IEEE80211_AMPDU_TX_START, .tid = tid, .buf_size = 0, .amsdu = false, .timeout = 0, }; int ret; u16 buf_size; tid_tx = rcu_dereference_protected_tid_tx(sta, tid); /* * Start queuing up packets for this aggregation session. * We're going to release them once the driver is OK with * that. */ clear_bit(HT_AGG_STATE_WANT_START, &tid_tx->state); ieee80211_agg_stop_txq(sta, tid); /* * Make sure no packets are being processed. This ensures that * we have a valid starting sequence number and that in-flight * packets have been flushed out and no packets for this TID * will go into the driver during the ampdu_action call. */ synchronize_net(); params.ssn = sta->tid_seq[tid] >> 4; ret = drv_ampdu_action(local, sdata, ¶ms); if (ret) { ht_dbg(sdata, "BA request denied - HW unavailable for %pM tid %d\n", sta->sta.addr, tid); spin_lock_bh(&sta->lock); ieee80211_agg_splice_packets(sdata, tid_tx, tid); ieee80211_assign_tid_tx(sta, tid, NULL); ieee80211_agg_splice_finish(sdata, tid); spin_unlock_bh(&sta->lock); ieee80211_agg_start_txq(sta, tid, false); kfree_rcu(tid_tx, rcu_head); return; } /* activate the timer for the recipient's addBA response */ mod_timer(&tid_tx->addba_resp_timer, jiffies + ADDBA_RESP_INTERVAL); ht_dbg(sdata, "activated addBA response timer on %pM tid %d\n", sta->sta.addr, tid); spin_lock_bh(&sta->lock); sta->ampdu_mlme.last_addba_req_time[tid] = jiffies; sta->ampdu_mlme.addba_req_num[tid]++; spin_unlock_bh(&sta->lock); if (sta->sta.he_cap.has_he) { buf_size = local->hw.max_tx_aggregation_subframes; } else { /* * We really should use what the driver told us it will * transmit as the maximum, but certain APs (e.g. the * LinkSys WRT120N with FW v1.0.07 build 002 Jun 18 2012) * will crash when we use a lower number. */ buf_size = IEEE80211_MAX_AMPDU_BUF_HT; } /* send AddBA request */ ieee80211_send_addba_request(sdata, sta->sta.addr, tid, tid_tx->dialog_token, params.ssn, buf_size, tid_tx->timeout); } /* * After accepting the AddBA Response we activated a timer, * resetting it after each frame that we send. 
*/ static void sta_tx_agg_session_timer_expired(struct timer_list *t) { struct tid_ampdu_tx *tid_tx = from_timer(tid_tx, t, session_timer); struct sta_info *sta = tid_tx->sta; u8 tid = tid_tx->tid; unsigned long timeout; if (test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) { return; } timeout = tid_tx->last_tx + TU_TO_JIFFIES(tid_tx->timeout); if (time_is_after_jiffies(timeout)) { mod_timer(&tid_tx->session_timer, timeout); return; } ht_dbg(sta->sdata, "tx session timer expired on %pM tid %d\n", sta->sta.addr, tid); ieee80211_stop_tx_ba_session(&sta->sta, tid); } int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid, u16 timeout) { struct sta_info *sta = container_of(pubsta, struct sta_info, sta); struct ieee80211_sub_if_data *sdata = sta->sdata; struct ieee80211_local *local = sdata->local; struct tid_ampdu_tx *tid_tx; int ret = 0; trace_api_start_tx_ba_session(pubsta, tid); if (WARN(sta->reserved_tid == tid, "Requested to start BA session on reserved tid=%d", tid)) return -EINVAL; if (!pubsta->ht_cap.ht_supported) return -EINVAL; if (WARN_ON_ONCE(!local->ops->ampdu_action)) return -EINVAL; if ((tid >= IEEE80211_NUM_TIDS) || !ieee80211_hw_check(&local->hw, AMPDU_AGGREGATION) || ieee80211_hw_check(&local->hw, TX_AMPDU_SETUP_IN_HW)) return -EINVAL; if (WARN_ON(tid >= IEEE80211_FIRST_TSPEC_TSID)) return -EINVAL; ht_dbg(sdata, "Open BA session requested for %pM tid %u\n", pubsta->addr, tid); if (sdata->vif.type != NL80211_IFTYPE_STATION && sdata->vif.type != NL80211_IFTYPE_MESH_POINT && sdata->vif.type != NL80211_IFTYPE_AP_VLAN && sdata->vif.type != NL80211_IFTYPE_AP && sdata->vif.type != NL80211_IFTYPE_ADHOC) return -EINVAL; if (test_sta_flag(sta, WLAN_STA_BLOCK_BA)) { ht_dbg(sdata, "BA sessions blocked - Denying BA session request %pM tid %d\n", sta->sta.addr, tid); return -EINVAL; } /* * 802.11n-2009 11.5.1.1: If the initiating STA is an HT STA, is a * member of an IBSS, and has no other existing Block Ack agreement * with the recipient STA, then the initiating STA shall transmit a * Probe Request frame to the recipient STA and shall not transmit an * ADDBA Request frame unless it receives a Probe Response frame * from the recipient within dot11ADDBAFailureTimeout. * * The probe request mechanism for ADDBA is currently not implemented, * but we only build up Block Ack session with HT STAs. This information * is set when we receive a bss info from a probe response or a beacon. 
*/ if (sta->sdata->vif.type == NL80211_IFTYPE_ADHOC && !sta->sta.ht_cap.ht_supported) { ht_dbg(sdata, "BA request denied - IBSS STA %pM does not advertise HT support\n", pubsta->addr); return -EINVAL; } spin_lock_bh(&sta->lock); /* we have tried too many times, receiver does not want A-MPDU */ if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_MAX_RETRIES) { ret = -EBUSY; goto err_unlock_sta; } /* * if we have tried more than HT_AGG_BURST_RETRIES times we * will spread our requests in time to avoid stalling connection * for too long */ if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_BURST_RETRIES && time_before(jiffies, sta->ampdu_mlme.last_addba_req_time[tid] + HT_AGG_RETRIES_PERIOD)) { ht_dbg(sdata, "BA request denied - %d failed requests on %pM tid %u\n", sta->ampdu_mlme.addba_req_num[tid], sta->sta.addr, tid); ret = -EBUSY; goto err_unlock_sta; } tid_tx = rcu_dereference_protected_tid_tx(sta, tid); /* check if the TID is not in aggregation flow already */ if (tid_tx || sta->ampdu_mlme.tid_start_tx[tid]) { ht_dbg(sdata, "BA request denied - session is not idle on %pM tid %u\n", sta->sta.addr, tid); ret = -EAGAIN; goto err_unlock_sta; } /* prepare A-MPDU MLME for Tx aggregation */ tid_tx = kzalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC); if (!tid_tx) { ret = -ENOMEM; goto err_unlock_sta; } skb_queue_head_init(&tid_tx->pending); __set_bit(HT_AGG_STATE_WANT_START, &tid_tx->state); tid_tx->timeout = timeout; tid_tx->sta = sta; tid_tx->tid = tid; /* response timer */ timer_setup(&tid_tx->addba_resp_timer, sta_addba_resp_timer_expired, 0); /* tx timer */ timer_setup(&tid_tx->session_timer, sta_tx_agg_session_timer_expired, TIMER_DEFERRABLE); /* assign a dialog token */ sta->ampdu_mlme.dialog_token_allocator++; tid_tx->dialog_token = sta->ampdu_mlme.dialog_token_allocator; /* * Finally, assign it to the start array; the work item will * collect it and move it to the normal array. */ sta->ampdu_mlme.tid_start_tx[tid] = tid_tx; ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work); /* this flow continues off the work */ err_unlock_sta: spin_unlock_bh(&sta->lock); return ret; } EXPORT_SYMBOL(ieee80211_start_tx_ba_session); static void ieee80211_agg_tx_operational(struct ieee80211_local *local, struct sta_info *sta, u16 tid) { struct tid_ampdu_tx *tid_tx; struct ieee80211_ampdu_params params = { .sta = &sta->sta, .action = IEEE80211_AMPDU_TX_OPERATIONAL, .tid = tid, .timeout = 0, .ssn = 0, }; lockdep_assert_held(&sta->ampdu_mlme.mtx); tid_tx = rcu_dereference_protected_tid_tx(sta, tid); params.buf_size = tid_tx->buf_size; params.amsdu = tid_tx->amsdu; ht_dbg(sta->sdata, "Aggregation is on for %pM tid %d\n", sta->sta.addr, tid); drv_ampdu_action(local, sta->sdata, ¶ms); /* * synchronize with TX path, while splicing the TX path * should block so it won't put more packets onto pending. */ spin_lock_bh(&sta->lock); ieee80211_agg_splice_packets(sta->sdata, tid_tx, tid); /* * Now mark as operational. This will be visible * in the TX path, and lets it go lock-free in * the common case. 
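* * (To recap the handshake implemented in this file: a TX session only * reaches this point once both HT_AGG_STATE_DRV_READY -- set when the * driver finished its setup -- and HT_AGG_STATE_RESPONSE_RECEIVED -- * set when the peer accepted the ADDBA request -- are set; whichever * of the two events happens last calls ieee80211_agg_tx_operational().)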
*/ set_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state); ieee80211_agg_splice_finish(sta->sdata, tid); spin_unlock_bh(&sta->lock); ieee80211_agg_start_txq(sta, tid, true); } void ieee80211_start_tx_ba_cb(struct sta_info *sta, int tid, struct tid_ampdu_tx *tid_tx) { struct ieee80211_sub_if_data *sdata = sta->sdata; struct ieee80211_local *local = sdata->local; if (WARN_ON(test_and_set_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state))) return; if (test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state)) ieee80211_agg_tx_operational(local, sta, tid); } static struct tid_ampdu_tx * ieee80211_lookup_tid_tx(struct ieee80211_sub_if_data *sdata, const u8 *ra, u16 tid, struct sta_info **sta) { struct tid_ampdu_tx *tid_tx; if (tid >= IEEE80211_NUM_TIDS) { ht_dbg(sdata, "Bad TID value: tid = %d (>= %d)\n", tid, IEEE80211_NUM_TIDS); return NULL; } *sta = sta_info_get_bss(sdata, ra); if (!*sta) { ht_dbg(sdata, "Could not find station: %pM\n", ra); return NULL; } tid_tx = rcu_dereference((*sta)->ampdu_mlme.tid_tx[tid]); if (WARN_ON(!tid_tx)) ht_dbg(sdata, "addBA was not requested!\n"); return tid_tx; } void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif, const u8 *ra, u16 tid) { struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); struct ieee80211_local *local = sdata->local; struct sta_info *sta; struct tid_ampdu_tx *tid_tx; trace_api_start_tx_ba_cb(sdata, ra, tid); rcu_read_lock(); tid_tx = ieee80211_lookup_tid_tx(sdata, ra, tid, &sta); if (!tid_tx) goto out; set_bit(HT_AGG_STATE_START_CB, &tid_tx->state); ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work); out: rcu_read_unlock(); } EXPORT_SYMBOL(ieee80211_start_tx_ba_cb_irqsafe); int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, enum ieee80211_agg_stop_reason reason) { int ret; mutex_lock(&sta->ampdu_mlme.mtx); ret = ___ieee80211_stop_tx_ba_session(sta, tid, reason); mutex_unlock(&sta->ampdu_mlme.mtx); return ret; } int ieee80211_stop_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid) { struct sta_info *sta = container_of(pubsta, struct sta_info, sta); struct ieee80211_sub_if_data *sdata = sta->sdata; struct ieee80211_local *local = sdata->local; struct tid_ampdu_tx *tid_tx; int ret = 0; trace_api_stop_tx_ba_session(pubsta, tid); if (!local->ops->ampdu_action) return -EINVAL; if (tid >= IEEE80211_NUM_TIDS) return -EINVAL; spin_lock_bh(&sta->lock); tid_tx = rcu_dereference_protected_tid_tx(sta, tid); if (!tid_tx) { ret = -ENOENT; goto unlock; } WARN(sta->reserved_tid == tid, "Requested to stop BA session on reserved tid=%d", tid); if (test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) { /* already in progress stopping it */ ret = 0; goto unlock; } set_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state); ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work); unlock: spin_unlock_bh(&sta->lock); return ret; } EXPORT_SYMBOL(ieee80211_stop_tx_ba_session); void ieee80211_stop_tx_ba_cb(struct sta_info *sta, int tid, struct tid_ampdu_tx *tid_tx) { struct ieee80211_sub_if_data *sdata = sta->sdata; bool send_delba = false; ht_dbg(sdata, "Stopping Tx BA session for %pM tid %d\n", sta->sta.addr, tid); spin_lock_bh(&sta->lock); if (!test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) { ht_dbg(sdata, "unexpected callback to A-MPDU stop for %pM tid %d\n", sta->sta.addr, tid); goto unlock_sta; } if (tid_tx->stop_initiator == WLAN_BACK_INITIATOR && tid_tx->tx_stop) send_delba = true; ieee80211_remove_tid_tx(sta, tid); unlock_sta: spin_unlock_bh(&sta->lock); if (send_delba) ieee80211_send_delba(sdata, sta->sta.addr, tid, WLAN_BACK_INITIATOR, 
WLAN_REASON_QSTA_NOT_USE); } void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif, const u8 *ra, u16 tid) { struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); struct ieee80211_local *local = sdata->local; struct sta_info *sta; struct tid_ampdu_tx *tid_tx; trace_api_stop_tx_ba_cb(sdata, ra, tid); rcu_read_lock(); tid_tx = ieee80211_lookup_tid_tx(sdata, ra, tid, &sta); if (!tid_tx) goto out; set_bit(HT_AGG_STATE_STOP_CB, &tid_tx->state); ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work); out: rcu_read_unlock(); } EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb_irqsafe); void ieee80211_process_addba_resp(struct ieee80211_local *local, struct sta_info *sta, struct ieee80211_mgmt *mgmt, size_t len) { struct tid_ampdu_tx *tid_tx; struct ieee80211_txq *txq; u16 capab, tid, buf_size; bool amsdu; capab = le16_to_cpu(mgmt->u.action.u.addba_resp.capab); amsdu = capab & IEEE80211_ADDBA_PARAM_AMSDU_MASK; tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2; buf_size = (capab & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> 6; buf_size = min(buf_size, local->hw.max_tx_aggregation_subframes); txq = sta->sta.txq[tid]; if (!amsdu && txq) set_bit(IEEE80211_TXQ_NO_AMSDU, &to_txq_info(txq)->flags); mutex_lock(&sta->ampdu_mlme.mtx); tid_tx = rcu_dereference_protected_tid_tx(sta, tid); if (!tid_tx) goto out; if (mgmt->u.action.u.addba_resp.dialog_token != tid_tx->dialog_token) { ht_dbg(sta->sdata, "wrong addBA response token, %pM tid %d\n", sta->sta.addr, tid); goto out; } del_timer_sync(&tid_tx->addba_resp_timer); ht_dbg(sta->sdata, "switched off addBA timer for %pM tid %d\n", sta->sta.addr, tid); /* * addba_resp_timer may have fired before we got here, and * caused WANT_STOP to be set. If the stop then was already * processed further, STOPPING might be set. */ if (test_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state) || test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) { ht_dbg(sta->sdata, "got addBA resp for %pM tid %d but we already gave up\n", sta->sta.addr, tid); goto out; } /* * IEEE 802.11-2007 7.3.1.14: * In an ADDBA Response frame, when the Status Code field * is set to 0, the Buffer Size subfield is set to a value * of at least 1. */ if (le16_to_cpu(mgmt->u.action.u.addba_resp.status) == WLAN_STATUS_SUCCESS && buf_size) { if (test_and_set_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state)) { /* ignore duplicate response */ goto out; } tid_tx->buf_size = buf_size; tid_tx->amsdu = amsdu; if (test_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state)) ieee80211_agg_tx_operational(local, sta, tid); sta->ampdu_mlme.addba_req_num[tid] = 0; tid_tx->timeout = le16_to_cpu(mgmt->u.action.u.addba_resp.timeout); if (tid_tx->timeout) { mod_timer(&tid_tx->session_timer, TU_TO_EXP_TIME(tid_tx->timeout)); tid_tx->last_tx = jiffies; } } else { ___ieee80211_stop_tx_ba_session(sta, tid, AGG_STOP_DECLINED); } out: mutex_unlock(&sta->ampdu_mlme.mtx); } backport-iwlwifi-dkms-7906/net/mac80211/cfg.c000066400000000000000000003227301351064634500205350ustar00rootroot00000000000000/* * mac80211 configuration hooks for cfg80211 * * Copyright 2006-2010 Johannes Berg * Copyright 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2015-2017 Intel Deutschland GmbH * Copyright (C) 2018-2019 Intel Corporation * * This file is GPLv2 as found in COPYING. 
*/ #include <linux/ieee80211.h> #include <linux/nl80211.h> #include <linux/rtnetlink.h> #include <linux/slab.h> #include <net/net_namespace.h> #include <linux/rcupdate.h> #include <linux/if_ether.h> #include <net/cfg80211.h> #include "ieee80211_i.h" #include "driver-ops.h" #include "rate.h" #include "mesh.h" #include "wme.h" static void ieee80211_set_mu_mimo_follow(struct ieee80211_sub_if_data *sdata, struct vif_params *params) { bool mu_mimo_groups = false; bool mu_mimo_follow = false; if (params->vht_mumimo_groups) { u64 membership; BUILD_BUG_ON(sizeof(membership) != WLAN_MEMBERSHIP_LEN); memcpy(sdata->vif.bss_conf.mu_group.membership, params->vht_mumimo_groups, WLAN_MEMBERSHIP_LEN); memcpy(sdata->vif.bss_conf.mu_group.position, params->vht_mumimo_groups + WLAN_MEMBERSHIP_LEN, WLAN_USER_POSITION_LEN); ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_MU_GROUPS); /* don't care about endianness - just check for 0 */ memcpy(&membership, params->vht_mumimo_groups, WLAN_MEMBERSHIP_LEN); mu_mimo_groups = membership != 0; } if (params->vht_mumimo_follow_addr) { mu_mimo_follow = is_valid_ether_addr(params->vht_mumimo_follow_addr); ether_addr_copy(sdata->u.mntr.mu_follow_addr, params->vht_mumimo_follow_addr); } sdata->vif.mu_mimo_owner = mu_mimo_groups || mu_mimo_follow; } static int ieee80211_set_mon_options(struct ieee80211_sub_if_data *sdata, struct vif_params *params) { struct ieee80211_local *local = sdata->local; struct ieee80211_sub_if_data *monitor_sdata; /* check flags first */ if (params->flags && ieee80211_sdata_running(sdata)) { u32 mask = MONITOR_FLAG_COOK_FRAMES | MONITOR_FLAG_ACTIVE; /* * Prohibit MONITOR_FLAG_COOK_FRAMES and * MONITOR_FLAG_ACTIVE from being changed while the * interface is up. * Else we would need to add a lot of cruft * to update everything: * cooked_mntrs, monitor and all fif_* counters, * reconfigure hardware */ if ((params->flags & mask) != (sdata->u.mntr.flags & mask)) return -EBUSY; } /* also validate MU-MIMO change */ monitor_sdata = rtnl_dereference(local->monitor_sdata); if (!monitor_sdata && (params->vht_mumimo_groups || params->vht_mumimo_follow_addr)) return -EOPNOTSUPP; /* apply all changes now - no failures allowed */ if (monitor_sdata) ieee80211_set_mu_mimo_follow(monitor_sdata, params); if (params->flags) { if (ieee80211_sdata_running(sdata)) { ieee80211_adjust_monitor_flags(sdata, -1); sdata->u.mntr.flags = params->flags; ieee80211_adjust_monitor_flags(sdata, 1); ieee80211_configure_filter(local); } else { /* * Because the interface is down, ieee80211_do_stop * and ieee80211_do_open take care of "everything" * mentioned in the comment above.
*/ sdata->u.mntr.flags = params->flags; } } return 0; } static struct wireless_dev *ieee80211_add_iface(struct wiphy *wiphy, const char *name, unsigned char name_assign_type, enum nl80211_iftype type, struct vif_params *params) { struct ieee80211_local *local = wiphy_priv(wiphy); struct wireless_dev *wdev; struct ieee80211_sub_if_data *sdata; int err; err = ieee80211_if_add(local, name, name_assign_type, &wdev, type, params); if (err) return ERR_PTR(err); sdata = IEEE80211_WDEV_TO_SUB_IF(wdev); if (type == NL80211_IFTYPE_MONITOR) { err = ieee80211_set_mon_options(sdata, params); if (err) { ieee80211_if_remove(sdata); return NULL; } } return wdev; } static int ieee80211_del_iface(struct wiphy *wiphy, struct wireless_dev *wdev) { ieee80211_if_remove(IEEE80211_WDEV_TO_SUB_IF(wdev)); return 0; } static int ieee80211_change_iface(struct wiphy *wiphy, struct net_device *dev, enum nl80211_iftype type, struct vif_params *params) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); int ret; ret = ieee80211_if_change_type(sdata, type); if (ret) return ret; if (type == NL80211_IFTYPE_AP_VLAN && params->use_4addr == 0) { RCU_INIT_POINTER(sdata->u.vlan.sta, NULL); ieee80211_check_fast_rx_iface(sdata); } else if (type == NL80211_IFTYPE_STATION && params->use_4addr >= 0) { sdata->u.mgd.use_4addr = params->use_4addr; } if (sdata->vif.type == NL80211_IFTYPE_MONITOR) { ret = ieee80211_set_mon_options(sdata, params); if (ret) return ret; } return 0; } static int ieee80211_start_p2p_device(struct wiphy *wiphy, struct wireless_dev *wdev) { struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev); int ret; mutex_lock(&sdata->local->chanctx_mtx); ret = ieee80211_check_combinations(sdata, NULL, 0, 0); mutex_unlock(&sdata->local->chanctx_mtx); if (ret < 0) return ret; return ieee80211_do_open(wdev, true); } static void ieee80211_stop_p2p_device(struct wiphy *wiphy, struct wireless_dev *wdev) { ieee80211_sdata_stop(IEEE80211_WDEV_TO_SUB_IF(wdev)); } static int ieee80211_start_nan(struct wiphy *wiphy, struct wireless_dev *wdev, struct cfg80211_nan_conf *conf) { struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev); int ret; mutex_lock(&sdata->local->chanctx_mtx); ret = ieee80211_check_combinations(sdata, NULL, 0, 0); mutex_unlock(&sdata->local->chanctx_mtx); if (ret < 0) return ret; ret = ieee80211_do_open(wdev, true); if (ret) return ret; ret = drv_start_nan(sdata->local, sdata, conf); if (ret) ieee80211_sdata_stop(sdata); sdata->u.nan.conf = *conf; return ret; } static void ieee80211_stop_nan(struct wiphy *wiphy, struct wireless_dev *wdev) { struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev); drv_stop_nan(sdata->local, sdata); ieee80211_sdata_stop(sdata); } static int ieee80211_nan_change_conf(struct wiphy *wiphy, struct wireless_dev *wdev, struct cfg80211_nan_conf *conf, u32 changes) { struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev); struct cfg80211_nan_conf new_conf; int ret = 0; if (sdata->vif.type != NL80211_IFTYPE_NAN) return -EOPNOTSUPP; if (!ieee80211_sdata_running(sdata)) return -ENETDOWN; new_conf = sdata->u.nan.conf; if (changes & CFG80211_NAN_CONF_CHANGED_PREF) new_conf.master_pref = conf->master_pref; if (changes & CFG80211_NAN_CONF_CHANGED_BANDS) new_conf.bands = conf->bands; if (changes & CFG80211_NAN_CONF_CHANGED_CDW_2G) new_conf.cdw_2g = conf->cdw_2g; if (changes & CFG80211_NAN_CONF_CHANGED_CDW_5G) new_conf.cdw_5g = conf->cdw_5g; ret = drv_nan_change_conf(sdata->local, sdata, &new_conf, changes); if (!ret) 
sdata->u.nan.conf = new_conf; return ret; } static int ieee80211_add_nan_func(struct wiphy *wiphy, struct wireless_dev *wdev, struct cfg80211_nan_func *nan_func) { struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev); int ret; if (sdata->vif.type != NL80211_IFTYPE_NAN) return -EOPNOTSUPP; if (!ieee80211_sdata_running(sdata)) return -ENETDOWN; spin_lock_bh(&sdata->u.nan.func_lock); ret = idr_alloc(&sdata->u.nan.function_inst_ids, nan_func, 1, sdata->local->hw.max_nan_de_entries + 1, GFP_ATOMIC); spin_unlock_bh(&sdata->u.nan.func_lock); if (ret < 0) return ret; nan_func->instance_id = ret; WARN_ON(nan_func->instance_id == 0); ret = drv_add_nan_func(sdata->local, sdata, nan_func); if (ret) { spin_lock_bh(&sdata->u.nan.func_lock); idr_remove(&sdata->u.nan.function_inst_ids, nan_func->instance_id); spin_unlock_bh(&sdata->u.nan.func_lock); } return ret; } static struct cfg80211_nan_func * ieee80211_find_nan_func_by_cookie(struct ieee80211_sub_if_data *sdata, u64 cookie) { struct cfg80211_nan_func *func; int id; lockdep_assert_held(&sdata->u.nan.func_lock); idr_for_each_entry(&sdata->u.nan.function_inst_ids, func, id) { if (func->cookie == cookie) return func; } return NULL; } static void ieee80211_del_nan_func(struct wiphy *wiphy, struct wireless_dev *wdev, u64 cookie) { struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev); struct cfg80211_nan_func *func; u8 instance_id = 0; if (sdata->vif.type != NL80211_IFTYPE_NAN || !ieee80211_sdata_running(sdata)) return; spin_lock_bh(&sdata->u.nan.func_lock); func = ieee80211_find_nan_func_by_cookie(sdata, cookie); if (func) instance_id = func->instance_id; spin_unlock_bh(&sdata->u.nan.func_lock); if (instance_id) drv_del_nan_func(sdata->local, sdata, instance_id); } static int ieee80211_set_noack_map(struct wiphy *wiphy, struct net_device *dev, u16 noack_map) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); sdata->noack_map = noack_map; ieee80211_check_fast_xmit_iface(sdata); return 0; } static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev, u8 key_idx, bool pairwise, const u8 *mac_addr, struct key_params *params) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_local *local = sdata->local; struct sta_info *sta = NULL; const struct ieee80211_cipher_scheme *cs = NULL; struct ieee80211_key *key; int err; if (!ieee80211_sdata_running(sdata)) return -ENETDOWN; /* reject WEP and TKIP keys if WEP failed to initialize */ switch (params->cipher) { case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_TKIP: case WLAN_CIPHER_SUITE_WEP104: if (IS_ERR(local->wep_tx_tfm)) return -EINVAL; break; case WLAN_CIPHER_SUITE_CCMP: case WLAN_CIPHER_SUITE_CCMP_256: case WLAN_CIPHER_SUITE_AES_CMAC: case WLAN_CIPHER_SUITE_BIP_CMAC_256: case WLAN_CIPHER_SUITE_BIP_GMAC_128: case WLAN_CIPHER_SUITE_BIP_GMAC_256: case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: break; default: cs = ieee80211_cs_get(local, params->cipher, sdata->vif.type); break; } key = ieee80211_key_alloc(params->cipher, key_idx, params->key_len, params->key, params->seq_len, params->seq, cs); if (IS_ERR(key)) return PTR_ERR(key); if (pairwise) key->conf.flags |= IEEE80211_KEY_FLAG_PAIRWISE; mutex_lock(&local->sta_mtx); if (mac_addr) { sta = sta_info_get_bss(sdata, mac_addr); /* * The ASSOC test makes sure the driver is ready to * receive the key. 
When wpa_supplicant has roamed * using FT, it attempts to set the key before * association has completed, this rejects that attempt * so it will set the key again after association. * * TODO: accept the key if we have a station entry and * add it to the device after the station. */ if (!sta || !test_sta_flag(sta, WLAN_STA_ASSOC)) { ieee80211_key_free_unused(key); err = -ENOENT; goto out_unlock; } } switch (sdata->vif.type) { case NL80211_IFTYPE_STATION: if (sdata->u.mgd.mfp != IEEE80211_MFP_DISABLED) key->conf.flags |= IEEE80211_KEY_FLAG_RX_MGMT; break; case NL80211_IFTYPE_AP: case NL80211_IFTYPE_AP_VLAN: /* Keys without a station are used for TX only */ if (sta && test_sta_flag(sta, WLAN_STA_MFP)) key->conf.flags |= IEEE80211_KEY_FLAG_RX_MGMT; break; case NL80211_IFTYPE_ADHOC: /* no MFP (yet) */ break; case NL80211_IFTYPE_MESH_POINT: #ifdef CPTCFG_MAC80211_MESH if (sdata->u.mesh.security != IEEE80211_MESH_SEC_NONE) key->conf.flags |= IEEE80211_KEY_FLAG_RX_MGMT; break; #endif case NL80211_IFTYPE_WDS: case NL80211_IFTYPE_MONITOR: case NL80211_IFTYPE_P2P_DEVICE: case NL80211_IFTYPE_NAN: case NL80211_IFTYPE_UNSPECIFIED: case NUM_NL80211_IFTYPES: case NL80211_IFTYPE_P2P_CLIENT: case NL80211_IFTYPE_P2P_GO: case NL80211_IFTYPE_OCB: case NL80211_IFTYPE_NAN_DATA: /* shouldn't happen */ WARN_ON_ONCE(1); break; } if (sta) sta->cipher_scheme = cs; err = ieee80211_key_link(key, sdata, sta); out_unlock: mutex_unlock(&local->sta_mtx); return err; } static int ieee80211_del_key(struct wiphy *wiphy, struct net_device *dev, u8 key_idx, bool pairwise, const u8 *mac_addr) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_local *local = sdata->local; struct sta_info *sta; struct ieee80211_key *key = NULL; int ret; mutex_lock(&local->sta_mtx); mutex_lock(&local->key_mtx); if (mac_addr) { ret = -ENOENT; sta = sta_info_get_bss(sdata, mac_addr); if (!sta) goto out_unlock; if (pairwise) key = key_mtx_dereference(local, sta->ptk[key_idx]); else key = key_mtx_dereference(local, sta->gtk[key_idx]); } else key = key_mtx_dereference(local, sdata->keys[key_idx]); if (!key) { ret = -ENOENT; goto out_unlock; } ieee80211_key_free(key, sdata->vif.type == NL80211_IFTYPE_STATION); ret = 0; out_unlock: mutex_unlock(&local->key_mtx); mutex_unlock(&local->sta_mtx); return ret; } static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev, u8 key_idx, bool pairwise, const u8 *mac_addr, void *cookie, void (*callback)(void *cookie, struct key_params *params)) { struct ieee80211_sub_if_data *sdata; struct sta_info *sta = NULL; u8 seq[6] = {0}; struct key_params params; struct ieee80211_key *key = NULL; u64 pn64; u32 iv32; u16 iv16; int err = -ENOENT; struct ieee80211_key_seq kseq = {}; sdata = IEEE80211_DEV_TO_SUB_IF(dev); rcu_read_lock(); if (mac_addr) { sta = sta_info_get_bss(sdata, mac_addr); if (!sta) goto out; if (pairwise && key_idx < NUM_DEFAULT_KEYS) key = rcu_dereference(sta->ptk[key_idx]); else if (!pairwise && key_idx < NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS) key = rcu_dereference(sta->gtk[key_idx]); } else key = rcu_dereference(sdata->keys[key_idx]); if (!key) goto out; memset(¶ms, 0, sizeof(params)); params.cipher = key->conf.cipher; switch (key->conf.cipher) { case WLAN_CIPHER_SUITE_TKIP: pn64 = atomic64_read(&key->conf.tx_pn); iv32 = TKIP_PN_TO_IV32(pn64); iv16 = TKIP_PN_TO_IV16(pn64); if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE && !(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV)) { drv_get_key_seq(sdata->local, key, &kseq); iv32 = kseq.tkip.iv32; iv16 = 
kseq.tkip.iv16; } seq[0] = iv16 & 0xff; seq[1] = (iv16 >> 8) & 0xff; seq[2] = iv32 & 0xff; seq[3] = (iv32 >> 8) & 0xff; seq[4] = (iv32 >> 16) & 0xff; seq[5] = (iv32 >> 24) & 0xff; params.seq = seq; params.seq_len = 6; break; case WLAN_CIPHER_SUITE_CCMP: case WLAN_CIPHER_SUITE_CCMP_256: case WLAN_CIPHER_SUITE_AES_CMAC: case WLAN_CIPHER_SUITE_BIP_CMAC_256: BUILD_BUG_ON(offsetof(typeof(kseq), ccmp) != offsetof(typeof(kseq), aes_cmac)); /* fall through */ case WLAN_CIPHER_SUITE_BIP_GMAC_128: case WLAN_CIPHER_SUITE_BIP_GMAC_256: BUILD_BUG_ON(offsetof(typeof(kseq), ccmp) != offsetof(typeof(kseq), aes_gmac)); /* fall through */ case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: BUILD_BUG_ON(offsetof(typeof(kseq), ccmp) != offsetof(typeof(kseq), gcmp)); if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE && !(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV)) { drv_get_key_seq(sdata->local, key, &kseq); memcpy(seq, kseq.ccmp.pn, 6); } else { pn64 = atomic64_read(&key->conf.tx_pn); seq[0] = pn64; seq[1] = pn64 >> 8; seq[2] = pn64 >> 16; seq[3] = pn64 >> 24; seq[4] = pn64 >> 32; seq[5] = pn64 >> 40; } params.seq = seq; params.seq_len = 6; break; default: if (!(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) break; if (WARN_ON(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV)) break; drv_get_key_seq(sdata->local, key, &kseq); params.seq = kseq.hw.seq; params.seq_len = kseq.hw.seq_len; break; } params.key = key->conf.key; params.key_len = key->conf.keylen; callback(cookie, ¶ms); err = 0; out: rcu_read_unlock(); return err; } static int ieee80211_config_default_key(struct wiphy *wiphy, struct net_device *dev, u8 key_idx, bool uni, bool multi) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); ieee80211_set_default_key(sdata, key_idx, uni, multi); return 0; } static int ieee80211_config_default_mgmt_key(struct wiphy *wiphy, struct net_device *dev, u8 key_idx) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); ieee80211_set_default_mgmt_key(sdata, key_idx); return 0; } void sta_set_rate_info_tx(struct sta_info *sta, const struct ieee80211_tx_rate *rate, struct rate_info *rinfo) { rinfo->flags = 0; if (rate->flags & IEEE80211_TX_RC_MCS) { rinfo->flags |= RATE_INFO_FLAGS_MCS; rinfo->mcs = rate->idx; } else if (rate->flags & IEEE80211_TX_RC_VHT_MCS) { rinfo->flags |= RATE_INFO_FLAGS_VHT_MCS; rinfo->mcs = ieee80211_rate_get_vht_mcs(rate); rinfo->nss = ieee80211_rate_get_vht_nss(rate); } else { struct ieee80211_supported_band *sband; int shift = ieee80211_vif_get_shift(&sta->sdata->vif); u16 brate; sband = ieee80211_get_sband(sta->sdata); if (sband) { brate = sband->bitrates[rate->idx].bitrate; rinfo->legacy = DIV_ROUND_UP(brate, 1 << shift); } } if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) rinfo->bw = RATE_INFO_BW_40; else if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH) rinfo->bw = RATE_INFO_BW_80; else if (rate->flags & IEEE80211_TX_RC_160_MHZ_WIDTH) rinfo->bw = RATE_INFO_BW_160; else rinfo->bw = RATE_INFO_BW_20; if (rate->flags & IEEE80211_TX_RC_SHORT_GI) rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI; } static int ieee80211_dump_station(struct wiphy *wiphy, struct net_device *dev, int idx, u8 *mac, struct station_info *sinfo) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_local *local = sdata->local; struct sta_info *sta; int ret = -ENOENT; mutex_lock(&local->sta_mtx); sta = sta_info_get_by_idx(sdata, idx); if (sta) { ret = 0; memcpy(mac, sta->sta.addr, ETH_ALEN); sta_set_sinfo(sta, sinfo, true); } mutex_unlock(&local->sta_mtx); 
return ret; } static int ieee80211_dump_survey(struct wiphy *wiphy, struct net_device *dev, int idx, struct survey_info *survey) { struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); return drv_get_survey(local, idx, survey); } static int ieee80211_get_station(struct wiphy *wiphy, struct net_device *dev, const u8 *mac, struct station_info *sinfo) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_local *local = sdata->local; struct sta_info *sta; int ret = -ENOENT; mutex_lock(&local->sta_mtx); sta = sta_info_get_bss(sdata, mac); if (sta) { ret = 0; sta_set_sinfo(sta, sinfo, true); } mutex_unlock(&local->sta_mtx); return ret; } static int ieee80211_set_monitor_channel(struct wiphy *wiphy, struct cfg80211_chan_def *chandef) { struct ieee80211_local *local = wiphy_priv(wiphy); struct ieee80211_sub_if_data *sdata; int ret = 0; if (cfg80211_chandef_identical(&local->monitor_chandef, chandef)) return 0; mutex_lock(&local->mtx); if (local->use_chanctx) { sdata = rtnl_dereference(local->monitor_sdata); if (sdata) { ieee80211_vif_release_channel(sdata); ret = ieee80211_vif_use_channel(sdata, chandef, IEEE80211_CHANCTX_EXCLUSIVE); } } else if (local->open_count == local->monitors) { local->_oper_chandef = *chandef; ieee80211_hw_config(local, 0); } if (ret == 0) local->monitor_chandef = *chandef; mutex_unlock(&local->mtx); return ret; } static int ieee80211_set_probe_resp(struct ieee80211_sub_if_data *sdata, const u8 *resp, size_t resp_len, const struct ieee80211_csa_settings *csa) { struct probe_resp *new, *old; if (!resp || !resp_len) return 1; old = sdata_dereference(sdata->u.ap.probe_resp, sdata); new = kzalloc(sizeof(struct probe_resp) + resp_len, GFP_KERNEL); if (!new) return -ENOMEM; new->len = resp_len; memcpy(new->data, resp, resp_len); if (csa) memcpy(new->csa_counter_offsets, csa->counter_offsets_presp, csa->n_counter_offsets_presp * sizeof(new->csa_counter_offsets[0])); rcu_assign_pointer(sdata->u.ap.probe_resp, new); if (old) kfree_rcu(old, rcu_head); return 0; } static int ieee80211_set_ftm_responder_params( struct ieee80211_sub_if_data *sdata, const u8 *lci, size_t lci_len, const u8 *civicloc, size_t civicloc_len) { struct ieee80211_ftm_responder_params *new, *old; struct ieee80211_bss_conf *bss_conf; u8 *pos; int len; if (!lci_len && !civicloc_len) return 0; bss_conf = &sdata->vif.bss_conf; old = bss_conf->ftmr_params; len = lci_len + civicloc_len; new = kzalloc(sizeof(*new) + len, GFP_KERNEL); if (!new) return -ENOMEM; pos = (u8 *)(new + 1); if (lci_len) { new->lci_len = lci_len; new->lci = pos; memcpy(pos, lci, lci_len); pos += lci_len; } if (civicloc_len) { new->civicloc_len = civicloc_len; new->civicloc = pos; memcpy(pos, civicloc, civicloc_len); pos += civicloc_len; } bss_conf->ftmr_params = new; kfree(old); return 0; } static int ieee80211_assign_beacon(struct ieee80211_sub_if_data *sdata, struct cfg80211_beacon_data *params, const struct ieee80211_csa_settings *csa) { struct beacon_data *new, *old; int new_head_len, new_tail_len; int size, err; u32 changed = BSS_CHANGED_BEACON; old = sdata_dereference(sdata->u.ap.beacon, sdata); /* Need to have a beacon head if we don't have one yet */ if (!params->head && !old) return -EINVAL; /* new or old head? */ if (params->head) new_head_len = params->head_len; else new_head_len = old->head_len; /* new or old tail? 
*/ if (params->tail || !old) /* params->tail_len will be zero for !params->tail */ new_tail_len = params->tail_len; else new_tail_len = old->tail_len; size = sizeof(*new) + new_head_len + new_tail_len; new = kzalloc(size, GFP_KERNEL); if (!new) return -ENOMEM; /* start filling the new info now */ /* * pointers go into the block we allocated, * memory is | beacon_data | head | tail | */ new->head = ((u8 *) new) + sizeof(*new); new->tail = new->head + new_head_len; new->head_len = new_head_len; new->tail_len = new_tail_len; if (csa) { new->csa_current_counter = csa->count; memcpy(new->csa_counter_offsets, csa->counter_offsets_beacon, csa->n_counter_offsets_beacon * sizeof(new->csa_counter_offsets[0])); } /* copy in head */ if (params->head) memcpy(new->head, params->head, new_head_len); else memcpy(new->head, old->head, new_head_len); /* copy in optional tail */ if (params->tail) memcpy(new->tail, params->tail, new_tail_len); else if (old) memcpy(new->tail, old->tail, new_tail_len); err = ieee80211_set_probe_resp(sdata, params->probe_resp, params->probe_resp_len, csa); if (err < 0) return err; if (err == 0) changed |= BSS_CHANGED_AP_PROBE_RESP; if (params->ftm_responder != -1) { sdata->vif.bss_conf.ftm_responder = params->ftm_responder; err = ieee80211_set_ftm_responder_params(sdata, params->lci, params->lci_len, params->civicloc, params->civicloc_len); if (err < 0) return err; changed |= BSS_CHANGED_FTM_RESPONDER; } rcu_assign_pointer(sdata->u.ap.beacon, new); if (old) kfree_rcu(old, rcu_head); return changed; } static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_ap_settings *params) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_local *local = sdata->local; struct beacon_data *old; struct ieee80211_sub_if_data *vlan; u32 changed = BSS_CHANGED_BEACON_INT | BSS_CHANGED_BEACON_ENABLED | BSS_CHANGED_BEACON | BSS_CHANGED_SSID | BSS_CHANGED_P2P_PS | BSS_CHANGED_TXPOWER; int err; old = sdata_dereference(sdata->u.ap.beacon, sdata); if (old) return -EALREADY; switch (params->smps_mode) { case NL80211_SMPS_OFF: sdata->smps_mode = IEEE80211_SMPS_OFF; break; case NL80211_SMPS_STATIC: sdata->smps_mode = IEEE80211_SMPS_STATIC; break; case NL80211_SMPS_DYNAMIC: sdata->smps_mode = IEEE80211_SMPS_DYNAMIC; break; default: return -EINVAL; } sdata->u.ap.req_smps = sdata->smps_mode; sdata->needed_rx_chains = sdata->local->rx_chains; sdata->vif.bss_conf.beacon_int = params->beacon_interval; if (params->he_cap) sdata->vif.bss_conf.he_support = true; mutex_lock(&local->mtx); err = ieee80211_vif_use_channel(sdata, &params->chandef, IEEE80211_CHANCTX_SHARED); if (!err) ieee80211_vif_copy_chanctx_to_vlans(sdata, false); mutex_unlock(&local->mtx); if (err) return err; /* * Apply control port protocol, this allows us to * not encrypt dynamic WEP control frames.
*/ sdata->control_port_protocol = params->crypto.control_port_ethertype; sdata->control_port_no_encrypt = params->crypto.control_port_no_encrypt; sdata->control_port_over_nl80211 = params->crypto.control_port_over_nl80211; sdata->encrypt_headroom = ieee80211_cs_headroom(sdata->local, &params->crypto, sdata->vif.type); list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list) { vlan->control_port_protocol = params->crypto.control_port_ethertype; vlan->control_port_no_encrypt = params->crypto.control_port_no_encrypt; vlan->control_port_over_nl80211 = params->crypto.control_port_over_nl80211; vlan->encrypt_headroom = ieee80211_cs_headroom(sdata->local, &params->crypto, vlan->vif.type); } sdata->vif.bss_conf.dtim_period = params->dtim_period; sdata->vif.bss_conf.enable_beacon = true; sdata->vif.bss_conf.allow_p2p_go_ps = sdata->vif.p2p; sdata->vif.bss_conf.ssid_len = params->ssid_len; if (params->ssid_len) memcpy(sdata->vif.bss_conf.ssid, params->ssid, params->ssid_len); sdata->vif.bss_conf.hidden_ssid = (params->hidden_ssid != NL80211_HIDDEN_SSID_NOT_IN_USE); memset(&sdata->vif.bss_conf.p2p_noa_attr, 0, sizeof(sdata->vif.bss_conf.p2p_noa_attr)); sdata->vif.bss_conf.p2p_noa_attr.oppps_ctwindow = params->p2p_ctwindow & IEEE80211_P2P_OPPPS_CTWINDOW_MASK; if (params->p2p_opp_ps) sdata->vif.bss_conf.p2p_noa_attr.oppps_ctwindow |= IEEE80211_P2P_OPPPS_ENABLE_BIT; err = ieee80211_assign_beacon(sdata, &params->beacon, NULL); if (err < 0) { ieee80211_vif_release_channel(sdata); return err; } changed |= err; err = drv_start_ap(sdata->local, sdata); if (err) { old = sdata_dereference(sdata->u.ap.beacon, sdata); if (old) kfree_rcu(old, rcu_head); RCU_INIT_POINTER(sdata->u.ap.beacon, NULL); ieee80211_vif_release_channel(sdata); return err; } ieee80211_recalc_dtim(local, sdata); ieee80211_bss_info_change_notify(sdata, changed); netif_carrier_on(dev); list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list) netif_carrier_on(vlan->dev); return 0; } static int ieee80211_change_beacon(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_beacon_data *params) { struct ieee80211_sub_if_data *sdata; struct beacon_data *old; int err; sdata = IEEE80211_DEV_TO_SUB_IF(dev); sdata_assert_lock(sdata); /* don't allow changing the beacon while CSA is in place - offset * of channel switch counter may change */ if (sdata->vif.csa_active) return -EBUSY; old = sdata_dereference(sdata->u.ap.beacon, sdata); if (!old) return -ENOENT; err = ieee80211_assign_beacon(sdata, params, NULL); if (err < 0) return err; ieee80211_bss_info_change_notify(sdata, err); return 0; } static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_sub_if_data *vlan; struct ieee80211_local *local = sdata->local; struct beacon_data *old_beacon; struct probe_resp *old_probe_resp; struct cfg80211_chan_def chandef; sdata_assert_lock(sdata); old_beacon = sdata_dereference(sdata->u.ap.beacon, sdata); if (!old_beacon) return -ENOENT; old_probe_resp = sdata_dereference(sdata->u.ap.probe_resp, sdata); /* abort any running channel switch */ mutex_lock(&local->mtx); sdata->vif.csa_active = false; if (sdata->csa_block_tx) { ieee80211_wake_vif_queues(local, sdata, IEEE80211_QUEUE_STOP_REASON_CSA); sdata->csa_block_tx = false; } mutex_unlock(&local->mtx); kfree(sdata->u.ap.next_beacon); sdata->u.ap.next_beacon = NULL; /* turn off carrier for this interface and dependent VLANs */ list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list) netif_carrier_off(vlan->dev);
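/*
 * Illustrative sketch (added for clarity; not in the original source):
 * the teardown below uses the usual RCU publish/retire idiom -- clear
 * the pointer first so new readers see NULL, then free the old data
 * only after a grace period:
 *
 *	RCU_INIT_POINTER(sdata->u.ap.beacon, NULL);
 *	kfree_rcu(old_beacon, rcu_head);
 */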
netif_carrier_off(dev); /* remove beacon and probe response */ RCU_INIT_POINTER(sdata->u.ap.beacon, NULL); RCU_INIT_POINTER(sdata->u.ap.probe_resp, NULL); kfree_rcu(old_beacon, rcu_head); if (old_probe_resp) kfree_rcu(old_probe_resp, rcu_head); sdata->u.ap.driver_smps_mode = IEEE80211_SMPS_OFF; kfree(sdata->vif.bss_conf.ftmr_params); sdata->vif.bss_conf.ftmr_params = NULL; __sta_info_flush(sdata, true); ieee80211_free_keys(sdata, true); sdata->vif.bss_conf.enable_beacon = false; sdata->vif.bss_conf.ssid_len = 0; clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED, &sdata->state); ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED); if (sdata->wdev.cac_started) { chandef = sdata->vif.bss_conf.chandef; cancel_delayed_work_sync(&sdata->dfs_cac_timer_work); cfg80211_cac_event(sdata->dev, &chandef, NL80211_RADAR_CAC_ABORTED, GFP_KERNEL); } drv_stop_ap(sdata->local, sdata); /* free all potentially still buffered bcast frames */ local->total_ps_buffered -= skb_queue_len(&sdata->u.ap.ps.bc_buf); ieee80211_purge_tx_queue(&local->hw, &sdata->u.ap.ps.bc_buf); mutex_lock(&local->mtx); ieee80211_vif_copy_chanctx_to_vlans(sdata, true); ieee80211_vif_release_channel(sdata); mutex_unlock(&local->mtx); return 0; } static int sta_apply_auth_flags(struct ieee80211_local *local, struct sta_info *sta, u32 mask, u32 set) { int ret; if (mask & BIT(NL80211_STA_FLAG_AUTHENTICATED) && set & BIT(NL80211_STA_FLAG_AUTHENTICATED) && !test_sta_flag(sta, WLAN_STA_AUTH)) { ret = sta_info_move_state(sta, IEEE80211_STA_AUTH); if (ret) return ret; } if (mask & BIT(NL80211_STA_FLAG_ASSOCIATED) && set & BIT(NL80211_STA_FLAG_ASSOCIATED) && !test_sta_flag(sta, WLAN_STA_ASSOC)) { /* * When peer becomes associated, init rate control as * well. Some drivers require rate control initialized * before drv_sta_state() is called. 
*/ if (!test_sta_flag(sta, WLAN_STA_RATE_CONTROL)) rate_control_rate_init(sta); ret = sta_info_move_state(sta, IEEE80211_STA_ASSOC); if (ret) return ret; } if (mask & BIT(NL80211_STA_FLAG_AUTHORIZED)) { if (set & BIT(NL80211_STA_FLAG_AUTHORIZED)) ret = sta_info_move_state(sta, IEEE80211_STA_AUTHORIZED); else if (test_sta_flag(sta, WLAN_STA_AUTHORIZED)) ret = sta_info_move_state(sta, IEEE80211_STA_ASSOC); else ret = 0; if (ret) return ret; } if (mask & BIT(NL80211_STA_FLAG_ASSOCIATED) && !(set & BIT(NL80211_STA_FLAG_ASSOCIATED)) && test_sta_flag(sta, WLAN_STA_ASSOC)) { ret = sta_info_move_state(sta, IEEE80211_STA_AUTH); if (ret) return ret; } if (mask & BIT(NL80211_STA_FLAG_AUTHENTICATED) && !(set & BIT(NL80211_STA_FLAG_AUTHENTICATED)) && test_sta_flag(sta, WLAN_STA_AUTH)) { ret = sta_info_move_state(sta, IEEE80211_STA_NONE); if (ret) return ret; } return 0; } static void sta_apply_mesh_params(struct ieee80211_local *local, struct sta_info *sta, struct station_parameters *params) { #ifdef CPTCFG_MAC80211_MESH struct ieee80211_sub_if_data *sdata = sta->sdata; u32 changed = 0; if (params->sta_modify_mask & STATION_PARAM_APPLY_PLINK_STATE) { switch (params->plink_state) { case NL80211_PLINK_ESTAB: if (sta->mesh->plink_state != NL80211_PLINK_ESTAB) changed = mesh_plink_inc_estab_count(sdata); sta->mesh->plink_state = params->plink_state; sta->mesh->aid = params->peer_aid; ieee80211_mps_sta_status_update(sta); changed |= ieee80211_mps_set_sta_local_pm(sta, sdata->u.mesh.mshcfg.power_mode); break; case NL80211_PLINK_LISTEN: case NL80211_PLINK_BLOCKED: case NL80211_PLINK_OPN_SNT: case NL80211_PLINK_OPN_RCVD: case NL80211_PLINK_CNF_RCVD: case NL80211_PLINK_HOLDING: if (sta->mesh->plink_state == NL80211_PLINK_ESTAB) changed = mesh_plink_dec_estab_count(sdata); sta->mesh->plink_state = params->plink_state; ieee80211_mps_sta_status_update(sta); changed |= ieee80211_mps_set_sta_local_pm(sta, NL80211_MESH_POWER_UNKNOWN); break; default: /* nothing */ break; } } switch (params->plink_action) { case NL80211_PLINK_ACTION_NO_ACTION: /* nothing */ break; case NL80211_PLINK_ACTION_OPEN: changed |= mesh_plink_open(sta); break; case NL80211_PLINK_ACTION_BLOCK: changed |= mesh_plink_block(sta); break; } if (params->local_pm) changed |= ieee80211_mps_set_sta_local_pm(sta, params->local_pm); ieee80211_mbss_info_change_notify(sdata, changed); #endif } static int sta_apply_parameters(struct ieee80211_local *local, struct sta_info *sta, struct station_parameters *params) { int ret = 0; struct ieee80211_supported_band *sband; struct ieee80211_sub_if_data *sdata = sta->sdata; u32 mask, set; sband = ieee80211_get_sband(sdata); if (!sband) return -EINVAL; mask = params->sta_flags_mask; set = params->sta_flags_set; if (ieee80211_vif_is_mesh(&sdata->vif)) { /* * In mesh mode, ASSOCIATED isn't part of the nl80211 * API but must follow AUTHENTICATED for driver state. 
*/ if (mask & BIT(NL80211_STA_FLAG_AUTHENTICATED)) mask |= BIT(NL80211_STA_FLAG_ASSOCIATED); if (set & BIT(NL80211_STA_FLAG_AUTHENTICATED)) set |= BIT(NL80211_STA_FLAG_ASSOCIATED); } else if (test_sta_flag(sta, WLAN_STA_TDLS_PEER)) { /* * TDLS -- everything follows authorized, but * only becoming authorized is possible, not * going back */ if (set & BIT(NL80211_STA_FLAG_AUTHORIZED)) { set |= BIT(NL80211_STA_FLAG_AUTHENTICATED) | BIT(NL80211_STA_FLAG_ASSOCIATED); mask |= BIT(NL80211_STA_FLAG_AUTHENTICATED) | BIT(NL80211_STA_FLAG_ASSOCIATED); } } if (mask & BIT(NL80211_STA_FLAG_WME) && local->hw.queues >= IEEE80211_NUM_ACS) sta->sta.wme = set & BIT(NL80211_STA_FLAG_WME); /* auth flags will be set later for TDLS, * and for unassociated stations that move to associated */ if (!test_sta_flag(sta, WLAN_STA_TDLS_PEER) && !((mask & BIT(NL80211_STA_FLAG_ASSOCIATED)) && (set & BIT(NL80211_STA_FLAG_ASSOCIATED)))) { ret = sta_apply_auth_flags(local, sta, mask, set); if (ret) return ret; } if (mask & BIT(NL80211_STA_FLAG_SHORT_PREAMBLE)) { if (set & BIT(NL80211_STA_FLAG_SHORT_PREAMBLE)) set_sta_flag(sta, WLAN_STA_SHORT_PREAMBLE); else clear_sta_flag(sta, WLAN_STA_SHORT_PREAMBLE); } if (mask & BIT(NL80211_STA_FLAG_MFP)) { sta->sta.mfp = !!(set & BIT(NL80211_STA_FLAG_MFP)); if (set & BIT(NL80211_STA_FLAG_MFP)) set_sta_flag(sta, WLAN_STA_MFP); else clear_sta_flag(sta, WLAN_STA_MFP); } if (mask & BIT(NL80211_STA_FLAG_TDLS_PEER)) { if (set & BIT(NL80211_STA_FLAG_TDLS_PEER)) set_sta_flag(sta, WLAN_STA_TDLS_PEER); else clear_sta_flag(sta, WLAN_STA_TDLS_PEER); } /* mark TDLS channel switch support, if the AP allows it */ if (test_sta_flag(sta, WLAN_STA_TDLS_PEER) && !sdata->u.mgd.tdls_chan_switch_prohibited && params->ext_capab_len >= 4 && params->ext_capab[3] & WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH) set_sta_flag(sta, WLAN_STA_TDLS_CHAN_SWITCH); if (test_sta_flag(sta, WLAN_STA_TDLS_PEER) && !sdata->u.mgd.tdls_wider_bw_prohibited && ieee80211_hw_check(&local->hw, TDLS_WIDER_BW) && params->ext_capab_len >= 8 && params->ext_capab[7] & WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED) set_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW); if (params->sta_modify_mask & STATION_PARAM_APPLY_UAPSD) { sta->sta.uapsd_queues = params->uapsd_queues; sta->sta.max_sp = params->max_sp; } /* The sender might not have sent the last bit, consider it to be 0 */ if (params->ext_capab_len >= 8) { u8 val = (params->ext_capab[7] & WLAN_EXT_CAPA8_MAX_MSDU_IN_AMSDU_LSB) >> 7; /* we did get all the bits, take the MSB as well */ if (params->ext_capab_len >= 9) { u8 val_msb = params->ext_capab[8] & WLAN_EXT_CAPA9_MAX_MSDU_IN_AMSDU_MSB; val_msb <<= 1; val |= val_msb; } switch (val) { case 1: sta->sta.max_amsdu_subframes = 32; break; case 2: sta->sta.max_amsdu_subframes = 16; break; case 3: sta->sta.max_amsdu_subframes = 8; break; default: sta->sta.max_amsdu_subframes = 0; } } /* * cfg80211 validates this (1-2007) and allows setting the AID * only when creating a new station entry */ if (params->aid) sta->sta.aid = params->aid; /* * Some of the following updates would be racy if called on an * existing station, via ieee80211_change_station(). However, * all such changes are rejected by cfg80211 except for updates * changing the supported rates on an existing but not yet used * TDLS peer.
*/ if (params->listen_interval >= 0) sta->listen_interval = params->listen_interval; if (params->supported_rates && params->supported_rates_len) { ieee80211_parse_bitrates(&sdata->vif.bss_conf.chandef, sband, params->supported_rates, params->supported_rates_len, &sta->sta.supp_rates[sband->band]); } if (params->ht_capa) ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband, params->ht_capa, sta); /* VHT can override some HT caps such as the A-MSDU max length */ if (params->vht_capa) ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband, params->vht_capa, sta); if (params->he_capa) ieee80211_he_cap_ie_to_sta_he_cap(sdata, sband, (void *)params->he_capa, params->he_capa_len, sta); if (params->opmode_notif_used) { /* returned value is only needed for rc update, but the * rc isn't initialized here yet, so ignore it */ __ieee80211_vht_handle_opmode(sdata, sta, params->opmode_notif, sband->band); } if (params->support_p2p_ps >= 0) sta->sta.support_p2p_ps = params->support_p2p_ps; if (ieee80211_vif_is_mesh(&sdata->vif)) sta_apply_mesh_params(local, sta, params); /* set the STA state after all sta info from usermode has been set */ if (test_sta_flag(sta, WLAN_STA_TDLS_PEER) || set & BIT(NL80211_STA_FLAG_ASSOCIATED)) { ret = sta_apply_auth_flags(local, sta, mask, set); if (ret) return ret; } return 0; } static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev, const u8 *mac, struct station_parameters *params) { struct ieee80211_local *local = wiphy_priv(wiphy); struct sta_info *sta; struct ieee80211_sub_if_data *sdata; int err; int layer2_update; if (params->vlan) { sdata = IEEE80211_DEV_TO_SUB_IF(params->vlan); if (sdata->vif.type != NL80211_IFTYPE_AP_VLAN && sdata->vif.type != NL80211_IFTYPE_AP) return -EINVAL; } else sdata = IEEE80211_DEV_TO_SUB_IF(dev); if (ether_addr_equal(mac, sdata->vif.addr)) return -EINVAL; if (is_multicast_ether_addr(mac)) return -EINVAL; sta = sta_info_alloc(sdata, mac, GFP_KERNEL); if (!sta) return -ENOMEM; if (params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) sta->sta.tdls = true; err = sta_apply_parameters(local, sta, params); if (err) { sta_info_free(local, sta); return err; } /* * for TDLS and for unassociated station, rate control should be * initialized only when rates are known and station is marked * authorized/associated */ if (!test_sta_flag(sta, WLAN_STA_TDLS_PEER) && test_sta_flag(sta, WLAN_STA_ASSOC)) rate_control_rate_init(sta); layer2_update = sdata->vif.type == NL80211_IFTYPE_AP_VLAN || sdata->vif.type == NL80211_IFTYPE_AP; err = sta_info_insert_rcu(sta); if (err) { rcu_read_unlock(); return err; } if (layer2_update) cfg80211_send_layer2_update(sta->sdata->dev, sta->sta.addr); rcu_read_unlock(); return 0; } static int ieee80211_del_station(struct wiphy *wiphy, struct net_device *dev, struct station_del_parameters *params) { struct ieee80211_sub_if_data *sdata; sdata = IEEE80211_DEV_TO_SUB_IF(dev); if (params->mac) return sta_info_destroy_addr_bss(sdata, params->mac); sta_info_flush(sdata); return 0; } static int ieee80211_change_station(struct wiphy *wiphy, struct net_device *dev, const u8 *mac, struct station_parameters *params) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_local *local = wiphy_priv(wiphy); struct sta_info *sta; struct ieee80211_sub_if_data *vlansdata; enum cfg80211_station_type statype; int err; mutex_lock(&local->sta_mtx); sta = sta_info_get_bss(sdata, mac); if (!sta) { err = -ENOENT; goto out_err; } switch (sdata->vif.type) { case NL80211_IFTYPE_MESH_POINT: if 
(sdata->u.mesh.user_mpm) statype = CFG80211_STA_MESH_PEER_USER; else statype = CFG80211_STA_MESH_PEER_KERNEL; break; case NL80211_IFTYPE_ADHOC: statype = CFG80211_STA_IBSS; break; case NL80211_IFTYPE_STATION: if (!test_sta_flag(sta, WLAN_STA_TDLS_PEER)) { statype = CFG80211_STA_AP_STA; break; } if (test_sta_flag(sta, WLAN_STA_AUTHORIZED)) statype = CFG80211_STA_TDLS_PEER_ACTIVE; else statype = CFG80211_STA_TDLS_PEER_SETUP; break; case NL80211_IFTYPE_AP: case NL80211_IFTYPE_AP_VLAN: if (test_sta_flag(sta, WLAN_STA_ASSOC)) statype = CFG80211_STA_AP_CLIENT; else statype = CFG80211_STA_AP_CLIENT_UNASSOC; break; default: err = -EOPNOTSUPP; goto out_err; } err = cfg80211_check_station_change(wiphy, params, statype); if (err) goto out_err; if (params->vlan && params->vlan != sta->sdata->dev) { vlansdata = IEEE80211_DEV_TO_SUB_IF(params->vlan); if (params->vlan->ieee80211_ptr->use_4addr) { if (vlansdata->u.vlan.sta) { err = -EBUSY; goto out_err; } rcu_assign_pointer(vlansdata->u.vlan.sta, sta); __ieee80211_check_fast_rx_iface(vlansdata); } if (sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN && sta->sdata->u.vlan.sta) RCU_INIT_POINTER(sta->sdata->u.vlan.sta, NULL); if (test_sta_flag(sta, WLAN_STA_AUTHORIZED)) ieee80211_vif_dec_num_mcast(sta->sdata); sta->sdata = vlansdata; ieee80211_check_fast_xmit(sta); if (test_sta_flag(sta, WLAN_STA_AUTHORIZED)) ieee80211_vif_inc_num_mcast(sta->sdata); cfg80211_send_layer2_update(sta->sdata->dev, sta->sta.addr); } err = sta_apply_parameters(local, sta, params); if (err) goto out_err; mutex_unlock(&local->sta_mtx); if ((sdata->vif.type == NL80211_IFTYPE_AP || sdata->vif.type == NL80211_IFTYPE_AP_VLAN) && sta->known_smps_mode != sta->sdata->bss->req_smps && test_sta_flag(sta, WLAN_STA_AUTHORIZED) && sta_info_tx_streams(sta) != 1) { ht_dbg(sta->sdata, "%pM just authorized and MIMO capable - update SMPS\n", sta->sta.addr); ieee80211_send_smps_action(sta->sdata, sta->sdata->bss->req_smps, sta->sta.addr, sta->sdata->vif.bss_conf.bssid); } if (sdata->vif.type == NL80211_IFTYPE_STATION && params->sta_flags_mask & BIT(NL80211_STA_FLAG_AUTHORIZED)) { ieee80211_recalc_ps(local); ieee80211_recalc_ps_vif(sdata); } return 0; out_err: mutex_unlock(&local->sta_mtx); return err; } #ifdef CPTCFG_MAC80211_MESH static int ieee80211_add_mpath(struct wiphy *wiphy, struct net_device *dev, const u8 *dst, const u8 *next_hop) { struct ieee80211_sub_if_data *sdata; struct mesh_path *mpath; struct sta_info *sta; sdata = IEEE80211_DEV_TO_SUB_IF(dev); rcu_read_lock(); sta = sta_info_get(sdata, next_hop); if (!sta) { rcu_read_unlock(); return -ENOENT; } mpath = mesh_path_add(sdata, dst); if (IS_ERR(mpath)) { rcu_read_unlock(); return PTR_ERR(mpath); } mesh_path_fix_nexthop(mpath, sta); rcu_read_unlock(); return 0; } static int ieee80211_del_mpath(struct wiphy *wiphy, struct net_device *dev, const u8 *dst) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); if (dst) return mesh_path_del(sdata, dst); mesh_path_flush_by_iface(sdata); return 0; } static int ieee80211_change_mpath(struct wiphy *wiphy, struct net_device *dev, const u8 *dst, const u8 *next_hop) { struct ieee80211_sub_if_data *sdata; struct mesh_path *mpath; struct sta_info *sta; sdata = IEEE80211_DEV_TO_SUB_IF(dev); rcu_read_lock(); sta = sta_info_get(sdata, next_hop); if (!sta) { rcu_read_unlock(); return -ENOENT; } mpath = mesh_path_lookup(sdata, dst); if (!mpath) { rcu_read_unlock(); return -ENOENT; } mesh_path_fix_nexthop(mpath, sta); rcu_read_unlock(); return 0; } static void mpath_set_pinfo(struct mesh_path 
*mpath, u8 *next_hop, struct mpath_info *pinfo) { struct sta_info *next_hop_sta = rcu_dereference(mpath->next_hop); if (next_hop_sta) memcpy(next_hop, next_hop_sta->sta.addr, ETH_ALEN); else eth_zero_addr(next_hop); memset(pinfo, 0, sizeof(*pinfo)); pinfo->generation = mpath->sdata->u.mesh.mesh_paths_generation; pinfo->filled = MPATH_INFO_FRAME_QLEN | MPATH_INFO_SN | MPATH_INFO_METRIC | MPATH_INFO_EXPTIME | MPATH_INFO_DISCOVERY_TIMEOUT | MPATH_INFO_DISCOVERY_RETRIES | MPATH_INFO_FLAGS; pinfo->frame_qlen = mpath->frame_queue.qlen; pinfo->sn = mpath->sn; pinfo->metric = mpath->metric; if (time_before(jiffies, mpath->exp_time)) pinfo->exptime = jiffies_to_msecs(mpath->exp_time - jiffies); pinfo->discovery_timeout = jiffies_to_msecs(mpath->discovery_timeout); pinfo->discovery_retries = mpath->discovery_retries; if (mpath->flags & MESH_PATH_ACTIVE) pinfo->flags |= NL80211_MPATH_FLAG_ACTIVE; if (mpath->flags & MESH_PATH_RESOLVING) pinfo->flags |= NL80211_MPATH_FLAG_RESOLVING; if (mpath->flags & MESH_PATH_SN_VALID) pinfo->flags |= NL80211_MPATH_FLAG_SN_VALID; if (mpath->flags & MESH_PATH_FIXED) pinfo->flags |= NL80211_MPATH_FLAG_FIXED; if (mpath->flags & MESH_PATH_RESOLVED) pinfo->flags |= NL80211_MPATH_FLAG_RESOLVED; } static int ieee80211_get_mpath(struct wiphy *wiphy, struct net_device *dev, u8 *dst, u8 *next_hop, struct mpath_info *pinfo) { struct ieee80211_sub_if_data *sdata; struct mesh_path *mpath; sdata = IEEE80211_DEV_TO_SUB_IF(dev); rcu_read_lock(); mpath = mesh_path_lookup(sdata, dst); if (!mpath) { rcu_read_unlock(); return -ENOENT; } memcpy(dst, mpath->dst, ETH_ALEN); mpath_set_pinfo(mpath, next_hop, pinfo); rcu_read_unlock(); return 0; } static int ieee80211_dump_mpath(struct wiphy *wiphy, struct net_device *dev, int idx, u8 *dst, u8 *next_hop, struct mpath_info *pinfo) { struct ieee80211_sub_if_data *sdata; struct mesh_path *mpath; sdata = IEEE80211_DEV_TO_SUB_IF(dev); rcu_read_lock(); mpath = mesh_path_lookup_by_idx(sdata, idx); if (!mpath) { rcu_read_unlock(); return -ENOENT; } memcpy(dst, mpath->dst, ETH_ALEN); mpath_set_pinfo(mpath, next_hop, pinfo); rcu_read_unlock(); return 0; } static void mpp_set_pinfo(struct mesh_path *mpath, u8 *mpp, struct mpath_info *pinfo) { memset(pinfo, 0, sizeof(*pinfo)); memcpy(mpp, mpath->mpp, ETH_ALEN); pinfo->generation = mpath->sdata->u.mesh.mpp_paths_generation; } static int ieee80211_get_mpp(struct wiphy *wiphy, struct net_device *dev, u8 *dst, u8 *mpp, struct mpath_info *pinfo) { struct ieee80211_sub_if_data *sdata; struct mesh_path *mpath; sdata = IEEE80211_DEV_TO_SUB_IF(dev); rcu_read_lock(); mpath = mpp_path_lookup(sdata, dst); if (!mpath) { rcu_read_unlock(); return -ENOENT; } memcpy(dst, mpath->dst, ETH_ALEN); mpp_set_pinfo(mpath, mpp, pinfo); rcu_read_unlock(); return 0; } static int ieee80211_dump_mpp(struct wiphy *wiphy, struct net_device *dev, int idx, u8 *dst, u8 *mpp, struct mpath_info *pinfo) { struct ieee80211_sub_if_data *sdata; struct mesh_path *mpath; sdata = IEEE80211_DEV_TO_SUB_IF(dev); rcu_read_lock(); mpath = mpp_path_lookup_by_idx(sdata, idx); if (!mpath) { rcu_read_unlock(); return -ENOENT; } memcpy(dst, mpath->dst, ETH_ALEN); mpp_set_pinfo(mpath, mpp, pinfo); rcu_read_unlock(); return 0; } static int ieee80211_get_mesh_config(struct wiphy *wiphy, struct net_device *dev, struct mesh_config *conf) { struct ieee80211_sub_if_data *sdata; sdata = IEEE80211_DEV_TO_SUB_IF(dev); memcpy(conf, &(sdata->u.mesh.mshcfg), sizeof(struct mesh_config)); return 0; } static inline bool _chg_mesh_attr(enum nl80211_meshconf_params parm, 
u32 mask) { return (mask >> (parm-1)) & 0x1; } static int copy_mesh_setup(struct ieee80211_if_mesh *ifmsh, const struct mesh_setup *setup) { u8 *new_ie; const u8 *old_ie; struct ieee80211_sub_if_data *sdata = container_of(ifmsh, struct ieee80211_sub_if_data, u.mesh); /* allocate information elements */ new_ie = NULL; old_ie = ifmsh->ie; if (setup->ie_len) { new_ie = kmemdup(setup->ie, setup->ie_len, GFP_KERNEL); if (!new_ie) return -ENOMEM; } ifmsh->ie_len = setup->ie_len; ifmsh->ie = new_ie; kfree(old_ie); /* now copy the rest of the setup parameters */ ifmsh->mesh_id_len = setup->mesh_id_len; memcpy(ifmsh->mesh_id, setup->mesh_id, ifmsh->mesh_id_len); ifmsh->mesh_sp_id = setup->sync_method; ifmsh->mesh_pp_id = setup->path_sel_proto; ifmsh->mesh_pm_id = setup->path_metric; ifmsh->user_mpm = setup->user_mpm; ifmsh->mesh_auth_id = setup->auth_id; ifmsh->security = IEEE80211_MESH_SEC_NONE; ifmsh->userspace_handles_dfs = setup->userspace_handles_dfs; if (setup->is_authenticated) ifmsh->security |= IEEE80211_MESH_SEC_AUTHED; if (setup->is_secure) ifmsh->security |= IEEE80211_MESH_SEC_SECURED; /* mcast rate setting in Mesh Node */ memcpy(sdata->vif.bss_conf.mcast_rate, setup->mcast_rate, sizeof(setup->mcast_rate)); sdata->vif.bss_conf.basic_rates = setup->basic_rates; sdata->vif.bss_conf.beacon_int = setup->beacon_interval; sdata->vif.bss_conf.dtim_period = setup->dtim_period; return 0; } static int ieee80211_update_mesh_config(struct wiphy *wiphy, struct net_device *dev, u32 mask, const struct mesh_config *nconf) { struct mesh_config *conf; struct ieee80211_sub_if_data *sdata; struct ieee80211_if_mesh *ifmsh; sdata = IEEE80211_DEV_TO_SUB_IF(dev); ifmsh = &sdata->u.mesh; /* Set the config options which we are interested in setting */ conf = &(sdata->u.mesh.mshcfg); if (_chg_mesh_attr(NL80211_MESHCONF_RETRY_TIMEOUT, mask)) conf->dot11MeshRetryTimeout = nconf->dot11MeshRetryTimeout; if (_chg_mesh_attr(NL80211_MESHCONF_CONFIRM_TIMEOUT, mask)) conf->dot11MeshConfirmTimeout = nconf->dot11MeshConfirmTimeout; if (_chg_mesh_attr(NL80211_MESHCONF_HOLDING_TIMEOUT, mask)) conf->dot11MeshHoldingTimeout = nconf->dot11MeshHoldingTimeout; if (_chg_mesh_attr(NL80211_MESHCONF_MAX_PEER_LINKS, mask)) conf->dot11MeshMaxPeerLinks = nconf->dot11MeshMaxPeerLinks; if (_chg_mesh_attr(NL80211_MESHCONF_MAX_RETRIES, mask)) conf->dot11MeshMaxRetries = nconf->dot11MeshMaxRetries; if (_chg_mesh_attr(NL80211_MESHCONF_TTL, mask)) conf->dot11MeshTTL = nconf->dot11MeshTTL; if (_chg_mesh_attr(NL80211_MESHCONF_ELEMENT_TTL, mask)) conf->element_ttl = nconf->element_ttl; if (_chg_mesh_attr(NL80211_MESHCONF_AUTO_OPEN_PLINKS, mask)) { if (ifmsh->user_mpm) return -EBUSY; conf->auto_open_plinks = nconf->auto_open_plinks; } if (_chg_mesh_attr(NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR, mask)) conf->dot11MeshNbrOffsetMaxNeighbor = nconf->dot11MeshNbrOffsetMaxNeighbor; if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES, mask)) conf->dot11MeshHWMPmaxPREQretries = nconf->dot11MeshHWMPmaxPREQretries; if (_chg_mesh_attr(NL80211_MESHCONF_PATH_REFRESH_TIME, mask)) conf->path_refresh_time = nconf->path_refresh_time; if (_chg_mesh_attr(NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT, mask)) conf->min_discovery_timeout = nconf->min_discovery_timeout; if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT, mask)) conf->dot11MeshHWMPactivePathTimeout = nconf->dot11MeshHWMPactivePathTimeout; if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL, mask)) conf->dot11MeshHWMPpreqMinInterval = nconf->dot11MeshHWMPpreqMinInterval; if 
(_chg_mesh_attr(NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL, mask)) conf->dot11MeshHWMPperrMinInterval = nconf->dot11MeshHWMPperrMinInterval; if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME, mask)) conf->dot11MeshHWMPnetDiameterTraversalTime = nconf->dot11MeshHWMPnetDiameterTraversalTime; if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_ROOTMODE, mask)) { conf->dot11MeshHWMPRootMode = nconf->dot11MeshHWMPRootMode; ieee80211_mesh_root_setup(ifmsh); } if (_chg_mesh_attr(NL80211_MESHCONF_GATE_ANNOUNCEMENTS, mask)) { /* our current gate announcement implementation rides on root * announcements, so require this ifmsh to also be a root node * */ if (nconf->dot11MeshGateAnnouncementProtocol && !(conf->dot11MeshHWMPRootMode > IEEE80211_ROOTMODE_ROOT)) { conf->dot11MeshHWMPRootMode = IEEE80211_PROACTIVE_RANN; ieee80211_mesh_root_setup(ifmsh); } conf->dot11MeshGateAnnouncementProtocol = nconf->dot11MeshGateAnnouncementProtocol; } if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_RANN_INTERVAL, mask)) conf->dot11MeshHWMPRannInterval = nconf->dot11MeshHWMPRannInterval; if (_chg_mesh_attr(NL80211_MESHCONF_FORWARDING, mask)) conf->dot11MeshForwarding = nconf->dot11MeshForwarding; if (_chg_mesh_attr(NL80211_MESHCONF_RSSI_THRESHOLD, mask)) { /* our RSSI threshold implementation is supported only for * devices that report signal in dBm. */ if (!ieee80211_hw_check(&sdata->local->hw, SIGNAL_DBM)) return -ENOTSUPP; conf->rssi_threshold = nconf->rssi_threshold; } if (_chg_mesh_attr(NL80211_MESHCONF_HT_OPMODE, mask)) { conf->ht_opmode = nconf->ht_opmode; sdata->vif.bss_conf.ht_operation_mode = nconf->ht_opmode; ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_HT); } if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_PATH_TO_ROOT_TIMEOUT, mask)) conf->dot11MeshHWMPactivePathToRootTimeout = nconf->dot11MeshHWMPactivePathToRootTimeout; if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_ROOT_INTERVAL, mask)) conf->dot11MeshHWMProotInterval = nconf->dot11MeshHWMProotInterval; if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_CONFIRMATION_INTERVAL, mask)) conf->dot11MeshHWMPconfirmationInterval = nconf->dot11MeshHWMPconfirmationInterval; if (_chg_mesh_attr(NL80211_MESHCONF_POWER_MODE, mask)) { conf->power_mode = nconf->power_mode; ieee80211_mps_local_status_update(sdata); } if (_chg_mesh_attr(NL80211_MESHCONF_AWAKE_WINDOW, mask)) conf->dot11MeshAwakeWindowDuration = nconf->dot11MeshAwakeWindowDuration; if (_chg_mesh_attr(NL80211_MESHCONF_PLINK_TIMEOUT, mask)) conf->plink_timeout = nconf->plink_timeout; if (_chg_mesh_attr(NL80211_MESHCONF_CONNECTED_TO_GATE, mask)) conf->dot11MeshConnectedToMeshGate = nconf->dot11MeshConnectedToMeshGate; ieee80211_mbss_info_change_notify(sdata, BSS_CHANGED_BEACON); return 0; } static int ieee80211_join_mesh(struct wiphy *wiphy, struct net_device *dev, const struct mesh_config *conf, const struct mesh_setup *setup) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; int err; memcpy(&ifmsh->mshcfg, conf, sizeof(struct mesh_config)); err = copy_mesh_setup(ifmsh, setup); if (err) return err; sdata->control_port_over_nl80211 = setup->control_port_over_nl80211; /* can mesh use other SMPS modes? 
*/ sdata->smps_mode = IEEE80211_SMPS_OFF; sdata->needed_rx_chains = sdata->local->rx_chains; mutex_lock(&sdata->local->mtx); err = ieee80211_vif_use_channel(sdata, &setup->chandef, IEEE80211_CHANCTX_SHARED); mutex_unlock(&sdata->local->mtx); if (err) return err; return ieee80211_start_mesh(sdata); } static int ieee80211_leave_mesh(struct wiphy *wiphy, struct net_device *dev) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); ieee80211_stop_mesh(sdata); mutex_lock(&sdata->local->mtx); ieee80211_vif_release_channel(sdata); mutex_unlock(&sdata->local->mtx); return 0; } #endif static int ieee80211_change_bss(struct wiphy *wiphy, struct net_device *dev, struct bss_parameters *params) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_supported_band *sband; u32 changed = 0; if (!sdata_dereference(sdata->u.ap.beacon, sdata)) return -ENOENT; sband = ieee80211_get_sband(sdata); if (!sband) return -EINVAL; if (params->use_cts_prot >= 0) { sdata->vif.bss_conf.use_cts_prot = params->use_cts_prot; changed |= BSS_CHANGED_ERP_CTS_PROT; } if (params->use_short_preamble >= 0) { sdata->vif.bss_conf.use_short_preamble = params->use_short_preamble; changed |= BSS_CHANGED_ERP_PREAMBLE; } if (!sdata->vif.bss_conf.use_short_slot && sband->band == NL80211_BAND_5GHZ) { sdata->vif.bss_conf.use_short_slot = true; changed |= BSS_CHANGED_ERP_SLOT; } if (params->use_short_slot_time >= 0) { sdata->vif.bss_conf.use_short_slot = params->use_short_slot_time; changed |= BSS_CHANGED_ERP_SLOT; } if (params->basic_rates) { ieee80211_parse_bitrates(&sdata->vif.bss_conf.chandef, wiphy->bands[sband->band], params->basic_rates, params->basic_rates_len, &sdata->vif.bss_conf.basic_rates); changed |= BSS_CHANGED_BASIC_RATES; ieee80211_check_rate_mask(sdata); } if (params->ap_isolate >= 0) { if (params->ap_isolate) sdata->flags |= IEEE80211_SDATA_DONT_BRIDGE_PACKETS; else sdata->flags &= ~IEEE80211_SDATA_DONT_BRIDGE_PACKETS; ieee80211_check_fast_rx_iface(sdata); } if (params->ht_opmode >= 0) { sdata->vif.bss_conf.ht_operation_mode = (u16) params->ht_opmode; changed |= BSS_CHANGED_HT; } if (params->p2p_ctwindow >= 0) { sdata->vif.bss_conf.p2p_noa_attr.oppps_ctwindow &= ~IEEE80211_P2P_OPPPS_CTWINDOW_MASK; sdata->vif.bss_conf.p2p_noa_attr.oppps_ctwindow |= params->p2p_ctwindow & IEEE80211_P2P_OPPPS_CTWINDOW_MASK; changed |= BSS_CHANGED_P2P_PS; } if (params->p2p_opp_ps > 0) { sdata->vif.bss_conf.p2p_noa_attr.oppps_ctwindow |= IEEE80211_P2P_OPPPS_ENABLE_BIT; changed |= BSS_CHANGED_P2P_PS; } else if (params->p2p_opp_ps == 0) { sdata->vif.bss_conf.p2p_noa_attr.oppps_ctwindow &= ~IEEE80211_P2P_OPPPS_ENABLE_BIT; changed |= BSS_CHANGED_P2P_PS; } ieee80211_bss_info_change_notify(sdata, changed); return 0; } static int ieee80211_set_txq_params(struct wiphy *wiphy, struct net_device *dev, struct ieee80211_txq_params *params) { struct ieee80211_local *local = wiphy_priv(wiphy); struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_tx_queue_params p; if (!local->ops->conf_tx) return -EOPNOTSUPP; if (local->hw.queues < IEEE80211_NUM_ACS) return -EOPNOTSUPP; memset(&p, 0, sizeof(p)); p.aifs = params->aifs; p.cw_max = params->cwmax; p.cw_min = params->cwmin; p.txop = params->txop; /* * Setting tx queue params disables u-apsd because it's only * called in master mode. 
*/ p.uapsd = false; ieee80211_regulatory_limit_wmm_params(sdata, &p, params->ac); sdata->tx_conf[params->ac] = p; if (drv_conf_tx(local, sdata, params->ac, &p)) { wiphy_debug(local->hw.wiphy, "failed to set TX queue parameters for AC %d\n", params->ac); return -EINVAL; } ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_QOS); return 0; } #ifdef CONFIG_PM static int ieee80211_suspend(struct wiphy *wiphy, struct cfg80211_wowlan *wowlan) { return __ieee80211_suspend(wiphy_priv(wiphy), wowlan); } static int ieee80211_resume(struct wiphy *wiphy) { return __ieee80211_resume(wiphy_priv(wiphy)); } #else #define ieee80211_suspend NULL #define ieee80211_resume NULL #endif static int ieee80211_scan(struct wiphy *wiphy, struct cfg80211_scan_request *req) { struct ieee80211_sub_if_data *sdata; sdata = IEEE80211_WDEV_TO_SUB_IF(req->wdev); switch (ieee80211_vif_type_p2p(&sdata->vif)) { case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_ADHOC: case NL80211_IFTYPE_MESH_POINT: case NL80211_IFTYPE_P2P_CLIENT: case NL80211_IFTYPE_P2P_DEVICE: break; case NL80211_IFTYPE_P2P_GO: if (sdata->local->ops->hw_scan) break; /* * FIXME: implement NoA while scanning in software, * for now fall through to allow scanning only when * beaconing hasn't been configured yet */ /* fall through */ case NL80211_IFTYPE_AP: /* * If the scan has been forced (and the driver supports * forcing), don't care whether we're already beaconing. * This will create problems for the attached stations * (e.g. all the frames sent while scanning on another * channel will be lost) */ if (sdata->u.ap.beacon && (!(wiphy->features & NL80211_FEATURE_AP_SCAN) || !(req->flags & NL80211_SCAN_FLAG_AP))) return -EOPNOTSUPP; break; case NL80211_IFTYPE_NAN: default: return -EOPNOTSUPP; } return ieee80211_request_scan(sdata, req); } static void ieee80211_abort_scan(struct wiphy *wiphy, struct wireless_dev *wdev) { ieee80211_scan_cancel(wiphy_priv(wiphy)); } static int ieee80211_sched_scan_start(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_sched_scan_request *req) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); if (!sdata->local->ops->sched_scan_start) return -EOPNOTSUPP; return ieee80211_request_sched_scan_start(sdata, req); } static int ieee80211_sched_scan_stop(struct wiphy *wiphy, struct net_device *dev, u64 reqid) { struct ieee80211_local *local = wiphy_priv(wiphy); if (!local->ops->sched_scan_stop) return -EOPNOTSUPP; return ieee80211_request_sched_scan_stop(local); } static int ieee80211_auth(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_auth_request *req) { return ieee80211_mgd_auth(IEEE80211_DEV_TO_SUB_IF(dev), req); } static int ieee80211_assoc(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_assoc_request *req) { return ieee80211_mgd_assoc(IEEE80211_DEV_TO_SUB_IF(dev), req); } static int ieee80211_deauth(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_deauth_request *req) { return ieee80211_mgd_deauth(IEEE80211_DEV_TO_SUB_IF(dev), req); } static int ieee80211_disassoc(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_disassoc_request *req) { return ieee80211_mgd_disassoc(IEEE80211_DEV_TO_SUB_IF(dev), req); } static int ieee80211_join_ibss(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_ibss_params *params) { return ieee80211_ibss_join(IEEE80211_DEV_TO_SUB_IF(dev), params); } static int ieee80211_leave_ibss(struct wiphy *wiphy, struct net_device *dev) { return ieee80211_ibss_leave(IEEE80211_DEV_TO_SUB_IF(dev)); } static int ieee80211_join_ocb(struct wiphy
*wiphy, struct net_device *dev, struct ocb_setup *setup) { return ieee80211_ocb_join(IEEE80211_DEV_TO_SUB_IF(dev), setup); } static int ieee80211_leave_ocb(struct wiphy *wiphy, struct net_device *dev) { return ieee80211_ocb_leave(IEEE80211_DEV_TO_SUB_IF(dev)); } static int ieee80211_set_mcast_rate(struct wiphy *wiphy, struct net_device *dev, int rate[NUM_NL80211_BANDS]) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); memcpy(sdata->vif.bss_conf.mcast_rate, rate, sizeof(int) * NUM_NL80211_BANDS); ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_MCAST_RATE); return 0; } static int ieee80211_set_wiphy_params(struct wiphy *wiphy, u32 changed) { struct ieee80211_local *local = wiphy_priv(wiphy); int err; if (changed & WIPHY_PARAM_FRAG_THRESHOLD) { ieee80211_check_fast_xmit_all(local); err = drv_set_frag_threshold(local, wiphy->frag_threshold); if (err) { ieee80211_check_fast_xmit_all(local); return err; } } if ((changed & WIPHY_PARAM_COVERAGE_CLASS) || (changed & WIPHY_PARAM_DYN_ACK)) { s16 coverage_class; coverage_class = changed & WIPHY_PARAM_COVERAGE_CLASS ? wiphy->coverage_class : -1; err = drv_set_coverage_class(local, coverage_class); if (err) return err; } if (changed & WIPHY_PARAM_RTS_THRESHOLD) { err = drv_set_rts_threshold(local, wiphy->rts_threshold); if (err) return err; } if (changed & WIPHY_PARAM_RETRY_SHORT) { if (wiphy->retry_short > IEEE80211_MAX_TX_RETRY) return -EINVAL; local->hw.conf.short_frame_max_tx_count = wiphy->retry_short; } if (changed & WIPHY_PARAM_RETRY_LONG) { if (wiphy->retry_long > IEEE80211_MAX_TX_RETRY) return -EINVAL; local->hw.conf.long_frame_max_tx_count = wiphy->retry_long; } if (changed & (WIPHY_PARAM_RETRY_SHORT | WIPHY_PARAM_RETRY_LONG)) ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_RETRY_LIMITS); if (changed & (WIPHY_PARAM_TXQ_LIMIT | WIPHY_PARAM_TXQ_MEMORY_LIMIT | WIPHY_PARAM_TXQ_QUANTUM)) ieee80211_txq_set_params(local); return 0; } static int ieee80211_set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev, enum nl80211_tx_power_setting type, int mbm) { struct ieee80211_local *local = wiphy_priv(wiphy); struct ieee80211_sub_if_data *sdata; enum nl80211_tx_power_setting txp_type = type; bool update_txp_type = false; bool has_monitor = false; if (wdev) { sdata = IEEE80211_WDEV_TO_SUB_IF(wdev); if (sdata->vif.type == NL80211_IFTYPE_MONITOR) { sdata = rtnl_dereference(local->monitor_sdata); if (!sdata) return -EOPNOTSUPP; } switch (type) { case NL80211_TX_POWER_AUTOMATIC: sdata->user_power_level = IEEE80211_UNSET_POWER_LEVEL; txp_type = NL80211_TX_POWER_LIMITED; break; case NL80211_TX_POWER_LIMITED: case NL80211_TX_POWER_FIXED: if (mbm < 0 || (mbm % 100)) return -EOPNOTSUPP; sdata->user_power_level = MBM_TO_DBM(mbm); break; } if (txp_type != sdata->vif.bss_conf.txpower_type) { update_txp_type = true; sdata->vif.bss_conf.txpower_type = txp_type; } ieee80211_recalc_txpower(sdata, update_txp_type); return 0; } switch (type) { case NL80211_TX_POWER_AUTOMATIC: local->user_power_level = IEEE80211_UNSET_POWER_LEVEL; txp_type = NL80211_TX_POWER_LIMITED; break; case NL80211_TX_POWER_LIMITED: case NL80211_TX_POWER_FIXED: if (mbm < 0 || (mbm % 100)) return -EOPNOTSUPP; local->user_power_level = MBM_TO_DBM(mbm); break; } mutex_lock(&local->iflist_mtx); list_for_each_entry(sdata, &local->interfaces, list) { if (sdata->vif.type == NL80211_IFTYPE_MONITOR) { has_monitor = true; continue; } sdata->user_power_level = local->user_power_level; if (txp_type != sdata->vif.bss_conf.txpower_type) update_txp_type = true; 
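/*
 * Worked example (added for clarity; not in the original source): the
 * nl80211 API expresses TX power in mBm, so a request of 2000 mBm
 * becomes MBM_TO_DBM(2000) == 20 dBm in user_power_level; requests
 * that are not a whole dB (mbm % 100 != 0) were already rejected with
 * -EOPNOTSUPP earlier in this function.
 */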
sdata->vif.bss_conf.txpower_type = txp_type; } list_for_each_entry(sdata, &local->interfaces, list) { if (sdata->vif.type == NL80211_IFTYPE_MONITOR) continue; ieee80211_recalc_txpower(sdata, update_txp_type); } mutex_unlock(&local->iflist_mtx); if (has_monitor) { sdata = rtnl_dereference(local->monitor_sdata); if (sdata) { sdata->user_power_level = local->user_power_level; if (txp_type != sdata->vif.bss_conf.txpower_type) update_txp_type = true; sdata->vif.bss_conf.txpower_type = txp_type; ieee80211_recalc_txpower(sdata, update_txp_type); } } return 0; } static int ieee80211_get_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev, int *dbm) { struct ieee80211_local *local = wiphy_priv(wiphy); struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev); if (local->ops->get_txpower) return drv_get_txpower(local, sdata, dbm); if (!local->use_chanctx) *dbm = local->hw.conf.power_level; else *dbm = sdata->vif.bss_conf.txpower; return 0; } static int ieee80211_set_wds_peer(struct wiphy *wiphy, struct net_device *dev, const u8 *addr) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); memcpy(&sdata->u.wds.remote_addr, addr, ETH_ALEN); return 0; } static void ieee80211_rfkill_poll(struct wiphy *wiphy) { struct ieee80211_local *local = wiphy_priv(wiphy); drv_rfkill_poll(local); } #ifdef CPTCFG_NL80211_TESTMODE static int ieee80211_testmode_cmd(struct wiphy *wiphy, struct wireless_dev *wdev, void *data, int len) { struct ieee80211_local *local = wiphy_priv(wiphy); struct ieee80211_vif *vif = NULL; if (!local->ops->testmode_cmd) return -EOPNOTSUPP; if (wdev) { struct ieee80211_sub_if_data *sdata; sdata = IEEE80211_WDEV_TO_SUB_IF(wdev); if (sdata->flags & IEEE80211_SDATA_IN_DRIVER) vif = &sdata->vif; } return local->ops->testmode_cmd(&local->hw, vif, data, len); } static int ieee80211_testmode_dump(struct wiphy *wiphy, struct sk_buff *skb, struct netlink_callback *cb, void *data, int len) { struct ieee80211_local *local = wiphy_priv(wiphy); if (!local->ops->testmode_dump) return -EOPNOTSUPP; return local->ops->testmode_dump(&local->hw, skb, cb, data, len); } #endif int __ieee80211_request_smps_ap(struct ieee80211_sub_if_data *sdata, enum ieee80211_smps_mode smps_mode) { struct sta_info *sta; enum ieee80211_smps_mode old_req; if (WARN_ON_ONCE(sdata->vif.type != NL80211_IFTYPE_AP)) return -EINVAL; if (sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT) return 0; old_req = sdata->u.ap.req_smps; sdata->u.ap.req_smps = smps_mode; /* AUTOMATIC doesn't mean much for AP - don't allow it */ if (old_req == smps_mode || smps_mode == IEEE80211_SMPS_AUTOMATIC) return 0; ht_dbg(sdata, "SMPS %d requested in AP mode, sending Action frame to %d stations\n", smps_mode, atomic_read(&sdata->u.ap.num_mcast_sta)); mutex_lock(&sdata->local->sta_mtx); list_for_each_entry(sta, &sdata->local->sta_list, list) { /* * Only stations associated to our AP and * associated VLANs */ if (sta->sdata->bss != &sdata->u.ap) continue; /* This station doesn't support MIMO - skip it */ if (sta_info_tx_streams(sta) == 1) continue; /* * Don't wake up a STA just to send the action frame * unless we are getting more restrictive. */ if (test_sta_flag(sta, WLAN_STA_PS_STA) && !ieee80211_smps_is_restrictive(sta->known_smps_mode, smps_mode)) { ht_dbg(sdata, "Won't send SMPS to sleeping STA %pM\n", sta->sta.addr); continue; } /* * If the STA is not authorized, wait until it gets * authorized and the action frame will be sent then. 
*/ if (!test_sta_flag(sta, WLAN_STA_AUTHORIZED)) continue; ht_dbg(sdata, "Sending SMPS to %pM\n", sta->sta.addr); ieee80211_send_smps_action(sdata, smps_mode, sta->sta.addr, sdata->vif.bss_conf.bssid); } mutex_unlock(&sdata->local->sta_mtx); sdata->smps_mode = smps_mode; ieee80211_queue_work(&sdata->local->hw, &sdata->recalc_smps); return 0; } int __ieee80211_request_smps_mgd(struct ieee80211_sub_if_data *sdata, enum ieee80211_smps_mode smps_mode) { const u8 *ap; enum ieee80211_smps_mode old_req; int err; struct sta_info *sta; bool tdls_peer_found = false; lockdep_assert_held(&sdata->wdev.mtx); if (WARN_ON_ONCE(sdata->vif.type != NL80211_IFTYPE_STATION)) return -EINVAL; old_req = sdata->u.mgd.req_smps; sdata->u.mgd.req_smps = smps_mode; if (old_req == smps_mode && smps_mode != IEEE80211_SMPS_AUTOMATIC) return 0; /* * If not associated, or current association is not an HT * association, there's no need to do anything, just store * the new value until we associate. */ if (!sdata->u.mgd.associated || sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT) return 0; ap = sdata->u.mgd.associated->bssid; rcu_read_lock(); list_for_each_entry_rcu(sta, &sdata->local->sta_list, list) { if (!sta->sta.tdls || sta->sdata != sdata || !sta->uploaded || !test_sta_flag(sta, WLAN_STA_AUTHORIZED)) continue; tdls_peer_found = true; break; } rcu_read_unlock(); if (smps_mode == IEEE80211_SMPS_AUTOMATIC) { if (tdls_peer_found || !sdata->u.mgd.powersave) smps_mode = IEEE80211_SMPS_OFF; else smps_mode = IEEE80211_SMPS_DYNAMIC; } /* send SM PS frame to AP */ err = ieee80211_send_smps_action(sdata, smps_mode, ap, ap); if (err) sdata->u.mgd.req_smps = old_req; else if (smps_mode != IEEE80211_SMPS_OFF && tdls_peer_found) ieee80211_teardown_tdls_peers(sdata); return err; } static int ieee80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev, bool enabled, int timeout) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); if (sdata->vif.type != NL80211_IFTYPE_STATION) return -EOPNOTSUPP; if (!ieee80211_hw_check(&local->hw, SUPPORTS_PS)) return -EOPNOTSUPP; if (enabled == sdata->u.mgd.powersave && timeout == local->dynamic_ps_forced_timeout) return 0; sdata->u.mgd.powersave = enabled; local->dynamic_ps_forced_timeout = timeout; /* no change, but if automatic follow powersave */ sdata_lock(sdata); __ieee80211_request_smps_mgd(sdata, sdata->u.mgd.req_smps); sdata_unlock(sdata); if (ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS)) ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); ieee80211_recalc_ps(local); ieee80211_recalc_ps_vif(sdata); ieee80211_check_fast_rx_iface(sdata); return 0; } static int ieee80211_set_cqm_rssi_config(struct wiphy *wiphy, struct net_device *dev, s32 rssi_thold, u32 rssi_hyst) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_vif *vif = &sdata->vif; struct ieee80211_bss_conf *bss_conf = &vif->bss_conf; if (rssi_thold == bss_conf->cqm_rssi_thold && rssi_hyst == bss_conf->cqm_rssi_hyst) return 0; if (sdata->vif.driver_flags & IEEE80211_VIF_BEACON_FILTER && !(sdata->vif.driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI)) return -EOPNOTSUPP; bss_conf->cqm_rssi_thold = rssi_thold; bss_conf->cqm_rssi_hyst = rssi_hyst; bss_conf->cqm_rssi_low = 0; bss_conf->cqm_rssi_high = 0; sdata->u.mgd.last_cqm_event_signal = 0; /* tell the driver upon association, unless already associated */ if (sdata->u.mgd.associated && sdata->vif.driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI) 
ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_CQM); return 0; } static int ieee80211_set_cqm_rssi_range_config(struct wiphy *wiphy, struct net_device *dev, s32 rssi_low, s32 rssi_high) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_vif *vif = &sdata->vif; struct ieee80211_bss_conf *bss_conf = &vif->bss_conf; if (sdata->vif.driver_flags & IEEE80211_VIF_BEACON_FILTER) return -EOPNOTSUPP; bss_conf->cqm_rssi_low = rssi_low; bss_conf->cqm_rssi_high = rssi_high; bss_conf->cqm_rssi_thold = 0; bss_conf->cqm_rssi_hyst = 0; sdata->u.mgd.last_cqm_event_signal = 0; /* tell the driver upon association, unless already associated */ if (sdata->u.mgd.associated && sdata->vif.driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI) ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_CQM); return 0; } static int ieee80211_set_bitrate_mask(struct wiphy *wiphy, struct net_device *dev, const u8 *addr, const struct cfg80211_bitrate_mask *mask) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); int i, ret; if (!ieee80211_sdata_running(sdata)) return -ENETDOWN; /* * If active validate the setting and reject it if it doesn't leave * at least one basic rate usable, since we really have to be able * to send something, and if we're an AP we have to be able to do * so at a basic rate so that all clients can receive it. */ if (rcu_access_pointer(sdata->vif.chanctx_conf) && sdata->vif.bss_conf.chandef.chan) { u32 basic_rates = sdata->vif.bss_conf.basic_rates; enum nl80211_band band = sdata->vif.bss_conf.chandef.chan->band; if (!(mask->control[band].legacy & basic_rates)) return -EINVAL; } if (ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL)) { ret = drv_set_bitrate_mask(local, sdata, mask); if (ret) return ret; } for (i = 0; i < NUM_NL80211_BANDS; i++) { struct ieee80211_supported_band *sband = wiphy->bands[i]; int j; sdata->rc_rateidx_mask[i] = mask->control[i].legacy; memcpy(sdata->rc_rateidx_mcs_mask[i], mask->control[i].ht_mcs, sizeof(mask->control[i].ht_mcs)); memcpy(sdata->rc_rateidx_vht_mcs_mask[i], mask->control[i].vht_mcs, sizeof(mask->control[i].vht_mcs)); sdata->rc_has_mcs_mask[i] = false; sdata->rc_has_vht_mcs_mask[i] = false; if (!sband) continue; for (j = 0; j < IEEE80211_HT_MCS_MASK_LEN; j++) { if (~sdata->rc_rateidx_mcs_mask[i][j]) { sdata->rc_has_mcs_mask[i] = true; break; } } for (j = 0; j < NL80211_VHT_NSS_MAX; j++) { if (~sdata->rc_rateidx_vht_mcs_mask[i][j]) { sdata->rc_has_vht_mcs_mask[i] = true; break; } } } return 0; } static int ieee80211_start_radar_detection(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_chan_def *chandef, u32 cac_time_ms) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_local *local = sdata->local; int err; mutex_lock(&local->mtx); if (!list_empty(&local->roc_list) || local->scanning) { err = -EBUSY; goto out_unlock; } /* whatever, but channel contexts should not complain about that one */ sdata->smps_mode = IEEE80211_SMPS_OFF; sdata->needed_rx_chains = local->rx_chains; err = ieee80211_vif_use_channel(sdata, chandef, IEEE80211_CHANCTX_SHARED); if (err) goto out_unlock; ieee80211_queue_delayed_work(&sdata->local->hw, &sdata->dfs_cac_timer_work, msecs_to_jiffies(cac_time_ms)); out_unlock: mutex_unlock(&local->mtx); return err; } static struct cfg80211_beacon_data * cfg80211_beacon_dup(struct cfg80211_beacon_data *beacon) { struct cfg80211_beacon_data *new_beacon; u8 *pos; int len; len = beacon->head_len + 
beacon->tail_len + beacon->beacon_ies_len + beacon->proberesp_ies_len + beacon->assocresp_ies_len + beacon->probe_resp_len + beacon->lci_len + beacon->civicloc_len; new_beacon = kzalloc(sizeof(*new_beacon) + len, GFP_KERNEL); if (!new_beacon) return NULL; pos = (u8 *)(new_beacon + 1); if (beacon->head_len) { new_beacon->head_len = beacon->head_len; new_beacon->head = pos; memcpy(pos, beacon->head, beacon->head_len); pos += beacon->head_len; } if (beacon->tail_len) { new_beacon->tail_len = beacon->tail_len; new_beacon->tail = pos; memcpy(pos, beacon->tail, beacon->tail_len); pos += beacon->tail_len; } if (beacon->beacon_ies_len) { new_beacon->beacon_ies_len = beacon->beacon_ies_len; new_beacon->beacon_ies = pos; memcpy(pos, beacon->beacon_ies, beacon->beacon_ies_len); pos += beacon->beacon_ies_len; } if (beacon->proberesp_ies_len) { new_beacon->proberesp_ies_len = beacon->proberesp_ies_len; new_beacon->proberesp_ies = pos; memcpy(pos, beacon->proberesp_ies, beacon->proberesp_ies_len); pos += beacon->proberesp_ies_len; } if (beacon->assocresp_ies_len) { new_beacon->assocresp_ies_len = beacon->assocresp_ies_len; new_beacon->assocresp_ies = pos; memcpy(pos, beacon->assocresp_ies, beacon->assocresp_ies_len); pos += beacon->assocresp_ies_len; } if (beacon->probe_resp_len) { new_beacon->probe_resp_len = beacon->probe_resp_len; new_beacon->probe_resp = pos; memcpy(pos, beacon->probe_resp, beacon->probe_resp_len); pos += beacon->probe_resp_len; } /* might copy -1, meaning no changes requested */ new_beacon->ftm_responder = beacon->ftm_responder; if (beacon->lci) { new_beacon->lci_len = beacon->lci_len; new_beacon->lci = pos; memcpy(pos, beacon->lci, beacon->lci_len); pos += beacon->lci_len; } if (beacon->civicloc) { new_beacon->civicloc_len = beacon->civicloc_len; new_beacon->civicloc = pos; memcpy(pos, beacon->civicloc, beacon->civicloc_len); pos += beacon->civicloc_len; } return new_beacon; } void ieee80211_csa_finish(struct ieee80211_vif *vif) { struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); ieee80211_queue_work(&sdata->local->hw, &sdata->csa_finalize_work); } EXPORT_SYMBOL(ieee80211_csa_finish); static int ieee80211_set_after_csa_beacon(struct ieee80211_sub_if_data *sdata, u32 *changed) { int err; switch (sdata->vif.type) { case NL80211_IFTYPE_AP: err = ieee80211_assign_beacon(sdata, sdata->u.ap.next_beacon, NULL); kfree(sdata->u.ap.next_beacon); sdata->u.ap.next_beacon = NULL; if (err < 0) return err; *changed |= err; break; case NL80211_IFTYPE_ADHOC: err = ieee80211_ibss_finish_csa(sdata); if (err < 0) return err; *changed |= err; break; #ifdef CPTCFG_MAC80211_MESH case NL80211_IFTYPE_MESH_POINT: err = ieee80211_mesh_finish_csa(sdata); if (err < 0) return err; *changed |= err; break; #endif default: WARN_ON(1); return -EINVAL; } return 0; } static int __ieee80211_csa_finalize(struct ieee80211_sub_if_data *sdata) { struct ieee80211_local *local = sdata->local; u32 changed = 0; int err; sdata_assert_lock(sdata); lockdep_assert_held(&local->mtx); lockdep_assert_held(&local->chanctx_mtx); /* * using reservation isn't immediate as it may be deferred until later * with multi-vif. 
once reservation is complete it will re-schedule the * work with no reserved_chanctx so verify chandef to check if it * completed successfully */ if (sdata->reserved_chanctx) { /* * with multi-vif csa driver may call ieee80211_csa_finish() * many times while waiting for other interfaces to use their * reservations */ if (sdata->reserved_ready) return 0; return ieee80211_vif_use_reserved_context(sdata); } if (!cfg80211_chandef_identical(&sdata->vif.bss_conf.chandef, &sdata->csa_chandef)) return -EINVAL; sdata->vif.csa_active = false; err = ieee80211_set_after_csa_beacon(sdata, &changed); if (err) return err; ieee80211_bss_info_change_notify(sdata, changed); if (sdata->csa_block_tx) { ieee80211_wake_vif_queues(local, sdata, IEEE80211_QUEUE_STOP_REASON_CSA); sdata->csa_block_tx = false; } err = drv_post_channel_switch(sdata); if (err) return err; cfg80211_ch_switch_notify(sdata->dev, &sdata->csa_chandef); return 0; } static void ieee80211_csa_finalize(struct ieee80211_sub_if_data *sdata) { if (__ieee80211_csa_finalize(sdata)) { sdata_info(sdata, "failed to finalize CSA, disconnecting\n"); cfg80211_stop_iface(sdata->local->hw.wiphy, &sdata->wdev, GFP_KERNEL); } } void ieee80211_csa_finalize_work(struct work_struct *work) { struct ieee80211_sub_if_data *sdata = container_of(work, struct ieee80211_sub_if_data, csa_finalize_work); struct ieee80211_local *local = sdata->local; sdata_lock(sdata); mutex_lock(&local->mtx); mutex_lock(&local->chanctx_mtx); /* AP might have been stopped while waiting for the lock. */ if (!sdata->vif.csa_active) goto unlock; if (!ieee80211_sdata_running(sdata)) goto unlock; ieee80211_csa_finalize(sdata); unlock: mutex_unlock(&local->chanctx_mtx); mutex_unlock(&local->mtx); sdata_unlock(sdata); } static int ieee80211_set_csa_beacon(struct ieee80211_sub_if_data *sdata, struct cfg80211_csa_settings *params, u32 *changed) { struct ieee80211_csa_settings csa = {}; int err; switch (sdata->vif.type) { case NL80211_IFTYPE_AP: sdata->u.ap.next_beacon = cfg80211_beacon_dup(¶ms->beacon_after); if (!sdata->u.ap.next_beacon) return -ENOMEM; /* * With a count of 0, we don't have to wait for any * TBTT before switching, so complete the CSA * immediately. In theory, with a count == 1 we * should delay the switch until just before the next * TBTT, but that would complicate things so we switch * immediately too. If we would delay the switch * until the next TBTT, we would have to set the probe * response here. * * TODO: A channel switch with count <= 1 without * sending a CSA action frame is kind of useless, * because the clients won't know we're changing * channels. The action frame must be implemented * either here or in the userspace. 
		 */
		if (params->count <= 1)
			break;

		if ((params->n_counter_offsets_beacon >
		     IEEE80211_MAX_CSA_COUNTERS_NUM) ||
		    (params->n_counter_offsets_presp >
		     IEEE80211_MAX_CSA_COUNTERS_NUM))
			return -EINVAL;

		csa.counter_offsets_beacon = params->counter_offsets_beacon;
		csa.counter_offsets_presp = params->counter_offsets_presp;
		csa.n_counter_offsets_beacon = params->n_counter_offsets_beacon;
		csa.n_counter_offsets_presp = params->n_counter_offsets_presp;
		csa.count = params->count;

		err = ieee80211_assign_beacon(sdata, &params->beacon_csa, &csa);
		if (err < 0) {
			kfree(sdata->u.ap.next_beacon);
			/* don't leave a dangling pointer that a later
			 * stop_ap/finalize would free a second time
			 */
			sdata->u.ap.next_beacon = NULL;
			return err;
		}
		*changed |= err;

		break;
	case NL80211_IFTYPE_ADHOC:
		if (!sdata->vif.bss_conf.ibss_joined)
			return -EINVAL;

		if (params->chandef.width != sdata->u.ibss.chandef.width)
			return -EINVAL;

		switch (params->chandef.width) {
		case NL80211_CHAN_WIDTH_40:
			if (cfg80211_get_chandef_type(&params->chandef) !=
			    cfg80211_get_chandef_type(&sdata->u.ibss.chandef))
				return -EINVAL;
			/* fall through */
		case NL80211_CHAN_WIDTH_5:
		case NL80211_CHAN_WIDTH_10:
		case NL80211_CHAN_WIDTH_20_NOHT:
		case NL80211_CHAN_WIDTH_20:
			break;
		default:
			return -EINVAL;
		}

		/* changes into another band are not supported */
		if (sdata->u.ibss.chandef.chan->band !=
		    params->chandef.chan->band)
			return -EINVAL;

		/* see comments in the NL80211_IFTYPE_AP block */
		if (params->count > 1) {
			err = ieee80211_ibss_csa_beacon(sdata, params);
			if (err < 0)
				return err;
			*changed |= err;
		}

		ieee80211_send_action_csa(sdata, params);

		break;
#ifdef CPTCFG_MAC80211_MESH
	case NL80211_IFTYPE_MESH_POINT: {
		struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;

		if (params->chandef.width != sdata->vif.bss_conf.chandef.width)
			return -EINVAL;

		/* changes into another band are not supported */
		if (sdata->vif.bss_conf.chandef.chan->band !=
		    params->chandef.chan->band)
			return -EINVAL;

		if (ifmsh->csa_role == IEEE80211_MESH_CSA_ROLE_NONE) {
			ifmsh->csa_role = IEEE80211_MESH_CSA_ROLE_INIT;
			if (!ifmsh->pre_value)
				ifmsh->pre_value = 1;
			else
				ifmsh->pre_value++;
		}

		/* see comments in the NL80211_IFTYPE_AP block */
		if (params->count > 1) {
			err = ieee80211_mesh_csa_beacon(sdata, params);
			if (err < 0) {
				ifmsh->csa_role = IEEE80211_MESH_CSA_ROLE_NONE;
				return err;
			}
			*changed |= err;
		}

		if (ifmsh->csa_role == IEEE80211_MESH_CSA_ROLE_INIT)
			ieee80211_send_action_csa(sdata, params);

		break;
	}
#endif
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int
__ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
			   struct cfg80211_csa_settings *params)
{
	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_channel_switch ch_switch;
	struct ieee80211_chanctx_conf *conf;
	struct ieee80211_chanctx *chanctx;
	u32 changed = 0;
	int err;

	sdata_assert_lock(sdata);
	lockdep_assert_held(&local->mtx);

	if (!list_empty(&local->roc_list) || local->scanning)
		return -EBUSY;

	if (sdata->wdev.cac_started)
		return -EBUSY;

	if (cfg80211_chandef_identical(&params->chandef,
				       &sdata->vif.bss_conf.chandef))
		return -EINVAL;

	/* don't allow another channel switch if one is already active.
*/ if (sdata->vif.csa_active) return -EBUSY; mutex_lock(&local->chanctx_mtx); conf = rcu_dereference_protected(sdata->vif.chanctx_conf, lockdep_is_held(&local->chanctx_mtx)); if (!conf) { err = -EBUSY; goto out; } chanctx = container_of(conf, struct ieee80211_chanctx, conf); ch_switch.timestamp = 0; ch_switch.device_timestamp = 0; ch_switch.block_tx = params->block_tx; ch_switch.chandef = params->chandef; ch_switch.count = params->count; err = drv_pre_channel_switch(sdata, &ch_switch); if (err) goto out; err = ieee80211_vif_reserve_chanctx(sdata, ¶ms->chandef, chanctx->mode, params->radar_required); if (err) goto out; /* if reservation is invalid then this will fail */ err = ieee80211_check_combinations(sdata, NULL, chanctx->mode, 0); if (err) { ieee80211_vif_unreserve_chanctx(sdata); goto out; } err = ieee80211_set_csa_beacon(sdata, params, &changed); if (err) { ieee80211_vif_unreserve_chanctx(sdata); goto out; } sdata->csa_chandef = params->chandef; sdata->csa_block_tx = params->block_tx; sdata->vif.csa_active = true; if (sdata->csa_block_tx) ieee80211_stop_vif_queues(local, sdata, IEEE80211_QUEUE_STOP_REASON_CSA); cfg80211_ch_switch_started_notify(sdata->dev, &sdata->csa_chandef, params->count); if (changed) { ieee80211_bss_info_change_notify(sdata, changed); drv_channel_switch_beacon(sdata, ¶ms->chandef); } else { /* if the beacon didn't change, we can finalize immediately */ ieee80211_csa_finalize(sdata); } out: mutex_unlock(&local->chanctx_mtx); return err; } int ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_csa_settings *params) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_local *local = sdata->local; int err; mutex_lock(&local->mtx); err = __ieee80211_channel_switch(wiphy, dev, params); mutex_unlock(&local->mtx); return err; } u64 ieee80211_mgmt_tx_cookie(struct ieee80211_local *local) { lockdep_assert_held(&local->mtx); local->roc_cookie_counter++; /* wow, you wrapped 64 bits ... 
more likely a bug */ if (WARN_ON(local->roc_cookie_counter == 0)) local->roc_cookie_counter++; return local->roc_cookie_counter; } int ieee80211_attach_ack_skb(struct ieee80211_local *local, struct sk_buff *skb, u64 *cookie, gfp_t gfp) { unsigned long spin_flags; struct sk_buff *ack_skb; int id; ack_skb = skb_copy(skb, gfp); if (!ack_skb) return -ENOMEM; spin_lock_irqsave(&local->ack_status_lock, spin_flags); id = idr_alloc(&local->ack_status_frames, ack_skb, 1, 0x10000, GFP_ATOMIC); spin_unlock_irqrestore(&local->ack_status_lock, spin_flags); if (id < 0) { kfree_skb(ack_skb); return -ENOMEM; } IEEE80211_SKB_CB(skb)->ack_frame_id = id; *cookie = ieee80211_mgmt_tx_cookie(local); IEEE80211_SKB_CB(ack_skb)->ack.cookie = *cookie; return 0; } static void ieee80211_mgmt_frame_register(struct wiphy *wiphy, struct wireless_dev *wdev, u16 frame_type, bool reg) { struct ieee80211_local *local = wiphy_priv(wiphy); struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev); switch (frame_type) { case IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_REQ: if (reg) { local->probe_req_reg++; sdata->vif.probe_req_reg++; } else { if (local->probe_req_reg) local->probe_req_reg--; if (sdata->vif.probe_req_reg) sdata->vif.probe_req_reg--; } if (!local->open_count) break; if (sdata->vif.probe_req_reg == 1) drv_config_iface_filter(local, sdata, FIF_PROBE_REQ, FIF_PROBE_REQ); else if (sdata->vif.probe_req_reg == 0) drv_config_iface_filter(local, sdata, 0, FIF_PROBE_REQ); ieee80211_configure_filter(local); break; default: break; } } static int ieee80211_set_antenna(struct wiphy *wiphy, u32 tx_ant, u32 rx_ant) { struct ieee80211_local *local = wiphy_priv(wiphy); if (local->started) return -EOPNOTSUPP; return drv_set_antenna(local, tx_ant, rx_ant); } static int ieee80211_get_antenna(struct wiphy *wiphy, u32 *tx_ant, u32 *rx_ant) { struct ieee80211_local *local = wiphy_priv(wiphy); return drv_get_antenna(local, tx_ant, rx_ant); } static int ieee80211_set_rekey_data(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_gtk_rekey_data *data) { struct ieee80211_local *local = wiphy_priv(wiphy); struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); if (!local->ops->set_rekey_data) return -EOPNOTSUPP; drv_set_rekey_data(local, sdata, data); return 0; } static int ieee80211_probe_client(struct wiphy *wiphy, struct net_device *dev, const u8 *peer, u64 *cookie) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_local *local = sdata->local; struct ieee80211_qos_hdr *nullfunc; struct sk_buff *skb; int size = sizeof(*nullfunc); __le16 fc; bool qos; struct ieee80211_tx_info *info; struct sta_info *sta; struct ieee80211_chanctx_conf *chanctx_conf; enum nl80211_band band; int ret; /* the lock is needed to assign the cookie later */ mutex_lock(&local->mtx); rcu_read_lock(); chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); if (WARN_ON(!chanctx_conf)) { ret = -EINVAL; goto unlock; } band = chanctx_conf->def.chan->band; sta = sta_info_get_bss(sdata, peer); if (sta) { qos = sta->sta.wme; } else { ret = -ENOLINK; goto unlock; } if (qos) { fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_NULLFUNC | IEEE80211_FCTL_FROMDS); } else { size -= 2; fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC | IEEE80211_FCTL_FROMDS); } skb = dev_alloc_skb(local->hw.extra_tx_headroom + size); if (!skb) { ret = -ENOMEM; goto unlock; } skb->dev = dev; skb_reserve(skb, local->hw.extra_tx_headroom); nullfunc = skb_put(skb, size); nullfunc->frame_control = fc; 
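/* FromDS data frame addressing: addr1 = DA (the peer), addr2 = BSSID, addr3 = SA; here addr2/addr3 are both our interface address */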
nullfunc->duration_id = 0; memcpy(nullfunc->addr1, sta->sta.addr, ETH_ALEN); memcpy(nullfunc->addr2, sdata->vif.addr, ETH_ALEN); memcpy(nullfunc->addr3, sdata->vif.addr, ETH_ALEN); nullfunc->seq_ctrl = 0; info = IEEE80211_SKB_CB(skb); info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS | IEEE80211_TX_INTFL_NL80211_FRAME_TX; info->band = band; skb_set_queue_mapping(skb, IEEE80211_AC_VO); skb->priority = 7; if (qos) nullfunc->qos_ctrl = cpu_to_le16(7); ret = ieee80211_attach_ack_skb(local, skb, cookie, GFP_ATOMIC); if (ret) { kfree_skb(skb); goto unlock; } local_bh_disable(); ieee80211_xmit(sdata, sta, skb, 0); local_bh_enable(); ret = 0; unlock: rcu_read_unlock(); mutex_unlock(&local->mtx); return ret; } static int ieee80211_cfg_get_channel(struct wiphy *wiphy, struct wireless_dev *wdev, struct cfg80211_chan_def *chandef) { struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev); struct ieee80211_local *local = wiphy_priv(wiphy); struct ieee80211_chanctx_conf *chanctx_conf; int ret = -ENODATA; rcu_read_lock(); chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); if (chanctx_conf) { *chandef = sdata->vif.bss_conf.chandef; ret = 0; } else if (local->open_count > 0 && local->open_count == local->monitors && sdata->vif.type == NL80211_IFTYPE_MONITOR) { if (local->use_chanctx) *chandef = local->monitor_chandef; else *chandef = local->_oper_chandef; ret = 0; } rcu_read_unlock(); return ret; } #ifdef CONFIG_PM static void ieee80211_set_wakeup(struct wiphy *wiphy, bool enabled) { drv_set_wakeup(wiphy_priv(wiphy), enabled); } #endif static int ieee80211_set_qos_map(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_qos_map *qos_map) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct mac80211_qos_map *new_qos_map, *old_qos_map; if (qos_map) { new_qos_map = kzalloc(sizeof(*new_qos_map), GFP_KERNEL); if (!new_qos_map) return -ENOMEM; memcpy(&new_qos_map->qos_map, qos_map, sizeof(*qos_map)); } else { /* A NULL qos_map was passed to disable QoS mapping */ new_qos_map = NULL; } old_qos_map = sdata_dereference(sdata->qos_map, sdata); rcu_assign_pointer(sdata->qos_map, new_qos_map); if (old_qos_map) kfree_rcu(old_qos_map, rcu_head); return 0; } static int ieee80211_set_ap_chanwidth(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_chan_def *chandef) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); int ret; u32 changed = 0; ret = ieee80211_vif_change_bandwidth(sdata, chandef, &changed); if (ret == 0) ieee80211_bss_info_change_notify(sdata, changed); return ret; } static int ieee80211_add_tx_ts(struct wiphy *wiphy, struct net_device *dev, u8 tsid, const u8 *peer, u8 up, u16 admitted_time) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; int ac = ieee802_1d_to_ac[up]; if (sdata->vif.type != NL80211_IFTYPE_STATION) return -EOPNOTSUPP; if (!(sdata->wmm_acm & BIT(up))) return -EINVAL; if (ifmgd->tx_tspec[ac].admitted_time) return -EBUSY; if (admitted_time) { ifmgd->tx_tspec[ac].admitted_time = 32 * admitted_time; ifmgd->tx_tspec[ac].tsid = tsid; ifmgd->tx_tspec[ac].up = up; } return 0; } static int ieee80211_del_tx_ts(struct wiphy *wiphy, struct net_device *dev, u8 tsid, const u8 *peer) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; struct ieee80211_local *local = wiphy_priv(wiphy); int ac; for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { struct ieee80211_sta_tx_tspec *tx_tspec = 
&ifmgd->tx_tspec[ac]; /* skip unused entries */ if (!tx_tspec->admitted_time) continue; if (tx_tspec->tsid != tsid) continue; /* due to this new packets will be reassigned to non-ACM ACs */ tx_tspec->up = -1; /* Make sure that all packets have been sent to avoid to * restore the QoS params on packets that are still on the * queues. */ synchronize_net(); ieee80211_flush_queues(local, sdata, false); /* restore the normal QoS parameters * (unconditionally to avoid races) */ tx_tspec->action = TX_TSPEC_ACTION_STOP_DOWNGRADE; tx_tspec->downgraded = false; ieee80211_sta_handle_tspec_ac_params(sdata); /* finally clear all the data */ memset(tx_tspec, 0, sizeof(*tx_tspec)); return 0; } return -ENOENT; } void ieee80211_nan_func_terminated(struct ieee80211_vif *vif, u8 inst_id, enum nl80211_nan_func_term_reason reason, gfp_t gfp) { struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); struct cfg80211_nan_func *func; struct wireless_dev *wdev; u64 cookie; if (WARN_ON(vif->type != NL80211_IFTYPE_NAN)) return; spin_lock_bh(&sdata->u.nan.func_lock); func = idr_find(&sdata->u.nan.function_inst_ids, inst_id); if (WARN_ON(!func)) { spin_unlock_bh(&sdata->u.nan.func_lock); return; } cookie = func->cookie; idr_remove(&sdata->u.nan.function_inst_ids, inst_id); spin_unlock_bh(&sdata->u.nan.func_lock); cfg80211_free_nan_func(func); wdev = ieee80211_vif_to_wdev(vif); if (!WARN_ON_ONCE(!wdev)) cfg80211_nan_func_terminated(wdev, inst_id, reason, cookie, gfp); } EXPORT_SYMBOL(ieee80211_nan_func_terminated); void ieee80211_nan_func_match(struct ieee80211_vif *vif, struct cfg80211_nan_match_params *match, gfp_t gfp) { struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); struct cfg80211_nan_func *func; struct wireless_dev *wdev; if (WARN_ON(vif->type != NL80211_IFTYPE_NAN)) return; spin_lock_bh(&sdata->u.nan.func_lock); func = idr_find(&sdata->u.nan.function_inst_ids, match->inst_id); if (WARN_ON(!func)) { spin_unlock_bh(&sdata->u.nan.func_lock); return; } match->cookie = func->cookie; spin_unlock_bh(&sdata->u.nan.func_lock); wdev = ieee80211_vif_to_wdev(vif); if (!WARN_ON_ONCE(!wdev)) cfg80211_nan_match(wdev, match, gfp); } EXPORT_SYMBOL(ieee80211_nan_func_match); static int ieee80211_set_multicast_to_unicast(struct wiphy *wiphy, struct net_device *dev, const bool enabled) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); sdata->u.ap.multicast_to_unicast = enabled; return 0; } void ieee80211_fill_txq_stats(struct cfg80211_txq_stats *txqstats, struct txq_info *txqi) { if (!(txqstats->filled & BIT(NL80211_TXQ_STATS_BACKLOG_BYTES))) { txqstats->filled |= BIT(NL80211_TXQ_STATS_BACKLOG_BYTES); txqstats->backlog_bytes = txqi->tin.backlog_bytes; } if (!(txqstats->filled & BIT(NL80211_TXQ_STATS_BACKLOG_PACKETS))) { txqstats->filled |= BIT(NL80211_TXQ_STATS_BACKLOG_PACKETS); txqstats->backlog_packets = txqi->tin.backlog_packets; } if (!(txqstats->filled & BIT(NL80211_TXQ_STATS_FLOWS))) { txqstats->filled |= BIT(NL80211_TXQ_STATS_FLOWS); txqstats->flows = txqi->tin.flows; } if (!(txqstats->filled & BIT(NL80211_TXQ_STATS_DROPS))) { txqstats->filled |= BIT(NL80211_TXQ_STATS_DROPS); txqstats->drops = txqi->cstats.drop_count; } if (!(txqstats->filled & BIT(NL80211_TXQ_STATS_ECN_MARKS))) { txqstats->filled |= BIT(NL80211_TXQ_STATS_ECN_MARKS); txqstats->ecn_marks = txqi->cstats.ecn_mark; } if (!(txqstats->filled & BIT(NL80211_TXQ_STATS_OVERLIMIT))) { txqstats->filled |= BIT(NL80211_TXQ_STATS_OVERLIMIT); txqstats->overlimit = txqi->tin.overlimit; } if (!(txqstats->filled & 
BIT(NL80211_TXQ_STATS_COLLISIONS))) { txqstats->filled |= BIT(NL80211_TXQ_STATS_COLLISIONS); txqstats->collisions = txqi->tin.collisions; } if (!(txqstats->filled & BIT(NL80211_TXQ_STATS_TX_BYTES))) { txqstats->filled |= BIT(NL80211_TXQ_STATS_TX_BYTES); txqstats->tx_bytes = txqi->tin.tx_bytes; } if (!(txqstats->filled & BIT(NL80211_TXQ_STATS_TX_PACKETS))) { txqstats->filled |= BIT(NL80211_TXQ_STATS_TX_PACKETS); txqstats->tx_packets = txqi->tin.tx_packets; } } static int ieee80211_get_txq_stats(struct wiphy *wiphy, struct wireless_dev *wdev, struct cfg80211_txq_stats *txqstats) { struct ieee80211_local *local = wiphy_priv(wiphy); struct ieee80211_sub_if_data *sdata; int ret = 0; if (!local->ops->wake_tx_queue) return 1; spin_lock_bh(&local->fq.lock); rcu_read_lock(); if (wdev) { sdata = IEEE80211_WDEV_TO_SUB_IF(wdev); if (!sdata->vif.txq) { ret = 1; goto out; } ieee80211_fill_txq_stats(txqstats, to_txq_info(sdata->vif.txq)); } else { /* phy stats */ txqstats->filled |= BIT(NL80211_TXQ_STATS_BACKLOG_PACKETS) | BIT(NL80211_TXQ_STATS_BACKLOG_BYTES) | BIT(NL80211_TXQ_STATS_OVERLIMIT) | BIT(NL80211_TXQ_STATS_OVERMEMORY) | BIT(NL80211_TXQ_STATS_COLLISIONS) | BIT(NL80211_TXQ_STATS_MAX_FLOWS); txqstats->backlog_packets = local->fq.backlog; txqstats->backlog_bytes = local->fq.memory_usage; txqstats->overlimit = local->fq.overlimit; txqstats->overmemory = local->fq.overmemory; txqstats->collisions = local->fq.collisions; txqstats->max_flows = local->fq.flows_cnt; } out: rcu_read_unlock(); spin_unlock_bh(&local->fq.lock); return ret; } static int ieee80211_get_ftm_responder_stats(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_ftm_responder_stats *ftm_stats) { struct ieee80211_local *local = wiphy_priv(wiphy); struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); return drv_get_ftm_responder_stats(local, sdata, ftm_stats); } static int ieee80211_start_pmsr(struct wiphy *wiphy, struct wireless_dev *dev, struct cfg80211_pmsr_request *request) { struct ieee80211_local *local = wiphy_priv(wiphy); struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(dev); return drv_start_pmsr(local, sdata, request); } static void ieee80211_abort_pmsr(struct wiphy *wiphy, struct wireless_dev *dev, struct cfg80211_pmsr_request *request) { struct ieee80211_local *local = wiphy_priv(wiphy); struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(dev); return drv_abort_pmsr(local, sdata, request); } const struct cfg80211_ops mac80211_config_ops = { .add_virtual_intf = ieee80211_add_iface, .del_virtual_intf = ieee80211_del_iface, .change_virtual_intf = ieee80211_change_iface, .start_p2p_device = ieee80211_start_p2p_device, .stop_p2p_device = ieee80211_stop_p2p_device, .add_key = ieee80211_add_key, .del_key = ieee80211_del_key, .get_key = ieee80211_get_key, .set_default_key = ieee80211_config_default_key, .set_default_mgmt_key = ieee80211_config_default_mgmt_key, .start_ap = ieee80211_start_ap, .change_beacon = ieee80211_change_beacon, .stop_ap = ieee80211_stop_ap, .add_station = ieee80211_add_station, .del_station = ieee80211_del_station, .change_station = ieee80211_change_station, .get_station = ieee80211_get_station, .dump_station = ieee80211_dump_station, .dump_survey = ieee80211_dump_survey, #ifdef CPTCFG_MAC80211_MESH .add_mpath = ieee80211_add_mpath, .del_mpath = ieee80211_del_mpath, .change_mpath = ieee80211_change_mpath, .get_mpath = ieee80211_get_mpath, .dump_mpath = ieee80211_dump_mpath, .get_mpp = ieee80211_get_mpp, .dump_mpp = ieee80211_dump_mpp, .update_mesh_config = 
					ieee80211_update_mesh_config,
	.get_mesh_config = ieee80211_get_mesh_config,
	.join_mesh = ieee80211_join_mesh,
	.leave_mesh = ieee80211_leave_mesh,
#endif
	.join_ocb = ieee80211_join_ocb,
	.leave_ocb = ieee80211_leave_ocb,
	.change_bss = ieee80211_change_bss,
	.set_txq_params = ieee80211_set_txq_params,
	.set_monitor_channel = ieee80211_set_monitor_channel,
	.suspend = ieee80211_suspend,
	.resume = ieee80211_resume,
	.scan = ieee80211_scan,
	.abort_scan = ieee80211_abort_scan,
	.sched_scan_start = ieee80211_sched_scan_start,
	.sched_scan_stop = ieee80211_sched_scan_stop,
	.auth = ieee80211_auth,
	.assoc = ieee80211_assoc,
	.deauth = ieee80211_deauth,
	.disassoc = ieee80211_disassoc,
	.join_ibss = ieee80211_join_ibss,
	.leave_ibss = ieee80211_leave_ibss,
	.set_mcast_rate = ieee80211_set_mcast_rate,
	.set_wiphy_params = ieee80211_set_wiphy_params,
	.set_tx_power = ieee80211_set_tx_power,
	.get_tx_power = ieee80211_get_tx_power,
	.set_wds_peer = ieee80211_set_wds_peer,
	.rfkill_poll = ieee80211_rfkill_poll,
	CFG80211_TESTMODE_CMD(ieee80211_testmode_cmd)
	CFG80211_TESTMODE_DUMP(ieee80211_testmode_dump)
	.set_power_mgmt = ieee80211_set_power_mgmt,
	.set_bitrate_mask = ieee80211_set_bitrate_mask,
	.remain_on_channel = ieee80211_remain_on_channel,
	.cancel_remain_on_channel = ieee80211_cancel_remain_on_channel,
	.mgmt_tx = ieee80211_mgmt_tx,
	.mgmt_tx_cancel_wait = ieee80211_mgmt_tx_cancel_wait,
	.set_cqm_rssi_config = ieee80211_set_cqm_rssi_config,
	.set_cqm_rssi_range_config = ieee80211_set_cqm_rssi_range_config,
	.mgmt_frame_register = ieee80211_mgmt_frame_register,
	.set_antenna = ieee80211_set_antenna,
	.get_antenna = ieee80211_get_antenna,
	.set_rekey_data = ieee80211_set_rekey_data,
	.tdls_oper = ieee80211_tdls_oper,
	.tdls_mgmt = ieee80211_tdls_mgmt,
	.tdls_channel_switch = ieee80211_tdls_channel_switch,
	.tdls_cancel_channel_switch = ieee80211_tdls_cancel_channel_switch,
	.probe_client = ieee80211_probe_client,
	.set_noack_map = ieee80211_set_noack_map,
#ifdef CONFIG_PM
	.set_wakeup = ieee80211_set_wakeup,
#endif
	.get_channel = ieee80211_cfg_get_channel,
	.start_radar_detection = ieee80211_start_radar_detection,
	.channel_switch = ieee80211_channel_switch,
	.set_qos_map = ieee80211_set_qos_map,
	.set_ap_chanwidth = ieee80211_set_ap_chanwidth,
	.add_tx_ts = ieee80211_add_tx_ts,
	.del_tx_ts = ieee80211_del_tx_ts,
	.start_nan = ieee80211_start_nan,
	.stop_nan = ieee80211_stop_nan,
	.nan_change_conf = ieee80211_nan_change_conf,
	.add_nan_func = ieee80211_add_nan_func,
	.del_nan_func = ieee80211_del_nan_func,
	.set_multicast_to_unicast = ieee80211_set_multicast_to_unicast,
	.tx_control_port = ieee80211_tx_control_port,
	.get_txq_stats = ieee80211_get_txq_stats,
	.get_ftm_responder_stats = ieee80211_get_ftm_responder_stats,
	.start_pmsr = ieee80211_start_pmsr,
	.abort_pmsr = ieee80211_abort_pmsr,
};

backport-iwlwifi-dkms-7906/net/mac80211/chan.c

/*
 * mac80211 - channel management
 */

#include <linux/nl80211.h>
#include <linux/export.h>
#include <linux/rtnetlink.h>
#include <net/cfg80211.h>
#include "ieee80211_i.h"
#include "driver-ops.h"

static int ieee80211_chanctx_num_assigned(struct ieee80211_local *local,
					  struct ieee80211_chanctx *ctx)
{
	struct ieee80211_sub_if_data *sdata;
	int num = 0;

	lockdep_assert_held(&local->chanctx_mtx);

	list_for_each_entry(sdata, &ctx->assigned_vifs, assigned_chanctx_list)
		num++;

	return num;
}

static int ieee80211_chanctx_num_reserved(struct ieee80211_local *local,
					  struct ieee80211_chanctx *ctx)
{
	struct ieee80211_sub_if_data *sdata;
	int num = 0;

	lockdep_assert_held(&local->chanctx_mtx);

	list_for_each_entry(sdata,
&ctx->reserved_vifs, reserved_chanctx_list) num++; return num; } int ieee80211_chanctx_refcount(struct ieee80211_local *local, struct ieee80211_chanctx *ctx) { return ieee80211_chanctx_num_assigned(local, ctx) + ieee80211_chanctx_num_reserved(local, ctx); } static int ieee80211_num_chanctx(struct ieee80211_local *local) { struct ieee80211_chanctx *ctx; int num = 0; lockdep_assert_held(&local->chanctx_mtx); list_for_each_entry(ctx, &local->chanctx_list, list) num++; return num; } static bool ieee80211_can_create_new_chanctx(struct ieee80211_local *local) { lockdep_assert_held(&local->chanctx_mtx); return ieee80211_num_chanctx(local) < ieee80211_max_num_channels(local); } static struct ieee80211_chanctx * ieee80211_vif_get_chanctx(struct ieee80211_sub_if_data *sdata) { struct ieee80211_local *local __maybe_unused = sdata->local; struct ieee80211_chanctx_conf *conf; conf = rcu_dereference_protected(sdata->vif.chanctx_conf, lockdep_is_held(&local->chanctx_mtx)); if (!conf) return NULL; return container_of(conf, struct ieee80211_chanctx, conf); } static const struct cfg80211_chan_def * ieee80211_chanctx_reserved_chandef(struct ieee80211_local *local, struct ieee80211_chanctx *ctx, const struct cfg80211_chan_def *compat) { struct ieee80211_sub_if_data *sdata; lockdep_assert_held(&local->chanctx_mtx); list_for_each_entry(sdata, &ctx->reserved_vifs, reserved_chanctx_list) { if (!compat) compat = &sdata->reserved_chandef; compat = cfg80211_chandef_compatible(&sdata->reserved_chandef, compat); if (!compat) break; } return compat; } static const struct cfg80211_chan_def * ieee80211_chanctx_non_reserved_chandef(struct ieee80211_local *local, struct ieee80211_chanctx *ctx, const struct cfg80211_chan_def *compat) { struct ieee80211_sub_if_data *sdata; lockdep_assert_held(&local->chanctx_mtx); list_for_each_entry(sdata, &ctx->assigned_vifs, assigned_chanctx_list) { if (sdata->reserved_chanctx != NULL) continue; if (!compat) compat = &sdata->vif.bss_conf.chandef; compat = cfg80211_chandef_compatible( &sdata->vif.bss_conf.chandef, compat); if (!compat) break; } return compat; } static const struct cfg80211_chan_def * ieee80211_chanctx_combined_chandef(struct ieee80211_local *local, struct ieee80211_chanctx *ctx, const struct cfg80211_chan_def *compat) { lockdep_assert_held(&local->chanctx_mtx); compat = ieee80211_chanctx_reserved_chandef(local, ctx, compat); if (!compat) return NULL; compat = ieee80211_chanctx_non_reserved_chandef(local, ctx, compat); if (!compat) return NULL; return compat; } static bool ieee80211_chanctx_can_reserve_chandef(struct ieee80211_local *local, struct ieee80211_chanctx *ctx, const struct cfg80211_chan_def *def) { lockdep_assert_held(&local->chanctx_mtx); if (ieee80211_chanctx_combined_chandef(local, ctx, def)) return true; if (!list_empty(&ctx->reserved_vifs) && ieee80211_chanctx_reserved_chandef(local, ctx, def)) return true; return false; } static struct ieee80211_chanctx * ieee80211_find_reservation_chanctx(struct ieee80211_local *local, const struct cfg80211_chan_def *chandef, enum ieee80211_chanctx_mode mode) { struct ieee80211_chanctx *ctx; lockdep_assert_held(&local->chanctx_mtx); if (mode == IEEE80211_CHANCTX_EXCLUSIVE) return NULL; list_for_each_entry(ctx, &local->chanctx_list, list) { if (ctx->replace_state == IEEE80211_CHANCTX_WILL_BE_REPLACED) continue; if (ctx->mode == IEEE80211_CHANCTX_EXCLUSIVE) continue; if (!ieee80211_chanctx_can_reserve_chandef(local, ctx, chandef)) continue; return ctx; } return NULL; } enum nl80211_chan_width ieee80211_get_sta_bw(struct 
						       ieee80211_sta *sta)
{
	switch (sta->bandwidth) {
	case IEEE80211_STA_RX_BW_20:
		if (sta->ht_cap.ht_supported)
			return NL80211_CHAN_WIDTH_20;
		else
			return NL80211_CHAN_WIDTH_20_NOHT;
	case IEEE80211_STA_RX_BW_40:
		return NL80211_CHAN_WIDTH_40;
	case IEEE80211_STA_RX_BW_80:
		return NL80211_CHAN_WIDTH_80;
	case IEEE80211_STA_RX_BW_160:
		/*
		 * This applies to both 160 and 80+80. Since we use
		 * the returned value to consider degradation of
		 * ctx->conf.min_def, we have to make sure to take
		 * the bigger one (NL80211_CHAN_WIDTH_160).
		 * Otherwise we might try degrading even when not
		 * needed, as the max required sta_bw returned (80+80)
		 * might be smaller than the configured bw (160).
		 */
		return NL80211_CHAN_WIDTH_160;
	default:
		WARN_ON(1);
		return NL80211_CHAN_WIDTH_20;
	}
}

static enum nl80211_chan_width
ieee80211_get_max_required_bw(struct ieee80211_sub_if_data *sdata)
{
	enum nl80211_chan_width max_bw = NL80211_CHAN_WIDTH_20_NOHT;
	struct sta_info *sta;

	rcu_read_lock();
	list_for_each_entry_rcu(sta, &sdata->local->sta_list, list) {
		if (sdata != sta->sdata &&
		    !(sta->sdata->bss && sta->sdata->bss == sdata->bss))
			continue;

		max_bw = max(max_bw, ieee80211_get_sta_bw(&sta->sta));
	}
	rcu_read_unlock();

	return max_bw;
}

static enum nl80211_chan_width
ieee80211_get_chanctx_max_required_bw(struct ieee80211_local *local,
				      struct ieee80211_chanctx_conf *conf)
{
	struct ieee80211_sub_if_data *sdata;
	enum nl80211_chan_width max_bw = NL80211_CHAN_WIDTH_20_NOHT;

	rcu_read_lock();
	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
		struct ieee80211_vif *vif = &sdata->vif;
		enum nl80211_chan_width width = NL80211_CHAN_WIDTH_20_NOHT;

		if (!ieee80211_sdata_running(sdata))
			continue;

		if (rcu_access_pointer(sdata->vif.chanctx_conf) != conf)
			continue;

		switch (vif->type) {
		case NL80211_IFTYPE_AP:
		case NL80211_IFTYPE_AP_VLAN:
			width = ieee80211_get_max_required_bw(sdata);
			break;
		case NL80211_IFTYPE_STATION:
			/*
			 * The AP's sta->bandwidth is not set yet at this
			 * point, so take the width from the chandef, but
			 * account also for TDLS peers
			 */
			width = max(vif->bss_conf.chandef.width,
				    ieee80211_get_max_required_bw(sdata));
			break;
		case NL80211_IFTYPE_P2P_DEVICE:
		case NL80211_IFTYPE_NAN:
			continue;
		case NL80211_IFTYPE_ADHOC:
		case NL80211_IFTYPE_WDS:
		case NL80211_IFTYPE_MESH_POINT:
		case NL80211_IFTYPE_OCB:
			width = vif->bss_conf.chandef.width;
			break;
		case NL80211_IFTYPE_UNSPECIFIED:
		case NUM_NL80211_IFTYPES:
		case NL80211_IFTYPE_MONITOR:
		case NL80211_IFTYPE_P2P_CLIENT:
		case NL80211_IFTYPE_P2P_GO:
		case NL80211_IFTYPE_NAN_DATA:
			WARN_ON_ONCE(1);
		}
		max_bw = max(max_bw, width);
	}

	/* use the configured bandwidth in case of monitor interface */
	sdata = rcu_dereference(local->monitor_sdata);
	if (sdata &&
	    rcu_access_pointer(sdata->vif.chanctx_conf) == conf)
		max_bw = max(max_bw, conf->def.width);

	rcu_read_unlock();

	return max_bw;
}

/*
 * recalc the min required chan width of the channel context, which is
 * the max of min required widths of all the interfaces bound to this
 * channel context.
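 * For example, an 80 MHz AP whose only stations are 20 MHz HT clients can
 * run with a 20 MHz min_def, letting the driver receive on a narrower
 * bandwidth until a wider station appears.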
*/ void ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local, struct ieee80211_chanctx *ctx) { enum nl80211_chan_width max_bw; struct cfg80211_chan_def min_def; lockdep_assert_held(&local->chanctx_mtx); /* don't optimize 5MHz, 10MHz, and radar_enabled confs */ if (ctx->conf.def.width == NL80211_CHAN_WIDTH_5 || ctx->conf.def.width == NL80211_CHAN_WIDTH_10 || ctx->conf.radar_enabled) { ctx->conf.min_def = ctx->conf.def; return; } max_bw = ieee80211_get_chanctx_max_required_bw(local, &ctx->conf); /* downgrade chandef up to max_bw */ min_def = ctx->conf.def; while (min_def.width > max_bw) ieee80211_chandef_downgrade(&min_def); if (cfg80211_chandef_identical(&ctx->conf.min_def, &min_def)) return; ctx->conf.min_def = min_def; if (!ctx->driver_present) return; drv_change_chanctx(local, ctx, IEEE80211_CHANCTX_CHANGE_MIN_WIDTH); } static void ieee80211_change_chanctx(struct ieee80211_local *local, struct ieee80211_chanctx *ctx, const struct cfg80211_chan_def *chandef) { if (cfg80211_chandef_identical(&ctx->conf.def, chandef)) { ieee80211_recalc_chanctx_min_def(local, ctx); return; } WARN_ON(!cfg80211_chandef_compatible(&ctx->conf.def, chandef)); ctx->conf.def = *chandef; drv_change_chanctx(local, ctx, IEEE80211_CHANCTX_CHANGE_WIDTH); ieee80211_recalc_chanctx_min_def(local, ctx); if (!local->use_chanctx) { local->_oper_chandef = *chandef; ieee80211_hw_config(local, 0); } } static struct ieee80211_chanctx * ieee80211_find_chanctx(struct ieee80211_local *local, const struct cfg80211_chan_def *chandef, enum ieee80211_chanctx_mode mode) { struct ieee80211_chanctx *ctx; lockdep_assert_held(&local->chanctx_mtx); if (mode == IEEE80211_CHANCTX_EXCLUSIVE) return NULL; list_for_each_entry(ctx, &local->chanctx_list, list) { const struct cfg80211_chan_def *compat; if (ctx->replace_state != IEEE80211_CHANCTX_REPLACE_NONE) continue; if (ctx->mode == IEEE80211_CHANCTX_EXCLUSIVE) continue; compat = cfg80211_chandef_compatible(&ctx->conf.def, chandef); if (!compat) continue; compat = ieee80211_chanctx_reserved_chandef(local, ctx, compat); if (!compat) continue; ieee80211_change_chanctx(local, ctx, compat); return ctx; } return NULL; } bool ieee80211_is_radar_required(struct ieee80211_local *local) { struct ieee80211_sub_if_data *sdata; lockdep_assert_held(&local->mtx); rcu_read_lock(); list_for_each_entry_rcu(sdata, &local->interfaces, list) { if (sdata->radar_required) { rcu_read_unlock(); return true; } } rcu_read_unlock(); return false; } static bool ieee80211_chanctx_radar_required(struct ieee80211_local *local, struct ieee80211_chanctx *ctx) { struct ieee80211_chanctx_conf *conf = &ctx->conf; struct ieee80211_sub_if_data *sdata; bool required = false; lockdep_assert_held(&local->chanctx_mtx); lockdep_assert_held(&local->mtx); rcu_read_lock(); list_for_each_entry_rcu(sdata, &local->interfaces, list) { if (!ieee80211_sdata_running(sdata)) continue; if (rcu_access_pointer(sdata->vif.chanctx_conf) != conf) continue; if (!sdata->radar_required) continue; required = true; break; } rcu_read_unlock(); return required; } static struct ieee80211_chanctx * ieee80211_alloc_chanctx(struct ieee80211_local *local, const struct cfg80211_chan_def *chandef, enum ieee80211_chanctx_mode mode) { struct ieee80211_chanctx *ctx; lockdep_assert_held(&local->chanctx_mtx); ctx = kzalloc(sizeof(*ctx) + local->hw.chanctx_data_size, GFP_KERNEL); if (!ctx) return NULL; INIT_LIST_HEAD(&ctx->assigned_vifs); INIT_LIST_HEAD(&ctx->reserved_vifs); ctx->conf.def = *chandef; ctx->conf.rx_chains_static = 1; ctx->conf.rx_chains_dynamic = 1; 
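/* start out with a single RX chain; ieee80211_recalc_smps_chanctx() raises these once interfaces are assigned to the context */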
ctx->mode = mode; ctx->conf.radar_enabled = false; ieee80211_recalc_chanctx_min_def(local, ctx); return ctx; } static int ieee80211_add_chanctx(struct ieee80211_local *local, struct ieee80211_chanctx *ctx) { u32 changed; int err; lockdep_assert_held(&local->mtx); lockdep_assert_held(&local->chanctx_mtx); if (!local->use_chanctx) local->hw.conf.radar_enabled = ctx->conf.radar_enabled; /* turn idle off *before* setting channel -- some drivers need that */ changed = ieee80211_idle_off(local); if (changed) ieee80211_hw_config(local, changed); if (!local->use_chanctx) { local->_oper_chandef = ctx->conf.def; ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL); } else { err = drv_add_chanctx(local, ctx); if (err) { ieee80211_recalc_idle(local); return err; } } return 0; } static struct ieee80211_chanctx * ieee80211_new_chanctx(struct ieee80211_local *local, const struct cfg80211_chan_def *chandef, enum ieee80211_chanctx_mode mode) { struct ieee80211_chanctx *ctx; int err; lockdep_assert_held(&local->mtx); lockdep_assert_held(&local->chanctx_mtx); ctx = ieee80211_alloc_chanctx(local, chandef, mode); if (!ctx) return ERR_PTR(-ENOMEM); err = ieee80211_add_chanctx(local, ctx); if (err) { kfree(ctx); return ERR_PTR(err); } list_add_rcu(&ctx->list, &local->chanctx_list); return ctx; } static void ieee80211_del_chanctx(struct ieee80211_local *local, struct ieee80211_chanctx *ctx) { lockdep_assert_held(&local->chanctx_mtx); if (!local->use_chanctx) { struct cfg80211_chan_def *chandef = &local->_oper_chandef; chandef->width = NL80211_CHAN_WIDTH_20_NOHT; chandef->center_freq1 = chandef->chan->center_freq; chandef->center_freq2 = 0; /* NOTE: Disabling radar is only valid here for * single channel context. To be sure, check it ... */ WARN_ON(local->hw.conf.radar_enabled && !list_empty(&local->chanctx_list)); local->hw.conf.radar_enabled = false; ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL); } else { drv_remove_chanctx(local, ctx); } ieee80211_recalc_idle(local); } static void ieee80211_free_chanctx(struct ieee80211_local *local, struct ieee80211_chanctx *ctx) { lockdep_assert_held(&local->chanctx_mtx); WARN_ON_ONCE(ieee80211_chanctx_refcount(local, ctx) != 0); list_del_rcu(&ctx->list); ieee80211_del_chanctx(local, ctx); kfree_rcu(ctx, rcu_head); } void ieee80211_recalc_chanctx_chantype(struct ieee80211_local *local, struct ieee80211_chanctx *ctx) { struct ieee80211_chanctx_conf *conf = &ctx->conf; struct ieee80211_sub_if_data *sdata; const struct cfg80211_chan_def *compat = NULL; struct sta_info *sta; lockdep_assert_held(&local->chanctx_mtx); rcu_read_lock(); list_for_each_entry_rcu(sdata, &local->interfaces, list) { if (!ieee80211_sdata_running(sdata)) continue; if (rcu_access_pointer(sdata->vif.chanctx_conf) != conf) continue; if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) continue; if (!compat) compat = &sdata->vif.bss_conf.chandef; compat = cfg80211_chandef_compatible( &sdata->vif.bss_conf.chandef, compat); if (WARN_ON_ONCE(!compat)) break; } /* TDLS peers can sometimes affect the chandef width */ list_for_each_entry_rcu(sta, &local->sta_list, list) { if (!sta->uploaded || !test_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW) || !test_sta_flag(sta, WLAN_STA_AUTHORIZED) || !sta->tdls_chandef.chan) continue; compat = cfg80211_chandef_compatible(&sta->tdls_chandef, compat); if (WARN_ON_ONCE(!compat)) break; } rcu_read_unlock(); if (!compat) return; ieee80211_change_chanctx(local, ctx, compat); } static void ieee80211_recalc_radar_chanctx(struct ieee80211_local *local, struct ieee80211_chanctx 
*chanctx) { bool radar_enabled; lockdep_assert_held(&local->chanctx_mtx); /* for ieee80211_is_radar_required */ lockdep_assert_held(&local->mtx); radar_enabled = ieee80211_chanctx_radar_required(local, chanctx); if (radar_enabled == chanctx->conf.radar_enabled) return; chanctx->conf.radar_enabled = radar_enabled; if (!local->use_chanctx) { local->hw.conf.radar_enabled = chanctx->conf.radar_enabled; ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL); } drv_change_chanctx(local, chanctx, IEEE80211_CHANCTX_CHANGE_RADAR); } static int ieee80211_assign_vif_chanctx(struct ieee80211_sub_if_data *sdata, struct ieee80211_chanctx *new_ctx) { struct ieee80211_local *local = sdata->local; struct ieee80211_chanctx_conf *conf; struct ieee80211_chanctx *curr_ctx = NULL; int ret = 0; if (WARN_ON(sdata->vif.type == NL80211_IFTYPE_NAN)) return -ENOTSUPP; conf = rcu_dereference_protected(sdata->vif.chanctx_conf, lockdep_is_held(&local->chanctx_mtx)); if (conf) { curr_ctx = container_of(conf, struct ieee80211_chanctx, conf); drv_unassign_vif_chanctx(local, sdata, curr_ctx); conf = NULL; list_del(&sdata->assigned_chanctx_list); } if (new_ctx) { ret = drv_assign_vif_chanctx(local, sdata, new_ctx); if (ret) goto out; conf = &new_ctx->conf; list_add(&sdata->assigned_chanctx_list, &new_ctx->assigned_vifs); } out: rcu_assign_pointer(sdata->vif.chanctx_conf, conf); sdata->vif.bss_conf.idle = !conf; if (curr_ctx && ieee80211_chanctx_num_assigned(local, curr_ctx) > 0) { ieee80211_recalc_chanctx_chantype(local, curr_ctx); ieee80211_recalc_smps_chanctx(local, curr_ctx); ieee80211_recalc_radar_chanctx(local, curr_ctx); ieee80211_recalc_chanctx_min_def(local, curr_ctx); } if (new_ctx && ieee80211_chanctx_num_assigned(local, new_ctx) > 0) { ieee80211_recalc_txpower(sdata, false); ieee80211_recalc_chanctx_min_def(local, new_ctx); } if (sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE && sdata->vif.type != NL80211_IFTYPE_MONITOR) ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_IDLE); ieee80211_check_fast_xmit_iface(sdata); return ret; } void ieee80211_recalc_smps_chanctx(struct ieee80211_local *local, struct ieee80211_chanctx *chanctx) { struct ieee80211_sub_if_data *sdata; u8 rx_chains_static, rx_chains_dynamic; lockdep_assert_held(&local->chanctx_mtx); rx_chains_static = 1; rx_chains_dynamic = 1; rcu_read_lock(); list_for_each_entry_rcu(sdata, &local->interfaces, list) { u8 needed_static, needed_dynamic; if (!ieee80211_sdata_running(sdata)) continue; if (rcu_access_pointer(sdata->vif.chanctx_conf) != &chanctx->conf) continue; switch (sdata->vif.type) { case NL80211_IFTYPE_P2P_DEVICE: case NL80211_IFTYPE_NAN: continue; case NL80211_IFTYPE_STATION: if (!sdata->u.mgd.associated) continue; break; case NL80211_IFTYPE_AP_VLAN: continue; case NL80211_IFTYPE_AP: case NL80211_IFTYPE_ADHOC: case NL80211_IFTYPE_WDS: case NL80211_IFTYPE_MESH_POINT: case NL80211_IFTYPE_OCB: break; default: WARN_ON_ONCE(1); } switch (sdata->smps_mode) { default: WARN_ONCE(1, "Invalid SMPS mode %d\n", sdata->smps_mode); /* fall through */ case IEEE80211_SMPS_OFF: needed_static = sdata->needed_rx_chains; needed_dynamic = sdata->needed_rx_chains; break; case IEEE80211_SMPS_DYNAMIC: needed_static = 1; needed_dynamic = sdata->needed_rx_chains; break; case IEEE80211_SMPS_STATIC: needed_static = 1; needed_dynamic = 1; break; } rx_chains_static = max(rx_chains_static, needed_static); rx_chains_dynamic = max(rx_chains_dynamic, needed_dynamic); } /* Disable SMPS for the monitor interface */ sdata = rcu_dereference(local->monitor_sdata); if (sdata && 
rcu_access_pointer(sdata->vif.chanctx_conf) == &chanctx->conf) rx_chains_dynamic = rx_chains_static = local->rx_chains; rcu_read_unlock(); if (!local->use_chanctx) { if (rx_chains_static > 1) local->smps_mode = IEEE80211_SMPS_OFF; else if (rx_chains_dynamic > 1) local->smps_mode = IEEE80211_SMPS_DYNAMIC; else local->smps_mode = IEEE80211_SMPS_STATIC; ieee80211_hw_config(local, 0); } if (rx_chains_static == chanctx->conf.rx_chains_static && rx_chains_dynamic == chanctx->conf.rx_chains_dynamic) return; chanctx->conf.rx_chains_static = rx_chains_static; chanctx->conf.rx_chains_dynamic = rx_chains_dynamic; drv_change_chanctx(local, chanctx, IEEE80211_CHANCTX_CHANGE_RX_CHAINS); } static void __ieee80211_vif_copy_chanctx_to_vlans(struct ieee80211_sub_if_data *sdata, bool clear) { struct ieee80211_local *local __maybe_unused = sdata->local; struct ieee80211_sub_if_data *vlan; struct ieee80211_chanctx_conf *conf; if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_AP)) return; lockdep_assert_held(&local->mtx); /* Check that conf exists, even when clearing this function * must be called with the AP's channel context still there * as it would otherwise cause VLANs to have an invalid * channel context pointer for a while, possibly pointing * to a channel context that has already been freed. */ conf = rcu_dereference_protected(sdata->vif.chanctx_conf, lockdep_is_held(&local->chanctx_mtx)); WARN_ON(!conf); if (clear) conf = NULL; list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list) rcu_assign_pointer(vlan->vif.chanctx_conf, conf); } void ieee80211_vif_copy_chanctx_to_vlans(struct ieee80211_sub_if_data *sdata, bool clear) { struct ieee80211_local *local = sdata->local; mutex_lock(&local->chanctx_mtx); __ieee80211_vif_copy_chanctx_to_vlans(sdata, clear); mutex_unlock(&local->chanctx_mtx); } int ieee80211_vif_unreserve_chanctx(struct ieee80211_sub_if_data *sdata) { struct ieee80211_chanctx *ctx = sdata->reserved_chanctx; lockdep_assert_held(&sdata->local->chanctx_mtx); if (WARN_ON(!ctx)) return -EINVAL; list_del(&sdata->reserved_chanctx_list); sdata->reserved_chanctx = NULL; if (ieee80211_chanctx_refcount(sdata->local, ctx) == 0) { if (ctx->replace_state == IEEE80211_CHANCTX_REPLACES_OTHER) { if (WARN_ON(!ctx->replace_ctx)) return -EINVAL; WARN_ON(ctx->replace_ctx->replace_state != IEEE80211_CHANCTX_WILL_BE_REPLACED); WARN_ON(ctx->replace_ctx->replace_ctx != ctx); ctx->replace_ctx->replace_ctx = NULL; ctx->replace_ctx->replace_state = IEEE80211_CHANCTX_REPLACE_NONE; list_del_rcu(&ctx->list); kfree_rcu(ctx, rcu_head); } else { ieee80211_free_chanctx(sdata->local, ctx); } } return 0; } int ieee80211_vif_reserve_chanctx(struct ieee80211_sub_if_data *sdata, const struct cfg80211_chan_def *chandef, enum ieee80211_chanctx_mode mode, bool radar_required) { struct ieee80211_local *local = sdata->local; struct ieee80211_chanctx *new_ctx, *curr_ctx, *ctx; lockdep_assert_held(&local->chanctx_mtx); curr_ctx = ieee80211_vif_get_chanctx(sdata); if (curr_ctx && local->use_chanctx && !local->ops->switch_vif_chanctx) return -ENOTSUPP; new_ctx = ieee80211_find_reservation_chanctx(local, chandef, mode); if (!new_ctx) { if (ieee80211_can_create_new_chanctx(local)) { new_ctx = ieee80211_new_chanctx(local, chandef, mode); if (IS_ERR(new_ctx)) return PTR_ERR(new_ctx); } else { if (!curr_ctx || (curr_ctx->replace_state == IEEE80211_CHANCTX_WILL_BE_REPLACED) || !list_empty(&curr_ctx->reserved_vifs)) { /* * Another vif already requested this context * for a reservation. 
				 * Find another one hoping
				 * all vifs assigned to it will also switch
				 * soon enough.
				 *
				 * TODO: This needs a little more work as some
				 * cases (more than 2 chanctx capable devices)
				 * may fail which could otherwise succeed
				 * provided some channel context juggling was
				 * performed.
				 *
				 * Consider ctx1..3, vif1..6, each ctx has 2
				 * vifs. vif1 and vif2 from ctx1 request new
				 * different chandefs starting 2 in-place
				 * reservations with ctx4 and ctx5 replacing
				 * ctx1 and ctx2 respectively. Next vif5 and
				 * vif6 from ctx3 reserve ctx4. If vif3 and
				 * vif4 remain on ctx2 as they are then this
				 * fails unless `replace_ctx` from ctx5 is
				 * replaced with ctx3.
				 */
				list_for_each_entry(ctx, &local->chanctx_list,
						    list) {
					if (ctx->replace_state !=
					    IEEE80211_CHANCTX_REPLACE_NONE)
						continue;

					if (!list_empty(&ctx->reserved_vifs))
						continue;

					curr_ctx = ctx;
					break;
				}
			}

			/*
			 * If that's true then all available contexts already
			 * have reservations and cannot be used.
			 */
			if (!curr_ctx ||
			    (curr_ctx->replace_state ==
			     IEEE80211_CHANCTX_WILL_BE_REPLACED) ||
			    !list_empty(&curr_ctx->reserved_vifs))
				return -EBUSY;

			new_ctx = ieee80211_alloc_chanctx(local, chandef, mode);
			if (!new_ctx)
				return -ENOMEM;

			new_ctx->replace_ctx = curr_ctx;
			new_ctx->replace_state =
					IEEE80211_CHANCTX_REPLACES_OTHER;

			curr_ctx->replace_ctx = new_ctx;
			curr_ctx->replace_state =
					IEEE80211_CHANCTX_WILL_BE_REPLACED;

			list_add_rcu(&new_ctx->list, &local->chanctx_list);
		}
	}

	list_add(&sdata->reserved_chanctx_list, &new_ctx->reserved_vifs);

	sdata->reserved_chanctx = new_ctx;
	sdata->reserved_chandef = *chandef;
	sdata->reserved_radar_required = radar_required;
	sdata->reserved_ready = false;

	return 0;
}

static void
ieee80211_vif_chanctx_reservation_complete(struct ieee80211_sub_if_data *sdata)
{
	switch (sdata->vif.type) {
	case NL80211_IFTYPE_ADHOC:
	case NL80211_IFTYPE_AP:
	case NL80211_IFTYPE_MESH_POINT:
	case NL80211_IFTYPE_OCB:
		ieee80211_queue_work(&sdata->local->hw,
				     &sdata->csa_finalize_work);
		break;
	case NL80211_IFTYPE_STATION:
		ieee80211_queue_work(&sdata->local->hw,
				     &sdata->u.mgd.chswitch_work);
		break;
	case NL80211_IFTYPE_UNSPECIFIED:
	case NL80211_IFTYPE_AP_VLAN:
	case NL80211_IFTYPE_WDS:
	case NL80211_IFTYPE_MONITOR:
	case NL80211_IFTYPE_P2P_CLIENT:
	case NL80211_IFTYPE_P2P_GO:
	case NL80211_IFTYPE_P2P_DEVICE:
	case NL80211_IFTYPE_NAN:
	case NL80211_IFTYPE_NAN_DATA:
	case NUM_NL80211_IFTYPES:
		WARN_ON(1);
		break;
	}
}

static void
ieee80211_vif_update_chandef(struct ieee80211_sub_if_data *sdata,
			     const struct cfg80211_chan_def *chandef)
{
	struct ieee80211_sub_if_data *vlan;

	sdata->vif.bss_conf.chandef = *chandef;

	if (sdata->vif.type != NL80211_IFTYPE_AP)
		return;

	list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
		vlan->vif.bss_conf.chandef = *chandef;
}

static int
ieee80211_vif_use_reserved_reassign(struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_vif_chanctx_switch vif_chsw[1] = {};
	struct ieee80211_chanctx *old_ctx, *new_ctx;
	const struct cfg80211_chan_def *chandef;
	u32 changed = 0;
	int err;

	lockdep_assert_held(&local->mtx);
	lockdep_assert_held(&local->chanctx_mtx);

	new_ctx = sdata->reserved_chanctx;
	old_ctx = ieee80211_vif_get_chanctx(sdata);

	if (WARN_ON(!sdata->reserved_ready))
		return -EBUSY;

	if (WARN_ON(!new_ctx))
		return -EINVAL;

	if (WARN_ON(!old_ctx))
		return -EINVAL;

	if (WARN_ON(new_ctx->replace_state ==
		    IEEE80211_CHANCTX_REPLACES_OTHER))
		return -EINVAL;

	chandef = ieee80211_chanctx_non_reserved_chandef(local, new_ctx,
				&sdata->reserved_chandef);
	if (WARN_ON(!chandef))
		return -EINVAL;

	ieee80211_change_chanctx(local, new_ctx,
chandef); vif_chsw[0].vif = &sdata->vif; vif_chsw[0].old_ctx = &old_ctx->conf; vif_chsw[0].new_ctx = &new_ctx->conf; list_del(&sdata->reserved_chanctx_list); sdata->reserved_chanctx = NULL; err = drv_switch_vif_chanctx(local, vif_chsw, 1, CHANCTX_SWMODE_REASSIGN_VIF); if (err) { if (ieee80211_chanctx_refcount(local, new_ctx) == 0) ieee80211_free_chanctx(local, new_ctx); goto out; } list_move(&sdata->assigned_chanctx_list, &new_ctx->assigned_vifs); rcu_assign_pointer(sdata->vif.chanctx_conf, &new_ctx->conf); if (sdata->vif.type == NL80211_IFTYPE_AP) __ieee80211_vif_copy_chanctx_to_vlans(sdata, false); ieee80211_check_fast_xmit_iface(sdata); if (ieee80211_chanctx_refcount(local, old_ctx) == 0) ieee80211_free_chanctx(local, old_ctx); if (sdata->vif.bss_conf.chandef.width != sdata->reserved_chandef.width) changed = BSS_CHANGED_BANDWIDTH; ieee80211_vif_update_chandef(sdata, &sdata->reserved_chandef); ieee80211_recalc_smps_chanctx(local, new_ctx); ieee80211_recalc_radar_chanctx(local, new_ctx); ieee80211_recalc_chanctx_min_def(local, new_ctx); if (changed) ieee80211_bss_info_change_notify(sdata, changed); out: ieee80211_vif_chanctx_reservation_complete(sdata); return err; } static int ieee80211_vif_use_reserved_assign(struct ieee80211_sub_if_data *sdata) { struct ieee80211_local *local = sdata->local; struct ieee80211_chanctx *old_ctx, *new_ctx; const struct cfg80211_chan_def *chandef; int err; old_ctx = ieee80211_vif_get_chanctx(sdata); new_ctx = sdata->reserved_chanctx; if (WARN_ON(!sdata->reserved_ready)) return -EINVAL; if (WARN_ON(old_ctx)) return -EINVAL; if (WARN_ON(!new_ctx)) return -EINVAL; if (WARN_ON(new_ctx->replace_state == IEEE80211_CHANCTX_REPLACES_OTHER)) return -EINVAL; chandef = ieee80211_chanctx_non_reserved_chandef(local, new_ctx, &sdata->reserved_chandef); if (WARN_ON(!chandef)) return -EINVAL; ieee80211_change_chanctx(local, new_ctx, chandef); list_del(&sdata->reserved_chanctx_list); sdata->reserved_chanctx = NULL; err = ieee80211_assign_vif_chanctx(sdata, new_ctx); if (err) { if (ieee80211_chanctx_refcount(local, new_ctx) == 0) ieee80211_free_chanctx(local, new_ctx); goto out; } out: ieee80211_vif_chanctx_reservation_complete(sdata); return err; } static bool ieee80211_vif_has_in_place_reservation(struct ieee80211_sub_if_data *sdata) { struct ieee80211_chanctx *old_ctx, *new_ctx; lockdep_assert_held(&sdata->local->chanctx_mtx); new_ctx = sdata->reserved_chanctx; old_ctx = ieee80211_vif_get_chanctx(sdata); if (!old_ctx) return false; if (WARN_ON(!new_ctx)) return false; if (old_ctx->replace_state != IEEE80211_CHANCTX_WILL_BE_REPLACED) return false; if (new_ctx->replace_state != IEEE80211_CHANCTX_REPLACES_OTHER) return false; return true; } static int ieee80211_chsw_switch_hwconf(struct ieee80211_local *local, struct ieee80211_chanctx *new_ctx) { const struct cfg80211_chan_def *chandef; lockdep_assert_held(&local->mtx); lockdep_assert_held(&local->chanctx_mtx); chandef = ieee80211_chanctx_reserved_chandef(local, new_ctx, NULL); if (WARN_ON(!chandef)) return -EINVAL; local->hw.conf.radar_enabled = new_ctx->conf.radar_enabled; local->_oper_chandef = *chandef; ieee80211_hw_config(local, 0); return 0; } static int ieee80211_chsw_switch_vifs(struct ieee80211_local *local, int n_vifs) { struct ieee80211_vif_chanctx_switch *vif_chsw; struct ieee80211_sub_if_data *sdata; struct ieee80211_chanctx *ctx, *old_ctx; int i, err; lockdep_assert_held(&local->mtx); lockdep_assert_held(&local->chanctx_mtx); vif_chsw = kcalloc(n_vifs, sizeof(vif_chsw[0]), GFP_KERNEL); if (!vif_chsw) return 
-ENOMEM; i = 0; list_for_each_entry(ctx, &local->chanctx_list, list) { if (ctx->replace_state != IEEE80211_CHANCTX_REPLACES_OTHER) continue; if (WARN_ON(!ctx->replace_ctx)) { err = -EINVAL; goto out; } list_for_each_entry(sdata, &ctx->reserved_vifs, reserved_chanctx_list) { if (!ieee80211_vif_has_in_place_reservation( sdata)) continue; old_ctx = ieee80211_vif_get_chanctx(sdata); vif_chsw[i].vif = &sdata->vif; vif_chsw[i].old_ctx = &old_ctx->conf; vif_chsw[i].new_ctx = &ctx->conf; i++; } } err = drv_switch_vif_chanctx(local, vif_chsw, n_vifs, CHANCTX_SWMODE_SWAP_CONTEXTS); out: kfree(vif_chsw); return err; } static int ieee80211_chsw_switch_ctxs(struct ieee80211_local *local) { struct ieee80211_chanctx *ctx; int err; lockdep_assert_held(&local->mtx); lockdep_assert_held(&local->chanctx_mtx); list_for_each_entry(ctx, &local->chanctx_list, list) { if (ctx->replace_state != IEEE80211_CHANCTX_REPLACES_OTHER) continue; if (!list_empty(&ctx->replace_ctx->assigned_vifs)) continue; ieee80211_del_chanctx(local, ctx->replace_ctx); err = ieee80211_add_chanctx(local, ctx); if (err) goto err; } return 0; err: WARN_ON(ieee80211_add_chanctx(local, ctx)); list_for_each_entry_continue_reverse(ctx, &local->chanctx_list, list) { if (ctx->replace_state != IEEE80211_CHANCTX_REPLACES_OTHER) continue; if (!list_empty(&ctx->replace_ctx->assigned_vifs)) continue; ieee80211_del_chanctx(local, ctx); WARN_ON(ieee80211_add_chanctx(local, ctx->replace_ctx)); } return err; } static int ieee80211_vif_use_reserved_switch(struct ieee80211_local *local) { struct ieee80211_sub_if_data *sdata, *sdata_tmp; struct ieee80211_chanctx *ctx, *ctx_tmp, *old_ctx; struct ieee80211_chanctx *new_ctx = NULL; int err, n_assigned, n_reserved, n_ready; int n_ctx = 0, n_vifs_switch = 0, n_vifs_assign = 0, n_vifs_ctxless = 0; lockdep_assert_held(&local->mtx); lockdep_assert_held(&local->chanctx_mtx); /* * If there are 2 independent pairs of channel contexts performing * cross-switch of their vifs this code will still wait until both are * ready even though it could be possible to switch one before the * other is ready. * * For practical reasons and code simplicity just do a single huge * switch. */ /* * Verify if the reservation is still feasible. 
* - if it's not then disconnect * - if it is but not all vifs necessary are ready then defer */ list_for_each_entry(ctx, &local->chanctx_list, list) { if (ctx->replace_state != IEEE80211_CHANCTX_REPLACES_OTHER) continue; if (WARN_ON(!ctx->replace_ctx)) { err = -EINVAL; goto err; } if (!local->use_chanctx) new_ctx = ctx; n_ctx++; n_assigned = 0; n_reserved = 0; n_ready = 0; list_for_each_entry(sdata, &ctx->replace_ctx->assigned_vifs, assigned_chanctx_list) { n_assigned++; if (sdata->reserved_chanctx) { n_reserved++; if (sdata->reserved_ready) n_ready++; } } if (n_assigned != n_reserved) { if (n_ready == n_reserved) { wiphy_info(local->hw.wiphy, "channel context reservation cannot be finalized because some interfaces aren't switching\n"); err = -EBUSY; goto err; } return -EAGAIN; } ctx->conf.radar_enabled = false; list_for_each_entry(sdata, &ctx->reserved_vifs, reserved_chanctx_list) { if (ieee80211_vif_has_in_place_reservation(sdata) && !sdata->reserved_ready) return -EAGAIN; old_ctx = ieee80211_vif_get_chanctx(sdata); if (old_ctx) { if (old_ctx->replace_state == IEEE80211_CHANCTX_WILL_BE_REPLACED) n_vifs_switch++; else n_vifs_assign++; } else { n_vifs_ctxless++; } if (sdata->reserved_radar_required) ctx->conf.radar_enabled = true; } } if (WARN_ON(n_ctx == 0) || WARN_ON(n_vifs_switch == 0 && n_vifs_assign == 0 && n_vifs_ctxless == 0) || WARN_ON(n_ctx > 1 && !local->use_chanctx) || WARN_ON(!new_ctx && !local->use_chanctx)) { err = -EINVAL; goto err; } /* * All necessary vifs are ready. Perform the switch now depending on * reservations and driver capabilities. */ if (local->use_chanctx) { if (n_vifs_switch > 0) { err = ieee80211_chsw_switch_vifs(local, n_vifs_switch); if (err) goto err; } if (n_vifs_assign > 0 || n_vifs_ctxless > 0) { err = ieee80211_chsw_switch_ctxs(local); if (err) goto err; } } else { err = ieee80211_chsw_switch_hwconf(local, new_ctx); if (err) goto err; } /* * Update all structures, values and pointers to point to new channel * context(s). */ list_for_each_entry(ctx, &local->chanctx_list, list) { if (ctx->replace_state != IEEE80211_CHANCTX_REPLACES_OTHER) continue; if (WARN_ON(!ctx->replace_ctx)) { err = -EINVAL; goto err; } list_for_each_entry(sdata, &ctx->reserved_vifs, reserved_chanctx_list) { u32 changed = 0; if (!ieee80211_vif_has_in_place_reservation(sdata)) continue; rcu_assign_pointer(sdata->vif.chanctx_conf, &ctx->conf); if (sdata->vif.type == NL80211_IFTYPE_AP) __ieee80211_vif_copy_chanctx_to_vlans(sdata, false); ieee80211_check_fast_xmit_iface(sdata); sdata->radar_required = sdata->reserved_radar_required; if (sdata->vif.bss_conf.chandef.width != sdata->reserved_chandef.width) changed = BSS_CHANGED_BANDWIDTH; ieee80211_vif_update_chandef(sdata, &sdata->reserved_chandef); if (changed) ieee80211_bss_info_change_notify(sdata, changed); ieee80211_recalc_txpower(sdata, false); } ieee80211_recalc_chanctx_chantype(local, ctx); ieee80211_recalc_smps_chanctx(local, ctx); ieee80211_recalc_radar_chanctx(local, ctx); ieee80211_recalc_chanctx_min_def(local, ctx); list_for_each_entry_safe(sdata, sdata_tmp, &ctx->reserved_vifs, reserved_chanctx_list) { if (ieee80211_vif_get_chanctx(sdata) != ctx) continue; list_del(&sdata->reserved_chanctx_list); list_move(&sdata->assigned_chanctx_list, &ctx->assigned_vifs); sdata->reserved_chanctx = NULL; ieee80211_vif_chanctx_reservation_complete(sdata); } /* * This context might have been a dependency for an already * ready re-assign reservation interface that was deferred. Do * not propagate error to the caller though. 
The in-place * reservation for originally requested interface has already * succeeded at this point. */ list_for_each_entry_safe(sdata, sdata_tmp, &ctx->reserved_vifs, reserved_chanctx_list) { if (WARN_ON(ieee80211_vif_has_in_place_reservation( sdata))) continue; if (WARN_ON(sdata->reserved_chanctx != ctx)) continue; if (!sdata->reserved_ready) continue; if (ieee80211_vif_get_chanctx(sdata)) err = ieee80211_vif_use_reserved_reassign( sdata); else err = ieee80211_vif_use_reserved_assign(sdata); if (err) { sdata_info(sdata, "failed to finalize (re-)assign reservation (err=%d)\n", err); ieee80211_vif_unreserve_chanctx(sdata); cfg80211_stop_iface(local->hw.wiphy, &sdata->wdev, GFP_KERNEL); } } } /* * Finally free old contexts */ list_for_each_entry_safe(ctx, ctx_tmp, &local->chanctx_list, list) { if (ctx->replace_state != IEEE80211_CHANCTX_WILL_BE_REPLACED) continue; ctx->replace_ctx->replace_ctx = NULL; ctx->replace_ctx->replace_state = IEEE80211_CHANCTX_REPLACE_NONE; list_del_rcu(&ctx->list); kfree_rcu(ctx, rcu_head); } return 0; err: list_for_each_entry(ctx, &local->chanctx_list, list) { if (ctx->replace_state != IEEE80211_CHANCTX_REPLACES_OTHER) continue; list_for_each_entry_safe(sdata, sdata_tmp, &ctx->reserved_vifs, reserved_chanctx_list) { ieee80211_vif_unreserve_chanctx(sdata); ieee80211_vif_chanctx_reservation_complete(sdata); } } return err; } static void __ieee80211_vif_release_channel(struct ieee80211_sub_if_data *sdata) { struct ieee80211_local *local = sdata->local; struct ieee80211_chanctx_conf *conf; struct ieee80211_chanctx *ctx; bool use_reserved_switch = false; lockdep_assert_held(&local->chanctx_mtx); conf = rcu_dereference_protected(sdata->vif.chanctx_conf, lockdep_is_held(&local->chanctx_mtx)); if (!conf) return; ctx = container_of(conf, struct ieee80211_chanctx, conf); if (sdata->reserved_chanctx) { if (sdata->reserved_chanctx->replace_state == IEEE80211_CHANCTX_REPLACES_OTHER && ieee80211_chanctx_num_reserved(local, sdata->reserved_chanctx) > 1) use_reserved_switch = true; ieee80211_vif_unreserve_chanctx(sdata); } ieee80211_assign_vif_chanctx(sdata, NULL); if (ieee80211_chanctx_refcount(local, ctx) == 0) ieee80211_free_chanctx(local, ctx); sdata->radar_required = false; /* Unreserving may ready an in-place reservation. 
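* (the vif released above may have been the last one blocking a pending multi-vif swap, so kick the switch machinery now)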
*/ if (use_reserved_switch) ieee80211_vif_use_reserved_switch(local); } int ieee80211_vif_use_channel(struct ieee80211_sub_if_data *sdata, const struct cfg80211_chan_def *chandef, enum ieee80211_chanctx_mode mode) { struct ieee80211_local *local = sdata->local; struct ieee80211_chanctx *ctx; u8 radar_detect_width = 0; int ret; lockdep_assert_held(&local->mtx); WARN_ON(sdata->dev && netif_carrier_ok(sdata->dev)); mutex_lock(&local->chanctx_mtx); ret = cfg80211_chandef_dfs_required(local->hw.wiphy, chandef, sdata->wdev.iftype); if (ret < 0) goto out; if (ret > 0) radar_detect_width = BIT(chandef->width); sdata->radar_required = ret; ret = ieee80211_check_combinations(sdata, chandef, mode, radar_detect_width); if (ret < 0) goto out; __ieee80211_vif_release_channel(sdata); ctx = ieee80211_find_chanctx(local, chandef, mode); if (!ctx) ctx = ieee80211_new_chanctx(local, chandef, mode); if (IS_ERR(ctx)) { ret = PTR_ERR(ctx); goto out; } ieee80211_vif_update_chandef(sdata, chandef); ret = ieee80211_assign_vif_chanctx(sdata, ctx); if (ret) { /* if assign fails refcount stays the same */ if (ieee80211_chanctx_refcount(local, ctx) == 0) ieee80211_free_chanctx(local, ctx); goto out; } ieee80211_recalc_smps_chanctx(local, ctx); ieee80211_recalc_radar_chanctx(local, ctx); out: if (ret) sdata->radar_required = false; mutex_unlock(&local->chanctx_mtx); return ret; } int ieee80211_vif_use_reserved_context(struct ieee80211_sub_if_data *sdata) { struct ieee80211_local *local = sdata->local; struct ieee80211_chanctx *new_ctx; struct ieee80211_chanctx *old_ctx; int err; lockdep_assert_held(&local->mtx); lockdep_assert_held(&local->chanctx_mtx); new_ctx = sdata->reserved_chanctx; old_ctx = ieee80211_vif_get_chanctx(sdata); if (WARN_ON(!new_ctx)) return -EINVAL; if (WARN_ON(new_ctx->replace_state == IEEE80211_CHANCTX_WILL_BE_REPLACED)) return -EINVAL; if (WARN_ON(sdata->reserved_ready)) return -EINVAL; sdata->reserved_ready = true; if (new_ctx->replace_state == IEEE80211_CHANCTX_REPLACE_NONE) { if (old_ctx) err = ieee80211_vif_use_reserved_reassign(sdata); else err = ieee80211_vif_use_reserved_assign(sdata); if (err) return err; } /* * In-place reservation may need to be finalized now either if: * a) sdata is taking part in the swapping itself and is the last one * b) sdata has switched with a re-assign reservation to an existing * context readying in-place switching of old_ctx * * In case of (b) do not propagate the error up because the requested * sdata already switched successfully. Just spill an extra warning. * The ieee80211_vif_use_reserved_switch() already stops all necessary * interfaces upon failure. 
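 *
 * A concrete sketch of case (b): vif0 and vif1 share ctx1; vif1
 * reserves ctx2, which replaces ctx1, and waits for vif0 to leave.
 * Once vif0 finalizes a re-assign reservation onto some existing ctx3
 * above, it vacates ctx1, readying the pending ctx1 -> ctx2 switch.
 * That switch is then attempted here on vif1's behalf, so its failure
 * must not be reported as a failure of vif0's own, already completed,
 * reservation.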
*/ if ((old_ctx && old_ctx->replace_state == IEEE80211_CHANCTX_WILL_BE_REPLACED) || new_ctx->replace_state == IEEE80211_CHANCTX_REPLACES_OTHER) { err = ieee80211_vif_use_reserved_switch(local); if (err && err != -EAGAIN) { if (new_ctx->replace_state == IEEE80211_CHANCTX_REPLACES_OTHER) return err; wiphy_info(local->hw.wiphy, "depending in-place reservation failed (err=%d)\n", err); } } return 0; } int ieee80211_vif_change_bandwidth(struct ieee80211_sub_if_data *sdata, const struct cfg80211_chan_def *chandef, u32 *changed) { struct ieee80211_local *local = sdata->local; struct ieee80211_chanctx_conf *conf; struct ieee80211_chanctx *ctx; const struct cfg80211_chan_def *compat; int ret; if (!cfg80211_chandef_usable(sdata->local->hw.wiphy, chandef, IEEE80211_CHAN_DISABLED)) return -EINVAL; mutex_lock(&local->chanctx_mtx); if (cfg80211_chandef_identical(chandef, &sdata->vif.bss_conf.chandef)) { ret = 0; goto out; } if (chandef->width == NL80211_CHAN_WIDTH_20_NOHT || sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT) { ret = -EINVAL; goto out; } conf = rcu_dereference_protected(sdata->vif.chanctx_conf, lockdep_is_held(&local->chanctx_mtx)); if (!conf) { ret = -EINVAL; goto out; } ctx = container_of(conf, struct ieee80211_chanctx, conf); compat = cfg80211_chandef_compatible(&conf->def, chandef); if (!compat) { ret = -EINVAL; goto out; } switch (ctx->replace_state) { case IEEE80211_CHANCTX_REPLACE_NONE: if (!ieee80211_chanctx_reserved_chandef(local, ctx, compat)) { ret = -EBUSY; goto out; } break; case IEEE80211_CHANCTX_WILL_BE_REPLACED: /* TODO: Perhaps the bandwidth change could be treated as a * reservation itself? */ ret = -EBUSY; goto out; case IEEE80211_CHANCTX_REPLACES_OTHER: /* channel context that is going to replace another channel * context doesn't really exist and shouldn't be assigned * anywhere yet */ WARN_ON(1); break; } ieee80211_vif_update_chandef(sdata, chandef); ieee80211_recalc_chanctx_chantype(local, ctx); *changed |= BSS_CHANGED_BANDWIDTH; ret = 0; out: mutex_unlock(&local->chanctx_mtx); return ret; } void ieee80211_vif_release_channel(struct ieee80211_sub_if_data *sdata) { WARN_ON(sdata->dev && netif_carrier_ok(sdata->dev)); lockdep_assert_held(&sdata->local->mtx); mutex_lock(&sdata->local->chanctx_mtx); __ieee80211_vif_release_channel(sdata); mutex_unlock(&sdata->local->chanctx_mtx); } void ieee80211_vif_vlan_copy_chanctx(struct ieee80211_sub_if_data *sdata) { struct ieee80211_local *local = sdata->local; struct ieee80211_sub_if_data *ap; struct ieee80211_chanctx_conf *conf; if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_AP_VLAN || !sdata->bss)) return; ap = container_of(sdata->bss, struct ieee80211_sub_if_data, u.ap); mutex_lock(&local->chanctx_mtx); conf = rcu_dereference_protected(ap->vif.chanctx_conf, lockdep_is_held(&local->chanctx_mtx)); rcu_assign_pointer(sdata->vif.chanctx_conf, conf); mutex_unlock(&local->chanctx_mtx); } void ieee80211_iter_chan_contexts_atomic( struct ieee80211_hw *hw, void (*iter)(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *chanctx_conf, void *data), void *iter_data) { struct ieee80211_local *local = hw_to_local(hw); struct ieee80211_chanctx *ctx; rcu_read_lock(); list_for_each_entry_rcu(ctx, &local->chanctx_list, list) if (ctx->driver_present) iter(hw, &ctx->conf, iter_data); rcu_read_unlock(); } EXPORT_SYMBOL_GPL(ieee80211_iter_chan_contexts_atomic); backport-iwlwifi-dkms-7906/net/mac80211/debug.h000066400000000000000000000117631351064634500210720ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0 */ 
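/*
 * Debug knob pattern used throughout this header: each
 * CPTCFG_MAC80211_*_DEBUG Kconfig option is mapped to a compile-time
 * 0/1 constant, so the *_dbg() wrappers below always type-check their
 * arguments yet compile to nothing when the option is disabled.
 */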
#ifndef __MAC80211_DEBUG_H #define __MAC80211_DEBUG_H #include #ifdef CPTCFG_MAC80211_OCB_DEBUG #define MAC80211_OCB_DEBUG 1 #else #define MAC80211_OCB_DEBUG 0 #endif #ifdef CPTCFG_MAC80211_IBSS_DEBUG #define MAC80211_IBSS_DEBUG 1 #else #define MAC80211_IBSS_DEBUG 0 #endif #ifdef CPTCFG_MAC80211_PS_DEBUG #define MAC80211_PS_DEBUG 1 #else #define MAC80211_PS_DEBUG 0 #endif #ifdef CPTCFG_MAC80211_HT_DEBUG #define MAC80211_HT_DEBUG 1 #else #define MAC80211_HT_DEBUG 0 #endif #ifdef CPTCFG_MAC80211_MPL_DEBUG #define MAC80211_MPL_DEBUG 1 #else #define MAC80211_MPL_DEBUG 0 #endif #ifdef CPTCFG_MAC80211_MPATH_DEBUG #define MAC80211_MPATH_DEBUG 1 #else #define MAC80211_MPATH_DEBUG 0 #endif #ifdef CPTCFG_MAC80211_MHWMP_DEBUG #define MAC80211_MHWMP_DEBUG 1 #else #define MAC80211_MHWMP_DEBUG 0 #endif #ifdef CPTCFG_MAC80211_MESH_SYNC_DEBUG #define MAC80211_MESH_SYNC_DEBUG 1 #else #define MAC80211_MESH_SYNC_DEBUG 0 #endif #ifdef CPTCFG_MAC80211_MESH_CSA_DEBUG #define MAC80211_MESH_CSA_DEBUG 1 #else #define MAC80211_MESH_CSA_DEBUG 0 #endif #ifdef CPTCFG_MAC80211_MESH_PS_DEBUG #define MAC80211_MESH_PS_DEBUG 1 #else #define MAC80211_MESH_PS_DEBUG 0 #endif #ifdef CPTCFG_MAC80211_TDLS_DEBUG #define MAC80211_TDLS_DEBUG 1 #else #define MAC80211_TDLS_DEBUG 0 #endif #ifdef CPTCFG_MAC80211_STA_DEBUG #define MAC80211_STA_DEBUG 1 #else #define MAC80211_STA_DEBUG 0 #endif #ifdef CPTCFG_MAC80211_MLME_DEBUG #define MAC80211_MLME_DEBUG 1 #else #define MAC80211_MLME_DEBUG 0 #endif #ifdef CPTCFG_MAC80211_MESSAGE_TRACING void __sdata_info(const char *fmt, ...) __printf(1, 2); void __sdata_dbg(bool print, const char *fmt, ...) __printf(2, 3); void __sdata_err(const char *fmt, ...) __printf(1, 2); void __wiphy_dbg(struct wiphy *wiphy, bool print, const char *fmt, ...) __printf(3, 4); #define _sdata_info(sdata, fmt, ...) \ __sdata_info("%s: " fmt, (sdata)->name, ##__VA_ARGS__) #define _sdata_dbg(print, sdata, fmt, ...) \ __sdata_dbg(print, "%s: " fmt, (sdata)->name, ##__VA_ARGS__) #define _sdata_err(sdata, fmt, ...) \ __sdata_err("%s: " fmt, (sdata)->name, ##__VA_ARGS__) #define _wiphy_dbg(print, wiphy, fmt, ...) \ __wiphy_dbg(wiphy, print, fmt, ##__VA_ARGS__) #else #define _sdata_info(sdata, fmt, ...) \ do { \ pr_info("%s: " fmt, \ (sdata)->name, ##__VA_ARGS__); \ } while (0) #define _sdata_dbg(print, sdata, fmt, ...) \ do { \ if (print) \ pr_debug("%s: " fmt, \ (sdata)->name, ##__VA_ARGS__); \ } while (0) #define _sdata_err(sdata, fmt, ...) \ do { \ pr_err("%s: " fmt, \ (sdata)->name, ##__VA_ARGS__); \ } while (0) #define _wiphy_dbg(print, wiphy, fmt, ...) \ do { \ if (print) \ wiphy_dbg((wiphy), fmt, ##__VA_ARGS__); \ } while (0) #endif #define sdata_info(sdata, fmt, ...) \ _sdata_info(sdata, fmt, ##__VA_ARGS__) #define sdata_err(sdata, fmt, ...) \ _sdata_err(sdata, fmt, ##__VA_ARGS__) #define sdata_dbg(sdata, fmt, ...) \ _sdata_dbg(1, sdata, fmt, ##__VA_ARGS__) #define ht_dbg(sdata, fmt, ...) \ _sdata_dbg(MAC80211_HT_DEBUG, \ sdata, fmt, ##__VA_ARGS__) #define ht_dbg_ratelimited(sdata, fmt, ...) \ _sdata_dbg(MAC80211_HT_DEBUG && net_ratelimit(), \ sdata, fmt, ##__VA_ARGS__) #define ocb_dbg(sdata, fmt, ...) \ _sdata_dbg(MAC80211_OCB_DEBUG, \ sdata, fmt, ##__VA_ARGS__) #define ibss_dbg(sdata, fmt, ...) \ _sdata_dbg(MAC80211_IBSS_DEBUG, \ sdata, fmt, ##__VA_ARGS__) #define ps_dbg(sdata, fmt, ...) \ _sdata_dbg(MAC80211_PS_DEBUG, \ sdata, fmt, ##__VA_ARGS__) #define ps_dbg_hw(hw, fmt, ...) \ _wiphy_dbg(MAC80211_PS_DEBUG, \ (hw)->wiphy, fmt, ##__VA_ARGS__) #define ps_dbg_ratelimited(sdata, fmt, ...) 
\ _sdata_dbg(MAC80211_PS_DEBUG && net_ratelimit(), \ sdata, fmt, ##__VA_ARGS__) #define mpl_dbg(sdata, fmt, ...) \ _sdata_dbg(MAC80211_MPL_DEBUG, \ sdata, fmt, ##__VA_ARGS__) #define mpath_dbg(sdata, fmt, ...) \ _sdata_dbg(MAC80211_MPATH_DEBUG, \ sdata, fmt, ##__VA_ARGS__) #define mhwmp_dbg(sdata, fmt, ...) \ _sdata_dbg(MAC80211_MHWMP_DEBUG, \ sdata, fmt, ##__VA_ARGS__) #define msync_dbg(sdata, fmt, ...) \ _sdata_dbg(MAC80211_MESH_SYNC_DEBUG, \ sdata, fmt, ##__VA_ARGS__) #define mcsa_dbg(sdata, fmt, ...) \ _sdata_dbg(MAC80211_MESH_CSA_DEBUG, \ sdata, fmt, ##__VA_ARGS__) #define mps_dbg(sdata, fmt, ...) \ _sdata_dbg(MAC80211_MESH_PS_DEBUG, \ sdata, fmt, ##__VA_ARGS__) #define tdls_dbg(sdata, fmt, ...) \ _sdata_dbg(MAC80211_TDLS_DEBUG, \ sdata, fmt, ##__VA_ARGS__) #define sta_dbg(sdata, fmt, ...) \ _sdata_dbg(MAC80211_STA_DEBUG, \ sdata, fmt, ##__VA_ARGS__) #define mlme_dbg(sdata, fmt, ...) \ _sdata_dbg(MAC80211_MLME_DEBUG, \ sdata, fmt, ##__VA_ARGS__) #define mlme_dbg_ratelimited(sdata, fmt, ...) \ _sdata_dbg(MAC80211_MLME_DEBUG && net_ratelimit(), \ sdata, fmt, ##__VA_ARGS__) #endif /* __MAC80211_DEBUG_H */ backport-iwlwifi-dkms-7906/net/mac80211/debugfs.c000066400000000000000000000626471351064634500214250ustar00rootroot00000000000000/* * mac80211 debugfs for wireless PHYs * * Copyright 2007 Johannes Berg * Copyright 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2018 - 2019 Intel Corporation * * GPLv2 * */ #include #include #include #include "ieee80211_i.h" #include "driver-ops.h" #include "rate.h" #include "debugfs.h" #define DEBUGFS_FORMAT_BUFFER_SIZE 100 #ifdef CPTCFG_MAC80211_LATENCY_MEASUREMENTS #define TX_TIMING_STATS_BIN_DELIMTER_C ',' #define TX_TIMING_STATS_BIN_DELIMTER_S "," #define TX_TIMING_STATS_BINS_DISABLED "enable(bins disabled)\n" #define TX_TIMING_STATS_DISABLED "disable\n" static ssize_t sta_tx_latency_points_read(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { struct ieee80211_local *local = file->private_data; struct ieee80211_tx_latency_bin_ranges *tx_latency; char buf[516]; int bufsz = sizeof(buf); int pos = 0; rcu_read_lock(); tx_latency = rcu_dereference(local->tx_latency); /* Tx latency is not enabled or no thresholds were configured */ if (!tx_latency) { pos += scnprintf(buf + pos, bufsz - pos, "%s\n", TX_TIMING_STATS_DISABLED); goto unlock; } pos += scnprintf(buf + pos, bufsz - pos, "start: %u\n", local->tx_msrmnt_points[0]); pos += scnprintf(buf + pos, bufsz - pos, "end: %u\n", local->tx_msrmnt_points[1]); unlock: rcu_read_unlock(); return simple_read_from_buffer(userbuf, count, ppos, buf, pos); } /* * Configure the Tx latency measuring points. * * In order to analyze the Tx latency there are 4 points that are of interest: * 1. Tx packet Enter the Kernel * 2. Tx packet is written to the bus * 3. Tx packet is acked by the AP * 4. Tx packet is erased. * * By default the measurement is done between points 1 to 4 (measure the whole * Tx flow). If one sees that the there is some problem with the Tx latency, * he can try to focus the analysis to different parts of the Tx flow. * * Valid input is: a,b where * 1. a < b * 2. IEEE80211_TX_LAT_ENTER<=a * 3. b < IEEE80211_TX_LAT_MAX_POINT * 4. 
a & b are values from enum ieee80211_tx_lat_msr_point (0-3) */ static ssize_t sta_tx_latency_points_write(struct file *file, const char __user *userbuf, size_t count, loff_t *ppos) { struct ieee80211_local *local = file->private_data; struct ieee80211_tx_latency_bin_ranges *tx_latency; int ret; u32 start, end; char buf[32] = {}; if (sizeof(buf) <= count) return -EINVAL; if (copy_from_user(buf, userbuf, count)) return -EFAULT; ret = count; if (sscanf(buf, "%u,%u", &start, &end) != 2 || end <= start || IEEE80211_TX_LAT_MAX_POINT <= end) return -EINVAL; mutex_lock(&local->sta_mtx); /* cannot change config once we have stations */ if (local->num_sta) goto unlock; tx_latency = rcu_dereference_protected(local->tx_latency, lockdep_is_held(&local->sta_mtx)); /* Tx latency disabled */ if (!tx_latency) goto unlock; local->tx_msrmnt_points[0] = start; local->tx_msrmnt_points[1] = end; rcu_assign_pointer(local->tx_latency, tx_latency); unlock: mutex_unlock(&local->sta_mtx); return ret; } static const struct file_operations stats_tx_latency_points_ops = { .write = sta_tx_latency_points_write, .read = sta_tx_latency_points_read, .open = simple_open, .llseek = generic_file_llseek, }; /* * Display if Tx latency statistics & bins are enabled/disabled */ static ssize_t sta_tx_latency_stat_read(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { struct ieee80211_local *local = file->private_data; struct ieee80211_tx_latency_bin_ranges *tx_latency; char *buf; int bufsz, i, ret; int pos = 0; rcu_read_lock(); tx_latency = rcu_dereference(local->tx_latency); if (tx_latency && tx_latency->n_ranges) { bufsz = tx_latency->n_ranges * 15; buf = kzalloc(bufsz, GFP_ATOMIC); if (!buf) goto err; for (i = 0; i < tx_latency->n_ranges; i++) pos += scnprintf(buf + pos, bufsz - pos, "%d,", tx_latency->ranges[i]); pos += scnprintf(buf + pos, bufsz - pos, "\n"); } else if (tx_latency) { bufsz = sizeof(TX_TIMING_STATS_BINS_DISABLED) + 1; buf = kzalloc(bufsz, GFP_ATOMIC); if (!buf) goto err; pos += scnprintf(buf + pos, bufsz - pos, "%s\n", TX_TIMING_STATS_BINS_DISABLED); } else { bufsz = sizeof(TX_TIMING_STATS_DISABLED) + 1; buf = kzalloc(bufsz, GFP_ATOMIC); if (!buf) goto err; pos += scnprintf(buf + pos, bufsz - pos, "%s\n", TX_TIMING_STATS_DISABLED); } rcu_read_unlock(); ret = simple_read_from_buffer(userbuf, count, ppos, buf, pos); kfree(buf); return ret; err: rcu_read_unlock(); return -ENOMEM; } /* * Receive input from user regarding Tx latency statistics * The input should indicate if Tx latency statistics and bins are * enabled/disabled. * If bins are enabled input should indicate the amount of different bins and * their ranges. Each bin will count how many Tx frames transmitted within the * appropriate latency. * Legal input is: * a) "enable(bins disabled)" - to enable only general statistics * b) "a,b,c,d,...z" - to enable general statistics and bins, where all are * numbers and a < b < c < d.. < z * c) "disable" - disable all statistics * NOTE: must configure Tx latency statistics bins before stations connected. 
*/ static ssize_t sta_tx_latency_stat_write(struct file *file, const char __user *userbuf, size_t count, loff_t *ppos) { struct ieee80211_local *local = file->private_data; char buf[128] = {}; char *bins = buf; char *token; int buf_size, i, alloc_size; int prev_bin = 0; int n_ranges = 0; int ret = count; struct ieee80211_tx_latency_bin_ranges *tx_latency; if (sizeof(buf) <= count) return -EINVAL; buf_size = count; if (copy_from_user(buf, userbuf, buf_size)) return -EFAULT; mutex_lock(&local->sta_mtx); /* cannot change config once we have stations */ if (local->num_sta) goto unlock; tx_latency = rcu_dereference_protected(local->tx_latency, lockdep_is_held(&local->sta_mtx)); /* disable Tx statistics */ if (!strcmp(buf, TX_TIMING_STATS_DISABLED)) { if (!tx_latency) goto unlock; RCU_INIT_POINTER(local->tx_latency, NULL); synchronize_rcu(); kfree(tx_latency); goto unlock; } /* Tx latency already enabled */ if (tx_latency) goto unlock; if (strcmp(TX_TIMING_STATS_BINS_DISABLED, buf)) { /* check how many bins and between what ranges user requested */ token = buf; while (*token != '\0') { if (*token == TX_TIMING_STATS_BIN_DELIMTER_C) n_ranges++; token++; } n_ranges++; } alloc_size = sizeof(struct ieee80211_tx_latency_bin_ranges) + n_ranges * sizeof(u32); tx_latency = kzalloc(alloc_size, GFP_ATOMIC); if (!tx_latency) { ret = -ENOMEM; goto unlock; } tx_latency->n_ranges = n_ranges; for (i = 0; i < n_ranges; i++) { /* setting bin ranges */ token = strsep(&bins, TX_TIMING_STATS_BIN_DELIMTER_S); sscanf(token, "%d", &tx_latency->ranges[i]); /* bins values should be in ascending order */ if (prev_bin >= tx_latency->ranges[i]) { ret = -EINVAL; kfree(tx_latency); goto unlock; } prev_bin = tx_latency->ranges[i]; } /* set default measurement points */ local->tx_msrmnt_points[0] = IEEE80211_TX_LAT_ENTER; local->tx_msrmnt_points[1] = IEEE80211_TX_LAT_DEL; rcu_assign_pointer(local->tx_latency, tx_latency); unlock: mutex_unlock(&local->sta_mtx); return ret; } static const struct file_operations stats_tx_latency_ops = { .write = sta_tx_latency_stat_write, .read = sta_tx_latency_stat_read, .open = simple_open, .llseek = generic_file_llseek, }; /* * Display if Tx consecutive loss statistics are enabled/disabled */ static ssize_t sta_tx_consecutive_loss_read(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { struct ieee80211_local *local = file->private_data; struct ieee80211_tx_consec_loss_ranges *tx_consec; char *buf; size_t bufsz, i; int ret; u32 pos = 0; rcu_read_lock(); tx_consec = rcu_dereference(local->tx_consec); if (tx_consec && tx_consec->n_ranges) { /* enabled */ bufsz = tx_consec->n_ranges * 16; buf = kzalloc(bufsz, GFP_ATOMIC); if (!buf) goto err; pos += scnprintf(buf, bufsz, "bins: "); for (i = 0; i < tx_consec->n_ranges; i++) pos += scnprintf(buf + pos, bufsz - pos, "%u,", tx_consec->ranges[i]); pos += scnprintf(buf + pos, bufsz - pos, "\nlate threshold: %d\n", tx_consec->late_threshold); } else { /* disabled */ bufsz = sizeof(TX_TIMING_STATS_DISABLED) + 1; buf = kzalloc(bufsz, GFP_ATOMIC); if (!buf) goto err; pos += scnprintf(buf + pos, bufsz - pos, "%s\n", TX_TIMING_STATS_DISABLED); } rcu_read_unlock(); ret = simple_read_from_buffer(userbuf, count, ppos, buf, pos); kfree(buf); return ret; err: rcu_read_unlock(); return -ENOMEM; } /* * Receive input from user regarding Tx consecutive loss statistics * The input should indicate if Tx consecutive loss statistics are * enabled/disabled. 
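 * (For example, under the format defined below, writing "5,10,20,100"
 * creates bins with upper bounds 5, 10 and 20 and sets a late
 * threshold of 100, while writing "disable" turns everything off.)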
 * This entry keeps track of two statistics to do with Tx consecutive loss:
 * 1) How many consecutive packets were lost within the bin ranges
 * 2) How many consecutive packets were sent successfully but the transmit
 * latency is greater than the late threshold, therefore considered lost.
 *
 * Legal input is:
 * a) "a,b,c,d,...z,threshold" - to enable consecutive loss statistics,
 * where all are numbers and a < b < c < d.. < z
 * b) "disable" - disable all statistics
 * NOTE:
 * 1) Must supply at least 2 values.
 * 2) Last value is always the late threshold.
 * 3) Must configure Tx consecutive loss statistics bins before stations
 * connected.
 */
static ssize_t sta_tx_consecutive_loss_write(struct file *file,
					     const char __user *userbuf,
					     size_t count, loff_t *ppos)
{
	struct ieee80211_local *local = file->private_data;
	char buf[128] = {};
	char *bins = buf;
	char *token;
	u32 i, alloc_size;
	u32 prev_bin = 0;
	int n_ranges = 0;
	int n_vals = 0;
	int ret = count;
	struct ieee80211_tx_consec_loss_ranges *tx_consec;

	if (sizeof(buf) <= count)
		return -EINVAL;

	if (copy_from_user(buf, userbuf, count))
		return -EFAULT;

	mutex_lock(&local->sta_mtx);

	/* cannot change config once we have stations */
	if (local->num_sta)
		goto unlock;

	tx_consec = rcu_dereference_protected(local->tx_consec,
					      lockdep_is_held(&local->sta_mtx));

	/* disable Tx statistics */
	if (!strcmp(buf, TX_TIMING_STATS_DISABLED)) {
		if (!tx_consec)
			goto unlock;
		rcu_assign_pointer(local->tx_consec, NULL);
		synchronize_rcu();
		kfree(tx_consec);
		goto unlock;
	}

	/* Tx consecutive loss statistics already enabled */
	if (tx_consec)
		goto unlock;

	/* check how many bins and between what ranges user requested */
	token = buf;
	while (*token != '\0') {
		if (*token == TX_TIMING_STATS_BIN_DELIMTER_C)
			n_vals++;
		token++;
	}
	n_vals++;

	/* last value is for setting the late threshold */
	n_ranges = n_vals - 1;

	/*
	 * user needs to enter at least 2 values, one for the threshold, and
	 * one for the range
	 */
	if (n_vals < 2) {
		ret = -EINVAL;
		goto unlock;
	}

	alloc_size = sizeof(struct ieee80211_tx_consec_loss_ranges) +
		     n_ranges * sizeof(u32);
	tx_consec = kzalloc(alloc_size, GFP_ATOMIC);
	if (!tx_consec) {
		ret = -ENOMEM;
		goto unlock;
	}
	tx_consec->n_ranges = n_ranges;
	for (i = 0; i < n_vals; i++) { /* setting bin ranges */
		token = strsep(&bins, TX_TIMING_STATS_BIN_DELIMTER_S);
		if (i == n_vals - 1) { /* last value - late threshold */
			ret = sscanf(token, "%d", &tx_consec->late_threshold);
			if (ret != 1) {
				ret = -EINVAL;
				kfree(tx_consec);
				goto unlock;
			}
			break;
		}
		ret = sscanf(token, "%d", &tx_consec->ranges[i]);
		if (ret != 1) {
			ret = -EINVAL;
			kfree(tx_consec);
			goto unlock;
		}

		/* bin values must be in ascending order */
		if (prev_bin >= tx_consec->ranges[i]) {
			ret = -EINVAL;
			kfree(tx_consec);
			goto unlock;
		}
		prev_bin = tx_consec->ranges[i];
	}

	/*
	 * all tokens parsed successfully; don't leak the last sscanf()
	 * return value (1) to the caller, report the whole write as
	 * consumed
	 */
	ret = count;

	/* set default measurement points */
	local->tx_msrmnt_points[0] = IEEE80211_TX_LAT_ENTER;
	local->tx_msrmnt_points[1] = IEEE80211_TX_LAT_DEL;

	rcu_assign_pointer(local->tx_consec, tx_consec);

unlock:
	mutex_unlock(&local->sta_mtx);

	return ret;
}

static const struct file_operations stats_tx_consecutive_loss_ops = {
	.write = sta_tx_consecutive_loss_write,
	.read = sta_tx_consecutive_loss_read,
	.open = simple_open,
	.llseek = generic_file_llseek,
};
#endif /* CPTCFG_MAC80211_LATENCY_MEASUREMENTS */

int mac80211_format_buffer(char __user *userbuf, size_t count, loff_t *ppos,
			   char *fmt, ...)
{
	va_list args;
	char buf[DEBUGFS_FORMAT_BUFFER_SIZE];
	int res;

	va_start(args, fmt);
	res = vscnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);

	return simple_read_from_buffer(userbuf, count, ppos, buf, res);
}

#define DEBUGFS_READONLY_FILE_FN(name, fmt, value...)			\
static ssize_t name## _read(struct file *file, char __user *userbuf,	\
			    size_t count, loff_t *ppos)			\
{									\
	struct ieee80211_local *local = file->private_data;		\
									\
	return mac80211_format_buffer(userbuf, count, ppos,		\
				      fmt "\n", ##value);		\
}

#define DEBUGFS_READONLY_FILE_OPS(name)			\
static const struct file_operations name## _ops = {	\
	.read = name## _read,				\
	.open = simple_open,				\
	.llseek = generic_file_llseek,			\
};

#define DEBUGFS_READONLY_FILE(name, fmt, value...)	\
	DEBUGFS_READONLY_FILE_FN(name, fmt, value)	\
	DEBUGFS_READONLY_FILE_OPS(name)

#define DEBUGFS_ADD(name)						\
	debugfs_create_file(#name, 0400, phyd, local, &name## _ops);

#define DEBUGFS_ADD_MODE(name, mode)					\
	debugfs_create_file(#name, mode, phyd, local, &name## _ops);

DEBUGFS_READONLY_FILE(user_power, "%d", local->user_power_level);
DEBUGFS_READONLY_FILE(power, "%d", local->hw.conf.power_level);
DEBUGFS_READONLY_FILE(total_ps_buffered, "%d", local->total_ps_buffered);
DEBUGFS_READONLY_FILE(wep_iv, "%#08x", local->wep_iv & 0xffffff);
DEBUGFS_READONLY_FILE(rate_ctrl_alg, "%s",
	local->rate_ctrl ? local->rate_ctrl->ops->name : "hw/driver");

static ssize_t aqm_read(struct file *file,
			char __user *user_buf,
			size_t count,
			loff_t *ppos)
{
	struct ieee80211_local *local = file->private_data;
	struct fq *fq = &local->fq;
	char buf[200];
	int len = 0;

	spin_lock_bh(&local->fq.lock);
	rcu_read_lock();

	/* keep the printed values in the same order as the labels */
	len = scnprintf(buf, sizeof(buf),
			"access name value\n"
			"R fq_flows_cnt %u\n"
			"R fq_backlog %u\n"
			"R fq_overlimit %u\n"
			"R fq_overmemory %u\n"
			"R fq_collisions %u\n"
			"R fq_memory_usage %u\n"
			"RW fq_memory_limit %u\n"
			"RW fq_limit %u\n"
			"RW fq_quantum %u\n",
			fq->flows_cnt,
			fq->backlog,
			fq->overlimit,
			fq->overmemory,
			fq->collisions,
			fq->memory_usage,
			fq->memory_limit,
			fq->limit,
			fq->quantum);

	rcu_read_unlock();
	spin_unlock_bh(&local->fq.lock);

	return simple_read_from_buffer(user_buf, count, ppos,
				       buf, len);
}

static ssize_t aqm_write(struct file *file,
			 const char __user *user_buf,
			 size_t count,
			 loff_t *ppos)
{
	struct ieee80211_local *local = file->private_data;
	char buf[100];
	size_t len;

	/* reject writes that don't leave room for the terminating NUL */
	if (count >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(buf, user_buf, count))
		return -EFAULT;

	/* terminate right after the copied data, not at the buffer end */
	buf[count] = '\0';

	len = strlen(buf);
	if (len > 0 && buf[len-1] == '\n')
		buf[len-1] = 0;

	if (sscanf(buf, "fq_limit %u", &local->fq.limit) == 1)
		return count;
	else if (sscanf(buf, "fq_memory_limit %u", &local->fq.memory_limit) == 1)
		return count;
	else if (sscanf(buf, "fq_quantum %u", &local->fq.quantum) == 1)
		return count;

	return -EINVAL;
}

static const struct file_operations aqm_ops = {
	.write = aqm_write,
	.read = aqm_read,
	.open = simple_open,
	.llseek = default_llseek,
};

static ssize_t uapsd_black_list_write(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct uapsd_black_list *uapsd_bl = NULL, *old;
	struct ieee80211_local *local = file->private_data;
	unsigned int oui, num_oui, blsz;
	char *buf, *pos, *buf_ptr;
	int ret;

	buf = memdup_user_nul(user_buf, count);
	/* memdup_user_nul() returns an ERR_PTR, never NULL, on failure */
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	mutex_lock(&local->mtx);

	old = rcu_dereference_protected(local->uapsd_black_list,
					lockdep_is_held(&local->mtx));
	if (old)
		num_oui = old->num_oui + 1;
	else
		num_oui = 1;

	for (buf_ptr = buf; *buf_ptr; buf_ptr++) {
		if (*buf_ptr == '\n')
			num_oui++;
	}
	buf_ptr = buf;

	/* can't use krealloc since
it would free old */ blsz = sizeof(*uapsd_bl) + sizeof(uapsd_bl->oui[0]) * num_oui; uapsd_bl = kzalloc(blsz, GFP_KERNEL); if (!uapsd_bl) { ret = -ENOMEM; goto out_unlock; } if (old) { memcpy(&uapsd_bl->oui[0], &old->oui[0], sizeof(uapsd_bl->oui[0]) * old->num_oui); uapsd_bl->num_oui = old->num_oui; } pos = buf_ptr; while ((buf_ptr = strsep(&pos, "\n"))) { if (!*buf_ptr) continue; ret = kstrtouint(buf_ptr, 16, &oui); if (ret) goto out_unlock; uapsd_bl->oui[uapsd_bl->num_oui] = oui; uapsd_bl->num_oui++; } rcu_assign_pointer(local->uapsd_black_list, uapsd_bl); if (old) kfree_rcu(old, rcu_head); buf = NULL; uapsd_bl = NULL; ret = count; out_unlock: mutex_unlock(&local->mtx); kfree(buf); kfree(uapsd_bl); return ret; } static ssize_t uapsd_black_list_read(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { struct ieee80211_local *local = file->private_data; const struct uapsd_black_list *uapsd_bl; int i, bufsz, ret = 0, pos = 0; char *buf; rcu_read_lock(); uapsd_bl = rcu_dereference(local->uapsd_black_list); if (!uapsd_bl) goto out; bufsz = uapsd_bl->num_oui * 7 + 1; buf = kzalloc(bufsz, GFP_ATOMIC); if (!buf) { ret = -ENOMEM; goto out; } for (i = 0; i < uapsd_bl->num_oui; i++) { pos += scnprintf(buf + pos, bufsz - pos, "%06x\n", uapsd_bl->oui[i]); } rcu_read_unlock(); ret = simple_read_from_buffer(userbuf, count, ppos, buf, pos); kfree(buf); return ret; out: rcu_read_unlock(); return ret; } static const struct file_operations uapsd_black_list_ops = { .write = uapsd_black_list_write, .read = uapsd_black_list_read, .open = simple_open, .llseek = noop_llseek, }; #ifdef CONFIG_PM static ssize_t reset_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct ieee80211_local *local = file->private_data; rtnl_lock(); __ieee80211_suspend(&local->hw, NULL); __ieee80211_resume(&local->hw); rtnl_unlock(); return count; } static const struct file_operations reset_ops = { .write = reset_write, .open = simple_open, .llseek = noop_llseek, }; #endif static const char *hw_flag_names[] = { #define FLAG(F) [IEEE80211_HW_##F] = #F FLAG(HAS_RATE_CONTROL), FLAG(RX_INCLUDES_FCS), FLAG(HOST_BROADCAST_PS_BUFFERING), FLAG(SIGNAL_UNSPEC), FLAG(SIGNAL_DBM), FLAG(NEED_DTIM_BEFORE_ASSOC), FLAG(SPECTRUM_MGMT), FLAG(AMPDU_AGGREGATION), FLAG(SUPPORTS_PS), FLAG(PS_NULLFUNC_STACK), FLAG(SUPPORTS_DYNAMIC_PS), FLAG(MFP_CAPABLE), FLAG(WANT_MONITOR_VIF), FLAG(NO_AUTO_VIF), FLAG(SW_CRYPTO_CONTROL), FLAG(SUPPORT_FAST_XMIT), FLAG(REPORTS_TX_ACK_STATUS), FLAG(CONNECTION_MONITOR), FLAG(QUEUE_CONTROL), FLAG(SUPPORTS_PER_STA_GTK), FLAG(AP_LINK_PS), FLAG(TX_AMPDU_SETUP_IN_HW), FLAG(SUPPORTS_RC_TABLE), FLAG(P2P_DEV_ADDR_FOR_INTF), FLAG(TIMING_BEACON_ONLY), FLAG(SUPPORTS_HT_CCK_RATES), FLAG(CHANCTX_STA_CSA), FLAG(SUPPORTS_CLONED_SKBS), FLAG(SINGLE_SCAN_ON_ALL_BANDS), FLAG(TDLS_WIDER_BW), FLAG(SUPPORTS_AMSDU_IN_AMPDU), FLAG(BEACON_TX_STATUS), FLAG(NEEDS_UNIQUE_STA_ADDR), FLAG(SUPPORTS_REORDERING_BUFFER), FLAG(USES_RSS), FLAG(TX_AMSDU), FLAG(TX_FRAG_LIST), FLAG(REPORTS_LOW_ACK), FLAG(SUPPORTS_TX_FRAG), FLAG(SUPPORTS_TDLS_BUFFER_STA), FLAG(DEAUTH_NEED_MGD_TX_PREP), FLAG(DOESNT_SUPPORT_QOS_NDP), FLAG(BUFF_MMPDU_TXQ), FLAG(SUPPORTS_VHT_EXT_NSS_BW), FLAG(STA_MMPDU_TXQ), FLAG(SUPPORTS_MULTI_BSSID), FLAG(SUPPORTS_ONLY_HE_MULTI_BSSID), #undef FLAG }; static ssize_t hwflags_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ieee80211_local *local = file->private_data; size_t bufsz = 30 * NUM_IEEE80211_HW_FLAGS; char *buf = kzalloc(bufsz, GFP_KERNEL); char *pos = buf, 
*end = buf + bufsz - 1; ssize_t rv; int i; if (!buf) return -ENOMEM; /* fail compilation if somebody adds or removes * a flag without updating the name array above */ BUILD_BUG_ON(ARRAY_SIZE(hw_flag_names) != NUM_IEEE80211_HW_FLAGS); for (i = 0; i < NUM_IEEE80211_HW_FLAGS; i++) { if (test_bit(i, local->hw.flags)) pos += scnprintf(pos, end - pos, "%s\n", hw_flag_names[i]); } rv = simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf)); kfree(buf); return rv; } static ssize_t misc_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ieee80211_local *local = file->private_data; /* Max len of each line is 16 characters, plus 9 for 'pending:\n' */ size_t bufsz = IEEE80211_MAX_QUEUES * 16 + 9; char *buf; char *pos, *end; ssize_t rv; int i; int ln; buf = kzalloc(bufsz, GFP_KERNEL); if (!buf) return -ENOMEM; pos = buf; end = buf + bufsz - 1; pos += scnprintf(pos, end - pos, "pending:\n"); for (i = 0; i < IEEE80211_MAX_QUEUES; i++) { ln = skb_queue_len(&local->pending[i]); pos += scnprintf(pos, end - pos, "[%i] %d\n", i, ln); } rv = simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf)); kfree(buf); return rv; } static ssize_t queues_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ieee80211_local *local = file->private_data; unsigned long flags; char buf[IEEE80211_MAX_QUEUES * 20]; int q, res = 0; spin_lock_irqsave(&local->queue_stop_reason_lock, flags); for (q = 0; q < local->hw.queues; q++) res += sprintf(buf + res, "%02d: %#.8lx/%d\n", q, local->queue_stop_reasons[q], skb_queue_len(&local->pending[q])); spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); return simple_read_from_buffer(user_buf, count, ppos, buf, res); } DEBUGFS_READONLY_FILE_OPS(hwflags); DEBUGFS_READONLY_FILE_OPS(queues); DEBUGFS_READONLY_FILE_OPS(misc); /* statistics stuff */ static ssize_t format_devstat_counter(struct ieee80211_local *local, char __user *userbuf, size_t count, loff_t *ppos, int (*printvalue)(struct ieee80211_low_level_stats *stats, char *buf, int buflen)) { struct ieee80211_low_level_stats stats; char buf[20]; int res; rtnl_lock(); res = drv_get_stats(local, &stats); rtnl_unlock(); if (res) return res; res = printvalue(&stats, buf, sizeof(buf)); return simple_read_from_buffer(userbuf, count, ppos, buf, res); } #define DEBUGFS_DEVSTATS_FILE(name) \ static int print_devstats_##name(struct ieee80211_low_level_stats *stats,\ char *buf, int buflen) \ { \ return scnprintf(buf, buflen, "%u\n", stats->name); \ } \ static ssize_t stats_ ##name## _read(struct file *file, \ char __user *userbuf, \ size_t count, loff_t *ppos) \ { \ return format_devstat_counter(file->private_data, \ userbuf, \ count, \ ppos, \ print_devstats_##name); \ } \ \ static const struct file_operations stats_ ##name## _ops = { \ .read = stats_ ##name## _read, \ .open = simple_open, \ .llseek = generic_file_llseek, \ }; #define DEBUGFS_STATS_ADD(name) \ debugfs_create_u32(#name, 0400, statsd, &local->name); #define DEBUGFS_DEVSTATS_ADD(name) \ debugfs_create_file(#name, 0400, statsd, local, &stats_ ##name## _ops); DEBUGFS_DEVSTATS_FILE(dot11ACKFailureCount); DEBUGFS_DEVSTATS_FILE(dot11RTSFailureCount); DEBUGFS_DEVSTATS_FILE(dot11FCSErrorCount); DEBUGFS_DEVSTATS_FILE(dot11RTSSuccessCount); void debugfs_hw_add(struct ieee80211_local *local) { struct dentry *phyd = local->hw.wiphy->debugfsdir; struct dentry *statsd; if (!phyd) return; local->debugfs.keys = debugfs_create_dir("keys", phyd); DEBUGFS_ADD(total_ps_buffered); DEBUGFS_ADD(wep_iv); 
DEBUGFS_ADD(rate_ctrl_alg); DEBUGFS_ADD(queues); DEBUGFS_ADD(misc); #ifdef CONFIG_PM DEBUGFS_ADD_MODE(reset, 0200); #endif DEBUGFS_ADD(hwflags); DEBUGFS_ADD(user_power); DEBUGFS_ADD(power); DEBUGFS_ADD_MODE(aqm, 0600); DEBUGFS_ADD_MODE(uapsd_black_list, 0600); if (local->ops->wake_tx_queue) DEBUGFS_ADD_MODE(aqm, 0600); statsd = debugfs_create_dir("statistics", phyd); /* if the dir failed, don't put all the other things into the root! */ if (!statsd) return; #ifdef CPTCFG_MAC80211_DEBUG_COUNTERS DEBUGFS_STATS_ADD(dot11TransmittedFragmentCount); DEBUGFS_STATS_ADD(dot11MulticastTransmittedFrameCount); DEBUGFS_STATS_ADD(dot11FailedCount); DEBUGFS_STATS_ADD(dot11RetryCount); DEBUGFS_STATS_ADD(dot11MultipleRetryCount); DEBUGFS_STATS_ADD(dot11FrameDuplicateCount); DEBUGFS_STATS_ADD(dot11ReceivedFragmentCount); DEBUGFS_STATS_ADD(dot11MulticastReceivedFrameCount); DEBUGFS_STATS_ADD(dot11TransmittedFrameCount); DEBUGFS_STATS_ADD(tx_handlers_drop); DEBUGFS_STATS_ADD(tx_handlers_queued); DEBUGFS_STATS_ADD(tx_handlers_drop_wep); DEBUGFS_STATS_ADD(tx_handlers_drop_not_assoc); DEBUGFS_STATS_ADD(tx_handlers_drop_unauth_port); DEBUGFS_STATS_ADD(rx_handlers_drop); DEBUGFS_STATS_ADD(rx_handlers_queued); DEBUGFS_STATS_ADD(rx_handlers_drop_nullfunc); DEBUGFS_STATS_ADD(rx_handlers_drop_defrag); DEBUGFS_STATS_ADD(tx_expand_skb_head); DEBUGFS_STATS_ADD(tx_expand_skb_head_cloned); DEBUGFS_STATS_ADD(rx_expand_skb_head_defrag); DEBUGFS_STATS_ADD(rx_handlers_fragments); DEBUGFS_STATS_ADD(tx_status_drop); #endif DEBUGFS_DEVSTATS_ADD(dot11ACKFailureCount); DEBUGFS_DEVSTATS_ADD(dot11RTSFailureCount); DEBUGFS_DEVSTATS_ADD(dot11FCSErrorCount); DEBUGFS_DEVSTATS_ADD(dot11RTSSuccessCount); #ifdef CPTCFG_MAC80211_LATENCY_MEASUREMENTS DEBUGFS_DEVSTATS_ADD(tx_latency); DEBUGFS_DEVSTATS_ADD(tx_latency_points); DEBUGFS_DEVSTATS_ADD(tx_consecutive_loss); #endif /* CPTCFG_MAC80211_LATENCY_MEASUREMENTS */ } backport-iwlwifi-dkms-7906/net/mac80211/debugfs.h000066400000000000000000000006701351064634500214160ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __MAC80211_DEBUGFS_H #define __MAC80211_DEBUGFS_H #include "ieee80211_i.h" #ifdef CPTCFG_MAC80211_DEBUGFS void debugfs_hw_add(struct ieee80211_local *local); int __printf(4, 5) mac80211_format_buffer(char __user *userbuf, size_t count, loff_t *ppos, char *fmt, ...); #else static inline void debugfs_hw_add(struct ieee80211_local *local) { } #endif #endif /* __MAC80211_DEBUGFS_H */ backport-iwlwifi-dkms-7906/net/mac80211/debugfs_key.c000066400000000000000000000275111351064634500222640ustar00rootroot00000000000000/* * Copyright 2003-2005 Devicescape Software, Inc. * Copyright (c) 2006 Jiri Benc * Copyright 2007 Johannes Berg * Copyright (C) 2015 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. 
*/ #include #include #include "ieee80211_i.h" #include "key.h" #include "debugfs.h" #include "debugfs_key.h" #define KEY_READ(name, prop, format_string) \ static ssize_t key_##name##_read(struct file *file, \ char __user *userbuf, \ size_t count, loff_t *ppos) \ { \ struct ieee80211_key *key = file->private_data; \ return mac80211_format_buffer(userbuf, count, ppos, \ format_string, key->prop); \ } #define KEY_READ_D(name) KEY_READ(name, name, "%d\n") #define KEY_READ_X(name) KEY_READ(name, name, "0x%x\n") #define KEY_OPS(name) \ static const struct file_operations key_ ##name## _ops = { \ .read = key_##name##_read, \ .open = simple_open, \ .llseek = generic_file_llseek, \ } #define KEY_OPS_W(name) \ static const struct file_operations key_ ##name## _ops = { \ .read = key_##name##_read, \ .write = key_##name##_write, \ .open = simple_open, \ .llseek = generic_file_llseek, \ } #define KEY_FILE(name, format) \ KEY_READ_##format(name) \ KEY_OPS(name) #define KEY_CONF_READ(name, format_string) \ KEY_READ(conf_##name, conf.name, format_string) #define KEY_CONF_READ_D(name) KEY_CONF_READ(name, "%d\n") #define KEY_CONF_OPS(name) \ static const struct file_operations key_ ##name## _ops = { \ .read = key_conf_##name##_read, \ .open = simple_open, \ .llseek = generic_file_llseek, \ } #define KEY_CONF_FILE(name, format) \ KEY_CONF_READ_##format(name) \ KEY_CONF_OPS(name) KEY_CONF_FILE(keylen, D); KEY_CONF_FILE(keyidx, D); KEY_CONF_FILE(hw_key_idx, D); KEY_FILE(flags, X); KEY_READ(ifindex, sdata->name, "%s\n"); KEY_OPS(ifindex); static ssize_t key_algorithm_read(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { char buf[15]; struct ieee80211_key *key = file->private_data; u32 c = key->conf.cipher; sprintf(buf, "%.2x-%.2x-%.2x:%d\n", c >> 24, (c >> 16) & 0xff, (c >> 8) & 0xff, c & 0xff); return simple_read_from_buffer(userbuf, count, ppos, buf, strlen(buf)); } KEY_OPS(algorithm); static ssize_t key_tx_spec_write(struct file *file, const char __user *userbuf, size_t count, loff_t *ppos) { struct ieee80211_key *key = file->private_data; u64 pn; int ret; switch (key->conf.cipher) { case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: return -EINVAL; case WLAN_CIPHER_SUITE_TKIP: /* not supported yet */ return -EOPNOTSUPP; case WLAN_CIPHER_SUITE_CCMP: case WLAN_CIPHER_SUITE_CCMP_256: case WLAN_CIPHER_SUITE_AES_CMAC: case WLAN_CIPHER_SUITE_BIP_CMAC_256: case WLAN_CIPHER_SUITE_BIP_GMAC_128: case WLAN_CIPHER_SUITE_BIP_GMAC_256: case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: ret = kstrtou64_from_user(userbuf, count, 16, &pn); if (ret) return ret; /* PN is a 48-bit counter */ if (pn >= (1ULL << 48)) return -ERANGE; atomic64_set(&key->conf.tx_pn, pn); return count; default: return 0; } } static ssize_t key_tx_spec_read(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { u64 pn; char buf[20]; int len; struct ieee80211_key *key = file->private_data; switch (key->conf.cipher) { case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: len = scnprintf(buf, sizeof(buf), "\n"); break; case WLAN_CIPHER_SUITE_TKIP: pn = atomic64_read(&key->conf.tx_pn); len = scnprintf(buf, sizeof(buf), "%08x %04x\n", TKIP_PN_TO_IV32(pn), TKIP_PN_TO_IV16(pn)); break; case WLAN_CIPHER_SUITE_CCMP: case WLAN_CIPHER_SUITE_CCMP_256: case WLAN_CIPHER_SUITE_AES_CMAC: case WLAN_CIPHER_SUITE_BIP_CMAC_256: case WLAN_CIPHER_SUITE_BIP_GMAC_128: case WLAN_CIPHER_SUITE_BIP_GMAC_256: case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: pn = atomic64_read(&key->conf.tx_pn); len = 
scnprintf(buf, sizeof(buf), "%02x%02x%02x%02x%02x%02x\n", (u8)(pn >> 40), (u8)(pn >> 32), (u8)(pn >> 24), (u8)(pn >> 16), (u8)(pn >> 8), (u8)pn); break; default: return 0; } return simple_read_from_buffer(userbuf, count, ppos, buf, len); } KEY_OPS_W(tx_spec); static ssize_t key_rx_spec_read(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { struct ieee80211_key *key = file->private_data; char buf[14*IEEE80211_NUM_TIDS+1], *p = buf; int i, len; const u8 *rpn; switch (key->conf.cipher) { case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: len = scnprintf(buf, sizeof(buf), "\n"); break; case WLAN_CIPHER_SUITE_TKIP: for (i = 0; i < IEEE80211_NUM_TIDS; i++) p += scnprintf(p, sizeof(buf)+buf-p, "%08x %04x\n", key->u.tkip.rx[i].iv32, key->u.tkip.rx[i].iv16); len = p - buf; break; case WLAN_CIPHER_SUITE_CCMP: case WLAN_CIPHER_SUITE_CCMP_256: for (i = 0; i < IEEE80211_NUM_TIDS + 1; i++) { rpn = key->u.ccmp.rx_pn[i]; p += scnprintf(p, sizeof(buf)+buf-p, "%02x%02x%02x%02x%02x%02x\n", rpn[0], rpn[1], rpn[2], rpn[3], rpn[4], rpn[5]); } len = p - buf; break; case WLAN_CIPHER_SUITE_AES_CMAC: case WLAN_CIPHER_SUITE_BIP_CMAC_256: rpn = key->u.aes_cmac.rx_pn; p += scnprintf(p, sizeof(buf)+buf-p, "%02x%02x%02x%02x%02x%02x\n", rpn[0], rpn[1], rpn[2], rpn[3], rpn[4], rpn[5]); len = p - buf; break; case WLAN_CIPHER_SUITE_BIP_GMAC_128: case WLAN_CIPHER_SUITE_BIP_GMAC_256: rpn = key->u.aes_gmac.rx_pn; p += scnprintf(p, sizeof(buf)+buf-p, "%02x%02x%02x%02x%02x%02x\n", rpn[0], rpn[1], rpn[2], rpn[3], rpn[4], rpn[5]); len = p - buf; break; case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: for (i = 0; i < IEEE80211_NUM_TIDS + 1; i++) { rpn = key->u.gcmp.rx_pn[i]; p += scnprintf(p, sizeof(buf)+buf-p, "%02x%02x%02x%02x%02x%02x\n", rpn[0], rpn[1], rpn[2], rpn[3], rpn[4], rpn[5]); } len = p - buf; break; default: return 0; } return simple_read_from_buffer(userbuf, count, ppos, buf, len); } KEY_OPS(rx_spec); static ssize_t key_replays_read(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { struct ieee80211_key *key = file->private_data; char buf[20]; int len; switch (key->conf.cipher) { case WLAN_CIPHER_SUITE_CCMP: case WLAN_CIPHER_SUITE_CCMP_256: len = scnprintf(buf, sizeof(buf), "%u\n", key->u.ccmp.replays); break; case WLAN_CIPHER_SUITE_AES_CMAC: case WLAN_CIPHER_SUITE_BIP_CMAC_256: len = scnprintf(buf, sizeof(buf), "%u\n", key->u.aes_cmac.replays); break; case WLAN_CIPHER_SUITE_BIP_GMAC_128: case WLAN_CIPHER_SUITE_BIP_GMAC_256: len = scnprintf(buf, sizeof(buf), "%u\n", key->u.aes_gmac.replays); break; case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: len = scnprintf(buf, sizeof(buf), "%u\n", key->u.gcmp.replays); break; default: return 0; } return simple_read_from_buffer(userbuf, count, ppos, buf, len); } KEY_OPS(replays); static ssize_t key_icverrors_read(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { struct ieee80211_key *key = file->private_data; char buf[20]; int len; switch (key->conf.cipher) { case WLAN_CIPHER_SUITE_AES_CMAC: case WLAN_CIPHER_SUITE_BIP_CMAC_256: len = scnprintf(buf, sizeof(buf), "%u\n", key->u.aes_cmac.icverrors); break; case WLAN_CIPHER_SUITE_BIP_GMAC_128: case WLAN_CIPHER_SUITE_BIP_GMAC_256: len = scnprintf(buf, sizeof(buf), "%u\n", key->u.aes_gmac.icverrors); break; default: return 0; } return simple_read_from_buffer(userbuf, count, ppos, buf, len); } KEY_OPS(icverrors); static ssize_t key_mic_failures_read(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { struct ieee80211_key 
*key = file->private_data; char buf[20]; int len; if (key->conf.cipher != WLAN_CIPHER_SUITE_TKIP) return -EINVAL; len = scnprintf(buf, sizeof(buf), "%u\n", key->u.tkip.mic_failures); return simple_read_from_buffer(userbuf, count, ppos, buf, len); } KEY_OPS(mic_failures); static ssize_t key_key_read(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { struct ieee80211_key *key = file->private_data; int i, bufsize = 2 * key->conf.keylen + 2; char *buf = kmalloc(bufsize, GFP_KERNEL); char *p = buf; ssize_t res; if (!buf) return -ENOMEM; for (i = 0; i < key->conf.keylen; i++) p += scnprintf(p, bufsize + buf - p, "%02x", key->conf.key[i]); p += scnprintf(p, bufsize+buf-p, "\n"); res = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf); kfree(buf); return res; } KEY_OPS(key); #define DEBUGFS_ADD(name) \ debugfs_create_file(#name, 0400, key->debugfs.dir, \ key, &key_##name##_ops); #define DEBUGFS_ADD_W(name) \ debugfs_create_file(#name, 0600, key->debugfs.dir, \ key, &key_##name##_ops); void ieee80211_debugfs_key_add(struct ieee80211_key *key) { static int keycount; char buf[100]; struct sta_info *sta; if (!key->local->debugfs.keys) return; sprintf(buf, "%d", keycount); key->debugfs.cnt = keycount; keycount++; key->debugfs.dir = debugfs_create_dir(buf, key->local->debugfs.keys); if (!key->debugfs.dir) return; sta = key->sta; if (sta) { sprintf(buf, "../../netdev:%s/stations/%pM", sta->sdata->name, sta->sta.addr); key->debugfs.stalink = debugfs_create_symlink("station", key->debugfs.dir, buf); } DEBUGFS_ADD(keylen); DEBUGFS_ADD(flags); DEBUGFS_ADD(keyidx); DEBUGFS_ADD(hw_key_idx); DEBUGFS_ADD(algorithm); DEBUGFS_ADD_W(tx_spec); DEBUGFS_ADD(rx_spec); DEBUGFS_ADD(replays); DEBUGFS_ADD(icverrors); DEBUGFS_ADD(mic_failures); DEBUGFS_ADD(key); DEBUGFS_ADD(ifindex); }; void ieee80211_debugfs_key_remove(struct ieee80211_key *key) { if (!key) return; debugfs_remove_recursive(key->debugfs.dir); key->debugfs.dir = NULL; } void ieee80211_debugfs_key_update_default(struct ieee80211_sub_if_data *sdata) { char buf[50]; struct ieee80211_key *key; if (!sdata->vif.debugfs_dir) return; lockdep_assert_held(&sdata->local->key_mtx); debugfs_remove(sdata->debugfs.default_unicast_key); sdata->debugfs.default_unicast_key = NULL; if (sdata->default_unicast_key) { key = key_mtx_dereference(sdata->local, sdata->default_unicast_key); sprintf(buf, "../keys/%d", key->debugfs.cnt); sdata->debugfs.default_unicast_key = debugfs_create_symlink("default_unicast_key", sdata->vif.debugfs_dir, buf); } debugfs_remove(sdata->debugfs.default_multicast_key); sdata->debugfs.default_multicast_key = NULL; if (sdata->default_multicast_key) { key = key_mtx_dereference(sdata->local, sdata->default_multicast_key); sprintf(buf, "../keys/%d", key->debugfs.cnt); sdata->debugfs.default_multicast_key = debugfs_create_symlink("default_multicast_key", sdata->vif.debugfs_dir, buf); } } void ieee80211_debugfs_key_add_mgmt_default(struct ieee80211_sub_if_data *sdata) { char buf[50]; struct ieee80211_key *key; if (!sdata->vif.debugfs_dir) return; key = key_mtx_dereference(sdata->local, sdata->default_mgmt_key); if (key) { sprintf(buf, "../keys/%d", key->debugfs.cnt); sdata->debugfs.default_mgmt_key = debugfs_create_symlink("default_mgmt_key", sdata->vif.debugfs_dir, buf); } else ieee80211_debugfs_key_remove_mgmt_default(sdata); } void ieee80211_debugfs_key_remove_mgmt_default(struct ieee80211_sub_if_data *sdata) { if (!sdata) return; debugfs_remove(sdata->debugfs.default_mgmt_key); sdata->debugfs.default_mgmt_key = NULL; } void 
ieee80211_debugfs_key_sta_del(struct ieee80211_key *key, struct sta_info *sta) { debugfs_remove(key->debugfs.stalink); key->debugfs.stalink = NULL; } backport-iwlwifi-dkms-7906/net/mac80211/debugfs_key.h000066400000000000000000000022771351064634500222730ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __MAC80211_DEBUGFS_KEY_H #define __MAC80211_DEBUGFS_KEY_H #ifdef CPTCFG_MAC80211_DEBUGFS void ieee80211_debugfs_key_add(struct ieee80211_key *key); void ieee80211_debugfs_key_remove(struct ieee80211_key *key); void ieee80211_debugfs_key_update_default(struct ieee80211_sub_if_data *sdata); void ieee80211_debugfs_key_add_mgmt_default( struct ieee80211_sub_if_data *sdata); void ieee80211_debugfs_key_remove_mgmt_default( struct ieee80211_sub_if_data *sdata); void ieee80211_debugfs_key_sta_del(struct ieee80211_key *key, struct sta_info *sta); #else static inline void ieee80211_debugfs_key_add(struct ieee80211_key *key) {} static inline void ieee80211_debugfs_key_remove(struct ieee80211_key *key) {} static inline void ieee80211_debugfs_key_update_default( struct ieee80211_sub_if_data *sdata) {} static inline void ieee80211_debugfs_key_add_mgmt_default( struct ieee80211_sub_if_data *sdata) {} static inline void ieee80211_debugfs_key_remove_mgmt_default( struct ieee80211_sub_if_data *sdata) {} static inline void ieee80211_debugfs_key_sta_del(struct ieee80211_key *key, struct sta_info *sta) {} #endif #endif /* __MAC80211_DEBUGFS_KEY_H */ backport-iwlwifi-dkms-7906/net/mac80211/debugfs_netdev.c000066400000000000000000000602721351064634500227620ustar00rootroot00000000000000/* * Copyright (c) 2006 Jiri Benc * Copyright 2007 Johannes Berg * Copyright 2015 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include "ieee80211_i.h" #include "rate.h" #include "debugfs.h" #include "debugfs_netdev.h" #include "driver-ops.h" static ssize_t ieee80211_if_read( struct ieee80211_sub_if_data *sdata, char __user *userbuf, size_t count, loff_t *ppos, ssize_t (*format)(const struct ieee80211_sub_if_data *, char *, int)) { char buf[200]; ssize_t ret = -EINVAL; read_lock(&dev_base_lock); ret = (*format)(sdata, buf, sizeof(buf)); read_unlock(&dev_base_lock); if (ret >= 0) ret = simple_read_from_buffer(userbuf, count, ppos, buf, ret); return ret; } static ssize_t ieee80211_if_write( struct ieee80211_sub_if_data *sdata, const char __user *userbuf, size_t count, loff_t *ppos, ssize_t (*write)(struct ieee80211_sub_if_data *, const char *, int)) { char buf[64]; ssize_t ret; if (count >= sizeof(buf)) return -E2BIG; if (copy_from_user(buf, userbuf, count)) return -EFAULT; buf[count] = '\0'; ret = -ENODEV; rtnl_lock(); ret = (*write)(sdata, buf, count); rtnl_unlock(); return ret; } #define IEEE80211_IF_FMT(name, field, format_string) \ static ssize_t ieee80211_if_fmt_##name( \ const struct ieee80211_sub_if_data *sdata, char *buf, \ int buflen) \ { \ return scnprintf(buf, buflen, format_string, sdata->field); \ } #define IEEE80211_IF_FMT_DEC(name, field) \ IEEE80211_IF_FMT(name, field, "%d\n") #define IEEE80211_IF_FMT_HEX(name, field) \ IEEE80211_IF_FMT(name, field, "%#x\n") #define IEEE80211_IF_FMT_LHEX(name, field) \ IEEE80211_IF_FMT(name, field, "%#lx\n") #define IEEE80211_IF_FMT_SIZE(name, field) \ IEEE80211_IF_FMT(name, field, "%zd\n") #define IEEE80211_IF_FMT_HEXARRAY(name, field) \ static ssize_t ieee80211_if_fmt_##name( \ const struct ieee80211_sub_if_data *sdata, \ char *buf, int buflen) \ { \ char *p = buf; \ int i; \ for (i = 0; i < sizeof(sdata->field); i++) { \ p += scnprintf(p, buflen + buf - p, "%.2x ", \ sdata->field[i]); \ } \ p += scnprintf(p, buflen + buf - p, "\n"); \ return p - buf; \ } #define IEEE80211_IF_FMT_ATOMIC(name, field) \ static ssize_t ieee80211_if_fmt_##name( \ const struct ieee80211_sub_if_data *sdata, \ char *buf, int buflen) \ { \ return scnprintf(buf, buflen, "%d\n", atomic_read(&sdata->field));\ } #define IEEE80211_IF_FMT_MAC(name, field) \ static ssize_t ieee80211_if_fmt_##name( \ const struct ieee80211_sub_if_data *sdata, char *buf, \ int buflen) \ { \ return scnprintf(buf, buflen, "%pM\n", sdata->field); \ } #define IEEE80211_IF_FMT_JIFFIES_TO_MS(name, field) \ static ssize_t ieee80211_if_fmt_##name( \ const struct ieee80211_sub_if_data *sdata, \ char *buf, int buflen) \ { \ return scnprintf(buf, buflen, "%d\n", \ jiffies_to_msecs(sdata->field)); \ } #define _IEEE80211_IF_FILE_OPS(name, _read, _write) \ static const struct file_operations name##_ops = { \ .read = (_read), \ .write = (_write), \ .open = simple_open, \ .llseek = generic_file_llseek, \ } #define _IEEE80211_IF_FILE_R_FN(name) \ static ssize_t ieee80211_if_read_##name(struct file *file, \ char __user *userbuf, \ size_t count, loff_t *ppos) \ { \ return ieee80211_if_read(file->private_data, \ userbuf, count, ppos, \ ieee80211_if_fmt_##name); \ } #define _IEEE80211_IF_FILE_W_FN(name) \ static ssize_t ieee80211_if_write_##name(struct file *file, \ const char __user *userbuf, \ size_t count, loff_t *ppos) \ { \ return ieee80211_if_write(file->private_data, userbuf, count, \ ppos, ieee80211_if_parse_##name); \ } #define IEEE80211_IF_FILE_R(name) \ _IEEE80211_IF_FILE_R_FN(name) \ _IEEE80211_IF_FILE_OPS(name, 
ieee80211_if_read_##name, NULL) #define IEEE80211_IF_FILE_W(name) \ _IEEE80211_IF_FILE_W_FN(name) \ _IEEE80211_IF_FILE_OPS(name, NULL, ieee80211_if_write_##name) #define IEEE80211_IF_FILE_RW(name) \ _IEEE80211_IF_FILE_R_FN(name) \ _IEEE80211_IF_FILE_W_FN(name) \ _IEEE80211_IF_FILE_OPS(name, ieee80211_if_read_##name, \ ieee80211_if_write_##name) #define IEEE80211_IF_FILE(name, field, format) \ IEEE80211_IF_FMT_##format(name, field) \ IEEE80211_IF_FILE_R(name) /* common attributes */ IEEE80211_IF_FILE(rc_rateidx_mask_2ghz, rc_rateidx_mask[NL80211_BAND_2GHZ], HEX); IEEE80211_IF_FILE(rc_rateidx_mask_5ghz, rc_rateidx_mask[NL80211_BAND_5GHZ], HEX); IEEE80211_IF_FILE(rc_rateidx_mcs_mask_2ghz, rc_rateidx_mcs_mask[NL80211_BAND_2GHZ], HEXARRAY); IEEE80211_IF_FILE(rc_rateidx_mcs_mask_5ghz, rc_rateidx_mcs_mask[NL80211_BAND_5GHZ], HEXARRAY); static ssize_t ieee80211_if_fmt_rc_rateidx_vht_mcs_mask_2ghz( const struct ieee80211_sub_if_data *sdata, char *buf, int buflen) { int i, len = 0; const u16 *mask = sdata->rc_rateidx_vht_mcs_mask[NL80211_BAND_2GHZ]; for (i = 0; i < NL80211_VHT_NSS_MAX; i++) len += scnprintf(buf + len, buflen - len, "%04x ", mask[i]); len += scnprintf(buf + len, buflen - len, "\n"); return len; } IEEE80211_IF_FILE_R(rc_rateidx_vht_mcs_mask_2ghz); static ssize_t ieee80211_if_fmt_rc_rateidx_vht_mcs_mask_5ghz( const struct ieee80211_sub_if_data *sdata, char *buf, int buflen) { int i, len = 0; const u16 *mask = sdata->rc_rateidx_vht_mcs_mask[NL80211_BAND_5GHZ]; for (i = 0; i < NL80211_VHT_NSS_MAX; i++) len += scnprintf(buf + len, buflen - len, "%04x ", mask[i]); len += scnprintf(buf + len, buflen - len, "\n"); return len; } IEEE80211_IF_FILE_R(rc_rateidx_vht_mcs_mask_5ghz); IEEE80211_IF_FILE(flags, flags, HEX); IEEE80211_IF_FILE(state, state, LHEX); IEEE80211_IF_FILE(txpower, vif.bss_conf.txpower, DEC); IEEE80211_IF_FILE(ap_power_level, ap_power_level, DEC); IEEE80211_IF_FILE(user_power_level, user_power_level, DEC); static ssize_t ieee80211_if_fmt_hw_queues(const struct ieee80211_sub_if_data *sdata, char *buf, int buflen) { int len; len = scnprintf(buf, buflen, "AC queues: VO:%d VI:%d BE:%d BK:%d\n", sdata->vif.hw_queue[IEEE80211_AC_VO], sdata->vif.hw_queue[IEEE80211_AC_VI], sdata->vif.hw_queue[IEEE80211_AC_BE], sdata->vif.hw_queue[IEEE80211_AC_BK]); if (sdata->vif.type == NL80211_IFTYPE_AP) len += scnprintf(buf + len, buflen - len, "cab queue: %d\n", sdata->vif.cab_queue); return len; } IEEE80211_IF_FILE_R(hw_queues); /* STA attributes */ IEEE80211_IF_FILE(bssid, u.mgd.bssid, MAC); IEEE80211_IF_FILE(aid, u.mgd.aid, DEC); IEEE80211_IF_FILE(beacon_timeout, u.mgd.beacon_timeout, JIFFIES_TO_MS); static int ieee80211_set_smps(struct ieee80211_sub_if_data *sdata, enum ieee80211_smps_mode smps_mode) { struct ieee80211_local *local = sdata->local; int err; if (!(local->hw.wiphy->features & NL80211_FEATURE_STATIC_SMPS) && smps_mode == IEEE80211_SMPS_STATIC) return -EINVAL; /* auto should be dynamic if in PS mode */ if (!(local->hw.wiphy->features & NL80211_FEATURE_DYNAMIC_SMPS) && (smps_mode == IEEE80211_SMPS_DYNAMIC || smps_mode == IEEE80211_SMPS_AUTOMATIC)) return -EINVAL; if (sdata->vif.type != NL80211_IFTYPE_STATION && sdata->vif.type != NL80211_IFTYPE_AP) return -EOPNOTSUPP; sdata_lock(sdata); if (sdata->vif.type == NL80211_IFTYPE_STATION) err = __ieee80211_request_smps_mgd(sdata, smps_mode); else err = __ieee80211_request_smps_ap(sdata, smps_mode); sdata_unlock(sdata); return err; } static const char *smps_modes[IEEE80211_SMPS_NUM_MODES] = { [IEEE80211_SMPS_AUTOMATIC] = "auto", 
[IEEE80211_SMPS_OFF] = "off", [IEEE80211_SMPS_STATIC] = "static", [IEEE80211_SMPS_DYNAMIC] = "dynamic", }; static ssize_t ieee80211_if_fmt_smps(const struct ieee80211_sub_if_data *sdata, char *buf, int buflen) { if (sdata->vif.type == NL80211_IFTYPE_STATION) return snprintf(buf, buflen, "request: %s\nused: %s\n", smps_modes[sdata->u.mgd.req_smps], smps_modes[sdata->smps_mode]); if (sdata->vif.type == NL80211_IFTYPE_AP) return snprintf(buf, buflen, "request: %s\nused: %s\n", smps_modes[sdata->u.ap.req_smps], smps_modes[sdata->smps_mode]); return -EINVAL; } static ssize_t ieee80211_if_parse_smps(struct ieee80211_sub_if_data *sdata, const char *buf, int buflen) { enum ieee80211_smps_mode mode; for (mode = 0; mode < IEEE80211_SMPS_NUM_MODES; mode++) { if (strncmp(buf, smps_modes[mode], buflen) == 0) { int err = ieee80211_set_smps(sdata, mode); if (!err) return buflen; return err; } } return -EINVAL; } IEEE80211_IF_FILE_RW(smps); static ssize_t ieee80211_if_parse_tkip_mic_test( struct ieee80211_sub_if_data *sdata, const char *buf, int buflen) { struct ieee80211_local *local = sdata->local; u8 addr[ETH_ALEN]; struct sk_buff *skb; struct ieee80211_hdr *hdr; __le16 fc; if (!mac_pton(buf, addr)) return -EINVAL; if (!ieee80211_sdata_running(sdata)) return -ENOTCONN; skb = dev_alloc_skb(local->hw.extra_tx_headroom + 24 + 100); if (!skb) return -ENOMEM; skb_reserve(skb, local->hw.extra_tx_headroom); hdr = skb_put_zero(skb, 24); fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA); switch (sdata->vif.type) { case NL80211_IFTYPE_AP: fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS); /* DA BSSID SA */ memcpy(hdr->addr1, addr, ETH_ALEN); memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN); memcpy(hdr->addr3, sdata->vif.addr, ETH_ALEN); break; case NL80211_IFTYPE_STATION: fc |= cpu_to_le16(IEEE80211_FCTL_TODS); /* BSSID SA DA */ sdata_lock(sdata); if (!sdata->u.mgd.associated) { sdata_unlock(sdata); dev_kfree_skb(skb); return -ENOTCONN; } memcpy(hdr->addr1, sdata->u.mgd.associated->bssid, ETH_ALEN); memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN); memcpy(hdr->addr3, addr, ETH_ALEN); sdata_unlock(sdata); break; default: dev_kfree_skb(skb); return -EOPNOTSUPP; } hdr->frame_control = fc; /* * Add some length to the test frame to make it look bit more valid. * The exact contents does not matter since the recipient is required * to drop this because of the Michael MIC failure. 
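 * Example (illustrative path; requires an associated STA or running
 * AP interface):
 *   echo "02:11:22:33:44:55" > \
 *     /sys/kernel/debug/ieee80211/phy0/netdev:wlan0/tkip_mic_test
 * queues one such frame to the given address; the receiver should log
 * a Michael MIC failure, and per the TKIP countermeasures rules two
 * failures within 60 seconds take the link down.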
*/ skb_put_zero(skb, 50); IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_TKIP_MIC_FAILURE; ieee80211_tx_skb(sdata, skb); return buflen; } IEEE80211_IF_FILE_W(tkip_mic_test); static ssize_t ieee80211_if_parse_beacon_loss( struct ieee80211_sub_if_data *sdata, const char *buf, int buflen) { if (!ieee80211_sdata_running(sdata) || !sdata->vif.bss_conf.assoc) return -ENOTCONN; ieee80211_beacon_loss(&sdata->vif); return buflen; } IEEE80211_IF_FILE_W(beacon_loss); static ssize_t ieee80211_if_fmt_uapsd_queues( const struct ieee80211_sub_if_data *sdata, char *buf, int buflen) { const struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; return snprintf(buf, buflen, "0x%x\n", ifmgd->uapsd_queues); } static ssize_t ieee80211_if_parse_uapsd_queues( struct ieee80211_sub_if_data *sdata, const char *buf, int buflen) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; u8 val; int ret; ret = kstrtou8(buf, 0, &val); if (ret) return ret; if (val & ~IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK) return -ERANGE; ifmgd->uapsd_queues = val; return buflen; } IEEE80211_IF_FILE_RW(uapsd_queues); static ssize_t ieee80211_if_fmt_uapsd_max_sp_len( const struct ieee80211_sub_if_data *sdata, char *buf, int buflen) { const struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; return snprintf(buf, buflen, "0x%x\n", ifmgd->uapsd_max_sp_len); } static ssize_t ieee80211_if_parse_uapsd_max_sp_len( struct ieee80211_sub_if_data *sdata, const char *buf, int buflen) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; unsigned long val; int ret; ret = kstrtoul(buf, 0, &val); if (ret) return -EINVAL; if (val & ~IEEE80211_WMM_IE_STA_QOSINFO_SP_MASK) return -ERANGE; ifmgd->uapsd_max_sp_len = val; return buflen; } IEEE80211_IF_FILE_RW(uapsd_max_sp_len); static ssize_t ieee80211_if_fmt_tdls_wider_bw( const struct ieee80211_sub_if_data *sdata, char *buf, int buflen) { const struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; bool tdls_wider_bw; tdls_wider_bw = ieee80211_hw_check(&sdata->local->hw, TDLS_WIDER_BW) && !ifmgd->tdls_wider_bw_prohibited; return snprintf(buf, buflen, "%d\n", tdls_wider_bw); } static ssize_t ieee80211_if_parse_tdls_wider_bw( struct ieee80211_sub_if_data *sdata, const char *buf, int buflen) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; u8 val; int ret; ret = kstrtou8(buf, 0, &val); if (ret) return ret; ifmgd->tdls_wider_bw_prohibited = !val; return buflen; } IEEE80211_IF_FILE_RW(tdls_wider_bw); /* AP attributes */ IEEE80211_IF_FILE(num_mcast_sta, u.ap.num_mcast_sta, ATOMIC); IEEE80211_IF_FILE(num_sta_ps, u.ap.ps.num_sta_ps, ATOMIC); IEEE80211_IF_FILE(dtim_count, u.ap.ps.dtim_count, DEC); IEEE80211_IF_FILE(num_mcast_sta_vlan, u.vlan.num_mcast_sta, ATOMIC); static ssize_t ieee80211_if_fmt_num_buffered_multicast( const struct ieee80211_sub_if_data *sdata, char *buf, int buflen) { return scnprintf(buf, buflen, "%u\n", skb_queue_len(&sdata->u.ap.ps.bc_buf)); } IEEE80211_IF_FILE_R(num_buffered_multicast); static ssize_t ieee80211_if_fmt_aqm( const struct ieee80211_sub_if_data *sdata, char *buf, int buflen) { struct ieee80211_local *local = sdata->local; struct txq_info *txqi = to_txq_info(sdata->vif.txq); int len; spin_lock_bh(&local->fq.lock); rcu_read_lock(); len = scnprintf(buf, buflen, "ac backlog-bytes backlog-packets new-flows drops marks overlimit collisions tx-bytes tx-packets\n" "%u %u %u %u %u %u %u %u %u %u\n", txqi->txq.ac, txqi->tin.backlog_bytes, txqi->tin.backlog_packets, txqi->tin.flows, txqi->cstats.drop_count, txqi->cstats.ecn_mark, txqi->tin.overlimit, txqi->tin.collisions, txqi->tin.tx_bytes, 
txqi->tin.tx_packets); rcu_read_unlock(); spin_unlock_bh(&local->fq.lock); return len; } IEEE80211_IF_FILE_R(aqm); IEEE80211_IF_FILE(multicast_to_unicast, u.ap.multicast_to_unicast, HEX); /* IBSS attributes */ static ssize_t ieee80211_if_fmt_tsf( const struct ieee80211_sub_if_data *sdata, char *buf, int buflen) { struct ieee80211_local *local = sdata->local; u64 tsf; tsf = drv_get_tsf(local, (struct ieee80211_sub_if_data *)sdata); return scnprintf(buf, buflen, "0x%016llx\n", (unsigned long long) tsf); } static ssize_t ieee80211_if_parse_tsf( struct ieee80211_sub_if_data *sdata, const char *buf, int buflen) { struct ieee80211_local *local = sdata->local; unsigned long long tsf; int ret; int tsf_is_delta = 0; if (strncmp(buf, "reset", 5) == 0) { if (local->ops->reset_tsf) { drv_reset_tsf(local, sdata); wiphy_info(local->hw.wiphy, "debugfs reset TSF\n"); } } else { if (buflen > 10 && buf[1] == '=') { if (buf[0] == '+') tsf_is_delta = 1; else if (buf[0] == '-') tsf_is_delta = -1; else return -EINVAL; buf += 2; } ret = kstrtoull(buf, 10, &tsf); if (ret < 0) return ret; if (tsf_is_delta && local->ops->offset_tsf) { drv_offset_tsf(local, sdata, tsf_is_delta * tsf); wiphy_info(local->hw.wiphy, "debugfs offset TSF by %018lld\n", tsf_is_delta * tsf); } else if (local->ops->set_tsf) { if (tsf_is_delta) tsf = drv_get_tsf(local, sdata) + tsf_is_delta * tsf; drv_set_tsf(local, sdata, tsf); wiphy_info(local->hw.wiphy, "debugfs set TSF to %#018llx\n", tsf); } } ieee80211_recalc_dtim(local, sdata); return buflen; } IEEE80211_IF_FILE_RW(tsf); /* WDS attributes */ IEEE80211_IF_FILE(peer, u.wds.remote_addr, MAC); #ifdef CPTCFG_MAC80211_MESH IEEE80211_IF_FILE(estab_plinks, u.mesh.estab_plinks, ATOMIC); /* Mesh stats attributes */ IEEE80211_IF_FILE(fwded_mcast, u.mesh.mshstats.fwded_mcast, DEC); IEEE80211_IF_FILE(fwded_unicast, u.mesh.mshstats.fwded_unicast, DEC); IEEE80211_IF_FILE(fwded_frames, u.mesh.mshstats.fwded_frames, DEC); IEEE80211_IF_FILE(dropped_frames_ttl, u.mesh.mshstats.dropped_frames_ttl, DEC); IEEE80211_IF_FILE(dropped_frames_congestion, u.mesh.mshstats.dropped_frames_congestion, DEC); IEEE80211_IF_FILE(dropped_frames_no_route, u.mesh.mshstats.dropped_frames_no_route, DEC); /* Mesh parameters */ IEEE80211_IF_FILE(dot11MeshMaxRetries, u.mesh.mshcfg.dot11MeshMaxRetries, DEC); IEEE80211_IF_FILE(dot11MeshRetryTimeout, u.mesh.mshcfg.dot11MeshRetryTimeout, DEC); IEEE80211_IF_FILE(dot11MeshConfirmTimeout, u.mesh.mshcfg.dot11MeshConfirmTimeout, DEC); IEEE80211_IF_FILE(dot11MeshHoldingTimeout, u.mesh.mshcfg.dot11MeshHoldingTimeout, DEC); IEEE80211_IF_FILE(dot11MeshTTL, u.mesh.mshcfg.dot11MeshTTL, DEC); IEEE80211_IF_FILE(element_ttl, u.mesh.mshcfg.element_ttl, DEC); IEEE80211_IF_FILE(auto_open_plinks, u.mesh.mshcfg.auto_open_plinks, DEC); IEEE80211_IF_FILE(dot11MeshMaxPeerLinks, u.mesh.mshcfg.dot11MeshMaxPeerLinks, DEC); IEEE80211_IF_FILE(dot11MeshHWMPactivePathTimeout, u.mesh.mshcfg.dot11MeshHWMPactivePathTimeout, DEC); IEEE80211_IF_FILE(dot11MeshHWMPpreqMinInterval, u.mesh.mshcfg.dot11MeshHWMPpreqMinInterval, DEC); IEEE80211_IF_FILE(dot11MeshHWMPperrMinInterval, u.mesh.mshcfg.dot11MeshHWMPperrMinInterval, DEC); IEEE80211_IF_FILE(dot11MeshHWMPnetDiameterTraversalTime, u.mesh.mshcfg.dot11MeshHWMPnetDiameterTraversalTime, DEC); IEEE80211_IF_FILE(dot11MeshHWMPmaxPREQretries, u.mesh.mshcfg.dot11MeshHWMPmaxPREQretries, DEC); IEEE80211_IF_FILE(path_refresh_time, u.mesh.mshcfg.path_refresh_time, DEC); IEEE80211_IF_FILE(min_discovery_timeout, u.mesh.mshcfg.min_discovery_timeout, DEC); 
IEEE80211_IF_FILE(dot11MeshHWMPRootMode, u.mesh.mshcfg.dot11MeshHWMPRootMode, DEC); IEEE80211_IF_FILE(dot11MeshGateAnnouncementProtocol, u.mesh.mshcfg.dot11MeshGateAnnouncementProtocol, DEC); IEEE80211_IF_FILE(dot11MeshHWMPRannInterval, u.mesh.mshcfg.dot11MeshHWMPRannInterval, DEC); IEEE80211_IF_FILE(dot11MeshForwarding, u.mesh.mshcfg.dot11MeshForwarding, DEC); IEEE80211_IF_FILE(rssi_threshold, u.mesh.mshcfg.rssi_threshold, DEC); IEEE80211_IF_FILE(ht_opmode, u.mesh.mshcfg.ht_opmode, DEC); IEEE80211_IF_FILE(dot11MeshHWMPactivePathToRootTimeout, u.mesh.mshcfg.dot11MeshHWMPactivePathToRootTimeout, DEC); IEEE80211_IF_FILE(dot11MeshHWMProotInterval, u.mesh.mshcfg.dot11MeshHWMProotInterval, DEC); IEEE80211_IF_FILE(dot11MeshHWMPconfirmationInterval, u.mesh.mshcfg.dot11MeshHWMPconfirmationInterval, DEC); IEEE80211_IF_FILE(power_mode, u.mesh.mshcfg.power_mode, DEC); IEEE80211_IF_FILE(dot11MeshAwakeWindowDuration, u.mesh.mshcfg.dot11MeshAwakeWindowDuration, DEC); IEEE80211_IF_FILE(dot11MeshConnectedToMeshGate, u.mesh.mshcfg.dot11MeshConnectedToMeshGate, DEC); #endif #define DEBUGFS_ADD_MODE(name, mode) \ debugfs_create_file(#name, mode, sdata->vif.debugfs_dir, \ sdata, &name##_ops); #define DEBUGFS_ADD(name) DEBUGFS_ADD_MODE(name, 0400) static void add_common_files(struct ieee80211_sub_if_data *sdata) { DEBUGFS_ADD(rc_rateidx_mask_2ghz); DEBUGFS_ADD(rc_rateidx_mask_5ghz); DEBUGFS_ADD(rc_rateidx_mcs_mask_2ghz); DEBUGFS_ADD(rc_rateidx_mcs_mask_5ghz); DEBUGFS_ADD(rc_rateidx_vht_mcs_mask_2ghz); DEBUGFS_ADD(rc_rateidx_vht_mcs_mask_5ghz); DEBUGFS_ADD(hw_queues); if (sdata->local->ops->wake_tx_queue) DEBUGFS_ADD(aqm); } static void add_sta_files(struct ieee80211_sub_if_data *sdata) { DEBUGFS_ADD(bssid); DEBUGFS_ADD(aid); DEBUGFS_ADD(beacon_timeout); DEBUGFS_ADD_MODE(smps, 0600); DEBUGFS_ADD_MODE(tkip_mic_test, 0200); DEBUGFS_ADD_MODE(beacon_loss, 0200); DEBUGFS_ADD_MODE(uapsd_queues, 0600); DEBUGFS_ADD_MODE(uapsd_max_sp_len, 0600); DEBUGFS_ADD_MODE(tdls_wider_bw, 0600); } static void add_ap_files(struct ieee80211_sub_if_data *sdata) { DEBUGFS_ADD(num_mcast_sta); DEBUGFS_ADD_MODE(smps, 0600); DEBUGFS_ADD(num_sta_ps); DEBUGFS_ADD(dtim_count); DEBUGFS_ADD(num_buffered_multicast); DEBUGFS_ADD_MODE(tkip_mic_test, 0200); DEBUGFS_ADD_MODE(multicast_to_unicast, 0600); } static void add_vlan_files(struct ieee80211_sub_if_data *sdata) { /* add num_mcast_sta_vlan using name num_mcast_sta */ debugfs_create_file("num_mcast_sta", 0400, sdata->vif.debugfs_dir, sdata, &num_mcast_sta_vlan_ops); } static void add_ibss_files(struct ieee80211_sub_if_data *sdata) { DEBUGFS_ADD_MODE(tsf, 0600); } static void add_wds_files(struct ieee80211_sub_if_data *sdata) { DEBUGFS_ADD(peer); } #ifdef CPTCFG_MAC80211_MESH static void add_mesh_files(struct ieee80211_sub_if_data *sdata) { DEBUGFS_ADD_MODE(tsf, 0600); DEBUGFS_ADD_MODE(estab_plinks, 0400); } static void add_mesh_stats(struct ieee80211_sub_if_data *sdata) { struct dentry *dir = debugfs_create_dir("mesh_stats", sdata->vif.debugfs_dir); #define MESHSTATS_ADD(name)\ debugfs_create_file(#name, 0400, dir, sdata, &name##_ops); MESHSTATS_ADD(fwded_mcast); MESHSTATS_ADD(fwded_unicast); MESHSTATS_ADD(fwded_frames); MESHSTATS_ADD(dropped_frames_ttl); MESHSTATS_ADD(dropped_frames_no_route); MESHSTATS_ADD(dropped_frames_congestion); #undef MESHSTATS_ADD } static void add_mesh_config(struct ieee80211_sub_if_data *sdata) { struct dentry *dir = debugfs_create_dir("mesh_config", sdata->vif.debugfs_dir); #define MESHPARAMS_ADD(name) \ debugfs_create_file(#name, 0600, dir, sdata, &name##_ops); 
MESHPARAMS_ADD(dot11MeshMaxRetries); MESHPARAMS_ADD(dot11MeshRetryTimeout); MESHPARAMS_ADD(dot11MeshConfirmTimeout); MESHPARAMS_ADD(dot11MeshHoldingTimeout); MESHPARAMS_ADD(dot11MeshTTL); MESHPARAMS_ADD(element_ttl); MESHPARAMS_ADD(auto_open_plinks); MESHPARAMS_ADD(dot11MeshMaxPeerLinks); MESHPARAMS_ADD(dot11MeshHWMPactivePathTimeout); MESHPARAMS_ADD(dot11MeshHWMPpreqMinInterval); MESHPARAMS_ADD(dot11MeshHWMPperrMinInterval); MESHPARAMS_ADD(dot11MeshHWMPnetDiameterTraversalTime); MESHPARAMS_ADD(dot11MeshHWMPmaxPREQretries); MESHPARAMS_ADD(path_refresh_time); MESHPARAMS_ADD(min_discovery_timeout); MESHPARAMS_ADD(dot11MeshHWMPRootMode); MESHPARAMS_ADD(dot11MeshHWMPRannInterval); MESHPARAMS_ADD(dot11MeshForwarding); MESHPARAMS_ADD(dot11MeshGateAnnouncementProtocol); MESHPARAMS_ADD(rssi_threshold); MESHPARAMS_ADD(ht_opmode); MESHPARAMS_ADD(dot11MeshHWMPactivePathToRootTimeout); MESHPARAMS_ADD(dot11MeshHWMProotInterval); MESHPARAMS_ADD(dot11MeshHWMPconfirmationInterval); MESHPARAMS_ADD(power_mode); MESHPARAMS_ADD(dot11MeshAwakeWindowDuration); MESHPARAMS_ADD(dot11MeshConnectedToMeshGate); #undef MESHPARAMS_ADD } #endif static void add_files(struct ieee80211_sub_if_data *sdata) { if (!sdata->vif.debugfs_dir) return; DEBUGFS_ADD(flags); DEBUGFS_ADD(state); DEBUGFS_ADD(txpower); DEBUGFS_ADD(user_power_level); DEBUGFS_ADD(ap_power_level); if (sdata->vif.type != NL80211_IFTYPE_MONITOR) add_common_files(sdata); switch (sdata->vif.type) { case NL80211_IFTYPE_MESH_POINT: #ifdef CPTCFG_MAC80211_MESH add_mesh_files(sdata); add_mesh_stats(sdata); add_mesh_config(sdata); #endif break; case NL80211_IFTYPE_STATION: add_sta_files(sdata); break; case NL80211_IFTYPE_ADHOC: add_ibss_files(sdata); break; case NL80211_IFTYPE_AP: add_ap_files(sdata); break; case NL80211_IFTYPE_AP_VLAN: add_vlan_files(sdata); break; case NL80211_IFTYPE_WDS: add_wds_files(sdata); break; default: break; } } void ieee80211_debugfs_add_netdev(struct ieee80211_sub_if_data *sdata) { char buf[10+IFNAMSIZ]; sprintf(buf, "netdev:%s", sdata->name); sdata->vif.debugfs_dir = debugfs_create_dir(buf, sdata->local->hw.wiphy->debugfsdir); if (sdata->vif.debugfs_dir) sdata->debugfs.subdir_stations = debugfs_create_dir("stations", sdata->vif.debugfs_dir); add_files(sdata); } void ieee80211_debugfs_remove_netdev(struct ieee80211_sub_if_data *sdata) { if (!sdata->vif.debugfs_dir) return; debugfs_remove_recursive(sdata->vif.debugfs_dir); sdata->vif.debugfs_dir = NULL; sdata->debugfs.subdir_stations = NULL; } void ieee80211_debugfs_rename_netdev(struct ieee80211_sub_if_data *sdata) { struct dentry *dir; char buf[10 + IFNAMSIZ]; dir = sdata->vif.debugfs_dir; if (!dir) return; sprintf(buf, "netdev:%s", sdata->name); if (!debugfs_rename(dir->d_parent, dir, dir->d_parent, buf)) sdata_err(sdata, "debugfs: failed to rename debugfs dir to %s\n", buf); } backport-iwlwifi-dkms-7906/net/mac80211/debugfs_netdev.h000066400000000000000000000014031351064634500227560ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0 */ /* routines exported for debugfs handling */ #ifndef __IEEE80211_DEBUGFS_NETDEV_H #define __IEEE80211_DEBUGFS_NETDEV_H #include "ieee80211_i.h" #ifdef CPTCFG_MAC80211_DEBUGFS void ieee80211_debugfs_add_netdev(struct ieee80211_sub_if_data *sdata); void ieee80211_debugfs_remove_netdev(struct ieee80211_sub_if_data *sdata); void ieee80211_debugfs_rename_netdev(struct ieee80211_sub_if_data *sdata); #else static inline void ieee80211_debugfs_add_netdev( struct ieee80211_sub_if_data *sdata) {} static inline void ieee80211_debugfs_remove_netdev( 
struct ieee80211_sub_if_data *sdata) {} static inline void ieee80211_debugfs_rename_netdev( struct ieee80211_sub_if_data *sdata) {} #endif #endif /* __IEEE80211_DEBUGFS_NETDEV_H */ backport-iwlwifi-dkms-7906/net/mac80211/debugfs_sta.c000066400000000000000000001075301351064634500222630ustar00rootroot00000000000000/* * Copyright 2003-2005 Devicescape Software, Inc. * Copyright (c) 2006 Jiri Benc <jbenc@suse.cz> * Copyright 2007 Johannes Berg <johannes@sipsolutions.net> * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright(c) 2016 Intel Deutschland GmbH * Copyright (C) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/debugfs.h> #include <linux/ieee80211.h> #include "ieee80211_i.h" #include "debugfs.h" #include "debugfs_sta.h" #include "sta_info.h" #include "driver-ops.h" /* sta attributes */ #define STA_READ(name, field, format_string) \ static ssize_t sta_ ##name## _read(struct file *file, \ char __user *userbuf, \ size_t count, loff_t *ppos) \ { \ struct sta_info *sta = file->private_data; \ return mac80211_format_buffer(userbuf, count, ppos, \ format_string, sta->field); \ } #define STA_READ_D(name, field) STA_READ(name, field, "%d\n") #define STA_OPS(name) \ static const struct file_operations sta_ ##name## _ops = { \ .read = sta_##name##_read, \ .open = simple_open, \ .llseek = generic_file_llseek, \ } #ifdef CPTCFG_MAC80211_LATENCY_MEASUREMENTS #define STA_OPS_W(name) \ static const struct file_operations sta_ ##name## _ops = { \ .write = sta_##name##_write, \ .open = simple_open, \ .llseek = generic_file_llseek, \ } #endif /* CPTCFG_MAC80211_LATENCY_MEASUREMENTS */ #define STA_OPS_RW(name) \ static const struct file_operations sta_ ##name## _ops = { \ .read = sta_##name##_read, \ .write = sta_##name##_write, \ .open = simple_open, \ .llseek = generic_file_llseek, \ } #define STA_FILE(name, field, format) \ STA_READ_##format(name, field) \ STA_OPS(name) STA_FILE(aid, sta.aid, D); static const char * const sta_flag_names[] = { #define FLAG(F) [WLAN_STA_##F] = #F FLAG(AUTH), FLAG(ASSOC), FLAG(PS_STA), FLAG(AUTHORIZED), FLAG(SHORT_PREAMBLE), FLAG(WDS), FLAG(CLEAR_PS_FILT), FLAG(MFP), FLAG(BLOCK_BA), FLAG(PS_DRIVER), FLAG(PSPOLL), FLAG(TDLS_PEER), FLAG(TDLS_PEER_AUTH), FLAG(TDLS_INITIATOR), FLAG(TDLS_CHAN_SWITCH), FLAG(TDLS_OFF_CHANNEL), FLAG(TDLS_WIDER_BW), FLAG(UAPSD), FLAG(SP), FLAG(4ADDR_EVENT), FLAG(INSERTED), FLAG(RATE_CONTROL), FLAG(TOFFSET_KNOWN), FLAG(MPSP_OWNER), FLAG(MPSP_RECIPIENT), FLAG(PS_DELIVER), #undef FLAG }; static ssize_t sta_flags_read(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { char buf[16 * NUM_WLAN_STA_FLAGS], *pos = buf; char *end = buf + sizeof(buf) - 1; struct sta_info *sta = file->private_data; unsigned int flg; BUILD_BUG_ON(ARRAY_SIZE(sta_flag_names) != NUM_WLAN_STA_FLAGS); for (flg = 0; flg < NUM_WLAN_STA_FLAGS; flg++) { if (test_sta_flag(sta, flg)) pos += scnprintf(pos, end - pos, "%s\n", sta_flag_names[flg]); } return simple_read_from_buffer(userbuf, count, ppos, buf, strlen(buf)); } STA_OPS(flags); static ssize_t sta_num_ps_buf_frames_read(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { struct sta_info *sta = file->private_data; char buf[17*IEEE80211_NUM_ACS], *p = buf; int ac; for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) p += scnprintf(p, sizeof(buf)+buf-p, "AC%d: %d\n", ac, skb_queue_len(&sta->ps_tx_buf[ac]) + skb_queue_len(&sta->tx_filtered[ac])); return simple_read_from_buffer(userbuf,
count, ppos, buf, p - buf); } STA_OPS(num_ps_buf_frames); static ssize_t sta_last_seq_ctrl_read(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { char buf[15*IEEE80211_NUM_TIDS], *p = buf; int i; struct sta_info *sta = file->private_data; for (i = 0; i < IEEE80211_NUM_TIDS; i++) p += scnprintf(p, sizeof(buf)+buf-p, "%x ", le16_to_cpu(sta->last_seq_ctrl[i])); p += scnprintf(p, sizeof(buf)+buf-p, "\n"); return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf); } STA_OPS(last_seq_ctrl); #define AQM_TXQ_ENTRY_LEN 130 static ssize_t sta_aqm_read(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { struct sta_info *sta = file->private_data; struct ieee80211_local *local = sta->local; size_t bufsz = AQM_TXQ_ENTRY_LEN * (IEEE80211_NUM_TIDS + 2); char *buf = kzalloc(bufsz, GFP_KERNEL), *p = buf; struct txq_info *txqi; ssize_t rv; int i; if (!buf) return -ENOMEM; spin_lock_bh(&local->fq.lock); rcu_read_lock(); p += scnprintf(p, bufsz+buf-p, "target %uus interval %uus ecn %s\n", codel_time_to_us(sta->cparams.target), codel_time_to_us(sta->cparams.interval), sta->cparams.ecn ? "yes" : "no"); p += scnprintf(p, bufsz+buf-p, "tid ac backlog-bytes backlog-packets new-flows drops marks overlimit collisions tx-bytes tx-packets flags\n"); for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) { if (!sta->sta.txq[i]) continue; txqi = to_txq_info(sta->sta.txq[i]); p += scnprintf(p, bufsz+buf-p, "%d %d %u %u %u %u %u %u %u %u %u 0x%lx(%s%s%s)\n", txqi->txq.tid, txqi->txq.ac, txqi->tin.backlog_bytes, txqi->tin.backlog_packets, txqi->tin.flows, txqi->cstats.drop_count, txqi->cstats.ecn_mark, txqi->tin.overlimit, txqi->tin.collisions, txqi->tin.tx_bytes, txqi->tin.tx_packets, txqi->flags, txqi->flags & (1<<IEEE80211_TXQ_STOP) ? "STOP" : "RUN", txqi->flags & (1<<IEEE80211_TXQ_AMPDU) ? " AMPDU" : "", txqi->flags & (1<<IEEE80211_TXQ_NO_AMSDU) ? " NO-AMSDU" : ""); } rcu_read_unlock(); spin_unlock_bh(&local->fq.lock); rv = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf); kfree(buf); return rv; } STA_OPS(aqm); static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { char buf[71 + IEEE80211_NUM_TIDS * 40], *p = buf; int i; struct sta_info *sta = file->private_data; struct tid_ampdu_rx *tid_rx; struct tid_ampdu_tx *tid_tx; rcu_read_lock(); p += scnprintf(p, sizeof(buf) + buf - p, "next dialog_token: %#02x\n", sta->ampdu_mlme.dialog_token_allocator + 1); p += scnprintf(p, sizeof(buf) + buf - p, "TID\t\tRX\tDTKN\tSSN\t\tTX\tDTKN\tpending\n"); for (i = 0; i < IEEE80211_NUM_TIDS; i++) { bool tid_rx_valid; tid_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[i]); tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[i]); tid_rx_valid = test_bit(i, sta->ampdu_mlme.agg_session_valid); p += scnprintf(p, sizeof(buf) + buf - p, "%02d", i); p += scnprintf(p, sizeof(buf) + buf - p, "\t\t%x", tid_rx_valid); p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.2x", tid_rx_valid ? sta->ampdu_mlme.tid_rx_token[i] : 0); p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.3x", tid_rx ? tid_rx->ssn : 0); p += scnprintf(p, sizeof(buf) + buf - p, "\t\t%x", !!tid_tx); p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.2x", tid_tx ? tid_tx->dialog_token : 0); p += scnprintf(p, sizeof(buf) + buf - p, "\t%03d", tid_tx ?
skb_queue_len(&tid_tx->pending) : 0); p += scnprintf(p, sizeof(buf) + buf - p, "\n"); } rcu_read_unlock(); return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf); } static ssize_t sta_agg_status_write(struct file *file, const char __user *userbuf, size_t count, loff_t *ppos) { char _buf[25] = {}, *buf = _buf; struct sta_info *sta = file->private_data; bool start, tx; unsigned long tid; char *pos; int ret, timeout = 5000; if (count > sizeof(_buf)) return -EINVAL; if (copy_from_user(buf, userbuf, count)) return -EFAULT; buf[sizeof(_buf) - 1] = '\0'; pos = buf; buf = strsep(&pos, " "); if (!buf) return -EINVAL; if (!strcmp(buf, "tx")) tx = true; else if (!strcmp(buf, "rx")) tx = false; else return -EINVAL; buf = strsep(&pos, " "); if (!buf) return -EINVAL; if (!strcmp(buf, "start")) { start = true; if (!tx) return -EINVAL; } else if (!strcmp(buf, "stop")) { start = false; } else { return -EINVAL; } buf = strsep(&pos, " "); if (!buf) return -EINVAL; if (sscanf(buf, "timeout=%d", &timeout) == 1) { buf = strsep(&pos, " "); if (!buf || !tx || !start) return -EINVAL; } ret = kstrtoul(buf, 0, &tid); if (ret || tid >= IEEE80211_NUM_TIDS) return -EINVAL; if (tx) { if (start) ret = ieee80211_start_tx_ba_session(&sta->sta, tid, timeout); else ret = ieee80211_stop_tx_ba_session(&sta->sta, tid); } else { __ieee80211_stop_rx_ba_session(sta, tid, WLAN_BACK_RECIPIENT, 3, true); ret = 0; } return ret ?: count; } STA_OPS_RW(agg_status); static ssize_t sta_ht_capa_read(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { #define PRINT_HT_CAP(_cond, _str) \ do { \ if (_cond) \ p += scnprintf(p, sizeof(buf)+buf-p, "\t" _str "\n"); \ } while (0) char buf[512], *p = buf; int i; struct sta_info *sta = file->private_data; struct ieee80211_sta_ht_cap *htc = &sta->sta.ht_cap; p += scnprintf(p, sizeof(buf) + buf - p, "ht %ssupported\n", htc->ht_supported ? "" : "not "); if (htc->ht_supported) { p += scnprintf(p, sizeof(buf)+buf-p, "cap: %#.4x\n", htc->cap); PRINT_HT_CAP((htc->cap & BIT(0)), "RX LDPC"); PRINT_HT_CAP((htc->cap & BIT(1)), "HT20/HT40"); PRINT_HT_CAP(!(htc->cap & BIT(1)), "HT20"); PRINT_HT_CAP(((htc->cap >> 2) & 0x3) == 0, "Static SM Power Save"); PRINT_HT_CAP(((htc->cap >> 2) & 0x3) == 1, "Dynamic SM Power Save"); PRINT_HT_CAP(((htc->cap >> 2) & 0x3) == 3, "SM Power Save disabled"); PRINT_HT_CAP((htc->cap & BIT(4)), "RX Greenfield"); PRINT_HT_CAP((htc->cap & BIT(5)), "RX HT20 SGI"); PRINT_HT_CAP((htc->cap & BIT(6)), "RX HT40 SGI"); PRINT_HT_CAP((htc->cap & BIT(7)), "TX STBC"); PRINT_HT_CAP(((htc->cap >> 8) & 0x3) == 0, "No RX STBC"); PRINT_HT_CAP(((htc->cap >> 8) & 0x3) == 1, "RX STBC 1-stream"); PRINT_HT_CAP(((htc->cap >> 8) & 0x3) == 2, "RX STBC 2-streams"); PRINT_HT_CAP(((htc->cap >> 8) & 0x3) == 3, "RX STBC 3-streams"); PRINT_HT_CAP((htc->cap & BIT(10)), "HT Delayed Block Ack"); PRINT_HT_CAP(!(htc->cap & BIT(11)), "Max AMSDU length: " "3839 bytes"); PRINT_HT_CAP((htc->cap & BIT(11)), "Max AMSDU length: " "7935 bytes"); /* * For beacons and probe response this would mean the BSS * does or does not allow the usage of DSSS/CCK HT40. * Otherwise it means the STA does or does not use * DSSS/CCK HT40. 
*/ PRINT_HT_CAP((htc->cap & BIT(12)), "DSSS/CCK HT40"); PRINT_HT_CAP(!(htc->cap & BIT(12)), "No DSSS/CCK HT40"); /* BIT(13) is reserved */ PRINT_HT_CAP((htc->cap & BIT(14)), "40 MHz Intolerant"); PRINT_HT_CAP((htc->cap & BIT(15)), "L-SIG TXOP protection"); p += scnprintf(p, sizeof(buf)+buf-p, "ampdu factor/density: %d/%d\n", htc->ampdu_factor, htc->ampdu_density); p += scnprintf(p, sizeof(buf)+buf-p, "MCS mask:"); for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++) p += scnprintf(p, sizeof(buf)+buf-p, " %.2x", htc->mcs.rx_mask[i]); p += scnprintf(p, sizeof(buf)+buf-p, "\n"); /* If not set this is meaningless */ if (le16_to_cpu(htc->mcs.rx_highest)) { p += scnprintf(p, sizeof(buf)+buf-p, "MCS rx highest: %d Mbps\n", le16_to_cpu(htc->mcs.rx_highest)); } p += scnprintf(p, sizeof(buf)+buf-p, "MCS tx params: %x\n", htc->mcs.tx_params); } return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf); } STA_OPS(ht_capa); static ssize_t sta_vht_capa_read(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { char buf[512], *p = buf; struct sta_info *sta = file->private_data; struct ieee80211_sta_vht_cap *vhtc = &sta->sta.vht_cap; p += scnprintf(p, sizeof(buf) + buf - p, "VHT %ssupported\n", vhtc->vht_supported ? "" : "not "); if (vhtc->vht_supported) { p += scnprintf(p, sizeof(buf) + buf - p, "cap: %#.8x\n", vhtc->cap); #define PFLAG(a, b) \ do { \ if (vhtc->cap & IEEE80211_VHT_CAP_ ## a) \ p += scnprintf(p, sizeof(buf) + buf - p, \ "\t\t%s\n", b); \ } while (0) switch (vhtc->cap & 0x3) { case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895: p += scnprintf(p, sizeof(buf) + buf - p, "\t\tMAX-MPDU-3895\n"); break; case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991: p += scnprintf(p, sizeof(buf) + buf - p, "\t\tMAX-MPDU-7991\n"); break; case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454: p += scnprintf(p, sizeof(buf) + buf - p, "\t\tMAX-MPDU-11454\n"); break; default: p += scnprintf(p, sizeof(buf) + buf - p, "\t\tMAX-MPDU-UNKNOWN\n"); } switch (vhtc->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) { case 0: p += scnprintf(p, sizeof(buf) + buf - p, "\t\t80Mhz\n"); break; case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ: p += scnprintf(p, sizeof(buf) + buf - p, "\t\t160Mhz\n"); break; case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ: p += scnprintf(p, sizeof(buf) + buf - p, "\t\t80+80Mhz\n"); break; default: p += scnprintf(p, sizeof(buf) + buf - p, "\t\tUNKNOWN-MHZ: 0x%x\n", (vhtc->cap >> 2) & 0x3); } PFLAG(RXLDPC, "RXLDPC"); PFLAG(SHORT_GI_80, "SHORT-GI-80"); PFLAG(SHORT_GI_160, "SHORT-GI-160"); PFLAG(TXSTBC, "TXSTBC"); p += scnprintf(p, sizeof(buf) + buf - p, "\t\tRXSTBC_%d\n", (vhtc->cap >> 8) & 0x7); PFLAG(SU_BEAMFORMER_CAPABLE, "SU-BEAMFORMER-CAPABLE"); PFLAG(SU_BEAMFORMEE_CAPABLE, "SU-BEAMFORMEE-CAPABLE"); p += scnprintf(p, sizeof(buf) + buf - p, "\t\tBEAMFORMEE-STS: 0x%x\n", (vhtc->cap & IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK) >> IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT); p += scnprintf(p, sizeof(buf) + buf - p, "\t\tSOUNDING-DIMENSIONS: 0x%x\n", (vhtc->cap & IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK) >> IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT); PFLAG(MU_BEAMFORMER_CAPABLE, "MU-BEAMFORMER-CAPABLE"); PFLAG(MU_BEAMFORMEE_CAPABLE, "MU-BEAMFORMEE-CAPABLE"); PFLAG(VHT_TXOP_PS, "TXOP-PS"); PFLAG(HTC_VHT, "HTC-VHT"); p += scnprintf(p, sizeof(buf) + buf - p, "\t\tMPDU-LENGTH-EXPONENT: 0x%x\n", (vhtc->cap & IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK) >> IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT); PFLAG(VHT_LINK_ADAPTATION_VHT_UNSOL_MFB, "LINK-ADAPTATION-VHT-UNSOL-MFB"); p += scnprintf(p, 
sizeof(buf) + buf - p, "\t\tLINK-ADAPTATION-VHT-MRQ-MFB: 0x%x\n", (vhtc->cap & IEEE80211_VHT_CAP_VHT_LINK_ADAPTATION_VHT_MRQ_MFB) >> 26); PFLAG(RX_ANTENNA_PATTERN, "RX-ANTENNA-PATTERN"); PFLAG(TX_ANTENNA_PATTERN, "TX-ANTENNA-PATTERN"); p += scnprintf(p, sizeof(buf)+buf-p, "RX MCS: %.4x\n", le16_to_cpu(vhtc->vht_mcs.rx_mcs_map)); if (vhtc->vht_mcs.rx_highest) p += scnprintf(p, sizeof(buf)+buf-p, "MCS RX highest: %d Mbps\n", le16_to_cpu(vhtc->vht_mcs.rx_highest)); p += scnprintf(p, sizeof(buf)+buf-p, "TX MCS: %.4x\n", le16_to_cpu(vhtc->vht_mcs.tx_mcs_map)); if (vhtc->vht_mcs.tx_highest) p += scnprintf(p, sizeof(buf)+buf-p, "MCS TX highest: %d Mbps\n", le16_to_cpu(vhtc->vht_mcs.tx_highest)); #undef PFLAG } return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf); } STA_OPS(vht_capa); static ssize_t sta_he_capa_read(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { char *buf, *p; size_t buf_sz = PAGE_SIZE; struct sta_info *sta = file->private_data; struct ieee80211_sta_he_cap *hec = &sta->sta.he_cap; struct ieee80211_he_mcs_nss_supp *nss = &hec->he_mcs_nss_supp; u8 ppe_size; u8 *cap; int i; ssize_t ret; buf = kmalloc(buf_sz, GFP_KERNEL); if (!buf) return -ENOMEM; p = buf; p += scnprintf(p, buf_sz + buf - p, "HE %ssupported\n", hec->has_he ? "" : "not "); if (!hec->has_he) goto out; cap = hec->he_cap_elem.mac_cap_info; p += scnprintf(p, buf_sz + buf - p, "MAC-CAP: %#.2x %#.2x %#.2x %#.2x %#.2x %#.2x\n", cap[0], cap[1], cap[2], cap[3], cap[4], cap[5]); #define PRINT(fmt, ...) \ p += scnprintf(p, buf_sz + buf - p, "\t\t" fmt "\n", \ ##__VA_ARGS__) #define PFLAG(t, n, a, b) \ do { \ if (cap[n] & IEEE80211_HE_##t##_CAP##n##_##a) \ PRINT("%s", b); \ } while (0) #define PFLAG_RANGE(t, i, n, s, m, off, fmt) \ do { \ u8 msk = IEEE80211_HE_##t##_CAP##i##_##n##_MASK; \ u8 idx = ((cap[i] & msk) >> (ffs(msk) - 1)) + off; \ PRINT(fmt, (s << idx) + (m * idx)); \ } while (0) #define PFLAG_RANGE_DEFAULT(t, i, n, s, m, off, fmt, a, b) \ do { \ if (cap[i] == IEEE80211_HE_##t ##_CAP##i##_##n##_##a) { \ PRINT("%s", b); \ break; \ } \ PFLAG_RANGE(t, i, n, s, m, off, fmt); \ } while (0) PFLAG(MAC, 0, HTC_HE, "HTC-HE"); PFLAG(MAC, 0, TWT_REQ, "TWT-REQ"); PFLAG(MAC, 0, TWT_RES, "TWT-RES"); PFLAG_RANGE_DEFAULT(MAC, 0, DYNAMIC_FRAG, 0, 1, 0, "DYNAMIC-FRAG-LEVEL-%d", NOT_SUPP, "NOT-SUPP"); PFLAG_RANGE_DEFAULT(MAC, 0, MAX_NUM_FRAG_MSDU, 1, 0, 0, "MAX-NUM-FRAG-MSDU-%d", UNLIMITED, "UNLIMITED"); PFLAG_RANGE_DEFAULT(MAC, 1, MIN_FRAG_SIZE, 128, 0, -1, "MIN-FRAG-SIZE-%d", UNLIMITED, "UNLIMITED"); PFLAG_RANGE_DEFAULT(MAC, 1, TF_MAC_PAD_DUR, 0, 8, 0, "TF-MAC-PAD-DUR-%dUS", MASK, "UNKNOWN"); PFLAG_RANGE(MAC, 1, MULTI_TID_AGG_RX_QOS, 0, 1, 1, "MULTI-TID-AGG-RX-QOS-%d"); if (cap[0] & IEEE80211_HE_MAC_CAP0_HTC_HE) { switch (((cap[2] << 1) | (cap[1] >> 7)) & 0x3) { case 0: PRINT("LINK-ADAPTATION-NO-FEEDBACK"); break; case 1: PRINT("LINK-ADAPTATION-RESERVED"); break; case 2: PRINT("LINK-ADAPTATION-UNSOLICITED-FEEDBACK"); break; case 3: PRINT("LINK-ADAPTATION-BOTH"); break; } } PFLAG(MAC, 2, ALL_ACK, "ALL-ACK"); PFLAG(MAC, 2, TRS, "TRS"); PFLAG(MAC, 2, BSR, "BSR"); PFLAG(MAC, 2, BCAST_TWT, "BCAST-TWT"); PFLAG(MAC, 2, 32BIT_BA_BITMAP, "32BIT-BA-BITMAP"); PFLAG(MAC, 2, MU_CASCADING, "MU-CASCADING"); PFLAG(MAC, 2, ACK_EN, "ACK-EN"); PFLAG(MAC, 3, OMI_CONTROL, "OMI-CONTROL"); PFLAG(MAC, 3, OFDMA_RA, "OFDMA-RA"); switch (cap[3] & IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK) { case IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_USE_VHT: PRINT("MAX-AMPDU-LEN-EXP-USE-VHT"); break; case 
IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_VHT_1: PRINT("MAX-AMPDU-LEN-EXP-VHT-1"); break; case IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_VHT_2: PRINT("MAX-AMPDU-LEN-EXP-VHT-2"); break; case IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_RESERVED: PRINT("MAX-AMPDU-LEN-EXP-RESERVED"); break; } PFLAG(MAC, 3, AMSDU_FRAG, "AMSDU-FRAG"); PFLAG(MAC, 3, FLEX_TWT_SCHED, "FLEX-TWT-SCHED"); PFLAG(MAC, 3, RX_CTRL_FRAME_TO_MULTIBSS, "RX-CTRL-FRAME-TO-MULTIBSS"); PFLAG(MAC, 4, BSRP_BQRP_A_MPDU_AGG, "BSRP-BQRP-A-MPDU-AGG"); PFLAG(MAC, 4, QTP, "QTP"); PFLAG(MAC, 4, BQR, "BQR"); PFLAG(MAC, 4, SRP_RESP, "SRP-RESP"); PFLAG(MAC, 4, NDP_FB_REP, "NDP-FB-REP"); PFLAG(MAC, 4, OPS, "OPS"); PFLAG(MAC, 4, AMDSU_IN_AMPDU, "AMSDU-IN-AMPDU"); PRINT("MULTI-TID-AGG-TX-QOS-%d", ((cap[5] << 1) | (cap[4] >> 7)) & 0x7); PFLAG(MAC, 5, SUBCHAN_SELECVITE_TRANSMISSION, "SUBCHAN-SELECVITE-TRANSMISSION"); PFLAG(MAC, 5, UL_2x996_TONE_RU, "UL-2x996-TONE-RU"); PFLAG(MAC, 5, OM_CTRL_UL_MU_DATA_DIS_RX, "OM-CTRL-UL-MU-DATA-DIS-RX"); PFLAG(MAC, 5, HE_DYNAMIC_SM_PS, "HE-DYNAMIC-SM-PS"); PFLAG(MAC, 5, PUNCTURED_SOUNDING, "PUNCTURED-SOUNDING"); PFLAG(MAC, 5, HT_VHT_TRIG_FRAME_RX, "HT-VHT-TRIG-FRAME-RX"); cap = hec->he_cap_elem.phy_cap_info; p += scnprintf(p, buf_sz + buf - p, "PHY CAP: %#.2x %#.2x %#.2x %#.2x %#.2x %#.2x %#.2x %#.2x %#.2x %#.2x %#.2x\n", cap[0], cap[1], cap[2], cap[3], cap[4], cap[5], cap[6], cap[7], cap[8], cap[9], cap[10]); PFLAG(PHY, 0, CHANNEL_WIDTH_SET_40MHZ_IN_2G, "CHANNEL-WIDTH-SET-40MHZ-IN-2G"); PFLAG(PHY, 0, CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G, "CHANNEL-WIDTH-SET-40MHZ-80MHZ-IN-5G"); PFLAG(PHY, 0, CHANNEL_WIDTH_SET_160MHZ_IN_5G, "CHANNEL-WIDTH-SET-160MHZ-IN-5G"); PFLAG(PHY, 0, CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G, "CHANNEL-WIDTH-SET-80PLUS80-MHZ-IN-5G"); PFLAG(PHY, 0, CHANNEL_WIDTH_SET_RU_MAPPING_IN_2G, "CHANNEL-WIDTH-SET-RU-MAPPING-IN-2G"); PFLAG(PHY, 0, CHANNEL_WIDTH_SET_RU_MAPPING_IN_5G, "CHANNEL-WIDTH-SET-RU-MAPPING-IN-5G"); switch (cap[1] & IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK) { case IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_80MHZ_ONLY_SECOND_20MHZ: PRINT("PREAMBLE-PUNC-RX-80MHZ-ONLY-SECOND-20MHZ"); break; case IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_80MHZ_ONLY_SECOND_40MHZ: PRINT("PREAMBLE-PUNC-RX-80MHZ-ONLY-SECOND-40MHZ"); break; case IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_160MHZ_ONLY_SECOND_20MHZ: PRINT("PREAMBLE-PUNC-RX-160MHZ-ONLY-SECOND-20MHZ"); break; case IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_160MHZ_ONLY_SECOND_40MHZ: PRINT("PREAMBLE-PUNC-RX-160MHZ-ONLY-SECOND-40MHZ"); break; } PFLAG(PHY, 1, DEVICE_CLASS_A, "IEEE80211-HE-PHY-CAP1-DEVICE-CLASS-A"); PFLAG(PHY, 1, LDPC_CODING_IN_PAYLOAD, "LDPC-CODING-IN-PAYLOAD"); PFLAG(PHY, 1, HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US, "HY-CAP1-HE-LTF-AND-GI-FOR-HE-PPDUS-0-8US"); PRINT("MIDAMBLE-RX-MAX-NSTS-%d", ((cap[2] << 1) | (cap[1] >> 7)) & 0x3); PFLAG(PHY, 2, NDP_4x_LTF_AND_3_2US, "NDP-4X-LTF-AND-3-2US"); PFLAG(PHY, 2, STBC_TX_UNDER_80MHZ, "STBC-TX-UNDER-80MHZ"); PFLAG(PHY, 2, STBC_RX_UNDER_80MHZ, "STBC-RX-UNDER-80MHZ"); PFLAG(PHY, 2, DOPPLER_TX, "DOPPLER-TX"); PFLAG(PHY, 2, DOPPLER_RX, "DOPPLER-RX"); PFLAG(PHY, 2, UL_MU_FULL_MU_MIMO, "UL-MU-FULL-MU-MIMO"); PFLAG(PHY, 2, UL_MU_PARTIAL_MU_MIMO, "UL-MU-PARTIAL-MU-MIMO"); switch (cap[3] & IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK) { case IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_NO_DCM: PRINT("DCM-MAX-CONST-TX-NO-DCM"); break; case IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_BPSK: PRINT("DCM-MAX-CONST-TX-BPSK"); break; case IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_QPSK: PRINT("DCM-MAX-CONST-TX-QPSK"); break; case 
IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_16_QAM: PRINT("DCM-MAX-CONST-TX-16-QAM"); break; } PFLAG(PHY, 3, DCM_MAX_TX_NSS_1, "DCM-MAX-TX-NSS-1"); PFLAG(PHY, 3, DCM_MAX_TX_NSS_2, "DCM-MAX-TX-NSS-2"); switch (cap[3] & IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_MASK) { case IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_NO_DCM: PRINT("DCM-MAX-CONST-RX-NO-DCM"); break; case IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_BPSK: PRINT("DCM-MAX-CONST-RX-BPSK"); break; case IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_QPSK: PRINT("DCM-MAX-CONST-RX-QPSK"); break; case IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_16_QAM: PRINT("DCM-MAX-CONST-RX-16-QAM"); break; } PFLAG(PHY, 3, DCM_MAX_RX_NSS_1, "DCM-MAX-RX-NSS-1"); PFLAG(PHY, 3, DCM_MAX_RX_NSS_2, "DCM-MAX-RX-NSS-2"); PFLAG(PHY, 3, RX_HE_MU_PPDU_FROM_NON_AP_STA, "RX-HE-MU-PPDU-FROM-NON-AP-STA"); PFLAG(PHY, 3, SU_BEAMFORMER, "SU-BEAMFORMER"); PFLAG(PHY, 4, SU_BEAMFORMEE, "SU-BEAMFORMEE"); PFLAG(PHY, 4, MU_BEAMFORMER, "MU-BEAMFORMER"); PFLAG_RANGE(PHY, 4, BEAMFORMEE_MAX_STS_UNDER_80MHZ, 0, 1, 4, "BEAMFORMEE-MAX-STS-UNDER-%d"); PFLAG_RANGE(PHY, 4, BEAMFORMEE_MAX_STS_ABOVE_80MHZ, 0, 1, 4, "BEAMFORMEE-MAX-STS-ABOVE-%d"); PFLAG_RANGE(PHY, 5, BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ, 0, 1, 1, "NUM-SND-DIM-UNDER-80MHZ-%d"); PFLAG_RANGE(PHY, 5, BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ, 0, 1, 1, "NUM-SND-DIM-ABOVE-80MHZ-%d"); PFLAG(PHY, 5, NG16_SU_FEEDBACK, "NG16-SU-FEEDBACK"); PFLAG(PHY, 5, NG16_MU_FEEDBACK, "NG16-MU-FEEDBACK"); PFLAG(PHY, 6, CODEBOOK_SIZE_42_SU, "CODEBOOK-SIZE-42-SU"); PFLAG(PHY, 6, CODEBOOK_SIZE_75_MU, "CODEBOOK-SIZE-75-MU"); PFLAG(PHY, 6, TRIG_SU_BEAMFORMER_FB, "TRIG-SU-BEAMFORMER-FB"); PFLAG(PHY, 6, TRIG_MU_BEAMFORMER_FB, "TRIG-MU-BEAMFORMER-FB"); PFLAG(PHY, 6, TRIG_CQI_FB, "TRIG-CQI-FB"); PFLAG(PHY, 6, PARTIAL_BW_EXT_RANGE, "PARTIAL-BW-EXT-RANGE"); PFLAG(PHY, 6, PARTIAL_BANDWIDTH_DL_MUMIMO, "PARTIAL-BANDWIDTH-DL-MUMIMO"); PFLAG(PHY, 6, PPE_THRESHOLD_PRESENT, "PPE-THRESHOLD-PRESENT"); PFLAG(PHY, 7, SRP_BASED_SR, "SRP-BASED-SR"); PFLAG(PHY, 7, POWER_BOOST_FACTOR_AR, "POWER-BOOST-FACTOR-AR"); PFLAG(PHY, 7, HE_SU_MU_PPDU_4XLTF_AND_08_US_GI, "HE-SU-MU-PPDU-4XLTF-AND-08-US-GI"); PFLAG_RANGE(PHY, 7, MAX_NC, 0, 1, 1, "MAX-NC-%d"); PFLAG(PHY, 7, STBC_TX_ABOVE_80MHZ, "STBC-TX-ABOVE-80MHZ"); PFLAG(PHY, 7, STBC_RX_ABOVE_80MHZ, "STBC-RX-ABOVE-80MHZ"); PFLAG(PHY, 8, HE_ER_SU_PPDU_4XLTF_AND_08_US_GI, "HE-ER-SU-PPDU-4XLTF-AND-08-US-GI"); PFLAG(PHY, 8, 20MHZ_IN_40MHZ_HE_PPDU_IN_2G, "20MHZ-IN-40MHZ-HE-PPDU-IN-2G"); PFLAG(PHY, 8, 20MHZ_IN_160MHZ_HE_PPDU, "20MHZ-IN-160MHZ-HE-PPDU"); PFLAG(PHY, 8, 80MHZ_IN_160MHZ_HE_PPDU, "80MHZ-IN-160MHZ-HE-PPDU"); PFLAG(PHY, 8, HE_ER_SU_1XLTF_AND_08_US_GI, "HE-ER-SU-1XLTF-AND-08-US-GI"); PFLAG(PHY, 8, MIDAMBLE_RX_TX_2X_AND_1XLTF, "MIDAMBLE-RX-TX-2X-AND-1XLTF"); switch (cap[8] & IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_MASK) { case IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_242: PRINT("DCM-MAX-RU-242"); break; case IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_484: PRINT("DCM-MAX-RU-484"); break; case IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_996: PRINT("DCM-MAX-RU-996"); break; case IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_2x996: PRINT("DCM-MAX-RU-2x996"); break; } PFLAG(PHY, 9, LONGER_THAN_16_SIGB_OFDM_SYM, "LONGER-THAN-16-SIGB-OFDM-SYM"); PFLAG(PHY, 9, NON_TRIGGERED_CQI_FEEDBACK, "NON-TRIGGERED-CQI-FEEDBACK"); PFLAG(PHY, 9, TX_1024_QAM_LESS_THAN_242_TONE_RU, "TX-1024-QAM-LESS-THAN-242-TONE-RU"); PFLAG(PHY, 9, RX_1024_QAM_LESS_THAN_242_TONE_RU, "RX-1024-QAM-LESS-THAN-242-TONE-RU"); PFLAG(PHY, 9, RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB, "RX-FULL-BW-SU-USING-MU-WITH-COMP-SIGB"); PFLAG(PHY, 9, 
RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB, "RX-FULL-BW-SU-USING-MU-WITH-NON-COMP-SIGB"); switch (cap[9] & IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_MASK) { case IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_0US: PRINT("NOMINAL-PACKET-PADDING-0US"); break; case IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_8US: PRINT("NOMINAL-PACKET-PADDING-8US"); break; case IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_16US: PRINT("NOMINAL-PACKET-PADDING-16US"); break; } #undef PFLAG_RANGE_DEFAULT #undef PFLAG_RANGE #undef PFLAG #define PRINT_NSS_SUPP(f, n) \ do { \ int _i; \ u16 v = le16_to_cpu(nss->f); \ p += scnprintf(p, buf_sz + buf - p, n ": %#.4x\n", v); \ for (_i = 0; _i < 8; _i += 2) { \ switch ((v >> _i) & 0x3) { \ case 0: \ PRINT(n "-%d-SUPPORT-0-7", _i / 2); \ break; \ case 1: \ PRINT(n "-%d-SUPPORT-0-9", _i / 2); \ break; \ case 2: \ PRINT(n "-%d-SUPPORT-0-11", _i / 2); \ break; \ case 3: \ PRINT(n "-%d-NOT-SUPPORTED", _i / 2); \ break; \ } \ } \ } while (0) PRINT_NSS_SUPP(rx_mcs_80, "RX-MCS-80"); PRINT_NSS_SUPP(tx_mcs_80, "TX-MCS-80"); if (cap[0] & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G) { PRINT_NSS_SUPP(rx_mcs_160, "RX-MCS-160"); PRINT_NSS_SUPP(tx_mcs_160, "TX-MCS-160"); } if (cap[0] & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G) { PRINT_NSS_SUPP(rx_mcs_80p80, "RX-MCS-80P80"); PRINT_NSS_SUPP(tx_mcs_80p80, "TX-MCS-80P80"); } #undef PRINT_NSS_SUPP #undef PRINT if (!(cap[6] & IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT)) goto out; p += scnprintf(p, buf_sz + buf - p, "PPE-THRESHOLDS: %#.2x", hec->ppe_thres[0]); ppe_size = ieee80211_he_ppe_size(hec->ppe_thres[0], cap); for (i = 1; i < ppe_size; i++) { p += scnprintf(p, buf_sz + buf - p, " %#.2x", hec->ppe_thres[i]); } p += scnprintf(p, buf_sz + buf - p, "\n"); out: ret = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf); kfree(buf); return ret; } STA_OPS(he_capa); #ifdef CPTCFG_MAC80211_LATENCY_MEASUREMENTS static int sta_tx_consec_loss_stat_header(struct ieee80211_tx_consec_loss_ranges *tx_csc, char *buf, int pos, int bufsz) { int i; u32 range_count = tx_csc->n_ranges; u32 *bin_ranges = tx_csc->ranges; pos += scnprintf(buf + pos, bufsz - pos, "Station\t\t\tTID\tType"); if (range_count) { for (i = 0; i < range_count - 1; i++) pos += scnprintf(buf + pos, bufsz - pos, "\t%d-%d", bin_ranges[i], bin_ranges[i+1]); pos += scnprintf(buf + pos, bufsz - pos, "\t%d=<", bin_ranges[range_count - 1]); } pos += scnprintf(buf + pos, bufsz - pos, "\n"); return pos; } static int tx_consec_loss_stat_table(struct ieee80211_tx_consec_loss_ranges *tx_range, u32 bin_count, char *buf, int pos, int bufsz, u32 *bins, char *type, int tid) { int j; pos += scnprintf(buf + pos, bufsz - pos, "\t\t\t%d\t%s", tid, type); if (tx_range->n_ranges && bins) for (j = 0; j < bin_count; j++) pos += scnprintf(buf + pos, bufsz - pos, "\t%d", bins[j]); pos += scnprintf(buf + pos, bufsz - pos, "\n"); return pos; } /* * Output stations Tx consecutive loss statistics */ static ssize_t sta_tx_consecutive_loss_stat_read(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { struct sta_info *sta = file->private_data; struct ieee80211_local *local = sta->local; struct ieee80211_tx_consec_loss_ranges *tx_consec; struct ieee80211_tx_consec_loss_stat *tx_csc; char *buf; size_t bufsz, i; int ret; size_t pos = 0; bufsz = 100 * IEEE80211_NUM_TIDS; buf = kzalloc(bufsz, GFP_KERNEL); if (!buf) return -ENOMEM; rcu_read_lock(); tx_consec = rcu_dereference(local->tx_consec); if (!sta->tx_consec) { pos += scnprintf(buf + pos, bufsz - pos, "Tx consecutive loss 
statistics are not enabled\n"); goto unlock; } pos = sta_tx_consec_loss_stat_header(tx_consec, buf, pos, bufsz); pos += scnprintf(buf + pos, bufsz - pos, "%pM\n", sta->sta.addr); for (i = 0; i < IEEE80211_NUM_TIDS; i++) { tx_csc = &sta->tx_consec[i]; pos = tx_consec_loss_stat_table(tx_consec, tx_csc->bin_count, buf, pos, bufsz, tx_csc->late_bins, "late", i); pos = tx_consec_loss_stat_table(tx_consec, tx_csc->bin_count, buf, pos, bufsz, tx_csc->loss_bins, "lost", i); pos = tx_consec_loss_stat_table(tx_consec, tx_csc->bin_count, buf, pos, bufsz, tx_csc->total_loss_bins, "total", i); } unlock: rcu_read_unlock(); ret = simple_read_from_buffer(userbuf, count, ppos, buf, pos); kfree(buf); return ret; } STA_OPS(tx_consecutive_loss_stat); static int sta_tx_latency_stat_header(struct ieee80211_tx_latency_bin_ranges *tx_latency, char *buf, int pos, int bufsz) { int i; int range_count = tx_latency->n_ranges; u32 *bin_ranges = tx_latency->ranges; pos += scnprintf(buf + pos, bufsz - pos, "Station\t\t\tTID\tMax\tMax time\tAvg"); if (range_count) { pos += scnprintf(buf + pos, bufsz - pos, "\t<=%d", bin_ranges[0]); for (i = 0; i < range_count - 1; i++) pos += scnprintf(buf + pos, bufsz - pos, "\t%d-%d", bin_ranges[i], bin_ranges[i+1]); pos += scnprintf(buf + pos, bufsz - pos, "\t%d<", bin_ranges[range_count - 1]); } pos += scnprintf(buf + pos, bufsz - pos, "\n"); return pos; } static int sta_tx_latency_stat_table(struct ieee80211_tx_latency_bin_ranges *tx_lat_range, struct ieee80211_tx_latency_stat *tx_lat, char *buf, int pos, int bufsz, int tid) { u32 avg = 0; int j; int bin_count = tx_lat->bin_count; pos += scnprintf(buf + pos, bufsz - pos, "\t\t\t%d", tid); /* make sure you don't divide in 0 */ if (tx_lat->counter) avg = tx_lat->sum / tx_lat->counter; pos += scnprintf(buf + pos, bufsz - pos, "\t%d\t%u\t%d", tx_lat->max, tx_lat->max_ts, avg); if (tx_lat_range->n_ranges && tx_lat->bins) for (j = 0; j < bin_count; j++) pos += scnprintf(buf + pos, bufsz - pos, "\t%d", tx_lat->bins[j]); pos += scnprintf(buf + pos, bufsz - pos, "\n"); return pos; } /* * Output Tx latency statistics station && restart all statistics information */ static ssize_t sta_tx_latency_stat_read(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { struct sta_info *sta = file->private_data; struct ieee80211_local *local = sta->local; struct ieee80211_tx_latency_bin_ranges *tx_latency; char *buf; int bufsz, ret, i; int pos = 0; bufsz = 20 * IEEE80211_NUM_TIDS * sizeof(struct ieee80211_tx_latency_stat); buf = kzalloc(bufsz, GFP_KERNEL); if (!buf) return -ENOMEM; rcu_read_lock(); tx_latency = rcu_dereference(local->tx_latency); if (!sta->tx_lat) { pos += scnprintf(buf + pos, bufsz - pos, "Tx latency statistics are not enabled\n"); goto unlock; } pos = sta_tx_latency_stat_header(tx_latency, buf, pos, bufsz); pos += scnprintf(buf + pos, bufsz - pos, "%pM\n", sta->sta.addr); for (i = 0; i < IEEE80211_NUM_TIDS; i++) pos = sta_tx_latency_stat_table(tx_latency, &sta->tx_lat[i], buf, pos, bufsz, i); unlock: rcu_read_unlock(); ret = simple_read_from_buffer(userbuf, count, ppos, buf, pos); kfree(buf); return ret; } STA_OPS(tx_latency_stat); static ssize_t sta_tx_timing_stats_reset_write(struct file *file, const char __user *userbuf, size_t count, loff_t *ppos) { u32 *bins; int bin_count; u32 *loss_bins; u32 *late_bins; u32 *total_loss_bins; u32 csc_bin_cnt; struct sta_info *sta = file->private_data; int i; if (!sta->tx_lat && !sta->tx_consec) return -EINVAL; for (i = 0; i < IEEE80211_NUM_TIDS; i++) { if (sta->tx_lat) { /* latency 
stats enabled */ bins = sta->tx_lat[i].bins; bin_count = sta->tx_lat[i].bin_count; sta->tx_lat[i].max = 0; sta->tx_lat[i].sum = 0; sta->tx_lat[i].counter = 0; if (bin_count) memset(bins, 0, bin_count * sizeof(u32)); } if (sta->tx_consec) { /* consecutive loss stats enabled */ csc_bin_cnt = sta->tx_consec[i].bin_count; late_bins = sta->tx_consec[i].late_bins; loss_bins = sta->tx_consec[i].loss_bins; total_loss_bins = sta->tx_consec[i].total_loss_bins; memset(late_bins, 0, csc_bin_cnt * sizeof(u32)); memset(loss_bins, 0, csc_bin_cnt * sizeof(u32)); memset(total_loss_bins, 0, csc_bin_cnt * sizeof(u32)); } } return count; } STA_OPS_W(tx_timing_stats_reset); #endif /* CPTCFG_MAC80211_LATENCY_MEASUREMENTS */ #define DEBUGFS_ADD(name) \ debugfs_create_file(#name, 0400, \ sta->debugfs_dir, sta, &sta_ ##name## _ops); #define DEBUGFS_ADD_COUNTER(name, field) \ if (sizeof(sta->field) == sizeof(u32)) \ debugfs_create_u32(#name, 0400, sta->debugfs_dir, \ (u32 *) &sta->field); \ else \ debugfs_create_u64(#name, 0400, sta->debugfs_dir, \ (u64 *) &sta->field); void ieee80211_sta_debugfs_add(struct sta_info *sta) { struct ieee80211_local *local = sta->local; struct ieee80211_sub_if_data *sdata = sta->sdata; struct dentry *stations_dir = sta->sdata->debugfs.subdir_stations; u8 mac[3*ETH_ALEN]; if (!stations_dir) return; snprintf(mac, sizeof(mac), "%pM", sta->sta.addr); /* * This might fail due to a race condition: * When mac80211 unlinks a station, the debugfs entries * remain, but it is already possible to link a new * station with the same address which triggers adding * it to debugfs; therefore, if the old station isn't * destroyed quickly enough the old station's debugfs * dir might still be around. */ sta->debugfs_dir = debugfs_create_dir(mac, stations_dir); if (!sta->debugfs_dir) return; DEBUGFS_ADD(flags); DEBUGFS_ADD(aid); DEBUGFS_ADD(num_ps_buf_frames); DEBUGFS_ADD(last_seq_ctrl); DEBUGFS_ADD(agg_status); DEBUGFS_ADD(ht_capa); DEBUGFS_ADD(vht_capa); DEBUGFS_ADD(he_capa); #ifdef CPTCFG_MAC80211_LATENCY_MEASUREMENTS DEBUGFS_ADD(tx_consecutive_loss_stat); DEBUGFS_ADD(tx_latency_stat); DEBUGFS_ADD(tx_timing_stats_reset); #endif /* CPTCFG_MAC80211_LATENCY_MEASUREMENTS */ DEBUGFS_ADD_COUNTER(rx_duplicates, rx_stats.num_duplicates); DEBUGFS_ADD_COUNTER(rx_fragments, rx_stats.fragments); DEBUGFS_ADD_COUNTER(tx_filtered, status_stats.filtered); if (local->ops->wake_tx_queue) DEBUGFS_ADD(aqm); if (sizeof(sta->driver_buffered_tids) == sizeof(u32)) debugfs_create_x32("driver_buffered_tids", 0400, sta->debugfs_dir, (u32 *)&sta->driver_buffered_tids); else debugfs_create_x64("driver_buffered_tids", 0400, sta->debugfs_dir, (u64 *)&sta->driver_buffered_tids); drv_sta_add_debugfs(local, sdata, &sta->sta, sta->debugfs_dir); } void ieee80211_sta_debugfs_remove(struct sta_info *sta) { debugfs_remove_recursive(sta->debugfs_dir); sta->debugfs_dir = NULL; } backport-iwlwifi-dkms-7906/net/mac80211/debugfs_sta.h000066400000000000000000000007221351064634500222630ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __MAC80211_DEBUGFS_STA_H #define __MAC80211_DEBUGFS_STA_H #include "sta_info.h" #ifdef CPTCFG_MAC80211_DEBUGFS void ieee80211_sta_debugfs_add(struct sta_info *sta); void ieee80211_sta_debugfs_remove(struct sta_info *sta); #else static inline void ieee80211_sta_debugfs_add(struct sta_info *sta) {} static inline void ieee80211_sta_debugfs_remove(struct sta_info *sta) {} #endif #endif /* __MAC80211_DEBUGFS_STA_H */ 
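Aside: the STA_READ()/STA_OPS() macro pairs at the top of debugfs_sta.c expand to the stock debugfs read-only file pattern. Below is a minimal hand-expanded sketch of what one such pair generates; the attribute name "example" and the choice of sta->sta.aid as the payload are invented purely for illustration (the real files are registered from ieee80211_sta_debugfs_add() via DEBUGFS_ADD()), and the sketch assumes the same headers debugfs_sta.c already includes.

static ssize_t sta_example_read(struct file *file, char __user *userbuf,
				size_t count, loff_t *ppos)
{
	/* simple_open() stored the debugfs_create_file() data pointer here */
	struct sta_info *sta = file->private_data;
	char buf[32];
	int len = scnprintf(buf, sizeof(buf), "%d\n", sta->sta.aid);

	/* copy at most count bytes to userspace, advancing *ppos */
	return simple_read_from_buffer(userbuf, count, ppos, buf, len);
}

static const struct file_operations sta_example_ops = {
	.read = sta_example_read,
	.open = simple_open,
	.llseek = generic_file_llseek,
};

/* hypothetical registration, as DEBUGFS_ADD(example) would emit: */
/* debugfs_create_file("example", 0400, sta->debugfs_dir, sta, &sta_example_ops); */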
backport-iwlwifi-dkms-7906/net/mac80211/driver-ops.c000066400000000000000000000166731351064634500220740ustar00rootroot00000000000000/* * Copyright 2015 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <net/mac80211.h> #include "ieee80211_i.h" #include "trace.h" #include "driver-ops.h" int drv_start(struct ieee80211_local *local) { int ret; might_sleep(); if (WARN_ON(local->started)) return -EALREADY; trace_drv_start(local); local->started = true; /* allow rx frames */ smp_mb(); ret = local->ops->start(&local->hw); trace_drv_return_int(local, ret); if (ret) local->started = false; return ret; } void drv_stop(struct ieee80211_local *local) { might_sleep(); if (WARN_ON(!local->started)) return; trace_drv_stop(local); local->ops->stop(&local->hw); trace_drv_return_void(local); /* sync away all work on the tasklet before clearing started */ tasklet_disable(&local->tasklet); tasklet_enable(&local->tasklet); barrier(); local->started = false; } int drv_add_interface(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata) { int ret; might_sleep(); if (WARN_ON(sdata->vif.type == NL80211_IFTYPE_AP_VLAN || (sdata->vif.type == NL80211_IFTYPE_MONITOR && !ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF) && !(sdata->u.mntr.flags & MONITOR_FLAG_ACTIVE)))) return -EINVAL; trace_drv_add_interface(local, sdata); ret = local->ops->add_interface(&local->hw, &sdata->vif); trace_drv_return_int(local, ret); if (ret == 0) sdata->flags |= IEEE80211_SDATA_IN_DRIVER; return ret; } int drv_change_interface(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, enum nl80211_iftype type, bool p2p) { int ret; might_sleep(); if (!check_sdata_in_driver(sdata)) return -EIO; trace_drv_change_interface(local, sdata, type, p2p); ret = local->ops->change_interface(&local->hw, &sdata->vif, type, p2p); trace_drv_return_int(local, ret); return ret; } void drv_remove_interface(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata) { might_sleep(); if (!check_sdata_in_driver(sdata)) return; trace_drv_remove_interface(local, sdata); local->ops->remove_interface(&local->hw, &sdata->vif); sdata->flags &= ~IEEE80211_SDATA_IN_DRIVER; trace_drv_return_void(local); } __must_check int drv_sta_state(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct sta_info *sta, enum ieee80211_sta_state old_state, enum ieee80211_sta_state new_state) { int ret = 0; might_sleep(); sdata = get_bss_sdata(sdata); if (!check_sdata_in_driver(sdata)) return -EIO; trace_drv_sta_state(local, sdata, &sta->sta, old_state, new_state); if (local->ops->sta_state) { ret = local->ops->sta_state(&local->hw, &sdata->vif, &sta->sta, old_state, new_state); } else if (old_state == IEEE80211_STA_AUTH && new_state == IEEE80211_STA_ASSOC) { ret = drv_sta_add(local, sdata, &sta->sta); if (ret == 0) sta->uploaded = true; } else if (old_state == IEEE80211_STA_ASSOC && new_state == IEEE80211_STA_AUTH) { drv_sta_remove(local, sdata, &sta->sta); } trace_drv_return_int(local, ret); return ret; } void drv_sta_rc_update(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct ieee80211_sta *sta, u32 changed) { sdata = get_bss_sdata(sdata); if (!check_sdata_in_driver(sdata)) return; WARN_ON(changed & IEEE80211_RC_SUPP_RATES_CHANGED && (sdata->vif.type != NL80211_IFTYPE_ADHOC && sdata->vif.type != NL80211_IFTYPE_MESH_POINT)); trace_drv_sta_rc_update(local,
sdata, sta, changed); if (local->ops->sta_rc_update) local->ops->sta_rc_update(&local->hw, &sdata->vif, sta, changed); trace_drv_return_void(local); } int drv_conf_tx(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, u16 ac, const struct ieee80211_tx_queue_params *params) { int ret = -EOPNOTSUPP; might_sleep(); if (!check_sdata_in_driver(sdata)) return -EIO; if (WARN_ONCE(params->cw_min == 0 || params->cw_min > params->cw_max, "%s: invalid CW_min/CW_max: %d/%d\n", sdata->name, params->cw_min, params->cw_max)) return -EINVAL; trace_drv_conf_tx(local, sdata, ac, params); if (local->ops->conf_tx) ret = local->ops->conf_tx(&local->hw, &sdata->vif, ac, params); trace_drv_return_int(local, ret); return ret; } u64 drv_get_tsf(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata) { u64 ret = -1ULL; might_sleep(); if (!check_sdata_in_driver(sdata)) return ret; trace_drv_get_tsf(local, sdata); if (local->ops->get_tsf) ret = local->ops->get_tsf(&local->hw, &sdata->vif); trace_drv_return_u64(local, ret); return ret; } void drv_set_tsf(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, u64 tsf) { might_sleep(); if (!check_sdata_in_driver(sdata)) return; trace_drv_set_tsf(local, sdata, tsf); if (local->ops->set_tsf) local->ops->set_tsf(&local->hw, &sdata->vif, tsf); trace_drv_return_void(local); } void drv_offset_tsf(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, s64 offset) { might_sleep(); if (!check_sdata_in_driver(sdata)) return; trace_drv_offset_tsf(local, sdata, offset); if (local->ops->offset_tsf) local->ops->offset_tsf(&local->hw, &sdata->vif, offset); trace_drv_return_void(local); } void drv_reset_tsf(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata) { might_sleep(); if (!check_sdata_in_driver(sdata)) return; trace_drv_reset_tsf(local, sdata); if (local->ops->reset_tsf) local->ops->reset_tsf(&local->hw, &sdata->vif); trace_drv_return_void(local); } int drv_switch_vif_chanctx(struct ieee80211_local *local, struct ieee80211_vif_chanctx_switch *vifs, int n_vifs, enum ieee80211_chanctx_switch_mode mode) { int ret = 0; int i; might_sleep(); if (!local->ops->switch_vif_chanctx) return -EOPNOTSUPP; for (i = 0; i < n_vifs; i++) { struct ieee80211_chanctx *new_ctx = container_of(vifs[i].new_ctx, struct ieee80211_chanctx, conf); struct ieee80211_chanctx *old_ctx = container_of(vifs[i].old_ctx, struct ieee80211_chanctx, conf); WARN_ON_ONCE(!old_ctx->driver_present); WARN_ON_ONCE((mode == CHANCTX_SWMODE_SWAP_CONTEXTS && new_ctx->driver_present) || (mode == CHANCTX_SWMODE_REASSIGN_VIF && !new_ctx->driver_present)); } trace_drv_switch_vif_chanctx(local, vifs, n_vifs, mode); ret = local->ops->switch_vif_chanctx(&local->hw, vifs, n_vifs, mode); trace_drv_return_int(local, ret); if (!ret && mode == CHANCTX_SWMODE_SWAP_CONTEXTS) { for (i = 0; i < n_vifs; i++) { struct ieee80211_chanctx *new_ctx = container_of(vifs[i].new_ctx, struct ieee80211_chanctx, conf); struct ieee80211_chanctx *old_ctx = container_of(vifs[i].old_ctx, struct ieee80211_chanctx, conf); new_ctx->driver_present = true; old_ctx->driver_present = false; } } return ret; } int drv_ampdu_action(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct ieee80211_ampdu_params *params) { int ret = -EOPNOTSUPP; might_sleep(); sdata = get_bss_sdata(sdata); if (!check_sdata_in_driver(sdata)) return -EIO; trace_drv_ampdu_action(local, sdata, params); if (local->ops->ampdu_action) ret = local->ops->ampdu_action(&local->hw, &sdata->vif, params); 
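/* As in the other drv_*() wrappers in this file: trace the call, invoke the driver's callback only if one is provided (otherwise ret keeps its -EOPNOTSUPP default), then trace the return value. */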
trace_drv_return_int(local, ret); return ret; } backport-iwlwifi-dkms-7906/net/mac80211/driver-ops.h000066400000000000000000001034211351064634500220670ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0 */ /* * Portions of this file * Copyright(c) 2016 Intel Deutschland GmbH * Copyright (C) 2018 - 2019 Intel Corporation */ #ifndef __MAC80211_DRIVER_OPS #define __MAC80211_DRIVER_OPS #include <net/mac80211.h> #include "ieee80211_i.h" #include "trace.h" static inline bool check_sdata_in_driver(struct ieee80211_sub_if_data *sdata) { return !WARN(!(sdata->flags & IEEE80211_SDATA_IN_DRIVER), "%s: Failed check-sdata-in-driver check, flags: 0x%x\n", sdata->dev ? sdata->dev->name : sdata->name, sdata->flags); } static inline struct ieee80211_sub_if_data * get_bss_sdata(struct ieee80211_sub_if_data *sdata) { if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) sdata = container_of(sdata->bss, struct ieee80211_sub_if_data, u.ap); return sdata; } static inline void drv_tx(struct ieee80211_local *local, struct ieee80211_tx_control *control, struct sk_buff *skb) { local->ops->tx(&local->hw, control, skb); } static inline void drv_sync_rx_queues(struct ieee80211_local *local, struct sta_info *sta) { if (local->ops->sync_rx_queues) { trace_drv_sync_rx_queues(local, sta->sdata, &sta->sta); local->ops->sync_rx_queues(&local->hw); trace_drv_return_void(local); } } static inline void drv_get_et_strings(struct ieee80211_sub_if_data *sdata, u32 sset, u8 *data) { struct ieee80211_local *local = sdata->local; if (local->ops->get_et_strings) { trace_drv_get_et_strings(local, sset); local->ops->get_et_strings(&local->hw, &sdata->vif, sset, data); trace_drv_return_void(local); } } static inline void drv_get_et_stats(struct ieee80211_sub_if_data *sdata, struct ethtool_stats *stats, u64 *data) { struct ieee80211_local *local = sdata->local; if (local->ops->get_et_stats) { trace_drv_get_et_stats(local); local->ops->get_et_stats(&local->hw, &sdata->vif, stats, data); trace_drv_return_void(local); } } static inline int drv_get_et_sset_count(struct ieee80211_sub_if_data *sdata, int sset) { struct ieee80211_local *local = sdata->local; int rv = 0; if (local->ops->get_et_sset_count) { trace_drv_get_et_sset_count(local, sset); rv = local->ops->get_et_sset_count(&local->hw, &sdata->vif, sset); trace_drv_return_int(local, rv); } return rv; } int drv_start(struct ieee80211_local *local); void drv_stop(struct ieee80211_local *local); #ifdef CONFIG_PM static inline int drv_suspend(struct ieee80211_local *local, struct cfg80211_wowlan *wowlan) { int ret; might_sleep(); trace_drv_suspend(local); ret = local->ops->suspend(&local->hw, wowlan); trace_drv_return_int(local, ret); return ret; } static inline int drv_resume(struct ieee80211_local *local) { int ret; might_sleep(); trace_drv_resume(local); ret = local->ops->resume(&local->hw); trace_drv_return_int(local, ret); return ret; } static inline void drv_set_wakeup(struct ieee80211_local *local, bool enabled) { might_sleep(); if (!local->ops->set_wakeup) return; trace_drv_set_wakeup(local, enabled); local->ops->set_wakeup(&local->hw, enabled); trace_drv_return_void(local); } #endif int drv_add_interface(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata); int drv_change_interface(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, enum nl80211_iftype type, bool p2p); void drv_remove_interface(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata); static inline int drv_config(struct ieee80211_local *local, u32 changed) { int ret; might_sleep();
trace_drv_config(local, changed); ret = local->ops->config(&local->hw, changed); trace_drv_return_int(local, ret); return ret; } static inline void drv_bss_info_changed(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct ieee80211_bss_conf *info, u32 changed) { might_sleep(); if (WARN_ON_ONCE(changed & (BSS_CHANGED_BEACON | BSS_CHANGED_BEACON_ENABLED) && sdata->vif.type != NL80211_IFTYPE_AP && sdata->vif.type != NL80211_IFTYPE_ADHOC && sdata->vif.type != NL80211_IFTYPE_MESH_POINT && sdata->vif.type != NL80211_IFTYPE_OCB)) return; if (WARN_ON_ONCE(sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE || sdata->vif.type == NL80211_IFTYPE_NAN || (sdata->vif.type == NL80211_IFTYPE_MONITOR && !sdata->vif.mu_mimo_owner && !(changed & BSS_CHANGED_TXPOWER)))) return; if (!check_sdata_in_driver(sdata)) return; trace_drv_bss_info_changed(local, sdata, info, changed); if (local->ops->bss_info_changed) local->ops->bss_info_changed(&local->hw, &sdata->vif, info, changed); trace_drv_return_void(local); } static inline u64 drv_prepare_multicast(struct ieee80211_local *local, struct netdev_hw_addr_list *mc_list) { u64 ret = 0; trace_drv_prepare_multicast(local, mc_list->count); if (local->ops->prepare_multicast) ret = local->ops->prepare_multicast(&local->hw, mc_list); trace_drv_return_u64(local, ret); return ret; } static inline void drv_configure_filter(struct ieee80211_local *local, unsigned int changed_flags, unsigned int *total_flags, u64 multicast) { might_sleep(); trace_drv_configure_filter(local, changed_flags, total_flags, multicast); local->ops->configure_filter(&local->hw, changed_flags, total_flags, multicast); trace_drv_return_void(local); } static inline void drv_config_iface_filter(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, unsigned int filter_flags, unsigned int changed_flags) { might_sleep(); trace_drv_config_iface_filter(local, sdata, filter_flags, changed_flags); if (local->ops->config_iface_filter) local->ops->config_iface_filter(&local->hw, &sdata->vif, filter_flags, changed_flags); trace_drv_return_void(local); } static inline int drv_set_tim(struct ieee80211_local *local, struct ieee80211_sta *sta, bool set) { int ret = 0; trace_drv_set_tim(local, sta, set); if (local->ops->set_tim) ret = local->ops->set_tim(&local->hw, sta, set); trace_drv_return_int(local, ret); return ret; } static inline int drv_set_key(struct ieee80211_local *local, enum set_key_cmd cmd, struct ieee80211_sub_if_data *sdata, struct ieee80211_sta *sta, struct ieee80211_key_conf *key) { int ret; might_sleep(); sdata = get_bss_sdata(sdata); if (!check_sdata_in_driver(sdata)) return -EIO; trace_drv_set_key(local, cmd, sdata, sta, key); ret = local->ops->set_key(&local->hw, cmd, &sdata->vif, sta, key); trace_drv_return_int(local, ret); return ret; } static inline void drv_update_tkip_key(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct ieee80211_key_conf *conf, struct sta_info *sta, u32 iv32, u16 *phase1key) { struct ieee80211_sta *ista = NULL; if (sta) ista = &sta->sta; sdata = get_bss_sdata(sdata); if (!check_sdata_in_driver(sdata)) return; trace_drv_update_tkip_key(local, sdata, conf, ista, iv32); if (local->ops->update_tkip_key) local->ops->update_tkip_key(&local->hw, &sdata->vif, conf, ista, iv32, phase1key); trace_drv_return_void(local); } static inline int drv_hw_scan(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct ieee80211_scan_request *req) { int ret; might_sleep(); if (!check_sdata_in_driver(sdata)) return -EIO; 
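/* hw_scan is likewise called unconditionally: the scan code is only expected to route through this wrapper when the driver provides the callback */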
trace_drv_hw_scan(local, sdata); ret = local->ops->hw_scan(&local->hw, &sdata->vif, req); trace_drv_return_int(local, ret); return ret; } static inline void drv_cancel_hw_scan(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata) { might_sleep(); if (!check_sdata_in_driver(sdata)) return; trace_drv_cancel_hw_scan(local, sdata); local->ops->cancel_hw_scan(&local->hw, &sdata->vif); trace_drv_return_void(local); } static inline int drv_sched_scan_start(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct cfg80211_sched_scan_request *req, struct ieee80211_scan_ies *ies) { int ret; might_sleep(); if (!check_sdata_in_driver(sdata)) return -EIO; trace_drv_sched_scan_start(local, sdata); ret = local->ops->sched_scan_start(&local->hw, &sdata->vif, req, ies); trace_drv_return_int(local, ret); return ret; } static inline int drv_sched_scan_stop(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata) { int ret; might_sleep(); if (!check_sdata_in_driver(sdata)) return -EIO; trace_drv_sched_scan_stop(local, sdata); ret = local->ops->sched_scan_stop(&local->hw, &sdata->vif); trace_drv_return_int(local, ret); return ret; } static inline void drv_sw_scan_start(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, const u8 *mac_addr) { might_sleep(); trace_drv_sw_scan_start(local, sdata, mac_addr); if (local->ops->sw_scan_start) local->ops->sw_scan_start(&local->hw, &sdata->vif, mac_addr); trace_drv_return_void(local); } static inline void drv_sw_scan_complete(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata) { might_sleep(); trace_drv_sw_scan_complete(local, sdata); if (local->ops->sw_scan_complete) local->ops->sw_scan_complete(&local->hw, &sdata->vif); trace_drv_return_void(local); } static inline int drv_get_stats(struct ieee80211_local *local, struct ieee80211_low_level_stats *stats) { int ret = -EOPNOTSUPP; might_sleep(); if (local->ops->get_stats) ret = local->ops->get_stats(&local->hw, stats); trace_drv_get_stats(local, stats, ret); return ret; } static inline void drv_get_key_seq(struct ieee80211_local *local, struct ieee80211_key *key, struct ieee80211_key_seq *seq) { if (local->ops->get_key_seq) local->ops->get_key_seq(&local->hw, &key->conf, seq); trace_drv_get_key_seq(local, &key->conf); } static inline int drv_set_frag_threshold(struct ieee80211_local *local, u32 value) { int ret = 0; might_sleep(); trace_drv_set_frag_threshold(local, value); if (local->ops->set_frag_threshold) ret = local->ops->set_frag_threshold(&local->hw, value); trace_drv_return_int(local, ret); return ret; } static inline int drv_set_rts_threshold(struct ieee80211_local *local, u32 value) { int ret = 0; might_sleep(); trace_drv_set_rts_threshold(local, value); if (local->ops->set_rts_threshold) ret = local->ops->set_rts_threshold(&local->hw, value); trace_drv_return_int(local, ret); return ret; } static inline int drv_set_coverage_class(struct ieee80211_local *local, s16 value) { int ret = 0; might_sleep(); trace_drv_set_coverage_class(local, value); if (local->ops->set_coverage_class) local->ops->set_coverage_class(&local->hw, value); else ret = -EOPNOTSUPP; trace_drv_return_int(local, ret); return ret; } static inline void drv_sta_notify(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, enum sta_notify_cmd cmd, struct ieee80211_sta *sta) { sdata = get_bss_sdata(sdata); if (!check_sdata_in_driver(sdata)) return; trace_drv_sta_notify(local, sdata, cmd, sta); if (local->ops->sta_notify) local->ops->sta_notify(&local->hw, 
&sdata->vif, cmd, sta); trace_drv_return_void(local); } static inline int drv_sta_add(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct ieee80211_sta *sta) { int ret = 0; might_sleep(); sdata = get_bss_sdata(sdata); if (!check_sdata_in_driver(sdata)) return -EIO; trace_drv_sta_add(local, sdata, sta); if (local->ops->sta_add) ret = local->ops->sta_add(&local->hw, &sdata->vif, sta); trace_drv_return_int(local, ret); return ret; } static inline void drv_sta_remove(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct ieee80211_sta *sta) { might_sleep(); sdata = get_bss_sdata(sdata); if (!check_sdata_in_driver(sdata)) return; trace_drv_sta_remove(local, sdata, sta); if (local->ops->sta_remove) local->ops->sta_remove(&local->hw, &sdata->vif, sta); trace_drv_return_void(local); } #ifdef CPTCFG_MAC80211_DEBUGFS static inline void drv_sta_add_debugfs(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct ieee80211_sta *sta, struct dentry *dir) { might_sleep(); sdata = get_bss_sdata(sdata); if (!check_sdata_in_driver(sdata)) return; if (local->ops->sta_add_debugfs) local->ops->sta_add_debugfs(&local->hw, &sdata->vif, sta, dir); } #endif static inline void drv_sta_pre_rcu_remove(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct sta_info *sta) { might_sleep(); sdata = get_bss_sdata(sdata); if (!check_sdata_in_driver(sdata)) return; trace_drv_sta_pre_rcu_remove(local, sdata, &sta->sta); if (local->ops->sta_pre_rcu_remove) local->ops->sta_pre_rcu_remove(&local->hw, &sdata->vif, &sta->sta); trace_drv_return_void(local); } __must_check int drv_sta_state(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct sta_info *sta, enum ieee80211_sta_state old_state, enum ieee80211_sta_state new_state); void drv_sta_rc_update(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct ieee80211_sta *sta, u32 changed); static inline void drv_sta_rate_tbl_update(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct ieee80211_sta *sta) { sdata = get_bss_sdata(sdata); if (!check_sdata_in_driver(sdata)) return; trace_drv_sta_rate_tbl_update(local, sdata, sta); if (local->ops->sta_rate_tbl_update) local->ops->sta_rate_tbl_update(&local->hw, &sdata->vif, sta); trace_drv_return_void(local); } static inline void drv_sta_statistics(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct ieee80211_sta *sta, struct station_info *sinfo) { sdata = get_bss_sdata(sdata); if (!check_sdata_in_driver(sdata)) return; trace_drv_sta_statistics(local, sdata, sta); if (local->ops->sta_statistics) local->ops->sta_statistics(&local->hw, &sdata->vif, sta, sinfo); trace_drv_return_void(local); } int drv_conf_tx(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, u16 ac, const struct ieee80211_tx_queue_params *params); u64 drv_get_tsf(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata); void drv_set_tsf(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, u64 tsf); void drv_offset_tsf(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, s64 offset); void drv_reset_tsf(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata); static inline int drv_tx_last_beacon(struct ieee80211_local *local) { int ret = 0; /* default unsupported op for less congestion */ might_sleep(); trace_drv_tx_last_beacon(local); if (local->ops->tx_last_beacon) ret = local->ops->tx_last_beacon(&local->hw); trace_drv_return_int(local, 
ret); return ret; } int drv_ampdu_action(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct ieee80211_ampdu_params *params); static inline int drv_get_survey(struct ieee80211_local *local, int idx, struct survey_info *survey) { int ret = -EOPNOTSUPP; trace_drv_get_survey(local, idx, survey); if (local->ops->get_survey) ret = local->ops->get_survey(&local->hw, idx, survey); trace_drv_return_int(local, ret); return ret; } static inline void drv_rfkill_poll(struct ieee80211_local *local) { might_sleep(); if (local->ops->rfkill_poll) local->ops->rfkill_poll(&local->hw); } static inline void drv_flush(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, u32 queues, bool drop) { struct ieee80211_vif *vif = sdata ? &sdata->vif : NULL; might_sleep(); if (sdata && !check_sdata_in_driver(sdata)) return; trace_drv_flush(local, queues, drop); if (local->ops->flush) local->ops->flush(&local->hw, vif, queues, drop); trace_drv_return_void(local); } static inline void drv_channel_switch(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct ieee80211_channel_switch *ch_switch) { might_sleep(); trace_drv_channel_switch(local, sdata, ch_switch); local->ops->channel_switch(&local->hw, &sdata->vif, ch_switch); trace_drv_return_void(local); } static inline int drv_set_antenna(struct ieee80211_local *local, u32 tx_ant, u32 rx_ant) { int ret = -EOPNOTSUPP; might_sleep(); if (local->ops->set_antenna) ret = local->ops->set_antenna(&local->hw, tx_ant, rx_ant); trace_drv_set_antenna(local, tx_ant, rx_ant, ret); return ret; } static inline int drv_get_antenna(struct ieee80211_local *local, u32 *tx_ant, u32 *rx_ant) { int ret = -EOPNOTSUPP; might_sleep(); if (local->ops->get_antenna) ret = local->ops->get_antenna(&local->hw, tx_ant, rx_ant); trace_drv_get_antenna(local, *tx_ant, *rx_ant, ret); return ret; } static inline int drv_remain_on_channel(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct ieee80211_channel *chan, unsigned int duration, enum ieee80211_roc_type type) { int ret; might_sleep(); trace_drv_remain_on_channel(local, sdata, chan, duration, type); ret = local->ops->remain_on_channel(&local->hw, &sdata->vif, chan, duration, type); trace_drv_return_int(local, ret); return ret; } static inline int drv_cancel_remain_on_channel(struct ieee80211_local *local) { int ret; might_sleep(); trace_drv_cancel_remain_on_channel(local); ret = local->ops->cancel_remain_on_channel(&local->hw); trace_drv_return_int(local, ret); return ret; } static inline int drv_set_ringparam(struct ieee80211_local *local, u32 tx, u32 rx) { int ret = -ENOTSUPP; might_sleep(); trace_drv_set_ringparam(local, tx, rx); if (local->ops->set_ringparam) ret = local->ops->set_ringparam(&local->hw, tx, rx); trace_drv_return_int(local, ret); return ret; } static inline void drv_get_ringparam(struct ieee80211_local *local, u32 *tx, u32 *tx_max, u32 *rx, u32 *rx_max) { might_sleep(); trace_drv_get_ringparam(local, tx, tx_max, rx, rx_max); if (local->ops->get_ringparam) local->ops->get_ringparam(&local->hw, tx, tx_max, rx, rx_max); trace_drv_return_void(local); } static inline bool drv_tx_frames_pending(struct ieee80211_local *local) { bool ret = false; might_sleep(); trace_drv_tx_frames_pending(local); if (local->ops->tx_frames_pending) ret = local->ops->tx_frames_pending(&local->hw); trace_drv_return_bool(local, ret); return ret; } static inline int drv_set_bitrate_mask(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, const struct 
cfg80211_bitrate_mask *mask) { int ret = -EOPNOTSUPP; might_sleep(); if (!check_sdata_in_driver(sdata)) return -EIO; trace_drv_set_bitrate_mask(local, sdata, mask); if (local->ops->set_bitrate_mask) ret = local->ops->set_bitrate_mask(&local->hw, &sdata->vif, mask); trace_drv_return_int(local, ret); return ret; } static inline void drv_set_rekey_data(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct cfg80211_gtk_rekey_data *data) { if (!check_sdata_in_driver(sdata)) return; trace_drv_set_rekey_data(local, sdata, data); if (local->ops->set_rekey_data) local->ops->set_rekey_data(&local->hw, &sdata->vif, data); trace_drv_return_void(local); } static inline void drv_event_callback(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, const struct ieee80211_event *event) { trace_drv_event_callback(local, sdata, event); if (local->ops->event_callback) local->ops->event_callback(&local->hw, &sdata->vif, event); trace_drv_return_void(local); } static inline void drv_release_buffered_frames(struct ieee80211_local *local, struct sta_info *sta, u16 tids, int num_frames, enum ieee80211_frame_release_type reason, bool more_data) { trace_drv_release_buffered_frames(local, &sta->sta, tids, num_frames, reason, more_data); if (local->ops->release_buffered_frames) local->ops->release_buffered_frames(&local->hw, &sta->sta, tids, num_frames, reason, more_data); trace_drv_return_void(local); } static inline void drv_allow_buffered_frames(struct ieee80211_local *local, struct sta_info *sta, u16 tids, int num_frames, enum ieee80211_frame_release_type reason, bool more_data) { trace_drv_allow_buffered_frames(local, &sta->sta, tids, num_frames, reason, more_data); if (local->ops->allow_buffered_frames) local->ops->allow_buffered_frames(&local->hw, &sta->sta, tids, num_frames, reason, more_data); trace_drv_return_void(local); } static inline void drv_mgd_prepare_tx(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, u16 duration) { might_sleep(); if (!check_sdata_in_driver(sdata)) return; WARN_ON_ONCE(sdata->vif.type != NL80211_IFTYPE_STATION); trace_drv_mgd_prepare_tx(local, sdata, duration); if (local->ops->mgd_prepare_tx) local->ops->mgd_prepare_tx(&local->hw, &sdata->vif, duration); trace_drv_return_void(local); } static inline void drv_mgd_protect_tdls_discover(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata) { might_sleep(); if (!check_sdata_in_driver(sdata)) return; WARN_ON_ONCE(sdata->vif.type != NL80211_IFTYPE_STATION); trace_drv_mgd_protect_tdls_discover(local, sdata); if (local->ops->mgd_protect_tdls_discover) local->ops->mgd_protect_tdls_discover(&local->hw, &sdata->vif); trace_drv_return_void(local); } static inline int drv_add_chanctx(struct ieee80211_local *local, struct ieee80211_chanctx *ctx) { int ret = -EOPNOTSUPP; might_sleep(); trace_drv_add_chanctx(local, ctx); if (local->ops->add_chanctx) ret = local->ops->add_chanctx(&local->hw, &ctx->conf); trace_drv_return_int(local, ret); if (!ret) ctx->driver_present = true; return ret; } static inline void drv_remove_chanctx(struct ieee80211_local *local, struct ieee80211_chanctx *ctx) { might_sleep(); if (WARN_ON(!ctx->driver_present)) return; trace_drv_remove_chanctx(local, ctx); if (local->ops->remove_chanctx) local->ops->remove_chanctx(&local->hw, &ctx->conf); trace_drv_return_void(local); ctx->driver_present = false; } static inline void drv_change_chanctx(struct ieee80211_local *local, struct ieee80211_chanctx *ctx, u32 changed) { might_sleep(); 
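/* ctx->driver_present records whether add_chanctx() succeeded; changing a context the driver never accepted triggers the one-time warning below */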
trace_drv_change_chanctx(local, ctx, changed); if (local->ops->change_chanctx) { WARN_ON_ONCE(!ctx->driver_present); local->ops->change_chanctx(&local->hw, &ctx->conf, changed); } trace_drv_return_void(local); } static inline int drv_assign_vif_chanctx(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct ieee80211_chanctx *ctx) { int ret = 0; if (!check_sdata_in_driver(sdata)) return -EIO; trace_drv_assign_vif_chanctx(local, sdata, ctx); if (local->ops->assign_vif_chanctx) { WARN_ON_ONCE(!ctx->driver_present); ret = local->ops->assign_vif_chanctx(&local->hw, &sdata->vif, &ctx->conf); } trace_drv_return_int(local, ret); return ret; } static inline void drv_unassign_vif_chanctx(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct ieee80211_chanctx *ctx) { might_sleep(); if (!check_sdata_in_driver(sdata)) return; trace_drv_unassign_vif_chanctx(local, sdata, ctx); if (local->ops->unassign_vif_chanctx) { WARN_ON_ONCE(!ctx->driver_present); local->ops->unassign_vif_chanctx(&local->hw, &sdata->vif, &ctx->conf); } trace_drv_return_void(local); } int drv_switch_vif_chanctx(struct ieee80211_local *local, struct ieee80211_vif_chanctx_switch *vifs, int n_vifs, enum ieee80211_chanctx_switch_mode mode); static inline int drv_start_ap(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata) { int ret = 0; might_sleep(); if (!check_sdata_in_driver(sdata)) return -EIO; trace_drv_start_ap(local, sdata, &sdata->vif.bss_conf); if (local->ops->start_ap) ret = local->ops->start_ap(&local->hw, &sdata->vif); trace_drv_return_int(local, ret); return ret; } static inline void drv_stop_ap(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata) { if (!check_sdata_in_driver(sdata)) return; trace_drv_stop_ap(local, sdata); if (local->ops->stop_ap) local->ops->stop_ap(&local->hw, &sdata->vif); trace_drv_return_void(local); } static inline void drv_reconfig_complete(struct ieee80211_local *local, enum ieee80211_reconfig_type reconfig_type) { might_sleep(); trace_drv_reconfig_complete(local, reconfig_type); if (local->ops->reconfig_complete) local->ops->reconfig_complete(&local->hw, reconfig_type); trace_drv_return_void(local); } static inline void drv_set_default_unicast_key(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, int key_idx) { if (!check_sdata_in_driver(sdata)) return; WARN_ON_ONCE(key_idx < -1 || key_idx > 3); trace_drv_set_default_unicast_key(local, sdata, key_idx); if (local->ops->set_default_unicast_key) local->ops->set_default_unicast_key(&local->hw, &sdata->vif, key_idx); trace_drv_return_void(local); } #if IS_ENABLED(CONFIG_IPV6) static inline void drv_ipv6_addr_change(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct inet6_dev *idev) { trace_drv_ipv6_addr_change(local, sdata); if (local->ops->ipv6_addr_change) local->ops->ipv6_addr_change(&local->hw, &sdata->vif, idev); trace_drv_return_void(local); } #endif static inline void drv_channel_switch_beacon(struct ieee80211_sub_if_data *sdata, struct cfg80211_chan_def *chandef) { struct ieee80211_local *local = sdata->local; if (local->ops->channel_switch_beacon) { trace_drv_channel_switch_beacon(local, sdata, chandef); local->ops->channel_switch_beacon(&local->hw, &sdata->vif, chandef); } } static inline int drv_pre_channel_switch(struct ieee80211_sub_if_data *sdata, struct ieee80211_channel_switch *ch_switch) { struct ieee80211_local *local = sdata->local; int ret = 0; if (!check_sdata_in_driver(sdata)) return -EIO; 
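/* give the driver advance notice of the announced switch so it can prepare before the channel actually changes */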
trace_drv_pre_channel_switch(local, sdata, ch_switch); if (local->ops->pre_channel_switch) ret = local->ops->pre_channel_switch(&local->hw, &sdata->vif, ch_switch); trace_drv_return_int(local, ret); return ret; } static inline int drv_post_channel_switch(struct ieee80211_sub_if_data *sdata) { struct ieee80211_local *local = sdata->local; int ret = 0; if (!check_sdata_in_driver(sdata)) return -EIO; trace_drv_post_channel_switch(local, sdata); if (local->ops->post_channel_switch) ret = local->ops->post_channel_switch(&local->hw, &sdata->vif); trace_drv_return_int(local, ret); return ret; } static inline void drv_abort_channel_switch(struct ieee80211_sub_if_data *sdata) { struct ieee80211_local *local = sdata->local; if (!check_sdata_in_driver(sdata)) return; trace_drv_abort_channel_switch(local, sdata); if (local->ops->abort_channel_switch) local->ops->abort_channel_switch(&local->hw, &sdata->vif); } static inline void drv_channel_switch_rx_beacon(struct ieee80211_sub_if_data *sdata, struct ieee80211_channel_switch *ch_switch) { struct ieee80211_local *local = sdata->local; if (!check_sdata_in_driver(sdata)) return; trace_drv_channel_switch_rx_beacon(local, sdata, ch_switch); if (local->ops->channel_switch_rx_beacon) local->ops->channel_switch_rx_beacon(&local->hw, &sdata->vif, ch_switch); } static inline int drv_join_ibss(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata) { int ret = 0; might_sleep(); if (!check_sdata_in_driver(sdata)) return -EIO; trace_drv_join_ibss(local, sdata, &sdata->vif.bss_conf); if (local->ops->join_ibss) ret = local->ops->join_ibss(&local->hw, &sdata->vif); trace_drv_return_int(local, ret); return ret; } static inline void drv_leave_ibss(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata) { might_sleep(); if (!check_sdata_in_driver(sdata)) return; trace_drv_leave_ibss(local, sdata); if (local->ops->leave_ibss) local->ops->leave_ibss(&local->hw, &sdata->vif); trace_drv_return_void(local); } static inline u32 drv_get_expected_throughput(struct ieee80211_local *local, struct sta_info *sta) { u32 ret = 0; trace_drv_get_expected_throughput(&sta->sta); if (local->ops->get_expected_throughput && sta->uploaded) ret = local->ops->get_expected_throughput(&local->hw, &sta->sta); trace_drv_return_u32(local, ret); return ret; } static inline int drv_get_txpower(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, int *dbm) { int ret; if (!local->ops->get_txpower) return -EOPNOTSUPP; ret = local->ops->get_txpower(&local->hw, &sdata->vif, dbm); trace_drv_get_txpower(local, sdata, *dbm, ret); return ret; } static inline int drv_tdls_channel_switch(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct ieee80211_sta *sta, u8 oper_class, struct cfg80211_chan_def *chandef, struct sk_buff *tmpl_skb, u32 ch_sw_tm_ie) { int ret; might_sleep(); if (!check_sdata_in_driver(sdata)) return -EIO; if (!local->ops->tdls_channel_switch) return -EOPNOTSUPP; trace_drv_tdls_channel_switch(local, sdata, sta, oper_class, chandef); ret = local->ops->tdls_channel_switch(&local->hw, &sdata->vif, sta, oper_class, chandef, tmpl_skb, ch_sw_tm_ie); trace_drv_return_int(local, ret); return ret; } static inline void drv_tdls_cancel_channel_switch(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct ieee80211_sta *sta) { might_sleep(); if (!check_sdata_in_driver(sdata)) return; if (!local->ops->tdls_cancel_channel_switch) return; trace_drv_tdls_cancel_channel_switch(local, sdata, sta); 
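/* the callback is known to be non-NULL here; drivers without TDLS channel-switch support were filtered out by the early return above */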
local->ops->tdls_cancel_channel_switch(&local->hw, &sdata->vif, sta); trace_drv_return_void(local); } static inline void drv_tdls_recv_channel_switch(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct ieee80211_tdls_ch_sw_params *params) { trace_drv_tdls_recv_channel_switch(local, sdata, params); if (local->ops->tdls_recv_channel_switch) local->ops->tdls_recv_channel_switch(&local->hw, &sdata->vif, params); trace_drv_return_void(local); } static inline void drv_wake_tx_queue(struct ieee80211_local *local, struct txq_info *txq) { struct ieee80211_sub_if_data *sdata = vif_to_sdata(txq->txq.vif); if (!check_sdata_in_driver(sdata)) return; trace_drv_wake_tx_queue(local, sdata, txq); local->ops->wake_tx_queue(&local->hw, &txq->txq); } static inline int drv_can_aggregate_in_amsdu(struct ieee80211_local *local, struct sk_buff *head, struct sk_buff *skb) { if (!local->ops->can_aggregate_in_amsdu) return true; return local->ops->can_aggregate_in_amsdu(&local->hw, head, skb); } static inline int drv_get_ftm_responder_stats(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct cfg80211_ftm_responder_stats *ftm_stats) { u32 ret = -EOPNOTSUPP; if (local->ops->get_ftm_responder_stats) ret = local->ops->get_ftm_responder_stats(&local->hw, &sdata->vif, ftm_stats); trace_drv_get_ftm_responder_stats(local, sdata, ftm_stats); return ret; } static inline int drv_start_pmsr(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct cfg80211_pmsr_request *request) { int ret = -EOPNOTSUPP; might_sleep(); if (!check_sdata_in_driver(sdata)) return -EIO; trace_drv_start_pmsr(local, sdata); if (local->ops->start_pmsr) ret = local->ops->start_pmsr(&local->hw, &sdata->vif, request); trace_drv_return_int(local, ret); return ret; } static inline void drv_abort_pmsr(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct cfg80211_pmsr_request *request) { trace_drv_abort_pmsr(local, sdata); might_sleep(); if (!check_sdata_in_driver(sdata)) return; if (local->ops->abort_pmsr) local->ops->abort_pmsr(&local->hw, &sdata->vif, request); trace_drv_return_void(local); } static inline int drv_start_nan(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct cfg80211_nan_conf *conf) { int ret; might_sleep(); check_sdata_in_driver(sdata); trace_drv_start_nan(local, sdata, conf); ret = local->ops->start_nan(&local->hw, &sdata->vif, conf); trace_drv_return_int(local, ret); return ret; } static inline void drv_stop_nan(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata) { might_sleep(); check_sdata_in_driver(sdata); trace_drv_stop_nan(local, sdata); local->ops->stop_nan(&local->hw, &sdata->vif); trace_drv_return_void(local); } static inline int drv_nan_change_conf(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct cfg80211_nan_conf *conf, u32 changes) { int ret; might_sleep(); check_sdata_in_driver(sdata); if (!local->ops->nan_change_conf) return -EOPNOTSUPP; trace_drv_nan_change_conf(local, sdata, conf, changes); ret = local->ops->nan_change_conf(&local->hw, &sdata->vif, conf, changes); trace_drv_return_int(local, ret); return ret; } static inline int drv_add_nan_func(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, const struct cfg80211_nan_func *nan_func) { int ret; might_sleep(); check_sdata_in_driver(sdata); if (!local->ops->add_nan_func) return -EOPNOTSUPP; trace_drv_add_nan_func(local, sdata, nan_func); ret = local->ops->add_nan_func(&local->hw, &sdata->vif, nan_func); 
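/* as in the other NAN wrappers, check_sdata_in_driver() above is used only for its WARN side effect and its return value is ignored */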
trace_drv_return_int(local, ret); return ret; } static inline void drv_del_nan_func(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, u8 instance_id) { might_sleep(); check_sdata_in_driver(sdata); trace_drv_del_nan_func(local, sdata, instance_id); if (local->ops->del_nan_func) local->ops->del_nan_func(&local->hw, &sdata->vif, instance_id); trace_drv_return_void(local); } #endif /* __MAC80211_DRIVER_OPS */ backport-iwlwifi-dkms-7906/net/mac80211/ethtool.c000066400000000000000000000143671351064634500214600ustar00rootroot00000000000000/* * mac80211 ethtool hooks for cfg80211 * * Copied from cfg.c - originally * Copyright 2006-2010 Johannes Berg * Copyright 2014 Intel Corporation (Author: Johannes Berg) * Copyright (C) 2018 Intel Corporation * * This file is GPLv2 as found in COPYING. */ #include #include #include "ieee80211_i.h" #include "sta_info.h" #include "driver-ops.h" static int ieee80211_set_ringparam(struct net_device *dev, struct ethtool_ringparam *rp) { struct ieee80211_local *local = wiphy_priv(dev->ieee80211_ptr->wiphy); if (rp->rx_mini_pending != 0 || rp->rx_jumbo_pending != 0) return -EINVAL; return drv_set_ringparam(local, rp->tx_pending, rp->rx_pending); } static void ieee80211_get_ringparam(struct net_device *dev, struct ethtool_ringparam *rp) { struct ieee80211_local *local = wiphy_priv(dev->ieee80211_ptr->wiphy); memset(rp, 0, sizeof(*rp)); drv_get_ringparam(local, &rp->tx_pending, &rp->tx_max_pending, &rp->rx_pending, &rp->rx_max_pending); } static const char ieee80211_gstrings_sta_stats[][ETH_GSTRING_LEN] = { "rx_packets", "rx_bytes", "rx_duplicates", "rx_fragments", "rx_dropped", "tx_packets", "tx_bytes", "tx_filtered", "tx_retry_failed", "tx_retries", "sta_state", "txrate", "rxrate", "signal", "channel", "noise", "ch_time", "ch_time_busy", "ch_time_ext_busy", "ch_time_rx", "ch_time_tx" }; #define STA_STATS_LEN ARRAY_SIZE(ieee80211_gstrings_sta_stats) static int ieee80211_get_sset_count(struct net_device *dev, int sset) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); int rv = 0; if (sset == ETH_SS_STATS) rv += STA_STATS_LEN; rv += drv_get_et_sset_count(sdata, sset); if (rv == 0) return -EOPNOTSUPP; return rv; } static void ieee80211_get_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_chanctx_conf *chanctx_conf; struct ieee80211_channel *channel; struct sta_info *sta; struct ieee80211_local *local = sdata->local; struct station_info sinfo; struct survey_info survey; int i, q; #define STA_STATS_SURVEY_LEN 7 memset(data, 0, sizeof(u64) * STA_STATS_LEN); #define ADD_STA_STATS(sta) \ do { \ data[i++] += sta->rx_stats.packets; \ data[i++] += sta->rx_stats.bytes; \ data[i++] += sta->rx_stats.num_duplicates; \ data[i++] += sta->rx_stats.fragments; \ data[i++] += sta->rx_stats.dropped; \ \ data[i++] += sinfo.tx_packets; \ data[i++] += sinfo.tx_bytes; \ data[i++] += sta->status_stats.filtered; \ data[i++] += sta->status_stats.retry_failed; \ data[i++] += sta->status_stats.retry_count; \ } while (0) /* For Managed stations, find the single station based on BSSID * and use that. For interface types, iterate through all available * stations and add stats for any station that is assigned to this * network device. 
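* In the multi-station case the per-station counters are accumulated
* field by field, so the reported values are totals across all
* matching stations.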
*/ mutex_lock(&local->sta_mtx); if (sdata->vif.type == NL80211_IFTYPE_STATION) { sta = sta_info_get_bss(sdata, sdata->u.mgd.bssid); if (!(sta && !WARN_ON(sta->sdata->dev != dev))) goto do_survey; memset(&sinfo, 0, sizeof(sinfo)); sta_set_sinfo(sta, &sinfo, false); i = 0; ADD_STA_STATS(sta); data[i++] = sta->sta_state; if (sinfo.filled & BIT_ULL(NL80211_STA_INFO_TX_BITRATE)) data[i] = 100000ULL * cfg80211_calculate_bitrate(&sinfo.txrate); i++; if (sinfo.filled & BIT_ULL(NL80211_STA_INFO_RX_BITRATE)) data[i] = 100000ULL * cfg80211_calculate_bitrate(&sinfo.rxrate); i++; if (sinfo.filled & BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG)) data[i] = (u8)sinfo.signal_avg; i++; } else { list_for_each_entry(sta, &local->sta_list, list) { /* Make sure this station belongs to the proper dev */ if (sta->sdata->dev != dev) continue; memset(&sinfo, 0, sizeof(sinfo)); sta_set_sinfo(sta, &sinfo, false); i = 0; ADD_STA_STATS(sta); } } do_survey: i = STA_STATS_LEN - STA_STATS_SURVEY_LEN; /* Get survey stats for current channel */ survey.filled = 0; rcu_read_lock(); chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); if (chanctx_conf) channel = chanctx_conf->def.chan; else channel = NULL; rcu_read_unlock(); if (channel) { q = 0; do { survey.filled = 0; if (drv_get_survey(local, q, &survey) != 0) { survey.filled = 0; break; } q++; } while (channel != survey.channel); } if (survey.filled) data[i++] = survey.channel->center_freq; else data[i++] = 0; if (survey.filled & SURVEY_INFO_NOISE_DBM) data[i++] = (u8)survey.noise; else data[i++] = -1LL; if (survey.filled & SURVEY_INFO_TIME) data[i++] = survey.time; else data[i++] = -1LL; if (survey.filled & SURVEY_INFO_TIME_BUSY) data[i++] = survey.time_busy; else data[i++] = -1LL; if (survey.filled & SURVEY_INFO_TIME_EXT_BUSY) data[i++] = survey.time_ext_busy; else data[i++] = -1LL; if (survey.filled & SURVEY_INFO_TIME_RX) data[i++] = survey.time_rx; else data[i++] = -1LL; if (survey.filled & SURVEY_INFO_TIME_TX) data[i++] = survey.time_tx; else data[i++] = -1LL; mutex_unlock(&local->sta_mtx); if (WARN_ON(i != STA_STATS_LEN)) return; drv_get_et_stats(sdata, stats, &(data[STA_STATS_LEN])); } static void ieee80211_get_strings(struct net_device *dev, u32 sset, u8 *data) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); int sz_sta_stats = 0; if (sset == ETH_SS_STATS) { sz_sta_stats = sizeof(ieee80211_gstrings_sta_stats); memcpy(data, ieee80211_gstrings_sta_stats, sz_sta_stats); } drv_get_et_strings(sdata, sset, &(data[sz_sta_stats])); } static int ieee80211_get_regs_len(struct net_device *dev) { return 0; } static void ieee80211_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *data) { struct wireless_dev *wdev = dev->ieee80211_ptr; regs->version = wdev->wiphy->hw_version; regs->len = 0; } const struct ethtool_ops ieee80211_ethtool_ops = { .get_drvinfo = cfg80211_get_drvinfo, .get_regs_len = ieee80211_get_regs_len, .get_regs = ieee80211_get_regs, .get_link = ethtool_op_get_link, .get_ringparam = ieee80211_get_ringparam, .set_ringparam = ieee80211_set_ringparam, .get_strings = ieee80211_get_strings, .get_ethtool_stats = ieee80211_get_stats, .get_sset_count = ieee80211_get_sset_count, }; backport-iwlwifi-dkms-7906/net/mac80211/fils_aead.c000066400000000000000000000207461351064634500217070ustar00rootroot00000000000000#if LINUX_VERSION_IS_GEQ(4,3,0) /* * FILS AEAD for (Re)Association Request/Response frames * Copyright 2016, Qualcomm Atheros, Inc. 
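*
* The construction is AES-SIV (RFC 5297): S2V based on AES-CMAC for the
* synthetic IV, plus AES-CTR for the payload.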
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include #include #include #include #include "ieee80211_i.h" #include "aes_cmac.h" #include "fils_aead.h" static void gf_mulx(u8 *pad) { u64 a = get_unaligned_be64(pad); u64 b = get_unaligned_be64(pad + 8); put_unaligned_be64((a << 1) | (b >> 63), pad); put_unaligned_be64((b << 1) ^ ((a >> 63) ? 0x87 : 0), pad + 8); } static int aes_s2v(struct crypto_shash *tfm, size_t num_elem, const u8 *addr[], size_t len[], u8 *v) { u8 d[AES_BLOCK_SIZE], tmp[AES_BLOCK_SIZE] = {}; SHASH_DESC_ON_STACK(desc, tfm); size_t i; desc->tfm = tfm; /* D = AES-CMAC(K, ) */ crypto_shash_digest(desc, tmp, AES_BLOCK_SIZE, d); for (i = 0; i < num_elem - 1; i++) { /* D = dbl(D) xor AES_CMAC(K, Si) */ gf_mulx(d); /* dbl */ crypto_shash_digest(desc, addr[i], len[i], tmp); crypto_xor(d, tmp, AES_BLOCK_SIZE); } crypto_shash_init(desc); if (len[i] >= AES_BLOCK_SIZE) { /* len(Sn) >= 128 */ /* T = Sn xorend D */ crypto_shash_update(desc, addr[i], len[i] - AES_BLOCK_SIZE); crypto_xor(d, addr[i] + len[i] - AES_BLOCK_SIZE, AES_BLOCK_SIZE); } else { /* len(Sn) < 128 */ /* T = dbl(D) xor pad(Sn) */ gf_mulx(d); /* dbl */ crypto_xor(d, addr[i], len[i]); d[len[i]] ^= 0x80; } /* V = AES-CMAC(K, T) */ crypto_shash_finup(desc, d, AES_BLOCK_SIZE, v); return 0; } /* Note: addr[] and len[] needs to have one extra slot at the end. */ static int aes_siv_encrypt(const u8 *key, size_t key_len, const u8 *plain, size_t plain_len, size_t num_elem, const u8 *addr[], size_t len[], u8 *out) { u8 v[AES_BLOCK_SIZE]; struct crypto_shash *tfm; struct crypto_skcipher *tfm2; struct skcipher_request *req; int res; struct scatterlist src[1], dst[1]; u8 *tmp; key_len /= 2; /* S2V key || CTR key */ addr[num_elem] = plain; len[num_elem] = plain_len; num_elem++; /* S2V */ tfm = crypto_alloc_shash("cmac(aes)", 0, 0); if (IS_ERR(tfm)) return PTR_ERR(tfm); /* K1 for S2V */ res = crypto_shash_setkey(tfm, key, key_len); if (!res) res = aes_s2v(tfm, num_elem, addr, len, v); crypto_free_shash(tfm); if (res) return res; /* Use a temporary buffer of the plaintext to handle need for * overwriting this during AES-CTR. */ tmp = kmemdup(plain, plain_len, GFP_KERNEL); if (!tmp) return -ENOMEM; /* IV for CTR before encrypted data */ memcpy(out, v, AES_BLOCK_SIZE); /* Synthetic IV to be used as the initial counter in CTR: * Q = V bitand (1^64 || 0^1 || 1^31 || 0^1 || 1^31) */ v[8] &= 0x7f; v[12] &= 0x7f; /* CTR */ tfm2 = crypto_alloc_skcipher("ctr(aes)", 0, CRYPTO_ALG_ASYNC); if (IS_ERR(tfm2)) { kfree(tmp); return PTR_ERR(tfm2); } /* K2 for CTR */ res = crypto_skcipher_setkey(tfm2, key + key_len, key_len); if (res) goto fail; req = skcipher_request_alloc(tfm2, GFP_KERNEL); if (!req) { res = -ENOMEM; goto fail; } sg_init_one(src, tmp, plain_len); sg_init_one(dst, out + AES_BLOCK_SIZE, plain_len); skcipher_request_set_crypt(req, src, dst, plain_len, v); res = crypto_skcipher_encrypt(req); skcipher_request_free(req); fail: kfree(tmp); crypto_free_skcipher(tfm2); return res; } /* Note: addr[] and len[] needs to have one extra slot at the end. 
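* The decrypted plaintext is appended as the final S2V input so the
* synthetic IV can be recomputed and compared with the received one.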
*/ static int aes_siv_decrypt(const u8 *key, size_t key_len, const u8 *iv_crypt, size_t iv_c_len, size_t num_elem, const u8 *addr[], size_t len[], u8 *out) { struct crypto_shash *tfm; struct crypto_skcipher *tfm2; struct skcipher_request *req; struct scatterlist src[1], dst[1]; size_t crypt_len; int res; u8 frame_iv[AES_BLOCK_SIZE], iv[AES_BLOCK_SIZE]; u8 check[AES_BLOCK_SIZE]; crypt_len = iv_c_len - AES_BLOCK_SIZE; key_len /= 2; /* S2V key || CTR key */ addr[num_elem] = out; len[num_elem] = crypt_len; num_elem++; memcpy(iv, iv_crypt, AES_BLOCK_SIZE); memcpy(frame_iv, iv_crypt, AES_BLOCK_SIZE); /* Synthetic IV to be used as the initial counter in CTR: * Q = V bitand (1^64 || 0^1 || 1^31 || 0^1 || 1^31) */ iv[8] &= 0x7f; iv[12] &= 0x7f; /* CTR */ tfm2 = crypto_alloc_skcipher("ctr(aes)", 0, CRYPTO_ALG_ASYNC); if (IS_ERR(tfm2)) return PTR_ERR(tfm2); /* K2 for CTR */ res = crypto_skcipher_setkey(tfm2, key + key_len, key_len); if (res) { crypto_free_skcipher(tfm2); return res; } req = skcipher_request_alloc(tfm2, GFP_KERNEL); if (!req) { crypto_free_skcipher(tfm2); return -ENOMEM; } sg_init_one(src, iv_crypt + AES_BLOCK_SIZE, crypt_len); sg_init_one(dst, out, crypt_len); skcipher_request_set_crypt(req, src, dst, crypt_len, iv); res = crypto_skcipher_decrypt(req); skcipher_request_free(req); crypto_free_skcipher(tfm2); if (res) return res; /* S2V */ tfm = crypto_alloc_shash("cmac(aes)", 0, 0); if (IS_ERR(tfm)) return PTR_ERR(tfm); /* K1 for S2V */ res = crypto_shash_setkey(tfm, key, key_len); if (!res) res = aes_s2v(tfm, num_elem, addr, len, check); crypto_free_shash(tfm); if (res) return res; if (memcmp(check, frame_iv, AES_BLOCK_SIZE) != 0) return -EINVAL; return 0; } int fils_encrypt_assoc_req(struct sk_buff *skb, struct ieee80211_mgd_assoc_data *assoc_data) { struct ieee80211_mgmt *mgmt = (void *)skb->data; u8 *capab, *ies, *encr; const u8 *addr[5 + 1], *session; size_t len[5 + 1]; size_t crypt_len; if (ieee80211_is_reassoc_req(mgmt->frame_control)) { capab = (u8 *)&mgmt->u.reassoc_req.capab_info; ies = mgmt->u.reassoc_req.variable; } else { capab = (u8 *)&mgmt->u.assoc_req.capab_info; ies = mgmt->u.assoc_req.variable; } session = cfg80211_find_ext_ie(WLAN_EID_EXT_FILS_SESSION, ies, skb->data + skb->len - ies); if (!session || session[1] != 1 + 8) return -EINVAL; /* encrypt after FILS Session element */ encr = (u8 *)session + 2 + 1 + 8; /* AES-SIV AAD vectors */ /* The STA's MAC address */ addr[0] = mgmt->sa; len[0] = ETH_ALEN; /* The AP's BSSID */ addr[1] = mgmt->da; len[1] = ETH_ALEN; /* The STA's nonce */ addr[2] = assoc_data->fils_nonces; len[2] = FILS_NONCE_LEN; /* The AP's nonce */ addr[3] = &assoc_data->fils_nonces[FILS_NONCE_LEN]; len[3] = FILS_NONCE_LEN; /* The (Re)Association Request frame from the Capability Information * field to the FILS Session element (both inclusive). 
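* These five vectors form the AAD in the order IEEE 802.11ai specifies
* for protecting the (Re)Association Request.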
*/ addr[4] = capab; len[4] = encr - capab; crypt_len = skb->data + skb->len - encr; skb_put(skb, AES_BLOCK_SIZE); return aes_siv_encrypt(assoc_data->fils_kek, assoc_data->fils_kek_len, encr, crypt_len, 5, addr, len, encr); } int fils_decrypt_assoc_resp(struct ieee80211_sub_if_data *sdata, u8 *frame, size_t *frame_len, struct ieee80211_mgd_assoc_data *assoc_data) { struct ieee80211_mgmt *mgmt = (void *)frame; u8 *capab, *ies, *encr; const u8 *addr[5 + 1], *session; size_t len[5 + 1]; int res; size_t crypt_len; if (*frame_len < 24 + 6) return -EINVAL; capab = (u8 *)&mgmt->u.assoc_resp.capab_info; ies = mgmt->u.assoc_resp.variable; session = cfg80211_find_ext_ie(WLAN_EID_EXT_FILS_SESSION, ies, frame + *frame_len - ies); if (!session || session[1] != 1 + 8) { mlme_dbg(sdata, "No (valid) FILS Session element in (Re)Association Response frame from %pM", mgmt->sa); return -EINVAL; } /* decrypt after FILS Session element */ encr = (u8 *)session + 2 + 1 + 8; /* AES-SIV AAD vectors */ /* The AP's BSSID */ addr[0] = mgmt->sa; len[0] = ETH_ALEN; /* The STA's MAC address */ addr[1] = mgmt->da; len[1] = ETH_ALEN; /* The AP's nonce */ addr[2] = &assoc_data->fils_nonces[FILS_NONCE_LEN]; len[2] = FILS_NONCE_LEN; /* The STA's nonce */ addr[3] = assoc_data->fils_nonces; len[3] = FILS_NONCE_LEN; /* The (Re)Association Response frame from the Capability Information * field to the FILS Session element (both inclusive). */ addr[4] = capab; len[4] = encr - capab; crypt_len = frame + *frame_len - encr; if (crypt_len < AES_BLOCK_SIZE) { mlme_dbg(sdata, "Not enough room for AES-SIV data after FILS Session element in (Re)Association Response frame from %pM", mgmt->sa); return -EINVAL; } res = aes_siv_decrypt(assoc_data->fils_kek, assoc_data->fils_kek_len, encr, crypt_len, 5, addr, len, encr); if (res != 0) { mlme_dbg(sdata, "AES-SIV decryption of (Re)Association Response frame from %pM failed", mgmt->sa); return res; } *frame_len -= AES_BLOCK_SIZE; return 0; } #endif backport-iwlwifi-dkms-7906/net/mac80211/fils_aead.h000066400000000000000000000017441351064634500217110ustar00rootroot00000000000000/* * FILS AEAD for (Re)Association Request/Response frames * Copyright 2016, Qualcomm Atheros, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #ifndef FILS_AEAD_H #define FILS_AEAD_H #if LINUX_VERSION_IS_GEQ(4,3,0) int fils_encrypt_assoc_req(struct sk_buff *skb, struct ieee80211_mgd_assoc_data *assoc_data); int fils_decrypt_assoc_resp(struct ieee80211_sub_if_data *sdata, u8 *frame, size_t *frame_len, struct ieee80211_mgd_assoc_data *assoc_data); #else static inline int fils_encrypt_assoc_req(struct sk_buff *skb, struct ieee80211_mgd_assoc_data *assoc_data) { return -EOPNOTSUPP; } static inline int fils_decrypt_assoc_resp(struct ieee80211_sub_if_data *sdata, u8 *frame, size_t *frame_len, struct ieee80211_mgd_assoc_data *assoc_data) { return -EOPNOTSUPP; } #endif #endif /* FILS_AEAD_H */ backport-iwlwifi-dkms-7906/net/mac80211/he.c000066400000000000000000000031131351064634500203610ustar00rootroot00000000000000/* * HE handling * * Copyright(c) 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. 
*/ #include "ieee80211_i.h" void ieee80211_he_cap_ie_to_sta_he_cap(struct ieee80211_sub_if_data *sdata, struct ieee80211_supported_band *sband, const u8 *he_cap_ie, u8 he_cap_len, struct sta_info *sta) { struct ieee80211_sta_he_cap *he_cap = &sta->sta.he_cap; struct ieee80211_he_cap_elem *he_cap_ie_elem = (void *)he_cap_ie; u8 he_ppe_size; u8 mcs_nss_size; u8 he_total_size; memset(he_cap, 0, sizeof(*he_cap)); if (!he_cap_ie || !ieee80211_get_he_sta_cap(sband)) return; /* Make sure size is OK */ mcs_nss_size = ieee80211_he_mcs_nss_size(he_cap_ie_elem); he_ppe_size = ieee80211_he_ppe_size(he_cap_ie[sizeof(he_cap->he_cap_elem) + mcs_nss_size], he_cap_ie_elem->phy_cap_info); he_total_size = sizeof(he_cap->he_cap_elem) + mcs_nss_size + he_ppe_size; if (he_cap_len < he_total_size) return; memcpy(&he_cap->he_cap_elem, he_cap_ie, sizeof(he_cap->he_cap_elem)); /* HE Tx/Rx HE MCS NSS Support Field */ memcpy(&he_cap->he_mcs_nss_supp, &he_cap_ie[sizeof(he_cap->he_cap_elem)], mcs_nss_size); /* Check if there are (optional) PPE Thresholds */ if (he_cap->he_cap_elem.phy_cap_info[6] & IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) memcpy(he_cap->ppe_thres, &he_cap_ie[sizeof(he_cap->he_cap_elem) + mcs_nss_size], he_ppe_size); he_cap->has_he = true; } backport-iwlwifi-dkms-7906/net/mac80211/ht.c000066400000000000000000000417671351064634500204210ustar00rootroot00000000000000/* * HT handling * * Copyright 2003, Jouni Malinen * Copyright 2002-2005, Instant802 Networks, Inc. * Copyright 2005-2006, Devicescape Software, Inc. * Copyright 2006-2007 Jiri Benc * Copyright 2007, Michael Wu * Copyright 2007-2010, Intel Corporation * Copyright 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include #include #include #include "ieee80211_i.h" #include "rate.h" static void __check_htcap_disable(struct ieee80211_ht_cap *ht_capa, struct ieee80211_ht_cap *ht_capa_mask, struct ieee80211_sta_ht_cap *ht_cap, u16 flag) { __le16 le_flag = cpu_to_le16(flag); if (ht_capa_mask->cap_info & le_flag) { if (!(ht_capa->cap_info & le_flag)) ht_cap->cap &= ~flag; } } static void __check_htcap_enable(struct ieee80211_ht_cap *ht_capa, struct ieee80211_ht_cap *ht_capa_mask, struct ieee80211_sta_ht_cap *ht_cap, u16 flag) { __le16 le_flag = cpu_to_le16(flag); if ((ht_capa_mask->cap_info & le_flag) && (ht_capa->cap_info & le_flag)) ht_cap->cap |= flag; } void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata, struct ieee80211_sta_ht_cap *ht_cap) { struct ieee80211_ht_cap *ht_capa, *ht_capa_mask; u8 *scaps, *smask; int i; if (!ht_cap->ht_supported) return; switch (sdata->vif.type) { case NL80211_IFTYPE_STATION: ht_capa = &sdata->u.mgd.ht_capa; ht_capa_mask = &sdata->u.mgd.ht_capa_mask; break; case NL80211_IFTYPE_ADHOC: ht_capa = &sdata->u.ibss.ht_capa; ht_capa_mask = &sdata->u.ibss.ht_capa_mask; break; default: WARN_ON_ONCE(1); return; } scaps = (u8 *)(&ht_capa->mcs.rx_mask); smask = (u8 *)(&ht_capa_mask->mcs.rx_mask); /* NOTE: If you add more over-rides here, update register_hw * ht_capa_mod_mask logic in main.c as well. * And, if this method can ever change ht_cap.ht_supported, fix * the check in ieee80211_add_ht_ie. */ /* check for HT over-rides, MCS rates first. 
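* Each mask byte first clears the covered MCS bits and then restores
* only the rates that the override also enables.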
*/ for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++) { u8 m = smask[i]; ht_cap->mcs.rx_mask[i] &= ~m; /* turn off all masked bits */ /* Add back rates that are supported */ ht_cap->mcs.rx_mask[i] |= (m & scaps[i]); } /* Force removal of HT-40 capabilities? */ __check_htcap_disable(ht_capa, ht_capa_mask, ht_cap, IEEE80211_HT_CAP_SUP_WIDTH_20_40); __check_htcap_disable(ht_capa, ht_capa_mask, ht_cap, IEEE80211_HT_CAP_SGI_40); /* Allow user to disable SGI-20 (SGI-40 is handled above) */ __check_htcap_disable(ht_capa, ht_capa_mask, ht_cap, IEEE80211_HT_CAP_SGI_20); /* Allow user to disable the max-AMSDU bit. */ __check_htcap_disable(ht_capa, ht_capa_mask, ht_cap, IEEE80211_HT_CAP_MAX_AMSDU); /* Allow user to disable LDPC */ __check_htcap_disable(ht_capa, ht_capa_mask, ht_cap, IEEE80211_HT_CAP_LDPC_CODING); /* Allow user to enable 40 MHz intolerant bit. */ __check_htcap_enable(ht_capa, ht_capa_mask, ht_cap, IEEE80211_HT_CAP_40MHZ_INTOLERANT); /* Allow user to decrease AMPDU factor */ if (ht_capa_mask->ampdu_params_info & IEEE80211_HT_AMPDU_PARM_FACTOR) { u8 n = ht_capa->ampdu_params_info & IEEE80211_HT_AMPDU_PARM_FACTOR; if (n < ht_cap->ampdu_factor) ht_cap->ampdu_factor = n; } /* Allow the user to increase AMPDU density. */ if (ht_capa_mask->ampdu_params_info & IEEE80211_HT_AMPDU_PARM_DENSITY) { u8 n = (ht_capa->ampdu_params_info & IEEE80211_HT_AMPDU_PARM_DENSITY) >> IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT; if (n > ht_cap->ampdu_density) ht_cap->ampdu_density = n; } } bool ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata, struct ieee80211_supported_band *sband, const struct ieee80211_ht_cap *ht_cap_ie, struct sta_info *sta) { struct ieee80211_sta_ht_cap ht_cap, own_cap; u8 ampdu_info, tx_mcs_set_cap; int i, max_tx_streams; bool changed; enum ieee80211_sta_rx_bandwidth bw; enum ieee80211_smps_mode smps_mode; memset(&ht_cap, 0, sizeof(ht_cap)); if (!ht_cap_ie || !sband->ht_cap.ht_supported) goto apply; ht_cap.ht_supported = true; own_cap = sband->ht_cap; /* * If user has specified capability over-rides, take care * of that if the station we're setting up is the AP or TDLS peer that * we advertised a restricted capability set to. Override * our own capabilities and then use those below. */ if (sdata->vif.type == NL80211_IFTYPE_STATION || sdata->vif.type == NL80211_IFTYPE_ADHOC) ieee80211_apply_htcap_overrides(sdata, &own_cap); /* * The bits listed in this expression should be * the same for the peer and us, if the station * advertises more then we can't use those thus * we mask them out. */ ht_cap.cap = le16_to_cpu(ht_cap_ie->cap_info) & (own_cap.cap | ~(IEEE80211_HT_CAP_LDPC_CODING | IEEE80211_HT_CAP_SUP_WIDTH_20_40 | IEEE80211_HT_CAP_GRN_FLD | IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_DSSSCCK40)); /* * The STBC bits are asymmetric -- if we don't have * TX then mask out the peer's RX and vice versa. */ if (!(own_cap.cap & IEEE80211_HT_CAP_TX_STBC)) ht_cap.cap &= ~IEEE80211_HT_CAP_RX_STBC; if (!(own_cap.cap & IEEE80211_HT_CAP_RX_STBC)) ht_cap.cap &= ~IEEE80211_HT_CAP_TX_STBC; ampdu_info = ht_cap_ie->ampdu_params_info; ht_cap.ampdu_factor = ampdu_info & IEEE80211_HT_AMPDU_PARM_FACTOR; ht_cap.ampdu_density = (ampdu_info & IEEE80211_HT_AMPDU_PARM_DENSITY) >> 2; /* own MCS TX capabilities */ tx_mcs_set_cap = own_cap.mcs.tx_params; /* Copy peer MCS TX capabilities, the driver might need them. */ ht_cap.mcs.tx_params = ht_cap_ie->mcs.tx_params; /* can we TX with MCS rates? 
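* If the peer left IEEE80211_HT_MCS_TX_DEFINED unset there is nothing
* to intersect, so the rx_mask computed so far is applied unchanged.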
*/ if (!(tx_mcs_set_cap & IEEE80211_HT_MCS_TX_DEFINED)) goto apply; /* Counting from 0, therefore +1 */ if (tx_mcs_set_cap & IEEE80211_HT_MCS_TX_RX_DIFF) max_tx_streams = ((tx_mcs_set_cap & IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK) >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT) + 1; else max_tx_streams = IEEE80211_HT_MCS_TX_MAX_STREAMS; /* * 802.11n-2009 20.3.5 / 20.6 says: * - indices 0 to 7 and 32 are single spatial stream * - 8 to 31 are multiple spatial streams using equal modulation * [8..15 for two streams, 16..23 for three and 24..31 for four] * - remainder are multiple spatial streams using unequal modulation */ for (i = 0; i < max_tx_streams; i++) ht_cap.mcs.rx_mask[i] = own_cap.mcs.rx_mask[i] & ht_cap_ie->mcs.rx_mask[i]; if (tx_mcs_set_cap & IEEE80211_HT_MCS_TX_UNEQUAL_MODULATION) for (i = IEEE80211_HT_MCS_UNEQUAL_MODULATION_START_BYTE; i < IEEE80211_HT_MCS_MASK_LEN; i++) ht_cap.mcs.rx_mask[i] = own_cap.mcs.rx_mask[i] & ht_cap_ie->mcs.rx_mask[i]; /* handle MCS rate 32 too */ if (own_cap.mcs.rx_mask[32/8] & ht_cap_ie->mcs.rx_mask[32/8] & 1) ht_cap.mcs.rx_mask[32/8] |= 1; /* set Rx highest rate */ ht_cap.mcs.rx_highest = ht_cap_ie->mcs.rx_highest; if (ht_cap.cap & IEEE80211_HT_CAP_MAX_AMSDU) sta->sta.max_amsdu_len = IEEE80211_MAX_MPDU_LEN_HT_7935; else sta->sta.max_amsdu_len = IEEE80211_MAX_MPDU_LEN_HT_3839; apply: changed = memcmp(&sta->sta.ht_cap, &ht_cap, sizeof(ht_cap)); memcpy(&sta->sta.ht_cap, &ht_cap, sizeof(ht_cap)); switch (sdata->vif.bss_conf.chandef.width) { default: WARN_ON_ONCE(1); /* fall through */ case NL80211_CHAN_WIDTH_20_NOHT: case NL80211_CHAN_WIDTH_20: bw = IEEE80211_STA_RX_BW_20; break; case NL80211_CHAN_WIDTH_40: case NL80211_CHAN_WIDTH_80: case NL80211_CHAN_WIDTH_80P80: case NL80211_CHAN_WIDTH_160: bw = ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 ? IEEE80211_STA_RX_BW_40 : IEEE80211_STA_RX_BW_20; break; } sta->sta.bandwidth = bw; sta->cur_max_bandwidth = ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 ? IEEE80211_STA_RX_BW_40 : IEEE80211_STA_RX_BW_20; switch ((ht_cap.cap & IEEE80211_HT_CAP_SM_PS) >> IEEE80211_HT_CAP_SM_PS_SHIFT) { case WLAN_HT_CAP_SM_PS_INVALID: case WLAN_HT_CAP_SM_PS_STATIC: smps_mode = IEEE80211_SMPS_STATIC; break; case WLAN_HT_CAP_SM_PS_DYNAMIC: smps_mode = IEEE80211_SMPS_DYNAMIC; break; case WLAN_HT_CAP_SM_PS_DISABLED: smps_mode = IEEE80211_SMPS_OFF; break; } if (smps_mode != sta->sta.smps_mode) changed = true; sta->sta.smps_mode = smps_mode; return changed; } void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta, enum ieee80211_agg_stop_reason reason) { int i; for (i = 0; i < IEEE80211_NUM_TIDS; i++) { __ieee80211_stop_rx_ba_session(sta, i, WLAN_BACK_RECIPIENT, WLAN_REASON_QSTA_LEAVE_QBSS, reason != AGG_STOP_DESTROY_STA && reason != AGG_STOP_PEER_REQUEST); } for (i = 0; i < IEEE80211_NUM_TIDS; i++) __ieee80211_stop_tx_ba_session(sta, i, reason); /* * In case the tear down is part of a reconfigure due to HW restart * request, it is possible that the low level driver requested to stop * the BA session, so handle it to properly clean tid_tx data. 
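* Without this cleanup the pending stop callbacks would never run and
* the tid_tx state would be leaked.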
*/ if(reason == AGG_STOP_DESTROY_STA) { cancel_work_sync(&sta->ampdu_mlme.work); mutex_lock(&sta->ampdu_mlme.mtx); for (i = 0; i < IEEE80211_NUM_TIDS; i++) { struct tid_ampdu_tx *tid_tx = rcu_dereference_protected_tid_tx(sta, i); if (!tid_tx) continue; if (test_and_clear_bit(HT_AGG_STATE_STOP_CB, &tid_tx->state)) ieee80211_stop_tx_ba_cb(sta, i, tid_tx); } mutex_unlock(&sta->ampdu_mlme.mtx); } } void ieee80211_ba_session_work(struct work_struct *work) { struct sta_info *sta = container_of(work, struct sta_info, ampdu_mlme.work); struct tid_ampdu_tx *tid_tx; bool blocked; int tid; /* When this flag is set, new sessions should be blocked. */ blocked = test_sta_flag(sta, WLAN_STA_BLOCK_BA); mutex_lock(&sta->ampdu_mlme.mtx); for (tid = 0; tid < IEEE80211_NUM_TIDS; tid++) { if (test_and_clear_bit(tid, sta->ampdu_mlme.tid_rx_timer_expired)) ___ieee80211_stop_rx_ba_session( sta, tid, WLAN_BACK_RECIPIENT, WLAN_REASON_QSTA_TIMEOUT, true); if (test_and_clear_bit(tid, sta->ampdu_mlme.tid_rx_stop_requested)) ___ieee80211_stop_rx_ba_session( sta, tid, WLAN_BACK_RECIPIENT, WLAN_REASON_UNSPECIFIED, true); if (!blocked && test_and_clear_bit(tid, sta->ampdu_mlme.tid_rx_manage_offl)) ___ieee80211_start_rx_ba_session(sta, 0, 0, 0, 1, tid, IEEE80211_MAX_AMPDU_BUF_HT, false, true); if (test_and_clear_bit(tid + IEEE80211_NUM_TIDS, sta->ampdu_mlme.tid_rx_manage_offl)) ___ieee80211_stop_rx_ba_session( sta, tid, WLAN_BACK_RECIPIENT, 0, false); spin_lock_bh(&sta->lock); tid_tx = sta->ampdu_mlme.tid_start_tx[tid]; if (!blocked && tid_tx) { /* * Assign it over to the normal tid_tx array * where it "goes live". */ sta->ampdu_mlme.tid_start_tx[tid] = NULL; /* could there be a race? */ if (sta->ampdu_mlme.tid_tx[tid]) kfree(tid_tx); else ieee80211_assign_tid_tx(sta, tid, tid_tx); spin_unlock_bh(&sta->lock); ieee80211_tx_ba_session_handle_start(sta, tid); continue; } spin_unlock_bh(&sta->lock); tid_tx = rcu_dereference_protected_tid_tx(sta, tid); if (!tid_tx) continue; if (!blocked && test_and_clear_bit(HT_AGG_STATE_START_CB, &tid_tx->state)) ieee80211_start_tx_ba_cb(sta, tid, tid_tx); if (test_and_clear_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state)) ___ieee80211_stop_tx_ba_session(sta, tid, AGG_STOP_LOCAL_REQUEST); if (test_and_clear_bit(HT_AGG_STATE_STOP_CB, &tid_tx->state)) ieee80211_stop_tx_ba_cb(sta, tid, tid_tx); } mutex_unlock(&sta->ampdu_mlme.mtx); } void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata, const u8 *da, u16 tid, u16 initiator, u16 reason_code) { struct ieee80211_local *local = sdata->local; struct sk_buff *skb; struct ieee80211_mgmt *mgmt; u16 params; skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom); if (!skb) return; skb_reserve(skb, local->hw.extra_tx_headroom); mgmt = skb_put_zero(skb, 24); memcpy(mgmt->da, da, ETH_ALEN); memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); if (sdata->vif.type == NL80211_IFTYPE_AP || sdata->vif.type == NL80211_IFTYPE_AP_VLAN || sdata->vif.type == NL80211_IFTYPE_MESH_POINT) memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN); else if (sdata->vif.type == NL80211_IFTYPE_STATION) memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN); else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) memcpy(mgmt->bssid, sdata->u.ibss.bssid, ETH_ALEN); mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ACTION); skb_put(skb, 1 + sizeof(mgmt->u.action.u.delba)); mgmt->u.action.category = WLAN_CATEGORY_BACK; mgmt->u.action.u.delba.action_code = WLAN_ACTION_DELBA; params = (u16)(initiator << 11); /* bit 11 initiator */ params |= (u16)(tid << 12); /* bit 15:12 TID 
number */ mgmt->u.action.u.delba.params = cpu_to_le16(params); mgmt->u.action.u.delba.reason_code = cpu_to_le16(reason_code); ieee80211_tx_skb(sdata, skb); } void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata, struct sta_info *sta, struct ieee80211_mgmt *mgmt, size_t len) { u16 tid, params; u16 initiator; params = le16_to_cpu(mgmt->u.action.u.delba.params); tid = (params & IEEE80211_DELBA_PARAM_TID_MASK) >> 12; initiator = (params & IEEE80211_DELBA_PARAM_INITIATOR_MASK) >> 11; ht_dbg_ratelimited(sdata, "delba from %pM (%s) tid %d reason code %d\n", mgmt->sa, initiator ? "initiator" : "recipient", tid, le16_to_cpu(mgmt->u.action.u.delba.reason_code)); if (initiator == WLAN_BACK_INITIATOR) __ieee80211_stop_rx_ba_session(sta, tid, WLAN_BACK_INITIATOR, 0, true); else __ieee80211_stop_tx_ba_session(sta, tid, AGG_STOP_PEER_REQUEST); } enum nl80211_smps_mode ieee80211_smps_mode_to_smps_mode(enum ieee80211_smps_mode smps) { switch (smps) { case IEEE80211_SMPS_OFF: return NL80211_SMPS_OFF; case IEEE80211_SMPS_STATIC: return NL80211_SMPS_STATIC; case IEEE80211_SMPS_DYNAMIC: return NL80211_SMPS_DYNAMIC; default: return NL80211_SMPS_OFF; } } int ieee80211_send_smps_action(struct ieee80211_sub_if_data *sdata, enum ieee80211_smps_mode smps, const u8 *da, const u8 *bssid) { struct ieee80211_local *local = sdata->local; struct sk_buff *skb; struct ieee80211_mgmt *action_frame; /* 27 = header + category + action + smps mode */ skb = dev_alloc_skb(27 + local->hw.extra_tx_headroom); if (!skb) return -ENOMEM; skb_reserve(skb, local->hw.extra_tx_headroom); action_frame = skb_put(skb, 27); memcpy(action_frame->da, da, ETH_ALEN); memcpy(action_frame->sa, sdata->dev->dev_addr, ETH_ALEN); memcpy(action_frame->bssid, bssid, ETH_ALEN); action_frame->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ACTION); action_frame->u.action.category = WLAN_CATEGORY_HT; action_frame->u.action.u.ht_smps.action = WLAN_HT_ACTION_SMPS; switch (smps) { case IEEE80211_SMPS_AUTOMATIC: case IEEE80211_SMPS_NUM_MODES: WARN_ON(1); /* fall through */ case IEEE80211_SMPS_OFF: action_frame->u.action.u.ht_smps.smps_control = WLAN_HT_SMPS_CONTROL_DISABLED; break; case IEEE80211_SMPS_STATIC: action_frame->u.action.u.ht_smps.smps_control = WLAN_HT_SMPS_CONTROL_STATIC; break; case IEEE80211_SMPS_DYNAMIC: action_frame->u.action.u.ht_smps.smps_control = WLAN_HT_SMPS_CONTROL_DYNAMIC; break; } /* we'll do more on status of this frame */ IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS; ieee80211_tx_skb(sdata, skb); return 0; } void ieee80211_request_smps_mgd_work(struct work_struct *work) { struct ieee80211_sub_if_data *sdata = container_of(work, struct ieee80211_sub_if_data, u.mgd.request_smps_work); sdata_lock(sdata); __ieee80211_request_smps_mgd(sdata, sdata->u.mgd.driver_smps_mode); sdata_unlock(sdata); } void ieee80211_request_smps_ap_work(struct work_struct *work) { struct ieee80211_sub_if_data *sdata = container_of(work, struct ieee80211_sub_if_data, u.ap.request_smps_work); sdata_lock(sdata); if (sdata_dereference(sdata->u.ap.beacon, sdata)) __ieee80211_request_smps_ap(sdata, sdata->u.ap.driver_smps_mode); sdata_unlock(sdata); } void ieee80211_request_smps(struct ieee80211_vif *vif, enum ieee80211_smps_mode smps_mode) { struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); if (WARN_ON_ONCE(vif->type != NL80211_IFTYPE_STATION && vif->type != NL80211_IFTYPE_AP)) return; if (vif->type == NL80211_IFTYPE_STATION) { if (sdata->u.mgd.driver_smps_mode == smps_mode) return; sdata->u.mgd.driver_smps_mode 
= smps_mode; ieee80211_queue_work(&sdata->local->hw, &sdata->u.mgd.request_smps_work); } else { /* AUTOMATIC is meaningless in AP mode */ if (WARN_ON_ONCE(smps_mode == IEEE80211_SMPS_AUTOMATIC)) return; if (sdata->u.ap.driver_smps_mode == smps_mode) return; sdata->u.ap.driver_smps_mode = smps_mode; ieee80211_queue_work(&sdata->local->hw, &sdata->u.ap.request_smps_work); } } /* this might change ... don't want non-open drivers using it */ EXPORT_SYMBOL_GPL(ieee80211_request_smps);

/* ===== backport-iwlwifi-dkms-7906/net/mac80211/ibss.c ===== */

/* * IBSS mode implementation * Copyright 2003-2008, Jouni Malinen * Copyright 2004, Instant802 Networks, Inc. * Copyright 2005, Devicescape Software, Inc. * Copyright 2006-2007 Jiri Benc * Copyright 2007, Michael Wu * Copyright 2009, Johannes Berg * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright(c) 2016 Intel Deutschland GmbH * Copyright(c) 2018-2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/delay.h> #include <linux/slab.h> #include <linux/if_ether.h> #include <linux/skbuff.h> #include <linux/if_arp.h> #include <linux/etherdevice.h> #include <linux/rtnetlink.h> #include <net/mac80211.h> #include "ieee80211_i.h" #include "driver-ops.h" #include "rate.h" #define IEEE80211_SCAN_INTERVAL (2 * HZ) #define IEEE80211_IBSS_JOIN_TIMEOUT (7 * HZ) #define IEEE80211_IBSS_MERGE_INTERVAL (30 * HZ) #define IEEE80211_IBSS_INACTIVITY_LIMIT (60 * HZ) #define IEEE80211_IBSS_RSN_INACTIVITY_LIMIT (10 * HZ) #define IEEE80211_IBSS_MAX_STA_ENTRIES 128 static struct beacon_data * ieee80211_ibss_build_presp(struct ieee80211_sub_if_data *sdata, const int beacon_int, const u32 basic_rates, const u16 capability, u64 tsf, struct cfg80211_chan_def *chandef, bool *have_higher_than_11mbit, struct cfg80211_csa_settings *csa_settings) { struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; struct ieee80211_local *local = sdata->local; int rates_n = 0, i, ri; struct ieee80211_mgmt *mgmt; u8 *pos; struct ieee80211_supported_band *sband; u32 rate_flags, rates = 0, rates_added = 0; struct beacon_data *presp; int frame_len; int shift; /* Build IBSS probe response */ frame_len = sizeof(struct ieee80211_hdr_3addr) + 12 /* struct ieee80211_mgmt.u.beacon */ + 2 + IEEE80211_MAX_SSID_LEN /* max SSID */ + 2 + 8 /* max Supported Rates */ + 3 /* max DS params */ + 4 /* IBSS params */ + 5 /* Channel Switch Announcement */ + 2 + (IEEE80211_MAX_SUPP_RATES - 8) + 2 + sizeof(struct ieee80211_ht_cap) + 2 + sizeof(struct ieee80211_ht_operation) + 2 + sizeof(struct ieee80211_vht_cap) + 2 + sizeof(struct ieee80211_vht_operation) + ifibss->ie_len; presp = kzalloc(sizeof(*presp) + frame_len, GFP_KERNEL); if (!presp) return NULL; presp->head = (void *)(presp + 1); mgmt = (void *) presp->head; mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_RESP); eth_broadcast_addr(mgmt->da); memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); memcpy(mgmt->bssid, ifibss->bssid, ETH_ALEN); mgmt->u.beacon.beacon_int = cpu_to_le16(beacon_int); mgmt->u.beacon.timestamp = cpu_to_le64(tsf); mgmt->u.beacon.capab_info = cpu_to_le16(capability); pos = (u8 *)mgmt + offsetof(struct ieee80211_mgmt, u.beacon.variable); *pos++ = WLAN_EID_SSID; *pos++ = ifibss->ssid_len; memcpy(pos, ifibss->ssid, ifibss->ssid_len); pos += ifibss->ssid_len; sband = local->hw.wiphy->bands[chandef->chan->band]; rate_flags = ieee80211_chandef_rate_flags(chandef); shift = ieee80211_chandef_get_shift(chandef); rates_n = 0; if
(have_higher_than_11mbit) *have_higher_than_11mbit = false; for (i = 0; i < sband->n_bitrates; i++) { if ((rate_flags & sband->bitrates[i].flags) != rate_flags) continue; if (sband->bitrates[i].bitrate > 110 && have_higher_than_11mbit) *have_higher_than_11mbit = true; rates |= BIT(i); rates_n++; } *pos++ = WLAN_EID_SUPP_RATES; *pos++ = min_t(int, 8, rates_n); for (ri = 0; ri < sband->n_bitrates; ri++) { int rate = DIV_ROUND_UP(sband->bitrates[ri].bitrate, 5 * (1 << shift)); u8 basic = 0; if (!(rates & BIT(ri))) continue; if (basic_rates & BIT(ri)) basic = 0x80; *pos++ = basic | (u8) rate; if (++rates_added == 8) { ri++; /* continue at next rate for EXT_SUPP_RATES */ break; } } if (sband->band == NL80211_BAND_2GHZ) { *pos++ = WLAN_EID_DS_PARAMS; *pos++ = 1; *pos++ = ieee80211_frequency_to_channel( chandef->chan->center_freq); } *pos++ = WLAN_EID_IBSS_PARAMS; *pos++ = 2; /* FIX: set ATIM window based on scan results */ *pos++ = 0; *pos++ = 0; if (csa_settings) { *pos++ = WLAN_EID_CHANNEL_SWITCH; *pos++ = 3; *pos++ = csa_settings->block_tx ? 1 : 0; *pos++ = ieee80211_frequency_to_channel( csa_settings->chandef.chan->center_freq); presp->csa_counter_offsets[0] = (pos - presp->head); *pos++ = csa_settings->count; presp->csa_current_counter = csa_settings->count; } /* put the remaining rates in WLAN_EID_EXT_SUPP_RATES */ if (rates_n > 8) { *pos++ = WLAN_EID_EXT_SUPP_RATES; *pos++ = rates_n - 8; for (; ri < sband->n_bitrates; ri++) { int rate = DIV_ROUND_UP(sband->bitrates[ri].bitrate, 5 * (1 << shift)); u8 basic = 0; if (!(rates & BIT(ri))) continue; if (basic_rates & BIT(ri)) basic = 0x80; *pos++ = basic | (u8) rate; } } if (ifibss->ie_len) { memcpy(pos, ifibss->ie, ifibss->ie_len); pos += ifibss->ie_len; } /* add HT capability and information IEs */ if (chandef->width != NL80211_CHAN_WIDTH_20_NOHT && chandef->width != NL80211_CHAN_WIDTH_5 && chandef->width != NL80211_CHAN_WIDTH_10 && sband->ht_cap.ht_supported) { struct ieee80211_sta_ht_cap ht_cap; memcpy(&ht_cap, &sband->ht_cap, sizeof(ht_cap)); ieee80211_apply_htcap_overrides(sdata, &ht_cap); pos = ieee80211_ie_build_ht_cap(pos, &ht_cap, ht_cap.cap); /* * Note: According to 802.11n-2009 9.13.3.1, HT Protection * field and RIFS Mode are reserved in IBSS mode, therefore * keep them at 0 */ pos = ieee80211_ie_build_ht_oper(pos, &sband->ht_cap, chandef, 0, false); /* add VHT capability and information IEs */ if (chandef->width != NL80211_CHAN_WIDTH_20 && chandef->width != NL80211_CHAN_WIDTH_40 && sband->vht_cap.vht_supported) { pos = ieee80211_ie_build_vht_cap(pos, &sband->vht_cap, sband->vht_cap.cap); pos = ieee80211_ie_build_vht_oper(pos, &sband->vht_cap, chandef); } } if (local->hw.queues >= IEEE80211_NUM_ACS) pos = ieee80211_add_wmm_info_ie(pos, 0); /* U-APSD not in use */ presp->head_len = pos - presp->head; if (WARN_ON(presp->head_len > frame_len)) goto error; return presp; error: kfree(presp); return NULL; } static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, const u8 *bssid, const int beacon_int, struct cfg80211_chan_def *req_chandef, const u32 basic_rates, const u16 capability, u64 tsf, bool creator) { struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; struct ieee80211_local *local = sdata->local; struct ieee80211_mgmt *mgmt; struct cfg80211_bss *bss; u32 bss_change; struct cfg80211_chan_def chandef; struct ieee80211_channel *chan; struct beacon_data *presp; struct cfg80211_inform_bss bss_meta = {}; bool have_higher_than_11mbit; bool radar_required; int err; sdata_assert_lock(sdata); /* Reset own TSF to allow time 
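synchronization work; an aside on the rate encoding first. */

/* The Supported Rates encoding in ieee80211_ibss_build_presp() above works
 * as follows: each rate octet is the rate in 500 kbps units, the high bit
 * (0x80) marks a member of the BSSBasicRateSet, and only eight rates fit
 * into WLAN_EID_SUPP_RATES; the remainder spill into
 * WLAN_EID_EXT_SUPP_RATES. A minimal user-space sketch, assuming shift == 0
 * and a plain array in place of struct ieee80211_supported_band: */

#include <stdint.h>
#include <stdio.h>

/* bitrates in 100 kbps units, as struct ieee80211_rate::bitrate stores them */
static const int bitrates[] = { 10, 20, 55, 110, 60, 90, 120, 180 };

static size_t encode_supp_rates(uint8_t *ie, uint32_t basic_mask)
{
	size_t i, n = sizeof(bitrates) / sizeof(bitrates[0]);

	ie[0] = 1; /* WLAN_EID_SUPP_RATES */
	ie[1] = (uint8_t)n;
	for (i = 0; i < n; i++) {
		uint8_t r = (uint8_t)((bitrates[i] + 4) / 5); /* DIV_ROUND_UP(rate, 5) */

		if (basic_mask & (1u << i))
			r |= 0x80; /* basic rate */
		ie[2 + i] = r;
	}
	return 2 + n;
}

int main(void)
{
	uint8_t ie[10];
	size_t i, len = encode_supp_rates(ie, 0x0f); /* 1/2/5.5/11 Mbps basic */

	for (i = 2; i < len; i++)
		printf("0x%02x ", ie[i]); /* 0x82 0x84 0x8b 0x96 0x0c 0x12 0x18 0x24 */
	printf("\n");
	return 0;
}

/* Back in __ieee80211_sta_join_ibss(): reset own TSF to allow time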
synchronization work. */ drv_reset_tsf(local, sdata); if (!ether_addr_equal(ifibss->bssid, bssid)) sta_info_flush(sdata); /* if merging, indicate to driver that we leave the old IBSS */ if (sdata->vif.bss_conf.ibss_joined) { sdata->vif.bss_conf.ibss_joined = false; sdata->vif.bss_conf.ibss_creator = false; sdata->vif.bss_conf.enable_beacon = false; netif_carrier_off(sdata->dev); ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_IBSS | BSS_CHANGED_BEACON_ENABLED); drv_leave_ibss(local, sdata); } presp = rcu_dereference_protected(ifibss->presp, lockdep_is_held(&sdata->wdev.mtx)); RCU_INIT_POINTER(ifibss->presp, NULL); if (presp) kfree_rcu(presp, rcu_head); /* make a copy of the chandef, it could be modified below. */ chandef = *req_chandef; chan = chandef.chan; if (!cfg80211_reg_can_beacon(local->hw.wiphy, &chandef, NL80211_IFTYPE_ADHOC)) { if (chandef.width == NL80211_CHAN_WIDTH_5 || chandef.width == NL80211_CHAN_WIDTH_10 || chandef.width == NL80211_CHAN_WIDTH_20_NOHT || chandef.width == NL80211_CHAN_WIDTH_20) { sdata_info(sdata, "Failed to join IBSS, beacons forbidden\n"); return; } chandef.width = NL80211_CHAN_WIDTH_20; chandef.center_freq1 = chan->center_freq; /* check again for downgraded chandef */ if (!cfg80211_reg_can_beacon(local->hw.wiphy, &chandef, NL80211_IFTYPE_ADHOC)) { sdata_info(sdata, "Failed to join IBSS, beacons forbidden\n"); return; } } err = cfg80211_chandef_dfs_required(sdata->local->hw.wiphy, &chandef, NL80211_IFTYPE_ADHOC); if (err < 0) { sdata_info(sdata, "Failed to join IBSS, invalid chandef\n"); return; } if (err > 0 && !ifibss->userspace_handles_dfs) { sdata_info(sdata, "Failed to join IBSS, DFS channel without control program\n"); return; } radar_required = err; mutex_lock(&local->mtx); if (ieee80211_vif_use_channel(sdata, &chandef, ifibss->fixed_channel ? IEEE80211_CHANCTX_SHARED : IEEE80211_CHANCTX_EXCLUSIVE)) { sdata_info(sdata, "Failed to join IBSS, no channel context\n"); mutex_unlock(&local->mtx); return; } sdata->radar_required = radar_required; mutex_unlock(&local->mtx); memcpy(ifibss->bssid, bssid, ETH_ALEN); presp = ieee80211_ibss_build_presp(sdata, beacon_int, basic_rates, capability, tsf, &chandef, &have_higher_than_11mbit, NULL); if (!presp) return; rcu_assign_pointer(ifibss->presp, presp); mgmt = (void *)presp->head; sdata->vif.bss_conf.enable_beacon = true; sdata->vif.bss_conf.beacon_int = beacon_int; sdata->vif.bss_conf.basic_rates = basic_rates; sdata->vif.bss_conf.ssid_len = ifibss->ssid_len; memcpy(sdata->vif.bss_conf.ssid, ifibss->ssid, ifibss->ssid_len); bss_change = BSS_CHANGED_BEACON_INT; bss_change |= ieee80211_reset_erp_info(sdata); bss_change |= BSS_CHANGED_BSSID; bss_change |= BSS_CHANGED_BEACON; bss_change |= BSS_CHANGED_BEACON_ENABLED; bss_change |= BSS_CHANGED_BASIC_RATES; bss_change |= BSS_CHANGED_HT; bss_change |= BSS_CHANGED_IBSS; bss_change |= BSS_CHANGED_SSID; /* * In 5 GHz/802.11a, we can always use short slot time. * (IEEE 802.11-2012 18.3.8.7) * * In 2.4GHz, we must always use long slots in IBSS for compatibility * reasons. * (IEEE 802.11-2012 19.4.5) * * HT follows these specifications (IEEE 802.11-2012 20.3.18) */ sdata->vif.bss_conf.use_short_slot = chan->band == NL80211_BAND_5GHZ; bss_change |= BSS_CHANGED_ERP_SLOT; /* cf. 
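IEEE 802.11 9.2.12 (picked up again after this aside). */

/* Stepping back to the DFS handling earlier in this function:
 * cfg80211_chandef_dfs_required() is used as a tri-state, negative for an
 * invalid chandef, 0 when no DFS is needed, positive when radar detection
 * is required. A compact sketch of the resulting decision (hypothetical
 * enum and helper, not a mac80211 API): */

#include <stdio.h>

enum join_verdict { JOIN_FAIL, JOIN_PLAIN, JOIN_WITH_RADAR_DETECT };

static enum join_verdict classify_dfs(int err, int userspace_handles_dfs)
{
	if (err < 0)
		return JOIN_FAIL; /* invalid chandef */
	if (err > 0 && !userspace_handles_dfs)
		return JOIN_FAIL; /* DFS channel without a control program */
	return err > 0 ? JOIN_WITH_RADAR_DETECT : JOIN_PLAIN;
}

int main(void)
{
	printf("%d %d %d\n", classify_dfs(-22, 0), classify_dfs(0, 0),
	       classify_dfs(1, 1)); /* 0 1 2 */
	return 0;
}

/* End of the aside; cf.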
IEEE 802.11 9.2.12 */ if (chan->band == NL80211_BAND_2GHZ && have_higher_than_11mbit) sdata->flags |= IEEE80211_SDATA_OPERATING_GMODE; else sdata->flags &= ~IEEE80211_SDATA_OPERATING_GMODE; ieee80211_set_wmm_default(sdata, true, false); sdata->vif.bss_conf.ibss_joined = true; sdata->vif.bss_conf.ibss_creator = creator; err = drv_join_ibss(local, sdata); if (err) { sdata->vif.bss_conf.ibss_joined = false; sdata->vif.bss_conf.ibss_creator = false; sdata->vif.bss_conf.enable_beacon = false; sdata->vif.bss_conf.ssid_len = 0; RCU_INIT_POINTER(ifibss->presp, NULL); kfree_rcu(presp, rcu_head); mutex_lock(&local->mtx); ieee80211_vif_release_channel(sdata); mutex_unlock(&local->mtx); sdata_info(sdata, "Failed to join IBSS, driver failure: %d\n", err); return; } ieee80211_bss_info_change_notify(sdata, bss_change); ifibss->state = IEEE80211_IBSS_MLME_JOINED; mod_timer(&ifibss->timer, round_jiffies(jiffies + IEEE80211_IBSS_MERGE_INTERVAL)); bss_meta.chan = chan; bss_meta.scan_width = cfg80211_chandef_to_scan_width(&chandef); bss = cfg80211_inform_bss_frame_data(local->hw.wiphy, &bss_meta, mgmt, presp->head_len, GFP_KERNEL); cfg80211_put_bss(local->hw.wiphy, bss); netif_carrier_on(sdata->dev); cfg80211_ibss_joined(sdata->dev, ifibss->bssid, chan, GFP_KERNEL); } static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, struct ieee80211_bss *bss) { struct cfg80211_bss *cbss = container_of((void *)bss, struct cfg80211_bss, priv); struct ieee80211_supported_band *sband; struct cfg80211_chan_def chandef; u32 basic_rates; int i, j; u16 beacon_int = cbss->beacon_interval; const struct cfg80211_bss_ies *ies; enum nl80211_channel_type chan_type; u64 tsf; u32 rate_flags; int shift; sdata_assert_lock(sdata); if (beacon_int < 10) beacon_int = 10; switch (sdata->u.ibss.chandef.width) { case NL80211_CHAN_WIDTH_20_NOHT: case NL80211_CHAN_WIDTH_20: case NL80211_CHAN_WIDTH_40: chan_type = cfg80211_get_chandef_type(&sdata->u.ibss.chandef); cfg80211_chandef_create(&chandef, cbss->channel, chan_type); break; case NL80211_CHAN_WIDTH_5: case NL80211_CHAN_WIDTH_10: cfg80211_chandef_create(&chandef, cbss->channel, NL80211_CHAN_NO_HT); chandef.width = sdata->u.ibss.chandef.width; break; case NL80211_CHAN_WIDTH_80: case NL80211_CHAN_WIDTH_80P80: case NL80211_CHAN_WIDTH_160: chandef = sdata->u.ibss.chandef; chandef.chan = cbss->channel; break; default: /* fall back to 20 MHz for unsupported modes */ cfg80211_chandef_create(&chandef, cbss->channel, NL80211_CHAN_NO_HT); break; } sband = sdata->local->hw.wiphy->bands[cbss->channel->band]; rate_flags = ieee80211_chandef_rate_flags(&sdata->u.ibss.chandef); shift = ieee80211_vif_get_shift(&sdata->vif); basic_rates = 0; for (i = 0; i < bss->supp_rates_len; i++) { int rate = bss->supp_rates[i] & 0x7f; bool is_basic = !!(bss->supp_rates[i] & 0x80); for (j = 0; j < sband->n_bitrates; j++) { int brate; if ((rate_flags & sband->bitrates[j].flags) != rate_flags) continue; brate = DIV_ROUND_UP(sband->bitrates[j].bitrate, 5 * (1 << shift)); if (brate == rate) { if (is_basic) basic_rates |= BIT(j); break; } } } rcu_read_lock(); ies = rcu_dereference(cbss->ies); tsf = ies->tsf; rcu_read_unlock(); __ieee80211_sta_join_ibss(sdata, cbss->bssid, beacon_int, &chandef, basic_rates, cbss->capability, tsf, false); } int ieee80211_ibss_csa_beacon(struct ieee80211_sub_if_data *sdata, struct cfg80211_csa_settings *csa_settings) { struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; struct beacon_data *presp, *old_presp; struct cfg80211_bss *cbss; const struct cfg80211_bss_ies *ies; u16 
capability = WLAN_CAPABILITY_IBSS; u64 tsf; int ret = 0; sdata_assert_lock(sdata); if (ifibss->privacy) capability |= WLAN_CAPABILITY_PRIVACY; cbss = cfg80211_get_bss(sdata->local->hw.wiphy, ifibss->chandef.chan, ifibss->bssid, ifibss->ssid, ifibss->ssid_len, IEEE80211_BSS_TYPE_IBSS, IEEE80211_PRIVACY(ifibss->privacy)); if (WARN_ON(!cbss)) { ret = -EINVAL; goto out; } rcu_read_lock(); ies = rcu_dereference(cbss->ies); tsf = ies->tsf; rcu_read_unlock(); cfg80211_put_bss(sdata->local->hw.wiphy, cbss); old_presp = rcu_dereference_protected(ifibss->presp, lockdep_is_held(&sdata->wdev.mtx)); presp = ieee80211_ibss_build_presp(sdata, sdata->vif.bss_conf.beacon_int, sdata->vif.bss_conf.basic_rates, capability, tsf, &ifibss->chandef, NULL, csa_settings); if (!presp) { ret = -ENOMEM; goto out; } rcu_assign_pointer(ifibss->presp, presp); if (old_presp) kfree_rcu(old_presp, rcu_head); return BSS_CHANGED_BEACON; out: return ret; } int ieee80211_ibss_finish_csa(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; struct cfg80211_bss *cbss; int err, changed = 0; sdata_assert_lock(sdata); /* update cfg80211 bss information with the new channel */ if (!is_zero_ether_addr(ifibss->bssid)) { cbss = cfg80211_get_bss(sdata->local->hw.wiphy, ifibss->chandef.chan, ifibss->bssid, ifibss->ssid, ifibss->ssid_len, IEEE80211_BSS_TYPE_IBSS, IEEE80211_PRIVACY(ifibss->privacy)); /* XXX: should not really modify cfg80211 data */ if (cbss) { cbss->channel = sdata->csa_chandef.chan; cfg80211_put_bss(sdata->local->hw.wiphy, cbss); } } ifibss->chandef = sdata->csa_chandef; /* generate the beacon */ err = ieee80211_ibss_csa_beacon(sdata, NULL); if (err < 0) return err; changed |= err; return changed; } void ieee80211_ibss_stop(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; cancel_work_sync(&ifibss->csa_connection_drop_work); } static struct sta_info *ieee80211_ibss_finish_sta(struct sta_info *sta) __acquires(RCU) { struct ieee80211_sub_if_data *sdata = sta->sdata; u8 addr[ETH_ALEN]; memcpy(addr, sta->sta.addr, ETH_ALEN); ibss_dbg(sdata, "Adding new IBSS station %pM\n", addr); sta_info_pre_move_state(sta, IEEE80211_STA_AUTH); sta_info_pre_move_state(sta, IEEE80211_STA_ASSOC); /* authorize the station only if the network is not RSN protected. If * not wait for the userspace to authorize it */ if (!sta->sdata->u.ibss.control_port) sta_info_pre_move_state(sta, IEEE80211_STA_AUTHORIZED); rate_control_rate_init(sta); /* If it fails, maybe we raced another insertion? */ if (sta_info_insert_rcu(sta)) return sta_info_get(sdata, addr); return sta; } static struct sta_info * ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata, const u8 *bssid, const u8 *addr, u32 supp_rates) __acquires(RCU) { struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; struct ieee80211_local *local = sdata->local; struct sta_info *sta; struct ieee80211_chanctx_conf *chanctx_conf; struct ieee80211_supported_band *sband; enum nl80211_bss_scan_width scan_width; int band; /* * XXX: Consider removing the least recently used entry and * allow new one to be added. 
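* One way that could look is sketched below. */

/* Hedged sketch of the eviction idea from the XXX note above: pick the
 * station with the oldest activity timestamp and reuse its slot. Plain C
 * with a fixed array standing in for mac80211's sta_info list; a real
 * implementation would also have to tear the station state down. */

#include <stddef.h>

struct sta_slot {
	unsigned long last_active; /* e.g. jiffies of the last rx/tx */
	int in_use;
};

static struct sta_slot *pick_slot(struct sta_slot *tbl, size_t n)
{
	struct sta_slot *lru = &tbl[0];
	size_t i;

	for (i = 0; i < n; i++) {
		if (!tbl[i].in_use)
			return &tbl[i]; /* free slot, nothing to evict */
		if (tbl[i].last_active < lru->last_active)
			lru = &tbl[i]; /* least recently active so far */
	}
	return lru; /* table full: evict this entry and reuse it */
}

int main(void)
{
	struct sta_slot tbl[3] = { { 30, 1 }, { 10, 1 }, { 20, 1 } };

	return (int)(pick_slot(tbl, 3) - tbl); /* 1: the oldest entry */
}

/* Back in ieee80211_ibss_add_sta(): for now the table is simply capped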
*/ if (local->num_sta >= IEEE80211_IBSS_MAX_STA_ENTRIES) { net_info_ratelimited("%s: No room for a new IBSS STA entry %pM\n", sdata->name, addr); rcu_read_lock(); return NULL; } if (ifibss->state == IEEE80211_IBSS_MLME_SEARCH) { rcu_read_lock(); return NULL; } if (!ether_addr_equal(bssid, sdata->u.ibss.bssid)) { rcu_read_lock(); return NULL; } rcu_read_lock(); chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); if (WARN_ON_ONCE(!chanctx_conf)) return NULL; band = chanctx_conf->def.chan->band; scan_width = cfg80211_chandef_to_scan_width(&chanctx_conf->def); rcu_read_unlock(); sta = sta_info_alloc(sdata, addr, GFP_KERNEL); if (!sta) { rcu_read_lock(); return NULL; } /* make sure mandatory rates are always added */ sband = local->hw.wiphy->bands[band]; sta->sta.supp_rates[band] = supp_rates | ieee80211_mandatory_rates(sband, scan_width); return ieee80211_ibss_finish_sta(sta); } static int ieee80211_sta_active_ibss(struct ieee80211_sub_if_data *sdata) { struct ieee80211_local *local = sdata->local; int active = 0; struct sta_info *sta; sdata_assert_lock(sdata); rcu_read_lock(); list_for_each_entry_rcu(sta, &local->sta_list, list) { unsigned long last_active = ieee80211_sta_last_active(sta); if (sta->sdata == sdata && time_is_after_jiffies(last_active + IEEE80211_IBSS_MERGE_INTERVAL)) { active++; break; } } rcu_read_unlock(); return active; } static void ieee80211_ibss_disconnect(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; struct ieee80211_local *local = sdata->local; struct cfg80211_bss *cbss; struct beacon_data *presp; if (!is_zero_ether_addr(ifibss->bssid)) { cbss = cfg80211_get_bss(local->hw.wiphy, ifibss->chandef.chan, ifibss->bssid, ifibss->ssid, ifibss->ssid_len, IEEE80211_BSS_TYPE_IBSS, IEEE80211_PRIVACY(ifibss->privacy)); if (cbss) { cfg80211_unlink_bss(local->hw.wiphy, cbss); cfg80211_put_bss(sdata->local->hw.wiphy, cbss); } } ifibss->state = IEEE80211_IBSS_MLME_SEARCH; sta_info_flush(sdata); netif_carrier_off(sdata->dev); sdata->vif.bss_conf.ibss_joined = false; sdata->vif.bss_conf.ibss_creator = false; sdata->vif.bss_conf.enable_beacon = false; sdata->vif.bss_conf.ssid_len = 0; /* remove beacon */ presp = rcu_dereference_protected(ifibss->presp, lockdep_is_held(&sdata->wdev.mtx)); RCU_INIT_POINTER(sdata->u.ibss.presp, NULL); if (presp) kfree_rcu(presp, rcu_head); clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED, &sdata->state); ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED | BSS_CHANGED_IBSS); drv_leave_ibss(local, sdata); mutex_lock(&local->mtx); ieee80211_vif_release_channel(sdata); mutex_unlock(&local->mtx); } static void ieee80211_csa_connection_drop_work(struct work_struct *work) { struct ieee80211_sub_if_data *sdata = container_of(work, struct ieee80211_sub_if_data, u.ibss.csa_connection_drop_work); sdata_lock(sdata); ieee80211_ibss_disconnect(sdata); synchronize_rcu(); skb_queue_purge(&sdata->skb_queue); /* trigger a scan to find another IBSS network to join */ ieee80211_queue_work(&sdata->local->hw, &sdata->work); sdata_unlock(sdata); } static void ieee80211_ibss_csa_mark_radar(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; int err; /* if the current channel is a DFS channel, mark the channel as * unavailable. 
*/ err = cfg80211_chandef_dfs_required(sdata->local->hw.wiphy, &ifibss->chandef, NL80211_IFTYPE_ADHOC); if (err > 0) cfg80211_radar_event(sdata->local->hw.wiphy, &ifibss->chandef, GFP_ATOMIC); } static bool ieee80211_ibss_process_chanswitch(struct ieee80211_sub_if_data *sdata, struct ieee802_11_elems *elems, bool beacon) { struct cfg80211_csa_settings params; struct ieee80211_csa_ie csa_ie; struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; enum nl80211_channel_type ch_type; int err; u32 sta_flags; sdata_assert_lock(sdata); sta_flags = IEEE80211_STA_DISABLE_VHT; switch (ifibss->chandef.width) { case NL80211_CHAN_WIDTH_5: case NL80211_CHAN_WIDTH_10: case NL80211_CHAN_WIDTH_20_NOHT: sta_flags |= IEEE80211_STA_DISABLE_HT; /* fall through */ case NL80211_CHAN_WIDTH_20: sta_flags |= IEEE80211_STA_DISABLE_40MHZ; break; default: break; } memset(¶ms, 0, sizeof(params)); err = ieee80211_parse_ch_switch_ie(sdata, elems, ifibss->chandef.chan->band, sta_flags, ifibss->bssid, &csa_ie); /* can't switch to destination channel, fail */ if (err < 0) goto disconnect; /* did not contain a CSA */ if (err) return false; /* channel switch is not supported, disconnect */ if (!(sdata->local->hw.wiphy->flags & WIPHY_FLAG_HAS_CHANNEL_SWITCH)) goto disconnect; params.count = csa_ie.count; params.chandef = csa_ie.chandef; switch (ifibss->chandef.width) { case NL80211_CHAN_WIDTH_20_NOHT: case NL80211_CHAN_WIDTH_20: case NL80211_CHAN_WIDTH_40: /* keep our current HT mode (HT20/HT40+/HT40-), even if * another mode has been announced. The mode is not adopted * within the beacon while doing CSA and we should therefore * keep the mode which we announce. */ ch_type = cfg80211_get_chandef_type(&ifibss->chandef); cfg80211_chandef_create(¶ms.chandef, params.chandef.chan, ch_type); break; case NL80211_CHAN_WIDTH_5: case NL80211_CHAN_WIDTH_10: if (params.chandef.width != ifibss->chandef.width) { sdata_info(sdata, "IBSS %pM received channel switch from incompatible channel width (%d MHz, width:%d, CF1/2: %d/%d MHz), disconnecting\n", ifibss->bssid, params.chandef.chan->center_freq, params.chandef.width, params.chandef.center_freq1, params.chandef.center_freq2); goto disconnect; } break; default: /* should not happen, sta_flags should prevent VHT modes. */ WARN_ON(1); goto disconnect; } if (!cfg80211_reg_can_beacon(sdata->local->hw.wiphy, ¶ms.chandef, NL80211_IFTYPE_ADHOC)) { sdata_info(sdata, "IBSS %pM switches to unsupported channel (%d MHz, width:%d, CF1/2: %d/%d MHz), disconnecting\n", ifibss->bssid, params.chandef.chan->center_freq, params.chandef.width, params.chandef.center_freq1, params.chandef.center_freq2); goto disconnect; } err = cfg80211_chandef_dfs_required(sdata->local->hw.wiphy, ¶ms.chandef, NL80211_IFTYPE_ADHOC); if (err < 0) goto disconnect; if (err > 0 && !ifibss->userspace_handles_dfs) { /* IBSS-DFS only allowed with a control program */ goto disconnect; } params.radar_required = err; if (cfg80211_chandef_identical(¶ms.chandef, &sdata->vif.bss_conf.chandef)) { ibss_dbg(sdata, "received csa with an identical chandef, ignoring\n"); return true; } /* all checks done, now perform the channel switch. 
*/ ibss_dbg(sdata, "received channel switch announcement to go to channel %d MHz\n", params.chandef.chan->center_freq); params.block_tx = !!csa_ie.mode; if (ieee80211_channel_switch(sdata->local->hw.wiphy, sdata->dev, ¶ms)) goto disconnect; ieee80211_ibss_csa_mark_radar(sdata); return true; disconnect: ibss_dbg(sdata, "Can't handle channel switch, disconnect\n"); ieee80211_queue_work(&sdata->local->hw, &ifibss->csa_connection_drop_work); ieee80211_ibss_csa_mark_radar(sdata); return true; } static void ieee80211_rx_mgmt_spectrum_mgmt(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt, size_t len, struct ieee80211_rx_status *rx_status, struct ieee802_11_elems *elems) { int required_len; if (len < IEEE80211_MIN_ACTION_SIZE + 1) return; /* CSA is the only action we handle for now */ if (mgmt->u.action.u.measurement.action_code != WLAN_ACTION_SPCT_CHL_SWITCH) return; required_len = IEEE80211_MIN_ACTION_SIZE + sizeof(mgmt->u.action.u.chan_switch); if (len < required_len) return; if (!sdata->vif.csa_active) ieee80211_ibss_process_chanswitch(sdata, elems, false); } static void ieee80211_rx_mgmt_deauth_ibss(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt, size_t len) { u16 reason = le16_to_cpu(mgmt->u.deauth.reason_code); if (len < IEEE80211_DEAUTH_FRAME_LEN) return; ibss_dbg(sdata, "RX DeAuth SA=%pM DA=%pM\n", mgmt->sa, mgmt->da); ibss_dbg(sdata, "\tBSSID=%pM (reason: %d)\n", mgmt->bssid, reason); sta_info_destroy_addr(sdata, mgmt->sa); } static void ieee80211_rx_mgmt_auth_ibss(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt, size_t len) { u16 auth_alg, auth_transaction; sdata_assert_lock(sdata); if (len < 24 + 6) return; auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg); auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction); ibss_dbg(sdata, "RX Auth SA=%pM DA=%pM\n", mgmt->sa, mgmt->da); ibss_dbg(sdata, "\tBSSID=%pM (auth_transaction=%d)\n", mgmt->bssid, auth_transaction); if (auth_alg != WLAN_AUTH_OPEN || auth_transaction != 1) return; /* * IEEE 802.11 standard does not require authentication in IBSS * networks and most implementations do not seem to use it. * However, try to reply to authentication attempts if someone * has actually implemented this. 
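* (The reply sent below is transaction 2 of the open-system exchange.) */

/* Minimal sketch of the rule enforced above: only an open-system
 * authentication (algorithm 0) with transaction number 1 gets an answer,
 * and the answer carries transaction number 2. Hypothetical structure,
 * not the mac80211 API: */

struct auth_hdr {
	unsigned short alg;    /* 0 == WLAN_AUTH_OPEN */
	unsigned short trans;  /* 1 = request, 2 = response */
	unsigned short status; /* 0 == success */
};

static int build_auth_reply(const struct auth_hdr *rx, struct auth_hdr *tx)
{
	if (rx->alg != 0 || rx->trans != 1)
		return -1; /* anything else is ignored, as above */
	tx->alg = 0;
	tx->trans = 2;
	tx->status = 0;
	return 0;
}

int main(void)
{
	struct auth_hdr rx = { 0, 1, 0 }, tx;

	return build_auth_reply(&rx, &tx); /* 0: reply built */
}

/* Send the transaction-2 reply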
*/ ieee80211_send_auth(sdata, 2, WLAN_AUTH_OPEN, 0, NULL, 0, mgmt->sa, sdata->u.ibss.bssid, NULL, 0, 0, 0); } static void ieee80211_update_sta_info(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt, size_t len, struct ieee80211_rx_status *rx_status, struct ieee802_11_elems *elems, struct ieee80211_channel *channel) { struct sta_info *sta; enum nl80211_band band = rx_status->band; enum nl80211_bss_scan_width scan_width; struct ieee80211_local *local = sdata->local; struct ieee80211_supported_band *sband; bool rates_updated = false; u32 supp_rates = 0; if (sdata->vif.type != NL80211_IFTYPE_ADHOC) return; if (!ether_addr_equal(mgmt->bssid, sdata->u.ibss.bssid)) return; sband = local->hw.wiphy->bands[band]; if (WARN_ON(!sband)) return; rcu_read_lock(); sta = sta_info_get(sdata, mgmt->sa); if (elems->supp_rates) { supp_rates = ieee80211_sta_get_rates(sdata, elems, band, NULL); if (sta) { u32 prev_rates; prev_rates = sta->sta.supp_rates[band]; /* make sure mandatory rates are always added */ scan_width = NL80211_BSS_CHAN_WIDTH_20; if (rx_status->bw == RATE_INFO_BW_5) scan_width = NL80211_BSS_CHAN_WIDTH_5; else if (rx_status->bw == RATE_INFO_BW_10) scan_width = NL80211_BSS_CHAN_WIDTH_10; sta->sta.supp_rates[band] = supp_rates | ieee80211_mandatory_rates(sband, scan_width); if (sta->sta.supp_rates[band] != prev_rates) { ibss_dbg(sdata, "updated supp_rates set for %pM based on beacon/probe_resp (0x%x -> 0x%x)\n", sta->sta.addr, prev_rates, sta->sta.supp_rates[band]); rates_updated = true; } } else { rcu_read_unlock(); sta = ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa, supp_rates); } } if (sta && !sta->sta.wme && elems->wmm_info && local->hw.queues >= IEEE80211_NUM_ACS) { sta->sta.wme = true; ieee80211_check_fast_xmit(sta); } if (sta && elems->ht_operation && elems->ht_cap_elem && sdata->u.ibss.chandef.width != NL80211_CHAN_WIDTH_20_NOHT && sdata->u.ibss.chandef.width != NL80211_CHAN_WIDTH_5 && sdata->u.ibss.chandef.width != NL80211_CHAN_WIDTH_10) { /* we both use HT */ struct ieee80211_ht_cap htcap_ie; struct cfg80211_chan_def chandef; enum ieee80211_sta_rx_bandwidth bw = sta->sta.bandwidth; cfg80211_chandef_create(&chandef, channel, NL80211_CHAN_NO_HT); ieee80211_chandef_ht_oper(elems->ht_operation, &chandef); memcpy(&htcap_ie, elems->ht_cap_elem, sizeof(htcap_ie)); rates_updated |= ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband, &htcap_ie, sta); if (elems->vht_operation && elems->vht_cap_elem && sdata->u.ibss.chandef.width != NL80211_CHAN_WIDTH_20 && sdata->u.ibss.chandef.width != NL80211_CHAN_WIDTH_40) { /* we both use VHT */ struct ieee80211_vht_cap cap_ie; struct ieee80211_sta_vht_cap cap = sta->sta.vht_cap; ieee80211_chandef_vht_oper(&local->hw, elems->vht_operation, elems->ht_operation, &chandef); memcpy(&cap_ie, elems->vht_cap_elem, sizeof(cap_ie)); ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband, &cap_ie, sta); if (memcmp(&cap, &sta->sta.vht_cap, sizeof(cap))) rates_updated |= true; } if (bw != sta->sta.bandwidth) rates_updated |= true; if (!cfg80211_chandef_compatible(&sdata->u.ibss.chandef, &chandef)) WARN_ON_ONCE(1); } if (sta && rates_updated) { u32 changed = IEEE80211_RC_SUPP_RATES_CHANGED; u8 rx_nss = sta->sta.rx_nss; /* Force rx_nss recalculation */ sta->sta.rx_nss = 0; rate_control_rate_init(sta); if (sta->sta.rx_nss != rx_nss) changed |= IEEE80211_RC_NSS_CHANGED; drv_sta_rc_update(local, sdata, &sta->sta, changed); } rcu_read_unlock(); } static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt, size_t len, struct 
ieee80211_rx_status *rx_status, struct ieee802_11_elems *elems) { struct ieee80211_local *local = sdata->local; struct cfg80211_bss *cbss; struct ieee80211_bss *bss; struct ieee80211_channel *channel; u64 beacon_timestamp, rx_timestamp; u32 supp_rates = 0; enum nl80211_band band = rx_status->band; channel = ieee80211_get_channel(local->hw.wiphy, rx_status->freq); if (!channel) return; ieee80211_update_sta_info(sdata, mgmt, len, rx_status, elems, channel); bss = ieee80211_bss_info_update(local, rx_status, mgmt, len, channel); if (!bss) return; cbss = container_of((void *)bss, struct cfg80211_bss, priv); /* same for beacon and probe response */ beacon_timestamp = le64_to_cpu(mgmt->u.beacon.timestamp); /* check if we need to merge IBSS */ /* not an IBSS */ if (!(cbss->capability & WLAN_CAPABILITY_IBSS)) goto put_bss; /* different channel */ if (sdata->u.ibss.fixed_channel && sdata->u.ibss.chandef.chan != cbss->channel) goto put_bss; /* different SSID */ if (elems->ssid_len != sdata->u.ibss.ssid_len || memcmp(elems->ssid, sdata->u.ibss.ssid, sdata->u.ibss.ssid_len)) goto put_bss; /* process channel switch */ if (sdata->vif.csa_active || ieee80211_ibss_process_chanswitch(sdata, elems, true)) goto put_bss; /* same BSSID */ if (ether_addr_equal(cbss->bssid, sdata->u.ibss.bssid)) goto put_bss; /* we use a fixed BSSID */ if (sdata->u.ibss.fixed_bssid) goto put_bss; if (ieee80211_have_rx_timestamp(rx_status)) { /* time when timestamp field was received */ rx_timestamp = ieee80211_calculate_rx_timestamp(local, rx_status, len + FCS_LEN, 24); } else { /* * second best option: get current TSF * (will return -1 if not supported) */ rx_timestamp = drv_get_tsf(local, sdata); } ibss_dbg(sdata, "RX beacon SA=%pM BSSID=%pM TSF=0x%llx\n", mgmt->sa, mgmt->bssid, (unsigned long long)rx_timestamp); ibss_dbg(sdata, "\tBCN=0x%llx diff=%lld @%lu\n", (unsigned long long)beacon_timestamp, (unsigned long long)(rx_timestamp - beacon_timestamp), jiffies); if (beacon_timestamp > rx_timestamp) { ibss_dbg(sdata, "beacon TSF higher than local TSF - IBSS merge with BSSID %pM\n", mgmt->bssid); ieee80211_sta_join_ibss(sdata, bss); supp_rates = ieee80211_sta_get_rates(sdata, elems, band, NULL); ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa, supp_rates); rcu_read_unlock(); } put_bss: ieee80211_rx_bss_put(local, bss); } void ieee80211_ibss_rx_no_sta(struct ieee80211_sub_if_data *sdata, const u8 *bssid, const u8 *addr, u32 supp_rates) { struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; struct ieee80211_chanctx_conf *chanctx_conf; struct sk_buff *skb; if (ifibss->state == IEEE80211_IBSS_MLME_SEARCH) return; if (!ether_addr_equal(bssid, sdata->u.ibss.bssid)) return; rcu_read_lock(); chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); if (WARN_ON_ONCE(!chanctx_conf)) { rcu_read_unlock(); return; } skb = ieee80211_build_probe_req(sdata, sdata->vif.addr, addr, bssid, (u32)-1, chanctx_conf->def.chan, ifibss->ssid, ifibss->ssid_len, NULL, 0, IEEE80211_PROBE_FLAG_DIRECTED); if (skb) ieee80211_tx_skb(sdata, skb); rcu_read_unlock(); } static void ieee80211_ibss_sta_expire(struct ieee80211_sub_if_data *sdata) { struct ieee80211_local *local = sdata->local; struct sta_info *sta, *tmp; unsigned long exp_time = IEEE80211_IBSS_INACTIVITY_LIMIT; unsigned long exp_rsn = IEEE80211_IBSS_RSN_INACTIVITY_LIMIT; mutex_lock(&local->sta_mtx); list_for_each_entry_safe(sta, tmp, &local->sta_list, list) { unsigned long last_active = ieee80211_sta_last_active(sta); if (sdata != sta->sdata) continue; if (time_is_before_jiffies(last_active + 
exp_time) || (time_is_before_jiffies(last_active + exp_rsn) && sta->sta_state != IEEE80211_STA_AUTHORIZED)) { sta_dbg(sta->sdata, "expiring inactive %sSTA %pM\n", sta->sta_state != IEEE80211_STA_AUTHORIZED ? "not authorized " : "", sta->sta.addr); WARN_ON(__sta_info_destroy(sta)); } } mutex_unlock(&local->sta_mtx); } /* * This function is called with state == IEEE80211_IBSS_MLME_JOINED */ static void ieee80211_sta_merge_ibss(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; enum nl80211_bss_scan_width scan_width; sdata_assert_lock(sdata); mod_timer(&ifibss->timer, round_jiffies(jiffies + IEEE80211_IBSS_MERGE_INTERVAL)); ieee80211_ibss_sta_expire(sdata); if (time_before(jiffies, ifibss->last_scan_completed + IEEE80211_IBSS_MERGE_INTERVAL)) return; if (ieee80211_sta_active_ibss(sdata)) return; if (ifibss->fixed_channel) return; sdata_info(sdata, "No active IBSS STAs - trying to scan for other IBSS networks with same SSID (merge)\n"); scan_width = cfg80211_chandef_to_scan_width(&ifibss->chandef); ieee80211_request_ibss_scan(sdata, ifibss->ssid, ifibss->ssid_len, NULL, 0, scan_width); } static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; u8 bssid[ETH_ALEN]; u16 capability; int i; sdata_assert_lock(sdata); if (ifibss->fixed_bssid) { memcpy(bssid, ifibss->bssid, ETH_ALEN); } else { /* Generate random, not broadcast, locally administered BSSID. Mix in * own MAC address to make sure that devices that do not have proper * random number generator get different BSSID. */ get_random_bytes(bssid, ETH_ALEN); for (i = 0; i < ETH_ALEN; i++) bssid[i] ^= sdata->vif.addr[i]; bssid[0] &= ~0x01; bssid[0] |= 0x02; } sdata_info(sdata, "Creating new IBSS network, BSSID %pM\n", bssid); capability = WLAN_CAPABILITY_IBSS; if (ifibss->privacy) capability |= WLAN_CAPABILITY_PRIVACY; __ieee80211_sta_join_ibss(sdata, bssid, sdata->vif.bss_conf.beacon_int, &ifibss->chandef, ifibss->basic_rates, capability, 0, true); } static unsigned ibss_setup_channels(struct wiphy *wiphy, struct ieee80211_channel **channels, unsigned int channels_max, u32 center_freq, u32 width) { struct ieee80211_channel *chan = NULL; unsigned int n_chan = 0; u32 start_freq, end_freq, freq; if (width <= 20) { start_freq = center_freq; end_freq = center_freq; } else { start_freq = center_freq - width / 2 + 10; end_freq = center_freq + width / 2 - 10; } for (freq = start_freq; freq <= end_freq; freq += 20) { chan = ieee80211_get_channel(wiphy, freq); if (!chan) continue; if (n_chan >= channels_max) return n_chan; channels[n_chan] = chan; n_chan++; } return n_chan; } static unsigned int ieee80211_ibss_setup_scan_channels(struct wiphy *wiphy, const struct cfg80211_chan_def *chandef, struct ieee80211_channel **channels, unsigned int channels_max) { unsigned int n_chan = 0; u32 width, cf1, cf2 = 0; switch (chandef->width) { case NL80211_CHAN_WIDTH_40: width = 40; break; case NL80211_CHAN_WIDTH_80P80: cf2 = chandef->center_freq2; /* fall through */ case NL80211_CHAN_WIDTH_80: width = 80; break; case NL80211_CHAN_WIDTH_160: width = 160; break; default: width = 20; break; } cf1 = chandef->center_freq1; n_chan = ibss_setup_channels(wiphy, channels, channels_max, cf1, width); if (cf2) n_chan += ibss_setup_channels(wiphy, &channels[n_chan], channels_max - n_chan, cf2, width); return n_chan; } /* * This function is called with state == IEEE80211_IBSS_MLME_SEARCH */ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata) { struct 
ieee80211_if_ibss *ifibss = &sdata->u.ibss; struct ieee80211_local *local = sdata->local; struct cfg80211_bss *cbss; struct ieee80211_channel *chan = NULL; const u8 *bssid = NULL; enum nl80211_bss_scan_width scan_width; int active_ibss; sdata_assert_lock(sdata); active_ibss = ieee80211_sta_active_ibss(sdata); ibss_dbg(sdata, "sta_find_ibss (active_ibss=%d)\n", active_ibss); if (active_ibss) return; if (ifibss->fixed_bssid) bssid = ifibss->bssid; if (ifibss->fixed_channel) chan = ifibss->chandef.chan; if (!is_zero_ether_addr(ifibss->bssid)) bssid = ifibss->bssid; cbss = cfg80211_get_bss(local->hw.wiphy, chan, bssid, ifibss->ssid, ifibss->ssid_len, IEEE80211_BSS_TYPE_IBSS, IEEE80211_PRIVACY(ifibss->privacy)); if (cbss) { struct ieee80211_bss *bss; bss = (void *)cbss->priv; ibss_dbg(sdata, "sta_find_ibss: selected %pM current %pM\n", cbss->bssid, ifibss->bssid); sdata_info(sdata, "Selected IBSS BSSID %pM based on configured SSID\n", cbss->bssid); ieee80211_sta_join_ibss(sdata, bss); ieee80211_rx_bss_put(local, bss); return; } /* if a fixed bssid and a fixed freq have been provided create the IBSS * directly and do not waste time scanning */ if (ifibss->fixed_bssid && ifibss->fixed_channel) { sdata_info(sdata, "Created IBSS using preconfigured BSSID %pM\n", bssid); ieee80211_sta_create_ibss(sdata); return; } ibss_dbg(sdata, "sta_find_ibss: did not try to join ibss\n"); /* Selected IBSS not found in current scan results - try to scan */ if (time_after(jiffies, ifibss->last_scan_completed + IEEE80211_SCAN_INTERVAL)) { struct ieee80211_channel *channels[8]; unsigned int num; sdata_info(sdata, "Trigger new scan to find an IBSS to join\n"); scan_width = cfg80211_chandef_to_scan_width(&ifibss->chandef); if (ifibss->fixed_channel) { num = ieee80211_ibss_setup_scan_channels(local->hw.wiphy, &ifibss->chandef, channels, ARRAY_SIZE(channels)); ieee80211_request_ibss_scan(sdata, ifibss->ssid, ifibss->ssid_len, channels, num, scan_width); } else { ieee80211_request_ibss_scan(sdata, ifibss->ssid, ifibss->ssid_len, NULL, 0, scan_width); } } else { int interval = IEEE80211_SCAN_INTERVAL; if (time_after(jiffies, ifibss->ibss_join_req + IEEE80211_IBSS_JOIN_TIMEOUT)) ieee80211_sta_create_ibss(sdata); mod_timer(&ifibss->timer, round_jiffies(jiffies + interval)); } } static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata, struct sk_buff *req) { struct ieee80211_mgmt *mgmt = (void *)req->data; struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; struct ieee80211_local *local = sdata->local; int tx_last_beacon, len = req->len; struct sk_buff *skb; struct beacon_data *presp; u8 *pos, *end; sdata_assert_lock(sdata); presp = rcu_dereference_protected(ifibss->presp, lockdep_is_held(&sdata->wdev.mtx)); if (ifibss->state != IEEE80211_IBSS_MLME_JOINED || len < 24 + 2 || !presp) return; tx_last_beacon = drv_tx_last_beacon(local); ibss_dbg(sdata, "RX ProbeReq SA=%pM DA=%pM\n", mgmt->sa, mgmt->da); ibss_dbg(sdata, "\tBSSID=%pM (tx_last_beacon=%d)\n", mgmt->bssid, tx_last_beacon); if (!tx_last_beacon && is_multicast_ether_addr(mgmt->da)) return; if (!ether_addr_equal(mgmt->bssid, ifibss->bssid) && !is_broadcast_ether_addr(mgmt->bssid)) return; end = ((u8 *) mgmt) + len; pos = mgmt->u.probe_req.variable; if (pos[0] != WLAN_EID_SSID || pos + 2 + pos[1] > end) { ibss_dbg(sdata, "Invalid SSID IE in ProbeReq from %pM\n", mgmt->sa); return; } if (pos[1] != 0 && (pos[1] != ifibss->ssid_len || memcmp(pos + 2, ifibss->ssid, ifibss->ssid_len))) { /* Ignore ProbeReq for foreign SSID */ return; } /* Reply with ProbeResp 
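for matching (or wildcard) probe requests. */

/* The SSID check just above boils down to: a zero-length (wildcard) SSID
 * element matches, anything else must equal our SSID exactly. Stand-alone
 * sketch, assuming the element was already bounds-checked as above: */

#include <string.h>

static int ssid_ie_matches(const unsigned char *ie, /* ie[0]=EID, ie[1]=len */
			   const unsigned char *our_ssid, size_t our_len)
{
	if (ie[1] == 0)
		return 1; /* wildcard SSID: answer it */
	return ie[1] == our_len && memcmp(ie + 2, our_ssid, our_len) == 0;
}

int main(void)
{
	const unsigned char wildcard[] = { 0, 0 };
	const unsigned char ssid[] = { 0, 4, 't', 'e', 's', 't' };

	return !(ssid_ie_matches(wildcard, (const unsigned char *)"test", 4) &&
		 ssid_ie_matches(ssid, (const unsigned char *)"test", 4));
}

/* Reply with a ProbeResp built from the cached presp head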
*/ skb = dev_alloc_skb(local->tx_headroom + presp->head_len); if (!skb) return; skb_reserve(skb, local->tx_headroom); skb_put_data(skb, presp->head, presp->head_len); memcpy(((struct ieee80211_mgmt *) skb->data)->da, mgmt->sa, ETH_ALEN); ibss_dbg(sdata, "Sending ProbeResp to %pM\n", mgmt->sa); IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; /* avoid excessive retries for probe request to wildcard SSIDs */ if (pos[1] == 0) IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_NO_ACK; ieee80211_tx_skb(sdata, skb); } static void ieee80211_rx_mgmt_probe_beacon(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt, size_t len, struct ieee80211_rx_status *rx_status) { size_t baselen; struct ieee802_11_elems elems; BUILD_BUG_ON(offsetof(typeof(mgmt->u.probe_resp), variable) != offsetof(typeof(mgmt->u.beacon), variable)); /* * either beacon or probe_resp but the variable field is at the * same offset */ baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt; if (baselen > len) return; ieee802_11_parse_elems(mgmt->u.probe_resp.variable, len - baselen, false, &elems, mgmt->bssid, NULL); ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems); } void ieee80211_ibss_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) { struct ieee80211_rx_status *rx_status; struct ieee80211_mgmt *mgmt; u16 fc; struct ieee802_11_elems elems; int ies_len; rx_status = IEEE80211_SKB_RXCB(skb); mgmt = (struct ieee80211_mgmt *) skb->data; fc = le16_to_cpu(mgmt->frame_control); sdata_lock(sdata); if (!sdata->u.ibss.ssid_len) goto mgmt_out; /* not ready to merge yet */ switch (fc & IEEE80211_FCTL_STYPE) { case IEEE80211_STYPE_PROBE_REQ: ieee80211_rx_mgmt_probe_req(sdata, skb); break; case IEEE80211_STYPE_PROBE_RESP: case IEEE80211_STYPE_BEACON: ieee80211_rx_mgmt_probe_beacon(sdata, mgmt, skb->len, rx_status); break; case IEEE80211_STYPE_AUTH: ieee80211_rx_mgmt_auth_ibss(sdata, mgmt, skb->len); break; case IEEE80211_STYPE_DEAUTH: ieee80211_rx_mgmt_deauth_ibss(sdata, mgmt, skb->len); break; case IEEE80211_STYPE_ACTION: switch (mgmt->u.action.category) { case WLAN_CATEGORY_SPECTRUM_MGMT: ies_len = skb->len - offsetof(struct ieee80211_mgmt, u.action.u.chan_switch.variable); if (ies_len < 0) break; ieee802_11_parse_elems( mgmt->u.action.u.chan_switch.variable, ies_len, true, &elems, mgmt->bssid, NULL); if (elems.parse_error) break; ieee80211_rx_mgmt_spectrum_mgmt(sdata, mgmt, skb->len, rx_status, &elems); break; } } mgmt_out: sdata_unlock(sdata); } void ieee80211_ibss_work(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; sdata_lock(sdata); /* * Work could be scheduled after scan or similar * when we aren't even joined (or trying) with a * network. 
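* In that case there is nothing to do, so bail out below. */

/* An aside on ieee80211_ibss_rx_queued_mgmt() above: management frames are
 * dispatched on the subtype bits (4-7) of the frame control field. A
 * stand-alone sketch with stand-in mask values taken from the 802.11
 * type/subtype encoding: */

#include <stdio.h>

#define FCTL_STYPE       0x00f0
#define STYPE_PROBE_REQ  0x0040
#define STYPE_PROBE_RESP 0x0050
#define STYPE_BEACON     0x0080
#define STYPE_AUTH       0x00b0
#define STYPE_DEAUTH     0x00c0
#define STYPE_ACTION     0x00d0

static const char *classify_mgmt(unsigned int fc)
{
	switch (fc & FCTL_STYPE) {
	case STYPE_PROBE_REQ:  return "probe-req";
	case STYPE_PROBE_RESP: return "probe-resp";
	case STYPE_BEACON:     return "beacon";
	case STYPE_AUTH:       return "auth";
	case STYPE_DEAUTH:     return "deauth";
	case STYPE_ACTION:     return "action";
	default:               return "not handled here";
	}
}

int main(void)
{
	printf("%s\n", classify_mgmt(0x0080)); /* beacon */
	return 0;
}

/* Back in ieee80211_ibss_work(): without an SSID we are not even trying to join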
*/ if (!ifibss->ssid_len) goto out; switch (ifibss->state) { case IEEE80211_IBSS_MLME_SEARCH: ieee80211_sta_find_ibss(sdata); break; case IEEE80211_IBSS_MLME_JOINED: ieee80211_sta_merge_ibss(sdata); break; default: WARN_ON(1); break; } out: sdata_unlock(sdata); } static void ieee80211_ibss_timer(struct timer_list *t) { struct ieee80211_sub_if_data *sdata = from_timer(sdata, t, u.ibss.timer); ieee80211_queue_work(&sdata->local->hw, &sdata->work); } void ieee80211_ibss_setup_sdata(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; timer_setup(&ifibss->timer, ieee80211_ibss_timer, 0); INIT_WORK(&ifibss->csa_connection_drop_work, ieee80211_csa_connection_drop_work); } /* scan finished notification */ void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local) { struct ieee80211_sub_if_data *sdata; mutex_lock(&local->iflist_mtx); list_for_each_entry(sdata, &local->interfaces, list) { if (!ieee80211_sdata_running(sdata)) continue; if (sdata->vif.type != NL80211_IFTYPE_ADHOC) continue; sdata->u.ibss.last_scan_completed = jiffies; } mutex_unlock(&local->iflist_mtx); } int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata, struct cfg80211_ibss_params *params) { u32 changed = 0; u32 rate_flags; struct ieee80211_supported_band *sband; enum ieee80211_chanctx_mode chanmode; struct ieee80211_local *local = sdata->local; int radar_detect_width = 0; int i; int ret; ret = cfg80211_chandef_dfs_required(local->hw.wiphy, ¶ms->chandef, sdata->wdev.iftype); if (ret < 0) return ret; if (ret > 0) { if (!params->userspace_handles_dfs) return -EINVAL; radar_detect_width = BIT(params->chandef.width); } chanmode = (params->channel_fixed && !ret) ? IEEE80211_CHANCTX_SHARED : IEEE80211_CHANCTX_EXCLUSIVE; mutex_lock(&local->chanctx_mtx); ret = ieee80211_check_combinations(sdata, ¶ms->chandef, chanmode, radar_detect_width); mutex_unlock(&local->chanctx_mtx); if (ret < 0) return ret; if (params->bssid) { memcpy(sdata->u.ibss.bssid, params->bssid, ETH_ALEN); sdata->u.ibss.fixed_bssid = true; } else sdata->u.ibss.fixed_bssid = false; sdata->u.ibss.privacy = params->privacy; sdata->u.ibss.control_port = params->control_port; sdata->u.ibss.userspace_handles_dfs = params->userspace_handles_dfs; sdata->u.ibss.basic_rates = params->basic_rates; sdata->u.ibss.last_scan_completed = jiffies; /* fix basic_rates if channel does not support these rates */ rate_flags = ieee80211_chandef_rate_flags(¶ms->chandef); sband = local->hw.wiphy->bands[params->chandef.chan->band]; for (i = 0; i < sband->n_bitrates; i++) { if ((rate_flags & sband->bitrates[i].flags) != rate_flags) sdata->u.ibss.basic_rates &= ~BIT(i); } memcpy(sdata->vif.bss_conf.mcast_rate, params->mcast_rate, sizeof(params->mcast_rate)); sdata->vif.bss_conf.beacon_int = params->beacon_interval; sdata->u.ibss.chandef = params->chandef; sdata->u.ibss.fixed_channel = params->channel_fixed; if (params->ie) { sdata->u.ibss.ie = kmemdup(params->ie, params->ie_len, GFP_KERNEL); if (sdata->u.ibss.ie) sdata->u.ibss.ie_len = params->ie_len; } sdata->u.ibss.state = IEEE80211_IBSS_MLME_SEARCH; sdata->u.ibss.ibss_join_req = jiffies; memcpy(sdata->u.ibss.ssid, params->ssid, params->ssid_len); sdata->u.ibss.ssid_len = params->ssid_len; memcpy(&sdata->u.ibss.ht_capa, ¶ms->ht_capa, sizeof(sdata->u.ibss.ht_capa)); memcpy(&sdata->u.ibss.ht_capa_mask, ¶ms->ht_capa_mask, sizeof(sdata->u.ibss.ht_capa_mask)); /* * 802.11n-2009 9.13.3.1: In an IBSS, the HT Protection field is * reserved, but an HT STA shall protect HT transmissions as though * 
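it were set to non-HT mixed mode. In code this looks as follows. */

/* Sketch of the effect of the statement below: both reserved-in-IBSS fields
 * are forced on in the local bss_conf no matter what peers advertise. The
 * masks are stand-ins; the real values are
 * IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED and
 * IEEE80211_HT_PARAM_RIFS_MODE from linux/ieee80211.h. */

#define OP_MODE_PROTECTION_NONHT_MIXED 0x0003 /* stand-in */
#define PARAM_RIFS_MODE                0x0008 /* stand-in */

static unsigned int apply_ibss_ht_quirks(unsigned int ht_operation_mode)
{
	return ht_operation_mode | OP_MODE_PROTECTION_NONHT_MIXED |
	       PARAM_RIFS_MODE;
}

int main(void)
{
	return apply_ibss_ht_quirks(0) == 0x000b ? 0 : 1;
}

/* Per 802.11n-2009 9.13.3.1, an HT STA shall protect HT transmissions as though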
the HT Protection field were set to non-HT mixed mode. * * In an IBSS, the RIFS Mode field of the HT Operation element is * also reserved, but an HT STA shall operate as though this field * were set to 1. */ sdata->vif.bss_conf.ht_operation_mode |= IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED | IEEE80211_HT_PARAM_RIFS_MODE; changed |= BSS_CHANGED_HT | BSS_CHANGED_MCAST_RATE; ieee80211_bss_info_change_notify(sdata, changed); sdata->smps_mode = IEEE80211_SMPS_OFF; sdata->needed_rx_chains = local->rx_chains; sdata->control_port_over_nl80211 = params->control_port_over_nl80211; ieee80211_queue_work(&local->hw, &sdata->work); return 0; } int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; ieee80211_ibss_disconnect(sdata); ifibss->ssid_len = 0; eth_zero_addr(ifibss->bssid); /* remove beacon */ kfree(sdata->u.ibss.ie); /* on the next join, re-program HT parameters */ memset(&ifibss->ht_capa, 0, sizeof(ifibss->ht_capa)); memset(&ifibss->ht_capa_mask, 0, sizeof(ifibss->ht_capa_mask)); synchronize_rcu(); skb_queue_purge(&sdata->skb_queue); del_timer_sync(&sdata->u.ibss.timer); return 0; }

/* ===== backport-iwlwifi-dkms-7906/net/mac80211/ieee80211_i.h ===== */

/* * Copyright 2002-2005, Instant802 Networks, Inc. * Copyright 2005, Devicescape Software, Inc. * Copyright 2006-2007 Jiri Benc * Copyright 2007-2010 Johannes Berg * Copyright 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2018-2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #ifndef IEEE80211_I_H #define IEEE80211_I_H #include <linux/kernel.h> #include <linux/device.h> #include <linux/if_ether.h> #include <linux/interrupt.h> #include <linux/list.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <linux/workqueue.h> #include <linux/types.h> #include <linux/spinlock.h> #include <linux/etherdevice.h> #include <linux/leds.h> #include <linux/idr.h> #include <linux/rhashtable.h> #include <linux/rbtree.h> #include <net/ieee80211_radiotap.h> #include <net/cfg80211.h> #include <net/mac80211.h> #include <net/fq.h> #include "key.h" #include "sta_info.h" #include "debug.h" extern const struct cfg80211_ops mac80211_config_ops; struct ieee80211_local; /* Maximum number of broadcast/multicast frames to buffer when some of the * associated stations are using power saving. */ #define AP_MAX_BC_BUFFER 128 /* Maximum number of frames buffered to all STAs, including multicast frames. * Note: increasing this limit increases the potential memory requirement. Each * frame can be up to about 2 kB long. */ #define TOTAL_MAX_TX_BUFFER 512 /* Required encryption head and tailroom */ #define IEEE80211_ENCRYPT_HEADROOM 8 #define IEEE80211_ENCRYPT_TAILROOM 18 /* IEEE 802.11 (Ch. 9.5 Defragmentation) requires support for concurrent * reception of at least three fragmented frames. This limit can be increased * by changing this define, at the cost of slower frame reassembly and * increased memory use (about 2 kB of RAM per entry). */ #define IEEE80211_FRAGMENT_MAX 4 /* power level hasn't been configured (or set to automatic) */ #define IEEE80211_UNSET_POWER_LEVEL INT_MIN /* * Some APs experience problems when working with U-APSD. Decreasing the * probability of that happening by using legacy mode for all ACs but VO isn't * enough. * * Cisco 4410N originally forced us to enable VO by default only because it * treated non-VO ACs as legacy. * * However some APs (notably Netgear R7000) silently reclassify packets to * different ACs. Since u-APSD ACs require trigger frames for frame retrieval * clients would never see some frames (e.g. ARP responses) or would fetch them * accidentally after a long time.
* * It makes little sense to enable u-APSD queues by default because it needs * userspace applications to be aware of it to actually take advantage of the * possible additional powersavings. Implicitly depending on driver autotrigger * frame support doesn't make much sense. */ #define IEEE80211_DEFAULT_UAPSD_QUEUES 0 #define IEEE80211_DEFAULT_MAX_SP_LEN \ IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL extern const u8 ieee80211_ac_to_qos_mask[IEEE80211_NUM_ACS]; #define IEEE80211_DEAUTH_FRAME_LEN (24 /* hdr */ + 2 /* reason */) #define IEEE80211_MAX_NAN_INSTANCE_ID 255 struct ieee80211_fragment_entry { struct sk_buff_head skb_list; unsigned long first_frag_time; u16 seq; u16 extra_len; u16 last_frag; u8 rx_queue; bool check_sequential_pn; /* needed for CCMP/GCMP */ u8 last_pn[6]; /* PN of the last fragment if CCMP was used */ }; struct ieee80211_bss { u32 device_ts_beacon, device_ts_presp; bool wmm_used; bool uapsd_supported; #define IEEE80211_MAX_SUPP_RATES 32 u8 supp_rates[IEEE80211_MAX_SUPP_RATES]; size_t supp_rates_len; struct ieee80211_rate *beacon_rate; /* * During association, we save an ERP value from a probe response so * that we can feed ERP info to the driver when handling the * association completes. these fields probably won't be up-to-date * otherwise, you probably don't want to use them. */ bool has_erp_value; u8 erp_value; /* Keep track of the corruption of the last beacon/probe response. */ u8 corrupt_data; /* Keep track of what bits of information we have valid info for. */ u8 valid_data; }; /** * enum ieee80211_corrupt_data_flags - BSS data corruption flags * @IEEE80211_BSS_CORRUPT_BEACON: last beacon frame received was corrupted * @IEEE80211_BSS_CORRUPT_PROBE_RESP: last probe response received was corrupted * * These are bss flags that are attached to a bss in the * @corrupt_data field of &struct ieee80211_bss. */ enum ieee80211_bss_corrupt_data_flags { IEEE80211_BSS_CORRUPT_BEACON = BIT(0), IEEE80211_BSS_CORRUPT_PROBE_RESP = BIT(1) }; /** * enum ieee80211_valid_data_flags - BSS valid data flags * @IEEE80211_BSS_VALID_WMM: WMM/UAPSD data was gathered from non-corrupt IE * @IEEE80211_BSS_VALID_RATES: Supported rates were gathered from non-corrupt IE * @IEEE80211_BSS_VALID_ERP: ERP flag was gathered from non-corrupt IE * * These are bss flags that are attached to a bss in the * @valid_data field of &struct ieee80211_bss. They show which parts * of the data structure were received as a result of an un-corrupted * beacon/probe response. 
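* A usage sketch for them follows. */

/* Hedged illustration (the real checks live elsewhere in mac80211):
 * information parsed from a possibly-corrupted frame is only acted upon
 * once a clean frame has confirmed it. BSS_VALID_WMM stands in for
 * IEEE80211_BSS_VALID_WMM from the enum below. */

#define BSS_VALID_WMM (1u << 1)

struct bss_sketch {
	unsigned char valid_data;
	int uapsd_supported;
};

static int can_use_uapsd(const struct bss_sketch *bss)
{
	/* trust the U-APSD bit only if the WMM IE was seen un-corrupted */
	return (bss->valid_data & BSS_VALID_WMM) && bss->uapsd_supported;
}

int main(void)
{
	struct bss_sketch bss = { BSS_VALID_WMM, 1 };

	return !can_use_uapsd(&bss);
}

/* The flag bits themselves: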
*/ enum ieee80211_bss_valid_data_flags { IEEE80211_BSS_VALID_WMM = BIT(1), IEEE80211_BSS_VALID_RATES = BIT(2), IEEE80211_BSS_VALID_ERP = BIT(3) }; typedef unsigned __bitwise ieee80211_tx_result; #define TX_CONTINUE ((__force ieee80211_tx_result) 0u) #define TX_DROP ((__force ieee80211_tx_result) 1u) #define TX_QUEUED ((__force ieee80211_tx_result) 2u) #define IEEE80211_TX_NO_SEQNO BIT(0) #define IEEE80211_TX_UNICAST BIT(1) #define IEEE80211_TX_PS_BUFFERED BIT(2) struct ieee80211_tx_data { struct sk_buff *skb; struct sk_buff_head skbs; struct ieee80211_local *local; struct ieee80211_sub_if_data *sdata; struct sta_info *sta; struct ieee80211_key *key; struct ieee80211_tx_rate rate; unsigned int flags; }; typedef unsigned __bitwise ieee80211_rx_result; #define RX_CONTINUE ((__force ieee80211_rx_result) 0u) #define RX_DROP_UNUSABLE ((__force ieee80211_rx_result) 1u) #define RX_DROP_MONITOR ((__force ieee80211_rx_result) 2u) #define RX_QUEUED ((__force ieee80211_rx_result) 3u) /** * enum ieee80211_packet_rx_flags - packet RX flags * @IEEE80211_RX_AMSDU: a-MSDU packet * @IEEE80211_RX_MALFORMED_ACTION_FRM: action frame is malformed * @IEEE80211_RX_DEFERRED_RELEASE: frame was subjected to receive reordering * * These are per-frame flags that are attached to a frame in the * @rx_flags field of &struct ieee80211_rx_status. */ enum ieee80211_packet_rx_flags { IEEE80211_RX_AMSDU = BIT(3), IEEE80211_RX_MALFORMED_ACTION_FRM = BIT(4), IEEE80211_RX_DEFERRED_RELEASE = BIT(5), }; /** * enum ieee80211_rx_flags - RX data flags * * @IEEE80211_RX_CMNTR: received on cooked monitor already * @IEEE80211_RX_BEACON_REPORTED: This frame was already reported * to cfg80211_report_obss_beacon(). * * These flags are used across handling multiple interfaces * for a single frame. */ enum ieee80211_rx_flags { IEEE80211_RX_CMNTR = BIT(0), IEEE80211_RX_BEACON_REPORTED = BIT(1), }; struct ieee80211_rx_data { struct napi_struct *napi; struct sk_buff *skb; struct ieee80211_local *local; struct ieee80211_sub_if_data *sdata; struct sta_info *sta; struct ieee80211_key *key; unsigned int flags; /* * Index into sequence numbers array, 0..16 * since the last (16) is used for non-QoS, * will be 16 on non-QoS frames. */ int seqno_idx; /* * Index into the security IV/PN arrays, 0..16 * since the last (16) is used for CCMP-encrypted * management frames, will be set to 16 on mgmt * frames and 0 on non-QoS frames. */ int security_idx; u32 tkip_iv32; u16 tkip_iv16; }; struct ieee80211_csa_settings { const u16 *counter_offsets_beacon; const u16 *counter_offsets_presp; int n_counter_offsets_beacon; int n_counter_offsets_presp; u8 count; }; struct beacon_data { u8 *head, *tail; int head_len, tail_len; struct ieee80211_meshconf_ie *meshconf; u16 csa_counter_offsets[IEEE80211_MAX_CSA_COUNTERS_NUM]; u8 csa_current_counter; struct rcu_head rcu_head; }; struct probe_resp { struct rcu_head rcu_head; int len; u16 csa_counter_offsets[IEEE80211_MAX_CSA_COUNTERS_NUM]; u8 data[0]; }; struct ps_data { /* yes, this looks ugly, but guarantees that we can later use * bitmap_empty :) * NB: don't touch this bitmap, use sta_info_{set,clear}_tim_bit */ u8 tim[sizeof(unsigned long) * BITS_TO_LONGS(IEEE80211_MAX_AID + 1)] __aligned(__alignof__(unsigned long)); struct sk_buff_head bc_buf; atomic_t num_sta_ps; /* number of stations in PS mode */ int dtim_count; bool dtim_bc_mc; }; struct ieee80211_if_ap { struct beacon_data __rcu *beacon; struct probe_resp __rcu *probe_resp; /* to be used after channel switch. 
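(see next_beacon just below). */

/* An aside on struct ps_data above: the TIM virtual bitmap is indexed by
 * association ID, one bit per AID. Minimal sketch of the bit operations
 * that mac80211 wraps in sta_info_{set,clear}_tim_bit (simplified, no
 * locking, fixed-size buffer): */

static void tim_set_bit(unsigned char *tim, int aid)
{
	tim[aid / 8] |= (unsigned char)(1 << (aid % 8));
}

static void tim_clear_bit(unsigned char *tim, int aid)
{
	tim[aid / 8] &= (unsigned char)~(1 << (aid % 8));
}

int main(void)
{
	unsigned char tim[256] = { 0 };

	tim_set_bit(tim, 42);
	tim_clear_bit(tim, 42);
	return tim[42 / 8]; /* 0 again */
}

/* next_beacon: the template to install once the channel switch completes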
*/ struct cfg80211_beacon_data *next_beacon; struct list_head vlans; /* write-protected with RTNL and local->mtx */ struct ps_data ps; atomic_t num_mcast_sta; /* number of stations receiving multicast */ enum ieee80211_smps_mode req_smps, /* requested smps mode */ driver_smps_mode; /* smps mode request */ struct work_struct request_smps_work; bool multicast_to_unicast; }; struct ieee80211_if_wds { struct sta_info *sta; u8 remote_addr[ETH_ALEN]; }; struct ieee80211_if_vlan { struct list_head list; /* write-protected with RTNL and local->mtx */ /* used for all tx if the VLAN is configured to 4-addr mode */ struct sta_info __rcu *sta; atomic_t num_mcast_sta; /* number of stations receiving multicast */ }; struct mesh_stats { __u32 fwded_mcast; /* Mesh forwarded multicast frames */ __u32 fwded_unicast; /* Mesh forwarded unicast frames */ __u32 fwded_frames; /* Mesh total forwarded frames */ __u32 dropped_frames_ttl; /* Not transmitted since mesh_ttl == 0*/ __u32 dropped_frames_no_route; /* Not transmitted, no route found */ __u32 dropped_frames_congestion;/* Not forwarded due to congestion */ }; #define PREQ_Q_F_START 0x1 #define PREQ_Q_F_REFRESH 0x2 struct mesh_preq_queue { struct list_head list; u8 dst[ETH_ALEN]; u8 flags; }; struct ieee80211_roc_work { struct list_head list; struct ieee80211_sub_if_data *sdata; struct ieee80211_channel *chan; bool started, abort, hw_begun, notified; bool on_channel; unsigned long start_time; u32 duration, req_duration; struct sk_buff *frame; u64 cookie, mgmt_tx_cookie; enum ieee80211_roc_type type; }; /* flags used in struct ieee80211_if_managed.flags */ enum ieee80211_sta_flags { IEEE80211_STA_CONNECTION_POLL = BIT(1), IEEE80211_STA_CONTROL_PORT = BIT(2), IEEE80211_STA_DISABLE_HT = BIT(4), IEEE80211_STA_MFP_ENABLED = BIT(6), IEEE80211_STA_UAPSD_ENABLED = BIT(7), IEEE80211_STA_NULLFUNC_ACKED = BIT(8), IEEE80211_STA_RESET_SIGNAL_AVE = BIT(9), IEEE80211_STA_DISABLE_40MHZ = BIT(10), IEEE80211_STA_DISABLE_VHT = BIT(11), IEEE80211_STA_DISABLE_80P80MHZ = BIT(12), IEEE80211_STA_DISABLE_160MHZ = BIT(13), IEEE80211_STA_DISABLE_WMM = BIT(14), IEEE80211_STA_ENABLE_RRM = BIT(15), IEEE80211_STA_DISABLE_HE = BIT(16), }; struct ieee80211_mgd_auth_data { struct cfg80211_bss *bss; unsigned long timeout; int tries; u16 algorithm, expected_transaction; u8 key[WLAN_KEY_LEN_WEP104]; u8 key_len, key_idx; bool done; bool peer_confirmed; bool timeout_started; u16 sae_trans, sae_status; size_t data_len; u8 data[]; }; struct ieee80211_mgd_assoc_data { struct cfg80211_bss *bss; const u8 *supp_rates; unsigned long timeout; int tries; u16 capability; u8 prev_bssid[ETH_ALEN]; u8 ssid[IEEE80211_MAX_SSID_LEN]; u8 ssid_len; u8 supp_rates_len; bool wmm, uapsd; bool need_beacon; bool synced; bool timeout_started; u8 ap_ht_param; struct ieee80211_vht_cap ap_vht_cap; u8 fils_nonces[2 * FILS_NONCE_LEN]; u8 fils_kek[FILS_MAX_KEK_LEN]; size_t fils_kek_len; size_t ie_len; u8 ie[]; }; struct ieee80211_sta_tx_tspec { /* timestamp of the first packet in the time slice */ unsigned long time_slice_start; u32 admitted_time; /* in usecs, unlike over the air */ u8 tsid; s8 up; /* signed to be able to invalidate with -1 during teardown */ /* consumed TX time in microseconds in the time slice */ u32 consumed_tx_time; enum { TX_TSPEC_ACTION_NONE = 0, TX_TSPEC_ACTION_DOWNGRADE, TX_TSPEC_ACTION_STOP_DOWNGRADE, } action; bool downgraded; }; DECLARE_EWMA(beacon_signal, 4, 4) struct ieee80211_if_managed { struct timer_list timer; struct timer_list conn_mon_timer; struct timer_list bcn_mon_timer; struct timer_list 
chswitch_timer; struct work_struct monitor_work; struct work_struct chswitch_work; struct work_struct beacon_connection_loss_work; struct work_struct csa_connection_drop_work; unsigned long beacon_timeout; unsigned long probe_timeout; int probe_send_count; bool nullfunc_failed; bool connection_loss; struct cfg80211_bss *associated; struct ieee80211_mgd_auth_data *auth_data; struct ieee80211_mgd_assoc_data *assoc_data; u8 bssid[ETH_ALEN] __aligned(2); u16 aid; bool powersave; /* powersave requested for this iface */ bool broken_ap; /* AP is broken -- turn off powersave */ bool have_beacon; u8 dtim_period; enum ieee80211_smps_mode req_smps, /* requested smps mode */ driver_smps_mode; /* smps mode request */ struct work_struct request_smps_work; unsigned int flags; bool csa_waiting_bcn; bool csa_ignored_same_chan; bool beacon_crc_valid; u32 beacon_crc; bool status_acked; bool status_received; __le16 status_fc; enum { IEEE80211_MFP_DISABLED, IEEE80211_MFP_OPTIONAL, IEEE80211_MFP_REQUIRED } mfp; /* management frame protection */ /* * Bitmask of enabled u-apsd queues, * IEEE80211_WMM_IE_STA_QOSINFO_AC_BE & co. Needs a new association * to take effect. */ unsigned int uapsd_queues; /* * Maximum number of buffered frames AP can deliver during a * service period, IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL or similar. * Needs a new association to take effect. */ unsigned int uapsd_max_sp_len; int wmm_last_param_set; int mu_edca_last_param_set; u8 use_4addr; s16 p2p_noa_index; struct ewma_beacon_signal ave_beacon_signal; /* * Number of Beacon frames used in ave_beacon_signal. This can be used * to avoid generating less reliable cqm events that would be based * only on couple of received frames. */ unsigned int count_beacon_signal; /* Number of times beacon loss was invoked. */ unsigned int beacon_loss_count; /* * Last Beacon frame signal strength average (ave_beacon_signal / 16) * that triggered a cqm event. 0 indicates that no event has been * generated for the current association. */ int last_cqm_event_signal; /* * State variables for keeping track of RSSI of the AP currently * connected to and informing driver when RSSI has gone * below/above a certain threshold. */ int rssi_min_thold, rssi_max_thold; int last_ave_beacon_signal; struct ieee80211_ht_cap ht_capa; /* configured ht-cap over-rides */ struct ieee80211_ht_cap ht_capa_mask; /* Valid parts of ht_capa */ struct ieee80211_vht_cap vht_capa; /* configured VHT overrides */ struct ieee80211_vht_cap vht_capa_mask; /* Valid parts of vht_capa */ /* TDLS support */ u8 tdls_peer[ETH_ALEN] __aligned(2); struct delayed_work tdls_peer_del_work; struct sk_buff *orig_teardown_skb; /* The original teardown skb */ struct sk_buff *teardown_skb; /* A copy to send through the AP */ spinlock_t teardown_lock; /* To lock changing teardown_skb */ bool tdls_chan_switch_prohibited; bool tdls_wider_bw_prohibited; /* WMM-AC TSPEC support */ struct ieee80211_sta_tx_tspec tx_tspec[IEEE80211_NUM_ACS]; /* Use a separate work struct so that we can do something here * while the sdata->work is flushing the queues, for example. * otherwise, in scenarios where we hardly get any traffic out * on the BE queue, but there's a lot of VO traffic, we might * get stuck in a downgraded situation and flush takes forever. */ struct delayed_work tx_tspec_wk; /* Information elements from the last transmitted (Re)Association * Request frame. 
*/ u8 *assoc_req_ies; size_t assoc_req_ies_len; }; struct ieee80211_if_ibss { struct timer_list timer; struct work_struct csa_connection_drop_work; unsigned long last_scan_completed; u32 basic_rates; bool fixed_bssid; bool fixed_channel; bool privacy; bool control_port; bool userspace_handles_dfs; u8 bssid[ETH_ALEN] __aligned(2); u8 ssid[IEEE80211_MAX_SSID_LEN]; u8 ssid_len, ie_len; u8 *ie; struct cfg80211_chan_def chandef; unsigned long ibss_join_req; /* probe response/beacon for IBSS */ struct beacon_data __rcu *presp; struct ieee80211_ht_cap ht_capa; /* configured ht-cap over-rides */ struct ieee80211_ht_cap ht_capa_mask; /* Valid parts of ht_capa */ enum { IEEE80211_IBSS_MLME_SEARCH, IEEE80211_IBSS_MLME_JOINED, } state; }; /** * struct ieee80211_if_ocb - OCB mode state * * @housekeeping_timer: timer for periodic invocation of a housekeeping task * @wrkq_flags: OCB deferred task action * @incomplete_lock: delayed STA insertion lock * @incomplete_stations: list of STAs waiting for delayed insertion * @joined: indication if the interface is connected to an OCB network */ struct ieee80211_if_ocb { struct timer_list housekeeping_timer; unsigned long wrkq_flags; spinlock_t incomplete_lock; struct list_head incomplete_stations; bool joined; }; /** * struct ieee80211_mesh_sync_ops - Extensible synchronization framework interface * * these declarations define the interface, which enables * vendor-specific mesh synchronization * */ struct ieee802_11_elems; struct ieee80211_mesh_sync_ops { void (*rx_bcn_presp)(struct ieee80211_sub_if_data *sdata, u16 stype, struct ieee80211_mgmt *mgmt, struct ieee802_11_elems *elems, struct ieee80211_rx_status *rx_status); /* should be called with beacon_data under RCU read lock */ void (*adjust_tsf)(struct ieee80211_sub_if_data *sdata, struct beacon_data *beacon); /* add other framework functions here */ }; struct mesh_csa_settings { struct rcu_head rcu_head; struct cfg80211_csa_settings settings; }; struct ieee80211_if_mesh { struct timer_list housekeeping_timer; struct timer_list mesh_path_timer; struct timer_list mesh_path_root_timer; unsigned long wrkq_flags; unsigned long mbss_changed; bool userspace_handles_dfs; u8 mesh_id[IEEE80211_MAX_MESH_ID_LEN]; size_t mesh_id_len; /* Active Path Selection Protocol Identifier */ u8 mesh_pp_id; /* Active Path Selection Metric Identifier */ u8 mesh_pm_id; /* Congestion Control Mode Identifier */ u8 mesh_cc_id; /* Synchronization Protocol Identifier */ u8 mesh_sp_id; /* Authentication Protocol Identifier */ u8 mesh_auth_id; /* Local mesh Sequence Number */ u32 sn; /* Last used PREQ ID */ u32 preq_id; atomic_t mpaths; /* Timestamp of last SN update */ unsigned long last_sn_update; /* Time when it's ok to send next PERR */ unsigned long next_perr; /* Timestamp of last PREQ sent */ unsigned long last_preq; struct mesh_rmc *rmc; spinlock_t mesh_preq_queue_lock; struct mesh_preq_queue preq_queue; int preq_queue_len; struct mesh_stats mshstats; struct mesh_config mshcfg; atomic_t estab_plinks; u32 mesh_seqnum; bool accepting_plinks; int num_gates; struct beacon_data __rcu *beacon; const u8 *ie; u8 ie_len; enum { IEEE80211_MESH_SEC_NONE = 0x0, IEEE80211_MESH_SEC_AUTHED = 0x1, IEEE80211_MESH_SEC_SECURED = 0x2, } security; bool user_mpm; /* Extensible Synchronization Framework */ const struct ieee80211_mesh_sync_ops *sync_ops; s64 sync_offset_clockdrift_max; spinlock_t sync_offset_lock; /* mesh power save */ enum nl80211_mesh_power_mode nonpeer_pm; int ps_peers_light_sleep; int ps_peers_deep_sleep; struct ps_data ps; /* Channel 
	   Switching Support */
	struct mesh_csa_settings __rcu *csa;
	enum {
		IEEE80211_MESH_CSA_ROLE_NONE,
		IEEE80211_MESH_CSA_ROLE_INIT,
		IEEE80211_MESH_CSA_ROLE_REPEATER,
	} csa_role;
	u8 chsw_ttl;
	u16 pre_value;

	/* offset from skb->data while building IE */
	int meshconf_offset;

	struct mesh_table *mesh_paths;
	struct mesh_table *mpp_paths; /* Store paths for MPP&MAP */
	int mesh_paths_generation;
	int mpp_paths_generation;
};

#ifdef CPTCFG_MAC80211_MESH
#define IEEE80211_IFSTA_MESH_CTR_INC(msh, name)	\
	do { (msh)->mshstats.name++; } while (0)
#else
#define IEEE80211_IFSTA_MESH_CTR_INC(msh, name) \
	do { } while (0)
#endif

/**
 * enum ieee80211_sub_if_data_flags - virtual interface flags
 *
 * @IEEE80211_SDATA_ALLMULTI: interface wants all multicast packets
 * @IEEE80211_SDATA_OPERATING_GMODE: operating in G-only mode
 * @IEEE80211_SDATA_DONT_BRIDGE_PACKETS: do not bridge packets between
 *	associated stations and do not deliver multicast frames back
 *	to the wireless media; frames are only delivered to the local
 *	net stack.
 * @IEEE80211_SDATA_DISCONNECT_RESUME: Disconnect after resume.
 * @IEEE80211_SDATA_IN_DRIVER: indicates interface was added to driver
 */
enum ieee80211_sub_if_data_flags {
	IEEE80211_SDATA_ALLMULTI		= BIT(0),
	IEEE80211_SDATA_OPERATING_GMODE		= BIT(2),
	IEEE80211_SDATA_DONT_BRIDGE_PACKETS	= BIT(3),
	IEEE80211_SDATA_DISCONNECT_RESUME	= BIT(4),
	IEEE80211_SDATA_IN_DRIVER		= BIT(5),
};

/**
 * enum ieee80211_sdata_state_bits - virtual interface state bits
 * @SDATA_STATE_RUNNING: virtual interface is up & running; this
 *	mirrors netif_running() but is separate for interface type
 *	change handling while the interface is up
 * @SDATA_STATE_OFFCHANNEL: This interface is currently in offchannel
 *	mode, so queues are stopped
 * @SDATA_STATE_OFFCHANNEL_BEACON_STOPPED: Beaconing was stopped due
 *	to offchannel, reset when offchannel returns
 */
enum ieee80211_sdata_state_bits {
	SDATA_STATE_RUNNING,
	SDATA_STATE_OFFCHANNEL,
	SDATA_STATE_OFFCHANNEL_BEACON_STOPPED,
};

/**
 * enum ieee80211_chanctx_mode - channel context configuration mode
 *
 * @IEEE80211_CHANCTX_SHARED: channel context may be used by
 *	multiple interfaces
 * @IEEE80211_CHANCTX_EXCLUSIVE: channel context can be used
 *	only by a single interface. This can be used for example for
 *	non-fixed channel IBSS.
 */
enum ieee80211_chanctx_mode {
	IEEE80211_CHANCTX_SHARED,
	IEEE80211_CHANCTX_EXCLUSIVE
};

/**
 * enum ieee80211_chanctx_replace_state - channel context replacement state
 *
 * This is used for channel context in-place reservations that require channel
 * context switch/swap.
 *
 * @IEEE80211_CHANCTX_REPLACE_NONE: no replacement is taking place
 * @IEEE80211_CHANCTX_WILL_BE_REPLACED: this channel context will be replaced
 *	by a (not yet registered) channel context pointed by %replace_ctx.
 * @IEEE80211_CHANCTX_REPLACES_OTHER: this (not yet registered) channel context
 *	replaces an existing channel context pointed to by %replace_ctx.
*/ enum ieee80211_chanctx_replace_state { IEEE80211_CHANCTX_REPLACE_NONE, IEEE80211_CHANCTX_WILL_BE_REPLACED, IEEE80211_CHANCTX_REPLACES_OTHER, }; struct ieee80211_chanctx { struct list_head list; struct rcu_head rcu_head; struct list_head assigned_vifs; struct list_head reserved_vifs; enum ieee80211_chanctx_replace_state replace_state; struct ieee80211_chanctx *replace_ctx; enum ieee80211_chanctx_mode mode; bool driver_present; struct ieee80211_chanctx_conf conf; }; struct mac80211_qos_map { struct cfg80211_qos_map qos_map; struct rcu_head rcu_head; }; enum txq_info_flags { IEEE80211_TXQ_STOP, IEEE80211_TXQ_AMPDU, IEEE80211_TXQ_NO_AMSDU, IEEE80211_TXQ_STOP_NETIF_TX, }; /** * struct txq_info - per tid queue * * @tin: contains packets split into multiple flows * @def_flow: used as a fallback flow when a packet destined to @tin hashes to * a fq_flow which is already owned by a different tin * @def_cvars: codel vars for @def_flow * @frags: used to keep fragments created after dequeue */ struct txq_info { struct fq_tin tin; struct fq_flow def_flow; struct codel_vars def_cvars; struct codel_stats cstats; struct sk_buff_head frags; unsigned long flags; /* keep last! */ struct ieee80211_txq txq; }; struct ieee80211_if_mntr { u32 flags; u8 mu_follow_addr[ETH_ALEN] __aligned(2); struct list_head list; }; /** * struct ieee80211_if_nan - NAN state * * @conf: current NAN configuration * @func_ids: a bitmap of available instance_id's */ struct ieee80211_if_nan { struct cfg80211_nan_conf conf; /* protects function_inst_ids */ spinlock_t func_lock; struct idr function_inst_ids; }; struct ieee80211_sub_if_data { struct list_head list; struct wireless_dev wdev; /* keys */ struct list_head key_list; /* count for keys needing tailroom space allocation */ int crypto_tx_tailroom_needed_cnt; int crypto_tx_tailroom_pending_dec; struct delayed_work dec_tailroom_needed_wk; struct net_device *dev; struct ieee80211_local *local; unsigned int flags; unsigned long state; char name[IFNAMSIZ]; /* Fragment table for host-based reassembly */ struct ieee80211_fragment_entry fragments[IEEE80211_FRAGMENT_MAX]; unsigned int fragment_next; /* TID bitmap for NoAck policy */ u16 noack_map; /* bit field of ACM bits (BIT(802.1D tag)) */ u8 wmm_acm; struct ieee80211_key __rcu *keys[NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS]; struct ieee80211_key __rcu *default_unicast_key; struct ieee80211_key __rcu *default_multicast_key; struct ieee80211_key __rcu *default_mgmt_key; u16 sequence_number; __be16 control_port_protocol; bool control_port_no_encrypt; bool control_port_over_nl80211; int encrypt_headroom; atomic_t num_tx_queued; struct ieee80211_tx_queue_params tx_conf[IEEE80211_NUM_ACS]; struct mac80211_qos_map __rcu *qos_map; struct work_struct csa_finalize_work; bool csa_block_tx; /* write-protected by sdata_lock and local->mtx */ struct cfg80211_chan_def csa_chandef; struct list_head assigned_chanctx_list; /* protected by chanctx_mtx */ struct list_head reserved_chanctx_list; /* protected by chanctx_mtx */ /* context reservation -- protected with chanctx_mtx */ struct ieee80211_chanctx *reserved_chanctx; struct cfg80211_chan_def reserved_chandef; bool reserved_radar_required; bool reserved_ready; /* used to reconfigure hardware SM PS */ struct work_struct recalc_smps; struct work_struct work; struct sk_buff_head skb_queue; u8 needed_rx_chains; enum ieee80211_smps_mode smps_mode; int user_power_level; /* in dBm */ int ap_power_level; /* in dBm */ bool radar_required; struct delayed_work dfs_cac_timer_work; /* * AP this belongs to: 
	   self in AP mode and
	 * corresponding AP in VLAN mode, NULL for
	 * all others (might be needed later in IBSS)
	 */
	struct ieee80211_if_ap *bss;

	/* bitmap of allowed (non-MCS) rate indexes for rate control */
	u32 rc_rateidx_mask[NUM_NL80211_BANDS];

	bool rc_has_mcs_mask[NUM_NL80211_BANDS];
	u8 rc_rateidx_mcs_mask[NUM_NL80211_BANDS][IEEE80211_HT_MCS_MASK_LEN];

	bool rc_has_vht_mcs_mask[NUM_NL80211_BANDS];
	u16 rc_rateidx_vht_mcs_mask[NUM_NL80211_BANDS][NL80211_VHT_NSS_MAX];

	union {
		struct ieee80211_if_ap ap;
		struct ieee80211_if_wds wds;
		struct ieee80211_if_vlan vlan;
		struct ieee80211_if_managed mgd;
		struct ieee80211_if_ibss ibss;
		struct ieee80211_if_mesh mesh;
		struct ieee80211_if_ocb ocb;
		struct ieee80211_if_mntr mntr;
		struct ieee80211_if_nan nan;
	} u;

#ifdef CPTCFG_MAC80211_DEBUGFS
	struct {
		struct dentry *subdir_stations;
		struct dentry *default_unicast_key;
		struct dentry *default_multicast_key;
		struct dentry *default_mgmt_key;
	} debugfs;
#endif

	/* must be last, dynamically sized area in this! */
	struct ieee80211_vif vif;
};

static inline
struct ieee80211_sub_if_data *vif_to_sdata(struct ieee80211_vif *p)
{
	return container_of(p, struct ieee80211_sub_if_data, vif);
}

static inline void sdata_lock(struct ieee80211_sub_if_data *sdata)
	__acquires(&sdata->wdev.mtx)
{
	mutex_lock(&sdata->wdev.mtx);
	__acquire(&sdata->wdev.mtx);
}

static inline void sdata_unlock(struct ieee80211_sub_if_data *sdata)
	__releases(&sdata->wdev.mtx)
{
	mutex_unlock(&sdata->wdev.mtx);
	__release(&sdata->wdev.mtx);
}

#define sdata_dereference(p, sdata) \
	rcu_dereference_protected(p, lockdep_is_held(&sdata->wdev.mtx))

static inline void
sdata_assert_lock(struct ieee80211_sub_if_data *sdata)
{
	lockdep_assert_held(&sdata->wdev.mtx);
}

static inline int
ieee80211_chandef_get_shift(struct cfg80211_chan_def *chandef)
{
	switch (chandef->width) {
	case NL80211_CHAN_WIDTH_5:
		return 2;
	case NL80211_CHAN_WIDTH_10:
		return 1;
	default:
		return 0;
	}
}

static inline int
ieee80211_vif_get_shift(struct ieee80211_vif *vif)
{
	struct ieee80211_chanctx_conf *chanctx_conf;
	int shift = 0;

	rcu_read_lock();
	chanctx_conf = rcu_dereference(vif->chanctx_conf);
	if (chanctx_conf)
		shift = ieee80211_chandef_get_shift(&chanctx_conf->def);
	rcu_read_unlock();

	return shift;
}

enum {
	IEEE80211_RX_MSG	= 1,
	IEEE80211_TX_STATUS_MSG	= 2,
};

enum queue_stop_reason {
	IEEE80211_QUEUE_STOP_REASON_DRIVER,
	IEEE80211_QUEUE_STOP_REASON_PS,
	IEEE80211_QUEUE_STOP_REASON_CSA,
	IEEE80211_QUEUE_STOP_REASON_AGGREGATION,
	IEEE80211_QUEUE_STOP_REASON_SUSPEND,
	IEEE80211_QUEUE_STOP_REASON_SKB_ADD,
	IEEE80211_QUEUE_STOP_REASON_OFFCHANNEL,
	IEEE80211_QUEUE_STOP_REASON_FLUSH,
	IEEE80211_QUEUE_STOP_REASON_TDLS_TEARDOWN,
	IEEE80211_QUEUE_STOP_REASON_RESERVE_TID,

	IEEE80211_QUEUE_STOP_REASONS,
};

#ifdef CPTCFG_MAC80211_LEDS
struct tpt_led_trigger {
	char name[32];
	const struct ieee80211_tpt_blink *blink_table;
	unsigned int blink_table_len;
	struct timer_list timer;
	struct ieee80211_local *local;
	unsigned long prev_traffic;
	unsigned long tx_bytes, rx_bytes;
	unsigned int active, want;
	bool running;
};
#endif

#ifdef CPTCFG_MAC80211_LATENCY_MEASUREMENTS
/*
 * struct ieee80211_tx_consec_loss_ranges - Tx consecutive loss statistics
 *	bins ranges
 *
 * Measuring Tx consecutive loss statistics.
 * 1) Tx frames that were transmitted unsuccessfully.
 * 2) Tx frames that were transmitted successfully, but their latency passed
 *    the late threshold, and are therefore considered as transmitted
 *    unsuccessfully.
 * The user can configure the ranges via debugfs.
 *
 * @late_threshold: the late threshold for the successful packets.
 * @n_ranges: number of ranges that are taken into account
 * @ranges: the ranges that the user requested or NULL if disabled.
 */
struct ieee80211_tx_consec_loss_ranges {
	u32 late_threshold;
	u32 n_ranges;
	u32 ranges[];
};
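/*
 * Editorial example, not part of the original source: a sketch of how a run
 * of @n consecutively lost (or too-late) frames could be mapped to a
 * histogram bin, assuming ranges[] holds ascending boundaries as configured
 * via debugfs. The helper name and binning logic are hypothetical.
 */
static inline u32
example_consec_loss_bin(const struct ieee80211_tx_consec_loss_ranges *r, u32 n)
{
	u32 i;

	for (i = 0; i < r->n_ranges; i++)
		if (n < r->ranges[i])
			break;

	/* i == n_ranges means the run exceeded the last boundary */
	return i;
}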
/*
 * enum ieee80211_tx_lat_msr_point - points where to measure the latency
 *
 * There are 4 points where we are capturing the timestamp:
 * 1. Tx packet enters the kernel
 * 2. Tx packet is written to the bus
 * 3. Tx packet is acked by the AP
 * 4. Tx packet is erased.
 */
enum ieee80211_tx_lat_msr_point {
	IEEE80211_TX_LAT_ENTER,
	IEEE80211_TX_LAT_WRITE,
	IEEE80211_TX_LAT_ACK,
	IEEE80211_TX_LAT_DEL,
	/* should always be last */
	IEEE80211_TX_LAT_MAX_POINT,
};

#define IEEE80211_IF_DISABLE_THSHLD -1

/*
 * struct ieee80211_tx_latency_bin_ranges - Tx latency statistics bins ranges
 *
 * Measuring Tx latency statistics. Counts how many Tx frames were transmitted
 * in a certain latency range (in milliseconds). Each station that uses these
 * ranges will have bins to count the amount of frames received in that range.
 * The user can configure the ranges via debugfs.
 * If ranges is NULL then Tx latency statistics bins are disabled for all
 * stations.
 *
 * @n_ranges: number of ranges that are taken into account
 * @ranges: the ranges that the user requested or NULL if disabled.
 */
struct ieee80211_tx_latency_bin_ranges {
	int n_ranges;
	u32 ranges[];
};

/*
 * struct ieee80211_tx_latency_points - Tx latency statistics measurement points
 *
 * Measuring Tx latency statistics. Needs a definition of the measurement
 * points, which is given in this struct.
 *
 * @points: start & end points from where to measure the latency.
 */
struct ieee80211_tx_latency_points {
	enum ieee80211_tx_lat_msr_point points[2];
};

/*
 * struct ieee80211_tx_latency_threshold - Tx latency threshold
 *
 * A user can configure the Tx latency threshold that would trigger collecting
 * debug data via the wrt mechanism.
 *
 * @thresholds_bss: list of thresholds for each tid in a bss interface
 * @thresholds_p2p: list of thresholds for each tid in a p2p interface
 * @monitor_collec_wind: collection window for monitor logs
 */
struct ieee80211_tx_latency_threshold {
	u32 *thresholds_bss;
	u32 *thresholds_p2p;
	u8 monitor_record_mode;
	u16 monitor_collec_wind;
};
#endif /* CPTCFG_MAC80211_LATENCY_MEASUREMENTS */

/**
 * mac80211 scan flags - currently active scan mode
 *
 * @SCAN_SW_SCANNING: We're currently in the process of scanning but may as
 *	well be on the operating channel
 * @SCAN_HW_SCANNING: The hardware is scanning for us, we have no way to
 *	determine if we are on the operating channel or not
 * @SCAN_ONCHANNEL_SCANNING: Do a software scan on only the current operating
 *	channel. This should not interrupt normal traffic.
 * @SCAN_COMPLETED: Set for our scan work function when the driver reported
 *	that the scan completed.
 * @SCAN_ABORTED: Set for our scan work function when the driver reported
 *	a scan complete for an aborted scan.
 * @SCAN_HW_CANCELLED: Set for our scan work function when the scan is being
 *	cancelled.
 */
enum {
	SCAN_SW_SCANNING,
	SCAN_HW_SCANNING,
	SCAN_ONCHANNEL_SCANNING,
	SCAN_COMPLETED,
	SCAN_ABORTED,
	SCAN_HW_CANCELLED,
};
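/*
 * Editorial example, not part of the original source: the scan mode flags
 * above index an unsigned long bitmap (local->scanning) and are used with
 * the atomic test_bit()/set_bit() bitops, e.g.:
 */
static inline bool example_sw_scan_active(const unsigned long *scanning)
{
	return test_bit(SCAN_SW_SCANNING, scanning) ||
	       test_bit(SCAN_ONCHANNEL_SCANNING, scanning);
}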
/**
 * enum mac80211_scan_state - scan state machine states
 *
 * @SCAN_DECISION: Main entry point to the scan state machine, this state
 *	determines if we should keep on scanning or switch back to the
 *	operating channel
 * @SCAN_SET_CHANNEL: Set the next channel to be scanned
 * @SCAN_SEND_PROBE: Send probe requests and wait for probe responses
 * @SCAN_SUSPEND: Suspend the scan and go back to operating channel to
 *	send out data
 * @SCAN_RESUME: Resume the scan and scan the next channel
 * @SCAN_ABORT: Abort the scan and go back to operating channel
 */
enum mac80211_scan_state {
	SCAN_DECISION,
	SCAN_SET_CHANNEL,
	SCAN_SEND_PROBE,
	SCAN_SUSPEND,
	SCAN_RESUME,
	SCAN_ABORT,
};

struct ieee80211_local {
	/* embed the driver visible part.
	 * don't cast (use the static inlines below), but we keep
	 * it first anyway so they become a no-op */
	struct ieee80211_hw hw;

	struct fq fq;
	struct codel_vars *cvars;
	struct codel_params cparams;

	const struct ieee80211_ops *ops;

	/*
	 * private workqueue to mac80211. mac80211 makes this accessible
	 * via ieee80211_queue_work()
	 */
	struct workqueue_struct *workqueue;

	unsigned long queue_stop_reasons[IEEE80211_MAX_QUEUES];
	int q_stop_reasons[IEEE80211_MAX_QUEUES][IEEE80211_QUEUE_STOP_REASONS];
	/* also used to protect ampdu_ac_queue and ampdu_ac_stop_refcnt */
	spinlock_t queue_stop_reason_lock;

	int open_count;

	int monitors, cooked_mntrs;
	/* number of interfaces with corresponding FIF_ flags */
	int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
	    fif_probe_req;
	int probe_req_reg;
	unsigned int filter_flags; /* FIF_* */

	bool wiphy_ciphers_allocated;

	bool use_chanctx;

	/* protects the aggregated multicast list and filter calls */
	spinlock_t filter_lock;

	/* used for uploading changed mc list */
	struct work_struct reconfig_filter;

	/* aggregated multicast list */
	struct netdev_hw_addr_list mc_list;

	bool tim_in_locked_section; /* see ieee80211_beacon_get() */

	/*
	 * suspended is true if we finished all the suspend _and_ we have
	 * not yet come up from resume. This is to be used by mac80211
	 * to ensure driver sanity during suspend and mac80211's own
	 * sanity. It can eventually be used for WoW as well.
	 */
	bool suspended;

	/*
	 * Resuming is true while suspended, but when we're reprogramming the
	 * hardware -- at that time it's allowed to use ieee80211_queue_work()
	 * again even though some other parts of the stack are still suspended
	 * and we still drop received frames to avoid waking the stack.
	 */
	bool resuming;

	/*
	 * quiescing is true during the suspend process _only_ to
	 * ease timer cancelling etc.
	 */
	bool quiescing;

	/* device is started */
	bool started;

	/* device is in the middle of a HW reconfig */
	bool in_reconfig;

	/* wowlan is enabled -- don't reconfig on resume */
	bool wowlan;

	struct work_struct radar_detected_work;

	/* number of RX chains the hardware has */
	u8 rx_chains;

	/* bitmap of which sbands were copied */
	u8 sband_allocated;

	int tx_headroom; /* required headroom for hardware/radiotap */

	/* Tasklet and skb queue to process calls from IRQ mode. All frames
	 * added to skb_queue will be processed, but frames in
	 * skb_queue_unreliable may be dropped if the total length of these
	 * queues increases over the limit.
*/ #define IEEE80211_IRQSAFE_QUEUE_LIMIT 128 struct tasklet_struct tasklet; struct sk_buff_head skb_queue; struct sk_buff_head skb_queue_unreliable; spinlock_t rx_path_lock; /* Station data */ /* * The mutex only protects the list, hash table and * counter, reads are done with RCU. */ struct mutex sta_mtx; spinlock_t tim_lock; unsigned long num_sta; struct list_head sta_list; struct rhltable sta_hash; struct timer_list sta_cleanup; int sta_generation; #ifdef CPTCFG_MAC80211_LATENCY_MEASUREMENTS /* * Tx latency & consecutive loss statistics parameters for * all stations. * Can enable via debugfs (NULL when disabled). */ struct ieee80211_tx_consec_loss_ranges __rcu *tx_consec; struct ieee80211_tx_latency_bin_ranges __rcu *tx_latency; /* start & end points from where to measure the latency. */ enum ieee80211_tx_lat_msr_point tx_msrmnt_points[2]; struct ieee80211_tx_latency_threshold __rcu *tx_threshold; #endif /* CPTCFG_MAC80211_LATENCY_MEASUREMENTS */ struct sk_buff_head pending[IEEE80211_MAX_QUEUES]; struct tasklet_struct tx_pending_tasklet; struct tasklet_struct wake_txqs_tasklet; atomic_t agg_queue_stop[IEEE80211_MAX_QUEUES]; /* number of interfaces with allmulti RX */ atomic_t iff_allmultis; struct rate_control_ref *rate_ctrl; struct crypto_cipher *wep_tx_tfm; struct crypto_cipher *wep_rx_tfm; u32 wep_iv; /* see iface.c */ struct list_head interfaces; struct list_head mon_list; /* only that are IFF_UP && !cooked */ struct mutex iflist_mtx; /* * Key mutex, protects sdata's key_list and sta_info's * key pointers (write access, they're RCU.) */ struct mutex key_mtx; /* mutex for scan and work locking */ struct mutex mtx; /* Scanning and BSS list */ unsigned long scanning; struct cfg80211_ssid scan_ssid; struct cfg80211_scan_request *int_scan_req; struct cfg80211_scan_request __rcu *scan_req; struct ieee80211_scan_request *hw_scan_req; struct cfg80211_chan_def scan_chandef; enum nl80211_band hw_scan_band; int scan_channel_idx; int scan_ies_len; int hw_scan_ies_bufsize; struct cfg80211_scan_info scan_info; struct work_struct sched_scan_stopped_work; struct ieee80211_sub_if_data __rcu *sched_scan_sdata; struct cfg80211_sched_scan_request __rcu *sched_scan_req; u8 scan_addr[ETH_ALEN]; unsigned long leave_oper_channel_time; enum mac80211_scan_state next_scan_state; struct delayed_work scan_work; struct ieee80211_sub_if_data __rcu *scan_sdata; /* For backward compatibility only -- do not use */ struct cfg80211_chan_def _oper_chandef; /* Temporary remain-on-channel for off-channel operations */ struct ieee80211_channel *tmp_channel; /* channel contexts */ struct list_head chanctx_list; struct mutex chanctx_mtx; #ifdef CPTCFG_MAC80211_LEDS int tx_led_counter, rx_led_counter; struct led_trigger tx_led, rx_led, assoc_led, radio_led; struct led_trigger tpt_led; atomic_t tx_led_active, rx_led_active, assoc_led_active; atomic_t radio_led_active, tpt_led_active; struct tpt_led_trigger *tpt_led_trigger; #endif #ifdef CPTCFG_MAC80211_DEBUG_COUNTERS /* SNMP counters */ /* dot11CountersTable */ u32 dot11TransmittedFragmentCount; u32 dot11MulticastTransmittedFrameCount; u32 dot11FailedCount; u32 dot11RetryCount; u32 dot11MultipleRetryCount; u32 dot11FrameDuplicateCount; u32 dot11ReceivedFragmentCount; u32 dot11MulticastReceivedFrameCount; u32 dot11TransmittedFrameCount; /* TX/RX handler statistics */ unsigned int tx_handlers_drop; unsigned int tx_handlers_queued; unsigned int tx_handlers_drop_wep; unsigned int tx_handlers_drop_not_assoc; unsigned int tx_handlers_drop_unauth_port; unsigned int 
rx_handlers_drop; unsigned int rx_handlers_queued; unsigned int rx_handlers_drop_nullfunc; unsigned int rx_handlers_drop_defrag; unsigned int tx_expand_skb_head; unsigned int tx_expand_skb_head_cloned; unsigned int rx_expand_skb_head_defrag; unsigned int rx_handlers_fragments; unsigned int tx_status_drop; #define I802_DEBUG_INC(c) (c)++ #else /* CPTCFG_MAC80211_DEBUG_COUNTERS */ #define I802_DEBUG_INC(c) do { } while (0) #endif /* CPTCFG_MAC80211_DEBUG_COUNTERS */ int total_ps_buffered; /* total number of all buffered unicast and * multicast packets for power saving stations */ bool pspolling; bool offchannel_ps_enabled; /* * PS can only be enabled when we have exactly one managed * interface (and monitors) in PS, this then points there. */ struct ieee80211_sub_if_data *ps_sdata; struct work_struct dynamic_ps_enable_work; struct work_struct dynamic_ps_disable_work; struct timer_list dynamic_ps_timer; struct notifier_block ifa_notifier; struct notifier_block ifa6_notifier; /* * The dynamic ps timeout configured from user space via WEXT - * this will override whatever chosen by mac80211 internally. */ int dynamic_ps_forced_timeout; int user_power_level; /* in dBm, for all interfaces */ enum ieee80211_smps_mode smps_mode; struct work_struct restart_work; #ifdef CPTCFG_MAC80211_DEBUGFS struct local_debugfsdentries { struct dentry *rcdir; struct dentry *keys; } debugfs; #endif /* * Remain-on-channel support */ struct delayed_work roc_work; struct list_head roc_list; struct work_struct hw_roc_start, hw_roc_done; unsigned long hw_roc_start_time; u64 roc_cookie_counter; struct idr ack_status_frames; spinlock_t ack_status_lock; struct ieee80211_sub_if_data __rcu *p2p_sdata; /* virtual monitor interface */ struct ieee80211_sub_if_data __rcu *monitor_sdata; struct cfg80211_chan_def monitor_chandef; /* extended capabilities provided by mac80211 */ u8 ext_capa[9]; /* TDLS channel switch */ struct work_struct tdls_chsw_work; struct sk_buff_head skb_queue_tdls_chsw; u64 msrment_cookie_counter; struct uapsd_black_list { struct rcu_head rcu_head; unsigned int num_oui; unsigned int oui[]; } __rcu *uapsd_black_list; }; static inline struct ieee80211_sub_if_data * IEEE80211_DEV_TO_SUB_IF(struct net_device *dev) { return netdev_priv(dev); } static inline struct ieee80211_sub_if_data * IEEE80211_WDEV_TO_SUB_IF(struct wireless_dev *wdev) { return container_of(wdev, struct ieee80211_sub_if_data, wdev); } static inline struct ieee80211_supported_band * ieee80211_get_sband(struct ieee80211_sub_if_data *sdata) { struct ieee80211_local *local = sdata->local; struct ieee80211_chanctx_conf *chanctx_conf; enum nl80211_band band; rcu_read_lock(); chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); if (WARN_ON(!chanctx_conf)) { rcu_read_unlock(); return NULL; } band = chanctx_conf->def.chan->band; rcu_read_unlock(); return local->hw.wiphy->bands[band]; } /* this struct holds the value parsing from channel switch IE */ struct ieee80211_csa_ie { struct cfg80211_chan_def chandef; u8 mode; u8 count; u8 ttl; u16 pre_value; u16 reason_code; u32 max_switch_time; }; /* Parsed Information Elements */ struct ieee802_11_elems { const u8 *ie_start; size_t total_len; /* pointers to IEs */ const struct ieee80211_tdls_lnkie *lnk_id; const struct ieee80211_ch_switch_timing *ch_sw_timing; const u8 *ext_capab; const u8 *ssid; const u8 *supp_rates; const u8 *ds_params; const struct ieee80211_tim_ie *tim; const u8 *challenge; const u8 *rsn; const u8 *erp_info; const u8 *ext_supp_rates; const u8 *wmm_info; const u8 *wmm_param; const 
struct ieee80211_ht_cap *ht_cap_elem; const struct ieee80211_ht_operation *ht_operation; const struct ieee80211_vht_cap *vht_cap_elem; const struct ieee80211_vht_operation *vht_operation; const struct ieee80211_meshconf_ie *mesh_config; const u8 *he_cap; const struct ieee80211_he_operation *he_operation; const struct ieee80211_mu_edca_param_set *mu_edca_param_set; const u8 *uora_element; const u8 *mesh_id; const u8 *peering; const __le16 *awake_window; const u8 *preq; const u8 *prep; const u8 *perr; const struct ieee80211_rann_ie *rann; const struct ieee80211_channel_sw_ie *ch_switch_ie; const struct ieee80211_ext_chansw_ie *ext_chansw_ie; const struct ieee80211_wide_bw_chansw_ie *wide_bw_chansw_ie; const u8 *max_channel_switch_time; const u8 *country_elem; const u8 *pwr_constr_elem; const u8 *cisco_dtpc_elem; const struct ieee80211_timeout_interval_ie *timeout_int; const u8 *opmode_notif; const struct ieee80211_sec_chan_offs_ie *sec_chan_offs; struct ieee80211_mesh_chansw_params_ie *mesh_chansw_params_ie; const struct ieee80211_bss_max_idle_period_ie *max_idle_period_ie; const struct ieee80211_multiple_bssid_configuration *mbssid_config_ie; const struct ieee80211_bssid_index *bssid_index; u8 max_bssid_indicator; u8 dtim_count; u8 dtim_period; /* length of them, respectively */ u8 ext_capab_len; u8 ssid_len; u8 supp_rates_len; u8 tim_len; u8 challenge_len; u8 rsn_len; u8 ext_supp_rates_len; u8 wmm_info_len; u8 wmm_param_len; u8 he_cap_len; u8 mesh_id_len; u8 peering_len; u8 preq_len; u8 prep_len; u8 perr_len; u8 country_elem_len; u8 bssid_index_len; /* whether a parse error occurred while retrieving these elements */ bool parse_error; }; static inline struct ieee80211_local *hw_to_local( struct ieee80211_hw *hw) { return container_of(hw, struct ieee80211_local, hw); } static inline struct txq_info *to_txq_info(struct ieee80211_txq *txq) { return container_of(txq, struct txq_info, txq); } static inline bool txq_has_queue(struct ieee80211_txq *txq) { struct txq_info *txqi = to_txq_info(txq); return !(skb_queue_empty(&txqi->frags) && !txqi->tin.backlog_packets); } static inline int ieee80211_bssid_match(const u8 *raddr, const u8 *addr) { return ether_addr_equal(raddr, addr) || is_broadcast_ether_addr(raddr); } static inline bool ieee80211_have_rx_timestamp(struct ieee80211_rx_status *status) { WARN_ON_ONCE(status->flag & RX_FLAG_MACTIME_START && status->flag & RX_FLAG_MACTIME_END); if (status->flag & (RX_FLAG_MACTIME_START | RX_FLAG_MACTIME_END)) return true; /* can't handle non-legacy preamble yet */ if (status->flag & RX_FLAG_MACTIME_PLCP_START && status->encoding == RX_ENC_LEGACY) return true; return false; } void ieee80211_vif_inc_num_mcast(struct ieee80211_sub_if_data *sdata); void ieee80211_vif_dec_num_mcast(struct ieee80211_sub_if_data *sdata); /* This function returns the number of multicast stations connected to this * interface. It returns -1 if that number is not tracked, that is for netdevs * not in AP or AP_VLAN mode or when using 4addr. 
*/ static inline int ieee80211_vif_get_num_mcast_if(struct ieee80211_sub_if_data *sdata) { if (sdata->vif.type == NL80211_IFTYPE_AP) return atomic_read(&sdata->u.ap.num_mcast_sta); if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN && !sdata->u.vlan.sta) return atomic_read(&sdata->u.vlan.num_mcast_sta); return -1; } u64 ieee80211_calculate_rx_timestamp(struct ieee80211_local *local, struct ieee80211_rx_status *status, unsigned int mpdu_len, unsigned int mpdu_offset); int ieee80211_hw_config(struct ieee80211_local *local, u32 changed); void ieee80211_tx_set_protected(struct ieee80211_tx_data *tx); void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata, u32 changed); void ieee80211_configure_filter(struct ieee80211_local *local); u32 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata); u64 ieee80211_mgmt_tx_cookie(struct ieee80211_local *local); int ieee80211_attach_ack_skb(struct ieee80211_local *local, struct sk_buff *skb, u64 *cookie, gfp_t gfp); void ieee80211_check_fast_rx(struct sta_info *sta); void __ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata); void ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata); void ieee80211_clear_fast_rx(struct sta_info *sta); /* STA code */ void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata); int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata, struct cfg80211_auth_request *req); int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, struct cfg80211_assoc_request *req); int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata, struct cfg80211_deauth_request *req); int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata, struct cfg80211_disassoc_request *req); void ieee80211_send_pspoll(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata); void ieee80211_recalc_ps(struct ieee80211_local *local); void ieee80211_recalc_ps_vif(struct ieee80211_sub_if_data *sdata); int ieee80211_set_arp_filter(struct ieee80211_sub_if_data *sdata); void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata); void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb); void ieee80211_sta_reset_beacon_monitor(struct ieee80211_sub_if_data *sdata); void ieee80211_sta_reset_conn_monitor(struct ieee80211_sub_if_data *sdata); void ieee80211_mgd_stop(struct ieee80211_sub_if_data *sdata); void ieee80211_mgd_conn_tx_status(struct ieee80211_sub_if_data *sdata, __le16 fc, bool acked); void ieee80211_mgd_quiesce(struct ieee80211_sub_if_data *sdata); void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata); void ieee80211_sta_handle_tspec_ac_params(struct ieee80211_sub_if_data *sdata); /* IBSS code */ void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local); void ieee80211_ibss_setup_sdata(struct ieee80211_sub_if_data *sdata); void ieee80211_ibss_rx_no_sta(struct ieee80211_sub_if_data *sdata, const u8 *bssid, const u8 *addr, u32 supp_rates); int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata, struct cfg80211_ibss_params *params); int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata); void ieee80211_ibss_work(struct ieee80211_sub_if_data *sdata); void ieee80211_ibss_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb); int ieee80211_ibss_csa_beacon(struct ieee80211_sub_if_data *sdata, struct cfg80211_csa_settings *csa_settings); int ieee80211_ibss_finish_csa(struct ieee80211_sub_if_data *sdata); void ieee80211_ibss_stop(struct ieee80211_sub_if_data *sdata); /* OCB code */ void 
ieee80211_ocb_work(struct ieee80211_sub_if_data *sdata); void ieee80211_ocb_rx_no_sta(struct ieee80211_sub_if_data *sdata, const u8 *bssid, const u8 *addr, u32 supp_rates); void ieee80211_ocb_setup_sdata(struct ieee80211_sub_if_data *sdata); int ieee80211_ocb_join(struct ieee80211_sub_if_data *sdata, struct ocb_setup *setup); int ieee80211_ocb_leave(struct ieee80211_sub_if_data *sdata); /* mesh code */ void ieee80211_mesh_work(struct ieee80211_sub_if_data *sdata); void ieee80211_mesh_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb); int ieee80211_mesh_csa_beacon(struct ieee80211_sub_if_data *sdata, struct cfg80211_csa_settings *csa_settings); int ieee80211_mesh_finish_csa(struct ieee80211_sub_if_data *sdata); /* scan/BSS handling */ void ieee80211_scan_work(struct work_struct *work); int ieee80211_request_ibss_scan(struct ieee80211_sub_if_data *sdata, const u8 *ssid, u8 ssid_len, struct ieee80211_channel **channels, unsigned int n_channels, enum nl80211_bss_scan_width scan_width); int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata, struct cfg80211_scan_request *req); void ieee80211_scan_cancel(struct ieee80211_local *local); void ieee80211_run_deferred_scan(struct ieee80211_local *local); void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb); void ieee80211_mlme_notify_scan_completed(struct ieee80211_local *local); struct ieee80211_bss * ieee80211_bss_info_update(struct ieee80211_local *local, struct ieee80211_rx_status *rx_status, struct ieee80211_mgmt *mgmt, size_t len, struct ieee80211_channel *channel); void ieee80211_rx_bss_put(struct ieee80211_local *local, struct ieee80211_bss *bss); /* scheduled scan handling */ int __ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata, struct cfg80211_sched_scan_request *req); int ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata, struct cfg80211_sched_scan_request *req); int ieee80211_request_sched_scan_stop(struct ieee80211_local *local); void ieee80211_sched_scan_end(struct ieee80211_local *local); void ieee80211_sched_scan_stopped_work(struct work_struct *work); /* off-channel/mgmt-tx */ void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local); void ieee80211_offchannel_return(struct ieee80211_local *local); void ieee80211_roc_setup(struct ieee80211_local *local); void ieee80211_start_next_roc(struct ieee80211_local *local); void ieee80211_roc_purge(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata); int ieee80211_remain_on_channel(struct wiphy *wiphy, struct wireless_dev *wdev, struct ieee80211_channel *chan, unsigned int duration, u64 *cookie); int ieee80211_cancel_remain_on_channel(struct wiphy *wiphy, struct wireless_dev *wdev, u64 cookie); int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, struct cfg80211_mgmt_tx_params *params, u64 *cookie); int ieee80211_mgmt_tx_cancel_wait(struct wiphy *wiphy, struct wireless_dev *wdev, u64 cookie); /* channel switch handling */ void ieee80211_csa_finalize_work(struct work_struct *work); int ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_csa_settings *params); /* interface handling */ int ieee80211_iface_init(void); void ieee80211_iface_exit(void); int ieee80211_if_add(struct ieee80211_local *local, const char *name, unsigned char name_assign_type, struct wireless_dev **new_wdev, enum nl80211_iftype type, struct vif_params *params); int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata, enum nl80211_iftype type); 
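/*
 * Editorial example, not part of the original source: a sketch of the
 * expected calling convention for ieee80211_if_add() declared above --
 * the caller holds the RTNL and receives the new wdev back. The helper
 * name is hypothetical.
 */
static inline int example_add_station_iface(struct ieee80211_local *local,
					    struct wireless_dev **new_wdev)
{
	struct vif_params params = {};

	ASSERT_RTNL();
	return ieee80211_if_add(local, "wlan%d", NET_NAME_ENUM, new_wdev,
				NL80211_IFTYPE_STATION, &params);
}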
void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata); void ieee80211_remove_interfaces(struct ieee80211_local *local); u32 ieee80211_idle_off(struct ieee80211_local *local); void ieee80211_recalc_idle(struct ieee80211_local *local); void ieee80211_adjust_monitor_flags(struct ieee80211_sub_if_data *sdata, const int offset); int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up); void ieee80211_sdata_stop(struct ieee80211_sub_if_data *sdata); int ieee80211_add_virtual_monitor(struct ieee80211_local *local); void ieee80211_del_virtual_monitor(struct ieee80211_local *local); bool __ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata); void ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata, bool update_bss); static inline bool ieee80211_sdata_running(struct ieee80211_sub_if_data *sdata) { return test_bit(SDATA_STATE_RUNNING, &sdata->state); } /* tx handling */ void ieee80211_clear_tx_pending(struct ieee80211_local *local); void ieee80211_tx_pending(unsigned long data); netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb, struct net_device *dev); netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, struct net_device *dev); void __ieee80211_subif_start_xmit(struct sk_buff *skb, struct net_device *dev, u32 info_flags); void ieee80211_purge_tx_queue(struct ieee80211_hw *hw, struct sk_buff_head *skbs); struct sk_buff * ieee80211_build_data_template(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, u32 info_flags); void ieee80211_tx_monitor(struct ieee80211_local *local, struct sk_buff *skb, struct ieee80211_supported_band *sband, int retry_count, int shift, bool send_to_cooked); void ieee80211_check_fast_xmit(struct sta_info *sta); void ieee80211_check_fast_xmit_all(struct ieee80211_local *local); void ieee80211_check_fast_xmit_iface(struct ieee80211_sub_if_data *sdata); void ieee80211_clear_fast_xmit(struct sta_info *sta); int ieee80211_tx_control_port(struct wiphy *wiphy, struct net_device *dev, const u8 *buf, size_t len, const u8 *dest, __be16 proto, bool unencrypted); /* HT */ void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata, struct ieee80211_sta_ht_cap *ht_cap); bool ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata, struct ieee80211_supported_band *sband, const struct ieee80211_ht_cap *ht_cap_ie, struct sta_info *sta); void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata, const u8 *da, u16 tid, u16 initiator, u16 reason_code); int ieee80211_send_smps_action(struct ieee80211_sub_if_data *sdata, enum ieee80211_smps_mode smps, const u8 *da, const u8 *bssid); void ieee80211_request_smps_ap_work(struct work_struct *work); void ieee80211_request_smps_mgd_work(struct work_struct *work); bool ieee80211_smps_is_restrictive(enum ieee80211_smps_mode smps_mode_old, enum ieee80211_smps_mode smps_mode_new); void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid, u16 initiator, u16 reason, bool stop); void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid, u16 initiator, u16 reason, bool stop); void ___ieee80211_start_rx_ba_session(struct sta_info *sta, u8 dialog_token, u16 timeout, u16 start_seq_num, u16 ba_policy, u16 tid, u16 buf_size, bool tx, bool auto_seq); void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta, enum ieee80211_agg_stop_reason reason); void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata, struct sta_info *sta, struct ieee80211_mgmt *mgmt, size_t len); void ieee80211_process_addba_resp(struct ieee80211_local *local, struct sta_info 
				  *sta, struct ieee80211_mgmt *mgmt,
				  size_t len);
void ieee80211_process_addba_request(struct ieee80211_local *local,
				     struct sta_info *sta,
				     struct ieee80211_mgmt *mgmt,
				     size_t len);

int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
				   enum ieee80211_agg_stop_reason reason);
int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
				    enum ieee80211_agg_stop_reason reason);
void ieee80211_start_tx_ba_cb(struct sta_info *sta, int tid,
			      struct tid_ampdu_tx *tid_tx);
void ieee80211_stop_tx_ba_cb(struct sta_info *sta, int tid,
			     struct tid_ampdu_tx *tid_tx);
void ieee80211_ba_session_work(struct work_struct *work);
void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid);
void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid);

u8 ieee80211_mcs_to_chains(const struct ieee80211_mcs_info *mcs);
enum nl80211_smps_mode
ieee80211_smps_mode_to_smps_mode(enum ieee80211_smps_mode smps);

/* VHT */
void ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
					 struct ieee80211_supported_band *sband,
					 const struct ieee80211_vht_cap *vht_cap_ie,
					 struct sta_info *sta);
enum ieee80211_sta_rx_bandwidth ieee80211_sta_cap_rx_bw(struct sta_info *sta);
enum ieee80211_sta_rx_bandwidth ieee80211_sta_cur_vht_bw(struct sta_info *sta);
void ieee80211_sta_set_rx_nss(struct sta_info *sta);
enum ieee80211_sta_rx_bandwidth
ieee80211_chan_width_to_rx_bw(enum nl80211_chan_width width);
enum nl80211_chan_width ieee80211_sta_cap_chan_bw(struct sta_info *sta);
void ieee80211_process_mu_groups(struct ieee80211_sub_if_data *sdata,
				 struct ieee80211_mgmt *mgmt);
u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
				  struct sta_info *sta, u8 opmode,
				  enum nl80211_band band);
void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
				 struct sta_info *sta, u8 opmode,
				 enum nl80211_band band);
void ieee80211_apply_vhtcap_overrides(struct ieee80211_sub_if_data *sdata,
				      struct ieee80211_sta_vht_cap *vht_cap);
void ieee80211_get_vht_mask_from_cap(__le16 vht_cap,
				     u16 vht_mask[NL80211_VHT_NSS_MAX]);
enum nl80211_chan_width
ieee80211_sta_rx_bw_to_chan_width(struct sta_info *sta);

/* HE */
void ieee80211_he_cap_ie_to_sta_he_cap(struct ieee80211_sub_if_data *sdata,
				       struct ieee80211_supported_band *sband,
				       const u8 *he_cap_ie, u8 he_cap_len,
				       struct sta_info *sta);

/* Spectrum management */
void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata,
				       struct ieee80211_mgmt *mgmt,
				       size_t len);
/**
 * ieee80211_parse_ch_switch_ie - parses channel switch IEs
 * @sdata: the sdata of the interface which has received the frame
 * @elems: parsed 802.11 elements received with the frame
 * @current_band: indicates the current band
 * @sta_flags: contains information about own capabilities and restrictions
 *	to decide which channel switch announcements can be accepted. Only the
 *	following subset of &enum ieee80211_sta_flags are evaluated:
 *	%IEEE80211_STA_DISABLE_HT, %IEEE80211_STA_DISABLE_VHT,
 *	%IEEE80211_STA_DISABLE_40MHZ, %IEEE80211_STA_DISABLE_80P80MHZ,
 *	%IEEE80211_STA_DISABLE_160MHZ.
 * @bssid: the currently connected bssid (for reporting)
 * @csa_ie: parsed 802.11 csa elements on count, mode, chandef and mesh ttl.
 *	All of them are filled in only on success.
 * Return: 0 on success, <0 on error and >0 if there is nothing to parse.
 */
int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
				 struct ieee802_11_elems *elems,
				 enum nl80211_band current_band,
				 u32 sta_flags, u8 *bssid,
				 struct ieee80211_csa_ie *csa_ie);
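/*
 * Editorial example, not part of the original source: a sketch of how a
 * caller is expected to interpret the tri-state return value documented
 * above. The helper name is hypothetical.
 */
static inline bool example_csa_elems_usable(struct ieee80211_sub_if_data *sdata,
					    struct ieee802_11_elems *elems,
					    enum nl80211_band band, u8 *bssid,
					    struct ieee80211_csa_ie *csa_ie)
{
	int res = ieee80211_parse_ch_switch_ie(sdata, elems, band,
					       sdata->u.mgd.flags, bssid,
					       csa_ie);

	if (res < 0)
		return false;	/* malformed elements -- treat as an error */
	if (res > 0)
		return false;	/* nothing to parse -- no switch announced */
	return true;		/* csa_ie has been filled in */
}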
/* Suspend/resume and hw reconfiguration */
int ieee80211_reconfig(struct ieee80211_local *local);
void ieee80211_stop_device(struct ieee80211_local *local);

int __ieee80211_suspend(struct ieee80211_hw *hw,
			struct cfg80211_wowlan *wowlan);

static inline int __ieee80211_resume(struct ieee80211_hw *hw)
{
	struct ieee80211_local *local = hw_to_local(hw);

	WARN(test_bit(SCAN_HW_SCANNING, &local->scanning) &&
	     !test_bit(SCAN_COMPLETED, &local->scanning),
	     "%s: resume with hardware scan still in progress\n",
	     wiphy_name(hw->wiphy));

	return ieee80211_reconfig(hw_to_local(hw));
}

/* utility functions/constants */
extern const void *const mac80211_wiphy_privid; /* for wiphy privid */
int ieee80211_frame_duration(enum nl80211_band band, size_t len,
			     int rate, int erp, int short_preamble,
			     int shift);
void ieee80211_regulatory_limit_wmm_params(struct ieee80211_sub_if_data *sdata,
					   struct ieee80211_tx_queue_params *qparam,
					   int ac);
void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata,
			       bool bss_notify, bool enable_qos);
void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
		    struct sta_info *sta, struct sk_buff *skb,
		    u32 txdata_flags);

void __ieee80211_tx_skb_tid_band(struct ieee80211_sub_if_data *sdata,
				 struct sk_buff *skb, int tid,
				 enum nl80211_band band, u32 txdata_flags);

static inline void
ieee80211_tx_skb_tid_band(struct ieee80211_sub_if_data *sdata,
			  struct sk_buff *skb, int tid,
			  enum nl80211_band band, u32 txdata_flags)
{
	rcu_read_lock();
	__ieee80211_tx_skb_tid_band(sdata, skb, tid, band, txdata_flags);
	rcu_read_unlock();
}

static inline void ieee80211_tx_skb_tid(struct ieee80211_sub_if_data *sdata,
					struct sk_buff *skb, int tid)
{
	struct ieee80211_chanctx_conf *chanctx_conf;

	rcu_read_lock();
	chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
	if (WARN_ON(!chanctx_conf)) {
		rcu_read_unlock();
		kfree_skb(skb);
		return;
	}

	__ieee80211_tx_skb_tid_band(sdata, skb, tid,
				    chanctx_conf->def.chan->band, 0);
	rcu_read_unlock();
}

static inline void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata,
				    struct sk_buff *skb)
{
	/* Send all internal mgmt frames on VO. Accordingly set TID to 7. */
	ieee80211_tx_skb_tid(sdata, skb, 7);
}
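/*
 * Editorial example, not part of the original source: the usual pattern for
 * internally generated frames -- reserve the hardware headroom, build the
 * frame, then hand it off via ieee80211_tx_skb() defined above. The helper
 * name and the opaque payload are hypothetical.
 */
static inline void example_send_internal_frame(struct ieee80211_sub_if_data *sdata,
					       const void *frame, size_t len)
{
	struct sk_buff *skb;

	skb = dev_alloc_skb(sdata->local->tx_headroom + len);
	if (!skb)
		return;

	skb_reserve(skb, sdata->local->tx_headroom);
	skb_put_data(skb, frame, len);
	ieee80211_tx_skb(sdata, skb);	/* goes out on VO, TID 7 */
}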
u32 ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
			       struct ieee802_11_elems *elems,
			       u64 filter, u32 crc,
			       u8 *transmitter_bssid, u8 *bss_bssid);
static inline void ieee802_11_parse_elems(const u8 *start, size_t len,
					  bool action,
					  struct ieee802_11_elems *elems,
					  u8 *transmitter_bssid,
					  u8 *bss_bssid)
{
	ieee802_11_parse_elems_crc(start, len, action, elems, 0, 0,
				   transmitter_bssid, bss_bssid);
}

extern const int ieee802_1d_to_ac[8];

static inline int ieee80211_ac_from_tid(int tid)
{
	return ieee802_1d_to_ac[tid & 7];
}

void ieee80211_dynamic_ps_enable_work(struct work_struct *work);
void ieee80211_dynamic_ps_disable_work(struct work_struct *work);
void ieee80211_dynamic_ps_timer(struct timer_list *t);
void ieee80211_send_nullfunc(struct ieee80211_local *local,
			     struct ieee80211_sub_if_data *sdata,
			     bool powersave);
void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata,
			     struct ieee80211_hdr *hdr);
void ieee80211_sta_tx_notify(struct ieee80211_sub_if_data *sdata,
			     struct ieee80211_hdr *hdr, bool ack, u16 tx_time);
void ieee80211_mlme_send_probe_req(struct ieee80211_sub_if_data *sdata,
				   const u8 *src, const u8 *dst,
				   const u8 *ssid, size_t ssid_len,
				   struct ieee80211_channel *channel);

void ieee80211_wake_queues_by_reason(struct ieee80211_hw *hw,
				     unsigned long queues,
				     enum queue_stop_reason reason,
				     bool refcounted);
void ieee80211_stop_vif_queues(struct ieee80211_local *local,
			       struct ieee80211_sub_if_data *sdata,
			       enum queue_stop_reason reason);
void ieee80211_wake_vif_queues(struct ieee80211_local *local,
			       struct ieee80211_sub_if_data *sdata,
			       enum queue_stop_reason reason);
void ieee80211_stop_queues_by_reason(struct ieee80211_hw *hw,
				     unsigned long queues,
				     enum queue_stop_reason reason,
				     bool refcounted);
void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue,
				    enum queue_stop_reason reason,
				    bool refcounted);
void ieee80211_stop_queue_by_reason(struct ieee80211_hw *hw, int queue,
				    enum queue_stop_reason reason,
				    bool refcounted);
void ieee80211_propagate_queue_wake(struct ieee80211_local *local, int queue);
void ieee80211_add_pending_skb(struct ieee80211_local *local,
			       struct sk_buff *skb);
void ieee80211_add_pending_skbs(struct ieee80211_local *local,
				struct sk_buff_head *skbs);
void ieee80211_flush_queues(struct ieee80211_local *local,
			    struct ieee80211_sub_if_data *sdata, bool drop);
void __ieee80211_flush_queues(struct ieee80211_local *local,
			      struct ieee80211_sub_if_data *sdata,
			      unsigned int queues, bool drop);

static inline bool ieee80211_can_run_worker(struct ieee80211_local *local)
{
	/*
	 * It's unsafe to try to do any work during reconfigure flow.
	 * When the flow ends the work will be requeued.
	 */
	if (local->in_reconfig)
		return false;

	/*
	 * If quiescing is set, we are racing with __ieee80211_suspend.
	 * __ieee80211_suspend flushes the workers after setting quiescing,
	 * and we check quiescing / suspended before enqueueing new workers.
	 * We should abort the worker to avoid the races below.
	 */
	if (local->quiescing)
		return false;

	/*
	 * We might already be suspended if the following scenario occurs:
	 * __ieee80211_suspend		Control path
	 *
	 *				if (local->quiescing)
	 *					return;
	 * local->quiescing = true;
	 * flush_workqueue();
	 *				queue_work(...);
	 * local->suspended = true;
	 * local->quiescing = false;
	 *				worker starts running...
*/ if (local->suspended) return false; return true; } int ieee80211_txq_setup_flows(struct ieee80211_local *local); void ieee80211_txq_set_params(struct ieee80211_local *local); void ieee80211_txq_teardown_flows(struct ieee80211_local *local); void ieee80211_txq_init(struct ieee80211_sub_if_data *sdata, struct sta_info *sta, struct txq_info *txq, int tid); void ieee80211_txq_purge(struct ieee80211_local *local, struct txq_info *txqi); void ieee80211_txq_remove_vlan(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata); void ieee80211_fill_txq_stats(struct cfg80211_txq_stats *txqstats, struct txq_info *txqi); void ieee80211_wake_txqs(unsigned long data); void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata, u16 transaction, u16 auth_alg, u16 status, const u8 *extra, size_t extra_len, const u8 *bssid, const u8 *da, const u8 *key, u8 key_len, u8 key_idx, u32 tx_flags); void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata, const u8 *bssid, u16 stype, u16 reason, bool send_frame, u8 *frame_buf); enum { IEEE80211_PROBE_FLAG_DIRECTED = BIT(0), IEEE80211_PROBE_FLAG_MIN_CONTENT = BIT(1), IEEE80211_PROBE_FLAG_RANDOM_SN = BIT(2), }; int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer, size_t buffer_len, struct ieee80211_scan_ies *ie_desc, const u8 *ie, size_t ie_len, u8 bands_used, u32 *rate_masks, struct cfg80211_chan_def *chandef, u32 flags); struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata, const u8 *src, const u8 *dst, const u8 *bssid, u32 ratemask, struct ieee80211_channel *chan, const u8 *ssid, size_t ssid_len, const u8 *ie, size_t ie_len, u32 flags); u32 ieee80211_sta_get_rates(struct ieee80211_sub_if_data *sdata, struct ieee802_11_elems *elems, enum nl80211_band band, u32 *basic_rates); int __ieee80211_request_smps_mgd(struct ieee80211_sub_if_data *sdata, enum ieee80211_smps_mode smps_mode); int __ieee80211_request_smps_ap(struct ieee80211_sub_if_data *sdata, enum ieee80211_smps_mode smps_mode); void ieee80211_recalc_smps(struct ieee80211_sub_if_data *sdata); void ieee80211_recalc_min_chandef(struct ieee80211_sub_if_data *sdata); size_t ieee80211_ie_split_vendor(const u8 *ies, size_t ielen, size_t offset); u8 *ieee80211_ie_build_ht_cap(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap, u16 cap); u8 *ieee80211_ie_build_ht_oper(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap, const struct cfg80211_chan_def *chandef, u16 prot_mode, bool rifs_mode); void ieee80211_ie_build_wide_bw_cs(u8 *pos, const struct cfg80211_chan_def *chandef); u8 *ieee80211_ie_build_vht_cap(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap, u32 cap); u8 *ieee80211_ie_build_vht_oper(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap, const struct cfg80211_chan_def *chandef); u8 *ieee80211_ie_build_he_cap(u8 *pos, const struct ieee80211_sta_he_cap *he_cap, u8 *end); int ieee80211_parse_bitrates(struct cfg80211_chan_def *chandef, const struct ieee80211_supported_band *sband, const u8 *srates, int srates_len, u32 *rates); int ieee80211_add_srates_ie(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, bool need_basic, enum nl80211_band band); int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, bool need_basic, enum nl80211_band band); u8 *ieee80211_add_wmm_info_ie(u8 *buf, u8 qosinfo); /* channel management */ bool ieee80211_chandef_ht_oper(const struct ieee80211_ht_operation *ht_oper, struct cfg80211_chan_def *chandef); bool ieee80211_chandef_vht_oper(struct ieee80211_hw *hw, const struct 
ieee80211_vht_operation *oper, const struct ieee80211_ht_operation *htop, struct cfg80211_chan_def *chandef); u32 ieee80211_chandef_downgrade(struct cfg80211_chan_def *c); int __must_check ieee80211_vif_use_channel(struct ieee80211_sub_if_data *sdata, const struct cfg80211_chan_def *chandef, enum ieee80211_chanctx_mode mode); int __must_check ieee80211_vif_reserve_chanctx(struct ieee80211_sub_if_data *sdata, const struct cfg80211_chan_def *chandef, enum ieee80211_chanctx_mode mode, bool radar_required); int __must_check ieee80211_vif_use_reserved_context(struct ieee80211_sub_if_data *sdata); int ieee80211_vif_unreserve_chanctx(struct ieee80211_sub_if_data *sdata); int __must_check ieee80211_vif_change_bandwidth(struct ieee80211_sub_if_data *sdata, const struct cfg80211_chan_def *chandef, u32 *changed); void ieee80211_vif_release_channel(struct ieee80211_sub_if_data *sdata); void ieee80211_vif_vlan_copy_chanctx(struct ieee80211_sub_if_data *sdata); void ieee80211_vif_copy_chanctx_to_vlans(struct ieee80211_sub_if_data *sdata, bool clear); int ieee80211_chanctx_refcount(struct ieee80211_local *local, struct ieee80211_chanctx *ctx); void ieee80211_recalc_smps_chanctx(struct ieee80211_local *local, struct ieee80211_chanctx *chanctx); void ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local, struct ieee80211_chanctx *ctx); bool ieee80211_is_radar_required(struct ieee80211_local *local); void ieee80211_dfs_cac_timer(unsigned long data); void ieee80211_dfs_cac_timer_work(struct work_struct *work); void ieee80211_dfs_cac_cancel(struct ieee80211_local *local); void ieee80211_dfs_radar_detected_work(struct work_struct *work); int ieee80211_send_action_csa(struct ieee80211_sub_if_data *sdata, struct cfg80211_csa_settings *csa_settings); bool ieee80211_cs_valid(const struct ieee80211_cipher_scheme *cs); bool ieee80211_cs_list_valid(const struct ieee80211_cipher_scheme *cs, int n); const struct ieee80211_cipher_scheme * ieee80211_cs_get(struct ieee80211_local *local, u32 cipher, enum nl80211_iftype iftype); int ieee80211_cs_headroom(struct ieee80211_local *local, struct cfg80211_crypto_settings *crypto, enum nl80211_iftype iftype); void ieee80211_recalc_dtim(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata); int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata, const struct cfg80211_chan_def *chandef, enum ieee80211_chanctx_mode chanmode, u8 radar_detect); int ieee80211_max_num_channels(struct ieee80211_local *local); enum nl80211_chan_width ieee80211_get_sta_bw(struct ieee80211_sta *sta); void ieee80211_recalc_chanctx_chantype(struct ieee80211_local *local, struct ieee80211_chanctx *ctx); /* TDLS */ int ieee80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev, const u8 *peer, u8 action_code, u8 dialog_token, u16 status_code, u32 peer_capability, bool initiator, const u8 *extra_ies, size_t extra_ies_len); int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev, const u8 *peer, enum nl80211_tdls_operation oper); void ieee80211_tdls_peer_del_work(struct work_struct *wk); int ieee80211_tdls_channel_switch(struct wiphy *wiphy, struct net_device *dev, const u8 *addr, u8 oper_class, struct cfg80211_chan_def *chandef); void ieee80211_tdls_cancel_channel_switch(struct wiphy *wiphy, struct net_device *dev, const u8 *addr); void ieee80211_teardown_tdls_peers(struct ieee80211_sub_if_data *sdata); void ieee80211_tdls_chsw_work(struct work_struct *wk); void ieee80211_tdls_handle_disconnect(struct ieee80211_sub_if_data *sdata, const u8 *peer, u16 
reason); const char *ieee80211_get_reason_code_string(u16 reason_code); extern const struct ethtool_ops ieee80211_ethtool_ops; #ifdef CPTCFG_MAC80211_NOINLINE #define debug_noinline noinline #else #define debug_noinline #endif #endif /* IEEE80211_I_H */ backport-iwlwifi-dkms-7906/net/mac80211/iface.c000066400000000000000000001567441351064634500210570ustar00rootroot00000000000000/* * Interface handling * * Copyright 2002-2005, Instant802 Networks, Inc. * Copyright 2005-2006, Devicescape Software, Inc. * Copyright (c) 2006 Jiri Benc * Copyright 2008, Johannes Berg * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright (c) 2016 Intel Deutschland GmbH * Copyright (C) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include #include #include #include #include #include #include #include "ieee80211_i.h" #include "sta_info.h" #include "debugfs_netdev.h" #include "mesh.h" #include "led.h" #include "driver-ops.h" #include "wme.h" #include "rate.h" /** * DOC: Interface list locking * * The interface list in each struct ieee80211_local is protected * three-fold: * * (1) modifications may only be done under the RTNL * (2) modifications and readers are protected against each other by * the iflist_mtx. * (3) modifications are done in an RCU manner so atomic readers * can traverse the list in RCU-safe blocks. * * As a consequence, reads (traversals) of the list can be protected * by either the RTNL, the iflist_mtx or RCU. */ static void ieee80211_iface_work(struct work_struct *work); bool __ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata) { struct ieee80211_chanctx_conf *chanctx_conf; int power; rcu_read_lock(); chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); if (!chanctx_conf) { rcu_read_unlock(); return false; } power = ieee80211_chandef_max_power(&chanctx_conf->def); rcu_read_unlock(); if (sdata->user_power_level != IEEE80211_UNSET_POWER_LEVEL) power = min(power, sdata->user_power_level); if (sdata->ap_power_level != IEEE80211_UNSET_POWER_LEVEL) power = min(power, sdata->ap_power_level); if (power != sdata->vif.bss_conf.txpower) { sdata->vif.bss_conf.txpower = power; ieee80211_hw_config(sdata->local, 0); return true; } return false; } void ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata, bool update_bss) { if (__ieee80211_recalc_txpower(sdata) || (update_bss && ieee80211_sdata_running(sdata))) ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_TXPOWER); } static u32 __ieee80211_idle_off(struct ieee80211_local *local) { if (!(local->hw.conf.flags & IEEE80211_CONF_IDLE)) return 0; local->hw.conf.flags &= ~IEEE80211_CONF_IDLE; return IEEE80211_CONF_CHANGE_IDLE; } static u32 __ieee80211_idle_on(struct ieee80211_local *local) { if (local->hw.conf.flags & IEEE80211_CONF_IDLE) return 0; ieee80211_flush_queues(local, NULL, false); local->hw.conf.flags |= IEEE80211_CONF_IDLE; return IEEE80211_CONF_CHANGE_IDLE; } static u32 __ieee80211_recalc_idle(struct ieee80211_local *local, bool force_active) { bool working, scanning, active; unsigned int led_trig_start = 0, led_trig_stop = 0; lockdep_assert_held(&local->mtx); active = force_active || !list_empty(&local->chanctx_list) || local->monitors; working = !local->ops->remain_on_channel && !list_empty(&local->roc_list); scanning = test_bit(SCAN_SW_SCANNING, &local->scanning) || test_bit(SCAN_ONCHANNEL_SCANNING, &local->scanning); if (working || scanning) 
led_trig_start |= IEEE80211_TPT_LEDTRIG_FL_WORK; else led_trig_stop |= IEEE80211_TPT_LEDTRIG_FL_WORK; if (active) led_trig_start |= IEEE80211_TPT_LEDTRIG_FL_CONNECTED; else led_trig_stop |= IEEE80211_TPT_LEDTRIG_FL_CONNECTED; ieee80211_mod_tpt_led_trig(local, led_trig_start, led_trig_stop); if (working || scanning || active) return __ieee80211_idle_off(local); return __ieee80211_idle_on(local); } u32 ieee80211_idle_off(struct ieee80211_local *local) { return __ieee80211_recalc_idle(local, true); } void ieee80211_recalc_idle(struct ieee80211_local *local) { u32 change = __ieee80211_recalc_idle(local, false); if (change) ieee80211_hw_config(local, change); } static int ieee80211_verify_mac(struct ieee80211_sub_if_data *sdata, u8 *addr, bool check_dup) { struct ieee80211_local *local = sdata->local; struct ieee80211_sub_if_data *iter; u64 new, mask, tmp; u8 *m; int ret = 0; if (is_zero_ether_addr(local->hw.wiphy->addr_mask)) return 0; m = addr; new = ((u64)m[0] << 5*8) | ((u64)m[1] << 4*8) | ((u64)m[2] << 3*8) | ((u64)m[3] << 2*8) | ((u64)m[4] << 1*8) | ((u64)m[5] << 0*8); m = local->hw.wiphy->addr_mask; mask = ((u64)m[0] << 5*8) | ((u64)m[1] << 4*8) | ((u64)m[2] << 3*8) | ((u64)m[3] << 2*8) | ((u64)m[4] << 1*8) | ((u64)m[5] << 0*8); if (!check_dup) return ret; mutex_lock(&local->iflist_mtx); list_for_each_entry(iter, &local->interfaces, list) { if (iter == sdata) continue; if (iter->vif.type == NL80211_IFTYPE_MONITOR && !(iter->u.mntr.flags & MONITOR_FLAG_ACTIVE)) continue; m = iter->vif.addr; tmp = ((u64)m[0] << 5*8) | ((u64)m[1] << 4*8) | ((u64)m[2] << 3*8) | ((u64)m[3] << 2*8) | ((u64)m[4] << 1*8) | ((u64)m[5] << 0*8); if ((new & ~mask) != (tmp & ~mask)) { ret = -EINVAL; break; } } mutex_unlock(&local->iflist_mtx); return ret; } static int ieee80211_change_mac(struct net_device *dev, void *addr) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct sockaddr *sa = addr; bool check_dup = true; int ret; if (ieee80211_sdata_running(sdata)) return -EBUSY; if (sdata->vif.type == NL80211_IFTYPE_MONITOR && !(sdata->u.mntr.flags & MONITOR_FLAG_ACTIVE)) check_dup = false; ret = ieee80211_verify_mac(sdata, sa->sa_data, check_dup); if (ret) return ret; ret = eth_mac_addr(dev, sa); if (ret == 0) memcpy(sdata->vif.addr, sa->sa_data, ETH_ALEN); return ret; } static inline int identical_mac_addr_allowed(int type1, int type2) { return type1 == NL80211_IFTYPE_MONITOR || type2 == NL80211_IFTYPE_MONITOR || type1 == NL80211_IFTYPE_P2P_DEVICE || type2 == NL80211_IFTYPE_P2P_DEVICE || (type1 == NL80211_IFTYPE_AP && type2 == NL80211_IFTYPE_WDS) || (type1 == NL80211_IFTYPE_WDS && (type2 == NL80211_IFTYPE_WDS || type2 == NL80211_IFTYPE_AP)) || (type1 == NL80211_IFTYPE_AP && type2 == NL80211_IFTYPE_AP_VLAN) || (type1 == NL80211_IFTYPE_AP_VLAN && (type2 == NL80211_IFTYPE_AP || type2 == NL80211_IFTYPE_AP_VLAN)); } static int ieee80211_check_concurrent_iface(struct ieee80211_sub_if_data *sdata, enum nl80211_iftype iftype) { struct ieee80211_local *local = sdata->local; struct ieee80211_sub_if_data *nsdata; int ret; ASSERT_RTNL(); /* we hold the RTNL here so can safely walk the list */ list_for_each_entry(nsdata, &local->interfaces, list) { if (nsdata != sdata && ieee80211_sdata_running(nsdata)) { /* * Only OCB and monitor mode may coexist */ if ((sdata->vif.type == NL80211_IFTYPE_OCB && nsdata->vif.type != NL80211_IFTYPE_MONITOR) || (sdata->vif.type != NL80211_IFTYPE_MONITOR && nsdata->vif.type == NL80211_IFTYPE_OCB)) return -EBUSY; /* * Allow only a single IBSS interface to be up at any * time. 
This is restricted because beacon distribution * cannot work properly if both are in the same IBSS. * * To remove this restriction we'd have to disallow them * from setting the same SSID on different IBSS interfaces * belonging to the same hardware. Then, however, we're * faced with having to adopt two different TSF timers... */ if (iftype == NL80211_IFTYPE_ADHOC && nsdata->vif.type == NL80211_IFTYPE_ADHOC) return -EBUSY; /* * will not add another interface while any channel * switch is active. */ if (nsdata->vif.csa_active) return -EBUSY; /* * The remaining checks are only performed for interfaces * with the same MAC address. */ if (!ether_addr_equal(sdata->vif.addr, nsdata->vif.addr)) continue; /* * check whether it may have the same address */ if (!identical_mac_addr_allowed(iftype, nsdata->vif.type)) return -ENOTUNIQ; /* * can only add VLANs to enabled APs */ if (iftype == NL80211_IFTYPE_AP_VLAN && nsdata->vif.type == NL80211_IFTYPE_AP) sdata->bss = &nsdata->u.ap; } } mutex_lock(&local->chanctx_mtx); ret = ieee80211_check_combinations(sdata, NULL, 0, 0); mutex_unlock(&local->chanctx_mtx); return ret; } static int ieee80211_check_queues(struct ieee80211_sub_if_data *sdata, enum nl80211_iftype iftype) { int n_queues = sdata->local->hw.queues; int i; if (iftype == NL80211_IFTYPE_NAN) return 0; if (iftype != NL80211_IFTYPE_P2P_DEVICE) { for (i = 0; i < IEEE80211_NUM_ACS; i++) { if (WARN_ON_ONCE(sdata->vif.hw_queue[i] == IEEE80211_INVAL_HW_QUEUE)) return -EINVAL; if (WARN_ON_ONCE(sdata->vif.hw_queue[i] >= n_queues)) return -EINVAL; } } if ((iftype != NL80211_IFTYPE_AP && iftype != NL80211_IFTYPE_P2P_GO && iftype != NL80211_IFTYPE_MESH_POINT) || !ieee80211_hw_check(&sdata->local->hw, QUEUE_CONTROL)) { sdata->vif.cab_queue = IEEE80211_INVAL_HW_QUEUE; return 0; } if (WARN_ON_ONCE(sdata->vif.cab_queue == IEEE80211_INVAL_HW_QUEUE)) return -EINVAL; if (WARN_ON_ONCE(sdata->vif.cab_queue >= n_queues)) return -EINVAL; return 0; } void ieee80211_adjust_monitor_flags(struct ieee80211_sub_if_data *sdata, const int offset) { struct ieee80211_local *local = sdata->local; u32 flags = sdata->u.mntr.flags; #define ADJUST(_f, _s) do { \ if (flags & MONITOR_FLAG_##_f) \ local->fif_##_s += offset; \ } while (0) ADJUST(FCSFAIL, fcsfail); ADJUST(PLCPFAIL, plcpfail); ADJUST(CONTROL, control); ADJUST(CONTROL, pspoll); ADJUST(OTHER_BSS, other_bss); #undef ADJUST } static void ieee80211_set_default_queues(struct ieee80211_sub_if_data *sdata) { struct ieee80211_local *local = sdata->local; int i; for (i = 0; i < IEEE80211_NUM_ACS; i++) { if (ieee80211_hw_check(&local->hw, QUEUE_CONTROL)) sdata->vif.hw_queue[i] = IEEE80211_INVAL_HW_QUEUE; else if (local->hw.queues >= IEEE80211_NUM_ACS) sdata->vif.hw_queue[i] = i; else sdata->vif.hw_queue[i] = 0; } sdata->vif.cab_queue = IEEE80211_INVAL_HW_QUEUE; } int ieee80211_add_virtual_monitor(struct ieee80211_local *local) { struct ieee80211_sub_if_data *sdata; int ret; if (!ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF)) return 0; ASSERT_RTNL(); if (local->monitor_sdata) return 0; sdata = kzalloc(sizeof(*sdata) + local->hw.vif_data_size, GFP_KERNEL); if (!sdata) return -ENOMEM; /* set up data */ sdata->local = local; sdata->vif.type = NL80211_IFTYPE_MONITOR; snprintf(sdata->name, IFNAMSIZ, "%s-monitor", wiphy_name(local->hw.wiphy)); sdata->wdev.iftype = NL80211_IFTYPE_MONITOR; sdata->encrypt_headroom = IEEE80211_ENCRYPT_HEADROOM; ieee80211_set_default_queues(sdata); ret = drv_add_interface(local, sdata); if (WARN_ON(ret)) { /* ok .. stupid driver, it asked for this! 
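	 * (it advertised WANT_MONITOR_VIF above, then rejected that
	 * very interface in drv_add_interface())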
*/ kfree(sdata); return ret; } ret = ieee80211_check_queues(sdata, NL80211_IFTYPE_MONITOR); if (ret) { kfree(sdata); return ret; } mutex_lock(&local->iflist_mtx); rcu_assign_pointer(local->monitor_sdata, sdata); mutex_unlock(&local->iflist_mtx); mutex_lock(&local->mtx); ret = ieee80211_vif_use_channel(sdata, &local->monitor_chandef, IEEE80211_CHANCTX_EXCLUSIVE); mutex_unlock(&local->mtx); if (ret) { mutex_lock(&local->iflist_mtx); RCU_INIT_POINTER(local->monitor_sdata, NULL); mutex_unlock(&local->iflist_mtx); synchronize_net(); drv_remove_interface(local, sdata); kfree(sdata); return ret; } skb_queue_head_init(&sdata->skb_queue); INIT_WORK(&sdata->work, ieee80211_iface_work); return 0; } void ieee80211_del_virtual_monitor(struct ieee80211_local *local) { struct ieee80211_sub_if_data *sdata; if (!ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF)) return; ASSERT_RTNL(); mutex_lock(&local->iflist_mtx); sdata = rcu_dereference_protected(local->monitor_sdata, lockdep_is_held(&local->iflist_mtx)); if (!sdata) { mutex_unlock(&local->iflist_mtx); return; } RCU_INIT_POINTER(local->monitor_sdata, NULL); mutex_unlock(&local->iflist_mtx); synchronize_net(); mutex_lock(&local->mtx); ieee80211_vif_release_channel(sdata); mutex_unlock(&local->mtx); drv_remove_interface(local, sdata); kfree(sdata); } /* * NOTE: Be very careful when changing this function, it must NOT return * an error on interface type changes that have been pre-checked, so most * checks should be in ieee80211_check_concurrent_iface. */ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up) { struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev); struct net_device *dev = wdev->netdev; struct ieee80211_local *local = sdata->local; struct sta_info *sta; u32 changed = 0; int res; u32 hw_reconf_flags = 0; switch (sdata->vif.type) { case NL80211_IFTYPE_WDS: if (!is_valid_ether_addr(sdata->u.wds.remote_addr)) return -ENOLINK; break; case NL80211_IFTYPE_AP_VLAN: { struct ieee80211_sub_if_data *master; if (!sdata->bss) return -ENOLINK; mutex_lock(&local->mtx); list_add(&sdata->u.vlan.list, &sdata->bss->vlans); mutex_unlock(&local->mtx); master = container_of(sdata->bss, struct ieee80211_sub_if_data, u.ap); sdata->control_port_protocol = master->control_port_protocol; sdata->control_port_no_encrypt = master->control_port_no_encrypt; sdata->control_port_over_nl80211 = master->control_port_over_nl80211; sdata->vif.cab_queue = master->vif.cab_queue; memcpy(sdata->vif.hw_queue, master->vif.hw_queue, sizeof(sdata->vif.hw_queue)); sdata->vif.bss_conf.chandef = master->vif.bss_conf.chandef; mutex_lock(&local->key_mtx); sdata->crypto_tx_tailroom_needed_cnt += master->crypto_tx_tailroom_needed_cnt; mutex_unlock(&local->key_mtx); break; } case NL80211_IFTYPE_AP: sdata->bss = &sdata->u.ap; break; case NL80211_IFTYPE_MESH_POINT: case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_MONITOR: case NL80211_IFTYPE_ADHOC: case NL80211_IFTYPE_P2P_DEVICE: case NL80211_IFTYPE_OCB: case NL80211_IFTYPE_NAN: /* no special treatment */ break; case NL80211_IFTYPE_UNSPECIFIED: case NUM_NL80211_IFTYPES: case NL80211_IFTYPE_P2P_CLIENT: case NL80211_IFTYPE_P2P_GO: case NL80211_IFTYPE_NAN_DATA: /* cannot happen */ WARN_ON(1); break; } if (local->open_count == 0) { res = drv_start(local); if (res) goto err_del_bss; /* we're brought up, everything changes */ hw_reconf_flags = ~0; ieee80211_led_radio(local, true); ieee80211_mod_tpt_led_trig(local, IEEE80211_TPT_LEDTRIG_FL_RADIO, 0); } /* * Copy the hopefully now-present MAC address to * this interface, if it 
has the special null one. */ if (dev && is_zero_ether_addr(dev->dev_addr)) { memcpy(dev->dev_addr, local->hw.wiphy->perm_addr, ETH_ALEN); memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN); if (!is_valid_ether_addr(dev->dev_addr)) { res = -EADDRNOTAVAIL; goto err_stop; } } switch (sdata->vif.type) { case NL80211_IFTYPE_AP_VLAN: /* no need to tell driver, but set carrier and chanctx */ if (rtnl_dereference(sdata->bss->beacon)) { ieee80211_vif_vlan_copy_chanctx(sdata); netif_carrier_on(dev); } else { netif_carrier_off(dev); } break; case NL80211_IFTYPE_MONITOR: if (sdata->u.mntr.flags & MONITOR_FLAG_COOK_FRAMES) { local->cooked_mntrs++; break; } if (sdata->u.mntr.flags & MONITOR_FLAG_ACTIVE) { res = drv_add_interface(local, sdata); if (res) goto err_stop; } else if (local->monitors == 0 && local->open_count == 0) { res = ieee80211_add_virtual_monitor(local); if (res) goto err_stop; } /* must be before the call to ieee80211_configure_filter */ local->monitors++; if (local->monitors == 1) { local->hw.conf.flags |= IEEE80211_CONF_MONITOR; hw_reconf_flags |= IEEE80211_CONF_CHANGE_MONITOR; } ieee80211_adjust_monitor_flags(sdata, 1); ieee80211_configure_filter(local); mutex_lock(&local->mtx); ieee80211_recalc_idle(local); mutex_unlock(&local->mtx); netif_carrier_on(dev); break; default: if (coming_up) { ieee80211_del_virtual_monitor(local); res = drv_add_interface(local, sdata); if (res) goto err_stop; res = ieee80211_check_queues(sdata, ieee80211_vif_type_p2p(&sdata->vif)); if (res) goto err_del_interface; } if (sdata->vif.type == NL80211_IFTYPE_AP) { local->fif_pspoll++; local->fif_probe_req++; ieee80211_configure_filter(local); } else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) { local->fif_probe_req++; } if (sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE && sdata->vif.type != NL80211_IFTYPE_NAN) changed |= ieee80211_reset_erp_info(sdata); ieee80211_bss_info_change_notify(sdata, changed); switch (sdata->vif.type) { case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_ADHOC: case NL80211_IFTYPE_AP: case NL80211_IFTYPE_MESH_POINT: case NL80211_IFTYPE_OCB: netif_carrier_off(dev); break; case NL80211_IFTYPE_WDS: case NL80211_IFTYPE_P2P_DEVICE: case NL80211_IFTYPE_NAN: break; default: /* not reached */ WARN_ON(1); } /* * Set default queue parameters so drivers don't * need to initialise the hardware if the hardware * doesn't start up with sane defaults. * Enable QoS for anything but station interfaces. */ ieee80211_set_wmm_default(sdata, true, sdata->vif.type != NL80211_IFTYPE_STATION); } set_bit(SDATA_STATE_RUNNING, &sdata->state); switch (sdata->vif.type) { case NL80211_IFTYPE_WDS: /* Create STA entry for the WDS peer */ sta = sta_info_alloc(sdata, sdata->u.wds.remote_addr, GFP_KERNEL); if (!sta) { res = -ENOMEM; goto err_del_interface; } sta_info_pre_move_state(sta, IEEE80211_STA_AUTH); sta_info_pre_move_state(sta, IEEE80211_STA_ASSOC); sta_info_pre_move_state(sta, IEEE80211_STA_AUTHORIZED); res = sta_info_insert(sta); if (res) { /* STA has been freed */ goto err_del_interface; } rate_control_rate_init(sta); netif_carrier_on(dev); break; case NL80211_IFTYPE_P2P_DEVICE: rcu_assign_pointer(local->p2p_sdata, sdata); break; case NL80211_IFTYPE_MONITOR: if (sdata->u.mntr.flags & MONITOR_FLAG_COOK_FRAMES) break; list_add_tail_rcu(&sdata->u.mntr.list, &local->mon_list); break; default: break; } /* * set_multicast_list will be invoked by the networking core * which will check whether any increments here were done in * error and sync them down to the hardware as filter flags. 
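	 *
	 * A minimal sketch of that reconciliation, as it is done in
	 * ieee80211_configure_filter() (assuming the usual FIF_* filter
	 * flags):
	 *
	 *	if (atomic_read(&local->iff_allmultis))
	 *		new_flags |= FIF_ALLMULTI;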
*/ if (sdata->flags & IEEE80211_SDATA_ALLMULTI) atomic_inc(&local->iff_allmultis); if (coming_up) local->open_count++; if (hw_reconf_flags) ieee80211_hw_config(local, hw_reconf_flags); ieee80211_recalc_ps(local); if (sdata->vif.type == NL80211_IFTYPE_MONITOR || sdata->vif.type == NL80211_IFTYPE_AP_VLAN || local->ops->wake_tx_queue) { /* XXX: for AP_VLAN, actually track AP queues */ if (dev) netif_tx_start_all_queues(dev); } else if (dev) { unsigned long flags; int n_acs = IEEE80211_NUM_ACS; int ac; if (local->hw.queues < IEEE80211_NUM_ACS) n_acs = 1; spin_lock_irqsave(&local->queue_stop_reason_lock, flags); if (sdata->vif.cab_queue == IEEE80211_INVAL_HW_QUEUE || (local->queue_stop_reasons[sdata->vif.cab_queue] == 0 && skb_queue_empty(&local->pending[sdata->vif.cab_queue]))) { for (ac = 0; ac < n_acs; ac++) { int ac_queue = sdata->vif.hw_queue[ac]; if (local->queue_stop_reasons[ac_queue] == 0 && skb_queue_empty(&local->pending[ac_queue])) netif_start_subqueue(dev, ac); } } spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); } return 0; err_del_interface: drv_remove_interface(local, sdata); err_stop: if (!local->open_count) drv_stop(local); err_del_bss: sdata->bss = NULL; if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) { mutex_lock(&local->mtx); list_del(&sdata->u.vlan.list); mutex_unlock(&local->mtx); } /* might already be clear but that doesn't matter */ clear_bit(SDATA_STATE_RUNNING, &sdata->state); return res; } static int ieee80211_open(struct net_device *dev) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); int err; /* fail early if user set an invalid address */ if (!is_valid_ether_addr(dev->dev_addr)) return -EADDRNOTAVAIL; err = ieee80211_check_concurrent_iface(sdata, sdata->vif.type); if (err) return err; return ieee80211_do_open(&sdata->wdev, true); } static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_down) { struct ieee80211_local *local = sdata->local; unsigned long flags; struct sk_buff *skb, *tmp; u32 hw_reconf_flags = 0; int i, flushed; struct ps_data *ps; struct cfg80211_chan_def chandef; bool cancel_scan; struct cfg80211_nan_func *func; clear_bit(SDATA_STATE_RUNNING, &sdata->state); cancel_scan = rcu_access_pointer(local->scan_sdata) == sdata; if (cancel_scan) ieee80211_scan_cancel(local); /* * Stop TX on this interface first. */ if (sdata->dev) netif_tx_stop_all_queues(sdata->dev); ieee80211_roc_purge(local, sdata); switch (sdata->vif.type) { case NL80211_IFTYPE_STATION: ieee80211_mgd_stop(sdata); break; case NL80211_IFTYPE_ADHOC: ieee80211_ibss_stop(sdata); break; case NL80211_IFTYPE_AP: cancel_work_sync(&sdata->u.ap.request_smps_work); break; case NL80211_IFTYPE_MONITOR: if (sdata->u.mntr.flags & MONITOR_FLAG_COOK_FRAMES) break; list_del_rcu(&sdata->u.mntr.list); break; default: break; } /* * Remove all stations associated with this interface. * * This must be done before calling ops->remove_interface() * because otherwise we can later invoke ops->sta_notify() * whenever the STAs are removed, and that invalidates driver * assumptions about always getting a vif pointer that is valid * (because if we remove a STA after ops->remove_interface() * the driver will have removed the vif info already!) * * In WDS mode a station must exist here and be flushed, for * AP_VLANs stations may exist since there's nothing else that * would have removed them, but in other modes there shouldn't * be any stations. 
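	 *
	 * The WARN_ON_ONCE() below encodes exactly that invariant: WDS
	 * must flush exactly one station, AP_VLAN may flush any number,
	 * and all other interface types must flush none.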
*/ flushed = sta_info_flush(sdata); WARN_ON_ONCE(sdata->vif.type != NL80211_IFTYPE_AP_VLAN && ((sdata->vif.type != NL80211_IFTYPE_WDS && flushed > 0) || (sdata->vif.type == NL80211_IFTYPE_WDS && flushed != 1))); /* don't count this interface for allmulti while it is down */ if (sdata->flags & IEEE80211_SDATA_ALLMULTI) atomic_dec(&local->iff_allmultis); if (sdata->vif.type == NL80211_IFTYPE_AP) { local->fif_pspoll--; local->fif_probe_req--; } else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) { local->fif_probe_req--; } if (sdata->dev) { netif_addr_lock_bh(sdata->dev); spin_lock_bh(&local->filter_lock); __hw_addr_unsync(&local->mc_list, &sdata->dev->mc, sdata->dev->addr_len); spin_unlock_bh(&local->filter_lock); netif_addr_unlock_bh(sdata->dev); } del_timer_sync(&local->dynamic_ps_timer); cancel_work_sync(&local->dynamic_ps_enable_work); cancel_work_sync(&sdata->recalc_smps); sdata_lock(sdata); mutex_lock(&local->mtx); sdata->vif.csa_active = false; if (sdata->vif.type == NL80211_IFTYPE_STATION) sdata->u.mgd.csa_waiting_bcn = false; if (sdata->csa_block_tx) { ieee80211_wake_vif_queues(local, sdata, IEEE80211_QUEUE_STOP_REASON_CSA); sdata->csa_block_tx = false; } mutex_unlock(&local->mtx); sdata_unlock(sdata); cancel_work_sync(&sdata->csa_finalize_work); cancel_delayed_work_sync(&sdata->dfs_cac_timer_work); if (sdata->wdev.cac_started) { chandef = sdata->vif.bss_conf.chandef; WARN_ON(local->suspended); mutex_lock(&local->mtx); ieee80211_vif_release_channel(sdata); mutex_unlock(&local->mtx); cfg80211_cac_event(sdata->dev, &chandef, NL80211_RADAR_CAC_ABORTED, GFP_KERNEL); } /* APs need special treatment */ if (sdata->vif.type == NL80211_IFTYPE_AP) { struct ieee80211_sub_if_data *vlan, *tmpsdata; /* down all dependent devices, that is VLANs */ list_for_each_entry_safe(vlan, tmpsdata, &sdata->u.ap.vlans, u.vlan.list) dev_close(vlan->dev); WARN_ON(!list_empty(&sdata->u.ap.vlans)); } else if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) { /* remove all packets in parent bc_buf pointing to this dev */ ps = &sdata->bss->ps; spin_lock_irqsave(&ps->bc_buf.lock, flags); skb_queue_walk_safe(&ps->bc_buf, skb, tmp) { if (skb->dev == sdata->dev) { __skb_unlink(skb, &ps->bc_buf); local->total_ps_buffered--; ieee80211_free_txskb(&local->hw, skb); } } spin_unlock_irqrestore(&ps->bc_buf.lock, flags); } if (going_down) local->open_count--; switch (sdata->vif.type) { case NL80211_IFTYPE_AP_VLAN: mutex_lock(&local->mtx); list_del(&sdata->u.vlan.list); mutex_unlock(&local->mtx); RCU_INIT_POINTER(sdata->vif.chanctx_conf, NULL); /* see comment in the default case below */ ieee80211_free_keys(sdata, true); /* no need to tell driver */ break; case NL80211_IFTYPE_MONITOR: if (sdata->u.mntr.flags & MONITOR_FLAG_COOK_FRAMES) { local->cooked_mntrs--; break; } local->monitors--; if (local->monitors == 0) { local->hw.conf.flags &= ~IEEE80211_CONF_MONITOR; hw_reconf_flags |= IEEE80211_CONF_CHANGE_MONITOR; } ieee80211_adjust_monitor_flags(sdata, -1); break; case NL80211_IFTYPE_NAN: /* clean all the functions */ spin_lock_bh(&sdata->u.nan.func_lock); idr_for_each_entry(&sdata->u.nan.function_inst_ids, func, i) { idr_remove(&sdata->u.nan.function_inst_ids, i); cfg80211_free_nan_func(func); } idr_destroy(&sdata->u.nan.function_inst_ids); spin_unlock_bh(&sdata->u.nan.func_lock); break; case NL80211_IFTYPE_P2P_DEVICE: /* relies on synchronize_rcu() below */ RCU_INIT_POINTER(local->p2p_sdata, NULL); /* fall through */ default: cancel_work_sync(&sdata->work); /* * When we get here, the interface is marked down. 
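		 * (SDATA_STATE_RUNNING was cleared at the very top of
		 * ieee80211_do_stop().)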
* Free the remaining keys, if there are any * (which can happen in AP mode if userspace sets * keys before the interface is operating, and maybe * also in WDS mode) * * Force the key freeing to always synchronize_net() * to wait for the RX path in case it is using this * interface enqueuing frames at this very time on * another CPU. */ ieee80211_free_keys(sdata, true); skb_queue_purge(&sdata->skb_queue); } spin_lock_irqsave(&local->queue_stop_reason_lock, flags); for (i = 0; i < IEEE80211_MAX_QUEUES; i++) { skb_queue_walk_safe(&local->pending[i], skb, tmp) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); if (info->control.vif == &sdata->vif) { __skb_unlink(skb, &local->pending[i]); ieee80211_free_txskb(&local->hw, skb); } } } spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) ieee80211_txq_remove_vlan(local, sdata); sdata->bss = NULL; if (local->open_count == 0) ieee80211_clear_tx_pending(local); sdata->vif.bss_conf.beacon_int = 0; /* * If the interface goes down while suspended, presumably because * the device was unplugged and that happens before our resume, * then the driver is already unconfigured and the remainder of * this function isn't needed. * XXX: what about WoWLAN? If the device has software state, e.g. * memory allocated, it might expect teardown commands from * mac80211 here? */ if (local->suspended) { WARN_ON(local->wowlan); WARN_ON(rtnl_dereference(local->monitor_sdata)); return; } switch (sdata->vif.type) { case NL80211_IFTYPE_AP_VLAN: break; case NL80211_IFTYPE_MONITOR: if (local->monitors == 0) ieee80211_del_virtual_monitor(local); mutex_lock(&local->mtx); ieee80211_recalc_idle(local); mutex_unlock(&local->mtx); if (!(sdata->u.mntr.flags & MONITOR_FLAG_ACTIVE)) break; /* fall through */ default: if (going_down) drv_remove_interface(local, sdata); } ieee80211_recalc_ps(local); if (cancel_scan) flush_delayed_work(&local->scan_work); if (local->open_count == 0) { ieee80211_stop_device(local); /* no reconfiguring after stop! */ return; } /* do after stop to avoid reconfiguring when we stop anyway */ ieee80211_configure_filter(local); ieee80211_hw_config(local, hw_reconf_flags); if (local->monitors == local->open_count) ieee80211_add_virtual_monitor(local); } static int ieee80211_stop(struct net_device *dev) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); ieee80211_do_stop(sdata, true); return 0; } static void ieee80211_set_multicast_list(struct net_device *dev) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_local *local = sdata->local; int allmulti, sdata_allmulti; allmulti = !!(dev->flags & IFF_ALLMULTI); sdata_allmulti = !!(sdata->flags & IEEE80211_SDATA_ALLMULTI); if (allmulti != sdata_allmulti) { if (dev->flags & IFF_ALLMULTI) atomic_inc(&local->iff_allmultis); else atomic_dec(&local->iff_allmultis); sdata->flags ^= IEEE80211_SDATA_ALLMULTI; } spin_lock_bh(&local->filter_lock); __hw_addr_sync(&local->mc_list, &dev->mc, dev->addr_len); spin_unlock_bh(&local->filter_lock); ieee80211_queue_work(&local->hw, &local->reconfig_filter); } /* * Called when the netdev is removed or, by the code below, before * the interface type changes. 
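 *
 * Teardown is the mirror image of ieee80211_setup_sdata(): keys,
 * debugfs entries and any queued fragments are released here, and
 * mesh-specific state is torn down for mesh interfaces.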
*/ static void ieee80211_teardown_sdata(struct ieee80211_sub_if_data *sdata) { int i; /* free extra data */ ieee80211_free_keys(sdata, false); ieee80211_debugfs_remove_netdev(sdata); for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) __skb_queue_purge(&sdata->fragments[i].skb_list); sdata->fragment_next = 0; if (ieee80211_vif_is_mesh(&sdata->vif)) ieee80211_mesh_teardown_sdata(sdata); } static void ieee80211_uninit(struct net_device *dev) { ieee80211_teardown_sdata(IEEE80211_DEV_TO_SUB_IF(dev)); } #if LINUX_VERSION_IS_GEQ(4,19,0) static u16 ieee80211_netdev_select_queue(struct net_device *dev, struct sk_buff *skb, struct net_device *sb_dev, select_queue_fallback_t fallback) #elif LINUX_VERSION_IS_GEQ(3,14,0) || \ (LINUX_VERSION_CODE == KERNEL_VERSION(3,13,11) && UTS_UBUNTU_RELEASE_ABI > 30) || \ RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,6) static u16 ieee80211_netdev_select_queue(struct net_device *dev, struct sk_buff *skb, void *accel_priv, select_queue_fallback_t fallback) #elif LINUX_VERSION_IS_GEQ(3,13,0) static u16 ieee80211_netdev_select_queue(struct net_device *dev, struct sk_buff *skb, void *accel_priv) #else static u16 ieee80211_netdev_select_queue(struct net_device *dev, struct sk_buff *skb) #endif { return ieee80211_select_queue(IEEE80211_DEV_TO_SUB_IF(dev), skb); } static void ieee80211_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { int i; for_each_possible_cpu(i) { const struct pcpu_sw_netstats *tstats; u64 rx_packets, rx_bytes, tx_packets, tx_bytes; unsigned int start; tstats = per_cpu_ptr(netdev_tstats(dev), i); do { start = u64_stats_fetch_begin_irq(&tstats->syncp); rx_packets = tstats->rx_packets; tx_packets = tstats->tx_packets; rx_bytes = tstats->rx_bytes; tx_bytes = tstats->tx_bytes; } while (u64_stats_fetch_retry_irq(&tstats->syncp, start)); stats->rx_packets += rx_packets; stats->tx_packets += tx_packets; stats->rx_bytes += rx_bytes; stats->tx_bytes += tx_bytes; } } #if LINUX_VERSION_IS_LESS(4,11,0) && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6) /* Just declare it here to keep sparse happy */ struct rtnl_link_stats64 *bp_ieee80211_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats); struct rtnl_link_stats64 * bp_ieee80211_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats){ ieee80211_get_stats64(dev, stats); return stats; } #endif #if LINUX_VERSION_IS_LESS(4,10,0) && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6) static int __change_mtu(struct net_device *ndev, int new_mtu){ if (new_mtu < 256 || new_mtu > IEEE80211_MAX_DATA_LEN) return -EINVAL; ndev->mtu = new_mtu; return 0; } #endif static const struct net_device_ops ieee80211_dataif_ops = { #if LINUX_VERSION_IS_LESS(4,10,0) && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6) .ndo_change_mtu = __change_mtu, #endif .ndo_open = ieee80211_open, .ndo_stop = ieee80211_stop, .ndo_uninit = ieee80211_uninit, .ndo_start_xmit = ieee80211_subif_start_xmit, .ndo_set_rx_mode = ieee80211_set_multicast_list, .ndo_set_mac_address = ieee80211_change_mac, .ndo_select_queue = ieee80211_netdev_select_queue, #if LINUX_VERSION_IS_GEQ(4,11,0) || RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,6) .ndo_get_stats64 = ieee80211_get_stats64, #else .ndo_get_stats64 = bp_ieee80211_get_stats64, #endif }; #if LINUX_VERSION_IS_GEQ(4,19,0) static u16 ieee80211_monitor_select_queue(struct net_device *dev, struct sk_buff *skb, struct net_device *sb_dev, select_queue_fallback_t fallback) #elif LINUX_VERSION_IS_GEQ(3,14,0) || \ (LINUX_VERSION_CODE == KERNEL_VERSION(3,13,11) && UTS_UBUNTU_RELEASE_ABI > 30) 
|| \ RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,6) static u16 ieee80211_monitor_select_queue(struct net_device *dev, struct sk_buff *skb, void *accel_priv, select_queue_fallback_t fallback) #elif LINUX_VERSION_IS_GEQ(3,13,0) static u16 ieee80211_monitor_select_queue(struct net_device *dev, struct sk_buff *skb, void *accel_priv) #else static u16 ieee80211_monitor_select_queue(struct net_device *dev, struct sk_buff *skb) #endif { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_local *local = sdata->local; struct ieee80211_hdr *hdr; struct ieee80211_radiotap_header *rtap = (void *)skb->data; if (local->hw.queues < IEEE80211_NUM_ACS) return 0; if (skb->len < 4 || skb->len < le16_to_cpu(rtap->it_len) + 2 /* frame control */) return 0; /* doesn't matter, frame will be dropped */ hdr = (void *)((u8 *)skb->data + le16_to_cpu(rtap->it_len)); return ieee80211_select_queue_80211(sdata, skb, hdr); } static const struct net_device_ops ieee80211_monitorif_ops = { #if LINUX_VERSION_IS_LESS(4,10,0) && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6) .ndo_change_mtu = __change_mtu, #endif .ndo_open = ieee80211_open, .ndo_stop = ieee80211_stop, .ndo_uninit = ieee80211_uninit, .ndo_start_xmit = ieee80211_monitor_start_xmit, .ndo_set_rx_mode = ieee80211_set_multicast_list, .ndo_set_mac_address = ieee80211_change_mac, .ndo_select_queue = ieee80211_monitor_select_queue, #if LINUX_VERSION_IS_GEQ(4,11,0) || RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,6) .ndo_get_stats64 = ieee80211_get_stats64, #else .ndo_get_stats64 = bp_ieee80211_get_stats64, #endif }; static void ieee80211_if_free(struct net_device *dev) { free_percpu(netdev_tstats(dev)); } #if LINUX_VERSION_IS_LESS(4,12,0) static void __ieee80211_if_free(struct net_device *ndev){ ieee80211_if_free(ndev); free_netdev(ndev); } #endif static void ieee80211_if_setup(struct net_device *dev) { ether_setup(dev); dev->priv_flags &= ~IFF_TX_SKB_SHARING; dev->netdev_ops = &ieee80211_dataif_ops; netdev_set_priv_destructor(dev, ieee80211_if_free); } static void ieee80211_if_setup_no_queue(struct net_device *dev) { ieee80211_if_setup(dev); #if LINUX_VERSION_IS_GEQ(4,3,0) dev->priv_flags |= IFF_NO_QUEUE; #else dev->tx_queue_len = 0; #endif } static void ieee80211_iface_work(struct work_struct *work) { struct ieee80211_sub_if_data *sdata = container_of(work, struct ieee80211_sub_if_data, work); struct ieee80211_local *local = sdata->local; struct sk_buff *skb; struct sta_info *sta; if (!ieee80211_sdata_running(sdata)) return; if (test_bit(SCAN_SW_SCANNING, &local->scanning)) return; if (!ieee80211_can_run_worker(local)) return; /* first process frames */ while ((skb = skb_dequeue(&sdata->skb_queue))) { struct ieee80211_mgmt *mgmt = (void *)skb->data; if (ieee80211_is_action(mgmt->frame_control) && mgmt->u.action.category == WLAN_CATEGORY_BACK) { int len = skb->len; mutex_lock(&local->sta_mtx); sta = sta_info_get_bss(sdata, mgmt->sa); if (sta) { switch (mgmt->u.action.u.addba_req.action_code) { case WLAN_ACTION_ADDBA_REQ: ieee80211_process_addba_request( local, sta, mgmt, len); break; case WLAN_ACTION_ADDBA_RESP: ieee80211_process_addba_resp(local, sta, mgmt, len); break; case WLAN_ACTION_DELBA: ieee80211_process_delba(sdata, sta, mgmt, len); break; default: WARN_ON(1); break; } } mutex_unlock(&local->sta_mtx); } else if (ieee80211_is_action(mgmt->frame_control) && mgmt->u.action.category == WLAN_CATEGORY_VHT) { switch (mgmt->u.action.u.vht_group_notif.action_code) { case WLAN_VHT_ACTION_OPMODE_NOTIF: { struct ieee80211_rx_status *status; 
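				/*
				 * The operating mode field is a single
				 * octet: bits 0-1 carry the new channel
				 * width and bits 4-6 the new RX NSS. A
				 * sketch of the NSS decode, using the
				 * IEEE80211_OPMODE_NOTIF_* masks from
				 * linux/ieee80211.h:
				 *
				 *	nss = (opmode & IEEE80211_OPMODE_NOTIF_RX_NSS_MASK) >>
				 *	      IEEE80211_OPMODE_NOTIF_RX_NSS_SHIFT;
				 *
				 * ieee80211_vht_handle_opmode() below does
				 * the real decoding.
				 */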
enum nl80211_band band; u8 opmode; status = IEEE80211_SKB_RXCB(skb); band = status->band; opmode = mgmt->u.action.u.vht_opmode_notif.operating_mode; mutex_lock(&local->sta_mtx); sta = sta_info_get_bss(sdata, mgmt->sa); if (sta) ieee80211_vht_handle_opmode(sdata, sta, opmode, band); mutex_unlock(&local->sta_mtx); break; } case WLAN_VHT_ACTION_GROUPID_MGMT: ieee80211_process_mu_groups(sdata, mgmt); break; default: WARN_ON(1); break; } } else if (ieee80211_is_data_qos(mgmt->frame_control)) { struct ieee80211_hdr *hdr = (void *)mgmt; /* * So the frame isn't mgmt, but frame_control * is at the right place anyway, of course, so * the if statement is correct. * * Warn if we have other data frame types here, * they must not get here. */ WARN_ON(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC)); WARN_ON(!(hdr->seq_ctrl & cpu_to_le16(IEEE80211_SCTL_FRAG))); /* * This was a fragment of a frame, received while * a block-ack session was active. That cannot be * right, so terminate the session. */ mutex_lock(&local->sta_mtx); sta = sta_info_get_bss(sdata, mgmt->sa); if (sta) { u16 tid = ieee80211_get_tid(hdr); __ieee80211_stop_rx_ba_session( sta, tid, WLAN_BACK_RECIPIENT, WLAN_REASON_QSTA_REQUIRE_SETUP, true); } mutex_unlock(&local->sta_mtx); } else switch (sdata->vif.type) { case NL80211_IFTYPE_STATION: ieee80211_sta_rx_queued_mgmt(sdata, skb); break; case NL80211_IFTYPE_ADHOC: ieee80211_ibss_rx_queued_mgmt(sdata, skb); break; case NL80211_IFTYPE_MESH_POINT: if (!ieee80211_vif_is_mesh(&sdata->vif)) break; ieee80211_mesh_rx_queued_mgmt(sdata, skb); break; default: WARN(1, "frame for unexpected interface type"); break; } kfree_skb(skb); } /* then other type-dependent work */ switch (sdata->vif.type) { case NL80211_IFTYPE_STATION: ieee80211_sta_work(sdata); break; case NL80211_IFTYPE_ADHOC: ieee80211_ibss_work(sdata); break; case NL80211_IFTYPE_MESH_POINT: if (!ieee80211_vif_is_mesh(&sdata->vif)) break; ieee80211_mesh_work(sdata); break; case NL80211_IFTYPE_OCB: ieee80211_ocb_work(sdata); break; default: break; } } static void ieee80211_recalc_smps_work(struct work_struct *work) { struct ieee80211_sub_if_data *sdata = container_of(work, struct ieee80211_sub_if_data, recalc_smps); ieee80211_recalc_smps(sdata); } /* * Helper function to initialise an interface to a specific type. 
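 *
 * It runs both for a freshly allocated sdata (from ieee80211_if_add())
 * and on a runtime type change, in which case
 * ieee80211_teardown_sdata() has been called first so that the
 * type-dependent union can safely be cleared below.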
*/ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata, enum nl80211_iftype type) { static const u8 bssid_wildcard[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; /* clear type-dependent union */ memset(&sdata->u, 0, sizeof(sdata->u)); /* and set some type-dependent values */ sdata->vif.type = type; sdata->vif.p2p = false; sdata->wdev.iftype = type; sdata->control_port_protocol = cpu_to_be16(ETH_P_PAE); sdata->control_port_no_encrypt = false; sdata->encrypt_headroom = IEEE80211_ENCRYPT_HEADROOM; sdata->vif.bss_conf.idle = true; sdata->noack_map = 0; /* only monitor/p2p-device differ */ if (sdata->dev) { sdata->dev->netdev_ops = &ieee80211_dataif_ops; sdata->dev->type = ARPHRD_ETHER; } skb_queue_head_init(&sdata->skb_queue); INIT_WORK(&sdata->work, ieee80211_iface_work); INIT_WORK(&sdata->recalc_smps, ieee80211_recalc_smps_work); INIT_WORK(&sdata->csa_finalize_work, ieee80211_csa_finalize_work); INIT_LIST_HEAD(&sdata->assigned_chanctx_list); INIT_LIST_HEAD(&sdata->reserved_chanctx_list); switch (type) { case NL80211_IFTYPE_P2P_GO: type = NL80211_IFTYPE_AP; sdata->vif.type = type; sdata->vif.p2p = true; /* fall through */ case NL80211_IFTYPE_AP: skb_queue_head_init(&sdata->u.ap.ps.bc_buf); INIT_LIST_HEAD(&sdata->u.ap.vlans); INIT_WORK(&sdata->u.ap.request_smps_work, ieee80211_request_smps_ap_work); sdata->vif.bss_conf.bssid = sdata->vif.addr; sdata->u.ap.req_smps = IEEE80211_SMPS_OFF; break; case NL80211_IFTYPE_P2P_CLIENT: type = NL80211_IFTYPE_STATION; sdata->vif.type = type; sdata->vif.p2p = true; /* fall through */ case NL80211_IFTYPE_STATION: sdata->vif.bss_conf.bssid = sdata->u.mgd.bssid; ieee80211_sta_setup_sdata(sdata); break; case NL80211_IFTYPE_OCB: sdata->vif.bss_conf.bssid = bssid_wildcard; ieee80211_ocb_setup_sdata(sdata); break; case NL80211_IFTYPE_ADHOC: sdata->vif.bss_conf.bssid = sdata->u.ibss.bssid; ieee80211_ibss_setup_sdata(sdata); break; case NL80211_IFTYPE_MESH_POINT: if (ieee80211_vif_is_mesh(&sdata->vif)) ieee80211_mesh_init_sdata(sdata); break; case NL80211_IFTYPE_MONITOR: sdata->dev->type = ARPHRD_IEEE80211_RADIOTAP; sdata->dev->netdev_ops = &ieee80211_monitorif_ops; sdata->u.mntr.flags = MONITOR_FLAG_CONTROL | MONITOR_FLAG_OTHER_BSS; break; case NL80211_IFTYPE_WDS: sdata->vif.bss_conf.bssid = NULL; break; case NL80211_IFTYPE_NAN: idr_init(&sdata->u.nan.function_inst_ids); spin_lock_init(&sdata->u.nan.func_lock); sdata->vif.bss_conf.bssid = sdata->vif.addr; break; case NL80211_IFTYPE_AP_VLAN: case NL80211_IFTYPE_P2P_DEVICE: sdata->vif.bss_conf.bssid = sdata->vif.addr; break; case NL80211_IFTYPE_UNSPECIFIED: case NUM_NL80211_IFTYPES: case NL80211_IFTYPE_NAN_DATA: WARN_ON(1); break; } ieee80211_debugfs_add_netdev(sdata); } static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata, enum nl80211_iftype type) { struct ieee80211_local *local = sdata->local; int ret, err; enum nl80211_iftype internal_type = type; bool p2p = false; ASSERT_RTNL(); if (!local->ops->change_interface) return -EBUSY; switch (sdata->vif.type) { case NL80211_IFTYPE_AP: case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_ADHOC: case NL80211_IFTYPE_OCB: /* * Could maybe also all others here? * Just not sure how that interacts * with the RX/config path e.g. for * mesh. 
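	 * As it stands, only AP, STATION, ADHOC and OCB may change type
	 * at runtime; the P2P client/GO cases accepted in the second
	 * switch are mapped onto the STATION/AP internal types.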
*/ break; default: return -EBUSY; } switch (type) { case NL80211_IFTYPE_AP: case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_ADHOC: case NL80211_IFTYPE_OCB: /* * Could probably support everything * but WDS here (WDS do_open can fail * under memory pressure, which this * code isn't prepared to handle). */ break; case NL80211_IFTYPE_P2P_CLIENT: p2p = true; internal_type = NL80211_IFTYPE_STATION; break; case NL80211_IFTYPE_P2P_GO: p2p = true; internal_type = NL80211_IFTYPE_AP; break; default: return -EBUSY; } ret = ieee80211_check_concurrent_iface(sdata, internal_type); if (ret) return ret; ieee80211_do_stop(sdata, false); ieee80211_teardown_sdata(sdata); ret = drv_change_interface(local, sdata, internal_type, p2p); if (ret) type = ieee80211_vif_type_p2p(&sdata->vif); /* * Ignore return value here, there's not much we can do since * the driver changed the interface type internally already. * The warnings will hopefully make driver authors fix it :-) */ ieee80211_check_queues(sdata, type); ieee80211_setup_sdata(sdata, type); err = ieee80211_do_open(&sdata->wdev, false); WARN(err, "type change: do_open returned %d", err); return ret; } int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata, enum nl80211_iftype type) { int ret; ASSERT_RTNL(); if (type == ieee80211_vif_type_p2p(&sdata->vif)) return 0; if (ieee80211_sdata_running(sdata)) { ret = ieee80211_runtime_change_iftype(sdata, type); if (ret) return ret; } else { /* Purge and reset type-dependent state. */ ieee80211_teardown_sdata(sdata); ieee80211_setup_sdata(sdata, type); } /* reset some values that shouldn't be kept across type changes */ if (type == NL80211_IFTYPE_STATION) sdata->u.mgd.use_4addr = false; return 0; } static void ieee80211_assign_perm_addr(struct ieee80211_local *local, u8 *perm_addr, enum nl80211_iftype type) { struct ieee80211_sub_if_data *sdata; u64 mask, start, addr, val, inc; u8 *m; u8 tmp_addr[ETH_ALEN]; int i; /* default ... 
something at least */ memcpy(perm_addr, local->hw.wiphy->perm_addr, ETH_ALEN); if (is_zero_ether_addr(local->hw.wiphy->addr_mask) && local->hw.wiphy->n_addresses <= 1) return; mutex_lock(&local->iflist_mtx); switch (type) { case NL80211_IFTYPE_MONITOR: /* doesn't matter */ break; case NL80211_IFTYPE_WDS: case NL80211_IFTYPE_AP_VLAN: /* match up with an AP interface */ list_for_each_entry(sdata, &local->interfaces, list) { if (sdata->vif.type != NL80211_IFTYPE_AP) continue; memcpy(perm_addr, sdata->vif.addr, ETH_ALEN); break; } /* keep default if no AP interface present */ break; case NL80211_IFTYPE_P2P_CLIENT: case NL80211_IFTYPE_P2P_GO: if (ieee80211_hw_check(&local->hw, P2P_DEV_ADDR_FOR_INTF)) { list_for_each_entry(sdata, &local->interfaces, list) { if (sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE) continue; if (!ieee80211_sdata_running(sdata)) continue; memcpy(perm_addr, sdata->vif.addr, ETH_ALEN); goto out_unlock; } } /* fall through */ default: /* assign a new address if possible -- try n_addresses first */ for (i = 0; i < local->hw.wiphy->n_addresses; i++) { bool used = false; list_for_each_entry(sdata, &local->interfaces, list) { if (ether_addr_equal(local->hw.wiphy->addresses[i].addr, sdata->vif.addr)) { used = true; break; } } if (!used) { memcpy(perm_addr, local->hw.wiphy->addresses[i].addr, ETH_ALEN); break; } } /* try mask if available */ if (is_zero_ether_addr(local->hw.wiphy->addr_mask)) break; m = local->hw.wiphy->addr_mask; mask = ((u64)m[0] << 5*8) | ((u64)m[1] << 4*8) | ((u64)m[2] << 3*8) | ((u64)m[3] << 2*8) | ((u64)m[4] << 1*8) | ((u64)m[5] << 0*8); if (__ffs64(mask) + hweight64(mask) != fls64(mask)) { /* not a contiguous mask ... not handled now! */ pr_info("not contiguous\n"); break; } /* * Pick address of existing interface in case user changed * MAC address manually, default to perm_addr. 
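		 *
		 * A worked example of the search below, assuming a
		 * contiguous addr_mask of 00:00:00:00:00:03:
		 * inc = 1ULL << __ffs64(mask) = 1, so the candidates
		 * cycle through the four low-bit variants of the start
		 * address until an unused one is found or the loop
		 * wraps around to the start.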
*/ m = local->hw.wiphy->perm_addr; list_for_each_entry(sdata, &local->interfaces, list) { if (sdata->vif.type == NL80211_IFTYPE_MONITOR) continue; m = sdata->vif.addr; break; } start = ((u64)m[0] << 5*8) | ((u64)m[1] << 4*8) | ((u64)m[2] << 3*8) | ((u64)m[3] << 2*8) | ((u64)m[4] << 1*8) | ((u64)m[5] << 0*8); inc = 1ULL<<__ffs64(mask); val = (start & mask); addr = (start & ~mask) | (val & mask); do { bool used = false; tmp_addr[5] = addr >> 0*8; tmp_addr[4] = addr >> 1*8; tmp_addr[3] = addr >> 2*8; tmp_addr[2] = addr >> 3*8; tmp_addr[1] = addr >> 4*8; tmp_addr[0] = addr >> 5*8; val += inc; list_for_each_entry(sdata, &local->interfaces, list) { if (ether_addr_equal(tmp_addr, sdata->vif.addr)) { used = true; break; } } if (!used) { memcpy(perm_addr, tmp_addr, ETH_ALEN); break; } addr = (start & ~mask) | (val & mask); } while (addr != start); break; } out_unlock: mutex_unlock(&local->iflist_mtx); } int ieee80211_if_add(struct ieee80211_local *local, const char *name, unsigned char name_assign_type, struct wireless_dev **new_wdev, enum nl80211_iftype type, struct vif_params *params) { struct net_device *ndev = NULL; struct ieee80211_sub_if_data *sdata = NULL; struct txq_info *txqi; void (*if_setup)(struct net_device *dev); int ret, i; int txqs = 1; ASSERT_RTNL(); if (type == NL80211_IFTYPE_P2P_DEVICE || type == NL80211_IFTYPE_NAN) { struct wireless_dev *wdev; sdata = kzalloc(sizeof(*sdata) + local->hw.vif_data_size, GFP_KERNEL); if (!sdata) return -ENOMEM; wdev = &sdata->wdev; sdata->dev = NULL; strlcpy(sdata->name, name, IFNAMSIZ); ieee80211_assign_perm_addr(local, wdev->address, type); memcpy(sdata->vif.addr, wdev->address, ETH_ALEN); } else { int size = ALIGN(sizeof(*sdata) + local->hw.vif_data_size, sizeof(void *)); int txq_size = 0; if (local->ops->wake_tx_queue && type != NL80211_IFTYPE_AP_VLAN && (type != NL80211_IFTYPE_MONITOR || (params->flags & MONITOR_FLAG_ACTIVE))) txq_size += sizeof(struct txq_info) + local->hw.txq_data_size; if (local->ops->wake_tx_queue) if_setup = ieee80211_if_setup_no_queue; else if_setup = ieee80211_if_setup; if (local->hw.queues >= IEEE80211_NUM_ACS) txqs = IEEE80211_NUM_ACS; ndev = alloc_netdev_mqs(size + txq_size, name, name_assign_type, if_setup, txqs, 1); if (!ndev) return -ENOMEM; dev_net_set(ndev, wiphy_net(local->hw.wiphy)); netdev_assign_tstats(ndev, netdev_alloc_pcpu_stats(struct pcpu_sw_netstats)); if (!netdev_tstats(ndev)) { free_netdev(ndev); return -ENOMEM; } ndev->needed_headroom = local->tx_headroom + 4*6 /* four MAC addresses */ + 2 + 2 + 2 + 2 /* ctl, dur, seq, qos */ + 6 /* mesh */ + 8 /* rfc1042/bridge tunnel */ - ETH_HLEN /* ethernet hard_header_len */ + IEEE80211_ENCRYPT_HEADROOM; ndev->needed_tailroom = IEEE80211_ENCRYPT_TAILROOM; ret = dev_alloc_name(ndev, ndev->name); if (ret < 0) { ieee80211_if_free(ndev); free_netdev(ndev); return ret; } ieee80211_assign_perm_addr(local, ndev->perm_addr, type); if (is_valid_ether_addr(params->macaddr)) memcpy(ndev->dev_addr, params->macaddr, ETH_ALEN); else memcpy(ndev->dev_addr, ndev->perm_addr, ETH_ALEN); SET_NETDEV_DEV(ndev, wiphy_dev(local->hw.wiphy)); /* don't use IEEE80211_DEV_TO_SUB_IF -- it checks too much */ sdata = netdev_priv(ndev); ndev->ieee80211_ptr = &sdata->wdev; memcpy(sdata->vif.addr, ndev->dev_addr, ETH_ALEN); memcpy(sdata->name, ndev->name, IFNAMSIZ); if (txq_size) { txqi = netdev_priv(ndev) + size; ieee80211_txq_init(sdata, NULL, txqi, 0); } sdata->dev = ndev; } /* initialise type-independent data */ sdata->wdev.wiphy = local->hw.wiphy; sdata->local = local; for (i = 0; i < 
IEEE80211_FRAGMENT_MAX; i++) skb_queue_head_init(&sdata->fragments[i].skb_list); INIT_LIST_HEAD(&sdata->key_list); INIT_DELAYED_WORK(&sdata->dfs_cac_timer_work, ieee80211_dfs_cac_timer_work); INIT_DELAYED_WORK(&sdata->dec_tailroom_needed_wk, ieee80211_delayed_tailroom_dec); for (i = 0; i < NUM_NL80211_BANDS; i++) { struct ieee80211_supported_band *sband; sband = local->hw.wiphy->bands[i]; sdata->rc_rateidx_mask[i] = sband ? (1 << sband->n_bitrates) - 1 : 0; if (sband) { __le16 cap; u16 *vht_rate_mask; memcpy(sdata->rc_rateidx_mcs_mask[i], sband->ht_cap.mcs.rx_mask, sizeof(sdata->rc_rateidx_mcs_mask[i])); cap = sband->vht_cap.vht_mcs.rx_mcs_map; vht_rate_mask = sdata->rc_rateidx_vht_mcs_mask[i]; ieee80211_get_vht_mask_from_cap(cap, vht_rate_mask); } else { memset(sdata->rc_rateidx_mcs_mask[i], 0, sizeof(sdata->rc_rateidx_mcs_mask[i])); memset(sdata->rc_rateidx_vht_mcs_mask[i], 0, sizeof(sdata->rc_rateidx_vht_mcs_mask[i])); } } ieee80211_set_default_queues(sdata); sdata->ap_power_level = IEEE80211_UNSET_POWER_LEVEL; sdata->user_power_level = local->user_power_level; sdata->encrypt_headroom = IEEE80211_ENCRYPT_HEADROOM; /* setup type-dependent data */ ieee80211_setup_sdata(sdata, type); if (ndev) { ndev->ieee80211_ptr->use_4addr = params->use_4addr; if (type == NL80211_IFTYPE_STATION) sdata->u.mgd.use_4addr = params->use_4addr; ndev->features |= local->hw.netdev_features; netdev_set_default_ethtool_ops(ndev, &ieee80211_ethtool_ops); /* MTU range: 256 - 2304 */ #if LINUX_VERSION_IS_GEQ(4,10,0) ndev->min_mtu = 256; #endif #if LINUX_VERSION_IS_GEQ(4,10,0) ndev->max_mtu = IEEE80211_MAX_DATA_LEN; #endif ret = register_netdevice(ndev); if (ret) { #if LINUX_VERSION_IS_LESS(4,12,0) ieee80211_if_free(ndev); #endif free_netdev(ndev); return ret; } } mutex_lock(&local->iflist_mtx); list_add_tail_rcu(&sdata->list, &local->interfaces); mutex_unlock(&local->iflist_mtx); if (new_wdev) *new_wdev = &sdata->wdev; return 0; } void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata) { ASSERT_RTNL(); mutex_lock(&sdata->local->iflist_mtx); list_del_rcu(&sdata->list); mutex_unlock(&sdata->local->iflist_mtx); synchronize_rcu(); if (sdata->dev) { unregister_netdevice(sdata->dev); } else { cfg80211_unregister_wdev(&sdata->wdev); ieee80211_teardown_sdata(sdata); kfree(sdata); } } void ieee80211_sdata_stop(struct ieee80211_sub_if_data *sdata) { if (WARN_ON_ONCE(!test_bit(SDATA_STATE_RUNNING, &sdata->state))) return; ieee80211_do_stop(sdata, true); } void ieee80211_remove_interfaces(struct ieee80211_local *local) { struct ieee80211_sub_if_data *sdata, *tmp; LIST_HEAD(unreg_list); LIST_HEAD(wdev_list); ASSERT_RTNL(); /* Before destroying the interfaces, make sure they're all stopped so * that the hardware is stopped. Otherwise, the driver might still be * iterating the interfaces during the shutdown, e.g. from a worker * or from RX processing or similar, and if it does so (using atomic * iteration) while we're manipulating the list, the iteration will * crash. * * After this, the hardware should be stopped and the driver should * have stopped all of its activities, so that we can do RCU-unaware * manipulations of the interface list below. 
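	 *
	 * cfg80211_shutdown_all_interfaces() below is what performs that
	 * stop: it closes every netdev/wdev under the RTNL, driving
	 * open_count to zero (anything else trips the WARN that
	 * follows).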
*/ cfg80211_shutdown_all_interfaces(local->hw.wiphy); WARN(local->open_count, "%s: open count remains %d\n", wiphy_name(local->hw.wiphy), local->open_count); ieee80211_txq_teardown_flows(local); mutex_lock(&local->iflist_mtx); list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) { list_del(&sdata->list); if (sdata->dev) unregister_netdevice_queue(sdata->dev, &unreg_list); else list_add(&sdata->list, &wdev_list); } mutex_unlock(&local->iflist_mtx); unregister_netdevice_many(&unreg_list); list_for_each_entry_safe(sdata, tmp, &wdev_list, list) { list_del(&sdata->list); cfg80211_unregister_wdev(&sdata->wdev); kfree(sdata); } } static int netdev_notify(struct notifier_block *nb, unsigned long state, void *ptr) { struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct ieee80211_sub_if_data *sdata; if (state != NETDEV_CHANGENAME) return NOTIFY_DONE; if (!dev->ieee80211_ptr || !dev->ieee80211_ptr->wiphy) return NOTIFY_DONE; if (dev->ieee80211_ptr->wiphy->privid != mac80211_wiphy_privid) return NOTIFY_DONE; sdata = IEEE80211_DEV_TO_SUB_IF(dev); memcpy(sdata->name, dev->name, IFNAMSIZ); ieee80211_debugfs_rename_netdev(sdata); return NOTIFY_OK; } static struct notifier_block mac80211_netdev_notifier = { .notifier_call = netdev_notify, }; int ieee80211_iface_init(void) { return register_netdevice_notifier(&mac80211_netdev_notifier); } void ieee80211_iface_exit(void) { unregister_netdevice_notifier(&mac80211_netdev_notifier); } void ieee80211_vif_inc_num_mcast(struct ieee80211_sub_if_data *sdata) { if (sdata->vif.type == NL80211_IFTYPE_AP) atomic_inc(&sdata->u.ap.num_mcast_sta); else if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) atomic_inc(&sdata->u.vlan.num_mcast_sta); } void ieee80211_vif_dec_num_mcast(struct ieee80211_sub_if_data *sdata) { if (sdata->vif.type == NL80211_IFTYPE_AP) atomic_dec(&sdata->u.ap.num_mcast_sta); else if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) atomic_dec(&sdata->u.vlan.num_mcast_sta); } backport-iwlwifi-dkms-7906/net/mac80211/key.c000066400000000000000000001037621351064634500205700ustar00rootroot00000000000000/* * Copyright 2002-2005, Instant802 Networks, Inc. * Copyright 2005-2006, Devicescape Software, Inc. * Copyright 2006-2007 Jiri Benc * Copyright 2007-2008 Johannes Berg * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright 2015-2017 Intel Deutschland GmbH * Copyright 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include #include #include #include #include #include #include #include #include #include #include "ieee80211_i.h" #include "driver-ops.h" #include "debugfs_key.h" #include "aes_ccm.h" #include "aes_cmac.h" #include "aes_gmac.h" #include "aes_gcm.h" /** * DOC: Key handling basics * * Key handling in mac80211 is done based on per-interface (sub_if_data) * keys and per-station keys. Since each station belongs to an interface, * each station key also belongs to that interface. * * Hardware acceleration is done on a best-effort basis for algorithms * that are implemented in software, for each key the hardware is asked * to enable that key for offloading but if it cannot do that the key is * simply kept for software encryption (unless it is for an algorithm * that isn't implemented in software). * There is currently no way of knowing whether a key is handled in SW * or HW except by looking into debugfs. * * All key management is internally protected by a mutex. 
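 * (the per-device key_mtx; see assert_key_lock() below.)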
Within all * other parts of mac80211, key references are, just as STA structure * references, protected by RCU. Note, however, that some things are * unprotected, namely the key->sta dereferences within the hardware * acceleration functions. This means that sta_info_destroy() must * remove the key which waits for an RCU grace period. */ static const u8 bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; static void assert_key_lock(struct ieee80211_local *local) { lockdep_assert_held(&local->key_mtx); } static void update_vlan_tailroom_need_count(struct ieee80211_sub_if_data *sdata, int delta) { struct ieee80211_sub_if_data *vlan; if (sdata->vif.type != NL80211_IFTYPE_AP) return; /* crypto_tx_tailroom_needed_cnt is protected by this */ assert_key_lock(sdata->local); rcu_read_lock(); list_for_each_entry_rcu(vlan, &sdata->u.ap.vlans, u.vlan.list) vlan->crypto_tx_tailroom_needed_cnt += delta; rcu_read_unlock(); } static void increment_tailroom_need_count(struct ieee80211_sub_if_data *sdata) { /* * When this count is zero, SKB resizing for allocating tailroom * for IV or MMIC is skipped. But, this check has created two race * cases in xmit path while transiting from zero count to one: * * 1. SKB resize was skipped because no key was added but just before * the xmit key is added and SW encryption kicks off. * * 2. SKB resize was skipped because all the keys were hw planted but * just before xmit one of the key is deleted and SW encryption kicks * off. * * In both the above case SW encryption will find not enough space for * tailroom and exits with WARN_ON. (See WARN_ONs at wpa.c) * * Solution has been explained at * http://mid.gmane.org/1308590980.4322.19.camel@jlt3.sipsolutions.net */ assert_key_lock(sdata->local); update_vlan_tailroom_need_count(sdata, 1); if (!sdata->crypto_tx_tailroom_needed_cnt++) { /* * Flush all XMIT packets currently using HW encryption or no * encryption at all if the count transition is from 0 -> 1. */ synchronize_net(); } } static void decrease_tailroom_need_count(struct ieee80211_sub_if_data *sdata, int delta) { assert_key_lock(sdata->local); WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt < delta); update_vlan_tailroom_need_count(sdata, -delta); sdata->crypto_tx_tailroom_needed_cnt -= delta; } static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key) { struct ieee80211_sub_if_data *sdata = key->sdata; struct sta_info *sta; int ret = -EOPNOTSUPP; might_sleep(); if (key->flags & KEY_FLAG_TAINTED) { /* If we get here, it's during resume and the key is * tainted so shouldn't be used/programmed any more. * However, its flags may still indicate that it was * programmed into the device (since we're in resume) * so clear that flag now to avoid trying to remove * it again later. */ key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE; return -EINVAL; } if (!key->local->ops->set_key) goto out_unsupported; assert_key_lock(key->local); sta = key->sta; /* * If this is a per-STA GTK, check if it * is supported; if not, return. */ if (sta && !(key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE) && !ieee80211_hw_check(&key->local->hw, SUPPORTS_PER_STA_GTK)) goto out_unsupported; if (sta && !sta->uploaded) goto out_unsupported; if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) { /* * The driver doesn't know anything about VLAN interfaces. * Hence, don't send GTKs for VLAN interfaces to the driver. */ if (!(key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE)) goto out_unsupported; } ret = drv_set_key(key->local, SET_KEY, sdata, sta ? 
&sta->sta : NULL, &key->conf); if (!ret) { key->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE; if (!((key->conf.flags & (IEEE80211_KEY_FLAG_GENERATE_MMIC | IEEE80211_KEY_FLAG_PUT_MIC_SPACE)) || (key->conf.flags & IEEE80211_KEY_FLAG_RESERVE_TAILROOM))) decrease_tailroom_need_count(sdata, 1); WARN_ON((key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE) && (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV)); WARN_ON((key->conf.flags & IEEE80211_KEY_FLAG_PUT_MIC_SPACE) && (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC)); return 0; } if (ret != -ENOSPC && ret != -EOPNOTSUPP && ret != 1) sdata_err(sdata, "failed to set key (%d, %pM) to hardware (%d)\n", key->conf.keyidx, sta ? sta->sta.addr : bcast_addr, ret); out_unsupported: switch (key->conf.cipher) { case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: case WLAN_CIPHER_SUITE_TKIP: case WLAN_CIPHER_SUITE_CCMP: case WLAN_CIPHER_SUITE_CCMP_256: case WLAN_CIPHER_SUITE_AES_CMAC: case WLAN_CIPHER_SUITE_BIP_CMAC_256: case WLAN_CIPHER_SUITE_BIP_GMAC_128: case WLAN_CIPHER_SUITE_BIP_GMAC_256: case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: /* all of these we can do in software - if driver can */ if (ret == 1) return 0; if (ieee80211_hw_check(&key->local->hw, SW_CRYPTO_CONTROL)) { if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) return 0; return -EINVAL; } return 0; default: return -EINVAL; } } static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key) { struct ieee80211_sub_if_data *sdata; struct sta_info *sta; int ret; might_sleep(); if (!key || !key->local->ops->set_key) return; assert_key_lock(key->local); if (!(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) return; sta = key->sta; sdata = key->sdata; if (!((key->conf.flags & (IEEE80211_KEY_FLAG_GENERATE_MMIC | IEEE80211_KEY_FLAG_PUT_MIC_SPACE)) || (key->conf.flags & IEEE80211_KEY_FLAG_RESERVE_TAILROOM))) increment_tailroom_need_count(sdata); key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE; ret = drv_set_key(key->local, DISABLE_KEY, sdata, sta ? &sta->sta : NULL, &key->conf); if (ret) sdata_err(sdata, "failed to remove key (%d, %pM) from hardware (%d)\n", key->conf.keyidx, sta ? sta->sta.addr : bcast_addr, ret); } static int ieee80211_hw_key_replace(struct ieee80211_key *old_key, struct ieee80211_key *new_key, bool ptk0rekey) { struct ieee80211_sub_if_data *sdata; struct ieee80211_local *local; struct sta_info *sta; int ret; /* Aggregation sessions are OK when running on SW crypto. * A broken remote STA may cause issues not observed with HW * crypto, though. */ if (!(old_key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) return 0; assert_key_lock(old_key->local); sta = old_key->sta; /* PTK only using key ID 0 needs special handling on rekey */ if (new_key && sta && ptk0rekey) { local = old_key->local; sdata = old_key->sdata; /* Stop TX till we are on the new key */ old_key->flags |= KEY_FLAG_TAINTED; ieee80211_clear_fast_xmit(sta); /* Aggregation sessions during rekey are complicated due to the * reorder buffer and retransmits. Side step that by blocking * aggregation during rekey and tear down running sessions. */ if (ieee80211_hw_check(&local->hw, AMPDU_AGGREGATION)) { set_sta_flag(sta, WLAN_STA_BLOCK_BA); ieee80211_sta_tear_down_BA_sessions(sta, AGG_STOP_LOCAL_REQUEST); } if (!wiphy_ext_feature_isset(local->hw.wiphy, NL80211_EXT_FEATURE_CAN_REPLACE_PTK0)) { pr_warn_ratelimited("Rekeying PTK for STA %pM but driver can't safely do that.", sta->sta.addr); /* Flushing the driver queues *may* help prevent * the clear text leaks and freezes. 
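 *
 * The resulting PTK0 rekey sequence is thus: taint the old key and
 * clear fast-xmit to stop TX, tear down any BA sessions, flush the
 * queues if the driver cannot replace PTK0 safely, and only then
 * disable the old key and enable the new one in hardware.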
*/ ieee80211_flush_queues(local, sdata, false); } } ieee80211_key_disable_hw_accel(old_key); if (new_key) ret = ieee80211_key_enable_hw_accel(new_key); else ret = 0; return ret; } static void __ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata, int idx, bool uni, bool multi) { struct ieee80211_key *key = NULL; assert_key_lock(sdata->local); if (idx >= 0 && idx < NUM_DEFAULT_KEYS) key = key_mtx_dereference(sdata->local, sdata->keys[idx]); if (uni) { rcu_assign_pointer(sdata->default_unicast_key, key); ieee80211_check_fast_xmit_iface(sdata); if (sdata->vif.type != NL80211_IFTYPE_AP_VLAN) drv_set_default_unicast_key(sdata->local, sdata, idx); } if (multi) rcu_assign_pointer(sdata->default_multicast_key, key); ieee80211_debugfs_key_update_default(sdata); } void ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata, int idx, bool uni, bool multi) { mutex_lock(&sdata->local->key_mtx); __ieee80211_set_default_key(sdata, idx, uni, multi); mutex_unlock(&sdata->local->key_mtx); } static void __ieee80211_set_default_mgmt_key(struct ieee80211_sub_if_data *sdata, int idx) { struct ieee80211_key *key = NULL; assert_key_lock(sdata->local); if (idx >= NUM_DEFAULT_KEYS && idx < NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS) key = key_mtx_dereference(sdata->local, sdata->keys[idx]); rcu_assign_pointer(sdata->default_mgmt_key, key); ieee80211_debugfs_key_update_default(sdata); } void ieee80211_set_default_mgmt_key(struct ieee80211_sub_if_data *sdata, int idx) { mutex_lock(&sdata->local->key_mtx); __ieee80211_set_default_mgmt_key(sdata, idx); mutex_unlock(&sdata->local->key_mtx); } static int ieee80211_key_replace(struct ieee80211_sub_if_data *sdata, struct sta_info *sta, bool pairwise, struct ieee80211_key *old, struct ieee80211_key *new) { int idx; int ret; bool defunikey, defmultikey, defmgmtkey; /* caller must provide at least one old/new */ if (WARN_ON(!new && !old)) return 0; if (new) list_add_tail_rcu(&new->list, &sdata->key_list); WARN_ON(new && old && new->conf.keyidx != old->conf.keyidx); if (old) { idx = old->conf.keyidx; /* TODO: properly implement and test "Extended Key ID for * Individually Addressed Frames" from IEEE 802.11-2016. 
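 * (That extension allows a pairwise key to use key ID 0 or 1, so a
 * new PTK could be installed under the unused ID and TX switched
 * over without the stop-and-flush dance above.)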
* Till then always assume only key ID 0 is used for * pairwise keys.*/ ret = ieee80211_hw_key_replace(old, new, pairwise); } else { /* new must be provided in case old is not */ idx = new->conf.keyidx; if (!new->local->wowlan) ret = ieee80211_key_enable_hw_accel(new); else ret = 0; } if (ret) return ret; if (sta) { if (pairwise) { rcu_assign_pointer(sta->ptk[idx], new); sta->ptk_idx = idx; if (new) { clear_sta_flag(sta, WLAN_STA_BLOCK_BA); ieee80211_check_fast_xmit(sta); } } else { rcu_assign_pointer(sta->gtk[idx], new); } if (new) ieee80211_check_fast_rx(sta); } else { defunikey = old && old == key_mtx_dereference(sdata->local, sdata->default_unicast_key); defmultikey = old && old == key_mtx_dereference(sdata->local, sdata->default_multicast_key); defmgmtkey = old && old == key_mtx_dereference(sdata->local, sdata->default_mgmt_key); if (defunikey && !new) __ieee80211_set_default_key(sdata, -1, true, false); if (defmultikey && !new) __ieee80211_set_default_key(sdata, -1, false, true); if (defmgmtkey && !new) __ieee80211_set_default_mgmt_key(sdata, -1); rcu_assign_pointer(sdata->keys[idx], new); if (defunikey && new) __ieee80211_set_default_key(sdata, new->conf.keyidx, true, false); if (defmultikey && new) __ieee80211_set_default_key(sdata, new->conf.keyidx, false, true); if (defmgmtkey && new) __ieee80211_set_default_mgmt_key(sdata, new->conf.keyidx); } if (old) list_del_rcu(&old->list); return 0; } struct ieee80211_key * ieee80211_key_alloc(u32 cipher, int idx, size_t key_len, const u8 *key_data, size_t seq_len, const u8 *seq, const struct ieee80211_cipher_scheme *cs) { struct ieee80211_key *key; int i, j, err; if (WARN_ON(idx < 0 || idx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS)) return ERR_PTR(-EINVAL); key = kzalloc(sizeof(struct ieee80211_key) + key_len, GFP_KERNEL); if (!key) return ERR_PTR(-ENOMEM); /* * Default to software encryption; we'll later upload the * key to the hardware if possible. */ key->conf.flags = 0; key->flags = 0; key->conf.cipher = cipher; key->conf.keyidx = idx; key->conf.keylen = key_len; switch (cipher) { case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: key->conf.iv_len = IEEE80211_WEP_IV_LEN; key->conf.icv_len = IEEE80211_WEP_ICV_LEN; break; case WLAN_CIPHER_SUITE_TKIP: key->conf.iv_len = IEEE80211_TKIP_IV_LEN; key->conf.icv_len = IEEE80211_TKIP_ICV_LEN; if (seq) { for (i = 0; i < IEEE80211_NUM_TIDS; i++) { key->u.tkip.rx[i].iv32 = get_unaligned_le32(&seq[2]); key->u.tkip.rx[i].iv16 = get_unaligned_le16(seq); } } spin_lock_init(&key->u.tkip.txlock); break; case WLAN_CIPHER_SUITE_CCMP: key->conf.iv_len = IEEE80211_CCMP_HDR_LEN; key->conf.icv_len = IEEE80211_CCMP_MIC_LEN; if (seq) { for (i = 0; i < IEEE80211_NUM_TIDS + 1; i++) for (j = 0; j < IEEE80211_CCMP_PN_LEN; j++) key->u.ccmp.rx_pn[i][j] = seq[IEEE80211_CCMP_PN_LEN - j - 1]; } /* * Initialize AES key state here as an optimization so that * it does not need to be initialized for every packet. */ key->u.ccmp.tfm = ieee80211_aes_key_setup_encrypt( key_data, key_len, IEEE80211_CCMP_MIC_LEN); if (IS_ERR(key->u.ccmp.tfm)) { err = PTR_ERR(key->u.ccmp.tfm); kfree(key); return ERR_PTR(err); } break; case WLAN_CIPHER_SUITE_CCMP_256: key->conf.iv_len = IEEE80211_CCMP_256_HDR_LEN; key->conf.icv_len = IEEE80211_CCMP_256_MIC_LEN; for (i = 0; seq && i < IEEE80211_NUM_TIDS + 1; i++) for (j = 0; j < IEEE80211_CCMP_256_PN_LEN; j++) key->u.ccmp.rx_pn[i][j] = seq[IEEE80211_CCMP_256_PN_LEN - j - 1]; /* Initialize AES key state here as an optimization so that * it does not need to be initialized for every packet. 
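 *
 * Note the rx_pn loops above store the PN reversed: the @seq bytes
 * arrive least-significant-byte first, while rx_pn[] keeps the PN
 * most-significant-byte first, matching the order the RX crypto
 * code reconstructs and memcmp()s against.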
*/ key->u.ccmp.tfm = ieee80211_aes_key_setup_encrypt( key_data, key_len, IEEE80211_CCMP_256_MIC_LEN); if (IS_ERR(key->u.ccmp.tfm)) { err = PTR_ERR(key->u.ccmp.tfm); kfree(key); return ERR_PTR(err); } break; case WLAN_CIPHER_SUITE_AES_CMAC: case WLAN_CIPHER_SUITE_BIP_CMAC_256: key->conf.iv_len = 0; if (cipher == WLAN_CIPHER_SUITE_AES_CMAC) key->conf.icv_len = sizeof(struct ieee80211_mmie); else key->conf.icv_len = sizeof(struct ieee80211_mmie_16); if (seq) for (j = 0; j < IEEE80211_CMAC_PN_LEN; j++) key->u.aes_cmac.rx_pn[j] = seq[IEEE80211_CMAC_PN_LEN - j - 1]; /* * Initialize AES key state here as an optimization so that * it does not need to be initialized for every packet. */ key->u.aes_cmac.tfm = ieee80211_aes_cmac_key_setup(key_data, key_len); if (IS_ERR(key->u.aes_cmac.tfm)) { err = PTR_ERR(key->u.aes_cmac.tfm); kfree(key); return ERR_PTR(err); } break; case WLAN_CIPHER_SUITE_BIP_GMAC_128: case WLAN_CIPHER_SUITE_BIP_GMAC_256: key->conf.iv_len = 0; key->conf.icv_len = sizeof(struct ieee80211_mmie_16); if (seq) for (j = 0; j < IEEE80211_GMAC_PN_LEN; j++) key->u.aes_gmac.rx_pn[j] = seq[IEEE80211_GMAC_PN_LEN - j - 1]; /* Initialize AES key state here as an optimization so that * it does not need to be initialized for every packet. */ key->u.aes_gmac.tfm = ieee80211_aes_gmac_key_setup(key_data, key_len); if (IS_ERR(key->u.aes_gmac.tfm)) { err = PTR_ERR(key->u.aes_gmac.tfm); kfree(key); return ERR_PTR(err); } break; case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: key->conf.iv_len = IEEE80211_GCMP_HDR_LEN; key->conf.icv_len = IEEE80211_GCMP_MIC_LEN; for (i = 0; seq && i < IEEE80211_NUM_TIDS + 1; i++) for (j = 0; j < IEEE80211_GCMP_PN_LEN; j++) key->u.gcmp.rx_pn[i][j] = seq[IEEE80211_GCMP_PN_LEN - j - 1]; /* Initialize AES key state here as an optimization so that * it does not need to be initialized for every packet. 
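 *
 * GCMP-128 and GCMP-256 share the same header and MIC sizes, so the
 * single IEEE80211_GCMP_HDR_LEN/IEEE80211_GCMP_MIC_LEN pair covers
 * both ciphers here, unlike the separate _256 constants needed for
 * CCMP above.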
*/ key->u.gcmp.tfm = ieee80211_aes_gcm_key_setup_encrypt(key_data, key_len); if (IS_ERR(key->u.gcmp.tfm)) { err = PTR_ERR(key->u.gcmp.tfm); kfree(key); return ERR_PTR(err); } break; default: if (cs) { if (seq_len && seq_len != cs->pn_len) { kfree(key); return ERR_PTR(-EINVAL); } key->conf.iv_len = cs->hdr_len; key->conf.icv_len = cs->mic_len; for (i = 0; i < IEEE80211_NUM_TIDS + 1; i++) for (j = 0; j < seq_len; j++) key->u.gen.rx_pn[i][j] = seq[seq_len - j - 1]; key->flags |= KEY_FLAG_CIPHER_SCHEME; } } memcpy(key->conf.key, key_data, key_len); INIT_LIST_HEAD(&key->list); return key; } static void ieee80211_key_free_common(struct ieee80211_key *key) { switch (key->conf.cipher) { case WLAN_CIPHER_SUITE_CCMP: case WLAN_CIPHER_SUITE_CCMP_256: ieee80211_aes_key_free(key->u.ccmp.tfm); break; case WLAN_CIPHER_SUITE_AES_CMAC: case WLAN_CIPHER_SUITE_BIP_CMAC_256: ieee80211_aes_cmac_key_free(key->u.aes_cmac.tfm); break; case WLAN_CIPHER_SUITE_BIP_GMAC_128: case WLAN_CIPHER_SUITE_BIP_GMAC_256: ieee80211_aes_gmac_key_free(key->u.aes_gmac.tfm); break; case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: ieee80211_aes_gcm_key_free(key->u.gcmp.tfm); break; } kzfree(key); } static void __ieee80211_key_destroy(struct ieee80211_key *key, bool delay_tailroom) { if (key->local) { struct ieee80211_sub_if_data *sdata = key->sdata; ieee80211_debugfs_key_remove(key); if (delay_tailroom) { /* see ieee80211_delayed_tailroom_dec */ sdata->crypto_tx_tailroom_pending_dec++; schedule_delayed_work(&sdata->dec_tailroom_needed_wk, HZ/2); } else { decrease_tailroom_need_count(sdata, 1); } } ieee80211_key_free_common(key); } static void ieee80211_key_destroy(struct ieee80211_key *key, bool delay_tailroom) { if (!key) return; /* * Synchronize so the TX path and rcu key iterators * can no longer be using this key before we free/remove it. */ synchronize_net(); __ieee80211_key_destroy(key, delay_tailroom); } void ieee80211_key_free_unused(struct ieee80211_key *key) { WARN_ON(key->sdata || key->local); ieee80211_key_free_common(key); } static bool ieee80211_key_identical(struct ieee80211_sub_if_data *sdata, struct ieee80211_key *old, struct ieee80211_key *new) { u8 tkip_old[WLAN_KEY_LEN_TKIP], tkip_new[WLAN_KEY_LEN_TKIP]; u8 *tk_old, *tk_new; if (!old || new->conf.keylen != old->conf.keylen) return false; tk_old = old->conf.key; tk_new = new->conf.key; /* * In station mode, don't compare the TX MIC key, as it's never used * and offloaded rekeying may not care to send it to the host. This * is the case in iwlwifi, for example. */ if (sdata->vif.type == NL80211_IFTYPE_STATION && new->conf.cipher == WLAN_CIPHER_SUITE_TKIP && new->conf.keylen == WLAN_KEY_LEN_TKIP && !(new->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE)) { memcpy(tkip_old, tk_old, WLAN_KEY_LEN_TKIP); memcpy(tkip_new, tk_new, WLAN_KEY_LEN_TKIP); memset(tkip_old + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY, 0, 8); memset(tkip_new + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY, 0, 8); tk_old = tkip_old; tk_new = tkip_new; } return !crypto_memneq(tk_old, tk_new, new->conf.keylen); } int ieee80211_key_link(struct ieee80211_key *key, struct ieee80211_sub_if_data *sdata, struct sta_info *sta) { struct ieee80211_key *old_key; int idx = key->conf.keyidx; bool pairwise = key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE; /* * We want to delay tailroom updates only for station - in that * case it helps roaming speed, but in other cases it hurts and * can cause warnings to appear. 
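 * See the comment in ieee80211_delayed_tailroom_dec() for the full
 * rationale behind deferring the decrement.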
*/ bool delay_tailroom = sdata->vif.type == NL80211_IFTYPE_STATION; int ret; mutex_lock(&sdata->local->key_mtx); if (sta && pairwise) old_key = key_mtx_dereference(sdata->local, sta->ptk[idx]); else if (sta) old_key = key_mtx_dereference(sdata->local, sta->gtk[idx]); else old_key = key_mtx_dereference(sdata->local, sdata->keys[idx]); /* * Silently accept key re-installation without really installing the * new version of the key to avoid nonce reuse or replay issues. */ if (ieee80211_key_identical(sdata, old_key, key)) { ieee80211_key_free_unused(key); ret = 0; goto out; } key->local = sdata->local; key->sdata = sdata; key->sta = sta; increment_tailroom_need_count(sdata); ret = ieee80211_key_replace(sdata, sta, pairwise, old_key, key); if (!ret) { ieee80211_debugfs_key_add(key); ieee80211_key_destroy(old_key, delay_tailroom); } else { ieee80211_key_free(key, delay_tailroom); } out: mutex_unlock(&sdata->local->key_mtx); return ret; } void ieee80211_key_free(struct ieee80211_key *key, bool delay_tailroom) { if (!key) return; /* * Replace key with nothingness if it was ever used. */ if (key->sdata) ieee80211_key_replace(key->sdata, key->sta, key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE, key, NULL); ieee80211_key_destroy(key, delay_tailroom); } void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata) { struct ieee80211_key *key; struct ieee80211_sub_if_data *vlan; ASSERT_RTNL(); if (WARN_ON(!ieee80211_sdata_running(sdata))) return; mutex_lock(&sdata->local->key_mtx); WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt || sdata->crypto_tx_tailroom_pending_dec); if (sdata->vif.type == NL80211_IFTYPE_AP) { list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list) WARN_ON_ONCE(vlan->crypto_tx_tailroom_needed_cnt || vlan->crypto_tx_tailroom_pending_dec); } list_for_each_entry(key, &sdata->key_list, list) { increment_tailroom_need_count(sdata); ieee80211_key_enable_hw_accel(key); } mutex_unlock(&sdata->local->key_mtx); } void ieee80211_reset_crypto_tx_tailroom(struct ieee80211_sub_if_data *sdata) { struct ieee80211_sub_if_data *vlan; mutex_lock(&sdata->local->key_mtx); sdata->crypto_tx_tailroom_needed_cnt = 0; if (sdata->vif.type == NL80211_IFTYPE_AP) { list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list) vlan->crypto_tx_tailroom_needed_cnt = 0; } mutex_unlock(&sdata->local->key_mtx); } void ieee80211_iter_keys(struct ieee80211_hw *hw, struct ieee80211_vif *vif, void (*iter)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *key, void *data), void *iter_data) { struct ieee80211_local *local = hw_to_local(hw); struct ieee80211_key *key, *tmp; struct ieee80211_sub_if_data *sdata; ASSERT_RTNL(); mutex_lock(&local->key_mtx); if (vif) { sdata = vif_to_sdata(vif); list_for_each_entry_safe(key, tmp, &sdata->key_list, list) iter(hw, &sdata->vif, key->sta ? &key->sta->sta : NULL, &key->conf, iter_data); } else { list_for_each_entry(sdata, &local->interfaces, list) list_for_each_entry_safe(key, tmp, &sdata->key_list, list) iter(hw, &sdata->vif, key->sta ? 
&key->sta->sta : NULL, &key->conf, iter_data); } mutex_unlock(&local->key_mtx); } EXPORT_SYMBOL(ieee80211_iter_keys); static void _ieee80211_iter_keys_rcu(struct ieee80211_hw *hw, struct ieee80211_sub_if_data *sdata, void (*iter)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *key, void *data), void *iter_data) { struct ieee80211_key *key; list_for_each_entry_rcu(key, &sdata->key_list, list) { /* skip keys of station in removal process */ if (key->sta && key->sta->removed) continue; if (!(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) continue; iter(hw, &sdata->vif, key->sta ? &key->sta->sta : NULL, &key->conf, iter_data); } } void ieee80211_iter_keys_rcu(struct ieee80211_hw *hw, struct ieee80211_vif *vif, void (*iter)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *key, void *data), void *iter_data) { struct ieee80211_local *local = hw_to_local(hw); struct ieee80211_sub_if_data *sdata; if (vif) { sdata = vif_to_sdata(vif); _ieee80211_iter_keys_rcu(hw, sdata, iter, iter_data); } else { list_for_each_entry_rcu(sdata, &local->interfaces, list) _ieee80211_iter_keys_rcu(hw, sdata, iter, iter_data); } } EXPORT_SYMBOL(ieee80211_iter_keys_rcu); static void ieee80211_free_keys_iface(struct ieee80211_sub_if_data *sdata, struct list_head *keys) { struct ieee80211_key *key, *tmp; decrease_tailroom_need_count(sdata, sdata->crypto_tx_tailroom_pending_dec); sdata->crypto_tx_tailroom_pending_dec = 0; ieee80211_debugfs_key_remove_mgmt_default(sdata); list_for_each_entry_safe(key, tmp, &sdata->key_list, list) { ieee80211_key_replace(key->sdata, key->sta, key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE, key, NULL); list_add_tail(&key->list, keys); } ieee80211_debugfs_key_update_default(sdata); } void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata, bool force_synchronize) { struct ieee80211_local *local = sdata->local; struct ieee80211_sub_if_data *vlan; struct ieee80211_sub_if_data *master; struct ieee80211_key *key, *tmp; LIST_HEAD(keys); cancel_delayed_work_sync(&sdata->dec_tailroom_needed_wk); mutex_lock(&local->key_mtx); ieee80211_free_keys_iface(sdata, &keys); if (sdata->vif.type == NL80211_IFTYPE_AP) { list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list) ieee80211_free_keys_iface(vlan, &keys); } if (!list_empty(&keys) || force_synchronize) synchronize_net(); list_for_each_entry_safe(key, tmp, &keys, list) __ieee80211_key_destroy(key, false); if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) { if (sdata->bss) { master = container_of(sdata->bss, struct ieee80211_sub_if_data, u.ap); WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt != master->crypto_tx_tailroom_needed_cnt); } } else { WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt || sdata->crypto_tx_tailroom_pending_dec); } if (sdata->vif.type == NL80211_IFTYPE_AP) { list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list) WARN_ON_ONCE(vlan->crypto_tx_tailroom_needed_cnt || vlan->crypto_tx_tailroom_pending_dec); } mutex_unlock(&local->key_mtx); } void ieee80211_free_sta_keys(struct ieee80211_local *local, struct sta_info *sta) { struct ieee80211_key *key; int i; mutex_lock(&local->key_mtx); for (i = 0; i < ARRAY_SIZE(sta->gtk); i++) { key = key_mtx_dereference(local, sta->gtk[i]); if (!key) continue; ieee80211_key_replace(key->sdata, key->sta, key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE, key, NULL); __ieee80211_key_destroy(key, key->sdata->vif.type == NL80211_IFTYPE_STATION); } for (i = 0; i < NUM_DEFAULT_KEYS; i++) { key = 
key_mtx_dereference(local, sta->ptk[i]); if (!key) continue; ieee80211_key_replace(key->sdata, key->sta, key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE, key, NULL); __ieee80211_key_destroy(key, key->sdata->vif.type == NL80211_IFTYPE_STATION); } mutex_unlock(&local->key_mtx); } void ieee80211_delayed_tailroom_dec(struct work_struct *wk) { struct ieee80211_sub_if_data *sdata; sdata = container_of(wk, struct ieee80211_sub_if_data, dec_tailroom_needed_wk.work); /* * The reason for the delayed tailroom needed decrementing is to * make roaming faster: during roaming, all keys are first deleted * and then new keys are installed. The first new key causes the * crypto_tx_tailroom_needed_cnt to go from 0 to 1, which invokes * the cost of synchronize_net() (which can be slow). Avoid this * by deferring the crypto_tx_tailroom_needed_cnt decrementing on * key removal for a while, so if we roam the value is larger than * zero and no 0->1 transition happens. * * The cost is that if the AP switching was from an AP with keys * to one without, we still allocate tailroom while it would no * longer be needed. However, in the typical (fast) roaming case * within an ESS this usually won't happen. */ mutex_lock(&sdata->local->key_mtx); decrease_tailroom_need_count(sdata, sdata->crypto_tx_tailroom_pending_dec); sdata->crypto_tx_tailroom_pending_dec = 0; mutex_unlock(&sdata->local->key_mtx); } void ieee80211_gtk_rekey_notify(struct ieee80211_vif *vif, const u8 *bssid, const u8 *replay_ctr, gfp_t gfp) { struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); trace_api_gtk_rekey_notify(sdata, bssid, replay_ctr); cfg80211_gtk_rekey_notify(sdata->dev, bssid, replay_ctr, gfp); } EXPORT_SYMBOL_GPL(ieee80211_gtk_rekey_notify); void ieee80211_get_key_rx_seq(struct ieee80211_key_conf *keyconf, int tid, struct ieee80211_key_seq *seq) { struct ieee80211_key *key; const u8 *pn; key = container_of(keyconf, struct ieee80211_key, conf); switch (key->conf.cipher) { case WLAN_CIPHER_SUITE_TKIP: if (WARN_ON(tid < 0 || tid >= IEEE80211_NUM_TIDS)) return; seq->tkip.iv32 = key->u.tkip.rx[tid].iv32; seq->tkip.iv16 = key->u.tkip.rx[tid].iv16; break; case WLAN_CIPHER_SUITE_CCMP: case WLAN_CIPHER_SUITE_CCMP_256: if (WARN_ON(tid < -1 || tid >= IEEE80211_NUM_TIDS)) return; if (tid < 0) pn = key->u.ccmp.rx_pn[IEEE80211_NUM_TIDS]; else pn = key->u.ccmp.rx_pn[tid]; memcpy(seq->ccmp.pn, pn, IEEE80211_CCMP_PN_LEN); break; case WLAN_CIPHER_SUITE_AES_CMAC: case WLAN_CIPHER_SUITE_BIP_CMAC_256: if (WARN_ON(tid != 0)) return; pn = key->u.aes_cmac.rx_pn; memcpy(seq->aes_cmac.pn, pn, IEEE80211_CMAC_PN_LEN); break; case WLAN_CIPHER_SUITE_BIP_GMAC_128: case WLAN_CIPHER_SUITE_BIP_GMAC_256: if (WARN_ON(tid != 0)) return; pn = key->u.aes_gmac.rx_pn; memcpy(seq->aes_gmac.pn, pn, IEEE80211_GMAC_PN_LEN); break; case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: if (WARN_ON(tid < -1 || tid >= IEEE80211_NUM_TIDS)) return; if (tid < 0) pn = key->u.gcmp.rx_pn[IEEE80211_NUM_TIDS]; else pn = key->u.gcmp.rx_pn[tid]; memcpy(seq->gcmp.pn, pn, IEEE80211_GCMP_PN_LEN); break; } } EXPORT_SYMBOL(ieee80211_get_key_rx_seq); void ieee80211_set_key_rx_seq(struct ieee80211_key_conf *keyconf, int tid, struct ieee80211_key_seq *seq) { struct ieee80211_key *key; u8 *pn; key = container_of(keyconf, struct ieee80211_key, conf); switch (key->conf.cipher) { case WLAN_CIPHER_SUITE_TKIP: if (WARN_ON(tid < 0 || tid >= IEEE80211_NUM_TIDS)) return; key->u.tkip.rx[tid].iv32 = seq->tkip.iv32; key->u.tkip.rx[tid].iv16 = seq->tkip.iv16; break; case WLAN_CIPHER_SUITE_CCMP: case 
WLAN_CIPHER_SUITE_CCMP_256: if (WARN_ON(tid < -1 || tid >= IEEE80211_NUM_TIDS)) return; if (tid < 0) pn = key->u.ccmp.rx_pn[IEEE80211_NUM_TIDS]; else pn = key->u.ccmp.rx_pn[tid]; memcpy(pn, seq->ccmp.pn, IEEE80211_CCMP_PN_LEN); break; case WLAN_CIPHER_SUITE_AES_CMAC: case WLAN_CIPHER_SUITE_BIP_CMAC_256: if (WARN_ON(tid != 0)) return; pn = key->u.aes_cmac.rx_pn; memcpy(pn, seq->aes_cmac.pn, IEEE80211_CMAC_PN_LEN); break; case WLAN_CIPHER_SUITE_BIP_GMAC_128: case WLAN_CIPHER_SUITE_BIP_GMAC_256: if (WARN_ON(tid != 0)) return; pn = key->u.aes_gmac.rx_pn; memcpy(pn, seq->aes_gmac.pn, IEEE80211_GMAC_PN_LEN); break; case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: if (WARN_ON(tid < -1 || tid >= IEEE80211_NUM_TIDS)) return; if (tid < 0) pn = key->u.gcmp.rx_pn[IEEE80211_NUM_TIDS]; else pn = key->u.gcmp.rx_pn[tid]; memcpy(pn, seq->gcmp.pn, IEEE80211_GCMP_PN_LEN); break; default: WARN_ON(1); break; } } EXPORT_SYMBOL_GPL(ieee80211_set_key_rx_seq); void ieee80211_remove_key(struct ieee80211_key_conf *keyconf) { struct ieee80211_key *key; key = container_of(keyconf, struct ieee80211_key, conf); assert_key_lock(key->local); /* * if key was uploaded, we assume the driver will/has remove(d) * it, so adjust bookkeeping accordingly */ if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) { key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE; if (!((key->conf.flags & (IEEE80211_KEY_FLAG_GENERATE_MMIC | IEEE80211_KEY_FLAG_PUT_MIC_SPACE)) || (key->conf.flags & IEEE80211_KEY_FLAG_RESERVE_TAILROOM))) increment_tailroom_need_count(key->sdata); } ieee80211_key_free(key, false); } EXPORT_SYMBOL_GPL(ieee80211_remove_key); struct ieee80211_key_conf * ieee80211_gtk_rekey_add(struct ieee80211_vif *vif, struct ieee80211_key_conf *keyconf) { struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); struct ieee80211_key *key; int err; /* * TODO: we should check for local->wowlan here, because * calling gtk_rekey_add() without being in wowlan can cause * problems, unless we refactor the code to unify * gtk_rekey_add() with gtk_rekey_notify(). */ if (WARN_ON(vif->type != NL80211_IFTYPE_STATION)) return ERR_PTR(-EINVAL); key = ieee80211_key_alloc(keyconf->cipher, keyconf->keyidx, keyconf->keylen, keyconf->key, 0, NULL, NULL); if (IS_ERR(key)) return ERR_CAST(key); if (sdata->u.mgd.mfp != IEEE80211_MFP_DISABLED) key->conf.flags |= IEEE80211_KEY_FLAG_RX_MGMT; err = ieee80211_key_link(key, sdata, NULL); if (err) return ERR_PTR(err); return &key->conf; } EXPORT_SYMBOL_GPL(ieee80211_gtk_rekey_add); backport-iwlwifi-dkms-7906/net/mac80211/key.h000066400000000000000000000111201351064634500205570ustar00rootroot00000000000000/* * Copyright 2002-2004, Instant802 Networks, Inc. * Copyright 2005, Devicescape Software, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #ifndef IEEE80211_KEY_H #define IEEE80211_KEY_H #include #include #include #include #include #define NUM_DEFAULT_KEYS 4 #define NUM_DEFAULT_MGMT_KEYS 2 struct ieee80211_local; struct ieee80211_sub_if_data; struct sta_info; /** * enum ieee80211_internal_key_flags - internal key flags * * @KEY_FLAG_UPLOADED_TO_HARDWARE: Indicates that this key is present * in the hardware for TX crypto hardware acceleration. * @KEY_FLAG_TAINTED: Key is tainted and packets should be dropped. 
* @KEY_FLAG_CIPHER_SCHEME: This key is for a hardware cipher scheme */ enum ieee80211_internal_key_flags { KEY_FLAG_UPLOADED_TO_HARDWARE = BIT(0), KEY_FLAG_TAINTED = BIT(1), KEY_FLAG_CIPHER_SCHEME = BIT(2), }; enum ieee80211_internal_tkip_state { TKIP_STATE_NOT_INIT, TKIP_STATE_PHASE1_DONE, TKIP_STATE_PHASE1_HW_UPLOADED, }; struct tkip_ctx { u16 p1k[5]; /* p1k cache */ u32 p1k_iv32; /* iv32 for which p1k computed */ enum ieee80211_internal_tkip_state state; }; struct tkip_ctx_rx { struct tkip_ctx ctx; u32 iv32; /* current iv32 */ u16 iv16; /* current iv16 */ }; struct ieee80211_key { struct ieee80211_local *local; struct ieee80211_sub_if_data *sdata; struct sta_info *sta; /* for sdata list */ struct list_head list; /* protected by key mutex */ unsigned int flags; union { struct { /* protects tx context */ spinlock_t txlock; /* last used TSC */ struct tkip_ctx tx; /* last received RSC */ struct tkip_ctx_rx rx[IEEE80211_NUM_TIDS]; /* number of mic failures */ u32 mic_failures; } tkip; struct { /* * Last received packet number. The first * IEEE80211_NUM_TIDS counters are used with Data * frames and the last counter is used with Robust * Management frames. */ u8 rx_pn[IEEE80211_NUM_TIDS + 1][IEEE80211_CCMP_PN_LEN]; struct crypto_aead *tfm; u32 replays; /* dot11RSNAStatsCCMPReplays */ } ccmp; struct { u8 rx_pn[IEEE80211_CMAC_PN_LEN]; struct crypto_shash *tfm; u32 replays; /* dot11RSNAStatsCMACReplays */ u32 icverrors; /* dot11RSNAStatsCMACICVErrors */ } aes_cmac; struct { u8 rx_pn[IEEE80211_GMAC_PN_LEN]; struct crypto_aead *tfm; u32 replays; /* dot11RSNAStatsCMACReplays */ u32 icverrors; /* dot11RSNAStatsCMACICVErrors */ } aes_gmac; struct { /* Last received packet number. The first * IEEE80211_NUM_TIDS counters are used with Data * frames and the last counter is used with Robust * Management frames. */ u8 rx_pn[IEEE80211_NUM_TIDS + 1][IEEE80211_GCMP_PN_LEN]; struct crypto_aead *tfm; u32 replays; /* dot11RSNAStatsGCMPReplays */ } gcmp; struct { /* generic cipher scheme */ u8 rx_pn[IEEE80211_NUM_TIDS + 1][IEEE80211_MAX_PN_LEN]; } gen; } u; #ifdef CPTCFG_MAC80211_DEBUGFS struct { struct dentry *stalink; struct dentry *dir; int cnt; } debugfs; #endif /* * key config, must be last because it contains key * material as variable length member */ struct ieee80211_key_conf conf; }; struct ieee80211_key * ieee80211_key_alloc(u32 cipher, int idx, size_t key_len, const u8 *key_data, size_t seq_len, const u8 *seq, const struct ieee80211_cipher_scheme *cs); /* * Insert a key into data structures (sdata, sta if necessary) * to make it used, free old key. On failure, also free the new key. 
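 *
 * A minimal sketch of the expected calling pattern (locking and the
 * surrounding variable names are assumptions for illustration, not
 * part of this API):
 *
 *	key = ieee80211_key_alloc(cipher, idx, key_len, key_data,
 *				  seq_len, seq, cs);
 *	if (IS_ERR(key))
 *		return PTR_ERR(key);
 *	err = ieee80211_key_link(key, sdata, sta);
 *
 * On error, @key must not be freed again - ieee80211_key_link()
 * has already disposed of it.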
*/ int ieee80211_key_link(struct ieee80211_key *key, struct ieee80211_sub_if_data *sdata, struct sta_info *sta); void ieee80211_key_free(struct ieee80211_key *key, bool delay_tailroom); void ieee80211_key_free_unused(struct ieee80211_key *key); void ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata, int idx, bool uni, bool multi); void ieee80211_set_default_mgmt_key(struct ieee80211_sub_if_data *sdata, int idx); void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata, bool force_synchronize); void ieee80211_free_sta_keys(struct ieee80211_local *local, struct sta_info *sta); void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata); void ieee80211_reset_crypto_tx_tailroom(struct ieee80211_sub_if_data *sdata); #define key_mtx_dereference(local, ref) \ rcu_dereference_protected(ref, lockdep_is_held(&((local)->key_mtx))) void ieee80211_delayed_tailroom_dec(struct work_struct *wk); #endif /* IEEE80211_KEY_H */ backport-iwlwifi-dkms-7906/net/mac80211/led.c000066400000000000000000000266261351064634500205470ustar00rootroot00000000000000/* * Copyright 2006, Johannes Berg * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ /* just for IFNAMSIZ */ #include #include #include #include "led.h" void ieee80211_led_assoc(struct ieee80211_local *local, bool associated) { if (!atomic_read(&local->assoc_led_active)) return; if (associated) led_trigger_event(&local->assoc_led, LED_FULL); else led_trigger_event(&local->assoc_led, LED_OFF); } void ieee80211_led_radio(struct ieee80211_local *local, bool enabled) { if (!atomic_read(&local->radio_led_active)) return; if (enabled) led_trigger_event(&local->radio_led, LED_FULL); else led_trigger_event(&local->radio_led, LED_OFF); } void ieee80211_alloc_led_names(struct ieee80211_local *local) { local->rx_led.name = kasprintf(GFP_KERNEL, "%srx", wiphy_name(local->hw.wiphy)); local->tx_led.name = kasprintf(GFP_KERNEL, "%stx", wiphy_name(local->hw.wiphy)); local->assoc_led.name = kasprintf(GFP_KERNEL, "%sassoc", wiphy_name(local->hw.wiphy)); local->radio_led.name = kasprintf(GFP_KERNEL, "%sradio", wiphy_name(local->hw.wiphy)); } void ieee80211_free_led_names(struct ieee80211_local *local) { kfree(local->rx_led.name); kfree(local->tx_led.name); kfree(local->assoc_led.name); kfree(local->radio_led.name); } static int ieee80211_tx_led_activate(struct led_classdev *led_cdev) { struct ieee80211_local *local = container_of(led_cdev->trigger, struct ieee80211_local, tx_led); atomic_inc(&local->tx_led_active); return 0; } #if LINUX_VERSION_IS_LESS(4,19,0) static void bp_ieee80211_tx_led_activate(struct led_classdev *led_cdev){ ieee80211_tx_led_activate(led_cdev); } #endif static void ieee80211_tx_led_deactivate(struct led_classdev *led_cdev) { struct ieee80211_local *local = container_of(led_cdev->trigger, struct ieee80211_local, tx_led); atomic_dec(&local->tx_led_active); } static int ieee80211_rx_led_activate(struct led_classdev *led_cdev) { struct ieee80211_local *local = container_of(led_cdev->trigger, struct ieee80211_local, rx_led); atomic_inc(&local->rx_led_active); return 0; } #if LINUX_VERSION_IS_LESS(4,19,0) static void bp_ieee80211_rx_led_activate(struct led_classdev *led_cdev){ ieee80211_rx_led_activate(led_cdev); } #endif static void ieee80211_rx_led_deactivate(struct led_classdev *led_cdev) { struct ieee80211_local *local = container_of(led_cdev->trigger, struct ieee80211_local, rx_led); 
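/* once the count drops back to zero, ieee80211_led_rx() turns into a no-op */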
atomic_dec(&local->rx_led_active); } static int ieee80211_assoc_led_activate(struct led_classdev *led_cdev) { struct ieee80211_local *local = container_of(led_cdev->trigger, struct ieee80211_local, assoc_led); atomic_inc(&local->assoc_led_active); return 0; } #if LINUX_VERSION_IS_LESS(4,19,0) static void bp_ieee80211_assoc_led_activate(struct led_classdev *led_cdev){ ieee80211_assoc_led_activate(led_cdev); } #endif static void ieee80211_assoc_led_deactivate(struct led_classdev *led_cdev) { struct ieee80211_local *local = container_of(led_cdev->trigger, struct ieee80211_local, assoc_led); atomic_dec(&local->assoc_led_active); } static int ieee80211_radio_led_activate(struct led_classdev *led_cdev) { struct ieee80211_local *local = container_of(led_cdev->trigger, struct ieee80211_local, radio_led); atomic_inc(&local->radio_led_active); return 0; } #if LINUX_VERSION_IS_LESS(4,19,0) static void bp_ieee80211_radio_led_activate(struct led_classdev *led_cdev){ ieee80211_radio_led_activate(led_cdev); } #endif static void ieee80211_radio_led_deactivate(struct led_classdev *led_cdev) { struct ieee80211_local *local = container_of(led_cdev->trigger, struct ieee80211_local, radio_led); atomic_dec(&local->radio_led_active); } static int ieee80211_tpt_led_activate(struct led_classdev *led_cdev) { struct ieee80211_local *local = container_of(led_cdev->trigger, struct ieee80211_local, tpt_led); atomic_inc(&local->tpt_led_active); return 0; } #if LINUX_VERSION_IS_LESS(4,19,0) static void bp_ieee80211_tpt_led_activate(struct led_classdev *led_cdev){ ieee80211_tpt_led_activate(led_cdev); } #endif static void ieee80211_tpt_led_deactivate(struct led_classdev *led_cdev) { struct ieee80211_local *local = container_of(led_cdev->trigger, struct ieee80211_local, tpt_led); atomic_dec(&local->tpt_led_active); } void ieee80211_led_init(struct ieee80211_local *local) { atomic_set(&local->rx_led_active, 0); #if LINUX_VERSION_IS_GEQ(4,19,0) local->rx_led.activate = ieee80211_rx_led_activate; #else local->rx_led.activate = bp_ieee80211_rx_led_activate; #endif local->rx_led.deactivate = ieee80211_rx_led_deactivate; if (local->rx_led.name && led_trigger_register(&local->rx_led)) { kfree(local->rx_led.name); local->rx_led.name = NULL; } atomic_set(&local->tx_led_active, 0); #if LINUX_VERSION_IS_GEQ(4,19,0) local->tx_led.activate = ieee80211_tx_led_activate; #else local->tx_led.activate = bp_ieee80211_tx_led_activate; #endif local->tx_led.deactivate = ieee80211_tx_led_deactivate; if (local->tx_led.name && led_trigger_register(&local->tx_led)) { kfree(local->tx_led.name); local->tx_led.name = NULL; } atomic_set(&local->assoc_led_active, 0); #if LINUX_VERSION_IS_GEQ(4,19,0) local->assoc_led.activate = ieee80211_assoc_led_activate; #else local->assoc_led.activate = bp_ieee80211_assoc_led_activate; #endif local->assoc_led.deactivate = ieee80211_assoc_led_deactivate; if (local->assoc_led.name && led_trigger_register(&local->assoc_led)) { kfree(local->assoc_led.name); local->assoc_led.name = NULL; } atomic_set(&local->radio_led_active, 0); #if LINUX_VERSION_IS_GEQ(4,19,0) local->radio_led.activate = ieee80211_radio_led_activate; #else local->radio_led.activate = bp_ieee80211_radio_led_activate; #endif local->radio_led.deactivate = ieee80211_radio_led_deactivate; if (local->radio_led.name && led_trigger_register(&local->radio_led)) { kfree(local->radio_led.name); local->radio_led.name = NULL; } atomic_set(&local->tpt_led_active, 0); if (local->tpt_led_trigger) { #if LINUX_VERSION_IS_GEQ(4,19,0) local->tpt_led.activate = 
ieee80211_tpt_led_activate; #else local->tpt_led.activate = bp_ieee80211_tpt_led_activate; #endif local->tpt_led.deactivate = ieee80211_tpt_led_deactivate; if (led_trigger_register(&local->tpt_led)) { kfree(local->tpt_led_trigger); local->tpt_led_trigger = NULL; } } } void ieee80211_led_exit(struct ieee80211_local *local) { if (local->radio_led.name) led_trigger_unregister(&local->radio_led); if (local->assoc_led.name) led_trigger_unregister(&local->assoc_led); if (local->tx_led.name) led_trigger_unregister(&local->tx_led); if (local->rx_led.name) led_trigger_unregister(&local->rx_led); if (local->tpt_led_trigger) { led_trigger_unregister(&local->tpt_led); kfree(local->tpt_led_trigger); } } const char *__ieee80211_get_radio_led_name(struct ieee80211_hw *hw) { struct ieee80211_local *local = hw_to_local(hw); return local->radio_led.name; } EXPORT_SYMBOL(__ieee80211_get_radio_led_name); const char *__ieee80211_get_assoc_led_name(struct ieee80211_hw *hw) { struct ieee80211_local *local = hw_to_local(hw); return local->assoc_led.name; } EXPORT_SYMBOL(__ieee80211_get_assoc_led_name); const char *__ieee80211_get_tx_led_name(struct ieee80211_hw *hw) { struct ieee80211_local *local = hw_to_local(hw); return local->tx_led.name; } EXPORT_SYMBOL(__ieee80211_get_tx_led_name); const char *__ieee80211_get_rx_led_name(struct ieee80211_hw *hw) { struct ieee80211_local *local = hw_to_local(hw); return local->rx_led.name; } EXPORT_SYMBOL(__ieee80211_get_rx_led_name); static unsigned long tpt_trig_traffic(struct ieee80211_local *local, struct tpt_led_trigger *tpt_trig) { unsigned long traffic, delta; traffic = tpt_trig->tx_bytes + tpt_trig->rx_bytes; delta = traffic - tpt_trig->prev_traffic; tpt_trig->prev_traffic = traffic; return DIV_ROUND_UP(delta, 1024 / 8); } static void tpt_trig_timer(struct timer_list *t) { struct tpt_led_trigger *tpt_trig = from_timer(tpt_trig, t, timer); struct ieee80211_local *local = tpt_trig->local; struct led_classdev *led_cdev; unsigned long on, off, tpt; int i; if (!tpt_trig->running) return; mod_timer(&tpt_trig->timer, round_jiffies(jiffies + HZ)); tpt = tpt_trig_traffic(local, tpt_trig); /* default to just solid on */ on = 1; off = 0; for (i = tpt_trig->blink_table_len - 1; i >= 0; i--) { if (tpt_trig->blink_table[i].throughput < 0 || tpt > tpt_trig->blink_table[i].throughput) { off = tpt_trig->blink_table[i].blink_time / 2; on = tpt_trig->blink_table[i].blink_time - off; break; } } read_lock(&local->tpt_led.leddev_list_lock); list_for_each_entry(led_cdev, &local->tpt_led.led_cdevs, trig_list) led_blink_set(led_cdev, &on, &off); read_unlock(&local->tpt_led.leddev_list_lock); } const char * __ieee80211_create_tpt_led_trigger(struct ieee80211_hw *hw, unsigned int flags, const struct ieee80211_tpt_blink *blink_table, unsigned int blink_table_len) { struct ieee80211_local *local = hw_to_local(hw); struct tpt_led_trigger *tpt_trig; if (WARN_ON(local->tpt_led_trigger)) return NULL; tpt_trig = kzalloc(sizeof(struct tpt_led_trigger), GFP_KERNEL); if (!tpt_trig) return NULL; snprintf(tpt_trig->name, sizeof(tpt_trig->name), "%stpt", wiphy_name(local->hw.wiphy)); local->tpt_led.name = tpt_trig->name; tpt_trig->blink_table = blink_table; tpt_trig->blink_table_len = blink_table_len; tpt_trig->want = flags; tpt_trig->local = local; timer_setup(&tpt_trig->timer, tpt_trig_timer, 0); local->tpt_led_trigger = tpt_trig; return tpt_trig->name; } EXPORT_SYMBOL(__ieee80211_create_tpt_led_trigger); static void ieee80211_start_tpt_led_trig(struct ieee80211_local *local) { struct tpt_led_trigger 
*tpt_trig = local->tpt_led_trigger; if (tpt_trig->running) return; /* reset traffic */ tpt_trig_traffic(local, tpt_trig); tpt_trig->running = true; tpt_trig_timer(&tpt_trig->timer); mod_timer(&tpt_trig->timer, round_jiffies(jiffies + HZ)); } static void ieee80211_stop_tpt_led_trig(struct ieee80211_local *local) { struct tpt_led_trigger *tpt_trig = local->tpt_led_trigger; struct led_classdev *led_cdev; if (!tpt_trig->running) return; tpt_trig->running = false; del_timer_sync(&tpt_trig->timer); read_lock(&local->tpt_led.leddev_list_lock); list_for_each_entry(led_cdev, &local->tpt_led.led_cdevs, trig_list) led_set_brightness(led_cdev, LED_OFF); read_unlock(&local->tpt_led.leddev_list_lock); } void ieee80211_mod_tpt_led_trig(struct ieee80211_local *local, unsigned int types_on, unsigned int types_off) { struct tpt_led_trigger *tpt_trig = local->tpt_led_trigger; bool allowed; WARN_ON(types_on & types_off); if (!tpt_trig) return; tpt_trig->active &= ~types_off; tpt_trig->active |= types_on; /* * Regardless of wanted state, we shouldn't blink when * the radio is disabled -- this can happen due to some * code ordering issues with __ieee80211_recalc_idle() * being called before the radio is started. */ allowed = tpt_trig->active & IEEE80211_TPT_LEDTRIG_FL_RADIO; if (!allowed || !(tpt_trig->active & tpt_trig->want)) ieee80211_stop_tpt_led_trig(local); else ieee80211_start_tpt_led_trig(local); } backport-iwlwifi-dkms-7906/net/mac80211/led.h000066400000000000000000000057651351064634500205550ustar00rootroot00000000000000/* * Copyright 2006, Johannes Berg * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include #include #include #include "ieee80211_i.h" #define MAC80211_BLINK_DELAY 50 /* ms */ static inline void ieee80211_led_rx(struct ieee80211_local *local) { #ifdef CPTCFG_MAC80211_LEDS unsigned long led_delay __maybe_unused = MAC80211_BLINK_DELAY; if (!atomic_read(&local->rx_led_active)) return; #if LINUX_VERSION_IS_GEQ(3,6,0) led_trigger_blink_oneshot(&local->rx_led, &led_delay, &led_delay, 0); #else if (local->rx_led_counter++ % 2 == 0) led_trigger_event(&local->rx_led, LED_OFF); else led_trigger_event(&local->rx_led, LED_FULL); #endif #endif } static inline void ieee80211_led_tx(struct ieee80211_local *local) { #ifdef CPTCFG_MAC80211_LEDS unsigned long led_delay __maybe_unused = MAC80211_BLINK_DELAY; if (!atomic_read(&local->tx_led_active)) return; #if LINUX_VERSION_IS_GEQ(3,6,0) led_trigger_blink_oneshot(&local->tx_led, &led_delay, &led_delay, 0); #else if (local->tx_led_counter++ % 2 == 0) led_trigger_event(&local->tx_led, LED_OFF); else led_trigger_event(&local->tx_led, LED_FULL); #endif #endif } #ifdef CPTCFG_MAC80211_LEDS void ieee80211_led_assoc(struct ieee80211_local *local, bool associated); void ieee80211_led_radio(struct ieee80211_local *local, bool enabled); void ieee80211_alloc_led_names(struct ieee80211_local *local); void ieee80211_free_led_names(struct ieee80211_local *local); void ieee80211_led_init(struct ieee80211_local *local); void ieee80211_led_exit(struct ieee80211_local *local); void ieee80211_mod_tpt_led_trig(struct ieee80211_local *local, unsigned int types_on, unsigned int types_off); #else static inline void ieee80211_led_assoc(struct ieee80211_local *local, bool associated) { } static inline void ieee80211_led_radio(struct ieee80211_local *local, bool enabled) { } static inline void ieee80211_alloc_led_names(struct ieee80211_local 
*local) { } static inline void ieee80211_free_led_names(struct ieee80211_local *local) { } static inline void ieee80211_led_init(struct ieee80211_local *local) { } static inline void ieee80211_led_exit(struct ieee80211_local *local) { } static inline void ieee80211_mod_tpt_led_trig(struct ieee80211_local *local, unsigned int types_on, unsigned int types_off) { } #endif static inline void ieee80211_tpt_led_trig_tx(struct ieee80211_local *local, __le16 fc, int bytes) { #ifdef CPTCFG_MAC80211_LEDS if (ieee80211_is_data(fc) && atomic_read(&local->tpt_led_active)) local->tpt_led_trigger->tx_bytes += bytes; #endif } static inline void ieee80211_tpt_led_trig_rx(struct ieee80211_local *local, __le16 fc, int bytes) { #ifdef CPTCFG_MAC80211_LEDS if (ieee80211_is_data(fc) && atomic_read(&local->tpt_led_active)) local->tpt_led_trigger->rx_bytes += bytes; #endif } backport-iwlwifi-dkms-7906/net/mac80211/main.c000066400000000000000000001220471351064634500207210ustar00rootroot00000000000000/* * Copyright 2002-2005, Instant802 Networks, Inc. * Copyright 2005-2006, Devicescape Software, Inc. * Copyright 2006-2007 Jiri Benc * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2017 Intel Deutschland GmbH * Copyright (C) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "ieee80211_i.h" #include "driver-ops.h" #include "rate.h" #include "mesh.h" #include "wep.h" #include "led.h" #include "debugfs.h" void ieee80211_configure_filter(struct ieee80211_local *local) { u64 mc; unsigned int changed_flags; unsigned int new_flags = 0; if (atomic_read(&local->iff_allmultis)) new_flags |= FIF_ALLMULTI; if (local->monitors || test_bit(SCAN_SW_SCANNING, &local->scanning) || test_bit(SCAN_ONCHANNEL_SCANNING, &local->scanning)) new_flags |= FIF_BCN_PRBRESP_PROMISC; if (local->fif_probe_req || local->probe_req_reg) new_flags |= FIF_PROBE_REQ; if (local->fif_fcsfail) new_flags |= FIF_FCSFAIL; if (local->fif_plcpfail) new_flags |= FIF_PLCPFAIL; if (local->fif_control) new_flags |= FIF_CONTROL; if (local->fif_other_bss) new_flags |= FIF_OTHER_BSS; if (local->fif_pspoll) new_flags |= FIF_PSPOLL; spin_lock_bh(&local->filter_lock); changed_flags = local->filter_flags ^ new_flags; mc = drv_prepare_multicast(local, &local->mc_list); spin_unlock_bh(&local->filter_lock); /* be a bit nasty */ new_flags |= (1<<31); drv_configure_filter(local, changed_flags, &new_flags, mc); WARN_ON(new_flags & (1<<31)); local->filter_flags = new_flags & ~(1<<31); } static void ieee80211_reconfig_filter(struct work_struct *work) { struct ieee80211_local *local = container_of(work, struct ieee80211_local, reconfig_filter); ieee80211_configure_filter(local); } static u32 ieee80211_hw_conf_chan(struct ieee80211_local *local) { struct ieee80211_sub_if_data *sdata; struct cfg80211_chan_def chandef = {}; u32 changed = 0; int power; u32 offchannel_flag; offchannel_flag = local->hw.conf.flags & IEEE80211_CONF_OFFCHANNEL; if (local->scan_chandef.chan) { chandef = local->scan_chandef; } else if (local->tmp_channel) { chandef.chan = local->tmp_channel; chandef.width = NL80211_CHAN_WIDTH_20_NOHT; chandef.center_freq1 = chandef.chan->center_freq; } else chandef = local->_oper_chandef; WARN(!cfg80211_chandef_valid(&chandef), "control:%d MHz 
width:%d center: %d/%d MHz", chandef.chan->center_freq, chandef.width, chandef.center_freq1, chandef.center_freq2); if (!cfg80211_chandef_identical(&chandef, &local->_oper_chandef)) local->hw.conf.flags |= IEEE80211_CONF_OFFCHANNEL; else local->hw.conf.flags &= ~IEEE80211_CONF_OFFCHANNEL; offchannel_flag ^= local->hw.conf.flags & IEEE80211_CONF_OFFCHANNEL; if (offchannel_flag || !cfg80211_chandef_identical(&local->hw.conf.chandef, &local->_oper_chandef)) { local->hw.conf.chandef = chandef; changed |= IEEE80211_CONF_CHANGE_CHANNEL; } if (!conf_is_ht(&local->hw.conf)) { /* * mac80211.h documents that this is only valid * when the channel is set to an HT type, and * that otherwise STATIC is used. */ local->hw.conf.smps_mode = IEEE80211_SMPS_STATIC; } else if (local->hw.conf.smps_mode != local->smps_mode) { local->hw.conf.smps_mode = local->smps_mode; changed |= IEEE80211_CONF_CHANGE_SMPS; } power = ieee80211_chandef_max_power(&chandef); rcu_read_lock(); list_for_each_entry_rcu(sdata, &local->interfaces, list) { if (!rcu_access_pointer(sdata->vif.chanctx_conf)) continue; if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) continue; power = min(power, sdata->vif.bss_conf.txpower); } rcu_read_unlock(); if (local->hw.conf.power_level != power) { changed |= IEEE80211_CONF_CHANGE_POWER; local->hw.conf.power_level = power; } return changed; } int ieee80211_hw_config(struct ieee80211_local *local, u32 changed) { int ret = 0; might_sleep(); if (!local->use_chanctx) changed |= ieee80211_hw_conf_chan(local); else changed &= ~(IEEE80211_CONF_CHANGE_CHANNEL | IEEE80211_CONF_CHANGE_POWER); if (changed && local->open_count) { ret = drv_config(local, changed); /* * Goal: * HW reconfiguration should never fail, the driver has told * us what it can support so it should live up to that promise. * * Current status: * rfkill is not integrated with mac80211 and a * configuration command can thus fail if hardware rfkill * is enabled * * FIXME: integrate rfkill with mac80211 and then add this * WARN_ON() back * */ /* WARN_ON(ret); */ } return ret; } void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata, u32 changed) { struct ieee80211_local *local = sdata->local; if (!changed || sdata->vif.type == NL80211_IFTYPE_AP_VLAN) return; drv_bss_info_changed(local, sdata, &sdata->vif.bss_conf, changed); } u32 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata) { sdata->vif.bss_conf.use_cts_prot = false; sdata->vif.bss_conf.use_short_preamble = false; sdata->vif.bss_conf.use_short_slot = false; return BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_ERP_PREAMBLE | BSS_CHANGED_ERP_SLOT; } static void ieee80211_tasklet_handler(unsigned long data) { struct ieee80211_local *local = (struct ieee80211_local *) data; struct sk_buff *skb; while ((skb = skb_dequeue(&local->skb_queue)) || (skb = skb_dequeue(&local->skb_queue_unreliable))) { switch (skb->pkt_type) { case IEEE80211_RX_MSG: /* Clear skb->pkt_type in order to not confuse kernel * netstack. 
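 * mac80211 reuses skb->pkt_type as a message discriminator while
 * frames wait on these internal queues; any leftover nonzero value
 * would be read as a PACKET_* classification by the stack, hence
 * the reset to 0 (PACKET_HOST) below.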
*/ skb->pkt_type = 0; ieee80211_rx(&local->hw, skb); break; case IEEE80211_TX_STATUS_MSG: skb->pkt_type = 0; ieee80211_tx_status(&local->hw, skb); break; default: WARN(1, "mac80211: Packet is of unknown type %d\n", skb->pkt_type); dev_kfree_skb(skb); break; } } } static void ieee80211_restart_work(struct work_struct *work) { struct ieee80211_local *local = container_of(work, struct ieee80211_local, restart_work); struct ieee80211_sub_if_data *sdata; /* wait for scan work complete */ flush_workqueue(local->workqueue); flush_work(&local->sched_scan_stopped_work); WARN(test_bit(SCAN_HW_SCANNING, &local->scanning), "%s called with hardware scan in progress\n", __func__); flush_work(&local->radar_detected_work); rtnl_lock(); list_for_each_entry(sdata, &local->interfaces, list) { /* * XXX: there may be more work for other vif types and even * for station mode: a good thing would be to run most of * the iface type's dependent _stop (ieee80211_mg_stop, * ieee80211_ibss_stop) etc... * For now, fix only the specific bug that was seen: race * between csa_connection_drop_work and us. */ if (sdata->vif.type == NL80211_IFTYPE_STATION) { /* * This worker is scheduled from the iface worker that * runs on mac80211's workqueue, so we can't be * scheduling this worker after the cancel right here. * The exception is ieee80211_chswitch_done. * Then we can have a race... */ cancel_work_sync(&sdata->u.mgd.csa_connection_drop_work); } flush_delayed_work(&sdata->dec_tailroom_needed_wk); } ieee80211_scan_cancel(local); /* make sure any new ROC will consider local->in_reconfig */ flush_delayed_work(&local->roc_work); flush_work(&local->hw_roc_done); /* wait for all packet processing to be done */ synchronize_net(); ieee80211_reconfig(local); rtnl_unlock(); } void ieee80211_restart_hw(struct ieee80211_hw *hw) { struct ieee80211_local *local = hw_to_local(hw); trace_api_restart_hw(local); wiphy_info(hw->wiphy, "Hardware restart was requested\n"); /* use this reason, ieee80211_reconfig will unblock it */ ieee80211_stop_queues_by_reason(hw, IEEE80211_MAX_QUEUE_MAP, IEEE80211_QUEUE_STOP_REASON_SUSPEND, false); /* * Stop all Rx during the reconfig. We don't want state changes * or driver callbacks while this is in progress. 
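 * The RX path consults local->in_reconfig to drop incoming frames,
 * and the barrier() below makes sure the flag is visible as set
 * before the restart worker can begin running.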
*/ local->in_reconfig = true; barrier(); queue_work(system_freezable_wq, &local->restart_work); } EXPORT_SYMBOL(ieee80211_restart_hw); #ifdef CONFIG_INET static int ieee80211_ifa_changed(struct notifier_block *nb, unsigned long data, void *arg) { struct in_ifaddr *ifa = arg; struct ieee80211_local *local = container_of(nb, struct ieee80211_local, ifa_notifier); struct net_device *ndev = ifa->ifa_dev->dev; struct wireless_dev *wdev = ndev->ieee80211_ptr; struct in_device *idev; struct ieee80211_sub_if_data *sdata; struct ieee80211_bss_conf *bss_conf; struct ieee80211_if_managed *ifmgd; int c = 0; /* Make sure it's our interface that got changed */ if (!wdev) return NOTIFY_DONE; if (wdev->wiphy != local->hw.wiphy) return NOTIFY_DONE; sdata = IEEE80211_DEV_TO_SUB_IF(ndev); bss_conf = &sdata->vif.bss_conf; /* ARP filtering is only supported in managed mode */ if (sdata->vif.type != NL80211_IFTYPE_STATION) return NOTIFY_DONE; idev = __in_dev_get_rtnl(sdata->dev); if (!idev) return NOTIFY_DONE; ifmgd = &sdata->u.mgd; sdata_lock(sdata); /* Copy the addresses to the bss_conf list */ ifa = idev->ifa_list; while (ifa) { if (c < IEEE80211_BSS_ARP_ADDR_LIST_LEN) bss_conf->arp_addr_list[c] = ifa->ifa_address; ifa = ifa->ifa_next; c++; } bss_conf->arp_addr_cnt = c; /* Configure driver only if associated (which also implies it is up) */ if (ifmgd->associated) ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_ARP_FILTER); sdata_unlock(sdata); #ifdef CPTCFG_IWLMVM_VENDOR_CMDS ieee80211_check_fast_rx_iface(sdata); #endif return NOTIFY_OK; } #endif #if IS_ENABLED(CONFIG_IPV6) static int ieee80211_ifa6_changed(struct notifier_block *nb, unsigned long data, void *arg) { struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)arg; struct inet6_dev *idev = ifa->idev; struct net_device *ndev = ifa->idev->dev; struct ieee80211_local *local = container_of(nb, struct ieee80211_local, ifa6_notifier); struct wireless_dev *wdev = ndev->ieee80211_ptr; struct ieee80211_sub_if_data *sdata; /* Make sure it's our interface that got changed */ if (!wdev || wdev->wiphy != local->hw.wiphy) return NOTIFY_DONE; sdata = IEEE80211_DEV_TO_SUB_IF(ndev); /* * For now only support station mode. This is mostly because * doing AP would have to handle AP_VLAN in some way ... */ if (sdata->vif.type != NL80211_IFTYPE_STATION) return NOTIFY_DONE; drv_ipv6_addr_change(local, sdata, idev); return NOTIFY_OK; } #endif /* There isn't a lot of sense in it, but you can transmit anything you like */ static const struct ieee80211_txrx_stypes ieee80211_default_mgmt_stypes[NUM_NL80211_IFTYPES] = { [NL80211_IFTYPE_ADHOC] = { .tx = 0xffff, .rx = BIT(IEEE80211_STYPE_ACTION >> 4) | BIT(IEEE80211_STYPE_AUTH >> 4) | BIT(IEEE80211_STYPE_DEAUTH >> 4) | BIT(IEEE80211_STYPE_PROBE_REQ >> 4), }, [NL80211_IFTYPE_STATION] = { .tx = 0xffff, /* * To support Pre Association Security Negotiation (PASN) while * already associated to one AP, allow user space to register to * Rx authentication frames, so that the user space logic would * be able to receive/handle authentication frames from a * different AP as part of PASN. * It is expected that user space would intelligently register * for Rx authentication frames, i.e., only when PASN is used * and configure a match filter only for PASN authentication * algorithm, as otherwise the MLME functionality of mac80211 * would be broken. 
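 * As an illustrative sketch (exact attribute spelling aside), such a
 * user space registration via NL80211_CMD_REGISTER_FRAME would use
 * frame type (IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_AUTH) with a
 * two-byte match pattern { 0x07, 0x00 }, i.e. the little-endian
 * authentication algorithm field carrying the PASN algorithm number.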
*/ .rx = BIT(IEEE80211_STYPE_ACTION >> 4) | BIT(IEEE80211_STYPE_AUTH >> 4) | BIT(IEEE80211_STYPE_PROBE_REQ >> 4), }, [NL80211_IFTYPE_AP] = { .tx = 0xffff, .rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) | BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) | BIT(IEEE80211_STYPE_PROBE_REQ >> 4) | BIT(IEEE80211_STYPE_DISASSOC >> 4) | BIT(IEEE80211_STYPE_AUTH >> 4) | BIT(IEEE80211_STYPE_DEAUTH >> 4) | BIT(IEEE80211_STYPE_ACTION >> 4), }, [NL80211_IFTYPE_AP_VLAN] = { /* copy AP */ .tx = 0xffff, .rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) | BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) | BIT(IEEE80211_STYPE_PROBE_REQ >> 4) | BIT(IEEE80211_STYPE_DISASSOC >> 4) | BIT(IEEE80211_STYPE_AUTH >> 4) | BIT(IEEE80211_STYPE_DEAUTH >> 4) | BIT(IEEE80211_STYPE_ACTION >> 4), }, [NL80211_IFTYPE_P2P_CLIENT] = { .tx = 0xffff, .rx = BIT(IEEE80211_STYPE_ACTION >> 4) | BIT(IEEE80211_STYPE_PROBE_REQ >> 4), }, [NL80211_IFTYPE_P2P_GO] = { .tx = 0xffff, .rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) | BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) | BIT(IEEE80211_STYPE_PROBE_REQ >> 4) | BIT(IEEE80211_STYPE_DISASSOC >> 4) | BIT(IEEE80211_STYPE_AUTH >> 4) | BIT(IEEE80211_STYPE_DEAUTH >> 4) | BIT(IEEE80211_STYPE_ACTION >> 4), }, [NL80211_IFTYPE_MESH_POINT] = { .tx = 0xffff, .rx = BIT(IEEE80211_STYPE_ACTION >> 4) | BIT(IEEE80211_STYPE_AUTH >> 4) | BIT(IEEE80211_STYPE_DEAUTH >> 4), }, [NL80211_IFTYPE_P2P_DEVICE] = { .tx = 0xffff, .rx = BIT(IEEE80211_STYPE_ACTION >> 4) | BIT(IEEE80211_STYPE_PROBE_REQ >> 4), }, }; static const struct ieee80211_ht_cap mac80211_ht_capa_mod_mask = { .ampdu_params_info = IEEE80211_HT_AMPDU_PARM_FACTOR | IEEE80211_HT_AMPDU_PARM_DENSITY, .cap_info = cpu_to_le16(IEEE80211_HT_CAP_SUP_WIDTH_20_40 | IEEE80211_HT_CAP_MAX_AMSDU | IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_LDPC_CODING | IEEE80211_HT_CAP_40MHZ_INTOLERANT), .mcs = { .rx_mask = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, }, }, }; static const struct ieee80211_vht_cap mac80211_vht_capa_mod_mask = { .vht_cap_info = cpu_to_le32(IEEE80211_VHT_CAP_RXLDPC | IEEE80211_VHT_CAP_SHORT_GI_80 | IEEE80211_VHT_CAP_SHORT_GI_160 | IEEE80211_VHT_CAP_RXSTBC_MASK | IEEE80211_VHT_CAP_TXSTBC | IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE | IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN | IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN | IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK), .supp_mcs = { .rx_mcs_map = cpu_to_le16(~0), .tx_mcs_map = cpu_to_le16(~0), }, }; struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len, const struct ieee80211_ops *ops, const char *requested_name) { struct ieee80211_local *local; int priv_size, i; struct wiphy *wiphy; bool use_chanctx; if (WARN_ON(!ops->tx || !ops->start || !ops->stop || !ops->config || !ops->add_interface || !ops->remove_interface || !ops->configure_filter)) return NULL; if (WARN_ON(ops->sta_state && (ops->sta_add || ops->sta_remove))) return NULL; /* check all or no channel context operations exist */ i = !!ops->add_chanctx + !!ops->remove_chanctx + !!ops->change_chanctx + !!ops->assign_vif_chanctx + !!ops->unassign_vif_chanctx; if (WARN_ON(i != 0 && i != 5)) return NULL; use_chanctx = i == 5; /* Ensure 32-byte alignment of our private data and hw private data. 
* We use the wiphy priv data for both our ieee80211_local and for * the driver's private data * * In memory it'll be like this: * * +-------------------------+ * | struct wiphy | * +-------------------------+ * | struct ieee80211_local | * +-------------------------+ * | driver's private data | * +-------------------------+ * */ priv_size = ALIGN(sizeof(*local), NETDEV_ALIGN) + priv_data_len; wiphy = wiphy_new_nm(&mac80211_config_ops, priv_size, requested_name); if (!wiphy) return NULL; wiphy->mgmt_stypes = ieee80211_default_mgmt_stypes; wiphy->privid = mac80211_wiphy_privid; wiphy->flags |= WIPHY_FLAG_NETNS_OK | WIPHY_FLAG_4ADDR_AP | WIPHY_FLAG_4ADDR_STATION | WIPHY_FLAG_REPORTS_OBSS | WIPHY_FLAG_OFFCHAN_TX; if (ops->remain_on_channel) wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; wiphy->features |= NL80211_FEATURE_SK_TX_STATUS | NL80211_FEATURE_SAE | NL80211_FEATURE_HT_IBSS | NL80211_FEATURE_VIF_TXPOWER | NL80211_FEATURE_MAC_ON_CREATE | NL80211_FEATURE_USERSPACE_MPM | NL80211_FEATURE_FULL_AP_CLIENT_STATE; #if LINUX_VERSION_IS_GEQ(4,3,0) wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_FILS_STA); #endif wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CONTROL_PORT_OVER_NL80211); if (!ops->hw_scan) { wiphy->features |= NL80211_FEATURE_LOW_PRIORITY_SCAN | NL80211_FEATURE_AP_SCAN; /* * if the driver behaves correctly using the probe request * (template) from mac80211, then both of these should be * supported even with hw scan - but let drivers opt in. */ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_SCAN_RANDOM_SN); wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_SCAN_MIN_PREQ_CONTENT); } if (!ops->set_key) wiphy->flags |= WIPHY_FLAG_IBSS_RSN; if (ops->wake_tx_queue) wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_TXQS); wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_RRM); wiphy->bss_priv_size = sizeof(struct ieee80211_bss); local = wiphy_priv(wiphy); if (sta_info_init(local)) goto err_free; local->hw.wiphy = wiphy; local->hw.priv = (char *)local + ALIGN(sizeof(*local), NETDEV_ALIGN); local->ops = ops; local->use_chanctx = use_chanctx; /* * We need a bit of data queued to build aggregates properly, so * instruct the TCP stack to allow more than a single ms of data * to be queued in the stack. The value is a bit-shift of 1 * second, so 8 is ~4ms of queued data. Only affects local TCP * sockets. * This is the default, anyhow - drivers may need to override it * for local reasons (longer buffers, longer completion time, or * similar). 
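 * Worked example: the pacing rate is in bytes per second and the
 * stack keeps roughly rate >> sk_pacing_shift bytes in flight per
 * socket, so at 1 Gb/s (~125 MB/s) a shift of 8 allows about
 * 125e6 / 256 ~= 488 kB queued, i.e. the ~4ms mentioned above.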
*/ local->hw.tx_sk_pacing_shift = 8; /* set up some defaults */ local->hw.queues = 1; local->hw.max_rates = 1; local->hw.max_report_rates = 0; local->hw.max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HT; local->hw.max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HT; local->hw.offchannel_tx_hw_queue = IEEE80211_INVAL_HW_QUEUE; local->hw.conf.long_frame_max_tx_count = wiphy->retry_long; local->hw.conf.short_frame_max_tx_count = wiphy->retry_short; local->hw.radiotap_mcs_details = IEEE80211_RADIOTAP_MCS_HAVE_MCS | IEEE80211_RADIOTAP_MCS_HAVE_GI | IEEE80211_RADIOTAP_MCS_HAVE_BW; local->hw.radiotap_vht_details = IEEE80211_RADIOTAP_VHT_KNOWN_GI | IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH; local->hw.uapsd_queues = IEEE80211_DEFAULT_UAPSD_QUEUES; local->hw.uapsd_max_sp_len = IEEE80211_DEFAULT_MAX_SP_LEN; local->user_power_level = IEEE80211_UNSET_POWER_LEVEL; wiphy->ht_capa_mod_mask = &mac80211_ht_capa_mod_mask; wiphy->vht_capa_mod_mask = &mac80211_vht_capa_mod_mask; local->ext_capa[7] = WLAN_EXT_CAPA8_OPMODE_NOTIF; wiphy->extended_capabilities = local->ext_capa; wiphy->extended_capabilities_mask = local->ext_capa; wiphy->extended_capabilities_len = ARRAY_SIZE(local->ext_capa); INIT_LIST_HEAD(&local->interfaces); INIT_LIST_HEAD(&local->mon_list); __hw_addr_init(&local->mc_list); mutex_init(&local->iflist_mtx); mutex_init(&local->mtx); mutex_init(&local->key_mtx); spin_lock_init(&local->filter_lock); spin_lock_init(&local->rx_path_lock); spin_lock_init(&local->queue_stop_reason_lock); INIT_LIST_HEAD(&local->chanctx_list); mutex_init(&local->chanctx_mtx); INIT_DELAYED_WORK(&local->scan_work, ieee80211_scan_work); INIT_WORK(&local->restart_work, ieee80211_restart_work); INIT_WORK(&local->radar_detected_work, ieee80211_dfs_radar_detected_work); INIT_WORK(&local->reconfig_filter, ieee80211_reconfig_filter); local->smps_mode = IEEE80211_SMPS_OFF; INIT_WORK(&local->dynamic_ps_enable_work, ieee80211_dynamic_ps_enable_work); INIT_WORK(&local->dynamic_ps_disable_work, ieee80211_dynamic_ps_disable_work); timer_setup(&local->dynamic_ps_timer, ieee80211_dynamic_ps_timer, 0); INIT_WORK(&local->sched_scan_stopped_work, ieee80211_sched_scan_stopped_work); INIT_WORK(&local->tdls_chsw_work, ieee80211_tdls_chsw_work); spin_lock_init(&local->ack_status_lock); idr_init(&local->ack_status_frames); for (i = 0; i < IEEE80211_MAX_QUEUES; i++) { skb_queue_head_init(&local->pending[i]); atomic_set(&local->agg_queue_stop[i], 0); } tasklet_init(&local->tx_pending_tasklet, ieee80211_tx_pending, (unsigned long)local); if (ops->wake_tx_queue) tasklet_init(&local->wake_txqs_tasklet, ieee80211_wake_txqs, (unsigned long)local); tasklet_init(&local->tasklet, ieee80211_tasklet_handler, (unsigned long) local); skb_queue_head_init(&local->skb_queue); skb_queue_head_init(&local->skb_queue_unreliable); skb_queue_head_init(&local->skb_queue_tdls_chsw); ieee80211_alloc_led_names(local); ieee80211_roc_setup(local); local->hw.radiotap_timestamp.units_pos = -1; local->hw.radiotap_timestamp.accuracy = -1; return &local->hw; err_free: wiphy_free(wiphy); return NULL; } EXPORT_SYMBOL(ieee80211_alloc_hw_nm); static int ieee80211_init_cipher_suites(struct ieee80211_local *local) { bool have_wep = !(IS_ERR(local->wep_tx_tfm) || IS_ERR(local->wep_rx_tfm)); bool have_mfp = ieee80211_hw_check(&local->hw, MFP_CAPABLE); int n_suites = 0, r = 0, w = 0; u32 *suites; static const u32 cipher_suites[] = { /* keep WEP first, it may be removed below */ WLAN_CIPHER_SUITE_WEP40, WLAN_CIPHER_SUITE_WEP104, WLAN_CIPHER_SUITE_TKIP, WLAN_CIPHER_SUITE_CCMP, 
WLAN_CIPHER_SUITE_CCMP_256, #if IS_ENABLED(CONFIG_CRYPTO_GCM) WLAN_CIPHER_SUITE_GCMP, WLAN_CIPHER_SUITE_GCMP_256, #endif /* keep last -- depends on hw flags! */ WLAN_CIPHER_SUITE_AES_CMAC, WLAN_CIPHER_SUITE_BIP_CMAC_256, #if IS_ENABLED(CONFIG_CRYPTO_GCM) WLAN_CIPHER_SUITE_BIP_GMAC_128, WLAN_CIPHER_SUITE_BIP_GMAC_256, #endif }; if (ieee80211_hw_check(&local->hw, SW_CRYPTO_CONTROL) || local->hw.wiphy->cipher_suites) { /* If the driver advertises, or doesn't support SW crypto, * we only need to remove WEP if necessary. */ if (have_wep) return 0; /* well if it has _no_ ciphers ... fine */ if (!local->hw.wiphy->n_cipher_suites) return 0; /* Driver provides cipher suites, but we need to exclude WEP */ suites = kmemdup(local->hw.wiphy->cipher_suites, sizeof(u32) * local->hw.wiphy->n_cipher_suites, GFP_KERNEL); if (!suites) return -ENOMEM; for (r = 0; r < local->hw.wiphy->n_cipher_suites; r++) { u32 suite = local->hw.wiphy->cipher_suites[r]; if (suite == WLAN_CIPHER_SUITE_WEP40 || suite == WLAN_CIPHER_SUITE_WEP104) continue; suites[w++] = suite; } } else if (!local->hw.cipher_schemes) { /* If the driver doesn't have cipher schemes, there's nothing * else to do other than assign the (software supported and * perhaps offloaded) cipher suites. */ local->hw.wiphy->cipher_suites = cipher_suites; local->hw.wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites); if (!have_mfp) local->hw.wiphy->n_cipher_suites -= 2 + 2 * IS_ENABLED(CONFIG_CRYPTO_GCM); if (!have_wep) { local->hw.wiphy->cipher_suites += 2; local->hw.wiphy->n_cipher_suites -= 2; } /* not dynamically allocated, so just return */ return 0; } else { const struct ieee80211_cipher_scheme *cs; cs = local->hw.cipher_schemes; /* Driver specifies cipher schemes only (but not cipher suites * including the schemes) * * We start counting ciphers defined by schemes, TKIP, CCMP, * CCMP-256, GCMP, and GCMP-256 */ n_suites = local->hw.n_cipher_schemes + 3 + 2 * IS_ENABLED(CONFIG_CRYPTO_GCM); /* check if we have WEP40 and WEP104 */ if (have_wep) n_suites += 2; /* check if we have AES_CMAC, BIP-CMAC-256, BIP-GMAC-128, * BIP-GMAC-256 */ if (have_mfp) n_suites += 1 + 2 * IS_ENABLED(CONFIG_CRYPTO_GCM); suites = kmalloc_array(n_suites, sizeof(u32), GFP_KERNEL); if (!suites) return -ENOMEM; suites[w++] = WLAN_CIPHER_SUITE_CCMP; suites[w++] = WLAN_CIPHER_SUITE_CCMP_256; suites[w++] = WLAN_CIPHER_SUITE_TKIP; #if IS_ENABLED(CONFIG_CRYPTO_GCM) suites[w++] = WLAN_CIPHER_SUITE_GCMP; suites[w++] = WLAN_CIPHER_SUITE_GCMP_256; #endif if (have_wep) { suites[w++] = WLAN_CIPHER_SUITE_WEP40; suites[w++] = WLAN_CIPHER_SUITE_WEP104; } if (have_mfp) { suites[w++] = WLAN_CIPHER_SUITE_AES_CMAC; suites[w++] = WLAN_CIPHER_SUITE_BIP_CMAC_256; #if IS_ENABLED(CONFIG_CRYPTO_GCM) suites[w++] = WLAN_CIPHER_SUITE_BIP_GMAC_128; suites[w++] = WLAN_CIPHER_SUITE_BIP_GMAC_256; #endif } for (r = 0; r < local->hw.n_cipher_schemes; r++) { suites[w++] = cs[r].cipher; if (WARN_ON(cs[r].pn_len > IEEE80211_MAX_PN_LEN)) { kfree(suites); return -EINVAL; } } } local->hw.wiphy->cipher_suites = suites; local->hw.wiphy->n_cipher_suites = w; local->wiphy_ciphers_allocated = true; return 0; } int ieee80211_register_hw(struct ieee80211_hw *hw) { struct ieee80211_local *local = hw_to_local(hw); int result, i; enum nl80211_band band; int channels, max_bitrates; bool supp_ht, supp_vht, supp_he; netdev_features_t feature_whitelist; struct cfg80211_chan_def dflt_chandef = {}; if (ieee80211_hw_check(hw, QUEUE_CONTROL) && (local->hw.offchannel_tx_hw_queue == IEEE80211_INVAL_HW_QUEUE || local->hw.offchannel_tx_hw_queue >= 
local->hw.queues)) return -EINVAL; if ((hw->wiphy->features & NL80211_FEATURE_TDLS_CHANNEL_SWITCH) && (!local->ops->tdls_channel_switch || !local->ops->tdls_cancel_channel_switch || !local->ops->tdls_recv_channel_switch)) return -EOPNOTSUPP; if (WARN_ON(ieee80211_hw_check(hw, SUPPORTS_TX_FRAG) && !local->ops->set_frag_threshold)) return -EINVAL; if (WARN_ON(local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_NAN) && (!local->ops->start_nan || !local->ops->stop_nan))) return -EINVAL; #ifdef CONFIG_PM if (hw->wiphy->wowlan && (!local->ops->suspend || !local->ops->resume)) return -EINVAL; #endif if (!local->use_chanctx) { for (i = 0; i < local->hw.wiphy->n_iface_combinations; i++) { const struct ieee80211_iface_combination *comb; comb = &local->hw.wiphy->iface_combinations[i]; if (comb->num_different_channels > 1) return -EINVAL; } } else { /* * WDS is currently prohibited when channel contexts are used * because there's no clear definition of which channel WDS * type interfaces use */ if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_WDS)) return -EINVAL; /* DFS is not supported with multi-channel combinations yet */ for (i = 0; i < local->hw.wiphy->n_iface_combinations; i++) { const struct ieee80211_iface_combination *comb; comb = &local->hw.wiphy->iface_combinations[i]; if (comb->radar_detect_widths && comb->num_different_channels > 1) return -EINVAL; } } /* Only HW csum features are currently compatible with mac80211 */ feature_whitelist = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_GSO_SOFTWARE | NETIF_F_RXCSUM; if (WARN_ON(hw->netdev_features & ~feature_whitelist)) return -EINVAL; if (hw->max_report_rates == 0) hw->max_report_rates = hw->max_rates; local->rx_chains = 1; /* * generic code guarantees at least one band, * set this very early because much code assumes * that hw.conf.channel is assigned */ channels = 0; max_bitrates = 0; supp_ht = false; supp_vht = false; supp_he = false; for (band = 0; band < NUM_NL80211_BANDS; band++) { struct ieee80211_supported_band *sband; sband = local->hw.wiphy->bands[band]; if (!sband) continue; if (!dflt_chandef.chan) { cfg80211_chandef_create(&dflt_chandef, &sband->channels[0], NL80211_CHAN_NO_HT); /* init channel we're on */ if (!local->use_chanctx && !local->_oper_chandef.chan) { local->hw.conf.chandef = dflt_chandef; local->_oper_chandef = dflt_chandef; } local->monitor_chandef = dflt_chandef; } channels += sband->n_channels; if (max_bitrates < sband->n_bitrates) max_bitrates = sband->n_bitrates; supp_ht = supp_ht || sband->ht_cap.ht_supported; supp_vht = supp_vht || sband->vht_cap.vht_supported; if (!supp_he) supp_he = !!ieee80211_get_he_sta_cap(sband); if (!sband->ht_cap.ht_supported) continue; /* TODO: consider VHT for RX chains, hopefully it's the same */ local->rx_chains = max(ieee80211_mcs_to_chains(&sband->ht_cap.mcs), local->rx_chains); /* no need to mask, SM_PS_DISABLED has all bits set */ sband->ht_cap.cap |= WLAN_HT_CAP_SM_PS_DISABLED << IEEE80211_HT_CAP_SM_PS_SHIFT; } /* TODO: Add support for NAN Data interfaces */ if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_NAN_DATA)) return -EINVAL; /* if low-level driver supports AP, we also support VLAN. * drivers advertising SW_CRYPTO_CONTROL should enable AP_VLAN * based on their support to transmit SW encrypted packets. 
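 * e.g. a driver that does set SW_CRYPTO_CONTROL but can transmit
 * software-encrypted frames would opt in itself, mirroring the two
 * assignments below:
 *
 *	hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP_VLAN);
 *	hw->wiphy->software_iftypes |= BIT(NL80211_IFTYPE_AP_VLAN);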
*/ if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_AP) && !ieee80211_hw_check(&local->hw, SW_CRYPTO_CONTROL)) { hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP_VLAN); hw->wiphy->software_iftypes |= BIT(NL80211_IFTYPE_AP_VLAN); } /* mac80211 always supports monitor */ hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_MONITOR); hw->wiphy->software_iftypes |= BIT(NL80211_IFTYPE_MONITOR); /* mac80211 doesn't support more than one IBSS interface right now */ for (i = 0; i < hw->wiphy->n_iface_combinations; i++) { const struct ieee80211_iface_combination *c; int j; c = &hw->wiphy->iface_combinations[i]; for (j = 0; j < c->n_limits; j++) if ((c->limits[j].types & BIT(NL80211_IFTYPE_ADHOC)) && c->limits[j].max > 1) return -EINVAL; } local->int_scan_req = kzalloc(sizeof(*local->int_scan_req) + sizeof(void *) * channels, GFP_KERNEL); if (!local->int_scan_req) return -ENOMEM; for (band = 0; band < NUM_NL80211_BANDS; band++) { if (!local->hw.wiphy->bands[band]) continue; local->int_scan_req->rates[band] = (u32) -1; } #ifndef CPTCFG_MAC80211_MESH /* mesh depends on Kconfig, but drivers should set it if they want */ local->hw.wiphy->interface_modes &= ~BIT(NL80211_IFTYPE_MESH_POINT); #endif /* if the underlying driver supports mesh, mac80211 will (at least) * provide routing of mesh authentication frames to userspace */ if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_MESH_POINT)) local->hw.wiphy->flags |= WIPHY_FLAG_MESH_AUTH; /* mac80211 supports control port protocol changing */ local->hw.wiphy->flags |= WIPHY_FLAG_CONTROL_PORT_PROTOCOL; if (ieee80211_hw_check(&local->hw, SIGNAL_DBM)) { local->hw.wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM; } else if (ieee80211_hw_check(&local->hw, SIGNAL_UNSPEC)) { local->hw.wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC; if (hw->max_signal <= 0) { result = -EINVAL; goto fail_wiphy_register; } } /* * Calculate scan IE length -- we need this to alloc * memory and to subtract from the driver limit. It * includes the DS Params, (extended) supported rates, and HT * information -- SSID is the driver's responsibility. */ local->scan_ies_len = 4 + max_bitrates /* (ext) supp rates */ + 3 /* DS Params */; if (supp_ht) local->scan_ies_len += 2 + sizeof(struct ieee80211_ht_cap); if (supp_vht) local->scan_ies_len += 2 + sizeof(struct ieee80211_vht_cap); /* HE cap element is variable in size - set len to allow max size */ /* * TODO: 1 is added at the end of the calculation to accommodate for * the temporary placing of the HE capabilities IE under EXT. * Remove it once it is placed in the final place. */ if (supp_he) local->scan_ies_len += 2 + sizeof(struct ieee80211_he_cap_elem) + sizeof(struct ieee80211_he_mcs_nss_supp) + IEEE80211_HE_PPE_THRES_MAX_LEN + 1; if (!local->ops->hw_scan) { /* For hw_scan, driver needs to set these up. */ local->hw.wiphy->max_scan_ssids = 4; local->hw.wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN; } /* * If the driver supports any scan IEs, then assume the * limit includes the IEs mac80211 will add, otherwise * leave it at zero and let the driver sort it out; we * still pass our IEs to the driver but userspace will * not be allowed to in that case. 
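 * Rough example of what gets subtracted below: with 12 bitrates (an
 * assumed value) and HT+VHT support, scan_ies_len = 4 + 12 (supp
 * rates) + 3 (DS params) + 2 + 26 (HT cap) + 2 + 12 (VHT cap) = 61
 * bytes taken off the driver's advertised max_scan_ie_len.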
*/ if (local->hw.wiphy->max_scan_ie_len) local->hw.wiphy->max_scan_ie_len -= local->scan_ies_len; WARN_ON(!ieee80211_cs_list_valid(local->hw.cipher_schemes, local->hw.n_cipher_schemes)); result = ieee80211_init_cipher_suites(local); if (result < 0) goto fail_wiphy_register; if (!local->ops->remain_on_channel) local->hw.wiphy->max_remain_on_channel_duration = 5000; /* mac80211 based drivers don't support internal TDLS setup */ if (local->hw.wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS) local->hw.wiphy->flags |= WIPHY_FLAG_TDLS_EXTERNAL_SETUP; /* mac80211 supports eCSA, if the driver supports STA CSA at all */ if (ieee80211_hw_check(&local->hw, CHANCTX_STA_CSA)) local->ext_capa[0] |= WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING; /* mac80211 supports multi BSSID, if the driver supports it */ if (ieee80211_hw_check(&local->hw, SUPPORTS_MULTI_BSSID)) { local->hw.wiphy->support_mbssid = true; if (ieee80211_hw_check(&local->hw, SUPPORTS_ONLY_HE_MULTI_BSSID)) local->hw.wiphy->support_only_he_mbssid = true; else local->ext_capa[2] |= WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT; } local->hw.wiphy->max_num_csa_counters = IEEE80211_MAX_CSA_COUNTERS_NUM; result = wiphy_register(local->hw.wiphy); if (result < 0) goto fail_wiphy_register; /* * We use the number of queues for feature tests (QoS, HT) internally * so restrict them appropriately. */ if (hw->queues > IEEE80211_MAX_QUEUES) hw->queues = IEEE80211_MAX_QUEUES; local->workqueue = alloc_ordered_workqueue("%s", 0, wiphy_name(local->hw.wiphy)); if (!local->workqueue) { result = -ENOMEM; goto fail_workqueue; } /* * The hardware needs headroom for sending the frame, * and we need some headroom for passing the frame to monitor * interfaces, but never both at the same time. */ local->tx_headroom = max_t(unsigned int , local->hw.extra_tx_headroom, IEEE80211_TX_STATUS_HEADROOM); debugfs_hw_add(local); /* * if the driver doesn't specify a max listen interval we * use 5 which should be a safe default */ if (local->hw.max_listen_interval == 0) local->hw.max_listen_interval = 5; local->hw.conf.listen_interval = local->hw.max_listen_interval; local->dynamic_ps_forced_timeout = -1; if (!local->hw.max_nan_de_entries) local->hw.max_nan_de_entries = IEEE80211_MAX_NAN_INSTANCE_ID; result = ieee80211_wep_init(local); if (result < 0) wiphy_debug(local->hw.wiphy, "Failed to initialize wep: %d\n", result); local->hw.conf.flags = IEEE80211_CONF_IDLE; ieee80211_led_init(local); result = ieee80211_txq_setup_flows(local); if (result) goto fail_flows; rtnl_lock(); result = ieee80211_init_rate_ctrl_alg(local, hw->rate_control_algorithm); if (result < 0) { wiphy_debug(local->hw.wiphy, "Failed to initialize rate control algorithm\n"); goto fail_rate; } if (local->rate_ctrl) { clear_bit(IEEE80211_HW_SUPPORTS_VHT_EXT_NSS_BW, hw->flags); if (local->rate_ctrl->ops->capa & RATE_CTRL_CAPA_VHT_EXT_NSS_BW) ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW); } /* * If the VHT capabilities don't have IEEE80211_VHT_EXT_NSS_BW_CAPABLE, * or have it when we don't, copy the sband structure and set/clear it. * This is necessary because rate scaling algorithms could be switched * and have different support values. * Print a message so that in the common case the reallocation can be * avoided. 
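 * The copy below flips IEEE80211_VHT_EXT_NSS_BW_CAPABLE in
 * vht_mcs.tx_highest with an XOR, so the capability advertised in
 * our own IEs always matches what the active rate control supports.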
BUILD_BUG_ON(NUM_NL80211_BANDS > 8 * sizeof(local->sband_allocated)); for (band = 0; band < NUM_NL80211_BANDS; band++) { struct ieee80211_supported_band *sband; bool local_cap, ie_cap; local_cap = ieee80211_hw_check(hw, SUPPORTS_VHT_EXT_NSS_BW); sband = local->hw.wiphy->bands[band]; if (!sband || !sband->vht_cap.vht_supported) continue; ie_cap = !!(sband->vht_cap.vht_mcs.tx_highest & cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE)); if (local_cap == ie_cap) continue; sband = kmemdup(sband, sizeof(*sband), GFP_KERNEL); if (!sband) { result = -ENOMEM; goto fail_rate; } wiphy_dbg(hw->wiphy, "copying sband (band %d) due to VHT EXT NSS BW flag\n", band); sband->vht_cap.vht_mcs.tx_highest ^= cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE); local->hw.wiphy->bands[band] = sband; local->sband_allocated |= BIT(band); } /* add one default STA interface if supported */ if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_STATION) && !ieee80211_hw_check(hw, NO_AUTO_VIF)) { struct vif_params params = {0}; result = ieee80211_if_add(local, "wlan%d", NET_NAME_ENUM, NULL, NL80211_IFTYPE_STATION, &params); if (result) wiphy_warn(local->hw.wiphy, "Failed to add default virtual iface\n"); } rtnl_unlock(); #ifdef CONFIG_INET local->ifa_notifier.notifier_call = ieee80211_ifa_changed; result = register_inetaddr_notifier(&local->ifa_notifier); if (result) goto fail_ifa; #endif #if IS_ENABLED(CONFIG_IPV6) local->ifa6_notifier.notifier_call = ieee80211_ifa6_changed; result = register_inet6addr_notifier(&local->ifa6_notifier); if (result) goto fail_ifa6; #endif return 0; #if IS_ENABLED(CONFIG_IPV6) fail_ifa6: #ifdef CONFIG_INET unregister_inetaddr_notifier(&local->ifa_notifier); #endif #endif #if defined(CONFIG_INET) || defined(CONFIG_IPV6) fail_ifa: #endif rtnl_lock(); rate_control_deinitialize(local); ieee80211_remove_interfaces(local); fail_rate: rtnl_unlock(); ieee80211_led_exit(local); ieee80211_wep_free(local); fail_flows: destroy_workqueue(local->workqueue); fail_workqueue: wiphy_unregister(local->hw.wiphy); fail_wiphy_register: if (local->wiphy_ciphers_allocated) kfree(local->hw.wiphy->cipher_suites); kfree(local->int_scan_req); return result; } EXPORT_SYMBOL(ieee80211_register_hw); void ieee80211_unregister_hw(struct ieee80211_hw *hw) { struct ieee80211_local *local = hw_to_local(hw); tasklet_kill(&local->tx_pending_tasklet); tasklet_kill(&local->tasklet); #ifdef CONFIG_INET unregister_inetaddr_notifier(&local->ifa_notifier); #endif #if IS_ENABLED(CONFIG_IPV6) unregister_inet6addr_notifier(&local->ifa6_notifier); #endif rtnl_lock(); /* * At this point, interface list manipulations are fine * because the driver cannot be handing us frames any * more and the tasklet is killed.
*/ ieee80211_remove_interfaces(local); rtnl_unlock(); cancel_delayed_work_sync(&local->roc_work); cancel_work_sync(&local->restart_work); cancel_work_sync(&local->reconfig_filter); cancel_work_sync(&local->tdls_chsw_work); flush_work(&local->sched_scan_stopped_work); flush_work(&local->radar_detected_work); ieee80211_clear_tx_pending(local); rate_control_deinitialize(local); if (skb_queue_len(&local->skb_queue) || skb_queue_len(&local->skb_queue_unreliable)) wiphy_warn(local->hw.wiphy, "skb_queue not empty\n"); skb_queue_purge(&local->skb_queue); skb_queue_purge(&local->skb_queue_unreliable); skb_queue_purge(&local->skb_queue_tdls_chsw); destroy_workqueue(local->workqueue); wiphy_unregister(local->hw.wiphy); ieee80211_wep_free(local); ieee80211_led_exit(local); kfree(local->int_scan_req); } EXPORT_SYMBOL(ieee80211_unregister_hw); static int ieee80211_free_ack_frame(int id, void *p, void *data) { WARN_ONCE(1, "Have pending ack frames!\n"); kfree_skb(p); return 0; } void ieee80211_free_hw(struct ieee80211_hw *hw) { struct ieee80211_local *local = hw_to_local(hw); enum nl80211_band band; mutex_destroy(&local->iflist_mtx); mutex_destroy(&local->mtx); if (local->wiphy_ciphers_allocated) kfree(local->hw.wiphy->cipher_suites); idr_for_each(&local->ack_status_frames, ieee80211_free_ack_frame, NULL); idr_destroy(&local->ack_status_frames); #ifdef CPTCFG_MAC80211_LATENCY_MEASUREMENTS { struct ieee80211_tx_latency_threshold *tx_thrshld; kfree(rcu_access_pointer(local->tx_latency)); kfree(rcu_access_pointer(local->tx_consec)); tx_thrshld = rcu_access_pointer(local->tx_threshold); if (tx_thrshld) { kfree(tx_thrshld->thresholds_bss); kfree(tx_thrshld->thresholds_p2p); } kfree(tx_thrshld); } #endif /* CPTCFG_MAC80211_LATENCY_MEASUREMENTS */ sta_info_stop(local); ieee80211_free_led_names(local); kfree(rcu_access_pointer(local->uapsd_black_list)); for (band = 0; band < NUM_NL80211_BANDS; band++) { if (!(local->sband_allocated & BIT(band))) continue; kfree(local->hw.wiphy->bands[band]); } wiphy_free(local->hw.wiphy); } EXPORT_SYMBOL(ieee80211_free_hw); static int __init ieee80211_init(void) { struct sk_buff *skb; int ret; BUILD_BUG_ON(sizeof(struct ieee80211_tx_info) > sizeof(skb->cb)); BUILD_BUG_ON(offsetof(struct ieee80211_tx_info, driver_data) + IEEE80211_TX_INFO_DRIVER_DATA_SIZE > sizeof(skb->cb)); ret = rc80211_minstrel_init(); if (ret) return ret; ret = ieee80211_iface_init(); if (ret) goto err_netdev; return 0; err_netdev: rc80211_minstrel_exit(); return ret; } static void __exit ieee80211_exit(void) { rc80211_minstrel_exit(); ieee80211s_stop(); ieee80211_iface_exit(); rcu_barrier(); } subsys_initcall(ieee80211_init); module_exit(ieee80211_exit); MODULE_DESCRIPTION("IEEE 802.11 subsystem"); MODULE_LICENSE("GPL"); backport-iwlwifi-dkms-7906/net/mac80211/mesh.c000066400000000000000000001216611351064634500207320ustar00rootroot00000000000000/* * Copyright (c) 2008, 2009 open80211s Ltd. * Copyright (C) 2018 - 2019 Intel Corporation * Authors: Luis Carlos Cobo * Javier Cardona * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. 
*/ #include <linux/slab.h> #include <asm/unaligned.h> #include "ieee80211_i.h" #include "mesh.h" #include "driver-ops.h" static int mesh_allocated; static struct kmem_cache *rm_cache; bool mesh_action_is_path_sel(struct ieee80211_mgmt *mgmt) { return (mgmt->u.action.u.mesh_action.action_code == WLAN_MESH_ACTION_HWMP_PATH_SELECTION); } void ieee80211s_init(void) { mesh_allocated = 1; rm_cache = kmem_cache_create("mesh_rmc", sizeof(struct rmc_entry), 0, 0, NULL); } void ieee80211s_stop(void) { if (!mesh_allocated) return; kmem_cache_destroy(rm_cache); } static void ieee80211_mesh_housekeeping_timer(struct timer_list *t) { struct ieee80211_sub_if_data *sdata = from_timer(sdata, t, u.mesh.housekeeping_timer); struct ieee80211_local *local = sdata->local; struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; set_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags); ieee80211_queue_work(&local->hw, &sdata->work); } /** * mesh_matches_local - check if the config of a mesh point matches ours * * @sdata: local mesh subif * @ie: information elements of a management frame from the mesh peer * * This function checks if the mesh configuration of a mesh point matches the * local mesh configuration, i.e. if both nodes belong to the same mesh network. */ bool mesh_matches_local(struct ieee80211_sub_if_data *sdata, struct ieee802_11_elems *ie) { struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; u32 basic_rates = 0; struct cfg80211_chan_def sta_chan_def; struct ieee80211_supported_band *sband; /* * As support for each feature is added, check for matching * - On mesh config capabilities * - Power Save Support En * - Sync support enabled * - Sync support active * - Sync support required from peer * - MDA enabled * - Power management control on fc */ if (!(ifmsh->mesh_id_len == ie->mesh_id_len && memcmp(ifmsh->mesh_id, ie->mesh_id, ie->mesh_id_len) == 0 && (ifmsh->mesh_pp_id == ie->mesh_config->meshconf_psel) && (ifmsh->mesh_pm_id == ie->mesh_config->meshconf_pmetric) && (ifmsh->mesh_cc_id == ie->mesh_config->meshconf_congest) && (ifmsh->mesh_sp_id == ie->mesh_config->meshconf_synch) && (ifmsh->mesh_auth_id == ie->mesh_config->meshconf_auth))) return false; sband = ieee80211_get_sband(sdata); if (!sband) return false; ieee80211_sta_get_rates(sdata, ie, sband->band, &basic_rates); if (sdata->vif.bss_conf.basic_rates != basic_rates) return false; cfg80211_chandef_create(&sta_chan_def, sdata->vif.bss_conf.chandef.chan, NL80211_CHAN_NO_HT); ieee80211_chandef_ht_oper(ie->ht_operation, &sta_chan_def); ieee80211_chandef_vht_oper(&sdata->local->hw, ie->vht_operation, ie->ht_operation, &sta_chan_def); if (!cfg80211_chandef_compatible(&sdata->vif.bss_conf.chandef, &sta_chan_def)) return false; return true; } /** * mesh_peer_accepts_plinks - check if an mp is willing to establish peer links * * @ie: information elements of a management frame from the mesh peer */ bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie) { return (ie->mesh_config->meshconf_cap & IEEE80211_MESHCONF_CAPAB_ACCEPT_PLINKS) != 0; } /** * mesh_accept_plinks_update - update accepting_plink in local mesh beacons * * @sdata: mesh interface in which mesh beacons are going to be updated * * Returns: beacon changed flag if the beacon content changed. */ u32 mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata) { bool free_plinks; u32 changed = 0; /* In case mesh_plink_free_count > 0 and mesh_plinktbl_capacity == 0, * the mesh interface might be able to establish plinks with peers that * are already on the table but are not on PLINK_ESTAB state.
However, * in general the mesh interface is not accepting peer link requests * from new peers, and that must be reflected in the beacon */ free_plinks = mesh_plink_availables(sdata); if (free_plinks != sdata->u.mesh.accepting_plinks) { sdata->u.mesh.accepting_plinks = free_plinks; changed = BSS_CHANGED_BEACON; } return changed; } /* * mesh_sta_cleanup - clean up any mesh sta state * * @sta: mesh sta to clean up. */ void mesh_sta_cleanup(struct sta_info *sta) { struct ieee80211_sub_if_data *sdata = sta->sdata; u32 changed = mesh_plink_deactivate(sta); if (changed) ieee80211_mbss_info_change_notify(sdata, changed); } int mesh_rmc_init(struct ieee80211_sub_if_data *sdata) { int i; sdata->u.mesh.rmc = kmalloc(sizeof(struct mesh_rmc), GFP_KERNEL); if (!sdata->u.mesh.rmc) return -ENOMEM; sdata->u.mesh.rmc->idx_mask = RMC_BUCKETS - 1; for (i = 0; i < RMC_BUCKETS; i++) INIT_HLIST_HEAD(&sdata->u.mesh.rmc->bucket[i]); return 0; } void mesh_rmc_free(struct ieee80211_sub_if_data *sdata) { struct mesh_rmc *rmc = sdata->u.mesh.rmc; struct rmc_entry *p; struct hlist_node *n; int i; if (!sdata->u.mesh.rmc) return; for (i = 0; i < RMC_BUCKETS; i++) { hlist_for_each_entry_safe(p, n, &rmc->bucket[i], list) { hlist_del(&p->list); kmem_cache_free(rm_cache, p); } } kfree(rmc); sdata->u.mesh.rmc = NULL; } /** * mesh_rmc_check - Check frame in recent multicast cache and add if absent. * * @sdata: interface * @sa: source address * @mesh_hdr: mesh_header * * Returns: 0 if the frame is not in the cache, nonzero otherwise. * * Checks using the source address and the mesh sequence number if we have * received this frame lately. If the frame is not in the cache, it is added to * it. */ int mesh_rmc_check(struct ieee80211_sub_if_data *sdata, const u8 *sa, struct ieee80211s_hdr *mesh_hdr) { struct mesh_rmc *rmc = sdata->u.mesh.rmc; u32 seqnum = 0; int entries = 0; u8 idx; struct rmc_entry *p; struct hlist_node *n; if (!rmc) return -1; /* Don't care about endianness since only match matters */ memcpy(&seqnum, &mesh_hdr->seqnum, sizeof(mesh_hdr->seqnum)); idx = le32_to_cpu(mesh_hdr->seqnum) & rmc->idx_mask; hlist_for_each_entry_safe(p, n, &rmc->bucket[idx], list) { ++entries; if (time_after(jiffies, p->exp_time) || entries == RMC_QUEUE_MAX_LEN) { hlist_del(&p->list); kmem_cache_free(rm_cache, p); --entries; } else if ((seqnum == p->seqnum) && ether_addr_equal(sa, p->sa)) return -1; } p = kmem_cache_alloc(rm_cache, GFP_ATOMIC); if (!p) return 0; p->seqnum = seqnum; p->exp_time = jiffies + RMC_TIMEOUT; memcpy(p->sa, sa, ETH_ALEN); hlist_add_head(&p->list, &rmc->bucket[idx]); return 0; } int mesh_add_meshconf_ie(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) { struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; u8 *pos, neighbors; u8 meshconf_len = sizeof(struct ieee80211_meshconf_ie); bool is_connected_to_gate = ifmsh->num_gates > 0 || ifmsh->mshcfg.dot11MeshGateAnnouncementProtocol || ifmsh->mshcfg.dot11MeshConnectedToMeshGate; if (skb_tailroom(skb) < 2 + meshconf_len) return -ENOMEM; pos = skb_put(skb, 2 + meshconf_len); *pos++ = WLAN_EID_MESH_CONFIG; *pos++ = meshconf_len; /* save a pointer for quick updates in pre-tbtt */ ifmsh->meshconf_offset = pos - skb->data; /* Active path selection protocol ID */ *pos++ = ifmsh->mesh_pp_id; /* Active path selection metric ID */ *pos++ = ifmsh->mesh_pm_id; /* Congestion control mode identifier */ *pos++ = ifmsh->mesh_cc_id; /* Synchronization protocol identifier */ *pos++ = ifmsh->mesh_sp_id; /* Authentication Protocol identifier */ *pos++ = ifmsh->mesh_auth_id; /* Mesh 
Formation Info - number of neighbors */ neighbors = atomic_read(&ifmsh->estab_plinks); neighbors = min_t(int, neighbors, IEEE80211_MAX_MESH_PEERINGS); *pos++ = (neighbors << 1) | is_connected_to_gate; /* Mesh capability */ *pos = 0x00; *pos |= ifmsh->mshcfg.dot11MeshForwarding ? IEEE80211_MESHCONF_CAPAB_FORWARDING : 0x00; *pos |= ifmsh->accepting_plinks ? IEEE80211_MESHCONF_CAPAB_ACCEPT_PLINKS : 0x00; /* Mesh PS mode. See IEEE802.11-2012 8.4.2.100.8 */ *pos |= ifmsh->ps_peers_deep_sleep ? IEEE80211_MESHCONF_CAPAB_POWER_SAVE_LEVEL : 0x00; return 0; } int mesh_add_meshid_ie(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) { struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; u8 *pos; if (skb_tailroom(skb) < 2 + ifmsh->mesh_id_len) return -ENOMEM; pos = skb_put(skb, 2 + ifmsh->mesh_id_len); *pos++ = WLAN_EID_MESH_ID; *pos++ = ifmsh->mesh_id_len; if (ifmsh->mesh_id_len) memcpy(pos, ifmsh->mesh_id, ifmsh->mesh_id_len); return 0; } static int mesh_add_awake_window_ie(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) { struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; u8 *pos; /* see IEEE802.11-2012 13.14.6 */ if (ifmsh->ps_peers_light_sleep == 0 && ifmsh->ps_peers_deep_sleep == 0 && ifmsh->nonpeer_pm == NL80211_MESH_POWER_ACTIVE) return 0; if (skb_tailroom(skb) < 4) return -ENOMEM; pos = skb_put(skb, 2 + 2); *pos++ = WLAN_EID_MESH_AWAKE_WINDOW; *pos++ = 2; put_unaligned_le16(ifmsh->mshcfg.dot11MeshAwakeWindowDuration, pos); return 0; } int mesh_add_vendor_ies(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) { struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; u8 offset, len; const u8 *data; if (!ifmsh->ie || !ifmsh->ie_len) return 0; /* fast-forward to vendor IEs */ offset = ieee80211_ie_split_vendor(ifmsh->ie, ifmsh->ie_len, 0); if (offset < ifmsh->ie_len) { len = ifmsh->ie_len - offset; data = ifmsh->ie + offset; if (skb_tailroom(skb) < len) return -ENOMEM; skb_put_data(skb, data, len); } return 0; } int mesh_add_rsn_ie(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) { struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; u8 len = 0; const u8 *data; if (!ifmsh->ie || !ifmsh->ie_len) return 0; /* find RSN IE */ data = cfg80211_find_ie(WLAN_EID_RSN, ifmsh->ie, ifmsh->ie_len); if (!data) return 0; len = data[1] + 2; if (skb_tailroom(skb) < len) return -ENOMEM; skb_put_data(skb, data, len); return 0; } static int mesh_add_ds_params_ie(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) { struct ieee80211_chanctx_conf *chanctx_conf; struct ieee80211_channel *chan; u8 *pos; if (skb_tailroom(skb) < 3) return -ENOMEM; rcu_read_lock(); chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); if (WARN_ON(!chanctx_conf)) { rcu_read_unlock(); return -EINVAL; } chan = chanctx_conf->def.chan; rcu_read_unlock(); pos = skb_put(skb, 2 + 1); *pos++ = WLAN_EID_DS_PARAMS; *pos++ = 1; *pos++ = ieee80211_frequency_to_channel(chan->center_freq); return 0; } int mesh_add_ht_cap_ie(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) { struct ieee80211_supported_band *sband; u8 *pos; sband = ieee80211_get_sband(sdata); if (!sband) return -EINVAL; if (!sband->ht_cap.ht_supported || sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT || sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_5 || sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_10) return 0; if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_ht_cap)) return -ENOMEM; pos = skb_put(skb, 2 + sizeof(struct ieee80211_ht_cap)); ieee80211_ie_build_ht_cap(pos, &sband->ht_cap, sband->ht_cap.cap); return 
0; } int mesh_add_ht_oper_ie(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) { struct ieee80211_local *local = sdata->local; struct ieee80211_chanctx_conf *chanctx_conf; struct ieee80211_channel *channel; struct ieee80211_supported_band *sband; struct ieee80211_sta_ht_cap *ht_cap; u8 *pos; rcu_read_lock(); chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); if (WARN_ON(!chanctx_conf)) { rcu_read_unlock(); return -EINVAL; } channel = chanctx_conf->def.chan; rcu_read_unlock(); sband = local->hw.wiphy->bands[channel->band]; ht_cap = &sband->ht_cap; if (!ht_cap->ht_supported || sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT || sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_5 || sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_10) return 0; if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_ht_operation)) return -ENOMEM; pos = skb_put(skb, 2 + sizeof(struct ieee80211_ht_operation)); ieee80211_ie_build_ht_oper(pos, ht_cap, &sdata->vif.bss_conf.chandef, sdata->vif.bss_conf.ht_operation_mode, false); return 0; } int mesh_add_vht_cap_ie(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) { struct ieee80211_supported_band *sband; u8 *pos; sband = ieee80211_get_sband(sdata); if (!sband) return -EINVAL; if (!sband->vht_cap.vht_supported || sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT || sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_5 || sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_10) return 0; if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_vht_cap)) return -ENOMEM; pos = skb_put(skb, 2 + sizeof(struct ieee80211_vht_cap)); ieee80211_ie_build_vht_cap(pos, &sband->vht_cap, sband->vht_cap.cap); return 0; } int mesh_add_vht_oper_ie(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) { struct ieee80211_local *local = sdata->local; struct ieee80211_chanctx_conf *chanctx_conf; struct ieee80211_channel *channel; struct ieee80211_supported_band *sband; struct ieee80211_sta_vht_cap *vht_cap; u8 *pos; rcu_read_lock(); chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); if (WARN_ON(!chanctx_conf)) { rcu_read_unlock(); return -EINVAL; } channel = chanctx_conf->def.chan; rcu_read_unlock(); sband = local->hw.wiphy->bands[channel->band]; vht_cap = &sband->vht_cap; if (!vht_cap->vht_supported || sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT || sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_5 || sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_10) return 0; if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_vht_operation)) return -ENOMEM; pos = skb_put(skb, 2 + sizeof(struct ieee80211_vht_operation)); ieee80211_ie_build_vht_oper(pos, vht_cap, &sdata->vif.bss_conf.chandef); return 0; } static void ieee80211_mesh_path_timer(struct timer_list *t) { struct ieee80211_sub_if_data *sdata = from_timer(sdata, t, u.mesh.mesh_path_timer); ieee80211_queue_work(&sdata->local->hw, &sdata->work); } static void ieee80211_mesh_path_root_timer(struct timer_list *t) { struct ieee80211_sub_if_data *sdata = from_timer(sdata, t, u.mesh.mesh_path_root_timer); struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; set_bit(MESH_WORK_ROOT, &ifmsh->wrkq_flags); ieee80211_queue_work(&sdata->local->hw, &sdata->work); } void ieee80211_mesh_root_setup(struct ieee80211_if_mesh *ifmsh) { if (ifmsh->mshcfg.dot11MeshHWMPRootMode > IEEE80211_ROOTMODE_ROOT) set_bit(MESH_WORK_ROOT, &ifmsh->wrkq_flags); else { clear_bit(MESH_WORK_ROOT, &ifmsh->wrkq_flags); /* stop running timer */ 
del_timer_sync(&ifmsh->mesh_path_root_timer); } } /** * ieee80211_fill_mesh_addresses - fill addresses of a locally originated mesh frame * @hdr: 802.11 frame header * @fc: frame control field * @meshda: destination address in the mesh * @meshsa: source address in the mesh. Same as TA, as frame is * locally originated. * * Return the length of the 802.11 header (does not include a mesh control header) */ int ieee80211_fill_mesh_addresses(struct ieee80211_hdr *hdr, __le16 *fc, const u8 *meshda, const u8 *meshsa) { if (is_multicast_ether_addr(meshda)) { *fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS); /* DA TA SA */ memcpy(hdr->addr1, meshda, ETH_ALEN); memcpy(hdr->addr2, meshsa, ETH_ALEN); memcpy(hdr->addr3, meshsa, ETH_ALEN); return 24; } else { *fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS); /* RA TA DA SA */ eth_zero_addr(hdr->addr1); /* RA is resolved later */ memcpy(hdr->addr2, meshsa, ETH_ALEN); memcpy(hdr->addr3, meshda, ETH_ALEN); memcpy(hdr->addr4, meshsa, ETH_ALEN); return 30; } } /** * ieee80211_new_mesh_header - create a new mesh header * @sdata: mesh interface to be used * @meshhdr: uninitialized mesh header * @addr4or5: 1st address in the ae header, which may correspond to address 4 * (if addr6 is NULL) or address 5 (if addr6 is present). It may * be NULL. * @addr6: 2nd address in the ae header, which corresponds to addr6 of the * mesh frame * * Return the header length. */ unsigned int ieee80211_new_mesh_header(struct ieee80211_sub_if_data *sdata, struct ieee80211s_hdr *meshhdr, const char *addr4or5, const char *addr6) { if (WARN_ON(!addr4or5 && addr6)) return 0; memset(meshhdr, 0, sizeof(*meshhdr)); meshhdr->ttl = sdata->u.mesh.mshcfg.dot11MeshTTL; /* FIXME: racy -- TX on multiple queues can be concurrent */ put_unaligned(cpu_to_le32(sdata->u.mesh.mesh_seqnum), &meshhdr->seqnum); sdata->u.mesh.mesh_seqnum++; if (addr4or5 && !addr6) { meshhdr->flags |= MESH_FLAGS_AE_A4; memcpy(meshhdr->eaddr1, addr4or5, ETH_ALEN); return 2 * ETH_ALEN; } else if (addr4or5 && addr6) { meshhdr->flags |= MESH_FLAGS_AE_A5_A6; memcpy(meshhdr->eaddr1, addr4or5, ETH_ALEN); memcpy(meshhdr->eaddr2, addr6, ETH_ALEN); return 3 * ETH_ALEN; } return ETH_ALEN; } static void ieee80211_mesh_housekeeping(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; u32 changed; if (ifmsh->mshcfg.plink_timeout > 0) ieee80211_sta_expire(sdata, ifmsh->mshcfg.plink_timeout * HZ); mesh_path_expire(sdata); changed = mesh_accept_plinks_update(sdata); ieee80211_mbss_info_change_notify(sdata, changed); mod_timer(&ifmsh->housekeeping_timer, round_jiffies(jiffies + IEEE80211_MESH_HOUSEKEEPING_INTERVAL)); } static void ieee80211_mesh_rootpath(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; u32 interval; mesh_path_tx_root_frame(sdata); if (ifmsh->mshcfg.dot11MeshHWMPRootMode == IEEE80211_PROACTIVE_RANN) interval = ifmsh->mshcfg.dot11MeshHWMPRannInterval; else interval = ifmsh->mshcfg.dot11MeshHWMProotInterval; mod_timer(&ifmsh->mesh_path_root_timer, round_jiffies(TU_TO_EXP_TIME(interval))); } static int ieee80211_mesh_build_beacon(struct ieee80211_if_mesh *ifmsh) { struct beacon_data *bcn; int head_len, tail_len; struct sk_buff *skb; struct ieee80211_mgmt *mgmt; struct ieee80211_chanctx_conf *chanctx_conf; struct mesh_csa_settings *csa; enum nl80211_band band; u8 *pos; struct ieee80211_sub_if_data *sdata; int hdr_len = offsetofend(struct ieee80211_mgmt, u.beacon); sdata = container_of(ifmsh, struct ieee80211_sub_if_data, u.mesh);
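/*
 * Beacon layout note: "head" covers the 802.11 header, the NULL SSID,
 * any CSA/MCSP elements, supported rates and DS params, while "tail"
 * holds the remaining elements (extended rates, RSN, HT/VHT, Mesh ID,
 * Mesh Config, awake window, vendor IEs); the split exists so the
 * dynamic TIM element can be inserted between the two at beacon-get
 * time. Both parts are sized below and copied out of a scratch skb.
 */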
rcu_read_lock(); chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); band = chanctx_conf->def.chan->band; rcu_read_unlock(); head_len = hdr_len + 2 + /* NULL SSID */ /* Channel Switch Announcement */ 2 + sizeof(struct ieee80211_channel_sw_ie) + /* Mesh Channel Switch Parameters */ 2 + sizeof(struct ieee80211_mesh_chansw_params_ie) + /* Channel Switch Wrapper + Wide Bandwidth CSA IE */ 2 + 2 + sizeof(struct ieee80211_wide_bw_chansw_ie) + 2 + sizeof(struct ieee80211_sec_chan_offs_ie) + 2 + 8 + /* supported rates */ 2 + 3; /* DS params */ tail_len = 2 + (IEEE80211_MAX_SUPP_RATES - 8) + 2 + sizeof(struct ieee80211_ht_cap) + 2 + sizeof(struct ieee80211_ht_operation) + 2 + ifmsh->mesh_id_len + 2 + sizeof(struct ieee80211_meshconf_ie) + 2 + sizeof(__le16) + /* awake window */ 2 + sizeof(struct ieee80211_vht_cap) + 2 + sizeof(struct ieee80211_vht_operation) + ifmsh->ie_len; bcn = kzalloc(sizeof(*bcn) + head_len + tail_len, GFP_KERNEL); /* need an skb for IE builders to operate on */ skb = dev_alloc_skb(max(head_len, tail_len)); if (!bcn || !skb) goto out_free; /* * pointers go into the block we allocated, * memory is | beacon_data | head | tail | */ bcn->head = ((u8 *) bcn) + sizeof(*bcn); /* fill in the head */ mgmt = skb_put_zero(skb, hdr_len); mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_BEACON); eth_broadcast_addr(mgmt->da); memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN); ieee80211_mps_set_frame_flags(sdata, NULL, (void *) mgmt); mgmt->u.beacon.beacon_int = cpu_to_le16(sdata->vif.bss_conf.beacon_int); mgmt->u.beacon.capab_info |= cpu_to_le16( sdata->u.mesh.security ? WLAN_CAPABILITY_PRIVACY : 0); pos = skb_put(skb, 2); *pos++ = WLAN_EID_SSID; *pos++ = 0x0; rcu_read_lock(); csa = rcu_dereference(ifmsh->csa); if (csa) { enum nl80211_channel_type ct; struct cfg80211_chan_def *chandef; int ie_len = 2 + sizeof(struct ieee80211_channel_sw_ie) + 2 + sizeof(struct ieee80211_mesh_chansw_params_ie); pos = skb_put_zero(skb, ie_len); *pos++ = WLAN_EID_CHANNEL_SWITCH; *pos++ = 3; *pos++ = 0x0; *pos++ = ieee80211_frequency_to_channel( csa->settings.chandef.chan->center_freq); bcn->csa_current_counter = csa->settings.count; bcn->csa_counter_offsets[0] = hdr_len + 6; *pos++ = csa->settings.count; *pos++ = WLAN_EID_CHAN_SWITCH_PARAM; *pos++ = 6; if (ifmsh->csa_role == IEEE80211_MESH_CSA_ROLE_INIT) { *pos++ = ifmsh->mshcfg.dot11MeshTTL; *pos |= WLAN_EID_CHAN_SWITCH_PARAM_INITIATOR; } else { *pos++ = ifmsh->chsw_ttl; } *pos++ |= csa->settings.block_tx ? 
WLAN_EID_CHAN_SWITCH_PARAM_TX_RESTRICT : 0x00; put_unaligned_le16(WLAN_REASON_MESH_CHAN, pos); pos += 2; put_unaligned_le16(ifmsh->pre_value, pos); pos += 2; switch (csa->settings.chandef.width) { case NL80211_CHAN_WIDTH_40: ie_len = 2 + sizeof(struct ieee80211_sec_chan_offs_ie); pos = skb_put_zero(skb, ie_len); *pos++ = WLAN_EID_SECONDARY_CHANNEL_OFFSET; /* EID */ *pos++ = 1; /* len */ ct = cfg80211_get_chandef_type(&csa->settings.chandef); if (ct == NL80211_CHAN_HT40PLUS) *pos++ = IEEE80211_HT_PARAM_CHA_SEC_ABOVE; else *pos++ = IEEE80211_HT_PARAM_CHA_SEC_BELOW; break; case NL80211_CHAN_WIDTH_80: case NL80211_CHAN_WIDTH_80P80: case NL80211_CHAN_WIDTH_160: /* Channel Switch Wrapper + Wide Bandwidth CSA IE */ ie_len = 2 + 2 + sizeof(struct ieee80211_wide_bw_chansw_ie); pos = skb_put_zero(skb, ie_len); *pos++ = WLAN_EID_CHANNEL_SWITCH_WRAPPER; /* EID */ *pos++ = 5; /* len */ /* put sub IE */ chandef = &csa->settings.chandef; ieee80211_ie_build_wide_bw_cs(pos, chandef); break; default: break; } } rcu_read_unlock(); if (ieee80211_add_srates_ie(sdata, skb, true, band) || mesh_add_ds_params_ie(sdata, skb)) goto out_free; bcn->head_len = skb->len; memcpy(bcn->head, skb->data, bcn->head_len); /* now the tail */ skb_trim(skb, 0); bcn->tail = bcn->head + bcn->head_len; if (ieee80211_add_ext_srates_ie(sdata, skb, true, band) || mesh_add_rsn_ie(sdata, skb) || mesh_add_ht_cap_ie(sdata, skb) || mesh_add_ht_oper_ie(sdata, skb) || mesh_add_meshid_ie(sdata, skb) || mesh_add_meshconf_ie(sdata, skb) || mesh_add_awake_window_ie(sdata, skb) || mesh_add_vht_cap_ie(sdata, skb) || mesh_add_vht_oper_ie(sdata, skb) || mesh_add_vendor_ies(sdata, skb)) goto out_free; bcn->tail_len = skb->len; memcpy(bcn->tail, skb->data, bcn->tail_len); bcn->meshconf = (struct ieee80211_meshconf_ie *) (bcn->tail + ifmsh->meshconf_offset); dev_kfree_skb(skb); rcu_assign_pointer(ifmsh->beacon, bcn); return 0; out_free: kfree(bcn); dev_kfree_skb(skb); return -ENOMEM; } static int ieee80211_mesh_rebuild_beacon(struct ieee80211_sub_if_data *sdata) { struct beacon_data *old_bcn; int ret; old_bcn = rcu_dereference_protected(sdata->u.mesh.beacon, lockdep_is_held(&sdata->wdev.mtx)); ret = ieee80211_mesh_build_beacon(&sdata->u.mesh); if (ret) /* just reuse old beacon */ return ret; if (old_bcn) kfree_rcu(old_bcn, rcu_head); return 0; } void ieee80211_mbss_info_change_notify(struct ieee80211_sub_if_data *sdata, u32 changed) { struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; unsigned long bits = changed; u32 bit; if (!bits) return; /* if we race with running work, worst case this work becomes a noop */ for_each_set_bit(bit, &bits, sizeof(changed) * BITS_PER_BYTE) set_bit(bit, &ifmsh->mbss_changed); set_bit(MESH_WORK_MBSS_CHANGED, &ifmsh->wrkq_flags); ieee80211_queue_work(&sdata->local->hw, &sdata->work); } int ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; struct ieee80211_local *local = sdata->local; u32 changed = BSS_CHANGED_BEACON | BSS_CHANGED_BEACON_ENABLED | BSS_CHANGED_HT | BSS_CHANGED_BASIC_RATES | BSS_CHANGED_BEACON_INT | BSS_CHANGED_MCAST_RATE; local->fif_other_bss++; /* mesh ifaces must set allmulti to forward mcast traffic */ atomic_inc(&local->iff_allmultis); ieee80211_configure_filter(local); ifmsh->mesh_cc_id = 0; /* Disabled */ /* register sync ops from extensible synchronization framework */ ifmsh->sync_ops = ieee80211_mesh_sync_ops_get(ifmsh->mesh_sp_id); ifmsh->sync_offset_clockdrift_max = 0; set_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags); 
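/*
 * From here the join is synchronous: root mode is set up, deferred
 * work is kicked, and the initial beacon is built; a beacon
 * allocation failure unwinds everything via ieee80211_stop_mesh().
 */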
ieee80211_mesh_root_setup(ifmsh); ieee80211_queue_work(&local->hw, &sdata->work); sdata->vif.bss_conf.ht_operation_mode = ifmsh->mshcfg.ht_opmode; sdata->vif.bss_conf.enable_beacon = true; changed |= ieee80211_mps_local_status_update(sdata); if (ieee80211_mesh_build_beacon(ifmsh)) { ieee80211_stop_mesh(sdata); return -ENOMEM; } ieee80211_recalc_dtim(local, sdata); ieee80211_bss_info_change_notify(sdata, changed); netif_carrier_on(sdata->dev); return 0; } void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata) { struct ieee80211_local *local = sdata->local; struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; struct beacon_data *bcn; netif_carrier_off(sdata->dev); /* flush STAs and mpaths on this iface */ sta_info_flush(sdata); mesh_path_flush_by_iface(sdata); /* stop the beacon */ ifmsh->mesh_id_len = 0; sdata->vif.bss_conf.enable_beacon = false; clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED, &sdata->state); ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED); /* remove beacon */ bcn = rcu_dereference_protected(ifmsh->beacon, lockdep_is_held(&sdata->wdev.mtx)); RCU_INIT_POINTER(ifmsh->beacon, NULL); kfree_rcu(bcn, rcu_head); /* free all potentially still buffered group-addressed frames */ local->total_ps_buffered -= skb_queue_len(&ifmsh->ps.bc_buf); skb_queue_purge(&ifmsh->ps.bc_buf); del_timer_sync(&sdata->u.mesh.housekeeping_timer); del_timer_sync(&sdata->u.mesh.mesh_path_root_timer); del_timer_sync(&sdata->u.mesh.mesh_path_timer); /* clear any mesh work (for next join) we may have accrued */ ifmsh->wrkq_flags = 0; ifmsh->mbss_changed = 0; local->fif_other_bss--; atomic_dec(&local->iff_allmultis); ieee80211_configure_filter(local); } static void ieee80211_mesh_csa_mark_radar(struct ieee80211_sub_if_data *sdata) { int err; /* if the current channel is a DFS channel, mark the channel as * unavailable. */ err = cfg80211_chandef_dfs_required(sdata->local->hw.wiphy, &sdata->vif.bss_conf.chandef, NL80211_IFTYPE_MESH_POINT); if (err > 0) cfg80211_radar_event(sdata->local->hw.wiphy, &sdata->vif.bss_conf.chandef, GFP_ATOMIC); } static bool ieee80211_mesh_process_chnswitch(struct ieee80211_sub_if_data *sdata, struct ieee802_11_elems *elems, bool beacon) { struct cfg80211_csa_settings params; struct ieee80211_csa_ie csa_ie; struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; struct ieee80211_supported_band *sband; int err; u32 sta_flags; sdata_assert_lock(sdata); sband = ieee80211_get_sband(sdata); if (!sband) return false; sta_flags = 0; switch (sdata->vif.bss_conf.chandef.width) { case NL80211_CHAN_WIDTH_20_NOHT: sta_flags |= IEEE80211_STA_DISABLE_HT; /* fall through */ case NL80211_CHAN_WIDTH_20: sta_flags |= IEEE80211_STA_DISABLE_40MHZ; /* fall through */ case NL80211_CHAN_WIDTH_40: sta_flags |= IEEE80211_STA_DISABLE_VHT; break; default: break; } memset(&params, 0, sizeof(params)); err = ieee80211_parse_ch_switch_ie(sdata, elems, sband->band, sta_flags, sdata->vif.addr, &csa_ie); if (err < 0) return false; if (err) return false; /* Mark the channel unavailable if the reason for the switch is * regulatory.
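 * (a reported radar event makes cfg80211 put the channel on the
 * non-occupancy list, so it will not be picked again for a while)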
*/ if (csa_ie.reason_code == WLAN_REASON_MESH_CHAN_REGULATORY) ieee80211_mesh_csa_mark_radar(sdata); params.chandef = csa_ie.chandef; params.count = csa_ie.count; if (!cfg80211_chandef_usable(sdata->local->hw.wiphy, ¶ms.chandef, IEEE80211_CHAN_DISABLED) || !cfg80211_reg_can_beacon(sdata->local->hw.wiphy, ¶ms.chandef, NL80211_IFTYPE_MESH_POINT)) { sdata_info(sdata, "mesh STA %pM switches to unsupported channel (%d MHz, width:%d, CF1/2: %d/%d MHz), aborting\n", sdata->vif.addr, params.chandef.chan->center_freq, params.chandef.width, params.chandef.center_freq1, params.chandef.center_freq2); return false; } err = cfg80211_chandef_dfs_required(sdata->local->hw.wiphy, ¶ms.chandef, NL80211_IFTYPE_MESH_POINT); if (err < 0) return false; if (err > 0 && !ifmsh->userspace_handles_dfs) { sdata_info(sdata, "mesh STA %pM switches to channel requiring DFS (%d MHz, width:%d, CF1/2: %d/%d MHz), aborting\n", sdata->vif.addr, params.chandef.chan->center_freq, params.chandef.width, params.chandef.center_freq1, params.chandef.center_freq2); return false; } params.radar_required = err; if (cfg80211_chandef_identical(¶ms.chandef, &sdata->vif.bss_conf.chandef)) { mcsa_dbg(sdata, "received csa with an identical chandef, ignoring\n"); return true; } mcsa_dbg(sdata, "received channel switch announcement to go to channel %d MHz\n", params.chandef.chan->center_freq); params.block_tx = csa_ie.mode & WLAN_EID_CHAN_SWITCH_PARAM_TX_RESTRICT; if (beacon) { ifmsh->chsw_ttl = csa_ie.ttl - 1; if (ifmsh->pre_value >= csa_ie.pre_value) return false; ifmsh->pre_value = csa_ie.pre_value; } if (ifmsh->chsw_ttl >= ifmsh->mshcfg.dot11MeshTTL) return false; ifmsh->csa_role = IEEE80211_MESH_CSA_ROLE_REPEATER; if (ieee80211_channel_switch(sdata->local->hw.wiphy, sdata->dev, ¶ms) < 0) return false; return true; } static void ieee80211_mesh_rx_probe_req(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt, size_t len) { struct ieee80211_local *local = sdata->local; struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; struct sk_buff *presp; struct beacon_data *bcn; struct ieee80211_mgmt *hdr; struct ieee802_11_elems elems; size_t baselen; u8 *pos; pos = mgmt->u.probe_req.variable; baselen = (u8 *) pos - (u8 *) mgmt; if (baselen > len) return; ieee802_11_parse_elems(pos, len - baselen, false, &elems, mgmt->bssid, NULL); if (!elems.mesh_id) return; /* 802.11-2012 10.1.4.3.2 */ if ((!ether_addr_equal(mgmt->da, sdata->vif.addr) && !is_broadcast_ether_addr(mgmt->da)) || elems.ssid_len != 0) return; if (elems.mesh_id_len != 0 && (elems.mesh_id_len != ifmsh->mesh_id_len || memcmp(elems.mesh_id, ifmsh->mesh_id, ifmsh->mesh_id_len))) return; rcu_read_lock(); bcn = rcu_dereference(ifmsh->beacon); if (!bcn) goto out; presp = dev_alloc_skb(local->tx_headroom + bcn->head_len + bcn->tail_len); if (!presp) goto out; skb_reserve(presp, local->tx_headroom); skb_put_data(presp, bcn->head, bcn->head_len); skb_put_data(presp, bcn->tail, bcn->tail_len); hdr = (struct ieee80211_mgmt *) presp->data; hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_RESP); memcpy(hdr->da, mgmt->sa, ETH_ALEN); IEEE80211_SKB_CB(presp)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; ieee80211_tx_skb(sdata, presp); out: rcu_read_unlock(); } static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata, u16 stype, struct ieee80211_mgmt *mgmt, size_t len, struct ieee80211_rx_status *rx_status) { struct ieee80211_local *local = sdata->local; struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; struct ieee802_11_elems elems; struct 
ieee80211_channel *channel; size_t baselen; int freq; enum nl80211_band band = rx_status->band; /* ignore ProbeResp to foreign address */ if (stype == IEEE80211_STYPE_PROBE_RESP && !ether_addr_equal(mgmt->da, sdata->vif.addr)) return; baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt; if (baselen > len) return; ieee802_11_parse_elems(mgmt->u.probe_resp.variable, len - baselen, false, &elems, mgmt->bssid, NULL); /* ignore non-mesh or secure / unsecure mismatch */ if ((!elems.mesh_id || !elems.mesh_config) || (elems.rsn && sdata->u.mesh.security == IEEE80211_MESH_SEC_NONE) || (!elems.rsn && sdata->u.mesh.security != IEEE80211_MESH_SEC_NONE)) return; if (elems.ds_params) freq = ieee80211_channel_to_frequency(elems.ds_params[0], band); else freq = rx_status->freq; channel = ieee80211_get_channel(local->hw.wiphy, freq); if (!channel || channel->flags & IEEE80211_CHAN_DISABLED) return; if (mesh_matches_local(sdata, &elems)) { mpl_dbg(sdata, "rssi_threshold=%d,rx_status->signal=%d\n", sdata->u.mesh.mshcfg.rssi_threshold, rx_status->signal); if (!sdata->u.mesh.user_mpm || sdata->u.mesh.mshcfg.rssi_threshold == 0 || sdata->u.mesh.mshcfg.rssi_threshold < rx_status->signal) mesh_neighbour_update(sdata, mgmt->sa, &elems, rx_status); } if (ifmsh->sync_ops) ifmsh->sync_ops->rx_bcn_presp(sdata, stype, mgmt, &elems, rx_status); if (ifmsh->csa_role != IEEE80211_MESH_CSA_ROLE_INIT && !sdata->vif.csa_active) ieee80211_mesh_process_chnswitch(sdata, &elems, true); } int ieee80211_mesh_finish_csa(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; struct mesh_csa_settings *tmp_csa_settings; int ret = 0; int changed = 0; /* Reset the TTL value and Initiator flag */ ifmsh->csa_role = IEEE80211_MESH_CSA_ROLE_NONE; ifmsh->chsw_ttl = 0; /* Remove the CSA and MCSP elements from the beacon */ tmp_csa_settings = rcu_dereference(ifmsh->csa); RCU_INIT_POINTER(ifmsh->csa, NULL); if (tmp_csa_settings) kfree_rcu(tmp_csa_settings, rcu_head); ret = ieee80211_mesh_rebuild_beacon(sdata); if (ret) return -EINVAL; changed |= BSS_CHANGED_BEACON; mcsa_dbg(sdata, "complete switching to center freq %d MHz", sdata->vif.bss_conf.chandef.chan->center_freq); return changed; } int ieee80211_mesh_csa_beacon(struct ieee80211_sub_if_data *sdata, struct cfg80211_csa_settings *csa_settings) { struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; struct mesh_csa_settings *tmp_csa_settings; int ret = 0; tmp_csa_settings = kmalloc(sizeof(*tmp_csa_settings), GFP_ATOMIC); if (!tmp_csa_settings) return -ENOMEM; memcpy(&tmp_csa_settings->settings, csa_settings, sizeof(struct cfg80211_csa_settings)); rcu_assign_pointer(ifmsh->csa, tmp_csa_settings); ret = ieee80211_mesh_rebuild_beacon(sdata); if (ret) { tmp_csa_settings = rcu_dereference(ifmsh->csa); RCU_INIT_POINTER(ifmsh->csa, NULL); kfree_rcu(tmp_csa_settings, rcu_head); return ret; } return BSS_CHANGED_BEACON; } static int mesh_fwd_csa_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt, size_t len, struct ieee802_11_elems *elems) { struct ieee80211_mgmt *mgmt_fwd; struct sk_buff *skb; struct ieee80211_local *local = sdata->local; skb = dev_alloc_skb(local->tx_headroom + len); if (!skb) return -ENOMEM; skb_reserve(skb, local->tx_headroom); mgmt_fwd = skb_put(skb, len); elems->mesh_chansw_params_ie->mesh_ttl--; elems->mesh_chansw_params_ie->mesh_flags &= ~WLAN_EID_CHAN_SWITCH_PARAM_INITIATOR; memcpy(mgmt_fwd, mgmt, len); eth_broadcast_addr(mgmt_fwd->da); memcpy(mgmt_fwd->sa, sdata->vif.addr, ETH_ALEN); memcpy(mgmt_fwd->bssid, 
sdata->vif.addr, ETH_ALEN); ieee80211_tx_skb(sdata, skb); return 0; } static void mesh_rx_csa_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt, size_t len) { struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; struct ieee802_11_elems elems; u16 pre_value; bool fwd_csa = true; size_t baselen; u8 *pos; if (mgmt->u.action.u.measurement.action_code != WLAN_ACTION_SPCT_CHL_SWITCH) return; pos = mgmt->u.action.u.chan_switch.variable; baselen = offsetof(struct ieee80211_mgmt, u.action.u.chan_switch.variable); ieee802_11_parse_elems(pos, len - baselen, true, &elems, mgmt->bssid, NULL); ifmsh->chsw_ttl = elems.mesh_chansw_params_ie->mesh_ttl; if (!--ifmsh->chsw_ttl) fwd_csa = false; pre_value = le16_to_cpu(elems.mesh_chansw_params_ie->mesh_pre_value); if (ifmsh->pre_value >= pre_value) return; ifmsh->pre_value = pre_value; if (!sdata->vif.csa_active && !ieee80211_mesh_process_chnswitch(sdata, &elems, false)) { mcsa_dbg(sdata, "Failed to process CSA action frame"); return; } /* forward or re-broadcast the CSA frame */ if (fwd_csa) { if (mesh_fwd_csa_frame(sdata, mgmt, len, &elems) < 0) mcsa_dbg(sdata, "Failed to forward the CSA frame"); } } static void ieee80211_mesh_rx_mgmt_action(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt, size_t len, struct ieee80211_rx_status *rx_status) { switch (mgmt->u.action.category) { case WLAN_CATEGORY_SELF_PROTECTED: switch (mgmt->u.action.u.self_prot.action_code) { case WLAN_SP_MESH_PEERING_OPEN: case WLAN_SP_MESH_PEERING_CLOSE: case WLAN_SP_MESH_PEERING_CONFIRM: mesh_rx_plink_frame(sdata, mgmt, len, rx_status); break; } break; case WLAN_CATEGORY_MESH_ACTION: if (mesh_action_is_path_sel(mgmt)) mesh_rx_path_sel_frame(sdata, mgmt, len); break; case WLAN_CATEGORY_SPECTRUM_MGMT: mesh_rx_csa_frame(sdata, mgmt, len); break; } } void ieee80211_mesh_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) { struct ieee80211_rx_status *rx_status; struct ieee80211_mgmt *mgmt; u16 stype; sdata_lock(sdata); /* mesh already went down */ if (!sdata->u.mesh.mesh_id_len) goto out; rx_status = IEEE80211_SKB_RXCB(skb); mgmt = (struct ieee80211_mgmt *) skb->data; stype = le16_to_cpu(mgmt->frame_control) & IEEE80211_FCTL_STYPE; switch (stype) { case IEEE80211_STYPE_PROBE_RESP: case IEEE80211_STYPE_BEACON: ieee80211_mesh_rx_bcn_presp(sdata, stype, mgmt, skb->len, rx_status); break; case IEEE80211_STYPE_PROBE_REQ: ieee80211_mesh_rx_probe_req(sdata, mgmt, skb->len); break; case IEEE80211_STYPE_ACTION: ieee80211_mesh_rx_mgmt_action(sdata, mgmt, skb->len, rx_status); break; } out: sdata_unlock(sdata); } static void mesh_bss_info_changed(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; u32 bit, changed = 0; for_each_set_bit(bit, &ifmsh->mbss_changed, sizeof(changed) * BITS_PER_BYTE) { clear_bit(bit, &ifmsh->mbss_changed); changed |= BIT(bit); } if (sdata->vif.bss_conf.enable_beacon && (changed & (BSS_CHANGED_BEACON | BSS_CHANGED_HT | BSS_CHANGED_BASIC_RATES | BSS_CHANGED_BEACON_INT))) if (ieee80211_mesh_rebuild_beacon(sdata)) return; ieee80211_bss_info_change_notify(sdata, changed); } void ieee80211_mesh_work(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; sdata_lock(sdata); /* mesh already went down */ if (!sdata->u.mesh.mesh_id_len) goto out; if (ifmsh->preq_queue_len && time_after(jiffies, ifmsh->last_preq + msecs_to_jiffies(ifmsh->mshcfg.dot11MeshHWMPpreqMinInterval))) mesh_path_start_discovery(sdata); if (test_and_clear_bit(MESH_WORK_HOUSEKEEPING, 
&ifmsh->wrkq_flags)) ieee80211_mesh_housekeeping(sdata); if (test_and_clear_bit(MESH_WORK_ROOT, &ifmsh->wrkq_flags)) ieee80211_mesh_rootpath(sdata); if (test_and_clear_bit(MESH_WORK_DRIFT_ADJUST, &ifmsh->wrkq_flags)) mesh_sync_adjust_tsf(sdata); if (test_and_clear_bit(MESH_WORK_MBSS_CHANGED, &ifmsh->wrkq_flags)) mesh_bss_info_changed(sdata); out: sdata_unlock(sdata); } void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; static u8 zero_addr[ETH_ALEN] = {}; timer_setup(&ifmsh->housekeeping_timer, ieee80211_mesh_housekeeping_timer, 0); ifmsh->accepting_plinks = true; atomic_set(&ifmsh->mpaths, 0); mesh_rmc_init(sdata); ifmsh->last_preq = jiffies; ifmsh->next_perr = jiffies; ifmsh->csa_role = IEEE80211_MESH_CSA_ROLE_NONE; /* Allocate all mesh structures when creating the first mesh interface. */ if (!mesh_allocated) ieee80211s_init(); mesh_pathtbl_init(sdata); timer_setup(&ifmsh->mesh_path_timer, ieee80211_mesh_path_timer, 0); timer_setup(&ifmsh->mesh_path_root_timer, ieee80211_mesh_path_root_timer, 0); INIT_LIST_HEAD(&ifmsh->preq_queue.list); skb_queue_head_init(&ifmsh->ps.bc_buf); spin_lock_init(&ifmsh->mesh_preq_queue_lock); spin_lock_init(&ifmsh->sync_offset_lock); RCU_INIT_POINTER(ifmsh->beacon, NULL); sdata->vif.bss_conf.bssid = zero_addr; } void ieee80211_mesh_teardown_sdata(struct ieee80211_sub_if_data *sdata) { mesh_rmc_free(sdata); mesh_pathtbl_unregister(sdata); } backport-iwlwifi-dkms-7906/net/mac80211/mesh.h000066400000000000000000000312661351064634500207400ustar00rootroot00000000000000/* * Copyright (c) 2008, 2009 open80211s Ltd. * Authors: Luis Carlos Cobo * Javier Cardona * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #ifndef IEEE80211S_H #define IEEE80211S_H #include <linux/types.h> #include <linux/jhash.h> #include "ieee80211_i.h" /* Data structures */ /** * enum mesh_path_flags - mac80211 mesh path flags * * @MESH_PATH_ACTIVE: the mesh path can be used for forwarding * @MESH_PATH_RESOLVING: the discovery process is running for this mesh path * @MESH_PATH_SN_VALID: the mesh path contains a valid destination sequence * number * @MESH_PATH_FIXED: the mesh path has been manually set and should not be * modified * @MESH_PATH_RESOLVED: the mesh path has been resolved * @MESH_PATH_REQ_QUEUED: there is an unsent path request for this destination * already queued up, waiting for the discovery process to start. * @MESH_PATH_DELETED: the mesh path has been deleted and should no longer * be used * * MESH_PATH_RESOLVED is used by the mesh path timer to * decide when to stop or cancel the mesh path discovery. 
*/ enum mesh_path_flags { MESH_PATH_ACTIVE = BIT(0), MESH_PATH_RESOLVING = BIT(1), MESH_PATH_SN_VALID = BIT(2), MESH_PATH_FIXED = BIT(3), MESH_PATH_RESOLVED = BIT(4), MESH_PATH_REQ_QUEUED = BIT(5), MESH_PATH_DELETED = BIT(6), }; /** * enum mesh_deferred_task_flags - mac80211 mesh deferred tasks * * * * @MESH_WORK_HOUSEKEEPING: run the periodic mesh housekeeping tasks * @MESH_WORK_ROOT: the mesh root station needs to send a frame * @MESH_WORK_DRIFT_ADJUST: time to compensate for clock drift relative to other * mesh nodes * @MESH_WORK_MBSS_CHANGED: rebuild beacon and notify driver of BSS changes */ enum mesh_deferred_task_flags { MESH_WORK_HOUSEKEEPING, MESH_WORK_ROOT, MESH_WORK_DRIFT_ADJUST, MESH_WORK_MBSS_CHANGED, }; /** * struct mesh_path - mac80211 mesh path structure * * @dst: mesh path destination mac address * @mpp: mesh proxy mac address * @rhash: rhashtable list pointer * @gate_list: list pointer for known gates list * @sdata: mesh subif * @next_hop: mesh neighbor to which frames for this destination will be * forwarded * @timer: mesh path discovery timer * @frame_queue: pending queue for frames sent to this destination while the * path is unresolved * @rcu: rcu head for freeing mesh path * @sn: target sequence number * @metric: current metric to this destination * @hop_count: hops to destination * @exp_time: in jiffies, when the path will expire or when it expired * @discovery_timeout: timeout (lapse in jiffies) used for the last discovery * retry * @discovery_retries: number of discovery retries * @flags: mesh path flags, as specified on &enum mesh_path_flags * @state_lock: mesh path state lock used to protect changes to the * mpath itself. No need to take this lock when adding or removing * an mpath to a hash bucket on a path table. * @rann_snd_addr: the RANN sender address * @rann_metric: the aggregated path metric towards the root node * @last_preq_to_root: Timestamp of last PREQ sent to root * @is_root: the destination station of this path is a root node * @is_gate: the destination station of this path is a mesh gate * * * The dst address is unique in the mesh path table. Since the mesh_path is * protected by RCU, deleting the next_hop STA must remove / substitute the * mesh_path structure and wait until that is no longer reachable before * destroying the STA completely. */ struct mesh_path { u8 dst[ETH_ALEN]; u8 mpp[ETH_ALEN]; /* used for MPP or MAP */ struct rhash_head rhash; struct hlist_node gate_list; struct ieee80211_sub_if_data *sdata; struct sta_info __rcu *next_hop; struct timer_list timer; struct sk_buff_head frame_queue; struct rcu_head rcu; u32 sn; u32 metric; u8 hop_count; unsigned long exp_time; u32 discovery_timeout; u8 discovery_retries; enum mesh_path_flags flags; spinlock_t state_lock; u8 rann_snd_addr[ETH_ALEN]; u32 rann_metric; unsigned long last_preq_to_root; bool is_root; bool is_gate; }; /** * struct mesh_table * * @known_gates: list of known mesh gates and their mpaths by the station. The * gate's mpath may or may not be resolved and active. 
* @gates_lock: protects updates to known_gates * @rhead: the rhashtable containing struct mesh_paths, keyed by dest addr * @entries: number of entries in the table */ struct mesh_table { struct hlist_head known_gates; spinlock_t gates_lock; struct rhashtable rhead; atomic_t entries; /* Up to MAX_MESH_NEIGHBOURS */ }; /* Recent multicast cache */ /* RMC_BUCKETS must be a power of 2, maximum 256 */ #define RMC_BUCKETS 256 #define RMC_QUEUE_MAX_LEN 4 #define RMC_TIMEOUT (3 * HZ) /** * struct rmc_entry - entry in the Recent Multicast Cache * * @seqnum: mesh sequence number of the frame * @exp_time: expiration time of the entry, in jiffies * @sa: source address of the frame * @list: hashtable list pointer * * The Recent Multicast Cache keeps track of the latest multicast frames that * have been received by a mesh interface and discards received multicast frames * that are found in the cache. */ struct rmc_entry { struct hlist_node list; unsigned long exp_time; u32 seqnum; u8 sa[ETH_ALEN]; }; struct mesh_rmc { struct hlist_head bucket[RMC_BUCKETS]; u32 idx_mask; }; #define IEEE80211_MESH_HOUSEKEEPING_INTERVAL (60 * HZ) #define MESH_PATH_EXPIRE (600 * HZ) /* Default maximum number of plinks per interface */ #define MESH_MAX_PLINKS 256 /* Maximum number of paths per interface */ #define MESH_MAX_MPATHS 1024 /* Number of frames buffered per destination for unresolved destinations */ #define MESH_FRAME_QUEUE_LEN 10 /* Public interfaces */ /* Various */ int ieee80211_fill_mesh_addresses(struct ieee80211_hdr *hdr, __le16 *fc, const u8 *da, const u8 *sa); unsigned int ieee80211_new_mesh_header(struct ieee80211_sub_if_data *sdata, struct ieee80211s_hdr *meshhdr, const char *addr4or5, const char *addr6); int mesh_rmc_check(struct ieee80211_sub_if_data *sdata, const u8 *addr, struct ieee80211s_hdr *mesh_hdr); bool mesh_matches_local(struct ieee80211_sub_if_data *sdata, struct ieee802_11_elems *ie); void mesh_ids_set_default(struct ieee80211_if_mesh *mesh); int mesh_add_meshconf_ie(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb); int mesh_add_meshid_ie(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb); int mesh_add_rsn_ie(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb); int mesh_add_vendor_ies(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb); int mesh_add_ht_cap_ie(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb); int mesh_add_ht_oper_ie(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb); int mesh_add_vht_cap_ie(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb); int mesh_add_vht_oper_ie(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb); void mesh_rmc_free(struct ieee80211_sub_if_data *sdata); int mesh_rmc_init(struct ieee80211_sub_if_data *sdata); void ieee80211s_init(void); void ieee80211s_update_metric(struct ieee80211_local *local, struct sta_info *sta, struct ieee80211_tx_status *st); void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata); void ieee80211_mesh_teardown_sdata(struct ieee80211_sub_if_data *sdata); int ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata); void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata); void ieee80211_mesh_root_setup(struct ieee80211_if_mesh *ifmsh); const struct ieee80211_mesh_sync_ops *ieee80211_mesh_sync_ops_get(u8 method); /* wrapper for ieee80211_bss_info_change_notify() */ void ieee80211_mbss_info_change_notify(struct ieee80211_sub_if_data *sdata, u32 changed); /* mesh power save */ u32 ieee80211_mps_local_status_update(struct ieee80211_sub_if_data *sdata); u32 
ieee80211_mps_set_sta_local_pm(struct sta_info *sta, enum nl80211_mesh_power_mode pm); void ieee80211_mps_set_frame_flags(struct ieee80211_sub_if_data *sdata, struct sta_info *sta, struct ieee80211_hdr *hdr); void ieee80211_mps_sta_status_update(struct sta_info *sta); void ieee80211_mps_rx_h_sta_process(struct sta_info *sta, struct ieee80211_hdr *hdr); void ieee80211_mpsp_trigger_process(u8 *qc, struct sta_info *sta, bool tx, bool acked); void ieee80211_mps_frame_release(struct sta_info *sta, struct ieee802_11_elems *elems); /* Mesh paths */ int mesh_nexthop_lookup(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb); int mesh_nexthop_resolve(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb); void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata); struct mesh_path *mesh_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst); struct mesh_path *mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst); int mpp_path_add(struct ieee80211_sub_if_data *sdata, const u8 *dst, const u8 *mpp); struct mesh_path * mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx); struct mesh_path * mpp_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx); void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop); void mesh_path_expire(struct ieee80211_sub_if_data *sdata); void mesh_rx_path_sel_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt, size_t len); struct mesh_path * mesh_path_add(struct ieee80211_sub_if_data *sdata, const u8 *dst); int mesh_path_add_gate(struct mesh_path *mpath); int mesh_path_send_to_gates(struct mesh_path *mpath); int mesh_gate_num(struct ieee80211_sub_if_data *sdata); /* Mesh plinks */ void mesh_neighbour_update(struct ieee80211_sub_if_data *sdata, u8 *hw_addr, struct ieee802_11_elems *ie, struct ieee80211_rx_status *rx_status); bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie); u32 mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata); void mesh_plink_timer(struct timer_list *t); void mesh_plink_broken(struct sta_info *sta); u32 mesh_plink_deactivate(struct sta_info *sta); u32 mesh_plink_open(struct sta_info *sta); u32 mesh_plink_block(struct sta_info *sta); void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt, size_t len, struct ieee80211_rx_status *rx_status); void mesh_sta_cleanup(struct sta_info *sta); /* Private interfaces */ /* Mesh paths */ int mesh_path_error_tx(struct ieee80211_sub_if_data *sdata, u8 ttl, const u8 *target, u32 target_sn, u16 target_rcode, const u8 *ra); void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta); void mesh_path_flush_pending(struct mesh_path *mpath); void mesh_path_tx_pending(struct mesh_path *mpath); int mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata); void mesh_pathtbl_unregister(struct ieee80211_sub_if_data *sdata); int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr); void mesh_path_timer(struct timer_list *t); void mesh_path_flush_by_nexthop(struct sta_info *sta); void mesh_path_discard_frame(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb); void mesh_path_tx_root_frame(struct ieee80211_sub_if_data *sdata); bool mesh_action_is_path_sel(struct ieee80211_mgmt *mgmt); #ifdef CPTCFG_MAC80211_MESH static inline u32 mesh_plink_inc_estab_count(struct ieee80211_sub_if_data *sdata) { atomic_inc(&sdata->u.mesh.estab_plinks); return mesh_accept_plinks_update(sdata) | BSS_CHANGED_BEACON; } static inline u32 
mesh_plink_dec_estab_count(struct ieee80211_sub_if_data *sdata) { atomic_dec(&sdata->u.mesh.estab_plinks); return mesh_accept_plinks_update(sdata) | BSS_CHANGED_BEACON; } static inline int mesh_plink_free_count(struct ieee80211_sub_if_data *sdata) { return sdata->u.mesh.mshcfg.dot11MeshMaxPeerLinks - atomic_read(&sdata->u.mesh.estab_plinks); } static inline bool mesh_plink_availables(struct ieee80211_sub_if_data *sdata) { return (min_t(long, mesh_plink_free_count(sdata), MESH_MAX_PLINKS - sdata->local->num_sta)) > 0; } static inline void mesh_path_activate(struct mesh_path *mpath) { mpath->flags |= MESH_PATH_ACTIVE | MESH_PATH_RESOLVED; } static inline bool mesh_path_sel_is_hwmp(struct ieee80211_sub_if_data *sdata) { return sdata->u.mesh.mesh_pp_id == IEEE80211_PATH_PROTOCOL_HWMP; } void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata); void mesh_sync_adjust_tsf(struct ieee80211_sub_if_data *sdata); void ieee80211s_stop(void); #else static inline bool mesh_path_sel_is_hwmp(struct ieee80211_sub_if_data *sdata) { return false; } static inline void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata) {} static inline void ieee80211s_stop(void) {} #endif #endif /* IEEE80211S_H */ backport-iwlwifi-dkms-7906/net/mac80211/mesh_hwmp.c000066400000000000000000001074711351064634500217700ustar00rootroot00000000000000/* * Copyright (c) 2008, 2009 open80211s Ltd. * Copyright (C) 2019 Intel Corporation * Author: Luis Carlos Cobo * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include #include #include #include "wme.h" #include "mesh.h" #define TEST_FRAME_LEN 8192 #define MAX_METRIC 0xffffffff #define ARITH_SHIFT 8 #define LINK_FAIL_THRESH 95 #define MAX_PREQ_QUEUE_LEN 64 static void mesh_queue_preq(struct mesh_path *, u8); static inline u32 u32_field_get(const u8 *preq_elem, int offset, bool ae) { if (ae) offset += 6; return get_unaligned_le32(preq_elem + offset); } static inline u16 u16_field_get(const u8 *preq_elem, int offset, bool ae) { if (ae) offset += 6; return get_unaligned_le16(preq_elem + offset); } /* HWMP IE processing macros */ #define AE_F (1<<6) #define AE_F_SET(x) (*x & AE_F) #define PREQ_IE_FLAGS(x) (*(x)) #define PREQ_IE_HOPCOUNT(x) (*(x + 1)) #define PREQ_IE_TTL(x) (*(x + 2)) #define PREQ_IE_PREQ_ID(x) u32_field_get(x, 3, 0) #define PREQ_IE_ORIG_ADDR(x) (x + 7) #define PREQ_IE_ORIG_SN(x) u32_field_get(x, 13, 0) #define PREQ_IE_LIFETIME(x) u32_field_get(x, 17, AE_F_SET(x)) #define PREQ_IE_METRIC(x) u32_field_get(x, 21, AE_F_SET(x)) #define PREQ_IE_TARGET_F(x) (*(AE_F_SET(x) ? x + 32 : x + 26)) #define PREQ_IE_TARGET_ADDR(x) (AE_F_SET(x) ? x + 33 : x + 27) #define PREQ_IE_TARGET_SN(x) u32_field_get(x, 33, AE_F_SET(x)) #define PREP_IE_FLAGS(x) PREQ_IE_FLAGS(x) #define PREP_IE_HOPCOUNT(x) PREQ_IE_HOPCOUNT(x) #define PREP_IE_TTL(x) PREQ_IE_TTL(x) #define PREP_IE_ORIG_ADDR(x) (AE_F_SET(x) ? 
x + 27 : x + 21) #define PREP_IE_ORIG_SN(x) u32_field_get(x, 27, AE_F_SET(x)) #define PREP_IE_LIFETIME(x) u32_field_get(x, 13, AE_F_SET(x)) #define PREP_IE_METRIC(x) u32_field_get(x, 17, AE_F_SET(x)) #define PREP_IE_TARGET_ADDR(x) (x + 3) #define PREP_IE_TARGET_SN(x) u32_field_get(x, 9, 0) #define PERR_IE_TTL(x) (*(x)) #define PERR_IE_TARGET_FLAGS(x) (*(x + 2)) #define PERR_IE_TARGET_ADDR(x) (x + 3) #define PERR_IE_TARGET_SN(x) u32_field_get(x, 9, 0) #define PERR_IE_TARGET_RCODE(x) u16_field_get(x, 13, 0) #define MSEC_TO_TU(x) (x*1000/1024) #define SN_GT(x, y) ((s32)(y - x) < 0) #define SN_LT(x, y) ((s32)(x - y) < 0) #define MAX_SANE_SN_DELTA 32 static inline u32 SN_DELTA(u32 x, u32 y) { return x >= y ? x - y : y - x; } #define net_traversal_jiffies(s) \ msecs_to_jiffies(s->u.mesh.mshcfg.dot11MeshHWMPnetDiameterTraversalTime) #define default_lifetime(s) \ MSEC_TO_TU(s->u.mesh.mshcfg.dot11MeshHWMPactivePathTimeout) #define min_preq_int_jiff(s) \ (msecs_to_jiffies(s->u.mesh.mshcfg.dot11MeshHWMPpreqMinInterval)) #define max_preq_retries(s) (s->u.mesh.mshcfg.dot11MeshHWMPmaxPREQretries) #define disc_timeout_jiff(s) \ msecs_to_jiffies(sdata->u.mesh.mshcfg.min_discovery_timeout) #define root_path_confirmation_jiffies(s) \ msecs_to_jiffies(sdata->u.mesh.mshcfg.dot11MeshHWMPconfirmationInterval) enum mpath_frame_type { MPATH_PREQ = 0, MPATH_PREP, MPATH_PERR, MPATH_RANN }; static const u8 broadcast_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags, const u8 *orig_addr, u32 orig_sn, u8 target_flags, const u8 *target, u32 target_sn, const u8 *da, u8 hop_count, u8 ttl, u32 lifetime, u32 metric, u32 preq_id, struct ieee80211_sub_if_data *sdata) { struct ieee80211_local *local = sdata->local; struct sk_buff *skb; struct ieee80211_mgmt *mgmt; u8 *pos, ie_len; int hdr_len = offsetofend(struct ieee80211_mgmt, u.action.u.mesh_action); skb = dev_alloc_skb(local->tx_headroom + hdr_len + 2 + 37); /* max HWMP IE */ if (!skb) return -1; skb_reserve(skb, local->tx_headroom); mgmt = skb_put_zero(skb, hdr_len); mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ACTION); memcpy(mgmt->da, da, ETH_ALEN); memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); /* BSSID == SA */ memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN); mgmt->u.action.category = WLAN_CATEGORY_MESH_ACTION; mgmt->u.action.u.mesh_action.action_code = WLAN_MESH_ACTION_HWMP_PATH_SELECTION; switch (action) { case MPATH_PREQ: mhwmp_dbg(sdata, "sending PREQ to %pM\n", target); ie_len = 37; pos = skb_put(skb, 2 + ie_len); *pos++ = WLAN_EID_PREQ; break; case MPATH_PREP: mhwmp_dbg(sdata, "sending PREP to %pM\n", orig_addr); ie_len = 31; pos = skb_put(skb, 2 + ie_len); *pos++ = WLAN_EID_PREP; break; case MPATH_RANN: mhwmp_dbg(sdata, "sending RANN from %pM\n", orig_addr); ie_len = sizeof(struct ieee80211_rann_ie); pos = skb_put(skb, 2 + ie_len); *pos++ = WLAN_EID_RANN; break; default: kfree_skb(skb); return -ENOTSUPP; } *pos++ = ie_len; *pos++ = flags; *pos++ = hop_count; *pos++ = ttl; if (action == MPATH_PREP) { memcpy(pos, target, ETH_ALEN); pos += ETH_ALEN; put_unaligned_le32(target_sn, pos); pos += 4; } else { if (action == MPATH_PREQ) { put_unaligned_le32(preq_id, pos); pos += 4; } memcpy(pos, orig_addr, ETH_ALEN); pos += ETH_ALEN; put_unaligned_le32(orig_sn, pos); pos += 4; } put_unaligned_le32(lifetime, pos); /* interval for RANN */ pos += 4; put_unaligned_le32(metric, pos); pos += 4; if (action == MPATH_PREQ) { *pos++ = 1; /* destination count */ *pos++ = 
target_flags; memcpy(pos, target, ETH_ALEN); pos += ETH_ALEN; put_unaligned_le32(target_sn, pos); pos += 4; } else if (action == MPATH_PREP) { memcpy(pos, orig_addr, ETH_ALEN); pos += ETH_ALEN; put_unaligned_le32(orig_sn, pos); pos += 4; } ieee80211_tx_skb(sdata, skb); return 0; } /* Headroom is not adjusted. Caller should ensure that skb has sufficient * headroom in case the frame is encrypted. */ static void prepare_frame_for_deferred_tx(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; skb_reset_mac_header(skb); skb_reset_network_header(skb); skb_reset_transport_header(skb); /* Send all internal mgmt frames on VO. Accordingly set TID to 7. */ skb_set_queue_mapping(skb, IEEE80211_AC_VO); skb->priority = 7; info->control.vif = &sdata->vif; info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING; ieee80211_set_qos_hdr(sdata, skb); ieee80211_mps_set_frame_flags(sdata, NULL, hdr); } /** * mesh_path_error_tx - Sends a PERR mesh management frame * * @ttl: allowed remaining hops * @target: broken destination * @target_sn: SN of the broken destination * @target_rcode: reason code for this PERR * @ra: node this frame is addressed to * @sdata: local mesh subif * * Note: This function may be called with driver locks taken that the driver * also acquires in the TX path. To avoid a deadlock we don't transmit the * frame directly but add it to the pending queue instead. */ int mesh_path_error_tx(struct ieee80211_sub_if_data *sdata, u8 ttl, const u8 *target, u32 target_sn, u16 target_rcode, const u8 *ra) { struct ieee80211_local *local = sdata->local; struct sk_buff *skb; struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; struct ieee80211_mgmt *mgmt; u8 *pos, ie_len; int hdr_len = offsetofend(struct ieee80211_mgmt, u.action.u.mesh_action); if (time_before(jiffies, ifmsh->next_perr)) return -EAGAIN; skb = dev_alloc_skb(local->tx_headroom + sdata->encrypt_headroom + IEEE80211_ENCRYPT_TAILROOM + hdr_len + 2 + 15 /* PERR IE */); if (!skb) return -1; skb_reserve(skb, local->tx_headroom + sdata->encrypt_headroom); mgmt = skb_put_zero(skb, hdr_len); mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ACTION); memcpy(mgmt->da, ra, ETH_ALEN); memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); /* BSSID == SA */ memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN); mgmt->u.action.category = WLAN_CATEGORY_MESH_ACTION; mgmt->u.action.u.mesh_action.action_code = WLAN_MESH_ACTION_HWMP_PATH_SELECTION; ie_len = 15; pos = skb_put(skb, 2 + ie_len); *pos++ = WLAN_EID_PERR; *pos++ = ie_len; /* ttl */ *pos++ = ttl; /* number of destinations */ *pos++ = 1; /* Flags field has AE bit only as defined in * sec 8.4.2.117 IEEE802.11-2012 */ *pos = 0; pos++; memcpy(pos, target, ETH_ALEN); pos += ETH_ALEN; put_unaligned_le32(target_sn, pos); pos += 4; put_unaligned_le16(target_rcode, pos); /* see note in function header */ prepare_frame_for_deferred_tx(sdata, skb); ifmsh->next_perr = TU_TO_EXP_TIME( ifmsh->mshcfg.dot11MeshHWMPperrMinInterval); ieee80211_add_pending_skb(local, skb); return 0; } void ieee80211s_update_metric(struct ieee80211_local *local, struct sta_info *sta, struct ieee80211_tx_status *st) { struct ieee80211_tx_info *txinfo = st->info; int failed; failed = !(txinfo->flags & IEEE80211_TX_STAT_ACK); /* moving average, scaled to 100. 
* feed failure as 100 and success as 0 */ ewma_mesh_fail_avg_add(&sta->mesh->fail_avg, failed * 100); if (ewma_mesh_fail_avg_read(&sta->mesh->fail_avg) > LINK_FAIL_THRESH) mesh_plink_broken(sta); } static u32 airtime_link_metric_get(struct ieee80211_local *local, struct sta_info *sta) { struct rate_info rinfo; /* This should be adjusted for each device */ int device_constant = 1 << ARITH_SHIFT; int test_frame_len = TEST_FRAME_LEN << ARITH_SHIFT; int s_unit = 1 << ARITH_SHIFT; int rate, err; u32 tx_time, estimated_retx; u64 result; unsigned long fail_avg = ewma_mesh_fail_avg_read(&sta->mesh->fail_avg); /* Try to get rate based on HW/SW RC algorithm. * Rate is returned in units of Kbps, correct this * to comply with airtime calculation units * Round up in case we get rate < 100Kbps */ rate = DIV_ROUND_UP(sta_get_expected_throughput(sta), 100); if (rate) { err = 0; } else { if (fail_avg > LINK_FAIL_THRESH) return MAX_METRIC; sta_set_rate_info_tx(sta, &sta->tx_stats.last_rate, &rinfo); rate = cfg80211_calculate_bitrate(&rinfo); if (WARN_ON(!rate)) return MAX_METRIC; err = (fail_avg << ARITH_SHIFT) / 100; } /* bitrate is in units of 100 Kbps, while we need rate in units of * 1Mbps. This will be corrected on tx_time computation. */ tx_time = (device_constant + 10 * test_frame_len / rate); estimated_retx = ((1 << (2 * ARITH_SHIFT)) / (s_unit - err)); result = (tx_time * estimated_retx) >> (2 * ARITH_SHIFT); return (u32)result; } /** * hwmp_route_info_get - Update routing info to originator and transmitter * * @sdata: local mesh subif * @mgmt: mesh management frame * @hwmp_ie: hwmp information element (PREP or PREQ) * @action: type of hwmp ie * * This function updates the path routing information to the originator and the * transmitter of a HWMP PREQ or PREP frame. * * Returns: metric to frame originator or 0 if the frame should not be further * processed * * Notes: this function is the only place (besides user-provided info) where * path routing information is updated. */ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt, const u8 *hwmp_ie, enum mpath_frame_type action) { struct ieee80211_local *local = sdata->local; struct mesh_path *mpath; struct sta_info *sta; bool fresh_info; const u8 *orig_addr, *ta; u32 orig_sn, orig_metric; unsigned long orig_lifetime, exp_time; u32 last_hop_metric, new_metric; bool process = true; rcu_read_lock(); sta = sta_info_get(sdata, mgmt->sa); if (!sta) { rcu_read_unlock(); return 0; } last_hop_metric = airtime_link_metric_get(local, sta); /* Update and check originator routing info */ fresh_info = true; switch (action) { case MPATH_PREQ: orig_addr = PREQ_IE_ORIG_ADDR(hwmp_ie); orig_sn = PREQ_IE_ORIG_SN(hwmp_ie); orig_lifetime = PREQ_IE_LIFETIME(hwmp_ie); orig_metric = PREQ_IE_METRIC(hwmp_ie); break; case MPATH_PREP: /* Originator here refers to the MP that was the target in the * Path Request. We divert from the nomenclature in the draft * so that we can easily use a single function to gather path * information from both PREQ and PREP frames. 
*/ orig_addr = PREP_IE_TARGET_ADDR(hwmp_ie); orig_sn = PREP_IE_TARGET_SN(hwmp_ie); orig_lifetime = PREP_IE_LIFETIME(hwmp_ie); orig_metric = PREP_IE_METRIC(hwmp_ie); break; default: rcu_read_unlock(); return 0; } new_metric = orig_metric + last_hop_metric; if (new_metric < orig_metric) new_metric = MAX_METRIC; exp_time = TU_TO_EXP_TIME(orig_lifetime); if (ether_addr_equal(orig_addr, sdata->vif.addr)) { /* This MP is the originator, we are not interested in this * frame, except for updating transmitter's path info. */ process = false; fresh_info = false; } else { mpath = mesh_path_lookup(sdata, orig_addr); if (mpath) { spin_lock_bh(&mpath->state_lock); if (mpath->flags & MESH_PATH_FIXED) fresh_info = false; else if ((mpath->flags & MESH_PATH_ACTIVE) && (mpath->flags & MESH_PATH_SN_VALID)) { if (SN_GT(mpath->sn, orig_sn) || (mpath->sn == orig_sn && new_metric >= mpath->metric)) { process = false; fresh_info = false; } } else if (!(mpath->flags & MESH_PATH_ACTIVE)) { bool have_sn, newer_sn, bounced; have_sn = mpath->flags & MESH_PATH_SN_VALID; newer_sn = have_sn && SN_GT(orig_sn, mpath->sn); bounced = have_sn && (SN_DELTA(orig_sn, mpath->sn) > MAX_SANE_SN_DELTA); if (!have_sn || newer_sn) { /* if SN is newer than what we had * then we can take it */; } else if (bounced) { /* if SN is way different than what * we had then assume the other side * rebooted or restarted */; } else { process = false; fresh_info = false; } } } else { mpath = mesh_path_add(sdata, orig_addr); if (IS_ERR(mpath)) { rcu_read_unlock(); return 0; } spin_lock_bh(&mpath->state_lock); } if (fresh_info) { mesh_path_assign_nexthop(mpath, sta); mpath->flags |= MESH_PATH_SN_VALID; mpath->metric = new_metric; mpath->sn = orig_sn; mpath->exp_time = time_after(mpath->exp_time, exp_time) ? mpath->exp_time : exp_time; mesh_path_activate(mpath); spin_unlock_bh(&mpath->state_lock); ewma_mesh_fail_avg_init(&sta->mesh->fail_avg); /* init it at a low value - 0 start is tricky */ ewma_mesh_fail_avg_add(&sta->mesh->fail_avg, 1); mesh_path_tx_pending(mpath); /* draft says preq_id should be saved too, but there does * not seem to be any use for it, skipping for now */ } else spin_unlock_bh(&mpath->state_lock); } /* Update and check transmitter routing info */ ta = mgmt->sa; if (ether_addr_equal(orig_addr, ta)) fresh_info = false; else { fresh_info = true; mpath = mesh_path_lookup(sdata, ta); if (mpath) { spin_lock_bh(&mpath->state_lock); if ((mpath->flags & MESH_PATH_FIXED) || ((mpath->flags & MESH_PATH_ACTIVE) && (last_hop_metric > mpath->metric))) fresh_info = false; } else { mpath = mesh_path_add(sdata, ta); if (IS_ERR(mpath)) { rcu_read_unlock(); return 0; } spin_lock_bh(&mpath->state_lock); } if (fresh_info) { mesh_path_assign_nexthop(mpath, sta); mpath->metric = last_hop_metric; mpath->exp_time = time_after(mpath->exp_time, exp_time) ? mpath->exp_time : exp_time; mesh_path_activate(mpath); spin_unlock_bh(&mpath->state_lock); ewma_mesh_fail_avg_init(&sta->mesh->fail_avg); /* init it at a low value - 0 start is tricky */ ewma_mesh_fail_avg_add(&sta->mesh->fail_avg, 1); mesh_path_tx_pending(mpath); } else spin_unlock_bh(&mpath->state_lock); } rcu_read_unlock(); return process ? 
new_metric : 0; } static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt, const u8 *preq_elem, u32 orig_metric) { struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; struct mesh_path *mpath = NULL; const u8 *target_addr, *orig_addr; const u8 *da; u8 target_flags, ttl, flags; u32 orig_sn, target_sn, lifetime, target_metric = 0; bool reply = false; bool forward = true; bool root_is_gate; /* Update target SN, if present */ target_addr = PREQ_IE_TARGET_ADDR(preq_elem); orig_addr = PREQ_IE_ORIG_ADDR(preq_elem); target_sn = PREQ_IE_TARGET_SN(preq_elem); orig_sn = PREQ_IE_ORIG_SN(preq_elem); target_flags = PREQ_IE_TARGET_F(preq_elem); /* Proactive PREQ gate announcements */ flags = PREQ_IE_FLAGS(preq_elem); root_is_gate = !!(flags & RANN_FLAG_IS_GATE); mhwmp_dbg(sdata, "received PREQ from %pM\n", orig_addr); if (ether_addr_equal(target_addr, sdata->vif.addr)) { mhwmp_dbg(sdata, "PREQ is for us\n"); forward = false; reply = true; target_metric = 0; if (SN_GT(target_sn, ifmsh->sn)) ifmsh->sn = target_sn; if (time_after(jiffies, ifmsh->last_sn_update + net_traversal_jiffies(sdata)) || time_before(jiffies, ifmsh->last_sn_update)) { ++ifmsh->sn; ifmsh->last_sn_update = jiffies; } target_sn = ifmsh->sn; } else if (is_broadcast_ether_addr(target_addr) && (target_flags & IEEE80211_PREQ_TO_FLAG)) { rcu_read_lock(); mpath = mesh_path_lookup(sdata, orig_addr); if (mpath) { if (flags & IEEE80211_PREQ_PROACTIVE_PREP_FLAG) { reply = true; target_addr = sdata->vif.addr; target_sn = ++ifmsh->sn; target_metric = 0; ifmsh->last_sn_update = jiffies; } if (root_is_gate) mesh_path_add_gate(mpath); } rcu_read_unlock(); } else { rcu_read_lock(); mpath = mesh_path_lookup(sdata, target_addr); if (mpath) { if ((!(mpath->flags & MESH_PATH_SN_VALID)) || SN_LT(mpath->sn, target_sn)) { mpath->sn = target_sn; mpath->flags |= MESH_PATH_SN_VALID; } else if ((!(target_flags & IEEE80211_PREQ_TO_FLAG)) && (mpath->flags & MESH_PATH_ACTIVE)) { reply = true; target_metric = mpath->metric; target_sn = mpath->sn; /* Case E2 of sec 13.10.9.3 IEEE 802.11-2012*/ target_flags |= IEEE80211_PREQ_TO_FLAG; } } rcu_read_unlock(); } if (reply) { lifetime = PREQ_IE_LIFETIME(preq_elem); ttl = ifmsh->mshcfg.element_ttl; if (ttl != 0) { mhwmp_dbg(sdata, "replying to the PREQ\n"); mesh_path_sel_frame_tx(MPATH_PREP, 0, orig_addr, orig_sn, 0, target_addr, target_sn, mgmt->sa, 0, ttl, lifetime, target_metric, 0, sdata); } else { ifmsh->mshstats.dropped_frames_ttl++; } } if (forward && ifmsh->mshcfg.dot11MeshForwarding) { u32 preq_id; u8 hopcount; ttl = PREQ_IE_TTL(preq_elem); lifetime = PREQ_IE_LIFETIME(preq_elem); if (ttl <= 1) { ifmsh->mshstats.dropped_frames_ttl++; return; } mhwmp_dbg(sdata, "forwarding the PREQ from %pM\n", orig_addr); --ttl; preq_id = PREQ_IE_PREQ_ID(preq_elem); hopcount = PREQ_IE_HOPCOUNT(preq_elem) + 1; da = (mpath && mpath->is_root) ? 
mpath->rann_snd_addr : broadcast_addr; if (flags & IEEE80211_PREQ_PROACTIVE_PREP_FLAG) { target_addr = PREQ_IE_TARGET_ADDR(preq_elem); target_sn = PREQ_IE_TARGET_SN(preq_elem); } mesh_path_sel_frame_tx(MPATH_PREQ, flags, orig_addr, orig_sn, target_flags, target_addr, target_sn, da, hopcount, ttl, lifetime, orig_metric, preq_id, sdata); if (!is_multicast_ether_addr(da)) ifmsh->mshstats.fwded_unicast++; else ifmsh->mshstats.fwded_mcast++; ifmsh->mshstats.fwded_frames++; } } static inline struct sta_info * next_hop_deref_protected(struct mesh_path *mpath) { return rcu_dereference_protected(mpath->next_hop, lockdep_is_held(&mpath->state_lock)); } static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt, const u8 *prep_elem, u32 metric) { struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; struct mesh_path *mpath; const u8 *target_addr, *orig_addr; u8 ttl, hopcount, flags; u8 next_hop[ETH_ALEN]; u32 target_sn, orig_sn, lifetime; mhwmp_dbg(sdata, "received PREP from %pM\n", PREP_IE_TARGET_ADDR(prep_elem)); orig_addr = PREP_IE_ORIG_ADDR(prep_elem); if (ether_addr_equal(orig_addr, sdata->vif.addr)) /* destination, no forwarding required */ return; if (!ifmsh->mshcfg.dot11MeshForwarding) return; ttl = PREP_IE_TTL(prep_elem); if (ttl <= 1) { sdata->u.mesh.mshstats.dropped_frames_ttl++; return; } rcu_read_lock(); mpath = mesh_path_lookup(sdata, orig_addr); if (mpath) spin_lock_bh(&mpath->state_lock); else goto fail; if (!(mpath->flags & MESH_PATH_ACTIVE)) { spin_unlock_bh(&mpath->state_lock); goto fail; } memcpy(next_hop, next_hop_deref_protected(mpath)->sta.addr, ETH_ALEN); spin_unlock_bh(&mpath->state_lock); --ttl; flags = PREP_IE_FLAGS(prep_elem); lifetime = PREP_IE_LIFETIME(prep_elem); hopcount = PREP_IE_HOPCOUNT(prep_elem) + 1; target_addr = PREP_IE_TARGET_ADDR(prep_elem); target_sn = PREP_IE_TARGET_SN(prep_elem); orig_sn = PREP_IE_ORIG_SN(prep_elem); mesh_path_sel_frame_tx(MPATH_PREP, flags, orig_addr, orig_sn, 0, target_addr, target_sn, next_hop, hopcount, ttl, lifetime, metric, 0, sdata); rcu_read_unlock(); sdata->u.mesh.mshstats.fwded_unicast++; sdata->u.mesh.mshstats.fwded_frames++; return; fail: rcu_read_unlock(); sdata->u.mesh.mshstats.dropped_frames_no_route++; } static void hwmp_perr_frame_process(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt, const u8 *perr_elem) { struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; struct mesh_path *mpath; u8 ttl; const u8 *ta, *target_addr; u32 target_sn; u16 target_rcode; ta = mgmt->sa; ttl = PERR_IE_TTL(perr_elem); if (ttl <= 1) { ifmsh->mshstats.dropped_frames_ttl++; return; } ttl--; target_addr = PERR_IE_TARGET_ADDR(perr_elem); target_sn = PERR_IE_TARGET_SN(perr_elem); target_rcode = PERR_IE_TARGET_RCODE(perr_elem); rcu_read_lock(); mpath = mesh_path_lookup(sdata, target_addr); if (mpath) { struct sta_info *sta; spin_lock_bh(&mpath->state_lock); sta = next_hop_deref_protected(mpath); if (mpath->flags & MESH_PATH_ACTIVE && ether_addr_equal(ta, sta->sta.addr) && !(mpath->flags & MESH_PATH_FIXED) && (!(mpath->flags & MESH_PATH_SN_VALID) || SN_GT(target_sn, mpath->sn) || target_sn == 0)) { mpath->flags &= ~MESH_PATH_ACTIVE; if (target_sn != 0) mpath->sn = target_sn; else mpath->sn += 1; spin_unlock_bh(&mpath->state_lock); if (!ifmsh->mshcfg.dot11MeshForwarding) goto endperr; mesh_path_error_tx(sdata, ttl, target_addr, target_sn, target_rcode, broadcast_addr); } else spin_unlock_bh(&mpath->state_lock); } endperr: rcu_read_unlock(); } static void hwmp_rann_frame_process(struct 
ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt, const struct ieee80211_rann_ie *rann) { struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; struct ieee80211_local *local = sdata->local; struct sta_info *sta; struct mesh_path *mpath; u8 ttl, flags, hopcount; const u8 *orig_addr; u32 orig_sn, new_metric, orig_metric, last_hop_metric, interval; bool root_is_gate; ttl = rann->rann_ttl; flags = rann->rann_flags; root_is_gate = !!(flags & RANN_FLAG_IS_GATE); orig_addr = rann->rann_addr; orig_sn = le32_to_cpu(rann->rann_seq); interval = le32_to_cpu(rann->rann_interval); hopcount = rann->rann_hopcount; hopcount++; orig_metric = le32_to_cpu(rann->rann_metric); /* Ignore our own RANNs */ if (ether_addr_equal(orig_addr, sdata->vif.addr)) return; mhwmp_dbg(sdata, "received RANN from %pM via neighbour %pM (is_gate=%d)\n", orig_addr, mgmt->sa, root_is_gate); rcu_read_lock(); sta = sta_info_get(sdata, mgmt->sa); if (!sta) { rcu_read_unlock(); return; } last_hop_metric = airtime_link_metric_get(local, sta); new_metric = orig_metric + last_hop_metric; if (new_metric < orig_metric) new_metric = MAX_METRIC; mpath = mesh_path_lookup(sdata, orig_addr); if (!mpath) { mpath = mesh_path_add(sdata, orig_addr); if (IS_ERR(mpath)) { rcu_read_unlock(); sdata->u.mesh.mshstats.dropped_frames_no_route++; return; } } if (!(SN_LT(mpath->sn, orig_sn)) && !(mpath->sn == orig_sn && new_metric < mpath->rann_metric)) { rcu_read_unlock(); return; } if ((!(mpath->flags & (MESH_PATH_ACTIVE | MESH_PATH_RESOLVING)) || (time_after(jiffies, mpath->last_preq_to_root + root_path_confirmation_jiffies(sdata)) || time_before(jiffies, mpath->last_preq_to_root))) && !(mpath->flags & MESH_PATH_FIXED) && (ttl != 0)) { mhwmp_dbg(sdata, "time to refresh root mpath %pM\n", orig_addr); mesh_queue_preq(mpath, PREQ_Q_F_START | PREQ_Q_F_REFRESH); mpath->last_preq_to_root = jiffies; } mpath->sn = orig_sn; mpath->rann_metric = new_metric; mpath->is_root = true; /* Recording RANNs sender address to send individually * addressed PREQs destined for root mesh STA */ memcpy(mpath->rann_snd_addr, mgmt->sa, ETH_ALEN); if (root_is_gate) mesh_path_add_gate(mpath); if (ttl <= 1) { ifmsh->mshstats.dropped_frames_ttl++; rcu_read_unlock(); return; } ttl--; if (ifmsh->mshcfg.dot11MeshForwarding) { mesh_path_sel_frame_tx(MPATH_RANN, flags, orig_addr, orig_sn, 0, NULL, 0, broadcast_addr, hopcount, ttl, interval, new_metric, 0, sdata); } rcu_read_unlock(); } void mesh_rx_path_sel_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt, size_t len) { struct ieee802_11_elems elems; size_t baselen; u32 path_metric; struct sta_info *sta; /* need action_code */ if (len < IEEE80211_MIN_ACTION_SIZE + 1) return; rcu_read_lock(); sta = sta_info_get(sdata, mgmt->sa); if (!sta || sta->mesh->plink_state != NL80211_PLINK_ESTAB) { rcu_read_unlock(); return; } rcu_read_unlock(); baselen = (u8 *) mgmt->u.action.u.mesh_action.variable - (u8 *) mgmt; ieee802_11_parse_elems(mgmt->u.action.u.mesh_action.variable, len - baselen, false, &elems, mgmt->bssid, NULL); if (elems.preq) { if (elems.preq_len != 37) /* Right now we support just 1 destination and no AE */ return; path_metric = hwmp_route_info_get(sdata, mgmt, elems.preq, MPATH_PREQ); if (path_metric) hwmp_preq_frame_process(sdata, mgmt, elems.preq, path_metric); } if (elems.prep) { if (elems.prep_len != 31) /* Right now we support no AE */ return; path_metric = hwmp_route_info_get(sdata, mgmt, elems.prep, MPATH_PREP); if (path_metric) hwmp_prep_frame_process(sdata, mgmt, elems.prep, path_metric); } if 
(elems.perr) { if (elems.perr_len != 15) /* Right now we support only one destination per PERR */ return; hwmp_perr_frame_process(sdata, mgmt, elems.perr); } if (elems.rann) hwmp_rann_frame_process(sdata, mgmt, elems.rann); } /** * mesh_queue_preq - queue a PREQ to a given destination * * @mpath: mesh path to discover * @flags: special attributes of the PREQ to be sent * * Locking: the function must be called from within a rcu read lock block. * */ static void mesh_queue_preq(struct mesh_path *mpath, u8 flags) { struct ieee80211_sub_if_data *sdata = mpath->sdata; struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; struct mesh_preq_queue *preq_node; preq_node = kmalloc(sizeof(struct mesh_preq_queue), GFP_ATOMIC); if (!preq_node) { mhwmp_dbg(sdata, "could not allocate PREQ node\n"); return; } spin_lock_bh(&ifmsh->mesh_preq_queue_lock); if (ifmsh->preq_queue_len == MAX_PREQ_QUEUE_LEN) { spin_unlock_bh(&ifmsh->mesh_preq_queue_lock); kfree(preq_node); if (printk_ratelimit()) mhwmp_dbg(sdata, "PREQ node queue full\n"); return; } spin_lock(&mpath->state_lock); if (mpath->flags & MESH_PATH_REQ_QUEUED) { spin_unlock(&mpath->state_lock); spin_unlock_bh(&ifmsh->mesh_preq_queue_lock); kfree(preq_node); return; } memcpy(preq_node->dst, mpath->dst, ETH_ALEN); preq_node->flags = flags; mpath->flags |= MESH_PATH_REQ_QUEUED; spin_unlock(&mpath->state_lock); list_add_tail(&preq_node->list, &ifmsh->preq_queue.list); ++ifmsh->preq_queue_len; spin_unlock_bh(&ifmsh->mesh_preq_queue_lock); if (time_after(jiffies, ifmsh->last_preq + min_preq_int_jiff(sdata))) ieee80211_queue_work(&sdata->local->hw, &sdata->work); else if (time_before(jiffies, ifmsh->last_preq)) { /* avoid long wait if did not send preqs for a long time * and jiffies wrapped around */ ifmsh->last_preq = jiffies - min_preq_int_jiff(sdata) - 1; ieee80211_queue_work(&sdata->local->hw, &sdata->work); } else mod_timer(&ifmsh->mesh_path_timer, ifmsh->last_preq + min_preq_int_jiff(sdata)); } /** * mesh_path_start_discovery - launch a path discovery from the PREQ queue * * @sdata: local mesh subif */ void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; struct mesh_preq_queue *preq_node; struct mesh_path *mpath; u8 ttl, target_flags = 0; const u8 *da; u32 lifetime; spin_lock_bh(&ifmsh->mesh_preq_queue_lock); if (!ifmsh->preq_queue_len || time_before(jiffies, ifmsh->last_preq + min_preq_int_jiff(sdata))) { spin_unlock_bh(&ifmsh->mesh_preq_queue_lock); return; } preq_node = list_first_entry(&ifmsh->preq_queue.list, struct mesh_preq_queue, list); list_del(&preq_node->list); --ifmsh->preq_queue_len; spin_unlock_bh(&ifmsh->mesh_preq_queue_lock); rcu_read_lock(); mpath = mesh_path_lookup(sdata, preq_node->dst); if (!mpath) goto enddiscovery; spin_lock_bh(&mpath->state_lock); if (mpath->flags & (MESH_PATH_DELETED | MESH_PATH_FIXED)) { spin_unlock_bh(&mpath->state_lock); goto enddiscovery; } mpath->flags &= ~MESH_PATH_REQ_QUEUED; if (preq_node->flags & PREQ_Q_F_START) { if (mpath->flags & MESH_PATH_RESOLVING) { spin_unlock_bh(&mpath->state_lock); goto enddiscovery; } else { mpath->flags &= ~MESH_PATH_RESOLVED; mpath->flags |= MESH_PATH_RESOLVING; mpath->discovery_retries = 0; mpath->discovery_timeout = disc_timeout_jiff(sdata); } } else if (!(mpath->flags & MESH_PATH_RESOLVING) || mpath->flags & MESH_PATH_RESOLVED) { mpath->flags &= ~MESH_PATH_RESOLVING; spin_unlock_bh(&mpath->state_lock); goto enddiscovery; } ifmsh->last_preq = jiffies; if (time_after(jiffies, ifmsh->last_sn_update + 
net_traversal_jiffies(sdata)) || time_before(jiffies, ifmsh->last_sn_update)) { ++ifmsh->sn; sdata->u.mesh.last_sn_update = jiffies; } lifetime = default_lifetime(sdata); ttl = sdata->u.mesh.mshcfg.element_ttl; if (ttl == 0) { sdata->u.mesh.mshstats.dropped_frames_ttl++; spin_unlock_bh(&mpath->state_lock); goto enddiscovery; } if (preq_node->flags & PREQ_Q_F_REFRESH) target_flags |= IEEE80211_PREQ_TO_FLAG; else target_flags &= ~IEEE80211_PREQ_TO_FLAG; spin_unlock_bh(&mpath->state_lock); da = (mpath->is_root) ? mpath->rann_snd_addr : broadcast_addr; mesh_path_sel_frame_tx(MPATH_PREQ, 0, sdata->vif.addr, ifmsh->sn, target_flags, mpath->dst, mpath->sn, da, 0, ttl, lifetime, 0, ifmsh->preq_id++, sdata); mod_timer(&mpath->timer, jiffies + mpath->discovery_timeout); enddiscovery: rcu_read_unlock(); kfree(preq_node); } /** * mesh_nexthop_resolve - lookup next hop; conditionally start path discovery * * @skb: 802.11 frame to be sent * @sdata: network subif the frame will be sent through * * Lookup next hop for given skb and start path discovery if no * forwarding information is found. * * Returns: 0 if the next hop was found and -ENOENT if the frame was queued. * skb is freed here if no mpath could be allocated. */ int mesh_nexthop_resolve(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct mesh_path *mpath; struct sk_buff *skb_to_free = NULL; u8 *target_addr = hdr->addr3; int err = 0; /* Nulls are only sent to peers for PS and should be pre-addressed */ if (ieee80211_is_qos_nullfunc(hdr->frame_control)) return 0; rcu_read_lock(); err = mesh_nexthop_lookup(sdata, skb); if (!err) goto endlookup; /* no nexthop found, start resolving */ mpath = mesh_path_lookup(sdata, target_addr); if (!mpath) { mpath = mesh_path_add(sdata, target_addr); if (IS_ERR(mpath)) { mesh_path_discard_frame(sdata, skb); err = PTR_ERR(mpath); goto endlookup; } } if (!(mpath->flags & MESH_PATH_RESOLVING)) mesh_queue_preq(mpath, PREQ_Q_F_START); if (skb_queue_len(&mpath->frame_queue) >= MESH_FRAME_QUEUE_LEN) skb_to_free = skb_dequeue(&mpath->frame_queue); info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING; ieee80211_set_qos_hdr(sdata, skb); skb_queue_tail(&mpath->frame_queue, skb); err = -ENOENT; if (skb_to_free) mesh_path_discard_frame(sdata, skb_to_free); endlookup: rcu_read_unlock(); return err; } /** * mesh_nexthop_lookup - put the appropriate next hop on a mesh frame. Calling * this function is considered "using" the associated mpath, so preempt a path * refresh if this mpath expires soon. * * @skb: 802.11 frame to be sent * @sdata: network subif the frame will be sent through * * Returns: 0 if the next hop was found. Nonzero otherwise. 
*/ int mesh_nexthop_lookup(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) { struct mesh_path *mpath; struct sta_info *next_hop; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; u8 *target_addr = hdr->addr3; int err = -ENOENT; rcu_read_lock(); mpath = mesh_path_lookup(sdata, target_addr); if (!mpath || !(mpath->flags & MESH_PATH_ACTIVE)) goto endlookup; if (time_after(jiffies, mpath->exp_time - msecs_to_jiffies(sdata->u.mesh.mshcfg.path_refresh_time)) && ether_addr_equal(sdata->vif.addr, hdr->addr4) && !(mpath->flags & MESH_PATH_RESOLVING) && !(mpath->flags & MESH_PATH_FIXED)) mesh_queue_preq(mpath, PREQ_Q_F_START | PREQ_Q_F_REFRESH); next_hop = rcu_dereference(mpath->next_hop); if (next_hop) { memcpy(hdr->addr1, next_hop->sta.addr, ETH_ALEN); memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN); ieee80211_mps_set_frame_flags(sdata, next_hop, hdr); err = 0; } endlookup: rcu_read_unlock(); return err; } void mesh_path_timer(struct timer_list *t) { struct mesh_path *mpath = from_timer(mpath, t, timer); struct ieee80211_sub_if_data *sdata = mpath->sdata; int ret; if (sdata->local->quiescing) return; spin_lock_bh(&mpath->state_lock); if (mpath->flags & MESH_PATH_RESOLVED || (!(mpath->flags & MESH_PATH_RESOLVING))) { mpath->flags &= ~(MESH_PATH_RESOLVING | MESH_PATH_RESOLVED); spin_unlock_bh(&mpath->state_lock); } else if (mpath->discovery_retries < max_preq_retries(sdata)) { ++mpath->discovery_retries; mpath->discovery_timeout *= 2; mpath->flags &= ~MESH_PATH_REQ_QUEUED; spin_unlock_bh(&mpath->state_lock); mesh_queue_preq(mpath, 0); } else { mpath->flags &= ~(MESH_PATH_RESOLVING | MESH_PATH_RESOLVED | MESH_PATH_REQ_QUEUED); mpath->exp_time = jiffies; spin_unlock_bh(&mpath->state_lock); if (!mpath->is_gate && mesh_gate_num(sdata) > 0) { ret = mesh_path_send_to_gates(mpath); if (ret) mhwmp_dbg(sdata, "no gate was reachable\n"); } else mesh_path_flush_pending(mpath); } } void mesh_path_tx_root_frame(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; u32 interval = ifmsh->mshcfg.dot11MeshHWMPRannInterval; u8 flags, target_flags = 0; flags = (ifmsh->mshcfg.dot11MeshGateAnnouncementProtocol) ? RANN_FLAG_IS_GATE : 0; switch (ifmsh->mshcfg.dot11MeshHWMPRootMode) { case IEEE80211_PROACTIVE_RANN: mesh_path_sel_frame_tx(MPATH_RANN, flags, sdata->vif.addr, ++ifmsh->sn, 0, NULL, 0, broadcast_addr, 0, ifmsh->mshcfg.element_ttl, interval, 0, 0, sdata); break; case IEEE80211_PROACTIVE_PREQ_WITH_PREP: flags |= IEEE80211_PREQ_PROACTIVE_PREP_FLAG; /* fall through */ case IEEE80211_PROACTIVE_PREQ_NO_PREP: interval = ifmsh->mshcfg.dot11MeshHWMPactivePathToRootTimeout; target_flags |= IEEE80211_PREQ_TO_FLAG | IEEE80211_PREQ_USN_FLAG; mesh_path_sel_frame_tx(MPATH_PREQ, flags, sdata->vif.addr, ++ifmsh->sn, target_flags, (u8 *) broadcast_addr, 0, broadcast_addr, 0, ifmsh->mshcfg.element_ttl, interval, 0, ifmsh->preq_id++, sdata); break; default: mhwmp_dbg(sdata, "Proactive mechanism not supported\n"); return; } } backport-iwlwifi-dkms-7906/net/mac80211/mesh_pathtbl.c000066400000000000000000000545541351064634500224560ustar00rootroot00000000000000/* * Copyright (c) 2008, 2009 open80211s Ltd. * Author: Luis Carlos Cobo * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. 
*/ #include #include #include #include #include #include #include #include "wme.h" #include "ieee80211_i.h" #include "mesh.h" static void mesh_path_free_rcu(struct mesh_table *tbl, struct mesh_path *mpath); static u32 mesh_table_hash(const void *addr, u32 len, u32 seed) { /* Use last four bytes of hw addr as hash index */ return jhash_1word(*(u32 *)(addr+2), seed); } static const struct rhashtable_params mesh_rht_params = { .nelem_hint = 2, .automatic_shrinking = true, .key_len = ETH_ALEN, .key_offset = offsetof(struct mesh_path, dst), .head_offset = offsetof(struct mesh_path, rhash), .hashfn = mesh_table_hash, }; static inline bool mpath_expired(struct mesh_path *mpath) { return (mpath->flags & MESH_PATH_ACTIVE) && time_after(jiffies, mpath->exp_time) && !(mpath->flags & MESH_PATH_FIXED); } static void mesh_path_rht_free(void *ptr, void *tblptr) { struct mesh_path *mpath = ptr; struct mesh_table *tbl = tblptr; mesh_path_free_rcu(tbl, mpath); } static struct mesh_table *mesh_table_alloc(void) { struct mesh_table *newtbl; newtbl = kmalloc(sizeof(struct mesh_table), GFP_ATOMIC); if (!newtbl) return NULL; INIT_HLIST_HEAD(&newtbl->known_gates); atomic_set(&newtbl->entries, 0); spin_lock_init(&newtbl->gates_lock); return newtbl; } static void mesh_table_free(struct mesh_table *tbl) { rhashtable_free_and_destroy(&tbl->rhead, mesh_path_rht_free, tbl); kfree(tbl); } /** * * mesh_path_assign_nexthop - update mesh path next hop * * @mpath: mesh path to update * @sta: next hop to assign * * Locking: mpath->state_lock must be held when calling this function */ void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta) { struct sk_buff *skb; struct ieee80211_hdr *hdr; unsigned long flags; rcu_assign_pointer(mpath->next_hop, sta); spin_lock_irqsave(&mpath->frame_queue.lock, flags); skb_queue_walk(&mpath->frame_queue, skb) { hdr = (struct ieee80211_hdr *) skb->data; memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN); memcpy(hdr->addr2, mpath->sdata->vif.addr, ETH_ALEN); ieee80211_mps_set_frame_flags(sta->sdata, sta, hdr); } spin_unlock_irqrestore(&mpath->frame_queue.lock, flags); } static void prepare_for_gate(struct sk_buff *skb, char *dst_addr, struct mesh_path *gate_mpath) { struct ieee80211_hdr *hdr; struct ieee80211s_hdr *mshdr; int mesh_hdrlen, hdrlen; char *next_hop; hdr = (struct ieee80211_hdr *) skb->data; hdrlen = ieee80211_hdrlen(hdr->frame_control); mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen); if (!(mshdr->flags & MESH_FLAGS_AE)) { /* size of the fixed part of the mesh header */ mesh_hdrlen = 6; /* make room for the two extended addresses */ skb_push(skb, 2 * ETH_ALEN); memmove(skb->data, hdr, hdrlen + mesh_hdrlen); hdr = (struct ieee80211_hdr *) skb->data; /* we preserve the previous mesh header and only add * the new addresses */ mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen); mshdr->flags = MESH_FLAGS_AE_A5_A6; memcpy(mshdr->eaddr1, hdr->addr3, ETH_ALEN); memcpy(mshdr->eaddr2, hdr->addr4, ETH_ALEN); } /* update next hop */ hdr = (struct ieee80211_hdr *) skb->data; rcu_read_lock(); next_hop = rcu_dereference(gate_mpath->next_hop)->sta.addr; memcpy(hdr->addr1, next_hop, ETH_ALEN); rcu_read_unlock(); memcpy(hdr->addr2, gate_mpath->sdata->vif.addr, ETH_ALEN); memcpy(hdr->addr3, dst_addr, ETH_ALEN); } /** * * mesh_path_move_to_queue - Move or copy frames from one mpath queue to another * * This function is used to transfer or copy frames from an unresolved mpath to * a gate mpath. The function also adds the Address Extension field and * updates the next hop.
* * If a frame already has an Address Extension field, only the next hop and * destination addresses are updated. * * The gate mpath must be an active mpath with a valid mpath->next_hop. * * @mpath: An active mpath the frames will be sent to (i.e. the gate) * @from_mpath: The failed mpath * @copy: When true, copy all the frames to the new mpath queue. When false, * move them. */ static void mesh_path_move_to_queue(struct mesh_path *gate_mpath, struct mesh_path *from_mpath, bool copy) { struct sk_buff *skb, *fskb, *tmp; struct sk_buff_head failq; unsigned long flags; if (WARN_ON(gate_mpath == from_mpath)) return; if (WARN_ON(!gate_mpath->next_hop)) return; __skb_queue_head_init(&failq); spin_lock_irqsave(&from_mpath->frame_queue.lock, flags); skb_queue_splice_init(&from_mpath->frame_queue, &failq); spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags); skb_queue_walk_safe(&failq, fskb, tmp) { if (skb_queue_len(&gate_mpath->frame_queue) >= MESH_FRAME_QUEUE_LEN) { mpath_dbg(gate_mpath->sdata, "mpath queue full!\n"); break; } skb = skb_copy(fskb, GFP_ATOMIC); if (WARN_ON(!skb)) break; prepare_for_gate(skb, gate_mpath->dst, gate_mpath); skb_queue_tail(&gate_mpath->frame_queue, skb); if (copy) continue; __skb_unlink(fskb, &failq); kfree_skb(fskb); } mpath_dbg(gate_mpath->sdata, "Mpath queue for gate %pM has %d frames\n", gate_mpath->dst, skb_queue_len(&gate_mpath->frame_queue)); if (!copy) return; spin_lock_irqsave(&from_mpath->frame_queue.lock, flags); skb_queue_splice(&failq, &from_mpath->frame_queue); spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags); } static struct mesh_path *mpath_lookup(struct mesh_table *tbl, const u8 *dst, struct ieee80211_sub_if_data *sdata) { struct mesh_path *mpath; mpath = rhashtable_lookup_fast(&tbl->rhead, dst, mesh_rht_params); if (mpath && mpath_expired(mpath)) { spin_lock_bh(&mpath->state_lock); mpath->flags &= ~MESH_PATH_ACTIVE; spin_unlock_bh(&mpath->state_lock); } return mpath; } /** * mesh_path_lookup - look up a path in the mesh path table * @sdata: local subif * @dst: hardware address (ETH_ALEN length) of destination * * Returns: pointer to the mesh path structure, or NULL if not found * * Locking: must be called within a read rcu section. */ struct mesh_path * mesh_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst) { return mpath_lookup(sdata->u.mesh.mesh_paths, dst, sdata); } struct mesh_path * mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst) { return mpath_lookup(sdata->u.mesh.mpp_paths, dst, sdata); } static struct mesh_path * __mesh_path_lookup_by_idx(struct mesh_table *tbl, int idx) { int i = 0, ret; struct mesh_path *mpath = NULL; struct rhashtable_iter iter; ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC); if (ret) return NULL; rhashtable_walk_start(&iter); while ((mpath = rhashtable_walk_next(&iter))) { if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN) continue; if (IS_ERR(mpath)) break; if (i++ == idx) break; } rhashtable_walk_stop(&iter); rhashtable_walk_exit(&iter); if (IS_ERR(mpath) || !mpath) return NULL; if (mpath_expired(mpath)) { spin_lock_bh(&mpath->state_lock); mpath->flags &= ~MESH_PATH_ACTIVE; spin_unlock_bh(&mpath->state_lock); } return mpath; } /** * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index * @idx: index * @sdata: local subif, or NULL for all entries * * Returns: pointer to the mesh path structure, or NULL if not found. * * Locking: must be called within a read rcu section. 
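* * Illustrative iteration (editorial sketch, not driver code): dump-style callers walk increasing indices under RCU until the lookup returns NULL: * * int idx = 0; * struct mesh_path *mpath; * * rcu_read_lock(); * while ((mpath = mesh_path_lookup_by_idx(sdata, idx++)) != NULL) * pr_info("mesh path to %pM\n", mpath->dst); * rcu_read_unlock();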
*/ struct mesh_path * mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx) { return __mesh_path_lookup_by_idx(sdata->u.mesh.mesh_paths, idx); } /** * mpp_path_lookup_by_idx - look up a path in the proxy path table by its index * @idx: index * @sdata: local subif, or NULL for all entries * * Returns: pointer to the proxy path structure, or NULL if not found. * * Locking: must be called within a read rcu section. */ struct mesh_path * mpp_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx) { return __mesh_path_lookup_by_idx(sdata->u.mesh.mpp_paths, idx); } /** * mesh_path_add_gate - add the given mpath to a mesh gate to our path table * @mpath: gate path to add to table */ int mesh_path_add_gate(struct mesh_path *mpath) { struct mesh_table *tbl; int err; rcu_read_lock(); tbl = mpath->sdata->u.mesh.mesh_paths; spin_lock_bh(&mpath->state_lock); if (mpath->is_gate) { err = -EEXIST; spin_unlock_bh(&mpath->state_lock); goto err_rcu; } mpath->is_gate = true; mpath->sdata->u.mesh.num_gates++; spin_lock(&tbl->gates_lock); hlist_add_head_rcu(&mpath->gate_list, &tbl->known_gates); spin_unlock(&tbl->gates_lock); spin_unlock_bh(&mpath->state_lock); mpath_dbg(mpath->sdata, "Mesh path: Recorded new gate: %pM. %d known gates\n", mpath->dst, mpath->sdata->u.mesh.num_gates); err = 0; err_rcu: rcu_read_unlock(); return err; } /** * mesh_gate_del - remove a mesh gate from the list of known gates * @tbl: table which holds our list of known gates * @mpath: gate mpath */ static void mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath) { lockdep_assert_held(&mpath->state_lock); if (!mpath->is_gate) return; mpath->is_gate = false; spin_lock_bh(&tbl->gates_lock); hlist_del_rcu(&mpath->gate_list); mpath->sdata->u.mesh.num_gates--; spin_unlock_bh(&tbl->gates_lock); mpath_dbg(mpath->sdata, "Mesh path: Deleted gate: %pM. 
%d known gates\n", mpath->dst, mpath->sdata->u.mesh.num_gates); } /** * mesh_gate_num - number of gates known to this interface * @sdata: subif data */ int mesh_gate_num(struct ieee80211_sub_if_data *sdata) { return sdata->u.mesh.num_gates; } static struct mesh_path *mesh_path_new(struct ieee80211_sub_if_data *sdata, const u8 *dst, gfp_t gfp_flags) { struct mesh_path *new_mpath; new_mpath = kzalloc(sizeof(struct mesh_path), gfp_flags); if (!new_mpath) return NULL; memcpy(new_mpath->dst, dst, ETH_ALEN); eth_broadcast_addr(new_mpath->rann_snd_addr); new_mpath->is_root = false; new_mpath->sdata = sdata; new_mpath->flags = 0; skb_queue_head_init(&new_mpath->frame_queue); new_mpath->exp_time = jiffies; spin_lock_init(&new_mpath->state_lock); timer_setup(&new_mpath->timer, mesh_path_timer, 0); return new_mpath; } /** * mesh_path_add - allocate and add a new path to the mesh path table * @dst: destination address of the path (ETH_ALEN length) * @sdata: local subif * * Returns: 0 on success * * State: the initial state of the new path is set to 0 */ struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata, const u8 *dst) { struct mesh_table *tbl; struct mesh_path *mpath, *new_mpath; int ret; if (ether_addr_equal(dst, sdata->vif.addr)) /* never add ourselves as neighbours */ return ERR_PTR(-ENOTSUPP); if (is_multicast_ether_addr(dst)) return ERR_PTR(-ENOTSUPP); if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0) return ERR_PTR(-ENOSPC); new_mpath = mesh_path_new(sdata, dst, GFP_ATOMIC); if (!new_mpath) return ERR_PTR(-ENOMEM); tbl = sdata->u.mesh.mesh_paths; do { ret = rhashtable_lookup_insert_fast(&tbl->rhead, &new_mpath->rhash, mesh_rht_params); if (ret == -EEXIST) mpath = rhashtable_lookup_fast(&tbl->rhead, dst, mesh_rht_params); } while (unlikely(ret == -EEXIST && !mpath)); if (ret && ret != -EEXIST) return ERR_PTR(ret); /* At this point either new_mpath was added, or we found a * matching entry already in the table; in the latter case * free the unnecessary new entry. */ if (ret == -EEXIST) { kfree(new_mpath); new_mpath = mpath; } sdata->u.mesh.mesh_paths_generation++; return new_mpath; } int mpp_path_add(struct ieee80211_sub_if_data *sdata, const u8 *dst, const u8 *mpp) { struct mesh_table *tbl; struct mesh_path *new_mpath; int ret; if (ether_addr_equal(dst, sdata->vif.addr)) /* never add ourselves as neighbours */ return -ENOTSUPP; if (is_multicast_ether_addr(dst)) return -ENOTSUPP; new_mpath = mesh_path_new(sdata, dst, GFP_ATOMIC); if (!new_mpath) return -ENOMEM; memcpy(new_mpath->mpp, mpp, ETH_ALEN); tbl = sdata->u.mesh.mpp_paths; ret = rhashtable_lookup_insert_fast(&tbl->rhead, &new_mpath->rhash, mesh_rht_params); sdata->u.mesh.mpp_paths_generation++; return ret; } /** * mesh_plink_broken - deactivates paths and sends perr when a link breaks * * @sta: broken peer link * * This function must be called from the rate control algorithm if enough * delivery errors suggest that a peer link is no longer usable. 
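* * Illustrative call site (editorial sketch; FAIL_THRESHOLD and the exact hook are assumptions, not part of this driver): a rate control algorithm tracking the per-peer failure average could do * * if (ewma_mesh_fail_avg_read(&sta->mesh->fail_avg) > FAIL_THRESHOLD) * mesh_plink_broken(sta);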
*/ void mesh_plink_broken(struct sta_info *sta) { struct ieee80211_sub_if_data *sdata = sta->sdata; struct mesh_table *tbl = sdata->u.mesh.mesh_paths; static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; struct mesh_path *mpath; struct rhashtable_iter iter; int ret; ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC); if (ret) return; rhashtable_walk_start(&iter); while ((mpath = rhashtable_walk_next(&iter))) { if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN) continue; if (IS_ERR(mpath)) break; if (rcu_access_pointer(mpath->next_hop) == sta && mpath->flags & MESH_PATH_ACTIVE && !(mpath->flags & MESH_PATH_FIXED)) { spin_lock_bh(&mpath->state_lock); mpath->flags &= ~MESH_PATH_ACTIVE; ++mpath->sn; spin_unlock_bh(&mpath->state_lock); mesh_path_error_tx(sdata, sdata->u.mesh.mshcfg.element_ttl, mpath->dst, mpath->sn, WLAN_REASON_MESH_PATH_DEST_UNREACHABLE, bcast); } } rhashtable_walk_stop(&iter); rhashtable_walk_exit(&iter); } static void mesh_path_free_rcu(struct mesh_table *tbl, struct mesh_path *mpath) { struct ieee80211_sub_if_data *sdata = mpath->sdata; spin_lock_bh(&mpath->state_lock); mpath->flags |= MESH_PATH_RESOLVING | MESH_PATH_DELETED; mesh_gate_del(tbl, mpath); spin_unlock_bh(&mpath->state_lock); del_timer_sync(&mpath->timer); atomic_dec(&sdata->u.mesh.mpaths); atomic_dec(&tbl->entries); kfree_rcu(mpath, rcu); } static void __mesh_path_del(struct mesh_table *tbl, struct mesh_path *mpath) { rhashtable_remove_fast(&tbl->rhead, &mpath->rhash, mesh_rht_params); mesh_path_free_rcu(tbl, mpath); } /** * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches * * @sta: mesh peer to match * * RCU notes: this function is called when a mesh plink transitions from * PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that * allows path creation. This will happen before the sta can be freed (because * sta_info_destroy() calls this) so any reader in a rcu read block will be * protected against the plink disappearing. 
*/ void mesh_path_flush_by_nexthop(struct sta_info *sta) { struct ieee80211_sub_if_data *sdata = sta->sdata; struct mesh_table *tbl = sdata->u.mesh.mesh_paths; struct mesh_path *mpath; struct rhashtable_iter iter; int ret; ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC); if (ret) return; rhashtable_walk_start(&iter); while ((mpath = rhashtable_walk_next(&iter))) { if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN) continue; if (IS_ERR(mpath)) break; if (rcu_access_pointer(mpath->next_hop) == sta) __mesh_path_del(tbl, mpath); } rhashtable_walk_stop(&iter); rhashtable_walk_exit(&iter); } static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata, const u8 *proxy) { struct mesh_table *tbl = sdata->u.mesh.mpp_paths; struct mesh_path *mpath; struct rhashtable_iter iter; int ret; ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC); if (ret) return; rhashtable_walk_start(&iter); while ((mpath = rhashtable_walk_next(&iter))) { if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN) continue; if (IS_ERR(mpath)) break; if (ether_addr_equal(mpath->mpp, proxy)) __mesh_path_del(tbl, mpath); } rhashtable_walk_stop(&iter); rhashtable_walk_exit(&iter); } static void table_flush_by_iface(struct mesh_table *tbl) { struct mesh_path *mpath; struct rhashtable_iter iter; int ret; ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC); if (ret) return; rhashtable_walk_start(&iter); while ((mpath = rhashtable_walk_next(&iter))) { if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN) continue; if (IS_ERR(mpath)) break; __mesh_path_del(tbl, mpath); } rhashtable_walk_stop(&iter); rhashtable_walk_exit(&iter); } /** * mesh_path_flush_by_iface - Deletes all mesh paths associated with a given iface * * This function deletes both mesh paths as well as mesh portal paths. * * @sdata: interface data to match * */ void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata) { table_flush_by_iface(sdata->u.mesh.mesh_paths); table_flush_by_iface(sdata->u.mesh.mpp_paths); } /** * table_path_del - delete a path from the mesh or mpp table * * @tbl: mesh or mpp path table * @sdata: local subif * @addr: dst address (ETH_ALEN length) * * Returns: 0 if successful */ static int table_path_del(struct mesh_table *tbl, struct ieee80211_sub_if_data *sdata, const u8 *addr) { struct mesh_path *mpath; rcu_read_lock(); mpath = rhashtable_lookup_fast(&tbl->rhead, addr, mesh_rht_params); if (!mpath) { rcu_read_unlock(); return -ENXIO; } __mesh_path_del(tbl, mpath); rcu_read_unlock(); return 0; } /** * mesh_path_del - delete a mesh path from the table * * @addr: dst address (ETH_ALEN length) * @sdata: local subif * * Returns: 0 if successful */ int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr) { int err; /* flush relevant mpp entries first */ mpp_flush_by_proxy(sdata, addr); err = table_path_del(sdata->u.mesh.mesh_paths, sdata, addr); sdata->u.mesh.mesh_paths_generation++; return err; } /** * mesh_path_tx_pending - sends pending frames in a mesh path queue * * @mpath: mesh path to activate * * Locking: the state_lock of the mpath structure must NOT be held when calling * this function. */ void mesh_path_tx_pending(struct mesh_path *mpath) { if (mpath->flags & MESH_PATH_ACTIVE) ieee80211_add_pending_skbs(mpath->sdata->local, &mpath->frame_queue); } /** * mesh_path_send_to_gates - sends pending frames to all known mesh gates * * @mpath: mesh path whose queue will be emptied * * If there is only one gate, the frames are transferred from the failed mpath * queue to that gate's queue. 
If there is more than one gate, the frames * are copied from each gate to the next. After frames are copied, the * mpath queues are emptied onto the transmission queue. */ int mesh_path_send_to_gates(struct mesh_path *mpath) { struct ieee80211_sub_if_data *sdata = mpath->sdata; struct mesh_table *tbl; struct mesh_path *from_mpath = mpath; struct mesh_path *gate; bool copy = false; tbl = sdata->u.mesh.mesh_paths; rcu_read_lock(); hlist_for_each_entry_rcu(gate, &tbl->known_gates, gate_list) { if (gate->flags & MESH_PATH_ACTIVE) { mpath_dbg(sdata, "Forwarding to %pM\n", gate->dst); mesh_path_move_to_queue(gate, from_mpath, copy); from_mpath = gate; copy = true; } else { mpath_dbg(sdata, "Not forwarding to %pM (flags %#x)\n", gate->dst, gate->flags); } } hlist_for_each_entry_rcu(gate, &tbl->known_gates, gate_list) { mpath_dbg(sdata, "Sending to %pM\n", gate->dst); mesh_path_tx_pending(gate); } rcu_read_unlock(); return (from_mpath == mpath) ? -EHOSTUNREACH : 0; } /** * mesh_path_discard_frame - discard a frame whose path could not be resolved * * @skb: frame to discard * @sdata: network subif the frame was to be sent through * * Locking: the function must be called within a rcu_read_lock region */ void mesh_path_discard_frame(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) { kfree_skb(skb); sdata->u.mesh.mshstats.dropped_frames_no_route++; } /** * mesh_path_flush_pending - free the pending queue of a mesh path * * @mpath: mesh path whose queue has to be freed * * Locking: the function must be called within a rcu_read_lock region */ void mesh_path_flush_pending(struct mesh_path *mpath) { struct sk_buff *skb; while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL) mesh_path_discard_frame(mpath->sdata, skb); } /** * mesh_path_fix_nexthop - force a specific next hop for a mesh path * * @mpath: the mesh path to modify * @next_hop: the next hop to force * * Locking: this function must be called holding mpath->state_lock */ void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop) { spin_lock_bh(&mpath->state_lock); mesh_path_assign_nexthop(mpath, next_hop); mpath->sn = 0xffff; mpath->metric = 0; mpath->hop_count = 0; mpath->exp_time = 0; mpath->flags = MESH_PATH_FIXED | MESH_PATH_SN_VALID; mesh_path_activate(mpath); spin_unlock_bh(&mpath->state_lock); ewma_mesh_fail_avg_init(&next_hop->mesh->fail_avg); /* init it at a low value - 0 start is tricky */ ewma_mesh_fail_avg_add(&next_hop->mesh->fail_avg, 1); mesh_path_tx_pending(mpath); } int mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata) { struct mesh_table *tbl_path, *tbl_mpp; int ret; tbl_path = mesh_table_alloc(); if (!tbl_path) return -ENOMEM; tbl_mpp = mesh_table_alloc(); if (!tbl_mpp) { ret = -ENOMEM; goto free_path; } rhashtable_init(&tbl_path->rhead, &mesh_rht_params); rhashtable_init(&tbl_mpp->rhead, &mesh_rht_params); sdata->u.mesh.mesh_paths = tbl_path; sdata->u.mesh.mpp_paths = tbl_mpp; return 0; free_path: mesh_table_free(tbl_path); return ret; } static void mesh_path_tbl_expire(struct ieee80211_sub_if_data *sdata, struct mesh_table *tbl) { struct mesh_path *mpath; struct rhashtable_iter iter; int ret; ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_KERNEL); if (ret) return; rhashtable_walk_start(&iter); while ((mpath = rhashtable_walk_next(&iter))) { if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN) continue; if (IS_ERR(mpath)) break; if ((!(mpath->flags & MESH_PATH_RESOLVING)) && (!(mpath->flags & MESH_PATH_FIXED)) && time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE)) __mesh_path_del(tbl, 
mpath); } rhashtable_walk_stop(&iter); rhashtable_walk_exit(&iter); } void mesh_path_expire(struct ieee80211_sub_if_data *sdata) { mesh_path_tbl_expire(sdata, sdata->u.mesh.mesh_paths); mesh_path_tbl_expire(sdata, sdata->u.mesh.mpp_paths); } void mesh_pathtbl_unregister(struct ieee80211_sub_if_data *sdata) { mesh_table_free(sdata->u.mesh.mesh_paths); mesh_table_free(sdata->u.mesh.mpp_paths); } backport-iwlwifi-dkms-7906/net/mac80211/mesh_plink.c000066400000000000000000000773701351064634500221360ustar00rootroot00000000000000/* * Copyright (c) 2008, 2009 open80211s Ltd. * Copyright (C) 2019 Intel Corporation * Author: Luis Carlos Cobo * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include #include #include #include #include "ieee80211_i.h" #include "rate.h" #include "mesh.h" #define PLINK_CNF_AID(mgmt) ((mgmt)->u.action.u.self_prot.variable + 2) #define PLINK_GET_LLID(p) (p + 2) #define PLINK_GET_PLID(p) (p + 4) #define mod_plink_timer(s, t) (mod_timer(&s->mesh->plink_timer, \ jiffies + msecs_to_jiffies(t))) enum plink_event { PLINK_UNDEFINED, OPN_ACPT, OPN_RJCT, OPN_IGNR, CNF_ACPT, CNF_RJCT, CNF_IGNR, CLS_ACPT, CLS_IGNR }; static const char * const mplstates[] = { [NL80211_PLINK_LISTEN] = "LISTEN", [NL80211_PLINK_OPN_SNT] = "OPN-SNT", [NL80211_PLINK_OPN_RCVD] = "OPN-RCVD", [NL80211_PLINK_CNF_RCVD] = "CNF_RCVD", [NL80211_PLINK_ESTAB] = "ESTAB", [NL80211_PLINK_HOLDING] = "HOLDING", [NL80211_PLINK_BLOCKED] = "BLOCKED" }; static const char * const mplevents[] = { [PLINK_UNDEFINED] = "NONE", [OPN_ACPT] = "OPN_ACPT", [OPN_RJCT] = "OPN_RJCT", [OPN_IGNR] = "OPN_IGNR", [CNF_ACPT] = "CNF_ACPT", [CNF_RJCT] = "CNF_RJCT", [CNF_IGNR] = "CNF_IGNR", [CLS_ACPT] = "CLS_ACPT", [CLS_IGNR] = "CLS_IGNR" }; /* We only need a valid sta if user configured a minimum rssi_threshold. */ static bool rssi_threshold_check(struct ieee80211_sub_if_data *sdata, struct sta_info *sta) { s32 rssi_threshold = sdata->u.mesh.mshcfg.rssi_threshold; return rssi_threshold == 0 || (sta && (s8)-ewma_signal_read(&sta->rx_stats_avg.signal) > rssi_threshold); } /** * mesh_plink_fsm_restart - restart a mesh peer link finite state machine * * @sta: mesh peer link to restart * * Locking: this function must be called holding sta->mesh->plink_lock */ static inline void mesh_plink_fsm_restart(struct sta_info *sta) { lockdep_assert_held(&sta->mesh->plink_lock); sta->mesh->plink_state = NL80211_PLINK_LISTEN; sta->mesh->llid = sta->mesh->plid = sta->mesh->reason = 0; sta->mesh->plink_retries = 0; } /* * mesh_set_short_slot_time - enable / disable ERP short slot time. * * The standard indirectly mandates mesh STAs to turn off short slot time by * disallowing advertising this (802.11-2012 8.4.1.4), but that doesn't mean we * can't be sneaky about it. Enable short slot time if all mesh STAs in the * MBSS support ERP rates. * * Returns BSS_CHANGED_ERP_SLOT or 0 for no change. 
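* * The returned flag is meant to be OR-ed with other changed flags and pushed out once, as the peering code in this file set does (editorial sketch of that pattern): * * changed |= mesh_set_ht_prot_mode(sdata); * changed |= mesh_set_short_slot_time(sdata); * if (changed) * ieee80211_mbss_info_change_notify(sdata, changed);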
*/ static u32 mesh_set_short_slot_time(struct ieee80211_sub_if_data *sdata) { struct ieee80211_local *local = sdata->local; struct ieee80211_supported_band *sband; struct sta_info *sta; u32 erp_rates = 0, changed = 0; int i; bool short_slot = false; sband = ieee80211_get_sband(sdata); if (!sband) return changed; if (sband->band == NL80211_BAND_5GHZ) { /* (IEEE 802.11-2012 19.4.5) */ short_slot = true; goto out; } else if (sband->band != NL80211_BAND_2GHZ) { goto out; } for (i = 0; i < sband->n_bitrates; i++) if (sband->bitrates[i].flags & IEEE80211_RATE_ERP_G) erp_rates |= BIT(i); if (!erp_rates) goto out; rcu_read_lock(); list_for_each_entry_rcu(sta, &local->sta_list, list) { if (sdata != sta->sdata || sta->mesh->plink_state != NL80211_PLINK_ESTAB) continue; short_slot = false; if (erp_rates & sta->sta.supp_rates[sband->band]) short_slot = true; else break; } rcu_read_unlock(); out: if (sdata->vif.bss_conf.use_short_slot != short_slot) { sdata->vif.bss_conf.use_short_slot = short_slot; changed = BSS_CHANGED_ERP_SLOT; mpl_dbg(sdata, "mesh_plink %pM: ERP short slot time %d\n", sdata->vif.addr, short_slot); } return changed; } /** * mesh_set_ht_prot_mode - set correct HT protection mode * * Section 9.23.3.5 of IEEE 802.11-2012 describes the protection rules for HT * mesh STA in a MBSS. Three HT protection modes are supported for now: non-HT * mixed mode, 20MHz-protection and no-protection mode. non-HT mixed mode is * selected if any non-HT peers are present in our MBSS. 20MHz-protection mode * is selected if all peers in our 20/40MHz MBSS support HT and at least one * HT20 peer is present. Otherwise no-protection mode is selected. */ static u32 mesh_set_ht_prot_mode(struct ieee80211_sub_if_data *sdata) { struct ieee80211_local *local = sdata->local; struct sta_info *sta; u16 ht_opmode; bool non_ht_sta = false, ht20_sta = false; switch (sdata->vif.bss_conf.chandef.width) { case NL80211_CHAN_WIDTH_20_NOHT: case NL80211_CHAN_WIDTH_5: case NL80211_CHAN_WIDTH_10: return 0; default: break; } rcu_read_lock(); list_for_each_entry_rcu(sta, &local->sta_list, list) { if (sdata != sta->sdata || sta->mesh->plink_state != NL80211_PLINK_ESTAB) continue; if (sta->sta.bandwidth > IEEE80211_STA_RX_BW_20) continue; if (!sta->sta.ht_cap.ht_supported) { mpl_dbg(sdata, "nonHT sta (%pM) is present\n", sta->sta.addr); non_ht_sta = true; break; } mpl_dbg(sdata, "HT20 sta (%pM) is present\n", sta->sta.addr); ht20_sta = true; } rcu_read_unlock(); if (non_ht_sta) ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED; else if (ht20_sta && sdata->vif.bss_conf.chandef.width > NL80211_CHAN_WIDTH_20) ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_20MHZ; else ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_NONE; if (sdata->vif.bss_conf.ht_operation_mode == ht_opmode) return 0; sdata->vif.bss_conf.ht_operation_mode = ht_opmode; sdata->u.mesh.mshcfg.ht_opmode = ht_opmode; mpl_dbg(sdata, "selected new HT protection mode %d\n", ht_opmode); return BSS_CHANGED_HT; } static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata, struct sta_info *sta, enum ieee80211_self_protected_actioncode action, u8 *da, u16 llid, u16 plid, u16 reason) { struct ieee80211_local *local = sdata->local; struct sk_buff *skb; struct ieee80211_tx_info *info; struct ieee80211_mgmt *mgmt; bool include_plid = false; u16 peering_proto = 0; u8 *pos, ie_len = 4; int hdr_len = offsetofend(struct ieee80211_mgmt, u.action.u.self_prot); int err = -ENOMEM; skb = dev_alloc_skb(local->tx_headroom + hdr_len + 2 + /* capability info */ 2 + /* AID */ 2 + 8 + /* supported 
rates */ 2 + (IEEE80211_MAX_SUPP_RATES - 8) + 2 + sdata->u.mesh.mesh_id_len + 2 + sizeof(struct ieee80211_meshconf_ie) + 2 + sizeof(struct ieee80211_ht_cap) + 2 + sizeof(struct ieee80211_ht_operation) + 2 + sizeof(struct ieee80211_vht_cap) + 2 + sizeof(struct ieee80211_vht_operation) + 2 + 8 + /* peering IE */ sdata->u.mesh.ie_len); if (!skb) return err; info = IEEE80211_SKB_CB(skb); skb_reserve(skb, local->tx_headroom); mgmt = skb_put_zero(skb, hdr_len); mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ACTION); memcpy(mgmt->da, da, ETH_ALEN); memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN); mgmt->u.action.category = WLAN_CATEGORY_SELF_PROTECTED; mgmt->u.action.u.self_prot.action_code = action; if (action != WLAN_SP_MESH_PEERING_CLOSE) { struct ieee80211_supported_band *sband; enum nl80211_band band; sband = ieee80211_get_sband(sdata); if (!sband) { err = -EINVAL; goto free; } band = sband->band; /* capability info */ pos = skb_put_zero(skb, 2); if (action == WLAN_SP_MESH_PEERING_CONFIRM) { /* AID */ pos = skb_put(skb, 2); put_unaligned_le16(sta->sta.aid, pos); } if (ieee80211_add_srates_ie(sdata, skb, true, band) || ieee80211_add_ext_srates_ie(sdata, skb, true, band) || mesh_add_rsn_ie(sdata, skb) || mesh_add_meshid_ie(sdata, skb) || mesh_add_meshconf_ie(sdata, skb)) goto free; } else { /* WLAN_SP_MESH_PEERING_CLOSE */ info->flags |= IEEE80211_TX_CTL_NO_ACK; if (mesh_add_meshid_ie(sdata, skb)) goto free; } /* Add Mesh Peering Management element */ switch (action) { case WLAN_SP_MESH_PEERING_OPEN: break; case WLAN_SP_MESH_PEERING_CONFIRM: ie_len += 2; include_plid = true; break; case WLAN_SP_MESH_PEERING_CLOSE: if (plid) { ie_len += 2; include_plid = true; } ie_len += 2; /* reason code */ break; default: err = -EINVAL; goto free; } if (WARN_ON(skb_tailroom(skb) < 2 + ie_len)) goto free; pos = skb_put(skb, 2 + ie_len); *pos++ = WLAN_EID_PEER_MGMT; *pos++ = ie_len; memcpy(pos, &peering_proto, 2); pos += 2; put_unaligned_le16(llid, pos); pos += 2; if (include_plid) { put_unaligned_le16(plid, pos); pos += 2; } if (action == WLAN_SP_MESH_PEERING_CLOSE) { put_unaligned_le16(reason, pos); pos += 2; } if (action != WLAN_SP_MESH_PEERING_CLOSE) { if (mesh_add_ht_cap_ie(sdata, skb) || mesh_add_ht_oper_ie(sdata, skb) || mesh_add_vht_cap_ie(sdata, skb) || mesh_add_vht_oper_ie(sdata, skb)) goto free; } if (mesh_add_vendor_ies(sdata, skb)) goto free; ieee80211_tx_skb(sdata, skb); return 0; free: kfree_skb(skb); return err; } /** * __mesh_plink_deactivate - deactivate mesh peer link * * @sta: mesh peer link to deactivate * * Mesh paths with this peer as next hop should be flushed * by the caller outside of plink_lock. * * Returns beacon changed flag if the beacon content changed. 
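* * The locked pattern used by callers in this file, e.g. mesh_plink_block(), is: * * spin_lock_bh(&sta->mesh->plink_lock); * changed = __mesh_plink_deactivate(sta); * spin_unlock_bh(&sta->mesh->plink_lock); * mesh_path_flush_by_nexthop(sta);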
* * Locking: the caller must hold sta->mesh->plink_lock */ static u32 __mesh_plink_deactivate(struct sta_info *sta) { struct ieee80211_sub_if_data *sdata = sta->sdata; u32 changed = 0; lockdep_assert_held(&sta->mesh->plink_lock); if (sta->mesh->plink_state == NL80211_PLINK_ESTAB) changed = mesh_plink_dec_estab_count(sdata); sta->mesh->plink_state = NL80211_PLINK_BLOCKED; ieee80211_mps_sta_status_update(sta); changed |= ieee80211_mps_set_sta_local_pm(sta, NL80211_MESH_POWER_UNKNOWN); return changed; } /** * mesh_plink_deactivate - deactivate mesh peer link * * @sta: mesh peer link to deactivate * * All mesh paths with this peer as next hop will be flushed */ u32 mesh_plink_deactivate(struct sta_info *sta) { struct ieee80211_sub_if_data *sdata = sta->sdata; u32 changed; spin_lock_bh(&sta->mesh->plink_lock); changed = __mesh_plink_deactivate(sta); if (!sdata->u.mesh.user_mpm) { sta->mesh->reason = WLAN_REASON_MESH_PEER_CANCELED; mesh_plink_frame_tx(sdata, sta, WLAN_SP_MESH_PEERING_CLOSE, sta->sta.addr, sta->mesh->llid, sta->mesh->plid, sta->mesh->reason); } spin_unlock_bh(&sta->mesh->plink_lock); if (!sdata->u.mesh.user_mpm) del_timer_sync(&sta->mesh->plink_timer); mesh_path_flush_by_nexthop(sta); /* make sure no readers can access nexthop sta from here on */ synchronize_net(); return changed; } static void mesh_sta_info_init(struct ieee80211_sub_if_data *sdata, struct sta_info *sta, struct ieee802_11_elems *elems) { struct ieee80211_local *local = sdata->local; struct ieee80211_supported_band *sband; u32 rates, basic_rates = 0, changed = 0; enum ieee80211_sta_rx_bandwidth bw = sta->sta.bandwidth; sband = ieee80211_get_sband(sdata); if (!sband) return; rates = ieee80211_sta_get_rates(sdata, elems, sband->band, &basic_rates); spin_lock_bh(&sta->mesh->plink_lock); sta->rx_stats.last_rx = jiffies; /* rates and capabilities don't change during peering */ if (sta->mesh->plink_state == NL80211_PLINK_ESTAB && sta->mesh->processed_beacon) goto out; sta->mesh->processed_beacon = true; if (sta->sta.supp_rates[sband->band] != rates) changed |= IEEE80211_RC_SUPP_RATES_CHANGED; sta->sta.supp_rates[sband->band] = rates; if (ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband, elems->ht_cap_elem, sta)) changed |= IEEE80211_RC_BW_CHANGED; ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband, elems->vht_cap_elem, sta); if (bw != sta->sta.bandwidth) changed |= IEEE80211_RC_BW_CHANGED; /* HT peer is operating 20MHz-only */ if (elems->ht_operation && !(elems->ht_operation->ht_param & IEEE80211_HT_PARAM_CHAN_WIDTH_ANY)) { if (sta->sta.bandwidth != IEEE80211_STA_RX_BW_20) changed |= IEEE80211_RC_BW_CHANGED; sta->sta.bandwidth = IEEE80211_STA_RX_BW_20; } if (!test_sta_flag(sta, WLAN_STA_RATE_CONTROL)) rate_control_rate_init(sta); else rate_control_rate_update(local, sband, sta, changed); out: spin_unlock_bh(&sta->mesh->plink_lock); } static int mesh_allocate_aid(struct ieee80211_sub_if_data *sdata) { struct sta_info *sta; unsigned long *aid_map; int aid; aid_map = kcalloc(BITS_TO_LONGS(IEEE80211_MAX_AID + 1), sizeof(*aid_map), GFP_KERNEL); if (!aid_map) return -ENOMEM; /* reserve aid 0 for mcast indication */ __set_bit(0, aid_map); rcu_read_lock(); list_for_each_entry_rcu(sta, &sdata->local->sta_list, list) __set_bit(sta->sta.aid, aid_map); rcu_read_unlock(); aid = find_first_zero_bit(aid_map, IEEE80211_MAX_AID + 1); kfree(aid_map); if (aid > IEEE80211_MAX_AID) return -ENOBUFS; return aid; } static struct sta_info * __mesh_sta_info_alloc(struct ieee80211_sub_if_data *sdata, u8 *hw_addr) { struct sta_info *sta; int aid; if 
(sdata->local->num_sta >= MESH_MAX_PLINKS) return NULL; aid = mesh_allocate_aid(sdata); if (aid < 0) return NULL; sta = sta_info_alloc(sdata, hw_addr, GFP_KERNEL); if (!sta) return NULL; sta->mesh->plink_state = NL80211_PLINK_LISTEN; sta->sta.wme = true; sta->sta.aid = aid; sta_info_pre_move_state(sta, IEEE80211_STA_AUTH); sta_info_pre_move_state(sta, IEEE80211_STA_ASSOC); sta_info_pre_move_state(sta, IEEE80211_STA_AUTHORIZED); return sta; } static struct sta_info * mesh_sta_info_alloc(struct ieee80211_sub_if_data *sdata, u8 *addr, struct ieee802_11_elems *elems, struct ieee80211_rx_status *rx_status) { struct sta_info *sta = NULL; /* Userspace handles station allocation */ if (sdata->u.mesh.user_mpm || sdata->u.mesh.security & IEEE80211_MESH_SEC_AUTHED) { if (mesh_peer_accepts_plinks(elems) && mesh_plink_availables(sdata)) { int sig = 0; if (ieee80211_hw_check(&sdata->local->hw, SIGNAL_DBM)) sig = rx_status->signal; cfg80211_notify_new_peer_candidate(sdata->dev, addr, elems->ie_start, elems->total_len, sig, GFP_KERNEL); } } else sta = __mesh_sta_info_alloc(sdata, addr); return sta; } /* * mesh_sta_info_get - return mesh sta info entry for @addr. * * @sdata: local meshif * @addr: peer's address * @elems: IEs from beacon or mesh peering frame. * @rx_status: rx status for the frame for signal reporting * * Return existing or newly allocated sta_info under RCU read lock. * (re)initialize with given IEs. */ static struct sta_info * mesh_sta_info_get(struct ieee80211_sub_if_data *sdata, u8 *addr, struct ieee802_11_elems *elems, struct ieee80211_rx_status *rx_status) __acquires(RCU) { struct sta_info *sta = NULL; rcu_read_lock(); sta = sta_info_get(sdata, addr); if (sta) { mesh_sta_info_init(sdata, sta, elems); } else { rcu_read_unlock(); /* can't run atomic */ sta = mesh_sta_info_alloc(sdata, addr, elems, rx_status); if (!sta) { rcu_read_lock(); return NULL; } mesh_sta_info_init(sdata, sta, elems); if (sta_info_insert_rcu(sta)) return NULL; } return sta; } /* * mesh_neighbour_update - update or initialize new mesh neighbor. * * @sdata: local meshif * @addr: peer's address * @elems: IEs from beacon or mesh peering frame * @rx_status: rx status for the frame for signal reporting * * Initiates peering if appropriate. */ void mesh_neighbour_update(struct ieee80211_sub_if_data *sdata, u8 *hw_addr, struct ieee802_11_elems *elems, struct ieee80211_rx_status *rx_status) { struct sta_info *sta; u32 changed = 0; sta = mesh_sta_info_get(sdata, hw_addr, elems, rx_status); if (!sta) goto out; sta->mesh->connected_to_gate = elems->mesh_config->meshconf_form & IEEE80211_MESHCONF_FORM_CONNECTED_TO_GATE; if (mesh_peer_accepts_plinks(elems) && sta->mesh->plink_state == NL80211_PLINK_LISTEN && sdata->u.mesh.accepting_plinks && sdata->u.mesh.mshcfg.auto_open_plinks && rssi_threshold_check(sdata, sta)) changed = mesh_plink_open(sta); ieee80211_mps_frame_release(sta, elems); out: rcu_read_unlock(); ieee80211_mbss_info_change_notify(sdata, changed); } void mesh_plink_timer(struct timer_list *t) { struct mesh_sta *mesh = from_timer(mesh, t, plink_timer); struct sta_info *sta; u16 reason = 0; struct ieee80211_sub_if_data *sdata; struct mesh_config *mshcfg; enum ieee80211_self_protected_actioncode action = 0; /* * This STA is valid because sta_info_destroy() will * del_timer_sync() this timer after having made sure * it cannot be readded (by deleting the plink.) 
*/ sta = mesh->plink_sta; if (sta->sdata->local->quiescing) return; spin_lock_bh(&sta->mesh->plink_lock); /* If a timer fires just before a state transition on another CPU, * we may have already extended the timeout and changed state by the * time we've acquired the lock and arrived here. In that case, * skip this timer and wait for the new one. */ if (time_before(jiffies, sta->mesh->plink_timer.expires)) { mpl_dbg(sta->sdata, "Ignoring timer for %pM in state %s (timer adjusted)", sta->sta.addr, mplstates[sta->mesh->plink_state]); spin_unlock_bh(&sta->mesh->plink_lock); return; } /* del_timer() and handler may race when entering these states */ if (sta->mesh->plink_state == NL80211_PLINK_LISTEN || sta->mesh->plink_state == NL80211_PLINK_ESTAB) { mpl_dbg(sta->sdata, "Ignoring timer for %pM in state %s (timer deleted)", sta->sta.addr, mplstates[sta->mesh->plink_state]); spin_unlock_bh(&sta->mesh->plink_lock); return; } mpl_dbg(sta->sdata, "Mesh plink timer for %pM fired on state %s\n", sta->sta.addr, mplstates[sta->mesh->plink_state]); sdata = sta->sdata; mshcfg = &sdata->u.mesh.mshcfg; switch (sta->mesh->plink_state) { case NL80211_PLINK_OPN_RCVD: case NL80211_PLINK_OPN_SNT: /* retry timer */ if (sta->mesh->plink_retries < mshcfg->dot11MeshMaxRetries) { u32 rand; mpl_dbg(sta->sdata, "Mesh plink for %pM (retry, timeout): %d %d\n", sta->sta.addr, sta->mesh->plink_retries, sta->mesh->plink_timeout); get_random_bytes(&rand, sizeof(u32)); sta->mesh->plink_timeout = sta->mesh->plink_timeout + rand % sta->mesh->plink_timeout; ++sta->mesh->plink_retries; mod_plink_timer(sta, sta->mesh->plink_timeout); action = WLAN_SP_MESH_PEERING_OPEN; break; } reason = WLAN_REASON_MESH_MAX_RETRIES; /* fall through */ case NL80211_PLINK_CNF_RCVD: /* confirm timer */ if (!reason) reason = WLAN_REASON_MESH_CONFIRM_TIMEOUT; sta->mesh->plink_state = NL80211_PLINK_HOLDING; mod_plink_timer(sta, mshcfg->dot11MeshHoldingTimeout); action = WLAN_SP_MESH_PEERING_CLOSE; break; case NL80211_PLINK_HOLDING: /* holding timer */ del_timer(&sta->mesh->plink_timer); mesh_plink_fsm_restart(sta); break; default: break; } spin_unlock_bh(&sta->mesh->plink_lock); if (action) mesh_plink_frame_tx(sdata, sta, action, sta->sta.addr, sta->mesh->llid, sta->mesh->plid, reason); } static inline void mesh_plink_timer_set(struct sta_info *sta, u32 timeout) { sta->mesh->plink_timeout = timeout; mod_timer(&sta->mesh->plink_timer, jiffies + msecs_to_jiffies(timeout)); } static bool llid_in_use(struct ieee80211_sub_if_data *sdata, u16 llid) { struct ieee80211_local *local = sdata->local; bool in_use = false; struct sta_info *sta; rcu_read_lock(); list_for_each_entry_rcu(sta, &local->sta_list, list) { if (sdata != sta->sdata) continue; if (!memcmp(&sta->mesh->llid, &llid, sizeof(llid))) { in_use = true; break; } } rcu_read_unlock(); return in_use; } static u16 mesh_get_new_llid(struct ieee80211_sub_if_data *sdata) { u16 llid; do { get_random_bytes(&llid, sizeof(llid)); } while (llid_in_use(sdata, llid)); return llid; } u32 mesh_plink_open(struct sta_info *sta) { struct ieee80211_sub_if_data *sdata = sta->sdata; u32 changed; if (!test_sta_flag(sta, WLAN_STA_AUTH)) return 0; spin_lock_bh(&sta->mesh->plink_lock); sta->mesh->llid = mesh_get_new_llid(sdata); if (sta->mesh->plink_state != NL80211_PLINK_LISTEN && sta->mesh->plink_state != NL80211_PLINK_BLOCKED) { spin_unlock_bh(&sta->mesh->plink_lock); return 0; } sta->mesh->plink_state = NL80211_PLINK_OPN_SNT; mesh_plink_timer_set(sta, sdata->u.mesh.mshcfg.dot11MeshRetryTimeout); 
spin_unlock_bh(&sta->mesh->plink_lock); mpl_dbg(sdata, "Mesh plink: starting establishment with %pM\n", sta->sta.addr); /* set the non-peer mode to active during peering */ changed = ieee80211_mps_local_status_update(sdata); mesh_plink_frame_tx(sdata, sta, WLAN_SP_MESH_PEERING_OPEN, sta->sta.addr, sta->mesh->llid, 0, 0); return changed; } u32 mesh_plink_block(struct sta_info *sta) { u32 changed; spin_lock_bh(&sta->mesh->plink_lock); changed = __mesh_plink_deactivate(sta); sta->mesh->plink_state = NL80211_PLINK_BLOCKED; spin_unlock_bh(&sta->mesh->plink_lock); mesh_path_flush_by_nexthop(sta); return changed; } static void mesh_plink_close(struct ieee80211_sub_if_data *sdata, struct sta_info *sta, enum plink_event event) { struct mesh_config *mshcfg = &sdata->u.mesh.mshcfg; u16 reason = (event == CLS_ACPT) ? WLAN_REASON_MESH_CLOSE : WLAN_REASON_MESH_CONFIG; sta->mesh->reason = reason; sta->mesh->plink_state = NL80211_PLINK_HOLDING; mod_plink_timer(sta, mshcfg->dot11MeshHoldingTimeout); } static u32 mesh_plink_establish(struct ieee80211_sub_if_data *sdata, struct sta_info *sta) { struct mesh_config *mshcfg = &sdata->u.mesh.mshcfg; u32 changed = 0; del_timer(&sta->mesh->plink_timer); sta->mesh->plink_state = NL80211_PLINK_ESTAB; changed |= mesh_plink_inc_estab_count(sdata); changed |= mesh_set_ht_prot_mode(sdata); changed |= mesh_set_short_slot_time(sdata); mpl_dbg(sdata, "Mesh plink with %pM ESTABLISHED\n", sta->sta.addr); ieee80211_mps_sta_status_update(sta); changed |= ieee80211_mps_set_sta_local_pm(sta, mshcfg->power_mode); return changed; } /** * mesh_plink_fsm - step @sta MPM based on @event * * @sdata: interface * @sta: mesh neighbor * @event: peering event * * Return: changed MBSS flags */ static u32 mesh_plink_fsm(struct ieee80211_sub_if_data *sdata, struct sta_info *sta, enum plink_event event) { struct mesh_config *mshcfg = &sdata->u.mesh.mshcfg; enum ieee80211_self_protected_actioncode action = 0; u32 changed = 0; bool flush = false; mpl_dbg(sdata, "peer %pM in state %s got event %s\n", sta->sta.addr, mplstates[sta->mesh->plink_state], mplevents[event]); spin_lock_bh(&sta->mesh->plink_lock); switch (sta->mesh->plink_state) { case NL80211_PLINK_LISTEN: switch (event) { case CLS_ACPT: mesh_plink_fsm_restart(sta); break; case OPN_ACPT: sta->mesh->plink_state = NL80211_PLINK_OPN_RCVD; sta->mesh->llid = mesh_get_new_llid(sdata); mesh_plink_timer_set(sta, mshcfg->dot11MeshRetryTimeout); /* set the non-peer mode to active during peering */ changed |= ieee80211_mps_local_status_update(sdata); action = WLAN_SP_MESH_PEERING_OPEN; break; default: break; } break; case NL80211_PLINK_OPN_SNT: switch (event) { case OPN_RJCT: case CNF_RJCT: case CLS_ACPT: mesh_plink_close(sdata, sta, event); action = WLAN_SP_MESH_PEERING_CLOSE; break; case OPN_ACPT: /* retry timer is left untouched */ sta->mesh->plink_state = NL80211_PLINK_OPN_RCVD; action = WLAN_SP_MESH_PEERING_CONFIRM; break; case CNF_ACPT: sta->mesh->plink_state = NL80211_PLINK_CNF_RCVD; mod_plink_timer(sta, mshcfg->dot11MeshConfirmTimeout); break; default: break; } break; case NL80211_PLINK_OPN_RCVD: switch (event) { case OPN_RJCT: case CNF_RJCT: case CLS_ACPT: mesh_plink_close(sdata, sta, event); action = WLAN_SP_MESH_PEERING_CLOSE; break; case OPN_ACPT: action = WLAN_SP_MESH_PEERING_CONFIRM; break; case CNF_ACPT: changed |= mesh_plink_establish(sdata, sta); break; default: break; } break; case NL80211_PLINK_CNF_RCVD: switch (event) { case OPN_RJCT: case CNF_RJCT: case CLS_ACPT: mesh_plink_close(sdata, sta, event); action = 
WLAN_SP_MESH_PEERING_CLOSE; break; case OPN_ACPT: changed |= mesh_plink_establish(sdata, sta); action = WLAN_SP_MESH_PEERING_CONFIRM; break; default: break; } break; case NL80211_PLINK_ESTAB: switch (event) { case CLS_ACPT: changed |= __mesh_plink_deactivate(sta); changed |= mesh_set_ht_prot_mode(sdata); changed |= mesh_set_short_slot_time(sdata); mesh_plink_close(sdata, sta, event); action = WLAN_SP_MESH_PEERING_CLOSE; flush = true; break; case OPN_ACPT: action = WLAN_SP_MESH_PEERING_CONFIRM; break; default: break; } break; case NL80211_PLINK_HOLDING: switch (event) { case CLS_ACPT: del_timer(&sta->mesh->plink_timer); mesh_plink_fsm_restart(sta); break; case OPN_ACPT: case CNF_ACPT: case OPN_RJCT: case CNF_RJCT: action = WLAN_SP_MESH_PEERING_CLOSE; break; default: break; } break; default: /* should not get here, PLINK_BLOCKED is dealt with at the * beginning of the function */ break; } spin_unlock_bh(&sta->mesh->plink_lock); if (flush) mesh_path_flush_by_nexthop(sta); if (action) { mesh_plink_frame_tx(sdata, sta, action, sta->sta.addr, sta->mesh->llid, sta->mesh->plid, sta->mesh->reason); /* also send confirm in open case */ if (action == WLAN_SP_MESH_PEERING_OPEN) { mesh_plink_frame_tx(sdata, sta, WLAN_SP_MESH_PEERING_CONFIRM, sta->sta.addr, sta->mesh->llid, sta->mesh->plid, 0); } } return changed; } /* * mesh_plink_get_event - get correct MPM event * * @sdata: interface * @sta: peer, leave NULL if processing a frame from a new suitable peer * @elems: peering management IEs * @ftype: frame type * @llid: peer's peer link ID * @plid: peer's local link ID * * Return: new peering event for @sta, but PLINK_UNDEFINED should be treated as * an error. */ static enum plink_event mesh_plink_get_event(struct ieee80211_sub_if_data *sdata, struct sta_info *sta, struct ieee802_11_elems *elems, enum ieee80211_self_protected_actioncode ftype, u16 llid, u16 plid) { enum plink_event event = PLINK_UNDEFINED; u8 ie_len = elems->peering_len; bool matches_local; matches_local = (ftype == WLAN_SP_MESH_PEERING_CLOSE || mesh_matches_local(sdata, elems)); /* deny open request from non-matching peer */ if (!matches_local && !sta) { event = OPN_RJCT; goto out; } if (!sta) { if (ftype != WLAN_SP_MESH_PEERING_OPEN) { mpl_dbg(sdata, "Mesh plink: cls or cnf from unknown peer\n"); goto out; } /* ftype == WLAN_SP_MESH_PEERING_OPEN */ if (!mesh_plink_free_count(sdata)) { mpl_dbg(sdata, "Mesh plink error: no more free plinks\n"); goto out; } /* new matching peer */ event = OPN_ACPT; goto out; } else { if (!test_sta_flag(sta, WLAN_STA_AUTH)) { mpl_dbg(sdata, "Mesh plink: Action frame from non-authed peer\n"); goto out; } if (sta->mesh->plink_state == NL80211_PLINK_BLOCKED) goto out; } switch (ftype) { case WLAN_SP_MESH_PEERING_OPEN: if (!matches_local) event = OPN_RJCT; if (!mesh_plink_free_count(sdata) || (sta->mesh->plid && sta->mesh->plid != plid)) event = OPN_IGNR; else event = OPN_ACPT; break; case WLAN_SP_MESH_PEERING_CONFIRM: if (!matches_local) event = CNF_RJCT; if (!mesh_plink_free_count(sdata) || sta->mesh->llid != llid || (sta->mesh->plid && sta->mesh->plid != plid)) event = CNF_IGNR; else event = CNF_ACPT; break; case WLAN_SP_MESH_PEERING_CLOSE: if (sta->mesh->plink_state == NL80211_PLINK_ESTAB) /* Do not check for llid or plid. This does not * follow the standard but since multiple plinks * per sta are not supported, it is necessary in * order to avoid a livelock when MP A sees an * establish peer link to MP B but MP B does not * see it. 
This can be caused by a timeout in * B's peer link establishment or B being * restarted. */ event = CLS_ACPT; else if (sta->mesh->plid != plid) event = CLS_IGNR; else if (ie_len == 8 && sta->mesh->llid != llid) event = CLS_IGNR; else event = CLS_ACPT; break; default: mpl_dbg(sdata, "Mesh plink: unknown frame subtype\n"); break; } out: return event; } static void mesh_process_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt, struct ieee802_11_elems *elems, struct ieee80211_rx_status *rx_status) { struct sta_info *sta; enum plink_event event; enum ieee80211_self_protected_actioncode ftype; u32 changed = 0; u8 ie_len = elems->peering_len; u16 plid, llid = 0; if (!elems->peering) { mpl_dbg(sdata, "Mesh plink: missing necessary peer link ie\n"); return; } if (elems->rsn_len && sdata->u.mesh.security == IEEE80211_MESH_SEC_NONE) { mpl_dbg(sdata, "Mesh plink: can't establish link with secure peer\n"); return; } ftype = mgmt->u.action.u.self_prot.action_code; if ((ftype == WLAN_SP_MESH_PEERING_OPEN && ie_len != 4) || (ftype == WLAN_SP_MESH_PEERING_CONFIRM && ie_len != 6) || (ftype == WLAN_SP_MESH_PEERING_CLOSE && ie_len != 6 && ie_len != 8)) { mpl_dbg(sdata, "Mesh plink: incorrect plink ie length %d %d\n", ftype, ie_len); return; } if (ftype != WLAN_SP_MESH_PEERING_CLOSE && (!elems->mesh_id || !elems->mesh_config)) { mpl_dbg(sdata, "Mesh plink: missing necessary ie\n"); return; } /* Note the lines below are correct, the llid in the frame is the plid * from the point of view of this host. */ plid = get_unaligned_le16(PLINK_GET_LLID(elems->peering)); if (ftype == WLAN_SP_MESH_PEERING_CONFIRM || (ftype == WLAN_SP_MESH_PEERING_CLOSE && ie_len == 8)) llid = get_unaligned_le16(PLINK_GET_PLID(elems->peering)); /* WARNING: Only for sta pointer, is dropped & re-acquired */ rcu_read_lock(); sta = sta_info_get(sdata, mgmt->sa); if (ftype == WLAN_SP_MESH_PEERING_OPEN && !rssi_threshold_check(sdata, sta)) { mpl_dbg(sdata, "Mesh plink: %pM does not meet rssi threshold\n", mgmt->sa); goto unlock_rcu; } /* Now we will figure out the appropriate event... 
*/ event = mesh_plink_get_event(sdata, sta, elems, ftype, llid, plid); if (event == OPN_ACPT) { rcu_read_unlock(); /* allocate sta entry if necessary and update info */ sta = mesh_sta_info_get(sdata, mgmt->sa, elems, rx_status); if (!sta) { mpl_dbg(sdata, "Mesh plink: failed to init peer!\n"); goto unlock_rcu; } sta->mesh->plid = plid; } else if (!sta && event == OPN_RJCT) { mesh_plink_frame_tx(sdata, NULL, WLAN_SP_MESH_PEERING_CLOSE, mgmt->sa, 0, plid, WLAN_REASON_MESH_CONFIG); goto unlock_rcu; } else if (!sta || event == PLINK_UNDEFINED) { /* something went wrong */ goto unlock_rcu; } if (event == CNF_ACPT) { /* 802.11-2012 13.3.7.2 - update plid on CNF if not set */ if (!sta->mesh->plid) sta->mesh->plid = plid; sta->mesh->aid = get_unaligned_le16(PLINK_CNF_AID(mgmt)); } changed |= mesh_plink_fsm(sdata, sta, event); unlock_rcu: rcu_read_unlock(); if (changed) ieee80211_mbss_info_change_notify(sdata, changed); } void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt, size_t len, struct ieee80211_rx_status *rx_status) { struct ieee802_11_elems elems; size_t baselen; u8 *baseaddr; /* need action_code, aux */ if (len < IEEE80211_MIN_ACTION_SIZE + 3) return; if (sdata->u.mesh.user_mpm) /* userspace must register for these */ return; if (is_multicast_ether_addr(mgmt->da)) { mpl_dbg(sdata, "Mesh plink: ignore frame from multicast address\n"); return; } baseaddr = mgmt->u.action.u.self_prot.variable; baselen = (u8 *) mgmt->u.action.u.self_prot.variable - (u8 *) mgmt; if (mgmt->u.action.u.self_prot.action_code == WLAN_SP_MESH_PEERING_CONFIRM) { baseaddr += 4; baselen += 4; if (baselen > len) return; } ieee802_11_parse_elems(baseaddr, len - baselen, true, &elems, mgmt->bssid, NULL); mesh_process_plink_frame(sdata, mgmt, &elems, rx_status); } backport-iwlwifi-dkms-7906/net/mac80211/mesh_ps.c000066400000000000000000000405341351064634500214330ustar00rootroot00000000000000/* * Copyright 2012-2013, Marco Porsch * Copyright 2012-2013, cozybit Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. 
*/ #include "mesh.h" #include "wme.h" /* mesh PS management */ /** * mps_qos_null_get - create pre-addressed QoS Null frame for mesh powersave */ static struct sk_buff *mps_qos_null_get(struct sta_info *sta) { struct ieee80211_sub_if_data *sdata = sta->sdata; struct ieee80211_local *local = sdata->local; struct ieee80211_hdr *nullfunc; /* use 4addr header */ struct sk_buff *skb; int size = sizeof(*nullfunc); __le16 fc; skb = dev_alloc_skb(local->hw.extra_tx_headroom + size + 2); if (!skb) return NULL; skb_reserve(skb, local->hw.extra_tx_headroom); nullfunc = skb_put(skb, size); fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_NULLFUNC); ieee80211_fill_mesh_addresses(nullfunc, &fc, sta->sta.addr, sdata->vif.addr); nullfunc->frame_control = fc; nullfunc->duration_id = 0; nullfunc->seq_ctrl = 0; /* no address resolution for this frame -> set addr 1 immediately */ memcpy(nullfunc->addr1, sta->sta.addr, ETH_ALEN); skb_put_zero(skb, 2); /* append QoS control field */ ieee80211_mps_set_frame_flags(sdata, sta, nullfunc); return skb; } /** * mps_qos_null_tx - send a QoS Null to indicate link-specific power mode */ static void mps_qos_null_tx(struct sta_info *sta) { struct sk_buff *skb; skb = mps_qos_null_get(sta); if (!skb) return; mps_dbg(sta->sdata, "announcing peer-specific power mode to %pM\n", sta->sta.addr); /* don't unintentionally start a MPSP */ if (!test_sta_flag(sta, WLAN_STA_PS_STA)) { u8 *qc = ieee80211_get_qos_ctl((void *) skb->data); qc[0] |= IEEE80211_QOS_CTL_EOSP; } ieee80211_tx_skb(sta->sdata, skb); } /** * ieee80211_mps_local_status_update - track status of local link-specific PMs * * @sdata: local mesh subif * * sets the non-peer power mode and triggers the driver PS (re-)configuration * Return BSS_CHANGED_BEACON if a beacon update is necessary. */ u32 ieee80211_mps_local_status_update(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; struct sta_info *sta; bool peering = false; int light_sleep_cnt = 0; int deep_sleep_cnt = 0; u32 changed = 0; enum nl80211_mesh_power_mode nonpeer_pm; rcu_read_lock(); list_for_each_entry_rcu(sta, &sdata->local->sta_list, list) { if (sdata != sta->sdata) continue; switch (sta->mesh->plink_state) { case NL80211_PLINK_OPN_SNT: case NL80211_PLINK_OPN_RCVD: case NL80211_PLINK_CNF_RCVD: peering = true; break; case NL80211_PLINK_ESTAB: if (sta->mesh->local_pm == NL80211_MESH_POWER_LIGHT_SLEEP) light_sleep_cnt++; else if (sta->mesh->local_pm == NL80211_MESH_POWER_DEEP_SLEEP) deep_sleep_cnt++; break; default: break; } } rcu_read_unlock(); /* * Set non-peer mode to active during peering/scanning/authentication * (see IEEE802.11-2012 13.14.8.3). The non-peer mesh power mode is * deep sleep if the local STA is in light or deep sleep towards at * least one mesh peer (see 13.14.3.1). Otherwise, set it to the * user-configured default value. 
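* * In short, the selection implemented below is: * peering in progress -> NL80211_MESH_POWER_ACTIVE, * any peer in light or deep sleep -> NL80211_MESH_POWER_DEEP_SLEEP, * otherwise -> ifmsh->mshcfg.power_mode (the user-configured default).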
*/ if (peering) { mps_dbg(sdata, "setting non-peer PM to active for peering\n"); nonpeer_pm = NL80211_MESH_POWER_ACTIVE; } else if (light_sleep_cnt || deep_sleep_cnt) { mps_dbg(sdata, "setting non-peer PM to deep sleep\n"); nonpeer_pm = NL80211_MESH_POWER_DEEP_SLEEP; } else { mps_dbg(sdata, "setting non-peer PM to user value\n"); nonpeer_pm = ifmsh->mshcfg.power_mode; } /* need update if sleep counts move between 0 and non-zero */ if (ifmsh->nonpeer_pm != nonpeer_pm || !ifmsh->ps_peers_light_sleep != !light_sleep_cnt || !ifmsh->ps_peers_deep_sleep != !deep_sleep_cnt) changed = BSS_CHANGED_BEACON; ifmsh->nonpeer_pm = nonpeer_pm; ifmsh->ps_peers_light_sleep = light_sleep_cnt; ifmsh->ps_peers_deep_sleep = deep_sleep_cnt; return changed; } /** * ieee80211_mps_set_sta_local_pm - set local PM towards a mesh STA * * @sta: mesh STA * @pm: the power mode to set * Return BSS_CHANGED_BEACON if a beacon update is in order. */ u32 ieee80211_mps_set_sta_local_pm(struct sta_info *sta, enum nl80211_mesh_power_mode pm) { struct ieee80211_sub_if_data *sdata = sta->sdata; if (sta->mesh->local_pm == pm) return 0; mps_dbg(sdata, "local STA operates in mode %d with %pM\n", pm, sta->sta.addr); sta->mesh->local_pm = pm; /* * announce peer-specific power mode transition * (see IEEE802.11-2012 13.14.3.2 and 13.14.3.3) */ if (sta->mesh->plink_state == NL80211_PLINK_ESTAB) mps_qos_null_tx(sta); return ieee80211_mps_local_status_update(sdata); } /** * ieee80211_mps_set_frame_flags - set mesh PS flags in FC (and QoS Control) * * @sdata: local mesh subif * @sta: mesh STA * @hdr: 802.11 frame header * * see IEEE802.11-2012 8.2.4.1.7 and 8.2.4.5.11 * * NOTE: sta must be given when an individually-addressed QoS frame header * is handled, for group-addressed and management frames it is not used */ void ieee80211_mps_set_frame_flags(struct ieee80211_sub_if_data *sdata, struct sta_info *sta, struct ieee80211_hdr *hdr) { enum nl80211_mesh_power_mode pm; u8 *qc; if (WARN_ON(is_unicast_ether_addr(hdr->addr1) && ieee80211_is_data_qos(hdr->frame_control) && !sta)) return; if (is_unicast_ether_addr(hdr->addr1) && ieee80211_is_data_qos(hdr->frame_control) && sta->mesh->plink_state == NL80211_PLINK_ESTAB) pm = sta->mesh->local_pm; else pm = sdata->u.mesh.nonpeer_pm; if (pm == NL80211_MESH_POWER_ACTIVE) hdr->frame_control &= cpu_to_le16(~IEEE80211_FCTL_PM); else hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM); if (!ieee80211_is_data_qos(hdr->frame_control)) return; qc = ieee80211_get_qos_ctl(hdr); if ((is_unicast_ether_addr(hdr->addr1) && pm == NL80211_MESH_POWER_DEEP_SLEEP) || (is_multicast_ether_addr(hdr->addr1) && sdata->u.mesh.ps_peers_deep_sleep > 0)) qc[1] |= (IEEE80211_QOS_CTL_MESH_PS_LEVEL >> 8); else qc[1] &= ~(IEEE80211_QOS_CTL_MESH_PS_LEVEL >> 8); } /** * ieee80211_mps_sta_status_update - update buffering status of neighbor STA * * @sta: mesh STA * * called after change of peering status or non-peer/peer-specific power mode */ void ieee80211_mps_sta_status_update(struct sta_info *sta) { enum nl80211_mesh_power_mode pm; bool do_buffer; /* For non-assoc STA, prevent buffering or frame transmission */ if (sta->sta_state < IEEE80211_STA_ASSOC) return; /* * use peer-specific power mode if peering is established and the * peer's power mode is known */ if (sta->mesh->plink_state == NL80211_PLINK_ESTAB && sta->mesh->peer_pm != NL80211_MESH_POWER_UNKNOWN) pm = sta->mesh->peer_pm; else pm = sta->mesh->nonpeer_pm; do_buffer = (pm != NL80211_MESH_POWER_ACTIVE); /* clear the MPSP flags for non-peers or active STA */ if 
(sta->mesh->plink_state != NL80211_PLINK_ESTAB) { clear_sta_flag(sta, WLAN_STA_MPSP_OWNER); clear_sta_flag(sta, WLAN_STA_MPSP_RECIPIENT); } else if (!do_buffer) { clear_sta_flag(sta, WLAN_STA_MPSP_OWNER); } /* Don't let the same PS state be set twice */ if (test_sta_flag(sta, WLAN_STA_PS_STA) == do_buffer) return; if (do_buffer) { set_sta_flag(sta, WLAN_STA_PS_STA); atomic_inc(&sta->sdata->u.mesh.ps.num_sta_ps); mps_dbg(sta->sdata, "start PS buffering frames towards %pM\n", sta->sta.addr); } else { ieee80211_sta_ps_deliver_wakeup(sta); } } static void mps_set_sta_peer_pm(struct sta_info *sta, struct ieee80211_hdr *hdr) { enum nl80211_mesh_power_mode pm; u8 *qc = ieee80211_get_qos_ctl(hdr); /* * Test Power Management field of frame control (PW) and * mesh power save level subfield of QoS control field (PSL) * * | PM | PSL| Mesh PM | * +----+----+---------+ * | 0 |Rsrv| Active | * | 1 | 0 | Light | * | 1 | 1 | Deep | */ if (ieee80211_has_pm(hdr->frame_control)) { if (qc[1] & (IEEE80211_QOS_CTL_MESH_PS_LEVEL >> 8)) pm = NL80211_MESH_POWER_DEEP_SLEEP; else pm = NL80211_MESH_POWER_LIGHT_SLEEP; } else { pm = NL80211_MESH_POWER_ACTIVE; } if (sta->mesh->peer_pm == pm) return; mps_dbg(sta->sdata, "STA %pM enters mode %d\n", sta->sta.addr, pm); sta->mesh->peer_pm = pm; ieee80211_mps_sta_status_update(sta); } static void mps_set_sta_nonpeer_pm(struct sta_info *sta, struct ieee80211_hdr *hdr) { enum nl80211_mesh_power_mode pm; if (ieee80211_has_pm(hdr->frame_control)) pm = NL80211_MESH_POWER_DEEP_SLEEP; else pm = NL80211_MESH_POWER_ACTIVE; if (sta->mesh->nonpeer_pm == pm) return; mps_dbg(sta->sdata, "STA %pM sets non-peer mode to %d\n", sta->sta.addr, pm); sta->mesh->nonpeer_pm = pm; ieee80211_mps_sta_status_update(sta); } /** * ieee80211_mps_rx_h_sta_process - frame receive handler for mesh powersave * * @sta: STA info that transmitted the frame * @hdr: IEEE 802.11 (QoS) Header */ void ieee80211_mps_rx_h_sta_process(struct sta_info *sta, struct ieee80211_hdr *hdr) { if (is_unicast_ether_addr(hdr->addr1) && ieee80211_is_data_qos(hdr->frame_control)) { /* * individually addressed QoS Data/Null frames contain * peer link-specific PS mode towards the local STA */ mps_set_sta_peer_pm(sta, hdr); /* check for mesh Peer Service Period trigger frames */ ieee80211_mpsp_trigger_process(ieee80211_get_qos_ctl(hdr), sta, false, false); } else { /* * can only determine non-peer PS mode * (see IEEE802.11-2012 8.2.4.1.7) */ mps_set_sta_nonpeer_pm(sta, hdr); } } /* mesh PS frame release */ static void mpsp_trigger_send(struct sta_info *sta, bool rspi, bool eosp) { struct ieee80211_sub_if_data *sdata = sta->sdata; struct sk_buff *skb; struct ieee80211_hdr *nullfunc; struct ieee80211_tx_info *info; u8 *qc; skb = mps_qos_null_get(sta); if (!skb) return; nullfunc = (struct ieee80211_hdr *) skb->data; if (!eosp) nullfunc->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA); /* * | RSPI | EOSP | MPSP triggering | * +------+------+--------------------+ * | 0 | 0 | local STA is owner | * | 0 | 1 | no MPSP (MPSP end) | * | 1 | 0 | both STA are owner | * | 1 | 1 | peer STA is owner | see IEEE802.11-2012 13.14.9.2 */ qc = ieee80211_get_qos_ctl(nullfunc); if (rspi) qc[1] |= (IEEE80211_QOS_CTL_RSPI >> 8); if (eosp) qc[0] |= IEEE80211_QOS_CTL_EOSP; info = IEEE80211_SKB_CB(skb); info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER | IEEE80211_TX_CTL_REQ_TX_STATUS; mps_dbg(sdata, "sending MPSP trigger%s%s to %pM\n", rspi ? " RSPI" : "", eosp ? 
" EOSP" : "", sta->sta.addr); ieee80211_tx_skb(sdata, skb); } /** * mpsp_qos_null_append - append QoS Null frame to MPSP skb queue if needed * * To properly end a mesh MPSP the last transmitted frame has to set the EOSP * flag in the QoS Control field. In case the current tailing frame is not a * QoS Data frame, append a QoS Null to carry the flag. */ static void mpsp_qos_null_append(struct sta_info *sta, struct sk_buff_head *frames) { struct ieee80211_sub_if_data *sdata = sta->sdata; struct sk_buff *new_skb, *skb = skb_peek_tail(frames); struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; struct ieee80211_tx_info *info; if (ieee80211_is_data_qos(hdr->frame_control)) return; new_skb = mps_qos_null_get(sta); if (!new_skb) return; mps_dbg(sdata, "appending QoS Null in MPSP towards %pM\n", sta->sta.addr); /* * This frame has to be transmitted last. Assign lowest priority to * make sure it cannot pass other frames when releasing multiple ACs. */ new_skb->priority = 1; skb_set_queue_mapping(new_skb, IEEE80211_AC_BK); ieee80211_set_qos_hdr(sdata, new_skb); info = IEEE80211_SKB_CB(new_skb); info->control.vif = &sdata->vif; info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING; __skb_queue_tail(frames, new_skb); } /** * mps_frame_deliver - transmit frames during mesh powersave * * @sta: STA info to transmit to * @n_frames: number of frames to transmit. -1 for all */ static void mps_frame_deliver(struct sta_info *sta, int n_frames) { struct ieee80211_local *local = sta->sdata->local; int ac; struct sk_buff_head frames; struct sk_buff *skb; bool more_data = false; skb_queue_head_init(&frames); /* collect frame(s) from buffers */ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { while (n_frames != 0) { skb = skb_dequeue(&sta->tx_filtered[ac]); if (!skb) { skb = skb_dequeue( &sta->ps_tx_buf[ac]); if (skb) local->total_ps_buffered--; } if (!skb) break; n_frames--; __skb_queue_tail(&frames, skb); } if (!skb_queue_empty(&sta->tx_filtered[ac]) || !skb_queue_empty(&sta->ps_tx_buf[ac])) more_data = true; } /* nothing to send? -> EOSP */ if (skb_queue_empty(&frames)) { mpsp_trigger_send(sta, false, true); return; } /* in a MPSP make sure the last skb is a QoS Data frame */ if (test_sta_flag(sta, WLAN_STA_MPSP_OWNER)) mpsp_qos_null_append(sta, &frames); mps_dbg(sta->sdata, "sending %d frames to PS STA %pM\n", skb_queue_len(&frames), sta->sta.addr); /* prepare collected frames for transmission */ skb_queue_walk(&frames, skb) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_hdr *hdr = (void *) skb->data; /* * Tell TX path to send this frame even though the * STA may still remain is PS mode after this frame * exchange. 
*/ info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER; if (more_data || !skb_queue_is_last(&frames, skb)) hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA); else hdr->frame_control &= cpu_to_le16(~IEEE80211_FCTL_MOREDATA); if (skb_queue_is_last(&frames, skb) && ieee80211_is_data_qos(hdr->frame_control)) { u8 *qoshdr = ieee80211_get_qos_ctl(hdr); /* MPSP trigger frame ends service period */ *qoshdr |= IEEE80211_QOS_CTL_EOSP; info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS; } } ieee80211_add_pending_skbs(local, &frames); sta_info_recalc_tim(sta); } /** * ieee80211_mpsp_trigger_process - track status of mesh Peer Service Periods * * @qc: QoS Control field * @sta: peer to start a MPSP with * @tx: frame was transmitted by the local STA * @acked: frame has been transmitted successfully * * NOTE: active mode STA may only serve as MPSP owner */ void ieee80211_mpsp_trigger_process(u8 *qc, struct sta_info *sta, bool tx, bool acked) { u8 rspi = qc[1] & (IEEE80211_QOS_CTL_RSPI >> 8); u8 eosp = qc[0] & IEEE80211_QOS_CTL_EOSP; if (tx) { if (rspi && acked) set_sta_flag(sta, WLAN_STA_MPSP_RECIPIENT); if (eosp) clear_sta_flag(sta, WLAN_STA_MPSP_OWNER); else if (acked && test_sta_flag(sta, WLAN_STA_PS_STA) && !test_and_set_sta_flag(sta, WLAN_STA_MPSP_OWNER)) mps_frame_deliver(sta, -1); } else { if (eosp) clear_sta_flag(sta, WLAN_STA_MPSP_RECIPIENT); else if (sta->mesh->local_pm != NL80211_MESH_POWER_ACTIVE) set_sta_flag(sta, WLAN_STA_MPSP_RECIPIENT); if (rspi && !test_and_set_sta_flag(sta, WLAN_STA_MPSP_OWNER)) mps_frame_deliver(sta, -1); } } /** * ieee80211_mps_frame_release - release frames buffered due to mesh power save * * @sta: mesh STA * @elems: IEs of beacon or probe response * * For peers if we have individually-addressed frames buffered or the peer * indicates buffered frames, send a corresponding MPSP trigger frame. Since * we do not evaluate the awake window duration, QoS Nulls are used as MPSP * trigger frames. If the neighbour STA is not a peer, only send single frames. */ void ieee80211_mps_frame_release(struct sta_info *sta, struct ieee802_11_elems *elems) { int ac, buffer_local = 0; bool has_buffered = false; if (sta->mesh->plink_state == NL80211_PLINK_ESTAB) has_buffered = ieee80211_check_tim(elems->tim, elems->tim_len, sta->mesh->aid); if (has_buffered) mps_dbg(sta->sdata, "%pM indicates buffered frames\n", sta->sta.addr); /* only transmit to PS STA with announced, non-zero awake window */ if (test_sta_flag(sta, WLAN_STA_PS_STA) && (!elems->awake_window || !le16_to_cpu(*elems->awake_window))) return; if (!test_sta_flag(sta, WLAN_STA_MPSP_OWNER)) for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) buffer_local += skb_queue_len(&sta->ps_tx_buf[ac]) + skb_queue_len(&sta->tx_filtered[ac]); if (!has_buffered && !buffer_local) return; if (sta->mesh->plink_state == NL80211_PLINK_ESTAB) mpsp_trigger_send(sta, has_buffered, !buffer_local); else mps_frame_deliver(sta, 1); } backport-iwlwifi-dkms-7906/net/mac80211/mesh_sync.c000066400000000000000000000150211351064634500217560ustar00rootroot00000000000000/* * Copyright 2011-2012, Pavel Zubarev * Copyright 2011-2012, Marco Porsch * Copyright 2011-2012, cozybit Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include "ieee80211_i.h" #include "mesh.h" #include "driver-ops.h" /* This is not in the standard. It represents a tolerable tsf drift below * which we do no TSF adjustment. 
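 * (Added note: the TOFFSET_* values below are in TSF units, i.e.
 * microseconds, as the "0.8 ms" annotation on the maximum suggests.)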
*/ #define TOFFSET_MINIMUM_ADJUSTMENT 10 /* This is not in the standard. It is a margin added to the * Toffset setpoint to mitigate TSF overcorrection * introduced by TSF adjustment latency. */ #define TOFFSET_SET_MARGIN 20 /* This is not in the standard. It represents the maximum Toffset jump above * which we'll invalidate the Toffset setpoint and choose a new setpoint. This * could be, for instance, in case a neighbor is restarted and its TSF counter * reset. */ #define TOFFSET_MAXIMUM_ADJUSTMENT 800 /* 0.8 ms */ struct sync_method { u8 method; struct ieee80211_mesh_sync_ops ops; }; /** * mesh_peer_tbtt_adjusting - check if an mp is currently adjusting its TBTT * * @ie: information elements of a management frame from the mesh peer */ static bool mesh_peer_tbtt_adjusting(struct ieee802_11_elems *ie) { return (ie->mesh_config->meshconf_cap & IEEE80211_MESHCONF_CAPAB_TBTT_ADJUSTING) != 0; } void mesh_sync_adjust_tsf(struct ieee80211_sub_if_data *sdata) { struct ieee80211_local *local = sdata->local; struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; /* sdata->vif.bss_conf.beacon_int in 1024us units, 0.04% */ u64 beacon_int_fraction = sdata->vif.bss_conf.beacon_int * 1024 / 2500; u64 tsf; u64 tsfdelta; spin_lock_bh(&ifmsh->sync_offset_lock); if (ifmsh->sync_offset_clockdrift_max < beacon_int_fraction) { msync_dbg(sdata, "TSF : max clockdrift=%lld; adjusting\n", (long long) ifmsh->sync_offset_clockdrift_max); tsfdelta = -ifmsh->sync_offset_clockdrift_max; ifmsh->sync_offset_clockdrift_max = 0; } else { msync_dbg(sdata, "TSF : max clockdrift=%lld; adjusting by %llu\n", (long long) ifmsh->sync_offset_clockdrift_max, (unsigned long long) beacon_int_fraction); tsfdelta = -beacon_int_fraction; ifmsh->sync_offset_clockdrift_max -= beacon_int_fraction; } spin_unlock_bh(&ifmsh->sync_offset_lock); if (local->ops->offset_tsf) { drv_offset_tsf(local, sdata, tsfdelta); } else { tsf = drv_get_tsf(local, sdata); if (tsf != -1ULL) drv_set_tsf(local, sdata, tsf + tsfdelta); } } static void mesh_sync_offset_rx_bcn_presp(struct ieee80211_sub_if_data *sdata, u16 stype, struct ieee80211_mgmt *mgmt, struct ieee802_11_elems *elems, struct ieee80211_rx_status *rx_status) { struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; struct ieee80211_local *local = sdata->local; struct sta_info *sta; u64 t_t, t_r; WARN_ON(ifmsh->mesh_sp_id != IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET); /* standard mentions only beacons */ if (stype != IEEE80211_STYPE_BEACON) return; /* * Get time when timestamp field was received. If we don't * have rx timestamps, then use current tsf as an approximation. * drv_get_tsf() must be called before entering the rcu-read * section. 
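 *
 * Added worked example of the offset bookkeeping done below (values
 * are hypothetical): for a beacon timestamp t_t = 1000000 received at
 * local TSF t_r = 1000250, t_offset = t_t - t_r = -250. The first such
 * beacon sets t_offset_setpoint = t_offset - TOFFSET_SET_MARGIN = -270;
 * a later beacon measuring t_offset = -240 then yields
 * t_clockdrift = t_offset_setpoint - t_offset = -30.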
*/ if (ieee80211_have_rx_timestamp(rx_status)) t_r = ieee80211_calculate_rx_timestamp(local, rx_status, 24 + 12 + elems->total_len + FCS_LEN, 24); else t_r = drv_get_tsf(local, sdata); rcu_read_lock(); sta = sta_info_get(sdata, mgmt->sa); if (!sta) goto no_sync; /* check offset sync conditions (13.13.2.2.1) * * TODO also sync to * dot11MeshNbrOffsetMaxNeighbor non-peer non-MBSS neighbors */ if (elems->mesh_config && mesh_peer_tbtt_adjusting(elems)) { msync_dbg(sdata, "STA %pM : is adjusting TBTT\n", sta->sta.addr); goto no_sync; } /* Timing offset calculation (see 13.13.2.2.2) */ t_t = le64_to_cpu(mgmt->u.beacon.timestamp); sta->mesh->t_offset = t_t - t_r; if (test_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN)) { s64 t_clockdrift = sta->mesh->t_offset_setpoint - sta->mesh->t_offset; msync_dbg(sdata, "STA %pM : t_offset=%lld, t_offset_setpoint=%lld, t_clockdrift=%lld\n", sta->sta.addr, (long long) sta->mesh->t_offset, (long long) sta->mesh->t_offset_setpoint, (long long) t_clockdrift); if (t_clockdrift > TOFFSET_MAXIMUM_ADJUSTMENT || t_clockdrift < -TOFFSET_MAXIMUM_ADJUSTMENT) { msync_dbg(sdata, "STA %pM : t_clockdrift=%lld too large, setpoint reset\n", sta->sta.addr, (long long) t_clockdrift); clear_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN); goto no_sync; } spin_lock_bh(&ifmsh->sync_offset_lock); if (t_clockdrift > ifmsh->sync_offset_clockdrift_max) ifmsh->sync_offset_clockdrift_max = t_clockdrift; spin_unlock_bh(&ifmsh->sync_offset_lock); } else { sta->mesh->t_offset_setpoint = sta->mesh->t_offset - TOFFSET_SET_MARGIN; set_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN); msync_dbg(sdata, "STA %pM : offset was invalid, t_offset=%lld\n", sta->sta.addr, (long long) sta->mesh->t_offset); } no_sync: rcu_read_unlock(); } static void mesh_sync_offset_adjust_tsf(struct ieee80211_sub_if_data *sdata, struct beacon_data *beacon) { struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; WARN_ON(ifmsh->mesh_sp_id != IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET); WARN_ON(!rcu_read_lock_held()); spin_lock_bh(&ifmsh->sync_offset_lock); if (ifmsh->sync_offset_clockdrift_max > TOFFSET_MINIMUM_ADJUSTMENT) { /* Since ajusting the tsf here would * require a possibly blocking call * to the driver tsf setter, we punt * the tsf adjustment to the mesh tasklet */ msync_dbg(sdata, "TSF : kicking off TSF adjustment with clockdrift_max=%lld\n", ifmsh->sync_offset_clockdrift_max); set_bit(MESH_WORK_DRIFT_ADJUST, &ifmsh->wrkq_flags); } else { msync_dbg(sdata, "TSF : max clockdrift=%lld; too small to adjust\n", (long long)ifmsh->sync_offset_clockdrift_max); ifmsh->sync_offset_clockdrift_max = 0; } spin_unlock_bh(&ifmsh->sync_offset_lock); } static const struct sync_method sync_methods[] = { { .method = IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET, .ops = { .rx_bcn_presp = &mesh_sync_offset_rx_bcn_presp, .adjust_tsf = &mesh_sync_offset_adjust_tsf, } }, }; const struct ieee80211_mesh_sync_ops *ieee80211_mesh_sync_ops_get(u8 method) { int i; for (i = 0 ; i < ARRAY_SIZE(sync_methods); ++i) { if (sync_methods[i].method == method) return &sync_methods[i].ops; } return NULL; } backport-iwlwifi-dkms-7906/net/mac80211/michael.c000066400000000000000000000042331351064634500213730ustar00rootroot00000000000000/* * Michael MIC implementation - optimized for TKIP MIC operations * Copyright 2002-2003, Instant802 Networks, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. 
*/ #include <linux/types.h> #include <linux/bitops.h> #include <linux/ieee80211.h> #include <asm/unaligned.h> #include "michael.h" static void michael_block(struct michael_mic_ctx *mctx, u32 val) { mctx->l ^= val; mctx->r ^= rol32(mctx->l, 17); mctx->l += mctx->r; mctx->r ^= ((mctx->l & 0xff00ff00) >> 8) | ((mctx->l & 0x00ff00ff) << 8); mctx->l += mctx->r; mctx->r ^= rol32(mctx->l, 3); mctx->l += mctx->r; mctx->r ^= ror32(mctx->l, 2); mctx->l += mctx->r; } static void michael_mic_hdr(struct michael_mic_ctx *mctx, const u8 *key, struct ieee80211_hdr *hdr) { u8 *da, *sa, tid; da = ieee80211_get_DA(hdr); sa = ieee80211_get_SA(hdr); if (ieee80211_is_data_qos(hdr->frame_control)) tid = ieee80211_get_tid(hdr); else tid = 0; mctx->l = get_unaligned_le32(key); mctx->r = get_unaligned_le32(key + 4); /* * A pseudo header (DA, SA, Priority, 0, 0, 0) is used in Michael MIC * calculation, but it is _not_ transmitted */ michael_block(mctx, get_unaligned_le32(da)); michael_block(mctx, get_unaligned_le16(&da[4]) | (get_unaligned_le16(sa) << 16)); michael_block(mctx, get_unaligned_le32(&sa[2])); michael_block(mctx, tid); } void michael_mic(const u8 *key, struct ieee80211_hdr *hdr, const u8 *data, size_t data_len, u8 *mic) { u32 val; size_t block, blocks, left; struct michael_mic_ctx mctx; michael_mic_hdr(&mctx, key, hdr); /* Real data */ blocks = data_len / 4; left = data_len % 4; for (block = 0; block < blocks; block++) michael_block(&mctx, get_unaligned_le32(&data[block * 4])); /* Partial block of 0..3 bytes and padding: 0x5a + 4..7 zeros to make * total length a multiple of 4. */ val = 0x5a; while (left > 0) { val <<= 8; left--; val |= data[blocks * 4 + left]; } michael_block(&mctx, val); michael_block(&mctx, 0); put_unaligned_le32(mctx.l, mic); put_unaligned_le32(mctx.r, mic + 4); } backport-iwlwifi-dkms-7906/net/mac80211/michael.h000066400000000000000000000011321351064634500213740ustar00rootroot00000000000000/* * Michael MIC implementation - optimized for TKIP MIC operations * Copyright 2002-2003, Instant802 Networks, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #ifndef MICHAEL_H #define MICHAEL_H #include <linux/types.h> #include <linux/ieee80211.h> #define MICHAEL_MIC_LEN 8 struct michael_mic_ctx { u32 l, r; }; void michael_mic(const u8 *key, struct ieee80211_hdr *hdr, const u8 *data, size_t data_len, u8 *mic); #endif /* MICHAEL_H */ backport-iwlwifi-dkms-7906/net/mac80211/mlme.c000066400000000000000000005077241351064634500207340ustar00rootroot00000000000000/* * BSS client mode implementation * Copyright 2003-2008, Jouni Malinen <j@w1.fi> * Copyright 2004, Instant802 Networks, Inc. * Copyright 2005, Devicescape Software, Inc. * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> * Copyright 2007, Michael Wu <flamingice@sourmilk.net> * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015 - 2017 Intel Deutschland GmbH * Copyright (C) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation.
*/ #include <linux/delay.h> #include <linux/if_ether.h> #include <linux/skbuff.h> #include <linux/if_arp.h> #include <linux/etherdevice.h> #include <linux/moduleparam.h> #include <linux/rtnetlink.h> #include <linux/crc32.h> #include <linux/slab.h> #include <linux/export.h> #include <net/mac80211.h> #include <asm/unaligned.h> #include "ieee80211_i.h" #include "driver-ops.h" #include "rate.h" #include "led.h" #include "fils_aead.h" #define IEEE80211_AUTH_TIMEOUT (CPTCFG_IWL_TIMEOUT_FACTOR * HZ / 5) #define IEEE80211_AUTH_TIMEOUT_LONG (CPTCFG_IWL_TIMEOUT_FACTOR * HZ / 2) #define IEEE80211_AUTH_TIMEOUT_SHORT (CPTCFG_IWL_TIMEOUT_FACTOR * HZ / 10) #define IEEE80211_AUTH_TIMEOUT_SAE (CPTCFG_IWL_TIMEOUT_FACTOR * HZ * 2) #define IEEE80211_AUTH_MAX_TRIES 3 #define IEEE80211_AUTH_WAIT_ASSOC (CPTCFG_IWL_TIMEOUT_FACTOR * HZ * 5) #define IEEE80211_ASSOC_TIMEOUT (CPTCFG_IWL_TIMEOUT_FACTOR * HZ / 5) #define IEEE80211_ASSOC_TIMEOUT_LONG (CPTCFG_IWL_TIMEOUT_FACTOR * HZ / 2) #define IEEE80211_ASSOC_TIMEOUT_SHORT (CPTCFG_IWL_TIMEOUT_FACTOR * HZ / 10) #define IEEE80211_ASSOC_MAX_TRIES 3 static int max_nullfunc_tries = 2; module_param(max_nullfunc_tries, int, 0644); MODULE_PARM_DESC(max_nullfunc_tries, "Maximum nullfunc tx tries before disconnecting (reason 4)."); static int max_probe_tries = 5; module_param(max_probe_tries, int, 0644); MODULE_PARM_DESC(max_probe_tries, "Maximum probe tries before disconnecting (reason 4)."); /* * Beacon loss timeout is calculated as N frames times the * advertised beacon interval. This may need to be somewhat * higher than what hardware might detect to account for * delays in the host processing frames. But since we also * probe on beacon miss before declaring the connection lost, * default to what we want. */ static int beacon_loss_count = 7; module_param(beacon_loss_count, int, 0644); MODULE_PARM_DESC(beacon_loss_count, "Number of beacon intervals before we decide beacon was lost."); /* * Time the connection can be idle before we probe * it to see if we can still talk to the AP. */ #define IEEE80211_CONNECTION_IDLE_TIME (30 * HZ) /* * Time we wait for a probe response after sending * a probe request because of beacon loss or for * checking the connection still works. */ static int probe_wait_ms = 500; module_param(probe_wait_ms, int, 0644); MODULE_PARM_DESC(probe_wait_ms, "Maximum time(ms) to wait for probe response" " before disconnecting (reason 4)."); /* * How many Beacon frames need to have been used in average signal strength * before starting to indicate signal change events. */ #define IEEE80211_SIGNAL_AVE_MIN_COUNT 4 /* * We can have multiple work items (and connection probing) * scheduling this timer, but we need to take care to only * reschedule it when it should fire _earlier_ than it was * asked for before, or if it's not pending right now. This * function ensures that. Note that it then is required to * run this function for all timeouts after the first one * has happened -- the work that runs from this timer will * do that.
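 *
 * (Added note) Concretely, mod_timer() below is only called when the
 * timer is not pending or when the new timeout is earlier than the
 * pending expiry, so a later request can never postpone an earlier
 * deadline.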
*/ static void run_again(struct ieee80211_sub_if_data *sdata, unsigned long timeout) { sdata_assert_lock(sdata); if (!timer_pending(&sdata->u.mgd.timer) || time_before(timeout, sdata->u.mgd.timer.expires)) mod_timer(&sdata->u.mgd.timer, timeout); } void ieee80211_sta_reset_beacon_monitor(struct ieee80211_sub_if_data *sdata) { if (sdata->vif.driver_flags & IEEE80211_VIF_BEACON_FILTER) return; if (ieee80211_hw_check(&sdata->local->hw, CONNECTION_MONITOR)) return; mod_timer(&sdata->u.mgd.bcn_mon_timer, round_jiffies_up(jiffies + sdata->u.mgd.beacon_timeout)); } void ieee80211_sta_reset_conn_monitor(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; if (unlikely(!ifmgd->associated)) return; if (ifmgd->probe_send_count) ifmgd->probe_send_count = 0; if (ieee80211_hw_check(&sdata->local->hw, CONNECTION_MONITOR)) return; mod_timer(&ifmgd->conn_mon_timer, round_jiffies_up(jiffies + IEEE80211_CONNECTION_IDLE_TIME)); } static int ecw2cw(int ecw) { return (1 << ecw) - 1; } static u32 ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata, struct ieee80211_supported_band *sband, struct ieee80211_channel *channel, const struct ieee80211_ht_operation *ht_oper, const struct ieee80211_vht_operation *vht_oper, const struct ieee80211_he_operation *he_oper, struct cfg80211_chan_def *chandef, bool tracking) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; struct cfg80211_chan_def vht_chandef; struct ieee80211_sta_ht_cap sta_ht_cap; u32 ht_cfreq, ret; memcpy(&sta_ht_cap, &sband->ht_cap, sizeof(sta_ht_cap)); ieee80211_apply_htcap_overrides(sdata, &sta_ht_cap); chandef->chan = channel; chandef->width = NL80211_CHAN_WIDTH_20_NOHT; chandef->center_freq1 = channel->center_freq; chandef->center_freq2 = 0; if (!ht_oper || !sta_ht_cap.ht_supported) { ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT; goto out; } chandef->width = NL80211_CHAN_WIDTH_20; ht_cfreq = ieee80211_channel_to_frequency(ht_oper->primary_chan, channel->band); /* check that channel matches the right operating channel */ if (!tracking && channel->center_freq != ht_cfreq) { /* * It's possible that some APs are confused here; * Netgear WNDR3700 sometimes reports 4 higher than * the actual channel in association responses, but * since we look at probe response/beacon data here * it should be OK. 
*/ sdata_info(sdata, "mismatch: center-freq: %d ht-cfreq: %d ht->primary_chan: %d band: %d - Disable HT\n", channel->center_freq, ht_cfreq, ht_oper->primary_chan, channel->band); ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT; goto out; } /* check 40 MHz support, if we have it */ if (sta_ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) { ieee80211_chandef_ht_oper(ht_oper, chandef); } else { /* 40 MHz (and 80 MHz) must be supported for VHT */ ret = IEEE80211_STA_DISABLE_VHT; /* also mark 40 MHz disabled */ ret |= IEEE80211_STA_DISABLE_40MHZ; goto out; } if (!vht_oper || !sband->vht_cap.vht_supported) { ret = IEEE80211_STA_DISABLE_VHT; goto out; } vht_chandef = *chandef; if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HE) && he_oper && (le32_to_cpu(he_oper->he_oper_params) & IEEE80211_HE_OPERATION_VHT_OPER_INFO)) { struct ieee80211_vht_operation he_oper_vht_cap; /* * Set only first 3 bytes (other 2 aren't used in * ieee80211_chandef_vht_oper() anyway) */ memcpy(&he_oper_vht_cap, he_oper->optional, 3); he_oper_vht_cap.basic_mcs_set = cpu_to_le16(0); if (!ieee80211_chandef_vht_oper(&sdata->local->hw, &he_oper_vht_cap, ht_oper, &vht_chandef)) { if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HE)) sdata_info(sdata, "HE AP VHT information is invalid, disable HE\n"); ret = IEEE80211_STA_DISABLE_HE; goto out; } } else if (!ieee80211_chandef_vht_oper(&sdata->local->hw, vht_oper, ht_oper, &vht_chandef)) { if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT)) sdata_info(sdata, "AP VHT information is invalid, disable VHT\n"); ret = IEEE80211_STA_DISABLE_VHT; goto out; } if (!cfg80211_chandef_valid(&vht_chandef)) { if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT)) sdata_info(sdata, "AP VHT information is invalid, disable VHT\n"); ret = IEEE80211_STA_DISABLE_VHT; goto out; } if (cfg80211_chandef_identical(chandef, &vht_chandef)) { ret = 0; goto out; } if (!cfg80211_chandef_compatible(chandef, &vht_chandef)) { if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT)) sdata_info(sdata, "AP VHT information doesn't match HT, disable VHT\n"); ret = IEEE80211_STA_DISABLE_VHT; goto out; } *chandef = vht_chandef; ret = 0; out: /* * When tracking the current AP, don't do any further checks if the * new chandef is identical to the one we're currently using for the * connection. This keeps us from playing ping-pong with regulatory, * without it the following can happen (for example): * - connect to an AP with 80 MHz, world regdom allows 80 MHz * - AP advertises regdom US * - CRDA loads regdom US with 80 MHz prohibited (old database) * - the code below detects an unsupported channel, downgrades, and * we disconnect from the AP in the caller * - disconnect causes CRDA to reload world regdomain and the game * starts anew. * (see https://bugzilla.kernel.org/show_bug.cgi?id=70881) * * It seems possible that there are still scenarios with CSA or real * bandwidth changes where a this could happen, but those cases are * less common and wouldn't completely prevent using the AP. 
*/ if (tracking && cfg80211_chandef_identical(chandef, &sdata->vif.bss_conf.chandef)) return ret; /* don't print the message below for VHT mismatch if VHT is disabled */ if (ret & IEEE80211_STA_DISABLE_VHT) vht_chandef = *chandef; /* * Ignore the DISABLED flag when we're already connected and only * tracking the APs beacon for bandwidth changes - otherwise we * might get disconnected here if we connect to an AP, update our * regulatory information based on the AP's country IE and the * information we have is wrong/outdated and disables the channel * that we're actually using for the connection to the AP. */ while (!cfg80211_chandef_usable(sdata->local->hw.wiphy, chandef, tracking ? 0 : IEEE80211_CHAN_DISABLED)) { if (WARN_ON(chandef->width == NL80211_CHAN_WIDTH_20_NOHT)) { ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT; break; } ret |= ieee80211_chandef_downgrade(chandef); } if (chandef->width != vht_chandef.width && !tracking) sdata_info(sdata, "capabilities/regulatory prevented using AP HT/VHT configuration, downgraded\n"); WARN_ON_ONCE(!cfg80211_chandef_valid(chandef)); return ret; } static int ieee80211_config_bw(struct ieee80211_sub_if_data *sdata, struct sta_info *sta, const struct ieee80211_ht_cap *ht_cap, const struct ieee80211_ht_operation *ht_oper, const struct ieee80211_vht_operation *vht_oper, const struct ieee80211_he_operation *he_oper, const u8 *bssid, u32 *changed) { struct ieee80211_local *local = sdata->local; struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; struct ieee80211_channel *chan = sdata->vif.bss_conf.chandef.chan; struct ieee80211_supported_band *sband = local->hw.wiphy->bands[chan->band]; struct cfg80211_chan_def chandef; u16 ht_opmode; u32 flags; enum ieee80211_sta_rx_bandwidth new_sta_bw; int ret; /* if HT was/is disabled, don't track any bandwidth changes */ if (ifmgd->flags & IEEE80211_STA_DISABLE_HT || !ht_oper) return 0; /* don't check VHT if we associated as non-VHT station */ if (ifmgd->flags & IEEE80211_STA_DISABLE_VHT) vht_oper = NULL; /* don't check HE if we associated as non-HE station */ if (ifmgd->flags & IEEE80211_STA_DISABLE_HE || !ieee80211_get_he_sta_cap(sband)) he_oper = NULL; if (WARN_ON_ONCE(!sta)) return -EINVAL; /* * if bss configuration changed store the new one - * this may be applicable even if channel is identical */ ht_opmode = le16_to_cpu(ht_oper->operation_mode); if (sdata->vif.bss_conf.ht_operation_mode != ht_opmode) { *changed |= BSS_CHANGED_HT; sdata->vif.bss_conf.ht_operation_mode = ht_opmode; } /* calculate new channel (type) based on HT/VHT/HE operation IEs */ flags = ieee80211_determine_chantype(sdata, sband, chan, ht_oper, vht_oper, he_oper, &chandef, true); /* * Downgrade the new channel if we associated with restricted * capabilities. For example, if we associated as a 20 MHz STA * to a 40 MHz AP (due to regulatory, capabilities or config * reasons) then switching to a 40 MHz channel now won't do us * any good -- we couldn't use it with the AP. 
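 *
 * (Added note) ieee80211_chandef_downgrade() narrows the width one
 * step per call (e.g. 160 -> 80 -> 40 -> 20 MHz), so each restriction
 * flag checked below applies at most one such downgrade step here.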
*/ if (ifmgd->flags & IEEE80211_STA_DISABLE_80P80MHZ && chandef.width == NL80211_CHAN_WIDTH_80P80) flags |= ieee80211_chandef_downgrade(&chandef); if (ifmgd->flags & IEEE80211_STA_DISABLE_160MHZ && chandef.width == NL80211_CHAN_WIDTH_160) flags |= ieee80211_chandef_downgrade(&chandef); if (ifmgd->flags & IEEE80211_STA_DISABLE_40MHZ && chandef.width > NL80211_CHAN_WIDTH_20) flags |= ieee80211_chandef_downgrade(&chandef); if (cfg80211_chandef_identical(&chandef, &sdata->vif.bss_conf.chandef)) return 0; sdata_info(sdata, "AP %pM changed bandwidth, new config is %d MHz, width %d (%d/%d MHz)\n", ifmgd->bssid, chandef.chan->center_freq, chandef.width, chandef.center_freq1, chandef.center_freq2); if (flags != (ifmgd->flags & (IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT | IEEE80211_STA_DISABLE_40MHZ | IEEE80211_STA_DISABLE_80P80MHZ | IEEE80211_STA_DISABLE_160MHZ)) || !cfg80211_chandef_valid(&chandef)) { sdata_info(sdata, "AP %pM changed bandwidth in a way we can't support - disconnect\n", ifmgd->bssid); return -EINVAL; } switch (chandef.width) { case NL80211_CHAN_WIDTH_20_NOHT: case NL80211_CHAN_WIDTH_20: new_sta_bw = IEEE80211_STA_RX_BW_20; break; case NL80211_CHAN_WIDTH_40: new_sta_bw = IEEE80211_STA_RX_BW_40; break; case NL80211_CHAN_WIDTH_80: new_sta_bw = IEEE80211_STA_RX_BW_80; break; case NL80211_CHAN_WIDTH_80P80: case NL80211_CHAN_WIDTH_160: new_sta_bw = IEEE80211_STA_RX_BW_160; break; default: return -EINVAL; } if (new_sta_bw > sta->cur_max_bandwidth) new_sta_bw = sta->cur_max_bandwidth; if (new_sta_bw < sta->sta.bandwidth) { sta->sta.bandwidth = new_sta_bw; rate_control_rate_update(local, sband, sta, IEEE80211_RC_BW_CHANGED); } ret = ieee80211_vif_change_bandwidth(sdata, &chandef, changed); if (ret) { sdata_info(sdata, "AP %pM changed bandwidth to incompatible one - disconnect\n", ifmgd->bssid); return ret; } if (new_sta_bw > sta->sta.bandwidth) { sta->sta.bandwidth = new_sta_bw; rate_control_rate_update(local, sband, sta, IEEE80211_RC_BW_CHANGED); } return 0; } /* frame sending functions */ static void ieee80211_add_ht_ie(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, u8 ap_ht_param, struct ieee80211_supported_band *sband, struct ieee80211_channel *channel, enum ieee80211_smps_mode smps) { u8 *pos; u32 flags = channel->flags; u16 cap; struct ieee80211_sta_ht_cap ht_cap; BUILD_BUG_ON(sizeof(ht_cap) != sizeof(sband->ht_cap)); memcpy(&ht_cap, &sband->ht_cap, sizeof(ht_cap)); ieee80211_apply_htcap_overrides(sdata, &ht_cap); /* determine capability flags */ cap = ht_cap.cap; switch (ap_ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) { case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: if (flags & IEEE80211_CHAN_NO_HT40PLUS) { cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40; cap &= ~IEEE80211_HT_CAP_SGI_40; } break; case IEEE80211_HT_PARAM_CHA_SEC_BELOW: if (flags & IEEE80211_CHAN_NO_HT40MINUS) { cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40; cap &= ~IEEE80211_HT_CAP_SGI_40; } break; } /* * If 40 MHz was disabled associate as though we weren't * capable of 40 MHz -- some broken APs will never fall * back to trying to transmit in 20 MHz. 
*/ if (sdata->u.mgd.flags & IEEE80211_STA_DISABLE_40MHZ) { cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40; cap &= ~IEEE80211_HT_CAP_SGI_40; } /* set SM PS mode properly */ cap &= ~IEEE80211_HT_CAP_SM_PS; switch (smps) { case IEEE80211_SMPS_AUTOMATIC: case IEEE80211_SMPS_NUM_MODES: WARN_ON(1); /* fall through */ case IEEE80211_SMPS_OFF: cap |= WLAN_HT_CAP_SM_PS_DISABLED << IEEE80211_HT_CAP_SM_PS_SHIFT; break; case IEEE80211_SMPS_STATIC: cap |= WLAN_HT_CAP_SM_PS_STATIC << IEEE80211_HT_CAP_SM_PS_SHIFT; break; case IEEE80211_SMPS_DYNAMIC: cap |= WLAN_HT_CAP_SM_PS_DYNAMIC << IEEE80211_HT_CAP_SM_PS_SHIFT; break; } /* reserve and fill IE */ pos = skb_put(skb, sizeof(struct ieee80211_ht_cap) + 2); ieee80211_ie_build_ht_cap(pos, &ht_cap, cap); } /* This function determines vht capability flags for the association * and builds the IE. * Note - the function may set the owner of the MU-MIMO capability */ static void ieee80211_add_vht_ie(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, struct ieee80211_supported_band *sband, struct ieee80211_vht_cap *ap_vht_cap) { struct ieee80211_local *local = sdata->local; u8 *pos; u32 cap; struct ieee80211_sta_vht_cap vht_cap; u32 mask, ap_bf_sts, our_bf_sts; BUILD_BUG_ON(sizeof(vht_cap) != sizeof(sband->vht_cap)); memcpy(&vht_cap, &sband->vht_cap, sizeof(vht_cap)); ieee80211_apply_vhtcap_overrides(sdata, &vht_cap); /* determine capability flags */ cap = vht_cap.cap; if (sdata->u.mgd.flags & IEEE80211_STA_DISABLE_80P80MHZ) { u32 bw = cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK; cap &= ~IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK; if (bw == IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ || bw == IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ) cap |= IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ; } if (sdata->u.mgd.flags & IEEE80211_STA_DISABLE_160MHZ) { cap &= ~IEEE80211_VHT_CAP_SHORT_GI_160; cap &= ~IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK; } /* * Some APs apparently get confused if our capabilities are better * than theirs, so restrict what we advertise in the assoc request. */ if (!(ap_vht_cap->vht_cap_info & cpu_to_le32(IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE))) cap &= ~(IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE); else if (!(ap_vht_cap->vht_cap_info & cpu_to_le32(IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE))) cap &= ~IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE; /* * If some other vif is using the MU-MIMO capablity we cannot associate * using MU-MIMO - this will lead to contradictions in the group-id * mechanism. * Ownership is defined since association request, in order to avoid * simultaneous associations with MU-MIMO. */ if (cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE) { bool disable_mu_mimo = false; struct ieee80211_sub_if_data *other; list_for_each_entry_rcu(other, &local->interfaces, list) { if (other->vif.mu_mimo_owner) { disable_mu_mimo = true; break; } } if (disable_mu_mimo) cap &= ~IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE; else sdata->vif.mu_mimo_owner = true; } mask = IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK; ap_bf_sts = le32_to_cpu(ap_vht_cap->vht_cap_info) & mask; our_bf_sts = cap & mask; if (ap_bf_sts < our_bf_sts) { cap &= ~mask; cap |= ap_bf_sts; } /* reserve and fill IE */ pos = skb_put(skb, sizeof(struct ieee80211_vht_cap) + 2); ieee80211_ie_build_vht_cap(pos, &vht_cap, cap); } /* This function determines HE capability flags for the association * and builds the IE. 
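 * (Added note) The HE capabilities are carried inside an Extension
 * element, which is why the size computation in this function counts
 * one extra byte for the element ID extension (see the TODO below).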
*/ static void ieee80211_add_he_ie(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, struct ieee80211_supported_band *sband) { u8 *pos; const struct ieee80211_sta_he_cap *he_cap = NULL; u8 he_cap_size; he_cap = ieee80211_get_he_sta_cap(sband); if (!he_cap) return; /* * TODO: the 1 added is because this temporarily is under the EXTENSION * IE. Get rid of it when it moves. */ he_cap_size = 2 + 1 + sizeof(he_cap->he_cap_elem) + ieee80211_he_mcs_nss_size(&he_cap->he_cap_elem) + ieee80211_he_ppe_size(he_cap->ppe_thres[0], he_cap->he_cap_elem.phy_cap_info); pos = skb_put(skb, he_cap_size); ieee80211_ie_build_he_cap(pos, he_cap, pos + he_cap_size); } static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata) { struct ieee80211_local *local = sdata->local; struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; struct ieee80211_mgd_assoc_data *assoc_data = ifmgd->assoc_data; struct sk_buff *skb; struct ieee80211_mgmt *mgmt; u8 *pos, qos_info, *ie_start; size_t offset = 0, noffset; int i, count, rates_len, supp_rates_len, shift; u16 capab; struct ieee80211_supported_band *sband; struct ieee80211_chanctx_conf *chanctx_conf; struct ieee80211_channel *chan; u32 rates = 0; sdata_assert_lock(sdata); rcu_read_lock(); chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); if (WARN_ON(!chanctx_conf)) { rcu_read_unlock(); return; } chan = chanctx_conf->def.chan; rcu_read_unlock(); sband = local->hw.wiphy->bands[chan->band]; shift = ieee80211_vif_get_shift(&sdata->vif); if (assoc_data->supp_rates_len) { /* * Get all rates supported by the device and the AP as * some APs don't like getting a superset of their rates * in the association request (e.g. D-Link DAP 1353 in * b-only mode)... */ rates_len = ieee80211_parse_bitrates(&chanctx_conf->def, sband, assoc_data->supp_rates, assoc_data->supp_rates_len, &rates); } else { /* * In case AP not provide any supported rates information * before association, we send information element(s) with * all rates that we support. */ rates_len = 0; for (i = 0; i < sband->n_bitrates; i++) { rates |= BIT(i); rates_len++; } } skb = alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt) + /* bit too much but doesn't matter */ 2 + assoc_data->ssid_len + /* SSID */ 4 + rates_len + /* (extended) rates */ 4 + /* power capability */ 2 + 2 * sband->n_channels + /* supported channels */ 2 + sizeof(struct ieee80211_ht_cap) + /* HT */ 2 + sizeof(struct ieee80211_vht_cap) + /* VHT */ 2 + 1 + sizeof(struct ieee80211_he_cap_elem) + /* HE */ sizeof(struct ieee80211_he_mcs_nss_supp) + IEEE80211_HE_PPE_THRES_MAX_LEN + assoc_data->ie_len + /* extra IEs */ (assoc_data->fils_kek_len ? 
16 /* AES-SIV */ : 0) + 9, /* WMM */ GFP_KERNEL); if (!skb) return; skb_reserve(skb, local->hw.extra_tx_headroom); capab = WLAN_CAPABILITY_ESS; if (sband->band == NL80211_BAND_2GHZ) { capab |= WLAN_CAPABILITY_SHORT_SLOT_TIME; capab |= WLAN_CAPABILITY_SHORT_PREAMBLE; } if (assoc_data->capability & WLAN_CAPABILITY_PRIVACY) capab |= WLAN_CAPABILITY_PRIVACY; if ((assoc_data->capability & WLAN_CAPABILITY_SPECTRUM_MGMT) && ieee80211_hw_check(&local->hw, SPECTRUM_MGMT)) capab |= WLAN_CAPABILITY_SPECTRUM_MGMT; if (ifmgd->flags & IEEE80211_STA_ENABLE_RRM) capab |= WLAN_CAPABILITY_RADIO_MEASURE; mgmt = skb_put_zero(skb, 24); memcpy(mgmt->da, assoc_data->bss->bssid, ETH_ALEN); memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); memcpy(mgmt->bssid, assoc_data->bss->bssid, ETH_ALEN); if (!is_zero_ether_addr(assoc_data->prev_bssid)) { skb_put(skb, 10); mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_REASSOC_REQ); mgmt->u.reassoc_req.capab_info = cpu_to_le16(capab); mgmt->u.reassoc_req.listen_interval = cpu_to_le16(local->hw.conf.listen_interval); memcpy(mgmt->u.reassoc_req.current_ap, assoc_data->prev_bssid, ETH_ALEN); } else { skb_put(skb, 4); mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ASSOC_REQ); mgmt->u.assoc_req.capab_info = cpu_to_le16(capab); mgmt->u.assoc_req.listen_interval = cpu_to_le16(local->hw.conf.listen_interval); } /* SSID */ pos = skb_put(skb, 2 + assoc_data->ssid_len); ie_start = pos; *pos++ = WLAN_EID_SSID; *pos++ = assoc_data->ssid_len; memcpy(pos, assoc_data->ssid, assoc_data->ssid_len); /* add all rates which were marked to be used above */ supp_rates_len = rates_len; if (supp_rates_len > 8) supp_rates_len = 8; pos = skb_put(skb, supp_rates_len + 2); *pos++ = WLAN_EID_SUPP_RATES; *pos++ = supp_rates_len; count = 0; for (i = 0; i < sband->n_bitrates; i++) { if (BIT(i) & rates) { int rate = DIV_ROUND_UP(sband->bitrates[i].bitrate, 5 * (1 << shift)); *pos++ = (u8) rate; if (++count == 8) break; } } if (rates_len > count) { pos = skb_put(skb, rates_len - count + 2); *pos++ = WLAN_EID_EXT_SUPP_RATES; *pos++ = rates_len - count; for (i++; i < sband->n_bitrates; i++) { if (BIT(i) & rates) { int rate; rate = DIV_ROUND_UP(sband->bitrates[i].bitrate, 5 * (1 << shift)); *pos++ = (u8) rate; } } } if (capab & WLAN_CAPABILITY_SPECTRUM_MGMT || capab & WLAN_CAPABILITY_RADIO_MEASURE) { pos = skb_put(skb, 4); *pos++ = WLAN_EID_PWR_CAPABILITY; *pos++ = 2; *pos++ = 0; /* min tx power */ /* max tx power */ *pos++ = ieee80211_chandef_max_power(&chanctx_conf->def); } if (capab & WLAN_CAPABILITY_SPECTRUM_MGMT) { /* TODO: get this in reg domain format */ pos = skb_put(skb, 2 * sband->n_channels + 2); *pos++ = WLAN_EID_SUPPORTED_CHANNELS; *pos++ = 2 * sband->n_channels; for (i = 0; i < sband->n_channels; i++) { *pos++ = ieee80211_frequency_to_channel( sband->channels[i].center_freq); *pos++ = 1; /* one channel in the subband*/ } } /* Set MBSSID support for HE AP if needed */ if (ieee80211_hw_check(&local->hw, SUPPORTS_ONLY_HE_MULTI_BSSID) && !(ifmgd->flags & IEEE80211_STA_DISABLE_HE) && assoc_data->ie_len) { struct element *elem; /* we know it's writable, cast away the const */ elem = (void *)cfg80211_find_elem(WLAN_EID_EXT_CAPABILITY, assoc_data->ie, assoc_data->ie_len); /* We can probably assume both always true */ if (elem && elem->datalen >= 3) elem->data[2] |= WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT; } /* if present, add any custom IEs that go before HT */ if (assoc_data->ie_len) { static const u8 before_ht[] = { WLAN_EID_SSID, WLAN_EID_SUPP_RATES, 
WLAN_EID_EXT_SUPP_RATES, WLAN_EID_PWR_CAPABILITY, WLAN_EID_SUPPORTED_CHANNELS, WLAN_EID_RSN, WLAN_EID_QOS_CAPA, WLAN_EID_RRM_ENABLED_CAPABILITIES, WLAN_EID_MOBILITY_DOMAIN, WLAN_EID_FAST_BSS_TRANSITION, /* reassoc only */ WLAN_EID_RIC_DATA, /* reassoc only */ WLAN_EID_SUPPORTED_REGULATORY_CLASSES, }; static const u8 after_ric[] = { WLAN_EID_SUPPORTED_REGULATORY_CLASSES, WLAN_EID_HT_CAPABILITY, WLAN_EID_BSS_COEX_2040, /* luckily this is almost always there */ WLAN_EID_EXT_CAPABILITY, WLAN_EID_QOS_TRAFFIC_CAPA, WLAN_EID_TIM_BCAST_REQ, WLAN_EID_INTERWORKING, /* 60 GHz (Multi-band, DMG, MMS) can't happen */ WLAN_EID_VHT_CAPABILITY, WLAN_EID_OPMODE_NOTIF, }; noffset = ieee80211_ie_split_ric(assoc_data->ie, assoc_data->ie_len, before_ht, ARRAY_SIZE(before_ht), after_ric, ARRAY_SIZE(after_ric), offset); skb_put_data(skb, assoc_data->ie + offset, noffset - offset); offset = noffset; } if (WARN_ON_ONCE((ifmgd->flags & IEEE80211_STA_DISABLE_HT) && !(ifmgd->flags & IEEE80211_STA_DISABLE_VHT))) ifmgd->flags |= IEEE80211_STA_DISABLE_VHT; if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HT)) ieee80211_add_ht_ie(sdata, skb, assoc_data->ap_ht_param, sband, chan, sdata->smps_mode); /* if present, add any custom IEs that go before VHT */ if (assoc_data->ie_len) { static const u8 before_vht[] = { /* * no need to list the ones split off before HT * or generated here */ WLAN_EID_BSS_COEX_2040, WLAN_EID_EXT_CAPABILITY, WLAN_EID_QOS_TRAFFIC_CAPA, WLAN_EID_TIM_BCAST_REQ, WLAN_EID_INTERWORKING, /* 60 GHz (Multi-band, DMG, MMS) can't happen */ }; /* RIC already taken above, so no need to handle here anymore */ noffset = ieee80211_ie_split(assoc_data->ie, assoc_data->ie_len, before_vht, ARRAY_SIZE(before_vht), offset); skb_put_data(skb, assoc_data->ie + offset, noffset - offset); offset = noffset; } /* if present, add any custom IEs that go before HE */ if (assoc_data->ie_len) { static const u8 before_he[] = { /* * no need to list the ones split off before VHT * or generated here */ WLAN_EID_OPMODE_NOTIF, WLAN_EID_EXTENSION, WLAN_EID_EXT_FUTURE_CHAN_GUIDANCE, /* 11ai elements */ WLAN_EID_EXTENSION, WLAN_EID_EXT_FILS_SESSION, WLAN_EID_EXTENSION, WLAN_EID_EXT_FILS_PUBLIC_KEY, WLAN_EID_EXTENSION, WLAN_EID_EXT_FILS_KEY_CONFIRM, WLAN_EID_EXTENSION, WLAN_EID_EXT_FILS_HLP_CONTAINER, WLAN_EID_EXTENSION, WLAN_EID_EXT_FILS_IP_ADDR_ASSIGN, /* TODO: add 11ah/11aj/11ak elements */ }; /* RIC already taken above, so no need to handle here anymore */ noffset = ieee80211_ie_split(assoc_data->ie, assoc_data->ie_len, before_he, ARRAY_SIZE(before_he), offset); pos = skb_put(skb, noffset - offset); memcpy(pos, assoc_data->ie + offset, noffset - offset); offset = noffset; } if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT)) ieee80211_add_vht_ie(sdata, skb, sband, &assoc_data->ap_vht_cap); /* * If AP doesn't support HT, mark HE as disabled. * If on the 5GHz band, make sure it supports VHT. 
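 * (Added note) In other words, HE is only kept when HT is usable and,
 * on the 5 GHz band, when VHT is usable as well; the check below
 * implements exactly that dependency.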
*/ if (ifmgd->flags & IEEE80211_STA_DISABLE_HT || (sband->band == NL80211_BAND_5GHZ && ifmgd->flags & IEEE80211_STA_DISABLE_VHT)) ifmgd->flags |= IEEE80211_STA_DISABLE_HE; if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HE)) ieee80211_add_he_ie(sdata, skb, sband); /* if present, add any custom non-vendor IEs that go after HE */ if (assoc_data->ie_len) { noffset = ieee80211_ie_split_vendor(assoc_data->ie, assoc_data->ie_len, offset); skb_put_data(skb, assoc_data->ie + offset, noffset - offset); offset = noffset; } if (assoc_data->wmm) { if (assoc_data->uapsd) { qos_info = ifmgd->uapsd_queues; qos_info |= (ifmgd->uapsd_max_sp_len << IEEE80211_WMM_IE_STA_QOSINFO_SP_SHIFT); } else { qos_info = 0; } pos = ieee80211_add_wmm_info_ie(skb_put(skb, 9), qos_info); } /* add any remaining custom (i.e. vendor specific here) IEs */ if (assoc_data->ie_len) { noffset = assoc_data->ie_len; skb_put_data(skb, assoc_data->ie + offset, noffset - offset); } if (assoc_data->fils_kek_len && fils_encrypt_assoc_req(skb, assoc_data) < 0) { dev_kfree_skb(skb); return; } pos = skb_tail_pointer(skb); kfree(ifmgd->assoc_req_ies); ifmgd->assoc_req_ies = kmemdup(ie_start, pos - ie_start, GFP_ATOMIC); ifmgd->assoc_req_ies_len = pos - ie_start; drv_mgd_prepare_tx(local, sdata, 0); IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; if (ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS | IEEE80211_TX_INTFL_MLME_CONN_TX; ieee80211_tx_skb(sdata, skb); } void ieee80211_send_pspoll(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata) { struct ieee80211_pspoll *pspoll; struct sk_buff *skb; skb = ieee80211_pspoll_get(&local->hw, &sdata->vif); if (!skb) return; pspoll = (struct ieee80211_pspoll *) skb->data; pspoll->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM); IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; ieee80211_tx_skb(sdata, skb); } void ieee80211_send_nullfunc(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, bool powersave) { struct sk_buff *skb; struct ieee80211_hdr_3addr *nullfunc; struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; /* Don't send NDPs when STA is connected HE */ if (sdata->vif.type == NL80211_IFTYPE_STATION && !(ifmgd->flags & IEEE80211_STA_DISABLE_HE)) return; skb = ieee80211_nullfunc_get(&local->hw, &sdata->vif, !ieee80211_hw_check(&local->hw, DOESNT_SUPPORT_QOS_NDP)); if (!skb) return; nullfunc = (struct ieee80211_hdr_3addr *) skb->data; if (powersave) nullfunc->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM); IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT | IEEE80211_TX_INTFL_OFFCHAN_TX_OK; if (ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS; if (ifmgd->flags & IEEE80211_STA_CONNECTION_POLL) IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_USE_MINRATE; ieee80211_tx_skb(sdata, skb); } static void ieee80211_send_4addr_nullfunc(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata) { struct sk_buff *skb; struct ieee80211_hdr *nullfunc; __le16 fc; if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION)) return; /* Don't send NDPs when connected HE */ if (!(sdata->u.mgd.flags & IEEE80211_STA_DISABLE_HE)) return; skb = dev_alloc_skb(local->hw.extra_tx_headroom + 30); if (!skb) return; skb_reserve(skb, local->hw.extra_tx_headroom); nullfunc = skb_put_zero(skb, 30); fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC | IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS); 
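/*
 * Added note: FROMDS | TODS marks this as a 4-address frame, hence the
 * 30-byte header reserved above; the copies below place the BSSID in
 * addr1/addr3 and our own address in addr2/addr4.
 */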
nullfunc->frame_control = fc; memcpy(nullfunc->addr1, sdata->u.mgd.bssid, ETH_ALEN); memcpy(nullfunc->addr2, sdata->vif.addr, ETH_ALEN); memcpy(nullfunc->addr3, sdata->u.mgd.bssid, ETH_ALEN); memcpy(nullfunc->addr4, sdata->vif.addr, ETH_ALEN); IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; ieee80211_tx_skb(sdata, skb); } /* spectrum management related things */ static void ieee80211_chswitch_work(struct work_struct *work) { struct ieee80211_sub_if_data *sdata = container_of(work, struct ieee80211_sub_if_data, u.mgd.chswitch_work); struct ieee80211_local *local = sdata->local; struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; int ret; if (!ieee80211_sdata_running(sdata)) return; sdata_lock(sdata); mutex_lock(&local->mtx); mutex_lock(&local->chanctx_mtx); if (!ifmgd->associated) goto out; if (!sdata->vif.csa_active) goto out; /* * using reservation isn't immediate as it may be deferred until later * with multi-vif. once reservation is complete it will re-schedule the * work with no reserved_chanctx so verify chandef to check if it * completed successfully */ if (sdata->reserved_chanctx) { struct ieee80211_supported_band *sband = NULL; struct sta_info *mgd_sta = NULL; enum ieee80211_sta_rx_bandwidth bw = IEEE80211_STA_RX_BW_20; /* * with multi-vif csa driver may call ieee80211_csa_finish() * many times while waiting for other interfaces to use their * reservations */ if (sdata->reserved_ready) goto out; if (sdata->vif.bss_conf.chandef.width != sdata->csa_chandef.width) { /* * For managed interface, we need to also update the AP * station bandwidth and align the rate scale algorithm * on the bandwidth change. Here we only consider the * bandwidth of the new channel definition (as channel * switch flow does not have the full HT/VHT/HE * information), assuming that if additional changes are * required they would be done as part of the processing * of the next beacon from the AP. */ switch (sdata->csa_chandef.width) { case NL80211_CHAN_WIDTH_20_NOHT: case NL80211_CHAN_WIDTH_20: default: bw = IEEE80211_STA_RX_BW_20; break; case NL80211_CHAN_WIDTH_40: bw = IEEE80211_STA_RX_BW_40; break; case NL80211_CHAN_WIDTH_80: bw = IEEE80211_STA_RX_BW_80; break; case NL80211_CHAN_WIDTH_80P80: case NL80211_CHAN_WIDTH_160: bw = IEEE80211_STA_RX_BW_160; break; } mgd_sta = sta_info_get(sdata, ifmgd->bssid); sband = local->hw.wiphy->bands[sdata->csa_chandef.chan->band]; } if (sdata->vif.bss_conf.chandef.width > sdata->csa_chandef.width) { mgd_sta->sta.bandwidth = bw; rate_control_rate_update(local, sband, mgd_sta, IEEE80211_RC_BW_CHANGED); } ret = ieee80211_vif_use_reserved_context(sdata); if (ret) { sdata_info(sdata, "failed to use reserved channel context, disconnecting (err=%d)\n", ret); ieee80211_queue_work(&sdata->local->hw, &ifmgd->csa_connection_drop_work); goto out; } if (sdata->vif.bss_conf.chandef.width < sdata->csa_chandef.width) { mgd_sta->sta.bandwidth = bw; rate_control_rate_update(local, sband, mgd_sta, IEEE80211_RC_BW_CHANGED); } goto out; } if (!cfg80211_chandef_identical(&sdata->vif.bss_conf.chandef, &sdata->csa_chandef)) { sdata_info(sdata, "failed to finalize channel switch, disconnecting\n"); ieee80211_queue_work(&sdata->local->hw, &ifmgd->csa_connection_drop_work); goto out; } /* XXX: shouldn't really modify cfg80211-owned data! 
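 * (Added note: ifmgd->associated points at a cfg80211_bss owned by
 * cfg80211, so updating its channel pointer here is a layering
 * violation, kept so the BSS entry matches the channel we actually
 * switched to.)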
*/ ifmgd->associated->channel = sdata->csa_chandef.chan; ifmgd->csa_waiting_bcn = true; ieee80211_sta_reset_beacon_monitor(sdata); ieee80211_sta_reset_conn_monitor(sdata); out: mutex_unlock(&local->chanctx_mtx); mutex_unlock(&local->mtx); sdata_unlock(sdata); } static void ieee80211_chswitch_post_beacon(struct ieee80211_sub_if_data *sdata) { struct ieee80211_local *local = sdata->local; struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; int ret; sdata_assert_lock(sdata); WARN_ON(!sdata->vif.csa_active); if (sdata->csa_block_tx) { ieee80211_wake_vif_queues(local, sdata, IEEE80211_QUEUE_STOP_REASON_CSA); sdata->csa_block_tx = false; } sdata->vif.csa_active = false; ifmgd->csa_waiting_bcn = false; ret = drv_post_channel_switch(sdata); if (ret) { sdata_info(sdata, "driver post channel switch failed, disconnecting\n"); ieee80211_queue_work(&local->hw, &ifmgd->csa_connection_drop_work); return; } cfg80211_ch_switch_notify(sdata->dev, &sdata->reserved_chandef); } void ieee80211_chswitch_done(struct ieee80211_vif *vif, bool success) { struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; trace_api_chswitch_done(sdata, success); if (!success) { sdata_info(sdata, "driver channel switch failed, disconnecting\n"); ieee80211_queue_work(&sdata->local->hw, &ifmgd->csa_connection_drop_work); } else { ieee80211_queue_work(&sdata->local->hw, &ifmgd->chswitch_work); } } EXPORT_SYMBOL(ieee80211_chswitch_done); static void ieee80211_chswitch_timer(struct timer_list *t) { struct ieee80211_sub_if_data *sdata = from_timer(sdata, t, u.mgd.chswitch_timer); ieee80211_queue_work(&sdata->local->hw, &sdata->u.mgd.chswitch_work); } static void ieee80211_sta_abort_chanswitch(struct ieee80211_sub_if_data *sdata) { struct ieee80211_local *local = sdata->local; if (!local->ops->abort_channel_switch) return; mutex_lock(&local->mtx); mutex_lock(&local->chanctx_mtx); ieee80211_vif_unreserve_chanctx(sdata); mutex_unlock(&local->chanctx_mtx); if (sdata->csa_block_tx) ieee80211_wake_vif_queues(local, sdata, IEEE80211_QUEUE_STOP_REASON_CSA); sdata->csa_block_tx = false; sdata->vif.csa_active = false; mutex_unlock(&local->mtx); drv_abort_channel_switch(sdata); } static void ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, u64 timestamp, u32 device_timestamp, struct ieee802_11_elems *elems, bool beacon) { struct ieee80211_local *local = sdata->local; struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; struct cfg80211_bss *cbss = ifmgd->associated; struct ieee80211_chanctx_conf *conf; struct ieee80211_chanctx *chanctx; enum nl80211_band current_band; struct ieee80211_csa_ie csa_ie; struct ieee80211_channel_switch ch_switch; int res; sdata_assert_lock(sdata); if (!cbss) return; if (local->scanning) return; current_band = cbss->channel->band; res = ieee80211_parse_ch_switch_ie(sdata, elems, current_band, ifmgd->flags, ifmgd->associated->bssid, &csa_ie); if (!res) { ch_switch.timestamp = timestamp; ch_switch.device_timestamp = device_timestamp; ch_switch.block_tx = beacon ? 
csa_ie.mode : 0; ch_switch.chandef = csa_ie.chandef; ch_switch.count = csa_ie.count; ch_switch.delay = csa_ie.max_switch_time; } if (res < 0) { ieee80211_queue_work(&local->hw, &ifmgd->csa_connection_drop_work); return; } if (beacon && sdata->vif.csa_active && !ifmgd->csa_waiting_bcn) { if (res) ieee80211_sta_abort_chanswitch(sdata); else drv_channel_switch_rx_beacon(sdata, &ch_switch); return; } else if (sdata->vif.csa_active || res) { /* disregard subsequent announcements if already processing */ return; } if (!cfg80211_chandef_usable(local->hw.wiphy, &csa_ie.chandef, IEEE80211_CHAN_DISABLED)) { sdata_info(sdata, "AP %pM switches to unsupported channel (%d MHz, width:%d, CF1/2: %d/%d MHz), disconnecting\n", ifmgd->associated->bssid, csa_ie.chandef.chan->center_freq, csa_ie.chandef.width, csa_ie.chandef.center_freq1, csa_ie.chandef.center_freq2); ieee80211_queue_work(&local->hw, &ifmgd->csa_connection_drop_work); return; } if (cfg80211_chandef_identical(&csa_ie.chandef, &sdata->vif.bss_conf.chandef) && (!csa_ie.mode || !beacon)) { if (ifmgd->csa_ignored_same_chan) return; sdata_info(sdata, "AP %pM tries to chanswitch to same channel, ignore\n", ifmgd->associated->bssid); ifmgd->csa_ignored_same_chan = true; return; } /* * Drop all TDLS peers - either we disconnect or move to a different * channel from this point on. There's no telling what our peer will do. * The TDLS WIDER_BW scenario is also problematic, as peers might now * have an incompatible wider chandef. */ ieee80211_teardown_tdls_peers(sdata); mutex_lock(&local->mtx); mutex_lock(&local->chanctx_mtx); conf = rcu_dereference_protected(sdata->vif.chanctx_conf, lockdep_is_held(&local->chanctx_mtx)); if (!conf) { sdata_info(sdata, "no channel context assigned to vif?, disconnecting\n"); goto drop_connection; } chanctx = container_of(conf, struct ieee80211_chanctx, conf); if (local->use_chanctx && !ieee80211_hw_check(&local->hw, CHANCTX_STA_CSA)) { sdata_info(sdata, "driver doesn't support chan-switch with channel contexts\n"); goto drop_connection; } if (drv_pre_channel_switch(sdata, &ch_switch)) { sdata_info(sdata, "preparing for channel switch failed, disconnecting\n"); goto drop_connection; } res = ieee80211_vif_reserve_chanctx(sdata, &csa_ie.chandef, chanctx->mode, false); if (res) { sdata_info(sdata, "failed to reserve channel context for channel switch, disconnecting (err=%d)\n", res); goto drop_connection; } mutex_unlock(&local->chanctx_mtx); sdata->vif.csa_active = true; sdata->csa_chandef = csa_ie.chandef; sdata->csa_block_tx = ch_switch.block_tx; ifmgd->csa_ignored_same_chan = false; if (sdata->csa_block_tx) ieee80211_stop_vif_queues(local, sdata, IEEE80211_QUEUE_STOP_REASON_CSA); mutex_unlock(&local->mtx); cfg80211_ch_switch_started_notify(sdata->dev, &csa_ie.chandef, csa_ie.count); if (local->ops->channel_switch) { /* use driver's channel switch callback */ drv_channel_switch(local, sdata, &ch_switch); return; } /* channel switch handled in software */ if (csa_ie.count <= 1) ieee80211_queue_work(&local->hw, &ifmgd->chswitch_work); else mod_timer(&ifmgd->chswitch_timer, TU_TO_EXP_TIME((csa_ie.count - 1) * cbss->beacon_interval)); return; drop_connection: /* * This is just so that the disconnect flow will know that * we were trying to switch channel and failed. In case the * mode is 1 (we are not allowed to Tx), we will know not to * send a deauthentication frame. Those two fields will be * reset when the disconnection worker runs. 
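 *
 * (Added summary) csa_active plus csa_block_tx therefore double as
 * "CSA failed" markers for csa_connection_drop_work: when block_tx
 * was set we must not transmit, so no deauth frame is sent.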
*/ sdata->vif.csa_active = true; sdata->csa_block_tx = ch_switch.block_tx; ieee80211_queue_work(&local->hw, &ifmgd->csa_connection_drop_work); mutex_unlock(&local->chanctx_mtx); mutex_unlock(&local->mtx); } static bool ieee80211_find_80211h_pwr_constr(struct ieee80211_sub_if_data *sdata, struct ieee80211_channel *channel, const u8 *country_ie, u8 country_ie_len, const u8 *pwr_constr_elem, int *chan_pwr, int *pwr_reduction) { struct ieee80211_country_ie_triplet *triplet; int chan = ieee80211_frequency_to_channel(channel->center_freq); int i, chan_increment; bool have_chan_pwr = false; /* Invalid IE */ if (country_ie_len % 2 || country_ie_len < IEEE80211_COUNTRY_IE_MIN_LEN) return false; triplet = (void *)(country_ie + 3); country_ie_len -= 3; switch (channel->band) { default: WARN_ON_ONCE(1); /* fall through */ case NL80211_BAND_2GHZ: case NL80211_BAND_60GHZ: chan_increment = 1; break; case NL80211_BAND_5GHZ: chan_increment = 4; break; } /* find channel */ while (country_ie_len >= 3) { u8 first_channel = triplet->chans.first_channel; if (first_channel >= IEEE80211_COUNTRY_EXTENSION_ID) goto next; for (i = 0; i < triplet->chans.num_channels; i++) { if (first_channel + i * chan_increment == chan) { have_chan_pwr = true; *chan_pwr = triplet->chans.max_power; break; } } if (have_chan_pwr) break; next: triplet++; country_ie_len -= 3; } if (have_chan_pwr && pwr_constr_elem) *pwr_reduction = *pwr_constr_elem; else *pwr_reduction = 0; return have_chan_pwr; } #if 0 #error "For drivers we (Intel) ship, we want to disable" #error "this reverse-engineered part of CCX." static void ieee80211_find_cisco_dtpc(struct ieee80211_sub_if_data *sdata, struct ieee80211_channel *channel, const u8 *cisco_dtpc_ie, int *pwr_level) { /* From practical testing, the first data byte of the DTPC element * seems to contain the requested dBm level, and the CLI on Cisco * APs clearly state the range is -127 to 127 dBm, which indicates * a signed byte, although it seemingly never actually goes negative. * The other byte seems to always be zero. */ *pwr_level = (__s8)cisco_dtpc_ie[4]; } #endif static u32 ieee80211_handle_pwr_constr(struct ieee80211_sub_if_data *sdata, struct ieee80211_channel *channel, struct ieee80211_mgmt *mgmt, const u8 *country_ie, u8 country_ie_len, const u8 *pwr_constr_ie, const u8 *cisco_dtpc_ie) { bool has_80211h_pwr = false, has_cisco_pwr = false; int chan_pwr = 0, pwr_reduction_80211h = 0; int pwr_level_cisco, pwr_level_80211h; int new_ap_level; __le16 capab = mgmt->u.probe_resp.capab_info; if (country_ie && (capab & cpu_to_le16(WLAN_CAPABILITY_SPECTRUM_MGMT) || capab & cpu_to_le16(WLAN_CAPABILITY_RADIO_MEASURE))) { has_80211h_pwr = ieee80211_find_80211h_pwr_constr( sdata, channel, country_ie, country_ie_len, pwr_constr_ie, &chan_pwr, &pwr_reduction_80211h); pwr_level_80211h = max_t(int, 0, chan_pwr - pwr_reduction_80211h); } #if 0 if (cisco_dtpc_ie) { ieee80211_find_cisco_dtpc( sdata, channel, cisco_dtpc_ie, &pwr_level_cisco); has_cisco_pwr = true; } #endif if (!has_80211h_pwr && !has_cisco_pwr) return 0; /* If we have both 802.11h and Cisco DTPC, apply both limits * by picking the smallest of the two power levels advertised. */ if (has_80211h_pwr && (!has_cisco_pwr || pwr_level_80211h <= pwr_level_cisco)) { new_ap_level = pwr_level_80211h; if (sdata->ap_power_level == new_ap_level) return 0; sdata_dbg(sdata, "Limiting TX power to %d (%d - %d) dBm as advertised by %pM\n", pwr_level_80211h, chan_pwr, pwr_reduction_80211h, sdata->u.mgd.bssid); } else { /* has_cisco_pwr is always true here. 
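	 * We can only reach this branch when at least one of the two
	 * limits was found and the 802.11h one did not win the
	 * comparison above.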
*/ new_ap_level = pwr_level_cisco; if (sdata->ap_power_level == new_ap_level) return 0; sdata_dbg(sdata, "Limiting TX power to %d dBm as advertised by %pM\n", pwr_level_cisco, sdata->u.mgd.bssid); } sdata->ap_power_level = new_ap_level; if (__ieee80211_recalc_txpower(sdata)) return BSS_CHANGED_TXPOWER; return 0; } /* powersave */ static void ieee80211_enable_ps(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata) { struct ieee80211_conf *conf = &local->hw.conf; /* * If we are scanning right now then the parameters will * take effect when scan finishes. */ if (local->scanning) return; if (conf->dynamic_ps_timeout > 0 && !ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS)) { mod_timer(&local->dynamic_ps_timer, jiffies + msecs_to_jiffies(conf->dynamic_ps_timeout)); } else { if (ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK)) ieee80211_send_nullfunc(local, sdata, true); if (ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK) && ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) return; conf->flags |= IEEE80211_CONF_PS; ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); } } static void ieee80211_change_ps(struct ieee80211_local *local) { struct ieee80211_conf *conf = &local->hw.conf; if (local->ps_sdata) { ieee80211_enable_ps(local, local->ps_sdata); } else if (conf->flags & IEEE80211_CONF_PS) { conf->flags &= ~IEEE80211_CONF_PS; ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); del_timer_sync(&local->dynamic_ps_timer); cancel_work_sync(&local->dynamic_ps_enable_work); } } static bool ieee80211_powersave_allowed(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_managed *mgd = &sdata->u.mgd; struct sta_info *sta = NULL; bool authorized = false; if (!mgd->powersave) return false; if (mgd->broken_ap) return false; if (!mgd->associated) return false; if (mgd->flags & IEEE80211_STA_CONNECTION_POLL) return false; if (!mgd->have_beacon) return false; rcu_read_lock(); sta = sta_info_get(sdata, mgd->bssid); if (sta) authorized = test_sta_flag(sta, WLAN_STA_AUTHORIZED); rcu_read_unlock(); return authorized; } /* need to hold RTNL or interface lock */ void ieee80211_recalc_ps(struct ieee80211_local *local) { struct ieee80211_sub_if_data *sdata, *found = NULL; int count = 0; int timeout; if (!ieee80211_hw_check(&local->hw, SUPPORTS_PS)) { local->ps_sdata = NULL; return; } list_for_each_entry(sdata, &local->interfaces, list) { if (!ieee80211_sdata_running(sdata)) continue; if (sdata->vif.type == NL80211_IFTYPE_AP) { /* If an AP vif is found, then disable PS * by setting the count to zero thereby setting * ps_sdata to NULL. 
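	 * (PS is a device-global setting and an AP interface must
	 * stay awake to beacon, so the two cannot be combined.)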
*/ count = 0; break; } if (sdata->vif.type != NL80211_IFTYPE_STATION) continue; found = sdata; count++; } if (count == 1 && ieee80211_powersave_allowed(found)) { u8 dtimper = found->u.mgd.dtim_period; timeout = local->dynamic_ps_forced_timeout; if (timeout < 0) timeout = 100; local->hw.conf.dynamic_ps_timeout = timeout; /* If the TIM IE is invalid, pretend the value is 1 */ if (!dtimper) dtimper = 1; local->hw.conf.ps_dtim_period = dtimper; local->ps_sdata = found; } else { local->ps_sdata = NULL; } ieee80211_change_ps(local); } void ieee80211_recalc_ps_vif(struct ieee80211_sub_if_data *sdata) { bool ps_allowed = ieee80211_powersave_allowed(sdata); if (sdata->vif.bss_conf.ps != ps_allowed) { sdata->vif.bss_conf.ps = ps_allowed; ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_PS); } } void ieee80211_dynamic_ps_disable_work(struct work_struct *work) { struct ieee80211_local *local = container_of(work, struct ieee80211_local, dynamic_ps_disable_work); if (local->hw.conf.flags & IEEE80211_CONF_PS) { local->hw.conf.flags &= ~IEEE80211_CONF_PS; ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); } ieee80211_wake_queues_by_reason(&local->hw, IEEE80211_MAX_QUEUE_MAP, IEEE80211_QUEUE_STOP_REASON_PS, false); } void ieee80211_dynamic_ps_enable_work(struct work_struct *work) { struct ieee80211_local *local = container_of(work, struct ieee80211_local, dynamic_ps_enable_work); struct ieee80211_sub_if_data *sdata = local->ps_sdata; struct ieee80211_if_managed *ifmgd; unsigned long flags; int q; /* can only happen when PS was just disabled anyway */ if (!sdata) return; ifmgd = &sdata->u.mgd; if (local->hw.conf.flags & IEEE80211_CONF_PS) return; if (local->hw.conf.dynamic_ps_timeout > 0) { /* don't enter PS if TX frames are pending */ if (drv_tx_frames_pending(local)) { mod_timer(&local->dynamic_ps_timer, jiffies + msecs_to_jiffies( local->hw.conf.dynamic_ps_timeout)); return; } /* * transmission can be stopped by others which leads to * dynamic_ps_timer expiry. Postpone the ps timer if it * is not the actual idle state. 
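	 * (queues may be stopped for reasons other than PS, e.g. a
	 * channel switch or a flush; see the queue_stop_reasons
	 * check below.)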
*/ spin_lock_irqsave(&local->queue_stop_reason_lock, flags); for (q = 0; q < local->hw.queues; q++) { if (local->queue_stop_reasons[q]) { spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); mod_timer(&local->dynamic_ps_timer, jiffies + msecs_to_jiffies( local->hw.conf.dynamic_ps_timeout)); return; } } spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); } if (ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK) && !(ifmgd->flags & IEEE80211_STA_NULLFUNC_ACKED)) { if (drv_tx_frames_pending(local)) { mod_timer(&local->dynamic_ps_timer, jiffies + msecs_to_jiffies( local->hw.conf.dynamic_ps_timeout)); } else { ieee80211_send_nullfunc(local, sdata, true); /* Flush to get the tx status of nullfunc frame */ ieee80211_flush_queues(local, sdata, false); } } if (!(ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS) && ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK)) || (ifmgd->flags & IEEE80211_STA_NULLFUNC_ACKED)) { ifmgd->flags &= ~IEEE80211_STA_NULLFUNC_ACKED; local->hw.conf.flags |= IEEE80211_CONF_PS; ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); } } void ieee80211_dynamic_ps_timer(struct timer_list *t) { struct ieee80211_local *local = from_timer(local, t, dynamic_ps_timer); ieee80211_queue_work(&local->hw, &local->dynamic_ps_enable_work); } void ieee80211_dfs_cac_timer_work(struct work_struct *work) { struct delayed_work *delayed_work = to_delayed_work(work); struct ieee80211_sub_if_data *sdata = container_of(delayed_work, struct ieee80211_sub_if_data, dfs_cac_timer_work); struct cfg80211_chan_def chandef = sdata->vif.bss_conf.chandef; mutex_lock(&sdata->local->mtx); if (sdata->wdev.cac_started) { ieee80211_vif_release_channel(sdata); cfg80211_cac_event(sdata->dev, &chandef, NL80211_RADAR_CAC_FINISHED, GFP_KERNEL); } mutex_unlock(&sdata->local->mtx); } static bool __ieee80211_sta_handle_tspec_ac_params(struct ieee80211_sub_if_data *sdata) { struct ieee80211_local *local = sdata->local; struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; bool ret = false; int ac; if (local->hw.queues < IEEE80211_NUM_ACS) return false; for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { struct ieee80211_sta_tx_tspec *tx_tspec = &ifmgd->tx_tspec[ac]; int non_acm_ac; unsigned long now = jiffies; if (tx_tspec->action == TX_TSPEC_ACTION_NONE && tx_tspec->admitted_time && time_after(now, tx_tspec->time_slice_start + HZ)) { tx_tspec->consumed_tx_time = 0; tx_tspec->time_slice_start = now; if (tx_tspec->downgraded) tx_tspec->action = TX_TSPEC_ACTION_STOP_DOWNGRADE; } switch (tx_tspec->action) { case TX_TSPEC_ACTION_STOP_DOWNGRADE: /* take the original parameters */ if (drv_conf_tx(local, sdata, ac, &sdata->tx_conf[ac])) sdata_err(sdata, "failed to set TX queue parameters for queue %d\n", ac); tx_tspec->action = TX_TSPEC_ACTION_NONE; tx_tspec->downgraded = false; ret = true; break; case TX_TSPEC_ACTION_DOWNGRADE: if (time_after(now, tx_tspec->time_slice_start + HZ)) { tx_tspec->action = TX_TSPEC_ACTION_NONE; ret = true; break; } /* downgrade next lower non-ACM AC */ for (non_acm_ac = ac + 1; non_acm_ac < IEEE80211_NUM_ACS; non_acm_ac++) if (!(sdata->wmm_acm & BIT(7 - 2 * non_acm_ac))) break; /* Usually the loop will result in using BK even if it * requires admission control, but such a configuration * makes no sense and we have to transmit somehow - the * AC selection does the same thing. * If we started out trying to downgrade from BK, then * the extra condition here might be needed. 
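	 * Note that the ACs are numbered VO(0), VI(1), BE(2), BK(3),
	 * so "downgrading" means moving to a higher index.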
	 */
			if (non_acm_ac >= IEEE80211_NUM_ACS)
				non_acm_ac = IEEE80211_AC_BK;
			if (drv_conf_tx(local, sdata, ac,
					&sdata->tx_conf[non_acm_ac]))
				sdata_err(sdata,
					  "failed to set TX queue parameters for queue %d\n",
					  ac);
			tx_tspec->action = TX_TSPEC_ACTION_NONE;
			ret = true;
			schedule_delayed_work(&ifmgd->tx_tspec_wk,
				tx_tspec->time_slice_start + HZ - now + 1);
			break;
		case TX_TSPEC_ACTION_NONE:
			/* nothing now */
			break;
		}
	}

	return ret;
}

void ieee80211_sta_handle_tspec_ac_params(struct ieee80211_sub_if_data *sdata)
{
	if (__ieee80211_sta_handle_tspec_ac_params(sdata))
		ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_QOS);
}

static void ieee80211_sta_handle_tspec_ac_params_wk(struct work_struct *work)
{
	struct ieee80211_sub_if_data *sdata;

	sdata = container_of(work, struct ieee80211_sub_if_data,
			     u.mgd.tx_tspec_wk.work);
	ieee80211_sta_handle_tspec_ac_params(sdata);
}

/* MLME */
static bool ieee80211_sta_wmm_params(struct ieee80211_local *local,
				     struct ieee80211_sub_if_data *sdata,
				     const u8 *wmm_param, size_t wmm_param_len,
				     const struct ieee80211_mu_edca_param_set *mu_edca)
{
	struct ieee80211_tx_queue_params params[IEEE80211_NUM_ACS];
	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
	size_t left;
	int count, mu_edca_count, ac;
	const u8 *pos;
	u8 uapsd_queues = 0;

	if (!local->ops->conf_tx)
		return false;

	if (local->hw.queues < IEEE80211_NUM_ACS)
		return false;

	if (!wmm_param)
		return false;

	if (wmm_param_len < 8 || wmm_param[5] /* version */ != 1)
		return false;

	if (ifmgd->flags & IEEE80211_STA_UAPSD_ENABLED)
		uapsd_queues = ifmgd->uapsd_queues;

	count = wmm_param[6] & 0x0f;
	/* -1 is the initial value of ifmgd->mu_edca_last_param_set.
	 * if mu_edca was present before and now it disappeared, tell
	 * the driver about it.
	 */
	mu_edca_count = mu_edca ? mu_edca->mu_qos_info & 0x0f : -1;
	if (count == ifmgd->wmm_last_param_set &&
	    mu_edca_count == ifmgd->mu_edca_last_param_set)
		return false;
	ifmgd->wmm_last_param_set = count;
	ifmgd->mu_edca_last_param_set = mu_edca_count;

	pos = wmm_param + 8;
	left = wmm_param_len - 8;

	memset(&params, 0, sizeof(params));

	sdata->wmm_acm = 0;
	for (; left >= 4; left -= 4, pos += 4) {
		int aci = (pos[0] >> 5) & 0x03;
		int acm = (pos[0] >> 4) & 0x01;
		bool uapsd = false;

		switch (aci) {
		case 1: /* AC_BK */
			ac = IEEE80211_AC_BK;
			if (acm)
				sdata->wmm_acm |= BIT(1) | BIT(2); /* BK/- */
			if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
				uapsd = true;
			params[ac].mu_edca = !!mu_edca;
			if (mu_edca)
				params[ac].mu_edca_param_rec = mu_edca->ac_bk;
			break;
		case 2: /* AC_VI */
			ac = IEEE80211_AC_VI;
			if (acm)
				sdata->wmm_acm |= BIT(4) | BIT(5); /* CL/VI */
			if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
				uapsd = true;
			params[ac].mu_edca = !!mu_edca;
			if (mu_edca)
				params[ac].mu_edca_param_rec = mu_edca->ac_vi;
			break;
		case 3: /* AC_VO */
			ac = IEEE80211_AC_VO;
			if (acm)
				sdata->wmm_acm |= BIT(6) | BIT(7); /* VO/NC */
			if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
				uapsd = true;
			params[ac].mu_edca = !!mu_edca;
			if (mu_edca)
				params[ac].mu_edca_param_rec = mu_edca->ac_vo;
			break;
		case 0: /* AC_BE */
		default:
			ac = IEEE80211_AC_BE;
			if (acm)
				sdata->wmm_acm |= BIT(0) | BIT(3); /* BE/EE */
			if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
				uapsd = true;
			params[ac].mu_edca = !!mu_edca;
			if (mu_edca)
				params[ac].mu_edca_param_rec = mu_edca->ac_be;
			break;
		}

		params[ac].aifs = pos[0] & 0x0f;
		if (params[ac].aifs < 2) {
			sdata_info(sdata,
				   "AP has invalid WMM params (AIFSN=%d for ACI %d), will use 2\n",
				   params[ac].aifs, aci);
			params[ac].aifs = 2;
		}
		params[ac].cw_max = ecw2cw((pos[1] & 0xf0) >> 4);
		params[ac].cw_min = ecw2cw(pos[1] & 0x0f);
		params[ac].txop = get_unaligned_le16(pos + 2);
		params[ac].acm = acm;
		params[ac].uapsd = uapsd;

		if (params[ac].cw_min == 0 ||
		    params[ac].cw_min > params[ac].cw_max) {
			sdata_info(sdata,
				   "AP has invalid WMM params (CWmin/max=%d/%d for ACI %d), using defaults\n",
				   params[ac].cw_min, params[ac].cw_max, aci);
			return false;
		}
		ieee80211_regulatory_limit_wmm_params(sdata, &params[ac], ac);
	}

	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
		mlme_dbg(sdata,
			 "WMM AC=%d acm=%d aifs=%d cWmin=%d cWmax=%d txop=%d uapsd=%d, downgraded=%d\n",
			 ac, params[ac].acm,
			 params[ac].aifs, params[ac].cw_min, params[ac].cw_max,
			 params[ac].txop, params[ac].uapsd,
			 ifmgd->tx_tspec[ac].downgraded);
		sdata->tx_conf[ac] = params[ac];
		if (!ifmgd->tx_tspec[ac].downgraded &&
		    drv_conf_tx(local, sdata, ac, &params[ac]))
			sdata_err(sdata,
				  "failed to set TX queue parameters for AC %d\n",
				  ac);
	}

	/* enable WMM or activate new settings */
	sdata->vif.bss_conf.qos = true;
	return true;
}

static void __ieee80211_stop_poll(struct ieee80211_sub_if_data *sdata)
{
	lockdep_assert_held(&sdata->local->mtx);

	sdata->u.mgd.flags &= ~IEEE80211_STA_CONNECTION_POLL;
	ieee80211_run_deferred_scan(sdata->local);
}

static void ieee80211_stop_poll(struct ieee80211_sub_if_data *sdata)
{
	mutex_lock(&sdata->local->mtx);
	__ieee80211_stop_poll(sdata);
	mutex_unlock(&sdata->local->mtx);
}

static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata,
					   u16 capab, bool erp_valid, u8 erp)
{
	struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf;
	struct ieee80211_supported_band *sband;
	u32 changed = 0;
	bool use_protection;
	bool use_short_preamble;
	bool use_short_slot;

	sband = ieee80211_get_sband(sdata);
	if (!sband)
		return changed;

	if (erp_valid) {
		use_protection = (erp & WLAN_ERP_USE_PROTECTION) != 0;
		use_short_preamble = (erp & WLAN_ERP_BARKER_PREAMBLE) == 0;
	} else {
		use_protection = false;
		use_short_preamble = !!(capab & WLAN_CAPABILITY_SHORT_PREAMBLE);
	}

	use_short_slot = !!(capab & WLAN_CAPABILITY_SHORT_SLOT_TIME);
	if (sband->band == NL80211_BAND_5GHZ)
		use_short_slot = true;

	if (use_protection != bss_conf->use_cts_prot) {
		bss_conf->use_cts_prot = use_protection;
		changed |= BSS_CHANGED_ERP_CTS_PROT;
	}

	if (use_short_preamble != bss_conf->use_short_preamble) {
		bss_conf->use_short_preamble = use_short_preamble;
		changed |= BSS_CHANGED_ERP_PREAMBLE;
	}

	if (use_short_slot != bss_conf->use_short_slot) {
		bss_conf->use_short_slot = use_short_slot;
		changed |= BSS_CHANGED_ERP_SLOT;
	}

	return changed;
}

static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
				     struct cfg80211_bss *cbss,
				     u32 bss_info_changed)
{
	struct ieee80211_bss *bss = (void *)cbss->priv;
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf;

	bss_info_changed |= BSS_CHANGED_ASSOC;
	bss_info_changed |= ieee80211_handle_bss_capability(sdata,
		bss_conf->assoc_capability, bss->has_erp_value, bss->erp_value);

	sdata->u.mgd.beacon_timeout = usecs_to_jiffies(ieee80211_tu_to_usec(
		beacon_loss_count * bss_conf->beacon_int));

	sdata->u.mgd.associated = cbss;
	memcpy(sdata->u.mgd.bssid, cbss->bssid, ETH_ALEN);

	ieee80211_check_rate_mask(sdata);

	sdata->u.mgd.flags |= IEEE80211_STA_RESET_SIGNAL_AVE;

	if (sdata->vif.p2p ||
	    sdata->vif.driver_flags & IEEE80211_VIF_GET_NOA_UPDATE) {
		const struct cfg80211_bss_ies *ies;

		rcu_read_lock();
		ies = rcu_dereference(cbss->ies);
		if (ies) {
			int ret;

			ret = cfg80211_get_p2p_attr(
					ies->data, ies->len,
					IEEE80211_P2P_ATTR_ABSENCE_NOTICE,
					(u8 *) &bss_conf->p2p_noa_attr,
					sizeof(bss_conf->p2p_noa_attr));
			if (ret >= 2) {
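				/* at least the index and CT window
				 * fields of the NoA attribute were
				 * returned, so index is valid below
				 */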
				sdata->u.mgd.p2p_noa_index =
					bss_conf->p2p_noa_attr.index;
				bss_info_changed |= BSS_CHANGED_P2P_PS;
			}
		}
		rcu_read_unlock();
	}

	/* just to be sure */
	ieee80211_stop_poll(sdata);

	ieee80211_led_assoc(local, 1);

	if (sdata->u.mgd.have_beacon) {
		/*
		 * If the AP is buggy we may get here with no DTIM period
		 * known, so assume it's 1 which is the only safe assumption
		 * in that case, although if the TIM IE is broken powersave
		 * probably just won't work at all.
		 */
		bss_conf->dtim_period = sdata->u.mgd.dtim_period ?: 1;
		bss_conf->beacon_rate = bss->beacon_rate;
		bss_info_changed |= BSS_CHANGED_BEACON_INFO;
	} else {
		bss_conf->beacon_rate = NULL;
		bss_conf->dtim_period = 0;
	}

	bss_conf->assoc = 1;

	/* Tell the driver to monitor connection quality (if supported) */
	if (sdata->vif.driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI &&
	    bss_conf->cqm_rssi_thold)
		bss_info_changed |= BSS_CHANGED_CQM;

	/* Enable ARP filtering */
	if (bss_conf->arp_addr_cnt)
		bss_info_changed |= BSS_CHANGED_ARP_FILTER;

	ieee80211_bss_info_change_notify(sdata, bss_info_changed);

	mutex_lock(&local->iflist_mtx);
	ieee80211_recalc_ps(local);
	mutex_unlock(&local->iflist_mtx);

	ieee80211_recalc_smps(sdata);
	ieee80211_recalc_ps_vif(sdata);

	netif_carrier_on(sdata->dev);
}

static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
				   u16 stype, u16 reason, bool tx,
				   u8 *frame_buf)
{
	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
	struct ieee80211_local *local = sdata->local;
	u32 changed = 0;

	sdata_assert_lock(sdata);

	if (WARN_ON_ONCE(tx && !frame_buf))
		return;

	if (WARN_ON(!ifmgd->associated))
		return;

	ieee80211_stop_poll(sdata);

	ifmgd->associated = NULL;
	netif_carrier_off(sdata->dev);

	/*
	 * if we want to get out of ps before disassoc (why?) we have
	 * to do it before sending disassoc, as otherwise the null-packet
	 * won't be valid.
	 */
	if (local->hw.conf.flags & IEEE80211_CONF_PS) {
		local->hw.conf.flags &= ~IEEE80211_CONF_PS;
		ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
	}
	local->ps_sdata = NULL;

	/* disable per-vif ps */
	ieee80211_recalc_ps_vif(sdata);

	/* make sure ongoing transmission finishes */
	synchronize_net();

	/*
	 * drop any frame before deauth/disassoc, this can be data or
	 * management frame. Since we are disconnecting, we should not
	 * insist on sending these frames which can take time and delay
	 * the disconnection and possibly the roaming.
	 */
	if (tx)
		ieee80211_flush_queues(local, sdata, true);

	/* deauthenticate/disassociate now */
	if (tx || frame_buf) {
		/*
		 * In multi channel scenarios guarantee that the virtual
		 * interface is granted immediate airtime to transmit the
		 * deauthentication frame by calling mgd_prepare_tx, if the
		 * driver requested so.
*/ if (ieee80211_hw_check(&local->hw, DEAUTH_NEED_MGD_TX_PREP) && !ifmgd->have_beacon) drv_mgd_prepare_tx(sdata->local, sdata, 0); ieee80211_send_deauth_disassoc(sdata, ifmgd->bssid, stype, reason, tx, frame_buf); } /* flush out frame - make sure the deauth was actually sent */ if (tx) ieee80211_flush_queues(local, sdata, false); /* clear bssid only after building the needed mgmt frames */ eth_zero_addr(ifmgd->bssid); /* remove AP and TDLS peers */ sta_info_flush(sdata); /* finally reset all BSS / config parameters */ changed |= ieee80211_reset_erp_info(sdata); ieee80211_led_assoc(local, 0); changed |= BSS_CHANGED_ASSOC; sdata->vif.bss_conf.assoc = false; ifmgd->p2p_noa_index = -1; memset(&sdata->vif.bss_conf.p2p_noa_attr, 0, sizeof(sdata->vif.bss_conf.p2p_noa_attr)); /* on the next assoc, re-program HT/VHT parameters */ memset(&ifmgd->ht_capa, 0, sizeof(ifmgd->ht_capa)); memset(&ifmgd->ht_capa_mask, 0, sizeof(ifmgd->ht_capa_mask)); memset(&ifmgd->vht_capa, 0, sizeof(ifmgd->vht_capa)); memset(&ifmgd->vht_capa_mask, 0, sizeof(ifmgd->vht_capa_mask)); /* reset MU-MIMO ownership and group data */ memset(sdata->vif.bss_conf.mu_group.membership, 0, sizeof(sdata->vif.bss_conf.mu_group.membership)); memset(sdata->vif.bss_conf.mu_group.position, 0, sizeof(sdata->vif.bss_conf.mu_group.position)); changed |= BSS_CHANGED_MU_GROUPS; sdata->vif.mu_mimo_owner = false; sdata->ap_power_level = IEEE80211_UNSET_POWER_LEVEL; del_timer_sync(&local->dynamic_ps_timer); cancel_work_sync(&local->dynamic_ps_enable_work); /* Disable ARP filtering */ if (sdata->vif.bss_conf.arp_addr_cnt) changed |= BSS_CHANGED_ARP_FILTER; sdata->vif.bss_conf.qos = false; changed |= BSS_CHANGED_QOS; /* The BSSID (not really interesting) and HT changed */ changed |= BSS_CHANGED_BSSID | BSS_CHANGED_HT; ieee80211_bss_info_change_notify(sdata, changed); /* disassociated - set to defaults now */ ieee80211_set_wmm_default(sdata, false, false); del_timer_sync(&sdata->u.mgd.conn_mon_timer); del_timer_sync(&sdata->u.mgd.bcn_mon_timer); del_timer_sync(&sdata->u.mgd.timer); del_timer_sync(&sdata->u.mgd.chswitch_timer); sdata->vif.bss_conf.dtim_period = 0; sdata->vif.bss_conf.beacon_rate = NULL; ifmgd->have_beacon = false; ifmgd->flags = 0; mutex_lock(&local->mtx); ieee80211_vif_release_channel(sdata); sdata->vif.csa_active = false; ifmgd->csa_waiting_bcn = false; ifmgd->csa_ignored_same_chan = false; if (sdata->csa_block_tx) { ieee80211_wake_vif_queues(local, sdata, IEEE80211_QUEUE_STOP_REASON_CSA); sdata->csa_block_tx = false; } mutex_unlock(&local->mtx); /* existing TX TSPEC sessions no longer exist */ memset(ifmgd->tx_tspec, 0, sizeof(ifmgd->tx_tspec)); cancel_delayed_work_sync(&ifmgd->tx_tspec_wk); sdata->encrypt_headroom = IEEE80211_ENCRYPT_HEADROOM; } void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata, struct ieee80211_hdr *hdr) { /* * We can postpone the mgd.timer whenever receiving unicast frames * from AP because we know that the connection is working both ways * at that time. But multicast frames (and hence also beacons) must * be ignored here, because we need to trigger the timer during * data idle periods for sending the periodic probe request to the * AP we're connected to. 
*/ if (is_multicast_ether_addr(hdr->addr1)) return; ieee80211_sta_reset_conn_monitor(sdata); } static void ieee80211_reset_ap_probe(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; struct ieee80211_local *local = sdata->local; mutex_lock(&local->mtx); if (!(ifmgd->flags & IEEE80211_STA_CONNECTION_POLL)) goto out; __ieee80211_stop_poll(sdata); mutex_lock(&local->iflist_mtx); ieee80211_recalc_ps(local); mutex_unlock(&local->iflist_mtx); if (ieee80211_hw_check(&sdata->local->hw, CONNECTION_MONITOR)) goto out; /* * We've received a probe response, but are not sure whether * we have or will be receiving any beacons or data, so let's * schedule the timers again, just in case. */ ieee80211_sta_reset_beacon_monitor(sdata); mod_timer(&ifmgd->conn_mon_timer, round_jiffies_up(jiffies + IEEE80211_CONNECTION_IDLE_TIME)); out: mutex_unlock(&local->mtx); } static void ieee80211_sta_tx_wmm_ac_notify(struct ieee80211_sub_if_data *sdata, struct ieee80211_hdr *hdr, u16 tx_time) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; u16 tid = ieee80211_get_tid(hdr); int ac = ieee80211_ac_from_tid(tid); struct ieee80211_sta_tx_tspec *tx_tspec = &ifmgd->tx_tspec[ac]; unsigned long now = jiffies; if (likely(!tx_tspec->admitted_time)) return; if (time_after(now, tx_tspec->time_slice_start + HZ)) { tx_tspec->consumed_tx_time = 0; tx_tspec->time_slice_start = now; if (tx_tspec->downgraded) { tx_tspec->action = TX_TSPEC_ACTION_STOP_DOWNGRADE; schedule_delayed_work(&ifmgd->tx_tspec_wk, 0); } } if (tx_tspec->downgraded) return; tx_tspec->consumed_tx_time += tx_time; if (tx_tspec->consumed_tx_time >= tx_tspec->admitted_time) { tx_tspec->downgraded = true; tx_tspec->action = TX_TSPEC_ACTION_DOWNGRADE; schedule_delayed_work(&ifmgd->tx_tspec_wk, 0); } } void ieee80211_sta_tx_notify(struct ieee80211_sub_if_data *sdata, struct ieee80211_hdr *hdr, bool ack, u16 tx_time) { ieee80211_sta_tx_wmm_ac_notify(sdata, hdr, tx_time); if (!ieee80211_is_data(hdr->frame_control)) return; if (ieee80211_is_nullfunc(hdr->frame_control) && sdata->u.mgd.probe_send_count > 0) { if (ack) ieee80211_sta_reset_conn_monitor(sdata); else sdata->u.mgd.nullfunc_failed = true; ieee80211_queue_work(&sdata->local->hw, &sdata->work); return; } if (ack) ieee80211_sta_reset_conn_monitor(sdata); } void ieee80211_mlme_send_probe_req(struct ieee80211_sub_if_data *sdata, const u8 *src, const u8 *dst, const u8 *ssid, size_t ssid_len, struct ieee80211_channel *channel) { struct sk_buff *skb; skb = ieee80211_build_probe_req(sdata, src, dst, dst, (u32)-1, channel, ssid, ssid_len, NULL, 0, IEEE80211_PROBE_FLAG_DIRECTED); if (skb) ieee80211_tx_skb(sdata, skb); } static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; const u8 *ssid; u8 *dst = ifmgd->associated->bssid; u8 unicast_limit = max(1, max_probe_tries - 3); struct sta_info *sta; /* * Try sending broadcast probe requests for the last three * probe requests after the first ones failed since some * buggy APs only support broadcast probe requests. */ if (ifmgd->probe_send_count >= unicast_limit) dst = NULL; /* * When the hardware reports an accurate Tx ACK status, it's * better to send a nullfunc frame instead of a probe request, * as it will kick us off the AP quickly if we aren't associated * anymore. The timeout will be reset if the frame is ACKed by * the AP. 
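	 * (ieee80211_sta_tx_notify() above resets the connection
	 * monitor when such a nullfunc frame is ACKed.)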
*/ ifmgd->probe_send_count++; if (dst) { mutex_lock(&sdata->local->sta_mtx); sta = sta_info_get(sdata, dst); if (!WARN_ON(!sta)) ieee80211_check_fast_rx(sta); mutex_unlock(&sdata->local->sta_mtx); } if (ieee80211_hw_check(&sdata->local->hw, REPORTS_TX_ACK_STATUS)) { ifmgd->nullfunc_failed = false; ieee80211_send_nullfunc(sdata->local, sdata, false); } else { int ssid_len; rcu_read_lock(); ssid = ieee80211_bss_get_ie(ifmgd->associated, WLAN_EID_SSID); if (WARN_ON_ONCE(ssid == NULL)) ssid_len = 0; else ssid_len = ssid[1]; ieee80211_mlme_send_probe_req(sdata, sdata->vif.addr, dst, ssid + 2, ssid_len, ifmgd->associated->channel); rcu_read_unlock(); } ifmgd->probe_timeout = jiffies + msecs_to_jiffies(probe_wait_ms); run_again(sdata, ifmgd->probe_timeout); } static void ieee80211_mgd_probe_ap(struct ieee80211_sub_if_data *sdata, bool beacon) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; bool already = false; if (!ieee80211_sdata_running(sdata)) return; sdata_lock(sdata); if (!ifmgd->associated) goto out; mutex_lock(&sdata->local->mtx); if (sdata->local->tmp_channel || sdata->local->scanning) { mutex_unlock(&sdata->local->mtx); goto out; } if (beacon) { mlme_dbg_ratelimited(sdata, "detected beacon loss from AP (missed %d beacons) - probing\n", beacon_loss_count); ieee80211_cqm_beacon_loss_notify(&sdata->vif, GFP_KERNEL); } /* * The driver/our work has already reported this event or the * connection monitoring has kicked in and we have already sent * a probe request. Or maybe the AP died and the driver keeps * reporting until we disassociate... * * In either case we have to ignore the current call to this * function (except for setting the correct probe reason bit) * because otherwise we would reset the timer every time and * never check whether we received a probe response! */ if (ifmgd->flags & IEEE80211_STA_CONNECTION_POLL) already = true; ifmgd->flags |= IEEE80211_STA_CONNECTION_POLL; mutex_unlock(&sdata->local->mtx); if (already) goto out; mutex_lock(&sdata->local->iflist_mtx); ieee80211_recalc_ps(sdata->local); mutex_unlock(&sdata->local->iflist_mtx); ifmgd->probe_send_count = 0; ieee80211_mgd_probe_ap_send(sdata); out: sdata_unlock(sdata); } struct sk_buff *ieee80211_ap_probereq_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; struct cfg80211_bss *cbss; struct sk_buff *skb; const u8 *ssid; int ssid_len; if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION)) return NULL; sdata_assert_lock(sdata); if (ifmgd->associated) cbss = ifmgd->associated; else if (ifmgd->auth_data) cbss = ifmgd->auth_data->bss; else if (ifmgd->assoc_data) cbss = ifmgd->assoc_data->bss; else return NULL; rcu_read_lock(); ssid = ieee80211_bss_get_ie(cbss, WLAN_EID_SSID); if (WARN_ON_ONCE(ssid == NULL)) ssid_len = 0; else ssid_len = ssid[1]; skb = ieee80211_build_probe_req(sdata, sdata->vif.addr, cbss->bssid, cbss->bssid, (u32)-1, cbss->channel, ssid + 2, ssid_len, NULL, 0, IEEE80211_PROBE_FLAG_DIRECTED); rcu_read_unlock(); return skb; } EXPORT_SYMBOL(ieee80211_ap_probereq_get); static void ieee80211_report_disconnect(struct ieee80211_sub_if_data *sdata, const u8 *buf, size_t len, bool tx, u16 reason) { struct ieee80211_event event = { .type = MLME_EVENT, .u.mlme.data = tx ? 
					DEAUTH_TX_EVENT : DEAUTH_RX_EVENT,
		.u.mlme.reason = reason,
	};

	if (tx)
		cfg80211_tx_mlme_mgmt(sdata->dev, buf, len);
	else
		cfg80211_rx_mlme_mgmt(sdata->dev, buf, len);

	drv_event_callback(sdata->local, sdata, &event);
}

static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
	u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
	bool tx;

	sdata_lock(sdata);
	if (!ifmgd->associated) {
		sdata_unlock(sdata);
		return;
	}

	tx = !sdata->csa_block_tx;

	/* AP is probably out of range (or not reachable for another reason) so
	 * remove the bss struct for that AP.
	 */
	cfg80211_unlink_bss(local->hw.wiphy, ifmgd->associated);

	ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
			       WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY,
			       tx, frame_buf);
	mutex_lock(&local->mtx);
	sdata->vif.csa_active = false;
	ifmgd->csa_waiting_bcn = false;
	if (sdata->csa_block_tx) {
		ieee80211_wake_vif_queues(local, sdata,
					  IEEE80211_QUEUE_STOP_REASON_CSA);
		sdata->csa_block_tx = false;
	}
	mutex_unlock(&local->mtx);

	ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), tx,
				    WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY);

	sdata_unlock(sdata);
}

static void ieee80211_beacon_connection_loss_work(struct work_struct *work)
{
	struct ieee80211_sub_if_data *sdata =
		container_of(work, struct ieee80211_sub_if_data,
			     u.mgd.beacon_connection_loss_work);
	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;

	if (ifmgd->associated)
		ifmgd->beacon_loss_count++;

	if (ifmgd->connection_loss) {
		sdata_info(sdata, "Connection to AP %pM lost\n",
			   ifmgd->bssid);
		__ieee80211_disconnect(sdata);
	} else {
		ieee80211_mgd_probe_ap(sdata, true);
	}
}

static void ieee80211_csa_connection_drop_work(struct work_struct *work)
{
	struct ieee80211_sub_if_data *sdata =
		container_of(work, struct ieee80211_sub_if_data,
			     u.mgd.csa_connection_drop_work);

	__ieee80211_disconnect(sdata);
}

void ieee80211_beacon_loss(struct ieee80211_vif *vif)
{
	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
	struct ieee80211_hw *hw = &sdata->local->hw;

	trace_api_beacon_loss(sdata);

	sdata->u.mgd.connection_loss = false;
	ieee80211_queue_work(hw, &sdata->u.mgd.beacon_connection_loss_work);
}
EXPORT_SYMBOL(ieee80211_beacon_loss);

void ieee80211_connection_loss(struct ieee80211_vif *vif)
{
	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
	struct ieee80211_hw *hw = &sdata->local->hw;

	trace_api_connection_loss(sdata);

	sdata->u.mgd.connection_loss = true;
	ieee80211_queue_work(hw, &sdata->u.mgd.beacon_connection_loss_work);
}
EXPORT_SYMBOL(ieee80211_connection_loss);

static void ieee80211_destroy_auth_data(struct ieee80211_sub_if_data *sdata,
					bool assoc)
{
	struct ieee80211_mgd_auth_data *auth_data = sdata->u.mgd.auth_data;

	sdata_assert_lock(sdata);

	if (!assoc) {
		/*
		 * we are not authenticated yet, the only timer that could be
		 * running is the timeout for the authentication response
		 * which is not relevant anymore.
		 */
		del_timer_sync(&sdata->u.mgd.timer);
		sta_info_destroy_addr(sdata, auth_data->bss->bssid);

		eth_zero_addr(sdata->u.mgd.bssid);
		ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID);
		sdata->u.mgd.flags = 0;
		mutex_lock(&sdata->local->mtx);
		ieee80211_vif_release_channel(sdata);
		mutex_unlock(&sdata->local->mtx);
	}

	cfg80211_put_bss(sdata->local->hw.wiphy, auth_data->bss);
	kfree(auth_data);
	sdata->u.mgd.auth_data = NULL;
}

static void ieee80211_destroy_assoc_data(struct ieee80211_sub_if_data *sdata,
					 bool assoc, bool abandon)
{
	struct ieee80211_mgd_assoc_data *assoc_data = sdata->u.mgd.assoc_data;

	sdata_assert_lock(sdata);

	if (!assoc) {
		/*
		 * we are not associated yet, the only timer that could be
		 * running is the timeout for the association response
		 * which is not relevant anymore.
		 */
		del_timer_sync(&sdata->u.mgd.timer);
		sta_info_destroy_addr(sdata, assoc_data->bss->bssid);

		eth_zero_addr(sdata->u.mgd.bssid);
		ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID);
		sdata->u.mgd.flags = 0;
		sdata->vif.mu_mimo_owner = false;

		mutex_lock(&sdata->local->mtx);
		ieee80211_vif_release_channel(sdata);
		mutex_unlock(&sdata->local->mtx);

		if (abandon)
			cfg80211_abandon_assoc(sdata->dev, assoc_data->bss);
	}

	kfree(assoc_data);
	sdata->u.mgd.assoc_data = NULL;
}

static void ieee80211_auth_challenge(struct ieee80211_sub_if_data *sdata,
				     struct ieee80211_mgmt *mgmt, size_t len)
{
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_mgd_auth_data *auth_data = sdata->u.mgd.auth_data;
	u8 *pos;
	struct ieee802_11_elems elems;
	u32 tx_flags = 0;

	pos = mgmt->u.auth.variable;
	ieee802_11_parse_elems(pos, len - (pos - (u8 *)mgmt), false, &elems,
			       mgmt->bssid, auth_data->bss->bssid);
	if (!elems.challenge)
		return;
	auth_data->expected_transaction = 4;
	drv_mgd_prepare_tx(sdata->local, sdata, 0);
	if (ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS))
		tx_flags = IEEE80211_TX_CTL_REQ_TX_STATUS |
			   IEEE80211_TX_INTFL_MLME_CONN_TX;
	ieee80211_send_auth(sdata, 3, auth_data->algorithm, 0,
			    elems.challenge - 2, elems.challenge_len + 2,
			    auth_data->bss->bssid, auth_data->bss->bssid,
			    auth_data->key, auth_data->key_len,
			    auth_data->key_idx, tx_flags);
}

static bool ieee80211_mark_sta_auth(struct ieee80211_sub_if_data *sdata,
				    const u8 *bssid)
{
	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
	struct sta_info *sta;
	bool result = true;

	sdata_info(sdata, "authenticated\n");
	ifmgd->auth_data->done = true;
	ifmgd->auth_data->timeout = jiffies + IEEE80211_AUTH_WAIT_ASSOC;
	ifmgd->auth_data->timeout_started = true;
	run_again(sdata, ifmgd->auth_data->timeout);

	/* move station state to auth */
	mutex_lock(&sdata->local->sta_mtx);
	sta = sta_info_get(sdata, bssid);
	if (!sta) {
		WARN_ONCE(1, "%s: STA %pM not found", sdata->name, bssid);
		result = false;
		goto out;
	}
	if (sta_info_move_state(sta, IEEE80211_STA_AUTH)) {
		sdata_info(sdata, "failed moving %pM to auth\n", bssid);
		result = false;
		goto out;
	}

out:
	mutex_unlock(&sdata->local->sta_mtx);
	return result;
}

static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
				   struct ieee80211_mgmt *mgmt, size_t len)
{
	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
	u8 bssid[ETH_ALEN];
	u16 auth_alg, auth_transaction, status_code;
	struct ieee80211_event event = {
		.type = MLME_EVENT,
		.u.mlme.data = AUTH_EVENT,
	};

	sdata_assert_lock(sdata);

	if (len < 24 + 6)
		return;

	if (!ifmgd->auth_data || ifmgd->auth_data->done)
		return;

	memcpy(bssid, ifmgd->auth_data->bss->bssid, ETH_ALEN);

	if (!ether_addr_equal(bssid, mgmt->bssid))
		return;

	auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg);
auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction); status_code = le16_to_cpu(mgmt->u.auth.status_code); if (auth_alg != ifmgd->auth_data->algorithm || (auth_alg != WLAN_AUTH_SAE && auth_transaction != ifmgd->auth_data->expected_transaction) || (auth_alg == WLAN_AUTH_SAE && (auth_transaction < ifmgd->auth_data->expected_transaction || auth_transaction > 2))) { sdata_info(sdata, "%pM unexpected authentication state: alg %d (expected %d) transact %d (expected %d)\n", mgmt->sa, auth_alg, ifmgd->auth_data->algorithm, auth_transaction, ifmgd->auth_data->expected_transaction); return; } if (status_code != WLAN_STATUS_SUCCESS) { sdata_info(sdata, "%pM denied authentication (status %d)\n", mgmt->sa, status_code); ieee80211_destroy_auth_data(sdata, false); cfg80211_rx_mlme_mgmt(sdata->dev, (u8 *)mgmt, len); event.u.mlme.status = MLME_DENIED; event.u.mlme.reason = status_code; drv_event_callback(sdata->local, sdata, &event); return; } switch (ifmgd->auth_data->algorithm) { case WLAN_AUTH_OPEN: case WLAN_AUTH_LEAP: case WLAN_AUTH_FT: case WLAN_AUTH_SAE: case WLAN_AUTH_FILS_SK: case WLAN_AUTH_FILS_SK_PFS: case WLAN_AUTH_FILS_PK: break; case WLAN_AUTH_SHARED_KEY: if (ifmgd->auth_data->expected_transaction != 4) { ieee80211_auth_challenge(sdata, mgmt, len); /* need another frame */ return; } break; default: WARN_ONCE(1, "invalid auth alg %d", ifmgd->auth_data->algorithm); return; } event.u.mlme.status = MLME_SUCCESS; drv_event_callback(sdata->local, sdata, &event); if (ifmgd->auth_data->algorithm != WLAN_AUTH_SAE || (auth_transaction == 2 && ifmgd->auth_data->expected_transaction == 2)) { if (!ieee80211_mark_sta_auth(sdata, bssid)) goto out_err; } else if (ifmgd->auth_data->algorithm == WLAN_AUTH_SAE && auth_transaction == 2) { sdata_info(sdata, "SAE peer confirmed\n"); ifmgd->auth_data->peer_confirmed = true; } cfg80211_rx_mlme_mgmt(sdata->dev, (u8 *)mgmt, len); return; out_err: mutex_unlock(&sdata->local->sta_mtx); /* ignore frame -- wait for timeout */ } #define case_WLAN(type) \ case WLAN_REASON_##type: return #type const char *ieee80211_get_reason_code_string(u16 reason_code) { switch (reason_code) { case_WLAN(UNSPECIFIED); case_WLAN(PREV_AUTH_NOT_VALID); case_WLAN(DEAUTH_LEAVING); case_WLAN(DISASSOC_DUE_TO_INACTIVITY); case_WLAN(DISASSOC_AP_BUSY); case_WLAN(CLASS2_FRAME_FROM_NONAUTH_STA); case_WLAN(CLASS3_FRAME_FROM_NONASSOC_STA); case_WLAN(DISASSOC_STA_HAS_LEFT); case_WLAN(STA_REQ_ASSOC_WITHOUT_AUTH); case_WLAN(DISASSOC_BAD_POWER); case_WLAN(DISASSOC_BAD_SUPP_CHAN); case_WLAN(INVALID_IE); case_WLAN(MIC_FAILURE); case_WLAN(4WAY_HANDSHAKE_TIMEOUT); case_WLAN(GROUP_KEY_HANDSHAKE_TIMEOUT); case_WLAN(IE_DIFFERENT); case_WLAN(INVALID_GROUP_CIPHER); case_WLAN(INVALID_PAIRWISE_CIPHER); case_WLAN(INVALID_AKMP); case_WLAN(UNSUPP_RSN_VERSION); case_WLAN(INVALID_RSN_IE_CAP); case_WLAN(IEEE8021X_FAILED); case_WLAN(CIPHER_SUITE_REJECTED); case_WLAN(DISASSOC_UNSPECIFIED_QOS); case_WLAN(DISASSOC_QAP_NO_BANDWIDTH); case_WLAN(DISASSOC_LOW_ACK); case_WLAN(DISASSOC_QAP_EXCEED_TXOP); case_WLAN(QSTA_LEAVE_QBSS); case_WLAN(QSTA_NOT_USE); case_WLAN(QSTA_REQUIRE_SETUP); case_WLAN(QSTA_TIMEOUT); case_WLAN(QSTA_CIPHER_NOT_SUPP); case_WLAN(MESH_PEER_CANCELED); case_WLAN(MESH_MAX_PEERS); case_WLAN(MESH_CONFIG); case_WLAN(MESH_CLOSE); case_WLAN(MESH_MAX_RETRIES); case_WLAN(MESH_CONFIRM_TIMEOUT); case_WLAN(MESH_INVALID_GTK); case_WLAN(MESH_INCONSISTENT_PARAM); case_WLAN(MESH_INVALID_SECURITY); case_WLAN(MESH_PATH_ERROR); case_WLAN(MESH_PATH_NOFORWARD); case_WLAN(MESH_PATH_DEST_UNREACHABLE); 
case_WLAN(MAC_EXISTS_IN_MBSS); case_WLAN(MESH_CHAN_REGULATORY); case_WLAN(MESH_CHAN); default: return ""; } } static void ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt, size_t len) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; u16 reason_code = le16_to_cpu(mgmt->u.deauth.reason_code); sdata_assert_lock(sdata); if (len < 24 + 2) return; if (!ether_addr_equal(mgmt->bssid, mgmt->sa)) { ieee80211_tdls_handle_disconnect(sdata, mgmt->sa, reason_code); return; } if (ifmgd->associated && ether_addr_equal(mgmt->bssid, ifmgd->associated->bssid)) { const u8 *bssid = ifmgd->associated->bssid; sdata_info(sdata, "deauthenticated from %pM (Reason: %u=%s)\n", bssid, reason_code, ieee80211_get_reason_code_string(reason_code)); ieee80211_set_disassoc(sdata, 0, 0, false, NULL); ieee80211_report_disconnect(sdata, (u8 *)mgmt, len, false, reason_code); return; } if (ifmgd->assoc_data && ether_addr_equal(mgmt->bssid, ifmgd->assoc_data->bss->bssid)) { const u8 *bssid = ifmgd->assoc_data->bss->bssid; sdata_info(sdata, "deauthenticated from %pM while associating (Reason: %u=%s)\n", bssid, reason_code, ieee80211_get_reason_code_string(reason_code)); ieee80211_destroy_assoc_data(sdata, false, true); cfg80211_rx_mlme_mgmt(sdata->dev, (u8 *)mgmt, len); return; } } static void ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt, size_t len) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; u16 reason_code; sdata_assert_lock(sdata); if (len < 24 + 2) return; if (!ifmgd->associated || !ether_addr_equal(mgmt->bssid, ifmgd->associated->bssid)) return; reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code); if (!ether_addr_equal(mgmt->bssid, mgmt->sa)) { ieee80211_tdls_handle_disconnect(sdata, mgmt->sa, reason_code); return; } sdata_info(sdata, "disassociated from %pM (Reason: %u=%s)\n", mgmt->sa, reason_code, ieee80211_get_reason_code_string(reason_code)); ieee80211_set_disassoc(sdata, 0, 0, false, NULL); ieee80211_report_disconnect(sdata, (u8 *)mgmt, len, false, reason_code); } static void ieee80211_get_rates(struct ieee80211_supported_band *sband, u8 *supp_rates, unsigned int supp_rates_len, u32 *rates, u32 *basic_rates, bool *have_higher_than_11mbit, int *min_rate, int *min_rate_index, int shift) { int i, j; for (i = 0; i < supp_rates_len; i++) { int rate = supp_rates[i] & 0x7f; bool is_basic = !!(supp_rates[i] & 0x80); if ((rate * 5 * (1 << shift)) > 110) *have_higher_than_11mbit = true; /* * Skip HT and VHT BSS membership selectors since they're not * rates. * * Note: Even though the membership selector and the basic * rate flag share the same bit, they are not exactly * the same. 
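	 * A membership selector is carried with the "basic rate"
	 * bit (0x80) set, which is why the full octet is compared
	 * below.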
*/ if (supp_rates[i] == (0x80 | BSS_MEMBERSHIP_SELECTOR_HT_PHY) || supp_rates[i] == (0x80 | BSS_MEMBERSHIP_SELECTOR_VHT_PHY)) continue; for (j = 0; j < sband->n_bitrates; j++) { struct ieee80211_rate *br; int brate; br = &sband->bitrates[j]; brate = DIV_ROUND_UP(br->bitrate, (1 << shift) * 5); if (brate == rate) { *rates |= BIT(j); if (is_basic) *basic_rates |= BIT(j); if ((rate * 5) < *min_rate) { *min_rate = rate * 5; *min_rate_index = j; } break; } } } } static bool ieee80211_twt_req_supported(const struct sta_info *sta, const struct ieee802_11_elems *elems) { if (elems->ext_capab_len < 10) return false; if (!(elems->ext_capab[9] & WLAN_EXT_CAPA10_TWT_RESPONDER_SUPPORT)) return false; return sta->sta.he_cap.he_cap_elem.mac_cap_info[0] & IEEE80211_HE_MAC_CAP0_TWT_RES; } static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata, struct cfg80211_bss *cbss, struct ieee80211_mgmt *mgmt, size_t len) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; struct ieee80211_local *local = sdata->local; struct ieee80211_supported_band *sband; struct sta_info *sta; u8 *pos; u16 capab_info, aid; struct ieee802_11_elems elems; struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf; const struct cfg80211_bss_ies *bss_ies = NULL; struct ieee80211_mgd_assoc_data *assoc_data = ifmgd->assoc_data; u32 changed = 0; int err; bool ret; /* AssocResp and ReassocResp have identical structure */ aid = le16_to_cpu(mgmt->u.assoc_resp.aid); capab_info = le16_to_cpu(mgmt->u.assoc_resp.capab_info); /* * The 5 MSB of the AID field are reserved * (802.11-2016 9.4.1.8 AID field) */ aid &= 0x7ff; ifmgd->broken_ap = false; if (aid == 0 || aid > IEEE80211_MAX_AID) { sdata_info(sdata, "invalid AID value %d (out of range), turn off PS\n", aid); aid = 0; ifmgd->broken_ap = true; } pos = mgmt->u.assoc_resp.variable; ieee802_11_parse_elems(pos, len - (pos - (u8 *)mgmt), false, &elems, mgmt->bssid, assoc_data->bss->bssid); if (!elems.supp_rates) { sdata_info(sdata, "no SuppRates element in AssocResp\n"); return false; } ifmgd->aid = aid; ifmgd->tdls_chan_switch_prohibited = elems.ext_capab && elems.ext_capab_len >= 5 && (elems.ext_capab[4] & WLAN_EXT_CAPA5_TDLS_CH_SW_PROHIBITED); /* * Some APs are erroneously not including some information in their * (re)association response frames. Try to recover by using the data * from the beacon or probe response. This seems to afflict mobile * 2G/3G/4G wifi routers, reported models include the "Onda PN51T", * "Vodafone PocketWiFi 2", "ZTE MF60" and a similar T-Mobile device. */ if ((assoc_data->wmm && !elems.wmm_param) || (!(ifmgd->flags & IEEE80211_STA_DISABLE_HT) && (!elems.ht_cap_elem || !elems.ht_operation)) || (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT) && (!elems.vht_cap_elem || !elems.vht_operation))) { const struct cfg80211_bss_ies *ies; struct ieee802_11_elems bss_elems; rcu_read_lock(); ies = rcu_dereference(cbss->ies); if (ies) bss_ies = kmemdup(ies, sizeof(*ies) + ies->len, GFP_ATOMIC); rcu_read_unlock(); if (!bss_ies) return false; ieee802_11_parse_elems(bss_ies->data, bss_ies->len, false, &bss_elems, mgmt->bssid, assoc_data->bss->bssid); if (assoc_data->wmm && !elems.wmm_param && bss_elems.wmm_param) { elems.wmm_param = bss_elems.wmm_param; sdata_info(sdata, "AP bug: WMM param missing from AssocResp\n"); } /* * Also check if we requested HT/VHT, otherwise the AP doesn't * have to include the IEs in the (re)association response. 
*/ if (!elems.ht_cap_elem && bss_elems.ht_cap_elem && !(ifmgd->flags & IEEE80211_STA_DISABLE_HT)) { elems.ht_cap_elem = bss_elems.ht_cap_elem; sdata_info(sdata, "AP bug: HT capability missing from AssocResp\n"); } if (!elems.ht_operation && bss_elems.ht_operation && !(ifmgd->flags & IEEE80211_STA_DISABLE_HT)) { elems.ht_operation = bss_elems.ht_operation; sdata_info(sdata, "AP bug: HT operation missing from AssocResp\n"); } if (!elems.vht_cap_elem && bss_elems.vht_cap_elem && !(ifmgd->flags & IEEE80211_STA_DISABLE_VHT)) { elems.vht_cap_elem = bss_elems.vht_cap_elem; sdata_info(sdata, "AP bug: VHT capa missing from AssocResp\n"); } if (!elems.vht_operation && bss_elems.vht_operation && !(ifmgd->flags & IEEE80211_STA_DISABLE_VHT)) { elems.vht_operation = bss_elems.vht_operation; sdata_info(sdata, "AP bug: VHT operation missing from AssocResp\n"); } } /* * We previously checked these in the beacon/probe response, so * they should be present here. This is just a safety net. */ if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HT) && (!elems.wmm_param || !elems.ht_cap_elem || !elems.ht_operation)) { sdata_info(sdata, "HT AP is missing WMM params or HT capability/operation\n"); ret = false; goto out; } if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT) && (!elems.vht_cap_elem || !elems.vht_operation)) { sdata_info(sdata, "VHT AP is missing VHT capability/operation\n"); ret = false; goto out; } mutex_lock(&sdata->local->sta_mtx); /* * station info was already allocated and inserted before * the association and should be available to us */ sta = sta_info_get(sdata, cbss->bssid); if (WARN_ON(!sta)) { mutex_unlock(&sdata->local->sta_mtx); ret = false; goto out; } sband = ieee80211_get_sband(sdata); if (!sband) { mutex_unlock(&sdata->local->sta_mtx); ret = false; goto out; } if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HE) && (!elems.he_cap || !elems.he_operation)) { mutex_unlock(&sdata->local->sta_mtx); sdata_info(sdata, "HE AP is missing HE capability/operation\n"); ret = false; goto out; } /* Set up internal HT/VHT capabilities */ if (elems.ht_cap_elem && !(ifmgd->flags & IEEE80211_STA_DISABLE_HT)) ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband, elems.ht_cap_elem, sta); if (elems.vht_cap_elem && !(ifmgd->flags & IEEE80211_STA_DISABLE_VHT)) ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband, elems.vht_cap_elem, sta); if (elems.he_operation && !(ifmgd->flags & IEEE80211_STA_DISABLE_HE) && elems.he_cap) { ieee80211_he_cap_ie_to_sta_he_cap(sdata, sband, elems.he_cap, elems.he_cap_len, sta); bss_conf->he_support = sta->sta.he_cap.has_he; bss_conf->twt_requester = ieee80211_twt_req_supported(sta, &elems); } else { bss_conf->he_support = false; bss_conf->twt_requester = false; } if (bss_conf->he_support) { bss_conf->bss_color = le32_get_bits(elems.he_operation->he_oper_params, IEEE80211_HE_OPERATION_BSS_COLOR_MASK); bss_conf->htc_trig_based_pkt_ext = le32_get_bits(elems.he_operation->he_oper_params, IEEE80211_HE_OPERATION_DFLT_PE_DURATION_MASK); bss_conf->frame_time_rts_th = le32_get_bits(elems.he_operation->he_oper_params, IEEE80211_HE_OPERATION_RTS_THRESHOLD_MASK); bss_conf->multi_sta_back_32bit = sta->sta.he_cap.he_cap_elem.mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP; bss_conf->ack_enabled = sta->sta.he_cap.he_cap_elem.mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_ACK_EN; bss_conf->uora_exists = !!elems.uora_element; if (elems.uora_element) bss_conf->uora_ocw_range = elems.uora_element[0]; /* TODO: OPEN: what happens if BSS color disable is set? 
	 */
	}

	if (cbss->transmitted_bss) {
		bss_conf->nontransmitted = true;
		ether_addr_copy(bss_conf->transmitter_bssid,
				cbss->transmitted_bss->bssid);
		bss_conf->bssid_indicator = cbss->max_bssid_indicator;
		bss_conf->bssid_index = cbss->bssid_index;
	}

	/*
	 * Some APs, e.g. Netgear WNDR3700, report invalid HT operation data
	 * in their association response, so ignore that data for our own
	 * configuration. If it changed since the last beacon, we'll get the
	 * next beacon and update then.
	 */

	/*
	 * If an operating mode notification IE is present, override the
	 * NSS calculation (that would be done in rate_control_rate_init())
	 * and use the # of streams from that element.
	 */
	if (elems.opmode_notif &&
	    !(*elems.opmode_notif & IEEE80211_OPMODE_NOTIF_RX_NSS_TYPE_BF)) {
		u8 nss;

		nss = *elems.opmode_notif & IEEE80211_OPMODE_NOTIF_RX_NSS_MASK;
		nss >>= IEEE80211_OPMODE_NOTIF_RX_NSS_SHIFT;
		nss += 1;
		sta->sta.rx_nss = nss;
	}

	rate_control_rate_init(sta);

	if (ifmgd->flags & IEEE80211_STA_MFP_ENABLED) {
		set_sta_flag(sta, WLAN_STA_MFP);
		sta->sta.mfp = true;
	} else {
		sta->sta.mfp = false;
	}

	sta->sta.wme = elems.wmm_param && local->hw.queues >= IEEE80211_NUM_ACS;

	err = sta_info_move_state(sta, IEEE80211_STA_ASSOC);
	if (!err && !(ifmgd->flags & IEEE80211_STA_CONTROL_PORT))
		err = sta_info_move_state(sta, IEEE80211_STA_AUTHORIZED);
	if (err) {
		sdata_info(sdata,
			   "failed to move station %pM to desired state\n",
			   sta->sta.addr);
		WARN_ON(__sta_info_destroy(sta));
		mutex_unlock(&sdata->local->sta_mtx);
		ret = false;
		goto out;
	}

	mutex_unlock(&sdata->local->sta_mtx);

	/*
	 * Always handle WMM once after association regardless
	 * of the first value the AP uses. Setting -1 here has
	 * that effect because the AP's value is an unsigned
	 * 4-bit value.
	 */
	ifmgd->wmm_last_param_set = -1;
	ifmgd->mu_edca_last_param_set = -1;

	if (ifmgd->flags & IEEE80211_STA_DISABLE_WMM) {
		ieee80211_set_wmm_default(sdata, false, false);
	} else if (!ieee80211_sta_wmm_params(local, sdata, elems.wmm_param,
					     elems.wmm_param_len,
					     elems.mu_edca_param_set)) {
		/* still enable QoS since we might have HT/VHT */
		ieee80211_set_wmm_default(sdata, false, true);
		/* set the disable-WMM flag in this case to disable
		 * tracking WMM parameter changes in the beacon if
		 * the parameters weren't actually valid. Doing so
		 * avoids changing parameters very strangely when
		 * the AP is going back and forth between valid and
		 * invalid parameters.
		 */
		ifmgd->flags |= IEEE80211_STA_DISABLE_WMM;
	}
	changed |= BSS_CHANGED_QOS;

	if (elems.max_idle_period_ie) {
		bss_conf->max_idle_period =
			le16_to_cpu(elems.max_idle_period_ie->max_idle_period);
		bss_conf->protected_keep_alive =
			!!(elems.max_idle_period_ie->idle_options &
			   WLAN_IDLE_OPTIONS_PROTECTED_KEEP_ALIVE);
		changed |= BSS_CHANGED_KEEP_ALIVE;
	} else {
		bss_conf->max_idle_period = 0;
		bss_conf->protected_keep_alive = false;
	}

	/* set AID and assoc capability,
	 * ieee80211_set_associated() will tell the driver */
	bss_conf->aid = aid;
	bss_conf->assoc_capability = capab_info;
	ieee80211_set_associated(sdata, cbss, changed);

	/*
	 * If we're using 4-addr mode, let the AP know that we're
	 * doing so, so that it can create the STA VLAN on its side
	 */
	if (ifmgd->use_4addr)
		ieee80211_send_4addr_nullfunc(local, sdata);

	/*
	 * Start timer to probe the connection to the AP now.
	 * Also start the timer that will detect beacon loss.
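	 * (if no beacon arrives before the monitor expires, the
	 * beacon_connection_loss_work above will run and probe the
	 * AP or disconnect.)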
*/ ieee80211_sta_rx_notify(sdata, (struct ieee80211_hdr *)mgmt); ieee80211_sta_reset_beacon_monitor(sdata); ret = true; out: kfree(bss_ies); return ret; } static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt, size_t len) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; struct ieee80211_mgd_assoc_data *assoc_data = ifmgd->assoc_data; u16 capab_info, status_code, aid; struct ieee802_11_elems elems; int ac, uapsd_queues = -1; u8 *pos; bool reassoc; struct cfg80211_bss *bss; struct ieee80211_event event = { .type = MLME_EVENT, .u.mlme.data = ASSOC_EVENT, }; sdata_assert_lock(sdata); if (!assoc_data) return; if (!ether_addr_equal(assoc_data->bss->bssid, mgmt->bssid)) return; /* * AssocResp and ReassocResp have identical structure, so process both * of them in this function. */ if (len < 24 + 6) return; reassoc = ieee80211_is_reassoc_resp(mgmt->frame_control); capab_info = le16_to_cpu(mgmt->u.assoc_resp.capab_info); status_code = le16_to_cpu(mgmt->u.assoc_resp.status_code); aid = le16_to_cpu(mgmt->u.assoc_resp.aid); sdata_info(sdata, "RX %sssocResp from %pM (capab=0x%x status=%d aid=%d)\n", reassoc ? "Rea" : "A", mgmt->sa, capab_info, status_code, (u16)(aid & ~(BIT(15) | BIT(14)))); if (assoc_data->fils_kek_len && fils_decrypt_assoc_resp(sdata, (u8 *)mgmt, &len, assoc_data) < 0) return; pos = mgmt->u.assoc_resp.variable; ieee802_11_parse_elems(pos, len - (pos - (u8 *)mgmt), false, &elems, mgmt->bssid, assoc_data->bss->bssid); if (status_code == WLAN_STATUS_ASSOC_REJECTED_TEMPORARILY && elems.timeout_int && elems.timeout_int->type == WLAN_TIMEOUT_ASSOC_COMEBACK) { u32 tu, ms; tu = le32_to_cpu(elems.timeout_int->value); ms = tu * 1024 / 1000; sdata_info(sdata, "%pM rejected association temporarily; comeback duration %u TU (%u ms)\n", mgmt->sa, tu, ms); assoc_data->timeout = jiffies + msecs_to_jiffies(ms); assoc_data->timeout_started = true; if (ms > IEEE80211_ASSOC_TIMEOUT) run_again(sdata, assoc_data->timeout); return; } bss = assoc_data->bss; if (status_code != WLAN_STATUS_SUCCESS) { sdata_info(sdata, "%pM denied association (code=%d)\n", mgmt->sa, status_code); ieee80211_destroy_assoc_data(sdata, false, false); event.u.mlme.status = MLME_DENIED; event.u.mlme.reason = status_code; drv_event_callback(sdata->local, sdata, &event); } else { if (!ieee80211_assoc_success(sdata, bss, mgmt, len)) { /* oops -- internal error -- send timeout for now */ ieee80211_destroy_assoc_data(sdata, false, false); cfg80211_assoc_timeout(sdata->dev, bss); return; } event.u.mlme.status = MLME_SUCCESS; drv_event_callback(sdata->local, sdata, &event); sdata_info(sdata, "associated\n"); /* * destroy assoc_data afterwards, as otherwise an idle * recalc after assoc_data is NULL but before associated * is set can cause the interface to go idle */ ieee80211_destroy_assoc_data(sdata, true, false); /* get uapsd queues configuration */ uapsd_queues = 0; for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) if (sdata->tx_conf[ac].uapsd) uapsd_queues |= ieee80211_ac_to_qos_mask[ac]; } cfg80211_rx_assoc_resp(sdata->dev, bss, (u8 *)mgmt, len, uapsd_queues, ifmgd->assoc_req_ies, ifmgd->assoc_req_ies_len); } static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt, size_t len, struct ieee80211_rx_status *rx_status) { struct ieee80211_local *local = sdata->local; struct ieee80211_bss *bss; struct ieee80211_channel *channel; sdata_assert_lock(sdata); channel = ieee80211_get_channel(local->hw.wiphy, rx_status->freq); if (!channel) return; bss = 
ieee80211_bss_info_update(local, rx_status, mgmt, len, channel); if (bss) { sdata->vif.bss_conf.beacon_rate = bss->beacon_rate; ieee80211_rx_bss_put(local, bss); } } static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) { struct ieee80211_mgmt *mgmt = (void *)skb->data; struct ieee80211_if_managed *ifmgd; struct ieee80211_rx_status *rx_status = (void *) skb->cb; size_t baselen, len = skb->len; ifmgd = &sdata->u.mgd; sdata_assert_lock(sdata); if (!ether_addr_equal(mgmt->da, sdata->vif.addr)) return; /* ignore ProbeResp to foreign address */ baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt; if (baselen > len) return; ieee80211_rx_bss_info(sdata, mgmt, len, rx_status); if (ifmgd->associated && ether_addr_equal(mgmt->bssid, ifmgd->associated->bssid)) ieee80211_reset_ap_probe(sdata); } /* * This is the canonical list of information elements we care about, * the filter code also gives us all changes to the Microsoft OUI * (00:50:F2) vendor IE which is used for WMM which we need to track, * as well as the DTPC IE (part of the Cisco OUI) used for signaling * changes to requested client power. * * We implement beacon filtering in software since that means we can * avoid processing the frame here and in cfg80211, and userspace * will not be able to tell whether the hardware supports it or not. * * XXX: This list needs to be dynamic -- userspace needs to be able to * add items it requires. It also needs to be able to tell us to * look out for other vendor IEs. */ static const u64 care_about_ies = (1ULL << WLAN_EID_COUNTRY) | (1ULL << WLAN_EID_ERP_INFO) | (1ULL << WLAN_EID_CHANNEL_SWITCH) | (1ULL << WLAN_EID_PWR_CONSTRAINT) | (1ULL << WLAN_EID_HT_CAPABILITY) | (1ULL << WLAN_EID_HT_OPERATION) | (1ULL << WLAN_EID_EXT_CHANSWITCH_ANN); static void ieee80211_handle_beacon_sig(struct ieee80211_sub_if_data *sdata, struct ieee80211_if_managed *ifmgd, struct ieee80211_bss_conf *bss_conf, struct ieee80211_local *local, struct ieee80211_rx_status *rx_status) { /* Track average RSSI from the Beacon frames of the current AP */ if (ifmgd->flags & IEEE80211_STA_RESET_SIGNAL_AVE) { ifmgd->flags &= ~IEEE80211_STA_RESET_SIGNAL_AVE; ewma_beacon_signal_init(&ifmgd->ave_beacon_signal); ifmgd->last_cqm_event_signal = 0; ifmgd->count_beacon_signal = 1; ifmgd->last_ave_beacon_signal = 0; } else { ifmgd->count_beacon_signal++; } ewma_beacon_signal_add(&ifmgd->ave_beacon_signal, -rx_status->signal); if (ifmgd->rssi_min_thold != ifmgd->rssi_max_thold && ifmgd->count_beacon_signal >= IEEE80211_SIGNAL_AVE_MIN_COUNT) { int sig = -ewma_beacon_signal_read(&ifmgd->ave_beacon_signal); int last_sig = ifmgd->last_ave_beacon_signal; struct ieee80211_event event = { .type = RSSI_EVENT, }; /* * if signal crosses either of the boundaries, invoke callback * with appropriate parameters */ if (sig > ifmgd->rssi_max_thold && (last_sig <= ifmgd->rssi_min_thold || last_sig == 0)) { ifmgd->last_ave_beacon_signal = sig; event.u.rssi.data = RSSI_EVENT_HIGH; drv_event_callback(local, sdata, &event); } else if (sig < ifmgd->rssi_min_thold && (last_sig >= ifmgd->rssi_max_thold || last_sig == 0)) { ifmgd->last_ave_beacon_signal = sig; event.u.rssi.data = RSSI_EVENT_LOW; drv_event_callback(local, sdata, &event); } } if (bss_conf->cqm_rssi_thold && ifmgd->count_beacon_signal >= IEEE80211_SIGNAL_AVE_MIN_COUNT && !(sdata->vif.driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI)) { int sig = -ewma_beacon_signal_read(&ifmgd->ave_beacon_signal); int last_event = ifmgd->last_cqm_event_signal; int thold = 
bss_conf->cqm_rssi_thold; int hyst = bss_conf->cqm_rssi_hyst; if (sig < thold && (last_event == 0 || sig < last_event - hyst)) { ifmgd->last_cqm_event_signal = sig; ieee80211_cqm_rssi_notify( &sdata->vif, NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW, sig, GFP_KERNEL); } else if (sig > thold && (last_event == 0 || sig > last_event + hyst)) { ifmgd->last_cqm_event_signal = sig; ieee80211_cqm_rssi_notify( &sdata->vif, NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH, sig, GFP_KERNEL); } } if (bss_conf->cqm_rssi_low && ifmgd->count_beacon_signal >= IEEE80211_SIGNAL_AVE_MIN_COUNT) { int sig = -ewma_beacon_signal_read(&ifmgd->ave_beacon_signal); int last_event = ifmgd->last_cqm_event_signal; int low = bss_conf->cqm_rssi_low; int high = bss_conf->cqm_rssi_high; if (sig < low && (last_event == 0 || last_event >= low)) { ifmgd->last_cqm_event_signal = sig; ieee80211_cqm_rssi_notify( &sdata->vif, NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW, sig, GFP_KERNEL); } else if (sig > high && (last_event == 0 || last_event <= high)) { ifmgd->last_cqm_event_signal = sig; ieee80211_cqm_rssi_notify( &sdata->vif, NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH, sig, GFP_KERNEL); } } } static bool ieee80211_rx_our_beacon(const u8 *tx_bssid, struct cfg80211_bss *bss) { if (ether_addr_equal(tx_bssid, bss->bssid)) return true; if (!bss->transmitted_bss) return false; return ether_addr_equal(tx_bssid, bss->transmitted_bss->bssid); } static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt, size_t len, struct ieee80211_rx_status *rx_status) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf; size_t baselen; struct ieee802_11_elems elems; struct ieee80211_local *local = sdata->local; struct ieee80211_chanctx_conf *chanctx_conf; struct ieee80211_channel *chan; struct sta_info *sta; u32 changed = 0; bool erp_valid; u8 erp_value = 0; u32 ncrc; u8 *bssid; u8 deauth_buf[IEEE80211_DEAUTH_FRAME_LEN]; sdata_assert_lock(sdata); /* Process beacon from the current BSS */ baselen = (u8 *) mgmt->u.beacon.variable - (u8 *) mgmt; if (baselen > len) return; rcu_read_lock(); chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); if (!chanctx_conf) { rcu_read_unlock(); return; } if (rx_status->freq != chanctx_conf->def.chan->center_freq) { rcu_read_unlock(); return; } chan = chanctx_conf->def.chan; rcu_read_unlock(); if (ifmgd->assoc_data && ifmgd->assoc_data->need_beacon && ieee80211_rx_our_beacon(mgmt->bssid, ifmgd->assoc_data->bss)) { ieee802_11_parse_elems(mgmt->u.beacon.variable, len - baselen, false, &elems, mgmt->bssid, ifmgd->assoc_data->bss->bssid); ieee80211_rx_bss_info(sdata, mgmt, len, rx_status); if (elems.dtim_period) ifmgd->dtim_period = elems.dtim_period; ifmgd->have_beacon = true; ifmgd->assoc_data->need_beacon = false; if (ieee80211_hw_check(&local->hw, TIMING_BEACON_ONLY)) { sdata->vif.bss_conf.sync_tsf = le64_to_cpu(mgmt->u.beacon.timestamp); sdata->vif.bss_conf.sync_device_ts = rx_status->device_timestamp; sdata->vif.bss_conf.sync_dtim_count = elems.dtim_count; } if (elems.mbssid_config_ie) bss_conf->profile_periodicity = elems.mbssid_config_ie->profile_periodicity; if (elems.ext_capab_len >= 11 && (elems.ext_capab[10] & WLAN_EXT_CAPA11_EMA_SUPPORT)) bss_conf->ema_ap = true; /* continue assoc process */ ifmgd->assoc_data->timeout = jiffies; ifmgd->assoc_data->timeout_started = true; run_again(sdata, ifmgd->assoc_data->timeout); return; } if (!ifmgd->associated || !ieee80211_rx_our_beacon(mgmt->bssid, ifmgd->associated)) return; bssid = 
ifmgd->associated->bssid; if (!(rx_status->flag & RX_FLAG_NO_SIGNAL_VAL)) ieee80211_handle_beacon_sig(sdata, ifmgd, bss_conf, local, rx_status); if (ifmgd->flags & IEEE80211_STA_CONNECTION_POLL) { mlme_dbg_ratelimited(sdata, "cancelling AP probe due to a received beacon\n"); ieee80211_reset_ap_probe(sdata); } /* * Push the beacon loss detection into the future since * we are processing a beacon from the AP just now. */ ieee80211_sta_reset_beacon_monitor(sdata); ncrc = crc32_be(0, (void *)&mgmt->u.beacon.beacon_int, 4); ncrc = ieee802_11_parse_elems_crc(mgmt->u.beacon.variable, len - baselen, false, &elems, care_about_ies, ncrc, mgmt->bssid, bssid); if (ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK) && ieee80211_check_tim(elems.tim, elems.tim_len, ifmgd->aid)) { if (local->hw.conf.dynamic_ps_timeout > 0) { if (local->hw.conf.flags & IEEE80211_CONF_PS) { local->hw.conf.flags &= ~IEEE80211_CONF_PS; ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); } ieee80211_send_nullfunc(local, sdata, false); } else if (!local->pspolling && sdata->u.mgd.powersave) { local->pspolling = true; /* * Here is assumed that the driver will be * able to send ps-poll frame and receive a * response even though power save mode is * enabled, but some drivers might require * to disable power save here. This needs * to be investigated. */ ieee80211_send_pspoll(local, sdata); } } if (sdata->vif.p2p || sdata->vif.driver_flags & IEEE80211_VIF_GET_NOA_UPDATE) { struct ieee80211_p2p_noa_attr noa = {}; int ret; ret = cfg80211_get_p2p_attr(mgmt->u.beacon.variable, len - baselen, IEEE80211_P2P_ATTR_ABSENCE_NOTICE, (u8 *) &noa, sizeof(noa)); if (ret >= 2) { if (sdata->u.mgd.p2p_noa_index != noa.index) { /* valid noa_attr and index changed */ sdata->u.mgd.p2p_noa_index = noa.index; memcpy(&bss_conf->p2p_noa_attr, &noa, sizeof(noa)); changed |= BSS_CHANGED_P2P_PS; /* * make sure we update all information, the CRC * mechanism doesn't look at P2P attributes. */ ifmgd->beacon_crc_valid = false; } } else if (sdata->u.mgd.p2p_noa_index != -1) { /* noa_attr not found and we had valid noa_attr before */ sdata->u.mgd.p2p_noa_index = -1; memset(&bss_conf->p2p_noa_attr, 0, sizeof(bss_conf->p2p_noa_attr)); changed |= BSS_CHANGED_P2P_PS; ifmgd->beacon_crc_valid = false; } } if (ifmgd->csa_waiting_bcn) ieee80211_chswitch_post_beacon(sdata); /* * Update beacon timing and dtim count on every beacon appearance. This * will allow the driver to use the most updated values. Do it before * comparing this one with last received beacon. * IMPORTANT: These parameters would possibly be out of sync by the time * the driver will use them. The synchronized view is currently * guaranteed only in certain callbacks. */ if (ieee80211_hw_check(&local->hw, TIMING_BEACON_ONLY)) { sdata->vif.bss_conf.sync_tsf = le64_to_cpu(mgmt->u.beacon.timestamp); sdata->vif.bss_conf.sync_device_ts = rx_status->device_timestamp; sdata->vif.bss_conf.sync_dtim_count = elems.dtim_count; } if (ncrc == ifmgd->beacon_crc && ifmgd->beacon_crc_valid) return; ifmgd->beacon_crc = ncrc; ifmgd->beacon_crc_valid = true; ieee80211_rx_bss_info(sdata, mgmt, len, rx_status); ieee80211_sta_process_chanswitch(sdata, rx_status->mactime, rx_status->device_timestamp, &elems, true); if (!(ifmgd->flags & IEEE80211_STA_DISABLE_WMM) && ieee80211_sta_wmm_params(local, sdata, elems.wmm_param, elems.wmm_param_len, elems.mu_edca_param_set)) changed |= BSS_CHANGED_QOS; /* * If we haven't had a beacon before, tell the driver about the * DTIM period (and beacon timing if desired) now. 
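 * (The dtim_period ?: 1 expression below is the GNU C "a ?: b"
 * shorthand for "a ? a : b", so APs that advertise an invalid DTIM
 * period of 0 are treated as having a period of 1.)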
*/ if (!ifmgd->have_beacon) { /* a few bogus AP send dtim_period = 0 or no TIM IE */ bss_conf->dtim_period = elems.dtim_period ?: 1; changed |= BSS_CHANGED_BEACON_INFO; ifmgd->have_beacon = true; mutex_lock(&local->iflist_mtx); ieee80211_recalc_ps(local); mutex_unlock(&local->iflist_mtx); ieee80211_recalc_ps_vif(sdata); } if (elems.erp_info) { erp_valid = true; erp_value = elems.erp_info[0]; } else { erp_valid = false; } changed |= ieee80211_handle_bss_capability(sdata, le16_to_cpu(mgmt->u.beacon.capab_info), erp_valid, erp_value); mutex_lock(&local->sta_mtx); sta = sta_info_get(sdata, bssid); if (ieee80211_config_bw(sdata, sta, elems.ht_cap_elem, elems.ht_operation, elems.vht_operation, elems.he_operation, bssid, &changed)) { mutex_unlock(&local->sta_mtx); sdata_info(sdata, "failed to follow AP %pM bandwidth change, disconnect\n", bssid); ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, WLAN_REASON_DEAUTH_LEAVING, true, deauth_buf); ieee80211_report_disconnect(sdata, deauth_buf, sizeof(deauth_buf), true, WLAN_REASON_DEAUTH_LEAVING); return; } if (sta && elems.opmode_notif) ieee80211_vht_handle_opmode(sdata, sta, *elems.opmode_notif, rx_status->band); mutex_unlock(&local->sta_mtx); changed |= ieee80211_handle_pwr_constr(sdata, chan, mgmt, elems.country_elem, elems.country_elem_len, elems.pwr_constr_elem, elems.cisco_dtpc_elem); ieee80211_bss_info_change_notify(sdata, changed); } void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) { struct ieee80211_rx_status *rx_status; struct ieee80211_mgmt *mgmt; u16 fc; struct ieee802_11_elems elems; int ies_len; rx_status = (struct ieee80211_rx_status *) skb->cb; mgmt = (struct ieee80211_mgmt *) skb->data; fc = le16_to_cpu(mgmt->frame_control); sdata_lock(sdata); switch (fc & IEEE80211_FCTL_STYPE) { case IEEE80211_STYPE_BEACON: ieee80211_rx_mgmt_beacon(sdata, mgmt, skb->len, rx_status); break; case IEEE80211_STYPE_PROBE_RESP: ieee80211_rx_mgmt_probe_resp(sdata, skb); break; case IEEE80211_STYPE_AUTH: ieee80211_rx_mgmt_auth(sdata, mgmt, skb->len); break; case IEEE80211_STYPE_DEAUTH: ieee80211_rx_mgmt_deauth(sdata, mgmt, skb->len); break; case IEEE80211_STYPE_DISASSOC: ieee80211_rx_mgmt_disassoc(sdata, mgmt, skb->len); break; case IEEE80211_STYPE_ASSOC_RESP: case IEEE80211_STYPE_REASSOC_RESP: ieee80211_rx_mgmt_assoc_resp(sdata, mgmt, skb->len); break; case IEEE80211_STYPE_ACTION: if (mgmt->u.action.category == WLAN_CATEGORY_SPECTRUM_MGMT) { ies_len = skb->len - offsetof(struct ieee80211_mgmt, u.action.u.chan_switch.variable); if (ies_len < 0) break; /* CSA IE cannot be overridden, no need for BSSID */ ieee802_11_parse_elems( mgmt->u.action.u.chan_switch.variable, ies_len, true, &elems, mgmt->bssid, NULL); if (elems.parse_error) break; ieee80211_sta_process_chanswitch(sdata, rx_status->mactime, rx_status->device_timestamp, &elems, false); } else if (mgmt->u.action.category == WLAN_CATEGORY_PUBLIC) { ies_len = skb->len - offsetof(struct ieee80211_mgmt, u.action.u.ext_chan_switch.variable); if (ies_len < 0) break; /* * extended CSA IE can't be overridden, no need for * BSSID */ ieee802_11_parse_elems( mgmt->u.action.u.ext_chan_switch.variable, ies_len, true, &elems, mgmt->bssid, NULL); if (elems.parse_error) break; /* for the handling code pretend this was also an IE */ elems.ext_chansw_ie = &mgmt->u.action.u.ext_chan_switch.data; ieee80211_sta_process_chanswitch(sdata, rx_status->mactime, rx_status->device_timestamp, &elems, false); } break; } sdata_unlock(sdata); } static void ieee80211_sta_timer(struct 
timer_list *t) { struct ieee80211_sub_if_data *sdata = from_timer(sdata, t, u.mgd.timer); ieee80211_queue_work(&sdata->local->hw, &sdata->work); } static void ieee80211_sta_connection_lost(struct ieee80211_sub_if_data *sdata, u8 *bssid, u8 reason, bool tx) { u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN]; ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, reason, tx, frame_buf); ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), true, reason); } static int ieee80211_auth(struct ieee80211_sub_if_data *sdata) { struct ieee80211_local *local = sdata->local; struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; struct ieee80211_mgd_auth_data *auth_data = ifmgd->auth_data; u32 tx_flags = 0; u16 trans = 1; u16 status = 0; u16 prepare_tx_duration = 0; sdata_assert_lock(sdata); if (WARN_ON_ONCE(!auth_data)) return -EINVAL; auth_data->tries++; if (auth_data->tries > IEEE80211_AUTH_MAX_TRIES) { sdata_info(sdata, "authentication with %pM timed out\n", auth_data->bss->bssid); /* * Most likely AP is not in the range so remove the * bss struct for that AP. */ cfg80211_unlink_bss(local->hw.wiphy, auth_data->bss); return -ETIMEDOUT; } if (auth_data->algorithm == WLAN_AUTH_SAE) prepare_tx_duration = jiffies_to_msecs(IEEE80211_AUTH_TIMEOUT_SAE); drv_mgd_prepare_tx(local, sdata, prepare_tx_duration); sdata_info(sdata, "send auth to %pM (try %d/%d)\n", auth_data->bss->bssid, auth_data->tries, IEEE80211_AUTH_MAX_TRIES); auth_data->expected_transaction = 2; if (auth_data->algorithm == WLAN_AUTH_SAE) { trans = auth_data->sae_trans; status = auth_data->sae_status; auth_data->expected_transaction = trans; } if (ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) tx_flags = IEEE80211_TX_CTL_REQ_TX_STATUS | IEEE80211_TX_INTFL_MLME_CONN_TX; ieee80211_send_auth(sdata, trans, auth_data->algorithm, status, auth_data->data, auth_data->data_len, auth_data->bss->bssid, auth_data->bss->bssid, NULL, 0, 0, tx_flags); if (tx_flags == 0) { if (auth_data->algorithm == WLAN_AUTH_SAE) auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT_SAE; else auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT; } else { auth_data->timeout = round_jiffies_up(jiffies + IEEE80211_AUTH_TIMEOUT_LONG); } auth_data->timeout_started = true; run_again(sdata, auth_data->timeout); return 0; } static int ieee80211_do_assoc(struct ieee80211_sub_if_data *sdata) { struct ieee80211_mgd_assoc_data *assoc_data = sdata->u.mgd.assoc_data; struct ieee80211_local *local = sdata->local; sdata_assert_lock(sdata); assoc_data->tries++; if (assoc_data->tries > IEEE80211_ASSOC_MAX_TRIES) { sdata_info(sdata, "association with %pM timed out\n", assoc_data->bss->bssid); /* * Most likely AP is not in the range so remove the * bss struct for that AP. 
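 * (cfg80211_unlink_bss() drops the entry from cfg80211's scan
 * results, so the AP has to show up in a fresh scan before another
 * association attempt can be made.)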
*/ cfg80211_unlink_bss(local->hw.wiphy, assoc_data->bss); return -ETIMEDOUT; } sdata_info(sdata, "associate with %pM (try %d/%d)\n", assoc_data->bss->bssid, assoc_data->tries, IEEE80211_ASSOC_MAX_TRIES); ieee80211_send_assoc(sdata); if (!ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) { assoc_data->timeout = jiffies + IEEE80211_ASSOC_TIMEOUT; assoc_data->timeout_started = true; run_again(sdata, assoc_data->timeout); } else { assoc_data->timeout = round_jiffies_up(jiffies + IEEE80211_ASSOC_TIMEOUT_LONG); assoc_data->timeout_started = true; run_again(sdata, assoc_data->timeout); } return 0; } void ieee80211_mgd_conn_tx_status(struct ieee80211_sub_if_data *sdata, __le16 fc, bool acked) { struct ieee80211_local *local = sdata->local; sdata->u.mgd.status_fc = fc; sdata->u.mgd.status_acked = acked; sdata->u.mgd.status_received = true; ieee80211_queue_work(&local->hw, &sdata->work); } void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata) { struct ieee80211_local *local = sdata->local; struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; sdata_lock(sdata); if (ifmgd->status_received) { __le16 fc = ifmgd->status_fc; bool status_acked = ifmgd->status_acked; ifmgd->status_received = false; if (ifmgd->auth_data && ieee80211_is_auth(fc)) { if (status_acked) { if (ifmgd->auth_data->algorithm == WLAN_AUTH_SAE) ifmgd->auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT_SAE; else ifmgd->auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT_SHORT; run_again(sdata, ifmgd->auth_data->timeout); } else { ifmgd->auth_data->timeout = jiffies - 1; } ifmgd->auth_data->timeout_started = true; } else if (ifmgd->assoc_data && (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))) { if (status_acked) { ifmgd->assoc_data->timeout = jiffies + IEEE80211_ASSOC_TIMEOUT_SHORT; run_again(sdata, ifmgd->assoc_data->timeout); } else { ifmgd->assoc_data->timeout = jiffies - 1; } ifmgd->assoc_data->timeout_started = true; } } if (ifmgd->auth_data && ifmgd->auth_data->timeout_started && time_after(jiffies, ifmgd->auth_data->timeout)) { if (ifmgd->auth_data->done) { /* * ok ... 
we waited for assoc but userspace didn't, * so let's just kill the auth data */ ieee80211_destroy_auth_data(sdata, false); } else if (ieee80211_auth(sdata)) { u8 bssid[ETH_ALEN]; struct ieee80211_event event = { .type = MLME_EVENT, .u.mlme.data = AUTH_EVENT, .u.mlme.status = MLME_TIMEOUT, }; memcpy(bssid, ifmgd->auth_data->bss->bssid, ETH_ALEN); ieee80211_destroy_auth_data(sdata, false); cfg80211_auth_timeout(sdata->dev, bssid); drv_event_callback(sdata->local, sdata, &event); } } else if (ifmgd->auth_data && ifmgd->auth_data->timeout_started) run_again(sdata, ifmgd->auth_data->timeout); if (ifmgd->assoc_data && ifmgd->assoc_data->timeout_started && time_after(jiffies, ifmgd->assoc_data->timeout)) { if ((ifmgd->assoc_data->need_beacon && !ifmgd->have_beacon) || ieee80211_do_assoc(sdata)) { struct cfg80211_bss *bss = ifmgd->assoc_data->bss; struct ieee80211_event event = { .type = MLME_EVENT, .u.mlme.data = ASSOC_EVENT, .u.mlme.status = MLME_TIMEOUT, }; ieee80211_destroy_assoc_data(sdata, false, false); cfg80211_assoc_timeout(sdata->dev, bss); drv_event_callback(sdata->local, sdata, &event); } } else if (ifmgd->assoc_data && ifmgd->assoc_data->timeout_started) run_again(sdata, ifmgd->assoc_data->timeout); if (ifmgd->flags & IEEE80211_STA_CONNECTION_POLL && ifmgd->associated) { u8 bssid[ETH_ALEN]; int max_tries; memcpy(bssid, ifmgd->associated->bssid, ETH_ALEN); if (ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) max_tries = max_nullfunc_tries; else max_tries = max_probe_tries; /* ACK received for nullfunc probing frame */ if (!ifmgd->probe_send_count) ieee80211_reset_ap_probe(sdata); else if (ifmgd->nullfunc_failed) { if (ifmgd->probe_send_count < max_tries) { mlme_dbg(sdata, "No ack for nullfunc frame to AP %pM, try %d/%i\n", bssid, ifmgd->probe_send_count, max_tries); ieee80211_mgd_probe_ap_send(sdata); } else { mlme_dbg(sdata, "No ack for nullfunc frame to AP %pM, disconnecting.\n", bssid); ieee80211_sta_connection_lost(sdata, bssid, WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY, false); } } else if (time_is_after_jiffies(ifmgd->probe_timeout)) run_again(sdata, ifmgd->probe_timeout); else if (ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) { mlme_dbg(sdata, "Failed to send nullfunc to AP %pM after %dms, disconnecting\n", bssid, probe_wait_ms); ieee80211_sta_connection_lost(sdata, bssid, WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY, false); } else if (ifmgd->probe_send_count < max_tries) { mlme_dbg(sdata, "No probe response from AP %pM after %dms, try %d/%i\n", bssid, probe_wait_ms, ifmgd->probe_send_count, max_tries); ieee80211_mgd_probe_ap_send(sdata); } else { /* * We actually lost the connection ... or did we? * Let's make sure! 
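 * (Reaching this branch means the probe timeout expired after
 * probe_send_count hit max_tries with no probe response, so the
 * connection is torn down below with reason
 * WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY.)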
*/ mlme_dbg(sdata, "No probe response from AP %pM after %dms, disconnecting.\n", bssid, probe_wait_ms); ieee80211_sta_connection_lost(sdata, bssid, WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY, false); } } sdata_unlock(sdata); } static void ieee80211_sta_bcn_mon_timer(struct timer_list *t) { struct ieee80211_sub_if_data *sdata = from_timer(sdata, t, u.mgd.bcn_mon_timer); struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; if (sdata->vif.csa_active && !ifmgd->csa_waiting_bcn) return; sdata->u.mgd.connection_loss = false; ieee80211_queue_work(&sdata->local->hw, &sdata->u.mgd.beacon_connection_loss_work); } static void ieee80211_sta_conn_mon_timer(struct timer_list *t) { struct ieee80211_sub_if_data *sdata = from_timer(sdata, t, u.mgd.conn_mon_timer); struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; struct ieee80211_local *local = sdata->local; if (sdata->vif.csa_active && !ifmgd->csa_waiting_bcn) return; ieee80211_queue_work(&local->hw, &ifmgd->monitor_work); } static void ieee80211_sta_monitor_work(struct work_struct *work) { struct ieee80211_sub_if_data *sdata = container_of(work, struct ieee80211_sub_if_data, u.mgd.monitor_work); ieee80211_mgd_probe_ap(sdata, false); } static void ieee80211_restart_sta_timer(struct ieee80211_sub_if_data *sdata) { if (sdata->vif.type == NL80211_IFTYPE_STATION) { __ieee80211_stop_poll(sdata); /* let's probe the connection once */ if (!ieee80211_hw_check(&sdata->local->hw, CONNECTION_MONITOR)) ieee80211_queue_work(&sdata->local->hw, &sdata->u.mgd.monitor_work); } } #ifdef CONFIG_PM void ieee80211_mgd_quiesce(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN]; sdata_lock(sdata); if (ifmgd->auth_data || ifmgd->assoc_data) { const u8 *bssid = ifmgd->auth_data ? ifmgd->auth_data->bss->bssid : ifmgd->assoc_data->bss->bssid; /* * If we are trying to authenticate / associate while suspending, * cfg80211 won't know and won't actually abort those attempts, * thus we need to do that ourselves. */ ieee80211_send_deauth_disassoc(sdata, bssid, IEEE80211_STYPE_DEAUTH, WLAN_REASON_DEAUTH_LEAVING, false, frame_buf); if (ifmgd->assoc_data) ieee80211_destroy_assoc_data(sdata, false, true); if (ifmgd->auth_data) ieee80211_destroy_auth_data(sdata, false); cfg80211_tx_mlme_mgmt(sdata->dev, frame_buf, IEEE80211_DEAUTH_FRAME_LEN); } /* This is a bit of a hack - we should find a better and more generic * solution to this. Normally when suspending, cfg80211 will in fact * deauthenticate. However, it doesn't (and cannot) stop an ongoing * auth (not so important) or assoc (this is the problem) process. * * As a consequence, it can happen that we are in the process of both * associating and suspending, and receive an association response * after cfg80211 has checked if it needs to disconnect, but before * we actually set the flag to drop incoming frames. This will then * cause the workqueue flush to process the association response in * the suspend, resulting in a successful association just before it * tries to remove the interface from the driver, which now though * has a channel context assigned ... this results in issues. * * To work around this (for now) simply deauth here again if we're * now connected. 
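 * (The deauth below is deliberately skipped while WoWLAN is active,
 * per the !sdata->local->wowlan check, because in that case the
 * connection is expected to survive suspend.)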
*/ if (ifmgd->associated && !sdata->local->wowlan) { u8 bssid[ETH_ALEN]; struct cfg80211_deauth_request req = { .reason_code = WLAN_REASON_DEAUTH_LEAVING, .bssid = bssid, }; memcpy(bssid, ifmgd->associated->bssid, ETH_ALEN); ieee80211_mgd_deauth(sdata, &req); } sdata_unlock(sdata); } void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; sdata_lock(sdata); if (!ifmgd->associated) { sdata_unlock(sdata); return; } if (sdata->flags & IEEE80211_SDATA_DISCONNECT_RESUME) { sdata->flags &= ~IEEE80211_SDATA_DISCONNECT_RESUME; mlme_dbg(sdata, "driver requested disconnect after resume\n"); ieee80211_sta_connection_lost(sdata, ifmgd->associated->bssid, WLAN_REASON_UNSPECIFIED, true); sdata_unlock(sdata); return; } sdata_unlock(sdata); ieee80211_send_nullfunc(sdata->local, sdata, false); } #endif /* interface setup */ void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_managed *ifmgd; ifmgd = &sdata->u.mgd; INIT_WORK(&ifmgd->monitor_work, ieee80211_sta_monitor_work); INIT_WORK(&ifmgd->chswitch_work, ieee80211_chswitch_work); INIT_WORK(&ifmgd->beacon_connection_loss_work, ieee80211_beacon_connection_loss_work); INIT_WORK(&ifmgd->csa_connection_drop_work, ieee80211_csa_connection_drop_work); INIT_WORK(&ifmgd->request_smps_work, ieee80211_request_smps_mgd_work); INIT_DELAYED_WORK(&ifmgd->tdls_peer_del_work, ieee80211_tdls_peer_del_work); timer_setup(&ifmgd->timer, ieee80211_sta_timer, 0); timer_setup(&ifmgd->bcn_mon_timer, ieee80211_sta_bcn_mon_timer, 0); timer_setup(&ifmgd->conn_mon_timer, ieee80211_sta_conn_mon_timer, 0); timer_setup(&ifmgd->chswitch_timer, ieee80211_chswitch_timer, 0); INIT_DELAYED_WORK(&ifmgd->tx_tspec_wk, ieee80211_sta_handle_tspec_ac_params_wk); ifmgd->flags = 0; ifmgd->powersave = sdata->wdev.ps; ifmgd->uapsd_queues = sdata->local->hw.uapsd_queues; ifmgd->uapsd_max_sp_len = sdata->local->hw.uapsd_max_sp_len; ifmgd->p2p_noa_index = -1; if (sdata->local->hw.wiphy->features & NL80211_FEATURE_DYNAMIC_SMPS) ifmgd->req_smps = IEEE80211_SMPS_AUTOMATIC; else ifmgd->req_smps = IEEE80211_SMPS_OFF; /* Setup TDLS data */ spin_lock_init(&ifmgd->teardown_lock); ifmgd->teardown_skb = NULL; ifmgd->orig_teardown_skb = NULL; } /* scan finished notification */ void ieee80211_mlme_notify_scan_completed(struct ieee80211_local *local) { struct ieee80211_sub_if_data *sdata; /* Restart STA timers */ rcu_read_lock(); list_for_each_entry_rcu(sdata, &local->interfaces, list) { if (ieee80211_sdata_running(sdata)) ieee80211_restart_sta_timer(sdata); } rcu_read_unlock(); } static u8 ieee80211_ht_vht_rx_chains(struct ieee80211_sub_if_data *sdata, struct cfg80211_bss *cbss) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; const u8 *ht_cap_ie, *vht_cap_ie; const struct ieee80211_ht_cap *ht_cap; const struct ieee80211_vht_cap *vht_cap; u8 chains = 1; if (ifmgd->flags & IEEE80211_STA_DISABLE_HT) return chains; ht_cap_ie = ieee80211_bss_get_ie(cbss, WLAN_EID_HT_CAPABILITY); if (ht_cap_ie && ht_cap_ie[1] >= sizeof(*ht_cap)) { ht_cap = (void *)(ht_cap_ie + 2); chains = ieee80211_mcs_to_chains(&ht_cap->mcs); /* * TODO: use "Tx Maximum Number Spatial Streams Supported" and * "Tx Unequal Modulation Supported" fields. 
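 * (ieee80211_mcs_to_chains() infers the chain count from the HT
 * rx_mask, where each byte covers 8 MCS indices and thus one spatial
 * stream; e.g. a mask advertising MCS 0-15 yields 2 chains.)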
*/ } if (ifmgd->flags & IEEE80211_STA_DISABLE_VHT) return chains; vht_cap_ie = ieee80211_bss_get_ie(cbss, WLAN_EID_VHT_CAPABILITY); if (vht_cap_ie && vht_cap_ie[1] >= sizeof(*vht_cap)) { u8 nss; u16 tx_mcs_map; vht_cap = (void *)(vht_cap_ie + 2); tx_mcs_map = le16_to_cpu(vht_cap->supp_mcs.tx_mcs_map); for (nss = 8; nss > 0; nss--) { if (((tx_mcs_map >> (2 * (nss - 1))) & 3) != IEEE80211_VHT_MCS_NOT_SUPPORTED) break; } /* TODO: use "Tx Highest Supported Long GI Data Rate" field? */ chains = max(chains, nss); } return chains; } static bool ieee80211_verify_sta_he_mcs_support(struct ieee80211_supported_band *sband, const struct ieee80211_he_operation *he_op) { const struct ieee80211_sta_he_cap *sta_he_cap = ieee80211_get_he_sta_cap(sband); u16 ap_min_req_set; int i; if (!sta_he_cap || !he_op) return false; ap_min_req_set = le16_to_cpu(he_op->he_mcs_nss_set); /* Need to go over for 80MHz, 160MHz and for 80+80 */ for (i = 0; i < 3; i++) { const struct ieee80211_he_mcs_nss_supp *sta_mcs_nss_supp = &sta_he_cap->he_mcs_nss_supp; u16 sta_mcs_map_rx = le16_to_cpu(((__le16 *)sta_mcs_nss_supp)[2 * i]); u16 sta_mcs_map_tx = le16_to_cpu(((__le16 *)sta_mcs_nss_supp)[2 * i + 1]); u8 nss; bool verified = true; /* * For each band there is a maximum of 8 spatial streams * possible. Each of the sta_mcs_map_* is a 16-bit struct built * of 2 bits per NSS (1-8), with the values defined in enum * ieee80211_he_mcs_support. Need to make sure STA TX and RX * capabilities aren't less than the AP's minimum requirements * for this HE BSS per SS. * It is enough to find one such band that meets the reqs. */ for (nss = 8; nss > 0; nss--) { u8 sta_rx_val = (sta_mcs_map_rx >> (2 * (nss - 1))) & 3; u8 sta_tx_val = (sta_mcs_map_tx >> (2 * (nss - 1))) & 3; u8 ap_val = (ap_min_req_set >> (2 * (nss - 1))) & 3; if (ap_val == IEEE80211_HE_MCS_NOT_SUPPORTED) continue; /* * Make sure the HE AP doesn't require MCSs that aren't * supported by the client */ if (sta_rx_val == IEEE80211_HE_MCS_NOT_SUPPORTED || sta_tx_val == IEEE80211_HE_MCS_NOT_SUPPORTED || (ap_val > sta_rx_val) || (ap_val > sta_tx_val)) { verified = false; break; } } if (verified) return true; } /* If here, STA doesn't meet AP's HE min requirements */ return false; } static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata, struct cfg80211_bss *cbss) { struct ieee80211_local *local = sdata->local; struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; const struct ieee80211_ht_cap *ht_cap = NULL; const struct ieee80211_ht_operation *ht_oper = NULL; const struct ieee80211_vht_operation *vht_oper = NULL; const struct ieee80211_he_operation *he_oper = NULL; struct ieee80211_supported_band *sband; struct cfg80211_chan_def chandef; int ret; u32 i; bool have_80mhz; sband = local->hw.wiphy->bands[cbss->channel->band]; ifmgd->flags &= ~(IEEE80211_STA_DISABLE_40MHZ | IEEE80211_STA_DISABLE_80P80MHZ | IEEE80211_STA_DISABLE_160MHZ); rcu_read_lock(); if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HT) && sband->ht_cap.ht_supported) { const u8 *ht_oper_ie, *ht_cap_ie; ht_oper_ie = ieee80211_bss_get_ie(cbss, WLAN_EID_HT_OPERATION); if (ht_oper_ie && ht_oper_ie[1] >= sizeof(*ht_oper)) ht_oper = (void *)(ht_oper_ie + 2); ht_cap_ie = ieee80211_bss_get_ie(cbss, WLAN_EID_HT_CAPABILITY); if (ht_cap_ie && ht_cap_ie[1] >= sizeof(*ht_cap)) ht_cap = (void *)(ht_cap_ie + 2); if (!ht_cap) { ifmgd->flags |= IEEE80211_STA_DISABLE_HT; ht_oper = NULL; } } if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT) && sband->vht_cap.vht_supported) { const u8 *vht_oper_ie, *vht_cap; vht_oper_ie = 
ieee80211_bss_get_ie(cbss, WLAN_EID_VHT_OPERATION); if (vht_oper_ie && vht_oper_ie[1] >= sizeof(*vht_oper)) vht_oper = (void *)(vht_oper_ie + 2); if (vht_oper && !ht_oper) { vht_oper = NULL; sdata_info(sdata, "AP advertised VHT without HT, disabling both\n"); ifmgd->flags |= IEEE80211_STA_DISABLE_HT; ifmgd->flags |= IEEE80211_STA_DISABLE_VHT; } vht_cap = ieee80211_bss_get_ie(cbss, WLAN_EID_VHT_CAPABILITY); if (!vht_cap || vht_cap[1] < sizeof(struct ieee80211_vht_cap)) { ifmgd->flags |= IEEE80211_STA_DISABLE_VHT; vht_oper = NULL; } } if (!ieee80211_get_he_sta_cap(sband)) ifmgd->flags |= IEEE80211_STA_DISABLE_HE; if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HE)) { const struct cfg80211_bss_ies *ies; const u8 *he_oper_ie; ies = rcu_dereference(cbss->ies); he_oper_ie = cfg80211_find_ext_ie(WLAN_EID_EXT_HE_OPERATION, ies->data, ies->len); if (he_oper_ie && he_oper_ie[1] == ieee80211_he_oper_size(&he_oper_ie[3])) he_oper = (void *)(he_oper_ie + 3); else he_oper = NULL; if (!ieee80211_verify_sta_he_mcs_support(sband, he_oper)) ifmgd->flags |= IEEE80211_STA_DISABLE_HE; } /* Allow VHT if at least one channel on the sband supports 80 MHz */ have_80mhz = false; for (i = 0; i < sband->n_channels; i++) { if (sband->channels[i].flags & (IEEE80211_CHAN_DISABLED | IEEE80211_CHAN_NO_80MHZ)) continue; have_80mhz = true; break; } if (!have_80mhz) ifmgd->flags |= IEEE80211_STA_DISABLE_VHT; ifmgd->flags |= ieee80211_determine_chantype(sdata, sband, cbss->channel, ht_oper, vht_oper, he_oper, &chandef, false); sdata->needed_rx_chains = min(ieee80211_ht_vht_rx_chains(sdata, cbss), local->rx_chains); rcu_read_unlock(); /* will change later if needed */ sdata->smps_mode = IEEE80211_SMPS_OFF; mutex_lock(&local->mtx); /* * If this fails (possibly due to channel context sharing * on incompatible channels, e.g. 80+80 and 160 sharing the * same control channel) try to use a smaller bandwidth. */ ret = ieee80211_vif_use_channel(sdata, &chandef, IEEE80211_CHANCTX_SHARED); /* don't downgrade for 5 and 10 MHz channels, though. */ if (chandef.width == NL80211_CHAN_WIDTH_5 || chandef.width == NL80211_CHAN_WIDTH_10) goto out; while (ret && chandef.width != NL80211_CHAN_WIDTH_20_NOHT) { ifmgd->flags |= ieee80211_chandef_downgrade(&chandef); ret = ieee80211_vif_use_channel(sdata, &chandef, IEEE80211_CHANCTX_SHARED); } out: mutex_unlock(&local->mtx); return ret; } static bool ieee80211_get_dtim(const struct cfg80211_bss_ies *ies, u8 *dtim_count, u8 *dtim_period) { const u8 *tim_ie = cfg80211_find_ie(WLAN_EID_TIM, ies->data, ies->len); const u8 *idx_ie = cfg80211_find_ie(WLAN_EID_MULTI_BSSID_IDX, ies->data, ies->len); const struct ieee80211_tim_ie *tim = NULL; const struct ieee80211_bssid_index *idx; bool valid = tim_ie && tim_ie[1] >= 2; if (valid) tim = (void *)(tim_ie + 2); if (dtim_count) *dtim_count = valid ? tim->dtim_count : 0; if (dtim_period) *dtim_period = valid ? 
tim->dtim_period : 0; /* Check if value is overridden by non-transmitted profile */ if (!idx_ie || idx_ie[1] < 3) return valid; idx = (void *)(idx_ie + 2); if (dtim_count) *dtim_count = idx->dtim_count; if (dtim_period) *dtim_period = idx->dtim_period; return true; } static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata, struct cfg80211_bss *cbss, bool assoc, bool override) { struct ieee80211_local *local = sdata->local; struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; struct ieee80211_bss *bss = (void *)cbss->priv; struct sta_info *new_sta = NULL; struct ieee80211_supported_band *sband; bool have_sta = false; int err; sband = local->hw.wiphy->bands[cbss->channel->band]; if (WARN_ON(!ifmgd->auth_data && !ifmgd->assoc_data)) return -EINVAL; /* If a reconfig is happening, bail out */ if (local->in_reconfig) return -EBUSY; if (assoc) { rcu_read_lock(); have_sta = sta_info_get(sdata, cbss->bssid); rcu_read_unlock(); } if (!have_sta) { new_sta = sta_info_alloc(sdata, cbss->bssid, GFP_KERNEL); if (!new_sta) return -ENOMEM; } /* * Set up the information for the new channel before setting the * new channel. We can't - completely race-free - change the basic * rates bitmap and the channel (sband) that it refers to, but if * we set it up before we at least avoid calling into the driver's * bss_info_changed() method with invalid information (since we do * call that from changing the channel - only for IDLE and perhaps * some others, but ...). * * So to avoid that, just set up all the new information before the * channel, but tell the driver to apply it only afterwards, since * it might need the new channel for that. */ if (new_sta) { u32 rates = 0, basic_rates = 0; bool have_higher_than_11mbit; int min_rate = INT_MAX, min_rate_index = -1; const struct cfg80211_bss_ies *ies; int shift = ieee80211_vif_get_shift(&sdata->vif); ieee80211_get_rates(sband, bss->supp_rates, bss->supp_rates_len, &rates, &basic_rates, &have_higher_than_11mbit, &min_rate, &min_rate_index, shift); /* * This used to be a workaround for basic rates missing * in the association response frame. Now that we no * longer use the basic rates from there, it probably * doesn't happen any more, but keep the workaround so * in case some *other* APs are buggy in different ways * we can connect -- with a warning. */ if (!basic_rates && min_rate_index >= 0) { sdata_info(sdata, "No basic rates, using min rate instead\n"); basic_rates = BIT(min_rate_index); } if (rates) new_sta->sta.supp_rates[cbss->channel->band] = rates; else sdata_info(sdata, "No rates found, keeping mandatory only\n"); sdata->vif.bss_conf.basic_rates = basic_rates; /* cf. 
IEEE 802.11 9.2.12 */ if (cbss->channel->band == NL80211_BAND_2GHZ && have_higher_than_11mbit) sdata->flags |= IEEE80211_SDATA_OPERATING_GMODE; else sdata->flags &= ~IEEE80211_SDATA_OPERATING_GMODE; memcpy(ifmgd->bssid, cbss->bssid, ETH_ALEN); /* set timing information */ sdata->vif.bss_conf.beacon_int = cbss->beacon_interval; rcu_read_lock(); ies = rcu_dereference(cbss->beacon_ies); if (ies) { sdata->vif.bss_conf.sync_tsf = ies->tsf; sdata->vif.bss_conf.sync_device_ts = bss->device_ts_beacon; ieee80211_get_dtim(ies, &sdata->vif.bss_conf.sync_dtim_count, NULL); } else if (!ieee80211_hw_check(&sdata->local->hw, TIMING_BEACON_ONLY)) { ies = rcu_dereference(cbss->proberesp_ies); /* must be non-NULL since beacon IEs were NULL */ sdata->vif.bss_conf.sync_tsf = ies->tsf; sdata->vif.bss_conf.sync_device_ts = bss->device_ts_presp; sdata->vif.bss_conf.sync_dtim_count = 0; } else { sdata->vif.bss_conf.sync_tsf = 0; sdata->vif.bss_conf.sync_device_ts = 0; sdata->vif.bss_conf.sync_dtim_count = 0; } rcu_read_unlock(); } if (new_sta || override) { err = ieee80211_prep_channel(sdata, cbss); if (err) { if (new_sta) sta_info_free(local, new_sta); return -EINVAL; } } if (new_sta) { /* * tell driver about BSSID, basic rates and timing * this was set up above, before setting the channel */ ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID | BSS_CHANGED_BASIC_RATES | BSS_CHANGED_BEACON_INT); if (assoc) sta_info_pre_move_state(new_sta, IEEE80211_STA_AUTH); err = sta_info_insert(new_sta); new_sta = NULL; if (err) { sdata_info(sdata, "failed to insert STA entry for the AP (error %d)\n", err); return err; } } else WARN_ON_ONCE(!ether_addr_equal(ifmgd->bssid, cbss->bssid)); /* Cancel scan to ensure that nothing interferes with connection */ if (local->scanning) ieee80211_scan_cancel(local); return 0; } /* config hooks */ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata, struct cfg80211_auth_request *req) { struct ieee80211_local *local = sdata->local; struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; struct ieee80211_mgd_auth_data *auth_data; u16 auth_alg; int err; bool cont_auth; /* prepare auth data structure */ switch (req->auth_type) { case NL80211_AUTHTYPE_OPEN_SYSTEM: auth_alg = WLAN_AUTH_OPEN; break; case NL80211_AUTHTYPE_SHARED_KEY: if (IS_ERR(local->wep_tx_tfm)) return -EOPNOTSUPP; auth_alg = WLAN_AUTH_SHARED_KEY; break; case NL80211_AUTHTYPE_FT: auth_alg = WLAN_AUTH_FT; break; case NL80211_AUTHTYPE_NETWORK_EAP: auth_alg = WLAN_AUTH_LEAP; break; case NL80211_AUTHTYPE_SAE: auth_alg = WLAN_AUTH_SAE; break; case NL80211_AUTHTYPE_FILS_SK: auth_alg = WLAN_AUTH_FILS_SK; break; case NL80211_AUTHTYPE_FILS_SK_PFS: auth_alg = WLAN_AUTH_FILS_SK_PFS; break; case NL80211_AUTHTYPE_FILS_PK: auth_alg = WLAN_AUTH_FILS_PK; break; default: return -EOPNOTSUPP; } if (ifmgd->assoc_data) return -EBUSY; auth_data = kzalloc(sizeof(*auth_data) + req->auth_data_len + req->ie_len, GFP_KERNEL); if (!auth_data) return -ENOMEM; auth_data->bss = req->bss; if (req->auth_data_len >= 4) { if (req->auth_type == NL80211_AUTHTYPE_SAE) { __le16 *pos = (__le16 *) req->auth_data; auth_data->sae_trans = le16_to_cpu(pos[0]); auth_data->sae_status = le16_to_cpu(pos[1]); } memcpy(auth_data->data, req->auth_data + 4, req->auth_data_len - 4); auth_data->data_len += req->auth_data_len - 4; } /* Check if continuing authentication or trying to authenticate with the * same BSS that we were in the process of authenticating with and avoid * removal and re-addition of the STA entry in * ieee80211_prep_connection(). 
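 * (This matters mostly for multi-round algorithms such as SAE, where
 * cfg80211 calls into this function once for the Commit frame and
 * again for the Confirm frame against the same BSS.)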
*/ cont_auth = ifmgd->auth_data && req->bss == ifmgd->auth_data->bss; if (req->ie && req->ie_len) { memcpy(&auth_data->data[auth_data->data_len], req->ie, req->ie_len); auth_data->data_len += req->ie_len; } if (req->key && req->key_len) { auth_data->key_len = req->key_len; auth_data->key_idx = req->key_idx; memcpy(auth_data->key, req->key, req->key_len); } auth_data->algorithm = auth_alg; /* try to authenticate/probe */ if (ifmgd->auth_data) { if (cont_auth && req->auth_type == NL80211_AUTHTYPE_SAE) { auth_data->peer_confirmed = ifmgd->auth_data->peer_confirmed; } ieee80211_destroy_auth_data(sdata, cont_auth); } /* prep auth_data so we don't go into idle on disassoc */ ifmgd->auth_data = auth_data; /* If this is continuation of an ongoing SAE authentication exchange * (i.e., request to send SAE Confirm) and the peer has already * confirmed, mark authentication completed since we are about to send * out SAE Confirm. */ if (cont_auth && req->auth_type == NL80211_AUTHTYPE_SAE && auth_data->peer_confirmed && auth_data->sae_trans == 2) ieee80211_mark_sta_auth(sdata, req->bss->bssid); if (ifmgd->associated) { u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN]; sdata_info(sdata, "disconnect from AP %pM for new auth to %pM\n", ifmgd->associated->bssid, req->bss->bssid); ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, WLAN_REASON_UNSPECIFIED, false, frame_buf); ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), true, WLAN_REASON_UNSPECIFIED); } sdata_info(sdata, "authenticate with %pM\n", req->bss->bssid); err = ieee80211_prep_connection(sdata, req->bss, cont_auth, false); if (err) goto err_clear; err = ieee80211_auth(sdata); if (err) { sta_info_destroy_addr(sdata, req->bss->bssid); goto err_clear; } /* hold our own reference */ cfg80211_ref_bss(local->hw.wiphy, auth_data->bss); return 0; err_clear: eth_zero_addr(ifmgd->bssid); ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID); ifmgd->auth_data = NULL; mutex_lock(&sdata->local->mtx); ieee80211_vif_release_channel(sdata); mutex_unlock(&sdata->local->mtx); kfree(auth_data); return err; } static bool ieee80211_is_uapsd_blacklisted_by_oui(struct ieee80211_sub_if_data *sdata, struct cfg80211_assoc_request *req) { const struct cfg80211_bss_ies *ies; const struct uapsd_black_list *uapsd_bl; bool result = false; int i; rcu_read_lock(); ies = rcu_dereference(req->bss->ies); uapsd_bl = rcu_dereference(sdata->local->uapsd_black_list); if (!ies || !uapsd_bl) goto out; for (i = 0; i < uapsd_bl->num_oui; i++) { if (cfg80211_find_vendor_ie(uapsd_bl->oui[i], -1, ies->data, ies->len)) { result = true; break; } } out: rcu_read_unlock(); return result; } int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, struct cfg80211_assoc_request *req) { struct ieee80211_local *local = sdata->local; struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; struct ieee80211_bss *bss = (void *)req->bss->priv; struct ieee80211_mgd_assoc_data *assoc_data; const struct cfg80211_bss_ies *beacon_ies; struct ieee80211_supported_band *sband; const u8 *ssidie, *ht_ie, *vht_ie; int i, err; bool override = false; assoc_data = kzalloc(sizeof(*assoc_data) + req->ie_len, GFP_KERNEL); if (!assoc_data) return -ENOMEM; rcu_read_lock(); ssidie = ieee80211_bss_get_ie(req->bss, WLAN_EID_SSID); if (!ssidie) { rcu_read_unlock(); kfree(assoc_data); return -EINVAL; } memcpy(assoc_data->ssid, ssidie + 2, ssidie[1]); assoc_data->ssid_len = ssidie[1]; rcu_read_unlock(); if (ifmgd->associated) { u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN]; sdata_info(sdata, "disconnect from AP %pM for new 
assoc to %pM\n", ifmgd->associated->bssid, req->bss->bssid); ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, WLAN_REASON_UNSPECIFIED, false, frame_buf); ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), true, WLAN_REASON_UNSPECIFIED); } if (ifmgd->auth_data && !ifmgd->auth_data->done) { err = -EBUSY; goto err_free; } if (ifmgd->assoc_data) { err = -EBUSY; goto err_free; } if (ifmgd->auth_data) { bool match; /* keep sta info, bssid if matching */ match = ether_addr_equal(ifmgd->bssid, req->bss->bssid); ieee80211_destroy_auth_data(sdata, match); } /* prepare assoc data */ ifmgd->beacon_crc_valid = false; assoc_data->wmm = bss->wmm_used && (local->hw.queues >= IEEE80211_NUM_ACS); /* * IEEE802.11n does not allow TKIP/WEP as pairwise ciphers in HT mode. * We still associate in non-HT mode (11a/b/g) if any one of these * ciphers is configured as pairwise. * We can set this to true for non-11n hardware, that'll be checked * separately along with the peer capabilities. */ for (i = 0; i < req->crypto.n_ciphers_pairwise; i++) { if (req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP40 || req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_TKIP || req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP104) { ifmgd->flags |= IEEE80211_STA_DISABLE_HT; ifmgd->flags |= IEEE80211_STA_DISABLE_VHT; ifmgd->flags |= IEEE80211_STA_DISABLE_HE; netdev_info(sdata->dev, "disabling HT/VHT/HE due to WEP/TKIP use\n"); } } /* Also disable HT if we don't support it or the AP doesn't use WMM */ sband = local->hw.wiphy->bands[req->bss->channel->band]; if (!sband->ht_cap.ht_supported || local->hw.queues < IEEE80211_NUM_ACS || !bss->wmm_used || ifmgd->flags & IEEE80211_STA_DISABLE_WMM) { ifmgd->flags |= IEEE80211_STA_DISABLE_HT; if (!bss->wmm_used && !(ifmgd->flags & IEEE80211_STA_DISABLE_WMM)) netdev_info(sdata->dev, "disabling HT as WMM/QoS is not supported by the AP\n"); } /* disable VHT if we don't support it or the AP doesn't use WMM */ if (!sband->vht_cap.vht_supported || local->hw.queues < IEEE80211_NUM_ACS || !bss->wmm_used || ifmgd->flags & IEEE80211_STA_DISABLE_WMM) { ifmgd->flags |= IEEE80211_STA_DISABLE_VHT; if (!bss->wmm_used && !(ifmgd->flags & IEEE80211_STA_DISABLE_WMM)) netdev_info(sdata->dev, "disabling VHT as WMM/QoS is not supported by the AP\n"); } memcpy(&ifmgd->ht_capa, &req->ht_capa, sizeof(ifmgd->ht_capa)); memcpy(&ifmgd->ht_capa_mask, &req->ht_capa_mask, sizeof(ifmgd->ht_capa_mask)); memcpy(&ifmgd->vht_capa, &req->vht_capa, sizeof(ifmgd->vht_capa)); memcpy(&ifmgd->vht_capa_mask, &req->vht_capa_mask, sizeof(ifmgd->vht_capa_mask)); if (req->ie && req->ie_len) { memcpy(assoc_data->ie, req->ie, req->ie_len); assoc_data->ie_len = req->ie_len; } if (req->fils_kek) { /* should already be checked in cfg80211 - so warn */ if (WARN_ON(req->fils_kek_len > FILS_MAX_KEK_LEN)) { err = -EINVAL; goto err_free; } memcpy(assoc_data->fils_kek, req->fils_kek, req->fils_kek_len); assoc_data->fils_kek_len = req->fils_kek_len; } if (req->fils_nonces) memcpy(assoc_data->fils_nonces, req->fils_nonces, 2 * FILS_NONCE_LEN); assoc_data->bss = req->bss; if (ifmgd->req_smps == IEEE80211_SMPS_AUTOMATIC) { if (ifmgd->powersave) sdata->smps_mode = IEEE80211_SMPS_DYNAMIC; else sdata->smps_mode = IEEE80211_SMPS_OFF; } else sdata->smps_mode = ifmgd->req_smps; assoc_data->capability = req->bss->capability; assoc_data->supp_rates = bss->supp_rates; assoc_data->supp_rates_len = bss->supp_rates_len; rcu_read_lock(); ht_ie = ieee80211_bss_get_ie(req->bss, WLAN_EID_HT_OPERATION); if (ht_ie && ht_ie[1] >= 
sizeof(struct ieee80211_ht_operation)) assoc_data->ap_ht_param = ((struct ieee80211_ht_operation *)(ht_ie + 2))->ht_param; else ifmgd->flags |= IEEE80211_STA_DISABLE_HT; vht_ie = ieee80211_bss_get_ie(req->bss, WLAN_EID_VHT_CAPABILITY); if (vht_ie && vht_ie[1] >= sizeof(struct ieee80211_vht_cap)) memcpy(&assoc_data->ap_vht_cap, vht_ie + 2, sizeof(struct ieee80211_vht_cap)); else ifmgd->flags |= IEEE80211_STA_DISABLE_VHT; rcu_read_unlock(); if (WARN((sdata->vif.driver_flags & IEEE80211_VIF_SUPPORTS_UAPSD) && ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK), "U-APSD not supported with HW_PS_NULLFUNC_STACK\n")) sdata->vif.driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD; if (bss->wmm_used && bss->uapsd_supported && !ieee80211_is_uapsd_blacklisted_by_oui(sdata, req) && (sdata->vif.driver_flags & IEEE80211_VIF_SUPPORTS_UAPSD)) { assoc_data->uapsd = true; ifmgd->flags |= IEEE80211_STA_UAPSD_ENABLED; } else { assoc_data->uapsd = false; ifmgd->flags &= ~IEEE80211_STA_UAPSD_ENABLED; } if (req->prev_bssid) memcpy(assoc_data->prev_bssid, req->prev_bssid, ETH_ALEN); if (req->use_mfp) { ifmgd->mfp = IEEE80211_MFP_REQUIRED; ifmgd->flags |= IEEE80211_STA_MFP_ENABLED; } else { ifmgd->mfp = IEEE80211_MFP_DISABLED; ifmgd->flags &= ~IEEE80211_STA_MFP_ENABLED; } if (req->flags & ASSOC_REQ_USE_RRM) ifmgd->flags |= IEEE80211_STA_ENABLE_RRM; else ifmgd->flags &= ~IEEE80211_STA_ENABLE_RRM; if (req->crypto.control_port) ifmgd->flags |= IEEE80211_STA_CONTROL_PORT; else ifmgd->flags &= ~IEEE80211_STA_CONTROL_PORT; sdata->control_port_protocol = req->crypto.control_port_ethertype; sdata->control_port_no_encrypt = req->crypto.control_port_no_encrypt; sdata->control_port_over_nl80211 = req->crypto.control_port_over_nl80211; sdata->encrypt_headroom = ieee80211_cs_headroom(local, &req->crypto, sdata->vif.type); /* kick off associate process */ ifmgd->assoc_data = assoc_data; ifmgd->dtim_period = 0; ifmgd->have_beacon = false; /* override HT/VHT configuration only if the AP and we support it */ if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HT)) { struct ieee80211_sta_ht_cap sta_ht_cap; if (req->flags & ASSOC_REQ_DISABLE_HT) override = true; memcpy(&sta_ht_cap, &sband->ht_cap, sizeof(sta_ht_cap)); ieee80211_apply_htcap_overrides(sdata, &sta_ht_cap); /* check for 40 MHz disable override */ if (!(ifmgd->flags & IEEE80211_STA_DISABLE_40MHZ) && sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 && !(sta_ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)) override = true; if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT) && req->flags & ASSOC_REQ_DISABLE_VHT) override = true; } if (req->flags & ASSOC_REQ_DISABLE_HT) { ifmgd->flags |= IEEE80211_STA_DISABLE_HT; ifmgd->flags |= IEEE80211_STA_DISABLE_VHT; } if (req->flags & ASSOC_REQ_DISABLE_VHT) ifmgd->flags |= IEEE80211_STA_DISABLE_VHT; err = ieee80211_prep_connection(sdata, req->bss, true, override); if (err) goto err_clear; rcu_read_lock(); beacon_ies = rcu_dereference(req->bss->beacon_ies); if (ieee80211_hw_check(&sdata->local->hw, NEED_DTIM_BEFORE_ASSOC) && !beacon_ies) { /* * Wait up to one beacon interval ... * should this be more if we miss one? 
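 * (TU_TO_EXP_TIME() below converts the beacon interval from TUs,
 * 1 TU being 1024 microseconds, into an absolute jiffies deadline.)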
*/ sdata_info(sdata, "waiting for beacon from %pM\n", ifmgd->bssid); assoc_data->timeout = TU_TO_EXP_TIME(req->bss->beacon_interval); assoc_data->timeout_started = true; assoc_data->need_beacon = true; } else if (beacon_ies) { const u8 *ie; u8 dtim_count = 0; ieee80211_get_dtim(beacon_ies, &dtim_count, &ifmgd->dtim_period); ifmgd->have_beacon = true; assoc_data->timeout = jiffies; assoc_data->timeout_started = true; if (ieee80211_hw_check(&local->hw, TIMING_BEACON_ONLY)) { sdata->vif.bss_conf.sync_tsf = beacon_ies->tsf; sdata->vif.bss_conf.sync_device_ts = bss->device_ts_beacon; sdata->vif.bss_conf.sync_dtim_count = dtim_count; } ie = cfg80211_find_ext_ie(WLAN_EID_EXT_MULTIPLE_BSSID_CONFIGURATION, beacon_ies->data, beacon_ies->len); if (ie && ie[1] >= 3) sdata->vif.bss_conf.profile_periodicity = ie[4]; ie = cfg80211_find_ie(WLAN_EID_EXT_CAPABILITY, beacon_ies->data, beacon_ies->len); if (ie && ie[1] >= 11 && (ie[10] & WLAN_EXT_CAPA11_EMA_SUPPORT)) sdata->vif.bss_conf.ema_ap = true; } else { assoc_data->timeout = jiffies; assoc_data->timeout_started = true; } rcu_read_unlock(); run_again(sdata, assoc_data->timeout); if (bss->corrupt_data) { char *corrupt_type = "data"; if (bss->corrupt_data & IEEE80211_BSS_CORRUPT_BEACON) { if (bss->corrupt_data & IEEE80211_BSS_CORRUPT_PROBE_RESP) corrupt_type = "beacon and probe response"; else corrupt_type = "beacon"; } else if (bss->corrupt_data & IEEE80211_BSS_CORRUPT_PROBE_RESP) corrupt_type = "probe response"; sdata_info(sdata, "associating with AP with corrupt %s\n", corrupt_type); } return 0; err_clear: eth_zero_addr(ifmgd->bssid); ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID); ifmgd->assoc_data = NULL; err_free: kfree(assoc_data); return err; } int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata, struct cfg80211_deauth_request *req) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN]; bool tx = !req->local_state_change; if (ifmgd->auth_data && ether_addr_equal(ifmgd->auth_data->bss->bssid, req->bssid)) { sdata_info(sdata, "aborting authentication with %pM by local choice (Reason: %u=%s)\n", req->bssid, req->reason_code, ieee80211_get_reason_code_string(req->reason_code)); drv_mgd_prepare_tx(sdata->local, sdata, 0); ieee80211_send_deauth_disassoc(sdata, req->bssid, IEEE80211_STYPE_DEAUTH, req->reason_code, tx, frame_buf); ieee80211_destroy_auth_data(sdata, false); ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), true, req->reason_code); return 0; } if (ifmgd->assoc_data && ether_addr_equal(ifmgd->assoc_data->bss->bssid, req->bssid)) { sdata_info(sdata, "aborting association with %pM by local choice (Reason: %u=%s)\n", req->bssid, req->reason_code, ieee80211_get_reason_code_string(req->reason_code)); drv_mgd_prepare_tx(sdata->local, sdata, 0); ieee80211_send_deauth_disassoc(sdata, req->bssid, IEEE80211_STYPE_DEAUTH, req->reason_code, tx, frame_buf); ieee80211_destroy_assoc_data(sdata, false, true); ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), true, req->reason_code); return 0; } if (ifmgd->associated && ether_addr_equal(ifmgd->associated->bssid, req->bssid)) { sdata_info(sdata, "deauthenticating from %pM by local choice (Reason: %u=%s)\n", req->bssid, req->reason_code, ieee80211_get_reason_code_string(req->reason_code)); ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, req->reason_code, tx, frame_buf); ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), true, req->reason_code); return 0; } return -ENOTCONN; } int 
ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata, struct cfg80211_disassoc_request *req) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; u8 bssid[ETH_ALEN]; u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN]; /* * cfg80211 should catch this ... but it's racy since * we can receive a disassoc frame, process it, hand it * to cfg80211 while that's in a locked section already * trying to tell us that the user wants to disconnect. */ if (ifmgd->associated != req->bss) return -ENOLINK; sdata_info(sdata, "disassociating from %pM by local choice (Reason: %u=%s)\n", req->bss->bssid, req->reason_code, ieee80211_get_reason_code_string(req->reason_code)); memcpy(bssid, req->bss->bssid, ETH_ALEN); ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DISASSOC, req->reason_code, !req->local_state_change, frame_buf); ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), true, req->reason_code); return 0; } void ieee80211_mgd_stop(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; /* * Make sure some work items will not run after this, * they will not do anything but might not have been * cancelled when disconnecting. */ cancel_work_sync(&ifmgd->monitor_work); cancel_work_sync(&ifmgd->beacon_connection_loss_work); cancel_work_sync(&ifmgd->request_smps_work); cancel_work_sync(&ifmgd->csa_connection_drop_work); cancel_work_sync(&ifmgd->chswitch_work); cancel_delayed_work_sync(&ifmgd->tdls_peer_del_work); sdata_lock(sdata); if (ifmgd->assoc_data) { struct cfg80211_bss *bss = ifmgd->assoc_data->bss; ieee80211_destroy_assoc_data(sdata, false, false); cfg80211_assoc_timeout(sdata->dev, bss); } if (ifmgd->auth_data) ieee80211_destroy_auth_data(sdata, false); spin_lock_bh(&ifmgd->teardown_lock); if (ifmgd->teardown_skb) { kfree_skb(ifmgd->teardown_skb); ifmgd->teardown_skb = NULL; ifmgd->orig_teardown_skb = NULL; } kfree(ifmgd->assoc_req_ies); ifmgd->assoc_req_ies = NULL; ifmgd->assoc_req_ies_len = 0; spin_unlock_bh(&ifmgd->teardown_lock); del_timer_sync(&ifmgd->timer); sdata_unlock(sdata); } void ieee80211_cqm_rssi_notify(struct ieee80211_vif *vif, enum nl80211_cqm_rssi_threshold_event rssi_event, s32 rssi_level, gfp_t gfp) { struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); trace_api_cqm_rssi_notify(sdata, rssi_event, rssi_level); cfg80211_cqm_rssi_notify(sdata->dev, rssi_event, rssi_level, gfp); } EXPORT_SYMBOL(ieee80211_cqm_rssi_notify); void ieee80211_cqm_beacon_loss_notify(struct ieee80211_vif *vif, gfp_t gfp) { struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); trace_api_cqm_beacon_loss_notify(sdata->local, sdata); cfg80211_cqm_beacon_loss_notify(sdata->dev, gfp); } EXPORT_SYMBOL(ieee80211_cqm_beacon_loss_notify); backport-iwlwifi-dkms-7906/net/mac80211/ocb.c000066400000000000000000000154541351064634500205430ustar00rootroot00000000000000/* * OCB mode implementation * * Copyright: (c) 2014 Czech Technical University in Prague * (c) 2014 Volkswagen Group Research * Author: Rostislav Lisovy * Funded by: Volkswagen Group Research * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. 
*/
#include <linux/delay.h>
#include <linux/if_ether.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <net/mac80211.h>
#include <asm/unaligned.h>
#include "ieee80211_i.h"
#include "driver-ops.h"
#include "rate.h"
#define IEEE80211_OCB_HOUSEKEEPING_INTERVAL (60 * HZ)
#define IEEE80211_OCB_PEER_INACTIVITY_LIMIT (240 * HZ)
#define IEEE80211_OCB_MAX_STA_ENTRIES 128
/** * enum ocb_deferred_task_flags - mac80211 OCB deferred tasks * @OCB_WORK_HOUSEKEEPING: run the periodic OCB housekeeping tasks * * These flags are used in @wrkq_flags field of &struct ieee80211_if_ocb */ enum ocb_deferred_task_flags { OCB_WORK_HOUSEKEEPING, }; void ieee80211_ocb_rx_no_sta(struct ieee80211_sub_if_data *sdata, const u8 *bssid, const u8 *addr, u32 supp_rates) { struct ieee80211_if_ocb *ifocb = &sdata->u.ocb; struct ieee80211_local *local = sdata->local; struct ieee80211_chanctx_conf *chanctx_conf; struct ieee80211_supported_band *sband; enum nl80211_bss_scan_width scan_width; struct sta_info *sta; int band; /* XXX: Consider removing the least recently used entry and * allow new one to be added. */ if (local->num_sta >= IEEE80211_OCB_MAX_STA_ENTRIES) { net_info_ratelimited("%s: No room for a new OCB STA entry %pM\n", sdata->name, addr); return; } ocb_dbg(sdata, "Adding new OCB station %pM\n", addr); rcu_read_lock(); chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); if (WARN_ON_ONCE(!chanctx_conf)) { rcu_read_unlock(); return; } band = chanctx_conf->def.chan->band; scan_width = cfg80211_chandef_to_scan_width(&chanctx_conf->def); rcu_read_unlock(); sta = sta_info_alloc(sdata, addr, GFP_ATOMIC); if (!sta) return; /* Add only mandatory rates for now */ sband = local->hw.wiphy->bands[band]; sta->sta.supp_rates[band] = ieee80211_mandatory_rates(sband, scan_width); spin_lock(&ifocb->incomplete_lock); list_add(&sta->list, &ifocb->incomplete_stations); spin_unlock(&ifocb->incomplete_lock); ieee80211_queue_work(&local->hw, &sdata->work); } static struct sta_info *ieee80211_ocb_finish_sta(struct sta_info *sta) __acquires(RCU) { struct ieee80211_sub_if_data *sdata = sta->sdata; u8 addr[ETH_ALEN]; memcpy(addr, sta->sta.addr, ETH_ALEN); ocb_dbg(sdata, "Adding new OCB station %pM (dev=%s)\n", addr, sdata->name); sta_info_move_state(sta, IEEE80211_STA_AUTH); sta_info_move_state(sta, IEEE80211_STA_ASSOC); sta_info_move_state(sta, IEEE80211_STA_AUTHORIZED); rate_control_rate_init(sta); /* If it fails, maybe we raced another insertion?
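 * (Editor's note: sta_info_insert_rcu() frees the station on
 * failure, so the sta_info_get() fallback below returns whichever
 * entry won the race.)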
*/ if (sta_info_insert_rcu(sta)) return sta_info_get(sdata, addr); return sta; } static void ieee80211_ocb_housekeeping(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_ocb *ifocb = &sdata->u.ocb; ocb_dbg(sdata, "Running ocb housekeeping\n"); ieee80211_sta_expire(sdata, IEEE80211_OCB_PEER_INACTIVITY_LIMIT); mod_timer(&ifocb->housekeeping_timer, round_jiffies(jiffies + IEEE80211_OCB_HOUSEKEEPING_INTERVAL)); } void ieee80211_ocb_work(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_ocb *ifocb = &sdata->u.ocb; struct sta_info *sta; if (ifocb->joined != true) return; sdata_lock(sdata); spin_lock_bh(&ifocb->incomplete_lock); while (!list_empty(&ifocb->incomplete_stations)) { sta = list_first_entry(&ifocb->incomplete_stations, struct sta_info, list); list_del(&sta->list); spin_unlock_bh(&ifocb->incomplete_lock); ieee80211_ocb_finish_sta(sta); rcu_read_unlock(); spin_lock_bh(&ifocb->incomplete_lock); } spin_unlock_bh(&ifocb->incomplete_lock); if (test_and_clear_bit(OCB_WORK_HOUSEKEEPING, &ifocb->wrkq_flags)) ieee80211_ocb_housekeeping(sdata); sdata_unlock(sdata); } static void ieee80211_ocb_housekeeping_timer(struct timer_list *t) { struct ieee80211_sub_if_data *sdata = from_timer(sdata, t, u.ocb.housekeeping_timer); struct ieee80211_local *local = sdata->local; struct ieee80211_if_ocb *ifocb = &sdata->u.ocb; set_bit(OCB_WORK_HOUSEKEEPING, &ifocb->wrkq_flags); ieee80211_queue_work(&local->hw, &sdata->work); } void ieee80211_ocb_setup_sdata(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_ocb *ifocb = &sdata->u.ocb; timer_setup(&ifocb->housekeeping_timer, ieee80211_ocb_housekeeping_timer, 0); INIT_LIST_HEAD(&ifocb->incomplete_stations); spin_lock_init(&ifocb->incomplete_lock); } int ieee80211_ocb_join(struct ieee80211_sub_if_data *sdata, struct ocb_setup *setup) { struct ieee80211_local *local = sdata->local; struct ieee80211_if_ocb *ifocb = &sdata->u.ocb; u32 changed = BSS_CHANGED_OCB | BSS_CHANGED_BSSID; int err; if (ifocb->joined == true) return -EINVAL; sdata->flags |= IEEE80211_SDATA_OPERATING_GMODE; sdata->smps_mode = IEEE80211_SMPS_OFF; sdata->needed_rx_chains = sdata->local->rx_chains; mutex_lock(&sdata->local->mtx); err = ieee80211_vif_use_channel(sdata, &setup->chandef, IEEE80211_CHANCTX_SHARED); mutex_unlock(&sdata->local->mtx); if (err) return err; ieee80211_bss_info_change_notify(sdata, changed); ifocb->joined = true; set_bit(OCB_WORK_HOUSEKEEPING, &ifocb->wrkq_flags); ieee80211_queue_work(&local->hw, &sdata->work); netif_carrier_on(sdata->dev); return 0; } int ieee80211_ocb_leave(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_ocb *ifocb = &sdata->u.ocb; struct ieee80211_local *local = sdata->local; struct sta_info *sta; ifocb->joined = false; sta_info_flush(sdata); spin_lock_bh(&ifocb->incomplete_lock); while (!list_empty(&ifocb->incomplete_stations)) { sta = list_first_entry(&ifocb->incomplete_stations, struct sta_info, list); list_del(&sta->list); spin_unlock_bh(&ifocb->incomplete_lock); sta_info_free(local, sta); spin_lock_bh(&ifocb->incomplete_lock); } spin_unlock_bh(&ifocb->incomplete_lock); netif_carrier_off(sdata->dev); clear_bit(SDATA_STATE_OFFCHANNEL, &sdata->state); ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_OCB); mutex_lock(&sdata->local->mtx); ieee80211_vif_release_channel(sdata); mutex_unlock(&sdata->local->mtx); skb_queue_purge(&sdata->skb_queue); del_timer_sync(&sdata->u.ocb.housekeeping_timer); /* If the timer fired while we waited for it, it will have * requeued the work. 
Now the work will be running again * but will not rearm the timer again because it checks * whether we are connected to the network or not -- at this * point we shouldn't be anymore. */ return 0; }
backport-iwlwifi-dkms-7906/net/mac80211/offchannel.c000066400000000000000000000645221351064634500221030ustar00rootroot00000000000000/* * Off-channel operation helpers * * Copyright 2003, Jouni Malinen * Copyright 2004, Instant802 Networks, Inc. * Copyright 2005, Devicescape Software, Inc. * Copyright 2006-2007 Jiri Benc * Copyright 2007, Michael Wu * Copyright 2009 Johannes Berg * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */
#include <linux/export.h>
#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "driver-ops.h"
/* * Tell our hardware to disable PS. * Optionally inform AP that we will go to sleep so that it will buffer * the frames while we are doing off-channel work. This is optional * because we *may* be doing work on-operating channel, and want our * hardware unconditionally awake, but still let the AP send us normal frames. */ static void ieee80211_offchannel_ps_enable(struct ieee80211_sub_if_data *sdata) { struct ieee80211_local *local = sdata->local; struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; local->offchannel_ps_enabled = false; /* FIXME: what to do when local->pspolling is true? */ del_timer_sync(&local->dynamic_ps_timer); del_timer_sync(&ifmgd->bcn_mon_timer); del_timer_sync(&ifmgd->conn_mon_timer); cancel_work_sync(&local->dynamic_ps_enable_work); if (local->hw.conf.flags & IEEE80211_CONF_PS) { local->offchannel_ps_enabled = true; local->hw.conf.flags &= ~IEEE80211_CONF_PS; ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); } if (!local->offchannel_ps_enabled || !ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK)) /* * If power save was enabled, no need to send a nullfunc * frame because AP knows that we are sleeping. But if the * hardware is creating the nullfunc frame for power save * status (ie. IEEE80211_HW_PS_NULLFUNC_STACK is not * enabled) and power save was enabled, the firmware just * sent a null frame with power save disabled. So we need * to send a new nullfunc frame to inform the AP that we * are again sleeping. */ ieee80211_send_nullfunc(local, sdata, true); } /* inform AP that we are awake again, unless power save is enabled */ static void ieee80211_offchannel_ps_disable(struct ieee80211_sub_if_data *sdata) { struct ieee80211_local *local = sdata->local; if (!local->ps_sdata) ieee80211_send_nullfunc(local, sdata, false); else if (local->offchannel_ps_enabled) { /* * In !IEEE80211_HW_PS_NULLFUNC_STACK case the hardware * will send a nullfunc frame with the powersave bit set * even though the AP already knows that we are sleeping. * This could be avoided by sending a null frame with power * save bit disabled before enabling the power save, but * this doesn't gain anything. * * When IEEE80211_HW_PS_NULLFUNC_STACK is enabled, no need * to send a nullfunc frame because AP already knows that * we are sleeping, let's just enable power save mode in * hardware. */ /* TODO: Only set hardware if CONF_PS changed? * TODO: Should we set offchannel_ps_enabled to false?
*/ local->hw.conf.flags |= IEEE80211_CONF_PS; ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); } else if (local->hw.conf.dynamic_ps_timeout > 0) { /* * If IEEE80211_CONF_PS was not set and the dynamic_ps_timer * had been running before leaving the operating channel, * restart the timer now and send a nullfunc frame to inform * the AP that we are awake. */ ieee80211_send_nullfunc(local, sdata, false); mod_timer(&local->dynamic_ps_timer, jiffies + msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout)); } ieee80211_sta_reset_beacon_monitor(sdata); ieee80211_sta_reset_conn_monitor(sdata); } void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local) { struct ieee80211_sub_if_data *sdata; if (WARN_ON(local->use_chanctx)) return; /* * notify the AP about us leaving the channel and stop all * STA interfaces. */ /* * Stop queues and transmit all frames queued by the driver * before sending nullfunc to enable powersave at the AP. */ ieee80211_stop_queues_by_reason(&local->hw, IEEE80211_MAX_QUEUE_MAP, IEEE80211_QUEUE_STOP_REASON_OFFCHANNEL, false); ieee80211_flush_queues(local, NULL, false); mutex_lock(&local->iflist_mtx); list_for_each_entry(sdata, &local->interfaces, list) { if (!ieee80211_sdata_running(sdata)) continue; if (sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE || sdata->vif.type == NL80211_IFTYPE_NAN) continue; if (sdata->vif.type != NL80211_IFTYPE_MONITOR) set_bit(SDATA_STATE_OFFCHANNEL, &sdata->state); /* Check to see if we should disable beaconing. */ if (sdata->vif.bss_conf.enable_beacon) { set_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED, &sdata->state); sdata->vif.bss_conf.enable_beacon = false; ieee80211_bss_info_change_notify( sdata, BSS_CHANGED_BEACON_ENABLED); } if (sdata->vif.type == NL80211_IFTYPE_STATION && sdata->u.mgd.associated) ieee80211_offchannel_ps_enable(sdata); } mutex_unlock(&local->iflist_mtx); } void ieee80211_offchannel_return(struct ieee80211_local *local) { struct ieee80211_sub_if_data *sdata; if (WARN_ON(local->use_chanctx)) return; mutex_lock(&local->iflist_mtx); list_for_each_entry(sdata, &local->interfaces, list) { if (sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE) continue; if (sdata->vif.type != NL80211_IFTYPE_MONITOR) clear_bit(SDATA_STATE_OFFCHANNEL, &sdata->state); if (!ieee80211_sdata_running(sdata)) continue; /* Tell AP we're back */ if (sdata->vif.type == NL80211_IFTYPE_STATION && sdata->u.mgd.associated) ieee80211_offchannel_ps_disable(sdata); if (test_and_clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED, &sdata->state)) { sdata->vif.bss_conf.enable_beacon = true; ieee80211_bss_info_change_notify( sdata, BSS_CHANGED_BEACON_ENABLED); } } mutex_unlock(&local->iflist_mtx); ieee80211_wake_queues_by_reason(&local->hw, IEEE80211_MAX_QUEUE_MAP, IEEE80211_QUEUE_STOP_REASON_OFFCHANNEL, false); } static void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc) { /* was never transmitted */ if (roc->frame) { cfg80211_mgmt_tx_status(&roc->sdata->wdev, roc->mgmt_tx_cookie, roc->frame->data, roc->frame->len, false, GFP_KERNEL); ieee80211_free_txskb(&roc->sdata->local->hw, roc->frame); } if (!roc->mgmt_tx_cookie) cfg80211_remain_on_channel_expired(&roc->sdata->wdev, roc->cookie, roc->chan, GFP_KERNEL); list_del(&roc->list); kfree(roc); } static unsigned long ieee80211_end_finished_rocs(struct ieee80211_local *local, unsigned long now) { struct ieee80211_roc_work *roc, *tmp; long remaining_dur_min = LONG_MAX; lockdep_assert_held(&local->mtx); list_for_each_entry_safe(roc, tmp, &local->roc_list, list) { long remaining; if (!roc->started) break; 
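/*
 * Editor's note (worked example, assuming HZ == 1000): the
 * computation below is plain jiffies arithmetic.  A ROC with
 * start_time == 10000 and duration == 200 (ms) checked at
 * now == 10150 yields
 *
 *	remaining = 10000 + msecs_to_jiffies(200) - 10150 = 50
 *
 * so roughly 50 ms are left; once now passes 10200 the result
 * drops to <= 0 and the ROC is destroyed.
 */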
remaining = roc->start_time + msecs_to_jiffies(roc->duration) - now; /* In case of HW ROC, it is possible that the HW finished the * ROC session before the actual requested time. In such a case * end the ROC session (disregarding the remaining time). */ if (roc->abort || roc->hw_begun || remaining <= 0) ieee80211_roc_notify_destroy(roc); else remaining_dur_min = min(remaining_dur_min, remaining); } return remaining_dur_min; } static bool ieee80211_recalc_sw_work(struct ieee80211_local *local, unsigned long now) { long dur = ieee80211_end_finished_rocs(local, now); if (dur == LONG_MAX) return false; mod_delayed_work(local->workqueue, &local->roc_work, dur); return true; } static void ieee80211_handle_roc_started(struct ieee80211_roc_work *roc, unsigned long start_time) { if (WARN_ON(roc->notified)) return; roc->start_time = start_time; roc->started = true; if (roc->mgmt_tx_cookie) { if (!WARN_ON(!roc->frame)) { ieee80211_tx_skb_tid_band(roc->sdata, roc->frame, 7, roc->chan->band, 0); roc->frame = NULL; } } else { cfg80211_ready_on_channel(&roc->sdata->wdev, roc->cookie, roc->chan, roc->req_duration, GFP_KERNEL); } roc->notified = true; } static void ieee80211_hw_roc_start(struct work_struct *work) { struct ieee80211_local *local = container_of(work, struct ieee80211_local, hw_roc_start); struct ieee80211_roc_work *roc; mutex_lock(&local->mtx); list_for_each_entry(roc, &local->roc_list, list) { if (!roc->started) break; roc->hw_begun = true; ieee80211_handle_roc_started(roc, local->hw_roc_start_time); } mutex_unlock(&local->mtx); } void ieee80211_ready_on_channel(struct ieee80211_hw *hw) { struct ieee80211_local *local = hw_to_local(hw); local->hw_roc_start_time = jiffies; trace_api_ready_on_channel(local); ieee80211_queue_work(hw, &local->hw_roc_start); } EXPORT_SYMBOL_GPL(ieee80211_ready_on_channel); static void _ieee80211_start_next_roc(struct ieee80211_local *local) { struct ieee80211_roc_work *roc, *tmp; enum ieee80211_roc_type type; u32 min_dur, max_dur; lockdep_assert_held(&local->mtx); if (WARN_ON(list_empty(&local->roc_list))) return; roc = list_first_entry(&local->roc_list, struct ieee80211_roc_work, list); if (WARN_ON(roc->started)) return; min_dur = roc->duration; max_dur = roc->duration; type = roc->type; list_for_each_entry(tmp, &local->roc_list, list) { if (tmp == roc) continue; if (tmp->sdata != roc->sdata || tmp->chan != roc->chan) break; max_dur = max(tmp->duration, max_dur); min_dur = min(tmp->duration, min_dur); type = max(tmp->type, type); } if (local->ops->remain_on_channel) { int ret = drv_remain_on_channel(local, roc->sdata, roc->chan, max_dur, type); if (ret) { wiphy_warn(local->hw.wiphy, "failed to start next HW ROC (%d)\n", ret); /* * queue the work struct again to avoid recursion * when multiple failures occur */ list_for_each_entry(tmp, &local->roc_list, list) { if (tmp->sdata != roc->sdata || tmp->chan != roc->chan) break; tmp->started = true; tmp->abort = true; } ieee80211_queue_work(&local->hw, &local->hw_roc_done); return; } /* we'll notify about the start once the HW calls back */ list_for_each_entry(tmp, &local->roc_list, list) { if (tmp->sdata != roc->sdata || tmp->chan != roc->chan) break; tmp->started = true; } } else { /* If actually operating on the desired channel (with at least * 20 MHz channel width) don't stop all the operations but still * treat it as though the ROC operation started properly, so * other ROC operations won't interfere with this one. 
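 * (Editor's note: concretely, a ROC that targets the current
 * operating channel at normal width keeps the queues running, while
 * 5 and 10 MHz operating widths always take the full off-channel
 * path -- exactly the three conditions tested right below.)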
*/ roc->on_channel = roc->chan == local->_oper_chandef.chan && local->_oper_chandef.width != NL80211_CHAN_WIDTH_5 && local->_oper_chandef.width != NL80211_CHAN_WIDTH_10; /* start this ROC */ ieee80211_recalc_idle(local); if (!roc->on_channel) { ieee80211_offchannel_stop_vifs(local); local->tmp_channel = roc->chan; ieee80211_hw_config(local, 0); } ieee80211_queue_delayed_work(&local->hw, &local->roc_work, msecs_to_jiffies(min_dur)); /* tell userspace or send frame(s) */ list_for_each_entry(tmp, &local->roc_list, list) { if (tmp->sdata != roc->sdata || tmp->chan != roc->chan) break; tmp->on_channel = roc->on_channel; ieee80211_handle_roc_started(tmp, jiffies); } } } void ieee80211_start_next_roc(struct ieee80211_local *local) { struct ieee80211_roc_work *roc; lockdep_assert_held(&local->mtx); if (list_empty(&local->roc_list)) { ieee80211_run_deferred_scan(local); return; } /* defer roc if driver is not started (i.e. during reconfig) */ if (local->in_reconfig) return; roc = list_first_entry(&local->roc_list, struct ieee80211_roc_work, list); if (WARN_ON_ONCE(roc->started)) return; if (local->ops->remain_on_channel) { _ieee80211_start_next_roc(local); } else { /* delay it a bit */ ieee80211_queue_delayed_work(&local->hw, &local->roc_work, round_jiffies_relative(HZ/2)); } } static void __ieee80211_roc_work(struct ieee80211_local *local) { struct ieee80211_roc_work *roc; bool on_channel; lockdep_assert_held(&local->mtx); if (WARN_ON(local->ops->remain_on_channel)) return; roc = list_first_entry_or_null(&local->roc_list, struct ieee80211_roc_work, list); if (!roc) return; if (!roc->started) { WARN_ON(local->use_chanctx); _ieee80211_start_next_roc(local); } else { on_channel = roc->on_channel; if (ieee80211_recalc_sw_work(local, jiffies)) return; /* careful - roc pointer became invalid during recalc */ if (!on_channel) { ieee80211_flush_queues(local, NULL, false); local->tmp_channel = NULL; ieee80211_hw_config(local, 0); ieee80211_offchannel_return(local); } ieee80211_recalc_idle(local); ieee80211_start_next_roc(local); } } static void ieee80211_roc_work(struct work_struct *work) { struct ieee80211_local *local = container_of(work, struct ieee80211_local, roc_work.work); mutex_lock(&local->mtx); __ieee80211_roc_work(local); mutex_unlock(&local->mtx); } static void ieee80211_hw_roc_done(struct work_struct *work) { struct ieee80211_local *local = container_of(work, struct ieee80211_local, hw_roc_done); mutex_lock(&local->mtx); ieee80211_end_finished_rocs(local, jiffies); /* if there's another roc, start it now */ ieee80211_start_next_roc(local); mutex_unlock(&local->mtx); } void ieee80211_remain_on_channel_expired(struct ieee80211_hw *hw) { struct ieee80211_local *local = hw_to_local(hw); trace_api_remain_on_channel_expired(local); ieee80211_queue_work(hw, &local->hw_roc_done); } EXPORT_SYMBOL_GPL(ieee80211_remain_on_channel_expired); static bool ieee80211_coalesce_hw_started_roc(struct ieee80211_local *local, struct ieee80211_roc_work *new_roc, struct ieee80211_roc_work *cur_roc) { unsigned long now = jiffies; unsigned long remaining; if (WARN_ON(!cur_roc->started)) return false; /* if it was scheduled in the hardware, but not started yet, * we can only combine if the older one had a longer duration */ if (!cur_roc->hw_begun && new_roc->duration > cur_roc->duration) return false; remaining = cur_roc->start_time + msecs_to_jiffies(cur_roc->duration) - now; /* if it doesn't fit entirely, schedule a new one */ if (new_roc->duration > jiffies_to_msecs(remaining)) return false; /* add just after the 
current one so we combine their finish later */ list_add(&new_roc->list, &cur_roc->list); /* if the existing one has already begun then let this one also * begin, otherwise they'll both be marked properly by the work * struct that runs once the driver notifies us of the beginning */ if (cur_roc->hw_begun) { new_roc->hw_begun = true; ieee80211_handle_roc_started(new_roc, now); } return true; } static int ieee80211_start_roc_work(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct ieee80211_channel *channel, unsigned int duration, u64 *cookie, struct sk_buff *txskb, enum ieee80211_roc_type type) { struct ieee80211_roc_work *roc, *tmp; bool queued = false, combine_started = true; int ret; lockdep_assert_held(&local->mtx); if (local->use_chanctx && !local->ops->remain_on_channel) return -EOPNOTSUPP; roc = kzalloc(sizeof(*roc), GFP_KERNEL); if (!roc) return -ENOMEM; /* * If the duration is zero, then the driver * wouldn't actually do anything. Set it to * 10 for now. * * TODO: cancel the off-channel operation * when we get the SKB's TX status and * the wait time was zero before. */ if (!duration) duration = 10; roc->chan = channel; roc->duration = duration; roc->req_duration = duration; roc->frame = txskb; roc->type = type; roc->sdata = sdata; /* * cookie is either the roc cookie (for normal roc) * or the SKB (for mgmt TX) */ if (!txskb) { roc->cookie = ieee80211_mgmt_tx_cookie(local); *cookie = roc->cookie; } else { roc->mgmt_tx_cookie = *cookie; } /* if there's no need to queue, handle it immediately */ if (list_empty(&local->roc_list) && !local->scanning && !ieee80211_is_radar_required(local)) { /* if not HW assist, just queue & schedule work */ if (!local->ops->remain_on_channel) { list_add_tail(&roc->list, &local->roc_list); ieee80211_queue_delayed_work(&local->hw, &local->roc_work, 0); } else { /* otherwise actually kick it off here * (for error handling) */ ret = drv_remain_on_channel(local, sdata, channel, duration, type); if (ret) { kfree(roc); return ret; } roc->started = true; list_add_tail(&roc->list, &local->roc_list); } return 0; } /* otherwise handle queueing */ list_for_each_entry(tmp, &local->roc_list, list) { if (tmp->chan != channel || tmp->sdata != sdata) continue; /* * Extend this ROC if possible: If it hasn't started, add * just after the new one to combine. */ if (!tmp->started) { list_add(&roc->list, &tmp->list); queued = true; break; } if (!combine_started) continue; if (!local->ops->remain_on_channel) { /* If there's no hardware remain-on-channel, and * doing so won't push us over the maximum r-o-c * we allow, then we can just add the new one to * the list and mark it as having started now. * If it would push over the limit, don't try to * combine with other started ones (that haven't * been running as long) but potentially sort it * with others that had the same fate. 
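 * (Editor's worked example, assuming
 * wiphy->max_remain_on_channel_duration == 500: a new 300 ms request
 * may join a software ROC that has already run for 150 ms, since
 * 150 + 300 <= 500, but not one that has already run for 250 ms.)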
*/ unsigned long now = jiffies; u32 elapsed = jiffies_to_msecs(now - tmp->start_time); struct wiphy *wiphy = local->hw.wiphy; u32 max_roc = wiphy->max_remain_on_channel_duration; if (elapsed + roc->duration > max_roc) { combine_started = false; continue; } list_add(&roc->list, &tmp->list); queued = true; roc->on_channel = tmp->on_channel; ieee80211_handle_roc_started(roc, now); ieee80211_recalc_sw_work(local, now); break; } queued = ieee80211_coalesce_hw_started_roc(local, roc, tmp); if (queued) break; /* if it wasn't queued, perhaps it can be combined with * another that also couldn't get combined previously, * but no need to check for already started ones, since * that can't work. */ combine_started = false; } if (!queued) list_add_tail(&roc->list, &local->roc_list); return 0; } int ieee80211_remain_on_channel(struct wiphy *wiphy, struct wireless_dev *wdev, struct ieee80211_channel *chan, unsigned int duration, u64 *cookie) { struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev); struct ieee80211_local *local = sdata->local; int ret; mutex_lock(&local->mtx); ret = ieee80211_start_roc_work(local, sdata, chan, duration, cookie, NULL, IEEE80211_ROC_TYPE_NORMAL); mutex_unlock(&local->mtx); return ret; } static int ieee80211_cancel_roc(struct ieee80211_local *local, u64 cookie, bool mgmt_tx) { struct ieee80211_roc_work *roc, *tmp, *found = NULL; int ret; if (!cookie) return -ENOENT; flush_work(&local->hw_roc_start); mutex_lock(&local->mtx); list_for_each_entry_safe(roc, tmp, &local->roc_list, list) { if (!mgmt_tx && roc->cookie != cookie) continue; else if (mgmt_tx && roc->mgmt_tx_cookie != cookie) continue; found = roc; break; } if (!found) { mutex_unlock(&local->mtx); return -ENOENT; } if (!found->started) { ieee80211_roc_notify_destroy(found); goto out_unlock; } if (local->ops->remain_on_channel) { ret = drv_cancel_remain_on_channel(local); if (WARN_ON_ONCE(ret)) { mutex_unlock(&local->mtx); return ret; } /* TODO: * if multiple items were combined here then we really shouldn't * cancel them all - we should wait for as much time as needed * for the longest remaining one, and only then cancel ... 
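 * (Editor's note: as the loop below stands, cancelling one entry of
 * a combined set destroys every started ROC in the list, so a short
 * request that was merged into a longer one ends the longer one
 * early as well.)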
*/ list_for_each_entry_safe(roc, tmp, &local->roc_list, list) { if (!roc->started) break; if (roc == found) found = NULL; ieee80211_roc_notify_destroy(roc); } /* that really must not happen - it was started */ WARN_ON(found); ieee80211_start_next_roc(local); } else { /* go through work struct to return to the operating channel */ found->abort = true; mod_delayed_work(local->workqueue, &local->roc_work, 0); } out_unlock: mutex_unlock(&local->mtx); return 0; } int ieee80211_cancel_remain_on_channel(struct wiphy *wiphy, struct wireless_dev *wdev, u64 cookie) { struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev); struct ieee80211_local *local = sdata->local; return ieee80211_cancel_roc(local, cookie, false); } int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, struct cfg80211_mgmt_tx_params *params, u64 *cookie) { struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev); struct ieee80211_local *local = sdata->local; struct sk_buff *skb; struct sta_info *sta; const struct ieee80211_mgmt *mgmt = (void *)params->buf; bool need_offchan = false; u32 flags; int ret; u8 *data; if (params->dont_wait_for_ack) flags = IEEE80211_TX_CTL_NO_ACK; else flags = IEEE80211_TX_INTFL_NL80211_FRAME_TX | IEEE80211_TX_CTL_REQ_TX_STATUS; if (params->no_cck) flags |= IEEE80211_TX_CTL_NO_CCK_RATE; switch (sdata->vif.type) { case NL80211_IFTYPE_ADHOC: if (!sdata->vif.bss_conf.ibss_joined) need_offchan = true; #ifdef CPTCFG_MAC80211_MESH /* fall through */ case NL80211_IFTYPE_MESH_POINT: if (ieee80211_vif_is_mesh(&sdata->vif) && !sdata->u.mesh.mesh_id_len) need_offchan = true; #endif /* fall through */ case NL80211_IFTYPE_AP: case NL80211_IFTYPE_AP_VLAN: case NL80211_IFTYPE_P2P_GO: if (sdata->vif.type != NL80211_IFTYPE_ADHOC && !ieee80211_vif_is_mesh(&sdata->vif) && !rcu_access_pointer(sdata->bss->beacon)) need_offchan = true; if (!ieee80211_is_action(mgmt->frame_control) || mgmt->u.action.category == WLAN_CATEGORY_PUBLIC || mgmt->u.action.category == WLAN_CATEGORY_SELF_PROTECTED || mgmt->u.action.category == WLAN_CATEGORY_SPECTRUM_MGMT) break; rcu_read_lock(); sta = sta_info_get_bss(sdata, mgmt->da); rcu_read_unlock(); if (!sta) return -ENOLINK; break; case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_P2P_CLIENT: sdata_lock(sdata); if (!sdata->u.mgd.associated || (params->offchan && params->wait && local->ops->remain_on_channel && memcmp(sdata->u.mgd.associated->bssid, mgmt->bssid, ETH_ALEN))) need_offchan = true; sdata_unlock(sdata); break; case NL80211_IFTYPE_P2P_DEVICE: need_offchan = true; break; case NL80211_IFTYPE_NAN: default: return -EOPNOTSUPP; } /* configurations requiring offchan cannot work if no channel has been * specified */ if (need_offchan && !params->chan) return -EINVAL; mutex_lock(&local->mtx); /* Check if the operating channel is the requested channel */ if (!need_offchan) { struct ieee80211_chanctx_conf *chanctx_conf; rcu_read_lock(); chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); if (chanctx_conf) { need_offchan = params->chan && (params->chan != chanctx_conf->def.chan); } else if (!params->chan) { ret = -EINVAL; rcu_read_unlock(); goto out_unlock; } else { need_offchan = true; } rcu_read_unlock(); } if (need_offchan && !params->offchan) { ret = -EBUSY; goto out_unlock; } skb = dev_alloc_skb(local->hw.extra_tx_headroom + params->len); if (!skb) { ret = -ENOMEM; goto out_unlock; } skb_reserve(skb, local->hw.extra_tx_headroom); data = skb_put_data(skb, params->buf, params->len); /* Update CSA counters */ if (sdata->vif.csa_active && 
(sdata->vif.type == NL80211_IFTYPE_AP || sdata->vif.type == NL80211_IFTYPE_MESH_POINT || sdata->vif.type == NL80211_IFTYPE_ADHOC) && params->n_csa_offsets) { int i; struct beacon_data *beacon = NULL; rcu_read_lock(); if (sdata->vif.type == NL80211_IFTYPE_AP) beacon = rcu_dereference(sdata->u.ap.beacon); else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) beacon = rcu_dereference(sdata->u.ibss.presp); else if (ieee80211_vif_is_mesh(&sdata->vif)) beacon = rcu_dereference(sdata->u.mesh.beacon); if (beacon) for (i = 0; i < params->n_csa_offsets; i++) data[params->csa_offsets[i]] = beacon->csa_current_counter; rcu_read_unlock(); } IEEE80211_SKB_CB(skb)->flags = flags; skb->dev = sdata->dev; if (!params->dont_wait_for_ack) { /* make a copy to preserve the frame contents * in case of encryption. */ ret = ieee80211_attach_ack_skb(local, skb, cookie, GFP_KERNEL); if (ret) { kfree_skb(skb); goto out_unlock; } } else { /* Assign a dummy non-zero cookie, it's not sent to * userspace in this case but we rely on its value * internally in the need_offchan case to distinguish * mgmt-tx from remain-on-channel. */ *cookie = 0xffffffff; } if (!need_offchan) { ieee80211_tx_skb(sdata, skb); ret = 0; goto out_unlock; } IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_TX_OFFCHAN | IEEE80211_TX_INTFL_OFFCHAN_TX_OK; if (ieee80211_hw_check(&local->hw, QUEUE_CONTROL)) IEEE80211_SKB_CB(skb)->hw_queue = local->hw.offchannel_tx_hw_queue; /* This will handle all kinds of coalescing and immediate TX */ ret = ieee80211_start_roc_work(local, sdata, params->chan, params->wait, cookie, skb, IEEE80211_ROC_TYPE_MGMT_TX); if (ret) ieee80211_free_txskb(&local->hw, skb); out_unlock: mutex_unlock(&local->mtx); return ret; } int ieee80211_mgmt_tx_cancel_wait(struct wiphy *wiphy, struct wireless_dev *wdev, u64 cookie) { struct ieee80211_local *local = wiphy_priv(wiphy); return ieee80211_cancel_roc(local, cookie, true); } void ieee80211_roc_setup(struct ieee80211_local *local) { INIT_WORK(&local->hw_roc_start, ieee80211_hw_roc_start); INIT_WORK(&local->hw_roc_done, ieee80211_hw_roc_done); INIT_DELAYED_WORK(&local->roc_work, ieee80211_roc_work); INIT_LIST_HEAD(&local->roc_list); } void ieee80211_roc_purge(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata) { struct ieee80211_roc_work *roc, *tmp; bool work_to_do = false; mutex_lock(&local->mtx); list_for_each_entry_safe(roc, tmp, &local->roc_list, list) { if (sdata && roc->sdata != sdata) continue; if (roc->started) { if (local->ops->remain_on_channel) { /* can race, so ignore return value */ drv_cancel_remain_on_channel(local); ieee80211_roc_notify_destroy(roc); } else { roc->abort = true; work_to_do = true; } } else { ieee80211_roc_notify_destroy(roc); } } if (work_to_do) __ieee80211_roc_work(local); mutex_unlock(&local->mtx); }
backport-iwlwifi-dkms-7906/net/mac80211/packet_filtering.h000066400000000000000000000063271351064634500233160ustar00rootroot00000000000000/* * Copyright 2013-2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Packet filtering * * Checks if different types of packets should be filtered. * 1. Gratuitous ARP * 2. Unsolicited Neighbor Advertisement * 3.
Frames Encrypted using the GTK */ #ifndef PACKET_FILTER_H #define PACKET_FILTER_H #include #include #include #include #include /** * ieee80211_is_shared_gtk - packet is encrypted using the shared GTK * @skb: the input packet, must be an ethernet frame already * * Return: %true if the packet is encrypted using the GTK. * This is used to drop packets that shouldn't occur because the AP implements * a proxy service. */ static inline bool ieee80211_is_shared_gtk(struct sk_buff *skb) { const struct ethhdr *eth = (void *)skb->data; const struct iphdr *ipv4; const struct ipv6hdr *ipv6; const struct in6_addr *saddr; struct fib_result res; struct flowi4 fl4; switch (eth->h_proto) { case cpu_to_be16(ETH_P_IP): ipv4 = (void *)(eth + 1); fl4.daddr = ipv4->daddr; fl4.saddr = ipv4->saddr;
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 2, 0)
if (!fib_lookup(dev_net(skb->dev), &fl4, &res))
#else
if (!fib_lookup(dev_net(skb->dev), &fl4, &res, 0))
#endif
if (res.type == RTN_MULTICAST || res.type == RTN_BROADCAST) return true; break; case cpu_to_be16(ETH_P_IPV6): ipv6 = (void *)(eth + 1); saddr = &ipv6->saddr; if (ipv6_addr_is_multicast(saddr)) return true; } return false; } /** * ieee80211_is_gratuitous_arp_unsolicited_na - packet is grat. ARP/unsol. NA * @skb: the input packet, must be an ethernet frame already * * Return: %true if the packet is a gratuitous ARP or unsolicited NA packet. * This is used to drop packets that shouldn't occur because the AP implements * a proxy service. */ static inline bool ieee80211_is_gratuitous_arp_unsolicited_na(struct sk_buff *skb) { const struct ethhdr *eth = (void *)skb->data; const struct { struct arphdr hdr; u8 ar_sha[ETH_ALEN]; u8 ar_sip[4]; u8 ar_tha[ETH_ALEN]; u8 ar_tip[4]; } __packed *arp; const struct ipv6hdr *ipv6; const struct icmp6hdr *icmpv6; switch (eth->h_proto) { case cpu_to_be16(ETH_P_ARP): /* can't say - but will probably be dropped later anyway */ if (!pskb_may_pull(skb, sizeof(*eth) + sizeof(*arp))) return false; arp = (void *)(eth + 1); if ((arp->hdr.ar_op == cpu_to_be16(ARPOP_REPLY) || arp->hdr.ar_op == cpu_to_be16(ARPOP_REQUEST)) && !memcmp(arp->ar_sip, arp->ar_tip, sizeof(arp->ar_sip))) return true; break; case cpu_to_be16(ETH_P_IPV6): /* can't say - but will probably be dropped later anyway */ if (!pskb_may_pull(skb, sizeof(*eth) + sizeof(*ipv6) + sizeof(*icmpv6))) return false; ipv6 = (void *)(eth + 1); icmpv6 = (void *)(ipv6 + 1); if (icmpv6->icmp6_type == NDISC_NEIGHBOUR_ADVERTISEMENT && !icmpv6->icmp6_solicited) return true; break; default: /* * no need to support other protocols, proxy service isn't * specified for any others */ break; } return false; } #endif /* PACKET_FILTER_H */
backport-iwlwifi-dkms-7906/net/mac80211/pm.c000066400000000000000000000127541351064634500204140ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0
#include <net/mac80211.h>
#include <net/rtnetlink.h>
#include "ieee80211_i.h" #include "mesh.h" #include "driver-ops.h" #include "led.h" static void ieee80211_sched_scan_cancel(struct ieee80211_local *local) { if (ieee80211_request_sched_scan_stop(local)) return; cfg80211_sched_scan_stopped_rtnl(local->hw.wiphy, 0); } int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) { struct ieee80211_local *local = hw_to_local(hw); struct ieee80211_sub_if_data *sdata; struct sta_info *sta; if (!local->open_count) goto suspend; ieee80211_scan_cancel(local); ieee80211_dfs_cac_cancel(local); ieee80211_roc_purge(local, NULL); ieee80211_del_virtual_monitor(local); if (ieee80211_hw_check(hw, AMPDU_AGGREGATION) && !(wowlan && wowlan->any)) {
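/*
 * Editor's note (illustrative, not driver logic): WLAN_STA_BLOCK_BA,
 * set just below, keeps new aggregation sessions from being started
 * while the device sleeps; note how the drv_suspend() error path
 * further down clears the same flag again:
 *
 *	set_sta_flag(sta, WLAN_STA_BLOCK_BA);    // before suspend
 *	...
 *	clear_sta_flag(sta, WLAN_STA_BLOCK_BA);  // drv_suspend() failed
 */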
mutex_lock(&local->sta_mtx); list_for_each_entry(sta, &local->sta_list, list) { set_sta_flag(sta, WLAN_STA_BLOCK_BA); ieee80211_sta_tear_down_BA_sessions( sta, AGG_STOP_LOCAL_REQUEST); } mutex_unlock(&local->sta_mtx); } /* keep sched_scan only in case of 'any' trigger */ if (!(wowlan && wowlan->any)) ieee80211_sched_scan_cancel(local); ieee80211_stop_queues_by_reason(hw, IEEE80211_MAX_QUEUE_MAP, IEEE80211_QUEUE_STOP_REASON_SUSPEND, false); /* flush out all packets */ synchronize_net(); ieee80211_flush_queues(local, NULL, true); local->quiescing = true; /* make quiescing visible to timers everywhere */ mb(); flush_workqueue(local->workqueue); /* Don't try to run timers while suspended. */ del_timer_sync(&local->sta_cleanup); /* * Note that this particular timer doesn't need to be * restarted at resume. */ cancel_work_sync(&local->dynamic_ps_enable_work); del_timer_sync(&local->dynamic_ps_timer); local->wowlan = wowlan; if (local->wowlan) { int err; /* Drivers don't expect to suspend while some operations like * authenticating or associating are in progress. It doesn't * make sense anyway to accept that, since the authentication * or association would never finish since the driver can't do * that on its own. * Thus, clean up in-progress auth/assoc first. */ list_for_each_entry(sdata, &local->interfaces, list) { if (!ieee80211_sdata_running(sdata)) continue; if (sdata->vif.type != NL80211_IFTYPE_STATION) continue; ieee80211_mgd_quiesce(sdata); /* If suspended during TX in progress, and wowlan * is enabled (connection will be active) there * can be a race where the driver is put out * of power-save due to TX and during suspend * dynamic_ps_timer is cancelled and TX packet * is flushed, leaving the driver in ACTIVE even * after resuming until dynamic_ps_timer puts * driver back in DOZE. 
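 * (Editor's note: forcing IEEE80211_CONF_PS below closes that
 * window, so the device does not stay fully awake for the whole
 * WoWLAN period just because a TX raced with suspend.)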
*/ if (sdata->u.mgd.associated && sdata->u.mgd.powersave && !(local->hw.conf.flags & IEEE80211_CONF_PS)) { local->hw.conf.flags |= IEEE80211_CONF_PS; ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); } } err = drv_suspend(local, wowlan); if (err < 0) { local->quiescing = false; local->wowlan = false; if (ieee80211_hw_check(hw, AMPDU_AGGREGATION)) { mutex_lock(&local->sta_mtx); list_for_each_entry(sta, &local->sta_list, list) { clear_sta_flag(sta, WLAN_STA_BLOCK_BA); } mutex_unlock(&local->sta_mtx); } ieee80211_wake_queues_by_reason(hw, IEEE80211_MAX_QUEUE_MAP, IEEE80211_QUEUE_STOP_REASON_SUSPEND, false); return err; } else if (err > 0) { WARN_ON(err != 1); /* cfg80211 will call back into mac80211 to disconnect * all interfaces, allow that to proceed properly */ ieee80211_wake_queues_by_reason(hw, IEEE80211_MAX_QUEUE_MAP, IEEE80211_QUEUE_STOP_REASON_SUSPEND, false); return err; } else { goto suspend; } } /* remove all interfaces that were created in the driver */ list_for_each_entry(sdata, &local->interfaces, list) { if (!ieee80211_sdata_running(sdata)) continue; switch (sdata->vif.type) { case NL80211_IFTYPE_AP_VLAN: case NL80211_IFTYPE_MONITOR: continue; case NL80211_IFTYPE_STATION: ieee80211_mgd_quiesce(sdata); break; case NL80211_IFTYPE_WDS: /* tear down aggregation sessions and remove STAs */ mutex_lock(&local->sta_mtx); sta = sdata->u.wds.sta; if (sta && sta->uploaded) { enum ieee80211_sta_state state; state = sta->sta_state; for (; state > IEEE80211_STA_NOTEXIST; state--) WARN_ON(drv_sta_state(local, sta->sdata, sta, state, state - 1)); } mutex_unlock(&local->sta_mtx); break; default: break; } flush_delayed_work(&sdata->dec_tailroom_needed_wk); drv_remove_interface(local, sdata); } /* * We disconnected on all interfaces before suspend, all channel * contexts should be released. */ WARN_ON(!list_empty(&local->chanctx_list)); /* stop hardware - this must stop RX */ ieee80211_stop_device(local); suspend: local->suspended = true; /* need suspended to be visible before quiescing is false */ barrier(); local->quiescing = false; return 0; } /* * __ieee80211_resume() is a static inline which just calls * ieee80211_reconfig(), which is also needed for hardware * hang/firmware failure/etc. recovery. */ void ieee80211_report_wowlan_wakeup(struct ieee80211_vif *vif, struct cfg80211_wowlan_wakeup *wakeup, gfp_t gfp) { struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); cfg80211_report_wowlan_wakeup(&sdata->wdev, wakeup, gfp); } EXPORT_SYMBOL(ieee80211_report_wowlan_wakeup); backport-iwlwifi-dkms-7906/net/mac80211/rate.c000066400000000000000000000626541351064634500207370ustar00rootroot00000000000000/* * Copyright 2002-2005, Instant802 Networks, Inc. * Copyright 2005-2006, Devicescape Software, Inc. * Copyright (c) 2006 Jiri Benc * Copyright 2017 Intel Deutschland GmbH * Copyright (C) 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. 
*/
#include <linux/kernel.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/module.h>
#include "rate.h"
#include "ieee80211_i.h"
#include "debugfs.h"
struct rate_control_alg { struct list_head list; const struct rate_control_ops *ops; }; static LIST_HEAD(rate_ctrl_algs); static DEFINE_MUTEX(rate_ctrl_mutex); static char *ieee80211_default_rc_algo = CPTCFG_MAC80211_RC_DEFAULT; module_param(ieee80211_default_rc_algo, charp, 0644); MODULE_PARM_DESC(ieee80211_default_rc_algo, "Default rate control algorithm for mac80211 to use"); void rate_control_rate_init(struct sta_info *sta) { struct ieee80211_local *local = sta->sdata->local; struct rate_control_ref *ref = sta->rate_ctrl; struct ieee80211_sta *ista = &sta->sta; void *priv_sta = sta->rate_ctrl_priv; struct ieee80211_supported_band *sband; struct ieee80211_chanctx_conf *chanctx_conf; ieee80211_sta_set_rx_nss(sta); if (!ref) return; rcu_read_lock(); chanctx_conf = rcu_dereference(sta->sdata->vif.chanctx_conf); if (WARN_ON(!chanctx_conf)) { rcu_read_unlock(); return; } sband = local->hw.wiphy->bands[chanctx_conf->def.chan->band]; spin_lock_bh(&sta->rate_ctrl_lock); ref->ops->rate_init(ref->priv, sband, &chanctx_conf->def, ista, priv_sta); spin_unlock_bh(&sta->rate_ctrl_lock); rcu_read_unlock(); set_sta_flag(sta, WLAN_STA_RATE_CONTROL); } void rate_control_tx_status(struct ieee80211_local *local, struct ieee80211_supported_band *sband, struct ieee80211_tx_status *st) { struct rate_control_ref *ref = local->rate_ctrl; struct sta_info *sta = container_of(st->sta, struct sta_info, sta); void *priv_sta = sta->rate_ctrl_priv; if (!ref || !test_sta_flag(sta, WLAN_STA_RATE_CONTROL)) return; spin_lock_bh(&sta->rate_ctrl_lock); if (ref->ops->tx_status_ext) ref->ops->tx_status_ext(ref->priv, sband, priv_sta, st); else if (st->skb) ref->ops->tx_status(ref->priv, sband, st->sta, priv_sta, st->skb); else WARN_ON_ONCE(1); spin_unlock_bh(&sta->rate_ctrl_lock); } void rate_control_rate_update(struct ieee80211_local *local, struct ieee80211_supported_band *sband, struct sta_info *sta, u32 changed) { struct rate_control_ref *ref = local->rate_ctrl; struct ieee80211_sta *ista = &sta->sta; void *priv_sta = sta->rate_ctrl_priv; struct ieee80211_chanctx_conf *chanctx_conf; if (ref && ref->ops->rate_update) { rcu_read_lock(); chanctx_conf = rcu_dereference(sta->sdata->vif.chanctx_conf); if (WARN_ON(!chanctx_conf)) { rcu_read_unlock(); return; } spin_lock_bh(&sta->rate_ctrl_lock); ref->ops->rate_update(ref->priv, sband, &chanctx_conf->def, ista, priv_sta, changed); spin_unlock_bh(&sta->rate_ctrl_lock); rcu_read_unlock(); } drv_sta_rc_update(local, sta->sdata, &sta->sta, changed); } int ieee80211_rate_control_register(const struct rate_control_ops *ops) { struct rate_control_alg *alg; if (!ops->name) return -EINVAL; mutex_lock(&rate_ctrl_mutex); list_for_each_entry(alg, &rate_ctrl_algs, list) { if (!strcmp(alg->ops->name, ops->name)) { /* don't register an algorithm twice */ WARN_ON(1); mutex_unlock(&rate_ctrl_mutex); return -EALREADY; } } alg = kzalloc(sizeof(*alg), GFP_KERNEL); if (alg == NULL) { mutex_unlock(&rate_ctrl_mutex); return -ENOMEM; } alg->ops = ops; list_add_tail(&alg->list, &rate_ctrl_algs); mutex_unlock(&rate_ctrl_mutex); return 0; } EXPORT_SYMBOL(ieee80211_rate_control_register); void ieee80211_rate_control_unregister(const struct rate_control_ops *ops) { struct rate_control_alg *alg; mutex_lock(&rate_ctrl_mutex); list_for_each_entry(alg, &rate_ctrl_algs, list) { if (alg->ops == ops) { list_del(&alg->list); kfree(alg); break; } } mutex_unlock(&rate_ctrl_mutex); }
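/*
 * Editor's sketch (hypothetical module, not part of this driver): a
 * rate-control algorithm registers its struct rate_control_ops by
 * name through the pair of functions above, typically from module
 * init/exit:
 *
 *	static const struct rate_control_ops dummy_rc_ops = {
 *		.name = "dummy_rc",
 *		// plus .alloc/.free/.alloc_sta/.free_sta/
 *		// .rate_init/.get_rate/.tx_status callbacks
 *	};
 *
 *	ret = ieee80211_rate_control_register(&dummy_rc_ops);
 *	// -EINVAL without a name, -EALREADY if "dummy_rc" is taken
 *	...
 *	ieee80211_rate_control_unregister(&dummy_rc_ops);
 */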
EXPORT_SYMBOL(ieee80211_rate_control_unregister); static const struct rate_control_ops * ieee80211_try_rate_control_ops_get(const char *name) { struct rate_control_alg *alg; const struct rate_control_ops *ops = NULL; if (!name) return NULL; mutex_lock(&rate_ctrl_mutex); list_for_each_entry(alg, &rate_ctrl_algs, list) { if (!strcmp(alg->ops->name, name)) { ops = alg->ops; break; } } mutex_unlock(&rate_ctrl_mutex); return ops; } /* Get the rate control algorithm. */ static const struct rate_control_ops * ieee80211_rate_control_ops_get(const char *name) { const struct rate_control_ops *ops; const char *alg_name; kernel_param_lock(THIS_MODULE); if (!name) alg_name = ieee80211_default_rc_algo; else alg_name = name; ops = ieee80211_try_rate_control_ops_get(alg_name); if (!ops && name) /* try default if specific alg requested but not found */ ops = ieee80211_try_rate_control_ops_get(ieee80211_default_rc_algo); /* Note: check for > 0 is intentional to avoid clang warning */ if (!ops && (strlen(CPTCFG_MAC80211_RC_DEFAULT) > 0)) /* try built-in one if specific alg requested but not found */ ops = ieee80211_try_rate_control_ops_get(CPTCFG_MAC80211_RC_DEFAULT); kernel_param_unlock(THIS_MODULE); return ops; } #ifdef CPTCFG_MAC80211_DEBUGFS static ssize_t rcname_read(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { struct rate_control_ref *ref = file->private_data; int len = strlen(ref->ops->name); return simple_read_from_buffer(userbuf, count, ppos, ref->ops->name, len); } static const struct file_operations rcname_ops = { .read = rcname_read, .open = simple_open, .llseek = default_llseek, }; #endif static struct rate_control_ref *rate_control_alloc(const char *name, struct ieee80211_local *local) { struct dentry *debugfsdir = NULL; struct rate_control_ref *ref; ref = kmalloc(sizeof(struct rate_control_ref), GFP_KERNEL); if (!ref) return NULL; ref->ops = ieee80211_rate_control_ops_get(name); if (!ref->ops) goto free; #ifdef CPTCFG_MAC80211_DEBUGFS debugfsdir = debugfs_create_dir("rc", local->hw.wiphy->debugfsdir); local->debugfs.rcdir = debugfsdir; debugfs_create_file("name", 0400, debugfsdir, ref, &rcname_ops); #endif ref->priv = ref->ops->alloc(&local->hw, debugfsdir); if (!ref->priv) goto free; return ref; free: kfree(ref); return NULL; } static void rate_control_free(struct ieee80211_local *local, struct rate_control_ref *ctrl_ref) { ctrl_ref->ops->free(ctrl_ref->priv); #ifdef CPTCFG_MAC80211_DEBUGFS debugfs_remove_recursive(local->debugfs.rcdir); local->debugfs.rcdir = NULL; #endif kfree(ctrl_ref); } void ieee80211_check_rate_mask(struct ieee80211_sub_if_data *sdata) { struct ieee80211_local *local = sdata->local; struct ieee80211_supported_band *sband; u32 user_mask, basic_rates = sdata->vif.bss_conf.basic_rates; enum nl80211_band band; if (WARN_ON(!sdata->vif.bss_conf.chandef.chan)) return; if (WARN_ON_ONCE(!basic_rates)) return; band = sdata->vif.bss_conf.chandef.chan->band; user_mask = sdata->rc_rateidx_mask[band]; sband = local->hw.wiphy->bands[band]; if (user_mask & basic_rates) return; sdata_dbg(sdata, "no overlap between basic rates (0x%x) and user mask (0x%x on band %d) - clearing the latter", basic_rates, user_mask, band); sdata->rc_rateidx_mask[band] = (1 << sband->n_bitrates) - 1; } static bool rc_no_data_or_no_ack_use_min(struct ieee80211_tx_rate_control *txrc) { struct sk_buff *skb = txrc->skb; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); __le16 fc; fc = hdr->frame_control; return 
(info->flags & (IEEE80211_TX_CTL_NO_ACK | IEEE80211_TX_CTL_USE_MINRATE)) || !ieee80211_is_data(fc); } static void rc_send_low_basicrate(s8 *idx, u32 basic_rates, struct ieee80211_supported_band *sband) { u8 i; if (basic_rates == 0) return; /* assume basic rates unknown and accept rate */ if (*idx < 0) return; if (basic_rates & (1 << *idx)) return; /* selected rate is a basic rate */ for (i = *idx + 1; i <= sband->n_bitrates; i++) { if (basic_rates & (1 << i)) { *idx = i; return; } } /* could not find a basic rate; use original selection */ } static void __rate_control_send_low(struct ieee80211_hw *hw, struct ieee80211_supported_band *sband, struct ieee80211_sta *sta, struct ieee80211_tx_info *info, u32 rate_mask) { int i; u32 rate_flags = ieee80211_chandef_rate_flags(&hw->conf.chandef); if ((sband->band == NL80211_BAND_2GHZ) && (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE)) rate_flags |= IEEE80211_RATE_ERP_G; info->control.rates[0].idx = 0; for (i = 0; i < sband->n_bitrates; i++) { if (!(rate_mask & BIT(i))) continue; if ((rate_flags & sband->bitrates[i].flags) != rate_flags) continue; if (!rate_supported(sta, sband->band, i)) continue; info->control.rates[0].idx = i; break; } WARN_ONCE(i == sband->n_bitrates, "no supported rates for sta %pM (0x%x, band %d) in rate_mask 0x%x with flags 0x%x\n", sta ? sta->addr : NULL, sta ? sta->supp_rates[sband->band] : -1, sband->band, rate_mask, rate_flags); info->control.rates[0].count = (info->flags & IEEE80211_TX_CTL_NO_ACK) ? 1 : hw->max_rate_tries; info->control.skip_table = 1; } static bool rate_control_send_low(struct ieee80211_sta *pubsta, struct ieee80211_tx_rate_control *txrc) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(txrc->skb); struct ieee80211_supported_band *sband = txrc->sband; struct sta_info *sta; int mcast_rate; bool use_basicrate = false; if (!pubsta || rc_no_data_or_no_ack_use_min(txrc)) { __rate_control_send_low(txrc->hw, sband, pubsta, info, txrc->rate_idx_mask); if (!pubsta && txrc->bss) { mcast_rate = txrc->bss_conf->mcast_rate[sband->band]; if (mcast_rate > 0) { info->control.rates[0].idx = mcast_rate - 1; return true; } use_basicrate = true; } else if (pubsta) { sta = container_of(pubsta, struct sta_info, sta); if (ieee80211_vif_is_mesh(&sta->sdata->vif)) use_basicrate = true; } if (use_basicrate) rc_send_low_basicrate(&info->control.rates[0].idx, txrc->bss_conf->basic_rates, sband); return true; } return false; } static bool rate_idx_match_legacy_mask(s8 *rate_idx, int n_bitrates, u32 mask) { int j; /* See whether the selected rate or anything below it is allowed. */ for (j = *rate_idx; j >= 0; j--) { if (mask & (1 << j)) { /* Okay, found a suitable rate. Use it. */ *rate_idx = j; return true; } } /* Try to find a higher rate that would be allowed */ for (j = *rate_idx + 1; j < n_bitrates; j++) { if (mask & (1 << j)) { /* Okay, found a suitable rate. Use it. */ *rate_idx = j; return true; } } return false; } static bool rate_idx_match_mcs_mask(s8 *rate_idx, u8 *mcs_mask) { int i, j; int ridx, rbit; ridx = *rate_idx / 8; rbit = *rate_idx % 8; /* sanity check */ if (ridx < 0 || ridx >= IEEE80211_HT_MCS_MASK_LEN) return false; /* See whether the selected rate or anything below it is allowed. 
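 * (Editor's note: with the ridx/rbit split above, e.g.
 * *rate_idx == 21 gives ridx == 2 and rbit == 5, i.e. HT MCS 21 is
 * bit 5 of mcs_mask[2]; the downward search starts from there.)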
*/ for (i = ridx; i >= 0; i--) { for (j = rbit; j >= 0; j--) if (mcs_mask[i] & BIT(j)) { *rate_idx = i * 8 + j; return true; } rbit = 7; } /* Try to find a higher rate that would be allowed */ ridx = (*rate_idx + 1) / 8; rbit = (*rate_idx + 1) % 8; for (i = ridx; i < IEEE80211_HT_MCS_MASK_LEN; i++) { for (j = rbit; j < 8; j++) if (mcs_mask[i] & BIT(j)) { *rate_idx = i * 8 + j; return true; } rbit = 0; } return false; } static bool rate_idx_match_vht_mcs_mask(s8 *rate_idx, u16 *vht_mask) { int i, j; int ridx, rbit; ridx = *rate_idx >> 4; rbit = *rate_idx & 0xf; if (ridx < 0 || ridx >= NL80211_VHT_NSS_MAX) return false; /* See whether the selected rate or anything below it is allowed. */ for (i = ridx; i >= 0; i--) { for (j = rbit; j >= 0; j--) { if (vht_mask[i] & BIT(j)) { *rate_idx = (i << 4) | j; return true; } } rbit = 15; } /* Try to find a higher rate that would be allowed */ ridx = (*rate_idx + 1) >> 4; rbit = (*rate_idx + 1) & 0xf; for (i = ridx; i < NL80211_VHT_NSS_MAX; i++) { for (j = rbit; j < 16; j++) { if (vht_mask[i] & BIT(j)) { *rate_idx = (i << 4) | j; return true; } } rbit = 0; } return false; } static void rate_idx_match_mask(s8 *rate_idx, u16 *rate_flags, struct ieee80211_supported_band *sband, enum nl80211_chan_width chan_width, u32 mask, u8 mcs_mask[IEEE80211_HT_MCS_MASK_LEN], u16 vht_mask[NL80211_VHT_NSS_MAX]) { if (*rate_flags & IEEE80211_TX_RC_VHT_MCS) { /* handle VHT rates */ if (rate_idx_match_vht_mcs_mask(rate_idx, vht_mask)) return; *rate_idx = 0; /* keep protection flags */ *rate_flags &= (IEEE80211_TX_RC_USE_RTS_CTS | IEEE80211_TX_RC_USE_CTS_PROTECT | IEEE80211_TX_RC_USE_SHORT_PREAMBLE); *rate_flags |= IEEE80211_TX_RC_MCS; if (chan_width == NL80211_CHAN_WIDTH_40) *rate_flags |= IEEE80211_TX_RC_40_MHZ_WIDTH; if (rate_idx_match_mcs_mask(rate_idx, mcs_mask)) return; /* also try the legacy rates. */ *rate_flags &= ~(IEEE80211_TX_RC_MCS | IEEE80211_TX_RC_40_MHZ_WIDTH); if (rate_idx_match_legacy_mask(rate_idx, sband->n_bitrates, mask)) return; } else if (*rate_flags & IEEE80211_TX_RC_MCS) { /* handle HT rates */ if (rate_idx_match_mcs_mask(rate_idx, mcs_mask)) return; /* also try the legacy rates. */ *rate_idx = 0; /* keep protection flags */ *rate_flags &= (IEEE80211_TX_RC_USE_RTS_CTS | IEEE80211_TX_RC_USE_CTS_PROTECT | IEEE80211_TX_RC_USE_SHORT_PREAMBLE); if (rate_idx_match_legacy_mask(rate_idx, sband->n_bitrates, mask)) return; } else { /* handle legacy rates */ if (rate_idx_match_legacy_mask(rate_idx, sband->n_bitrates, mask)) return; /* if HT BSS, and we handle a data frame, also try HT rates */ switch (chan_width) { case NL80211_CHAN_WIDTH_20_NOHT: case NL80211_CHAN_WIDTH_5: case NL80211_CHAN_WIDTH_10: return; default: break; } *rate_idx = 0; /* keep protection flags */ *rate_flags &= (IEEE80211_TX_RC_USE_RTS_CTS | IEEE80211_TX_RC_USE_CTS_PROTECT | IEEE80211_TX_RC_USE_SHORT_PREAMBLE); *rate_flags |= IEEE80211_TX_RC_MCS; if (chan_width == NL80211_CHAN_WIDTH_40) *rate_flags |= IEEE80211_TX_RC_40_MHZ_WIDTH; if (rate_idx_match_mcs_mask(rate_idx, mcs_mask)) return; } /* * Uh.. No suitable rate exists. This should not really happen with * sane TX rate mask configurations. However, should someone manage to * configure supported rates and TX rate mask in incompatible way, * allow the frame to be transmitted with whatever the rate control * selected. 
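 * (Editor's note: in that case the index chosen by the rate-control
 * algorithm is passed through unchanged rather than dropping the
 * frame.)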
*/ } static void rate_fixup_ratelist(struct ieee80211_vif *vif, struct ieee80211_supported_band *sband, struct ieee80211_tx_info *info, struct ieee80211_tx_rate *rates, int max_rates) { struct ieee80211_rate *rate; bool inval = false; int i; /* * Set up the RTS/CTS rate as the fastest basic rate * that is not faster than the data rate unless there * is no basic rate slower than the data rate, in which * case we pick the slowest basic rate * * XXX: Should this check all retry rates? */ if (!(rates[0].flags & (IEEE80211_TX_RC_MCS | IEEE80211_TX_RC_VHT_MCS))) { u32 basic_rates = vif->bss_conf.basic_rates; s8 baserate = basic_rates ? ffs(basic_rates) - 1 : 0; rate = &sband->bitrates[rates[0].idx]; for (i = 0; i < sband->n_bitrates; i++) { /* must be a basic rate */ if (!(basic_rates & BIT(i))) continue; /* must not be faster than the data rate */ if (sband->bitrates[i].bitrate > rate->bitrate) continue; /* maximum */ if (sband->bitrates[baserate].bitrate < sband->bitrates[i].bitrate) baserate = i; } info->control.rts_cts_rate_idx = baserate; } for (i = 0; i < max_rates; i++) { /* * make sure there's no valid rate following * an invalid one, just in case drivers don't * take the API seriously to stop at -1. */ if (inval) { rates[i].idx = -1; continue; } if (rates[i].idx < 0) { inval = true; continue; } /* * For now assume MCS is already set up correctly, this * needs to be fixed. */ if (rates[i].flags & IEEE80211_TX_RC_MCS) { WARN_ON(rates[i].idx > 76); if (!(rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) && info->control.use_cts_prot) rates[i].flags |= IEEE80211_TX_RC_USE_CTS_PROTECT; continue; } if (rates[i].flags & IEEE80211_TX_RC_VHT_MCS) { WARN_ON(ieee80211_rate_get_vht_mcs(&rates[i]) > 9); continue; } /* set up RTS protection if desired */ if (info->control.use_rts) { rates[i].flags |= IEEE80211_TX_RC_USE_RTS_CTS; info->control.use_cts_prot = false; } /* RC is busted */ if (WARN_ON_ONCE(rates[i].idx >= sband->n_bitrates)) { rates[i].idx = -1; continue; } rate = &sband->bitrates[rates[i].idx]; /* set up short preamble */ if (info->control.short_preamble && rate->flags & IEEE80211_RATE_SHORT_PREAMBLE) rates[i].flags |= IEEE80211_TX_RC_USE_SHORT_PREAMBLE; /* set up G protection */ if (!(rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) && info->control.use_cts_prot && rate->flags & IEEE80211_RATE_ERP_G) rates[i].flags |= IEEE80211_TX_RC_USE_CTS_PROTECT; } } static void rate_control_fill_sta_table(struct ieee80211_sta *sta, struct ieee80211_tx_info *info, struct ieee80211_tx_rate *rates, int max_rates) { struct ieee80211_sta_rates *ratetbl = NULL; int i; if (sta && !info->control.skip_table) ratetbl = rcu_dereference(sta->rates); /* Fill remaining rate slots with data from the sta rate table. 
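 * (Editor's note on precedence, as implemented below: an explicitly
 * set, valid entry in info->control.rates wins; otherwise the slot
 * comes from the station's rate table, picking count_rts, count_cts
 * or count depending on the protection in use; any remaining slots
 * are terminated with idx == -1.)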
*/ max_rates = min_t(int, max_rates, IEEE80211_TX_RATE_TABLE_SIZE); for (i = 0; i < max_rates; i++) { if (i < ARRAY_SIZE(info->control.rates) && info->control.rates[i].idx >= 0 && info->control.rates[i].count) { if (rates != info->control.rates) rates[i] = info->control.rates[i]; } else if (ratetbl) { rates[i].idx = ratetbl->rate[i].idx; rates[i].flags = ratetbl->rate[i].flags; if (info->control.use_rts) rates[i].count = ratetbl->rate[i].count_rts; else if (info->control.use_cts_prot) rates[i].count = ratetbl->rate[i].count_cts; else rates[i].count = ratetbl->rate[i].count; } else { rates[i].idx = -1; rates[i].count = 0; } if (rates[i].idx < 0 || !rates[i].count) break; } } static bool rate_control_cap_mask(struct ieee80211_sub_if_data *sdata, struct ieee80211_supported_band *sband, struct ieee80211_sta *sta, u32 *mask, u8 mcs_mask[IEEE80211_HT_MCS_MASK_LEN], u16 vht_mask[NL80211_VHT_NSS_MAX]) { u32 i, flags; *mask = sdata->rc_rateidx_mask[sband->band]; flags = ieee80211_chandef_rate_flags(&sdata->vif.bss_conf.chandef); for (i = 0; i < sband->n_bitrates; i++) { if ((flags & sband->bitrates[i].flags) != flags) *mask &= ~BIT(i); } if (*mask == (1 << sband->n_bitrates) - 1 && !sdata->rc_has_mcs_mask[sband->band] && !sdata->rc_has_vht_mcs_mask[sband->band]) return false; if (sdata->rc_has_mcs_mask[sband->band]) memcpy(mcs_mask, sdata->rc_rateidx_mcs_mask[sband->band], IEEE80211_HT_MCS_MASK_LEN); else memset(mcs_mask, 0xff, IEEE80211_HT_MCS_MASK_LEN); if (sdata->rc_has_vht_mcs_mask[sband->band]) memcpy(vht_mask, sdata->rc_rateidx_vht_mcs_mask[sband->band], sizeof(u16) * NL80211_VHT_NSS_MAX); else memset(vht_mask, 0xff, sizeof(u16) * NL80211_VHT_NSS_MAX); if (sta) { __le16 sta_vht_cap; u16 sta_vht_mask[NL80211_VHT_NSS_MAX]; /* Filter out rates that the STA does not support */ *mask &= sta->supp_rates[sband->band]; for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++) mcs_mask[i] &= sta->ht_cap.mcs.rx_mask[i]; sta_vht_cap = sta->vht_cap.vht_mcs.rx_mcs_map; ieee80211_get_vht_mask_from_cap(sta_vht_cap, sta_vht_mask); for (i = 0; i < NL80211_VHT_NSS_MAX; i++) vht_mask[i] &= sta_vht_mask[i]; } return true; } static void rate_control_apply_mask_ratetbl(struct sta_info *sta, struct ieee80211_supported_band *sband, struct ieee80211_sta_rates *rates) { int i; u32 mask; u8 mcs_mask[IEEE80211_HT_MCS_MASK_LEN]; u16 vht_mask[NL80211_VHT_NSS_MAX]; enum nl80211_chan_width chan_width; if (!rate_control_cap_mask(sta->sdata, sband, &sta->sta, &mask, mcs_mask, vht_mask)) return; chan_width = sta->sdata->vif.bss_conf.chandef.width; for (i = 0; i < IEEE80211_TX_RATE_TABLE_SIZE; i++) { if (rates->rate[i].idx < 0) break; rate_idx_match_mask(&rates->rate[i].idx, &rates->rate[i].flags, sband, chan_width, mask, mcs_mask, vht_mask); } } static void rate_control_apply_mask(struct ieee80211_sub_if_data *sdata, struct ieee80211_sta *sta, struct ieee80211_supported_band *sband, struct ieee80211_tx_rate *rates, int max_rates) { enum nl80211_chan_width chan_width; u8 mcs_mask[IEEE80211_HT_MCS_MASK_LEN]; u32 mask; u16 rate_flags, vht_mask[NL80211_VHT_NSS_MAX]; int i; /* * Try to enforce the rateidx mask the user wanted. skip this if the * default mask (allow all rates) is used to save some processing for * the common case. */ if (!rate_control_cap_mask(sdata, sband, sta, &mask, mcs_mask, vht_mask)) return; /* * Make sure the rate index selected for each TX rate is * included in the configured mask and change the rate indexes * if needed. 
*/ chan_width = sdata->vif.bss_conf.chandef.width; for (i = 0; i < max_rates; i++) { /* Skip invalid rates */ if (rates[i].idx < 0) break; rate_flags = rates[i].flags; rate_idx_match_mask(&rates[i].idx, &rate_flags, sband, chan_width, mask, mcs_mask, vht_mask); rates[i].flags = rate_flags; } } void ieee80211_get_tx_rates(struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct sk_buff *skb, struct ieee80211_tx_rate *dest, int max_rates) { struct ieee80211_sub_if_data *sdata; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_supported_band *sband; rate_control_fill_sta_table(sta, info, dest, max_rates); if (!vif) return; sdata = vif_to_sdata(vif); sband = sdata->local->hw.wiphy->bands[info->band]; if (ieee80211_is_data(hdr->frame_control)) rate_control_apply_mask(sdata, sta, sband, dest, max_rates); if (dest[0].idx < 0) __rate_control_send_low(&sdata->local->hw, sband, sta, info, sdata->rc_rateidx_mask[info->band]); if (sta) rate_fixup_ratelist(vif, sband, info, dest, max_rates); } EXPORT_SYMBOL(ieee80211_get_tx_rates); void rate_control_get_rate(struct ieee80211_sub_if_data *sdata, struct sta_info *sta, struct ieee80211_tx_rate_control *txrc) { struct rate_control_ref *ref = sdata->local->rate_ctrl; void *priv_sta = NULL; struct ieee80211_sta *ista = NULL; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(txrc->skb); int i; for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) { info->control.rates[i].idx = -1; info->control.rates[i].flags = 0; info->control.rates[i].count = 0; } if (rate_control_send_low(sta ? &sta->sta : NULL, txrc)) return; if (ieee80211_hw_check(&sdata->local->hw, HAS_RATE_CONTROL)) return; if (sta && test_sta_flag(sta, WLAN_STA_RATE_CONTROL)) { ista = &sta->sta; priv_sta = sta->rate_ctrl_priv; } if (ista) { spin_lock_bh(&sta->rate_ctrl_lock); ref->ops->get_rate(ref->priv, ista, priv_sta, txrc); spin_unlock_bh(&sta->rate_ctrl_lock); } else { rate_control_send_low(NULL, txrc); } if (ieee80211_hw_check(&sdata->local->hw, SUPPORTS_RC_TABLE)) return; ieee80211_get_tx_rates(&sdata->vif, ista, txrc->skb, info->control.rates, ARRAY_SIZE(info->control.rates)); } int rate_control_set_rates(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta, struct ieee80211_sta_rates *rates) { struct sta_info *sta = container_of(pubsta, struct sta_info, sta); struct ieee80211_sta_rates *old; struct ieee80211_supported_band *sband; sband = ieee80211_get_sband(sta->sdata); if (!sband) return -EINVAL; rate_control_apply_mask_ratetbl(sta, sband, rates); /* * mac80211 guarantees that this function will not be called * concurrently, so the following RCU access is safe, even without * extra locking. This can not be checked easily, so we just set * the condition to true. 
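* (Passing "true" as the condition to rcu_dereference_protected() disables the lockdep check for this access; correctness relies on the no-concurrency guarantee described above.)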
*/ old = rcu_dereference_protected(pubsta->rates, true); rcu_assign_pointer(pubsta->rates, rates); if (old) kfree_rcu(old, rcu_head); drv_sta_rate_tbl_update(hw_to_local(hw), sta->sdata, pubsta); ieee80211_sta_set_expected_throughput(pubsta, sta_get_expected_throughput(sta)); return 0; } EXPORT_SYMBOL(rate_control_set_rates); int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local, const char *name) { struct rate_control_ref *ref; ASSERT_RTNL(); if (local->open_count) return -EBUSY; if (ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL)) { if (WARN_ON(!local->ops->set_rts_threshold)) return -EINVAL; return 0; } ref = rate_control_alloc(name, local); if (!ref) { wiphy_warn(local->hw.wiphy, "Failed to select rate control algorithm\n"); return -ENOENT; } WARN_ON(local->rate_ctrl); local->rate_ctrl = ref; wiphy_debug(local->hw.wiphy, "Selected rate control algorithm '%s'\n", ref->ops->name); return 0; } void rate_control_deinitialize(struct ieee80211_local *local) { struct rate_control_ref *ref; ref = local->rate_ctrl; if (!ref) return; local->rate_ctrl = NULL; rate_control_free(local, ref); } backport-iwlwifi-dkms-7906/net/mac80211/rate.h000066400000000000000000000053601351064634500207330ustar00rootroot00000000000000/* * Copyright 2002-2005, Instant802 Networks, Inc. * Copyright 2005, Devicescape Software, Inc. * Copyright (c) 2006 Jiri Benc * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #ifndef IEEE80211_RATE_H #define IEEE80211_RATE_H #include <linux/netdevice.h> #include <linux/skbuff.h> #include <linux/types.h> #include <net/mac80211.h> #include "ieee80211_i.h" #include "sta_info.h" #include "driver-ops.h" struct rate_control_ref { const struct rate_control_ops *ops; void *priv; }; void rate_control_get_rate(struct ieee80211_sub_if_data *sdata, struct sta_info *sta, struct ieee80211_tx_rate_control *txrc); void rate_control_tx_status(struct ieee80211_local *local, struct ieee80211_supported_band *sband, struct ieee80211_tx_status *st); void rate_control_rate_init(struct sta_info *sta); void rate_control_rate_update(struct ieee80211_local *local, struct ieee80211_supported_band *sband, struct sta_info *sta, u32 changed); static inline void *rate_control_alloc_sta(struct rate_control_ref *ref, struct sta_info *sta, gfp_t gfp) { spin_lock_init(&sta->rate_ctrl_lock); return ref->ops->alloc_sta(ref->priv, &sta->sta, gfp); } static inline void rate_control_free_sta(struct sta_info *sta) { struct rate_control_ref *ref = sta->rate_ctrl; struct ieee80211_sta *ista = &sta->sta; void *priv_sta = sta->rate_ctrl_priv; ref->ops->free_sta(ref->priv, ista, priv_sta); } static inline void rate_control_add_sta_debugfs(struct sta_info *sta) { #ifdef CPTCFG_MAC80211_DEBUGFS struct rate_control_ref *ref = sta->rate_ctrl; if (ref && sta->debugfs_dir && ref->ops->add_sta_debugfs) ref->ops->add_sta_debugfs(ref->priv, sta->rate_ctrl_priv, sta->debugfs_dir); #endif } static inline void rate_control_remove_sta_debugfs(struct sta_info *sta) { #ifdef CPTCFG_MAC80211_DEBUGFS struct rate_control_ref *ref = sta->rate_ctrl; if (ref && ref->ops->remove_sta_debugfs) ref->ops->remove_sta_debugfs(ref->priv, sta->rate_ctrl_priv); #endif } void ieee80211_check_rate_mask(struct ieee80211_sub_if_data *sdata); /* Get a reference to the rate control algorithm. If `name' is NULL, get the * first available algorithm.
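* Returns 0 on success, -EBUSY while interfaces are up (local->open_count != 0), or -ENOENT if no matching algorithm could be allocated.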
*/ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local, const char *name); void rate_control_deinitialize(struct ieee80211_local *local); /* Rate control algorithms */ #ifdef CPTCFG_MAC80211_RC_MINSTREL int rc80211_minstrel_init(void); void rc80211_minstrel_exit(void); #else static inline int rc80211_minstrel_init(void) { return 0; } static inline void rc80211_minstrel_exit(void) { } #endif #endif /* IEEE80211_RATE_H */ backport-iwlwifi-dkms-7906/net/mac80211/rc80211_minstrel.c000066400000000000000000000424601351064634500227120ustar00rootroot00000000000000/* * Copyright (C) 2008 Felix Fietkau * Copyright (C) 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Based on minstrel.c: * Copyright (C) 2005-2007 Derek Smithies * Sponsored by Indranet Technologies Ltd * * Based on sample.c: * Copyright (c) 2005 John Bicket * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. */ #include <linux/netdevice.h> #include <linux/types.h> #include <linux/skbuff.h> #include <linux/debugfs.h> #include <linux/random.h> #include <linux/ieee80211.h> #include <linux/slab.h> #include <net/mac80211.h> #include "rate.h" #include "rc80211_minstrel.h" #define SAMPLE_TBL(_mi, _idx, _col) \ _mi->sample_table[(_idx * SAMPLE_COLUMNS) + _col] /* convert mac80211 rate index to local array index */ static inline int rix_to_ndx(struct minstrel_sta_info *mi, int rix) { int i = rix; for (i = rix; i >= 0; i--) if (mi->r[i].rix == rix) break; return i; } /* return current EWMA throughput */ int minstrel_get_tp_avg(struct minstrel_rate *mr, int prob_ewma) { int usecs; usecs = mr->perfect_tx_time; if (!usecs) usecs = 1000000; /* reset throughput
below 10% success */ if (mr->stats.prob_ewma < MINSTREL_FRAC(10, 100)) return 0; if (prob_ewma > MINSTREL_FRAC(90, 100)) return MINSTREL_TRUNC(100000 * (MINSTREL_FRAC(90, 100) / usecs)); else return MINSTREL_TRUNC(100000 * (prob_ewma / usecs)); } /* find & sort topmost throughput rates */ static inline void minstrel_sort_best_tp_rates(struct minstrel_sta_info *mi, int i, u8 *tp_list) { int j; struct minstrel_rate_stats *tmp_mrs; struct minstrel_rate_stats *cur_mrs = &mi->r[i].stats; for (j = MAX_THR_RATES; j > 0; --j) { tmp_mrs = &mi->r[tp_list[j - 1]].stats; if (minstrel_get_tp_avg(&mi->r[i], cur_mrs->prob_ewma) <= minstrel_get_tp_avg(&mi->r[tp_list[j - 1]], tmp_mrs->prob_ewma)) break; } if (j < MAX_THR_RATES - 1) memmove(&tp_list[j + 1], &tp_list[j], MAX_THR_RATES - (j + 1)); if (j < MAX_THR_RATES) tp_list[j] = i; } static void minstrel_set_rate(struct minstrel_sta_info *mi, struct ieee80211_sta_rates *ratetbl, int offset, int idx) { struct minstrel_rate *r = &mi->r[idx]; ratetbl->rate[offset].idx = r->rix; ratetbl->rate[offset].count = r->adjusted_retry_count; ratetbl->rate[offset].count_cts = r->retry_count_cts; ratetbl->rate[offset].count_rts = r->stats.retry_count_rtscts; } static void minstrel_update_rates(struct minstrel_priv *mp, struct minstrel_sta_info *mi) { struct ieee80211_sta_rates *ratetbl; int i = 0; ratetbl = kzalloc(sizeof(*ratetbl), GFP_ATOMIC); if (!ratetbl) return; /* Start with max_tp_rate */ minstrel_set_rate(mi, ratetbl, i++, mi->max_tp_rate[0]); if (mp->hw->max_rates >= 3) { /* At least 3 tx rates supported, use max_tp_rate2 next */ minstrel_set_rate(mi, ratetbl, i++, mi->max_tp_rate[1]); } if (mp->hw->max_rates >= 2) { /* At least 2 tx rates supported, use max_prob_rate next */ minstrel_set_rate(mi, ratetbl, i++, mi->max_prob_rate); } /* Use lowest rate last */ ratetbl->rate[i].idx = mi->lowest_rix; ratetbl->rate[i].count = mp->max_retry; ratetbl->rate[i].count_cts = mp->max_retry; ratetbl->rate[i].count_rts = mp->max_retry; rate_control_set_rates(mp->hw, mi->sta, ratetbl); } /* * Recalculate statistics and counters of a given rate */ void minstrel_calc_rate_stats(struct minstrel_rate_stats *mrs) { unsigned int cur_prob; if (unlikely(mrs->attempts > 0)) { mrs->sample_skipped = 0; cur_prob = MINSTREL_FRAC(mrs->success, mrs->attempts); if (unlikely(!mrs->att_hist)) { mrs->prob_ewma = cur_prob; } else { /* update exponential weighted moving average */ mrs->prob_ewma = minstrel_ewma(mrs->prob_ewma, cur_prob, EWMA_LEVEL); } mrs->att_hist += mrs->attempts; mrs->succ_hist += mrs->success; } else { mrs->sample_skipped++; } mrs->last_success = mrs->success; mrs->last_attempts = mrs->attempts; mrs->success = 0; mrs->attempts = 0; } static void minstrel_update_stats(struct minstrel_priv *mp, struct minstrel_sta_info *mi) { u8 tmp_tp_rate[MAX_THR_RATES]; u8 tmp_prob_rate = 0; int i, tmp_cur_tp, tmp_prob_tp; for (i = 0; i < MAX_THR_RATES; i++) tmp_tp_rate[i] = 0; for (i = 0; i < mi->n_rates; i++) { struct minstrel_rate *mr = &mi->r[i]; struct minstrel_rate_stats *mrs = &mi->r[i].stats; struct minstrel_rate_stats *tmp_mrs = &mi->r[tmp_prob_rate].stats; /* Update statistics of success probability per rate */ minstrel_calc_rate_stats(mrs); /* Sample less often below the 10% chance of success. * Sample less often above the 95% chance of success.
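* In both extreme regions the retry chain is shortened below (adjusted_retry_count is halved and capped at 2) and sample_limit restricts the rate to 4 sample frames, since extra samples barely move the EWMA there.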
*/ if (mrs->prob_ewma > MINSTREL_FRAC(95, 100) || mrs->prob_ewma < MINSTREL_FRAC(10, 100)) { mr->adjusted_retry_count = mrs->retry_count >> 1; if (mr->adjusted_retry_count > 2) mr->adjusted_retry_count = 2; mr->sample_limit = 4; } else { mr->sample_limit = -1; mr->adjusted_retry_count = mrs->retry_count; } if (!mr->adjusted_retry_count) mr->adjusted_retry_count = 2; minstrel_sort_best_tp_rates(mi, i, tmp_tp_rate); /* To determine the most robust rate (max_prob_rate) used at * 3rd mrr stage we distinguish between two cases: * (1) if any success probability >= 95%, out of those rates * choose the maximum throughput rate as max_prob_rate * (2) if all success probabilities < 95%, the rate with * highest success probability is chosen as max_prob_rate */ if (mrs->prob_ewma >= MINSTREL_FRAC(95, 100)) { tmp_cur_tp = minstrel_get_tp_avg(mr, mrs->prob_ewma); tmp_prob_tp = minstrel_get_tp_avg(&mi->r[tmp_prob_rate], tmp_mrs->prob_ewma); if (tmp_cur_tp >= tmp_prob_tp) tmp_prob_rate = i; } else { if (mrs->prob_ewma >= tmp_mrs->prob_ewma) tmp_prob_rate = i; } } /* Assign the new rate set */ memcpy(mi->max_tp_rate, tmp_tp_rate, sizeof(mi->max_tp_rate)); mi->max_prob_rate = tmp_prob_rate; #ifdef CPTCFG_MAC80211_DEBUGFS /* use fixed index if set */ if (mp->fixed_rate_idx != -1) { mi->max_tp_rate[0] = mp->fixed_rate_idx; mi->max_tp_rate[1] = mp->fixed_rate_idx; mi->max_prob_rate = mp->fixed_rate_idx; } #endif /* Reset update timer */ mi->last_stats_update = jiffies; minstrel_update_rates(mp, mi); } static void minstrel_tx_status(void *priv, struct ieee80211_supported_band *sband, void *priv_sta, struct ieee80211_tx_status *st) { struct ieee80211_tx_info *info = st->info; struct minstrel_priv *mp = priv; struct minstrel_sta_info *mi = priv_sta; struct ieee80211_tx_rate *ar = info->status.rates; int i, ndx; int success; success = !!(info->flags & IEEE80211_TX_STAT_ACK); for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) { if (ar[i].idx < 0) break; ndx = rix_to_ndx(mi, ar[i].idx); if (ndx < 0) continue; mi->r[ndx].stats.attempts += ar[i].count; if ((i != IEEE80211_TX_MAX_RATES - 1) && (ar[i + 1].idx < 0)) mi->r[ndx].stats.success += success; } if ((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) && (i >= 0)) mi->sample_packets++; if (mi->sample_deferred > 0) mi->sample_deferred--; if (time_after(jiffies, mi->last_stats_update + (mp->update_interval * HZ) / 1000)) minstrel_update_stats(mp, mi); } static inline unsigned int minstrel_get_retry_count(struct minstrel_rate *mr, struct ieee80211_tx_info *info) { u8 retry = mr->adjusted_retry_count; if (info->control.use_rts) retry = max_t(u8, 2, min(mr->stats.retry_count_rtscts, retry)); else if (info->control.use_cts_prot) retry = max_t(u8, 2, min(mr->retry_count_cts, retry)); return retry; } static int minstrel_get_next_sample(struct minstrel_sta_info *mi) { unsigned int sample_ndx; sample_ndx = SAMPLE_TBL(mi, mi->sample_row, mi->sample_column); mi->sample_row++; if ((int) mi->sample_row >= mi->n_rates) { mi->sample_row = 0; mi->sample_column++; if (mi->sample_column >= SAMPLE_COLUMNS) mi->sample_column = 0; } return sample_ndx; } static void minstrel_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta, struct ieee80211_tx_rate_control *txrc) { struct sk_buff *skb = txrc->skb; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct minstrel_sta_info *mi = priv_sta; struct minstrel_priv *mp = priv; struct ieee80211_tx_rate *rate = &info->control.rates[0]; struct minstrel_rate *msr, *mr; unsigned int ndx; bool mrr_capable; bool prev_sample; int delta; int
sampling_ratio; /* check multi-rate-retry capabilities & adjust lookaround_rate */ mrr_capable = mp->has_mrr && !txrc->rts && !txrc->bss_conf->use_cts_prot; if (mrr_capable) sampling_ratio = mp->lookaround_rate_mrr; else sampling_ratio = mp->lookaround_rate; /* increase sum packet counter */ mi->total_packets++; #ifdef CPTCFG_MAC80211_DEBUGFS if (mp->fixed_rate_idx != -1) return; #endif /* Don't use EAPOL frames for sampling on non-mrr hw */ if (mp->hw->max_rates == 1 && (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO)) return; delta = (mi->total_packets * sampling_ratio / 100) - (mi->sample_packets + mi->sample_deferred / 2); /* delta < 0: no sampling required */ prev_sample = mi->prev_sample; mi->prev_sample = false; if (delta < 0 || (!mrr_capable && prev_sample)) return; if (mi->total_packets >= 10000) { mi->sample_deferred = 0; mi->sample_packets = 0; mi->total_packets = 0; } else if (delta > mi->n_rates * 2) { /* With multi-rate retry, not every planned sample * attempt actually gets used, due to the way the retry * chain is set up - [max_tp,sample,prob,lowest] for * sample_rate < max_tp. * * If there's too much sampling backlog and the link * starts getting worse, minstrel would start bursting * out lots of sampling frames, which would result * in a large throughput loss. */ mi->sample_packets += (delta - mi->n_rates * 2); } /* get next random rate sample */ ndx = minstrel_get_next_sample(mi); msr = &mi->r[ndx]; mr = &mi->r[mi->max_tp_rate[0]]; /* Decide if direct (1st mrr stage) or indirect (2nd mrr stage) * rate sampling method should be used. * Respect such rates that are not sampled for 20 iterations. */ if (mrr_capable && msr->perfect_tx_time > mr->perfect_tx_time && msr->stats.sample_skipped < 20) { /* Only use IEEE80211_TX_CTL_RATE_CTRL_PROBE to mark * packets that have the sampling rate deferred to the * second MRR stage. Increase the sample counter only * if the deferred sample rate was actually used.
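* (Deferring the sample to the second MRR stage means a failed sample costs only that stage's retries instead of the whole frame's retry budget.)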
* Use the sample_deferred counter to make sure that * the sampling is not done in large bursts */ info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE; rate++; mi->sample_deferred++; } else { if (!msr->sample_limit) return; mi->sample_packets++; if (msr->sample_limit > 0) msr->sample_limit--; } /* If we're not using MRR and the sampling rate already * has a probability of >95%, we shouldn't be attempting * to use it, as this only wastes precious airtime */ if (!mrr_capable && (mi->r[ndx].stats.prob_ewma > MINSTREL_FRAC(95, 100))) return; mi->prev_sample = true; rate->idx = mi->r[ndx].rix; rate->count = minstrel_get_retry_count(&mi->r[ndx], info); } static void calc_rate_durations(enum nl80211_band band, struct minstrel_rate *d, struct ieee80211_rate *rate, struct cfg80211_chan_def *chandef) { int erp = !!(rate->flags & IEEE80211_RATE_ERP_G); int shift = ieee80211_chandef_get_shift(chandef); d->perfect_tx_time = ieee80211_frame_duration(band, 1200, DIV_ROUND_UP(rate->bitrate, 1 << shift), erp, 1, shift); d->ack_time = ieee80211_frame_duration(band, 10, DIV_ROUND_UP(rate->bitrate, 1 << shift), erp, 1, shift); } static void init_sample_table(struct minstrel_sta_info *mi) { unsigned int i, col, new_idx; u8 rnd[8]; mi->sample_column = 0; mi->sample_row = 0; memset(mi->sample_table, 0xff, SAMPLE_COLUMNS * mi->n_rates); for (col = 0; col < SAMPLE_COLUMNS; col++) { prandom_bytes(rnd, sizeof(rnd)); for (i = 0; i < mi->n_rates; i++) { new_idx = (i + rnd[i & 7]) % mi->n_rates; while (SAMPLE_TBL(mi, new_idx, col) != 0xff) new_idx = (new_idx + 1) % mi->n_rates; SAMPLE_TBL(mi, new_idx, col) = i; } } } static void minstrel_rate_init(void *priv, struct ieee80211_supported_band *sband, struct cfg80211_chan_def *chandef, struct ieee80211_sta *sta, void *priv_sta) { struct minstrel_sta_info *mi = priv_sta; struct minstrel_priv *mp = priv; struct ieee80211_rate *ctl_rate; unsigned int i, n = 0; unsigned int t_slot = 9; /* FIXME: get real slot time */ u32 rate_flags; mi->sta = sta; mi->lowest_rix = rate_lowest_index(sband, sta); ctl_rate = &sband->bitrates[mi->lowest_rix]; mi->sp_ack_dur = ieee80211_frame_duration(sband->band, 10, ctl_rate->bitrate, !!(ctl_rate->flags & IEEE80211_RATE_ERP_G), 1, ieee80211_chandef_get_shift(chandef)); rate_flags = ieee80211_chandef_rate_flags(&mp->hw->conf.chandef); memset(mi->max_tp_rate, 0, sizeof(mi->max_tp_rate)); mi->max_prob_rate = 0; for (i = 0; i < sband->n_bitrates; i++) { struct minstrel_rate *mr = &mi->r[n]; struct minstrel_rate_stats *mrs = &mi->r[n].stats; unsigned int tx_time = 0, tx_time_cts = 0, tx_time_rtscts = 0; unsigned int tx_time_single; unsigned int cw = mp->cw_min; int shift; if (!rate_supported(sta, sband->band, i)) continue; if ((rate_flags & sband->bitrates[i].flags) != rate_flags) continue; n++; memset(mr, 0, sizeof(*mr)); memset(mrs, 0, sizeof(*mrs)); mr->rix = i; shift = ieee80211_chandef_get_shift(chandef); mr->bitrate = DIV_ROUND_UP(sband->bitrates[i].bitrate, (1 << shift) * 5); calc_rate_durations(sband->band, mr, &sband->bitrates[i], chandef); /* calculate maximum number of retransmissions before * fallback (based on maximum segment size) */ mr->sample_limit = -1; mrs->retry_count = 1; mr->retry_count_cts = 1; mrs->retry_count_rtscts = 1; tx_time = mr->perfect_tx_time + mi->sp_ack_dur; do { /* add one retransmission */ tx_time_single = mr->ack_time + mr->perfect_tx_time; /* contention window */ tx_time_single += (t_slot * cw) >> 1; cw = min((cw << 1) | 1, mp->cw_max); tx_time += tx_time_single; tx_time_cts += tx_time_single + mi->sp_ack_dur; 
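/* the RTS/CTS variant below accounts for two protection frames, each approximated by the short ack duration */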
tx_time_rtscts += tx_time_single + 2 * mi->sp_ack_dur; if ((tx_time_cts < mp->segment_size) && (mr->retry_count_cts < mp->max_retry)) mr->retry_count_cts++; if ((tx_time_rtscts < mp->segment_size) && (mrs->retry_count_rtscts < mp->max_retry)) mrs->retry_count_rtscts++; } while ((tx_time < mp->segment_size) && (++mr->stats.retry_count < mp->max_retry)); mr->adjusted_retry_count = mrs->retry_count; if (!(sband->bitrates[i].flags & IEEE80211_RATE_ERP_G)) mr->retry_count_cts = mrs->retry_count; } for (i = n; i < sband->n_bitrates; i++) { struct minstrel_rate *mr = &mi->r[i]; mr->rix = -1; } mi->n_rates = n; mi->last_stats_update = jiffies; init_sample_table(mi); minstrel_update_rates(mp, mi); } static u32 minstrel_get_expected_throughput(void *priv_sta) { struct minstrel_sta_info *mi = priv_sta; struct minstrel_rate_stats *tmp_mrs; int idx = mi->max_tp_rate[0]; int tmp_cur_tp; /* convert pkt per sec in kbps (1200 is the average pkt size used for * computing cur_tp */ tmp_mrs = &mi->r[idx].stats; tmp_cur_tp = minstrel_get_tp_avg(&mi->r[idx], tmp_mrs->prob_ewma) * 10; tmp_cur_tp = tmp_cur_tp * 1200 * 8 / 1024; return tmp_cur_tp; } const struct rate_control_ops mac80211_minstrel = { .tx_status_ext = minstrel_tx_status, .get_rate = minstrel_get_rate, .rate_init = minstrel_rate_init, .get_expected_throughput = minstrel_get_expected_throughput, }; backport-iwlwifi-dkms-7906/net/mac80211/rc80211_minstrel.h000066400000000000000000000061251351064634500227150ustar00rootroot00000000000000/* * Copyright (C) 2008 Felix Fietkau * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #ifndef __RC_MINSTREL_H #define __RC_MINSTREL_H #define EWMA_LEVEL 96 /* ewma weighting factor [/EWMA_DIV] */ #define EWMA_DIV 128 #define SAMPLE_COLUMNS 10 /* number of columns in sample table */ /* scaled fraction values */ #define MINSTREL_SCALE 12 #define MINSTREL_FRAC(val, div) (((val) << MINSTREL_SCALE) / div) #define MINSTREL_TRUNC(val) ((val) >> MINSTREL_SCALE) /* number of highest throughput rates to consider*/ #define MAX_THR_RATES 4 /* * Perform EWMA (Exponentially Weighted Moving Average) calculation */ static inline int minstrel_ewma(int old, int new, int weight) { int diff, incr; diff = new - old; incr = (EWMA_DIV - weight) * diff / EWMA_DIV; return old + incr; } struct minstrel_rate_stats { /* current / last sampling period attempts/success counters */ u16 attempts, last_attempts; u16 success, last_success; /* total attempts/success counters */ u32 att_hist, succ_hist; /* prob_ewma - exponential weighted moving average of prob */ u16 prob_ewma; /* maximum retry counts */ u8 retry_count; u8 retry_count_rtscts; u8 sample_skipped; bool retry_updated; }; struct minstrel_rate { int bitrate; s8 rix; u8 retry_count_cts; u8 adjusted_retry_count; unsigned int perfect_tx_time; unsigned int ack_time; int sample_limit; struct minstrel_rate_stats stats; }; struct minstrel_sta_info { struct ieee80211_sta *sta; unsigned long last_stats_update; unsigned int sp_ack_dur; unsigned int rate_avg; unsigned int lowest_rix; u8 max_tp_rate[MAX_THR_RATES]; u8 max_prob_rate; unsigned int total_packets; unsigned int sample_packets; int sample_deferred; unsigned int sample_row; unsigned int sample_column; int n_rates; struct minstrel_rate *r; bool prev_sample; /* sampling table */ u8 *sample_table; }; struct minstrel_priv { struct ieee80211_hw *hw; bool has_mrr; unsigned int cw_min; unsigned int cw_max; unsigned 
int max_retry; unsigned int segment_size; unsigned int update_interval; unsigned int lookaround_rate; unsigned int lookaround_rate_mrr; u8 cck_rates[4]; #ifdef CPTCFG_MAC80211_DEBUGFS /* * enable fixed rate processing per RC * - write static index to debugfs:ieee80211/phyX/rc/fixed_rate_idx * - write -1 to enable RC processing again * - setting will be applied on next update */ u32 fixed_rate_idx; #endif }; struct minstrel_debugfs_info { size_t len; char buf[]; }; extern const struct rate_control_ops mac80211_minstrel; void minstrel_add_sta_debugfs(void *priv, void *priv_sta, struct dentry *dir); /* Recalculate success probabilities and counters for a given rate using EWMA */ void minstrel_calc_rate_stats(struct minstrel_rate_stats *mrs); int minstrel_get_tp_avg(struct minstrel_rate *mr, int prob_ewma); /* debugfs */ int minstrel_stats_open(struct inode *inode, struct file *file); int minstrel_stats_csv_open(struct inode *inode, struct file *file); #endif backport-iwlwifi-dkms-7906/net/mac80211/rc80211_minstrel_debugfs.c000066400000000000000000000135371351064634500244140ustar00rootroot00000000000000/* * Copyright (C) 2008 Felix Fietkau * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Based on minstrel.c: * Copyright (C) 2005-2007 Derek Smithies * Sponsored by Indranet Technologies Ltd * * Based on sample.c: * Copyright (c) 2005 John Bicket * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. 
*/ #include <linux/netdevice.h> #include <linux/types.h> #include <linux/skbuff.h> #include <linux/debugfs.h> #include <linux/ieee80211.h> #include <linux/slab.h> #include <linux/export.h> #include <net/mac80211.h> #include "rc80211_minstrel.h" int minstrel_stats_open(struct inode *inode, struct file *file) { struct minstrel_sta_info *mi = inode->i_private; struct minstrel_debugfs_info *ms; unsigned int i, tp_max, tp_avg, eprob; char *p; ms = kmalloc(2048, GFP_KERNEL); if (!ms) return -ENOMEM; file->private_data = ms; p = ms->buf; p += sprintf(p, "\n"); p += sprintf(p, "best __________rate_________ ____statistics___ ____last_____ ______sum-of________\n"); p += sprintf(p, "rate [name idx airtime max_tp] [avg(tp) avg(prob)] [retry|suc|att] [#success | #attempts]\n"); for (i = 0; i < mi->n_rates; i++) { struct minstrel_rate *mr = &mi->r[i]; struct minstrel_rate_stats *mrs = &mi->r[i].stats; *(p++) = (i == mi->max_tp_rate[0]) ? 'A' : ' '; *(p++) = (i == mi->max_tp_rate[1]) ? 'B' : ' '; *(p++) = (i == mi->max_tp_rate[2]) ? 'C' : ' '; *(p++) = (i == mi->max_tp_rate[3]) ? 'D' : ' '; *(p++) = (i == mi->max_prob_rate) ? 'P' : ' '; p += sprintf(p, " %3u%s ", mr->bitrate / 2, (mr->bitrate & 1 ? ".5" : " ")); p += sprintf(p, "%3u ", i); p += sprintf(p, "%6u ", mr->perfect_tx_time); tp_max = minstrel_get_tp_avg(mr, MINSTREL_FRAC(100,100)); tp_avg = minstrel_get_tp_avg(mr, mrs->prob_ewma); eprob = MINSTREL_TRUNC(mrs->prob_ewma * 1000); p += sprintf(p, "%4u.%1u %4u.%1u %3u.%1u" " %3u %3u %-3u " "%9llu %-9llu\n", tp_max / 10, tp_max % 10, tp_avg / 10, tp_avg % 10, eprob / 10, eprob % 10, mrs->retry_count, mrs->last_success, mrs->last_attempts, (unsigned long long)mrs->succ_hist, (unsigned long long)mrs->att_hist); } p += sprintf(p, "\nTotal packet count: ideal %d " "lookaround %d\n\n", mi->total_packets - mi->sample_packets, mi->sample_packets); ms->len = p - ms->buf; WARN_ON(ms->len + sizeof(*ms) > 2048); return 0; } int minstrel_stats_csv_open(struct inode *inode, struct file *file) { struct minstrel_sta_info *mi = inode->i_private; struct minstrel_debugfs_info *ms; unsigned int i, tp_max, tp_avg, eprob; char *p; ms = kmalloc(2048, GFP_KERNEL); if (!ms) return -ENOMEM; file->private_data = ms; p = ms->buf; for (i = 0; i < mi->n_rates; i++) { struct minstrel_rate *mr = &mi->r[i]; struct minstrel_rate_stats *mrs = &mi->r[i].stats; p += sprintf(p, "%s" ,((i == mi->max_tp_rate[0]) ? "A" : "")); p += sprintf(p, "%s" ,((i == mi->max_tp_rate[1]) ? "B" : "")); p += sprintf(p, "%s" ,((i == mi->max_tp_rate[2]) ? "C" : "")); p += sprintf(p, "%s" ,((i == mi->max_tp_rate[3]) ? "D" : "")); p += sprintf(p, "%s" ,((i == mi->max_prob_rate) ? "P" : "")); p += sprintf(p, ",%u%s", mr->bitrate / 2, (mr->bitrate & 1 ?
".5," : ",")); p += sprintf(p, "%u,", i); p += sprintf(p, "%u,",mr->perfect_tx_time); tp_max = minstrel_get_tp_avg(mr, MINSTREL_FRAC(100,100)); tp_avg = minstrel_get_tp_avg(mr, mrs->prob_ewma); eprob = MINSTREL_TRUNC(mrs->prob_ewma * 1000); p += sprintf(p, "%u.%u,%u.%u,%u.%u,%u,%u,%u," "%llu,%llu,%d,%d\n", tp_max / 10, tp_max % 10, tp_avg / 10, tp_avg % 10, eprob / 10, eprob % 10, mrs->retry_count, mrs->last_success, mrs->last_attempts, (unsigned long long)mrs->succ_hist, (unsigned long long)mrs->att_hist, mi->total_packets - mi->sample_packets, mi->sample_packets); } ms->len = p - ms->buf; WARN_ON(ms->len + sizeof(*ms) > 2048); return 0; } backport-iwlwifi-dkms-7906/net/mac80211/rc80211_minstrel_ht.c000066400000000000000000001213531351064634500234040ustar00rootroot00000000000000/* * Copyright (C) 2010-2013 Felix Fietkau * Copyright (C) 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/netdevice.h> #include <linux/types.h> #include <linux/skbuff.h> #include <linux/debugfs.h> #include <linux/random.h> #include <linux/moduleparam.h> #include <linux/ieee80211.h> #include <net/mac80211.h> #include "rate.h" #include "sta_info.h" #include "rc80211_minstrel.h" #include "rc80211_minstrel_ht.h" #define AVG_AMPDU_SIZE 16 #define AVG_PKT_SIZE 1200 /* Number of bits for an average sized packet */ #define MCS_NBITS ((AVG_PKT_SIZE * AVG_AMPDU_SIZE) << 3) /* Number of symbols for a packet with (bps) bits per symbol */ #define MCS_NSYMS(bps) DIV_ROUND_UP(MCS_NBITS, (bps)) /* Transmission time (nanoseconds) for a packet containing (syms) symbols */ #define MCS_SYMBOL_TIME(sgi, syms) \ (sgi ? \ ((syms) * 18000 + 4000) / 5 : /* syms * 3.6 us */ \ ((syms) * 1000) << 2 /* syms * 4 us */ \ ) /* Transmit duration for the raw data part of an average sized packet */ #define MCS_DURATION(streams, sgi, bps) \ (MCS_SYMBOL_TIME(sgi, MCS_NSYMS((streams) * (bps))) / AVG_AMPDU_SIZE) #define BW_20 0 #define BW_40 1 #define BW_80 2 /* * Define group sort order: HT40 -> SGI -> #streams */ #define GROUP_IDX(_streams, _sgi, _ht40) \ MINSTREL_HT_GROUP_0 + \ MINSTREL_MAX_STREAMS * 2 * _ht40 + \ MINSTREL_MAX_STREAMS * _sgi + \ _streams - 1 /* MCS rate information for an MCS group */ #define MCS_GROUP(_streams, _sgi, _ht40, _s) \ [GROUP_IDX(_streams, _sgi, _ht40)] = { \ .streams = _streams, \ .shift = _s, \ .flags = \ IEEE80211_TX_RC_MCS | \ (_sgi ? IEEE80211_TX_RC_SHORT_GI : 0) | \ (_ht40 ? IEEE80211_TX_RC_40_MHZ_WIDTH : 0), \ .duration = { \ MCS_DURATION(_streams, _sgi, _ht40 ? 54 : 26) >> _s, \ MCS_DURATION(_streams, _sgi, _ht40 ? 108 : 52) >> _s, \ MCS_DURATION(_streams, _sgi, _ht40 ? 162 : 78) >> _s, \ MCS_DURATION(_streams, _sgi, _ht40 ? 216 : 104) >> _s, \ MCS_DURATION(_streams, _sgi, _ht40 ? 324 : 156) >> _s, \ MCS_DURATION(_streams, _sgi, _ht40 ? 432 : 208) >> _s, \ MCS_DURATION(_streams, _sgi, _ht40 ? 486 : 234) >> _s, \ MCS_DURATION(_streams, _sgi, _ht40 ? 540 : 260) >> _s \ } \ } #define VHT_GROUP_IDX(_streams, _sgi, _bw) \ (MINSTREL_VHT_GROUP_0 + \ MINSTREL_MAX_STREAMS * 2 * (_bw) + \ MINSTREL_MAX_STREAMS * (_sgi) + \ (_streams) - 1) #define BW2VBPS(_bw, r3, r2, r1) \ (_bw == BW_80 ? r3 : _bw == BW_40 ? r2 : r1) #define VHT_GROUP(_streams, _sgi, _bw, _s) \ [VHT_GROUP_IDX(_streams, _sgi, _bw)] = { \ .streams = _streams, \ .shift = _s, \ .flags = \ IEEE80211_TX_RC_VHT_MCS | \ (_sgi ? IEEE80211_TX_RC_SHORT_GI : 0) | \ (_bw == BW_80 ? IEEE80211_TX_RC_80_MHZ_WIDTH : \ _bw == BW_40 ?
IEEE80211_TX_RC_40_MHZ_WIDTH : 0), \ .duration = { \ MCS_DURATION(_streams, _sgi, \ BW2VBPS(_bw, 117, 54, 26)) >> _s, \ MCS_DURATION(_streams, _sgi, \ BW2VBPS(_bw, 234, 108, 52)) >> _s, \ MCS_DURATION(_streams, _sgi, \ BW2VBPS(_bw, 351, 162, 78)) >> _s, \ MCS_DURATION(_streams, _sgi, \ BW2VBPS(_bw, 468, 216, 104)) >> _s, \ MCS_DURATION(_streams, _sgi, \ BW2VBPS(_bw, 702, 324, 156)) >> _s, \ MCS_DURATION(_streams, _sgi, \ BW2VBPS(_bw, 936, 432, 208)) >> _s, \ MCS_DURATION(_streams, _sgi, \ BW2VBPS(_bw, 1053, 486, 234)) >> _s, \ MCS_DURATION(_streams, _sgi, \ BW2VBPS(_bw, 1170, 540, 260)) >> _s, \ MCS_DURATION(_streams, _sgi, \ BW2VBPS(_bw, 1404, 648, 312)) >> _s, \ MCS_DURATION(_streams, _sgi, \ BW2VBPS(_bw, 1560, 720, 346)) >> _s \ } \ } #define CCK_DURATION(_bitrate, _short, _len) \ (1000 * (10 /* SIFS */ + \ (_short ? 72 + 24 : 144 + 48) + \ (8 * (_len + 4) * 10) / (_bitrate))) #define CCK_ACK_DURATION(_bitrate, _short) \ (CCK_DURATION((_bitrate > 10 ? 20 : 10), false, 60) + \ CCK_DURATION(_bitrate, _short, AVG_PKT_SIZE)) #define CCK_DURATION_LIST(_short, _s) \ CCK_ACK_DURATION(10, _short) >> _s, \ CCK_ACK_DURATION(20, _short) >> _s, \ CCK_ACK_DURATION(55, _short) >> _s, \ CCK_ACK_DURATION(110, _short) >> _s #define CCK_GROUP(_s) \ [MINSTREL_CCK_GROUP] = { \ .streams = 1, \ .flags = 0, \ .shift = _s, \ .duration = { \ CCK_DURATION_LIST(false, _s), \ CCK_DURATION_LIST(true, _s) \ } \ } static bool minstrel_vht_only = true; module_param(minstrel_vht_only, bool, 0644); MODULE_PARM_DESC(minstrel_vht_only, "Use only VHT rates when VHT is supported by sta."); /* * To enable sufficiently targeted rate sampling, MCS rates are divided into * groups, based on the number of streams and flags (HT40, SGI) that they * use. * * Sortorder has to be fixed for GROUP_IDX macro to be applicable: * BW -> SGI -> #streams */ const struct mcs_group minstrel_mcs_groups[] = { MCS_GROUP(1, 0, BW_20, 5), MCS_GROUP(2, 0, BW_20, 4), MCS_GROUP(3, 0, BW_20, 4), MCS_GROUP(1, 1, BW_20, 5), MCS_GROUP(2, 1, BW_20, 4), MCS_GROUP(3, 1, BW_20, 4), MCS_GROUP(1, 0, BW_40, 4), MCS_GROUP(2, 0, BW_40, 4), MCS_GROUP(3, 0, BW_40, 4), MCS_GROUP(1, 1, BW_40, 4), MCS_GROUP(2, 1, BW_40, 4), MCS_GROUP(3, 1, BW_40, 4), CCK_GROUP(8), VHT_GROUP(1, 0, BW_20, 5), VHT_GROUP(2, 0, BW_20, 4), VHT_GROUP(3, 0, BW_20, 4), VHT_GROUP(1, 1, BW_20, 5), VHT_GROUP(2, 1, BW_20, 4), VHT_GROUP(3, 1, BW_20, 4), VHT_GROUP(1, 0, BW_40, 4), VHT_GROUP(2, 0, BW_40, 4), VHT_GROUP(3, 0, BW_40, 4), VHT_GROUP(1, 1, BW_40, 4), VHT_GROUP(2, 1, BW_40, 4), VHT_GROUP(3, 1, BW_40, 4), VHT_GROUP(1, 0, BW_80, 4), VHT_GROUP(2, 0, BW_80, 4), VHT_GROUP(3, 0, BW_80, 4), VHT_GROUP(1, 1, BW_80, 4), VHT_GROUP(2, 1, BW_80, 4), VHT_GROUP(3, 1, BW_80, 4), }; static u8 sample_table[SAMPLE_COLUMNS][MCS_GROUP_RATES] __read_mostly; static void minstrel_ht_update_rates(struct minstrel_priv *mp, struct minstrel_ht_sta *mi); /* * Some VHT MCSes are invalid (when Ndbps / Nes is not an integer) * e.g for MCS9@20MHzx1Nss: Ndbps=8x52*(5/6) Nes=1 * * Returns the valid mcs map for struct minstrel_mcs_group_data.supported */ static u16 minstrel_get_valid_vht_rates(int bw, int nss, __le16 mcs_map) { u16 mask = 0; if (bw == BW_20) { if (nss != 3 && nss != 6) mask = BIT(9); } else if (bw == BW_80) { if (nss == 3 || nss == 7) mask = BIT(6); else if (nss == 6) mask = BIT(9); } else { WARN_ON(bw != BW_40); } switch ((le16_to_cpu(mcs_map) >> (2 * (nss - 1))) & 3) { case IEEE80211_VHT_MCS_SUPPORT_0_7: mask |= 0x300; break; case IEEE80211_VHT_MCS_SUPPORT_0_8: mask |= 0x200; break; case 
IEEE80211_VHT_MCS_SUPPORT_0_9: break; default: mask = 0x3ff; } return 0x3ff & ~mask; } /* * Look up an MCS group index based on mac80211 rate information */ static int minstrel_ht_get_group_idx(struct ieee80211_tx_rate *rate) { return GROUP_IDX((rate->idx / 8) + 1, !!(rate->flags & IEEE80211_TX_RC_SHORT_GI), !!(rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)); } static int minstrel_vht_get_group_idx(struct ieee80211_tx_rate *rate) { return VHT_GROUP_IDX(ieee80211_rate_get_vht_nss(rate), !!(rate->flags & IEEE80211_TX_RC_SHORT_GI), !!(rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) + 2*!!(rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)); } static struct minstrel_rate_stats * minstrel_ht_get_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi, struct ieee80211_tx_rate *rate) { int group, idx; if (rate->flags & IEEE80211_TX_RC_MCS) { group = minstrel_ht_get_group_idx(rate); idx = rate->idx % 8; } else if (rate->flags & IEEE80211_TX_RC_VHT_MCS) { group = minstrel_vht_get_group_idx(rate); idx = ieee80211_rate_get_vht_mcs(rate); } else { group = MINSTREL_CCK_GROUP; for (idx = 0; idx < ARRAY_SIZE(mp->cck_rates); idx++) if (rate->idx == mp->cck_rates[idx]) break; /* short preamble */ if ((mi->supported[group] & BIT(idx + 4)) && (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)) idx += 4; } return &mi->groups[group].rates[idx]; } static inline struct minstrel_rate_stats * minstrel_get_ratestats(struct minstrel_ht_sta *mi, int index) { return &mi->groups[index / MCS_GROUP_RATES].rates[index % MCS_GROUP_RATES]; } /* * Return current throughput based on the average A-MPDU length, taking into * account the expected number of retransmissions and their expected length */ int minstrel_ht_get_tp_avg(struct minstrel_ht_sta *mi, int group, int rate, int prob_ewma) { unsigned int nsecs = 0; /* do not account throughput if success prob is below 10% */ if (prob_ewma < MINSTREL_FRAC(10, 100)) return 0; if (group != MINSTREL_CCK_GROUP) nsecs = 1000 * mi->overhead / MINSTREL_TRUNC(mi->avg_ampdu_len); nsecs += minstrel_mcs_groups[group].duration[rate] << minstrel_mcs_groups[group].shift; /* * For the throughput calculation, limit the probability value to 90% to * account for collision related packet error rate fluctuation * (prob is scaled - see MINSTREL_FRAC above) */ if (prob_ewma > MINSTREL_FRAC(90, 100)) return MINSTREL_TRUNC(100000 * ((MINSTREL_FRAC(90, 100) * 1000) / nsecs)); else return MINSTREL_TRUNC(100000 * ((prob_ewma * 1000) / nsecs)); } /* * Find & sort topmost throughput rates * * If multiple rates provide equal throughput the sorting is based on their * current success probability. Higher success probability is preferred among * MCS groups; CCK rates do not provide aggregation and therefore come last.
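* For example, of two rates with equal estimated throughput, the one with 80% delivery probability sorts ahead of one with 60%.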
*/ static void minstrel_ht_sort_best_tp_rates(struct minstrel_ht_sta *mi, u16 index, u16 *tp_list) { int cur_group, cur_idx, cur_tp_avg, cur_prob; int tmp_group, tmp_idx, tmp_tp_avg, tmp_prob; int j = MAX_THR_RATES; cur_group = index / MCS_GROUP_RATES; cur_idx = index % MCS_GROUP_RATES; cur_prob = mi->groups[cur_group].rates[cur_idx].prob_ewma; cur_tp_avg = minstrel_ht_get_tp_avg(mi, cur_group, cur_idx, cur_prob); do { tmp_group = tp_list[j - 1] / MCS_GROUP_RATES; tmp_idx = tp_list[j - 1] % MCS_GROUP_RATES; tmp_prob = mi->groups[tmp_group].rates[tmp_idx].prob_ewma; tmp_tp_avg = minstrel_ht_get_tp_avg(mi, tmp_group, tmp_idx, tmp_prob); if (cur_tp_avg < tmp_tp_avg || (cur_tp_avg == tmp_tp_avg && cur_prob <= tmp_prob)) break; j--; } while (j > 0); if (j < MAX_THR_RATES - 1) { memmove(&tp_list[j + 1], &tp_list[j], (sizeof(*tp_list) * (MAX_THR_RATES - (j + 1)))); } if (j < MAX_THR_RATES) tp_list[j] = index; } /* * Find and set the topmost probability rate per sta and per group */ static void minstrel_ht_set_best_prob_rate(struct minstrel_ht_sta *mi, u16 index) { struct minstrel_mcs_group_data *mg; struct minstrel_rate_stats *mrs; int tmp_group, tmp_idx, tmp_tp_avg, tmp_prob; int max_tp_group, cur_tp_avg, cur_group, cur_idx; int max_gpr_group, max_gpr_idx; int max_gpr_tp_avg, max_gpr_prob; cur_group = index / MCS_GROUP_RATES; cur_idx = index % MCS_GROUP_RATES; mg = &mi->groups[index / MCS_GROUP_RATES]; mrs = &mg->rates[index % MCS_GROUP_RATES]; tmp_group = mi->max_prob_rate / MCS_GROUP_RATES; tmp_idx = mi->max_prob_rate % MCS_GROUP_RATES; tmp_prob = mi->groups[tmp_group].rates[tmp_idx].prob_ewma; tmp_tp_avg = minstrel_ht_get_tp_avg(mi, tmp_group, tmp_idx, tmp_prob); /* if max_tp_rate[0] is from an MCS group, max_prob_rate must be selected * from an MCS group as well, since CCK_GROUP rates do not allow aggregation */ max_tp_group = mi->max_tp_rate[0] / MCS_GROUP_RATES; if((index / MCS_GROUP_RATES == MINSTREL_CCK_GROUP) && (max_tp_group != MINSTREL_CCK_GROUP)) return; max_gpr_group = mg->max_group_prob_rate / MCS_GROUP_RATES; max_gpr_idx = mg->max_group_prob_rate % MCS_GROUP_RATES; max_gpr_prob = mi->groups[max_gpr_group].rates[max_gpr_idx].prob_ewma; if (mrs->prob_ewma > MINSTREL_FRAC(75, 100)) { cur_tp_avg = minstrel_ht_get_tp_avg(mi, cur_group, cur_idx, mrs->prob_ewma); if (cur_tp_avg > tmp_tp_avg) mi->max_prob_rate = index; max_gpr_tp_avg = minstrel_ht_get_tp_avg(mi, max_gpr_group, max_gpr_idx, max_gpr_prob); if (cur_tp_avg > max_gpr_tp_avg) mg->max_group_prob_rate = index; } else { if (mrs->prob_ewma > tmp_prob) mi->max_prob_rate = index; if (mrs->prob_ewma > max_gpr_prob) mg->max_group_prob_rate = index; } } /* * Assign new rate set per sta and use CCK rates only if the fastest * rate (max_tp_rate[0]) is from CCK group. This prohibits such sorted * rate sets where MCS and CCK rates are mixed, because CCK rates * cannot use aggregation.
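* In practice the CCK set wins only when its fastest entry beats the fastest MCS entry in estimated throughput, i.e. on a very poor link.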
*/ static void minstrel_ht_assign_best_tp_rates(struct minstrel_ht_sta *mi, u16 tmp_mcs_tp_rate[MAX_THR_RATES], u16 tmp_cck_tp_rate[MAX_THR_RATES]) { unsigned int tmp_group, tmp_idx, tmp_cck_tp, tmp_mcs_tp, tmp_prob; int i; tmp_group = tmp_cck_tp_rate[0] / MCS_GROUP_RATES; tmp_idx = tmp_cck_tp_rate[0] % MCS_GROUP_RATES; tmp_prob = mi->groups[tmp_group].rates[tmp_idx].prob_ewma; tmp_cck_tp = minstrel_ht_get_tp_avg(mi, tmp_group, tmp_idx, tmp_prob); tmp_group = tmp_mcs_tp_rate[0] / MCS_GROUP_RATES; tmp_idx = tmp_mcs_tp_rate[0] % MCS_GROUP_RATES; tmp_prob = mi->groups[tmp_group].rates[tmp_idx].prob_ewma; tmp_mcs_tp = minstrel_ht_get_tp_avg(mi, tmp_group, tmp_idx, tmp_prob); if (tmp_cck_tp > tmp_mcs_tp) { for(i = 0; i < MAX_THR_RATES; i++) { minstrel_ht_sort_best_tp_rates(mi, tmp_cck_tp_rate[i], tmp_mcs_tp_rate); } } } /* * Try to increase robustness of max_prob rate by decreasing the number of * streams if possible. */ static inline void minstrel_ht_prob_rate_reduce_streams(struct minstrel_ht_sta *mi) { struct minstrel_mcs_group_data *mg; int tmp_max_streams, group, tmp_idx, tmp_prob; int tmp_tp = 0; tmp_max_streams = minstrel_mcs_groups[mi->max_tp_rate[0] / MCS_GROUP_RATES].streams; for (group = 0; group < ARRAY_SIZE(minstrel_mcs_groups); group++) { mg = &mi->groups[group]; if (!mi->supported[group] || group == MINSTREL_CCK_GROUP) continue; tmp_idx = mg->max_group_prob_rate % MCS_GROUP_RATES; tmp_prob = mi->groups[group].rates[tmp_idx].prob_ewma; if (tmp_tp < minstrel_ht_get_tp_avg(mi, group, tmp_idx, tmp_prob) && (minstrel_mcs_groups[group].streams < tmp_max_streams)) { mi->max_prob_rate = mg->max_group_prob_rate; tmp_tp = minstrel_ht_get_tp_avg(mi, group, tmp_idx, tmp_prob); } } } /* * Update rate statistics and select new primary rates * * Rules for rate selection: * - max_prob_rate must use only one stream, as a tradeoff between delivery * probability and throughput during strong fluctuations * - as long as the max prob rate has a probability of more than 75%, pick * higher throughput rates, even if the probability is a bit lower */ static void minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi) { struct minstrel_mcs_group_data *mg; struct minstrel_rate_stats *mrs; int group, i, j, cur_prob; u16 tmp_mcs_tp_rate[MAX_THR_RATES], tmp_group_tp_rate[MAX_THR_RATES]; u16 tmp_cck_tp_rate[MAX_THR_RATES], index; if (mi->ampdu_packets > 0) { mi->avg_ampdu_len = minstrel_ewma(mi->avg_ampdu_len, MINSTREL_FRAC(mi->ampdu_len, mi->ampdu_packets), EWMA_LEVEL); mi->ampdu_len = 0; mi->ampdu_packets = 0; } mi->sample_slow = 0; mi->sample_count = 0; /* Initialize global rate indexes */ for(j = 0; j < MAX_THR_RATES; j++){ tmp_mcs_tp_rate[j] = 0; tmp_cck_tp_rate[j] = 0; } /* Find best rate sets within all MCS groups */ for (group = 0; group < ARRAY_SIZE(minstrel_mcs_groups); group++) { mg = &mi->groups[group]; if (!mi->supported[group]) continue; mi->sample_count++; /* (re)Initialize group rate indexes */ for(j = 0; j < MAX_THR_RATES; j++) tmp_group_tp_rate[j] = group; for (i = 0; i < MCS_GROUP_RATES; i++) { if (!(mi->supported[group] & BIT(i))) continue; index = MCS_GROUP_RATES * group + i; mrs = &mg->rates[i]; mrs->retry_updated = false; minstrel_calc_rate_stats(mrs); cur_prob = mrs->prob_ewma; if (minstrel_ht_get_tp_avg(mi, group, i, cur_prob) == 0) continue; /* Find max throughput rate set */ if (group != MINSTREL_CCK_GROUP) { minstrel_ht_sort_best_tp_rates(mi, index, tmp_mcs_tp_rate); } else if (group == MINSTREL_CCK_GROUP) { minstrel_ht_sort_best_tp_rates(mi, index, tmp_cck_tp_rate); }
/* Find max throughput rate set within a group */ minstrel_ht_sort_best_tp_rates(mi, index, tmp_group_tp_rate); /* Find max probability rate per group and global */ minstrel_ht_set_best_prob_rate(mi, index); } memcpy(mg->max_group_tp_rate, tmp_group_tp_rate, sizeof(mg->max_group_tp_rate)); } /* Assign new rate set per sta */ minstrel_ht_assign_best_tp_rates(mi, tmp_mcs_tp_rate, tmp_cck_tp_rate); memcpy(mi->max_tp_rate, tmp_mcs_tp_rate, sizeof(mi->max_tp_rate)); /* Try to increase robustness of max_prob_rate*/ minstrel_ht_prob_rate_reduce_streams(mi); /* try to sample all available rates during each interval */ mi->sample_count *= 8; #ifdef CPTCFG_MAC80211_DEBUGFS /* use fixed index if set */ if (mp->fixed_rate_idx != -1) { for (i = 0; i < 4; i++) mi->max_tp_rate[i] = mp->fixed_rate_idx; mi->max_prob_rate = mp->fixed_rate_idx; } #endif /* Reset update timer */ mi->last_stats_update = jiffies; } static bool minstrel_ht_txstat_valid(struct minstrel_priv *mp, struct ieee80211_tx_rate *rate) { if (rate->idx < 0) return false; if (!rate->count) return false; if (rate->flags & IEEE80211_TX_RC_MCS || rate->flags & IEEE80211_TX_RC_VHT_MCS) return true; return rate->idx == mp->cck_rates[0] || rate->idx == mp->cck_rates[1] || rate->idx == mp->cck_rates[2] || rate->idx == mp->cck_rates[3]; } static void minstrel_set_next_sample_idx(struct minstrel_ht_sta *mi) { struct minstrel_mcs_group_data *mg; for (;;) { mi->sample_group++; mi->sample_group %= ARRAY_SIZE(minstrel_mcs_groups); mg = &mi->groups[mi->sample_group]; if (!mi->supported[mi->sample_group]) continue; if (++mg->index >= MCS_GROUP_RATES) { mg->index = 0; if (++mg->column >= ARRAY_SIZE(sample_table)) mg->column = 0; } break; } } static void minstrel_downgrade_rate(struct minstrel_ht_sta *mi, u16 *idx, bool primary) { int group, orig_group; orig_group = group = *idx / MCS_GROUP_RATES; while (group > 0) { group--; if (!mi->supported[group]) continue; if (minstrel_mcs_groups[group].streams > minstrel_mcs_groups[orig_group].streams) continue; if (primary) *idx = mi->groups[group].max_group_tp_rate[0]; else *idx = mi->groups[group].max_group_tp_rate[1]; break; } } static void minstrel_aggr_check(struct ieee80211_sta *pubsta, struct sk_buff *skb) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; struct sta_info *sta = container_of(pubsta, struct sta_info, sta); u16 tid; if (skb_get_queue_mapping(skb) == IEEE80211_AC_VO) return; if (unlikely(!ieee80211_is_data_qos(hdr->frame_control))) return; if (unlikely(skb->protocol == cpu_to_be16(ETH_P_PAE))) return; tid = ieee80211_get_tid(hdr); if (likely(sta->ampdu_mlme.tid_tx[tid])) return; ieee80211_start_tx_ba_session(pubsta, tid, 0); } static void minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband, void *priv_sta, struct ieee80211_tx_status *st) { struct ieee80211_tx_info *info = st->info; struct minstrel_ht_sta_priv *msp = priv_sta; struct minstrel_ht_sta *mi = &msp->ht; struct ieee80211_tx_rate *ar = info->status.rates; struct minstrel_rate_stats *rate, *rate2; struct minstrel_priv *mp = priv; bool last, update = false; int i; if (!msp->is_ht) return mac80211_minstrel.tx_status_ext(priv, sband, &msp->legacy, st); /* This packet was aggregated but doesn't carry status info */ if ((info->flags & IEEE80211_TX_CTL_AMPDU) && !(info->flags & IEEE80211_TX_STAT_AMPDU)) return; if (!(info->flags & IEEE80211_TX_STAT_AMPDU)) { info->status.ampdu_ack_len = (info->flags & IEEE80211_TX_STAT_ACK ? 
1 : 0); info->status.ampdu_len = 1; } mi->ampdu_packets++; mi->ampdu_len += info->status.ampdu_len; if (!mi->sample_wait && !mi->sample_tries && mi->sample_count > 0) { mi->sample_wait = 16 + 2 * MINSTREL_TRUNC(mi->avg_ampdu_len); mi->sample_tries = 1; mi->sample_count--; } if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) mi->sample_packets += info->status.ampdu_len; last = !minstrel_ht_txstat_valid(mp, &ar[0]); for (i = 0; !last; i++) { last = (i == IEEE80211_TX_MAX_RATES - 1) || !minstrel_ht_txstat_valid(mp, &ar[i + 1]); rate = minstrel_ht_get_stats(mp, mi, &ar[i]); if (last) rate->success += info->status.ampdu_ack_len; rate->attempts += ar[i].count * info->status.ampdu_len; } /* * check for sudden death of spatial multiplexing, * downgrade to a lower number of streams if necessary. */ rate = minstrel_get_ratestats(mi, mi->max_tp_rate[0]); if (rate->attempts > 30 && MINSTREL_FRAC(rate->success, rate->attempts) < MINSTREL_FRAC(20, 100)) { minstrel_downgrade_rate(mi, &mi->max_tp_rate[0], true); update = true; } rate2 = minstrel_get_ratestats(mi, mi->max_tp_rate[1]); if (rate2->attempts > 30 && MINSTREL_FRAC(rate2->success, rate2->attempts) < MINSTREL_FRAC(20, 100)) { minstrel_downgrade_rate(mi, &mi->max_tp_rate[1], false); update = true; } if (time_after(jiffies, mi->last_stats_update + (mp->update_interval / 2 * HZ) / 1000)) { update = true; minstrel_ht_update_stats(mp, mi); } if (update) minstrel_ht_update_rates(mp, mi); } static inline int minstrel_get_duration(int index) { const struct mcs_group *group = &minstrel_mcs_groups[index / MCS_GROUP_RATES]; unsigned int duration = group->duration[index % MCS_GROUP_RATES]; return duration << group->shift; } static void minstrel_calc_retransmit(struct minstrel_priv *mp, struct minstrel_ht_sta *mi, int index) { struct minstrel_rate_stats *mrs; unsigned int tx_time, tx_time_rtscts, tx_time_data; unsigned int cw = mp->cw_min; unsigned int ctime = 0; unsigned int t_slot = 9; /* FIXME */ unsigned int ampdu_len = MINSTREL_TRUNC(mi->avg_ampdu_len); unsigned int overhead = 0, overhead_rtscts = 0; mrs = minstrel_get_ratestats(mi, index); if (mrs->prob_ewma < MINSTREL_FRAC(1, 10)) { mrs->retry_count = 1; mrs->retry_count_rtscts = 1; return; } mrs->retry_count = 2; mrs->retry_count_rtscts = 2; mrs->retry_updated = true; tx_time_data = minstrel_get_duration(index) * ampdu_len / 1000; /* Contention time for first 2 tries */ ctime = (t_slot * cw) >> 1; cw = min((cw << 1) | 1, mp->cw_max); ctime += (t_slot * cw) >> 1; cw = min((cw << 1) | 1, mp->cw_max); if (index / MCS_GROUP_RATES != MINSTREL_CCK_GROUP) { overhead = mi->overhead; overhead_rtscts = mi->overhead_rtscts; } /* Total TX time for data and Contention after first 2 tries */ tx_time = ctime + 2 * (overhead + tx_time_data); tx_time_rtscts = ctime + 2 * (overhead_rtscts + tx_time_data); /* See how many more tries we can fit inside segment size */ do { /* Contention time for this try */ ctime = (t_slot * cw) >> 1; cw = min((cw << 1) | 1, mp->cw_max); /* Total TX time after this try */ tx_time += ctime + overhead + tx_time_data; tx_time_rtscts += ctime + overhead_rtscts + tx_time_data; if (tx_time_rtscts < mp->segment_size) mrs->retry_count_rtscts++; } while ((tx_time < mp->segment_size) && (++mrs->retry_count < mp->max_retry)); } static void minstrel_ht_set_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi, struct ieee80211_sta_rates *ratetbl, int offset, int index) { const struct mcs_group *group = &minstrel_mcs_groups[index / MCS_GROUP_RATES]; struct minstrel_rate_stats *mrs; u8 idx; u16 
flags = group->flags; mrs = minstrel_get_ratestats(mi, index); if (!mrs->retry_updated) minstrel_calc_retransmit(mp, mi, index); if (mrs->prob_ewma < MINSTREL_FRAC(20, 100) || !mrs->retry_count) { ratetbl->rate[offset].count = 2; ratetbl->rate[offset].count_rts = 2; ratetbl->rate[offset].count_cts = 2; } else { ratetbl->rate[offset].count = mrs->retry_count; ratetbl->rate[offset].count_cts = mrs->retry_count; ratetbl->rate[offset].count_rts = mrs->retry_count_rtscts; } if (index / MCS_GROUP_RATES == MINSTREL_CCK_GROUP) idx = mp->cck_rates[index % ARRAY_SIZE(mp->cck_rates)]; else if (flags & IEEE80211_TX_RC_VHT_MCS) idx = ((group->streams - 1) << 4) | ((index % MCS_GROUP_RATES) & 0xF); else idx = index % MCS_GROUP_RATES + (group->streams - 1) * 8; /* enable RTS/CTS if needed: * - if station is in dynamic SMPS (and streams > 1) * - for fallback rates, to increase chances of getting through */ if (offset > 0 || (mi->sta->smps_mode == IEEE80211_SMPS_DYNAMIC && group->streams > 1)) { ratetbl->rate[offset].count = ratetbl->rate[offset].count_rts; flags |= IEEE80211_TX_RC_USE_RTS_CTS; } ratetbl->rate[offset].idx = idx; ratetbl->rate[offset].flags = flags; } static inline int minstrel_ht_get_prob_ewma(struct minstrel_ht_sta *mi, int rate) { int group = rate / MCS_GROUP_RATES; rate %= MCS_GROUP_RATES; return mi->groups[group].rates[rate].prob_ewma; } static int minstrel_ht_get_max_amsdu_len(struct minstrel_ht_sta *mi) { int group = mi->max_prob_rate / MCS_GROUP_RATES; const struct mcs_group *g = &minstrel_mcs_groups[group]; int rate = mi->max_prob_rate % MCS_GROUP_RATES; unsigned int duration; /* Disable A-MSDU if max_prob_rate is bad */ if (mi->groups[group].rates[rate].prob_ewma < MINSTREL_FRAC(50, 100)) return 1; duration = g->duration[rate]; duration <<= g->shift; /* If the rate is slower than single-stream MCS1, make A-MSDU limit small */ if (duration > MCS_DURATION(1, 0, 52)) return 500; /* * If the rate is slower than single-stream MCS4, limit A-MSDU to usual * data packet size */ if (duration > MCS_DURATION(1, 0, 104)) return 1600; /* * If the rate is slower than single-stream MCS7, or if the max throughput * rate success probability is less than 75%, limit A-MSDU to twice the usual * data packet size */ if (duration > MCS_DURATION(1, 0, 260) || (minstrel_ht_get_prob_ewma(mi, mi->max_tp_rate[0]) < MINSTREL_FRAC(75, 100))) return 3200; /* * HT A-MPDU limits maximum MPDU size under BA agreement to 4095 bytes. * Since aggregation sessions are started/stopped without txq flush, use * the limit here to avoid the complexity of having to de-aggregate * packets in the queue. 
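* (IEEE80211_MAX_MPDU_LEN_HT_BA is 4095 bytes, the largest MPDU size an HT block-ack session may carry.)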
*/ if (!mi->sta->vht_cap.vht_supported) return IEEE80211_MAX_MPDU_LEN_HT_BA; /* unlimited */ return 0; } static void minstrel_ht_update_rates(struct minstrel_priv *mp, struct minstrel_ht_sta *mi) { struct ieee80211_sta_rates *rates; int i = 0; rates = kzalloc(sizeof(*rates), GFP_ATOMIC); if (!rates) return; /* Start with max_tp_rate[0] */ minstrel_ht_set_rate(mp, mi, rates, i++, mi->max_tp_rate[0]); if (mp->hw->max_rates >= 3) { /* At least 3 tx rates supported, use max_tp_rate[1] next */ minstrel_ht_set_rate(mp, mi, rates, i++, mi->max_tp_rate[1]); } if (mp->hw->max_rates >= 2) { /* * At least 2 tx rates supported, use max_prob_rate next */ minstrel_ht_set_rate(mp, mi, rates, i++, mi->max_prob_rate); } mi->sta->max_rc_amsdu_len = minstrel_ht_get_max_amsdu_len(mi); rates->rate[i].idx = -1; rate_control_set_rates(mp->hw, mi->sta, rates); } static int minstrel_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi) { struct minstrel_rate_stats *mrs; struct minstrel_mcs_group_data *mg; unsigned int sample_dur, sample_group, cur_max_tp_streams; int tp_rate1, tp_rate2; int sample_idx = 0; if (mi->sample_wait > 0) { mi->sample_wait--; return -1; } if (!mi->sample_tries) return -1; sample_group = mi->sample_group; mg = &mi->groups[sample_group]; sample_idx = sample_table[mg->column][mg->index]; minstrel_set_next_sample_idx(mi); if (!(mi->supported[sample_group] & BIT(sample_idx))) return -1; mrs = &mg->rates[sample_idx]; sample_idx += sample_group * MCS_GROUP_RATES; /* Set tp_rate1, tp_rate2 to the highest / second highest max_tp_rate */ if (minstrel_get_duration(mi->max_tp_rate[0]) > minstrel_get_duration(mi->max_tp_rate[1])) { tp_rate1 = mi->max_tp_rate[1]; tp_rate2 = mi->max_tp_rate[0]; } else { tp_rate1 = mi->max_tp_rate[0]; tp_rate2 = mi->max_tp_rate[1]; } /* * Sampling might add some overhead (RTS, no aggregation) * to the frame. Hence, don't use sampling for the highest currently * used highest throughput or probability rate. */ if (sample_idx == mi->max_tp_rate[0] || sample_idx == mi->max_prob_rate) return -1; /* * Do not sample if the probability is already higher than 95%, * or if the rate is 3 times slower than the current max probability * rate, to avoid wasting airtime. */ sample_dur = minstrel_get_duration(sample_idx); if (mrs->prob_ewma > MINSTREL_FRAC(95, 100) || minstrel_get_duration(mi->max_prob_rate) * 3 < sample_dur) return -1; /* * Make sure that lower rates get sampled only occasionally, * if the link is working perfectly. 
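 * A slow rate only gets sampled once it has been skipped roughly 20 times * (mrs->sample_skipped), and mi->sample_slow caps how many such slow * samples are taken before further ones are suppressed, as checked below.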
*/ cur_max_tp_streams = minstrel_mcs_groups[tp_rate1 / MCS_GROUP_RATES].streams; if (sample_dur >= minstrel_get_duration(tp_rate2) && (cur_max_tp_streams - 1 < minstrel_mcs_groups[sample_group].streams || sample_dur >= minstrel_get_duration(mi->max_prob_rate))) { if (mrs->sample_skipped < 20) return -1; if (mi->sample_slow++ > 2) return -1; } mi->sample_tries--; return sample_idx; } static void minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta, struct ieee80211_tx_rate_control *txrc) { const struct mcs_group *sample_group; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(txrc->skb); struct ieee80211_tx_rate *rate = &info->status.rates[0]; struct minstrel_ht_sta_priv *msp = priv_sta; struct minstrel_ht_sta *mi = &msp->ht; struct minstrel_priv *mp = priv; int sample_idx; if (!msp->is_ht) return mac80211_minstrel.get_rate(priv, sta, &msp->legacy, txrc); if (!(info->flags & IEEE80211_TX_CTL_AMPDU) && mi->max_prob_rate / MCS_GROUP_RATES != MINSTREL_CCK_GROUP) minstrel_aggr_check(sta, txrc->skb); info->flags |= mi->tx_flags; #ifdef CPTCFG_MAC80211_DEBUGFS if (mp->fixed_rate_idx != -1) return; #endif /* Don't use EAPOL frames for sampling on non-mrr hw */ if (mp->hw->max_rates == 1 && (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO)) sample_idx = -1; else sample_idx = minstrel_get_sample_rate(mp, mi); mi->total_packets++; /* wraparound */ if (mi->total_packets == ~0) { mi->total_packets = 0; mi->sample_packets = 0; } if (sample_idx < 0) return; sample_group = &minstrel_mcs_groups[sample_idx / MCS_GROUP_RATES]; sample_idx %= MCS_GROUP_RATES; if (sample_group == &minstrel_mcs_groups[MINSTREL_CCK_GROUP] && (sample_idx >= 4) != txrc->short_preamble) return; info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE; rate->count = 1; if (sample_group == &minstrel_mcs_groups[MINSTREL_CCK_GROUP]) { int idx = sample_idx % ARRAY_SIZE(mp->cck_rates); rate->idx = mp->cck_rates[idx]; } else if (sample_group->flags & IEEE80211_TX_RC_VHT_MCS) { ieee80211_rate_set_vht(rate, sample_idx % MCS_GROUP_RATES, sample_group->streams); } else { rate->idx = sample_idx + (sample_group->streams - 1) * 8; } rate->flags = sample_group->flags; } static void minstrel_ht_update_cck(struct minstrel_priv *mp, struct minstrel_ht_sta *mi, struct ieee80211_supported_band *sband, struct ieee80211_sta *sta) { int i; if (sband->band != NL80211_BAND_2GHZ) return; if (!ieee80211_hw_check(mp->hw, SUPPORTS_HT_CCK_RATES)) return; mi->cck_supported = 0; mi->cck_supported_short = 0; for (i = 0; i < 4; i++) { if (!rate_supported(sta, sband->band, mp->cck_rates[i])) continue; mi->cck_supported |= BIT(i); if (sband->bitrates[i].flags & IEEE80211_RATE_SHORT_PREAMBLE) mi->cck_supported_short |= BIT(i); } mi->supported[MINSTREL_CCK_GROUP] = mi->cck_supported; } static void minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband, struct cfg80211_chan_def *chandef, struct ieee80211_sta *sta, void *priv_sta) { struct minstrel_priv *mp = priv; struct minstrel_ht_sta_priv *msp = priv_sta; struct minstrel_ht_sta *mi = &msp->ht; struct ieee80211_mcs_info *mcs = &sta->ht_cap.mcs; u16 ht_cap = sta->ht_cap.cap; struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap; int use_vht; int n_supported = 0; int ack_dur; int stbc; int i; bool ldpc; /* fall back to the old minstrel for legacy stations */ if (!sta->ht_cap.ht_supported) goto use_legacy; BUILD_BUG_ON(ARRAY_SIZE(minstrel_mcs_groups) != MINSTREL_GROUPS_NB); if (vht_cap->vht_supported) use_vht = vht_cap->vht_mcs.tx_mcs_map != cpu_to_le16(~0); else use_vht = 0; 
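/* Note: a tx_mcs_map of all ones (~0) encodes "MCS not supported" for every * stream count, so use_vht is set only when the peer actually advertises at * least one usable VHT MCS. */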
msp->is_ht = true; memset(mi, 0, sizeof(*mi)); mi->sta = sta; mi->last_stats_update = jiffies; ack_dur = ieee80211_frame_duration(sband->band, 10, 60, 1, 1, 0); mi->overhead = ieee80211_frame_duration(sband->band, 0, 60, 1, 1, 0); mi->overhead += ack_dur; mi->overhead_rtscts = mi->overhead + 2 * ack_dur; mi->avg_ampdu_len = MINSTREL_FRAC(1, 1); /* When using MRR, sample more on the first attempt, without delay */ if (mp->has_mrr) { mi->sample_count = 16; mi->sample_wait = 0; } else { mi->sample_count = 8; mi->sample_wait = 8; } mi->sample_tries = 4; if (!use_vht) { stbc = (ht_cap & IEEE80211_HT_CAP_RX_STBC) >> IEEE80211_HT_CAP_RX_STBC_SHIFT; ldpc = ht_cap & IEEE80211_HT_CAP_LDPC_CODING; } else { stbc = (vht_cap->cap & IEEE80211_VHT_CAP_RXSTBC_MASK) >> IEEE80211_VHT_CAP_RXSTBC_SHIFT; ldpc = vht_cap->cap & IEEE80211_VHT_CAP_RXLDPC; } mi->tx_flags |= stbc << IEEE80211_TX_CTL_STBC_SHIFT; if (ldpc) mi->tx_flags |= IEEE80211_TX_CTL_LDPC; for (i = 0; i < ARRAY_SIZE(mi->groups); i++) { u32 gflags = minstrel_mcs_groups[i].flags; int bw, nss; mi->supported[i] = 0; if (i == MINSTREL_CCK_GROUP) { minstrel_ht_update_cck(mp, mi, sband, sta); continue; } if (gflags & IEEE80211_TX_RC_SHORT_GI) { if (gflags & IEEE80211_TX_RC_40_MHZ_WIDTH) { if (!(ht_cap & IEEE80211_HT_CAP_SGI_40)) continue; } else { if (!(ht_cap & IEEE80211_HT_CAP_SGI_20)) continue; } } if (gflags & IEEE80211_TX_RC_40_MHZ_WIDTH && sta->bandwidth < IEEE80211_STA_RX_BW_40) continue; nss = minstrel_mcs_groups[i].streams; /* Mark MCS > 7 as unsupported if STA is in static SMPS mode */ if (sta->smps_mode == IEEE80211_SMPS_STATIC && nss > 1) continue; /* HT rate */ if (gflags & IEEE80211_TX_RC_MCS) { if (use_vht && minstrel_vht_only) continue; mi->supported[i] = mcs->rx_mask[nss - 1]; if (mi->supported[i]) n_supported++; continue; } /* VHT rate */ if (!vht_cap->vht_supported || WARN_ON(!(gflags & IEEE80211_TX_RC_VHT_MCS)) || WARN_ON(gflags & IEEE80211_TX_RC_160_MHZ_WIDTH)) continue; if (gflags & IEEE80211_TX_RC_80_MHZ_WIDTH) { if (sta->bandwidth < IEEE80211_STA_RX_BW_80 || ((gflags & IEEE80211_TX_RC_SHORT_GI) && !(vht_cap->cap & IEEE80211_VHT_CAP_SHORT_GI_80))) { continue; } } if (gflags & IEEE80211_TX_RC_40_MHZ_WIDTH) bw = BW_40; else if (gflags & IEEE80211_TX_RC_80_MHZ_WIDTH) bw = BW_80; else bw = BW_20; mi->supported[i] = minstrel_get_valid_vht_rates(bw, nss, vht_cap->vht_mcs.tx_mcs_map); if (mi->supported[i]) n_supported++; } if (!n_supported) goto use_legacy; mi->supported[MINSTREL_CCK_GROUP] |= mi->cck_supported_short << 4; /* create an initial rate table with the lowest supported rates */ minstrel_ht_update_stats(mp, mi); minstrel_ht_update_rates(mp, mi); return; use_legacy: msp->is_ht = false; memset(&msp->legacy, 0, sizeof(msp->legacy)); msp->legacy.r = msp->ratelist; msp->legacy.sample_table = msp->sample_table; return mac80211_minstrel.rate_init(priv, sband, chandef, sta, &msp->legacy); } static void minstrel_ht_rate_init(void *priv, struct ieee80211_supported_band *sband, struct cfg80211_chan_def *chandef, struct ieee80211_sta *sta, void *priv_sta) { minstrel_ht_update_caps(priv, sband, chandef, sta, priv_sta); } static void minstrel_ht_rate_update(void *priv, struct ieee80211_supported_band *sband, struct cfg80211_chan_def *chandef, struct ieee80211_sta *sta, void *priv_sta, u32 changed) { minstrel_ht_update_caps(priv, sband, chandef, sta, priv_sta); } static void * minstrel_ht_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp) { struct ieee80211_supported_band *sband; struct minstrel_ht_sta_priv *msp; struct minstrel_priv 
*mp = priv; struct ieee80211_hw *hw = mp->hw; int max_rates = 0; int i; for (i = 0; i < NUM_NL80211_BANDS; i++) { sband = hw->wiphy->bands[i]; if (sband && sband->n_bitrates > max_rates) max_rates = sband->n_bitrates; } msp = kzalloc(sizeof(*msp), gfp); if (!msp) return NULL; msp->ratelist = kcalloc(max_rates, sizeof(struct minstrel_rate), gfp); if (!msp->ratelist) goto error; msp->sample_table = kmalloc_array(max_rates, SAMPLE_COLUMNS, gfp); if (!msp->sample_table) goto error1; return msp; error1: kfree(msp->ratelist); error: kfree(msp); return NULL; } static void minstrel_ht_free_sta(void *priv, struct ieee80211_sta *sta, void *priv_sta) { struct minstrel_ht_sta_priv *msp = priv_sta; kfree(msp->sample_table); kfree(msp->ratelist); kfree(msp); } static void minstrel_ht_init_cck_rates(struct minstrel_priv *mp) { static const int bitrates[4] = { 10, 20, 55, 110 }; struct ieee80211_supported_band *sband; u32 rate_flags = ieee80211_chandef_rate_flags(&mp->hw->conf.chandef); int i, j; sband = mp->hw->wiphy->bands[NL80211_BAND_2GHZ]; if (!sband) return; for (i = 0; i < sband->n_bitrates; i++) { struct ieee80211_rate *rate = &sband->bitrates[i]; if (rate->flags & IEEE80211_RATE_ERP_G) continue; if ((rate_flags & sband->bitrates[i].flags) != rate_flags) continue; for (j = 0; j < ARRAY_SIZE(bitrates); j++) { if (rate->bitrate != bitrates[j]) continue; mp->cck_rates[j] = i; break; } } } static void * minstrel_ht_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir) { struct minstrel_priv *mp; mp = kzalloc(sizeof(struct minstrel_priv), GFP_ATOMIC); if (!mp) return NULL; /* contention window settings * Just an approximation. Using the per-queue values would complicate * the calculations and is probably unnecessary */ mp->cw_min = 15; mp->cw_max = 1023; /* number of packets (in %) to use for sampling other rates * sample less often for non-mrr packets, because the overhead * is much higher than with mrr */ mp->lookaround_rate = 5; mp->lookaround_rate_mrr = 10; /* maximum time that the hw is allowed to stay in one MRR segment */ mp->segment_size = 6000; if (hw->max_rate_tries > 0) mp->max_retry = hw->max_rate_tries; else /* safe default, does not necessarily have to match hw properties */ mp->max_retry = 7; if (hw->max_rates >= 4) mp->has_mrr = true; mp->hw = hw; mp->update_interval = 100; #ifdef CPTCFG_MAC80211_DEBUGFS mp->fixed_rate_idx = (u32) -1; debugfs_create_u32("fixed_rate_idx", S_IRUGO | S_IWUGO, debugfsdir, &mp->fixed_rate_idx); #endif minstrel_ht_init_cck_rates(mp); return mp; } static void minstrel_ht_free(void *priv) { kfree(priv); } static u32 minstrel_ht_get_expected_throughput(void *priv_sta) { struct minstrel_ht_sta_priv *msp = priv_sta; struct minstrel_ht_sta *mi = &msp->ht; int i, j, prob, tp_avg; if (!msp->is_ht) return mac80211_minstrel.get_expected_throughput(priv_sta); i = mi->max_tp_rate[0] / MCS_GROUP_RATES; j = mi->max_tp_rate[0] % MCS_GROUP_RATES; prob = mi->groups[i].rates[j].prob_ewma; /* convert tp_avg from pkt per second in kbps */ tp_avg = minstrel_ht_get_tp_avg(mi, i, j, prob) * 10; tp_avg = tp_avg * AVG_PKT_SIZE * 8 / 1024; return tp_avg; } static const struct rate_control_ops mac80211_minstrel_ht = { .name = "minstrel_ht", .tx_status_ext = minstrel_ht_tx_status, .get_rate = minstrel_ht_get_rate, .rate_init = minstrel_ht_rate_init, .rate_update = minstrel_ht_rate_update, .alloc_sta = minstrel_ht_alloc_sta, .free_sta = minstrel_ht_free_sta, .alloc = minstrel_ht_alloc, .free = minstrel_ht_free, #ifdef CPTCFG_MAC80211_DEBUGFS .add_sta_debugfs = 
minstrel_ht_add_sta_debugfs, #endif .get_expected_throughput = minstrel_ht_get_expected_throughput, }; static void __init init_sample_table(void) { int col, i, new_idx; u8 rnd[MCS_GROUP_RATES]; memset(sample_table, 0xff, sizeof(sample_table)); for (col = 0; col < SAMPLE_COLUMNS; col++) { prandom_bytes(rnd, sizeof(rnd)); for (i = 0; i < MCS_GROUP_RATES; i++) { new_idx = (i + rnd[i]) % MCS_GROUP_RATES; while (sample_table[col][new_idx] != 0xff) new_idx = (new_idx + 1) % MCS_GROUP_RATES; sample_table[col][new_idx] = i; } } } int __init rc80211_minstrel_init(void) { init_sample_table(); return ieee80211_rate_control_register(&mac80211_minstrel_ht); } void rc80211_minstrel_exit(void) { ieee80211_rate_control_unregister(&mac80211_minstrel_ht); } backport-iwlwifi-dkms-7906/net/mac80211/rc80211_minstrel_ht.h000066400000000000000000000053341351064634500234110ustar00rootroot00000000000000/* * Copyright (C) 2010 Felix Fietkau * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #ifndef __RC_MINSTREL_HT_H #define __RC_MINSTREL_HT_H /* * The number of streams can be changed to 2 to reduce code * size and memory footprint. */ #define MINSTREL_MAX_STREAMS 3 #define MINSTREL_HT_STREAM_GROUPS 4 /* BW(=2) * SGI(=2) */ #define MINSTREL_VHT_STREAM_GROUPS 6 /* BW(=3) * SGI(=2) */ #define MINSTREL_HT_GROUPS_NB (MINSTREL_MAX_STREAMS * \ MINSTREL_HT_STREAM_GROUPS) #define MINSTREL_VHT_GROUPS_NB (MINSTREL_MAX_STREAMS * \ MINSTREL_VHT_STREAM_GROUPS) #define MINSTREL_CCK_GROUPS_NB 1 #define MINSTREL_GROUPS_NB (MINSTREL_HT_GROUPS_NB + \ MINSTREL_VHT_GROUPS_NB + \ MINSTREL_CCK_GROUPS_NB) #define MINSTREL_HT_GROUP_0 0 #define MINSTREL_CCK_GROUP (MINSTREL_HT_GROUP_0 + MINSTREL_HT_GROUPS_NB) #define MINSTREL_VHT_GROUP_0 (MINSTREL_CCK_GROUP + 1) #define MCS_GROUP_RATES 10 struct mcs_group { u16 flags; u8 streams; u8 shift; u16 duration[MCS_GROUP_RATES]; }; extern const struct mcs_group minstrel_mcs_groups[]; struct minstrel_mcs_group_data { u8 index; u8 column; /* sorted rate set within a MCS group*/ u16 max_group_tp_rate[MAX_THR_RATES]; u16 max_group_prob_rate; /* MCS rate statistics */ struct minstrel_rate_stats rates[MCS_GROUP_RATES]; }; struct minstrel_ht_sta { struct ieee80211_sta *sta; /* ampdu length (average, per sampling interval) */ unsigned int ampdu_len; unsigned int ampdu_packets; /* ampdu length (EWMA) */ unsigned int avg_ampdu_len; /* overall sorted rate set */ u16 max_tp_rate[MAX_THR_RATES]; u16 max_prob_rate; /* time of last status update */ unsigned long last_stats_update; /* overhead time in usec for each frame */ unsigned int overhead; unsigned int overhead_rtscts; unsigned int total_packets; unsigned int sample_packets; /* tx flags to add for frames for this sta */ u32 tx_flags; u8 sample_wait; u8 sample_tries; u8 sample_count; u8 sample_slow; /* current MCS group to be sampled */ u8 sample_group; u8 cck_supported; u8 cck_supported_short; /* Bitfield of supported MCS rates of all groups */ u16 supported[MINSTREL_GROUPS_NB]; /* MCS rate group info and statistics */ struct minstrel_mcs_group_data groups[MINSTREL_GROUPS_NB]; }; struct minstrel_ht_sta_priv { union { struct minstrel_ht_sta ht; struct minstrel_sta_info legacy; }; void *ratelist; void *sample_table; bool is_ht; }; void minstrel_ht_add_sta_debugfs(void *priv, void *priv_sta, struct dentry *dir); int minstrel_ht_get_tp_avg(struct minstrel_ht_sta *mi, int group, int rate, int prob_ewma); #endif 
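/*
 * Illustrative sketch, not part of the original sources: a self-contained
 * userspace demo of the fixed-point probability arithmetic minstrel_ht
 * relies on above (e.g. the MINSTREL_FRAC(95, 100) sampling cutoff and the
 * MINSTREL_FRAC(20, 100) retry floor). MINSTREL_SCALE, MINSTREL_FRAC and
 * MINSTREL_TRUNC live in rc80211_minstrel.h; the 12-bit scale is assumed
 * from that header and reproduced here only so the example compiles alone.
 */
#include <stdio.h>

#define MINSTREL_SCALE 12
#define MINSTREL_FRAC(val, div) (((val) << MINSTREL_SCALE) / (div))
#define MINSTREL_TRUNC(val) ((val) >> MINSTREL_SCALE)

int main(void)
{
	unsigned int p95 = MINSTREL_FRAC(95, 100);	/* sampling cutoff */
	unsigned int p20 = MINSTREL_FRAC(20, 100);	/* retry-count floor */
	unsigned int prob = MINSTREL_FRAC(3, 10);	/* 3 successes / 10 tries */

	printf("95%% = %u/4096, 20%% = %u/4096, 30%% = %u/4096\n",
	       p95, p20, prob);
	printf("30%% under the 20%% floor? %s\n", prob < p20 ? "yes" : "no");
	/* MINSTREL_TRUNC drops the fractional bits, e.g. avg A-MPDU length */
	printf("MINSTREL_TRUNC(3.5) = %u\n",
	       MINSTREL_TRUNC(MINSTREL_FRAC(7, 2)));
	return 0;
}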
backport-iwlwifi-dkms-7906/net/mac80211/rc80211_minstrel_ht_debugfs.c000066400000000000000000000216041351064634500251010ustar00rootroot00000000000000/* * Copyright (C) 2010 Felix Fietkau <nbd@openwrt.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/netdevice.h> #include <linux/types.h> #include <linux/skbuff.h> #include <linux/debugfs.h> #include <linux/ieee80211.h> #include <linux/export.h> #include <net/mac80211.h> #include "rc80211_minstrel.h" #include "rc80211_minstrel_ht.h" static ssize_t minstrel_stats_read(struct file *file, char __user *buf, size_t len, loff_t *ppos) { struct minstrel_debugfs_info *ms; ms = file->private_data; return simple_read_from_buffer(buf, len, ppos, ms->buf, ms->len); } static int minstrel_stats_release(struct inode *inode, struct file *file) { kfree(file->private_data); return 0; } static char * minstrel_ht_stats_dump(struct minstrel_ht_sta *mi, int i, char *p) { const struct mcs_group *mg; unsigned int j, tp_max, tp_avg, eprob, tx_time; char htmode = '2'; char gimode = 'L'; u32 gflags; if (!mi->supported[i]) return p; mg = &minstrel_mcs_groups[i]; gflags = mg->flags; if (gflags & IEEE80211_TX_RC_40_MHZ_WIDTH) htmode = '4'; else if (gflags & IEEE80211_TX_RC_80_MHZ_WIDTH) htmode = '8'; if (gflags & IEEE80211_TX_RC_SHORT_GI) gimode = 'S'; for (j = 0; j < MCS_GROUP_RATES; j++) { struct minstrel_rate_stats *mrs = &mi->groups[i].rates[j]; static const int bitrates[4] = { 10, 20, 55, 110 }; int idx = i * MCS_GROUP_RATES + j; unsigned int duration; if (!(mi->supported[i] & BIT(j))) continue; if (gflags & IEEE80211_TX_RC_MCS) { p += sprintf(p, "HT%c0 ", htmode); p += sprintf(p, "%cGI ", gimode); p += sprintf(p, "%d ", mg->streams); } else if (gflags & IEEE80211_TX_RC_VHT_MCS) { p += sprintf(p, "VHT%c0 ", htmode); p += sprintf(p, "%cGI ", gimode); p += sprintf(p, "%d ", mg->streams); } else { p += sprintf(p, "CCK "); p += sprintf(p, "%cP ", j < 4 ? 'L' : 'S'); p += sprintf(p, "1 "); } *(p++) = (idx == mi->max_tp_rate[0]) ? 'A' : ' '; *(p++) = (idx == mi->max_tp_rate[1]) ? 'B' : ' '; *(p++) = (idx == mi->max_tp_rate[2]) ? 'C' : ' '; *(p++) = (idx == mi->max_tp_rate[3]) ? 'D' : ' '; *(p++) = (idx == mi->max_prob_rate) ?
'P' : ' '; if (gflags & IEEE80211_TX_RC_MCS) { p += sprintf(p, " MCS%-2u", (mg->streams - 1) * 8 + j); } else if (gflags & IEEE80211_TX_RC_VHT_MCS) { p += sprintf(p, " MCS%-1u/%1u", j, mg->streams); } else { int r = bitrates[j % 4]; p += sprintf(p, " %2u.%1uM", r / 10, r % 10); } p += sprintf(p, " %3u ", idx); /* tx_time[rate(i)] in usec */ duration = mg->duration[j]; duration <<= mg->shift; tx_time = DIV_ROUND_CLOSEST(duration, 1000); p += sprintf(p, "%6u ", tx_time); tp_max = minstrel_ht_get_tp_avg(mi, i, j, MINSTREL_FRAC(100, 100)); tp_avg = minstrel_ht_get_tp_avg(mi, i, j, mrs->prob_ewma); eprob = MINSTREL_TRUNC(mrs->prob_ewma * 1000); p += sprintf(p, "%4u.%1u %4u.%1u %3u.%1u" " %3u %3u %-3u " "%9llu %-9llu\n", tp_max / 10, tp_max % 10, tp_avg / 10, tp_avg % 10, eprob / 10, eprob % 10, mrs->retry_count, mrs->last_success, mrs->last_attempts, (unsigned long long)mrs->succ_hist, (unsigned long long)mrs->att_hist); } return p; } static int minstrel_ht_stats_open(struct inode *inode, struct file *file) { struct minstrel_ht_sta_priv *msp = inode->i_private; struct minstrel_ht_sta *mi = &msp->ht; struct minstrel_debugfs_info *ms; unsigned int i; int ret; char *p; if (!msp->is_ht) { inode->i_private = &msp->legacy; ret = minstrel_stats_open(inode, file); inode->i_private = msp; return ret; } ms = kmalloc(32768, GFP_KERNEL); if (!ms) return -ENOMEM; file->private_data = ms; p = ms->buf; p += sprintf(p, "\n"); p += sprintf(p, " best ____________rate__________ ____statistics___ _____last____ ______sum-of________\n"); p += sprintf(p, "mode guard # rate [name idx airtime max_tp] [avg(tp) avg(prob)] [retry|suc|att] [#success | #attempts]\n"); p = minstrel_ht_stats_dump(mi, MINSTREL_CCK_GROUP, p); for (i = 0; i < MINSTREL_CCK_GROUP; i++) p = minstrel_ht_stats_dump(mi, i, p); for (i++; i < ARRAY_SIZE(mi->groups); i++) p = minstrel_ht_stats_dump(mi, i, p); p += sprintf(p, "\nTotal packet count:: ideal %d " "lookaround %d\n", max(0, (int) mi->total_packets - (int) mi->sample_packets), mi->sample_packets); p += sprintf(p, "Average # of aggregated frames per A-MPDU: %d.%d\n", MINSTREL_TRUNC(mi->avg_ampdu_len), MINSTREL_TRUNC(mi->avg_ampdu_len * 10) % 10); ms->len = p - ms->buf; WARN_ON(ms->len + sizeof(*ms) > 32768); return nonseekable_open(inode, file); } static const struct file_operations minstrel_ht_stat_fops = { .owner = THIS_MODULE, .open = minstrel_ht_stats_open, .read = minstrel_stats_read, .release = minstrel_stats_release, .llseek = no_llseek, }; static char * minstrel_ht_stats_csv_dump(struct minstrel_ht_sta *mi, int i, char *p) { const struct mcs_group *mg; unsigned int j, tp_max, tp_avg, eprob, tx_time; char htmode = '2'; char gimode = 'L'; u32 gflags; if (!mi->supported[i]) return p; mg = &minstrel_mcs_groups[i]; gflags = mg->flags; if (gflags & IEEE80211_TX_RC_40_MHZ_WIDTH) htmode = '4'; else if (gflags & IEEE80211_TX_RC_80_MHZ_WIDTH) htmode = '8'; if (gflags & IEEE80211_TX_RC_SHORT_GI) gimode = 'S'; for (j = 0; j < MCS_GROUP_RATES; j++) { struct minstrel_rate_stats *mrs = &mi->groups[i].rates[j]; static const int bitrates[4] = { 10, 20, 55, 110 }; int idx = i * MCS_GROUP_RATES + j; unsigned int duration; if (!(mi->supported[i] & BIT(j))) continue; if (gflags & IEEE80211_TX_RC_MCS) { p += sprintf(p, "HT%c0,", htmode); p += sprintf(p, "%cGI,", gimode); p += sprintf(p, "%d,", mg->streams); } else if (gflags & IEEE80211_TX_RC_VHT_MCS) { p += sprintf(p, "VHT%c0,", htmode); p += sprintf(p, "%cGI,", gimode); p += sprintf(p, "%d,", mg->streams); } else { p += sprintf(p, "CCK,"); p += sprintf(p, 
"%cP,", j < 4 ? 'L' : 'S'); p += sprintf(p, "1,"); } p += sprintf(p, "%s" ,((idx == mi->max_tp_rate[0]) ? "A" : "")); p += sprintf(p, "%s" ,((idx == mi->max_tp_rate[1]) ? "B" : "")); p += sprintf(p, "%s" ,((idx == mi->max_tp_rate[2]) ? "C" : "")); p += sprintf(p, "%s" ,((idx == mi->max_tp_rate[3]) ? "D" : "")); p += sprintf(p, "%s" ,((idx == mi->max_prob_rate) ? "P" : "")); if (gflags & IEEE80211_TX_RC_MCS) { p += sprintf(p, ",MCS%-2u,", (mg->streams - 1) * 8 + j); } else if (gflags & IEEE80211_TX_RC_VHT_MCS) { p += sprintf(p, ",MCS%-1u/%1u,", j, mg->streams); } else { int r = bitrates[j % 4]; p += sprintf(p, ",%2u.%1uM,", r / 10, r % 10); } p += sprintf(p, "%u,", idx); duration = mg->duration[j]; duration <<= mg->shift; tx_time = DIV_ROUND_CLOSEST(duration, 1000); p += sprintf(p, "%u,", tx_time); tp_max = minstrel_ht_get_tp_avg(mi, i, j, MINSTREL_FRAC(100, 100)); tp_avg = minstrel_ht_get_tp_avg(mi, i, j, mrs->prob_ewma); eprob = MINSTREL_TRUNC(mrs->prob_ewma * 1000); p += sprintf(p, "%u.%u,%u.%u,%u.%u,%u,%u," "%u,%llu,%llu,", tp_max / 10, tp_max % 10, tp_avg / 10, tp_avg % 10, eprob / 10, eprob % 10, mrs->retry_count, mrs->last_success, mrs->last_attempts, (unsigned long long)mrs->succ_hist, (unsigned long long)mrs->att_hist); p += sprintf(p, "%d,%d,%d.%d\n", max(0, (int) mi->total_packets - (int) mi->sample_packets), mi->sample_packets, MINSTREL_TRUNC(mi->avg_ampdu_len), MINSTREL_TRUNC(mi->avg_ampdu_len * 10) % 10); } return p; } static int minstrel_ht_stats_csv_open(struct inode *inode, struct file *file) { struct minstrel_ht_sta_priv *msp = inode->i_private; struct minstrel_ht_sta *mi = &msp->ht; struct minstrel_debugfs_info *ms; unsigned int i; int ret; char *p; if (!msp->is_ht) { inode->i_private = &msp->legacy; ret = minstrel_stats_csv_open(inode, file); inode->i_private = msp; return ret; } ms = kmalloc(32768, GFP_KERNEL); if (!ms) return -ENOMEM; file->private_data = ms; p = ms->buf; p = minstrel_ht_stats_csv_dump(mi, MINSTREL_CCK_GROUP, p); for (i = 0; i < MINSTREL_CCK_GROUP; i++) p = minstrel_ht_stats_csv_dump(mi, i, p); for (i++; i < ARRAY_SIZE(mi->groups); i++) p = minstrel_ht_stats_csv_dump(mi, i, p); ms->len = p - ms->buf; WARN_ON(ms->len + sizeof(*ms) > 32768); return nonseekable_open(inode, file); } static const struct file_operations minstrel_ht_stat_csv_fops = { .owner = THIS_MODULE, .open = minstrel_ht_stats_csv_open, .read = minstrel_stats_read, .release = minstrel_stats_release, .llseek = no_llseek, }; void minstrel_ht_add_sta_debugfs(void *priv, void *priv_sta, struct dentry *dir) { struct minstrel_ht_sta_priv *msp = priv_sta; debugfs_create_file("rc_stats", 0444, dir, msp, &minstrel_ht_stat_fops); debugfs_create_file("rc_stats_csv", 0444, dir, msp, &minstrel_ht_stat_csv_fops); } backport-iwlwifi-dkms-7906/net/mac80211/rx.c000066400000000000000000004017701351064634500204310ustar00rootroot00000000000000/* * Copyright 2002-2005, Instant802 Networks, Inc. * Copyright 2005-2006, Devicescape Software, Inc. * Copyright 2006-2007 Jiri Benc * Copyright 2007-2010 Johannes Berg * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2017 Intel Deutschland GmbH * Copyright (C) 2018-2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. 
*/ #include <linux/jiffies.h> #include <linux/slab.h> #include <linux/kernel.h> #include <linux/skbuff.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/rcupdate.h> #include <linux/export.h> #include <linux/bitops.h> #include <net/mac80211.h> #include <net/ieee80211_radiotap.h> #include <asm/unaligned.h> #include "ieee80211_i.h" #ifdef CPTCFG_IWLMVM_VENDOR_CMDS #include "packet_filtering.h" #endif #include "driver-ops.h" #include "led.h" #include "mesh.h" #include "wep.h" #include "wpa.h" #include "tkip.h" #include "wme.h" #include "rate.h" static inline void ieee80211_rx_stats(struct net_device *dev, u32 len) { struct pcpu_sw_netstats *tstats = this_cpu_ptr(netdev_tstats(dev)); u64_stats_update_begin(&tstats->syncp); tstats->rx_packets++; tstats->rx_bytes += len; u64_stats_update_end(&tstats->syncp); } static u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len, enum nl80211_iftype type) { __le16 fc = hdr->frame_control; if (ieee80211_is_data(fc)) { if (len < 24) /* drop incorrect hdr len (data) */ return NULL; if (ieee80211_has_a4(fc)) return NULL; if (ieee80211_has_tods(fc)) return hdr->addr1; if (ieee80211_has_fromds(fc)) return hdr->addr2; return hdr->addr3; } if (ieee80211_is_mgmt(fc)) { if (len < 24) /* drop incorrect hdr len (mgmt) */ return NULL; return hdr->addr3; } if (ieee80211_is_ctl(fc)) { if (ieee80211_is_pspoll(fc)) return hdr->addr1; if (ieee80211_is_back_req(fc)) { switch (type) { case NL80211_IFTYPE_STATION: return hdr->addr2; case NL80211_IFTYPE_AP: case NL80211_IFTYPE_AP_VLAN: return hdr->addr1; default: break; /* fall through to the return */ } } } return NULL; } /* * monitor mode reception * * This function cleans up the SKB, i.e. it removes all the stuff * only useful for monitoring. */ static void remove_monitor_info(struct sk_buff *skb, unsigned int present_fcs_len, unsigned int rtap_space) { if (present_fcs_len) __pskb_trim(skb, skb->len - present_fcs_len); __pskb_pull(skb, rtap_space); } static inline bool should_drop_frame(struct sk_buff *skb, int present_fcs_len, unsigned int rtap_space) { struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); struct ieee80211_hdr *hdr; hdr = (void *)(skb->data + rtap_space); if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC | RX_FLAG_ONLY_MONITOR | RX_FLAG_NO_PSDU)) return true; if (unlikely(skb->len < 16 + present_fcs_len + rtap_space)) return true; if (ieee80211_is_ctl(hdr->frame_control) && !ieee80211_is_pspoll(hdr->frame_control) && !ieee80211_is_back_req(hdr->frame_control)) return true; return false; } static int ieee80211_rx_radiotap_hdrlen(struct ieee80211_local *local, struct ieee80211_rx_status *status, struct sk_buff *skb) { int len; /* always present fields */ len = sizeof(struct ieee80211_radiotap_header) + 8; /* allocate extra bitmaps */ if (status->chains) len += 4 * hweight8(status->chains); /* vendor presence bitmap */ if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) len += 4; if (ieee80211_have_rx_timestamp(status)) { len = ALIGN(len, 8); len += 8; } if (ieee80211_hw_check(&local->hw, SIGNAL_DBM)) len += 1; /* antenna field, if we don't have per-chain info */ if (!status->chains) len += 1; /* padding for RX_FLAGS if necessary */ len = ALIGN(len, 2); if (status->encoding == RX_ENC_HT) /* HT info */ len += 3; if (status->flag & RX_FLAG_AMPDU_DETAILS) { len = ALIGN(len, 4); len += 8; } if (status->encoding == RX_ENC_VHT) { len = ALIGN(len, 2); len += 12; } if (local->hw.radiotap_timestamp.units_pos >= 0) { len = ALIGN(len, 8); len += 12; } if (status->encoding == RX_ENC_HE && status->flag & RX_FLAG_RADIOTAP_HE) { len = ALIGN(len, 2); len += 12; BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he) != 12); } if (status->encoding == RX_ENC_HE && status->flag &
RX_FLAG_RADIOTAP_HE_MU) { len = ALIGN(len, 2); len += 12; BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he_mu) != 12); } if (status->flag & RX_FLAG_NO_PSDU) len += 1; if (status->flag & RX_FLAG_RADIOTAP_LSIG) { len = ALIGN(len, 2); len += 4; BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_lsig) != 4); } if (status->chains) { /* antenna and antenna signal fields */ len += 2 * hweight8(status->chains); } if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) { struct ieee80211_vendor_radiotap *rtap; int vendor_data_offset = 0; /* * The position to look at depends on the existence (or non- * existence) of other elements, so take that into account... */ if (status->flag & RX_FLAG_RADIOTAP_HE) vendor_data_offset += sizeof(struct ieee80211_radiotap_he); if (status->flag & RX_FLAG_RADIOTAP_HE_MU) vendor_data_offset += sizeof(struct ieee80211_radiotap_he_mu); if (status->flag & RX_FLAG_RADIOTAP_LSIG) vendor_data_offset += sizeof(struct ieee80211_radiotap_lsig); rtap = (void *)&skb->data[vendor_data_offset]; /* alignment for fixed 6-byte vendor data header */ len = ALIGN(len, 2); /* vendor data header */ len += 6; if (WARN_ON(rtap->align == 0)) rtap->align = 1; len = ALIGN(len, rtap->align); len += rtap->len + rtap->pad; } return len; } static void ieee80211_handle_mu_mimo_mon(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, int rtap_space) { struct { struct ieee80211_hdr_3addr hdr; u8 category; u8 action_code; } __packed action; if (!sdata) return; BUILD_BUG_ON(sizeof(action) != IEEE80211_MIN_ACTION_SIZE + 1); if (skb->len < rtap_space + sizeof(action) + VHT_MUMIMO_GROUPS_DATA_LEN) return; if (!is_valid_ether_addr(sdata->u.mntr.mu_follow_addr)) return; skb_copy_bits(skb, rtap_space, &action, sizeof(action)); if (!ieee80211_is_action(action.hdr.frame_control)) return; if (action.category != WLAN_CATEGORY_VHT) return; if (action.action_code != WLAN_VHT_ACTION_GROUPID_MGMT) return; if (!ether_addr_equal(action.hdr.addr1, sdata->u.mntr.mu_follow_addr)) return; skb = skb_copy(skb, GFP_ATOMIC); if (!skb) return; skb_queue_tail(&sdata->skb_queue, skb); ieee80211_queue_work(&sdata->local->hw, &sdata->work); } /* * ieee80211_add_rx_radiotap_header - add radiotap header * * add a radiotap header containing all the fields which the hardware provided. 
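 * The caller must have reserved rtap_len bytes of headroom, as computed by * ieee80211_rx_radiotap_hdrlen() above; the header is then built in place * with skb_push().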
*/ static void ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, struct sk_buff *skb, struct ieee80211_rate *rate, int rtap_len, bool has_fcs) { struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); struct ieee80211_radiotap_header *rthdr; unsigned char *pos; __le32 *it_present; u32 it_present_val; u16 rx_flags = 0; u16 channel_flags = 0; int mpdulen, chain; unsigned long chains = status->chains; struct ieee80211_vendor_radiotap rtap = {}; struct ieee80211_radiotap_he he = {}; struct ieee80211_radiotap_he_mu he_mu = {}; struct ieee80211_radiotap_lsig lsig = {}; if (status->flag & RX_FLAG_RADIOTAP_HE) { he = *(struct ieee80211_radiotap_he *)skb->data; skb_pull(skb, sizeof(he)); WARN_ON_ONCE(status->encoding != RX_ENC_HE); } if (status->flag & RX_FLAG_RADIOTAP_HE_MU) { he_mu = *(struct ieee80211_radiotap_he_mu *)skb->data; skb_pull(skb, sizeof(he_mu)); } if (status->flag & RX_FLAG_RADIOTAP_LSIG) { lsig = *(struct ieee80211_radiotap_lsig *)skb->data; skb_pull(skb, sizeof(lsig)); } if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) { rtap = *(struct ieee80211_vendor_radiotap *)skb->data; /* rtap.len and rtap.pad are undone immediately */ skb_pull(skb, sizeof(rtap) + rtap.len + rtap.pad); } mpdulen = skb->len; if (!(has_fcs && ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS))) mpdulen += FCS_LEN; rthdr = skb_push(skb, rtap_len); memset(rthdr, 0, rtap_len - rtap.len - rtap.pad); it_present = &rthdr->it_present; /* radiotap header, set always present flags */ rthdr->it_len = cpu_to_le16(rtap_len); it_present_val = BIT(IEEE80211_RADIOTAP_FLAGS) | BIT(IEEE80211_RADIOTAP_CHANNEL) | BIT(IEEE80211_RADIOTAP_RX_FLAGS); if (!status->chains) it_present_val |= BIT(IEEE80211_RADIOTAP_ANTENNA); for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) { it_present_val |= BIT(IEEE80211_RADIOTAP_EXT) | BIT(IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE); put_unaligned_le32(it_present_val, it_present); it_present++; it_present_val = BIT(IEEE80211_RADIOTAP_ANTENNA) | BIT(IEEE80211_RADIOTAP_DBM_ANTSIGNAL); } if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) { it_present_val |= BIT(IEEE80211_RADIOTAP_VENDOR_NAMESPACE) | BIT(IEEE80211_RADIOTAP_EXT); put_unaligned_le32(it_present_val, it_present); it_present++; it_present_val = rtap.present; } put_unaligned_le32(it_present_val, it_present); pos = (void *)(it_present + 1); /* the order of the following fields is important */ /* IEEE80211_RADIOTAP_TSFT */ if (ieee80211_have_rx_timestamp(status)) { /* padding */ while ((pos - (u8 *)rthdr) & 7) *pos++ = 0; put_unaligned_le64( ieee80211_calculate_rx_timestamp(local, status, mpdulen, 0), pos); rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT); pos += 8; } /* IEEE80211_RADIOTAP_FLAGS */ if (has_fcs && ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS)) *pos |= IEEE80211_RADIOTAP_F_FCS; if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC)) *pos |= IEEE80211_RADIOTAP_F_BADFCS; if (status->enc_flags & RX_ENC_FLAG_SHORTPRE) *pos |= IEEE80211_RADIOTAP_F_SHORTPRE; pos++; /* IEEE80211_RADIOTAP_RATE */ if (!rate || status->encoding != RX_ENC_LEGACY) { /* * Without rate information don't add it. If we have, * MCS information is a separate field in radiotap, * added below. The byte here is needed as padding * for the channel though, so initialise it to 0. 
*/ *pos = 0; } else { int shift = 0; rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE); if (status->bw == RATE_INFO_BW_10) shift = 1; else if (status->bw == RATE_INFO_BW_5) shift = 2; *pos = DIV_ROUND_UP(rate->bitrate, 5 * (1 << shift)); } pos++; /* IEEE80211_RADIOTAP_CHANNEL */ put_unaligned_le16(status->freq, pos); pos += 2; if (status->bw == RATE_INFO_BW_10) channel_flags |= IEEE80211_CHAN_HALF; else if (status->bw == RATE_INFO_BW_5) channel_flags |= IEEE80211_CHAN_QUARTER; if (status->band == NL80211_BAND_5GHZ) channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ; else if (status->encoding != RX_ENC_LEGACY) channel_flags |= IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ; else if (rate && rate->flags & IEEE80211_RATE_ERP_G) channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ; else if (rate) channel_flags |= IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ; else channel_flags |= IEEE80211_CHAN_2GHZ; put_unaligned_le16(channel_flags, pos); pos += 2; /* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */ if (ieee80211_hw_check(&local->hw, SIGNAL_DBM) && !(status->flag & RX_FLAG_NO_SIGNAL_VAL)) { *pos = status->signal; rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL); pos++; } /* IEEE80211_RADIOTAP_LOCK_QUALITY is missing */ if (!status->chains) { /* IEEE80211_RADIOTAP_ANTENNA */ *pos = status->antenna; pos++; } /* IEEE80211_RADIOTAP_DB_ANTNOISE is not used */ /* IEEE80211_RADIOTAP_RX_FLAGS */ /* ensure 2 byte alignment for the 2 byte field as required */ if ((pos - (u8 *)rthdr) & 1) *pos++ = 0; if (status->flag & RX_FLAG_FAILED_PLCP_CRC) rx_flags |= IEEE80211_RADIOTAP_F_RX_BADPLCP; put_unaligned_le16(rx_flags, pos); pos += 2; if (status->encoding == RX_ENC_HT) { unsigned int stbc; rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_MCS); *pos++ = local->hw.radiotap_mcs_details; *pos = 0; if (status->enc_flags & RX_ENC_FLAG_SHORT_GI) *pos |= IEEE80211_RADIOTAP_MCS_SGI; if (status->bw == RATE_INFO_BW_40) *pos |= IEEE80211_RADIOTAP_MCS_BW_40; if (status->enc_flags & RX_ENC_FLAG_HT_GF) *pos |= IEEE80211_RADIOTAP_MCS_FMT_GF; if (status->enc_flags & RX_ENC_FLAG_LDPC) *pos |= IEEE80211_RADIOTAP_MCS_FEC_LDPC; stbc = (status->enc_flags & RX_ENC_FLAG_STBC_MASK) >> RX_ENC_FLAG_STBC_SHIFT; *pos |= stbc << IEEE80211_RADIOTAP_MCS_STBC_SHIFT; pos++; *pos++ = status->rate_idx; } if (status->flag & RX_FLAG_AMPDU_DETAILS) { u16 flags = 0; /* ensure 4 byte alignment */ while ((pos - (u8 *)rthdr) & 3) pos++; rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_AMPDU_STATUS); put_unaligned_le32(status->ampdu_reference, pos); pos += 4; if (status->flag & RX_FLAG_AMPDU_LAST_KNOWN) flags |= IEEE80211_RADIOTAP_AMPDU_LAST_KNOWN; if (status->flag & RX_FLAG_AMPDU_IS_LAST) flags |= IEEE80211_RADIOTAP_AMPDU_IS_LAST; if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_ERROR) flags |= IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_ERR; if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_KNOWN) flags |= IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_KNOWN; if (status->flag & RX_FLAG_AMPDU_EOF_BIT_KNOWN) flags |= IEEE80211_RADIOTAP_AMPDU_EOF_KNOWN; if (status->flag & RX_FLAG_AMPDU_EOF_BIT) flags |= IEEE80211_RADIOTAP_AMPDU_EOF; put_unaligned_le16(flags, pos); pos += 2; if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_KNOWN) *pos++ = status->ampdu_delimiter_crc; else *pos++ = 0; *pos++ = 0; } if (status->encoding == RX_ENC_VHT) { u16 known = local->hw.radiotap_vht_details; rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_VHT); put_unaligned_le16(known, pos); pos += 2; /* flags */ if (status->enc_flags & RX_ENC_FLAG_SHORT_GI) *pos |= 
IEEE80211_RADIOTAP_VHT_FLAG_SGI; /* in VHT, STBC is binary */ if (status->enc_flags & RX_ENC_FLAG_STBC_MASK) *pos |= IEEE80211_RADIOTAP_VHT_FLAG_STBC; if (status->enc_flags & RX_ENC_FLAG_BF) *pos |= IEEE80211_RADIOTAP_VHT_FLAG_BEAMFORMED; pos++; /* bandwidth */ switch (status->bw) { case RATE_INFO_BW_80: *pos++ = 4; break; case RATE_INFO_BW_160: *pos++ = 11; break; case RATE_INFO_BW_40: *pos++ = 1; break; default: *pos++ = 0; } /* MCS/NSS */ *pos = (status->rate_idx << 4) | status->nss; pos += 4; /* coding field */ if (status->enc_flags & RX_ENC_FLAG_LDPC) *pos |= IEEE80211_RADIOTAP_CODING_LDPC_USER0; pos++; /* group ID */ pos++; /* partial_aid */ pos += 2; } if (local->hw.radiotap_timestamp.units_pos >= 0) { u16 accuracy = 0; u8 flags = IEEE80211_RADIOTAP_TIMESTAMP_FLAG_32BIT; rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_TIMESTAMP); /* ensure 8 byte alignment */ while ((pos - (u8 *)rthdr) & 7) pos++; put_unaligned_le64(status->device_timestamp, pos); pos += sizeof(u64); if (local->hw.radiotap_timestamp.accuracy >= 0) { accuracy = local->hw.radiotap_timestamp.accuracy; flags |= IEEE80211_RADIOTAP_TIMESTAMP_FLAG_ACCURACY; } put_unaligned_le16(accuracy, pos); pos += sizeof(u16); *pos++ = local->hw.radiotap_timestamp.units_pos; *pos++ = flags; } if (status->encoding == RX_ENC_HE && status->flag & RX_FLAG_RADIOTAP_HE) { #define HE_PREP(f, val) le16_encode_bits(val, IEEE80211_RADIOTAP_HE_##f) if (status->enc_flags & RX_ENC_FLAG_STBC_MASK) { he.data6 |= HE_PREP(DATA6_NSTS, FIELD_GET(RX_ENC_FLAG_STBC_MASK, status->enc_flags)); he.data3 |= HE_PREP(DATA3_STBC, 1); } else { he.data6 |= HE_PREP(DATA6_NSTS, status->nss); } #define CHECK_GI(s) \ BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA5_GI_##s != \ (int)NL80211_RATE_INFO_HE_GI_##s) CHECK_GI(0_8); CHECK_GI(1_6); CHECK_GI(3_2); he.data3 |= HE_PREP(DATA3_DATA_MCS, status->rate_idx); he.data3 |= HE_PREP(DATA3_DATA_DCM, status->he_dcm); he.data3 |= HE_PREP(DATA3_CODING, !!(status->enc_flags & RX_ENC_FLAG_LDPC)); he.data5 |= HE_PREP(DATA5_GI, status->he_gi); switch (status->bw) { case RATE_INFO_BW_20: he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC, IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_20MHZ); break; case RATE_INFO_BW_40: he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC, IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_40MHZ); break; case RATE_INFO_BW_80: he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC, IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_80MHZ); break; case RATE_INFO_BW_160: he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC, IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_160MHZ); break; case RATE_INFO_BW_HE_RU: #define CHECK_RU_ALLOC(s) \ BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_##s##T != \ NL80211_RATE_INFO_HE_RU_ALLOC_##s + 4) CHECK_RU_ALLOC(26); CHECK_RU_ALLOC(52); CHECK_RU_ALLOC(106); CHECK_RU_ALLOC(242); CHECK_RU_ALLOC(484); CHECK_RU_ALLOC(996); CHECK_RU_ALLOC(2x996); he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC, status->he_ru + 4); break; default: WARN_ONCE(1, "Invalid SU BW %d\n", status->bw); } /* ensure 2 byte alignment */ while ((pos - (u8 *)rthdr) & 1) pos++; rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_HE); memcpy(pos, &he, sizeof(he)); pos += sizeof(he); } if (status->encoding == RX_ENC_HE && status->flag & RX_FLAG_RADIOTAP_HE_MU) { /* ensure 2 byte alignment */ while ((pos - (u8 *)rthdr) & 1) pos++; rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_HE_MU); memcpy(pos, &he_mu, sizeof(he_mu)); pos += sizeof(he_mu); } if (status->flag & RX_FLAG_NO_PSDU) { rthdr->it_present |= cpu_to_le32(1 << 
IEEE80211_RADIOTAP_ZERO_LEN_PSDU); *pos++ = status->zero_length_psdu_type; } if (status->flag & RX_FLAG_RADIOTAP_LSIG) { /* ensure 2 byte alignment */ while ((pos - (u8 *)rthdr) & 1) pos++; rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_LSIG); memcpy(pos, &lsig, sizeof(lsig)); pos += sizeof(lsig); } for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) { *pos++ = status->chain_signal[chain]; *pos++ = chain; } if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) { /* ensure 2 byte alignment for the vendor field as required */ if ((pos - (u8 *)rthdr) & 1) *pos++ = 0; *pos++ = rtap.oui[0]; *pos++ = rtap.oui[1]; *pos++ = rtap.oui[2]; *pos++ = rtap.subns; put_unaligned_le16(rtap.len, pos); pos += 2; /* align the actual payload as requested */ while ((pos - (u8 *)rthdr) & (rtap.align - 1)) *pos++ = 0; /* data (and possible padding) already follows */ } } static struct sk_buff * ieee80211_make_monitor_skb(struct ieee80211_local *local, struct sk_buff **origskb, struct ieee80211_rate *rate, int rtap_space, bool use_origskb) { struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(*origskb); int rt_hdrlen, needed_headroom; struct sk_buff *skb; /* room for the radiotap header based on driver features */ rt_hdrlen = ieee80211_rx_radiotap_hdrlen(local, status, *origskb); needed_headroom = rt_hdrlen - rtap_space; if (use_origskb) { /* only need to expand headroom if necessary */ skb = *origskb; *origskb = NULL; /* * This shouldn't trigger often because most devices have an * RX header they pull before we get here, and that should * be big enough for our radiotap information. We should * probably export the length to drivers so that we can have * them allocate enough headroom to start with. */ if (skb_headroom(skb) < needed_headroom && pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) { dev_kfree_skb(skb); return NULL; } } else { /* * Need to make a copy and possibly remove radiotap header * and FCS from the original. */ skb = skb_copy_expand(*origskb, needed_headroom, 0, GFP_ATOMIC); if (!skb) return NULL; } /* prepend radiotap information */ ieee80211_add_rx_radiotap_header(local, skb, rate, rt_hdrlen, true); skb_reset_mac_header(skb); skb->ip_summed = CHECKSUM_UNNECESSARY; skb->pkt_type = PACKET_OTHERHOST; skb->protocol = htons(ETH_P_802_2); return skb; } /* * This function copies a received frame to all monitor interfaces and * returns a cleaned-up SKB that no longer includes the FCS nor the * radiotap header the driver might have added. 
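 * If no monitor interface is active, or the driver set RX_FLAG_SKIP_MONITOR, * the frame is handed back directly after that cleanup; frames that exist * only for monitoring purposes are freed and NULL is returned instead.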
*/ static struct sk_buff * ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb, struct ieee80211_rate *rate) { struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(origskb); struct ieee80211_sub_if_data *sdata; struct sk_buff *monskb = NULL; int present_fcs_len = 0; unsigned int rtap_space = 0; struct ieee80211_sub_if_data *monitor_sdata = rcu_dereference(local->monitor_sdata); bool only_monitor = false; unsigned int min_head_len; if (status->flag & RX_FLAG_RADIOTAP_HE) rtap_space += sizeof(struct ieee80211_radiotap_he); if (status->flag & RX_FLAG_RADIOTAP_HE_MU) rtap_space += sizeof(struct ieee80211_radiotap_he_mu); if (status->flag & RX_FLAG_RADIOTAP_LSIG) rtap_space += sizeof(struct ieee80211_radiotap_lsig); if (unlikely(status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA)) { struct ieee80211_vendor_radiotap *rtap = (void *)(origskb->data + rtap_space); rtap_space += sizeof(*rtap) + rtap->len + rtap->pad; } min_head_len = rtap_space; /* * First, we may need to make a copy of the skb because * (1) we need to modify it for radiotap (if not present), and * (2) the other RX handlers will modify the skb we got. * * We don't need to, of course, if we aren't going to return * the SKB because it has a bad FCS/PLCP checksum. */ if (!(status->flag & RX_FLAG_NO_PSDU)) { if (ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS)) { if (unlikely(origskb->len <= FCS_LEN + rtap_space)) { /* driver bug */ WARN_ON(1); dev_kfree_skb(origskb); return NULL; } present_fcs_len = FCS_LEN; } /* also consider the hdr->frame_control */ min_head_len += 2; } /* ensure that the expected data elements are in skb head */ if (!pskb_may_pull(origskb, min_head_len)) { dev_kfree_skb(origskb); return NULL; } only_monitor = should_drop_frame(origskb, present_fcs_len, rtap_space); if (!local->monitors || (status->flag & RX_FLAG_SKIP_MONITOR)) { if (only_monitor) { dev_kfree_skb(origskb); return NULL; } remove_monitor_info(origskb, present_fcs_len, rtap_space); return origskb; } ieee80211_handle_mu_mimo_mon(monitor_sdata, origskb, rtap_space); list_for_each_entry_rcu(sdata, &local->mon_list, u.mntr.list) { bool last_monitor = list_is_last(&sdata->u.mntr.list, &local->mon_list); if (!monskb) monskb = ieee80211_make_monitor_skb(local, &origskb, rate, rtap_space, only_monitor && last_monitor); if (monskb) { struct sk_buff *skb; if (last_monitor) { skb = monskb; monskb = NULL; } else { skb = skb_clone(monskb, GFP_ATOMIC); } if (skb) { skb->dev = sdata->dev; ieee80211_rx_stats(skb->dev, skb->len); netif_receive_skb(skb); } } if (last_monitor) break; } /* this happens if last_monitor was erroneously false */ dev_kfree_skb(monskb); /* ditto */ if (!origskb) return NULL; remove_monitor_info(origskb, present_fcs_len, rtap_space); return origskb; } static void ieee80211_parse_qos(struct ieee80211_rx_data *rx) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); int tid, seqno_idx, security_idx; /* does the frame have a qos control field? 
*/ if (ieee80211_is_data_qos(hdr->frame_control)) { u8 *qc = ieee80211_get_qos_ctl(hdr); /* frame has qos control */ tid = *qc & IEEE80211_QOS_CTL_TID_MASK; if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT) status->rx_flags |= IEEE80211_RX_AMSDU; seqno_idx = tid; security_idx = tid; } else { /* * IEEE 802.11-2007, 7.1.3.4.1 ("Sequence Number field"): * * Sequence numbers for management frames, QoS data * frames with a broadcast/multicast address in the * Address 1 field, and all non-QoS data frames sent * by QoS STAs are assigned using an additional single * modulo-4096 counter, [...] * * We also use that counter for non-QoS STAs. */ seqno_idx = IEEE80211_NUM_TIDS; security_idx = 0; if (ieee80211_is_mgmt(hdr->frame_control)) security_idx = IEEE80211_NUM_TIDS; tid = 0; } rx->seqno_idx = seqno_idx; rx->security_idx = security_idx; /* Set skb->priority to 1d tag if highest order bit of TID is not set. * For now, set skb->priority to 0 for other cases. */ rx->skb->priority = (tid > 7) ? 0 : tid; } /** * DOC: Packet alignment * * Drivers always need to pass packets that are aligned to two-byte boundaries * to the stack. * * Additionally, should, if possible, align the payload data in a way that * guarantees that the contained IP header is aligned to a four-byte * boundary. In the case of regular frames, this simply means aligning the * payload to a four-byte boundary (because either the IP header is directly * contained, or IV/RFC1042 headers that have a length divisible by four are * in front of it). If the payload data is not properly aligned and the * architecture doesn't support efficient unaligned operations, mac80211 * will align the data. * * With A-MSDU frames, however, the payload data address must yield two modulo * four because there are 14-byte 802.3 headers within the A-MSDU frames that * push the IP header further back to a multiple of four again. Thankfully, the * specs were sane enough this time around to require padding each A-MSDU * subframe to a length that is a multiple of four. * * Padding like Atheros hardware adds which is between the 802.11 header and * the payload is not supported, the driver is required to move the 802.11 * header to be directly in front of the payload in that case. 
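 * Worked example: if an A-MSDU subframe starts at an address equal to * 2 modulo 4, the 14-byte 802.3 header plus the 8-byte RFC 1042 header put * the contained IP header 22 bytes in, i.e. back on a four-byte boundary * (2 + 22 = 24).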
*/ static void ieee80211_verify_alignment(struct ieee80211_rx_data *rx) { #ifdef CPTCFG_MAC80211_VERBOSE_DEBUG WARN_ON_ONCE((unsigned long)rx->skb->data & 1); #endif } /* rx handlers */ static int ieee80211_is_unicast_robust_mgmt_frame(struct sk_buff *skb) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; if (is_multicast_ether_addr(hdr->addr1)) return 0; return ieee80211_is_robust_mgmt_frame(skb); } static int ieee80211_is_multicast_robust_mgmt_frame(struct sk_buff *skb) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; if (!is_multicast_ether_addr(hdr->addr1)) return 0; return ieee80211_is_robust_mgmt_frame(skb); } /* Get the BIP key index from MMIE; return -1 if this is not a BIP frame */ static int ieee80211_get_mmie_keyidx(struct sk_buff *skb) { struct ieee80211_mgmt *hdr = (struct ieee80211_mgmt *) skb->data; struct ieee80211_mmie *mmie; struct ieee80211_mmie_16 *mmie16; if (skb->len < 24 + sizeof(*mmie) || !is_multicast_ether_addr(hdr->da)) return -1; if (!ieee80211_is_robust_mgmt_frame(skb)) return -1; /* not a robust management frame */ mmie = (struct ieee80211_mmie *) (skb->data + skb->len - sizeof(*mmie)); if (mmie->element_id == WLAN_EID_MMIE && mmie->length == sizeof(*mmie) - 2) return le16_to_cpu(mmie->key_id); mmie16 = (struct ieee80211_mmie_16 *) (skb->data + skb->len - sizeof(*mmie16)); if (skb->len >= 24 + sizeof(*mmie16) && mmie16->element_id == WLAN_EID_MMIE && mmie16->length == sizeof(*mmie16) - 2) return le16_to_cpu(mmie16->key_id); return -1; } static int ieee80211_get_cs_keyid(const struct ieee80211_cipher_scheme *cs, struct sk_buff *skb) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; __le16 fc; int hdrlen; u8 keyid; fc = hdr->frame_control; hdrlen = ieee80211_hdrlen(fc); if (skb->len < hdrlen + cs->hdr_len) return -EINVAL; skb_copy_bits(skb, hdrlen + cs->key_idx_off, &keyid, 1); keyid &= cs->key_idx_mask; keyid >>= cs->key_idx_shift; return keyid; } static ieee80211_rx_result ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; char *dev_addr = rx->sdata->vif.addr; if (ieee80211_is_data(hdr->frame_control)) { if (is_multicast_ether_addr(hdr->addr1)) { if (ieee80211_has_tods(hdr->frame_control) || !ieee80211_has_fromds(hdr->frame_control)) return RX_DROP_MONITOR; if (ether_addr_equal(hdr->addr3, dev_addr)) return RX_DROP_MONITOR; } else { if (!ieee80211_has_a4(hdr->frame_control)) return RX_DROP_MONITOR; if (ether_addr_equal(hdr->addr4, dev_addr)) return RX_DROP_MONITOR; } } /* If there is not an established peer link and this is not a peer link * establishment frame, beacon or probe, drop the frame.
*/ if (!rx->sta || sta_plink_state(rx->sta) != NL80211_PLINK_ESTAB) { struct ieee80211_mgmt *mgmt; if (!ieee80211_is_mgmt(hdr->frame_control)) return RX_DROP_MONITOR; if (ieee80211_is_action(hdr->frame_control)) { u8 category; /* make sure category field is present */ if (rx->skb->len < IEEE80211_MIN_ACTION_SIZE) return RX_DROP_MONITOR; mgmt = (struct ieee80211_mgmt *)hdr; category = mgmt->u.action.category; if (category != WLAN_CATEGORY_MESH_ACTION && category != WLAN_CATEGORY_SELF_PROTECTED) return RX_DROP_MONITOR; return RX_CONTINUE; } if (ieee80211_is_probe_req(hdr->frame_control) || ieee80211_is_probe_resp(hdr->frame_control) || ieee80211_is_beacon(hdr->frame_control) || ieee80211_is_auth(hdr->frame_control)) return RX_CONTINUE; return RX_DROP_MONITOR; } return RX_CONTINUE; } static inline bool ieee80211_rx_reorder_ready(struct tid_ampdu_rx *tid_agg_rx, int index) { struct sk_buff_head *frames = &tid_agg_rx->reorder_buf[index]; struct sk_buff *tail = skb_peek_tail(frames); struct ieee80211_rx_status *status; if (tid_agg_rx->reorder_buf_filtered & BIT_ULL(index)) return true; if (!tail) return false; status = IEEE80211_SKB_RXCB(tail); if (status->flag & RX_FLAG_AMSDU_MORE) return false; return true; } static void ieee80211_release_reorder_frame(struct ieee80211_sub_if_data *sdata, struct tid_ampdu_rx *tid_agg_rx, int index, struct sk_buff_head *frames) { struct sk_buff_head *skb_list = &tid_agg_rx->reorder_buf[index]; struct sk_buff *skb; struct ieee80211_rx_status *status; lockdep_assert_held(&tid_agg_rx->reorder_lock); if (skb_queue_empty(skb_list)) goto no_frame; if (!ieee80211_rx_reorder_ready(tid_agg_rx, index)) { __skb_queue_purge(skb_list); goto no_frame; } /* release frames from the reorder ring buffer */ tid_agg_rx->stored_mpdu_num--; while ((skb = __skb_dequeue(skb_list))) { status = IEEE80211_SKB_RXCB(skb); status->rx_flags |= IEEE80211_RX_DEFERRED_RELEASE; __skb_queue_tail(frames, skb); } no_frame: tid_agg_rx->reorder_buf_filtered &= ~BIT_ULL(index); tid_agg_rx->head_seq_num = ieee80211_sn_inc(tid_agg_rx->head_seq_num); } static void ieee80211_release_reorder_frames(struct ieee80211_sub_if_data *sdata, struct tid_ampdu_rx *tid_agg_rx, u16 head_seq_num, struct sk_buff_head *frames) { int index; lockdep_assert_held(&tid_agg_rx->reorder_lock); while (ieee80211_sn_less(tid_agg_rx->head_seq_num, head_seq_num)) { index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size; ieee80211_release_reorder_frame(sdata, tid_agg_rx, index, frames); } } /* * Timeout (in jiffies) for skb's that are waiting in the RX reorder buffer. If * the skb was added to the buffer longer than this time ago, the earlier * frames that have not yet been received are assumed to be lost and the skb * can be released for processing. This may also release other skb's from the * reorder buffer if there are no additional gaps between the frames. * * Callers must hold tid_agg_rx->reorder_lock. */ #define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10) static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata, struct tid_ampdu_rx *tid_agg_rx, struct sk_buff_head *frames) { int index, i, j; lockdep_assert_held(&tid_agg_rx->reorder_lock); /* release the buffer until next missing frame */ index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size; if (!ieee80211_rx_reorder_ready(tid_agg_rx, index) && tid_agg_rx->stored_mpdu_num) { /* * No buffers ready to be released, but check whether any * frames in the reorder buffer have timed out. 
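 * A frame counts as timed out once it has been buffered for longer than * HT_RX_REORDER_BUF_TIMEOUT, i.e. HZ / 10, roughly 100 ms.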
*/ int skipped = 1; for (j = (index + 1) % tid_agg_rx->buf_size; j != index; j = (j + 1) % tid_agg_rx->buf_size) { if (!ieee80211_rx_reorder_ready(tid_agg_rx, j)) { skipped++; continue; } if (skipped && !time_after(jiffies, tid_agg_rx->reorder_time[j] + HT_RX_REORDER_BUF_TIMEOUT)) goto set_release_timer; /* don't leave incomplete A-MSDUs around */ for (i = (index + 1) % tid_agg_rx->buf_size; i != j; i = (i + 1) % tid_agg_rx->buf_size) __skb_queue_purge(&tid_agg_rx->reorder_buf[i]); ht_dbg_ratelimited(sdata, "release an RX reorder frame due to timeout on earlier frames\n"); ieee80211_release_reorder_frame(sdata, tid_agg_rx, j, frames); /* * Increment the head seq# also for the skipped slots. */ tid_agg_rx->head_seq_num = (tid_agg_rx->head_seq_num + skipped) & IEEE80211_SN_MASK; skipped = 0; } } else while (ieee80211_rx_reorder_ready(tid_agg_rx, index)) { ieee80211_release_reorder_frame(sdata, tid_agg_rx, index, frames); index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size; } if (tid_agg_rx->stored_mpdu_num) { j = index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size; for (; j != (index - 1) % tid_agg_rx->buf_size; j = (j + 1) % tid_agg_rx->buf_size) { if (ieee80211_rx_reorder_ready(tid_agg_rx, j)) break; } set_release_timer: if (!tid_agg_rx->removed) mod_timer(&tid_agg_rx->reorder_timer, tid_agg_rx->reorder_time[j] + 1 + HT_RX_REORDER_BUF_TIMEOUT); } else { del_timer(&tid_agg_rx->reorder_timer); } } /* * As this function belongs to the RX path it must be under * rcu_read_lock protection. It returns false if the frame * can be processed immediately, true if it was consumed. */ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_sub_if_data *sdata, struct tid_ampdu_rx *tid_agg_rx, struct sk_buff *skb, struct sk_buff_head *frames) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); u16 sc = le16_to_cpu(hdr->seq_ctrl); u16 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4; u16 head_seq_num, buf_size; int index; bool ret = true; spin_lock(&tid_agg_rx->reorder_lock); /* * Offloaded BA sessions have no known starting sequence number so pick * one from first Rxed frame for this tid after BA was started. */ if (unlikely(tid_agg_rx->auto_seq)) { tid_agg_rx->auto_seq = false; tid_agg_rx->ssn = mpdu_seq_num; tid_agg_rx->head_seq_num = mpdu_seq_num; } buf_size = tid_agg_rx->buf_size; head_seq_num = tid_agg_rx->head_seq_num; /* * If the current MPDU's SN is smaller than the SSN, it shouldn't * be reordered. */ if (unlikely(!tid_agg_rx->started)) { if (ieee80211_sn_less(mpdu_seq_num, head_seq_num)) { ret = false; goto out; } tid_agg_rx->started = true; } /* frame with out of date sequence number */ if (ieee80211_sn_less(mpdu_seq_num, head_seq_num)) { dev_kfree_skb(skb); goto out; } /* * If the frame's sequence number exceeds our buffering window * size, release some previous frames to make room for this one.
	if (!ieee80211_sn_less(mpdu_seq_num, head_seq_num + buf_size)) {
		head_seq_num = ieee80211_sn_inc(
				ieee80211_sn_sub(mpdu_seq_num, buf_size));
		/* release stored frames up to new head to stack */
		ieee80211_release_reorder_frames(sdata, tid_agg_rx,
						 head_seq_num, frames);
	}

	/* Now the new frame is always in the range of the reordering buffer */

	index = mpdu_seq_num % tid_agg_rx->buf_size;

	/* check if we already stored this frame */
	if (ieee80211_rx_reorder_ready(tid_agg_rx, index)) {
		dev_kfree_skb(skb);
		goto out;
	}

	/*
	 * If the current MPDU is in the right order and nothing else
	 * is stored we can process it directly, no need to buffer it.
	 * If it is first but there's something stored, we may be able
	 * to release frames after this one.
	 */
	if (mpdu_seq_num == tid_agg_rx->head_seq_num &&
	    tid_agg_rx->stored_mpdu_num == 0) {
		if (!(status->flag & RX_FLAG_AMSDU_MORE))
			tid_agg_rx->head_seq_num =
				ieee80211_sn_inc(tid_agg_rx->head_seq_num);
		ret = false;
		goto out;
	}

	/* put the frame in the reordering buffer */
	__skb_queue_tail(&tid_agg_rx->reorder_buf[index], skb);
	if (!(status->flag & RX_FLAG_AMSDU_MORE)) {
		tid_agg_rx->reorder_time[index] = jiffies;
		tid_agg_rx->stored_mpdu_num++;
		ieee80211_sta_reorder_release(sdata, tid_agg_rx, frames);
	}

 out:
	spin_unlock(&tid_agg_rx->reorder_lock);
	return ret;
}

/*
 * Reorder MPDUs from A-MPDUs, keeping them in the reorder buffer; frames
 * that are ready for processing are moved to the 'frames' queue.
 */
static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
				       struct sk_buff_head *frames)
{
	struct sk_buff *skb = rx->skb;
	struct ieee80211_local *local = rx->local;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct sta_info *sta = rx->sta;
	struct tid_ampdu_rx *tid_agg_rx;
	u16 sc;
	u8 tid, ack_policy;

	if (!ieee80211_is_data_qos(hdr->frame_control) ||
	    is_multicast_ether_addr(hdr->addr1))
		goto dont_reorder;

	/*
	 * filter the QoS data rx stream according to
	 * STA/TID and check if this STA/TID is on aggregation
	 */

	if (!sta)
		goto dont_reorder;

	ack_policy = *ieee80211_get_qos_ctl(hdr) &
		     IEEE80211_QOS_CTL_ACK_POLICY_MASK;
	tid = ieee80211_get_tid(hdr);

	tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
	if (!tid_agg_rx) {
		if (ack_policy == IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK &&
		    !test_bit(tid, rx->sta->ampdu_mlme.agg_session_valid) &&
		    !test_and_set_bit(tid, rx->sta->ampdu_mlme.unexpected_agg))
			ieee80211_send_delba(rx->sdata, rx->sta->sta.addr, tid,
					     WLAN_BACK_RECIPIENT,
					     WLAN_REASON_QSTA_REQUIRE_SETUP);
		goto dont_reorder;
	}

	/* qos null data frames are excluded */
	if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC)))
		goto dont_reorder;

	/* not part of a BA session */
	if (ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK &&
	    ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_NORMAL)
		goto dont_reorder;

	/* new, potentially un-ordered, ampdu frame - process it */

	/* reset session timer */
	if (tid_agg_rx->timeout)
		tid_agg_rx->last_rx = jiffies;

	/* if this mpdu is fragmented - terminate rx aggregation session */
	sc = le16_to_cpu(hdr->seq_ctrl);
	if (sc & IEEE80211_SCTL_FRAG) {
		skb_queue_tail(&rx->sdata->skb_queue, skb);
		ieee80211_queue_work(&local->hw, &rx->sdata->work);
		return;
	}

	/*
	 * No locking needed -- we will only ever process one
	 * RX packet at a time, and thus own tid_agg_rx. All
	 * other code manipulating it needs to (and does) make
	 * sure that we cannot get to it any more before doing
	 * anything with it.
	 */
	if (ieee80211_sta_manage_reorder_buf(rx->sdata, tid_agg_rx, skb,
					     frames))
		return;

 dont_reorder:
	__skb_queue_tail(frames, skb);
}

static ieee80211_rx_result debug_noinline
ieee80211_rx_h_check_dup(struct ieee80211_rx_data *rx)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);

	if (status->flag & RX_FLAG_DUP_VALIDATED)
		return RX_CONTINUE;

	/*
	 * Drop duplicate 802.11 retransmissions
	 * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery")
	 */

	if (rx->skb->len < 24)
		return RX_CONTINUE;

	if (ieee80211_is_ctl(hdr->frame_control) ||
	    ieee80211_is_nullfunc(hdr->frame_control) ||
	    ieee80211_is_qos_nullfunc(hdr->frame_control) ||
	    is_multicast_ether_addr(hdr->addr1))
		return RX_CONTINUE;

	if (!rx->sta)
		return RX_CONTINUE;

	if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
		     rx->sta->last_seq_ctrl[rx->seqno_idx] == hdr->seq_ctrl)) {
		I802_DEBUG_INC(rx->local->dot11FrameDuplicateCount);
		rx->sta->rx_stats.num_duplicates++;
		return RX_DROP_UNUSABLE;
	} else if (!(status->flag & RX_FLAG_AMSDU_MORE)) {
		rx->sta->last_seq_ctrl[rx->seqno_idx] = hdr->seq_ctrl;
	}

	return RX_CONTINUE;
}

static ieee80211_rx_result debug_noinline
ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;

	/* Drop disallowed frame classes based on STA auth/assoc state;
	 * IEEE 802.11, Chap 5.5.
	 *
	 * mac80211 filters only based on association state, i.e. it drops
	 * Class 3 frames from not associated stations. hostapd sends
	 * deauth/disassoc frames when needed. In addition, hostapd is
	 * responsible for filtering on both auth and assoc states.
	 */

	if (ieee80211_vif_is_mesh(&rx->sdata->vif))
		return ieee80211_rx_mesh_check(rx);

	if (unlikely((ieee80211_is_data(hdr->frame_control) ||
		      ieee80211_is_pspoll(hdr->frame_control)) &&
		     rx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
		     rx->sdata->vif.type != NL80211_IFTYPE_WDS &&
		     rx->sdata->vif.type != NL80211_IFTYPE_OCB &&
		     (!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_ASSOC)))) {
		/*
		 * accept port control frames from the AP even when it's not
		 * yet marked ASSOC to prevent a race where we don't set the
		 * assoc bit quickly enough before it sends the first frame
		 */
		if (rx->sta && rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
		    ieee80211_is_data_present(hdr->frame_control)) {
			unsigned int hdrlen;
			__be16 ethertype;

			hdrlen = ieee80211_hdrlen(hdr->frame_control);

			if (rx->skb->len < hdrlen + 8)
				return RX_DROP_MONITOR;

			skb_copy_bits(rx->skb, hdrlen + 6, &ethertype, 2);
			if (ethertype == rx->sdata->control_port_protocol)
				return RX_CONTINUE;
		}

		if (rx->sdata->vif.type == NL80211_IFTYPE_AP &&
		    cfg80211_rx_spurious_frame(rx->sdata->dev,
					       hdr->addr2,
					       GFP_ATOMIC))
			return RX_DROP_UNUSABLE;

		return RX_DROP_MONITOR;
	}

	return RX_CONTINUE;
}

static ieee80211_rx_result debug_noinline
ieee80211_rx_h_check_more_data(struct ieee80211_rx_data *rx)
{
	struct ieee80211_local *local;
	struct ieee80211_hdr *hdr;
	struct sk_buff *skb;

	local = rx->local;
	skb = rx->skb;
	hdr = (struct ieee80211_hdr *) skb->data;

	if (!local->pspolling)
		return RX_CONTINUE;

	if (!ieee80211_has_fromds(hdr->frame_control))
		/* this is not from AP */
		return RX_CONTINUE;

	if (!ieee80211_is_data(hdr->frame_control))
		return RX_CONTINUE;

	if (!ieee80211_has_moredata(hdr->frame_control)) {
		/* AP has no more frames buffered for us */
		local->pspolling = false;
		return RX_CONTINUE;
	}

	/* more data bit is set, let's request a new frame from the AP */
	ieee80211_send_pspoll(local, rx->sdata);

	return RX_CONTINUE;
}
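/*
 * Power-save state tracking: sta_ps_start() marks a station as asleep and
 * disables the fast-xmit path for it, while sta_ps_end() wakes it up again
 * and triggers delivery of frames buffered in the meantime. Drivers that
 * handle powersave themselves (AP_LINK_PS) report transitions through
 * ieee80211_sta_ps_transition() instead of having mac80211 infer them from
 * the PM bit of received frames.
 */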
static void sta_ps_start(struct sta_info *sta)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;
	struct ps_data *ps;
	int tid;

	if (sta->sdata->vif.type == NL80211_IFTYPE_AP ||
	    sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
		ps = &sdata->bss->ps;
	else
		return;

	atomic_inc(&ps->num_sta_ps);
	set_sta_flag(sta, WLAN_STA_PS_STA);
	if (!ieee80211_hw_check(&local->hw, AP_LINK_PS))
		drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta);
	ps_dbg(sdata, "STA %pM aid %d enters power save mode\n",
	       sta->sta.addr, sta->sta.aid);

	ieee80211_clear_fast_xmit(sta);

	if (!sta->sta.txq[0])
		return;

	for (tid = 0; tid < IEEE80211_NUM_TIDS; tid++) {
		if (txq_has_queue(sta->sta.txq[tid]))
			set_bit(tid, &sta->txq_buffered_tids);
		else
			clear_bit(tid, &sta->txq_buffered_tids);
	}
}

static void sta_ps_end(struct sta_info *sta)
{
	ps_dbg(sta->sdata, "STA %pM aid %d exits power save mode\n",
	       sta->sta.addr, sta->sta.aid);

	if (test_sta_flag(sta, WLAN_STA_PS_DRIVER)) {
		/*
		 * Clear the flag only if the other one is still set
		 * so that the TX path won't start TX'ing new frames
		 * directly ... In the case that the driver flag isn't
		 * set ieee80211_sta_ps_deliver_wakeup() will clear it.
		 */
		clear_sta_flag(sta, WLAN_STA_PS_STA);
		ps_dbg(sta->sdata, "STA %pM aid %d driver-ps-blocked\n",
		       sta->sta.addr, sta->sta.aid);
		return;
	}

	set_sta_flag(sta, WLAN_STA_PS_DELIVER);
	clear_sta_flag(sta, WLAN_STA_PS_STA);
	ieee80211_sta_ps_deliver_wakeup(sta);
}

int ieee80211_sta_ps_transition(struct ieee80211_sta *pubsta, bool start)
{
	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
	bool in_ps;

	WARN_ON(!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS));

	/* Don't let the same PS state be set twice */
	in_ps = test_sta_flag(sta, WLAN_STA_PS_STA);
	if ((start && in_ps) || (!start && !in_ps))
		return -EINVAL;

	if (start)
		sta_ps_start(sta);
	else
		sta_ps_end(sta);

	return 0;
}
EXPORT_SYMBOL(ieee80211_sta_ps_transition);

void ieee80211_sta_pspoll(struct ieee80211_sta *pubsta)
{
	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);

	if (test_sta_flag(sta, WLAN_STA_SP))
		return;

	if (!test_sta_flag(sta, WLAN_STA_PS_DRIVER))
		ieee80211_sta_ps_deliver_poll_response(sta);
	else
		set_sta_flag(sta, WLAN_STA_PSPOLL);
}
EXPORT_SYMBOL(ieee80211_sta_pspoll);

void ieee80211_sta_uapsd_trigger(struct ieee80211_sta *pubsta, u8 tid)
{
	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
	int ac = ieee80211_ac_from_tid(tid);

	/*
	 * If this AC is not trigger-enabled do nothing unless the
	 * driver is calling us after it already checked.
	 *
	 * NB: This could/should check a separate bitmap of trigger-
	 * enabled queues, but for now we only implement uAPSD w/o
	 * TSPEC changes to the ACs, so they're always the same.
	 */
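	/*
	 * A tid of IEEE80211_NUM_TIDS acts as the "driver already checked"
	 * wildcard mentioned above and bypasses the per-AC check below.
	 */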
	if (!(sta->sta.uapsd_queues & ieee80211_ac_to_qos_mask[ac]) &&
	    tid != IEEE80211_NUM_TIDS)
		return;

	/* if we are in a service period, do nothing */
	if (test_sta_flag(sta, WLAN_STA_SP))
		return;

	if (!test_sta_flag(sta, WLAN_STA_PS_DRIVER))
		ieee80211_sta_ps_deliver_uapsd(sta);
	else
		set_sta_flag(sta, WLAN_STA_UAPSD);
}
EXPORT_SYMBOL(ieee80211_sta_uapsd_trigger);

static ieee80211_rx_result debug_noinline
ieee80211_rx_h_uapsd_and_pspoll(struct ieee80211_rx_data *rx)
{
	struct ieee80211_sub_if_data *sdata = rx->sdata;
	struct ieee80211_hdr *hdr = (void *)rx->skb->data;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);

	if (!rx->sta)
		return RX_CONTINUE;

	if (sdata->vif.type != NL80211_IFTYPE_AP &&
	    sdata->vif.type != NL80211_IFTYPE_AP_VLAN)
		return RX_CONTINUE;

	/*
	 * The device handles station powersave, so don't do anything about
	 * uAPSD and PS-Poll frames (the latter shouldn't even come up from
	 * it to mac80211 since they're handled.)
	 */
	if (ieee80211_hw_check(&sdata->local->hw, AP_LINK_PS))
		return RX_CONTINUE;

	/*
	 * Don't do anything if the station isn't already asleep. In
	 * the uAPSD case, the station will probably be marked asleep,
	 * in the PS-Poll case the station must be confused ...
	 */
	if (!test_sta_flag(rx->sta, WLAN_STA_PS_STA))
		return RX_CONTINUE;

	if (unlikely(ieee80211_is_pspoll(hdr->frame_control))) {
		ieee80211_sta_pspoll(&rx->sta->sta);

		/* Free PS Poll skb here instead of returning RX_DROP that would
		 * count as a dropped frame. */
		dev_kfree_skb(rx->skb);
		return RX_QUEUED;
	} else if (!ieee80211_has_morefrags(hdr->frame_control) &&
		   !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
		   ieee80211_has_pm(hdr->frame_control) &&
		   (ieee80211_is_data_qos(hdr->frame_control) ||
		    ieee80211_is_qos_nullfunc(hdr->frame_control))) {
		u8 tid = ieee80211_get_tid(hdr);

		ieee80211_sta_uapsd_trigger(&rx->sta->sta, tid);
	}

	return RX_CONTINUE;
}

static ieee80211_rx_result debug_noinline
ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
{
	struct sta_info *sta = rx->sta;
	struct sk_buff *skb = rx->skb;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	int i;

	if (!sta)
		return RX_CONTINUE;

	/*
	 * Update last_rx only for IBSS packets which are for the current
	 * BSSID and for station already AUTHORIZED to avoid keeping the
	 * current IBSS network alive in cases where other STAs start
	 * using different BSSID. This will also give the station another
	 * chance to restart the authentication/authorization in case
	 * something went wrong the first time.
	 */
	if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) {
		u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len,
						NL80211_IFTYPE_ADHOC);
		if (ether_addr_equal(bssid, rx->sdata->u.ibss.bssid) &&
		    test_sta_flag(sta, WLAN_STA_AUTHORIZED)) {
			sta->rx_stats.last_rx = jiffies;
			if (ieee80211_is_data(hdr->frame_control) &&
			    !is_multicast_ether_addr(hdr->addr1))
				sta->rx_stats.last_rate =
					sta_stats_encode_rate(status);
		}
	} else if (rx->sdata->vif.type == NL80211_IFTYPE_OCB) {
		sta->rx_stats.last_rx = jiffies;
	} else if (!is_multicast_ether_addr(hdr->addr1)) {
		/*
		 * Mesh beacons will update last_rx if they are found to
		 * match the current local configuration when processed.
		 */
		sta->rx_stats.last_rx = jiffies;
		if (ieee80211_is_data(hdr->frame_control))
			sta->rx_stats.last_rate = sta_stats_encode_rate(status);
	}

	if (rx->sdata->vif.type == NL80211_IFTYPE_STATION)
		ieee80211_sta_rx_notify(rx->sdata, hdr);

	sta->rx_stats.fragments++;

	u64_stats_update_begin(&rx->sta->rx_stats.syncp);
	sta->rx_stats.bytes += rx->skb->len;
	u64_stats_update_end(&rx->sta->rx_stats.syncp);

	if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
		sta->rx_stats.last_signal = status->signal;
		ewma_signal_add(&sta->rx_stats_avg.signal, -status->signal);
	}

	if (status->chains) {
		sta->rx_stats.chains = status->chains;
		for (i = 0; i < ARRAY_SIZE(status->chain_signal); i++) {
			int signal = status->chain_signal[i];

			if (!(status->chains & BIT(i)))
				continue;

			sta->rx_stats.chain_signal_last[i] = signal;
			ewma_signal_add(&sta->rx_stats_avg.chain_signal[i],
					-signal);
		}
	}

	/*
	 * Change STA power saving mode only at the end of a frame
	 * exchange sequence, and only for a data or management
	 * frame as specified in IEEE 802.11-2016 11.2.3.2
	 */
	if (!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS) &&
	    !ieee80211_has_morefrags(hdr->frame_control) &&
	    !is_multicast_ether_addr(hdr->addr1) &&
	    (ieee80211_is_mgmt(hdr->frame_control) ||
	     ieee80211_is_data(hdr->frame_control)) &&
	    !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
	    (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
	     rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) {
		if (test_sta_flag(sta, WLAN_STA_PS_STA)) {
			if (!ieee80211_has_pm(hdr->frame_control))
				sta_ps_end(sta);
		} else {
			if (ieee80211_has_pm(hdr->frame_control))
				sta_ps_start(sta);
		}
	}

	/* mesh power save support */
	if (ieee80211_vif_is_mesh(&rx->sdata->vif))
		ieee80211_mps_rx_h_sta_process(sta, hdr);

	/*
	 * Drop (qos-)data::nullfunc frames silently, since they
	 * are used only to control station power saving mode.
	 */
	if (ieee80211_is_nullfunc(hdr->frame_control) ||
	    ieee80211_is_qos_nullfunc(hdr->frame_control)) {
		I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc);

		/*
		 * If we receive a 4-addr nullfunc frame from a STA
		 * that was not moved to a 4-addr STA vlan yet send
		 * the event to userspace and for older hostapd drop
		 * the frame to the monitor interface.
		 */
		if (ieee80211_has_a4(hdr->frame_control) &&
		    (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
		     (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
		      !rx->sdata->u.vlan.sta))) {
			if (!test_and_set_sta_flag(sta, WLAN_STA_4ADDR_EVENT))
				cfg80211_rx_unexpected_4addr_frame(
					rx->sdata->dev, sta->sta.addr,
					GFP_ATOMIC);
			return RX_DROP_MONITOR;
		}

		/*
		 * Update counter and free packet here to avoid
		 * counting this as a dropped packet.
		 */
		sta->rx_stats.packets++;
		dev_kfree_skb(rx->skb);
		return RX_QUEUED;
	}

	return RX_CONTINUE;
} /* ieee80211_rx_h_sta_process */

static ieee80211_rx_result debug_noinline
ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
{
	struct sk_buff *skb = rx->skb;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	int keyidx;
	int hdrlen;
	ieee80211_rx_result result = RX_DROP_UNUSABLE;
	struct ieee80211_key *sta_ptk = NULL;
	int mmie_keyidx = -1;
	__le16 fc;
	const struct ieee80211_cipher_scheme *cs = NULL;

	/*
	 * Key selection 101
	 *
	 * There are four types of keys:
	 * - GTK (group keys)
	 * - IGTK (group keys for management frames)
	 * - PTK (pairwise keys)
	 * - STK (station-to-station pairwise keys)
	 *
	 * When selecting a key, we have to distinguish between multicast
	 * (including broadcast) and unicast frames, the latter can only
	 * use PTKs and STKs while the former always use GTKs and IGTKs.
	 * Unless, of course, actual WEP keys ("pre-RSNA") are used, then
	 * unicast frames can also use key indices like GTKs. Hence, if we
	 * don't have a PTK/STK we check the key index for a WEP key.
	 *
	 * Note that in a regular BSS, multicast frames are sent by the
	 * AP only, associated stations unicast the frame to the AP first
	 * which then multicasts it on their behalf.
	 *
	 * There is also a slight problem in IBSS mode: GTKs are negotiated
	 * with each station, that is something we don't currently handle.
	 * The spec seems to expect that one negotiates the same key with
	 * every station but there's no such requirement; VLANs could be
	 * possible.
	 */

	/* start without a key */
	rx->key = NULL;
	fc = hdr->frame_control;

	if (rx->sta) {
		int keyid = rx->sta->ptk_idx;

		if (ieee80211_has_protected(fc) && rx->sta->cipher_scheme) {
			cs = rx->sta->cipher_scheme;
			keyid = ieee80211_get_cs_keyid(cs, rx->skb);
			if (unlikely(keyid < 0))
				return RX_DROP_UNUSABLE;
		}
		sta_ptk = rcu_dereference(rx->sta->ptk[keyid]);
	}

	if (!ieee80211_has_protected(fc))
		mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb);

	if (!is_multicast_ether_addr(hdr->addr1) && sta_ptk) {
		rx->key = sta_ptk;
		if ((status->flag & RX_FLAG_DECRYPTED) &&
		    (status->flag & RX_FLAG_IV_STRIPPED))
			return RX_CONTINUE;
		/* Skip decryption if the frame is not protected. */
		if (!ieee80211_has_protected(fc))
			return RX_CONTINUE;
	} else if (mmie_keyidx >= 0) {
		/* Broadcast/multicast robust management frame / BIP */
		if ((status->flag & RX_FLAG_DECRYPTED) &&
		    (status->flag & RX_FLAG_IV_STRIPPED))
			return RX_CONTINUE;

		if (mmie_keyidx < NUM_DEFAULT_KEYS ||
		    mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS)
			return RX_DROP_MONITOR; /* unexpected BIP keyidx */
		if (rx->sta) {
			if (ieee80211_is_group_privacy_action(skb) &&
			    test_sta_flag(rx->sta, WLAN_STA_MFP))
				return RX_DROP_MONITOR;

			rx->key = rcu_dereference(rx->sta->gtk[mmie_keyidx]);
		}
		if (!rx->key)
			rx->key = rcu_dereference(rx->sdata->keys[mmie_keyidx]);
	} else if (!ieee80211_has_protected(fc)) {
		/*
		 * The frame was not protected, so skip decryption. However, we
		 * need to set rx->key if there is a key that could have been
		 * used so that the frame may be dropped if encryption would
		 * have been expected.
		 */
		struct ieee80211_key *key = NULL;
		struct ieee80211_sub_if_data *sdata = rx->sdata;
		int i;

		if (ieee80211_is_mgmt(fc) &&
		    is_multicast_ether_addr(hdr->addr1) &&
		    (key = rcu_dereference(rx->sdata->default_mgmt_key)))
			rx->key = key;
		else {
			if (rx->sta) {
				for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
					key = rcu_dereference(rx->sta->gtk[i]);
					if (key)
						break;
				}
			}
			if (!key) {
				for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
					key = rcu_dereference(sdata->keys[i]);
					if (key)
						break;
				}
			}
			if (key)
				rx->key = key;
		}
		return RX_CONTINUE;
	} else {
		u8 keyid;

		/*
		 * The device doesn't give us the IV so we won't be
		 * able to look up the key. That's ok though, we
		 * don't need to decrypt the frame, we just won't
		 * be able to keep statistics accurate.
		 * Except for key threshold notifications, should
		 * we somehow allow the driver to tell us which key
		 * the hardware used if this flag is set?
		 */
		if ((status->flag & RX_FLAG_DECRYPTED) &&
		    (status->flag & RX_FLAG_IV_STRIPPED))
			return RX_CONTINUE;

		hdrlen = ieee80211_hdrlen(fc);

		if (cs) {
			keyidx = ieee80211_get_cs_keyid(cs, rx->skb);

			if (unlikely(keyidx < 0))
				return RX_DROP_UNUSABLE;
		} else {
			if (rx->skb->len < 8 + hdrlen)
				return RX_DROP_UNUSABLE; /* TODO: count this? */
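			/*
			 * The WEP key index sits in the two most significant
			 * bits of the byte right after the 3-byte IV, hence
			 * the hdrlen + 3 offset and the shift by 6 below.
			 */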
			/*
			 * no need to call ieee80211_wep_get_keyidx,
			 * it verifies a bunch of things we've done already
			 */
			skb_copy_bits(rx->skb, hdrlen + 3, &keyid, 1);
			keyidx = keyid >> 6;
		}

		/* check per-station GTK first, if multicast packet */
		if (is_multicast_ether_addr(hdr->addr1) && rx->sta)
			rx->key = rcu_dereference(rx->sta->gtk[keyidx]);

		/* if not found, try default key */
		if (!rx->key) {
			rx->key = rcu_dereference(rx->sdata->keys[keyidx]);

			/*
			 * RSNA-protected unicast frames should always be
			 * sent with pairwise or station-to-station keys,
			 * but for WEP we allow using a key index as well.
			 */
			if (rx->key &&
			    rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP40 &&
			    rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP104 &&
			    !is_multicast_ether_addr(hdr->addr1))
				rx->key = NULL;
		}
	}

	if (rx->key) {
		if (unlikely(rx->key->flags & KEY_FLAG_TAINTED))
			return RX_DROP_MONITOR;

		/* TODO: add threshold stuff again */
	} else {
		return RX_DROP_MONITOR;
	}

	switch (rx->key->conf.cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
		result = ieee80211_crypto_wep_decrypt(rx);
		break;
	case WLAN_CIPHER_SUITE_TKIP:
		result = ieee80211_crypto_tkip_decrypt(rx);
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		result = ieee80211_crypto_ccmp_decrypt(
			rx, IEEE80211_CCMP_MIC_LEN);
		break;
	case WLAN_CIPHER_SUITE_CCMP_256:
		result = ieee80211_crypto_ccmp_decrypt(
			rx, IEEE80211_CCMP_256_MIC_LEN);
		break;
	case WLAN_CIPHER_SUITE_AES_CMAC:
		result = ieee80211_crypto_aes_cmac_decrypt(rx);
		break;
	case WLAN_CIPHER_SUITE_BIP_CMAC_256:
		result = ieee80211_crypto_aes_cmac_256_decrypt(rx);
		break;
	case WLAN_CIPHER_SUITE_BIP_GMAC_128:
	case WLAN_CIPHER_SUITE_BIP_GMAC_256:
		result = ieee80211_crypto_aes_gmac_decrypt(rx);
		break;
	case WLAN_CIPHER_SUITE_GCMP:
	case WLAN_CIPHER_SUITE_GCMP_256:
		result = ieee80211_crypto_gcmp_decrypt(rx);
		break;
	default:
		result = ieee80211_crypto_hw_decrypt(rx);
	}

	/* the hdr variable is invalid after the decrypt handlers */

	/* either the frame has been decrypted or will be dropped */
	status->flag |= RX_FLAG_DECRYPTED;

	return result;
}

static inline struct ieee80211_fragment_entry *
ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata,
			 unsigned int frag, unsigned int seq, int rx_queue,
			 struct sk_buff **skb)
{
	struct ieee80211_fragment_entry *entry;

	entry = &sdata->fragments[sdata->fragment_next++];
	if (sdata->fragment_next >= IEEE80211_FRAGMENT_MAX)
		sdata->fragment_next = 0;

	if (!skb_queue_empty(&entry->skb_list))
		__skb_queue_purge(&entry->skb_list);

	__skb_queue_tail(&entry->skb_list, *skb); /* no need for locking */
	*skb = NULL;
	entry->first_frag_time = jiffies;
	entry->seq = seq;
	entry->rx_queue = rx_queue;
	entry->last_frag = frag;
	entry->check_sequential_pn = false;
	entry->extra_len = 0;

	return entry;
}

static inline struct ieee80211_fragment_entry *
ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
			  unsigned int frag, unsigned int seq,
			  int rx_queue, struct ieee80211_hdr *hdr)
{
	struct ieee80211_fragment_entry *entry;
	int i, idx;

	idx = sdata->fragment_next;
	for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) {
		struct ieee80211_hdr *f_hdr;
		struct sk_buff *f_skb;

		idx--;
		if (idx < 0)
			idx = IEEE80211_FRAGMENT_MAX - 1;

		entry = &sdata->fragments[idx];
		if (skb_queue_empty(&entry->skb_list) || entry->seq != seq ||
		    entry->rx_queue != rx_queue ||
		    entry->last_frag + 1 != frag)
			continue;

		f_skb = __skb_peek(&entry->skb_list);
		f_hdr = (struct ieee80211_hdr *) f_skb->data;

		/*
		 * Check ftype and addresses are equal, else check next fragment
		 */
		if (((hdr->frame_control ^ f_hdr->frame_control) &
		     cpu_to_le16(IEEE80211_FCTL_FTYPE)) ||
		    !ether_addr_equal(hdr->addr1, f_hdr->addr1) ||
		    !ether_addr_equal(hdr->addr2, f_hdr->addr2))
			continue;

		if (time_after(jiffies, entry->first_frag_time + 2 * HZ)) {
			__skb_queue_purge(&entry->skb_list);
			continue;
		}
		return entry;
	}

	return NULL;
}

static ieee80211_rx_result debug_noinline
ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
{
	struct ieee80211_hdr *hdr;
	u16 sc;
	__le16 fc;
	unsigned int frag, seq;
	struct ieee80211_fragment_entry *entry;
	struct sk_buff *skb;

	hdr = (struct ieee80211_hdr *)rx->skb->data;
	fc = hdr->frame_control;

	if (ieee80211_is_ctl(fc))
		return RX_CONTINUE;

	sc = le16_to_cpu(hdr->seq_ctrl);
	frag = sc & IEEE80211_SCTL_FRAG;

	if (is_multicast_ether_addr(hdr->addr1)) {
		I802_DEBUG_INC(rx->local->dot11MulticastReceivedFrameCount);
		goto out_no_led;
	}

	if (likely(!ieee80211_has_morefrags(fc) && frag == 0))
		goto out;

	I802_DEBUG_INC(rx->local->rx_handlers_fragments);

	if (skb_linearize(rx->skb))
		return RX_DROP_UNUSABLE;

	/*
	 * skb_linearize() might change the skb->data and
	 * previously cached variables (in this case, hdr) need to
	 * be refreshed with the new data.
	 */
	hdr = (struct ieee80211_hdr *)rx->skb->data;
	seq = (sc & IEEE80211_SCTL_SEQ) >> 4;

	if (frag == 0) {
		/* This is the first fragment of a new frame. */
		entry = ieee80211_reassemble_add(rx->sdata, frag, seq,
						 rx->seqno_idx, &(rx->skb));
		if (rx->key &&
		    (rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP ||
		     rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP_256 ||
		     rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP ||
		     rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP_256) &&
		    ieee80211_has_protected(fc)) {
			int queue = rx->security_idx;

			/* Store CCMP/GCMP PN so that we can verify that the
			 * next fragment has a sequential PN value.
			 */
			entry->check_sequential_pn = true;
			memcpy(entry->last_pn,
			       rx->key->u.ccmp.rx_pn[queue],
			       IEEE80211_CCMP_PN_LEN);
			BUILD_BUG_ON(offsetof(struct ieee80211_key,
					      u.ccmp.rx_pn) !=
				     offsetof(struct ieee80211_key,
					      u.gcmp.rx_pn));
			BUILD_BUG_ON(sizeof(rx->key->u.ccmp.rx_pn[queue]) !=
				     sizeof(rx->key->u.gcmp.rx_pn[queue]));
			BUILD_BUG_ON(IEEE80211_CCMP_PN_LEN !=
				     IEEE80211_GCMP_PN_LEN);
		}
		return RX_QUEUED;
	}

	/* This is a fragment for a frame that should already be pending in
	 * fragment cache. Add this fragment to the end of the pending entry.
	 */
	entry = ieee80211_reassemble_find(rx->sdata, frag, seq,
					  rx->seqno_idx, hdr);
	if (!entry) {
		I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
		return RX_DROP_MONITOR;
	}

	/* "The receiver shall discard MSDUs and MMPDUs whose constituent
	 *  MPDU PN values are not incrementing in steps of 1."
	 *  see IEEE P802.11-REVmc/D5.0, 12.5.3.4.4, item d (for CCMP)
	 *  and IEEE P802.11-REVmc/D5.0, 12.5.5.4.4, item d (for GCMP)
	 */
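	/*
	 * entry->last_pn holds the PN of the previous fragment as a 6-byte
	 * big-endian counter; the loop below computes last_pn + 1 with a
	 * manual carry, so the result can be compared against the key's
	 * current replay counter for this queue.
	 */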
	if (entry->check_sequential_pn) {
		int i;
		u8 pn[IEEE80211_CCMP_PN_LEN], *rpn;
		int queue;

		if (!rx->key ||
		    (rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP &&
		     rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP_256 &&
		     rx->key->conf.cipher != WLAN_CIPHER_SUITE_GCMP &&
		     rx->key->conf.cipher != WLAN_CIPHER_SUITE_GCMP_256))
			return RX_DROP_UNUSABLE;
		memcpy(pn, entry->last_pn, IEEE80211_CCMP_PN_LEN);
		for (i = IEEE80211_CCMP_PN_LEN - 1; i >= 0; i--) {
			pn[i]++;
			if (pn[i])
				break;
		}
		queue = rx->security_idx;
		rpn = rx->key->u.ccmp.rx_pn[queue];
		if (memcmp(pn, rpn, IEEE80211_CCMP_PN_LEN))
			return RX_DROP_UNUSABLE;
		memcpy(entry->last_pn, pn, IEEE80211_CCMP_PN_LEN);
	}

	skb_pull(rx->skb, ieee80211_hdrlen(fc));
	__skb_queue_tail(&entry->skb_list, rx->skb);
	entry->last_frag = frag;
	entry->extra_len += rx->skb->len;
	if (ieee80211_has_morefrags(fc)) {
		rx->skb = NULL;
		return RX_QUEUED;
	}

	rx->skb = __skb_dequeue(&entry->skb_list);
	if (skb_tailroom(rx->skb) < entry->extra_len) {
		I802_DEBUG_INC(rx->local->rx_expand_skb_head_defrag);
		if (unlikely(pskb_expand_head(rx->skb, 0, entry->extra_len,
					      GFP_ATOMIC))) {
			I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
			__skb_queue_purge(&entry->skb_list);
			return RX_DROP_UNUSABLE;
		}
	}
	while ((skb = __skb_dequeue(&entry->skb_list))) {
		skb_put_data(rx->skb, skb->data, skb->len);
		dev_kfree_skb(skb);
	}

 out:
	ieee80211_led_rx(rx->local);
 out_no_led:
	if (rx->sta)
		rx->sta->rx_stats.packets++;
	return RX_CONTINUE;
}

static int ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx)
{
	if (unlikely(!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_AUTHORIZED)))
		return -EACCES;

	return 0;
}

static int ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
{
	struct sk_buff *skb = rx->skb;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);

	/*
	 * Pass through unencrypted frames if the hardware has
	 * decrypted them already.
	 */
	if (status->flag & RX_FLAG_DECRYPTED)
		return 0;

	/* Drop unencrypted frames if key is set. */
	if (unlikely(!ieee80211_has_protected(fc) &&
		     !ieee80211_is_nullfunc(fc) &&
		     ieee80211_is_data(fc) && rx->key))
		return -EACCES;

	return 0;
}

static int ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
	__le16 fc = hdr->frame_control;

	/*
	 * Pass through unencrypted frames if the hardware has
	 * decrypted them already.
	 */
	if (status->flag & RX_FLAG_DECRYPTED)
		return 0;

	if (rx->sta && test_sta_flag(rx->sta, WLAN_STA_MFP)) {
		if (unlikely(!ieee80211_has_protected(fc) &&
			     ieee80211_is_unicast_robust_mgmt_frame(rx->skb) &&
			     rx->key)) {
			if (ieee80211_is_deauth(fc) ||
			    ieee80211_is_disassoc(fc))
				cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev,
							     rx->skb->data,
							     rx->skb->len);
			return -EACCES;
		}
		/* BIP does not use Protected field, so need to check MMIE */
		if (unlikely(ieee80211_is_multicast_robust_mgmt_frame(rx->skb) &&
			     ieee80211_get_mmie_keyidx(rx->skb) < 0)) {
			if (ieee80211_is_deauth(fc) ||
			    ieee80211_is_disassoc(fc))
				cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev,
							     rx->skb->data,
							     rx->skb->len);
			return -EACCES;
		}
		/*
		 * When using MFP, Action frames are not allowed prior to
		 * having configured keys.
		 */
		if (unlikely(ieee80211_is_action(fc) && !rx->key &&
			     ieee80211_is_robust_mgmt_frame(rx->skb)))
			return -EACCES;
	}

	return 0;
}

static int
__ieee80211_data_to_8023(struct ieee80211_rx_data *rx, bool *port_control)
{
	struct ieee80211_sub_if_data *sdata = rx->sdata;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
	bool check_port_control = false;
	struct ethhdr *ehdr;
	int ret;

	*port_control = false;
	if (ieee80211_has_a4(hdr->frame_control) &&
	    sdata->vif.type == NL80211_IFTYPE_AP_VLAN && !sdata->u.vlan.sta)
		return -1;

	if (sdata->vif.type == NL80211_IFTYPE_STATION &&
	    !!sdata->u.mgd.use_4addr != !!ieee80211_has_a4(hdr->frame_control)) {

		if (!sdata->u.mgd.use_4addr)
			return -1;
		else if (!ether_addr_equal(hdr->addr1, sdata->vif.addr))
			check_port_control = true;
	}

	if (is_multicast_ether_addr(hdr->addr1) &&
	    sdata->vif.type == NL80211_IFTYPE_AP_VLAN && sdata->u.vlan.sta)
		return -1;

	ret = ieee80211_data_to_8023(rx->skb, sdata->vif.addr, sdata->vif.type);
	if (ret < 0)
		return ret;

	ehdr = (struct ethhdr *) rx->skb->data;
	if (ehdr->h_proto == rx->sdata->control_port_protocol)
		*port_control = true;
	else if (check_port_control)
		return -1;

	return 0;
}

/*
 * requires that rx->skb is a frame with ethernet header
 */
static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc)
{
	static const u8 pae_group_addr[ETH_ALEN] __aligned(2)
		= { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x03 };
	struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;

	/*
	 * Allow EAPOL frames to us/the PAE group address regardless
	 * of whether the frame was encrypted or not.
	 */
	if (ehdr->h_proto == rx->sdata->control_port_protocol &&
	    (ether_addr_equal(ehdr->h_dest, rx->sdata->vif.addr) ||
	     ether_addr_equal(ehdr->h_dest, pae_group_addr)))
		return true;

	if (ieee80211_802_1x_port_control(rx) ||
	    ieee80211_drop_unencrypted(rx, fc))
		return false;

	return true;
}

static void ieee80211_deliver_skb_to_local_stack(struct sk_buff *skb,
						 struct ieee80211_rx_data *rx)
{
	struct ieee80211_sub_if_data *sdata = rx->sdata;
	struct net_device *dev = sdata->dev;

	if (unlikely((skb->protocol == sdata->control_port_protocol ||
		      skb->protocol == cpu_to_be16(ETH_P_PREAUTH)) &&
		     sdata->control_port_over_nl80211)) {
		struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
		bool noencrypt = status->flag & RX_FLAG_DECRYPTED;

		cfg80211_rx_control_port(dev, skb, noencrypt);
		dev_kfree_skb(skb);
	} else {
		/* deliver to local stack */
		if (rx->napi)
			napi_gro_receive(rx->napi, skb);
		else
			netif_receive_skb(skb);
	}
}

/*
 * requires that rx->skb is a frame with ethernet header
 */
static void
ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
{
	struct ieee80211_sub_if_data *sdata = rx->sdata;
	struct net_device *dev = sdata->dev;
	struct sk_buff *skb, *xmit_skb;
	struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
	struct sta_info *dsta;

	skb = rx->skb;
	xmit_skb = NULL;

#ifdef CPTCFG_IWLMVM_VENDOR_CMDS
	/*
	 * Filter packets when configured to do so by user space,
	 * and we are associated to a Hotspot AP and have an IP address.
	 */
	if (sdata->vif.filter_grat_arp_unsol_na &&
	    sdata->vif.bss_conf.arp_addr_cnt &&
	    ieee80211_is_gratuitous_arp_unsolicited_na(skb)) {
		dev_kfree_skb(skb);
		return;
	}

	if (sdata->vif.filter_gtk && sdata->vif.bss_conf.arp_addr_cnt &&
	    ieee80211_is_shared_gtk(skb)) {
		dev_kfree_skb(skb);
		return;
	}
#endif

	ieee80211_rx_stats(dev, skb->len);

	if (rx->sta) {
		/* The seqno index has the same property as needed
		 * for the rx_msdu field, i.e. it is IEEE80211_NUM_TIDS
		 * for non-QoS-data frames. Here we know it's a data
		 * frame, so count MSDUs.
		 */
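		/*
		 * The u64_stats syncp only serves 32-bit readers: it lets
		 * them take a consistent snapshot of the 64-bit counters
		 * updated below without any extra locking on 64-bit hosts.
		 */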
		u64_stats_update_begin(&rx->sta->rx_stats.syncp);
		rx->sta->rx_stats.msdu[rx->seqno_idx]++;
		u64_stats_update_end(&rx->sta->rx_stats.syncp);
	}

	if ((sdata->vif.type == NL80211_IFTYPE_AP ||
	     sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
	    !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) &&
	    (sdata->vif.type != NL80211_IFTYPE_AP_VLAN || !sdata->u.vlan.sta)) {
		if (is_multicast_ether_addr(ehdr->h_dest) &&
		    ieee80211_vif_get_num_mcast_if(sdata) != 0) {
			/*
			 * send multicast frames both to higher layers in
			 * local net stack and back to the wireless medium
			 */
			xmit_skb = skb_copy(skb, GFP_ATOMIC);
			if (!xmit_skb)
				net_info_ratelimited("%s: failed to clone multicast frame\n",
						     dev->name);
		} else if (!is_multicast_ether_addr(ehdr->h_dest) &&
			   !ether_addr_equal(ehdr->h_dest, ehdr->h_source)) {
			dsta = sta_info_get(sdata, ehdr->h_dest);
			if (dsta) {
				/*
				 * The destination station is associated to
				 * this AP (in this VLAN), so send the frame
				 * directly to it and do not pass it to local
				 * net stack.
				 */
				xmit_skb = skb;
				skb = NULL;
			}
		}
	}

#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (skb) {
		/* 'align' will only take the values 0 or 2 here since all
		 * frames are required to be aligned to 2-byte boundaries
		 * when being passed to mac80211; the code here works just
		 * as well if that isn't true, but mac80211 assumes it can
		 * access fields as 2-byte aligned (e.g. for ether_addr_equal)
		 */
		int align;

		align = (unsigned long)(skb->data + sizeof(struct ethhdr)) & 3;
		if (align) {
			if (WARN_ON(skb_headroom(skb) < 3)) {
				dev_kfree_skb(skb);
				skb = NULL;
			} else {
				u8 *data = skb->data;
				size_t len = skb_headlen(skb);
				skb->data -= align;
				memmove(skb->data, data, len);
				skb_set_tail_pointer(skb, len);
			}
		}
	}
#endif

	if (skb) {
		skb->protocol = eth_type_trans(skb, dev);
		memset(skb->cb, 0, sizeof(skb->cb));

		ieee80211_deliver_skb_to_local_stack(skb, rx);
	}

	if (xmit_skb) {
		/*
		 * Send to wireless media and increase priority by 256 to
		 * keep the received priority instead of reclassifying
		 * the frame (see cfg80211_classify8021d).
		 */
		xmit_skb->priority += 256;
		xmit_skb->protocol = htons(ETH_P_802_3);
		skb_reset_network_header(xmit_skb);
		skb_reset_mac_header(xmit_skb);
		dev_queue_xmit(xmit_skb);
	}
}

static ieee80211_rx_result debug_noinline
__ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx, u8 data_offset)
{
	struct net_device *dev = rx->sdata->dev;
	struct sk_buff *skb = rx->skb;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	__le16 fc = hdr->frame_control;
	struct sk_buff_head frame_list;
	struct ethhdr ethhdr;
	const u8 *check_da = ethhdr.h_dest, *check_sa = ethhdr.h_source;

	if (unlikely(ieee80211_has_a4(hdr->frame_control))) {
		check_da = NULL;
		check_sa = NULL;
	} else switch (rx->sdata->vif.type) {
		case NL80211_IFTYPE_AP:
		case NL80211_IFTYPE_AP_VLAN:
			check_da = NULL;
			break;
		case NL80211_IFTYPE_STATION:
			if (!rx->sta ||
			    !test_sta_flag(rx->sta, WLAN_STA_TDLS_PEER))
				check_sa = NULL;
			break;
		case NL80211_IFTYPE_MESH_POINT:
			check_sa = NULL;
			break;
		default:
			break;
	}

	skb->dev = dev;
	__skb_queue_head_init(&frame_list);

	if (ieee80211_data_to_8023_exthdr(skb, &ethhdr,
					  rx->sdata->vif.addr,
					  rx->sdata->vif.type,
					  data_offset))
		return RX_DROP_UNUSABLE;

	ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr,
				 rx->sdata->vif.type,
				 rx->local->hw.extra_tx_headroom,
				 check_da, check_sa);

	while (!skb_queue_empty(&frame_list)) {
		rx->skb = __skb_dequeue(&frame_list);

		if (!ieee80211_frame_allowed(rx, fc)) {
			dev_kfree_skb(rx->skb);
			continue;
		}

		ieee80211_deliver_skb(rx);
	}

	return RX_QUEUED;
}

static ieee80211_rx_result debug_noinline
ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
{
	struct sk_buff *skb = rx->skb;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	__le16 fc = hdr->frame_control;

	if (!(status->rx_flags & IEEE80211_RX_AMSDU))
		return RX_CONTINUE;

	if (unlikely(!ieee80211_is_data(fc)))
		return RX_CONTINUE;

	if (unlikely(!ieee80211_is_data_present(fc)))
		return RX_DROP_MONITOR;

	if (unlikely(ieee80211_has_a4(hdr->frame_control))) {
		switch (rx->sdata->vif.type) {
		case NL80211_IFTYPE_AP_VLAN:
			if (!rx->sdata->u.vlan.sta)
				return RX_DROP_UNUSABLE;
			break;
		case NL80211_IFTYPE_STATION:
			if (!rx->sdata->u.mgd.use_4addr)
				return RX_DROP_UNUSABLE;
			break;
		default:
			return RX_DROP_UNUSABLE;
		}
	}

	if (is_multicast_ether_addr(hdr->addr1))
		return RX_DROP_UNUSABLE;

	return __ieee80211_rx_h_amsdu(rx, 0);
}

#ifdef CPTCFG_MAC80211_MESH
static ieee80211_rx_result
ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
{
	struct ieee80211_hdr *fwd_hdr, *hdr;
	struct ieee80211_tx_info *info;
	struct ieee80211s_hdr *mesh_hdr;
	struct sk_buff *skb = rx->skb, *fwd_skb;
	struct ieee80211_local *local = rx->local;
	struct ieee80211_sub_if_data *sdata = rx->sdata;
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	u16 ac, q, hdrlen;

	hdr = (struct ieee80211_hdr *) skb->data;
	hdrlen = ieee80211_hdrlen(hdr->frame_control);

	/* make sure fixed part of mesh header is there, also checks skb len */
	if (!pskb_may_pull(rx->skb, hdrlen + 6))
		return RX_DROP_MONITOR;

	mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);

	/* make sure full mesh header is there, also checks skb len */
	if (!pskb_may_pull(rx->skb,
			   hdrlen + ieee80211_get_mesh_hdrlen(mesh_hdr)))
		return RX_DROP_MONITOR;

	/* reload pointers */
	hdr = (struct ieee80211_hdr *) skb->data;
	mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);

	if (ieee80211_drop_unencrypted(rx, hdr->frame_control))
		return RX_DROP_MONITOR;

	/* frame is in RMC, don't forward */
	if (ieee80211_is_data(hdr->frame_control) &&
	    is_multicast_ether_addr(hdr->addr1) &&
	    mesh_rmc_check(rx->sdata, hdr->addr3, mesh_hdr))
		return RX_DROP_MONITOR;

	if (!ieee80211_is_data(hdr->frame_control))
		return RX_CONTINUE;

	if (!mesh_hdr->ttl)
		return RX_DROP_MONITOR;

	if (mesh_hdr->flags & MESH_FLAGS_AE) {
		struct mesh_path *mppath;
		char *proxied_addr;
		char *mpp_addr;

		if (is_multicast_ether_addr(hdr->addr1)) {
			mpp_addr = hdr->addr3;
			proxied_addr = mesh_hdr->eaddr1;
		} else if ((mesh_hdr->flags & MESH_FLAGS_AE) ==
			    MESH_FLAGS_AE_A5_A6) {
			/* has_a4 already checked in ieee80211_rx_mesh_check */
			mpp_addr = hdr->addr4;
			proxied_addr = mesh_hdr->eaddr2;
		} else {
			return RX_DROP_MONITOR;
		}

		rcu_read_lock();
		mppath = mpp_path_lookup(sdata, proxied_addr);
		if (!mppath) {
			mpp_path_add(sdata, proxied_addr, mpp_addr);
		} else {
			spin_lock_bh(&mppath->state_lock);
			if (!ether_addr_equal(mppath->mpp, mpp_addr))
				memcpy(mppath->mpp, mpp_addr, ETH_ALEN);
			mppath->exp_time = jiffies;
			spin_unlock_bh(&mppath->state_lock);
		}
		rcu_read_unlock();
	}

	/* Frame has reached destination.  Don't forward */
	if (!is_multicast_ether_addr(hdr->addr1) &&
	    ether_addr_equal(sdata->vif.addr, hdr->addr3))
		return RX_CONTINUE;

	ac = ieee80211_select_queue_80211(sdata, skb, hdr);
	q = sdata->vif.hw_queue[ac];
	if (ieee80211_queue_stopped(&local->hw, q)) {
		IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_congestion);
		return RX_DROP_MONITOR;
	}
	skb_set_queue_mapping(skb, q);

	if (!--mesh_hdr->ttl) {
		IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_ttl);
		goto out;
	}

	if (!ifmsh->mshcfg.dot11MeshForwarding)
		goto out;

	fwd_skb = skb_copy_expand(skb, local->tx_headroom +
				       sdata->encrypt_headroom, 0, GFP_ATOMIC);
	if (!fwd_skb)
		goto out;

	fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data;
	fwd_hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_RETRY);
	info = IEEE80211_SKB_CB(fwd_skb);
	memset(info, 0, sizeof(*info));
	info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
	info->control.vif = &rx->sdata->vif;
	info->control.jiffies = jiffies;
	if (is_multicast_ether_addr(fwd_hdr->addr1)) {
		IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_mcast);
		memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN);
		/* update power mode indication when forwarding */
		ieee80211_mps_set_frame_flags(sdata, NULL, fwd_hdr);
	} else if (!mesh_nexthop_lookup(sdata, fwd_skb)) {
		/* mesh power mode flags updated in mesh_nexthop_lookup */
		IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_unicast);
	} else {
		/* unable to resolve next hop */
		mesh_path_error_tx(sdata, ifmsh->mshcfg.element_ttl,
				   fwd_hdr->addr3, 0,
				   WLAN_REASON_MESH_PATH_NOFORWARD,
				   fwd_hdr->addr2);
		IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_no_route);
		kfree_skb(fwd_skb);
		return RX_DROP_MONITOR;
	}

	IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_frames);
	ieee80211_add_pending_skb(local, fwd_skb);
 out:
	if (is_multicast_ether_addr(hdr->addr1))
		return RX_CONTINUE;
	return RX_DROP_MONITOR;
}
#endif

static ieee80211_rx_result debug_noinline
ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
{
	struct ieee80211_sub_if_data *sdata = rx->sdata;
	struct ieee80211_local *local = rx->local;
	struct net_device *dev = sdata->dev;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
	__le16 fc = hdr->frame_control;
	bool port_control;
	int err;

	if (unlikely(!ieee80211_is_data(hdr->frame_control)))
		return RX_CONTINUE;

	if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
		return RX_DROP_MONITOR;

	/*
	 * Send unexpected-4addr-frame event to hostapd. For older versions,
	 * also drop the frame to cooked monitor interfaces.
	 */
	if (ieee80211_has_a4(hdr->frame_control) &&
	    sdata->vif.type == NL80211_IFTYPE_AP) {
		if (rx->sta &&
		    !test_and_set_sta_flag(rx->sta, WLAN_STA_4ADDR_EVENT))
			cfg80211_rx_unexpected_4addr_frame(
				rx->sdata->dev, rx->sta->sta.addr, GFP_ATOMIC);
		return RX_DROP_MONITOR;
	}

	err = __ieee80211_data_to_8023(rx, &port_control);
	if (unlikely(err))
		return RX_DROP_UNUSABLE;

	if (!ieee80211_frame_allowed(rx, fc))
		return RX_DROP_MONITOR;

	/* directly handle TDLS channel switch requests/responses */
	if (unlikely(((struct ethhdr *)rx->skb->data)->h_proto ==
						cpu_to_be16(ETH_P_TDLS))) {
		struct ieee80211_tdls_data *tf = (void *)rx->skb->data;

		if (pskb_may_pull(rx->skb,
				  offsetof(struct ieee80211_tdls_data, u)) &&
		    tf->payload_type == WLAN_TDLS_SNAP_RFTYPE &&
		    tf->category == WLAN_CATEGORY_TDLS &&
		    (tf->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST ||
		     tf->action_code == WLAN_TDLS_CHANNEL_SWITCH_RESPONSE)) {
			skb_queue_tail(&local->skb_queue_tdls_chsw, rx->skb);
			schedule_work(&local->tdls_chsw_work);
			if (rx->sta)
				rx->sta->rx_stats.packets++;

			return RX_QUEUED;
		}
	}

	if (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
	    unlikely(port_control) && sdata->bss) {
		sdata = container_of(sdata->bss, struct ieee80211_sub_if_data,
				     u.ap);
		dev = sdata->dev;
		rx->sdata = sdata;
	}

	rx->skb->dev = dev;

	if (!ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS) &&
	    local->ps_sdata && local->hw.conf.dynamic_ps_timeout > 0 &&
	    !is_multicast_ether_addr(
		    ((struct ethhdr *)rx->skb->data)->h_dest) &&
	    (!local->scanning &&
	     !test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state)))
		mod_timer(&local->dynamic_ps_timer, jiffies +
			  msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));

	ieee80211_deliver_skb(rx);

	return RX_QUEUED;
}

static ieee80211_rx_result debug_noinline
ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames)
{
	struct sk_buff *skb = rx->skb;
	struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
	struct tid_ampdu_rx *tid_agg_rx;
	u16 start_seq_num;
	u16 tid;

	if (likely(!ieee80211_is_ctl(bar->frame_control)))
		return RX_CONTINUE;

	if (ieee80211_is_back_req(bar->frame_control)) {
		struct {
			__le16 control, start_seq_num;
		} __packed bar_data;
		struct ieee80211_event event = {
			.type = BAR_RX_EVENT,
		};

		if (!rx->sta)
			return RX_DROP_MONITOR;

		if (skb_copy_bits(skb, offsetof(struct ieee80211_bar, control),
				  &bar_data, sizeof(bar_data)))
			return RX_DROP_MONITOR;

		tid = le16_to_cpu(bar_data.control) >> 12;

		if (!test_bit(tid, rx->sta->ampdu_mlme.agg_session_valid) &&
		    !test_and_set_bit(tid, rx->sta->ampdu_mlme.unexpected_agg))
			ieee80211_send_delba(rx->sdata, rx->sta->sta.addr, tid,
					     WLAN_BACK_RECIPIENT,
					     WLAN_REASON_QSTA_REQUIRE_SETUP);

		tid_agg_rx = rcu_dereference(rx->sta->ampdu_mlme.tid_rx[tid]);
		if (!tid_agg_rx)
			return RX_DROP_MONITOR;

		start_seq_num = le16_to_cpu(bar_data.start_seq_num) >> 4;
		event.u.ba.tid = tid;
		event.u.ba.ssn = start_seq_num;
		event.u.ba.sta = &rx->sta->sta;

		/* reset session timer */
		if (tid_agg_rx->timeout)
			mod_timer(&tid_agg_rx->session_timer,
				  TU_TO_EXP_TIME(tid_agg_rx->timeout));

		spin_lock(&tid_agg_rx->reorder_lock);
		/* release stored frames up to start of BAR */
		ieee80211_release_reorder_frames(rx->sdata, tid_agg_rx,
						 start_seq_num, frames);
		spin_unlock(&tid_agg_rx->reorder_lock);

		drv_event_callback(rx->local, rx->sdata, &event);

		kfree_skb(skb);
		return RX_QUEUED;
	}

	/*
	 * After this point, we only want management frames,
	 * so we can drop all remaining control frames to
	 * cooked monitor interfaces.
	 */
	return RX_DROP_MONITOR;
}

static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata,
					   struct ieee80211_mgmt *mgmt,
					   size_t len)
{
	struct ieee80211_local *local = sdata->local;
	struct sk_buff *skb;
	struct ieee80211_mgmt *resp;

	if (!ether_addr_equal(mgmt->da, sdata->vif.addr)) {
		/* Not to own unicast address */
		return;
	}

	if (!ether_addr_equal(mgmt->sa, sdata->u.mgd.bssid) ||
	    !ether_addr_equal(mgmt->bssid, sdata->u.mgd.bssid)) {
		/* Not from the current AP or not associated yet. */
		return;
	}

	if (len < 24 + 1 + sizeof(resp->u.action.u.sa_query)) {
		/* Too short SA Query request frame */
		return;
	}

	skb = dev_alloc_skb(sizeof(*resp) + local->hw.extra_tx_headroom);
	if (skb == NULL)
		return;

	skb_reserve(skb, local->hw.extra_tx_headroom);
	resp = skb_put_zero(skb, 24);
	memcpy(resp->da, mgmt->sa, ETH_ALEN);
	memcpy(resp->sa, sdata->vif.addr, ETH_ALEN);
	memcpy(resp->bssid, sdata->u.mgd.bssid, ETH_ALEN);
	resp->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
					  IEEE80211_STYPE_ACTION);
	skb_put(skb, 1 + sizeof(resp->u.action.u.sa_query));
	resp->u.action.category = WLAN_CATEGORY_SA_QUERY;
	resp->u.action.u.sa_query.action = WLAN_ACTION_SA_QUERY_RESPONSE;
	memcpy(resp->u.action.u.sa_query.trans_id,
	       mgmt->u.action.u.sa_query.trans_id,
	       WLAN_SA_QUERY_TR_ID_LEN);

	ieee80211_tx_skb(sdata, skb);
}

static ieee80211_rx_result debug_noinline
ieee80211_rx_h_mgmt_check(struct ieee80211_rx_data *rx)
{
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);

	/*
	 * From here on, look only at management frames.
	 * Data and control frames are already handled,
	 * and unknown (reserved) frames are useless.
	 */
	if (rx->skb->len < 24)
		return RX_DROP_MONITOR;

	if (!ieee80211_is_mgmt(mgmt->frame_control))
		return RX_DROP_MONITOR;

	if (rx->sdata->vif.type == NL80211_IFTYPE_AP &&
	    ieee80211_is_beacon(mgmt->frame_control) &&
	    !(rx->flags & IEEE80211_RX_BEACON_REPORTED)) {
		int sig = 0;

		if (ieee80211_hw_check(&rx->local->hw, SIGNAL_DBM) &&
		    !(status->flag & RX_FLAG_NO_SIGNAL_VAL))
			sig = status->signal;

		cfg80211_report_obss_beacon(rx->local->hw.wiphy,
					    rx->skb->data, rx->skb->len,
					    status->freq, sig);
		rx->flags |= IEEE80211_RX_BEACON_REPORTED;
	}

	if (ieee80211_drop_unencrypted_mgmt(rx))
		return RX_DROP_UNUSABLE;

	return RX_CONTINUE;
}

static ieee80211_rx_result debug_noinline
ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
{
	struct ieee80211_local *local = rx->local;
	struct ieee80211_sub_if_data *sdata = rx->sdata;
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
	int len = rx->skb->len;

	if (!ieee80211_is_action(mgmt->frame_control))
		return RX_CONTINUE;

	/* drop too small frames */
	if (len < IEEE80211_MIN_ACTION_SIZE)
		return RX_DROP_UNUSABLE;

	if (!rx->sta && mgmt->u.action.category != WLAN_CATEGORY_PUBLIC &&
	    mgmt->u.action.category != WLAN_CATEGORY_SELF_PROTECTED &&
	    mgmt->u.action.category != WLAN_CATEGORY_SPECTRUM_MGMT)
		return RX_DROP_UNUSABLE;

	switch (mgmt->u.action.category) {
	case WLAN_CATEGORY_HT:
		/* reject HT action frames from stations not supporting HT */
		if (!rx->sta->sta.ht_cap.ht_supported)
			goto invalid;

		if (sdata->vif.type != NL80211_IFTYPE_STATION &&
		    sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
		    sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
		    sdata->vif.type != NL80211_IFTYPE_AP &&
		    sdata->vif.type != NL80211_IFTYPE_ADHOC)
			break;

		/* verify action & smps_control/chanwidth are present */
		if (len < IEEE80211_MIN_ACTION_SIZE + 2)
			goto invalid;
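		/*
		 * SMPS and channel-width notifications are acted upon
		 * inline: the station's state is updated and rate control
		 * is notified of the change.
		 */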
		switch (mgmt->u.action.u.ht_smps.action) {
		case WLAN_HT_ACTION_SMPS: {
			struct ieee80211_supported_band *sband;
			enum ieee80211_smps_mode smps_mode;
			struct sta_opmode_info sta_opmode = {};

			/* convert to HT capability */
			switch (mgmt->u.action.u.ht_smps.smps_control) {
			case WLAN_HT_SMPS_CONTROL_DISABLED:
				smps_mode = IEEE80211_SMPS_OFF;
				break;
			case WLAN_HT_SMPS_CONTROL_STATIC:
				smps_mode = IEEE80211_SMPS_STATIC;
				break;
			case WLAN_HT_SMPS_CONTROL_DYNAMIC:
				smps_mode = IEEE80211_SMPS_DYNAMIC;
				break;
			default:
				goto invalid;
			}

			/* if no change do nothing */
			if (rx->sta->sta.smps_mode == smps_mode)
				goto handled;
			rx->sta->sta.smps_mode = smps_mode;
			sta_opmode.smps_mode =
				ieee80211_smps_mode_to_smps_mode(smps_mode);
			sta_opmode.changed = STA_OPMODE_SMPS_MODE_CHANGED;

			sband = rx->local->hw.wiphy->bands[status->band];

			rate_control_rate_update(local, sband, rx->sta,
						 IEEE80211_RC_SMPS_CHANGED);
			cfg80211_sta_opmode_change_notify(sdata->dev,
							  rx->sta->addr,
							  &sta_opmode,
							  GFP_ATOMIC);
			goto handled;
		}
		case WLAN_HT_ACTION_NOTIFY_CHANWIDTH: {
			struct ieee80211_supported_band *sband;
			u8 chanwidth = mgmt->u.action.u.ht_notify_cw.chanwidth;
			enum ieee80211_sta_rx_bandwidth max_bw, new_bw;
			struct sta_opmode_info sta_opmode = {};

			/* If it doesn't support 40 MHz it can't change ... */
			if (!(rx->sta->sta.ht_cap.cap &
					IEEE80211_HT_CAP_SUP_WIDTH_20_40))
				goto handled;

			if (chanwidth == IEEE80211_HT_CHANWIDTH_20MHZ)
				max_bw = IEEE80211_STA_RX_BW_20;
			else
				max_bw = ieee80211_sta_cap_rx_bw(rx->sta);

			/* set cur_max_bandwidth and recalc sta bw */
			rx->sta->cur_max_bandwidth = max_bw;
			new_bw = ieee80211_sta_cur_vht_bw(rx->sta);

			if (rx->sta->sta.bandwidth == new_bw)
				goto handled;

			rx->sta->sta.bandwidth = new_bw;
			sband = rx->local->hw.wiphy->bands[status->band];
			sta_opmode.bw =
				ieee80211_sta_rx_bw_to_chan_width(rx->sta);
			sta_opmode.changed = STA_OPMODE_MAX_BW_CHANGED;

			rate_control_rate_update(local, sband, rx->sta,
						 IEEE80211_RC_BW_CHANGED);
			cfg80211_sta_opmode_change_notify(sdata->dev,
							  rx->sta->addr,
							  &sta_opmode,
							  GFP_ATOMIC);
			goto handled;
		}
		default:
			goto invalid;
		}

		break;
	case WLAN_CATEGORY_PUBLIC:
		if (len < IEEE80211_MIN_ACTION_SIZE + 1)
			goto invalid;
		if (sdata->vif.type != NL80211_IFTYPE_STATION)
			break;
		if (!rx->sta)
			break;
		if (!ether_addr_equal(mgmt->bssid, sdata->u.mgd.bssid))
			break;
		if (mgmt->u.action.u.ext_chan_switch.action_code !=
				WLAN_PUB_ACTION_EXT_CHANSW_ANN)
			break;
		if (len < offsetof(struct ieee80211_mgmt,
				   u.action.u.ext_chan_switch.variable))
			goto invalid;
		goto queue;
	case WLAN_CATEGORY_VHT:
		if (sdata->vif.type != NL80211_IFTYPE_STATION &&
		    sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
		    sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
		    sdata->vif.type != NL80211_IFTYPE_AP &&
		    sdata->vif.type != NL80211_IFTYPE_ADHOC)
			break;

		/* verify action code is present */
		if (len < IEEE80211_MIN_ACTION_SIZE + 1)
			goto invalid;

		switch (mgmt->u.action.u.vht_opmode_notif.action_code) {
		case WLAN_VHT_ACTION_OPMODE_NOTIF: {
			/* verify opmode is present */
			if (len < IEEE80211_MIN_ACTION_SIZE + 2)
				goto invalid;
			goto queue;
		}
		case WLAN_VHT_ACTION_GROUPID_MGMT: {
			if (len < IEEE80211_MIN_ACTION_SIZE + 25)
				goto invalid;
			goto queue;
		}
		default:
			break;
		}
		break;
	case WLAN_CATEGORY_BACK:
		if (sdata->vif.type != NL80211_IFTYPE_STATION &&
		    sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
		    sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
		    sdata->vif.type != NL80211_IFTYPE_AP &&
		    sdata->vif.type != NL80211_IFTYPE_ADHOC)
			break;

		/* verify action_code is present */
		if (len < IEEE80211_MIN_ACTION_SIZE + 1)
			break;

		switch (mgmt->u.action.u.addba_req.action_code) {
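		/*
		 * Each BACK action subtype carries a fixed-size body;
		 * verify the frame is long enough before queueing it
		 * for the aggregation work.
		 */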
		case WLAN_ACTION_ADDBA_REQ:
			if (len < (IEEE80211_MIN_ACTION_SIZE +
				   sizeof(mgmt->u.action.u.addba_req)))
				goto invalid;
			break;
		case WLAN_ACTION_ADDBA_RESP:
			if (len < (IEEE80211_MIN_ACTION_SIZE +
				   sizeof(mgmt->u.action.u.addba_resp)))
				goto invalid;
			break;
		case WLAN_ACTION_DELBA:
			if (len < (IEEE80211_MIN_ACTION_SIZE +
				   sizeof(mgmt->u.action.u.delba)))
				goto invalid;
			break;
		default:
			goto invalid;
		}

		goto queue;
	case WLAN_CATEGORY_SPECTRUM_MGMT:
		/* verify action_code is present */
		if (len < IEEE80211_MIN_ACTION_SIZE + 1)
			break;

		switch (mgmt->u.action.u.measurement.action_code) {
		case WLAN_ACTION_SPCT_MSR_REQ:
			if (status->band != NL80211_BAND_5GHZ)
				break;

			if (len < (IEEE80211_MIN_ACTION_SIZE +
				   sizeof(mgmt->u.action.u.measurement)))
				break;

			if (sdata->vif.type != NL80211_IFTYPE_STATION)
				break;

			ieee80211_process_measurement_req(sdata, mgmt, len);
			goto handled;
		case WLAN_ACTION_SPCT_CHL_SWITCH: {
			u8 *bssid;
			if (len < (IEEE80211_MIN_ACTION_SIZE +
				   sizeof(mgmt->u.action.u.chan_switch)))
				break;

			if (sdata->vif.type != NL80211_IFTYPE_STATION &&
			    sdata->vif.type != NL80211_IFTYPE_ADHOC &&
			    sdata->vif.type != NL80211_IFTYPE_MESH_POINT)
				break;

			if (sdata->vif.type == NL80211_IFTYPE_STATION)
				bssid = sdata->u.mgd.bssid;
			else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
				bssid = sdata->u.ibss.bssid;
			else if (sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
				bssid = mgmt->sa;
			else
				break;

			if (!ether_addr_equal(mgmt->bssid, bssid))
				break;

			goto queue;
			}
		}
		break;
	case WLAN_CATEGORY_SA_QUERY:
		if (len < (IEEE80211_MIN_ACTION_SIZE +
			   sizeof(mgmt->u.action.u.sa_query)))
			break;

		switch (mgmt->u.action.u.sa_query.action) {
		case WLAN_ACTION_SA_QUERY_REQUEST:
			if (sdata->vif.type != NL80211_IFTYPE_STATION)
				break;
			ieee80211_process_sa_query_req(sdata, mgmt, len);
			goto handled;
		}
		break;
	case WLAN_CATEGORY_SELF_PROTECTED:
		if (len < (IEEE80211_MIN_ACTION_SIZE +
			   sizeof(mgmt->u.action.u.self_prot.action_code)))
			break;

		switch (mgmt->u.action.u.self_prot.action_code) {
		case WLAN_SP_MESH_PEERING_OPEN:
		case WLAN_SP_MESH_PEERING_CLOSE:
		case WLAN_SP_MESH_PEERING_CONFIRM:
			if (!ieee80211_vif_is_mesh(&sdata->vif))
				goto invalid;
			if (sdata->u.mesh.user_mpm)
				/* userspace handles this frame */
				break;
			goto queue;
		case WLAN_SP_MGK_INFORM:
		case WLAN_SP_MGK_ACK:
			if (!ieee80211_vif_is_mesh(&sdata->vif))
				goto invalid;
			break;
		}
		break;
	case WLAN_CATEGORY_MESH_ACTION:
		if (len < (IEEE80211_MIN_ACTION_SIZE +
			   sizeof(mgmt->u.action.u.mesh_action.action_code)))
			break;

		if (!ieee80211_vif_is_mesh(&sdata->vif))
			break;
		if (mesh_action_is_path_sel(mgmt) &&
		    !mesh_path_sel_is_hwmp(sdata))
			break;
		goto queue;
	}

	return RX_CONTINUE;

 invalid:
	status->rx_flags |= IEEE80211_RX_MALFORMED_ACTION_FRM;
	/* will return in the next handlers */
	return RX_CONTINUE;

 handled:
	if (rx->sta)
		rx->sta->rx_stats.packets++;
	dev_kfree_skb(rx->skb);
	return RX_QUEUED;

 queue:
	skb_queue_tail(&sdata->skb_queue, rx->skb);
	ieee80211_queue_work(&local->hw, &sdata->work);
	if (rx->sta)
		rx->sta->rx_stats.packets++;
	return RX_QUEUED;
}

static ieee80211_rx_result debug_noinline
ieee80211_rx_h_userspace_mgmt(struct ieee80211_rx_data *rx)
{
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
	int sig = 0;

	/* skip known-bad action frames and return them in the next handler */
	if (status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM)
		return RX_CONTINUE;

	/*
	 * Getting here means the kernel doesn't know how to handle
	 * it, but maybe userspace does ... include returned frames
	 * so userspace can register for those to know whether ones
	 * it transmitted were processed or returned.
	 */
	if (ieee80211_hw_check(&rx->local->hw, SIGNAL_DBM) &&
	    !(status->flag & RX_FLAG_NO_SIGNAL_VAL))
		sig = status->signal;

	if (cfg80211_rx_mgmt(&rx->sdata->wdev, status->freq, sig,
			     rx->skb->data, rx->skb->len, 0)) {
		if (rx->sta)
			rx->sta->rx_stats.packets++;
		dev_kfree_skb(rx->skb);
		return RX_QUEUED;
	}

	return RX_CONTINUE;
}

static ieee80211_rx_result debug_noinline
ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx)
{
	struct ieee80211_local *local = rx->local;
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
	struct sk_buff *nskb;
	struct ieee80211_sub_if_data *sdata = rx->sdata;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);

	if (!ieee80211_is_action(mgmt->frame_control))
		return RX_CONTINUE;

	/*
	 * For AP mode, hostapd is responsible for handling any action
	 * frames that we didn't handle, including returning unknown
	 * ones. For all other modes we will return them to the sender,
	 * setting the 0x80 bit in the action category, as required by
	 * 802.11-2012 9.24.4.
	 * Newer versions of hostapd shall also use the management frame
	 * registration mechanisms, but older ones still use cooked
	 * monitor interfaces so push all frames there.
	 */
	if (!(status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM) &&
	    (sdata->vif.type == NL80211_IFTYPE_AP ||
	     sdata->vif.type == NL80211_IFTYPE_AP_VLAN))
		return RX_DROP_MONITOR;

	if (is_multicast_ether_addr(mgmt->da))
		return RX_DROP_MONITOR;

	/* do not return rejected action frames */
	if (mgmt->u.action.category & 0x80)
		return RX_DROP_UNUSABLE;

	nskb = skb_copy_expand(rx->skb, local->hw.extra_tx_headroom, 0,
			       GFP_ATOMIC);
	if (nskb) {
		struct ieee80211_mgmt *nmgmt = (void *)nskb->data;

		nmgmt->u.action.category |= 0x80;
		memcpy(nmgmt->da, nmgmt->sa, ETH_ALEN);
		memcpy(nmgmt->sa, rx->sdata->vif.addr, ETH_ALEN);

		memset(nskb->cb, 0, sizeof(nskb->cb));

		if (rx->sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE) {
			struct ieee80211_tx_info *info = IEEE80211_SKB_CB(nskb);

			info->flags = IEEE80211_TX_CTL_TX_OFFCHAN |
				      IEEE80211_TX_INTFL_OFFCHAN_TX_OK |
				      IEEE80211_TX_CTL_NO_CCK_RATE;
			if (ieee80211_hw_check(&local->hw, QUEUE_CONTROL))
				info->hw_queue =
					local->hw.offchannel_tx_hw_queue;
		}

		__ieee80211_tx_skb_tid_band(rx->sdata, nskb, 7,
					    status->band, 0);
	}
	dev_kfree_skb(rx->skb);
	return RX_QUEUED;
}

static ieee80211_rx_result debug_noinline
ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
{
	struct ieee80211_sub_if_data *sdata = rx->sdata;
	struct ieee80211_mgmt *mgmt = (void *)rx->skb->data;
	__le16 stype;

	stype = mgmt->frame_control & cpu_to_le16(IEEE80211_FCTL_STYPE);

	if (!ieee80211_vif_is_mesh(&sdata->vif) &&
	    sdata->vif.type != NL80211_IFTYPE_ADHOC &&
	    sdata->vif.type != NL80211_IFTYPE_OCB &&
	    sdata->vif.type != NL80211_IFTYPE_STATION)
		return RX_DROP_MONITOR;

	switch (stype) {
	case cpu_to_le16(IEEE80211_STYPE_AUTH):
	case cpu_to_le16(IEEE80211_STYPE_BEACON):
	case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
		/* process for all: mesh, mlme, ibss */
		break;
	case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP):
	case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP):
	case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
	case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
		if (is_multicast_ether_addr(mgmt->da) &&
		    !is_broadcast_ether_addr(mgmt->da))
			return RX_DROP_MONITOR;

		/* process only for station */
		if (sdata->vif.type != NL80211_IFTYPE_STATION)
			return RX_DROP_MONITOR;
		break;
	case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
		/* process only for ibss and mesh */
		if (sdata->vif.type != NL80211_IFTYPE_ADHOC &&
		    sdata->vif.type != NL80211_IFTYPE_MESH_POINT)
			return RX_DROP_MONITOR;
		break;
	default:
RX_DROP_MONITOR; } /* queue up frame and kick off work to process it */ skb_queue_tail(&sdata->skb_queue, rx->skb); ieee80211_queue_work(&rx->local->hw, &sdata->work); if (rx->sta) rx->sta->rx_stats.packets++; return RX_QUEUED; } static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx, struct ieee80211_rate *rate) { struct ieee80211_sub_if_data *sdata; struct ieee80211_local *local = rx->local; struct sk_buff *skb = rx->skb, *skb2; struct net_device *prev_dev = NULL; struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); int needed_headroom; /* * If cooked monitor has been processed already, then * don't do it again. If not, set the flag. */ if (rx->flags & IEEE80211_RX_CMNTR) goto out_free_skb; rx->flags |= IEEE80211_RX_CMNTR; /* If there are no cooked monitor interfaces, just free the SKB */ if (!local->cooked_mntrs) goto out_free_skb; /* vendor data is long removed here */ status->flag &= ~RX_FLAG_RADIOTAP_VENDOR_DATA; /* room for the radiotap header based on driver features */ needed_headroom = ieee80211_rx_radiotap_hdrlen(local, status, skb); if (skb_headroom(skb) < needed_headroom && pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) goto out_free_skb; /* prepend radiotap information */ ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom, false); skb_reset_mac_header(skb); skb->ip_summed = CHECKSUM_UNNECESSARY; skb->pkt_type = PACKET_OTHERHOST; skb->protocol = htons(ETH_P_802_2); list_for_each_entry_rcu(sdata, &local->interfaces, list) { if (!ieee80211_sdata_running(sdata)) continue; if (sdata->vif.type != NL80211_IFTYPE_MONITOR || !(sdata->u.mntr.flags & MONITOR_FLAG_COOK_FRAMES)) continue; if (prev_dev) { skb2 = skb_clone(skb, GFP_ATOMIC); if (skb2) { skb2->dev = prev_dev; netif_receive_skb(skb2); } } prev_dev = sdata->dev; ieee80211_rx_stats(sdata->dev, skb->len); } if (prev_dev) { skb->dev = prev_dev; netif_receive_skb(skb); return; } out_free_skb: dev_kfree_skb(skb); } static void ieee80211_rx_handlers_result(struct ieee80211_rx_data *rx, ieee80211_rx_result res) { switch (res) { case RX_DROP_MONITOR: I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop); if (rx->sta) rx->sta->rx_stats.dropped++; /* fall through */ case RX_CONTINUE: { struct ieee80211_rate *rate = NULL; struct ieee80211_supported_band *sband; struct ieee80211_rx_status *status; status = IEEE80211_SKB_RXCB((rx->skb)); sband = rx->local->hw.wiphy->bands[status->band]; if (status->encoding == RX_ENC_LEGACY) rate = &sband->bitrates[status->rate_idx]; ieee80211_rx_cooked_monitor(rx, rate); break; } case RX_DROP_UNUSABLE: I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop); if (rx->sta) rx->sta->rx_stats.dropped++; dev_kfree_skb(rx->skb); break; case RX_QUEUED: I802_DEBUG_INC(rx->sdata->local->rx_handlers_queued); break; } } static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx, struct sk_buff_head *frames) { ieee80211_rx_result res = RX_DROP_MONITOR; struct sk_buff *skb; #define CALL_RXH(rxh) \ do { \ res = rxh(rx); \ if (res != RX_CONTINUE) \ goto rxh_next; \ } while (0) /* Lock here to avoid hitting all of the data used in the RX * path (e.g. key data, station data, ...) concurrently when * a frame is released from the reorder buffer due to timeout * from the timer, potentially concurrently with RX from the * driver. 
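	 *
	 * A BH-disabling spinlock (rather than a mutex) is used here
	 * since both the RX path and the reorder-timeout release run
	 * in softirq context (cf. the softirq_count() check in
	 * ieee80211_rx_napi()).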
*/ spin_lock_bh(&rx->local->rx_path_lock); while ((skb = __skb_dequeue(frames))) { /* * all the other fields are valid across frames * that belong to an aMPDU since they are on the * same TID from the same station */ rx->skb = skb; CALL_RXH(ieee80211_rx_h_check_more_data); CALL_RXH(ieee80211_rx_h_uapsd_and_pspoll); CALL_RXH(ieee80211_rx_h_sta_process); CALL_RXH(ieee80211_rx_h_decrypt); CALL_RXH(ieee80211_rx_h_defragment); CALL_RXH(ieee80211_rx_h_michael_mic_verify); /* must be after MMIC verify so header is counted in MPDU mic */ #ifdef CPTCFG_MAC80211_MESH if (ieee80211_vif_is_mesh(&rx->sdata->vif)) CALL_RXH(ieee80211_rx_h_mesh_fwding); #endif CALL_RXH(ieee80211_rx_h_amsdu); CALL_RXH(ieee80211_rx_h_data); /* special treatment -- needs the queue */ res = ieee80211_rx_h_ctrl(rx, frames); if (res != RX_CONTINUE) goto rxh_next; CALL_RXH(ieee80211_rx_h_mgmt_check); CALL_RXH(ieee80211_rx_h_action); CALL_RXH(ieee80211_rx_h_userspace_mgmt); CALL_RXH(ieee80211_rx_h_action_return); CALL_RXH(ieee80211_rx_h_mgmt); rxh_next: ieee80211_rx_handlers_result(rx, res); #undef CALL_RXH } spin_unlock_bh(&rx->local->rx_path_lock); } static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx) { struct sk_buff_head reorder_release; ieee80211_rx_result res = RX_DROP_MONITOR; __skb_queue_head_init(&reorder_release); #define CALL_RXH(rxh) \ do { \ res = rxh(rx); \ if (res != RX_CONTINUE) \ goto rxh_next; \ } while (0) CALL_RXH(ieee80211_rx_h_check_dup); CALL_RXH(ieee80211_rx_h_check); ieee80211_rx_reorder_ampdu(rx, &reorder_release); ieee80211_rx_handlers(rx, &reorder_release); return; rxh_next: ieee80211_rx_handlers_result(rx, res); #undef CALL_RXH } /* * This function makes calls into the RX path, therefore * it has to be invoked under RCU read lock. */ void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid) { struct sk_buff_head frames; struct ieee80211_rx_data rx = { .sta = sta, .sdata = sta->sdata, .local = sta->local, /* This is OK -- must be QoS data frame */ .security_idx = tid, .seqno_idx = tid, .napi = NULL, /* must be NULL to not have races */ }; struct tid_ampdu_rx *tid_agg_rx; tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]); if (!tid_agg_rx) return; __skb_queue_head_init(&frames); spin_lock(&tid_agg_rx->reorder_lock); ieee80211_sta_reorder_release(sta->sdata, tid_agg_rx, &frames); spin_unlock(&tid_agg_rx->reorder_lock); if (!skb_queue_empty(&frames)) { struct ieee80211_event event = { .type = BA_FRAME_TIMEOUT, .u.ba.tid = tid, .u.ba.sta = &sta->sta, }; drv_event_callback(rx.local, rx.sdata, &event); } ieee80211_rx_handlers(&rx, &frames); } void ieee80211_mark_rx_ba_filtered_frames(struct ieee80211_sta *pubsta, u8 tid, u16 ssn, u64 filtered, u16 received_mpdus) { struct sta_info *sta; struct tid_ampdu_rx *tid_agg_rx; struct sk_buff_head frames; struct ieee80211_rx_data rx = { /* This is OK -- must be QoS data frame */ .security_idx = tid, .seqno_idx = tid, }; int i, diff; if (WARN_ON(!pubsta || tid >= IEEE80211_NUM_TIDS)) return; __skb_queue_head_init(&frames); sta = container_of(pubsta, struct sta_info, sta); rx.sta = sta; rx.sdata = sta->sdata; rx.local = sta->local; rcu_read_lock(); tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]); if (!tid_agg_rx) goto out; spin_lock_bh(&tid_agg_rx->reorder_lock); if (received_mpdus >= IEEE80211_SN_MODULO >> 1) { int release; /* release all frames in the reorder buffer */ release = (tid_agg_rx->head_seq_num + tid_agg_rx->buf_size) % IEEE80211_SN_MODULO; ieee80211_release_reorder_frames(sta->sdata, tid_agg_rx, release, 
&frames); /* update ssn to match received ssn */ tid_agg_rx->head_seq_num = ssn; } else { ieee80211_release_reorder_frames(sta->sdata, tid_agg_rx, ssn, &frames); } /* handle the case that received ssn is behind the mac ssn. * it can be tid_agg_rx->buf_size behind and still be valid */ diff = (tid_agg_rx->head_seq_num - ssn) & IEEE80211_SN_MASK; if (diff >= tid_agg_rx->buf_size) { tid_agg_rx->reorder_buf_filtered = 0; goto release; } filtered = filtered >> diff; ssn += diff; /* update bitmap */ for (i = 0; i < tid_agg_rx->buf_size; i++) { int index = (ssn + i) % tid_agg_rx->buf_size; tid_agg_rx->reorder_buf_filtered &= ~BIT_ULL(index); if (filtered & BIT_ULL(i)) tid_agg_rx->reorder_buf_filtered |= BIT_ULL(index); } /* now process also frames that the filter marking released */ ieee80211_sta_reorder_release(sta->sdata, tid_agg_rx, &frames); release: spin_unlock_bh(&tid_agg_rx->reorder_lock); ieee80211_rx_handlers(&rx, &frames); out: rcu_read_unlock(); } EXPORT_SYMBOL(ieee80211_mark_rx_ba_filtered_frames); /* main receive path */ static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx) { struct ieee80211_sub_if_data *sdata = rx->sdata; struct sk_buff *skb = rx->skb; struct ieee80211_hdr *hdr = (void *)skb->data; struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); u8 *bssid = ieee80211_get_bssid(hdr, skb->len, sdata->vif.type); bool multicast = is_multicast_ether_addr(hdr->addr1); switch (sdata->vif.type) { case NL80211_IFTYPE_STATION: if (!bssid && !sdata->u.mgd.use_4addr) return false; if (ieee80211_is_robust_mgmt_frame(skb) && !rx->sta) return false; if (multicast) return true; return ether_addr_equal(sdata->vif.addr, hdr->addr1); case NL80211_IFTYPE_ADHOC: if (!bssid) return false; if (ether_addr_equal(sdata->vif.addr, hdr->addr2) || ether_addr_equal(sdata->u.ibss.bssid, hdr->addr2)) return false; if (ieee80211_is_beacon(hdr->frame_control)) return true; if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid)) return false; if (!multicast && !ether_addr_equal(sdata->vif.addr, hdr->addr1)) return false; if (!rx->sta) { int rate_idx; if (status->encoding != RX_ENC_LEGACY) rate_idx = 0; /* TODO: HT/VHT rates */ else rate_idx = status->rate_idx; ieee80211_ibss_rx_no_sta(sdata, bssid, hdr->addr2, BIT(rate_idx)); } return true; case NL80211_IFTYPE_OCB: if (!bssid) return false; if (!ieee80211_is_data_present(hdr->frame_control)) return false; if (!is_broadcast_ether_addr(bssid)) return false; if (!multicast && !ether_addr_equal(sdata->dev->dev_addr, hdr->addr1)) return false; if (!rx->sta) { int rate_idx; if (status->encoding != RX_ENC_LEGACY) rate_idx = 0; /* TODO: HT rates */ else rate_idx = status->rate_idx; ieee80211_ocb_rx_no_sta(sdata, bssid, hdr->addr2, BIT(rate_idx)); } return true; case NL80211_IFTYPE_MESH_POINT: if (ether_addr_equal(sdata->vif.addr, hdr->addr2)) return false; if (multicast) return true; return ether_addr_equal(sdata->vif.addr, hdr->addr1); case NL80211_IFTYPE_AP_VLAN: case NL80211_IFTYPE_AP: if (!bssid) return ether_addr_equal(sdata->vif.addr, hdr->addr1); if (!ieee80211_bssid_match(bssid, sdata->vif.addr)) { /* * Accept public action frames even when the * BSSID doesn't match, this is used for P2P * and location updates. Note that mac80211 * itself never looks at these frames. 
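			 *
			 * For example, P2P GO negotiation and provision
			 * discovery frames (public action frames) are sent
			 * to the peer's device address before any BSS
			 * exists, so they cannot match our BSSID.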
*/ if (!multicast && !ether_addr_equal(sdata->vif.addr, hdr->addr1)) return false; if (ieee80211_is_public_action(hdr, skb->len)) return true; return ieee80211_is_beacon(hdr->frame_control); } if (!ieee80211_has_tods(hdr->frame_control)) { /* ignore data frames to TDLS-peers */ if (ieee80211_is_data(hdr->frame_control)) return false; /* ignore action frames to TDLS-peers */ if (ieee80211_is_action(hdr->frame_control) && !is_broadcast_ether_addr(bssid) && !ether_addr_equal(bssid, hdr->addr1)) return false; } /* * 802.11-2016 Table 9-26 says that for data frames, A1 must be * the BSSID - we've checked that already but may have accepted * the wildcard (ff:ff:ff:ff:ff:ff). * * It also says: * The BSSID of the Data frame is determined as follows: * a) If the STA is contained within an AP or is associated * with an AP, the BSSID is the address currently in use * by the STA contained in the AP. * * So we should not accept data frames with an address that's * multicast. * * Accepting it also opens a security problem because stations * could encrypt it with the GTK and inject traffic that way. */ if (ieee80211_is_data(hdr->frame_control) && multicast) return false; return true; case NL80211_IFTYPE_WDS: if (bssid || !ieee80211_is_data(hdr->frame_control)) return false; return ether_addr_equal(sdata->u.wds.remote_addr, hdr->addr2); case NL80211_IFTYPE_P2P_DEVICE: return ieee80211_is_public_action(hdr, skb->len) || ieee80211_is_probe_req(hdr->frame_control) || ieee80211_is_probe_resp(hdr->frame_control) || ieee80211_is_beacon(hdr->frame_control); case NL80211_IFTYPE_NAN: /* Currently no frames on NAN interface are allowed */ return false; default: break; } WARN_ON_ONCE(1); return false; } void ieee80211_check_fast_rx(struct sta_info *sta) { struct ieee80211_sub_if_data *sdata = sta->sdata; struct ieee80211_local *local = sdata->local; struct ieee80211_key *key; struct ieee80211_fast_rx fastrx = { .dev = sdata->dev, .vif_type = sdata->vif.type, .control_port_protocol = sdata->control_port_protocol, }, *old, *new = NULL; bool assign = false; /* use sparse to check that we don't return without updating */ __acquire(check_fast_rx); BUILD_BUG_ON(sizeof(fastrx.rfc1042_hdr) != sizeof(rfc1042_header)); BUILD_BUG_ON(sizeof(fastrx.rfc1042_hdr) != ETH_ALEN); ether_addr_copy(fastrx.rfc1042_hdr, rfc1042_header); ether_addr_copy(fastrx.vif_addr, sdata->vif.addr); fastrx.uses_rss = ieee80211_hw_check(&local->hw, USES_RSS); /* fast-rx doesn't do reordering */ if (ieee80211_hw_check(&local->hw, AMPDU_AGGREGATION) && !ieee80211_hw_check(&local->hw, SUPPORTS_REORDERING_BUFFER)) goto clear; switch (sdata->vif.type) { case NL80211_IFTYPE_STATION: if (sta->sta.tdls) { fastrx.da_offs = offsetof(struct ieee80211_hdr, addr1); fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr2); fastrx.expected_ds_bits = 0; } else { fastrx.sta_notify = sdata->u.mgd.probe_send_count > 0; fastrx.da_offs = offsetof(struct ieee80211_hdr, addr1); fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr3); fastrx.expected_ds_bits = cpu_to_le16(IEEE80211_FCTL_FROMDS); } if (sdata->u.mgd.use_4addr && !sta->sta.tdls) { fastrx.expected_ds_bits |= cpu_to_le16(IEEE80211_FCTL_TODS); fastrx.da_offs = offsetof(struct ieee80211_hdr, addr3); fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr4); } if (!sdata->u.mgd.powersave) break; /* software powersave is a huge mess, avoid all of it */ if (ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK)) goto clear; if (ieee80211_hw_check(&local->hw, SUPPORTS_PS) && !ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS)) 
goto clear; break; case NL80211_IFTYPE_AP_VLAN: case NL80211_IFTYPE_AP: /* parallel-rx requires this, at least with calls to * ieee80211_sta_ps_transition() */ if (!ieee80211_hw_check(&local->hw, AP_LINK_PS)) goto clear; fastrx.da_offs = offsetof(struct ieee80211_hdr, addr3); fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr2); fastrx.expected_ds_bits = cpu_to_le16(IEEE80211_FCTL_TODS); fastrx.internal_forward = !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) && (sdata->vif.type != NL80211_IFTYPE_AP_VLAN || !sdata->u.vlan.sta); if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN && sdata->u.vlan.sta) { fastrx.expected_ds_bits |= cpu_to_le16(IEEE80211_FCTL_FROMDS); fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr4); fastrx.internal_forward = 0; } break; default: goto clear; } if (!test_sta_flag(sta, WLAN_STA_AUTHORIZED)) goto clear; rcu_read_lock(); key = rcu_dereference(sta->ptk[sta->ptk_idx]); if (key) { switch (key->conf.cipher) { case WLAN_CIPHER_SUITE_TKIP: /* we don't want to deal with MMIC in fast-rx */ goto clear_rcu; case WLAN_CIPHER_SUITE_CCMP: case WLAN_CIPHER_SUITE_CCMP_256: case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: break; default: /* we also don't want to deal with WEP or cipher scheme * since those require looking up the key idx in the * frame, rather than assuming the PTK is used * (we need to revisit this once we implement the real * PTK index, which is now valid in the spec, but we * haven't implemented that part yet) */ goto clear_rcu; } fastrx.key = true; fastrx.icv_len = key->conf.icv_len; } #ifdef CPTCFG_IWLMVM_VENDOR_CMDS if (sdata->vif.filter_grat_arp_unsol_na && sdata->vif.bss_conf.arp_addr_cnt) fastrx.drop_grat_arp_unsol_na = true; #endif assign = true; clear_rcu: rcu_read_unlock(); clear: __release(check_fast_rx); if (assign) new = kmemdup(&fastrx, sizeof(fastrx), GFP_KERNEL); spin_lock_bh(&sta->lock); old = rcu_dereference_protected(sta->fast_rx, true); rcu_assign_pointer(sta->fast_rx, new); spin_unlock_bh(&sta->lock); if (old) kfree_rcu(old, rcu_head); } void ieee80211_clear_fast_rx(struct sta_info *sta) { struct ieee80211_fast_rx *old; spin_lock_bh(&sta->lock); old = rcu_dereference_protected(sta->fast_rx, true); RCU_INIT_POINTER(sta->fast_rx, NULL); spin_unlock_bh(&sta->lock); if (old) kfree_rcu(old, rcu_head); } void __ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata) { struct ieee80211_local *local = sdata->local; struct sta_info *sta; lockdep_assert_held(&local->sta_mtx); list_for_each_entry_rcu(sta, &local->sta_list, list) { if (sdata != sta->sdata && (!sta->sdata->bss || sta->sdata->bss != sdata->bss)) continue; ieee80211_check_fast_rx(sta); } } void ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata) { struct ieee80211_local *local = sdata->local; mutex_lock(&local->sta_mtx); __ieee80211_check_fast_rx_iface(sdata); mutex_unlock(&local->sta_mtx); } static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx, struct ieee80211_fast_rx *fast_rx) { struct sk_buff *skb = rx->skb; struct ieee80211_hdr *hdr = (void *)skb->data; struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); struct sta_info *sta = rx->sta; int orig_len = skb->len; int hdrlen = ieee80211_hdrlen(hdr->frame_control); int snap_offs = hdrlen; struct { u8 snap[sizeof(rfc1042_header)]; __be16 proto; } *payload __aligned(2); struct { u8 da[ETH_ALEN]; u8 sa[ETH_ALEN]; } addrs __aligned(2); struct ieee80211_sta_rx_stats *stats = &sta->rx_stats; if (fast_rx->uses_rss) stats = this_cpu_ptr(sta->pcpu_rx_stats); /* for parallel-rx, 
we need to have DUP_VALIDATED, otherwise we write * to a common data structure; drivers can implement that per queue * but we don't have that information in mac80211 */ if (!(status->flag & RX_FLAG_DUP_VALIDATED)) return false; #define FAST_RX_CRYPT_FLAGS (RX_FLAG_PN_VALIDATED | RX_FLAG_DECRYPTED) /* If using encryption, we also need to have: * - PN_VALIDATED: similar, but the implementation is tricky * - DECRYPTED: necessary for PN_VALIDATED */ if (fast_rx->key && (status->flag & FAST_RX_CRYPT_FLAGS) != FAST_RX_CRYPT_FLAGS) return false; if (unlikely(!ieee80211_is_data_present(hdr->frame_control))) return false; if (unlikely(ieee80211_is_frag(hdr))) return false; /* Since our interface address cannot be multicast, this * implicitly also rejects multicast frames without the * explicit check. * * We shouldn't get any *data* frames not addressed to us * (AP mode will accept multicast *management* frames), but * punting here will make it go through the full checks in * ieee80211_accept_frame(). */ if (!ether_addr_equal(fast_rx->vif_addr, hdr->addr1)) return false; if ((hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS)) != fast_rx->expected_ds_bits) return false; /* assign the key to drop unencrypted frames (later) * and strip the IV/MIC if necessary */ if (fast_rx->key && !(status->flag & RX_FLAG_IV_STRIPPED)) { /* GCMP header length is the same */ snap_offs += IEEE80211_CCMP_HDR_LEN; } if (!(status->rx_flags & IEEE80211_RX_AMSDU)) { if (!pskb_may_pull(skb, snap_offs + sizeof(*payload))) goto drop; payload = (void *)(skb->data + snap_offs); if (!ether_addr_equal(payload->snap, fast_rx->rfc1042_hdr)) return false; /* Don't handle these here since they require special code. * Accept AARP and IPX even though they should come with a * bridge-tunnel header - but if we get them this way then * there's little point in discarding them. */ if (unlikely(payload->proto == cpu_to_be16(ETH_P_TDLS) || payload->proto == fast_rx->control_port_protocol)) return false; } /* after this point, don't punt to the slowpath! 
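	 *
	 * From here on the skb is modified in place (the ICV is trimmed
	 * and the 802.11 header is replaced by an ethernet-style one
	 * below), so the regular RX handlers could no longer parse it
	 * as an 802.11 frame.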
*/ if (rx->key && !(status->flag & RX_FLAG_MIC_STRIPPED) && pskb_trim(skb, skb->len - fast_rx->icv_len)) goto drop; if (unlikely(fast_rx->sta_notify)) { ieee80211_sta_rx_notify(rx->sdata, hdr); fast_rx->sta_notify = false; } /* statistics part of ieee80211_rx_h_sta_process() */ if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) { stats->last_signal = status->signal; if (!fast_rx->uses_rss) ewma_signal_add(&sta->rx_stats_avg.signal, -status->signal); } if (status->chains) { int i; stats->chains = status->chains; for (i = 0; i < ARRAY_SIZE(status->chain_signal); i++) { int signal = status->chain_signal[i]; if (!(status->chains & BIT(i))) continue; stats->chain_signal_last[i] = signal; if (!fast_rx->uses_rss) ewma_signal_add(&sta->rx_stats_avg.chain_signal[i], -signal); } } /* end of statistics */ if (rx->key && !ieee80211_has_protected(hdr->frame_control)) goto drop; if (status->rx_flags & IEEE80211_RX_AMSDU) { if (__ieee80211_rx_h_amsdu(rx, snap_offs - hdrlen) != RX_QUEUED) goto drop; return true; } stats->last_rx = jiffies; stats->last_rate = sta_stats_encode_rate(status); stats->fragments++; stats->packets++; /* do the header conversion - first grab the addresses */ ether_addr_copy(addrs.da, skb->data + fast_rx->da_offs); ether_addr_copy(addrs.sa, skb->data + fast_rx->sa_offs); /* remove the SNAP but leave the ethertype */ skb_pull(skb, snap_offs + sizeof(rfc1042_header)); /* push the addresses in front */ memcpy(skb_push(skb, sizeof(addrs)), &addrs, sizeof(addrs)); skb->dev = fast_rx->dev; #ifdef CPTCFG_IWLMVM_VENDOR_CMDS if (fast_rx->drop_grat_arp_unsol_na && ieee80211_is_gratuitous_arp_unsolicited_na(skb)) goto drop; #endif ieee80211_rx_stats(fast_rx->dev, skb->len); /* The seqno index has the same property as needed * for the rx_msdu field, i.e. it is IEEE80211_NUM_TIDS * for non-QoS-data frames. Here we know it's a data * frame, so count MSDUs. */ u64_stats_update_begin(&stats->syncp); stats->msdu[rx->seqno_idx]++; stats->bytes += orig_len; u64_stats_update_end(&stats->syncp); if (fast_rx->internal_forward) { struct sk_buff *xmit_skb = NULL; if (is_multicast_ether_addr(addrs.da)) { xmit_skb = skb_copy(skb, GFP_ATOMIC); } else if (!ether_addr_equal(addrs.da, addrs.sa) && sta_info_get(rx->sdata, addrs.da)) { xmit_skb = skb; skb = NULL; } if (xmit_skb) { /* * Send to wireless media and increase priority by 256 * to keep the received priority instead of * reclassifying the frame (see cfg80211_classify8021d). */ xmit_skb->priority += 256; xmit_skb->protocol = htons(ETH_P_802_3); skb_reset_network_header(xmit_skb); skb_reset_mac_header(xmit_skb); dev_queue_xmit(xmit_skb); } if (!skb) return true; } /* deliver to local stack */ skb->protocol = eth_type_trans(skb, fast_rx->dev); memset(skb->cb, 0, sizeof(skb->cb)); if (rx->napi) napi_gro_receive(rx->napi, skb); else netif_receive_skb(skb); return true; drop: dev_kfree_skb(skb); stats->dropped++; return true; } /* * This function returns whether or not the SKB * was destined for RX processing or not, which, * if consume is true, is equivalent to whether * or not the skb was consumed. */ static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx, struct sk_buff *skb, bool consume) { struct ieee80211_local *local = rx->local; struct ieee80211_sub_if_data *sdata = rx->sdata; rx->skb = skb; /* See if we can do fast-rx; if we have to copy we already lost, * so punt in that case. We should never have to deliver a data * frame to multiple interfaces anyway. 
* * We skip the ieee80211_accept_frame() call and do the necessary * checking inside ieee80211_invoke_fast_rx(). */ if (consume && rx->sta) { struct ieee80211_fast_rx *fast_rx; fast_rx = rcu_dereference(rx->sta->fast_rx); if (fast_rx && ieee80211_invoke_fast_rx(rx, fast_rx)) return true; } if (!ieee80211_accept_frame(rx)) return false; if (!consume) { skb = skb_copy(skb, GFP_ATOMIC); if (!skb) { if (net_ratelimit()) wiphy_debug(local->hw.wiphy, "failed to copy skb for %s\n", sdata->name); return true; } rx->skb = skb; } ieee80211_invoke_rx_handlers(rx); return true; } /* * This is the actual Rx frames handler. as it belongs to Rx path it must * be called with rcu_read_lock protection. */ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta, struct sk_buff *skb, struct napi_struct *napi) { struct ieee80211_local *local = hw_to_local(hw); struct ieee80211_sub_if_data *sdata; struct ieee80211_hdr *hdr; __le16 fc; struct ieee80211_rx_data rx; struct ieee80211_sub_if_data *prev; struct rhlist_head *tmp; int err = 0; fc = ((struct ieee80211_hdr *)skb->data)->frame_control; memset(&rx, 0, sizeof(rx)); rx.skb = skb; rx.local = local; rx.napi = napi; if (ieee80211_is_data(fc) || ieee80211_is_mgmt(fc)) I802_DEBUG_INC(local->dot11ReceivedFragmentCount); if (ieee80211_is_mgmt(fc)) { /* drop frame if too short for header */ if (skb->len < ieee80211_hdrlen(fc)) err = -ENOBUFS; else err = skb_linearize(skb); } else { err = !pskb_may_pull(skb, ieee80211_hdrlen(fc)); } if (err) { dev_kfree_skb(skb); return; } hdr = (struct ieee80211_hdr *)skb->data; ieee80211_parse_qos(&rx); ieee80211_verify_alignment(&rx); if (unlikely(ieee80211_is_probe_resp(hdr->frame_control) || ieee80211_is_beacon(hdr->frame_control))) ieee80211_scan_rx(local, skb); if (ieee80211_is_data(fc)) { struct sta_info *sta, *prev_sta; if (pubsta) { rx.sta = container_of(pubsta, struct sta_info, sta); rx.sdata = rx.sta->sdata; if (ieee80211_prepare_and_rx_handle(&rx, skb, true)) return; goto out; } prev_sta = NULL; for_each_sta_info(local, hdr->addr2, sta, tmp) { if (!prev_sta) { prev_sta = sta; continue; } rx.sta = prev_sta; rx.sdata = prev_sta->sdata; ieee80211_prepare_and_rx_handle(&rx, skb, false); prev_sta = sta; } if (prev_sta) { rx.sta = prev_sta; rx.sdata = prev_sta->sdata; if (ieee80211_prepare_and_rx_handle(&rx, skb, true)) return; goto out; } } prev = NULL; list_for_each_entry_rcu(sdata, &local->interfaces, list) { if (!ieee80211_sdata_running(sdata)) continue; if (sdata->vif.type == NL80211_IFTYPE_MONITOR || sdata->vif.type == NL80211_IFTYPE_AP_VLAN) continue; /* * frame is destined for this interface, but if it's * not also for the previous one we handle that after * the loop to avoid copying the SKB once too much */ if (!prev) { prev = sdata; continue; } rx.sta = sta_info_get_bss(prev, hdr->addr2); rx.sdata = prev; ieee80211_prepare_and_rx_handle(&rx, skb, false); prev = sdata; } if (prev) { rx.sta = sta_info_get_bss(prev, hdr->addr2); rx.sdata = prev; if (ieee80211_prepare_and_rx_handle(&rx, skb, true)) return; } out: dev_kfree_skb(skb); } /* * This is the receive path handler. It is called by a low level driver when an * 802.11 MPDU is received from the hardware. 
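 *
 * A minimal driver-side sketch (illustrative only, not code from this
 * tree; the band/frequency/signal values are made-up examples) fills in
 * the RX status in the skb's control buffer before handing it over:
 *
 *	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
 *
 *	memset(status, 0, sizeof(*status));
 *	status->band = NL80211_BAND_2GHZ;
 *	status->freq = 2412;
 *	status->signal = -50;
 *	ieee80211_rx_napi(hw, NULL, skb, napi);
 *
 * It must be called in softirq context, as asserted below.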
 */
void ieee80211_rx_napi(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta,
		       struct sk_buff *skb, struct napi_struct *napi)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct ieee80211_rate *rate = NULL;
	struct ieee80211_supported_band *sband;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);

	WARN_ON_ONCE(softirq_count() == 0);

	if (WARN_ON(status->band >= NUM_NL80211_BANDS))
		goto drop;

	sband = local->hw.wiphy->bands[status->band];
	if (WARN_ON(!sband))
		goto drop;

	/*
	 * If we're suspending, it is possible although not too likely
	 * that we'd be receiving frames after having already partially
	 * quiesced the stack. We can't process such frames then since
	 * that might, for example, cause stations to be added or other
	 * driver callbacks be invoked.
	 */
	if (unlikely(local->quiescing || local->suspended))
		goto drop;

	/* We might be during a HW reconfig, prevent Rx for the same reason */
	if (unlikely(local->in_reconfig))
		goto drop;

	/*
	 * The same happens when we're not even started,
	 * but that's worth a warning.
	 */
	if (WARN_ON(!local->started))
		goto drop;

	if (likely(!(status->flag & RX_FLAG_FAILED_PLCP_CRC))) {
		/*
		 * Validate the rate, unless a PLCP error means that
		 * we probably can't have a valid rate here anyway.
		 */

		switch (status->encoding) {
		case RX_ENC_HT:
			/*
			 * rate_idx is MCS index, which can be [0-76]
			 * as documented on:
			 *
			 * http://wireless.kernel.org/en/developers/Documentation/ieee80211/802.11n
			 *
			 * Anything else would be some sort of driver or
			 * hardware error. The driver should catch hardware
			 * errors.
			 */
			if (WARN(status->rate_idx > 76,
				 "Rate marked as an HT rate but passed "
				 "status->rate_idx is not "
				 "an MCS index [0-76]: %d (0x%02x)\n",
				 status->rate_idx, status->rate_idx))
				goto drop;
			break;
		case RX_ENC_VHT:
			if (WARN_ONCE(status->rate_idx > 9 ||
				      !status->nss ||
				      status->nss > 8,
				      "Rate marked as a VHT rate but data is invalid: MCS: %d, NSS: %d\n",
				      status->rate_idx, status->nss))
				goto drop;
			break;
		case RX_ENC_HE:
			if (WARN_ONCE(status->rate_idx > 11 ||
				      !status->nss ||
				      status->nss > 8,
				      "Rate marked as an HE rate but data is invalid: MCS: %d, NSS: %d\n",
				      status->rate_idx, status->nss))
				goto drop;
			break;
		default:
			WARN_ON_ONCE(1);
			/* fall through */
		case RX_ENC_LEGACY:
			if (WARN_ON(status->rate_idx >= sband->n_bitrates))
				goto drop;
			rate = &sband->bitrates[status->rate_idx];
		}
	}

	status->rx_flags = 0;

	/*
	 * key references and virtual interfaces are protected using RCU
	 * and this requires that we are in a read-side RCU section during
	 * receive processing
	 */
	rcu_read_lock();

	/*
	 * Frames with failed FCS/PLCP checksum are not returned,
	 * all other frames are returned without radiotap header
	 * if it was previously present.
	 * Also, frames with less than 16 bytes are dropped.
	 */
	skb = ieee80211_rx_monitor(local, skb, rate);
	if (!skb) {
		rcu_read_unlock();
		return;
	}

	ieee80211_tpt_led_trig_rx(local,
			((struct ieee80211_hdr *)skb->data)->frame_control,
			skb->len);

	__ieee80211_rx_handle_packet(hw, pubsta, skb, napi);

	rcu_read_unlock();

	return;
 drop:
	kfree_skb(skb);
}
EXPORT_SYMBOL(ieee80211_rx_napi);

/* This is a version of the rx handler that can be called from hard irq
 * context.
 * Post the skb on the queue and schedule the tasklet
 */
void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct ieee80211_local *local = hw_to_local(hw);

	BUILD_BUG_ON(sizeof(struct ieee80211_rx_status) > sizeof(skb->cb));

	skb->pkt_type = IEEE80211_RX_MSG;
	skb_queue_tail(&local->skb_queue, skb);
	tasklet_schedule(&local->tasklet);
}
EXPORT_SYMBOL(ieee80211_rx_irqsafe);

backport-iwlwifi-dkms-7906/net/mac80211/scan.c

/*
 * Scanning implementation
 *
 * Copyright 2003, Jouni Malinen <j@w1.fi>
 * Copyright 2004, Instant802 Networks, Inc.
 * Copyright 2005, Devicescape Software, Inc.
 * Copyright 2006-2007	Jiri Benc <jbenc@suse.cz>
 * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
 * Copyright 2013-2015  Intel Mobile Communications GmbH
 * Copyright 2016-2017  Intel Deutschland GmbH
 * Copyright (C) 2018-2019 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/if_arp.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <net/sch_generic.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/random.h>
#include <net/mac80211.h>

#include "ieee80211_i.h"
#include "driver-ops.h"
#include "mesh.h"

#define IEEE80211_PROBE_DELAY (HZ / 33)
#define IEEE80211_CHANNEL_TIME (HZ / 33)
#define IEEE80211_PASSIVE_CHANNEL_TIME (HZ / 9)

void ieee80211_rx_bss_put(struct ieee80211_local *local,
			  struct ieee80211_bss *bss)
{
	if (!bss)
		return;
	cfg80211_put_bss(local->hw.wiphy,
			 container_of((void *)bss, struct cfg80211_bss, priv));
}

static bool is_uapsd_supported(struct ieee802_11_elems *elems)
{
	u8 qos_info;

	if (elems->wmm_info && elems->wmm_info_len == 7 &&
	    elems->wmm_info[5] == 1)
		qos_info = elems->wmm_info[6];
	else if (elems->wmm_param && elems->wmm_param_len == 24 &&
		 elems->wmm_param[5] == 1)
		qos_info = elems->wmm_param[6];
	else
		/* no valid wmm information or parameter element found */
		return false;

	return qos_info & IEEE80211_WMM_IE_AP_QOSINFO_UAPSD;
}

static void
ieee80211_update_bss_from_elems(struct ieee80211_local *local,
				struct ieee80211_bss *bss,
				struct ieee802_11_elems *elems,
				struct ieee80211_rx_status *rx_status,
				bool beacon)
{
	int clen, srlen;

	if (beacon)
		bss->device_ts_beacon = rx_status->device_timestamp;
	else
		bss->device_ts_presp = rx_status->device_timestamp;

	if (elems->parse_error) {
		if (beacon)
			bss->corrupt_data |= IEEE80211_BSS_CORRUPT_BEACON;
		else
			bss->corrupt_data |= IEEE80211_BSS_CORRUPT_PROBE_RESP;
	} else {
		if (beacon)
			bss->corrupt_data &= ~IEEE80211_BSS_CORRUPT_BEACON;
		else
			bss->corrupt_data &= ~IEEE80211_BSS_CORRUPT_PROBE_RESP;
	}

	/* save the ERP value so that it is available at association time */
	if (elems->erp_info &&
	    (!elems->parse_error ||
	     !(bss->valid_data & IEEE80211_BSS_VALID_ERP))) {
		bss->erp_value = elems->erp_info[0];
		bss->has_erp_value = true;
		if (!elems->parse_error)
			bss->valid_data |= IEEE80211_BSS_VALID_ERP;
	}

	/* replace old supported rates if we get new values */
	if (!elems->parse_error ||
	    !(bss->valid_data & IEEE80211_BSS_VALID_RATES)) {
		srlen = 0;
		if (elems->supp_rates) {
			clen = IEEE80211_MAX_SUPP_RATES;
			if (clen > elems->supp_rates_len)
				clen = elems->supp_rates_len;
			memcpy(bss->supp_rates, elems->supp_rates, clen);
			srlen += clen;
		}
		if (elems->ext_supp_rates) {
			clen = IEEE80211_MAX_SUPP_RATES - srlen;
			if (clen > elems->ext_supp_rates_len)
				clen = elems->ext_supp_rates_len;
			memcpy(bss->supp_rates + srlen, elems->ext_supp_rates,
			       clen);
			srlen += clen;
		}
		if (srlen) {
			bss->supp_rates_len = srlen;
			if (!elems->parse_error)
				bss->valid_data |= IEEE80211_BSS_VALID_RATES;
		}
	}

	if
(!elems->parse_error || !(bss->valid_data & IEEE80211_BSS_VALID_WMM)) { bss->wmm_used = elems->wmm_param || elems->wmm_info; bss->uapsd_supported = is_uapsd_supported(elems); if (!elems->parse_error) bss->valid_data |= IEEE80211_BSS_VALID_WMM; } if (beacon) { struct ieee80211_supported_band *sband = local->hw.wiphy->bands[rx_status->band]; if (!(rx_status->encoding == RX_ENC_HT) && !(rx_status->encoding == RX_ENC_VHT)) bss->beacon_rate = &sband->bitrates[rx_status->rate_idx]; } } struct ieee80211_bss * ieee80211_bss_info_update(struct ieee80211_local *local, struct ieee80211_rx_status *rx_status, struct ieee80211_mgmt *mgmt, size_t len, struct ieee80211_channel *channel) { bool beacon = ieee80211_is_beacon(mgmt->frame_control); struct cfg80211_bss *cbss, *non_tx_cbss; struct ieee80211_bss *bss, *non_tx_bss; struct cfg80211_inform_bss bss_meta = { .boottime_ns = rx_status->boottime_ns, }; bool signal_valid; struct ieee80211_sub_if_data *scan_sdata; struct ieee802_11_elems elems; size_t baselen; u8 *elements; if (rx_status->flag & RX_FLAG_NO_SIGNAL_VAL) bss_meta.signal = 0; /* invalid signal indication */ else if (ieee80211_hw_check(&local->hw, SIGNAL_DBM)) bss_meta.signal = rx_status->signal * 100; else if (ieee80211_hw_check(&local->hw, SIGNAL_UNSPEC)) bss_meta.signal = (rx_status->signal * 100) / local->hw.max_signal; bss_meta.scan_width = NL80211_BSS_CHAN_WIDTH_20; if (rx_status->bw == RATE_INFO_BW_5) bss_meta.scan_width = NL80211_BSS_CHAN_WIDTH_5; else if (rx_status->bw == RATE_INFO_BW_10) bss_meta.scan_width = NL80211_BSS_CHAN_WIDTH_10; bss_meta.chan = channel; rcu_read_lock(); scan_sdata = rcu_dereference(local->scan_sdata); if (scan_sdata && scan_sdata->vif.type == NL80211_IFTYPE_STATION && scan_sdata->vif.bss_conf.assoc && ieee80211_have_rx_timestamp(rx_status)) { bss_meta.parent_tsf = ieee80211_calculate_rx_timestamp(local, rx_status, len + FCS_LEN, 24); ether_addr_copy(bss_meta.parent_bssid, scan_sdata->vif.bss_conf.bssid); } rcu_read_unlock(); cbss = cfg80211_inform_bss_frame_data(local->hw.wiphy, &bss_meta, mgmt, len, GFP_ATOMIC); if (!cbss) return NULL; if (ieee80211_is_probe_resp(mgmt->frame_control)) { elements = mgmt->u.probe_resp.variable; baselen = offsetof(struct ieee80211_mgmt, u.probe_resp.variable); } else { baselen = offsetof(struct ieee80211_mgmt, u.beacon.variable); elements = mgmt->u.beacon.variable; } if (baselen > len) return NULL; ieee802_11_parse_elems(elements, len - baselen, false, &elems, mgmt->bssid, cbss->bssid); /* In case the signal is invalid update the status */ signal_valid = channel == cbss->channel; if (!signal_valid) rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL; bss = (void *)cbss->priv; ieee80211_update_bss_from_elems(local, bss, &elems, rx_status, beacon); list_for_each_entry(non_tx_cbss, &cbss->nontrans_list, nontrans_list) { non_tx_bss = (void *)non_tx_cbss->priv; ieee80211_update_bss_from_elems(local, non_tx_bss, &elems, rx_status, beacon); } return bss; } static bool ieee80211_scan_accept_presp(struct ieee80211_sub_if_data *sdata, u32 scan_flags, const u8 *da) { if (!sdata) return false; /* accept broadcast for OCE */ if (scan_flags & NL80211_SCAN_FLAG_ACCEPT_BCAST_PROBE_RESP && is_broadcast_ether_addr(da)) return true; if (scan_flags & NL80211_SCAN_FLAG_RANDOM_ADDR) return true; return ether_addr_equal(da, sdata->vif.addr); } void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb) { struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb); struct ieee80211_sub_if_data *sdata1, *sdata2; struct ieee80211_mgmt *mgmt = 
(void *)skb->data; struct ieee80211_bss *bss; struct ieee80211_channel *channel; if (skb->len < 24 || (!ieee80211_is_probe_resp(mgmt->frame_control) && !ieee80211_is_beacon(mgmt->frame_control))) return; sdata1 = rcu_dereference(local->scan_sdata); sdata2 = rcu_dereference(local->sched_scan_sdata); if (likely(!sdata1 && !sdata2)) return; if (ieee80211_is_probe_resp(mgmt->frame_control)) { struct cfg80211_scan_request *scan_req; struct cfg80211_sched_scan_request *sched_scan_req; u32 scan_req_flags = 0, sched_scan_req_flags = 0; scan_req = rcu_dereference(local->scan_req); sched_scan_req = rcu_dereference(local->sched_scan_req); if (scan_req) scan_req_flags = scan_req->flags; if (sched_scan_req) sched_scan_req_flags = sched_scan_req->flags; /* ignore ProbeResp to foreign address or non-bcast (OCE) * unless scanning with randomised address */ if (!ieee80211_scan_accept_presp(sdata1, scan_req_flags, mgmt->da) && !ieee80211_scan_accept_presp(sdata2, sched_scan_req_flags, mgmt->da)) return; } channel = ieee80211_get_channel(local->hw.wiphy, rx_status->freq); if (!channel || channel->flags & IEEE80211_CHAN_DISABLED) return; bss = ieee80211_bss_info_update(local, rx_status, mgmt, skb->len, channel); if (bss) ieee80211_rx_bss_put(local, bss); } static void ieee80211_prepare_scan_chandef(struct cfg80211_chan_def *chandef, enum nl80211_bss_scan_width scan_width) { memset(chandef, 0, sizeof(*chandef)); switch (scan_width) { case NL80211_BSS_CHAN_WIDTH_5: chandef->width = NL80211_CHAN_WIDTH_5; break; case NL80211_BSS_CHAN_WIDTH_10: chandef->width = NL80211_CHAN_WIDTH_10; break; default: chandef->width = NL80211_CHAN_WIDTH_20_NOHT; break; } } /* return false if no more work */ static bool ieee80211_prep_hw_scan(struct ieee80211_local *local) { struct cfg80211_scan_request *req; struct cfg80211_chan_def chandef; u8 bands_used = 0; int i, ielen, n_chans; u32 flags = 0; req = rcu_dereference_protected(local->scan_req, lockdep_is_held(&local->mtx)); if (test_bit(SCAN_HW_CANCELLED, &local->scanning)) return false; if (ieee80211_hw_check(&local->hw, SINGLE_SCAN_ON_ALL_BANDS)) { for (i = 0; i < req->n_channels; i++) { local->hw_scan_req->req.channels[i] = req->channels[i]; bands_used |= BIT(req->channels[i]->band); } n_chans = req->n_channels; } else { do { if (local->hw_scan_band == NUM_NL80211_BANDS) return false; n_chans = 0; for (i = 0; i < req->n_channels; i++) { if (req->channels[i]->band != local->hw_scan_band) continue; local->hw_scan_req->req.channels[n_chans] = req->channels[i]; n_chans++; bands_used |= BIT(req->channels[i]->band); } local->hw_scan_band++; } while (!n_chans); } local->hw_scan_req->req.n_channels = n_chans; ieee80211_prepare_scan_chandef(&chandef, req->scan_width); if (req->flags & NL80211_SCAN_FLAG_MIN_PREQ_CONTENT) flags |= IEEE80211_PROBE_FLAG_MIN_CONTENT; ielen = ieee80211_build_preq_ies(local, (u8 *)local->hw_scan_req->req.ie, local->hw_scan_ies_bufsize, &local->hw_scan_req->ies, req->ie, req->ie_len, bands_used, req->rates, &chandef, flags); local->hw_scan_req->req.ie_len = ielen; local->hw_scan_req->req.no_cck = req->no_cck; ether_addr_copy(local->hw_scan_req->req.mac_addr, req->mac_addr); ether_addr_copy(local->hw_scan_req->req.mac_addr_mask, req->mac_addr_mask); ether_addr_copy(local->hw_scan_req->req.bssid, req->bssid); return true; } static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted) { struct ieee80211_local *local = hw_to_local(hw); bool hw_scan = test_bit(SCAN_HW_SCANNING, &local->scanning); bool was_scanning = local->scanning; struct 
cfg80211_scan_request *scan_req; struct ieee80211_sub_if_data *scan_sdata; struct ieee80211_sub_if_data *sdata; lockdep_assert_held(&local->mtx); /* * It's ok to abort a not-yet-running scan (that * we have one at all will be verified by checking * local->scan_req next), but not to complete it * successfully. */ if (WARN_ON(!local->scanning && !aborted)) aborted = true; if (WARN_ON(!local->scan_req)) return; if (hw_scan && !aborted && !ieee80211_hw_check(&local->hw, SINGLE_SCAN_ON_ALL_BANDS) && ieee80211_prep_hw_scan(local)) { int rc; rc = drv_hw_scan(local, rcu_dereference_protected(local->scan_sdata, lockdep_is_held(&local->mtx)), local->hw_scan_req); if (rc == 0) return; /* HW scan failed and is going to be reported as aborted, * so clear old scan info. */ memset(&local->scan_info, 0, sizeof(local->scan_info)); aborted = true; } kfree(local->hw_scan_req); local->hw_scan_req = NULL; scan_req = rcu_dereference_protected(local->scan_req, lockdep_is_held(&local->mtx)); if (scan_req != local->int_scan_req) { local->scan_info.aborted = aborted; cfg80211_scan_done(scan_req, &local->scan_info); } RCU_INIT_POINTER(local->scan_req, NULL); scan_sdata = rcu_dereference_protected(local->scan_sdata, lockdep_is_held(&local->mtx)); RCU_INIT_POINTER(local->scan_sdata, NULL); local->scanning = 0; local->scan_chandef.chan = NULL; /* Set power back to normal operating levels. */ ieee80211_hw_config(local, 0); if (!hw_scan) { ieee80211_configure_filter(local); drv_sw_scan_complete(local, scan_sdata); ieee80211_offchannel_return(local); } ieee80211_recalc_idle(local); ieee80211_mlme_notify_scan_completed(local); ieee80211_ibss_notify_scan_completed(local); /* Requeue all the work that might have been ignored while * the scan was in progress; if there was none this will * just be a no-op for the particular interface. */ list_for_each_entry_rcu(sdata, &local->interfaces, list) { if (ieee80211_sdata_running(sdata)) ieee80211_queue_work(&sdata->local->hw, &sdata->work); } if (was_scanning) ieee80211_start_next_roc(local); } void ieee80211_scan_completed(struct ieee80211_hw *hw, struct cfg80211_scan_info *info) { struct ieee80211_local *local = hw_to_local(hw); trace_api_scan_completed(local, info->aborted); set_bit(SCAN_COMPLETED, &local->scanning); if (info->aborted) set_bit(SCAN_ABORTED, &local->scanning); memcpy(&local->scan_info, info, sizeof(*info)); ieee80211_queue_delayed_work(&local->hw, &local->scan_work, 0); } EXPORT_SYMBOL(ieee80211_scan_completed); static int ieee80211_start_sw_scan(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata) { /* Software scan is not supported in multi-channel cases */ if (local->use_chanctx) return -EOPNOTSUPP; /* * Hardware/driver doesn't support hw_scan, so use software * scanning instead. First send a nullfunc frame with power save * bit on so that AP will buffer the frames for us while we are not * listening, then send probe requests to each channel and wait for * the responses. After all channels are scanned, tune back to the * original channel and send a nullfunc frame with power save bit * off to trigger the AP to send us all the buffered frames. * * Note that while local->sw_scanning is true everything else but * nullfunc frames and probe requests will be dropped in * ieee80211_tx_h_check_assoc(). 
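	 *
	 * The resulting state machine, driven from ieee80211_scan_work(),
	 * cycles through SCAN_DECISION -> SCAN_SET_CHANNEL ->
	 * SCAN_SEND_PROBE and, when traffic needs the operating channel,
	 * detours via SCAN_SUSPEND/SCAN_RESUME (or bails out through
	 * SCAN_ABORT).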
*/ drv_sw_scan_start(local, sdata, local->scan_addr); local->leave_oper_channel_time = jiffies; local->next_scan_state = SCAN_DECISION; local->scan_channel_idx = 0; ieee80211_offchannel_stop_vifs(local); /* ensure nullfunc is transmitted before leaving operating channel */ ieee80211_flush_queues(local, NULL, false); ieee80211_configure_filter(local); /* We need to set power level at maximum rate for scanning. */ ieee80211_hw_config(local, 0); ieee80211_queue_delayed_work(&local->hw, &local->scan_work, 0); return 0; } static bool ieee80211_can_scan(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata) { if (ieee80211_is_radar_required(local)) return false; if (!list_empty(&local->roc_list)) return false; if (sdata->vif.type == NL80211_IFTYPE_STATION && sdata->u.mgd.flags & IEEE80211_STA_CONNECTION_POLL) return false; return true; } void ieee80211_run_deferred_scan(struct ieee80211_local *local) { lockdep_assert_held(&local->mtx); if (!local->scan_req || local->scanning) return; if (!ieee80211_can_scan(local, rcu_dereference_protected( local->scan_sdata, lockdep_is_held(&local->mtx)))) return; ieee80211_queue_delayed_work(&local->hw, &local->scan_work, round_jiffies_relative(0)); } static void ieee80211_send_scan_probe_req(struct ieee80211_sub_if_data *sdata, const u8 *src, const u8 *dst, const u8 *ssid, size_t ssid_len, const u8 *ie, size_t ie_len, u32 ratemask, u32 flags, u32 tx_flags, struct ieee80211_channel *channel) { struct sk_buff *skb; u32 txdata_flags = 0; skb = ieee80211_build_probe_req(sdata, src, dst, dst, ratemask, channel, ssid, ssid_len, ie, ie_len, flags); if (skb) { if (flags & IEEE80211_PROBE_FLAG_RANDOM_SN) { struct ieee80211_hdr *hdr = (void *)skb->data; u16 sn = get_random_u32(); txdata_flags |= IEEE80211_TX_NO_SEQNO; hdr->seq_ctrl = cpu_to_le16(IEEE80211_SN_TO_SEQ(sn)); } IEEE80211_SKB_CB(skb)->flags |= tx_flags; ieee80211_tx_skb_tid_band(sdata, skb, 7, channel->band, txdata_flags); } } static void ieee80211_scan_state_send_probe(struct ieee80211_local *local, unsigned long *next_delay) { int i; struct ieee80211_sub_if_data *sdata; struct cfg80211_scan_request *scan_req; enum nl80211_band band = local->hw.conf.chandef.chan->band; u32 flags = 0, tx_flags; scan_req = rcu_dereference_protected(local->scan_req, lockdep_is_held(&local->mtx)); tx_flags = IEEE80211_TX_INTFL_OFFCHAN_TX_OK; if (scan_req->no_cck) tx_flags |= IEEE80211_TX_CTL_NO_CCK_RATE; if (scan_req->flags & NL80211_SCAN_FLAG_MIN_PREQ_CONTENT) flags |= IEEE80211_PROBE_FLAG_MIN_CONTENT; if (scan_req->flags & NL80211_SCAN_FLAG_RANDOM_SN) flags |= IEEE80211_PROBE_FLAG_RANDOM_SN; sdata = rcu_dereference_protected(local->scan_sdata, lockdep_is_held(&local->mtx)); for (i = 0; i < scan_req->n_ssids; i++) ieee80211_send_scan_probe_req( sdata, local->scan_addr, scan_req->bssid, scan_req->ssids[i].ssid, scan_req->ssids[i].ssid_len, scan_req->ie, scan_req->ie_len, scan_req->rates[band], flags, tx_flags, local->hw.conf.chandef.chan); /* * After sending probe requests, wait for probe responses * on the channel. 
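	 *
	 * IEEE80211_CHANNEL_TIME is HZ / 33, i.e. roughly 30 ms of
	 * dwell time per channel for an active scan.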
*/ *next_delay = IEEE80211_CHANNEL_TIME; local->next_scan_state = SCAN_DECISION; } static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata, struct cfg80211_scan_request *req) { struct ieee80211_local *local = sdata->local; bool hw_scan = local->ops->hw_scan; int rc; lockdep_assert_held(&local->mtx); if (local->scan_req || ieee80211_is_radar_required(local)) return -EBUSY; if (!ieee80211_can_scan(local, sdata)) { /* wait for the work to finish/time out */ rcu_assign_pointer(local->scan_req, req); rcu_assign_pointer(local->scan_sdata, sdata); return 0; } again: if (hw_scan) { u8 *ies; local->hw_scan_ies_bufsize = local->scan_ies_len + req->ie_len; if (ieee80211_hw_check(&local->hw, SINGLE_SCAN_ON_ALL_BANDS)) { int i, n_bands = 0; u8 bands_counted = 0; for (i = 0; i < req->n_channels; i++) { if (bands_counted & BIT(req->channels[i]->band)) continue; bands_counted |= BIT(req->channels[i]->band); n_bands++; } local->hw_scan_ies_bufsize *= n_bands; } local->hw_scan_req = kmalloc( sizeof(*local->hw_scan_req) + req->n_channels * sizeof(req->channels[0]) + local->hw_scan_ies_bufsize, GFP_KERNEL); if (!local->hw_scan_req) return -ENOMEM; local->hw_scan_req->req.ssids = req->ssids; local->hw_scan_req->req.n_ssids = req->n_ssids; ies = (u8 *)local->hw_scan_req + sizeof(*local->hw_scan_req) + req->n_channels * sizeof(req->channels[0]); local->hw_scan_req->req.ie = ies; local->hw_scan_req->req.flags = req->flags; eth_broadcast_addr(local->hw_scan_req->req.bssid); local->hw_scan_req->req.duration = req->duration; local->hw_scan_req->req.duration_mandatory = req->duration_mandatory; local->hw_scan_band = 0; /* * After allocating local->hw_scan_req, we must * go through until ieee80211_prep_hw_scan(), so * anything that might be changed here and leave * this function early must not go after this * allocation. */ } rcu_assign_pointer(local->scan_req, req); rcu_assign_pointer(local->scan_sdata, sdata); if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) get_random_mask_addr(local->scan_addr, req->mac_addr, req->mac_addr_mask); else memcpy(local->scan_addr, sdata->vif.addr, ETH_ALEN); if (hw_scan) { __set_bit(SCAN_HW_SCANNING, &local->scanning); } else if ((req->n_channels == 1) && (req->channels[0] == local->_oper_chandef.chan)) { /* * If we are scanning only on the operating channel * then we do not need to stop normal activities */ unsigned long next_delay; __set_bit(SCAN_ONCHANNEL_SCANNING, &local->scanning); ieee80211_recalc_idle(local); /* Notify driver scan is starting, keep order of operations * same as normal software scan, in case that matters. */ drv_sw_scan_start(local, sdata, local->scan_addr); ieee80211_configure_filter(local); /* accept probe-responses */ /* We need to ensure power level is at max for scanning. */ ieee80211_hw_config(local, 0); if ((req->channels[0]->flags & (IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_RADAR)) || !req->n_ssids) { next_delay = IEEE80211_PASSIVE_CHANNEL_TIME; } else { ieee80211_scan_state_send_probe(local, &next_delay); next_delay = IEEE80211_CHANNEL_TIME; } /* Now, just wait a bit and we are all done! 
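		 * Since we never left the operating channel, none of the
		 * off-channel machinery (powersave nullfunc, vif stopping,
		 * queue flush) used by the full software scan is needed
		 * here.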
*/ ieee80211_queue_delayed_work(&local->hw, &local->scan_work, next_delay); return 0; } else { /* Do normal software scan */ __set_bit(SCAN_SW_SCANNING, &local->scanning); } ieee80211_recalc_idle(local); if (hw_scan) { WARN_ON(!ieee80211_prep_hw_scan(local)); rc = drv_hw_scan(local, sdata, local->hw_scan_req); } else { rc = ieee80211_start_sw_scan(local, sdata); } if (rc) { kfree(local->hw_scan_req); local->hw_scan_req = NULL; local->scanning = 0; ieee80211_recalc_idle(local); local->scan_req = NULL; RCU_INIT_POINTER(local->scan_sdata, NULL); } if (hw_scan && rc == 1) { /* * we can't fall back to software for P2P-GO * as it must update NoA etc. */ if (ieee80211_vif_type_p2p(&sdata->vif) == NL80211_IFTYPE_P2P_GO) return -EOPNOTSUPP; hw_scan = false; goto again; } return rc; } static unsigned long ieee80211_scan_get_channel_time(struct ieee80211_channel *chan) { /* * TODO: channel switching also consumes quite some time, * add that delay as well to get a better estimation */ if (chan->flags & (IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_RADAR)) return IEEE80211_PASSIVE_CHANNEL_TIME; return IEEE80211_PROBE_DELAY + IEEE80211_CHANNEL_TIME; } static void ieee80211_scan_state_decision(struct ieee80211_local *local, unsigned long *next_delay) { bool associated = false; bool tx_empty = true; bool bad_latency; struct ieee80211_sub_if_data *sdata; struct ieee80211_channel *next_chan; enum mac80211_scan_state next_scan_state; struct cfg80211_scan_request *scan_req; /* * check if at least one STA interface is associated, * check if at least one STA interface has pending tx frames * and grab the lowest used beacon interval */ mutex_lock(&local->iflist_mtx); list_for_each_entry(sdata, &local->interfaces, list) { if (!ieee80211_sdata_running(sdata)) continue; if (sdata->vif.type == NL80211_IFTYPE_STATION) { if (sdata->u.mgd.associated) { associated = true; if (!qdisc_all_tx_empty(sdata->dev)) { tx_empty = false; break; } } } } mutex_unlock(&local->iflist_mtx); scan_req = rcu_dereference_protected(local->scan_req, lockdep_is_held(&local->mtx)); next_chan = scan_req->channels[local->scan_channel_idx]; /* * we're currently scanning a different channel, let's * see if we can scan another channel without interfering * with the current traffic situation. * * Keep good latency, do not stay off-channel more than 125 ms. 
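	 *
	 * (HZ / 8 jiffies below is 125 ms independent of CONFIG_HZ;
	 * e.g. with CONFIG_HZ=250 it evaluates to 31 jiffies.)
	 */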
*/ bad_latency = time_after(jiffies + ieee80211_scan_get_channel_time(next_chan), local->leave_oper_channel_time + HZ / 8); if (associated && !tx_empty) { if (scan_req->flags & NL80211_SCAN_FLAG_LOW_PRIORITY) next_scan_state = SCAN_ABORT; else next_scan_state = SCAN_SUSPEND; } else if (associated && bad_latency) { next_scan_state = SCAN_SUSPEND; } else { next_scan_state = SCAN_SET_CHANNEL; } local->next_scan_state = next_scan_state; *next_delay = 0; } static void ieee80211_scan_state_set_channel(struct ieee80211_local *local, unsigned long *next_delay) { int skip; struct ieee80211_channel *chan; enum nl80211_bss_scan_width oper_scan_width; struct cfg80211_scan_request *scan_req; scan_req = rcu_dereference_protected(local->scan_req, lockdep_is_held(&local->mtx)); skip = 0; chan = scan_req->channels[local->scan_channel_idx]; local->scan_chandef.chan = chan; local->scan_chandef.center_freq1 = chan->center_freq; local->scan_chandef.center_freq2 = 0; switch (scan_req->scan_width) { case NL80211_BSS_CHAN_WIDTH_5: local->scan_chandef.width = NL80211_CHAN_WIDTH_5; break; case NL80211_BSS_CHAN_WIDTH_10: local->scan_chandef.width = NL80211_CHAN_WIDTH_10; break; case NL80211_BSS_CHAN_WIDTH_20: /* If scanning on oper channel, use whatever channel-type * is currently in use. */ oper_scan_width = cfg80211_chandef_to_scan_width( &local->_oper_chandef); if (chan == local->_oper_chandef.chan && oper_scan_width == scan_req->scan_width) local->scan_chandef = local->_oper_chandef; else local->scan_chandef.width = NL80211_CHAN_WIDTH_20_NOHT; break; } if (ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL)) skip = 1; /* advance state machine to next channel/band */ local->scan_channel_idx++; if (skip) { /* if we skip this channel return to the decision state */ local->next_scan_state = SCAN_DECISION; return; } /* * Probe delay is used to update the NAV, cf. 11.1.3.2.2 * (which unfortunately doesn't say _why_ step a) is done, * but it waits for the probe delay or until a frame is * received - and the received frame would update the NAV). * For now, we do not support waiting until a frame is * received. * * In any case, it is not necessary for a passive scan. 
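	 *
	 * Passive channels (and scans without SSIDs) instead use the
	 * longer IEEE80211_PASSIVE_CHANNEL_TIME, HZ / 9, i.e. roughly
	 * 111 ms, since we can only listen for beacons there.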
*/ if ((chan->flags & (IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_RADAR)) || !scan_req->n_ssids) { *next_delay = IEEE80211_PASSIVE_CHANNEL_TIME; local->next_scan_state = SCAN_DECISION; return; } /* active scan, send probes */ *next_delay = IEEE80211_PROBE_DELAY; local->next_scan_state = SCAN_SEND_PROBE; } static void ieee80211_scan_state_suspend(struct ieee80211_local *local, unsigned long *next_delay) { /* switch back to the operating channel */ local->scan_chandef.chan = NULL; ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL); /* disable PS */ ieee80211_offchannel_return(local); *next_delay = HZ / 5; /* afterwards, resume scan & go to next channel */ local->next_scan_state = SCAN_RESUME; } static void ieee80211_scan_state_resume(struct ieee80211_local *local, unsigned long *next_delay) { ieee80211_offchannel_stop_vifs(local); if (local->ops->flush) { ieee80211_flush_queues(local, NULL, false); *next_delay = 0; } else *next_delay = HZ / 10; /* remember when we left the operating channel */ local->leave_oper_channel_time = jiffies; /* advance to the next channel to be scanned */ local->next_scan_state = SCAN_SET_CHANNEL; } void ieee80211_scan_work(struct work_struct *work) { struct ieee80211_local *local = container_of(work, struct ieee80211_local, scan_work.work); struct ieee80211_sub_if_data *sdata; struct cfg80211_scan_request *scan_req; unsigned long next_delay = 0; bool aborted; mutex_lock(&local->mtx); if (!ieee80211_can_run_worker(local)) { aborted = true; goto out_complete; } sdata = rcu_dereference_protected(local->scan_sdata, lockdep_is_held(&local->mtx)); scan_req = rcu_dereference_protected(local->scan_req, lockdep_is_held(&local->mtx)); /* When scanning on-channel, the first-callback means completed. */ if (test_bit(SCAN_ONCHANNEL_SCANNING, &local->scanning)) { aborted = test_and_clear_bit(SCAN_ABORTED, &local->scanning); goto out_complete; } if (test_and_clear_bit(SCAN_COMPLETED, &local->scanning)) { aborted = test_and_clear_bit(SCAN_ABORTED, &local->scanning); goto out_complete; } if (!sdata || !scan_req) goto out; if (!local->scanning) { int rc; RCU_INIT_POINTER(local->scan_req, NULL); RCU_INIT_POINTER(local->scan_sdata, NULL); rc = __ieee80211_start_scan(sdata, scan_req); if (rc) { /* need to complete scan in cfg80211 */ rcu_assign_pointer(local->scan_req, scan_req); aborted = true; goto out_complete; } else goto out; } /* * as long as no delay is required advance immediately * without scheduling a new work */ do { if (!ieee80211_sdata_running(sdata)) { aborted = true; goto out_complete; } switch (local->next_scan_state) { case SCAN_DECISION: /* if no more bands/channels left, complete scan */ if (local->scan_channel_idx >= scan_req->n_channels) { aborted = false; goto out_complete; } ieee80211_scan_state_decision(local, &next_delay); break; case SCAN_SET_CHANNEL: ieee80211_scan_state_set_channel(local, &next_delay); break; case SCAN_SEND_PROBE: ieee80211_scan_state_send_probe(local, &next_delay); break; case SCAN_SUSPEND: ieee80211_scan_state_suspend(local, &next_delay); break; case SCAN_RESUME: ieee80211_scan_state_resume(local, &next_delay); break; case SCAN_ABORT: aborted = true; goto out_complete; } } while (next_delay == 0); ieee80211_queue_delayed_work(&local->hw, &local->scan_work, next_delay); goto out; out_complete: __ieee80211_scan_completed(&local->hw, aborted); out: mutex_unlock(&local->mtx); } int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata, struct cfg80211_scan_request *req) { int res; mutex_lock(&sdata->local->mtx); res = 
__ieee80211_start_scan(sdata, req); mutex_unlock(&sdata->local->mtx); return res; } int ieee80211_request_ibss_scan(struct ieee80211_sub_if_data *sdata, const u8 *ssid, u8 ssid_len, struct ieee80211_channel **channels, unsigned int n_channels, enum nl80211_bss_scan_width scan_width) { struct ieee80211_local *local = sdata->local; int ret = -EBUSY, i, n_ch = 0; enum nl80211_band band; mutex_lock(&local->mtx); /* busy scanning */ if (local->scan_req) goto unlock; /* fill internal scan request */ if (!channels) { int max_n; for (band = 0; band < NUM_NL80211_BANDS; band++) { if (!local->hw.wiphy->bands[band]) continue; max_n = local->hw.wiphy->bands[band]->n_channels; for (i = 0; i < max_n; i++) { struct ieee80211_channel *tmp_ch = &local->hw.wiphy->bands[band]->channels[i]; if (tmp_ch->flags & (IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_DISABLED)) continue; local->int_scan_req->channels[n_ch] = tmp_ch; n_ch++; } } if (WARN_ON_ONCE(n_ch == 0)) goto unlock; local->int_scan_req->n_channels = n_ch; } else { for (i = 0; i < n_channels; i++) { if (channels[i]->flags & (IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_DISABLED)) continue; local->int_scan_req->channels[n_ch] = channels[i]; n_ch++; } if (WARN_ON_ONCE(n_ch == 0)) goto unlock; local->int_scan_req->n_channels = n_ch; } local->int_scan_req->ssids = &local->scan_ssid; local->int_scan_req->n_ssids = 1; local->int_scan_req->scan_width = scan_width; memcpy(local->int_scan_req->ssids[0].ssid, ssid, IEEE80211_MAX_SSID_LEN); local->int_scan_req->ssids[0].ssid_len = ssid_len; ret = __ieee80211_start_scan(sdata, sdata->local->int_scan_req); unlock: mutex_unlock(&local->mtx); return ret; } /* * Only call this function when a scan can't be queued -- under RTNL. */ void ieee80211_scan_cancel(struct ieee80211_local *local) { /* * We are canceling software scan, or deferred scan that was not * yet really started (see __ieee80211_start_scan ). * * Regarding hardware scan: * - we can not call __ieee80211_scan_completed() as when * SCAN_HW_SCANNING bit is set this function change * local->hw_scan_req to operate on 5G band, what race with * driver which can use local->hw_scan_req * * - we can not cancel scan_work since driver can schedule it * by ieee80211_scan_completed(..., true) to finish scan * * Hence we only call the cancel_hw_scan() callback, but the low-level * driver is still responsible for calling ieee80211_scan_completed() * after the scan was completed/aborted. */ mutex_lock(&local->mtx); if (!local->scan_req) goto out; /* * We have a scan running and the driver already reported completion, * but the worker hasn't run yet or is stuck on the mutex - mark it as * cancelled. */ if (test_bit(SCAN_HW_SCANNING, &local->scanning) && test_bit(SCAN_COMPLETED, &local->scanning)) { set_bit(SCAN_HW_CANCELLED, &local->scanning); goto out; } if (test_bit(SCAN_HW_SCANNING, &local->scanning)) { /* * Make sure that __ieee80211_scan_completed doesn't trigger a * scan on another band. */ set_bit(SCAN_HW_CANCELLED, &local->scanning); if (local->ops->cancel_hw_scan) drv_cancel_hw_scan(local, rcu_dereference_protected(local->scan_sdata, lockdep_is_held(&local->mtx))); goto out; } /* * If the work is currently running, it must be blocked on * the mutex, but we'll set scan_sdata = NULL and it'll * simply exit once it acquires the mutex. 
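 *
 * To recap the cases handled above: a hardware scan whose completion
 * the driver already reported is merely marked SCAN_HW_CANCELLED; a
 * hardware scan still in flight additionally gets drv_cancel_hw_scan();
 * only a software (or still-deferred) scan reaches this point and is
 * completed synchronously below.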
*/ cancel_delayed_work(&local->scan_work); /* and clean up */ memset(&local->scan_info, 0, sizeof(local->scan_info)); __ieee80211_scan_completed(&local->hw, true); out: mutex_unlock(&local->mtx); } int __ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata, struct cfg80211_sched_scan_request *req) { struct ieee80211_local *local = sdata->local; struct ieee80211_scan_ies sched_scan_ies = {}; struct cfg80211_chan_def chandef; int ret, i, iebufsz, num_bands = 0; u32 rate_masks[NUM_NL80211_BANDS] = {}; u8 bands_used = 0; u8 *ie; u32 flags = 0; iebufsz = local->scan_ies_len + req->ie_len; lockdep_assert_held(&local->mtx); if (!local->ops->sched_scan_start) return -ENOTSUPP; for (i = 0; i < NUM_NL80211_BANDS; i++) { if (local->hw.wiphy->bands[i]) { bands_used |= BIT(i); rate_masks[i] = (u32) -1; num_bands++; } } if (req->flags & NL80211_SCAN_FLAG_MIN_PREQ_CONTENT) flags |= IEEE80211_PROBE_FLAG_MIN_CONTENT; ie = kcalloc(iebufsz, num_bands, GFP_KERNEL); if (!ie) { ret = -ENOMEM; goto out; } ieee80211_prepare_scan_chandef(&chandef, req->scan_width); ieee80211_build_preq_ies(local, ie, num_bands * iebufsz, &sched_scan_ies, req->ie, req->ie_len, bands_used, rate_masks, &chandef, flags); ret = drv_sched_scan_start(local, sdata, req, &sched_scan_ies); if (ret == 0) { rcu_assign_pointer(local->sched_scan_sdata, sdata); rcu_assign_pointer(local->sched_scan_req, req); } kfree(ie); out: if (ret) { /* Clean in case of failure after HW restart or upon resume. */ RCU_INIT_POINTER(local->sched_scan_sdata, NULL); RCU_INIT_POINTER(local->sched_scan_req, NULL); } return ret; } int ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata, struct cfg80211_sched_scan_request *req) { struct ieee80211_local *local = sdata->local; int ret; mutex_lock(&local->mtx); if (rcu_access_pointer(local->sched_scan_sdata)) { mutex_unlock(&local->mtx); return -EBUSY; } ret = __ieee80211_request_sched_scan_start(sdata, req); mutex_unlock(&local->mtx); return ret; } int ieee80211_request_sched_scan_stop(struct ieee80211_local *local) { struct ieee80211_sub_if_data *sched_scan_sdata; int ret = -ENOENT; mutex_lock(&local->mtx); if (!local->ops->sched_scan_stop) { ret = -ENOTSUPP; goto out; } /* We don't want to restart sched scan anymore. */ RCU_INIT_POINTER(local->sched_scan_req, NULL); sched_scan_sdata = rcu_dereference_protected(local->sched_scan_sdata, lockdep_is_held(&local->mtx)); if (sched_scan_sdata) { ret = drv_sched_scan_stop(local, sched_scan_sdata); if (!ret) RCU_INIT_POINTER(local->sched_scan_sdata, NULL); } out: mutex_unlock(&local->mtx); return ret; } void ieee80211_sched_scan_results(struct ieee80211_hw *hw) { struct ieee80211_local *local = hw_to_local(hw); trace_api_sched_scan_results(local); cfg80211_sched_scan_results(hw->wiphy, 0); } EXPORT_SYMBOL(ieee80211_sched_scan_results); void ieee80211_sched_scan_end(struct ieee80211_local *local) { mutex_lock(&local->mtx); if (!rcu_access_pointer(local->sched_scan_sdata)) { mutex_unlock(&local->mtx); return; } RCU_INIT_POINTER(local->sched_scan_sdata, NULL); /* If sched scan was aborted by the driver. 
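 * In that case sched_scan_req may still be set; clear it so a later
 * hardware restart does not try to start the scan again, before
 * notifying cfg80211 that it stopped.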
*/ RCU_INIT_POINTER(local->sched_scan_req, NULL); mutex_unlock(&local->mtx); cfg80211_sched_scan_stopped(local->hw.wiphy, 0); } void ieee80211_sched_scan_stopped_work(struct work_struct *work) { struct ieee80211_local *local = container_of(work, struct ieee80211_local, sched_scan_stopped_work); ieee80211_sched_scan_end(local); } void ieee80211_sched_scan_stopped(struct ieee80211_hw *hw) { struct ieee80211_local *local = hw_to_local(hw); trace_api_sched_scan_stopped(local); /* * this shouldn't really happen, so for simplicity * simply ignore it, and let mac80211 reconfigure * the sched scan later on. */ if (local->in_reconfig) return; schedule_work(&local->sched_scan_stopped_work); } EXPORT_SYMBOL(ieee80211_sched_scan_stopped); backport-iwlwifi-dkms-7906/net/mac80211/spectmgmt.c000066400000000000000000000174311351064634500220000ustar00rootroot00000000000000/* * spectrum management * * Copyright 2003, Jouni Malinen * Copyright 2002-2005, Instant802 Networks, Inc. * Copyright 2005-2006, Devicescape Software, Inc. * Copyright 2006-2007 Jiri Benc * Copyright 2007, Michael Wu * Copyright 2007-2008, Intel Corporation * Copyright 2008, Johannes Berg * Copyright (C) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/ieee80211.h> #include <net/cfg80211.h> #include <net/mac80211.h> #include "ieee80211_i.h" #include "sta_info.h" #include "wme.h" int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata, struct ieee802_11_elems *elems, enum nl80211_band current_band, u32 sta_flags, u8 *bssid, struct ieee80211_csa_ie *csa_ie) { enum nl80211_band new_band = current_band; int new_freq; u8 new_chan_no; struct ieee80211_channel *new_chan; struct cfg80211_chan_def new_vht_chandef = {}; const struct ieee80211_sec_chan_offs_ie *sec_chan_offs; const struct ieee80211_wide_bw_chansw_ie *wide_bw_chansw_ie; int secondary_channel_offset = -1; memset(csa_ie, 0, sizeof(*csa_ie)); sec_chan_offs = elems->sec_chan_offs; wide_bw_chansw_ie = elems->wide_bw_chansw_ie; if (sta_flags & (IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_40MHZ)) { sec_chan_offs = NULL; wide_bw_chansw_ie = NULL; } if (sta_flags & IEEE80211_STA_DISABLE_VHT) wide_bw_chansw_ie = NULL; if (elems->ext_chansw_ie) { if (!ieee80211_operating_class_to_band( elems->ext_chansw_ie->new_operating_class, &new_band)) { sdata_info(sdata, "cannot understand ECSA IE operating class, %d, ignoring\n", elems->ext_chansw_ie->new_operating_class); } new_chan_no = elems->ext_chansw_ie->new_ch_num; csa_ie->count = elems->ext_chansw_ie->count; csa_ie->mode = elems->ext_chansw_ie->mode; } else if (elems->ch_switch_ie) { new_chan_no = elems->ch_switch_ie->new_ch_num; csa_ie->count = elems->ch_switch_ie->count; csa_ie->mode = elems->ch_switch_ie->mode; } else { /* nothing here we understand */ return 1; } /* Mesh Channel Switch Parameters Element */ if (elems->mesh_chansw_params_ie) { csa_ie->ttl = elems->mesh_chansw_params_ie->mesh_ttl; csa_ie->mode = elems->mesh_chansw_params_ie->mesh_flags; csa_ie->pre_value = le16_to_cpu( elems->mesh_chansw_params_ie->mesh_pre_value); if (elems->mesh_chansw_params_ie->mesh_flags & WLAN_EID_CHAN_SWITCH_PARAM_REASON) csa_ie->reason_code = le16_to_cpu( elems->mesh_chansw_params_ie->mesh_reason); } new_freq = ieee80211_channel_to_frequency(new_chan_no, new_band); new_chan = ieee80211_get_channel(sdata->local->hw.wiphy, new_freq); if (!new_chan || new_chan->flags & IEEE80211_CHAN_DISABLED) { sdata_info(sdata, 
"BSS %pM switches to unsupported channel (%d MHz), disconnecting\n", bssid, new_freq); return -EINVAL; } if (sec_chan_offs) { secondary_channel_offset = sec_chan_offs->sec_chan_offs; } else if (!(sta_flags & IEEE80211_STA_DISABLE_HT)) { /* If the secondary channel offset IE is not present, * we can't know what's the post-CSA offset, so the * best we can do is use 20MHz. */ secondary_channel_offset = IEEE80211_HT_PARAM_CHA_SEC_NONE; } switch (secondary_channel_offset) { default: /* secondary_channel_offset was present but is invalid */ case IEEE80211_HT_PARAM_CHA_SEC_NONE: cfg80211_chandef_create(&csa_ie->chandef, new_chan, NL80211_CHAN_HT20); break; case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: cfg80211_chandef_create(&csa_ie->chandef, new_chan, NL80211_CHAN_HT40PLUS); break; case IEEE80211_HT_PARAM_CHA_SEC_BELOW: cfg80211_chandef_create(&csa_ie->chandef, new_chan, NL80211_CHAN_HT40MINUS); break; case -1: cfg80211_chandef_create(&csa_ie->chandef, new_chan, NL80211_CHAN_NO_HT); /* keep width for 5/10 MHz channels */ switch (sdata->vif.bss_conf.chandef.width) { case NL80211_CHAN_WIDTH_5: case NL80211_CHAN_WIDTH_10: csa_ie->chandef.width = sdata->vif.bss_conf.chandef.width; break; default: break; } break; } if (wide_bw_chansw_ie) { struct ieee80211_vht_operation vht_oper = { .chan_width = wide_bw_chansw_ie->new_channel_width, .center_freq_seg0_idx = wide_bw_chansw_ie->new_center_freq_seg0, .center_freq_seg1_idx = wide_bw_chansw_ie->new_center_freq_seg1, /* .basic_mcs_set doesn't matter */ }; struct ieee80211_ht_operation ht_oper = {}; /* default, for the case of IEEE80211_VHT_CHANWIDTH_USE_HT, * to the previously parsed chandef */ new_vht_chandef = csa_ie->chandef; /* ignore if parsing fails */ if (!ieee80211_chandef_vht_oper(&sdata->local->hw, &vht_oper, &ht_oper, &new_vht_chandef)) new_vht_chandef.chan = NULL; if (sta_flags & IEEE80211_STA_DISABLE_80P80MHZ && new_vht_chandef.width == NL80211_CHAN_WIDTH_80P80) ieee80211_chandef_downgrade(&new_vht_chandef); if (sta_flags & IEEE80211_STA_DISABLE_160MHZ && new_vht_chandef.width == NL80211_CHAN_WIDTH_160) ieee80211_chandef_downgrade(&new_vht_chandef); } /* if VHT data is there validate & use it */ if (new_vht_chandef.chan) { if (!cfg80211_chandef_compatible(&new_vht_chandef, &csa_ie->chandef)) { sdata_info(sdata, "BSS %pM: CSA has inconsistent channel data, disconnecting\n", bssid); return -EINVAL; } csa_ie->chandef = new_vht_chandef; } if (elems->max_channel_switch_time) csa_ie->max_switch_time = (elems->max_channel_switch_time[0] << 0) | (elems->max_channel_switch_time[1] << 8) | (elems->max_channel_switch_time[2] << 16); return 0; } static void ieee80211_send_refuse_measurement_request(struct ieee80211_sub_if_data *sdata, struct ieee80211_msrment_ie *request_ie, const u8 *da, const u8 *bssid, u8 dialog_token) { struct ieee80211_local *local = sdata->local; struct sk_buff *skb; struct ieee80211_mgmt *msr_report; skb = dev_alloc_skb(sizeof(*msr_report) + local->hw.extra_tx_headroom + sizeof(struct ieee80211_msrment_ie)); if (!skb) return; skb_reserve(skb, local->hw.extra_tx_headroom); msr_report = skb_put_zero(skb, 24); memcpy(msr_report->da, da, ETH_ALEN); memcpy(msr_report->sa, sdata->vif.addr, ETH_ALEN); memcpy(msr_report->bssid, bssid, ETH_ALEN); msr_report->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ACTION); skb_put(skb, 1 + sizeof(msr_report->u.action.u.measurement)); msr_report->u.action.category = WLAN_CATEGORY_SPECTRUM_MGMT; msr_report->u.action.u.measurement.action_code = WLAN_ACTION_SPCT_MSR_RPRT; 
msr_report->u.action.u.measurement.dialog_token = dialog_token; msr_report->u.action.u.measurement.element_id = WLAN_EID_MEASURE_REPORT; msr_report->u.action.u.measurement.length = sizeof(struct ieee80211_msrment_ie); memset(&msr_report->u.action.u.measurement.msr_elem, 0, sizeof(struct ieee80211_msrment_ie)); msr_report->u.action.u.measurement.msr_elem.token = request_ie->token; msr_report->u.action.u.measurement.msr_elem.mode |= IEEE80211_SPCT_MSR_RPRT_MODE_REFUSED; msr_report->u.action.u.measurement.msr_elem.type = request_ie->type; ieee80211_tx_skb(sdata, skb); } void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt, size_t len) { /* * Ignoring a measurement request is a spec violation. * Mandatory measurements must be reported, optional * measurements might be refused or reported incapable. * For now just refuse. * TODO: Answer basic measurement as unmeasured */ ieee80211_send_refuse_measurement_request(sdata, &mgmt->u.action.u.measurement.msr_elem, mgmt->sa, mgmt->bssid, mgmt->u.action.u.measurement.dialog_token); } backport-iwlwifi-dkms-7906/net/mac80211/sta_info.c000066400000000000000000002066561351064634500216060ustar00rootroot00000000000000/* * Copyright 2002-2005, Instant802 Networks, Inc. * Copyright 2006-2007 Jiri Benc * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015 - 2017 Intel Deutschland GmbH * Copyright (C) 2018-2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/init.h> #include <linux/etherdevice.h> #include <linux/netdevice.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/skbuff.h> #include <linux/if_arp.h> #include <linux/timer.h> #include <linux/rtnetlink.h> #include <net/codel.h> #include <net/mac80211.h> #include "ieee80211_i.h" #include "driver-ops.h" #include "rate.h" #include "sta_info.h" #include "debugfs_sta.h" #include "mesh.h" #include "wme.h" /** * DOC: STA information lifetime rules * * STA info structures (&struct sta_info) are managed in a hash table * for faster lookup and a list for iteration. They are managed using * RCU, i.e. access to the list and hash table is protected by RCU. * * Upon allocating a STA info structure with sta_info_alloc(), the caller * owns that structure. It must then insert it into the hash table using * either sta_info_insert() or sta_info_insert_rcu(); only in the latter * case (which acquires an rcu read section but must not be called from * within one) will the pointer still be valid after the call. Note that * the caller may not do much with the STA info before inserting it, in * particular, it may not start any mesh peer link management or add * encryption keys. * * When the insertion fails (sta_info_insert() returns non-zero), the * structure will have been freed by sta_info_insert()! * * Station entries are added by mac80211 when you establish a link with a * peer. This means different things for the different types of interfaces * we support. For a regular station this means we add the AP sta when we * receive an association response from the AP. For IBSS this occurs when * we get to know about a peer on the same IBSS. For WDS we add the sta for * the peer immediately upon device open. When using AP mode we add stations * for each respective station upon request from userspace through nl80211. * * In order to remove a STA info structure, various sta_info_destroy_*() * calls are available. * * There is no concept of ownership on a STA entry, each structure is * owned by the global hash table/list until it is removed.
All users of * the structure need to be RCU protected so that the structure won't be * freed before they are done using it. */ static const struct rhashtable_params sta_rht_params = { .nelem_hint = 3, /* start small */ .automatic_shrinking = true, .head_offset = offsetof(struct sta_info, hash_node), .key_offset = offsetof(struct sta_info, addr), .key_len = ETH_ALEN, .max_size = CPTCFG_MAC80211_STA_HASH_MAX_SIZE, }; /* Caller must hold local->sta_mtx */ static int sta_info_hash_del(struct ieee80211_local *local, struct sta_info *sta) { return rhltable_remove(&local->sta_hash, &sta->hash_node, sta_rht_params); } static void __cleanup_single_sta(struct sta_info *sta) { int ac, i; struct tid_ampdu_tx *tid_tx; struct ieee80211_sub_if_data *sdata = sta->sdata; struct ieee80211_local *local = sdata->local; struct fq *fq = &local->fq; struct ps_data *ps; if (test_sta_flag(sta, WLAN_STA_PS_STA) || test_sta_flag(sta, WLAN_STA_PS_DRIVER) || test_sta_flag(sta, WLAN_STA_PS_DELIVER)) { if (sta->sdata->vif.type == NL80211_IFTYPE_AP || sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) ps = &sdata->bss->ps; else if (ieee80211_vif_is_mesh(&sdata->vif)) ps = &sdata->u.mesh.ps; else return; clear_sta_flag(sta, WLAN_STA_PS_STA); clear_sta_flag(sta, WLAN_STA_PS_DRIVER); clear_sta_flag(sta, WLAN_STA_PS_DELIVER); atomic_dec(&ps->num_sta_ps); } if (sta->sta.txq[0]) { for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) { struct txq_info *txqi; if (!sta->sta.txq[i]) continue; txqi = to_txq_info(sta->sta.txq[i]); spin_lock_bh(&fq->lock); ieee80211_txq_purge(local, txqi); spin_unlock_bh(&fq->lock); } } for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { local->total_ps_buffered -= skb_queue_len(&sta->ps_tx_buf[ac]); ieee80211_purge_tx_queue(&local->hw, &sta->ps_tx_buf[ac]); ieee80211_purge_tx_queue(&local->hw, &sta->tx_filtered[ac]); } if (ieee80211_vif_is_mesh(&sdata->vif)) mesh_sta_cleanup(sta); cancel_work_sync(&sta->drv_deliver_wk); /* * Destroy aggregation state here. It would be nice to wait for the * driver to finish aggregation stop and then clean up, but for now * drivers have to handle aggregation stop being requested, followed * directly by station destruction. 
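 * The loop below therefore frees any per-TID aggregation state
 * unconditionally, including frames still pending on a TX session.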
*/ for (i = 0; i < IEEE80211_NUM_TIDS; i++) { kfree(sta->ampdu_mlme.tid_start_tx[i]); tid_tx = rcu_dereference_raw(sta->ampdu_mlme.tid_tx[i]); if (!tid_tx) continue; ieee80211_purge_tx_queue(&local->hw, &tid_tx->pending); kfree(tid_tx); } } static void cleanup_single_sta(struct sta_info *sta) { struct ieee80211_sub_if_data *sdata = sta->sdata; struct ieee80211_local *local = sdata->local; __cleanup_single_sta(sta); sta_info_free(local, sta); } struct rhlist_head *sta_info_hash_lookup(struct ieee80211_local *local, const u8 *addr) { return rhltable_lookup(&local->sta_hash, addr, sta_rht_params); } /* protected by RCU */ struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata, const u8 *addr) { struct ieee80211_local *local = sdata->local; struct rhlist_head *tmp; struct sta_info *sta; rcu_read_lock(); for_each_sta_info(local, addr, sta, tmp) { if (sta->sdata == sdata) { rcu_read_unlock(); /* this is safe as the caller must already hold * another rcu read section or the mutex */ return sta; } } rcu_read_unlock(); return NULL; } /* * Get sta info either from the specified interface * or from one of its vlans */ struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata, const u8 *addr) { struct ieee80211_local *local = sdata->local; struct rhlist_head *tmp; struct sta_info *sta; rcu_read_lock(); for_each_sta_info(local, addr, sta, tmp) { if (sta->sdata == sdata || (sta->sdata->bss && sta->sdata->bss == sdata->bss)) { rcu_read_unlock(); /* this is safe as the caller must already hold * another rcu read section or the mutex */ return sta; } } rcu_read_unlock(); return NULL; } struct sta_info *sta_info_get_by_idx(struct ieee80211_sub_if_data *sdata, int idx) { struct ieee80211_local *local = sdata->local; struct sta_info *sta; int i = 0; list_for_each_entry_rcu(sta, &local->sta_list, list) { if (sdata != sta->sdata) continue; if (i < idx) { ++i; continue; } return sta; } return NULL; } /** * sta_info_free - free STA * * @local: pointer to the global information * @sta: STA info to free * * This function must undo everything done by sta_info_alloc() * that may happen before sta_info_insert(). It may only be * called when sta_info_insert() has not been attempted (and * if that fails, the station is freed anyway.) 
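 *
 * A minimal error-path sketch (hypothetical caller, for illustration
 * only):
 *
 *	sta = sta_info_alloc(sdata, addr, GFP_KERNEL);
 *	if (!sta)
 *		return -ENOMEM;
 *	if (some_setup_step_failed)
 *		sta_info_free(local, sta);
 *
 * where some_setup_step_failed stands in for any failure that happens
 * before sta_info_insert() is attempted.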
*/ void sta_info_free(struct ieee80211_local *local, struct sta_info *sta) { if (sta->rate_ctrl) rate_control_free_sta(sta); #ifdef CPTCFG_MAC80211_LATENCY_MEASUREMENTS if (sta->tx_lat) { int i; for (i = 0; i < IEEE80211_NUM_TIDS; i++) kfree(sta->tx_lat[i].bins); kfree(sta->tx_lat); } if (sta->tx_consec) { int i; for (i = 0; i < IEEE80211_NUM_TIDS; i++) { kfree(sta->tx_consec[i].late_bins); kfree(sta->tx_consec[i].loss_bins); kfree(sta->tx_consec[i].total_loss_bins); } kfree(sta->tx_consec); } #endif /* CPTCFG_MAC80211_LATENCY_MEASUREMENTS */ sta_dbg(sta->sdata, "Destroyed STA %pM\n", sta->sta.addr); if (sta->sta.txq[0]) kfree(to_txq_info(sta->sta.txq[0])); kfree(rcu_dereference_raw(sta->sta.rates)); #ifdef CPTCFG_MAC80211_MESH kfree(sta->mesh); #endif free_percpu(sta->pcpu_rx_stats); kfree(sta); } /* Caller must hold local->sta_mtx */ static int sta_info_hash_add(struct ieee80211_local *local, struct sta_info *sta) { return rhltable_insert(&local->sta_hash, &sta->hash_node, sta_rht_params); } static void sta_deliver_ps_frames(struct work_struct *wk) { struct sta_info *sta; sta = container_of(wk, struct sta_info, drv_deliver_wk); if (sta->dead) return; local_bh_disable(); if (!test_sta_flag(sta, WLAN_STA_PS_STA)) ieee80211_sta_ps_deliver_wakeup(sta); else if (test_and_clear_sta_flag(sta, WLAN_STA_PSPOLL)) ieee80211_sta_ps_deliver_poll_response(sta); else if (test_and_clear_sta_flag(sta, WLAN_STA_UAPSD)) ieee80211_sta_ps_deliver_uapsd(sta); local_bh_enable(); } static int sta_prepare_rate_control(struct ieee80211_local *local, struct sta_info *sta, gfp_t gfp) { if (ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL)) return 0; sta->rate_ctrl = local->rate_ctrl; sta->rate_ctrl_priv = rate_control_alloc_sta(sta->rate_ctrl, sta, gfp); if (!sta->rate_ctrl_priv) return -ENOMEM; return 0; } struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, const u8 *addr, gfp_t gfp) { struct ieee80211_local *local = sdata->local; struct ieee80211_hw *hw = &local->hw; struct sta_info *sta; #ifdef CPTCFG_MAC80211_LATENCY_MEASUREMENTS struct ieee80211_tx_latency_bin_ranges *tx_latency; struct ieee80211_tx_latency_threshold *tx_threshold; struct ieee80211_tx_consec_loss_ranges *tx_consec; #endif int i; sta = kzalloc(sizeof(*sta) + hw->sta_data_size, gfp); if (!sta) return NULL; if (ieee80211_hw_check(hw, USES_RSS)) { sta->pcpu_rx_stats = alloc_percpu_gfp(struct ieee80211_sta_rx_stats, gfp); if (!sta->pcpu_rx_stats) goto free; } spin_lock_init(&sta->lock); spin_lock_init(&sta->ps_lock); INIT_WORK(&sta->drv_deliver_wk, sta_deliver_ps_frames); INIT_WORK(&sta->ampdu_mlme.work, ieee80211_ba_session_work); mutex_init(&sta->ampdu_mlme.mtx); #ifdef CPTCFG_MAC80211_MESH if (ieee80211_vif_is_mesh(&sdata->vif)) { sta->mesh = kzalloc(sizeof(*sta->mesh), gfp); if (!sta->mesh) goto free; sta->mesh->plink_sta = sta; spin_lock_init(&sta->mesh->plink_lock); if (ieee80211_vif_is_mesh(&sdata->vif) && !sdata->u.mesh.user_mpm) timer_setup(&sta->mesh->plink_timer, mesh_plink_timer, 0); sta->mesh->nonpeer_pm = NL80211_MESH_POWER_ACTIVE; } #endif memcpy(sta->addr, addr, ETH_ALEN); memcpy(sta->sta.addr, addr, ETH_ALEN); sta->sta.max_rx_aggregation_subframes = local->hw.max_rx_aggregation_subframes; sta->local = local; sta->sdata = sdata; sta->rx_stats.last_rx = jiffies; u64_stats_init(&sta->rx_stats.syncp); sta->sta_state = IEEE80211_STA_NONE; /* Mark TID as unreserved */ sta->reserved_tid = IEEE80211_TID_UNRESERVED; sta->last_connected = ktime_get_seconds(); ewma_signal_init(&sta->rx_stats_avg.signal); 
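/* also seed the ACK-signal and per-chain signal averages */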
ewma_avg_signal_init(&sta->status_stats.avg_ack_signal); for (i = 0; i < ARRAY_SIZE(sta->rx_stats_avg.chain_signal); i++) ewma_signal_init(&sta->rx_stats_avg.chain_signal[i]); if (local->ops->wake_tx_queue) { void *txq_data; int size = sizeof(struct txq_info) + ALIGN(hw->txq_data_size, sizeof(void *)); txq_data = kcalloc(ARRAY_SIZE(sta->sta.txq), size, gfp); if (!txq_data) goto free; for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) { struct txq_info *txq = txq_data + i * size; /* might not do anything for the bufferable MMPDU TXQ */ ieee80211_txq_init(sdata, sta, txq, i); } } #ifdef CPTCFG_MAC80211_LATENCY_MEASUREMENTS rcu_read_lock(); tx_latency = rcu_dereference(local->tx_latency); tx_threshold = rcu_dereference(local->tx_threshold); /* init stations Tx latency statistics && TID bins */ if (tx_latency) { sta->tx_lat = kzalloc(IEEE80211_NUM_TIDS * sizeof(struct ieee80211_tx_latency_stat), GFP_ATOMIC); if (!sta->tx_lat) { rcu_read_unlock(); goto free_txq; } if (tx_latency->n_ranges) { for (i = 0; i < IEEE80211_NUM_TIDS; i++) { /* size of bins is size of the ranges +1 */ sta->tx_lat[i].bin_count = tx_latency->n_ranges + 1; sta->tx_lat[i].bins = kcalloc(sta->tx_lat[i].bin_count, sizeof(u32), GFP_ATOMIC); if (!sta->tx_lat[i].bins) { rcu_read_unlock(); goto free_txq; } } } } if (tx_threshold) { if (!sdata->vif.p2p) sta->tx_lat_thrshld = tx_threshold->thresholds_bss; else sta->tx_lat_thrshld = tx_threshold->thresholds_p2p; } tx_consec = rcu_dereference(local->tx_consec); /* init stations Tx consecutive loss statistics */ if (tx_consec) { size_t size = sizeof(struct ieee80211_tx_consec_loss_stat); sta->tx_consec = kzalloc(IEEE80211_NUM_TIDS * size, GFP_ATOMIC); if (!sta->tx_consec) { rcu_read_unlock(); goto free_txq; } for (i = 0; i < IEEE80211_NUM_TIDS; i++) { sta->tx_consec[i].bin_count = tx_consec->n_ranges; sta->tx_consec[i].loss_bins = kcalloc(sta->tx_consec[i].bin_count, sizeof(u32), GFP_ATOMIC); sta->tx_consec[i].late_bins = kcalloc(sta->tx_consec[i].bin_count, sizeof(u32), GFP_ATOMIC); sta->tx_consec[i].total_loss_bins = kcalloc(sta->tx_consec[i].bin_count, sizeof(u32), GFP_ATOMIC); if (!sta->tx_consec[i].loss_bins || !sta->tx_consec[i].late_bins || !sta->tx_consec[i].total_loss_bins) { rcu_read_unlock(); goto free_txq; } } } rcu_read_unlock(); #endif /* CPTCFG_MAC80211_LATENCY_MEASUREMENTS */ if (sta_prepare_rate_control(local, sta, gfp)) goto free_txq; for (i = 0; i < IEEE80211_NUM_ACS; i++) { skb_queue_head_init(&sta->ps_tx_buf[i]); skb_queue_head_init(&sta->tx_filtered[i]); } for (i = 0; i < IEEE80211_NUM_TIDS; i++) sta->last_seq_ctrl[i] = cpu_to_le16(USHRT_MAX); for (i = 0; i < NUM_NL80211_BANDS; i++) { u32 mandatory = 0; int r; if (!hw->wiphy->bands[i]) continue; switch (i) { case NL80211_BAND_2GHZ: /* * We use both here, even if we cannot really know for * sure the station will support both, but the only use * for this is when we don't know anything yet and send * management frames, and then we'll pick the lowest * possible rate anyway. 
* If we don't include _G here, we cannot find a rate * in P2P, and thus trigger the WARN_ONCE() in rate.c */ mandatory = IEEE80211_RATE_MANDATORY_B | IEEE80211_RATE_MANDATORY_G; break; case NL80211_BAND_5GHZ: mandatory = IEEE80211_RATE_MANDATORY_A; break; case NL80211_BAND_60GHZ: WARN_ON(1); mandatory = 0; break; } for (r = 0; r < hw->wiphy->bands[i]->n_bitrates; r++) { struct ieee80211_rate *rate; rate = &hw->wiphy->bands[i]->bitrates[r]; if (!(rate->flags & mandatory)) continue; sta->sta.supp_rates[i] |= BIT(r); } } sta->sta.smps_mode = IEEE80211_SMPS_OFF; if (sdata->vif.type == NL80211_IFTYPE_AP || sdata->vif.type == NL80211_IFTYPE_AP_VLAN) { struct ieee80211_supported_band *sband; u8 smps; sband = ieee80211_get_sband(sdata); if (!sband) goto free_txq; smps = (sband->ht_cap.cap & IEEE80211_HT_CAP_SM_PS) >> IEEE80211_HT_CAP_SM_PS_SHIFT; /* * Assume that hostapd advertises our caps in the beacon and * this is the known_smps_mode for a station that just associated */ switch (smps) { case WLAN_HT_SMPS_CONTROL_DISABLED: sta->known_smps_mode = IEEE80211_SMPS_OFF; break; case WLAN_HT_SMPS_CONTROL_STATIC: sta->known_smps_mode = IEEE80211_SMPS_STATIC; break; case WLAN_HT_SMPS_CONTROL_DYNAMIC: sta->known_smps_mode = IEEE80211_SMPS_DYNAMIC; break; default: WARN_ON(1); } } sta->sta.max_rc_amsdu_len = IEEE80211_MAX_MPDU_LEN_HT_BA; sta->cparams.ce_threshold = CODEL_DISABLED_THRESHOLD; sta->cparams.target = MS2TIME(20); sta->cparams.interval = MS2TIME(100); sta->cparams.ecn = true; sta_dbg(sdata, "Allocated STA %pM\n", sta->sta.addr); return sta; free_txq: if (sta->sta.txq[0]) kfree(to_txq_info(sta->sta.txq[0])); #ifdef CPTCFG_MAC80211_LATENCY_MEASUREMENTS if (sta->tx_lat) { for (i = 0; i < IEEE80211_NUM_TIDS; i++) kfree(sta->tx_lat[i].bins); kfree(sta->tx_lat); } if (sta->tx_consec) { for (i = 0; i < IEEE80211_NUM_TIDS; i++) { kfree(sta->tx_consec[i].late_bins); kfree(sta->tx_consec[i].loss_bins); kfree(sta->tx_consec[i].total_loss_bins); } kfree(sta->tx_consec); } #endif /* CPTCFG_MAC80211_LATENCY_MEASUREMENTS */ free: free_percpu(sta->pcpu_rx_stats); #ifdef CPTCFG_MAC80211_MESH kfree(sta->mesh); #endif kfree(sta); return NULL; } static int sta_info_insert_check(struct sta_info *sta) { struct ieee80211_sub_if_data *sdata = sta->sdata; /* * Can't be a WARN_ON because it can be triggered through a race: * something inserts a STA (on one CPU) without holding the RTNL * and another CPU turns off the net device. */ if (unlikely(!ieee80211_sdata_running(sdata))) return -ENETDOWN; if (WARN_ON(ether_addr_equal(sta->sta.addr, sdata->vif.addr) || is_multicast_ether_addr(sta->sta.addr))) return -EINVAL; /* The RCU read lock is required by rhashtable due to * asynchronous resize/rehash. We also require the mutex * for correctness. */ rcu_read_lock(); lockdep_assert_held(&sdata->local->sta_mtx); if (ieee80211_hw_check(&sdata->local->hw, NEEDS_UNIQUE_STA_ADDR) && ieee80211_find_sta_by_ifaddr(&sdata->local->hw, sta->addr, NULL)) { rcu_read_unlock(); return -ENOTUNIQ; } rcu_read_unlock(); return 0; } static int sta_info_insert_drv_state(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct sta_info *sta) { enum ieee80211_sta_state state; int err = 0; for (state = IEEE80211_STA_NOTEXIST; state < sta->sta_state; state++) { err = drv_sta_state(local, sdata, sta, state, state + 1); if (err) break; } if (!err) { /* * Drivers using legacy sta_add/sta_remove callbacks only * get uploaded set to true after sta_add is called. 
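 * Drivers that implement sta_state instead are considered to have the
 * station uploaded once any transition out of NOTEXIST succeeded,
 * which is what the check below reflects.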
*/ if (!local->ops->sta_add) sta->uploaded = true; return 0; } if (sdata->vif.type == NL80211_IFTYPE_ADHOC) { sdata_info(sdata, "failed to move IBSS STA %pM to state %d (%d) - keeping it anyway\n", sta->sta.addr, state + 1, err); err = 0; } /* unwind on error */ for (; state > IEEE80211_STA_NOTEXIST; state--) WARN_ON(drv_sta_state(local, sdata, sta, state, state - 1)); return err; } static void ieee80211_recalc_p2p_go_ps_allowed(struct ieee80211_sub_if_data *sdata) { struct ieee80211_local *local = sdata->local; bool allow_p2p_go_ps = sdata->vif.p2p; struct sta_info *sta; rcu_read_lock(); list_for_each_entry_rcu(sta, &local->sta_list, list) { if (sdata != sta->sdata || !test_sta_flag(sta, WLAN_STA_ASSOC)) continue; if (!sta->sta.support_p2p_ps) { allow_p2p_go_ps = false; break; } } rcu_read_unlock(); if (allow_p2p_go_ps != sdata->vif.bss_conf.allow_p2p_go_ps) { sdata->vif.bss_conf.allow_p2p_go_ps = allow_p2p_go_ps; ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_P2P_PS); } } /* * should be called with sta_mtx locked * this function replaces the mutex lock * with a RCU lock */ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU) { struct ieee80211_local *local = sta->local; struct ieee80211_sub_if_data *sdata = sta->sdata; struct station_info *sinfo = NULL; int err = 0; lockdep_assert_held(&local->sta_mtx); /* check if STA exists already */ if (sta_info_get_bss(sdata, sta->sta.addr)) { err = -EEXIST; goto out_err; } sinfo = kzalloc(sizeof(struct station_info), GFP_KERNEL); if (!sinfo) { err = -ENOMEM; goto out_err; } local->num_sta++; local->sta_generation++; smp_mb(); /* simplify things and don't accept BA sessions yet */ set_sta_flag(sta, WLAN_STA_BLOCK_BA); /* make the station visible */ err = sta_info_hash_add(local, sta); if (err) goto out_drop_sta; list_add_tail_rcu(&sta->list, &local->sta_list); /* notify driver */ err = sta_info_insert_drv_state(local, sdata, sta); if (err) goto out_remove; set_sta_flag(sta, WLAN_STA_INSERTED); if (sta->sta_state >= IEEE80211_STA_ASSOC) { ieee80211_recalc_min_chandef(sta->sdata); if (!sta->sta.support_p2p_ps) ieee80211_recalc_p2p_go_ps_allowed(sta->sdata); } /* accept BA sessions now */ clear_sta_flag(sta, WLAN_STA_BLOCK_BA); ieee80211_sta_debugfs_add(sta); rate_control_add_sta_debugfs(sta); sinfo->generation = local->sta_generation; cfg80211_new_sta(sdata->dev, sta->sta.addr, sinfo, GFP_KERNEL); kfree(sinfo); sta_dbg(sdata, "Inserted STA %pM\n", sta->sta.addr); /* move reference to rcu-protected */ rcu_read_lock(); mutex_unlock(&local->sta_mtx); if (ieee80211_vif_is_mesh(&sdata->vif)) mesh_accept_plinks_update(sdata); return 0; out_remove: sta_info_hash_del(local, sta); list_del_rcu(&sta->list); out_drop_sta: local->num_sta--; synchronize_net(); __cleanup_single_sta(sta); out_err: mutex_unlock(&local->sta_mtx); kfree(sinfo); rcu_read_lock(); return err; } int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU) { struct ieee80211_local *local = sta->local; int err; might_sleep(); mutex_lock(&local->sta_mtx); err = sta_info_insert_check(sta); if (err) { mutex_unlock(&local->sta_mtx); rcu_read_lock(); goto out_free; } err = sta_info_insert_finish(sta); if (err) goto out_free; return 0; out_free: sta_info_free(local, sta); return err; } int sta_info_insert(struct sta_info *sta) { int err = sta_info_insert_rcu(sta); rcu_read_unlock(); return err; } static inline void __bss_tim_set(u8 *tim, u16 id) { /* * This format has been mandated by the IEEE specifications, * so this line may not be changed to use the __set_bit() format. 
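 *
 * Worked example: AID 13 maps to octet 13 / 8 = 1 and bit 13 % 8 = 5,
 * i.e. tim[1] |= 0x20.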
*/ tim[id / 8] |= (1 << (id % 8)); } static inline void __bss_tim_clear(u8 *tim, u16 id) { /* * This format has been mandated by the IEEE specifications, * so this line may not be changed to use the __clear_bit() format. */ tim[id / 8] &= ~(1 << (id % 8)); } static inline bool __bss_tim_get(u8 *tim, u16 id) { /* * This format has been mandated by the IEEE specifications, * so this line may not be changed to use the test_bit() format. */ return tim[id / 8] & (1 << (id % 8)); } static unsigned long ieee80211_tids_for_ac(int ac) { /* If we ever support TIDs > 7, this obviously needs to be adjusted */ switch (ac) { case IEEE80211_AC_VO: return BIT(6) | BIT(7); case IEEE80211_AC_VI: return BIT(4) | BIT(5); case IEEE80211_AC_BE: return BIT(0) | BIT(3); case IEEE80211_AC_BK: return BIT(1) | BIT(2); default: WARN_ON(1); return 0; } } static void __sta_info_recalc_tim(struct sta_info *sta, bool ignore_pending) { struct ieee80211_local *local = sta->local; struct ps_data *ps; bool indicate_tim = false; u8 ignore_for_tim = sta->sta.uapsd_queues; int ac; u16 id = sta->sta.aid; if (sta->sdata->vif.type == NL80211_IFTYPE_AP || sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) { if (WARN_ON_ONCE(!sta->sdata->bss)) return; ps = &sta->sdata->bss->ps; #ifdef CPTCFG_MAC80211_MESH } else if (ieee80211_vif_is_mesh(&sta->sdata->vif)) { ps = &sta->sdata->u.mesh.ps; #endif } else { return; } /* No need to do anything if the driver does all */ if (ieee80211_hw_check(&local->hw, AP_LINK_PS) && !local->ops->set_tim) return; if (sta->dead) goto done; /* * If all ACs are delivery-enabled then we should build * the TIM bit for all ACs anyway; if only some are then * we ignore those and build the TIM bit using only the * non-enabled ones. */ if (ignore_for_tim == BIT(IEEE80211_NUM_ACS) - 1) ignore_for_tim = 0; if (ignore_pending) ignore_for_tim = BIT(IEEE80211_NUM_ACS) - 1; for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { unsigned long tids; if (ignore_for_tim & ieee80211_ac_to_qos_mask[ac]) continue; indicate_tim |= !skb_queue_empty(&sta->tx_filtered[ac]) || !skb_queue_empty(&sta->ps_tx_buf[ac]); if (indicate_tim) break; tids = ieee80211_tids_for_ac(ac); indicate_tim |= sta->driver_buffered_tids & tids; indicate_tim |= sta->txq_buffered_tids & tids; } done: spin_lock_bh(&local->tim_lock); if (indicate_tim == __bss_tim_get(ps->tim, id)) goto out_unlock; if (indicate_tim) __bss_tim_set(ps->tim, id); else __bss_tim_clear(ps->tim, id); if (local->ops->set_tim && !WARN_ON(sta->dead)) { local->tim_in_locked_section = true; drv_set_tim(local, &sta->sta, indicate_tim); local->tim_in_locked_section = false; } out_unlock: spin_unlock_bh(&local->tim_lock); } void sta_info_recalc_tim(struct sta_info *sta) { __sta_info_recalc_tim(sta, false); } static bool sta_info_buffer_expired(struct sta_info *sta, struct sk_buff *skb) { struct ieee80211_tx_info *info; int timeout; if (!skb) return false; info = IEEE80211_SKB_CB(skb); /* Timeout: (2 * listen_interval * beacon_int * 1024 / 1000000) sec */ timeout = (sta->listen_interval * sta->sdata->vif.bss_conf.beacon_int * 32 / 15625) * HZ; if (timeout < STA_TX_BUFFER_EXPIRE) timeout = STA_TX_BUFFER_EXPIRE; return time_after(jiffies, info->control.jiffies + timeout); } static bool sta_info_cleanup_expire_buffered_ac(struct ieee80211_local *local, struct sta_info *sta, int ac) { unsigned long flags; struct sk_buff *skb; /* * First check for frames that should expire on the filtered * queue. 
Frames here were rejected by the driver and are on * a separate queue to avoid reordering with normal PS-buffered * frames. They also aren't accounted for right now in the * total_ps_buffered counter. */ for (;;) { spin_lock_irqsave(&sta->tx_filtered[ac].lock, flags); skb = skb_peek(&sta->tx_filtered[ac]); if (sta_info_buffer_expired(sta, skb)) skb = __skb_dequeue(&sta->tx_filtered[ac]); else skb = NULL; spin_unlock_irqrestore(&sta->tx_filtered[ac].lock, flags); /* * Frames are queued in order, so if this one * hasn't expired yet we can stop testing. If * we actually reached the end of the queue we * also need to stop, of course. */ if (!skb) break; ieee80211_free_txskb(&local->hw, skb); } /* * Now also check the normal PS-buffered queue, this will * only find something if the filtered queue was emptied * since the filtered frames are all before the normal PS * buffered frames. */ for (;;) { spin_lock_irqsave(&sta->ps_tx_buf[ac].lock, flags); skb = skb_peek(&sta->ps_tx_buf[ac]); if (sta_info_buffer_expired(sta, skb)) skb = __skb_dequeue(&sta->ps_tx_buf[ac]); else skb = NULL; spin_unlock_irqrestore(&sta->ps_tx_buf[ac].lock, flags); /* * frames are queued in order, so if this one * hasn't expired yet (or we reached the end of * the queue) we can stop testing */ if (!skb) break; local->total_ps_buffered--; ps_dbg(sta->sdata, "Buffered frame expired (STA %pM)\n", sta->sta.addr); ieee80211_free_txskb(&local->hw, skb); } /* * Finally, recalculate the TIM bit for this station -- it might * now be clear because the station was too slow to retrieve its * frames. */ sta_info_recalc_tim(sta); /* * Return whether there are any frames still buffered, this is * used to check whether the cleanup timer still needs to run, * if there are no frames we don't need to rearm the timer. */ return !(skb_queue_empty(&sta->ps_tx_buf[ac]) && skb_queue_empty(&sta->tx_filtered[ac])); } static bool sta_info_cleanup_expire_buffered(struct ieee80211_local *local, struct sta_info *sta) { bool have_buffered = false; int ac; /* This is only necessary for stations on BSS/MBSS interfaces */ if (!sta->sdata->bss && !ieee80211_vif_is_mesh(&sta->sdata->vif)) return false; for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) have_buffered |= sta_info_cleanup_expire_buffered_ac(local, sta, ac); return have_buffered; } static int __must_check __sta_info_destroy_part1(struct sta_info *sta) { struct ieee80211_local *local; struct ieee80211_sub_if_data *sdata; int ret; might_sleep(); if (!sta) return -ENOENT; local = sta->local; sdata = sta->sdata; lockdep_assert_held(&local->sta_mtx); /* * Before removing the station from the driver and * rate control, it might still start new aggregation * sessions -- block that to make sure the tear-down * will be sufficient. */ set_sta_flag(sta, WLAN_STA_BLOCK_BA); ieee80211_sta_tear_down_BA_sessions(sta, AGG_STOP_DESTROY_STA); /* * Before removing the station from the driver there might be pending * rx frames on RSS queues sent prior to the disassociation - wait for * all such frames to be processed. */ drv_sync_rx_queues(local, sta); ret = sta_info_hash_del(local, sta); if (WARN_ON(ret)) return ret; /* * for TDLS peers, make sure to return to the base channel before * removal. 
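 * Otherwise the device could be left parked on the TDLS off-channel
 * with the peer entry already gone.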
*/ if (test_sta_flag(sta, WLAN_STA_TDLS_OFF_CHANNEL)) { drv_tdls_cancel_channel_switch(local, sdata, &sta->sta); clear_sta_flag(sta, WLAN_STA_TDLS_OFF_CHANNEL); } list_del_rcu(&sta->list); sta->removed = true; drv_sta_pre_rcu_remove(local, sta->sdata, sta); if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN && rcu_access_pointer(sdata->u.vlan.sta) == sta) RCU_INIT_POINTER(sdata->u.vlan.sta, NULL); return 0; } static void __sta_info_destroy_part2(struct sta_info *sta) { struct ieee80211_local *local = sta->local; struct ieee80211_sub_if_data *sdata = sta->sdata; struct station_info *sinfo; int ret; /* * NOTE: This assumes at least synchronize_net() was done * after _part1 and before _part2! */ might_sleep(); lockdep_assert_held(&local->sta_mtx); /* now keys can no longer be reached */ ieee80211_free_sta_keys(local, sta); /* disable TIM bit - last chance to tell driver */ __sta_info_recalc_tim(sta, true); sta->dead = true; local->num_sta--; local->sta_generation++; while (sta->sta_state > IEEE80211_STA_NONE) { ret = sta_info_move_state(sta, sta->sta_state - 1); if (ret) { WARN_ON_ONCE(1); break; } } if (sta->uploaded) { ret = drv_sta_state(local, sdata, sta, IEEE80211_STA_NONE, IEEE80211_STA_NOTEXIST); WARN_ON_ONCE(ret != 0); } sta_dbg(sdata, "Removed STA %pM\n", sta->sta.addr); sinfo = kzalloc(sizeof(*sinfo), GFP_KERNEL); if (sinfo) sta_set_sinfo(sta, sinfo, true); cfg80211_del_sta_sinfo(sdata->dev, sta->sta.addr, sinfo, GFP_KERNEL); kfree(sinfo); rate_control_remove_sta_debugfs(sta); ieee80211_sta_debugfs_remove(sta); cleanup_single_sta(sta); } int __must_check __sta_info_destroy(struct sta_info *sta) { int err = __sta_info_destroy_part1(sta); if (err) return err; synchronize_net(); __sta_info_destroy_part2(sta); return 0; } int sta_info_destroy_addr(struct ieee80211_sub_if_data *sdata, const u8 *addr) { struct sta_info *sta; int ret; mutex_lock(&sdata->local->sta_mtx); sta = sta_info_get(sdata, addr); ret = __sta_info_destroy(sta); mutex_unlock(&sdata->local->sta_mtx); return ret; } int sta_info_destroy_addr_bss(struct ieee80211_sub_if_data *sdata, const u8 *addr) { struct sta_info *sta; int ret; mutex_lock(&sdata->local->sta_mtx); sta = sta_info_get_bss(sdata, addr); ret = __sta_info_destroy(sta); mutex_unlock(&sdata->local->sta_mtx); return ret; } static void sta_info_cleanup(struct timer_list *t) { struct ieee80211_local *local = from_timer(local, t, sta_cleanup); struct sta_info *sta; bool timer_needed = false; rcu_read_lock(); list_for_each_entry_rcu(sta, &local->sta_list, list) if (sta_info_cleanup_expire_buffered(local, sta)) timer_needed = true; rcu_read_unlock(); if (local->quiescing) return; if (!timer_needed) return; mod_timer(&local->sta_cleanup, round_jiffies(jiffies + STA_INFO_CLEANUP_INTERVAL)); } int sta_info_init(struct ieee80211_local *local) { int err; err = rhltable_init(&local->sta_hash, &sta_rht_params); if (err) return err; spin_lock_init(&local->tim_lock); mutex_init(&local->sta_mtx); INIT_LIST_HEAD(&local->sta_list); timer_setup(&local->sta_cleanup, sta_info_cleanup, 0); return 0; } void sta_info_stop(struct ieee80211_local *local) { del_timer_sync(&local->sta_cleanup); rhltable_destroy(&local->sta_hash); } int __sta_info_flush(struct ieee80211_sub_if_data *sdata, bool vlans) { struct ieee80211_local *local = sdata->local; struct sta_info *sta, *tmp; LIST_HEAD(free_list); int ret = 0; might_sleep(); WARN_ON(vlans && sdata->vif.type != NL80211_IFTYPE_AP); WARN_ON(vlans && !sdata->bss); mutex_lock(&local->sta_mtx); list_for_each_entry_safe(sta, tmp, &local->sta_list, 
list) { if (sdata == sta->sdata || (vlans && sdata->bss == sta->sdata->bss)) { if (!WARN_ON(__sta_info_destroy_part1(sta))) list_add(&sta->free_list, &free_list); ret++; } } if (!list_empty(&free_list)) { synchronize_net(); list_for_each_entry_safe(sta, tmp, &free_list, free_list) __sta_info_destroy_part2(sta); } mutex_unlock(&local->sta_mtx); return ret; } void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata, unsigned long exp_time) { struct ieee80211_local *local = sdata->local; struct sta_info *sta, *tmp; mutex_lock(&local->sta_mtx); list_for_each_entry_safe(sta, tmp, &local->sta_list, list) { unsigned long last_active = ieee80211_sta_last_active(sta); if (sdata != sta->sdata) continue; if (time_is_before_jiffies(last_active + exp_time)) { sta_dbg(sta->sdata, "expiring inactive STA %pM\n", sta->sta.addr); if (ieee80211_vif_is_mesh(&sdata->vif) && test_sta_flag(sta, WLAN_STA_PS_STA)) atomic_dec(&sdata->u.mesh.ps.num_sta_ps); WARN_ON(__sta_info_destroy(sta)); } } mutex_unlock(&local->sta_mtx); } struct ieee80211_sta *ieee80211_find_sta_by_ifaddr(struct ieee80211_hw *hw, const u8 *addr, const u8 *localaddr) { struct ieee80211_local *local = hw_to_local(hw); struct rhlist_head *tmp; struct sta_info *sta; /* * Just return a random station if localaddr is NULL * ... first in list. */ for_each_sta_info(local, addr, sta, tmp) { if (localaddr && !ether_addr_equal(sta->sdata->vif.addr, localaddr)) continue; if (!sta->uploaded) return NULL; return &sta->sta; } return NULL; } EXPORT_SYMBOL_GPL(ieee80211_find_sta_by_ifaddr); struct ieee80211_sta *ieee80211_find_sta(struct ieee80211_vif *vif, const u8 *addr) { struct sta_info *sta; if (!vif) return NULL; sta = sta_info_get_bss(vif_to_sdata(vif), addr); if (!sta) return NULL; if (!sta->uploaded) return NULL; return &sta->sta; } EXPORT_SYMBOL(ieee80211_find_sta); /* powersave support code */ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta) { struct ieee80211_sub_if_data *sdata = sta->sdata; struct ieee80211_local *local = sdata->local; struct sk_buff_head pending; int filtered = 0, buffered = 0, ac, i; unsigned long flags; struct ps_data *ps; if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) sdata = container_of(sdata->bss, struct ieee80211_sub_if_data, u.ap); if (sdata->vif.type == NL80211_IFTYPE_AP) ps = &sdata->bss->ps; else if (ieee80211_vif_is_mesh(&sdata->vif)) ps = &sdata->u.mesh.ps; else return; clear_sta_flag(sta, WLAN_STA_SP); BUILD_BUG_ON(BITS_TO_LONGS(IEEE80211_NUM_TIDS) > 1); sta->driver_buffered_tids = 0; sta->txq_buffered_tids = 0; if (!ieee80211_hw_check(&local->hw, AP_LINK_PS)) drv_sta_notify(local, sdata, STA_NOTIFY_AWAKE, &sta->sta); for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) { if (!sta->sta.txq[i] || !txq_has_queue(sta->sta.txq[i])) continue; drv_wake_tx_queue(local, to_txq_info(sta->sta.txq[i])); } skb_queue_head_init(&pending); /* sync with ieee80211_tx_h_unicast_ps_buf */ spin_lock(&sta->ps_lock); /* Send all buffered frames to the station */ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { int count = skb_queue_len(&pending), tmp; spin_lock_irqsave(&sta->tx_filtered[ac].lock, flags); skb_queue_splice_tail_init(&sta->tx_filtered[ac], &pending); spin_unlock_irqrestore(&sta->tx_filtered[ac].lock, flags); tmp = skb_queue_len(&pending); filtered += tmp - count; count = tmp; spin_lock_irqsave(&sta->ps_tx_buf[ac].lock, flags); skb_queue_splice_tail_init(&sta->ps_tx_buf[ac], &pending); spin_unlock_irqrestore(&sta->ps_tx_buf[ac].lock, flags); tmp = skb_queue_len(&pending); buffered += tmp - count; } 
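/*
 * pending now holds, for each AC, the filtered frames followed by the
 * PS-buffered ones; re-inject them into the TX path in that order.
 */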
ieee80211_add_pending_skbs(local, &pending); /* now we're no longer in the deliver code */ clear_sta_flag(sta, WLAN_STA_PS_DELIVER); /* The station might have polled and then woken up before we responded, * so clear these flags now to avoid them sticking around. */ clear_sta_flag(sta, WLAN_STA_PSPOLL); clear_sta_flag(sta, WLAN_STA_UAPSD); spin_unlock(&sta->ps_lock); atomic_dec(&ps->num_sta_ps); /* This station just woke up and isn't aware of our SMPS state */ if (!ieee80211_vif_is_mesh(&sdata->vif) && !ieee80211_smps_is_restrictive(sta->known_smps_mode, sdata->smps_mode) && sta->known_smps_mode != sdata->bss->req_smps && sta_info_tx_streams(sta) != 1) { ht_dbg(sdata, "%pM just woke up and MIMO capable - update SMPS\n", sta->sta.addr); ieee80211_send_smps_action(sdata, sdata->bss->req_smps, sta->sta.addr, sdata->vif.bss_conf.bssid); } local->total_ps_buffered -= buffered; sta_info_recalc_tim(sta); ps_dbg(sdata, "STA %pM aid %d sending %d filtered/%d PS frames since STA woke up\n", sta->sta.addr, sta->sta.aid, filtered, buffered); ieee80211_check_fast_xmit(sta); } static void ieee80211_send_null_response(struct sta_info *sta, int tid, enum ieee80211_frame_release_type reason, bool call_driver, bool more_data) { struct ieee80211_sub_if_data *sdata = sta->sdata; struct ieee80211_local *local = sdata->local; struct ieee80211_qos_hdr *nullfunc; struct sk_buff *skb; int size = sizeof(*nullfunc); __le16 fc; bool qos = sta->sta.wme; struct ieee80211_tx_info *info; struct ieee80211_chanctx_conf *chanctx_conf; /* Don't send NDPs when STA is connected HE */ if (sdata->vif.type == NL80211_IFTYPE_STATION && !(sdata->u.mgd.flags & IEEE80211_STA_DISABLE_HE)) return; if (qos) { fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_NULLFUNC | IEEE80211_FCTL_FROMDS); } else { size -= 2; fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC | IEEE80211_FCTL_FROMDS); } skb = dev_alloc_skb(local->hw.extra_tx_headroom + size); if (!skb) return; skb_reserve(skb, local->hw.extra_tx_headroom); nullfunc = skb_put(skb, size); nullfunc->frame_control = fc; nullfunc->duration_id = 0; memcpy(nullfunc->addr1, sta->sta.addr, ETH_ALEN); memcpy(nullfunc->addr2, sdata->vif.addr, ETH_ALEN); memcpy(nullfunc->addr3, sdata->vif.addr, ETH_ALEN); nullfunc->seq_ctrl = 0; skb->priority = tid; skb_set_queue_mapping(skb, ieee802_1d_to_ac[tid]); if (qos) { nullfunc->qos_ctrl = cpu_to_le16(tid); if (reason == IEEE80211_FRAME_RELEASE_UAPSD) { nullfunc->qos_ctrl |= cpu_to_le16(IEEE80211_QOS_CTL_EOSP); if (more_data) nullfunc->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA); } } info = IEEE80211_SKB_CB(skb); /* * Tell TX path to send this frame even though the * STA may still remain in PS mode after this frame * exchange. Also set EOSP to indicate this packet * ends the poll/service period. 
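 * Concretely: NO_PS_BUFFER bypasses the PS buffering logic,
 * TX_STATUS_EOSP together with REQ_TX_STATUS asks for the TX status
 * that is used to end the service period, and PS_RESPONSE marks the
 * frame as a poll/SP response for the driver.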
*/ info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER | IEEE80211_TX_STATUS_EOSP | IEEE80211_TX_CTL_REQ_TX_STATUS; info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE; if (call_driver) drv_allow_buffered_frames(local, sta, BIT(tid), 1, reason, false); skb->dev = sdata->dev; rcu_read_lock(); chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); if (WARN_ON(!chanctx_conf)) { rcu_read_unlock(); kfree_skb(skb); return; } info->band = chanctx_conf->def.chan->band; ieee80211_xmit(sdata, sta, skb, 0); rcu_read_unlock(); } static int find_highest_prio_tid(unsigned long tids) { /* lower 3 TIDs aren't ordered perfectly */ if (tids & 0xF8) return fls(tids) - 1; /* TID 0 is BE just like TID 3 */ if (tids & BIT(0)) return 0; return fls(tids) - 1; } /* Indicates if the MORE_DATA bit should be set in the last * frame obtained by ieee80211_sta_ps_get_frames. * Note that driver_release_tids is relevant only if * reason = IEEE80211_FRAME_RELEASE_PSPOLL */ static bool ieee80211_sta_ps_more_data(struct sta_info *sta, u8 ignored_acs, enum ieee80211_frame_release_type reason, unsigned long driver_release_tids) { int ac; /* If the driver has data on more than one TID then * certainly there's more data if we release just a * single frame now (from a single TID). This will * only happen for PS-Poll. */ if (reason == IEEE80211_FRAME_RELEASE_PSPOLL && hweight16(driver_release_tids) > 1) return true; for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { if (ignored_acs & ieee80211_ac_to_qos_mask[ac]) continue; if (!skb_queue_empty(&sta->tx_filtered[ac]) || !skb_queue_empty(&sta->ps_tx_buf[ac])) return true; } return false; } static void ieee80211_sta_ps_get_frames(struct sta_info *sta, int n_frames, u8 ignored_acs, enum ieee80211_frame_release_type reason, struct sk_buff_head *frames, unsigned long *driver_release_tids) { struct ieee80211_sub_if_data *sdata = sta->sdata; struct ieee80211_local *local = sdata->local; int ac; /* Get response frame(s) and more data bit for the last one. */ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { unsigned long tids; if (ignored_acs & ieee80211_ac_to_qos_mask[ac]) continue; tids = ieee80211_tids_for_ac(ac); /* if we already have frames from software, then we can't also * release from hardware queues */ if (skb_queue_empty(frames)) { *driver_release_tids |= sta->driver_buffered_tids & tids; *driver_release_tids |= sta->txq_buffered_tids & tids; } if (!*driver_release_tids) { struct sk_buff *skb; while (n_frames > 0) { skb = skb_dequeue(&sta->tx_filtered[ac]); if (!skb) { skb = skb_dequeue( &sta->ps_tx_buf[ac]); if (skb) local->total_ps_buffered--; } if (!skb) break; n_frames--; __skb_queue_tail(frames, skb); } } /* If we have more frames buffered on this AC, then abort the * loop since we can't send more data from other ACs before * the buffered frames from this. 
*/ if (!skb_queue_empty(&sta->tx_filtered[ac]) || !skb_queue_empty(&sta->ps_tx_buf[ac])) break; } } static void ieee80211_sta_ps_deliver_response(struct sta_info *sta, int n_frames, u8 ignored_acs, enum ieee80211_frame_release_type reason) { struct ieee80211_sub_if_data *sdata = sta->sdata; struct ieee80211_local *local = sdata->local; unsigned long driver_release_tids = 0; struct sk_buff_head frames; bool more_data; /* Service or PS-Poll period starts */ set_sta_flag(sta, WLAN_STA_SP); __skb_queue_head_init(&frames); ieee80211_sta_ps_get_frames(sta, n_frames, ignored_acs, reason, &frames, &driver_release_tids); more_data = ieee80211_sta_ps_more_data(sta, ignored_acs, reason, driver_release_tids); if (driver_release_tids && reason == IEEE80211_FRAME_RELEASE_PSPOLL) driver_release_tids = BIT(find_highest_prio_tid(driver_release_tids)); if (skb_queue_empty(&frames) && !driver_release_tids) { int tid, ac; /* * For PS-Poll, this can only happen due to a race condition * when we set the TIM bit and the station notices it, but * before it can poll for the frame we expire it. * * For uAPSD, this is said in the standard (11.2.1.5 h): * At each unscheduled SP for a non-AP STA, the AP shall * attempt to transmit at least one MSDU or MMPDU, but no * more than the value specified in the Max SP Length field * in the QoS Capability element from delivery-enabled ACs, * that are destined for the non-AP STA. * * Since we have no other MSDU/MMPDU, transmit a QoS null frame. */ /* This will evaluate to 1, 3, 5 or 7. */ for (ac = IEEE80211_AC_VO; ac < IEEE80211_NUM_ACS; ac++) if (!(ignored_acs & ieee80211_ac_to_qos_mask[ac])) break; tid = 7 - 2 * ac; ieee80211_send_null_response(sta, tid, reason, true, false); } else if (!driver_release_tids) { struct sk_buff_head pending; struct sk_buff *skb; int num = 0; u16 tids = 0; bool need_null = false; skb_queue_head_init(&pending); while ((skb = __skb_dequeue(&frames))) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_hdr *hdr = (void *) skb->data; u8 *qoshdr = NULL; num++; /* * Tell TX path to send this frame even though the * STA may still remain in PS mode after this frame * exchange. */ info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER; info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE; /* * Use MoreData flag to indicate whether there are * more buffered frames for this STA */ if (more_data || !skb_queue_empty(&frames)) hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA); else hdr->frame_control &= cpu_to_le16(~IEEE80211_FCTL_MOREDATA); if (ieee80211_is_data_qos(hdr->frame_control) || ieee80211_is_qos_nullfunc(hdr->frame_control)) qoshdr = ieee80211_get_qos_ctl(hdr); tids |= BIT(skb->priority); __skb_queue_tail(&pending, skb); /* end service period after last frame or add one */ if (!skb_queue_empty(&frames)) continue; if (reason != IEEE80211_FRAME_RELEASE_UAPSD) { /* for PS-Poll, there's only one frame */ info->flags |= IEEE80211_TX_STATUS_EOSP | IEEE80211_TX_CTL_REQ_TX_STATUS; break; } /* For uAPSD, things are a bit more complicated. If the * last frame has a QoS header (i.e. is a QoS-data or * QoS-nulldata frame) then just set the EOSP bit there * and be done. * If the frame doesn't have a QoS header (which means * it should be a bufferable MMPDU) then we can't set * the EOSP bit in the QoS header; add a QoS-nulldata * frame to the list to send it after the MMPDU. 
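 * (That QoS-nulldata frame is built by ieee80211_send_null_response()
 * once the buffered frames have been handed off, see need_null below.)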
* * Note that this code is only in the mac80211-release * code path, we assume that the driver will not buffer * anything but QoS-data frames, or if it does, will * create the QoS-nulldata frame by itself if needed. * * Cf. 802.11-2012 10.2.1.10 (c). */ if (qoshdr) { *qoshdr |= IEEE80211_QOS_CTL_EOSP; info->flags |= IEEE80211_TX_STATUS_EOSP | IEEE80211_TX_CTL_REQ_TX_STATUS; } else { /* The standard isn't completely clear on this * as it says the more-data bit should be set * if there are more BUs. The QoS-Null frame * we're about to send isn't buffered yet, we * only create it below, but let's pretend it * was buffered just in case some clients only * expect more-data=0 when eosp=1. */ hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA); need_null = true; num++; } break; } drv_allow_buffered_frames(local, sta, tids, num, reason, more_data); ieee80211_add_pending_skbs(local, &pending); if (need_null) ieee80211_send_null_response( sta, find_highest_prio_tid(tids), reason, false, false); sta_info_recalc_tim(sta); } else { int tid; /* * We need to release a frame that is buffered somewhere in the * driver ... it'll have to handle that. * Note that the driver also has to check the number of frames * on the TIDs we're releasing from - if there are more than * n_frames it has to set the more-data bit (if we didn't ask * it to set it anyway due to other buffered frames); if there * are fewer than n_frames it has to make sure to adjust that * to allow the service period to end properly. */ drv_release_buffered_frames(local, sta, driver_release_tids, n_frames, reason, more_data); /* * Note that we don't recalculate the TIM bit here as it would * most likely have no effect at all unless the driver told us * that the TID(s) became empty before returning here from the * release function. * Either way, however, when the driver tells us that the TID(s) * became empty or we find that a txq became empty, we'll do the * TIM recalculation. */ if (!sta->sta.txq[0]) return; for (tid = 0; tid < ARRAY_SIZE(sta->sta.txq); tid++) { if (!sta->sta.txq[tid] || !(driver_release_tids & BIT(tid)) || txq_has_queue(sta->sta.txq[tid])) continue; sta_info_recalc_tim(sta); break; } } } void ieee80211_sta_ps_deliver_poll_response(struct sta_info *sta) { u8 ignore_for_response = sta->sta.uapsd_queues; /* * If all ACs are delivery-enabled then we should reply * from any of them, if only some are enabled we reply * only from the non-enabled ones. */ if (ignore_for_response == BIT(IEEE80211_NUM_ACS) - 1) ignore_for_response = 0; ieee80211_sta_ps_deliver_response(sta, 1, ignore_for_response, IEEE80211_FRAME_RELEASE_PSPOLL); } void ieee80211_sta_ps_deliver_uapsd(struct sta_info *sta) { int n_frames = sta->sta.max_sp; u8 delivery_enabled = sta->sta.uapsd_queues; /* * If we ever grow support for TSPEC this might happen if * the TSPEC update from hostapd comes in between a trigger * frame setting WLAN_STA_UAPSD in the RX path and this * actually getting called. */ if (!delivery_enabled) return; switch (sta->sta.max_sp) { case 1: n_frames = 2; break; case 2: n_frames = 4; break; case 3: n_frames = 6; break; case 0: /* XXX: what is a good value? 
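	 * A Max SP Length of 0 in the QoS info field means the AP may
	 * deliver all of its buffered frames during the SP, so this
	 * value only needs to be large enough never to be the
	 * practical limit; 128 is an arbitrary but comfortable cap.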
*/ n_frames = 128; break; } ieee80211_sta_ps_deliver_response(sta, n_frames, ~delivery_enabled, IEEE80211_FRAME_RELEASE_UAPSD); } void ieee80211_sta_block_awake(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta, bool block) { struct sta_info *sta = container_of(pubsta, struct sta_info, sta); trace_api_sta_block_awake(sta->local, pubsta, block); if (block) { set_sta_flag(sta, WLAN_STA_PS_DRIVER); ieee80211_clear_fast_xmit(sta); return; } if (!test_sta_flag(sta, WLAN_STA_PS_DRIVER)) return; if (!test_sta_flag(sta, WLAN_STA_PS_STA)) { set_sta_flag(sta, WLAN_STA_PS_DELIVER); clear_sta_flag(sta, WLAN_STA_PS_DRIVER); ieee80211_queue_work(hw, &sta->drv_deliver_wk); } else if (test_sta_flag(sta, WLAN_STA_PSPOLL) || test_sta_flag(sta, WLAN_STA_UAPSD)) { /* must be asleep in this case */ clear_sta_flag(sta, WLAN_STA_PS_DRIVER); ieee80211_queue_work(hw, &sta->drv_deliver_wk); } else { clear_sta_flag(sta, WLAN_STA_PS_DRIVER); ieee80211_check_fast_xmit(sta); } } EXPORT_SYMBOL(ieee80211_sta_block_awake); void ieee80211_sta_eosp(struct ieee80211_sta *pubsta) { struct sta_info *sta = container_of(pubsta, struct sta_info, sta); struct ieee80211_local *local = sta->local; trace_api_eosp(local, pubsta); clear_sta_flag(sta, WLAN_STA_SP); } EXPORT_SYMBOL(ieee80211_sta_eosp); void ieee80211_send_eosp_nullfunc(struct ieee80211_sta *pubsta, int tid) { struct sta_info *sta = container_of(pubsta, struct sta_info, sta); enum ieee80211_frame_release_type reason; bool more_data; trace_api_send_eosp_nullfunc(sta->local, pubsta, tid); reason = IEEE80211_FRAME_RELEASE_UAPSD; more_data = ieee80211_sta_ps_more_data(sta, ~sta->sta.uapsd_queues, reason, 0); ieee80211_send_null_response(sta, tid, reason, false, more_data); } EXPORT_SYMBOL(ieee80211_send_eosp_nullfunc); void ieee80211_sta_set_buffered(struct ieee80211_sta *pubsta, u8 tid, bool buffered) { struct sta_info *sta = container_of(pubsta, struct sta_info, sta); if (WARN_ON(tid >= IEEE80211_NUM_TIDS)) return; trace_api_sta_set_buffered(sta->local, pubsta, tid, buffered); if (buffered) set_bit(tid, &sta->driver_buffered_tids); else clear_bit(tid, &sta->driver_buffered_tids); sta_info_recalc_tim(sta); } EXPORT_SYMBOL(ieee80211_sta_set_buffered); int sta_info_move_state(struct sta_info *sta, enum ieee80211_sta_state new_state) { might_sleep(); if (sta->sta_state == new_state) return 0; /* check allowed transitions first */ switch (new_state) { case IEEE80211_STA_NONE: if (sta->sta_state != IEEE80211_STA_AUTH) return -EINVAL; break; case IEEE80211_STA_AUTH: if (sta->sta_state != IEEE80211_STA_NONE && sta->sta_state != IEEE80211_STA_ASSOC) return -EINVAL; break; case IEEE80211_STA_ASSOC: if (sta->sta_state != IEEE80211_STA_AUTH && sta->sta_state != IEEE80211_STA_AUTHORIZED) return -EINVAL; break; case IEEE80211_STA_AUTHORIZED: if (sta->sta_state != IEEE80211_STA_ASSOC) return -EINVAL; break; default: WARN(1, "invalid state %d", new_state); return -EINVAL; } sta_dbg(sta->sdata, "moving STA %pM to state %d\n", sta->sta.addr, new_state); /* * notify the driver before the actual changes so it can * fail the transition */ if (test_sta_flag(sta, WLAN_STA_INSERTED)) { int err = drv_sta_state(sta->local, sta->sdata, sta, sta->sta_state, new_state); if (err) return err; } /* reflect the change in all state variables */ switch (new_state) { case IEEE80211_STA_NONE: if (sta->sta_state == IEEE80211_STA_AUTH) clear_bit(WLAN_STA_AUTH, &sta->_flags); break; case IEEE80211_STA_AUTH: if (sta->sta_state == IEEE80211_STA_NONE) { set_bit(WLAN_STA_AUTH, &sta->_flags); } else if 
(sta->sta_state == IEEE80211_STA_ASSOC) { clear_bit(WLAN_STA_ASSOC, &sta->_flags); ieee80211_recalc_min_chandef(sta->sdata); if (!sta->sta.support_p2p_ps) ieee80211_recalc_p2p_go_ps_allowed(sta->sdata); } break; case IEEE80211_STA_ASSOC: if (sta->sta_state == IEEE80211_STA_AUTH) { set_bit(WLAN_STA_ASSOC, &sta->_flags); ieee80211_recalc_min_chandef(sta->sdata); if (!sta->sta.support_p2p_ps) ieee80211_recalc_p2p_go_ps_allowed(sta->sdata); } else if (sta->sta_state == IEEE80211_STA_AUTHORIZED) { ieee80211_vif_dec_num_mcast(sta->sdata); clear_bit(WLAN_STA_AUTHORIZED, &sta->_flags); ieee80211_clear_fast_xmit(sta); ieee80211_clear_fast_rx(sta); } break; case IEEE80211_STA_AUTHORIZED: if (sta->sta_state == IEEE80211_STA_ASSOC) { ieee80211_vif_inc_num_mcast(sta->sdata); set_bit(WLAN_STA_AUTHORIZED, &sta->_flags); ieee80211_check_fast_xmit(sta); ieee80211_check_fast_rx(sta); } break; default: break; } sta->sta_state = new_state; return 0; } u8 sta_info_tx_streams(struct sta_info *sta) { struct ieee80211_sta_ht_cap *ht_cap = &sta->sta.ht_cap; u8 rx_streams; if (!sta->sta.ht_cap.ht_supported) return 1; if (sta->sta.vht_cap.vht_supported) { int i; u16 tx_mcs_map = le16_to_cpu(sta->sta.vht_cap.vht_mcs.tx_mcs_map); for (i = 7; i >= 0; i--) if ((tx_mcs_map & (0x3 << (i * 2))) != IEEE80211_VHT_MCS_NOT_SUPPORTED) return i + 1; } if (ht_cap->mcs.rx_mask[3]) rx_streams = 4; else if (ht_cap->mcs.rx_mask[2]) rx_streams = 3; else if (ht_cap->mcs.rx_mask[1]) rx_streams = 2; else rx_streams = 1; if (!(ht_cap->mcs.tx_params & IEEE80211_HT_MCS_TX_RX_DIFF)) return rx_streams; return ((ht_cap->mcs.tx_params & IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK) >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT) + 1; } static struct ieee80211_sta_rx_stats * sta_get_last_rx_stats(struct sta_info *sta) { struct ieee80211_sta_rx_stats *stats = &sta->rx_stats; struct ieee80211_local *local = sta->local; int cpu; if (!ieee80211_hw_check(&local->hw, USES_RSS)) return stats; for_each_possible_cpu(cpu) { struct ieee80211_sta_rx_stats *cpustats; cpustats = per_cpu_ptr(sta->pcpu_rx_stats, cpu); if (time_after(cpustats->last_rx, stats->last_rx)) stats = cpustats; } return stats; } static void sta_stats_decode_rate(struct ieee80211_local *local, u32 rate, struct rate_info *rinfo) { rinfo->bw = STA_STATS_GET(BW, rate); switch (STA_STATS_GET(TYPE, rate)) { case STA_STATS_RATE_TYPE_VHT: rinfo->flags = RATE_INFO_FLAGS_VHT_MCS; rinfo->mcs = STA_STATS_GET(VHT_MCS, rate); rinfo->nss = STA_STATS_GET(VHT_NSS, rate); if (STA_STATS_GET(SGI, rate)) rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI; break; case STA_STATS_RATE_TYPE_HT: rinfo->flags = RATE_INFO_FLAGS_MCS; rinfo->mcs = STA_STATS_GET(HT_MCS, rate); if (STA_STATS_GET(SGI, rate)) rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI; break; case STA_STATS_RATE_TYPE_LEGACY: { struct ieee80211_supported_band *sband; u16 brate; unsigned int shift; int band = STA_STATS_GET(LEGACY_BAND, rate); int rate_idx = STA_STATS_GET(LEGACY_IDX, rate); sband = local->hw.wiphy->bands[band]; brate = sband->bitrates[rate_idx].bitrate; if (rinfo->bw == RATE_INFO_BW_5) shift = 2; else if (rinfo->bw == RATE_INFO_BW_10) shift = 1; else shift = 0; rinfo->legacy = DIV_ROUND_UP(brate, 1 << shift); break; } case STA_STATS_RATE_TYPE_HE: rinfo->flags = RATE_INFO_FLAGS_HE_MCS; rinfo->mcs = STA_STATS_GET(HE_MCS, rate); rinfo->nss = STA_STATS_GET(HE_NSS, rate); rinfo->he_gi = STA_STATS_GET(HE_GI, rate); rinfo->he_ru_alloc = STA_STATS_GET(HE_RU, rate); rinfo->he_dcm = STA_STATS_GET(HE_DCM, rate); break; } } static int sta_set_rate_info_rx(struct sta_info *sta, 
struct rate_info *rinfo) { u16 rate = READ_ONCE(sta_get_last_rx_stats(sta)->last_rate); if (rate == STA_STATS_RATE_INVALID) return -EINVAL; sta_stats_decode_rate(sta->local, rate, rinfo); return 0; } static void sta_set_tidstats(struct sta_info *sta, struct cfg80211_tid_stats *tidstats, int tid) { struct ieee80211_local *local = sta->local; if (!(tidstats->filled & BIT(NL80211_TID_STATS_RX_MSDU))) { unsigned int start; do { start = u64_stats_fetch_begin(&sta->rx_stats.syncp); tidstats->rx_msdu = sta->rx_stats.msdu[tid]; } while (u64_stats_fetch_retry(&sta->rx_stats.syncp, start)); tidstats->filled |= BIT(NL80211_TID_STATS_RX_MSDU); } if (!(tidstats->filled & BIT(NL80211_TID_STATS_TX_MSDU))) { tidstats->filled |= BIT(NL80211_TID_STATS_TX_MSDU); tidstats->tx_msdu = sta->tx_stats.msdu[tid]; } if (!(tidstats->filled & BIT(NL80211_TID_STATS_TX_MSDU_RETRIES)) && ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) { tidstats->filled |= BIT(NL80211_TID_STATS_TX_MSDU_RETRIES); tidstats->tx_msdu_retries = sta->status_stats.msdu_retries[tid]; } if (!(tidstats->filled & BIT(NL80211_TID_STATS_TX_MSDU_FAILED)) && ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) { tidstats->filled |= BIT(NL80211_TID_STATS_TX_MSDU_FAILED); tidstats->tx_msdu_failed = sta->status_stats.msdu_failed[tid]; } if (local->ops->wake_tx_queue && tid < IEEE80211_NUM_TIDS) { spin_lock_bh(&local->fq.lock); rcu_read_lock(); tidstats->filled |= BIT(NL80211_TID_STATS_TXQ_STATS); ieee80211_fill_txq_stats(&tidstats->txq_stats, to_txq_info(sta->sta.txq[tid])); rcu_read_unlock(); spin_unlock_bh(&local->fq.lock); } } static inline u64 sta_get_stats_bytes(struct ieee80211_sta_rx_stats *rxstats) { unsigned int start; u64 value; do { start = u64_stats_fetch_begin(&rxstats->syncp); value = rxstats->bytes; } while (u64_stats_fetch_retry(&rxstats->syncp, start)); return value; } void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo, bool tidstats) { struct ieee80211_sub_if_data *sdata = sta->sdata; struct ieee80211_local *local = sdata->local; u32 thr = 0; int i, ac, cpu; struct ieee80211_sta_rx_stats *last_rxstats; last_rxstats = sta_get_last_rx_stats(sta); sinfo->generation = sdata->local->sta_generation; /* do before driver, so beacon filtering drivers have a * chance to e.g. 
just add the number of filtered beacons * (or just modify the value entirely, of course) */ if (sdata->vif.type == NL80211_IFTYPE_STATION) sinfo->rx_beacon = sdata->u.mgd.count_beacon_signal; drv_sta_statistics(local, sdata, &sta->sta, sinfo); sinfo->filled |= BIT_ULL(NL80211_STA_INFO_INACTIVE_TIME) | BIT_ULL(NL80211_STA_INFO_STA_FLAGS) | BIT_ULL(NL80211_STA_INFO_BSS_PARAM) | BIT_ULL(NL80211_STA_INFO_CONNECTED_TIME) | BIT_ULL(NL80211_STA_INFO_RX_DROP_MISC); if (sdata->vif.type == NL80211_IFTYPE_STATION) { sinfo->beacon_loss_count = sdata->u.mgd.beacon_loss_count; sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_LOSS); } sinfo->connected_time = ktime_get_seconds() - sta->last_connected; sinfo->inactive_time = jiffies_to_msecs(jiffies - ieee80211_sta_last_active(sta)); if (!(sinfo->filled & (BIT_ULL(NL80211_STA_INFO_TX_BYTES64) | BIT_ULL(NL80211_STA_INFO_TX_BYTES)))) { sinfo->tx_bytes = 0; for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) sinfo->tx_bytes += sta->tx_stats.bytes[ac]; sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BYTES64); } if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_PACKETS))) { sinfo->tx_packets = 0; for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) sinfo->tx_packets += sta->tx_stats.packets[ac]; sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_PACKETS); } if (!(sinfo->filled & (BIT_ULL(NL80211_STA_INFO_RX_BYTES64) | BIT_ULL(NL80211_STA_INFO_RX_BYTES)))) { sinfo->rx_bytes += sta_get_stats_bytes(&sta->rx_stats); if (sta->pcpu_rx_stats) { for_each_possible_cpu(cpu) { struct ieee80211_sta_rx_stats *cpurxs; cpurxs = per_cpu_ptr(sta->pcpu_rx_stats, cpu); sinfo->rx_bytes += sta_get_stats_bytes(cpurxs); } } sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BYTES64); } if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_RX_PACKETS))) { sinfo->rx_packets = sta->rx_stats.packets; if (sta->pcpu_rx_stats) { for_each_possible_cpu(cpu) { struct ieee80211_sta_rx_stats *cpurxs; cpurxs = per_cpu_ptr(sta->pcpu_rx_stats, cpu); sinfo->rx_packets += cpurxs->packets; } } sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_PACKETS); } if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_RETRIES))) { sinfo->tx_retries = sta->status_stats.retry_count; sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_RETRIES); } if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_FAILED))) { sinfo->tx_failed = sta->status_stats.retry_failed; sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_FAILED); } sinfo->rx_dropped_misc = sta->rx_stats.dropped; if (sta->pcpu_rx_stats) { for_each_possible_cpu(cpu) { struct ieee80211_sta_rx_stats *cpurxs; cpurxs = per_cpu_ptr(sta->pcpu_rx_stats, cpu); sinfo->rx_dropped_misc += cpurxs->dropped; } } if (sdata->vif.type == NL80211_IFTYPE_STATION && !(sdata->vif.driver_flags & IEEE80211_VIF_BEACON_FILTER)) { sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_RX) | BIT_ULL(NL80211_STA_INFO_BEACON_SIGNAL_AVG); sinfo->rx_beacon_signal_avg = ieee80211_ave_rssi(&sdata->vif); } if (ieee80211_hw_check(&sta->local->hw, SIGNAL_DBM) || ieee80211_hw_check(&sta->local->hw, SIGNAL_UNSPEC)) { if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_SIGNAL))) { sinfo->signal = (s8)last_rxstats->last_signal; sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL); } if (!sta->pcpu_rx_stats && !(sinfo->filled & BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG))) { sinfo->signal_avg = -ewma_signal_read(&sta->rx_stats_avg.signal); sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG); } } /* for the average - if pcpu_rx_stats isn't set - rxstats must point to * the sta->rx_stats struct, so the check here is fine with and without * pcpu statistics */ if (last_rxstats->chains && 
!(sinfo->filled & (BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL) | BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL_AVG)))) { sinfo->filled |= BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL); if (!sta->pcpu_rx_stats) sinfo->filled |= BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL_AVG); sinfo->chains = last_rxstats->chains; for (i = 0; i < ARRAY_SIZE(sinfo->chain_signal); i++) { sinfo->chain_signal[i] = last_rxstats->chain_signal_last[i]; sinfo->chain_signal_avg[i] = -ewma_signal_read(&sta->rx_stats_avg.chain_signal[i]); } } if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_BITRATE))) { sta_set_rate_info_tx(sta, &sta->tx_stats.last_rate, &sinfo->txrate); sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE); } if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_RX_BITRATE))) { if (sta_set_rate_info_rx(sta, &sinfo->rxrate) == 0) sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BITRATE); } if (tidstats && !cfg80211_sinfo_alloc_tid_stats(sinfo, GFP_KERNEL)) { for (i = 0; i < IEEE80211_NUM_TIDS + 1; i++) sta_set_tidstats(sta, &sinfo->pertid[i], i); } if (ieee80211_vif_is_mesh(&sdata->vif)) { #ifdef CPTCFG_MAC80211_MESH sinfo->filled |= BIT_ULL(NL80211_STA_INFO_LLID) | BIT_ULL(NL80211_STA_INFO_PLID) | BIT_ULL(NL80211_STA_INFO_PLINK_STATE) | BIT_ULL(NL80211_STA_INFO_LOCAL_PM) | BIT_ULL(NL80211_STA_INFO_PEER_PM) | BIT_ULL(NL80211_STA_INFO_NONPEER_PM) | BIT_ULL(NL80211_STA_INFO_CONNECTED_TO_GATE); sinfo->llid = sta->mesh->llid; sinfo->plid = sta->mesh->plid; sinfo->plink_state = sta->mesh->plink_state; if (test_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN)) { sinfo->filled |= BIT_ULL(NL80211_STA_INFO_T_OFFSET); sinfo->t_offset = sta->mesh->t_offset; } sinfo->local_pm = sta->mesh->local_pm; sinfo->peer_pm = sta->mesh->peer_pm; sinfo->nonpeer_pm = sta->mesh->nonpeer_pm; sinfo->connected_to_gate = sta->mesh->connected_to_gate; #endif } sinfo->bss_param.flags = 0; if (sdata->vif.bss_conf.use_cts_prot) sinfo->bss_param.flags |= BSS_PARAM_FLAGS_CTS_PROT; if (sdata->vif.bss_conf.use_short_preamble) sinfo->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_PREAMBLE; if (sdata->vif.bss_conf.use_short_slot) sinfo->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_SLOT_TIME; sinfo->bss_param.dtim_period = sdata->vif.bss_conf.dtim_period; sinfo->bss_param.beacon_interval = sdata->vif.bss_conf.beacon_int; sinfo->sta_flags.set = 0; sinfo->sta_flags.mask = BIT(NL80211_STA_FLAG_AUTHORIZED) | BIT(NL80211_STA_FLAG_SHORT_PREAMBLE) | BIT(NL80211_STA_FLAG_WME) | BIT(NL80211_STA_FLAG_MFP) | BIT(NL80211_STA_FLAG_AUTHENTICATED) | BIT(NL80211_STA_FLAG_ASSOCIATED) | BIT(NL80211_STA_FLAG_TDLS_PEER); if (test_sta_flag(sta, WLAN_STA_AUTHORIZED)) sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_AUTHORIZED); if (test_sta_flag(sta, WLAN_STA_SHORT_PREAMBLE)) sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_SHORT_PREAMBLE); if (sta->sta.wme) sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_WME); if (test_sta_flag(sta, WLAN_STA_MFP)) sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_MFP); if (test_sta_flag(sta, WLAN_STA_AUTH)) sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_AUTHENTICATED); if (test_sta_flag(sta, WLAN_STA_ASSOC)) sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_ASSOCIATED); if (test_sta_flag(sta, WLAN_STA_TDLS_PEER)) sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_TDLS_PEER); thr = sta_get_expected_throughput(sta); if (thr != 0) { sinfo->filled |= BIT_ULL(NL80211_STA_INFO_EXPECTED_THROUGHPUT); sinfo->expected_throughput = thr; } if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL)) && sta->status_stats.ack_signal_filled) { sinfo->ack_signal = sta->status_stats.last_ack_signal; sinfo->filled |= 
BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL); } if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL_AVG)) && sta->status_stats.ack_signal_filled) { sinfo->avg_ack_signal = -(s8)ewma_avg_signal_read( &sta->status_stats.avg_ack_signal); sinfo->filled |= BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL_AVG); } } u32 sta_get_expected_throughput(struct sta_info *sta) { struct ieee80211_sub_if_data *sdata = sta->sdata; struct ieee80211_local *local = sdata->local; struct rate_control_ref *ref = NULL; u32 thr = 0; if (test_sta_flag(sta, WLAN_STA_RATE_CONTROL)) ref = local->rate_ctrl; /* check if the driver has a SW RC implementation */ if (ref && ref->ops->get_expected_throughput) thr = ref->ops->get_expected_throughput(sta->rate_ctrl_priv); else thr = drv_get_expected_throughput(local, sta); return thr; } unsigned long ieee80211_sta_last_active(struct sta_info *sta) { struct ieee80211_sta_rx_stats *stats = sta_get_last_rx_stats(sta); if (time_after(stats->last_rx, sta->status_stats.last_ack)) return stats->last_rx; return sta->status_stats.last_ack; } static void sta_update_codel_params(struct sta_info *sta, u32 thr) { if (!sta->sdata->local->ops->wake_tx_queue) return; if (thr && thr < STA_SLOW_THRESHOLD * sta->local->num_sta) { sta->cparams.target = MS2TIME(50); sta->cparams.interval = MS2TIME(300); sta->cparams.ecn = false; } else { sta->cparams.target = MS2TIME(20); sta->cparams.interval = MS2TIME(100); sta->cparams.ecn = true; } } void ieee80211_sta_set_expected_throughput(struct ieee80211_sta *pubsta, u32 thr) { struct sta_info *sta = container_of(pubsta, struct sta_info, sta); sta_update_codel_params(sta, thr); } backport-iwlwifi-dkms-7906/net/mac80211/sta_info.h000066400000000000000000000717051351064634500216050ustar00rootroot00000000000000/* * Copyright 2002-2005, Devicescape Software, Inc. * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright(c) 2015-2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #ifndef STA_INFO_H #define STA_INFO_H #include <linux/list.h> #include <linux/types.h> #include <linux/if_ether.h> #include <linux/workqueue.h> #include <linux/average.h> #include <linux/bitfield.h> #include <linux/etherdevice.h> #include <linux/rhashtable.h> #include <linux/u64_stats_sync.h> #include "key.h" /** * enum ieee80211_sta_info_flags - Station flags * * These flags are used with &struct sta_info's @flags member, but * only indirectly with set_sta_flag() and friends. * * @WLAN_STA_AUTH: Station is authenticated. * @WLAN_STA_ASSOC: Station is associated. * @WLAN_STA_PS_STA: Station is in power-save mode * @WLAN_STA_AUTHORIZED: Station is authorized to send/receive traffic. * This bit is always checked so needs to be enabled for all stations * when virtual port control is not in use. * @WLAN_STA_SHORT_PREAMBLE: Station is capable of receiving short-preamble * frames. * @WLAN_STA_WDS: Station is one of our WDS peers. * @WLAN_STA_CLEAR_PS_FILT: Clear PS filter in hardware (using the * IEEE80211_TX_CTL_CLEAR_PS_FILT control flag) when the next * frame to this station is transmitted. * @WLAN_STA_MFP: Management frame protection is used with this STA. * @WLAN_STA_BLOCK_BA: Used to deny ADDBA requests (both TX and RX) * during suspend/resume and station removal. * @WLAN_STA_PS_DRIVER: driver requires keeping this station in * power-save mode logically to flush frames that might still * be in the queues * @WLAN_STA_PSPOLL: Station sent PS-poll while driver was keeping * station in power-save mode, reply when the driver unblocks. * @WLAN_STA_TDLS_PEER: Station is a TDLS peer.
* @WLAN_STA_TDLS_PEER_AUTH: This TDLS peer is authorized to send direct * packets. This means the link is enabled. * @WLAN_STA_TDLS_INITIATOR: We are the initiator of the TDLS link with this * station. * @WLAN_STA_TDLS_CHAN_SWITCH: This TDLS peer supports TDLS channel-switching * @WLAN_STA_TDLS_OFF_CHANNEL: The local STA is currently off-channel with this * TDLS peer * @WLAN_STA_TDLS_WIDER_BW: This TDLS peer supports working on a wider bw on * the BSS base channel. * @WLAN_STA_UAPSD: Station requested unscheduled SP while driver was * keeping station in power-save mode, reply when the driver * unblocks the station. * @WLAN_STA_SP: Station is in a service period, so don't try to * reply to other uAPSD trigger frames or PS-Poll. * @WLAN_STA_4ADDR_EVENT: 4-addr event was already sent for this frame. * @WLAN_STA_INSERTED: This station is inserted into the hash table. * @WLAN_STA_RATE_CONTROL: rate control was initialized for this station. * @WLAN_STA_TOFFSET_KNOWN: toffset calculated for this station is valid. * @WLAN_STA_MPSP_OWNER: local STA is owner of a mesh Peer Service Period. * @WLAN_STA_MPSP_RECIPIENT: local STA is recipient of an MPSP. * @WLAN_STA_PS_DELIVER: station woke up, but we're still blocking TX * until pending frames are delivered * * @NUM_WLAN_STA_FLAGS: number of defined flags */ enum ieee80211_sta_info_flags { WLAN_STA_AUTH, WLAN_STA_ASSOC, WLAN_STA_PS_STA, WLAN_STA_AUTHORIZED, WLAN_STA_SHORT_PREAMBLE, WLAN_STA_WDS, WLAN_STA_CLEAR_PS_FILT, WLAN_STA_MFP, WLAN_STA_BLOCK_BA, WLAN_STA_PS_DRIVER, WLAN_STA_PSPOLL, WLAN_STA_TDLS_PEER, WLAN_STA_TDLS_PEER_AUTH, WLAN_STA_TDLS_INITIATOR, WLAN_STA_TDLS_CHAN_SWITCH, WLAN_STA_TDLS_OFF_CHANNEL, WLAN_STA_TDLS_WIDER_BW, WLAN_STA_UAPSD, WLAN_STA_SP, WLAN_STA_4ADDR_EVENT, WLAN_STA_INSERTED, WLAN_STA_RATE_CONTROL, WLAN_STA_TOFFSET_KNOWN, WLAN_STA_MPSP_OWNER, WLAN_STA_MPSP_RECIPIENT, WLAN_STA_PS_DELIVER, NUM_WLAN_STA_FLAGS, }; #define ADDBA_RESP_INTERVAL (CPTCFG_IWL_TIMEOUT_FACTOR * HZ) #define HT_AGG_MAX_RETRIES 15 #define HT_AGG_BURST_RETRIES 3 #define HT_AGG_RETRIES_PERIOD (CPTCFG_IWL_TIMEOUT_FACTOR * 15 * HZ) #define HT_AGG_STATE_DRV_READY 0 #define HT_AGG_STATE_RESPONSE_RECEIVED 1 #define HT_AGG_STATE_OPERATIONAL 2 #define HT_AGG_STATE_STOPPING 3 #define HT_AGG_STATE_WANT_START 4 #define HT_AGG_STATE_WANT_STOP 5 #define HT_AGG_STATE_START_CB 6 #define HT_AGG_STATE_STOP_CB 7 DECLARE_EWMA(avg_signal, 10, 8) enum ieee80211_agg_stop_reason { AGG_STOP_DECLINED, AGG_STOP_LOCAL_REQUEST, AGG_STOP_PEER_REQUEST, AGG_STOP_DESTROY_STA, }; struct sta_info; /** * struct tid_ampdu_tx - TID aggregation information (Tx). * * @rcu_head: rcu head for freeing structure * @session_timer: check if we keep Tx-ing on the TID (by timeout value) * @addba_resp_timer: timer for peer's response to addba request * @pending: pending frames queue -- use sta's spinlock to protect * @sta: station we are attached to * @dialog_token: dialog token for aggregation session * @timeout: session timeout value to be filled in ADDBA requests * @tid: TID number * @state: session state (see above) * @last_tx: jiffies of last tx activity * @stop_initiator: initiator of a session stop * @tx_stop: TX DelBA frame when stopping * @buf_size: reorder buffer size at receiver * @failed_bar_ssn: ssn of the last failed BAR tx attempt * @bar_pending: BAR needs to be re-sent * @amsdu: support A-MSDU within A-MPDU * * This structure's lifetime is managed by RCU, assignments to * the array holding it must hold the aggregation mutex.
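 *
 * Illustrative (not verbatim) TX-path access pattern, under the
 * rules described below:
 *
 *	rcu_read_lock();
 *	tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]);
 *	if (tid_tx && test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state))
 *		... the frame may use the aggregation session ...
 *	rcu_read_unlock();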
* * The TX path can access it under RCU lock-free if, and * only if, the state has the flag %HT_AGG_STATE_OPERATIONAL * set. Otherwise, the TX path must also acquire the spinlock * and re-check the state, see comments in the tx code * touching it. */ struct tid_ampdu_tx { struct rcu_head rcu_head; struct timer_list session_timer; struct timer_list addba_resp_timer; struct sk_buff_head pending; struct sta_info *sta; unsigned long state; unsigned long last_tx; u16 timeout; u8 dialog_token; u8 stop_initiator; bool tx_stop; u16 buf_size; u16 failed_bar_ssn; bool bar_pending; bool amsdu; u8 tid; }; /** * struct tid_ampdu_rx - TID aggregation information (Rx). * * @reorder_buf: buffer to reorder incoming aggregated MPDUs. An MPDU may be an * A-MSDU with individually reported subframes. * @reorder_buf_filtered: bitmap indicating where there are filtered frames in * the reorder buffer that should be ignored when releasing frames * @reorder_time: jiffies when skb was added * @session_timer: check if peer keeps Tx-ing on the TID (by timeout value) * @reorder_timer: releases expired frames from the reorder buffer. * @sta: station we are attached to * @last_rx: jiffies of last rx activity * @head_seq_num: head sequence number in reordering buffer. * @stored_mpdu_num: number of MPDUs in reordering buffer * @ssn: Starting Sequence Number expected to be aggregated. * @buf_size: buffer size for incoming A-MPDUs * @timeout: reset timer value (in TUs). * @tid: TID number * @rcu_head: RCU head used for freeing this struct * @reorder_lock: serializes access to reorder buffer, see below. * @auto_seq: used for offloaded BA sessions to automatically pick head_seq_num * and ssn. * @removed: this session is removed (but might have been found due to RCU) * @started: this session has started (head ssn or higher was received) * * This structure's lifetime is managed by RCU, assignments to * the array holding it must hold the aggregation mutex. * * The @reorder_lock is used to protect the members of this * struct, except for @timeout, @buf_size and @dialog_token, * which are constant across the lifetime of the struct (the * dialog token being used only for debugging). */ struct tid_ampdu_rx { struct rcu_head rcu_head; spinlock_t reorder_lock; u64 reorder_buf_filtered; struct sk_buff_head *reorder_buf; unsigned long *reorder_time; struct sta_info *sta; struct timer_list session_timer; struct timer_list reorder_timer; unsigned long last_rx; u16 head_seq_num; u16 stored_mpdu_num; u16 ssn; u16 buf_size; u16 timeout; u8 tid; u8 auto_seq:1, removed:1, started:1; }; /** * struct sta_ampdu_mlme - STA aggregation information. * * @mtx: mutex to protect all TX data (except non-NULL assignments * to tid_tx[idx], which are protected by the sta spinlock) * tid_start_tx is also protected by sta->lock.
* @tid_rx: aggregation info for Rx per TID -- RCU protected * @tid_rx_token: dialog tokens for valid aggregation sessions * @tid_rx_timer_expired: bitmap indicating on which TIDs the * RX timer expired until the work for it runs * @tid_rx_stop_requested: bitmap indicating which BA sessions per TID the * driver requested to close until the work for it runs * @tid_rx_manage_offl: bitmap indicating which BA sessions were requested * to be treated as started/stopped due to offloading * @agg_session_valid: bitmap indicating which TID has a rx BA session open on * @unexpected_agg: bitmap indicating which TID already sent a delBA due to * unexpected aggregation related frames outside a session * @work: work struct for starting/stopping aggregation * @tid_tx: aggregation info for Tx per TID * @tid_start_tx: sessions where start was requested * @last_addba_req_time: timestamp of the last addBA request. * @addba_req_num: number of times addBA request has been sent. * @dialog_token_allocator: dialog token enumerator for each new session; */ struct sta_ampdu_mlme { struct mutex mtx; /* rx */ struct tid_ampdu_rx __rcu *tid_rx[IEEE80211_NUM_TIDS]; u8 tid_rx_token[IEEE80211_NUM_TIDS]; unsigned long tid_rx_timer_expired[BITS_TO_LONGS(IEEE80211_NUM_TIDS)]; unsigned long tid_rx_stop_requested[BITS_TO_LONGS(IEEE80211_NUM_TIDS)]; unsigned long tid_rx_manage_offl[BITS_TO_LONGS(2 * IEEE80211_NUM_TIDS)]; unsigned long agg_session_valid[BITS_TO_LONGS(IEEE80211_NUM_TIDS)]; unsigned long unexpected_agg[BITS_TO_LONGS(IEEE80211_NUM_TIDS)]; /* tx */ struct work_struct work; struct tid_ampdu_tx __rcu *tid_tx[IEEE80211_NUM_TIDS]; struct tid_ampdu_tx *tid_start_tx[IEEE80211_NUM_TIDS]; unsigned long last_addba_req_time[IEEE80211_NUM_TIDS]; u8 addba_req_num[IEEE80211_NUM_TIDS]; u8 dialog_token_allocator; }; #ifdef CPTCFG_MAC80211_LATENCY_MEASUREMENTS /* * struct ieee80211_tx_consec_loss_stat - Tx consecutive loss statistics * * Measures TX consecutive loss for a station per TID. * * @consec_late_loss: number of consecutive frames that passed the late * threshold and are considered lost * @consec_total_loss: number of consecutive frames that passed the late * threshold and are considered lost or were actually lost. * @late_bins: each bin counts how many consecutive frames' latency is * greater than the threshold in a certain range, and considered lost. * @loss_bins: each bin counts how many consecutive frames were lost in a * certain range. * @total_loss_bins: counts how many consecutive packets were lost & late * within a certain range. * @bin_count: number of bins. */ struct ieee80211_tx_consec_loss_stat { u32 consec_late_loss; u32 consec_total_loss; u32 *late_bins; u32 *loss_bins; u32 *total_loss_bins; u32 bin_count; }; /* * struct ieee80211_tx_latency_stat - Tx latency statistics * * Measures TX latency and jitter for a station per TID. * * @max: worst case latency * @sum: sum of all latencies * @counter: number of Tx frames sent from interface * @bins: each bin counts how many frames transmitted within a certain * latency range. When disabled it is NULL. * @bin_count: number of bins. * @max_ts: the kernel time stamp, in ms, of the tx packet with the * highest latency.
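 * (The mean latency over the measurement window follows as
 * @sum / @counter; only the worst case is stored explicitly
 * in @max.)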
*/ struct ieee80211_tx_latency_stat { u32 max; u32 sum; u32 counter; u32 *bins; u32 bin_count; u32 max_ts; }; #endif /* CPTCFG_MAC80211_LATENCY_MEASUREMENTS */ /* Value to indicate no TID reservation */ #define IEEE80211_TID_UNRESERVED 0xff #define IEEE80211_FAST_XMIT_MAX_IV 18 /** * struct ieee80211_fast_tx - TX fastpath information * @key: key to use for hw crypto * @hdr: the 802.11 header to put with the frame * @hdr_len: actual 802.11 header length * @sa_offs: offset of the SA * @da_offs: offset of the DA * @pn_offs: offset where to put PN for crypto (or 0 if not needed) * @band: band this will be transmitted on, for tx_info * @rcu_head: RCU head to free this struct * * This struct is small enough so that the common case (maximum crypto * header length of 8 like for CCMP/GCMP) fits into a single 64-byte * cache line. */ struct ieee80211_fast_tx { struct ieee80211_key *key; u8 hdr_len; u8 sa_offs, da_offs, pn_offs; u8 band; u8 hdr[30 + 2 + IEEE80211_FAST_XMIT_MAX_IV + sizeof(rfc1042_header)] __aligned(2); struct rcu_head rcu_head; }; /** * struct ieee80211_fast_rx - RX fastpath information * @dev: netdevice for reporting the SKB * @vif_type: (P2P-less) interface type of the original sdata (sdata->vif.type) * @vif_addr: interface address * @rfc1042_hdr: copy of the RFC 1042 SNAP header (to have in cache) * @control_port_protocol: control port protocol copied from sdata * @expected_ds_bits: from/to DS bits expected * @icv_len: length of the MIC if present * @key: bool indicating encryption is expected (key is set) * @sta_notify: notify the MLME code (once) * @internal_forward: forward frames internally on AP/VLAN type interfaces * @uses_rss: copy of USES_RSS hw flag * @da_offs: offset of the DA in the header (for header conversion) * @sa_offs: offset of the SA in the header (for header conversion) * @rcu_head: RCU head for freeing this structure */ struct ieee80211_fast_rx { struct net_device *dev; enum nl80211_iftype vif_type; u8 vif_addr[ETH_ALEN] __aligned(2); u8 rfc1042_hdr[6] __aligned(2); __be16 control_port_protocol; __le16 expected_ds_bits; u8 icv_len; u8 key:1, sta_notify:1, #ifdef CPTCFG_IWLMVM_VENDOR_CMDS drop_grat_arp_unsol_na:1, #endif internal_forward:1, uses_rss:1; u8 da_offs, sa_offs; struct rcu_head rcu_head; }; /* we use only values in the range 0-100, so pick a large precision */ DECLARE_EWMA(mesh_fail_avg, 20, 8) /** * struct mesh_sta - mesh STA information * @plink_lock: serialize access to plink fields * @llid: Local link ID * @plid: Peer link ID * @aid: local aid supplied by peer * @reason: Cancel reason on PLINK_HOLDING state * @plink_retries: Retries in establishment * @plink_state: peer link state * @plink_timeout: timeout of peer link * @plink_timer: peer link watch timer * @plink_sta: peer link watch timer's sta_info * @t_offset: timing offset relative to this host * @t_offset_setpoint: reference timing offset of this sta to be used when * calculating clockdrift * @local_pm: local link-specific power save mode * @peer_pm: peer-specific power save mode towards local STA * @nonpeer_pm: STA power save mode towards non-peer neighbors * @processed_beacon: set to true after peer rates and capabilities are * processed * @connected_to_gate: true if mesh STA has a path to a mesh gate * @fail_avg: moving percentage of failed MSDUs */ struct mesh_sta { struct timer_list plink_timer; struct sta_info *plink_sta; s64 t_offset; s64 t_offset_setpoint; spinlock_t plink_lock; u16 llid; u16 plid; u16 aid; u16 reason; u8 plink_retries; bool processed_beacon; bool connected_to_gate;
enum nl80211_plink_state plink_state; u32 plink_timeout; /* mesh power save */ enum nl80211_mesh_power_mode local_pm; enum nl80211_mesh_power_mode peer_pm; enum nl80211_mesh_power_mode nonpeer_pm; /* moving percentage of failed MSDUs */ struct ewma_mesh_fail_avg fail_avg; }; DECLARE_EWMA(signal, 10, 8) struct ieee80211_sta_rx_stats { unsigned long packets; unsigned long last_rx; unsigned long num_duplicates; unsigned long fragments; unsigned long dropped; int last_signal; u8 chains; s8 chain_signal_last[IEEE80211_MAX_CHAINS]; u32 last_rate; struct u64_stats_sync syncp; u64 bytes; u64 msdu[IEEE80211_NUM_TIDS + 1]; }; /* * The bandwidth threshold below which the per-station CoDel parameters will be * scaled to be more lenient (to prevent starvation of slow stations). This * value will be scaled by the number of active stations when it is being * applied. */ #define STA_SLOW_THRESHOLD 6000 /* 6 Mbps */ /** * struct sta_info - STA information * * This structure collects information about a station that * mac80211 is communicating with. * * @list: global linked list entry * @free_list: list entry for keeping track of stations to free * @hash_node: hash node for rhashtable * @addr: station's MAC address - duplicated from public part to * let the hash table work with just a single cacheline * @local: pointer to the global information * @sdata: virtual interface this station belongs to * @ptk: peer keys negotiated with this station, if any * @ptk_idx: last installed peer key index * @gtk: group keys negotiated with this station, if any * @rate_ctrl: rate control algorithm reference * @rate_ctrl_lock: spinlock used to protect rate control data * (data inside the algorithm, so serializes calls there) * @rate_ctrl_priv: rate control private per-STA pointer * @lock: used for locking all fields that require locking, see comments * in the header file. 
* @drv_deliver_wk: used for delivering frames after driver PS unblocking * @listen_interval: listen interval of this station, when we're acting as AP * @_flags: STA flags, see &enum ieee80211_sta_info_flags, do not use directly * @ps_lock: used for powersave (when mac80211 is the AP) related locking * @ps_tx_buf: buffers (per AC) of frames to transmit to this station * when it leaves power saving state or polls * @tx_filtered: buffers (per AC) of frames we already tried to * transmit but were filtered by hardware due to STA having * entered power saving state, these are also delivered to * the station when it leaves powersave or polls for frames * @driver_buffered_tids: bitmap of TIDs the driver has data buffered on * @txq_buffered_tids: bitmap of TIDs that mac80211 has txq data buffered on * @last_connected: time (in seconds) when a station got connected * @last_seq_ctrl: last received seq/frag number from this STA (per TID * plus one for non-QoS frames) * @tid_seq: per-TID sequence numbers for sending to this STA * @ampdu_mlme: A-MPDU state machine state * @mesh: mesh STA information * @debugfs_dir: debug filesystem directory dentry * @dead: set to true when sta is unlinked * @removed: set to true when sta is being removed from sta_list * @uploaded: set to true when sta is uploaded to the driver * @sta: station information we share with the driver * @sta_state: duplicates information about station state (for debug) * @rcu_head: RCU head used for freeing this station struct * @cur_max_bandwidth: maximum bandwidth to use for TX to the station, * taken from HT/VHT capabilities or VHT operating mode notification * @known_smps_mode: the smps_mode the client thinks we are in. Relevant for * AP only. * @cipher_scheme: optional cipher scheme for this station * @cparams: CoDel parameters for this station. * @reserved_tid: reserved TID (if any, otherwise IEEE80211_TID_UNRESERVED) * @fast_tx: TX fastpath information * @fast_rx: RX fastpath information * @tdls_chandef: a TDLS peer can have a wider chandef that is compatible to * the BSS one. 
* @tx_stats: TX statistics * @rx_stats: RX statistics * @pcpu_rx_stats: per-CPU RX statistics, assigned only if the driver needs * this (by advertising the USES_RSS hw flag) * @status_stats: TX status statistics */ struct sta_info { /* General information, mostly static */ struct list_head list, free_list; struct rcu_head rcu_head; struct rhlist_head hash_node; u8 addr[ETH_ALEN]; struct ieee80211_local *local; struct ieee80211_sub_if_data *sdata; struct ieee80211_key __rcu *gtk[NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS]; struct ieee80211_key __rcu *ptk[NUM_DEFAULT_KEYS]; u8 ptk_idx; struct rate_control_ref *rate_ctrl; void *rate_ctrl_priv; spinlock_t rate_ctrl_lock; spinlock_t lock; struct ieee80211_fast_tx __rcu *fast_tx; struct ieee80211_fast_rx __rcu *fast_rx; struct ieee80211_sta_rx_stats __percpu *pcpu_rx_stats; #ifdef CPTCFG_MAC80211_MESH struct mesh_sta *mesh; #endif struct work_struct drv_deliver_wk; u16 listen_interval; bool dead; bool removed; bool uploaded; enum ieee80211_sta_state sta_state; /* use the accessors defined below */ unsigned long _flags; /* STA powersave lock and frame queues */ spinlock_t ps_lock; struct sk_buff_head ps_tx_buf[IEEE80211_NUM_ACS]; struct sk_buff_head tx_filtered[IEEE80211_NUM_ACS]; unsigned long driver_buffered_tids; unsigned long txq_buffered_tids; long last_connected; /* Updated from RX path only, no locking requirements */ struct ieee80211_sta_rx_stats rx_stats; struct { struct ewma_signal signal; struct ewma_signal chain_signal[IEEE80211_MAX_CHAINS]; } rx_stats_avg; /* Plus 1 for non-QoS frames */ __le16 last_seq_ctrl[IEEE80211_NUM_TIDS + 1]; /* Updated from TX status path only, no locking requirements */ struct { unsigned long filtered; unsigned long retry_failed, retry_count; unsigned int lost_packets; unsigned long last_tdls_pkt_time; u64 msdu_retries[IEEE80211_NUM_TIDS + 1]; u64 msdu_failed[IEEE80211_NUM_TIDS + 1]; unsigned long last_ack; s8 last_ack_signal; bool ack_signal_filled; struct ewma_avg_signal avg_ack_signal; } status_stats; /* Updated from TX path only, no locking requirements */ struct { u64 packets[IEEE80211_NUM_ACS]; u64 bytes[IEEE80211_NUM_ACS]; struct ieee80211_tx_rate last_rate; u64 msdu[IEEE80211_NUM_TIDS + 1]; } tx_stats; u16 tid_seq[IEEE80211_QOS_CTL_TID_MASK + 1]; /* * Aggregation information, locked with lock. */ struct sta_ampdu_mlme ampdu_mlme; #ifdef CPTCFG_MAC80211_LATENCY_MEASUREMENTS struct ieee80211_tx_consec_loss_stat *tx_consec; struct ieee80211_tx_latency_stat *tx_lat; u32 *tx_lat_thrshld; #endif /* CPTCFG_MAC80211_LATENCY_MEASUREMENTS */ #ifdef CPTCFG_MAC80211_DEBUGFS struct dentry *debugfs_dir; #endif enum ieee80211_sta_rx_bandwidth cur_max_bandwidth; enum ieee80211_smps_mode known_smps_mode; const struct ieee80211_cipher_scheme *cipher_scheme; struct codel_params cparams; u8 reserved_tid; struct cfg80211_chan_def tdls_chandef; /* keep last! 
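 * struct ieee80211_sta ends in a variable-length drv_priv[]
 * area sized at allocation time (see sta_info_alloc()), so it
 * must remain the final member here.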
*/ struct ieee80211_sta sta; }; static inline enum nl80211_plink_state sta_plink_state(struct sta_info *sta) { #ifdef CPTCFG_MAC80211_MESH return sta->mesh->plink_state; #endif return NL80211_PLINK_LISTEN; } static inline void set_sta_flag(struct sta_info *sta, enum ieee80211_sta_info_flags flag) { WARN_ON(flag == WLAN_STA_AUTH || flag == WLAN_STA_ASSOC || flag == WLAN_STA_AUTHORIZED); set_bit(flag, &sta->_flags); } static inline void clear_sta_flag(struct sta_info *sta, enum ieee80211_sta_info_flags flag) { WARN_ON(flag == WLAN_STA_AUTH || flag == WLAN_STA_ASSOC || flag == WLAN_STA_AUTHORIZED); clear_bit(flag, &sta->_flags); } static inline int test_sta_flag(struct sta_info *sta, enum ieee80211_sta_info_flags flag) { return test_bit(flag, &sta->_flags); } static inline int test_and_clear_sta_flag(struct sta_info *sta, enum ieee80211_sta_info_flags flag) { WARN_ON(flag == WLAN_STA_AUTH || flag == WLAN_STA_ASSOC || flag == WLAN_STA_AUTHORIZED); return test_and_clear_bit(flag, &sta->_flags); } static inline int test_and_set_sta_flag(struct sta_info *sta, enum ieee80211_sta_info_flags flag) { WARN_ON(flag == WLAN_STA_AUTH || flag == WLAN_STA_ASSOC || flag == WLAN_STA_AUTHORIZED); return test_and_set_bit(flag, &sta->_flags); } int sta_info_move_state(struct sta_info *sta, enum ieee80211_sta_state new_state); static inline void sta_info_pre_move_state(struct sta_info *sta, enum ieee80211_sta_state new_state) { int ret; WARN_ON_ONCE(test_sta_flag(sta, WLAN_STA_INSERTED)); ret = sta_info_move_state(sta, new_state); WARN_ON_ONCE(ret); } void ieee80211_assign_tid_tx(struct sta_info *sta, int tid, struct tid_ampdu_tx *tid_tx); static inline struct tid_ampdu_tx * rcu_dereference_protected_tid_tx(struct sta_info *sta, int tid) { return rcu_dereference_protected(sta->ampdu_mlme.tid_tx[tid], lockdep_is_held(&sta->lock) || lockdep_is_held(&sta->ampdu_mlme.mtx)); } /* Maximum number of frames to buffer per power saving station per AC */ #define STA_MAX_TX_BUFFER 64 /* Minimum buffered frame expiry time. If STA uses listen interval that is * smaller than this value, the minimum value here is used instead. */ #define STA_TX_BUFFER_EXPIRE (10 * HZ) /* How often station data is cleaned up (e.g., expiration of buffered frames) */ #define STA_INFO_CLEANUP_INTERVAL (10 * HZ) struct rhlist_head *sta_info_hash_lookup(struct ieee80211_local *local, const u8 *addr); /* * Get a STA info, must be under RCU read lock. */ struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata, const u8 *addr); struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata, const u8 *addr); #define for_each_sta_info(local, _addr, _sta, _tmp) \ rhl_for_each_entry_rcu(_sta, _tmp, \ sta_info_hash_lookup(local, _addr), hash_node) /* * Get STA info by index, BROKEN! */ struct sta_info *sta_info_get_by_idx(struct ieee80211_sub_if_data *sdata, int idx); /* * Create a new STA info, caller owns returned structure * until sta_info_insert(). */ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, const u8 *addr, gfp_t gfp); void sta_info_free(struct ieee80211_local *local, struct sta_info *sta); /* * Insert STA info into hash table/list; returns zero, or * -EEXIST if the same MAC address is already present. * * The _rcu version returns with the RCU read lock held and the * caller must release it; the plain version manages the RCU read * lock internally and must be called without it held.
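 *
 * A minimal usage sketch (hypothetical caller, error handling
 * abbreviated):
 *
 *	sta = sta_info_alloc(sdata, addr, GFP_KERNEL);
 *	if (!sta)
 *		return -ENOMEM;
 *	... set up rates, capabilities, flags ...
 *	err = sta_info_insert(sta);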
*/ int sta_info_insert(struct sta_info *sta); int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU); int __must_check __sta_info_destroy(struct sta_info *sta); int sta_info_destroy_addr(struct ieee80211_sub_if_data *sdata, const u8 *addr); int sta_info_destroy_addr_bss(struct ieee80211_sub_if_data *sdata, const u8 *addr); void sta_info_recalc_tim(struct sta_info *sta); int sta_info_init(struct ieee80211_local *local); void sta_info_stop(struct ieee80211_local *local); /** * sta_info_flush - flush matching STA entries from the STA table * * Returns the number of removed STA entries. * * @sdata: sdata to remove all stations from * @vlans: if the given interface is an AP interface, also flush VLANs */ int __sta_info_flush(struct ieee80211_sub_if_data *sdata, bool vlans); static inline int sta_info_flush(struct ieee80211_sub_if_data *sdata) { return __sta_info_flush(sdata, false); } void sta_set_rate_info_tx(struct sta_info *sta, const struct ieee80211_tx_rate *rate, struct rate_info *rinfo); void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo, bool tidstats); u32 sta_get_expected_throughput(struct sta_info *sta); void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata, unsigned long exp_time); u8 sta_info_tx_streams(struct sta_info *sta); void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta); void ieee80211_sta_ps_deliver_poll_response(struct sta_info *sta); void ieee80211_sta_ps_deliver_uapsd(struct sta_info *sta); unsigned long ieee80211_sta_last_active(struct sta_info *sta); enum sta_stats_type { STA_STATS_RATE_TYPE_INVALID = 0, STA_STATS_RATE_TYPE_LEGACY, STA_STATS_RATE_TYPE_HT, STA_STATS_RATE_TYPE_VHT, STA_STATS_RATE_TYPE_HE, }; #define STA_STATS_FIELD_HT_MCS GENMASK( 7, 0) #define STA_STATS_FIELD_LEGACY_IDX GENMASK( 3, 0) #define STA_STATS_FIELD_LEGACY_BAND GENMASK( 7, 4) #define STA_STATS_FIELD_VHT_MCS GENMASK( 3, 0) #define STA_STATS_FIELD_VHT_NSS GENMASK( 7, 4) #define STA_STATS_FIELD_HE_MCS GENMASK( 3, 0) #define STA_STATS_FIELD_HE_NSS GENMASK( 7, 4) #define STA_STATS_FIELD_BW GENMASK(11, 8) #define STA_STATS_FIELD_SGI GENMASK(12, 12) #define STA_STATS_FIELD_TYPE GENMASK(15, 13) #define STA_STATS_FIELD_HE_RU GENMASK(18, 16) #define STA_STATS_FIELD_HE_GI GENMASK(20, 19) #define STA_STATS_FIELD_HE_DCM GENMASK(21, 21) #define STA_STATS_FIELD(_n, _v) FIELD_PREP(STA_STATS_FIELD_ ## _n, _v) #define STA_STATS_GET(_n, _v) FIELD_GET(STA_STATS_FIELD_ ## _n, _v) #define STA_STATS_RATE_INVALID 0 static inline u32 sta_stats_encode_rate(struct ieee80211_rx_status *s) { u32 r; r = STA_STATS_FIELD(BW, s->bw); if (s->enc_flags & RX_ENC_FLAG_SHORT_GI) r |= STA_STATS_FIELD(SGI, 1); switch (s->encoding) { case RX_ENC_VHT: r |= STA_STATS_FIELD(TYPE, STA_STATS_RATE_TYPE_VHT); r |= STA_STATS_FIELD(VHT_NSS, s->nss); r |= STA_STATS_FIELD(VHT_MCS, s->rate_idx); break; case RX_ENC_HT: r |= STA_STATS_FIELD(TYPE, STA_STATS_RATE_TYPE_HT); r |= STA_STATS_FIELD(HT_MCS, s->rate_idx); break; case RX_ENC_LEGACY: r |= STA_STATS_FIELD(TYPE, STA_STATS_RATE_TYPE_LEGACY); r |= STA_STATS_FIELD(LEGACY_BAND, s->band); r |= STA_STATS_FIELD(LEGACY_IDX, s->rate_idx); break; case RX_ENC_HE: r |= STA_STATS_FIELD(TYPE, STA_STATS_RATE_TYPE_HE); r |= STA_STATS_FIELD(HE_NSS, s->nss); r |= STA_STATS_FIELD(HE_MCS, s->rate_idx); r |= STA_STATS_FIELD(HE_GI, s->he_gi); r |= STA_STATS_FIELD(HE_RU, s->he_ru); r |= STA_STATS_FIELD(HE_DCM, s->he_dcm); break; default: WARN_ON(1); return STA_STATS_RATE_INVALID; } return r; } #endif /* STA_INFO_H */ 
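/*
 * Example of the packed last_rate encoding above (illustrative only):
 * an HT MCS 7 frame received at 40 MHz with a short GI is stored
 * roughly as
 *	STA_STATS_FIELD(TYPE, STA_STATS_RATE_TYPE_HT) |
 *	STA_STATS_FIELD(HT_MCS, 7) |
 *	STA_STATS_FIELD(SGI, 1) |
 *	STA_STATS_FIELD(BW, RATE_INFO_BW_40)
 * and unpacked again via STA_STATS_GET() in sta_stats_decode_rate().
 */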
backport-iwlwifi-dkms-7906/net/mac80211/status.c000066400000000000000000001160341351064634500213170ustar00rootroot00000000000000/* * Copyright 2002-2005, Instant802 Networks, Inc. * Copyright 2005-2006, Devicescape Software, Inc. * Copyright 2006-2007 Jiri Benc * Copyright 2008-2010 Johannes Berg * Copyright 2013-2015 Intel Mobile Communications GmbH * Copyright 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/export.h> #include <linux/etherdevice.h> #include <net/mac80211.h> #include <asm/unaligned.h> #include "ieee80211_i.h" #include "rate.h" #include "mesh.h" #include "led.h" #include "wme.h" void ieee80211_tx_status_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb) { struct ieee80211_local *local = hw_to_local(hw); struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); int tmp; skb->pkt_type = IEEE80211_TX_STATUS_MSG; skb_queue_tail(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS ? &local->skb_queue : &local->skb_queue_unreliable, skb); tmp = skb_queue_len(&local->skb_queue) + skb_queue_len(&local->skb_queue_unreliable); while (tmp > IEEE80211_IRQSAFE_QUEUE_LIMIT && (skb = skb_dequeue(&local->skb_queue_unreliable))) { ieee80211_free_txskb(hw, skb); tmp--; I802_DEBUG_INC(local->tx_status_drop); } tasklet_schedule(&local->tasklet); } EXPORT_SYMBOL(ieee80211_tx_status_irqsafe); static void ieee80211_handle_filtered_frame(struct ieee80211_local *local, struct sta_info *sta, struct sk_buff *skb) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_hdr *hdr = (void *)skb->data; int ac; if (info->flags & (IEEE80211_TX_CTL_NO_PS_BUFFER | IEEE80211_TX_CTL_AMPDU)) { ieee80211_free_txskb(&local->hw, skb); return; } /* * This skb 'survived' a round-trip through the driver, and * hopefully the driver didn't mangle it too badly. However, * we can definitely not rely on the control information * being correct. Clear it so we don't get junk there, and * indicate that it needs new processing, but must not be * modified/encrypted again. */ memset(&info->control, 0, sizeof(info->control)); info->control.jiffies = jiffies; info->control.vif = &sta->sdata->vif; info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING | IEEE80211_TX_INTFL_RETRANSMISSION; info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS; sta->status_stats.filtered++; /* * Clear more-data bit on filtered frames, it might be set * but later frames might time out so it might have to be * cleared again ... It's all rather unlikely (this frame * should time out first, right?) but let's not confuse * peers unnecessarily. */ if (hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_MOREDATA)) hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_MOREDATA); if (ieee80211_is_data_qos(hdr->frame_control)) { u8 *p = ieee80211_get_qos_ctl(hdr); int tid = *p & IEEE80211_QOS_CTL_TID_MASK; /* * Clear EOSP if set, this could happen e.g. * if an absence period (us being a P2P GO) * shortens the SP. */ if (*p & IEEE80211_QOS_CTL_EOSP) *p &= ~IEEE80211_QOS_CTL_EOSP; ac = ieee80211_ac_from_tid(tid); } else { ac = IEEE80211_AC_BE; } /* * Clear the TX filter mask for this STA when sending the next * packet. If the STA went to power save mode, this will happen * when it wakes up for the next time.
*/ set_sta_flag(sta, WLAN_STA_CLEAR_PS_FILT); ieee80211_clear_fast_xmit(sta); /* * This code races in the following way: * * (1) STA sends frame indicating it will go to sleep and does so * (2) hardware/firmware adds STA to filter list, passes frame up * (3) hardware/firmware processes TX fifo and suppresses a frame * (4) we get TX status before having processed the frame and * knowing that the STA has gone to sleep. * * This is actually quite unlikely even when both those events are * processed from interrupts coming in quickly after one another or * even at the same time because we queue both TX status events and * RX frames to be processed by a tasklet and process them in the * same order that they were received or TX status last. Hence, there * is no race as long as the frame RX is processed before the next TX * status, which drivers can ensure, see below. * * Note that this can only happen if the hardware or firmware can * actually add STAs to the filter list, if this is done by the * driver in response to set_tim() (which will only reduce the race * this whole filtering tries to solve, not completely solve it) * this situation cannot happen. * * To completely solve this race drivers need to make sure that they * (a) don't mix the irq-safe/not irq-safe TX status/RX processing * functions and * (b) always process RX events before TX status events if ordering * can be unknown, for example with different interrupt status * bits. * (c) if PS mode transitions are manual (i.e. the flag * %IEEE80211_HW_AP_LINK_PS is set), always process PS state * changes before calling TX status events if ordering can be * unknown. */ if (test_sta_flag(sta, WLAN_STA_PS_STA) && skb_queue_len(&sta->tx_filtered[ac]) < STA_MAX_TX_BUFFER) { skb_queue_tail(&sta->tx_filtered[ac], skb); sta_info_recalc_tim(sta); if (!timer_pending(&local->sta_cleanup)) mod_timer(&local->sta_cleanup, round_jiffies(jiffies + STA_INFO_CLEANUP_INTERVAL)); return; } if (!test_sta_flag(sta, WLAN_STA_PS_STA) && !(info->flags & IEEE80211_TX_INTFL_RETRIED)) { /* Software retry the packet once */ info->flags |= IEEE80211_TX_INTFL_RETRIED; ieee80211_add_pending_skb(local, skb); return; } ps_dbg_ratelimited(sta->sdata, "dropped TX filtered frame, queue_len=%d PS=%d @%lu\n", skb_queue_len(&sta->tx_filtered[ac]), !!test_sta_flag(sta, WLAN_STA_PS_STA), jiffies); ieee80211_free_txskb(&local->hw, skb); } static void ieee80211_check_pending_bar(struct sta_info *sta, u8 *addr, u8 tid) { struct tid_ampdu_tx *tid_tx; tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]); if (!tid_tx || !tid_tx->bar_pending) return; tid_tx->bar_pending = false; ieee80211_send_bar(&sta->sdata->vif, addr, tid, tid_tx->failed_bar_ssn); } static void ieee80211_frame_acked(struct sta_info *sta, struct sk_buff *skb) { struct ieee80211_mgmt *mgmt = (void *) skb->data; struct ieee80211_local *local = sta->local; struct ieee80211_sub_if_data *sdata = sta->sdata; struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb); if (ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) { sta->status_stats.last_ack = jiffies; if (txinfo->status.is_valid_ack_signal) { sta->status_stats.last_ack_signal = (s8)txinfo->status.ack_signal; sta->status_stats.ack_signal_filled = true; ewma_avg_signal_add(&sta->status_stats.avg_ack_signal, -txinfo->status.ack_signal); } } if (ieee80211_is_data_qos(mgmt->frame_control)) { struct ieee80211_hdr *hdr = (void *) skb->data; u8 *qc = ieee80211_get_qos_ctl(hdr); u16 tid = qc[0] & 0xf; ieee80211_check_pending_bar(sta, hdr->addr1, tid); } if 
(ieee80211_is_action(mgmt->frame_control) && !ieee80211_has_protected(mgmt->frame_control) && mgmt->u.action.category == WLAN_CATEGORY_HT && mgmt->u.action.u.ht_smps.action == WLAN_HT_ACTION_SMPS && ieee80211_sdata_running(sdata)) { enum ieee80211_smps_mode smps_mode; switch (mgmt->u.action.u.ht_smps.smps_control) { case WLAN_HT_SMPS_CONTROL_DYNAMIC: smps_mode = IEEE80211_SMPS_DYNAMIC; break; case WLAN_HT_SMPS_CONTROL_STATIC: smps_mode = IEEE80211_SMPS_STATIC; break; case WLAN_HT_SMPS_CONTROL_DISABLED: default: /* shouldn't happen since we don't send that */ smps_mode = IEEE80211_SMPS_OFF; break; } if (sdata->vif.type == NL80211_IFTYPE_STATION) { /* * This update looks racy, but isn't -- if we come * here we've definitely got a station that we're * talking to, and on a managed interface that can * only be the AP. And the only other place updating * this variable in managed mode is before association. */ sdata->smps_mode = smps_mode; ieee80211_queue_work(&local->hw, &sdata->recalc_smps); } else if (sdata->vif.type == NL80211_IFTYPE_AP || sdata->vif.type == NL80211_IFTYPE_AP_VLAN) { sta->known_smps_mode = smps_mode; } } } static void ieee80211_set_bar_pending(struct sta_info *sta, u8 tid, u16 ssn) { struct tid_ampdu_tx *tid_tx; tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]); if (!tid_tx) return; tid_tx->failed_bar_ssn = ssn; tid_tx->bar_pending = true; } static int ieee80211_tx_radiotap_len(struct ieee80211_tx_info *info) { int len = sizeof(struct ieee80211_radiotap_header); /* IEEE80211_RADIOTAP_RATE rate */ if (info->status.rates[0].idx >= 0 && !(info->status.rates[0].flags & (IEEE80211_TX_RC_MCS | IEEE80211_TX_RC_VHT_MCS))) len += 2; /* IEEE80211_RADIOTAP_TX_FLAGS */ len += 2; /* IEEE80211_RADIOTAP_DATA_RETRIES */ len += 1; /* IEEE80211_RADIOTAP_MCS * IEEE80211_RADIOTAP_VHT */ if (info->status.rates[0].idx >= 0) { if (info->status.rates[0].flags & IEEE80211_TX_RC_MCS) len += 3; else if (info->status.rates[0].flags & IEEE80211_TX_RC_VHT_MCS) len = ALIGN(len, 2) + 12; } return len; } static void ieee80211_add_tx_radiotap_header(struct ieee80211_local *local, struct ieee80211_supported_band *sband, struct sk_buff *skb, int retry_count, int rtap_len, int shift) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; struct ieee80211_radiotap_header *rthdr; unsigned char *pos; u16 txflags; rthdr = skb_push(skb, rtap_len); memset(rthdr, 0, rtap_len); rthdr->it_len = cpu_to_le16(rtap_len); rthdr->it_present = cpu_to_le32((1 << IEEE80211_RADIOTAP_TX_FLAGS) | (1 << IEEE80211_RADIOTAP_DATA_RETRIES)); pos = (unsigned char *)(rthdr + 1); /* * XXX: Once radiotap gets the bitmap reset thing the vendor * extensions proposal contains, we can actually report * the whole set of tries we did. 
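	 *
	 * (Radiotap requires fields to appear in ascending bit order,
	 * each at its natural alignment, which is why the VHT block
	 * below re-aligns the write position to two bytes first.)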
*/ /* IEEE80211_RADIOTAP_RATE */ if (info->status.rates[0].idx >= 0 && !(info->status.rates[0].flags & (IEEE80211_TX_RC_MCS | IEEE80211_TX_RC_VHT_MCS))) { u16 rate; rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE); rate = sband->bitrates[info->status.rates[0].idx].bitrate; *pos = DIV_ROUND_UP(rate, 5 * (1 << shift)); /* padding for tx flags */ pos += 2; } /* IEEE80211_RADIOTAP_TX_FLAGS */ txflags = 0; if (!(info->flags & IEEE80211_TX_STAT_ACK) && !is_multicast_ether_addr(hdr->addr1)) txflags |= IEEE80211_RADIOTAP_F_TX_FAIL; if (info->status.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) txflags |= IEEE80211_RADIOTAP_F_TX_CTS; if (info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) txflags |= IEEE80211_RADIOTAP_F_TX_RTS; put_unaligned_le16(txflags, pos); pos += 2; /* IEEE80211_RADIOTAP_DATA_RETRIES */ /* for now report the total retry_count */ *pos = retry_count; pos++; if (info->status.rates[0].idx < 0) return; /* IEEE80211_RADIOTAP_MCS * IEEE80211_RADIOTAP_VHT */ if (info->status.rates[0].flags & IEEE80211_TX_RC_MCS) { rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_MCS); pos[0] = IEEE80211_RADIOTAP_MCS_HAVE_MCS | IEEE80211_RADIOTAP_MCS_HAVE_GI | IEEE80211_RADIOTAP_MCS_HAVE_BW; if (info->status.rates[0].flags & IEEE80211_TX_RC_SHORT_GI) pos[1] |= IEEE80211_RADIOTAP_MCS_SGI; if (info->status.rates[0].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) pos[1] |= IEEE80211_RADIOTAP_MCS_BW_40; if (info->status.rates[0].flags & IEEE80211_TX_RC_GREEN_FIELD) pos[1] |= IEEE80211_RADIOTAP_MCS_FMT_GF; pos[2] = info->status.rates[0].idx; pos += 3; } else if (info->status.rates[0].flags & IEEE80211_TX_RC_VHT_MCS) { u16 known = local->hw.radiotap_vht_details & (IEEE80211_RADIOTAP_VHT_KNOWN_GI | IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH); rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_VHT); /* required alignment from rthdr */ pos = (u8 *)rthdr + ALIGN(pos - (u8 *)rthdr, 2); /* u16 known - IEEE80211_RADIOTAP_VHT_KNOWN_* */ put_unaligned_le16(known, pos); pos += 2; /* u8 flags - IEEE80211_RADIOTAP_VHT_FLAG_* */ if (info->status.rates[0].flags & IEEE80211_TX_RC_SHORT_GI) *pos |= IEEE80211_RADIOTAP_VHT_FLAG_SGI; pos++; /* u8 bandwidth */ if (info->status.rates[0].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) *pos = 1; else if (info->status.rates[0].flags & IEEE80211_TX_RC_80_MHZ_WIDTH) *pos = 4; else if (info->status.rates[0].flags & IEEE80211_TX_RC_160_MHZ_WIDTH) *pos = 11; else /* IEEE80211_TX_RC_{20_MHZ_WIDTH,FIXME:DUP_DATA} */ *pos = 0; pos++; /* u8 mcs_nss[4] */ *pos = (ieee80211_rate_get_vht_mcs(&info->status.rates[0]) << 4) | ieee80211_rate_get_vht_nss(&info->status.rates[0]); pos += 4; /* u8 coding */ pos++; /* u8 group_id */ pos++; /* u16 partial_aid */ pos += 2; } } /* * Handles the tx for TDLS teardown frames. 
* If the frame wasn't ACKed by the peer - it will be re-sent through the AP */ static void ieee80211_tdls_td_tx_handle(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, u32 flags) { struct sk_buff *teardown_skb; struct sk_buff *orig_teardown_skb; bool is_teardown = false; /* Get the teardown data we need and free the lock */ spin_lock(&sdata->u.mgd.teardown_lock); teardown_skb = sdata->u.mgd.teardown_skb; orig_teardown_skb = sdata->u.mgd.orig_teardown_skb; if ((skb == orig_teardown_skb) && teardown_skb) { sdata->u.mgd.teardown_skb = NULL; sdata->u.mgd.orig_teardown_skb = NULL; is_teardown = true; } spin_unlock(&sdata->u.mgd.teardown_lock); if (is_teardown) { /* This mechanism relies on being able to get ACKs */ WARN_ON(!ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)); /* Check if peer has ACKed */ if (flags & IEEE80211_TX_STAT_ACK) { dev_kfree_skb_any(teardown_skb); } else { tdls_dbg(sdata, "TDLS Resending teardown through AP\n"); ieee80211_subif_start_xmit(teardown_skb, skb->dev); } } } static struct ieee80211_sub_if_data * ieee80211_sdata_from_skb(struct ieee80211_local *local, struct sk_buff *skb) { struct ieee80211_sub_if_data *sdata; if (skb->dev) { list_for_each_entry_rcu(sdata, &local->interfaces, list) { if (!sdata->dev) continue; if (skb->dev == sdata->dev) return sdata; } return NULL; } return rcu_dereference(local->p2p_sdata); } static void ieee80211_report_ack_skb(struct ieee80211_local *local, struct ieee80211_tx_info *info, bool acked, bool dropped) { struct sk_buff *skb; unsigned long flags; spin_lock_irqsave(&local->ack_status_lock, flags); skb = idr_remove(&local->ack_status_frames, info->ack_frame_id); spin_unlock_irqrestore(&local->ack_status_lock, flags); if (!skb) return; if (info->flags & IEEE80211_TX_INTFL_NL80211_FRAME_TX) { u64 cookie = IEEE80211_SKB_CB(skb)->ack.cookie; struct ieee80211_sub_if_data *sdata; struct ieee80211_hdr *hdr = (void *)skb->data; rcu_read_lock(); sdata = ieee80211_sdata_from_skb(local, skb); if (sdata) { if (ieee80211_is_nullfunc(hdr->frame_control) || ieee80211_is_qos_nullfunc(hdr->frame_control)) cfg80211_probe_status(sdata->dev, hdr->addr1, cookie, acked, info->status.ack_signal, info->status.is_valid_ack_signal, GFP_ATOMIC); else cfg80211_mgmt_tx_status(&sdata->wdev, cookie, skb->data, skb->len, acked, GFP_ATOMIC); } rcu_read_unlock(); dev_kfree_skb_any(skb); } else if (dropped) { dev_kfree_skb_any(skb); } else { /* consumes skb */ skb_complete_wifi_ack(skb, acked); } } static void ieee80211_report_used_skb(struct ieee80211_local *local, struct sk_buff *skb, bool dropped) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_hdr *hdr = (void *)skb->data; bool acked = info->flags & IEEE80211_TX_STAT_ACK; if (dropped) acked = false; if (info->flags & IEEE80211_TX_INTFL_MLME_CONN_TX) { struct ieee80211_sub_if_data *sdata; rcu_read_lock(); sdata = ieee80211_sdata_from_skb(local, skb); if (!sdata) { skb->dev = NULL; } else { unsigned int hdr_size = ieee80211_hdrlen(hdr->frame_control); /* Check to see if packet is a TDLS teardown packet */ if (ieee80211_is_data(hdr->frame_control) && (ieee80211_get_tdls_action(skb, hdr_size) == WLAN_TDLS_TEARDOWN)) ieee80211_tdls_td_tx_handle(local, sdata, skb, info->flags); else ieee80211_mgd_conn_tx_status(sdata, hdr->frame_control, acked); } rcu_read_unlock(); } else if (info->ack_frame_id) { ieee80211_report_ack_skb(local, info, acked, dropped); } if (!dropped && skb->destructor) { #if LINUX_VERSION_IS_GEQ(3,3,0) skb->wifi_acked_valid = 1; 
skb->wifi_acked = acked; #endif } ieee80211_led_tx(local); if (skb_has_frag_list(skb)) { kfree_skb_list(skb_shinfo(skb)->frag_list); skb_shinfo(skb)->frag_list = NULL; } } #ifdef CPTCFG_MAC80211_LATENCY_MEASUREMENTS static void update_consec_bins(u32 *bins, u32 *bin_ranges, int bin_range_count, int msrmnt) { int i; for (i = bin_range_count - 1; 0 <= i; i--) { if (bin_ranges[i] <= msrmnt) { bins[i]++; break; } } } /* * Measure how many tx frames were consecutively lost, or were sent * successfully but their latency passed a certain threshold and they are * therefore considered lost. */ static void tx_consec_loss_msrmnt(struct ieee80211_tx_consec_loss_ranges *tx_consec, struct sta_info *sta, int tid, u32 msrmnt, int pkt_loss, bool send_failed) { u32 bin_range_count; u32 *bin_ranges; struct ieee80211_tx_consec_loss_stat *tx_csc; /* assert Tx consecutive packet loss stats are enabled */ if (!tx_consec) return; tx_csc = &sta->tx_consec[tid]; bin_range_count = tx_consec->n_ranges; bin_ranges = tx_consec->ranges; /* * count how many Tx frames were consecutively lost within the * appropriate range */ if (send_failed || (!send_failed && tx_consec->late_threshold < msrmnt)) { tx_csc->consec_total_loss++; } else { update_consec_bins(tx_csc->total_loss_bins, bin_ranges, bin_range_count, tx_csc->consec_total_loss); tx_csc->consec_total_loss = 0; } /* count sent successfully && before packets were lost */ if (!send_failed && pkt_loss) update_consec_bins(tx_csc->loss_bins, bin_ranges, bin_range_count, pkt_loss); /* * count how many consecutive Tx packet latencies were greater than * late threshold within the appropriate range * (and are considered lost even though they were sent successfully) */ if (send_failed) /* only count packets sent successfully */ return; if (tx_consec->late_threshold < msrmnt) { tx_csc->consec_late_loss++; } else { update_consec_bins(tx_csc->late_bins, bin_ranges, bin_range_count, tx_csc->consec_late_loss); tx_csc->consec_late_loss = 0; } } /* * Measure Tx frames latency.
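 *
 * Worked example (hypothetical configuration): with tx_latency->ranges =
 * {10, 50} (ms) and n_ranges = 2, a 30ms measurement increments
 * tx_lat->bins[1], while a 70ms one falls through the scan loop below and
 * lands in the overflow bin tx_lat->bins[2] - the bins array therefore
 * needs n_ranges + 1 entries.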
*/ static void tx_latency_msrmnt(struct ieee80211_tx_latency_bin_ranges *tx_latency, struct sta_info *sta, int tid, u32 msrmnt) { int bin_range_count, i; u32 *bin_ranges; struct ieee80211_tx_latency_stat *tx_lat; if (!tx_latency) return; tx_lat = &sta->tx_lat[tid]; if (tx_lat->max < msrmnt) { /* update stats */ tx_lat->max = msrmnt; tx_lat->max_ts = ktime_to_ms(ktime_get()); } tx_lat->counter++; tx_lat->sum += msrmnt; if (!tx_lat->bins) /* bins not activated */ return; /* count how many Tx frames were transmitted with the appropriate latency */ bin_range_count = tx_latency->n_ranges; bin_ranges = tx_latency->ranges; for (i = 0; i < bin_range_count; i++) { if (msrmnt <= bin_ranges[i]) { tx_lat->bins[i]++; break; } } if (i == bin_range_count) /* msrmnt is bigger than the biggest range */ tx_lat->bins[i]++; } #ifdef CPTCFG_NL80211_TESTMODE static void tx_latency_threshold(struct ieee80211_local *local, struct sk_buff *skb, struct ieee80211_tx_latency_threshold *tx_thrshld, struct sta_info *sta, int tid, u32 msrmnt) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; /* * Make sure that tx_threshold is enabled * for this iface && tid */ if (!tx_thrshld || !sta->tx_lat_thrshld || !sta->tx_lat_thrshld[tid]) return; if (sta->tx_lat_thrshld[tid] < msrmnt) { struct ieee80211_event event = { .type = TX_LATENCY_EVENT, .u.tx_lat.mode = tx_thrshld->monitor_record_mode, .u.tx_lat.monitor_collec_wind = tx_thrshld->monitor_collec_wind, .u.tx_lat.pkt_start = ktime_to_ms(skb->tstamp), .u.tx_lat.pkt_end = ktime_to_ms(skb->tstamp) + msrmnt, .u.tx_lat.msrmnt = msrmnt, .u.tx_lat.tid = tid, .u.tx_lat.seq = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4, }; drv_event_callback(local, sta->sdata, &event); } } #endif static u32 ieee80211_calc_tx_latency(struct ieee80211_local *local, ktime_t skb_arv) { s64 tmp; s64 ts[IEEE80211_TX_LAT_MAX_POINT]; u32 msrmnt; ts[IEEE80211_TX_LAT_DEL] = ktime_to_ms(ktime_get()); /* extract previous time stamps */ ts[IEEE80211_TX_LAT_ENTER] = ktime_to_ns(skb_arv) >> 32; tmp = ktime_to_ns(skb_arv) & 0xFFFFFFFF; ts[IEEE80211_TX_LAT_WRITE] = (tmp >> 16) + ts[IEEE80211_TX_LAT_ENTER]; ts[IEEE80211_TX_LAT_ACK] = (tmp & 0xFFFF) + ts[IEEE80211_TX_LAT_ENTER]; /* calculate packet latency */ msrmnt = ts[local->tx_msrmnt_points[1]] - ts[local->tx_msrmnt_points[0]]; return msrmnt; } /* * 1) Measure Tx frame completion and removal time for Tx latency statistics * calculation. A single Tx frame latency should be measured from when it * enters the kernel until we receive the Tx complete confirmation indication * and remove the skb. * 2) Measure consecutive Tx frames that were lost, or whose latency passed * a certain threshold and are therefore considered lost.
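 *
 * Note the packed timestamp format consumed here (decoded by
 * ieee80211_calc_tx_latency() above): skb->tstamp is reused as a container
 * with bits 63..32 holding the enter time in ms, bits 31..16 the
 * write-delta and bits 15..0 the ACK-delta relative to it, so three of
 * the four measurement points travel in a single 64-bit value.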
*/ static void ieee80211_collect_tx_timing_stats(struct ieee80211_local *local, struct sk_buff *skb, struct sta_info *sta, struct ieee80211_hdr *hdr, int pkt_loss, bool send_fail) { u32 msrmnt; u16 tid; __le16 fc; struct ieee80211_tx_latency_bin_ranges *tx_latency; struct ieee80211_tx_consec_loss_ranges *tx_consec; struct ieee80211_tx_latency_threshold *tx_thrshld; ktime_t skb_arv = skb->tstamp; tx_latency = rcu_dereference(local->tx_latency); tx_consec = rcu_dereference(local->tx_consec); tx_thrshld = rcu_dereference(local->tx_threshold); /* * assert Tx latency or Tx consecutive packet loss stats are enabled * & frame arrived when enabled */ if ((!tx_latency && !tx_consec && !tx_thrshld) || !ktime_to_ns(skb_arv)) return; fc = hdr->frame_control; if (!ieee80211_is_data(fc)) /* make sure it is a data frame */ return; /* get frame tid */ if (ieee80211_is_data_qos(hdr->frame_control)) tid = ieee80211_get_tid(hdr); else tid = 0; /* Calculate the latency */ msrmnt = ieee80211_calc_tx_latency(local, skb_arv); /* update statistics regarding consecutive lost packets */ tx_consec_loss_msrmnt(tx_consec, sta, tid, msrmnt, pkt_loss, send_fail); /* update statistics regarding latency */ tx_latency_msrmnt(tx_latency, sta, tid, msrmnt); #ifdef CPTCFG_NL80211_TESTMODE /* * trigger retrieval of monitor logs * (if a threshold was configured & passed) */ tx_latency_threshold(local, skb, tx_thrshld, sta, tid, msrmnt); #endif } static int ieee80211_tx_lat_set_thrshld(u32 **thrshlds, u32 bitmask, u32 thrshld) { u32 alloc_size_thrshld = sizeof(u32) * IEEE80211_NUM_TIDS; u32 i; if (!*thrshlds) { *thrshlds = kzalloc(alloc_size_thrshld, GFP_KERNEL); if (!*thrshlds) return -ENOMEM; } /* Set thrshld for each tid */ for (i = 0; i < IEEE80211_NUM_TIDS; i++) if (bitmask & BIT(i)) (*thrshlds)[i] = thrshld; return 0; } /* * Configure the Tx latency threshold */ void ieee80211_tx_lat_thrshld_cfg(struct ieee80211_hw *hw, u32 thrshld, u16 tid_bitmap, u16 window, u16 mode, u32 iface) { struct ieee80211_local *local = hw_to_local(hw); struct ieee80211_tx_latency_threshold *tx_thrshld; u32 alloc_size_struct; /* cannot change config once we have stations */ if (local->num_sta) return; /* Tx threshold already enabled */ if (rcu_access_pointer(local->tx_threshold)) return; alloc_size_struct = sizeof(struct ieee80211_tx_latency_threshold); tx_thrshld = kzalloc(alloc_size_struct, GFP_KERNEL); if (!tx_thrshld) return; /* Not a valid interface */ if (!(iface & BIT(IEEE80211_TX_LATENCY_BSS)) && !(iface & BIT(IEEE80211_TX_LATENCY_P2P))) { kfree(tx_thrshld); return; } /* Check that the iface parameters are valid */ if (iface & BIT(IEEE80211_TX_LATENCY_BSS)) { if (ieee80211_tx_lat_set_thrshld(&tx_thrshld->thresholds_bss, tid_bitmap, thrshld)) { kfree(tx_thrshld); return; } } if (iface & BIT(IEEE80211_TX_LATENCY_P2P)) { if (ieee80211_tx_lat_set_thrshld(&tx_thrshld->thresholds_p2p, tid_bitmap, thrshld)) { kfree(tx_thrshld); return; } } tx_thrshld->monitor_collec_wind = window; tx_thrshld->monitor_record_mode = mode; /* Tx latency points of measurement allocation */ local->tx_msrmnt_points[0] = IEEE80211_TX_LAT_ENTER; local->tx_msrmnt_points[1] = IEEE80211_TX_LAT_DEL; rcu_assign_pointer(local->tx_threshold, tx_thrshld); synchronize_rcu(); } EXPORT_SYMBOL(ieee80211_tx_lat_thrshld_cfg); #endif /* CPTCFG_MAC80211_LATENCY_MEASUREMENTS */ /* * Use a static threshold for now, best value to be determined * by testing ... * Should it depend on: * - the # of retransmissions * - current throughput (higher value for higher tpt)?
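 *
 * Drivers whose firmware tracks link quality on its own can opt out of
 * this mechanism entirely by advertising IEEE80211_HW_REPORTS_LOW_ACK
 * and reporting directly, e.g. (illustrative call, count hypothetical):
 *
 *	ieee80211_report_low_ack(pubsta, 20);
 *
 * in which case ieee80211_lost_packet() below returns early.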
*/ #define STA_LOST_PKT_THRESHOLD 50 #define STA_LOST_TDLS_PKT_THRESHOLD 10 #define STA_LOST_TDLS_PKT_TIME (10*HZ) /* 10secs since last ACK */ static void ieee80211_lost_packet(struct sta_info *sta, struct ieee80211_tx_info *info) { /* If driver relies on its own algorithm for station kickout, skip * mac80211 packet loss mechanism. */ if (ieee80211_hw_check(&sta->local->hw, REPORTS_LOW_ACK)) return; /* This packet was aggregated but doesn't carry status info */ if ((info->flags & IEEE80211_TX_CTL_AMPDU) && !(info->flags & IEEE80211_TX_STAT_AMPDU)) return; sta->status_stats.lost_packets++; if (!sta->sta.tdls && sta->status_stats.lost_packets < STA_LOST_PKT_THRESHOLD) return; /* * If we're in TDLS mode, make sure that all STA_LOST_TDLS_PKT_THRESHOLD * of the last packets were lost, and that no ACK was received in the * last STA_LOST_TDLS_PKT_TIME ms, before triggering the CQM packet-loss * mechanism. */ if (sta->sta.tdls && (sta->status_stats.lost_packets < STA_LOST_TDLS_PKT_THRESHOLD || time_before(jiffies, sta->status_stats.last_tdls_pkt_time + STA_LOST_TDLS_PKT_TIME))) return; cfg80211_cqm_pktloss_notify(sta->sdata->dev, sta->sta.addr, sta->status_stats.lost_packets, GFP_ATOMIC); sta->status_stats.lost_packets = 0; } static int ieee80211_tx_get_rates(struct ieee80211_hw *hw, struct ieee80211_tx_info *info, int *retry_count) { int rates_idx = -1; int count = -1; int i; for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) { if ((info->flags & IEEE80211_TX_CTL_AMPDU) && !(info->flags & IEEE80211_TX_STAT_AMPDU)) { /* just the first aggr frame carry status info */ info->status.rates[i].idx = -1; info->status.rates[i].count = 0; break; } else if (info->status.rates[i].idx < 0) { break; } else if (i >= hw->max_report_rates) { /* the HW cannot have attempted that rate */ info->status.rates[i].idx = -1; info->status.rates[i].count = 0; break; } count += info->status.rates[i].count; } rates_idx = i - 1; if (count < 0) count = 0; *retry_count = count; return rates_idx; } void ieee80211_tx_monitor(struct ieee80211_local *local, struct sk_buff *skb, struct ieee80211_supported_band *sband, int retry_count, int shift, bool send_to_cooked) { struct sk_buff *skb2; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_sub_if_data *sdata; struct net_device *prev_dev = NULL; int rtap_len; /* send frame to monitor interfaces now */ rtap_len = ieee80211_tx_radiotap_len(info); if (WARN_ON_ONCE(skb_headroom(skb) < rtap_len)) { pr_err("ieee80211_tx_status: headroom too small\n"); dev_kfree_skb(skb); return; } ieee80211_add_tx_radiotap_header(local, sband, skb, retry_count, rtap_len, shift); /* XXX: is this sufficient for BPF? 
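 * The skb is re-dressed below as if it had been received: the mac header
 * is reset to point at the radiotap header, pkt_type becomes
 * PACKET_OTHERHOST, the protocol ETH_P_802_2, and skb->cb is cleared
 * before the frame is cloned to each cooked/regular monitor interface.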
*/ skb_reset_mac_header(skb); skb->ip_summed = CHECKSUM_UNNECESSARY; skb->pkt_type = PACKET_OTHERHOST; skb->protocol = htons(ETH_P_802_2); memset(skb->cb, 0, sizeof(skb->cb)); rcu_read_lock(); list_for_each_entry_rcu(sdata, &local->interfaces, list) { if (sdata->vif.type == NL80211_IFTYPE_MONITOR) { if (!ieee80211_sdata_running(sdata)) continue; if ((sdata->u.mntr.flags & MONITOR_FLAG_COOK_FRAMES) && !send_to_cooked) continue; if (prev_dev) { skb2 = skb_clone(skb, GFP_ATOMIC); if (skb2) { skb2->dev = prev_dev; netif_rx(skb2); } } prev_dev = sdata->dev; } } if (prev_dev) { skb->dev = prev_dev; netif_rx(skb); skb = NULL; } rcu_read_unlock(); dev_kfree_skb(skb); } static void __ieee80211_tx_status(struct ieee80211_hw *hw, struct ieee80211_tx_status *status) { struct sk_buff *skb = status->skb; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; struct ieee80211_local *local = hw_to_local(hw); struct ieee80211_tx_info *info = status->info; struct sta_info *sta; __le16 fc; struct ieee80211_supported_band *sband; int retry_count; int rates_idx; bool send_to_cooked; bool acked; struct ieee80211_bar *bar; int shift = 0; int tid = IEEE80211_NUM_TIDS; #ifdef CPTCFG_MAC80211_LATENCY_MEASUREMENTS int prev_loss_pkt = 0; bool send_fail = true; #endif /* CPTCFG_MAC80211_LATENCY_MEASUREMENTS */ rates_idx = ieee80211_tx_get_rates(hw, info, &retry_count); sband = local->hw.wiphy->bands[info->band]; fc = hdr->frame_control; if (status->sta) { sta = container_of(status->sta, struct sta_info, sta); shift = ieee80211_vif_get_shift(&sta->sdata->vif); if (info->flags & IEEE80211_TX_STATUS_EOSP) clear_sta_flag(sta, WLAN_STA_SP); acked = !!(info->flags & IEEE80211_TX_STAT_ACK); /* mesh Peer Service Period support */ if (ieee80211_vif_is_mesh(&sta->sdata->vif) && ieee80211_is_data_qos(fc)) ieee80211_mpsp_trigger_process( ieee80211_get_qos_ctl(hdr), sta, true, acked); if (!acked && test_sta_flag(sta, WLAN_STA_PS_STA)) { /* * The STA is in power save mode, so assume * that this TX packet failed because of that. */ ieee80211_handle_filtered_frame(local, sta, skb); return; } if (ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL) && (ieee80211_is_data(hdr->frame_control)) && (rates_idx != -1)) sta->tx_stats.last_rate = info->status.rates[rates_idx]; if ((info->flags & IEEE80211_TX_STAT_AMPDU_NO_BACK) && (ieee80211_is_data_qos(fc))) { u16 ssn; u8 *qc; qc = ieee80211_get_qos_ctl(hdr); tid = qc[0] & 0xf; ssn = ((le16_to_cpu(hdr->seq_ctrl) + 0x10) & IEEE80211_SCTL_SEQ); ieee80211_send_bar(&sta->sdata->vif, hdr->addr1, tid, ssn); } else if (ieee80211_is_data_qos(fc)) { u8 *qc = ieee80211_get_qos_ctl(hdr); tid = qc[0] & 0xf; } if (!acked && ieee80211_is_back_req(fc)) { u16 control; /* * BAR failed, store the last SSN and retry sending * the BAR when the next unicast transmission on the * same TID succeeds. 
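 *
 * Example with a hypothetical control value: a non-multi-TID BAR with
 * control == 0x3005 yields tid = (0x3005 &
 * IEEE80211_BAR_CTRL_TID_INFO_MASK) >> IEEE80211_BAR_CTRL_TID_INFO_SHIFT
 * = 3; ieee80211_set_bar_pending() then stores the SSN so that
 * ieee80211_check_pending_bar() replays the BAR after the next
 * successful unicast transmission on TID 3.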
*/ bar = (struct ieee80211_bar *) skb->data; control = le16_to_cpu(bar->control); if (!(control & IEEE80211_BAR_CTRL_MULTI_TID)) { u16 ssn = le16_to_cpu(bar->start_seq_num); tid = (control & IEEE80211_BAR_CTRL_TID_INFO_MASK) >> IEEE80211_BAR_CTRL_TID_INFO_SHIFT; ieee80211_set_bar_pending(sta, tid, ssn); } } if (info->flags & IEEE80211_TX_STAT_TX_FILTERED) { ieee80211_handle_filtered_frame(local, sta, skb); return; } else { if (!acked) sta->status_stats.retry_failed++; sta->status_stats.retry_count += retry_count; if (ieee80211_is_data_present(fc)) { if (!acked) sta->status_stats.msdu_failed[tid]++; sta->status_stats.msdu_retries[tid] += retry_count; } } rate_control_tx_status(local, sband, status); if (ieee80211_vif_is_mesh(&sta->sdata->vif)) ieee80211s_update_metric(local, sta, status); if (!(info->flags & IEEE80211_TX_CTL_INJECTED) && acked) ieee80211_frame_acked(sta, skb); if ((sta->sdata->vif.type == NL80211_IFTYPE_STATION) && ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) ieee80211_sta_tx_notify(sta->sdata, (void *) skb->data, acked, info->status.tx_time); if (ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) { if (info->flags & IEEE80211_TX_STAT_ACK) { #ifdef CPTCFG_MAC80211_LATENCY_MEASUREMENTS send_fail = false; if (sta->status_stats.lost_packets) { /* * need to keep track of the amount for * timing statistics later on */ prev_loss_pkt = sta->status_stats.lost_packets; } #endif /* CPTCFG_MAC80211_LATENCY_MEASUREMENTS */ if (sta->status_stats.lost_packets) sta->status_stats.lost_packets = 0; /* Track when last TDLS packet was ACKed */ if (test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH)) sta->status_stats.last_tdls_pkt_time = jiffies; } else { ieee80211_lost_packet(sta, info); } } #ifdef CPTCFG_MAC80211_LATENCY_MEASUREMENTS /* Measure Tx latency & Tx consecutive loss statistics */ ieee80211_collect_tx_timing_stats(local, skb, sta, hdr, prev_loss_pkt, send_fail); #endif /* CPTCFG_MAC80211_LATENCY_MEASUREMENTS */ } /* SNMP counters * Fragments are passed to low-level drivers as separate skbs, so these * are actually fragments, not frames. Update frame counters only for * the first fragment of the frame. */ if ((info->flags & IEEE80211_TX_STAT_ACK) || (info->flags & IEEE80211_TX_STAT_NOACK_TRANSMITTED)) { if (ieee80211_is_first_frag(hdr->seq_ctrl)) { I802_DEBUG_INC(local->dot11TransmittedFrameCount); if (is_multicast_ether_addr(ieee80211_get_DA(hdr))) I802_DEBUG_INC(local->dot11MulticastTransmittedFrameCount); if (retry_count > 0) I802_DEBUG_INC(local->dot11RetryCount); if (retry_count > 1) I802_DEBUG_INC(local->dot11MultipleRetryCount); } /* This counter shall be incremented for an acknowledged MPDU * with an individual address in the address 1 field or an MPDU * with a multicast address in the address 1 field of type Data * or Management. 
*/ if (!is_multicast_ether_addr(hdr->addr1) || ieee80211_is_data(fc) || ieee80211_is_mgmt(fc)) I802_DEBUG_INC(local->dot11TransmittedFragmentCount); } else { if (ieee80211_is_first_frag(hdr->seq_ctrl)) I802_DEBUG_INC(local->dot11FailedCount); } if (ieee80211_is_nullfunc(fc) && ieee80211_has_pm(fc) && ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS) && !(info->flags & IEEE80211_TX_CTL_INJECTED) && local->ps_sdata && !(local->scanning)) { if (info->flags & IEEE80211_TX_STAT_ACK) { local->ps_sdata->u.mgd.flags |= IEEE80211_STA_NULLFUNC_ACKED; } else mod_timer(&local->dynamic_ps_timer, jiffies + msecs_to_jiffies(10)); } ieee80211_report_used_skb(local, skb, false); /* this was a transmitted frame, but now we want to reuse it */ skb_orphan(skb); /* Need to make a copy before skb->cb gets cleared */ send_to_cooked = !!(info->flags & IEEE80211_TX_CTL_INJECTED) || !(ieee80211_is_data(fc)); /* * This is a bit racy but we can avoid a lot of work * with this test... */ if (!local->monitors && (!send_to_cooked || !local->cooked_mntrs)) { dev_kfree_skb(skb); return; } /* send to monitor interfaces */ ieee80211_tx_monitor(local, skb, sband, retry_count, shift, send_to_cooked); } void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; struct ieee80211_local *local = hw_to_local(hw); struct ieee80211_tx_status status = { .skb = skb, .info = IEEE80211_SKB_CB(skb), }; struct rhlist_head *tmp; struct sta_info *sta; rcu_read_lock(); for_each_sta_info(local, hdr->addr1, sta, tmp) { /* skip wrong virtual interface */ if (!ether_addr_equal(hdr->addr2, sta->sdata->vif.addr)) continue; status.sta = &sta->sta; break; } __ieee80211_tx_status(hw, &status); rcu_read_unlock(); } EXPORT_SYMBOL(ieee80211_tx_status); void ieee80211_tx_status_ext(struct ieee80211_hw *hw, struct ieee80211_tx_status *status) { struct ieee80211_local *local = hw_to_local(hw); struct ieee80211_tx_info *info = status->info; struct ieee80211_sta *pubsta = status->sta; struct ieee80211_supported_band *sband; int retry_count; bool acked, noack_success; if (status->skb) return __ieee80211_tx_status(hw, status); if (!status->sta) return; ieee80211_tx_get_rates(hw, info, &retry_count); sband = hw->wiphy->bands[info->band]; acked = !!(info->flags & IEEE80211_TX_STAT_ACK); noack_success = !!(info->flags & IEEE80211_TX_STAT_NOACK_TRANSMITTED); if (pubsta) { struct sta_info *sta; sta = container_of(pubsta, struct sta_info, sta); if (!acked) sta->status_stats.retry_failed++; sta->status_stats.retry_count += retry_count; if (acked) { sta->status_stats.last_ack = jiffies; if (sta->status_stats.lost_packets) sta->status_stats.lost_packets = 0; /* Track when last TDLS packet was ACKed */ if (test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH)) sta->status_stats.last_tdls_pkt_time = jiffies; } else if (test_sta_flag(sta, WLAN_STA_PS_STA)) { return; } else { ieee80211_lost_packet(sta, info); } rate_control_tx_status(local, sband, status); if (ieee80211_vif_is_mesh(&sta->sdata->vif)) ieee80211s_update_metric(local, sta, status); } if (acked || noack_success) { I802_DEBUG_INC(local->dot11TransmittedFrameCount); if (!pubsta) I802_DEBUG_INC(local->dot11MulticastTransmittedFrameCount); if (retry_count > 0) I802_DEBUG_INC(local->dot11RetryCount); if (retry_count > 1) I802_DEBUG_INC(local->dot11MultipleRetryCount); } else { I802_DEBUG_INC(local->dot11FailedCount); } } EXPORT_SYMBOL(ieee80211_tx_status_ext); void ieee80211_tx_rate_update(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta, 
struct ieee80211_tx_info *info) { struct ieee80211_local *local = hw_to_local(hw); struct ieee80211_supported_band *sband = hw->wiphy->bands[info->band]; struct sta_info *sta = container_of(pubsta, struct sta_info, sta); struct ieee80211_tx_status status = { .info = info, .sta = pubsta, }; rate_control_tx_status(local, sband, &status); if (ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL)) sta->tx_stats.last_rate = info->status.rates[0]; } EXPORT_SYMBOL(ieee80211_tx_rate_update); void ieee80211_report_low_ack(struct ieee80211_sta *pubsta, u32 num_packets) { struct sta_info *sta = container_of(pubsta, struct sta_info, sta); cfg80211_cqm_pktloss_notify(sta->sdata->dev, sta->sta.addr, num_packets, GFP_ATOMIC); } EXPORT_SYMBOL(ieee80211_report_low_ack); void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb) { struct ieee80211_local *local = hw_to_local(hw); ieee80211_report_used_skb(local, skb, true); dev_kfree_skb_any(skb); } EXPORT_SYMBOL(ieee80211_free_txskb); void ieee80211_purge_tx_queue(struct ieee80211_hw *hw, struct sk_buff_head *skbs) { struct sk_buff *skb; while ((skb = __skb_dequeue(skbs))) ieee80211_free_txskb(hw, skb); } backport-iwlwifi-dkms-7906/net/mac80211/tdls.c000066400000000000000000001571411351064634500207450ustar00rootroot00000000000000/* * mac80211 TDLS handling code * * Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net> * Copyright 2014, Intel Corporation * Copyright 2014 Intel Mobile Communications GmbH * Copyright 2015 - 2016 Intel Deutschland GmbH * Copyright (C) 2019 Intel Corporation * * This file is GPLv2 as found in COPYING. */ #include <linux/ieee80211.h> #include <linux/log2.h> #include <net/cfg80211.h> #include <linux/rtnetlink.h> #include "ieee80211_i.h" #include "driver-ops.h" #include "rate.h" #include "wme.h" /* give usermode some time for retries in setting up the TDLS session */ #define TDLS_PEER_SETUP_TIMEOUT (15 * HZ) void ieee80211_tdls_peer_del_work(struct work_struct *wk) { struct ieee80211_sub_if_data *sdata; struct ieee80211_local *local; sdata = container_of(wk, struct ieee80211_sub_if_data, u.mgd.tdls_peer_del_work.work); local = sdata->local; mutex_lock(&local->mtx); if (!is_zero_ether_addr(sdata->u.mgd.tdls_peer)) { tdls_dbg(sdata, "TDLS del peer %pM\n", sdata->u.mgd.tdls_peer); sta_info_destroy_addr(sdata, sdata->u.mgd.tdls_peer); eth_zero_addr(sdata->u.mgd.tdls_peer); } mutex_unlock(&local->mtx); } static void ieee80211_tdls_add_ext_capab(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) { struct ieee80211_local *local = sdata->local; struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; bool chan_switch = local->hw.wiphy->features & NL80211_FEATURE_TDLS_CHANNEL_SWITCH; bool wider_band = ieee80211_hw_check(&local->hw, TDLS_WIDER_BW) && !ifmgd->tdls_wider_bw_prohibited; bool buffer_sta = ieee80211_hw_check(&local->hw, SUPPORTS_TDLS_BUFFER_STA); struct ieee80211_supported_band *sband = ieee80211_get_sband(sdata); bool vht = sband && sband->vht_cap.vht_supported; u8 *pos = skb_put(skb, 10); *pos++ = WLAN_EID_EXT_CAPABILITY; *pos++ = 8; /* len */ *pos++ = 0x0; *pos++ = 0x0; *pos++ = 0x0; *pos++ = (chan_switch ? WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH : 0) | (buffer_sta ? WLAN_EXT_CAPA4_TDLS_BUFFER_STA : 0); *pos++ = WLAN_EXT_CAPA5_TDLS_ENABLED; *pos++ = 0; *pos++ = 0; *pos++ = (vht && wider_band) ?
WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED : 0; } static u8 ieee80211_tdls_add_subband(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, u16 start, u16 end, u16 spacing) { u8 subband_cnt = 0, ch_cnt = 0; struct ieee80211_channel *ch; struct cfg80211_chan_def chandef; int i, subband_start; struct wiphy *wiphy = sdata->local->hw.wiphy; for (i = start; i <= end; i += spacing) { if (!ch_cnt) subband_start = i; ch = ieee80211_get_channel(sdata->local->hw.wiphy, i); if (ch) { /* we will be active on the channel */ cfg80211_chandef_create(&chandef, ch, NL80211_CHAN_NO_HT); if (cfg80211_reg_can_beacon_relax(wiphy, &chandef, sdata->wdev.iftype)) { ch_cnt++; /* * check if the next channel is also part of * this allowed range */ continue; } } /* * we've reached the end of a range, with allowed channels * found */ if (ch_cnt) { u8 *pos = skb_put(skb, 2); *pos++ = ieee80211_frequency_to_channel(subband_start); *pos++ = ch_cnt; subband_cnt++; ch_cnt = 0; } } /* all channels in the requested range are allowed - add them here */ if (ch_cnt) { u8 *pos = skb_put(skb, 2); *pos++ = ieee80211_frequency_to_channel(subband_start); *pos++ = ch_cnt; subband_cnt++; } return subband_cnt; } static void ieee80211_tdls_add_supp_channels(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) { /* * Add possible channels for TDLS. These are channels that are allowed * to be active. */ u8 subband_cnt; u8 *pos = skb_put(skb, 2); *pos++ = WLAN_EID_SUPPORTED_CHANNELS; /* * 5GHz and 2GHz channels numbers can overlap. Ignore this for now, as * this doesn't happen in real world scenarios. */ /* 2GHz, with 5MHz spacing */ subband_cnt = ieee80211_tdls_add_subband(sdata, skb, 2412, 2472, 5); /* 5GHz, with 20MHz spacing */ subband_cnt += ieee80211_tdls_add_subband(sdata, skb, 5000, 5825, 20); /* length */ *pos = 2 * subband_cnt; } static void ieee80211_tdls_add_oper_classes(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) { u8 *pos; u8 op_class; if (!ieee80211_chandef_to_operating_class(&sdata->vif.bss_conf.chandef, &op_class)) return; pos = skb_put(skb, 4); *pos++ = WLAN_EID_SUPPORTED_REGULATORY_CLASSES; *pos++ = 2; /* len */ *pos++ = op_class; *pos++ = op_class; /* give current operating class as alternate too */ } static void ieee80211_tdls_add_bss_coex_ie(struct sk_buff *skb) { u8 *pos = skb_put(skb, 3); *pos++ = WLAN_EID_BSS_COEX_2040; *pos++ = 1; /* len */ *pos++ = WLAN_BSS_COEX_INFORMATION_REQUEST; } static u16 ieee80211_get_tdls_sta_capab(struct ieee80211_sub_if_data *sdata, u16 status_code) { struct ieee80211_supported_band *sband; /* The capability will be 0 when sending a failure code */ if (status_code != 0) return 0; sband = ieee80211_get_sband(sdata); if (sband && sband->band == NL80211_BAND_2GHZ) { return WLAN_CAPABILITY_SHORT_SLOT_TIME | WLAN_CAPABILITY_SHORT_PREAMBLE; } return 0; } static void ieee80211_tdls_add_link_ie(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, const u8 *peer, bool initiator) { struct ieee80211_tdls_lnkie *lnkid; const u8 *init_addr, *rsp_addr; if (initiator) { init_addr = sdata->vif.addr; rsp_addr = peer; } else { init_addr = peer; rsp_addr = sdata->vif.addr; } lnkid = skb_put(skb, sizeof(struct ieee80211_tdls_lnkie)); lnkid->ie_type = WLAN_EID_LINK_ID; lnkid->ie_len = sizeof(struct ieee80211_tdls_lnkie) - 2; memcpy(lnkid->bssid, sdata->u.mgd.bssid, ETH_ALEN); memcpy(lnkid->init_sta, init_addr, ETH_ALEN); memcpy(lnkid->resp_sta, rsp_addr, ETH_ALEN); } static void ieee80211_tdls_add_aid(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) { struct 
ieee80211_if_managed *ifmgd = &sdata->u.mgd; u8 *pos = skb_put(skb, 4); *pos++ = WLAN_EID_AID; *pos++ = 2; /* len */ put_unaligned_le16(ifmgd->aid, pos); } /* translate numbering in the WMM parameter IE to the mac80211 notation */ static enum ieee80211_ac_numbers ieee80211_ac_from_wmm(int ac) { switch (ac) { default: WARN_ON_ONCE(1); /* fall through */ case 0: return IEEE80211_AC_BE; case 1: return IEEE80211_AC_BK; case 2: return IEEE80211_AC_VI; case 3: return IEEE80211_AC_VO; } } static u8 ieee80211_wmm_aci_aifsn(int aifsn, bool acm, int aci) { u8 ret; ret = aifsn & 0x0f; if (acm) ret |= 0x10; ret |= (aci << 5) & 0x60; return ret; } static u8 ieee80211_wmm_ecw(u16 cw_min, u16 cw_max) { return ((ilog2(cw_min + 1) << 0x0) & 0x0f) | ((ilog2(cw_max + 1) << 0x4) & 0xf0); } static void ieee80211_tdls_add_wmm_param_ie(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) { struct ieee80211_wmm_param_ie *wmm; struct ieee80211_tx_queue_params *txq; int i; wmm = skb_put_zero(skb, sizeof(*wmm)); wmm->element_id = WLAN_EID_VENDOR_SPECIFIC; wmm->len = sizeof(*wmm) - 2; wmm->oui[0] = 0x00; /* Microsoft OUI 00:50:F2 */ wmm->oui[1] = 0x50; wmm->oui[2] = 0xf2; wmm->oui_type = 2; /* WME */ wmm->oui_subtype = 1; /* WME param */ wmm->version = 1; /* WME ver */ wmm->qos_info = 0; /* U-APSD not in use */ /* * Use the EDCA parameters defined for the BSS, or default if the AP * doesn't support it, as mandated by 802.11-2012 section 10.22.4 */ for (i = 0; i < IEEE80211_NUM_ACS; i++) { txq = &sdata->tx_conf[ieee80211_ac_from_wmm(i)]; wmm->ac[i].aci_aifsn = ieee80211_wmm_aci_aifsn(txq->aifs, txq->acm, i); wmm->ac[i].cw = ieee80211_wmm_ecw(txq->cw_min, txq->cw_max); wmm->ac[i].txop_limit = cpu_to_le16(txq->txop); } } static void ieee80211_tdls_chandef_vht_upgrade(struct ieee80211_sub_if_data *sdata, struct sta_info *sta) { /* IEEE802.11ac-2013 Table E-4 */ u16 centers_80mhz[] = { 5210, 5290, 5530, 5610, 5690, 5775 }; struct cfg80211_chan_def uc = sta->tdls_chandef; enum nl80211_chan_width max_width = ieee80211_sta_cap_chan_bw(sta); int i; /* only support upgrading non-narrow channels up to 80MHz */ if (max_width == NL80211_CHAN_WIDTH_5 || max_width == NL80211_CHAN_WIDTH_10) return; if (max_width > NL80211_CHAN_WIDTH_80) max_width = NL80211_CHAN_WIDTH_80; if (uc.width >= max_width) return; /* * Channel usage constraints in the IEEE802.11ac-2013 specification only * allow expanding a 20MHz channel to 80MHz in a single way. In * addition, there are no 40MHz allowed channels that are not part of * the allowed 80MHz range in the 5GHz spectrum (the relevant one here).
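 *
 * Worked example: a 20MHz chandef on channel 36 (5180 MHz) lies within
 * 30 MHz of the 5210 center, so it is tentatively widened to the 80MHz
 * block around 5210 (channels 36-48) and then downgraded in the loop
 * below until the regulatory and AP-bandwidth constraints are met.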
*/ for (i = 0; i < ARRAY_SIZE(centers_80mhz); i++) if (abs(uc.chan->center_freq - centers_80mhz[i]) <= 30) { uc.center_freq1 = centers_80mhz[i]; uc.center_freq2 = 0; uc.width = NL80211_CHAN_WIDTH_80; break; } if (!uc.center_freq1) return; /* proceed to downgrade the chandef until usable or the same as AP BW */ while (uc.width > max_width || (uc.width > sta->tdls_chandef.width && !cfg80211_reg_can_beacon_relax(sdata->local->hw.wiphy, &uc, sdata->wdev.iftype))) ieee80211_chandef_downgrade(&uc); if (!cfg80211_chandef_identical(&uc, &sta->tdls_chandef)) { tdls_dbg(sdata, "TDLS ch width upgraded %d -> %d\n", sta->tdls_chandef.width, uc.width); /* * the station is not yet authorized when BW upgrade is done, * locking is not required */ sta->tdls_chandef = uc; } } static void ieee80211_tdls_add_setup_start_ies(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, const u8 *peer, u8 action_code, bool initiator, const u8 *extra_ies, size_t extra_ies_len) { struct ieee80211_supported_band *sband; struct ieee80211_local *local = sdata->local; struct ieee80211_sta_ht_cap ht_cap; struct ieee80211_sta_vht_cap vht_cap; struct sta_info *sta = NULL; size_t offset = 0, noffset; u8 *pos; sband = ieee80211_get_sband(sdata); if (!sband) return; ieee80211_add_srates_ie(sdata, skb, false, sband->band); ieee80211_add_ext_srates_ie(sdata, skb, false, sband->band); ieee80211_tdls_add_supp_channels(sdata, skb); /* add any custom IEs that go before Extended Capabilities */ if (extra_ies_len) { static const u8 before_ext_cap[] = { WLAN_EID_SUPP_RATES, WLAN_EID_COUNTRY, WLAN_EID_EXT_SUPP_RATES, WLAN_EID_SUPPORTED_CHANNELS, WLAN_EID_RSN, }; noffset = ieee80211_ie_split(extra_ies, extra_ies_len, before_ext_cap, ARRAY_SIZE(before_ext_cap), offset); skb_put_data(skb, extra_ies + offset, noffset - offset); offset = noffset; } ieee80211_tdls_add_ext_capab(sdata, skb); /* add the QoS element if we support it */ if (local->hw.queues >= IEEE80211_NUM_ACS && action_code != WLAN_PUB_ACTION_TDLS_DISCOVER_RES) ieee80211_add_wmm_info_ie(skb_put(skb, 9), 0); /* no U-APSD */ /* add any custom IEs that go before HT capabilities */ if (extra_ies_len) { static const u8 before_ht_cap[] = { WLAN_EID_SUPP_RATES, WLAN_EID_COUNTRY, WLAN_EID_EXT_SUPP_RATES, WLAN_EID_SUPPORTED_CHANNELS, WLAN_EID_RSN, WLAN_EID_EXT_CAPABILITY, WLAN_EID_QOS_CAPA, WLAN_EID_FAST_BSS_TRANSITION, WLAN_EID_TIMEOUT_INTERVAL, WLAN_EID_SUPPORTED_REGULATORY_CLASSES, }; noffset = ieee80211_ie_split(extra_ies, extra_ies_len, before_ht_cap, ARRAY_SIZE(before_ht_cap), offset); skb_put_data(skb, extra_ies + offset, noffset - offset); offset = noffset; } mutex_lock(&local->sta_mtx); /* we should have the peer STA if we're already responding */ if (action_code == WLAN_TDLS_SETUP_RESPONSE) { sta = sta_info_get(sdata, peer); if (WARN_ON_ONCE(!sta)) { mutex_unlock(&local->sta_mtx); return; } sta->tdls_chandef = sdata->vif.bss_conf.chandef; } ieee80211_tdls_add_oper_classes(sdata, skb); /* * with TDLS we can switch channels, and HT-caps are not necessarily * the same on all bands. The specification limits the setup to a * single HT-cap, so use the current band for now. 
*/ memcpy(&ht_cap, &sband->ht_cap, sizeof(ht_cap)); if ((action_code == WLAN_TDLS_SETUP_REQUEST || action_code == WLAN_PUB_ACTION_TDLS_DISCOVER_RES) && ht_cap.ht_supported) { ieee80211_apply_htcap_overrides(sdata, &ht_cap); /* disable SMPS in TDLS initiator */ ht_cap.cap |= WLAN_HT_CAP_SM_PS_DISABLED << IEEE80211_HT_CAP_SM_PS_SHIFT; pos = skb_put(skb, sizeof(struct ieee80211_ht_cap) + 2); ieee80211_ie_build_ht_cap(pos, &ht_cap, ht_cap.cap); } else if (action_code == WLAN_TDLS_SETUP_RESPONSE && ht_cap.ht_supported && sta->sta.ht_cap.ht_supported) { /* the peer caps are already intersected with our own */ memcpy(&ht_cap, &sta->sta.ht_cap, sizeof(ht_cap)); pos = skb_put(skb, sizeof(struct ieee80211_ht_cap) + 2); ieee80211_ie_build_ht_cap(pos, &ht_cap, ht_cap.cap); } if (ht_cap.ht_supported && (ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)) ieee80211_tdls_add_bss_coex_ie(skb); ieee80211_tdls_add_link_ie(sdata, skb, peer, initiator); /* add any custom IEs that go before VHT capabilities */ if (extra_ies_len) { static const u8 before_vht_cap[] = { WLAN_EID_SUPP_RATES, WLAN_EID_COUNTRY, WLAN_EID_EXT_SUPP_RATES, WLAN_EID_SUPPORTED_CHANNELS, WLAN_EID_RSN, WLAN_EID_EXT_CAPABILITY, WLAN_EID_QOS_CAPA, WLAN_EID_FAST_BSS_TRANSITION, WLAN_EID_TIMEOUT_INTERVAL, WLAN_EID_SUPPORTED_REGULATORY_CLASSES, WLAN_EID_MULTI_BAND, }; noffset = ieee80211_ie_split(extra_ies, extra_ies_len, before_vht_cap, ARRAY_SIZE(before_vht_cap), offset); skb_put_data(skb, extra_ies + offset, noffset - offset); offset = noffset; } /* build the VHT-cap similarly to the HT-cap */ memcpy(&vht_cap, &sband->vht_cap, sizeof(vht_cap)); if ((action_code == WLAN_TDLS_SETUP_REQUEST || action_code == WLAN_PUB_ACTION_TDLS_DISCOVER_RES) && vht_cap.vht_supported) { ieee80211_apply_vhtcap_overrides(sdata, &vht_cap); /* the AID is present only when VHT is implemented */ if (action_code == WLAN_TDLS_SETUP_REQUEST) ieee80211_tdls_add_aid(sdata, skb); pos = skb_put(skb, sizeof(struct ieee80211_vht_cap) + 2); ieee80211_ie_build_vht_cap(pos, &vht_cap, vht_cap.cap); } else if (action_code == WLAN_TDLS_SETUP_RESPONSE && vht_cap.vht_supported && sta->sta.vht_cap.vht_supported) { /* the peer caps are already intersected with our own */ memcpy(&vht_cap, &sta->sta.vht_cap, sizeof(vht_cap)); /* the AID is present only when VHT is implemented */ ieee80211_tdls_add_aid(sdata, skb); pos = skb_put(skb, sizeof(struct ieee80211_vht_cap) + 2); ieee80211_ie_build_vht_cap(pos, &vht_cap, vht_cap.cap); /* * if both peers support WIDER_BW, we can expand the chandef to * a wider compatible one, up to 80MHz */ if (test_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW)) ieee80211_tdls_chandef_vht_upgrade(sdata, sta); } mutex_unlock(&local->sta_mtx); /* add any remaining IEs */ if (extra_ies_len) { noffset = extra_ies_len; skb_put_data(skb, extra_ies + offset, noffset - offset); } } static void ieee80211_tdls_add_setup_cfm_ies(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, const u8 *peer, bool initiator, const u8 *extra_ies, size_t extra_ies_len) { struct ieee80211_local *local = sdata->local; struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; size_t offset = 0, noffset; struct sta_info *sta, *ap_sta; struct ieee80211_supported_band *sband; u8 *pos; sband = ieee80211_get_sband(sdata); if (!sband) return; mutex_lock(&local->sta_mtx); sta = sta_info_get(sdata, peer); ap_sta = sta_info_get(sdata, ifmgd->bssid); if (WARN_ON_ONCE(!sta || !ap_sta)) { mutex_unlock(&local->sta_mtx); return; } sta->tdls_chandef = sdata->vif.bss_conf.chandef; /* add any custom IEs that go before the 
QoS IE */ if (extra_ies_len) { static const u8 before_qos[] = { WLAN_EID_RSN, }; noffset = ieee80211_ie_split(extra_ies, extra_ies_len, before_qos, ARRAY_SIZE(before_qos), offset); skb_put_data(skb, extra_ies + offset, noffset - offset); offset = noffset; } /* add the QoS param IE if both the peer and we support it */ if (local->hw.queues >= IEEE80211_NUM_ACS && sta->sta.wme) ieee80211_tdls_add_wmm_param_ie(sdata, skb); /* add any custom IEs that go before HT operation */ if (extra_ies_len) { static const u8 before_ht_op[] = { WLAN_EID_RSN, WLAN_EID_QOS_CAPA, WLAN_EID_FAST_BSS_TRANSITION, WLAN_EID_TIMEOUT_INTERVAL, }; noffset = ieee80211_ie_split(extra_ies, extra_ies_len, before_ht_op, ARRAY_SIZE(before_ht_op), offset); skb_put_data(skb, extra_ies + offset, noffset - offset); offset = noffset; } /* * if HT support is only added in TDLS, we need an HT-operation IE. * add the IE as required by IEEE802.11-2012 9.23.3.2. */ if (!ap_sta->sta.ht_cap.ht_supported && sta->sta.ht_cap.ht_supported) { u16 prot = IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED | IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT | IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT; pos = skb_put(skb, 2 + sizeof(struct ieee80211_ht_operation)); ieee80211_ie_build_ht_oper(pos, &sta->sta.ht_cap, &sdata->vif.bss_conf.chandef, prot, true); } ieee80211_tdls_add_link_ie(sdata, skb, peer, initiator); /* only include VHT-operation if not on the 2.4GHz band */ if (sband->band != NL80211_BAND_2GHZ && sta->sta.vht_cap.vht_supported) { /* * if both peers support WIDER_BW, we can expand the chandef to * a wider compatible one, up to 80MHz */ if (test_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW)) ieee80211_tdls_chandef_vht_upgrade(sdata, sta); pos = skb_put(skb, 2 + sizeof(struct ieee80211_vht_operation)); ieee80211_ie_build_vht_oper(pos, &sta->sta.vht_cap, &sta->tdls_chandef); } mutex_unlock(&local->sta_mtx); /* add any remaining IEs */ if (extra_ies_len) { noffset = extra_ies_len; skb_put_data(skb, extra_ies + offset, noffset - offset); } } static void ieee80211_tdls_add_chan_switch_req_ies(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, const u8 *peer, bool initiator, const u8 *extra_ies, size_t extra_ies_len, u8 oper_class, struct cfg80211_chan_def *chandef) { struct ieee80211_tdls_data *tf; size_t offset = 0, noffset; if (WARN_ON_ONCE(!chandef)) return; tf = (void *)skb->data; tf->u.chan_switch_req.target_channel = ieee80211_frequency_to_channel(chandef->chan->center_freq); tf->u.chan_switch_req.oper_class = oper_class; if (extra_ies_len) { static const u8 before_lnkie[] = { WLAN_EID_SECONDARY_CHANNEL_OFFSET, }; noffset = ieee80211_ie_split(extra_ies, extra_ies_len, before_lnkie, ARRAY_SIZE(before_lnkie), offset); skb_put_data(skb, extra_ies + offset, noffset - offset); offset = noffset; } ieee80211_tdls_add_link_ie(sdata, skb, peer, initiator); /* add any remaining IEs */ if (extra_ies_len) { noffset = extra_ies_len; skb_put_data(skb, extra_ies + offset, noffset - offset); } } static void ieee80211_tdls_add_chan_switch_resp_ies(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, const u8 *peer, u16 status_code, bool initiator, const u8 *extra_ies, size_t extra_ies_len) { if (status_code == 0) ieee80211_tdls_add_link_ie(sdata, skb, peer, initiator); if (extra_ies_len) skb_put_data(skb, extra_ies, extra_ies_len); } static void ieee80211_tdls_add_ies(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, const u8 *peer, u8 action_code, u16 status_code, bool initiator, const u8 *extra_ies, size_t extra_ies_len, u8 oper_class, struct 
cfg80211_chan_def *chandef) { switch (action_code) { case WLAN_TDLS_SETUP_REQUEST: case WLAN_TDLS_SETUP_RESPONSE: case WLAN_PUB_ACTION_TDLS_DISCOVER_RES: if (status_code == 0) ieee80211_tdls_add_setup_start_ies(sdata, skb, peer, action_code, initiator, extra_ies, extra_ies_len); break; case WLAN_TDLS_SETUP_CONFIRM: if (status_code == 0) ieee80211_tdls_add_setup_cfm_ies(sdata, skb, peer, initiator, extra_ies, extra_ies_len); break; case WLAN_TDLS_TEARDOWN: case WLAN_TDLS_DISCOVERY_REQUEST: if (extra_ies_len) skb_put_data(skb, extra_ies, extra_ies_len); if (status_code == 0 || action_code == WLAN_TDLS_TEARDOWN) ieee80211_tdls_add_link_ie(sdata, skb, peer, initiator); break; case WLAN_TDLS_CHANNEL_SWITCH_REQUEST: ieee80211_tdls_add_chan_switch_req_ies(sdata, skb, peer, initiator, extra_ies, extra_ies_len, oper_class, chandef); break; case WLAN_TDLS_CHANNEL_SWITCH_RESPONSE: ieee80211_tdls_add_chan_switch_resp_ies(sdata, skb, peer, status_code, initiator, extra_ies, extra_ies_len); break; } } static int ieee80211_prep_tdls_encap_data(struct wiphy *wiphy, struct net_device *dev, const u8 *peer, u8 action_code, u8 dialog_token, u16 status_code, struct sk_buff *skb) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_tdls_data *tf; tf = skb_put(skb, offsetof(struct ieee80211_tdls_data, u)); memcpy(tf->da, peer, ETH_ALEN); memcpy(tf->sa, sdata->vif.addr, ETH_ALEN); tf->ether_type = cpu_to_be16(ETH_P_TDLS); tf->payload_type = WLAN_TDLS_SNAP_RFTYPE; /* network header is after the ethernet header */ skb_set_network_header(skb, ETH_HLEN); switch (action_code) { case WLAN_TDLS_SETUP_REQUEST: tf->category = WLAN_CATEGORY_TDLS; tf->action_code = WLAN_TDLS_SETUP_REQUEST; skb_put(skb, sizeof(tf->u.setup_req)); tf->u.setup_req.dialog_token = dialog_token; tf->u.setup_req.capability = cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata, status_code)); break; case WLAN_TDLS_SETUP_RESPONSE: tf->category = WLAN_CATEGORY_TDLS; tf->action_code = WLAN_TDLS_SETUP_RESPONSE; skb_put(skb, sizeof(tf->u.setup_resp)); tf->u.setup_resp.status_code = cpu_to_le16(status_code); tf->u.setup_resp.dialog_token = dialog_token; tf->u.setup_resp.capability = cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata, status_code)); break; case WLAN_TDLS_SETUP_CONFIRM: tf->category = WLAN_CATEGORY_TDLS; tf->action_code = WLAN_TDLS_SETUP_CONFIRM; skb_put(skb, sizeof(tf->u.setup_cfm)); tf->u.setup_cfm.status_code = cpu_to_le16(status_code); tf->u.setup_cfm.dialog_token = dialog_token; break; case WLAN_TDLS_TEARDOWN: tf->category = WLAN_CATEGORY_TDLS; tf->action_code = WLAN_TDLS_TEARDOWN; skb_put(skb, sizeof(tf->u.teardown)); tf->u.teardown.reason_code = cpu_to_le16(status_code); break; case WLAN_TDLS_DISCOVERY_REQUEST: tf->category = WLAN_CATEGORY_TDLS; tf->action_code = WLAN_TDLS_DISCOVERY_REQUEST; skb_put(skb, sizeof(tf->u.discover_req)); tf->u.discover_req.dialog_token = dialog_token; break; case WLAN_TDLS_CHANNEL_SWITCH_REQUEST: tf->category = WLAN_CATEGORY_TDLS; tf->action_code = WLAN_TDLS_CHANNEL_SWITCH_REQUEST; skb_put(skb, sizeof(tf->u.chan_switch_req)); break; case WLAN_TDLS_CHANNEL_SWITCH_RESPONSE: tf->category = WLAN_CATEGORY_TDLS; tf->action_code = WLAN_TDLS_CHANNEL_SWITCH_RESPONSE; skb_put(skb, sizeof(tf->u.chan_switch_resp)); tf->u.chan_switch_resp.status_code = cpu_to_le16(status_code); break; default: return -EINVAL; } return 0; } static int ieee80211_prep_tdls_direct(struct wiphy *wiphy, struct net_device *dev, const u8 *peer, u8 action_code, u8 dialog_token, u16 status_code, struct sk_buff *skb) { 
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_mgmt *mgmt; mgmt = skb_put_zero(skb, 24); memcpy(mgmt->da, peer, ETH_ALEN); memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN); mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ACTION); switch (action_code) { case WLAN_PUB_ACTION_TDLS_DISCOVER_RES: skb_put(skb, 1 + sizeof(mgmt->u.action.u.tdls_discover_resp)); mgmt->u.action.category = WLAN_CATEGORY_PUBLIC; mgmt->u.action.u.tdls_discover_resp.action_code = WLAN_PUB_ACTION_TDLS_DISCOVER_RES; mgmt->u.action.u.tdls_discover_resp.dialog_token = dialog_token; mgmt->u.action.u.tdls_discover_resp.capability = cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata, status_code)); break; default: return -EINVAL; } return 0; } static struct sk_buff * ieee80211_tdls_build_mgmt_packet_data(struct ieee80211_sub_if_data *sdata, const u8 *peer, u8 action_code, u8 dialog_token, u16 status_code, bool initiator, const u8 *extra_ies, size_t extra_ies_len, u8 oper_class, struct cfg80211_chan_def *chandef) { struct ieee80211_local *local = sdata->local; struct sk_buff *skb; int ret; skb = netdev_alloc_skb(sdata->dev, local->hw.extra_tx_headroom + max(sizeof(struct ieee80211_mgmt), sizeof(struct ieee80211_tdls_data)) + 50 + /* supported rates */ 10 + /* ext capab */ 26 + /* max(WMM-info, WMM-param) */ 2 + max(sizeof(struct ieee80211_ht_cap), sizeof(struct ieee80211_ht_operation)) + 2 + max(sizeof(struct ieee80211_vht_cap), sizeof(struct ieee80211_vht_operation)) + 50 + /* supported channels */ 3 + /* 40/20 BSS coex */ 4 + /* AID */ 4 + /* oper classes */ extra_ies_len + sizeof(struct ieee80211_tdls_lnkie)); if (!skb) return NULL; skb_reserve(skb, local->hw.extra_tx_headroom); switch (action_code) { case WLAN_TDLS_SETUP_REQUEST: case WLAN_TDLS_SETUP_RESPONSE: case WLAN_TDLS_SETUP_CONFIRM: case WLAN_TDLS_TEARDOWN: case WLAN_TDLS_DISCOVERY_REQUEST: case WLAN_TDLS_CHANNEL_SWITCH_REQUEST: case WLAN_TDLS_CHANNEL_SWITCH_RESPONSE: ret = ieee80211_prep_tdls_encap_data(local->hw.wiphy, sdata->dev, peer, action_code, dialog_token, status_code, skb); break; case WLAN_PUB_ACTION_TDLS_DISCOVER_RES: ret = ieee80211_prep_tdls_direct(local->hw.wiphy, sdata->dev, peer, action_code, dialog_token, status_code, skb); break; default: ret = -ENOTSUPP; break; } if (ret < 0) goto fail; ieee80211_tdls_add_ies(sdata, skb, peer, action_code, status_code, initiator, extra_ies, extra_ies_len, oper_class, chandef); return skb; fail: dev_kfree_skb(skb); return NULL; } static int ieee80211_tdls_prep_mgmt_packet(struct wiphy *wiphy, struct net_device *dev, const u8 *peer, u8 action_code, u8 dialog_token, u16 status_code, u32 peer_capability, bool initiator, const u8 *extra_ies, size_t extra_ies_len, u8 oper_class, struct cfg80211_chan_def *chandef) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct sk_buff *skb = NULL; struct sta_info *sta; u32 flags = 0; int ret = 0; rcu_read_lock(); sta = sta_info_get(sdata, peer); /* infer the initiator if we can, to support old userspace */ switch (action_code) { case WLAN_TDLS_SETUP_REQUEST: if (sta) { set_sta_flag(sta, WLAN_STA_TDLS_INITIATOR); sta->sta.tdls_initiator = false; } /* fall-through */ case WLAN_TDLS_SETUP_CONFIRM: case WLAN_TDLS_DISCOVERY_REQUEST: initiator = true; break; case WLAN_TDLS_SETUP_RESPONSE: /* * In some testing scenarios, we send a request and response. * Make the last packet sent take effect for the initiator * value. 
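 *
 * Inference summary (editor's sketch of this switch): setup request,
 * setup confirm and discovery request mark the local side as initiator;
 * setup response and discovery response mark the peer; teardown and
 * channel-switch frames accept whatever was negotiated earlier.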
*/ if (sta) { clear_sta_flag(sta, WLAN_STA_TDLS_INITIATOR); sta->sta.tdls_initiator = true; } /* fall-through */ case WLAN_PUB_ACTION_TDLS_DISCOVER_RES: initiator = false; break; case WLAN_TDLS_TEARDOWN: case WLAN_TDLS_CHANNEL_SWITCH_REQUEST: case WLAN_TDLS_CHANNEL_SWITCH_RESPONSE: /* any value is ok */ break; default: ret = -ENOTSUPP; break; } if (sta && test_sta_flag(sta, WLAN_STA_TDLS_INITIATOR)) initiator = true; rcu_read_unlock(); if (ret < 0) goto fail; skb = ieee80211_tdls_build_mgmt_packet_data(sdata, peer, action_code, dialog_token, status_code, initiator, extra_ies, extra_ies_len, oper_class, chandef); if (!skb) { ret = -EINVAL; goto fail; } if (action_code == WLAN_PUB_ACTION_TDLS_DISCOVER_RES) { ieee80211_tx_skb(sdata, skb); return 0; } /* * According to 802.11z: Setup req/resp are sent in AC_BK, otherwise * we should default to AC_VI. */ switch (action_code) { case WLAN_TDLS_SETUP_REQUEST: case WLAN_TDLS_SETUP_RESPONSE: skb->priority = 256 + 2; break; default: skb->priority = 256 + 5; break; } skb_set_queue_mapping(skb, ieee80211_select_queue(sdata, skb)); /* * Set the WLAN_TDLS_TEARDOWN flag to indicate a teardown in progress. * Later, if no ACK is returned from peer, we will re-send the teardown * packet through the AP. */ if ((action_code == WLAN_TDLS_TEARDOWN) && ieee80211_hw_check(&sdata->local->hw, REPORTS_TX_ACK_STATUS)) { bool try_resend; /* Should we keep skb for possible resend */ /* If not sending directly to peer - no point in keeping skb */ rcu_read_lock(); sta = sta_info_get(sdata, peer); try_resend = sta && test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH); rcu_read_unlock(); spin_lock_bh(&sdata->u.mgd.teardown_lock); if (try_resend && !sdata->u.mgd.teardown_skb) { /* Mark it as requiring TX status callback */ flags |= IEEE80211_TX_CTL_REQ_TX_STATUS | IEEE80211_TX_INTFL_MLME_CONN_TX; /* * skb is copied since mac80211 will later set * properties that might not be the same as the AP, * such as encryption, QoS, addresses, etc. * * No problem if skb_copy() fails, so no need to check. */ sdata->u.mgd.teardown_skb = skb_copy(skb, GFP_ATOMIC); sdata->u.mgd.orig_teardown_skb = skb; } spin_unlock_bh(&sdata->u.mgd.teardown_lock); } /* disable bottom halves when entering the Tx path */ local_bh_disable(); __ieee80211_subif_start_xmit(skb, dev, flags); local_bh_enable(); return ret; fail: dev_kfree_skb(skb); return ret; } static int ieee80211_tdls_mgmt_setup(struct wiphy *wiphy, struct net_device *dev, const u8 *peer, u8 action_code, u8 dialog_token, u16 status_code, u32 peer_capability, bool initiator, const u8 *extra_ies, size_t extra_ies_len) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_local *local = sdata->local; enum ieee80211_smps_mode smps_mode = sdata->u.mgd.driver_smps_mode; int ret; /* don't support setup with forced SMPS mode that's not off */ if (smps_mode != IEEE80211_SMPS_AUTOMATIC && smps_mode != IEEE80211_SMPS_OFF) { tdls_dbg(sdata, "Aborting TDLS setup due to SMPS mode %d\n", smps_mode); return -ENOTSUPP; } mutex_lock(&local->mtx); /* we don't support concurrent TDLS peer setups */ if (!is_zero_ether_addr(sdata->u.mgd.tdls_peer) && !ether_addr_equal(sdata->u.mgd.tdls_peer, peer)) { ret = -EBUSY; goto out_unlock; } /* * make sure we have a STA representing the peer so we drop or buffer * non-TDLS-setup frames to the peer. We can't send other packets * during setup through the AP path. * Allow error packets to be sent - sometimes we don't even add a STA * before failing the setup. 
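 *
 * Setup flow sketch (one peer at a time, guarded by local->mtx):
 *
 *	userspace adds the peer STA (typically NL80211_CMD_NEW_STATION)
 *	-> this function records tdls_peer and sends the frame via the AP
 *	-> tdls_peer_del_work tears the peer down again if setup has not
 *	   completed within TDLS_PEER_SETUP_TIMEOUT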
*/ if (status_code == 0) { rcu_read_lock(); if (!sta_info_get(sdata, peer)) { rcu_read_unlock(); ret = -ENOLINK; goto out_unlock; } rcu_read_unlock(); } ieee80211_flush_queues(local, sdata, false); memcpy(sdata->u.mgd.tdls_peer, peer, ETH_ALEN); mutex_unlock(&local->mtx); /* we cannot take the mutex while preparing the setup packet */ ret = ieee80211_tdls_prep_mgmt_packet(wiphy, dev, peer, action_code, dialog_token, status_code, peer_capability, initiator, extra_ies, extra_ies_len, 0, NULL); if (ret < 0) { mutex_lock(&local->mtx); eth_zero_addr(sdata->u.mgd.tdls_peer); mutex_unlock(&local->mtx); return ret; } ieee80211_queue_delayed_work(&sdata->local->hw, &sdata->u.mgd.tdls_peer_del_work, TDLS_PEER_SETUP_TIMEOUT); return 0; out_unlock: mutex_unlock(&local->mtx); return ret; } static int ieee80211_tdls_mgmt_teardown(struct wiphy *wiphy, struct net_device *dev, const u8 *peer, u8 action_code, u8 dialog_token, u16 status_code, u32 peer_capability, bool initiator, const u8 *extra_ies, size_t extra_ies_len) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_local *local = sdata->local; struct sta_info *sta; int ret; /* * No packets can be transmitted to the peer via the AP during setup - * the STA is set as a TDLS peer, but is not authorized. * During teardown, we prevent direct transmissions by stopping the * queues and flushing all direct packets. */ ieee80211_stop_vif_queues(local, sdata, IEEE80211_QUEUE_STOP_REASON_TDLS_TEARDOWN); ieee80211_flush_queues(local, sdata, false); ret = ieee80211_tdls_prep_mgmt_packet(wiphy, dev, peer, action_code, dialog_token, status_code, peer_capability, initiator, extra_ies, extra_ies_len, 0, NULL); if (ret < 0) sdata_err(sdata, "Failed sending TDLS teardown packet %d\n", ret); /* * Remove the STA AUTH flag to force further traffic through the AP. If * the STA was unreachable, it was already removed. */ rcu_read_lock(); sta = sta_info_get(sdata, peer); if (sta) clear_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH); rcu_read_unlock(); ieee80211_wake_vif_queues(local, sdata, IEEE80211_QUEUE_STOP_REASON_TDLS_TEARDOWN); return 0; } int ieee80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev, const u8 *peer, u8 action_code, u8 dialog_token, u16 status_code, u32 peer_capability, bool initiator, const u8 *extra_ies, size_t extra_ies_len) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); int ret; if (!(wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS)) return -ENOTSUPP; /* make sure we are in managed mode, and associated */ if (sdata->vif.type != NL80211_IFTYPE_STATION || !sdata->u.mgd.associated) return -EINVAL; switch (action_code) { case WLAN_TDLS_SETUP_REQUEST: case WLAN_TDLS_SETUP_RESPONSE: ret = ieee80211_tdls_mgmt_setup(wiphy, dev, peer, action_code, dialog_token, status_code, peer_capability, initiator, extra_ies, extra_ies_len); break; case WLAN_TDLS_TEARDOWN: ret = ieee80211_tdls_mgmt_teardown(wiphy, dev, peer, action_code, dialog_token, status_code, peer_capability, initiator, extra_ies, extra_ies_len); break; case WLAN_TDLS_DISCOVERY_REQUEST: /* * Protect the discovery so we can hear the TDLS discovery * response frame. It is transmitted directly and not buffered * by the AP. 
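 * Drivers implement this via the mgd_protect_tdls_discover() callback:
 * the device must remain on the AP's channel (no powersave, scan or
 * off-channel activity) until the direct reply has a chance to arrive.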
*/ drv_mgd_protect_tdls_discover(sdata->local, sdata); /* fall-through */ case WLAN_TDLS_SETUP_CONFIRM: case WLAN_PUB_ACTION_TDLS_DISCOVER_RES: /* no special handling */ ret = ieee80211_tdls_prep_mgmt_packet(wiphy, dev, peer, action_code, dialog_token, status_code, peer_capability, initiator, extra_ies, extra_ies_len, 0, NULL); break; default: ret = -EOPNOTSUPP; break; } tdls_dbg(sdata, "TDLS mgmt action %d peer %pM status %d\n", action_code, peer, ret); return ret; } static void iee80211_tdls_recalc_chanctx(struct ieee80211_sub_if_data *sdata, struct sta_info *sta) { struct ieee80211_local *local = sdata->local; struct ieee80211_chanctx_conf *conf; struct ieee80211_chanctx *ctx; enum nl80211_chan_width width; struct ieee80211_supported_band *sband; mutex_lock(&local->chanctx_mtx); conf = rcu_dereference_protected(sdata->vif.chanctx_conf, lockdep_is_held(&local->chanctx_mtx)); if (conf) { width = conf->def.width; sband = local->hw.wiphy->bands[conf->def.chan->band]; ctx = container_of(conf, struct ieee80211_chanctx, conf); ieee80211_recalc_chanctx_chantype(local, ctx); /* if width changed and a peer is given, update its BW */ if (width != conf->def.width && sta && test_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW)) { enum ieee80211_sta_rx_bandwidth bw; bw = ieee80211_chan_width_to_rx_bw(conf->def.width); bw = min(bw, ieee80211_sta_cap_rx_bw(sta)); if (bw != sta->sta.bandwidth) { sta->sta.bandwidth = bw; rate_control_rate_update(local, sband, sta, IEEE80211_RC_BW_CHANGED); /* * if a TDLS peer BW was updated, we need to * recalc the chandef width again, to get the * correct chanctx min_def */ ieee80211_recalc_chanctx_chantype(local, ctx); } } } mutex_unlock(&local->chanctx_mtx); } static int iee80211_tdls_have_ht_peers(struct ieee80211_sub_if_data *sdata) { struct sta_info *sta; bool result = false; rcu_read_lock(); list_for_each_entry_rcu(sta, &sdata->local->sta_list, list) { if (!sta->sta.tdls || sta->sdata != sdata || !sta->uploaded || !test_sta_flag(sta, WLAN_STA_AUTHORIZED) || !test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH) || !sta->sta.ht_cap.ht_supported) continue; result = true; break; } rcu_read_unlock(); return result; } static void iee80211_tdls_recalc_ht_protection(struct ieee80211_sub_if_data *sdata, struct sta_info *sta) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; bool tdls_ht; u16 protection = IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED | IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT | IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT; u16 opmode; /* Nothing to do if the BSS connection uses HT */ if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HT)) return; tdls_ht = (sta && sta->sta.ht_cap.ht_supported) || iee80211_tdls_have_ht_peers(sdata); opmode = sdata->vif.bss_conf.ht_operation_mode; if (tdls_ht) opmode |= protection; else opmode &= ~protection; if (opmode == sdata->vif.bss_conf.ht_operation_mode) return; sdata->vif.bss_conf.ht_operation_mode = opmode; ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_HT); } int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev, const u8 *peer, enum nl80211_tdls_operation oper) { struct sta_info *sta; struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_local *local = sdata->local; int ret; if (!(wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS)) return -ENOTSUPP; if (sdata->vif.type != NL80211_IFTYPE_STATION) return -EINVAL; switch (oper) { case NL80211_TDLS_ENABLE_LINK: case NL80211_TDLS_DISABLE_LINK: break; case NL80211_TDLS_TEARDOWN: case NL80211_TDLS_SETUP: case NL80211_TDLS_DISCOVERY_REQ: /* We don't support in-driver 
setup/teardown/discovery */ return -ENOTSUPP; } /* protect possible bss_conf changes and avoid concurrency in * ieee80211_bss_info_change_notify() */ sdata_lock(sdata); mutex_lock(&local->mtx); tdls_dbg(sdata, "TDLS oper %d peer %pM\n", oper, peer); switch (oper) { case NL80211_TDLS_ENABLE_LINK: if (sdata->vif.csa_active) { tdls_dbg(sdata, "TDLS: disallow link during CSA\n"); ret = -EBUSY; break; } mutex_lock(&local->sta_mtx); sta = sta_info_get(sdata, peer); if (!sta) { mutex_unlock(&local->sta_mtx); ret = -ENOLINK; break; } iee80211_tdls_recalc_chanctx(sdata, sta); iee80211_tdls_recalc_ht_protection(sdata, sta); set_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH); mutex_unlock(&local->sta_mtx); WARN_ON_ONCE(is_zero_ether_addr(sdata->u.mgd.tdls_peer) || !ether_addr_equal(sdata->u.mgd.tdls_peer, peer)); ret = 0; break; case NL80211_TDLS_DISABLE_LINK: /* * The teardown message in ieee80211_tdls_mgmt_teardown() was * created while the queues were stopped, so it might still be * pending. Before flushing the queues we need to be sure the * message is handled by the tasklet handling pending messages, * otherwise we might start destroying the station before * sending the teardown packet. * Note that this only forces the tasklet to flush pendings - * not to stop the tasklet from rescheduling itself. */ tasklet_kill(&local->tx_pending_tasklet); /* flush a potentially queued teardown packet */ ieee80211_flush_queues(local, sdata, false); ret = sta_info_destroy_addr(sdata, peer); mutex_lock(&local->sta_mtx); iee80211_tdls_recalc_ht_protection(sdata, NULL); mutex_unlock(&local->sta_mtx); iee80211_tdls_recalc_chanctx(sdata, NULL); break; default: ret = -ENOTSUPP; break; } if (ret == 0 && ether_addr_equal(sdata->u.mgd.tdls_peer, peer)) { cancel_delayed_work(&sdata->u.mgd.tdls_peer_del_work); eth_zero_addr(sdata->u.mgd.tdls_peer); } if (ret == 0) ieee80211_queue_work(&sdata->local->hw, &sdata->u.mgd.request_smps_work); mutex_unlock(&local->mtx); sdata_unlock(sdata); return ret; } void ieee80211_tdls_oper_request(struct ieee80211_vif *vif, const u8 *peer, enum nl80211_tdls_operation oper, u16 reason_code, gfp_t gfp) { struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc) { sdata_err(sdata, "Discarding TDLS oper %d - not STA or disconnected\n", oper); return; } cfg80211_tdls_oper_request(sdata->dev, peer, oper, reason_code, gfp); } EXPORT_SYMBOL(ieee80211_tdls_oper_request); static void iee80211_tdls_add_ch_switch_timing(u8 *buf, u16 switch_time, u16 switch_timeout) { struct ieee80211_ch_switch_timing *ch_sw; *buf++ = WLAN_EID_CHAN_SWITCH_TIMING; *buf++ = sizeof(struct ieee80211_ch_switch_timing); ch_sw = (void *)buf; ch_sw->switch_time = cpu_to_le16(switch_time); ch_sw->switch_timeout = cpu_to_le16(switch_timeout); } /* find switch timing IE in SKB ready for Tx */ static const u8 *ieee80211_tdls_find_sw_timing_ie(struct sk_buff *skb) { struct ieee80211_tdls_data *tf; const u8 *ie_start; /* * Get the offset for the new location of the switch timing IE. * The SKB network header will now point to the "payload_type" * element of the TDLS data frame struct. 
*/ tf = container_of(skb->data + skb_network_offset(skb), struct ieee80211_tdls_data, payload_type); ie_start = tf->u.chan_switch_req.variable; return cfg80211_find_ie(WLAN_EID_CHAN_SWITCH_TIMING, ie_start, skb->len - (ie_start - skb->data)); } static struct sk_buff * ieee80211_tdls_ch_sw_tmpl_get(struct sta_info *sta, u8 oper_class, struct cfg80211_chan_def *chandef, u32 *ch_sw_tm_ie_offset) { struct ieee80211_sub_if_data *sdata = sta->sdata; u8 extra_ies[2 + sizeof(struct ieee80211_sec_chan_offs_ie) + 2 + sizeof(struct ieee80211_ch_switch_timing)]; int extra_ies_len = 2 + sizeof(struct ieee80211_ch_switch_timing); u8 *pos = extra_ies; struct sk_buff *skb; /* * if chandef points to a wide channel add a Secondary-Channel * Offset information element */ if (chandef->width == NL80211_CHAN_WIDTH_40) { struct ieee80211_sec_chan_offs_ie *sec_chan_ie; bool ht40plus; *pos++ = WLAN_EID_SECONDARY_CHANNEL_OFFSET; *pos++ = sizeof(*sec_chan_ie); sec_chan_ie = (void *)pos; ht40plus = cfg80211_get_chandef_type(chandef) == NL80211_CHAN_HT40PLUS; sec_chan_ie->sec_chan_offs = ht40plus ? IEEE80211_HT_PARAM_CHA_SEC_ABOVE : IEEE80211_HT_PARAM_CHA_SEC_BELOW; pos += sizeof(*sec_chan_ie); extra_ies_len += 2 + sizeof(struct ieee80211_sec_chan_offs_ie); } /* just set the values to 0, this is a template */ iee80211_tdls_add_ch_switch_timing(pos, 0, 0); skb = ieee80211_tdls_build_mgmt_packet_data(sdata, sta->sta.addr, WLAN_TDLS_CHANNEL_SWITCH_REQUEST, 0, 0, !sta->sta.tdls_initiator, extra_ies, extra_ies_len, oper_class, chandef); if (!skb) return NULL; skb = ieee80211_build_data_template(sdata, skb, 0); if (IS_ERR(skb)) { tdls_dbg(sdata, "Failed building TDLS channel switch frame\n"); return NULL; } if (ch_sw_tm_ie_offset) { const u8 *tm_ie = ieee80211_tdls_find_sw_timing_ie(skb); if (!tm_ie) { tdls_dbg(sdata, "No switch timing IE in TDLS switch\n"); dev_kfree_skb_any(skb); return NULL; } *ch_sw_tm_ie_offset = tm_ie - skb->data; } tdls_dbg(sdata, "TDLS channel switch request template for %pM ch %d width %d\n", sta->sta.addr, chandef->chan->center_freq, chandef->width); return skb; } int ieee80211_tdls_channel_switch(struct wiphy *wiphy, struct net_device *dev, const u8 *addr, u8 oper_class, struct cfg80211_chan_def *chandef) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_local *local = sdata->local; struct sta_info *sta; struct sk_buff *skb = NULL; u32 ch_sw_tm_ie; int ret; mutex_lock(&local->sta_mtx); sta = sta_info_get(sdata, addr); if (!sta) { tdls_dbg(sdata, "Invalid TDLS peer %pM for channel switch request\n", addr); ret = -ENOENT; goto out; } if (!test_sta_flag(sta, WLAN_STA_TDLS_CHAN_SWITCH)) { tdls_dbg(sdata, "TDLS channel switch unsupported by %pM\n", addr); ret = -ENOTSUPP; goto out; } skb = ieee80211_tdls_ch_sw_tmpl_get(sta, oper_class, chandef, &ch_sw_tm_ie); if (!skb) { ret = -ENOENT; goto out; } ret = drv_tdls_channel_switch(local, sdata, &sta->sta, oper_class, chandef, skb, ch_sw_tm_ie); if (!ret) set_sta_flag(sta, WLAN_STA_TDLS_OFF_CHANNEL); out: mutex_unlock(&local->sta_mtx); dev_kfree_skb_any(skb); return ret; } void ieee80211_tdls_cancel_channel_switch(struct wiphy *wiphy, struct net_device *dev, const u8 *addr) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_local *local = sdata->local; struct sta_info *sta; mutex_lock(&local->sta_mtx); sta = sta_info_get(sdata, addr); if (!sta) { tdls_dbg(sdata, "Invalid TDLS peer %pM for channel switch cancel\n", addr); goto out; } if (!test_sta_flag(sta, WLAN_STA_TDLS_OFF_CHANNEL)) 
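/* WLAN_STA_TDLS_OFF_CHANNEL is only set once drv_tdls_channel_switch()
 * has succeeded in ieee80211_tdls_channel_switch() above, so this
 * rejects cancellations for switches that were never started. */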
{ tdls_dbg(sdata, "TDLS channel switch not initiated by %pM\n", addr); goto out; } drv_tdls_cancel_channel_switch(local, sdata, &sta->sta); clear_sta_flag(sta, WLAN_STA_TDLS_OFF_CHANNEL); out: mutex_unlock(&local->sta_mtx); } static struct sk_buff * ieee80211_tdls_ch_sw_resp_tmpl_get(struct sta_info *sta, u32 *ch_sw_tm_ie_offset) { struct ieee80211_sub_if_data *sdata = sta->sdata; struct sk_buff *skb; u8 extra_ies[2 + sizeof(struct ieee80211_ch_switch_timing)]; /* initial timings are always zero in the template */ iee80211_tdls_add_ch_switch_timing(extra_ies, 0, 0); skb = ieee80211_tdls_build_mgmt_packet_data(sdata, sta->sta.addr, WLAN_TDLS_CHANNEL_SWITCH_RESPONSE, 0, 0, !sta->sta.tdls_initiator, extra_ies, sizeof(extra_ies), 0, NULL); if (!skb) return NULL; skb = ieee80211_build_data_template(sdata, skb, 0); if (IS_ERR(skb)) { tdls_dbg(sdata, "Failed building TDLS channel switch resp frame\n"); return NULL; } if (ch_sw_tm_ie_offset) { const u8 *tm_ie = ieee80211_tdls_find_sw_timing_ie(skb); if (!tm_ie) { tdls_dbg(sdata, "No switch timing IE in TDLS switch resp\n"); dev_kfree_skb_any(skb); return NULL; } *ch_sw_tm_ie_offset = tm_ie - skb->data; } tdls_dbg(sdata, "TDLS get channel switch response template for %pM\n", sta->sta.addr); return skb; } static int ieee80211_process_tdls_channel_switch_resp(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) { struct ieee80211_local *local = sdata->local; struct ieee802_11_elems elems; struct sta_info *sta; struct ieee80211_tdls_data *tf = (void *)skb->data; bool local_initiator; struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb); int baselen = offsetof(typeof(*tf), u.chan_switch_resp.variable); struct ieee80211_tdls_ch_sw_params params = {}; int ret; params.action_code = WLAN_TDLS_CHANNEL_SWITCH_RESPONSE; params.timestamp = rx_status->device_timestamp; if (skb->len < baselen) { tdls_dbg(sdata, "TDLS channel switch resp too short: %d\n", skb->len); return -EINVAL; } mutex_lock(&local->sta_mtx); sta = sta_info_get(sdata, tf->sa); if (!sta || !test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH)) { tdls_dbg(sdata, "TDLS chan switch from non-peer sta %pM\n", tf->sa); ret = -EINVAL; goto out; } params.sta = &sta->sta; params.status = le16_to_cpu(tf->u.chan_switch_resp.status_code); if (params.status != 0) { ret = 0; goto call_drv; } ieee802_11_parse_elems(tf->u.chan_switch_resp.variable, skb->len - baselen, false, &elems, NULL, NULL); if (elems.parse_error) { tdls_dbg(sdata, "Invalid IEs in TDLS channel switch resp\n"); ret = -EINVAL; goto out; } if (!elems.ch_sw_timing || !elems.lnk_id) { tdls_dbg(sdata, "TDLS channel switch resp - missing IEs\n"); ret = -EINVAL; goto out; } /* validate the initiator is set correctly */ local_initiator = !memcmp(elems.lnk_id->init_sta, sdata->vif.addr, ETH_ALEN); if (local_initiator == sta->sta.tdls_initiator) { tdls_dbg(sdata, "TDLS chan switch invalid lnk-id initiator\n"); ret = -EINVAL; goto out; } params.switch_time = le16_to_cpu(elems.ch_sw_timing->switch_time); params.switch_timeout = le16_to_cpu(elems.ch_sw_timing->switch_timeout); params.tmpl_skb = ieee80211_tdls_ch_sw_resp_tmpl_get(sta, &params.ch_sw_tm_ie); if (!params.tmpl_skb) { ret = -ENOENT; goto out; } ret = 0; call_drv: drv_tdls_recv_channel_switch(sdata->local, sdata, &params); tdls_dbg(sdata, "TDLS channel switch response received from %pM status %d\n", tf->sa, params.status); out: mutex_unlock(&local->sta_mtx); dev_kfree_skb_any(params.tmpl_skb); return ret; } static int ieee80211_process_tdls_channel_switch_req(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb) { struct ieee80211_local *local = sdata->local; struct ieee802_11_elems elems; struct cfg80211_chan_def chandef; struct ieee80211_channel *chan; enum nl80211_channel_type chan_type; int freq; u8 target_channel, oper_class; bool local_initiator; struct sta_info *sta; enum nl80211_band band; struct ieee80211_tdls_data *tf = (void *)skb->data; struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb); int baselen = offsetof(typeof(*tf), u.chan_switch_req.variable); struct ieee80211_tdls_ch_sw_params params = {}; int ret = 0; params.action_code = WLAN_TDLS_CHANNEL_SWITCH_REQUEST; params.timestamp = rx_status->device_timestamp; if (skb->len < baselen) { tdls_dbg(sdata, "TDLS channel switch req too short: %d\n", skb->len); return -EINVAL; } target_channel = tf->u.chan_switch_req.target_channel; oper_class = tf->u.chan_switch_req.oper_class; /* * We can't easily infer the channel band. The operating class is * ambiguous - there are multiple tables (US/Europe/JP/Global). The * solution here is to treat channels with number >14 as 5GHz ones, * and specifically check for the (oper_class, channel) combinations * where this doesn't hold. These are thankfully unique according to * IEEE802.11-2012. * We consider only the 2GHz and 5GHz bands and 20MHz+ channels as * valid here. */ if ((oper_class == 112 || oper_class == 2 || oper_class == 3 || oper_class == 4 || oper_class == 5 || oper_class == 6) && target_channel < 14) band = NL80211_BAND_5GHZ; else band = target_channel < 14 ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ; freq = ieee80211_channel_to_frequency(target_channel, band); if (freq == 0) { tdls_dbg(sdata, "Invalid channel in TDLS chan switch: %d\n", target_channel); return -EINVAL; } chan = ieee80211_get_channel(sdata->local->hw.wiphy, freq); if (!chan) { tdls_dbg(sdata, "Unsupported channel for TDLS chan switch: %d\n", target_channel); return -EINVAL; } ieee802_11_parse_elems(tf->u.chan_switch_req.variable, skb->len - baselen, false, &elems, NULL, NULL); if (elems.parse_error) { tdls_dbg(sdata, "Invalid IEs in TDLS channel switch req\n"); return -EINVAL; } if (!elems.ch_sw_timing || !elems.lnk_id) { tdls_dbg(sdata, "TDLS channel switch req - missing IEs\n"); return -EINVAL; } if (!elems.sec_chan_offs) { chan_type = NL80211_CHAN_HT20; } else { switch (elems.sec_chan_offs->sec_chan_offs) { case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: chan_type = NL80211_CHAN_HT40PLUS; break; case IEEE80211_HT_PARAM_CHA_SEC_BELOW: chan_type = NL80211_CHAN_HT40MINUS; break; default: chan_type = NL80211_CHAN_HT20; break; } } cfg80211_chandef_create(&chandef, chan, chan_type); /* we will be active on the TDLS link */ if (!cfg80211_reg_can_beacon_relax(sdata->local->hw.wiphy, &chandef, sdata->wdev.iftype)) { tdls_dbg(sdata, "TDLS chan switch to forbidden channel\n"); return -EINVAL; } mutex_lock(&local->sta_mtx); sta = sta_info_get(sdata, tf->sa); if (!sta || !test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH)) { tdls_dbg(sdata, "TDLS chan switch from non-peer sta %pM\n", tf->sa); ret = -EINVAL; goto out; } params.sta = &sta->sta; /* validate the initiator is set correctly */ local_initiator = !memcmp(elems.lnk_id->init_sta, sdata->vif.addr, ETH_ALEN); if (local_initiator == sta->sta.tdls_initiator) { tdls_dbg(sdata, "TDLS chan switch invalid lnk-id initiator\n"); ret = -EINVAL; goto out; } /* peer should have known better */ if (!sta->sta.ht_cap.ht_supported && elems.sec_chan_offs && elems.sec_chan_offs->sec_chan_offs) { tdls_dbg(sdata, "TDLS chan switch - wide chan unsupported\n"); ret = -ENOTSUPP; goto 
out; } params.chandef = &chandef; params.switch_time = le16_to_cpu(elems.ch_sw_timing->switch_time); params.switch_timeout = le16_to_cpu(elems.ch_sw_timing->switch_timeout); params.tmpl_skb = ieee80211_tdls_ch_sw_resp_tmpl_get(sta, &params.ch_sw_tm_ie); if (!params.tmpl_skb) { ret = -ENOENT; goto out; } drv_tdls_recv_channel_switch(sdata->local, sdata, &params); tdls_dbg(sdata, "TDLS ch switch request received from %pM ch %d width %d\n", tf->sa, params.chandef->chan->center_freq, params.chandef->width); out: mutex_unlock(&local->sta_mtx); dev_kfree_skb_any(params.tmpl_skb); return ret; } static void ieee80211_process_tdls_channel_switch(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) { struct ieee80211_tdls_data *tf = (void *)skb->data; struct wiphy *wiphy = sdata->local->hw.wiphy; ASSERT_RTNL(); /* make sure the driver supports it */ if (!(wiphy->features & NL80211_FEATURE_TDLS_CHANNEL_SWITCH)) return; /* we want to access the entire packet */ if (skb_linearize(skb)) return; /* * The packet/size was already validated by mac80211 Rx path, only look * at the action type. */ switch (tf->action_code) { case WLAN_TDLS_CHANNEL_SWITCH_REQUEST: ieee80211_process_tdls_channel_switch_req(sdata, skb); break; case WLAN_TDLS_CHANNEL_SWITCH_RESPONSE: ieee80211_process_tdls_channel_switch_resp(sdata, skb); break; default: WARN_ON_ONCE(1); return; } } void ieee80211_teardown_tdls_peers(struct ieee80211_sub_if_data *sdata) { struct sta_info *sta; u16 reason = WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED; rcu_read_lock(); list_for_each_entry_rcu(sta, &sdata->local->sta_list, list) { if (!sta->sta.tdls || sta->sdata != sdata || !sta->uploaded || !test_sta_flag(sta, WLAN_STA_AUTHORIZED)) continue; ieee80211_tdls_oper_request(&sdata->vif, sta->sta.addr, NL80211_TDLS_TEARDOWN, reason, GFP_ATOMIC); } rcu_read_unlock(); } void ieee80211_tdls_chsw_work(struct work_struct *wk) { struct ieee80211_local *local = container_of(wk, struct ieee80211_local, tdls_chsw_work); struct ieee80211_sub_if_data *sdata; struct sk_buff *skb; struct ieee80211_tdls_data *tf; rtnl_lock(); while ((skb = skb_dequeue(&local->skb_queue_tdls_chsw))) { tf = (struct ieee80211_tdls_data *)skb->data; list_for_each_entry(sdata, &local->interfaces, list) { if (!ieee80211_sdata_running(sdata) || sdata->vif.type != NL80211_IFTYPE_STATION || !ether_addr_equal(tf->da, sdata->vif.addr)) continue; ieee80211_process_tdls_channel_switch(sdata, skb); break; } kfree_skb(skb); } rtnl_unlock(); } void ieee80211_tdls_handle_disconnect(struct ieee80211_sub_if_data *sdata, const u8 *peer, u16 reason) { struct ieee80211_sta *sta; rcu_read_lock(); sta = ieee80211_find_sta(&sdata->vif, peer); if (!sta || !sta->tdls) { rcu_read_unlock(); return; } rcu_read_unlock(); tdls_dbg(sdata, "disconnected from TDLS peer %pM (Reason: %u=%s)\n", peer, reason, ieee80211_get_reason_code_string(reason)); ieee80211_tdls_oper_request(&sdata->vif, peer, NL80211_TDLS_TEARDOWN, WLAN_REASON_TDLS_TEARDOWN_UNREACHABLE, GFP_ATOMIC); }
backport-iwlwifi-dkms-7906/net/mac80211/tkip.c000066400000000000000000000250561351064634500207460ustar00rootroot00000000000000/* * Copyright 2002-2004, Instant802 Networks, Inc. * Copyright 2005, Devicescape Software, Inc. * Copyright (C) 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation.
*/ #include <linux/kernel.h> #include <linux/bitops.h> #include <linux/types.h> #include <linux/netdevice.h> #include <linux/export.h> #include <asm/unaligned.h> #include <net/mac80211.h> #include "driver-ops.h" #include "key.h" #include "tkip.h" #include "wep.h" #define PHASE1_LOOP_COUNT 8 /* * 2-byte by 2-byte subset of the full AES S-box table; second part of this * table is identical to first part but byte-swapped */ static const u16 tkip_sbox[256] = { 0xC6A5, 0xF884, 0xEE99, 0xF68D, 0xFF0D, 0xD6BD, 0xDEB1, 0x9154, 0x6050, 0x0203, 0xCEA9, 0x567D, 0xE719, 0xB562, 0x4DE6, 0xEC9A, 0x8F45, 0x1F9D, 0x8940, 0xFA87, 0xEF15, 0xB2EB, 0x8EC9, 0xFB0B, 0x41EC, 0xB367, 0x5FFD, 0x45EA, 0x23BF, 0x53F7, 0xE496, 0x9B5B, 0x75C2, 0xE11C, 0x3DAE, 0x4C6A, 0x6C5A, 0x7E41, 0xF502, 0x834F, 0x685C, 0x51F4, 0xD134, 0xF908, 0xE293, 0xAB73, 0x6253, 0x2A3F, 0x080C, 0x9552, 0x4665, 0x9D5E, 0x3028, 0x37A1, 0x0A0F, 0x2FB5, 0x0E09, 0x2436, 0x1B9B, 0xDF3D, 0xCD26, 0x4E69, 0x7FCD, 0xEA9F, 0x121B, 0x1D9E, 0x5874, 0x342E, 0x362D, 0xDCB2, 0xB4EE, 0x5BFB, 0xA4F6, 0x764D, 0xB761, 0x7DCE, 0x527B, 0xDD3E, 0x5E71, 0x1397, 0xA6F5, 0xB968, 0x0000, 0xC12C, 0x4060, 0xE31F, 0x79C8, 0xB6ED, 0xD4BE, 0x8D46, 0x67D9, 0x724B, 0x94DE, 0x98D4, 0xB0E8, 0x854A, 0xBB6B, 0xC52A, 0x4FE5, 0xED16, 0x86C5, 0x9AD7, 0x6655, 0x1194, 0x8ACF, 0xE910, 0x0406, 0xFE81, 0xA0F0, 0x7844, 0x25BA, 0x4BE3, 0xA2F3, 0x5DFE, 0x80C0, 0x058A, 0x3FAD, 0x21BC, 0x7048, 0xF104, 0x63DF, 0x77C1, 0xAF75, 0x4263, 0x2030, 0xE51A, 0xFD0E, 0xBF6D, 0x814C, 0x1814, 0x2635, 0xC32F, 0xBEE1, 0x35A2, 0x88CC, 0x2E39, 0x9357, 0x55F2, 0xFC82, 0x7A47, 0xC8AC, 0xBAE7, 0x322B, 0xE695, 0xC0A0, 0x1998, 0x9ED1, 0xA37F, 0x4466, 0x547E, 0x3BAB, 0x0B83, 0x8CCA, 0xC729, 0x6BD3, 0x283C, 0xA779, 0xBCE2, 0x161D, 0xAD76, 0xDB3B, 0x6456, 0x744E, 0x141E, 0x92DB, 0x0C0A, 0x486C, 0xB8E4, 0x9F5D, 0xBD6E, 0x43EF, 0xC4A6, 0x39A8, 0x31A4, 0xD337, 0xF28B, 0xD532, 0x8B43, 0x6E59, 0xDAB7, 0x018C, 0xB164, 0x9CD2, 0x49E0, 0xD8B4, 0xACFA, 0xF307, 0xCF25, 0xCAAF, 0xF48E, 0x47E9, 0x1018, 0x6FD5, 0xF088, 0x4A6F, 0x5C72, 0x3824, 0x57F1, 0x73C7, 0x9751, 0xCB23, 0xA17C, 0xE89C, 0x3E21, 0x96DD, 0x61DC, 0x0D86, 0x0F85, 0xE090, 0x7C42, 0x71C4, 0xCCAA, 0x90D8, 0x0605, 0xF701, 0x1C12, 0xC2A3, 0x6A5F, 0xAEF9, 0x69D0, 0x1791, 0x9958, 0x3A27, 0x27B9, 0xD938, 0xEB13, 0x2BB3, 0x2233, 0xD2BB, 0xA970, 0x0789, 0x33A7, 0x2DB6, 0x3C22, 0x1592, 0xC920, 0x8749, 0xAAFF, 0x5078, 0xA57A, 0x038F, 0x59F8, 0x0980, 0x1A17, 0x65DA, 0xD731, 0x84C6, 0xD0B8, 0x82C3, 0x29B0, 0x5A77, 0x1E11, 0x7BCB, 0xA8FC, 0x6DD6, 0x2C3A, }; static u16 tkipS(u16 val) { return tkip_sbox[val & 0xff] ^ swab16(tkip_sbox[val >> 8]); } static u8 *write_tkip_iv(u8 *pos, u16 iv16) { *pos++ = iv16 >> 8; *pos++ = ((iv16 >> 8) | 0x20) & 0x7f; *pos++ = iv16 & 0xFF; return pos; } /* * P1K := Phase1(TA, TK, TSC) * TA = transmitter address (48 bits) * TK = dot11DefaultKeyValue or dot11KeyMappingValue (128 bits) * TSC = TKIP sequence counter (48 bits, only 32 msb bits used) * P1K: 80 bits */ static void tkip_mixing_phase1(const u8 *tk, struct tkip_ctx *ctx, const u8 *ta, u32 tsc_IV32) { int i, j; u16 *p1k = ctx->p1k; p1k[0] = tsc_IV32 & 0xFFFF; p1k[1] = tsc_IV32 >> 16; p1k[2] = get_unaligned_le16(ta + 0); p1k[3] = get_unaligned_le16(ta + 2); p1k[4] = get_unaligned_le16(ta + 4); for (i = 0; i < PHASE1_LOOP_COUNT; i++) { j = 2 * (i & 1); p1k[0] += tkipS(p1k[4] ^ get_unaligned_le16(tk + 0 + j)); p1k[1] += tkipS(p1k[0] ^ get_unaligned_le16(tk + 4 + j)); p1k[2] += tkipS(p1k[1] ^ get_unaligned_le16(tk + 8 + j)); p1k[3] += tkipS(p1k[2] ^ get_unaligned_le16(tk + 12 + j)); p1k[4] += tkipS(p1k[3] ^ get_unaligned_le16(tk + 0 + j)) + i; } ctx->state = TKIP_STATE_PHASE1_DONE; ctx->p1k_iv32 = tsc_IV32; } static
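/*
 * P2K := Phase2(P1K, TK, TSC)
 * P1K = phase 1 output (80 bits, see above)
 * TK = temporal key (128 bits)
 * TSC = lower 16 bits of the TKIP sequence counter
 * The result is the 16-octet per-packet RC4 key; its first three octets
 * double as the WEP IV laid out by write_tkip_iv().
 */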
void tkip_mixing_phase2(const u8 *tk, struct tkip_ctx *ctx, u16 tsc_IV16, u8 *rc4key) { u16 ppk[6]; const u16 *p1k = ctx->p1k; int i; ppk[0] = p1k[0]; ppk[1] = p1k[1]; ppk[2] = p1k[2]; ppk[3] = p1k[3]; ppk[4] = p1k[4]; ppk[5] = p1k[4] + tsc_IV16; ppk[0] += tkipS(ppk[5] ^ get_unaligned_le16(tk + 0)); ppk[1] += tkipS(ppk[0] ^ get_unaligned_le16(tk + 2)); ppk[2] += tkipS(ppk[1] ^ get_unaligned_le16(tk + 4)); ppk[3] += tkipS(ppk[2] ^ get_unaligned_le16(tk + 6)); ppk[4] += tkipS(ppk[3] ^ get_unaligned_le16(tk + 8)); ppk[5] += tkipS(ppk[4] ^ get_unaligned_le16(tk + 10)); ppk[0] += ror16(ppk[5] ^ get_unaligned_le16(tk + 12), 1); ppk[1] += ror16(ppk[0] ^ get_unaligned_le16(tk + 14), 1); ppk[2] += ror16(ppk[1], 1); ppk[3] += ror16(ppk[2], 1); ppk[4] += ror16(ppk[3], 1); ppk[5] += ror16(ppk[4], 1); rc4key = write_tkip_iv(rc4key, tsc_IV16); *rc4key++ = ((ppk[5] ^ get_unaligned_le16(tk)) >> 1) & 0xFF; for (i = 0; i < 6; i++) put_unaligned_le16(ppk[i], rc4key + 2 * i); } /* Add TKIP IV and Ext. IV at @pos. @iv0, @iv1, and @iv2 are the first octets * of the IV. Returns pointer to the octet following IVs (i.e., beginning of * the packet payload). */ u8 *ieee80211_tkip_add_iv(u8 *pos, struct ieee80211_key_conf *keyconf, u64 pn) { pos = write_tkip_iv(pos, TKIP_PN_TO_IV16(pn)); *pos++ = (keyconf->keyidx << 6) | (1 << 5) /* Ext IV */; put_unaligned_le32(TKIP_PN_TO_IV32(pn), pos); return pos + 4; } EXPORT_SYMBOL_GPL(ieee80211_tkip_add_iv); static void ieee80211_compute_tkip_p1k(struct ieee80211_key *key, u32 iv32) { struct ieee80211_sub_if_data *sdata = key->sdata; struct tkip_ctx *ctx = &key->u.tkip.tx; const u8 *tk = &key->conf.key[NL80211_TKIP_DATA_OFFSET_ENCR_KEY]; lockdep_assert_held(&key->u.tkip.txlock); /* * Update the P1K when the IV32 is different from the value it * had when we last computed it (or when not initialised yet). * This might flip-flop back and forth if packets are processed * out-of-order due to the different ACs, but then we have to * just compute the P1K more often. 
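 * (For example, a frame queued on AC_VO with IV32 == n may reach this
 * point after a frame on AC_BE has already advanced the cached value
 * to n + 1; the check below then simply recomputes the phase 1 key
 * for n.)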
*/ if (ctx->p1k_iv32 != iv32 || ctx->state == TKIP_STATE_NOT_INIT) tkip_mixing_phase1(tk, ctx, sdata->vif.addr, iv32); } void ieee80211_get_tkip_p1k_iv(struct ieee80211_key_conf *keyconf, u32 iv32, u16 *p1k) { struct ieee80211_key *key = (struct ieee80211_key *) container_of(keyconf, struct ieee80211_key, conf); struct tkip_ctx *ctx = &key->u.tkip.tx; spin_lock_bh(&key->u.tkip.txlock); ieee80211_compute_tkip_p1k(key, iv32); memcpy(p1k, ctx->p1k, sizeof(ctx->p1k)); spin_unlock_bh(&key->u.tkip.txlock); } EXPORT_SYMBOL(ieee80211_get_tkip_p1k_iv); void ieee80211_get_tkip_rx_p1k(struct ieee80211_key_conf *keyconf, const u8 *ta, u32 iv32, u16 *p1k) { const u8 *tk = &keyconf->key[NL80211_TKIP_DATA_OFFSET_ENCR_KEY]; struct tkip_ctx ctx; tkip_mixing_phase1(tk, &ctx, ta, iv32); memcpy(p1k, ctx.p1k, sizeof(ctx.p1k)); } EXPORT_SYMBOL(ieee80211_get_tkip_rx_p1k); void ieee80211_get_tkip_p2k(struct ieee80211_key_conf *keyconf, struct sk_buff *skb, u8 *p2k) { struct ieee80211_key *key = (struct ieee80211_key *) container_of(keyconf, struct ieee80211_key, conf); const u8 *tk = &key->conf.key[NL80211_TKIP_DATA_OFFSET_ENCR_KEY]; struct tkip_ctx *ctx = &key->u.tkip.tx; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; const u8 *data = (u8 *)hdr + ieee80211_hdrlen(hdr->frame_control); u32 iv32 = get_unaligned_le32(&data[4]); u16 iv16 = data[2] | (data[0] << 8); spin_lock(&key->u.tkip.txlock); ieee80211_compute_tkip_p1k(key, iv32); tkip_mixing_phase2(tk, ctx, iv16, p2k); spin_unlock(&key->u.tkip.txlock); } EXPORT_SYMBOL(ieee80211_get_tkip_p2k); /* * Encrypt packet payload with TKIP using @key. @payload is a pointer to the * beginning of the buffer containing the payload. This payload must include * the IV/Ext.IV and space for (tailroom) four octets for the ICV. * @payload_len is the length of the payload (_not_ including IV/ICV length). */ int ieee80211_tkip_encrypt_data(struct crypto_cipher *tfm, struct ieee80211_key *key, struct sk_buff *skb, u8 *payload, size_t payload_len) { u8 rc4key[16]; ieee80211_get_tkip_p2k(&key->conf, skb, rc4key); return ieee80211_wep_encrypt_data(tfm, rc4key, 16, payload, payload_len); } /* Decrypt packet payload with TKIP using @key. @payload is a pointer to the * beginning of the buffer containing the IEEE 802.11 header payload, i.e., * including IV, Ext. IV, real data, Michael MIC, ICV. @payload_len is the * length of payload, including IV, Ext. IV, MIC, ICV.
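 * Layout of the 8-octet IV/Ext.IV block parsed below (the mirror of
 * write_tkip_iv() on the transmit side):
 *   pos[0]    TSC1 (high octet of iv16)
 *   pos[1]    WEP seed, ((TSC1 | 0x20) & 0x7f)
 *   pos[2]    TSC0 (low octet of iv16)
 *   pos[3]    key ID octet (bit 5 = Ext IV, bits 6-7 = key index)
 *   pos[4..7] TSC2..TSC5 (iv32, little endian)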
*/ int ieee80211_tkip_decrypt_data(struct crypto_cipher *tfm, struct ieee80211_key *key, u8 *payload, size_t payload_len, u8 *ta, u8 *ra, int only_iv, int queue, u32 *out_iv32, u16 *out_iv16) { u32 iv32; u32 iv16; u8 rc4key[16], keyid, *pos = payload; int res; const u8 *tk = &key->conf.key[NL80211_TKIP_DATA_OFFSET_ENCR_KEY]; struct tkip_ctx_rx *rx_ctx = &key->u.tkip.rx[queue]; if (payload_len < 12) return -1; iv16 = (pos[0] << 8) | pos[2]; keyid = pos[3]; iv32 = get_unaligned_le32(pos + 4); pos += 8; if (!(keyid & (1 << 5))) return TKIP_DECRYPT_NO_EXT_IV; if ((keyid >> 6) != key->conf.keyidx) return TKIP_DECRYPT_INVALID_KEYIDX; if (rx_ctx->ctx.state != TKIP_STATE_NOT_INIT && (iv32 < rx_ctx->iv32 || (iv32 == rx_ctx->iv32 && iv16 <= rx_ctx->iv16))) return TKIP_DECRYPT_REPLAY; if (only_iv) { res = TKIP_DECRYPT_OK; rx_ctx->ctx.state = TKIP_STATE_PHASE1_HW_UPLOADED; goto done; } if (rx_ctx->ctx.state == TKIP_STATE_NOT_INIT || rx_ctx->iv32 != iv32) { /* IV16 wrapped around - perform TKIP phase 1 */ tkip_mixing_phase1(tk, &rx_ctx->ctx, ta, iv32); } if (key->local->ops->update_tkip_key && key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE && rx_ctx->ctx.state != TKIP_STATE_PHASE1_HW_UPLOADED) { struct ieee80211_sub_if_data *sdata = key->sdata; if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) sdata = container_of(key->sdata->bss, struct ieee80211_sub_if_data, u.ap); drv_update_tkip_key(key->local, sdata, &key->conf, key->sta, iv32, rx_ctx->ctx.p1k); rx_ctx->ctx.state = TKIP_STATE_PHASE1_HW_UPLOADED; } tkip_mixing_phase2(tk, &rx_ctx->ctx, iv16, rc4key); res = ieee80211_wep_decrypt_data(tfm, rc4key, 16, pos, payload_len - 12); done: if (res == TKIP_DECRYPT_OK) { /* * Record previously received IV, will be copied into the * key information after MIC verification. It is possible * that we don't catch replays of fragments but that's ok * because the Michael MIC verification will then fail. */ *out_iv32 = iv32; *out_iv16 = iv16; } return res; }
backport-iwlwifi-dkms-7906/net/mac80211/tkip.h000066400000000000000000000015251351064634500207460ustar00rootroot00000000000000/* * Copyright 2002-2004, Instant802 Networks, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #ifndef TKIP_H #define TKIP_H #include <linux/types.h> #include <linux/crypto.h> #include "key.h" int ieee80211_tkip_encrypt_data(struct crypto_cipher *tfm, struct ieee80211_key *key, struct sk_buff *skb, u8 *payload, size_t payload_len); enum { TKIP_DECRYPT_OK = 0, TKIP_DECRYPT_NO_EXT_IV = -1, TKIP_DECRYPT_INVALID_KEYIDX = -2, TKIP_DECRYPT_REPLAY = -3, }; int ieee80211_tkip_decrypt_data(struct crypto_cipher *tfm, struct ieee80211_key *key, u8 *payload, size_t payload_len, u8 *ta, u8 *ra, int only_iv, int queue, u32 *out_iv32, u16 *out_iv16); #endif /* TKIP_H */
backport-iwlwifi-dkms-7906/net/mac80211/trace.c000066400000000000000000000030531351064634500210660ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0 /* bug in tracepoint.h, it should include this */ #include <linux/module.h> /* sparse isn't too happy with all macros... */ #ifndef __CHECKER__ #include <net/cfg80211.h> #include "driver-ops.h" #include "debug.h" #define CREATE_TRACE_POINTS #include "trace.h" #include "trace_msg.h" #ifdef CPTCFG_MAC80211_MESSAGE_TRACING void __sdata_info(const char *fmt, ...)
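/* Mirror the message to both the kernel log and the mac80211_info trace
 * event; the va_list is va_copy()'d because each consumer walks it once. */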
{ struct va_format vaf = { .fmt = fmt, }; va_list args, args2; va_start(args, fmt); va_copy(args2, args); vaf.va = &args2; pr_info("%pV", &vaf); va_end(args2); vaf.va = &args; trace_mac80211_info(&vaf); va_end(args); } void __sdata_dbg(bool print, const char *fmt, ...) { struct va_format vaf = { .fmt = fmt, }; va_list args; va_start(args, fmt); if (print) { va_list args2; va_copy(args2, args); vaf.va = &args2; pr_debug("%pV", &vaf); va_end(args2); } vaf.va = &args; trace_mac80211_dbg(&vaf); va_end(args); } void __sdata_err(const char *fmt, ...) { struct va_format vaf = { .fmt = fmt, }; va_list args, args2; va_start(args, fmt); va_copy(args2, args); vaf.va = &args2; pr_err("%pV", &vaf); va_end(args2); vaf.va = &args; trace_mac80211_err(&vaf); va_end(args); } void __wiphy_dbg(struct wiphy *wiphy, bool print, const char *fmt, ...) { struct va_format vaf = { .fmt = fmt, }; va_list args; va_start(args, fmt); if (print) { va_list args2; va_copy(args2, args); vaf.va = &args2; pr_debug("%pV", &vaf); va_end(args2); } vaf.va = &args; trace_mac80211_dbg(&vaf); va_end(args); } #endif #endif
backport-iwlwifi-dkms-7906/net/mac80211/trace.h000066400000000000000000001600331351064634500210750ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0 */ /* * Portions of this file * Copyright(c) 2016-2017 Intel Deutschland GmbH * Copyright (C) 2018 - 2019 Intel Corporation */ #if !defined(__MAC80211_DRIVER_TRACE) || defined(TRACE_HEADER_MULTI_READ) #define __MAC80211_DRIVER_TRACE #include <linux/tracepoint.h> #include <net/mac80211.h> #include "ieee80211_i.h" #undef TRACE_SYSTEM #define TRACE_SYSTEM mac80211 #define MAXNAME 32 #define LOCAL_ENTRY __array(char, wiphy_name, 32) #define LOCAL_ASSIGN strlcpy(__entry->wiphy_name, wiphy_name(local->hw.wiphy), MAXNAME) #define LOCAL_PR_FMT "%s" #define LOCAL_PR_ARG __entry->wiphy_name #define STA_ENTRY __array(char, sta_addr, ETH_ALEN) #define STA_ASSIGN (sta ? memcpy(__entry->sta_addr, sta->addr, ETH_ALEN) : memset(__entry->sta_addr, 0, ETH_ALEN)) #define STA_NAMED_ASSIGN(s) memcpy(__entry->sta_addr, (s)->addr, ETH_ALEN) #define STA_PR_FMT " sta:%pM" #define STA_PR_ARG __entry->sta_addr #define VIF_ENTRY __field(enum nl80211_iftype, vif_type) __field(void *, sdata) \ __field(bool, p2p) \ __string(vif_name, sdata->name) #define VIF_ASSIGN __entry->vif_type = sdata->vif.type; __entry->sdata = sdata; \ __entry->p2p = sdata->vif.p2p; \ __assign_str(vif_name, sdata->name) #define VIF_PR_FMT " vif:%s(%d%s)" #define VIF_PR_ARG __get_str(vif_name), __entry->vif_type, __entry->p2p ? "/p2p" : "" #define CHANDEF_ENTRY __field(u32, control_freq) \ __field(u32, chan_width) \ __field(u32, center_freq1) \ __field(u32, center_freq2) #define CHANDEF_ASSIGN(c) \ __entry->control_freq = (c) ? ((c)->chan ? (c)->chan->center_freq : 0) : 0; \ __entry->chan_width = (c) ? (c)->width : 0; \ __entry->center_freq1 = (c) ? (c)->center_freq1 : 0; \ __entry->center_freq2 = (c) ? (c)->center_freq2 : 0; #define CHANDEF_PR_FMT " control:%d MHz width:%d center: %d/%d MHz" #define CHANDEF_PR_ARG __entry->control_freq, __entry->chan_width, \ __entry->center_freq1, __entry->center_freq2 #define MIN_CHANDEF_ENTRY \ __field(u32, min_control_freq) \ __field(u32, min_chan_width) \ __field(u32, min_center_freq1) \ __field(u32, min_center_freq2) #define MIN_CHANDEF_ASSIGN(c) \ __entry->min_control_freq = (c)->chan ?
(c)->chan->center_freq : 0; \ __entry->min_chan_width = (c)->width; \ __entry->min_center_freq1 = (c)->center_freq1; \ __entry->min_center_freq2 = (c)->center_freq2; #define MIN_CHANDEF_PR_FMT " min_control:%d MHz min_width:%d min_center: %d/%d MHz" #define MIN_CHANDEF_PR_ARG __entry->min_control_freq, __entry->min_chan_width, \ __entry->min_center_freq1, __entry->min_center_freq2 #define CHANCTX_ENTRY CHANDEF_ENTRY \ MIN_CHANDEF_ENTRY \ __field(u8, rx_chains_static) \ __field(u8, rx_chains_dynamic) #define CHANCTX_ASSIGN CHANDEF_ASSIGN(&ctx->conf.def) \ MIN_CHANDEF_ASSIGN(&ctx->conf.min_def) \ __entry->rx_chains_static = ctx->conf.rx_chains_static; \ __entry->rx_chains_dynamic = ctx->conf.rx_chains_dynamic #define CHANCTX_PR_FMT CHANDEF_PR_FMT MIN_CHANDEF_PR_FMT " chains:%d/%d" #define CHANCTX_PR_ARG CHANDEF_PR_ARG, MIN_CHANDEF_PR_ARG, \ __entry->rx_chains_static, __entry->rx_chains_dynamic #define KEY_ENTRY __field(u32, cipher) \ __field(u8, hw_key_idx) \ __field(u8, flags) \ __field(s8, keyidx) #define KEY_ASSIGN(k) __entry->cipher = (k)->cipher; \ __entry->flags = (k)->flags; \ __entry->keyidx = (k)->keyidx; \ __entry->hw_key_idx = (k)->hw_key_idx; #define KEY_PR_FMT " cipher:0x%x, flags=%#x, keyidx=%d, hw_key_idx=%d" #define KEY_PR_ARG __entry->cipher, __entry->flags, __entry->keyidx, __entry->hw_key_idx #define AMPDU_ACTION_ENTRY __field(enum ieee80211_ampdu_mlme_action, \ ieee80211_ampdu_mlme_action) \ STA_ENTRY \ __field(u16, tid) \ __field(u16, ssn) \ __field(u16, buf_size) \ __field(bool, amsdu) \ __field(u16, timeout) \ __field(u16, action) #define AMPDU_ACTION_ASSIGN STA_NAMED_ASSIGN(params->sta); \ __entry->tid = params->tid; \ __entry->ssn = params->ssn; \ __entry->buf_size = params->buf_size; \ __entry->amsdu = params->amsdu; \ __entry->timeout = params->timeout; \ __entry->action = params->action; #define AMPDU_ACTION_PR_FMT STA_PR_FMT " tid %d, ssn %d, buf_size %u, amsdu %d, timeout %d action %d" #define AMPDU_ACTION_PR_ARG STA_PR_ARG, __entry->tid, __entry->ssn, \ __entry->buf_size, __entry->amsdu, __entry->timeout, \ __entry->action /* * Tracing for driver callbacks. 
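 * The *_ENTRY / *_ASSIGN / *_PR_FMT / *_PR_ARG macro quadruples defined
 * above are building blocks: an event pastes them into TP_STRUCT__entry(),
 * TP_fast_assign() and TP_printk() respectively, so adding e.g. a station
 * field to an event means adding STA_ENTRY, STA_ASSIGN and
 * STA_PR_FMT/STA_PR_ARG in those three places.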
*/ DECLARE_EVENT_CLASS(local_only_evt, TP_PROTO(struct ieee80211_local *local), TP_ARGS(local), TP_STRUCT__entry( LOCAL_ENTRY ), TP_fast_assign( LOCAL_ASSIGN; ), TP_printk(LOCAL_PR_FMT, LOCAL_PR_ARG) ); DECLARE_EVENT_CLASS(local_sdata_addr_evt, TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata), TP_ARGS(local, sdata), TP_STRUCT__entry( LOCAL_ENTRY VIF_ENTRY __array(char, addr, ETH_ALEN) ), TP_fast_assign( LOCAL_ASSIGN; VIF_ASSIGN; memcpy(__entry->addr, sdata->vif.addr, ETH_ALEN); ), TP_printk( LOCAL_PR_FMT VIF_PR_FMT " addr:%pM", LOCAL_PR_ARG, VIF_PR_ARG, __entry->addr ) ); DECLARE_EVENT_CLASS(local_u32_evt, TP_PROTO(struct ieee80211_local *local, u32 value), TP_ARGS(local, value), TP_STRUCT__entry( LOCAL_ENTRY __field(u32, value) ), TP_fast_assign( LOCAL_ASSIGN; __entry->value = value; ), TP_printk( LOCAL_PR_FMT " value:%d", LOCAL_PR_ARG, __entry->value ) ); DECLARE_EVENT_CLASS(local_sdata_evt, TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata), TP_ARGS(local, sdata), TP_STRUCT__entry( LOCAL_ENTRY VIF_ENTRY ), TP_fast_assign( LOCAL_ASSIGN; VIF_ASSIGN; ), TP_printk( LOCAL_PR_FMT VIF_PR_FMT, LOCAL_PR_ARG, VIF_PR_ARG ) ); DEFINE_EVENT(local_only_evt, drv_return_void, TP_PROTO(struct ieee80211_local *local), TP_ARGS(local) ); TRACE_EVENT(drv_return_int, TP_PROTO(struct ieee80211_local *local, int ret), TP_ARGS(local, ret), TP_STRUCT__entry( LOCAL_ENTRY __field(int, ret) ), TP_fast_assign( LOCAL_ASSIGN; __entry->ret = ret; ), TP_printk(LOCAL_PR_FMT " - %d", LOCAL_PR_ARG, __entry->ret) ); TRACE_EVENT(drv_return_bool, TP_PROTO(struct ieee80211_local *local, bool ret), TP_ARGS(local, ret), TP_STRUCT__entry( LOCAL_ENTRY __field(bool, ret) ), TP_fast_assign( LOCAL_ASSIGN; __entry->ret = ret; ), TP_printk(LOCAL_PR_FMT " - %s", LOCAL_PR_ARG, (__entry->ret) ? 
"true" : "false") ); TRACE_EVENT(drv_return_u32, TP_PROTO(struct ieee80211_local *local, u32 ret), TP_ARGS(local, ret), TP_STRUCT__entry( LOCAL_ENTRY __field(u32, ret) ), TP_fast_assign( LOCAL_ASSIGN; __entry->ret = ret; ), TP_printk(LOCAL_PR_FMT " - %u", LOCAL_PR_ARG, __entry->ret) ); TRACE_EVENT(drv_return_u64, TP_PROTO(struct ieee80211_local *local, u64 ret), TP_ARGS(local, ret), TP_STRUCT__entry( LOCAL_ENTRY __field(u64, ret) ), TP_fast_assign( LOCAL_ASSIGN; __entry->ret = ret; ), TP_printk(LOCAL_PR_FMT " - %llu", LOCAL_PR_ARG, __entry->ret) ); DEFINE_EVENT(local_only_evt, drv_start, TP_PROTO(struct ieee80211_local *local), TP_ARGS(local) ); DEFINE_EVENT(local_u32_evt, drv_get_et_strings, TP_PROTO(struct ieee80211_local *local, u32 sset), TP_ARGS(local, sset) ); DEFINE_EVENT(local_u32_evt, drv_get_et_sset_count, TP_PROTO(struct ieee80211_local *local, u32 sset), TP_ARGS(local, sset) ); DEFINE_EVENT(local_only_evt, drv_get_et_stats, TP_PROTO(struct ieee80211_local *local), TP_ARGS(local) ); DEFINE_EVENT(local_only_evt, drv_suspend, TP_PROTO(struct ieee80211_local *local), TP_ARGS(local) ); DEFINE_EVENT(local_only_evt, drv_resume, TP_PROTO(struct ieee80211_local *local), TP_ARGS(local) ); TRACE_EVENT(drv_set_wakeup, TP_PROTO(struct ieee80211_local *local, bool enabled), TP_ARGS(local, enabled), TP_STRUCT__entry( LOCAL_ENTRY __field(bool, enabled) ), TP_fast_assign( LOCAL_ASSIGN; __entry->enabled = enabled; ), TP_printk(LOCAL_PR_FMT " enabled:%d", LOCAL_PR_ARG, __entry->enabled) ); DEFINE_EVENT(local_only_evt, drv_stop, TP_PROTO(struct ieee80211_local *local), TP_ARGS(local) ); DEFINE_EVENT(local_sdata_addr_evt, drv_add_interface, TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata), TP_ARGS(local, sdata) ); TRACE_EVENT(drv_change_interface, TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, enum nl80211_iftype type, bool p2p), TP_ARGS(local, sdata, type, p2p), TP_STRUCT__entry( LOCAL_ENTRY VIF_ENTRY __field(u32, new_type) __field(bool, new_p2p) ), TP_fast_assign( LOCAL_ASSIGN; VIF_ASSIGN; __entry->new_type = type; __entry->new_p2p = p2p; ), TP_printk( LOCAL_PR_FMT VIF_PR_FMT " new type:%d%s", LOCAL_PR_ARG, VIF_PR_ARG, __entry->new_type, __entry->new_p2p ? 
"/p2p" : "" ) ); DEFINE_EVENT(local_sdata_addr_evt, drv_remove_interface, TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata), TP_ARGS(local, sdata) ); TRACE_EVENT(drv_config, TP_PROTO(struct ieee80211_local *local, u32 changed), TP_ARGS(local, changed), TP_STRUCT__entry( LOCAL_ENTRY __field(u32, changed) __field(u32, flags) __field(int, power_level) __field(int, dynamic_ps_timeout) __field(u16, listen_interval) __field(u8, long_frame_max_tx_count) __field(u8, short_frame_max_tx_count) CHANDEF_ENTRY __field(int, smps) ), TP_fast_assign( LOCAL_ASSIGN; __entry->changed = changed; __entry->flags = local->hw.conf.flags; __entry->power_level = local->hw.conf.power_level; __entry->dynamic_ps_timeout = local->hw.conf.dynamic_ps_timeout; __entry->listen_interval = local->hw.conf.listen_interval; __entry->long_frame_max_tx_count = local->hw.conf.long_frame_max_tx_count; __entry->short_frame_max_tx_count = local->hw.conf.short_frame_max_tx_count; CHANDEF_ASSIGN(&local->hw.conf.chandef) __entry->smps = local->hw.conf.smps_mode; ), TP_printk( LOCAL_PR_FMT " ch:%#x" CHANDEF_PR_FMT, LOCAL_PR_ARG, __entry->changed, CHANDEF_PR_ARG ) ); TRACE_EVENT(drv_bss_info_changed, TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct ieee80211_bss_conf *info, u32 changed), TP_ARGS(local, sdata, info, changed), TP_STRUCT__entry( LOCAL_ENTRY VIF_ENTRY __field(u32, changed) __field(bool, assoc) __field(bool, ibss_joined) __field(bool, ibss_creator) __field(u16, aid) __field(bool, cts) __field(bool, shortpre) __field(bool, shortslot) __field(bool, enable_beacon) __field(u8, dtimper) __field(u16, bcnint) __field(u16, assoc_cap) __field(u64, sync_tsf) __field(u32, sync_device_ts) __field(u8, sync_dtim_count) __field(u32, basic_rates) __array(int, mcast_rate, NUM_NL80211_BANDS) __field(u16, ht_operation_mode) __field(s32, cqm_rssi_thold); __field(s32, cqm_rssi_hyst); __field(u32, channel_width); __field(u32, channel_cfreq1); __dynamic_array(u32, arp_addr_list, info->arp_addr_cnt > IEEE80211_BSS_ARP_ADDR_LIST_LEN ? IEEE80211_BSS_ARP_ADDR_LIST_LEN : info->arp_addr_cnt); __field(int, arp_addr_cnt); __field(bool, qos); __field(bool, idle); __field(bool, ps); __dynamic_array(u8, ssid, info->ssid_len); __field(bool, hidden_ssid); __field(int, txpower) __field(u8, p2p_oppps_ctwindow) ), TP_fast_assign( LOCAL_ASSIGN; VIF_ASSIGN; __entry->changed = changed; __entry->aid = info->aid; __entry->assoc = info->assoc; __entry->ibss_joined = info->ibss_joined; __entry->ibss_creator = info->ibss_creator; __entry->shortpre = info->use_short_preamble; __entry->cts = info->use_cts_prot; __entry->shortslot = info->use_short_slot; __entry->enable_beacon = info->enable_beacon; __entry->dtimper = info->dtim_period; __entry->bcnint = info->beacon_int; __entry->assoc_cap = info->assoc_capability; __entry->sync_tsf = info->sync_tsf; __entry->sync_device_ts = info->sync_device_ts; __entry->sync_dtim_count = info->sync_dtim_count; __entry->basic_rates = info->basic_rates; memcpy(__entry->mcast_rate, info->mcast_rate, sizeof(__entry->mcast_rate)); __entry->ht_operation_mode = info->ht_operation_mode; __entry->cqm_rssi_thold = info->cqm_rssi_thold; __entry->cqm_rssi_hyst = info->cqm_rssi_hyst; __entry->channel_width = info->chandef.width; __entry->channel_cfreq1 = info->chandef.center_freq1; __entry->arp_addr_cnt = info->arp_addr_cnt; memcpy(__get_dynamic_array(arp_addr_list), info->arp_addr_list, sizeof(u32) * (info->arp_addr_cnt > IEEE80211_BSS_ARP_ADDR_LIST_LEN ? 
IEEE80211_BSS_ARP_ADDR_LIST_LEN : info->arp_addr_cnt)); __entry->qos = info->qos; __entry->idle = info->idle; __entry->ps = info->ps; memcpy(__get_dynamic_array(ssid), info->ssid, info->ssid_len); __entry->hidden_ssid = info->hidden_ssid; __entry->txpower = info->txpower; __entry->p2p_oppps_ctwindow = info->p2p_noa_attr.oppps_ctwindow; ), TP_printk( LOCAL_PR_FMT VIF_PR_FMT " changed:%#x", LOCAL_PR_ARG, VIF_PR_ARG, __entry->changed ) ); TRACE_EVENT(drv_prepare_multicast, TP_PROTO(struct ieee80211_local *local, int mc_count), TP_ARGS(local, mc_count), TP_STRUCT__entry( LOCAL_ENTRY __field(int, mc_count) ), TP_fast_assign( LOCAL_ASSIGN; __entry->mc_count = mc_count; ), TP_printk( LOCAL_PR_FMT " prepare mc (%d)", LOCAL_PR_ARG, __entry->mc_count ) ); TRACE_EVENT(drv_configure_filter, TP_PROTO(struct ieee80211_local *local, unsigned int changed_flags, unsigned int *total_flags, u64 multicast), TP_ARGS(local, changed_flags, total_flags, multicast), TP_STRUCT__entry( LOCAL_ENTRY __field(unsigned int, changed) __field(unsigned int, total) __field(u64, multicast) ), TP_fast_assign( LOCAL_ASSIGN; __entry->changed = changed_flags; __entry->total = *total_flags; __entry->multicast = multicast; ), TP_printk( LOCAL_PR_FMT " changed:%#x total:%#x", LOCAL_PR_ARG, __entry->changed, __entry->total ) ); TRACE_EVENT(drv_config_iface_filter, TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, unsigned int filter_flags, unsigned int changed_flags), TP_ARGS(local, sdata, filter_flags, changed_flags), TP_STRUCT__entry( LOCAL_ENTRY VIF_ENTRY __field(unsigned int, filter_flags) __field(unsigned int, changed_flags) ), TP_fast_assign( LOCAL_ASSIGN; VIF_ASSIGN; __entry->filter_flags = filter_flags; __entry->changed_flags = changed_flags; ), TP_printk( LOCAL_PR_FMT VIF_PR_FMT " filter_flags: %#x changed_flags: %#x", LOCAL_PR_ARG, VIF_PR_ARG, __entry->filter_flags, __entry->changed_flags ) ); TRACE_EVENT(drv_set_tim, TP_PROTO(struct ieee80211_local *local, struct ieee80211_sta *sta, bool set), TP_ARGS(local, sta, set), TP_STRUCT__entry( LOCAL_ENTRY STA_ENTRY __field(bool, set) ), TP_fast_assign( LOCAL_ASSIGN; STA_ASSIGN; __entry->set = set; ), TP_printk( LOCAL_PR_FMT STA_PR_FMT " set:%d", LOCAL_PR_ARG, STA_PR_ARG, __entry->set ) ); TRACE_EVENT(drv_set_key, TP_PROTO(struct ieee80211_local *local, enum set_key_cmd cmd, struct ieee80211_sub_if_data *sdata, struct ieee80211_sta *sta, struct ieee80211_key_conf *key), TP_ARGS(local, cmd, sdata, sta, key), TP_STRUCT__entry( LOCAL_ENTRY VIF_ENTRY STA_ENTRY KEY_ENTRY ), TP_fast_assign( LOCAL_ASSIGN; VIF_ASSIGN; STA_ASSIGN; KEY_ASSIGN(key); ), TP_printk( LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT KEY_PR_FMT, LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, KEY_PR_ARG ) ); TRACE_EVENT(drv_update_tkip_key, TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct ieee80211_key_conf *conf, struct ieee80211_sta *sta, u32 iv32), TP_ARGS(local, sdata, conf, sta, iv32), TP_STRUCT__entry( LOCAL_ENTRY VIF_ENTRY STA_ENTRY __field(u32, iv32) ), TP_fast_assign( LOCAL_ASSIGN; VIF_ASSIGN; STA_ASSIGN; __entry->iv32 = iv32; ), TP_printk( LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " iv32:%#x", LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->iv32 ) ); DEFINE_EVENT(local_sdata_evt, drv_hw_scan, TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata), TP_ARGS(local, sdata) ); DEFINE_EVENT(local_sdata_evt, drv_cancel_hw_scan, TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata), TP_ARGS(local, sdata) ); DEFINE_EVENT(local_sdata_evt, 
drv_sched_scan_start, TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata), TP_ARGS(local, sdata) ); DEFINE_EVENT(local_sdata_evt, drv_sched_scan_stop, TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata), TP_ARGS(local, sdata) ); TRACE_EVENT(drv_sw_scan_start, TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, const u8 *mac_addr), TP_ARGS(local, sdata, mac_addr), TP_STRUCT__entry( LOCAL_ENTRY VIF_ENTRY __array(char, mac_addr, ETH_ALEN) ), TP_fast_assign( LOCAL_ASSIGN; VIF_ASSIGN; memcpy(__entry->mac_addr, mac_addr, ETH_ALEN); ), TP_printk(LOCAL_PR_FMT ", " VIF_PR_FMT ", addr:%pM", LOCAL_PR_ARG, VIF_PR_ARG, __entry->mac_addr) ); DEFINE_EVENT(local_sdata_evt, drv_sw_scan_complete, TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata), TP_ARGS(local, sdata) ); TRACE_EVENT(drv_get_stats, TP_PROTO(struct ieee80211_local *local, struct ieee80211_low_level_stats *stats, int ret), TP_ARGS(local, stats, ret), TP_STRUCT__entry( LOCAL_ENTRY __field(int, ret) __field(unsigned int, ackfail) __field(unsigned int, rtsfail) __field(unsigned int, fcserr) __field(unsigned int, rtssucc) ), TP_fast_assign( LOCAL_ASSIGN; __entry->ret = ret; __entry->ackfail = stats->dot11ACKFailureCount; __entry->rtsfail = stats->dot11RTSFailureCount; __entry->fcserr = stats->dot11FCSErrorCount; __entry->rtssucc = stats->dot11RTSSuccessCount; ), TP_printk( LOCAL_PR_FMT " ret:%d", LOCAL_PR_ARG, __entry->ret ) ); TRACE_EVENT(drv_get_key_seq, TP_PROTO(struct ieee80211_local *local, struct ieee80211_key_conf *key), TP_ARGS(local, key), TP_STRUCT__entry( LOCAL_ENTRY KEY_ENTRY ), TP_fast_assign( LOCAL_ASSIGN; KEY_ASSIGN(key); ), TP_printk( LOCAL_PR_FMT KEY_PR_FMT, LOCAL_PR_ARG, KEY_PR_ARG ) ); DEFINE_EVENT(local_u32_evt, drv_set_frag_threshold, TP_PROTO(struct ieee80211_local *local, u32 value), TP_ARGS(local, value) ); DEFINE_EVENT(local_u32_evt, drv_set_rts_threshold, TP_PROTO(struct ieee80211_local *local, u32 value), TP_ARGS(local, value) ); TRACE_EVENT(drv_set_coverage_class, TP_PROTO(struct ieee80211_local *local, s16 value), TP_ARGS(local, value), TP_STRUCT__entry( LOCAL_ENTRY __field(s16, value) ), TP_fast_assign( LOCAL_ASSIGN; __entry->value = value; ), TP_printk( LOCAL_PR_FMT " value:%d", LOCAL_PR_ARG, __entry->value ) ); TRACE_EVENT(drv_sta_notify, TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, enum sta_notify_cmd cmd, struct ieee80211_sta *sta), TP_ARGS(local, sdata, cmd, sta), TP_STRUCT__entry( LOCAL_ENTRY VIF_ENTRY STA_ENTRY __field(u32, cmd) ), TP_fast_assign( LOCAL_ASSIGN; VIF_ASSIGN; STA_ASSIGN; __entry->cmd = cmd; ), TP_printk( LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " cmd:%d", LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->cmd ) ); TRACE_EVENT(drv_sta_state, TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct ieee80211_sta *sta, enum ieee80211_sta_state old_state, enum ieee80211_sta_state new_state), TP_ARGS(local, sdata, sta, old_state, new_state), TP_STRUCT__entry( LOCAL_ENTRY VIF_ENTRY STA_ENTRY __field(u32, old_state) __field(u32, new_state) ), TP_fast_assign( LOCAL_ASSIGN; VIF_ASSIGN; STA_ASSIGN; __entry->old_state = old_state; __entry->new_state = new_state; ), TP_printk( LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " state: %d->%d", LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->old_state, __entry->new_state ) ); TRACE_EVENT(drv_sta_rc_update, TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct ieee80211_sta *sta, u32 changed), 
TP_ARGS(local, sdata, sta, changed), TP_STRUCT__entry( LOCAL_ENTRY VIF_ENTRY STA_ENTRY __field(u32, changed) ), TP_fast_assign( LOCAL_ASSIGN; VIF_ASSIGN; STA_ASSIGN; __entry->changed = changed; ), TP_printk( LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " changed: 0x%x", LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->changed ) ); DECLARE_EVENT_CLASS(sta_event, TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct ieee80211_sta *sta), TP_ARGS(local, sdata, sta), TP_STRUCT__entry( LOCAL_ENTRY VIF_ENTRY STA_ENTRY ), TP_fast_assign( LOCAL_ASSIGN; VIF_ASSIGN; STA_ASSIGN; ), TP_printk( LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT, LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG ) ); DEFINE_EVENT(sta_event, drv_sta_statistics, TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct ieee80211_sta *sta), TP_ARGS(local, sdata, sta) ); DEFINE_EVENT(sta_event, drv_sta_add, TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct ieee80211_sta *sta), TP_ARGS(local, sdata, sta) ); DEFINE_EVENT(sta_event, drv_sta_remove, TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct ieee80211_sta *sta), TP_ARGS(local, sdata, sta) ); DEFINE_EVENT(sta_event, drv_sta_pre_rcu_remove, TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct ieee80211_sta *sta), TP_ARGS(local, sdata, sta) ); DEFINE_EVENT(sta_event, drv_sync_rx_queues, TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct ieee80211_sta *sta), TP_ARGS(local, sdata, sta) ); DEFINE_EVENT(sta_event, drv_sta_rate_tbl_update, TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct ieee80211_sta *sta), TP_ARGS(local, sdata, sta) ); TRACE_EVENT(drv_conf_tx, TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, u16 ac, const struct ieee80211_tx_queue_params *params), TP_ARGS(local, sdata, ac, params), TP_STRUCT__entry( LOCAL_ENTRY VIF_ENTRY __field(u16, ac) __field(u16, txop) __field(u16, cw_min) __field(u16, cw_max) __field(u8, aifs) __field(bool, uapsd) ), TP_fast_assign( LOCAL_ASSIGN; VIF_ASSIGN; __entry->ac = ac; __entry->txop = params->txop; __entry->cw_max = params->cw_max; __entry->cw_min = params->cw_min; __entry->aifs = params->aifs; __entry->uapsd = params->uapsd; ), TP_printk( LOCAL_PR_FMT VIF_PR_FMT " AC:%d", LOCAL_PR_ARG, VIF_PR_ARG, __entry->ac ) ); DEFINE_EVENT(local_sdata_evt, drv_get_tsf, TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata), TP_ARGS(local, sdata) ); TRACE_EVENT(drv_set_tsf, TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, u64 tsf), TP_ARGS(local, sdata, tsf), TP_STRUCT__entry( LOCAL_ENTRY VIF_ENTRY __field(u64, tsf) ), TP_fast_assign( LOCAL_ASSIGN; VIF_ASSIGN; __entry->tsf = tsf; ), TP_printk( LOCAL_PR_FMT VIF_PR_FMT " tsf:%llu", LOCAL_PR_ARG, VIF_PR_ARG, (unsigned long long)__entry->tsf ) ); TRACE_EVENT(drv_offset_tsf, TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, s64 offset), TP_ARGS(local, sdata, offset), TP_STRUCT__entry( LOCAL_ENTRY VIF_ENTRY __field(s64, tsf_offset) ), TP_fast_assign( LOCAL_ASSIGN; VIF_ASSIGN; __entry->tsf_offset = offset; ), TP_printk( LOCAL_PR_FMT VIF_PR_FMT " tsf offset:%lld", LOCAL_PR_ARG, VIF_PR_ARG, (unsigned long long)__entry->tsf_offset ) ); DEFINE_EVENT(local_sdata_evt, drv_reset_tsf, TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata), TP_ARGS(local, sdata) ); DEFINE_EVENT(local_only_evt, 
drv_tx_last_beacon, TP_PROTO(struct ieee80211_local *local), TP_ARGS(local) ); TRACE_EVENT(drv_ampdu_action, TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct ieee80211_ampdu_params *params), TP_ARGS(local, sdata, params), TP_STRUCT__entry( LOCAL_ENTRY VIF_ENTRY AMPDU_ACTION_ENTRY ), TP_fast_assign( LOCAL_ASSIGN; VIF_ASSIGN; AMPDU_ACTION_ASSIGN; ), TP_printk( LOCAL_PR_FMT VIF_PR_FMT AMPDU_ACTION_PR_FMT, LOCAL_PR_ARG, VIF_PR_ARG, AMPDU_ACTION_PR_ARG ) ); TRACE_EVENT(drv_get_survey, TP_PROTO(struct ieee80211_local *local, int _idx, struct survey_info *survey), TP_ARGS(local, _idx, survey), TP_STRUCT__entry( LOCAL_ENTRY __field(int, idx) ), TP_fast_assign( LOCAL_ASSIGN; __entry->idx = _idx; ), TP_printk( LOCAL_PR_FMT " idx:%d", LOCAL_PR_ARG, __entry->idx ) ); TRACE_EVENT(drv_flush, TP_PROTO(struct ieee80211_local *local, u32 queues, bool drop), TP_ARGS(local, queues, drop), TP_STRUCT__entry( LOCAL_ENTRY __field(bool, drop) __field(u32, queues) ), TP_fast_assign( LOCAL_ASSIGN; __entry->drop = drop; __entry->queues = queues; ), TP_printk( LOCAL_PR_FMT " queues:0x%x drop:%d", LOCAL_PR_ARG, __entry->queues, __entry->drop ) ); TRACE_EVENT(drv_channel_switch, TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct ieee80211_channel_switch *ch_switch), TP_ARGS(local, sdata, ch_switch), TP_STRUCT__entry( LOCAL_ENTRY VIF_ENTRY CHANDEF_ENTRY __field(u64, timestamp) __field(u32, device_timestamp) __field(bool, block_tx) __field(u8, count) ), TP_fast_assign( LOCAL_ASSIGN; VIF_ASSIGN; CHANDEF_ASSIGN(&ch_switch->chandef) __entry->timestamp = ch_switch->timestamp; __entry->device_timestamp = ch_switch->device_timestamp; __entry->block_tx = ch_switch->block_tx; __entry->count = ch_switch->count; ), TP_printk( LOCAL_PR_FMT VIF_PR_FMT " new " CHANDEF_PR_FMT " count:%d", LOCAL_PR_ARG, VIF_PR_ARG, CHANDEF_PR_ARG, __entry->count ) ); TRACE_EVENT(drv_set_antenna, TP_PROTO(struct ieee80211_local *local, u32 tx_ant, u32 rx_ant, int ret), TP_ARGS(local, tx_ant, rx_ant, ret), TP_STRUCT__entry( LOCAL_ENTRY __field(u32, tx_ant) __field(u32, rx_ant) __field(int, ret) ), TP_fast_assign( LOCAL_ASSIGN; __entry->tx_ant = tx_ant; __entry->rx_ant = rx_ant; __entry->ret = ret; ), TP_printk( LOCAL_PR_FMT " tx_ant:%d rx_ant:%d ret:%d", LOCAL_PR_ARG, __entry->tx_ant, __entry->rx_ant, __entry->ret ) ); TRACE_EVENT(drv_get_antenna, TP_PROTO(struct ieee80211_local *local, u32 tx_ant, u32 rx_ant, int ret), TP_ARGS(local, tx_ant, rx_ant, ret), TP_STRUCT__entry( LOCAL_ENTRY __field(u32, tx_ant) __field(u32, rx_ant) __field(int, ret) ), TP_fast_assign( LOCAL_ASSIGN; __entry->tx_ant = tx_ant; __entry->rx_ant = rx_ant; __entry->ret = ret; ), TP_printk( LOCAL_PR_FMT " tx_ant:%d rx_ant:%d ret:%d", LOCAL_PR_ARG, __entry->tx_ant, __entry->rx_ant, __entry->ret ) ); TRACE_EVENT(drv_remain_on_channel, TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct ieee80211_channel *chan, unsigned int duration, enum ieee80211_roc_type type), TP_ARGS(local, sdata, chan, duration, type), TP_STRUCT__entry( LOCAL_ENTRY VIF_ENTRY __field(int, center_freq) __field(unsigned int, duration) __field(u32, type) ), TP_fast_assign( LOCAL_ASSIGN; VIF_ASSIGN; __entry->center_freq = chan->center_freq; __entry->duration = duration; __entry->type = type; ), TP_printk( LOCAL_PR_FMT VIF_PR_FMT " freq:%dMHz duration:%dms type=%d", LOCAL_PR_ARG, VIF_PR_ARG, __entry->center_freq, __entry->duration, __entry->type ) ); DEFINE_EVENT(local_only_evt, drv_cancel_remain_on_channel, 
TP_PROTO(struct ieee80211_local *local), TP_ARGS(local) ); TRACE_EVENT(drv_set_ringparam, TP_PROTO(struct ieee80211_local *local, u32 tx, u32 rx), TP_ARGS(local, tx, rx), TP_STRUCT__entry( LOCAL_ENTRY __field(u32, tx) __field(u32, rx) ), TP_fast_assign( LOCAL_ASSIGN; __entry->tx = tx; __entry->rx = rx; ), TP_printk( LOCAL_PR_FMT " tx:%d rx %d", LOCAL_PR_ARG, __entry->tx, __entry->rx ) ); TRACE_EVENT(drv_get_ringparam, TP_PROTO(struct ieee80211_local *local, u32 *tx, u32 *tx_max, u32 *rx, u32 *rx_max), TP_ARGS(local, tx, tx_max, rx, rx_max), TP_STRUCT__entry( LOCAL_ENTRY __field(u32, tx) __field(u32, tx_max) __field(u32, rx) __field(u32, rx_max) ), TP_fast_assign( LOCAL_ASSIGN; __entry->tx = *tx; __entry->tx_max = *tx_max; __entry->rx = *rx; __entry->rx_max = *rx_max; ), TP_printk( LOCAL_PR_FMT " tx:%d tx_max %d rx %d rx_max %d", LOCAL_PR_ARG, __entry->tx, __entry->tx_max, __entry->rx, __entry->rx_max ) ); DEFINE_EVENT(local_only_evt, drv_tx_frames_pending, TP_PROTO(struct ieee80211_local *local), TP_ARGS(local) ); DEFINE_EVENT(local_only_evt, drv_offchannel_tx_cancel_wait, TP_PROTO(struct ieee80211_local *local), TP_ARGS(local) ); TRACE_EVENT(drv_set_bitrate_mask, TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, const struct cfg80211_bitrate_mask *mask), TP_ARGS(local, sdata, mask), TP_STRUCT__entry( LOCAL_ENTRY VIF_ENTRY __field(u32, legacy_2g) __field(u32, legacy_5g) ), TP_fast_assign( LOCAL_ASSIGN; VIF_ASSIGN; __entry->legacy_2g = mask->control[NL80211_BAND_2GHZ].legacy; __entry->legacy_5g = mask->control[NL80211_BAND_5GHZ].legacy; ), TP_printk( LOCAL_PR_FMT VIF_PR_FMT " 2G Mask:0x%x 5G Mask:0x%x", LOCAL_PR_ARG, VIF_PR_ARG, __entry->legacy_2g, __entry->legacy_5g ) ); TRACE_EVENT(drv_set_rekey_data, TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct cfg80211_gtk_rekey_data *data), TP_ARGS(local, sdata, data), TP_STRUCT__entry( LOCAL_ENTRY VIF_ENTRY __array(u8, kek, NL80211_KEK_LEN) __array(u8, kck, NL80211_KCK_LEN) __array(u8, replay_ctr, NL80211_REPLAY_CTR_LEN) ), TP_fast_assign( LOCAL_ASSIGN; VIF_ASSIGN; memcpy(__entry->kek, data->kek, NL80211_KEK_LEN); memcpy(__entry->kck, data->kck, NL80211_KCK_LEN); memcpy(__entry->replay_ctr, data->replay_ctr, NL80211_REPLAY_CTR_LEN); ), TP_printk(LOCAL_PR_FMT VIF_PR_FMT, LOCAL_PR_ARG, VIF_PR_ARG) ); TRACE_EVENT(drv_event_callback, TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, const struct ieee80211_event *_event), TP_ARGS(local, sdata, _event), TP_STRUCT__entry( LOCAL_ENTRY VIF_ENTRY __field(u32, type) ), TP_fast_assign( LOCAL_ASSIGN; VIF_ASSIGN; __entry->type = _event->type; ), TP_printk( LOCAL_PR_FMT VIF_PR_FMT " event:%d", LOCAL_PR_ARG, VIF_PR_ARG, __entry->type ) ); DECLARE_EVENT_CLASS(release_evt, TP_PROTO(struct ieee80211_local *local, struct ieee80211_sta *sta, u16 tids, int num_frames, enum ieee80211_frame_release_type reason, bool more_data), TP_ARGS(local, sta, tids, num_frames, reason, more_data), TP_STRUCT__entry( LOCAL_ENTRY STA_ENTRY __field(u16, tids) __field(int, num_frames) __field(int, reason) __field(bool, more_data) ), TP_fast_assign( LOCAL_ASSIGN; STA_ASSIGN; __entry->tids = tids; __entry->num_frames = num_frames; __entry->reason = reason; __entry->more_data = more_data; ), TP_printk( LOCAL_PR_FMT STA_PR_FMT " TIDs:0x%.4x frames:%d reason:%d more:%d", LOCAL_PR_ARG, STA_PR_ARG, __entry->tids, __entry->num_frames, __entry->reason, __entry->more_data ) ); DEFINE_EVENT(release_evt, drv_release_buffered_frames, TP_PROTO(struct 
ieee80211_local *local, struct ieee80211_sta *sta, u16 tids, int num_frames, enum ieee80211_frame_release_type reason, bool more_data), TP_ARGS(local, sta, tids, num_frames, reason, more_data) ); DEFINE_EVENT(release_evt, drv_allow_buffered_frames, TP_PROTO(struct ieee80211_local *local, struct ieee80211_sta *sta, u16 tids, int num_frames, enum ieee80211_frame_release_type reason, bool more_data), TP_ARGS(local, sta, tids, num_frames, reason, more_data) ); TRACE_EVENT(drv_mgd_prepare_tx, TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, u16 duration), TP_ARGS(local, sdata, duration), TP_STRUCT__entry( LOCAL_ENTRY VIF_ENTRY __field(u32, duration) ), TP_fast_assign( LOCAL_ASSIGN; VIF_ASSIGN; __entry->duration = duration; ), TP_printk( LOCAL_PR_FMT VIF_PR_FMT " duration: %u", LOCAL_PR_ARG, VIF_PR_ARG, __entry->duration ) ); DEFINE_EVENT(local_sdata_evt, drv_mgd_protect_tdls_discover, TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata), TP_ARGS(local, sdata) ); DECLARE_EVENT_CLASS(local_chanctx, TP_PROTO(struct ieee80211_local *local, struct ieee80211_chanctx *ctx), TP_ARGS(local, ctx), TP_STRUCT__entry( LOCAL_ENTRY CHANCTX_ENTRY ), TP_fast_assign( LOCAL_ASSIGN; CHANCTX_ASSIGN; ), TP_printk( LOCAL_PR_FMT CHANCTX_PR_FMT, LOCAL_PR_ARG, CHANCTX_PR_ARG ) ); DEFINE_EVENT(local_chanctx, drv_add_chanctx, TP_PROTO(struct ieee80211_local *local, struct ieee80211_chanctx *ctx), TP_ARGS(local, ctx) ); DEFINE_EVENT(local_chanctx, drv_remove_chanctx, TP_PROTO(struct ieee80211_local *local, struct ieee80211_chanctx *ctx), TP_ARGS(local, ctx) ); TRACE_EVENT(drv_change_chanctx, TP_PROTO(struct ieee80211_local *local, struct ieee80211_chanctx *ctx, u32 changed), TP_ARGS(local, ctx, changed), TP_STRUCT__entry( LOCAL_ENTRY CHANCTX_ENTRY __field(u32, changed) ), TP_fast_assign( LOCAL_ASSIGN; CHANCTX_ASSIGN; __entry->changed = changed; ), TP_printk( LOCAL_PR_FMT CHANCTX_PR_FMT " changed:%#x", LOCAL_PR_ARG, CHANCTX_PR_ARG, __entry->changed ) ); #if !defined(__TRACE_VIF_ENTRY) #define __TRACE_VIF_ENTRY struct trace_vif_entry { enum nl80211_iftype vif_type; bool p2p; char vif_name[IFNAMSIZ]; } __packed; struct trace_chandef_entry { u32 control_freq; u32 chan_width; u32 center_freq1; u32 center_freq2; } __packed; struct trace_switch_entry { struct trace_vif_entry vif; struct trace_chandef_entry old_chandef; struct trace_chandef_entry new_chandef; } __packed; #define SWITCH_ENTRY_ASSIGN(to, from) local_vifs[i].to = vifs[i].from #endif TRACE_EVENT(drv_switch_vif_chanctx, TP_PROTO(struct ieee80211_local *local, struct ieee80211_vif_chanctx_switch *vifs, int n_vifs, enum ieee80211_chanctx_switch_mode mode), TP_ARGS(local, vifs, n_vifs, mode), TP_STRUCT__entry( LOCAL_ENTRY __field(int, n_vifs) __field(u32, mode) __dynamic_array(u8, vifs, sizeof(struct trace_switch_entry) * n_vifs) ), TP_fast_assign( LOCAL_ASSIGN; __entry->n_vifs = n_vifs; __entry->mode = mode; { struct trace_switch_entry *local_vifs = __get_dynamic_array(vifs); int i; for (i = 0; i < n_vifs; i++) { struct ieee80211_sub_if_data *sdata; sdata = container_of(vifs[i].vif, struct ieee80211_sub_if_data, vif); SWITCH_ENTRY_ASSIGN(vif.vif_type, vif->type); SWITCH_ENTRY_ASSIGN(vif.p2p, vif->p2p); strncpy(local_vifs[i].vif.vif_name, sdata->name, sizeof(local_vifs[i].vif.vif_name)); SWITCH_ENTRY_ASSIGN(old_chandef.control_freq, old_ctx->def.chan->center_freq); SWITCH_ENTRY_ASSIGN(old_chandef.chan_width, old_ctx->def.width); SWITCH_ENTRY_ASSIGN(old_chandef.center_freq1, old_ctx->def.center_freq1); 
SWITCH_ENTRY_ASSIGN(old_chandef.center_freq2, old_ctx->def.center_freq2); SWITCH_ENTRY_ASSIGN(new_chandef.control_freq, new_ctx->def.chan->center_freq); SWITCH_ENTRY_ASSIGN(new_chandef.chan_width, new_ctx->def.width); SWITCH_ENTRY_ASSIGN(new_chandef.center_freq1, new_ctx->def.center_freq1); SWITCH_ENTRY_ASSIGN(new_chandef.center_freq2, new_ctx->def.center_freq2); } } ), TP_printk( LOCAL_PR_FMT " n_vifs:%d mode:%d", LOCAL_PR_ARG, __entry->n_vifs, __entry->mode ) ); DECLARE_EVENT_CLASS(local_sdata_chanctx, TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct ieee80211_chanctx *ctx), TP_ARGS(local, sdata, ctx), TP_STRUCT__entry( LOCAL_ENTRY VIF_ENTRY CHANCTX_ENTRY ), TP_fast_assign( LOCAL_ASSIGN; VIF_ASSIGN; CHANCTX_ASSIGN; ), TP_printk( LOCAL_PR_FMT VIF_PR_FMT CHANCTX_PR_FMT, LOCAL_PR_ARG, VIF_PR_ARG, CHANCTX_PR_ARG ) ); DEFINE_EVENT(local_sdata_chanctx, drv_assign_vif_chanctx, TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct ieee80211_chanctx *ctx), TP_ARGS(local, sdata, ctx) ); DEFINE_EVENT(local_sdata_chanctx, drv_unassign_vif_chanctx, TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct ieee80211_chanctx *ctx), TP_ARGS(local, sdata, ctx) ); TRACE_EVENT(drv_start_ap, TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct ieee80211_bss_conf *info), TP_ARGS(local, sdata, info), TP_STRUCT__entry( LOCAL_ENTRY VIF_ENTRY __field(u8, dtimper) __field(u16, bcnint) __dynamic_array(u8, ssid, info->ssid_len); __field(bool, hidden_ssid); ), TP_fast_assign( LOCAL_ASSIGN; VIF_ASSIGN; __entry->dtimper = info->dtim_period; __entry->bcnint = info->beacon_int; memcpy(__get_dynamic_array(ssid), info->ssid, info->ssid_len); __entry->hidden_ssid = info->hidden_ssid; ), TP_printk( LOCAL_PR_FMT VIF_PR_FMT, LOCAL_PR_ARG, VIF_PR_ARG ) ); DEFINE_EVENT(local_sdata_evt, drv_stop_ap, TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata), TP_ARGS(local, sdata) ); TRACE_EVENT(drv_reconfig_complete, TP_PROTO(struct ieee80211_local *local, enum ieee80211_reconfig_type reconfig_type), TP_ARGS(local, reconfig_type), TP_STRUCT__entry( LOCAL_ENTRY __field(u8, reconfig_type) ), TP_fast_assign( LOCAL_ASSIGN; __entry->reconfig_type = reconfig_type; ), TP_printk( LOCAL_PR_FMT " reconfig_type:%d", LOCAL_PR_ARG, __entry->reconfig_type ) ); #if IS_ENABLED(CONFIG_IPV6) DEFINE_EVENT(local_sdata_evt, drv_ipv6_addr_change, TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata), TP_ARGS(local, sdata) ); #endif TRACE_EVENT(drv_join_ibss, TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct ieee80211_bss_conf *info), TP_ARGS(local, sdata, info), TP_STRUCT__entry( LOCAL_ENTRY VIF_ENTRY __field(u8, dtimper) __field(u16, bcnint) __dynamic_array(u8, ssid, info->ssid_len); ), TP_fast_assign( LOCAL_ASSIGN; VIF_ASSIGN; __entry->dtimper = info->dtim_period; __entry->bcnint = info->beacon_int; memcpy(__get_dynamic_array(ssid), info->ssid, info->ssid_len); ), TP_printk( LOCAL_PR_FMT VIF_PR_FMT, LOCAL_PR_ARG, VIF_PR_ARG ) ); DEFINE_EVENT(local_sdata_evt, drv_leave_ibss, TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata), TP_ARGS(local, sdata) ); TRACE_EVENT(drv_get_expected_throughput, TP_PROTO(struct ieee80211_sta *sta), TP_ARGS(sta), TP_STRUCT__entry( STA_ENTRY ), TP_fast_assign( STA_ASSIGN; ), TP_printk( STA_PR_FMT, STA_PR_ARG ) ); TRACE_EVENT(drv_start_nan, TP_PROTO(struct ieee80211_local *local, struct 
ieee80211_sub_if_data *sdata, struct cfg80211_nan_conf *conf), TP_ARGS(local, sdata, conf), TP_STRUCT__entry( LOCAL_ENTRY VIF_ENTRY __field(u8, master_pref) __field(u8, bands) __field(u8, cdw_2g) __field(u8, cdw_5g) ), TP_fast_assign( LOCAL_ASSIGN; VIF_ASSIGN; __entry->master_pref = conf->master_pref; __entry->bands = conf->bands; __entry->cdw_2g = conf->cdw_2g; __entry->cdw_5g = conf->cdw_5g; ), TP_printk( LOCAL_PR_FMT VIF_PR_FMT ", master preference: %u, bands: 0x%0x, cdw_2g: %u, cdw_5g: %u", LOCAL_PR_ARG, VIF_PR_ARG, __entry->master_pref, __entry->bands, __entry->cdw_2g, __entry->cdw_5g ) ); TRACE_EVENT(drv_stop_nan, TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata), TP_ARGS(local, sdata), TP_STRUCT__entry( LOCAL_ENTRY VIF_ENTRY ), TP_fast_assign( LOCAL_ASSIGN; VIF_ASSIGN; ), TP_printk( LOCAL_PR_FMT VIF_PR_FMT, LOCAL_PR_ARG, VIF_PR_ARG ) ); TRACE_EVENT(drv_nan_change_conf, TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct cfg80211_nan_conf *conf, u32 changes), TP_ARGS(local, sdata, conf, changes), TP_STRUCT__entry( LOCAL_ENTRY VIF_ENTRY __field(u8, master_pref) __field(u8, bands) __field(u8, cdw_2g) __field(u8, cdw_5g) __field(u32, changes) ), TP_fast_assign( LOCAL_ASSIGN; VIF_ASSIGN; __entry->master_pref = conf->master_pref; __entry->bands = conf->bands; __entry->cdw_2g = conf->cdw_2g; __entry->cdw_5g = conf->cdw_5g; __entry->changes = changes; ), TP_printk( LOCAL_PR_FMT VIF_PR_FMT ", master preference: %u, bands: 0x%0x, cdw_2g: %u, cdw_5g: %u, " "changes: 0x%x", LOCAL_PR_ARG, VIF_PR_ARG, __entry->master_pref, __entry->bands, __entry->cdw_2g, __entry->cdw_5g, __entry->changes ) ); TRACE_EVENT(drv_add_nan_func, TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, const struct cfg80211_nan_func *func), TP_ARGS(local, sdata, func), TP_STRUCT__entry( LOCAL_ENTRY VIF_ENTRY __field(u8, type) __field(u8, inst_id) ), TP_fast_assign( LOCAL_ASSIGN; VIF_ASSIGN; __entry->type = func->type; __entry->inst_id = func->instance_id; ), TP_printk( LOCAL_PR_FMT VIF_PR_FMT ", type: %u, inst_id: %u", LOCAL_PR_ARG, VIF_PR_ARG, __entry->type, __entry->inst_id ) ); TRACE_EVENT(drv_del_nan_func, TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, u8 instance_id), TP_ARGS(local, sdata, instance_id), TP_STRUCT__entry( LOCAL_ENTRY VIF_ENTRY __field(u8, instance_id) ), TP_fast_assign( LOCAL_ASSIGN; VIF_ASSIGN; __entry->instance_id = instance_id; ), TP_printk( LOCAL_PR_FMT VIF_PR_FMT ", instance_id: %u", LOCAL_PR_ARG, VIF_PR_ARG, __entry->instance_id ) ); DEFINE_EVENT(local_sdata_evt, drv_start_pmsr, TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata), TP_ARGS(local, sdata) ); DEFINE_EVENT(local_sdata_evt, drv_abort_pmsr, TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata), TP_ARGS(local, sdata) ); /* * Tracing for API calls that drivers call. 
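* * These api_* events are the mirror image of the drv_* events above: a drv_* event fires when mac80211 invokes one of the driver's ops, while an api_* event fires when the driver calls back into mac80211, e.g. a driver calling ieee80211_start_tx_ba_session() hits trace_api_start_tx_ba_session() below.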
*/ TRACE_EVENT(api_start_tx_ba_session, TP_PROTO(struct ieee80211_sta *sta, u16 tid), TP_ARGS(sta, tid), TP_STRUCT__entry( STA_ENTRY __field(u16, tid) ), TP_fast_assign( STA_ASSIGN; __entry->tid = tid; ), TP_printk( STA_PR_FMT " tid:%d", STA_PR_ARG, __entry->tid ) ); TRACE_EVENT(api_start_tx_ba_cb, TP_PROTO(struct ieee80211_sub_if_data *sdata, const u8 *ra, u16 tid), TP_ARGS(sdata, ra, tid), TP_STRUCT__entry( VIF_ENTRY __array(u8, ra, ETH_ALEN) __field(u16, tid) ), TP_fast_assign( VIF_ASSIGN; memcpy(__entry->ra, ra, ETH_ALEN); __entry->tid = tid; ), TP_printk( VIF_PR_FMT " ra:%pM tid:%d", VIF_PR_ARG, __entry->ra, __entry->tid ) ); TRACE_EVENT(api_stop_tx_ba_session, TP_PROTO(struct ieee80211_sta *sta, u16 tid), TP_ARGS(sta, tid), TP_STRUCT__entry( STA_ENTRY __field(u16, tid) ), TP_fast_assign( STA_ASSIGN; __entry->tid = tid; ), TP_printk( STA_PR_FMT " tid:%d", STA_PR_ARG, __entry->tid ) ); TRACE_EVENT(api_stop_tx_ba_cb, TP_PROTO(struct ieee80211_sub_if_data *sdata, const u8 *ra, u16 tid), TP_ARGS(sdata, ra, tid), TP_STRUCT__entry( VIF_ENTRY __array(u8, ra, ETH_ALEN) __field(u16, tid) ), TP_fast_assign( VIF_ASSIGN; memcpy(__entry->ra, ra, ETH_ALEN); __entry->tid = tid; ), TP_printk( VIF_PR_FMT " ra:%pM tid:%d", VIF_PR_ARG, __entry->ra, __entry->tid ) ); DEFINE_EVENT(local_only_evt, api_restart_hw, TP_PROTO(struct ieee80211_local *local), TP_ARGS(local) ); TRACE_EVENT(api_beacon_loss, TP_PROTO(struct ieee80211_sub_if_data *sdata), TP_ARGS(sdata), TP_STRUCT__entry( VIF_ENTRY ), TP_fast_assign( VIF_ASSIGN; ), TP_printk( VIF_PR_FMT, VIF_PR_ARG ) ); TRACE_EVENT(api_connection_loss, TP_PROTO(struct ieee80211_sub_if_data *sdata), TP_ARGS(sdata), TP_STRUCT__entry( VIF_ENTRY ), TP_fast_assign( VIF_ASSIGN; ), TP_printk( VIF_PR_FMT, VIF_PR_ARG ) ); TRACE_EVENT(api_cqm_rssi_notify, TP_PROTO(struct ieee80211_sub_if_data *sdata, enum nl80211_cqm_rssi_threshold_event rssi_event, s32 rssi_level), TP_ARGS(sdata, rssi_event, rssi_level), TP_STRUCT__entry( VIF_ENTRY __field(u32, rssi_event) __field(s32, rssi_level) ), TP_fast_assign( VIF_ASSIGN; __entry->rssi_event = rssi_event; __entry->rssi_level = rssi_level; ), TP_printk( VIF_PR_FMT " event:%d rssi:%d", VIF_PR_ARG, __entry->rssi_event, __entry->rssi_level ) ); DEFINE_EVENT(local_sdata_evt, api_cqm_beacon_loss_notify, TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata), TP_ARGS(local, sdata) ); TRACE_EVENT(api_scan_completed, TP_PROTO(struct ieee80211_local *local, bool aborted), TP_ARGS(local, aborted), TP_STRUCT__entry( LOCAL_ENTRY __field(bool, aborted) ), TP_fast_assign( LOCAL_ASSIGN; __entry->aborted = aborted; ), TP_printk( LOCAL_PR_FMT " aborted:%d", LOCAL_PR_ARG, __entry->aborted ) ); TRACE_EVENT(api_sched_scan_results, TP_PROTO(struct ieee80211_local *local), TP_ARGS(local), TP_STRUCT__entry( LOCAL_ENTRY ), TP_fast_assign( LOCAL_ASSIGN; ), TP_printk( LOCAL_PR_FMT, LOCAL_PR_ARG ) ); TRACE_EVENT(api_sched_scan_stopped, TP_PROTO(struct ieee80211_local *local), TP_ARGS(local), TP_STRUCT__entry( LOCAL_ENTRY ), TP_fast_assign( LOCAL_ASSIGN; ), TP_printk( LOCAL_PR_FMT, LOCAL_PR_ARG ) ); TRACE_EVENT(api_sta_block_awake, TP_PROTO(struct ieee80211_local *local, struct ieee80211_sta *sta, bool block), TP_ARGS(local, sta, block), TP_STRUCT__entry( LOCAL_ENTRY STA_ENTRY __field(bool, block) ), TP_fast_assign( LOCAL_ASSIGN; STA_ASSIGN; __entry->block = block; ), TP_printk( LOCAL_PR_FMT STA_PR_FMT " block:%d", LOCAL_PR_ARG, STA_PR_ARG, __entry->block ) ); TRACE_EVENT(api_chswitch_done, TP_PROTO(struct ieee80211_sub_if_data *sdata, bool 
success), TP_ARGS(sdata, success), TP_STRUCT__entry( VIF_ENTRY __field(bool, success) ), TP_fast_assign( VIF_ASSIGN; __entry->success = success; ), TP_printk( VIF_PR_FMT " success=%d", VIF_PR_ARG, __entry->success ) ); DEFINE_EVENT(local_only_evt, api_ready_on_channel, TP_PROTO(struct ieee80211_local *local), TP_ARGS(local) ); DEFINE_EVENT(local_only_evt, api_remain_on_channel_expired, TP_PROTO(struct ieee80211_local *local), TP_ARGS(local) ); TRACE_EVENT(api_gtk_rekey_notify, TP_PROTO(struct ieee80211_sub_if_data *sdata, const u8 *bssid, const u8 *replay_ctr), TP_ARGS(sdata, bssid, replay_ctr), TP_STRUCT__entry( VIF_ENTRY __array(u8, bssid, ETH_ALEN) __array(u8, replay_ctr, NL80211_REPLAY_CTR_LEN) ), TP_fast_assign( VIF_ASSIGN; memcpy(__entry->bssid, bssid, ETH_ALEN); memcpy(__entry->replay_ctr, replay_ctr, NL80211_REPLAY_CTR_LEN); ), TP_printk(VIF_PR_FMT, VIF_PR_ARG) ); TRACE_EVENT(api_enable_rssi_reports, TP_PROTO(struct ieee80211_sub_if_data *sdata, int rssi_min_thold, int rssi_max_thold), TP_ARGS(sdata, rssi_min_thold, rssi_max_thold), TP_STRUCT__entry( VIF_ENTRY __field(int, rssi_min_thold) __field(int, rssi_max_thold) ), TP_fast_assign( VIF_ASSIGN; __entry->rssi_min_thold = rssi_min_thold; __entry->rssi_max_thold = rssi_max_thold; ), TP_printk( VIF_PR_FMT " rssi_min_thold =%d, rssi_max_thold = %d", VIF_PR_ARG, __entry->rssi_min_thold, __entry->rssi_max_thold ) ); TRACE_EVENT(api_eosp, TP_PROTO(struct ieee80211_local *local, struct ieee80211_sta *sta), TP_ARGS(local, sta), TP_STRUCT__entry( LOCAL_ENTRY STA_ENTRY ), TP_fast_assign( LOCAL_ASSIGN; STA_ASSIGN; ), TP_printk( LOCAL_PR_FMT STA_PR_FMT, LOCAL_PR_ARG, STA_PR_ARG ) ); TRACE_EVENT(api_send_eosp_nullfunc, TP_PROTO(struct ieee80211_local *local, struct ieee80211_sta *sta, u8 tid), TP_ARGS(local, sta, tid), TP_STRUCT__entry( LOCAL_ENTRY STA_ENTRY __field(u8, tid) ), TP_fast_assign( LOCAL_ASSIGN; STA_ASSIGN; __entry->tid = tid; ), TP_printk( LOCAL_PR_FMT STA_PR_FMT " tid:%d", LOCAL_PR_ARG, STA_PR_ARG, __entry->tid ) ); TRACE_EVENT(api_sta_set_buffered, TP_PROTO(struct ieee80211_local *local, struct ieee80211_sta *sta, u8 tid, bool buffered), TP_ARGS(local, sta, tid, buffered), TP_STRUCT__entry( LOCAL_ENTRY STA_ENTRY __field(u8, tid) __field(bool, buffered) ), TP_fast_assign( LOCAL_ASSIGN; STA_ASSIGN; __entry->tid = tid; __entry->buffered = buffered; ), TP_printk( LOCAL_PR_FMT STA_PR_FMT " tid:%d buffered:%d", LOCAL_PR_ARG, STA_PR_ARG, __entry->tid, __entry->buffered ) ); /* * Tracing for internal functions * (which may also be called in response to driver calls) */ TRACE_EVENT(wake_queue, TP_PROTO(struct ieee80211_local *local, u16 queue, enum queue_stop_reason reason), TP_ARGS(local, queue, reason), TP_STRUCT__entry( LOCAL_ENTRY __field(u16, queue) __field(u32, reason) ), TP_fast_assign( LOCAL_ASSIGN; __entry->queue = queue; __entry->reason = reason; ), TP_printk( LOCAL_PR_FMT " queue:%d, reason:%d", LOCAL_PR_ARG, __entry->queue, __entry->reason ) ); TRACE_EVENT(stop_queue, TP_PROTO(struct ieee80211_local *local, u16 queue, enum queue_stop_reason reason), TP_ARGS(local, queue, reason), TP_STRUCT__entry( LOCAL_ENTRY __field(u16, queue) __field(u32, reason) ), TP_fast_assign( LOCAL_ASSIGN; __entry->queue = queue; __entry->reason = reason; ), TP_printk( LOCAL_PR_FMT " queue:%d, reason:%d", LOCAL_PR_ARG, __entry->queue, __entry->reason ) ); TRACE_EVENT(drv_set_default_unicast_key, TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, int key_idx), TP_ARGS(local, sdata, key_idx), TP_STRUCT__entry( LOCAL_ENTRY 
VIF_ENTRY __field(int, key_idx) ), TP_fast_assign( LOCAL_ASSIGN; VIF_ASSIGN; __entry->key_idx = key_idx; ), TP_printk(LOCAL_PR_FMT VIF_PR_FMT " key_idx:%d", LOCAL_PR_ARG, VIF_PR_ARG, __entry->key_idx) ); TRACE_EVENT(api_radar_detected, TP_PROTO(struct ieee80211_local *local), TP_ARGS(local), TP_STRUCT__entry( LOCAL_ENTRY ), TP_fast_assign( LOCAL_ASSIGN; ), TP_printk( LOCAL_PR_FMT " radar detected", LOCAL_PR_ARG ) ); TRACE_EVENT(drv_channel_switch_beacon, TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct cfg80211_chan_def *chandef), TP_ARGS(local, sdata, chandef), TP_STRUCT__entry( LOCAL_ENTRY VIF_ENTRY CHANDEF_ENTRY ), TP_fast_assign( LOCAL_ASSIGN; VIF_ASSIGN; CHANDEF_ASSIGN(chandef); ), TP_printk( LOCAL_PR_FMT VIF_PR_FMT " channel switch to " CHANDEF_PR_FMT, LOCAL_PR_ARG, VIF_PR_ARG, CHANDEF_PR_ARG ) ); TRACE_EVENT(drv_pre_channel_switch, TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct ieee80211_channel_switch *ch_switch), TP_ARGS(local, sdata, ch_switch), TP_STRUCT__entry( LOCAL_ENTRY VIF_ENTRY CHANDEF_ENTRY __field(u64, timestamp) __field(u32, device_timestamp) __field(bool, block_tx) __field(u8, count) ), TP_fast_assign( LOCAL_ASSIGN; VIF_ASSIGN; CHANDEF_ASSIGN(&ch_switch->chandef) __entry->timestamp = ch_switch->timestamp; __entry->device_timestamp = ch_switch->device_timestamp; __entry->block_tx = ch_switch->block_tx; __entry->count = ch_switch->count; ), TP_printk( LOCAL_PR_FMT VIF_PR_FMT " prepare channel switch to " CHANDEF_PR_FMT " count:%d block_tx:%d timestamp:%llu", LOCAL_PR_ARG, VIF_PR_ARG, CHANDEF_PR_ARG, __entry->count, __entry->block_tx, __entry->timestamp ) ); DEFINE_EVENT(local_sdata_evt, drv_post_channel_switch, TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata), TP_ARGS(local, sdata) ); DEFINE_EVENT(local_sdata_evt, drv_abort_channel_switch, TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata), TP_ARGS(local, sdata) ); TRACE_EVENT(drv_channel_switch_rx_beacon, TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct ieee80211_channel_switch *ch_switch), TP_ARGS(local, sdata, ch_switch), TP_STRUCT__entry( LOCAL_ENTRY VIF_ENTRY CHANDEF_ENTRY __field(u64, timestamp) __field(u32, device_timestamp) __field(bool, block_tx) __field(u8, count) ), TP_fast_assign( LOCAL_ASSIGN; VIF_ASSIGN; CHANDEF_ASSIGN(&ch_switch->chandef) __entry->timestamp = ch_switch->timestamp; __entry->device_timestamp = ch_switch->device_timestamp; __entry->block_tx = ch_switch->block_tx; __entry->count = ch_switch->count; ), TP_printk( LOCAL_PR_FMT VIF_PR_FMT " received a channel switch beacon to " CHANDEF_PR_FMT " count:%d block_tx:%d timestamp:%llu", LOCAL_PR_ARG, VIF_PR_ARG, CHANDEF_PR_ARG, __entry->count, __entry->block_tx, __entry->timestamp ) ); TRACE_EVENT(drv_get_txpower, TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, int dbm, int ret), TP_ARGS(local, sdata, dbm, ret), TP_STRUCT__entry( LOCAL_ENTRY VIF_ENTRY __field(int, dbm) __field(int, ret) ), TP_fast_assign( LOCAL_ASSIGN; VIF_ASSIGN; __entry->dbm = dbm; __entry->ret = ret; ), TP_printk( LOCAL_PR_FMT VIF_PR_FMT " dbm:%d ret:%d", LOCAL_PR_ARG, VIF_PR_ARG, __entry->dbm, __entry->ret ) ); TRACE_EVENT(drv_tdls_channel_switch, TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct ieee80211_sta *sta, u8 oper_class, struct cfg80211_chan_def *chandef), TP_ARGS(local, sdata, sta, oper_class, chandef), TP_STRUCT__entry( LOCAL_ENTRY VIF_ENTRY 
STA_ENTRY __field(u8, oper_class) CHANDEF_ENTRY ), TP_fast_assign( LOCAL_ASSIGN; VIF_ASSIGN; STA_ASSIGN; __entry->oper_class = oper_class; CHANDEF_ASSIGN(chandef) ), TP_printk( LOCAL_PR_FMT VIF_PR_FMT " tdls channel switch to" CHANDEF_PR_FMT " oper_class:%d " STA_PR_FMT, LOCAL_PR_ARG, VIF_PR_ARG, CHANDEF_PR_ARG, __entry->oper_class, STA_PR_ARG ) ); TRACE_EVENT(drv_tdls_cancel_channel_switch, TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct ieee80211_sta *sta), TP_ARGS(local, sdata, sta), TP_STRUCT__entry( LOCAL_ENTRY VIF_ENTRY STA_ENTRY ), TP_fast_assign( LOCAL_ASSIGN; VIF_ASSIGN; STA_ASSIGN; ), TP_printk( LOCAL_PR_FMT VIF_PR_FMT " tdls cancel channel switch with " STA_PR_FMT, LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG ) ); TRACE_EVENT(drv_tdls_recv_channel_switch, TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct ieee80211_tdls_ch_sw_params *params), TP_ARGS(local, sdata, params), TP_STRUCT__entry( LOCAL_ENTRY VIF_ENTRY __field(u8, action_code) STA_ENTRY CHANDEF_ENTRY __field(u32, status) __field(bool, peer_initiator) __field(u32, timestamp) __field(u16, switch_time) __field(u16, switch_timeout) ), TP_fast_assign( LOCAL_ASSIGN; VIF_ASSIGN; STA_NAMED_ASSIGN(params->sta); CHANDEF_ASSIGN(params->chandef) __entry->peer_initiator = params->sta->tdls_initiator; __entry->action_code = params->action_code; __entry->status = params->status; __entry->timestamp = params->timestamp; __entry->switch_time = params->switch_time; __entry->switch_timeout = params->switch_timeout; ), TP_printk( LOCAL_PR_FMT VIF_PR_FMT " received tdls channel switch packet" " action:%d status:%d time:%d switch time:%d switch" " timeout:%d initiator: %d chan:" CHANDEF_PR_FMT STA_PR_FMT, LOCAL_PR_ARG, VIF_PR_ARG, __entry->action_code, __entry->status, __entry->timestamp, __entry->switch_time, __entry->switch_timeout, __entry->peer_initiator, CHANDEF_PR_ARG, STA_PR_ARG ) ); TRACE_EVENT(drv_wake_tx_queue, TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct txq_info *txq), TP_ARGS(local, sdata, txq), TP_STRUCT__entry( LOCAL_ENTRY VIF_ENTRY STA_ENTRY __field(u8, ac) __field(u8, tid) ), TP_fast_assign( struct ieee80211_sta *sta = txq->txq.sta; LOCAL_ASSIGN; VIF_ASSIGN; STA_ASSIGN; __entry->ac = txq->txq.ac; __entry->tid = txq->txq.tid; ), TP_printk( LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " ac:%d tid:%d", LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->ac, __entry->tid ) ); TRACE_EVENT(drv_get_ftm_responder_stats, TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct cfg80211_ftm_responder_stats *ftm_stats), TP_ARGS(local, sdata, ftm_stats), TP_STRUCT__entry( LOCAL_ENTRY VIF_ENTRY ), TP_fast_assign( LOCAL_ASSIGN; VIF_ASSIGN; ), TP_printk( LOCAL_PR_FMT VIF_PR_FMT, LOCAL_PR_ARG, VIF_PR_ARG ) ); #endif /* !__MAC80211_DRIVER_TRACE || TRACE_HEADER_MULTI_READ */ #undef TRACE_INCLUDE_PATH #define TRACE_INCLUDE_PATH . 
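/* Standard tracepoint epilogue: TRACE_INCLUDE_PATH and TRACE_INCLUDE_FILE make the trace/define_trace.h machinery re-read this header (./trace.h) with different macro definitions, expanding the TRACE_EVENT()/DEFINE_EVENT() declarations above into the actual trace_*() functions. */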
#undef TRACE_INCLUDE_FILE #define TRACE_INCLUDE_FILE trace #include backport-iwlwifi-dkms-7906/net/mac80211/trace_msg.h000066400000000000000000000024131351064634500217400ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0 */ /* * Portions of this file * Copyright (C) 2019 Intel Corporation */ #ifdef CPTCFG_MAC80211_MESSAGE_TRACING #if !defined(__MAC80211_MSG_DRIVER_TRACE) || defined(TRACE_HEADER_MULTI_READ) #define __MAC80211_MSG_DRIVER_TRACE #include #include #include "ieee80211_i.h" #undef TRACE_SYSTEM #define TRACE_SYSTEM mac80211_msg #define MAX_MSG_LEN 120 DECLARE_EVENT_CLASS(mac80211_msg_event, TP_PROTO(struct va_format *vaf), TP_ARGS(vaf), TP_STRUCT__entry( __dynamic_array(char, msg, MAX_MSG_LEN) ), TP_fast_assign( WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg), MAX_MSG_LEN, vaf->fmt, *vaf->va) >= MAX_MSG_LEN); ), TP_printk("%s", __get_str(msg)) ); DEFINE_EVENT(mac80211_msg_event, mac80211_info, TP_PROTO(struct va_format *vaf), TP_ARGS(vaf) ); DEFINE_EVENT(mac80211_msg_event, mac80211_dbg, TP_PROTO(struct va_format *vaf), TP_ARGS(vaf) ); DEFINE_EVENT(mac80211_msg_event, mac80211_err, TP_PROTO(struct va_format *vaf), TP_ARGS(vaf) ); #endif /* !__MAC80211_MSG_DRIVER_TRACE || TRACE_HEADER_MULTI_READ */ #undef TRACE_INCLUDE_PATH #define TRACE_INCLUDE_PATH . #undef TRACE_INCLUDE_FILE #define TRACE_INCLUDE_FILE trace_msg #include #endif backport-iwlwifi-dkms-7906/net/mac80211/tx.c000066400000000000000000004070451351064634500204340ustar00rootroot00000000000000/* * Copyright 2002-2005, Instant802 Networks, Inc. * Copyright 2005-2006, Devicescape Software, Inc. * Copyright 2006-2007 Jiri Benc * Copyright 2007 Johannes Berg * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * * Transmit and frame generation functions. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "ieee80211_i.h" #include "driver-ops.h" #include "led.h" #include "mesh.h" #include "wep.h" #include "wpa.h" #include "wme.h" #include "rate.h" /* misc utils */ static inline void ieee80211_tx_stats(struct net_device *dev, u32 len) { struct pcpu_sw_netstats *tstats = this_cpu_ptr(netdev_tstats(dev)); u64_stats_update_begin(&tstats->syncp); tstats->tx_packets++; tstats->tx_bytes += len; u64_stats_update_end(&tstats->syncp); } static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, struct sk_buff *skb, int group_addr, int next_frag_len) { int rate, mrate, erp, dur, i, shift = 0; struct ieee80211_rate *txrate; struct ieee80211_local *local = tx->local; struct ieee80211_supported_band *sband; struct ieee80211_hdr *hdr; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_chanctx_conf *chanctx_conf; u32 rate_flags = 0; /* assume HW handles this */ if (tx->rate.flags & (IEEE80211_TX_RC_MCS | IEEE80211_TX_RC_VHT_MCS)) return 0; rcu_read_lock(); chanctx_conf = rcu_dereference(tx->sdata->vif.chanctx_conf); if (chanctx_conf) { shift = ieee80211_chandef_get_shift(&chanctx_conf->def); rate_flags = ieee80211_chandef_rate_flags(&chanctx_conf->def); } rcu_read_unlock(); /* uh huh? 
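* (a negative rate index at this point would mean rate control never picked a valid rate for this frame; warn once and skip the duration calculation)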
*/ if (WARN_ON_ONCE(tx->rate.idx < 0)) return 0; sband = local->hw.wiphy->bands[info->band]; txrate = &sband->bitrates[tx->rate.idx]; erp = txrate->flags & IEEE80211_RATE_ERP_G; /* * data and mgmt (except PS Poll): * - during CFP: 32768 * - during contention period: * if addr1 is group address: 0 * if more fragments = 0 and addr1 is individual address: time to * transmit one ACK plus SIFS * if more fragments = 1 and addr1 is individual address: time to * transmit next fragment plus 2 x ACK plus 3 x SIFS * * IEEE 802.11, 9.6: * - control response frame (CTS or ACK) shall be transmitted using the * same rate as the immediately previous frame in the frame exchange * sequence, if this rate belongs to the PHY mandatory rates, or else * at the highest possible rate belonging to the PHY rates in the * BSSBasicRateSet */ hdr = (struct ieee80211_hdr *)skb->data; if (ieee80211_is_ctl(hdr->frame_control)) { /* TODO: These control frames are not currently sent by * mac80211, but should they be implemented, this function * needs to be updated to support duration field calculation. * * RTS: time needed to transmit pending data/mgmt frame plus * one CTS frame plus one ACK frame plus 3 x SIFS * CTS: duration of immediately previous RTS minus time * required to transmit CTS and its SIFS * ACK: 0 if immediately previous directed data/mgmt had * more=0, with more=1 duration in ACK frame is duration * from previous frame minus time needed to transmit ACK * and its SIFS * PS Poll: BIT(15) | BIT(14) | aid */ return 0; } /* data/mgmt */ if (0 /* FIX: data/mgmt during CFP */) return cpu_to_le16(32768); if (group_addr) /* Group address as the destination - no ACK */ return 0; /* Individual destination address: * IEEE 802.11, Ch. 9.6 (after IEEE 802.11g changes) * CTS and ACK frames shall be transmitted using the highest rate in * basic rate set that is less than or equal to the rate of the * immediately previous frame and that is using the same modulation * (CCK or OFDM). If no basic rate set matches with these requirements, * the highest mandatory rate of the PHY that is less than or equal to * the rate of the previous frame is used. 
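* * Worked example (hypothetical numbers): with a basic rate set of {6, 12, 24} Mbps (OFDM) and a data frame sent at 54 Mbps, the ACK is assumed at 24 Mbps, the highest basic rate not above the data rate using the same modulation, and dur becomes one SIFS plus the ACK transmit time at that rate.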
* Mandatory rates for IEEE 802.11g PHY: 1, 2, 5.5, 11, 6, 12, 24 Mbps */ rate = -1; /* use lowest available if everything fails */ mrate = sband->bitrates[0].bitrate; for (i = 0; i < sband->n_bitrates; i++) { struct ieee80211_rate *r = &sband->bitrates[i]; if (r->bitrate > txrate->bitrate) break; if ((rate_flags & r->flags) != rate_flags) continue; if (tx->sdata->vif.bss_conf.basic_rates & BIT(i)) rate = DIV_ROUND_UP(r->bitrate, 1 << shift); switch (sband->band) { case NL80211_BAND_2GHZ: { u32 flag; if (tx->sdata->flags & IEEE80211_SDATA_OPERATING_GMODE) flag = IEEE80211_RATE_MANDATORY_G; else flag = IEEE80211_RATE_MANDATORY_B; if (r->flags & flag) mrate = r->bitrate; break; } case NL80211_BAND_5GHZ: if (r->flags & IEEE80211_RATE_MANDATORY_A) mrate = r->bitrate; break; case NL80211_BAND_60GHZ: /* TODO, for now fall through */ case NUM_NL80211_BANDS: WARN_ON(1); break; } } if (rate == -1) { /* No matching basic rate found; use highest suitable mandatory * PHY rate */ rate = DIV_ROUND_UP(mrate, 1 << shift); } /* Don't calculate ACKs for QoS Frames with NoAck Policy set */ if (ieee80211_is_data_qos(hdr->frame_control) && *(ieee80211_get_qos_ctl(hdr)) & IEEE80211_QOS_CTL_ACK_POLICY_NOACK) dur = 0; else /* Time needed to transmit ACK * (10 bytes + 4-byte FCS = 112 bits) plus SIFS; rounded up * to closest integer */ dur = ieee80211_frame_duration(sband->band, 10, rate, erp, tx->sdata->vif.bss_conf.use_short_preamble, shift); if (next_frag_len) { /* Frame is fragmented: duration increases with time needed to * transmit next fragment plus ACK and 2 x SIFS. */ dur *= 2; /* ACK + SIFS */ /* next fragment */ dur += ieee80211_frame_duration(sband->band, next_frag_len, txrate->bitrate, erp, tx->sdata->vif.bss_conf.use_short_preamble, shift); } return cpu_to_le16(dur); } /* tx handlers */ static ieee80211_tx_result debug_noinline ieee80211_tx_h_dynamic_ps(struct ieee80211_tx_data *tx) { struct ieee80211_local *local = tx->local; struct ieee80211_if_managed *ifmgd; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); /* driver doesn't support power save */ if (!ieee80211_hw_check(&local->hw, SUPPORTS_PS)) return TX_CONTINUE; /* hardware does dynamic power save */ if (ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS)) return TX_CONTINUE; /* dynamic power save disabled */ if (local->hw.conf.dynamic_ps_timeout <= 0) return TX_CONTINUE; /* we are scanning, don't enable power save */ if (local->scanning) return TX_CONTINUE; if (!local->ps_sdata) return TX_CONTINUE; /* No point if we're going to suspend */ if (local->quiescing) return TX_CONTINUE; /* dynamic ps is supported only in managed mode */ if (tx->sdata->vif.type != NL80211_IFTYPE_STATION) return TX_CONTINUE; if (unlikely(info->flags & IEEE80211_TX_INTFL_OFFCHAN_TX_OK)) return TX_CONTINUE; ifmgd = &tx->sdata->u.mgd; /* * Don't wake up from power save if u-apsd is enabled, the voip ac has * u-apsd enabled and the frame is in the voip class. This effectively * means that even if all access categories have u-apsd enabled, in * practice u-apsd is only used with the voip ac. This is a * workaround for the case when received voip class packets do not * have a correct qos tag for some reason, due to the network or the * peer application. * * Note: ifmgd->uapsd_queues access is racy here. If the value is * changed via debugfs, the user needs to reassociate manually to have * everything in sync. */
*/ if ((ifmgd->flags & IEEE80211_STA_UAPSD_ENABLED) && (ifmgd->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) && skb_get_queue_mapping(tx->skb) == IEEE80211_AC_VO) return TX_CONTINUE; if (local->hw.conf.flags & IEEE80211_CONF_PS) { ieee80211_stop_queues_by_reason(&local->hw, IEEE80211_MAX_QUEUE_MAP, IEEE80211_QUEUE_STOP_REASON_PS, false); ifmgd->flags &= ~IEEE80211_STA_NULLFUNC_ACKED; ieee80211_queue_work(&local->hw, &local->dynamic_ps_disable_work); } /* Don't restart the timer if we're not disassociated */ if (!ifmgd->associated) return TX_CONTINUE; mod_timer(&local->dynamic_ps_timer, jiffies + msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout)); return TX_CONTINUE; } static ieee80211_tx_result debug_noinline ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); bool assoc = false; if (unlikely(info->flags & IEEE80211_TX_CTL_INJECTED)) return TX_CONTINUE; if (unlikely(test_bit(SCAN_SW_SCANNING, &tx->local->scanning)) && test_bit(SDATA_STATE_OFFCHANNEL, &tx->sdata->state) && !ieee80211_is_probe_req(hdr->frame_control) && !ieee80211_is_nullfunc(hdr->frame_control)) /* * When software scanning only nullfunc frames (to notify * the sleep state to the AP) and probe requests (for the * active scan) are allowed, all other frames should not be * sent and we should not get here, but if we do * nonetheless, drop them to avoid sending them * off-channel. See the link below and * ieee80211_start_scan() for more. * * http://article.gmane.org/gmane.linux.kernel.wireless.general/30089 */ return TX_DROP; if (tx->sdata->vif.type == NL80211_IFTYPE_OCB) return TX_CONTINUE; if (tx->sdata->vif.type == NL80211_IFTYPE_WDS) return TX_CONTINUE; if (tx->flags & IEEE80211_TX_PS_BUFFERED) return TX_CONTINUE; if (tx->sta) assoc = test_sta_flag(tx->sta, WLAN_STA_ASSOC); if (likely(tx->flags & IEEE80211_TX_UNICAST)) { if (unlikely(!assoc && ieee80211_is_data(hdr->frame_control))) { #ifdef CPTCFG_MAC80211_VERBOSE_DEBUG sdata_info(tx->sdata, "dropped data frame to not associated station %pM\n", hdr->addr1); #endif I802_DEBUG_INC(tx->local->tx_handlers_drop_not_assoc); return TX_DROP; } } else if (unlikely(ieee80211_is_data(hdr->frame_control) && ieee80211_vif_get_num_mcast_if(tx->sdata) == 0)) { /* * No associated STAs - no need to send multicast * frames. */ return TX_DROP; } return TX_CONTINUE; } /* This function is called whenever the AP is about to exceed the maximum limit * of buffered frames for power saving STAs. This situation should not really * happen often during normal operation, so dropping the oldest buffered packet * from each queue should be OK to make some room for new frames. */ static void purge_old_ps_buffers(struct ieee80211_local *local) { int total = 0, purged = 0; struct sk_buff *skb; struct ieee80211_sub_if_data *sdata; struct sta_info *sta; list_for_each_entry_rcu(sdata, &local->interfaces, list) { struct ps_data *ps; if (sdata->vif.type == NL80211_IFTYPE_AP) ps = &sdata->u.ap.ps; else if (ieee80211_vif_is_mesh(&sdata->vif)) ps = &sdata->u.mesh.ps; else continue; skb = skb_dequeue(&ps->bc_buf); if (skb) { purged++; ieee80211_free_txskb(&local->hw, skb); } total += skb_queue_len(&ps->bc_buf); } /* * Drop one frame from each station from the lowest-priority * AC that has frames at all. 
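* (the loop below walks from IEEE80211_AC_BK down to IEEE80211_AC_VO, so background traffic is sacrificed before voice)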
*/ list_for_each_entry_rcu(sta, &local->sta_list, list) { int ac; for (ac = IEEE80211_AC_BK; ac >= IEEE80211_AC_VO; ac--) { skb = skb_dequeue(&sta->ps_tx_buf[ac]); total += skb_queue_len(&sta->ps_tx_buf[ac]); if (skb) { purged++; ieee80211_free_txskb(&local->hw, skb); break; } } } local->total_ps_buffered = total; ps_dbg_hw(&local->hw, "PS buffers full - purged %d frames\n", purged); } static ieee80211_tx_result ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; struct ps_data *ps; /* * broadcast/multicast frame * * If any of the associated/peer stations is in power save mode, * the frame is buffered to be sent after DTIM beacon frame. * This is done either by the hardware or us. */ /* powersaving STAs currently only in AP/VLAN/mesh mode */ if (tx->sdata->vif.type == NL80211_IFTYPE_AP || tx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) { if (!tx->sdata->bss) return TX_CONTINUE; ps = &tx->sdata->bss->ps; } else if (ieee80211_vif_is_mesh(&tx->sdata->vif)) { ps = &tx->sdata->u.mesh.ps; } else { return TX_CONTINUE; } /* no buffering for ordered frames */ if (ieee80211_has_order(hdr->frame_control)) return TX_CONTINUE; if (ieee80211_is_probe_req(hdr->frame_control)) return TX_CONTINUE; if (ieee80211_hw_check(&tx->local->hw, QUEUE_CONTROL)) info->hw_queue = tx->sdata->vif.cab_queue; /* no stations in PS mode and no buffered packets */ if (!atomic_read(&ps->num_sta_ps) && skb_queue_empty(&ps->bc_buf)) return TX_CONTINUE; info->flags |= IEEE80211_TX_CTL_SEND_AFTER_DTIM; /* device releases frame after DTIM beacon */ if (!ieee80211_hw_check(&tx->local->hw, HOST_BROADCAST_PS_BUFFERING)) return TX_CONTINUE; /* buffered in mac80211 */ if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER) purge_old_ps_buffers(tx->local); if (skb_queue_len(&ps->bc_buf) >= AP_MAX_BC_BUFFER) { ps_dbg(tx->sdata, "BC TX buffer full - dropping the oldest frame\n"); ieee80211_free_txskb(&tx->local->hw, skb_dequeue(&ps->bc_buf)); } else tx->local->total_ps_buffered++; skb_queue_tail(&ps->bc_buf, tx->skb); return TX_QUEUED; } static int ieee80211_use_mfp(__le16 fc, struct sta_info *sta, struct sk_buff *skb) { if (!ieee80211_is_mgmt(fc)) return 0; if (sta == NULL || !test_sta_flag(sta, WLAN_STA_MFP)) return 0; if (!ieee80211_is_robust_mgmt_frame(skb)) return 0; return 1; } static ieee80211_tx_result ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx) { struct sta_info *sta = tx->sta; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; struct ieee80211_local *local = tx->local; if (unlikely(!sta)) return TX_CONTINUE; if (unlikely((test_sta_flag(sta, WLAN_STA_PS_STA) || test_sta_flag(sta, WLAN_STA_PS_DRIVER) || test_sta_flag(sta, WLAN_STA_PS_DELIVER)) && !(info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER))) { int ac = skb_get_queue_mapping(tx->skb); if (ieee80211_is_mgmt(hdr->frame_control) && !ieee80211_is_bufferable_mmpdu(hdr->frame_control)) { info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER; return TX_CONTINUE; } ps_dbg(sta->sdata, "STA %pM aid %d: PS buffer for AC %d\n", sta->sta.addr, sta->sta.aid, ac); if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER) purge_old_ps_buffers(tx->local); /* sync with ieee80211_sta_ps_deliver_wakeup */ spin_lock(&sta->ps_lock); /* * STA woke up in the meantime and all the frames on ps_tx_buf have * been queued to pending queue. 
No reordering can happen, go * ahead and Tx the packet. */ if (!test_sta_flag(sta, WLAN_STA_PS_STA) && !test_sta_flag(sta, WLAN_STA_PS_DRIVER) && !test_sta_flag(sta, WLAN_STA_PS_DELIVER)) { spin_unlock(&sta->ps_lock); return TX_CONTINUE; } if (skb_queue_len(&sta->ps_tx_buf[ac]) >= STA_MAX_TX_BUFFER) { struct sk_buff *old = skb_dequeue(&sta->ps_tx_buf[ac]); ps_dbg(tx->sdata, "STA %pM TX buffer for AC %d full - dropping oldest frame\n", sta->sta.addr, ac); ieee80211_free_txskb(&local->hw, old); } else tx->local->total_ps_buffered++; info->control.jiffies = jiffies; info->control.vif = &tx->sdata->vif; info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING; info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS; skb_queue_tail(&sta->ps_tx_buf[ac], tx->skb); spin_unlock(&sta->ps_lock); if (!timer_pending(&local->sta_cleanup)) mod_timer(&local->sta_cleanup, round_jiffies(jiffies + STA_INFO_CLEANUP_INTERVAL)); /* * We queued up some frames, so the TIM bit might * need to be set, recalculate it. */ sta_info_recalc_tim(sta); return TX_QUEUED; } else if (unlikely(test_sta_flag(sta, WLAN_STA_PS_STA))) { ps_dbg(tx->sdata, "STA %pM in PS mode, but polling/in SP -> send frame\n", sta->sta.addr); } return TX_CONTINUE; } static ieee80211_tx_result debug_noinline ieee80211_tx_h_ps_buf(struct ieee80211_tx_data *tx) { if (unlikely(tx->flags & IEEE80211_TX_PS_BUFFERED)) return TX_CONTINUE; if (tx->flags & IEEE80211_TX_UNICAST) return ieee80211_tx_h_unicast_ps_buf(tx); else return ieee80211_tx_h_multicast_ps_buf(tx); } static ieee80211_tx_result debug_noinline ieee80211_tx_h_check_control_port_protocol(struct ieee80211_tx_data *tx) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); if (unlikely(tx->sdata->control_port_protocol == tx->skb->protocol)) { if (tx->sdata->control_port_no_encrypt) info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; info->control.flags |= IEEE80211_TX_CTRL_PORT_CTRL_PROTO; info->flags |= IEEE80211_TX_CTL_USE_MINRATE; } return TX_CONTINUE; } static ieee80211_tx_result debug_noinline ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx) { struct ieee80211_key *key; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; if (unlikely(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT)) tx->key = NULL; else if (tx->sta && (key = rcu_dereference(tx->sta->ptk[tx->sta->ptk_idx]))) tx->key = key; else if (ieee80211_is_group_privacy_action(tx->skb) && (key = rcu_dereference(tx->sdata->default_multicast_key))) tx->key = key; else if (ieee80211_is_mgmt(hdr->frame_control) && is_multicast_ether_addr(hdr->addr1) && ieee80211_is_robust_mgmt_frame(tx->skb) && (key = rcu_dereference(tx->sdata->default_mgmt_key))) tx->key = key; else if (is_multicast_ether_addr(hdr->addr1) && (key = rcu_dereference(tx->sdata->default_multicast_key))) tx->key = key; else if (!is_multicast_ether_addr(hdr->addr1) && (key = rcu_dereference(tx->sdata->default_unicast_key))) tx->key = key; else tx->key = NULL; if (tx->key) { bool skip_hw = false; /* TODO: add threshold stuff again */ switch (tx->key->conf.cipher) { case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: case WLAN_CIPHER_SUITE_TKIP: if (!ieee80211_is_data_present(hdr->frame_control)) tx->key = NULL; break; case WLAN_CIPHER_SUITE_CCMP: case WLAN_CIPHER_SUITE_CCMP_256: case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: if (!ieee80211_is_data_present(hdr->frame_control) && !ieee80211_use_mfp(hdr->frame_control, tx->sta, tx->skb) && !ieee80211_is_group_privacy_action(tx->skb)) tx->key 
= NULL; else skip_hw = (tx->key->conf.flags & IEEE80211_KEY_FLAG_SW_MGMT_TX) && ieee80211_is_mgmt(hdr->frame_control); break; case WLAN_CIPHER_SUITE_AES_CMAC: case WLAN_CIPHER_SUITE_BIP_CMAC_256: case WLAN_CIPHER_SUITE_BIP_GMAC_128: case WLAN_CIPHER_SUITE_BIP_GMAC_256: if (!ieee80211_is_mgmt(hdr->frame_control)) tx->key = NULL; break; } if (unlikely(tx->key && tx->key->flags & KEY_FLAG_TAINTED && !ieee80211_is_deauth(hdr->frame_control))) return TX_DROP; if (!skip_hw && tx->key && tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) info->control.hw_key = &tx->key->conf; } return TX_CONTINUE; } static ieee80211_tx_result debug_noinline ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); struct ieee80211_hdr *hdr = (void *)tx->skb->data; struct ieee80211_supported_band *sband; u32 len; struct ieee80211_tx_rate_control txrc; struct ieee80211_sta_rates *ratetbl = NULL; bool assoc = false; memset(&txrc, 0, sizeof(txrc)); sband = tx->local->hw.wiphy->bands[info->band]; len = min_t(u32, tx->skb->len + FCS_LEN, tx->local->hw.wiphy->frag_threshold); /* set up the tx rate control struct we give the RC algo */ txrc.hw = &tx->local->hw; txrc.sband = sband; txrc.bss_conf = &tx->sdata->vif.bss_conf; txrc.skb = tx->skb; txrc.reported_rate.idx = -1; txrc.rate_idx_mask = tx->sdata->rc_rateidx_mask[info->band]; if (tx->sdata->rc_has_mcs_mask[info->band]) txrc.rate_idx_mcs_mask = tx->sdata->rc_rateidx_mcs_mask[info->band]; txrc.bss = (tx->sdata->vif.type == NL80211_IFTYPE_AP || tx->sdata->vif.type == NL80211_IFTYPE_MESH_POINT || tx->sdata->vif.type == NL80211_IFTYPE_ADHOC || tx->sdata->vif.type == NL80211_IFTYPE_OCB); /* set up RTS protection if desired */ if (len > tx->local->hw.wiphy->rts_threshold) { txrc.rts = true; } info->control.use_rts = txrc.rts; info->control.use_cts_prot = tx->sdata->vif.bss_conf.use_cts_prot; /* * Use short preamble if the BSS can handle it, but not for * management frames unless we know the receiver can handle * that -- the management frame might be to a station that * just wants a probe response. */ if (tx->sdata->vif.bss_conf.use_short_preamble && (ieee80211_is_data(hdr->frame_control) || (tx->sta && test_sta_flag(tx->sta, WLAN_STA_SHORT_PREAMBLE)))) txrc.short_preamble = true; info->control.short_preamble = txrc.short_preamble; /* don't ask rate control when rate already injected via radiotap */ if (info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT) return TX_CONTINUE; if (tx->sta) assoc = test_sta_flag(tx->sta, WLAN_STA_ASSOC); /* * Lets not bother rate control if we're associated and cannot * talk to the sta. This should not happen. */ if (WARN(test_bit(SCAN_SW_SCANNING, &tx->local->scanning) && assoc && !rate_usable_index_exists(sband, &tx->sta->sta), "%s: Dropped data frame as no usable bitrate found while " "scanning and associated. Target station: " "%pM on %d GHz band\n", tx->sdata->name, hdr->addr1, info->band ? 5 : 2)) return TX_DROP; /* * If we're associated with the sta at this point we know we can at * least send the frame at the lowest bit rate. 
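* * (rate_control_get_rate() fills info->control.rates[]; if the station has a driver-visible rate table and rates[0] comes back invalid, the first table entry is used as the fallback below)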
*/ rate_control_get_rate(tx->sdata, tx->sta, &txrc); if (tx->sta && !info->control.skip_table) ratetbl = rcu_dereference(tx->sta->sta.rates); if (unlikely(info->control.rates[0].idx < 0)) { if (ratetbl) { struct ieee80211_tx_rate rate = { .idx = ratetbl->rate[0].idx, .flags = ratetbl->rate[0].flags, .count = ratetbl->rate[0].count }; if (ratetbl->rate[0].idx < 0) return TX_DROP; tx->rate = rate; } else { return TX_DROP; } } else { tx->rate = info->control.rates[0]; } if (txrc.reported_rate.idx < 0) { txrc.reported_rate = tx->rate; if (tx->sta && ieee80211_is_data(hdr->frame_control)) tx->sta->tx_stats.last_rate = txrc.reported_rate; } else if (tx->sta) tx->sta->tx_stats.last_rate = txrc.reported_rate; if (ratetbl) return TX_CONTINUE; if (unlikely(!info->control.rates[0].count)) info->control.rates[0].count = 1; if (WARN_ON_ONCE((info->control.rates[0].count > 1) && (info->flags & IEEE80211_TX_CTL_NO_ACK))) info->control.rates[0].count = 1; return TX_CONTINUE; } static __le16 ieee80211_tx_next_seq(struct sta_info *sta, int tid) { u16 *seq = &sta->tid_seq[tid]; __le16 ret = cpu_to_le16(*seq); /* Increase the sequence number. */ *seq = (*seq + 0x10) & IEEE80211_SCTL_SEQ; return ret; } static ieee80211_tx_result debug_noinline ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; int tid; /* * Packet injection may want to control the sequence * number, if we have no matching interface then we * neither assign one ourselves nor ask the driver to. */ if (unlikely(info->control.vif->type == NL80211_IFTYPE_MONITOR)) return TX_CONTINUE; if (unlikely(ieee80211_is_ctl(hdr->frame_control))) return TX_CONTINUE; if (ieee80211_hdrlen(hdr->frame_control) < 24) return TX_CONTINUE; if (ieee80211_is_qos_nullfunc(hdr->frame_control)) return TX_CONTINUE; /* * Anything but QoS data that has a sequence number field * (is long enough) gets a sequence number from the global * counter. QoS data frames with a multicast destination * also use the global counter (802.11-2012 9.3.2.10). */ if (!ieee80211_is_data_qos(hdr->frame_control) || is_multicast_ether_addr(hdr->addr1)) { if (tx->flags & IEEE80211_TX_NO_SEQNO) return TX_CONTINUE; /* driver should assign sequence number */ info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ; /* for pure STA mode without beacons, we can do it */ hdr->seq_ctrl = cpu_to_le16(tx->sdata->sequence_number); tx->sdata->sequence_number += 0x10; if (tx->sta) tx->sta->tx_stats.msdu[IEEE80211_NUM_TIDS]++; return TX_CONTINUE; } /* * This should be true for injected/management frames only, for * management frames we have set the IEEE80211_TX_CTL_ASSIGN_SEQ * above since they are not QoS-data frames. 
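* * (everything else reaching this point is unicast QoS data, which takes the per-STA, per-TID counter below)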
*/ if (!tx->sta) return TX_CONTINUE; /* include per-STA, per-TID sequence counter */ tid = ieee80211_get_tid(hdr); tx->sta->tx_stats.msdu[tid]++; hdr->seq_ctrl = ieee80211_tx_next_seq(tx->sta, tid); return TX_CONTINUE; } static int ieee80211_fragment(struct ieee80211_tx_data *tx, struct sk_buff *skb, int hdrlen, int frag_threshold) { struct ieee80211_local *local = tx->local; struct ieee80211_tx_info *info; struct sk_buff *tmp; int per_fragm = frag_threshold - hdrlen - FCS_LEN; int pos = hdrlen + per_fragm; int rem = skb->len - hdrlen - per_fragm; if (WARN_ON(rem < 0)) return -EINVAL; /* first fragment was already added to queue by caller */ while (rem) { int fraglen = per_fragm; if (fraglen > rem) fraglen = rem; rem -= fraglen; tmp = dev_alloc_skb(local->tx_headroom + frag_threshold + tx->sdata->encrypt_headroom + IEEE80211_ENCRYPT_TAILROOM); if (!tmp) return -ENOMEM; __skb_queue_tail(&tx->skbs, tmp); skb_reserve(tmp, local->tx_headroom + tx->sdata->encrypt_headroom); /* copy control information */ memcpy(tmp->cb, skb->cb, sizeof(tmp->cb)); info = IEEE80211_SKB_CB(tmp); info->flags &= ~(IEEE80211_TX_CTL_CLEAR_PS_FILT | IEEE80211_TX_CTL_FIRST_FRAGMENT); if (rem) info->flags |= IEEE80211_TX_CTL_MORE_FRAMES; skb_copy_queue_mapping(tmp, skb); tmp->priority = skb->priority; tmp->dev = skb->dev; /* copy header and data */ skb_put_data(tmp, skb->data, hdrlen); skb_put_data(tmp, skb->data + pos, fraglen); pos += fraglen; } /* adjust first fragment's length */ skb_trim(skb, hdrlen + per_fragm); return 0; } static ieee80211_tx_result debug_noinline ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx) { struct sk_buff *skb = tx->skb; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_hdr *hdr = (void *)skb->data; int frag_threshold = tx->local->hw.wiphy->frag_threshold; int hdrlen; int fragnum; /* no matter what happens, tx->skb moves to tx->skbs */ __skb_queue_tail(&tx->skbs, skb); tx->skb = NULL; if (info->flags & IEEE80211_TX_CTL_DONTFRAG) return TX_CONTINUE; if (ieee80211_hw_check(&tx->local->hw, SUPPORTS_TX_FRAG)) return TX_CONTINUE; /* * Warn when submitting a fragmented A-MPDU frame and drop it. * This scenario is handled in ieee80211_tx_prepare but extra * caution taken here as fragmented ampdu may cause Tx stop. */ if (WARN_ON(info->flags & IEEE80211_TX_CTL_AMPDU)) return TX_DROP; hdrlen = ieee80211_hdrlen(hdr->frame_control); /* internal error, why isn't DONTFRAG set? */ if (WARN_ON(skb->len + FCS_LEN <= frag_threshold)) return TX_DROP; /* * Now fragment the frame. This will allocate all the fragments and * chain them (using skb as the first fragment) to skb->next. * During transmission, we will remove the successfully transmitted * fragments from this list. When the low-level driver rejects one * of the fragments then we will simply pretend to accept the skb * but store it away as pending. */ if (ieee80211_fragment(tx, skb, hdrlen, frag_threshold)) return TX_DROP; /* update duration/seq/flags of fragments */ fragnum = 0; skb_queue_walk(&tx->skbs, skb) { const __le16 morefrags = cpu_to_le16(IEEE80211_FCTL_MOREFRAGS); hdr = (void *)skb->data; info = IEEE80211_SKB_CB(skb); if (!skb_queue_is_last(&tx->skbs, skb)) { hdr->frame_control |= morefrags; /* * No multi-rate retries for fragmented frames, that * would completely throw off the NAV at other STAs. 
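* (each fragment's duration field reserves the medium for the following fragment assuming the first rate; retrying at a different rate would invalidate those reservations)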
*/ info->control.rates[1].idx = -1; info->control.rates[2].idx = -1; info->control.rates[3].idx = -1; BUILD_BUG_ON(IEEE80211_TX_MAX_RATES != 4); info->flags &= ~IEEE80211_TX_CTL_RATE_CTRL_PROBE; } else { hdr->frame_control &= ~morefrags; } hdr->seq_ctrl |= cpu_to_le16(fragnum & IEEE80211_SCTL_FRAG); fragnum++; } return TX_CONTINUE; } static ieee80211_tx_result debug_noinline ieee80211_tx_h_stats(struct ieee80211_tx_data *tx) { struct sk_buff *skb; int ac = -1; if (!tx->sta) return TX_CONTINUE; skb_queue_walk(&tx->skbs, skb) { ac = skb_get_queue_mapping(skb); tx->sta->tx_stats.bytes[ac] += skb->len; } if (ac >= 0) tx->sta->tx_stats.packets[ac]++; return TX_CONTINUE; } static ieee80211_tx_result debug_noinline ieee80211_tx_h_encrypt(struct ieee80211_tx_data *tx) { if (!tx->key) return TX_CONTINUE; switch (tx->key->conf.cipher) { case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: return ieee80211_crypto_wep_encrypt(tx); case WLAN_CIPHER_SUITE_TKIP: return ieee80211_crypto_tkip_encrypt(tx); case WLAN_CIPHER_SUITE_CCMP: return ieee80211_crypto_ccmp_encrypt( tx, IEEE80211_CCMP_MIC_LEN); case WLAN_CIPHER_SUITE_CCMP_256: return ieee80211_crypto_ccmp_encrypt( tx, IEEE80211_CCMP_256_MIC_LEN); case WLAN_CIPHER_SUITE_AES_CMAC: return ieee80211_crypto_aes_cmac_encrypt(tx); case WLAN_CIPHER_SUITE_BIP_CMAC_256: return ieee80211_crypto_aes_cmac_256_encrypt(tx); case WLAN_CIPHER_SUITE_BIP_GMAC_128: case WLAN_CIPHER_SUITE_BIP_GMAC_256: return ieee80211_crypto_aes_gmac_encrypt(tx); case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: return ieee80211_crypto_gcmp_encrypt(tx); default: return ieee80211_crypto_hw_encrypt(tx); } return TX_DROP; } static ieee80211_tx_result debug_noinline ieee80211_tx_h_calculate_duration(struct ieee80211_tx_data *tx) { struct sk_buff *skb; struct ieee80211_hdr *hdr; int next_len; bool group_addr; skb_queue_walk(&tx->skbs, skb) { hdr = (void *) skb->data; if (unlikely(ieee80211_is_pspoll(hdr->frame_control))) break; /* must not overwrite AID */ if (!skb_queue_is_last(&tx->skbs, skb)) { struct sk_buff *next = skb_queue_next(&tx->skbs, skb); next_len = next->len; } else next_len = 0; group_addr = is_multicast_ether_addr(hdr->addr1); hdr->duration_id = ieee80211_duration(tx, skb, group_addr, next_len); } return TX_CONTINUE; } /* actual transmit path */ static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx, struct sk_buff *skb, struct ieee80211_tx_info *info, struct tid_ampdu_tx *tid_tx, int tid) { bool queued = false; bool reset_agg_timer = false; struct sk_buff *purge_skb = NULL; if (test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) { info->flags |= IEEE80211_TX_CTL_AMPDU; reset_agg_timer = true; } else if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) { /* * nothing -- this aggregation session is being started * but that might still fail with the driver */ } else if (!tx->sta->sta.txq[tid]) { spin_lock(&tx->sta->lock); /* * Need to re-check now, because we may get here * * 1) in the window during which the setup is actually * already done, but not marked yet because not all * packets are spliced over to the driver pending * queue yet -- if this happened we acquire the lock * either before or after the splice happens, but * need to recheck which of these cases happened. 
* * 2) during session teardown, if the OPERATIONAL bit * was cleared due to the teardown but the pointer * hasn't been assigned NULL yet (or we loaded it * before it was assigned) -- in this case it may * now be NULL which means we should just let the * packet pass through because splicing the frames * back is already done. */ tid_tx = rcu_dereference_protected_tid_tx(tx->sta, tid); if (!tid_tx) { /* do nothing, let packet pass through */ } else if (test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) { info->flags |= IEEE80211_TX_CTL_AMPDU; reset_agg_timer = true; } else { queued = true; if (info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER) { clear_sta_flag(tx->sta, WLAN_STA_SP); ps_dbg(tx->sta->sdata, "STA %pM aid %d: SP frame queued, close the SP w/o telling the peer\n", tx->sta->sta.addr, tx->sta->sta.aid); } info->control.vif = &tx->sdata->vif; info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING; info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS; __skb_queue_tail(&tid_tx->pending, skb); if (skb_queue_len(&tid_tx->pending) > STA_MAX_TX_BUFFER) purge_skb = __skb_dequeue(&tid_tx->pending); } spin_unlock(&tx->sta->lock); if (purge_skb) ieee80211_free_txskb(&tx->local->hw, purge_skb); } /* reset session timer */ if (reset_agg_timer) tid_tx->last_tx = jiffies; return queued; } /* * initialises @tx * pass %NULL for the station if unknown, a valid pointer if known * or an ERR_PTR() if the station is known not to exist */ static ieee80211_tx_result ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata, struct ieee80211_tx_data *tx, struct sta_info *sta, struct sk_buff *skb) { struct ieee80211_local *local = sdata->local; struct ieee80211_hdr *hdr; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); int tid; memset(tx, 0, sizeof(*tx)); tx->skb = skb; tx->local = local; tx->sdata = sdata; __skb_queue_head_init(&tx->skbs); /* * If this flag is set to true anywhere, and we get here, * we are doing the needed processing, so remove the flag * now. 
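* (The flag marks frames that were queued internally -- e.g. on the
* aggregation pending queue above -- and must pass through the TX
* handlers again once they are released.)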
*/ info->flags &= ~IEEE80211_TX_INTFL_NEED_TXPROCESSING; hdr = (struct ieee80211_hdr *) skb->data; if (likely(sta)) { if (!IS_ERR(sta)) tx->sta = sta; } else { if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) { tx->sta = rcu_dereference(sdata->u.vlan.sta); if (!tx->sta && sdata->wdev.use_4addr) return TX_DROP; } else if (info->flags & (IEEE80211_TX_INTFL_NL80211_FRAME_TX | IEEE80211_TX_CTL_INJECTED) || tx->sdata->control_port_protocol == tx->skb->protocol) { tx->sta = sta_info_get_bss(sdata, hdr->addr1); } if (!tx->sta && !is_multicast_ether_addr(hdr->addr1)) tx->sta = sta_info_get(sdata, hdr->addr1); } if (tx->sta && ieee80211_is_data_qos(hdr->frame_control) && !ieee80211_is_qos_nullfunc(hdr->frame_control) && ieee80211_hw_check(&local->hw, AMPDU_AGGREGATION) && !ieee80211_hw_check(&local->hw, TX_AMPDU_SETUP_IN_HW)) { struct tid_ampdu_tx *tid_tx; tid = ieee80211_get_tid(hdr); tid_tx = rcu_dereference(tx->sta->ampdu_mlme.tid_tx[tid]); if (tid_tx) { bool queued; queued = ieee80211_tx_prep_agg(tx, skb, info, tid_tx, tid); if (unlikely(queued)) return TX_QUEUED; } } if (is_multicast_ether_addr(hdr->addr1)) { tx->flags &= ~IEEE80211_TX_UNICAST; info->flags |= IEEE80211_TX_CTL_NO_ACK; } else tx->flags |= IEEE80211_TX_UNICAST; if (!(info->flags & IEEE80211_TX_CTL_DONTFRAG)) { if (!(tx->flags & IEEE80211_TX_UNICAST) || skb->len + FCS_LEN <= local->hw.wiphy->frag_threshold || info->flags & IEEE80211_TX_CTL_AMPDU) info->flags |= IEEE80211_TX_CTL_DONTFRAG; } if (!tx->sta) info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT; else if (test_and_clear_sta_flag(tx->sta, WLAN_STA_CLEAR_PS_FILT)) { info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT; ieee80211_check_fast_xmit(tx->sta); } info->flags |= IEEE80211_TX_CTL_FIRST_FRAGMENT; return TX_CONTINUE; } static struct txq_info *ieee80211_get_txq(struct ieee80211_local *local, struct ieee80211_vif *vif, struct sta_info *sta, struct sk_buff *skb) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_txq *txq = NULL; if ((info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) || (info->control.flags & IEEE80211_TX_CTRL_PS_RESPONSE)) return NULL; if (unlikely(!ieee80211_is_data_present(hdr->frame_control))) { if ((!ieee80211_is_mgmt(hdr->frame_control) || ieee80211_is_bufferable_mmpdu(hdr->frame_control) || vif->type == NL80211_IFTYPE_STATION) && sta && sta->uploaded) { /* * This will be NULL if the driver didn't set the * opt-in hardware flag. 
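* (IEEE80211_HW_STA_MMPDU_TXQ or IEEE80211_HW_BUFF_MMPDU_TXQ,
* see ieee80211_txq_init() below.)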
*/ txq = sta->sta.txq[IEEE80211_NUM_TIDS]; } } else if (sta) { u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK; if (!sta->uploaded) return NULL; txq = sta->sta.txq[tid]; } else if (vif) { txq = vif->txq; } if (!txq) return NULL; return to_txq_info(txq); } static void ieee80211_set_skb_enqueue_time(struct sk_buff *skb) { IEEE80211_SKB_CB(skb)->control.enqueue_time = codel_get_time(); } static u32 codel_skb_len_func(const struct sk_buff *skb) { return skb->len; } static codel_time_t codel_skb_time_func(const struct sk_buff *skb) { const struct ieee80211_tx_info *info; info = (const struct ieee80211_tx_info *)skb->cb; return info->control.enqueue_time; } static struct sk_buff *codel_dequeue_func(struct codel_vars *cvars, void *ctx) { struct ieee80211_local *local; struct txq_info *txqi; struct fq *fq; struct fq_flow *flow; txqi = ctx; local = vif_to_sdata(txqi->txq.vif)->local; fq = &local->fq; if (cvars == &txqi->def_cvars) flow = &txqi->def_flow; else flow = &fq->flows[cvars - local->cvars]; return fq_flow_dequeue(fq, flow); } static void codel_drop_func(struct sk_buff *skb, void *ctx) { struct ieee80211_local *local; struct ieee80211_hw *hw; struct txq_info *txqi; txqi = ctx; local = vif_to_sdata(txqi->txq.vif)->local; hw = &local->hw; ieee80211_free_txskb(hw, skb); } static struct sk_buff *fq_tin_dequeue_func(struct fq *fq, struct fq_tin *tin, struct fq_flow *flow) { struct ieee80211_local *local; struct txq_info *txqi; struct codel_vars *cvars; struct codel_params *cparams; struct codel_stats *cstats; local = container_of(fq, struct ieee80211_local, fq); txqi = container_of(tin, struct txq_info, tin); cstats = &txqi->cstats; if (txqi->txq.sta) { struct sta_info *sta = container_of(txqi->txq.sta, struct sta_info, sta); cparams = &sta->cparams; } else { cparams = &local->cparams; } if (flow == &txqi->def_flow) cvars = &txqi->def_cvars; else cvars = &local->cvars[flow - fq->flows]; return codel_dequeue(txqi, &flow->backlog, cparams, cvars, cstats, codel_skb_len_func, codel_skb_time_func, codel_drop_func, codel_dequeue_func); } static void fq_skb_free_func(struct fq *fq, struct fq_tin *tin, struct fq_flow *flow, struct sk_buff *skb) { struct ieee80211_local *local; local = container_of(fq, struct ieee80211_local, fq); ieee80211_free_txskb(&local->hw, skb); } static struct fq_flow *fq_flow_get_default_func(struct fq *fq, struct fq_tin *tin, int idx, struct sk_buff *skb) { struct txq_info *txqi; txqi = container_of(tin, struct txq_info, tin); return &txqi->def_flow; } static void ieee80211_txq_enqueue(struct ieee80211_local *local, struct txq_info *txqi, struct sk_buff *skb) { struct fq *fq = &local->fq; struct fq_tin *tin = &txqi->tin; ieee80211_set_skb_enqueue_time(skb); fq_tin_enqueue(fq, tin, skb, fq_skb_free_func, fq_flow_get_default_func); } static bool fq_vlan_filter_func(struct fq *fq, struct fq_tin *tin, struct fq_flow *flow, struct sk_buff *skb, void *data) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); return info->control.vif == data; } void ieee80211_txq_remove_vlan(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata) { struct fq *fq = &local->fq; struct txq_info *txqi; struct fq_tin *tin; struct ieee80211_sub_if_data *ap; if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_AP_VLAN)) return; ap = container_of(sdata->bss, struct ieee80211_sub_if_data, u.ap); if (!ap->vif.txq) return; txqi = to_txq_info(ap->vif.txq); tin = &txqi->tin; spin_lock_bh(&fq->lock); fq_tin_filter(fq, tin, fq_vlan_filter_func, &sdata->vif, fq_skb_free_func); 
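/* everything queued on the AP TXQ for this VLAN's vif has now
 * been freed via fq_skb_free_func() */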
spin_unlock_bh(&fq->lock); } void ieee80211_txq_init(struct ieee80211_sub_if_data *sdata, struct sta_info *sta, struct txq_info *txqi, int tid) { fq_tin_init(&txqi->tin); fq_flow_init(&txqi->def_flow); codel_vars_init(&txqi->def_cvars); codel_stats_init(&txqi->cstats); __skb_queue_head_init(&txqi->frags); txqi->txq.vif = &sdata->vif; if (!sta) { sdata->vif.txq = &txqi->txq; txqi->txq.tid = 0; txqi->txq.ac = IEEE80211_AC_BE; return; } if (tid == IEEE80211_NUM_TIDS) { if (sdata->vif.type == NL80211_IFTYPE_STATION) { /* Drivers need to opt in to the management MPDU TXQ */ if (!ieee80211_hw_check(&sdata->local->hw, STA_MMPDU_TXQ)) return; } else if (!ieee80211_hw_check(&sdata->local->hw, BUFF_MMPDU_TXQ)) { /* Drivers need to opt in to the bufferable MMPDU TXQ */ return; } txqi->txq.ac = IEEE80211_AC_VO; } else { txqi->txq.ac = ieee80211_ac_from_tid(tid); } txqi->txq.sta = &sta->sta; txqi->txq.tid = tid; sta->sta.txq[tid] = &txqi->txq; } void ieee80211_txq_purge(struct ieee80211_local *local, struct txq_info *txqi) { struct fq *fq = &local->fq; struct fq_tin *tin = &txqi->tin; fq_tin_reset(fq, tin, fq_skb_free_func); ieee80211_purge_tx_queue(&local->hw, &txqi->frags); } void ieee80211_txq_set_params(struct ieee80211_local *local) { if (local->hw.wiphy->txq_limit) local->fq.limit = local->hw.wiphy->txq_limit; else local->hw.wiphy->txq_limit = local->fq.limit; if (local->hw.wiphy->txq_memory_limit) local->fq.memory_limit = local->hw.wiphy->txq_memory_limit; else local->hw.wiphy->txq_memory_limit = local->fq.memory_limit; if (local->hw.wiphy->txq_quantum) local->fq.quantum = local->hw.wiphy->txq_quantum; else local->hw.wiphy->txq_quantum = local->fq.quantum; } int ieee80211_txq_setup_flows(struct ieee80211_local *local) { struct fq *fq = &local->fq; int ret; int i; bool supp_vht = false; enum nl80211_band band; if (!local->ops->wake_tx_queue) return 0; ret = fq_init(fq, 4096); if (ret) return ret; /* * If the hardware doesn't support VHT, it is safe to limit the maximum * queue size. 4 Mbytes is 64 max-size aggregates in 802.11n. 
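* (A maximal HT A-MPDU is 64 kB, so 4 Mbytes holds 64 of them.)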
*/ for (band = 0; band < NUM_NL80211_BANDS; band++) { struct ieee80211_supported_band *sband; sband = local->hw.wiphy->bands[band]; if (!sband) continue; supp_vht = supp_vht || sband->vht_cap.vht_supported; } if (!supp_vht) fq->memory_limit = 4 << 20; /* 4 Mbytes */ codel_params_init(&local->cparams); local->cparams.interval = MS2TIME(100); local->cparams.target = MS2TIME(20); local->cparams.ecn = true; local->cvars = kcalloc(fq->flows_cnt, sizeof(local->cvars[0]), GFP_KERNEL); if (!local->cvars) { spin_lock_bh(&fq->lock); fq_reset(fq, fq_skb_free_func); spin_unlock_bh(&fq->lock); return -ENOMEM; } for (i = 0; i < fq->flows_cnt; i++) codel_vars_init(&local->cvars[i]); ieee80211_txq_set_params(local); return 0; } void ieee80211_txq_teardown_flows(struct ieee80211_local *local) { struct fq *fq = &local->fq; if (!local->ops->wake_tx_queue) return; kfree(local->cvars); local->cvars = NULL; spin_lock_bh(&fq->lock); fq_reset(fq, fq_skb_free_func); spin_unlock_bh(&fq->lock); } static bool ieee80211_queue_skb(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct sta_info *sta, struct sk_buff *skb) { struct fq *fq = &local->fq; struct ieee80211_vif *vif; struct txq_info *txqi; if (!local->ops->wake_tx_queue || sdata->vif.type == NL80211_IFTYPE_MONITOR) return false; if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) sdata = container_of(sdata->bss, struct ieee80211_sub_if_data, u.ap); vif = &sdata->vif; txqi = ieee80211_get_txq(local, vif, sta, skb); if (!txqi) return false; spin_lock_bh(&fq->lock); ieee80211_txq_enqueue(local, txqi, skb); spin_unlock_bh(&fq->lock); drv_wake_tx_queue(local, txqi); return true; } static bool ieee80211_tx_frags(struct ieee80211_local *local, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct sk_buff_head *skbs, bool txpending) { struct ieee80211_tx_control control = {}; struct sk_buff *skb, *tmp; unsigned long flags; skb_queue_walk_safe(skbs, skb, tmp) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); int q = info->hw_queue; #ifdef CPTCFG_MAC80211_VERBOSE_DEBUG if (WARN_ON_ONCE(q >= local->hw.queues)) { __skb_unlink(skb, skbs); ieee80211_free_txskb(&local->hw, skb); continue; } #endif spin_lock_irqsave(&local->queue_stop_reason_lock, flags); if (local->queue_stop_reasons[q] || (!txpending && !skb_queue_empty(&local->pending[q]))) { if (unlikely(info->flags & IEEE80211_TX_INTFL_OFFCHAN_TX_OK)) { if (local->queue_stop_reasons[q] & ~BIT(IEEE80211_QUEUE_STOP_REASON_OFFCHANNEL)) { /* * Drop off-channel frames if queues * are stopped for any reason other * than off-channel operation. Never * queue them. */ spin_unlock_irqrestore( &local->queue_stop_reason_lock, flags); ieee80211_purge_tx_queue(&local->hw, skbs); return true; } } else { /* * Since queue is stopped, queue up frames for * later transmission from the tx-pending * tasklet when the queue is woken again. */ if (txpending) skb_queue_splice_init(skbs, &local->pending[q]); else skb_queue_splice_tail_init(skbs, &local->pending[q]); spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); return false; } } spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); info->control.vif = vif; control.sta = sta; __skb_unlink(skb, skbs); drv_tx(local, &control, skb); } return true; } /* * Returns false if the frame couldn't be transmitted but was queued instead. 
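* Queued frames end up on local->pending[] and are retried from the
* tx-pending tasklet once the queue is woken again.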
*/ static bool __ieee80211_tx(struct ieee80211_local *local, struct sk_buff_head *skbs, int led_len, struct sta_info *sta, bool txpending) { struct ieee80211_tx_info *info; struct ieee80211_sub_if_data *sdata; struct ieee80211_vif *vif; struct ieee80211_sta *pubsta; struct sk_buff *skb; bool result = true; __le16 fc; if (WARN_ON(skb_queue_empty(skbs))) return true; skb = skb_peek(skbs); fc = ((struct ieee80211_hdr *)skb->data)->frame_control; info = IEEE80211_SKB_CB(skb); sdata = vif_to_sdata(info->control.vif); if (sta && !sta->uploaded) sta = NULL; if (sta) pubsta = &sta->sta; else pubsta = NULL; switch (sdata->vif.type) { case NL80211_IFTYPE_MONITOR: if (sdata->u.mntr.flags & MONITOR_FLAG_ACTIVE) { vif = &sdata->vif; break; } sdata = rcu_dereference(local->monitor_sdata); if (sdata) { vif = &sdata->vif; info->hw_queue = vif->hw_queue[skb_get_queue_mapping(skb)]; } else if (ieee80211_hw_check(&local->hw, QUEUE_CONTROL)) { ieee80211_purge_tx_queue(&local->hw, skbs); return true; } else vif = NULL; break; case NL80211_IFTYPE_AP_VLAN: sdata = container_of(sdata->bss, struct ieee80211_sub_if_data, u.ap); /* fall through */ default: vif = &sdata->vif; break; } result = ieee80211_tx_frags(local, vif, pubsta, skbs, txpending); ieee80211_tpt_led_trig_tx(local, fc, led_len); WARN_ON_ONCE(!skb_queue_empty(skbs)); return result; } /* * Invoke TX handlers, return 0 on success and non-zero if the * frame was dropped or queued. * * The handlers are split into an early and late part. The latter is everything * that can be sensitive to reordering, and will be deferred to after packets * are dequeued from the intermediate queues (when they are enabled). */ static int invoke_tx_handlers_early(struct ieee80211_tx_data *tx) { ieee80211_tx_result res = TX_DROP; #define CALL_TXH(txh) \ do { \ res = txh(tx); \ if (res != TX_CONTINUE) \ goto txh_done; \ } while (0) CALL_TXH(ieee80211_tx_h_dynamic_ps); CALL_TXH(ieee80211_tx_h_check_assoc); CALL_TXH(ieee80211_tx_h_ps_buf); CALL_TXH(ieee80211_tx_h_check_control_port_protocol); CALL_TXH(ieee80211_tx_h_select_key); if (!ieee80211_hw_check(&tx->local->hw, HAS_RATE_CONTROL)) CALL_TXH(ieee80211_tx_h_rate_ctrl); txh_done: if (unlikely(res == TX_DROP)) { I802_DEBUG_INC(tx->local->tx_handlers_drop); if (tx->skb) ieee80211_free_txskb(&tx->local->hw, tx->skb); else ieee80211_purge_tx_queue(&tx->local->hw, &tx->skbs); return -1; } else if (unlikely(res == TX_QUEUED)) { I802_DEBUG_INC(tx->local->tx_handlers_queued); return -1; } return 0; } /* * Late handlers can be called while the sta lock is held. Handlers that can * cause packets to be generated will cause deadlock! */ static int invoke_tx_handlers_late(struct ieee80211_tx_data *tx) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); ieee80211_tx_result res = TX_CONTINUE; if (unlikely(info->flags & IEEE80211_TX_INTFL_RETRANSMISSION)) { __skb_queue_tail(&tx->skbs, tx->skb); tx->skb = NULL; goto txh_done; } CALL_TXH(ieee80211_tx_h_michael_mic_add); CALL_TXH(ieee80211_tx_h_sequence); CALL_TXH(ieee80211_tx_h_fragment); /* handlers after fragment must be aware of tx info fragmentation! 
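* (tx->skb has been moved into the tx->skbs queue, so the handlers
* below walk the fragment list instead of a single skb.)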
*/ CALL_TXH(ieee80211_tx_h_stats); CALL_TXH(ieee80211_tx_h_encrypt); if (!ieee80211_hw_check(&tx->local->hw, HAS_RATE_CONTROL)) CALL_TXH(ieee80211_tx_h_calculate_duration); #undef CALL_TXH txh_done: if (unlikely(res == TX_DROP)) { I802_DEBUG_INC(tx->local->tx_handlers_drop); if (tx->skb) ieee80211_free_txskb(&tx->local->hw, tx->skb); else ieee80211_purge_tx_queue(&tx->local->hw, &tx->skbs); return -1; } else if (unlikely(res == TX_QUEUED)) { I802_DEBUG_INC(tx->local->tx_handlers_queued); return -1; } return 0; } static int invoke_tx_handlers(struct ieee80211_tx_data *tx) { int r = invoke_tx_handlers_early(tx); if (r) return r; return invoke_tx_handlers_late(tx); } bool ieee80211_tx_prepare_skb(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct sk_buff *skb, int band, struct ieee80211_sta **sta) { struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_tx_data tx; struct sk_buff *skb2; if (ieee80211_tx_prepare(sdata, &tx, NULL, skb) == TX_DROP) return false; info->band = band; info->control.vif = vif; info->hw_queue = vif->hw_queue[skb_get_queue_mapping(skb)]; if (invoke_tx_handlers(&tx)) return false; if (sta) { if (tx.sta) *sta = &tx.sta->sta; else *sta = NULL; } /* this function isn't suitable for fragmented data frames */ skb2 = __skb_dequeue(&tx.skbs); if (WARN_ON(skb2 != skb || !skb_queue_empty(&tx.skbs))) { ieee80211_free_txskb(hw, skb2); ieee80211_purge_tx_queue(hw, &tx.skbs); return false; } return true; } EXPORT_SYMBOL(ieee80211_tx_prepare_skb); /* * Returns false if the frame couldn't be transmitted but was queued instead. */ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata, struct sta_info *sta, struct sk_buff *skb, bool txpending, u32 txdata_flags) { struct ieee80211_local *local = sdata->local; struct ieee80211_tx_data tx; ieee80211_tx_result res_prepare; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); bool result = true; int led_len; if (unlikely(skb->len < 10)) { dev_kfree_skb(skb); return true; } /* initialises tx */ led_len = skb->len; res_prepare = ieee80211_tx_prepare(sdata, &tx, sta, skb); tx.flags |= txdata_flags; if (unlikely(res_prepare == TX_DROP)) { ieee80211_free_txskb(&local->hw, skb); return true; } else if (unlikely(res_prepare == TX_QUEUED)) { return true; } /* set up hw_queue value early */ if (!(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) || !ieee80211_hw_check(&local->hw, QUEUE_CONTROL)) info->hw_queue = sdata->vif.hw_queue[skb_get_queue_mapping(skb)]; if (invoke_tx_handlers_early(&tx)) return true; if (ieee80211_queue_skb(local, sdata, tx.sta, tx.skb)) return true; if (!invoke_tx_handlers_late(&tx)) result = __ieee80211_tx(local, &tx.skbs, led_len, tx.sta, txpending); return result; } /* device xmit handlers */ static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, int head_need, bool may_encrypt) { struct ieee80211_local *local = sdata->local; int tail_need = 0; if (may_encrypt && sdata->crypto_tx_tailroom_needed_cnt) { tail_need = IEEE80211_ENCRYPT_TAILROOM; tail_need -= skb_tailroom(skb); tail_need = max_t(int, tail_need, 0); } if (skb_cloned(skb) && (!ieee80211_hw_check(&local->hw, SUPPORTS_CLONED_SKBS) || !skb_clone_writable(skb, ETH_HLEN) || (may_encrypt && sdata->crypto_tx_tailroom_needed_cnt))) I802_DEBUG_INC(local->tx_expand_skb_head_cloned); else if (head_need || tail_need) I802_DEBUG_INC(local->tx_expand_skb_head); else return 0; if (pskb_expand_head(skb, head_need, tail_need, GFP_ATOMIC)) { 
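/* expansion failed under GFP_ATOMIC; callers free the skb on -ENOMEM */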
wiphy_debug(local->hw.wiphy, "failed to reallocate TX buffer\n"); return -ENOMEM; } return 0; } void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, struct sta_info *sta, struct sk_buff *skb, u32 txdata_flags) { struct ieee80211_local *local = sdata->local; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_hdr *hdr; int headroom; bool may_encrypt; may_encrypt = !(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT); headroom = local->tx_headroom; if (may_encrypt) headroom += sdata->encrypt_headroom; headroom -= skb_headroom(skb); headroom = max_t(int, 0, headroom); if (ieee80211_skb_resize(sdata, skb, headroom, may_encrypt)) { ieee80211_free_txskb(&local->hw, skb); return; } hdr = (struct ieee80211_hdr *) skb->data; info->control.vif = &sdata->vif; if (ieee80211_vif_is_mesh(&sdata->vif)) { if (ieee80211_is_data(hdr->frame_control) && is_unicast_ether_addr(hdr->addr1)) { if (mesh_nexthop_resolve(sdata, skb)) return; /* skb queued: don't free */ } else { ieee80211_mps_set_frame_flags(sdata, NULL, hdr); } } ieee80211_set_qos_hdr(sdata, skb); ieee80211_tx(sdata, sta, skb, false, txdata_flags); } static bool ieee80211_parse_tx_radiotap(struct ieee80211_local *local, struct sk_buff *skb) { struct ieee80211_radiotap_iterator iterator; struct ieee80211_radiotap_header *rthdr = (struct ieee80211_radiotap_header *) skb->data; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_supported_band *sband = local->hw.wiphy->bands[info->band]; int ret = ieee80211_radiotap_iterator_init(&iterator, rthdr, skb->len, NULL); u16 txflags; u16 rate = 0; bool rate_found = false; u8 rate_retries = 0; u16 rate_flags = 0; u8 mcs_known, mcs_flags, mcs_bw; u16 vht_known; u8 vht_mcs = 0, vht_nss = 0; int i; info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT | IEEE80211_TX_CTL_DONTFRAG; /* * for every radiotap entry that is present * (ieee80211_radiotap_iterator_next returns -ENOENT when no more * entries present, or -EINVAL on error) */ while (!ret) { ret = ieee80211_radiotap_iterator_next(&iterator); if (ret) continue; /* see if this argument is something we can use */ switch (iterator.this_arg_index) { /* * You must take care when dereferencing iterator.this_arg * for multibyte types... the pointer is not aligned. Use * get_unaligned((type *)iterator.this_arg) to dereference * iterator.this_arg for type "type" safely on all arches. */ case IEEE80211_RADIOTAP_FLAGS: if (*iterator.this_arg & IEEE80211_RADIOTAP_F_FCS) { /* * this indicates that the skb we have been * handed has the 32-bit FCS CRC at the end... 
* we should react to that by snipping it off * because it will be recomputed and added * on transmission */ if (skb->len < (iterator._max_length + FCS_LEN)) return false; skb_trim(skb, skb->len - FCS_LEN); } if (*iterator.this_arg & IEEE80211_RADIOTAP_F_WEP) info->flags &= ~IEEE80211_TX_INTFL_DONT_ENCRYPT; if (*iterator.this_arg & IEEE80211_RADIOTAP_F_FRAG) info->flags &= ~IEEE80211_TX_CTL_DONTFRAG; break; case IEEE80211_RADIOTAP_TX_FLAGS: txflags = get_unaligned_le16(iterator.this_arg); if (txflags & IEEE80211_RADIOTAP_F_TX_NOACK) info->flags |= IEEE80211_TX_CTL_NO_ACK; break; case IEEE80211_RADIOTAP_RATE: rate = *iterator.this_arg; rate_flags = 0; rate_found = true; break; case IEEE80211_RADIOTAP_DATA_RETRIES: rate_retries = *iterator.this_arg; break; case IEEE80211_RADIOTAP_MCS: mcs_known = iterator.this_arg[0]; mcs_flags = iterator.this_arg[1]; if (!(mcs_known & IEEE80211_RADIOTAP_MCS_HAVE_MCS)) break; rate_found = true; rate = iterator.this_arg[2]; rate_flags = IEEE80211_TX_RC_MCS; if (mcs_known & IEEE80211_RADIOTAP_MCS_HAVE_GI && mcs_flags & IEEE80211_RADIOTAP_MCS_SGI) rate_flags |= IEEE80211_TX_RC_SHORT_GI; mcs_bw = mcs_flags & IEEE80211_RADIOTAP_MCS_BW_MASK; if (mcs_known & IEEE80211_RADIOTAP_MCS_HAVE_BW && mcs_bw == IEEE80211_RADIOTAP_MCS_BW_40) rate_flags |= IEEE80211_TX_RC_40_MHZ_WIDTH; break; case IEEE80211_RADIOTAP_VHT: vht_known = get_unaligned_le16(iterator.this_arg); rate_found = true; rate_flags = IEEE80211_TX_RC_VHT_MCS; if ((vht_known & IEEE80211_RADIOTAP_VHT_KNOWN_GI) && (iterator.this_arg[2] & IEEE80211_RADIOTAP_VHT_FLAG_SGI)) rate_flags |= IEEE80211_TX_RC_SHORT_GI; if (vht_known & IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH) { if (iterator.this_arg[3] == 1) rate_flags |= IEEE80211_TX_RC_40_MHZ_WIDTH; else if (iterator.this_arg[3] == 4) rate_flags |= IEEE80211_TX_RC_80_MHZ_WIDTH; else if (iterator.this_arg[3] == 11) rate_flags |= IEEE80211_TX_RC_160_MHZ_WIDTH; } vht_mcs = iterator.this_arg[4] >> 4; vht_nss = iterator.this_arg[4] & 0xF; break; /* * Please update the file * Documentation/networking/mac80211-injection.txt * when parsing new fields here. 
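* Fields without an explicit handler fall through to the default
* case below and are skipped.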
*/ default: break; } } if (ret != -ENOENT) /* ie, if we didn't simply run out of fields */ return false; if (rate_found) { info->control.flags |= IEEE80211_TX_CTRL_RATE_INJECT; for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) { info->control.rates[i].idx = -1; info->control.rates[i].flags = 0; info->control.rates[i].count = 0; } if (rate_flags & IEEE80211_TX_RC_MCS) { info->control.rates[0].idx = rate; } else if (rate_flags & IEEE80211_TX_RC_VHT_MCS) { ieee80211_rate_set_vht(info->control.rates, vht_mcs, vht_nss); } else { for (i = 0; i < sband->n_bitrates; i++) { if (rate * 5 != sband->bitrates[i].bitrate) continue; info->control.rates[0].idx = i; break; } } if (info->control.rates[0].idx < 0) info->control.flags &= ~IEEE80211_TX_CTRL_RATE_INJECT; info->control.rates[0].flags = rate_flags; info->control.rates[0].count = min_t(u8, rate_retries + 1, local->hw.max_rate_tries); } /* * remove the radiotap header * iterator->_max_length was sanity-checked against * skb->len by iterator init */ skb_pull(skb, iterator._max_length); return true; } netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); struct ieee80211_chanctx_conf *chanctx_conf; struct ieee80211_radiotap_header *prthdr = (struct ieee80211_radiotap_header *)skb->data; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_hdr *hdr; struct ieee80211_sub_if_data *tmp_sdata, *sdata; struct cfg80211_chan_def *chandef; u16 len_rthdr; int hdrlen; /* check for not even having the fixed radiotap header part */ if (unlikely(skb->len < sizeof(struct ieee80211_radiotap_header))) goto fail; /* too short to be possibly valid */ /* is it a header version we can trust to find length from? */ if (unlikely(prthdr->it_version)) goto fail; /* only version 0 is supported */ /* then there must be a radiotap header with a length we can use */ len_rthdr = ieee80211_get_radiotap_len(skb->data); /* does the skb contain enough to deliver on the alleged length? */ if (unlikely(skb->len < len_rthdr)) goto fail; /* skb too short for claimed rt header extent */ /* * fix up the pointers accounting for the radiotap * header still being in there. We are being given * a precooked IEEE80211 header so no need for * normal processing */ skb_set_mac_header(skb, len_rthdr); /* * these are just fixed to the end of the rt area since we * don't have any better information and at this point, nobody cares */ skb_set_network_header(skb, len_rthdr); skb_set_transport_header(skb, len_rthdr); if (skb->len < len_rthdr + 2) goto fail; hdr = (struct ieee80211_hdr *)(skb->data + len_rthdr); hdrlen = ieee80211_hdrlen(hdr->frame_control); if (skb->len < len_rthdr + hdrlen) goto fail; /* * Initialize skb->protocol if the injected frame is a data frame * carrying a rfc1042 header */ if (ieee80211_is_data(hdr->frame_control) && skb->len >= len_rthdr + hdrlen + sizeof(rfc1042_header) + 2) { u8 *payload = (u8 *)hdr + hdrlen; if (ether_addr_equal(payload, rfc1042_header)) skb->protocol = cpu_to_be16((payload[6] << 8) | payload[7]); } memset(info, 0, sizeof(*info)); info->flags = IEEE80211_TX_CTL_REQ_TX_STATUS | IEEE80211_TX_CTL_INJECTED; rcu_read_lock(); /* * We process outgoing injected frames that have a local address * we handle as though they are non-injected frames. 
* This code here isn't entirely correct, the local MAC address * isn't always enough to find the interface to use; for proper * VLAN/WDS support we will need a different mechanism (which * likely isn't going to be monitor interfaces). */ sdata = IEEE80211_DEV_TO_SUB_IF(dev); list_for_each_entry_rcu(tmp_sdata, &local->interfaces, list) { if (!ieee80211_sdata_running(tmp_sdata)) continue; if (tmp_sdata->vif.type == NL80211_IFTYPE_MONITOR || tmp_sdata->vif.type == NL80211_IFTYPE_AP_VLAN || tmp_sdata->vif.type == NL80211_IFTYPE_WDS) continue; if (ether_addr_equal(tmp_sdata->vif.addr, hdr->addr2)) { sdata = tmp_sdata; break; } } chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); if (!chanctx_conf) { tmp_sdata = rcu_dereference(local->monitor_sdata); if (tmp_sdata) chanctx_conf = rcu_dereference(tmp_sdata->vif.chanctx_conf); } if (chanctx_conf) chandef = &chanctx_conf->def; else if (!local->use_chanctx) chandef = &local->_oper_chandef; else goto fail_rcu; /* * Frame injection is not allowed if beaconing is not allowed * or if we need radar detection. Beaconing is usually not allowed when * the mode or operation (Adhoc, AP, Mesh) does not support DFS. * Passive scan is also used in world regulatory domains where * your country is not known and as such it should be treated as * NO TX unless the channel is explicitly allowed in which case * your current regulatory domain would not have the passive scan * flag. * * Since AP mode uses monitor interfaces to inject/TX management * frames we can make AP mode the exception to this rule once it * supports radar detection as its implementation can deal with * radar detection by itself. We can do that later by adding a * monitor flag interfaces used for AP support. */ if (!cfg80211_reg_can_beacon(local->hw.wiphy, chandef, sdata->vif.type)) goto fail_rcu; info->band = chandef->chan->band; /* process and remove the injection radiotap header */ if (!ieee80211_parse_tx_radiotap(local, skb)) goto fail_rcu; ieee80211_xmit(sdata, NULL, skb, 0); rcu_read_unlock(); return NETDEV_TX_OK; fail_rcu: rcu_read_unlock(); fail: dev_kfree_skb(skb); return NETDEV_TX_OK; /* meaning, we dealt with the skb */ } #ifdef CPTCFG_MAC80211_LATENCY_MEASUREMENTS /* * Measure Tx frame arrival time for Tx latency & Tx consecutive packet loss * statistics calculation. * A single Tx frame latency should be measured from when it is entering the * Kernel until we receive Tx complete confirmation indication and the skb is * freed. 
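* The millisecond arrival time is packed into the upper 32 bits of
* skb->tstamp below, leaving the lower bits untouched.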
*/ static void ieee80211_tx_latency_start_msrmnt(struct ieee80211_local *local, struct sk_buff *skb) { struct ieee80211_tx_latency_bin_ranges *tx_latency; struct ieee80211_tx_consec_loss_ranges *tx_consec; struct ieee80211_tx_latency_threshold *tx_thrshld; s64 temp; tx_latency = rcu_dereference(local->tx_latency); tx_consec = rcu_dereference(local->tx_consec); tx_thrshld = rcu_dereference(local->tx_threshold); if (!tx_latency && !tx_consec && !tx_thrshld) return; temp = ktime_to_ms(ktime_get()); #if LINUX_VERSION_IS_LESS(4,10,0) skb->tstamp.tv64 += temp << 32; #else skb->tstamp += temp << 32; #endif } #endif /* CPTCFG_MAC80211_LATENCY_MEASUREMENTS */ static inline bool ieee80211_is_tdls_setup(struct sk_buff *skb) { u16 ethertype = (skb->data[12] << 8) | skb->data[13]; return ethertype == ETH_P_TDLS && skb->len > 14 && skb->data[14] == WLAN_TDLS_SNAP_RFTYPE; } static int ieee80211_lookup_ra_sta(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, struct sta_info **sta_out) { struct sta_info *sta; switch (sdata->vif.type) { case NL80211_IFTYPE_AP_VLAN: sta = rcu_dereference(sdata->u.vlan.sta); if (sta) { *sta_out = sta; return 0; } else if (sdata->wdev.use_4addr) { return -ENOLINK; } /* fall through */ case NL80211_IFTYPE_AP: case NL80211_IFTYPE_OCB: case NL80211_IFTYPE_ADHOC: if (is_multicast_ether_addr(skb->data)) { *sta_out = ERR_PTR(-ENOENT); return 0; } sta = sta_info_get_bss(sdata, skb->data); break; case NL80211_IFTYPE_WDS: sta = sta_info_get(sdata, sdata->u.wds.remote_addr); break; #ifdef CPTCFG_MAC80211_MESH case NL80211_IFTYPE_MESH_POINT: /* determined much later */ *sta_out = NULL; return 0; #endif case NL80211_IFTYPE_STATION: if (sdata->wdev.wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS) { sta = sta_info_get(sdata, skb->data); if (sta && test_sta_flag(sta, WLAN_STA_TDLS_PEER)) { if (test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH)) { *sta_out = sta; return 0; } /* * TDLS link during setup - throw out frames to * peer. Allow TDLS-setup frames to unauthorized * peers for the special case of a link teardown * after a TDLS sta is removed due to being * unreachable. */ if (!ieee80211_is_tdls_setup(skb)) return -EINVAL; } } sta = sta_info_get(sdata, sdata->u.mgd.bssid); if (!sta) return -ENOLINK; break; default: return -EINVAL; } *sta_out = sta ?: ERR_PTR(-ENOENT); return 0; } /** * ieee80211_build_hdr - build 802.11 header in the given frame * @sdata: virtual interface to build the header for * @skb: the skb to build the header in * @info_flags: skb flags to set * * This function takes the skb with 802.3 header and reformats the header to * the appropriate IEEE 802.11 header based on which interface the packet is * being transmitted on. * * Note that this function also takes care of the TX status request and * potential unsharing of the SKB - this needs to be interleaved with the * header building. 
* * The function requires the read-side RCU lock held * * Returns: the (possibly reallocated) skb or an ERR_PTR() code */ static struct sk_buff *ieee80211_build_hdr(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, u32 info_flags, struct sta_info *sta) { struct ieee80211_local *local = sdata->local; struct ieee80211_tx_info *info; int head_need; u16 ethertype, hdrlen, meshhdrlen = 0; __le16 fc; struct ieee80211_hdr hdr; struct ieee80211s_hdr mesh_hdr __maybe_unused; struct mesh_path __maybe_unused *mppath = NULL, *mpath = NULL; const u8 *encaps_data; int encaps_len, skip_header_bytes; bool wme_sta = false, authorized = false; bool tdls_peer; bool multicast; u16 info_id = 0; struct ieee80211_chanctx_conf *chanctx_conf; struct ieee80211_sub_if_data *ap_sdata; enum nl80211_band band; int ret; if (IS_ERR(sta)) sta = NULL; /* convert Ethernet header to proper 802.11 header (based on * operation mode) */ ethertype = (skb->data[12] << 8) | skb->data[13]; fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA); switch (sdata->vif.type) { case NL80211_IFTYPE_AP_VLAN: if (sdata->wdev.use_4addr) { fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS); /* RA TA DA SA */ memcpy(hdr.addr1, sta->sta.addr, ETH_ALEN); memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN); memcpy(hdr.addr3, skb->data, ETH_ALEN); memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN); hdrlen = 30; authorized = test_sta_flag(sta, WLAN_STA_AUTHORIZED); wme_sta = sta->sta.wme; } ap_sdata = container_of(sdata->bss, struct ieee80211_sub_if_data, u.ap); chanctx_conf = rcu_dereference(ap_sdata->vif.chanctx_conf); if (!chanctx_conf) { ret = -ENOTCONN; goto free; } band = chanctx_conf->def.chan->band; if (sdata->wdev.use_4addr) break; /* fall through */ case NL80211_IFTYPE_AP: if (sdata->vif.type == NL80211_IFTYPE_AP) chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); if (!chanctx_conf) { ret = -ENOTCONN; goto free; } fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS); /* DA BSSID SA */ memcpy(hdr.addr1, skb->data, ETH_ALEN); memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN); memcpy(hdr.addr3, skb->data + ETH_ALEN, ETH_ALEN); hdrlen = 24; band = chanctx_conf->def.chan->band; break; case NL80211_IFTYPE_WDS: fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS); /* RA TA DA SA */ memcpy(hdr.addr1, sdata->u.wds.remote_addr, ETH_ALEN); memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN); memcpy(hdr.addr3, skb->data, ETH_ALEN); memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN); hdrlen = 30; /* * This is the exception! WDS style interfaces are prohibited * when channel contexts are in used so this must be valid */ band = local->hw.conf.chandef.chan->band; break; #ifdef CPTCFG_MAC80211_MESH case NL80211_IFTYPE_MESH_POINT: if (!is_multicast_ether_addr(skb->data)) { struct sta_info *next_hop; bool mpp_lookup = true; mpath = mesh_path_lookup(sdata, skb->data); if (mpath) { mpp_lookup = false; next_hop = rcu_dereference(mpath->next_hop); if (!next_hop || !(mpath->flags & (MESH_PATH_ACTIVE | MESH_PATH_RESOLVING))) mpp_lookup = true; } if (mpp_lookup) { mppath = mpp_path_lookup(sdata, skb->data); if (mppath) mppath->exp_time = jiffies; } if (mppath && mpath) mesh_path_del(sdata, mpath->dst); } /* * Use address extension if it is a packet from * another interface or if we know the destination * is being proxied by a portal (i.e. 
portal address * differs from proxied address) */ if (ether_addr_equal(sdata->vif.addr, skb->data + ETH_ALEN) && !(mppath && !ether_addr_equal(mppath->mpp, skb->data))) { hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc, skb->data, skb->data + ETH_ALEN); meshhdrlen = ieee80211_new_mesh_header(sdata, &mesh_hdr, NULL, NULL); } else { /* DS -> MBSS (802.11-2012 13.11.3.3). * For unicast with unknown forwarding information, * destination might be in the MBSS or if that fails * forwarded to another mesh gate. In either case * resolution will be handled in ieee80211_xmit(), so * leave the original DA. This also works for mcast */ const u8 *mesh_da = skb->data; if (mppath) mesh_da = mppath->mpp; else if (mpath) mesh_da = mpath->dst; hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc, mesh_da, sdata->vif.addr); if (is_multicast_ether_addr(mesh_da)) /* DA TA mSA AE:SA */ meshhdrlen = ieee80211_new_mesh_header( sdata, &mesh_hdr, skb->data + ETH_ALEN, NULL); else /* RA TA mDA mSA AE:DA SA */ meshhdrlen = ieee80211_new_mesh_header( sdata, &mesh_hdr, skb->data, skb->data + ETH_ALEN); } chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); if (!chanctx_conf) { ret = -ENOTCONN; goto free; } band = chanctx_conf->def.chan->band; break; #endif case NL80211_IFTYPE_STATION: /* we already did checks when looking up the RA STA */ tdls_peer = test_sta_flag(sta, WLAN_STA_TDLS_PEER); if (tdls_peer) { /* DA SA BSSID */ memcpy(hdr.addr1, skb->data, ETH_ALEN); memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN); memcpy(hdr.addr3, sdata->u.mgd.bssid, ETH_ALEN); hdrlen = 24; } else if (sdata->u.mgd.use_4addr && cpu_to_be16(ethertype) != sdata->control_port_protocol) { fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS); /* RA TA DA SA */ memcpy(hdr.addr1, sdata->u.mgd.bssid, ETH_ALEN); memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN); memcpy(hdr.addr3, skb->data, ETH_ALEN); memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN); hdrlen = 30; } else { fc |= cpu_to_le16(IEEE80211_FCTL_TODS); /* BSSID SA DA */ memcpy(hdr.addr1, sdata->u.mgd.bssid, ETH_ALEN); memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN); memcpy(hdr.addr3, skb->data, ETH_ALEN); hdrlen = 24; } chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); if (!chanctx_conf) { ret = -ENOTCONN; goto free; } band = chanctx_conf->def.chan->band; break; case NL80211_IFTYPE_OCB: /* DA SA BSSID */ memcpy(hdr.addr1, skb->data, ETH_ALEN); memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN); eth_broadcast_addr(hdr.addr3); hdrlen = 24; chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); if (!chanctx_conf) { ret = -ENOTCONN; goto free; } band = chanctx_conf->def.chan->band; break; case NL80211_IFTYPE_ADHOC: /* DA SA BSSID */ memcpy(hdr.addr1, skb->data, ETH_ALEN); memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN); memcpy(hdr.addr3, sdata->u.ibss.bssid, ETH_ALEN); hdrlen = 24; chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); if (!chanctx_conf) { ret = -ENOTCONN; goto free; } band = chanctx_conf->def.chan->band; break; default: ret = -EINVAL; goto free; } multicast = is_multicast_ether_addr(hdr.addr1); /* sta is always NULL for mesh */ if (sta) { authorized = test_sta_flag(sta, WLAN_STA_AUTHORIZED); wme_sta = sta->sta.wme; } else if (ieee80211_vif_is_mesh(&sdata->vif)) { /* For mesh, the use of the QoS header is mandatory */ wme_sta = true; } /* receiver does QoS (which also means we do) use it */ if (wme_sta) { fc |= cpu_to_le16(IEEE80211_STYPE_QOS_DATA); hdrlen += 2; } /* * Drop unicast frames to unauthorised stations unless they are * EAPOL frames from the 
local station. */ if (unlikely(!ieee80211_vif_is_mesh(&sdata->vif) && (sdata->vif.type != NL80211_IFTYPE_OCB) && !multicast && !authorized && (cpu_to_be16(ethertype) != sdata->control_port_protocol || !ether_addr_equal(sdata->vif.addr, skb->data + ETH_ALEN)))) { #ifdef CPTCFG_MAC80211_VERBOSE_DEBUG net_info_ratelimited("%s: dropped frame to %pM (unauthorized port)\n", sdata->name, hdr.addr1); #endif I802_DEBUG_INC(local->tx_handlers_drop_unauth_port); ret = -EPERM; goto free; } if (unlikely(!multicast && skb->sk && skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS)) { struct sk_buff *ack_skb = skb_clone_sk(skb); if (ack_skb) { unsigned long flags; int id; spin_lock_irqsave(&local->ack_status_lock, flags); id = idr_alloc(&local->ack_status_frames, ack_skb, 1, 0x10000, GFP_ATOMIC); spin_unlock_irqrestore(&local->ack_status_lock, flags); if (id >= 0) { info_id = id; info_flags |= IEEE80211_TX_CTL_REQ_TX_STATUS; } else { kfree_skb(ack_skb); } } } /* * If the skb is shared we need to obtain our own copy. */ if (skb_shared(skb)) { struct sk_buff *tmp_skb = skb; /* can't happen -- skb is a clone if info_id != 0 */ WARN_ON(info_id); skb = skb_clone(skb, GFP_ATOMIC); kfree_skb(tmp_skb); if (!skb) { ret = -ENOMEM; goto free; } } hdr.frame_control = fc; hdr.duration_id = 0; hdr.seq_ctrl = 0; skip_header_bytes = ETH_HLEN; if (ethertype == ETH_P_AARP || ethertype == ETH_P_IPX) { encaps_data = bridge_tunnel_header; encaps_len = sizeof(bridge_tunnel_header); skip_header_bytes -= 2; } else if (ethertype >= ETH_P_802_3_MIN) { encaps_data = rfc1042_header; encaps_len = sizeof(rfc1042_header); skip_header_bytes -= 2; } else { encaps_data = NULL; encaps_len = 0; } skb_pull(skb, skip_header_bytes); head_need = hdrlen + encaps_len + meshhdrlen - skb_headroom(skb); /* * So we need to modify the skb header and hence need a copy of * that. The head_need variable above doesn't, so far, include * the needed header space that we don't need right away. If we * can, then we don't reallocate right now but only after the * frame arrives at the master device (if it does...) * * If we cannot, however, then we will reallocate to include all * the ever needed space. Also, if we need to reallocate it anyway, * make it big enough for everything we may ever need. */ if (head_need > 0 || skb_cloned(skb)) { head_need += sdata->encrypt_headroom; head_need += local->tx_headroom; head_need = max_t(int, 0, head_need); if (ieee80211_skb_resize(sdata, skb, head_need, true)) { ieee80211_free_txskb(&local->hw, skb); skb = NULL; return ERR_PTR(-ENOMEM); } } if (encaps_data) memcpy(skb_push(skb, encaps_len), encaps_data, encaps_len); #ifdef CPTCFG_MAC80211_MESH if (meshhdrlen > 0) memcpy(skb_push(skb, meshhdrlen), &mesh_hdr, meshhdrlen); #endif if (ieee80211_is_data_qos(fc)) { __le16 *qos_control; qos_control = skb_push(skb, 2); memcpy(skb_push(skb, hdrlen - 2), &hdr, hdrlen - 2); /* * Maybe we could actually set some fields here, for now just * initialise to zero to indicate no special operation. */ *qos_control = 0; } else memcpy(skb_push(skb, hdrlen), &hdr, hdrlen); skb_reset_mac_header(skb); info = IEEE80211_SKB_CB(skb); memset(info, 0, sizeof(*info)); info->flags = info_flags; info->ack_frame_id = info_id; info->band = band; return skb; free: kfree_skb(skb); return ERR_PTR(ret); } /* * fast-xmit overview * * The core idea of this fast-xmit is to remove per-packet checks by checking * them out of band. 
ieee80211_check_fast_xmit() implements the out-of-band * checks that are needed to get the sta->fast_tx pointer assigned, after which * much less work can be done per packet. For example, fragmentation must be * disabled or the fast_tx pointer will not be set. All the conditions are seen * in the code here. * * Once assigned, the fast_tx data structure also caches the per-packet 802.11 * header and other data to aid packet processing in ieee80211_xmit_fast(). * * The most difficult part of this is that when any of these assumptions * change, an external trigger (i.e. a call to ieee80211_clear_fast_xmit(), * ieee80211_check_fast_xmit() or friends) is required to reset the data, * since the per-packet code no longer checks the conditions. This is reflected * by the calls to these functions throughout the rest of the code, and must be * maintained if any of the TX path checks change. */ void ieee80211_check_fast_xmit(struct sta_info *sta) { struct ieee80211_fast_tx build = {}, *fast_tx = NULL, *old; struct ieee80211_local *local = sta->local; struct ieee80211_sub_if_data *sdata = sta->sdata; struct ieee80211_hdr *hdr = (void *)build.hdr; struct ieee80211_chanctx_conf *chanctx_conf; __le16 fc; if (!ieee80211_hw_check(&local->hw, SUPPORT_FAST_XMIT)) return; /* Locking here protects both the pointer itself, and against concurrent * invocations winning data access races to, e.g., the key pointer that * is used. * Without it, the invocation of this function right after the key * pointer changes wouldn't be sufficient, as another CPU could access * the pointer, then stall, and then do the cache update after the CPU * that invalidated the key. * With the locking, such scenarios cannot happen as the check for the * key and the fast-tx assignment are done atomically, so the CPU that * modifies the key will either wait or other one will see the key * cleared/changed already. 
*/ spin_lock_bh(&sta->lock); if (ieee80211_hw_check(&local->hw, SUPPORTS_PS) && !ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS) && sdata->vif.type == NL80211_IFTYPE_STATION) goto out; if (!test_sta_flag(sta, WLAN_STA_AUTHORIZED)) goto out; if (test_sta_flag(sta, WLAN_STA_PS_STA) || test_sta_flag(sta, WLAN_STA_PS_DRIVER) || test_sta_flag(sta, WLAN_STA_PS_DELIVER) || test_sta_flag(sta, WLAN_STA_CLEAR_PS_FILT)) goto out; if (sdata->noack_map) goto out; /* fast-xmit doesn't handle fragmentation at all */ if (local->hw.wiphy->frag_threshold != (u32)-1 && !ieee80211_hw_check(&local->hw, SUPPORTS_TX_FRAG)) goto out; rcu_read_lock(); chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); if (!chanctx_conf) { rcu_read_unlock(); goto out; } build.band = chanctx_conf->def.chan->band; rcu_read_unlock(); fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA); switch (sdata->vif.type) { case NL80211_IFTYPE_ADHOC: /* DA SA BSSID */ build.da_offs = offsetof(struct ieee80211_hdr, addr1); build.sa_offs = offsetof(struct ieee80211_hdr, addr2); memcpy(hdr->addr3, sdata->u.ibss.bssid, ETH_ALEN); build.hdr_len = 24; break; case NL80211_IFTYPE_STATION: if (test_sta_flag(sta, WLAN_STA_TDLS_PEER)) { /* DA SA BSSID */ build.da_offs = offsetof(struct ieee80211_hdr, addr1); build.sa_offs = offsetof(struct ieee80211_hdr, addr2); memcpy(hdr->addr3, sdata->u.mgd.bssid, ETH_ALEN); build.hdr_len = 24; break; } if (sdata->u.mgd.use_4addr) { /* non-regular ethertype cannot use the fastpath */ fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS); /* RA TA DA SA */ memcpy(hdr->addr1, sdata->u.mgd.bssid, ETH_ALEN); memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN); build.da_offs = offsetof(struct ieee80211_hdr, addr3); build.sa_offs = offsetof(struct ieee80211_hdr, addr4); build.hdr_len = 30; break; } fc |= cpu_to_le16(IEEE80211_FCTL_TODS); /* BSSID SA DA */ memcpy(hdr->addr1, sdata->u.mgd.bssid, ETH_ALEN); build.da_offs = offsetof(struct ieee80211_hdr, addr3); build.sa_offs = offsetof(struct ieee80211_hdr, addr2); build.hdr_len = 24; break; case NL80211_IFTYPE_AP_VLAN: if (sdata->wdev.use_4addr) { fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS); /* RA TA DA SA */ memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN); memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN); build.da_offs = offsetof(struct ieee80211_hdr, addr3); build.sa_offs = offsetof(struct ieee80211_hdr, addr4); build.hdr_len = 30; break; } /* fall through */ case NL80211_IFTYPE_AP: fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS); /* DA BSSID SA */ build.da_offs = offsetof(struct ieee80211_hdr, addr1); memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN); build.sa_offs = offsetof(struct ieee80211_hdr, addr3); build.hdr_len = 24; break; default: /* not handled on fast-xmit */ goto out; } if (sta->sta.wme) { build.hdr_len += 2; fc |= cpu_to_le16(IEEE80211_STYPE_QOS_DATA); } /* We store the key here so there's no point in using rcu_dereference() * but that's fine because the code that changes the pointers will call * this function after doing so. For a single CPU that would be enough, * for multiple see the comment above. 
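* (sta->lock, held for the whole of this function, provides that
* ordering.)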
*/ build.key = rcu_access_pointer(sta->ptk[sta->ptk_idx]); if (!build.key) build.key = rcu_access_pointer(sdata->default_unicast_key); if (build.key) { bool gen_iv, iv_spc, mmic; gen_iv = build.key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV; iv_spc = build.key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE; mmic = build.key->conf.flags & (IEEE80211_KEY_FLAG_GENERATE_MMIC | IEEE80211_KEY_FLAG_PUT_MIC_SPACE); /* don't handle software crypto */ if (!(build.key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) goto out; /* Key is being removed */ if (build.key->flags & KEY_FLAG_TAINTED) goto out; switch (build.key->conf.cipher) { case WLAN_CIPHER_SUITE_CCMP: case WLAN_CIPHER_SUITE_CCMP_256: /* add fixed key ID */ if (gen_iv) { (build.hdr + build.hdr_len)[3] = 0x20 | (build.key->conf.keyidx << 6); build.pn_offs = build.hdr_len; } if (gen_iv || iv_spc) build.hdr_len += IEEE80211_CCMP_HDR_LEN; break; case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: /* add fixed key ID */ if (gen_iv) { (build.hdr + build.hdr_len)[3] = 0x20 | (build.key->conf.keyidx << 6); build.pn_offs = build.hdr_len; } if (gen_iv || iv_spc) build.hdr_len += IEEE80211_GCMP_HDR_LEN; break; case WLAN_CIPHER_SUITE_TKIP: /* cannot handle MMIC or IV generation in xmit-fast */ if (mmic || gen_iv) goto out; if (iv_spc) build.hdr_len += IEEE80211_TKIP_IV_LEN; break; case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: /* cannot handle IV generation in fast-xmit */ if (gen_iv) goto out; if (iv_spc) build.hdr_len += IEEE80211_WEP_IV_LEN; break; case WLAN_CIPHER_SUITE_AES_CMAC: case WLAN_CIPHER_SUITE_BIP_CMAC_256: case WLAN_CIPHER_SUITE_BIP_GMAC_128: case WLAN_CIPHER_SUITE_BIP_GMAC_256: WARN(1, "management cipher suite 0x%x enabled for data\n", build.key->conf.cipher); goto out; default: /* we don't know how to generate IVs for this at all */ if (WARN_ON(gen_iv)) goto out; /* pure hardware keys are OK, of course */ if (!(build.key->flags & KEY_FLAG_CIPHER_SCHEME)) break; /* cipher scheme might require space allocation */ if (iv_spc && build.key->conf.iv_len > IEEE80211_FAST_XMIT_MAX_IV) goto out; if (iv_spc) build.hdr_len += build.key->conf.iv_len; } fc |= cpu_to_le16(IEEE80211_FCTL_PROTECTED); } hdr->frame_control = fc; memcpy(build.hdr + build.hdr_len, rfc1042_header, sizeof(rfc1042_header)); build.hdr_len += sizeof(rfc1042_header); fast_tx = kmemdup(&build, sizeof(build), GFP_ATOMIC); /* if the kmemdup fails, continue w/o fast_tx */ if (!fast_tx) goto out; out: /* we might have raced against another call to this function */ old = rcu_dereference_protected(sta->fast_tx, lockdep_is_held(&sta->lock)); rcu_assign_pointer(sta->fast_tx, fast_tx); if (old) kfree_rcu(old, rcu_head); spin_unlock_bh(&sta->lock); } void ieee80211_check_fast_xmit_all(struct ieee80211_local *local) { struct sta_info *sta; rcu_read_lock(); list_for_each_entry_rcu(sta, &local->sta_list, list) ieee80211_check_fast_xmit(sta); rcu_read_unlock(); } void ieee80211_check_fast_xmit_iface(struct ieee80211_sub_if_data *sdata) { struct ieee80211_local *local = sdata->local; struct sta_info *sta; rcu_read_lock(); list_for_each_entry_rcu(sta, &local->sta_list, list) { if (sdata != sta->sdata && (!sta->sdata->bss || sta->sdata->bss != sdata->bss)) continue; ieee80211_check_fast_xmit(sta); } rcu_read_unlock(); } void ieee80211_clear_fast_xmit(struct sta_info *sta) { struct ieee80211_fast_tx *fast_tx; spin_lock_bh(&sta->lock); fast_tx = rcu_dereference_protected(sta->fast_tx, lockdep_is_held(&sta->lock)); RCU_INIT_POINTER(sta->fast_tx, NULL); spin_unlock_bh(&sta->lock); 
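/* free outside the lock; kfree_rcu() waits for a grace period so
 * lockless readers still holding the old pointer remain safe */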
if (fast_tx) kfree_rcu(fast_tx, rcu_head); } static bool ieee80211_amsdu_realloc_pad(struct ieee80211_local *local, struct sk_buff *skb, int headroom) { if (skb_headroom(skb) < headroom) { I802_DEBUG_INC(local->tx_expand_skb_head); if (pskb_expand_head(skb, headroom, 0, GFP_ATOMIC)) { wiphy_debug(local->hw.wiphy, "failed to reallocate TX buffer\n"); return false; } } return true; } static bool ieee80211_amsdu_prepare_head(struct ieee80211_sub_if_data *sdata, struct ieee80211_fast_tx *fast_tx, struct sk_buff *skb) { struct ieee80211_local *local = sdata->local; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_hdr *hdr; struct ethhdr *amsdu_hdr; int hdr_len = fast_tx->hdr_len - sizeof(rfc1042_header); int subframe_len = skb->len - hdr_len; void *data; u8 *qc, *h_80211_src, *h_80211_dst; const u8 *bssid; if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) return false; if (info->control.flags & IEEE80211_TX_CTRL_AMSDU) return true; if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(*amsdu_hdr))) return false; data = skb_push(skb, sizeof(*amsdu_hdr)); memmove(data, data + sizeof(*amsdu_hdr), hdr_len); hdr = data; amsdu_hdr = data + hdr_len; /* h_80211_src/dst is addr* field within hdr */ h_80211_src = data + fast_tx->sa_offs; h_80211_dst = data + fast_tx->da_offs; amsdu_hdr->h_proto = cpu_to_be16(subframe_len); ether_addr_copy(amsdu_hdr->h_source, h_80211_src); ether_addr_copy(amsdu_hdr->h_dest, h_80211_dst); /* according to IEEE 802.11-2012 8.3.2 table 8-19, the outer SA/DA * fields needs to be changed to BSSID for A-MSDU frames depending * on FromDS/ToDS values. */ switch (sdata->vif.type) { case NL80211_IFTYPE_STATION: bssid = sdata->u.mgd.bssid; break; case NL80211_IFTYPE_AP: case NL80211_IFTYPE_AP_VLAN: bssid = sdata->vif.addr; break; default: bssid = NULL; } if (bssid && ieee80211_has_fromds(hdr->frame_control)) ether_addr_copy(h_80211_src, bssid); if (bssid && ieee80211_has_tods(hdr->frame_control)) ether_addr_copy(h_80211_dst, bssid); qc = ieee80211_get_qos_ctl(hdr); *qc |= IEEE80211_QOS_CTL_A_MSDU_PRESENT; info->control.flags |= IEEE80211_TX_CTRL_AMSDU; return true; } static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata, struct sta_info *sta, struct ieee80211_fast_tx *fast_tx, struct sk_buff *skb) { struct ieee80211_local *local = sdata->local; struct fq *fq = &local->fq; struct fq_tin *tin; struct fq_flow *flow; u8 tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK; struct ieee80211_txq *txq = sta->sta.txq[tid]; struct txq_info *txqi; struct sk_buff **frag_tail, *head; int subframe_len = skb->len - ETH_ALEN; u8 max_subframes = sta->sta.max_amsdu_subframes; int max_frags = local->hw.max_tx_fragments; int max_amsdu_len = sta->sta.max_amsdu_len; __be16 len; void *data; bool ret = false; unsigned int orig_len; int n = 2, nfrags, pad = 0; u16 hdrlen; if (!ieee80211_hw_check(&local->hw, TX_AMSDU)) return false; if (skb_is_gso(skb)) return false; if (!txq) return false; txqi = to_txq_info(txq); if (test_bit(IEEE80211_TXQ_NO_AMSDU, &txqi->flags)) return false; if (sta->sta.max_rc_amsdu_len) max_amsdu_len = min_t(int, max_amsdu_len, sta->sta.max_rc_amsdu_len); if (sta->sta.max_tid_amsdu_len[tid]) max_amsdu_len = min_t(int, max_amsdu_len, sta->sta.max_tid_amsdu_len[tid]); spin_lock_bh(&fq->lock); /* TODO: Ideally aggregation should be done on dequeue to remain * responsive to environment changes. 
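* For now subframes are merged at enqueue time by appending to the
* tail skb of the classified flow, see below.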
*/ tin = &txqi->tin; flow = fq_flow_classify(fq, tin, skb, fq_flow_get_default_func); head = skb_peek_tail(&flow->queue); if (!head || skb_is_gso(head)) goto out; orig_len = head->len; if (skb->len + head->len > max_amsdu_len) goto out; if (!drv_can_aggregate_in_amsdu(local, head, skb)) goto out; nfrags = 1 + skb_shinfo(skb)->nr_frags; nfrags += 1 + skb_shinfo(head)->nr_frags; frag_tail = &skb_shinfo(head)->frag_list; while (*frag_tail) { nfrags += 1 + skb_shinfo(*frag_tail)->nr_frags; frag_tail = &(*frag_tail)->next; n++; } if (max_subframes && n > max_subframes) goto out; if (max_frags && nfrags > max_frags) goto out; if (!drv_can_aggregate_in_amsdu(local, head, skb)) goto out; if (!ieee80211_amsdu_prepare_head(sdata, fast_tx, head)) goto out; /* * Pad out the previous subframe to a multiple of 4 by adding the * padding to the next one, that's being added. Note that head->len * is the length of the full A-MSDU, but that works since each time * we add a new subframe we pad out the previous one to a multiple * of 4 and thus it no longer matters in the next round. */ hdrlen = fast_tx->hdr_len - sizeof(rfc1042_header); if ((head->len - hdrlen) & 3) pad = 4 - ((head->len - hdrlen) & 3); if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(rfc1042_header) + 2 + pad)) goto out_recalc; ret = true; data = skb_push(skb, ETH_ALEN + 2); memmove(data, data + ETH_ALEN + 2, 2 * ETH_ALEN); data += 2 * ETH_ALEN; len = cpu_to_be16(subframe_len); memcpy(data, &len, 2); memcpy(data + 2, rfc1042_header, sizeof(rfc1042_header)); memset(skb_push(skb, pad), 0, pad); head->len += skb->len; head->data_len += skb->len; *frag_tail = skb; out_recalc: if (head->len != orig_len) { flow->backlog += head->len - orig_len; tin->backlog_bytes += head->len - orig_len; fq_recalc_backlog(fq, tin, flow); } out: spin_unlock_bh(&fq->lock); return ret; } /* * Can be called while the sta lock is held. Anything that can cause packets to * be generated will cause deadlock! 
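* ieee80211_xmit_fast_finish() below respects this: it only finalizes
* the frame (sequence/PN, counters) and never generates new ones.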
*/ static void ieee80211_xmit_fast_finish(struct ieee80211_sub_if_data *sdata, struct sta_info *sta, u8 pn_offs, struct ieee80211_key *key, struct sk_buff *skb) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_hdr *hdr = (void *)skb->data; u8 tid = IEEE80211_NUM_TIDS; if (key) info->control.hw_key = &key->conf; ieee80211_tx_stats(skb->dev, skb->len); if (hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) { tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK; hdr->seq_ctrl = ieee80211_tx_next_seq(sta, tid); } else { info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ; hdr->seq_ctrl = cpu_to_le16(sdata->sequence_number); sdata->sequence_number += 0x10; } if (skb_shinfo(skb)->gso_size) sta->tx_stats.msdu[tid] += DIV_ROUND_UP(skb->len, skb_shinfo(skb)->gso_size); else sta->tx_stats.msdu[tid]++; info->hw_queue = sdata->vif.hw_queue[skb_get_queue_mapping(skb)]; /* statistics normally done by ieee80211_tx_h_stats (but that * has to consider fragmentation, so is more complex) */ sta->tx_stats.bytes[skb_get_queue_mapping(skb)] += skb->len; sta->tx_stats.packets[skb_get_queue_mapping(skb)]++; if (pn_offs) { u64 pn; u8 *crypto_hdr = skb->data + pn_offs; switch (key->conf.cipher) { case WLAN_CIPHER_SUITE_CCMP: case WLAN_CIPHER_SUITE_CCMP_256: case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: pn = atomic64_inc_return(&key->conf.tx_pn); crypto_hdr[0] = pn; crypto_hdr[1] = pn >> 8; crypto_hdr[4] = pn >> 16; crypto_hdr[5] = pn >> 24; crypto_hdr[6] = pn >> 32; crypto_hdr[7] = pn >> 40; break; } } } static bool ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata, struct sta_info *sta, struct ieee80211_fast_tx *fast_tx, struct sk_buff *skb) { struct ieee80211_local *local = sdata->local; u16 ethertype = (skb->data[12] << 8) | skb->data[13]; int extra_head = fast_tx->hdr_len - (ETH_HLEN - 2); int hw_headroom = sdata->local->hw.extra_tx_headroom; struct ethhdr eth; struct ieee80211_tx_info *info; struct ieee80211_hdr *hdr = (void *)fast_tx->hdr; struct ieee80211_tx_data tx; ieee80211_tx_result r; struct tid_ampdu_tx *tid_tx = NULL; u8 tid = IEEE80211_NUM_TIDS; /* control port protocol needs a lot of special handling */ if (cpu_to_be16(ethertype) == sdata->control_port_protocol) return false; /* only RFC 1042 SNAP */ if (ethertype < ETH_P_802_3_MIN) return false; /* don't handle TX status request here either */ if (skb->sk && skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS) return false; if (hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) { tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK; tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]); if (tid_tx) { if (!test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) return false; if (tid_tx->timeout) tid_tx->last_tx = jiffies; } } /* after this point (skb is modified) we cannot return false */ if (skb_shared(skb)) { struct sk_buff *tmp_skb = skb; skb = skb_clone(skb, GFP_ATOMIC); kfree_skb(tmp_skb); if (!skb) return true; } if ((hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) && ieee80211_amsdu_aggregate(sdata, sta, fast_tx, skb)) return true; /* will not be crypto-handled beyond what we do here, so use false * as the may-encrypt argument for the resize to not account for * more room than we already have in 'extra_head' */ if (unlikely(ieee80211_skb_resize(sdata, skb, max_t(int, extra_head + hw_headroom - skb_headroom(skb), 0), false))) { kfree_skb(skb); return true; } memcpy(ð, skb->data, ETH_HLEN - 2); hdr = skb_push(skb, extra_head); memcpy(skb->data, fast_tx->hdr, fast_tx->hdr_len); 
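	/*
	 * A sketch of what happens next (informal, inferred from the code
	 * below): 'eth' still holds the original Ethernet DA/SA, while
	 * fast_tx->da_offs/sa_offs point at the matching address fields
	 * inside the prebuilt 802.11 header template that was just copied
	 * in, so the two memcpy() calls patch the real addresses into the
	 * template without rebuilding the 802.11 header per frame.
	 */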
memcpy(skb->data + fast_tx->da_offs, eth.h_dest, ETH_ALEN); memcpy(skb->data + fast_tx->sa_offs, eth.h_source, ETH_ALEN); info = IEEE80211_SKB_CB(skb); memset(info, 0, sizeof(*info)); info->band = fast_tx->band; info->control.vif = &sdata->vif; info->flags = IEEE80211_TX_CTL_FIRST_FRAGMENT | IEEE80211_TX_CTL_DONTFRAG | (tid_tx ? IEEE80211_TX_CTL_AMPDU : 0); info->control.flags = IEEE80211_TX_CTRL_FAST_XMIT; if (hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) { tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK; *ieee80211_get_qos_ctl(hdr) = tid; } __skb_queue_head_init(&tx.skbs); tx.flags = IEEE80211_TX_UNICAST; tx.local = local; tx.sdata = sdata; tx.sta = sta; tx.key = fast_tx->key; if (!ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL)) { tx.skb = skb; r = ieee80211_tx_h_rate_ctrl(&tx); skb = tx.skb; tx.skb = NULL; if (r != TX_CONTINUE) { if (r != TX_QUEUED) kfree_skb(skb); return true; } } if (ieee80211_queue_skb(local, sdata, sta, skb)) return true; ieee80211_xmit_fast_finish(sdata, sta, fast_tx->pn_offs, fast_tx->key, skb); if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) sdata = container_of(sdata->bss, struct ieee80211_sub_if_data, u.ap); __skb_queue_tail(&tx.skbs, skb); ieee80211_tx_frags(local, &sdata->vif, &sta->sta, &tx.skbs, false); return true; } struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw, struct ieee80211_txq *txq) { struct ieee80211_local *local = hw_to_local(hw); struct txq_info *txqi = container_of(txq, struct txq_info, txq); struct ieee80211_hdr *hdr; struct sk_buff *skb = NULL; struct fq *fq = &local->fq; struct fq_tin *tin = &txqi->tin; struct ieee80211_tx_info *info; struct ieee80211_tx_data tx; ieee80211_tx_result r; struct ieee80211_vif *vif = txq->vif; spin_lock_bh(&fq->lock); if (test_bit(IEEE80211_TXQ_STOP, &txqi->flags) || test_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txqi->flags)) goto out; if (vif->txqs_stopped[ieee80211_ac_from_tid(txq->tid)]) { set_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txqi->flags); goto out; } /* Make sure fragments stay together. */ skb = __skb_dequeue(&txqi->frags); if (skb) goto out; begin: skb = fq_tin_dequeue(fq, tin, fq_tin_dequeue_func); if (!skb) goto out; hdr = (struct ieee80211_hdr *)skb->data; info = IEEE80211_SKB_CB(skb); memset(&tx, 0, sizeof(tx)); __skb_queue_head_init(&tx.skbs); tx.local = local; tx.skb = skb; tx.sdata = vif_to_sdata(info->control.vif); if (txq->sta) tx.sta = container_of(txq->sta, struct sta_info, sta); /* * The key can be removed while the packet was queued, so need to call * this here to get the current key. 
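	 * (For example, the key may have been freed between enqueue and
	 * dequeue, so tx.key is re-derived below rather than trusting any
	 * key pointer captured when the frame was queued.)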
*/ r = ieee80211_tx_h_select_key(&tx); if (r != TX_CONTINUE) { ieee80211_free_txskb(&local->hw, skb); goto begin; } if (test_bit(IEEE80211_TXQ_AMPDU, &txqi->flags)) info->flags |= IEEE80211_TX_CTL_AMPDU; else info->flags &= ~IEEE80211_TX_CTL_AMPDU; if (info->control.flags & IEEE80211_TX_CTRL_FAST_XMIT) { struct sta_info *sta = container_of(txq->sta, struct sta_info, sta); u8 pn_offs = 0; if (tx.key && (tx.key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV)) pn_offs = ieee80211_hdrlen(hdr->frame_control); ieee80211_xmit_fast_finish(sta->sdata, sta, pn_offs, tx.key, skb); } else { if (invoke_tx_handlers_late(&tx)) goto begin; skb = __skb_dequeue(&tx.skbs); if (!skb_queue_empty(&tx.skbs)) skb_queue_splice_tail(&tx.skbs, &txqi->frags); } if (skb_has_frag_list(skb) && !ieee80211_hw_check(&local->hw, TX_FRAG_LIST)) { if (skb_linearize(skb)) { ieee80211_free_txskb(&local->hw, skb); goto begin; } } switch (tx.sdata->vif.type) { case NL80211_IFTYPE_MONITOR: if (tx.sdata->u.mntr.flags & MONITOR_FLAG_ACTIVE) { vif = &tx.sdata->vif; break; } tx.sdata = rcu_dereference(local->monitor_sdata); if (tx.sdata) { vif = &tx.sdata->vif; info->hw_queue = vif->hw_queue[skb_get_queue_mapping(skb)]; } else if (ieee80211_hw_check(&local->hw, QUEUE_CONTROL)) { ieee80211_free_txskb(&local->hw, skb); goto begin; } else { vif = NULL; } break; case NL80211_IFTYPE_AP_VLAN: tx.sdata = container_of(tx.sdata->bss, struct ieee80211_sub_if_data, u.ap); /* fall through */ default: vif = &tx.sdata->vif; break; } IEEE80211_SKB_CB(skb)->control.vif = vif; out: spin_unlock_bh(&fq->lock); return skb; } EXPORT_SYMBOL(ieee80211_tx_dequeue); void __ieee80211_subif_start_xmit(struct sk_buff *skb, struct net_device *dev, u32 info_flags) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct sta_info *sta; struct sk_buff *next; if (unlikely(skb->len < ETH_HLEN)) { kfree_skb(skb); return; } rcu_read_lock(); #ifdef CPTCFG_MAC80211_LATENCY_MEASUREMENTS { struct ieee80211_local *local = sdata->local; /* Measure frame arrival for Tx latency statistics * calculation */ ieee80211_tx_latency_start_msrmnt(local, skb); } #endif /* CPTCFG_MAC80211_LATENCY_MEASUREMENTS */ if (ieee80211_lookup_ra_sta(sdata, skb, &sta)) goto out_free; if (!IS_ERR_OR_NULL(sta)) { struct ieee80211_fast_tx *fast_tx; sk_pacing_shift_update(skb->sk, sdata->local->hw.tx_sk_pacing_shift); fast_tx = rcu_dereference(sta->fast_tx); if (fast_tx && ieee80211_xmit_fast(sdata, sta, fast_tx, skb)) goto out; } if (skb_is_gso(skb)) { struct sk_buff *segs; segs = skb_gso_segment(skb, 0); if (IS_ERR(segs)) { goto out_free; } else if (segs) { consume_skb(skb); skb = segs; } } else { /* we cannot process non-linear frames on this path */ if (skb_linearize(skb)) { kfree_skb(skb); goto out; } /* the frame could be fragmented, software-encrypted, and other * things so we cannot really handle checksum offload with it - * fix it up in software before we handle anything else. 
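	 * (Sketch: for CHECKSUM_PARTIAL skbs this means computing the
	 * checksum in software via skb_checksum_help() below.)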
*/ if (skb->ip_summed == CHECKSUM_PARTIAL) { skb_set_transport_header(skb, skb_checksum_start_offset(skb)); if (skb_checksum_help(skb)) goto out_free; } } next = skb; while (next) { skb = next; next = skb->next; skb->prev = NULL; skb->next = NULL; skb = ieee80211_build_hdr(sdata, skb, info_flags, sta); if (IS_ERR(skb)) goto out; ieee80211_tx_stats(dev, skb->len); ieee80211_xmit(sdata, sta, skb, 0); } goto out; out_free: kfree_skb(skb); out: rcu_read_unlock(); } static int ieee80211_change_da(struct sk_buff *skb, struct sta_info *sta) { struct ethhdr *eth; int err; err = skb_ensure_writable(skb, ETH_HLEN); if (unlikely(err)) return err; eth = (void *)skb->data; ether_addr_copy(eth->h_dest, sta->sta.addr); return 0; } static bool ieee80211_multicast_to_unicast(struct sk_buff *skb, struct net_device *dev) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); const struct ethhdr *eth = (void *)skb->data; const struct vlan_ethhdr *ethvlan = (void *)skb->data; __be16 ethertype; if (likely(!is_multicast_ether_addr(eth->h_dest))) return false; switch (sdata->vif.type) { case NL80211_IFTYPE_AP_VLAN: if (sdata->u.vlan.sta) return false; if (sdata->wdev.use_4addr) return false; /* fall through */ case NL80211_IFTYPE_AP: /* check runtime toggle for this bss */ if (!sdata->bss->multicast_to_unicast) return false; break; default: return false; } /* multicast to unicast conversion only for some payload */ ethertype = eth->h_proto; if (ethertype == htons(ETH_P_8021Q) && skb->len >= VLAN_ETH_HLEN) ethertype = ethvlan->h_vlan_encapsulated_proto; switch (ethertype) { case htons(ETH_P_ARP): case htons(ETH_P_IP): case htons(ETH_P_IPV6): break; default: return false; } return true; } static void ieee80211_convert_to_unicast(struct sk_buff *skb, struct net_device *dev, struct sk_buff_head *queue) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_local *local = sdata->local; const struct ethhdr *eth = (struct ethhdr *)skb->data; struct sta_info *sta, *first = NULL; struct sk_buff *cloned_skb; rcu_read_lock(); list_for_each_entry_rcu(sta, &local->sta_list, list) { if (sdata != sta->sdata) /* AP-VLAN mismatch */ continue; if (unlikely(ether_addr_equal(eth->h_source, sta->sta.addr))) /* do not send back to source */ continue; if (!first) { first = sta; continue; } cloned_skb = skb_clone(skb, GFP_ATOMIC); if (!cloned_skb) goto multicast; if (unlikely(ieee80211_change_da(cloned_skb, sta))) { dev_kfree_skb(cloned_skb); goto multicast; } __skb_queue_tail(queue, cloned_skb); } if (likely(first)) { if (unlikely(ieee80211_change_da(skb, first))) goto multicast; __skb_queue_tail(queue, skb); } else { /* no STA connected, drop */ kfree_skb(skb); skb = NULL; } goto out; multicast: __skb_queue_purge(queue); __skb_queue_tail(queue, skb); out: rcu_read_unlock(); } /** * ieee80211_subif_start_xmit - netif start_xmit function for 802.3 vifs * @skb: packet to be sent * @dev: incoming interface * * On failure skb will be freed. 
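 *
 * Returns: %NETDEV_TX_OK in all cases; the skb is transmitted, queued,
 * or freed, but never handed back to the stack.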
 */
netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
				       struct net_device *dev)
{
	if (unlikely(ieee80211_multicast_to_unicast(skb, dev))) {
		struct sk_buff_head queue;

		__skb_queue_head_init(&queue);
		ieee80211_convert_to_unicast(skb, dev, &queue);
		while ((skb = __skb_dequeue(&queue)))
			__ieee80211_subif_start_xmit(skb, dev, 0);
	} else {
		__ieee80211_subif_start_xmit(skb, dev, 0);
	}

	return NETDEV_TX_OK;
}

struct sk_buff *
ieee80211_build_data_template(struct ieee80211_sub_if_data *sdata,
			      struct sk_buff *skb, u32 info_flags)
{
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_data tx = {
		.local = sdata->local,
		.sdata = sdata,
	};
	struct sta_info *sta;

	rcu_read_lock();

	if (ieee80211_lookup_ra_sta(sdata, skb, &sta)) {
		kfree_skb(skb);
		skb = ERR_PTR(-EINVAL);
		goto out;
	}

	skb = ieee80211_build_hdr(sdata, skb, info_flags, sta);
	if (IS_ERR(skb))
		goto out;

	hdr = (void *)skb->data;
	tx.sta = sta_info_get(sdata, hdr->addr1);
	tx.skb = skb;

	if (ieee80211_tx_h_select_key(&tx) != TX_CONTINUE) {
		rcu_read_unlock();
		kfree_skb(skb);
		return ERR_PTR(-EINVAL);
	}

out:
	rcu_read_unlock();
	return skb;
}

/*
 * ieee80211_clear_tx_pending may not be called in a context where
 * it is possible that packets could come in again.
 */
void ieee80211_clear_tx_pending(struct ieee80211_local *local)
{
	struct sk_buff *skb;
	int i;

	for (i = 0; i < local->hw.queues; i++) {
		while ((skb = skb_dequeue(&local->pending[i])) != NULL)
			ieee80211_free_txskb(&local->hw, skb);
	}
}

/*
 * Returns false if the frame couldn't be transmitted but was queued instead,
 * which in this case means re-queued -- take as an indication to stop sending
 * more pending frames.
 */
static bool ieee80211_tx_pending_skb(struct ieee80211_local *local,
				     struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_sub_if_data *sdata;
	struct sta_info *sta;
	struct ieee80211_hdr *hdr;
	bool result;
	struct ieee80211_chanctx_conf *chanctx_conf;

	sdata = vif_to_sdata(info->control.vif);

	if (info->flags & IEEE80211_TX_INTFL_NEED_TXPROCESSING) {
		chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
		if (unlikely(!chanctx_conf)) {
			dev_kfree_skb(skb);
			return true;
		}
		info->band = chanctx_conf->def.chan->band;
		result = ieee80211_tx(sdata, NULL, skb, true, 0);
	} else {
		struct sk_buff_head skbs;

		__skb_queue_head_init(&skbs);
		__skb_queue_tail(&skbs, skb);

		hdr = (struct ieee80211_hdr *)skb->data;
		sta = sta_info_get(sdata, hdr->addr1);

		result = __ieee80211_tx(local, &skbs, skb->len, sta, true);
	}

	return result;
}

/*
 * Transmit all pending packets. Called from tasklet.
 */
void ieee80211_tx_pending(unsigned long data)
{
	struct ieee80211_local *local = (struct ieee80211_local *)data;
	unsigned long flags;
	int i;
	bool txok;

	rcu_read_lock();

	spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
	for (i = 0; i < local->hw.queues; i++) {
		/*
		 * If queue is stopped by something other than due to pending
		 * frames, or we have no pending frames, proceed to next queue.
*/ if (local->queue_stop_reasons[i] || skb_queue_empty(&local->pending[i])) continue; while (!skb_queue_empty(&local->pending[i])) { struct sk_buff *skb = __skb_dequeue(&local->pending[i]); struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); if (WARN_ON(!info->control.vif)) { ieee80211_free_txskb(&local->hw, skb); continue; } spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); txok = ieee80211_tx_pending_skb(local, skb); spin_lock_irqsave(&local->queue_stop_reason_lock, flags); if (!txok) break; } if (skb_queue_empty(&local->pending[i])) ieee80211_propagate_queue_wake(local, i); } spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); rcu_read_unlock(); } /* functions for drivers to get certain frames */ static void __ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata, struct ps_data *ps, struct sk_buff *skb, bool is_template) { u8 *pos, *tim; int aid0 = 0; int i, have_bits = 0, n1, n2; /* Generate bitmap for TIM only if there are any STAs in power save * mode. */ if (atomic_read(&ps->num_sta_ps) > 0) /* in the hope that this is faster than * checking byte-for-byte */ have_bits = !bitmap_empty((unsigned long *)ps->tim, IEEE80211_MAX_AID+1); if (!is_template) { if (ps->dtim_count == 0) ps->dtim_count = sdata->vif.bss_conf.dtim_period - 1; else ps->dtim_count--; } tim = pos = skb_put(skb, 6); *pos++ = WLAN_EID_TIM; *pos++ = 4; *pos++ = ps->dtim_count; *pos++ = sdata->vif.bss_conf.dtim_period; if (ps->dtim_count == 0 && !skb_queue_empty(&ps->bc_buf)) aid0 = 1; ps->dtim_bc_mc = aid0 == 1; if (have_bits) { /* Find largest even number N1 so that bits numbered 1 through * (N1 x 8) - 1 in the bitmap are 0 and number N2 so that bits * (N2 + 1) x 8 through 2007 are 0. */ n1 = 0; for (i = 0; i < IEEE80211_MAX_TIM_LEN; i++) { if (ps->tim[i]) { n1 = i & 0xfe; break; } } n2 = n1; for (i = IEEE80211_MAX_TIM_LEN - 1; i >= n1; i--) { if (ps->tim[i]) { n2 = i; break; } } /* Bitmap control */ *pos++ = n1 | aid0; /* Part Virt Bitmap */ skb_put(skb, n2 - n1); memcpy(pos, ps->tim + n1, n2 - n1 + 1); tim[1] = n2 - n1 + 4; } else { *pos++ = aid0; /* Bitmap control */ *pos++ = 0; /* Part Virt Bitmap */ } } static int ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata, struct ps_data *ps, struct sk_buff *skb, bool is_template) { struct ieee80211_local *local = sdata->local; /* * Not very nice, but we want to allow the driver to call * ieee80211_beacon_get() as a response to the set_tim() * callback. That, however, is already invoked under the * sta_lock to guarantee consistent and race-free update * of the tim bitmap in mac80211 and the driver. 
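	 * (Hence the tim_in_locked_section flag checked below: when it is
	 * set, the TIM is already being updated under the lock, so the
	 * lock-free path must be taken to avoid recursing on tim_lock.)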
*/ if (local->tim_in_locked_section) { __ieee80211_beacon_add_tim(sdata, ps, skb, is_template); } else { spin_lock_bh(&local->tim_lock); __ieee80211_beacon_add_tim(sdata, ps, skb, is_template); spin_unlock_bh(&local->tim_lock); } return 0; } static void ieee80211_set_csa(struct ieee80211_sub_if_data *sdata, struct beacon_data *beacon) { struct probe_resp *resp; u8 *beacon_data; size_t beacon_data_len; int i; u8 count = beacon->csa_current_counter; switch (sdata->vif.type) { case NL80211_IFTYPE_AP: beacon_data = beacon->tail; beacon_data_len = beacon->tail_len; break; case NL80211_IFTYPE_ADHOC: beacon_data = beacon->head; beacon_data_len = beacon->head_len; break; case NL80211_IFTYPE_MESH_POINT: beacon_data = beacon->head; beacon_data_len = beacon->head_len; break; default: return; } rcu_read_lock(); for (i = 0; i < IEEE80211_MAX_CSA_COUNTERS_NUM; ++i) { resp = rcu_dereference(sdata->u.ap.probe_resp); if (beacon->csa_counter_offsets[i]) { if (WARN_ON_ONCE(beacon->csa_counter_offsets[i] >= beacon_data_len)) { rcu_read_unlock(); return; } beacon_data[beacon->csa_counter_offsets[i]] = count; } if (sdata->vif.type == NL80211_IFTYPE_AP && resp) resp->data[resp->csa_counter_offsets[i]] = count; } rcu_read_unlock(); } static u8 __ieee80211_csa_update_counter(struct beacon_data *beacon) { beacon->csa_current_counter--; /* the counter should never reach 0 */ WARN_ON_ONCE(!beacon->csa_current_counter); return beacon->csa_current_counter; } u8 ieee80211_csa_update_counter(struct ieee80211_vif *vif) { struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); struct beacon_data *beacon = NULL; u8 count = 0; rcu_read_lock(); if (sdata->vif.type == NL80211_IFTYPE_AP) beacon = rcu_dereference(sdata->u.ap.beacon); else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) beacon = rcu_dereference(sdata->u.ibss.presp); else if (ieee80211_vif_is_mesh(&sdata->vif)) beacon = rcu_dereference(sdata->u.mesh.beacon); if (!beacon) goto unlock; count = __ieee80211_csa_update_counter(beacon); unlock: rcu_read_unlock(); return count; } EXPORT_SYMBOL(ieee80211_csa_update_counter); void ieee80211_csa_set_counter(struct ieee80211_vif *vif, u8 counter) { struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); struct beacon_data *beacon = NULL; rcu_read_lock(); if (sdata->vif.type == NL80211_IFTYPE_AP) beacon = rcu_dereference(sdata->u.ap.beacon); else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) beacon = rcu_dereference(sdata->u.ibss.presp); else if (ieee80211_vif_is_mesh(&sdata->vif)) beacon = rcu_dereference(sdata->u.mesh.beacon); if (!beacon) goto unlock; if (counter < beacon->csa_current_counter) beacon->csa_current_counter = counter; unlock: rcu_read_unlock(); } EXPORT_SYMBOL(ieee80211_csa_set_counter); bool ieee80211_csa_is_complete(struct ieee80211_vif *vif) { struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); struct beacon_data *beacon = NULL; u8 *beacon_data; size_t beacon_data_len; int ret = false; if (!ieee80211_sdata_running(sdata)) return false; rcu_read_lock(); if (vif->type == NL80211_IFTYPE_AP) { struct ieee80211_if_ap *ap = &sdata->u.ap; beacon = rcu_dereference(ap->beacon); if (WARN_ON(!beacon || !beacon->tail)) goto out; beacon_data = beacon->tail; beacon_data_len = beacon->tail_len; } else if (vif->type == NL80211_IFTYPE_ADHOC) { struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; beacon = rcu_dereference(ifibss->presp); if (!beacon) goto out; beacon_data = beacon->head; beacon_data_len = beacon->head_len; } else if (vif->type == NL80211_IFTYPE_MESH_POINT) { struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 
beacon = rcu_dereference(ifmsh->beacon); if (!beacon) goto out; beacon_data = beacon->head; beacon_data_len = beacon->head_len; } else { WARN_ON(1); goto out; } if (!beacon->csa_counter_offsets[0]) goto out; if (WARN_ON_ONCE(beacon->csa_counter_offsets[0] > beacon_data_len)) goto out; if (beacon_data[beacon->csa_counter_offsets[0]] == 1) ret = true; out: rcu_read_unlock(); return ret; } EXPORT_SYMBOL(ieee80211_csa_is_complete); static struct sk_buff * __ieee80211_beacon_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_mutable_offsets *offs, bool is_template) { struct ieee80211_local *local = hw_to_local(hw); struct beacon_data *beacon = NULL; struct sk_buff *skb = NULL; struct ieee80211_tx_info *info; struct ieee80211_sub_if_data *sdata = NULL; enum nl80211_band band; struct ieee80211_tx_rate_control txrc; struct ieee80211_chanctx_conf *chanctx_conf; int csa_off_base = 0; rcu_read_lock(); sdata = vif_to_sdata(vif); chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); if (!ieee80211_sdata_running(sdata) || !chanctx_conf) goto out; if (offs) memset(offs, 0, sizeof(*offs)); if (sdata->vif.type == NL80211_IFTYPE_AP) { struct ieee80211_if_ap *ap = &sdata->u.ap; beacon = rcu_dereference(ap->beacon); if (beacon) { if (beacon->csa_counter_offsets[0]) { if (!is_template) __ieee80211_csa_update_counter(beacon); ieee80211_set_csa(sdata, beacon); } /* * headroom, head length, * tail length and maximum TIM length */ skb = dev_alloc_skb(local->tx_headroom + beacon->head_len + beacon->tail_len + 256 + local->hw.extra_beacon_tailroom); if (!skb) goto out; skb_reserve(skb, local->tx_headroom); skb_put_data(skb, beacon->head, beacon->head_len); ieee80211_beacon_add_tim(sdata, &ap->ps, skb, is_template); if (offs) { offs->tim_offset = beacon->head_len; offs->tim_length = skb->len - beacon->head_len; /* for AP the csa offsets are from tail */ csa_off_base = skb->len; } if (beacon->tail) skb_put_data(skb, beacon->tail, beacon->tail_len); } else goto out; } else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) { struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; struct ieee80211_hdr *hdr; beacon = rcu_dereference(ifibss->presp); if (!beacon) goto out; if (beacon->csa_counter_offsets[0]) { if (!is_template) __ieee80211_csa_update_counter(beacon); ieee80211_set_csa(sdata, beacon); } skb = dev_alloc_skb(local->tx_headroom + beacon->head_len + local->hw.extra_beacon_tailroom); if (!skb) goto out; skb_reserve(skb, local->tx_headroom); skb_put_data(skb, beacon->head, beacon->head_len); hdr = (struct ieee80211_hdr *) skb->data; hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_BEACON); } else if (ieee80211_vif_is_mesh(&sdata->vif)) { struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; beacon = rcu_dereference(ifmsh->beacon); if (!beacon) goto out; if (beacon->csa_counter_offsets[0]) { if (!is_template) /* TODO: For mesh csa_counter is in TU, so * decrementing it by one isn't correct, but * for now we leave it consistent with overall * mac80211's behavior. 
*/ __ieee80211_csa_update_counter(beacon); ieee80211_set_csa(sdata, beacon); } if (ifmsh->sync_ops) ifmsh->sync_ops->adjust_tsf(sdata, beacon); skb = dev_alloc_skb(local->tx_headroom + beacon->head_len + 256 + /* TIM IE */ beacon->tail_len + local->hw.extra_beacon_tailroom); if (!skb) goto out; skb_reserve(skb, local->tx_headroom); skb_put_data(skb, beacon->head, beacon->head_len); ieee80211_beacon_add_tim(sdata, &ifmsh->ps, skb, is_template); if (offs) { offs->tim_offset = beacon->head_len; offs->tim_length = skb->len - beacon->head_len; } skb_put_data(skb, beacon->tail, beacon->tail_len); } else { WARN_ON(1); goto out; } /* CSA offsets */ if (offs && beacon) { int i; for (i = 0; i < IEEE80211_MAX_CSA_COUNTERS_NUM; i++) { u16 csa_off = beacon->csa_counter_offsets[i]; if (!csa_off) continue; offs->csa_counter_offs[i] = csa_off_base + csa_off; } } band = chanctx_conf->def.chan->band; info = IEEE80211_SKB_CB(skb); info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; info->flags |= IEEE80211_TX_CTL_NO_ACK; info->band = band; memset(&txrc, 0, sizeof(txrc)); txrc.hw = hw; txrc.sband = local->hw.wiphy->bands[band]; txrc.bss_conf = &sdata->vif.bss_conf; txrc.skb = skb; txrc.reported_rate.idx = -1; txrc.rate_idx_mask = sdata->rc_rateidx_mask[band]; txrc.bss = true; rate_control_get_rate(sdata, NULL, &txrc); info->control.vif = vif; info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT | IEEE80211_TX_CTL_ASSIGN_SEQ | IEEE80211_TX_CTL_FIRST_FRAGMENT; out: rcu_read_unlock(); return skb; } struct sk_buff * ieee80211_beacon_get_template(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_mutable_offsets *offs) { return __ieee80211_beacon_get(hw, vif, offs, true); } EXPORT_SYMBOL(ieee80211_beacon_get_template); struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 *tim_offset, u16 *tim_length) { struct ieee80211_mutable_offsets offs = {}; struct sk_buff *bcn = __ieee80211_beacon_get(hw, vif, &offs, false); struct sk_buff *copy; struct ieee80211_supported_band *sband; int shift; if (!bcn) return bcn; if (tim_offset) *tim_offset = offs.tim_offset; if (tim_length) *tim_length = offs.tim_length; if (ieee80211_hw_check(hw, BEACON_TX_STATUS) || !hw_to_local(hw)->monitors) return bcn; /* send a copy to monitor interfaces */ copy = skb_copy(bcn, GFP_ATOMIC); if (!copy) return bcn; shift = ieee80211_vif_get_shift(vif); sband = ieee80211_get_sband(vif_to_sdata(vif)); if (!sband) return bcn; ieee80211_tx_monitor(hw_to_local(hw), copy, sband, 1, shift, false); return bcn; } EXPORT_SYMBOL(ieee80211_beacon_get_tim); struct sk_buff *ieee80211_proberesp_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct ieee80211_if_ap *ap = NULL; struct sk_buff *skb = NULL; struct probe_resp *presp = NULL; struct ieee80211_hdr *hdr; struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); if (sdata->vif.type != NL80211_IFTYPE_AP) return NULL; rcu_read_lock(); ap = &sdata->u.ap; presp = rcu_dereference(ap->probe_resp); if (!presp) goto out; skb = dev_alloc_skb(presp->len); if (!skb) goto out; skb_put_data(skb, presp->data, presp->len); hdr = (struct ieee80211_hdr *) skb->data; memset(hdr->addr1, 0, sizeof(hdr->addr1)); out: rcu_read_unlock(); return skb; } EXPORT_SYMBOL(ieee80211_proberesp_get); struct sk_buff *ieee80211_pspoll_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct ieee80211_sub_if_data *sdata; struct ieee80211_if_managed *ifmgd; struct ieee80211_pspoll *pspoll; struct ieee80211_local *local; struct sk_buff *skb; if (WARN_ON(vif->type != 
NL80211_IFTYPE_STATION)) return NULL; sdata = vif_to_sdata(vif); ifmgd = &sdata->u.mgd; local = sdata->local; skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*pspoll)); if (!skb) return NULL; skb_reserve(skb, local->hw.extra_tx_headroom); pspoll = skb_put_zero(skb, sizeof(*pspoll)); pspoll->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_PSPOLL); pspoll->aid = cpu_to_le16(ifmgd->aid); /* aid in PS-Poll has its two MSBs each set to 1 */ pspoll->aid |= cpu_to_le16(1 << 15 | 1 << 14); memcpy(pspoll->bssid, ifmgd->bssid, ETH_ALEN); memcpy(pspoll->ta, vif->addr, ETH_ALEN); return skb; } EXPORT_SYMBOL(ieee80211_pspoll_get); struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif, bool qos_ok) { struct ieee80211_hdr_3addr *nullfunc; struct ieee80211_sub_if_data *sdata; struct ieee80211_if_managed *ifmgd; struct ieee80211_local *local; struct sk_buff *skb; bool qos = false; if (WARN_ON(vif->type != NL80211_IFTYPE_STATION)) return NULL; sdata = vif_to_sdata(vif); ifmgd = &sdata->u.mgd; local = sdata->local; if (qos_ok) { struct sta_info *sta; rcu_read_lock(); sta = sta_info_get(sdata, ifmgd->bssid); qos = sta && sta->sta.wme; rcu_read_unlock(); } skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*nullfunc) + 2); if (!skb) return NULL; skb_reserve(skb, local->hw.extra_tx_headroom); nullfunc = skb_put_zero(skb, sizeof(*nullfunc)); nullfunc->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC | IEEE80211_FCTL_TODS); if (qos) { __le16 qoshdr = cpu_to_le16(7); BUILD_BUG_ON((IEEE80211_STYPE_QOS_NULLFUNC | IEEE80211_STYPE_NULLFUNC) != IEEE80211_STYPE_QOS_NULLFUNC); nullfunc->frame_control |= cpu_to_le16(IEEE80211_STYPE_QOS_NULLFUNC); skb->priority = 7; skb_set_queue_mapping(skb, IEEE80211_AC_VO); skb_put_data(skb, &qoshdr, sizeof(qoshdr)); } memcpy(nullfunc->addr1, ifmgd->bssid, ETH_ALEN); memcpy(nullfunc->addr2, vif->addr, ETH_ALEN); memcpy(nullfunc->addr3, ifmgd->bssid, ETH_ALEN); return skb; } EXPORT_SYMBOL(ieee80211_nullfunc_get); struct sk_buff *ieee80211_probereq_get(struct ieee80211_hw *hw, const u8 *src_addr, const u8 *ssid, size_t ssid_len, size_t tailroom) { struct ieee80211_local *local = hw_to_local(hw); struct ieee80211_hdr_3addr *hdr; struct sk_buff *skb; size_t ie_ssid_len; u8 *pos; ie_ssid_len = 2 + ssid_len; skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*hdr) + ie_ssid_len + tailroom); if (!skb) return NULL; skb_reserve(skb, local->hw.extra_tx_headroom); hdr = skb_put_zero(skb, sizeof(*hdr)); hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_REQ); eth_broadcast_addr(hdr->addr1); memcpy(hdr->addr2, src_addr, ETH_ALEN); eth_broadcast_addr(hdr->addr3); pos = skb_put(skb, ie_ssid_len); *pos++ = WLAN_EID_SSID; *pos++ = ssid_len; if (ssid_len) memcpy(pos, ssid, ssid_len); pos += ssid_len; return skb; } EXPORT_SYMBOL(ieee80211_probereq_get); void ieee80211_rts_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif, const void *frame, size_t frame_len, const struct ieee80211_tx_info *frame_txctl, struct ieee80211_rts *rts) { const struct ieee80211_hdr *hdr = frame; rts->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_RTS); rts->duration = ieee80211_rts_duration(hw, vif, frame_len, frame_txctl); memcpy(rts->ra, hdr->addr1, sizeof(rts->ra)); memcpy(rts->ta, hdr->addr2, sizeof(rts->ta)); } EXPORT_SYMBOL(ieee80211_rts_get); void ieee80211_ctstoself_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif, const void *frame, size_t frame_len, const struct 
ieee80211_tx_info *frame_txctl, struct ieee80211_cts *cts) { const struct ieee80211_hdr *hdr = frame; cts->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CTS); cts->duration = ieee80211_ctstoself_duration(hw, vif, frame_len, frame_txctl); memcpy(cts->ra, hdr->addr1, sizeof(cts->ra)); } EXPORT_SYMBOL(ieee80211_ctstoself_get); struct sk_buff * ieee80211_get_buffered_bc(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct ieee80211_local *local = hw_to_local(hw); struct sk_buff *skb = NULL; struct ieee80211_tx_data tx; struct ieee80211_sub_if_data *sdata; struct ps_data *ps; struct ieee80211_tx_info *info; struct ieee80211_chanctx_conf *chanctx_conf; sdata = vif_to_sdata(vif); rcu_read_lock(); chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); if (!chanctx_conf) goto out; if (sdata->vif.type == NL80211_IFTYPE_AP) { struct beacon_data *beacon = rcu_dereference(sdata->u.ap.beacon); if (!beacon || !beacon->head) goto out; ps = &sdata->u.ap.ps; } else if (ieee80211_vif_is_mesh(&sdata->vif)) { ps = &sdata->u.mesh.ps; } else { goto out; } if (ps->dtim_count != 0 || !ps->dtim_bc_mc) goto out; /* send buffered bc/mc only after DTIM beacon */ while (1) { skb = skb_dequeue(&ps->bc_buf); if (!skb) goto out; local->total_ps_buffered--; if (!skb_queue_empty(&ps->bc_buf) && skb->len >= 2) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; /* more buffered multicast/broadcast frames ==> set * MoreData flag in IEEE 802.11 header to inform PS * STAs */ hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA); } if (sdata->vif.type == NL80211_IFTYPE_AP) sdata = IEEE80211_DEV_TO_SUB_IF(skb->dev); if (!ieee80211_tx_prepare(sdata, &tx, NULL, skb)) break; ieee80211_free_txskb(hw, skb); } info = IEEE80211_SKB_CB(skb); tx.flags |= IEEE80211_TX_PS_BUFFERED; info->band = chanctx_conf->def.chan->band; if (invoke_tx_handlers(&tx)) skb = NULL; out: rcu_read_unlock(); return skb; } EXPORT_SYMBOL(ieee80211_get_buffered_bc); int ieee80211_reserve_tid(struct ieee80211_sta *pubsta, u8 tid) { struct sta_info *sta = container_of(pubsta, struct sta_info, sta); struct ieee80211_sub_if_data *sdata = sta->sdata; struct ieee80211_local *local = sdata->local; int ret; u32 queues; lockdep_assert_held(&local->sta_mtx); /* only some cases are supported right now */ switch (sdata->vif.type) { case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_AP: case NL80211_IFTYPE_AP_VLAN: break; default: WARN_ON(1); return -EINVAL; } if (WARN_ON(tid >= IEEE80211_NUM_UPS)) return -EINVAL; if (sta->reserved_tid == tid) { ret = 0; goto out; } if (sta->reserved_tid != IEEE80211_TID_UNRESERVED) { sdata_err(sdata, "TID reservation already active\n"); ret = -EALREADY; goto out; } ieee80211_stop_vif_queues(sdata->local, sdata, IEEE80211_QUEUE_STOP_REASON_RESERVE_TID); synchronize_net(); /* Tear down BA sessions so we stop aggregating on this TID */ if (ieee80211_hw_check(&local->hw, AMPDU_AGGREGATION)) { set_sta_flag(sta, WLAN_STA_BLOCK_BA); __ieee80211_stop_tx_ba_session(sta, tid, AGG_STOP_LOCAL_REQUEST); } queues = BIT(sdata->vif.hw_queue[ieee802_1d_to_ac[tid]]); __ieee80211_flush_queues(local, sdata, queues, false); sta->reserved_tid = tid; ieee80211_wake_vif_queues(local, sdata, IEEE80211_QUEUE_STOP_REASON_RESERVE_TID); if (ieee80211_hw_check(&local->hw, AMPDU_AGGREGATION)) clear_sta_flag(sta, WLAN_STA_BLOCK_BA); ret = 0; out: return ret; } EXPORT_SYMBOL(ieee80211_reserve_tid); void ieee80211_unreserve_tid(struct ieee80211_sta *pubsta, u8 tid) { struct sta_info *sta = container_of(pubsta, struct sta_info, 
						    sta);
	struct ieee80211_sub_if_data *sdata = sta->sdata;

	lockdep_assert_held(&sdata->local->sta_mtx);

	/* only some cases are supported right now */
	switch (sdata->vif.type) {
	case NL80211_IFTYPE_STATION:
	case NL80211_IFTYPE_AP:
	case NL80211_IFTYPE_AP_VLAN:
		break;
	default:
		WARN_ON(1);
		return;
	}

	if (tid != sta->reserved_tid) {
		sdata_err(sdata, "TID to unreserve (%d) isn't reserved\n", tid);
		return;
	}

	sta->reserved_tid = IEEE80211_TID_UNRESERVED;
}
EXPORT_SYMBOL(ieee80211_unreserve_tid);

void __ieee80211_tx_skb_tid_band(struct ieee80211_sub_if_data *sdata,
				 struct sk_buff *skb, int tid,
				 enum nl80211_band band, u32 txdata_flags)
{
	int ac = ieee80211_ac_from_tid(tid);

	skb_reset_mac_header(skb);
	skb_set_queue_mapping(skb, ac);
	skb->priority = tid;

	skb->dev = sdata->dev;

	/*
	 * The other path calling ieee80211_xmit is from the tasklet,
	 * and while we can handle concurrent transmissions locking
	 * requirements are that we do not come into tx with bhs on.
	 */
	local_bh_disable();
	IEEE80211_SKB_CB(skb)->band = band;
	ieee80211_xmit(sdata, NULL, skb, txdata_flags);
	local_bh_enable();
}

int ieee80211_tx_control_port(struct wiphy *wiphy, struct net_device *dev,
			      const u8 *buf, size_t len,
			      const u8 *dest, __be16 proto, bool unencrypted)
{
	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
	struct ieee80211_local *local = sdata->local;
	struct sk_buff *skb;
	struct ethhdr *ehdr;
	u32 flags;

	/* Only accept CONTROL_PORT_PROTOCOL configured in CONNECT/ASSOCIATE
	 * or Pre-Authentication
	 */
	if (proto != sdata->control_port_protocol &&
	    proto != cpu_to_be16(ETH_P_PREAUTH))
		return -EINVAL;

	if (unencrypted)
		flags = IEEE80211_TX_INTFL_DONT_ENCRYPT;
	else
		flags = 0;

	skb = dev_alloc_skb(local->hw.extra_tx_headroom +
			    sizeof(struct ethhdr) + len);
	if (!skb)
		return -ENOMEM;

	skb_reserve(skb, local->hw.extra_tx_headroom + sizeof(struct ethhdr));

	skb_put_data(skb, buf, len);

	ehdr = skb_push(skb, sizeof(struct ethhdr));
	memcpy(ehdr->h_dest, dest, ETH_ALEN);
	memcpy(ehdr->h_source, sdata->vif.addr, ETH_ALEN);
	ehdr->h_proto = proto;

	skb->dev = dev;
	skb->protocol = htons(ETH_P_802_3);
	skb_reset_network_header(skb);
	skb_reset_mac_header(skb);

	local_bh_disable();
	__ieee80211_subif_start_xmit(skb, skb->dev, flags);
	local_bh_enable();

	return 0;
}
backport-iwlwifi-dkms-7906/net/mac80211/util.c
/*
 * Copyright 2002-2005, Instant802 Networks, Inc.
 * Copyright 2005-2006, Devicescape Software, Inc.
 * Copyright 2006-2007	Jiri Benc <jbenc@suse.cz>
 * Copyright 2007	Johannes Berg <johannes@sipsolutions.net>
 * Copyright 2013-2014  Intel Mobile Communications GmbH
 * Copyright (C) 2015-2017	Intel Deutschland GmbH
 * Copyright (C) 2018-2019 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * utilities for mac80211
 */

#include <net/mac80211.h>
#include <linux/netdevice.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/if_arp.h>
#include <linux/bitmap.h>
#include <linux/crc32.h>
#include <net/net_namespace.h>
#include <net/cfg80211.h>
#include <net/rtnetlink.h>

#include "ieee80211_i.h"
#include "driver-ops.h"
#include "rate.h"
#include "mesh.h"
#include "wme.h"
#include "led.h"
#include "wep.h"

/* privid for wiphys to determine whether they belong to us or not */
const void *const mac80211_wiphy_privid = &mac80211_wiphy_privid;

struct ieee80211_hw *wiphy_to_ieee80211_hw(struct wiphy *wiphy)
{
	struct ieee80211_local *local;

	BUG_ON(!wiphy);

	local = wiphy_priv(wiphy);
	return &local->hw;
}
EXPORT_SYMBOL(wiphy_to_ieee80211_hw);

void ieee80211_tx_set_protected(struct ieee80211_tx_data *tx)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;

	skb_queue_walk(&tx->skbs, skb) {
		hdr = (struct ieee80211_hdr *) skb->data;
		hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);
	}
}

int ieee80211_frame_duration(enum nl80211_band band, size_t len,
			     int rate, int erp, int short_preamble,
			     int shift)
{
	int dur;

	/* calculate duration (in microseconds, rounded up to next higher
	 * integer if it includes a fractional microsecond) to send frame of
	 * len bytes (does not include FCS) at the given rate. Duration will
	 * also include SIFS.
	 *
	 * rate is in 100 kbps, so the dividend is multiplied by 10 in the
	 * DIV_ROUND_UP() operations.
	 *
	 * shift may be 2 for 5 MHz channels or 1 for 10 MHz channels, and
	 * is assumed to be 0 otherwise.
	 */

	if (band == NL80211_BAND_5GHZ || erp) {
		/*
		 * OFDM:
		 *
		 * N_DBPS = DATARATE x 4
		 * N_SYM = Ceiling((16+8xLENGTH+6) / N_DBPS)
		 *	(16 = SIGNAL time, 6 = tail bits)
		 * TXTIME = T_PREAMBLE + T_SIGNAL + T_SYM x N_SYM + Signal Ext
		 *
		 * T_SYM = 4 usec
		 * 802.11a - 18.5.2: aSIFSTime = 16 usec
		 * 802.11g - 19.8.4: aSIFSTime = 10 usec +
		 *	signal ext = 6 usec
		 */
		dur = 16; /* SIFS + signal ext */
		dur += 16; /* IEEE 802.11-2012 18.3.2.4: T_PREAMBLE = 16 usec */
		dur += 4; /* IEEE 802.11-2012 18.3.2.4: T_SIGNAL = 4 usec */

		/* IEEE 802.11-2012 18.3.2.4: all values above are:
		 *  * times 4 for 5 MHz
		 *  * times 2 for 10 MHz
		 */
		dur *= 1 << shift;

		/* rates should already consider the channel bandwidth,
		 * don't apply divisor again.
		 */
		dur += 4 * DIV_ROUND_UP((16 + 8 * (len + 4) + 6) * 10,
					4 * rate); /* T_SYM x N_SYM */
	} else {
		/*
		 * 802.11b or 802.11g with 802.11b compatibility:
		 * 18.3.4: TXTIME = PreambleLength + PLCPHeaderTime +
		 * Ceiling(((LENGTH+PBCC)x8)/DATARATE). PBCC=0.
		 *
		 * 802.11 (DS): 15.3.3, 802.11b: 18.3.4
		 * aSIFSTime = 10 usec
		 * aPreambleLength = 144 usec or 72 usec with short preamble
		 * aPLCPHeaderLength = 48 usec or 24 usec with short preamble
		 */
		dur = 10; /* aSIFSTime = 10 usec */
		dur += short_preamble ?
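			/* short: 72 usec preamble + 24 usec PLCP header;
			 * long: 144 + 48 usec (values from the comment above)
			 */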
(72 + 24) : (144 + 48); dur += DIV_ROUND_UP(8 * (len + 4) * 10, rate); } return dur; } /* Exported duration function for driver use */ __le16 ieee80211_generic_frame_duration(struct ieee80211_hw *hw, struct ieee80211_vif *vif, enum nl80211_band band, size_t frame_len, struct ieee80211_rate *rate) { struct ieee80211_sub_if_data *sdata; u16 dur; int erp, shift = 0; bool short_preamble = false; erp = 0; if (vif) { sdata = vif_to_sdata(vif); short_preamble = sdata->vif.bss_conf.use_short_preamble; if (sdata->flags & IEEE80211_SDATA_OPERATING_GMODE) erp = rate->flags & IEEE80211_RATE_ERP_G; shift = ieee80211_vif_get_shift(vif); } dur = ieee80211_frame_duration(band, frame_len, rate->bitrate, erp, short_preamble, shift); return cpu_to_le16(dur); } EXPORT_SYMBOL(ieee80211_generic_frame_duration); __le16 ieee80211_rts_duration(struct ieee80211_hw *hw, struct ieee80211_vif *vif, size_t frame_len, const struct ieee80211_tx_info *frame_txctl) { struct ieee80211_local *local = hw_to_local(hw); struct ieee80211_rate *rate; struct ieee80211_sub_if_data *sdata; bool short_preamble; int erp, shift = 0, bitrate; u16 dur; struct ieee80211_supported_band *sband; sband = local->hw.wiphy->bands[frame_txctl->band]; short_preamble = false; rate = &sband->bitrates[frame_txctl->control.rts_cts_rate_idx]; erp = 0; if (vif) { sdata = vif_to_sdata(vif); short_preamble = sdata->vif.bss_conf.use_short_preamble; if (sdata->flags & IEEE80211_SDATA_OPERATING_GMODE) erp = rate->flags & IEEE80211_RATE_ERP_G; shift = ieee80211_vif_get_shift(vif); } bitrate = DIV_ROUND_UP(rate->bitrate, 1 << shift); /* CTS duration */ dur = ieee80211_frame_duration(sband->band, 10, bitrate, erp, short_preamble, shift); /* Data frame duration */ dur += ieee80211_frame_duration(sband->band, frame_len, bitrate, erp, short_preamble, shift); /* ACK duration */ dur += ieee80211_frame_duration(sband->band, 10, bitrate, erp, short_preamble, shift); return cpu_to_le16(dur); } EXPORT_SYMBOL(ieee80211_rts_duration); __le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw, struct ieee80211_vif *vif, size_t frame_len, const struct ieee80211_tx_info *frame_txctl) { struct ieee80211_local *local = hw_to_local(hw); struct ieee80211_rate *rate; struct ieee80211_sub_if_data *sdata; bool short_preamble; int erp, shift = 0, bitrate; u16 dur; struct ieee80211_supported_band *sband; sband = local->hw.wiphy->bands[frame_txctl->band]; short_preamble = false; rate = &sband->bitrates[frame_txctl->control.rts_cts_rate_idx]; erp = 0; if (vif) { sdata = vif_to_sdata(vif); short_preamble = sdata->vif.bss_conf.use_short_preamble; if (sdata->flags & IEEE80211_SDATA_OPERATING_GMODE) erp = rate->flags & IEEE80211_RATE_ERP_G; shift = ieee80211_vif_get_shift(vif); } bitrate = DIV_ROUND_UP(rate->bitrate, 1 << shift); /* Data frame duration */ dur = ieee80211_frame_duration(sband->band, frame_len, bitrate, erp, short_preamble, shift); if (!(frame_txctl->flags & IEEE80211_TX_CTL_NO_ACK)) { /* ACK duration */ dur += ieee80211_frame_duration(sband->band, 10, bitrate, erp, short_preamble, shift); } return cpu_to_le16(dur); } EXPORT_SYMBOL(ieee80211_ctstoself_duration); static void __ieee80211_wake_txqs(struct ieee80211_sub_if_data *sdata, int ac) { struct ieee80211_local *local = sdata->local; struct ieee80211_vif *vif = &sdata->vif; struct fq *fq = &local->fq; struct ps_data *ps = NULL; struct txq_info *txqi; struct sta_info *sta; int i; spin_lock_bh(&fq->lock); if (sdata->vif.type == NL80211_IFTYPE_AP) ps = &sdata->bss->ps; sdata->vif.txqs_stopped[ac] = false; 
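	/*
	 * Sketch of the wake flow below (inferred from the loop itself):
	 * every per-STA TXQ of this AC that was marked
	 * IEEE80211_TXQ_STOP_NETIF_TX is handed back to the driver via
	 * drv_wake_tx_queue(); fq->lock is dropped around the driver call
	 * since the driver may immediately dequeue from this context.
	 */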
list_for_each_entry_rcu(sta, &local->sta_list, list) { if (sdata != sta->sdata) continue; for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) { struct ieee80211_txq *txq = sta->sta.txq[i]; if (!txq) continue; txqi = to_txq_info(txq); if (ac != txq->ac) continue; if (!test_and_clear_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txqi->flags)) continue; spin_unlock_bh(&fq->lock); drv_wake_tx_queue(local, txqi); spin_lock_bh(&fq->lock); } } if (!vif->txq) goto out; txqi = to_txq_info(vif->txq); if (!test_and_clear_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txqi->flags) || (ps && atomic_read(&ps->num_sta_ps)) || ac != vif->txq->ac) goto out; spin_unlock_bh(&fq->lock); drv_wake_tx_queue(local, txqi); return; out: spin_unlock_bh(&fq->lock); } static void __releases(&local->queue_stop_reason_lock) __acquires(&local->queue_stop_reason_lock) _ieee80211_wake_txqs(struct ieee80211_local *local, unsigned long *flags) { struct ieee80211_sub_if_data *sdata; int n_acs = IEEE80211_NUM_ACS; int i; rcu_read_lock(); if (local->hw.queues < IEEE80211_NUM_ACS) n_acs = 1; for (i = 0; i < local->hw.queues; i++) { if (local->queue_stop_reasons[i]) continue; spin_unlock_irqrestore(&local->queue_stop_reason_lock, *flags); list_for_each_entry_rcu(sdata, &local->interfaces, list) { int ac; for (ac = 0; ac < n_acs; ac++) { int ac_queue = sdata->vif.hw_queue[ac]; if (ac_queue == i || sdata->vif.cab_queue == i) __ieee80211_wake_txqs(sdata, ac); } } spin_lock_irqsave(&local->queue_stop_reason_lock, *flags); } rcu_read_unlock(); } void ieee80211_wake_txqs(unsigned long data) { struct ieee80211_local *local = (struct ieee80211_local *)data; unsigned long flags; spin_lock_irqsave(&local->queue_stop_reason_lock, flags); _ieee80211_wake_txqs(local, &flags); spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); } void ieee80211_propagate_queue_wake(struct ieee80211_local *local, int queue) { struct ieee80211_sub_if_data *sdata; int n_acs = IEEE80211_NUM_ACS; if (local->ops->wake_tx_queue) return; if (local->hw.queues < IEEE80211_NUM_ACS) n_acs = 1; list_for_each_entry_rcu(sdata, &local->interfaces, list) { int ac; if (!sdata->dev) continue; if (sdata->vif.cab_queue != IEEE80211_INVAL_HW_QUEUE && local->queue_stop_reasons[sdata->vif.cab_queue] != 0) continue; for (ac = 0; ac < n_acs; ac++) { int ac_queue = sdata->vif.hw_queue[ac]; if (ac_queue == queue || (sdata->vif.cab_queue == queue && local->queue_stop_reasons[ac_queue] == 0 && skb_queue_empty(&local->pending[ac_queue]))) netif_wake_subqueue(sdata->dev, ac); } } } static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue, enum queue_stop_reason reason, bool refcounted, unsigned long *flags) { struct ieee80211_local *local = hw_to_local(hw); trace_wake_queue(local, queue, reason); if (WARN_ON(queue >= hw->queues)) return; if (!test_bit(reason, &local->queue_stop_reasons[queue])) return; if (!refcounted) { local->q_stop_reasons[queue][reason] = 0; } else { local->q_stop_reasons[queue][reason]--; if (WARN_ON(local->q_stop_reasons[queue][reason] < 0)) local->q_stop_reasons[queue][reason] = 0; } if (local->q_stop_reasons[queue][reason] == 0) __clear_bit(reason, &local->queue_stop_reasons[queue]); if (local->queue_stop_reasons[queue] != 0) /* someone still has this queue stopped */ return; if (skb_queue_empty(&local->pending[queue])) { rcu_read_lock(); ieee80211_propagate_queue_wake(local, queue); rcu_read_unlock(); } else tasklet_schedule(&local->tx_pending_tasklet); /* * Calling _ieee80211_wake_txqs here can be a problem because it may * release queue_stop_reason_lock which has been 
taken by * __ieee80211_wake_queue's caller. It is certainly not very nice to * release someone's lock, but it is fine because all the callers of * __ieee80211_wake_queue call it right before releasing the lock. */ if (local->ops->wake_tx_queue) { if (reason == IEEE80211_QUEUE_STOP_REASON_DRIVER) tasklet_schedule(&local->wake_txqs_tasklet); else _ieee80211_wake_txqs(local, flags); } } void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue, enum queue_stop_reason reason, bool refcounted) { struct ieee80211_local *local = hw_to_local(hw); unsigned long flags; spin_lock_irqsave(&local->queue_stop_reason_lock, flags); __ieee80211_wake_queue(hw, queue, reason, refcounted, &flags); spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); } void ieee80211_wake_queue(struct ieee80211_hw *hw, int queue) { ieee80211_wake_queue_by_reason(hw, queue, IEEE80211_QUEUE_STOP_REASON_DRIVER, false); } EXPORT_SYMBOL(ieee80211_wake_queue); static void __ieee80211_stop_queue(struct ieee80211_hw *hw, int queue, enum queue_stop_reason reason, bool refcounted) { struct ieee80211_local *local = hw_to_local(hw); struct ieee80211_sub_if_data *sdata; int n_acs = IEEE80211_NUM_ACS; trace_stop_queue(local, queue, reason); if (WARN_ON(queue >= hw->queues)) return; if (!refcounted) local->q_stop_reasons[queue][reason] = 1; else local->q_stop_reasons[queue][reason]++; if (__test_and_set_bit(reason, &local->queue_stop_reasons[queue])) return; if (local->hw.queues < IEEE80211_NUM_ACS) n_acs = 1; rcu_read_lock(); list_for_each_entry_rcu(sdata, &local->interfaces, list) { int ac; if (!sdata->dev) continue; for (ac = 0; ac < n_acs; ac++) { if (sdata->vif.hw_queue[ac] == queue || sdata->vif.cab_queue == queue) { if (!local->ops->wake_tx_queue) { netif_stop_subqueue(sdata->dev, ac); continue; } spin_lock(&local->fq.lock); sdata->vif.txqs_stopped[ac] = true; spin_unlock(&local->fq.lock); } } } rcu_read_unlock(); } void ieee80211_stop_queue_by_reason(struct ieee80211_hw *hw, int queue, enum queue_stop_reason reason, bool refcounted) { struct ieee80211_local *local = hw_to_local(hw); unsigned long flags; spin_lock_irqsave(&local->queue_stop_reason_lock, flags); __ieee80211_stop_queue(hw, queue, reason, refcounted); spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); } void ieee80211_stop_queue(struct ieee80211_hw *hw, int queue) { ieee80211_stop_queue_by_reason(hw, queue, IEEE80211_QUEUE_STOP_REASON_DRIVER, false); } EXPORT_SYMBOL(ieee80211_stop_queue); void ieee80211_add_pending_skb(struct ieee80211_local *local, struct sk_buff *skb) { struct ieee80211_hw *hw = &local->hw; unsigned long flags; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); int queue = info->hw_queue; if (WARN_ON(!info->control.vif)) { ieee80211_free_txskb(&local->hw, skb); return; } spin_lock_irqsave(&local->queue_stop_reason_lock, flags); __ieee80211_stop_queue(hw, queue, IEEE80211_QUEUE_STOP_REASON_SKB_ADD, false); __skb_queue_tail(&local->pending[queue], skb); __ieee80211_wake_queue(hw, queue, IEEE80211_QUEUE_STOP_REASON_SKB_ADD, false, &flags); spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); } void ieee80211_add_pending_skbs(struct ieee80211_local *local, struct sk_buff_head *skbs) { struct ieee80211_hw *hw = &local->hw; struct sk_buff *skb; unsigned long flags; int queue, i; spin_lock_irqsave(&local->queue_stop_reason_lock, flags); while ((skb = skb_dequeue(skbs))) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); if (WARN_ON(!info->control.vif)) { ieee80211_free_txskb(&local->hw, skb); continue; 
} queue = info->hw_queue; __ieee80211_stop_queue(hw, queue, IEEE80211_QUEUE_STOP_REASON_SKB_ADD, false); __skb_queue_tail(&local->pending[queue], skb); } for (i = 0; i < hw->queues; i++) __ieee80211_wake_queue(hw, i, IEEE80211_QUEUE_STOP_REASON_SKB_ADD, false, &flags); spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); } void ieee80211_stop_queues_by_reason(struct ieee80211_hw *hw, unsigned long queues, enum queue_stop_reason reason, bool refcounted) { struct ieee80211_local *local = hw_to_local(hw); unsigned long flags; int i; spin_lock_irqsave(&local->queue_stop_reason_lock, flags); for_each_set_bit(i, &queues, hw->queues) __ieee80211_stop_queue(hw, i, reason, refcounted); spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); } void ieee80211_stop_queues(struct ieee80211_hw *hw) { ieee80211_stop_queues_by_reason(hw, IEEE80211_MAX_QUEUE_MAP, IEEE80211_QUEUE_STOP_REASON_DRIVER, false); } EXPORT_SYMBOL(ieee80211_stop_queues); int ieee80211_queue_stopped(struct ieee80211_hw *hw, int queue) { struct ieee80211_local *local = hw_to_local(hw); unsigned long flags; int ret; if (WARN_ON(queue >= hw->queues)) return true; spin_lock_irqsave(&local->queue_stop_reason_lock, flags); ret = test_bit(IEEE80211_QUEUE_STOP_REASON_DRIVER, &local->queue_stop_reasons[queue]); spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); return ret; } EXPORT_SYMBOL(ieee80211_queue_stopped); void ieee80211_wake_queues_by_reason(struct ieee80211_hw *hw, unsigned long queues, enum queue_stop_reason reason, bool refcounted) { struct ieee80211_local *local = hw_to_local(hw); unsigned long flags; int i; spin_lock_irqsave(&local->queue_stop_reason_lock, flags); for_each_set_bit(i, &queues, hw->queues) __ieee80211_wake_queue(hw, i, reason, refcounted, &flags); spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); } void ieee80211_wake_queues(struct ieee80211_hw *hw) { ieee80211_wake_queues_by_reason(hw, IEEE80211_MAX_QUEUE_MAP, IEEE80211_QUEUE_STOP_REASON_DRIVER, false); } EXPORT_SYMBOL(ieee80211_wake_queues); static unsigned int ieee80211_get_vif_queues(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata) { unsigned int queues; if (sdata && ieee80211_hw_check(&local->hw, QUEUE_CONTROL)) { int ac; queues = 0; for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) queues |= BIT(sdata->vif.hw_queue[ac]); if (sdata->vif.cab_queue != IEEE80211_INVAL_HW_QUEUE) queues |= BIT(sdata->vif.cab_queue); } else { /* all queues */ queues = BIT(local->hw.queues) - 1; } return queues; } void __ieee80211_flush_queues(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, unsigned int queues, bool drop) { if (!local->ops->flush) return; /* * If no queue was set, or if the HW doesn't support * IEEE80211_HW_QUEUE_CONTROL - flush all queues */ if (!queues || !ieee80211_hw_check(&local->hw, QUEUE_CONTROL)) queues = ieee80211_get_vif_queues(local, sdata); ieee80211_stop_queues_by_reason(&local->hw, queues, IEEE80211_QUEUE_STOP_REASON_FLUSH, false); drv_flush(local, sdata, queues, drop); ieee80211_wake_queues_by_reason(&local->hw, queues, IEEE80211_QUEUE_STOP_REASON_FLUSH, false); } void ieee80211_flush_queues(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, bool drop) { __ieee80211_flush_queues(local, sdata, 0, drop); } void ieee80211_stop_vif_queues(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, enum queue_stop_reason reason) { ieee80211_stop_queues_by_reason(&local->hw, ieee80211_get_vif_queues(local, sdata), reason, true); } void 
ieee80211_wake_vif_queues(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, enum queue_stop_reason reason) { ieee80211_wake_queues_by_reason(&local->hw, ieee80211_get_vif_queues(local, sdata), reason, true); } static void __iterate_interfaces(struct ieee80211_local *local, u32 iter_flags, void (*iterator)(void *data, u8 *mac, struct ieee80211_vif *vif), void *data) { struct ieee80211_sub_if_data *sdata; bool active_only = iter_flags & IEEE80211_IFACE_ITER_ACTIVE; list_for_each_entry_rcu(sdata, &local->interfaces, list) { switch (sdata->vif.type) { case NL80211_IFTYPE_MONITOR: if (!(sdata->u.mntr.flags & MONITOR_FLAG_ACTIVE)) continue; break; case NL80211_IFTYPE_AP_VLAN: continue; default: break; } if (!(iter_flags & IEEE80211_IFACE_ITER_RESUME_ALL) && active_only && !(sdata->flags & IEEE80211_SDATA_IN_DRIVER)) continue; if (ieee80211_sdata_running(sdata) || !active_only) iterator(data, sdata->vif.addr, &sdata->vif); } sdata = rcu_dereference_check(local->monitor_sdata, lockdep_is_held(&local->iflist_mtx) || lockdep_rtnl_is_held()); if (sdata && (iter_flags & IEEE80211_IFACE_ITER_RESUME_ALL || !active_only || sdata->flags & IEEE80211_SDATA_IN_DRIVER)) iterator(data, sdata->vif.addr, &sdata->vif); } void ieee80211_iterate_interfaces( struct ieee80211_hw *hw, u32 iter_flags, void (*iterator)(void *data, u8 *mac, struct ieee80211_vif *vif), void *data) { struct ieee80211_local *local = hw_to_local(hw); mutex_lock(&local->iflist_mtx); __iterate_interfaces(local, iter_flags, iterator, data); mutex_unlock(&local->iflist_mtx); } EXPORT_SYMBOL_GPL(ieee80211_iterate_interfaces); void ieee80211_iterate_active_interfaces_atomic( struct ieee80211_hw *hw, u32 iter_flags, void (*iterator)(void *data, u8 *mac, struct ieee80211_vif *vif), void *data) { struct ieee80211_local *local = hw_to_local(hw); rcu_read_lock(); __iterate_interfaces(local, iter_flags | IEEE80211_IFACE_ITER_ACTIVE, iterator, data); rcu_read_unlock(); } EXPORT_SYMBOL_GPL(ieee80211_iterate_active_interfaces_atomic); void ieee80211_iterate_active_interfaces_rtnl( struct ieee80211_hw *hw, u32 iter_flags, void (*iterator)(void *data, u8 *mac, struct ieee80211_vif *vif), void *data) { struct ieee80211_local *local = hw_to_local(hw); ASSERT_RTNL(); __iterate_interfaces(local, iter_flags | IEEE80211_IFACE_ITER_ACTIVE, iterator, data); } EXPORT_SYMBOL_GPL(ieee80211_iterate_active_interfaces_rtnl); static void __iterate_stations(struct ieee80211_local *local, void (*iterator)(void *data, struct ieee80211_sta *sta), void *data) { struct sta_info *sta; list_for_each_entry_rcu(sta, &local->sta_list, list) { if (!sta->uploaded) continue; iterator(data, &sta->sta); } } void ieee80211_iterate_stations_atomic(struct ieee80211_hw *hw, void (*iterator)(void *data, struct ieee80211_sta *sta), void *data) { struct ieee80211_local *local = hw_to_local(hw); rcu_read_lock(); __iterate_stations(local, iterator, data); rcu_read_unlock(); } EXPORT_SYMBOL_GPL(ieee80211_iterate_stations_atomic); struct ieee80211_vif *wdev_to_ieee80211_vif(struct wireless_dev *wdev) { struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev); if (!ieee80211_sdata_running(sdata) || !(sdata->flags & IEEE80211_SDATA_IN_DRIVER)) return NULL; return &sdata->vif; } EXPORT_SYMBOL_GPL(wdev_to_ieee80211_vif); struct wireless_dev *ieee80211_vif_to_wdev(struct ieee80211_vif *vif) { struct ieee80211_sub_if_data *sdata; if (!vif) return NULL; sdata = vif_to_sdata(vif); if (!ieee80211_sdata_running(sdata) || !(sdata->flags & IEEE80211_SDATA_IN_DRIVER)) return NULL; 
return &sdata->wdev; } EXPORT_SYMBOL_GPL(ieee80211_vif_to_wdev); /* * Nothing should have been stuffed into the workqueue during * the suspend->resume cycle. Since we can't check each caller * of this function if we are already quiescing / suspended, * check here and don't WARN since this can actually happen when * the rx path (for example) is racing against __ieee80211_suspend * and suspending / quiescing was set after the rx path checked * them. */ static bool ieee80211_can_queue_work(struct ieee80211_local *local) { if (local->quiescing || (local->suspended && !local->resuming)) { pr_warn("queueing ieee80211 work while going to suspend\n"); return false; } return true; } void ieee80211_queue_work(struct ieee80211_hw *hw, struct work_struct *work) { struct ieee80211_local *local = hw_to_local(hw); if (!ieee80211_can_queue_work(local)) return; queue_work(local->workqueue, work); } EXPORT_SYMBOL(ieee80211_queue_work); void ieee80211_queue_delayed_work(struct ieee80211_hw *hw, struct delayed_work *dwork, unsigned long delay) { struct ieee80211_local *local = hw_to_local(hw); if (!ieee80211_can_queue_work(local)) return; queue_delayed_work(local->workqueue, dwork, delay); } EXPORT_SYMBOL(ieee80211_queue_delayed_work); static u32 _ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action, struct ieee802_11_elems *elems, u64 filter, u32 crc, const struct element *check_inherit) { const struct element *elem; bool calc_crc = filter != 0; DECLARE_BITMAP(seen_elems, 256); const u8 *ie; bitmap_zero(seen_elems, 256); for_each_element(elem, start, len) { bool elem_parse_failed; u8 id = elem->id; u8 elen = elem->datalen; const u8 *pos = elem->data; if (check_inherit && !cfg80211_is_element_inherited(elem, check_inherit)) continue; switch (id) { case WLAN_EID_SSID: case WLAN_EID_SUPP_RATES: case WLAN_EID_FH_PARAMS: case WLAN_EID_DS_PARAMS: case WLAN_EID_CF_PARAMS: case WLAN_EID_TIM: case WLAN_EID_IBSS_PARAMS: case WLAN_EID_CHALLENGE: case WLAN_EID_RSN: case WLAN_EID_ERP_INFO: case WLAN_EID_EXT_SUPP_RATES: case WLAN_EID_HT_CAPABILITY: case WLAN_EID_HT_OPERATION: case WLAN_EID_VHT_CAPABILITY: case WLAN_EID_VHT_OPERATION: case WLAN_EID_MESH_ID: case WLAN_EID_MESH_CONFIG: case WLAN_EID_PEER_MGMT: case WLAN_EID_PREQ: case WLAN_EID_PREP: case WLAN_EID_PERR: case WLAN_EID_RANN: case WLAN_EID_CHANNEL_SWITCH: case WLAN_EID_EXT_CHANSWITCH_ANN: case WLAN_EID_COUNTRY: case WLAN_EID_PWR_CONSTRAINT: case WLAN_EID_TIMEOUT_INTERVAL: case WLAN_EID_SECONDARY_CHANNEL_OFFSET: case WLAN_EID_WIDE_BW_CHANNEL_SWITCH: case WLAN_EID_CHAN_SWITCH_PARAM: case WLAN_EID_EXT_CAPABILITY: case WLAN_EID_CHAN_SWITCH_TIMING: case WLAN_EID_LINK_ID: case WLAN_EID_BSS_MAX_IDLE_PERIOD: /* * not listing WLAN_EID_CHANNEL_SWITCH_WRAPPER -- it seems possible * that if the content gets bigger it might be needed more than once */ if (test_bit(id, seen_elems)) { elems->parse_error = true; continue; } break; } if (calc_crc && id < 64 && (filter & (1ULL << id))) crc = crc32_be(crc, pos - 2, elen + 2); elem_parse_failed = false; switch (id) { case WLAN_EID_LINK_ID: if (elen + 2 != sizeof(struct ieee80211_tdls_lnkie)) { elem_parse_failed = true; break; } elems->lnk_id = (void *)(pos - 2); break; case WLAN_EID_CHAN_SWITCH_TIMING: if (elen != sizeof(struct ieee80211_ch_switch_timing)) { elem_parse_failed = true; break; } elems->ch_sw_timing = (void *)pos; break; case WLAN_EID_EXT_CAPABILITY: elems->ext_capab = pos; elems->ext_capab_len = elen; break; case WLAN_EID_SSID: elems->ssid = pos; elems->ssid_len = elen; break; case WLAN_EID_SUPP_RATES: 
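/*
 * (Editor's note) for_each_element() above only yields elements whose
 * declared length fits within [start, start + len), so pos/elen are
 * bounds-safe here; each case must still check elen against the size
 * that particular element is expected to have.
 */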
elems->supp_rates = pos;
			elems->supp_rates_len = elen;
			break;
		case WLAN_EID_DS_PARAMS:
			if (elen >= 1)
				elems->ds_params = pos;
			else
				elem_parse_failed = true;
			break;
		case WLAN_EID_TIM:
			if (elen >= sizeof(struct ieee80211_tim_ie)) {
				elems->tim = (void *)pos;
				elems->tim_len = elen;
			} else
				elem_parse_failed = true;
			break;
		case WLAN_EID_CHALLENGE:
			elems->challenge = pos;
			elems->challenge_len = elen;
			break;
		case WLAN_EID_VENDOR_SPECIFIC:
			if (elen >= 4 && pos[0] == 0x00 && pos[1] == 0x50 &&
			    pos[2] == 0xf2) {
				/* Microsoft OUI (00:50:F2) */

				if (calc_crc)
					crc = crc32_be(crc, pos - 2, elen + 2);

				if (elen >= 5 && pos[3] == 2) {
					/* OUI Type 2 - WMM IE */
					if (pos[4] == 0) {
						elems->wmm_info = pos;
						elems->wmm_info_len = elen;
					} else if (pos[4] == 1) {
						elems->wmm_param = pos;
						elems->wmm_param_len = elen;
					}
				}
			}
			break;
		case WLAN_EID_RSN:
			elems->rsn = pos;
			elems->rsn_len = elen;
			break;
		case WLAN_EID_ERP_INFO:
			if (elen >= 1)
				elems->erp_info = pos;
			else
				elem_parse_failed = true;
			break;
		case WLAN_EID_EXT_SUPP_RATES:
			elems->ext_supp_rates = pos;
			elems->ext_supp_rates_len = elen;
			break;
		case WLAN_EID_HT_CAPABILITY:
			if (elen >= sizeof(struct ieee80211_ht_cap))
				elems->ht_cap_elem = (void *)pos;
			else
				elem_parse_failed = true;
			break;
		case WLAN_EID_HT_OPERATION:
			if (elen >= sizeof(struct ieee80211_ht_operation))
				elems->ht_operation = (void *)pos;
			else
				elem_parse_failed = true;
			break;
		case WLAN_EID_VHT_CAPABILITY:
			if (elen >= sizeof(struct ieee80211_vht_cap))
				elems->vht_cap_elem = (void *)pos;
			else
				elem_parse_failed = true;
			break;
		case WLAN_EID_VHT_OPERATION:
			if (elen >= sizeof(struct ieee80211_vht_operation))
				elems->vht_operation = (void *)pos;
			else
				elem_parse_failed = true;
			break;
		case WLAN_EID_OPMODE_NOTIF:
			if (elen > 0)
				elems->opmode_notif = pos;
			else
				elem_parse_failed = true;
			break;
		case WLAN_EID_MESH_ID:
			elems->mesh_id = pos;
			elems->mesh_id_len = elen;
			break;
		case WLAN_EID_MESH_CONFIG:
			if (elen >= sizeof(struct ieee80211_meshconf_ie))
				elems->mesh_config = (void *)pos;
			else
				elem_parse_failed = true;
			break;
		case WLAN_EID_PEER_MGMT:
			elems->peering = pos;
			elems->peering_len = elen;
			break;
		case WLAN_EID_MESH_AWAKE_WINDOW:
			if (elen >= 2)
				elems->awake_window = (void *)pos;
			break;
		case WLAN_EID_PREQ:
			elems->preq = pos;
			elems->preq_len = elen;
			break;
		case WLAN_EID_PREP:
			elems->prep = pos;
			elems->prep_len = elen;
			break;
		case WLAN_EID_PERR:
			elems->perr = pos;
			elems->perr_len = elen;
			break;
		case WLAN_EID_RANN:
			if (elen >= sizeof(struct ieee80211_rann_ie))
				elems->rann = (void *)pos;
			else
				elem_parse_failed = true;
			break;
		case WLAN_EID_CHANNEL_SWITCH:
			if (elen != sizeof(struct ieee80211_channel_sw_ie)) {
				elem_parse_failed = true;
				break;
			}
			elems->ch_switch_ie = (void *)pos;
			break;
		case WLAN_EID_EXT_CHANSWITCH_ANN:
			if (elen != sizeof(struct ieee80211_ext_chansw_ie)) {
				elem_parse_failed = true;
				break;
			}
			elems->ext_chansw_ie = (void *)pos;
			break;
		case WLAN_EID_SECONDARY_CHANNEL_OFFSET:
			if (elen != sizeof(struct ieee80211_sec_chan_offs_ie)) {
				elem_parse_failed = true;
				break;
			}
			elems->sec_chan_offs = (void *)pos;
			break;
		case WLAN_EID_CHAN_SWITCH_PARAM:
			if (elen != sizeof(*elems->mesh_chansw_params_ie)) {
				elem_parse_failed = true;
				break;
			}
			elems->mesh_chansw_params_ie = (void *)pos;
			break;
		case WLAN_EID_WIDE_BW_CHANNEL_SWITCH:
			if (!action ||
			    elen != sizeof(*elems->wide_bw_chansw_ie)) {
				elem_parse_failed = true;
				break;
			}
			elems->wide_bw_chansw_ie = (void *)pos;
			break;
		case WLAN_EID_CHANNEL_SWITCH_WRAPPER:
			if (action) {
				elem_parse_failed = true;
				break;
			}
			/*
			 * This is a bit tricky; we only care about
			 * the wide bandwidth channel
switch element, so * just parse it out manually. */ ie = cfg80211_find_ie(WLAN_EID_WIDE_BW_CHANNEL_SWITCH, pos, elen); if (ie) { if (ie[1] == sizeof(*elems->wide_bw_chansw_ie)) elems->wide_bw_chansw_ie = (void *)(ie + 2); else elem_parse_failed = true; } break; case WLAN_EID_COUNTRY: elems->country_elem = pos; elems->country_elem_len = elen; break; case WLAN_EID_PWR_CONSTRAINT: if (elen != 1) { elem_parse_failed = true; break; } elems->pwr_constr_elem = pos; break; case WLAN_EID_CISCO_VENDOR_SPECIFIC: /* Lots of different options exist, but we only care * about the Dynamic Transmit Power Control element. * First check for the Cisco OUI, then for the DTPC * tag (0x00). */ if (elen < 4) { elem_parse_failed = true; break; } if (pos[0] != 0x00 || pos[1] != 0x40 || pos[2] != 0x96 || pos[3] != 0x00) break; if (elen != 6) { elem_parse_failed = true; break; } if (calc_crc) crc = crc32_be(crc, pos - 2, elen + 2); elems->cisco_dtpc_elem = pos; break; case WLAN_EID_TIMEOUT_INTERVAL: if (elen >= sizeof(struct ieee80211_timeout_interval_ie)) elems->timeout_int = (void *)pos; else elem_parse_failed = true; break; case WLAN_EID_BSS_MAX_IDLE_PERIOD: if (elen >= sizeof(*elems->max_idle_period_ie)) elems->max_idle_period_ie = (void *)pos; break; case WLAN_EID_EXTENSION: if (pos[0] == WLAN_EID_EXT_HE_MU_EDCA && elen >= (sizeof(*elems->mu_edca_param_set) + 1)) { elems->mu_edca_param_set = (void *)&pos[1]; if (calc_crc) crc = crc32_be(crc, pos - 2, elen + 2); } else if (pos[0] == WLAN_EID_EXT_HE_CAPABILITY) { elems->he_cap = (void *)&pos[1]; elems->he_cap_len = elen - 1; } else if (pos[0] == WLAN_EID_EXT_HE_OPERATION && elen >= sizeof(*elems->he_operation) && elen >= ieee80211_he_oper_size(&pos[1])) { elems->he_operation = (void *)&pos[1]; } else if (pos[0] == WLAN_EID_EXT_UORA && elen >= 1) { elems->uora_element = (void *)&pos[1]; } else if (pos[0] == WLAN_EID_EXT_MAX_CHANNEL_SWITCH_TIME && elen == 4) { elems->max_channel_switch_time = pos + 1; } else if (pos[0] == WLAN_EID_EXT_MULTIPLE_BSSID_CONFIGURATION && elen == 3) { elems->mbssid_config_ie = (void *)&pos[1]; } break; default: break; } if (elem_parse_failed) elems->parse_error = true; else __set_bit(id, seen_elems); } if (!for_each_element_completed(elem, start, len)) elems->parse_error = true; return crc; } static size_t ieee802_11_find_bssid_profile(const u8 *start, size_t len, struct ieee802_11_elems *elems, u8 *transmitter_bssid, u8 *bss_bssid, u8 **nontransmitted_profile) { const struct element *elem, *sub; size_t profile_len = 0; bool found = false; if (!bss_bssid || !transmitter_bssid) return profile_len; for_each_element_id(elem, WLAN_EID_MULTIPLE_BSSID, start, len) { if (elem->datalen < 2) continue; for_each_element(sub, elem->data + 1, elem->datalen - 1) { u8 new_bssid[ETH_ALEN]; const u8 *index; if (sub->id != 0 || sub->datalen < 4) { /* not a valid BSS profile */ continue; } if (sub->data[0] != WLAN_EID_NON_TX_BSSID_CAP || sub->data[1] != 2) { /* The first element of the * Nontransmitted BSSID Profile is not * the Nontransmitted BSSID Capability * element. 
*/ continue; } memset(*nontransmitted_profile, 0, len); profile_len = cfg80211_merge_profile(start, len, elem, sub, nontransmitted_profile, len); /* found a Nontransmitted BSSID Profile */ index = cfg80211_find_ie(WLAN_EID_MULTI_BSSID_IDX, *nontransmitted_profile, profile_len); if (!index || index[1] < 1 || index[2] == 0) { /* Invalid MBSSID Index element */ continue; } cfg80211_gen_new_bssid(transmitter_bssid, elem->data[0], index[2], new_bssid); if (ether_addr_equal(new_bssid, bss_bssid)) { found = true; elems->bssid_index_len = index[1]; elems->bssid_index = (void *)&index[2]; break; } } } return found ? profile_len : 0; } u32 ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action, struct ieee802_11_elems *elems, u64 filter, u32 crc, u8 *transmitter_bssid, u8 *bss_bssid) { const struct element *non_inherit = NULL; u8 *nontransmitted_profile; int nontransmitted_profile_len = 0; memset(elems, 0, sizeof(*elems)); elems->ie_start = start; elems->total_len = len; nontransmitted_profile = kmalloc(len, GFP_ATOMIC); if (nontransmitted_profile) { nontransmitted_profile_len = ieee802_11_find_bssid_profile(start, len, elems, transmitter_bssid, bss_bssid, &nontransmitted_profile); non_inherit = cfg80211_find_ext_elem(WLAN_EID_EXT_NON_INHERITANCE, nontransmitted_profile, nontransmitted_profile_len); } crc = _ieee802_11_parse_elems_crc(start, len, action, elems, filter, crc, non_inherit); /* Override with nontransmitted profile, if found */ if (nontransmitted_profile_len) _ieee802_11_parse_elems_crc(nontransmitted_profile, nontransmitted_profile_len, action, elems, 0, 0, NULL); if (elems->tim && !elems->parse_error) { const struct ieee80211_tim_ie *tim_ie = elems->tim; elems->dtim_period = tim_ie->dtim_period; elems->dtim_count = tim_ie->dtim_count; } /* Override DTIM period and count if needed */ if (elems->bssid_index && elems->bssid_index_len >= offsetofend(struct ieee80211_bssid_index, dtim_period)) elems->dtim_period = elems->bssid_index->dtim_period; if (elems->bssid_index && elems->bssid_index_len >= offsetofend(struct ieee80211_bssid_index, dtim_count)) elems->dtim_count = elems->bssid_index->dtim_count; kfree(nontransmitted_profile); return crc; } void ieee80211_regulatory_limit_wmm_params(struct ieee80211_sub_if_data *sdata, struct ieee80211_tx_queue_params *qparam, int ac) { struct ieee80211_chanctx_conf *chanctx_conf; const struct ieee80211_reg_rule *rrule; const struct ieee80211_wmm_ac *wmm_ac; u16 center_freq = 0; if (sdata->vif.type != NL80211_IFTYPE_AP && sdata->vif.type != NL80211_IFTYPE_STATION) return; rcu_read_lock(); chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); if (chanctx_conf) center_freq = chanctx_conf->def.chan->center_freq; if (!center_freq) { rcu_read_unlock(); return; } rrule = freq_reg_info(sdata->wdev.wiphy, MHZ_TO_KHZ(center_freq)); if (IS_ERR_OR_NULL(rrule) || !rrule->has_wmm) { rcu_read_unlock(); return; } if (sdata->vif.type == NL80211_IFTYPE_AP) wmm_ac = &rrule->wmm_rule.ap[ac]; else wmm_ac = &rrule->wmm_rule.client[ac]; qparam->cw_min = max_t(u16, qparam->cw_min, wmm_ac->cw_min); qparam->cw_max = max_t(u16, qparam->cw_max, wmm_ac->cw_max); qparam->aifs = max_t(u8, qparam->aifs, wmm_ac->aifsn); qparam->txop = min_t(u16, qparam->txop, wmm_ac->cot / 32); rcu_read_unlock(); } void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata, bool bss_notify, bool enable_qos) { struct ieee80211_local *local = sdata->local; struct ieee80211_tx_queue_params qparam; struct ieee80211_chanctx_conf *chanctx_conf; int ac; bool use_11b; bool is_ocb; /* 
Use different EDCA parameters if dot11OCBActivated=true */
	int aCWmin, aCWmax;

	if (!local->ops->conf_tx)
		return;

	if (local->hw.queues < IEEE80211_NUM_ACS)
		return;

	memset(&qparam, 0, sizeof(qparam));

	rcu_read_lock();
	chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
	use_11b = (chanctx_conf &&
		   chanctx_conf->def.chan->band == NL80211_BAND_2GHZ) &&
		 !(sdata->flags & IEEE80211_SDATA_OPERATING_GMODE);
	rcu_read_unlock();

	is_ocb = (sdata->vif.type == NL80211_IFTYPE_OCB);

	/* Set defaults according to 802.11-2007 Table 7-37 */
	aCWmax = 1023;
	if (use_11b)
		aCWmin = 31;
	else
		aCWmin = 15;

	/* Configure old 802.11b/g medium access rules. */
	qparam.cw_max = aCWmax;
	qparam.cw_min = aCWmin;
	qparam.txop = 0;
	qparam.aifs = 2;

	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
		/* Update if QoS is enabled. */
		if (enable_qos) {
			switch (ac) {
			case IEEE80211_AC_BK:
				qparam.cw_max = aCWmax;
				qparam.cw_min = aCWmin;
				qparam.txop = 0;
				if (is_ocb)
					qparam.aifs = 9;
				else
					qparam.aifs = 7;
				break;
			/* never happens but let's not leave undefined */
			default:
			case IEEE80211_AC_BE:
				qparam.cw_max = aCWmax;
				qparam.cw_min = aCWmin;
				qparam.txop = 0;
				if (is_ocb)
					qparam.aifs = 6;
				else
					qparam.aifs = 3;
				break;
			case IEEE80211_AC_VI:
				qparam.cw_max = aCWmin;
				qparam.cw_min = (aCWmin + 1) / 2 - 1;
				if (is_ocb)
					qparam.txop = 0;
				else if (use_11b)
					qparam.txop = 6016/32;
				else
					qparam.txop = 3008/32;
				if (is_ocb)
					qparam.aifs = 3;
				else
					qparam.aifs = 2;
				break;
			case IEEE80211_AC_VO:
				qparam.cw_max = (aCWmin + 1) / 2 - 1;
				qparam.cw_min = (aCWmin + 1) / 4 - 1;
				if (is_ocb)
					qparam.txop = 0;
				else if (use_11b)
					qparam.txop = 3264/32;
				else
					qparam.txop = 1504/32;
				qparam.aifs = 2;
				break;
			}
		}

		ieee80211_regulatory_limit_wmm_params(sdata, &qparam, ac);

		qparam.uapsd = false;

		sdata->tx_conf[ac] = qparam;
		drv_conf_tx(local, sdata, ac, &qparam);
	}

	if (sdata->vif.type != NL80211_IFTYPE_MONITOR &&
	    sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE &&
	    sdata->vif.type != NL80211_IFTYPE_NAN) {
		sdata->vif.bss_conf.qos = enable_qos;
		if (bss_notify)
			ieee80211_bss_info_change_notify(sdata,
							 BSS_CHANGED_QOS);
	}
}

void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
			 u16 transaction, u16 auth_alg, u16 status,
			 const u8 *extra, size_t extra_len, const u8 *da,
			 const u8 *bssid, const u8 *key, u8 key_len, u8 key_idx,
			 u32 tx_flags)
{
	struct ieee80211_local *local = sdata->local;
	struct sk_buff *skb;
	struct ieee80211_mgmt *mgmt;
	int err;

	/* 24 + 6 = header + auth_algo + auth_transaction + status_code */
	skb = dev_alloc_skb(local->hw.extra_tx_headroom + IEEE80211_WEP_IV_LEN +
			    24 + 6 + extra_len + IEEE80211_WEP_ICV_LEN);
	if (!skb)
		return;

	skb_reserve(skb, local->hw.extra_tx_headroom + IEEE80211_WEP_IV_LEN);

	mgmt = skb_put_zero(skb, 24 + 6);
	mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
					  IEEE80211_STYPE_AUTH);
	memcpy(mgmt->da, da, ETH_ALEN);
	memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
	memcpy(mgmt->bssid, bssid, ETH_ALEN);
	mgmt->u.auth.auth_alg = cpu_to_le16(auth_alg);
	mgmt->u.auth.auth_transaction = cpu_to_le16(transaction);
	mgmt->u.auth.status_code = cpu_to_le16(status);
	if (extra)
		skb_put_data(skb, extra, extra_len);

	if (auth_alg == WLAN_AUTH_SHARED_KEY && transaction == 3) {
		mgmt->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);
		err = ieee80211_wep_encrypt(local, skb, key, key_len, key_idx);
		WARN_ON(err);
	}

	IEEE80211_SKB_CB(skb)->flags |=
		IEEE80211_TX_INTFL_DONT_ENCRYPT | tx_flags;
	ieee80211_tx_skb(sdata, skb);
}

void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
				    const u8 *bssid, u16 stype, u16 reason,
				    bool send_frame, u8 *frame_buf)
{
	struct
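/*
 * (Editor's worked example for ieee80211_set_wmm_default() above)
 * TXOP is programmed in units of 32 usec, so e.g. AC_VI on an OFDM
 * channel gets 3008/32 = 94 units (3.008 ms), and 6016/32 = 188
 * units (6.016 ms) when 802.11b rates are in use.
 */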
ieee80211_local *local = sdata->local; struct sk_buff *skb; struct ieee80211_mgmt *mgmt = (void *)frame_buf; /* build frame */ mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | stype); mgmt->duration = 0; /* initialize only */ mgmt->seq_ctrl = 0; /* initialize only */ memcpy(mgmt->da, bssid, ETH_ALEN); memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); memcpy(mgmt->bssid, bssid, ETH_ALEN); /* u.deauth.reason_code == u.disassoc.reason_code */ mgmt->u.deauth.reason_code = cpu_to_le16(reason); if (send_frame) { skb = dev_alloc_skb(local->hw.extra_tx_headroom + IEEE80211_DEAUTH_FRAME_LEN); if (!skb) return; skb_reserve(skb, local->hw.extra_tx_headroom); /* copy in frame */ skb_put_data(skb, mgmt, IEEE80211_DEAUTH_FRAME_LEN); if (sdata->vif.type != NL80211_IFTYPE_STATION || !(sdata->u.mgd.flags & IEEE80211_STA_MFP_ENABLED)) IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; ieee80211_tx_skb(sdata, skb); } } static int ieee80211_build_preq_ies_band(struct ieee80211_local *local, u8 *buffer, size_t buffer_len, const u8 *ie, size_t ie_len, enum nl80211_band band, u32 rate_mask, struct cfg80211_chan_def *chandef, size_t *offset, u32 flags) { struct ieee80211_supported_band *sband; const struct ieee80211_sta_he_cap *he_cap; u8 *pos = buffer, *end = buffer + buffer_len; size_t noffset; int supp_rates_len, i; u8 rates[32]; int num_rates; int ext_rates_len; int shift; u32 rate_flags; bool have_80mhz = false; *offset = 0; sband = local->hw.wiphy->bands[band]; if (WARN_ON_ONCE(!sband)) return 0; rate_flags = ieee80211_chandef_rate_flags(chandef); shift = ieee80211_chandef_get_shift(chandef); num_rates = 0; for (i = 0; i < sband->n_bitrates; i++) { if ((BIT(i) & rate_mask) == 0) continue; /* skip rate */ if ((rate_flags & sband->bitrates[i].flags) != rate_flags) continue; rates[num_rates++] = (u8) DIV_ROUND_UP(sband->bitrates[i].bitrate, (1 << shift) * 5); } supp_rates_len = min_t(int, num_rates, 8); if (end - pos < 2 + supp_rates_len) goto out_err; *pos++ = WLAN_EID_SUPP_RATES; *pos++ = supp_rates_len; memcpy(pos, rates, supp_rates_len); pos += supp_rates_len; /* insert "request information" if in custom IEs */ if (ie && ie_len) { static const u8 before_extrates[] = { WLAN_EID_SSID, WLAN_EID_SUPP_RATES, WLAN_EID_REQUEST, }; noffset = ieee80211_ie_split(ie, ie_len, before_extrates, ARRAY_SIZE(before_extrates), *offset); if (end - pos < noffset - *offset) goto out_err; memcpy(pos, ie + *offset, noffset - *offset); pos += noffset - *offset; *offset = noffset; } ext_rates_len = num_rates - supp_rates_len; if (ext_rates_len > 0) { if (end - pos < 2 + ext_rates_len) goto out_err; *pos++ = WLAN_EID_EXT_SUPP_RATES; *pos++ = ext_rates_len; memcpy(pos, rates + supp_rates_len, ext_rates_len); pos += ext_rates_len; } if (chandef->chan && sband->band == NL80211_BAND_2GHZ) { if (end - pos < 3) goto out_err; *pos++ = WLAN_EID_DS_PARAMS; *pos++ = 1; *pos++ = ieee80211_frequency_to_channel( chandef->chan->center_freq); } if (flags & IEEE80211_PROBE_FLAG_MIN_CONTENT) goto done; /* insert custom IEs that go before HT */ if (ie && ie_len) { static const u8 before_ht[] = { /* * no need to list the ones split off already * (or generated here) */ WLAN_EID_DS_PARAMS, WLAN_EID_SUPPORTED_REGULATORY_CLASSES, }; noffset = ieee80211_ie_split(ie, ie_len, before_ht, ARRAY_SIZE(before_ht), *offset); if (end - pos < noffset - *offset) goto out_err; memcpy(pos, ie + *offset, noffset - *offset); pos += noffset - *offset; *offset = noffset; } if (sband->ht_cap.ht_supported) { if (end - pos < 2 + sizeof(struct 
ieee80211_ht_cap)) goto out_err; pos = ieee80211_ie_build_ht_cap(pos, &sband->ht_cap, sband->ht_cap.cap); } /* insert custom IEs that go before VHT */ if (ie && ie_len) { static const u8 before_vht[] = { /* * no need to list the ones split off already * (or generated here) */ WLAN_EID_BSS_COEX_2040, WLAN_EID_EXT_CAPABILITY, WLAN_EID_SSID_LIST, WLAN_EID_CHANNEL_USAGE, WLAN_EID_INTERWORKING, WLAN_EID_MESH_ID, /* 60 GHz (Multi-band, DMG, MMS) can't happen */ }; noffset = ieee80211_ie_split(ie, ie_len, before_vht, ARRAY_SIZE(before_vht), *offset); if (end - pos < noffset - *offset) goto out_err; memcpy(pos, ie + *offset, noffset - *offset); pos += noffset - *offset; *offset = noffset; } /* Check if any channel in this sband supports at least 80 MHz */ for (i = 0; i < sband->n_channels; i++) { if (sband->channels[i].flags & (IEEE80211_CHAN_DISABLED | IEEE80211_CHAN_NO_80MHZ)) continue; have_80mhz = true; break; } if (sband->vht_cap.vht_supported && have_80mhz) { if (end - pos < 2 + sizeof(struct ieee80211_vht_cap)) goto out_err; pos = ieee80211_ie_build_vht_cap(pos, &sband->vht_cap, sband->vht_cap.cap); } /* insert custom IEs that go before HE */ if (ie && ie_len) { static const u8 before_he[] = { /* * no need to list the ones split off before VHT * or generated here */ WLAN_EID_EXTENSION, WLAN_EID_EXT_FILS_REQ_PARAMS, WLAN_EID_AP_CSN, /* TODO: add 11ah/11aj/11ak elements */ }; noffset = ieee80211_ie_split(ie, ie_len, before_he, ARRAY_SIZE(before_he), *offset); if (end - pos < noffset - *offset) goto out_err; memcpy(pos, ie + *offset, noffset - *offset); pos += noffset - *offset; *offset = noffset; } he_cap = ieee80211_get_he_sta_cap(sband); if (he_cap) { pos = ieee80211_ie_build_he_cap(pos, he_cap, end); if (!pos) goto out_err; } /* * If adding more here, adjust code in main.c * that calculates local->scan_ies_len. 
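 * (Editor's note) That length must cover the worst case of every
 * element emitted above: the rate elements, DS params, any custom
 * IEs and the HT/VHT/HE capability elements.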
*/ return pos - buffer; out_err: WARN_ONCE(1, "not enough space for preq IEs\n"); done: return pos - buffer; } int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer, size_t buffer_len, struct ieee80211_scan_ies *ie_desc, const u8 *ie, size_t ie_len, u8 bands_used, u32 *rate_masks, struct cfg80211_chan_def *chandef, u32 flags) { size_t pos = 0, old_pos = 0, custom_ie_offset = 0; int i; memset(ie_desc, 0, sizeof(*ie_desc)); for (i = 0; i < NUM_NL80211_BANDS; i++) { if (bands_used & BIT(i)) { pos += ieee80211_build_preq_ies_band(local, buffer + pos, buffer_len - pos, ie, ie_len, i, rate_masks[i], chandef, &custom_ie_offset, flags); ie_desc->ies[i] = buffer + old_pos; ie_desc->len[i] = pos - old_pos; old_pos = pos; } } /* add any remaining custom IEs */ if (ie && ie_len) { if (WARN_ONCE(buffer_len - pos < ie_len - custom_ie_offset, "not enough space for preq custom IEs\n")) return pos; memcpy(buffer + pos, ie + custom_ie_offset, ie_len - custom_ie_offset); ie_desc->common_ies = buffer + pos; ie_desc->common_ie_len = ie_len - custom_ie_offset; pos += ie_len - custom_ie_offset; } return pos; }; struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata, const u8 *src, const u8 *dst, const u8 *bssid, u32 ratemask, struct ieee80211_channel *chan, const u8 *ssid, size_t ssid_len, const u8 *ie, size_t ie_len, u32 flags) { struct ieee80211_local *local = sdata->local; struct cfg80211_chan_def chandef; struct sk_buff *skb; struct ieee80211_mgmt *mgmt; int ies_len; u32 rate_masks[NUM_NL80211_BANDS] = {}; struct ieee80211_scan_ies dummy_ie_desc; /* * Do not send DS Channel parameter for directed probe requests * in order to maximize the chance that we get a response. Some * badly-behaved APs don't respond when this parameter is included. 
*/ chandef.width = sdata->vif.bss_conf.chandef.width; if (flags & IEEE80211_PROBE_FLAG_DIRECTED) chandef.chan = NULL; else chandef.chan = chan; skb = ieee80211_probereq_get(&local->hw, src, ssid, ssid_len, 100 + ie_len); if (!skb) return NULL; rate_masks[chan->band] = ratemask; ies_len = ieee80211_build_preq_ies(local, skb_tail_pointer(skb), skb_tailroom(skb), &dummy_ie_desc, ie, ie_len, BIT(chan->band), rate_masks, &chandef, flags); skb_put(skb, ies_len); mgmt = (void *)skb->data; if (dst) memcpy(mgmt->da, dst, ETH_ALEN); if (bssid) memcpy(mgmt->bssid, bssid, ETH_ALEN); IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; return skb; } u32 ieee80211_sta_get_rates(struct ieee80211_sub_if_data *sdata, struct ieee802_11_elems *elems, enum nl80211_band band, u32 *basic_rates) { struct ieee80211_supported_band *sband; size_t num_rates; u32 supp_rates, rate_flags; int i, j, shift; sband = sdata->local->hw.wiphy->bands[band]; if (WARN_ON(!sband)) return 1; rate_flags = ieee80211_chandef_rate_flags(&sdata->vif.bss_conf.chandef); shift = ieee80211_vif_get_shift(&sdata->vif); num_rates = sband->n_bitrates; supp_rates = 0; for (i = 0; i < elems->supp_rates_len + elems->ext_supp_rates_len; i++) { u8 rate = 0; int own_rate; bool is_basic; if (i < elems->supp_rates_len) rate = elems->supp_rates[i]; else if (elems->ext_supp_rates) rate = elems->ext_supp_rates [i - elems->supp_rates_len]; own_rate = 5 * (rate & 0x7f); is_basic = !!(rate & 0x80); if (is_basic && (rate & 0x7f) == BSS_MEMBERSHIP_SELECTOR_HT_PHY) continue; for (j = 0; j < num_rates; j++) { int brate; if ((rate_flags & sband->bitrates[j].flags) != rate_flags) continue; brate = DIV_ROUND_UP(sband->bitrates[j].bitrate, 1 << shift); if (brate == own_rate) { supp_rates |= BIT(j); if (basic_rates && is_basic) *basic_rates |= BIT(j); } } } return supp_rates; } void ieee80211_stop_device(struct ieee80211_local *local) { ieee80211_led_radio(local, false); ieee80211_mod_tpt_led_trig(local, 0, IEEE80211_TPT_LEDTRIG_FL_RADIO); cancel_work_sync(&local->reconfig_filter); flush_workqueue(local->workqueue); drv_stop(local); } static void ieee80211_flush_completed_scan(struct ieee80211_local *local, bool aborted) { /* It's possible that we don't handle the scan completion in * time during suspend, so if it's still marked as completed * here, queue the work and flush it to clean things up. * Instead of calling the worker function directly here, we * really queue it to avoid potential races with other flows * scheduling the same work. */ if (test_bit(SCAN_COMPLETED, &local->scanning)) { /* If coming from reconfiguration failure, abort the scan so * we don't attempt to continue a partial HW scan - which is * possible otherwise if (e.g.) the 2.4 GHz portion was the * completed scan, and a 5 GHz portion is still pending. */ if (aborted) set_bit(SCAN_ABORTED, &local->scanning); ieee80211_queue_delayed_work(&local->hw, &local->scan_work, 0); flush_delayed_work(&local->scan_work); } } static void ieee80211_handle_reconfig_failure(struct ieee80211_local *local) { struct ieee80211_sub_if_data *sdata; struct ieee80211_chanctx *ctx; /* * We get here if during resume the device can't be restarted properly. * We might also get here if this happens during HW reset, which is a * slightly different situation and we need to drop all connections in * the latter case. * * Ask cfg80211 to turn off all interfaces, this will result in more * warnings but at least we'll then get into a clean stopped state. 
*/ local->resuming = false; local->suspended = false; local->in_reconfig = false; ieee80211_flush_completed_scan(local, true); /* scheduled scan clearly can't be running any more, but tell * cfg80211 and clear local state */ ieee80211_sched_scan_end(local); list_for_each_entry(sdata, &local->interfaces, list) sdata->flags &= ~IEEE80211_SDATA_IN_DRIVER; /* Mark channel contexts as not being in the driver any more to avoid * removing them from the driver during the shutdown process... */ mutex_lock(&local->chanctx_mtx); list_for_each_entry(ctx, &local->chanctx_list, list) ctx->driver_present = false; mutex_unlock(&local->chanctx_mtx); cfg80211_shutdown_all_interfaces(local->hw.wiphy); } static void ieee80211_assign_chanctx(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata) { struct ieee80211_chanctx_conf *conf; struct ieee80211_chanctx *ctx; if (!local->use_chanctx) return; mutex_lock(&local->chanctx_mtx); conf = rcu_dereference_protected(sdata->vif.chanctx_conf, lockdep_is_held(&local->chanctx_mtx)); if (conf) { ctx = container_of(conf, struct ieee80211_chanctx, conf); drv_assign_vif_chanctx(local, sdata, ctx); } mutex_unlock(&local->chanctx_mtx); } static void ieee80211_reconfig_stations(struct ieee80211_sub_if_data *sdata) { struct ieee80211_local *local = sdata->local; struct sta_info *sta; /* add STAs back */ mutex_lock(&local->sta_mtx); list_for_each_entry(sta, &local->sta_list, list) { enum ieee80211_sta_state state; if (!sta->uploaded || sta->sdata != sdata) continue; for (state = IEEE80211_STA_NOTEXIST; state < sta->sta_state; state++) WARN_ON(drv_sta_state(local, sta->sdata, sta, state, state + 1)); } mutex_unlock(&local->sta_mtx); } static int ieee80211_reconfig_nan(struct ieee80211_sub_if_data *sdata) { struct cfg80211_nan_func *func, **funcs; int res, id, i = 0; res = drv_start_nan(sdata->local, sdata, &sdata->u.nan.conf); if (WARN_ON(res)) return res; funcs = kcalloc(sdata->local->hw.max_nan_de_entries + 1, sizeof(*funcs), GFP_KERNEL); if (!funcs) return -ENOMEM; /* Add all the functions: * This is a little bit ugly. We need to call a potentially sleeping * callback for each NAN function, so we can't hold the spinlock. */ spin_lock_bh(&sdata->u.nan.func_lock); idr_for_each_entry(&sdata->u.nan.function_inst_ids, func, id) funcs[i++] = func; spin_unlock_bh(&sdata->u.nan.func_lock); for (i = 0; funcs[i]; i++) { res = drv_add_nan_func(sdata->local, sdata, funcs[i]); if (WARN_ON(res)) ieee80211_nan_func_terminated(&sdata->vif, funcs[i]->instance_id, NL80211_NAN_FUNC_TERM_REASON_ERROR, GFP_KERNEL); } kfree(funcs); return 0; } int ieee80211_reconfig(struct ieee80211_local *local) { struct ieee80211_hw *hw = &local->hw; struct ieee80211_sub_if_data *sdata; struct ieee80211_chanctx *ctx; struct sta_info *sta; int res, i; bool reconfig_due_to_wowlan = false; struct ieee80211_sub_if_data *sched_scan_sdata; struct cfg80211_sched_scan_request *sched_scan_req; bool sched_scan_stopped = false; bool suspended = local->suspended; /* nothing to do if HW shouldn't run */ if (!local->open_count) goto wake_up; #ifdef CONFIG_PM if (suspended) local->resuming = true; if (local->wowlan) { /* * In the wowlan case, both mac80211 and the device * are functional when the resume op is called, so * clear local->suspended so the device could operate * normally (e.g. pass rx frames). 
*/ local->suspended = false; res = drv_resume(local); local->wowlan = false; if (res < 0) { local->resuming = false; return res; } if (res == 0) goto wake_up; WARN_ON(res > 1); /* * res is 1, which means the driver requested * to go through a regular reset on wakeup. * restore local->suspended in this case. */ reconfig_due_to_wowlan = true; local->suspended = true; } #endif /* * In case of hw_restart during suspend (without wowlan), * cancel restart work, as we are reconfiguring the device * anyway. * Note that restart_work is scheduled on a frozen workqueue, * so we can't deadlock in this case. */ if (suspended && local->in_reconfig && !reconfig_due_to_wowlan) cancel_work_sync(&local->restart_work); local->started = false; /* * Upon resume hardware can sometimes be goofy due to * various platform / driver / bus issues, so restarting * the device may at times not work immediately. Propagate * the error. */ res = drv_start(local); if (res) { if (suspended) WARN(1, "Hardware became unavailable upon resume. This could be a software issue prior to suspend or a hardware issue.\n"); else WARN(1, "Hardware became unavailable during restart.\n"); ieee80211_handle_reconfig_failure(local); return res; } /* setup fragmentation threshold */ drv_set_frag_threshold(local, hw->wiphy->frag_threshold); /* setup RTS threshold */ drv_set_rts_threshold(local, hw->wiphy->rts_threshold); /* reset coverage class */ drv_set_coverage_class(local, hw->wiphy->coverage_class); ieee80211_led_radio(local, true); ieee80211_mod_tpt_led_trig(local, IEEE80211_TPT_LEDTRIG_FL_RADIO, 0); /* add interfaces */ sdata = rtnl_dereference(local->monitor_sdata); if (sdata) { /* in HW restart it exists already */ WARN_ON(local->resuming); res = drv_add_interface(local, sdata); if (WARN_ON(res)) { RCU_INIT_POINTER(local->monitor_sdata, NULL); synchronize_net(); kfree(sdata); } } list_for_each_entry(sdata, &local->interfaces, list) { if (sdata->vif.type != NL80211_IFTYPE_AP_VLAN && sdata->vif.type != NL80211_IFTYPE_MONITOR && ieee80211_sdata_running(sdata)) { res = drv_add_interface(local, sdata); if (WARN_ON(res)) break; } } /* If adding any of the interfaces failed above, roll back and * report failure. 
*/ if (res) { list_for_each_entry_continue_reverse(sdata, &local->interfaces, list) if (sdata->vif.type != NL80211_IFTYPE_AP_VLAN && sdata->vif.type != NL80211_IFTYPE_MONITOR && ieee80211_sdata_running(sdata)) drv_remove_interface(local, sdata); ieee80211_handle_reconfig_failure(local); return res; } /* add channel contexts */ if (local->use_chanctx) { mutex_lock(&local->chanctx_mtx); list_for_each_entry(ctx, &local->chanctx_list, list) if (ctx->replace_state != IEEE80211_CHANCTX_REPLACES_OTHER) WARN_ON(drv_add_chanctx(local, ctx)); mutex_unlock(&local->chanctx_mtx); sdata = rtnl_dereference(local->monitor_sdata); if (sdata && ieee80211_sdata_running(sdata)) ieee80211_assign_chanctx(local, sdata); } /* reconfigure hardware */ ieee80211_hw_config(local, ~0); ieee80211_configure_filter(local); /* Finally also reconfigure all the BSS information */ list_for_each_entry(sdata, &local->interfaces, list) { u32 changed; if (!ieee80211_sdata_running(sdata)) continue; ieee80211_assign_chanctx(local, sdata); switch (sdata->vif.type) { case NL80211_IFTYPE_AP_VLAN: case NL80211_IFTYPE_MONITOR: break; case NL80211_IFTYPE_ADHOC: if (sdata->vif.bss_conf.ibss_joined) WARN_ON(drv_join_ibss(local, sdata)); /* fall through */ default: ieee80211_reconfig_stations(sdata); /* fall through */ case NL80211_IFTYPE_AP: /* AP stations are handled later */ for (i = 0; i < IEEE80211_NUM_ACS; i++) drv_conf_tx(local, sdata, i, &sdata->tx_conf[i]); break; } /* common change flags for all interface types */ changed = BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_ERP_PREAMBLE | BSS_CHANGED_ERP_SLOT | BSS_CHANGED_HT | BSS_CHANGED_BASIC_RATES | BSS_CHANGED_BEACON_INT | BSS_CHANGED_BSSID | BSS_CHANGED_CQM | BSS_CHANGED_QOS | BSS_CHANGED_IDLE | BSS_CHANGED_TXPOWER | BSS_CHANGED_MCAST_RATE; if (sdata->vif.mu_mimo_owner) changed |= BSS_CHANGED_MU_GROUPS; switch (sdata->vif.type) { case NL80211_IFTYPE_STATION: changed |= BSS_CHANGED_ASSOC | BSS_CHANGED_ARP_FILTER | BSS_CHANGED_PS; /* Re-send beacon info report to the driver */ if (sdata->u.mgd.have_beacon) changed |= BSS_CHANGED_BEACON_INFO; if (sdata->vif.bss_conf.max_idle_period || sdata->vif.bss_conf.protected_keep_alive) changed |= BSS_CHANGED_KEEP_ALIVE; sdata_lock(sdata); ieee80211_bss_info_change_notify(sdata, changed); sdata_unlock(sdata); break; case NL80211_IFTYPE_OCB: changed |= BSS_CHANGED_OCB; ieee80211_bss_info_change_notify(sdata, changed); break; case NL80211_IFTYPE_ADHOC: changed |= BSS_CHANGED_IBSS; /* fall through */ case NL80211_IFTYPE_AP: changed |= BSS_CHANGED_SSID | BSS_CHANGED_P2P_PS; if (sdata->vif.bss_conf.ftm_responder == 1 && wiphy_ext_feature_isset(sdata->local->hw.wiphy, NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER)) changed |= BSS_CHANGED_FTM_RESPONDER; if (sdata->vif.type == NL80211_IFTYPE_AP) { changed |= BSS_CHANGED_AP_PROBE_RESP; if (rcu_access_pointer(sdata->u.ap.beacon)) drv_start_ap(local, sdata); } /* fall through */ case NL80211_IFTYPE_MESH_POINT: if (sdata->vif.bss_conf.enable_beacon) { changed |= BSS_CHANGED_BEACON | BSS_CHANGED_BEACON_ENABLED; ieee80211_bss_info_change_notify(sdata, changed); } break; case NL80211_IFTYPE_NAN: res = ieee80211_reconfig_nan(sdata); if (res < 0) { ieee80211_handle_reconfig_failure(local); return res; } break; case NL80211_IFTYPE_WDS: case NL80211_IFTYPE_AP_VLAN: case NL80211_IFTYPE_MONITOR: case NL80211_IFTYPE_P2P_DEVICE: /* nothing to do */ break; case NL80211_IFTYPE_UNSPECIFIED: case NUM_NL80211_IFTYPES: case NL80211_IFTYPE_P2P_CLIENT: case NL80211_IFTYPE_P2P_GO: case NL80211_IFTYPE_NAN_DATA: WARN_ON(1); break; } } 
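/*
 * (Editor's note) Station entries are replayed later in this function
 * through the full state ladder, one drv_sta_state() transition at a
 * time, e.g. for a previously authorized STA:
 *	NOTEXIST -> NONE -> AUTH -> ASSOC -> AUTHORIZED
 */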
ieee80211_recalc_ps(local); /* * The sta might be in psm against the ap (e.g. because * this was the state before a hw restart), so we * explicitly send a null packet in order to make sure * it'll sync against the ap (and get out of psm). */ if (!(local->hw.conf.flags & IEEE80211_CONF_PS)) { list_for_each_entry(sdata, &local->interfaces, list) { if (sdata->vif.type != NL80211_IFTYPE_STATION) continue; if (!sdata->u.mgd.associated) continue; ieee80211_send_nullfunc(local, sdata, false); } } /* APs are now beaconing, add back stations */ mutex_lock(&local->sta_mtx); list_for_each_entry(sta, &local->sta_list, list) { enum ieee80211_sta_state state; if (!sta->uploaded) continue; if (sta->sdata->vif.type != NL80211_IFTYPE_AP && sta->sdata->vif.type != NL80211_IFTYPE_AP_VLAN) continue; for (state = IEEE80211_STA_NOTEXIST; state < sta->sta_state; state++) WARN_ON(drv_sta_state(local, sta->sdata, sta, state, state + 1)); } mutex_unlock(&local->sta_mtx); /* add back keys */ list_for_each_entry(sdata, &local->interfaces, list) ieee80211_reset_crypto_tx_tailroom(sdata); list_for_each_entry(sdata, &local->interfaces, list) if (ieee80211_sdata_running(sdata)) ieee80211_enable_keys(sdata); /* Reconfigure sched scan if it was interrupted by FW restart */ mutex_lock(&local->mtx); sched_scan_sdata = rcu_dereference_protected(local->sched_scan_sdata, lockdep_is_held(&local->mtx)); sched_scan_req = rcu_dereference_protected(local->sched_scan_req, lockdep_is_held(&local->mtx)); if (sched_scan_sdata && sched_scan_req) /* * Sched scan stopped, but we don't want to report it. Instead, * we're trying to reschedule. However, if more than one scan * plan was set, we cannot reschedule since we don't know which * scan plan was currently running (and some scan plans may have * already finished). */ if (sched_scan_req->n_scan_plans > 1 || __ieee80211_request_sched_scan_start(sched_scan_sdata, sched_scan_req)) { RCU_INIT_POINTER(local->sched_scan_sdata, NULL); RCU_INIT_POINTER(local->sched_scan_req, NULL); sched_scan_stopped = true; } mutex_unlock(&local->mtx); if (sched_scan_stopped) cfg80211_sched_scan_stopped_rtnl(local->hw.wiphy, 0); wake_up: if (local->monitors == local->open_count && local->monitors > 0) ieee80211_add_virtual_monitor(local); /* * Clear the WLAN_STA_BLOCK_BA flag so new aggregation * sessions can be established after a resume. * * Also tear down aggregation sessions since reconfiguring * them in a hardware restart scenario is not easily done * right now, and the hardware will have lost information * about the sessions, but we and the AP still think they * are active. This is really a workaround though. */ if (ieee80211_hw_check(hw, AMPDU_AGGREGATION)) { mutex_lock(&local->sta_mtx); list_for_each_entry(sta, &local->sta_list, list) { if (!local->resuming) ieee80211_sta_tear_down_BA_sessions( sta, AGG_STOP_LOCAL_REQUEST); clear_sta_flag(sta, WLAN_STA_BLOCK_BA); } mutex_unlock(&local->sta_mtx); } if (local->in_reconfig) { local->in_reconfig = false; barrier(); /* Restart deferred ROCs */ mutex_lock(&local->mtx); ieee80211_start_next_roc(local); mutex_unlock(&local->mtx); /* Requeue all works */ list_for_each_entry(sdata, &local->interfaces, list) ieee80211_queue_work(&local->hw, &sdata->work); } ieee80211_wake_queues_by_reason(hw, IEEE80211_MAX_QUEUE_MAP, IEEE80211_QUEUE_STOP_REASON_SUSPEND, false); /* * If this is for hw restart things are still running. * We may want to change that later, however. 
*/ if (local->open_count && (!suspended || reconfig_due_to_wowlan)) drv_reconfig_complete(local, IEEE80211_RECONFIG_TYPE_RESTART); if (!suspended) return 0; #ifdef CONFIG_PM /* first set suspended false, then resuming */ local->suspended = false; mb(); local->resuming = false; ieee80211_flush_completed_scan(local, false); if (local->open_count && !reconfig_due_to_wowlan) drv_reconfig_complete(local, IEEE80211_RECONFIG_TYPE_SUSPEND); list_for_each_entry(sdata, &local->interfaces, list) { if (!ieee80211_sdata_running(sdata)) continue; if (sdata->vif.type == NL80211_IFTYPE_STATION) ieee80211_sta_restart(sdata); } mod_timer(&local->sta_cleanup, jiffies + 1); #else WARN_ON(1); #endif return 0; } void ieee80211_resume_disconnect(struct ieee80211_vif *vif) { struct ieee80211_sub_if_data *sdata; struct ieee80211_local *local; struct ieee80211_key *key; if (WARN_ON(!vif)) return; sdata = vif_to_sdata(vif); local = sdata->local; if (WARN_ON(!local->resuming)) return; if (WARN_ON(vif->type != NL80211_IFTYPE_STATION)) return; sdata->flags |= IEEE80211_SDATA_DISCONNECT_RESUME; mutex_lock(&local->key_mtx); list_for_each_entry(key, &sdata->key_list, list) key->flags |= KEY_FLAG_TAINTED; mutex_unlock(&local->key_mtx); } EXPORT_SYMBOL_GPL(ieee80211_resume_disconnect); void ieee80211_recalc_smps(struct ieee80211_sub_if_data *sdata) { struct ieee80211_local *local = sdata->local; struct ieee80211_chanctx_conf *chanctx_conf; struct ieee80211_chanctx *chanctx; mutex_lock(&local->chanctx_mtx); chanctx_conf = rcu_dereference_protected(sdata->vif.chanctx_conf, lockdep_is_held(&local->chanctx_mtx)); /* * This function can be called from a work, thus it may be possible * that the chanctx_conf is removed (due to a disconnection, for * example). * So nothing should be done in such case. */ if (!chanctx_conf) goto unlock; chanctx = container_of(chanctx_conf, struct ieee80211_chanctx, conf); ieee80211_recalc_smps_chanctx(local, chanctx); unlock: mutex_unlock(&local->chanctx_mtx); } void ieee80211_recalc_min_chandef(struct ieee80211_sub_if_data *sdata) { struct ieee80211_local *local = sdata->local; struct ieee80211_chanctx_conf *chanctx_conf; struct ieee80211_chanctx *chanctx; mutex_lock(&local->chanctx_mtx); chanctx_conf = rcu_dereference_protected(sdata->vif.chanctx_conf, lockdep_is_held(&local->chanctx_mtx)); if (WARN_ON_ONCE(!chanctx_conf)) goto unlock; chanctx = container_of(chanctx_conf, struct ieee80211_chanctx, conf); ieee80211_recalc_chanctx_min_def(local, chanctx); unlock: mutex_unlock(&local->chanctx_mtx); } size_t ieee80211_ie_split_vendor(const u8 *ies, size_t ielen, size_t offset) { size_t pos = offset; while (pos < ielen && ies[pos] != WLAN_EID_VENDOR_SPECIFIC) pos += 2 + ies[pos + 1]; return pos; } static void _ieee80211_enable_rssi_reports(struct ieee80211_sub_if_data *sdata, int rssi_min_thold, int rssi_max_thold) { trace_api_enable_rssi_reports(sdata, rssi_min_thold, rssi_max_thold); if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION)) return; /* * Scale up threshold values before storing it, as the RSSI averaging * algorithm uses a scaled up value as well. Change this scaling * factor if the RSSI averaging algorithm changes. 
*/ sdata->u.mgd.rssi_min_thold = rssi_min_thold*16; sdata->u.mgd.rssi_max_thold = rssi_max_thold*16; } void ieee80211_enable_rssi_reports(struct ieee80211_vif *vif, int rssi_min_thold, int rssi_max_thold) { struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); WARN_ON(rssi_min_thold == rssi_max_thold || rssi_min_thold > rssi_max_thold); _ieee80211_enable_rssi_reports(sdata, rssi_min_thold, rssi_max_thold); } EXPORT_SYMBOL(ieee80211_enable_rssi_reports); void ieee80211_disable_rssi_reports(struct ieee80211_vif *vif) { struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); _ieee80211_enable_rssi_reports(sdata, 0, 0); } EXPORT_SYMBOL(ieee80211_disable_rssi_reports); u8 *ieee80211_ie_build_ht_cap(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap, u16 cap) { __le16 tmp; *pos++ = WLAN_EID_HT_CAPABILITY; *pos++ = sizeof(struct ieee80211_ht_cap); memset(pos, 0, sizeof(struct ieee80211_ht_cap)); /* capability flags */ tmp = cpu_to_le16(cap); memcpy(pos, &tmp, sizeof(u16)); pos += sizeof(u16); /* AMPDU parameters */ *pos++ = ht_cap->ampdu_factor | (ht_cap->ampdu_density << IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT); /* MCS set */ memcpy(pos, &ht_cap->mcs, sizeof(ht_cap->mcs)); pos += sizeof(ht_cap->mcs); /* extended capabilities */ pos += sizeof(__le16); /* BF capabilities */ pos += sizeof(__le32); /* antenna selection */ pos += sizeof(u8); return pos; } u8 *ieee80211_ie_build_vht_cap(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap, u32 cap) { __le32 tmp; *pos++ = WLAN_EID_VHT_CAPABILITY; *pos++ = sizeof(struct ieee80211_vht_cap); memset(pos, 0, sizeof(struct ieee80211_vht_cap)); /* capability flags */ tmp = cpu_to_le32(cap); memcpy(pos, &tmp, sizeof(u32)); pos += sizeof(u32); /* VHT MCS set */ memcpy(pos, &vht_cap->vht_mcs, sizeof(vht_cap->vht_mcs)); pos += sizeof(vht_cap->vht_mcs); return pos; } u8 *ieee80211_ie_build_he_cap(u8 *pos, const struct ieee80211_sta_he_cap *he_cap, u8 *end) { u8 n; u8 ie_len; u8 *orig_pos = pos; /* Make sure we have place for the IE */ /* * TODO: the 1 added is because this temporarily is under the EXTENSION * IE. Get rid of it when it moves. */ if (!he_cap) return orig_pos; n = ieee80211_he_mcs_nss_size(&he_cap->he_cap_elem); ie_len = 2 + 1 + sizeof(he_cap->he_cap_elem) + n + ieee80211_he_ppe_size(he_cap->ppe_thres[0], he_cap->he_cap_elem.phy_cap_info); if ((end - pos) < ie_len) return orig_pos; *pos++ = WLAN_EID_EXTENSION; pos++; /* We'll set the size later below */ *pos++ = WLAN_EID_EXT_HE_CAPABILITY; /* Fixed data */ memcpy(pos, &he_cap->he_cap_elem, sizeof(he_cap->he_cap_elem)); pos += sizeof(he_cap->he_cap_elem); memcpy(pos, &he_cap->he_mcs_nss_supp, n); pos += n; /* Check if PPE Threshold should be present */ if ((he_cap->he_cap_elem.phy_cap_info[6] & IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) == 0) goto end; /* * Calculate how many PPET16/PPET8 pairs are to come. Algorithm: * (NSS_M1 + 1) x (num of 1 bits in RU_INDEX_BITMASK) */ n = hweight8(he_cap->ppe_thres[0] & IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK); n *= (1 + ((he_cap->ppe_thres[0] & IEEE80211_PPE_THRES_NSS_MASK) >> IEEE80211_PPE_THRES_NSS_POS)); /* * Each pair is 6 bits, and we need to add the 7 "header" bits to the * total size. 
*/ n = (n * IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2) + 7; n = DIV_ROUND_UP(n, 8); /* Copy PPE Thresholds */ memcpy(pos, &he_cap->ppe_thres, n); pos += n; end: orig_pos[1] = (pos - orig_pos) - 2; return pos; } u8 *ieee80211_ie_build_ht_oper(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap, const struct cfg80211_chan_def *chandef, u16 prot_mode, bool rifs_mode) { struct ieee80211_ht_operation *ht_oper; /* Build HT Information */ *pos++ = WLAN_EID_HT_OPERATION; *pos++ = sizeof(struct ieee80211_ht_operation); ht_oper = (struct ieee80211_ht_operation *)pos; ht_oper->primary_chan = ieee80211_frequency_to_channel( chandef->chan->center_freq); switch (chandef->width) { case NL80211_CHAN_WIDTH_160: case NL80211_CHAN_WIDTH_80P80: case NL80211_CHAN_WIDTH_80: case NL80211_CHAN_WIDTH_40: if (chandef->center_freq1 > chandef->chan->center_freq) ht_oper->ht_param = IEEE80211_HT_PARAM_CHA_SEC_ABOVE; else ht_oper->ht_param = IEEE80211_HT_PARAM_CHA_SEC_BELOW; break; default: ht_oper->ht_param = IEEE80211_HT_PARAM_CHA_SEC_NONE; break; } if (ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 && chandef->width != NL80211_CHAN_WIDTH_20_NOHT && chandef->width != NL80211_CHAN_WIDTH_20) ht_oper->ht_param |= IEEE80211_HT_PARAM_CHAN_WIDTH_ANY; if (rifs_mode) ht_oper->ht_param |= IEEE80211_HT_PARAM_RIFS_MODE; ht_oper->operation_mode = cpu_to_le16(prot_mode); ht_oper->stbc_param = 0x0000; /* It seems that Basic MCS set and Supported MCS set are identical for the first 10 bytes */ memset(&ht_oper->basic_set, 0, 16); memcpy(&ht_oper->basic_set, &ht_cap->mcs, 10); return pos + sizeof(struct ieee80211_ht_operation); } void ieee80211_ie_build_wide_bw_cs(u8 *pos, const struct cfg80211_chan_def *chandef) { *pos++ = WLAN_EID_WIDE_BW_CHANNEL_SWITCH; /* EID */ *pos++ = 3; /* IE length */ /* New channel width */ switch (chandef->width) { case NL80211_CHAN_WIDTH_80: *pos++ = IEEE80211_VHT_CHANWIDTH_80MHZ; break; case NL80211_CHAN_WIDTH_160: *pos++ = IEEE80211_VHT_CHANWIDTH_160MHZ; break; case NL80211_CHAN_WIDTH_80P80: *pos++ = IEEE80211_VHT_CHANWIDTH_80P80MHZ; break; default: *pos++ = IEEE80211_VHT_CHANWIDTH_USE_HT; } /* new center frequency segment 0 */ *pos++ = ieee80211_frequency_to_channel(chandef->center_freq1); /* new center frequency segment 1 */ if (chandef->center_freq2) *pos++ = ieee80211_frequency_to_channel(chandef->center_freq2); else *pos++ = 0; } u8 *ieee80211_ie_build_vht_oper(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap, const struct cfg80211_chan_def *chandef) { struct ieee80211_vht_operation *vht_oper; *pos++ = WLAN_EID_VHT_OPERATION; *pos++ = sizeof(struct ieee80211_vht_operation); vht_oper = (struct ieee80211_vht_operation *)pos; vht_oper->center_freq_seg0_idx = ieee80211_frequency_to_channel( chandef->center_freq1); if (chandef->center_freq2) vht_oper->center_freq_seg1_idx = ieee80211_frequency_to_channel(chandef->center_freq2); else vht_oper->center_freq_seg1_idx = 0x00; switch (chandef->width) { case NL80211_CHAN_WIDTH_160: /* * Convert 160 MHz channel width to new style as interop * workaround. */ vht_oper->chan_width = IEEE80211_VHT_CHANWIDTH_80MHZ; vht_oper->center_freq_seg1_idx = vht_oper->center_freq_seg0_idx; if (chandef->chan->center_freq < chandef->center_freq1) vht_oper->center_freq_seg0_idx -= 8; else vht_oper->center_freq_seg0_idx += 8; break; case NL80211_CHAN_WIDTH_80P80: /* * Convert 80+80 MHz channel width to new style as interop * workaround. 
*/ vht_oper->chan_width = IEEE80211_VHT_CHANWIDTH_80MHZ; break; case NL80211_CHAN_WIDTH_80: vht_oper->chan_width = IEEE80211_VHT_CHANWIDTH_80MHZ; break; default: vht_oper->chan_width = IEEE80211_VHT_CHANWIDTH_USE_HT; break; } /* don't require special VHT peer rates */ vht_oper->basic_mcs_set = cpu_to_le16(0xffff); return pos + sizeof(struct ieee80211_vht_operation); } bool ieee80211_chandef_ht_oper(const struct ieee80211_ht_operation *ht_oper, struct cfg80211_chan_def *chandef) { enum nl80211_channel_type channel_type; if (!ht_oper) return false; switch (ht_oper->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) { case IEEE80211_HT_PARAM_CHA_SEC_NONE: channel_type = NL80211_CHAN_HT20; break; case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: channel_type = NL80211_CHAN_HT40PLUS; break; case IEEE80211_HT_PARAM_CHA_SEC_BELOW: channel_type = NL80211_CHAN_HT40MINUS; break; default: channel_type = NL80211_CHAN_NO_HT; return false; } cfg80211_chandef_create(chandef, chandef->chan, channel_type); return true; } bool ieee80211_chandef_vht_oper(struct ieee80211_hw *hw, const struct ieee80211_vht_operation *oper, const struct ieee80211_ht_operation *htop, struct cfg80211_chan_def *chandef) { struct cfg80211_chan_def new = *chandef; int cf0, cf1; int ccfs0, ccfs1, ccfs2; int ccf0, ccf1; if (!oper || !htop) return false; ccfs0 = oper->center_freq_seg0_idx; ccfs1 = oper->center_freq_seg1_idx; ccfs2 = (le16_to_cpu(htop->operation_mode) & IEEE80211_HT_OP_MODE_CCFS2_MASK) >> IEEE80211_HT_OP_MODE_CCFS2_SHIFT; /* when parsing (and we know how to) CCFS1 and CCFS2 are equivalent */ ccf0 = ccfs0; ccf1 = ccfs1; if (!ccfs1 && ieee80211_hw_check(hw, SUPPORTS_VHT_EXT_NSS_BW)) ccf1 = ccfs2; cf0 = ieee80211_channel_to_frequency(ccf0, chandef->chan->band); cf1 = ieee80211_channel_to_frequency(ccf1, chandef->chan->band); switch (oper->chan_width) { case IEEE80211_VHT_CHANWIDTH_USE_HT: /* just use HT information directly */ break; case IEEE80211_VHT_CHANWIDTH_80MHZ: new.width = NL80211_CHAN_WIDTH_80; new.center_freq1 = cf0; /* If needed, adjust based on the newer interop workaround. 
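	 * (Editor's worked example) A 160 MHz BSS on channels 36-64
	 * advertises CCFS0 = 42 and CCFS1 = 50; diff = 8 selects
	 * 160 MHz centered on CCFS1, while diff > 8 selects 80+80 MHz.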
*/ if (ccf1) { unsigned int diff; diff = abs(ccf1 - ccf0); if (diff == 8) { new.width = NL80211_CHAN_WIDTH_160; new.center_freq1 = cf1; } else if (diff > 8) { new.width = NL80211_CHAN_WIDTH_80P80; new.center_freq2 = cf1; } } break; case IEEE80211_VHT_CHANWIDTH_160MHZ: /* deprecated encoding */ new.width = NL80211_CHAN_WIDTH_160; new.center_freq1 = cf0; break; case IEEE80211_VHT_CHANWIDTH_80P80MHZ: /* deprecated encoding */ new.width = NL80211_CHAN_WIDTH_80P80; new.center_freq1 = cf0; new.center_freq2 = cf1; break; default: return false; } if (!cfg80211_chandef_valid(&new)) return false; *chandef = new; return true; } int ieee80211_parse_bitrates(struct cfg80211_chan_def *chandef, const struct ieee80211_supported_band *sband, const u8 *srates, int srates_len, u32 *rates) { u32 rate_flags = ieee80211_chandef_rate_flags(chandef); int shift = ieee80211_chandef_get_shift(chandef); struct ieee80211_rate *br; int brate, rate, i, j, count = 0; *rates = 0; for (i = 0; i < srates_len; i++) { rate = srates[i] & 0x7f; for (j = 0; j < sband->n_bitrates; j++) { br = &sband->bitrates[j]; if ((rate_flags & br->flags) != rate_flags) continue; brate = DIV_ROUND_UP(br->bitrate, (1 << shift) * 5); if (brate == rate) { *rates |= BIT(j); count++; break; } } } return count; } int ieee80211_add_srates_ie(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, bool need_basic, enum nl80211_band band) { struct ieee80211_local *local = sdata->local; struct ieee80211_supported_band *sband; int rate, shift; u8 i, rates, *pos; u32 basic_rates = sdata->vif.bss_conf.basic_rates; u32 rate_flags; shift = ieee80211_vif_get_shift(&sdata->vif); rate_flags = ieee80211_chandef_rate_flags(&sdata->vif.bss_conf.chandef); sband = local->hw.wiphy->bands[band]; rates = 0; for (i = 0; i < sband->n_bitrates; i++) { if ((rate_flags & sband->bitrates[i].flags) != rate_flags) continue; rates++; } if (rates > 8) rates = 8; if (skb_tailroom(skb) < rates + 2) return -ENOMEM; pos = skb_put(skb, rates + 2); *pos++ = WLAN_EID_SUPP_RATES; *pos++ = rates; for (i = 0; i < rates; i++) { u8 basic = 0; if ((rate_flags & sband->bitrates[i].flags) != rate_flags) continue; if (need_basic && basic_rates & BIT(i)) basic = 0x80; rate = DIV_ROUND_UP(sband->bitrates[i].bitrate, 5 * (1 << shift)); *pos++ = basic | (u8) rate; } return 0; } int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, bool need_basic, enum nl80211_band band) { struct ieee80211_local *local = sdata->local; struct ieee80211_supported_band *sband; int rate, shift; u8 i, exrates, *pos; u32 basic_rates = sdata->vif.bss_conf.basic_rates; u32 rate_flags; rate_flags = ieee80211_chandef_rate_flags(&sdata->vif.bss_conf.chandef); shift = ieee80211_vif_get_shift(&sdata->vif); sband = local->hw.wiphy->bands[band]; exrates = 0; for (i = 0; i < sband->n_bitrates; i++) { if ((rate_flags & sband->bitrates[i].flags) != rate_flags) continue; exrates++; } if (exrates > 8) exrates -= 8; else exrates = 0; if (skb_tailroom(skb) < exrates + 2) return -ENOMEM; if (exrates) { pos = skb_put(skb, exrates + 2); *pos++ = WLAN_EID_EXT_SUPP_RATES; *pos++ = exrates; for (i = 8; i < sband->n_bitrates; i++) { u8 basic = 0; if ((rate_flags & sband->bitrates[i].flags) != rate_flags) continue; if (need_basic && basic_rates & BIT(i)) basic = 0x80; rate = DIV_ROUND_UP(sband->bitrates[i].bitrate, 5 * (1 << shift)); *pos++ = basic | (u8) rate; } } return 0; } int ieee80211_ave_rssi(struct ieee80211_vif *vif) { struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); struct 
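/*
 * (Editor's worked example for the (ext) supported rates IEs above)
 * Rates are encoded in 500 kbps units, with bit 7 marking a basic
 * rate: 0x96 = 0x80 | 22 means "basic 11 Mbps", 0x0c = 12 means
 * "6 Mbps".
 */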
ieee80211_if_managed *ifmgd = &sdata->u.mgd;

	if (WARN_ON_ONCE(sdata->vif.type != NL80211_IFTYPE_STATION)) {
		/* non-managed type interfaces */
		return 0;
	}
	return -ewma_beacon_signal_read(&ifmgd->ave_beacon_signal);
}
EXPORT_SYMBOL_GPL(ieee80211_ave_rssi);

u8 ieee80211_mcs_to_chains(const struct ieee80211_mcs_info *mcs)
{
	if (!mcs)
		return 1;

	/* TODO: consider rx_highest */

	if (mcs->rx_mask[3])
		return 4;
	if (mcs->rx_mask[2])
		return 3;
	if (mcs->rx_mask[1])
		return 2;
	return 1;
}

/**
 * ieee80211_calculate_rx_timestamp - calculate timestamp in frame
 * @local: mac80211 hw info struct
 * @status: RX status
 * @mpdu_len: total MPDU length (including FCS)
 * @mpdu_offset: offset into MPDU to calculate timestamp at
 *
 * This function calculates the RX timestamp at the given MPDU offset, taking
 * into account what the RX timestamp was. An offset of 0 will just normalize
 * the timestamp to TSF at beginning of MPDU reception.
 */
u64 ieee80211_calculate_rx_timestamp(struct ieee80211_local *local,
				     struct ieee80211_rx_status *status,
				     unsigned int mpdu_len,
				     unsigned int mpdu_offset)
{
	u64 ts = status->mactime;
	struct rate_info ri;
	u16 rate;

	if (WARN_ON(!ieee80211_have_rx_timestamp(status)))
		return 0;

	memset(&ri, 0, sizeof(ri));

	ri.bw = status->bw;

	/* Fill cfg80211 rate info */
	switch (status->encoding) {
	case RX_ENC_HT:
		ri.mcs = status->rate_idx;
		ri.flags |= RATE_INFO_FLAGS_MCS;
		if (status->enc_flags & RX_ENC_FLAG_SHORT_GI)
			ri.flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case RX_ENC_VHT:
		ri.flags |= RATE_INFO_FLAGS_VHT_MCS;
		ri.mcs = status->rate_idx;
		ri.nss = status->nss;
		if (status->enc_flags & RX_ENC_FLAG_SHORT_GI)
			ri.flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	default:
		WARN_ON(1);
		/* fall through */
	case RX_ENC_LEGACY: {
		struct ieee80211_supported_band *sband;
		int shift = 0;
		int bitrate;

		switch (status->bw) {
		case RATE_INFO_BW_10:
			shift = 1;
			break;
		case RATE_INFO_BW_5:
			shift = 2;
			break;
		}

		sband = local->hw.wiphy->bands[status->band];
		bitrate = sband->bitrates[status->rate_idx].bitrate;
		ri.legacy = DIV_ROUND_UP(bitrate, (1 << shift));

		if (status->flag & RX_FLAG_MACTIME_PLCP_START) {
			/* TODO: handle HT/VHT preambles */
			if (status->band == NL80211_BAND_5GHZ) {
				ts += 20 << shift;
				mpdu_offset += 2;
			} else if (status->enc_flags & RX_ENC_FLAG_SHORTPRE) {
				ts += 96;
			} else {
				ts += 192;
			}
		}
		break;
		}
	}

	rate = cfg80211_calculate_bitrate(&ri);
	if (WARN_ONCE(!rate,
		      "Invalid bitrate: flags=0x%llx, idx=%d, vht_nss=%d\n",
		      (unsigned long long)status->flag, status->rate_idx,
		      status->nss))
		return 0;

	/* rewind from end of MPDU */
	if (status->flag & RX_FLAG_MACTIME_END)
		ts -= mpdu_len * 8 * 10 / rate;

	ts += mpdu_offset * 8 * 10 / rate;

	return ts;
}

void ieee80211_dfs_cac_cancel(struct ieee80211_local *local)
{
	struct ieee80211_sub_if_data *sdata;
	struct cfg80211_chan_def chandef;

	/* for interface list, to avoid linking iflist_mtx and chanctx_mtx */
	ASSERT_RTNL();

	mutex_lock(&local->mtx);
	list_for_each_entry(sdata, &local->interfaces, list) {
		/*
		 * it might be waiting for the local->mtx, but then
		 * by the time it gets it, sdata->wdev.cac_started
		 * will no longer be true
		 */
		cancel_delayed_work(&sdata->dfs_cac_timer_work);

		if (sdata->wdev.cac_started) {
			chandef = sdata->vif.bss_conf.chandef;
			ieee80211_vif_release_channel(sdata);
			cfg80211_cac_event(sdata->dev,
					   &chandef,
					   NL80211_RADAR_CAC_ABORTED,
					   GFP_KERNEL);
		}
	}
	mutex_unlock(&local->mtx);
}

void ieee80211_dfs_radar_detected_work(struct work_struct *work)
{
	struct ieee80211_local *local =
		container_of(work, struct ieee80211_local, radar_detected_work);
	struct cfg80211_chan_def chandef =
local->hw.conf.chandef; struct ieee80211_chanctx *ctx; int num_chanctx = 0; mutex_lock(&local->chanctx_mtx); list_for_each_entry(ctx, &local->chanctx_list, list) { if (ctx->replace_state == IEEE80211_CHANCTX_REPLACES_OTHER) continue; num_chanctx++; chandef = ctx->conf.def; } mutex_unlock(&local->chanctx_mtx); rtnl_lock(); ieee80211_dfs_cac_cancel(local); rtnl_unlock(); if (num_chanctx > 1) /* XXX: multi-channel is not supported yet */ WARN_ON(1); else cfg80211_radar_event(local->hw.wiphy, &chandef, GFP_KERNEL); } void ieee80211_radar_detected(struct ieee80211_hw *hw) { struct ieee80211_local *local = hw_to_local(hw); trace_api_radar_detected(local); schedule_work(&local->radar_detected_work); } EXPORT_SYMBOL(ieee80211_radar_detected); u32 ieee80211_chandef_downgrade(struct cfg80211_chan_def *c) { u32 ret; int tmp; switch (c->width) { case NL80211_CHAN_WIDTH_20: c->width = NL80211_CHAN_WIDTH_20_NOHT; ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT; break; case NL80211_CHAN_WIDTH_40: c->width = NL80211_CHAN_WIDTH_20; c->center_freq1 = c->chan->center_freq; ret = IEEE80211_STA_DISABLE_40MHZ | IEEE80211_STA_DISABLE_VHT; break; case NL80211_CHAN_WIDTH_80: tmp = (30 + c->chan->center_freq - c->center_freq1)/20; /* n_P40 */ tmp /= 2; /* freq_P40 */ c->center_freq1 = c->center_freq1 - 20 + 40 * tmp; c->width = NL80211_CHAN_WIDTH_40; ret = IEEE80211_STA_DISABLE_VHT; break; case NL80211_CHAN_WIDTH_80P80: c->center_freq2 = 0; c->width = NL80211_CHAN_WIDTH_80; ret = IEEE80211_STA_DISABLE_80P80MHZ | IEEE80211_STA_DISABLE_160MHZ; break; case NL80211_CHAN_WIDTH_160: /* n_P20 */ tmp = (70 + c->chan->center_freq - c->center_freq1)/20; /* n_P80 */ tmp /= 4; c->center_freq1 = c->center_freq1 - 40 + 80 * tmp; c->width = NL80211_CHAN_WIDTH_80; ret = IEEE80211_STA_DISABLE_80P80MHZ | IEEE80211_STA_DISABLE_160MHZ; break; default: case NL80211_CHAN_WIDTH_20_NOHT: WARN_ON_ONCE(1); c->width = NL80211_CHAN_WIDTH_20_NOHT; ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT; break; case NL80211_CHAN_WIDTH_5: case NL80211_CHAN_WIDTH_10: WARN_ON_ONCE(1); /* keep c->width */ ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT; break; } WARN_ON_ONCE(!cfg80211_chandef_valid(c)); return ret; } /* * Returns true if smps_mode_new is strictly more restrictive than * smps_mode_old. 
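* For example, going from IEEE80211_SMPS_OFF to any other mode is restrictive, while going from IEEE80211_SMPS_STATIC to any other mode is not.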
*/ bool ieee80211_smps_is_restrictive(enum ieee80211_smps_mode smps_mode_old, enum ieee80211_smps_mode smps_mode_new) { if (WARN_ON_ONCE(smps_mode_old == IEEE80211_SMPS_AUTOMATIC || smps_mode_new == IEEE80211_SMPS_AUTOMATIC)) return false; switch (smps_mode_old) { case IEEE80211_SMPS_STATIC: return false; case IEEE80211_SMPS_DYNAMIC: return smps_mode_new == IEEE80211_SMPS_STATIC; case IEEE80211_SMPS_OFF: return smps_mode_new != IEEE80211_SMPS_OFF; default: WARN_ON(1); } return false; } int ieee80211_send_action_csa(struct ieee80211_sub_if_data *sdata, struct cfg80211_csa_settings *csa_settings) { struct sk_buff *skb; struct ieee80211_mgmt *mgmt; struct ieee80211_local *local = sdata->local; int freq; int hdr_len = offsetofend(struct ieee80211_mgmt, u.action.u.chan_switch); u8 *pos; if (sdata->vif.type != NL80211_IFTYPE_ADHOC && sdata->vif.type != NL80211_IFTYPE_MESH_POINT) return -EOPNOTSUPP; skb = dev_alloc_skb(local->tx_headroom + hdr_len + 5 + /* channel switch announcement element */ 3 + /* secondary channel offset element */ 5 + /* wide bandwidth channel switch announcement */ 8); /* mesh channel switch parameters element */ if (!skb) return -ENOMEM; skb_reserve(skb, local->tx_headroom); mgmt = skb_put_zero(skb, hdr_len); mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ACTION); eth_broadcast_addr(mgmt->da); memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); if (ieee80211_vif_is_mesh(&sdata->vif)) { memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN); } else { struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; memcpy(mgmt->bssid, ifibss->bssid, ETH_ALEN); } mgmt->u.action.category = WLAN_CATEGORY_SPECTRUM_MGMT; mgmt->u.action.u.chan_switch.action_code = WLAN_ACTION_SPCT_CHL_SWITCH; pos = skb_put(skb, 5); *pos++ = WLAN_EID_CHANNEL_SWITCH; /* EID */ *pos++ = 3; /* IE length */ *pos++ = csa_settings->block_tx ? 1 : 0; /* CSA mode */ freq = csa_settings->chandef.chan->center_freq; *pos++ = ieee80211_frequency_to_channel(freq); /* channel */ *pos++ = csa_settings->count; /* count */ if (csa_settings->chandef.width == NL80211_CHAN_WIDTH_40) { enum nl80211_channel_type ch_type; skb_put(skb, 3); *pos++ = WLAN_EID_SECONDARY_CHANNEL_OFFSET; /* EID */ *pos++ = 1; /* IE length */ ch_type = cfg80211_get_chandef_type(&csa_settings->chandef); if (ch_type == NL80211_CHAN_HT40PLUS) *pos++ = IEEE80211_HT_PARAM_CHA_SEC_ABOVE; else *pos++ = IEEE80211_HT_PARAM_CHA_SEC_BELOW; } if (ieee80211_vif_is_mesh(&sdata->vif)) { struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; skb_put(skb, 8); *pos++ = WLAN_EID_CHAN_SWITCH_PARAM; /* EID */ *pos++ = 6; /* IE length */ *pos++ = sdata->u.mesh.mshcfg.dot11MeshTTL; /* Mesh TTL */ *pos = 0x00; /* Mesh Flag: Tx Restrict, Initiator, Reason */ *pos |= WLAN_EID_CHAN_SWITCH_PARAM_INITIATOR; *pos++ |= csa_settings->block_tx ? 
WLAN_EID_CHAN_SWITCH_PARAM_TX_RESTRICT : 0x00; put_unaligned_le16(WLAN_REASON_MESH_CHAN, pos); /* Reason Cd */ pos += 2; put_unaligned_le16(ifmsh->pre_value, pos);/* Precedence Value */ pos += 2; } if (csa_settings->chandef.width == NL80211_CHAN_WIDTH_80 || csa_settings->chandef.width == NL80211_CHAN_WIDTH_80P80 || csa_settings->chandef.width == NL80211_CHAN_WIDTH_160) { skb_put(skb, 5); ieee80211_ie_build_wide_bw_cs(pos, &csa_settings->chandef); } ieee80211_tx_skb(sdata, skb); return 0; } bool ieee80211_cs_valid(const struct ieee80211_cipher_scheme *cs) { return !(cs == NULL || cs->cipher == 0 || cs->hdr_len < cs->pn_len + cs->pn_off || cs->hdr_len <= cs->key_idx_off || cs->key_idx_shift > 7 || cs->key_idx_mask == 0); } bool ieee80211_cs_list_valid(const struct ieee80211_cipher_scheme *cs, int n) { int i; /* Ensure we have enough iftype bitmap space for all iftype values */ WARN_ON((NUM_NL80211_IFTYPES / 8 + 1) > sizeof(cs[0].iftype)); for (i = 0; i < n; i++) if (!ieee80211_cs_valid(&cs[i])) return false; return true; } const struct ieee80211_cipher_scheme * ieee80211_cs_get(struct ieee80211_local *local, u32 cipher, enum nl80211_iftype iftype) { const struct ieee80211_cipher_scheme *l = local->hw.cipher_schemes; int n = local->hw.n_cipher_schemes; int i; const struct ieee80211_cipher_scheme *cs = NULL; for (i = 0; i < n; i++) { if (l[i].cipher == cipher) { cs = &l[i]; break; } } if (!cs || !(cs->iftype & BIT(iftype))) return NULL; return cs; } int ieee80211_cs_headroom(struct ieee80211_local *local, struct cfg80211_crypto_settings *crypto, enum nl80211_iftype iftype) { const struct ieee80211_cipher_scheme *cs; int headroom = IEEE80211_ENCRYPT_HEADROOM; int i; for (i = 0; i < crypto->n_ciphers_pairwise; i++) { cs = ieee80211_cs_get(local, crypto->ciphers_pairwise[i], iftype); if (cs && headroom < cs->hdr_len) headroom = cs->hdr_len; } for (i = 0; i < crypto->n_ciphers_group; i++) { cs = ieee80211_cs_get(local, crypto->ciphers_group[i], iftype); if (cs && headroom < cs->hdr_len) headroom = cs->hdr_len; } return headroom; } static bool ieee80211_extend_noa_desc(struct ieee80211_noa_data *data, u32 tsf, int i) { s32 end = data->desc[i].start + data->desc[i].duration - (tsf + 1); int skip; if (end > 0) return false; /* One shot NOA */ if (data->count[i] == 1) return false; if (data->desc[i].interval == 0) return false; /* End time is in the past, check for repetitions */ skip = DIV_ROUND_UP(-end, data->desc[i].interval); if (data->count[i] < 255) { if (data->count[i] <= skip) { data->count[i] = 0; return false; } data->count[i] -= skip; } data->desc[i].start += skip * data->desc[i].interval; return true; } static bool ieee80211_extend_absent_time(struct ieee80211_noa_data *data, u32 tsf, s32 *offset) { bool ret = false; int i; for (i = 0; i < IEEE80211_P2P_NOA_DESC_MAX; i++) { s32 cur; if (!data->count[i]) continue; if (ieee80211_extend_noa_desc(data, tsf + *offset, i)) ret = true; cur = data->desc[i].start - tsf; if (cur > *offset) continue; cur = data->desc[i].start + data->desc[i].duration - tsf; if (cur > *offset) *offset = cur; } return ret; } static u32 ieee80211_get_noa_absent_time(struct ieee80211_noa_data *data, u32 tsf) { s32 offset = 0; int tries = 0; /* * arbitrary limit, used to avoid infinite loops when combined NoA * descriptors cover the full time period. 
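* For example, descriptors whose absence periods keep overlapping each other's start times could otherwise extend the computed offset forever.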
*/ int max_tries = 5; ieee80211_extend_absent_time(data, tsf, &offset); do { if (!ieee80211_extend_absent_time(data, tsf, &offset)) break; tries++; } while (tries < max_tries); return offset; } void ieee80211_update_p2p_noa(struct ieee80211_noa_data *data, u32 tsf) { u32 next_offset = BIT(31) - 1; int i; data->absent = 0; data->has_next_tsf = false; for (i = 0; i < IEEE80211_P2P_NOA_DESC_MAX; i++) { s32 start; if (!data->count[i]) continue; ieee80211_extend_noa_desc(data, tsf, i); start = data->desc[i].start - tsf; if (start <= 0) data->absent |= BIT(i); if (next_offset > start) next_offset = start; data->has_next_tsf = true; } if (data->absent) next_offset = ieee80211_get_noa_absent_time(data, tsf); data->next_tsf = tsf + next_offset; } EXPORT_SYMBOL(ieee80211_update_p2p_noa); int ieee80211_parse_p2p_noa(const struct ieee80211_p2p_noa_attr *attr, struct ieee80211_noa_data *data, u32 tsf) { int ret = 0; int i; memset(data, 0, sizeof(*data)); for (i = 0; i < IEEE80211_P2P_NOA_DESC_MAX; i++) { const struct ieee80211_p2p_noa_desc *desc = &attr->desc[i]; if (!desc->count || !desc->duration) continue; data->count[i] = desc->count; data->desc[i].start = le32_to_cpu(desc->start_time); data->desc[i].duration = le32_to_cpu(desc->duration); data->desc[i].interval = le32_to_cpu(desc->interval); if (data->count[i] > 1 && data->desc[i].interval < data->desc[i].duration) continue; ieee80211_extend_noa_desc(data, tsf, i); ret++; } if (ret) ieee80211_update_p2p_noa(data, tsf); return ret; } EXPORT_SYMBOL(ieee80211_parse_p2p_noa); void ieee80211_recalc_dtim(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata) { u64 tsf = drv_get_tsf(local, sdata); u64 dtim_count = 0; u16 beacon_int = sdata->vif.bss_conf.beacon_int * 1024; u8 dtim_period = sdata->vif.bss_conf.dtim_period; struct ps_data *ps; u8 bcns_from_dtim; if (tsf == -1ULL || !beacon_int || !dtim_period) return; if (sdata->vif.type == NL80211_IFTYPE_AP || sdata->vif.type == NL80211_IFTYPE_AP_VLAN) { if (!sdata->bss) return; ps = &sdata->bss->ps; } else if (ieee80211_vif_is_mesh(&sdata->vif)) { ps = &sdata->u.mesh.ps; } else { return; } /* * actually finds last dtim_count, mac80211 will update in * __beacon_add_tim(). * dtim_count = dtim_period - (tsf / bcn_int) % dtim_period */ do_div(tsf, beacon_int); bcns_from_dtim = do_div(tsf, dtim_period); /* just had a DTIM */ if (!bcns_from_dtim) dtim_count = 0; else dtim_count = dtim_period - bcns_from_dtim; ps->dtim_count = dtim_count; } static u8 ieee80211_chanctx_radar_detect(struct ieee80211_local *local, struct ieee80211_chanctx *ctx) { struct ieee80211_sub_if_data *sdata; u8 radar_detect = 0; lockdep_assert_held(&local->chanctx_mtx); if (WARN_ON(ctx->replace_state == IEEE80211_CHANCTX_WILL_BE_REPLACED)) return 0; list_for_each_entry(sdata, &ctx->reserved_vifs, reserved_chanctx_list) if (sdata->reserved_radar_required) radar_detect |= BIT(sdata->reserved_chandef.width); /* * An in-place reservation context should not have any assigned vifs * until it replaces the other context. 
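* The vifs are only moved over at the point where the reservation actually replaces the old context.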
*/ WARN_ON(ctx->replace_state == IEEE80211_CHANCTX_REPLACES_OTHER && !list_empty(&ctx->assigned_vifs)); list_for_each_entry(sdata, &ctx->assigned_vifs, assigned_chanctx_list) if (sdata->radar_required) radar_detect |= BIT(sdata->vif.bss_conf.chandef.width); return radar_detect; } int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata, const struct cfg80211_chan_def *chandef, enum ieee80211_chanctx_mode chanmode, u8 radar_detect) { struct ieee80211_local *local = sdata->local; struct ieee80211_sub_if_data *sdata_iter; enum nl80211_iftype iftype = sdata->wdev.iftype; struct ieee80211_chanctx *ctx; int total = 1; struct iface_combination_params params = { .radar_detect = radar_detect, }; lockdep_assert_held(&local->chanctx_mtx); if (WARN_ON(hweight32(radar_detect) > 1)) return -EINVAL; if (WARN_ON(chandef && chanmode == IEEE80211_CHANCTX_SHARED && !chandef->chan)) return -EINVAL; if (WARN_ON(iftype >= NUM_NL80211_IFTYPES)) return -EINVAL; if (sdata->vif.type == NL80211_IFTYPE_AP || sdata->vif.type == NL80211_IFTYPE_MESH_POINT) { /* * always passing this is harmless, since it'll be the * same value that cfg80211 finds if it finds the same * interface ... and that's always allowed */ params.new_beacon_int = sdata->vif.bss_conf.beacon_int; } /* Always allow software iftypes */ if (local->hw.wiphy->software_iftypes & BIT(iftype)) { if (radar_detect) return -EINVAL; return 0; } if (chandef) params.num_different_channels = 1; if (iftype != NL80211_IFTYPE_UNSPECIFIED) params.iftype_num[iftype] = 1; list_for_each_entry(ctx, &local->chanctx_list, list) { if (ctx->replace_state == IEEE80211_CHANCTX_WILL_BE_REPLACED) continue; params.radar_detect |= ieee80211_chanctx_radar_detect(local, ctx); if (ctx->mode == IEEE80211_CHANCTX_EXCLUSIVE) { params.num_different_channels++; continue; } if (chandef && chanmode == IEEE80211_CHANCTX_SHARED && cfg80211_chandef_compatible(chandef, &ctx->conf.def)) continue; params.num_different_channels++; } list_for_each_entry_rcu(sdata_iter, &local->interfaces, list) { struct wireless_dev *wdev_iter; wdev_iter = &sdata_iter->wdev; if (sdata_iter == sdata || !ieee80211_sdata_running(sdata_iter) || local->hw.wiphy->software_iftypes & BIT(wdev_iter->iftype)) continue; params.iftype_num[wdev_iter->iftype]++; total++; } if (total == 1 && !params.radar_detect) return 0; return cfg80211_check_combinations(local->hw.wiphy, ¶ms); } static void ieee80211_iter_max_chans(const struct ieee80211_iface_combination *c, void *data) { u32 *max_num_different_channels = data; *max_num_different_channels = max(*max_num_different_channels, c->num_different_channels); } int ieee80211_max_num_channels(struct ieee80211_local *local) { struct ieee80211_sub_if_data *sdata; struct ieee80211_chanctx *ctx; u32 max_num_different_channels = 1; int err; struct iface_combination_params params = {0}; lockdep_assert_held(&local->chanctx_mtx); list_for_each_entry(ctx, &local->chanctx_list, list) { if (ctx->replace_state == IEEE80211_CHANCTX_WILL_BE_REPLACED) continue; params.num_different_channels++; params.radar_detect |= ieee80211_chanctx_radar_detect(local, ctx); } list_for_each_entry_rcu(sdata, &local->interfaces, list) params.iftype_num[sdata->wdev.iftype]++; err = cfg80211_iter_combinations(local->hw.wiphy, ¶ms, ieee80211_iter_max_chans, &max_num_different_channels); if (err < 0) return err; return max_num_different_channels; } u8 *ieee80211_add_wmm_info_ie(u8 *buf, u8 qosinfo) { *buf++ = WLAN_EID_VENDOR_SPECIFIC; *buf++ = 7; /* len */ *buf++ = 0x00; /* Microsoft OUI 00:50:F2 */ *buf++ = 
0x50; *pos++ = 0xf2; *pos++ = 2; /* WME */ *pos++ = 0; /* WME info */ *pos++ = 1; /* WME ver */ *pos++ = qosinfo; /* U-APSD not in use */ return buf; } void ieee80211_txq_get_depth(struct ieee80211_txq *txq, unsigned long *frame_cnt, unsigned long *byte_cnt) { struct txq_info *txqi = to_txq_info(txq); u32 frag_cnt = 0, frag_bytes = 0; struct sk_buff *skb; skb_queue_walk(&txqi->frags, skb) { frag_cnt++; frag_bytes += skb->len; } if (frame_cnt) *frame_cnt = txqi->tin.backlog_packets + frag_cnt; if (byte_cnt) *byte_cnt = txqi->tin.backlog_bytes + frag_bytes; } EXPORT_SYMBOL(ieee80211_txq_get_depth); const u8 ieee80211_ac_to_qos_mask[IEEE80211_NUM_ACS] = { IEEE80211_WMM_IE_STA_QOSINFO_AC_VO, IEEE80211_WMM_IE_STA_QOSINFO_AC_VI, IEEE80211_WMM_IE_STA_QOSINFO_AC_BE, IEEE80211_WMM_IE_STA_QOSINFO_AC_BK }; backport-iwlwifi-dkms-7906/net/mac80211/vht.c000066400000000000000000000454021351064634500205750ustar00rootroot00000000000000/* * VHT handling * * Portions of this file * Copyright(c) 2015 - 2016 Intel Deutschland GmbH * Copyright (C) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include #include #include #include "ieee80211_i.h" "rate.h" static void __check_vhtcap_disable(struct ieee80211_sub_if_data *sdata, struct ieee80211_sta_vht_cap *vht_cap, u32 flag) { __le32 le_flag = cpu_to_le32(flag); if (sdata->u.mgd.vht_capa_mask.vht_cap_info & le_flag && !(sdata->u.mgd.vht_capa.vht_cap_info & le_flag)) vht_cap->cap &= ~flag; } void ieee80211_apply_vhtcap_overrides(struct ieee80211_sub_if_data *sdata, struct ieee80211_sta_vht_cap *vht_cap) { int i; u16 rxmcs_mask, rxmcs_cap, rxmcs_n, txmcs_mask, txmcs_cap, txmcs_n; if (!vht_cap->vht_supported) return; if (sdata->vif.type != NL80211_IFTYPE_STATION) return; __check_vhtcap_disable(sdata, vht_cap, IEEE80211_VHT_CAP_RXLDPC); __check_vhtcap_disable(sdata, vht_cap, IEEE80211_VHT_CAP_SHORT_GI_80); __check_vhtcap_disable(sdata, vht_cap, IEEE80211_VHT_CAP_SHORT_GI_160); __check_vhtcap_disable(sdata, vht_cap, IEEE80211_VHT_CAP_TXSTBC); __check_vhtcap_disable(sdata, vht_cap, IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE); __check_vhtcap_disable(sdata, vht_cap, IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE); __check_vhtcap_disable(sdata, vht_cap, IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN); __check_vhtcap_disable(sdata, vht_cap, IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN); /* Allow user to decrease AMPDU length exponent */ if (sdata->u.mgd.vht_capa_mask.vht_cap_info & cpu_to_le32(IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK)) { u32 cap, n; n = le32_to_cpu(sdata->u.mgd.vht_capa.vht_cap_info) & IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK; n >>= IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT; cap = vht_cap->cap & IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK; cap >>= IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT; if (n < cap) { vht_cap->cap &= ~IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK; vht_cap->cap |= n << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT; } } /* Allow the user to decrease MCSes */ rxmcs_mask = le16_to_cpu(sdata->u.mgd.vht_capa_mask.supp_mcs.rx_mcs_map); rxmcs_n = le16_to_cpu(sdata->u.mgd.vht_capa.supp_mcs.rx_mcs_map); rxmcs_n &= rxmcs_mask; rxmcs_cap = le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map); txmcs_mask = le16_to_cpu(sdata->u.mgd.vht_capa_mask.supp_mcs.tx_mcs_map); txmcs_n = le16_to_cpu(sdata->u.mgd.vht_capa.supp_mcs.tx_mcs_map); txmcs_n &= txmcs_mask; txmcs_cap
= le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map); for (i = 0; i < 8; i++) { u8 m, n, c; m = (rxmcs_mask >> 2*i) & IEEE80211_VHT_MCS_NOT_SUPPORTED; n = (rxmcs_n >> 2*i) & IEEE80211_VHT_MCS_NOT_SUPPORTED; c = (rxmcs_cap >> 2*i) & IEEE80211_VHT_MCS_NOT_SUPPORTED; if (m && ((c != IEEE80211_VHT_MCS_NOT_SUPPORTED && n < c) || n == IEEE80211_VHT_MCS_NOT_SUPPORTED)) { rxmcs_cap &= ~(3 << 2*i); rxmcs_cap |= (rxmcs_n & (3 << 2*i)); } m = (txmcs_mask >> 2*i) & IEEE80211_VHT_MCS_NOT_SUPPORTED; n = (txmcs_n >> 2*i) & IEEE80211_VHT_MCS_NOT_SUPPORTED; c = (txmcs_cap >> 2*i) & IEEE80211_VHT_MCS_NOT_SUPPORTED; if (m && ((c != IEEE80211_VHT_MCS_NOT_SUPPORTED && n < c) || n == IEEE80211_VHT_MCS_NOT_SUPPORTED)) { txmcs_cap &= ~(3 << 2*i); txmcs_cap |= (txmcs_n & (3 << 2*i)); } } vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(rxmcs_cap); vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(txmcs_cap); } void ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata, struct ieee80211_supported_band *sband, const struct ieee80211_vht_cap *vht_cap_ie, struct sta_info *sta) { struct ieee80211_sta_vht_cap *vht_cap = &sta->sta.vht_cap; struct ieee80211_sta_vht_cap own_cap; u32 cap_info, i; bool have_80mhz; memset(vht_cap, 0, sizeof(*vht_cap)); if (!sta->sta.ht_cap.ht_supported) return; if (!vht_cap_ie || !sband->vht_cap.vht_supported) return; /* Allow VHT if at least one channel on the sband supports 80 MHz */ have_80mhz = false; for (i = 0; i < sband->n_channels; i++) { if (sband->channels[i].flags & (IEEE80211_CHAN_DISABLED | IEEE80211_CHAN_NO_80MHZ)) continue; have_80mhz = true; break; } if (!have_80mhz) return; /* * A VHT STA must support 40 MHz, but if we verify that here * then we break a few things - some APs (e.g. Netgear R6300v2 * and others based on the BCM4360 chipset) will unset this * capability bit when operating in 20 MHz. */ vht_cap->vht_supported = true; own_cap = sband->vht_cap; /* * If user has specified capability overrides, take care * of that if the station we're setting up is the AP that * we advertised a restricted capability set to. Override * our own capabilities and then use those below. 
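* Note that own_cap is a local copy, so the sband's advertised capabilities themselves are not modified here.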
*/ if (sdata->vif.type == NL80211_IFTYPE_STATION && !test_sta_flag(sta, WLAN_STA_TDLS_PEER)) ieee80211_apply_vhtcap_overrides(sdata, &own_cap); /* take some capabilities as-is */ cap_info = le32_to_cpu(vht_cap_ie->vht_cap_info); vht_cap->cap = cap_info; vht_cap->cap &= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895 | IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 | IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 | IEEE80211_VHT_CAP_RXLDPC | IEEE80211_VHT_CAP_VHT_TXOP_PS | IEEE80211_VHT_CAP_HTC_VHT | IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK | IEEE80211_VHT_CAP_VHT_LINK_ADAPTATION_VHT_UNSOL_MFB | IEEE80211_VHT_CAP_VHT_LINK_ADAPTATION_VHT_MRQ_MFB | IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN | IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN; /* and some based on our own capabilities */ switch (own_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) { case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ: vht_cap->cap |= cap_info & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ; break; case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ: vht_cap->cap |= cap_info & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK; break; default: /* nothing */ break; } /* symmetric capabilities */ vht_cap->cap |= cap_info & own_cap.cap & (IEEE80211_VHT_CAP_SHORT_GI_80 | IEEE80211_VHT_CAP_SHORT_GI_160); /* remaining ones */ if (own_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE) vht_cap->cap |= cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE | IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK); if (own_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE) vht_cap->cap |= cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK); if (own_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE) vht_cap->cap |= cap_info & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE; if (own_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE) vht_cap->cap |= cap_info & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE; if (own_cap.cap & IEEE80211_VHT_CAP_TXSTBC) vht_cap->cap |= cap_info & IEEE80211_VHT_CAP_RXSTBC_MASK; if (own_cap.cap & IEEE80211_VHT_CAP_RXSTBC_MASK) vht_cap->cap |= cap_info & IEEE80211_VHT_CAP_TXSTBC; /* Copy peer MCS info, the driver might need them. 
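* The copied maps are then restricted below to the MCSes that both the peer and we support.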
*/ memcpy(&vht_cap->vht_mcs, &vht_cap_ie->supp_mcs, sizeof(struct ieee80211_vht_mcs_info)); /* copy EXT_NSS_BW Support value or remove the capability */ if (ieee80211_hw_check(&sdata->local->hw, SUPPORTS_VHT_EXT_NSS_BW)) vht_cap->cap |= (cap_info & IEEE80211_VHT_CAP_EXT_NSS_BW_MASK); else vht_cap->vht_mcs.tx_highest &= ~cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE); /* but also restrict MCSes */ for (i = 0; i < 8; i++) { u16 own_rx, own_tx, peer_rx, peer_tx; own_rx = le16_to_cpu(own_cap.vht_mcs.rx_mcs_map); own_rx = (own_rx >> i * 2) & IEEE80211_VHT_MCS_NOT_SUPPORTED; own_tx = le16_to_cpu(own_cap.vht_mcs.tx_mcs_map); own_tx = (own_tx >> i * 2) & IEEE80211_VHT_MCS_NOT_SUPPORTED; peer_rx = le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map); peer_rx = (peer_rx >> i * 2) & IEEE80211_VHT_MCS_NOT_SUPPORTED; peer_tx = le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map); peer_tx = (peer_tx >> i * 2) & IEEE80211_VHT_MCS_NOT_SUPPORTED; if (peer_tx != IEEE80211_VHT_MCS_NOT_SUPPORTED) { if (own_rx == IEEE80211_VHT_MCS_NOT_SUPPORTED) peer_tx = IEEE80211_VHT_MCS_NOT_SUPPORTED; else if (own_rx < peer_tx) peer_tx = own_rx; } if (peer_rx != IEEE80211_VHT_MCS_NOT_SUPPORTED) { if (own_tx == IEEE80211_VHT_MCS_NOT_SUPPORTED) peer_rx = IEEE80211_VHT_MCS_NOT_SUPPORTED; else if (own_tx < peer_rx) peer_rx = own_tx; } vht_cap->vht_mcs.rx_mcs_map &= ~cpu_to_le16(IEEE80211_VHT_MCS_NOT_SUPPORTED << i * 2); vht_cap->vht_mcs.rx_mcs_map |= cpu_to_le16(peer_rx << i * 2); vht_cap->vht_mcs.tx_mcs_map &= ~cpu_to_le16(IEEE80211_VHT_MCS_NOT_SUPPORTED << i * 2); vht_cap->vht_mcs.tx_mcs_map |= cpu_to_le16(peer_tx << i * 2); } /* * This is a workaround for VHT-enabled STAs which break the spec * and have the VHT-MCS Rx map filled in with value 3 for all eight * spatial streams; an example is the AR9462. * * As per spec, in section 22.1.1 Introduction to the VHT PHY * A VHT STA shall support at least single spatial stream VHT-MCSs * 0 to 7 (transmit and receive) in all supported channel widths. */ if (vht_cap->vht_mcs.rx_mcs_map == cpu_to_le16(0xFFFF)) { vht_cap->vht_supported = false; sdata_info(sdata, "Ignoring VHT IE from %pM due to invalid rx_mcs_map\n", sta->addr); return; } /* finally set up the bandwidth */ switch (vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) { case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ: case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ: sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_160; break; default: sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_80; if (!(vht_cap->vht_mcs.tx_highest & cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE))) break; /* * If this is non-zero, then it does support 160 MHz after all, * in one form or the other. We don't distinguish here (or even * above) between 160 and 80+80 yet. */ if (cap_info & IEEE80211_VHT_CAP_EXT_NSS_BW_MASK) sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_160; } sta->sta.bandwidth = ieee80211_sta_cur_vht_bw(sta); /* If HT IE reported 3839 bytes only, stay with that size.
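* The VHT maximum MPDU length may be larger, but it must not override the stricter limit the peer already advertised via HT.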
*/ if (sta->sta.max_amsdu_len == IEEE80211_MAX_MPDU_LEN_HT_3839) return; switch (vht_cap->cap & IEEE80211_VHT_CAP_MAX_MPDU_MASK) { case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454: sta->sta.max_amsdu_len = IEEE80211_MAX_MPDU_LEN_VHT_11454; break; case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991: sta->sta.max_amsdu_len = IEEE80211_MAX_MPDU_LEN_VHT_7991; break; case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895: default: sta->sta.max_amsdu_len = IEEE80211_MAX_MPDU_LEN_VHT_3895; break; } } enum ieee80211_sta_rx_bandwidth ieee80211_sta_cap_rx_bw(struct sta_info *sta) { struct ieee80211_sta_vht_cap *vht_cap = &sta->sta.vht_cap; u32 cap_width; if (!vht_cap->vht_supported) return sta->sta.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 ? IEEE80211_STA_RX_BW_40 : IEEE80211_STA_RX_BW_20; cap_width = vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK; if (cap_width == IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ || cap_width == IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ) return IEEE80211_STA_RX_BW_160; /* * If this is non-zero, then it does support 160 MHz after all, * in one form or the other. We don't distinguish here (or even * above) between 160 and 80+80 yet. */ if (vht_cap->cap & IEEE80211_VHT_CAP_EXT_NSS_BW_MASK) return IEEE80211_STA_RX_BW_160; return IEEE80211_STA_RX_BW_80; } enum nl80211_chan_width ieee80211_sta_cap_chan_bw(struct sta_info *sta) { struct ieee80211_sta_vht_cap *vht_cap = &sta->sta.vht_cap; u32 cap_width; if (!vht_cap->vht_supported) { if (!sta->sta.ht_cap.ht_supported) return NL80211_CHAN_WIDTH_20_NOHT; return sta->sta.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 ? NL80211_CHAN_WIDTH_40 : NL80211_CHAN_WIDTH_20; } cap_width = vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK; if (cap_width == IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ) return NL80211_CHAN_WIDTH_160; else if (cap_width == IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ) return NL80211_CHAN_WIDTH_80P80; return NL80211_CHAN_WIDTH_80; } enum nl80211_chan_width ieee80211_sta_rx_bw_to_chan_width(struct sta_info *sta) { enum ieee80211_sta_rx_bandwidth cur_bw = sta->sta.bandwidth; struct ieee80211_sta_vht_cap *vht_cap = &sta->sta.vht_cap; u32 cap_width; switch (cur_bw) { case IEEE80211_STA_RX_BW_20: if (!sta->sta.ht_cap.ht_supported) return NL80211_CHAN_WIDTH_20_NOHT; else return NL80211_CHAN_WIDTH_20; case IEEE80211_STA_RX_BW_40: return NL80211_CHAN_WIDTH_40; case IEEE80211_STA_RX_BW_80: return NL80211_CHAN_WIDTH_80; case IEEE80211_STA_RX_BW_160: cap_width = vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK; if (cap_width == IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ) return NL80211_CHAN_WIDTH_160; return NL80211_CHAN_WIDTH_80P80; default: return NL80211_CHAN_WIDTH_20; } } enum ieee80211_sta_rx_bandwidth ieee80211_chan_width_to_rx_bw(enum nl80211_chan_width width) { switch (width) { case NL80211_CHAN_WIDTH_20_NOHT: case NL80211_CHAN_WIDTH_20: return IEEE80211_STA_RX_BW_20; case NL80211_CHAN_WIDTH_40: return IEEE80211_STA_RX_BW_40; case NL80211_CHAN_WIDTH_80: return IEEE80211_STA_RX_BW_80; case NL80211_CHAN_WIDTH_160: case NL80211_CHAN_WIDTH_80P80: return IEEE80211_STA_RX_BW_160; default: WARN_ON_ONCE(1); return IEEE80211_STA_RX_BW_20; } } enum ieee80211_sta_rx_bandwidth ieee80211_sta_cur_vht_bw(struct sta_info *sta) { struct ieee80211_sub_if_data *sdata = sta->sdata; enum ieee80211_sta_rx_bandwidth bw; enum nl80211_chan_width bss_width = sdata->vif.bss_conf.chandef.width; bw = ieee80211_sta_cap_rx_bw(sta); bw = min(bw, sta->cur_max_bandwidth); /* Don't consider AP's bandwidth for TDLS peers, section 11.23.1 of * 
IEEE80211-2016 specification makes higher bandwidth operation * possible on the TDLS link if the peers have wider bandwidth * capability. */ if (test_sta_flag(sta, WLAN_STA_TDLS_PEER) && test_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW)) return bw; bw = min(bw, ieee80211_chan_width_to_rx_bw(bss_width)); return bw; } void ieee80211_sta_set_rx_nss(struct sta_info *sta) { u8 ht_rx_nss = 0, vht_rx_nss = 0; /* if we received a notification already don't overwrite it */ if (sta->sta.rx_nss) return; if (sta->sta.ht_cap.ht_supported) { if (sta->sta.ht_cap.mcs.rx_mask[0]) ht_rx_nss++; if (sta->sta.ht_cap.mcs.rx_mask[1]) ht_rx_nss++; if (sta->sta.ht_cap.mcs.rx_mask[2]) ht_rx_nss++; if (sta->sta.ht_cap.mcs.rx_mask[3]) ht_rx_nss++; /* FIXME: consider rx_highest? */ } if (sta->sta.vht_cap.vht_supported) { int i; u16 rx_mcs_map; rx_mcs_map = le16_to_cpu(sta->sta.vht_cap.vht_mcs.rx_mcs_map); for (i = 7; i >= 0; i--) { u8 mcs = (rx_mcs_map >> (2 * i)) & 3; if (mcs != IEEE80211_VHT_MCS_NOT_SUPPORTED) { vht_rx_nss = i + 1; break; } } /* FIXME: consider rx_highest? */ } ht_rx_nss = max(ht_rx_nss, vht_rx_nss); sta->sta.rx_nss = max_t(u8, 1, ht_rx_nss); } u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata, struct sta_info *sta, u8 opmode, enum nl80211_band band) { enum ieee80211_sta_rx_bandwidth new_bw; struct sta_opmode_info sta_opmode = {}; u32 changed = 0; u8 nss; /* ignore - no support for BF yet */ if (opmode & IEEE80211_OPMODE_NOTIF_RX_NSS_TYPE_BF) return 0; nss = opmode & IEEE80211_OPMODE_NOTIF_RX_NSS_MASK; nss >>= IEEE80211_OPMODE_NOTIF_RX_NSS_SHIFT; nss += 1; if (sta->sta.rx_nss != nss) { sta->sta.rx_nss = nss; sta_opmode.rx_nss = nss; changed |= IEEE80211_RC_NSS_CHANGED; sta_opmode.changed |= STA_OPMODE_N_SS_CHANGED; } switch (opmode & IEEE80211_OPMODE_NOTIF_CHANWIDTH_MASK) { case IEEE80211_OPMODE_NOTIF_CHANWIDTH_20MHZ: sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_20; break; case IEEE80211_OPMODE_NOTIF_CHANWIDTH_40MHZ: sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_40; break; case IEEE80211_OPMODE_NOTIF_CHANWIDTH_80MHZ: sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_80; break; case IEEE80211_OPMODE_NOTIF_CHANWIDTH_160MHZ: sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_160; break; } new_bw = ieee80211_sta_cur_vht_bw(sta); if (new_bw != sta->sta.bandwidth) { sta->sta.bandwidth = new_bw; sta_opmode.bw = ieee80211_sta_rx_bw_to_chan_width(sta); changed |= IEEE80211_RC_BW_CHANGED; sta_opmode.changed |= STA_OPMODE_MAX_BW_CHANGED; } if (sta_opmode.changed) cfg80211_sta_opmode_change_notify(sdata->dev, sta->addr, &sta_opmode, GFP_KERNEL); return changed; } void ieee80211_process_mu_groups(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt) { struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf; if (!sdata->vif.mu_mimo_owner) return; if (!memcmp(mgmt->u.action.u.vht_group_notif.position, bss_conf->mu_group.position, WLAN_USER_POSITION_LEN) && !memcmp(mgmt->u.action.u.vht_group_notif.membership, bss_conf->mu_group.membership, WLAN_MEMBERSHIP_LEN)) return; memcpy(bss_conf->mu_group.membership, mgmt->u.action.u.vht_group_notif.membership, WLAN_MEMBERSHIP_LEN); memcpy(bss_conf->mu_group.position, mgmt->u.action.u.vht_group_notif.position, WLAN_USER_POSITION_LEN); ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_MU_GROUPS); } void ieee80211_update_mu_groups(struct ieee80211_vif *vif, const u8 *membership, const u8 *position) { struct ieee80211_bss_conf *bss_conf = &vif->bss_conf; if (WARN_ON_ONCE(!vif->mu_mimo_owner)) return; memcpy(bss_conf->mu_group.membership, membership, 
WLAN_MEMBERSHIP_LEN); memcpy(bss_conf->mu_group.position, position, WLAN_USER_POSITION_LEN); } EXPORT_SYMBOL_GPL(ieee80211_update_mu_groups); void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata, struct sta_info *sta, u8 opmode, enum nl80211_band band) { struct ieee80211_local *local = sdata->local; struct ieee80211_supported_band *sband = local->hw.wiphy->bands[band]; u32 changed = __ieee80211_vht_handle_opmode(sdata, sta, opmode, band); if (changed > 0) { ieee80211_recalc_min_chandef(sdata); rate_control_rate_update(local, sband, sta, changed); } } void ieee80211_get_vht_mask_from_cap(__le16 vht_cap, u16 vht_mask[NL80211_VHT_NSS_MAX]) { int i; u16 mask, cap = le16_to_cpu(vht_cap); for (i = 0; i < NL80211_VHT_NSS_MAX; i++) { mask = (cap >> i * 2) & IEEE80211_VHT_MCS_NOT_SUPPORTED; switch (mask) { case IEEE80211_VHT_MCS_SUPPORT_0_7: vht_mask[i] = 0x00FF; break; case IEEE80211_VHT_MCS_SUPPORT_0_8: vht_mask[i] = 0x01FF; break; case IEEE80211_VHT_MCS_SUPPORT_0_9: vht_mask[i] = 0x03FF; break; case IEEE80211_VHT_MCS_NOT_SUPPORTED: default: vht_mask[i] = 0; break; } } } backport-iwlwifi-dkms-7906/net/mac80211/wep.c000066400000000000000000000222321351064634500205630ustar00rootroot00000000000000/* * Software WEP encryption implementation * Copyright 2002, Jouni Malinen * Copyright 2003, Instant802 Networks, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include #include #include #include #include #include #include #include #include #include #include #include #include "ieee80211_i.h" #include "wep.h" int ieee80211_wep_init(struct ieee80211_local *local) { /* start WEP IV from a random value */ get_random_bytes(&local->wep_iv, IEEE80211_WEP_IV_LEN); local->wep_tx_tfm = crypto_alloc_cipher("arc4", 0, 0); if (IS_ERR(local->wep_tx_tfm)) { local->wep_rx_tfm = ERR_PTR(-EINVAL); return PTR_ERR(local->wep_tx_tfm); } local->wep_rx_tfm = crypto_alloc_cipher("arc4", 0, 0); if (IS_ERR(local->wep_rx_tfm)) { crypto_free_cipher(local->wep_tx_tfm); local->wep_tx_tfm = ERR_PTR(-EINVAL); return PTR_ERR(local->wep_rx_tfm); } return 0; } void ieee80211_wep_free(struct ieee80211_local *local) { if (!IS_ERR(local->wep_tx_tfm)) crypto_free_cipher(local->wep_tx_tfm); if (!IS_ERR(local->wep_rx_tfm)) crypto_free_cipher(local->wep_rx_tfm); } static inline bool ieee80211_wep_weak_iv(u32 iv, int keylen) { /* * Fluhrer, Mantin, and Shamir have reported weaknesses in the * key scheduling algorithm of RC4. At least IVs (KeyByte + 3, * 0xff, N) can be used to speed up attacks, so avoid using them.
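* For a 104-bit key (keylen == 13), for example, this skips IVs of the form (B, 0xff, N) for B between 3 and 15.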
*/ if ((iv & 0xff00) == 0xff00) { u8 B = (iv >> 16) & 0xff; if (B >= 3 && B < 3 + keylen) return true; } return false; } static void ieee80211_wep_get_iv(struct ieee80211_local *local, int keylen, int keyidx, u8 *iv) { local->wep_iv++; if (ieee80211_wep_weak_iv(local->wep_iv, keylen)) local->wep_iv += 0x0100; if (!iv) return; *iv++ = (local->wep_iv >> 16) & 0xff; *iv++ = (local->wep_iv >> 8) & 0xff; *iv++ = local->wep_iv & 0xff; *iv++ = keyidx << 6; } static u8 *ieee80211_wep_add_iv(struct ieee80211_local *local, struct sk_buff *skb, int keylen, int keyidx) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); unsigned int hdrlen; u8 *newhdr; hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED); if (WARN_ON(skb_headroom(skb) < IEEE80211_WEP_IV_LEN)) return NULL; hdrlen = ieee80211_hdrlen(hdr->frame_control); newhdr = skb_push(skb, IEEE80211_WEP_IV_LEN); memmove(newhdr, newhdr + IEEE80211_WEP_IV_LEN, hdrlen); /* the HW only needs room for the IV, but not the actual IV */ if (info->control.hw_key && (info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)) return newhdr + hdrlen; ieee80211_wep_get_iv(local, keylen, keyidx, newhdr + hdrlen); return newhdr + hdrlen; } static void ieee80211_wep_remove_iv(struct ieee80211_local *local, struct sk_buff *skb, struct ieee80211_key *key) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; unsigned int hdrlen; hdrlen = ieee80211_hdrlen(hdr->frame_control); memmove(skb->data + IEEE80211_WEP_IV_LEN, skb->data, hdrlen); skb_pull(skb, IEEE80211_WEP_IV_LEN); } /* Perform WEP encryption using given key. data buffer must have tailroom * for 4-byte ICV. data_len must not include this ICV. Note: this function * does _not_ add IV. data = RC4(data | CRC32(data)) */ int ieee80211_wep_encrypt_data(struct crypto_cipher *tfm, u8 *rc4key, size_t klen, u8 *data, size_t data_len) { __le32 icv; int i; if (IS_ERR(tfm)) return -1; icv = cpu_to_le32(~crc32_le(~0, data, data_len)); put_unaligned(icv, (__le32 *)(data + data_len)); crypto_cipher_setkey(tfm, rc4key, klen); for (i = 0; i < data_len + IEEE80211_WEP_ICV_LEN; i++) crypto_cipher_encrypt_one(tfm, data + i, data + i); return 0; } /* Perform WEP encryption on given skb. 4 bytes of extra space (IV) at the * beginning of the buffer and 4 bytes of extra space (ICV) at the end of the * buffer will be added. Both IV and ICV will be transmitted, so the * payload length increases by 8 bytes. * * WEP frame payload: IV + TX key idx, RC4(data), ICV = RC4(CRC32(data)) */ int ieee80211_wep_encrypt(struct ieee80211_local *local, struct sk_buff *skb, const u8 *key, int keylen, int keyidx) { u8 *iv; size_t len; u8 rc4key[3 + WLAN_KEY_LEN_WEP104]; if (WARN_ON(skb_tailroom(skb) < IEEE80211_WEP_ICV_LEN)) return -1; iv = ieee80211_wep_add_iv(local, skb, keylen, keyidx); if (!iv) return -1; len = skb->len - (iv + IEEE80211_WEP_IV_LEN - skb->data); /* Prepend 24-bit IV to RC4 key */ memcpy(rc4key, iv, 3); /* Copy rest of the WEP key (the secret part) */ memcpy(rc4key + 3, key, keylen); /* Add room for ICV */ skb_put(skb, IEEE80211_WEP_ICV_LEN); return ieee80211_wep_encrypt_data(local->wep_tx_tfm, rc4key, keylen + 3, iv + IEEE80211_WEP_IV_LEN, len); } /* Perform WEP decryption using given key. data buffer includes encrypted * payload, including 4-byte ICV, but _not_ IV. data_len must not include ICV. * Return 0 on success and -1 on ICV mismatch.
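* The ICV is the little-endian CRC-32 of the plaintext, so a mismatch after RC4 decryption indicates a wrong key or a corrupted frame.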
*/ int ieee80211_wep_decrypt_data(struct crypto_cipher *tfm, u8 *rc4key, size_t klen, u8 *data, size_t data_len) { __le32 crc; int i; if (IS_ERR(tfm)) return -1; crypto_cipher_setkey(tfm, rc4key, klen); for (i = 0; i < data_len + IEEE80211_WEP_ICV_LEN; i++) crypto_cipher_decrypt_one(tfm, data + i, data + i); crc = cpu_to_le32(~crc32_le(~0, data, data_len)); if (memcmp(&crc, data + data_len, IEEE80211_WEP_ICV_LEN) != 0) /* ICV mismatch */ return -1; return 0; } /* Perform WEP decryption on given skb. Buffer includes whole WEP part of * the frame: IV (4 bytes), encrypted payload (including SNAP header), * ICV (4 bytes). skb->len includes both IV and ICV. * * Returns 0 if frame was decrypted successfully and ICV was correct and -1 on * failure. If frame is OK, IV and ICV will be removed, i.e., decrypted payload * is moved to the beginning of the skb and skb length will be reduced. */ static int ieee80211_wep_decrypt(struct ieee80211_local *local, struct sk_buff *skb, struct ieee80211_key *key) { u32 klen; u8 rc4key[3 + WLAN_KEY_LEN_WEP104]; u8 keyidx; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; unsigned int hdrlen; size_t len; int ret = 0; if (!ieee80211_has_protected(hdr->frame_control)) return -1; hdrlen = ieee80211_hdrlen(hdr->frame_control); if (skb->len < hdrlen + IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN) return -1; len = skb->len - hdrlen - IEEE80211_WEP_IV_LEN - IEEE80211_WEP_ICV_LEN; keyidx = skb->data[hdrlen + 3] >> 6; if (!key || keyidx != key->conf.keyidx) return -1; klen = 3 + key->conf.keylen; /* Prepend 24-bit IV to RC4 key */ memcpy(rc4key, skb->data + hdrlen, 3); /* Copy rest of the WEP key (the secret part) */ memcpy(rc4key + 3, key->conf.key, key->conf.keylen); if (ieee80211_wep_decrypt_data(local->wep_rx_tfm, rc4key, klen, skb->data + hdrlen + IEEE80211_WEP_IV_LEN, len)) ret = -1; /* Trim ICV */ skb_trim(skb, skb->len - IEEE80211_WEP_ICV_LEN); /* Remove IV */ memmove(skb->data + IEEE80211_WEP_IV_LEN, skb->data, hdrlen); skb_pull(skb, IEEE80211_WEP_IV_LEN); return ret; } ieee80211_rx_result ieee80211_crypto_wep_decrypt(struct ieee80211_rx_data *rx) { struct sk_buff *skb = rx->skb; struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; __le16 fc = hdr->frame_control; if (!ieee80211_is_data(fc) && !ieee80211_is_auth(fc)) return RX_CONTINUE; if (!(status->flag & RX_FLAG_DECRYPTED)) { if (skb_linearize(rx->skb)) return RX_DROP_UNUSABLE; if (ieee80211_wep_decrypt(rx->local, rx->skb, rx->key)) return RX_DROP_UNUSABLE; } else if (!(status->flag & RX_FLAG_IV_STRIPPED)) { if (!pskb_may_pull(rx->skb, ieee80211_hdrlen(fc) + IEEE80211_WEP_IV_LEN)) return RX_DROP_UNUSABLE; ieee80211_wep_remove_iv(rx->local, rx->skb, rx->key); /* remove ICV */ if (!(status->flag & RX_FLAG_ICV_STRIPPED) && pskb_trim(rx->skb, rx->skb->len - IEEE80211_WEP_ICV_LEN)) return RX_DROP_UNUSABLE; } return RX_CONTINUE; } static int wep_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_key_conf *hw_key = info->control.hw_key; if (!hw_key) { if (ieee80211_wep_encrypt(tx->local, skb, tx->key->conf.key, tx->key->conf.keylen, tx->key->conf.keyidx)) return -1; } else if ((hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV) || (hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)) { if (!ieee80211_wep_add_iv(tx->local, skb, tx->key->conf.keylen, tx->key->conf.keyidx)) return -1; } return 0; } ieee80211_tx_result ieee80211_crypto_wep_encrypt(struct 
ieee80211_tx_data *tx) { struct sk_buff *skb; ieee80211_tx_set_protected(tx); skb_queue_walk(&tx->skbs, skb) { if (wep_encrypt_skb(tx, skb) < 0) { I802_DEBUG_INC(tx->local->tx_handlers_drop_wep); return TX_DROP; } } return TX_CONTINUE; } backport-iwlwifi-dkms-7906/net/mac80211/wep.h000066400000000000000000000021361351064634500205710ustar00rootroot00000000000000/* * Software WEP encryption implementation * Copyright 2002, Jouni Malinen * Copyright 2003, Instant802 Networks, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #ifndef WEP_H #define WEP_H #include #include #include "ieee80211_i.h" #include "key.h" int ieee80211_wep_init(struct ieee80211_local *local); void ieee80211_wep_free(struct ieee80211_local *local); int ieee80211_wep_encrypt_data(struct crypto_cipher *tfm, u8 *rc4key, size_t klen, u8 *data, size_t data_len); int ieee80211_wep_encrypt(struct ieee80211_local *local, struct sk_buff *skb, const u8 *key, int keylen, int keyidx); int ieee80211_wep_decrypt_data(struct crypto_cipher *tfm, u8 *rc4key, size_t klen, u8 *data, size_t data_len); ieee80211_rx_result ieee80211_crypto_wep_decrypt(struct ieee80211_rx_data *rx); ieee80211_tx_result ieee80211_crypto_wep_encrypt(struct ieee80211_tx_data *tx); #endif /* WEP_H */ backport-iwlwifi-dkms-7906/net/mac80211/wme.c000066400000000000000000000143131351064634500205610ustar00rootroot00000000000000/* * Copyright 2004, Instant802 Networks, Inc. * Copyright 2013-2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include #include #include #include #include #include #include #include #include "ieee80211_i.h" #include "wme.h" /* Default mapping in classifier to work with default * queue setup. */ const int ieee802_1d_to_ac[8] = { IEEE80211_AC_BE, IEEE80211_AC_BK, IEEE80211_AC_BK, IEEE80211_AC_BE, IEEE80211_AC_VI, IEEE80211_AC_VI, IEEE80211_AC_VO, IEEE80211_AC_VO }; static int wme_downgrade_ac(struct sk_buff *skb) { switch (skb->priority) { case 6: case 7: skb->priority = 5; /* VO -> VI */ return 0; case 4: case 5: skb->priority = 3; /* VI -> BE */ return 0; case 0: case 3: skb->priority = 2; /* BE -> BK */ return 0; default: return -1; } } /** * ieee80211_fix_reserved_tid - return the TID to use if this one is reserved * @tid: the assumed-reserved TID * * Returns: the alternative TID to use, or 0 on error */ static inline u8 ieee80211_fix_reserved_tid(u8 tid) { switch (tid) { case 0: return 3; case 1: return 2; case 2: return 1; case 3: return 0; case 4: return 5; case 5: return 4; case 6: return 7; case 7: return 6; } return 0; } static u16 ieee80211_downgrade_queue(struct ieee80211_sub_if_data *sdata, struct sta_info *sta, struct sk_buff *skb) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; /* in case we are a client verify acm is not set for this ac */ while (sdata->wmm_acm & BIT(skb->priority)) { int ac = ieee802_1d_to_ac[skb->priority]; if (ifmgd->tx_tspec[ac].admitted_time && skb->priority == ifmgd->tx_tspec[ac].up) return ac; if (wme_downgrade_ac(skb)) { /* * This should not really happen. The AP has marked all * lower ACs to require admission control which is not * a reasonable configuration. Allow the frame to be * transmitted using AC_BK as a workaround. 
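* Downgrading stops at BK, so the frame is still transmitted rather than dropped even with such a broken AP configuration.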
*/ break; } } /* Check to see if this is a reserved TID */ if (sta && sta->reserved_tid == skb->priority) skb->priority = ieee80211_fix_reserved_tid(skb->priority); /* look up which queue to use for frames with this 1d tag */ return ieee802_1d_to_ac[skb->priority]; } /* Indicate which queue to use for this fully formed 802.11 frame */ u16 ieee80211_select_queue_80211(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, struct ieee80211_hdr *hdr) { struct ieee80211_local *local = sdata->local; u8 *p; if (local->hw.queues < IEEE80211_NUM_ACS) return 0; if (!ieee80211_is_data(hdr->frame_control)) { skb->priority = 7; return ieee802_1d_to_ac[skb->priority]; } if (!ieee80211_is_data_qos(hdr->frame_control)) { skb->priority = 0; return ieee802_1d_to_ac[skb->priority]; } p = ieee80211_get_qos_ctl(hdr); skb->priority = *p & IEEE80211_QOS_CTL_TAG1D_MASK; return ieee80211_downgrade_queue(sdata, NULL, skb); } /* Indicate which queue to use. */ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) { struct ieee80211_local *local = sdata->local; struct sta_info *sta = NULL; const u8 *ra = NULL; bool qos = false; struct mac80211_qos_map *qos_map; u16 ret; if (local->hw.queues < IEEE80211_NUM_ACS || skb->len < 6) { skb->priority = 0; /* required for correct WPA/11i MIC */ return 0; } rcu_read_lock(); switch (sdata->vif.type) { case NL80211_IFTYPE_AP_VLAN: sta = rcu_dereference(sdata->u.vlan.sta); if (sta) { qos = sta->sta.wme; break; } /* fall through */ case NL80211_IFTYPE_AP: ra = skb->data; break; case NL80211_IFTYPE_WDS: ra = sdata->u.wds.remote_addr; break; #ifdef CPTCFG_MAC80211_MESH case NL80211_IFTYPE_MESH_POINT: qos = true; break; #endif case NL80211_IFTYPE_STATION: /* might be a TDLS station */ sta = sta_info_get(sdata, skb->data); if (sta) qos = sta->sta.wme; ra = sdata->u.mgd.bssid; break; case NL80211_IFTYPE_ADHOC: ra = skb->data; break; case NL80211_IFTYPE_OCB: /* all stations are required to support WME */ qos = true; break; default: break; } if (!sta && ra && !is_multicast_ether_addr(ra)) { sta = sta_info_get(sdata, ra); if (sta) qos = sta->sta.wme; } if (!qos) { skb->priority = 0; /* required for correct WPA/11i MIC */ ret = IEEE80211_AC_BE; goto out; } if (skb->protocol == sdata->control_port_protocol) { skb->priority = 7; goto downgrade; } /* use the data classifier to determine what 802.1d tag the * data frame has */ qos_map = rcu_dereference(sdata->qos_map); skb->priority = cfg80211_classify8021d(skb, qos_map ? &qos_map->qos_map : NULL); downgrade: ret = ieee80211_downgrade_queue(sdata, sta, skb); out: rcu_read_unlock(); return ret; } /** * ieee80211_set_qos_hdr - Fill in the QoS header if there is one. 
* * @sdata: local subif * @skb: packet to be updated */ void ieee80211_set_qos_hdr(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) { struct ieee80211_hdr *hdr = (void *)skb->data; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); u8 tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK; u8 flags; u8 *p; if (!ieee80211_is_data_qos(hdr->frame_control)) return; p = ieee80211_get_qos_ctl(hdr); /* set up the first byte */ /* * preserve everything but the TID and ACK policy * (which we both write here) */ flags = *p & ~(IEEE80211_QOS_CTL_TID_MASK | IEEE80211_QOS_CTL_ACK_POLICY_MASK); if (is_multicast_ether_addr(hdr->addr1) || sdata->noack_map & BIT(tid)) { flags |= IEEE80211_QOS_CTL_ACK_POLICY_NOACK; info->flags |= IEEE80211_TX_CTL_NO_ACK; } *p = flags | tid; /* set up the second byte */ p++; if (ieee80211_vif_is_mesh(&sdata->vif)) { /* preserve RSPI and Mesh PS Level bit */ *p &= ((IEEE80211_QOS_CTL_RSPI | IEEE80211_QOS_CTL_MESH_PS_LEVEL) >> 8); /* Nulls don't have a mesh header (frame body) */ if (!ieee80211_is_qos_nullfunc(hdr->frame_control)) *p |= (IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT >> 8); } else { *p = 0; } } backport-iwlwifi-dkms-7906/net/mac80211/wme.h000066400000000000000000000013051351064634500205630ustar00rootroot00000000000000/* * Copyright 2004, Instant802 Networks, Inc. * Copyright 2005, Devicescape Software, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #ifndef _WME_H #define _WME_H #include #include "ieee80211_i.h" u16 ieee80211_select_queue_80211(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, struct ieee80211_hdr *hdr); u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb); void ieee80211_set_qos_hdr(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb); #endif /* _WME_H */ backport-iwlwifi-dkms-7906/net/mac80211/wpa.c000066400000000000000000000772771351064634500206020ustar00rootroot00000000000000/* * Copyright 2002-2004, Instant802 Networks, Inc. * Copyright 2008, Jouni Malinen * Copyright (C) 2016-2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. 
*/ #include #include #include #include #include #include #include #include #include #include #include "ieee80211_i.h" "michael.h" "tkip.h" "aes_ccm.h" "aes_cmac.h" "aes_gmac.h" "aes_gcm.h" "wpa.h" ieee80211_tx_result ieee80211_tx_h_michael_mic_add(struct ieee80211_tx_data *tx) { u8 *data, *key, *mic; size_t data_len; unsigned int hdrlen; struct ieee80211_hdr *hdr; struct sk_buff *skb = tx->skb; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); int tail; hdr = (struct ieee80211_hdr *)skb->data; if (!tx->key || tx->key->conf.cipher != WLAN_CIPHER_SUITE_TKIP || skb->len < 24 || !ieee80211_is_data_present(hdr->frame_control)) return TX_CONTINUE; hdrlen = ieee80211_hdrlen(hdr->frame_control); if (skb->len < hdrlen) return TX_DROP; data = skb->data + hdrlen; data_len = skb->len - hdrlen; if (unlikely(info->flags & IEEE80211_TX_INTFL_TKIP_MIC_FAILURE)) { /* Need to use software crypto for the test */ info->control.hw_key = NULL; } if (info->control.hw_key && (info->flags & IEEE80211_TX_CTL_DONTFRAG || ieee80211_hw_check(&tx->local->hw, SUPPORTS_TX_FRAG)) && !(tx->key->conf.flags & (IEEE80211_KEY_FLAG_GENERATE_MMIC | IEEE80211_KEY_FLAG_PUT_MIC_SPACE))) { /* hwaccel - with no need for SW-generated MMIC or MIC space */ return TX_CONTINUE; } tail = MICHAEL_MIC_LEN; if (!info->control.hw_key) tail += IEEE80211_TKIP_ICV_LEN; if (WARN(skb_tailroom(skb) < tail || skb_headroom(skb) < IEEE80211_TKIP_IV_LEN, "mmic: not enough head/tail (%d/%d,%d/%d)\n", skb_headroom(skb), IEEE80211_TKIP_IV_LEN, skb_tailroom(skb), tail)) return TX_DROP; mic = skb_put(skb, MICHAEL_MIC_LEN); if (tx->key->conf.flags & IEEE80211_KEY_FLAG_PUT_MIC_SPACE) { /* Zeroed MIC can help with debug */ memset(mic, 0, MICHAEL_MIC_LEN); return TX_CONTINUE; } key = &tx->key->conf.key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY]; michael_mic(key, hdr, data, data_len, mic); if (unlikely(info->flags & IEEE80211_TX_INTFL_TKIP_MIC_FAILURE)) mic[0]++; return TX_CONTINUE; } ieee80211_rx_result ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx) { u8 *data, *key = NULL; size_t data_len; unsigned int hdrlen; u8 mic[MICHAEL_MIC_LEN]; struct sk_buff *skb = rx->skb; struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; /* * it makes no sense to check for MIC errors on anything other * than data frames. */ if (!ieee80211_is_data_present(hdr->frame_control)) return RX_CONTINUE; /* * No way to verify the MIC if the hardware stripped it or * the IV with the key index. In this case we have to rely solely * on the driver to set RX_FLAG_MMIC_ERROR in the event of a * MIC failure report. */ if (status->flag & (RX_FLAG_MMIC_STRIPPED | RX_FLAG_IV_STRIPPED)) { if (status->flag & RX_FLAG_MMIC_ERROR) goto mic_fail_no_key; if (!(status->flag & RX_FLAG_IV_STRIPPED) && rx->key && rx->key->conf.cipher == WLAN_CIPHER_SUITE_TKIP) goto update_iv; return RX_CONTINUE; } /* * Some hardware seems to generate Michael MIC failure reports even * though the frame was not encrypted with TKIP and therefore has no * MIC. Ignore the flag in that case to avoid triggering countermeasures.
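* Only the flag is ignored; a frame that was not TKIP-protected simply continues down the RX path below.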
*/ if (!rx->key || rx->key->conf.cipher != WLAN_CIPHER_SUITE_TKIP || !(status->flag & RX_FLAG_DECRYPTED)) return RX_CONTINUE; if (rx->sdata->vif.type == NL80211_IFTYPE_AP && rx->key->conf.keyidx) { /* * APs with pairwise keys should never receive Michael MIC * errors for non-zero keyidx because these are reserved for * group keys and only the AP is sending real multicast * frames in the BSS. */ return RX_DROP_UNUSABLE; } if (status->flag & RX_FLAG_MMIC_ERROR) goto mic_fail; hdrlen = ieee80211_hdrlen(hdr->frame_control); if (skb->len < hdrlen + MICHAEL_MIC_LEN) return RX_DROP_UNUSABLE; if (skb_linearize(rx->skb)) return RX_DROP_UNUSABLE; hdr = (void *)skb->data; data = skb->data + hdrlen; data_len = skb->len - hdrlen - MICHAEL_MIC_LEN; key = &rx->key->conf.key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY]; michael_mic(key, hdr, data, data_len, mic); if (crypto_memneq(mic, data + data_len, MICHAEL_MIC_LEN)) goto mic_fail; /* remove Michael MIC from payload */ skb_trim(skb, skb->len - MICHAEL_MIC_LEN); update_iv: /* update IV in key information to be able to detect replays */ rx->key->u.tkip.rx[rx->security_idx].iv32 = rx->tkip_iv32; rx->key->u.tkip.rx[rx->security_idx].iv16 = rx->tkip_iv16; return RX_CONTINUE; mic_fail: rx->key->u.tkip.mic_failures++; mic_fail_no_key: /* * In some cases the key can be unset - e.g. a multicast packet, in * a driver that supports HW encryption. Send up the key idx only if * the key is set. */ cfg80211_michael_mic_failure(rx->sdata->dev, hdr->addr2, is_multicast_ether_addr(hdr->addr1) ? NL80211_KEYTYPE_GROUP : NL80211_KEYTYPE_PAIRWISE, rx->key ? rx->key->conf.keyidx : -1, NULL, GFP_ATOMIC); return RX_DROP_UNUSABLE; } static int tkip_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; struct ieee80211_key *key = tx->key; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); unsigned int hdrlen; int len, tail; u64 pn; u8 *pos; if (info->control.hw_key && !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV) && !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)) { /* hwaccel - with no need for software-generated IV */ return 0; } hdrlen = ieee80211_hdrlen(hdr->frame_control); len = skb->len - hdrlen; if (info->control.hw_key) tail = 0; else tail = IEEE80211_TKIP_ICV_LEN; if (WARN_ON(skb_tailroom(skb) < tail || skb_headroom(skb) < IEEE80211_TKIP_IV_LEN)) return -1; pos = skb_push(skb, IEEE80211_TKIP_IV_LEN); memmove(pos, pos + IEEE80211_TKIP_IV_LEN, hdrlen); pos += hdrlen; /* the HW only needs room for the IV, but not the actual IV */ if (info->control.hw_key && (info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)) return 0; /* Increase IV for the frame */ pn = atomic64_inc_return(&key->conf.tx_pn); pos = ieee80211_tkip_add_iv(pos, &key->conf, pn); /* hwaccel - with software IV */ if (info->control.hw_key) return 0; /* Add room for ICV */ skb_put(skb, IEEE80211_TKIP_ICV_LEN); return ieee80211_tkip_encrypt_data(tx->local->wep_tx_tfm, key, skb, pos, len); } ieee80211_tx_result ieee80211_crypto_tkip_encrypt(struct ieee80211_tx_data *tx) { struct sk_buff *skb; ieee80211_tx_set_protected(tx); skb_queue_walk(&tx->skbs, skb) { if (tkip_encrypt_skb(tx, skb) < 0) return TX_DROP; } return TX_CONTINUE; } ieee80211_rx_result ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data; int hdrlen, res, hwaccel = 0; struct ieee80211_key *key = rx->key; struct sk_buff *skb = rx->skb; struct 
ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); hdrlen = ieee80211_hdrlen(hdr->frame_control); if (!ieee80211_is_data(hdr->frame_control)) return RX_CONTINUE; if (!rx->sta || skb->len - hdrlen < 12) return RX_DROP_UNUSABLE; /* it may be possible to optimize this a bit more */ if (skb_linearize(rx->skb)) return RX_DROP_UNUSABLE; hdr = (void *)skb->data; /* * Let TKIP code verify IV, but skip decryption. * In the case where hardware checks the IV as well, * we don't even get here, see ieee80211_rx_h_decrypt() */ if (status->flag & RX_FLAG_DECRYPTED) hwaccel = 1; res = ieee80211_tkip_decrypt_data(rx->local->wep_rx_tfm, key, skb->data + hdrlen, skb->len - hdrlen, rx->sta->sta.addr, hdr->addr1, hwaccel, rx->security_idx, &rx->tkip_iv32, &rx->tkip_iv16); if (res != TKIP_DECRYPT_OK) return RX_DROP_UNUSABLE; /* Trim ICV */ if (!(status->flag & RX_FLAG_ICV_STRIPPED)) skb_trim(skb, skb->len - IEEE80211_TKIP_ICV_LEN); /* Remove IV */ memmove(skb->data + IEEE80211_TKIP_IV_LEN, skb->data, hdrlen); skb_pull(skb, IEEE80211_TKIP_IV_LEN); return RX_CONTINUE; } static void ccmp_special_blocks(struct sk_buff *skb, u8 *pn, u8 *b_0, u8 *aad) { __le16 mask_fc; int a4_included, mgmt; u8 qos_tid; u16 len_a; unsigned int hdrlen; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; /* * Mask FC: zero subtype b4 b5 b6 (if not mgmt) * Retry, PwrMgt, MoreData; set Protected */ mgmt = ieee80211_is_mgmt(hdr->frame_control); mask_fc = hdr->frame_control; mask_fc &= ~cpu_to_le16(IEEE80211_FCTL_RETRY | IEEE80211_FCTL_PM | IEEE80211_FCTL_MOREDATA); if (!mgmt) mask_fc &= ~cpu_to_le16(0x0070); mask_fc |= cpu_to_le16(IEEE80211_FCTL_PROTECTED); hdrlen = ieee80211_hdrlen(hdr->frame_control); len_a = hdrlen - 2; a4_included = ieee80211_has_a4(hdr->frame_control); if (ieee80211_is_data_qos(hdr->frame_control)) qos_tid = ieee80211_get_tid(hdr); else qos_tid = 0; /* In CCM, the initial vectors (IV) used for CTR mode encryption and CBC * mode authentication are not allowed to collide, yet both are derived * from this vector b_0. We only set L := 1 here to indicate that the * data size can be represented in (L+1) bytes. The CCM layer will take * care of storing the data length in the top (L+1) bytes and setting * and clearing the other bits as is required to derive the two IVs. 
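* * For illustration only, the layout assembled below is: b_0[0] = 0x01 (flags, L' = 1, i.e. a 2-octet length field; the CCM layer ORs in the remaining flag bits), b_0[1] = nonce flags (TID | mgmt << 4), b_0[2..7] = A2 (the transmitter address), b_0[8..13] = PN5..PN0 (big endian), and b_0[14..15] are left for the CCM layer to fill with the data length. 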
*/ b_0[0] = 0x1; /* Nonce: Nonce Flags | A2 | PN * Nonce Flags: Priority (b0..b3) | Management (b4) | Reserved (b5..b7) */ b_0[1] = qos_tid | (mgmt << 4); memcpy(&b_0[2], hdr->addr2, ETH_ALEN); memcpy(&b_0[8], pn, IEEE80211_CCMP_PN_LEN); /* AAD (extra authenticate-only data) / masked 802.11 header * FC | A1 | A2 | A3 | SC | [A4] | [QC] */ put_unaligned_be16(len_a, &aad[0]); put_unaligned(mask_fc, (__le16 *)&aad[2]); memcpy(&aad[4], &hdr->addr1, 3 * ETH_ALEN); /* Mask Seq#, leave Frag# */ aad[22] = *((u8 *) &hdr->seq_ctrl) & 0x0f; aad[23] = 0; if (a4_included) { memcpy(&aad[24], hdr->addr4, ETH_ALEN); aad[30] = qos_tid; aad[31] = 0; } else { memset(&aad[24], 0, ETH_ALEN + IEEE80211_QOS_CTL_LEN); aad[24] = qos_tid; } } static inline void ccmp_pn2hdr(u8 *hdr, u8 *pn, int key_id) { hdr[0] = pn[5]; hdr[1] = pn[4]; hdr[2] = 0; hdr[3] = 0x20 | (key_id << 6); hdr[4] = pn[3]; hdr[5] = pn[2]; hdr[6] = pn[1]; hdr[7] = pn[0]; } static inline void ccmp_hdr2pn(u8 *pn, u8 *hdr) { pn[0] = hdr[7]; pn[1] = hdr[6]; pn[2] = hdr[5]; pn[3] = hdr[4]; pn[4] = hdr[1]; pn[5] = hdr[0]; } static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb, unsigned int mic_len) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; struct ieee80211_key *key = tx->key; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); int hdrlen, len, tail; u8 *pos; u8 pn[6]; u64 pn64; u8 aad[CCM_AAD_LEN]; u8 b_0[AES_BLOCK_SIZE]; if (info->control.hw_key && !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV) && !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE) && !((info->control.hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV_MGMT) && ieee80211_is_mgmt(hdr->frame_control))) { /* * hwaccel has no need for preallocated room for CCMP * header or MIC fields */ return 0; } hdrlen = ieee80211_hdrlen(hdr->frame_control); len = skb->len - hdrlen; if (info->control.hw_key) tail = 0; else tail = mic_len; if (WARN_ON(skb_tailroom(skb) < tail || skb_headroom(skb) < IEEE80211_CCMP_HDR_LEN)) return -1; pos = skb_push(skb, IEEE80211_CCMP_HDR_LEN); memmove(pos, pos + IEEE80211_CCMP_HDR_LEN, hdrlen); /* the HW only needs room for the IV, but not the actual IV */ if (info->control.hw_key && (info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)) return 0; hdr = (struct ieee80211_hdr *) pos; pos += hdrlen; pn64 = atomic64_inc_return(&key->conf.tx_pn); pn[5] = pn64; pn[4] = pn64 >> 8; pn[3] = pn64 >> 16; pn[2] = pn64 >> 24; pn[1] = pn64 >> 32; pn[0] = pn64 >> 40; ccmp_pn2hdr(pos, pn, key->conf.keyidx); /* hwaccel - with software CCMP header */ if (info->control.hw_key) return 0; pos += IEEE80211_CCMP_HDR_LEN; ccmp_special_blocks(skb, pn, b_0, aad); return ieee80211_aes_ccm_encrypt(key->u.ccmp.tfm, b_0, aad, pos, len, skb_put(skb, mic_len)); } ieee80211_tx_result ieee80211_crypto_ccmp_encrypt(struct ieee80211_tx_data *tx, unsigned int mic_len) { struct sk_buff *skb; ieee80211_tx_set_protected(tx); skb_queue_walk(&tx->skbs, skb) { if (ccmp_encrypt_skb(tx, skb, mic_len) < 0) return TX_DROP; } return TX_CONTINUE; } ieee80211_rx_result ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx, unsigned int mic_len) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; int hdrlen; struct ieee80211_key *key = rx->key; struct sk_buff *skb = rx->skb; struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); u8 pn[IEEE80211_CCMP_PN_LEN]; int data_len; int queue; hdrlen = ieee80211_hdrlen(hdr->frame_control); if (!ieee80211_is_data(hdr->frame_control) && 
!ieee80211_is_robust_mgmt_frame(skb)) return RX_CONTINUE; if (status->flag & RX_FLAG_DECRYPTED) { if (!pskb_may_pull(rx->skb, hdrlen + IEEE80211_CCMP_HDR_LEN)) return RX_DROP_UNUSABLE; if (status->flag & RX_FLAG_MIC_STRIPPED) mic_len = 0; } else { if (skb_linearize(rx->skb)) return RX_DROP_UNUSABLE; } data_len = skb->len - hdrlen - IEEE80211_CCMP_HDR_LEN - mic_len; if (!rx->sta || data_len < 0) return RX_DROP_UNUSABLE; if (!(status->flag & RX_FLAG_PN_VALIDATED)) { int res; ccmp_hdr2pn(pn, skb->data + hdrlen); queue = rx->security_idx; res = memcmp(pn, key->u.ccmp.rx_pn[queue], IEEE80211_CCMP_PN_LEN); if (res < 0 || (!res && !(status->flag & RX_FLAG_ALLOW_SAME_PN))) { key->u.ccmp.replays++; return RX_DROP_UNUSABLE; } if (!(status->flag & RX_FLAG_DECRYPTED)) { u8 aad[2 * AES_BLOCK_SIZE]; u8 b_0[AES_BLOCK_SIZE]; /* hardware didn't decrypt/verify MIC */ ccmp_special_blocks(skb, pn, b_0, aad); if (ieee80211_aes_ccm_decrypt( key->u.ccmp.tfm, b_0, aad, skb->data + hdrlen + IEEE80211_CCMP_HDR_LEN, data_len, skb->data + skb->len - mic_len)) return RX_DROP_UNUSABLE; } memcpy(key->u.ccmp.rx_pn[queue], pn, IEEE80211_CCMP_PN_LEN); } /* Remove CCMP header and MIC */ if (pskb_trim(skb, skb->len - mic_len)) return RX_DROP_UNUSABLE; memmove(skb->data + IEEE80211_CCMP_HDR_LEN, skb->data, hdrlen); skb_pull(skb, IEEE80211_CCMP_HDR_LEN); return RX_CONTINUE; } static void gcmp_special_blocks(struct sk_buff *skb, u8 *pn, u8 *j_0, u8 *aad) { __le16 mask_fc; u8 qos_tid; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; memcpy(j_0, hdr->addr2, ETH_ALEN); memcpy(&j_0[ETH_ALEN], pn, IEEE80211_GCMP_PN_LEN); j_0[13] = 0; j_0[14] = 0; j_0[AES_BLOCK_SIZE - 1] = 0x01; /* AAD (extra authenticate-only data) / masked 802.11 header * FC | A1 | A2 | A3 | SC | [A4] | [QC] */ put_unaligned_be16(ieee80211_hdrlen(hdr->frame_control) - 2, &aad[0]); /* Mask FC: zero subtype b4 b5 b6 (if not mgmt) * Retry, PwrMgt, MoreData; set Protected */ mask_fc = hdr->frame_control; mask_fc &= ~cpu_to_le16(IEEE80211_FCTL_RETRY | IEEE80211_FCTL_PM | IEEE80211_FCTL_MOREDATA); if (!ieee80211_is_mgmt(hdr->frame_control)) mask_fc &= ~cpu_to_le16(0x0070); mask_fc |= cpu_to_le16(IEEE80211_FCTL_PROTECTED); put_unaligned(mask_fc, (__le16 *)&aad[2]); memcpy(&aad[4], &hdr->addr1, 3 * ETH_ALEN); /* Mask Seq#, leave Frag# */ aad[22] = *((u8 *)&hdr->seq_ctrl) & 0x0f; aad[23] = 0; if (ieee80211_is_data_qos(hdr->frame_control)) qos_tid = ieee80211_get_tid(hdr); else qos_tid = 0; if (ieee80211_has_a4(hdr->frame_control)) { memcpy(&aad[24], hdr->addr4, ETH_ALEN); aad[30] = qos_tid; aad[31] = 0; } else { memset(&aad[24], 0, ETH_ALEN + IEEE80211_QOS_CTL_LEN); aad[24] = qos_tid; } } static inline void gcmp_pn2hdr(u8 *hdr, const u8 *pn, int key_id) { hdr[0] = pn[5]; hdr[1] = pn[4]; hdr[2] = 0; hdr[3] = 0x20 | (key_id << 6); hdr[4] = pn[3]; hdr[5] = pn[2]; hdr[6] = pn[1]; hdr[7] = pn[0]; } static inline void gcmp_hdr2pn(u8 *pn, const u8 *hdr) { pn[0] = hdr[7]; pn[1] = hdr[6]; pn[2] = hdr[5]; pn[3] = hdr[4]; pn[4] = hdr[1]; pn[5] = hdr[0]; } static int gcmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct ieee80211_key *key = tx->key; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); int hdrlen, len, tail; u8 *pos; u8 pn[6]; u64 pn64; u8 aad[GCM_AAD_LEN]; u8 j_0[AES_BLOCK_SIZE]; if (info->control.hw_key && !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV) && !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE) && 
!((info->control.hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV_MGMT) && ieee80211_is_mgmt(hdr->frame_control))) { /* hwaccel has no need for preallocated room for GCMP * header or MIC fields */ return 0; } hdrlen = ieee80211_hdrlen(hdr->frame_control); len = skb->len - hdrlen; if (info->control.hw_key) tail = 0; else tail = IEEE80211_GCMP_MIC_LEN; if (WARN_ON(skb_tailroom(skb) < tail || skb_headroom(skb) < IEEE80211_GCMP_HDR_LEN)) return -1; pos = skb_push(skb, IEEE80211_GCMP_HDR_LEN); memmove(pos, pos + IEEE80211_GCMP_HDR_LEN, hdrlen); skb_set_network_header(skb, skb_network_offset(skb) + IEEE80211_GCMP_HDR_LEN); /* the HW only needs room for the IV, but not the actual IV */ if (info->control.hw_key && (info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)) return 0; hdr = (struct ieee80211_hdr *)pos; pos += hdrlen; pn64 = atomic64_inc_return(&key->conf.tx_pn); pn[5] = pn64; pn[4] = pn64 >> 8; pn[3] = pn64 >> 16; pn[2] = pn64 >> 24; pn[1] = pn64 >> 32; pn[0] = pn64 >> 40; gcmp_pn2hdr(pos, pn, key->conf.keyidx); /* hwaccel - with software GCMP header */ if (info->control.hw_key) return 0; pos += IEEE80211_GCMP_HDR_LEN; gcmp_special_blocks(skb, pn, j_0, aad); return ieee80211_aes_gcm_encrypt(key->u.gcmp.tfm, j_0, aad, pos, len, skb_put(skb, IEEE80211_GCMP_MIC_LEN)); } ieee80211_tx_result ieee80211_crypto_gcmp_encrypt(struct ieee80211_tx_data *tx) { struct sk_buff *skb; ieee80211_tx_set_protected(tx); skb_queue_walk(&tx->skbs, skb) { if (gcmp_encrypt_skb(tx, skb) < 0) return TX_DROP; } return TX_CONTINUE; } ieee80211_rx_result ieee80211_crypto_gcmp_decrypt(struct ieee80211_rx_data *rx) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; int hdrlen; struct ieee80211_key *key = rx->key; struct sk_buff *skb = rx->skb; struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); u8 pn[IEEE80211_GCMP_PN_LEN]; int data_len, queue, mic_len = IEEE80211_GCMP_MIC_LEN; hdrlen = ieee80211_hdrlen(hdr->frame_control); if (!ieee80211_is_data(hdr->frame_control) && !ieee80211_is_robust_mgmt_frame(skb)) return RX_CONTINUE; if (status->flag & RX_FLAG_DECRYPTED) { if (!pskb_may_pull(rx->skb, hdrlen + IEEE80211_GCMP_HDR_LEN)) return RX_DROP_UNUSABLE; if (status->flag & RX_FLAG_MIC_STRIPPED) mic_len = 0; } else { if (skb_linearize(rx->skb)) return RX_DROP_UNUSABLE; } data_len = skb->len - hdrlen - IEEE80211_GCMP_HDR_LEN - mic_len; if (!rx->sta || data_len < 0) return RX_DROP_UNUSABLE; if (!(status->flag & RX_FLAG_PN_VALIDATED)) { int res; gcmp_hdr2pn(pn, skb->data + hdrlen); queue = rx->security_idx; res = memcmp(pn, key->u.gcmp.rx_pn[queue], IEEE80211_GCMP_PN_LEN); if (res < 0 || (!res && !(status->flag & RX_FLAG_ALLOW_SAME_PN))) { key->u.gcmp.replays++; return RX_DROP_UNUSABLE; } if (!(status->flag & RX_FLAG_DECRYPTED)) { u8 aad[2 * AES_BLOCK_SIZE]; u8 j_0[AES_BLOCK_SIZE]; /* hardware didn't decrypt/verify MIC */ gcmp_special_blocks(skb, pn, j_0, aad); if (ieee80211_aes_gcm_decrypt( key->u.gcmp.tfm, j_0, aad, skb->data + hdrlen + IEEE80211_GCMP_HDR_LEN, data_len, skb->data + skb->len - IEEE80211_GCMP_MIC_LEN)) return RX_DROP_UNUSABLE; } memcpy(key->u.gcmp.rx_pn[queue], pn, IEEE80211_GCMP_PN_LEN); } /* Remove GCMP header and MIC */ if (pskb_trim(skb, skb->len - mic_len)) return RX_DROP_UNUSABLE; memmove(skb->data + IEEE80211_GCMP_HDR_LEN, skb->data, hdrlen); skb_pull(skb, IEEE80211_GCMP_HDR_LEN); return RX_CONTINUE; } static ieee80211_tx_result ieee80211_crypto_cs_encrypt(struct ieee80211_tx_data *tx, struct sk_buff *skb) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr 
*)skb->data; struct ieee80211_key *key = tx->key; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); int hdrlen; u8 *pos, iv_len = key->conf.iv_len; if (info->control.hw_key && !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)) { /* hwaccel has no need for preallocated head room */ return TX_CONTINUE; } if (unlikely(skb_headroom(skb) < iv_len && pskb_expand_head(skb, iv_len, 0, GFP_ATOMIC))) return TX_DROP; hdrlen = ieee80211_hdrlen(hdr->frame_control); pos = skb_push(skb, iv_len); memmove(pos, pos + iv_len, hdrlen); return TX_CONTINUE; } static inline int ieee80211_crypto_cs_pn_compare(u8 *pn1, u8 *pn2, int len) { int i; /* pn is little endian */ for (i = len - 1; i >= 0; i--) { if (pn1[i] < pn2[i]) return -1; else if (pn1[i] > pn2[i]) return 1; } return 0; } static ieee80211_rx_result ieee80211_crypto_cs_decrypt(struct ieee80211_rx_data *rx) { struct ieee80211_key *key = rx->key; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; const struct ieee80211_cipher_scheme *cs = NULL; int hdrlen = ieee80211_hdrlen(hdr->frame_control); struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); int data_len; u8 *rx_pn; u8 *skb_pn; u8 qos_tid; if (!rx->sta || !rx->sta->cipher_scheme || !(status->flag & RX_FLAG_DECRYPTED)) return RX_DROP_UNUSABLE; if (!ieee80211_is_data(hdr->frame_control)) return RX_CONTINUE; cs = rx->sta->cipher_scheme; data_len = rx->skb->len - hdrlen - cs->hdr_len; if (data_len < 0) return RX_DROP_UNUSABLE; if (ieee80211_is_data_qos(hdr->frame_control)) qos_tid = ieee80211_get_tid(hdr); else qos_tid = 0; if (skb_linearize(rx->skb)) return RX_DROP_UNUSABLE; hdr = (struct ieee80211_hdr *)rx->skb->data; rx_pn = key->u.gen.rx_pn[qos_tid]; skb_pn = rx->skb->data + hdrlen + cs->pn_off; if (ieee80211_crypto_cs_pn_compare(skb_pn, rx_pn, cs->pn_len) <= 0) return RX_DROP_UNUSABLE; memcpy(rx_pn, skb_pn, cs->pn_len); /* remove security header and MIC */ if (pskb_trim(rx->skb, rx->skb->len - cs->mic_len)) return RX_DROP_UNUSABLE; memmove(rx->skb->data + cs->hdr_len, rx->skb->data, hdrlen); skb_pull(rx->skb, cs->hdr_len); return RX_CONTINUE; } static void bip_aad(struct sk_buff *skb, u8 *aad) { __le16 mask_fc; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; /* BIP AAD: FC(masked) || A1 || A2 || A3 */ /* FC type/subtype */ /* Mask FC Retry, PwrMgt, MoreData flags to zero */ mask_fc = hdr->frame_control; mask_fc &= ~cpu_to_le16(IEEE80211_FCTL_RETRY | IEEE80211_FCTL_PM | IEEE80211_FCTL_MOREDATA); put_unaligned(mask_fc, (__le16 *) &aad[0]); /* A1 || A2 || A3 */ memcpy(aad + 2, &hdr->addr1, 3 * ETH_ALEN); } static inline void bip_ipn_set64(u8 *d, u64 pn) { *d++ = pn; *d++ = pn >> 8; *d++ = pn >> 16; *d++ = pn >> 24; *d++ = pn >> 32; *d = pn >> 40; } static inline void bip_ipn_swap(u8 *d, const u8 *s) { *d++ = s[5]; *d++ = s[4]; *d++ = s[3]; *d++ = s[2]; *d++ = s[1]; *d = s[0]; } ieee80211_tx_result ieee80211_crypto_aes_cmac_encrypt(struct ieee80211_tx_data *tx) { struct sk_buff *skb; struct ieee80211_tx_info *info; struct ieee80211_key *key = tx->key; struct ieee80211_mmie *mmie; u8 aad[20]; u64 pn64; if (WARN_ON(skb_queue_len(&tx->skbs) != 1)) return TX_DROP; skb = skb_peek(&tx->skbs); info = IEEE80211_SKB_CB(skb); if (info->control.hw_key) return TX_CONTINUE; if (WARN_ON(skb_tailroom(skb) < sizeof(*mmie))) return TX_DROP; mmie = skb_put(skb, sizeof(*mmie)); mmie->element_id = WLAN_EID_MMIE; mmie->length = sizeof(*mmie) - 2; mmie->key_id = cpu_to_le16(key->conf.keyidx); /* PN = PN + 1 */ pn64 = atomic64_inc_return(&key->conf.tx_pn); 
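/* Note: the MMIE carries the IPN least-significant octet first; bip_ipn_set64() below writes pn64 out in that order, while the RX side uses bip_ipn_swap() to restore big-endian order so that memcmp() compares PNs numerically. */ 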
bip_ipn_set64(mmie->sequence_number, pn64); bip_aad(skb, aad); /* * MIC = AES-128-CMAC(IGTK, AAD || Management Frame Body || MMIE, 64) */ ieee80211_aes_cmac(key->u.aes_cmac.tfm, aad, skb->data + 24, skb->len - 24, mmie->mic); return TX_CONTINUE; } ieee80211_tx_result ieee80211_crypto_aes_cmac_256_encrypt(struct ieee80211_tx_data *tx) { struct sk_buff *skb; struct ieee80211_tx_info *info; struct ieee80211_key *key = tx->key; struct ieee80211_mmie_16 *mmie; u8 aad[20]; u64 pn64; if (WARN_ON(skb_queue_len(&tx->skbs) != 1)) return TX_DROP; skb = skb_peek(&tx->skbs); info = IEEE80211_SKB_CB(skb); if (info->control.hw_key) return TX_CONTINUE; if (WARN_ON(skb_tailroom(skb) < sizeof(*mmie))) return TX_DROP; mmie = skb_put(skb, sizeof(*mmie)); mmie->element_id = WLAN_EID_MMIE; mmie->length = sizeof(*mmie) - 2; mmie->key_id = cpu_to_le16(key->conf.keyidx); /* PN = PN + 1 */ pn64 = atomic64_inc_return(&key->conf.tx_pn); bip_ipn_set64(mmie->sequence_number, pn64); bip_aad(skb, aad); /* MIC = AES-256-CMAC(IGTK, AAD || Management Frame Body || MMIE, 128) */ ieee80211_aes_cmac_256(key->u.aes_cmac.tfm, aad, skb->data + 24, skb->len - 24, mmie->mic); return TX_CONTINUE; } ieee80211_rx_result ieee80211_crypto_aes_cmac_decrypt(struct ieee80211_rx_data *rx) { struct sk_buff *skb = rx->skb; struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); struct ieee80211_key *key = rx->key; struct ieee80211_mmie *mmie; u8 aad[20], mic[8], ipn[6]; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; if (!ieee80211_is_mgmt(hdr->frame_control)) return RX_CONTINUE; /* management frames are already linear */ if (skb->len < 24 + sizeof(*mmie)) return RX_DROP_UNUSABLE; mmie = (struct ieee80211_mmie *) (skb->data + skb->len - sizeof(*mmie)); if (mmie->element_id != WLAN_EID_MMIE || mmie->length != sizeof(*mmie) - 2) return RX_DROP_UNUSABLE; /* Invalid MMIE */ bip_ipn_swap(ipn, mmie->sequence_number); if (memcmp(ipn, key->u.aes_cmac.rx_pn, 6) <= 0) { key->u.aes_cmac.replays++; return RX_DROP_UNUSABLE; } if (!(status->flag & RX_FLAG_DECRYPTED)) { /* hardware didn't decrypt/verify MIC */ bip_aad(skb, aad); ieee80211_aes_cmac(key->u.aes_cmac.tfm, aad, skb->data + 24, skb->len - 24, mic); if (crypto_memneq(mic, mmie->mic, sizeof(mmie->mic))) { key->u.aes_cmac.icverrors++; return RX_DROP_UNUSABLE; } } memcpy(key->u.aes_cmac.rx_pn, ipn, 6); /* Remove MMIE */ skb_trim(skb, skb->len - sizeof(*mmie)); return RX_CONTINUE; } ieee80211_rx_result ieee80211_crypto_aes_cmac_256_decrypt(struct ieee80211_rx_data *rx) { struct sk_buff *skb = rx->skb; struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); struct ieee80211_key *key = rx->key; struct ieee80211_mmie_16 *mmie; u8 aad[20], mic[16], ipn[6]; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; if (!ieee80211_is_mgmt(hdr->frame_control)) return RX_CONTINUE; /* management frames are already linear */ if (skb->len < 24 + sizeof(*mmie)) return RX_DROP_UNUSABLE; mmie = (struct ieee80211_mmie_16 *) (skb->data + skb->len - sizeof(*mmie)); if (mmie->element_id != WLAN_EID_MMIE || mmie->length != sizeof(*mmie) - 2) return RX_DROP_UNUSABLE; /* Invalid MMIE */ bip_ipn_swap(ipn, mmie->sequence_number); if (memcmp(ipn, key->u.aes_cmac.rx_pn, 6) <= 0) { key->u.aes_cmac.replays++; return RX_DROP_UNUSABLE; } if (!(status->flag & RX_FLAG_DECRYPTED)) { /* hardware didn't decrypt/verify MIC */ bip_aad(skb, aad); ieee80211_aes_cmac_256(key->u.aes_cmac.tfm, aad, skb->data + 24, skb->len - 24, mic); if (crypto_memneq(mic, mmie->mic, sizeof(mmie->mic))) { 
key->u.aes_cmac.icverrors++; return RX_DROP_UNUSABLE; } } memcpy(key->u.aes_cmac.rx_pn, ipn, 6); /* Remove MMIE */ skb_trim(skb, skb->len - sizeof(*mmie)); return RX_CONTINUE; } ieee80211_tx_result ieee80211_crypto_aes_gmac_encrypt(struct ieee80211_tx_data *tx) { struct sk_buff *skb; struct ieee80211_tx_info *info; struct ieee80211_key *key = tx->key; struct ieee80211_mmie_16 *mmie; struct ieee80211_hdr *hdr; u8 aad[GMAC_AAD_LEN]; u64 pn64; u8 nonce[GMAC_NONCE_LEN]; if (WARN_ON(skb_queue_len(&tx->skbs) != 1)) return TX_DROP; skb = skb_peek(&tx->skbs); info = IEEE80211_SKB_CB(skb); if (info->control.hw_key) return TX_CONTINUE; if (WARN_ON(skb_tailroom(skb) < sizeof(*mmie))) return TX_DROP; mmie = skb_put(skb, sizeof(*mmie)); mmie->element_id = WLAN_EID_MMIE; mmie->length = sizeof(*mmie) - 2; mmie->key_id = cpu_to_le16(key->conf.keyidx); /* PN = PN + 1 */ pn64 = atomic64_inc_return(&key->conf.tx_pn); bip_ipn_set64(mmie->sequence_number, pn64); bip_aad(skb, aad); hdr = (struct ieee80211_hdr *)skb->data; memcpy(nonce, hdr->addr2, ETH_ALEN); bip_ipn_swap(nonce + ETH_ALEN, mmie->sequence_number); /* MIC = AES-GMAC(IGTK, AAD || Management Frame Body || MMIE, 128) */ if (ieee80211_aes_gmac(key->u.aes_gmac.tfm, aad, nonce, skb->data + 24, skb->len - 24, mmie->mic) < 0) return TX_DROP; return TX_CONTINUE; } ieee80211_rx_result ieee80211_crypto_aes_gmac_decrypt(struct ieee80211_rx_data *rx) { struct sk_buff *skb = rx->skb; struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); struct ieee80211_key *key = rx->key; struct ieee80211_mmie_16 *mmie; u8 aad[GMAC_AAD_LEN], mic[GMAC_MIC_LEN], ipn[6], nonce[GMAC_NONCE_LEN]; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; if (!ieee80211_is_mgmt(hdr->frame_control)) return RX_CONTINUE; /* management frames are already linear */ if (skb->len < 24 + sizeof(*mmie)) return RX_DROP_UNUSABLE; mmie = (struct ieee80211_mmie_16 *) (skb->data + skb->len - sizeof(*mmie)); if (mmie->element_id != WLAN_EID_MMIE || mmie->length != sizeof(*mmie) - 2) return RX_DROP_UNUSABLE; /* Invalid MMIE */ bip_ipn_swap(ipn, mmie->sequence_number); if (memcmp(ipn, key->u.aes_gmac.rx_pn, 6) <= 0) { key->u.aes_gmac.replays++; return RX_DROP_UNUSABLE; } if (!(status->flag & RX_FLAG_DECRYPTED)) { /* hardware didn't decrypt/verify MIC */ bip_aad(skb, aad); memcpy(nonce, hdr->addr2, ETH_ALEN); memcpy(nonce + ETH_ALEN, ipn, 6); if (ieee80211_aes_gmac(key->u.aes_gmac.tfm, aad, nonce, skb->data + 24, skb->len - 24, mic) < 0 || crypto_memneq(mic, mmie->mic, sizeof(mmie->mic))) { key->u.aes_gmac.icverrors++; return RX_DROP_UNUSABLE; } } memcpy(key->u.aes_gmac.rx_pn, ipn, 6); /* Remove MMIE */ skb_trim(skb, skb->len - sizeof(*mmie)); return RX_CONTINUE; } ieee80211_tx_result ieee80211_crypto_hw_encrypt(struct ieee80211_tx_data *tx) { struct sk_buff *skb; struct ieee80211_tx_info *info = NULL; ieee80211_tx_result res; skb_queue_walk(&tx->skbs, skb) { info = IEEE80211_SKB_CB(skb); /* handle hw-only algorithm */ if (!info->control.hw_key) return TX_DROP; if (tx->key->flags & KEY_FLAG_CIPHER_SCHEME) { res = ieee80211_crypto_cs_encrypt(tx, skb); if (res != TX_CONTINUE) return res; } } ieee80211_tx_set_protected(tx); return TX_CONTINUE; } ieee80211_rx_result ieee80211_crypto_hw_decrypt(struct ieee80211_rx_data *rx) { if (rx->sta && rx->sta->cipher_scheme) return ieee80211_crypto_cs_decrypt(rx); return RX_DROP_UNUSABLE; } backport-iwlwifi-dkms-7906/net/mac80211/wpa.h000066400000000000000000000033511351064634500205650ustar00rootroot00000000000000/* * Copyright 2002-2004, Instant802 
Networks, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #ifndef WPA_H #define WPA_H #include #include #include "ieee80211_i.h" ieee80211_tx_result ieee80211_tx_h_michael_mic_add(struct ieee80211_tx_data *tx); ieee80211_rx_result ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx); ieee80211_tx_result ieee80211_crypto_tkip_encrypt(struct ieee80211_tx_data *tx); ieee80211_rx_result ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx); ieee80211_tx_result ieee80211_crypto_ccmp_encrypt(struct ieee80211_tx_data *tx, unsigned int mic_len); ieee80211_rx_result ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx, unsigned int mic_len); ieee80211_tx_result ieee80211_crypto_aes_cmac_encrypt(struct ieee80211_tx_data *tx); ieee80211_tx_result ieee80211_crypto_aes_cmac_256_encrypt(struct ieee80211_tx_data *tx); ieee80211_rx_result ieee80211_crypto_aes_cmac_decrypt(struct ieee80211_rx_data *rx); ieee80211_rx_result ieee80211_crypto_aes_cmac_256_decrypt(struct ieee80211_rx_data *rx); ieee80211_tx_result ieee80211_crypto_aes_gmac_encrypt(struct ieee80211_tx_data *tx); ieee80211_rx_result ieee80211_crypto_aes_gmac_decrypt(struct ieee80211_rx_data *rx); ieee80211_tx_result ieee80211_crypto_hw_encrypt(struct ieee80211_tx_data *tx); ieee80211_rx_result ieee80211_crypto_hw_decrypt(struct ieee80211_rx_data *rx); ieee80211_tx_result ieee80211_crypto_gcmp_encrypt(struct ieee80211_tx_data *tx); ieee80211_rx_result ieee80211_crypto_gcmp_decrypt(struct ieee80211_rx_data *rx); #endif /* WPA_H */ backport-iwlwifi-dkms-7906/net/wireless/000077500000000000000000000000001351064634500203245ustar00rootroot00000000000000backport-iwlwifi-dkms-7906/net/wireless/.gitignore000066400000000000000000000000361351064634500223130ustar00rootroot00000000000000shipped-certs.c extra-certs.c backport-iwlwifi-dkms-7906/net/wireless/Kconfig000066400000000000000000000164441351064634500216400ustar00rootroot00000000000000config CFG80211 tristate "cfg80211 - wireless configuration API" depends on m depends on RFKILL || !RFKILL depends on FW_LOADER # may need to update this when certificates are changed and are # using a different algorithm, though right now they shouldn't # (this is here rather than below to allow it to be a module) select CRYPTO_SHA256 if CFG80211_USE_KERNEL_REGDB_KEYS ---help--- cfg80211 is the Linux wireless LAN (802.11) configuration API. Enable this if you have a wireless device. For more information refer to documentation on the wireless wiki: http://wireless.kernel.org/en/developers/Documentation/cfg80211 When built as a module it will be called cfg80211. if CFG80211 config NL80211_TESTMODE bool "nl80211 testmode command" help The nl80211 testmode command helps implementing things like factory calibration or validation tools for wireless chips. Select this option ONLY for kernels that are specifically built for such purposes. Debugging tools that are supposed to end up in the hands of users should better be implemented with debugfs. Say N. config CFG80211_DEVELOPER_WARNINGS bool "enable developer warnings" default n help This option enables some additional warnings that help cfg80211 developers and driver developers, but beware that they can also trigger due to races with userspace. 
For example, when a driver reports that it was disconnected from the AP, but the user disconnects manually at the same time, the warning might trigger spuriously due to races. Say Y only if you are developing cfg80211 or a driver based on it (or mac80211). config CFG80211_CERTIFICATION_ONUS bool "cfg80211 certification onus" depends on EXPERT default n ---help--- You should disable this option unless you are both capable and willing to ensure your system will remain regulatory compliant with the features available under this option. Some options may still be under heavy development and for whatever reason regulatory compliance has not or cannot yet be verified. Regulatory verification may at times only be possible until you have the final system in place. This option should only be enabled by system integrators or distributions that have done work necessary to ensure regulatory certification on the system with the enabled features. Alternatively you can enable this option if you are a wireless researcher and are working in a controlled and approved environment by your local regulatory agency. config CFG80211_REQUIRE_SIGNED_REGDB bool "require regdb signature" if CFG80211_CERTIFICATION_ONUS default y select BPAUTO_SYSTEM_DATA_VERIFICATION help Require that in addition to the "regulatory.db" file a "regulatory.db.p7s" can be loaded with a valid PKCS#7 signature for the regulatory.db file made by one of the keys in the certs/ directory. config CFG80211_USE_KERNEL_REGDB_KEYS bool "allow regdb keys shipped with the kernel" if CFG80211_CERTIFICATION_ONUS default y depends on CFG80211_REQUIRE_SIGNED_REGDB help Allow the regulatory database to be signed by one of the keys for which certificates are part of the kernel sources (in net/wireless/certs/). This is currently only Seth Forshee's key, who is the regulatory database maintainer. config CFG80211_EXTRA_REGDB_KEYDIR string "additional regdb key directory" if CFG80211_CERTIFICATION_ONUS depends on CFG80211_REQUIRE_SIGNED_REGDB help If selected, point to a directory with DER-encoded X.509 certificates like in the kernel sources (net/wireless/certs/) that shall be accepted for a signed regulatory database. Note that you need to also select the correct CRYPTO_ modules for your certificates, and if cfg80211 is built-in they also must be. config CFG80211_REG_CELLULAR_HINTS bool "cfg80211 regulatory support for cellular base station hints" depends on CFG80211_CERTIFICATION_ONUS ---help--- This option enables support for parsing regulatory hints from cellular base stations. If enabled and at least one driver claims support for parsing cellular base station hints the regulatory core will allow and parse these regulatory hints. The regulatory core will only apply these regulatory hints on drivers that support this feature. You should only enable this feature if you have tested and validated this feature on your systems. config CFG80211_REG_RELAX_NO_IR bool "cfg80211 support for NO_IR relaxation" depends on CFG80211_CERTIFICATION_ONUS ---help--- This option enables support for relaxation of the NO_IR flag for situations that certain regulatory bodies have provided clarifications on how relaxation can occur. This feature has an inherent dependency on userspace features which must have been properly tested and as such is not enabled by default. 
A relaxation feature example is allowing the operation of a P2P group owner (GO) on channels marked with NO_IR if there is an additional BSS interface which is associated to an AP which userspace assumes or confirms to be an authorized master, i.e., with radar detection support and DFS capabilities. However, note that in order to not create daisy chain scenarios, this relaxation is not allowed in cases where the BSS client is associated to a P2P GO; in addition, a P2P GO instantiated on a channel due to this relaxation must not allow connections from non-P2P clients. The regulatory core will apply these relaxations only for drivers that support this feature by declaring the appropriate channel flags and capabilities in their registration flow. config CFG80211_DEFAULT_PS bool "enable powersave by default" default y help This option enables powersave mode by default. If this causes your applications to misbehave you should fix your applications instead -- they need to register their network latency requirement, see Documentation/power/pm_qos_interface.txt. config CFG80211_DEBUGFS bool "cfg80211 DebugFS entries" depends on DEBUG_FS ---help--- You can enable this if you want debugfs entries for cfg80211. If unsure, say N. config CFG80211_CRDA_SUPPORT bool "support CRDA" if EXPERT default y help You should enable this option unless you know for sure you have no need for it, for example when using internal regdb (above) or the database loaded as a firmware file. If unsure, say Y. config CFG80211_WEXT bool "cfg80211 wireless extensions compatibility" if !CFG80211_WEXT_EXPORT depends on WEXT_CORE default y if CFG80211_WEXT_EXPORT help Enable this option if you need old userspace for wireless extensions with cfg80211-based drivers. config CFG80211_WEXT_EXPORT bool help Drivers should select this option if they require cfg80211's wext compatibility symbols to be exported. endif # CFG80211 config LIB80211 tristate depends on m default n help This option enables a library of common routines used by IEEE802.11 wireless LAN drivers. Drivers should select this themselves if needed. config LIB80211_CRYPT_WEP tristate depends on m config LIB80211_CRYPT_CCMP tristate depends on m config LIB80211_CRYPT_TKIP tristate depends on m config LIB80211_DEBUG bool "lib80211 debugging messages" depends on LIB80211 default n ---help--- You can enable this if you want verbose debugging messages from lib80211. If unsure, say N. 
backport-iwlwifi-dkms-7906/net/wireless/Makefile000066400000000000000000000041371351064634500217710ustar00rootroot00000000000000# SPDX-License-Identifier: GPL-2.0 obj-$(CPTCFG_CFG80211) += cfg80211.o obj-$(CPTCFG_LIB80211) += lib80211.o obj-$(CPTCFG_LIB80211_CRYPT_WEP) += lib80211_crypt_wep.o obj-$(CPTCFG_LIB80211_CRYPT_CCMP) += lib80211_crypt_ccmp.o obj-$(CPTCFG_LIB80211_CRYPT_TKIP) += lib80211_crypt_tkip.o obj-$(CONFIG_WEXT_CORE) += wext-core.o obj-$(CONFIG_WEXT_PROC) += wext-proc.o obj-$(CONFIG_WEXT_SPY) += wext-spy.o obj-$(CONFIG_WEXT_PRIV) += wext-priv.o cfg80211-y += core.o sysfs.o radiotap.o util.o reg.o scan.o nl80211.o cfg80211-y += mlme.o ibss.o sme.o chan.o ethtool.o mesh.o ap.o trace.o ocb.o cfg80211-y += pmsr.o cfg80211-$(CONFIG_OF) += of.o cfg80211-$(CPTCFG_CFG80211_DEBUGFS) += debugfs.o cfg80211-$(CPTCFG_CFG80211_WEXT) += wext-compat.o wext-sme.o CFLAGS_trace.o := -I$(src) subdir-ccflags-y += $(call cc-option,-Wimplicit-fallthrough) cfg80211-$(CPTCFG_CFG80211_USE_KERNEL_REGDB_KEYS) += shipped-certs.o ifneq ($(CPTCFG_CFG80211_EXTRA_REGDB_KEYDIR),) cfg80211-y += extra-certs.o endif $(obj)/shipped-certs.c: $(wildcard $(src)/certs/*.hex) @$(kecho) " GEN $@" @(echo '#include "reg.h"'; \ echo 'const u8 shipped_regdb_certs[] = {'; \ cat $^ ; \ echo '};'; \ echo 'unsigned int shipped_regdb_certs_len = sizeof(shipped_regdb_certs);'; \ ) > $@ $(obj)/extra-certs.c: $(CPTCFG_CFG80211_EXTRA_REGDB_KEYDIR:"%"=%) \ $(wildcard $(CPTCFG_CFG80211_EXTRA_REGDB_KEYDIR:"%"=%)/*.x509) @$(kecho) " GEN $@" @(set -e; \ allf=""; \ for f in $^ ; do \ # similar to hexdump -v -e '1/1 "0x%.2x," "\n"' \ thisf=$$(od -An -v -tx1 < $$f | \ sed -e 's/ /\n/g' | \ sed -e 's/^[0-9a-f]\+$$/\0/;t;d' | \ sed -e 's/^/0x/;s/$$/,/'); \ # file should not be empty - maybe command substitution failed? \ test ! -z "$$thisf";\ allf=$$allf$$thisf;\ done; \ ( \ echo '#include "reg.h"'; \ echo 'const u8 extra_regdb_certs[] = {'; \ echo "$$allf"; \ echo '};'; \ echo 'unsigned int extra_regdb_certs_len = sizeof(extra_regdb_certs);'; \ ) > $@) clean-files += shipped-certs.c extra-certs.c backport-iwlwifi-dkms-7906/net/wireless/ap.c000066400000000000000000000024761351064634500211010ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0 #include #include #include #include "nl80211.h" #include "core.h" #include "rdev-ops.h" int __cfg80211_stop_ap(struct cfg80211_registered_device *rdev, struct net_device *dev, bool notify) { struct wireless_dev *wdev = dev->ieee80211_ptr; int err; ASSERT_WDEV_LOCK(wdev); if (!rdev->ops->stop_ap) return -EOPNOTSUPP; if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP && dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO) return -EOPNOTSUPP; if (!wdev->beacon_interval) return -ENOENT; err = rdev_stop_ap(rdev, dev); if (!err) { wdev->conn_owner_nlportid = 0; wdev->beacon_interval = 0; memset(&wdev->chandef, 0, sizeof(wdev->chandef)); wdev->ssid_len = 0; rdev_set_qos_map(rdev, dev, NULL); if (notify) nl80211_send_ap_stopped(wdev); /* Should we apply the grace period during beaconing interface * shutdown also? 
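* Either way, the cfg80211_sched_dfs_chan_update() call below schedules a fresh evaluation of the DFS channel state once the AP is down. 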
*/ cfg80211_sched_dfs_chan_update(rdev); } schedule_work(&cfg80211_disconnect_work); return err; } int cfg80211_stop_ap(struct cfg80211_registered_device *rdev, struct net_device *dev, bool notify) { struct wireless_dev *wdev = dev->ieee80211_ptr; int err; wdev_lock(wdev); err = __cfg80211_stop_ap(rdev, dev, notify); wdev_unlock(wdev); return err; } backport-iwlwifi-dkms-7906/net/wireless/certs/000077500000000000000000000000001351064634500214445ustar00rootroot00000000000000backport-iwlwifi-dkms-7906/net/wireless/certs/sforshee.hex000066400000000000000000000100271351064634500237700ustar00rootroot00000000000000/* Seth Forshee's regdb certificate */ 0x30, 0x82, 0x02, 0xa4, 0x30, 0x82, 0x01, 0x8c, 0x02, 0x09, 0x00, 0xb2, 0x8d, 0xdf, 0x47, 0xae, 0xf9, 0xce, 0xa7, 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x0b, 0x05, 0x00, 0x30, 0x13, 0x31, 0x11, 0x30, 0x0f, 0x06, 0x03, 0x55, 0x04, 0x03, 0x0c, 0x08, 0x73, 0x66, 0x6f, 0x72, 0x73, 0x68, 0x65, 0x65, 0x30, 0x20, 0x17, 0x0d, 0x31, 0x37, 0x31, 0x30, 0x30, 0x36, 0x31, 0x39, 0x34, 0x30, 0x33, 0x35, 0x5a, 0x18, 0x0f, 0x32, 0x31, 0x31, 0x37, 0x30, 0x39, 0x31, 0x32, 0x31, 0x39, 0x34, 0x30, 0x33, 0x35, 0x5a, 0x30, 0x13, 0x31, 0x11, 0x30, 0x0f, 0x06, 0x03, 0x55, 0x04, 0x03, 0x0c, 0x08, 0x73, 0x66, 0x6f, 0x72, 0x73, 0x68, 0x65, 0x65, 0x30, 0x82, 0x01, 0x22, 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x01, 0x05, 0x00, 0x03, 0x82, 0x01, 0x0f, 0x00, 0x30, 0x82, 0x01, 0x0a, 0x02, 0x82, 0x01, 0x01, 0x00, 0xb5, 0x40, 0xe3, 0x9c, 0x28, 0x84, 0x39, 0x03, 0xf2, 0x39, 0xd7, 0x66, 0x2c, 0x41, 0x38, 0x15, 0xac, 0x7e, 0xa5, 0x83, 0x71, 0x25, 0x7e, 0x90, 0x7c, 0x68, 0xdd, 0x6f, 0x3f, 0xd9, 0xd7, 0x59, 0x38, 0x9f, 0x7c, 0x6a, 0x52, 0xc2, 0x03, 0x2a, 0x2d, 0x7e, 0x66, 0xf4, 0x1e, 0xb3, 0x12, 0x70, 0x20, 0x5b, 0xd4, 0x97, 0x32, 0x3d, 0x71, 0x8b, 0x3b, 0x1b, 0x08, 0x17, 0x14, 0x6b, 0x61, 0xc4, 0x57, 0x8b, 0x96, 0x16, 0x1c, 0xfd, 0x24, 0xd5, 0x0b, 0x09, 0xf9, 0x68, 0x11, 0x84, 0xfb, 0xca, 0x51, 0x0c, 0xd1, 0x45, 0x19, 0xda, 0x10, 0x44, 0x8a, 0xd9, 0xfe, 0x76, 0xa9, 0xfd, 0x60, 0x2d, 0x18, 0x0b, 0x28, 0x95, 0xb2, 0x2d, 0xea, 0x88, 0x98, 0xb8, 0xd1, 0x56, 0x21, 0xf0, 0x53, 0x1f, 0xf1, 0x02, 0x6f, 0xe9, 0x46, 0x9b, 0x93, 0x5f, 0x28, 0x90, 0x0f, 0xac, 0x36, 0xfa, 0x68, 0x23, 0x71, 0x57, 0x56, 0xf6, 0xcc, 0xd3, 0xdf, 0x7d, 0x2a, 0xd9, 0x1b, 0x73, 0x45, 0xeb, 0xba, 0x27, 0x85, 0xef, 0x7a, 0x7f, 0xa5, 0xcb, 0x80, 0xc7, 0x30, 0x36, 0xd2, 0x53, 0xee, 0xec, 0xac, 0x1e, 0xe7, 0x31, 0xf1, 0x36, 0xa2, 0x9c, 0x63, 0xc6, 0x65, 0x5b, 0x7f, 0x25, 0x75, 0x68, 0xa1, 0xea, 0xd3, 0x7e, 0x00, 0x5c, 0x9a, 0x5e, 0xd8, 0x20, 0x18, 0x32, 0x77, 0x07, 0x29, 0x12, 0x66, 0x1e, 0x36, 0x73, 0xe7, 0x97, 0x04, 0x41, 0x37, 0xb1, 0xb1, 0x72, 0x2b, 0xf4, 0xa1, 0x29, 0x20, 0x7c, 0x96, 0x79, 0x0b, 0x2b, 0xd0, 0xd8, 0xde, 0xc8, 0x6c, 0x3f, 0x93, 0xfb, 0xc5, 0xee, 0x78, 0x52, 0x11, 0x15, 0x1b, 0x7a, 0xf6, 0xe2, 0x68, 0x99, 0xe7, 0xfb, 0x46, 0x16, 0x84, 0xe3, 0xc7, 0xa1, 0xe6, 0xe0, 0xd2, 0x46, 0xd5, 0xe1, 0xc4, 0x5f, 0xa0, 0x66, 0xf4, 0xda, 0xc4, 0xff, 0x95, 0x1d, 0x02, 0x03, 0x01, 0x00, 0x01, 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x0b, 0x05, 0x00, 0x03, 0x82, 0x01, 0x01, 0x00, 0x87, 0x03, 0xda, 0xf2, 0x82, 0xc2, 0xdd, 0xaf, 0x7c, 0x44, 0x2f, 0x86, 0xd3, 0x5f, 0x4c, 0x93, 0x48, 0xb9, 0xfe, 0x07, 0x17, 0xbb, 0x21, 0xf7, 0x25, 0x23, 0x4e, 0xaa, 0x22, 0x0c, 0x16, 0xb9, 0x73, 0xae, 0x9d, 0x46, 0x7c, 0x75, 0xd9, 0xc3, 0x49, 0x57, 0x47, 0xbf, 0x33, 0xb7, 0x97, 0xec, 0xf5, 0x40, 0x75, 0xc0, 0x46, 0x22, 0xf0, 0xa0, 0x5d, 0x9c, 0x79, 
0x13, 0xa1, 0xff, 0xb8, 0xa3, 0x2f, 0x7b, 0x8e, 0x06, 0x3f, 0xc8, 0xb6, 0xe4, 0x6a, 0x28, 0xf2, 0x34, 0x5c, 0x23, 0x3f, 0x32, 0xc0, 0xe6, 0xad, 0x0f, 0xac, 0xcf, 0x55, 0x74, 0x47, 0x73, 0xd3, 0x01, 0x85, 0xb7, 0x0b, 0x22, 0x56, 0x24, 0x7d, 0x9f, 0x09, 0xa9, 0x0e, 0x86, 0x9e, 0x37, 0x5b, 0x9c, 0x6d, 0x02, 0xd9, 0x8c, 0xc8, 0x50, 0x6a, 0xe2, 0x59, 0xf3, 0x16, 0x06, 0xea, 0xb2, 0x42, 0xb5, 0x58, 0xfe, 0xba, 0xd1, 0x81, 0x57, 0x1a, 0xef, 0xb2, 0x38, 0x88, 0x58, 0xf6, 0xaa, 0xc4, 0x2e, 0x8b, 0x5a, 0x27, 0xe4, 0xa5, 0xe8, 0xa4, 0xca, 0x67, 0x5c, 0xac, 0x72, 0x67, 0xc3, 0x6f, 0x13, 0xc3, 0x2d, 0x35, 0x79, 0xd7, 0x8a, 0xe7, 0xf5, 0xd4, 0x21, 0x30, 0x4a, 0xd5, 0xf6, 0xa3, 0xd9, 0x79, 0x56, 0xf2, 0x0f, 0x10, 0xf7, 0x7d, 0xd0, 0x51, 0x93, 0x2f, 0x47, 0xf8, 0x7d, 0x4b, 0x0a, 0x84, 0x55, 0x12, 0x0a, 0x7d, 0x4e, 0x3b, 0x1f, 0x2b, 0x2f, 0xfc, 0x28, 0xb3, 0x69, 0x34, 0xe1, 0x80, 0x80, 0xbb, 0xe2, 0xaf, 0xb9, 0xd6, 0x30, 0xf1, 0x1d, 0x54, 0x87, 0x23, 0x99, 0x9f, 0x51, 0x03, 0x4c, 0x45, 0x7d, 0x02, 0x65, 0x73, 0xab, 0xfd, 0xcf, 0x94, 0xcc, 0x0d, 0x3a, 0x60, 0xfd, 0x3c, 0x14, 0x2f, 0x16, 0x33, 0xa9, 0x21, 0x1f, 0xcb, 0x50, 0xb1, 0x8f, 0x03, 0xee, 0xa0, 0x66, 0xa9, 0x16, 0x79, 0x14, backport-iwlwifi-dkms-7906/net/wireless/chan.c000066400000000000000000000650531351064634500214120ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0 /* * This file contains helper code to handle channel * settings and keeping track of what is possible at * any point in time. * * Copyright 2009 Johannes Berg * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright 2018 Intel Corporation */ #include #include #include "core.h" #include "rdev-ops.h" void cfg80211_chandef_create(struct cfg80211_chan_def *chandef, struct ieee80211_channel *chan, enum nl80211_channel_type chan_type) { if (WARN_ON(!chan)) return; chandef->chan = chan; chandef->center_freq2 = 0; switch (chan_type) { case NL80211_CHAN_NO_HT: chandef->width = NL80211_CHAN_WIDTH_20_NOHT; chandef->center_freq1 = chan->center_freq; break; case NL80211_CHAN_HT20: chandef->width = NL80211_CHAN_WIDTH_20; chandef->center_freq1 = chan->center_freq; break; case NL80211_CHAN_HT40PLUS: chandef->width = NL80211_CHAN_WIDTH_40; chandef->center_freq1 = chan->center_freq + 10; break; case NL80211_CHAN_HT40MINUS: chandef->width = NL80211_CHAN_WIDTH_40; chandef->center_freq1 = chan->center_freq - 10; break; default: WARN_ON(1); } } EXPORT_SYMBOL(cfg80211_chandef_create); bool cfg80211_chandef_valid(const struct cfg80211_chan_def *chandef) { u32 control_freq; if (!chandef->chan) return false; control_freq = chandef->chan->center_freq; switch (chandef->width) { case NL80211_CHAN_WIDTH_5: case NL80211_CHAN_WIDTH_10: case NL80211_CHAN_WIDTH_20: case NL80211_CHAN_WIDTH_20_NOHT: if (chandef->center_freq1 != control_freq) return false; if (chandef->center_freq2) return false; break; case NL80211_CHAN_WIDTH_40: if (chandef->center_freq1 != control_freq + 10 && chandef->center_freq1 != control_freq - 10) return false; if (chandef->center_freq2) return false; break; case NL80211_CHAN_WIDTH_80P80: if (chandef->center_freq1 != control_freq + 30 && chandef->center_freq1 != control_freq + 10 && chandef->center_freq1 != control_freq - 10 && chandef->center_freq1 != control_freq - 30) return false; if (!chandef->center_freq2) return false; /* adjacent is not allowed -- that's a 160 MHz channel */ if (chandef->center_freq1 - chandef->center_freq2 == 80 || chandef->center_freq2 - chandef->center_freq1 == 80) return false; break; case NL80211_CHAN_WIDTH_80: if (chandef->center_freq1 != 
control_freq + 30 && chandef->center_freq1 != control_freq + 10 && chandef->center_freq1 != control_freq - 10 && chandef->center_freq1 != control_freq - 30) return false; if (chandef->center_freq2) return false; break; case NL80211_CHAN_WIDTH_160: if (chandef->center_freq1 != control_freq + 70 && chandef->center_freq1 != control_freq + 50 && chandef->center_freq1 != control_freq + 30 && chandef->center_freq1 != control_freq + 10 && chandef->center_freq1 != control_freq - 10 && chandef->center_freq1 != control_freq - 30 && chandef->center_freq1 != control_freq - 50 && chandef->center_freq1 != control_freq - 70) return false; if (chandef->center_freq2) return false; break; default: return false; } return true; } EXPORT_SYMBOL(cfg80211_chandef_valid); static void chandef_primary_freqs(const struct cfg80211_chan_def *c, u32 *pri40, u32 *pri80) { int tmp; switch (c->width) { case NL80211_CHAN_WIDTH_40: *pri40 = c->center_freq1; *pri80 = 0; break; case NL80211_CHAN_WIDTH_80: case NL80211_CHAN_WIDTH_80P80: *pri80 = c->center_freq1; /* n_P20 */ tmp = (30 + c->chan->center_freq - c->center_freq1)/20; /* n_P40 */ tmp /= 2; /* freq_P40 */ *pri40 = c->center_freq1 - 20 + 40 * tmp; break; case NL80211_CHAN_WIDTH_160: /* n_P20 */ tmp = (70 + c->chan->center_freq - c->center_freq1)/20; /* n_P40 */ tmp /= 2; /* freq_P40 */ *pri40 = c->center_freq1 - 60 + 40 * tmp; /* n_P80 */ tmp /= 2; *pri80 = c->center_freq1 - 40 + 80 * tmp; break; default: WARN_ON_ONCE(1); } } static int cfg80211_chandef_get_width(const struct cfg80211_chan_def *c) { int width; switch (c->width) { case NL80211_CHAN_WIDTH_5: width = 5; break; case NL80211_CHAN_WIDTH_10: width = 10; break; case NL80211_CHAN_WIDTH_20: case NL80211_CHAN_WIDTH_20_NOHT: width = 20; break; case NL80211_CHAN_WIDTH_40: width = 40; break; case NL80211_CHAN_WIDTH_80P80: case NL80211_CHAN_WIDTH_80: width = 80; break; case NL80211_CHAN_WIDTH_160: width = 160; break; default: WARN_ON_ONCE(1); return -1; } return width; } const struct cfg80211_chan_def * cfg80211_chandef_compatible(const struct cfg80211_chan_def *c1, const struct cfg80211_chan_def *c2) { u32 c1_pri40, c1_pri80, c2_pri40, c2_pri80; /* If they are identical, return */ if (cfg80211_chandef_identical(c1, c2)) return c1; /* otherwise, must have same control channel */ if (c1->chan != c2->chan) return NULL; /* * If they have the same width, but aren't identical, * then they can't be compatible. */ if (c1->width == c2->width) return NULL; /* * can't be compatible if one of them is 5 or 10 MHz, * but they don't have the same width. 
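* For example, a 20 MHz definition sharing its control channel with an 80 MHz definition is treated as compatible with it, and the wider of the two is returned. 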
*/ if (c1->width == NL80211_CHAN_WIDTH_5 || c1->width == NL80211_CHAN_WIDTH_10 || c2->width == NL80211_CHAN_WIDTH_5 || c2->width == NL80211_CHAN_WIDTH_10) return NULL; if (c1->width == NL80211_CHAN_WIDTH_20_NOHT || c1->width == NL80211_CHAN_WIDTH_20) return c2; if (c2->width == NL80211_CHAN_WIDTH_20_NOHT || c2->width == NL80211_CHAN_WIDTH_20) return c1; chandef_primary_freqs(c1, &c1_pri40, &c1_pri80); chandef_primary_freqs(c2, &c2_pri40, &c2_pri80); if (c1_pri40 != c2_pri40) return NULL; WARN_ON(!c1_pri80 && !c2_pri80); if (c1_pri80 && c2_pri80 && c1_pri80 != c2_pri80) return NULL; if (c1->width > c2->width) return c1; return c2; } EXPORT_SYMBOL(cfg80211_chandef_compatible); static void cfg80211_set_chans_dfs_state(struct wiphy *wiphy, u32 center_freq, u32 bandwidth, enum nl80211_dfs_state dfs_state) { struct ieee80211_channel *c; u32 freq; for (freq = center_freq - bandwidth/2 + 10; freq <= center_freq + bandwidth/2 - 10; freq += 20) { c = ieee80211_get_channel(wiphy, freq); if (!c || !(c->flags & IEEE80211_CHAN_RADAR)) continue; c->dfs_state = dfs_state; c->dfs_state_entered = jiffies; } } void cfg80211_set_dfs_state(struct wiphy *wiphy, const struct cfg80211_chan_def *chandef, enum nl80211_dfs_state dfs_state) { int width; if (WARN_ON(!cfg80211_chandef_valid(chandef))) return; width = cfg80211_chandef_get_width(chandef); if (width < 0) return; cfg80211_set_chans_dfs_state(wiphy, chandef->center_freq1, width, dfs_state); if (!chandef->center_freq2) return; cfg80211_set_chans_dfs_state(wiphy, chandef->center_freq2, width, dfs_state); } static u32 cfg80211_get_start_freq(u32 center_freq, u32 bandwidth) { u32 start_freq; if (bandwidth <= 20) start_freq = center_freq; else start_freq = center_freq - bandwidth/2 + 10; return start_freq; } static u32 cfg80211_get_end_freq(u32 center_freq, u32 bandwidth) { u32 end_freq; if (bandwidth <= 20) end_freq = center_freq; else end_freq = center_freq + bandwidth/2 - 10; return end_freq; } static int cfg80211_get_chans_dfs_required(struct wiphy *wiphy, u32 center_freq, u32 bandwidth) { struct ieee80211_channel *c; u32 freq, start_freq, end_freq; start_freq = cfg80211_get_start_freq(center_freq, bandwidth); end_freq = cfg80211_get_end_freq(center_freq, bandwidth); for (freq = start_freq; freq <= end_freq; freq += 20) { c = ieee80211_get_channel(wiphy, freq); if (!c) return -EINVAL; if (c->flags & IEEE80211_CHAN_RADAR) return 1; } return 0; } int cfg80211_chandef_dfs_required(struct wiphy *wiphy, const struct cfg80211_chan_def *chandef, enum nl80211_iftype iftype) { int width; int ret; if (WARN_ON(!cfg80211_chandef_valid(chandef))) return -EINVAL; switch (iftype) { case NL80211_IFTYPE_ADHOC: case NL80211_IFTYPE_AP: case NL80211_IFTYPE_P2P_GO: case NL80211_IFTYPE_MESH_POINT: width = cfg80211_chandef_get_width(chandef); if (width < 0) return -EINVAL; ret = cfg80211_get_chans_dfs_required(wiphy, chandef->center_freq1, width); if (ret < 0) return ret; else if (ret > 0) return BIT(chandef->width); if (!chandef->center_freq2) return 0; ret = cfg80211_get_chans_dfs_required(wiphy, chandef->center_freq2, width); if (ret < 0) return ret; else if (ret > 0) return BIT(chandef->width); break; case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_OCB: case NL80211_IFTYPE_P2P_CLIENT: case NL80211_IFTYPE_MONITOR: case NL80211_IFTYPE_AP_VLAN: case NL80211_IFTYPE_WDS: case NL80211_IFTYPE_P2P_DEVICE: case NL80211_IFTYPE_NAN: case NL80211_IFTYPE_NAN_DATA: break; case NL80211_IFTYPE_UNSPECIFIED: case NUM_NL80211_IFTYPES: WARN_ON(1); } return 0; } 
EXPORT_SYMBOL(cfg80211_chandef_dfs_required); static int cfg80211_get_chans_dfs_usable(struct wiphy *wiphy, u32 center_freq, u32 bandwidth) { struct ieee80211_channel *c; u32 freq, start_freq, end_freq; int count = 0; start_freq = cfg80211_get_start_freq(center_freq, bandwidth); end_freq = cfg80211_get_end_freq(center_freq, bandwidth); /* * Check entire range of channels for the bandwidth. * Check all channels are DFS channels (DFS_USABLE or * DFS_AVAILABLE). Return number of usable channels * (require CAC). Allow DFS and non-DFS channel mix. */ for (freq = start_freq; freq <= end_freq; freq += 20) { c = ieee80211_get_channel(wiphy, freq); if (!c) return -EINVAL; if (c->flags & IEEE80211_CHAN_DISABLED) return -EINVAL; if (c->flags & IEEE80211_CHAN_RADAR) { if (c->dfs_state == NL80211_DFS_UNAVAILABLE) return -EINVAL; if (c->dfs_state == NL80211_DFS_USABLE) count++; } } return count; } bool cfg80211_chandef_dfs_usable(struct wiphy *wiphy, const struct cfg80211_chan_def *chandef) { int width; int r1, r2 = 0; if (WARN_ON(!cfg80211_chandef_valid(chandef))) return false; width = cfg80211_chandef_get_width(chandef); if (width < 0) return false; r1 = cfg80211_get_chans_dfs_usable(wiphy, chandef->center_freq1, width); if (r1 < 0) return false; switch (chandef->width) { case NL80211_CHAN_WIDTH_80P80: WARN_ON(!chandef->center_freq2); r2 = cfg80211_get_chans_dfs_usable(wiphy, chandef->center_freq2, width); if (r2 < 0) return false; break; default: WARN_ON(chandef->center_freq2); break; } return (r1 + r2 > 0); } /* * Checks if center frequency of chan falls with in the bandwidth * range of chandef. */ bool cfg80211_is_sub_chan(struct cfg80211_chan_def *chandef, struct ieee80211_channel *chan) { int width; u32 freq; if (chandef->chan->center_freq == chan->center_freq) return true; width = cfg80211_chandef_get_width(chandef); if (width <= 20) return false; for (freq = chandef->center_freq1 - width / 2 + 10; freq <= chandef->center_freq1 + width / 2 - 10; freq += 20) { if (chan->center_freq == freq) return true; } if (!chandef->center_freq2) return false; for (freq = chandef->center_freq2 - width / 2 + 10; freq <= chandef->center_freq2 + width / 2 - 10; freq += 20) { if (chan->center_freq == freq) return true; } return false; } bool cfg80211_beaconing_iface_active(struct wireless_dev *wdev) { bool active = false; ASSERT_WDEV_LOCK(wdev); if (!wdev->chandef.chan) return false; switch (wdev->iftype) { case NL80211_IFTYPE_AP: case NL80211_IFTYPE_P2P_GO: active = wdev->beacon_interval != 0; break; case NL80211_IFTYPE_ADHOC: active = wdev->ssid_len != 0; break; case NL80211_IFTYPE_MESH_POINT: active = wdev->mesh_id_len != 0; break; case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_OCB: case NL80211_IFTYPE_P2P_CLIENT: case NL80211_IFTYPE_MONITOR: case NL80211_IFTYPE_AP_VLAN: case NL80211_IFTYPE_WDS: case NL80211_IFTYPE_P2P_DEVICE: /* Can NAN type be considered as beaconing interface? 
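* As implemented, no: cfg80211 tracks neither a beacon interval nor an SSID for NAN, so both NAN cases fall through below and the interface is reported as inactive. 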
*/ case NL80211_IFTYPE_NAN: case NL80211_IFTYPE_NAN_DATA: break; case NL80211_IFTYPE_UNSPECIFIED: case NUM_NL80211_IFTYPES: WARN_ON(1); } return active; } static bool cfg80211_is_wiphy_oper_chan(struct wiphy *wiphy, struct ieee80211_channel *chan) { struct wireless_dev *wdev; list_for_each_entry(wdev, &wiphy->wdev_list, list) { wdev_lock(wdev); if (!cfg80211_beaconing_iface_active(wdev)) { wdev_unlock(wdev); continue; } if (cfg80211_is_sub_chan(&wdev->chandef, chan)) { wdev_unlock(wdev); return true; } wdev_unlock(wdev); } return false; } bool cfg80211_any_wiphy_oper_chan(struct wiphy *wiphy, struct ieee80211_channel *chan) { struct cfg80211_registered_device *rdev; ASSERT_RTNL(); if (!(chan->flags & IEEE80211_CHAN_RADAR)) return false; list_for_each_entry(rdev, &cfg80211_rdev_list, list) { if (!reg_dfs_domain_same(wiphy, &rdev->wiphy)) continue; if (cfg80211_is_wiphy_oper_chan(&rdev->wiphy, chan)) return true; } return false; } static bool cfg80211_get_chans_dfs_available(struct wiphy *wiphy, u32 center_freq, u32 bandwidth) { struct ieee80211_channel *c; u32 freq, start_freq, end_freq; bool dfs_offload; dfs_offload = wiphy_ext_feature_isset(wiphy, NL80211_EXT_FEATURE_DFS_OFFLOAD); start_freq = cfg80211_get_start_freq(center_freq, bandwidth); end_freq = cfg80211_get_end_freq(center_freq, bandwidth); /* * Check the entire range of channels for the bandwidth. * If any channel in between is disabled or has not yet * gone through CAC, return false. */ for (freq = start_freq; freq <= end_freq; freq += 20) { c = ieee80211_get_channel(wiphy, freq); if (!c) return false; if (c->flags & IEEE80211_CHAN_DISABLED) return false; if ((c->flags & IEEE80211_CHAN_RADAR) && (c->dfs_state != NL80211_DFS_AVAILABLE) && !(c->dfs_state == NL80211_DFS_USABLE && dfs_offload)) return false; } return true; } static bool cfg80211_chandef_dfs_available(struct wiphy *wiphy, const struct cfg80211_chan_def *chandef) { int width; int r; if (WARN_ON(!cfg80211_chandef_valid(chandef))) return false; width = cfg80211_chandef_get_width(chandef); if (width < 0) return false; r = cfg80211_get_chans_dfs_available(wiphy, chandef->center_freq1, width); /* If any of the cf1 channels are unavailable, just return */ if (!r) return r; switch (chandef->width) { case NL80211_CHAN_WIDTH_80P80: WARN_ON(!chandef->center_freq2); r = cfg80211_get_chans_dfs_available(wiphy, chandef->center_freq2, width); break; default: WARN_ON(chandef->center_freq2); break; } return r; } static unsigned int cfg80211_get_chans_dfs_cac_time(struct wiphy *wiphy, u32 center_freq, u32 bandwidth) { struct ieee80211_channel *c; u32 start_freq, end_freq, freq; unsigned int dfs_cac_ms = 0; start_freq = cfg80211_get_start_freq(center_freq, bandwidth); end_freq = cfg80211_get_end_freq(center_freq, bandwidth); for (freq = start_freq; freq <= end_freq; freq += 20) { c = ieee80211_get_channel(wiphy, freq); if (!c) return 0; if (c->flags & IEEE80211_CHAN_DISABLED) return 0; if (!(c->flags & IEEE80211_CHAN_RADAR)) continue; if (c->dfs_cac_ms > dfs_cac_ms) dfs_cac_ms = c->dfs_cac_ms; } return dfs_cac_ms; } unsigned int cfg80211_chandef_dfs_cac_time(struct wiphy *wiphy, const struct cfg80211_chan_def *chandef) { int width; unsigned int t1 = 0, t2 = 0; if (WARN_ON(!cfg80211_chandef_valid(chandef))) return 0; width = cfg80211_chandef_get_width(chandef); if (width < 0) return 0; t1 = cfg80211_get_chans_dfs_cac_time(wiphy, chandef->center_freq1, width); if (!chandef->center_freq2) return t1; t2 = cfg80211_get_chans_dfs_cac_time(wiphy, chandef->center_freq2, width); return max(t1, t2); } 
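/* A worked example of the start/end frequency walk used below and in the DFS helpers above: for an 80 MHz chandef with center_freq1 = 5210 MHz, cfg80211_get_start_freq() yields 5210 - 40 + 10 = 5180 and cfg80211_get_end_freq() yields 5210 + 40 - 10 = 5240, so the 20 MHz steps visit 5180, 5200, 5220 and 5240 - the center frequencies of the four 20 MHz subchannels. */ 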
static bool cfg80211_secondary_chans_ok(struct wiphy *wiphy, u32 center_freq, u32 bandwidth, u32 prohibited_flags) { struct ieee80211_channel *c; u32 freq, start_freq, end_freq; start_freq = cfg80211_get_start_freq(center_freq, bandwidth); end_freq = cfg80211_get_end_freq(center_freq, bandwidth); for (freq = start_freq; freq <= end_freq; freq += 20) { c = ieee80211_get_channel(wiphy, freq); if (!c || c->flags & prohibited_flags) return false; } return true; } bool cfg80211_chandef_usable(struct wiphy *wiphy, const struct cfg80211_chan_def *chandef, u32 prohibited_flags) { struct ieee80211_sta_ht_cap *ht_cap; struct ieee80211_sta_vht_cap *vht_cap; u32 width, control_freq, cap; if (WARN_ON(!cfg80211_chandef_valid(chandef))) return false; ht_cap = &wiphy->bands[chandef->chan->band]->ht_cap; vht_cap = &wiphy->bands[chandef->chan->band]->vht_cap; control_freq = chandef->chan->center_freq; switch (chandef->width) { case NL80211_CHAN_WIDTH_5: width = 5; break; case NL80211_CHAN_WIDTH_10: prohibited_flags |= IEEE80211_CHAN_NO_10MHZ; width = 10; break; case NL80211_CHAN_WIDTH_20: if (!ht_cap->ht_supported) return false; /* fall through */ case NL80211_CHAN_WIDTH_20_NOHT: prohibited_flags |= IEEE80211_CHAN_NO_20MHZ; width = 20; break; case NL80211_CHAN_WIDTH_40: width = 40; if (!ht_cap->ht_supported) return false; if (!(ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) || ht_cap->cap & IEEE80211_HT_CAP_40MHZ_INTOLERANT) return false; if (chandef->center_freq1 < control_freq && chandef->chan->flags & IEEE80211_CHAN_NO_HT40MINUS) return false; if (chandef->center_freq1 > control_freq && chandef->chan->flags & IEEE80211_CHAN_NO_HT40PLUS) return false; break; case NL80211_CHAN_WIDTH_80P80: cap = vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK; if (cap != IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ) return false; /* fall through */ case NL80211_CHAN_WIDTH_80: if (!vht_cap->vht_supported) return false; prohibited_flags |= IEEE80211_CHAN_NO_80MHZ; width = 80; break; case NL80211_CHAN_WIDTH_160: if (!vht_cap->vht_supported) return false; cap = vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK; if (cap != IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ && cap != IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ) return false; prohibited_flags |= IEEE80211_CHAN_NO_160MHZ; width = 160; break; default: WARN_ON_ONCE(1); return false; } /* * TODO: What if there are only certain 80/160/80+80 MHz channels * allowed by the driver, or only certain combinations? * For 40 MHz the driver can set the NO_HT40 flags, but for * 80/160 MHz and in particular 80+80 MHz this isn't really * feasible and we only have NO_80MHZ/NO_160MHZ so far but * no way to cover 80+80 MHz or more complex restrictions. * Note that such restrictions also need to be advertised to * userspace, for example for P2P channel selection. 
*/ if (width > 20) prohibited_flags |= IEEE80211_CHAN_NO_OFDM; /* 5 and 10 MHz are only defined for the OFDM PHY */ if (width < 20) prohibited_flags |= IEEE80211_CHAN_NO_OFDM; if (!cfg80211_secondary_chans_ok(wiphy, chandef->center_freq1, width, prohibited_flags)) return false; if (!chandef->center_freq2) return true; return cfg80211_secondary_chans_ok(wiphy, chandef->center_freq2, width, prohibited_flags); } EXPORT_SYMBOL(cfg80211_chandef_usable); /* * Check if the channel can be used under permissive conditions mandated by * some regulatory bodies, i.e., the channel is marked with * IEEE80211_CHAN_IR_CONCURRENT and there is an additional station interface * associated to an AP on the same channel or on the same UNII band * (assuming that the AP is an authorized master). * In addition allow operation on a channel on which indoor operation is * allowed, iff we are currently operating in an indoor environment. */ static bool cfg80211_ir_permissive_chan(struct wiphy *wiphy, enum nl80211_iftype iftype, struct ieee80211_channel *chan) { struct wireless_dev *wdev; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); ASSERT_RTNL(); if (!IS_ENABLED(CPTCFG_CFG80211_REG_RELAX_NO_IR) || !(wiphy->regulatory_flags & REGULATORY_ENABLE_RELAX_NO_IR)) return false; /* only valid for GO and TDLS off-channel (station/p2p-CL) */ if (iftype != NL80211_IFTYPE_P2P_GO && iftype != NL80211_IFTYPE_STATION && iftype != NL80211_IFTYPE_P2P_CLIENT) return false; if (regulatory_indoor_allowed() && (chan->flags & IEEE80211_CHAN_INDOOR_ONLY)) return true; if (!(chan->flags & IEEE80211_CHAN_IR_CONCURRENT)) return false; /* * Generally, it is possible to rely on another device/driver to allow * the IR concurrent relaxation, however, since the device can further * enforce the relaxation (by doing a similar verifications as this), * and thus fail the GO instantiation, consider only the interfaces of * the current registered device. */ list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) { struct ieee80211_channel *other_chan = NULL; int r1, r2; wdev_lock(wdev); if (wdev->iftype == NL80211_IFTYPE_STATION && wdev->current_bss) other_chan = wdev->current_bss->pub.channel; /* * If a GO already operates on the same GO_CONCURRENT channel, * this one (maybe the same one) can beacon as well. We allow * the operation even if the station we relied on with * GO_CONCURRENT is disconnected now. But then we must make sure * we're not outdoor on an indoor-only channel. */ if (iftype == NL80211_IFTYPE_P2P_GO && wdev->iftype == NL80211_IFTYPE_P2P_GO && wdev->beacon_interval && !(chan->flags & IEEE80211_CHAN_INDOOR_ONLY)) other_chan = wdev->chandef.chan; wdev_unlock(wdev); if (!other_chan) continue; if (chan == other_chan) return true; if (chan->band != NL80211_BAND_5GHZ) continue; r1 = cfg80211_get_unii(chan->center_freq); r2 = cfg80211_get_unii(other_chan->center_freq); if (r1 != -EINVAL && r1 == r2) { /* * At some locations channels 149-165 are considered a * bundle, but at other locations, e.g., Indonesia, * channels 149-161 are considered a bundle while * channel 165 is left out and considered to be in a * different bundle. Thus, in case that there is a * station interface connected to an AP on channel 165, * it is assumed that channels 149-161 are allowed for * GO operations. However, having a station interface * connected to an AP on channels 149-161, does not * allow GO operation on channel 165. 
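 * (Concretely, channels 149/153/157/161/165 sit at 5745/5765/5785/5805/
 * 5825 MHz respectively, which is why the check below special-cases a
 * center frequency of 5825 MHz.)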
*/ if (chan->center_freq == 5825 && other_chan->center_freq != 5825) continue; return true; } } return false; } static bool _cfg80211_reg_can_beacon(struct wiphy *wiphy, struct cfg80211_chan_def *chandef, enum nl80211_iftype iftype, bool check_no_ir) { bool res; u32 prohibited_flags = IEEE80211_CHAN_DISABLED | IEEE80211_CHAN_RADAR; trace_cfg80211_reg_can_beacon(wiphy, chandef, iftype, check_no_ir); if (check_no_ir) prohibited_flags |= IEEE80211_CHAN_NO_IR; if (cfg80211_chandef_dfs_required(wiphy, chandef, iftype) > 0 && cfg80211_chandef_dfs_available(wiphy, chandef)) { /* We can skip IEEE80211_CHAN_NO_IR if chandef dfs available */ prohibited_flags = IEEE80211_CHAN_DISABLED; } res = cfg80211_chandef_usable(wiphy, chandef, prohibited_flags); trace_cfg80211_return_bool(res); return res; } bool cfg80211_reg_can_beacon(struct wiphy *wiphy, struct cfg80211_chan_def *chandef, enum nl80211_iftype iftype) { return _cfg80211_reg_can_beacon(wiphy, chandef, iftype, true); } EXPORT_SYMBOL(cfg80211_reg_can_beacon); bool cfg80211_reg_can_beacon_relax(struct wiphy *wiphy, struct cfg80211_chan_def *chandef, enum nl80211_iftype iftype) { bool check_no_ir; ASSERT_RTNL(); /* * Under certain conditions suggested by some regulatory bodies a * GO/STA can IR on channels marked with IEEE80211_NO_IR. Set this flag * only if such relaxations are not enabled and the conditions are not * met. */ check_no_ir = !cfg80211_ir_permissive_chan(wiphy, iftype, chandef->chan); return _cfg80211_reg_can_beacon(wiphy, chandef, iftype, check_no_ir); } EXPORT_SYMBOL(cfg80211_reg_can_beacon_relax); int cfg80211_set_monitor_channel(struct cfg80211_registered_device *rdev, struct cfg80211_chan_def *chandef) { if (!rdev->ops->set_monitor_channel) return -EOPNOTSUPP; if (!cfg80211_has_monitors_only(rdev)) return -EBUSY; return rdev_set_monitor_channel(rdev, chandef); } void cfg80211_get_chan_state(struct wireless_dev *wdev, struct ieee80211_channel **chan, enum cfg80211_chan_mode *chanmode, u8 *radar_detect) { int ret; *chan = NULL; *chanmode = CHAN_MODE_UNDEFINED; ASSERT_WDEV_LOCK(wdev); if (wdev->netdev && !netif_running(wdev->netdev)) return; switch (wdev->iftype) { case NL80211_IFTYPE_ADHOC: if (wdev->current_bss) { *chan = wdev->current_bss->pub.channel; *chanmode = (wdev->ibss_fixed && !wdev->ibss_dfs_possible) ? 
CHAN_MODE_SHARED : CHAN_MODE_EXCLUSIVE; /* consider worst-case - IBSS can try to return to the * original user-specified channel as creator */ if (wdev->ibss_dfs_possible) *radar_detect |= BIT(wdev->chandef.width); return; } break; case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_P2P_CLIENT: if (wdev->current_bss) { *chan = wdev->current_bss->pub.channel; *chanmode = CHAN_MODE_SHARED; return; } break; case NL80211_IFTYPE_AP: case NL80211_IFTYPE_P2P_GO: if (wdev->cac_started) { *chan = wdev->chandef.chan; *chanmode = CHAN_MODE_SHARED; *radar_detect |= BIT(wdev->chandef.width); } else if (wdev->beacon_interval) { *chan = wdev->chandef.chan; *chanmode = CHAN_MODE_SHARED; ret = cfg80211_chandef_dfs_required(wdev->wiphy, &wdev->chandef, wdev->iftype); WARN_ON(ret < 0); if (ret > 0) *radar_detect |= BIT(wdev->chandef.width); } return; case NL80211_IFTYPE_MESH_POINT: if (wdev->mesh_id_len) { *chan = wdev->chandef.chan; *chanmode = CHAN_MODE_SHARED; ret = cfg80211_chandef_dfs_required(wdev->wiphy, &wdev->chandef, wdev->iftype); WARN_ON(ret < 0); if (ret > 0) *radar_detect |= BIT(wdev->chandef.width); } return; case NL80211_IFTYPE_OCB: if (wdev->chandef.chan) { *chan = wdev->chandef.chan; *chanmode = CHAN_MODE_SHARED; return; } break; case NL80211_IFTYPE_MONITOR: case NL80211_IFTYPE_AP_VLAN: case NL80211_IFTYPE_WDS: case NL80211_IFTYPE_P2P_DEVICE: case NL80211_IFTYPE_NAN: case NL80211_IFTYPE_NAN_DATA: /* these interface types don't really have a channel */ return; case NL80211_IFTYPE_UNSPECIFIED: case NUM_NL80211_IFTYPES: WARN_ON(1); } } backport-iwlwifi-dkms-7906/net/wireless/core.c000066400000000000000000001152141351064634500214240ustar00rootroot00000000000000/* * This is the linux wireless configuration interface. * * Copyright 2006-2010 Johannes Berg * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright 2015-2017 Intel Deutschland GmbH * Copyright (C) 2018-2019 Intel Corporation */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "nl80211.h" #include "core.h" #include "sysfs.h" #include "debugfs.h" #include "wext-compat.h" #include "rdev-ops.h" /* name for sysfs, %d is appended */ #define PHY_NAME "phy" MODULE_AUTHOR("Johannes Berg"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("wireless configuration support"); MODULE_ALIAS_GENL_FAMILY(NL80211_GENL_NAME); /* RCU-protected (and RTNL for writers) */ LIST_HEAD(cfg80211_rdev_list); int cfg80211_rdev_list_generation; /* for debugfs */ static struct dentry *ieee80211_debugfs_dir; /* for the cleanup, scan and event works */ struct workqueue_struct *cfg80211_wq; static bool cfg80211_disable_40mhz_24ghz; module_param(cfg80211_disable_40mhz_24ghz, bool, 0644); MODULE_PARM_DESC(cfg80211_disable_40mhz_24ghz, "Disable 40MHz support in the 2.4GHz band"); struct cfg80211_registered_device *cfg80211_rdev_by_wiphy_idx(int wiphy_idx) { struct cfg80211_registered_device *result = NULL, *rdev; ASSERT_RTNL(); list_for_each_entry(rdev, &cfg80211_rdev_list, list) { if (rdev->wiphy_idx == wiphy_idx) { result = rdev; break; } } return result; } int get_wiphy_idx(struct wiphy *wiphy) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); return rdev->wiphy_idx; } struct wiphy *wiphy_idx_to_wiphy(int wiphy_idx) { struct cfg80211_registered_device *rdev; ASSERT_RTNL(); rdev = cfg80211_rdev_by_wiphy_idx(wiphy_idx); if (!rdev) return NULL; return &rdev->wiphy; } static int cfg80211_dev_check_name(struct 
cfg80211_registered_device *rdev, const char *newname) { struct cfg80211_registered_device *rdev2; int wiphy_idx, taken = -1, digits; ASSERT_RTNL(); if (strlen(newname) > NL80211_WIPHY_NAME_MAXLEN) return -EINVAL; /* prohibit calling the thing phy%d when %d is not its number */ sscanf(newname, PHY_NAME "%d%n", &wiphy_idx, &taken); if (taken == strlen(newname) && wiphy_idx != rdev->wiphy_idx) { /* count number of places needed to print wiphy_idx */ digits = 1; while (wiphy_idx /= 10) digits++; /* * deny the name if it is phy where is printed * without leading zeroes. taken == strlen(newname) here */ if (taken == strlen(PHY_NAME) + digits) return -EINVAL; } /* Ensure another device does not already have this name. */ list_for_each_entry(rdev2, &cfg80211_rdev_list, list) if (strcmp(newname, wiphy_name(&rdev2->wiphy)) == 0) return -EINVAL; return 0; } int cfg80211_dev_rename(struct cfg80211_registered_device *rdev, char *newname) { int result; ASSERT_RTNL(); /* Ignore nop renames */ if (strcmp(newname, wiphy_name(&rdev->wiphy)) == 0) return 0; result = cfg80211_dev_check_name(rdev, newname); if (result < 0) return result; result = device_rename(&rdev->wiphy.dev, newname); if (result) return result; if (rdev->wiphy.debugfsdir && !debugfs_rename(rdev->wiphy.debugfsdir->d_parent, rdev->wiphy.debugfsdir, rdev->wiphy.debugfsdir->d_parent, newname)) pr_err("failed to rename debugfs dir to %s!\n", newname); nl80211_notify_wiphy(rdev, NL80211_CMD_NEW_WIPHY); return 0; } int cfg80211_switch_netns(struct cfg80211_registered_device *rdev, struct net *net) { struct wireless_dev *wdev; int err = 0; if (!(rdev->wiphy.flags & WIPHY_FLAG_NETNS_OK)) return -EOPNOTSUPP; list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) { if (!wdev->netdev) continue; wdev->netdev->features &= ~NETIF_F_NETNS_LOCAL; err = dev_change_net_namespace(wdev->netdev, net, "wlan%d"); if (err) break; wdev->netdev->features |= NETIF_F_NETNS_LOCAL; } if (err) { /* failed -- clean up to old netns */ net = wiphy_net(&rdev->wiphy); list_for_each_entry_continue_reverse(wdev, &rdev->wiphy.wdev_list, list) { if (!wdev->netdev) continue; wdev->netdev->features &= ~NETIF_F_NETNS_LOCAL; err = dev_change_net_namespace(wdev->netdev, net, "wlan%d"); WARN_ON(err); wdev->netdev->features |= NETIF_F_NETNS_LOCAL; } return err; } list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) { if (!wdev->netdev) continue; nl80211_notify_iface(rdev, wdev, NL80211_CMD_DEL_INTERFACE); } nl80211_notify_wiphy(rdev, NL80211_CMD_DEL_WIPHY); wiphy_net_set(&rdev->wiphy, net); err = device_rename(&rdev->wiphy.dev, dev_name(&rdev->wiphy.dev)); WARN_ON(err); nl80211_notify_wiphy(rdev, NL80211_CMD_NEW_WIPHY); list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) { if (!wdev->netdev) continue; nl80211_notify_iface(rdev, wdev, NL80211_CMD_NEW_INTERFACE); } return 0; } static void cfg80211_rfkill_poll(struct rfkill *rfkill, void *data) { struct cfg80211_registered_device *rdev = data; rdev_rfkill_poll(rdev); } void cfg80211_stop_p2p_device(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev) { ASSERT_RTNL(); if (WARN_ON(wdev->iftype != NL80211_IFTYPE_P2P_DEVICE)) return; if (!wdev_running(wdev)) return; rdev_stop_p2p_device(rdev, wdev); wdev->is_running = false; rdev->opencount--; if (rdev->scan_req && rdev->scan_req->wdev == wdev) { if (WARN_ON(!rdev->scan_req->notified)) rdev->scan_req->info.aborted = true; ___cfg80211_scan_done(rdev, false); } } void cfg80211_stop_nan(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev) { ASSERT_RTNL(); if 
(WARN_ON(wdev->iftype != NL80211_IFTYPE_NAN)) return; if (!wdev_running(wdev)) return; rdev_stop_nan(rdev, wdev); wdev->is_running = false; rdev->opencount--; } void cfg80211_shutdown_all_interfaces(struct wiphy *wiphy) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); struct wireless_dev *wdev; ASSERT_RTNL(); list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) { if (wdev->netdev) { dev_close(wdev->netdev); continue; } /* otherwise, check iftype */ switch (wdev->iftype) { case NL80211_IFTYPE_P2P_DEVICE: cfg80211_stop_p2p_device(rdev, wdev); break; case NL80211_IFTYPE_NAN: cfg80211_stop_nan(rdev, wdev); break; default: break; } } } EXPORT_SYMBOL_GPL(cfg80211_shutdown_all_interfaces); static int cfg80211_rfkill_set_block(void *data, bool blocked) { struct cfg80211_registered_device *rdev = data; if (!blocked) return 0; rtnl_lock(); cfg80211_shutdown_all_interfaces(&rdev->wiphy); rtnl_unlock(); return 0; } static void cfg80211_rfkill_block_work(struct work_struct *work) { struct cfg80211_registered_device *rdev; rdev = container_of(work, struct cfg80211_registered_device, rfkill_block); cfg80211_rfkill_set_block(rdev, true); } static void cfg80211_event_work(struct work_struct *work) { struct cfg80211_registered_device *rdev; rdev = container_of(work, struct cfg80211_registered_device, event_work); rtnl_lock(); cfg80211_process_rdev_events(rdev); rtnl_unlock(); } void cfg80211_destroy_ifaces(struct cfg80211_registered_device *rdev) { struct wireless_dev *wdev, *tmp; ASSERT_RTNL(); list_for_each_entry_safe(wdev, tmp, &rdev->wiphy.wdev_list, list) { if (wdev->nl_owner_dead) rdev_del_virtual_intf(rdev, wdev); } } static void cfg80211_destroy_iface_wk(struct work_struct *work) { struct cfg80211_registered_device *rdev; rdev = container_of(work, struct cfg80211_registered_device, destroy_work); rtnl_lock(); cfg80211_destroy_ifaces(rdev); rtnl_unlock(); } static void cfg80211_sched_scan_stop_wk(struct work_struct *work) { struct cfg80211_registered_device *rdev; struct cfg80211_sched_scan_request *req, *tmp; rdev = container_of(work, struct cfg80211_registered_device, sched_scan_stop_wk); rtnl_lock(); list_for_each_entry_safe(req, tmp, &rdev->sched_scan_req_list, list) { if (req->nl_owner_dead) cfg80211_stop_sched_scan_req(rdev, req, false); } rtnl_unlock(); } static void cfg80211_propagate_radar_detect_wk(struct work_struct *work) { struct cfg80211_registered_device *rdev; rdev = container_of(work, struct cfg80211_registered_device, propagate_radar_detect_wk); rtnl_lock(); regulatory_propagate_dfs_state(&rdev->wiphy, &rdev->radar_chandef, NL80211_DFS_UNAVAILABLE, NL80211_RADAR_DETECTED); rtnl_unlock(); } static void cfg80211_propagate_cac_done_wk(struct work_struct *work) { struct cfg80211_registered_device *rdev; rdev = container_of(work, struct cfg80211_registered_device, propagate_cac_done_wk); rtnl_lock(); regulatory_propagate_dfs_state(&rdev->wiphy, &rdev->cac_done_chandef, NL80211_DFS_AVAILABLE, NL80211_RADAR_CAC_FINISHED); rtnl_unlock(); } /* exported functions */ struct wiphy *wiphy_new_nm(const struct cfg80211_ops *ops, int sizeof_priv, const char *requested_name) { static atomic_t wiphy_counter = ATOMIC_INIT(0); struct cfg80211_registered_device *rdev; int alloc_size; /* * Make sure the padding is >= the rest of the struct so that we * always keep it large enough to pad out the entire original * kernel's struct. 
We really only need to make sure it's larger * than the kernel compat is compiled against, but since it'll * only increase in size make sure it's larger than the current * version of it. Subtract since it's included. */ BUILD_BUG_ON(WIPHY_COMPAT_PAD_SIZE < sizeof(struct wiphy) - WIPHY_COMPAT_PAD_SIZE); WARN_ON(ops->add_key && (!ops->del_key || !ops->set_default_key)); WARN_ON(ops->auth && (!ops->assoc || !ops->deauth || !ops->disassoc)); WARN_ON(ops->connect && !ops->disconnect); WARN_ON(ops->join_ibss && !ops->leave_ibss); WARN_ON(ops->add_virtual_intf && !ops->del_virtual_intf); WARN_ON(ops->add_station && !ops->del_station); WARN_ON(ops->add_mpath && !ops->del_mpath); WARN_ON(ops->join_mesh && !ops->leave_mesh); WARN_ON(ops->start_p2p_device && !ops->stop_p2p_device); WARN_ON(ops->start_ap && !ops->stop_ap); WARN_ON(ops->join_ocb && !ops->leave_ocb); WARN_ON(ops->suspend && !ops->resume); WARN_ON(ops->sched_scan_start && !ops->sched_scan_stop); WARN_ON(ops->remain_on_channel && !ops->cancel_remain_on_channel); WARN_ON(ops->tdls_channel_switch && !ops->tdls_cancel_channel_switch); WARN_ON(ops->add_tx_ts && !ops->del_tx_ts); alloc_size = sizeof(*rdev) + sizeof_priv; rdev = kzalloc(alloc_size, GFP_KERNEL); if (!rdev) return NULL; rdev->ops = ops; rdev->wiphy_idx = atomic_inc_return(&wiphy_counter); if (unlikely(rdev->wiphy_idx < 0)) { /* ugh, wrapped! */ atomic_dec(&wiphy_counter); kfree(rdev); return NULL; } /* atomic_inc_return makes it start at 1, make it start at 0 */ rdev->wiphy_idx--; /* give it a proper name */ if (requested_name && requested_name[0]) { int rv; rtnl_lock(); rv = cfg80211_dev_check_name(rdev, requested_name); if (rv < 0) { rtnl_unlock(); goto use_default_name; } rv = dev_set_name(&rdev->wiphy.dev, "%s", requested_name); rtnl_unlock(); if (rv) goto use_default_name; } else { int rv; use_default_name: /* NOTE: This is *probably* safe w/out holding rtnl because of * the restrictions on phy names. Probably this call could * fail if some other part of the kernel (re)named a device * phyX. But, might should add some locking and check return * value, and use a different name if this one exists? 
*/ rv = dev_set_name(&rdev->wiphy.dev, PHY_NAME "%d", rdev->wiphy_idx); if (rv < 0) { kfree(rdev); return NULL; } } INIT_LIST_HEAD(&rdev->wiphy.wdev_list); INIT_LIST_HEAD(&rdev->beacon_registrations); spin_lock_init(&rdev->beacon_registrations_lock); spin_lock_init(&rdev->bss_lock); INIT_LIST_HEAD(&rdev->bss_list); INIT_LIST_HEAD(&rdev->sched_scan_req_list); INIT_WORK(&rdev->scan_done_wk, __cfg80211_scan_done); INIT_LIST_HEAD(&rdev->mlme_unreg); spin_lock_init(&rdev->mlme_unreg_lock); INIT_WORK(&rdev->mlme_unreg_wk, cfg80211_mlme_unreg_wk); INIT_DELAYED_WORK(&rdev->dfs_update_channels_wk, cfg80211_dfs_channels_update_work); device_initialize(&rdev->wiphy.dev); rdev->wiphy.dev.class = &ieee80211_class; rdev->wiphy.dev.platform_data = rdev; device_enable_async_suspend(&rdev->wiphy.dev); INIT_WORK(&rdev->destroy_work, cfg80211_destroy_iface_wk); INIT_WORK(&rdev->sched_scan_stop_wk, cfg80211_sched_scan_stop_wk); INIT_WORK(&rdev->sched_scan_res_wk, cfg80211_sched_scan_results_wk); INIT_WORK(&rdev->propagate_radar_detect_wk, cfg80211_propagate_radar_detect_wk); INIT_WORK(&rdev->propagate_cac_done_wk, cfg80211_propagate_cac_done_wk); #ifdef CPTCFG_CFG80211_DEFAULT_PS rdev->wiphy.flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT; #endif wiphy_net_set(&rdev->wiphy, &init_net); rdev->rfkill_ops.set_block = cfg80211_rfkill_set_block; rdev->rfkill = rfkill_alloc(dev_name(&rdev->wiphy.dev), &rdev->wiphy.dev, RFKILL_TYPE_WLAN, &rdev->rfkill_ops, rdev); if (!rdev->rfkill) { kfree(rdev); return NULL; } INIT_WORK(&rdev->rfkill_block, cfg80211_rfkill_block_work); INIT_WORK(&rdev->conn_work, cfg80211_conn_work); INIT_WORK(&rdev->event_work, cfg80211_event_work); init_waitqueue_head(&rdev->dev_wait); /* * Initialize wiphy parameters to IEEE 802.11 MIB default values. * Fragmentation and RTS threshold are disabled by default with the * special -1 value. */ rdev->wiphy.retry_short = 7; rdev->wiphy.retry_long = 4; rdev->wiphy.frag_threshold = (u32) -1; rdev->wiphy.rts_threshold = (u32) -1; rdev->wiphy.coverage_class = 0; rdev->wiphy.max_num_csa_counters = 1; rdev->wiphy.max_sched_scan_plans = 1; rdev->wiphy.max_sched_scan_plan_interval = U32_MAX; return &rdev->wiphy; } EXPORT_SYMBOL(wiphy_new_nm); static int wiphy_verify_combinations(struct wiphy *wiphy) { const struct ieee80211_iface_combination *c; int i, j; for (i = 0; i < wiphy->n_iface_combinations; i++) { u32 cnt = 0; u16 all_iftypes = 0; c = &wiphy->iface_combinations[i]; /* * Combinations with just one interface aren't real, * however we make an exception for DFS. */ if (WARN_ON((c->max_interfaces < 2) && !c->radar_detect_widths)) return -EINVAL; /* Need at least one channel */ if (WARN_ON(!c->num_different_channels)) return -EINVAL; /* * Put a sane limit on maximum number of different * channels to simplify channel accounting code. */ if (WARN_ON(c->num_different_channels > CFG80211_MAX_NUM_DIFFERENT_CHANNELS)) return -EINVAL; /* DFS only works on one channel. */ if (WARN_ON(c->radar_detect_widths && (c->num_different_channels > 1))) return -EINVAL; if (WARN_ON(!c->n_limits)) return -EINVAL; for (j = 0; j < c->n_limits; j++) { u16 types = c->limits[j].types; /* interface types shouldn't overlap */ if (WARN_ON(types & all_iftypes)) return -EINVAL; all_iftypes |= types; if (WARN_ON(!c->limits[j].max)) return -EINVAL; /* Shouldn't list software iftypes in combinations! 
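	 * (mac80211, for example, marks AP_VLAN and MONITOR as software
	 * interface types, so those must never appear in a driver's
	 * advertised combinations.)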
*/ if (WARN_ON(wiphy->software_iftypes & types)) return -EINVAL; /* Only a single P2P_DEVICE can be allowed */ if (WARN_ON(types & BIT(NL80211_IFTYPE_P2P_DEVICE) && c->limits[j].max > 1)) return -EINVAL; /* Only a single NAN can be allowed */ if (WARN_ON(types & BIT(NL80211_IFTYPE_NAN) && c->limits[j].max > 1)) return -EINVAL; /* * This isn't well-defined right now. If you have an * IBSS interface, then its beacon interval may change * by joining other networks, and nothing prevents it * from doing that. * So technically we probably shouldn't even allow AP * and IBSS in the same interface, but it seems that * some drivers support that, possibly only with fixed * beacon intervals for IBSS. */ if (WARN_ON(types & BIT(NL80211_IFTYPE_ADHOC) && c->beacon_int_min_gcd)) { return -EINVAL; } cnt += c->limits[j].max; /* * Don't advertise an unsupported type * in a combination. */ if (WARN_ON((wiphy->interface_modes & types) != types)) return -EINVAL; } #ifndef CPTCFG_WIRELESS_WDS if (WARN_ON(all_iftypes & BIT(NL80211_IFTYPE_WDS))) return -EINVAL; #endif /* You can't even choose that many! */ if (WARN_ON(cnt < c->max_interfaces)) return -EINVAL; } return 0; } int wiphy_register(struct wiphy *wiphy) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); int res; enum nl80211_band band; struct ieee80211_supported_band *sband; bool have_band = false; int i; u16 ifmodes = wiphy->interface_modes; #ifdef CONFIG_PM if (WARN_ON(wiphy->wowlan && (wiphy->wowlan->flags & WIPHY_WOWLAN_GTK_REKEY_FAILURE) && !(wiphy->wowlan->flags & WIPHY_WOWLAN_SUPPORTS_GTK_REKEY))) return -EINVAL; if (WARN_ON(wiphy->wowlan && !wiphy->wowlan->flags && !wiphy->wowlan->n_patterns && !wiphy->wowlan->tcp)) return -EINVAL; #endif if (WARN_ON((wiphy->features & NL80211_FEATURE_TDLS_CHANNEL_SWITCH) && (!rdev->ops->tdls_channel_switch || !rdev->ops->tdls_cancel_channel_switch))) return -EINVAL; if (WARN_ON((wiphy->interface_modes & BIT(NL80211_IFTYPE_NAN)) && (!rdev->ops->start_nan || !rdev->ops->stop_nan || !rdev->ops->add_nan_func || !rdev->ops->del_nan_func || !(wiphy->nan_supported_bands & BIT(NL80211_BAND_2GHZ))))) return -EINVAL; #ifndef CPTCFG_WIRELESS_WDS if (WARN_ON(wiphy->interface_modes & BIT(NL80211_IFTYPE_WDS))) return -EINVAL; #endif if (WARN_ON(wiphy->pmsr_capa && !wiphy->pmsr_capa->ftm.supported)) return -EINVAL; if (wiphy->pmsr_capa && wiphy->pmsr_capa->ftm.supported) { if (WARN_ON(!wiphy->pmsr_capa->ftm.asap && !wiphy->pmsr_capa->ftm.non_asap)) return -EINVAL; if (WARN_ON(!wiphy->pmsr_capa->ftm.preambles || !wiphy->pmsr_capa->ftm.bandwidths)) return -EINVAL; if (WARN_ON(wiphy->pmsr_capa->ftm.preambles & ~(BIT(NL80211_PREAMBLE_LEGACY) | BIT(NL80211_PREAMBLE_HT) | BIT(NL80211_PREAMBLE_VHT) | BIT(NL80211_PREAMBLE_DMG)))) return -EINVAL; if (WARN_ON(wiphy->pmsr_capa->ftm.bandwidths & ~(BIT(NL80211_CHAN_WIDTH_20_NOHT) | BIT(NL80211_CHAN_WIDTH_20) | BIT(NL80211_CHAN_WIDTH_40) | BIT(NL80211_CHAN_WIDTH_80) | BIT(NL80211_CHAN_WIDTH_80P80) | BIT(NL80211_CHAN_WIDTH_160) | BIT(NL80211_CHAN_WIDTH_5) | BIT(NL80211_CHAN_WIDTH_10)))) return -EINVAL; } /* * if a wiphy has unsupported modes for regulatory channel enforcement, * opt-out of enforcement checking */ if (wiphy->interface_modes & ~(BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_P2P_GO) | BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_P2P_DEVICE) | BIT(NL80211_IFTYPE_NAN) | BIT(NL80211_IFTYPE_AP_VLAN) | BIT(NL80211_IFTYPE_MONITOR))) wiphy->regulatory_flags |= REGULATORY_IGNORE_STALE_KICKOFF; if 
(WARN_ON((wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED) && (wiphy->regulatory_flags & (REGULATORY_CUSTOM_REG | REGULATORY_STRICT_REG | REGULATORY_COUNTRY_IE_FOLLOW_POWER | REGULATORY_COUNTRY_IE_IGNORE)))) return -EINVAL; if (WARN_ON(wiphy->coalesce && (!wiphy->coalesce->n_rules || !wiphy->coalesce->n_patterns) && (!wiphy->coalesce->pattern_min_len || wiphy->coalesce->pattern_min_len > wiphy->coalesce->pattern_max_len))) return -EINVAL; if (WARN_ON(wiphy->ap_sme_capa && !(wiphy->flags & WIPHY_FLAG_HAVE_AP_SME))) return -EINVAL; if (WARN_ON(wiphy->addresses && !wiphy->n_addresses)) return -EINVAL; if (WARN_ON(wiphy->addresses && !is_zero_ether_addr(wiphy->perm_addr) && memcmp(wiphy->perm_addr, wiphy->addresses[0].addr, ETH_ALEN))) return -EINVAL; if (WARN_ON(wiphy->max_acl_mac_addrs && (!(wiphy->flags & WIPHY_FLAG_HAVE_AP_SME) || !rdev->ops->set_mac_acl))) return -EINVAL; /* assure only valid behaviours are flagged by driver * hence subtract 2 as bit 0 is invalid. */ if (WARN_ON(wiphy->bss_select_support && (wiphy->bss_select_support & ~(BIT(__NL80211_BSS_SELECT_ATTR_AFTER_LAST) - 2)))) return -EINVAL; if (WARN_ON(wiphy_ext_feature_isset(&rdev->wiphy, NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_1X) && (!rdev->ops->set_pmk || !rdev->ops->del_pmk))) return -EINVAL; if (WARN_ON(!(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_FW_ROAM) && rdev->ops->update_connect_params)) return -EINVAL; if (wiphy->addresses) memcpy(wiphy->perm_addr, wiphy->addresses[0].addr, ETH_ALEN); /* sanity check ifmodes */ WARN_ON(!ifmodes); ifmodes &= ((1 << NUM_NL80211_IFTYPES) - 1) & ~1; if (WARN_ON(ifmodes != wiphy->interface_modes)) wiphy->interface_modes = ifmodes; res = wiphy_verify_combinations(wiphy); if (res) return res; /* sanity check supported bands/channels */ for (band = 0; band < NUM_NL80211_BANDS; band++) { u16 types = 0; sband = wiphy->bands[band]; if (!sband) continue; sband->band = band; if (WARN_ON(!sband->n_channels)) return -EINVAL; /* * on 60GHz band, there are no legacy rates, so * n_bitrates is 0 */ if (WARN_ON(band != NL80211_BAND_60GHZ && !sband->n_bitrates)) return -EINVAL; /* * Since cfg80211_disable_40mhz_24ghz is global, we can * modify the sband's ht data even if the driver uses a * global structure for that. */ if (cfg80211_disable_40mhz_24ghz && band == NL80211_BAND_2GHZ && sband->ht_cap.ht_supported) { sband->ht_cap.cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40; sband->ht_cap.cap &= ~IEEE80211_HT_CAP_SGI_40; } /* * Since we use a u32 for rate bitmaps in * ieee80211_get_response_rate, we cannot * have more than 32 legacy rates. 
*/ if (WARN_ON(sband->n_bitrates > 32)) return -EINVAL; for (i = 0; i < sband->n_channels; i++) { sband->channels[i].orig_flags = sband->channels[i].flags; sband->channels[i].orig_mag = INT_MAX; sband->channels[i].orig_mpwr = sband->channels[i].max_power; sband->channels[i].band = band; } for (i = 0; i < sband->n_iftype_data; i++) { const struct ieee80211_sband_iftype_data *iftd; iftd = &sband->iftype_data[i]; if (WARN_ON(!iftd->types_mask)) return -EINVAL; if (WARN_ON(types & iftd->types_mask)) return -EINVAL; /* at least one piece of information must be present */ if (WARN_ON(!iftd->he_cap.has_he)) return -EINVAL; types |= iftd->types_mask; } have_band = true; } if (!have_band) { WARN_ON(1); return -EINVAL; } #ifdef CONFIG_PM if (WARN_ON(rdev->wiphy.wowlan && rdev->wiphy.wowlan->n_patterns && (!rdev->wiphy.wowlan->pattern_min_len || rdev->wiphy.wowlan->pattern_min_len > rdev->wiphy.wowlan->pattern_max_len))) return -EINVAL; #endif /* check and set up bitrates */ ieee80211_set_bitrate_flags(wiphy); rdev->wiphy.features |= NL80211_FEATURE_SCAN_FLUSH; rtnl_lock(); res = device_add(&rdev->wiphy.dev); if (res) { rtnl_unlock(); return res; } /* set up regulatory info */ wiphy_regulatory_register(wiphy); list_add_rcu(&rdev->list, &cfg80211_rdev_list); cfg80211_rdev_list_generation++; /* add to debugfs */ rdev->wiphy.debugfsdir = debugfs_create_dir(wiphy_name(&rdev->wiphy), ieee80211_debugfs_dir); if (IS_ERR(rdev->wiphy.debugfsdir)) rdev->wiphy.debugfsdir = NULL; cfg80211_debugfs_rdev_add(rdev); nl80211_notify_wiphy(rdev, NL80211_CMD_NEW_WIPHY); if (wiphy->regulatory_flags & REGULATORY_CUSTOM_REG) { struct regulatory_request request; request.wiphy_idx = get_wiphy_idx(wiphy); request.initiator = NL80211_REGDOM_SET_BY_DRIVER; request.alpha2[0] = '9'; request.alpha2[1] = '9'; nl80211_send_reg_change_event(&request); } /* Check that nobody globally advertises any capabilities they do not * advertise on all possible interface types. 
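	 * This is verified below by AND-ing together, byte by byte, the
	 * per-interface-type extended capability masks and warning if the
	 * global extended_capabilities array advertises a bit that is
	 * missing from that intersection.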
*/ if (wiphy->extended_capabilities_len && wiphy->num_iftype_ext_capab && wiphy->iftype_ext_capab) { u8 supported_on_all, j; const struct wiphy_iftype_ext_capab *capab; capab = wiphy->iftype_ext_capab; for (j = 0; j < wiphy->extended_capabilities_len; j++) { if (capab[0].extended_capabilities_len > j) supported_on_all = capab[0].extended_capabilities[j]; else supported_on_all = 0x00; for (i = 1; i < wiphy->num_iftype_ext_capab; i++) { if (j >= capab[i].extended_capabilities_len) { supported_on_all = 0x00; break; } supported_on_all &= capab[i].extended_capabilities[j]; } if (WARN_ON(wiphy->extended_capabilities[j] & ~supported_on_all)) break; } } rdev->wiphy.registered = true; rtnl_unlock(); res = rfkill_register(rdev->rfkill); if (res) { rfkill_destroy(rdev->rfkill); rdev->rfkill = NULL; wiphy_unregister(&rdev->wiphy); return res; } return 0; } EXPORT_SYMBOL(wiphy_register); void wiphy_rfkill_start_polling(struct wiphy *wiphy) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); if (!rdev->ops->rfkill_poll) return; rdev->rfkill_ops.poll = cfg80211_rfkill_poll; rfkill_resume_polling(rdev->rfkill); } EXPORT_SYMBOL(wiphy_rfkill_start_polling); void wiphy_rfkill_stop_polling(struct wiphy *wiphy) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); rfkill_pause_polling(rdev->rfkill); } EXPORT_SYMBOL(wiphy_rfkill_stop_polling); void wiphy_unregister(struct wiphy *wiphy) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); wait_event(rdev->dev_wait, ({ int __count; rtnl_lock(); __count = rdev->opencount; rtnl_unlock(); __count == 0; })); if (rdev->rfkill) rfkill_unregister(rdev->rfkill); rtnl_lock(); nl80211_notify_wiphy(rdev, NL80211_CMD_DEL_WIPHY); rdev->wiphy.registered = false; WARN_ON(!list_empty(&rdev->wiphy.wdev_list)); /* * First remove the hardware from everywhere, this makes * it impossible to find from userspace. 
*/ debugfs_remove_recursive(rdev->wiphy.debugfsdir); list_del_rcu(&rdev->list); synchronize_rcu(); /* * If this device got a regulatory hint tell core its * free to listen now to a new shiny device regulatory hint */ wiphy_regulatory_deregister(wiphy); cfg80211_rdev_list_generation++; device_del(&rdev->wiphy.dev); rtnl_unlock(); flush_work(&rdev->scan_done_wk); cancel_work_sync(&rdev->conn_work); flush_work(&rdev->event_work); cancel_delayed_work_sync(&rdev->dfs_update_channels_wk); flush_work(&rdev->destroy_work); flush_work(&rdev->sched_scan_stop_wk); flush_work(&rdev->mlme_unreg_wk); flush_work(&rdev->propagate_radar_detect_wk); flush_work(&rdev->propagate_cac_done_wk); #ifdef CONFIG_PM if (rdev->wiphy.wowlan_config && rdev->ops->set_wakeup) rdev_set_wakeup(rdev, false); #endif cfg80211_rdev_free_wowlan(rdev); cfg80211_rdev_free_coalesce(rdev); } EXPORT_SYMBOL(wiphy_unregister); void cfg80211_dev_free(struct cfg80211_registered_device *rdev) { struct cfg80211_internal_bss *scan, *tmp; struct cfg80211_beacon_registration *reg, *treg; rfkill_destroy(rdev->rfkill); list_for_each_entry_safe(reg, treg, &rdev->beacon_registrations, list) { list_del(®->list); kfree(reg); } list_for_each_entry_safe(scan, tmp, &rdev->bss_list, list) cfg80211_put_bss(&rdev->wiphy, &scan->pub); kfree(rdev); } void wiphy_free(struct wiphy *wiphy) { put_device(&wiphy->dev); } EXPORT_SYMBOL(wiphy_free); void wiphy_rfkill_set_hw_state(struct wiphy *wiphy, bool blocked) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); if (rfkill_set_hw_state(rdev->rfkill, blocked)) schedule_work(&rdev->rfkill_block); } EXPORT_SYMBOL(wiphy_rfkill_set_hw_state); void cfg80211_cqm_config_free(struct wireless_dev *wdev) { kfree(wdev->cqm_config); wdev->cqm_config = NULL; } static void __cfg80211_unregister_wdev(struct wireless_dev *wdev, bool sync) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); ASSERT_RTNL(); flush_work(&wdev->pmsr_free_wk); nl80211_notify_iface(rdev, wdev, NL80211_CMD_DEL_INTERFACE); list_del_rcu(&wdev->list); if (sync) synchronize_rcu(); rdev->devlist_generation++; cfg80211_mlme_purge_registrations(wdev); switch (wdev->iftype) { case NL80211_IFTYPE_P2P_DEVICE: cfg80211_stop_p2p_device(rdev, wdev); break; case NL80211_IFTYPE_NAN: cfg80211_stop_nan(rdev, wdev); break; default: break; } #ifdef CPTCFG_CFG80211_WEXT kzfree(wdev->wext.keys); #endif /* only initialized if we have a netdev */ if (wdev->netdev) flush_work(&wdev->disconnect_wk); cfg80211_cqm_config_free(wdev); } void cfg80211_unregister_wdev(struct wireless_dev *wdev) { if (WARN_ON(wdev->netdev)) return; __cfg80211_unregister_wdev(wdev, true); } EXPORT_SYMBOL(cfg80211_unregister_wdev); static const struct device_type wiphy_type = { .name = "wlan", }; void cfg80211_update_iface_num(struct cfg80211_registered_device *rdev, enum nl80211_iftype iftype, int num) { ASSERT_RTNL(); rdev->num_running_ifaces += num; if (iftype == NL80211_IFTYPE_MONITOR) rdev->num_running_monitor_ifaces += num; } void __cfg80211_leave(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev) { struct net_device *dev = wdev->netdev; struct cfg80211_sched_scan_request *pos, *tmp; ASSERT_RTNL(); ASSERT_WDEV_LOCK(wdev); cfg80211_pmsr_wdev_down(wdev); switch (wdev->iftype) { case NL80211_IFTYPE_ADHOC: __cfg80211_leave_ibss(rdev, dev, true); break; case NL80211_IFTYPE_P2P_CLIENT: case NL80211_IFTYPE_STATION: list_for_each_entry_safe(pos, tmp, &rdev->sched_scan_req_list, list) { if (dev == pos->dev) cfg80211_stop_sched_scan_req(rdev, pos, false); 
} #ifdef CPTCFG_CFG80211_WEXT kfree(wdev->wext.ie); wdev->wext.ie = NULL; wdev->wext.ie_len = 0; wdev->wext.connect.auth_type = NL80211_AUTHTYPE_AUTOMATIC; #endif cfg80211_disconnect(rdev, dev, WLAN_REASON_DEAUTH_LEAVING, true); break; case NL80211_IFTYPE_MESH_POINT: __cfg80211_leave_mesh(rdev, dev); break; case NL80211_IFTYPE_AP: case NL80211_IFTYPE_P2P_GO: __cfg80211_stop_ap(rdev, dev, true); break; case NL80211_IFTYPE_OCB: __cfg80211_leave_ocb(rdev, dev); break; case NL80211_IFTYPE_WDS: /* must be handled by mac80211/driver, has no APIs */ break; case NL80211_IFTYPE_NAN_DATA: /* TODO: Need to add low level APIs */ WARN_ON(1); break; case NL80211_IFTYPE_P2P_DEVICE: case NL80211_IFTYPE_NAN: /* cannot happen, has no netdev */ break; case NL80211_IFTYPE_AP_VLAN: case NL80211_IFTYPE_MONITOR: /* nothing to do */ break; case NL80211_IFTYPE_UNSPECIFIED: case NUM_NL80211_IFTYPES: /* invalid */ break; } } void cfg80211_leave(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev) { wdev_lock(wdev); __cfg80211_leave(rdev, wdev); wdev_unlock(wdev); } void cfg80211_stop_iface(struct wiphy *wiphy, struct wireless_dev *wdev, gfp_t gfp) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); struct cfg80211_event *ev; unsigned long flags; trace_cfg80211_stop_iface(wiphy, wdev); ev = kzalloc(sizeof(*ev), gfp); if (!ev) return; ev->type = EVENT_STOPPED; spin_lock_irqsave(&wdev->event_lock, flags); list_add_tail(&ev->list, &wdev->event_list); spin_unlock_irqrestore(&wdev->event_lock, flags); queue_work(cfg80211_wq, &rdev->event_work); } EXPORT_SYMBOL(cfg80211_stop_iface); void cfg80211_init_wdev(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev) { mutex_init(&wdev->mtx); INIT_LIST_HEAD(&wdev->event_list); spin_lock_init(&wdev->event_lock); INIT_LIST_HEAD(&wdev->mgmt_registrations); spin_lock_init(&wdev->mgmt_registrations_lock); INIT_LIST_HEAD(&wdev->pmsr_list); spin_lock_init(&wdev->pmsr_lock); INIT_WORK(&wdev->pmsr_free_wk, cfg80211_pmsr_free_wk); /* * We get here also when the interface changes network namespaces, * as it's registered into the new one, but we don't want it to * change ID in that case. Checking if the ID is already assigned * works, because 0 isn't considered a valid ID and the memory is * 0-initialized. */ if (!wdev->identifier) wdev->identifier = ++rdev->wdev_id; list_add_rcu(&wdev->list, &rdev->wiphy.wdev_list); rdev->devlist_generation++; nl80211_notify_iface(rdev, wdev, NL80211_CMD_NEW_INTERFACE); } static int cfg80211_netdev_notifier_call(struct notifier_block *nb, unsigned long state, void *ptr) { struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev; struct cfg80211_sched_scan_request *pos, *tmp; if (!wdev) return NOTIFY_DONE; rdev = wiphy_to_rdev(wdev->wiphy); WARN_ON(wdev->iftype == NL80211_IFTYPE_UNSPECIFIED); switch (state) { case NETDEV_POST_INIT: SET_NETDEV_DEVTYPE(dev, &wiphy_type); break; case NETDEV_REGISTER: /* * NB: cannot take rdev->mtx here because this may be * called within code protected by it when interfaces * are added with nl80211. 
*/ /* can only change netns with wiphy */ dev->features |= NETIF_F_NETNS_LOCAL; if (sysfs_create_link(&dev->dev.kobj, &rdev->wiphy.dev.kobj, "phy80211")) { pr_err("failed to add phy80211 symlink to netdev!\n"); } wdev->netdev = dev; #ifdef CPTCFG_CFG80211_WEXT #ifdef CONFIG_WIRELESS_EXT if (!dev->wireless_handlers) dev->wireless_handlers = &cfg80211_wext_handler; #else printk_once(KERN_WARNING "cfg80211: wext will not work because " "kernel was compiled with CONFIG_WIRELESS_EXT=n. " "Tools using wext interface, like iwconfig will " "not work.\n"); #endif wdev->wext.default_key = -1; wdev->wext.default_mgmt_key = -1; wdev->wext.connect.auth_type = NL80211_AUTHTYPE_AUTOMATIC; #endif if (wdev->wiphy->flags & WIPHY_FLAG_PS_ON_BY_DEFAULT) wdev->ps = true; else wdev->ps = false; /* allow mac80211 to determine the timeout */ wdev->ps_timeout = -1; if ((wdev->iftype == NL80211_IFTYPE_STATION || wdev->iftype == NL80211_IFTYPE_P2P_CLIENT || wdev->iftype == NL80211_IFTYPE_ADHOC) && !wdev->use_4addr) dev->priv_flags |= IFF_DONT_BRIDGE; INIT_WORK(&wdev->disconnect_wk, cfg80211_autodisconnect_wk); cfg80211_init_wdev(rdev, wdev); break; case NETDEV_GOING_DOWN: cfg80211_leave(rdev, wdev); break; case NETDEV_DOWN: cfg80211_update_iface_num(rdev, wdev->iftype, -1); if (rdev->scan_req && rdev->scan_req->wdev == wdev) { if (WARN_ON(!rdev->scan_req->notified)) rdev->scan_req->info.aborted = true; ___cfg80211_scan_done(rdev, false); } list_for_each_entry_safe(pos, tmp, &rdev->sched_scan_req_list, list) { if (WARN_ON(pos->dev == wdev->netdev)) cfg80211_stop_sched_scan_req(rdev, pos, false); } rdev->opencount--; wake_up(&rdev->dev_wait); break; case NETDEV_UP: cfg80211_update_iface_num(rdev, wdev->iftype, 1); wdev_lock(wdev); switch (wdev->iftype) { #ifdef CPTCFG_CFG80211_WEXT case NL80211_IFTYPE_ADHOC: cfg80211_ibss_wext_join(rdev, wdev); break; case NL80211_IFTYPE_STATION: cfg80211_mgd_wext_connect(rdev, wdev); break; #endif #ifdef CPTCFG_MAC80211_MESH case NL80211_IFTYPE_MESH_POINT: { /* backward compat code... */ struct mesh_setup setup; memcpy(&setup, &default_mesh_setup, sizeof(setup)); /* back compat only needed for mesh_id */ setup.mesh_id = wdev->ssid; setup.mesh_id_len = wdev->mesh_id_up_len; if (wdev->mesh_id_up_len) __cfg80211_join_mesh(rdev, dev, &setup, &default_mesh_config); break; } #endif default: break; } wdev_unlock(wdev); rdev->opencount++; /* * Configure power management to the driver here so that its * correctly set also after interface type changes etc. */ if ((wdev->iftype == NL80211_IFTYPE_STATION || wdev->iftype == NL80211_IFTYPE_P2P_CLIENT) && rdev->ops->set_power_mgmt && rdev_set_power_mgmt(rdev, dev, wdev->ps, wdev->ps_timeout)) { /* assume this means it's off */ wdev->ps = false; } break; case NETDEV_UNREGISTER: /* * It is possible to get NETDEV_UNREGISTER * multiple times. To detect that, check * that the interface is still on the list * of registered interfaces, and only then * remove and clean it up. */ if (!list_empty(&wdev->list)) { __cfg80211_unregister_wdev(wdev, false); sysfs_remove_link(&dev->dev.kobj, "phy80211"); } /* * synchronise (so that we won't find this netdev * from other code any more) and then clear the list * head so that the above code can safely check for * !list_empty() to avoid double-cleanup. */ synchronize_rcu(); INIT_LIST_HEAD(&wdev->list); /* * Ensure that all events have been processed and * freed. 
*/ cfg80211_process_wdev_events(wdev); if (WARN_ON(wdev->current_bss)) { cfg80211_unhold_bss(wdev->current_bss); cfg80211_put_bss(wdev->wiphy, &wdev->current_bss->pub); wdev->current_bss = NULL; } break; case NETDEV_PRE_UP: if (!(wdev->wiphy->interface_modes & BIT(wdev->iftype))) return notifier_from_errno(-EOPNOTSUPP); if (rfkill_blocked(rdev->rfkill)) return notifier_from_errno(-ERFKILL); break; default: return NOTIFY_DONE; } wireless_nlevent_flush(); return NOTIFY_OK; } static struct notifier_block cfg80211_netdev_notifier = { .notifier_call = cfg80211_netdev_notifier_call, }; static void __net_exit cfg80211_pernet_exit(struct net *net) { struct cfg80211_registered_device *rdev; rtnl_lock(); list_for_each_entry(rdev, &cfg80211_rdev_list, list) { if (net_eq(wiphy_net(&rdev->wiphy), net)) WARN_ON(cfg80211_switch_netns(rdev, &init_net)); } rtnl_unlock(); } static struct pernet_operations cfg80211_pernet_ops = { .exit = cfg80211_pernet_exit, }; static int __init cfg80211_init(void) { int err; err = register_pernet_device(&cfg80211_pernet_ops); if (err) goto out_fail_pernet; err = wiphy_sysfs_init(); if (err) goto out_fail_sysfs; err = register_netdevice_notifier(&cfg80211_netdev_notifier); if (err) goto out_fail_notifier; err = nl80211_init(); if (err) goto out_fail_nl80211; ieee80211_debugfs_dir = debugfs_create_dir("ieee80211", NULL); err = regulatory_init(); if (err) goto out_fail_reg; cfg80211_wq = alloc_ordered_workqueue("cfg80211", WQ_MEM_RECLAIM); if (!cfg80211_wq) { err = -ENOMEM; goto out_fail_wq; } return 0; out_fail_wq: regulatory_exit(); out_fail_reg: debugfs_remove(ieee80211_debugfs_dir); nl80211_exit(); out_fail_nl80211: unregister_netdevice_notifier(&cfg80211_netdev_notifier); out_fail_notifier: wiphy_sysfs_exit(); out_fail_sysfs: unregister_pernet_device(&cfg80211_pernet_ops); out_fail_pernet: return err; } fs_initcall(cfg80211_init); static void __exit cfg80211_exit(void) { debugfs_remove(ieee80211_debugfs_dir); nl80211_exit(); unregister_netdevice_notifier(&cfg80211_netdev_notifier); wiphy_sysfs_exit(); regulatory_exit(); unregister_pernet_device(&cfg80211_pernet_ops); destroy_workqueue(cfg80211_wq); } module_exit(cfg80211_exit); backport-iwlwifi-dkms-7906/net/wireless/core.h000066400000000000000000000425361351064634500214370ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0 */ /* * Wireless configuration interface internals. * * Copyright 2006-2010 Johannes Berg * Copyright 2015-2016 Intel Deutschland GmbH * Copyright (C) 2018-2019 Intel Corporation */ #ifndef __NET_WIRELESS_CORE_H #define __NET_WIRELESS_CORE_H #include #include #include #include #include #include #include #include #include #include "reg.h" #define WIPHY_IDX_INVALID -1 struct cfg80211_registered_device { const struct cfg80211_ops *ops; struct list_head list; /* rfkill support */ struct rfkill_ops rfkill_ops; struct rfkill *rfkill; struct work_struct rfkill_block; /* ISO / IEC 3166 alpha2 for which this device is receiving * country IEs on, this can help disregard country IEs from APs * on the same alpha2 quickly. The alpha2 may differ from * cfg80211_regdomain's alpha2 when an intersection has occurred. * If the AP is reconfigured this can also be used to tell us if * the country on the country IE changed. */ char country_ie_alpha2[2]; /* * the driver requests the regulatory core to set this regulatory * domain as the wiphy's. 
Only used for %REGULATORY_WIPHY_SELF_MANAGED * devices using the regulatory_set_wiphy_regd() API */ const struct ieee80211_regdomain *requested_regd; /* If a Country IE has been received this tells us the environment * which its telling us its in. This defaults to ENVIRON_ANY */ enum environment_cap env; /* wiphy index, internal only */ int wiphy_idx; /* protected by RTNL */ int devlist_generation, wdev_id; int opencount; wait_queue_head_t dev_wait; struct list_head beacon_registrations; spinlock_t beacon_registrations_lock; struct list_head mlme_unreg; spinlock_t mlme_unreg_lock; struct work_struct mlme_unreg_wk; /* protected by RTNL only */ int num_running_ifaces; int num_running_monitor_ifaces; u64 cookie_counter; /* BSSes/scanning */ spinlock_t bss_lock; struct list_head bss_list; struct rb_root bss_tree; u32 bss_generation; u32 bss_entries; struct cfg80211_scan_request *scan_req; /* protected by RTNL */ struct sk_buff *scan_msg; struct list_head sched_scan_req_list; time64_t suspend_at; struct work_struct scan_done_wk; struct genl_info *cur_cmd_info; struct work_struct conn_work; struct work_struct event_work; struct delayed_work dfs_update_channels_wk; /* netlink port which started critical protocol (0 means not started) */ u32 crit_proto_nlportid; struct cfg80211_coalesce *coalesce; struct work_struct destroy_work; struct work_struct sched_scan_stop_wk; struct work_struct sched_scan_res_wk; struct cfg80211_chan_def radar_chandef; struct work_struct propagate_radar_detect_wk; struct cfg80211_chan_def cac_done_chandef; struct work_struct propagate_cac_done_wk; /* must be last because of the way we do wiphy_priv(), * and it should at least be aligned to NETDEV_ALIGN */ struct wiphy wiphy __aligned(NETDEV_ALIGN); }; static inline struct cfg80211_registered_device *wiphy_to_rdev(struct wiphy *wiphy) { BUG_ON(!wiphy); return container_of(wiphy, struct cfg80211_registered_device, wiphy); } static inline void cfg80211_rdev_free_wowlan(struct cfg80211_registered_device *rdev) { #ifdef CONFIG_PM int i; if (!rdev->wiphy.wowlan_config) return; for (i = 0; i < rdev->wiphy.wowlan_config->n_patterns; i++) kfree(rdev->wiphy.wowlan_config->patterns[i].mask); kfree(rdev->wiphy.wowlan_config->patterns); if (rdev->wiphy.wowlan_config->tcp && rdev->wiphy.wowlan_config->tcp->sock) sock_release(rdev->wiphy.wowlan_config->tcp->sock); kfree(rdev->wiphy.wowlan_config->tcp); kfree(rdev->wiphy.wowlan_config->nd_config); kfree(rdev->wiphy.wowlan_config); #endif } static inline u64 cfg80211_assign_cookie(struct cfg80211_registered_device *rdev) { u64 r = ++rdev->cookie_counter; if (WARN_ON(r == 0)) r = ++rdev->cookie_counter; return r; } extern struct workqueue_struct *cfg80211_wq; extern struct list_head cfg80211_rdev_list; extern int cfg80211_rdev_list_generation; struct cfg80211_internal_bss { struct list_head list; struct list_head hidden_list; struct rb_node rbn; u64 ts_boottime; unsigned long ts; unsigned long refcount; atomic_t hold; /* time at the start of the reception of the first octet of the * timestamp field of the last beacon/probe received for this BSS. * The time is the TSF of the BSS specified by %parent_bssid. */ u64 parent_tsf; /* the BSS according to which %parent_tsf is set. This is set to * the BSS that the interface that requested the scan was connected to * when the beacon/probe was received. 
*/ u8 parent_bssid[ETH_ALEN] __aligned(2); /* must be last because of priv member */ struct cfg80211_bss pub; }; static inline struct cfg80211_internal_bss *bss_from_pub(struct cfg80211_bss *pub) { return container_of(pub, struct cfg80211_internal_bss, pub); } static inline void cfg80211_hold_bss(struct cfg80211_internal_bss *bss) { atomic_inc(&bss->hold); if (bss->pub.transmitted_bss) { bss = container_of(bss->pub.transmitted_bss, struct cfg80211_internal_bss, pub); atomic_inc(&bss->hold); } } static inline void cfg80211_unhold_bss(struct cfg80211_internal_bss *bss) { int r = atomic_dec_return(&bss->hold); WARN_ON(r < 0); if (bss->pub.transmitted_bss) { bss = container_of(bss->pub.transmitted_bss, struct cfg80211_internal_bss, pub); r = atomic_dec_return(&bss->hold); WARN_ON(r < 0); } } struct cfg80211_registered_device *cfg80211_rdev_by_wiphy_idx(int wiphy_idx); int get_wiphy_idx(struct wiphy *wiphy); struct wiphy *wiphy_idx_to_wiphy(int wiphy_idx); int cfg80211_switch_netns(struct cfg80211_registered_device *rdev, struct net *net); void cfg80211_init_wdev(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev); static inline void wdev_lock(struct wireless_dev *wdev) __acquires(wdev) { mutex_lock(&wdev->mtx); __acquire(wdev->mtx); } static inline void wdev_unlock(struct wireless_dev *wdev) __releases(wdev) { __release(wdev->mtx); mutex_unlock(&wdev->mtx); } #define ASSERT_WDEV_LOCK(wdev) lockdep_assert_held(&(wdev)->mtx) static inline bool cfg80211_has_monitors_only(struct cfg80211_registered_device *rdev) { ASSERT_RTNL(); return rdev->num_running_ifaces == rdev->num_running_monitor_ifaces && rdev->num_running_ifaces > 0; } enum cfg80211_event_type { EVENT_CONNECT_RESULT, EVENT_ROAMED, EVENT_DISCONNECTED, EVENT_IBSS_JOINED, EVENT_STOPPED, EVENT_PORT_AUTHORIZED, }; struct cfg80211_event { struct list_head list; enum cfg80211_event_type type; union { struct cfg80211_connect_resp_params cr; struct cfg80211_roam_info rm; struct { const u8 *ie; size_t ie_len; u16 reason; bool locally_generated; } dc; struct { u8 bssid[ETH_ALEN]; struct ieee80211_channel *channel; } ij; struct { u8 bssid[ETH_ALEN]; } pa; }; }; struct cfg80211_cached_keys { struct key_params params[CFG80211_MAX_WEP_KEYS]; u8 data[CFG80211_MAX_WEP_KEYS][WLAN_KEY_LEN_WEP104]; int def; }; enum cfg80211_chan_mode { CHAN_MODE_UNDEFINED, CHAN_MODE_SHARED, CHAN_MODE_EXCLUSIVE, }; struct cfg80211_beacon_registration { struct list_head list; u32 nlportid; }; struct cfg80211_cqm_config { u32 rssi_hyst; s32 last_rssi_event_value; int n_rssi_thresholds; s32 rssi_thresholds[0]; }; void cfg80211_destroy_ifaces(struct cfg80211_registered_device *rdev); /* free object */ void cfg80211_dev_free(struct cfg80211_registered_device *rdev); int cfg80211_dev_rename(struct cfg80211_registered_device *rdev, char *newname); void ieee80211_set_bitrate_flags(struct wiphy *wiphy); void cfg80211_bss_expire(struct cfg80211_registered_device *rdev); void cfg80211_bss_age(struct cfg80211_registered_device *rdev, unsigned long age_secs); /* IBSS */ int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_ibss_params *params, struct cfg80211_cached_keys *connkeys); void cfg80211_clear_ibss(struct net_device *dev, bool nowext); int __cfg80211_leave_ibss(struct cfg80211_registered_device *rdev, struct net_device *dev, bool nowext); int cfg80211_leave_ibss(struct cfg80211_registered_device *rdev, struct net_device *dev, bool nowext); void __cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid, 
struct ieee80211_channel *channel); int cfg80211_ibss_wext_join(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev); /* mesh */ extern const struct mesh_config default_mesh_config; extern const struct mesh_setup default_mesh_setup; int __cfg80211_join_mesh(struct cfg80211_registered_device *rdev, struct net_device *dev, struct mesh_setup *setup, const struct mesh_config *conf); int __cfg80211_leave_mesh(struct cfg80211_registered_device *rdev, struct net_device *dev); int cfg80211_leave_mesh(struct cfg80211_registered_device *rdev, struct net_device *dev); int cfg80211_set_mesh_channel(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, struct cfg80211_chan_def *chandef); /* OCB */ int __cfg80211_join_ocb(struct cfg80211_registered_device *rdev, struct net_device *dev, struct ocb_setup *setup); int cfg80211_join_ocb(struct cfg80211_registered_device *rdev, struct net_device *dev, struct ocb_setup *setup); int __cfg80211_leave_ocb(struct cfg80211_registered_device *rdev, struct net_device *dev); int cfg80211_leave_ocb(struct cfg80211_registered_device *rdev, struct net_device *dev); /* AP */ int __cfg80211_stop_ap(struct cfg80211_registered_device *rdev, struct net_device *dev, bool notify); int cfg80211_stop_ap(struct cfg80211_registered_device *rdev, struct net_device *dev, bool notify); /* MLME */ int cfg80211_mlme_auth(struct cfg80211_registered_device *rdev, struct net_device *dev, struct ieee80211_channel *chan, enum nl80211_auth_type auth_type, const u8 *bssid, const u8 *ssid, int ssid_len, const u8 *ie, int ie_len, const u8 *key, int key_len, int key_idx, const u8 *auth_data, int auth_data_len); int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev, struct net_device *dev, struct ieee80211_channel *chan, const u8 *bssid, const u8 *ssid, int ssid_len, struct cfg80211_assoc_request *req); int cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev, struct net_device *dev, const u8 *bssid, const u8 *ie, int ie_len, u16 reason, bool local_state_change); int cfg80211_mlme_disassoc(struct cfg80211_registered_device *rdev, struct net_device *dev, const u8 *bssid, const u8 *ie, int ie_len, u16 reason, bool local_state_change); void cfg80211_mlme_down(struct cfg80211_registered_device *rdev, struct net_device *dev); int cfg80211_mlme_register_mgmt(struct wireless_dev *wdev, u32 snd_pid, u16 frame_type, const u8 *match_data, int match_len); void cfg80211_mlme_unreg_wk(struct work_struct *wk); void cfg80211_mlme_unregister_socket(struct wireless_dev *wdev, u32 nlpid); void cfg80211_mlme_purge_registrations(struct wireless_dev *wdev); int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, struct cfg80211_mgmt_tx_params *params, u64 *cookie); void cfg80211_oper_and_ht_capa(struct ieee80211_ht_cap *ht_capa, const struct ieee80211_ht_cap *ht_capa_mask); void cfg80211_oper_and_vht_capa(struct ieee80211_vht_cap *vht_capa, const struct ieee80211_vht_cap *vht_capa_mask); /* SME events */ int cfg80211_connect(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_connect_params *connect, struct cfg80211_cached_keys *connkeys, const u8 *prev_bssid); void __cfg80211_connect_result(struct net_device *dev, struct cfg80211_connect_resp_params *params, bool wextev); void __cfg80211_disconnected(struct net_device *dev, const u8 *ie, size_t ie_len, u16 reason, bool from_ap); int cfg80211_disconnect(struct cfg80211_registered_device *rdev, struct net_device *dev, u16 reason, bool wextev); void 
__cfg80211_roamed(struct wireless_dev *wdev, struct cfg80211_roam_info *info); void __cfg80211_port_authorized(struct wireless_dev *wdev, const u8 *bssid); int cfg80211_mgd_wext_connect(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev); void cfg80211_autodisconnect_wk(struct work_struct *work); /* SME implementation */ void cfg80211_conn_work(struct work_struct *work); void cfg80211_sme_scan_done(struct net_device *dev); bool cfg80211_sme_rx_assoc_resp(struct wireless_dev *wdev, u16 status); void cfg80211_sme_rx_auth(struct wireless_dev *wdev, const u8 *buf, size_t len); void cfg80211_sme_disassoc(struct wireless_dev *wdev); void cfg80211_sme_deauth(struct wireless_dev *wdev); void cfg80211_sme_auth_timeout(struct wireless_dev *wdev); void cfg80211_sme_assoc_timeout(struct wireless_dev *wdev); void cfg80211_sme_abandon_assoc(struct wireless_dev *wdev); /* internal helpers */ bool cfg80211_supported_cipher_suite(struct wiphy *wiphy, u32 cipher); int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev, struct key_params *params, int key_idx, bool pairwise, const u8 *mac_addr); void __cfg80211_scan_done(struct work_struct *wk); void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, bool send_message); void cfg80211_add_sched_scan_req(struct cfg80211_registered_device *rdev, struct cfg80211_sched_scan_request *req); int cfg80211_sched_scan_req_possible(struct cfg80211_registered_device *rdev, bool want_multi); void cfg80211_sched_scan_results_wk(struct work_struct *work); int cfg80211_stop_sched_scan_req(struct cfg80211_registered_device *rdev, struct cfg80211_sched_scan_request *req, bool driver_initiated); int __cfg80211_stop_sched_scan(struct cfg80211_registered_device *rdev, u64 reqid, bool driver_initiated); void cfg80211_upload_connect_keys(struct wireless_dev *wdev); int cfg80211_change_iface(struct cfg80211_registered_device *rdev, struct net_device *dev, enum nl80211_iftype ntype, struct vif_params *params); void cfg80211_process_rdev_events(struct cfg80211_registered_device *rdev); void cfg80211_process_wdev_events(struct wireless_dev *wdev); bool cfg80211_does_bw_fit_range(const struct ieee80211_freq_range *freq_range, u32 center_freq_khz, u32 bw_khz); extern struct work_struct cfg80211_disconnect_work; /** * cfg80211_chandef_dfs_usable - checks if chandef is DFS usable * @wiphy: the wiphy to validate against * @chandef: the channel definition to check * * Checks if chandef is usable and we can/need start CAC on such channel. 
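 * (CAC, the Channel Availability Check, is the mandatory quiet period in
 * which a radio must monitor a DFS channel for radar before using it.)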
* * Return: true if all channels are available and at least * one channel requires CAC (NL80211_DFS_USABLE) */ bool cfg80211_chandef_dfs_usable(struct wiphy *wiphy, const struct cfg80211_chan_def *chandef); void cfg80211_set_dfs_state(struct wiphy *wiphy, const struct cfg80211_chan_def *chandef, enum nl80211_dfs_state dfs_state); void cfg80211_dfs_channels_update_work(struct work_struct *work); unsigned int cfg80211_chandef_dfs_cac_time(struct wiphy *wiphy, const struct cfg80211_chan_def *chandef); void cfg80211_sched_dfs_chan_update(struct cfg80211_registered_device *rdev); bool cfg80211_any_wiphy_oper_chan(struct wiphy *wiphy, struct ieee80211_channel *chan); bool cfg80211_beaconing_iface_active(struct wireless_dev *wdev); bool cfg80211_is_sub_chan(struct cfg80211_chan_def *chandef, struct ieee80211_channel *chan); static inline unsigned int elapsed_jiffies_msecs(unsigned long start) { unsigned long end = jiffies; if (end >= start) return jiffies_to_msecs(end - start); return jiffies_to_msecs(end + (ULONG_MAX - start) + 1); } void cfg80211_get_chan_state(struct wireless_dev *wdev, struct ieee80211_channel **chan, enum cfg80211_chan_mode *chanmode, u8 *radar_detect); int cfg80211_set_monitor_channel(struct cfg80211_registered_device *rdev, struct cfg80211_chan_def *chandef); int ieee80211_get_ratemask(struct ieee80211_supported_band *sband, const u8 *rates, unsigned int n_rates, u32 *mask); int cfg80211_validate_beacon_int(struct cfg80211_registered_device *rdev, enum nl80211_iftype iftype, u32 beacon_int); void cfg80211_update_iface_num(struct cfg80211_registered_device *rdev, enum nl80211_iftype iftype, int num); void __cfg80211_leave(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev); void cfg80211_leave(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev); void cfg80211_stop_p2p_device(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev); void cfg80211_stop_nan(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev); #ifdef CPTCFG_CFG80211_DEVELOPER_WARNINGS #define CFG80211_DEV_WARN_ON(cond) WARN_ON(cond) #else /* * Trick to enable using it as a condition, * and also not give a warning when it's * not used that way. */ #define CFG80211_DEV_WARN_ON(cond) ({bool __r = (cond); __r; }) #endif void cfg80211_cqm_config_free(struct wireless_dev *wdev); void cfg80211_release_pmsr(struct wireless_dev *wdev, u32 portid); void cfg80211_pmsr_wdev_down(struct wireless_dev *wdev); void cfg80211_pmsr_free_wk(struct work_struct *work); #endif /* __NET_WIRELESS_CORE_H */ backport-iwlwifi-dkms-7906/net/wireless/debugfs.c000066400000000000000000000060271351064634500221140ustar00rootroot00000000000000/* * cfg80211 debugfs * * Copyright 2009 Luis R. Rodriguez * Copyright 2007 Johannes Berg * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include #include "core.h" #include "debugfs.h" #define DEBUGFS_READONLY_FILE(name, buflen, fmt, value...) 
\ static ssize_t name## _read(struct file *file, char __user *userbuf, \ size_t count, loff_t *ppos) \ { \ struct wiphy *wiphy = file->private_data; \ char buf[buflen]; \ int res; \ \ res = scnprintf(buf, buflen, fmt "\n", ##value); \ return simple_read_from_buffer(userbuf, count, ppos, buf, res); \ } \ \ static const struct file_operations name## _ops = { \ .read = name## _read, \ .open = simple_open, \ .llseek = generic_file_llseek, \ } DEBUGFS_READONLY_FILE(rts_threshold, 20, "%d", wiphy->rts_threshold); DEBUGFS_READONLY_FILE(fragmentation_threshold, 20, "%d", wiphy->frag_threshold); DEBUGFS_READONLY_FILE(short_retry_limit, 20, "%d", wiphy->retry_short); DEBUGFS_READONLY_FILE(long_retry_limit, 20, "%d", wiphy->retry_long); static int ht_print_chan(struct ieee80211_channel *chan, char *buf, int buf_size, int offset) { if (WARN_ON(offset > buf_size)) return 0; if (chan->flags & IEEE80211_CHAN_DISABLED) return scnprintf(buf + offset, buf_size - offset, "%d Disabled\n", chan->center_freq); return scnprintf(buf + offset, buf_size - offset, "%d HT40 %c%c\n", chan->center_freq, (chan->flags & IEEE80211_CHAN_NO_HT40MINUS) ? ' ' : '-', (chan->flags & IEEE80211_CHAN_NO_HT40PLUS) ? ' ' : '+'); } static ssize_t ht40allow_map_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct wiphy *wiphy = file->private_data; char *buf; unsigned int offset = 0, buf_size = PAGE_SIZE, i, r; enum nl80211_band band; struct ieee80211_supported_band *sband; buf = kzalloc(buf_size, GFP_KERNEL); if (!buf) return -ENOMEM; rtnl_lock(); for (band = 0; band < NUM_NL80211_BANDS; band++) { sband = wiphy->bands[band]; if (!sband) continue; for (i = 0; i < sband->n_channels; i++) offset += ht_print_chan(&sband->channels[i], buf, buf_size, offset); } rtnl_unlock(); r = simple_read_from_buffer(user_buf, count, ppos, buf, offset); kfree(buf); return r; } static const struct file_operations ht40allow_map_ops = { .read = ht40allow_map_read, .open = simple_open, .llseek = default_llseek, }; #define DEBUGFS_ADD(name) \ debugfs_create_file(#name, 0444, phyd, &rdev->wiphy, &name## _ops) void cfg80211_debugfs_rdev_add(struct cfg80211_registered_device *rdev) { struct dentry *phyd = rdev->wiphy.debugfsdir; DEBUGFS_ADD(rts_threshold); DEBUGFS_ADD(fragmentation_threshold); DEBUGFS_ADD(short_retry_limit); DEBUGFS_ADD(long_retry_limit); DEBUGFS_ADD(ht40allow_map); } backport-iwlwifi-dkms-7906/net/wireless/debugfs.h000066400000000000000000000005231351064634500221140ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __CFG80211_DEBUGFS_H #define __CFG80211_DEBUGFS_H #ifdef CPTCFG_CFG80211_DEBUGFS void cfg80211_debugfs_rdev_add(struct cfg80211_registered_device *rdev); #else static inline void cfg80211_debugfs_rdev_add(struct cfg80211_registered_device *rdev) {} #endif #endif /* __CFG80211_DEBUGFS_H */ backport-iwlwifi-dkms-7906/net/wireless/ethtool.c000066400000000000000000000013351351064634500221500ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0 #include #include #include "core.h" #include "rdev-ops.h" void cfg80211_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct wireless_dev *wdev = dev->ieee80211_ptr; strlcpy(info->driver, wiphy_dev(wdev->wiphy)->driver->name, sizeof(info->driver)); strlcpy(info->version, init_utsname()->release, sizeof(info->version)); if (wdev->wiphy->fw_version[0]) strlcpy(info->fw_version, wdev->wiphy->fw_version, sizeof(info->fw_version)); else strlcpy(info->fw_version, "N/A", sizeof(info->fw_version)); 
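/* Note (illustrative): bus_info below carries the parent device name as
 * reported by dev_name(), e.g. a PCI slot such as "0000:02:00.0" for
 * PCIe adapters. */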
strlcpy(info->bus_info, dev_name(wiphy_dev(wdev->wiphy)), sizeof(info->bus_info)); } EXPORT_SYMBOL(cfg80211_get_drvinfo); backport-iwlwifi-dkms-7906/net/wireless/ibss.c000066400000000000000000000302221351064634500214270ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0 /* * Some IBSS support code for cfg80211. * * Copyright 2009 Johannes Berg */ #include #include #include #include #include #include "wext-compat.h" #include "nl80211.h" #include "rdev-ops.h" void __cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid, struct ieee80211_channel *channel) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_bss *bss; #ifdef CPTCFG_CFG80211_WEXT union iwreq_data wrqu; #endif if (WARN_ON(wdev->iftype != NL80211_IFTYPE_ADHOC)) return; if (!wdev->ssid_len) return; bss = cfg80211_get_bss(wdev->wiphy, channel, bssid, NULL, 0, IEEE80211_BSS_TYPE_IBSS, IEEE80211_PRIVACY_ANY); if (WARN_ON(!bss)) return; if (wdev->current_bss) { cfg80211_unhold_bss(wdev->current_bss); cfg80211_put_bss(wdev->wiphy, &wdev->current_bss->pub); } cfg80211_hold_bss(bss_from_pub(bss)); wdev->current_bss = bss_from_pub(bss); if (!(wdev->wiphy->flags & WIPHY_FLAG_HAS_STATIC_WEP)) cfg80211_upload_connect_keys(wdev); nl80211_send_ibss_bssid(wiphy_to_rdev(wdev->wiphy), dev, bssid, GFP_KERNEL); #ifdef CPTCFG_CFG80211_WEXT memset(&wrqu, 0, sizeof(wrqu)); memcpy(wrqu.ap_addr.sa_data, bssid, ETH_ALEN); wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL); #endif } void cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid, struct ieee80211_channel *channel, gfp_t gfp) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); struct cfg80211_event *ev; unsigned long flags; trace_cfg80211_ibss_joined(dev, bssid, channel); if (WARN_ON(!channel)) return; ev = kzalloc(sizeof(*ev), gfp); if (!ev) return; ev->type = EVENT_IBSS_JOINED; memcpy(ev->ij.bssid, bssid, ETH_ALEN); ev->ij.channel = channel; spin_lock_irqsave(&wdev->event_lock, flags); list_add_tail(&ev->list, &wdev->event_list); spin_unlock_irqrestore(&wdev->event_lock, flags); queue_work(cfg80211_wq, &rdev->event_work); } EXPORT_SYMBOL(cfg80211_ibss_joined); int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_ibss_params *params, struct cfg80211_cached_keys *connkeys) { struct wireless_dev *wdev = dev->ieee80211_ptr; int err; ASSERT_RTNL(); ASSERT_WDEV_LOCK(wdev); if (wdev->ssid_len) return -EALREADY; if (!params->basic_rates) { /* * If no rates were explicitly configured, * use the mandatory rate set for 11b or * 11a for maximum compatibility. */ struct ieee80211_supported_band *sband = rdev->wiphy.bands[params->chandef.chan->band]; int j; u32 flag = params->chandef.chan->band == NL80211_BAND_5GHZ ? 
IEEE80211_RATE_MANDATORY_A : IEEE80211_RATE_MANDATORY_B; for (j = 0; j < sband->n_bitrates; j++) { if (sband->bitrates[j].flags & flag) params->basic_rates |= BIT(j); } } if (WARN_ON(connkeys && connkeys->def < 0)) return -EINVAL; if (WARN_ON(wdev->connect_keys)) kzfree(wdev->connect_keys); wdev->connect_keys = connkeys; wdev->ibss_fixed = params->channel_fixed; wdev->ibss_dfs_possible = params->userspace_handles_dfs; wdev->chandef = params->chandef; if (connkeys) { params->wep_keys = connkeys->params; params->wep_tx_key = connkeys->def; } #ifdef CPTCFG_CFG80211_WEXT wdev->wext.ibss.chandef = params->chandef; #endif err = rdev_join_ibss(rdev, dev, params); if (err) { wdev->connect_keys = NULL; return err; } memcpy(wdev->ssid, params->ssid, params->ssid_len); wdev->ssid_len = params->ssid_len; return 0; } static void __cfg80211_clear_ibss(struct net_device *dev, bool nowext) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); int i; ASSERT_WDEV_LOCK(wdev); kzfree(wdev->connect_keys); wdev->connect_keys = NULL; rdev_set_qos_map(rdev, dev, NULL); /* * Delete all the keys ... pairwise keys can't really * exist any more anyway, but default keys might. */ if (rdev->ops->del_key) for (i = 0; i < 6; i++) rdev_del_key(rdev, dev, i, false, NULL); if (wdev->current_bss) { cfg80211_unhold_bss(wdev->current_bss); cfg80211_put_bss(wdev->wiphy, &wdev->current_bss->pub); } wdev->current_bss = NULL; wdev->ssid_len = 0; memset(&wdev->chandef, 0, sizeof(wdev->chandef)); #ifdef CPTCFG_CFG80211_WEXT if (!nowext) wdev->wext.ibss.ssid_len = 0; #endif cfg80211_sched_dfs_chan_update(rdev); } void cfg80211_clear_ibss(struct net_device *dev, bool nowext) { struct wireless_dev *wdev = dev->ieee80211_ptr; wdev_lock(wdev); __cfg80211_clear_ibss(dev, nowext); wdev_unlock(wdev); } int __cfg80211_leave_ibss(struct cfg80211_registered_device *rdev, struct net_device *dev, bool nowext) { struct wireless_dev *wdev = dev->ieee80211_ptr; int err; ASSERT_WDEV_LOCK(wdev); if (!wdev->ssid_len) return -ENOLINK; err = rdev_leave_ibss(rdev, dev); if (err) return err; wdev->conn_owner_nlportid = 0; __cfg80211_clear_ibss(dev, nowext); return 0; } int cfg80211_leave_ibss(struct cfg80211_registered_device *rdev, struct net_device *dev, bool nowext) { struct wireless_dev *wdev = dev->ieee80211_ptr; int err; wdev_lock(wdev); err = __cfg80211_leave_ibss(rdev, dev, nowext); wdev_unlock(wdev); return err; } #ifdef CPTCFG_CFG80211_WEXT int cfg80211_ibss_wext_join(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev) { struct cfg80211_cached_keys *ck = NULL; enum nl80211_band band; int i, err; ASSERT_WDEV_LOCK(wdev); if (!wdev->wext.ibss.beacon_interval) wdev->wext.ibss.beacon_interval = 100; /* try to find an IBSS channel if none requested ... 
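* (the loop below walks all bands and settles on the first channel that is
* neither disabled nor flagged no-IR, i.e. one we may initiate on)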
*/ if (!wdev->wext.ibss.chandef.chan) { struct ieee80211_channel *new_chan = NULL; for (band = 0; band < NUM_NL80211_BANDS; band++) { struct ieee80211_supported_band *sband; struct ieee80211_channel *chan; sband = rdev->wiphy.bands[band]; if (!sband) continue; for (i = 0; i < sband->n_channels; i++) { chan = &sband->channels[i]; if (chan->flags & IEEE80211_CHAN_NO_IR) continue; if (chan->flags & IEEE80211_CHAN_DISABLED) continue; new_chan = chan; break; } if (new_chan) break; } if (!new_chan) return -EINVAL; cfg80211_chandef_create(&wdev->wext.ibss.chandef, new_chan, NL80211_CHAN_NO_HT); } /* don't join -- SSID is not there */ if (!wdev->wext.ibss.ssid_len) return 0; if (!netif_running(wdev->netdev)) return 0; if (wdev->wext.keys) wdev->wext.keys->def = wdev->wext.default_key; wdev->wext.ibss.privacy = wdev->wext.default_key != -1; if (wdev->wext.keys && wdev->wext.keys->def != -1) { ck = kmemdup(wdev->wext.keys, sizeof(*ck), GFP_KERNEL); if (!ck) return -ENOMEM; for (i = 0; i < CFG80211_MAX_WEP_KEYS; i++) ck->params[i].key = ck->data[i]; } err = __cfg80211_join_ibss(rdev, wdev->netdev, &wdev->wext.ibss, ck); if (err) kfree(ck); return err; } int cfg80211_ibss_wext_siwfreq(struct net_device *dev, struct iw_request_info *info, struct iw_freq *wextfreq, char *extra) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); struct ieee80211_channel *chan = NULL; int err, freq; /* call only for ibss! */ if (WARN_ON(wdev->iftype != NL80211_IFTYPE_ADHOC)) return -EINVAL; if (!rdev->ops->join_ibss) return -EOPNOTSUPP; freq = cfg80211_wext_freq(wextfreq); if (freq < 0) return freq; if (freq) { chan = ieee80211_get_channel(wdev->wiphy, freq); if (!chan) return -EINVAL; if (chan->flags & IEEE80211_CHAN_NO_IR || chan->flags & IEEE80211_CHAN_DISABLED) return -EINVAL; } if (wdev->wext.ibss.chandef.chan == chan) return 0; wdev_lock(wdev); err = 0; if (wdev->ssid_len) err = __cfg80211_leave_ibss(rdev, dev, true); wdev_unlock(wdev); if (err) return err; if (chan) { cfg80211_chandef_create(&wdev->wext.ibss.chandef, chan, NL80211_CHAN_NO_HT); wdev->wext.ibss.channel_fixed = true; } else { /* cfg80211_ibss_wext_join will pick one if needed */ wdev->wext.ibss.channel_fixed = false; } wdev_lock(wdev); err = cfg80211_ibss_wext_join(rdev, wdev); wdev_unlock(wdev); return err; } int cfg80211_ibss_wext_giwfreq(struct net_device *dev, struct iw_request_info *info, struct iw_freq *freq, char *extra) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct ieee80211_channel *chan = NULL; /* call only for ibss! */ if (WARN_ON(wdev->iftype != NL80211_IFTYPE_ADHOC)) return -EINVAL; wdev_lock(wdev); if (wdev->current_bss) chan = wdev->current_bss->pub.channel; else if (wdev->wext.ibss.chandef.chan) chan = wdev->wext.ibss.chandef.chan; wdev_unlock(wdev); if (chan) { freq->m = chan->center_freq; freq->e = 6; return 0; } /* no channel if not joining */ return -EINVAL; } int cfg80211_ibss_wext_siwessid(struct net_device *dev, struct iw_request_info *info, struct iw_point *data, char *ssid) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); size_t len = data->length; int err; /* call only for ibss! */ if (WARN_ON(wdev->iftype != NL80211_IFTYPE_ADHOC)) return -EINVAL; if (!rdev->ops->join_ibss) return -EOPNOTSUPP; wdev_lock(wdev); err = 0; if (wdev->ssid_len) err = __cfg80211_leave_ibss(rdev, dev, true); wdev_unlock(wdev); if (err) return err; /* iwconfig uses nul termination in SSID.. 
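* (e.g. user space may report length 4 for the SSID "abc" plus a trailing
* '\0'; the check below strips that terminator before copying)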
*/ if (len > 0 && ssid[len - 1] == '\0') len--; memcpy(wdev->ssid, ssid, len); wdev->wext.ibss.ssid = wdev->ssid; wdev->wext.ibss.ssid_len = len; wdev_lock(wdev); err = cfg80211_ibss_wext_join(rdev, wdev); wdev_unlock(wdev); return err; } int cfg80211_ibss_wext_giwessid(struct net_device *dev, struct iw_request_info *info, struct iw_point *data, char *ssid) { struct wireless_dev *wdev = dev->ieee80211_ptr; /* call only for ibss! */ if (WARN_ON(wdev->iftype != NL80211_IFTYPE_ADHOC)) return -EINVAL; data->flags = 0; wdev_lock(wdev); if (wdev->ssid_len) { data->flags = 1; data->length = wdev->ssid_len; memcpy(ssid, wdev->ssid, data->length); } else if (wdev->wext.ibss.ssid && wdev->wext.ibss.ssid_len) { data->flags = 1; data->length = wdev->wext.ibss.ssid_len; memcpy(ssid, wdev->wext.ibss.ssid, data->length); } wdev_unlock(wdev); return 0; } int cfg80211_ibss_wext_siwap(struct net_device *dev, struct iw_request_info *info, struct sockaddr *ap_addr, char *extra) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); u8 *bssid = ap_addr->sa_data; int err; /* call only for ibss! */ if (WARN_ON(wdev->iftype != NL80211_IFTYPE_ADHOC)) return -EINVAL; if (!rdev->ops->join_ibss) return -EOPNOTSUPP; if (ap_addr->sa_family != ARPHRD_ETHER) return -EINVAL; /* automatic mode */ if (is_zero_ether_addr(bssid) || is_broadcast_ether_addr(bssid)) bssid = NULL; if (bssid && !is_valid_ether_addr(bssid)) return -EINVAL; /* both automatic */ if (!bssid && !wdev->wext.ibss.bssid) return 0; /* fixed already - and no change */ if (wdev->wext.ibss.bssid && bssid && ether_addr_equal(bssid, wdev->wext.ibss.bssid)) return 0; wdev_lock(wdev); err = 0; if (wdev->ssid_len) err = __cfg80211_leave_ibss(rdev, dev, true); wdev_unlock(wdev); if (err) return err; if (bssid) { memcpy(wdev->wext.bssid, bssid, ETH_ALEN); wdev->wext.ibss.bssid = wdev->wext.bssid; } else wdev->wext.ibss.bssid = NULL; wdev_lock(wdev); err = cfg80211_ibss_wext_join(rdev, wdev); wdev_unlock(wdev); return err; } int cfg80211_ibss_wext_giwap(struct net_device *dev, struct iw_request_info *info, struct sockaddr *ap_addr, char *extra) { struct wireless_dev *wdev = dev->ieee80211_ptr; /* call only for ibss! */ if (WARN_ON(wdev->iftype != NL80211_IFTYPE_ADHOC)) return -EINVAL; ap_addr->sa_family = ARPHRD_ETHER; wdev_lock(wdev); if (wdev->current_bss) memcpy(ap_addr->sa_data, wdev->current_bss->pub.bssid, ETH_ALEN); else if (wdev->wext.ibss.bssid) memcpy(ap_addr->sa_data, wdev->wext.ibss.bssid, ETH_ALEN); else eth_zero_addr(ap_addr->sa_data); wdev_unlock(wdev); return 0; } #endif backport-iwlwifi-dkms-7906/net/wireless/lib80211.c000066400000000000000000000150701351064634500216350ustar00rootroot00000000000000/* * lib80211 -- common bits for IEEE802.11 drivers * * Copyright(c) 2008 John W. Linville * * Portions copied from old ieee80211 component, w/ original copyright * notices below: * * Host AP crypto routines * * Copyright (c) 2002-2003, Jouni Malinen * Portions Copyright (C) 2004, Intel Corporation * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include #include #include #include #include #include #include #include #define DRV_NAME "lib80211" #define DRV_DESCRIPTION "common routines for IEEE802.11 drivers" MODULE_DESCRIPTION(DRV_DESCRIPTION); MODULE_AUTHOR("John W. 
Linville "); MODULE_LICENSE("GPL"); struct lib80211_crypto_alg { struct list_head list; struct lib80211_crypto_ops *ops; }; static LIST_HEAD(lib80211_crypto_algs); static DEFINE_SPINLOCK(lib80211_crypto_lock); static void lib80211_crypt_deinit_entries(struct lib80211_crypt_info *info, int force); static void lib80211_crypt_quiescing(struct lib80211_crypt_info *info); static void lib80211_crypt_deinit_handler(struct timer_list *t); int lib80211_crypt_info_init(struct lib80211_crypt_info *info, char *name, spinlock_t *lock) { memset(info, 0, sizeof(*info)); info->name = name; info->lock = lock; INIT_LIST_HEAD(&info->crypt_deinit_list); timer_setup(&info->crypt_deinit_timer, lib80211_crypt_deinit_handler, 0); return 0; } EXPORT_SYMBOL(lib80211_crypt_info_init); void lib80211_crypt_info_free(struct lib80211_crypt_info *info) { int i; lib80211_crypt_quiescing(info); del_timer_sync(&info->crypt_deinit_timer); lib80211_crypt_deinit_entries(info, 1); for (i = 0; i < NUM_WEP_KEYS; i++) { struct lib80211_crypt_data *crypt = info->crypt[i]; if (crypt) { if (crypt->ops) { crypt->ops->deinit(crypt->priv); module_put(crypt->ops->owner); } kfree(crypt); info->crypt[i] = NULL; } } } EXPORT_SYMBOL(lib80211_crypt_info_free); static void lib80211_crypt_deinit_entries(struct lib80211_crypt_info *info, int force) { struct lib80211_crypt_data *entry, *next; unsigned long flags; spin_lock_irqsave(info->lock, flags); list_for_each_entry_safe(entry, next, &info->crypt_deinit_list, list) { if (atomic_read(&entry->refcnt) != 0 && !force) continue; list_del(&entry->list); if (entry->ops) { entry->ops->deinit(entry->priv); module_put(entry->ops->owner); } kfree(entry); } spin_unlock_irqrestore(info->lock, flags); } /* After this, crypt_deinit_list won't accept new members */ static void lib80211_crypt_quiescing(struct lib80211_crypt_info *info) { unsigned long flags; spin_lock_irqsave(info->lock, flags); info->crypt_quiesced = 1; spin_unlock_irqrestore(info->lock, flags); } static void lib80211_crypt_deinit_handler(struct timer_list *t) { struct lib80211_crypt_info *info = from_timer(info, t, crypt_deinit_timer); unsigned long flags; lib80211_crypt_deinit_entries(info, 0); spin_lock_irqsave(info->lock, flags); if (!list_empty(&info->crypt_deinit_list) && !info->crypt_quiesced) { printk(KERN_DEBUG "%s: entries remaining in delayed crypt " "deletion list\n", info->name); info->crypt_deinit_timer.expires = jiffies + HZ; add_timer(&info->crypt_deinit_timer); } spin_unlock_irqrestore(info->lock, flags); } void lib80211_crypt_delayed_deinit(struct lib80211_crypt_info *info, struct lib80211_crypt_data **crypt) { struct lib80211_crypt_data *tmp; unsigned long flags; if (*crypt == NULL) return; tmp = *crypt; *crypt = NULL; /* must not run ops->deinit() while there may be pending encrypt or * decrypt operations. Use a list of delayed deinits to avoid needing * locking. 
*/ spin_lock_irqsave(info->lock, flags); if (!info->crypt_quiesced) { list_add(&tmp->list, &info->crypt_deinit_list); if (!timer_pending(&info->crypt_deinit_timer)) { info->crypt_deinit_timer.expires = jiffies + HZ; add_timer(&info->crypt_deinit_timer); } } spin_unlock_irqrestore(info->lock, flags); } EXPORT_SYMBOL(lib80211_crypt_delayed_deinit); int lib80211_register_crypto_ops(struct lib80211_crypto_ops *ops) { unsigned long flags; struct lib80211_crypto_alg *alg; alg = kzalloc(sizeof(*alg), GFP_KERNEL); if (alg == NULL) return -ENOMEM; alg->ops = ops; spin_lock_irqsave(&lib80211_crypto_lock, flags); list_add(&alg->list, &lib80211_crypto_algs); spin_unlock_irqrestore(&lib80211_crypto_lock, flags); printk(KERN_DEBUG "lib80211_crypt: registered algorithm '%s'\n", ops->name); return 0; } EXPORT_SYMBOL(lib80211_register_crypto_ops); int lib80211_unregister_crypto_ops(struct lib80211_crypto_ops *ops) { struct lib80211_crypto_alg *alg; unsigned long flags; spin_lock_irqsave(&lib80211_crypto_lock, flags); list_for_each_entry(alg, &lib80211_crypto_algs, list) { if (alg->ops == ops) goto found; } spin_unlock_irqrestore(&lib80211_crypto_lock, flags); return -EINVAL; found: printk(KERN_DEBUG "lib80211_crypt: unregistered algorithm '%s'\n", ops->name); list_del(&alg->list); spin_unlock_irqrestore(&lib80211_crypto_lock, flags); kfree(alg); return 0; } EXPORT_SYMBOL(lib80211_unregister_crypto_ops); struct lib80211_crypto_ops *lib80211_get_crypto_ops(const char *name) { struct lib80211_crypto_alg *alg; unsigned long flags; spin_lock_irqsave(&lib80211_crypto_lock, flags); list_for_each_entry(alg, &lib80211_crypto_algs, list) { if (strcmp(alg->ops->name, name) == 0) goto found; } spin_unlock_irqrestore(&lib80211_crypto_lock, flags); return NULL; found: spin_unlock_irqrestore(&lib80211_crypto_lock, flags); return alg->ops; } EXPORT_SYMBOL(lib80211_get_crypto_ops); static void *lib80211_crypt_null_init(int keyidx) { return (void *)1; } static void lib80211_crypt_null_deinit(void *priv) { } static struct lib80211_crypto_ops lib80211_crypt_null = { .name = "NULL", .init = lib80211_crypt_null_init, .deinit = lib80211_crypt_null_deinit, .owner = THIS_MODULE, }; static int __init lib80211_init(void) { pr_info(DRV_DESCRIPTION "\n"); return lib80211_register_crypto_ops(&lib80211_crypt_null); } static void __exit lib80211_exit(void) { lib80211_unregister_crypto_ops(&lib80211_crypt_null); BUG_ON(!list_empty(&lib80211_crypto_algs)); } module_init(lib80211_init); module_exit(lib80211_exit); backport-iwlwifi-dkms-7906/net/wireless/lib80211_crypt_ccmp.c000066400000000000000000000271221351064634500240610ustar00rootroot00000000000000/* * lib80211 crypt: host-based CCMP encryption implementation for lib80211 * * Copyright (c) 2003-2004, Jouni Malinen * Copyright (c) 2008, John W. Linville * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. See README and COPYING for * more details. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include MODULE_AUTHOR("Jouni Malinen"); MODULE_DESCRIPTION("Host AP crypt: CCMP"); MODULE_LICENSE("GPL"); #define AES_BLOCK_LEN 16 #define CCMP_HDR_LEN 8 #define CCMP_MIC_LEN 8 #define CCMP_TK_LEN 16 #define CCMP_PN_LEN 6 struct lib80211_ccmp_data { u8 key[CCMP_TK_LEN]; int key_set; u8 tx_pn[CCMP_PN_LEN]; u8 rx_pn[CCMP_PN_LEN]; u32 dot11RSNAStatsCCMPFormatErrors; u32 dot11RSNAStatsCCMPReplays; u32 dot11RSNAStatsCCMPDecryptErrors; int key_idx; struct crypto_cipher *tfm; /* scratch buffers for virt_to_page() (crypto API) */ u8 tx_b0[AES_BLOCK_LEN], tx_b[AES_BLOCK_LEN], tx_e[AES_BLOCK_LEN], tx_s0[AES_BLOCK_LEN]; u8 rx_b0[AES_BLOCK_LEN], rx_b[AES_BLOCK_LEN], rx_a[AES_BLOCK_LEN]; }; static inline void lib80211_ccmp_aes_encrypt(struct crypto_cipher *tfm, const u8 pt[16], u8 ct[16]) { crypto_cipher_encrypt_one(tfm, ct, pt); } static void *lib80211_ccmp_init(int key_idx) { struct lib80211_ccmp_data *priv; priv = kzalloc(sizeof(*priv), GFP_ATOMIC); if (priv == NULL) goto fail; priv->key_idx = key_idx; priv->tfm = crypto_alloc_cipher("aes", 0, 0); if (IS_ERR(priv->tfm)) { priv->tfm = NULL; goto fail; } return priv; fail: if (priv) { if (priv->tfm) crypto_free_cipher(priv->tfm); kfree(priv); } return NULL; } static void lib80211_ccmp_deinit(void *priv) { struct lib80211_ccmp_data *_priv = priv; if (_priv && _priv->tfm) crypto_free_cipher(_priv->tfm); kfree(priv); } static inline void xor_block(u8 * b, u8 * a, size_t len) { int i; for (i = 0; i < len; i++) b[i] ^= a[i]; } static void ccmp_init_blocks(struct crypto_cipher *tfm, struct ieee80211_hdr *hdr, u8 * pn, size_t dlen, u8 * b0, u8 * auth, u8 * s0) { u8 *pos, qc = 0; size_t aad_len; int a4_included, qc_included; u8 aad[2 * AES_BLOCK_LEN]; a4_included = ieee80211_has_a4(hdr->frame_control); qc_included = ieee80211_is_data_qos(hdr->frame_control); aad_len = 22; if (a4_included) aad_len += 6; if (qc_included) { pos = (u8 *) & hdr->addr4; if (a4_included) pos += 6; qc = *pos & 0x0f; aad_len += 2; } /* CCM Initial Block: * Flag (Include authentication header, M=3 (8-octet MIC), * L=1 (2-octet Dlen)) * Nonce: 0x00 | A2 | PN * Dlen */ b0[0] = 0x59; b0[1] = qc; memcpy(b0 + 2, hdr->addr2, ETH_ALEN); memcpy(b0 + 8, pn, CCMP_PN_LEN); b0[14] = (dlen >> 8) & 0xff; b0[15] = dlen & 0xff; /* AAD: * FC with bits 4..6 and 11..13 masked to zero; 14 is always one * A1 | A2 | A3 * SC with bits 4..15 (seq#) masked to zero * A4 (if present) * QC (if present) */ pos = (u8 *) hdr; aad[0] = 0; /* aad_len >> 8 */ aad[1] = aad_len & 0xff; aad[2] = pos[0] & 0x8f; aad[3] = pos[1] & 0xc7; memcpy(aad + 4, hdr->addr1, 3 * ETH_ALEN); pos = (u8 *) & hdr->seq_ctrl; aad[22] = pos[0] & 0x0f; aad[23] = 0; /* all bits masked */ memset(aad + 24, 0, 8); if (a4_included) memcpy(aad + 24, hdr->addr4, ETH_ALEN); if (qc_included) { aad[a4_included ? 
30 : 24] = qc; /* rest of QC masked */ } /* Start with the first block and AAD */ lib80211_ccmp_aes_encrypt(tfm, b0, auth); xor_block(auth, aad, AES_BLOCK_LEN); lib80211_ccmp_aes_encrypt(tfm, auth, auth); xor_block(auth, &aad[AES_BLOCK_LEN], AES_BLOCK_LEN); lib80211_ccmp_aes_encrypt(tfm, auth, auth); b0[0] &= 0x07; b0[14] = b0[15] = 0; lib80211_ccmp_aes_encrypt(tfm, b0, s0); } static int lib80211_ccmp_hdr(struct sk_buff *skb, int hdr_len, u8 *aeskey, int keylen, void *priv) { struct lib80211_ccmp_data *key = priv; int i; u8 *pos; if (skb_headroom(skb) < CCMP_HDR_LEN || skb->len < hdr_len) return -1; if (aeskey != NULL && keylen >= CCMP_TK_LEN) memcpy(aeskey, key->key, CCMP_TK_LEN); pos = skb_push(skb, CCMP_HDR_LEN); memmove(pos, pos + CCMP_HDR_LEN, hdr_len); pos += hdr_len; i = CCMP_PN_LEN - 1; while (i >= 0) { key->tx_pn[i]++; if (key->tx_pn[i] != 0) break; i--; } *pos++ = key->tx_pn[5]; *pos++ = key->tx_pn[4]; *pos++ = 0; *pos++ = (key->key_idx << 6) | (1 << 5) /* Ext IV included */ ; *pos++ = key->tx_pn[3]; *pos++ = key->tx_pn[2]; *pos++ = key->tx_pn[1]; *pos++ = key->tx_pn[0]; return CCMP_HDR_LEN; } static int lib80211_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv) { struct lib80211_ccmp_data *key = priv; int data_len, i, blocks, last, len; u8 *pos, *mic; struct ieee80211_hdr *hdr; u8 *b0 = key->tx_b0; u8 *b = key->tx_b; u8 *e = key->tx_e; u8 *s0 = key->tx_s0; if (skb_tailroom(skb) < CCMP_MIC_LEN || skb->len < hdr_len) return -1; data_len = skb->len - hdr_len; len = lib80211_ccmp_hdr(skb, hdr_len, NULL, 0, priv); if (len < 0) return -1; pos = skb->data + hdr_len + CCMP_HDR_LEN; hdr = (struct ieee80211_hdr *)skb->data; ccmp_init_blocks(key->tfm, hdr, key->tx_pn, data_len, b0, b, s0); blocks = DIV_ROUND_UP(data_len, AES_BLOCK_LEN); last = data_len % AES_BLOCK_LEN; for (i = 1; i <= blocks; i++) { len = (i == blocks && last) ? last : AES_BLOCK_LEN; /* Authentication */ xor_block(b, pos, len); lib80211_ccmp_aes_encrypt(key->tfm, b, b); /* Encryption, with counter */ b0[14] = (i >> 8) & 0xff; b0[15] = i & 0xff; lib80211_ccmp_aes_encrypt(key->tfm, b0, e); xor_block(pos, e, len); pos += len; } mic = skb_put(skb, CCMP_MIC_LEN); for (i = 0; i < CCMP_MIC_LEN; i++) mic[i] = b[i] ^ s0[i]; return 0; } /* * deal with seq counter wrapping correctly. 
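* (illustrative: the 48-bit PN is split into a 32-bit high word and a
* 16-bit low word; the signed subtraction keeps the ordering correct
* across a wrap, e.g. old iv32 0xffffffff vs. new iv32 0x0 yields
* (s32)0 - (s32)0xffffffff == 1 > 0, so the new PN still counts as newer)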
* refer to time_after() for jiffies wrapping handling */ static inline int ccmp_replay_check(u8 *pn_n, u8 *pn_o) { u32 iv32_n, iv16_n; u32 iv32_o, iv16_o; iv32_n = (pn_n[0] << 24) | (pn_n[1] << 16) | (pn_n[2] << 8) | pn_n[3]; iv16_n = (pn_n[4] << 8) | pn_n[5]; iv32_o = (pn_o[0] << 24) | (pn_o[1] << 16) | (pn_o[2] << 8) | pn_o[3]; iv16_o = (pn_o[4] << 8) | pn_o[5]; if ((s32)iv32_n - (s32)iv32_o < 0 || (iv32_n == iv32_o && iv16_n <= iv16_o)) return 1; return 0; } static int lib80211_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv) { struct lib80211_ccmp_data *key = priv; u8 keyidx, *pos; struct ieee80211_hdr *hdr; u8 *b0 = key->rx_b0; u8 *b = key->rx_b; u8 *a = key->rx_a; u8 pn[6]; int i, blocks, last, len; size_t data_len = skb->len - hdr_len - CCMP_HDR_LEN - CCMP_MIC_LEN; u8 *mic = skb->data + skb->len - CCMP_MIC_LEN; if (skb->len < hdr_len + CCMP_HDR_LEN + CCMP_MIC_LEN) { key->dot11RSNAStatsCCMPFormatErrors++; return -1; } hdr = (struct ieee80211_hdr *)skb->data; pos = skb->data + hdr_len; keyidx = pos[3]; if (!(keyidx & (1 << 5))) { net_dbg_ratelimited("CCMP: received packet without ExtIV flag from %pM\n", hdr->addr2); key->dot11RSNAStatsCCMPFormatErrors++; return -2; } keyidx >>= 6; if (key->key_idx != keyidx) { net_dbg_ratelimited("CCMP: RX tkey->key_idx=%d frame keyidx=%d\n", key->key_idx, keyidx); return -6; } if (!key->key_set) { net_dbg_ratelimited("CCMP: received packet from %pM with keyid=%d that does not have a configured key\n", hdr->addr2, keyidx); return -3; } pn[0] = pos[7]; pn[1] = pos[6]; pn[2] = pos[5]; pn[3] = pos[4]; pn[4] = pos[1]; pn[5] = pos[0]; pos += 8; if (ccmp_replay_check(pn, key->rx_pn)) { #ifdef CPTCFG_LIB80211_DEBUG net_dbg_ratelimited("CCMP: replay detected: STA=%pM previous PN %02x%02x%02x%02x%02x%02x received PN %02x%02x%02x%02x%02x%02x\n", hdr->addr2, key->rx_pn[0], key->rx_pn[1], key->rx_pn[2], key->rx_pn[3], key->rx_pn[4], key->rx_pn[5], pn[0], pn[1], pn[2], pn[3], pn[4], pn[5]); #endif key->dot11RSNAStatsCCMPReplays++; return -4; } ccmp_init_blocks(key->tfm, hdr, pn, data_len, b0, a, b); xor_block(mic, b, CCMP_MIC_LEN); blocks = DIV_ROUND_UP(data_len, AES_BLOCK_LEN); last = data_len % AES_BLOCK_LEN; for (i = 1; i <= blocks; i++) { len = (i == blocks && last) ? 
last : AES_BLOCK_LEN; /* Decrypt, with counter */ b0[14] = (i >> 8) & 0xff; b0[15] = i & 0xff; lib80211_ccmp_aes_encrypt(key->tfm, b0, b); xor_block(pos, b, len); /* Authentication */ xor_block(a, pos, len); lib80211_ccmp_aes_encrypt(key->tfm, a, a); pos += len; } if (memcmp(mic, a, CCMP_MIC_LEN) != 0) { net_dbg_ratelimited("CCMP: decrypt failed: STA=%pM\n", hdr->addr2); key->dot11RSNAStatsCCMPDecryptErrors++; return -5; } memcpy(key->rx_pn, pn, CCMP_PN_LEN); /* Remove hdr and MIC */ memmove(skb->data + CCMP_HDR_LEN, skb->data, hdr_len); skb_pull(skb, CCMP_HDR_LEN); skb_trim(skb, skb->len - CCMP_MIC_LEN); return keyidx; } static int lib80211_ccmp_set_key(void *key, int len, u8 * seq, void *priv) { struct lib80211_ccmp_data *data = priv; int keyidx; struct crypto_cipher *tfm = data->tfm; keyidx = data->key_idx; memset(data, 0, sizeof(*data)); data->key_idx = keyidx; data->tfm = tfm; if (len == CCMP_TK_LEN) { memcpy(data->key, key, CCMP_TK_LEN); data->key_set = 1; if (seq) { data->rx_pn[0] = seq[5]; data->rx_pn[1] = seq[4]; data->rx_pn[2] = seq[3]; data->rx_pn[3] = seq[2]; data->rx_pn[4] = seq[1]; data->rx_pn[5] = seq[0]; } crypto_cipher_setkey(data->tfm, data->key, CCMP_TK_LEN); } else if (len == 0) data->key_set = 0; else return -1; return 0; } static int lib80211_ccmp_get_key(void *key, int len, u8 * seq, void *priv) { struct lib80211_ccmp_data *data = priv; if (len < CCMP_TK_LEN) return -1; if (!data->key_set) return 0; memcpy(key, data->key, CCMP_TK_LEN); if (seq) { seq[0] = data->tx_pn[5]; seq[1] = data->tx_pn[4]; seq[2] = data->tx_pn[3]; seq[3] = data->tx_pn[2]; seq[4] = data->tx_pn[1]; seq[5] = data->tx_pn[0]; } return CCMP_TK_LEN; } static void lib80211_ccmp_print_stats(struct seq_file *m, void *priv) { struct lib80211_ccmp_data *ccmp = priv; seq_printf(m, "key[%d] alg=CCMP key_set=%d " "tx_pn=%02x%02x%02x%02x%02x%02x " "rx_pn=%02x%02x%02x%02x%02x%02x " "format_errors=%d replays=%d decrypt_errors=%d\n", ccmp->key_idx, ccmp->key_set, ccmp->tx_pn[0], ccmp->tx_pn[1], ccmp->tx_pn[2], ccmp->tx_pn[3], ccmp->tx_pn[4], ccmp->tx_pn[5], ccmp->rx_pn[0], ccmp->rx_pn[1], ccmp->rx_pn[2], ccmp->rx_pn[3], ccmp->rx_pn[4], ccmp->rx_pn[5], ccmp->dot11RSNAStatsCCMPFormatErrors, ccmp->dot11RSNAStatsCCMPReplays, ccmp->dot11RSNAStatsCCMPDecryptErrors); } static struct lib80211_crypto_ops lib80211_crypt_ccmp = { .name = "CCMP", .init = lib80211_ccmp_init, .deinit = lib80211_ccmp_deinit, .encrypt_mpdu = lib80211_ccmp_encrypt, .decrypt_mpdu = lib80211_ccmp_decrypt, .encrypt_msdu = NULL, .decrypt_msdu = NULL, .set_key = lib80211_ccmp_set_key, .get_key = lib80211_ccmp_get_key, .print_stats = lib80211_ccmp_print_stats, .extra_mpdu_prefix_len = CCMP_HDR_LEN, .extra_mpdu_postfix_len = CCMP_MIC_LEN, .owner = THIS_MODULE, }; static int __init lib80211_crypto_ccmp_init(void) { return lib80211_register_crypto_ops(&lib80211_crypt_ccmp); } static void __exit lib80211_crypto_ccmp_exit(void) { lib80211_unregister_crypto_ops(&lib80211_crypt_ccmp); } module_init(lib80211_crypto_ccmp_init); module_exit(lib80211_crypto_ccmp_exit); backport-iwlwifi-dkms-7906/net/wireless/lib80211_crypt_tkip.c000066400000000000000000000476451351064634500241220ustar00rootroot00000000000000/* * lib80211 crypt: host-based TKIP encryption implementation for lib80211 * * Copyright (c) 2003-2004, Jouni Malinen * Copyright (c) 2008, John W. Linville * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. 
See README and COPYING for * more details. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include MODULE_AUTHOR("Jouni Malinen"); MODULE_DESCRIPTION("lib80211 crypt: TKIP"); MODULE_LICENSE("GPL"); #define TKIP_HDR_LEN 8 struct lib80211_tkip_data { #define TKIP_KEY_LEN 32 u8 key[TKIP_KEY_LEN]; int key_set; u32 tx_iv32; u16 tx_iv16; u16 tx_ttak[5]; int tx_phase1_done; u32 rx_iv32; u16 rx_iv16; u16 rx_ttak[5]; int rx_phase1_done; u32 rx_iv32_new; u16 rx_iv16_new; u32 dot11RSNAStatsTKIPReplays; u32 dot11RSNAStatsTKIPICVErrors; u32 dot11RSNAStatsTKIPLocalMICFailures; int key_idx; struct crypto_cipher *rx_tfm_arc4; struct crypto_shash *rx_tfm_michael; struct crypto_cipher *tx_tfm_arc4; struct crypto_shash *tx_tfm_michael; /* scratch buffers for virt_to_page() (crypto API) */ u8 rx_hdr[16], tx_hdr[16]; unsigned long flags; }; static unsigned long lib80211_tkip_set_flags(unsigned long flags, void *priv) { struct lib80211_tkip_data *_priv = priv; unsigned long old_flags = _priv->flags; _priv->flags = flags; return old_flags; } static unsigned long lib80211_tkip_get_flags(void *priv) { struct lib80211_tkip_data *_priv = priv; return _priv->flags; } static void *lib80211_tkip_init(int key_idx) { struct lib80211_tkip_data *priv; priv = kzalloc(sizeof(*priv), GFP_ATOMIC); if (priv == NULL) goto fail; priv->key_idx = key_idx; priv->tx_tfm_arc4 = crypto_alloc_cipher("arc4", 0, 0); if (IS_ERR(priv->tx_tfm_arc4)) { priv->tx_tfm_arc4 = NULL; goto fail; } priv->tx_tfm_michael = crypto_alloc_shash("michael_mic", 0, 0); if (IS_ERR(priv->tx_tfm_michael)) { priv->tx_tfm_michael = NULL; goto fail; } priv->rx_tfm_arc4 = crypto_alloc_cipher("arc4", 0, 0); if (IS_ERR(priv->rx_tfm_arc4)) { priv->rx_tfm_arc4 = NULL; goto fail; } priv->rx_tfm_michael = crypto_alloc_shash("michael_mic", 0, 0); if (IS_ERR(priv->rx_tfm_michael)) { priv->rx_tfm_michael = NULL; goto fail; } return priv; fail: if (priv) { crypto_free_shash(priv->tx_tfm_michael); crypto_free_cipher(priv->tx_tfm_arc4); crypto_free_shash(priv->rx_tfm_michael); crypto_free_cipher(priv->rx_tfm_arc4); kfree(priv); } return NULL; } static void lib80211_tkip_deinit(void *priv) { struct lib80211_tkip_data *_priv = priv; if (_priv) { crypto_free_shash(_priv->tx_tfm_michael); crypto_free_cipher(_priv->tx_tfm_arc4); crypto_free_shash(_priv->rx_tfm_michael); crypto_free_cipher(_priv->rx_tfm_arc4); } kfree(priv); } static inline u16 RotR1(u16 val) { return (val >> 1) | (val << 15); } static inline u8 Lo8(u16 val) { return val & 0xff; } static inline u8 Hi8(u16 val) { return val >> 8; } static inline u16 Lo16(u32 val) { return val & 0xffff; } static inline u16 Hi16(u32 val) { return val >> 16; } static inline u16 Mk16(u8 hi, u8 lo) { return lo | (((u16) hi) << 8); } static inline u16 Mk16_le(__le16 * v) { return le16_to_cpu(*v); } static const u16 Sbox[256] = { 0xC6A5, 0xF884, 0xEE99, 0xF68D, 0xFF0D, 0xD6BD, 0xDEB1, 0x9154, 0x6050, 0x0203, 0xCEA9, 0x567D, 0xE719, 0xB562, 0x4DE6, 0xEC9A, 0x8F45, 0x1F9D, 0x8940, 0xFA87, 0xEF15, 0xB2EB, 0x8EC9, 0xFB0B, 0x41EC, 0xB367, 0x5FFD, 0x45EA, 0x23BF, 0x53F7, 0xE496, 0x9B5B, 0x75C2, 0xE11C, 0x3DAE, 0x4C6A, 0x6C5A, 0x7E41, 0xF502, 0x834F, 0x685C, 0x51F4, 0xD134, 0xF908, 0xE293, 0xAB73, 0x6253, 0x2A3F, 0x080C, 0x9552, 0x4665, 0x9D5E, 0x3028, 0x37A1, 0x0A0F, 0x2FB5, 0x0E09, 0x2436, 0x1B9B, 0xDF3D, 0xCD26, 0x4E69, 0x7FCD, 0xEA9F, 0x121B, 0x1D9E, 0x5874, 0x342E, 0x362D, 0xDCB2, 
0xB4EE, 0x5BFB, 0xA4F6, 0x764D, 0xB761, 0x7DCE, 0x527B, 0xDD3E, 0x5E71, 0x1397, 0xA6F5, 0xB968, 0x0000, 0xC12C, 0x4060, 0xE31F, 0x79C8, 0xB6ED, 0xD4BE, 0x8D46, 0x67D9, 0x724B, 0x94DE, 0x98D4, 0xB0E8, 0x854A, 0xBB6B, 0xC52A, 0x4FE5, 0xED16, 0x86C5, 0x9AD7, 0x6655, 0x1194, 0x8ACF, 0xE910, 0x0406, 0xFE81, 0xA0F0, 0x7844, 0x25BA, 0x4BE3, 0xA2F3, 0x5DFE, 0x80C0, 0x058A, 0x3FAD, 0x21BC, 0x7048, 0xF104, 0x63DF, 0x77C1, 0xAF75, 0x4263, 0x2030, 0xE51A, 0xFD0E, 0xBF6D, 0x814C, 0x1814, 0x2635, 0xC32F, 0xBEE1, 0x35A2, 0x88CC, 0x2E39, 0x9357, 0x55F2, 0xFC82, 0x7A47, 0xC8AC, 0xBAE7, 0x322B, 0xE695, 0xC0A0, 0x1998, 0x9ED1, 0xA37F, 0x4466, 0x547E, 0x3BAB, 0x0B83, 0x8CCA, 0xC729, 0x6BD3, 0x283C, 0xA779, 0xBCE2, 0x161D, 0xAD76, 0xDB3B, 0x6456, 0x744E, 0x141E, 0x92DB, 0x0C0A, 0x486C, 0xB8E4, 0x9F5D, 0xBD6E, 0x43EF, 0xC4A6, 0x39A8, 0x31A4, 0xD337, 0xF28B, 0xD532, 0x8B43, 0x6E59, 0xDAB7, 0x018C, 0xB164, 0x9CD2, 0x49E0, 0xD8B4, 0xACFA, 0xF307, 0xCF25, 0xCAAF, 0xF48E, 0x47E9, 0x1018, 0x6FD5, 0xF088, 0x4A6F, 0x5C72, 0x3824, 0x57F1, 0x73C7, 0x9751, 0xCB23, 0xA17C, 0xE89C, 0x3E21, 0x96DD, 0x61DC, 0x0D86, 0x0F85, 0xE090, 0x7C42, 0x71C4, 0xCCAA, 0x90D8, 0x0605, 0xF701, 0x1C12, 0xC2A3, 0x6A5F, 0xAEF9, 0x69D0, 0x1791, 0x9958, 0x3A27, 0x27B9, 0xD938, 0xEB13, 0x2BB3, 0x2233, 0xD2BB, 0xA970, 0x0789, 0x33A7, 0x2DB6, 0x3C22, 0x1592, 0xC920, 0x8749, 0xAAFF, 0x5078, 0xA57A, 0x038F, 0x59F8, 0x0980, 0x1A17, 0x65DA, 0xD731, 0x84C6, 0xD0B8, 0x82C3, 0x29B0, 0x5A77, 0x1E11, 0x7BCB, 0xA8FC, 0x6DD6, 0x2C3A, }; static inline u16 _S_(u16 v) { u16 t = Sbox[Hi8(v)]; return Sbox[Lo8(v)] ^ ((t << 8) | (t >> 8)); } #define PHASE1_LOOP_COUNT 8 static void tkip_mixing_phase1(u16 * TTAK, const u8 * TK, const u8 * TA, u32 IV32) { int i, j; /* Initialize the 80-bit TTAK from TSC (IV32) and TA[0..5] */ TTAK[0] = Lo16(IV32); TTAK[1] = Hi16(IV32); TTAK[2] = Mk16(TA[1], TA[0]); TTAK[3] = Mk16(TA[3], TA[2]); TTAK[4] = Mk16(TA[5], TA[4]); for (i = 0; i < PHASE1_LOOP_COUNT; i++) { j = 2 * (i & 1); TTAK[0] += _S_(TTAK[4] ^ Mk16(TK[1 + j], TK[0 + j])); TTAK[1] += _S_(TTAK[0] ^ Mk16(TK[5 + j], TK[4 + j])); TTAK[2] += _S_(TTAK[1] ^ Mk16(TK[9 + j], TK[8 + j])); TTAK[3] += _S_(TTAK[2] ^ Mk16(TK[13 + j], TK[12 + j])); TTAK[4] += _S_(TTAK[3] ^ Mk16(TK[1 + j], TK[0 + j])) + i; } } static void tkip_mixing_phase2(u8 * WEPSeed, const u8 * TK, const u16 * TTAK, u16 IV16) { /* Make temporary area overlap WEP seed so that the final copy can be * avoided on little endian hosts. 
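* (PPK is aliased onto &WEPSeed[4] below, so the 96-bit mixing output is
* produced in place inside the RC4 seed; the #ifdef __BIG_ENDIAN block at
* the end byte-swaps it where that shortcut does not apply)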
*/ u16 *PPK = (u16 *) & WEPSeed[4]; /* Step 1 - make copy of TTAK and bring in TSC */ PPK[0] = TTAK[0]; PPK[1] = TTAK[1]; PPK[2] = TTAK[2]; PPK[3] = TTAK[3]; PPK[4] = TTAK[4]; PPK[5] = TTAK[4] + IV16; /* Step 2 - 96-bit bijective mixing using S-box */ PPK[0] += _S_(PPK[5] ^ Mk16_le((__le16 *) & TK[0])); PPK[1] += _S_(PPK[0] ^ Mk16_le((__le16 *) & TK[2])); PPK[2] += _S_(PPK[1] ^ Mk16_le((__le16 *) & TK[4])); PPK[3] += _S_(PPK[2] ^ Mk16_le((__le16 *) & TK[6])); PPK[4] += _S_(PPK[3] ^ Mk16_le((__le16 *) & TK[8])); PPK[5] += _S_(PPK[4] ^ Mk16_le((__le16 *) & TK[10])); PPK[0] += RotR1(PPK[5] ^ Mk16_le((__le16 *) & TK[12])); PPK[1] += RotR1(PPK[0] ^ Mk16_le((__le16 *) & TK[14])); PPK[2] += RotR1(PPK[1]); PPK[3] += RotR1(PPK[2]); PPK[4] += RotR1(PPK[3]); PPK[5] += RotR1(PPK[4]); /* Step 3 - bring in last of TK bits, assign 24-bit WEP IV value * WEPSeed[0..2] is transmitted as WEP IV */ WEPSeed[0] = Hi8(IV16); WEPSeed[1] = (Hi8(IV16) | 0x20) & 0x7F; WEPSeed[2] = Lo8(IV16); WEPSeed[3] = Lo8((PPK[5] ^ Mk16_le((__le16 *) & TK[0])) >> 1); #ifdef __BIG_ENDIAN { int i; for (i = 0; i < 6; i++) PPK[i] = (PPK[i] << 8) | (PPK[i] >> 8); } #endif } static int lib80211_tkip_hdr(struct sk_buff *skb, int hdr_len, u8 * rc4key, int keylen, void *priv) { struct lib80211_tkip_data *tkey = priv; u8 *pos; struct ieee80211_hdr *hdr; hdr = (struct ieee80211_hdr *)skb->data; if (skb_headroom(skb) < TKIP_HDR_LEN || skb->len < hdr_len) return -1; if (rc4key == NULL || keylen < 16) return -1; if (!tkey->tx_phase1_done) { tkip_mixing_phase1(tkey->tx_ttak, tkey->key, hdr->addr2, tkey->tx_iv32); tkey->tx_phase1_done = 1; } tkip_mixing_phase2(rc4key, tkey->key, tkey->tx_ttak, tkey->tx_iv16); pos = skb_push(skb, TKIP_HDR_LEN); memmove(pos, pos + TKIP_HDR_LEN, hdr_len); pos += hdr_len; *pos++ = *rc4key; *pos++ = *(rc4key + 1); *pos++ = *(rc4key + 2); *pos++ = (tkey->key_idx << 6) | (1 << 5) /* Ext IV included */ ; *pos++ = tkey->tx_iv32 & 0xff; *pos++ = (tkey->tx_iv32 >> 8) & 0xff; *pos++ = (tkey->tx_iv32 >> 16) & 0xff; *pos++ = (tkey->tx_iv32 >> 24) & 0xff; tkey->tx_iv16++; if (tkey->tx_iv16 == 0) { tkey->tx_phase1_done = 0; tkey->tx_iv32++; } return TKIP_HDR_LEN; } static int lib80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv) { struct lib80211_tkip_data *tkey = priv; int len; u8 rc4key[16], *pos, *icv; u32 crc; int i; if (tkey->flags & IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; net_dbg_ratelimited("TKIP countermeasures: dropped TX packet to %pM\n", hdr->addr1); return -1; } if (skb_tailroom(skb) < 4 || skb->len < hdr_len) return -1; len = skb->len - hdr_len; pos = skb->data + hdr_len; if ((lib80211_tkip_hdr(skb, hdr_len, rc4key, 16, priv)) < 0) return -1; crc = ~crc32_le(~0, pos, len); icv = skb_put(skb, 4); icv[0] = crc; icv[1] = crc >> 8; icv[2] = crc >> 16; icv[3] = crc >> 24; crypto_cipher_setkey(tkey->tx_tfm_arc4, rc4key, 16); for (i = 0; i < len + 4; i++) crypto_cipher_encrypt_one(tkey->tx_tfm_arc4, pos + i, pos + i); return 0; } /* * deal with seq counter wrapping correctly. 
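* (same scheme as the CCMP replay check earlier in this archive: compare
* iv32 with signed arithmetic so a wrapped counter still orders
* correctly, then break iv32 ties with iv16)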
* refer to time_after() for jiffies wrapping handling */ static inline int tkip_replay_check(u32 iv32_n, u16 iv16_n, u32 iv32_o, u16 iv16_o) { if ((s32)iv32_n - (s32)iv32_o < 0 || (iv32_n == iv32_o && iv16_n <= iv16_o)) return 1; return 0; } static int lib80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv) { struct lib80211_tkip_data *tkey = priv; u8 rc4key[16]; u8 keyidx, *pos; u32 iv32; u16 iv16; struct ieee80211_hdr *hdr; u8 icv[4]; u32 crc; int plen; int i; hdr = (struct ieee80211_hdr *)skb->data; if (tkey->flags & IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) { net_dbg_ratelimited("TKIP countermeasures: dropped received packet from %pM\n", hdr->addr2); return -1; } if (skb->len < hdr_len + TKIP_HDR_LEN + 4) return -1; pos = skb->data + hdr_len; keyidx = pos[3]; if (!(keyidx & (1 << 5))) { net_dbg_ratelimited("TKIP: received packet without ExtIV flag from %pM\n", hdr->addr2); return -2; } keyidx >>= 6; if (tkey->key_idx != keyidx) { net_dbg_ratelimited("TKIP: RX tkey->key_idx=%d frame keyidx=%d\n", tkey->key_idx, keyidx); return -6; } if (!tkey->key_set) { net_dbg_ratelimited("TKIP: received packet from %pM with keyid=%d that does not have a configured key\n", hdr->addr2, keyidx); return -3; } iv16 = (pos[0] << 8) | pos[2]; iv32 = pos[4] | (pos[5] << 8) | (pos[6] << 16) | (pos[7] << 24); pos += TKIP_HDR_LEN; if (tkip_replay_check(iv32, iv16, tkey->rx_iv32, tkey->rx_iv16)) { #ifdef CPTCFG_LIB80211_DEBUG net_dbg_ratelimited("TKIP: replay detected: STA=%pM previous TSC %08x%04x received TSC %08x%04x\n", hdr->addr2, tkey->rx_iv32, tkey->rx_iv16, iv32, iv16); #endif tkey->dot11RSNAStatsTKIPReplays++; return -4; } if (iv32 != tkey->rx_iv32 || !tkey->rx_phase1_done) { tkip_mixing_phase1(tkey->rx_ttak, tkey->key, hdr->addr2, iv32); tkey->rx_phase1_done = 1; } tkip_mixing_phase2(rc4key, tkey->key, tkey->rx_ttak, iv16); plen = skb->len - hdr_len - 12; crypto_cipher_setkey(tkey->rx_tfm_arc4, rc4key, 16); for (i = 0; i < plen + 4; i++) crypto_cipher_decrypt_one(tkey->rx_tfm_arc4, pos + i, pos + i); crc = ~crc32_le(~0, pos, plen); icv[0] = crc; icv[1] = crc >> 8; icv[2] = crc >> 16; icv[3] = crc >> 24; if (memcmp(icv, pos + plen, 4) != 0) { if (iv32 != tkey->rx_iv32) { /* Previously cached Phase1 result was already lost, so * it needs to be recalculated for the next packet. 
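* (phase 1 depends only on the TA and IV32, so its output is normally
* cached across all 65536 IV16 values; the bad frame above clobbered
* that cache, hence the forced recompute)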
*/ tkey->rx_phase1_done = 0; } #ifdef CPTCFG_LIB80211_DEBUG net_dbg_ratelimited("TKIP: ICV error detected: STA=%pM\n", hdr->addr2); #endif tkey->dot11RSNAStatsTKIPICVErrors++; return -5; } /* Update real counters only after Michael MIC verification has * completed */ tkey->rx_iv32_new = iv32; tkey->rx_iv16_new = iv16; /* Remove IV and ICV */ memmove(skb->data + TKIP_HDR_LEN, skb->data, hdr_len); skb_pull(skb, TKIP_HDR_LEN); skb_trim(skb, skb->len - 4); return keyidx; } static int michael_mic(struct crypto_shash *tfm_michael, u8 *key, u8 *hdr, u8 *data, size_t data_len, u8 *mic) { SHASH_DESC_ON_STACK(desc, tfm_michael); int err; if (tfm_michael == NULL) { pr_warn("%s(): tfm_michael == NULL\n", __func__); return -1; } desc->tfm = tfm_michael; desc->flags = 0; if (crypto_shash_setkey(tfm_michael, key, 8)) return -1; err = crypto_shash_init(desc); if (err) goto out; err = crypto_shash_update(desc, hdr, 16); if (err) goto out; err = crypto_shash_update(desc, data, data_len); if (err) goto out; err = crypto_shash_final(desc, mic); out: shash_desc_zero(desc); return err; } static void michael_mic_hdr(struct sk_buff *skb, u8 * hdr) { struct ieee80211_hdr *hdr11; hdr11 = (struct ieee80211_hdr *)skb->data; switch (le16_to_cpu(hdr11->frame_control) & (IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS)) { case IEEE80211_FCTL_TODS: memcpy(hdr, hdr11->addr3, ETH_ALEN); /* DA */ memcpy(hdr + ETH_ALEN, hdr11->addr2, ETH_ALEN); /* SA */ break; case IEEE80211_FCTL_FROMDS: memcpy(hdr, hdr11->addr1, ETH_ALEN); /* DA */ memcpy(hdr + ETH_ALEN, hdr11->addr3, ETH_ALEN); /* SA */ break; case IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS: memcpy(hdr, hdr11->addr3, ETH_ALEN); /* DA */ memcpy(hdr + ETH_ALEN, hdr11->addr4, ETH_ALEN); /* SA */ break; default: memcpy(hdr, hdr11->addr1, ETH_ALEN); /* DA */ memcpy(hdr + ETH_ALEN, hdr11->addr2, ETH_ALEN); /* SA */ break; } if (ieee80211_is_data_qos(hdr11->frame_control)) { hdr[12] = le16_to_cpu(*((__le16 *)ieee80211_get_qos_ctl(hdr11))) & IEEE80211_QOS_CTL_TID_MASK; } else hdr[12] = 0; /* priority */ hdr[13] = hdr[14] = hdr[15] = 0; /* reserved */ } static int lib80211_michael_mic_add(struct sk_buff *skb, int hdr_len, void *priv) { struct lib80211_tkip_data *tkey = priv; u8 *pos; if (skb_tailroom(skb) < 8 || skb->len < hdr_len) { printk(KERN_DEBUG "Invalid packet for Michael MIC add " "(tailroom=%d hdr_len=%d skb->len=%d)\n", skb_tailroom(skb), hdr_len, skb->len); return -1; } michael_mic_hdr(skb, tkey->tx_hdr); pos = skb_put(skb, 8); if (michael_mic(tkey->tx_tfm_michael, &tkey->key[16], tkey->tx_hdr, skb->data + hdr_len, skb->len - 8 - hdr_len, pos)) return -1; return 0; } static void lib80211_michael_mic_failure(struct net_device *dev, struct ieee80211_hdr *hdr, int keyidx) { union iwreq_data wrqu; struct iw_michaelmicfailure ev; /* TODO: needed parameters: count, keyid, key type, TSC */ memset(&ev, 0, sizeof(ev)); ev.flags = keyidx & IW_MICFAILURE_KEY_ID; if (hdr->addr1[0] & 0x01) ev.flags |= IW_MICFAILURE_GROUP; else ev.flags |= IW_MICFAILURE_PAIRWISE; ev.src_addr.sa_family = ARPHRD_ETHER; memcpy(ev.src_addr.sa_data, hdr->addr2, ETH_ALEN); memset(&wrqu, 0, sizeof(wrqu)); wrqu.data.length = sizeof(ev); wireless_send_event(dev, IWEVMICHAELMICFAILURE, &wrqu, (char *)&ev); } static int lib80211_michael_mic_verify(struct sk_buff *skb, int keyidx, int hdr_len, void *priv) { struct lib80211_tkip_data *tkey = priv; u8 mic[8]; if (!tkey->key_set) return -1; michael_mic_hdr(skb, tkey->rx_hdr); if (michael_mic(tkey->rx_tfm_michael, &tkey->key[24], tkey->rx_hdr, skb->data + hdr_len, 
skb->len - 8 - hdr_len, mic)) return -1; if (memcmp(mic, skb->data + skb->len - 8, 8) != 0) { struct ieee80211_hdr *hdr; hdr = (struct ieee80211_hdr *)skb->data; printk(KERN_DEBUG "%s: Michael MIC verification failed for " "MSDU from %pM keyidx=%d\n", skb->dev ? skb->dev->name : "N/A", hdr->addr2, keyidx); if (skb->dev) lib80211_michael_mic_failure(skb->dev, hdr, keyidx); tkey->dot11RSNAStatsTKIPLocalMICFailures++; return -1; } /* Update TSC counters for RX now that the packet verification has * completed. */ tkey->rx_iv32 = tkey->rx_iv32_new; tkey->rx_iv16 = tkey->rx_iv16_new; skb_trim(skb, skb->len - 8); return 0; } static int lib80211_tkip_set_key(void *key, int len, u8 * seq, void *priv) { struct lib80211_tkip_data *tkey = priv; int keyidx; struct crypto_shash *tfm = tkey->tx_tfm_michael; struct crypto_cipher *tfm2 = tkey->tx_tfm_arc4; struct crypto_shash *tfm3 = tkey->rx_tfm_michael; struct crypto_cipher *tfm4 = tkey->rx_tfm_arc4; keyidx = tkey->key_idx; memset(tkey, 0, sizeof(*tkey)); tkey->key_idx = keyidx; tkey->tx_tfm_michael = tfm; tkey->tx_tfm_arc4 = tfm2; tkey->rx_tfm_michael = tfm3; tkey->rx_tfm_arc4 = tfm4; if (len == TKIP_KEY_LEN) { memcpy(tkey->key, key, TKIP_KEY_LEN); tkey->key_set = 1; tkey->tx_iv16 = 1; /* TSC is initialized to 1 */ if (seq) { tkey->rx_iv32 = (seq[5] << 24) | (seq[4] << 16) | (seq[3] << 8) | seq[2]; tkey->rx_iv16 = (seq[1] << 8) | seq[0]; } } else if (len == 0) tkey->key_set = 0; else return -1; return 0; } static int lib80211_tkip_get_key(void *key, int len, u8 * seq, void *priv) { struct lib80211_tkip_data *tkey = priv; if (len < TKIP_KEY_LEN) return -1; if (!tkey->key_set) return 0; memcpy(key, tkey->key, TKIP_KEY_LEN); if (seq) { /* Return the sequence number of the last transmitted frame. */ u16 iv16 = tkey->tx_iv16; u32 iv32 = tkey->tx_iv32; if (iv16 == 0) iv32--; iv16--; seq[0] = tkey->tx_iv16; seq[1] = tkey->tx_iv16 >> 8; seq[2] = tkey->tx_iv32; seq[3] = tkey->tx_iv32 >> 8; seq[4] = tkey->tx_iv32 >> 16; seq[5] = tkey->tx_iv32 >> 24; } return TKIP_KEY_LEN; } static void lib80211_tkip_print_stats(struct seq_file *m, void *priv) { struct lib80211_tkip_data *tkip = priv; seq_printf(m, "key[%d] alg=TKIP key_set=%d " "tx_pn=%02x%02x%02x%02x%02x%02x " "rx_pn=%02x%02x%02x%02x%02x%02x " "replays=%d icv_errors=%d local_mic_failures=%d\n", tkip->key_idx, tkip->key_set, (tkip->tx_iv32 >> 24) & 0xff, (tkip->tx_iv32 >> 16) & 0xff, (tkip->tx_iv32 >> 8) & 0xff, tkip->tx_iv32 & 0xff, (tkip->tx_iv16 >> 8) & 0xff, tkip->tx_iv16 & 0xff, (tkip->rx_iv32 >> 24) & 0xff, (tkip->rx_iv32 >> 16) & 0xff, (tkip->rx_iv32 >> 8) & 0xff, tkip->rx_iv32 & 0xff, (tkip->rx_iv16 >> 8) & 0xff, tkip->rx_iv16 & 0xff, tkip->dot11RSNAStatsTKIPReplays, tkip->dot11RSNAStatsTKIPICVErrors, tkip->dot11RSNAStatsTKIPLocalMICFailures); } static struct lib80211_crypto_ops lib80211_crypt_tkip = { .name = "TKIP", .init = lib80211_tkip_init, .deinit = lib80211_tkip_deinit, .encrypt_mpdu = lib80211_tkip_encrypt, .decrypt_mpdu = lib80211_tkip_decrypt, .encrypt_msdu = lib80211_michael_mic_add, .decrypt_msdu = lib80211_michael_mic_verify, .set_key = lib80211_tkip_set_key, .get_key = lib80211_tkip_get_key, .print_stats = lib80211_tkip_print_stats, .extra_mpdu_prefix_len = 4 + 4, /* IV + ExtIV */ .extra_mpdu_postfix_len = 4, /* ICV */ .extra_msdu_postfix_len = 8, /* MIC */ .get_flags = lib80211_tkip_get_flags, .set_flags = lib80211_tkip_set_flags, .owner = THIS_MODULE, }; static int __init lib80211_crypto_tkip_init(void) { return lib80211_register_crypto_ops(&lib80211_crypt_tkip); } static void __exit 
lib80211_crypto_tkip_exit(void) { lib80211_unregister_crypto_ops(&lib80211_crypt_tkip); } module_init(lib80211_crypto_tkip_init); module_exit(lib80211_crypto_tkip_exit); backport-iwlwifi-dkms-7906/net/wireless/lib80211_crypt_wep.c000066400000000000000000000154451351064634500237370ustar00rootroot00000000000000/* * lib80211 crypt: host-based WEP encryption implementation for lib80211 * * Copyright (c) 2002-2004, Jouni Malinen * Copyright (c) 2008, John W. Linville * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. See README and COPYING for * more details. */ #include #include #include #include #include #include #include #include #include #include #include #include MODULE_AUTHOR("Jouni Malinen"); MODULE_DESCRIPTION("lib80211 crypt: WEP"); MODULE_LICENSE("GPL"); struct lib80211_wep_data { u32 iv; #define WEP_KEY_LEN 13 u8 key[WEP_KEY_LEN + 1]; u8 key_len; u8 key_idx; struct crypto_cipher *tx_tfm; struct crypto_cipher *rx_tfm; }; static void *lib80211_wep_init(int keyidx) { struct lib80211_wep_data *priv; priv = kzalloc(sizeof(*priv), GFP_ATOMIC); if (priv == NULL) goto fail; priv->key_idx = keyidx; priv->tx_tfm = crypto_alloc_cipher("arc4", 0, 0); if (IS_ERR(priv->tx_tfm)) { priv->tx_tfm = NULL; goto fail; } priv->rx_tfm = crypto_alloc_cipher("arc4", 0, 0); if (IS_ERR(priv->rx_tfm)) { priv->rx_tfm = NULL; goto fail; } /* start WEP IV from a random value */ get_random_bytes(&priv->iv, 4); return priv; fail: if (priv) { crypto_free_cipher(priv->tx_tfm); crypto_free_cipher(priv->rx_tfm); kfree(priv); } return NULL; } static void lib80211_wep_deinit(void *priv) { struct lib80211_wep_data *_priv = priv; if (_priv) { crypto_free_cipher(_priv->tx_tfm); crypto_free_cipher(_priv->rx_tfm); } kfree(priv); } /* Add WEP IV/key info to a frame that has at least 4 bytes of headroom */ static int lib80211_wep_build_iv(struct sk_buff *skb, int hdr_len, u8 *key, int keylen, void *priv) { struct lib80211_wep_data *wep = priv; u32 klen; u8 *pos; if (skb_headroom(skb) < 4 || skb->len < hdr_len) return -1; pos = skb_push(skb, 4); memmove(pos, pos + 4, hdr_len); pos += hdr_len; klen = 3 + wep->key_len; wep->iv++; /* Fluhrer, Mantin, and Shamir have reported weaknesses in the key * scheduling algorithm of RC4. At least IVs (KeyByte + 3, 0xff, N) * can be used to speedup attacks, so avoid using them. */ if ((wep->iv & 0xff00) == 0xff00) { u8 B = (wep->iv >> 16) & 0xff; if (B >= 3 && B < klen) wep->iv += 0x0100; } /* Prepend 24-bit IV to RC4 key and TX frame */ *pos++ = (wep->iv >> 16) & 0xff; *pos++ = (wep->iv >> 8) & 0xff; *pos++ = wep->iv & 0xff; *pos++ = wep->key_idx << 6; return 0; } /* Perform WEP encryption on given skb that has at least 4 bytes of headroom * for IV and 4 bytes of tailroom for ICV. Both IV and ICV will be transmitted, * so the payload length increases with 8 bytes. 
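* (that is a 4-byte IV + key index prepended and a 4-byte ICV appended,
* hence the 8-byte growth per frame)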
/* Perform WEP encryption on given skb that has at least 4 bytes of headroom
 * for IV and 4 bytes of tailroom for ICV. Both IV and ICV will be transmitted,
 * so the payload length increases by 8 bytes.
 *
 * WEP frame payload: IV + TX key idx, RC4(data), ICV = RC4(CRC32(data))
 */
static int lib80211_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
{
	struct lib80211_wep_data *wep = priv;
	u32 crc, klen, len;
	u8 *pos, *icv;
	u8 key[WEP_KEY_LEN + 3];
	int i;

	/* other checks are in lib80211_wep_build_iv */
	if (skb_tailroom(skb) < 4)
		return -1;

	/* add the IV to the frame */
	if (lib80211_wep_build_iv(skb, hdr_len, NULL, 0, priv))
		return -1;

	/* Copy the IV into the first 3 bytes of the key */
	skb_copy_from_linear_data_offset(skb, hdr_len, key, 3);

	/* Copy rest of the WEP key (the secret part) */
	memcpy(key + 3, wep->key, wep->key_len);

	len = skb->len - hdr_len - 4;
	pos = skb->data + hdr_len + 4;
	klen = 3 + wep->key_len;

	/* Append little-endian CRC32 over only the data and encrypt it to
	 * produce ICV */
	crc = ~crc32_le(~0, pos, len);
	icv = skb_put(skb, 4);
	icv[0] = crc;
	icv[1] = crc >> 8;
	icv[2] = crc >> 16;
	icv[3] = crc >> 24;

	crypto_cipher_setkey(wep->tx_tfm, key, klen);

	for (i = 0; i < len + 4; i++)
		crypto_cipher_encrypt_one(wep->tx_tfm, pos + i, pos + i);

	return 0;
}

/* Perform WEP decryption on given buffer. Buffer includes whole WEP part of
 * the frame: IV (4 bytes), encrypted payload (including SNAP header),
 * ICV (4 bytes). len includes both IV and ICV.
 *
 * Returns 0 if frame was decrypted successfully and ICV was correct, and a
 * negative value on failure. If frame is OK, IV and ICV will be removed.
 */
static int lib80211_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
{
	struct lib80211_wep_data *wep = priv;
	u32 crc, klen, plen;
	u8 key[WEP_KEY_LEN + 3];
	u8 keyidx, *pos, icv[4];
	int i;

	if (skb->len < hdr_len + 8)
		return -1;

	pos = skb->data + hdr_len;
	key[0] = *pos++;
	key[1] = *pos++;
	key[2] = *pos++;
	keyidx = *pos++ >> 6;
	if (keyidx != wep->key_idx)
		return -1;

	klen = 3 + wep->key_len;

	/* Copy rest of the WEP key (the secret part) */
	memcpy(key + 3, wep->key, wep->key_len);

	/* Apply RC4 to data and compute CRC32 over decrypted data */
	plen = skb->len - hdr_len - 8;

	crypto_cipher_setkey(wep->rx_tfm, key, klen);

	for (i = 0; i < plen + 4; i++)
		crypto_cipher_decrypt_one(wep->rx_tfm, pos + i, pos + i);

	crc = ~crc32_le(~0, pos, plen);
	icv[0] = crc;
	icv[1] = crc >> 8;
	icv[2] = crc >> 16;
	icv[3] = crc >> 24;
	if (memcmp(icv, pos + plen, 4) != 0) {
		/* ICV mismatch - drop frame */
		return -2;
	}

	/* Remove IV and ICV */
	memmove(skb->data + 4, skb->data, hdr_len);
	skb_pull(skb, 4);
	skb_trim(skb, skb->len - 4);

	return 0;
}

static int lib80211_wep_set_key(void *key, int len, u8 *seq, void *priv)
{
	struct lib80211_wep_data *wep = priv;

	if (len < 0 || len > WEP_KEY_LEN)
		return -1;

	memcpy(wep->key, key, len);
	wep->key_len = len;

	return 0;
}

static int lib80211_wep_get_key(void *key, int len, u8 *seq, void *priv)
{
	struct lib80211_wep_data *wep = priv;

	if (len < wep->key_len)
		return -1;

	memcpy(key, wep->key, wep->key_len);

	return wep->key_len;
}

static void lib80211_wep_print_stats(struct seq_file *m, void *priv)
{
	struct lib80211_wep_data *wep = priv;

	seq_printf(m, "key[%d] alg=WEP len=%d\n", wep->key_idx, wep->key_len);
}

static struct lib80211_crypto_ops lib80211_crypt_wep = {
	.name = "WEP",
	.init = lib80211_wep_init,
	.deinit = lib80211_wep_deinit,
	.encrypt_mpdu = lib80211_wep_encrypt,
	.decrypt_mpdu = lib80211_wep_decrypt,
	.encrypt_msdu = NULL,
	.decrypt_msdu = NULL,
	.set_key = lib80211_wep_set_key,
	.get_key = lib80211_wep_get_key,
	.print_stats = lib80211_wep_print_stats,
	.extra_mpdu_prefix_len = 4,	/* IV */
	.extra_mpdu_postfix_len = 4,	/* ICV */
	.owner = THIS_MODULE,
};

static int __init
lib80211_crypto_wep_init(void) { return lib80211_register_crypto_ops(&lib80211_crypt_wep); } static void __exit lib80211_crypto_wep_exit(void) { lib80211_unregister_crypto_ops(&lib80211_crypt_wep); } module_init(lib80211_crypto_wep_init); module_exit(lib80211_crypto_wep_exit); backport-iwlwifi-dkms-7906/net/wireless/mesh.c000066400000000000000000000200051351064634500214210ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0 #include #include #include #include "nl80211.h" #include "core.h" #include "rdev-ops.h" /* Default values, timeouts in ms */ #define MESH_TTL 31 #define MESH_DEFAULT_ELEMENT_TTL 31 #define MESH_MAX_RETR 3 #define MESH_RET_T 100 #define MESH_CONF_T 100 #define MESH_HOLD_T 100 #define MESH_PATH_TIMEOUT 5000 #define MESH_RANN_INTERVAL 5000 #define MESH_PATH_TO_ROOT_TIMEOUT 6000 #define MESH_ROOT_INTERVAL 5000 #define MESH_ROOT_CONFIRMATION_INTERVAL 2000 #define MESH_DEFAULT_PLINK_TIMEOUT 1800 /* timeout in seconds */ /* * Minimum interval between two consecutive PREQs originated by the same * interface */ #define MESH_PREQ_MIN_INT 10 #define MESH_PERR_MIN_INT 100 #define MESH_DIAM_TRAVERSAL_TIME 50 #define MESH_RSSI_THRESHOLD 0 /* * A path will be refreshed if it is used PATH_REFRESH_TIME milliseconds * before timing out. This way it will remain ACTIVE and no data frames * will be unnecessarily held in the pending queue. */ #define MESH_PATH_REFRESH_TIME 1000 #define MESH_MIN_DISCOVERY_TIMEOUT (2 * MESH_DIAM_TRAVERSAL_TIME) /* Default maximum number of established plinks per interface */ #define MESH_MAX_ESTAB_PLINKS 32 #define MESH_MAX_PREQ_RETRIES 4 #define MESH_SYNC_NEIGHBOR_OFFSET_MAX 50 #define MESH_DEFAULT_BEACON_INTERVAL 1000 /* in 1024 us units (=TUs) */ #define MESH_DEFAULT_DTIM_PERIOD 2 #define MESH_DEFAULT_AWAKE_WINDOW 10 /* in 1024 us units (=TUs) */ const struct mesh_config default_mesh_config = { .dot11MeshRetryTimeout = MESH_RET_T, .dot11MeshConfirmTimeout = MESH_CONF_T, .dot11MeshHoldingTimeout = MESH_HOLD_T, .dot11MeshMaxRetries = MESH_MAX_RETR, .dot11MeshTTL = MESH_TTL, .element_ttl = MESH_DEFAULT_ELEMENT_TTL, .auto_open_plinks = true, .dot11MeshMaxPeerLinks = MESH_MAX_ESTAB_PLINKS, .dot11MeshNbrOffsetMaxNeighbor = MESH_SYNC_NEIGHBOR_OFFSET_MAX, .dot11MeshHWMPactivePathTimeout = MESH_PATH_TIMEOUT, .dot11MeshHWMPpreqMinInterval = MESH_PREQ_MIN_INT, .dot11MeshHWMPperrMinInterval = MESH_PERR_MIN_INT, .dot11MeshHWMPnetDiameterTraversalTime = MESH_DIAM_TRAVERSAL_TIME, .dot11MeshHWMPmaxPREQretries = MESH_MAX_PREQ_RETRIES, .path_refresh_time = MESH_PATH_REFRESH_TIME, .min_discovery_timeout = MESH_MIN_DISCOVERY_TIMEOUT, .dot11MeshHWMPRannInterval = MESH_RANN_INTERVAL, .dot11MeshGateAnnouncementProtocol = false, .dot11MeshForwarding = true, .rssi_threshold = MESH_RSSI_THRESHOLD, .ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED, .dot11MeshHWMPactivePathToRootTimeout = MESH_PATH_TO_ROOT_TIMEOUT, .dot11MeshHWMProotInterval = MESH_ROOT_INTERVAL, .dot11MeshHWMPconfirmationInterval = MESH_ROOT_CONFIRMATION_INTERVAL, .power_mode = NL80211_MESH_POWER_ACTIVE, .dot11MeshAwakeWindowDuration = MESH_DEFAULT_AWAKE_WINDOW, .plink_timeout = MESH_DEFAULT_PLINK_TIMEOUT, }; const struct mesh_setup default_mesh_setup = { /* cfg80211_join_mesh() will pick a channel if needed */ .sync_method = IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET, .path_sel_proto = IEEE80211_PATH_PROTOCOL_HWMP, .path_metric = IEEE80211_PATH_METRIC_AIRTIME, .auth_id = 0, /* open */ .ie = NULL, .ie_len = 0, .is_secure = false, .user_mpm = false, .beacon_interval = MESH_DEFAULT_BEACON_INTERVAL, 
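	/* (a TU is 1024 us, so the 1000 TU default above is ~1.024 s between
	 * beacons; the DTIM period below is counted in beacon intervals) */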
.dtim_period = MESH_DEFAULT_DTIM_PERIOD, }; int __cfg80211_join_mesh(struct cfg80211_registered_device *rdev, struct net_device *dev, struct mesh_setup *setup, const struct mesh_config *conf) { struct wireless_dev *wdev = dev->ieee80211_ptr; int err; BUILD_BUG_ON(IEEE80211_MAX_SSID_LEN != IEEE80211_MAX_MESH_ID_LEN); ASSERT_WDEV_LOCK(wdev); if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT) return -EOPNOTSUPP; if (!(rdev->wiphy.flags & WIPHY_FLAG_MESH_AUTH) && setup->is_secure) return -EOPNOTSUPP; if (wdev->mesh_id_len) return -EALREADY; if (!setup->mesh_id_len) return -EINVAL; if (!rdev->ops->join_mesh) return -EOPNOTSUPP; if (!setup->chandef.chan) { /* if no channel explicitly given, use preset channel */ setup->chandef = wdev->preset_chandef; } if (!setup->chandef.chan) { /* if we don't have that either, use the first usable channel */ enum nl80211_band band; for (band = 0; band < NUM_NL80211_BANDS; band++) { struct ieee80211_supported_band *sband; struct ieee80211_channel *chan; int i; sband = rdev->wiphy.bands[band]; if (!sband) continue; for (i = 0; i < sband->n_channels; i++) { chan = &sband->channels[i]; if (chan->flags & (IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_DISABLED | IEEE80211_CHAN_RADAR)) continue; setup->chandef.chan = chan; break; } if (setup->chandef.chan) break; } /* no usable channel ... */ if (!setup->chandef.chan) return -EINVAL; setup->chandef.width = NL80211_CHAN_WIDTH_20_NOHT; setup->chandef.center_freq1 = setup->chandef.chan->center_freq; } /* * check if basic rates are available otherwise use mandatory rates as * basic rates */ if (!setup->basic_rates) { enum nl80211_bss_scan_width scan_width; struct ieee80211_supported_band *sband = rdev->wiphy.bands[setup->chandef.chan->band]; if (setup->chandef.chan->band == NL80211_BAND_2GHZ) { int i; /* * Older versions selected the mandatory rates for * 2.4 GHz as well, but were broken in that only * 1 Mbps was regarded as a mandatory rate. Keep * using just 1 Mbps as the default basic rate for * mesh to be interoperable with older versions. */ for (i = 0; i < sband->n_bitrates; i++) { if (sband->bitrates[i].bitrate == 10) { setup->basic_rates = BIT(i); break; } } } else { scan_width = cfg80211_chandef_to_scan_width(&setup->chandef); setup->basic_rates = ieee80211_mandatory_rates(sband, scan_width); } } err = cfg80211_chandef_dfs_required(&rdev->wiphy, &setup->chandef, NL80211_IFTYPE_MESH_POINT); if (err < 0) return err; if (err > 0 && !setup->userspace_handles_dfs) return -EINVAL; if (!cfg80211_reg_can_beacon(&rdev->wiphy, &setup->chandef, NL80211_IFTYPE_MESH_POINT)) return -EINVAL; err = rdev_join_mesh(rdev, dev, conf, setup); if (!err) { memcpy(wdev->ssid, setup->mesh_id, setup->mesh_id_len); wdev->mesh_id_len = setup->mesh_id_len; wdev->chandef = setup->chandef; wdev->beacon_interval = setup->beacon_interval; } return err; } int cfg80211_set_mesh_channel(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, struct cfg80211_chan_def *chandef) { int err; /* * Workaround for libertas (only!), it puts the interface * into mesh mode but doesn't implement join_mesh. Instead, * it is configured via sysfs and then joins the mesh when * you set the channel. Note that the libertas mesh isn't * compatible with 802.11 mesh. 
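	 * In other words: for libertas the rdev op below both switches the
	 * channel and triggers the actual mesh join, while for everyone
	 * else the channel is only recorded in wdev->preset_chandef for a
	 * later join_mesh call.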
*/ if (rdev->ops->libertas_set_mesh_channel) { if (chandef->width != NL80211_CHAN_WIDTH_20_NOHT) return -EINVAL; if (!netif_running(wdev->netdev)) return -ENETDOWN; err = rdev_libertas_set_mesh_channel(rdev, wdev->netdev, chandef->chan); if (!err) wdev->chandef = *chandef; return err; } if (wdev->mesh_id_len) return -EBUSY; wdev->preset_chandef = *chandef; return 0; } int __cfg80211_leave_mesh(struct cfg80211_registered_device *rdev, struct net_device *dev) { struct wireless_dev *wdev = dev->ieee80211_ptr; int err; ASSERT_WDEV_LOCK(wdev); if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT) return -EOPNOTSUPP; if (!rdev->ops->leave_mesh) return -EOPNOTSUPP; if (!wdev->mesh_id_len) return -ENOTCONN; err = rdev_leave_mesh(rdev, dev); if (!err) { wdev->conn_owner_nlportid = 0; wdev->mesh_id_len = 0; wdev->beacon_interval = 0; memset(&wdev->chandef, 0, sizeof(wdev->chandef)); rdev_set_qos_map(rdev, dev, NULL); cfg80211_sched_dfs_chan_update(rdev); } return err; } int cfg80211_leave_mesh(struct cfg80211_registered_device *rdev, struct net_device *dev) { struct wireless_dev *wdev = dev->ieee80211_ptr; int err; wdev_lock(wdev); err = __cfg80211_leave_mesh(rdev, dev); wdev_unlock(wdev); return err; } backport-iwlwifi-dkms-7906/net/wireless/mlme.c000066400000000000000000000573231351064634500214340ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0 /* * cfg80211 MLME SAP interface * * Copyright (c) 2009, Jouni Malinen * Copyright (c) 2015 Intel Deutschland GmbH */ #include #include #include #include #include #include #include #include #include #include "core.h" #include "nl80211.h" #include "rdev-ops.h" void cfg80211_rx_assoc_resp(struct net_device *dev, struct cfg80211_bss *bss, const u8 *buf, size_t len, int uapsd_queues, const u8 *req_ies, size_t req_ies_len) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct wiphy *wiphy = wdev->wiphy; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf; struct cfg80211_connect_resp_params cr; memset(&cr, 0, sizeof(cr)); cr.status = (int)le16_to_cpu(mgmt->u.assoc_resp.status_code); cr.bssid = mgmt->bssid; cr.bss = bss; cr.req_ie = req_ies; cr.req_ie_len = req_ies_len; cr.resp_ie = mgmt->u.assoc_resp.variable; cr.resp_ie_len = len - offsetof(struct ieee80211_mgmt, u.assoc_resp.variable); cr.timeout_reason = NL80211_TIMEOUT_UNSPECIFIED; trace_cfg80211_send_rx_assoc(dev, bss); /* * This is a bit of a hack, we don't notify userspace of * a (re-)association reply if we tried to send a reassoc * and got a reject -- we only try again with an assoc * frame instead of reassoc. 
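	 * cfg80211_sme_rx_assoc_resp() returns non-zero in that retry case,
	 * so the nl80211 event and connect result below are skipped and the
	 * held BSS reference is released right here instead.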
*/ if (cfg80211_sme_rx_assoc_resp(wdev, cr.status)) { cfg80211_unhold_bss(bss_from_pub(bss)); cfg80211_put_bss(wiphy, bss); return; } nl80211_send_rx_assoc(rdev, dev, buf, len, GFP_KERNEL, uapsd_queues, req_ies, req_ies_len); /* update current_bss etc., consumes the bss reference */ __cfg80211_connect_result(dev, &cr, cr.status == WLAN_STATUS_SUCCESS); } EXPORT_SYMBOL(cfg80211_rx_assoc_resp); static void cfg80211_process_auth(struct wireless_dev *wdev, const u8 *buf, size_t len) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); nl80211_send_rx_auth(rdev, wdev->netdev, buf, len, GFP_KERNEL); cfg80211_sme_rx_auth(wdev, buf, len); } static void cfg80211_process_deauth(struct wireless_dev *wdev, const u8 *buf, size_t len) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf; const u8 *bssid = mgmt->bssid; u16 reason_code = le16_to_cpu(mgmt->u.deauth.reason_code); bool from_ap = !ether_addr_equal(mgmt->sa, wdev->netdev->dev_addr); nl80211_send_deauth(rdev, wdev->netdev, buf, len, GFP_KERNEL); if (!wdev->current_bss || !ether_addr_equal(wdev->current_bss->pub.bssid, bssid)) return; __cfg80211_disconnected(wdev->netdev, NULL, 0, reason_code, from_ap); cfg80211_sme_deauth(wdev); } static void cfg80211_process_disassoc(struct wireless_dev *wdev, const u8 *buf, size_t len) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf; const u8 *bssid = mgmt->bssid; u16 reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code); bool from_ap = !ether_addr_equal(mgmt->sa, wdev->netdev->dev_addr); nl80211_send_disassoc(rdev, wdev->netdev, buf, len, GFP_KERNEL); if (WARN_ON(!wdev->current_bss || !ether_addr_equal(wdev->current_bss->pub.bssid, bssid))) return; __cfg80211_disconnected(wdev->netdev, NULL, 0, reason_code, from_ap); cfg80211_sme_disassoc(wdev); } void cfg80211_rx_mlme_mgmt(struct net_device *dev, const u8 *buf, size_t len) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct ieee80211_mgmt *mgmt = (void *)buf; ASSERT_WDEV_LOCK(wdev); trace_cfg80211_rx_mlme_mgmt(dev, buf, len); if (WARN_ON(len < 2)) return; if (ieee80211_is_auth(mgmt->frame_control)) cfg80211_process_auth(wdev, buf, len); else if (ieee80211_is_deauth(mgmt->frame_control)) cfg80211_process_deauth(wdev, buf, len); else if (ieee80211_is_disassoc(mgmt->frame_control)) cfg80211_process_disassoc(wdev, buf, len); } EXPORT_SYMBOL(cfg80211_rx_mlme_mgmt); void cfg80211_auth_timeout(struct net_device *dev, const u8 *addr) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct wiphy *wiphy = wdev->wiphy; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); trace_cfg80211_send_auth_timeout(dev, addr); nl80211_send_auth_timeout(rdev, dev, addr, GFP_KERNEL); cfg80211_sme_auth_timeout(wdev); } EXPORT_SYMBOL(cfg80211_auth_timeout); void cfg80211_assoc_timeout(struct net_device *dev, struct cfg80211_bss *bss) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct wiphy *wiphy = wdev->wiphy; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); trace_cfg80211_send_assoc_timeout(dev, bss->bssid); nl80211_send_assoc_timeout(rdev, dev, bss->bssid, GFP_KERNEL); cfg80211_sme_assoc_timeout(wdev); cfg80211_unhold_bss(bss_from_pub(bss)); cfg80211_put_bss(wiphy, bss); } EXPORT_SYMBOL(cfg80211_assoc_timeout); void cfg80211_abandon_assoc(struct net_device *dev, struct cfg80211_bss *bss) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct wiphy *wiphy = 
wdev->wiphy; cfg80211_sme_abandon_assoc(wdev); cfg80211_unhold_bss(bss_from_pub(bss)); cfg80211_put_bss(wiphy, bss); } EXPORT_SYMBOL(cfg80211_abandon_assoc); void cfg80211_tx_mlme_mgmt(struct net_device *dev, const u8 *buf, size_t len) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct ieee80211_mgmt *mgmt = (void *)buf; ASSERT_WDEV_LOCK(wdev); trace_cfg80211_tx_mlme_mgmt(dev, buf, len); if (WARN_ON(len < 2)) return; if (ieee80211_is_deauth(mgmt->frame_control)) cfg80211_process_deauth(wdev, buf, len); else cfg80211_process_disassoc(wdev, buf, len); } EXPORT_SYMBOL(cfg80211_tx_mlme_mgmt); void cfg80211_michael_mic_failure(struct net_device *dev, const u8 *addr, enum nl80211_key_type key_type, int key_id, const u8 *tsc, gfp_t gfp) { struct wiphy *wiphy = dev->ieee80211_ptr->wiphy; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); #ifdef CPTCFG_CFG80211_WEXT union iwreq_data wrqu; char *buf = kmalloc(128, gfp); if (buf) { sprintf(buf, "MLME-MICHAELMICFAILURE.indication(" "keyid=%d %scast addr=%pM)", key_id, key_type == NL80211_KEYTYPE_GROUP ? "broad" : "uni", addr); memset(&wrqu, 0, sizeof(wrqu)); wrqu.data.length = strlen(buf); wireless_send_event(dev, IWEVCUSTOM, &wrqu, buf); kfree(buf); } #endif trace_cfg80211_michael_mic_failure(dev, addr, key_type, key_id, tsc); nl80211_michael_mic_failure(rdev, dev, addr, key_type, key_id, tsc, gfp); } EXPORT_SYMBOL(cfg80211_michael_mic_failure); /* some MLME handling for userspace SME */ int cfg80211_mlme_auth(struct cfg80211_registered_device *rdev, struct net_device *dev, struct ieee80211_channel *chan, enum nl80211_auth_type auth_type, const u8 *bssid, const u8 *ssid, int ssid_len, const u8 *ie, int ie_len, const u8 *key, int key_len, int key_idx, const u8 *auth_data, int auth_data_len) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_auth_request req = { .ie = ie, .ie_len = ie_len, .auth_data = auth_data, .auth_data_len = auth_data_len, .auth_type = auth_type, .key = key, .key_len = key_len, .key_idx = key_idx, }; int err; ASSERT_WDEV_LOCK(wdev); if (auth_type == NL80211_AUTHTYPE_SHARED_KEY) if (!key || !key_len || key_idx < 0 || key_idx > 3) return -EINVAL; if (wdev->current_bss && ether_addr_equal(bssid, wdev->current_bss->pub.bssid)) return -EALREADY; req.bss = cfg80211_get_bss(&rdev->wiphy, chan, bssid, ssid, ssid_len, IEEE80211_BSS_TYPE_ESS, IEEE80211_PRIVACY_ANY); if (!req.bss) return -ENOENT; err = rdev_auth(rdev, dev, &req); cfg80211_put_bss(&rdev->wiphy, req.bss); return err; } /* Do a logical ht_capa &= ht_capa_mask. */ void cfg80211_oper_and_ht_capa(struct ieee80211_ht_cap *ht_capa, const struct ieee80211_ht_cap *ht_capa_mask) { int i; u8 *p1, *p2; if (!ht_capa_mask) { memset(ht_capa, 0, sizeof(*ht_capa)); return; } p1 = (u8*)(ht_capa); p2 = (u8*)(ht_capa_mask); for (i = 0; i < sizeof(*ht_capa); i++) p1[i] &= p2[i]; } /* Do a logical vht_capa &= vht_capa_mask. 
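	 * As with the HT variant above, this is a plain byte-wise AND over
	 * the raw capability struct; a NULL mask zeroes the capabilities
	 * completely (e.g. a 0x00 mask byte strips every bit in that byte).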
*/ void cfg80211_oper_and_vht_capa(struct ieee80211_vht_cap *vht_capa, const struct ieee80211_vht_cap *vht_capa_mask) { int i; u8 *p1, *p2; if (!vht_capa_mask) { memset(vht_capa, 0, sizeof(*vht_capa)); return; } p1 = (u8*)(vht_capa); p2 = (u8*)(vht_capa_mask); for (i = 0; i < sizeof(*vht_capa); i++) p1[i] &= p2[i]; } int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev, struct net_device *dev, struct ieee80211_channel *chan, const u8 *bssid, const u8 *ssid, int ssid_len, struct cfg80211_assoc_request *req) { struct wireless_dev *wdev = dev->ieee80211_ptr; int err; ASSERT_WDEV_LOCK(wdev); if (wdev->current_bss && (!req->prev_bssid || !ether_addr_equal(wdev->current_bss->pub.bssid, req->prev_bssid))) return -EALREADY; cfg80211_oper_and_ht_capa(&req->ht_capa_mask, rdev->wiphy.ht_capa_mod_mask); cfg80211_oper_and_vht_capa(&req->vht_capa_mask, rdev->wiphy.vht_capa_mod_mask); req->bss = cfg80211_get_bss(&rdev->wiphy, chan, bssid, ssid, ssid_len, IEEE80211_BSS_TYPE_ESS, IEEE80211_PRIVACY_ANY); if (!req->bss) return -ENOENT; err = rdev_assoc(rdev, dev, req); if (!err) cfg80211_hold_bss(bss_from_pub(req->bss)); else cfg80211_put_bss(&rdev->wiphy, req->bss); return err; } int cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev, struct net_device *dev, const u8 *bssid, const u8 *ie, int ie_len, u16 reason, bool local_state_change) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_deauth_request req = { .bssid = bssid, .reason_code = reason, .ie = ie, .ie_len = ie_len, .local_state_change = local_state_change, }; ASSERT_WDEV_LOCK(wdev); if (local_state_change && (!wdev->current_bss || !ether_addr_equal(wdev->current_bss->pub.bssid, bssid))) return 0; if (ether_addr_equal(wdev->disconnect_bssid, bssid) || (wdev->current_bss && ether_addr_equal(wdev->current_bss->pub.bssid, bssid))) wdev->conn_owner_nlportid = 0; return rdev_deauth(rdev, dev, &req); } int cfg80211_mlme_disassoc(struct cfg80211_registered_device *rdev, struct net_device *dev, const u8 *bssid, const u8 *ie, int ie_len, u16 reason, bool local_state_change) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_disassoc_request req = { .reason_code = reason, .local_state_change = local_state_change, .ie = ie, .ie_len = ie_len, }; int err; ASSERT_WDEV_LOCK(wdev); if (!wdev->current_bss) return -ENOTCONN; if (ether_addr_equal(wdev->current_bss->pub.bssid, bssid)) req.bss = &wdev->current_bss->pub; else return -ENOTCONN; err = rdev_disassoc(rdev, dev, &req); if (err) return err; /* driver should have reported the disassoc */ WARN_ON(wdev->current_bss); return 0; } void cfg80211_mlme_down(struct cfg80211_registered_device *rdev, struct net_device *dev) { struct wireless_dev *wdev = dev->ieee80211_ptr; u8 bssid[ETH_ALEN]; ASSERT_WDEV_LOCK(wdev); if (!rdev->ops->deauth) return; if (!wdev->current_bss) return; memcpy(bssid, wdev->current_bss->pub.bssid, ETH_ALEN); cfg80211_mlme_deauth(rdev, dev, bssid, NULL, 0, WLAN_REASON_DEAUTH_LEAVING, false); } struct cfg80211_mgmt_registration { struct list_head list; struct wireless_dev *wdev; u32 nlportid; int match_len; __le16 frame_type; u8 match[]; }; static void cfg80211_process_mlme_unregistrations(struct cfg80211_registered_device *rdev) { struct cfg80211_mgmt_registration *reg; ASSERT_RTNL(); spin_lock_bh(&rdev->mlme_unreg_lock); while ((reg = list_first_entry_or_null(&rdev->mlme_unreg, struct cfg80211_mgmt_registration, list))) { list_del(®->list); spin_unlock_bh(&rdev->mlme_unreg_lock); if (rdev->ops->mgmt_frame_register) { u16 frame_type = 
le16_to_cpu(reg->frame_type); rdev_mgmt_frame_register(rdev, reg->wdev, frame_type, false); } kfree(reg); spin_lock_bh(&rdev->mlme_unreg_lock); } spin_unlock_bh(&rdev->mlme_unreg_lock); } void cfg80211_mlme_unreg_wk(struct work_struct *wk) { struct cfg80211_registered_device *rdev; rdev = container_of(wk, struct cfg80211_registered_device, mlme_unreg_wk); rtnl_lock(); cfg80211_process_mlme_unregistrations(rdev); rtnl_unlock(); } int cfg80211_mlme_register_mgmt(struct wireless_dev *wdev, u32 snd_portid, u16 frame_type, const u8 *match_data, int match_len) { struct wiphy *wiphy = wdev->wiphy; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); struct cfg80211_mgmt_registration *reg, *nreg; int err = 0; u16 mgmt_type; if (!wdev->wiphy->mgmt_stypes) return -EOPNOTSUPP; if ((frame_type & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_MGMT) return -EINVAL; if (frame_type & ~(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) return -EINVAL; mgmt_type = (frame_type & IEEE80211_FCTL_STYPE) >> 4; if (!(wdev->wiphy->mgmt_stypes[wdev->iftype].rx & BIT(mgmt_type))) return -EINVAL; nreg = kzalloc(sizeof(*reg) + match_len, GFP_KERNEL); if (!nreg) return -ENOMEM; spin_lock_bh(&wdev->mgmt_registrations_lock); list_for_each_entry(reg, &wdev->mgmt_registrations, list) { int mlen = min(match_len, reg->match_len); if (frame_type != le16_to_cpu(reg->frame_type)) continue; if (memcmp(reg->match, match_data, mlen) == 0) { err = -EALREADY; break; } } if (err) { kfree(nreg); goto out; } memcpy(nreg->match, match_data, match_len); nreg->match_len = match_len; nreg->nlportid = snd_portid; nreg->frame_type = cpu_to_le16(frame_type); nreg->wdev = wdev; list_add(&nreg->list, &wdev->mgmt_registrations); spin_unlock_bh(&wdev->mgmt_registrations_lock); /* process all unregistrations to avoid driver confusion */ cfg80211_process_mlme_unregistrations(rdev); if (rdev->ops->mgmt_frame_register) rdev_mgmt_frame_register(rdev, wdev, frame_type, true); return 0; out: spin_unlock_bh(&wdev->mgmt_registrations_lock); return err; } void cfg80211_mlme_unregister_socket(struct wireless_dev *wdev, u32 nlportid) { struct wiphy *wiphy = wdev->wiphy; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); struct cfg80211_mgmt_registration *reg, *tmp; spin_lock_bh(&wdev->mgmt_registrations_lock); list_for_each_entry_safe(reg, tmp, &wdev->mgmt_registrations, list) { if (reg->nlportid != nlportid) continue; list_del(®->list); spin_lock(&rdev->mlme_unreg_lock); list_add_tail(®->list, &rdev->mlme_unreg); spin_unlock(&rdev->mlme_unreg_lock); schedule_work(&rdev->mlme_unreg_wk); } spin_unlock_bh(&wdev->mgmt_registrations_lock); if (nlportid && rdev->crit_proto_nlportid == nlportid) { rdev->crit_proto_nlportid = 0; rdev_crit_proto_stop(rdev, wdev); } if (nlportid == wdev->ap_unexpected_nlportid) wdev->ap_unexpected_nlportid = 0; } void cfg80211_mlme_purge_registrations(struct wireless_dev *wdev) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); spin_lock_bh(&wdev->mgmt_registrations_lock); spin_lock(&rdev->mlme_unreg_lock); list_splice_tail_init(&wdev->mgmt_registrations, &rdev->mlme_unreg); spin_unlock(&rdev->mlme_unreg_lock); spin_unlock_bh(&wdev->mgmt_registrations_lock); cfg80211_process_mlme_unregistrations(rdev); } int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, struct cfg80211_mgmt_tx_params *params, u64 *cookie) { const struct ieee80211_mgmt *mgmt; u16 stype; if (!wdev->wiphy->mgmt_stypes) return -EOPNOTSUPP; if (!rdev->ops->mgmt_tx) return -EOPNOTSUPP; if 
(params->len < 24 + 1) return -EINVAL; mgmt = (const struct ieee80211_mgmt *)params->buf; if (!ieee80211_is_mgmt(mgmt->frame_control)) return -EINVAL; stype = le16_to_cpu(mgmt->frame_control) & IEEE80211_FCTL_STYPE; if (!(wdev->wiphy->mgmt_stypes[wdev->iftype].tx & BIT(stype >> 4))) return -EINVAL; if (ieee80211_is_action(mgmt->frame_control) && mgmt->u.action.category != WLAN_CATEGORY_PUBLIC) { int err = 0; wdev_lock(wdev); switch (wdev->iftype) { case NL80211_IFTYPE_ADHOC: case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_P2P_CLIENT: if (!wdev->current_bss) { err = -ENOTCONN; break; } if (!ether_addr_equal(wdev->current_bss->pub.bssid, mgmt->bssid)) { err = -ENOTCONN; break; } /* * check for IBSS DA must be done by driver as * cfg80211 doesn't track the stations */ if (wdev->iftype == NL80211_IFTYPE_ADHOC) break; /* for station, check that DA is the AP */ if (!ether_addr_equal(wdev->current_bss->pub.bssid, mgmt->da)) { err = -ENOTCONN; break; } break; case NL80211_IFTYPE_AP: case NL80211_IFTYPE_P2P_GO: case NL80211_IFTYPE_AP_VLAN: if (!ether_addr_equal(mgmt->bssid, wdev_address(wdev))) err = -EINVAL; break; case NL80211_IFTYPE_MESH_POINT: if (!ether_addr_equal(mgmt->sa, mgmt->bssid)) { err = -EINVAL; break; } /* * check for mesh DA must be done by driver as * cfg80211 doesn't track the stations */ break; case NL80211_IFTYPE_P2P_DEVICE: /* * fall through, P2P device only supports * public action frames */ case NL80211_IFTYPE_NAN: default: err = -EOPNOTSUPP; break; } wdev_unlock(wdev); if (err) return err; } if (!ether_addr_equal(mgmt->sa, wdev_address(wdev))) { /* Allow random TA to be used with Public Action frames if the * driver has indicated support for this. Otherwise, only allow * the local address to be used. */ if (!ieee80211_is_action(mgmt->frame_control) || mgmt->u.action.category != WLAN_CATEGORY_PUBLIC) return -EINVAL; if (!wdev->current_bss && !wiphy_ext_feature_isset( &rdev->wiphy, NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA)) return -EINVAL; if (wdev->current_bss && !wiphy_ext_feature_isset( &rdev->wiphy, NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA_CONNECTED)) return -EINVAL; } /* Transmit the Action frame as requested by user space */ return rdev_mgmt_tx(rdev, wdev, params, cookie); } bool cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq, int sig_dbm, const u8 *buf, size_t len, u32 flags) { struct wiphy *wiphy = wdev->wiphy; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); struct cfg80211_mgmt_registration *reg; const struct ieee80211_txrx_stypes *stypes = &wiphy->mgmt_stypes[wdev->iftype]; struct ieee80211_mgmt *mgmt = (void *)buf; const u8 *data; int data_len; bool result = false; __le16 ftype = mgmt->frame_control & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE); u16 stype; trace_cfg80211_rx_mgmt(wdev, freq, sig_dbm); stype = (le16_to_cpu(mgmt->frame_control) & IEEE80211_FCTL_STYPE) >> 4; if (!(stypes->rx & BIT(stype))) { trace_cfg80211_return_bool(false); return false; } data = buf + ieee80211_hdrlen(mgmt->frame_control); data_len = len - ieee80211_hdrlen(mgmt->frame_control); spin_lock_bh(&wdev->mgmt_registrations_lock); list_for_each_entry(reg, &wdev->mgmt_registrations, list) { if (reg->frame_type != ftype) continue; if (reg->match_len > data_len) continue; if (memcmp(reg->match, data, reg->match_len)) continue; /* found match! 
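	 * A registration matches on the exact frame subtype plus a prefix
	 * compare of the frame body against reg->match; the frame is then
	 * unicast to the matching registration's netlink port.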
*/ /* Indicate the received Action frame to user space */ if (nl80211_send_mgmt(rdev, wdev, reg->nlportid, freq, sig_dbm, buf, len, flags, GFP_ATOMIC)) continue; result = true; break; } spin_unlock_bh(&wdev->mgmt_registrations_lock); trace_cfg80211_return_bool(result); return result; } EXPORT_SYMBOL(cfg80211_rx_mgmt); void cfg80211_sched_dfs_chan_update(struct cfg80211_registered_device *rdev) { cancel_delayed_work(&rdev->dfs_update_channels_wk); queue_delayed_work(cfg80211_wq, &rdev->dfs_update_channels_wk, 0); } void cfg80211_dfs_channels_update_work(struct work_struct *work) { struct delayed_work *delayed_work = to_delayed_work(work); struct cfg80211_registered_device *rdev; struct cfg80211_chan_def chandef; struct ieee80211_supported_band *sband; struct ieee80211_channel *c; struct wiphy *wiphy; bool check_again = false; unsigned long timeout, next_time = 0; unsigned long time_dfs_update; enum nl80211_radar_event radar_event; int bandid, i; rdev = container_of(delayed_work, struct cfg80211_registered_device, dfs_update_channels_wk); wiphy = &rdev->wiphy; rtnl_lock(); for (bandid = 0; bandid < NUM_NL80211_BANDS; bandid++) { sband = wiphy->bands[bandid]; if (!sband) continue; for (i = 0; i < sband->n_channels; i++) { c = &sband->channels[i]; if (!(c->flags & IEEE80211_CHAN_RADAR)) continue; if (c->dfs_state != NL80211_DFS_UNAVAILABLE && c->dfs_state != NL80211_DFS_AVAILABLE) continue; if (c->dfs_state == NL80211_DFS_UNAVAILABLE) { time_dfs_update = IEEE80211_DFS_MIN_NOP_TIME_MS; radar_event = NL80211_RADAR_NOP_FINISHED; } else { if (regulatory_pre_cac_allowed(wiphy) || cfg80211_any_wiphy_oper_chan(wiphy, c)) continue; time_dfs_update = REG_PRE_CAC_EXPIRY_GRACE_MS; radar_event = NL80211_RADAR_PRE_CAC_EXPIRED; } timeout = c->dfs_state_entered + msecs_to_jiffies(time_dfs_update); if (time_after_eq(jiffies, timeout)) { c->dfs_state = NL80211_DFS_USABLE; c->dfs_state_entered = jiffies; cfg80211_chandef_create(&chandef, c, NL80211_CHAN_NO_HT); nl80211_radar_notify(rdev, &chandef, radar_event, NULL, GFP_ATOMIC); regulatory_propagate_dfs_state(wiphy, &chandef, c->dfs_state, radar_event); continue; } if (!check_again) next_time = timeout - jiffies; else next_time = min(next_time, timeout - jiffies); check_again = true; } } rtnl_unlock(); /* reschedule if there are other channels waiting to be cleared again */ if (check_again) queue_delayed_work(cfg80211_wq, &rdev->dfs_update_channels_wk, next_time); } void cfg80211_radar_event(struct wiphy *wiphy, struct cfg80211_chan_def *chandef, gfp_t gfp) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); trace_cfg80211_radar_event(wiphy, chandef); /* only set the chandef supplied channel to unavailable, in * case the radar is detected on only one of multiple channels * spanned by the chandef. 
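	 * (other channels keep their DFS state; the work scheduled below
	 * re-evaluates NOP and pre-CAC expiry for every radar channel)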
*/ cfg80211_set_dfs_state(wiphy, chandef, NL80211_DFS_UNAVAILABLE); cfg80211_sched_dfs_chan_update(rdev); nl80211_radar_notify(rdev, chandef, NL80211_RADAR_DETECTED, NULL, gfp); memcpy(&rdev->radar_chandef, chandef, sizeof(struct cfg80211_chan_def)); queue_work(cfg80211_wq, &rdev->propagate_radar_detect_wk); } EXPORT_SYMBOL(cfg80211_radar_event); void cfg80211_cac_event(struct net_device *netdev, const struct cfg80211_chan_def *chandef, enum nl80211_radar_event event, gfp_t gfp) { struct wireless_dev *wdev = netdev->ieee80211_ptr; struct wiphy *wiphy = wdev->wiphy; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); unsigned long timeout; trace_cfg80211_cac_event(netdev, event); if (WARN_ON(!wdev->cac_started && event != NL80211_RADAR_CAC_STARTED)) return; if (WARN_ON(!wdev->chandef.chan)) return; switch (event) { case NL80211_RADAR_CAC_FINISHED: timeout = wdev->cac_start_time + msecs_to_jiffies(wdev->cac_time_ms); WARN_ON(!time_after_eq(jiffies, timeout)); cfg80211_set_dfs_state(wiphy, chandef, NL80211_DFS_AVAILABLE); memcpy(&rdev->cac_done_chandef, chandef, sizeof(struct cfg80211_chan_def)); queue_work(cfg80211_wq, &rdev->propagate_cac_done_wk); cfg80211_sched_dfs_chan_update(rdev); /* fall through */ case NL80211_RADAR_CAC_ABORTED: wdev->cac_started = false; break; case NL80211_RADAR_CAC_STARTED: wdev->cac_started = true; break; default: WARN_ON(1); return; } nl80211_radar_notify(rdev, chandef, event, netdev, gfp); } EXPORT_SYMBOL(cfg80211_cac_event); backport-iwlwifi-dkms-7906/net/wireless/nl80211.c000066400000000000000000016064261351064634500215140ustar00rootroot00000000000000/* * This is the new netlink-based wireless configuration interface. * * Copyright 2006-2010 Johannes Berg * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright 2015-2017 Intel Deutschland GmbH * Copyright (C) 2018-2019 Intel Corporation */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "core.h" #include "nl80211.h" #include "reg.h" #include "rdev-ops.h" static int nl80211_crypto_settings(struct cfg80211_registered_device *rdev, struct genl_info *info, struct cfg80211_crypto_settings *settings, int pairwise_cipher_limit, int group_cipher_limit); /* the netlink family */ static struct genl_family nl80211_fam; /* multicast groups */ enum nl80211_multicast_groups { NL80211_MCGRP_CONFIG, NL80211_MCGRP_SCAN, NL80211_MCGRP_REGULATORY, NL80211_MCGRP_MLME, NL80211_MCGRP_VENDOR, NL80211_MCGRP_NAN, NL80211_MCGRP_TESTMODE /* keep last - ifdef! 
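	 * NL80211_MCGRP_TESTMODE only has an entry in nl80211_mcgrps[]
	 * when CPTCFG_NL80211_TESTMODE is set, so it must stay the last
	 * value to keep the preceding group indices stable.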
*/ }; static __genl_const struct genl_multicast_group nl80211_mcgrps[] = { [NL80211_MCGRP_CONFIG] = { .name = NL80211_MULTICAST_GROUP_CONFIG }, [NL80211_MCGRP_SCAN] = { .name = NL80211_MULTICAST_GROUP_SCAN }, [NL80211_MCGRP_REGULATORY] = { .name = NL80211_MULTICAST_GROUP_REG }, [NL80211_MCGRP_MLME] = { .name = NL80211_MULTICAST_GROUP_MLME }, [NL80211_MCGRP_VENDOR] = { .name = NL80211_MULTICAST_GROUP_VENDOR }, [NL80211_MCGRP_NAN] = { .name = NL80211_MULTICAST_GROUP_NAN }, #ifdef CPTCFG_NL80211_TESTMODE [NL80211_MCGRP_TESTMODE] = { .name = NL80211_MULTICAST_GROUP_TESTMODE } #endif }; /* returns ERR_PTR values */ static struct wireless_dev * __cfg80211_wdev_from_attrs(struct net *netns, struct nlattr **attrs) { struct cfg80211_registered_device *rdev; struct wireless_dev *result = NULL; bool have_ifidx = attrs[NL80211_ATTR_IFINDEX]; bool have_wdev_id = attrs[NL80211_ATTR_WDEV]; u64 wdev_id; int wiphy_idx = -1; int ifidx = -1; ASSERT_RTNL(); if (!have_ifidx && !have_wdev_id) return ERR_PTR(-EINVAL); if (have_ifidx) ifidx = nla_get_u32(attrs[NL80211_ATTR_IFINDEX]); if (have_wdev_id) { wdev_id = nla_get_u64(attrs[NL80211_ATTR_WDEV]); wiphy_idx = wdev_id >> 32; } list_for_each_entry(rdev, &cfg80211_rdev_list, list) { struct wireless_dev *wdev; if (wiphy_net(&rdev->wiphy) != netns) continue; if (have_wdev_id && rdev->wiphy_idx != wiphy_idx) continue; list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) { if (have_ifidx && wdev->netdev && wdev->netdev->ifindex == ifidx) { result = wdev; break; } if (have_wdev_id && wdev->identifier == (u32)wdev_id) { result = wdev; break; } } if (result) break; } if (result) return result; return ERR_PTR(-ENODEV); } static struct cfg80211_registered_device * __cfg80211_rdev_from_attrs(struct net *netns, struct nlattr **attrs) { struct cfg80211_registered_device *rdev = NULL, *tmp; struct net_device *netdev; ASSERT_RTNL(); if (!attrs[NL80211_ATTR_WIPHY] && !attrs[NL80211_ATTR_IFINDEX] && !attrs[NL80211_ATTR_WDEV]) return ERR_PTR(-EINVAL); if (attrs[NL80211_ATTR_WIPHY]) rdev = cfg80211_rdev_by_wiphy_idx( nla_get_u32(attrs[NL80211_ATTR_WIPHY])); if (attrs[NL80211_ATTR_WDEV]) { u64 wdev_id = nla_get_u64(attrs[NL80211_ATTR_WDEV]); struct wireless_dev *wdev; bool found = false; tmp = cfg80211_rdev_by_wiphy_idx(wdev_id >> 32); if (tmp) { /* make sure wdev exists */ list_for_each_entry(wdev, &tmp->wiphy.wdev_list, list) { if (wdev->identifier != (u32)wdev_id) continue; found = true; break; } if (!found) tmp = NULL; if (rdev && tmp != rdev) return ERR_PTR(-EINVAL); rdev = tmp; } } if (attrs[NL80211_ATTR_IFINDEX]) { int ifindex = nla_get_u32(attrs[NL80211_ATTR_IFINDEX]); netdev = __dev_get_by_index(netns, ifindex); if (netdev) { if (netdev->ieee80211_ptr) tmp = wiphy_to_rdev( netdev->ieee80211_ptr->wiphy); else tmp = NULL; /* not wireless device -- return error */ if (!tmp) return ERR_PTR(-EINVAL); /* mismatch -- return error */ if (rdev && tmp != rdev) return ERR_PTR(-EINVAL); rdev = tmp; } } if (!rdev) return ERR_PTR(-ENODEV); if (netns != wiphy_net(&rdev->wiphy)) return ERR_PTR(-ENODEV); return rdev; } /* * This function returns a pointer to the driver * that the genl_info item that is passed refers to. * * The result of this can be a PTR_ERR and hence must * be checked with IS_ERR() for errors. 
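 * A minimal usage sketch (mirroring the callers of this function):
 *
 *	rdev = cfg80211_get_dev_from_info(genl_info_net(info), info);
 *	if (IS_ERR(rdev))
 *		return PTR_ERR(rdev);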
*/ static struct cfg80211_registered_device * cfg80211_get_dev_from_info(struct net *netns, struct genl_info *info) { return __cfg80211_rdev_from_attrs(netns, info->attrs); } static int validate_ie_attr(const struct nlattr *attr, struct netlink_ext_ack *extack) { const u8 *data = nla_data(attr); unsigned int len = nla_len(attr); struct element *elem; for_each_element(elem, data, len) { /* nothing */ } if (for_each_element_completed(elem, data, len)) return 0; NL_SET_ERR_MSG_ATTR(extack, attr, "malformed information elements"); return -EINVAL; } /* policy for the attributes */ static const struct nla_policy nl80211_ftm_responder_policy[NL80211_FTM_RESP_ATTR_MAX + 1] = { [NL80211_FTM_RESP_ATTR_ENABLED] = { .type = NLA_FLAG, }, [NL80211_FTM_RESP_ATTR_LCI] = { .type = NLA_BINARY, .len = U8_MAX }, [NL80211_FTM_RESP_ATTR_CIVICLOC] = { .type = NLA_BINARY, .len = U8_MAX }, }; static const struct nla_policy nl80211_pmsr_ftm_req_attr_policy[NL80211_PMSR_FTM_REQ_ATTR_MAX + 1] = { [NL80211_PMSR_FTM_REQ_ATTR_ASAP] = { .type = NLA_FLAG }, [NL80211_PMSR_FTM_REQ_ATTR_PREAMBLE] = { .type = NLA_U32 }, [NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP] = NLA_POLICY_MAX(NLA_U8, 15), [NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD] = { .type = NLA_U16 }, [NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION] = NLA_POLICY_MAX(NLA_U8, 15), [NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST] = NLA_POLICY_MAX(NLA_U8, 31), [NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES] = { .type = NLA_U8 }, [NL80211_PMSR_FTM_REQ_ATTR_REQUEST_LCI] = { .type = NLA_FLAG }, [NL80211_PMSR_FTM_REQ_ATTR_REQUEST_CIVICLOC] = { .type = NLA_FLAG }, }; static const struct nla_policy nl80211_pmsr_req_data_policy[NL80211_PMSR_TYPE_MAX + 1] = { [NL80211_PMSR_TYPE_FTM] = NLA_POLICY_NESTED(nl80211_pmsr_ftm_req_attr_policy), }; static const struct nla_policy nl80211_pmsr_req_attr_policy[NL80211_PMSR_REQ_ATTR_MAX + 1] = { [NL80211_PMSR_REQ_ATTR_DATA] = NLA_POLICY_NESTED(nl80211_pmsr_req_data_policy), [NL80211_PMSR_REQ_ATTR_GET_AP_TSF] = { .type = NLA_FLAG }, }; static const struct nla_policy nl80211_psmr_peer_attr_policy[NL80211_PMSR_PEER_ATTR_MAX + 1] = { [NL80211_PMSR_PEER_ATTR_ADDR] = NLA_POLICY_ETH_ADDR, /* * we could specify this again to be the top-level policy, * but that would open us up to recursion problems ... 
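	 * (the peer's channel attributes are instead validated separately,
	 * against the top-level policy, when the measurement request itself
	 * is parsed)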
*/ [NL80211_PMSR_PEER_ATTR_CHAN] = { .type = NLA_NESTED }, [NL80211_PMSR_PEER_ATTR_REQ] = NLA_POLICY_NESTED(nl80211_pmsr_req_attr_policy), [NL80211_PMSR_PEER_ATTR_RESP] = { .type = NLA_REJECT }, }; static const struct nla_policy nl80211_pmsr_attr_policy[NL80211_PMSR_ATTR_MAX + 1] = { [NL80211_PMSR_ATTR_MAX_PEERS] = { .type = NLA_REJECT }, [NL80211_PMSR_ATTR_REPORT_AP_TSF] = { .type = NLA_REJECT }, [NL80211_PMSR_ATTR_RANDOMIZE_MAC_ADDR] = { .type = NLA_REJECT }, [NL80211_PMSR_ATTR_TYPE_CAPA] = { .type = NLA_REJECT }, [NL80211_PMSR_ATTR_PEERS] = NLA_POLICY_NESTED_ARRAY(nl80211_psmr_peer_attr_policy), }; const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = { [NL80211_ATTR_WIPHY] = { .type = NLA_U32 }, [NL80211_ATTR_WIPHY_NAME] = { .type = NLA_NUL_STRING, .len = 20-1 }, [NL80211_ATTR_WIPHY_TXQ_PARAMS] = { .type = NLA_NESTED }, [NL80211_ATTR_WIPHY_FREQ] = { .type = NLA_U32 }, [NL80211_ATTR_WIPHY_CHANNEL_TYPE] = { .type = NLA_U32 }, [NL80211_ATTR_CHANNEL_WIDTH] = { .type = NLA_U32 }, [NL80211_ATTR_CENTER_FREQ1] = { .type = NLA_U32 }, [NL80211_ATTR_CENTER_FREQ2] = { .type = NLA_U32 }, [NL80211_ATTR_WIPHY_RETRY_SHORT] = NLA_POLICY_MIN(NLA_U8, 1), [NL80211_ATTR_WIPHY_RETRY_LONG] = NLA_POLICY_MIN(NLA_U8, 1), [NL80211_ATTR_WIPHY_FRAG_THRESHOLD] = { .type = NLA_U32 }, [NL80211_ATTR_WIPHY_RTS_THRESHOLD] = { .type = NLA_U32 }, [NL80211_ATTR_WIPHY_COVERAGE_CLASS] = { .type = NLA_U8 }, [NL80211_ATTR_WIPHY_DYN_ACK] = { .type = NLA_FLAG }, [NL80211_ATTR_IFTYPE] = NLA_POLICY_MAX(NLA_U32, NL80211_IFTYPE_MAX), [NL80211_ATTR_IFINDEX] = { .type = NLA_U32 }, [NL80211_ATTR_IFNAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ-1 }, [NL80211_ATTR_MAC] = { .len = ETH_ALEN }, [NL80211_ATTR_PREV_BSSID] = { .len = ETH_ALEN }, [NL80211_ATTR_KEY] = { .type = NLA_NESTED, }, [NL80211_ATTR_KEY_DATA] = { .type = NLA_BINARY, .len = WLAN_MAX_KEY_LEN }, [NL80211_ATTR_KEY_IDX] = NLA_POLICY_MAX(NLA_U8, 5), [NL80211_ATTR_KEY_CIPHER] = { .type = NLA_U32 }, [NL80211_ATTR_KEY_DEFAULT] = { .type = NLA_FLAG }, [NL80211_ATTR_KEY_SEQ] = { .type = NLA_BINARY, .len = 16 }, [NL80211_ATTR_KEY_TYPE] = NLA_POLICY_MAX(NLA_U32, NUM_NL80211_KEYTYPES), [NL80211_ATTR_BEACON_INTERVAL] = { .type = NLA_U32 }, [NL80211_ATTR_DTIM_PERIOD] = { .type = NLA_U32 }, [NL80211_ATTR_BEACON_HEAD] = { .type = NLA_BINARY, .len = IEEE80211_MAX_DATA_LEN }, [NL80211_ATTR_BEACON_TAIL] = NLA_POLICY_VALIDATE_FN(NLA_BINARY, validate_ie_attr, IEEE80211_MAX_DATA_LEN), [NL80211_ATTR_STA_AID] = NLA_POLICY_RANGE(NLA_U16, 1, IEEE80211_MAX_AID), [NL80211_ATTR_STA_FLAGS] = { .type = NLA_NESTED }, [NL80211_ATTR_STA_LISTEN_INTERVAL] = { .type = NLA_U16 }, [NL80211_ATTR_STA_SUPPORTED_RATES] = { .type = NLA_BINARY, .len = NL80211_MAX_SUPP_RATES }, [NL80211_ATTR_STA_PLINK_ACTION] = NLA_POLICY_MAX(NLA_U8, NUM_NL80211_PLINK_ACTIONS - 1), [NL80211_ATTR_STA_VLAN] = { .type = NLA_U32 }, [NL80211_ATTR_MNTR_FLAGS] = { /* NLA_NESTED can't be empty */ }, [NL80211_ATTR_MESH_ID] = { .type = NLA_BINARY, .len = IEEE80211_MAX_MESH_ID_LEN }, [NL80211_ATTR_MPATH_NEXT_HOP] = { .type = NLA_U32 }, [NL80211_ATTR_REG_ALPHA2] = { .type = NLA_STRING, .len = 2 }, [NL80211_ATTR_REG_RULES] = { .type = NLA_NESTED }, [NL80211_ATTR_BSS_CTS_PROT] = { .type = NLA_U8 }, [NL80211_ATTR_BSS_SHORT_PREAMBLE] = { .type = NLA_U8 }, [NL80211_ATTR_BSS_SHORT_SLOT_TIME] = { .type = NLA_U8 }, [NL80211_ATTR_BSS_BASIC_RATES] = { .type = NLA_BINARY, .len = NL80211_MAX_SUPP_RATES }, [NL80211_ATTR_BSS_HT_OPMODE] = { .type = NLA_U16 }, [NL80211_ATTR_MESH_CONFIG] = { .type = NLA_NESTED }, [NL80211_ATTR_SUPPORT_MESH_AUTH] = { .type 
= NLA_FLAG }, [NL80211_ATTR_HT_CAPABILITY] = { .len = NL80211_HT_CAPABILITY_LEN }, [NL80211_ATTR_MGMT_SUBTYPE] = { .type = NLA_U8 }, [NL80211_ATTR_IE] = NLA_POLICY_VALIDATE_FN(NLA_BINARY, validate_ie_attr, IEEE80211_MAX_DATA_LEN), [NL80211_ATTR_SCAN_FREQUENCIES] = { .type = NLA_NESTED }, [NL80211_ATTR_SCAN_SSIDS] = { .type = NLA_NESTED }, [NL80211_ATTR_SSID] = { .type = NLA_BINARY, .len = IEEE80211_MAX_SSID_LEN }, [NL80211_ATTR_AUTH_TYPE] = { .type = NLA_U32 }, [NL80211_ATTR_REASON_CODE] = { .type = NLA_U16 }, [NL80211_ATTR_FREQ_FIXED] = { .type = NLA_FLAG }, [NL80211_ATTR_TIMED_OUT] = { .type = NLA_FLAG }, [NL80211_ATTR_USE_MFP] = NLA_POLICY_RANGE(NLA_U32, NL80211_MFP_NO, NL80211_MFP_OPTIONAL), [NL80211_ATTR_STA_FLAGS2] = { .len = sizeof(struct nl80211_sta_flag_update), }, [NL80211_ATTR_CONTROL_PORT] = { .type = NLA_FLAG }, [NL80211_ATTR_CONTROL_PORT_ETHERTYPE] = { .type = NLA_U16 }, [NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT] = { .type = NLA_FLAG }, [NL80211_ATTR_CONTROL_PORT_OVER_NL80211] = { .type = NLA_FLAG }, [NL80211_ATTR_PRIVACY] = { .type = NLA_FLAG }, [NL80211_ATTR_WPA_VERSIONS] = { .type = NLA_U32 }, [NL80211_ATTR_PID] = { .type = NLA_U32 }, [NL80211_ATTR_4ADDR] = { .type = NLA_U8 }, [NL80211_ATTR_PMKID] = { .len = WLAN_PMKID_LEN }, [NL80211_ATTR_DURATION] = { .type = NLA_U32 }, [NL80211_ATTR_COOKIE] = { .type = NLA_U64 }, [NL80211_ATTR_TX_RATES] = { .type = NLA_NESTED }, [NL80211_ATTR_FRAME] = { .type = NLA_BINARY, .len = IEEE80211_MAX_DATA_LEN }, [NL80211_ATTR_FRAME_MATCH] = { .type = NLA_BINARY, }, [NL80211_ATTR_PS_STATE] = NLA_POLICY_RANGE(NLA_U32, NL80211_PS_DISABLED, NL80211_PS_ENABLED), [NL80211_ATTR_CQM] = { .type = NLA_NESTED, }, [NL80211_ATTR_LOCAL_STATE_CHANGE] = { .type = NLA_FLAG }, [NL80211_ATTR_AP_ISOLATE] = { .type = NLA_U8 }, [NL80211_ATTR_WIPHY_TX_POWER_SETTING] = { .type = NLA_U32 }, [NL80211_ATTR_WIPHY_TX_POWER_LEVEL] = { .type = NLA_U32 }, [NL80211_ATTR_FRAME_TYPE] = { .type = NLA_U16 }, [NL80211_ATTR_WIPHY_ANTENNA_TX] = { .type = NLA_U32 }, [NL80211_ATTR_WIPHY_ANTENNA_RX] = { .type = NLA_U32 }, [NL80211_ATTR_MCAST_RATE] = { .type = NLA_U32 }, [NL80211_ATTR_OFFCHANNEL_TX_OK] = { .type = NLA_FLAG }, [NL80211_ATTR_KEY_DEFAULT_TYPES] = { .type = NLA_NESTED }, [NL80211_ATTR_WOWLAN_TRIGGERS] = { .type = NLA_NESTED }, [NL80211_ATTR_STA_PLINK_STATE] = NLA_POLICY_MAX(NLA_U8, NUM_NL80211_PLINK_STATES - 1), [NL80211_ATTR_MESH_PEER_AID] = NLA_POLICY_RANGE(NLA_U16, 1, IEEE80211_MAX_AID), [NL80211_ATTR_SCHED_SCAN_INTERVAL] = { .type = NLA_U32 }, [NL80211_ATTR_REKEY_DATA] = { .type = NLA_NESTED }, [NL80211_ATTR_SCAN_SUPP_RATES] = { .type = NLA_NESTED }, [NL80211_ATTR_HIDDEN_SSID] = NLA_POLICY_RANGE(NLA_U32, NL80211_HIDDEN_SSID_NOT_IN_USE, NL80211_HIDDEN_SSID_ZERO_CONTENTS), [NL80211_ATTR_IE_PROBE_RESP] = NLA_POLICY_VALIDATE_FN(NLA_BINARY, validate_ie_attr, IEEE80211_MAX_DATA_LEN), [NL80211_ATTR_IE_ASSOC_RESP] = NLA_POLICY_VALIDATE_FN(NLA_BINARY, validate_ie_attr, IEEE80211_MAX_DATA_LEN), [NL80211_ATTR_ROAM_SUPPORT] = { .type = NLA_FLAG }, [NL80211_ATTR_SCHED_SCAN_MATCH] = { .type = NLA_NESTED }, [NL80211_ATTR_TX_NO_CCK_RATE] = { .type = NLA_FLAG }, [NL80211_ATTR_TDLS_ACTION] = { .type = NLA_U8 }, [NL80211_ATTR_TDLS_DIALOG_TOKEN] = { .type = NLA_U8 }, [NL80211_ATTR_TDLS_OPERATION] = { .type = NLA_U8 }, [NL80211_ATTR_TDLS_SUPPORT] = { .type = NLA_FLAG }, [NL80211_ATTR_TDLS_EXTERNAL_SETUP] = { .type = NLA_FLAG }, [NL80211_ATTR_TDLS_INITIATOR] = { .type = NLA_FLAG }, [NL80211_ATTR_DONT_WAIT_FOR_ACK] = { .type = NLA_FLAG }, [NL80211_ATTR_PROBE_RESP] = { .type = NLA_BINARY, .len = 
IEEE80211_MAX_DATA_LEN }, [NL80211_ATTR_DFS_REGION] = { .type = NLA_U8 }, [NL80211_ATTR_DISABLE_HT] = { .type = NLA_FLAG }, [NL80211_ATTR_HT_CAPABILITY_MASK] = { .len = NL80211_HT_CAPABILITY_LEN }, [NL80211_ATTR_NOACK_MAP] = { .type = NLA_U16 }, [NL80211_ATTR_INACTIVITY_TIMEOUT] = { .type = NLA_U16 }, [NL80211_ATTR_BG_SCAN_PERIOD] = { .type = NLA_U16 }, [NL80211_ATTR_WDEV] = { .type = NLA_U64 }, [NL80211_ATTR_USER_REG_HINT_TYPE] = { .type = NLA_U32 }, [NL80211_ATTR_AUTH_DATA] = { .type = NLA_BINARY, }, [NL80211_ATTR_VHT_CAPABILITY] = { .len = NL80211_VHT_CAPABILITY_LEN }, [NL80211_ATTR_SCAN_FLAGS] = { .type = NLA_U32 }, [NL80211_ATTR_P2P_CTWINDOW] = NLA_POLICY_MAX(NLA_U8, 127), [NL80211_ATTR_P2P_OPPPS] = NLA_POLICY_MAX(NLA_U8, 1), [NL80211_ATTR_LOCAL_MESH_POWER_MODE] = NLA_POLICY_RANGE(NLA_U32, NL80211_MESH_POWER_UNKNOWN + 1, NL80211_MESH_POWER_MAX), [NL80211_ATTR_ACL_POLICY] = {. type = NLA_U32 }, [NL80211_ATTR_MAC_ADDRS] = { .type = NLA_NESTED }, [NL80211_ATTR_STA_CAPABILITY] = { .type = NLA_U16 }, [NL80211_ATTR_STA_EXT_CAPABILITY] = { .type = NLA_BINARY, }, [NL80211_ATTR_SPLIT_WIPHY_DUMP] = { .type = NLA_FLAG, }, [NL80211_ATTR_DISABLE_VHT] = { .type = NLA_FLAG }, [NL80211_ATTR_VHT_CAPABILITY_MASK] = { .len = NL80211_VHT_CAPABILITY_LEN, }, [NL80211_ATTR_MDID] = { .type = NLA_U16 }, [NL80211_ATTR_IE_RIC] = { .type = NLA_BINARY, .len = IEEE80211_MAX_DATA_LEN }, [NL80211_ATTR_PEER_AID] = NLA_POLICY_RANGE(NLA_U16, 1, IEEE80211_MAX_AID), [NL80211_ATTR_CH_SWITCH_COUNT] = { .type = NLA_U32 }, [NL80211_ATTR_CH_SWITCH_BLOCK_TX] = { .type = NLA_FLAG }, [NL80211_ATTR_CSA_IES] = { .type = NLA_NESTED }, [NL80211_ATTR_CSA_C_OFF_BEACON] = { .type = NLA_BINARY }, [NL80211_ATTR_CSA_C_OFF_PRESP] = { .type = NLA_BINARY }, [NL80211_ATTR_STA_SUPPORTED_CHANNELS] = { .type = NLA_BINARY }, [NL80211_ATTR_STA_SUPPORTED_OPER_CLASSES] = { .type = NLA_BINARY }, [NL80211_ATTR_HANDLE_DFS] = { .type = NLA_FLAG }, [NL80211_ATTR_OPMODE_NOTIF] = { .type = NLA_U8 }, [NL80211_ATTR_VENDOR_ID] = { .type = NLA_U32 }, [NL80211_ATTR_VENDOR_SUBCMD] = { .type = NLA_U32 }, [NL80211_ATTR_VENDOR_DATA] = { .type = NLA_BINARY }, [NL80211_ATTR_QOS_MAP] = { .type = NLA_BINARY, .len = IEEE80211_QOS_MAP_LEN_MAX }, [NL80211_ATTR_MAC_HINT] = { .len = ETH_ALEN }, [NL80211_ATTR_WIPHY_FREQ_HINT] = { .type = NLA_U32 }, [NL80211_ATTR_TDLS_PEER_CAPABILITY] = { .type = NLA_U32 }, [NL80211_ATTR_SOCKET_OWNER] = { .type = NLA_FLAG }, [NL80211_ATTR_CSA_C_OFFSETS_TX] = { .type = NLA_BINARY }, [NL80211_ATTR_USE_RRM] = { .type = NLA_FLAG }, [NL80211_ATTR_TSID] = NLA_POLICY_MAX(NLA_U8, IEEE80211_NUM_TIDS - 1), [NL80211_ATTR_USER_PRIO] = NLA_POLICY_MAX(NLA_U8, IEEE80211_NUM_UPS - 1), [NL80211_ATTR_ADMITTED_TIME] = { .type = NLA_U16 }, [NL80211_ATTR_SMPS_MODE] = { .type = NLA_U8 }, [NL80211_ATTR_MAC_MASK] = { .len = ETH_ALEN }, [NL80211_ATTR_WIPHY_SELF_MANAGED_REG] = { .type = NLA_FLAG }, [NL80211_ATTR_NETNS_FD] = { .type = NLA_U32 }, [NL80211_ATTR_SCHED_SCAN_DELAY] = { .type = NLA_U32 }, [NL80211_ATTR_REG_INDOOR] = { .type = NLA_FLAG }, [NL80211_ATTR_PBSS] = { .type = NLA_FLAG }, [NL80211_ATTR_BSS_SELECT] = { .type = NLA_NESTED }, [NL80211_ATTR_STA_SUPPORT_P2P_PS] = NLA_POLICY_MAX(NLA_U8, NUM_NL80211_P2P_PS_STATUS - 1), [NL80211_ATTR_MU_MIMO_GROUP_DATA] = { .len = VHT_MUMIMO_GROUPS_DATA_LEN }, [NL80211_ATTR_MU_MIMO_FOLLOW_MAC_ADDR] = { .len = ETH_ALEN }, [NL80211_ATTR_NAN_MASTER_PREF] = NLA_POLICY_MIN(NLA_U8, 1), [NL80211_ATTR_BANDS] = { .type = NLA_U32 }, [NL80211_ATTR_NAN_FUNC] = { .type = NLA_NESTED }, [NL80211_ATTR_FILS_KEK] = { .type = NLA_BINARY, .len 
= FILS_MAX_KEK_LEN }, [NL80211_ATTR_FILS_NONCES] = { .len = 2 * FILS_NONCE_LEN }, [NL80211_ATTR_MULTICAST_TO_UNICAST_ENABLED] = { .type = NLA_FLAG, }, [NL80211_ATTR_BSSID] = { .len = ETH_ALEN }, [NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI] = { .type = NLA_S8 }, [NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST] = { .len = sizeof(struct nl80211_bss_select_rssi_adjust) }, [NL80211_ATTR_TIMEOUT_REASON] = { .type = NLA_U32 }, [NL80211_ATTR_FILS_ERP_USERNAME] = { .type = NLA_BINARY, .len = FILS_ERP_MAX_USERNAME_LEN }, [NL80211_ATTR_FILS_ERP_REALM] = { .type = NLA_BINARY, .len = FILS_ERP_MAX_REALM_LEN }, [NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM] = { .type = NLA_U16 }, [NL80211_ATTR_FILS_ERP_RRK] = { .type = NLA_BINARY, .len = FILS_ERP_MAX_RRK_LEN }, [NL80211_ATTR_FILS_CACHE_ID] = { .len = 2 }, [NL80211_ATTR_PMK] = { .type = NLA_BINARY, .len = PMK_MAX_LEN }, [NL80211_ATTR_SCHED_SCAN_MULTI] = { .type = NLA_FLAG }, [NL80211_ATTR_EXTERNAL_AUTH_SUPPORT] = { .type = NLA_FLAG }, [NL80211_ATTR_TXQ_LIMIT] = { .type = NLA_U32 }, [NL80211_ATTR_TXQ_MEMORY_LIMIT] = { .type = NLA_U32 }, [NL80211_ATTR_TXQ_QUANTUM] = { .type = NLA_U32 }, [NL80211_ATTR_HE_CAPABILITY] = { .type = NLA_BINARY, .len = NL80211_HE_MAX_CAPABILITY_LEN }, [NL80211_ATTR_FTM_RESPONDER] = { .type = NLA_NESTED, .validation_data = nl80211_ftm_responder_policy, }, [NL80211_ATTR_TIMEOUT] = NLA_POLICY_MIN(NLA_U32, 1), [NL80211_ATTR_PEER_MEASUREMENTS] = NLA_POLICY_NESTED(nl80211_pmsr_attr_policy), [NL80211_ATTR_NAN_CDW_2G] = { .type = NLA_U8 }, [NL80211_ATTR_NAN_CDW_5G] = { .type = NLA_U8 }, }; /* policy for the key attributes */ static const struct nla_policy nl80211_key_policy[NL80211_KEY_MAX + 1] = { [NL80211_KEY_DATA] = { .type = NLA_BINARY, .len = WLAN_MAX_KEY_LEN }, [NL80211_KEY_IDX] = { .type = NLA_U8 }, [NL80211_KEY_CIPHER] = { .type = NLA_U32 }, [NL80211_KEY_SEQ] = { .type = NLA_BINARY, .len = 16 }, [NL80211_KEY_DEFAULT] = { .type = NLA_FLAG }, [NL80211_KEY_DEFAULT_MGMT] = { .type = NLA_FLAG }, [NL80211_KEY_TYPE] = NLA_POLICY_MAX(NLA_U32, NUM_NL80211_KEYTYPES - 1), [NL80211_KEY_DEFAULT_TYPES] = { .type = NLA_NESTED }, }; /* policy for the key default flags */ static const struct nla_policy nl80211_key_default_policy[NUM_NL80211_KEY_DEFAULT_TYPES] = { [NL80211_KEY_DEFAULT_TYPE_UNICAST] = { .type = NLA_FLAG }, [NL80211_KEY_DEFAULT_TYPE_MULTICAST] = { .type = NLA_FLAG }, }; #ifdef CONFIG_PM /* policy for WoWLAN attributes */ static const struct nla_policy nl80211_wowlan_policy[NUM_NL80211_WOWLAN_TRIG] = { [NL80211_WOWLAN_TRIG_ANY] = { .type = NLA_FLAG }, [NL80211_WOWLAN_TRIG_DISCONNECT] = { .type = NLA_FLAG }, [NL80211_WOWLAN_TRIG_MAGIC_PKT] = { .type = NLA_FLAG }, [NL80211_WOWLAN_TRIG_PKT_PATTERN] = { .type = NLA_NESTED }, [NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE] = { .type = NLA_FLAG }, [NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST] = { .type = NLA_FLAG }, [NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE] = { .type = NLA_FLAG }, [NL80211_WOWLAN_TRIG_RFKILL_RELEASE] = { .type = NLA_FLAG }, [NL80211_WOWLAN_TRIG_TCP_CONNECTION] = { .type = NLA_NESTED }, [NL80211_WOWLAN_TRIG_NET_DETECT] = { .type = NLA_NESTED }, }; static const struct nla_policy nl80211_wowlan_tcp_policy[NUM_NL80211_WOWLAN_TCP] = { [NL80211_WOWLAN_TCP_SRC_IPV4] = { .type = NLA_U32 }, [NL80211_WOWLAN_TCP_DST_IPV4] = { .type = NLA_U32 }, [NL80211_WOWLAN_TCP_DST_MAC] = { .len = ETH_ALEN }, [NL80211_WOWLAN_TCP_SRC_PORT] = { .type = NLA_U16 }, [NL80211_WOWLAN_TCP_DST_PORT] = { .type = NLA_U16 }, [NL80211_WOWLAN_TCP_DATA_PAYLOAD] = { .len = 1 }, [NL80211_WOWLAN_TCP_DATA_PAYLOAD_SEQ] = { .len = sizeof(struct 
nl80211_wowlan_tcp_data_seq) }, [NL80211_WOWLAN_TCP_DATA_PAYLOAD_TOKEN] = { .len = sizeof(struct nl80211_wowlan_tcp_data_token) }, [NL80211_WOWLAN_TCP_DATA_INTERVAL] = { .type = NLA_U32 }, [NL80211_WOWLAN_TCP_WAKE_PAYLOAD] = { .len = 1 }, [NL80211_WOWLAN_TCP_WAKE_MASK] = { .len = 1 }, }; #endif /* CONFIG_PM */ /* policy for coalesce rule attributes */ static const struct nla_policy nl80211_coalesce_policy[NUM_NL80211_ATTR_COALESCE_RULE] = { [NL80211_ATTR_COALESCE_RULE_DELAY] = { .type = NLA_U32 }, [NL80211_ATTR_COALESCE_RULE_CONDITION] = NLA_POLICY_RANGE(NLA_U32, NL80211_COALESCE_CONDITION_MATCH, NL80211_COALESCE_CONDITION_NO_MATCH), [NL80211_ATTR_COALESCE_RULE_PKT_PATTERN] = { .type = NLA_NESTED }, }; /* policy for GTK rekey offload attributes */ static const struct nla_policy nl80211_rekey_policy[NUM_NL80211_REKEY_DATA] = { [NL80211_REKEY_DATA_KEK] = { .len = NL80211_KEK_LEN }, [NL80211_REKEY_DATA_KCK] = { .len = NL80211_KCK_LEN }, [NL80211_REKEY_DATA_REPLAY_CTR] = { .len = NL80211_REPLAY_CTR_LEN }, }; static const struct nla_policy nl80211_match_policy[NL80211_SCHED_SCAN_MATCH_ATTR_MAX + 1] = { [NL80211_SCHED_SCAN_MATCH_ATTR_SSID] = { .type = NLA_BINARY, .len = IEEE80211_MAX_SSID_LEN }, [NL80211_SCHED_SCAN_MATCH_ATTR_BSSID] = { .len = ETH_ALEN }, [NL80211_SCHED_SCAN_MATCH_ATTR_RSSI] = { .type = NLA_U32 }, }; static const struct nla_policy nl80211_plan_policy[NL80211_SCHED_SCAN_PLAN_MAX + 1] = { [NL80211_SCHED_SCAN_PLAN_INTERVAL] = { .type = NLA_U32 }, [NL80211_SCHED_SCAN_PLAN_ITERATIONS] = { .type = NLA_U32 }, }; static const struct nla_policy nl80211_bss_select_policy[NL80211_BSS_SELECT_ATTR_MAX + 1] = { [NL80211_BSS_SELECT_ATTR_RSSI] = { .type = NLA_FLAG }, [NL80211_BSS_SELECT_ATTR_BAND_PREF] = { .type = NLA_U32 }, [NL80211_BSS_SELECT_ATTR_RSSI_ADJUST] = { .len = sizeof(struct nl80211_bss_select_rssi_adjust) }, }; /* policy for NAN function attributes */ static const struct nla_policy nl80211_nan_func_policy[NL80211_NAN_FUNC_ATTR_MAX + 1] = { [NL80211_NAN_FUNC_TYPE] = { .type = NLA_U8 }, [NL80211_NAN_FUNC_SERVICE_ID] = { .len = NL80211_NAN_FUNC_SERVICE_ID_LEN }, [NL80211_NAN_FUNC_PUBLISH_TYPE] = { .type = NLA_U8 }, [NL80211_NAN_FUNC_PUBLISH_BCAST] = { .type = NLA_FLAG }, [NL80211_NAN_FUNC_SUBSCRIBE_ACTIVE] = { .type = NLA_FLAG }, [NL80211_NAN_FUNC_FOLLOW_UP_ID] = { .type = NLA_U8 }, [NL80211_NAN_FUNC_FOLLOW_UP_REQ_ID] = { .type = NLA_U8 }, [NL80211_NAN_FUNC_FOLLOW_UP_DEST] = { .len = ETH_ALEN }, [NL80211_NAN_FUNC_CLOSE_RANGE] = { .type = NLA_FLAG }, [NL80211_NAN_FUNC_TTL] = { .type = NLA_U32 }, [NL80211_NAN_FUNC_SERVICE_INFO] = { .type = NLA_BINARY, .len = NL80211_NAN_FUNC_SERVICE_SPEC_INFO_MAX_LEN }, [NL80211_NAN_FUNC_SRF] = { .type = NLA_NESTED }, [NL80211_NAN_FUNC_RX_MATCH_FILTER] = { .type = NLA_NESTED }, [NL80211_NAN_FUNC_TX_MATCH_FILTER] = { .type = NLA_NESTED }, [NL80211_NAN_FUNC_INSTANCE_ID] = { .type = NLA_U8 }, [NL80211_NAN_FUNC_TERM_REASON] = { .type = NLA_U8 }, [NL80211_NAN_FUNC_SECURITY_CIPHER_SUITES] = { .type = NLA_U32 }, [NL80211_NAN_FUNC_SECURITY_CTX_IDS] = { .type = NLA_NESTED }, [NL80211_NAN_FUNC_AWAKE_DW_INTERVAL] = { .type = NLA_U8 }, }; /* policy for Service Response Filter attributes */ static const struct nla_policy nl80211_nan_srf_policy[NL80211_NAN_SRF_ATTR_MAX + 1] = { [NL80211_NAN_SRF_INCLUDE] = { .type = NLA_FLAG }, [NL80211_NAN_SRF_BF] = { .type = NLA_BINARY, .len = NL80211_NAN_FUNC_SRF_MAX_LEN }, [NL80211_NAN_SRF_BF_IDX] = { .type = NLA_U8 }, [NL80211_NAN_SRF_MAC_ADDRS] = { .type = NLA_NESTED }, }; /* policy for packet pattern attributes */ static const 
struct nla_policy nl80211_packet_pattern_policy[MAX_NL80211_PKTPAT + 1] = { [NL80211_PKTPAT_MASK] = { .type = NLA_BINARY, }, [NL80211_PKTPAT_PATTERN] = { .type = NLA_BINARY, }, [NL80211_PKTPAT_OFFSET] = { .type = NLA_U32 }, }; static const struct nla_policy nl80211_nan_sec_ctx_policy[NL80211_NAN_SEC_CTX_MAX + 1] = { [NL80211_NAN_SEC_CTX_ID_TYPE] = { .type = NLA_U32 }, [NL80211_NAN_SEC_CTX_ID_DATA] = { .type = NLA_BINARY }, }; int nl80211_prepare_wdev_dump(struct netlink_callback *cb, struct cfg80211_registered_device **rdev, struct wireless_dev **wdev) { int err; if (!cb->args[0]) { err = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize, genl_family_attrbuf(&nl80211_fam), nl80211_fam.maxattr, nl80211_policy, NULL); if (err) return err; *wdev = __cfg80211_wdev_from_attrs( sock_net(cb->skb->sk), genl_family_attrbuf(&nl80211_fam)); if (IS_ERR(*wdev)) return PTR_ERR(*wdev); *rdev = wiphy_to_rdev((*wdev)->wiphy); /* 0 is the first index - add 1 to parse only once */ cb->args[0] = (*rdev)->wiphy_idx + 1; cb->args[1] = (*wdev)->identifier; } else { /* subtract the 1 again here */ struct wiphy *wiphy = wiphy_idx_to_wiphy(cb->args[0] - 1); struct wireless_dev *tmp; if (!wiphy) return -ENODEV; *rdev = wiphy_to_rdev(wiphy); *wdev = NULL; list_for_each_entry(tmp, &(*rdev)->wiphy.wdev_list, list) { if (tmp->identifier == cb->args[1]) { *wdev = tmp; break; } } if (!*wdev) return -ENODEV; } return 0; } /* message building helper */ void *nl80211hdr_put(struct sk_buff *skb, u32 portid, u32 seq, int flags, u8 cmd) { /* since there is no private header just add the generic one */ return genlmsg_put(skb, portid, seq, &nl80211_fam, flags, cmd); } static int nl80211_msg_put_wmm_rules(struct sk_buff *msg, const struct ieee80211_reg_rule *rule) { int j; struct nlattr *nl_wmm_rules = nla_nest_start(msg, NL80211_FREQUENCY_ATTR_WMM); if (!nl_wmm_rules) goto nla_put_failure; for (j = 0; j < IEEE80211_NUM_ACS; j++) { struct nlattr *nl_wmm_rule = nla_nest_start(msg, j); if (!nl_wmm_rule) goto nla_put_failure; if (nla_put_u16(msg, NL80211_WMMR_CW_MIN, rule->wmm_rule.client[j].cw_min) || nla_put_u16(msg, NL80211_WMMR_CW_MAX, rule->wmm_rule.client[j].cw_max) || nla_put_u8(msg, NL80211_WMMR_AIFSN, rule->wmm_rule.client[j].aifsn) || nla_put_u16(msg, NL80211_WMMR_TXOP, rule->wmm_rule.client[j].cot)) goto nla_put_failure; nla_nest_end(msg, nl_wmm_rule); } nla_nest_end(msg, nl_wmm_rules); return 0; nla_put_failure: return -ENOBUFS; } static int nl80211_msg_put_channel(struct sk_buff *msg, struct wiphy *wiphy, struct ieee80211_channel *chan, bool large) { /* Some channels must be completely excluded from the * list to protect old user-space tools from breaking */ if (!large && chan->flags & (IEEE80211_CHAN_NO_10MHZ | IEEE80211_CHAN_NO_20MHZ)) return 0; if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_FREQ, chan->center_freq)) goto nla_put_failure; if ((chan->flags & IEEE80211_CHAN_DISABLED) && nla_put_flag(msg, NL80211_FREQUENCY_ATTR_DISABLED)) goto nla_put_failure; if (chan->flags & IEEE80211_CHAN_NO_IR) { if (nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_IR)) goto nla_put_failure; if (nla_put_flag(msg, __NL80211_FREQUENCY_ATTR_NO_IBSS)) goto nla_put_failure; } if (chan->flags & IEEE80211_CHAN_RADAR) { if (nla_put_flag(msg, NL80211_FREQUENCY_ATTR_RADAR)) goto nla_put_failure; if (large) { u32 time; time = elapsed_jiffies_msecs(chan->dfs_state_entered); if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_DFS_STATE, chan->dfs_state)) goto nla_put_failure; if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_DFS_TIME, time)) goto nla_put_failure; 
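/* the DFS state and the time (in ms) spent in that state were reported above; also report the CAC duration (in ms) that must be observed on this channel before it can be used */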
if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_DFS_CAC_TIME, chan->dfs_cac_ms)) goto nla_put_failure; } } if (large) { if ((chan->flags & IEEE80211_CHAN_NO_HT40MINUS) && nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_HT40_MINUS)) goto nla_put_failure; if ((chan->flags & IEEE80211_CHAN_NO_HT40PLUS) && nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_HT40_PLUS)) goto nla_put_failure; if ((chan->flags & IEEE80211_CHAN_NO_80MHZ) && nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_80MHZ)) goto nla_put_failure; if ((chan->flags & IEEE80211_CHAN_NO_160MHZ) && nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_160MHZ)) goto nla_put_failure; if ((chan->flags & IEEE80211_CHAN_INDOOR_ONLY) && nla_put_flag(msg, NL80211_FREQUENCY_ATTR_INDOOR_ONLY)) goto nla_put_failure; if ((chan->flags & IEEE80211_CHAN_IR_CONCURRENT) && nla_put_flag(msg, NL80211_FREQUENCY_ATTR_IR_CONCURRENT)) goto nla_put_failure; if ((chan->flags & IEEE80211_CHAN_NO_20MHZ) && nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_20MHZ)) goto nla_put_failure; if ((chan->flags & IEEE80211_CHAN_NO_10MHZ) && nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_10MHZ)) goto nla_put_failure; } if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_MAX_TX_POWER, DBM_TO_MBM(chan->max_power))) goto nla_put_failure; if (large) { const struct ieee80211_reg_rule *rule = freq_reg_info(wiphy, MHZ_TO_KHZ(chan->center_freq)); if (!IS_ERR_OR_NULL(rule) && rule->has_wmm) { if (nl80211_msg_put_wmm_rules(msg, rule)) goto nla_put_failure; } } return 0; nla_put_failure: return -ENOBUFS; } static bool nl80211_put_txq_stats(struct sk_buff *msg, struct cfg80211_txq_stats *txqstats, int attrtype) { struct nlattr *txqattr; #define PUT_TXQVAL_U32(attr, memb) do { \ if (txqstats->filled & BIT(NL80211_TXQ_STATS_ ## attr) && \ nla_put_u32(msg, NL80211_TXQ_STATS_ ## attr, txqstats->memb)) \ return false; \ } while (0) txqattr = nla_nest_start(msg, attrtype); if (!txqattr) return false; PUT_TXQVAL_U32(BACKLOG_BYTES, backlog_bytes); PUT_TXQVAL_U32(BACKLOG_PACKETS, backlog_packets); PUT_TXQVAL_U32(FLOWS, flows); PUT_TXQVAL_U32(DROPS, drops); PUT_TXQVAL_U32(ECN_MARKS, ecn_marks); PUT_TXQVAL_U32(OVERLIMIT, overlimit); PUT_TXQVAL_U32(OVERMEMORY, overmemory); PUT_TXQVAL_U32(COLLISIONS, collisions); PUT_TXQVAL_U32(TX_BYTES, tx_bytes); PUT_TXQVAL_U32(TX_PACKETS, tx_packets); PUT_TXQVAL_U32(MAX_FLOWS, max_flows); nla_nest_end(msg, txqattr); #undef PUT_TXQVAL_U32 return true; } /* netlink command implementations */ struct key_parse { struct key_params p; int idx; int type; bool def, defmgmt; bool def_uni, def_multi; }; static int nl80211_parse_key_new(struct genl_info *info, struct nlattr *key, struct key_parse *k) { struct nlattr *tb[NL80211_KEY_MAX + 1]; int err = nla_parse_nested(tb, NL80211_KEY_MAX, key, nl80211_key_policy, genl_info_extack(info)); if (err) return err; k->def = !!tb[NL80211_KEY_DEFAULT]; k->defmgmt = !!tb[NL80211_KEY_DEFAULT_MGMT]; if (k->def) { k->def_uni = true; k->def_multi = true; } if (k->defmgmt) k->def_multi = true; if (tb[NL80211_KEY_IDX]) k->idx = nla_get_u8(tb[NL80211_KEY_IDX]); if (tb[NL80211_KEY_DATA]) { k->p.key = nla_data(tb[NL80211_KEY_DATA]); k->p.key_len = nla_len(tb[NL80211_KEY_DATA]); } if (tb[NL80211_KEY_SEQ]) { k->p.seq = nla_data(tb[NL80211_KEY_SEQ]); k->p.seq_len = nla_len(tb[NL80211_KEY_SEQ]); } if (tb[NL80211_KEY_CIPHER]) k->p.cipher = nla_get_u32(tb[NL80211_KEY_CIPHER]); if (tb[NL80211_KEY_TYPE]) k->type = nla_get_u32(tb[NL80211_KEY_TYPE]); if (tb[NL80211_KEY_DEFAULT_TYPES]) { struct nlattr *kdt[NUM_NL80211_KEY_DEFAULT_TYPES]; err = nla_parse_nested(kdt, 
NUM_NL80211_KEY_DEFAULT_TYPES - 1, tb[NL80211_KEY_DEFAULT_TYPES], nl80211_key_default_policy, genl_info_extack(info)); if (err) return err; k->def_uni = kdt[NL80211_KEY_DEFAULT_TYPE_UNICAST]; k->def_multi = kdt[NL80211_KEY_DEFAULT_TYPE_MULTICAST]; } return 0; } static int nl80211_parse_key_old(struct genl_info *info, struct key_parse *k) { if (info->attrs[NL80211_ATTR_KEY_DATA]) { k->p.key = nla_data(info->attrs[NL80211_ATTR_KEY_DATA]); k->p.key_len = nla_len(info->attrs[NL80211_ATTR_KEY_DATA]); } if (info->attrs[NL80211_ATTR_KEY_SEQ]) { k->p.seq = nla_data(info->attrs[NL80211_ATTR_KEY_SEQ]); k->p.seq_len = nla_len(info->attrs[NL80211_ATTR_KEY_SEQ]); } if (info->attrs[NL80211_ATTR_KEY_IDX]) k->idx = nla_get_u8(info->attrs[NL80211_ATTR_KEY_IDX]); if (info->attrs[NL80211_ATTR_KEY_CIPHER]) k->p.cipher = nla_get_u32(info->attrs[NL80211_ATTR_KEY_CIPHER]); k->def = !!info->attrs[NL80211_ATTR_KEY_DEFAULT]; k->defmgmt = !!info->attrs[NL80211_ATTR_KEY_DEFAULT_MGMT]; if (k->def) { k->def_uni = true; k->def_multi = true; } if (k->defmgmt) k->def_multi = true; if (info->attrs[NL80211_ATTR_KEY_TYPE]) k->type = nla_get_u32(info->attrs[NL80211_ATTR_KEY_TYPE]); if (info->attrs[NL80211_ATTR_KEY_DEFAULT_TYPES]) { struct nlattr *kdt[NUM_NL80211_KEY_DEFAULT_TYPES]; int err = nla_parse_nested(kdt, NUM_NL80211_KEY_DEFAULT_TYPES - 1, info->attrs[NL80211_ATTR_KEY_DEFAULT_TYPES], nl80211_key_default_policy, genl_info_extack(info)); if (err) return err; k->def_uni = kdt[NL80211_KEY_DEFAULT_TYPE_UNICAST]; k->def_multi = kdt[NL80211_KEY_DEFAULT_TYPE_MULTICAST]; } return 0; } static int nl80211_parse_key(struct genl_info *info, struct key_parse *k) { int err; memset(k, 0, sizeof(*k)); k->idx = -1; k->type = -1; if (info->attrs[NL80211_ATTR_KEY]) err = nl80211_parse_key_new(info, info->attrs[NL80211_ATTR_KEY], k); else err = nl80211_parse_key_old(info, k); if (err) return err; if (k->def && k->defmgmt) { GENL_SET_ERR_MSG(info, "key with def && defmgmt is invalid"); return -EINVAL; } if (k->defmgmt) { if (k->def_uni || !k->def_multi) { GENL_SET_ERR_MSG(info, "defmgmt key must be mcast"); return -EINVAL; } } if (k->idx != -1) { if (k->defmgmt) { if (k->idx < 4 || k->idx > 5) { GENL_SET_ERR_MSG(info, "defmgmt key idx not 4 or 5"); return -EINVAL; } } else if (k->def) { if (k->idx < 0 || k->idx > 3) { GENL_SET_ERR_MSG(info, "def key idx not 0-3"); return -EINVAL; } } else { if (k->idx < 0 || k->idx > 5) { GENL_SET_ERR_MSG(info, "key idx not 0-5"); return -EINVAL; } } } return 0; } static struct cfg80211_cached_keys * nl80211_parse_connkeys(struct cfg80211_registered_device *rdev, struct genl_info *info, bool *no_ht) { struct nlattr *keys = info->attrs[NL80211_ATTR_KEYS]; struct key_parse parse; struct nlattr *key; struct cfg80211_cached_keys *result; int rem, err, def = 0; bool have_key = false; nla_for_each_nested(key, keys, rem) { have_key = true; break; } if (!have_key) return NULL; result = kzalloc(sizeof(*result), GFP_KERNEL); if (!result) return ERR_PTR(-ENOMEM); result->def = -1; nla_for_each_nested(key, keys, rem) { memset(&parse, 0, sizeof(parse)); parse.idx = -1; err = nl80211_parse_key_new(info, key, &parse); if (err) goto error; err = -EINVAL; if (!parse.p.key) goto error; if (parse.idx < 0 || parse.idx > 3) { GENL_SET_ERR_MSG(info, "key index out of range [0-3]"); goto error; } if (parse.def) { if (def) { GENL_SET_ERR_MSG(info, "only one key can be default"); goto error; } def = 1; result->def = parse.idx; if (!parse.def_uni || !parse.def_multi) goto error; } else if (parse.defmgmt) goto error; err = 
cfg80211_validate_key_settings(rdev, &parse.p, parse.idx, false, NULL); if (err) goto error; if (parse.p.cipher != WLAN_CIPHER_SUITE_WEP40 && parse.p.cipher != WLAN_CIPHER_SUITE_WEP104) { GENL_SET_ERR_MSG(info, "connect key must be WEP"); err = -EINVAL; goto error; } result->params[parse.idx].cipher = parse.p.cipher; result->params[parse.idx].key_len = parse.p.key_len; result->params[parse.idx].key = result->data[parse.idx]; memcpy(result->data[parse.idx], parse.p.key, parse.p.key_len); /* must be WEP key if we got here */ if (no_ht) *no_ht = true; } if (result->def < 0) { err = -EINVAL; GENL_SET_ERR_MSG(info, "need a default/TX key"); goto error; } return result; error: kfree(result); return ERR_PTR(err); } static int nl80211_key_allowed(struct wireless_dev *wdev) { ASSERT_WDEV_LOCK(wdev); switch (wdev->iftype) { case NL80211_IFTYPE_AP: case NL80211_IFTYPE_AP_VLAN: case NL80211_IFTYPE_P2P_GO: case NL80211_IFTYPE_MESH_POINT: break; case NL80211_IFTYPE_ADHOC: case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_P2P_CLIENT: if (!wdev->current_bss) return -ENOLINK; break; case NL80211_IFTYPE_UNSPECIFIED: case NL80211_IFTYPE_OCB: case NL80211_IFTYPE_MONITOR: case NL80211_IFTYPE_NAN: case NL80211_IFTYPE_P2P_DEVICE: case NL80211_IFTYPE_NAN_DATA: case NL80211_IFTYPE_WDS: case NUM_NL80211_IFTYPES: return -EINVAL; } return 0; } static struct ieee80211_channel *nl80211_get_valid_chan(struct wiphy *wiphy, struct nlattr *tb) { struct ieee80211_channel *chan; if (tb == NULL) return NULL; chan = ieee80211_get_channel(wiphy, nla_get_u32(tb)); if (!chan || chan->flags & IEEE80211_CHAN_DISABLED) return NULL; return chan; } static int nl80211_put_iftypes(struct sk_buff *msg, u32 attr, u16 ifmodes) { struct nlattr *nl_modes = nla_nest_start(msg, attr); int i; if (!nl_modes) goto nla_put_failure; i = 0; while (ifmodes) { if ((ifmodes & 1) && nla_put_flag(msg, i)) goto nla_put_failure; ifmodes >>= 1; i++; } nla_nest_end(msg, nl_modes); return 0; nla_put_failure: return -ENOBUFS; } static int nl80211_put_iface_combinations(struct wiphy *wiphy, struct sk_buff *msg, bool large) { struct nlattr *nl_combis; int i, j; nl_combis = nla_nest_start(msg, NL80211_ATTR_INTERFACE_COMBINATIONS); if (!nl_combis) goto nla_put_failure; for (i = 0; i < wiphy->n_iface_combinations; i++) { const struct ieee80211_iface_combination *c; struct nlattr *nl_combi, *nl_limits; c = &wiphy->iface_combinations[i]; nl_combi = nla_nest_start(msg, i + 1); if (!nl_combi) goto nla_put_failure; nl_limits = nla_nest_start(msg, NL80211_IFACE_COMB_LIMITS); if (!nl_limits) goto nla_put_failure; for (j = 0; j < c->n_limits; j++) { struct nlattr *nl_limit; nl_limit = nla_nest_start(msg, j + 1); if (!nl_limit) goto nla_put_failure; if (nla_put_u32(msg, NL80211_IFACE_LIMIT_MAX, c->limits[j].max)) goto nla_put_failure; if (nl80211_put_iftypes(msg, NL80211_IFACE_LIMIT_TYPES, c->limits[j].types)) goto nla_put_failure; nla_nest_end(msg, nl_limit); } nla_nest_end(msg, nl_limits); if (c->beacon_int_infra_match && nla_put_flag(msg, NL80211_IFACE_COMB_STA_AP_BI_MATCH)) goto nla_put_failure; if (nla_put_u32(msg, NL80211_IFACE_COMB_NUM_CHANNELS, c->num_different_channels) || nla_put_u32(msg, NL80211_IFACE_COMB_MAXNUM, c->max_interfaces)) goto nla_put_failure; if (large && (nla_put_u32(msg, NL80211_IFACE_COMB_RADAR_DETECT_WIDTHS, c->radar_detect_widths) || nla_put_u32(msg, NL80211_IFACE_COMB_RADAR_DETECT_REGIONS, c->radar_detect_regions))) goto nla_put_failure; if (c->beacon_int_min_gcd && nla_put_u32(msg, NL80211_IFACE_COMB_BI_MIN_GCD, c->beacon_int_min_gcd)) goto 
nla_put_failure; nla_nest_end(msg, nl_combi); } nla_nest_end(msg, nl_combis); return 0; nla_put_failure: return -ENOBUFS; } #ifdef CONFIG_PM static int nl80211_send_wowlan_tcp_caps(struct cfg80211_registered_device *rdev, struct sk_buff *msg) { const struct wiphy_wowlan_tcp_support *tcp = rdev->wiphy.wowlan->tcp; struct nlattr *nl_tcp; if (!tcp) return 0; nl_tcp = nla_nest_start(msg, NL80211_WOWLAN_TRIG_TCP_CONNECTION); if (!nl_tcp) return -ENOBUFS; if (nla_put_u32(msg, NL80211_WOWLAN_TCP_DATA_PAYLOAD, tcp->data_payload_max)) return -ENOBUFS; if (tcp->seq && nla_put_flag(msg, NL80211_WOWLAN_TCP_DATA_PAYLOAD_SEQ)) return -ENOBUFS; if (tcp->tok && nla_put(msg, NL80211_WOWLAN_TCP_DATA_PAYLOAD_TOKEN, sizeof(*tcp->tok), tcp->tok)) return -ENOBUFS; if (nla_put_u32(msg, NL80211_WOWLAN_TCP_DATA_INTERVAL, tcp->data_interval_max)) return -ENOBUFS; if (nla_put_u32(msg, NL80211_WOWLAN_TCP_WAKE_PAYLOAD, tcp->wake_payload_max)) return -ENOBUFS; nla_nest_end(msg, nl_tcp); return 0; } static int nl80211_send_wowlan(struct sk_buff *msg, struct cfg80211_registered_device *rdev, bool large) { struct nlattr *nl_wowlan; if (!rdev->wiphy.wowlan) return 0; nl_wowlan = nla_nest_start(msg, NL80211_ATTR_WOWLAN_TRIGGERS_SUPPORTED); if (!nl_wowlan) return -ENOBUFS; if (((rdev->wiphy.wowlan->flags & WIPHY_WOWLAN_ANY) && nla_put_flag(msg, NL80211_WOWLAN_TRIG_ANY)) || ((rdev->wiphy.wowlan->flags & WIPHY_WOWLAN_DISCONNECT) && nla_put_flag(msg, NL80211_WOWLAN_TRIG_DISCONNECT)) || ((rdev->wiphy.wowlan->flags & WIPHY_WOWLAN_MAGIC_PKT) && nla_put_flag(msg, NL80211_WOWLAN_TRIG_MAGIC_PKT)) || ((rdev->wiphy.wowlan->flags & WIPHY_WOWLAN_SUPPORTS_GTK_REKEY) && nla_put_flag(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_SUPPORTED)) || ((rdev->wiphy.wowlan->flags & WIPHY_WOWLAN_GTK_REKEY_FAILURE) && nla_put_flag(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE)) || ((rdev->wiphy.wowlan->flags & WIPHY_WOWLAN_EAP_IDENTITY_REQ) && nla_put_flag(msg, NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST)) || ((rdev->wiphy.wowlan->flags & WIPHY_WOWLAN_4WAY_HANDSHAKE) && nla_put_flag(msg, NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE)) || ((rdev->wiphy.wowlan->flags & WIPHY_WOWLAN_RFKILL_RELEASE) && nla_put_flag(msg, NL80211_WOWLAN_TRIG_RFKILL_RELEASE))) return -ENOBUFS; if (rdev->wiphy.wowlan->n_patterns) { struct nl80211_pattern_support pat = { .max_patterns = rdev->wiphy.wowlan->n_patterns, .min_pattern_len = rdev->wiphy.wowlan->pattern_min_len, .max_pattern_len = rdev->wiphy.wowlan->pattern_max_len, .max_pkt_offset = rdev->wiphy.wowlan->max_pkt_offset, }; if (nla_put(msg, NL80211_WOWLAN_TRIG_PKT_PATTERN, sizeof(pat), &pat)) return -ENOBUFS; } if ((rdev->wiphy.wowlan->flags & WIPHY_WOWLAN_NET_DETECT) && nla_put_u32(msg, NL80211_WOWLAN_TRIG_NET_DETECT, rdev->wiphy.wowlan->max_nd_match_sets)) return -ENOBUFS; if (large && nl80211_send_wowlan_tcp_caps(rdev, msg)) return -ENOBUFS; nla_nest_end(msg, nl_wowlan); return 0; } #endif static int nl80211_send_coalesce(struct sk_buff *msg, struct cfg80211_registered_device *rdev) { struct nl80211_coalesce_rule_support rule; if (!rdev->wiphy.coalesce) return 0; rule.max_rules = rdev->wiphy.coalesce->n_rules; rule.max_delay = rdev->wiphy.coalesce->max_delay; rule.pat.max_patterns = rdev->wiphy.coalesce->n_patterns; rule.pat.min_pattern_len = rdev->wiphy.coalesce->pattern_min_len; rule.pat.max_pattern_len = rdev->wiphy.coalesce->pattern_max_len; rule.pat.max_pkt_offset = rdev->wiphy.coalesce->max_pkt_offset; if (nla_put(msg,
NL80211_ATTR_COALESCE_RULE, sizeof(rule), &rule)) return -ENOBUFS; return 0; } static int nl80211_send_iftype_data(struct sk_buff *msg, const struct ieee80211_sband_iftype_data *iftdata) { const struct ieee80211_sta_he_cap *he_cap = &iftdata->he_cap; if (nl80211_put_iftypes(msg, NL80211_BAND_IFTYPE_ATTR_IFTYPES, iftdata->types_mask)) return -ENOBUFS; if (he_cap->has_he) { if (nla_put(msg, NL80211_BAND_IFTYPE_ATTR_HE_CAP_MAC, sizeof(he_cap->he_cap_elem.mac_cap_info), he_cap->he_cap_elem.mac_cap_info) || nla_put(msg, NL80211_BAND_IFTYPE_ATTR_HE_CAP_PHY, sizeof(he_cap->he_cap_elem.phy_cap_info), he_cap->he_cap_elem.phy_cap_info) || nla_put(msg, NL80211_BAND_IFTYPE_ATTR_HE_CAP_MCS_SET, sizeof(he_cap->he_mcs_nss_supp), &he_cap->he_mcs_nss_supp) || nla_put(msg, NL80211_BAND_IFTYPE_ATTR_HE_CAP_PPE, sizeof(he_cap->ppe_thres), he_cap->ppe_thres)) return -ENOBUFS; } return 0; } static int nl80211_send_band_rateinfo(struct sk_buff *msg, struct ieee80211_supported_band *sband) { struct nlattr *nl_rates, *nl_rate; struct ieee80211_rate *rate; int i; /* add HT info */ if (sband->ht_cap.ht_supported && (nla_put(msg, NL80211_BAND_ATTR_HT_MCS_SET, sizeof(sband->ht_cap.mcs), &sband->ht_cap.mcs) || nla_put_u16(msg, NL80211_BAND_ATTR_HT_CAPA, sband->ht_cap.cap) || nla_put_u8(msg, NL80211_BAND_ATTR_HT_AMPDU_FACTOR, sband->ht_cap.ampdu_factor) || nla_put_u8(msg, NL80211_BAND_ATTR_HT_AMPDU_DENSITY, sband->ht_cap.ampdu_density))) return -ENOBUFS; /* add VHT info */ if (sband->vht_cap.vht_supported && (nla_put(msg, NL80211_BAND_ATTR_VHT_MCS_SET, sizeof(sband->vht_cap.vht_mcs), &sband->vht_cap.vht_mcs) || nla_put_u32(msg, NL80211_BAND_ATTR_VHT_CAPA, sband->vht_cap.cap))) return -ENOBUFS; if (sband->n_iftype_data) { struct nlattr *nl_iftype_data = nla_nest_start(msg, NL80211_BAND_ATTR_IFTYPE_DATA); int err; if (!nl_iftype_data) return -ENOBUFS; for (i = 0; i < sband->n_iftype_data; i++) { struct nlattr *iftdata; iftdata = nla_nest_start(msg, i + 1); if (!iftdata) return -ENOBUFS; err = nl80211_send_iftype_data(msg, &sband->iftype_data[i]); if (err) return err; nla_nest_end(msg, iftdata); } nla_nest_end(msg, nl_iftype_data); } /* add bitrates */ nl_rates = nla_nest_start(msg, NL80211_BAND_ATTR_RATES); if (!nl_rates) return -ENOBUFS; for (i = 0; i < sband->n_bitrates; i++) { nl_rate = nla_nest_start(msg, i); if (!nl_rate) return -ENOBUFS; rate = &sband->bitrates[i]; if (nla_put_u32(msg, NL80211_BITRATE_ATTR_RATE, rate->bitrate)) return -ENOBUFS; if ((rate->flags & IEEE80211_RATE_SHORT_PREAMBLE) && nla_put_flag(msg, NL80211_BITRATE_ATTR_2GHZ_SHORTPREAMBLE)) return -ENOBUFS; nla_nest_end(msg, nl_rate); } nla_nest_end(msg, nl_rates); return 0; } static int nl80211_send_mgmt_stypes(struct sk_buff *msg, const struct ieee80211_txrx_stypes *mgmt_stypes) { u16 stypes; struct nlattr *nl_ftypes, *nl_ifs; enum nl80211_iftype ift; int i; if (!mgmt_stypes) return 0; nl_ifs = nla_nest_start(msg, NL80211_ATTR_TX_FRAME_TYPES); if (!nl_ifs) return -ENOBUFS; for (ift = 0; ift < NUM_NL80211_IFTYPES; ift++) { nl_ftypes = nla_nest_start(msg, ift); if (!nl_ftypes) return -ENOBUFS; i = 0; stypes = mgmt_stypes[ift].tx; while (stypes) { if ((stypes & 1) && nla_put_u16(msg, NL80211_ATTR_FRAME_TYPE, (i << 4) | IEEE80211_FTYPE_MGMT)) return -ENOBUFS; stypes >>= 1; i++; } nla_nest_end(msg, nl_ftypes); } nla_nest_end(msg, nl_ifs); nl_ifs = nla_nest_start(msg, NL80211_ATTR_RX_FRAME_TYPES); if (!nl_ifs) return -ENOBUFS; for (ift = 0; ift < NUM_NL80211_IFTYPES; ift++) { nl_ftypes = nla_nest_start(msg, ift); if (!nl_ftypes) return -ENOBUFS; i = 0; 
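/* as for TX above: each set bit i in the stypes bitmap marks management subtype i as supported, advertised in the 802.11 frame-control encoding (i << 4) | IEEE80211_FTYPE_MGMT, e.g. 0xd0 for an Action frame (subtype 13) */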
stypes = mgmt_stypes[ift].rx; while (stypes) { if ((stypes & 1) && nla_put_u16(msg, NL80211_ATTR_FRAME_TYPE, (i << 4) | IEEE80211_FTYPE_MGMT)) return -ENOBUFS; stypes >>= 1; i++; } nla_nest_end(msg, nl_ftypes); } nla_nest_end(msg, nl_ifs); return 0; } #define CMD(op, n) \ do { \ if (rdev->ops->op) { \ i++; \ if (nla_put_u32(msg, i, NL80211_CMD_ ## n)) \ goto nla_put_failure; \ } \ } while (0) static int nl80211_add_commands_unsplit(struct cfg80211_registered_device *rdev, struct sk_buff *msg) { int i = 0; /* * do *NOT* add anything into this function, new things need to be * advertised only to new versions of userspace that can deal with * the split (and they can't possibly care about new features... */ CMD(add_virtual_intf, NEW_INTERFACE); CMD(change_virtual_intf, SET_INTERFACE); CMD(add_key, NEW_KEY); CMD(start_ap, START_AP); CMD(add_station, NEW_STATION); CMD(add_mpath, NEW_MPATH); CMD(update_mesh_config, SET_MESH_CONFIG); CMD(change_bss, SET_BSS); CMD(auth, AUTHENTICATE); CMD(assoc, ASSOCIATE); CMD(deauth, DEAUTHENTICATE); CMD(disassoc, DISASSOCIATE); CMD(join_ibss, JOIN_IBSS); CMD(join_mesh, JOIN_MESH); CMD(set_pmksa, SET_PMKSA); CMD(del_pmksa, DEL_PMKSA); CMD(flush_pmksa, FLUSH_PMKSA); if (rdev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL) CMD(remain_on_channel, REMAIN_ON_CHANNEL); CMD(set_bitrate_mask, SET_TX_BITRATE_MASK); CMD(mgmt_tx, FRAME); CMD(mgmt_tx_cancel_wait, FRAME_WAIT_CANCEL); if (rdev->wiphy.flags & WIPHY_FLAG_NETNS_OK) { i++; if (nla_put_u32(msg, i, NL80211_CMD_SET_WIPHY_NETNS)) goto nla_put_failure; } if (rdev->ops->set_monitor_channel || rdev->ops->start_ap || rdev->ops->join_mesh) { i++; if (nla_put_u32(msg, i, NL80211_CMD_SET_CHANNEL)) goto nla_put_failure; } CMD(set_wds_peer, SET_WDS_PEER); if (rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS) { CMD(tdls_mgmt, TDLS_MGMT); CMD(tdls_oper, TDLS_OPER); } if (rdev->wiphy.max_sched_scan_reqs) CMD(sched_scan_start, START_SCHED_SCAN); CMD(probe_client, PROBE_CLIENT); CMD(set_noack_map, SET_NOACK_MAP); if (rdev->wiphy.flags & WIPHY_FLAG_REPORTS_OBSS) { i++; if (nla_put_u32(msg, i, NL80211_CMD_REGISTER_BEACONS)) goto nla_put_failure; } CMD(start_p2p_device, START_P2P_DEVICE); CMD(set_mcast_rate, SET_MCAST_RATE); #ifdef CPTCFG_NL80211_TESTMODE CMD(testmode_cmd, TESTMODE); #endif if (rdev->ops->connect || rdev->ops->auth) { i++; if (nla_put_u32(msg, i, NL80211_CMD_CONNECT)) goto nla_put_failure; } if (rdev->ops->disconnect || rdev->ops->deauth) { i++; if (nla_put_u32(msg, i, NL80211_CMD_DISCONNECT)) goto nla_put_failure; } return i; nla_put_failure: return -ENOBUFS; } static int nl80211_send_pmsr_ftm_capa(const struct cfg80211_pmsr_capabilities *cap, struct sk_buff *msg) { struct nlattr *ftm; if (!cap->ftm.supported) return 0; ftm = nla_nest_start(msg, NL80211_PMSR_TYPE_FTM); if (!ftm) return -ENOBUFS; if (cap->ftm.asap && nla_put_flag(msg, NL80211_PMSR_FTM_CAPA_ATTR_ASAP)) return -ENOBUFS; if (cap->ftm.non_asap && nla_put_flag(msg, NL80211_PMSR_FTM_CAPA_ATTR_NON_ASAP)) return -ENOBUFS; if (cap->ftm.request_lci && nla_put_flag(msg, NL80211_PMSR_FTM_CAPA_ATTR_REQ_LCI)) return -ENOBUFS; if (cap->ftm.request_civicloc && nla_put_flag(msg, NL80211_PMSR_FTM_CAPA_ATTR_REQ_CIVICLOC)) return -ENOBUFS; if (nla_put_u32(msg, NL80211_PMSR_FTM_CAPA_ATTR_PREAMBLES, cap->ftm.preambles)) return -ENOBUFS; if (nla_put_u32(msg, NL80211_PMSR_FTM_CAPA_ATTR_BANDWIDTHS, cap->ftm.bandwidths)) return -ENOBUFS; if (cap->ftm.max_bursts_exponent >= 0 && nla_put_u32(msg, NL80211_PMSR_FTM_CAPA_ATTR_MAX_BURSTS_EXPONENT, cap->ftm.max_bursts_exponent)) return 
-ENOBUFS; if (cap->ftm.max_ftms_per_burst && nla_put_u32(msg, NL80211_PMSR_FTM_CAPA_ATTR_MAX_FTMS_PER_BURST, cap->ftm.max_ftms_per_burst)) return -ENOBUFS; nla_nest_end(msg, ftm); return 0; } static int nl80211_send_pmsr_capa(struct cfg80211_registered_device *rdev, struct sk_buff *msg) { const struct cfg80211_pmsr_capabilities *cap = rdev->wiphy.pmsr_capa; struct nlattr *pmsr, *caps; if (!cap) return 0; /* * we don't need to clean up anything here since the caller * will genlmsg_cancel() if we fail */ pmsr = nla_nest_start(msg, NL80211_ATTR_PEER_MEASUREMENTS); if (!pmsr) return -ENOBUFS; if (nla_put_u32(msg, NL80211_PMSR_ATTR_MAX_PEERS, cap->max_peers)) return -ENOBUFS; if (cap->report_ap_tsf && nla_put_flag(msg, NL80211_PMSR_ATTR_REPORT_AP_TSF)) return -ENOBUFS; if (cap->randomize_mac_addr && nla_put_flag(msg, NL80211_PMSR_ATTR_RANDOMIZE_MAC_ADDR)) return -ENOBUFS; caps = nla_nest_start(msg, NL80211_PMSR_ATTR_TYPE_CAPA); if (!caps) return -ENOBUFS; if (nl80211_send_pmsr_ftm_capa(cap, msg)) return -ENOBUFS; nla_nest_end(msg, caps); nla_nest_end(msg, pmsr); return 0; } struct nl80211_dump_wiphy_state { s64 filter_wiphy; long start; long split_start, band_start, chan_start, capa_start; bool split; }; static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev, enum nl80211_commands cmd, struct sk_buff *msg, u32 portid, u32 seq, int flags, struct nl80211_dump_wiphy_state *state) { void *hdr; struct nlattr *nl_bands, *nl_band; struct nlattr *nl_freqs, *nl_freq; struct nlattr *nl_cmds; enum nl80211_band band; struct ieee80211_channel *chan; int i; const struct ieee80211_txrx_stypes *mgmt_stypes = rdev->wiphy.mgmt_stypes; u32 features; hdr = nl80211hdr_put(msg, portid, seq, flags, cmd); if (!hdr) return -ENOBUFS; if (WARN_ON(!state)) return -EINVAL; if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || nla_put_string(msg, NL80211_ATTR_WIPHY_NAME, wiphy_name(&rdev->wiphy)) || nla_put_u32(msg, NL80211_ATTR_GENERATION, cfg80211_rdev_list_generation)) goto nla_put_failure; if (cmd != NL80211_CMD_NEW_WIPHY) goto finish; switch (state->split_start) { case 0: if (nla_put_u8(msg, NL80211_ATTR_WIPHY_RETRY_SHORT, rdev->wiphy.retry_short) || nla_put_u8(msg, NL80211_ATTR_WIPHY_RETRY_LONG, rdev->wiphy.retry_long) || nla_put_u32(msg, NL80211_ATTR_WIPHY_FRAG_THRESHOLD, rdev->wiphy.frag_threshold) || nla_put_u32(msg, NL80211_ATTR_WIPHY_RTS_THRESHOLD, rdev->wiphy.rts_threshold) || nla_put_u8(msg, NL80211_ATTR_WIPHY_COVERAGE_CLASS, rdev->wiphy.coverage_class) || nla_put_u8(msg, NL80211_ATTR_MAX_NUM_SCAN_SSIDS, rdev->wiphy.max_scan_ssids) || nla_put_u8(msg, NL80211_ATTR_MAX_NUM_SCHED_SCAN_SSIDS, rdev->wiphy.max_sched_scan_ssids) || nla_put_u16(msg, NL80211_ATTR_MAX_SCAN_IE_LEN, rdev->wiphy.max_scan_ie_len) || nla_put_u16(msg, NL80211_ATTR_MAX_SCHED_SCAN_IE_LEN, rdev->wiphy.max_sched_scan_ie_len) || nla_put_u8(msg, NL80211_ATTR_MAX_MATCH_SETS, rdev->wiphy.max_match_sets) || nla_put_u32(msg, NL80211_ATTR_MAX_NUM_SCHED_SCAN_PLANS, rdev->wiphy.max_sched_scan_plans) || nla_put_u32(msg, NL80211_ATTR_MAX_SCAN_PLAN_INTERVAL, rdev->wiphy.max_sched_scan_plan_interval) || nla_put_u32(msg, NL80211_ATTR_MAX_SCAN_PLAN_ITERATIONS, rdev->wiphy.max_sched_scan_plan_iterations)) goto nla_put_failure; if ((rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN) && nla_put_flag(msg, NL80211_ATTR_SUPPORT_IBSS_RSN)) goto nla_put_failure; if ((rdev->wiphy.flags & WIPHY_FLAG_MESH_AUTH) && nla_put_flag(msg, NL80211_ATTR_SUPPORT_MESH_AUTH)) goto nla_put_failure; if ((rdev->wiphy.flags & WIPHY_FLAG_AP_UAPSD) && nla_put_flag(msg, 
NL80211_ATTR_SUPPORT_AP_UAPSD)) goto nla_put_failure; if ((rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_FW_ROAM) && nla_put_flag(msg, NL80211_ATTR_ROAM_SUPPORT)) goto nla_put_failure; if ((rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS) && nla_put_flag(msg, NL80211_ATTR_TDLS_SUPPORT)) goto nla_put_failure; if ((rdev->wiphy.flags & WIPHY_FLAG_TDLS_EXTERNAL_SETUP) && nla_put_flag(msg, NL80211_ATTR_TDLS_EXTERNAL_SETUP)) goto nla_put_failure; state->split_start++; if (state->split) break; /* fall through */ case 1: if (nla_put(msg, NL80211_ATTR_CIPHER_SUITES, sizeof(u32) * rdev->wiphy.n_cipher_suites, rdev->wiphy.cipher_suites)) goto nla_put_failure; if (nla_put_u8(msg, NL80211_ATTR_MAX_NUM_PMKIDS, rdev->wiphy.max_num_pmkids)) goto nla_put_failure; if ((rdev->wiphy.flags & WIPHY_FLAG_CONTROL_PORT_PROTOCOL) && nla_put_flag(msg, NL80211_ATTR_CONTROL_PORT_ETHERTYPE)) goto nla_put_failure; if (nla_put_u32(msg, NL80211_ATTR_WIPHY_ANTENNA_AVAIL_TX, rdev->wiphy.available_antennas_tx) || nla_put_u32(msg, NL80211_ATTR_WIPHY_ANTENNA_AVAIL_RX, rdev->wiphy.available_antennas_rx)) goto nla_put_failure; if ((rdev->wiphy.flags & WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD) && nla_put_u32(msg, NL80211_ATTR_PROBE_RESP_OFFLOAD, rdev->wiphy.probe_resp_offload)) goto nla_put_failure; if ((rdev->wiphy.available_antennas_tx || rdev->wiphy.available_antennas_rx) && rdev->ops->get_antenna) { u32 tx_ant = 0, rx_ant = 0; int res; res = rdev_get_antenna(rdev, &tx_ant, &rx_ant); if (!res) { if (nla_put_u32(msg, NL80211_ATTR_WIPHY_ANTENNA_TX, tx_ant) || nla_put_u32(msg, NL80211_ATTR_WIPHY_ANTENNA_RX, rx_ant)) goto nla_put_failure; } } state->split_start++; if (state->split) break; /* fall through */ case 2: if (nl80211_put_iftypes(msg, NL80211_ATTR_SUPPORTED_IFTYPES, rdev->wiphy.interface_modes)) goto nla_put_failure; state->split_start++; if (state->split) break; /* fall through */ case 3: nl_bands = nla_nest_start(msg, NL80211_ATTR_WIPHY_BANDS); if (!nl_bands) goto nla_put_failure; for (band = state->band_start; band < NUM_NL80211_BANDS; band++) { struct ieee80211_supported_band *sband; sband = rdev->wiphy.bands[band]; if (!sband) continue; nl_band = nla_nest_start(msg, band); if (!nl_band) goto nla_put_failure; switch (state->chan_start) { case 0: if (nl80211_send_band_rateinfo(msg, sband)) goto nla_put_failure; state->chan_start++; if (state->split) break; /* fall through */ default: /* add frequencies */ nl_freqs = nla_nest_start( msg, NL80211_BAND_ATTR_FREQS); if (!nl_freqs) goto nla_put_failure; for (i = state->chan_start - 1; i < sband->n_channels; i++) { nl_freq = nla_nest_start(msg, i); if (!nl_freq) goto nla_put_failure; chan = &sband->channels[i]; if (nl80211_msg_put_channel( msg, &rdev->wiphy, chan, state->split)) goto nla_put_failure; nla_nest_end(msg, nl_freq); if (state->split) break; } if (i < sband->n_channels) state->chan_start = i + 2; else state->chan_start = 0; nla_nest_end(msg, nl_freqs); } nla_nest_end(msg, nl_band); if (state->split) { /* start again here */ if (state->chan_start) band--; break; } } nla_nest_end(msg, nl_bands); if (band < NUM_NL80211_BANDS) state->band_start = band + 1; else state->band_start = 0; /* if bands & channels are done, continue outside */ if (state->band_start == 0 && state->chan_start == 0) state->split_start++; if (state->split) break; /* fall through */ case 4: nl_cmds = nla_nest_start(msg, NL80211_ATTR_SUPPORTED_COMMANDS); if (!nl_cmds) goto nla_put_failure; i = nl80211_add_commands_unsplit(rdev, msg); if (i < 0) goto nla_put_failure; if (state->split) { CMD(crit_proto_start, 
CRIT_PROTOCOL_START); CMD(crit_proto_stop, CRIT_PROTOCOL_STOP); if (rdev->wiphy.flags & WIPHY_FLAG_HAS_CHANNEL_SWITCH) CMD(channel_switch, CHANNEL_SWITCH); CMD(set_qos_map, SET_QOS_MAP); if (rdev->wiphy.features & NL80211_FEATURE_SUPPORTS_WMM_ADMISSION) CMD(add_tx_ts, ADD_TX_TS); CMD(set_multicast_to_unicast, SET_MULTICAST_TO_UNICAST); CMD(update_connect_params, UPDATE_CONNECT_PARAMS); } #undef CMD nla_nest_end(msg, nl_cmds); state->split_start++; if (state->split) break; /* fall through */ case 5: if (rdev->ops->remain_on_channel && (rdev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL) && nla_put_u32(msg, NL80211_ATTR_MAX_REMAIN_ON_CHANNEL_DURATION, rdev->wiphy.max_remain_on_channel_duration)) goto nla_put_failure; if ((rdev->wiphy.flags & WIPHY_FLAG_OFFCHAN_TX) && nla_put_flag(msg, NL80211_ATTR_OFFCHANNEL_TX_OK)) goto nla_put_failure; if (nl80211_send_mgmt_stypes(msg, mgmt_stypes)) goto nla_put_failure; state->split_start++; if (state->split) break; /* fall through */ case 6: #ifdef CONFIG_PM if (nl80211_send_wowlan(msg, rdev, state->split)) goto nla_put_failure; state->split_start++; if (state->split) break; #else state->split_start++; #endif /* fall through */ case 7: if (nl80211_put_iftypes(msg, NL80211_ATTR_SOFTWARE_IFTYPES, rdev->wiphy.software_iftypes)) goto nla_put_failure; if (nl80211_put_iface_combinations(&rdev->wiphy, msg, state->split)) goto nla_put_failure; state->split_start++; if (state->split) break; /* fall through */ case 8: if ((rdev->wiphy.flags & WIPHY_FLAG_HAVE_AP_SME) && nla_put_u32(msg, NL80211_ATTR_DEVICE_AP_SME, rdev->wiphy.ap_sme_capa)) goto nla_put_failure; features = rdev->wiphy.features; /* * We can only add the per-channel limit information if the * dump is split, otherwise it makes it too big. Therefore * only advertise it in that case. */ if (state->split) features |= NL80211_FEATURE_ADVERTISE_CHAN_LIMITS; if (nla_put_u32(msg, NL80211_ATTR_FEATURE_FLAGS, features)) goto nla_put_failure; if (rdev->wiphy.ht_capa_mod_mask && nla_put(msg, NL80211_ATTR_HT_CAPABILITY_MASK, sizeof(*rdev->wiphy.ht_capa_mod_mask), rdev->wiphy.ht_capa_mod_mask)) goto nla_put_failure; if (rdev->wiphy.flags & WIPHY_FLAG_HAVE_AP_SME && rdev->wiphy.max_acl_mac_addrs && nla_put_u32(msg, NL80211_ATTR_MAC_ACL_MAX, rdev->wiphy.max_acl_mac_addrs)) goto nla_put_failure; /* * Any information below this point is only available to * applications that can deal with it being split. This * helps ensure that newly added capabilities don't break * older tools by overrunning their buffers. * * We still increment split_start so that in the split * case we'll continue with more data in the next round, * but break unconditionally so unsplit data stops here. 
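* (userspace opts in to a split dump by setting NL80211_ATTR_SPLIT_WIPHY_DUMP in the dump request; nl80211_dump_wiphy_parse() below turns that into state->split)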
*/ state->split_start++; break; case 9: if (rdev->wiphy.extended_capabilities && (nla_put(msg, NL80211_ATTR_EXT_CAPA, rdev->wiphy.extended_capabilities_len, rdev->wiphy.extended_capabilities) || nla_put(msg, NL80211_ATTR_EXT_CAPA_MASK, rdev->wiphy.extended_capabilities_len, rdev->wiphy.extended_capabilities_mask))) goto nla_put_failure; if (rdev->wiphy.vht_capa_mod_mask && nla_put(msg, NL80211_ATTR_VHT_CAPABILITY_MASK, sizeof(*rdev->wiphy.vht_capa_mod_mask), rdev->wiphy.vht_capa_mod_mask)) goto nla_put_failure; state->split_start++; break; case 10: if (nl80211_send_coalesce(msg, rdev)) goto nla_put_failure; if ((rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_5_10_MHZ) && (nla_put_flag(msg, NL80211_ATTR_SUPPORT_5_MHZ) || nla_put_flag(msg, NL80211_ATTR_SUPPORT_10_MHZ))) goto nla_put_failure; if (rdev->wiphy.max_ap_assoc_sta && nla_put_u32(msg, NL80211_ATTR_MAX_AP_ASSOC_STA, rdev->wiphy.max_ap_assoc_sta)) goto nla_put_failure; state->split_start++; break; case 11: if (rdev->wiphy.n_vendor_commands) { const struct nl80211_vendor_cmd_info *info; struct nlattr *nested; nested = nla_nest_start(msg, NL80211_ATTR_VENDOR_DATA); if (!nested) goto nla_put_failure; for (i = 0; i < rdev->wiphy.n_vendor_commands; i++) { info = &rdev->wiphy.vendor_commands[i].info; if (nla_put(msg, i + 1, sizeof(*info), info)) goto nla_put_failure; } nla_nest_end(msg, nested); } if (rdev->wiphy.n_vendor_events) { const struct nl80211_vendor_cmd_info *info; struct nlattr *nested; nested = nla_nest_start(msg, NL80211_ATTR_VENDOR_EVENTS); if (!nested) goto nla_put_failure; for (i = 0; i < rdev->wiphy.n_vendor_events; i++) { info = &rdev->wiphy.vendor_events[i]; if (nla_put(msg, i + 1, sizeof(*info), info)) goto nla_put_failure; } nla_nest_end(msg, nested); } state->split_start++; break; case 12: if (rdev->wiphy.flags & WIPHY_FLAG_HAS_CHANNEL_SWITCH && nla_put_u8(msg, NL80211_ATTR_MAX_CSA_COUNTERS, rdev->wiphy.max_num_csa_counters)) goto nla_put_failure; if (rdev->wiphy.regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED && nla_put_flag(msg, NL80211_ATTR_WIPHY_SELF_MANAGED_REG)) goto nla_put_failure; if (rdev->wiphy.max_sched_scan_reqs && nla_put_u32(msg, NL80211_ATTR_SCHED_SCAN_MAX_REQS, rdev->wiphy.max_sched_scan_reqs)) goto nla_put_failure; if (nla_put(msg, NL80211_ATTR_EXT_FEATURES, sizeof(rdev->wiphy.ext_features), rdev->wiphy.ext_features)) goto nla_put_failure; if (rdev->wiphy.bss_select_support) { struct nlattr *nested; u32 bss_select_support = rdev->wiphy.bss_select_support; nested = nla_nest_start(msg, NL80211_ATTR_BSS_SELECT); if (!nested) goto nla_put_failure; i = 0; while (bss_select_support) { if ((bss_select_support & 1) && nla_put_flag(msg, i)) goto nla_put_failure; i++; bss_select_support >>= 1; } nla_nest_end(msg, nested); } state->split_start++; break; case 13: if (rdev->wiphy.num_iftype_ext_capab && rdev->wiphy.iftype_ext_capab) { struct nlattr *nested_ext_capab, *nested; nested = nla_nest_start(msg, NL80211_ATTR_IFTYPE_EXT_CAPA); if (!nested) goto nla_put_failure; for (i = state->capa_start; i < rdev->wiphy.num_iftype_ext_capab; i++) { const struct wiphy_iftype_ext_capab *capab; capab = &rdev->wiphy.iftype_ext_capab[i]; nested_ext_capab = nla_nest_start(msg, i); if (!nested_ext_capab || nla_put_u32(msg, NL80211_ATTR_IFTYPE, capab->iftype) || nla_put(msg, NL80211_ATTR_EXT_CAPA, capab->extended_capabilities_len, capab->extended_capabilities) || nla_put(msg, NL80211_ATTR_EXT_CAPA_MASK, capab->extended_capabilities_len, capab->extended_capabilities_mask)) goto nla_put_failure; nla_nest_end(msg, nested_ext_capab); if 
(state->split) break; } nla_nest_end(msg, nested); if (i < rdev->wiphy.num_iftype_ext_capab) { state->capa_start = i + 1; break; } } if (nla_put_u32(msg, NL80211_ATTR_BANDS, rdev->wiphy.nan_supported_bands)) goto nla_put_failure; if (wiphy_ext_feature_isset(&rdev->wiphy, NL80211_EXT_FEATURE_TXQS)) { struct cfg80211_txq_stats txqstats = {}; int res; res = rdev_get_txq_stats(rdev, NULL, &txqstats); if (!res && !nl80211_put_txq_stats(msg, &txqstats, NL80211_ATTR_TXQ_STATS)) goto nla_put_failure; if (nla_put_u32(msg, NL80211_ATTR_TXQ_LIMIT, rdev->wiphy.txq_limit)) goto nla_put_failure; if (nla_put_u32(msg, NL80211_ATTR_TXQ_MEMORY_LIMIT, rdev->wiphy.txq_memory_limit)) goto nla_put_failure; if (nla_put_u32(msg, NL80211_ATTR_TXQ_QUANTUM, rdev->wiphy.txq_quantum)) goto nla_put_failure; } state->split_start++; break; case 14: if (nl80211_send_pmsr_capa(rdev, msg)) goto nla_put_failure; /* done */ state->split_start = 0; break; } finish: genlmsg_end(msg, hdr); return 0; nla_put_failure: genlmsg_cancel(msg, hdr); return -EMSGSIZE; } static int nl80211_dump_wiphy_parse(struct sk_buff *skb, struct netlink_callback *cb, struct nl80211_dump_wiphy_state *state) { struct nlattr **tb = genl_family_attrbuf(&nl80211_fam); int ret = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize, tb, nl80211_fam.maxattr, nl80211_policy, NULL); /* ignore parse errors for backward compatibility */ if (ret) return 0; state->split = tb[NL80211_ATTR_SPLIT_WIPHY_DUMP]; if (tb[NL80211_ATTR_WIPHY]) state->filter_wiphy = nla_get_u32(tb[NL80211_ATTR_WIPHY]); if (tb[NL80211_ATTR_WDEV]) state->filter_wiphy = nla_get_u64(tb[NL80211_ATTR_WDEV]) >> 32; if (tb[NL80211_ATTR_IFINDEX]) { struct net_device *netdev; struct cfg80211_registered_device *rdev; int ifidx = nla_get_u32(tb[NL80211_ATTR_IFINDEX]); netdev = __dev_get_by_index(sock_net(skb->sk), ifidx); if (!netdev) return -ENODEV; if (netdev->ieee80211_ptr) { rdev = wiphy_to_rdev( netdev->ieee80211_ptr->wiphy); state->filter_wiphy = rdev->wiphy_idx; } } return 0; } static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb) { int idx = 0, ret; struct nl80211_dump_wiphy_state *state = (void *)cb->args[0]; struct cfg80211_registered_device *rdev; rtnl_lock(); if (!state) { state = kzalloc(sizeof(*state), GFP_KERNEL); if (!state) { rtnl_unlock(); return -ENOMEM; } state->filter_wiphy = -1; ret = nl80211_dump_wiphy_parse(skb, cb, state); if (ret) { kfree(state); rtnl_unlock(); return ret; } cb->args[0] = (long)state; } list_for_each_entry(rdev, &cfg80211_rdev_list, list) { if (!net_eq(wiphy_net(&rdev->wiphy), sock_net(skb->sk))) continue; if (++idx <= state->start) continue; if (state->filter_wiphy != -1 && state->filter_wiphy != rdev->wiphy_idx) continue; /* attempt to fit multiple wiphy data chunks into the skb */ do { ret = nl80211_send_wiphy(rdev, NL80211_CMD_NEW_WIPHY, skb, NETLINK_CB_PORTID(cb->skb), cb->nlh->nlmsg_seq, NLM_F_MULTI, state); if (ret < 0) { #if LINUX_VERSION_IS_GEQ(3,1,0) /* * If sending the wiphy data didn't fit (ENOBUFS * or EMSGSIZE returned), this SKB is still * empty (so it's not too big because another * wiphy dataset is already in the skb) and * we've not tried to adjust the dump allocation * yet ... then adjust the alloc size to be * bigger, and return 1 but with the empty skb. * This results in an empty message being RX'ed * in userspace, but that is ignored. * * We can then retry with the larger buffer. 
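* Roughly: the failed pass returns 1 with an empty skb after bumping cb->min_dump_alloc to 4096 and resetting split_start, and the next invocation of the dump gets a larger skb that the unsplit wiphy data should fit into.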
*/ if ((ret == -ENOBUFS || ret == -EMSGSIZE) && !skb->len && !state->split && cb->min_dump_alloc < 4096) { cb->min_dump_alloc = 4096; state->split_start = 0; rtnl_unlock(); return 1; } #endif idx--; break; } } while (state->split_start > 0); break; } rtnl_unlock(); state->start = idx; return skb->len; } static int nl80211_dump_wiphy_done(struct netlink_callback *cb) { kfree((void *)cb->args[0]); return 0; } static int nl80211_get_wiphy(struct sk_buff *skb, struct genl_info *info) { struct sk_buff *msg; struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct nl80211_dump_wiphy_state state = {}; msg = nlmsg_new(4096, GFP_KERNEL); if (!msg) return -ENOMEM; if (nl80211_send_wiphy(rdev, NL80211_CMD_NEW_WIPHY, msg, genl_info_snd_portid(info), info->snd_seq, 0, &state) < 0) { nlmsg_free(msg); return -ENOBUFS; } return genlmsg_reply(msg, info); } static const struct nla_policy txq_params_policy[NL80211_TXQ_ATTR_MAX + 1] = { [NL80211_TXQ_ATTR_QUEUE] = { .type = NLA_U8 }, [NL80211_TXQ_ATTR_TXOP] = { .type = NLA_U16 }, [NL80211_TXQ_ATTR_CWMIN] = { .type = NLA_U16 }, [NL80211_TXQ_ATTR_CWMAX] = { .type = NLA_U16 }, [NL80211_TXQ_ATTR_AIFS] = { .type = NLA_U8 }, }; static int parse_txq_params(struct nlattr *tb[], struct ieee80211_txq_params *txq_params) { u8 ac; if (!tb[NL80211_TXQ_ATTR_AC] || !tb[NL80211_TXQ_ATTR_TXOP] || !tb[NL80211_TXQ_ATTR_CWMIN] || !tb[NL80211_TXQ_ATTR_CWMAX] || !tb[NL80211_TXQ_ATTR_AIFS]) return -EINVAL; ac = nla_get_u8(tb[NL80211_TXQ_ATTR_AC]); txq_params->txop = nla_get_u16(tb[NL80211_TXQ_ATTR_TXOP]); txq_params->cwmin = nla_get_u16(tb[NL80211_TXQ_ATTR_CWMIN]); txq_params->cwmax = nla_get_u16(tb[NL80211_TXQ_ATTR_CWMAX]); txq_params->aifs = nla_get_u8(tb[NL80211_TXQ_ATTR_AIFS]); if (ac >= NL80211_NUM_ACS) return -EINVAL; txq_params->ac = array_index_nospec(ac, NL80211_NUM_ACS); return 0; } static bool nl80211_can_set_dev_channel(struct wireless_dev *wdev) { /* * You can only set the channel explicitly for WDS interfaces, * all others have their channel managed via their respective * "establish a connection" command (connect, join, ...) * * For AP/GO and mesh mode, the channel can be set with the * channel userspace API, but is only stored and passed to the * low-level driver when the AP starts or the mesh is joined. * This is for backward compatibility, userspace can also give * the channel in the start-ap or join-mesh commands instead. * * Monitors are special as they are normally slaved to * whatever else is going on, so they have their own special * operation to set the monitor channel if possible. 
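* In short: a NULL wdev (wiphy-level request) and AP, P2P GO, mesh point and monitor interfaces may use this path; everything else is rejected.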
*/ return !wdev || wdev->iftype == NL80211_IFTYPE_AP || wdev->iftype == NL80211_IFTYPE_MESH_POINT || wdev->iftype == NL80211_IFTYPE_MONITOR || wdev->iftype == NL80211_IFTYPE_P2P_GO; } int nl80211_parse_chandef(struct cfg80211_registered_device *rdev, struct genl_info *info, struct cfg80211_chan_def *chandef) { struct netlink_ext_ack *extack = genl_info_extack(info); struct nlattr **attrs = info->attrs; u32 control_freq; if (!attrs[NL80211_ATTR_WIPHY_FREQ]) return -EINVAL; control_freq = nla_get_u32(attrs[NL80211_ATTR_WIPHY_FREQ]); chandef->chan = ieee80211_get_channel(&rdev->wiphy, control_freq); chandef->width = NL80211_CHAN_WIDTH_20_NOHT; chandef->center_freq1 = control_freq; chandef->center_freq2 = 0; /* Primary channel not allowed */ if (!chandef->chan || chandef->chan->flags & IEEE80211_CHAN_DISABLED) { NL_SET_ERR_MSG_ATTR(extack, attrs[NL80211_ATTR_WIPHY_FREQ], "Channel is disabled"); return -EINVAL; } if (attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) { enum nl80211_channel_type chantype; chantype = nla_get_u32(attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]); switch (chantype) { case NL80211_CHAN_NO_HT: case NL80211_CHAN_HT20: case NL80211_CHAN_HT40PLUS: case NL80211_CHAN_HT40MINUS: cfg80211_chandef_create(chandef, chandef->chan, chantype); /* user input for center_freq is incorrect */ if (attrs[NL80211_ATTR_CENTER_FREQ1] && chandef->center_freq1 != nla_get_u32(attrs[NL80211_ATTR_CENTER_FREQ1])) { NL_SET_ERR_MSG_ATTR(extack, attrs[NL80211_ATTR_CENTER_FREQ1], "bad center frequency 1"); return -EINVAL; } /* center_freq2 must be zero */ if (attrs[NL80211_ATTR_CENTER_FREQ2] && nla_get_u32(attrs[NL80211_ATTR_CENTER_FREQ2])) { NL_SET_ERR_MSG_ATTR(extack, attrs[NL80211_ATTR_CENTER_FREQ2], "center frequency 2 can't be used"); return -EINVAL; } break; default: NL_SET_ERR_MSG_ATTR(extack, attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE], "invalid channel type"); return -EINVAL; } } else if (attrs[NL80211_ATTR_CHANNEL_WIDTH]) { chandef->width = nla_get_u32(attrs[NL80211_ATTR_CHANNEL_WIDTH]); if (attrs[NL80211_ATTR_CENTER_FREQ1]) chandef->center_freq1 = nla_get_u32(attrs[NL80211_ATTR_CENTER_FREQ1]); if (attrs[NL80211_ATTR_CENTER_FREQ2]) chandef->center_freq2 = nla_get_u32(attrs[NL80211_ATTR_CENTER_FREQ2]); } if (!cfg80211_chandef_valid(chandef)) { NL_SET_ERR_MSG(extack, "invalid channel definition"); return -EINVAL; } if (!cfg80211_chandef_usable(&rdev->wiphy, chandef, IEEE80211_CHAN_DISABLED)) { NL_SET_ERR_MSG(extack, "(extension) channel is disabled"); return -EINVAL; } if ((chandef->width == NL80211_CHAN_WIDTH_5 || chandef->width == NL80211_CHAN_WIDTH_10) && !(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_5_10_MHZ)) { NL_SET_ERR_MSG(extack, "5/10 MHz not supported"); return -EINVAL; } return 0; } static int __nl80211_set_channel(struct cfg80211_registered_device *rdev, struct net_device *dev, struct genl_info *info) { struct cfg80211_chan_def chandef; int result; enum nl80211_iftype iftype = NL80211_IFTYPE_MONITOR; struct wireless_dev *wdev = NULL; if (dev) wdev = dev->ieee80211_ptr; if (!nl80211_can_set_dev_channel(wdev)) return -EOPNOTSUPP; if (wdev) iftype = wdev->iftype; result = nl80211_parse_chandef(rdev, info, &chandef); if (result) return result; switch (iftype) { case NL80211_IFTYPE_AP: case NL80211_IFTYPE_P2P_GO: if (!cfg80211_reg_can_beacon_relax(&rdev->wiphy, &chandef, iftype)) { result = -EINVAL; break; } if (wdev->beacon_interval) { if (!dev || !rdev->ops->set_ap_chanwidth || !(rdev->wiphy.features & NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE)) { result = -EBUSY; break; } /* Only allow dynamic channel width 
changes */ if (chandef.chan != wdev->preset_chandef.chan) { result = -EBUSY; break; } result = rdev_set_ap_chanwidth(rdev, dev, &chandef); if (result) break; } wdev->preset_chandef = chandef; result = 0; break; case NL80211_IFTYPE_MESH_POINT: result = cfg80211_set_mesh_channel(rdev, wdev, &chandef); break; case NL80211_IFTYPE_MONITOR: result = cfg80211_set_monitor_channel(rdev, &chandef); break; default: result = -EINVAL; } return result; } static int nl80211_set_channel(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *netdev = info->user_ptr[1]; return __nl80211_set_channel(rdev, netdev, info); } static int nl80211_set_wds_peer(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; struct wireless_dev *wdev = dev->ieee80211_ptr; const u8 *bssid; if (!info->attrs[NL80211_ATTR_MAC]) return -EINVAL; if (netif_running(dev)) return -EBUSY; if (!rdev->ops->set_wds_peer) return -EOPNOTSUPP; if (wdev->iftype != NL80211_IFTYPE_WDS) return -EOPNOTSUPP; bssid = nla_data(info->attrs[NL80211_ATTR_MAC]); return rdev_set_wds_peer(rdev, dev, bssid); } static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev; struct net_device *netdev = NULL; struct wireless_dev *wdev; int result = 0, rem_txq_params = 0; struct nlattr *nl_txq_params; u32 changed; u8 retry_short = 0, retry_long = 0; u32 frag_threshold = 0, rts_threshold = 0; u8 coverage_class = 0; u32 txq_limit = 0, txq_memory_limit = 0, txq_quantum = 0; ASSERT_RTNL(); /* * Try to find the wiphy and netdev. Normally this * function shouldn't need the netdev, but this is * done for backward compatibility -- previously * setting the channel was done per wiphy, but now * it is per netdev. Previous userland like hostapd * also passed a netdev to set_wiphy, so that it is * possible to let that go to the right netdev! */ if (info->attrs[NL80211_ATTR_IFINDEX]) { int ifindex = nla_get_u32(info->attrs[NL80211_ATTR_IFINDEX]); netdev = __dev_get_by_index(genl_info_net(info), ifindex); if (netdev && netdev->ieee80211_ptr) rdev = wiphy_to_rdev(netdev->ieee80211_ptr->wiphy); else netdev = NULL; } if (!netdev) { rdev = __cfg80211_rdev_from_attrs(genl_info_net(info), info->attrs); if (IS_ERR(rdev)) return PTR_ERR(rdev); wdev = NULL; netdev = NULL; result = 0; } else wdev = netdev->ieee80211_ptr; /* * end workaround code, by now the rdev is available * and locked, and wdev may or may not be NULL. 
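* (when neither lookup finds a netdev, both netdev and wdev stay NULL and the request operates on the wiphy alone)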
*/ if (info->attrs[NL80211_ATTR_WIPHY_NAME]) result = cfg80211_dev_rename( rdev, nla_data(info->attrs[NL80211_ATTR_WIPHY_NAME])); if (result) return result; if (info->attrs[NL80211_ATTR_WIPHY_TXQ_PARAMS]) { struct ieee80211_txq_params txq_params; struct nlattr *tb[NL80211_TXQ_ATTR_MAX + 1]; if (!rdev->ops->set_txq_params) return -EOPNOTSUPP; if (!netdev) return -EINVAL; if (netdev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP && netdev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO) return -EINVAL; if (!netif_running(netdev)) return -ENETDOWN; nla_for_each_nested(nl_txq_params, info->attrs[NL80211_ATTR_WIPHY_TXQ_PARAMS], rem_txq_params) { result = nla_parse_nested(tb, NL80211_TXQ_ATTR_MAX, nl_txq_params, txq_params_policy, genl_info_extack(info)); if (result) return result; result = parse_txq_params(tb, &txq_params); if (result) return result; result = rdev_set_txq_params(rdev, netdev, &txq_params); if (result) return result; } } if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) { result = __nl80211_set_channel( rdev, nl80211_can_set_dev_channel(wdev) ? netdev : NULL, info); if (result) return result; } if (info->attrs[NL80211_ATTR_WIPHY_TX_POWER_SETTING]) { struct wireless_dev *txp_wdev = wdev; enum nl80211_tx_power_setting type; int idx, mbm = 0; if (!(rdev->wiphy.features & NL80211_FEATURE_VIF_TXPOWER)) txp_wdev = NULL; if (!rdev->ops->set_tx_power) return -EOPNOTSUPP; idx = NL80211_ATTR_WIPHY_TX_POWER_SETTING; type = nla_get_u32(info->attrs[idx]); if (!info->attrs[NL80211_ATTR_WIPHY_TX_POWER_LEVEL] && (type != NL80211_TX_POWER_AUTOMATIC)) return -EINVAL; if (type != NL80211_TX_POWER_AUTOMATIC) { idx = NL80211_ATTR_WIPHY_TX_POWER_LEVEL; mbm = nla_get_u32(info->attrs[idx]); } result = rdev_set_tx_power(rdev, txp_wdev, type, mbm); if (result) return result; } if (info->attrs[NL80211_ATTR_WIPHY_ANTENNA_TX] && info->attrs[NL80211_ATTR_WIPHY_ANTENNA_RX]) { u32 tx_ant, rx_ant; if ((!rdev->wiphy.available_antennas_tx && !rdev->wiphy.available_antennas_rx) || !rdev->ops->set_antenna) return -EOPNOTSUPP; tx_ant = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_ANTENNA_TX]); rx_ant = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_ANTENNA_RX]); /* reject antenna configurations which don't match the * available antenna masks, except for the "all" mask */ if ((~tx_ant && (tx_ant & ~rdev->wiphy.available_antennas_tx)) || (~rx_ant && (rx_ant & ~rdev->wiphy.available_antennas_rx))) return -EINVAL; tx_ant = tx_ant & rdev->wiphy.available_antennas_tx; rx_ant = rx_ant & rdev->wiphy.available_antennas_rx; result = rdev_set_antenna(rdev, tx_ant, rx_ant); if (result) return result; } changed = 0; if (info->attrs[NL80211_ATTR_WIPHY_RETRY_SHORT]) { retry_short = nla_get_u8( info->attrs[NL80211_ATTR_WIPHY_RETRY_SHORT]); changed |= WIPHY_PARAM_RETRY_SHORT; } if (info->attrs[NL80211_ATTR_WIPHY_RETRY_LONG]) { retry_long = nla_get_u8( info->attrs[NL80211_ATTR_WIPHY_RETRY_LONG]); changed |= WIPHY_PARAM_RETRY_LONG; } if (info->attrs[NL80211_ATTR_WIPHY_FRAG_THRESHOLD]) { frag_threshold = nla_get_u32( info->attrs[NL80211_ATTR_WIPHY_FRAG_THRESHOLD]); if (frag_threshold < 256) return -EINVAL; if (frag_threshold != (u32) -1) { /* * Fragments (apart from the last one) are required to * have even length. Make the fragmentation code * simpler by stripping LSB should someone try to use * odd threshold value. 
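* e.g. a requested threshold of 2347 is silently used as 2346.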
*/ frag_threshold &= ~0x1; } changed |= WIPHY_PARAM_FRAG_THRESHOLD; } if (info->attrs[NL80211_ATTR_WIPHY_RTS_THRESHOLD]) { rts_threshold = nla_get_u32( info->attrs[NL80211_ATTR_WIPHY_RTS_THRESHOLD]); changed |= WIPHY_PARAM_RTS_THRESHOLD; } if (info->attrs[NL80211_ATTR_WIPHY_COVERAGE_CLASS]) { if (info->attrs[NL80211_ATTR_WIPHY_DYN_ACK]) return -EINVAL; coverage_class = nla_get_u8( info->attrs[NL80211_ATTR_WIPHY_COVERAGE_CLASS]); changed |= WIPHY_PARAM_COVERAGE_CLASS; } if (info->attrs[NL80211_ATTR_WIPHY_DYN_ACK]) { if (!(rdev->wiphy.features & NL80211_FEATURE_ACKTO_ESTIMATION)) return -EOPNOTSUPP; changed |= WIPHY_PARAM_DYN_ACK; } if (info->attrs[NL80211_ATTR_TXQ_LIMIT]) { if (!wiphy_ext_feature_isset(&rdev->wiphy, NL80211_EXT_FEATURE_TXQS)) return -EOPNOTSUPP; txq_limit = nla_get_u32( info->attrs[NL80211_ATTR_TXQ_LIMIT]); changed |= WIPHY_PARAM_TXQ_LIMIT; } if (info->attrs[NL80211_ATTR_TXQ_MEMORY_LIMIT]) { if (!wiphy_ext_feature_isset(&rdev->wiphy, NL80211_EXT_FEATURE_TXQS)) return -EOPNOTSUPP; txq_memory_limit = nla_get_u32( info->attrs[NL80211_ATTR_TXQ_MEMORY_LIMIT]); changed |= WIPHY_PARAM_TXQ_MEMORY_LIMIT; } if (info->attrs[NL80211_ATTR_TXQ_QUANTUM]) { if (!wiphy_ext_feature_isset(&rdev->wiphy, NL80211_EXT_FEATURE_TXQS)) return -EOPNOTSUPP; txq_quantum = nla_get_u32( info->attrs[NL80211_ATTR_TXQ_QUANTUM]); changed |= WIPHY_PARAM_TXQ_QUANTUM; } if (changed) { u8 old_retry_short, old_retry_long; u32 old_frag_threshold, old_rts_threshold; u8 old_coverage_class; u32 old_txq_limit, old_txq_memory_limit, old_txq_quantum; if (!rdev->ops->set_wiphy_params) return -EOPNOTSUPP; old_retry_short = rdev->wiphy.retry_short; old_retry_long = rdev->wiphy.retry_long; old_frag_threshold = rdev->wiphy.frag_threshold; old_rts_threshold = rdev->wiphy.rts_threshold; old_coverage_class = rdev->wiphy.coverage_class; old_txq_limit = rdev->wiphy.txq_limit; old_txq_memory_limit = rdev->wiphy.txq_memory_limit; old_txq_quantum = rdev->wiphy.txq_quantum; if (changed & WIPHY_PARAM_RETRY_SHORT) rdev->wiphy.retry_short = retry_short; if (changed & WIPHY_PARAM_RETRY_LONG) rdev->wiphy.retry_long = retry_long; if (changed & WIPHY_PARAM_FRAG_THRESHOLD) rdev->wiphy.frag_threshold = frag_threshold; if (changed & WIPHY_PARAM_RTS_THRESHOLD) rdev->wiphy.rts_threshold = rts_threshold; if (changed & WIPHY_PARAM_COVERAGE_CLASS) rdev->wiphy.coverage_class = coverage_class; if (changed & WIPHY_PARAM_TXQ_LIMIT) rdev->wiphy.txq_limit = txq_limit; if (changed & WIPHY_PARAM_TXQ_MEMORY_LIMIT) rdev->wiphy.txq_memory_limit = txq_memory_limit; if (changed & WIPHY_PARAM_TXQ_QUANTUM) rdev->wiphy.txq_quantum = txq_quantum; result = rdev_set_wiphy_params(rdev, changed); if (result) { rdev->wiphy.retry_short = old_retry_short; rdev->wiphy.retry_long = old_retry_long; rdev->wiphy.frag_threshold = old_frag_threshold; rdev->wiphy.rts_threshold = old_rts_threshold; rdev->wiphy.coverage_class = old_coverage_class; rdev->wiphy.txq_limit = old_txq_limit; rdev->wiphy.txq_memory_limit = old_txq_memory_limit; rdev->wiphy.txq_quantum = old_txq_quantum; return result; } } return 0; } static int nl80211_send_chandef(struct sk_buff *msg, const struct cfg80211_chan_def *chandef) { if (WARN_ON(!cfg80211_chandef_valid(chandef))) return -EINVAL; if (nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, chandef->chan->center_freq)) return -ENOBUFS; switch (chandef->width) { case NL80211_CHAN_WIDTH_20_NOHT: case NL80211_CHAN_WIDTH_20: case NL80211_CHAN_WIDTH_40: if (nla_put_u32(msg, NL80211_ATTR_WIPHY_CHANNEL_TYPE, cfg80211_get_chandef_type(chandef))) return -ENOBUFS; break; 
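/* wider (VHT) and 5/10 MHz widths have no legacy NL80211_ATTR_WIPHY_CHANNEL_TYPE equivalent, so they are described only by the width and center-frequency attributes that follow */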
default: break; } if (nla_put_u32(msg, NL80211_ATTR_CHANNEL_WIDTH, chandef->width)) return -ENOBUFS; if (nla_put_u32(msg, NL80211_ATTR_CENTER_FREQ1, chandef->center_freq1)) return -ENOBUFS; if (chandef->center_freq2 && nla_put_u32(msg, NL80211_ATTR_CENTER_FREQ2, chandef->center_freq2)) return -ENOBUFS; return 0; } static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flags, struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, enum nl80211_commands cmd) { struct net_device *dev = wdev->netdev; void *hdr; WARN_ON(cmd != NL80211_CMD_NEW_INTERFACE && cmd != NL80211_CMD_DEL_INTERFACE && cmd != NL80211_CMD_SET_INTERFACE); hdr = nl80211hdr_put(msg, portid, seq, flags, cmd); if (!hdr) return -1; if (dev && (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) || nla_put_string(msg, NL80211_ATTR_IFNAME, dev->name))) goto nla_put_failure; if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || nla_put_u32(msg, NL80211_ATTR_IFTYPE, wdev->iftype) || nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev), NL80211_ATTR_PAD) || nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, wdev_address(wdev)) || nla_put_u32(msg, NL80211_ATTR_GENERATION, rdev->devlist_generation ^ (cfg80211_rdev_list_generation << 2)) || nla_put_u8(msg, NL80211_ATTR_4ADDR, wdev->use_4addr)) goto nla_put_failure; if (rdev->ops->get_channel) { int ret; struct cfg80211_chan_def chandef; ret = rdev_get_channel(rdev, wdev, &chandef); if (ret == 0) { if (nl80211_send_chandef(msg, &chandef)) goto nla_put_failure; } } if (rdev->ops->get_tx_power) { int dbm, ret; ret = rdev_get_tx_power(rdev, wdev, &dbm); if (ret == 0 && nla_put_u32(msg, NL80211_ATTR_WIPHY_TX_POWER_LEVEL, DBM_TO_MBM(dbm))) goto nla_put_failure; } wdev_lock(wdev); switch (wdev->iftype) { case NL80211_IFTYPE_AP: if (wdev->ssid_len && nla_put(msg, NL80211_ATTR_SSID, wdev->ssid_len, wdev->ssid)) goto nla_put_failure_locked; break; case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_P2P_CLIENT: case NL80211_IFTYPE_ADHOC: { const u8 *ssid_ie; if (!wdev->current_bss) break; rcu_read_lock(); ssid_ie = ieee80211_bss_get_ie(&wdev->current_bss->pub, WLAN_EID_SSID); if (ssid_ie && nla_put(msg, NL80211_ATTR_SSID, ssid_ie[1], ssid_ie + 2)) goto nla_put_failure_rcu_locked; rcu_read_unlock(); break; } default: /* nothing */ break; } wdev_unlock(wdev); if (rdev->ops->get_txq_stats) { struct cfg80211_txq_stats txqstats = {}; int ret = rdev_get_txq_stats(rdev, wdev, &txqstats); if (ret == 0 && !nl80211_put_txq_stats(msg, &txqstats, NL80211_ATTR_TXQ_STATS)) goto nla_put_failure; } genlmsg_end(msg, hdr); return 0; nla_put_failure_rcu_locked: rcu_read_unlock(); nla_put_failure_locked: wdev_unlock(wdev); nla_put_failure: genlmsg_cancel(msg, hdr); return -EMSGSIZE; } static int nl80211_dump_interface(struct sk_buff *skb, struct netlink_callback *cb) { int wp_idx = 0; int if_idx = 0; int wp_start = cb->args[0]; int if_start = cb->args[1]; int filter_wiphy = -1; struct cfg80211_registered_device *rdev; struct wireless_dev *wdev; int ret; rtnl_lock(); if (!cb->args[2]) { struct nl80211_dump_wiphy_state state = { .filter_wiphy = -1, }; ret = nl80211_dump_wiphy_parse(skb, cb, &state); if (ret) goto out_unlock; filter_wiphy = state.filter_wiphy; /* * if filtering, set cb->args[2] to +1 since 0 is the default * value needed to determine that parsing is necessary. 
 */
		if (filter_wiphy >= 0)
			cb->args[2] = filter_wiphy + 1;
		else
			cb->args[2] = -1;
	} else if (cb->args[2] > 0) {
		filter_wiphy = cb->args[2] - 1;
	}

	list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
		if (!net_eq(wiphy_net(&rdev->wiphy), sock_net(skb->sk)))
			continue;
		if (wp_idx < wp_start) {
			wp_idx++;
			continue;
		}
		if (filter_wiphy >= 0 && filter_wiphy != rdev->wiphy_idx)
			continue;
		if_idx = 0;

		list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
			if (if_idx < if_start) {
				if_idx++;
				continue;
			}
			if (nl80211_send_iface(skb, NETLINK_CB_PORTID(cb->skb),
					       cb->nlh->nlmsg_seq, NLM_F_MULTI,
					       rdev, wdev,
					       NL80211_CMD_NEW_INTERFACE) < 0) {
				goto out;
			}
			if_idx++;
		}

		wp_idx++;
	}
 out:
	cb->args[0] = wp_idx;
	cb->args[1] = if_idx;
	ret = skb->len;
 out_unlock:
	rtnl_unlock();

	return ret;
}

static int nl80211_get_interface(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *msg;
	struct cfg80211_registered_device *rdev = info->user_ptr[0];
	struct wireless_dev *wdev = info->user_ptr[1];

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	if (nl80211_send_iface(msg, genl_info_snd_portid(info), info->snd_seq, 0,
			       rdev, wdev, NL80211_CMD_NEW_INTERFACE) < 0) {
		nlmsg_free(msg);
		return -ENOBUFS;
	}

	return genlmsg_reply(msg, info);
}

static const struct nla_policy mntr_flags_policy[NL80211_MNTR_FLAG_MAX + 1] = {
	[NL80211_MNTR_FLAG_FCSFAIL] = { .type = NLA_FLAG },
	[NL80211_MNTR_FLAG_PLCPFAIL] = { .type = NLA_FLAG },
	[NL80211_MNTR_FLAG_CONTROL] = { .type = NLA_FLAG },
	[NL80211_MNTR_FLAG_OTHER_BSS] = { .type = NLA_FLAG },
	[NL80211_MNTR_FLAG_COOK_FRAMES] = { .type = NLA_FLAG },
	[NL80211_MNTR_FLAG_ACTIVE] = { .type = NLA_FLAG },
};

static int parse_monitor_flags(struct nlattr *nla, u32 *mntrflags)
{
	struct nlattr *flags[NL80211_MNTR_FLAG_MAX + 1];
	int flag;

	*mntrflags = 0;

	if (!nla)
		return -EINVAL;

	if (nla_parse_nested(flags, NL80211_MNTR_FLAG_MAX, nla,
			     mntr_flags_policy, NULL))
		return -EINVAL;

	for (flag = 1; flag <= NL80211_MNTR_FLAG_MAX; flag++)
		if (flags[flag])
			*mntrflags |= (1<<flag);

	*mntrflags |= MONITOR_FLAG_CHANGED;

	return 0;
}

static int nl80211_parse_mon_options(struct cfg80211_registered_device *rdev,
				     enum nl80211_iftype type,
				     struct genl_info *info,
				     struct vif_params *params)
{
	bool change = false;
	int err;

	if (info->attrs[NL80211_ATTR_MNTR_FLAGS]) {
		if (type != NL80211_IFTYPE_MONITOR)
			return -EINVAL;

		err = parse_monitor_flags(info->attrs[NL80211_ATTR_MNTR_FLAGS],
					  &params->flags);
		if (err)
			return err;

		change = true;
	}

	if (params->flags & MONITOR_FLAG_ACTIVE &&
	    !(rdev->wiphy.features & NL80211_FEATURE_ACTIVE_MONITOR))
		return -EOPNOTSUPP;

	if (info->attrs[NL80211_ATTR_MU_MIMO_GROUP_DATA]) {
		const u8 *mumimo_groups;
		u32 cap_flag = NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER;

		if (type != NL80211_IFTYPE_MONITOR)
			return -EINVAL;

		if (!wiphy_ext_feature_isset(&rdev->wiphy, cap_flag))
			return -EOPNOTSUPP;

		mumimo_groups =
			nla_data(info->attrs[NL80211_ATTR_MU_MIMO_GROUP_DATA]);

		/* bits 0 and 63 are reserved and must be zero */
		if ((mumimo_groups[0] & BIT(0)) ||
		    (mumimo_groups[VHT_MUMIMO_GROUPS_DATA_LEN - 1] & BIT(7)))
			return -EINVAL;

		params->vht_mumimo_groups = mumimo_groups;
		change = true;
	}

	if (info->attrs[NL80211_ATTR_MU_MIMO_FOLLOW_MAC_ADDR]) {
		u32 cap_flag = NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER;

		if (type != NL80211_IFTYPE_MONITOR)
			return -EINVAL;

		if (!wiphy_ext_feature_isset(&rdev->wiphy, cap_flag))
			return -EOPNOTSUPP;

		params->vht_mumimo_follow_addr =
			nla_data(info->attrs[NL80211_ATTR_MU_MIMO_FOLLOW_MAC_ADDR]);
		change = true;
	}

	return change ? 1 : 0;
}

static int nl80211_valid_4addr(struct cfg80211_registered_device *rdev,
			       struct net_device *netdev, u8 use_4addr,
			       enum nl80211_iftype iftype)
{
	if (!use_4addr) {
		if (netdev && (netdev->priv_flags & IFF_BRIDGE_PORT))
			return -EBUSY;
		return 0;
	}

	switch (iftype) {
	case NL80211_IFTYPE_AP_VLAN:
		if (rdev->wiphy.flags & WIPHY_FLAG_4ADDR_AP)
			return 0;
		break;
	case NL80211_IFTYPE_STATION:
		if (rdev->wiphy.flags & WIPHY_FLAG_4ADDR_STATION)
			return 0;
		break;
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info)
{
	struct cfg80211_registered_device *rdev = info->user_ptr[0];
	struct vif_params params;
	int err;
	enum nl80211_iftype otype, ntype;
	struct net_device *dev = info->user_ptr[1];
	bool change = false;

	memset(&params, 0, sizeof(params));

	otype = ntype = dev->ieee80211_ptr->iftype;

	if (info->attrs[NL80211_ATTR_IFTYPE]) {
		ntype = nla_get_u32(info->attrs[NL80211_ATTR_IFTYPE]);
		if (otype != ntype)
			change = true;
	}

	if (info->attrs[NL80211_ATTR_MESH_ID]) {
		struct wireless_dev *wdev = dev->ieee80211_ptr;

		if (ntype != NL80211_IFTYPE_MESH_POINT)
			return -EINVAL;
		if (netif_running(dev))
			return -EBUSY;

		wdev_lock(wdev);
		BUILD_BUG_ON(IEEE80211_MAX_SSID_LEN !=
			     IEEE80211_MAX_MESH_ID_LEN);
		wdev->mesh_id_up_len =
			nla_len(info->attrs[NL80211_ATTR_MESH_ID]);
		memcpy(wdev->ssid, nla_data(info->attrs[NL80211_ATTR_MESH_ID]),
		       wdev->mesh_id_up_len);
		wdev_unlock(wdev);
	}

	if (info->attrs[NL80211_ATTR_4ADDR]) {
		params.use_4addr = !!nla_get_u8(info->attrs[NL80211_ATTR_4ADDR]);
		change = true;
		err = nl80211_valid_4addr(rdev, dev, params.use_4addr, ntype);
		if (err)
			return err;
	} else {
		params.use_4addr = -1;
	}

	err = nl80211_parse_mon_options(rdev, ntype, info, &params);
	if (err < 0)
		return err;
	if (err > 0)
		change = true;

	if (change)
		err = cfg80211_change_iface(rdev, dev, ntype, &params);
	else
		err = 0;

	if (!err && params.use_4addr != -1)
		dev->ieee80211_ptr->use_4addr = params.use_4addr;

	if (change && !err) {
		struct wireless_dev *wdev = dev->ieee80211_ptr;

		nl80211_notify_iface(rdev, wdev, NL80211_CMD_SET_INTERFACE);
	}

	return err;
}

static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
{
	struct cfg80211_registered_device *rdev = info->user_ptr[0];
	struct vif_params params;
	struct wireless_dev *wdev;
	struct sk_buff *msg;
	int err;
	enum nl80211_iftype type = NL80211_IFTYPE_UNSPECIFIED;

	/* to avoid failing a new interface creation due to pending removal */
	cfg80211_destroy_ifaces(rdev);

	memset(&params, 0, sizeof(params));

	if (!info->attrs[NL80211_ATTR_IFNAME])
		return -EINVAL;

	if (info->attrs[NL80211_ATTR_IFTYPE])
		type = nla_get_u32(info->attrs[NL80211_ATTR_IFTYPE]);

	if (!rdev->ops->add_virtual_intf ||
	    !(rdev->wiphy.interface_modes & (1 << type)))
		return -EOPNOTSUPP;

	if ((type == NL80211_IFTYPE_P2P_DEVICE || type == NL80211_IFTYPE_NAN ||
	     rdev->wiphy.features & NL80211_FEATURE_MAC_ON_CREATE) &&
	    info->attrs[NL80211_ATTR_MAC]) {
		nla_memcpy(params.macaddr, info->attrs[NL80211_ATTR_MAC],
			   ETH_ALEN);
		if (!is_valid_ether_addr(params.macaddr))
			return -EADDRNOTAVAIL;
	}

	if (info->attrs[NL80211_ATTR_4ADDR]) {
		params.use_4addr = !!nla_get_u8(info->attrs[NL80211_ATTR_4ADDR]);
		err = nl80211_valid_4addr(rdev, NULL, params.use_4addr, type);
		if (err)
			return err;
	}

	err = nl80211_parse_mon_options(rdev, type, info, &params);
	if (err < 0)
		return err;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	wdev = rdev_add_virtual_intf(rdev,
				     nla_data(info->attrs[NL80211_ATTR_IFNAME]),
				     NET_NAME_USER, type, &params);
	if (WARN_ON(!wdev)) {
		nlmsg_free(msg);
		return -EPROTO;
	}
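	/*
	 * Sketch of the ->add_virtual_intf() return contract this caller
	 * relies on (hypothetical driver, abbreviated): failure is reported
	 * as ERR_PTR(-errno), success as a valid wdev pointer; a bare NULL
	 * is a driver bug, which the WARN_ON() above maps to -EPROTO.
	 *
	 *	static struct wireless_dev *
	 *	drv_add_virtual_intf(struct wiphy *wiphy, const char *name,
	 *			     unsigned char name_assign_type,
	 *			     enum nl80211_iftype type,
	 *			     struct vif_params *params)
	 *	{
	 *		struct drv_vif *vif = kzalloc(sizeof(*vif), GFP_KERNEL);
	 *
	 *		if (!vif)
	 *			return ERR_PTR(-ENOMEM);
	 *		vif->wdev.wiphy = wiphy;
	 *		vif->wdev.iftype = type;
	 *		return &vif->wdev;
	 *	}
	 */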
else if (IS_ERR(wdev)) { nlmsg_free(msg); return PTR_ERR(wdev); } if (info->attrs[NL80211_ATTR_SOCKET_OWNER]) wdev->owner_nlportid = genl_info_snd_portid(info); switch (type) { case NL80211_IFTYPE_MESH_POINT: if (!info->attrs[NL80211_ATTR_MESH_ID]) break; wdev_lock(wdev); BUILD_BUG_ON(IEEE80211_MAX_SSID_LEN != IEEE80211_MAX_MESH_ID_LEN); wdev->mesh_id_up_len = nla_len(info->attrs[NL80211_ATTR_MESH_ID]); memcpy(wdev->ssid, nla_data(info->attrs[NL80211_ATTR_MESH_ID]), wdev->mesh_id_up_len); wdev_unlock(wdev); break; case NL80211_IFTYPE_NAN: case NL80211_IFTYPE_P2P_DEVICE: /* * P2P Device and NAN do not have a netdev, so don't go * through the netdev notifier and must be added here */ cfg80211_init_wdev(rdev, wdev); break; default: break; } if (nl80211_send_iface(msg, genl_info_snd_portid(info), info->snd_seq, 0, rdev, wdev, NL80211_CMD_NEW_INTERFACE) < 0) { nlmsg_free(msg); return -ENOBUFS; } return genlmsg_reply(msg, info); } static int nl80211_del_interface(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct wireless_dev *wdev = info->user_ptr[1]; if (!rdev->ops->del_virtual_intf) return -EOPNOTSUPP; /* * If we remove a wireless device without a netdev then clear * user_ptr[1] so that nl80211_post_doit won't dereference it * to check if it needs to do dev_put(). Otherwise it crashes * since the wdev has been freed, unlike with a netdev where * we need the dev_put() for the netdev to really be freed. */ if (!wdev->netdev) info->user_ptr[1] = NULL; return rdev_del_virtual_intf(rdev, wdev); } static int nl80211_set_noack_map(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; u16 noack_map; if (!info->attrs[NL80211_ATTR_NOACK_MAP]) return -EINVAL; if (!rdev->ops->set_noack_map) return -EOPNOTSUPP; noack_map = nla_get_u16(info->attrs[NL80211_ATTR_NOACK_MAP]); return rdev_set_noack_map(rdev, dev, noack_map); } struct get_key_cookie { struct sk_buff *msg; int error; int idx; }; static void get_key_callback(void *c, struct key_params *params) { struct nlattr *key; struct get_key_cookie *cookie = c; if ((params->key && nla_put(cookie->msg, NL80211_ATTR_KEY_DATA, params->key_len, params->key)) || (params->seq && nla_put(cookie->msg, NL80211_ATTR_KEY_SEQ, params->seq_len, params->seq)) || (params->cipher && nla_put_u32(cookie->msg, NL80211_ATTR_KEY_CIPHER, params->cipher))) goto nla_put_failure; key = nla_nest_start(cookie->msg, NL80211_ATTR_KEY); if (!key) goto nla_put_failure; if ((params->key && nla_put(cookie->msg, NL80211_KEY_DATA, params->key_len, params->key)) || (params->seq && nla_put(cookie->msg, NL80211_KEY_SEQ, params->seq_len, params->seq)) || (params->cipher && nla_put_u32(cookie->msg, NL80211_KEY_CIPHER, params->cipher))) goto nla_put_failure; if (nla_put_u8(cookie->msg, NL80211_KEY_IDX, cookie->idx)) goto nla_put_failure; nla_nest_end(cookie->msg, key); return; nla_put_failure: cookie->error = 1; } static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; int err; struct net_device *dev = info->user_ptr[1]; u8 key_idx = 0; const u8 *mac_addr = NULL; bool pairwise; struct get_key_cookie cookie = { .error = 0, }; void *hdr; struct sk_buff *msg; if (info->attrs[NL80211_ATTR_KEY_IDX]) key_idx = nla_get_u8(info->attrs[NL80211_ATTR_KEY_IDX]); if (info->attrs[NL80211_ATTR_MAC]) mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]); pairwise = !!mac_addr; if 
(info->attrs[NL80211_ATTR_KEY_TYPE]) { u32 kt = nla_get_u32(info->attrs[NL80211_ATTR_KEY_TYPE]); if (kt != NL80211_KEYTYPE_GROUP && kt != NL80211_KEYTYPE_PAIRWISE) return -EINVAL; pairwise = kt == NL80211_KEYTYPE_PAIRWISE; } if (!rdev->ops->get_key) return -EOPNOTSUPP; if (!pairwise && mac_addr && !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN)) return -ENOENT; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; hdr = nl80211hdr_put(msg, genl_info_snd_portid(info), info->snd_seq, 0, NL80211_CMD_NEW_KEY); if (!hdr) goto nla_put_failure; cookie.msg = msg; cookie.idx = key_idx; if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) || nla_put_u8(msg, NL80211_ATTR_KEY_IDX, key_idx)) goto nla_put_failure; if (mac_addr && nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr)) goto nla_put_failure; err = rdev_get_key(rdev, dev, key_idx, pairwise, mac_addr, &cookie, get_key_callback); if (err) goto free_msg; if (cookie.error) goto nla_put_failure; genlmsg_end(msg, hdr); return genlmsg_reply(msg, info); nla_put_failure: err = -ENOBUFS; free_msg: nlmsg_free(msg); return err; } static int nl80211_set_key(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct key_parse key; int err; struct net_device *dev = info->user_ptr[1]; err = nl80211_parse_key(info, &key); if (err) return err; if (key.idx < 0) return -EINVAL; /* only support setting default key */ if (!key.def && !key.defmgmt) return -EINVAL; wdev_lock(dev->ieee80211_ptr); if (key.def) { if (!rdev->ops->set_default_key) { err = -EOPNOTSUPP; goto out; } err = nl80211_key_allowed(dev->ieee80211_ptr); if (err) goto out; err = rdev_set_default_key(rdev, dev, key.idx, key.def_uni, key.def_multi); if (err) goto out; #ifdef CPTCFG_CFG80211_WEXT dev->ieee80211_ptr->wext.default_key = key.idx; #endif } else { if (key.def_uni || !key.def_multi) { err = -EINVAL; goto out; } if (!rdev->ops->set_default_mgmt_key) { err = -EOPNOTSUPP; goto out; } err = nl80211_key_allowed(dev->ieee80211_ptr); if (err) goto out; err = rdev_set_default_mgmt_key(rdev, dev, key.idx); if (err) goto out; #ifdef CPTCFG_CFG80211_WEXT dev->ieee80211_ptr->wext.default_mgmt_key = key.idx; #endif } out: wdev_unlock(dev->ieee80211_ptr); return err; } static int nl80211_new_key(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; int err; struct net_device *dev = info->user_ptr[1]; struct key_parse key; const u8 *mac_addr = NULL; err = nl80211_parse_key(info, &key); if (err) return err; if (!key.p.key) return -EINVAL; if (info->attrs[NL80211_ATTR_MAC]) mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]); if (key.type == -1) { if (mac_addr) key.type = NL80211_KEYTYPE_PAIRWISE; else key.type = NL80211_KEYTYPE_GROUP; } /* for now */ if (key.type != NL80211_KEYTYPE_PAIRWISE && key.type != NL80211_KEYTYPE_GROUP) return -EINVAL; if (!rdev->ops->add_key) return -EOPNOTSUPP; if (cfg80211_validate_key_settings(rdev, &key.p, key.idx, key.type == NL80211_KEYTYPE_PAIRWISE, mac_addr)) return -EINVAL; wdev_lock(dev->ieee80211_ptr); err = nl80211_key_allowed(dev->ieee80211_ptr); if (!err) err = rdev_add_key(rdev, dev, key.idx, key.type == NL80211_KEYTYPE_PAIRWISE, mac_addr, &key.p); wdev_unlock(dev->ieee80211_ptr); return err; } static int nl80211_del_key(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; int err; struct net_device *dev = info->user_ptr[1]; u8 *mac_addr = NULL; struct key_parse key; err = 
nl80211_parse_key(info, &key); if (err) return err; if (info->attrs[NL80211_ATTR_MAC]) mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]); if (key.type == -1) { if (mac_addr) key.type = NL80211_KEYTYPE_PAIRWISE; else key.type = NL80211_KEYTYPE_GROUP; } /* for now */ if (key.type != NL80211_KEYTYPE_PAIRWISE && key.type != NL80211_KEYTYPE_GROUP) return -EINVAL; if (!rdev->ops->del_key) return -EOPNOTSUPP; wdev_lock(dev->ieee80211_ptr); err = nl80211_key_allowed(dev->ieee80211_ptr); if (key.type == NL80211_KEYTYPE_GROUP && mac_addr && !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN)) err = -ENOENT; if (!err) err = rdev_del_key(rdev, dev, key.idx, key.type == NL80211_KEYTYPE_PAIRWISE, mac_addr); #ifdef CPTCFG_CFG80211_WEXT if (!err) { if (key.idx == dev->ieee80211_ptr->wext.default_key) dev->ieee80211_ptr->wext.default_key = -1; else if (key.idx == dev->ieee80211_ptr->wext.default_mgmt_key) dev->ieee80211_ptr->wext.default_mgmt_key = -1; } #endif wdev_unlock(dev->ieee80211_ptr); return err; } /* This function returns an error or the number of nested attributes */ static int validate_acl_mac_addrs(struct nlattr *nl_attr) { struct nlattr *attr; int n_entries = 0, tmp; nla_for_each_nested(attr, nl_attr, tmp) { if (nla_len(attr) != ETH_ALEN) return -EINVAL; n_entries++; } return n_entries; } /* * This function parses ACL information and allocates memory for ACL data. * On successful return, the calling function is responsible to free the * ACL buffer returned by this function. */ static struct cfg80211_acl_data *parse_acl_data(struct wiphy *wiphy, struct genl_info *info) { enum nl80211_acl_policy acl_policy; struct nlattr *attr; struct cfg80211_acl_data *acl; int i = 0, n_entries, tmp; if (!wiphy->max_acl_mac_addrs) return ERR_PTR(-EOPNOTSUPP); if (!info->attrs[NL80211_ATTR_ACL_POLICY]) return ERR_PTR(-EINVAL); acl_policy = nla_get_u32(info->attrs[NL80211_ATTR_ACL_POLICY]); if (acl_policy != NL80211_ACL_POLICY_ACCEPT_UNLESS_LISTED && acl_policy != NL80211_ACL_POLICY_DENY_UNLESS_LISTED) return ERR_PTR(-EINVAL); if (!info->attrs[NL80211_ATTR_MAC_ADDRS]) return ERR_PTR(-EINVAL); n_entries = validate_acl_mac_addrs(info->attrs[NL80211_ATTR_MAC_ADDRS]); if (n_entries < 0) return ERR_PTR(n_entries); if (n_entries > wiphy->max_acl_mac_addrs) return ERR_PTR(-ENOTSUPP); acl = kzalloc(sizeof(*acl) + (sizeof(struct mac_address) * n_entries), GFP_KERNEL); if (!acl) return ERR_PTR(-ENOMEM); nla_for_each_nested(attr, info->attrs[NL80211_ATTR_MAC_ADDRS], tmp) { memcpy(acl->mac_addrs[i].addr, nla_data(attr), ETH_ALEN); i++; } acl->n_acl_entries = n_entries; acl->acl_policy = acl_policy; return acl; } static int nl80211_set_mac_acl(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; struct cfg80211_acl_data *acl; int err; if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP && dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO) return -EOPNOTSUPP; if (!dev->ieee80211_ptr->beacon_interval) return -EINVAL; acl = parse_acl_data(&rdev->wiphy, info); if (IS_ERR(acl)) return PTR_ERR(acl); err = rdev_set_mac_acl(rdev, dev, acl); kfree(acl); return err; } static u32 rateset_to_mask(struct ieee80211_supported_band *sband, u8 *rates, u8 rates_len) { u8 i; u32 mask = 0; for (i = 0; i < rates_len; i++) { int rate = (rates[i] & 0x7f) * 5; int ridx; for (ridx = 0; ridx < sband->n_bitrates; ridx++) { struct ieee80211_rate *srate = &sband->bitrates[ridx]; if (rate == srate->bitrate) { mask |= 1 << ridx; break; } } if (ridx == 
sband->n_bitrates) return 0; /* rate not found */ } return mask; } static bool ht_rateset_to_mask(struct ieee80211_supported_band *sband, u8 *rates, u8 rates_len, u8 mcs[IEEE80211_HT_MCS_MASK_LEN]) { u8 i; memset(mcs, 0, IEEE80211_HT_MCS_MASK_LEN); for (i = 0; i < rates_len; i++) { int ridx, rbit; ridx = rates[i] / 8; rbit = BIT(rates[i] % 8); /* check validity */ if ((ridx < 0) || (ridx >= IEEE80211_HT_MCS_MASK_LEN)) return false; /* check availability */ ridx = array_index_nospec(ridx, IEEE80211_HT_MCS_MASK_LEN); if (sband->ht_cap.mcs.rx_mask[ridx] & rbit) mcs[ridx] |= rbit; else return false; } return true; } static u16 vht_mcs_map_to_mcs_mask(u8 vht_mcs_map) { u16 mcs_mask = 0; switch (vht_mcs_map) { case IEEE80211_VHT_MCS_NOT_SUPPORTED: break; case IEEE80211_VHT_MCS_SUPPORT_0_7: mcs_mask = 0x00FF; break; case IEEE80211_VHT_MCS_SUPPORT_0_8: mcs_mask = 0x01FF; break; case IEEE80211_VHT_MCS_SUPPORT_0_9: mcs_mask = 0x03FF; break; default: break; } return mcs_mask; } static void vht_build_mcs_mask(u16 vht_mcs_map, u16 vht_mcs_mask[NL80211_VHT_NSS_MAX]) { u8 nss; for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++) { vht_mcs_mask[nss] = vht_mcs_map_to_mcs_mask(vht_mcs_map & 0x03); vht_mcs_map >>= 2; } } static bool vht_set_mcs_mask(struct ieee80211_supported_band *sband, struct nl80211_txrate_vht *txrate, u16 mcs[NL80211_VHT_NSS_MAX]) { u16 tx_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map); u16 tx_mcs_mask[NL80211_VHT_NSS_MAX] = {}; u8 i; if (!sband->vht_cap.vht_supported) return false; memset(mcs, 0, sizeof(u16) * NL80211_VHT_NSS_MAX); /* Build vht_mcs_mask from VHT capabilities */ vht_build_mcs_mask(tx_mcs_map, tx_mcs_mask); for (i = 0; i < NL80211_VHT_NSS_MAX; i++) { if ((tx_mcs_mask[i] & txrate->mcs[i]) == txrate->mcs[i]) mcs[i] = txrate->mcs[i]; else return false; } return true; } static const struct nla_policy nl80211_txattr_policy[NL80211_TXRATE_MAX + 1] = { [NL80211_TXRATE_LEGACY] = { .type = NLA_BINARY, .len = NL80211_MAX_SUPP_RATES }, [NL80211_TXRATE_HT] = { .type = NLA_BINARY, .len = NL80211_MAX_SUPP_HT_RATES }, [NL80211_TXRATE_VHT] = { .len = sizeof(struct nl80211_txrate_vht)}, [NL80211_TXRATE_GI] = { .type = NLA_U8 }, }; static int nl80211_parse_tx_bitrate_mask(struct genl_info *info, struct cfg80211_bitrate_mask *mask) { struct nlattr *tb[NL80211_TXRATE_MAX + 1]; struct cfg80211_registered_device *rdev = info->user_ptr[0]; int rem, i; struct nlattr *tx_rates; struct ieee80211_supported_band *sband; u16 vht_tx_mcs_map; memset(mask, 0, sizeof(*mask)); /* Default to all rates enabled */ for (i = 0; i < NUM_NL80211_BANDS; i++) { sband = rdev->wiphy.bands[i]; if (!sband) continue; mask->control[i].legacy = (1 << sband->n_bitrates) - 1; memcpy(mask->control[i].ht_mcs, sband->ht_cap.mcs.rx_mask, sizeof(mask->control[i].ht_mcs)); if (!sband->vht_cap.vht_supported) continue; vht_tx_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map); vht_build_mcs_mask(vht_tx_mcs_map, mask->control[i].vht_mcs); } /* if no rates are given set it back to the defaults */ if (!info->attrs[NL80211_ATTR_TX_RATES]) goto out; /* The nested attribute uses enum nl80211_band as the index. This maps * directly to the enum nl80211_band values used in cfg80211. 
*/ BUILD_BUG_ON(NL80211_MAX_SUPP_HT_RATES > IEEE80211_HT_MCS_MASK_LEN * 8); nla_for_each_nested(tx_rates, info->attrs[NL80211_ATTR_TX_RATES], rem) { enum nl80211_band band = nla_type(tx_rates); int err; if (band < 0 || band >= NUM_NL80211_BANDS) return -EINVAL; sband = rdev->wiphy.bands[band]; if (sband == NULL) return -EINVAL; err = nla_parse_nested(tb, NL80211_TXRATE_MAX, tx_rates, nl80211_txattr_policy, genl_info_extack(info)); if (err) return err; if (tb[NL80211_TXRATE_LEGACY]) { mask->control[band].legacy = rateset_to_mask( sband, nla_data(tb[NL80211_TXRATE_LEGACY]), nla_len(tb[NL80211_TXRATE_LEGACY])); if ((mask->control[band].legacy == 0) && nla_len(tb[NL80211_TXRATE_LEGACY])) return -EINVAL; } if (tb[NL80211_TXRATE_HT]) { if (!ht_rateset_to_mask( sband, nla_data(tb[NL80211_TXRATE_HT]), nla_len(tb[NL80211_TXRATE_HT]), mask->control[band].ht_mcs)) return -EINVAL; } if (tb[NL80211_TXRATE_VHT]) { if (!vht_set_mcs_mask( sband, nla_data(tb[NL80211_TXRATE_VHT]), mask->control[band].vht_mcs)) return -EINVAL; } if (tb[NL80211_TXRATE_GI]) { mask->control[band].gi = nla_get_u8(tb[NL80211_TXRATE_GI]); if (mask->control[band].gi > NL80211_TXRATE_FORCE_LGI) return -EINVAL; } if (mask->control[band].legacy == 0) { /* don't allow empty legacy rates if HT or VHT * are not even supported. */ if (!(rdev->wiphy.bands[band]->ht_cap.ht_supported || rdev->wiphy.bands[band]->vht_cap.vht_supported)) return -EINVAL; for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++) if (mask->control[band].ht_mcs[i]) goto out; for (i = 0; i < NL80211_VHT_NSS_MAX; i++) if (mask->control[band].vht_mcs[i]) goto out; /* legacy and mcs rates may not be both empty */ return -EINVAL; } } out: return 0; } static int validate_beacon_tx_rate(struct cfg80211_registered_device *rdev, enum nl80211_band band, struct cfg80211_bitrate_mask *beacon_rate) { u32 count_ht, count_vht, i; u32 rate = beacon_rate->control[band].legacy; /* Allow only one rate */ if (hweight32(rate) > 1) return -EINVAL; count_ht = 0; for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++) { if (hweight8(beacon_rate->control[band].ht_mcs[i]) > 1) { return -EINVAL; } else if (beacon_rate->control[band].ht_mcs[i]) { count_ht++; if (count_ht > 1) return -EINVAL; } if (count_ht && rate) return -EINVAL; } count_vht = 0; for (i = 0; i < NL80211_VHT_NSS_MAX; i++) { if (hweight16(beacon_rate->control[band].vht_mcs[i]) > 1) { return -EINVAL; } else if (beacon_rate->control[band].vht_mcs[i]) { count_vht++; if (count_vht > 1) return -EINVAL; } if (count_vht && rate) return -EINVAL; } if ((count_ht && count_vht) || (!rate && !count_ht && !count_vht)) return -EINVAL; if (rate && !wiphy_ext_feature_isset(&rdev->wiphy, NL80211_EXT_FEATURE_BEACON_RATE_LEGACY)) return -EINVAL; if (count_ht && !wiphy_ext_feature_isset(&rdev->wiphy, NL80211_EXT_FEATURE_BEACON_RATE_HT)) return -EINVAL; if (count_vht && !wiphy_ext_feature_isset(&rdev->wiphy, NL80211_EXT_FEATURE_BEACON_RATE_VHT)) return -EINVAL; return 0; } static int nl80211_parse_beacon(struct cfg80211_registered_device *rdev, struct nlattr *attrs[], struct cfg80211_beacon_data *bcn) { bool haveinfo = false; int err; memset(bcn, 0, sizeof(*bcn)); if (attrs[NL80211_ATTR_BEACON_HEAD]) { bcn->head = nla_data(attrs[NL80211_ATTR_BEACON_HEAD]); bcn->head_len = nla_len(attrs[NL80211_ATTR_BEACON_HEAD]); if (!bcn->head_len) return -EINVAL; haveinfo = true; } if (attrs[NL80211_ATTR_BEACON_TAIL]) { bcn->tail = nla_data(attrs[NL80211_ATTR_BEACON_TAIL]); bcn->tail_len = nla_len(attrs[NL80211_ATTR_BEACON_TAIL]); haveinfo = true; } if (!haveinfo) return -EINVAL; 
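	/*
	 * For orientation (a sketch of the userspace contract, not extra
	 * validation): BEACON_HEAD carries the start of the beacon frame --
	 * the 802.11 management header, the fixed parameters (timestamp,
	 * beacon interval, capability) and the IEs up to the TIM element --
	 * while BEACON_TAIL carries the IEs that follow the TIM.  The
	 * driver or stack generates the TIM itself and assembles, roughly,
	 *
	 *	beacon = head | TIM(dtim_count, dtim_period, bitmap) | tail
	 *
	 * which is why an empty head is rejected above while a missing
	 * tail is acceptable.
	 */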
if (attrs[NL80211_ATTR_IE]) { bcn->beacon_ies = nla_data(attrs[NL80211_ATTR_IE]); bcn->beacon_ies_len = nla_len(attrs[NL80211_ATTR_IE]); } if (attrs[NL80211_ATTR_IE_PROBE_RESP]) { bcn->proberesp_ies = nla_data(attrs[NL80211_ATTR_IE_PROBE_RESP]); bcn->proberesp_ies_len = nla_len(attrs[NL80211_ATTR_IE_PROBE_RESP]); } if (attrs[NL80211_ATTR_IE_ASSOC_RESP]) { bcn->assocresp_ies = nla_data(attrs[NL80211_ATTR_IE_ASSOC_RESP]); bcn->assocresp_ies_len = nla_len(attrs[NL80211_ATTR_IE_ASSOC_RESP]); } if (attrs[NL80211_ATTR_PROBE_RESP]) { bcn->probe_resp = nla_data(attrs[NL80211_ATTR_PROBE_RESP]); bcn->probe_resp_len = nla_len(attrs[NL80211_ATTR_PROBE_RESP]); } if (attrs[NL80211_ATTR_FTM_RESPONDER]) { struct nlattr *tb[NL80211_FTM_RESP_ATTR_MAX + 1]; err = nla_parse_nested(tb, NL80211_FTM_RESP_ATTR_MAX, attrs[NL80211_ATTR_FTM_RESPONDER], NULL, NULL); if (err) return err; if (tb[NL80211_FTM_RESP_ATTR_ENABLED] && wiphy_ext_feature_isset(&rdev->wiphy, NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER)) bcn->ftm_responder = 1; else return -EOPNOTSUPP; if (tb[NL80211_FTM_RESP_ATTR_LCI]) { bcn->lci = nla_data(tb[NL80211_FTM_RESP_ATTR_LCI]); bcn->lci_len = nla_len(tb[NL80211_FTM_RESP_ATTR_LCI]); } if (tb[NL80211_FTM_RESP_ATTR_CIVICLOC]) { bcn->civicloc = nla_data(tb[NL80211_FTM_RESP_ATTR_CIVICLOC]); bcn->civicloc_len = nla_len(tb[NL80211_FTM_RESP_ATTR_CIVICLOC]); } } else { bcn->ftm_responder = -1; } return 0; } static void nl80211_check_ap_rate_selectors(struct cfg80211_ap_settings *params, const u8 *rates) { int i; if (!rates) return; for (i = 0; i < rates[1]; i++) { if (rates[2 + i] == BSS_MEMBERSHIP_SELECTOR_HT_PHY) params->ht_required = true; if (rates[2 + i] == BSS_MEMBERSHIP_SELECTOR_VHT_PHY) params->vht_required = true; } } /* * Since the nl80211 API didn't include, from the beginning, attributes about * HT/VHT requirements/capabilities, we parse them out of the IEs for the * benefit of drivers that rebuild IEs in the firmware. 
 */
static void nl80211_calculate_ap_params(struct cfg80211_ap_settings *params)
{
	const struct cfg80211_beacon_data *bcn = &params->beacon;
	size_t ies_len = bcn->tail_len;
	const u8 *ies = bcn->tail;
	const u8 *rates;
	const u8 *cap;

	rates = cfg80211_find_ie(WLAN_EID_SUPP_RATES, ies, ies_len);
	nl80211_check_ap_rate_selectors(params, rates);

	rates = cfg80211_find_ie(WLAN_EID_EXT_SUPP_RATES, ies, ies_len);
	nl80211_check_ap_rate_selectors(params, rates);

	cap = cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, ies, ies_len);
	if (cap && cap[1] >= sizeof(*params->ht_cap))
		params->ht_cap = (void *)(cap + 2);
	cap = cfg80211_find_ie(WLAN_EID_VHT_CAPABILITY, ies, ies_len);
	if (cap && cap[1] >= sizeof(*params->vht_cap))
		params->vht_cap = (void *)(cap + 2);
	cap = cfg80211_find_ext_ie(WLAN_EID_EXT_HE_CAPABILITY, ies, ies_len);
	if (cap && cap[1] >= sizeof(*params->he_cap) + 1)
		params->he_cap = (void *)(cap + 3);
}

static bool nl80211_get_ap_channel(struct cfg80211_registered_device *rdev,
				   struct cfg80211_ap_settings *params)
{
	struct wireless_dev *wdev;
	bool ret = false;

	list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
		if (wdev->iftype != NL80211_IFTYPE_AP &&
		    wdev->iftype != NL80211_IFTYPE_P2P_GO)
			continue;

		if (!wdev->preset_chandef.chan)
			continue;

		params->chandef = wdev->preset_chandef;
		ret = true;
		break;
	}

	return ret;
}

static bool nl80211_valid_auth_type(struct cfg80211_registered_device *rdev,
				    enum nl80211_auth_type auth_type,
				    enum nl80211_commands cmd)
{
	if (auth_type > NL80211_AUTHTYPE_MAX)
		return false;

	switch (cmd) {
	case NL80211_CMD_AUTHENTICATE:
		if (!(rdev->wiphy.features & NL80211_FEATURE_SAE) &&
		    auth_type == NL80211_AUTHTYPE_SAE)
			return false;
		if (!wiphy_ext_feature_isset(&rdev->wiphy,
					     NL80211_EXT_FEATURE_FILS_STA) &&
		    (auth_type == NL80211_AUTHTYPE_FILS_SK ||
		     auth_type == NL80211_AUTHTYPE_FILS_SK_PFS ||
		     auth_type == NL80211_AUTHTYPE_FILS_PK))
			return false;
		return true;
	case NL80211_CMD_CONNECT:
		if (!(rdev->wiphy.features & NL80211_FEATURE_SAE) &&
		    auth_type == NL80211_AUTHTYPE_SAE)
			return false;

		/* FILS with SK PFS or PK not supported yet */
		if (auth_type == NL80211_AUTHTYPE_FILS_SK_PFS ||
		    auth_type == NL80211_AUTHTYPE_FILS_PK)
			return false;
		if (!wiphy_ext_feature_isset(
			    &rdev->wiphy,
			    NL80211_EXT_FEATURE_FILS_SK_OFFLOAD) &&
		    auth_type == NL80211_AUTHTYPE_FILS_SK)
			return false;
		return true;
	case NL80211_CMD_START_AP:
		/* SAE not supported yet */
		if (auth_type == NL80211_AUTHTYPE_SAE)
			return false;
		/* FILS not supported yet */
		if (auth_type == NL80211_AUTHTYPE_FILS_SK ||
		    auth_type == NL80211_AUTHTYPE_FILS_SK_PFS ||
		    auth_type == NL80211_AUTHTYPE_FILS_PK)
			return false;
		return true;
	default:
		return false;
	}
}

static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
{
	struct cfg80211_registered_device *rdev = info->user_ptr[0];
	struct net_device *dev = info->user_ptr[1];
	struct wireless_dev *wdev = dev->ieee80211_ptr;
	struct cfg80211_ap_settings params;
	int err;

	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
		return -EOPNOTSUPP;

	if (!rdev->ops->start_ap)
		return -EOPNOTSUPP;

	if (wdev->beacon_interval)
		return -EALREADY;

	memset(&params, 0, sizeof(params));

	/* these are required for START_AP */
	if (!info->attrs[NL80211_ATTR_BEACON_INTERVAL] ||
	    !info->attrs[NL80211_ATTR_DTIM_PERIOD] ||
	    !info->attrs[NL80211_ATTR_BEACON_HEAD])
		return -EINVAL;

	err = nl80211_parse_beacon(rdev, info->attrs, &params.beacon);
	if (err)
		return err;

	params.beacon_interval =
		nla_get_u32(info->attrs[NL80211_ATTR_BEACON_INTERVAL]);
	params.dtim_period =
		nla_get_u32(info->attrs[NL80211_ATTR_DTIM_PERIOD]);

	err = cfg80211_validate_beacon_int(rdev, dev->ieee80211_ptr->iftype,
					   params.beacon_interval);
	if (err)
		return err;

	/*
	 * In theory, some of these attributes should be required here
	 * but since they were not used when the command was originally
	 * added, keep them optional for old user space programs to let
	 * them continue to work with drivers that do not need the
	 * additional information -- drivers must check!
	 */
	if (info->attrs[NL80211_ATTR_SSID]) {
		params.ssid = nla_data(info->attrs[NL80211_ATTR_SSID]);
		params.ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]);
		if (params.ssid_len == 0 ||
		    params.ssid_len > IEEE80211_MAX_SSID_LEN)
			return -EINVAL;
	}

	if (info->attrs[NL80211_ATTR_HIDDEN_SSID])
		params.hidden_ssid = nla_get_u32(
			info->attrs[NL80211_ATTR_HIDDEN_SSID]);

	params.privacy = !!info->attrs[NL80211_ATTR_PRIVACY];

	if (info->attrs[NL80211_ATTR_AUTH_TYPE]) {
		params.auth_type = nla_get_u32(
			info->attrs[NL80211_ATTR_AUTH_TYPE]);
		if (!nl80211_valid_auth_type(rdev, params.auth_type,
					     NL80211_CMD_START_AP))
			return -EINVAL;
	} else
		params.auth_type = NL80211_AUTHTYPE_AUTOMATIC;

	err = nl80211_crypto_settings(rdev, info, &params.crypto,
				      NL80211_MAX_NR_CIPHER_SUITES, 1);
	if (err)
		return err;

	if (info->attrs[NL80211_ATTR_INACTIVITY_TIMEOUT]) {
		if (!(rdev->wiphy.features & NL80211_FEATURE_INACTIVITY_TIMER))
			return -EOPNOTSUPP;
		params.inactivity_timeout = nla_get_u16(
			info->attrs[NL80211_ATTR_INACTIVITY_TIMEOUT]);
	}

	if (info->attrs[NL80211_ATTR_P2P_CTWINDOW]) {
		if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
			return -EINVAL;
		params.p2p_ctwindow =
			nla_get_u8(info->attrs[NL80211_ATTR_P2P_CTWINDOW]);
		if (params.p2p_ctwindow != 0 &&
		    !(rdev->wiphy.features & NL80211_FEATURE_P2P_GO_CTWIN))
			return -EINVAL;
	}

	if (info->attrs[NL80211_ATTR_P2P_OPPPS]) {
		u8 tmp;

		if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
			return -EINVAL;
		tmp = nla_get_u8(info->attrs[NL80211_ATTR_P2P_OPPPS]);
		params.p2p_opp_ps = tmp;
		if (params.p2p_opp_ps != 0 &&
		    !(rdev->wiphy.features & NL80211_FEATURE_P2P_GO_OPPPS))
			return -EINVAL;
	}

	if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) {
		err = nl80211_parse_chandef(rdev, info, &params.chandef);
		if (err)
			return err;
	} else if (wdev->preset_chandef.chan) {
		params.chandef = wdev->preset_chandef;
	} else if (!nl80211_get_ap_channel(rdev, &params))
		return -EINVAL;

	if (!cfg80211_reg_can_beacon_relax(&rdev->wiphy, &params.chandef,
					   wdev->iftype))
		return -EINVAL;

	if (info->attrs[NL80211_ATTR_TX_RATES]) {
		err = nl80211_parse_tx_bitrate_mask(info, &params.beacon_rate);
		if (err)
			return err;

		err = validate_beacon_tx_rate(rdev, params.chandef.chan->band,
					      &params.beacon_rate);
		if (err)
			return err;
	}

	if (info->attrs[NL80211_ATTR_SMPS_MODE]) {
		params.smps_mode =
			nla_get_u8(info->attrs[NL80211_ATTR_SMPS_MODE]);
		switch (params.smps_mode) {
		case NL80211_SMPS_OFF:
			break;
		case NL80211_SMPS_STATIC:
			if (!(rdev->wiphy.features &
			      NL80211_FEATURE_STATIC_SMPS))
				return -EINVAL;
			break;
		case NL80211_SMPS_DYNAMIC:
			if (!(rdev->wiphy.features &
			      NL80211_FEATURE_DYNAMIC_SMPS))
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}
	} else {
		params.smps_mode = NL80211_SMPS_OFF;
	}

	params.pbss = nla_get_flag(info->attrs[NL80211_ATTR_PBSS]);
	if (params.pbss && !rdev->wiphy.bands[NL80211_BAND_60GHZ])
		return -EOPNOTSUPP;

	if (info->attrs[NL80211_ATTR_ACL_POLICY]) {
		params.acl = parse_acl_data(&rdev->wiphy, info);
		if (IS_ERR(params.acl))
			return PTR_ERR(params.acl);
	}

	nl80211_calculate_ap_params(&params);

	wdev_lock(wdev);
	err = rdev_start_ap(rdev, dev, &params);
	if (!err) {
		wdev->preset_chandef = params.chandef;
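		/*
		 * On success, mirror the AP configuration into the wdev
		 * (under the wdev lock) so later SET_BEACON/STOP_AP calls
		 * and channel queries can be validated without asking the
		 * driver again.  A minimal userspace trigger for this path
		 * would carry (sketch, attribute values hypothetical):
		 *
		 *	NL80211_CMD_START_AP {
		 *		NL80211_ATTR_IFINDEX         = <ifindex>
		 *		NL80211_ATTR_SSID            = "test-ap"
		 *		NL80211_ATTR_BEACON_INTERVAL = 100
		 *		NL80211_ATTR_DTIM_PERIOD     = 2
		 *		NL80211_ATTR_BEACON_HEAD     = <hdr + IEs>
		 *		NL80211_ATTR_BEACON_TAIL     = <post-TIM IEs>
		 *	}
		 *
		 * matching the "required for START_AP" checks at the top of
		 * this function.
		 */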
		wdev->beacon_interval = params.beacon_interval;
		wdev->chandef = params.chandef;
		wdev->ssid_len = params.ssid_len;
		memcpy(wdev->ssid, params.ssid, wdev->ssid_len);

		if (info->attrs[NL80211_ATTR_SOCKET_OWNER])
			wdev->conn_owner_nlportid = genl_info_snd_portid(info);
	}
	wdev_unlock(wdev);

	kfree(params.acl);

	return err;
}

static int nl80211_set_beacon(struct sk_buff *skb, struct genl_info *info)
{
	struct cfg80211_registered_device *rdev = info->user_ptr[0];
	struct net_device *dev = info->user_ptr[1];
	struct wireless_dev *wdev = dev->ieee80211_ptr;
	struct cfg80211_beacon_data params;
	int err;

	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
		return -EOPNOTSUPP;

	if (!rdev->ops->change_beacon)
		return -EOPNOTSUPP;

	if (!wdev->beacon_interval)
		return -EINVAL;

	err = nl80211_parse_beacon(rdev, info->attrs, &params);
	if (err)
		return err;

	wdev_lock(wdev);
	err = rdev_change_beacon(rdev, dev, &params);
	wdev_unlock(wdev);

	return err;
}

static int nl80211_stop_ap(struct sk_buff *skb, struct genl_info *info)
{
	struct cfg80211_registered_device *rdev = info->user_ptr[0];
	struct net_device *dev = info->user_ptr[1];

	return cfg80211_stop_ap(rdev, dev, false);
}

static const struct nla_policy sta_flags_policy[NL80211_STA_FLAG_MAX + 1] = {
	[NL80211_STA_FLAG_AUTHORIZED] = { .type = NLA_FLAG },
	[NL80211_STA_FLAG_SHORT_PREAMBLE] = { .type = NLA_FLAG },
	[NL80211_STA_FLAG_WME] = { .type = NLA_FLAG },
	[NL80211_STA_FLAG_MFP] = { .type = NLA_FLAG },
	[NL80211_STA_FLAG_AUTHENTICATED] = { .type = NLA_FLAG },
	[NL80211_STA_FLAG_TDLS_PEER] = { .type = NLA_FLAG },
};

static int parse_station_flags(struct genl_info *info,
			       enum nl80211_iftype iftype,
			       struct station_parameters *params)
{
	struct nlattr *flags[NL80211_STA_FLAG_MAX + 1];
	struct nlattr *nla;
	int flag;

	/*
	 * Try parsing the new attribute first so userspace
	 * can specify both for older kernels.
	 */
	nla = info->attrs[NL80211_ATTR_STA_FLAGS2];
	if (nla) {
		struct nl80211_sta_flag_update *sta_flags;

		sta_flags = nla_data(nla);
		params->sta_flags_mask = sta_flags->mask;
		params->sta_flags_set = sta_flags->set;
		params->sta_flags_set &= params->sta_flags_mask;
		if ((params->sta_flags_mask | params->sta_flags_set) &
		    BIT(__NL80211_STA_FLAG_INVALID))
			return -EINVAL;
		return 0;
	}

	/* if present, parse the old attribute */

	nla = info->attrs[NL80211_ATTR_STA_FLAGS];
	if (!nla)
		return 0;

	if (nla_parse_nested(flags, NL80211_STA_FLAG_MAX, nla,
			     sta_flags_policy, genl_info_extack(info)))
		return -EINVAL;

	/*
	 * Only allow certain flags for interface types so that
	 * other attributes are silently ignored. Remember that
	 * this is backward compatibility code with old userspace
	 * and shouldn't be hit in other cases anyway.
 */
	switch (iftype) {
	case NL80211_IFTYPE_AP:
	case NL80211_IFTYPE_AP_VLAN:
	case NL80211_IFTYPE_P2P_GO:
		params->sta_flags_mask = BIT(NL80211_STA_FLAG_AUTHORIZED) |
					 BIT(NL80211_STA_FLAG_SHORT_PREAMBLE) |
					 BIT(NL80211_STA_FLAG_WME) |
					 BIT(NL80211_STA_FLAG_MFP);
		break;
	case NL80211_IFTYPE_P2P_CLIENT:
	case NL80211_IFTYPE_STATION:
		params->sta_flags_mask = BIT(NL80211_STA_FLAG_AUTHORIZED) |
					 BIT(NL80211_STA_FLAG_TDLS_PEER);
		break;
	case NL80211_IFTYPE_MESH_POINT:
		params->sta_flags_mask = BIT(NL80211_STA_FLAG_AUTHENTICATED) |
					 BIT(NL80211_STA_FLAG_MFP) |
					 BIT(NL80211_STA_FLAG_AUTHORIZED);
		break;
	default:
		return -EINVAL;
	}

	for (flag = 1; flag <= NL80211_STA_FLAG_MAX; flag++) {
		if (flags[flag]) {
			params->sta_flags_set |= (1<<flag);

			/* no longer support new API additions in old API */
			if (flag > NL80211_STA_FLAG_MAX_OLD_API)
				return -EINVAL;
		}
	}

	return 0;
}

bool nl80211_put_sta_rate(struct sk_buff *msg, struct rate_info *info,
			  int attr)
{
	struct nlattr *rate;
	u32 bitrate;
	u16 bitrate_compat;
	enum nl80211_rate_info rate_flg;

	rate = nla_nest_start(msg, attr);
	if (!rate)
		return false;

	/* cfg80211_calculate_bitrate will return 0 for mcs >= 32 */
	bitrate = cfg80211_calculate_bitrate(info);
	/* report 16-bit bitrate only if we can */
	bitrate_compat = bitrate < (1UL << 16) ? bitrate : 0;
	if (bitrate > 0 &&
	    nla_put_u32(msg, NL80211_RATE_INFO_BITRATE32, bitrate))
		return false;
	if (bitrate_compat > 0 &&
	    nla_put_u16(msg, NL80211_RATE_INFO_BITRATE, bitrate_compat))
		return false;

	switch (info->bw) {
	case RATE_INFO_BW_5:
		rate_flg = NL80211_RATE_INFO_5_MHZ_WIDTH;
		break;
	case RATE_INFO_BW_10:
		rate_flg = NL80211_RATE_INFO_10_MHZ_WIDTH;
		break;
	default:
		WARN_ON(1);
		/* fall through */
	case RATE_INFO_BW_20:
		rate_flg = 0;
		break;
	case RATE_INFO_BW_40:
		rate_flg = NL80211_RATE_INFO_40_MHZ_WIDTH;
		break;
	case RATE_INFO_BW_80:
		rate_flg = NL80211_RATE_INFO_80_MHZ_WIDTH;
		break;
	case RATE_INFO_BW_160:
		rate_flg = NL80211_RATE_INFO_160_MHZ_WIDTH;
		break;
	case RATE_INFO_BW_HE_RU:
		rate_flg = 0;
		WARN_ON(!(info->flags & RATE_INFO_FLAGS_HE_MCS));
	}

	if (rate_flg && nla_put_flag(msg, rate_flg))
		return false;

	if (info->flags & RATE_INFO_FLAGS_MCS) {
		if (nla_put_u8(msg, NL80211_RATE_INFO_MCS, info->mcs))
			return false;
		if (info->flags & RATE_INFO_FLAGS_SHORT_GI &&
		    nla_put_flag(msg, NL80211_RATE_INFO_SHORT_GI))
			return false;
	} else if (info->flags & RATE_INFO_FLAGS_VHT_MCS) {
		if (nla_put_u8(msg, NL80211_RATE_INFO_VHT_MCS, info->mcs))
			return false;
		if (nla_put_u8(msg, NL80211_RATE_INFO_VHT_NSS, info->nss))
			return false;
		if (info->flags & RATE_INFO_FLAGS_SHORT_GI &&
		    nla_put_flag(msg, NL80211_RATE_INFO_SHORT_GI))
			return false;
	} else if (info->flags & RATE_INFO_FLAGS_HE_MCS) {
		if (nla_put_u8(msg, NL80211_RATE_INFO_HE_MCS, info->mcs))
			return false;
		if (nla_put_u8(msg, NL80211_RATE_INFO_HE_NSS, info->nss))
			return false;
		if (nla_put_u8(msg, NL80211_RATE_INFO_HE_GI, info->he_gi))
			return false;
		if (nla_put_u8(msg, NL80211_RATE_INFO_HE_DCM, info->he_dcm))
			return false;
		if (info->bw == RATE_INFO_BW_HE_RU &&
		    nla_put_u8(msg, NL80211_RATE_INFO_HE_RU_ALLOC,
			       info->he_ru_alloc))
			return false;
	}

	nla_nest_end(msg, rate);
	return true;
}

static bool nl80211_put_signal(struct sk_buff *msg, u8 mask, s8 *signal,
			       int id)
{
	void *attr;
	int i = 0;

	if (!mask)
		return true;

	attr = nla_nest_start(msg, id);
	if (!attr)
		return false;

	for (i = 0; i < IEEE80211_MAX_CHAINS; i++) {
		if (!(mask & BIT(i)))
			continue;

		if (nla_put_u8(msg, i, signal[i]))
			return false;
	}

	nla_nest_end(msg, attr);

	return true;
}

static int nl80211_send_station(struct sk_buff *msg, u32 cmd, u32 portid,
				u32 seq, int flags,
				struct cfg80211_registered_device *rdev,
				struct net_device *dev,
const u8 *mac_addr, struct station_info *sinfo) { void *hdr; struct nlattr *sinfoattr, *bss_param; hdr = nl80211hdr_put(msg, portid, seq, flags, cmd); if (!hdr) return -1; if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) || nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr) || nla_put_u32(msg, NL80211_ATTR_GENERATION, sinfo->generation)) goto nla_put_failure; sinfoattr = nla_nest_start(msg, NL80211_ATTR_STA_INFO); if (!sinfoattr) goto nla_put_failure; #define PUT_SINFO(attr, memb, type) do { \ BUILD_BUG_ON(sizeof(type) == sizeof(u64)); \ if (sinfo->filled & BIT_ULL(NL80211_STA_INFO_ ## attr) && \ nla_put_ ## type(msg, NL80211_STA_INFO_ ## attr, \ sinfo->memb)) \ goto nla_put_failure; \ } while (0) #define PUT_SINFO_U64(attr, memb) do { \ if (sinfo->filled & BIT_ULL(NL80211_STA_INFO_ ## attr) && \ nla_put_u64_64bit(msg, NL80211_STA_INFO_ ## attr, \ sinfo->memb, NL80211_STA_INFO_PAD)) \ goto nla_put_failure; \ } while (0) PUT_SINFO(CONNECTED_TIME, connected_time, u32); PUT_SINFO(INACTIVE_TIME, inactive_time, u32); if (sinfo->filled & (BIT_ULL(NL80211_STA_INFO_RX_BYTES) | BIT_ULL(NL80211_STA_INFO_RX_BYTES64)) && nla_put_u32(msg, NL80211_STA_INFO_RX_BYTES, (u32)sinfo->rx_bytes)) goto nla_put_failure; if (sinfo->filled & (BIT_ULL(NL80211_STA_INFO_TX_BYTES) | BIT_ULL(NL80211_STA_INFO_TX_BYTES64)) && nla_put_u32(msg, NL80211_STA_INFO_TX_BYTES, (u32)sinfo->tx_bytes)) goto nla_put_failure; PUT_SINFO_U64(RX_BYTES64, rx_bytes); PUT_SINFO_U64(TX_BYTES64, tx_bytes); PUT_SINFO(LLID, llid, u16); PUT_SINFO(PLID, plid, u16); PUT_SINFO(PLINK_STATE, plink_state, u8); PUT_SINFO_U64(RX_DURATION, rx_duration); switch (rdev->wiphy.signal_type) { case CFG80211_SIGNAL_TYPE_MBM: PUT_SINFO(SIGNAL, signal, u8); PUT_SINFO(SIGNAL_AVG, signal_avg, u8); break; default: break; } if (sinfo->filled & BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL)) { if (!nl80211_put_signal(msg, sinfo->chains, sinfo->chain_signal, NL80211_STA_INFO_CHAIN_SIGNAL)) goto nla_put_failure; } if (sinfo->filled & BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL_AVG)) { if (!nl80211_put_signal(msg, sinfo->chains, sinfo->chain_signal_avg, NL80211_STA_INFO_CHAIN_SIGNAL_AVG)) goto nla_put_failure; } if (sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_BITRATE)) { if (!nl80211_put_sta_rate(msg, &sinfo->txrate, NL80211_STA_INFO_TX_BITRATE)) goto nla_put_failure; } if (sinfo->filled & BIT_ULL(NL80211_STA_INFO_RX_BITRATE)) { if (!nl80211_put_sta_rate(msg, &sinfo->rxrate, NL80211_STA_INFO_RX_BITRATE)) goto nla_put_failure; } PUT_SINFO(RX_PACKETS, rx_packets, u32); PUT_SINFO(TX_PACKETS, tx_packets, u32); PUT_SINFO(TX_RETRIES, tx_retries, u32); PUT_SINFO(TX_FAILED, tx_failed, u32); PUT_SINFO(EXPECTED_THROUGHPUT, expected_throughput, u32); PUT_SINFO(BEACON_LOSS, beacon_loss_count, u32); PUT_SINFO(LOCAL_PM, local_pm, u32); PUT_SINFO(PEER_PM, peer_pm, u32); PUT_SINFO(NONPEER_PM, nonpeer_pm, u32); PUT_SINFO(CONNECTED_TO_GATE, connected_to_gate, u8); if (sinfo->filled & BIT_ULL(NL80211_STA_INFO_BSS_PARAM)) { bss_param = nla_nest_start(msg, NL80211_STA_INFO_BSS_PARAM); if (!bss_param) goto nla_put_failure; if (((sinfo->bss_param.flags & BSS_PARAM_FLAGS_CTS_PROT) && nla_put_flag(msg, NL80211_STA_BSS_PARAM_CTS_PROT)) || ((sinfo->bss_param.flags & BSS_PARAM_FLAGS_SHORT_PREAMBLE) && nla_put_flag(msg, NL80211_STA_BSS_PARAM_SHORT_PREAMBLE)) || ((sinfo->bss_param.flags & BSS_PARAM_FLAGS_SHORT_SLOT_TIME) && nla_put_flag(msg, NL80211_STA_BSS_PARAM_SHORT_SLOT_TIME)) || nla_put_u8(msg, NL80211_STA_BSS_PARAM_DTIM_PERIOD, sinfo->bss_param.dtim_period) || nla_put_u16(msg, 
NL80211_STA_BSS_PARAM_BEACON_INTERVAL, sinfo->bss_param.beacon_interval)) goto nla_put_failure; nla_nest_end(msg, bss_param); } if ((sinfo->filled & BIT_ULL(NL80211_STA_INFO_STA_FLAGS)) && nla_put(msg, NL80211_STA_INFO_STA_FLAGS, sizeof(struct nl80211_sta_flag_update), &sinfo->sta_flags)) goto nla_put_failure; PUT_SINFO_U64(T_OFFSET, t_offset); PUT_SINFO_U64(RX_DROP_MISC, rx_dropped_misc); PUT_SINFO_U64(BEACON_RX, rx_beacon); PUT_SINFO(BEACON_SIGNAL_AVG, rx_beacon_signal_avg, u8); PUT_SINFO(RX_MPDUS, rx_mpdu_count, u32); PUT_SINFO(FCS_ERROR_COUNT, fcs_err_count, u32); if (wiphy_ext_feature_isset(&rdev->wiphy, NL80211_EXT_FEATURE_ACK_SIGNAL_SUPPORT)) { PUT_SINFO(ACK_SIGNAL, ack_signal, u8); PUT_SINFO(ACK_SIGNAL_AVG, avg_ack_signal, s8); } #undef PUT_SINFO #undef PUT_SINFO_U64 if (sinfo->pertid) { struct nlattr *tidsattr; int tid; tidsattr = nla_nest_start(msg, NL80211_STA_INFO_TID_STATS); if (!tidsattr) goto nla_put_failure; for (tid = 0; tid < IEEE80211_NUM_TIDS + 1; tid++) { struct cfg80211_tid_stats *tidstats; struct nlattr *tidattr; tidstats = &sinfo->pertid[tid]; if (!tidstats->filled) continue; tidattr = nla_nest_start(msg, tid + 1); if (!tidattr) goto nla_put_failure; #define PUT_TIDVAL_U64(attr, memb) do { \ if (tidstats->filled & BIT(NL80211_TID_STATS_ ## attr) && \ nla_put_u64_64bit(msg, NL80211_TID_STATS_ ## attr, \ tidstats->memb, NL80211_TID_STATS_PAD)) \ goto nla_put_failure; \ } while (0) PUT_TIDVAL_U64(RX_MSDU, rx_msdu); PUT_TIDVAL_U64(TX_MSDU, tx_msdu); PUT_TIDVAL_U64(TX_MSDU_RETRIES, tx_msdu_retries); PUT_TIDVAL_U64(TX_MSDU_FAILED, tx_msdu_failed); #undef PUT_TIDVAL_U64 if ((tidstats->filled & BIT(NL80211_TID_STATS_TXQ_STATS)) && !nl80211_put_txq_stats(msg, &tidstats->txq_stats, NL80211_TID_STATS_TXQ_STATS)) goto nla_put_failure; nla_nest_end(msg, tidattr); } nla_nest_end(msg, tidsattr); } nla_nest_end(msg, sinfoattr); if (sinfo->assoc_req_ies_len && nla_put(msg, NL80211_ATTR_IE, sinfo->assoc_req_ies_len, sinfo->assoc_req_ies)) goto nla_put_failure; cfg80211_sinfo_release_content(sinfo); genlmsg_end(msg, hdr); return 0; nla_put_failure: cfg80211_sinfo_release_content(sinfo); genlmsg_cancel(msg, hdr); return -EMSGSIZE; } static int nl80211_dump_station(struct sk_buff *skb, struct netlink_callback *cb) { struct station_info sinfo; struct cfg80211_registered_device *rdev; struct wireless_dev *wdev; u8 mac_addr[ETH_ALEN]; int sta_idx = cb->args[2]; int err; rtnl_lock(); err = nl80211_prepare_wdev_dump(cb, &rdev, &wdev); if (err) goto out_err; if (!wdev->netdev) { err = -EINVAL; goto out_err; } if (!rdev->ops->dump_station) { err = -EOPNOTSUPP; goto out_err; } while (1) { memset(&sinfo, 0, sizeof(sinfo)); err = rdev_dump_station(rdev, wdev->netdev, sta_idx, mac_addr, &sinfo); if (err == -ENOENT) break; if (err) goto out_err; if (nl80211_send_station(skb, NL80211_CMD_NEW_STATION, NETLINK_CB_PORTID(cb->skb), cb->nlh->nlmsg_seq, NLM_F_MULTI, rdev, wdev->netdev, mac_addr, &sinfo) < 0) goto out; sta_idx++; } out: cb->args[2] = sta_idx; err = skb->len; out_err: rtnl_unlock(); return err; } static int nl80211_get_station(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; struct station_info sinfo; struct sk_buff *msg; u8 *mac_addr = NULL; int err; memset(&sinfo, 0, sizeof(sinfo)); if (!info->attrs[NL80211_ATTR_MAC]) return -EINVAL; mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]); if (!rdev->ops->get_station) return -EOPNOTSUPP; err = rdev_get_station(rdev, dev, mac_addr, &sinfo); if 
(err)
		return err;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg) {
		cfg80211_sinfo_release_content(&sinfo);
		return -ENOMEM;
	}

	if (nl80211_send_station(msg, NL80211_CMD_NEW_STATION,
				 genl_info_snd_portid(info), info->snd_seq, 0,
				 rdev, dev, mac_addr, &sinfo) < 0) {
		nlmsg_free(msg);
		return -ENOBUFS;
	}

	return genlmsg_reply(msg, info);
}

int cfg80211_check_station_change(struct wiphy *wiphy,
				  struct station_parameters *params,
				  enum cfg80211_station_type statype)
{
	if (params->listen_interval != -1 &&
	    statype != CFG80211_STA_AP_CLIENT_UNASSOC)
		return -EINVAL;

	if (params->support_p2p_ps != -1 &&
	    statype != CFG80211_STA_AP_CLIENT_UNASSOC)
		return -EINVAL;

	if (params->aid &&
	    !(params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) &&
	    statype != CFG80211_STA_AP_CLIENT_UNASSOC)
		return -EINVAL;

	/* When you run into this, adjust the code below for the new flag */
	BUILD_BUG_ON(NL80211_STA_FLAG_MAX != 7);

	switch (statype) {
	case CFG80211_STA_MESH_PEER_KERNEL:
	case CFG80211_STA_MESH_PEER_USER:
		/*
		 * No ignoring the TDLS flag here -- the userspace mesh
		 * code doesn't have the bug of including TDLS in the
		 * mask everywhere.
		 */
		if (params->sta_flags_mask &
				~(BIT(NL80211_STA_FLAG_AUTHENTICATED) |
				  BIT(NL80211_STA_FLAG_MFP) |
				  BIT(NL80211_STA_FLAG_AUTHORIZED)))
			return -EINVAL;
		break;
	case CFG80211_STA_TDLS_PEER_SETUP:
	case CFG80211_STA_TDLS_PEER_ACTIVE:
		if (!(params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)))
			return -EINVAL;
		/* ignore since it can't change */
		params->sta_flags_mask &= ~BIT(NL80211_STA_FLAG_TDLS_PEER);
		break;
	default:
		/* disallow mesh-specific things */
		if (params->plink_action != NL80211_PLINK_ACTION_NO_ACTION)
			return -EINVAL;
		if (params->local_pm)
			return -EINVAL;
		if (params->sta_modify_mask & STATION_PARAM_APPLY_PLINK_STATE)
			return -EINVAL;
	}

	if (statype != CFG80211_STA_TDLS_PEER_SETUP &&
	    statype != CFG80211_STA_TDLS_PEER_ACTIVE) {
		/* TDLS can't be set, ... */
		if (params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER))
			return -EINVAL;
		/*
		 * ... but don't bother the driver with it. This works around
		 * a hostapd/wpa_supplicant issue -- it always includes the
		 * TDLS_PEER flag in the mask even for AP mode.
*/ params->sta_flags_mask &= ~BIT(NL80211_STA_FLAG_TDLS_PEER); } if (statype != CFG80211_STA_TDLS_PEER_SETUP && statype != CFG80211_STA_AP_CLIENT_UNASSOC) { /* reject other things that can't change */ if (params->sta_modify_mask & STATION_PARAM_APPLY_UAPSD) return -EINVAL; if (params->sta_modify_mask & STATION_PARAM_APPLY_CAPABILITY) return -EINVAL; if (params->supported_rates) return -EINVAL; if (params->ext_capab || params->ht_capa || params->vht_capa || params->he_capa) return -EINVAL; } if (statype != CFG80211_STA_AP_CLIENT && statype != CFG80211_STA_AP_CLIENT_UNASSOC) { if (params->vlan) return -EINVAL; } switch (statype) { case CFG80211_STA_AP_MLME_CLIENT: /* Use this only for authorizing/unauthorizing a station */ if (!(params->sta_flags_mask & BIT(NL80211_STA_FLAG_AUTHORIZED))) return -EOPNOTSUPP; break; case CFG80211_STA_AP_CLIENT: case CFG80211_STA_AP_CLIENT_UNASSOC: /* accept only the listed bits */ if (params->sta_flags_mask & ~(BIT(NL80211_STA_FLAG_AUTHORIZED) | BIT(NL80211_STA_FLAG_AUTHENTICATED) | BIT(NL80211_STA_FLAG_ASSOCIATED) | BIT(NL80211_STA_FLAG_SHORT_PREAMBLE) | BIT(NL80211_STA_FLAG_WME) | BIT(NL80211_STA_FLAG_MFP))) return -EINVAL; /* but authenticated/associated only if driver handles it */ if (!(wiphy->features & NL80211_FEATURE_FULL_AP_CLIENT_STATE) && params->sta_flags_mask & (BIT(NL80211_STA_FLAG_AUTHENTICATED) | BIT(NL80211_STA_FLAG_ASSOCIATED))) return -EINVAL; break; case CFG80211_STA_IBSS: case CFG80211_STA_AP_STA: /* reject any changes other than AUTHORIZED */ if (params->sta_flags_mask & ~BIT(NL80211_STA_FLAG_AUTHORIZED)) return -EINVAL; break; case CFG80211_STA_TDLS_PEER_SETUP: /* reject any changes other than AUTHORIZED or WME */ if (params->sta_flags_mask & ~(BIT(NL80211_STA_FLAG_AUTHORIZED) | BIT(NL80211_STA_FLAG_WME))) return -EINVAL; /* force (at least) rates when authorizing */ if (params->sta_flags_set & BIT(NL80211_STA_FLAG_AUTHORIZED) && !params->supported_rates) return -EINVAL; break; case CFG80211_STA_TDLS_PEER_ACTIVE: /* reject any changes */ return -EINVAL; case CFG80211_STA_MESH_PEER_KERNEL: if (params->sta_modify_mask & STATION_PARAM_APPLY_PLINK_STATE) return -EINVAL; break; case CFG80211_STA_MESH_PEER_USER: if (params->plink_action != NL80211_PLINK_ACTION_NO_ACTION && params->plink_action != NL80211_PLINK_ACTION_BLOCK) return -EINVAL; break; } /* * Older kernel versions ignored this attribute entirely, so don't * reject attempts to update it but mark it as unused instead so the * driver won't look at the data. */ if (statype != CFG80211_STA_AP_CLIENT_UNASSOC && statype != CFG80211_STA_TDLS_PEER_SETUP) params->opmode_notif_used = false; return 0; } EXPORT_SYMBOL(cfg80211_check_station_change); /* * Get vlan interface making sure it is running and on the right wiphy. 
*/ static struct net_device *get_vlan(struct genl_info *info, struct cfg80211_registered_device *rdev) { struct nlattr *vlanattr = info->attrs[NL80211_ATTR_STA_VLAN]; struct net_device *v; int ret; if (!vlanattr) return NULL; v = dev_get_by_index(genl_info_net(info), nla_get_u32(vlanattr)); if (!v) return ERR_PTR(-ENODEV); if (!v->ieee80211_ptr || v->ieee80211_ptr->wiphy != &rdev->wiphy) { ret = -EINVAL; goto error; } if (v->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN && v->ieee80211_ptr->iftype != NL80211_IFTYPE_AP && v->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO) { ret = -EINVAL; goto error; } if (!netif_running(v)) { ret = -ENETDOWN; goto error; } return v; error: dev_put(v); return ERR_PTR(ret); } static const struct nla_policy nl80211_sta_wme_policy[NL80211_STA_WME_MAX + 1] = { [NL80211_STA_WME_UAPSD_QUEUES] = { .type = NLA_U8 }, [NL80211_STA_WME_MAX_SP] = { .type = NLA_U8 }, }; static int nl80211_parse_sta_wme(struct genl_info *info, struct station_parameters *params) { struct nlattr *tb[NL80211_STA_WME_MAX + 1]; struct nlattr *nla; int err; /* parse WME attributes if present */ if (!info->attrs[NL80211_ATTR_STA_WME]) return 0; nla = info->attrs[NL80211_ATTR_STA_WME]; err = nla_parse_nested(tb, NL80211_STA_WME_MAX, nla, nl80211_sta_wme_policy, genl_info_extack(info)); if (err) return err; if (tb[NL80211_STA_WME_UAPSD_QUEUES]) params->uapsd_queues = nla_get_u8( tb[NL80211_STA_WME_UAPSD_QUEUES]); if (params->uapsd_queues & ~IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK) return -EINVAL; if (tb[NL80211_STA_WME_MAX_SP]) params->max_sp = nla_get_u8(tb[NL80211_STA_WME_MAX_SP]); if (params->max_sp & ~IEEE80211_WMM_IE_STA_QOSINFO_SP_MASK) return -EINVAL; params->sta_modify_mask |= STATION_PARAM_APPLY_UAPSD; return 0; } static int nl80211_parse_sta_channel_info(struct genl_info *info, struct station_parameters *params) { if (info->attrs[NL80211_ATTR_STA_SUPPORTED_CHANNELS]) { params->supported_channels = nla_data(info->attrs[NL80211_ATTR_STA_SUPPORTED_CHANNELS]); params->supported_channels_len = nla_len(info->attrs[NL80211_ATTR_STA_SUPPORTED_CHANNELS]); /* * Need to include at least one (first channel, number of * channels) tuple for each subband, and must have proper * tuples for the rest of the data as well. */ if (params->supported_channels_len < 2) return -EINVAL; if (params->supported_channels_len % 2) return -EINVAL; } if (info->attrs[NL80211_ATTR_STA_SUPPORTED_OPER_CLASSES]) { params->supported_oper_classes = nla_data(info->attrs[NL80211_ATTR_STA_SUPPORTED_OPER_CLASSES]); params->supported_oper_classes_len = nla_len(info->attrs[NL80211_ATTR_STA_SUPPORTED_OPER_CLASSES]); /* * The value of the Length field of the Supported Operating * Classes element is between 2 and 253. 
*/ if (params->supported_oper_classes_len < 2 || params->supported_oper_classes_len > 253) return -EINVAL; } return 0; } static int nl80211_set_station_tdls(struct genl_info *info, struct station_parameters *params) { int err; /* Dummy STA entry gets updated once the peer capabilities are known */ if (info->attrs[NL80211_ATTR_PEER_AID]) params->aid = nla_get_u16(info->attrs[NL80211_ATTR_PEER_AID]); if (info->attrs[NL80211_ATTR_HT_CAPABILITY]) params->ht_capa = nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]); if (info->attrs[NL80211_ATTR_VHT_CAPABILITY]) params->vht_capa = nla_data(info->attrs[NL80211_ATTR_VHT_CAPABILITY]); if (info->attrs[NL80211_ATTR_HE_CAPABILITY]) { params->he_capa = nla_data(info->attrs[NL80211_ATTR_HE_CAPABILITY]); params->he_capa_len = nla_len(info->attrs[NL80211_ATTR_HE_CAPABILITY]); if (params->he_capa_len < NL80211_HE_MIN_CAPABILITY_LEN) return -EINVAL; } err = nl80211_parse_sta_channel_info(info, params); if (err) return err; return nl80211_parse_sta_wme(info, params); } static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; struct station_parameters params; u8 *mac_addr; int err; memset(&params, 0, sizeof(params)); if (!rdev->ops->change_station) return -EOPNOTSUPP; /* * AID and listen_interval properties can be set only for an unassociated * station. Include these parameters here; they are checked later in * cfg80211_check_station_change(). */ if (info->attrs[NL80211_ATTR_STA_AID]) params.aid = nla_get_u16(info->attrs[NL80211_ATTR_STA_AID]); if (info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]) params.listen_interval = nla_get_u16(info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]); else params.listen_interval = -1; if (info->attrs[NL80211_ATTR_STA_SUPPORT_P2P_PS]) params.support_p2p_ps = nla_get_u8(info->attrs[NL80211_ATTR_STA_SUPPORT_P2P_PS]); else params.support_p2p_ps = -1; if (!info->attrs[NL80211_ATTR_MAC]) return -EINVAL; mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]); if (info->attrs[NL80211_ATTR_STA_SUPPORTED_RATES]) { params.supported_rates = nla_data(info->attrs[NL80211_ATTR_STA_SUPPORTED_RATES]); params.supported_rates_len = nla_len(info->attrs[NL80211_ATTR_STA_SUPPORTED_RATES]); } if (info->attrs[NL80211_ATTR_STA_CAPABILITY]) { params.capability = nla_get_u16(info->attrs[NL80211_ATTR_STA_CAPABILITY]); params.sta_modify_mask |= STATION_PARAM_APPLY_CAPABILITY; } if (info->attrs[NL80211_ATTR_STA_EXT_CAPABILITY]) { params.ext_capab = nla_data(info->attrs[NL80211_ATTR_STA_EXT_CAPABILITY]); params.ext_capab_len = nla_len(info->attrs[NL80211_ATTR_STA_EXT_CAPABILITY]); } if (parse_station_flags(info, dev->ieee80211_ptr->iftype, &params)) return -EINVAL; if (info->attrs[NL80211_ATTR_STA_PLINK_ACTION]) params.plink_action = nla_get_u8(info->attrs[NL80211_ATTR_STA_PLINK_ACTION]); if (info->attrs[NL80211_ATTR_STA_PLINK_STATE]) { params.plink_state = nla_get_u8(info->attrs[NL80211_ATTR_STA_PLINK_STATE]); if (info->attrs[NL80211_ATTR_MESH_PEER_AID]) params.peer_aid = nla_get_u16( info->attrs[NL80211_ATTR_MESH_PEER_AID]); params.sta_modify_mask |= STATION_PARAM_APPLY_PLINK_STATE; } if (info->attrs[NL80211_ATTR_LOCAL_MESH_POWER_MODE]) params.local_pm = nla_get_u32( info->attrs[NL80211_ATTR_LOCAL_MESH_POWER_MODE]); if (info->attrs[NL80211_ATTR_OPMODE_NOTIF]) { params.opmode_notif_used = true; params.opmode_notif = nla_get_u8(info->attrs[NL80211_ATTR_OPMODE_NOTIF]); } /* Include parameters for TDLS peer (will check later) */ err =
nl80211_set_station_tdls(info, &params); if (err) return err; params.vlan = get_vlan(info, rdev); if (IS_ERR(params.vlan)) return PTR_ERR(params.vlan); switch (dev->ieee80211_ptr->iftype) { case NL80211_IFTYPE_AP: case NL80211_IFTYPE_AP_VLAN: case NL80211_IFTYPE_P2P_GO: case NL80211_IFTYPE_P2P_CLIENT: case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_ADHOC: case NL80211_IFTYPE_MESH_POINT: break; default: err = -EOPNOTSUPP; goto out_put_vlan; } /* driver will call cfg80211_check_station_change() */ err = rdev_change_station(rdev, dev, mac_addr, &params); out_put_vlan: if (params.vlan) dev_put(params.vlan); return err; } static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; int err; struct net_device *dev = info->user_ptr[1]; struct station_parameters params; u8 *mac_addr = NULL; u32 auth_assoc = BIT(NL80211_STA_FLAG_AUTHENTICATED) | BIT(NL80211_STA_FLAG_ASSOCIATED); memset(&params, 0, sizeof(params)); if (!rdev->ops->add_station) return -EOPNOTSUPP; if (!info->attrs[NL80211_ATTR_MAC]) return -EINVAL; if (!info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]) return -EINVAL; if (!info->attrs[NL80211_ATTR_STA_SUPPORTED_RATES]) return -EINVAL; if (!info->attrs[NL80211_ATTR_STA_AID] && !info->attrs[NL80211_ATTR_PEER_AID]) return -EINVAL; mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]); params.supported_rates = nla_data(info->attrs[NL80211_ATTR_STA_SUPPORTED_RATES]); params.supported_rates_len = nla_len(info->attrs[NL80211_ATTR_STA_SUPPORTED_RATES]); params.listen_interval = nla_get_u16(info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]); if (info->attrs[NL80211_ATTR_STA_SUPPORT_P2P_PS]) { params.support_p2p_ps = nla_get_u8(info->attrs[NL80211_ATTR_STA_SUPPORT_P2P_PS]); } else { /* * if not specified, assume it's supported for P2P GO interface, * and is NOT supported for AP interface */ params.support_p2p_ps = dev->ieee80211_ptr->iftype == NL80211_IFTYPE_P2P_GO; } if (info->attrs[NL80211_ATTR_PEER_AID]) params.aid = nla_get_u16(info->attrs[NL80211_ATTR_PEER_AID]); else params.aid = nla_get_u16(info->attrs[NL80211_ATTR_STA_AID]); if (info->attrs[NL80211_ATTR_STA_CAPABILITY]) { params.capability = nla_get_u16(info->attrs[NL80211_ATTR_STA_CAPABILITY]); params.sta_modify_mask |= STATION_PARAM_APPLY_CAPABILITY; } if (info->attrs[NL80211_ATTR_STA_EXT_CAPABILITY]) { params.ext_capab = nla_data(info->attrs[NL80211_ATTR_STA_EXT_CAPABILITY]); params.ext_capab_len = nla_len(info->attrs[NL80211_ATTR_STA_EXT_CAPABILITY]); } if (info->attrs[NL80211_ATTR_HT_CAPABILITY]) params.ht_capa = nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]); if (info->attrs[NL80211_ATTR_VHT_CAPABILITY]) params.vht_capa = nla_data(info->attrs[NL80211_ATTR_VHT_CAPABILITY]); if (info->attrs[NL80211_ATTR_HE_CAPABILITY]) { params.he_capa = nla_data(info->attrs[NL80211_ATTR_HE_CAPABILITY]); params.he_capa_len = nla_len(info->attrs[NL80211_ATTR_HE_CAPABILITY]); /* max len is validated in nla policy */ if (params.he_capa_len < NL80211_HE_MIN_CAPABILITY_LEN) return -EINVAL; } if (info->attrs[NL80211_ATTR_OPMODE_NOTIF]) { params.opmode_notif_used = true; params.opmode_notif = nla_get_u8(info->attrs[NL80211_ATTR_OPMODE_NOTIF]); } if (info->attrs[NL80211_ATTR_STA_PLINK_ACTION]) params.plink_action = nla_get_u8(info->attrs[NL80211_ATTR_STA_PLINK_ACTION]); err = nl80211_parse_sta_channel_info(info, &params); if (err) return err; err = nl80211_parse_sta_wme(info, &params); if (err) return err; if (parse_station_flags(info, dev->ieee80211_ptr->iftype, &params)) return -EINVAL; /* HT/VHT requires
QoS, but if we don't have that just ignore HT/VHT * as userspace might just pass through the capabilities from the IEs * directly, rather than enforcing this restriction and returning an * error in this case. */ if (!(params.sta_flags_set & BIT(NL80211_STA_FLAG_WME))) { params.ht_capa = NULL; params.vht_capa = NULL; /* HE requires WME */ if (params.he_capa_len) return -EINVAL; } /* When you run into this, adjust the code below for the new flag */ BUILD_BUG_ON(NL80211_STA_FLAG_MAX != 7); switch (dev->ieee80211_ptr->iftype) { case NL80211_IFTYPE_AP: case NL80211_IFTYPE_AP_VLAN: case NL80211_IFTYPE_P2P_GO: /* ignore WME attributes if iface/sta is not capable */ if (!(rdev->wiphy.flags & WIPHY_FLAG_AP_UAPSD) || !(params.sta_flags_set & BIT(NL80211_STA_FLAG_WME))) params.sta_modify_mask &= ~STATION_PARAM_APPLY_UAPSD; /* TDLS peers cannot be added */ if ((params.sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) || info->attrs[NL80211_ATTR_PEER_AID]) return -EINVAL; /* but don't bother the driver with it */ params.sta_flags_mask &= ~BIT(NL80211_STA_FLAG_TDLS_PEER); /* allow authenticated/associated only if driver handles it */ if (!(rdev->wiphy.features & NL80211_FEATURE_FULL_AP_CLIENT_STATE) && params.sta_flags_mask & auth_assoc) return -EINVAL; /* Older userspace, or userspace wanting to be compatible with * !NL80211_FEATURE_FULL_AP_CLIENT_STATE, will not set the auth * and assoc flags in the mask, but assumes the station will be * added as associated anyway since this was the required driver * behaviour before NL80211_FEATURE_FULL_AP_CLIENT_STATE was * introduced. * In order to not bother drivers with this quirk in the API * set the flags in both the mask and set for new stations in * this case. */ if (!(params.sta_flags_mask & auth_assoc)) { params.sta_flags_mask |= auth_assoc; params.sta_flags_set |= auth_assoc; } /* must be last in here for error handling */ params.vlan = get_vlan(info, rdev); if (IS_ERR(params.vlan)) return PTR_ERR(params.vlan); break; case NL80211_IFTYPE_MESH_POINT: /* ignore uAPSD data */ params.sta_modify_mask &= ~STATION_PARAM_APPLY_UAPSD; /* associated is disallowed */ if (params.sta_flags_mask & BIT(NL80211_STA_FLAG_ASSOCIATED)) return -EINVAL; /* TDLS peers cannot be added */ if ((params.sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) || info->attrs[NL80211_ATTR_PEER_AID]) return -EINVAL; break; case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_P2P_CLIENT: /* ignore uAPSD data */ params.sta_modify_mask &= ~STATION_PARAM_APPLY_UAPSD; /* these are disallowed */ if (params.sta_flags_mask & (BIT(NL80211_STA_FLAG_ASSOCIATED) | BIT(NL80211_STA_FLAG_AUTHENTICATED))) return -EINVAL; /* Only TDLS peers can be added */ if (!(params.sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER))) return -EINVAL; /* Can only add if TDLS ... */ if (!(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS)) return -EOPNOTSUPP; /* ... with external setup is supported */ if (!(rdev->wiphy.flags & WIPHY_FLAG_TDLS_EXTERNAL_SETUP)) return -EOPNOTSUPP; /* * Older wpa_supplicant versions always mark the TDLS peer * as authorized, but it shouldn't yet be. 
*/ params.sta_flags_mask &= ~BIT(NL80211_STA_FLAG_AUTHORIZED); break; default: return -EOPNOTSUPP; } /* be aware of params.vlan when changing code here */ err = rdev_add_station(rdev, dev, mac_addr, &params); if (params.vlan) dev_put(params.vlan); return err; } static int nl80211_del_station(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; struct station_del_parameters params; memset(&params, 0, sizeof(params)); if (info->attrs[NL80211_ATTR_MAC]) params.mac = nla_data(info->attrs[NL80211_ATTR_MAC]); if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP && dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN && dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT && dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO) return -EINVAL; if (!rdev->ops->del_station) return -EOPNOTSUPP; if (info->attrs[NL80211_ATTR_MGMT_SUBTYPE]) { params.subtype = nla_get_u8(info->attrs[NL80211_ATTR_MGMT_SUBTYPE]); if (params.subtype != IEEE80211_STYPE_DISASSOC >> 4 && params.subtype != IEEE80211_STYPE_DEAUTH >> 4) return -EINVAL; } else { /* Default to Deauthentication frame */ params.subtype = IEEE80211_STYPE_DEAUTH >> 4; } if (info->attrs[NL80211_ATTR_REASON_CODE]) { params.reason_code = nla_get_u16(info->attrs[NL80211_ATTR_REASON_CODE]); if (params.reason_code == 0) return -EINVAL; /* 0 is reserved */ } else { /* Default to reason code 2 */ params.reason_code = WLAN_REASON_PREV_AUTH_NOT_VALID; } return rdev_del_station(rdev, dev, &params); } static int nl80211_send_mpath(struct sk_buff *msg, u32 portid, u32 seq, int flags, struct net_device *dev, u8 *dst, u8 *next_hop, struct mpath_info *pinfo) { void *hdr; struct nlattr *pinfoattr; hdr = nl80211hdr_put(msg, portid, seq, flags, NL80211_CMD_NEW_MPATH); if (!hdr) return -1; if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) || nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, dst) || nla_put(msg, NL80211_ATTR_MPATH_NEXT_HOP, ETH_ALEN, next_hop) || nla_put_u32(msg, NL80211_ATTR_GENERATION, pinfo->generation)) goto nla_put_failure; pinfoattr = nla_nest_start(msg, NL80211_ATTR_MPATH_INFO); if (!pinfoattr) goto nla_put_failure; if ((pinfo->filled & MPATH_INFO_FRAME_QLEN) && nla_put_u32(msg, NL80211_MPATH_INFO_FRAME_QLEN, pinfo->frame_qlen)) goto nla_put_failure; if (((pinfo->filled & MPATH_INFO_SN) && nla_put_u32(msg, NL80211_MPATH_INFO_SN, pinfo->sn)) || ((pinfo->filled & MPATH_INFO_METRIC) && nla_put_u32(msg, NL80211_MPATH_INFO_METRIC, pinfo->metric)) || ((pinfo->filled & MPATH_INFO_EXPTIME) && nla_put_u32(msg, NL80211_MPATH_INFO_EXPTIME, pinfo->exptime)) || ((pinfo->filled & MPATH_INFO_FLAGS) && nla_put_u8(msg, NL80211_MPATH_INFO_FLAGS, pinfo->flags)) || ((pinfo->filled & MPATH_INFO_DISCOVERY_TIMEOUT) && nla_put_u32(msg, NL80211_MPATH_INFO_DISCOVERY_TIMEOUT, pinfo->discovery_timeout)) || ((pinfo->filled & MPATH_INFO_DISCOVERY_RETRIES) && nla_put_u8(msg, NL80211_MPATH_INFO_DISCOVERY_RETRIES, pinfo->discovery_retries))) goto nla_put_failure; nla_nest_end(msg, pinfoattr); genlmsg_end(msg, hdr); return 0; nla_put_failure: genlmsg_cancel(msg, hdr); return -EMSGSIZE; } static int nl80211_dump_mpath(struct sk_buff *skb, struct netlink_callback *cb) { struct mpath_info pinfo; struct cfg80211_registered_device *rdev; struct wireless_dev *wdev; u8 dst[ETH_ALEN]; u8 next_hop[ETH_ALEN]; int path_idx = cb->args[2]; int err; rtnl_lock(); err = nl80211_prepare_wdev_dump(cb, &rdev, &wdev); if (err) goto out_err; if (!rdev->ops->dump_mpath) { err = -EOPNOTSUPP; goto out_err; } if
(wdev->iftype != NL80211_IFTYPE_MESH_POINT) { err = -EOPNOTSUPP; goto out_err; } while (1) { err = rdev_dump_mpath(rdev, wdev->netdev, path_idx, dst, next_hop, &pinfo); if (err == -ENOENT) break; if (err) goto out_err; if (nl80211_send_mpath(skb, NETLINK_CB_PORTID(cb->skb), cb->nlh->nlmsg_seq, NLM_F_MULTI, wdev->netdev, dst, next_hop, &pinfo) < 0) goto out; path_idx++; } out: cb->args[2] = path_idx; err = skb->len; out_err: rtnl_unlock(); return err; } static int nl80211_get_mpath(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; int err; struct net_device *dev = info->user_ptr[1]; struct mpath_info pinfo; struct sk_buff *msg; u8 *dst = NULL; u8 next_hop[ETH_ALEN]; memset(&pinfo, 0, sizeof(pinfo)); if (!info->attrs[NL80211_ATTR_MAC]) return -EINVAL; dst = nla_data(info->attrs[NL80211_ATTR_MAC]); if (!rdev->ops->get_mpath) return -EOPNOTSUPP; if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT) return -EOPNOTSUPP; err = rdev_get_mpath(rdev, dev, dst, next_hop, &pinfo); if (err) return err; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; if (nl80211_send_mpath(msg, genl_info_snd_portid(info), info->snd_seq, 0, dev, dst, next_hop, &pinfo) < 0) { nlmsg_free(msg); return -ENOBUFS; } return genlmsg_reply(msg, info); } static int nl80211_set_mpath(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; u8 *dst = NULL; u8 *next_hop = NULL; if (!info->attrs[NL80211_ATTR_MAC]) return -EINVAL; if (!info->attrs[NL80211_ATTR_MPATH_NEXT_HOP]) return -EINVAL; dst = nla_data(info->attrs[NL80211_ATTR_MAC]); next_hop = nla_data(info->attrs[NL80211_ATTR_MPATH_NEXT_HOP]); if (!rdev->ops->change_mpath) return -EOPNOTSUPP; if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT) return -EOPNOTSUPP; return rdev_change_mpath(rdev, dev, dst, next_hop); } static int nl80211_new_mpath(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; u8 *dst = NULL; u8 *next_hop = NULL; if (!info->attrs[NL80211_ATTR_MAC]) return -EINVAL; if (!info->attrs[NL80211_ATTR_MPATH_NEXT_HOP]) return -EINVAL; dst = nla_data(info->attrs[NL80211_ATTR_MAC]); next_hop = nla_data(info->attrs[NL80211_ATTR_MPATH_NEXT_HOP]); if (!rdev->ops->add_mpath) return -EOPNOTSUPP; if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT) return -EOPNOTSUPP; return rdev_add_mpath(rdev, dev, dst, next_hop); } static int nl80211_del_mpath(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; u8 *dst = NULL; if (info->attrs[NL80211_ATTR_MAC]) dst = nla_data(info->attrs[NL80211_ATTR_MAC]); if (!rdev->ops->del_mpath) return -EOPNOTSUPP; return rdev_del_mpath(rdev, dev, dst); } static int nl80211_get_mpp(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; int err; struct net_device *dev = info->user_ptr[1]; struct mpath_info pinfo; struct sk_buff *msg; u8 *dst = NULL; u8 mpp[ETH_ALEN]; memset(&pinfo, 0, sizeof(pinfo)); if (!info->attrs[NL80211_ATTR_MAC]) return -EINVAL; dst = nla_data(info->attrs[NL80211_ATTR_MAC]); if (!rdev->ops->get_mpp) return -EOPNOTSUPP; if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT) return -EOPNOTSUPP; err = rdev_get_mpp(rdev, dev, dst, mpp, &pinfo); if (err) return err; msg = 
nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; if (nl80211_send_mpath(msg, genl_info_snd_portid(info), info->snd_seq, 0, dev, dst, mpp, &pinfo) < 0) { nlmsg_free(msg); return -ENOBUFS; } return genlmsg_reply(msg, info); } static int nl80211_dump_mpp(struct sk_buff *skb, struct netlink_callback *cb) { struct mpath_info pinfo; struct cfg80211_registered_device *rdev; struct wireless_dev *wdev; u8 dst[ETH_ALEN]; u8 mpp[ETH_ALEN]; int path_idx = cb->args[2]; int err; rtnl_lock(); err = nl80211_prepare_wdev_dump(cb, &rdev, &wdev); if (err) goto out_err; if (!rdev->ops->dump_mpp) { err = -EOPNOTSUPP; goto out_err; } if (wdev->iftype != NL80211_IFTYPE_MESH_POINT) { err = -EOPNOTSUPP; goto out_err; } while (1) { err = rdev_dump_mpp(rdev, wdev->netdev, path_idx, dst, mpp, &pinfo); if (err == -ENOENT) break; if (err) goto out_err; if (nl80211_send_mpath(skb, NETLINK_CB_PORTID(cb->skb), cb->nlh->nlmsg_seq, NLM_F_MULTI, wdev->netdev, dst, mpp, &pinfo) < 0) goto out; path_idx++; } out: cb->args[2] = path_idx; err = skb->len; out_err: rtnl_unlock(); return err; } static int nl80211_set_bss(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; struct wireless_dev *wdev = dev->ieee80211_ptr; struct bss_parameters params; int err; memset(&params, 0, sizeof(params)); /* default to not changing parameters */ params.use_cts_prot = -1; params.use_short_preamble = -1; params.use_short_slot_time = -1; params.ap_isolate = -1; params.ht_opmode = -1; params.p2p_ctwindow = -1; params.p2p_opp_ps = -1; if (info->attrs[NL80211_ATTR_BSS_CTS_PROT]) params.use_cts_prot = nla_get_u8(info->attrs[NL80211_ATTR_BSS_CTS_PROT]); if (info->attrs[NL80211_ATTR_BSS_SHORT_PREAMBLE]) params.use_short_preamble = nla_get_u8(info->attrs[NL80211_ATTR_BSS_SHORT_PREAMBLE]); if (info->attrs[NL80211_ATTR_BSS_SHORT_SLOT_TIME]) params.use_short_slot_time = nla_get_u8(info->attrs[NL80211_ATTR_BSS_SHORT_SLOT_TIME]); if (info->attrs[NL80211_ATTR_BSS_BASIC_RATES]) { params.basic_rates = nla_data(info->attrs[NL80211_ATTR_BSS_BASIC_RATES]); params.basic_rates_len = nla_len(info->attrs[NL80211_ATTR_BSS_BASIC_RATES]); } if (info->attrs[NL80211_ATTR_AP_ISOLATE]) params.ap_isolate = !!nla_get_u8(info->attrs[NL80211_ATTR_AP_ISOLATE]); if (info->attrs[NL80211_ATTR_BSS_HT_OPMODE]) params.ht_opmode = nla_get_u16(info->attrs[NL80211_ATTR_BSS_HT_OPMODE]); if (info->attrs[NL80211_ATTR_P2P_CTWINDOW]) { if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO) return -EINVAL; params.p2p_ctwindow = nla_get_u8(info->attrs[NL80211_ATTR_P2P_CTWINDOW]); if (params.p2p_ctwindow != 0 && !(rdev->wiphy.features & NL80211_FEATURE_P2P_GO_CTWIN)) return -EINVAL; } if (info->attrs[NL80211_ATTR_P2P_OPPPS]) { u8 tmp; if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO) return -EINVAL; tmp = nla_get_u8(info->attrs[NL80211_ATTR_P2P_OPPPS]); params.p2p_opp_ps = tmp; if (params.p2p_opp_ps && !(rdev->wiphy.features & NL80211_FEATURE_P2P_GO_OPPPS)) return -EINVAL; } if (!rdev->ops->change_bss) return -EOPNOTSUPP; if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP && dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO) return -EOPNOTSUPP; wdev_lock(wdev); err = rdev_change_bss(rdev, dev, &params); wdev_unlock(wdev); return err; } static int nl80211_req_set_reg(struct sk_buff *skb, struct genl_info *info) { char *data = NULL; bool is_indoor; enum nl80211_user_reg_hint_type user_reg_hint_type; u32 owner_nlportid; /* * You should only get this when cfg80211 hasn't yet
initialized * completely when built-in to the kernel right between the time * window between nl80211_init() and regulatory_init(), if that is * even possible. */ if (unlikely(!rcu_access_pointer(cfg80211_regdomain))) return -EINPROGRESS; if (info->attrs[NL80211_ATTR_USER_REG_HINT_TYPE]) user_reg_hint_type = nla_get_u32(info->attrs[NL80211_ATTR_USER_REG_HINT_TYPE]); else user_reg_hint_type = NL80211_USER_REG_HINT_USER; switch (user_reg_hint_type) { case NL80211_USER_REG_HINT_USER: case NL80211_USER_REG_HINT_CELL_BASE: if (!info->attrs[NL80211_ATTR_REG_ALPHA2]) return -EINVAL; data = nla_data(info->attrs[NL80211_ATTR_REG_ALPHA2]); return regulatory_hint_user(data, user_reg_hint_type); case NL80211_USER_REG_HINT_INDOOR: if (info->attrs[NL80211_ATTR_SOCKET_OWNER]) { owner_nlportid = genl_info_snd_portid(info); is_indoor = !!info->attrs[NL80211_ATTR_REG_INDOOR]; } else { owner_nlportid = 0; is_indoor = true; } return regulatory_hint_indoor(is_indoor, owner_nlportid); default: return -EINVAL; } } static int nl80211_reload_regdb(struct sk_buff *skb, struct genl_info *info) { return reg_reload_regdb(); } static int nl80211_get_mesh_config(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; struct wireless_dev *wdev = dev->ieee80211_ptr; struct mesh_config cur_params; int err = 0; void *hdr; struct nlattr *pinfoattr; struct sk_buff *msg; if (wdev->iftype != NL80211_IFTYPE_MESH_POINT) return -EOPNOTSUPP; if (!rdev->ops->get_mesh_config) return -EOPNOTSUPP; wdev_lock(wdev); /* If not connected, get default parameters */ if (!wdev->mesh_id_len) memcpy(&cur_params, &default_mesh_config, sizeof(cur_params)); else err = rdev_get_mesh_config(rdev, dev, &cur_params); wdev_unlock(wdev); if (err) return err; /* Draw up a netlink message to send back */ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; hdr = nl80211hdr_put(msg, genl_info_snd_portid(info), info->snd_seq, 0, NL80211_CMD_GET_MESH_CONFIG); if (!hdr) goto out; pinfoattr = nla_nest_start(msg, NL80211_ATTR_MESH_CONFIG); if (!pinfoattr) goto nla_put_failure; if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) || nla_put_u16(msg, NL80211_MESHCONF_RETRY_TIMEOUT, cur_params.dot11MeshRetryTimeout) || nla_put_u16(msg, NL80211_MESHCONF_CONFIRM_TIMEOUT, cur_params.dot11MeshConfirmTimeout) || nla_put_u16(msg, NL80211_MESHCONF_HOLDING_TIMEOUT, cur_params.dot11MeshHoldingTimeout) || nla_put_u16(msg, NL80211_MESHCONF_MAX_PEER_LINKS, cur_params.dot11MeshMaxPeerLinks) || nla_put_u8(msg, NL80211_MESHCONF_MAX_RETRIES, cur_params.dot11MeshMaxRetries) || nla_put_u8(msg, NL80211_MESHCONF_TTL, cur_params.dot11MeshTTL) || nla_put_u8(msg, NL80211_MESHCONF_ELEMENT_TTL, cur_params.element_ttl) || nla_put_u8(msg, NL80211_MESHCONF_AUTO_OPEN_PLINKS, cur_params.auto_open_plinks) || nla_put_u32(msg, NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR, cur_params.dot11MeshNbrOffsetMaxNeighbor) || nla_put_u8(msg, NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES, cur_params.dot11MeshHWMPmaxPREQretries) || nla_put_u32(msg, NL80211_MESHCONF_PATH_REFRESH_TIME, cur_params.path_refresh_time) || nla_put_u16(msg, NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT, cur_params.min_discovery_timeout) || nla_put_u32(msg, NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT, cur_params.dot11MeshHWMPactivePathTimeout) || nla_put_u16(msg, NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL, cur_params.dot11MeshHWMPpreqMinInterval) || nla_put_u16(msg, NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL, 
cur_params.dot11MeshHWMPperrMinInterval) || nla_put_u16(msg, NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME, cur_params.dot11MeshHWMPnetDiameterTraversalTime) || nla_put_u8(msg, NL80211_MESHCONF_HWMP_ROOTMODE, cur_params.dot11MeshHWMPRootMode) || nla_put_u16(msg, NL80211_MESHCONF_HWMP_RANN_INTERVAL, cur_params.dot11MeshHWMPRannInterval) || nla_put_u8(msg, NL80211_MESHCONF_GATE_ANNOUNCEMENTS, cur_params.dot11MeshGateAnnouncementProtocol) || nla_put_u8(msg, NL80211_MESHCONF_FORWARDING, cur_params.dot11MeshForwarding) || nla_put_s32(msg, NL80211_MESHCONF_RSSI_THRESHOLD, cur_params.rssi_threshold) || nla_put_u32(msg, NL80211_MESHCONF_HT_OPMODE, cur_params.ht_opmode) || nla_put_u32(msg, NL80211_MESHCONF_HWMP_PATH_TO_ROOT_TIMEOUT, cur_params.dot11MeshHWMPactivePathToRootTimeout) || nla_put_u16(msg, NL80211_MESHCONF_HWMP_ROOT_INTERVAL, cur_params.dot11MeshHWMProotInterval) || nla_put_u16(msg, NL80211_MESHCONF_HWMP_CONFIRMATION_INTERVAL, cur_params.dot11MeshHWMPconfirmationInterval) || nla_put_u32(msg, NL80211_MESHCONF_POWER_MODE, cur_params.power_mode) || nla_put_u16(msg, NL80211_MESHCONF_AWAKE_WINDOW, cur_params.dot11MeshAwakeWindowDuration) || nla_put_u32(msg, NL80211_MESHCONF_PLINK_TIMEOUT, cur_params.plink_timeout) || nla_put_u8(msg, NL80211_MESHCONF_CONNECTED_TO_GATE, cur_params.dot11MeshConnectedToMeshGate)) goto nla_put_failure; nla_nest_end(msg, pinfoattr); genlmsg_end(msg, hdr); return genlmsg_reply(msg, info); nla_put_failure: out: nlmsg_free(msg); return -ENOBUFS; } static const struct nla_policy nl80211_meshconf_params_policy[NL80211_MESHCONF_ATTR_MAX+1] = { [NL80211_MESHCONF_RETRY_TIMEOUT] = NLA_POLICY_RANGE(NLA_U16, 1, 255), [NL80211_MESHCONF_CONFIRM_TIMEOUT] = NLA_POLICY_RANGE(NLA_U16, 1, 255), [NL80211_MESHCONF_HOLDING_TIMEOUT] = NLA_POLICY_RANGE(NLA_U16, 1, 255), [NL80211_MESHCONF_MAX_PEER_LINKS] = NLA_POLICY_RANGE(NLA_U16, 0, 255), [NL80211_MESHCONF_MAX_RETRIES] = NLA_POLICY_MAX(NLA_U8, 16), [NL80211_MESHCONF_TTL] = NLA_POLICY_MIN(NLA_U8, 1), [NL80211_MESHCONF_ELEMENT_TTL] = NLA_POLICY_MIN(NLA_U8, 1), [NL80211_MESHCONF_AUTO_OPEN_PLINKS] = NLA_POLICY_MAX(NLA_U8, 1), [NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR] = NLA_POLICY_RANGE(NLA_U32, 1, 255), [NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES] = { .type = NLA_U8 }, [NL80211_MESHCONF_PATH_REFRESH_TIME] = { .type = NLA_U32 }, [NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT] = NLA_POLICY_MIN(NLA_U16, 1), [NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT] = { .type = NLA_U32 }, [NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL] = NLA_POLICY_MIN(NLA_U16, 1), [NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL] = NLA_POLICY_MIN(NLA_U16, 1), [NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME] = NLA_POLICY_MIN(NLA_U16, 1), [NL80211_MESHCONF_HWMP_ROOTMODE] = NLA_POLICY_MAX(NLA_U8, 4), [NL80211_MESHCONF_HWMP_RANN_INTERVAL] = NLA_POLICY_MIN(NLA_U16, 1), [NL80211_MESHCONF_GATE_ANNOUNCEMENTS] = NLA_POLICY_MAX(NLA_U8, 1), [NL80211_MESHCONF_FORWARDING] = NLA_POLICY_MAX(NLA_U8, 1), [NL80211_MESHCONF_RSSI_THRESHOLD] = NLA_POLICY_RANGE(NLA_S32, -255, 0), [NL80211_MESHCONF_HT_OPMODE] = { .type = NLA_U16 }, [NL80211_MESHCONF_HWMP_PATH_TO_ROOT_TIMEOUT] = { .type = NLA_U32 }, [NL80211_MESHCONF_HWMP_ROOT_INTERVAL] = NLA_POLICY_MIN(NLA_U16, 1), [NL80211_MESHCONF_HWMP_CONFIRMATION_INTERVAL] = NLA_POLICY_MIN(NLA_U16, 1), [NL80211_MESHCONF_POWER_MODE] = NLA_POLICY_RANGE(NLA_U32, NL80211_MESH_POWER_ACTIVE, NL80211_MESH_POWER_MAX), [NL80211_MESHCONF_AWAKE_WINDOW] = { .type = NLA_U16 }, [NL80211_MESHCONF_PLINK_TIMEOUT] = { .type = NLA_U32 }, [NL80211_MESHCONF_CONNECTED_TO_GATE] = NLA_POLICY_RANGE(NLA_U8, 0, 1), }; 
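/* * Editorial sketch (not part of the original source): the NLA_POLICY_*() * entries in the table above make the generic netlink core reject * out-of-range attributes before nl80211_parse_mesh_config() ever runs, * so the parser can use the values without re-checking bounds. A range * policy such as NLA_POLICY_RANGE(NLA_U16, 1, 255) behaves roughly like * the helper below; the function name is hypothetical, and the helper is * unused, kept only to document the semantics. */ static int __maybe_unused nl80211_sketch_check_u16_range(const struct nlattr *nla, u16 min, u16 max) { u16 val; /* reject attributes whose payload is too short for a u16 */ if (nla_len(nla) < (int)sizeof(u16)) return -EINVAL; val = nla_get_u16(nla); /* reject values outside the range declared in the policy */ if (val < min || val > max) return -ERANGE; return 0; }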
static const struct nla_policy nl80211_mesh_setup_params_policy[NL80211_MESH_SETUP_ATTR_MAX+1] = { [NL80211_MESH_SETUP_ENABLE_VENDOR_SYNC] = { .type = NLA_U8 }, [NL80211_MESH_SETUP_ENABLE_VENDOR_PATH_SEL] = { .type = NLA_U8 }, [NL80211_MESH_SETUP_ENABLE_VENDOR_METRIC] = { .type = NLA_U8 }, [NL80211_MESH_SETUP_USERSPACE_AUTH] = { .type = NLA_FLAG }, [NL80211_MESH_SETUP_AUTH_PROTOCOL] = { .type = NLA_U8 }, [NL80211_MESH_SETUP_USERSPACE_MPM] = { .type = NLA_FLAG }, [NL80211_MESH_SETUP_IE] = NLA_POLICY_VALIDATE_FN(NLA_BINARY, validate_ie_attr, IEEE80211_MAX_DATA_LEN), [NL80211_MESH_SETUP_USERSPACE_AMPE] = { .type = NLA_FLAG }, }; static int nl80211_parse_mesh_config(struct genl_info *info, struct mesh_config *cfg, u32 *mask_out) { struct nlattr *tb[NL80211_MESHCONF_ATTR_MAX + 1]; u32 mask = 0; u16 ht_opmode; #define FILL_IN_MESH_PARAM_IF_SET(tb, cfg, param, mask, attr, fn) \ do { \ if (tb[attr]) { \ cfg->param = fn(tb[attr]); \ mask |= BIT((attr) - 1); \ } \ } while (0) if (!info->attrs[NL80211_ATTR_MESH_CONFIG]) return -EINVAL; if (nla_parse_nested(tb, NL80211_MESHCONF_ATTR_MAX, info->attrs[NL80211_ATTR_MESH_CONFIG], nl80211_meshconf_params_policy, genl_info_extack(info))) return -EINVAL; /* This makes sure that there aren't more than 32 mesh config * parameters (otherwise our bitfield scheme would not work.) */ BUILD_BUG_ON(NL80211_MESHCONF_ATTR_MAX > 32); /* Fill in the params struct */ FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshRetryTimeout, mask, NL80211_MESHCONF_RETRY_TIMEOUT, nla_get_u16); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshConfirmTimeout, mask, NL80211_MESHCONF_CONFIRM_TIMEOUT, nla_get_u16); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHoldingTimeout, mask, NL80211_MESHCONF_HOLDING_TIMEOUT, nla_get_u16); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshMaxPeerLinks, mask, NL80211_MESHCONF_MAX_PEER_LINKS, nla_get_u16); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshMaxRetries, mask, NL80211_MESHCONF_MAX_RETRIES, nla_get_u8); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshTTL, mask, NL80211_MESHCONF_TTL, nla_get_u8); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, element_ttl, mask, NL80211_MESHCONF_ELEMENT_TTL, nla_get_u8); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, auto_open_plinks, mask, NL80211_MESHCONF_AUTO_OPEN_PLINKS, nla_get_u8); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshNbrOffsetMaxNeighbor, mask, NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR, nla_get_u32); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPmaxPREQretries, mask, NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES, nla_get_u8); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, path_refresh_time, mask, NL80211_MESHCONF_PATH_REFRESH_TIME, nla_get_u32); if (mask & BIT(NL80211_MESHCONF_PATH_REFRESH_TIME) && (cfg->path_refresh_time < 1 || cfg->path_refresh_time > 65535)) return -EINVAL; FILL_IN_MESH_PARAM_IF_SET(tb, cfg, min_discovery_timeout, mask, NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT, nla_get_u16); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPactivePathTimeout, mask, NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT, nla_get_u32); if (mask & BIT(NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT) && (cfg->dot11MeshHWMPactivePathTimeout < 1 || cfg->dot11MeshHWMPactivePathTimeout > 65535)) return -EINVAL; FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPpreqMinInterval, mask, NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL, nla_get_u16); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPperrMinInterval, mask, NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL, nla_get_u16); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPnetDiameterTraversalTime, mask, NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME, 
nla_get_u16); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPRootMode, mask, NL80211_MESHCONF_HWMP_ROOTMODE, nla_get_u8); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPRannInterval, mask, NL80211_MESHCONF_HWMP_RANN_INTERVAL, nla_get_u16); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshGateAnnouncementProtocol, mask, NL80211_MESHCONF_GATE_ANNOUNCEMENTS, nla_get_u8); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshForwarding, mask, NL80211_MESHCONF_FORWARDING, nla_get_u8); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, rssi_threshold, mask, NL80211_MESHCONF_RSSI_THRESHOLD, nla_get_s32); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshConnectedToMeshGate, mask, NL80211_MESHCONF_CONNECTED_TO_GATE, nla_get_u8); /* * Check HT operation mode based on * IEEE 802.11-2016 9.4.2.57 HT Operation element. */ if (tb[NL80211_MESHCONF_HT_OPMODE]) { ht_opmode = nla_get_u16(tb[NL80211_MESHCONF_HT_OPMODE]); if (ht_opmode & ~(IEEE80211_HT_OP_MODE_PROTECTION | IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT | IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT)) return -EINVAL; /* NON_HT_STA bit is reserved, but some programs set it */ ht_opmode &= ~IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT; cfg->ht_opmode = ht_opmode; mask |= (1 << (NL80211_MESHCONF_HT_OPMODE - 1)); } FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPactivePathToRootTimeout, mask, NL80211_MESHCONF_HWMP_PATH_TO_ROOT_TIMEOUT, nla_get_u32); if (mask & BIT(NL80211_MESHCONF_HWMP_PATH_TO_ROOT_TIMEOUT) && (cfg->dot11MeshHWMPactivePathToRootTimeout < 1 || cfg->dot11MeshHWMPactivePathToRootTimeout > 65535)) return -EINVAL; FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMProotInterval, mask, NL80211_MESHCONF_HWMP_ROOT_INTERVAL, nla_get_u16); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPconfirmationInterval, mask, NL80211_MESHCONF_HWMP_CONFIRMATION_INTERVAL, nla_get_u16); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, power_mode, mask, NL80211_MESHCONF_POWER_MODE, nla_get_u32); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshAwakeWindowDuration, mask, NL80211_MESHCONF_AWAKE_WINDOW, nla_get_u16); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, plink_timeout, mask, NL80211_MESHCONF_PLINK_TIMEOUT, nla_get_u32); if (mask_out) *mask_out = mask; return 0; #undef FILL_IN_MESH_PARAM_IF_SET } static int nl80211_parse_mesh_setup(struct genl_info *info, struct mesh_setup *setup) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct nlattr *tb[NL80211_MESH_SETUP_ATTR_MAX + 1]; if (!info->attrs[NL80211_ATTR_MESH_SETUP]) return -EINVAL; if (nla_parse_nested(tb, NL80211_MESH_SETUP_ATTR_MAX, info->attrs[NL80211_ATTR_MESH_SETUP], nl80211_mesh_setup_params_policy, genl_info_extack(info))) return -EINVAL; if (tb[NL80211_MESH_SETUP_ENABLE_VENDOR_SYNC]) setup->sync_method = (nla_get_u8(tb[NL80211_MESH_SETUP_ENABLE_VENDOR_SYNC])) ? IEEE80211_SYNC_METHOD_VENDOR : IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET; if (tb[NL80211_MESH_SETUP_ENABLE_VENDOR_PATH_SEL]) setup->path_sel_proto = (nla_get_u8(tb[NL80211_MESH_SETUP_ENABLE_VENDOR_PATH_SEL])) ? IEEE80211_PATH_PROTOCOL_VENDOR : IEEE80211_PATH_PROTOCOL_HWMP; if (tb[NL80211_MESH_SETUP_ENABLE_VENDOR_METRIC]) setup->path_metric = (nla_get_u8(tb[NL80211_MESH_SETUP_ENABLE_VENDOR_METRIC])) ? 
IEEE80211_PATH_METRIC_VENDOR : IEEE80211_PATH_METRIC_AIRTIME; if (tb[NL80211_MESH_SETUP_IE]) { struct nlattr *ieattr = tb[NL80211_MESH_SETUP_IE]; setup->ie = nla_data(ieattr); setup->ie_len = nla_len(ieattr); } if (tb[NL80211_MESH_SETUP_USERSPACE_MPM] && !(rdev->wiphy.features & NL80211_FEATURE_USERSPACE_MPM)) return -EINVAL; setup->user_mpm = nla_get_flag(tb[NL80211_MESH_SETUP_USERSPACE_MPM]); setup->is_authenticated = nla_get_flag(tb[NL80211_MESH_SETUP_USERSPACE_AUTH]); setup->is_secure = nla_get_flag(tb[NL80211_MESH_SETUP_USERSPACE_AMPE]); if (setup->is_secure) setup->user_mpm = true; if (tb[NL80211_MESH_SETUP_AUTH_PROTOCOL]) { if (!setup->user_mpm) return -EINVAL; setup->auth_id = nla_get_u8(tb[NL80211_MESH_SETUP_AUTH_PROTOCOL]); } return 0; } static int nl80211_update_mesh_config(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; struct wireless_dev *wdev = dev->ieee80211_ptr; struct mesh_config cfg; u32 mask; int err; if (wdev->iftype != NL80211_IFTYPE_MESH_POINT) return -EOPNOTSUPP; if (!rdev->ops->update_mesh_config) return -EOPNOTSUPP; err = nl80211_parse_mesh_config(info, &cfg, &mask); if (err) return err; wdev_lock(wdev); if (!wdev->mesh_id_len) err = -ENOLINK; if (!err) err = rdev_update_mesh_config(rdev, dev, mask, &cfg); wdev_unlock(wdev); return err; } static int nl80211_put_regdom(const struct ieee80211_regdomain *regdom, struct sk_buff *msg) { struct nlattr *nl_reg_rules; unsigned int i; if (nla_put_string(msg, NL80211_ATTR_REG_ALPHA2, regdom->alpha2) || (regdom->dfs_region && nla_put_u8(msg, NL80211_ATTR_DFS_REGION, regdom->dfs_region))) goto nla_put_failure; nl_reg_rules = nla_nest_start(msg, NL80211_ATTR_REG_RULES); if (!nl_reg_rules) goto nla_put_failure; for (i = 0; i < regdom->n_reg_rules; i++) { struct nlattr *nl_reg_rule; const struct ieee80211_reg_rule *reg_rule; const struct ieee80211_freq_range *freq_range; const struct ieee80211_power_rule *power_rule; unsigned int max_bandwidth_khz; reg_rule = &regdom->reg_rules[i]; freq_range = &reg_rule->freq_range; power_rule = &reg_rule->power_rule; nl_reg_rule = nla_nest_start(msg, i); if (!nl_reg_rule) goto nla_put_failure; max_bandwidth_khz = freq_range->max_bandwidth_khz; if (!max_bandwidth_khz) max_bandwidth_khz = reg_get_max_bandwidth(regdom, reg_rule); if (nla_put_u32(msg, NL80211_ATTR_REG_RULE_FLAGS, reg_rule->flags) || nla_put_u32(msg, NL80211_ATTR_FREQ_RANGE_START, freq_range->start_freq_khz) || nla_put_u32(msg, NL80211_ATTR_FREQ_RANGE_END, freq_range->end_freq_khz) || nla_put_u32(msg, NL80211_ATTR_FREQ_RANGE_MAX_BW, max_bandwidth_khz) || nla_put_u32(msg, NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN, power_rule->max_antenna_gain) || nla_put_u32(msg, NL80211_ATTR_POWER_RULE_MAX_EIRP, power_rule->max_eirp) || nla_put_u32(msg, NL80211_ATTR_DFS_CAC_TIME, reg_rule->dfs_cac_ms)) goto nla_put_failure; nla_nest_end(msg, nl_reg_rule); } nla_nest_end(msg, nl_reg_rules); return 0; nla_put_failure: return -EMSGSIZE; } static int nl80211_get_reg_do(struct sk_buff *skb, struct genl_info *info) { const struct ieee80211_regdomain *regdom = NULL; struct cfg80211_registered_device *rdev; struct wiphy *wiphy = NULL; struct sk_buff *msg; void *hdr; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOBUFS; hdr = nl80211hdr_put(msg, genl_info_snd_portid(info), info->snd_seq, 0, NL80211_CMD_GET_REG); if (!hdr) goto put_failure; if (info->attrs[NL80211_ATTR_WIPHY]) { bool self_managed; rdev =
cfg80211_get_dev_from_info(genl_info_net(info), info); if (IS_ERR(rdev)) { nlmsg_free(msg); return PTR_ERR(rdev); } wiphy = &rdev->wiphy; self_managed = wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED; regdom = get_wiphy_regdom(wiphy); /* a self-managed-reg device must have a private regdom */ if (WARN_ON(!regdom && self_managed)) { nlmsg_free(msg); return -EINVAL; } if (regdom && nla_put_u32(msg, NL80211_ATTR_WIPHY, get_wiphy_idx(wiphy))) goto nla_put_failure; } if (!wiphy && reg_last_request_cell_base() && nla_put_u32(msg, NL80211_ATTR_USER_REG_HINT_TYPE, NL80211_USER_REG_HINT_CELL_BASE)) goto nla_put_failure; rcu_read_lock(); if (!regdom) regdom = rcu_dereference(cfg80211_regdomain); if (nl80211_put_regdom(regdom, msg)) goto nla_put_failure_rcu; rcu_read_unlock(); genlmsg_end(msg, hdr); return genlmsg_reply(msg, info); nla_put_failure_rcu: rcu_read_unlock(); nla_put_failure: put_failure: nlmsg_free(msg); return -EMSGSIZE; } static int nl80211_send_regdom(struct sk_buff *msg, struct netlink_callback *cb, u32 seq, int flags, struct wiphy *wiphy, const struct ieee80211_regdomain *regdom) { void *hdr = nl80211hdr_put(msg, NETLINK_CB_PORTID(cb->skb), seq, flags, NL80211_CMD_GET_REG); if (!hdr) return -1; genl_dump_check_consistent(cb, hdr); if (nl80211_put_regdom(regdom, msg)) goto nla_put_failure; if (!wiphy && reg_last_request_cell_base() && nla_put_u32(msg, NL80211_ATTR_USER_REG_HINT_TYPE, NL80211_USER_REG_HINT_CELL_BASE)) goto nla_put_failure; if (wiphy && nla_put_u32(msg, NL80211_ATTR_WIPHY, get_wiphy_idx(wiphy))) goto nla_put_failure; if (wiphy && wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED && nla_put_flag(msg, NL80211_ATTR_WIPHY_SELF_MANAGED_REG)) goto nla_put_failure; genlmsg_end(msg, hdr); return 0; nla_put_failure: genlmsg_cancel(msg, hdr); return -EMSGSIZE; } static int nl80211_get_reg_dump(struct sk_buff *skb, struct netlink_callback *cb) { const struct ieee80211_regdomain *regdom = NULL; struct cfg80211_registered_device *rdev; int err, reg_idx, start = cb->args[2]; rtnl_lock(); if (cfg80211_regdomain && start == 0) { err = nl80211_send_regdom(skb, cb, cb->nlh->nlmsg_seq, NLM_F_MULTI, NULL, rtnl_dereference(cfg80211_regdomain)); if (err < 0) goto out_err; } /* the global regdom is idx 0 */ reg_idx = 1; list_for_each_entry(rdev, &cfg80211_rdev_list, list) { regdom = get_wiphy_regdom(&rdev->wiphy); if (!regdom) continue; if (++reg_idx <= start) continue; err = nl80211_send_regdom(skb, cb, cb->nlh->nlmsg_seq, NLM_F_MULTI, &rdev->wiphy, regdom); if (err < 0) { reg_idx--; break; } } cb->args[2] = reg_idx; err = skb->len; out_err: rtnl_unlock(); return err; } #ifdef CPTCFG_CFG80211_CRDA_SUPPORT static const struct nla_policy reg_rule_policy[NL80211_REG_RULE_ATTR_MAX + 1] = { [NL80211_ATTR_REG_RULE_FLAGS] = { .type = NLA_U32 }, [NL80211_ATTR_FREQ_RANGE_START] = { .type = NLA_U32 }, [NL80211_ATTR_FREQ_RANGE_END] = { .type = NLA_U32 }, [NL80211_ATTR_FREQ_RANGE_MAX_BW] = { .type = NLA_U32 }, [NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN] = { .type = NLA_U32 }, [NL80211_ATTR_POWER_RULE_MAX_EIRP] = { .type = NLA_U32 }, [NL80211_ATTR_DFS_CAC_TIME] = { .type = NLA_U32 }, }; static int parse_reg_rule(struct nlattr *tb[], struct ieee80211_reg_rule *reg_rule) { struct ieee80211_freq_range *freq_range = &reg_rule->freq_range; struct ieee80211_power_rule *power_rule = &reg_rule->power_rule; if (!tb[NL80211_ATTR_REG_RULE_FLAGS]) return -EINVAL; if (!tb[NL80211_ATTR_FREQ_RANGE_START]) return -EINVAL; if (!tb[NL80211_ATTR_FREQ_RANGE_END]) return -EINVAL; if (!tb[NL80211_ATTR_FREQ_RANGE_MAX_BW])
return -EINVAL; if (!tb[NL80211_ATTR_POWER_RULE_MAX_EIRP]) return -EINVAL; reg_rule->flags = nla_get_u32(tb[NL80211_ATTR_REG_RULE_FLAGS]); freq_range->start_freq_khz = nla_get_u32(tb[NL80211_ATTR_FREQ_RANGE_START]); freq_range->end_freq_khz = nla_get_u32(tb[NL80211_ATTR_FREQ_RANGE_END]); freq_range->max_bandwidth_khz = nla_get_u32(tb[NL80211_ATTR_FREQ_RANGE_MAX_BW]); power_rule->max_eirp = nla_get_u32(tb[NL80211_ATTR_POWER_RULE_MAX_EIRP]); if (tb[NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN]) power_rule->max_antenna_gain = nla_get_u32(tb[NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN]); if (tb[NL80211_ATTR_DFS_CAC_TIME]) reg_rule->dfs_cac_ms = nla_get_u32(tb[NL80211_ATTR_DFS_CAC_TIME]); return 0; } static int nl80211_set_reg(struct sk_buff *skb, struct genl_info *info) { struct nlattr *tb[NL80211_REG_RULE_ATTR_MAX + 1]; struct nlattr *nl_reg_rule; char *alpha2; int rem_reg_rules, r; u32 num_rules = 0, rule_idx = 0, size_of_regd; enum nl80211_dfs_regions dfs_region = NL80211_DFS_UNSET; struct ieee80211_regdomain *rd; if (!info->attrs[NL80211_ATTR_REG_ALPHA2]) return -EINVAL; if (!info->attrs[NL80211_ATTR_REG_RULES]) return -EINVAL; alpha2 = nla_data(info->attrs[NL80211_ATTR_REG_ALPHA2]); if (info->attrs[NL80211_ATTR_DFS_REGION]) dfs_region = nla_get_u8(info->attrs[NL80211_ATTR_DFS_REGION]); nla_for_each_nested(nl_reg_rule, info->attrs[NL80211_ATTR_REG_RULES], rem_reg_rules) { num_rules++; if (num_rules > NL80211_MAX_SUPP_REG_RULES) return -EINVAL; } if (!reg_is_valid_request(alpha2)) return -EINVAL; size_of_regd = sizeof(struct ieee80211_regdomain) + num_rules * sizeof(struct ieee80211_reg_rule); rd = kzalloc(size_of_regd, GFP_KERNEL); if (!rd) return -ENOMEM; rd->n_reg_rules = num_rules; rd->alpha2[0] = alpha2[0]; rd->alpha2[1] = alpha2[1]; /* * Disable DFS master mode if the DFS region was * not supported or known on this kernel. */ if (reg_supported_dfs_region(dfs_region)) rd->dfs_region = dfs_region; nla_for_each_nested(nl_reg_rule, info->attrs[NL80211_ATTR_REG_RULES], rem_reg_rules) { r = nla_parse_nested(tb, NL80211_REG_RULE_ATTR_MAX, nl_reg_rule, reg_rule_policy, genl_info_extack(info)); if (r) goto bad_reg; r = parse_reg_rule(tb, &rd->reg_rules[rule_idx]); if (r) goto bad_reg; rule_idx++; if (rule_idx > NL80211_MAX_SUPP_REG_RULES) { r = -EINVAL; goto bad_reg; } } /* set_regdom takes ownership of rd */ return set_regdom(rd, REGD_SOURCE_CRDA); bad_reg: kfree(rd); return r; } #endif /* CPTCFG_CFG80211_CRDA_SUPPORT */ static int validate_scan_freqs(struct nlattr *freqs) { struct nlattr *attr1, *attr2; int n_channels = 0, tmp1, tmp2; nla_for_each_nested(attr1, freqs, tmp1) if (nla_len(attr1) != sizeof(u32)) return 0; nla_for_each_nested(attr1, freqs, tmp1) { n_channels++; /* * Some hardware has a limited channel list for * scanning, and it is pretty much nonsensical * to scan for a channel twice, so disallow that * and don't require drivers to check that the * channel list they get isn't longer than what * they can scan, as long as they can scan all * the channels they registered at once. 
*/ nla_for_each_nested(attr2, freqs, tmp2) if (attr1 != attr2 && nla_get_u32(attr1) == nla_get_u32(attr2)) return 0; } return n_channels; } static bool is_band_valid(struct wiphy *wiphy, enum nl80211_band b) { return b < NUM_NL80211_BANDS && wiphy->bands[b]; } static int parse_bss_select(struct nlattr *nla, struct wiphy *wiphy, struct cfg80211_bss_selection *bss_select) { struct nlattr *attr[NL80211_BSS_SELECT_ATTR_MAX + 1]; struct nlattr *nest; int err; bool found = false; int i; /* only process one nested attribute */ nest = nla_data(nla); if (!nla_ok(nest, nla_len(nest))) return -EINVAL; err = nla_parse_nested(attr, NL80211_BSS_SELECT_ATTR_MAX, nest, nl80211_bss_select_policy, NULL); if (err) return err; /* only one attribute may be given */ for (i = 0; i <= NL80211_BSS_SELECT_ATTR_MAX; i++) { if (attr[i]) { if (found) return -EINVAL; found = true; } } bss_select->behaviour = __NL80211_BSS_SELECT_ATTR_INVALID; if (attr[NL80211_BSS_SELECT_ATTR_RSSI]) bss_select->behaviour = NL80211_BSS_SELECT_ATTR_RSSI; if (attr[NL80211_BSS_SELECT_ATTR_BAND_PREF]) { bss_select->behaviour = NL80211_BSS_SELECT_ATTR_BAND_PREF; bss_select->param.band_pref = nla_get_u32(attr[NL80211_BSS_SELECT_ATTR_BAND_PREF]); if (!is_band_valid(wiphy, bss_select->param.band_pref)) return -EINVAL; } if (attr[NL80211_BSS_SELECT_ATTR_RSSI_ADJUST]) { struct nl80211_bss_select_rssi_adjust *adj_param; adj_param = nla_data(attr[NL80211_BSS_SELECT_ATTR_RSSI_ADJUST]); bss_select->behaviour = NL80211_BSS_SELECT_ATTR_RSSI_ADJUST; bss_select->param.adjust.band = adj_param->band; bss_select->param.adjust.delta = adj_param->delta; if (!is_band_valid(wiphy, bss_select->param.adjust.band)) return -EINVAL; } /* user-space did not provide behaviour attribute */ if (bss_select->behaviour == __NL80211_BSS_SELECT_ATTR_INVALID) return -EINVAL; if (!(wiphy->bss_select_support & BIT(bss_select->behaviour))) return -EINVAL; return 0; } int nl80211_parse_random_mac(struct nlattr **attrs, u8 *mac_addr, u8 *mac_addr_mask) { int i; if (!attrs[NL80211_ATTR_MAC] && !attrs[NL80211_ATTR_MAC_MASK]) { eth_zero_addr(mac_addr); eth_zero_addr(mac_addr_mask); mac_addr[0] = 0x2; mac_addr_mask[0] = 0x3; return 0; } /* need both or none */ if (!attrs[NL80211_ATTR_MAC] || !attrs[NL80211_ATTR_MAC_MASK]) return -EINVAL; memcpy(mac_addr, nla_data(attrs[NL80211_ATTR_MAC]), ETH_ALEN); memcpy(mac_addr_mask, nla_data(attrs[NL80211_ATTR_MAC_MASK]), ETH_ALEN); /* don't allow or configure an mcast address */ if (!is_multicast_ether_addr(mac_addr_mask) || is_multicast_ether_addr(mac_addr)) return -EINVAL; /* * allow users to pass a MAC address that has bits set outside * of the mask, but don't bother drivers with having to deal * with such bits */ for (i = 0; i < ETH_ALEN; i++) mac_addr[i] &= mac_addr_mask[i]; return 0; } static bool cfg80211_off_channel_oper_allowed(struct wireless_dev *wdev) { ASSERT_WDEV_LOCK(wdev); if (!cfg80211_beaconing_iface_active(wdev)) return true; if (!(wdev->chandef.chan->flags & IEEE80211_CHAN_RADAR)) return true; return regulatory_pre_cac_allowed(wdev->wiphy); } static bool nl80211_check_scan_feat(struct wiphy *wiphy, u32 flags, u32 flag, enum nl80211_ext_feature_index feat) { if (!(flags & flag)) return true; if (wiphy_ext_feature_isset(wiphy, feat)) return true; return false; } static int nl80211_check_scan_flags(struct wiphy *wiphy, struct wireless_dev *wdev, void *request, struct nlattr **attrs, bool is_sched_scan) { u8 *mac_addr, *mac_addr_mask; u32 *flags; enum nl80211_feature_flags randomness_flag; if (!attrs[NL80211_ATTR_SCAN_FLAGS]) 
return 0; if (is_sched_scan) { struct cfg80211_sched_scan_request *req = request; randomness_flag = wdev ? NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR : NL80211_FEATURE_ND_RANDOM_MAC_ADDR; flags = &req->flags; mac_addr = req->mac_addr; mac_addr_mask = req->mac_addr_mask; } else { struct cfg80211_scan_request *req = request; randomness_flag = NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR; flags = &req->flags; mac_addr = req->mac_addr; mac_addr_mask = req->mac_addr_mask; } *flags = nla_get_u32(attrs[NL80211_ATTR_SCAN_FLAGS]); if (((*flags & NL80211_SCAN_FLAG_LOW_PRIORITY) && !(wiphy->features & NL80211_FEATURE_LOW_PRIORITY_SCAN)) || !nl80211_check_scan_feat(wiphy, *flags, NL80211_SCAN_FLAG_LOW_SPAN, NL80211_EXT_FEATURE_LOW_SPAN_SCAN) || !nl80211_check_scan_feat(wiphy, *flags, NL80211_SCAN_FLAG_LOW_POWER, NL80211_EXT_FEATURE_LOW_POWER_SCAN) || !nl80211_check_scan_feat(wiphy, *flags, NL80211_SCAN_FLAG_HIGH_ACCURACY, NL80211_EXT_FEATURE_HIGH_ACCURACY_SCAN) || !nl80211_check_scan_feat(wiphy, *flags, NL80211_SCAN_FLAG_FILS_MAX_CHANNEL_TIME, NL80211_EXT_FEATURE_FILS_MAX_CHANNEL_TIME) || !nl80211_check_scan_feat(wiphy, *flags, NL80211_SCAN_FLAG_ACCEPT_BCAST_PROBE_RESP, NL80211_EXT_FEATURE_ACCEPT_BCAST_PROBE_RESP) || !nl80211_check_scan_feat(wiphy, *flags, NL80211_SCAN_FLAG_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION, NL80211_EXT_FEATURE_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION) || !nl80211_check_scan_feat(wiphy, *flags, NL80211_SCAN_FLAG_OCE_PROBE_REQ_HIGH_TX_RATE, NL80211_EXT_FEATURE_OCE_PROBE_REQ_HIGH_TX_RATE) || !nl80211_check_scan_feat(wiphy, *flags, NL80211_SCAN_FLAG_RANDOM_SN, NL80211_EXT_FEATURE_SCAN_RANDOM_SN) || !nl80211_check_scan_feat(wiphy, *flags, NL80211_SCAN_FLAG_MIN_PREQ_CONTENT, NL80211_EXT_FEATURE_SCAN_MIN_PREQ_CONTENT)) return -EOPNOTSUPP; if (*flags & NL80211_SCAN_FLAG_RANDOM_ADDR) { int err; if (!(wiphy->features & randomness_flag) || (wdev && wdev->current_bss)) return -EOPNOTSUPP; err = nl80211_parse_random_mac(attrs, mac_addr, mac_addr_mask); if (err) return err; } return 0; } static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct wireless_dev *wdev = info->user_ptr[1]; struct cfg80211_scan_request *request; struct nlattr *attr; struct wiphy *wiphy; int err, tmp, n_ssids = 0, n_channels, i; size_t ie_len; wiphy = &rdev->wiphy; if (wdev->iftype == NL80211_IFTYPE_NAN) return -EOPNOTSUPP; if (!rdev->ops->scan) return -EOPNOTSUPP; if (rdev->scan_req || rdev->scan_msg) { err = -EBUSY; goto unlock; } if (info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]) { n_channels = validate_scan_freqs( info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]); if (!n_channels) { err = -EINVAL; goto unlock; } } else { n_channels = ieee80211_get_num_supported_channels(wiphy); } if (info->attrs[NL80211_ATTR_SCAN_SSIDS]) nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS], tmp) n_ssids++; if (n_ssids > wiphy->max_scan_ssids) { err = -EINVAL; goto unlock; } if (info->attrs[NL80211_ATTR_IE]) ie_len = nla_len(info->attrs[NL80211_ATTR_IE]); else ie_len = 0; if (ie_len > wiphy->max_scan_ie_len) { err = -EINVAL; goto unlock; } request = kzalloc(sizeof(*request) + sizeof(*request->ssids) * n_ssids + sizeof(*request->channels) * n_channels + ie_len, GFP_KERNEL); if (!request) { err = -ENOMEM; goto unlock; } if (n_ssids) request->ssids = (void *)&request->channels[n_channels]; request->n_ssids = n_ssids; if (ie_len) { if (n_ssids) request->ie = (void *)(request->ssids + n_ssids); else request->ie = (void *)(request->channels + n_channels); } i = 0; 
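/* * Layout note (editorial addition, not in the original source): the * single kzalloc() above places all variable-length data behind the * request structure, so the pointers just initialized all reference one * contiguous allocation, roughly: * * struct cfg80211_scan_request (channels[] flex array at the end) * channels[n_channels] <- request->channels * ssids[n_ssids] <- request->ssids * ie[ie_len] <- request->ie * * A single kfree(request) therefore releases everything, which is why * the out_free error path below frees only the request itself. */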
if (info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]) { /* user specified, bail out if channel not found */ nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_FREQUENCIES], tmp) { struct ieee80211_channel *chan; chan = ieee80211_get_channel(wiphy, nla_get_u32(attr)); if (!chan) { err = -EINVAL; goto out_free; } /* ignore disabled channels */ if (chan->flags & IEEE80211_CHAN_DISABLED) continue; request->channels[i] = chan; i++; } } else { enum nl80211_band band; /* all channels */ for (band = 0; band < NUM_NL80211_BANDS; band++) { int j; if (!wiphy->bands[band]) continue; for (j = 0; j < wiphy->bands[band]->n_channels; j++) { struct ieee80211_channel *chan; chan = &wiphy->bands[band]->channels[j]; if (chan->flags & IEEE80211_CHAN_DISABLED) continue; request->channels[i] = chan; i++; } } } if (!i) { err = -EINVAL; goto out_free; } request->n_channels = i; wdev_lock(wdev); if (!cfg80211_off_channel_oper_allowed(wdev)) { struct ieee80211_channel *chan; if (request->n_channels != 1) { wdev_unlock(wdev); err = -EBUSY; goto out_free; } chan = request->channels[0]; if (chan->center_freq != wdev->chandef.chan->center_freq) { wdev_unlock(wdev); err = -EBUSY; goto out_free; } } wdev_unlock(wdev); i = 0; if (n_ssids) { nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS], tmp) { if (nla_len(attr) > IEEE80211_MAX_SSID_LEN) { err = -EINVAL; goto out_free; } request->ssids[i].ssid_len = nla_len(attr); memcpy(request->ssids[i].ssid, nla_data(attr), nla_len(attr)); i++; } } if (info->attrs[NL80211_ATTR_IE]) { request->ie_len = nla_len(info->attrs[NL80211_ATTR_IE]); memcpy((void *)request->ie, nla_data(info->attrs[NL80211_ATTR_IE]), request->ie_len); } for (i = 0; i < NUM_NL80211_BANDS; i++) if (wiphy->bands[i]) request->rates[i] = (1 << wiphy->bands[i]->n_bitrates) - 1; if (info->attrs[NL80211_ATTR_SCAN_SUPP_RATES]) { nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SUPP_RATES], tmp) { enum nl80211_band band = nla_type(attr); if (band < 0 || band >= NUM_NL80211_BANDS) { err = -EINVAL; goto out_free; } if (!wiphy->bands[band]) continue; err = ieee80211_get_ratemask(wiphy->bands[band], nla_data(attr), nla_len(attr), &request->rates[band]); if (err) goto out_free; } } if (info->attrs[NL80211_ATTR_MEASUREMENT_DURATION]) { if (!wiphy_ext_feature_isset(wiphy, NL80211_EXT_FEATURE_SET_SCAN_DWELL)) { err = -EOPNOTSUPP; goto out_free; } request->duration = nla_get_u16(info->attrs[NL80211_ATTR_MEASUREMENT_DURATION]); request->duration_mandatory = nla_get_flag(info->attrs[NL80211_ATTR_MEASUREMENT_DURATION_MANDATORY]); } err = nl80211_check_scan_flags(wiphy, wdev, request, info->attrs, false); if (err) goto out_free; request->no_cck = nla_get_flag(info->attrs[NL80211_ATTR_TX_NO_CCK_RATE]); /* Initial implementation used NL80211_ATTR_MAC to set the specific * BSSID to scan for. This was problematic because that same attribute * was already used for another purpose (local random MAC address). The * NL80211_ATTR_BSSID attribute was added to fix this. For backwards * compatibility with older userspace components, also use the * NL80211_ATTR_MAC value here if it can be determined to be used for * the specific BSSID use case instead of the random MAC address * (NL80211_ATTR_SCAN_FLAGS is used to enable random MAC address use). 
*/ if (info->attrs[NL80211_ATTR_BSSID]) memcpy(request->bssid, nla_data(info->attrs[NL80211_ATTR_BSSID]), ETH_ALEN); else if (!(request->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) && info->attrs[NL80211_ATTR_MAC]) memcpy(request->bssid, nla_data(info->attrs[NL80211_ATTR_MAC]), ETH_ALEN); else eth_broadcast_addr(request->bssid); request->wdev = wdev; request->wiphy = &rdev->wiphy; request->scan_start = jiffies; rdev->scan_req = request; err = rdev_scan(rdev, request); if (!err) { nl80211_send_scan_start(rdev, wdev); if (wdev->netdev) dev_hold(wdev->netdev); } else { out_free: rdev->scan_req = NULL; kfree(request); } unlock: return err; } static int nl80211_abort_scan(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct wireless_dev *wdev = info->user_ptr[1]; if (!rdev->ops->abort_scan) return -EOPNOTSUPP; if (rdev->scan_msg) return 0; if (!rdev->scan_req) return -ENOENT; rdev_abort_scan(rdev, wdev); return 0; } static int nl80211_parse_sched_scan_plans(struct wiphy *wiphy, int n_plans, struct cfg80211_sched_scan_request *request, struct nlattr **attrs) { int tmp, err, i = 0; struct nlattr *attr; if (!attrs[NL80211_ATTR_SCHED_SCAN_PLANS]) { u32 interval; /* * If scan plans are not specified, * %NL80211_ATTR_SCHED_SCAN_INTERVAL will be specified. In this * case one scan plan will be set with the specified scan * interval and infinite number of iterations. */ interval = nla_get_u32(attrs[NL80211_ATTR_SCHED_SCAN_INTERVAL]); if (!interval) return -EINVAL; request->scan_plans[0].interval = DIV_ROUND_UP(interval, MSEC_PER_SEC); if (!request->scan_plans[0].interval) return -EINVAL; if (request->scan_plans[0].interval > wiphy->max_sched_scan_plan_interval) request->scan_plans[0].interval = wiphy->max_sched_scan_plan_interval; return 0; } nla_for_each_nested(attr, attrs[NL80211_ATTR_SCHED_SCAN_PLANS], tmp) { struct nlattr *plan[NL80211_SCHED_SCAN_PLAN_MAX + 1]; if (WARN_ON(i >= n_plans)) return -EINVAL; err = nla_parse_nested(plan, NL80211_SCHED_SCAN_PLAN_MAX, attr, nl80211_plan_policy, NULL); if (err) return err; if (!plan[NL80211_SCHED_SCAN_PLAN_INTERVAL]) return -EINVAL; request->scan_plans[i].interval = nla_get_u32(plan[NL80211_SCHED_SCAN_PLAN_INTERVAL]); if (!request->scan_plans[i].interval || request->scan_plans[i].interval > wiphy->max_sched_scan_plan_interval) return -EINVAL; if (plan[NL80211_SCHED_SCAN_PLAN_ITERATIONS]) { request->scan_plans[i].iterations = nla_get_u32(plan[NL80211_SCHED_SCAN_PLAN_ITERATIONS]); if (!request->scan_plans[i].iterations || (request->scan_plans[i].iterations > wiphy->max_sched_scan_plan_iterations)) return -EINVAL; } else if (i < n_plans - 1) { /* * All scan plans but the last one must specify * a finite number of iterations */ return -EINVAL; } i++; } /* * The last scan plan must not specify the number of * iterations, it is supposed to run infinitely */ if (request->scan_plans[n_plans - 1].iterations) return -EINVAL; return 0; } static struct cfg80211_sched_scan_request * nl80211_parse_sched_scan(struct wiphy *wiphy, struct wireless_dev *wdev, struct nlattr **attrs, int max_match_sets) { struct cfg80211_sched_scan_request *request; struct nlattr *attr; int err, tmp, n_ssids = 0, n_match_sets = 0, n_channels, i, n_plans = 0; enum nl80211_band band; size_t ie_len; struct nlattr *tb[NL80211_SCHED_SCAN_MATCH_ATTR_MAX + 1]; s32 default_match_rssi = NL80211_SCAN_RSSI_THOLD_OFF; if (attrs[NL80211_ATTR_SCAN_FREQUENCIES]) { n_channels = validate_scan_freqs( attrs[NL80211_ATTR_SCAN_FREQUENCIES]); if (!n_channels) 
return ERR_PTR(-EINVAL); } else { n_channels = ieee80211_get_num_supported_channels(wiphy); } if (attrs[NL80211_ATTR_SCAN_SSIDS]) nla_for_each_nested(attr, attrs[NL80211_ATTR_SCAN_SSIDS], tmp) n_ssids++; if (n_ssids > wiphy->max_sched_scan_ssids) return ERR_PTR(-EINVAL); /* * First, count the number of 'real' matchsets. Due to an issue with * the old implementation, matchsets containing only the RSSI attribute * (NL80211_SCHED_SCAN_MATCH_ATTR_RSSI) are considered as the 'default' * RSSI for all matchsets, rather than their own matchset for reporting * all APs with a strong RSSI. This is needed to be compatible with * older userspace that treated a matchset with only the RSSI as the * global RSSI for all other matchsets - if there are other matchsets. */ if (attrs[NL80211_ATTR_SCHED_SCAN_MATCH]) { nla_for_each_nested(attr, attrs[NL80211_ATTR_SCHED_SCAN_MATCH], tmp) { struct nlattr *rssi; err = nla_parse_nested(tb, NL80211_SCHED_SCAN_MATCH_ATTR_MAX, attr, nl80211_match_policy, NULL); if (err) return ERR_PTR(err); /* SSID and BSSID are mutually exclusive */ if (tb[NL80211_SCHED_SCAN_MATCH_ATTR_SSID] && tb[NL80211_SCHED_SCAN_MATCH_ATTR_BSSID]) return ERR_PTR(-EINVAL); /* add other standalone attributes here */ if (tb[NL80211_SCHED_SCAN_MATCH_ATTR_SSID] || tb[NL80211_SCHED_SCAN_MATCH_ATTR_BSSID]) { n_match_sets++; continue; } rssi = tb[NL80211_SCHED_SCAN_MATCH_ATTR_RSSI]; if (rssi) default_match_rssi = nla_get_s32(rssi); } } /* However, if there's no other matchset, add the RSSI one */ if (!n_match_sets && default_match_rssi != NL80211_SCAN_RSSI_THOLD_OFF) n_match_sets = 1; if (n_match_sets > max_match_sets) return ERR_PTR(-EINVAL); if (attrs[NL80211_ATTR_IE]) ie_len = nla_len(attrs[NL80211_ATTR_IE]); else ie_len = 0; if (ie_len > wiphy->max_sched_scan_ie_len) return ERR_PTR(-EINVAL); if (attrs[NL80211_ATTR_SCHED_SCAN_PLANS]) { /* * NL80211_ATTR_SCHED_SCAN_INTERVAL must not be specified since * each scan plan already specifies its own interval */ if (attrs[NL80211_ATTR_SCHED_SCAN_INTERVAL]) return ERR_PTR(-EINVAL); nla_for_each_nested(attr, attrs[NL80211_ATTR_SCHED_SCAN_PLANS], tmp) n_plans++; } else { /* * The scan interval attribute is kept for backward * compatibility. If no scan plans are specified and sched scan * interval is specified, one scan plan will be set with this * scan interval and infinite number of iterations. 
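* The interval itself is validated and clamped against
* wiphy->max_sched_scan_plan_interval later, in
* nl80211_parse_sched_scan_plans().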
*/ if (!attrs[NL80211_ATTR_SCHED_SCAN_INTERVAL]) return ERR_PTR(-EINVAL); n_plans = 1; } if (!n_plans || n_plans > wiphy->max_sched_scan_plans) return ERR_PTR(-EINVAL); if (!wiphy_ext_feature_isset( wiphy, NL80211_EXT_FEATURE_SCHED_SCAN_RELATIVE_RSSI) && (attrs[NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI] || attrs[NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST])) return ERR_PTR(-EINVAL); request = kzalloc(sizeof(*request) + sizeof(*request->ssids) * n_ssids + sizeof(*request->match_sets) * n_match_sets + sizeof(*request->scan_plans) * n_plans + sizeof(*request->channels) * n_channels + ie_len, GFP_KERNEL); if (!request) return ERR_PTR(-ENOMEM); if (n_ssids) request->ssids = (void *)&request->channels[n_channels]; request->n_ssids = n_ssids; if (ie_len) { if (n_ssids) request->ie = (void *)(request->ssids + n_ssids); else request->ie = (void *)(request->channels + n_channels); } if (n_match_sets) { if (request->ie) request->match_sets = (void *)(request->ie + ie_len); else if (n_ssids) request->match_sets = (void *)(request->ssids + n_ssids); else request->match_sets = (void *)(request->channels + n_channels); } request->n_match_sets = n_match_sets; if (n_match_sets) request->scan_plans = (void *)(request->match_sets + n_match_sets); else if (request->ie) request->scan_plans = (void *)(request->ie + ie_len); else if (n_ssids) request->scan_plans = (void *)(request->ssids + n_ssids); else request->scan_plans = (void *)(request->channels + n_channels); request->n_scan_plans = n_plans; i = 0; if (attrs[NL80211_ATTR_SCAN_FREQUENCIES]) { /* user specified, bail out if channel not found */ nla_for_each_nested(attr, attrs[NL80211_ATTR_SCAN_FREQUENCIES], tmp) { struct ieee80211_channel *chan; chan = ieee80211_get_channel(wiphy, nla_get_u32(attr)); if (!chan) { err = -EINVAL; goto out_free; } /* ignore disabled channels */ if (chan->flags & IEEE80211_CHAN_DISABLED) continue; request->channels[i] = chan; i++; } } else { /* all channels */ for (band = 0; band < NUM_NL80211_BANDS; band++) { int j; if (!wiphy->bands[band]) continue; for (j = 0; j < wiphy->bands[band]->n_channels; j++) { struct ieee80211_channel *chan; chan = &wiphy->bands[band]->channels[j]; if (chan->flags & IEEE80211_CHAN_DISABLED) continue; request->channels[i] = chan; i++; } } } if (!i) { err = -EINVAL; goto out_free; } request->n_channels = i; i = 0; if (n_ssids) { nla_for_each_nested(attr, attrs[NL80211_ATTR_SCAN_SSIDS], tmp) { if (nla_len(attr) > IEEE80211_MAX_SSID_LEN) { err = -EINVAL; goto out_free; } request->ssids[i].ssid_len = nla_len(attr); memcpy(request->ssids[i].ssid, nla_data(attr), nla_len(attr)); i++; } } i = 0; if (attrs[NL80211_ATTR_SCHED_SCAN_MATCH]) { nla_for_each_nested(attr, attrs[NL80211_ATTR_SCHED_SCAN_MATCH], tmp) { struct nlattr *ssid, *bssid, *rssi; err = nla_parse_nested(tb, NL80211_SCHED_SCAN_MATCH_ATTR_MAX, attr, nl80211_match_policy, NULL); if (err) goto out_free; ssid = tb[NL80211_SCHED_SCAN_MATCH_ATTR_SSID]; bssid = tb[NL80211_SCHED_SCAN_MATCH_ATTR_BSSID]; if (ssid || bssid) { if (WARN_ON(i >= n_match_sets)) { /* this indicates a programming error, * the loop above should have verified * things properly */ err = -EINVAL; goto out_free; } if (ssid) { if (nla_len(ssid) > IEEE80211_MAX_SSID_LEN) { err = -EINVAL; goto out_free; } memcpy(request->match_sets[i].ssid.ssid, nla_data(ssid), nla_len(ssid)); request->match_sets[i].ssid.ssid_len = nla_len(ssid); } if (bssid) { if (nla_len(bssid) != ETH_ALEN) { err = -EINVAL; goto out_free; } memcpy(request->match_sets[i].bssid, nla_data(bssid), ETH_ALEN); } /* special attribute - 
old implementation w/a */ request->match_sets[i].rssi_thold = default_match_rssi; rssi = tb[NL80211_SCHED_SCAN_MATCH_ATTR_RSSI]; if (rssi) request->match_sets[i].rssi_thold = nla_get_s32(rssi); } i++; } /* there was no other matchset, so the RSSI one is alone */ if (i == 0 && n_match_sets) request->match_sets[0].rssi_thold = default_match_rssi; request->min_rssi_thold = INT_MAX; for (i = 0; i < n_match_sets; i++) request->min_rssi_thold = min(request->match_sets[i].rssi_thold, request->min_rssi_thold); } else { request->min_rssi_thold = NL80211_SCAN_RSSI_THOLD_OFF; } if (ie_len) { request->ie_len = ie_len; memcpy((void *)request->ie, nla_data(attrs[NL80211_ATTR_IE]), request->ie_len); } err = nl80211_check_scan_flags(wiphy, wdev, request, attrs, true); if (err) goto out_free; if (attrs[NL80211_ATTR_SCHED_SCAN_DELAY]) request->delay = nla_get_u32(attrs[NL80211_ATTR_SCHED_SCAN_DELAY]); if (attrs[NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI]) { request->relative_rssi = nla_get_s8( attrs[NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI]); request->relative_rssi_set = true; } if (request->relative_rssi_set && attrs[NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST]) { struct nl80211_bss_select_rssi_adjust *rssi_adjust; rssi_adjust = nla_data( attrs[NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST]); request->rssi_adjust.band = rssi_adjust->band; request->rssi_adjust.delta = rssi_adjust->delta; if (!is_band_valid(wiphy, request->rssi_adjust.band)) { err = -EINVAL; goto out_free; } } err = nl80211_parse_sched_scan_plans(wiphy, n_plans, request, attrs); if (err) goto out_free; request->scan_start = jiffies; return request; out_free: kfree(request); return ERR_PTR(err); } static int nl80211_start_sched_scan(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_sched_scan_request *sched_scan_req; bool want_multi; int err; if (!rdev->wiphy.max_sched_scan_reqs || !rdev->ops->sched_scan_start) return -EOPNOTSUPP; want_multi = info->attrs[NL80211_ATTR_SCHED_SCAN_MULTI]; err = cfg80211_sched_scan_req_possible(rdev, want_multi); if (err) return err; sched_scan_req = nl80211_parse_sched_scan(&rdev->wiphy, wdev, info->attrs, rdev->wiphy.max_match_sets); err = PTR_ERR_OR_ZERO(sched_scan_req); if (err) goto out_err; /* leave request id zero for legacy request * or if driver does not support multi-scheduled scan */ if (want_multi && rdev->wiphy.max_sched_scan_reqs > 1) { while (!sched_scan_req->reqid) sched_scan_req->reqid = cfg80211_assign_cookie(rdev); } err = rdev_sched_scan_start(rdev, dev, sched_scan_req); if (err) goto out_free; sched_scan_req->dev = dev; sched_scan_req->wiphy = &rdev->wiphy; if (info->attrs[NL80211_ATTR_SOCKET_OWNER]) sched_scan_req->owner_nlportid = genl_info_snd_portid(info); cfg80211_add_sched_scan_req(rdev, sched_scan_req); nl80211_send_sched_scan(sched_scan_req, NL80211_CMD_START_SCHED_SCAN); return 0; out_free: kfree(sched_scan_req); out_err: return err; } static int nl80211_stop_sched_scan(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_sched_scan_request *req; struct cfg80211_registered_device *rdev = info->user_ptr[0]; u64 cookie; if (!rdev->wiphy.max_sched_scan_reqs || !rdev->ops->sched_scan_stop) return -EOPNOTSUPP; if (info->attrs[NL80211_ATTR_COOKIE]) { cookie = nla_get_u64(info->attrs[NL80211_ATTR_COOKIE]); return __cfg80211_stop_sched_scan(rdev, cookie, false); } req = list_first_or_null_rcu(&rdev->sched_scan_req_list, struct 
cfg80211_sched_scan_request, list); if (!req || req->reqid || (req->owner_nlportid && req->owner_nlportid != genl_info_snd_portid(info))) return -ENOENT; return cfg80211_stop_sched_scan_req(rdev, req, false); } static int nl80211_start_radar_detection(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; struct wireless_dev *wdev = dev->ieee80211_ptr; struct wiphy *wiphy = wdev->wiphy; struct cfg80211_chan_def chandef; enum nl80211_dfs_regions dfs_region; unsigned int cac_time_ms; int err; dfs_region = reg_get_dfs_region(wiphy); if (dfs_region == NL80211_DFS_UNSET) return -EINVAL; err = nl80211_parse_chandef(rdev, info, &chandef); if (err) return err; if (netif_carrier_ok(dev)) return -EBUSY; if (wdev->cac_started) return -EBUSY; err = cfg80211_chandef_dfs_required(wiphy, &chandef, wdev->iftype); if (err < 0) return err; if (err == 0) return -EINVAL; if (!cfg80211_chandef_dfs_usable(wiphy, &chandef)) return -EINVAL; /* CAC start is offloaded to HW and can't be started manually */ if (wiphy_ext_feature_isset(wiphy, NL80211_EXT_FEATURE_DFS_OFFLOAD)) return -EOPNOTSUPP; if (!rdev->ops->start_radar_detection) return -EOPNOTSUPP; cac_time_ms = cfg80211_chandef_dfs_cac_time(&rdev->wiphy, &chandef); if (WARN_ON(!cac_time_ms)) cac_time_ms = IEEE80211_DFS_MIN_CAC_TIME_MS; err = rdev_start_radar_detection(rdev, dev, &chandef, cac_time_ms); if (!err) { wdev->chandef = chandef; wdev->cac_started = true; wdev->cac_start_time = jiffies; wdev->cac_time_ms = cac_time_ms; } return err; } static int nl80211_notify_radar_detection(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; struct wireless_dev *wdev = dev->ieee80211_ptr; struct wiphy *wiphy = wdev->wiphy; struct cfg80211_chan_def chandef; enum nl80211_dfs_regions dfs_region; int err; dfs_region = reg_get_dfs_region(wiphy); if (dfs_region == NL80211_DFS_UNSET) { GENL_SET_ERR_MSG(info, "DFS Region is not set. Unexpected Radar indication"); return -EINVAL; } err = nl80211_parse_chandef(rdev, info, &chandef); if (err) { GENL_SET_ERR_MSG(info, "Unable to extract chandef info"); return err; } err = cfg80211_chandef_dfs_required(wiphy, &chandef, wdev->iftype); if (err < 0) { GENL_SET_ERR_MSG(info, "chandef is invalid"); return err; } if (err == 0) { GENL_SET_ERR_MSG(info, "Unexpected Radar indication for chandef/iftype"); return -EINVAL; } /* Do not process this notification if radar is already detected * by kernel on this channel, and return success. */ if (chandef.chan->dfs_state == NL80211_DFS_UNAVAILABLE) return 0; cfg80211_set_dfs_state(wiphy, &chandef, NL80211_DFS_UNAVAILABLE); cfg80211_sched_dfs_chan_update(rdev); rdev->radar_chandef = chandef; /* Propagate this notification to other radios as well */ queue_work(cfg80211_wq, &rdev->propagate_radar_detect_wk); return 0; } static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_csa_settings params; /* csa_attrs is defined static to avoid waste of stack size - this * function is called under RTNL lock, so this should not be a problem. 
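* (An array of NL80211_ATTR_MAX + 1 nlattr pointers would
* otherwise be a sizeable on-stack allocation.)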
*/ static struct nlattr *csa_attrs[NL80211_ATTR_MAX+1]; int err; bool need_new_beacon = false; bool need_handle_dfs_flag = true; int len, i; u32 cs_count; if (!rdev->ops->channel_switch || !(rdev->wiphy.flags & WIPHY_FLAG_HAS_CHANNEL_SWITCH)) return -EOPNOTSUPP; switch (dev->ieee80211_ptr->iftype) { case NL80211_IFTYPE_AP: case NL80211_IFTYPE_P2P_GO: need_new_beacon = true; /* For all modes except AP the handle_dfs flag needs to be * supplied to tell the kernel that userspace will handle radar * events when they happen. Otherwise a switch to a channel * requiring DFS will be rejected. */ need_handle_dfs_flag = false; /* useless if AP is not running */ if (!wdev->beacon_interval) return -ENOTCONN; break; case NL80211_IFTYPE_ADHOC: if (!wdev->ssid_len) return -ENOTCONN; break; case NL80211_IFTYPE_MESH_POINT: if (!wdev->mesh_id_len) return -ENOTCONN; break; default: return -EOPNOTSUPP; } memset(¶ms, 0, sizeof(params)); params.beacon_csa.ftm_responder = -1; if (!info->attrs[NL80211_ATTR_WIPHY_FREQ] || !info->attrs[NL80211_ATTR_CH_SWITCH_COUNT]) return -EINVAL; /* only important for AP, IBSS and mesh create IEs internally */ if (need_new_beacon && !info->attrs[NL80211_ATTR_CSA_IES]) return -EINVAL; /* Even though the attribute is u32, the specification says * u8, so let's make sure we don't overflow. */ cs_count = nla_get_u32(info->attrs[NL80211_ATTR_CH_SWITCH_COUNT]); if (cs_count > 255) return -EINVAL; params.count = cs_count; if (!need_new_beacon) goto skip_beacons; err = nl80211_parse_beacon(rdev, info->attrs, ¶ms.beacon_after); if (err) return err; err = nla_parse_nested(csa_attrs, NL80211_ATTR_MAX, info->attrs[NL80211_ATTR_CSA_IES], nl80211_policy, genl_info_extack(info)); if (err) return err; err = nl80211_parse_beacon(rdev, csa_attrs, ¶ms.beacon_csa); if (err) return err; if (!csa_attrs[NL80211_ATTR_CSA_C_OFF_BEACON]) return -EINVAL; len = nla_len(csa_attrs[NL80211_ATTR_CSA_C_OFF_BEACON]); if (!len || (len % sizeof(u16))) return -EINVAL; params.n_counter_offsets_beacon = len / sizeof(u16); if (rdev->wiphy.max_num_csa_counters && (params.n_counter_offsets_beacon > rdev->wiphy.max_num_csa_counters)) return -EINVAL; params.counter_offsets_beacon = nla_data(csa_attrs[NL80211_ATTR_CSA_C_OFF_BEACON]); /* sanity checks - counters should fit and be the same */ for (i = 0; i < params.n_counter_offsets_beacon; i++) { u16 offset = params.counter_offsets_beacon[i]; if (offset >= params.beacon_csa.tail_len) return -EINVAL; if (params.beacon_csa.tail[offset] != params.count) return -EINVAL; } if (csa_attrs[NL80211_ATTR_CSA_C_OFF_PRESP]) { len = nla_len(csa_attrs[NL80211_ATTR_CSA_C_OFF_PRESP]); if (!len || (len % sizeof(u16))) return -EINVAL; params.n_counter_offsets_presp = len / sizeof(u16); if (rdev->wiphy.max_num_csa_counters && (params.n_counter_offsets_presp > rdev->wiphy.max_num_csa_counters)) return -EINVAL; params.counter_offsets_presp = nla_data(csa_attrs[NL80211_ATTR_CSA_C_OFF_PRESP]); /* sanity checks - counters should fit and be the same */ for (i = 0; i < params.n_counter_offsets_presp; i++) { u16 offset = params.counter_offsets_presp[i]; if (offset >= params.beacon_csa.probe_resp_len) return -EINVAL; if (params.beacon_csa.probe_resp[offset] != params.count) return -EINVAL; } } skip_beacons: err = nl80211_parse_chandef(rdev, info, ¶ms.chandef); if (err) return err; if (!cfg80211_reg_can_beacon_relax(&rdev->wiphy, ¶ms.chandef, wdev->iftype)) return -EINVAL; err = cfg80211_chandef_dfs_required(wdev->wiphy, ¶ms.chandef, wdev->iftype); if (err < 0) return err; if (err > 0) { 
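/* The target channel requires DFS; unless this is an AP/P2P_GO
 * (see need_handle_dfs_flag above), userspace must have set
 * NL80211_ATTR_HANDLE_DFS or the switch is rejected.
 */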
params.radar_required = true; if (need_handle_dfs_flag && !nla_get_flag(info->attrs[NL80211_ATTR_HANDLE_DFS])) { return -EINVAL; } } if (info->attrs[NL80211_ATTR_CH_SWITCH_BLOCK_TX]) params.block_tx = true; wdev_lock(wdev); err = rdev_channel_switch(rdev, dev, ¶ms); wdev_unlock(wdev); return err; } static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb, u32 seq, int flags, struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, struct cfg80211_internal_bss *intbss) { struct cfg80211_bss *res = &intbss->pub; const struct cfg80211_bss_ies *ies; void *hdr; struct nlattr *bss; ASSERT_WDEV_LOCK(wdev); hdr = nl80211hdr_put(msg, NETLINK_CB_PORTID(cb->skb), seq, flags, NL80211_CMD_NEW_SCAN_RESULTS); if (!hdr) return -1; genl_dump_check_consistent(cb, hdr); if (nla_put_u32(msg, NL80211_ATTR_GENERATION, rdev->bss_generation)) goto nla_put_failure; if (wdev->netdev && nla_put_u32(msg, NL80211_ATTR_IFINDEX, wdev->netdev->ifindex)) goto nla_put_failure; if (nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev), NL80211_ATTR_PAD)) goto nla_put_failure; bss = nla_nest_start(msg, NL80211_ATTR_BSS); if (!bss) goto nla_put_failure; if ((!is_zero_ether_addr(res->bssid) && nla_put(msg, NL80211_BSS_BSSID, ETH_ALEN, res->bssid))) goto nla_put_failure; rcu_read_lock(); /* indicate whether we have probe response data or not */ if (rcu_access_pointer(res->proberesp_ies) && nla_put_flag(msg, NL80211_BSS_PRESP_DATA)) goto fail_unlock_rcu; /* this pointer prefers to be pointed to probe response data * but is always valid */ ies = rcu_dereference(res->ies); if (ies) { if (nla_put_u64_64bit(msg, NL80211_BSS_TSF, ies->tsf, NL80211_BSS_PAD)) goto fail_unlock_rcu; if (ies->len && nla_put(msg, NL80211_BSS_INFORMATION_ELEMENTS, ies->len, ies->data)) goto fail_unlock_rcu; } /* and this pointer is always (unless driver didn't know) beacon data */ ies = rcu_dereference(res->beacon_ies); if (ies && ies->from_beacon) { if (nla_put_u64_64bit(msg, NL80211_BSS_BEACON_TSF, ies->tsf, NL80211_BSS_PAD)) goto fail_unlock_rcu; if (ies->len && nla_put(msg, NL80211_BSS_BEACON_IES, ies->len, ies->data)) goto fail_unlock_rcu; } rcu_read_unlock(); if (res->beacon_interval && nla_put_u16(msg, NL80211_BSS_BEACON_INTERVAL, res->beacon_interval)) goto nla_put_failure; if (nla_put_u16(msg, NL80211_BSS_CAPABILITY, res->capability) || nla_put_u32(msg, NL80211_BSS_FREQUENCY, res->channel->center_freq) || nla_put_u32(msg, NL80211_BSS_CHAN_WIDTH, res->scan_width) || nla_put_u32(msg, NL80211_BSS_SEEN_MS_AGO, jiffies_to_msecs(jiffies - intbss->ts))) goto nla_put_failure; if (intbss->parent_tsf && (nla_put_u64_64bit(msg, NL80211_BSS_PARENT_TSF, intbss->parent_tsf, NL80211_BSS_PAD) || nla_put(msg, NL80211_BSS_PARENT_BSSID, ETH_ALEN, intbss->parent_bssid))) goto nla_put_failure; if (intbss->ts_boottime && nla_put_u64_64bit(msg, NL80211_BSS_LAST_SEEN_BOOTTIME, intbss->ts_boottime, NL80211_BSS_PAD)) goto nla_put_failure; if (!nl80211_put_signal(msg, intbss->pub.chains, intbss->pub.chain_signal, NL80211_BSS_CHAIN_SIGNAL)) goto nla_put_failure; switch (rdev->wiphy.signal_type) { case CFG80211_SIGNAL_TYPE_MBM: if (nla_put_u32(msg, NL80211_BSS_SIGNAL_MBM, res->signal)) goto nla_put_failure; break; case CFG80211_SIGNAL_TYPE_UNSPEC: if (nla_put_u8(msg, NL80211_BSS_SIGNAL_UNSPEC, res->signal)) goto nla_put_failure; break; default: break; } switch (wdev->iftype) { case NL80211_IFTYPE_P2P_CLIENT: case NL80211_IFTYPE_STATION: if (intbss == wdev->current_bss && nla_put_u32(msg, NL80211_BSS_STATUS, NL80211_BSS_STATUS_ASSOCIATED)) goto 
nla_put_failure; break; case NL80211_IFTYPE_ADHOC: if (intbss == wdev->current_bss && nla_put_u32(msg, NL80211_BSS_STATUS, NL80211_BSS_STATUS_IBSS_JOINED)) goto nla_put_failure; break; default: break; } nla_nest_end(msg, bss); genlmsg_end(msg, hdr); return 0; fail_unlock_rcu: rcu_read_unlock(); nla_put_failure: genlmsg_cancel(msg, hdr); return -EMSGSIZE; } static int nl80211_dump_scan(struct sk_buff *skb, struct netlink_callback *cb) { struct cfg80211_registered_device *rdev; struct cfg80211_internal_bss *scan; struct wireless_dev *wdev; int start = cb->args[2], idx = 0; int err; rtnl_lock(); err = nl80211_prepare_wdev_dump(cb, &rdev, &wdev); if (err) { rtnl_unlock(); return err; } wdev_lock(wdev); spin_lock_bh(&rdev->bss_lock); /* * dump_scan will be called multiple times to break up the scan results * into multiple messages. It is unlikely that any more BSS entries will be * expired after the first call, so only call this on the * first dump_scan invocation. */ if (start == 0) cfg80211_bss_expire(rdev); #if LINUX_VERSION_IS_GEQ(3,1,0) cb->seq = rdev->bss_generation; #endif list_for_each_entry(scan, &rdev->bss_list, list) { if (++idx <= start) continue; if (nl80211_send_bss(skb, cb, cb->nlh->nlmsg_seq, NLM_F_MULTI, rdev, wdev, scan) < 0) { idx--; break; } } spin_unlock_bh(&rdev->bss_lock); wdev_unlock(wdev); cb->args[2] = idx; rtnl_unlock(); return skb->len; } static int nl80211_send_survey(struct sk_buff *msg, u32 portid, u32 seq, int flags, struct net_device *dev, bool allow_radio_stats, struct survey_info *survey) { void *hdr; struct nlattr *infoattr; /* skip radio stats if userspace didn't request them */ if (!survey->channel && !allow_radio_stats) return 0; hdr = nl80211hdr_put(msg, portid, seq, flags, NL80211_CMD_NEW_SURVEY_RESULTS); if (!hdr) return -ENOMEM; if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex)) goto nla_put_failure; infoattr = nla_nest_start(msg, NL80211_ATTR_SURVEY_INFO); if (!infoattr) goto nla_put_failure; if (survey->channel && nla_put_u32(msg, NL80211_SURVEY_INFO_FREQUENCY, survey->channel->center_freq)) goto nla_put_failure; if ((survey->filled & SURVEY_INFO_NOISE_DBM) && nla_put_u8(msg, NL80211_SURVEY_INFO_NOISE, survey->noise)) goto nla_put_failure; if ((survey->filled & SURVEY_INFO_IN_USE) && nla_put_flag(msg, NL80211_SURVEY_INFO_IN_USE)) goto nla_put_failure; if ((survey->filled & SURVEY_INFO_TIME) && nla_put_u64_64bit(msg, NL80211_SURVEY_INFO_TIME, survey->time, NL80211_SURVEY_INFO_PAD)) goto nla_put_failure; if ((survey->filled & SURVEY_INFO_TIME_BUSY) && nla_put_u64_64bit(msg, NL80211_SURVEY_INFO_TIME_BUSY, survey->time_busy, NL80211_SURVEY_INFO_PAD)) goto nla_put_failure; if ((survey->filled & SURVEY_INFO_TIME_EXT_BUSY) && nla_put_u64_64bit(msg, NL80211_SURVEY_INFO_TIME_EXT_BUSY, survey->time_ext_busy, NL80211_SURVEY_INFO_PAD)) goto nla_put_failure; if ((survey->filled & SURVEY_INFO_TIME_RX) && nla_put_u64_64bit(msg, NL80211_SURVEY_INFO_TIME_RX, survey->time_rx, NL80211_SURVEY_INFO_PAD)) goto nla_put_failure; if ((survey->filled & SURVEY_INFO_TIME_TX) && nla_put_u64_64bit(msg, NL80211_SURVEY_INFO_TIME_TX, survey->time_tx, NL80211_SURVEY_INFO_PAD)) goto nla_put_failure; if ((survey->filled & SURVEY_INFO_TIME_SCAN) && nla_put_u64_64bit(msg, NL80211_SURVEY_INFO_TIME_SCAN, survey->time_scan, NL80211_SURVEY_INFO_PAD)) goto nla_put_failure; nla_nest_end(msg, infoattr); genlmsg_end(msg, hdr); return 0; nla_put_failure: genlmsg_cancel(msg, hdr); return -EMSGSIZE; } static int nl80211_dump_survey(struct sk_buff *skb, struct netlink_callback *cb) {
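/* Resumable dump: cb->args[2] carries the survey index between
 * invocations, so each netlink message continues where the
 * previous one stopped.
 */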
struct nlattr **attrbuf = genl_family_attrbuf(&nl80211_fam); struct survey_info survey; struct cfg80211_registered_device *rdev; struct wireless_dev *wdev; int survey_idx = cb->args[2]; int res; bool radio_stats; rtnl_lock(); res = nl80211_prepare_wdev_dump(cb, &rdev, &wdev); if (res) goto out_err; /* prepare_wdev_dump parsed the attributes */ radio_stats = attrbuf[NL80211_ATTR_SURVEY_RADIO_STATS]; if (!wdev->netdev) { res = -EINVAL; goto out_err; } if (!rdev->ops->dump_survey) { res = -EOPNOTSUPP; goto out_err; } while (1) { res = rdev_dump_survey(rdev, wdev->netdev, survey_idx, &survey); if (res == -ENOENT) break; if (res) goto out_err; /* don't send disabled channels, but do send non-channel data */ if (survey.channel && survey.channel->flags & IEEE80211_CHAN_DISABLED) { survey_idx++; continue; } if (nl80211_send_survey(skb, NETLINK_CB_PORTID(cb->skb), cb->nlh->nlmsg_seq, NLM_F_MULTI, wdev->netdev, radio_stats, &survey) < 0) goto out; survey_idx++; } out: cb->args[2] = survey_idx; res = skb->len; out_err: rtnl_unlock(); return res; } static bool nl80211_valid_wpa_versions(u32 wpa_versions) { return !(wpa_versions & ~(NL80211_WPA_VERSION_1 | NL80211_WPA_VERSION_2)); } static int nl80211_authenticate(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; struct ieee80211_channel *chan; const u8 *bssid, *ssid, *ie = NULL, *auth_data = NULL; int err, ssid_len, ie_len = 0, auth_data_len = 0; enum nl80211_auth_type auth_type; struct key_parse key; bool local_state_change; if (!info->attrs[NL80211_ATTR_MAC]) return -EINVAL; if (!info->attrs[NL80211_ATTR_AUTH_TYPE]) return -EINVAL; if (!info->attrs[NL80211_ATTR_SSID]) return -EINVAL; if (!info->attrs[NL80211_ATTR_WIPHY_FREQ]) return -EINVAL; err = nl80211_parse_key(info, &key); if (err) return err; if (key.idx >= 0) { if (key.type != -1 && key.type != NL80211_KEYTYPE_GROUP) return -EINVAL; if (!key.p.key || !key.p.key_len) return -EINVAL; if ((key.p.cipher != WLAN_CIPHER_SUITE_WEP40 || key.p.key_len != WLAN_KEY_LEN_WEP40) && (key.p.cipher != WLAN_CIPHER_SUITE_WEP104 || key.p.key_len != WLAN_KEY_LEN_WEP104)) return -EINVAL; if (key.idx > 3) return -EINVAL; } else { key.p.key_len = 0; key.p.key = NULL; } if (key.idx >= 0) { int i; bool ok = false; for (i = 0; i < rdev->wiphy.n_cipher_suites; i++) { if (key.p.cipher == rdev->wiphy.cipher_suites[i]) { ok = true; break; } } if (!ok) return -EINVAL; } if (!rdev->ops->auth) return -EOPNOTSUPP; if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION && dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_CLIENT) return -EOPNOTSUPP; bssid = nla_data(info->attrs[NL80211_ATTR_MAC]); chan = nl80211_get_valid_chan(&rdev->wiphy, info->attrs[NL80211_ATTR_WIPHY_FREQ]); if (!chan) return -EINVAL; ssid = nla_data(info->attrs[NL80211_ATTR_SSID]); ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]); if (info->attrs[NL80211_ATTR_IE]) { ie = nla_data(info->attrs[NL80211_ATTR_IE]); ie_len = nla_len(info->attrs[NL80211_ATTR_IE]); } auth_type = nla_get_u32(info->attrs[NL80211_ATTR_AUTH_TYPE]); if (!nl80211_valid_auth_type(rdev, auth_type, NL80211_CMD_AUTHENTICATE)) return -EINVAL; if ((auth_type == NL80211_AUTHTYPE_SAE || auth_type == NL80211_AUTHTYPE_FILS_SK || auth_type == NL80211_AUTHTYPE_FILS_SK_PFS || auth_type == NL80211_AUTHTYPE_FILS_PK) && !info->attrs[NL80211_ATTR_AUTH_DATA]) return -EINVAL; if (info->attrs[NL80211_ATTR_AUTH_DATA]) { if (auth_type != NL80211_AUTHTYPE_SAE && auth_type != NL80211_AUTHTYPE_FILS_SK && 
auth_type != NL80211_AUTHTYPE_FILS_SK_PFS && auth_type != NL80211_AUTHTYPE_FILS_PK) return -EINVAL; auth_data = nla_data(info->attrs[NL80211_ATTR_AUTH_DATA]); auth_data_len = nla_len(info->attrs[NL80211_ATTR_AUTH_DATA]); /* need to include at least Auth Transaction and Status Code */ if (auth_data_len < 4) return -EINVAL; } local_state_change = !!info->attrs[NL80211_ATTR_LOCAL_STATE_CHANGE]; /* * Since we no longer track auth state, ignore * requests to only change local state. */ if (local_state_change) return 0; wdev_lock(dev->ieee80211_ptr); err = cfg80211_mlme_auth(rdev, dev, chan, auth_type, bssid, ssid, ssid_len, ie, ie_len, key.p.key, key.p.key_len, key.idx, auth_data, auth_data_len); wdev_unlock(dev->ieee80211_ptr); return err; } static int validate_pae_over_nl80211(struct cfg80211_registered_device *rdev, struct genl_info *info) { if (!info->attrs[NL80211_ATTR_SOCKET_OWNER]) { GENL_SET_ERR_MSG(info, "SOCKET_OWNER not set"); return -EINVAL; } if (!rdev->ops->tx_control_port || !wiphy_ext_feature_isset(&rdev->wiphy, NL80211_EXT_FEATURE_CONTROL_PORT_OVER_NL80211)) return -EOPNOTSUPP; return 0; } static int nl80211_crypto_settings(struct cfg80211_registered_device *rdev, struct genl_info *info, struct cfg80211_crypto_settings *settings, int pairwise_cipher_limit, int group_cipher_limit) { memset(settings, 0, sizeof(*settings)); settings->control_port = info->attrs[NL80211_ATTR_CONTROL_PORT]; if (info->attrs[NL80211_ATTR_CONTROL_PORT_ETHERTYPE]) { u16 proto; proto = nla_get_u16( info->attrs[NL80211_ATTR_CONTROL_PORT_ETHERTYPE]); settings->control_port_ethertype = cpu_to_be16(proto); if (!(rdev->wiphy.flags & WIPHY_FLAG_CONTROL_PORT_PROTOCOL) && proto != ETH_P_PAE) return -EINVAL; if (info->attrs[NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT]) settings->control_port_no_encrypt = true; } else settings->control_port_ethertype = cpu_to_be16(ETH_P_PAE); if (info->attrs[NL80211_ATTR_CONTROL_PORT_OVER_NL80211]) { int r = validate_pae_over_nl80211(rdev, info); if (r < 0) return r; settings->control_port_over_nl80211 = true; } if (info->attrs[NL80211_ATTR_CIPHER_SUITES_PAIRWISE]) { void *data; int len, i; data = nla_data(info->attrs[NL80211_ATTR_CIPHER_SUITES_PAIRWISE]); len = nla_len(info->attrs[NL80211_ATTR_CIPHER_SUITES_PAIRWISE]); settings->n_ciphers_pairwise = len / sizeof(u32); if (len % sizeof(u32)) return -EINVAL; if (settings->n_ciphers_pairwise > pairwise_cipher_limit) return -EINVAL; memcpy(settings->ciphers_pairwise, data, len); for (i = 0; i < settings->n_ciphers_pairwise; i++) if (!cfg80211_supported_cipher_suite( &rdev->wiphy, settings->ciphers_pairwise[i])) return -EINVAL; } if (info->attrs[NL80211_ATTR_CIPHER_SUITES_GROUP]) { void *data; int len, i; data = nla_data(info->attrs[NL80211_ATTR_CIPHER_SUITES_GROUP]); len = nla_len(info->attrs[NL80211_ATTR_CIPHER_SUITES_GROUP]); settings->n_ciphers_group = len / sizeof(u32); if (len % sizeof(u32)) return -EINVAL; if (settings->n_ciphers_group > group_cipher_limit) return -EINVAL; memcpy(settings->ciphers_group, data, len); for (i = 0; i < settings->n_ciphers_group; i++) if (!cfg80211_supported_cipher_suite( &rdev->wiphy, settings->ciphers_group[i])) return -EINVAL; } if (info->attrs[NL80211_ATTR_WPA_VERSIONS]) { settings->wpa_versions = nla_get_u32(info->attrs[NL80211_ATTR_WPA_VERSIONS]); if (!nl80211_valid_wpa_versions(settings->wpa_versions)) return -EINVAL; } if (info->attrs[NL80211_ATTR_AKM_SUITES]) { void *data; int len; data = nla_data(info->attrs[NL80211_ATTR_AKM_SUITES]); len = nla_len(info->attrs[NL80211_ATTR_AKM_SUITES]); 
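/* AKM suites arrive as a raw array of u32 values; reject trailing
 * bytes and lists longer than NL80211_MAX_NR_AKM_SUITES
 */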
settings->n_akm_suites = len / sizeof(u32); if (len % sizeof(u32)) return -EINVAL; if (settings->n_akm_suites > NL80211_MAX_NR_AKM_SUITES) return -EINVAL; memcpy(settings->akm_suites, data, len); } if (info->attrs[NL80211_ATTR_PMK]) { if (nla_len(info->attrs[NL80211_ATTR_PMK]) != WLAN_PMK_LEN) return -EINVAL; if (!wiphy_ext_feature_isset(&rdev->wiphy, NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_PSK)) return -EINVAL; settings->psk = nla_data(info->attrs[NL80211_ATTR_PMK]); } return 0; } static int nl80211_associate(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; struct ieee80211_channel *chan; struct cfg80211_assoc_request req = {}; const u8 *bssid, *ssid; int err, ssid_len = 0; if (dev->ieee80211_ptr->conn_owner_nlportid && dev->ieee80211_ptr->conn_owner_nlportid != genl_info_snd_portid(info)) return -EPERM; if (!info->attrs[NL80211_ATTR_MAC] || !info->attrs[NL80211_ATTR_SSID] || !info->attrs[NL80211_ATTR_WIPHY_FREQ]) return -EINVAL; if (!rdev->ops->assoc) return -EOPNOTSUPP; if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION && dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_CLIENT) return -EOPNOTSUPP; bssid = nla_data(info->attrs[NL80211_ATTR_MAC]); chan = nl80211_get_valid_chan(&rdev->wiphy, info->attrs[NL80211_ATTR_WIPHY_FREQ]); if (!chan) return -EINVAL; ssid = nla_data(info->attrs[NL80211_ATTR_SSID]); ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]); if (info->attrs[NL80211_ATTR_IE]) { req.ie = nla_data(info->attrs[NL80211_ATTR_IE]); req.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]); } if (info->attrs[NL80211_ATTR_USE_MFP]) { enum nl80211_mfp mfp = nla_get_u32(info->attrs[NL80211_ATTR_USE_MFP]); if (mfp == NL80211_MFP_REQUIRED) req.use_mfp = true; else if (mfp != NL80211_MFP_NO) return -EINVAL; } if (info->attrs[NL80211_ATTR_PREV_BSSID]) req.prev_bssid = nla_data(info->attrs[NL80211_ATTR_PREV_BSSID]); if (nla_get_flag(info->attrs[NL80211_ATTR_DISABLE_HT])) req.flags |= ASSOC_REQ_DISABLE_HT; if (info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK]) memcpy(&req.ht_capa_mask, nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK]), sizeof(req.ht_capa_mask)); if (info->attrs[NL80211_ATTR_HT_CAPABILITY]) { if (!info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK]) return -EINVAL; memcpy(&req.ht_capa, nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]), sizeof(req.ht_capa)); } if (nla_get_flag(info->attrs[NL80211_ATTR_DISABLE_VHT])) req.flags |= ASSOC_REQ_DISABLE_VHT; if (info->attrs[NL80211_ATTR_VHT_CAPABILITY_MASK]) memcpy(&req.vht_capa_mask, nla_data(info->attrs[NL80211_ATTR_VHT_CAPABILITY_MASK]), sizeof(req.vht_capa_mask)); if (info->attrs[NL80211_ATTR_VHT_CAPABILITY]) { if (!info->attrs[NL80211_ATTR_VHT_CAPABILITY_MASK]) return -EINVAL; memcpy(&req.vht_capa, nla_data(info->attrs[NL80211_ATTR_VHT_CAPABILITY]), sizeof(req.vht_capa)); } if (nla_get_flag(info->attrs[NL80211_ATTR_USE_RRM])) { if (!((rdev->wiphy.features & NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES) && (rdev->wiphy.features & NL80211_FEATURE_QUIET)) && !wiphy_ext_feature_isset(&rdev->wiphy, NL80211_EXT_FEATURE_RRM)) return -EINVAL; req.flags |= ASSOC_REQ_USE_RRM; } if (info->attrs[NL80211_ATTR_FILS_KEK]) { req.fils_kek = nla_data(info->attrs[NL80211_ATTR_FILS_KEK]); req.fils_kek_len = nla_len(info->attrs[NL80211_ATTR_FILS_KEK]); if (!info->attrs[NL80211_ATTR_FILS_NONCES]) return -EINVAL; req.fils_nonces = nla_data(info->attrs[NL80211_ATTR_FILS_NONCES]); } err = nl80211_crypto_settings(rdev, info, &req.crypto, 1, 1); if (!err) { 
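/* the association itself runs under the wdev lock; on success,
 * NL80211_ATTR_SOCKET_OWNER marks the requesting socket as the
 * connection owner, so cfg80211 can tear the connection down if
 * that socket goes away
 */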
wdev_lock(dev->ieee80211_ptr); err = cfg80211_mlme_assoc(rdev, dev, chan, bssid, ssid, ssid_len, &req); if (!err && info->attrs[NL80211_ATTR_SOCKET_OWNER]) { dev->ieee80211_ptr->conn_owner_nlportid = genl_info_snd_portid(info); memcpy(dev->ieee80211_ptr->disconnect_bssid, bssid, ETH_ALEN); } wdev_unlock(dev->ieee80211_ptr); } return err; } static int nl80211_deauthenticate(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; const u8 *ie = NULL, *bssid; int ie_len = 0, err; u16 reason_code; bool local_state_change; if (dev->ieee80211_ptr->conn_owner_nlportid && dev->ieee80211_ptr->conn_owner_nlportid != genl_info_snd_portid(info)) return -EPERM; if (!info->attrs[NL80211_ATTR_MAC]) return -EINVAL; if (!info->attrs[NL80211_ATTR_REASON_CODE]) return -EINVAL; if (!rdev->ops->deauth) return -EOPNOTSUPP; if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION && dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_CLIENT) return -EOPNOTSUPP; bssid = nla_data(info->attrs[NL80211_ATTR_MAC]); reason_code = nla_get_u16(info->attrs[NL80211_ATTR_REASON_CODE]); if (reason_code == 0) { /* Reason Code 0 is reserved */ return -EINVAL; } if (info->attrs[NL80211_ATTR_IE]) { ie = nla_data(info->attrs[NL80211_ATTR_IE]); ie_len = nla_len(info->attrs[NL80211_ATTR_IE]); } local_state_change = !!info->attrs[NL80211_ATTR_LOCAL_STATE_CHANGE]; wdev_lock(dev->ieee80211_ptr); err = cfg80211_mlme_deauth(rdev, dev, bssid, ie, ie_len, reason_code, local_state_change); wdev_unlock(dev->ieee80211_ptr); return err; } static int nl80211_disassociate(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; const u8 *ie = NULL, *bssid; int ie_len = 0, err; u16 reason_code; bool local_state_change; if (dev->ieee80211_ptr->conn_owner_nlportid && dev->ieee80211_ptr->conn_owner_nlportid != genl_info_snd_portid(info)) return -EPERM; if (!info->attrs[NL80211_ATTR_MAC]) return -EINVAL; if (!info->attrs[NL80211_ATTR_REASON_CODE]) return -EINVAL; if (!rdev->ops->disassoc) return -EOPNOTSUPP; if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION && dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_CLIENT) return -EOPNOTSUPP; bssid = nla_data(info->attrs[NL80211_ATTR_MAC]); reason_code = nla_get_u16(info->attrs[NL80211_ATTR_REASON_CODE]); if (reason_code == 0) { /* Reason Code 0 is reserved */ return -EINVAL; } if (info->attrs[NL80211_ATTR_IE]) { ie = nla_data(info->attrs[NL80211_ATTR_IE]); ie_len = nla_len(info->attrs[NL80211_ATTR_IE]); } local_state_change = !!info->attrs[NL80211_ATTR_LOCAL_STATE_CHANGE]; wdev_lock(dev->ieee80211_ptr); err = cfg80211_mlme_disassoc(rdev, dev, bssid, ie, ie_len, reason_code, local_state_change); wdev_unlock(dev->ieee80211_ptr); return err; } static bool nl80211_parse_mcast_rate(struct cfg80211_registered_device *rdev, int mcast_rate[NUM_NL80211_BANDS], int rateval) { struct wiphy *wiphy = &rdev->wiphy; bool found = false; int band, i; for (band = 0; band < NUM_NL80211_BANDS; band++) { struct ieee80211_supported_band *sband; sband = wiphy->bands[band]; if (!sband) continue; for (i = 0; i < sband->n_bitrates; i++) { if (sband->bitrates[i].bitrate == rateval) { mcast_rate[band] = i + 1; found = true; break; } } } return found; } static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; struct 
cfg80211_ibss_params ibss; struct wiphy *wiphy; struct cfg80211_cached_keys *connkeys = NULL; int err; memset(&ibss, 0, sizeof(ibss)); if (!info->attrs[NL80211_ATTR_SSID] || !nla_len(info->attrs[NL80211_ATTR_SSID])) return -EINVAL; ibss.beacon_interval = 100; if (info->attrs[NL80211_ATTR_BEACON_INTERVAL]) ibss.beacon_interval = nla_get_u32(info->attrs[NL80211_ATTR_BEACON_INTERVAL]); err = cfg80211_validate_beacon_int(rdev, NL80211_IFTYPE_ADHOC, ibss.beacon_interval); if (err) return err; if (!rdev->ops->join_ibss) return -EOPNOTSUPP; if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_ADHOC) return -EOPNOTSUPP; wiphy = &rdev->wiphy; if (info->attrs[NL80211_ATTR_MAC]) { ibss.bssid = nla_data(info->attrs[NL80211_ATTR_MAC]); if (!is_valid_ether_addr(ibss.bssid)) return -EINVAL; } ibss.ssid = nla_data(info->attrs[NL80211_ATTR_SSID]); ibss.ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]); if (info->attrs[NL80211_ATTR_IE]) { ibss.ie = nla_data(info->attrs[NL80211_ATTR_IE]); ibss.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]); } err = nl80211_parse_chandef(rdev, info, &ibss.chandef); if (err) return err; if (!cfg80211_reg_can_beacon(&rdev->wiphy, &ibss.chandef, NL80211_IFTYPE_ADHOC)) return -EINVAL; switch (ibss.chandef.width) { case NL80211_CHAN_WIDTH_5: case NL80211_CHAN_WIDTH_10: case NL80211_CHAN_WIDTH_20_NOHT: break; case NL80211_CHAN_WIDTH_20: case NL80211_CHAN_WIDTH_40: if (!(rdev->wiphy.features & NL80211_FEATURE_HT_IBSS)) return -EINVAL; break; case NL80211_CHAN_WIDTH_80: case NL80211_CHAN_WIDTH_80P80: case NL80211_CHAN_WIDTH_160: if (!(rdev->wiphy.features & NL80211_FEATURE_HT_IBSS)) return -EINVAL; if (!wiphy_ext_feature_isset(&rdev->wiphy, NL80211_EXT_FEATURE_VHT_IBSS)) return -EINVAL; break; default: return -EINVAL; } ibss.channel_fixed = !!info->attrs[NL80211_ATTR_FREQ_FIXED]; ibss.privacy = !!info->attrs[NL80211_ATTR_PRIVACY]; if (info->attrs[NL80211_ATTR_BSS_BASIC_RATES]) { u8 *rates = nla_data(info->attrs[NL80211_ATTR_BSS_BASIC_RATES]); int n_rates = nla_len(info->attrs[NL80211_ATTR_BSS_BASIC_RATES]); struct ieee80211_supported_band *sband = wiphy->bands[ibss.chandef.chan->band]; err = ieee80211_get_ratemask(sband, rates, n_rates, &ibss.basic_rates); if (err) return err; } if (info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK]) memcpy(&ibss.ht_capa_mask, nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK]), sizeof(ibss.ht_capa_mask)); if (info->attrs[NL80211_ATTR_HT_CAPABILITY]) { if (!info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK]) return -EINVAL; memcpy(&ibss.ht_capa, nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]), sizeof(ibss.ht_capa)); } if (info->attrs[NL80211_ATTR_MCAST_RATE] && !nl80211_parse_mcast_rate(rdev, ibss.mcast_rate, nla_get_u32(info->attrs[NL80211_ATTR_MCAST_RATE]))) return -EINVAL; if (ibss.privacy && info->attrs[NL80211_ATTR_KEYS]) { bool no_ht = false; connkeys = nl80211_parse_connkeys(rdev, info, &no_ht); if (IS_ERR(connkeys)) return PTR_ERR(connkeys); if ((ibss.chandef.width != NL80211_CHAN_WIDTH_20_NOHT) && no_ht) { kzfree(connkeys); return -EINVAL; } } ibss.control_port = nla_get_flag(info->attrs[NL80211_ATTR_CONTROL_PORT]); if (info->attrs[NL80211_ATTR_CONTROL_PORT_OVER_NL80211]) { int r = validate_pae_over_nl80211(rdev, info); if (r < 0) { kzfree(connkeys); return r; } ibss.control_port_over_nl80211 = true; } ibss.userspace_handles_dfs = nla_get_flag(info->attrs[NL80211_ATTR_HANDLE_DFS]); wdev_lock(dev->ieee80211_ptr); err = __cfg80211_join_ibss(rdev, dev, &ibss, connkeys); if (err) kzfree(connkeys); else if (info->attrs[NL80211_ATTR_SOCKET_OWNER]) 
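/* remember the owning socket (NL80211_ATTR_SOCKET_OWNER) */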
dev->ieee80211_ptr->conn_owner_nlportid = genl_info_snd_portid(info); wdev_unlock(dev->ieee80211_ptr); return err; } static int nl80211_leave_ibss(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; if (!rdev->ops->leave_ibss) return -EOPNOTSUPP; if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_ADHOC) return -EOPNOTSUPP; return cfg80211_leave_ibss(rdev, dev, false); } static int nl80211_set_mcast_rate(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; int mcast_rate[NUM_NL80211_BANDS]; u32 nla_rate; int err; if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_ADHOC && dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT && dev->ieee80211_ptr->iftype != NL80211_IFTYPE_OCB) return -EOPNOTSUPP; if (!rdev->ops->set_mcast_rate) return -EOPNOTSUPP; memset(mcast_rate, 0, sizeof(mcast_rate)); if (!info->attrs[NL80211_ATTR_MCAST_RATE]) return -EINVAL; nla_rate = nla_get_u32(info->attrs[NL80211_ATTR_MCAST_RATE]); if (!nl80211_parse_mcast_rate(rdev, mcast_rate, nla_rate)) return -EINVAL; err = rdev_set_mcast_rate(rdev, dev, mcast_rate); return err; } static struct sk_buff * __cfg80211_alloc_vendor_skb(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, int approxlen, u32 portid, u32 seq, enum nl80211_commands cmd, enum nl80211_attrs attr, const struct nl80211_vendor_cmd_info *info, gfp_t gfp) { struct sk_buff *skb; void *hdr; struct nlattr *data; skb = nlmsg_new(approxlen + 100, gfp); if (!skb) return NULL; hdr = nl80211hdr_put(skb, portid, seq, 0, cmd); if (!hdr) { kfree_skb(skb); return NULL; } if (nla_put_u32(skb, NL80211_ATTR_WIPHY, rdev->wiphy_idx)) goto nla_put_failure; if (info) { if (nla_put_u32(skb, NL80211_ATTR_VENDOR_ID, info->vendor_id)) goto nla_put_failure; if (nla_put_u32(skb, NL80211_ATTR_VENDOR_SUBCMD, info->subcmd)) goto nla_put_failure; } if (wdev) { if (nla_put_u64_64bit(skb, NL80211_ATTR_WDEV, wdev_id(wdev), NL80211_ATTR_PAD)) goto nla_put_failure; if (wdev->netdev && nla_put_u32(skb, NL80211_ATTR_IFINDEX, wdev->netdev->ifindex)) goto nla_put_failure; } data = nla_nest_start(skb, attr); if (!data) goto nla_put_failure; ((void **)skb->cb)[0] = rdev; ((void **)skb->cb)[1] = hdr; ((void **)skb->cb)[2] = data; return skb; nla_put_failure: kfree_skb(skb); return NULL; } struct sk_buff *__cfg80211_alloc_event_skb(struct wiphy *wiphy, struct wireless_dev *wdev, enum nl80211_commands cmd, enum nl80211_attrs attr, unsigned int portid, int vendor_event_idx, int approxlen, gfp_t gfp) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); const struct nl80211_vendor_cmd_info *info; switch (cmd) { case NL80211_CMD_TESTMODE: if (WARN_ON(vendor_event_idx != -1)) return NULL; info = NULL; break; case NL80211_CMD_VENDOR: if (WARN_ON(vendor_event_idx < 0 || vendor_event_idx >= wiphy->n_vendor_events)) return NULL; info = &wiphy->vendor_events[vendor_event_idx]; break; default: WARN_ON(1); return NULL; } return __cfg80211_alloc_vendor_skb(rdev, wdev, approxlen, portid, 0, cmd, attr, info, gfp); } EXPORT_SYMBOL(__cfg80211_alloc_event_skb); void __cfg80211_send_event_skb(struct sk_buff *skb, gfp_t gfp) { struct cfg80211_registered_device *rdev = ((void **)skb->cb)[0]; void *hdr = ((void **)skb->cb)[1]; struct nlmsghdr *nlhdr = nlmsg_hdr(skb); struct nlattr *data = ((void **)skb->cb)[2]; enum nl80211_multicast_groups mcgrp = NL80211_MCGRP_TESTMODE; /* clear CB data for netlink 
core to own from now on */ memset(skb->cb, 0, sizeof(skb->cb)); nla_nest_end(skb, data); genlmsg_end(skb, hdr); if (nlhdr->nlmsg_pid) { genlmsg_unicast(wiphy_net(&rdev->wiphy), skb, nlhdr->nlmsg_pid); } else { if (data->nla_type == NL80211_ATTR_VENDOR_DATA) mcgrp = NL80211_MCGRP_VENDOR; genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), skb, 0, mcgrp, gfp); } } EXPORT_SYMBOL(__cfg80211_send_event_skb); #ifdef CPTCFG_NL80211_TESTMODE static int nl80211_testmode_do(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct wireless_dev *wdev = __cfg80211_wdev_from_attrs(genl_info_net(info), info->attrs); int err; if (!rdev->ops->testmode_cmd) return -EOPNOTSUPP; if (IS_ERR(wdev)) { err = PTR_ERR(wdev); if (err != -EINVAL) return err; wdev = NULL; } else if (wdev->wiphy != &rdev->wiphy) { return -EINVAL; } if (!info->attrs[NL80211_ATTR_TESTDATA]) return -EINVAL; rdev->cur_cmd_info = info; err = rdev_testmode_cmd(rdev, wdev, nla_data(info->attrs[NL80211_ATTR_TESTDATA]), nla_len(info->attrs[NL80211_ATTR_TESTDATA])); rdev->cur_cmd_info = NULL; return err; } static int nl80211_testmode_dump(struct sk_buff *skb, struct netlink_callback *cb) { struct cfg80211_registered_device *rdev; int err; long phy_idx; void *data = NULL; int data_len = 0; rtnl_lock(); if (cb->args[0]) { /* * 0 is a valid index, but not valid for args[0], * so we need to offset by 1. */ phy_idx = cb->args[0] - 1; rdev = cfg80211_rdev_by_wiphy_idx(phy_idx); if (!rdev) { err = -ENOENT; goto out_err; } } else { struct nlattr **attrbuf = genl_family_attrbuf(&nl80211_fam); err = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize, attrbuf, nl80211_fam.maxattr, nl80211_policy, NULL); if (err) goto out_err; rdev = __cfg80211_rdev_from_attrs(sock_net(skb->sk), attrbuf); if (IS_ERR(rdev)) { err = PTR_ERR(rdev); goto out_err; } phy_idx = rdev->wiphy_idx; if (attrbuf[NL80211_ATTR_TESTDATA]) cb->args[1] = (long)attrbuf[NL80211_ATTR_TESTDATA]; } if (cb->args[1]) { data = nla_data((void *)cb->args[1]); data_len = nla_len((void *)cb->args[1]); } if (!rdev->ops->testmode_dump) { err = -EOPNOTSUPP; goto out_err; } while (1) { void *hdr = nl80211hdr_put(skb, NETLINK_CB_PORTID(cb->skb), cb->nlh->nlmsg_seq, NLM_F_MULTI, NL80211_CMD_TESTMODE); struct nlattr *tmdata; if (!hdr) break; if (nla_put_u32(skb, NL80211_ATTR_WIPHY, phy_idx)) { genlmsg_cancel(skb, hdr); break; } tmdata = nla_nest_start(skb, NL80211_ATTR_TESTDATA); if (!tmdata) { genlmsg_cancel(skb, hdr); break; } err = rdev_testmode_dump(rdev, skb, cb, data, data_len); nla_nest_end(skb, tmdata); if (err == -ENOBUFS || err == -ENOENT) { genlmsg_cancel(skb, hdr); break; } else if (err) { genlmsg_cancel(skb, hdr); goto out_err; } genlmsg_end(skb, hdr); } err = skb->len; /* see above */ cb->args[0] = phy_idx + 1; out_err: rtnl_unlock(); return err; } #endif static int nl80211_connect(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; struct cfg80211_connect_params connect; struct wiphy *wiphy; struct cfg80211_cached_keys *connkeys = NULL; int err; memset(&connect, 0, sizeof(connect)); if (!info->attrs[NL80211_ATTR_SSID] || !nla_len(info->attrs[NL80211_ATTR_SSID])) return -EINVAL; if (info->attrs[NL80211_ATTR_AUTH_TYPE]) { connect.auth_type = nla_get_u32(info->attrs[NL80211_ATTR_AUTH_TYPE]); if (!nl80211_valid_auth_type(rdev, connect.auth_type, NL80211_CMD_CONNECT)) return -EINVAL; } else connect.auth_type = 
NL80211_AUTHTYPE_AUTOMATIC; connect.privacy = info->attrs[NL80211_ATTR_PRIVACY]; if (info->attrs[NL80211_ATTR_WANT_1X_4WAY_HS] && !wiphy_ext_feature_isset(&rdev->wiphy, NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_1X)) return -EINVAL; connect.want_1x = info->attrs[NL80211_ATTR_WANT_1X_4WAY_HS]; err = nl80211_crypto_settings(rdev, info, &connect.crypto, NL80211_MAX_NR_CIPHER_SUITES, NL80211_MAX_NR_CIPHER_SUITES); if (err) return err; if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION && dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_CLIENT) return -EOPNOTSUPP; wiphy = &rdev->wiphy; connect.bg_scan_period = -1; if (info->attrs[NL80211_ATTR_BG_SCAN_PERIOD] && (wiphy->flags & WIPHY_FLAG_SUPPORTS_FW_ROAM)) { connect.bg_scan_period = nla_get_u16(info->attrs[NL80211_ATTR_BG_SCAN_PERIOD]); } if (info->attrs[NL80211_ATTR_MAC]) connect.bssid = nla_data(info->attrs[NL80211_ATTR_MAC]); else if (info->attrs[NL80211_ATTR_MAC_HINT]) connect.bssid_hint = nla_data(info->attrs[NL80211_ATTR_MAC_HINT]); connect.ssid = nla_data(info->attrs[NL80211_ATTR_SSID]); connect.ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]); if (info->attrs[NL80211_ATTR_IE]) { connect.ie = nla_data(info->attrs[NL80211_ATTR_IE]); connect.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]); } if (info->attrs[NL80211_ATTR_USE_MFP]) { connect.mfp = nla_get_u32(info->attrs[NL80211_ATTR_USE_MFP]); if (connect.mfp == NL80211_MFP_OPTIONAL && !wiphy_ext_feature_isset(&rdev->wiphy, NL80211_EXT_FEATURE_MFP_OPTIONAL)) return -EOPNOTSUPP; } else { connect.mfp = NL80211_MFP_NO; } if (info->attrs[NL80211_ATTR_PREV_BSSID]) connect.prev_bssid = nla_data(info->attrs[NL80211_ATTR_PREV_BSSID]); if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) { connect.channel = nl80211_get_valid_chan( wiphy, info->attrs[NL80211_ATTR_WIPHY_FREQ]); if (!connect.channel) return -EINVAL; } else if (info->attrs[NL80211_ATTR_WIPHY_FREQ_HINT]) { connect.channel_hint = nl80211_get_valid_chan( wiphy, info->attrs[NL80211_ATTR_WIPHY_FREQ_HINT]); if (!connect.channel_hint) return -EINVAL; } if (connect.privacy && info->attrs[NL80211_ATTR_KEYS]) { connkeys = nl80211_parse_connkeys(rdev, info, NULL); if (IS_ERR(connkeys)) return PTR_ERR(connkeys); } if (nla_get_flag(info->attrs[NL80211_ATTR_DISABLE_HT])) connect.flags |= ASSOC_REQ_DISABLE_HT; if (info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK]) memcpy(&connect.ht_capa_mask, nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK]), sizeof(connect.ht_capa_mask)); if (info->attrs[NL80211_ATTR_HT_CAPABILITY]) { if (!info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK]) { kzfree(connkeys); return -EINVAL; } memcpy(&connect.ht_capa, nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]), sizeof(connect.ht_capa)); } if (nla_get_flag(info->attrs[NL80211_ATTR_DISABLE_VHT])) connect.flags |= ASSOC_REQ_DISABLE_VHT; if (info->attrs[NL80211_ATTR_VHT_CAPABILITY_MASK]) memcpy(&connect.vht_capa_mask, nla_data(info->attrs[NL80211_ATTR_VHT_CAPABILITY_MASK]), sizeof(connect.vht_capa_mask)); if (info->attrs[NL80211_ATTR_VHT_CAPABILITY]) { if (!info->attrs[NL80211_ATTR_VHT_CAPABILITY_MASK]) { kzfree(connkeys); return -EINVAL; } memcpy(&connect.vht_capa, nla_data(info->attrs[NL80211_ATTR_VHT_CAPABILITY]), sizeof(connect.vht_capa)); } if (nla_get_flag(info->attrs[NL80211_ATTR_USE_RRM])) { if (!((rdev->wiphy.features & NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES) && (rdev->wiphy.features & NL80211_FEATURE_QUIET)) && !wiphy_ext_feature_isset(&rdev->wiphy, NL80211_EXT_FEATURE_RRM)) { kzfree(connkeys); return -EINVAL; } connect.flags |= ASSOC_REQ_USE_RRM; } connect.pbss = 
nla_get_flag(info->attrs[NL80211_ATTR_PBSS]); if (connect.pbss && !rdev->wiphy.bands[NL80211_BAND_60GHZ]) { kzfree(connkeys); return -EOPNOTSUPP; } if (info->attrs[NL80211_ATTR_BSS_SELECT]) { /* bss selection makes no sense if bssid is set */ if (connect.bssid) { kzfree(connkeys); return -EINVAL; } err = parse_bss_select(info->attrs[NL80211_ATTR_BSS_SELECT], wiphy, &connect.bss_select); if (err) { kzfree(connkeys); return err; } } if (wiphy_ext_feature_isset(&rdev->wiphy, NL80211_EXT_FEATURE_FILS_SK_OFFLOAD) && info->attrs[NL80211_ATTR_FILS_ERP_USERNAME] && info->attrs[NL80211_ATTR_FILS_ERP_REALM] && info->attrs[NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM] && info->attrs[NL80211_ATTR_FILS_ERP_RRK]) { connect.fils_erp_username = nla_data(info->attrs[NL80211_ATTR_FILS_ERP_USERNAME]); connect.fils_erp_username_len = nla_len(info->attrs[NL80211_ATTR_FILS_ERP_USERNAME]); connect.fils_erp_realm = nla_data(info->attrs[NL80211_ATTR_FILS_ERP_REALM]); connect.fils_erp_realm_len = nla_len(info->attrs[NL80211_ATTR_FILS_ERP_REALM]); connect.fils_erp_next_seq_num = nla_get_u16( info->attrs[NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM]); connect.fils_erp_rrk = nla_data(info->attrs[NL80211_ATTR_FILS_ERP_RRK]); connect.fils_erp_rrk_len = nla_len(info->attrs[NL80211_ATTR_FILS_ERP_RRK]); } else if (info->attrs[NL80211_ATTR_FILS_ERP_USERNAME] || info->attrs[NL80211_ATTR_FILS_ERP_REALM] || info->attrs[NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM] || info->attrs[NL80211_ATTR_FILS_ERP_RRK]) { kzfree(connkeys); return -EINVAL; } if (nla_get_flag(info->attrs[NL80211_ATTR_EXTERNAL_AUTH_SUPPORT])) { if (!info->attrs[NL80211_ATTR_SOCKET_OWNER]) { kzfree(connkeys); GENL_SET_ERR_MSG(info, "external auth requires connection ownership"); return -EINVAL; } connect.flags |= CONNECT_REQ_EXTERNAL_AUTH_SUPPORT; } wdev_lock(dev->ieee80211_ptr); err = cfg80211_connect(rdev, dev, &connect, connkeys, connect.prev_bssid); if (err) kzfree(connkeys); if (!err && info->attrs[NL80211_ATTR_SOCKET_OWNER]) { dev->ieee80211_ptr->conn_owner_nlportid = genl_info_snd_portid(info); if (connect.bssid) memcpy(dev->ieee80211_ptr->disconnect_bssid, connect.bssid, ETH_ALEN); else memset(dev->ieee80211_ptr->disconnect_bssid, 0, ETH_ALEN); } wdev_unlock(dev->ieee80211_ptr); return err; } static int nl80211_update_connect_params(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_connect_params connect = {}; struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; struct wireless_dev *wdev = dev->ieee80211_ptr; bool fils_sk_offload; u32 auth_type; u32 changed = 0; int ret; if (!rdev->ops->update_connect_params) return -EOPNOTSUPP; if (info->attrs[NL80211_ATTR_IE]) { connect.ie = nla_data(info->attrs[NL80211_ATTR_IE]); connect.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]); changed |= UPDATE_ASSOC_IES; } fils_sk_offload = wiphy_ext_feature_isset(&rdev->wiphy, NL80211_EXT_FEATURE_FILS_SK_OFFLOAD); /* * when driver supports fils-sk offload all attributes must be * provided. So the else covers "fils-sk-not-all" and * "no-fils-sk-any". 
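* Supplying only a subset of the ERP attributes is therefore
* rejected with -EINVAL below.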
*/ if (fils_sk_offload && info->attrs[NL80211_ATTR_FILS_ERP_USERNAME] && info->attrs[NL80211_ATTR_FILS_ERP_REALM] && info->attrs[NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM] && info->attrs[NL80211_ATTR_FILS_ERP_RRK]) { connect.fils_erp_username = nla_data(info->attrs[NL80211_ATTR_FILS_ERP_USERNAME]); connect.fils_erp_username_len = nla_len(info->attrs[NL80211_ATTR_FILS_ERP_USERNAME]); connect.fils_erp_realm = nla_data(info->attrs[NL80211_ATTR_FILS_ERP_REALM]); connect.fils_erp_realm_len = nla_len(info->attrs[NL80211_ATTR_FILS_ERP_REALM]); connect.fils_erp_next_seq_num = nla_get_u16( info->attrs[NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM]); connect.fils_erp_rrk = nla_data(info->attrs[NL80211_ATTR_FILS_ERP_RRK]); connect.fils_erp_rrk_len = nla_len(info->attrs[NL80211_ATTR_FILS_ERP_RRK]); changed |= UPDATE_FILS_ERP_INFO; } else if (info->attrs[NL80211_ATTR_FILS_ERP_USERNAME] || info->attrs[NL80211_ATTR_FILS_ERP_REALM] || info->attrs[NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM] || info->attrs[NL80211_ATTR_FILS_ERP_RRK]) { return -EINVAL; } if (info->attrs[NL80211_ATTR_AUTH_TYPE]) { auth_type = nla_get_u32(info->attrs[NL80211_ATTR_AUTH_TYPE]); if (!nl80211_valid_auth_type(rdev, auth_type, NL80211_CMD_CONNECT)) return -EINVAL; if (auth_type == NL80211_AUTHTYPE_FILS_SK && fils_sk_offload && !(changed & UPDATE_FILS_ERP_INFO)) return -EINVAL; connect.auth_type = auth_type; changed |= UPDATE_AUTH_TYPE; } wdev_lock(dev->ieee80211_ptr); if (!wdev->current_bss) ret = -ENOLINK; else ret = rdev_update_connect_params(rdev, dev, &connect, changed); wdev_unlock(dev->ieee80211_ptr); return ret; } static int nl80211_disconnect(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; u16 reason; int ret; if (dev->ieee80211_ptr->conn_owner_nlportid && dev->ieee80211_ptr->conn_owner_nlportid != genl_info_snd_portid(info)) return -EPERM; if (!info->attrs[NL80211_ATTR_REASON_CODE]) reason = WLAN_REASON_DEAUTH_LEAVING; else reason = nla_get_u16(info->attrs[NL80211_ATTR_REASON_CODE]); if (reason == 0) return -EINVAL; if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION && dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_CLIENT) return -EOPNOTSUPP; wdev_lock(dev->ieee80211_ptr); ret = cfg80211_disconnect(rdev, dev, reason, true); wdev_unlock(dev->ieee80211_ptr); return ret; } static int nl80211_wiphy_netns(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net *net; int err; if (info->attrs[NL80211_ATTR_PID]) { u32 pid = nla_get_u32(info->attrs[NL80211_ATTR_PID]); net = get_net_ns_by_pid(pid); } else if (info->attrs[NL80211_ATTR_NETNS_FD]) { u32 fd = nla_get_u32(info->attrs[NL80211_ATTR_NETNS_FD]); net = get_net_ns_by_fd(fd); } else { return -EINVAL; } if (IS_ERR(net)) return PTR_ERR(net); err = 0; /* check if anything to do */ if (!net_eq(wiphy_net(&rdev->wiphy), net)) err = cfg80211_switch_netns(rdev, net); put_net(net); return err; } static int nl80211_setdel_pmksa(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; int (*rdev_ops)(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_pmksa *pmksa) = NULL; struct net_device *dev = info->user_ptr[1]; struct cfg80211_pmksa pmksa; memset(&pmksa, 0, sizeof(struct cfg80211_pmksa)); if (!info->attrs[NL80211_ATTR_PMKID]) return -EINVAL; pmksa.pmkid = nla_data(info->attrs[NL80211_ATTR_PMKID]); if (info->attrs[NL80211_ATTR_MAC]) { pmksa.bssid = 
nla_data(info->attrs[NL80211_ATTR_MAC]); } else if (info->attrs[NL80211_ATTR_SSID] && info->attrs[NL80211_ATTR_FILS_CACHE_ID] && (info->genlhdr->cmd == NL80211_CMD_DEL_PMKSA || info->attrs[NL80211_ATTR_PMK])) { pmksa.ssid = nla_data(info->attrs[NL80211_ATTR_SSID]); pmksa.ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]); pmksa.cache_id = nla_data(info->attrs[NL80211_ATTR_FILS_CACHE_ID]); } else { return -EINVAL; } if (info->attrs[NL80211_ATTR_PMK]) { pmksa.pmk = nla_data(info->attrs[NL80211_ATTR_PMK]); pmksa.pmk_len = nla_len(info->attrs[NL80211_ATTR_PMK]); } if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION && dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_CLIENT) return -EOPNOTSUPP; switch (info->genlhdr->cmd) { case NL80211_CMD_SET_PMKSA: rdev_ops = rdev->ops->set_pmksa; break; case NL80211_CMD_DEL_PMKSA: rdev_ops = rdev->ops->del_pmksa; break; default: WARN_ON(1); break; } if (!rdev_ops) return -EOPNOTSUPP; return rdev_ops(&rdev->wiphy, dev, &pmksa); } static int nl80211_flush_pmksa(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION && dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_CLIENT) return -EOPNOTSUPP; if (!rdev->ops->flush_pmksa) return -EOPNOTSUPP; return rdev_flush_pmksa(rdev, dev); } static int nl80211_tdls_mgmt(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; u8 action_code, dialog_token; u32 peer_capability = 0; u16 status_code; u8 *peer; bool initiator; if (!(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS) || !rdev->ops->tdls_mgmt) return -EOPNOTSUPP; if (!info->attrs[NL80211_ATTR_TDLS_ACTION] || !info->attrs[NL80211_ATTR_STATUS_CODE] || !info->attrs[NL80211_ATTR_TDLS_DIALOG_TOKEN] || !info->attrs[NL80211_ATTR_IE] || !info->attrs[NL80211_ATTR_MAC]) return -EINVAL; peer = nla_data(info->attrs[NL80211_ATTR_MAC]); action_code = nla_get_u8(info->attrs[NL80211_ATTR_TDLS_ACTION]); status_code = nla_get_u16(info->attrs[NL80211_ATTR_STATUS_CODE]); dialog_token = nla_get_u8(info->attrs[NL80211_ATTR_TDLS_DIALOG_TOKEN]); initiator = nla_get_flag(info->attrs[NL80211_ATTR_TDLS_INITIATOR]); if (info->attrs[NL80211_ATTR_TDLS_PEER_CAPABILITY]) peer_capability = nla_get_u32(info->attrs[NL80211_ATTR_TDLS_PEER_CAPABILITY]); return rdev_tdls_mgmt(rdev, dev, peer, action_code, dialog_token, status_code, peer_capability, initiator, nla_data(info->attrs[NL80211_ATTR_IE]), nla_len(info->attrs[NL80211_ATTR_IE])); } static int nl80211_tdls_oper(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; enum nl80211_tdls_operation operation; u8 *peer; if (!(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS) || !rdev->ops->tdls_oper) return -EOPNOTSUPP; if (!info->attrs[NL80211_ATTR_TDLS_OPERATION] || !info->attrs[NL80211_ATTR_MAC]) return -EINVAL; operation = nla_get_u8(info->attrs[NL80211_ATTR_TDLS_OPERATION]); peer = nla_data(info->attrs[NL80211_ATTR_MAC]); return rdev_tdls_oper(rdev, dev, peer, operation); } static int nl80211_remain_on_channel(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct wireless_dev *wdev = info->user_ptr[1]; struct cfg80211_chan_def chandef; const struct cfg80211_chan_def *compat_chandef; struct sk_buff *msg; void *hdr; u64 cookie; u32 duration; int err; 
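	/*
	 * Remain-on-channel: validate the requested duration against the
	 * minimum (10ms) and the driver's advertised maximum, parse the
	 * channel definition, and make sure an off-channel operation is
	 * allowed right now (or that the requested channel is compatible
	 * with the one already in use). On success, userspace gets back a
	 * 64-bit cookie identifying this operation.
	 */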
if (!info->attrs[NL80211_ATTR_WIPHY_FREQ] || !info->attrs[NL80211_ATTR_DURATION]) return -EINVAL; duration = nla_get_u32(info->attrs[NL80211_ATTR_DURATION]); if (!rdev->ops->remain_on_channel || !(rdev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL)) return -EOPNOTSUPP; /* * We should be on that channel for at least a minimum amount of * time (10ms) but no longer than the driver supports. */ if (duration < NL80211_MIN_REMAIN_ON_CHANNEL_TIME || duration > rdev->wiphy.max_remain_on_channel_duration) return -EINVAL; err = nl80211_parse_chandef(rdev, info, &chandef); if (err) return err; wdev_lock(wdev); if (!cfg80211_off_channel_oper_allowed(wdev) && !cfg80211_chandef_identical(&wdev->chandef, &chandef)) { compat_chandef = cfg80211_chandef_compatible(&wdev->chandef, &chandef); if (compat_chandef != &chandef) { wdev_unlock(wdev); return -EBUSY; } } wdev_unlock(wdev); msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; hdr = nl80211hdr_put(msg, genl_info_snd_portid(info), info->snd_seq, 0, NL80211_CMD_REMAIN_ON_CHANNEL); if (!hdr) { err = -ENOBUFS; goto free_msg; } err = rdev_remain_on_channel(rdev, wdev, chandef.chan, duration, &cookie); if (err) goto free_msg; if (nla_put_u64_64bit(msg, NL80211_ATTR_COOKIE, cookie, NL80211_ATTR_PAD)) goto nla_put_failure; genlmsg_end(msg, hdr); return genlmsg_reply(msg, info); nla_put_failure: err = -ENOBUFS; free_msg: nlmsg_free(msg); return err; } static int nl80211_cancel_remain_on_channel(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct wireless_dev *wdev = info->user_ptr[1]; u64 cookie; if (!info->attrs[NL80211_ATTR_COOKIE]) return -EINVAL; if (!rdev->ops->cancel_remain_on_channel) return -EOPNOTSUPP; cookie = nla_get_u64(info->attrs[NL80211_ATTR_COOKIE]); return rdev_cancel_remain_on_channel(rdev, wdev, cookie); } static int nl80211_set_tx_bitrate_mask(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_bitrate_mask mask; struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; int err; if (!rdev->ops->set_bitrate_mask) return -EOPNOTSUPP; err = nl80211_parse_tx_bitrate_mask(info, &mask); if (err) return err; return rdev_set_bitrate_mask(rdev, dev, NULL, &mask); } static int nl80211_register_mgmt(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct wireless_dev *wdev = info->user_ptr[1]; u16 frame_type = IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ACTION; if (!info->attrs[NL80211_ATTR_FRAME_MATCH]) return -EINVAL; if (info->attrs[NL80211_ATTR_FRAME_TYPE]) frame_type = nla_get_u16(info->attrs[NL80211_ATTR_FRAME_TYPE]); switch (wdev->iftype) { case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_ADHOC: case NL80211_IFTYPE_P2P_CLIENT: case NL80211_IFTYPE_AP: case NL80211_IFTYPE_AP_VLAN: case NL80211_IFTYPE_MESH_POINT: case NL80211_IFTYPE_P2P_GO: case NL80211_IFTYPE_P2P_DEVICE: break; case NL80211_IFTYPE_NAN: default: return -EOPNOTSUPP; } /* not much point in registering if we can't reply */ if (!rdev->ops->mgmt_tx) return -EOPNOTSUPP; return cfg80211_mlme_register_mgmt(wdev, genl_info_snd_portid(info), frame_type, nla_data(info->attrs[NL80211_ATTR_FRAME_MATCH]), nla_len(info->attrs[NL80211_ATTR_FRAME_MATCH])); } static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct wireless_dev *wdev = info->user_ptr[1]; struct cfg80211_chan_def chandef; int err; void *hdr = NULL; u64 
cookie; struct sk_buff *msg = NULL; struct cfg80211_mgmt_tx_params params = { .dont_wait_for_ack = info->attrs[NL80211_ATTR_DONT_WAIT_FOR_ACK], }; if (!info->attrs[NL80211_ATTR_FRAME]) return -EINVAL; if (!rdev->ops->mgmt_tx) return -EOPNOTSUPP; switch (wdev->iftype) { case NL80211_IFTYPE_P2P_DEVICE: if (!info->attrs[NL80211_ATTR_WIPHY_FREQ]) return -EINVAL; case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_ADHOC: case NL80211_IFTYPE_P2P_CLIENT: case NL80211_IFTYPE_AP: case NL80211_IFTYPE_AP_VLAN: case NL80211_IFTYPE_MESH_POINT: case NL80211_IFTYPE_P2P_GO: break; case NL80211_IFTYPE_NAN: default: return -EOPNOTSUPP; } if (info->attrs[NL80211_ATTR_DURATION]) { if (!(rdev->wiphy.flags & WIPHY_FLAG_OFFCHAN_TX)) return -EINVAL; params.wait = nla_get_u32(info->attrs[NL80211_ATTR_DURATION]); /* * We should wait on the channel for at least a minimum amount * of time (10ms) but no longer than the driver supports. */ if (params.wait < NL80211_MIN_REMAIN_ON_CHANNEL_TIME || params.wait > rdev->wiphy.max_remain_on_channel_duration) return -EINVAL; } params.offchan = info->attrs[NL80211_ATTR_OFFCHANNEL_TX_OK]; if (params.offchan && !(rdev->wiphy.flags & WIPHY_FLAG_OFFCHAN_TX)) return -EINVAL; params.no_cck = nla_get_flag(info->attrs[NL80211_ATTR_TX_NO_CCK_RATE]); /* get the channel if any has been specified, otherwise pass NULL to * the driver. The latter will use the current one */ chandef.chan = NULL; if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) { err = nl80211_parse_chandef(rdev, info, &chandef); if (err) return err; } if (!chandef.chan && params.offchan) return -EINVAL; wdev_lock(wdev); if (params.offchan && !cfg80211_off_channel_oper_allowed(wdev)) { wdev_unlock(wdev); return -EBUSY; } wdev_unlock(wdev); params.buf = nla_data(info->attrs[NL80211_ATTR_FRAME]); params.len = nla_len(info->attrs[NL80211_ATTR_FRAME]); if (info->attrs[NL80211_ATTR_CSA_C_OFFSETS_TX]) { int len = nla_len(info->attrs[NL80211_ATTR_CSA_C_OFFSETS_TX]); int i; if (len % sizeof(u16)) return -EINVAL; params.n_csa_offsets = len / sizeof(u16); params.csa_offsets = nla_data(info->attrs[NL80211_ATTR_CSA_C_OFFSETS_TX]); /* check that all the offsets fit the frame */ for (i = 0; i < params.n_csa_offsets; i++) { if (params.csa_offsets[i] >= params.len) return -EINVAL; } } if (!params.dont_wait_for_ack) { msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; hdr = nl80211hdr_put(msg, genl_info_snd_portid(info), info->snd_seq, 0, NL80211_CMD_FRAME); if (!hdr) { err = -ENOBUFS; goto free_msg; } } params.chan = chandef.chan; err = cfg80211_mlme_mgmt_tx(rdev, wdev, ¶ms, &cookie); if (err) goto free_msg; if (msg) { if (nla_put_u64_64bit(msg, NL80211_ATTR_COOKIE, cookie, NL80211_ATTR_PAD)) goto nla_put_failure; genlmsg_end(msg, hdr); return genlmsg_reply(msg, info); } return 0; nla_put_failure: err = -ENOBUFS; free_msg: nlmsg_free(msg); return err; } static int nl80211_tx_mgmt_cancel_wait(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct wireless_dev *wdev = info->user_ptr[1]; u64 cookie; if (!info->attrs[NL80211_ATTR_COOKIE]) return -EINVAL; if (!rdev->ops->mgmt_tx_cancel_wait) return -EOPNOTSUPP; switch (wdev->iftype) { case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_ADHOC: case NL80211_IFTYPE_P2P_CLIENT: case NL80211_IFTYPE_AP: case NL80211_IFTYPE_AP_VLAN: case NL80211_IFTYPE_P2P_GO: case NL80211_IFTYPE_P2P_DEVICE: break; case NL80211_IFTYPE_NAN: default: return -EOPNOTSUPP; } cookie = nla_get_u64(info->attrs[NL80211_ATTR_COOKIE]); return 
rdev_mgmt_tx_cancel_wait(rdev, wdev, cookie); } static int nl80211_set_power_save(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct wireless_dev *wdev; struct net_device *dev = info->user_ptr[1]; u8 ps_state; bool state; int err; if (!info->attrs[NL80211_ATTR_PS_STATE]) return -EINVAL; ps_state = nla_get_u32(info->attrs[NL80211_ATTR_PS_STATE]); wdev = dev->ieee80211_ptr; if (!rdev->ops->set_power_mgmt) return -EOPNOTSUPP; state = (ps_state == NL80211_PS_ENABLED) ? true : false; if (state == wdev->ps) return 0; err = rdev_set_power_mgmt(rdev, dev, state, wdev->ps_timeout); if (!err) wdev->ps = state; return err; } static int nl80211_get_power_save(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; enum nl80211_ps_state ps_state; struct wireless_dev *wdev; struct net_device *dev = info->user_ptr[1]; struct sk_buff *msg; void *hdr; int err; wdev = dev->ieee80211_ptr; if (!rdev->ops->set_power_mgmt) return -EOPNOTSUPP; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; hdr = nl80211hdr_put(msg, genl_info_snd_portid(info), info->snd_seq, 0, NL80211_CMD_GET_POWER_SAVE); if (!hdr) { err = -ENOBUFS; goto free_msg; } if (wdev->ps) ps_state = NL80211_PS_ENABLED; else ps_state = NL80211_PS_DISABLED; if (nla_put_u32(msg, NL80211_ATTR_PS_STATE, ps_state)) goto nla_put_failure; genlmsg_end(msg, hdr); return genlmsg_reply(msg, info); nla_put_failure: err = -ENOBUFS; free_msg: nlmsg_free(msg); return err; } static const struct nla_policy nl80211_attr_cqm_policy[NL80211_ATTR_CQM_MAX + 1] = { [NL80211_ATTR_CQM_RSSI_THOLD] = { .type = NLA_BINARY }, [NL80211_ATTR_CQM_RSSI_HYST] = { .type = NLA_U32 }, [NL80211_ATTR_CQM_RSSI_THRESHOLD_EVENT] = { .type = NLA_U32 }, [NL80211_ATTR_CQM_TXE_RATE] = { .type = NLA_U32 }, [NL80211_ATTR_CQM_TXE_PKTS] = { .type = NLA_U32 }, [NL80211_ATTR_CQM_TXE_INTVL] = { .type = NLA_U32 }, [NL80211_ATTR_CQM_RSSI_LEVEL] = { .type = NLA_S32 }, }; static int nl80211_set_cqm_txe(struct genl_info *info, u32 rate, u32 pkts, u32 intvl) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; struct wireless_dev *wdev = dev->ieee80211_ptr; if (rate > 100 || intvl > NL80211_CQM_TXE_MAX_INTVL) return -EINVAL; if (!rdev->ops->set_cqm_txe_config) return -EOPNOTSUPP; if (wdev->iftype != NL80211_IFTYPE_STATION && wdev->iftype != NL80211_IFTYPE_P2P_CLIENT) return -EOPNOTSUPP; return rdev_set_cqm_txe_config(rdev, dev, rate, pkts, intvl); } static int cfg80211_cqm_rssi_update(struct cfg80211_registered_device *rdev, struct net_device *dev) { struct wireless_dev *wdev = dev->ieee80211_ptr; s32 last, low, high; u32 hyst; int i, n, low_index; int err; /* RSSI reporting disabled? */ if (!wdev->cqm_config) return rdev_set_cqm_rssi_range_config(rdev, dev, 0, 0); /* * Obtain current RSSI value if possible, if not and no RSSI threshold * event has been received yet, we should receive an event after a * connection is established and enough beacons received to calculate * the average. 
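	 *
	 * Illustrative example (made-up numbers): with sorted thresholds
	 * {-70, -50} dBm and hysteresis 3 dB, a last reported level of
	 * -60 dBm falls between the two thresholds, so the code below
	 * programs the range [-73, -48]: low = thresholds[0] - hyst and
	 * high = thresholds[1] + hyst - 1. A new event is generated only
	 * once the RSSI leaves that range.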
*/ if (!wdev->cqm_config->last_rssi_event_value && wdev->current_bss && rdev->ops->get_station) { struct station_info sinfo = {}; u8 *mac_addr; mac_addr = wdev->current_bss->pub.bssid; err = rdev_get_station(rdev, dev, mac_addr, &sinfo); if (err) return err; if (sinfo.filled & BIT_ULL(NL80211_STA_INFO_BEACON_SIGNAL_AVG)) wdev->cqm_config->last_rssi_event_value = (s8) sinfo.rx_beacon_signal_avg; } last = wdev->cqm_config->last_rssi_event_value; hyst = wdev->cqm_config->rssi_hyst; n = wdev->cqm_config->n_rssi_thresholds; for (i = 0; i < n; i++) if (last < wdev->cqm_config->rssi_thresholds[i]) break; low_index = i - 1; if (low_index >= 0) { low_index = array_index_nospec(low_index, n); low = wdev->cqm_config->rssi_thresholds[low_index] - hyst; } else { low = S32_MIN; } if (i < n) { i = array_index_nospec(i, n); high = wdev->cqm_config->rssi_thresholds[i] + hyst - 1; } else { high = S32_MAX; } return rdev_set_cqm_rssi_range_config(rdev, dev, low, high); } static int nl80211_set_cqm_rssi(struct genl_info *info, const s32 *thresholds, int n_thresholds, u32 hysteresis) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; struct wireless_dev *wdev = dev->ieee80211_ptr; int i, err; s32 prev = S32_MIN; /* Check all values negative and sorted */ for (i = 0; i < n_thresholds; i++) { if (thresholds[i] > 0 || thresholds[i] <= prev) return -EINVAL; prev = thresholds[i]; } if (wdev->iftype != NL80211_IFTYPE_STATION && wdev->iftype != NL80211_IFTYPE_P2P_CLIENT) return -EOPNOTSUPP; wdev_lock(wdev); cfg80211_cqm_config_free(wdev); wdev_unlock(wdev); if (n_thresholds <= 1 && rdev->ops->set_cqm_rssi_config) { if (n_thresholds == 0 || thresholds[0] == 0) /* Disabling */ return rdev_set_cqm_rssi_config(rdev, dev, 0, 0); return rdev_set_cqm_rssi_config(rdev, dev, thresholds[0], hysteresis); } if (!wiphy_ext_feature_isset(&rdev->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST)) return -EOPNOTSUPP; if (n_thresholds == 1 && thresholds[0] == 0) /* Disabling */ n_thresholds = 0; wdev_lock(wdev); if (n_thresholds) { struct cfg80211_cqm_config *cqm_config; cqm_config = kzalloc(sizeof(struct cfg80211_cqm_config) + n_thresholds * sizeof(s32), GFP_KERNEL); if (!cqm_config) { err = -ENOMEM; goto unlock; } cqm_config->rssi_hyst = hysteresis; cqm_config->n_rssi_thresholds = n_thresholds; memcpy(cqm_config->rssi_thresholds, thresholds, n_thresholds * sizeof(s32)); wdev->cqm_config = cqm_config; } err = cfg80211_cqm_rssi_update(rdev, dev); unlock: wdev_unlock(wdev); return err; } static int nl80211_set_cqm(struct sk_buff *skb, struct genl_info *info) { struct nlattr *attrs[NL80211_ATTR_CQM_MAX + 1]; struct nlattr *cqm; int err; cqm = info->attrs[NL80211_ATTR_CQM]; if (!cqm) return -EINVAL; err = nla_parse_nested(attrs, NL80211_ATTR_CQM_MAX, cqm, nl80211_attr_cqm_policy, genl_info_extack(info)); if (err) return err; if (attrs[NL80211_ATTR_CQM_RSSI_THOLD] && attrs[NL80211_ATTR_CQM_RSSI_HYST]) { const s32 *thresholds = nla_data(attrs[NL80211_ATTR_CQM_RSSI_THOLD]); int len = nla_len(attrs[NL80211_ATTR_CQM_RSSI_THOLD]); u32 hysteresis = nla_get_u32(attrs[NL80211_ATTR_CQM_RSSI_HYST]); if (len % 4) return -EINVAL; return nl80211_set_cqm_rssi(info, thresholds, len / 4, hysteresis); } if (attrs[NL80211_ATTR_CQM_TXE_RATE] && attrs[NL80211_ATTR_CQM_TXE_PKTS] && attrs[NL80211_ATTR_CQM_TXE_INTVL]) { u32 rate = nla_get_u32(attrs[NL80211_ATTR_CQM_TXE_RATE]); u32 pkts = nla_get_u32(attrs[NL80211_ATTR_CQM_TXE_PKTS]); u32 intvl = nla_get_u32(attrs[NL80211_ATTR_CQM_TXE_INTVL]); return 
nl80211_set_cqm_txe(info, rate, pkts, intvl); } return -EINVAL; } static int nl80211_join_ocb(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; struct ocb_setup setup = {}; int err; err = nl80211_parse_chandef(rdev, info, &setup.chandef); if (err) return err; return cfg80211_join_ocb(rdev, dev, &setup); } static int nl80211_leave_ocb(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; return cfg80211_leave_ocb(rdev, dev); } static int nl80211_join_mesh(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; struct mesh_config cfg; struct mesh_setup setup; int err; /* start with default */ memcpy(&cfg, &default_mesh_config, sizeof(cfg)); memcpy(&setup, &default_mesh_setup, sizeof(setup)); if (info->attrs[NL80211_ATTR_MESH_CONFIG]) { /* and parse parameters if given */ err = nl80211_parse_mesh_config(info, &cfg, NULL); if (err) return err; } if (!info->attrs[NL80211_ATTR_MESH_ID] || !nla_len(info->attrs[NL80211_ATTR_MESH_ID])) return -EINVAL; setup.mesh_id = nla_data(info->attrs[NL80211_ATTR_MESH_ID]); setup.mesh_id_len = nla_len(info->attrs[NL80211_ATTR_MESH_ID]); if (info->attrs[NL80211_ATTR_MCAST_RATE] && !nl80211_parse_mcast_rate(rdev, setup.mcast_rate, nla_get_u32(info->attrs[NL80211_ATTR_MCAST_RATE]))) return -EINVAL; if (info->attrs[NL80211_ATTR_BEACON_INTERVAL]) { setup.beacon_interval = nla_get_u32(info->attrs[NL80211_ATTR_BEACON_INTERVAL]); err = cfg80211_validate_beacon_int(rdev, NL80211_IFTYPE_MESH_POINT, setup.beacon_interval); if (err) return err; } if (info->attrs[NL80211_ATTR_DTIM_PERIOD]) { setup.dtim_period = nla_get_u32(info->attrs[NL80211_ATTR_DTIM_PERIOD]); if (setup.dtim_period < 1 || setup.dtim_period > 100) return -EINVAL; } if (info->attrs[NL80211_ATTR_MESH_SETUP]) { /* parse additional setup parameters if given */ err = nl80211_parse_mesh_setup(info, &setup); if (err) return err; } if (setup.user_mpm) cfg.auto_open_plinks = false; if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) { err = nl80211_parse_chandef(rdev, info, &setup.chandef); if (err) return err; } else { /* __cfg80211_join_mesh() will sort it out */ setup.chandef.chan = NULL; } if (info->attrs[NL80211_ATTR_BSS_BASIC_RATES]) { u8 *rates = nla_data(info->attrs[NL80211_ATTR_BSS_BASIC_RATES]); int n_rates = nla_len(info->attrs[NL80211_ATTR_BSS_BASIC_RATES]); struct ieee80211_supported_band *sband; if (!setup.chandef.chan) return -EINVAL; sband = rdev->wiphy.bands[setup.chandef.chan->band]; err = ieee80211_get_ratemask(sband, rates, n_rates, &setup.basic_rates); if (err) return err; } if (info->attrs[NL80211_ATTR_TX_RATES]) { err = nl80211_parse_tx_bitrate_mask(info, &setup.beacon_rate); if (err) return err; if (!setup.chandef.chan) return -EINVAL; err = validate_beacon_tx_rate(rdev, setup.chandef.chan->band, &setup.beacon_rate); if (err) return err; } setup.userspace_handles_dfs = nla_get_flag(info->attrs[NL80211_ATTR_HANDLE_DFS]); if (info->attrs[NL80211_ATTR_CONTROL_PORT_OVER_NL80211]) { int r = validate_pae_over_nl80211(rdev, info); if (r < 0) return r; setup.control_port_over_nl80211 = true; } wdev_lock(dev->ieee80211_ptr); err = __cfg80211_join_mesh(rdev, dev, &setup, &cfg); if (!err && info->attrs[NL80211_ATTR_SOCKET_OWNER]) dev->ieee80211_ptr->conn_owner_nlportid = genl_info_snd_portid(info); 
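	/*
	 * Recording the owner's netlink port id makes this a socket-owned
	 * mesh connection: cfg80211 tears the mesh down automatically if
	 * the owning socket is closed.
	 */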
wdev_unlock(dev->ieee80211_ptr); return err; } static int nl80211_leave_mesh(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; return cfg80211_leave_mesh(rdev, dev); } #ifdef CONFIG_PM static int nl80211_send_wowlan_patterns(struct sk_buff *msg, struct cfg80211_registered_device *rdev) { struct cfg80211_wowlan *wowlan = rdev->wiphy.wowlan_config; struct nlattr *nl_pats, *nl_pat; int i, pat_len; if (!wowlan->n_patterns) return 0; nl_pats = nla_nest_start(msg, NL80211_WOWLAN_TRIG_PKT_PATTERN); if (!nl_pats) return -ENOBUFS; for (i = 0; i < wowlan->n_patterns; i++) { nl_pat = nla_nest_start(msg, i + 1); if (!nl_pat) return -ENOBUFS; pat_len = wowlan->patterns[i].pattern_len; if (nla_put(msg, NL80211_PKTPAT_MASK, DIV_ROUND_UP(pat_len, 8), wowlan->patterns[i].mask) || nla_put(msg, NL80211_PKTPAT_PATTERN, pat_len, wowlan->patterns[i].pattern) || nla_put_u32(msg, NL80211_PKTPAT_OFFSET, wowlan->patterns[i].pkt_offset)) return -ENOBUFS; nla_nest_end(msg, nl_pat); } nla_nest_end(msg, nl_pats); return 0; } static int nl80211_send_wowlan_tcp(struct sk_buff *msg, struct cfg80211_wowlan_tcp *tcp) { struct nlattr *nl_tcp; if (!tcp) return 0; nl_tcp = nla_nest_start(msg, NL80211_WOWLAN_TRIG_TCP_CONNECTION); if (!nl_tcp) return -ENOBUFS; if (nla_put_in_addr(msg, NL80211_WOWLAN_TCP_SRC_IPV4, tcp->src) || nla_put_in_addr(msg, NL80211_WOWLAN_TCP_DST_IPV4, tcp->dst) || nla_put(msg, NL80211_WOWLAN_TCP_DST_MAC, ETH_ALEN, tcp->dst_mac) || nla_put_u16(msg, NL80211_WOWLAN_TCP_SRC_PORT, tcp->src_port) || nla_put_u16(msg, NL80211_WOWLAN_TCP_DST_PORT, tcp->dst_port) || nla_put(msg, NL80211_WOWLAN_TCP_DATA_PAYLOAD, tcp->payload_len, tcp->payload) || nla_put_u32(msg, NL80211_WOWLAN_TCP_DATA_INTERVAL, tcp->data_interval) || nla_put(msg, NL80211_WOWLAN_TCP_WAKE_PAYLOAD, tcp->wake_len, tcp->wake_data) || nla_put(msg, NL80211_WOWLAN_TCP_WAKE_MASK, DIV_ROUND_UP(tcp->wake_len, 8), tcp->wake_mask)) return -ENOBUFS; if (tcp->payload_seq.len && nla_put(msg, NL80211_WOWLAN_TCP_DATA_PAYLOAD_SEQ, sizeof(tcp->payload_seq), &tcp->payload_seq)) return -ENOBUFS; if (tcp->payload_tok.len && nla_put(msg, NL80211_WOWLAN_TCP_DATA_PAYLOAD_TOKEN, sizeof(tcp->payload_tok) + tcp->tokens_size, &tcp->payload_tok)) return -ENOBUFS; nla_nest_end(msg, nl_tcp); return 0; } static int nl80211_send_wowlan_nd(struct sk_buff *msg, struct cfg80211_sched_scan_request *req) { struct nlattr *nd, *freqs, *matches, *match, *scan_plans, *scan_plan; int i; if (!req) return 0; nd = nla_nest_start(msg, NL80211_WOWLAN_TRIG_NET_DETECT); if (!nd) return -ENOBUFS; if (req->n_scan_plans == 1 && nla_put_u32(msg, NL80211_ATTR_SCHED_SCAN_INTERVAL, req->scan_plans[0].interval * 1000)) return -ENOBUFS; if (nla_put_u32(msg, NL80211_ATTR_SCHED_SCAN_DELAY, req->delay)) return -ENOBUFS; if (req->relative_rssi_set) { struct nl80211_bss_select_rssi_adjust rssi_adjust; if (nla_put_s8(msg, NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI, req->relative_rssi)) return -ENOBUFS; rssi_adjust.band = req->rssi_adjust.band; rssi_adjust.delta = req->rssi_adjust.delta; if (nla_put(msg, NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST, sizeof(rssi_adjust), &rssi_adjust)) return -ENOBUFS; } freqs = nla_nest_start(msg, NL80211_ATTR_SCAN_FREQUENCIES); if (!freqs) return -ENOBUFS; for (i = 0; i < req->n_channels; i++) { if (nla_put_u32(msg, i, req->channels[i]->center_freq)) return -ENOBUFS; } nla_nest_end(msg, freqs); if (req->n_match_sets) { matches = nla_nest_start(msg, NL80211_ATTR_SCHED_SCAN_MATCH); if (!matches) 
return -ENOBUFS; for (i = 0; i < req->n_match_sets; i++) { match = nla_nest_start(msg, i); if (!match) return -ENOBUFS; if (nla_put(msg, NL80211_SCHED_SCAN_MATCH_ATTR_SSID, req->match_sets[i].ssid.ssid_len, req->match_sets[i].ssid.ssid)) return -ENOBUFS; nla_nest_end(msg, match); } nla_nest_end(msg, matches); } scan_plans = nla_nest_start(msg, NL80211_ATTR_SCHED_SCAN_PLANS); if (!scan_plans) return -ENOBUFS; for (i = 0; i < req->n_scan_plans; i++) { scan_plan = nla_nest_start(msg, i + 1); if (!scan_plan) return -ENOBUFS; if (nla_put_u32(msg, NL80211_SCHED_SCAN_PLAN_INTERVAL, req->scan_plans[i].interval) || (req->scan_plans[i].iterations && nla_put_u32(msg, NL80211_SCHED_SCAN_PLAN_ITERATIONS, req->scan_plans[i].iterations))) return -ENOBUFS; nla_nest_end(msg, scan_plan); } nla_nest_end(msg, scan_plans); nla_nest_end(msg, nd); return 0; } static int nl80211_get_wowlan(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct sk_buff *msg; void *hdr; u32 size = NLMSG_DEFAULT_SIZE; if (!rdev->wiphy.wowlan) return -EOPNOTSUPP; if (rdev->wiphy.wowlan_config && rdev->wiphy.wowlan_config->tcp) { /* adjust size to have room for all the data */ size += rdev->wiphy.wowlan_config->tcp->tokens_size + rdev->wiphy.wowlan_config->tcp->payload_len + rdev->wiphy.wowlan_config->tcp->wake_len + rdev->wiphy.wowlan_config->tcp->wake_len / 8; } msg = nlmsg_new(size, GFP_KERNEL); if (!msg) return -ENOMEM; hdr = nl80211hdr_put(msg, genl_info_snd_portid(info), info->snd_seq, 0, NL80211_CMD_GET_WOWLAN); if (!hdr) goto nla_put_failure; if (rdev->wiphy.wowlan_config) { struct nlattr *nl_wowlan; nl_wowlan = nla_nest_start(msg, NL80211_ATTR_WOWLAN_TRIGGERS); if (!nl_wowlan) goto nla_put_failure; if ((rdev->wiphy.wowlan_config->any && nla_put_flag(msg, NL80211_WOWLAN_TRIG_ANY)) || (rdev->wiphy.wowlan_config->disconnect && nla_put_flag(msg, NL80211_WOWLAN_TRIG_DISCONNECT)) || (rdev->wiphy.wowlan_config->magic_pkt && nla_put_flag(msg, NL80211_WOWLAN_TRIG_MAGIC_PKT)) || (rdev->wiphy.wowlan_config->gtk_rekey_failure && nla_put_flag(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE)) || (rdev->wiphy.wowlan_config->eap_identity_req && nla_put_flag(msg, NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST)) || (rdev->wiphy.wowlan_config->four_way_handshake && nla_put_flag(msg, NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE)) || (rdev->wiphy.wowlan_config->rfkill_release && nla_put_flag(msg, NL80211_WOWLAN_TRIG_RFKILL_RELEASE))) goto nla_put_failure; if (nl80211_send_wowlan_patterns(msg, rdev)) goto nla_put_failure; if (nl80211_send_wowlan_tcp(msg, rdev->wiphy.wowlan_config->tcp)) goto nla_put_failure; if (nl80211_send_wowlan_nd( msg, rdev->wiphy.wowlan_config->nd_config)) goto nla_put_failure; nla_nest_end(msg, nl_wowlan); } genlmsg_end(msg, hdr); return genlmsg_reply(msg, info); nla_put_failure: nlmsg_free(msg); return -ENOBUFS; } static int nl80211_parse_wowlan_tcp(struct cfg80211_registered_device *rdev, struct nlattr *attr, struct cfg80211_wowlan *trig) { struct nlattr *tb[NUM_NL80211_WOWLAN_TCP]; struct cfg80211_wowlan_tcp *cfg; struct nl80211_wowlan_tcp_data_token *tok = NULL; struct nl80211_wowlan_tcp_data_seq *seq = NULL; u32 size; u32 data_size, wake_size, tokens_size = 0, wake_mask_size; int err, port; if (!rdev->wiphy.wowlan->tcp) return -EINVAL; err = nla_parse_nested(tb, MAX_NL80211_WOWLAN_TCP, attr, nl80211_wowlan_tcp_policy, NULL); if (err) return err; if (!tb[NL80211_WOWLAN_TCP_SRC_IPV4] || !tb[NL80211_WOWLAN_TCP_DST_IPV4] || !tb[NL80211_WOWLAN_TCP_DST_MAC] || 
!tb[NL80211_WOWLAN_TCP_DST_PORT] || !tb[NL80211_WOWLAN_TCP_DATA_PAYLOAD] || !tb[NL80211_WOWLAN_TCP_DATA_INTERVAL] || !tb[NL80211_WOWLAN_TCP_WAKE_PAYLOAD] || !tb[NL80211_WOWLAN_TCP_WAKE_MASK]) return -EINVAL; data_size = nla_len(tb[NL80211_WOWLAN_TCP_DATA_PAYLOAD]); if (data_size > rdev->wiphy.wowlan->tcp->data_payload_max) return -EINVAL; if (nla_get_u32(tb[NL80211_WOWLAN_TCP_DATA_INTERVAL]) > rdev->wiphy.wowlan->tcp->data_interval_max || nla_get_u32(tb[NL80211_WOWLAN_TCP_DATA_INTERVAL]) == 0) return -EINVAL; wake_size = nla_len(tb[NL80211_WOWLAN_TCP_WAKE_PAYLOAD]); if (wake_size > rdev->wiphy.wowlan->tcp->wake_payload_max) return -EINVAL; wake_mask_size = nla_len(tb[NL80211_WOWLAN_TCP_WAKE_MASK]); if (wake_mask_size != DIV_ROUND_UP(wake_size, 8)) return -EINVAL; if (tb[NL80211_WOWLAN_TCP_DATA_PAYLOAD_TOKEN]) { u32 tokln = nla_len(tb[NL80211_WOWLAN_TCP_DATA_PAYLOAD_TOKEN]); tok = nla_data(tb[NL80211_WOWLAN_TCP_DATA_PAYLOAD_TOKEN]); tokens_size = tokln - sizeof(*tok); if (!tok->len || tokens_size % tok->len) return -EINVAL; if (!rdev->wiphy.wowlan->tcp->tok) return -EINVAL; if (tok->len > rdev->wiphy.wowlan->tcp->tok->max_len) return -EINVAL; if (tok->len < rdev->wiphy.wowlan->tcp->tok->min_len) return -EINVAL; if (tokens_size > rdev->wiphy.wowlan->tcp->tok->bufsize) return -EINVAL; if (tok->offset + tok->len > data_size) return -EINVAL; } if (tb[NL80211_WOWLAN_TCP_DATA_PAYLOAD_SEQ]) { seq = nla_data(tb[NL80211_WOWLAN_TCP_DATA_PAYLOAD_SEQ]); if (!rdev->wiphy.wowlan->tcp->seq) return -EINVAL; if (seq->len == 0 || seq->len > 4) return -EINVAL; if (seq->len + seq->offset > data_size) return -EINVAL; } size = sizeof(*cfg); size += data_size; size += wake_size + wake_mask_size; size += tokens_size; cfg = kzalloc(size, GFP_KERNEL); if (!cfg) return -ENOMEM; cfg->src = nla_get_in_addr(tb[NL80211_WOWLAN_TCP_SRC_IPV4]); cfg->dst = nla_get_in_addr(tb[NL80211_WOWLAN_TCP_DST_IPV4]); memcpy(cfg->dst_mac, nla_data(tb[NL80211_WOWLAN_TCP_DST_MAC]), ETH_ALEN); if (tb[NL80211_WOWLAN_TCP_SRC_PORT]) port = nla_get_u16(tb[NL80211_WOWLAN_TCP_SRC_PORT]); else port = 0; #ifdef CONFIG_INET /* allocate a socket and port for it and use it */ err = __sock_create(wiphy_net(&rdev->wiphy), PF_INET, SOCK_STREAM, IPPROTO_TCP, &cfg->sock, 1); if (err) { kfree(cfg); return err; } if (inet_csk_get_port(cfg->sock->sk, port)) { sock_release(cfg->sock); kfree(cfg); return -EADDRINUSE; } cfg->src_port = inet_sk(cfg->sock->sk)->inet_num; #else if (!port) { kfree(cfg); return -EINVAL; } cfg->src_port = port; #endif cfg->dst_port = nla_get_u16(tb[NL80211_WOWLAN_TCP_DST_PORT]); cfg->payload_len = data_size; cfg->payload = (u8 *)cfg + sizeof(*cfg) + tokens_size; memcpy((void *)cfg->payload, nla_data(tb[NL80211_WOWLAN_TCP_DATA_PAYLOAD]), data_size); if (seq) cfg->payload_seq = *seq; cfg->data_interval = nla_get_u32(tb[NL80211_WOWLAN_TCP_DATA_INTERVAL]); cfg->wake_len = wake_size; cfg->wake_data = (u8 *)cfg + sizeof(*cfg) + tokens_size + data_size; memcpy((void *)cfg->wake_data, nla_data(tb[NL80211_WOWLAN_TCP_WAKE_PAYLOAD]), wake_size); cfg->wake_mask = (u8 *)cfg + sizeof(*cfg) + tokens_size + data_size + wake_size; memcpy((void *)cfg->wake_mask, nla_data(tb[NL80211_WOWLAN_TCP_WAKE_MASK]), wake_mask_size); if (tok) { cfg->tokens_size = tokens_size; memcpy(&cfg->payload_tok, tok, sizeof(*tok) + tokens_size); } trig->tcp = cfg; return 0; } static int nl80211_parse_wowlan_nd(struct cfg80211_registered_device *rdev, const struct wiphy_wowlan_support *wowlan, struct nlattr *attr, struct cfg80211_wowlan *trig) { struct nlattr **tb; int err; 
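	/*
	 * The attribute table is heap-allocated: an array of
	 * NUM_NL80211_ATTR pointers would be too large for the kernel
	 * stack.
	 */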
tb = kcalloc(NUM_NL80211_ATTR, sizeof(*tb), GFP_KERNEL); if (!tb) return -ENOMEM; if (!(wowlan->flags & WIPHY_WOWLAN_NET_DETECT)) { err = -EOPNOTSUPP; goto out; } err = nla_parse_nested(tb, NL80211_ATTR_MAX, attr, nl80211_policy, NULL); if (err) goto out; trig->nd_config = nl80211_parse_sched_scan(&rdev->wiphy, NULL, tb, wowlan->max_nd_match_sets); err = PTR_ERR_OR_ZERO(trig->nd_config); if (err) trig->nd_config = NULL; out: kfree(tb); return err; } static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct nlattr *tb[NUM_NL80211_WOWLAN_TRIG]; struct cfg80211_wowlan new_triggers = {}; struct cfg80211_wowlan *ntrig; const struct wiphy_wowlan_support *wowlan = rdev->wiphy.wowlan; int err, i; bool prev_enabled = rdev->wiphy.wowlan_config; bool regular = false; if (!wowlan) return -EOPNOTSUPP; if (!info->attrs[NL80211_ATTR_WOWLAN_TRIGGERS]) { cfg80211_rdev_free_wowlan(rdev); rdev->wiphy.wowlan_config = NULL; goto set_wakeup; } err = nla_parse_nested(tb, MAX_NL80211_WOWLAN_TRIG, info->attrs[NL80211_ATTR_WOWLAN_TRIGGERS], nl80211_wowlan_policy, genl_info_extack(info)); if (err) return err; if (tb[NL80211_WOWLAN_TRIG_ANY]) { if (!(wowlan->flags & WIPHY_WOWLAN_ANY)) return -EINVAL; new_triggers.any = true; } if (tb[NL80211_WOWLAN_TRIG_DISCONNECT]) { if (!(wowlan->flags & WIPHY_WOWLAN_DISCONNECT)) return -EINVAL; new_triggers.disconnect = true; regular = true; } if (tb[NL80211_WOWLAN_TRIG_MAGIC_PKT]) { if (!(wowlan->flags & WIPHY_WOWLAN_MAGIC_PKT)) return -EINVAL; new_triggers.magic_pkt = true; regular = true; } if (tb[NL80211_WOWLAN_TRIG_GTK_REKEY_SUPPORTED]) return -EINVAL; if (tb[NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE]) { if (!(wowlan->flags & WIPHY_WOWLAN_GTK_REKEY_FAILURE)) return -EINVAL; new_triggers.gtk_rekey_failure = true; regular = true; } if (tb[NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST]) { if (!(wowlan->flags & WIPHY_WOWLAN_EAP_IDENTITY_REQ)) return -EINVAL; new_triggers.eap_identity_req = true; regular = true; } if (tb[NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE]) { if (!(wowlan->flags & WIPHY_WOWLAN_4WAY_HANDSHAKE)) return -EINVAL; new_triggers.four_way_handshake = true; regular = true; } if (tb[NL80211_WOWLAN_TRIG_RFKILL_RELEASE]) { if (!(wowlan->flags & WIPHY_WOWLAN_RFKILL_RELEASE)) return -EINVAL; new_triggers.rfkill_release = true; regular = true; } if (tb[NL80211_WOWLAN_TRIG_PKT_PATTERN]) { struct nlattr *pat; int n_patterns = 0; int rem, pat_len, mask_len, pkt_offset; struct nlattr *pat_tb[NUM_NL80211_PKTPAT]; regular = true; nla_for_each_nested(pat, tb[NL80211_WOWLAN_TRIG_PKT_PATTERN], rem) n_patterns++; if (n_patterns > wowlan->n_patterns) return -EINVAL; new_triggers.patterns = kcalloc(n_patterns, sizeof(new_triggers.patterns[0]), GFP_KERNEL); if (!new_triggers.patterns) return -ENOMEM; new_triggers.n_patterns = n_patterns; i = 0; nla_for_each_nested(pat, tb[NL80211_WOWLAN_TRIG_PKT_PATTERN], rem) { u8 *mask_pat; err = nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat, nl80211_packet_pattern_policy, genl_info_extack(info)); if (err) goto error; err = -EINVAL; if (!pat_tb[NL80211_PKTPAT_MASK] || !pat_tb[NL80211_PKTPAT_PATTERN]) goto error; pat_len = nla_len(pat_tb[NL80211_PKTPAT_PATTERN]); mask_len = DIV_ROUND_UP(pat_len, 8); if (nla_len(pat_tb[NL80211_PKTPAT_MASK]) != mask_len) goto error; if (pat_len > wowlan->pattern_max_len || pat_len < wowlan->pattern_min_len) goto error; if (!pat_tb[NL80211_PKTPAT_OFFSET]) pkt_offset = 0; else pkt_offset = nla_get_u32( pat_tb[NL80211_PKTPAT_OFFSET]); if (pkt_offset > 
wowlan->max_pkt_offset) goto error; new_triggers.patterns[i].pkt_offset = pkt_offset; mask_pat = kmalloc(mask_len + pat_len, GFP_KERNEL); if (!mask_pat) { err = -ENOMEM; goto error; } new_triggers.patterns[i].mask = mask_pat; memcpy(mask_pat, nla_data(pat_tb[NL80211_PKTPAT_MASK]), mask_len); mask_pat += mask_len; new_triggers.patterns[i].pattern = mask_pat; new_triggers.patterns[i].pattern_len = pat_len; memcpy(mask_pat, nla_data(pat_tb[NL80211_PKTPAT_PATTERN]), pat_len); i++; } } if (tb[NL80211_WOWLAN_TRIG_TCP_CONNECTION]) { regular = true; err = nl80211_parse_wowlan_tcp( rdev, tb[NL80211_WOWLAN_TRIG_TCP_CONNECTION], &new_triggers); if (err) goto error; } if (tb[NL80211_WOWLAN_TRIG_NET_DETECT]) { regular = true; err = nl80211_parse_wowlan_nd( rdev, wowlan, tb[NL80211_WOWLAN_TRIG_NET_DETECT], &new_triggers); if (err) goto error; } /* The 'any' trigger means the device continues operating more or less * as in its normal operation mode and wakes up the host on most of the * normal interrupts (like packet RX, ...) * It therefore makes little sense to combine with the more constrained * wakeup trigger modes. */ if (new_triggers.any && regular) { err = -EINVAL; goto error; } ntrig = kmemdup(&new_triggers, sizeof(new_triggers), GFP_KERNEL); if (!ntrig) { err = -ENOMEM; goto error; } cfg80211_rdev_free_wowlan(rdev); rdev->wiphy.wowlan_config = ntrig; set_wakeup: if (rdev->ops->set_wakeup && prev_enabled != !!rdev->wiphy.wowlan_config) rdev_set_wakeup(rdev, rdev->wiphy.wowlan_config); return 0; error: for (i = 0; i < new_triggers.n_patterns; i++) kfree(new_triggers.patterns[i].mask); kfree(new_triggers.patterns); if (new_triggers.tcp && new_triggers.tcp->sock) sock_release(new_triggers.tcp->sock); kfree(new_triggers.tcp); kfree(new_triggers.nd_config); return err; } #endif static int nl80211_send_coalesce_rules(struct sk_buff *msg, struct cfg80211_registered_device *rdev) { struct nlattr *nl_pats, *nl_pat, *nl_rule, *nl_rules; int i, j, pat_len; struct cfg80211_coalesce_rules *rule; if (!rdev->coalesce->n_rules) return 0; nl_rules = nla_nest_start(msg, NL80211_ATTR_COALESCE_RULE); if (!nl_rules) return -ENOBUFS; for (i = 0; i < rdev->coalesce->n_rules; i++) { nl_rule = nla_nest_start(msg, i + 1); if (!nl_rule) return -ENOBUFS; rule = &rdev->coalesce->rules[i]; if (nla_put_u32(msg, NL80211_ATTR_COALESCE_RULE_DELAY, rule->delay)) return -ENOBUFS; if (nla_put_u32(msg, NL80211_ATTR_COALESCE_RULE_CONDITION, rule->condition)) return -ENOBUFS; nl_pats = nla_nest_start(msg, NL80211_ATTR_COALESCE_RULE_PKT_PATTERN); if (!nl_pats) return -ENOBUFS; for (j = 0; j < rule->n_patterns; j++) { nl_pat = nla_nest_start(msg, j + 1); if (!nl_pat) return -ENOBUFS; pat_len = rule->patterns[j].pattern_len; if (nla_put(msg, NL80211_PKTPAT_MASK, DIV_ROUND_UP(pat_len, 8), rule->patterns[j].mask) || nla_put(msg, NL80211_PKTPAT_PATTERN, pat_len, rule->patterns[j].pattern) || nla_put_u32(msg, NL80211_PKTPAT_OFFSET, rule->patterns[j].pkt_offset)) return -ENOBUFS; nla_nest_end(msg, nl_pat); } nla_nest_end(msg, nl_pats); nla_nest_end(msg, nl_rule); } nla_nest_end(msg, nl_rules); return 0; } static int nl80211_get_coalesce(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct sk_buff *msg; void *hdr; if (!rdev->wiphy.coalesce) return -EOPNOTSUPP; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; hdr = nl80211hdr_put(msg, genl_info_snd_portid(info), info->snd_seq, 0, NL80211_CMD_GET_COALESCE); if (!hdr) goto nla_put_failure; if (rdev->coalesce && 
nl80211_send_coalesce_rules(msg, rdev)) goto nla_put_failure; genlmsg_end(msg, hdr); return genlmsg_reply(msg, info); nla_put_failure: nlmsg_free(msg); return -ENOBUFS; } void cfg80211_rdev_free_coalesce(struct cfg80211_registered_device *rdev) { struct cfg80211_coalesce *coalesce = rdev->coalesce; int i, j; struct cfg80211_coalesce_rules *rule; if (!coalesce) return; for (i = 0; i < coalesce->n_rules; i++) { rule = &coalesce->rules[i]; for (j = 0; j < rule->n_patterns; j++) kfree(rule->patterns[j].mask); kfree(rule->patterns); } kfree(coalesce->rules); kfree(coalesce); rdev->coalesce = NULL; } static int nl80211_parse_coalesce_rule(struct cfg80211_registered_device *rdev, struct nlattr *rule, struct cfg80211_coalesce_rules *new_rule) { int err, i; const struct wiphy_coalesce_support *coalesce = rdev->wiphy.coalesce; struct nlattr *tb[NUM_NL80211_ATTR_COALESCE_RULE], *pat; int rem, pat_len, mask_len, pkt_offset, n_patterns = 0; struct nlattr *pat_tb[NUM_NL80211_PKTPAT]; err = nla_parse_nested(tb, NL80211_ATTR_COALESCE_RULE_MAX, rule, nl80211_coalesce_policy, NULL); if (err) return err; if (tb[NL80211_ATTR_COALESCE_RULE_DELAY]) new_rule->delay = nla_get_u32(tb[NL80211_ATTR_COALESCE_RULE_DELAY]); if (new_rule->delay > coalesce->max_delay) return -EINVAL; if (tb[NL80211_ATTR_COALESCE_RULE_CONDITION]) new_rule->condition = nla_get_u32(tb[NL80211_ATTR_COALESCE_RULE_CONDITION]); if (!tb[NL80211_ATTR_COALESCE_RULE_PKT_PATTERN]) return -EINVAL; nla_for_each_nested(pat, tb[NL80211_ATTR_COALESCE_RULE_PKT_PATTERN], rem) n_patterns++; if (n_patterns > coalesce->n_patterns) return -EINVAL; new_rule->patterns = kcalloc(n_patterns, sizeof(new_rule->patterns[0]), GFP_KERNEL); if (!new_rule->patterns) return -ENOMEM; new_rule->n_patterns = n_patterns; i = 0; nla_for_each_nested(pat, tb[NL80211_ATTR_COALESCE_RULE_PKT_PATTERN], rem) { u8 *mask_pat; err = nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat, nl80211_packet_pattern_policy, NULL); if (err) return err; if (!pat_tb[NL80211_PKTPAT_MASK] || !pat_tb[NL80211_PKTPAT_PATTERN]) return -EINVAL; pat_len = nla_len(pat_tb[NL80211_PKTPAT_PATTERN]); mask_len = DIV_ROUND_UP(pat_len, 8); if (nla_len(pat_tb[NL80211_PKTPAT_MASK]) != mask_len) return -EINVAL; if (pat_len > coalesce->pattern_max_len || pat_len < coalesce->pattern_min_len) return -EINVAL; if (!pat_tb[NL80211_PKTPAT_OFFSET]) pkt_offset = 0; else pkt_offset = nla_get_u32(pat_tb[NL80211_PKTPAT_OFFSET]); if (pkt_offset > coalesce->max_pkt_offset) return -EINVAL; new_rule->patterns[i].pkt_offset = pkt_offset; mask_pat = kmalloc(mask_len + pat_len, GFP_KERNEL); if (!mask_pat) return -ENOMEM; new_rule->patterns[i].mask = mask_pat; memcpy(mask_pat, nla_data(pat_tb[NL80211_PKTPAT_MASK]), mask_len); mask_pat += mask_len; new_rule->patterns[i].pattern = mask_pat; new_rule->patterns[i].pattern_len = pat_len; memcpy(mask_pat, nla_data(pat_tb[NL80211_PKTPAT_PATTERN]), pat_len); i++; } return 0; } static int nl80211_set_coalesce(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; const struct wiphy_coalesce_support *coalesce = rdev->wiphy.coalesce; struct cfg80211_coalesce new_coalesce = {}; struct cfg80211_coalesce *n_coalesce; int err, rem_rule, n_rules = 0, i, j; struct nlattr *rule; struct cfg80211_coalesce_rules *tmp_rule; if (!rdev->wiphy.coalesce || !rdev->ops->set_coalesce) return -EOPNOTSUPP; if (!info->attrs[NL80211_ATTR_COALESCE_RULE]) { cfg80211_rdev_free_coalesce(rdev); rdev_set_coalesce(rdev, NULL); return 0; } nla_for_each_nested(rule, 
info->attrs[NL80211_ATTR_COALESCE_RULE], rem_rule) n_rules++; if (n_rules > coalesce->n_rules) return -EINVAL; new_coalesce.rules = kcalloc(n_rules, sizeof(new_coalesce.rules[0]), GFP_KERNEL); if (!new_coalesce.rules) return -ENOMEM; new_coalesce.n_rules = n_rules; i = 0; nla_for_each_nested(rule, info->attrs[NL80211_ATTR_COALESCE_RULE], rem_rule) { err = nl80211_parse_coalesce_rule(rdev, rule, &new_coalesce.rules[i]); if (err) goto error; i++; } err = rdev_set_coalesce(rdev, &new_coalesce); if (err) goto error; n_coalesce = kmemdup(&new_coalesce, sizeof(new_coalesce), GFP_KERNEL); if (!n_coalesce) { err = -ENOMEM; goto error; } cfg80211_rdev_free_coalesce(rdev); rdev->coalesce = n_coalesce; return 0; error: for (i = 0; i < new_coalesce.n_rules; i++) { tmp_rule = &new_coalesce.rules[i]; for (j = 0; j < tmp_rule->n_patterns; j++) kfree(tmp_rule->patterns[j].mask); kfree(tmp_rule->patterns); } kfree(new_coalesce.rules); return err; } static int nl80211_set_rekey_data(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; struct wireless_dev *wdev = dev->ieee80211_ptr; struct nlattr *tb[NUM_NL80211_REKEY_DATA]; struct cfg80211_gtk_rekey_data rekey_data; int err; if (!info->attrs[NL80211_ATTR_REKEY_DATA]) return -EINVAL; err = nla_parse_nested(tb, MAX_NL80211_REKEY_DATA, info->attrs[NL80211_ATTR_REKEY_DATA], nl80211_rekey_policy, genl_info_extack(info)); if (err) return err; if (!tb[NL80211_REKEY_DATA_REPLAY_CTR] || !tb[NL80211_REKEY_DATA_KEK] || !tb[NL80211_REKEY_DATA_KCK]) return -EINVAL; if (nla_len(tb[NL80211_REKEY_DATA_REPLAY_CTR]) != NL80211_REPLAY_CTR_LEN) return -ERANGE; if (nla_len(tb[NL80211_REKEY_DATA_KEK]) != NL80211_KEK_LEN) return -ERANGE; if (nla_len(tb[NL80211_REKEY_DATA_KCK]) != NL80211_KCK_LEN) return -ERANGE; rekey_data.kek = nla_data(tb[NL80211_REKEY_DATA_KEK]); rekey_data.kck = nla_data(tb[NL80211_REKEY_DATA_KCK]); rekey_data.replay_ctr = nla_data(tb[NL80211_REKEY_DATA_REPLAY_CTR]); wdev_lock(wdev); if (!wdev->current_bss) { err = -ENOTCONN; goto out; } if (!rdev->ops->set_rekey_data) { err = -EOPNOTSUPP; goto out; } err = rdev_set_rekey_data(rdev, dev, &rekey_data); out: wdev_unlock(wdev); return err; } static int nl80211_register_unexpected_frame(struct sk_buff *skb, struct genl_info *info) { struct net_device *dev = info->user_ptr[1]; struct wireless_dev *wdev = dev->ieee80211_ptr; if (wdev->iftype != NL80211_IFTYPE_AP && wdev->iftype != NL80211_IFTYPE_P2P_GO) return -EINVAL; if (wdev->ap_unexpected_nlportid) return -EBUSY; wdev->ap_unexpected_nlportid = genl_info_snd_portid(info); return 0; } static int nl80211_probe_client(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; struct wireless_dev *wdev = dev->ieee80211_ptr; struct sk_buff *msg; void *hdr; const u8 *addr; u64 cookie; int err; if (wdev->iftype != NL80211_IFTYPE_AP && wdev->iftype != NL80211_IFTYPE_P2P_GO) return -EOPNOTSUPP; if (!info->attrs[NL80211_ATTR_MAC]) return -EINVAL; if (!rdev->ops->probe_client) return -EOPNOTSUPP; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; hdr = nl80211hdr_put(msg, genl_info_snd_portid(info), info->snd_seq, 0, NL80211_CMD_PROBE_CLIENT); if (!hdr) { err = -ENOBUFS; goto free_msg; } addr = nla_data(info->attrs[NL80211_ATTR_MAC]); err = rdev_probe_client(rdev, dev, addr, &cookie); if (err) goto free_msg; if (nla_put_u64_64bit(msg, NL80211_ATTR_COOKIE, 
cookie, NL80211_ATTR_PAD)) goto nla_put_failure; genlmsg_end(msg, hdr); return genlmsg_reply(msg, info); nla_put_failure: err = -ENOBUFS; free_msg: nlmsg_free(msg); return err; } static int nl80211_register_beacons(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct cfg80211_beacon_registration *reg, *nreg; int rv; if (!(rdev->wiphy.flags & WIPHY_FLAG_REPORTS_OBSS)) return -EOPNOTSUPP; nreg = kzalloc(sizeof(*nreg), GFP_KERNEL); if (!nreg) return -ENOMEM; /* First, check if already registered. */ spin_lock_bh(&rdev->beacon_registrations_lock); list_for_each_entry(reg, &rdev->beacon_registrations, list) { if (reg->nlportid == genl_info_snd_portid(info)) { rv = -EALREADY; goto out_err; } } /* Add it to the list */ nreg->nlportid = genl_info_snd_portid(info); list_add(&nreg->list, &rdev->beacon_registrations); spin_unlock_bh(&rdev->beacon_registrations_lock); return 0; out_err: spin_unlock_bh(&rdev->beacon_registrations_lock); kfree(nreg); return rv; } static int nl80211_start_p2p_device(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct wireless_dev *wdev = info->user_ptr[1]; int err; if (!rdev->ops->start_p2p_device) return -EOPNOTSUPP; if (wdev->iftype != NL80211_IFTYPE_P2P_DEVICE) return -EOPNOTSUPP; if (wdev_running(wdev)) return 0; if (rfkill_blocked(rdev->rfkill)) return -ERFKILL; err = rdev_start_p2p_device(rdev, wdev); if (err) return err; wdev->is_running = true; rdev->opencount++; return 0; } static int nl80211_stop_p2p_device(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct wireless_dev *wdev = info->user_ptr[1]; if (wdev->iftype != NL80211_IFTYPE_P2P_DEVICE) return -EOPNOTSUPP; if (!rdev->ops->stop_p2p_device) return -EOPNOTSUPP; cfg80211_stop_p2p_device(rdev, wdev); return 0; } static int nl80211_start_nan(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct wireless_dev *wdev = info->user_ptr[1]; struct cfg80211_nan_conf conf = {}; int err; if (wdev->iftype != NL80211_IFTYPE_NAN) return -EOPNOTSUPP; if (wdev_running(wdev)) return -EEXIST; if (rfkill_blocked(rdev->rfkill)) return -ERFKILL; if (!info->attrs[NL80211_ATTR_NAN_MASTER_PREF]) return -EINVAL; conf.master_pref = nla_get_u8(info->attrs[NL80211_ATTR_NAN_MASTER_PREF]); if (info->attrs[NL80211_ATTR_BANDS]) { u32 bands = nla_get_u32(info->attrs[NL80211_ATTR_BANDS]); if (bands & ~(u32)wdev->wiphy->nan_supported_bands) return -EOPNOTSUPP; if (bands && !(bands & BIT(NL80211_BAND_2GHZ))) return -EINVAL; conf.bands = bands; } if (info->attrs[NL80211_ATTR_NAN_CDW_2G]) conf.cdw_2g = nla_get_u8(info->attrs[NL80211_ATTR_NAN_CDW_2G]); else conf.cdw_2g = 1; if (info->attrs[NL80211_ATTR_NAN_CDW_5G]) conf.cdw_5g = nla_get_u8(info->attrs[NL80211_ATTR_NAN_CDW_5G]); else conf.cdw_5g = 1; if (!conf.cdw_2g || conf.cdw_2g > 5 || conf.cdw_5g > 5) return -EINVAL; err = rdev_start_nan(rdev, wdev, &conf); if (err) return err; wdev->is_running = true; rdev->opencount++; return 0; } static int nl80211_stop_nan(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct wireless_dev *wdev = info->user_ptr[1]; if (wdev->iftype != NL80211_IFTYPE_NAN) return -EOPNOTSUPP; cfg80211_stop_nan(rdev, wdev); return 0; } static int validate_nan_filter(struct nlattr *filter_attr) { struct nlattr *attr; int len = 0, n_entries = 0, rem; 
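	/*
	 * Walk the nested filter entries once, counting them and summing
	 * their lengths. The aggregate length of all entries is capped
	 * below U8_MAX, and the entry count is returned so the caller can
	 * size its filter array.
	 */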
nla_for_each_nested(attr, filter_attr, rem) { len += nla_len(attr); n_entries++; } if (len >= U8_MAX) return -EINVAL; return n_entries; } static int handle_nan_filter(struct nlattr *attr_filter, struct cfg80211_nan_func *func, bool tx) { struct nlattr *attr; int n_entries, rem, i; struct cfg80211_nan_func_filter *filter; n_entries = validate_nan_filter(attr_filter); if (n_entries < 0) return n_entries; BUILD_BUG_ON(sizeof(*func->rx_filters) != sizeof(*func->tx_filters)); filter = kcalloc(n_entries, sizeof(*func->rx_filters), GFP_KERNEL); if (!filter) return -ENOMEM; i = 0; nla_for_each_nested(attr, attr_filter, rem) { filter[i].filter = nla_memdup(attr, GFP_KERNEL); filter[i].len = nla_len(attr); i++; } if (tx) { func->num_tx_filters = n_entries; func->tx_filters = filter; } else { func->num_rx_filters = n_entries; func->rx_filters = filter; } return 0; } static bool nl80211_nan_cipher_suites_valid(struct wiphy *wiphy, u32 ids) { if (ids & (~(NL80211_NAN_CS_ID_SK_CCM_128 | NL80211_NAN_CS_ID_SK_GCM_256))) return false; if ((ids & NL80211_NAN_CS_ID_SK_CCM_128) && !cfg80211_supported_cipher_suite(wiphy, WLAN_CIPHER_SUITE_CCMP)) return false; if ((ids & NL80211_NAN_CS_ID_SK_GCM_256) && !cfg80211_supported_cipher_suite(wiphy, WLAN_CIPHER_SUITE_GCMP_256)) return false; return true; } static bool nl80211_nan_sec_ctx_id_valid(struct cfg80211_nan_sec_ctx_id *id) { if (id->type == NL80211_NAN_SEC_CTX_TYPE_PMKID && id->len == WLAN_PMKID_LEN) return true; return false; } static int nl80211_nan_set_security(struct cfg80211_registered_device *rdev, struct nlattr **tb, struct cfg80211_nan_sec *sec) { int rem, ret, i, n_ctx_ids = 0; struct nlattr *sec_attr; if (!tb[NL80211_NAN_FUNC_SECURITY_CIPHER_SUITES]) return 0; sec->cipher_suite_ids = nla_get_u32(tb[NL80211_NAN_FUNC_SECURITY_CIPHER_SUITES]); if (!nl80211_nan_cipher_suites_valid(&rdev->wiphy, sec->cipher_suite_ids)) return -EINVAL; if (!tb[NL80211_NAN_FUNC_SECURITY_CTX_IDS]) return 0; nla_for_each_nested(sec_attr, tb[NL80211_NAN_FUNC_SECURITY_CTX_IDS], rem) n_ctx_ids++; if (n_ctx_ids > NL80211_MAX_NR_NAN_SEC_CTX_IDS) return -EINVAL; sec->ctx_ids = kcalloc(n_ctx_ids, sizeof(*sec->ctx_ids), GFP_KERNEL); if (!sec->ctx_ids) return -ENOMEM; nla_for_each_nested(sec_attr, tb[NL80211_NAN_FUNC_SECURITY_CTX_IDS], rem) { struct nlattr *attr[NL80211_NAN_SEC_CTX_MAX + 1]; struct cfg80211_nan_sec_ctx_id *id = &sec->ctx_ids[sec->n_ctx_ids]; ret = nla_parse_nested(attr, NL80211_NAN_SEC_CTX_MAX, sec_attr, nl80211_nan_sec_ctx_policy, NULL); if (ret) goto out; if (!attr[NL80211_NAN_SEC_CTX_ID_TYPE] || !attr[NL80211_NAN_SEC_CTX_ID_DATA]) { ret = -EINVAL; goto out; } id->type = nla_get_u32(attr[NL80211_NAN_SEC_CTX_ID_TYPE]); id->len = nla_len(attr[NL80211_NAN_SEC_CTX_ID_DATA]); id->data = kmemdup(nla_data(attr[NL80211_NAN_SEC_CTX_ID_DATA]), id->len, GFP_KERNEL); if (!id->data) { ret = -ENOMEM; goto out; } sec->n_ctx_ids++; if (!nl80211_nan_sec_ctx_id_valid(id)) { ret = -EINVAL; goto out; } } return 0; out: for (i = 0; i < sec->n_ctx_ids; i++) kfree(sec->ctx_ids[i].data); kfree(sec->ctx_ids); sec->ctx_ids = NULL; sec->n_ctx_ids = 0; return ret; } static int nl80211_nan_add_func(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct wireless_dev *wdev = info->user_ptr[1]; struct nlattr *tb[NUM_NL80211_NAN_FUNC_ATTR], *func_attr; struct cfg80211_nan_func *func; struct sk_buff *msg = NULL; void *hdr = NULL; int err = 0; if (wdev->iftype != NL80211_IFTYPE_NAN) return -EOPNOTSUPP; if (!wdev_running(wdev)) return 
		-ENOTCONN;

	if (!info->attrs[NL80211_ATTR_NAN_FUNC])
		return -EINVAL;

	err = nla_parse_nested(tb, NL80211_NAN_FUNC_ATTR_MAX,
			       info->attrs[NL80211_ATTR_NAN_FUNC],
			       nl80211_nan_func_policy,
			       genl_info_extack(info));
	if (err)
		return err;

	func = kzalloc(sizeof(*func), GFP_KERNEL);
	if (!func)
		return -ENOMEM;

	func->cookie = cfg80211_assign_cookie(rdev);

	if (!tb[NL80211_NAN_FUNC_TYPE] ||
	    nla_get_u8(tb[NL80211_NAN_FUNC_TYPE]) > NL80211_NAN_FUNC_MAX_TYPE) {
		err = -EINVAL;
		goto out;
	}

	func->type = nla_get_u8(tb[NL80211_NAN_FUNC_TYPE]);

	if (!tb[NL80211_NAN_FUNC_SERVICE_ID]) {
		err = -EINVAL;
		goto out;
	}

	memcpy(func->service_id, nla_data(tb[NL80211_NAN_FUNC_SERVICE_ID]),
	       sizeof(func->service_id));

	func->close_range =
		nla_get_flag(tb[NL80211_NAN_FUNC_CLOSE_RANGE]);

	if (tb[NL80211_NAN_FUNC_SERVICE_INFO]) {
		func->serv_spec_info_len =
			nla_len(tb[NL80211_NAN_FUNC_SERVICE_INFO]);
		func->serv_spec_info =
			kmemdup(nla_data(tb[NL80211_NAN_FUNC_SERVICE_INFO]),
				func->serv_spec_info_len, GFP_KERNEL);
		if (!func->serv_spec_info) {
			err = -ENOMEM;
			goto out;
		}
	}

	if (tb[NL80211_NAN_FUNC_TTL])
		func->ttl = nla_get_u32(tb[NL80211_NAN_FUNC_TTL]);

	if (tb[NL80211_NAN_FUNC_AWAKE_DW_INTERVAL]) {
		func->awake_dw_interval =
			nla_get_u8(tb[NL80211_NAN_FUNC_AWAKE_DW_INTERVAL]);
		if (func->awake_dw_interval > NL80211_NAN_FUNC_MAX_DW_INTERVAL) {
			err = -EINVAL;
			goto out;
		}
	}

	switch (func->type) {
	case NL80211_NAN_FUNC_PUBLISH:
		if (!tb[NL80211_NAN_FUNC_PUBLISH_TYPE]) {
			err = -EINVAL;
			goto out;
		}

		func->publish_type =
			nla_get_u8(tb[NL80211_NAN_FUNC_PUBLISH_TYPE]);
		func->publish_bcast =
			nla_get_flag(tb[NL80211_NAN_FUNC_PUBLISH_BCAST]);

		if ((!(func->publish_type & NL80211_NAN_SOLICITED_PUBLISH)) &&
		    func->publish_bcast) {
			err = -EINVAL;
			goto out;
		}

		err = nl80211_nan_set_security(rdev, tb, &func->sec);
		if (err)
			goto out;

		if (tb[NL80211_NAN_FUNC_FSD]) {
			func->fsd_required = 1;
			func->fsd_method = nla_get_u32(tb[NL80211_NAN_FUNC_FSD]);
			if (func->fsd_method > NL80211_NAN_FSD_GAS) {
				err = -EINVAL;
				goto out;
			}
		}

		if (tb[NL80211_NAN_FUNC_NDP_TYPE]) {
			func->ndp_required = 1;
			func->ndp_type = nla_get_u32(tb[NL80211_NAN_FUNC_NDP_TYPE]);
			if (func->ndp_type > NL80211_NAN_NDP_TYPE_MCAST_MANY_TO_MANY) {
				err = -EINVAL;
				goto out;
			}
		}

		if (tb[NL80211_NAN_FUNC_RANGE_LIMIT_INGRESS])
			func->range_limit_ingress =
				nla_get_u16(tb[NL80211_NAN_FUNC_RANGE_LIMIT_INGRESS]);

		if (tb[NL80211_NAN_FUNC_RANGE_LIMIT_EGRESS])
			func->range_limit_egress =
				nla_get_u16(tb[NL80211_NAN_FUNC_RANGE_LIMIT_EGRESS]);

		break;
	case NL80211_NAN_FUNC_SUBSCRIBE:
		func->subscribe_active =
			nla_get_flag(tb[NL80211_NAN_FUNC_SUBSCRIBE_ACTIVE]);
		break;
	case NL80211_NAN_FUNC_FOLLOW_UP:
		if (!tb[NL80211_NAN_FUNC_FOLLOW_UP_ID] ||
		    !tb[NL80211_NAN_FUNC_FOLLOW_UP_REQ_ID] ||
		    !tb[NL80211_NAN_FUNC_FOLLOW_UP_DEST]) {
			err = -EINVAL;
			goto out;
		}

		func->followup_id =
			nla_get_u8(tb[NL80211_NAN_FUNC_FOLLOW_UP_ID]);
		func->followup_reqid =
			nla_get_u8(tb[NL80211_NAN_FUNC_FOLLOW_UP_REQ_ID]);
		memcpy(func->followup_dest.addr,
		       nla_data(tb[NL80211_NAN_FUNC_FOLLOW_UP_DEST]),
		       sizeof(func->followup_dest.addr));
		if (func->ttl) {
			err = -EINVAL;
			goto out;
		}
		break;
	default:
		err = -EINVAL;
		goto out;
	}

	if (tb[NL80211_NAN_FUNC_SRF]) {
		struct nlattr *srf_tb[NUM_NL80211_NAN_SRF_ATTR];

		err = nla_parse_nested(srf_tb, NL80211_NAN_SRF_ATTR_MAX,
				       tb[NL80211_NAN_FUNC_SRF],
				       nl80211_nan_srf_policy,
				       genl_info_extack(info));
		if (err)
			goto out;

		func->srf_include =
			nla_get_flag(srf_tb[NL80211_NAN_SRF_INCLUDE]);

		if (srf_tb[NL80211_NAN_SRF_BF]) {
			if (srf_tb[NL80211_NAN_SRF_MAC_ADDRS] ||
			    !srf_tb[NL80211_NAN_SRF_BF_IDX]) {
				err = -EINVAL;
				goto out;
			}

			func->srf_bf_len
= nla_len(srf_tb[NL80211_NAN_SRF_BF]); func->srf_bf = kmemdup(nla_data(srf_tb[NL80211_NAN_SRF_BF]), func->srf_bf_len, GFP_KERNEL); if (!func->srf_bf) { err = -ENOMEM; goto out; } func->srf_bf_idx = nla_get_u8(srf_tb[NL80211_NAN_SRF_BF_IDX]); } else { struct nlattr *attr, *mac_attr = srf_tb[NL80211_NAN_SRF_MAC_ADDRS]; int n_entries, rem, i = 0; if (!mac_attr) { err = -EINVAL; goto out; } n_entries = validate_acl_mac_addrs(mac_attr); if (n_entries <= 0) { err = -EINVAL; goto out; } func->srf_num_macs = n_entries; func->srf_macs = kcalloc(n_entries, sizeof(*func->srf_macs), GFP_KERNEL); if (!func->srf_macs) { err = -ENOMEM; goto out; } nla_for_each_nested(attr, mac_attr, rem) memcpy(func->srf_macs[i++].addr, nla_data(attr), sizeof(*func->srf_macs)); } } if (tb[NL80211_NAN_FUNC_TX_MATCH_FILTER]) { err = handle_nan_filter(tb[NL80211_NAN_FUNC_TX_MATCH_FILTER], func, true); if (err) goto out; } if (tb[NL80211_NAN_FUNC_RX_MATCH_FILTER]) { err = handle_nan_filter(tb[NL80211_NAN_FUNC_RX_MATCH_FILTER], func, false); if (err) goto out; } msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) { err = -ENOMEM; goto out; } hdr = nl80211hdr_put(msg, genl_info_snd_portid(info), info->snd_seq, 0, NL80211_CMD_ADD_NAN_FUNCTION); /* This can't really happen - we just allocated 4KB */ if (WARN_ON(!hdr)) { err = -ENOMEM; goto out; } err = rdev_add_nan_func(rdev, wdev, func); out: if (err < 0) { cfg80211_free_nan_func(func); nlmsg_free(msg); return err; } /* propagate the instance id and cookie to userspace */ if (nla_put_u64_64bit(msg, NL80211_ATTR_COOKIE, func->cookie, NL80211_ATTR_PAD)) goto nla_put_failure; func_attr = nla_nest_start(msg, NL80211_ATTR_NAN_FUNC); if (!func_attr) goto nla_put_failure; if (nla_put_u8(msg, NL80211_NAN_FUNC_INSTANCE_ID, func->instance_id)) goto nla_put_failure; nla_nest_end(msg, func_attr); genlmsg_end(msg, hdr); return genlmsg_reply(msg, info); nla_put_failure: nlmsg_free(msg); return -ENOBUFS; } static int nl80211_nan_del_func(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct wireless_dev *wdev = info->user_ptr[1]; u64 cookie; if (wdev->iftype != NL80211_IFTYPE_NAN) return -EOPNOTSUPP; if (!wdev_running(wdev)) return -ENOTCONN; if (!info->attrs[NL80211_ATTR_COOKIE]) return -EINVAL; cookie = nla_get_u64(info->attrs[NL80211_ATTR_COOKIE]); rdev_del_nan_func(rdev, wdev, cookie); return 0; } static int nl80211_nan_change_config(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct wireless_dev *wdev = info->user_ptr[1]; struct cfg80211_nan_conf conf = {}; u32 changed = 0; if (wdev->iftype != NL80211_IFTYPE_NAN) return -EOPNOTSUPP; if (!wdev_running(wdev)) return -ENOTCONN; if (info->attrs[NL80211_ATTR_NAN_MASTER_PREF]) { conf.master_pref = nla_get_u8(info->attrs[NL80211_ATTR_NAN_MASTER_PREF]); if (conf.master_pref <= 1 || conf.master_pref == 255) return -EINVAL; changed |= CFG80211_NAN_CONF_CHANGED_PREF; } if (info->attrs[NL80211_ATTR_BANDS]) { u32 bands = nla_get_u32(info->attrs[NL80211_ATTR_BANDS]); if (bands & ~(u32)wdev->wiphy->nan_supported_bands) return -EOPNOTSUPP; if (bands && !(bands & BIT(NL80211_BAND_2GHZ))) return -EINVAL; conf.bands = bands; changed |= CFG80211_NAN_CONF_CHANGED_BANDS; } if (info->attrs[NL80211_ATTR_NAN_CDW_2G]) { conf.cdw_2g = nla_get_u8(info->attrs[NL80211_ATTR_NAN_CDW_2G]); if (!conf.cdw_2g || conf.cdw_2g > 5) return -EINVAL; changed |= CFG80211_NAN_CONF_CHANGED_CDW_2G; } if (info->attrs[NL80211_ATTR_NAN_CDW_5G]) { 
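		/*
		 * Committed DW interval for the 5 GHz band. Like the
		 * 2.4 GHz value above it is bounded by 5, but unlike it,
		 * a value of 0 is accepted here.
		 */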
conf.cdw_5g = nla_get_u8(info->attrs[NL80211_ATTR_NAN_CDW_5G]); if (conf.cdw_5g > 5) return -EINVAL; changed |= CFG80211_NAN_CONF_CHANGED_CDW_5G; } if (!changed) return -EINVAL; return rdev_nan_change_conf(rdev, wdev, &conf, changed); } static int nl80211_nan_put_security(struct sk_buff *msg, struct cfg80211_nan_sec *sec) { struct nlattr *ctx_ids, *ctx_id; u8 i; if (!sec->cipher_suite_ids) return 0; if (nla_put_u32(msg, NL80211_NAN_FUNC_SECURITY_CIPHER_SUITES, sec->cipher_suite_ids)) return -ENOBUFS; if (!sec->n_ctx_ids) return 0; ctx_ids = nla_nest_start(msg, NL80211_NAN_FUNC_SECURITY_CTX_IDS); if (!ctx_ids) return -ENOBUFS; for (i = 0; i < sec->n_ctx_ids; i++) { struct cfg80211_nan_sec_ctx_id *id = &sec->ctx_ids[i]; ctx_id = nla_nest_start(msg, i + 1); if (!ctx_id || nla_put_u32(msg, NL80211_NAN_SEC_CTX_ID_TYPE, id->type) || nla_put(msg, NL80211_NAN_SEC_CTX_ID_DATA, id->len, id->data)) return -ENOBUFS; nla_nest_end(msg, ctx_id); } nla_nest_end(msg, ctx_ids); return 0; } void cfg80211_nan_match(struct wireless_dev *wdev, struct cfg80211_nan_match_params *match, gfp_t gfp) { struct wiphy *wiphy = wdev->wiphy; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); struct nlattr *match_attr, *local_func_attr, *peer_func_attr; struct sk_buff *msg; void *hdr; u32 size = 200; u8 i; if (WARN_ON(!match->inst_id || !match->peer_inst_id || !match->addr)) return; /* * Add room for security context IDs. Each security context ID will * require a nested attribute with type and data. */ for (i = 0; i < match->sec.n_ctx_ids; i++) size += NLA_HDRLEN + NLA_HDRLEN + NLA_ALIGN(sizeof(u32)) + NLA_HDRLEN + NLA_ALIGN(match->sec.ctx_ids[i].len); size += NLA_HDRLEN + NLA_ALIGN(match->device_attrs_len) + NLA_HDRLEN + NLA_ALIGN(match->info_len); msg = nlmsg_new(size, gfp); if (!msg) return; hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_NAN_MATCH); if (!hdr) { nlmsg_free(msg); return; } if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || (wdev->netdev && nla_put_u32(msg, NL80211_ATTR_IFINDEX, wdev->netdev->ifindex)) || nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev), NL80211_ATTR_PAD)) goto nla_put_failure; if (nla_put_u64_64bit(msg, NL80211_ATTR_COOKIE, match->cookie, NL80211_ATTR_PAD) || nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, match->addr)) goto nla_put_failure; match_attr = nla_nest_start(msg, NL80211_ATTR_NAN_MATCH); if (!match_attr) goto nla_put_failure; if (match->type == NL80211_NAN_FUNC_PUBLISH && match->device_attrs_len && match->device_attrs && nla_put(msg, NL80211_NAN_MATCH_DEVICE_ATTRS, match->device_attrs_len, match->device_attrs)) goto nla_put_failure; local_func_attr = nla_nest_start(msg, NL80211_NAN_MATCH_FUNC_LOCAL); if (!local_func_attr) goto nla_put_failure; if (nla_put_u8(msg, NL80211_NAN_FUNC_INSTANCE_ID, match->inst_id)) goto nla_put_failure; nla_nest_end(msg, local_func_attr); peer_func_attr = nla_nest_start(msg, NL80211_NAN_MATCH_FUNC_PEER); if (!peer_func_attr) goto nla_put_failure; if (nla_put_u8(msg, NL80211_NAN_FUNC_TYPE, match->type) || nla_put_u8(msg, NL80211_NAN_FUNC_INSTANCE_ID, match->peer_inst_id)) goto nla_put_failure; if (match->info && match->info_len && nla_put(msg, NL80211_NAN_FUNC_SERVICE_INFO, match->info_len, match->info)) goto nla_put_failure; if (match->type == NL80211_NAN_FUNC_PUBLISH) { if (nl80211_nan_put_security(msg, &match->sec)) goto nla_put_failure; if (match->fsd_required && nla_put_u32(msg, NL80211_NAN_FUNC_FSD, match->fsd_method)) goto nla_put_failure; if (match->ndp_required && nla_put_u32(msg, NL80211_NAN_FUNC_NDP_TYPE,
match->ndp_type)) goto nla_put_failure; if (match->range_limit_ingress && nla_put_u16(msg, NL80211_NAN_FUNC_RANGE_LIMIT_INGRESS, match->range_limit_ingress)) goto nla_put_failure; if (match->range_limit_egress && nla_put_u16(msg, NL80211_NAN_FUNC_RANGE_LIMIT_EGRESS, match->range_limit_egress)) goto nla_put_failure; } nla_nest_end(msg, peer_func_attr); nla_nest_end(msg, match_attr); genlmsg_end(msg, hdr); if (!wdev->owner_nlportid) genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, NL80211_MCGRP_NAN, gfp); else genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, wdev->owner_nlportid); return; nla_put_failure: nlmsg_free(msg); } EXPORT_SYMBOL(cfg80211_nan_match); void cfg80211_nan_func_terminated(struct wireless_dev *wdev, u8 inst_id, enum nl80211_nan_func_term_reason reason, u64 cookie, gfp_t gfp) { struct wiphy *wiphy = wdev->wiphy; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); struct sk_buff *msg; struct nlattr *func_attr; void *hdr; if (WARN_ON(!inst_id)) return; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); if (!msg) return; hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_DEL_NAN_FUNCTION); if (!hdr) { nlmsg_free(msg); return; } if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || (wdev->netdev && nla_put_u32(msg, NL80211_ATTR_IFINDEX, wdev->netdev->ifindex)) || nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev), NL80211_ATTR_PAD)) goto nla_put_failure; if (nla_put_u64_64bit(msg, NL80211_ATTR_COOKIE, cookie, NL80211_ATTR_PAD)) goto nla_put_failure; func_attr = nla_nest_start(msg, NL80211_ATTR_NAN_FUNC); if (!func_attr) goto nla_put_failure; if (nla_put_u8(msg, NL80211_NAN_FUNC_INSTANCE_ID, inst_id) || nla_put_u8(msg, NL80211_NAN_FUNC_TERM_REASON, reason)) goto nla_put_failure; nla_nest_end(msg, func_attr); genlmsg_end(msg, hdr); if (!wdev->owner_nlportid) genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, NL80211_MCGRP_NAN, gfp); else genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, wdev->owner_nlportid); return; nla_put_failure: nlmsg_free(msg); } EXPORT_SYMBOL(cfg80211_nan_func_terminated); static int nl80211_get_protocol_features(struct sk_buff *skb, struct genl_info *info) { void *hdr; struct sk_buff *msg; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; hdr = nl80211hdr_put(msg, genl_info_snd_portid(info), info->snd_seq, 0, NL80211_CMD_GET_PROTOCOL_FEATURES); if (!hdr) goto nla_put_failure; if (nla_put_u32(msg, NL80211_ATTR_PROTOCOL_FEATURES, NL80211_PROTOCOL_FEATURE_SPLIT_WIPHY_DUMP)) goto nla_put_failure; genlmsg_end(msg, hdr); return genlmsg_reply(msg, info); nla_put_failure: kfree_skb(msg); return -ENOBUFS; } static int nl80211_update_ft_ies(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct cfg80211_update_ft_ies_params ft_params; struct net_device *dev = info->user_ptr[1]; if (!rdev->ops->update_ft_ies) return -EOPNOTSUPP; if (!info->attrs[NL80211_ATTR_MDID] || !info->attrs[NL80211_ATTR_IE]) return -EINVAL; memset(&ft_params, 0, sizeof(ft_params)); ft_params.md = nla_get_u16(info->attrs[NL80211_ATTR_MDID]); ft_params.ie = nla_data(info->attrs[NL80211_ATTR_IE]); ft_params.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]); return rdev_update_ft_ies(rdev, dev, &ft_params); } static int nl80211_crit_protocol_start(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct wireless_dev *wdev = info->user_ptr[1]; enum nl80211_crit_proto_id proto = NL80211_CRIT_PROTO_UNSPEC; u16 
duration; int ret; if (!rdev->ops->crit_proto_start) return -EOPNOTSUPP; if (WARN_ON(!rdev->ops->crit_proto_stop)) return -EINVAL; if (rdev->crit_proto_nlportid) return -EBUSY; /* determine protocol if provided */ if (info->attrs[NL80211_ATTR_CRIT_PROT_ID]) proto = nla_get_u16(info->attrs[NL80211_ATTR_CRIT_PROT_ID]); if (proto >= NUM_NL80211_CRIT_PROTO) return -EINVAL; /* timeout must be provided */ if (!info->attrs[NL80211_ATTR_MAX_CRIT_PROT_DURATION]) return -EINVAL; duration = nla_get_u16(info->attrs[NL80211_ATTR_MAX_CRIT_PROT_DURATION]); if (duration > NL80211_CRIT_PROTO_MAX_DURATION) return -ERANGE; ret = rdev_crit_proto_start(rdev, wdev, proto, duration); if (!ret) rdev->crit_proto_nlportid = genl_info_snd_portid(info); return ret; } static int nl80211_crit_protocol_stop(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct wireless_dev *wdev = info->user_ptr[1]; if (!rdev->ops->crit_proto_stop) return -EOPNOTSUPP; if (rdev->crit_proto_nlportid) { rdev->crit_proto_nlportid = 0; rdev_crit_proto_stop(rdev, wdev); } return 0; } static int nl80211_vendor_cmd(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct wireless_dev *wdev = __cfg80211_wdev_from_attrs(genl_info_net(info), info->attrs); int i, err; u32 vid, subcmd; if (!rdev->wiphy.vendor_commands) return -EOPNOTSUPP; if (IS_ERR(wdev)) { err = PTR_ERR(wdev); if (err != -EINVAL) return err; wdev = NULL; } else if (wdev->wiphy != &rdev->wiphy) { return -EINVAL; } if (!info->attrs[NL80211_ATTR_VENDOR_ID] || !info->attrs[NL80211_ATTR_VENDOR_SUBCMD]) return -EINVAL; vid = nla_get_u32(info->attrs[NL80211_ATTR_VENDOR_ID]); subcmd = nla_get_u32(info->attrs[NL80211_ATTR_VENDOR_SUBCMD]); for (i = 0; i < rdev->wiphy.n_vendor_commands; i++) { const struct wiphy_vendor_command *vcmd; void *data = NULL; int len = 0; vcmd = &rdev->wiphy.vendor_commands[i]; if (vcmd->info.vendor_id != vid || vcmd->info.subcmd != subcmd) continue; if (vcmd->flags & (WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV)) { if (!wdev) return -EINVAL; if (vcmd->flags & WIPHY_VENDOR_CMD_NEED_NETDEV && !wdev->netdev) return -EINVAL; if (vcmd->flags & WIPHY_VENDOR_CMD_NEED_RUNNING) { if (!wdev_running(wdev)) return -ENETDOWN; } if (!vcmd->doit) return -EOPNOTSUPP; } else { wdev = NULL; } if (info->attrs[NL80211_ATTR_VENDOR_DATA]) { data = nla_data(info->attrs[NL80211_ATTR_VENDOR_DATA]); len = nla_len(info->attrs[NL80211_ATTR_VENDOR_DATA]); } rdev->cur_cmd_info = info; err = rdev->wiphy.vendor_commands[i].doit(&rdev->wiphy, wdev, data, len); rdev->cur_cmd_info = NULL; return err; } return -EOPNOTSUPP; } static int nl80211_prepare_vendor_dump(struct sk_buff *skb, struct netlink_callback *cb, struct cfg80211_registered_device **rdev, struct wireless_dev **wdev) { struct nlattr **attrbuf = genl_family_attrbuf(&nl80211_fam); u32 vid, subcmd; unsigned int i; int vcmd_idx = -1; int err; void *data = NULL; unsigned int data_len = 0; if (cb->args[0]) { /* subtract the 1 again here */ struct wiphy *wiphy = wiphy_idx_to_wiphy(cb->args[0] - 1); struct wireless_dev *tmp; if (!wiphy) return -ENODEV; *rdev = wiphy_to_rdev(wiphy); *wdev = NULL; if (cb->args[1]) { list_for_each_entry(tmp, &wiphy->wdev_list, list) { if (tmp->identifier == cb->args[1] - 1) { *wdev = tmp; break; } } } /* keep rtnl locked in successful case */ return 0; } err = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize, attrbuf, nl80211_fam.maxattr, nl80211_policy, NULL); if (err) return 
err; if (!attrbuf[NL80211_ATTR_VENDOR_ID] || !attrbuf[NL80211_ATTR_VENDOR_SUBCMD]) return -EINVAL; *wdev = __cfg80211_wdev_from_attrs(sock_net(skb->sk), attrbuf); if (IS_ERR(*wdev)) *wdev = NULL; *rdev = __cfg80211_rdev_from_attrs(sock_net(skb->sk), attrbuf); if (IS_ERR(*rdev)) return PTR_ERR(*rdev); vid = nla_get_u32(attrbuf[NL80211_ATTR_VENDOR_ID]); subcmd = nla_get_u32(attrbuf[NL80211_ATTR_VENDOR_SUBCMD]); for (i = 0; i < (*rdev)->wiphy.n_vendor_commands; i++) { const struct wiphy_vendor_command *vcmd; vcmd = &(*rdev)->wiphy.vendor_commands[i]; if (vcmd->info.vendor_id != vid || vcmd->info.subcmd != subcmd) continue; if (!vcmd->dumpit) return -EOPNOTSUPP; vcmd_idx = i; break; } if (vcmd_idx < 0) return -EOPNOTSUPP; if (attrbuf[NL80211_ATTR_VENDOR_DATA]) { data = nla_data(attrbuf[NL80211_ATTR_VENDOR_DATA]); data_len = nla_len(attrbuf[NL80211_ATTR_VENDOR_DATA]); } /* 0 is the first index - add 1 to parse only once */ cb->args[0] = (*rdev)->wiphy_idx + 1; /* add 1 to know if it was NULL */ cb->args[1] = *wdev ? (*wdev)->identifier + 1 : 0; cb->args[2] = vcmd_idx; cb->args[3] = (unsigned long)data; cb->args[4] = data_len; /* keep rtnl locked in successful case */ return 0; } static int nl80211_vendor_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb) { struct cfg80211_registered_device *rdev; struct wireless_dev *wdev; unsigned int vcmd_idx; const struct wiphy_vendor_command *vcmd; void *data; int data_len; int err; struct nlattr *vendor_data; rtnl_lock(); err = nl80211_prepare_vendor_dump(skb, cb, &rdev, &wdev); if (err) goto out; vcmd_idx = cb->args[2]; data = (void *)cb->args[3]; data_len = cb->args[4]; vcmd = &rdev->wiphy.vendor_commands[vcmd_idx]; if (vcmd->flags & (WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV)) { if (!wdev) { err = -EINVAL; goto out; } if (vcmd->flags & WIPHY_VENDOR_CMD_NEED_NETDEV && !wdev->netdev) { err = -EINVAL; goto out; } if (vcmd->flags & WIPHY_VENDOR_CMD_NEED_RUNNING) { if (!wdev_running(wdev)) { err = -ENETDOWN; goto out; } } } while (1) { void *hdr = nl80211hdr_put(skb, NETLINK_CB_PORTID(cb->skb), cb->nlh->nlmsg_seq, NLM_F_MULTI, NL80211_CMD_VENDOR); if (!hdr) break; if (nla_put_u32(skb, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || (wdev && nla_put_u64_64bit(skb, NL80211_ATTR_WDEV, wdev_id(wdev), NL80211_ATTR_PAD))) { genlmsg_cancel(skb, hdr); break; } vendor_data = nla_nest_start(skb, NL80211_ATTR_VENDOR_DATA); if (!vendor_data) { genlmsg_cancel(skb, hdr); break; } err = vcmd->dumpit(&rdev->wiphy, wdev, skb, data, data_len, (unsigned long *)&cb->args[5]); nla_nest_end(skb, vendor_data); if (err == -ENOBUFS || err == -ENOENT) { genlmsg_cancel(skb, hdr); break; } else if (err) { genlmsg_cancel(skb, hdr); goto out; } genlmsg_end(skb, hdr); } err = skb->len; out: rtnl_unlock(); return err; } struct sk_buff *__cfg80211_alloc_reply_skb(struct wiphy *wiphy, enum nl80211_commands cmd, enum nl80211_attrs attr, int approxlen) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); if (WARN_ON(!rdev->cur_cmd_info)) return NULL; return __cfg80211_alloc_vendor_skb(rdev, NULL, approxlen, genl_info_snd_portid(rdev->cur_cmd_info), rdev->cur_cmd_info->snd_seq, cmd, attr, NULL, GFP_KERNEL); } EXPORT_SYMBOL(__cfg80211_alloc_reply_skb); int cfg80211_vendor_cmd_reply(struct sk_buff *skb) { struct cfg80211_registered_device *rdev = ((void **)skb->cb)[0]; void *hdr = ((void **)skb->cb)[1]; struct nlattr *data = ((void **)skb->cb)[2]; /* clear CB data for netlink core to own from now on */ memset(skb->cb, 0, sizeof(skb->cb)); if 
(WARN_ON(!rdev->cur_cmd_info)) { kfree_skb(skb); return -EINVAL; } nla_nest_end(skb, data); genlmsg_end(skb, hdr); return genlmsg_reply(skb, rdev->cur_cmd_info); } EXPORT_SYMBOL_GPL(cfg80211_vendor_cmd_reply); unsigned int cfg80211_vendor_cmd_get_sender(struct wiphy *wiphy) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); if (WARN_ON(!rdev->cur_cmd_info)) return 0; return genl_info_snd_portid(rdev->cur_cmd_info); } EXPORT_SYMBOL_GPL(cfg80211_vendor_cmd_get_sender); static int nl80211_set_qos_map(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct cfg80211_qos_map *qos_map = NULL; struct net_device *dev = info->user_ptr[1]; u8 *pos, len, num_des, des_len, des; int ret; if (!rdev->ops->set_qos_map) return -EOPNOTSUPP; if (info->attrs[NL80211_ATTR_QOS_MAP]) { pos = nla_data(info->attrs[NL80211_ATTR_QOS_MAP]); len = nla_len(info->attrs[NL80211_ATTR_QOS_MAP]); if (len % 2 || len < IEEE80211_QOS_MAP_LEN_MIN || len > IEEE80211_QOS_MAP_LEN_MAX) return -EINVAL; qos_map = kzalloc(sizeof(struct cfg80211_qos_map), GFP_KERNEL); if (!qos_map) return -ENOMEM; num_des = (len - IEEE80211_QOS_MAP_LEN_MIN) >> 1; if (num_des) { des_len = num_des * sizeof(struct cfg80211_dscp_exception); memcpy(qos_map->dscp_exception, pos, des_len); qos_map->num_des = num_des; for (des = 0; des < num_des; des++) { if (qos_map->dscp_exception[des].up > 7) { kfree(qos_map); return -EINVAL; } } pos += des_len; } memcpy(qos_map->up, pos, IEEE80211_QOS_MAP_LEN_MIN); } wdev_lock(dev->ieee80211_ptr); ret = nl80211_key_allowed(dev->ieee80211_ptr); if (!ret) ret = rdev_set_qos_map(rdev, dev, qos_map); wdev_unlock(dev->ieee80211_ptr); kfree(qos_map); return ret; } static int nl80211_add_tx_ts(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; struct wireless_dev *wdev = dev->ieee80211_ptr; const u8 *peer; u8 tsid, up; u16 admitted_time = 0; int err; if (!(rdev->wiphy.features & NL80211_FEATURE_SUPPORTS_WMM_ADMISSION)) return -EOPNOTSUPP; if (!info->attrs[NL80211_ATTR_TSID] || !info->attrs[NL80211_ATTR_MAC] || !info->attrs[NL80211_ATTR_USER_PRIO]) return -EINVAL; tsid = nla_get_u8(info->attrs[NL80211_ATTR_TSID]); up = nla_get_u8(info->attrs[NL80211_ATTR_USER_PRIO]); /* WMM uses TIDs 0-7 even for TSPEC */ if (tsid >= IEEE80211_FIRST_TSPEC_TSID) { /* TODO: handle 802.11 TSPEC/admission control * need more attributes for that (e.g. 
BA session requirement); * change the WMM admission test above to allow both then */ return -EINVAL; } peer = nla_data(info->attrs[NL80211_ATTR_MAC]); if (info->attrs[NL80211_ATTR_ADMITTED_TIME]) { admitted_time = nla_get_u16(info->attrs[NL80211_ATTR_ADMITTED_TIME]); if (!admitted_time) return -EINVAL; } wdev_lock(wdev); switch (wdev->iftype) { case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_P2P_CLIENT: if (wdev->current_bss) break; err = -ENOTCONN; goto out; default: err = -EOPNOTSUPP; goto out; } err = rdev_add_tx_ts(rdev, dev, tsid, peer, up, admitted_time); out: wdev_unlock(wdev); return err; } static int nl80211_del_tx_ts(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; struct wireless_dev *wdev = dev->ieee80211_ptr; const u8 *peer; u8 tsid; int err; if (!info->attrs[NL80211_ATTR_TSID] || !info->attrs[NL80211_ATTR_MAC]) return -EINVAL; tsid = nla_get_u8(info->attrs[NL80211_ATTR_TSID]); peer = nla_data(info->attrs[NL80211_ATTR_MAC]); wdev_lock(wdev); err = rdev_del_tx_ts(rdev, dev, tsid, peer); wdev_unlock(wdev); return err; } static int nl80211_tdls_channel_switch(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_chan_def chandef = {}; const u8 *addr; u8 oper_class; int err; if (!rdev->ops->tdls_channel_switch || !(rdev->wiphy.features & NL80211_FEATURE_TDLS_CHANNEL_SWITCH)) return -EOPNOTSUPP; switch (dev->ieee80211_ptr->iftype) { case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_P2P_CLIENT: break; default: return -EOPNOTSUPP; } if (!info->attrs[NL80211_ATTR_MAC] || !info->attrs[NL80211_ATTR_OPER_CLASS]) return -EINVAL; err = nl80211_parse_chandef(rdev, info, &chandef); if (err) return err; /* * Don't allow wide channels on the 2.4 GHz band, as per IEEE802.11-2012 * section 10.22.6.2.1. Disallow 5/10 MHz channels as well for now, as the * specification does not define operation for them.
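 * Hence only 20 MHz (non-HT and HT) widths are accepted on the 2.4 GHz band below.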
*/ if (chandef.chan->band == NL80211_BAND_2GHZ && chandef.width != NL80211_CHAN_WIDTH_20_NOHT && chandef.width != NL80211_CHAN_WIDTH_20) return -EINVAL; /* we will be active on the TDLS link */ if (!cfg80211_reg_can_beacon_relax(&rdev->wiphy, &chandef, wdev->iftype)) return -EINVAL; /* don't allow switching to DFS channels */ if (cfg80211_chandef_dfs_required(wdev->wiphy, &chandef, wdev->iftype)) return -EINVAL; addr = nla_data(info->attrs[NL80211_ATTR_MAC]); oper_class = nla_get_u8(info->attrs[NL80211_ATTR_OPER_CLASS]); wdev_lock(wdev); err = rdev_tdls_channel_switch(rdev, dev, addr, oper_class, &chandef); wdev_unlock(wdev); return err; } static int nl80211_tdls_cancel_channel_switch(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; struct wireless_dev *wdev = dev->ieee80211_ptr; const u8 *addr; if (!rdev->ops->tdls_channel_switch || !rdev->ops->tdls_cancel_channel_switch || !(rdev->wiphy.features & NL80211_FEATURE_TDLS_CHANNEL_SWITCH)) return -EOPNOTSUPP; switch (dev->ieee80211_ptr->iftype) { case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_P2P_CLIENT: break; default: return -EOPNOTSUPP; } if (!info->attrs[NL80211_ATTR_MAC]) return -EINVAL; addr = nla_data(info->attrs[NL80211_ATTR_MAC]); wdev_lock(wdev); rdev_tdls_cancel_channel_switch(rdev, dev, addr); wdev_unlock(wdev); return 0; } static int nl80211_set_multicast_to_unicast(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; struct wireless_dev *wdev = dev->ieee80211_ptr; const struct nlattr *nla; bool enabled; if (!rdev->ops->set_multicast_to_unicast) return -EOPNOTSUPP; if (wdev->iftype != NL80211_IFTYPE_AP && wdev->iftype != NL80211_IFTYPE_P2P_GO) return -EOPNOTSUPP; nla = info->attrs[NL80211_ATTR_MULTICAST_TO_UNICAST_ENABLED]; enabled = nla_get_flag(nla); return rdev_set_multicast_to_unicast(rdev, dev, enabled); } static int nl80211_set_pmk(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_pmk_conf pmk_conf = {}; int ret; if (wdev->iftype != NL80211_IFTYPE_STATION && wdev->iftype != NL80211_IFTYPE_P2P_CLIENT) return -EOPNOTSUPP; if (!wiphy_ext_feature_isset(&rdev->wiphy, NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_1X)) return -EOPNOTSUPP; if (!info->attrs[NL80211_ATTR_MAC] || !info->attrs[NL80211_ATTR_PMK]) return -EINVAL; wdev_lock(wdev); if (!wdev->current_bss) { ret = -ENOTCONN; goto out; } pmk_conf.aa = nla_data(info->attrs[NL80211_ATTR_MAC]); if (memcmp(pmk_conf.aa, wdev->current_bss->pub.bssid, ETH_ALEN)) { ret = -EINVAL; goto out; } pmk_conf.pmk = nla_data(info->attrs[NL80211_ATTR_PMK]); pmk_conf.pmk_len = nla_len(info->attrs[NL80211_ATTR_PMK]); if (pmk_conf.pmk_len != WLAN_PMK_LEN && pmk_conf.pmk_len != WLAN_PMK_LEN_SUITE_B_192) { ret = -EINVAL; goto out; } if (info->attrs[NL80211_ATTR_PMKR0_NAME]) { int r0_name_len = nla_len(info->attrs[NL80211_ATTR_PMKR0_NAME]); if (r0_name_len != WLAN_PMK_NAME_LEN) { ret = -EINVAL; goto out; } pmk_conf.pmk_r0_name = nla_data(info->attrs[NL80211_ATTR_PMKR0_NAME]); } ret = rdev_set_pmk(rdev, dev, &pmk_conf); out: wdev_unlock(wdev); return ret; } static int nl80211_del_pmk(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; struct 
wireless_dev *wdev = dev->ieee80211_ptr; const u8 *aa; int ret; if (wdev->iftype != NL80211_IFTYPE_STATION && wdev->iftype != NL80211_IFTYPE_P2P_CLIENT) return -EOPNOTSUPP; if (!wiphy_ext_feature_isset(&rdev->wiphy, NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_1X)) return -EOPNOTSUPP; if (!info->attrs[NL80211_ATTR_MAC]) return -EINVAL; wdev_lock(wdev); aa = nla_data(info->attrs[NL80211_ATTR_MAC]); ret = rdev_del_pmk(rdev, dev, aa); wdev_unlock(wdev); return ret; } static int nl80211_external_auth(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; struct cfg80211_external_auth_params params; if (!rdev->ops->external_auth) return -EOPNOTSUPP; if (!info->attrs[NL80211_ATTR_SSID]) return -EINVAL; if (!info->attrs[NL80211_ATTR_BSSID]) return -EINVAL; if (!info->attrs[NL80211_ATTR_STATUS_CODE]) return -EINVAL; if ((info->attrs[NL80211_ATTR_PMK] && !info->attrs[NL80211_ATTR_PMKID]) || (info->attrs[NL80211_ATTR_PMKID] && !info->attrs[NL80211_ATTR_PMK])) return -EINVAL; memset(&params, 0, sizeof(params)); params.ssid.ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]); if (params.ssid.ssid_len == 0 || params.ssid.ssid_len > IEEE80211_MAX_SSID_LEN) return -EINVAL; memcpy(params.ssid.ssid, nla_data(info->attrs[NL80211_ATTR_SSID]), params.ssid.ssid_len); memcpy(params.bssid, nla_data(info->attrs[NL80211_ATTR_BSSID]), ETH_ALEN); params.status = nla_get_u16(info->attrs[NL80211_ATTR_STATUS_CODE]); if (info->attrs[NL80211_ATTR_PMK] && info->attrs[NL80211_ATTR_PMKID]) { params.pmk_len = nla_len(info->attrs[NL80211_ATTR_PMK]); params.pmk = nla_data(info->attrs[NL80211_ATTR_PMK]); params.pmkid = nla_data(info->attrs[NL80211_ATTR_PMKID]); } return rdev_external_auth(rdev, dev, &params); } static int nl80211_tx_control_port(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; struct wireless_dev *wdev = dev->ieee80211_ptr; const u8 *buf; size_t len; u8 *dest; u16 proto; bool noencrypt; int err; if (!wiphy_ext_feature_isset(&rdev->wiphy, NL80211_EXT_FEATURE_CONTROL_PORT_OVER_NL80211)) return -EOPNOTSUPP; if (!rdev->ops->tx_control_port) return -EOPNOTSUPP; if (!info->attrs[NL80211_ATTR_FRAME] || !info->attrs[NL80211_ATTR_MAC] || !info->attrs[NL80211_ATTR_CONTROL_PORT_ETHERTYPE]) { GENL_SET_ERR_MSG(info, "Frame, MAC or ethertype missing"); return -EINVAL; } wdev_lock(wdev); switch (wdev->iftype) { case NL80211_IFTYPE_AP: case NL80211_IFTYPE_P2P_GO: case NL80211_IFTYPE_MESH_POINT: break; case NL80211_IFTYPE_ADHOC: case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_P2P_CLIENT: if (wdev->current_bss) break; err = -ENOTCONN; goto out; default: err = -EOPNOTSUPP; goto out; } wdev_unlock(wdev); buf = nla_data(info->attrs[NL80211_ATTR_FRAME]); len = nla_len(info->attrs[NL80211_ATTR_FRAME]); dest = nla_data(info->attrs[NL80211_ATTR_MAC]); proto = nla_get_u16(info->attrs[NL80211_ATTR_CONTROL_PORT_ETHERTYPE]); noencrypt = nla_get_flag(info->attrs[NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT]); return rdev_tx_control_port(rdev, dev, buf, len, dest, cpu_to_be16(proto), noencrypt); out: wdev_unlock(wdev); return err; } static int nl80211_get_ftm_responder_stats(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_ftm_responder_stats ftm_stats = {}; struct sk_buff *msg; void *hdr; struct nlattr
*ftm_stats_attr; int err; if (wdev->iftype != NL80211_IFTYPE_AP || !wdev->beacon_interval) return -EOPNOTSUPP; err = rdev_get_ftm_responder_stats(rdev, dev, &ftm_stats); if (err) return err; if (!ftm_stats.filled) return -ENODATA; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; hdr = nl80211hdr_put(msg, genl_info_snd_portid(info), info->snd_seq, 0, NL80211_CMD_GET_FTM_RESPONDER_STATS); if (!hdr) goto nla_put_failure; if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex)) goto nla_put_failure; ftm_stats_attr = nla_nest_start(msg, NL80211_ATTR_FTM_RESPONDER_STATS); if (!ftm_stats_attr) goto nla_put_failure; #define SET_FTM(field, name, type) \ do { if ((ftm_stats.filled & BIT(NL80211_FTM_STATS_ ## name)) && \ nla_put_ ## type(msg, NL80211_FTM_STATS_ ## name, \ ftm_stats.field)) \ goto nla_put_failure; } while (0) #define SET_FTM_U64(field, name) \ do { if ((ftm_stats.filled & BIT(NL80211_FTM_STATS_ ## name)) && \ nla_put_u64_64bit(msg, NL80211_FTM_STATS_ ## name, \ ftm_stats.field, NL80211_FTM_STATS_PAD)) \ goto nla_put_failure; } while (0) SET_FTM(success_num, SUCCESS_NUM, u32); SET_FTM(partial_num, PARTIAL_NUM, u32); SET_FTM(failed_num, FAILED_NUM, u32); SET_FTM(asap_num, ASAP_NUM, u32); SET_FTM(non_asap_num, NON_ASAP_NUM, u32); SET_FTM_U64(total_duration_ms, TOTAL_DURATION_MSEC); SET_FTM(unknown_triggers_num, UNKNOWN_TRIGGERS_NUM, u32); SET_FTM(reschedule_requests_num, RESCHEDULE_REQUESTS_NUM, u32); SET_FTM(out_of_window_triggers_num, OUT_OF_WINDOW_TRIGGERS_NUM, u32); #undef SET_FTM nla_nest_end(msg, ftm_stats_attr); genlmsg_end(msg, hdr); return genlmsg_reply(msg, info); nla_put_failure: nlmsg_free(msg); return -ENOBUFS; } #define NL80211_FLAG_NEED_WIPHY 0x01 #define NL80211_FLAG_NEED_NETDEV 0x02 #define NL80211_FLAG_NEED_RTNL 0x04 #define NL80211_FLAG_CHECK_NETDEV_UP 0x08 #define NL80211_FLAG_NEED_NETDEV_UP (NL80211_FLAG_NEED_NETDEV |\ NL80211_FLAG_CHECK_NETDEV_UP) #define NL80211_FLAG_NEED_WDEV 0x10 /* If a netdev is associated, it must be UP, P2P must be started */ #define NL80211_FLAG_NEED_WDEV_UP (NL80211_FLAG_NEED_WDEV |\ NL80211_FLAG_CHECK_NETDEV_UP) #define NL80211_FLAG_CLEAR_SKB 0x20 static int nl80211_pre_doit(__genl_const struct genl_ops *ops, struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev; struct wireless_dev *wdev; struct net_device *dev; bool rtnl = ops->internal_flags & NL80211_FLAG_NEED_RTNL; #ifdef CPTCFG_REJECT_NONUPSTREAM_NL80211 if (info->genlhdr->cmd >= __NL80211_CMD_NONUPSTREAM_START) return -EOPNOTSUPP; #endif if (rtnl) rtnl_lock(); if (ops->internal_flags & NL80211_FLAG_NEED_WIPHY) { rdev = cfg80211_get_dev_from_info(genl_info_net(info), info); if (IS_ERR(rdev)) { if (rtnl) rtnl_unlock(); return PTR_ERR(rdev); } info->user_ptr[0] = rdev; } else if (ops->internal_flags & NL80211_FLAG_NEED_NETDEV || ops->internal_flags & NL80211_FLAG_NEED_WDEV) { ASSERT_RTNL(); wdev = __cfg80211_wdev_from_attrs(genl_info_net(info), info->attrs); if (IS_ERR(wdev)) { if (rtnl) rtnl_unlock(); return PTR_ERR(wdev); } dev = wdev->netdev; rdev = wiphy_to_rdev(wdev->wiphy); if (ops->internal_flags & NL80211_FLAG_NEED_NETDEV) { if (!dev) { if (rtnl) rtnl_unlock(); return -EINVAL; } info->user_ptr[1] = dev; } else { info->user_ptr[1] = wdev; } if (ops->internal_flags & NL80211_FLAG_CHECK_NETDEV_UP && !wdev_running(wdev)) { if (rtnl) rtnl_unlock(); return -ENETDOWN; } if (dev) dev_hold(dev); info->user_ptr[0] = rdev; } return 0; } static void nl80211_post_doit(__genl_const struct genl_ops *ops, struct sk_buff *skb, struct
genl_info *info) { if (info->user_ptr[1]) { if (ops->internal_flags & NL80211_FLAG_NEED_WDEV) { struct wireless_dev *wdev = info->user_ptr[1]; if (wdev->netdev) dev_put(wdev->netdev); } else { dev_put(info->user_ptr[1]); } } if (ops->internal_flags & NL80211_FLAG_NEED_RTNL) rtnl_unlock(); /* If needed, clear the netlink message payload from the SKB * as it might contain key data that shouldn't stick around on * the heap after the SKB is freed. The netlink message header * is still needed for further processing, so leave it intact. */ if (ops->internal_flags & NL80211_FLAG_CLEAR_SKB) { struct nlmsghdr *nlh = nlmsg_hdr(skb); memset(nlmsg_data(nlh), 0, nlmsg_len(nlh)); } } static __genl_const struct genl_ops nl80211_ops[] = { { .cmd = NL80211_CMD_GET_WIPHY, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_get_wiphy, .dumpit = nl80211_dump_wiphy, .done = nl80211_dump_wiphy_done, /* can be retrieved by unprivileged users */ .internal_flags = NL80211_FLAG_NEED_WIPHY | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_SET_WIPHY, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_set_wiphy, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_GET_INTERFACE, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_get_interface, .dumpit = nl80211_dump_interface, /* can be retrieved by unprivileged users */ .internal_flags = NL80211_FLAG_NEED_WDEV | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_SET_INTERFACE, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_set_interface, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_NEW_INTERFACE, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_new_interface, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_WIPHY | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_DEL_INTERFACE, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_del_interface, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_WDEV | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_GET_KEY, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_get_key, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_SET_KEY, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_set_key, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL | NL80211_FLAG_CLEAR_SKB, }, { .cmd = NL80211_CMD_NEW_KEY, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_new_key, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL | NL80211_FLAG_CLEAR_SKB, }, { .cmd = NL80211_CMD_DEL_KEY, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_del_key, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, 
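/* AP and beacon management commands: GENL_UNS_ADMIN_PERM plus a running netdev and the RTNL */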
{ .cmd = NL80211_CMD_SET_BEACON, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .flags = GENL_UNS_ADMIN_PERM, .doit = nl80211_set_beacon, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_START_AP, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .flags = GENL_UNS_ADMIN_PERM, .doit = nl80211_start_ap, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_STOP_AP, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .flags = GENL_UNS_ADMIN_PERM, .doit = nl80211_stop_ap, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_GET_STATION, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_get_station, .dumpit = nl80211_dump_station, .internal_flags = NL80211_FLAG_NEED_NETDEV | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_SET_STATION, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_set_station, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_NEW_STATION, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_new_station, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_DEL_STATION, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_del_station, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_GET_MPATH, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_get_mpath, .dumpit = nl80211_dump_mpath, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_GET_MPP, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_get_mpp, .dumpit = nl80211_dump_mpp, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_SET_MPATH, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_set_mpath, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_NEW_MPATH, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_new_mpath, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_DEL_MPATH, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_del_mpath, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_SET_BSS, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_set_bss, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, { .cmd = 
NL80211_CMD_GET_REG, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_get_reg_do, .dumpit = nl80211_get_reg_dump, .internal_flags = NL80211_FLAG_NEED_RTNL, /* can be retrieved by unprivileged users */ }, #ifdef CPTCFG_CFG80211_CRDA_SUPPORT { .cmd = NL80211_CMD_SET_REG, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_set_reg, .flags = GENL_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_RTNL, }, #endif { .cmd = NL80211_CMD_REQ_SET_REG, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_req_set_reg, .flags = GENL_ADMIN_PERM, }, { .cmd = NL80211_CMD_RELOAD_REGDB, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_reload_regdb, .flags = GENL_ADMIN_PERM, }, { .cmd = NL80211_CMD_GET_MESH_CONFIG, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_get_mesh_config, /* can be retrieved by unprivileged users */ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_SET_MESH_CONFIG, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_update_mesh_config, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_TRIGGER_SCAN, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_trigger_scan, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_WDEV_UP | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_ABORT_SCAN, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_abort_scan, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_WDEV_UP | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_GET_SCAN, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .dumpit = nl80211_dump_scan, }, { .cmd = NL80211_CMD_START_SCHED_SCAN, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_start_sched_scan, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_STOP_SCHED_SCAN, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_stop_sched_scan, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_AUTHENTICATE, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_authenticate, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL | NL80211_FLAG_CLEAR_SKB, }, { .cmd = NL80211_CMD_ASSOCIATE, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_associate, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_DEAUTHENTICATE, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_deauthenticate, .flags = 
GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_DISASSOCIATE, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_disassociate, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_JOIN_IBSS, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_join_ibss, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_LEAVE_IBSS, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_leave_ibss, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, #ifdef CPTCFG_NL80211_TESTMODE { .cmd = NL80211_CMD_TESTMODE, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_testmode_do, .dumpit = nl80211_testmode_dump, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_WIPHY | NL80211_FLAG_NEED_RTNL, }, #endif { .cmd = NL80211_CMD_CONNECT, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_connect, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_UPDATE_CONNECT_PARAMS, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_update_connect_params, .flags = GENL_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_DISCONNECT, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_disconnect, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_SET_WIPHY_NETNS, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_wiphy_netns, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_WIPHY | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_GET_SURVEY, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .dumpit = nl80211_dump_survey, }, { .cmd = NL80211_CMD_SET_PMKSA, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_setdel_pmksa, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_DEL_PMKSA, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_setdel_pmksa, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_FLUSH_PMKSA, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_flush_pmksa, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_REMAIN_ON_CHANNEL, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_remain_on_channel, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = 
NL80211_FLAG_NEED_WDEV_UP | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_CANCEL_REMAIN_ON_CHANNEL, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_cancel_remain_on_channel, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_WDEV_UP | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_SET_TX_BITRATE_MASK, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_set_tx_bitrate_mask, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_REGISTER_FRAME, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_register_mgmt, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_WDEV | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_FRAME, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_tx_mgmt, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_WDEV_UP | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_FRAME_WAIT_CANCEL, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_tx_mgmt_cancel_wait, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_WDEV_UP | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_SET_POWER_SAVE, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_set_power_save, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_GET_POWER_SAVE, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_get_power_save, /* can be retrieved by unprivileged users */ .internal_flags = NL80211_FLAG_NEED_NETDEV | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_SET_CQM, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_set_cqm, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_SET_CHANNEL, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_set_channel, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_SET_WDS_PEER, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_set_wds_peer, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_JOIN_MESH, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_join_mesh, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_LEAVE_MESH, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_leave_mesh, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_JOIN_OCB, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_join_ocb, .flags = GENL_UNS_ADMIN_PERM, 
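/* OCB (802.11p, outside the context of a BSS) mode */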
.internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_LEAVE_OCB, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_leave_ocb, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, #ifdef CONFIG_PM { .cmd = NL80211_CMD_GET_WOWLAN, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_get_wowlan, /* can be retrieved by unprivileged users */ .internal_flags = NL80211_FLAG_NEED_WIPHY | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_SET_WOWLAN, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_set_wowlan, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_WIPHY | NL80211_FLAG_NEED_RTNL, }, #endif { .cmd = NL80211_CMD_SET_REKEY_OFFLOAD, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_set_rekey_data, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL | NL80211_FLAG_CLEAR_SKB, }, { .cmd = NL80211_CMD_TDLS_MGMT, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_tdls_mgmt, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_TDLS_OPER, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_tdls_oper, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_UNEXPECTED_FRAME, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_register_unexpected_frame, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_PROBE_CLIENT, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_probe_client, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_REGISTER_BEACONS, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_register_beacons, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_WIPHY | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_SET_NOACK_MAP, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_set_noack_map, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_START_P2P_DEVICE, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_start_p2p_device, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_WDEV | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_STOP_P2P_DEVICE, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_stop_p2p_device, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_WDEV_UP | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_START_NAN, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 
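/* the NAN commands (start/stop/add/del/change config) all require GENL_ADMIN_PERM */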
#endif .doit = nl80211_start_nan, .flags = GENL_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_WDEV | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_STOP_NAN, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_stop_nan, .flags = GENL_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_WDEV_UP | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_ADD_NAN_FUNCTION, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_nan_add_func, .flags = GENL_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_WDEV_UP | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_DEL_NAN_FUNCTION, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_nan_del_func, .flags = GENL_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_WDEV_UP | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_CHANGE_NAN_CONFIG, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_nan_change_config, .flags = GENL_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_WDEV_UP | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_SET_MCAST_RATE, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_set_mcast_rate, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_SET_MAC_ACL, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_set_mac_acl, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_RADAR_DETECT, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_start_radar_detection, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_GET_PROTOCOL_FEATURES, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_get_protocol_features, }, { .cmd = NL80211_CMD_UPDATE_FT_IES, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_update_ft_ies, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_CRIT_PROTOCOL_START, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_crit_protocol_start, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_WDEV_UP | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_CRIT_PROTOCOL_STOP, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_crit_protocol_stop, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_WDEV_UP | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_GET_COALESCE, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_get_coalesce, .internal_flags = NL80211_FLAG_NEED_WIPHY | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_SET_COALESCE, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_set_coalesce, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = 
NL80211_FLAG_NEED_WIPHY | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_CHANNEL_SWITCH, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_channel_switch, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_VENDOR, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_vendor_cmd, .dumpit = nl80211_vendor_cmd_dump, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_WIPHY | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_SET_QOS_MAP, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_set_qos_map, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_ADD_TX_TS, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_add_tx_ts, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_DEL_TX_TS, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_del_tx_ts, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_TDLS_CHANNEL_SWITCH, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_tdls_channel_switch, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_TDLS_CANCEL_CHANNEL_SWITCH, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_tdls_cancel_channel_switch, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_SET_MULTICAST_TO_UNICAST, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_set_multicast_to_unicast, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_SET_PMK, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_set_pmk, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_DEL_PMK, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_del_pmk, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_EXTERNAL_AUTH, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_external_auth, .flags = GENL_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_CONTROL_PORT_FRAME, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_tx_control_port, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_GET_FTM_RESPONDER_STATS, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_get_ftm_responder_stats, .internal_flags = 
NL80211_FLAG_NEED_NETDEV | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_PEER_MEASUREMENT_START, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_pmsr_start, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_WDEV_UP | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_NOTIFY_RADAR, #if LINUX_VERSION_IS_GEQ(5,2,0) .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, #endif .doit = nl80211_notify_radar_detection, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, }; static struct genl_family nl80211_fam __genl_ro_after_init = { .name = NL80211_GENL_NAME, /* have users key off the name instead */ .hdrsize = 0, /* no private header */ .version = 1, /* no particular meaning now */ .maxattr = NL80211_ATTR_MAX, .policy = nl80211_policy, .netnsok = true, .pre_doit = nl80211_pre_doit, .post_doit = nl80211_post_doit, .module = THIS_MODULE, .ops = nl80211_ops, .n_ops = ARRAY_SIZE(nl80211_ops), .mcgrps = nl80211_mcgrps, .n_mcgrps = ARRAY_SIZE(nl80211_mcgrps), }; /* notification functions */ void nl80211_notify_wiphy(struct cfg80211_registered_device *rdev, enum nl80211_commands cmd) { struct sk_buff *msg; struct nl80211_dump_wiphy_state state = {}; WARN_ON(cmd != NL80211_CMD_NEW_WIPHY && cmd != NL80211_CMD_DEL_WIPHY); msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return; if (nl80211_send_wiphy(rdev, cmd, msg, 0, 0, 0, &state) < 0) { nlmsg_free(msg); return; } genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, NL80211_MCGRP_CONFIG, GFP_KERNEL); } void nl80211_notify_iface(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, enum nl80211_commands cmd) { struct sk_buff *msg; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return; if (nl80211_send_iface(msg, 0, 0, 0, rdev, wdev, cmd) < 0) { nlmsg_free(msg); return; } genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, NL80211_MCGRP_CONFIG, GFP_KERNEL); } static int nl80211_add_scan_req(struct sk_buff *msg, struct cfg80211_registered_device *rdev) { struct cfg80211_scan_request *req = rdev->scan_req; struct nlattr *nest; int i; if (WARN_ON(!req)) return 0; nest = nla_nest_start(msg, NL80211_ATTR_SCAN_SSIDS); if (!nest) goto nla_put_failure; for (i = 0; i < req->n_ssids; i++) { if (nla_put(msg, i, req->ssids[i].ssid_len, req->ssids[i].ssid)) goto nla_put_failure; } nla_nest_end(msg, nest); nest = nla_nest_start(msg, NL80211_ATTR_SCAN_FREQUENCIES); if (!nest) goto nla_put_failure; for (i = 0; i < req->n_channels; i++) { if (nla_put_u32(msg, i, req->channels[i]->center_freq)) goto nla_put_failure; } nla_nest_end(msg, nest); if (req->ie && nla_put(msg, NL80211_ATTR_IE, req->ie_len, req->ie)) goto nla_put_failure; if (req->flags && nla_put_u32(msg, NL80211_ATTR_SCAN_FLAGS, req->flags)) goto nla_put_failure; if (req->info.scan_start_tsf && (nla_put_u64_64bit(msg, NL80211_ATTR_SCAN_START_TIME_TSF, req->info.scan_start_tsf, NL80211_BSS_PAD) || nla_put(msg, NL80211_ATTR_SCAN_START_TIME_TSF_BSSID, ETH_ALEN, req->info.tsf_bssid))) goto nla_put_failure; return 0; nla_put_failure: return -ENOBUFS; } static int nl80211_prep_scan_msg(struct sk_buff *msg, struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, u32 portid, u32 seq, int flags, u32 cmd) { void *hdr; hdr = nl80211hdr_put(msg, portid, seq, flags, cmd); if (!hdr) return -1; if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || (wdev->netdev && nla_put_u32(msg, 
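/*
 * Idiom used throughout this file (a sketch, assuming the standard kernel
 * netlink attribute API): every nla_put*() helper returns 0 on success and
 * a negative error (-EMSGSIZE) once the skb tailroom is exhausted, so
 * attribute puts can be chained with || and funneled into a single error
 * label, e.g.:
 *
 *	if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
 *	    nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, addr))
 *		goto nla_put_failure;
 */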
NL80211_ATTR_IFINDEX, wdev->netdev->ifindex)) || nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev), NL80211_ATTR_PAD)) goto nla_put_failure; /* ignore errors and send incomplete event anyway */ nl80211_add_scan_req(msg, rdev); genlmsg_end(msg, hdr); return 0; nla_put_failure: genlmsg_cancel(msg, hdr); return -EMSGSIZE; } static int nl80211_prep_sched_scan_msg(struct sk_buff *msg, struct cfg80211_sched_scan_request *req, u32 cmd) { void *hdr; hdr = nl80211hdr_put(msg, 0, 0, 0, cmd); if (!hdr) return -1; if (nla_put_u32(msg, NL80211_ATTR_WIPHY, wiphy_to_rdev(req->wiphy)->wiphy_idx) || nla_put_u32(msg, NL80211_ATTR_IFINDEX, req->dev->ifindex) || nla_put_u64_64bit(msg, NL80211_ATTR_COOKIE, req->reqid, NL80211_ATTR_PAD)) goto nla_put_failure; genlmsg_end(msg, hdr); return 0; nla_put_failure: genlmsg_cancel(msg, hdr); return -EMSGSIZE; } void nl80211_send_scan_start(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev) { struct sk_buff *msg; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return; if (nl80211_prep_scan_msg(msg, rdev, wdev, 0, 0, 0, NL80211_CMD_TRIGGER_SCAN) < 0) { nlmsg_free(msg); return; } genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, NL80211_MCGRP_SCAN, GFP_KERNEL); } struct sk_buff *nl80211_build_scan_msg(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, bool aborted) { struct sk_buff *msg; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return NULL; if (nl80211_prep_scan_msg(msg, rdev, wdev, 0, 0, 0, aborted ? NL80211_CMD_SCAN_ABORTED : NL80211_CMD_NEW_SCAN_RESULTS) < 0) { nlmsg_free(msg); return NULL; } return msg; } /* send message created by nl80211_build_scan_msg() */ void nl80211_send_scan_msg(struct cfg80211_registered_device *rdev, struct sk_buff *msg) { if (!msg) return; genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, NL80211_MCGRP_SCAN, GFP_KERNEL); } void nl80211_send_sched_scan(struct cfg80211_sched_scan_request *req, u32 cmd) { struct sk_buff *msg; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return; if (nl80211_prep_sched_scan_msg(msg, req, cmd) < 0) { nlmsg_free(msg); return; } genlmsg_multicast_netns(&nl80211_fam, wiphy_net(req->wiphy), msg, 0, NL80211_MCGRP_SCAN, GFP_KERNEL); } static bool nl80211_reg_change_event_fill(struct sk_buff *msg, struct regulatory_request *request) { /* Userspace can always count this one always being set */ if (nla_put_u8(msg, NL80211_ATTR_REG_INITIATOR, request->initiator)) goto nla_put_failure; if (request->alpha2[0] == '0' && request->alpha2[1] == '0') { if (nla_put_u8(msg, NL80211_ATTR_REG_TYPE, NL80211_REGDOM_TYPE_WORLD)) goto nla_put_failure; } else if (request->alpha2[0] == '9' && request->alpha2[1] == '9') { if (nla_put_u8(msg, NL80211_ATTR_REG_TYPE, NL80211_REGDOM_TYPE_CUSTOM_WORLD)) goto nla_put_failure; } else if ((request->alpha2[0] == '9' && request->alpha2[1] == '8') || request->intersect) { if (nla_put_u8(msg, NL80211_ATTR_REG_TYPE, NL80211_REGDOM_TYPE_INTERSECTION)) goto nla_put_failure; } else { if (nla_put_u8(msg, NL80211_ATTR_REG_TYPE, NL80211_REGDOM_TYPE_COUNTRY) || nla_put_string(msg, NL80211_ATTR_REG_ALPHA2, request->alpha2)) goto nla_put_failure; } if (request->wiphy_idx != WIPHY_IDX_INVALID) { struct wiphy *wiphy = wiphy_idx_to_wiphy(request->wiphy_idx); if (wiphy && nla_put_u32(msg, NL80211_ATTR_WIPHY, request->wiphy_idx)) goto nla_put_failure; if (wiphy && wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED && nla_put_flag(msg, NL80211_ATTR_WIPHY_SELF_MANAGED_REG)) goto 
nla_put_failure; } return true; nla_put_failure: return false; } /* * This can happen on global regulatory changes or device specific settings * based on custom regulatory domains. */ void nl80211_common_reg_change_event(enum nl80211_commands cmd_id, struct regulatory_request *request) { struct sk_buff *msg; void *hdr; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return; hdr = nl80211hdr_put(msg, 0, 0, 0, cmd_id); if (!hdr) { nlmsg_free(msg); return; } if (nl80211_reg_change_event_fill(msg, request) == false) goto nla_put_failure; genlmsg_end(msg, hdr); rcu_read_lock(); genlmsg_multicast_allns(&nl80211_fam, msg, 0, NL80211_MCGRP_REGULATORY, GFP_ATOMIC); rcu_read_unlock(); return; nla_put_failure: nlmsg_free(msg); } static void nl80211_send_mlme_event(struct cfg80211_registered_device *rdev, struct net_device *netdev, const u8 *buf, size_t len, enum nl80211_commands cmd, gfp_t gfp, int uapsd_queues, const u8 *req_ies, size_t req_ies_len) { struct sk_buff *msg; void *hdr; msg = nlmsg_new(100 + len + req_ies_len, gfp); if (!msg) return; hdr = nl80211hdr_put(msg, 0, 0, 0, cmd); if (!hdr) { nlmsg_free(msg); return; } if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) || nla_put(msg, NL80211_ATTR_FRAME, len, buf) || (req_ies && nla_put(msg, NL80211_ATTR_REQ_IE, req_ies_len, req_ies))) goto nla_put_failure; if (uapsd_queues >= 0) { struct nlattr *nla_wmm = nla_nest_start(msg, NL80211_ATTR_STA_WME); if (!nla_wmm) goto nla_put_failure; if (nla_put_u8(msg, NL80211_STA_WME_UAPSD_QUEUES, uapsd_queues)) goto nla_put_failure; nla_nest_end(msg, nla_wmm); } genlmsg_end(msg, hdr); genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, NL80211_MCGRP_MLME, gfp); return; nla_put_failure: nlmsg_free(msg); } void nl80211_send_rx_auth(struct cfg80211_registered_device *rdev, struct net_device *netdev, const u8 *buf, size_t len, gfp_t gfp) { nl80211_send_mlme_event(rdev, netdev, buf, len, NL80211_CMD_AUTHENTICATE, gfp, -1, NULL, 0); } void nl80211_send_rx_assoc(struct cfg80211_registered_device *rdev, struct net_device *netdev, const u8 *buf, size_t len, gfp_t gfp, int uapsd_queues, const u8 *req_ies, size_t req_ies_len) { nl80211_send_mlme_event(rdev, netdev, buf, len, NL80211_CMD_ASSOCIATE, gfp, uapsd_queues, req_ies, req_ies_len); } void nl80211_send_deauth(struct cfg80211_registered_device *rdev, struct net_device *netdev, const u8 *buf, size_t len, gfp_t gfp) { nl80211_send_mlme_event(rdev, netdev, buf, len, NL80211_CMD_DEAUTHENTICATE, gfp, -1, NULL, 0); } void nl80211_send_disassoc(struct cfg80211_registered_device *rdev, struct net_device *netdev, const u8 *buf, size_t len, gfp_t gfp) { nl80211_send_mlme_event(rdev, netdev, buf, len, NL80211_CMD_DISASSOCIATE, gfp, -1, NULL, 0); } void cfg80211_rx_unprot_mlme_mgmt(struct net_device *dev, const u8 *buf, size_t len) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct wiphy *wiphy = wdev->wiphy; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); const struct ieee80211_mgmt *mgmt = (void *)buf; u32 cmd; if (WARN_ON(len < 2)) return; if (ieee80211_is_deauth(mgmt->frame_control)) cmd = NL80211_CMD_UNPROT_DEAUTHENTICATE; else cmd = NL80211_CMD_UNPROT_DISASSOCIATE; trace_cfg80211_rx_unprot_mlme_mgmt(dev, buf, len); nl80211_send_mlme_event(rdev, dev, buf, len, cmd, GFP_ATOMIC, -1, NULL, 0); } EXPORT_SYMBOL(cfg80211_rx_unprot_mlme_mgmt); static void nl80211_send_mlme_timeout(struct cfg80211_registered_device *rdev, struct net_device *netdev, int cmd, 
const u8 *addr, gfp_t gfp) { struct sk_buff *msg; void *hdr; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); if (!msg) return; hdr = nl80211hdr_put(msg, 0, 0, 0, cmd); if (!hdr) { nlmsg_free(msg); return; } if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) || nla_put_flag(msg, NL80211_ATTR_TIMED_OUT) || nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, addr)) goto nla_put_failure; genlmsg_end(msg, hdr); genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, NL80211_MCGRP_MLME, gfp); return; nla_put_failure: nlmsg_free(msg); } void nl80211_send_auth_timeout(struct cfg80211_registered_device *rdev, struct net_device *netdev, const u8 *addr, gfp_t gfp) { nl80211_send_mlme_timeout(rdev, netdev, NL80211_CMD_AUTHENTICATE, addr, gfp); } void nl80211_send_assoc_timeout(struct cfg80211_registered_device *rdev, struct net_device *netdev, const u8 *addr, gfp_t gfp) { nl80211_send_mlme_timeout(rdev, netdev, NL80211_CMD_ASSOCIATE, addr, gfp); } void nl80211_send_connect_result(struct cfg80211_registered_device *rdev, struct net_device *netdev, struct cfg80211_connect_resp_params *cr, gfp_t gfp) { struct sk_buff *msg; void *hdr; msg = nlmsg_new(100 + cr->req_ie_len + cr->resp_ie_len + cr->fils.kek_len + cr->fils.pmk_len + (cr->fils.pmkid ? WLAN_PMKID_LEN : 0), gfp); if (!msg) return; hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_CONNECT); if (!hdr) { nlmsg_free(msg); return; } if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) || (cr->bssid && nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, cr->bssid)) || nla_put_u16(msg, NL80211_ATTR_STATUS_CODE, cr->status < 0 ? WLAN_STATUS_UNSPECIFIED_FAILURE : cr->status) || (cr->status < 0 && (nla_put_flag(msg, NL80211_ATTR_TIMED_OUT) || nla_put_u32(msg, NL80211_ATTR_TIMEOUT_REASON, cr->timeout_reason))) || (cr->req_ie && nla_put(msg, NL80211_ATTR_REQ_IE, cr->req_ie_len, cr->req_ie)) || (cr->resp_ie && nla_put(msg, NL80211_ATTR_RESP_IE, cr->resp_ie_len, cr->resp_ie)) || (cr->fils.update_erp_next_seq_num && nla_put_u16(msg, NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM, cr->fils.erp_next_seq_num)) || (cr->status == WLAN_STATUS_SUCCESS && ((cr->fils.kek && nla_put(msg, NL80211_ATTR_FILS_KEK, cr->fils.kek_len, cr->fils.kek)) || (cr->fils.pmk && nla_put(msg, NL80211_ATTR_PMK, cr->fils.pmk_len, cr->fils.pmk)) || (cr->fils.pmkid && nla_put(msg, NL80211_ATTR_PMKID, WLAN_PMKID_LEN, cr->fils.pmkid))))) goto nla_put_failure; genlmsg_end(msg, hdr); genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, NL80211_MCGRP_MLME, gfp); return; nla_put_failure: nlmsg_free(msg); } void nl80211_send_roamed(struct cfg80211_registered_device *rdev, struct net_device *netdev, struct cfg80211_roam_info *info, gfp_t gfp) { struct sk_buff *msg; void *hdr; const u8 *bssid = info->bss ? info->bss->bssid : info->bssid; msg = nlmsg_new(100 + info->req_ie_len + info->resp_ie_len + info->fils.kek_len + info->fils.pmk_len + (info->fils.pmkid ? 
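/*
 * Sizing note (an inference, not documented here): the fixed 100 bytes in
 * these nlmsg_new() calls is slack for the netlink/genetlink headers and
 * the small fixed attributes; the variable-length IEs, keys and PMKID are
 * added explicitly on top, so the nla_put() chain that follows should
 * never hit -EMSGSIZE.
 */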
WLAN_PMKID_LEN : 0), gfp); if (!msg) return; hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_ROAM); if (!hdr) { nlmsg_free(msg); return; } if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) || nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid) || (info->req_ie && nla_put(msg, NL80211_ATTR_REQ_IE, info->req_ie_len, info->req_ie)) || (info->resp_ie && nla_put(msg, NL80211_ATTR_RESP_IE, info->resp_ie_len, info->resp_ie)) || (info->fils.update_erp_next_seq_num && nla_put_u16(msg, NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM, info->fils.erp_next_seq_num)) || (info->fils.kek && nla_put(msg, NL80211_ATTR_FILS_KEK, info->fils.kek_len, info->fils.kek)) || (info->fils.pmk && nla_put(msg, NL80211_ATTR_PMK, info->fils.pmk_len, info->fils.pmk)) || (info->fils.pmkid && nla_put(msg, NL80211_ATTR_PMKID, WLAN_PMKID_LEN, info->fils.pmkid))) goto nla_put_failure; genlmsg_end(msg, hdr); genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, NL80211_MCGRP_MLME, gfp); return; nla_put_failure: nlmsg_free(msg); } void nl80211_send_port_authorized(struct cfg80211_registered_device *rdev, struct net_device *netdev, const u8 *bssid) { struct sk_buff *msg; void *hdr; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return; hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_PORT_AUTHORIZED); if (!hdr) { nlmsg_free(msg); return; } if (nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid)) goto nla_put_failure; genlmsg_end(msg, hdr); genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, NL80211_MCGRP_MLME, GFP_KERNEL); return; nla_put_failure: nlmsg_free(msg); } void nl80211_send_disconnected(struct cfg80211_registered_device *rdev, struct net_device *netdev, u16 reason, const u8 *ie, size_t ie_len, bool from_ap) { struct sk_buff *msg; void *hdr; msg = nlmsg_new(100 + ie_len, GFP_KERNEL); if (!msg) return; hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_DISCONNECT); if (!hdr) { nlmsg_free(msg); return; } if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) || (reason && nla_put_u16(msg, NL80211_ATTR_REASON_CODE, reason)) || (from_ap && nla_put_flag(msg, NL80211_ATTR_DISCONNECTED_BY_AP)) || (ie && nla_put(msg, NL80211_ATTR_IE, ie_len, ie))) goto nla_put_failure; genlmsg_end(msg, hdr); genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, NL80211_MCGRP_MLME, GFP_KERNEL); return; nla_put_failure: nlmsg_free(msg); } void nl80211_send_ibss_bssid(struct cfg80211_registered_device *rdev, struct net_device *netdev, const u8 *bssid, gfp_t gfp) { struct sk_buff *msg; void *hdr; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); if (!msg) return; hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_JOIN_IBSS); if (!hdr) { nlmsg_free(msg); return; } if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) || nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid)) goto nla_put_failure; genlmsg_end(msg, hdr); genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, NL80211_MCGRP_MLME, gfp); return; nla_put_failure: nlmsg_free(msg); } void cfg80211_notify_new_peer_candidate(struct net_device *dev, const u8 *addr, const u8 *ie, u8 ie_len, int sig_dbm, gfp_t gfp) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); struct sk_buff *msg; void *hdr; if (WARN_ON(wdev->iftype != NL80211_IFTYPE_MESH_POINT)) return; trace_cfg80211_notify_new_peer_candidate(dev, 
addr); msg = nlmsg_new(100 + ie_len, gfp); if (!msg) return; hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_NEW_PEER_CANDIDATE); if (!hdr) { nlmsg_free(msg); return; } if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) || nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, addr) || (ie_len && ie && nla_put(msg, NL80211_ATTR_IE, ie_len, ie)) || (sig_dbm && nla_put_u32(msg, NL80211_ATTR_RX_SIGNAL_DBM, sig_dbm))) goto nla_put_failure; genlmsg_end(msg, hdr); genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, NL80211_MCGRP_MLME, gfp); return; nla_put_failure: nlmsg_free(msg); } EXPORT_SYMBOL(cfg80211_notify_new_peer_candidate); void nl80211_michael_mic_failure(struct cfg80211_registered_device *rdev, struct net_device *netdev, const u8 *addr, enum nl80211_key_type key_type, int key_id, const u8 *tsc, gfp_t gfp) { struct sk_buff *msg; void *hdr; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); if (!msg) return; hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_MICHAEL_MIC_FAILURE); if (!hdr) { nlmsg_free(msg); return; } if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) || (addr && nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, addr)) || nla_put_u32(msg, NL80211_ATTR_KEY_TYPE, key_type) || (key_id != -1 && nla_put_u8(msg, NL80211_ATTR_KEY_IDX, key_id)) || (tsc && nla_put(msg, NL80211_ATTR_KEY_SEQ, 6, tsc))) goto nla_put_failure; genlmsg_end(msg, hdr); genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, NL80211_MCGRP_MLME, gfp); return; nla_put_failure: nlmsg_free(msg); } void nl80211_send_beacon_hint_event(struct wiphy *wiphy, struct ieee80211_channel *channel_before, struct ieee80211_channel *channel_after) { struct sk_buff *msg; void *hdr; struct nlattr *nl_freq; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); if (!msg) return; hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_REG_BEACON_HINT); if (!hdr) { nlmsg_free(msg); return; } /* * Since we are applying the beacon hint to a wiphy we know its * wiphy_idx is valid */ if (nla_put_u32(msg, NL80211_ATTR_WIPHY, get_wiphy_idx(wiphy))) goto nla_put_failure; /* Before */ nl_freq = nla_nest_start(msg, NL80211_ATTR_FREQ_BEFORE); if (!nl_freq) goto nla_put_failure; if (nl80211_msg_put_channel(msg, wiphy, channel_before, false)) goto nla_put_failure; nla_nest_end(msg, nl_freq); /* After */ nl_freq = nla_nest_start(msg, NL80211_ATTR_FREQ_AFTER); if (!nl_freq) goto nla_put_failure; if (nl80211_msg_put_channel(msg, wiphy, channel_after, false)) goto nla_put_failure; nla_nest_end(msg, nl_freq); genlmsg_end(msg, hdr); rcu_read_lock(); genlmsg_multicast_allns(&nl80211_fam, msg, 0, NL80211_MCGRP_REGULATORY, GFP_ATOMIC); rcu_read_unlock(); return; nla_put_failure: nlmsg_free(msg); } static void nl80211_send_remain_on_chan_event( int cmd, struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, u64 cookie, struct ieee80211_channel *chan, unsigned int duration, gfp_t gfp) { struct sk_buff *msg; void *hdr; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); if (!msg) return; hdr = nl80211hdr_put(msg, 0, 0, 0, cmd); if (!hdr) { nlmsg_free(msg); return; } if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || (wdev->netdev && nla_put_u32(msg, NL80211_ATTR_IFINDEX, wdev->netdev->ifindex)) || nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev), NL80211_ATTR_PAD) || nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, chan->center_freq) || nla_put_u32(msg, NL80211_ATTR_WIPHY_CHANNEL_TYPE, NL80211_CHAN_NO_HT) || 
nla_put_u64_64bit(msg, NL80211_ATTR_COOKIE, cookie, NL80211_ATTR_PAD)) goto nla_put_failure; if (cmd == NL80211_CMD_REMAIN_ON_CHANNEL && nla_put_u32(msg, NL80211_ATTR_DURATION, duration)) goto nla_put_failure; genlmsg_end(msg, hdr); genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, NL80211_MCGRP_MLME, gfp); return; nla_put_failure: nlmsg_free(msg); } void cfg80211_ready_on_channel(struct wireless_dev *wdev, u64 cookie, struct ieee80211_channel *chan, unsigned int duration, gfp_t gfp) { struct wiphy *wiphy = wdev->wiphy; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); trace_cfg80211_ready_on_channel(wdev, cookie, chan, duration); nl80211_send_remain_on_chan_event(NL80211_CMD_REMAIN_ON_CHANNEL, rdev, wdev, cookie, chan, duration, gfp); } EXPORT_SYMBOL(cfg80211_ready_on_channel); void cfg80211_remain_on_channel_expired(struct wireless_dev *wdev, u64 cookie, struct ieee80211_channel *chan, gfp_t gfp) { struct wiphy *wiphy = wdev->wiphy; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); trace_cfg80211_ready_on_channel_expired(wdev, cookie, chan); nl80211_send_remain_on_chan_event(NL80211_CMD_CANCEL_REMAIN_ON_CHANNEL, rdev, wdev, cookie, chan, 0, gfp); } EXPORT_SYMBOL(cfg80211_remain_on_channel_expired); void cfg80211_new_sta(struct net_device *dev, const u8 *mac_addr, struct station_info *sinfo, gfp_t gfp) { struct wiphy *wiphy = dev->ieee80211_ptr->wiphy; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); struct sk_buff *msg; trace_cfg80211_new_sta(dev, mac_addr, sinfo); msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); if (!msg) return; if (nl80211_send_station(msg, NL80211_CMD_NEW_STATION, 0, 0, 0, rdev, dev, mac_addr, sinfo) < 0) { nlmsg_free(msg); return; } genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, NL80211_MCGRP_MLME, gfp); } EXPORT_SYMBOL(cfg80211_new_sta); void cfg80211_del_sta_sinfo(struct net_device *dev, const u8 *mac_addr, struct station_info *sinfo, gfp_t gfp) { struct wiphy *wiphy = dev->ieee80211_ptr->wiphy; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); struct sk_buff *msg; struct station_info empty_sinfo = {}; if (!sinfo) sinfo = &empty_sinfo; trace_cfg80211_del_sta(dev, mac_addr); msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); if (!msg) { cfg80211_sinfo_release_content(sinfo); return; } if (nl80211_send_station(msg, NL80211_CMD_DEL_STATION, 0, 0, 0, rdev, dev, mac_addr, sinfo) < 0) { nlmsg_free(msg); return; } genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, NL80211_MCGRP_MLME, gfp); } EXPORT_SYMBOL(cfg80211_del_sta_sinfo); void cfg80211_conn_failed(struct net_device *dev, const u8 *mac_addr, enum nl80211_connect_failed_reason reason, gfp_t gfp) { struct wiphy *wiphy = dev->ieee80211_ptr->wiphy; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); struct sk_buff *msg; void *hdr; msg = nlmsg_new(NLMSG_GOODSIZE, gfp); if (!msg) return; hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_CONN_FAILED); if (!hdr) { nlmsg_free(msg); return; } if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) || nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr) || nla_put_u32(msg, NL80211_ATTR_CONN_FAILED_REASON, reason)) goto nla_put_failure; genlmsg_end(msg, hdr); genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, NL80211_MCGRP_MLME, gfp); return; nla_put_failure: nlmsg_free(msg); } EXPORT_SYMBOL(cfg80211_conn_failed); static bool __nl80211_unexpected_frame(struct net_device *dev, u8 cmd, const u8 *addr, gfp_t gfp) { struct wireless_dev *wdev = 
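/*
 * Unlike most events in this file, which are multicast to a whole group,
 * the spurious-frame notifications built here are unicast only to the
 * socket that registered via wdev->ap_unexpected_nlportid; when no
 * listener is registered the event is dropped and false is returned so
 * the caller knows nobody handled it.
 */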
dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); struct sk_buff *msg; void *hdr; u32 nlportid = READ_ONCE(wdev->ap_unexpected_nlportid); if (!nlportid) return false; msg = nlmsg_new(100, gfp); if (!msg) return true; hdr = nl80211hdr_put(msg, 0, 0, 0, cmd); if (!hdr) { nlmsg_free(msg); return true; } if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) || nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, addr)) goto nla_put_failure; genlmsg_end(msg, hdr); genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, nlportid); return true; nla_put_failure: nlmsg_free(msg); return true; } bool cfg80211_rx_spurious_frame(struct net_device *dev, const u8 *addr, gfp_t gfp) { struct wireless_dev *wdev = dev->ieee80211_ptr; bool ret; trace_cfg80211_rx_spurious_frame(dev, addr); if (WARN_ON(wdev->iftype != NL80211_IFTYPE_AP && wdev->iftype != NL80211_IFTYPE_P2P_GO)) { trace_cfg80211_return_bool(false); return false; } ret = __nl80211_unexpected_frame(dev, NL80211_CMD_UNEXPECTED_FRAME, addr, gfp); trace_cfg80211_return_bool(ret); return ret; } EXPORT_SYMBOL(cfg80211_rx_spurious_frame); bool cfg80211_rx_unexpected_4addr_frame(struct net_device *dev, const u8 *addr, gfp_t gfp) { struct wireless_dev *wdev = dev->ieee80211_ptr; bool ret; trace_cfg80211_rx_unexpected_4addr_frame(dev, addr); if (WARN_ON(wdev->iftype != NL80211_IFTYPE_AP && wdev->iftype != NL80211_IFTYPE_P2P_GO && wdev->iftype != NL80211_IFTYPE_AP_VLAN)) { trace_cfg80211_return_bool(false); return false; } ret = __nl80211_unexpected_frame(dev, NL80211_CMD_UNEXPECTED_4ADDR_FRAME, addr, gfp); trace_cfg80211_return_bool(ret); return ret; } EXPORT_SYMBOL(cfg80211_rx_unexpected_4addr_frame); int nl80211_send_mgmt(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, u32 nlportid, int freq, int sig_dbm, const u8 *buf, size_t len, u32 flags, gfp_t gfp) { struct net_device *netdev = wdev->netdev; struct sk_buff *msg; void *hdr; msg = nlmsg_new(100 + len, gfp); if (!msg) return -ENOMEM; hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_FRAME); if (!hdr) { nlmsg_free(msg); return -ENOMEM; } if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || (netdev && nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex)) || nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev), NL80211_ATTR_PAD) || nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, freq) || (sig_dbm && nla_put_u32(msg, NL80211_ATTR_RX_SIGNAL_DBM, sig_dbm)) || nla_put(msg, NL80211_ATTR_FRAME, len, buf) || (flags && nla_put_u32(msg, NL80211_ATTR_RXMGMT_FLAGS, flags))) goto nla_put_failure; genlmsg_end(msg, hdr); return genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, nlportid); nla_put_failure: nlmsg_free(msg); return -ENOBUFS; } void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, u64 cookie, const u8 *buf, size_t len, bool ack, gfp_t gfp) { struct wiphy *wiphy = wdev->wiphy; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); struct net_device *netdev = wdev->netdev; struct sk_buff *msg; void *hdr; trace_cfg80211_mgmt_tx_status(wdev, cookie, ack); msg = nlmsg_new(100 + len, gfp); if (!msg) return; hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_FRAME_TX_STATUS); if (!hdr) { nlmsg_free(msg); return; } if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || (netdev && nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex)) || nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev), NL80211_ATTR_PAD) || nla_put(msg, NL80211_ATTR_FRAME, len, buf) || nla_put_u64_64bit(msg, NL80211_ATTR_COOKIE, 
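/*
 * The cookie attribute added below ties this TX-status event back to the
 * cookie userspace received when it submitted the frame for transmission,
 * so several in-flight frames can be told apart.
 */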
cookie, NL80211_ATTR_PAD) || (ack && nla_put_flag(msg, NL80211_ATTR_ACK))) goto nla_put_failure; genlmsg_end(msg, hdr); genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, NL80211_MCGRP_MLME, gfp); return; nla_put_failure: nlmsg_free(msg); } EXPORT_SYMBOL(cfg80211_mgmt_tx_status); static int __nl80211_rx_control_port(struct net_device *dev, struct sk_buff *skb, bool unencrypted, gfp_t gfp) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); struct ethhdr *ehdr = eth_hdr(skb); const u8 *addr = ehdr->h_source; u16 proto = be16_to_cpu(skb->protocol); struct sk_buff *msg; void *hdr; struct nlattr *frame; u32 nlportid = READ_ONCE(wdev->conn_owner_nlportid); if (!nlportid) return -ENOENT; msg = nlmsg_new(100 + skb->len, gfp); if (!msg) return -ENOMEM; hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_CONTROL_PORT_FRAME); if (!hdr) { nlmsg_free(msg); return -ENOBUFS; } if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) || nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev), NL80211_ATTR_PAD) || nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, addr) || nla_put_u16(msg, NL80211_ATTR_CONTROL_PORT_ETHERTYPE, proto) || (unencrypted && nla_put_flag(msg, NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT))) goto nla_put_failure; frame = nla_reserve(msg, NL80211_ATTR_FRAME, skb->len); if (!frame) goto nla_put_failure; skb_copy_bits(skb, 0, nla_data(frame), skb->len); genlmsg_end(msg, hdr); return genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, nlportid); nla_put_failure: nlmsg_free(msg); return -ENOBUFS; } bool cfg80211_rx_control_port(struct net_device *dev, struct sk_buff *skb, bool unencrypted) { int ret; trace_cfg80211_rx_control_port(dev, skb, unencrypted); ret = __nl80211_rx_control_port(dev, skb, unencrypted, GFP_ATOMIC); trace_cfg80211_return_bool(ret == 0); return ret == 0; } EXPORT_SYMBOL(cfg80211_rx_control_port); static struct sk_buff *cfg80211_prepare_cqm(struct net_device *dev, const char *mac, gfp_t gfp) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); struct sk_buff *msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); void **cb; if (!msg) return NULL; cb = (void **)msg->cb; cb[0] = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_NOTIFY_CQM); if (!cb[0]) { nlmsg_free(msg); return NULL; } if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex)) goto nla_put_failure; if (mac && nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, mac)) goto nla_put_failure; cb[1] = nla_nest_start(msg, NL80211_ATTR_CQM); if (!cb[1]) goto nla_put_failure; cb[2] = rdev; return msg; nla_put_failure: nlmsg_free(msg); return NULL; } static void cfg80211_send_cqm(struct sk_buff *msg, gfp_t gfp) { void **cb = (void **)msg->cb; struct cfg80211_registered_device *rdev = cb[2]; nla_nest_end(msg, cb[1]); genlmsg_end(msg, cb[0]); memset(msg->cb, 0, sizeof(msg->cb)); genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, NL80211_MCGRP_MLME, gfp); } void cfg80211_cqm_rssi_notify(struct net_device *dev, enum nl80211_cqm_rssi_threshold_event rssi_event, s32 rssi_level, gfp_t gfp) { struct sk_buff *msg; struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); trace_cfg80211_cqm_rssi_notify(dev, rssi_event, rssi_level); if (WARN_ON(rssi_event != NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW && rssi_event != 
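/*
 * cfg80211_prepare_cqm() and cfg80211_send_cqm() above stash the genetlink
 * header, the still-open NL80211_ATTR_CQM nest and the rdev pointer in the
 * skb->cb scratch area (48 bytes), so each CQM notifier only has to add
 * its own attributes between the two calls.
 */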
NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH)) return; if (wdev->cqm_config) { wdev->cqm_config->last_rssi_event_value = rssi_level; cfg80211_cqm_rssi_update(rdev, dev); if (rssi_level == 0) rssi_level = wdev->cqm_config->last_rssi_event_value; } msg = cfg80211_prepare_cqm(dev, NULL, gfp); if (!msg) return; if (nla_put_u32(msg, NL80211_ATTR_CQM_RSSI_THRESHOLD_EVENT, rssi_event)) goto nla_put_failure; if (rssi_level && nla_put_s32(msg, NL80211_ATTR_CQM_RSSI_LEVEL, rssi_level)) goto nla_put_failure; cfg80211_send_cqm(msg, gfp); return; nla_put_failure: nlmsg_free(msg); } EXPORT_SYMBOL(cfg80211_cqm_rssi_notify); void cfg80211_cqm_txe_notify(struct net_device *dev, const u8 *peer, u32 num_packets, u32 rate, u32 intvl, gfp_t gfp) { struct sk_buff *msg; msg = cfg80211_prepare_cqm(dev, peer, gfp); if (!msg) return; if (nla_put_u32(msg, NL80211_ATTR_CQM_TXE_PKTS, num_packets)) goto nla_put_failure; if (nla_put_u32(msg, NL80211_ATTR_CQM_TXE_RATE, rate)) goto nla_put_failure; if (nla_put_u32(msg, NL80211_ATTR_CQM_TXE_INTVL, intvl)) goto nla_put_failure; cfg80211_send_cqm(msg, gfp); return; nla_put_failure: nlmsg_free(msg); } EXPORT_SYMBOL(cfg80211_cqm_txe_notify); void cfg80211_cqm_pktloss_notify(struct net_device *dev, const u8 *peer, u32 num_packets, gfp_t gfp) { struct sk_buff *msg; trace_cfg80211_cqm_pktloss_notify(dev, peer, num_packets); msg = cfg80211_prepare_cqm(dev, peer, gfp); if (!msg) return; if (nla_put_u32(msg, NL80211_ATTR_CQM_PKT_LOSS_EVENT, num_packets)) goto nla_put_failure; cfg80211_send_cqm(msg, gfp); return; nla_put_failure: nlmsg_free(msg); } EXPORT_SYMBOL(cfg80211_cqm_pktloss_notify); void cfg80211_cqm_beacon_loss_notify(struct net_device *dev, gfp_t gfp) { struct sk_buff *msg; msg = cfg80211_prepare_cqm(dev, NULL, gfp); if (!msg) return; if (nla_put_flag(msg, NL80211_ATTR_CQM_BEACON_LOSS_EVENT)) goto nla_put_failure; cfg80211_send_cqm(msg, gfp); return; nla_put_failure: nlmsg_free(msg); } EXPORT_SYMBOL(cfg80211_cqm_beacon_loss_notify); static void nl80211_gtk_rekey_notify(struct cfg80211_registered_device *rdev, struct net_device *netdev, const u8 *bssid, const u8 *replay_ctr, gfp_t gfp) { struct sk_buff *msg; struct nlattr *rekey_attr; void *hdr; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); if (!msg) return; hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_SET_REKEY_OFFLOAD); if (!hdr) { nlmsg_free(msg); return; } if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) || nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid)) goto nla_put_failure; rekey_attr = nla_nest_start(msg, NL80211_ATTR_REKEY_DATA); if (!rekey_attr) goto nla_put_failure; if (nla_put(msg, NL80211_REKEY_DATA_REPLAY_CTR, NL80211_REPLAY_CTR_LEN, replay_ctr)) goto nla_put_failure; nla_nest_end(msg, rekey_attr); genlmsg_end(msg, hdr); genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, NL80211_MCGRP_MLME, gfp); return; nla_put_failure: nlmsg_free(msg); } void cfg80211_gtk_rekey_notify(struct net_device *dev, const u8 *bssid, const u8 *replay_ctr, gfp_t gfp) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct wiphy *wiphy = wdev->wiphy; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); trace_cfg80211_gtk_rekey_notify(dev, bssid); nl80211_gtk_rekey_notify(rdev, dev, bssid, replay_ctr, gfp); } EXPORT_SYMBOL(cfg80211_gtk_rekey_notify); static void nl80211_pmksa_candidate_notify(struct cfg80211_registered_device *rdev, struct net_device *netdev, int index, const u8 *bssid, bool preauth, gfp_t gfp) { struct sk_buff *msg; struct nlattr 
*attr; void *hdr; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); if (!msg) return; hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_PMKSA_CANDIDATE); if (!hdr) { nlmsg_free(msg); return; } if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex)) goto nla_put_failure; attr = nla_nest_start(msg, NL80211_ATTR_PMKSA_CANDIDATE); if (!attr) goto nla_put_failure; if (nla_put_u32(msg, NL80211_PMKSA_CANDIDATE_INDEX, index) || nla_put(msg, NL80211_PMKSA_CANDIDATE_BSSID, ETH_ALEN, bssid) || (preauth && nla_put_flag(msg, NL80211_PMKSA_CANDIDATE_PREAUTH))) goto nla_put_failure; nla_nest_end(msg, attr); genlmsg_end(msg, hdr); genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, NL80211_MCGRP_MLME, gfp); return; nla_put_failure: nlmsg_free(msg); } void cfg80211_pmksa_candidate_notify(struct net_device *dev, int index, const u8 *bssid, bool preauth, gfp_t gfp) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct wiphy *wiphy = wdev->wiphy; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); trace_cfg80211_pmksa_candidate_notify(dev, index, bssid, preauth); nl80211_pmksa_candidate_notify(rdev, dev, index, bssid, preauth, gfp); } EXPORT_SYMBOL(cfg80211_pmksa_candidate_notify); static void nl80211_ch_switch_notify(struct cfg80211_registered_device *rdev, struct net_device *netdev, struct cfg80211_chan_def *chandef, gfp_t gfp, enum nl80211_commands notif, u8 count) { struct sk_buff *msg; void *hdr; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); if (!msg) return; hdr = nl80211hdr_put(msg, 0, 0, 0, notif); if (!hdr) { nlmsg_free(msg); return; } if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex)) goto nla_put_failure; if (nl80211_send_chandef(msg, chandef)) goto nla_put_failure; if ((notif == NL80211_CMD_CH_SWITCH_STARTED_NOTIFY) && (nla_put_u32(msg, NL80211_ATTR_CH_SWITCH_COUNT, count))) goto nla_put_failure; genlmsg_end(msg, hdr); genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, NL80211_MCGRP_MLME, gfp); return; nla_put_failure: nlmsg_free(msg); } void cfg80211_ch_switch_notify(struct net_device *dev, struct cfg80211_chan_def *chandef) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct wiphy *wiphy = wdev->wiphy; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); ASSERT_WDEV_LOCK(wdev); trace_cfg80211_ch_switch_notify(dev, chandef); wdev->chandef = *chandef; wdev->preset_chandef = *chandef; nl80211_ch_switch_notify(rdev, dev, chandef, GFP_KERNEL, NL80211_CMD_CH_SWITCH_NOTIFY, 0); } EXPORT_SYMBOL(cfg80211_ch_switch_notify); void cfg80211_ch_switch_started_notify(struct net_device *dev, struct cfg80211_chan_def *chandef, u8 count) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct wiphy *wiphy = wdev->wiphy; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); trace_cfg80211_ch_switch_started_notify(dev, chandef); nl80211_ch_switch_notify(rdev, dev, chandef, GFP_KERNEL, NL80211_CMD_CH_SWITCH_STARTED_NOTIFY, count); } EXPORT_SYMBOL(cfg80211_ch_switch_started_notify); void nl80211_radar_notify(struct cfg80211_registered_device *rdev, const struct cfg80211_chan_def *chandef, enum nl80211_radar_event event, struct net_device *netdev, gfp_t gfp) { struct sk_buff *msg; void *hdr; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); if (!msg) return; hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_RADAR_DETECT); if (!hdr) { nlmsg_free(msg); return; } if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx)) goto nla_put_failure; /* NOP and radar events don't need a netdev parameter 
*/ if (netdev) { struct wireless_dev *wdev = netdev->ieee80211_ptr; if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) || nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev), NL80211_ATTR_PAD)) goto nla_put_failure; } if (nla_put_u32(msg, NL80211_ATTR_RADAR_EVENT, event)) goto nla_put_failure; if (nl80211_send_chandef(msg, chandef)) goto nla_put_failure; genlmsg_end(msg, hdr); genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, NL80211_MCGRP_MLME, gfp); return; nla_put_failure: nlmsg_free(msg); } void cfg80211_sta_opmode_change_notify(struct net_device *dev, const u8 *mac, struct sta_opmode_info *sta_opmode, gfp_t gfp) { struct sk_buff *msg; struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); void *hdr; if (WARN_ON(!mac)) return; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); if (!msg) return; hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_STA_OPMODE_CHANGED); if (!hdr) { nlmsg_free(msg); return; } if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx)) goto nla_put_failure; if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex)) goto nla_put_failure; if (nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, mac)) goto nla_put_failure; if ((sta_opmode->changed & STA_OPMODE_SMPS_MODE_CHANGED) && nla_put_u8(msg, NL80211_ATTR_SMPS_MODE, sta_opmode->smps_mode)) goto nla_put_failure; if ((sta_opmode->changed & STA_OPMODE_MAX_BW_CHANGED) && nla_put_u8(msg, NL80211_ATTR_CHANNEL_WIDTH, sta_opmode->bw)) goto nla_put_failure; if ((sta_opmode->changed & STA_OPMODE_N_SS_CHANGED) && nla_put_u8(msg, NL80211_ATTR_NSS, sta_opmode->rx_nss)) goto nla_put_failure; genlmsg_end(msg, hdr); genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, NL80211_MCGRP_MLME, gfp); return; nla_put_failure: nlmsg_free(msg); } EXPORT_SYMBOL(cfg80211_sta_opmode_change_notify); void cfg80211_probe_status(struct net_device *dev, const u8 *addr, u64 cookie, bool acked, s32 ack_signal, bool is_valid_ack_signal, gfp_t gfp) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); struct sk_buff *msg; void *hdr; trace_cfg80211_probe_status(dev, addr, cookie, acked); msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); if (!msg) return; hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_PROBE_CLIENT); if (!hdr) { nlmsg_free(msg); return; } if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) || nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, addr) || nla_put_u64_64bit(msg, NL80211_ATTR_COOKIE, cookie, NL80211_ATTR_PAD) || (acked && nla_put_flag(msg, NL80211_ATTR_ACK)) || (is_valid_ack_signal && nla_put_s32(msg, NL80211_ATTR_ACK_SIGNAL, ack_signal))) goto nla_put_failure; genlmsg_end(msg, hdr); genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, NL80211_MCGRP_MLME, gfp); return; nla_put_failure: nlmsg_free(msg); } EXPORT_SYMBOL(cfg80211_probe_status); void cfg80211_report_obss_beacon(struct wiphy *wiphy, const u8 *frame, size_t len, int freq, int sig_dbm) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); struct sk_buff *msg; void *hdr; struct cfg80211_beacon_registration *reg; trace_cfg80211_report_obss_beacon(wiphy, frame, len, freq, sig_dbm); spin_lock_bh(&rdev->beacon_registrations_lock); list_for_each_entry(reg, &rdev->beacon_registrations, list) { msg = nlmsg_new(len + 100, GFP_ATOMIC); if (!msg) { spin_unlock_bh(&rdev->beacon_registrations_lock); return; } hdr = nl80211hdr_put(msg, 0, 0, 0, 
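/*
 * OBSS beacons are reported with the generic NL80211_CMD_FRAME event,
 * unicast separately to every registered listener; GFP_ATOMIC is required
 * here because the registration list is walked with a spinlock held.
 */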
NL80211_CMD_FRAME); if (!hdr) goto nla_put_failure; if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || (freq && nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, freq)) || (sig_dbm && nla_put_u32(msg, NL80211_ATTR_RX_SIGNAL_DBM, sig_dbm)) || nla_put(msg, NL80211_ATTR_FRAME, len, frame)) goto nla_put_failure; genlmsg_end(msg, hdr); genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, reg->nlportid); } spin_unlock_bh(&rdev->beacon_registrations_lock); return; nla_put_failure: spin_unlock_bh(&rdev->beacon_registrations_lock); nlmsg_free(msg); } EXPORT_SYMBOL(cfg80211_report_obss_beacon); #ifdef CONFIG_PM static int cfg80211_net_detect_results(struct sk_buff *msg, struct cfg80211_wowlan_wakeup *wakeup) { struct cfg80211_wowlan_nd_info *nd = wakeup->net_detect; struct nlattr *nl_results, *nl_match, *nl_freqs; int i, j; nl_results = nla_nest_start( msg, NL80211_WOWLAN_TRIG_NET_DETECT_RESULTS); if (!nl_results) return -EMSGSIZE; for (i = 0; i < nd->n_matches; i++) { struct cfg80211_wowlan_nd_match *match = nd->matches[i]; nl_match = nla_nest_start(msg, i); if (!nl_match) break; /* The SSID attribute is optional in nl80211, but for * simplicity reasons it's always present in the * cfg80211 structure. If a driver can't pass the * SSID, that needs to be changed. A zero length SSID * is still a valid SSID (wildcard), so it cannot be * used for this purpose. */ if (nla_put(msg, NL80211_ATTR_SSID, match->ssid.ssid_len, match->ssid.ssid)) { nla_nest_cancel(msg, nl_match); goto out; } if (match->n_channels) { nl_freqs = nla_nest_start( msg, NL80211_ATTR_SCAN_FREQUENCIES); if (!nl_freqs) { nla_nest_cancel(msg, nl_match); goto out; } for (j = 0; j < match->n_channels; j++) { if (nla_put_u32(msg, j, match->channels[j])) { nla_nest_cancel(msg, nl_freqs); nla_nest_cancel(msg, nl_match); goto out; } } nla_nest_end(msg, nl_freqs); } nla_nest_end(msg, nl_match); } out: nla_nest_end(msg, nl_results); return 0; } void cfg80211_report_wowlan_wakeup(struct wireless_dev *wdev, struct cfg80211_wowlan_wakeup *wakeup, gfp_t gfp) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); struct sk_buff *msg; void *hdr; int size = 200; trace_cfg80211_report_wowlan_wakeup(wdev->wiphy, wdev, wakeup); if (wakeup) size += wakeup->packet_present_len; msg = nlmsg_new(size, gfp); if (!msg) return; hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_SET_WOWLAN); if (!hdr) goto free_msg; if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev), NL80211_ATTR_PAD)) goto free_msg; if (wdev->netdev && nla_put_u32(msg, NL80211_ATTR_IFINDEX, wdev->netdev->ifindex)) goto free_msg; if (wakeup) { struct nlattr *reasons; reasons = nla_nest_start(msg, NL80211_ATTR_WOWLAN_TRIGGERS); if (!reasons) goto free_msg; if (wakeup->disconnect && nla_put_flag(msg, NL80211_WOWLAN_TRIG_DISCONNECT)) goto free_msg; if (wakeup->magic_pkt && nla_put_flag(msg, NL80211_WOWLAN_TRIG_MAGIC_PKT)) goto free_msg; if (wakeup->gtk_rekey_failure && nla_put_flag(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE)) goto free_msg; if (wakeup->eap_identity_req && nla_put_flag(msg, NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST)) goto free_msg; if (wakeup->four_way_handshake && nla_put_flag(msg, NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE)) goto free_msg; if (wakeup->rfkill_release && nla_put_flag(msg, NL80211_WOWLAN_TRIG_RFKILL_RELEASE)) goto free_msg; if (wakeup->pattern_idx >= 0 && nla_put_u32(msg, NL80211_WOWLAN_TRIG_PKT_PATTERN, wakeup->pattern_idx)) goto free_msg; if (wakeup->tcp_match && nla_put_flag(msg, 
NL80211_WOWLAN_TRIG_WAKEUP_TCP_MATCH)) goto free_msg; if (wakeup->tcp_connlost && nla_put_flag(msg, NL80211_WOWLAN_TRIG_WAKEUP_TCP_CONNLOST)) goto free_msg; if (wakeup->tcp_nomoretokens && nla_put_flag(msg, NL80211_WOWLAN_TRIG_WAKEUP_TCP_NOMORETOKENS)) goto free_msg; if (wakeup->packet) { u32 pkt_attr = NL80211_WOWLAN_TRIG_WAKEUP_PKT_80211; u32 len_attr = NL80211_WOWLAN_TRIG_WAKEUP_PKT_80211_LEN; if (!wakeup->packet_80211) { pkt_attr = NL80211_WOWLAN_TRIG_WAKEUP_PKT_8023; len_attr = NL80211_WOWLAN_TRIG_WAKEUP_PKT_8023_LEN; } if (wakeup->packet_len && nla_put_u32(msg, len_attr, wakeup->packet_len)) goto free_msg; if (nla_put(msg, pkt_attr, wakeup->packet_present_len, wakeup->packet)) goto free_msg; } if (wakeup->net_detect && cfg80211_net_detect_results(msg, wakeup)) goto free_msg; nla_nest_end(msg, reasons); } genlmsg_end(msg, hdr); genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, NL80211_MCGRP_MLME, gfp); return; free_msg: nlmsg_free(msg); } EXPORT_SYMBOL(cfg80211_report_wowlan_wakeup); #endif void cfg80211_tdls_oper_request(struct net_device *dev, const u8 *peer, enum nl80211_tdls_operation oper, u16 reason_code, gfp_t gfp) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); struct sk_buff *msg; void *hdr; trace_cfg80211_tdls_oper_request(wdev->wiphy, dev, peer, oper, reason_code); msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); if (!msg) return; hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_TDLS_OPER); if (!hdr) { nlmsg_free(msg); return; } if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) || nla_put_u8(msg, NL80211_ATTR_TDLS_OPERATION, oper) || nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, peer) || (reason_code > 0 && nla_put_u16(msg, NL80211_ATTR_REASON_CODE, reason_code))) goto nla_put_failure; genlmsg_end(msg, hdr); genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, NL80211_MCGRP_MLME, gfp); return; nla_put_failure: nlmsg_free(msg); } EXPORT_SYMBOL(cfg80211_tdls_oper_request); static int nl80211_netlink_notify(struct notifier_block * nb, unsigned long state, void *_notify) { struct netlink_notify *notify = _notify; struct cfg80211_registered_device *rdev; struct wireless_dev *wdev; struct cfg80211_beacon_registration *reg, *tmp; if (state != NETLINK_URELEASE || notify->protocol != NETLINK_GENERIC) return NOTIFY_DONE; rcu_read_lock(); list_for_each_entry_rcu(rdev, &cfg80211_rdev_list, list) { struct cfg80211_sched_scan_request *sched_scan_req; list_for_each_entry_rcu(sched_scan_req, &rdev->sched_scan_req_list, list) { if (sched_scan_req->owner_nlportid == netlink_notify_portid(notify)) { sched_scan_req->nl_owner_dead = true; schedule_work(&rdev->sched_scan_stop_wk); } } list_for_each_entry_rcu(wdev, &rdev->wiphy.wdev_list, list) { cfg80211_mlme_unregister_socket(wdev, netlink_notify_portid(notify)); if (wdev->owner_nlportid == netlink_notify_portid(notify)) { wdev->nl_owner_dead = true; schedule_work(&rdev->destroy_work); } else if (wdev->conn_owner_nlportid == netlink_notify_portid(notify)) { schedule_work(&wdev->disconnect_wk); } cfg80211_release_pmsr(wdev, netlink_notify_portid(notify)); } spin_lock_bh(&rdev->beacon_registrations_lock); list_for_each_entry_safe(reg, tmp, &rdev->beacon_registrations, list) { if (reg->nlportid == netlink_notify_portid(notify)) { list_del(®->list); kfree(reg); break; } } spin_unlock_bh(&rdev->beacon_registrations_lock); } rcu_read_unlock(); /* * It is possible that the user space process 
that is controlling the * indoor setting disappeared, so notify the regulatory core. */ regulatory_netlink_notify(netlink_notify_portid(notify)); return NOTIFY_OK; } static struct notifier_block nl80211_netlink_notifier = { .notifier_call = nl80211_netlink_notify, }; void cfg80211_ft_event(struct net_device *netdev, struct cfg80211_ft_event_params *ft_event) { struct wiphy *wiphy = netdev->ieee80211_ptr->wiphy; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); struct sk_buff *msg; void *hdr; trace_cfg80211_ft_event(wiphy, netdev, ft_event); if (!ft_event->target_ap) return; msg = nlmsg_new(100 + ft_event->ies_len + ft_event->ric_ies_len, GFP_KERNEL); if (!msg) return; hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_FT_EVENT); if (!hdr) goto out; if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) || nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, ft_event->target_ap)) goto out; if (ft_event->ies && nla_put(msg, NL80211_ATTR_IE, ft_event->ies_len, ft_event->ies)) goto out; if (ft_event->ric_ies && nla_put(msg, NL80211_ATTR_IE_RIC, ft_event->ric_ies_len, ft_event->ric_ies)) goto out; genlmsg_end(msg, hdr); genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, NL80211_MCGRP_MLME, GFP_KERNEL); return; out: nlmsg_free(msg); } EXPORT_SYMBOL(cfg80211_ft_event); void cfg80211_crit_proto_stopped(struct wireless_dev *wdev, gfp_t gfp) { struct cfg80211_registered_device *rdev; struct sk_buff *msg; void *hdr; u32 nlportid; rdev = wiphy_to_rdev(wdev->wiphy); if (!rdev->crit_proto_nlportid) return; nlportid = rdev->crit_proto_nlportid; rdev->crit_proto_nlportid = 0; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); if (!msg) return; hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_CRIT_PROTOCOL_STOP); if (!hdr) goto nla_put_failure; if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev), NL80211_ATTR_PAD)) goto nla_put_failure; genlmsg_end(msg, hdr); genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, nlportid); return; nla_put_failure: nlmsg_free(msg); } EXPORT_SYMBOL(cfg80211_crit_proto_stopped); void nl80211_send_ap_stopped(struct wireless_dev *wdev) { struct wiphy *wiphy = wdev->wiphy; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); struct sk_buff *msg; void *hdr; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return; hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_STOP_AP); if (!hdr) goto out; if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || nla_put_u32(msg, NL80211_ATTR_IFINDEX, wdev->netdev->ifindex) || nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev), NL80211_ATTR_PAD)) goto out; genlmsg_end(msg, hdr); genlmsg_multicast_netns(&nl80211_fam, wiphy_net(wiphy), msg, 0, NL80211_MCGRP_MLME, GFP_KERNEL); return; out: nlmsg_free(msg); } int cfg80211_external_auth_request(struct net_device *dev, struct cfg80211_external_auth_params *params, gfp_t gfp) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); struct sk_buff *msg; void *hdr; if (!wdev->conn_owner_nlportid) return -EINVAL; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); if (!msg) return -ENOMEM; hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_EXTERNAL_AUTH); if (!hdr) goto nla_put_failure; if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) || nla_put_u32(msg, NL80211_ATTR_AKM_SUITES, params->key_mgmt_suite) || nla_put_u32(msg, 
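/*
 * External authentication (e.g. SAE for WPA3) is delegated to the process
 * that owns the connection: the request below is unicast to
 * wdev->conn_owner_nlportid, and userspace completes the exchange by
 * sending NL80211_CMD_EXTERNAL_AUTH back (handled by
 * nl80211_external_auth() above).
 */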
NL80211_ATTR_EXTERNAL_AUTH_ACTION, params->action) || nla_put(msg, NL80211_ATTR_BSSID, ETH_ALEN, params->bssid) || nla_put(msg, NL80211_ATTR_SSID, params->ssid.ssid_len, params->ssid.ssid)) goto nla_put_failure; genlmsg_end(msg, hdr); genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, wdev->conn_owner_nlportid); return 0; nla_put_failure: nlmsg_free(msg); return -ENOBUFS; } EXPORT_SYMBOL(cfg80211_external_auth_request); /* initialisation/exit functions */ int __init nl80211_init(void) { int err; err = genl_register_family(&nl80211_fam); if (err) return err; err = netlink_register_notifier(&nl80211_netlink_notifier); if (err) goto err_out; return 0; err_out: genl_unregister_family(&nl80211_fam); return err; } void nl80211_exit(void) { netlink_unregister_notifier(&nl80211_netlink_notifier); genl_unregister_family(&nl80211_fam); } backport-iwlwifi-dkms-7906/net/wireless/nl80211.h000066400000000000000000000114661351064634500215120ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0 */ /* * Portions of this file * Copyright (C) 2018 Intel Corporation */ #ifndef __NET_WIRELESS_NL80211_H #define __NET_WIRELESS_NL80211_H #include "core.h" int nl80211_init(void); void nl80211_exit(void); extern const struct nla_policy nl80211_policy[NUM_NL80211_ATTR]; void *nl80211hdr_put(struct sk_buff *skb, u32 portid, u32 seq, int flags, u8 cmd); bool nl80211_put_sta_rate(struct sk_buff *msg, struct rate_info *info, int attr); static inline u64 wdev_id(struct wireless_dev *wdev) { return (u64)wdev->identifier | ((u64)wiphy_to_rdev(wdev->wiphy)->wiphy_idx << 32); } int nl80211_prepare_wdev_dump(struct netlink_callback *cb, struct cfg80211_registered_device **rdev, struct wireless_dev **wdev); int nl80211_parse_chandef(struct cfg80211_registered_device *rdev, struct genl_info *info, struct cfg80211_chan_def *chandef); int nl80211_parse_random_mac(struct nlattr **attrs, u8 *mac_addr, u8 *mac_addr_mask); void nl80211_notify_wiphy(struct cfg80211_registered_device *rdev, enum nl80211_commands cmd); void nl80211_notify_iface(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, enum nl80211_commands cmd); void nl80211_send_scan_start(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev); struct sk_buff *nl80211_build_scan_msg(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, bool aborted); void nl80211_send_scan_msg(struct cfg80211_registered_device *rdev, struct sk_buff *msg); void nl80211_send_sched_scan(struct cfg80211_sched_scan_request *req, u32 cmd); void nl80211_common_reg_change_event(enum nl80211_commands cmd_id, struct regulatory_request *request); static inline void nl80211_send_reg_change_event(struct regulatory_request *request) { nl80211_common_reg_change_event(NL80211_CMD_REG_CHANGE, request); } static inline void nl80211_send_wiphy_reg_change_event(struct regulatory_request *request) { nl80211_common_reg_change_event(NL80211_CMD_WIPHY_REG_CHANGE, request); } void nl80211_send_rx_auth(struct cfg80211_registered_device *rdev, struct net_device *netdev, const u8 *buf, size_t len, gfp_t gfp); void nl80211_send_rx_assoc(struct cfg80211_registered_device *rdev, struct net_device *netdev, const u8 *buf, size_t len, gfp_t gfp, int uapsd_queues, const u8 *req_ies, size_t req_ies_len); void nl80211_send_deauth(struct cfg80211_registered_device *rdev, struct net_device *netdev, const u8 *buf, size_t len, gfp_t gfp); void nl80211_send_disassoc(struct cfg80211_registered_device *rdev, struct net_device *netdev, const u8 *buf, size_t len, gfp_t gfp); void 
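/*
 * Worked example for wdev_id() above (a sketch, not from the original
 * source): the u64 exposed to userspace packs the wiphy index into the
 * upper 32 bits and the per-wiphy wdev identifier into the lower 32, so
 * wiphy 1 / wdev identifier 2 is reported as 0x0000000100000002.
 */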
nl80211_send_auth_timeout(struct cfg80211_registered_device *rdev, struct net_device *netdev, const u8 *addr, gfp_t gfp); void nl80211_send_assoc_timeout(struct cfg80211_registered_device *rdev, struct net_device *netdev, const u8 *addr, gfp_t gfp); void nl80211_send_connect_result(struct cfg80211_registered_device *rdev, struct net_device *netdev, struct cfg80211_connect_resp_params *params, gfp_t gfp); void nl80211_send_roamed(struct cfg80211_registered_device *rdev, struct net_device *netdev, struct cfg80211_roam_info *info, gfp_t gfp); void nl80211_send_port_authorized(struct cfg80211_registered_device *rdev, struct net_device *netdev, const u8 *bssid); void nl80211_send_disconnected(struct cfg80211_registered_device *rdev, struct net_device *netdev, u16 reason, const u8 *ie, size_t ie_len, bool from_ap); void nl80211_michael_mic_failure(struct cfg80211_registered_device *rdev, struct net_device *netdev, const u8 *addr, enum nl80211_key_type key_type, int key_id, const u8 *tsc, gfp_t gfp); void nl80211_send_beacon_hint_event(struct wiphy *wiphy, struct ieee80211_channel *channel_before, struct ieee80211_channel *channel_after); void nl80211_send_ibss_bssid(struct cfg80211_registered_device *rdev, struct net_device *netdev, const u8 *bssid, gfp_t gfp); int nl80211_send_mgmt(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, u32 nlpid, int freq, int sig_dbm, const u8 *buf, size_t len, u32 flags, gfp_t gfp); void nl80211_radar_notify(struct cfg80211_registered_device *rdev, const struct cfg80211_chan_def *chandef, enum nl80211_radar_event event, struct net_device *netdev, gfp_t gfp); void nl80211_send_ap_stopped(struct wireless_dev *wdev); void cfg80211_rdev_free_coalesce(struct cfg80211_registered_device *rdev); /* peer measurement */ int nl80211_pmsr_start(struct sk_buff *skb, struct genl_info *info); int nl80211_pmsr_dump_results(struct sk_buff *skb, struct netlink_callback *cb); #endif /* __NET_WIRELESS_NL80211_H */ backport-iwlwifi-dkms-7906/net/wireless/ocb.c000066400000000000000000000037421351064634500212410ustar00rootroot00000000000000/* * OCB mode implementation * * Copyright: (c) 2014 Czech Technical University in Prague * (c) 2014 Volkswagen Group Research * Author: Rostislav Lisovy * Funded by: Volkswagen Group Research * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. 
*/ #include #include #include "nl80211.h" #include "core.h" #include "rdev-ops.h" int __cfg80211_join_ocb(struct cfg80211_registered_device *rdev, struct net_device *dev, struct ocb_setup *setup) { struct wireless_dev *wdev = dev->ieee80211_ptr; int err; ASSERT_WDEV_LOCK(wdev); if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_OCB) return -EOPNOTSUPP; if (!rdev->ops->join_ocb) return -EOPNOTSUPP; if (WARN_ON(!setup->chandef.chan)) return -EINVAL; err = rdev_join_ocb(rdev, dev, setup); if (!err) wdev->chandef = setup->chandef; return err; } int cfg80211_join_ocb(struct cfg80211_registered_device *rdev, struct net_device *dev, struct ocb_setup *setup) { struct wireless_dev *wdev = dev->ieee80211_ptr; int err; wdev_lock(wdev); err = __cfg80211_join_ocb(rdev, dev, setup); wdev_unlock(wdev); return err; } int __cfg80211_leave_ocb(struct cfg80211_registered_device *rdev, struct net_device *dev) { struct wireless_dev *wdev = dev->ieee80211_ptr; int err; ASSERT_WDEV_LOCK(wdev); if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_OCB) return -EOPNOTSUPP; if (!rdev->ops->leave_ocb) return -EOPNOTSUPP; err = rdev_leave_ocb(rdev, dev); if (!err) memset(&wdev->chandef, 0, sizeof(wdev->chandef)); return err; } int cfg80211_leave_ocb(struct cfg80211_registered_device *rdev, struct net_device *dev) { struct wireless_dev *wdev = dev->ieee80211_ptr; int err; wdev_lock(wdev); err = __cfg80211_leave_ocb(rdev, dev); wdev_unlock(wdev); return err; } backport-iwlwifi-dkms-7906/net/wireless/of.c000066400000000000000000000066411351064634500211030ustar00rootroot00000000000000/* * Copyright (C) 2017 Rafał Miłecki * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
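 *
 * Usage sketch (assuming the generic "ieee80211-freq-limit" device-tree
 * binding): the property parsed below carries <start end> pairs in kHz,
 * and any enabled channel whose 20 MHz bandwidth does not fit one of the
 * pairs is flagged IEEE80211_CHAN_DISABLED. A typical node entry:
 *
 *	ieee80211-freq-limit = <2402000 2482000>,
 *	                       <5170000 5250000>;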
*/ #include #include #include "core.h" static bool wiphy_freq_limits_valid_chan(struct wiphy *wiphy, struct ieee80211_freq_range *freq_limits, unsigned int n_freq_limits, struct ieee80211_channel *chan) { u32 bw = MHZ_TO_KHZ(20); int i; for (i = 0; i < n_freq_limits; i++) { struct ieee80211_freq_range *limit = &freq_limits[i]; if (cfg80211_does_bw_fit_range(limit, MHZ_TO_KHZ(chan->center_freq), bw)) return true; } return false; } static void wiphy_freq_limits_apply(struct wiphy *wiphy, struct ieee80211_freq_range *freq_limits, unsigned int n_freq_limits) { enum nl80211_band band; int i; if (WARN_ON(!n_freq_limits)) return; for (band = 0; band < NUM_NL80211_BANDS; band++) { struct ieee80211_supported_band *sband = wiphy->bands[band]; if (!sband) continue; for (i = 0; i < sband->n_channels; i++) { struct ieee80211_channel *chan = &sband->channels[i]; if (chan->flags & IEEE80211_CHAN_DISABLED) continue; if (!wiphy_freq_limits_valid_chan(wiphy, freq_limits, n_freq_limits, chan)) { pr_debug("Disabling freq %d MHz as it's out of OF limits\n", chan->center_freq); chan->flags |= IEEE80211_CHAN_DISABLED; } } } } void wiphy_read_of_freq_limits(struct wiphy *wiphy) { struct device *dev = wiphy_dev(wiphy); struct device_node *np; struct property *prop; struct ieee80211_freq_range *freq_limits; unsigned int n_freq_limits; const __be32 *p; int len, i; int err = 0; if (!dev) return; np = dev_of_node(dev); if (!np) return; prop = of_find_property(np, "ieee80211-freq-limit", &len); if (!prop) return; if (!len || len % sizeof(u32) || len / sizeof(u32) % 2) { dev_err(dev, "ieee80211-freq-limit wrong format"); return; } n_freq_limits = len / sizeof(u32) / 2; freq_limits = kcalloc(n_freq_limits, sizeof(*freq_limits), GFP_KERNEL); if (!freq_limits) { err = -ENOMEM; goto out_kfree; } p = NULL; for (i = 0; i < n_freq_limits; i++) { struct ieee80211_freq_range *limit = &freq_limits[i]; p = of_prop_next_u32(prop, p, &limit->start_freq_khz); if (!p) { err = -EINVAL; goto out_kfree; } p = of_prop_next_u32(prop, p, &limit->end_freq_khz); if (!p) { err = -EINVAL; goto out_kfree; } if (!limit->start_freq_khz || !limit->end_freq_khz || limit->start_freq_khz >= limit->end_freq_khz) { err = -EINVAL; goto out_kfree; } } wiphy_freq_limits_apply(wiphy, freq_limits, n_freq_limits); out_kfree: kfree(freq_limits); if (err) dev_err(dev, "Failed to get limits: %d\n", err); } EXPORT_SYMBOL(wiphy_read_of_freq_limits); backport-iwlwifi-dkms-7906/net/wireless/pmsr.c000066400000000000000000000374051351064634500214620ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (C) 2018 - 2019 Intel Corporation */ #ifndef __PMSR_H #define __PMSR_H #include #include "core.h" #include "nl80211.h" #include "rdev-ops.h" static int pmsr_parse_ftm(struct cfg80211_registered_device *rdev, struct nlattr *ftmreq, struct cfg80211_pmsr_request_peer *out, struct genl_info *info) { const struct cfg80211_pmsr_capabilities *capa = rdev->wiphy.pmsr_capa; struct nlattr *tb[NL80211_PMSR_FTM_REQ_ATTR_MAX + 1]; u32 preamble = NL80211_PREAMBLE_DMG; /* only optional in DMG */ /* validate existing data */ if (!(rdev->wiphy.pmsr_capa->ftm.bandwidths & BIT(out->chandef.width))) { NL_SET_ERR_MSG(genl_info_extack(info), "FTM: unsupported bandwidth"); return -EINVAL; } /* no validation needed - was already done via nested policy */ nla_parse_nested(tb, NL80211_PMSR_FTM_REQ_ATTR_MAX, ftmreq, NULL, NULL); if (tb[NL80211_PMSR_FTM_REQ_ATTR_PREAMBLE]) preamble = nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_PREAMBLE]); /* set up values - struct is 
0-initialized */ out->ftm.requested = true; switch (out->chandef.chan->band) { case NL80211_BAND_60GHZ: /* optional */ break; default: if (!tb[NL80211_PMSR_FTM_REQ_ATTR_PREAMBLE]) { NL_SET_ERR_MSG(genl_info_extack(info), "FTM: must specify preamble"); return -EINVAL; } } if (!(capa->ftm.preambles & BIT(preamble))) { NL_SET_ERR_MSG_ATTR(genl_info_extack(info), tb[NL80211_PMSR_FTM_REQ_ATTR_PREAMBLE], "FTM: invalid preamble"); return -EINVAL; } out->ftm.preamble = preamble; out->ftm.burst_period = 0; if (tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD]) out->ftm.burst_period = nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD]); out->ftm.asap = !!tb[NL80211_PMSR_FTM_REQ_ATTR_ASAP]; if (out->ftm.asap && !capa->ftm.asap) { NL_SET_ERR_MSG_ATTR(genl_info_extack(info), tb[NL80211_PMSR_FTM_REQ_ATTR_ASAP], "FTM: ASAP mode not supported"); return -EINVAL; } if (!out->ftm.asap && !capa->ftm.non_asap) { NL_SET_ERR_MSG(genl_info_extack(info), "FTM: non-ASAP mode not supported"); return -EINVAL; } out->ftm.num_bursts_exp = 0; if (tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP]) out->ftm.num_bursts_exp = nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP]); if (capa->ftm.max_bursts_exponent >= 0 && out->ftm.num_bursts_exp > capa->ftm.max_bursts_exponent) { NL_SET_ERR_MSG_ATTR(genl_info_extack(info), tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP], "FTM: max NUM_BURSTS_EXP must be set lower than the device limit"); return -EINVAL; } out->ftm.burst_duration = 15; if (tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION]) out->ftm.burst_duration = nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION]); out->ftm.ftms_per_burst = 0; if (tb[NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST]) out->ftm.ftms_per_burst = nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST]); if (capa->ftm.max_ftms_per_burst && (out->ftm.ftms_per_burst > capa->ftm.max_ftms_per_burst || out->ftm.ftms_per_burst == 0)) { NL_SET_ERR_MSG_ATTR(genl_info_extack(info), tb[NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST], "FTM: FTMs per burst must be set lower than the device limit but non-zero"); return -EINVAL; } out->ftm.ftmr_retries = 3; if (tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES]) out->ftm.ftmr_retries = nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES]); out->ftm.request_lci = !!tb[NL80211_PMSR_FTM_REQ_ATTR_REQUEST_LCI]; if (out->ftm.request_lci && !capa->ftm.request_lci) { NL_SET_ERR_MSG_ATTR(genl_info_extack(info), tb[NL80211_PMSR_FTM_REQ_ATTR_REQUEST_LCI], "FTM: LCI request not supported"); } out->ftm.request_civicloc = !!tb[NL80211_PMSR_FTM_REQ_ATTR_REQUEST_CIVICLOC]; if (out->ftm.request_civicloc && !capa->ftm.request_civicloc) { NL_SET_ERR_MSG_ATTR(genl_info_extack(info), tb[NL80211_PMSR_FTM_REQ_ATTR_REQUEST_CIVICLOC], "FTM: civic location request not supported"); } return 0; } static int pmsr_parse_peer(struct cfg80211_registered_device *rdev, struct nlattr *peer, struct cfg80211_pmsr_request_peer *out, struct genl_info *info) { struct nlattr *tb[NL80211_PMSR_PEER_ATTR_MAX + 1]; struct nlattr *req[NL80211_PMSR_REQ_ATTR_MAX + 1]; struct nlattr *treq; int err, rem; /* no validation needed - was already done via nested policy */ nla_parse_nested(tb, NL80211_PMSR_PEER_ATTR_MAX, peer, NULL, NULL); if (!tb[NL80211_PMSR_PEER_ATTR_ADDR] || !tb[NL80211_PMSR_PEER_ATTR_CHAN] || !tb[NL80211_PMSR_PEER_ATTR_REQ]) { NL_SET_ERR_MSG_ATTR(genl_info_extack(info), peer, "insufficient peer data"); return -EINVAL; } memcpy(out->addr, nla_data(tb[NL80211_PMSR_PEER_ATTR_ADDR]), ETH_ALEN); /* reuse info->attrs */ memset(info->attrs, 0, sizeof(*info->attrs) * 
(NL80211_ATTR_MAX + 1)); /* need to validate here, we don't want to have validation recursion */ err = nla_parse_nested(info->attrs, NL80211_ATTR_MAX, tb[NL80211_PMSR_PEER_ATTR_CHAN], nl80211_policy, genl_info_extack(info)); if (err) return err; err = nl80211_parse_chandef(rdev, info, &out->chandef); if (err) return err; /* no validation needed - was already done via nested policy */ nla_parse_nested(req, NL80211_PMSR_REQ_ATTR_MAX, tb[NL80211_PMSR_PEER_ATTR_REQ], NULL, NULL); if (!req[NL80211_PMSR_REQ_ATTR_DATA]) { NL_SET_ERR_MSG_ATTR(genl_info_extack(info), tb[NL80211_PMSR_PEER_ATTR_REQ], "missing request type/data"); return -EINVAL; } if (req[NL80211_PMSR_REQ_ATTR_GET_AP_TSF]) out->report_ap_tsf = true; if (out->report_ap_tsf && !rdev->wiphy.pmsr_capa->report_ap_tsf) { NL_SET_ERR_MSG_ATTR(genl_info_extack(info), req[NL80211_PMSR_REQ_ATTR_GET_AP_TSF], "reporting AP TSF is not supported"); return -EINVAL; } nla_for_each_nested(treq, req[NL80211_PMSR_REQ_ATTR_DATA], rem) { switch (nla_type(treq)) { case NL80211_PMSR_TYPE_FTM: err = pmsr_parse_ftm(rdev, treq, out, info); break; default: NL_SET_ERR_MSG_ATTR(genl_info_extack(info), treq, "unsupported measurement type"); err = -EINVAL; } } if (err) return err; return 0; } int nl80211_pmsr_start(struct sk_buff *skb, struct genl_info *info) { struct nlattr *reqattr = info->attrs[NL80211_ATTR_PEER_MEASUREMENTS]; struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct wireless_dev *wdev = info->user_ptr[1]; struct cfg80211_pmsr_request *req; struct nlattr *peers, *peer; int count, rem, err, idx; if (!rdev->wiphy.pmsr_capa) return -EOPNOTSUPP; if (!reqattr) return -EINVAL; peers = nla_find(nla_data(reqattr), nla_len(reqattr), NL80211_PMSR_ATTR_PEERS); if (!peers) return -EINVAL; count = 0; nla_for_each_nested(peer, peers, rem) { count++; if (count > rdev->wiphy.pmsr_capa->max_peers) { NL_SET_ERR_MSG_ATTR(genl_info_extack(info), peer, "Too many peers used"); return -EINVAL; } } req = kzalloc(struct_size(req, peers, count), GFP_KERNEL); if (!req) return -ENOMEM; if (info->attrs[NL80211_ATTR_TIMEOUT]) req->timeout = nla_get_u32(info->attrs[NL80211_ATTR_TIMEOUT]); if (info->attrs[NL80211_ATTR_MAC]) { if (!rdev->wiphy.pmsr_capa->randomize_mac_addr) { NL_SET_ERR_MSG_ATTR(genl_info_extack(info), info->attrs[NL80211_ATTR_MAC], "device cannot randomize MAC address"); err = -EINVAL; goto out_err; } err = nl80211_parse_random_mac(info->attrs, req->mac_addr, req->mac_addr_mask); if (err) goto out_err; } else { memcpy(req->mac_addr, wdev_address(wdev), ETH_ALEN); memset(req->mac_addr_mask, 0xff, ETH_ALEN); } idx = 0; nla_for_each_nested(peer, peers, rem) { /* NB: this reuses info->attrs, but we no longer need it */ err = pmsr_parse_peer(rdev, peer, &req->peers[idx], info); if (err) goto out_err; idx++; } req->n_peers = count; req->cookie = cfg80211_assign_cookie(rdev); req->nl_portid = genl_info_snd_portid(info); err = rdev_start_pmsr(rdev, wdev, req); if (err) goto out_err; list_add_tail(&req->list, &wdev->pmsr_list); nl_set_extack_cookie_u64(genl_info_extack(info), req->cookie); return 0; out_err: kfree(req); return err; } void cfg80211_pmsr_complete(struct wireless_dev *wdev, struct cfg80211_pmsr_request *req, gfp_t gfp) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); struct sk_buff *msg; void *hdr; trace_cfg80211_pmsr_complete(wdev->wiphy, wdev, req->cookie); msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); if (!msg) goto free_request; hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_PEER_MEASUREMENT_COMPLETE); if (!hdr) goto 
free_msg; if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev), NL80211_ATTR_PAD)) goto free_msg; if (nla_put_u64_64bit(msg, NL80211_ATTR_COOKIE, req->cookie, NL80211_ATTR_PAD)) goto free_msg; genlmsg_end(msg, hdr); genlmsg_unicast(wiphy_net(wdev->wiphy), msg, req->nl_portid); goto free_request; free_msg: nlmsg_free(msg); free_request: spin_lock_bh(&wdev->pmsr_lock); list_del(&req->list); spin_unlock_bh(&wdev->pmsr_lock); kfree(req); } EXPORT_SYMBOL_GPL(cfg80211_pmsr_complete); static int nl80211_pmsr_send_ftm_res(struct sk_buff *msg, struct cfg80211_pmsr_result *res) { if (res->status == NL80211_PMSR_STATUS_FAILURE) { if (nla_put_u32(msg, NL80211_PMSR_FTM_RESP_ATTR_FAIL_REASON, res->ftm.failure_reason)) goto error; if (res->ftm.failure_reason == NL80211_PMSR_FTM_FAILURE_PEER_BUSY && res->ftm.busy_retry_time && nla_put_u32(msg, NL80211_PMSR_FTM_RESP_ATTR_BUSY_RETRY_TIME, res->ftm.busy_retry_time)) goto error; return 0; } #define PUT(tp, attr, val) \ do { \ if (nla_put_##tp(msg, \ NL80211_PMSR_FTM_RESP_ATTR_##attr, \ res->ftm.val)) \ goto error; \ } while (0) #define PUTOPT(tp, attr, val) \ do { \ if (res->ftm.val##_valid) \ PUT(tp, attr, val); \ } while (0) #define PUT_U64(attr, val) \ do { \ if (nla_put_u64_64bit(msg, \ NL80211_PMSR_FTM_RESP_ATTR_##attr,\ res->ftm.val, \ NL80211_PMSR_FTM_RESP_ATTR_PAD)) \ goto error; \ } while (0) #define PUTOPT_U64(attr, val) \ do { \ if (res->ftm.val##_valid) \ PUT_U64(attr, val); \ } while (0) if (res->ftm.burst_index >= 0) PUT(u32, BURST_INDEX, burst_index); PUTOPT(u32, NUM_FTMR_ATTEMPTS, num_ftmr_attempts); PUTOPT(u32, NUM_FTMR_SUCCESSES, num_ftmr_successes); PUT(u8, NUM_BURSTS_EXP, num_bursts_exp); PUT(u8, BURST_DURATION, burst_duration); PUT(u8, FTMS_PER_BURST, ftms_per_burst); PUTOPT(s32, RSSI_AVG, rssi_avg); PUTOPT(s32, RSSI_SPREAD, rssi_spread); if (res->ftm.tx_rate_valid && !nl80211_put_sta_rate(msg, &res->ftm.tx_rate, NL80211_PMSR_FTM_RESP_ATTR_TX_RATE)) goto error; if (res->ftm.rx_rate_valid && !nl80211_put_sta_rate(msg, &res->ftm.rx_rate, NL80211_PMSR_FTM_RESP_ATTR_RX_RATE)) goto error; PUTOPT_U64(RTT_AVG, rtt_avg); PUTOPT_U64(RTT_VARIANCE, rtt_variance); PUTOPT_U64(RTT_SPREAD, rtt_spread); PUTOPT_U64(DIST_AVG, dist_avg); PUTOPT_U64(DIST_VARIANCE, dist_variance); PUTOPT_U64(DIST_SPREAD, dist_spread); if (res->ftm.lci && res->ftm.lci_len && nla_put(msg, NL80211_PMSR_FTM_RESP_ATTR_LCI, res->ftm.lci_len, res->ftm.lci)) goto error; if (res->ftm.civicloc && res->ftm.civicloc_len && nla_put(msg, NL80211_PMSR_FTM_RESP_ATTR_CIVICLOC, res->ftm.civicloc_len, res->ftm.civicloc)) goto error; #undef PUT #undef PUTOPT #undef PUT_U64 #undef PUTOPT_U64 return 0; error: return -ENOSPC; } static int nl80211_pmsr_send_result(struct sk_buff *msg, struct cfg80211_pmsr_result *res) { struct nlattr *pmsr, *peers, *peer, *resp, *data, *typedata; pmsr = nla_nest_start(msg, NL80211_ATTR_PEER_MEASUREMENTS); if (!pmsr) goto error; peers = nla_nest_start(msg, NL80211_PMSR_ATTR_PEERS); if (!peers) goto error; peer = nla_nest_start(msg, 1); if (!peer) goto error; if (nla_put(msg, NL80211_PMSR_PEER_ATTR_ADDR, ETH_ALEN, res->addr)) goto error; resp = nla_nest_start(msg, NL80211_PMSR_PEER_ATTR_RESP); if (!resp) goto error; if (nla_put_u32(msg, NL80211_PMSR_RESP_ATTR_STATUS, res->status) || nla_put_u64_64bit(msg, NL80211_PMSR_RESP_ATTR_HOST_TIME, res->host_time, NL80211_PMSR_RESP_ATTR_PAD)) goto error; if (res->ap_tsf_valid && nla_put_u64_64bit(msg, NL80211_PMSR_RESP_ATTR_AP_TSF, res->ap_tsf, 
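/* padattr argument: inserts alignment padding so the u64 stays 8-byte aligned in the netlink stream */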
NL80211_PMSR_RESP_ATTR_PAD)) goto error; if (res->final && nla_put_flag(msg, NL80211_PMSR_RESP_ATTR_FINAL)) goto error; data = nla_nest_start(msg, NL80211_PMSR_RESP_ATTR_DATA); if (!data) goto error; typedata = nla_nest_start(msg, res->type); if (!typedata) goto error; switch (res->type) { case NL80211_PMSR_TYPE_FTM: if (nl80211_pmsr_send_ftm_res(msg, res)) goto error; break; default: WARN_ON(1); } nla_nest_end(msg, typedata); nla_nest_end(msg, data); nla_nest_end(msg, resp); nla_nest_end(msg, peer); nla_nest_end(msg, peers); nla_nest_end(msg, pmsr); return 0; error: return -ENOSPC; } void cfg80211_pmsr_report(struct wireless_dev *wdev, struct cfg80211_pmsr_request *req, struct cfg80211_pmsr_result *result, gfp_t gfp) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); struct sk_buff *msg; void *hdr; int err; trace_cfg80211_pmsr_report(wdev->wiphy, wdev, req->cookie, result->addr); /* * Currently, only variable items are LCI and civic location, * both of which are reasonably short so we don't need to * worry about them here for the allocation. */ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); if (!msg) return; hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_PEER_MEASUREMENT_RESULT); if (!hdr) goto free; if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev), NL80211_ATTR_PAD)) goto free; if (nla_put_u64_64bit(msg, NL80211_ATTR_COOKIE, req->cookie, NL80211_ATTR_PAD)) goto free; err = nl80211_pmsr_send_result(msg, result); if (err) { pr_err_ratelimited("peer measurement result: message didn't fit!"); goto free; } genlmsg_end(msg, hdr); genlmsg_unicast(wiphy_net(wdev->wiphy), msg, req->nl_portid); return; free: nlmsg_free(msg); } EXPORT_SYMBOL_GPL(cfg80211_pmsr_report); static void cfg80211_pmsr_process_abort(struct wireless_dev *wdev) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); struct cfg80211_pmsr_request *req, *tmp; LIST_HEAD(free_list); lockdep_assert_held(&wdev->mtx); spin_lock_bh(&wdev->pmsr_lock); list_for_each_entry_safe(req, tmp, &wdev->pmsr_list, list) { if (req->nl_portid) continue; list_move_tail(&req->list, &free_list); } spin_unlock_bh(&wdev->pmsr_lock); list_for_each_entry_safe(req, tmp, &free_list, list) { rdev_abort_pmsr(rdev, wdev, req); kfree(req); } } void cfg80211_pmsr_free_wk(struct work_struct *work) { struct wireless_dev *wdev = container_of(work, struct wireless_dev, pmsr_free_wk); wdev_lock(wdev); cfg80211_pmsr_process_abort(wdev); wdev_unlock(wdev); } void cfg80211_pmsr_wdev_down(struct wireless_dev *wdev) { struct cfg80211_pmsr_request *req; bool found = false; spin_lock_bh(&wdev->pmsr_lock); list_for_each_entry(req, &wdev->pmsr_list, list) { found = true; req->nl_portid = 0; } spin_unlock_bh(&wdev->pmsr_lock); if (found) cfg80211_pmsr_process_abort(wdev); WARN_ON(!list_empty(&wdev->pmsr_list)); } void cfg80211_release_pmsr(struct wireless_dev *wdev, u32 portid) { struct cfg80211_pmsr_request *req; spin_lock_bh(&wdev->pmsr_lock); list_for_each_entry(req, &wdev->pmsr_list, list) { if (req->nl_portid == portid) { req->nl_portid = 0; schedule_work(&wdev->pmsr_free_wk); } } spin_unlock_bh(&wdev->pmsr_lock); } #endif /* __PMSR_H */ backport-iwlwifi-dkms-7906/net/wireless/radiotap.c000066400000000000000000000274031351064634500223010ustar00rootroot00000000000000/* * Radiotap parser * * Copyright 2007 Andy Green * Copyright 2009 Johannes Berg * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License 
version 2 as * published by the Free Software Foundation. * * Alternatively, this software may be distributed under the terms of BSD * license. * * See COPYING for more details. */ #include #include #include #include #include /* function prototypes and related defs are in include/net/cfg80211.h */ static const struct radiotap_align_size rtap_namespace_sizes[] = { [IEEE80211_RADIOTAP_TSFT] = { .align = 8, .size = 8, }, [IEEE80211_RADIOTAP_FLAGS] = { .align = 1, .size = 1, }, [IEEE80211_RADIOTAP_RATE] = { .align = 1, .size = 1, }, [IEEE80211_RADIOTAP_CHANNEL] = { .align = 2, .size = 4, }, [IEEE80211_RADIOTAP_FHSS] = { .align = 2, .size = 2, }, [IEEE80211_RADIOTAP_DBM_ANTSIGNAL] = { .align = 1, .size = 1, }, [IEEE80211_RADIOTAP_DBM_ANTNOISE] = { .align = 1, .size = 1, }, [IEEE80211_RADIOTAP_LOCK_QUALITY] = { .align = 2, .size = 2, }, [IEEE80211_RADIOTAP_TX_ATTENUATION] = { .align = 2, .size = 2, }, [IEEE80211_RADIOTAP_DB_TX_ATTENUATION] = { .align = 2, .size = 2, }, [IEEE80211_RADIOTAP_DBM_TX_POWER] = { .align = 1, .size = 1, }, [IEEE80211_RADIOTAP_ANTENNA] = { .align = 1, .size = 1, }, [IEEE80211_RADIOTAP_DB_ANTSIGNAL] = { .align = 1, .size = 1, }, [IEEE80211_RADIOTAP_DB_ANTNOISE] = { .align = 1, .size = 1, }, [IEEE80211_RADIOTAP_RX_FLAGS] = { .align = 2, .size = 2, }, [IEEE80211_RADIOTAP_TX_FLAGS] = { .align = 2, .size = 2, }, [IEEE80211_RADIOTAP_RTS_RETRIES] = { .align = 1, .size = 1, }, [IEEE80211_RADIOTAP_DATA_RETRIES] = { .align = 1, .size = 1, }, [IEEE80211_RADIOTAP_MCS] = { .align = 1, .size = 3, }, [IEEE80211_RADIOTAP_AMPDU_STATUS] = { .align = 4, .size = 8, }, [IEEE80211_RADIOTAP_VHT] = { .align = 2, .size = 12, }, /* * add more here as they are defined in radiotap.h */ }; static const struct ieee80211_radiotap_namespace radiotap_ns = { .n_bits = ARRAY_SIZE(rtap_namespace_sizes), .align_size = rtap_namespace_sizes, }; /** * ieee80211_radiotap_iterator_init - radiotap parser iterator initialization * @iterator: radiotap_iterator to initialize * @radiotap_header: radiotap header to parse * @max_length: total length we can parse into (eg, whole packet length) * * Returns: 0 or a negative error code if there is a problem. * * This function initializes an opaque iterator struct which can then * be passed to ieee80211_radiotap_iterator_next() to visit every radiotap * argument which is present in the header. It knows about extended * present headers and handles them. * * How to use: * call __ieee80211_radiotap_iterator_init() to init a semi-opaque iterator * struct ieee80211_radiotap_iterator (no need to init the struct beforehand) * checking for a good 0 return code. Then loop calling * __ieee80211_radiotap_iterator_next()... it returns either 0, * -ENOENT if there are no more args to parse, or -EINVAL if there is a problem. * The iterator's @this_arg member points to the start of the argument * associated with the current argument index that is present, which can be * found in the iterator's @this_arg_index member. This arg index corresponds * to the IEEE80211_RADIOTAP_... defines. * * Radiotap header length: * You can find the CPU-endian total radiotap header length in * iterator->max_length after executing ieee80211_radiotap_iterator_init() * successfully. * * Alignment Gotcha: * You must take care when dereferencing iterator.this_arg * for multibyte types... the pointer is not aligned. Use * get_unaligned((type *)iterator.this_arg) to dereference * iterator.this_arg for type "type" safely on all arches. 
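 *
 * A minimal iteration loop (a sketch; assumes @buf points at a received
 * radiotap header of @buflen bytes, and error handling is elided):
 *
 *	struct ieee80211_radiotap_iterator iter;
 *	u8 flags;
 *	u16 freq;
 *	int ret;
 *
 *	ret = ieee80211_radiotap_iterator_init(&iter, buf, buflen, NULL);
 *	while (!ret) {
 *		ret = ieee80211_radiotap_iterator_next(&iter);
 *		if (ret)
 *			continue;
 *		switch (iter.this_arg_index) {
 *		case IEEE80211_RADIOTAP_FLAGS:
 *			flags = *iter.this_arg;
 *			break;
 *		case IEEE80211_RADIOTAP_CHANNEL:
 *			freq = get_unaligned_le16(iter.this_arg);
 *			break;
 *		default:
 *			break;
 *		}
 *	}
 *	if (ret != -ENOENT)
 *		return -EINVAL;
 *
 * -ENOENT here simply means every present argument was visited.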
* * Example code: * See Documentation/networking/radiotap-headers.txt */ int ieee80211_radiotap_iterator_init( struct ieee80211_radiotap_iterator *iterator, struct ieee80211_radiotap_header *radiotap_header, int max_length, const struct ieee80211_radiotap_vendor_namespaces *vns) { /* check the radiotap header can actually be present */ if (max_length < sizeof(struct ieee80211_radiotap_header)) return -EINVAL; /* Linux only supports version 0 radiotap format */ if (radiotap_header->it_version) return -EINVAL; /* sanity check for allowed length and radiotap length field */ if (max_length < get_unaligned_le16(&radiotap_header->it_len)) return -EINVAL; iterator->_rtheader = radiotap_header; iterator->_max_length = get_unaligned_le16(&radiotap_header->it_len); iterator->_arg_index = 0; iterator->_bitmap_shifter = get_unaligned_le32(&radiotap_header->it_present); iterator->_arg = (uint8_t *)radiotap_header + sizeof(*radiotap_header); iterator->_reset_on_ext = 0; iterator->_next_bitmap = &radiotap_header->it_present; iterator->_next_bitmap++; iterator->_vns = vns; iterator->current_namespace = &radiotap_ns; iterator->is_radiotap_ns = 1; /* find payload start allowing for extended bitmap(s) */ if (iterator->_bitmap_shifter & (1<_arg - (unsigned long)iterator->_rtheader + sizeof(uint32_t) > (unsigned long)iterator->_max_length) return -EINVAL; while (get_unaligned_le32(iterator->_arg) & (1 << IEEE80211_RADIOTAP_EXT)) { iterator->_arg += sizeof(uint32_t); /* * check for insanity where the present bitmaps * keep claiming to extend up to or even beyond the * stated radiotap header length */ if ((unsigned long)iterator->_arg - (unsigned long)iterator->_rtheader + sizeof(uint32_t) > (unsigned long)iterator->_max_length) return -EINVAL; } iterator->_arg += sizeof(uint32_t); /* * no need to check again for blowing past stated radiotap * header length, because ieee80211_radiotap_iterator_next * checks it before it is dereferenced */ } iterator->this_arg = iterator->_arg; /* we are all initialized happily */ return 0; } EXPORT_SYMBOL(ieee80211_radiotap_iterator_init); static void find_ns(struct ieee80211_radiotap_iterator *iterator, uint32_t oui, uint8_t subns) { int i; iterator->current_namespace = NULL; if (!iterator->_vns) return; for (i = 0; i < iterator->_vns->n_ns; i++) { if (iterator->_vns->ns[i].oui != oui) continue; if (iterator->_vns->ns[i].subns != subns) continue; iterator->current_namespace = &iterator->_vns->ns[i]; break; } } /** * ieee80211_radiotap_iterator_next - return next radiotap parser iterator arg * @iterator: radiotap_iterator to move to next arg (if any) * * Returns: 0 if there is an argument to handle, * -ENOENT if there are no more args or -EINVAL * if there is something else wrong. * * This function provides the next radiotap arg index (IEEE80211_RADIOTAP_*) * in @this_arg_index and sets @this_arg to point to the * payload for the field. It takes care of alignment handling and extended * present fields. @this_arg can be changed by the caller (eg, * incremented to move inside a compound argument like * IEEE80211_RADIOTAP_CHANNEL). The args pointed to are in * little-endian format whatever the endianess of your CPU. * * Alignment Gotcha: * You must take care when dereferencing iterator.this_arg * for multibyte types... the pointer is not aligned. Use * get_unaligned((type *)iterator.this_arg) to dereference * iterator.this_arg for type "type" safely on all arches. 
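 *
 * For example, reading the 2-byte channel frequency (a sketch, assuming
 * the iterator currently points at an IEEE80211_RADIOTAP_CHANNEL arg):
 *
 *	u16 freq = get_unaligned_le16(iterator.this_arg);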
*/ int ieee80211_radiotap_iterator_next( struct ieee80211_radiotap_iterator *iterator) { while (1) { int hit = 0; int pad, align, size, subns; uint32_t oui; /* if no more EXT bits, that's it */ if ((iterator->_arg_index % 32) == IEEE80211_RADIOTAP_EXT && !(iterator->_bitmap_shifter & 1)) return -ENOENT; if (!(iterator->_bitmap_shifter & 1)) goto next_entry; /* arg not present */ /* get alignment/size of data */ switch (iterator->_arg_index % 32) { case IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE: case IEEE80211_RADIOTAP_EXT: align = 1; size = 0; break; case IEEE80211_RADIOTAP_VENDOR_NAMESPACE: align = 2; size = 6; break; default: if (!iterator->current_namespace || iterator->_arg_index >= iterator->current_namespace->n_bits) { if (iterator->current_namespace == &radiotap_ns) return -ENOENT; align = 0; } else { align = iterator->current_namespace->align_size[iterator->_arg_index].align; size = iterator->current_namespace->align_size[iterator->_arg_index].size; } if (!align) { /* skip all subsequent data */ iterator->_arg = iterator->_next_ns_data; /* give up on this namespace */ iterator->current_namespace = NULL; goto next_entry; } break; } /* * arg is present, account for alignment padding * * Note that these alignments are relative to the start * of the radiotap header. There is no guarantee * that the radiotap header itself is aligned on any * kind of boundary. * * The above is why get_unaligned() is used to dereference * multibyte elements from the radiotap area. */ pad = ((unsigned long)iterator->_arg - (unsigned long)iterator->_rtheader) & (align - 1); if (pad) iterator->_arg += align - pad; if (iterator->_arg_index % 32 == IEEE80211_RADIOTAP_VENDOR_NAMESPACE) { int vnslen; if ((unsigned long)iterator->_arg + size - (unsigned long)iterator->_rtheader > (unsigned long)iterator->_max_length) return -EINVAL; oui = (*iterator->_arg << 16) | (*(iterator->_arg + 1) << 8) | *(iterator->_arg + 2); subns = *(iterator->_arg + 3); find_ns(iterator, oui, subns); vnslen = get_unaligned_le16(iterator->_arg + 4); iterator->_next_ns_data = iterator->_arg + size + vnslen; if (!iterator->current_namespace) size += vnslen; } /* * this is what we will return to user, but we need to * move on first so next call has something fresh to test */ iterator->this_arg_index = iterator->_arg_index; iterator->this_arg = iterator->_arg; iterator->this_arg_size = size; /* internally move on the size of this arg */ iterator->_arg += size; /* * check for insanity where we are given a bitmap that * claims to have more arg content than the length of the * radiotap section. We will normally end up equalling this * max_length on the last arg, never exceeding it. */ if ((unsigned long)iterator->_arg - (unsigned long)iterator->_rtheader > (unsigned long)iterator->_max_length) return -EINVAL; /* these special ones are valid in each bitmap word */ switch (iterator->_arg_index % 32) { case IEEE80211_RADIOTAP_VENDOR_NAMESPACE: iterator->_reset_on_ext = 1; iterator->is_radiotap_ns = 0; /* * If parser didn't register this vendor * namespace with us, allow it to show it * as 'raw. Do do that, set argument index * to vendor namespace. 
*/ iterator->this_arg_index = IEEE80211_RADIOTAP_VENDOR_NAMESPACE; if (!iterator->current_namespace) hit = 1; goto next_entry; case IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE: iterator->_reset_on_ext = 1; iterator->current_namespace = &radiotap_ns; iterator->is_radiotap_ns = 1; goto next_entry; case IEEE80211_RADIOTAP_EXT: /* * bit 31 was set, there is more * -- move to next u32 bitmap */ iterator->_bitmap_shifter = get_unaligned_le32(iterator->_next_bitmap); iterator->_next_bitmap++; if (iterator->_reset_on_ext) iterator->_arg_index = 0; else iterator->_arg_index++; iterator->_reset_on_ext = 0; break; default: /* we've got a hit! */ hit = 1; next_entry: iterator->_bitmap_shifter >>= 1; iterator->_arg_index++; } /* if we found a valid arg earlier, return it now */ if (hit) return 0; } } EXPORT_SYMBOL(ieee80211_radiotap_iterator_next); backport-iwlwifi-dkms-7906/net/wireless/rdev-ops.h000066400000000000000000001114471351064634500222440ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0 */ /* * Portions of this file * Copyright(c) 2016-2017 Intel Deutschland GmbH * Copyright (C) 2018 Intel Corporation */ #ifndef __CFG80211_RDEV_OPS #define __CFG80211_RDEV_OPS #include #include #include "core.h" #include "trace.h" static inline int rdev_suspend(struct cfg80211_registered_device *rdev, struct cfg80211_wowlan *wowlan) { int ret; trace_rdev_suspend(&rdev->wiphy, wowlan); ret = rdev->ops->suspend(&rdev->wiphy, wowlan); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_resume(struct cfg80211_registered_device *rdev) { int ret; trace_rdev_resume(&rdev->wiphy); ret = rdev->ops->resume(&rdev->wiphy); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline void rdev_set_wakeup(struct cfg80211_registered_device *rdev, bool enabled) { trace_rdev_set_wakeup(&rdev->wiphy, enabled); rdev->ops->set_wakeup(&rdev->wiphy, enabled); trace_rdev_return_void(&rdev->wiphy); } static inline struct wireless_dev *rdev_add_virtual_intf(struct cfg80211_registered_device *rdev, char *name, unsigned char name_assign_type, enum nl80211_iftype type, struct vif_params *params) { struct wireless_dev *ret; trace_rdev_add_virtual_intf(&rdev->wiphy, name, type); ret = rdev->ops->add_virtual_intf(&rdev->wiphy, name, name_assign_type, type, params); trace_rdev_return_wdev(&rdev->wiphy, ret); return ret; } static inline int rdev_del_virtual_intf(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev) { int ret; trace_rdev_del_virtual_intf(&rdev->wiphy, wdev); ret = rdev->ops->del_virtual_intf(&rdev->wiphy, wdev); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_change_virtual_intf(struct cfg80211_registered_device *rdev, struct net_device *dev, enum nl80211_iftype type, struct vif_params *params) { int ret; trace_rdev_change_virtual_intf(&rdev->wiphy, dev, type); ret = rdev->ops->change_virtual_intf(&rdev->wiphy, dev, type, params); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_add_key(struct cfg80211_registered_device *rdev, struct net_device *netdev, u8 key_index, bool pairwise, const u8 *mac_addr, struct key_params *params) { int ret; trace_rdev_add_key(&rdev->wiphy, netdev, key_index, pairwise, mac_addr); ret = rdev->ops->add_key(&rdev->wiphy, netdev, key_index, pairwise, mac_addr, params); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_get_key(struct cfg80211_registered_device *rdev, struct net_device *netdev, u8 key_index, bool pairwise, const u8 *mac_addr, void *cookie, 
void (*callback)(void *cookie, struct key_params*)) { int ret; trace_rdev_get_key(&rdev->wiphy, netdev, key_index, pairwise, mac_addr); ret = rdev->ops->get_key(&rdev->wiphy, netdev, key_index, pairwise, mac_addr, cookie, callback); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_del_key(struct cfg80211_registered_device *rdev, struct net_device *netdev, u8 key_index, bool pairwise, const u8 *mac_addr) { int ret; trace_rdev_del_key(&rdev->wiphy, netdev, key_index, pairwise, mac_addr); ret = rdev->ops->del_key(&rdev->wiphy, netdev, key_index, pairwise, mac_addr); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_set_default_key(struct cfg80211_registered_device *rdev, struct net_device *netdev, u8 key_index, bool unicast, bool multicast) { int ret; trace_rdev_set_default_key(&rdev->wiphy, netdev, key_index, unicast, multicast); ret = rdev->ops->set_default_key(&rdev->wiphy, netdev, key_index, unicast, multicast); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_set_default_mgmt_key(struct cfg80211_registered_device *rdev, struct net_device *netdev, u8 key_index) { int ret; trace_rdev_set_default_mgmt_key(&rdev->wiphy, netdev, key_index); ret = rdev->ops->set_default_mgmt_key(&rdev->wiphy, netdev, key_index); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_start_ap(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_ap_settings *settings) { int ret; trace_rdev_start_ap(&rdev->wiphy, dev, settings); ret = rdev->ops->start_ap(&rdev->wiphy, dev, settings); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_change_beacon(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_beacon_data *info) { int ret; trace_rdev_change_beacon(&rdev->wiphy, dev, info); ret = rdev->ops->change_beacon(&rdev->wiphy, dev, info); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_stop_ap(struct cfg80211_registered_device *rdev, struct net_device *dev) { int ret; trace_rdev_stop_ap(&rdev->wiphy, dev); ret = rdev->ops->stop_ap(&rdev->wiphy, dev); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_add_station(struct cfg80211_registered_device *rdev, struct net_device *dev, u8 *mac, struct station_parameters *params) { int ret; trace_rdev_add_station(&rdev->wiphy, dev, mac, params); ret = rdev->ops->add_station(&rdev->wiphy, dev, mac, params); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_del_station(struct cfg80211_registered_device *rdev, struct net_device *dev, struct station_del_parameters *params) { int ret; trace_rdev_del_station(&rdev->wiphy, dev, params); ret = rdev->ops->del_station(&rdev->wiphy, dev, params); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_change_station(struct cfg80211_registered_device *rdev, struct net_device *dev, u8 *mac, struct station_parameters *params) { int ret; trace_rdev_change_station(&rdev->wiphy, dev, mac, params); ret = rdev->ops->change_station(&rdev->wiphy, dev, mac, params); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_get_station(struct cfg80211_registered_device *rdev, struct net_device *dev, const u8 *mac, struct station_info *sinfo) { int ret; trace_rdev_get_station(&rdev->wiphy, dev, mac); ret = rdev->ops->get_station(&rdev->wiphy, dev, mac, sinfo); trace_rdev_return_int_station_info(&rdev->wiphy, ret, sinfo); return 
ret; } static inline int rdev_dump_station(struct cfg80211_registered_device *rdev, struct net_device *dev, int idx, u8 *mac, struct station_info *sinfo) { int ret; trace_rdev_dump_station(&rdev->wiphy, dev, idx, mac); ret = rdev->ops->dump_station(&rdev->wiphy, dev, idx, mac, sinfo); trace_rdev_return_int_station_info(&rdev->wiphy, ret, sinfo); return ret; } static inline int rdev_add_mpath(struct cfg80211_registered_device *rdev, struct net_device *dev, u8 *dst, u8 *next_hop) { int ret; trace_rdev_add_mpath(&rdev->wiphy, dev, dst, next_hop); ret = rdev->ops->add_mpath(&rdev->wiphy, dev, dst, next_hop); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_del_mpath(struct cfg80211_registered_device *rdev, struct net_device *dev, u8 *dst) { int ret; trace_rdev_del_mpath(&rdev->wiphy, dev, dst); ret = rdev->ops->del_mpath(&rdev->wiphy, dev, dst); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_change_mpath(struct cfg80211_registered_device *rdev, struct net_device *dev, u8 *dst, u8 *next_hop) { int ret; trace_rdev_change_mpath(&rdev->wiphy, dev, dst, next_hop); ret = rdev->ops->change_mpath(&rdev->wiphy, dev, dst, next_hop); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_get_mpath(struct cfg80211_registered_device *rdev, struct net_device *dev, u8 *dst, u8 *next_hop, struct mpath_info *pinfo) { int ret; trace_rdev_get_mpath(&rdev->wiphy, dev, dst, next_hop); ret = rdev->ops->get_mpath(&rdev->wiphy, dev, dst, next_hop, pinfo); trace_rdev_return_int_mpath_info(&rdev->wiphy, ret, pinfo); return ret; } static inline int rdev_get_mpp(struct cfg80211_registered_device *rdev, struct net_device *dev, u8 *dst, u8 *mpp, struct mpath_info *pinfo) { int ret; trace_rdev_get_mpp(&rdev->wiphy, dev, dst, mpp); ret = rdev->ops->get_mpp(&rdev->wiphy, dev, dst, mpp, pinfo); trace_rdev_return_int_mpath_info(&rdev->wiphy, ret, pinfo); return ret; } static inline int rdev_dump_mpath(struct cfg80211_registered_device *rdev, struct net_device *dev, int idx, u8 *dst, u8 *next_hop, struct mpath_info *pinfo) { int ret; trace_rdev_dump_mpath(&rdev->wiphy, dev, idx, dst, next_hop); ret = rdev->ops->dump_mpath(&rdev->wiphy, dev, idx, dst, next_hop, pinfo); trace_rdev_return_int_mpath_info(&rdev->wiphy, ret, pinfo); return ret; } static inline int rdev_dump_mpp(struct cfg80211_registered_device *rdev, struct net_device *dev, int idx, u8 *dst, u8 *mpp, struct mpath_info *pinfo) { int ret; trace_rdev_dump_mpp(&rdev->wiphy, dev, idx, dst, mpp); ret = rdev->ops->dump_mpp(&rdev->wiphy, dev, idx, dst, mpp, pinfo); trace_rdev_return_int_mpath_info(&rdev->wiphy, ret, pinfo); return ret; } static inline int rdev_get_mesh_config(struct cfg80211_registered_device *rdev, struct net_device *dev, struct mesh_config *conf) { int ret; trace_rdev_get_mesh_config(&rdev->wiphy, dev); ret = rdev->ops->get_mesh_config(&rdev->wiphy, dev, conf); trace_rdev_return_int_mesh_config(&rdev->wiphy, ret, conf); return ret; } static inline int rdev_update_mesh_config(struct cfg80211_registered_device *rdev, struct net_device *dev, u32 mask, const struct mesh_config *nconf) { int ret; trace_rdev_update_mesh_config(&rdev->wiphy, dev, mask, nconf); ret = rdev->ops->update_mesh_config(&rdev->wiphy, dev, mask, nconf); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_join_mesh(struct cfg80211_registered_device *rdev, struct net_device *dev, const struct mesh_config *conf, const struct mesh_setup *setup) { int ret; 
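/* Like every rdev_*() helper in this header: trace the call, forward it to the driver op, then trace the return value. */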
trace_rdev_join_mesh(&rdev->wiphy, dev, conf, setup); ret = rdev->ops->join_mesh(&rdev->wiphy, dev, conf, setup); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_leave_mesh(struct cfg80211_registered_device *rdev, struct net_device *dev) { int ret; trace_rdev_leave_mesh(&rdev->wiphy, dev); ret = rdev->ops->leave_mesh(&rdev->wiphy, dev); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_join_ocb(struct cfg80211_registered_device *rdev, struct net_device *dev, struct ocb_setup *setup) { int ret; trace_rdev_join_ocb(&rdev->wiphy, dev, setup); ret = rdev->ops->join_ocb(&rdev->wiphy, dev, setup); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_leave_ocb(struct cfg80211_registered_device *rdev, struct net_device *dev) { int ret; trace_rdev_leave_ocb(&rdev->wiphy, dev); ret = rdev->ops->leave_ocb(&rdev->wiphy, dev); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_change_bss(struct cfg80211_registered_device *rdev, struct net_device *dev, struct bss_parameters *params) { int ret; trace_rdev_change_bss(&rdev->wiphy, dev, params); ret = rdev->ops->change_bss(&rdev->wiphy, dev, params); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_set_txq_params(struct cfg80211_registered_device *rdev, struct net_device *dev, struct ieee80211_txq_params *params) { int ret; trace_rdev_set_txq_params(&rdev->wiphy, dev, params); ret = rdev->ops->set_txq_params(&rdev->wiphy, dev, params); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_libertas_set_mesh_channel(struct cfg80211_registered_device *rdev, struct net_device *dev, struct ieee80211_channel *chan) { int ret; trace_rdev_libertas_set_mesh_channel(&rdev->wiphy, dev, chan); ret = rdev->ops->libertas_set_mesh_channel(&rdev->wiphy, dev, chan); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_set_monitor_channel(struct cfg80211_registered_device *rdev, struct cfg80211_chan_def *chandef) { int ret; trace_rdev_set_monitor_channel(&rdev->wiphy, chandef); ret = rdev->ops->set_monitor_channel(&rdev->wiphy, chandef); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_scan(struct cfg80211_registered_device *rdev, struct cfg80211_scan_request *request) { int ret; trace_rdev_scan(&rdev->wiphy, request); ret = rdev->ops->scan(&rdev->wiphy, request); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline void rdev_abort_scan(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev) { trace_rdev_abort_scan(&rdev->wiphy, wdev); rdev->ops->abort_scan(&rdev->wiphy, wdev); trace_rdev_return_void(&rdev->wiphy); } static inline int rdev_auth(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_auth_request *req) { int ret; trace_rdev_auth(&rdev->wiphy, dev, req); ret = rdev->ops->auth(&rdev->wiphy, dev, req); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_assoc(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_assoc_request *req) { int ret; trace_rdev_assoc(&rdev->wiphy, dev, req); ret = rdev->ops->assoc(&rdev->wiphy, dev, req); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_deauth(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_deauth_request *req) { int ret; trace_rdev_deauth(&rdev->wiphy, dev, req); ret = rdev->ops->deauth(&rdev->wiphy, dev, req); 
trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_disassoc(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_disassoc_request *req) { int ret; trace_rdev_disassoc(&rdev->wiphy, dev, req); ret = rdev->ops->disassoc(&rdev->wiphy, dev, req); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_connect(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_connect_params *sme) { int ret; trace_rdev_connect(&rdev->wiphy, dev, sme); ret = rdev->ops->connect(&rdev->wiphy, dev, sme); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_update_connect_params(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_connect_params *sme, u32 changed) { int ret; trace_rdev_update_connect_params(&rdev->wiphy, dev, sme, changed); ret = rdev->ops->update_connect_params(&rdev->wiphy, dev, sme, changed); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_disconnect(struct cfg80211_registered_device *rdev, struct net_device *dev, u16 reason_code) { int ret; trace_rdev_disconnect(&rdev->wiphy, dev, reason_code); ret = rdev->ops->disconnect(&rdev->wiphy, dev, reason_code); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_join_ibss(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_ibss_params *params) { int ret; trace_rdev_join_ibss(&rdev->wiphy, dev, params); ret = rdev->ops->join_ibss(&rdev->wiphy, dev, params); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_leave_ibss(struct cfg80211_registered_device *rdev, struct net_device *dev) { int ret; trace_rdev_leave_ibss(&rdev->wiphy, dev); ret = rdev->ops->leave_ibss(&rdev->wiphy, dev); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_set_wiphy_params(struct cfg80211_registered_device *rdev, u32 changed) { int ret; trace_rdev_set_wiphy_params(&rdev->wiphy, changed); ret = rdev->ops->set_wiphy_params(&rdev->wiphy, changed); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_set_tx_power(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, enum nl80211_tx_power_setting type, int mbm) { int ret; trace_rdev_set_tx_power(&rdev->wiphy, wdev, type, mbm); ret = rdev->ops->set_tx_power(&rdev->wiphy, wdev, type, mbm); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_get_tx_power(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, int *dbm) { int ret; trace_rdev_get_tx_power(&rdev->wiphy, wdev); ret = rdev->ops->get_tx_power(&rdev->wiphy, wdev, dbm); trace_rdev_return_int_int(&rdev->wiphy, ret, *dbm); return ret; } static inline int rdev_set_wds_peer(struct cfg80211_registered_device *rdev, struct net_device *dev, const u8 *addr) { int ret; trace_rdev_set_wds_peer(&rdev->wiphy, dev, addr); ret = rdev->ops->set_wds_peer(&rdev->wiphy, dev, addr); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_set_multicast_to_unicast(struct cfg80211_registered_device *rdev, struct net_device *dev, const bool enabled) { int ret; trace_rdev_set_multicast_to_unicast(&rdev->wiphy, dev, enabled); ret = rdev->ops->set_multicast_to_unicast(&rdev->wiphy, dev, enabled); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_get_txq_stats(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, struct cfg80211_txq_stats *txqstats) { int ret; 
trace_rdev_get_txq_stats(&rdev->wiphy, wdev); ret = rdev->ops->get_txq_stats(&rdev->wiphy, wdev, txqstats); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline void rdev_rfkill_poll(struct cfg80211_registered_device *rdev) { trace_rdev_rfkill_poll(&rdev->wiphy); rdev->ops->rfkill_poll(&rdev->wiphy); trace_rdev_return_void(&rdev->wiphy); } #ifdef CPTCFG_NL80211_TESTMODE static inline int rdev_testmode_cmd(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, void *data, int len) { int ret; trace_rdev_testmode_cmd(&rdev->wiphy, wdev); ret = rdev->ops->testmode_cmd(&rdev->wiphy, wdev, data, len); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_testmode_dump(struct cfg80211_registered_device *rdev, struct sk_buff *skb, struct netlink_callback *cb, void *data, int len) { int ret; trace_rdev_testmode_dump(&rdev->wiphy); ret = rdev->ops->testmode_dump(&rdev->wiphy, skb, cb, data, len); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } #endif static inline int rdev_set_bitrate_mask(struct cfg80211_registered_device *rdev, struct net_device *dev, const u8 *peer, const struct cfg80211_bitrate_mask *mask) { int ret; trace_rdev_set_bitrate_mask(&rdev->wiphy, dev, peer, mask); ret = rdev->ops->set_bitrate_mask(&rdev->wiphy, dev, peer, mask); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_dump_survey(struct cfg80211_registered_device *rdev, struct net_device *netdev, int idx, struct survey_info *info) { int ret; trace_rdev_dump_survey(&rdev->wiphy, netdev, idx); ret = rdev->ops->dump_survey(&rdev->wiphy, netdev, idx, info); if (ret < 0) trace_rdev_return_int(&rdev->wiphy, ret); else trace_rdev_return_int_survey_info(&rdev->wiphy, ret, info); return ret; } static inline int rdev_set_pmksa(struct cfg80211_registered_device *rdev, struct net_device *netdev, struct cfg80211_pmksa *pmksa) { int ret; trace_rdev_set_pmksa(&rdev->wiphy, netdev, pmksa); ret = rdev->ops->set_pmksa(&rdev->wiphy, netdev, pmksa); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_del_pmksa(struct cfg80211_registered_device *rdev, struct net_device *netdev, struct cfg80211_pmksa *pmksa) { int ret; trace_rdev_del_pmksa(&rdev->wiphy, netdev, pmksa); ret = rdev->ops->del_pmksa(&rdev->wiphy, netdev, pmksa); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_flush_pmksa(struct cfg80211_registered_device *rdev, struct net_device *netdev) { int ret; trace_rdev_flush_pmksa(&rdev->wiphy, netdev); ret = rdev->ops->flush_pmksa(&rdev->wiphy, netdev); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_remain_on_channel(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, struct ieee80211_channel *chan, unsigned int duration, u64 *cookie) { int ret; trace_rdev_remain_on_channel(&rdev->wiphy, wdev, chan, duration); ret = rdev->ops->remain_on_channel(&rdev->wiphy, wdev, chan, duration, cookie); trace_rdev_return_int_cookie(&rdev->wiphy, ret, *cookie); return ret; } static inline int rdev_cancel_remain_on_channel(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, u64 cookie) { int ret; trace_rdev_cancel_remain_on_channel(&rdev->wiphy, wdev, cookie); ret = rdev->ops->cancel_remain_on_channel(&rdev->wiphy, wdev, cookie); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_mgmt_tx(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, struct cfg80211_mgmt_tx_params *params, u64 *cookie) { int ret; 
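/* On success the driver fills *cookie; userspace later matches the TX-status notification against this cookie. */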
trace_rdev_mgmt_tx(&rdev->wiphy, wdev, params); ret = rdev->ops->mgmt_tx(&rdev->wiphy, wdev, params, cookie); trace_rdev_return_int_cookie(&rdev->wiphy, ret, *cookie); return ret; } static inline int rdev_tx_control_port(struct cfg80211_registered_device *rdev, struct net_device *dev, const void *buf, size_t len, const u8 *dest, __be16 proto, const bool noencrypt) { int ret; trace_rdev_tx_control_port(&rdev->wiphy, dev, buf, len, dest, proto, noencrypt); ret = rdev->ops->tx_control_port(&rdev->wiphy, dev, buf, len, dest, proto, noencrypt); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_mgmt_tx_cancel_wait(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, u64 cookie) { int ret; trace_rdev_mgmt_tx_cancel_wait(&rdev->wiphy, wdev, cookie); ret = rdev->ops->mgmt_tx_cancel_wait(&rdev->wiphy, wdev, cookie); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_set_power_mgmt(struct cfg80211_registered_device *rdev, struct net_device *dev, bool enabled, int timeout) { int ret; trace_rdev_set_power_mgmt(&rdev->wiphy, dev, enabled, timeout); ret = rdev->ops->set_power_mgmt(&rdev->wiphy, dev, enabled, timeout); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_set_cqm_rssi_config(struct cfg80211_registered_device *rdev, struct net_device *dev, s32 rssi_thold, u32 rssi_hyst) { int ret; trace_rdev_set_cqm_rssi_config(&rdev->wiphy, dev, rssi_thold, rssi_hyst); ret = rdev->ops->set_cqm_rssi_config(&rdev->wiphy, dev, rssi_thold, rssi_hyst); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_set_cqm_rssi_range_config(struct cfg80211_registered_device *rdev, struct net_device *dev, s32 low, s32 high) { int ret; trace_rdev_set_cqm_rssi_range_config(&rdev->wiphy, dev, low, high); ret = rdev->ops->set_cqm_rssi_range_config(&rdev->wiphy, dev, low, high); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_set_cqm_txe_config(struct cfg80211_registered_device *rdev, struct net_device *dev, u32 rate, u32 pkts, u32 intvl) { int ret; trace_rdev_set_cqm_txe_config(&rdev->wiphy, dev, rate, pkts, intvl); ret = rdev->ops->set_cqm_txe_config(&rdev->wiphy, dev, rate, pkts, intvl); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline void rdev_mgmt_frame_register(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, u16 frame_type, bool reg) { might_sleep(); trace_rdev_mgmt_frame_register(&rdev->wiphy, wdev , frame_type, reg); rdev->ops->mgmt_frame_register(&rdev->wiphy, wdev , frame_type, reg); trace_rdev_return_void(&rdev->wiphy); } static inline int rdev_set_antenna(struct cfg80211_registered_device *rdev, u32 tx_ant, u32 rx_ant) { int ret; trace_rdev_set_antenna(&rdev->wiphy, tx_ant, rx_ant); ret = rdev->ops->set_antenna(&rdev->wiphy, tx_ant, rx_ant); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_get_antenna(struct cfg80211_registered_device *rdev, u32 *tx_ant, u32 *rx_ant) { int ret; trace_rdev_get_antenna(&rdev->wiphy); ret = rdev->ops->get_antenna(&rdev->wiphy, tx_ant, rx_ant); if (ret) trace_rdev_return_int(&rdev->wiphy, ret); else trace_rdev_return_int_tx_rx(&rdev->wiphy, ret, *tx_ant, *rx_ant); return ret; } static inline int rdev_sched_scan_start(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_sched_scan_request *request) { int ret; trace_rdev_sched_scan_start(&rdev->wiphy, dev, request->reqid); ret = rdev->ops->sched_scan_start(&rdev->wiphy, dev, request); 
trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_sched_scan_stop(struct cfg80211_registered_device *rdev, struct net_device *dev, u64 reqid) { int ret; trace_rdev_sched_scan_stop(&rdev->wiphy, dev, reqid); ret = rdev->ops->sched_scan_stop(&rdev->wiphy, dev, reqid); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_set_rekey_data(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_gtk_rekey_data *data) { int ret; trace_rdev_set_rekey_data(&rdev->wiphy, dev); ret = rdev->ops->set_rekey_data(&rdev->wiphy, dev, data); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_tdls_mgmt(struct cfg80211_registered_device *rdev, struct net_device *dev, u8 *peer, u8 action_code, u8 dialog_token, u16 status_code, u32 peer_capability, bool initiator, const u8 *buf, size_t len) { int ret; trace_rdev_tdls_mgmt(&rdev->wiphy, dev, peer, action_code, dialog_token, status_code, peer_capability, initiator, buf, len); ret = rdev->ops->tdls_mgmt(&rdev->wiphy, dev, peer, action_code, dialog_token, status_code, peer_capability, initiator, buf, len); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_tdls_oper(struct cfg80211_registered_device *rdev, struct net_device *dev, u8 *peer, enum nl80211_tdls_operation oper) { int ret; trace_rdev_tdls_oper(&rdev->wiphy, dev, peer, oper); ret = rdev->ops->tdls_oper(&rdev->wiphy, dev, peer, oper); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_probe_client(struct cfg80211_registered_device *rdev, struct net_device *dev, const u8 *peer, u64 *cookie) { int ret; trace_rdev_probe_client(&rdev->wiphy, dev, peer); ret = rdev->ops->probe_client(&rdev->wiphy, dev, peer, cookie); trace_rdev_return_int_cookie(&rdev->wiphy, ret, *cookie); return ret; } static inline int rdev_set_noack_map(struct cfg80211_registered_device *rdev, struct net_device *dev, u16 noack_map) { int ret; trace_rdev_set_noack_map(&rdev->wiphy, dev, noack_map); ret = rdev->ops->set_noack_map(&rdev->wiphy, dev, noack_map); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_get_channel(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, struct cfg80211_chan_def *chandef) { int ret; trace_rdev_get_channel(&rdev->wiphy, wdev); ret = rdev->ops->get_channel(&rdev->wiphy, wdev, chandef); trace_rdev_return_chandef(&rdev->wiphy, ret, chandef); return ret; } static inline int rdev_start_p2p_device(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev) { int ret; trace_rdev_start_p2p_device(&rdev->wiphy, wdev); ret = rdev->ops->start_p2p_device(&rdev->wiphy, wdev); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline void rdev_stop_p2p_device(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev) { trace_rdev_stop_p2p_device(&rdev->wiphy, wdev); rdev->ops->stop_p2p_device(&rdev->wiphy, wdev); trace_rdev_return_void(&rdev->wiphy); } static inline int rdev_start_nan(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, struct cfg80211_nan_conf *conf) { int ret; trace_rdev_start_nan(&rdev->wiphy, wdev, conf); ret = rdev->ops->start_nan(&rdev->wiphy, wdev, conf); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline void rdev_stop_nan(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev) { trace_rdev_stop_nan(&rdev->wiphy, wdev); rdev->ops->stop_nan(&rdev->wiphy, wdev); trace_rdev_return_void(&rdev->wiphy); } static inline int 
rdev_add_nan_func(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, struct cfg80211_nan_func *nan_func) { int ret; trace_rdev_add_nan_func(&rdev->wiphy, wdev, nan_func); ret = rdev->ops->add_nan_func(&rdev->wiphy, wdev, nan_func); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline void rdev_del_nan_func(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, u64 cookie) { trace_rdev_del_nan_func(&rdev->wiphy, wdev, cookie); rdev->ops->del_nan_func(&rdev->wiphy, wdev, cookie); trace_rdev_return_void(&rdev->wiphy); } static inline int rdev_nan_change_conf(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, struct cfg80211_nan_conf *conf, u32 changes) { int ret; trace_rdev_nan_change_conf(&rdev->wiphy, wdev, conf, changes); if (rdev->ops->nan_change_conf) ret = rdev->ops->nan_change_conf(&rdev->wiphy, wdev, conf, changes); else ret = -ENOTSUPP; trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_set_mac_acl(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_acl_data *params) { int ret; trace_rdev_set_mac_acl(&rdev->wiphy, dev, params); ret = rdev->ops->set_mac_acl(&rdev->wiphy, dev, params); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_update_ft_ies(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_update_ft_ies_params *ftie) { int ret; trace_rdev_update_ft_ies(&rdev->wiphy, dev, ftie); ret = rdev->ops->update_ft_ies(&rdev->wiphy, dev, ftie); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_crit_proto_start(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, enum nl80211_crit_proto_id protocol, u16 duration) { int ret; trace_rdev_crit_proto_start(&rdev->wiphy, wdev, protocol, duration); ret = rdev->ops->crit_proto_start(&rdev->wiphy, wdev, protocol, duration); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline void rdev_crit_proto_stop(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev) { trace_rdev_crit_proto_stop(&rdev->wiphy, wdev); rdev->ops->crit_proto_stop(&rdev->wiphy, wdev); trace_rdev_return_void(&rdev->wiphy); } static inline int rdev_channel_switch(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_csa_settings *params) { int ret; trace_rdev_channel_switch(&rdev->wiphy, dev, params); ret = rdev->ops->channel_switch(&rdev->wiphy, dev, params); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_set_qos_map(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_qos_map *qos_map) { int ret = -EOPNOTSUPP; if (rdev->ops->set_qos_map) { trace_rdev_set_qos_map(&rdev->wiphy, dev, qos_map); ret = rdev->ops->set_qos_map(&rdev->wiphy, dev, qos_map); trace_rdev_return_int(&rdev->wiphy, ret); } return ret; } static inline int rdev_set_ap_chanwidth(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_chan_def *chandef) { int ret; trace_rdev_set_ap_chanwidth(&rdev->wiphy, dev, chandef); ret = rdev->ops->set_ap_chanwidth(&rdev->wiphy, dev, chandef); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_add_tx_ts(struct cfg80211_registered_device *rdev, struct net_device *dev, u8 tsid, const u8 *peer, u8 user_prio, u16 admitted_time) { int ret = -EOPNOTSUPP; trace_rdev_add_tx_ts(&rdev->wiphy, dev, tsid, peer, user_prio, admitted_time); if (rdev->ops->add_tx_ts) ret = rdev->ops->add_tx_ts(&rdev->wiphy, 
dev, tsid, peer, user_prio, admitted_time); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_del_tx_ts(struct cfg80211_registered_device *rdev, struct net_device *dev, u8 tsid, const u8 *peer) { int ret = -EOPNOTSUPP; trace_rdev_del_tx_ts(&rdev->wiphy, dev, tsid, peer); if (rdev->ops->del_tx_ts) ret = rdev->ops->del_tx_ts(&rdev->wiphy, dev, tsid, peer); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_tdls_channel_switch(struct cfg80211_registered_device *rdev, struct net_device *dev, const u8 *addr, u8 oper_class, struct cfg80211_chan_def *chandef) { int ret; trace_rdev_tdls_channel_switch(&rdev->wiphy, dev, addr, oper_class, chandef); ret = rdev->ops->tdls_channel_switch(&rdev->wiphy, dev, addr, oper_class, chandef); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline void rdev_tdls_cancel_channel_switch(struct cfg80211_registered_device *rdev, struct net_device *dev, const u8 *addr) { trace_rdev_tdls_cancel_channel_switch(&rdev->wiphy, dev, addr); rdev->ops->tdls_cancel_channel_switch(&rdev->wiphy, dev, addr); trace_rdev_return_void(&rdev->wiphy); } static inline int rdev_start_radar_detection(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_chan_def *chandef, u32 cac_time_ms) { int ret = -ENOTSUPP; trace_rdev_start_radar_detection(&rdev->wiphy, dev, chandef, cac_time_ms); if (rdev->ops->start_radar_detection) ret = rdev->ops->start_radar_detection(&rdev->wiphy, dev, chandef, cac_time_ms); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_set_mcast_rate(struct cfg80211_registered_device *rdev, struct net_device *dev, int mcast_rate[NUM_NL80211_BANDS]) { int ret = -ENOTSUPP; trace_rdev_set_mcast_rate(&rdev->wiphy, dev, mcast_rate); if (rdev->ops->set_mcast_rate) ret = rdev->ops->set_mcast_rate(&rdev->wiphy, dev, mcast_rate); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_set_coalesce(struct cfg80211_registered_device *rdev, struct cfg80211_coalesce *coalesce) { int ret = -ENOTSUPP; trace_rdev_set_coalesce(&rdev->wiphy, coalesce); if (rdev->ops->set_coalesce) ret = rdev->ops->set_coalesce(&rdev->wiphy, coalesce); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_set_pmk(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_pmk_conf *pmk_conf) { int ret = -EOPNOTSUPP; trace_rdev_set_pmk(&rdev->wiphy, dev, pmk_conf); if (rdev->ops->set_pmk) ret = rdev->ops->set_pmk(&rdev->wiphy, dev, pmk_conf); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_del_pmk(struct cfg80211_registered_device *rdev, struct net_device *dev, const u8 *aa) { int ret = -EOPNOTSUPP; trace_rdev_del_pmk(&rdev->wiphy, dev, aa); if (rdev->ops->del_pmk) ret = rdev->ops->del_pmk(&rdev->wiphy, dev, aa); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_external_auth(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_external_auth_params *params) { int ret = -EOPNOTSUPP; trace_rdev_external_auth(&rdev->wiphy, dev, params); if (rdev->ops->external_auth) ret = rdev->ops->external_auth(&rdev->wiphy, dev, params); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_get_ftm_responder_stats(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_ftm_responder_stats *ftm_stats) { int ret = -EOPNOTSUPP; trace_rdev_get_ftm_responder_stats(&rdev->wiphy, dev, ftm_stats); if 
(rdev->ops->get_ftm_responder_stats) ret = rdev->ops->get_ftm_responder_stats(&rdev->wiphy, dev, ftm_stats); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_start_pmsr(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, struct cfg80211_pmsr_request *request) { int ret = -EOPNOTSUPP; trace_rdev_start_pmsr(&rdev->wiphy, wdev, request->cookie); if (rdev->ops->start_pmsr) ret = rdev->ops->start_pmsr(&rdev->wiphy, wdev, request); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline void rdev_abort_pmsr(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, struct cfg80211_pmsr_request *request) { trace_rdev_abort_pmsr(&rdev->wiphy, wdev, request->cookie); if (rdev->ops->abort_pmsr) rdev->ops->abort_pmsr(&rdev->wiphy, wdev, request); trace_rdev_return_void(&rdev->wiphy); } #endif /* __CFG80211_RDEV_OPS */ backport-iwlwifi-dkms-7906/net/wireless/reg.c000066400000000000000000003162241351064634500212550ustar00rootroot00000000000000/* * Copyright 2002-2005, Instant802 Networks, Inc. * Copyright 2005-2006, Devicescape Software, Inc. * Copyright 2007 Johannes Berg * Copyright 2008-2011 Luis R. Rodriguez * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright 2017 Intel Deutschland GmbH * Copyright (C) 2018 - 2019 Intel Corporation * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /** * DOC: Wireless regulatory infrastructure * * The usual implementation is for a driver to read a device EEPROM to * determine which regulatory domain it should be operating under, then * looking up the allowable channels in a driver-local table and finally * registering those channels in the wiphy structure. * * Another set of compliance enforcement is for drivers to use their * own compliance limits which can be stored on the EEPROM. The host * driver or firmware may ensure these are used. * * In addition to all this we provide an extra layer of regulatory * conformance. For drivers which do not have any regulatory * information CRDA provides the complete regulatory solution. * For others it provides a community effort on further restrictions * to enhance compliance. * * Note: When number of rules --> infinity we will not be able to * index on alpha2 any more, instead we'll probably have to * rely on some SHA1 checksum of the regdomain for example. * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include #include #include #include #include #include #include #include #include #include #include #include "core.h" #include "reg.h" #include "rdev-ops.h" #include "nl80211.h" /* * Grace period we give before making sure all current interfaces reside on * channels allowed by the current regulatory domain. 
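 * (The define below sets this grace period to 60000 ms, i.e. one minute.)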
*/ #define REG_ENFORCE_GRACE_MS 60000 /** * enum reg_request_treatment - regulatory request treatment * * @REG_REQ_OK: continue processing the regulatory request * @REG_REQ_IGNORE: ignore the regulatory request * @REG_REQ_INTERSECT: the regulatory domain resulting from this request should * be intersected with the current one. * @REG_REQ_ALREADY_SET: the regulatory request will not change the current * regulatory settings, and no further processing is required. */ enum reg_request_treatment { REG_REQ_OK, REG_REQ_IGNORE, REG_REQ_INTERSECT, REG_REQ_ALREADY_SET, }; static struct regulatory_request core_request_world = { .initiator = NL80211_REGDOM_SET_BY_CORE, .alpha2[0] = '0', .alpha2[1] = '0', .intersect = false, .processed = true, .country_ie_env = ENVIRON_ANY, }; /* * Receipt of information from last regulatory request, * protected by RTNL (and can be accessed with RCU protection) */ static struct regulatory_request __rcu *last_request = (void __force __rcu *)&core_request_world; /* To trigger userspace events and load firmware */ static struct platform_device *reg_pdev; /* * Central wireless core regulatory domains, we only need two, * the current one and a world regulatory domain in case we have no * information to give us an alpha2. * (protected by RTNL, can be read under RCU) */ const struct ieee80211_regdomain __rcu *cfg80211_regdomain; /* * Number of devices that registered to the core * that support cellular base station regulatory hints * (protected by RTNL) */ static int reg_num_devs_support_basehint; /* * State variable indicating if the platform on which the devices * are attached is operating in an indoor environment. The state variable * is relevant for all registered devices. */ static bool reg_is_indoor; static spinlock_t reg_indoor_lock; /* Used to track the userspace process controlling the indoor setting */ static u32 reg_is_indoor_portid; static void restore_regulatory_settings(bool reset_user, bool cached); static void print_regdomain(const struct ieee80211_regdomain *rd); static const struct ieee80211_regdomain *get_cfg80211_regdom(void) { return rcu_dereference_rtnl(cfg80211_regdomain); } const struct ieee80211_regdomain *get_wiphy_regdom(struct wiphy *wiphy) { return rcu_dereference_rtnl(wiphy->regd); } static const char *reg_dfs_region_str(enum nl80211_dfs_regions dfs_region) { switch (dfs_region) { case NL80211_DFS_UNSET: return "unset"; case NL80211_DFS_FCC: return "FCC"; case NL80211_DFS_ETSI: return "ETSI"; case NL80211_DFS_JP: return "JP"; } return "Unknown"; } enum nl80211_dfs_regions reg_get_dfs_region(struct wiphy *wiphy) { const struct ieee80211_regdomain *regd = NULL; const struct ieee80211_regdomain *wiphy_regd = NULL; regd = get_cfg80211_regdom(); if (!wiphy) goto out; wiphy_regd = get_wiphy_regdom(wiphy); if (!wiphy_regd) goto out; if (wiphy_regd->dfs_region == regd->dfs_region) goto out; pr_debug("%s: device specific dfs_region (%s) disagrees with cfg80211's central dfs_region (%s)\n", dev_name(&wiphy->dev), reg_dfs_region_str(wiphy_regd->dfs_region), reg_dfs_region_str(regd->dfs_region)); out: return regd->dfs_region; } static void rcu_free_regdom(const struct ieee80211_regdomain *r) { if (!r) return; kfree_rcu((struct ieee80211_regdomain *)r, rcu_head); } static struct regulatory_request *get_last_request(void) { return rcu_dereference_rtnl(last_request); } /* Used to queue up regulatory hints */ static LIST_HEAD(reg_requests_list); static spinlock_t reg_requests_lock; /* Used to queue up beacon hints for review */ static 
LIST_HEAD(reg_pending_beacons); static spinlock_t reg_pending_beacons_lock; /* Used to keep track of processed beacon hints */ static LIST_HEAD(reg_beacon_list); struct reg_beacon { struct list_head list; struct ieee80211_channel chan; }; static void reg_check_chans_work(struct work_struct *work); static DECLARE_DELAYED_WORK(reg_check_chans, reg_check_chans_work); static void reg_todo(struct work_struct *work); static DECLARE_WORK(reg_work, reg_todo); /* We keep a static world regulatory domain in case of the absence of CRDA */ static const struct ieee80211_regdomain world_regdom = { .n_reg_rules = 8, .alpha2 = "00", .reg_rules = { /* IEEE 802.11b/g, channels 1..11 */ REG_RULE(2412-10, 2462+10, 40, 6, 20, 0), /* IEEE 802.11b/g, channels 12..13. */ REG_RULE(2467-10, 2472+10, 20, 6, 20, NL80211_RRF_NO_IR | NL80211_RRF_AUTO_BW), /* IEEE 802.11 channel 14 - Only JP enables * this and for 802.11b only */ REG_RULE(2484-10, 2484+10, 20, 6, 20, NL80211_RRF_NO_IR | NL80211_RRF_NO_OFDM), /* IEEE 802.11a, channel 36..48 */ REG_RULE(5180-10, 5240+10, 80, 6, 20, NL80211_RRF_NO_IR | NL80211_RRF_AUTO_BW), /* IEEE 802.11a, channel 52..64 - DFS required */ REG_RULE(5260-10, 5320+10, 80, 6, 20, NL80211_RRF_NO_IR | NL80211_RRF_AUTO_BW | NL80211_RRF_DFS), /* IEEE 802.11a, channel 100..144 - DFS required */ REG_RULE(5500-10, 5720+10, 160, 6, 20, NL80211_RRF_NO_IR | NL80211_RRF_DFS), /* IEEE 802.11a, channel 149..165 */ REG_RULE(5745-10, 5825+10, 80, 6, 20, NL80211_RRF_NO_IR), /* IEEE 802.11ad (60GHz), channels 1..3 */ REG_RULE(56160+2160*1-1080, 56160+2160*3+1080, 2160, 0, 0, 0), } }; /* protected by RTNL */ static const struct ieee80211_regdomain *cfg80211_world_regdom = &world_regdom; static char *ieee80211_regdom = "00"; static char user_alpha2[2]; static const struct ieee80211_regdomain *cfg80211_user_regdom; module_param(ieee80211_regdom, charp, 0444); MODULE_PARM_DESC(ieee80211_regdom, "IEEE 802.11 regulatory domain code"); static void reg_free_request(struct regulatory_request *request) { if (request == &core_request_world) return; if (request != get_last_request()) kfree(request); } static void reg_free_last_request(void) { struct regulatory_request *lr = get_last_request(); if (lr != &core_request_world && lr) kfree_rcu(lr, rcu_head); } static void reg_update_last_request(struct regulatory_request *request) { struct regulatory_request *lr; lr = get_last_request(); if (lr == request) return; reg_free_last_request(); rcu_assign_pointer(last_request, request); } static void reset_regdomains(bool full_reset, const struct ieee80211_regdomain *new_regdom) { const struct ieee80211_regdomain *r; ASSERT_RTNL(); r = get_cfg80211_regdom(); /* avoid freeing static information or freeing something twice */ if (r == cfg80211_world_regdom) r = NULL; if (cfg80211_world_regdom == &world_regdom) cfg80211_world_regdom = NULL; if (r == &world_regdom) r = NULL; rcu_free_regdom(r); rcu_free_regdom(cfg80211_world_regdom); cfg80211_world_regdom = &world_regdom; rcu_assign_pointer(cfg80211_regdomain, new_regdom); if (!full_reset) return; reg_update_last_request(&core_request_world); } /* * Dynamic world regulatory domain requested by the wireless * core upon initialization */ static void update_world_regdomain(const struct ieee80211_regdomain *rd) { struct regulatory_request *lr; lr = get_last_request(); WARN_ON(!lr); reset_regdomains(false, rd); cfg80211_world_regdom = rd; } bool is_world_regdom(const char *alpha2) { if (!alpha2) return false; return alpha2[0] == '0' && alpha2[1] == '0'; } static bool is_alpha2_set(const 
char *alpha2) { if (!alpha2) return false; return alpha2[0] && alpha2[1]; } static bool is_unknown_alpha2(const char *alpha2) { if (!alpha2) return false; /* * Special case where regulatory domain was built by driver * but a specific alpha2 cannot be determined */ return alpha2[0] == '9' && alpha2[1] == '9'; } static bool is_intersected_alpha2(const char *alpha2) { if (!alpha2) return false; /* * Special case where regulatory domain is the * result of an intersection between two regulatory domain * structures */ return alpha2[0] == '9' && alpha2[1] == '8'; } static bool is_an_alpha2(const char *alpha2) { if (!alpha2) return false; return isalpha(alpha2[0]) && isalpha(alpha2[1]); } static bool alpha2_equal(const char *alpha2_x, const char *alpha2_y) { if (!alpha2_x || !alpha2_y) return false; return alpha2_x[0] == alpha2_y[0] && alpha2_x[1] == alpha2_y[1]; } static bool regdom_changes(const char *alpha2) { const struct ieee80211_regdomain *r = get_cfg80211_regdom(); if (!r) return true; return !alpha2_equal(r->alpha2, alpha2); } /* * The NL80211_REGDOM_SET_BY_USER regdom alpha2 is cached; this lets * you know if a valid regulatory hint with NL80211_REGDOM_SET_BY_USER * has ever been issued. */ static bool is_user_regdom_saved(void) { if (user_alpha2[0] == '9' && user_alpha2[1] == '7') return false; /* This would indicate a mistake in the design */ if (WARN(!is_world_regdom(user_alpha2) && !is_an_alpha2(user_alpha2), "Unexpected user alpha2: %c%c\n", user_alpha2[0], user_alpha2[1])) return false; return true; } static const struct ieee80211_regdomain * reg_copy_regd(const struct ieee80211_regdomain *src_regd) { struct ieee80211_regdomain *regd; int size_of_regd; unsigned int i; size_of_regd = sizeof(struct ieee80211_regdomain) + src_regd->n_reg_rules * sizeof(struct ieee80211_reg_rule); regd = kzalloc(size_of_regd, GFP_KERNEL); if (!regd) return ERR_PTR(-ENOMEM); memcpy(regd, src_regd, sizeof(struct ieee80211_regdomain)); for (i = 0; i < src_regd->n_reg_rules; i++) memcpy(&regd->reg_rules[i], &src_regd->reg_rules[i], sizeof(struct ieee80211_reg_rule)); return regd; } static void cfg80211_save_user_regdom(const struct ieee80211_regdomain *rd) { ASSERT_RTNL(); if (!IS_ERR(cfg80211_user_regdom)) kfree(cfg80211_user_regdom); cfg80211_user_regdom = reg_copy_regd(rd); } struct reg_regdb_apply_request { struct list_head list; const struct ieee80211_regdomain *regdom; }; static LIST_HEAD(reg_regdb_apply_list); static DEFINE_MUTEX(reg_regdb_apply_mutex); static void reg_regdb_apply(struct work_struct *work) { struct reg_regdb_apply_request *request; rtnl_lock(); mutex_lock(&reg_regdb_apply_mutex); while (!list_empty(&reg_regdb_apply_list)) { request = list_first_entry(&reg_regdb_apply_list, struct reg_regdb_apply_request, list); list_del(&request->list); set_regdom(request->regdom, REGD_SOURCE_INTERNAL_DB); kfree(request); } mutex_unlock(&reg_regdb_apply_mutex); rtnl_unlock(); } static DECLARE_WORK(reg_regdb_work, reg_regdb_apply); static int reg_schedule_apply(const struct ieee80211_regdomain *regdom) { struct reg_regdb_apply_request *request; request = kzalloc(sizeof(struct reg_regdb_apply_request), GFP_KERNEL); if (!request) { kfree(regdom); return -ENOMEM; } request->regdom = regdom; mutex_lock(&reg_regdb_apply_mutex); list_add_tail(&request->list, &reg_regdb_apply_list); mutex_unlock(&reg_regdb_apply_mutex); schedule_work(&reg_regdb_work); return 0; } #ifdef CPTCFG_CFG80211_CRDA_SUPPORT /* Max number of consecutive attempts to communicate with CRDA */ #define REG_MAX_CRDA_TIMEOUTS 10 static u32 reg_crda_timeouts; static
void crda_timeout_work(struct work_struct *work); static DECLARE_DELAYED_WORK(crda_timeout, crda_timeout_work); static void crda_timeout_work(struct work_struct *work) { pr_debug("Timeout while waiting for CRDA to reply, restoring regulatory settings\n"); rtnl_lock(); reg_crda_timeouts++; restore_regulatory_settings(true, false); rtnl_unlock(); } static void cancel_crda_timeout(void) { cancel_delayed_work(&crda_timeout); } static void cancel_crda_timeout_sync(void) { cancel_delayed_work_sync(&crda_timeout); } static void reset_crda_timeouts(void) { reg_crda_timeouts = 0; } /* * This lets us keep regulatory code which is updated on a regulatory * basis in userspace. */ static int call_crda(const char *alpha2) { char country[12]; char *env[] = { country, NULL }; int ret; snprintf(country, sizeof(country), "COUNTRY=%c%c", alpha2[0], alpha2[1]); if (reg_crda_timeouts > REG_MAX_CRDA_TIMEOUTS) { pr_debug("Exceeded CRDA call max attempts. Not calling CRDA\n"); return -EINVAL; } if (!is_world_regdom((char *) alpha2)) pr_debug("Calling CRDA for country: %c%c\n", alpha2[0], alpha2[1]); else pr_debug("Calling CRDA to update world regulatory domain\n"); ret = kobject_uevent_env(&reg_pdev->dev.kobj, KOBJ_CHANGE, env); if (ret) return ret; queue_delayed_work(system_power_efficient_wq, &crda_timeout, msecs_to_jiffies(3142)); return 0; } #else static inline void cancel_crda_timeout(void) {} static inline void cancel_crda_timeout_sync(void) {} static inline void reset_crda_timeouts(void) {} static inline int call_crda(const char *alpha2) { return -ENODATA; } #endif /* CPTCFG_CFG80211_CRDA_SUPPORT */ /* code to directly load a firmware database through request_firmware */ static const struct fwdb_header *regdb; struct fwdb_country { u8 alpha2[2]; __be16 coll_ptr; /* this struct cannot be extended */ } __packed __aligned(4); struct fwdb_collection { u8 len; u8 n_rules; u8 dfs_region; /* no optional data yet */ /* aligned to 2, then followed by __be16 array of rule pointers */ } __packed __aligned(4); enum fwdb_flags { FWDB_FLAG_NO_OFDM = BIT(0), FWDB_FLAG_NO_OUTDOOR = BIT(1), FWDB_FLAG_DFS = BIT(2), FWDB_FLAG_NO_IR = BIT(3), FWDB_FLAG_AUTO_BW = BIT(4), }; struct fwdb_wmm_ac { u8 ecw; u8 aifsn; __be16 cot; } __packed; struct fwdb_wmm_rule { struct fwdb_wmm_ac client[IEEE80211_NUM_ACS]; struct fwdb_wmm_ac ap[IEEE80211_NUM_ACS]; } __packed; struct fwdb_rule { u8 len; u8 flags; __be16 max_eirp; __be32 start, end, max_bw; /* start of optional data */ __be16 cac_timeout; __be16 wmm_ptr; } __packed __aligned(4); #define FWDB_MAGIC 0x52474442 #define FWDB_VERSION 20 struct fwdb_header { __be32 magic; __be32 version; struct fwdb_country country[]; } __packed __aligned(4); static int ecw2cw(int ecw) { return (1 << ecw) - 1; } static bool valid_wmm(struct fwdb_wmm_rule *rule) { struct fwdb_wmm_ac *ac = (struct fwdb_wmm_ac *)rule; int i; for (i = 0; i < IEEE80211_NUM_ACS * 2; i++) { u16 cw_min = ecw2cw((ac[i].ecw & 0xf0) >> 4); u16 cw_max = ecw2cw(ac[i].ecw & 0x0f); u8 aifsn = ac[i].aifsn; if (cw_min >= cw_max) return false; if (aifsn < 1) return false; } return true; } static bool valid_rule(const u8 *data, unsigned int size, u16 rule_ptr) { struct fwdb_rule *rule = (void *)(data + (rule_ptr << 2)); if ((u8 *)rule + sizeof(rule->len) > data + size) return false; /* mandatory fields */ if (rule->len < offsetofend(struct fwdb_rule, max_bw)) return false; if (rule->len >= offsetofend(struct fwdb_rule, wmm_ptr)) { u32 wmm_ptr = be16_to_cpu(rule->wmm_ptr) << 2; struct fwdb_wmm_rule *wmm; if (wmm_ptr + sizeof(struct
fwdb_wmm_rule) > size) return false; wmm = (void *)(data + wmm_ptr); if (!valid_wmm(wmm)) return false; } return true; } static bool valid_country(const u8 *data, unsigned int size, const struct fwdb_country *country) { unsigned int ptr = be16_to_cpu(country->coll_ptr) << 2; struct fwdb_collection *coll = (void *)(data + ptr); __be16 *rules_ptr; unsigned int i; /* make sure we can read len/n_rules */ if ((u8 *)coll + offsetofend(typeof(*coll), n_rules) > data + size) return false; /* make sure base struct and all rules fit */ if ((u8 *)coll + ALIGN(coll->len, 2) + (coll->n_rules * 2) > data + size) return false; /* mandatory fields must exist */ if (coll->len < offsetofend(struct fwdb_collection, dfs_region)) return false; rules_ptr = (void *)((u8 *)coll + ALIGN(coll->len, 2)); for (i = 0; i < coll->n_rules; i++) { u16 rule_ptr = be16_to_cpu(rules_ptr[i]); if (!valid_rule(data, size, rule_ptr)) return false; } return true; } #ifdef CPTCFG_CFG80211_REQUIRE_SIGNED_REGDB static struct key *builtin_regdb_keys; static void __init load_keys_from_buffer(const u8 *p, unsigned int buflen) { const u8 *end = p + buflen; size_t plen; key_ref_t key; while (p < end) { /* Each cert begins with an ASN.1 SEQUENCE tag and must be more * than 256 bytes in size. */ if (end - p < 4) goto dodgy_cert; if (p[0] != 0x30 || p[1] != 0x82) goto dodgy_cert; plen = (p[2] << 8) | p[3]; plen += 4; if (plen > end - p) goto dodgy_cert; key = key_create_or_update(make_key_ref(builtin_regdb_keys, 1), "asymmetric", NULL, p, plen, ((KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_VIEW | KEY_USR_READ), KEY_ALLOC_NOT_IN_QUOTA | KEY_ALLOC_BUILT_IN | KEY_ALLOC_BYPASS_RESTRICTION); if (IS_ERR(key)) { pr_err("Problem loading in-kernel X.509 certificate (%ld)\n", PTR_ERR(key)); } else { pr_notice("Loaded X.509 cert '%s'\n", key_ref_to_ptr(key)->description); key_ref_put(key); } p += plen; } return; dodgy_cert: pr_err("Problem parsing in-kernel X.509 certificate list\n"); } static int __init load_builtin_regdb_keys(void) { builtin_regdb_keys = keyring_alloc(".builtin_regdb_keys", KUIDT_INIT(0), KGIDT_INIT(0), current_cred(), ((KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_VIEW | KEY_USR_READ | KEY_USR_SEARCH), KEY_ALLOC_NOT_IN_QUOTA, NULL, NULL); if (IS_ERR(builtin_regdb_keys)) return PTR_ERR(builtin_regdb_keys); pr_notice("Loading compiled-in X.509 certificates for regulatory database\n"); #ifdef CPTCFG_CFG80211_USE_KERNEL_REGDB_KEYS load_keys_from_buffer(shipped_regdb_certs, shipped_regdb_certs_len); #endif #ifdef CPTCFG_CFG80211_EXTRA_REGDB_KEYDIR if (CPTCFG_CFG80211_EXTRA_REGDB_KEYDIR[0] != '\0') load_keys_from_buffer(extra_regdb_certs, extra_regdb_certs_len); #endif return 0; } static bool regdb_has_valid_signature(const u8 *data, unsigned int size) { const struct firmware *sig; bool result; if (request_firmware(&sig, "regulatory.db.p7s", &reg_pdev->dev)) return false; result = verify_pkcs7_signature(data, size, sig->data, sig->size, builtin_regdb_keys, VERIFYING_UNSPECIFIED_SIGNATURE, NULL, NULL) == 0; release_firmware(sig); return result; } static void free_regdb_keyring(void) { key_put(builtin_regdb_keys); } #else static int load_builtin_regdb_keys(void) { return 0; } static bool regdb_has_valid_signature(const u8 *data, unsigned int size) { return true; } static void free_regdb_keyring(void) { } #endif /* CPTCFG_CFG80211_REQUIRE_SIGNED_REGDB */ static bool valid_regdb(const u8 *data, unsigned int size) { const struct fwdb_header *hdr = (void *)data; const struct fwdb_country *country; if (size < sizeof(*hdr)) return false; if (hdr->magic
!= cpu_to_be32(FWDB_MAGIC)) return false; if (hdr->version != cpu_to_be32(FWDB_VERSION)) return false; if (!regdb_has_valid_signature(data, size)) return false; country = &hdr->country[0]; while ((u8 *)(country + 1) <= data + size) { if (!country->coll_ptr) break; if (!valid_country(data, size, country)) return false; country++; } return true; } static void set_wmm_rule(const struct fwdb_header *db, const struct fwdb_country *country, const struct fwdb_rule *rule, struct ieee80211_reg_rule *rrule) { struct ieee80211_wmm_rule *wmm_rule = &rrule->wmm_rule; struct fwdb_wmm_rule *wmm; unsigned int i, wmm_ptr; wmm_ptr = be16_to_cpu(rule->wmm_ptr) << 2; wmm = (void *)((u8 *)db + wmm_ptr); if (!valid_wmm(wmm)) { pr_err("Invalid regulatory WMM rule %u-%u in domain %c%c\n", be32_to_cpu(rule->start), be32_to_cpu(rule->end), country->alpha2[0], country->alpha2[1]); return; } for (i = 0; i < IEEE80211_NUM_ACS; i++) { wmm_rule->client[i].cw_min = ecw2cw((wmm->client[i].ecw & 0xf0) >> 4); wmm_rule->client[i].cw_max = ecw2cw(wmm->client[i].ecw & 0x0f); wmm_rule->client[i].aifsn = wmm->client[i].aifsn; wmm_rule->client[i].cot = 1000 * be16_to_cpu(wmm->client[i].cot); wmm_rule->ap[i].cw_min = ecw2cw((wmm->ap[i].ecw & 0xf0) >> 4); wmm_rule->ap[i].cw_max = ecw2cw(wmm->ap[i].ecw & 0x0f); wmm_rule->ap[i].aifsn = wmm->ap[i].aifsn; wmm_rule->ap[i].cot = 1000 * be16_to_cpu(wmm->ap[i].cot); } rrule->has_wmm = true; } static int __regdb_query_wmm(const struct fwdb_header *db, const struct fwdb_country *country, int freq, struct ieee80211_reg_rule *rrule) { unsigned int ptr = be16_to_cpu(country->coll_ptr) << 2; struct fwdb_collection *coll = (void *)((u8 *)db + ptr); int i; for (i = 0; i < coll->n_rules; i++) { __be16 *rules_ptr = (void *)((u8 *)coll + ALIGN(coll->len, 2)); unsigned int rule_ptr = be16_to_cpu(rules_ptr[i]) << 2; struct fwdb_rule *rule = (void *)((u8 *)db + rule_ptr); if (rule->len < offsetofend(struct fwdb_rule, wmm_ptr)) continue; if (freq >= KHZ_TO_MHZ(be32_to_cpu(rule->start)) && freq <= KHZ_TO_MHZ(be32_to_cpu(rule->end))) { set_wmm_rule(db, country, rule, rrule); return 0; } } return -ENODATA; } int reg_query_regdb_wmm(char *alpha2, int freq, struct ieee80211_reg_rule *rule) { const struct fwdb_header *hdr = regdb; const struct fwdb_country *country; if (!regdb) return -ENODATA; if (IS_ERR(regdb)) return PTR_ERR(regdb); country = &hdr->country[0]; while (country->coll_ptr) { if (alpha2_equal(alpha2, country->alpha2)) return __regdb_query_wmm(regdb, country, freq, rule); country++; } return -ENODATA; } EXPORT_SYMBOL(reg_query_regdb_wmm); static int regdb_query_country(const struct fwdb_header *db, const struct fwdb_country *country) { unsigned int ptr = be16_to_cpu(country->coll_ptr) << 2; struct fwdb_collection *coll = (void *)((u8 *)db + ptr); struct ieee80211_regdomain *regdom; unsigned int size_of_regd, i; size_of_regd = sizeof(struct ieee80211_regdomain) + coll->n_rules * sizeof(struct ieee80211_reg_rule); regdom = kzalloc(size_of_regd, GFP_KERNEL); if (!regdom) return -ENOMEM; regdom->n_reg_rules = coll->n_rules; regdom->alpha2[0] = country->alpha2[0]; regdom->alpha2[1] = country->alpha2[1]; regdom->dfs_region = coll->dfs_region; for (i = 0; i < regdom->n_reg_rules; i++) { __be16 *rules_ptr = (void *)((u8 *)coll + ALIGN(coll->len, 2)); unsigned int rule_ptr = be16_to_cpu(rules_ptr[i]) << 2; struct fwdb_rule *rule = (void *)((u8 *)db + rule_ptr); struct ieee80211_reg_rule *rrule = &regdom->reg_rules[i]; rrule->freq_range.start_freq_khz = be32_to_cpu(rule->start); rrule->freq_range.end_freq_khz
= be32_to_cpu(rule->end); rrule->freq_range.max_bandwidth_khz = be32_to_cpu(rule->max_bw); rrule->power_rule.max_antenna_gain = 0; rrule->power_rule.max_eirp = be16_to_cpu(rule->max_eirp); rrule->flags = 0; if (rule->flags & FWDB_FLAG_NO_OFDM) rrule->flags |= NL80211_RRF_NO_OFDM; if (rule->flags & FWDB_FLAG_NO_OUTDOOR) rrule->flags |= NL80211_RRF_NO_OUTDOOR; if (rule->flags & FWDB_FLAG_DFS) rrule->flags |= NL80211_RRF_DFS; if (rule->flags & FWDB_FLAG_NO_IR) rrule->flags |= NL80211_RRF_NO_IR; if (rule->flags & FWDB_FLAG_AUTO_BW) rrule->flags |= NL80211_RRF_AUTO_BW; rrule->dfs_cac_ms = 0; /* handle optional data */ if (rule->len >= offsetofend(struct fwdb_rule, cac_timeout)) rrule->dfs_cac_ms = 1000 * be16_to_cpu(rule->cac_timeout); if (rule->len >= offsetofend(struct fwdb_rule, wmm_ptr)) set_wmm_rule(db, country, rule, rrule); } return reg_schedule_apply(regdom); } static int query_regdb(const char *alpha2) { const struct fwdb_header *hdr = regdb; const struct fwdb_country *country; ASSERT_RTNL(); if (IS_ERR(regdb)) return PTR_ERR(regdb); country = &hdr->country[0]; while (country->coll_ptr) { if (alpha2_equal(alpha2, country->alpha2)) return regdb_query_country(regdb, country); country++; } return -ENODATA; } static void regdb_fw_cb(const struct firmware *fw, void *context) { int set_error = 0; bool restore = true; void *db; if (!fw) { pr_info("failed to load regulatory.db\n"); set_error = -ENODATA; } else if (!valid_regdb(fw->data, fw->size)) { pr_info("loaded regulatory.db is malformed or signature is missing/invalid\n"); set_error = -EINVAL; } rtnl_lock(); if (WARN_ON(regdb && !IS_ERR(regdb))) { /* just restore and free new db */ } else if (set_error) { regdb = ERR_PTR(set_error); } else if (fw) { db = kmemdup(fw->data, fw->size, GFP_KERNEL); if (db) { regdb = db; restore = context && query_regdb(context); } else { restore = true; } } if (restore) restore_regulatory_settings(true, false); rtnl_unlock(); kfree(context); release_firmware(fw); } static int query_regdb_file(const char *alpha2) { ASSERT_RTNL(); if (regdb) return query_regdb(alpha2); alpha2 = kmemdup(alpha2, 2, GFP_KERNEL); if (!alpha2) return -ENOMEM; return request_firmware_nowait(THIS_MODULE, true, "regulatory.db", &reg_pdev->dev, GFP_KERNEL, (void *)alpha2, regdb_fw_cb); } int reg_reload_regdb(void) { const struct firmware *fw; void *db; int err; err = request_firmware(&fw, "regulatory.db", &reg_pdev->dev); if (err) return err; if (!valid_regdb(fw->data, fw->size)) { err = -ENODATA; goto out; } db = kmemdup(fw->data, fw->size, GFP_KERNEL); if (!db) { err = -ENOMEM; goto out; } rtnl_lock(); if (!IS_ERR_OR_NULL(regdb)) kfree(regdb); regdb = db; rtnl_unlock(); out: release_firmware(fw); return err; } static bool reg_query_database(struct regulatory_request *request) { if (query_regdb_file(request->alpha2) == 0) return true; if (call_crda(request->alpha2) == 0) return true; return false; } bool reg_is_valid_request(const char *alpha2) { struct regulatory_request *lr = get_last_request(); if (!lr || lr->processed) return false; return alpha2_equal(lr->alpha2, alpha2); } static const struct ieee80211_regdomain *reg_get_regdomain(struct wiphy *wiphy) { struct regulatory_request *lr = get_last_request(); /* * Follow the driver's regulatory domain, if present, unless a country * IE has been processed or a user wants to help compliance further */ if (lr->initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE && lr->initiator != NL80211_REGDOM_SET_BY_USER && wiphy->regd) return get_wiphy_regdom(wiphy); return get_cfg80211_regdom(); } static
unsigned int reg_get_max_bandwidth_from_range(const struct ieee80211_regdomain *rd, const struct ieee80211_reg_rule *rule) { const struct ieee80211_freq_range *freq_range = &rule->freq_range; const struct ieee80211_freq_range *freq_range_tmp; const struct ieee80211_reg_rule *tmp; u32 start_freq, end_freq, idx, no; for (idx = 0; idx < rd->n_reg_rules; idx++) if (rule == &rd->reg_rules[idx]) break; if (idx == rd->n_reg_rules) return 0; /* get start_freq */ no = idx; while (no) { tmp = &rd->reg_rules[--no]; freq_range_tmp = &tmp->freq_range; if (freq_range_tmp->end_freq_khz < freq_range->start_freq_khz) break; freq_range = freq_range_tmp; } start_freq = freq_range->start_freq_khz; /* get end_freq */ freq_range = &rule->freq_range; no = idx; while (no < rd->n_reg_rules - 1) { tmp = &rd->reg_rules[++no]; freq_range_tmp = &tmp->freq_range; if (freq_range_tmp->start_freq_khz > freq_range->end_freq_khz) break; freq_range = freq_range_tmp; } end_freq = freq_range->end_freq_khz; return end_freq - start_freq; } unsigned int reg_get_max_bandwidth(const struct ieee80211_regdomain *rd, const struct ieee80211_reg_rule *rule) { unsigned int bw = reg_get_max_bandwidth_from_range(rd, rule); if (rule->flags & NL80211_RRF_NO_160MHZ) bw = min_t(unsigned int, bw, MHZ_TO_KHZ(80)); if (rule->flags & NL80211_RRF_NO_80MHZ) bw = min_t(unsigned int, bw, MHZ_TO_KHZ(40)); /* * HT40+/HT40- limits are handled per-channel. Only limit BW if both * are not allowed. */ if (rule->flags & NL80211_RRF_NO_HT40MINUS && rule->flags & NL80211_RRF_NO_HT40PLUS) bw = min_t(unsigned int, bw, MHZ_TO_KHZ(20)); return bw; } /* Sanity check on a regulatory rule */ static bool is_valid_reg_rule(const struct ieee80211_reg_rule *rule) { const struct ieee80211_freq_range *freq_range = &rule->freq_range; u32 freq_diff; if (freq_range->start_freq_khz <= 0 || freq_range->end_freq_khz <= 0) return false; if (freq_range->start_freq_khz > freq_range->end_freq_khz) return false; freq_diff = freq_range->end_freq_khz - freq_range->start_freq_khz; if (freq_range->end_freq_khz <= freq_range->start_freq_khz || freq_range->max_bandwidth_khz > freq_diff) return false; return true; } static bool is_valid_rd(const struct ieee80211_regdomain *rd) { const struct ieee80211_reg_rule *reg_rule = NULL; unsigned int i; if (!rd->n_reg_rules) return false; if (WARN_ON(rd->n_reg_rules > NL80211_MAX_SUPP_REG_RULES)) return false; for (i = 0; i < rd->n_reg_rules; i++) { reg_rule = &rd->reg_rules[i]; if (!is_valid_reg_rule(reg_rule)) return false; } return true; } /** * freq_in_rule_band - tells us if a frequency is in a frequency band * @freq_range: frequency rule we want to query * @freq_khz: frequency we are inquiring about * * This lets us know if a specific frequency rule is or is not relevant to * a specific frequency's band. Bands are device specific and artificial * definitions (the "2.4 GHz band", the "5 GHz band" and the "60GHz band"), * however it is safe for now to assume that a frequency rule should not be * part of a frequency's band if the start freq or end freq are off by more * than 2 GHz for the 2.4 and 5 GHz bands, and by more than 10 GHz for the * 60 GHz band. * This resolution can be lowered and should be considered as we add * regulatory rule support for other "bands". 
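 *
 * Example (illustrative numbers only): with a rule covering 5170-5250 MHz,
 * a query at 5745 MHz is within 2 GHz of the rule's edges and is therefore
 * treated as the same (5 GHz) band, while a query at 2412 MHz is more than
 * 2 GHz away from both edges and is not.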
**/ static bool freq_in_rule_band(const struct ieee80211_freq_range *freq_range, u32 freq_khz) { #define ONE_GHZ_IN_KHZ 1000000 /* * From 802.11ad: directional multi-gigabit (DMG): * Pertaining to operation in a frequency band containing a channel * with the Channel starting frequency above 45 GHz. */ u32 limit = freq_khz > 45 * ONE_GHZ_IN_KHZ ? 10 * ONE_GHZ_IN_KHZ : 2 * ONE_GHZ_IN_KHZ; if (abs(freq_khz - freq_range->start_freq_khz) <= limit) return true; if (abs(freq_khz - freq_range->end_freq_khz) <= limit) return true; return false; #undef ONE_GHZ_IN_KHZ } /* * Later on we can perhaps use the more restrictive DFS * region but we don't have information for that yet so * for now simply disallow conflicts. */ static enum nl80211_dfs_regions reg_intersect_dfs_region(const enum nl80211_dfs_regions dfs_region1, const enum nl80211_dfs_regions dfs_region2) { if (dfs_region1 != dfs_region2) return NL80211_DFS_UNSET; return dfs_region1; } static void reg_wmm_rules_intersect(const struct ieee80211_wmm_ac *wmm_ac1, const struct ieee80211_wmm_ac *wmm_ac2, struct ieee80211_wmm_ac *intersect) { intersect->cw_min = max_t(u16, wmm_ac1->cw_min, wmm_ac2->cw_min); intersect->cw_max = max_t(u16, wmm_ac1->cw_max, wmm_ac2->cw_max); intersect->cot = min_t(u16, wmm_ac1->cot, wmm_ac2->cot); intersect->aifsn = max_t(u8, wmm_ac1->aifsn, wmm_ac2->aifsn); } /* * Helper for regdom_intersect(); this does the real * mathematical intersection fun */ static int reg_rules_intersect(const struct ieee80211_regdomain *rd1, const struct ieee80211_regdomain *rd2, const struct ieee80211_reg_rule *rule1, const struct ieee80211_reg_rule *rule2, struct ieee80211_reg_rule *intersected_rule) { const struct ieee80211_freq_range *freq_range1, *freq_range2; struct ieee80211_freq_range *freq_range; const struct ieee80211_power_rule *power_rule1, *power_rule2; struct ieee80211_power_rule *power_rule; const struct ieee80211_wmm_rule *wmm_rule1, *wmm_rule2; struct ieee80211_wmm_rule *wmm_rule; u32 freq_diff, max_bandwidth1, max_bandwidth2; freq_range1 = &rule1->freq_range; freq_range2 = &rule2->freq_range; freq_range = &intersected_rule->freq_range; power_rule1 = &rule1->power_rule; power_rule2 = &rule2->power_rule; power_rule = &intersected_rule->power_rule; wmm_rule1 = &rule1->wmm_rule; wmm_rule2 = &rule2->wmm_rule; wmm_rule = &intersected_rule->wmm_rule; freq_range->start_freq_khz = max(freq_range1->start_freq_khz, freq_range2->start_freq_khz); freq_range->end_freq_khz = min(freq_range1->end_freq_khz, freq_range2->end_freq_khz); max_bandwidth1 = freq_range1->max_bandwidth_khz; max_bandwidth2 = freq_range2->max_bandwidth_khz; if (rule1->flags & NL80211_RRF_AUTO_BW) max_bandwidth1 = reg_get_max_bandwidth(rd1, rule1); if (rule2->flags & NL80211_RRF_AUTO_BW) max_bandwidth2 = reg_get_max_bandwidth(rd2, rule2); freq_range->max_bandwidth_khz = min(max_bandwidth1, max_bandwidth2); intersected_rule->flags = rule1->flags | rule2->flags; /* * If NL80211_RRF_AUTO_BW was requested for both rules, set AUTO_BW in * the intersected rule as well; handle_channel() will then calculate * the bandwidth correctly. Otherwise clear the AUTO_BW flag: the * maximum bandwidth has already been calculated here, so no automatic * calculation is required.
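 * (reg_get_max_bandwidth() above is what widens an AUTO_BW rule across
 * contiguous rules of the same regdomain.)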
*/ if ((rule1->flags & NL80211_RRF_AUTO_BW) && (rule2->flags & NL80211_RRF_AUTO_BW)) intersected_rule->flags |= NL80211_RRF_AUTO_BW; else intersected_rule->flags &= ~NL80211_RRF_AUTO_BW; freq_diff = freq_range->end_freq_khz - freq_range->start_freq_khz; if (freq_range->max_bandwidth_khz > freq_diff) freq_range->max_bandwidth_khz = freq_diff; power_rule->max_eirp = min(power_rule1->max_eirp, power_rule2->max_eirp); power_rule->max_antenna_gain = min(power_rule1->max_antenna_gain, power_rule2->max_antenna_gain); intersected_rule->dfs_cac_ms = max(rule1->dfs_cac_ms, rule2->dfs_cac_ms); if (rule1->has_wmm && rule2->has_wmm) { u8 ac; for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { reg_wmm_rules_intersect(&wmm_rule1->client[ac], &wmm_rule2->client[ac], &wmm_rule->client[ac]); reg_wmm_rules_intersect(&wmm_rule1->ap[ac], &wmm_rule2->ap[ac], &wmm_rule->ap[ac]); } intersected_rule->has_wmm = true; } else if (rule1->has_wmm) { *wmm_rule = *wmm_rule1; intersected_rule->has_wmm = true; } else if (rule2->has_wmm) { *wmm_rule = *wmm_rule2; intersected_rule->has_wmm = true; } else { intersected_rule->has_wmm = false; } if (!is_valid_reg_rule(intersected_rule)) return -EINVAL; return 0; } /* check whether old rule contains new rule */ static bool rule_contains(struct ieee80211_reg_rule *r1, struct ieee80211_reg_rule *r2) { /* for simplicity, currently consider only same flags */ if (r1->flags != r2->flags) return false; /* verify r1 is more restrictive */ if ((r1->power_rule.max_antenna_gain > r2->power_rule.max_antenna_gain) || r1->power_rule.max_eirp > r2->power_rule.max_eirp) return false; /* make sure r2's range is contained within r1 */ if (r1->freq_range.start_freq_khz > r2->freq_range.start_freq_khz || r1->freq_range.end_freq_khz < r2->freq_range.end_freq_khz) return false; /* and finally verify that r1.max_bw >= r2.max_bw */ if (r1->freq_range.max_bandwidth_khz < r2->freq_range.max_bandwidth_khz) return false; return true; } /* Add or extend current rules; do nothing if the rule is already contained */ static void add_rule(struct ieee80211_reg_rule *rule, struct ieee80211_reg_rule *reg_rules, u32 *n_rules) { struct ieee80211_reg_rule *tmp_rule; int i; for (i = 0; i < *n_rules; i++) { tmp_rule = &reg_rules[i]; /* rule is already contained - do nothing */ if (rule_contains(tmp_rule, rule)) return; /* extend rule if possible */ if (rule_contains(rule, tmp_rule)) { memcpy(tmp_rule, rule, sizeof(*rule)); return; } } memcpy(&reg_rules[*n_rules], rule, sizeof(*rule)); (*n_rules)++; } /** * regdom_intersect - do the intersection between two regulatory domains * @rd1: first regulatory domain * @rd2: second regulatory domain * * Use this function to get the intersection between two regulatory domains. * Once completed we will mark the alpha2 for the rd as intersected, "98", * as no single alpha2 can represent this regulatory domain. * * Returns a pointer to the regulatory domain structure which will hold the * resulting intersection of rules between rd1 and rd2. We will * kzalloc() this structure for you. */ static struct ieee80211_regdomain * regdom_intersect(const struct ieee80211_regdomain *rd1, const struct ieee80211_regdomain *rd2) { int r, size_of_regd; unsigned int x, y; unsigned int num_rules = 0; const struct ieee80211_reg_rule *rule1, *rule2; struct ieee80211_reg_rule intersected_rule; struct ieee80211_regdomain *rd; if (!rd1 || !rd2) return NULL; /* * First we get a count of the rules we'll need, then we actually * build them. This is so we can malloc() and free() a * regdomain once.
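 * (Two passes over all rule pairs: the first pass only counts the valid
 * intersections, the second fills them into the freshly allocated rd.)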
The reason we use reg_rules_intersect() here * is that it will return -EINVAL if the rule computed makes no sense. * All rules that do check out OK are valid. */ for (x = 0; x < rd1->n_reg_rules; x++) { rule1 = &rd1->reg_rules[x]; for (y = 0; y < rd2->n_reg_rules; y++) { rule2 = &rd2->reg_rules[y]; if (!reg_rules_intersect(rd1, rd2, rule1, rule2, &intersected_rule)) num_rules++; } } if (!num_rules) return NULL; size_of_regd = sizeof(struct ieee80211_regdomain) + num_rules * sizeof(struct ieee80211_reg_rule); rd = kzalloc(size_of_regd, GFP_KERNEL); if (!rd) return NULL; for (x = 0; x < rd1->n_reg_rules; x++) { rule1 = &rd1->reg_rules[x]; for (y = 0; y < rd2->n_reg_rules; y++) { rule2 = &rd2->reg_rules[y]; r = reg_rules_intersect(rd1, rd2, rule1, rule2, &intersected_rule); /* * No need to memset the intersected rule here as * we're not using the stack anymore */ if (r) continue; add_rule(&intersected_rule, rd->reg_rules, &rd->n_reg_rules); } } rd->alpha2[0] = '9'; rd->alpha2[1] = '8'; rd->dfs_region = reg_intersect_dfs_region(rd1->dfs_region, rd2->dfs_region); return rd; } /* * XXX: add support for the rest of enum nl80211_reg_rule_flags; we may * want to just have the channel structure use these */ static u32 map_regdom_flags(u32 rd_flags) { u32 channel_flags = 0; if (rd_flags & NL80211_RRF_NO_IR_ALL) channel_flags |= IEEE80211_CHAN_NO_IR; if (rd_flags & NL80211_RRF_DFS) channel_flags |= IEEE80211_CHAN_RADAR; if (rd_flags & NL80211_RRF_NO_OFDM) channel_flags |= IEEE80211_CHAN_NO_OFDM; if (rd_flags & NL80211_RRF_NO_OUTDOOR) channel_flags |= IEEE80211_CHAN_INDOOR_ONLY; if (rd_flags & NL80211_RRF_IR_CONCURRENT) channel_flags |= IEEE80211_CHAN_IR_CONCURRENT; if (rd_flags & NL80211_RRF_NO_HT40MINUS) channel_flags |= IEEE80211_CHAN_NO_HT40MINUS; if (rd_flags & NL80211_RRF_NO_HT40PLUS) channel_flags |= IEEE80211_CHAN_NO_HT40PLUS; if (rd_flags & NL80211_RRF_NO_80MHZ) channel_flags |= IEEE80211_CHAN_NO_80MHZ; if (rd_flags & NL80211_RRF_NO_160MHZ) channel_flags |= IEEE80211_CHAN_NO_160MHZ; return channel_flags; } static const struct ieee80211_reg_rule * freq_reg_info_regd(u32 center_freq, const struct ieee80211_regdomain *regd, u32 bw) { int i; bool band_rule_found = false; bool bw_fits = false; if (!regd) return ERR_PTR(-EINVAL); for (i = 0; i < regd->n_reg_rules; i++) { const struct ieee80211_reg_rule *rr; const struct ieee80211_freq_range *fr = NULL; rr = &regd->reg_rules[i]; fr = &rr->freq_range; /* * We only need to know if one frequency rule * was in center_freq's band, that's enough, so let's * not overwrite it once found */ if (!band_rule_found) band_rule_found = freq_in_rule_band(fr, center_freq); bw_fits = cfg80211_does_bw_fit_range(fr, center_freq, bw); if (band_rule_found && bw_fits) return rr; } if (!band_rule_found) return ERR_PTR(-ERANGE); return ERR_PTR(-EINVAL); } static const struct ieee80211_reg_rule * __freq_reg_info(struct wiphy *wiphy, u32 center_freq, u32 min_bw) { const struct ieee80211_regdomain *regd = reg_get_regdomain(wiphy); const struct ieee80211_reg_rule *reg_rule = NULL; u32 bw; for (bw = MHZ_TO_KHZ(20); bw >= min_bw; bw = bw / 2) { reg_rule = freq_reg_info_regd(center_freq, regd, bw); if (!IS_ERR(reg_rule)) return reg_rule; } return reg_rule; } const struct ieee80211_reg_rule *freq_reg_info(struct wiphy *wiphy, u32 center_freq) { return __freq_reg_info(wiphy, center_freq, MHZ_TO_KHZ(20)); } EXPORT_SYMBOL(freq_reg_info); const char *reg_initiator_name(enum nl80211_reg_initiator initiator) { switch (initiator) { case NL80211_REGDOM_SET_BY_CORE: return "core"; case
NL80211_REGDOM_SET_BY_USER: return "user"; case NL80211_REGDOM_SET_BY_DRIVER: return "driver"; case NL80211_REGDOM_SET_BY_COUNTRY_IE: return "country element"; default: WARN_ON(1); return "bug"; } } EXPORT_SYMBOL(reg_initiator_name); static uint32_t reg_rule_to_chan_bw_flags(const struct ieee80211_regdomain *regd, const struct ieee80211_reg_rule *reg_rule, const struct ieee80211_channel *chan) { const struct ieee80211_freq_range *freq_range = NULL; u32 max_bandwidth_khz, bw_flags = 0; freq_range = &reg_rule->freq_range; max_bandwidth_khz = freq_range->max_bandwidth_khz; /* Check if auto calculation requested */ if (reg_rule->flags & NL80211_RRF_AUTO_BW) max_bandwidth_khz = reg_get_max_bandwidth(regd, reg_rule); /* If we get a reg_rule we can assume that at least 5 MHz fits */ if (!cfg80211_does_bw_fit_range(freq_range, MHZ_TO_KHZ(chan->center_freq), MHZ_TO_KHZ(10))) bw_flags |= IEEE80211_CHAN_NO_10MHZ; if (!cfg80211_does_bw_fit_range(freq_range, MHZ_TO_KHZ(chan->center_freq), MHZ_TO_KHZ(20))) bw_flags |= IEEE80211_CHAN_NO_20MHZ; if (max_bandwidth_khz < MHZ_TO_KHZ(10)) bw_flags |= IEEE80211_CHAN_NO_10MHZ; if (max_bandwidth_khz < MHZ_TO_KHZ(20)) bw_flags |= IEEE80211_CHAN_NO_20MHZ; if (max_bandwidth_khz < MHZ_TO_KHZ(40)) bw_flags |= IEEE80211_CHAN_NO_HT40; if (max_bandwidth_khz < MHZ_TO_KHZ(80)) bw_flags |= IEEE80211_CHAN_NO_80MHZ; if (max_bandwidth_khz < MHZ_TO_KHZ(160)) bw_flags |= IEEE80211_CHAN_NO_160MHZ; return bw_flags; } /* * Note that right now we assume the desired channel bandwidth * is always 20 MHz for each individual channel (HT40 uses 20 MHz * per channel, the primary and the extension channel). */ static void handle_channel(struct wiphy *wiphy, enum nl80211_reg_initiator initiator, struct ieee80211_channel *chan) { u32 flags, bw_flags = 0; const struct ieee80211_reg_rule *reg_rule = NULL; const struct ieee80211_power_rule *power_rule = NULL; struct wiphy *request_wiphy = NULL; struct regulatory_request *lr = get_last_request(); const struct ieee80211_regdomain *regd; request_wiphy = wiphy_idx_to_wiphy(lr->wiphy_idx); flags = chan->orig_flags; reg_rule = freq_reg_info(wiphy, MHZ_TO_KHZ(chan->center_freq)); if (IS_ERR(reg_rule)) { /* * We will disable all channels that do not match our * received regulatory rule unless the hint is coming * from a Country IE and the Country IE had no information * about a band. The IEEE 802.11 spec allows for an AP * to send only a subset of the regulatory rules allowed, * so an AP in the US that only supports 2.4 GHz may only send * a country IE with information for the 2.4 GHz band * while 5 GHz is still supported.
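 * That is why a -ERANGE lookup result (no rule found for this band) from
 * a country IE hint is ignored below and the channel is left untouched.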
*/ if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE && PTR_ERR(reg_rule) == -ERANGE) return; if (lr->initiator == NL80211_REGDOM_SET_BY_DRIVER && request_wiphy && request_wiphy == wiphy && request_wiphy->regulatory_flags & REGULATORY_STRICT_REG) { pr_debug("Disabling freq %d MHz for good\n", chan->center_freq); chan->orig_flags |= IEEE80211_CHAN_DISABLED; chan->flags = chan->orig_flags; } else { pr_debug("Disabling freq %d MHz\n", chan->center_freq); chan->flags |= IEEE80211_CHAN_DISABLED; } return; } regd = reg_get_regdomain(wiphy); power_rule = &reg_rule->power_rule; bw_flags = reg_rule_to_chan_bw_flags(regd, reg_rule, chan); if (lr->initiator == NL80211_REGDOM_SET_BY_DRIVER && request_wiphy && request_wiphy == wiphy && request_wiphy->regulatory_flags & REGULATORY_STRICT_REG) { /* * This guarantees the driver's requested regulatory domain * will always be used as a base for further regulatory * settings */ chan->flags = chan->orig_flags = map_regdom_flags(reg_rule->flags) | bw_flags; chan->max_antenna_gain = chan->orig_mag = (int) MBI_TO_DBI(power_rule->max_antenna_gain); chan->max_reg_power = chan->max_power = chan->orig_mpwr = (int) MBM_TO_DBM(power_rule->max_eirp); if (chan->flags & IEEE80211_CHAN_RADAR) { chan->dfs_cac_ms = IEEE80211_DFS_MIN_CAC_TIME_MS; if (reg_rule->dfs_cac_ms) chan->dfs_cac_ms = reg_rule->dfs_cac_ms; } return; } chan->dfs_state = NL80211_DFS_USABLE; chan->dfs_state_entered = jiffies; chan->beacon_found = false; chan->flags = flags | bw_flags | map_regdom_flags(reg_rule->flags); chan->max_antenna_gain = min_t(int, chan->orig_mag, MBI_TO_DBI(power_rule->max_antenna_gain)); chan->max_reg_power = (int) MBM_TO_DBM(power_rule->max_eirp); if (chan->flags & IEEE80211_CHAN_RADAR) { if (reg_rule->dfs_cac_ms) chan->dfs_cac_ms = reg_rule->dfs_cac_ms; else chan->dfs_cac_ms = IEEE80211_DFS_MIN_CAC_TIME_MS; } if (chan->orig_mpwr) { /* * Devices that use REGULATORY_COUNTRY_IE_FOLLOW_POWER * will always follow the passed country IE power settings.
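 * All other devices get the more conservative of the device's original
 * power limit and the regulatory maximum, as computed below.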
*/ if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE && wiphy->regulatory_flags & REGULATORY_COUNTRY_IE_FOLLOW_POWER) chan->max_power = chan->max_reg_power; else chan->max_power = min(chan->orig_mpwr, chan->max_reg_power); } else chan->max_power = chan->max_reg_power; } static void handle_band(struct wiphy *wiphy, enum nl80211_reg_initiator initiator, struct ieee80211_supported_band *sband) { unsigned int i; if (!sband) return; for (i = 0; i < sband->n_channels; i++) handle_channel(wiphy, initiator, &sband->channels[i]); } static bool reg_request_cell_base(struct regulatory_request *request) { if (request->initiator != NL80211_REGDOM_SET_BY_USER) return false; return request->user_reg_hint_type == NL80211_USER_REG_HINT_CELL_BASE; } bool reg_last_request_cell_base(void) { return reg_request_cell_base(get_last_request()); } #ifdef CPTCFG_CFG80211_REG_CELLULAR_HINTS /* Core specific check */ static enum reg_request_treatment reg_ignore_cell_hint(struct regulatory_request *pending_request) { struct regulatory_request *lr = get_last_request(); if (!reg_num_devs_support_basehint) return REG_REQ_IGNORE; if (reg_request_cell_base(lr) && !regdom_changes(pending_request->alpha2)) return REG_REQ_ALREADY_SET; return REG_REQ_OK; } /* Device specific check */ static bool reg_dev_ignore_cell_hint(struct wiphy *wiphy) { return !(wiphy->features & NL80211_FEATURE_CELL_BASE_REG_HINTS); } #else static enum reg_request_treatment reg_ignore_cell_hint(struct regulatory_request *pending_request) { return REG_REQ_IGNORE; } static bool reg_dev_ignore_cell_hint(struct wiphy *wiphy) { return true; } #endif static bool wiphy_strict_alpha2_regd(struct wiphy *wiphy) { if (wiphy->regulatory_flags & REGULATORY_STRICT_REG && !(wiphy->regulatory_flags & REGULATORY_CUSTOM_REG)) return true; return false; } static bool ignore_reg_update(struct wiphy *wiphy, enum nl80211_reg_initiator initiator) { struct regulatory_request *lr = get_last_request(); if (wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED) return true; if (!lr) { pr_debug("Ignoring regulatory request set by %s since last_request is not set\n", reg_initiator_name(initiator)); return true; } if (initiator == NL80211_REGDOM_SET_BY_CORE && wiphy->regulatory_flags & REGULATORY_CUSTOM_REG) { pr_debug("Ignoring regulatory request set by %s since the driver uses its own custom regulatory domain\n", reg_initiator_name(initiator)); return true; } /* * wiphy->regd will be set once the device has its own * desired regulatory domain set */ if (wiphy_strict_alpha2_regd(wiphy) && !wiphy->regd && initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE && !is_world_regdom(lr->alpha2)) { pr_debug("Ignoring regulatory request set by %s since the driver requires its own regulatory domain to be set first\n", reg_initiator_name(initiator)); return true; } if (reg_request_cell_base(lr)) return reg_dev_ignore_cell_hint(wiphy); return false; } static bool reg_is_world_roaming(struct wiphy *wiphy) { const struct ieee80211_regdomain *cr = get_cfg80211_regdom(); const struct ieee80211_regdomain *wr = get_wiphy_regdom(wiphy); struct regulatory_request *lr = get_last_request(); if (is_world_regdom(cr->alpha2) || (wr && is_world_regdom(wr->alpha2))) return true; if (lr && lr->initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE && wiphy->regulatory_flags & REGULATORY_CUSTOM_REG) return true; return false; } static void handle_reg_beacon(struct wiphy *wiphy, unsigned int chan_idx, struct reg_beacon *reg_beacon) { struct ieee80211_supported_band *sband; struct ieee80211_channel *chan; bool channel_changed 
= false; struct ieee80211_channel chan_before; sband = wiphy->bands[reg_beacon->chan.band]; chan = &sband->channels[chan_idx]; if (likely(chan->center_freq != reg_beacon->chan.center_freq)) return; if (chan->beacon_found) return; chan->beacon_found = true; if (!reg_is_world_roaming(wiphy)) return; if (wiphy->regulatory_flags & REGULATORY_DISABLE_BEACON_HINTS) return; chan_before = *chan; if (chan->flags & IEEE80211_CHAN_NO_IR) { chan->flags &= ~IEEE80211_CHAN_NO_IR; channel_changed = true; } if (channel_changed) nl80211_send_beacon_hint_event(wiphy, &chan_before, chan); } /* * Called when a scan on a wiphy finds a beacon on a * new channel */ static void wiphy_update_new_beacon(struct wiphy *wiphy, struct reg_beacon *reg_beacon) { unsigned int i; struct ieee80211_supported_band *sband; if (!wiphy->bands[reg_beacon->chan.band]) return; sband = wiphy->bands[reg_beacon->chan.band]; for (i = 0; i < sband->n_channels; i++) handle_reg_beacon(wiphy, i, reg_beacon); } /* * Called upon reg changes or when a new wiphy is added */ static void wiphy_update_beacon_reg(struct wiphy *wiphy) { unsigned int i; struct ieee80211_supported_band *sband; struct reg_beacon *reg_beacon; list_for_each_entry(reg_beacon, &reg_beacon_list, list) { if (!wiphy->bands[reg_beacon->chan.band]) continue; sband = wiphy->bands[reg_beacon->chan.band]; for (i = 0; i < sband->n_channels; i++) handle_reg_beacon(wiphy, i, reg_beacon); } } /* Reap the advantages of previously found beacons */ static void reg_process_beacons(struct wiphy *wiphy) { /* * Means we are just firing up cfg80211, so no beacons would * have been processed yet. */ if (!last_request) return; wiphy_update_beacon_reg(wiphy); } static bool is_ht40_allowed(struct ieee80211_channel *chan) { if (!chan) return false; if (chan->flags & IEEE80211_CHAN_DISABLED) return false; /* This would happen when regulatory rules disallow HT40 completely */ if ((chan->flags & IEEE80211_CHAN_NO_HT40) == IEEE80211_CHAN_NO_HT40) return false; return true; } static void reg_process_ht_flags_channel(struct wiphy *wiphy, struct ieee80211_channel *channel) { struct ieee80211_supported_band *sband = wiphy->bands[channel->band]; struct ieee80211_channel *channel_before = NULL, *channel_after = NULL; const struct ieee80211_regdomain *regd; unsigned int i; u32 flags; if (!is_ht40_allowed(channel)) { channel->flags |= IEEE80211_CHAN_NO_HT40; return; } /* * We need to ensure the extension channels exist to * be able to use HT40- or HT40+; this finds them (or not) */ for (i = 0; i < sband->n_channels; i++) { struct ieee80211_channel *c = &sband->channels[i]; if (c->center_freq == (channel->center_freq - 20)) channel_before = c; if (c->center_freq == (channel->center_freq + 20)) channel_after = c; } flags = 0; regd = get_wiphy_regdom(wiphy); if (regd) { const struct ieee80211_reg_rule *reg_rule = freq_reg_info_regd(MHZ_TO_KHZ(channel->center_freq), regd, MHZ_TO_KHZ(20)); if (!IS_ERR(reg_rule)) flags = reg_rule->flags; } /* * Please note that this assumes the target bandwidth is 20 MHz; * if that ever changes we also need to change the below logic * to include that as well.
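 * * For example (an illustrative reading of the logic above, not an extra * rule): a 20 MHz channel centred at 5180 MHz (channel 36) can only be * marked HT40+ capable if its upper neighbour at 5200 MHz (channel 40) * exists and allows HT40, and HT40- likewise requires the 5160 MHz * neighbour; the loop above is what locates those +/-20 MHz neighbours.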
*/ if (!is_ht40_allowed(channel_before) || flags & NL80211_RRF_NO_HT40MINUS) channel->flags |= IEEE80211_CHAN_NO_HT40MINUS; else channel->flags &= ~IEEE80211_CHAN_NO_HT40MINUS; if (!is_ht40_allowed(channel_after) || flags & NL80211_RRF_NO_HT40PLUS) channel->flags |= IEEE80211_CHAN_NO_HT40PLUS; else channel->flags &= ~IEEE80211_CHAN_NO_HT40PLUS; } static void reg_process_ht_flags_band(struct wiphy *wiphy, struct ieee80211_supported_band *sband) { unsigned int i; if (!sband) return; for (i = 0; i < sband->n_channels; i++) reg_process_ht_flags_channel(wiphy, &sband->channels[i]); } static void reg_process_ht_flags(struct wiphy *wiphy) { enum nl80211_band band; if (!wiphy) return; for (band = 0; band < NUM_NL80211_BANDS; band++) reg_process_ht_flags_band(wiphy, wiphy->bands[band]); } static void reg_call_notifier(struct wiphy *wiphy, struct regulatory_request *request) { if (wiphy->reg_notifier) wiphy->reg_notifier(wiphy, request); } static bool reg_wdev_chan_valid(struct wiphy *wiphy, struct wireless_dev *wdev) { struct cfg80211_chan_def chandef; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); enum nl80211_iftype iftype; wdev_lock(wdev); iftype = wdev->iftype; /* make sure the interface is active */ if (!wdev->netdev || !netif_running(wdev->netdev)) goto wdev_inactive_unlock; switch (iftype) { case NL80211_IFTYPE_AP: case NL80211_IFTYPE_P2P_GO: if (!wdev->beacon_interval) goto wdev_inactive_unlock; chandef = wdev->chandef; break; case NL80211_IFTYPE_ADHOC: if (!wdev->ssid_len) goto wdev_inactive_unlock; chandef = wdev->chandef; break; case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_P2P_CLIENT: if (!wdev->current_bss || !wdev->current_bss->pub.channel) goto wdev_inactive_unlock; if (!rdev->ops->get_channel || rdev_get_channel(rdev, wdev, &chandef)) cfg80211_chandef_create(&chandef, wdev->current_bss->pub.channel, NL80211_CHAN_NO_HT); break; case NL80211_IFTYPE_MONITOR: case NL80211_IFTYPE_AP_VLAN: case NL80211_IFTYPE_P2P_DEVICE: case NL80211_IFTYPE_NAN_DATA: /* no enforcement required */ break; default: /* others not implemented for now */ WARN_ON(1); break; } wdev_unlock(wdev); switch (iftype) { case NL80211_IFTYPE_AP: case NL80211_IFTYPE_P2P_GO: case NL80211_IFTYPE_ADHOC: return cfg80211_reg_can_beacon_relax(wiphy, &chandef, iftype); case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_P2P_CLIENT: return cfg80211_chandef_usable(wiphy, &chandef, IEEE80211_CHAN_DISABLED); default: break; } return true; wdev_inactive_unlock: wdev_unlock(wdev); return true; } static void reg_leave_invalid_chans(struct wiphy *wiphy) { struct wireless_dev *wdev; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); ASSERT_RTNL(); list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) if (!reg_wdev_chan_valid(wiphy, wdev)) cfg80211_leave(rdev, wdev); } static void reg_check_chans_work(struct work_struct *work) { struct cfg80211_registered_device *rdev; pr_debug("Verifying active interfaces after reg change\n"); rtnl_lock(); list_for_each_entry(rdev, &cfg80211_rdev_list, list) if (!(rdev->wiphy.regulatory_flags & REGULATORY_IGNORE_STALE_KICKOFF)) reg_leave_invalid_chans(&rdev->wiphy); rtnl_unlock(); } static void reg_check_channels(void) { /* * Give usermode a chance to do something nicer (move to another * channel, orderly disconnection), before forcing a disconnection. 
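 * The grace period used below is REG_ENFORCE_GRACE_MS milliseconds * (converted with msecs_to_jiffies()); when it expires, * reg_check_chans_work() above kicks interfaces off now-invalid channels.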
*/ mod_delayed_work(system_power_efficient_wq, &reg_check_chans, msecs_to_jiffies(REG_ENFORCE_GRACE_MS)); } static void wiphy_update_regulatory(struct wiphy *wiphy, enum nl80211_reg_initiator initiator) { enum nl80211_band band; struct regulatory_request *lr = get_last_request(); if (ignore_reg_update(wiphy, initiator)) { /* * Regulatory updates set by CORE are ignored for custom * regulatory cards. Let us notify the changes to the driver, * as some drivers use this to restore their orig_* reg domain. */ if (initiator == NL80211_REGDOM_SET_BY_CORE && wiphy->regulatory_flags & REGULATORY_CUSTOM_REG && !(wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED)) reg_call_notifier(wiphy, lr); return; } lr->dfs_region = get_cfg80211_regdom()->dfs_region; for (band = 0; band < NUM_NL80211_BANDS; band++) handle_band(wiphy, initiator, wiphy->bands[band]); reg_process_beacons(wiphy); reg_process_ht_flags(wiphy); reg_call_notifier(wiphy, lr); } static void update_all_wiphy_regulatory(enum nl80211_reg_initiator initiator) { struct cfg80211_registered_device *rdev; struct wiphy *wiphy; ASSERT_RTNL(); list_for_each_entry(rdev, &cfg80211_rdev_list, list) { wiphy = &rdev->wiphy; wiphy_update_regulatory(wiphy, initiator); } reg_check_channels(); } static void handle_channel_custom(struct wiphy *wiphy, struct ieee80211_channel *chan, const struct ieee80211_regdomain *regd) { u32 bw_flags = 0; const struct ieee80211_reg_rule *reg_rule = NULL; const struct ieee80211_power_rule *power_rule = NULL; u32 bw; for (bw = MHZ_TO_KHZ(20); bw >= MHZ_TO_KHZ(5); bw = bw / 2) { reg_rule = freq_reg_info_regd(MHZ_TO_KHZ(chan->center_freq), regd, bw); if (!IS_ERR(reg_rule)) break; } if (IS_ERR(reg_rule)) { pr_debug("Disabling freq %d MHz as custom regd has no rule that fits it\n", chan->center_freq); if (wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED) { chan->flags |= IEEE80211_CHAN_DISABLED; } else { chan->orig_flags |= IEEE80211_CHAN_DISABLED; chan->flags = chan->orig_flags; } return; } power_rule = &reg_rule->power_rule; bw_flags = reg_rule_to_chan_bw_flags(regd, reg_rule, chan); chan->dfs_state_entered = jiffies; chan->dfs_state = NL80211_DFS_USABLE; chan->beacon_found = false; if (wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED) chan->flags = chan->orig_flags | bw_flags | map_regdom_flags(reg_rule->flags); else chan->flags |= map_regdom_flags(reg_rule->flags) | bw_flags; chan->max_antenna_gain = (int) MBI_TO_DBI(power_rule->max_antenna_gain); chan->max_reg_power = chan->max_power = (int) MBM_TO_DBM(power_rule->max_eirp); if (chan->flags & IEEE80211_CHAN_RADAR) { if (reg_rule->dfs_cac_ms) chan->dfs_cac_ms = reg_rule->dfs_cac_ms; else chan->dfs_cac_ms = IEEE80211_DFS_MIN_CAC_TIME_MS; } chan->max_power = chan->max_reg_power; } static void handle_band_custom(struct wiphy *wiphy, struct ieee80211_supported_band *sband, const struct ieee80211_regdomain *regd) { unsigned int i; if (!sband) return; for (i = 0; i < sband->n_channels; i++) handle_channel_custom(wiphy, &sband->channels[i], regd); } /* Used by drivers prior to wiphy registration */ void wiphy_apply_custom_regulatory(struct wiphy *wiphy, const struct ieee80211_regdomain *regd) { enum nl80211_band band; unsigned int bands_set = 0; WARN(!(wiphy->regulatory_flags & REGULATORY_CUSTOM_REG), "wiphy should have REGULATORY_CUSTOM_REG\n"); wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG; for (band = 0; band < NUM_NL80211_BANDS; band++) { if (!wiphy->bands[band]) continue; handle_band_custom(wiphy, wiphy->bands[band], regd); bands_set++; } /* * no point in calling
this if it won't have any effect * on your device's supported bands. */ WARN_ON(!bands_set); } EXPORT_SYMBOL(wiphy_apply_custom_regulatory); static void reg_set_request_processed(void) { bool need_more_processing = false; struct regulatory_request *lr = get_last_request(); lr->processed = true; spin_lock(&reg_requests_lock); if (!list_empty(&reg_requests_list)) need_more_processing = true; spin_unlock(&reg_requests_lock); cancel_crda_timeout(); if (need_more_processing) schedule_work(&reg_work); } /** * reg_process_hint_core - process core regulatory requests * @core_request: a pending core regulatory request * * The wireless subsystem can use this function to process * a regulatory request issued by the regulatory core. */ static enum reg_request_treatment reg_process_hint_core(struct regulatory_request *core_request) { if (reg_query_database(core_request)) { core_request->intersect = false; core_request->processed = false; reg_update_last_request(core_request); return REG_REQ_OK; } return REG_REQ_IGNORE; } static enum reg_request_treatment __reg_process_hint_user(struct regulatory_request *user_request) { struct regulatory_request *lr = get_last_request(); if (reg_request_cell_base(user_request)) return reg_ignore_cell_hint(user_request); if (reg_request_cell_base(lr)) return REG_REQ_IGNORE; if (lr->initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE) return REG_REQ_INTERSECT; /* * If the user knows better the user should set the regdom * to their country before the IE is picked up */ if (lr->initiator == NL80211_REGDOM_SET_BY_USER && lr->intersect) return REG_REQ_IGNORE; /* * Process user requests only after previous user/driver/core * requests have been processed */ if ((lr->initiator == NL80211_REGDOM_SET_BY_CORE || lr->initiator == NL80211_REGDOM_SET_BY_DRIVER || lr->initiator == NL80211_REGDOM_SET_BY_USER) && regdom_changes(lr->alpha2)) return REG_REQ_IGNORE; if (!regdom_changes(user_request->alpha2)) return REG_REQ_ALREADY_SET; return REG_REQ_OK; } /** * reg_process_hint_user - process user regulatory requests * @user_request: a pending user regulatory request * * The wireless subsystem can use this function to process * a regulatory request initiated by userspace. */ static enum reg_request_treatment reg_process_hint_user(struct regulatory_request *user_request) { enum reg_request_treatment treatment; treatment = __reg_process_hint_user(user_request); if (treatment == REG_REQ_IGNORE || treatment == REG_REQ_ALREADY_SET) return REG_REQ_IGNORE; user_request->intersect = treatment == REG_REQ_INTERSECT; user_request->processed = false; if (reg_query_database(user_request)) { reg_update_last_request(user_request); user_alpha2[0] = user_request->alpha2[0]; user_alpha2[1] = user_request->alpha2[1]; return REG_REQ_OK; } return REG_REQ_IGNORE; } static enum reg_request_treatment __reg_process_hint_driver(struct regulatory_request *driver_request) { struct regulatory_request *lr = get_last_request(); if (lr->initiator == NL80211_REGDOM_SET_BY_CORE) { if (regdom_changes(driver_request->alpha2)) return REG_REQ_OK; return REG_REQ_ALREADY_SET; } /* * This would happen if you unplug and plug your card * back in or if you add a new device for which the previously * loaded card also agrees on the regulatory domain.
*/ if (lr->initiator == NL80211_REGDOM_SET_BY_DRIVER && !regdom_changes(driver_request->alpha2)) return REG_REQ_ALREADY_SET; return REG_REQ_INTERSECT; } /** * reg_process_hint_driver - process driver regulatory requests * @driver_request: a pending driver regulatory request * * The wireless subsystem can use this function to process * a regulatory request issued by an 802.11 driver. * * Returns one of the different reg request treatment values. */ static enum reg_request_treatment reg_process_hint_driver(struct wiphy *wiphy, struct regulatory_request *driver_request) { const struct ieee80211_regdomain *regd, *tmp; enum reg_request_treatment treatment; treatment = __reg_process_hint_driver(driver_request); switch (treatment) { case REG_REQ_OK: break; case REG_REQ_IGNORE: return REG_REQ_IGNORE; case REG_REQ_INTERSECT: case REG_REQ_ALREADY_SET: regd = reg_copy_regd(get_cfg80211_regdom()); if (IS_ERR(regd)) return REG_REQ_IGNORE; tmp = get_wiphy_regdom(wiphy); rcu_assign_pointer(wiphy->regd, regd); rcu_free_regdom(tmp); } driver_request->intersect = treatment == REG_REQ_INTERSECT; driver_request->processed = false; /* * Since CRDA will not be called in this case as we already * have applied the requested regulatory domain before we just * inform userspace we have processed the request */ if (treatment == REG_REQ_ALREADY_SET) { nl80211_send_reg_change_event(driver_request); reg_update_last_request(driver_request); reg_set_request_processed(); return REG_REQ_ALREADY_SET; } if (reg_query_database(driver_request)) { reg_update_last_request(driver_request); return REG_REQ_OK; } return REG_REQ_IGNORE; } static enum reg_request_treatment __reg_process_hint_country_ie(struct wiphy *wiphy, struct regulatory_request *country_ie_request) { struct wiphy *last_wiphy = NULL; struct regulatory_request *lr = get_last_request(); if (reg_request_cell_base(lr)) { /* Trust a Cell base station over the AP's country IE */ if (regdom_changes(country_ie_request->alpha2)) return REG_REQ_IGNORE; return REG_REQ_ALREADY_SET; } else { if (wiphy->regulatory_flags & REGULATORY_COUNTRY_IE_IGNORE) return REG_REQ_IGNORE; } if (unlikely(!is_an_alpha2(country_ie_request->alpha2))) return -EINVAL; if (lr->initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE) return REG_REQ_OK; last_wiphy = wiphy_idx_to_wiphy(lr->wiphy_idx); if (last_wiphy != wiphy) { /* * Two cards with two APs claiming different * Country IE alpha2s. We could * intersect them, but that seems unlikely * to be correct. Reject second one for now. */ if (regdom_changes(country_ie_request->alpha2)) return REG_REQ_IGNORE; return REG_REQ_ALREADY_SET; } if (regdom_changes(country_ie_request->alpha2)) return REG_REQ_OK; return REG_REQ_ALREADY_SET; } /** * reg_process_hint_country_ie - process regulatory requests from country IEs * @country_ie_request: a regulatory request from a country IE * * The wireless subsystem can use this function to process * a regulatory request issued by a country Information Element. * * Returns one of the different reg request treatment values. 
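 * * As a quick map of the outcomes implemented below: REG_REQ_OK when the * hint survives the checks and the regulatory database query succeeds, * REG_REQ_ALREADY_SET when the same alpha2 is already in effect (the * request is freed), and REG_REQ_IGNORE otherwise; REG_REQ_INTERSECT is * not expected for country IEs and only triggers a one-time warning.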
*/ static enum reg_request_treatment reg_process_hint_country_ie(struct wiphy *wiphy, struct regulatory_request *country_ie_request) { enum reg_request_treatment treatment; treatment = __reg_process_hint_country_ie(wiphy, country_ie_request); switch (treatment) { case REG_REQ_OK: break; case REG_REQ_IGNORE: return REG_REQ_IGNORE; case REG_REQ_ALREADY_SET: reg_free_request(country_ie_request); return REG_REQ_ALREADY_SET; case REG_REQ_INTERSECT: /* * This doesn't happen yet, not sure we * ever want to support it for this case. */ WARN_ONCE(1, "Unexpected intersection for country elements"); return REG_REQ_IGNORE; } country_ie_request->intersect = false; country_ie_request->processed = false; if (reg_query_database(country_ie_request)) { reg_update_last_request(country_ie_request); return REG_REQ_OK; } return REG_REQ_IGNORE; } bool reg_dfs_domain_same(struct wiphy *wiphy1, struct wiphy *wiphy2) { const struct ieee80211_regdomain *wiphy1_regd = NULL; const struct ieee80211_regdomain *wiphy2_regd = NULL; const struct ieee80211_regdomain *cfg80211_regd = NULL; bool dfs_domain_same; rcu_read_lock(); cfg80211_regd = rcu_dereference(cfg80211_regdomain); wiphy1_regd = rcu_dereference(wiphy1->regd); if (!wiphy1_regd) wiphy1_regd = cfg80211_regd; wiphy2_regd = rcu_dereference(wiphy2->regd); if (!wiphy2_regd) wiphy2_regd = cfg80211_regd; dfs_domain_same = wiphy1_regd->dfs_region == wiphy2_regd->dfs_region; rcu_read_unlock(); return dfs_domain_same; } static void reg_copy_dfs_chan_state(struct ieee80211_channel *dst_chan, struct ieee80211_channel *src_chan) { if (!(dst_chan->flags & IEEE80211_CHAN_RADAR) || !(src_chan->flags & IEEE80211_CHAN_RADAR)) return; if (dst_chan->flags & IEEE80211_CHAN_DISABLED || src_chan->flags & IEEE80211_CHAN_DISABLED) return; if (src_chan->center_freq == dst_chan->center_freq && dst_chan->dfs_state == NL80211_DFS_USABLE) { dst_chan->dfs_state = src_chan->dfs_state; dst_chan->dfs_state_entered = src_chan->dfs_state_entered; } } static void wiphy_share_dfs_chan_state(struct wiphy *dst_wiphy, struct wiphy *src_wiphy) { struct ieee80211_supported_band *src_sband, *dst_sband; struct ieee80211_channel *src_chan, *dst_chan; int i, j, band; if (!reg_dfs_domain_same(dst_wiphy, src_wiphy)) return; for (band = 0; band < NUM_NL80211_BANDS; band++) { dst_sband = dst_wiphy->bands[band]; src_sband = src_wiphy->bands[band]; if (!dst_sband || !src_sband) continue; for (i = 0; i < dst_sband->n_channels; i++) { dst_chan = &dst_sband->channels[i]; for (j = 0; j < src_sband->n_channels; j++) { src_chan = &src_sband->channels[j]; reg_copy_dfs_chan_state(dst_chan, src_chan); } } } } static void wiphy_all_share_dfs_chan_state(struct wiphy *wiphy) { struct cfg80211_registered_device *rdev; ASSERT_RTNL(); list_for_each_entry(rdev, &cfg80211_rdev_list, list) { if (wiphy == &rdev->wiphy) continue; wiphy_share_dfs_chan_state(wiphy, &rdev->wiphy); } } /* This processes *all* regulatory hints */ static void reg_process_hint(struct regulatory_request *reg_request) { struct wiphy *wiphy = NULL; enum reg_request_treatment treatment; enum nl80211_reg_initiator initiator = reg_request->initiator; if (reg_request->wiphy_idx != WIPHY_IDX_INVALID) wiphy = wiphy_idx_to_wiphy(reg_request->wiphy_idx); switch (initiator) { case NL80211_REGDOM_SET_BY_CORE: treatment = reg_process_hint_core(reg_request); break; case NL80211_REGDOM_SET_BY_USER: treatment = reg_process_hint_user(reg_request); break; case NL80211_REGDOM_SET_BY_DRIVER: if (!wiphy) goto out_free; treatment = reg_process_hint_driver(wiphy, reg_request); 
break; case NL80211_REGDOM_SET_BY_COUNTRY_IE: if (!wiphy) goto out_free; treatment = reg_process_hint_country_ie(wiphy, reg_request); break; default: WARN(1, "invalid initiator %d\n", initiator); goto out_free; } if (treatment == REG_REQ_IGNORE) goto out_free; WARN(treatment != REG_REQ_OK && treatment != REG_REQ_ALREADY_SET, "unexpected treatment value %d\n", treatment); /* This is required so that the orig_* parameters are saved. * NOTE: treatment must be set for any case that reaches here! */ if (treatment == REG_REQ_ALREADY_SET && wiphy && wiphy->regulatory_flags & REGULATORY_STRICT_REG) { wiphy_update_regulatory(wiphy, initiator); wiphy_all_share_dfs_chan_state(wiphy); reg_check_channels(); } return; out_free: reg_free_request(reg_request); } static void notify_self_managed_wiphys(struct regulatory_request *request) { struct cfg80211_registered_device *rdev; struct wiphy *wiphy; list_for_each_entry(rdev, &cfg80211_rdev_list, list) { wiphy = &rdev->wiphy; if (wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED && request->initiator == NL80211_REGDOM_SET_BY_USER && request->user_reg_hint_type == NL80211_USER_REG_HINT_CELL_BASE) reg_call_notifier(wiphy, request); } } /* * Processes regulatory hints, this is all the NL80211_REGDOM_SET_BY_* * Regulatory hints come on a first come, first served basis and we * must process each one atomically. */ static void reg_process_pending_hints(void) { struct regulatory_request *reg_request, *lr; lr = get_last_request(); /* When last_request->processed becomes true this will be rescheduled */ if (lr && !lr->processed) { reg_process_hint(lr); return; } spin_lock(&reg_requests_lock); if (list_empty(&reg_requests_list)) { spin_unlock(&reg_requests_lock); return; } reg_request = list_first_entry(&reg_requests_list, struct regulatory_request, list); list_del_init(&reg_request->list); spin_unlock(&reg_requests_lock); notify_self_managed_wiphys(reg_request); reg_process_hint(reg_request); lr = get_last_request(); spin_lock(&reg_requests_lock); if (!list_empty(&reg_requests_list) && lr && lr->processed) schedule_work(&reg_work); spin_unlock(&reg_requests_lock); } /* Processes beacon hints -- this has nothing to do with country IEs */ static void reg_process_pending_beacon_hints(void) { struct cfg80211_registered_device *rdev; struct reg_beacon *pending_beacon, *tmp; /* This goes through the _pending_ beacon list */ spin_lock_bh(&reg_pending_beacons_lock); list_for_each_entry_safe(pending_beacon, tmp, &reg_pending_beacons, list) { list_del_init(&pending_beacon->list); /* Applies the beacon hint to current wiphys */ list_for_each_entry(rdev, &cfg80211_rdev_list, list) wiphy_update_new_beacon(&rdev->wiphy, pending_beacon); /* Remembers the beacon hint for new wiphys or reg changes */ list_add_tail(&pending_beacon->list, &reg_beacon_list); } spin_unlock_bh(&reg_pending_beacons_lock); } static void reg_process_self_managed_hints(void) { struct cfg80211_registered_device *rdev; struct wiphy *wiphy; const struct ieee80211_regdomain *tmp; const struct ieee80211_regdomain *regd; enum nl80211_band band; struct regulatory_request request = {}; list_for_each_entry(rdev, &cfg80211_rdev_list, list) { wiphy = &rdev->wiphy; spin_lock(&reg_requests_lock); regd = rdev->requested_regd; rdev->requested_regd = NULL; spin_unlock(&reg_requests_lock); if (regd == NULL) continue; tmp = get_wiphy_regdom(wiphy); rcu_assign_pointer(wiphy->regd, regd); rcu_free_regdom(tmp); for (band = 0; band < NUM_NL80211_BANDS; band++) handle_band_custom(wiphy, wiphy->bands[band], regd); reg_process_ht_flags(wiphy); request.wiphy_idx =
get_wiphy_idx(wiphy); request.alpha2[0] = regd->alpha2[0]; request.alpha2[1] = regd->alpha2[1]; request.initiator = NL80211_REGDOM_SET_BY_DRIVER; nl80211_send_wiphy_reg_change_event(&request); } reg_check_channels(); } static void reg_todo(struct work_struct *work) { rtnl_lock(); reg_process_pending_hints(); reg_process_pending_beacon_hints(); reg_process_self_managed_hints(); rtnl_unlock(); } static void queue_regulatory_request(struct regulatory_request *request) { request->alpha2[0] = toupper(request->alpha2[0]); request->alpha2[1] = toupper(request->alpha2[1]); spin_lock(&reg_requests_lock); list_add_tail(&request->list, &reg_requests_list); spin_unlock(&reg_requests_lock); schedule_work(&reg_work); } /* * Core regulatory hint -- happens during cfg80211_init() * and when we restore regulatory settings. */ static int regulatory_hint_core(const char *alpha2) { struct regulatory_request *request; request = kzalloc(sizeof(struct regulatory_request), GFP_KERNEL); if (!request) return -ENOMEM; request->alpha2[0] = alpha2[0]; request->alpha2[1] = alpha2[1]; request->initiator = NL80211_REGDOM_SET_BY_CORE; request->wiphy_idx = WIPHY_IDX_INVALID; queue_regulatory_request(request); return 0; } /* User hints */ int regulatory_hint_user(const char *alpha2, enum nl80211_user_reg_hint_type user_reg_hint_type) { struct regulatory_request *request; if (WARN_ON(!alpha2)) return -EINVAL; request = kzalloc(sizeof(struct regulatory_request), GFP_KERNEL); if (!request) return -ENOMEM; request->wiphy_idx = WIPHY_IDX_INVALID; request->alpha2[0] = alpha2[0]; request->alpha2[1] = alpha2[1]; request->initiator = NL80211_REGDOM_SET_BY_USER; request->user_reg_hint_type = user_reg_hint_type; /* Allow calling CRDA again */ reset_crda_timeouts(); queue_regulatory_request(request); return 0; } int regulatory_hint_indoor(bool is_indoor, u32 portid) { spin_lock(&reg_indoor_lock); /* It is possible that more than one user space process is trying to * configure the indoor setting. To handle such cases, clear the indoor * setting in case some process does not think that the device * is operating in an indoor environment. In addition, if a user space * process indicates that it is controlling the indoor setting, save its * portid, i.e., make it the owner.
*/ reg_is_indoor = is_indoor; if (reg_is_indoor) { if (!reg_is_indoor_portid) reg_is_indoor_portid = portid; } else { reg_is_indoor_portid = 0; } spin_unlock(&reg_indoor_lock); if (!is_indoor) reg_check_channels(); return 0; } void regulatory_netlink_notify(u32 portid) { spin_lock(&reg_indoor_lock); if (reg_is_indoor_portid != portid) { spin_unlock(&reg_indoor_lock); return; } reg_is_indoor = false; reg_is_indoor_portid = 0; spin_unlock(&reg_indoor_lock); reg_check_channels(); } /* Driver hints */ int regulatory_hint(struct wiphy *wiphy, const char *alpha2) { struct regulatory_request *request; if (WARN_ON(!alpha2 || !wiphy)) return -EINVAL; wiphy->regulatory_flags &= ~REGULATORY_CUSTOM_REG; request = kzalloc(sizeof(struct regulatory_request), GFP_KERNEL); if (!request) return -ENOMEM; request->wiphy_idx = get_wiphy_idx(wiphy); request->alpha2[0] = alpha2[0]; request->alpha2[1] = alpha2[1]; request->initiator = NL80211_REGDOM_SET_BY_DRIVER; /* Allow calling CRDA again */ reset_crda_timeouts(); queue_regulatory_request(request); return 0; } EXPORT_SYMBOL(regulatory_hint); void regulatory_hint_country_ie(struct wiphy *wiphy, enum nl80211_band band, const u8 *country_ie, u8 country_ie_len) { char alpha2[2]; enum environment_cap env = ENVIRON_ANY; struct regulatory_request *request = NULL, *lr; /* IE len must be evenly divisible by 2 */ if (country_ie_len & 0x01) return; if (country_ie_len < IEEE80211_COUNTRY_IE_MIN_LEN) return; request = kzalloc(sizeof(*request), GFP_KERNEL); if (!request) return; alpha2[0] = country_ie[0]; alpha2[1] = country_ie[1]; if (country_ie[2] == 'I') env = ENVIRON_INDOOR; else if (country_ie[2] == 'O') env = ENVIRON_OUTDOOR; rcu_read_lock(); lr = get_last_request(); if (unlikely(!lr)) goto out; /* * We will run this only upon a successful connection on cfg80211. * We leave conflict resolution to the workqueue, where we can hold * the RTNL. */ if (lr->initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE && lr->wiphy_idx != WIPHY_IDX_INVALID) goto out; request->wiphy_idx = get_wiphy_idx(wiphy); request->alpha2[0] = alpha2[0]; request->alpha2[1] = alpha2[1]; request->initiator = NL80211_REGDOM_SET_BY_COUNTRY_IE; request->country_ie_env = env; /* Allow calling CRDA again */ reset_crda_timeouts(); queue_regulatory_request(request); request = NULL; out: kfree(request); rcu_read_unlock(); } static void restore_alpha2(char *alpha2, bool reset_user) { /* indicates there is no alpha2 to consider for restoration */ alpha2[0] = '9'; alpha2[1] = '7'; /* The user setting has precedence over the module parameter */ if (is_user_regdom_saved()) { /* Unless we're asked to ignore it and reset it */ if (reset_user) { pr_debug("Restoring regulatory settings including user preference\n"); user_alpha2[0] = '9'; user_alpha2[1] = '7'; /* * If we're ignoring user settings, we still need to * check the module parameter to ensure we put things * back as they were for a full restore.
*/ if (!is_world_regdom(ieee80211_regdom)) { pr_debug("Keeping preference on module parameter ieee80211_regdom: %c%c\n", ieee80211_regdom[0], ieee80211_regdom[1]); alpha2[0] = ieee80211_regdom[0]; alpha2[1] = ieee80211_regdom[1]; } } else { pr_debug("Restoring regulatory settings while preserving user preference for: %c%c\n", user_alpha2[0], user_alpha2[1]); alpha2[0] = user_alpha2[0]; alpha2[1] = user_alpha2[1]; } } else if (!is_world_regdom(ieee80211_regdom)) { pr_debug("Keeping preference on module parameter ieee80211_regdom: %c%c\n", ieee80211_regdom[0], ieee80211_regdom[1]); alpha2[0] = ieee80211_regdom[0]; alpha2[1] = ieee80211_regdom[1]; } else pr_debug("Restoring regulatory settings\n"); } static void restore_custom_reg_settings(struct wiphy *wiphy) { struct ieee80211_supported_band *sband; enum nl80211_band band; struct ieee80211_channel *chan; int i; for (band = 0; band < NUM_NL80211_BANDS; band++) { sband = wiphy->bands[band]; if (!sband) continue; for (i = 0; i < sband->n_channels; i++) { chan = &sband->channels[i]; chan->flags = chan->orig_flags; chan->max_antenna_gain = chan->orig_mag; chan->max_power = chan->orig_mpwr; chan->beacon_found = false; } } } /* * Restoring regulatory settings involves ignoring any * possibly stale country IE information and user regulatory * settings if so desired; this includes any beacon hints * learned as we could have traveled outside to another country * after disconnection. To restore regulatory settings we do * exactly what we did at bootup: * * - send a core regulatory hint * - send a user regulatory hint if applicable * * Device drivers that send a regulatory hint for a specific country * keep their own regulatory domain on wiphy->regd so that it does * not need to be remembered. */ static void restore_regulatory_settings(bool reset_user, bool cached) { char alpha2[2]; char world_alpha2[2]; struct reg_beacon *reg_beacon, *btmp; LIST_HEAD(tmp_reg_req_list); struct cfg80211_registered_device *rdev; ASSERT_RTNL(); /* * Clear the indoor setting in case that it is not controlled by user * space, as otherwise there is no guarantee that the device is still * operating in an indoor environment. */ spin_lock(&reg_indoor_lock); if (reg_is_indoor && !reg_is_indoor_portid) { reg_is_indoor = false; reg_check_channels(); } spin_unlock(&reg_indoor_lock); reset_regdomains(true, &world_regdom); restore_alpha2(alpha2, reset_user); /* * If there are any pending requests we simply * stash them to a temporary pending queue and * add them after we've restored regulatory * settings.
*/ spin_lock(&reg_requests_lock); list_splice_tail_init(&reg_requests_list, &tmp_reg_req_list); spin_unlock(&reg_requests_lock); /* Clear beacon hints */ spin_lock_bh(&reg_pending_beacons_lock); list_for_each_entry_safe(reg_beacon, btmp, &reg_pending_beacons, list) { list_del(&reg_beacon->list); kfree(reg_beacon); } spin_unlock_bh(&reg_pending_beacons_lock); list_for_each_entry_safe(reg_beacon, btmp, &reg_beacon_list, list) { list_del(&reg_beacon->list); kfree(reg_beacon); } /* First restore to the basic regulatory settings */ world_alpha2[0] = cfg80211_world_regdom->alpha2[0]; world_alpha2[1] = cfg80211_world_regdom->alpha2[1]; list_for_each_entry(rdev, &cfg80211_rdev_list, list) { if (rdev->wiphy.regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED) continue; if (rdev->wiphy.regulatory_flags & REGULATORY_CUSTOM_REG) restore_custom_reg_settings(&rdev->wiphy); } if (cached && (!is_an_alpha2(alpha2) || !IS_ERR_OR_NULL(cfg80211_user_regdom))) { reset_regdomains(false, cfg80211_world_regdom); update_all_wiphy_regulatory(NL80211_REGDOM_SET_BY_CORE); print_regdomain(get_cfg80211_regdom()); nl80211_send_reg_change_event(&core_request_world); reg_set_request_processed(); if (is_an_alpha2(alpha2) && !regulatory_hint_user(alpha2, NL80211_USER_REG_HINT_USER)) { struct regulatory_request *ureq; spin_lock(&reg_requests_lock); ureq = list_last_entry(&reg_requests_list, struct regulatory_request, list); list_del(&ureq->list); spin_unlock(&reg_requests_lock); notify_self_managed_wiphys(ureq); reg_update_last_request(ureq); set_regdom(reg_copy_regd(cfg80211_user_regdom), REGD_SOURCE_CACHED); } } else { regulatory_hint_core(world_alpha2); /* * This restores the ieee80211_regdom module parameter * preference or the last user requested regulatory * settings; user regulatory settings take precedence. */ if (is_an_alpha2(alpha2)) regulatory_hint_user(alpha2, NL80211_USER_REG_HINT_USER); } spin_lock(&reg_requests_lock); list_splice_tail_init(&tmp_reg_req_list, &reg_requests_list); spin_unlock(&reg_requests_lock); pr_debug("Kicking the queue\n"); schedule_work(&reg_work); } static bool is_wiphy_all_set_reg_flag(enum ieee80211_regulatory_flags flag) { struct cfg80211_registered_device *rdev; struct wireless_dev *wdev; list_for_each_entry(rdev, &cfg80211_rdev_list, list) { list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) { wdev_lock(wdev); if (!(wdev->wiphy->regulatory_flags & flag)) { wdev_unlock(wdev); return false; } wdev_unlock(wdev); } } return true; } void regulatory_hint_disconnect(void) { /* Restore of regulatory settings is not required when wiphy(s) * ignore the IE from the connected access point, but clearance of * beacon hints is required when wiphy(s) support beacon hints.
*/ if (is_wiphy_all_set_reg_flag(REGULATORY_COUNTRY_IE_IGNORE)) { struct reg_beacon *reg_beacon, *btmp; if (is_wiphy_all_set_reg_flag(REGULATORY_DISABLE_BEACON_HINTS)) return; spin_lock_bh(&reg_pending_beacons_lock); list_for_each_entry_safe(reg_beacon, btmp, &reg_pending_beacons, list) { list_del(&reg_beacon->list); kfree(reg_beacon); } spin_unlock_bh(&reg_pending_beacons_lock); list_for_each_entry_safe(reg_beacon, btmp, &reg_beacon_list, list) { list_del(&reg_beacon->list); kfree(reg_beacon); } return; } pr_debug("All devices are disconnected, going to restore regulatory settings\n"); restore_regulatory_settings(false, true); } static bool freq_is_chan_12_13_14(u32 freq) { if (freq == ieee80211_channel_to_frequency(12, NL80211_BAND_2GHZ) || freq == ieee80211_channel_to_frequency(13, NL80211_BAND_2GHZ) || freq == ieee80211_channel_to_frequency(14, NL80211_BAND_2GHZ)) return true; return false; } static bool pending_reg_beacon(struct ieee80211_channel *beacon_chan) { struct reg_beacon *pending_beacon; list_for_each_entry(pending_beacon, &reg_pending_beacons, list) if (beacon_chan->center_freq == pending_beacon->chan.center_freq) return true; return false; } int regulatory_hint_found_beacon(struct wiphy *wiphy, struct ieee80211_channel *beacon_chan, gfp_t gfp) { struct reg_beacon *reg_beacon; bool processing; if (beacon_chan->beacon_found || beacon_chan->flags & IEEE80211_CHAN_RADAR || (beacon_chan->band == NL80211_BAND_2GHZ && !freq_is_chan_12_13_14(beacon_chan->center_freq))) return 0; spin_lock_bh(&reg_pending_beacons_lock); processing = pending_reg_beacon(beacon_chan); spin_unlock_bh(&reg_pending_beacons_lock); if (processing) return 0; reg_beacon = kzalloc(sizeof(struct reg_beacon), gfp); if (!reg_beacon) return -ENOMEM; pr_debug("Found new beacon on frequency: %d MHz (Ch %d) on %s\n", beacon_chan->center_freq, ieee80211_frequency_to_channel(beacon_chan->center_freq), wiphy_name(wiphy)); memcpy(&reg_beacon->chan, beacon_chan, sizeof(struct ieee80211_channel)); /* * Since we can be called from BH or non-BH context * we must use spin_lock_bh() */ spin_lock_bh(&reg_pending_beacons_lock); list_add_tail(&reg_beacon->list, &reg_pending_beacons); spin_unlock_bh(&reg_pending_beacons_lock); schedule_work(&reg_work); return 0; } static void print_rd_rules(const struct ieee80211_regdomain *rd) { unsigned int i; const struct ieee80211_reg_rule *reg_rule = NULL; const struct ieee80211_freq_range *freq_range = NULL; const struct ieee80211_power_rule *power_rule = NULL; char bw[32], cac_time[32]; pr_debug(" (start_freq - end_freq @ bandwidth), (max_antenna_gain, max_eirp), (dfs_cac_time)\n"); for (i = 0; i < rd->n_reg_rules; i++) { reg_rule = &rd->reg_rules[i]; freq_range = &reg_rule->freq_range; power_rule = &reg_rule->power_rule; if (reg_rule->flags & NL80211_RRF_AUTO_BW) snprintf(bw, sizeof(bw), "%d KHz, %d KHz AUTO", freq_range->max_bandwidth_khz, reg_get_max_bandwidth(rd, reg_rule)); else snprintf(bw, sizeof(bw), "%d KHz", freq_range->max_bandwidth_khz); if (reg_rule->flags & NL80211_RRF_DFS) scnprintf(cac_time, sizeof(cac_time), "%u s", reg_rule->dfs_cac_ms/1000); else scnprintf(cac_time, sizeof(cac_time), "N/A"); /* * There may not be documentation for max antenna gain * in certain regions */ if (power_rule->max_antenna_gain) pr_debug(" (%d KHz - %d KHz @ %s), (%d mBi, %d mBm), (%s)\n", freq_range->start_freq_khz, freq_range->end_freq_khz, bw, power_rule->max_antenna_gain, power_rule->max_eirp, cac_time); else pr_debug(" (%d KHz - %d KHz @ %s), (N/A, %d mBm), (%s)\n", freq_range->start_freq_khz, freq_range->end_freq_khz, bw,
power_rule->max_eirp, cac_time); } } bool reg_supported_dfs_region(enum nl80211_dfs_regions dfs_region) { switch (dfs_region) { case NL80211_DFS_UNSET: case NL80211_DFS_FCC: case NL80211_DFS_ETSI: case NL80211_DFS_JP: return true; default: pr_debug("Ignoring unknown DFS master region: %d\n", dfs_region); return false; } } static void print_regdomain(const struct ieee80211_regdomain *rd) { struct regulatory_request *lr = get_last_request(); if (is_intersected_alpha2(rd->alpha2)) { if (lr->initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE) { struct cfg80211_registered_device *rdev; rdev = cfg80211_rdev_by_wiphy_idx(lr->wiphy_idx); if (rdev) { pr_debug("Current regulatory domain updated by AP to: %c%c\n", rdev->country_ie_alpha2[0], rdev->country_ie_alpha2[1]); } else pr_debug("Current regulatory domain intersected:\n"); } else pr_debug("Current regulatory domain intersected:\n"); } else if (is_world_regdom(rd->alpha2)) { pr_debug("World regulatory domain updated:\n"); } else { if (is_unknown_alpha2(rd->alpha2)) pr_debug("Regulatory domain changed to driver built-in settings (unknown country)\n"); else { if (reg_request_cell_base(lr)) pr_debug("Regulatory domain changed to country: %c%c by Cell Station\n", rd->alpha2[0], rd->alpha2[1]); else pr_debug("Regulatory domain changed to country: %c%c\n", rd->alpha2[0], rd->alpha2[1]); } } pr_debug(" DFS Master region: %s", reg_dfs_region_str(rd->dfs_region)); print_rd_rules(rd); } static void print_regdomain_info(const struct ieee80211_regdomain *rd) { pr_debug("Regulatory domain: %c%c\n", rd->alpha2[0], rd->alpha2[1]); print_rd_rules(rd); } static int reg_set_rd_core(const struct ieee80211_regdomain *rd) { if (!is_world_regdom(rd->alpha2)) return -EINVAL; update_world_regdomain(rd); return 0; } static int reg_set_rd_user(const struct ieee80211_regdomain *rd, struct regulatory_request *user_request) { const struct ieee80211_regdomain *intersected_rd = NULL; if (!regdom_changes(rd->alpha2)) return -EALREADY; if (!is_valid_rd(rd)) { pr_err("Invalid regulatory domain detected: %c%c\n", rd->alpha2[0], rd->alpha2[1]); print_regdomain_info(rd); return -EINVAL; } if (!user_request->intersect) { reset_regdomains(false, rd); return 0; } intersected_rd = regdom_intersect(rd, get_cfg80211_regdom()); if (!intersected_rd) return -EINVAL; kfree(rd); rd = NULL; reset_regdomains(false, intersected_rd); return 0; } static int reg_set_rd_driver(const struct ieee80211_regdomain *rd, struct regulatory_request *driver_request) { const struct ieee80211_regdomain *regd; const struct ieee80211_regdomain *intersected_rd = NULL; const struct ieee80211_regdomain *tmp; struct wiphy *request_wiphy; if (is_world_regdom(rd->alpha2)) return -EINVAL; if (!regdom_changes(rd->alpha2)) return -EALREADY; if (!is_valid_rd(rd)) { pr_err("Invalid regulatory domain detected: %c%c\n", rd->alpha2[0], rd->alpha2[1]); print_regdomain_info(rd); return -EINVAL; } request_wiphy = wiphy_idx_to_wiphy(driver_request->wiphy_idx); if (!request_wiphy) return -ENODEV; if (!driver_request->intersect) { if (request_wiphy->regd) return -EALREADY; regd = reg_copy_regd(rd); if (IS_ERR(regd)) return PTR_ERR(regd); rcu_assign_pointer(request_wiphy->regd, regd); reset_regdomains(false, rd); return 0; } intersected_rd = regdom_intersect(rd, get_cfg80211_regdom()); if (!intersected_rd) return -EINVAL; /* * We can trash what CRDA provided now. 
* However if a driver requested this specific regulatory * domain we keep it for its private use */ tmp = get_wiphy_regdom(request_wiphy); rcu_assign_pointer(request_wiphy->regd, rd); rcu_free_regdom(tmp); rd = NULL; reset_regdomains(false, intersected_rd); return 0; } static int reg_set_rd_country_ie(const struct ieee80211_regdomain *rd, struct regulatory_request *country_ie_request) { struct wiphy *request_wiphy; if (!is_alpha2_set(rd->alpha2) && !is_an_alpha2(rd->alpha2) && !is_unknown_alpha2(rd->alpha2)) return -EINVAL; /* * Let's only bother proceeding on the same alpha2 if the current * rd is non static (it means CRDA was present and was used last) * and the pending request came in from a country IE */ if (!is_valid_rd(rd)) { pr_err("Invalid regulatory domain detected: %c%c\n", rd->alpha2[0], rd->alpha2[1]); print_regdomain_info(rd); return -EINVAL; } request_wiphy = wiphy_idx_to_wiphy(country_ie_request->wiphy_idx); if (!request_wiphy) return -ENODEV; if (country_ie_request->intersect) return -EINVAL; reset_regdomains(false, rd); return 0; } /* * Use this call to set the current regulatory domain. Conflicts with * multiple drivers can be ironed out later. Caller must've already * kmalloc'd the rd structure. */ int set_regdom(const struct ieee80211_regdomain *rd, enum ieee80211_regd_source regd_src) { struct regulatory_request *lr; bool user_reset = false; int r; if (IS_ERR_OR_NULL(rd)) return -ENODATA; if (!reg_is_valid_request(rd->alpha2)) { kfree(rd); return -EINVAL; } if (regd_src == REGD_SOURCE_CRDA) reset_crda_timeouts(); lr = get_last_request(); /* Note that this doesn't update the wiphys, this is done below */ switch (lr->initiator) { case NL80211_REGDOM_SET_BY_CORE: r = reg_set_rd_core(rd); break; case NL80211_REGDOM_SET_BY_USER: cfg80211_save_user_regdom(rd); r = reg_set_rd_user(rd, lr); user_reset = true; break; case NL80211_REGDOM_SET_BY_DRIVER: r = reg_set_rd_driver(rd, lr); break; case NL80211_REGDOM_SET_BY_COUNTRY_IE: r = reg_set_rd_country_ie(rd, lr); break; default: WARN(1, "invalid initiator %d\n", lr->initiator); kfree(rd); return -EINVAL; } if (r) { switch (r) { case -EALREADY: reg_set_request_processed(); break; default: /* Back to world regulatory in case of errors */ restore_regulatory_settings(user_reset, false); } kfree(rd); return r; } /* This would make this whole thing pointless */ if (WARN_ON(!lr->intersect && rd != get_cfg80211_regdom())) return -EINVAL; /* update all wiphys now with the new established regulatory domain */ update_all_wiphy_regulatory(lr->initiator); print_regdomain(get_cfg80211_regdom()); nl80211_send_reg_change_event(lr); reg_set_request_processed(); return 0; } static int __regulatory_set_wiphy_regd(struct wiphy *wiphy, struct ieee80211_regdomain *rd) { const struct ieee80211_regdomain *regd; const struct ieee80211_regdomain *prev_regd; struct cfg80211_registered_device *rdev; if (WARN_ON(!wiphy || !rd)) return -EINVAL; if (WARN(!(wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED), "wiphy should have REGULATORY_WIPHY_SELF_MANAGED\n")) return -EPERM; if (WARN(!is_valid_rd(rd), "Invalid regulatory domain detected\n")) { print_regdomain_info(rd); return -EINVAL; } regd = reg_copy_regd(rd); if (IS_ERR(regd)) return PTR_ERR(regd); rdev = wiphy_to_rdev(wiphy); spin_lock(&reg_requests_lock); prev_regd = rdev->requested_regd; rdev->requested_regd = regd; spin_unlock(&reg_requests_lock); kfree(prev_regd); return 0; } int regulatory_set_wiphy_regd(struct wiphy *wiphy, struct ieee80211_regdomain *rd) { int ret = __regulatory_set_wiphy_regd(wiphy,
rd); if (ret) return ret; schedule_work(&reg_work); return 0; } EXPORT_SYMBOL(regulatory_set_wiphy_regd); int regulatory_set_wiphy_regd_sync_rtnl(struct wiphy *wiphy, struct ieee80211_regdomain *rd) { int ret; ASSERT_RTNL(); ret = __regulatory_set_wiphy_regd(wiphy, rd); if (ret) return ret; /* process the request immediately */ reg_process_self_managed_hints(); return 0; } EXPORT_SYMBOL(regulatory_set_wiphy_regd_sync_rtnl); void wiphy_regulatory_register(struct wiphy *wiphy) { struct regulatory_request *lr = get_last_request(); /* self-managed devices ignore beacon hints and country IE */ if (wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED) { wiphy->regulatory_flags |= REGULATORY_DISABLE_BEACON_HINTS | REGULATORY_COUNTRY_IE_IGNORE; /* * The last request may have been received before this * registration call. Call the driver notifier if * initiator is USER and user type is CELL_BASE. */ if (lr->initiator == NL80211_REGDOM_SET_BY_USER && lr->user_reg_hint_type == NL80211_USER_REG_HINT_CELL_BASE) reg_call_notifier(wiphy, lr); } if (!reg_dev_ignore_cell_hint(wiphy)) reg_num_devs_support_basehint++; wiphy_update_regulatory(wiphy, lr->initiator); wiphy_all_share_dfs_chan_state(wiphy); } void wiphy_regulatory_deregister(struct wiphy *wiphy) { struct wiphy *request_wiphy = NULL; struct regulatory_request *lr; lr = get_last_request(); if (!reg_dev_ignore_cell_hint(wiphy)) reg_num_devs_support_basehint--; rcu_free_regdom(get_wiphy_regdom(wiphy)); RCU_INIT_POINTER(wiphy->regd, NULL); if (lr) request_wiphy = wiphy_idx_to_wiphy(lr->wiphy_idx); if (!request_wiphy || request_wiphy != wiphy) return; lr->wiphy_idx = WIPHY_IDX_INVALID; lr->country_ie_env = ENVIRON_ANY; } /* * See http://www.fcc.gov/document/5-ghz-unlicensed-spectrum-unii for * UNII band definitions */ int cfg80211_get_unii(int freq) { /* UNII-1 */ if (freq >= 5150 && freq <= 5250) return 0; /* UNII-2A */ if (freq > 5250 && freq <= 5350) return 1; /* UNII-2B */ if (freq > 5350 && freq <= 5470) return 2; /* UNII-2C */ if (freq > 5470 && freq <= 5725) return 3; /* UNII-3 */ if (freq > 5725 && freq <= 5825) return 4; return -EINVAL; } bool regulatory_indoor_allowed(void) { return reg_is_indoor; } bool regulatory_pre_cac_allowed(struct wiphy *wiphy) { const struct ieee80211_regdomain *regd = NULL; const struct ieee80211_regdomain *wiphy_regd = NULL; bool pre_cac_allowed = false; rcu_read_lock(); regd = rcu_dereference(cfg80211_regdomain); wiphy_regd = rcu_dereference(wiphy->regd); if (!wiphy_regd) { if (regd->dfs_region == NL80211_DFS_ETSI) pre_cac_allowed = true; rcu_read_unlock(); return pre_cac_allowed; } if (regd->dfs_region == wiphy_regd->dfs_region && wiphy_regd->dfs_region == NL80211_DFS_ETSI) pre_cac_allowed = true; rcu_read_unlock(); return pre_cac_allowed; } void regulatory_propagate_dfs_state(struct wiphy *wiphy, struct cfg80211_chan_def *chandef, enum nl80211_dfs_state dfs_state, enum nl80211_radar_event event) { struct cfg80211_registered_device *rdev; ASSERT_RTNL(); if (WARN_ON(!cfg80211_chandef_valid(chandef))) return; list_for_each_entry(rdev, &cfg80211_rdev_list, list) { if (wiphy == &rdev->wiphy) continue; if (!reg_dfs_domain_same(wiphy, &rdev->wiphy)) continue; if (!ieee80211_get_channel(&rdev->wiphy, chandef->chan->center_freq)) continue; cfg80211_set_dfs_state(&rdev->wiphy, chandef, dfs_state); if (event == NL80211_RADAR_DETECTED || event == NL80211_RADAR_CAC_FINISHED) cfg80211_sched_dfs_chan_update(rdev); nl80211_radar_notify(rdev, chandef, event, NULL, GFP_KERNEL); } } static int __init regulatory_init_db(void) { int
err; /* * It's possible that - due to other bugs/issues - cfg80211 * never called regulatory_init() below, or that it failed; * in that case, don't try to do any further work here as * it's doomed to lead to crashes. */ if (IS_ERR_OR_NULL(reg_pdev)) return -EINVAL; err = load_builtin_regdb_keys(); if (err) return err; /* We always try to get an update for the static regdomain */ err = regulatory_hint_core(cfg80211_world_regdom->alpha2); if (err) { if (err == -ENOMEM) { platform_device_unregister(reg_pdev); return err; } /* * N.B. kobject_uevent_env() can fail mainly for when we're out * of memory, which is handled and propagated appropriately above * but it can also fail during a netlink_broadcast() or during * early boot for call_usermodehelper(). For now treat these * errors as non-fatal. */ pr_err("kobject_uevent_env() was unable to call CRDA during init\n"); } /* * Finally, if the user set the module parameter treat it * as a user hint. */ if (!is_world_regdom(ieee80211_regdom)) regulatory_hint_user(ieee80211_regdom, NL80211_USER_REG_HINT_USER); return 0; } #ifndef MODULE late_initcall(regulatory_init_db); #endif int __init regulatory_init(void) { reg_pdev = platform_device_register_simple("regulatory", 0, NULL, 0); if (IS_ERR(reg_pdev)) return PTR_ERR(reg_pdev); spin_lock_init(&reg_requests_lock); spin_lock_init(&reg_pending_beacons_lock); spin_lock_init(&reg_indoor_lock); rcu_assign_pointer(cfg80211_regdomain, cfg80211_world_regdom); user_alpha2[0] = '9'; user_alpha2[1] = '7'; #ifdef MODULE return regulatory_init_db(); #else return 0; #endif } void regulatory_exit(void) { struct regulatory_request *reg_request, *tmp; struct reg_beacon *reg_beacon, *btmp; cancel_work_sync(&reg_work); cancel_crda_timeout_sync(); cancel_delayed_work_sync(&reg_check_chans); /* Lock to suppress warnings */ rtnl_lock(); reset_regdomains(true, NULL); rtnl_unlock(); dev_set_uevent_suppress(&reg_pdev->dev, true); platform_device_unregister(reg_pdev); list_for_each_entry_safe(reg_beacon, btmp, &reg_pending_beacons, list) { list_del(&reg_beacon->list); kfree(reg_beacon); } list_for_each_entry_safe(reg_beacon, btmp, &reg_beacon_list, list) { list_del(&reg_beacon->list); kfree(reg_beacon); } list_for_each_entry_safe(reg_request, tmp, &reg_requests_list, list) { list_del(&reg_request->list); kfree(reg_request); } if (!IS_ERR_OR_NULL(regdb)) kfree(regdb); if (!IS_ERR_OR_NULL(cfg80211_user_regdom)) kfree(cfg80211_user_regdom); free_regdb_keyring(); } backport-iwlwifi-dkms-7906/net/wireless/reg.h000066400000000000000000000167601351064634500212640ustar00rootroot00000000000000#ifndef __NET_WIRELESS_REG_H #define __NET_WIRELESS_REG_H #include /* * Copyright 2008-2011 Luis R. Rodriguez * Copyright (C) 2019 Intel Corporation * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/ enum ieee80211_regd_source { REGD_SOURCE_INTERNAL_DB, REGD_SOURCE_CRDA, REGD_SOURCE_CACHED, }; extern const struct ieee80211_regdomain __rcu *cfg80211_regdomain; bool reg_is_valid_request(const char *alpha2); bool is_world_regdom(const char *alpha2); bool reg_supported_dfs_region(enum nl80211_dfs_regions dfs_region); enum nl80211_dfs_regions reg_get_dfs_region(struct wiphy *wiphy); int regulatory_hint_user(const char *alpha2, enum nl80211_user_reg_hint_type user_reg_hint_type); /** * regulatory_hint_indoor - hint operation in indoor env. or not * @is_indoor: if true indicates that user space thinks that the * device is operating in an indoor environment. * @portid: the netlink port ID on which the hint was given. */ int regulatory_hint_indoor(bool is_indoor, u32 portid); /** * regulatory_netlink_notify - notify on released netlink socket * @portid: the netlink socket port ID */ void regulatory_netlink_notify(u32 portid); void wiphy_regulatory_register(struct wiphy *wiphy); void wiphy_regulatory_deregister(struct wiphy *wiphy); int __init regulatory_init(void); void regulatory_exit(void); int set_regdom(const struct ieee80211_regdomain *rd, enum ieee80211_regd_source regd_src); unsigned int reg_get_max_bandwidth(const struct ieee80211_regdomain *rd, const struct ieee80211_reg_rule *rule); bool reg_last_request_cell_base(void); const struct ieee80211_regdomain *get_wiphy_regdom(struct wiphy *wiphy); /** * regulatory_hint_found_beacon - hints a beacon was found on a channel * @wiphy: the wireless device the beacon was found on * @beacon_chan: the channel on which the beacon was found * @gfp: context flags * * This informs the wireless core that a beacon from an AP was found on * the channel provided. This allows the wireless core to make educated * guesses on regulatory settings to help with world roaming. This is only * used for world roaming -- when we do not know our current location. This * is only useful on channels 12, 13 and 14 on the 2 GHz band as channels * 1-11 are already enabled by the world regulatory domain; and on * non-radar 5 GHz channels. * * Drivers do not need to call this, cfg80211 will do it for you after a * scan on a newly found BSS. If you cannot make use of this feature you * can set wiphy->disable_beacon_hints to true. */ int regulatory_hint_found_beacon(struct wiphy *wiphy, struct ieee80211_channel *beacon_chan, gfp_t gfp); /** * regulatory_hint_country_ie - hints a country IE as a regulatory domain * @wiphy: the wireless device giving the hint (used only for reporting * conflicts) * @band: the band on which the country IE was received. This determines * the band we'll process the country IE channel triplets for. * @country_ie: pointer to the country IE * @country_ie_len: length of the country IE * * We will intersect the rd with what CRDA tells us should apply * for the alpha2 this country IE belongs to; this prevents APs from * sending us incorrect or outdated information against a country. * * The AP is expected to provide Country IE channel triplets for the * band it is on. It is technically possible for APs to send channel * country IE triplets even for channels outside of the band they are * in, but for that they would have to use the regulatory extension * in combination with a triplet; this behaviour is currently * not observed. For this reason, if a triplet is seen with channel * information for a band the BSS is not present in, it will be ignored.
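 * * A hedged, illustrative example (caller and values are assumptions, not * taken from this tree): on association, a caller such as mac80211 could * forward a received country element whose data starts with the octets * 'U', 'S', ' ' (alpha2 plus environment byte) followed by triplets, e.g.: * * regulatory_hint_country_ie(wiphy, NL80211_BAND_5GHZ, ie_data, ie_len); * * where ie_data/ie_len are hypothetical names for the element body and its * length; the third octet may also be 'I' (indoor) or 'O' (outdoor), which * reg.c maps to ENVIRON_INDOOR/ENVIRON_OUTDOOR.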
*/ void regulatory_hint_country_ie(struct wiphy *wiphy, enum nl80211_band band, const u8 *country_ie, u8 country_ie_len); /** * regulatory_hint_disconnect - informs that all devices have been disconnected * * Regulatory rules can be enhanced further upon scanning and upon * connection to an AP. These rules become stale if we disconnect * and go to another country, whether or not we suspend and resume. * If we suspend, go to another country and resume we'll automatically * get disconnected shortly after resuming and things will be reset as well. * This routine is a helper to restore regulatory settings to how they were * prior to our first connect attempt. This includes ignoring country IE and * beacon regulatory hints. The ieee80211_regdom module parameter will always * be respected but if a user had set the regulatory domain that will take * precedence. * * Must be called from process context. */ void regulatory_hint_disconnect(void); /** * cfg80211_get_unii - get the U-NII band for the frequency * @freq: the frequency for which we want to get the UNII band. * Get a value specifying the U-NII band the frequency belongs to. * U-NII bands are defined by the FCC in C.F.R 47 part 15. * * Returns -EINVAL if freq is invalid, 0 for UNII-1, 1 for UNII-2A, * 2 for UNII-2B, 3 for UNII-2C and 4 for UNII-3. */ int cfg80211_get_unii(int freq); /** * regulatory_indoor_allowed - is indoor operation allowed */ bool regulatory_indoor_allowed(void); /* * Grace period to timeout pre-CAC results on the dfs channels. This timeout * value is used for the non-ETSI domain. * TODO: Maybe make this timeout available through regdb? */ #define REG_PRE_CAC_EXPIRY_GRACE_MS 2000 /** * regulatory_pre_cac_allowed - if pre-CAC is allowed in the current dfs domain * @wiphy: wiphy for which pre-CAC capability is checked. * Pre-CAC is allowed only in the ETSI domain. */ bool regulatory_pre_cac_allowed(struct wiphy *wiphy); /** * regulatory_propagate_dfs_state - Propagate DFS channel state to other wiphys * @wiphy - wiphy on which radar is detected and the event will be propagated * to other available wiphys having the same DFS domain * @chandef - Channel definition of the radar-detected channel * @dfs_state - DFS channel state to be set * @event - Type of radar event which triggered this DFS state change * * This function should be called with the rtnl lock held.
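 * * A hedged usage sketch (the local variable names are assumptions; the enum * values exist in nl80211): after CAC completes on one radio, a caller could * mirror the state to same-DFS-domain radios with: * * regulatory_propagate_dfs_state(wiphy, &chandef, NL80211_DFS_AVAILABLE, * NL80211_RADAR_CAC_FINISHED); * * which, per the implementation in reg.c, also schedules channel updates on * the receiving rdevs for the DETECTED/CAC_FINISHED events.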
*/ void regulatory_propagate_dfs_state(struct wiphy *wiphy, struct cfg80211_chan_def *chandef, enum nl80211_dfs_state dfs_state, enum nl80211_radar_event event); /** * reg_dfs_domain_same - Checks if both wiphys have the same DFS domain configured * @wiphy1 - wiphy whose dfs_region is to be checked against that of wiphy2 * @wiphy2 - wiphy whose dfs_region is to be checked against that of wiphy1 */ bool reg_dfs_domain_same(struct wiphy *wiphy1, struct wiphy *wiphy2); /** * reg_reload_regdb - reload the regulatory.db firmware file */ int reg_reload_regdb(void); extern const u8 shipped_regdb_certs[]; extern unsigned int shipped_regdb_certs_len; extern const u8 extra_regdb_certs[]; extern unsigned int extra_regdb_certs_len; #endif /* __NET_WIRELESS_REG_H */ backport-iwlwifi-dkms-7906/net/wireless/scan.c000066400000000000000000002033311351064634500214160ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0 /* * cfg80211 scan result handling * * Copyright 2008 Johannes Berg * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright 2016 Intel Deutschland GmbH * Copyright (C) 2018-2019 Intel Corporation */ #include #include #include #include #include #include #include #include #include #include #include #include "core.h" #include "nl80211.h" #include "wext-compat.h" #include "rdev-ops.h" /** * DOC: BSS tree/list structure * * At the top level, the BSS list is kept in both a list in each * registered device (@bss_list) as well as an RB-tree for faster * lookup. In the RB-tree, entries can be looked up using their * channel, MESHID, MESHCONF (for MBSSes) or channel, BSSID, SSID * for other BSSes. * * Due to the possibility of hidden SSIDs, there's a second level * structure, the "hidden_list" and "hidden_beacon_bss" pointer. * The hidden_list connects all BSSes belonging to a single AP * that has a hidden SSID, and connects beacon and probe response * entries. For a probe response entry for a hidden SSID, the * hidden_beacon_bss pointer points to the BSS struct holding the * beacon's information. * * Reference counting is done for all these references except for * the hidden_list, so that a beacon BSS struct that is otherwise * not referenced has one reference for being on the bss_list and * one for each probe response entry that points to it using the * hidden_beacon_bss pointer. When a BSS struct that has such a * pointer is get/put, the refcount update is also propagated to * the referenced struct, this ensures that it cannot get removed * while somebody is using the probe response version. * * Note that the hidden_beacon_bss pointer never changes, due to * the reference counting. Therefore, no locking is needed for * it. * * Also note that the hidden_beacon_bss pointer is only relevant * if the driver uses something other than the IEs, e.g. private * data stored in the BSS struct, since the beacon IEs are * also linked into the probe response struct. */ /* * Limit the number of BSS entries stored in cfg80211. Each one is * a bit over 4k at most, so this limits to roughly 4-5M of memory. * If somebody wants to really attack this though, they'd likely * use small beacons, and only one type of frame, limiting each of * the entries to a much smaller size (in order to generate more * entries in total, so overhead is bigger.)
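 * For example: at the default limit of 1000 entries and a bit over
 * 4 KiB per entry, the worst case is the roughly 4-5 MiB quoted
 * above, per wiphy. Since bss_entries_limit is a module parameter,
 * it can be changed at load time, e.g. (illustrative):
 *   modprobe cfg80211 bss_entries_limit=500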
*/ static int bss_entries_limit = 1000; module_param(bss_entries_limit, int, 0644); MODULE_PARM_DESC(bss_entries_limit, "limit to number of scan BSS entries (per wiphy, default 1000)"); #define IEEE80211_SCAN_RESULT_EXPIRE (CPTCFG_IWL_TIMEOUT_FACTOR * 30 * HZ) static void bss_free(struct cfg80211_internal_bss *bss) { struct cfg80211_bss_ies *ies; if (WARN_ON(atomic_read(&bss->hold))) return; ies = (void *)rcu_access_pointer(bss->pub.beacon_ies); if (ies && !bss->pub.hidden_beacon_bss) kfree_rcu(ies, rcu_head); ies = (void *)rcu_access_pointer(bss->pub.proberesp_ies); if (ies) kfree_rcu(ies, rcu_head); /* * This happens when the module is removed, it doesn't * really matter any more save for completeness */ if (!list_empty(&bss->hidden_list)) list_del(&bss->hidden_list); kfree(bss); } static inline void bss_ref_get(struct cfg80211_registered_device *rdev, struct cfg80211_internal_bss *bss) { lockdep_assert_held(&rdev->bss_lock); bss->refcount++; if (bss->pub.hidden_beacon_bss) { bss = container_of(bss->pub.hidden_beacon_bss, struct cfg80211_internal_bss, pub); bss->refcount++; } if (bss->pub.transmitted_bss) { bss = container_of(bss->pub.transmitted_bss, struct cfg80211_internal_bss, pub); bss->refcount++; } } static inline void bss_ref_put(struct cfg80211_registered_device *rdev, struct cfg80211_internal_bss *bss) { lockdep_assert_held(&rdev->bss_lock); if (bss->pub.hidden_beacon_bss) { struct cfg80211_internal_bss *hbss; hbss = container_of(bss->pub.hidden_beacon_bss, struct cfg80211_internal_bss, pub); hbss->refcount--; if (hbss->refcount == 0) bss_free(hbss); } if (bss->pub.transmitted_bss) { struct cfg80211_internal_bss *tbss; tbss = container_of(bss->pub.transmitted_bss, struct cfg80211_internal_bss, pub); tbss->refcount--; if (tbss->refcount == 0) bss_free(tbss); } bss->refcount--; if (bss->refcount == 0) bss_free(bss); } static bool __cfg80211_unlink_bss(struct cfg80211_registered_device *rdev, struct cfg80211_internal_bss *bss) { lockdep_assert_held(&rdev->bss_lock); if (!list_empty(&bss->hidden_list)) { /* * don't remove the beacon entry if it has * probe responses associated with it */ if (!bss->pub.hidden_beacon_bss) return false; /* * if it's a probe response entry break its * link to the other entries in the group */ list_del_init(&bss->hidden_list); } list_del_init(&bss->list); list_del_init(&bss->pub.nontrans_list); rb_erase(&bss->rbn, &rdev->bss_tree); rdev->bss_entries--; WARN_ONCE((rdev->bss_entries == 0) ^ list_empty(&rdev->bss_list), "rdev bss entries[%d]/list[empty:%d] corruption\n", rdev->bss_entries, list_empty(&rdev->bss_list)); bss_ref_put(rdev, bss); return true; } bool cfg80211_is_element_inherited(const struct element *elem, const struct element *non_inherit_elem) { u8 id_len, ext_id_len, i, loop_len, id; const u8 *list; if (elem->id == WLAN_EID_MULTIPLE_BSSID) return false; if (!non_inherit_elem || non_inherit_elem->datalen < 2) return true; /* * non inheritance element format is: * ext ID (56) | IDs list len | list | extension IDs list len | list * Both lists are optional. Both lengths are mandatory. 
* This means valid length is: * elem_len = 1 (extension ID) + 2 (list len fields) + list lengths */ id_len = non_inherit_elem->data[1]; if (non_inherit_elem->datalen < 3 + id_len) return true; ext_id_len = non_inherit_elem->data[2 + id_len]; if (non_inherit_elem->datalen < 3 + id_len + ext_id_len) return true; if (elem->id == WLAN_EID_EXTENSION) { if (!ext_id_len) return true; loop_len = ext_id_len; list = &non_inherit_elem->data[3 + id_len]; id = elem->data[0]; } else { if (!id_len) return true; loop_len = id_len; list = &non_inherit_elem->data[2]; id = elem->id; } for (i = 0; i < loop_len; i++) { if (list[i] == id) return false; } return true; } EXPORT_SYMBOL(cfg80211_is_element_inherited); static size_t cfg80211_gen_new_ie(const u8 *ie, size_t ielen, const u8 *subelement, size_t subie_len, u8 *new_ie, gfp_t gfp) { u8 *pos, *tmp; const u8 *tmp_old, *tmp_new; const struct element *non_inherit_elem; u8 *sub_copy; /* copy subelement as we need to change its content to * mark an ie after it is processed. */ sub_copy = kmalloc(subie_len, gfp); if (!sub_copy) return 0; memcpy(sub_copy, subelement, subie_len); pos = &new_ie[0]; /* set new ssid */ tmp_new = cfg80211_find_ie(WLAN_EID_SSID, sub_copy, subie_len); if (tmp_new) { memcpy(pos, tmp_new, tmp_new[1] + 2); pos += (tmp_new[1] + 2); } /* get non inheritance list if exists */ non_inherit_elem = cfg80211_find_ext_elem(WLAN_EID_EXT_NON_INHERITANCE, sub_copy, subie_len); /* go through IEs in ie (skip SSID) and subelement, * merge them into new_ie */ tmp_old = cfg80211_find_ie(WLAN_EID_SSID, ie, ielen); tmp_old = (tmp_old) ? tmp_old + tmp_old[1] + 2 : ie; while (tmp_old + tmp_old[1] + 2 - ie <= ielen) { if (tmp_old[0] == 0) { tmp_old++; continue; } if (tmp_old[0] == WLAN_EID_EXTENSION) tmp = (u8 *)cfg80211_find_ext_ie(tmp_old[2], sub_copy, subie_len); else tmp = (u8 *)cfg80211_find_ie(tmp_old[0], sub_copy, subie_len); if (!tmp) { const struct element *old_elem = (void *)tmp_old; /* ie in old ie but not in subelement */ if (cfg80211_is_element_inherited(old_elem, non_inherit_elem)) { memcpy(pos, tmp_old, tmp_old[1] + 2); pos += tmp_old[1] + 2; } } else { /* ie in transmitting ie also in subelement, * copy from subelement and flag the ie in subelement * as copied (by setting eid field to WLAN_EID_SSID, * which is skipped anyway). * For vendor ie, compare OUI + type + subType to * determine if they are the same ie. 
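 * (The 5-byte memcmp below covers exactly the 3-byte OUI plus the
 * two octets that follow the element header; e.g. for WMM that is
 * OUI 00:50:f2, OUI type 2, and the information/parameter subtype.)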
*/ if (tmp_old[0] == WLAN_EID_VENDOR_SPECIFIC) { if (!memcmp(tmp_old + 2, tmp + 2, 5)) { /* same vendor ie, copy from * subelement */ memcpy(pos, tmp, tmp[1] + 2); pos += tmp[1] + 2; tmp[0] = WLAN_EID_SSID; } else { memcpy(pos, tmp_old, tmp_old[1] + 2); pos += tmp_old[1] + 2; } } else { /* copy ie from subelement into new ie */ memcpy(pos, tmp, tmp[1] + 2); pos += tmp[1] + 2; tmp[0] = WLAN_EID_SSID; } } if (tmp_old + tmp_old[1] + 2 - ie == ielen) break; tmp_old += tmp_old[1] + 2; } /* go through subelement again to check if there is any ie not * copied to new ie, skip ssid, capability, bssid-index ie */ tmp_new = sub_copy; while (tmp_new + tmp_new[1] + 2 - sub_copy <= subie_len) { if (!(tmp_new[0] == WLAN_EID_NON_TX_BSSID_CAP || tmp_new[0] == WLAN_EID_SSID)) { memcpy(pos, tmp_new, tmp_new[1] + 2); pos += tmp_new[1] + 2; } if (tmp_new + tmp_new[1] + 2 - sub_copy == subie_len) break; tmp_new += tmp_new[1] + 2; } kfree(sub_copy); return pos - new_ie; } static bool is_bss(struct cfg80211_bss *a, const u8 *bssid, const u8 *ssid, size_t ssid_len) { const struct cfg80211_bss_ies *ies; const u8 *ssidie; if (bssid && !ether_addr_equal(a->bssid, bssid)) return false; if (!ssid) return true; ies = rcu_access_pointer(a->ies); if (!ies) return false; ssidie = cfg80211_find_ie(WLAN_EID_SSID, ies->data, ies->len); if (!ssidie) return false; if (ssidie[1] != ssid_len) return false; return memcmp(ssidie + 2, ssid, ssid_len) == 0; } static int cfg80211_add_nontrans_list(struct cfg80211_bss *trans_bss, struct cfg80211_bss *nontrans_bss) { const u8 *ssid; size_t ssid_len; struct cfg80211_bss *bss = NULL; rcu_read_lock(); ssid = ieee80211_bss_get_ie(nontrans_bss, WLAN_EID_SSID); if (!ssid) { rcu_read_unlock(); return -EINVAL; } ssid_len = ssid[1]; ssid = ssid + 2; rcu_read_unlock(); /* check if nontrans_bss is in the list */ list_for_each_entry(bss, &trans_bss->nontrans_list, nontrans_list) { if (is_bss(bss, nontrans_bss->bssid, ssid, ssid_len)) return 0; } /* add to the list */ list_add_tail(&nontrans_bss->nontrans_list, &trans_bss->nontrans_list); return 0; } static void __cfg80211_bss_expire(struct cfg80211_registered_device *rdev, unsigned long expire_time) { struct cfg80211_internal_bss *bss, *tmp; bool expired = false; lockdep_assert_held(&rdev->bss_lock); list_for_each_entry_safe(bss, tmp, &rdev->bss_list, list) { if (atomic_read(&bss->hold)) continue; if (!time_after(expire_time, bss->ts)) continue; if (__cfg80211_unlink_bss(rdev, bss)) expired = true; } if (expired) rdev->bss_generation++; } static bool cfg80211_bss_expire_oldest(struct cfg80211_registered_device *rdev) { struct cfg80211_internal_bss *bss, *oldest = NULL; bool ret; lockdep_assert_held(&rdev->bss_lock); list_for_each_entry(bss, &rdev->bss_list, list) { if (atomic_read(&bss->hold)) continue; if (!list_empty(&bss->hidden_list) && !bss->pub.hidden_beacon_bss) continue; if (oldest && time_before(oldest->ts, bss->ts)) continue; oldest = bss; } if (WARN_ON(!oldest)) return false; /* * The callers make sure to increase rdev->bss_generation if anything * gets removed (and a new entry added), so there's no need to also do * it here. 
*/ ret = __cfg80211_unlink_bss(rdev, oldest); WARN_ON(!ret); return ret; } void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, bool send_message) { struct cfg80211_scan_request *request; struct wireless_dev *wdev; struct sk_buff *msg; #ifdef CPTCFG_CFG80211_WEXT union iwreq_data wrqu; #endif ASSERT_RTNL(); if (rdev->scan_msg) { nl80211_send_scan_msg(rdev, rdev->scan_msg); rdev->scan_msg = NULL; return; } request = rdev->scan_req; if (!request) return; wdev = request->wdev; /* * This must be before sending the other events! * Otherwise, wpa_supplicant gets completely confused with * wext events. */ if (wdev->netdev) cfg80211_sme_scan_done(wdev->netdev); if (!request->info.aborted && request->flags & NL80211_SCAN_FLAG_FLUSH) { /* flush entries from previous scans */ spin_lock_bh(&rdev->bss_lock); __cfg80211_bss_expire(rdev, request->scan_start); spin_unlock_bh(&rdev->bss_lock); } msg = nl80211_build_scan_msg(rdev, wdev, request->info.aborted); #ifdef CPTCFG_CFG80211_WEXT if (wdev->netdev && !request->info.aborted) { memset(&wrqu, 0, sizeof(wrqu)); wireless_send_event(wdev->netdev, SIOCGIWSCAN, &wrqu, NULL); } #endif if (wdev->netdev) dev_put(wdev->netdev); rdev->scan_req = NULL; kfree(request); if (!send_message) rdev->scan_msg = msg; else nl80211_send_scan_msg(rdev, msg); } void __cfg80211_scan_done(struct work_struct *wk) { struct cfg80211_registered_device *rdev; rdev = container_of(wk, struct cfg80211_registered_device, scan_done_wk); rtnl_lock(); ___cfg80211_scan_done(rdev, true); rtnl_unlock(); } void cfg80211_scan_done(struct cfg80211_scan_request *request, struct cfg80211_scan_info *info) { trace_cfg80211_scan_done(request, info); WARN_ON(request != wiphy_to_rdev(request->wiphy)->scan_req); request->info = *info; request->notified = true; queue_work(cfg80211_wq, &wiphy_to_rdev(request->wiphy)->scan_done_wk); } EXPORT_SYMBOL(cfg80211_scan_done); void cfg80211_add_sched_scan_req(struct cfg80211_registered_device *rdev, struct cfg80211_sched_scan_request *req) { ASSERT_RTNL(); list_add_rcu(&req->list, &rdev->sched_scan_req_list); } static void cfg80211_del_sched_scan_req(struct cfg80211_registered_device *rdev, struct cfg80211_sched_scan_request *req) { ASSERT_RTNL(); list_del_rcu(&req->list); kfree_rcu(req, rcu_head); } static struct cfg80211_sched_scan_request * cfg80211_find_sched_scan_req(struct cfg80211_registered_device *rdev, u64 reqid) { struct cfg80211_sched_scan_request *pos; WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held()); list_for_each_entry_rcu(pos, &rdev->sched_scan_req_list, list) { if (pos->reqid == reqid) return pos; } return NULL; } /* * Determines if a scheduled scan request can be handled. When a legacy * scheduled scan is running, no other scheduled scan is allowed regardless of * whether the request is for a legacy or multi-support scan. When a * multi-support scheduled scan is running, a request for a legacy scan is not * allowed. In this case a request for a multi-support scan can be handled if * resources are available, i.e. the struct wiphy::max_sched_scan_reqs limit * is not yet reached.
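 * For example (illustrative): with struct wiphy::max_sched_scan_reqs
 * set to 2, two multi-support requests may run concurrently, a third
 * one fails with -ENOSPC, and any legacy request made while they run
 * fails with -EINPROGRESS.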
*/ int cfg80211_sched_scan_req_possible(struct cfg80211_registered_device *rdev, bool want_multi) { struct cfg80211_sched_scan_request *pos; int i = 0; list_for_each_entry(pos, &rdev->sched_scan_req_list, list) { /* request id zero means legacy in progress */ if (!i && !pos->reqid) return -EINPROGRESS; i++; } if (i) { /* no legacy allowed when multi request(s) are active */ if (!want_multi) return -EINPROGRESS; /* resource limit reached */ if (i == rdev->wiphy.max_sched_scan_reqs) return -ENOSPC; } return 0; } void cfg80211_sched_scan_results_wk(struct work_struct *work) { struct cfg80211_registered_device *rdev; struct cfg80211_sched_scan_request *req, *tmp; rdev = container_of(work, struct cfg80211_registered_device, sched_scan_res_wk); rtnl_lock(); list_for_each_entry_safe(req, tmp, &rdev->sched_scan_req_list, list) { if (req->report_results) { req->report_results = false; if (req->flags & NL80211_SCAN_FLAG_FLUSH) { /* flush entries from previous scans */ spin_lock_bh(&rdev->bss_lock); __cfg80211_bss_expire(rdev, req->scan_start); spin_unlock_bh(&rdev->bss_lock); req->scan_start = jiffies; } nl80211_send_sched_scan(req, NL80211_CMD_SCHED_SCAN_RESULTS); } } rtnl_unlock(); } void cfg80211_sched_scan_results(struct wiphy *wiphy, u64 reqid) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); struct cfg80211_sched_scan_request *request; trace_cfg80211_sched_scan_results(wiphy, reqid); /* ignore if we're not scanning */ rcu_read_lock(); request = cfg80211_find_sched_scan_req(rdev, reqid); if (request) { request->report_results = true; queue_work(cfg80211_wq, &rdev->sched_scan_res_wk); } rcu_read_unlock(); } EXPORT_SYMBOL(cfg80211_sched_scan_results); void cfg80211_sched_scan_stopped_rtnl(struct wiphy *wiphy, u64 reqid) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); ASSERT_RTNL(); trace_cfg80211_sched_scan_stopped(wiphy, reqid); __cfg80211_stop_sched_scan(rdev, reqid, true); } EXPORT_SYMBOL(cfg80211_sched_scan_stopped_rtnl); void cfg80211_sched_scan_stopped(struct wiphy *wiphy, u64 reqid) { rtnl_lock(); cfg80211_sched_scan_stopped_rtnl(wiphy, reqid); rtnl_unlock(); } EXPORT_SYMBOL(cfg80211_sched_scan_stopped); int cfg80211_stop_sched_scan_req(struct cfg80211_registered_device *rdev, struct cfg80211_sched_scan_request *req, bool driver_initiated) { ASSERT_RTNL(); if (!driver_initiated) { int err = rdev_sched_scan_stop(rdev, req->dev, req->reqid); if (err) return err; } nl80211_send_sched_scan(req, NL80211_CMD_SCHED_SCAN_STOPPED); cfg80211_del_sched_scan_req(rdev, req); return 0; } int __cfg80211_stop_sched_scan(struct cfg80211_registered_device *rdev, u64 reqid, bool driver_initiated) { struct cfg80211_sched_scan_request *sched_scan_req; ASSERT_RTNL(); sched_scan_req = cfg80211_find_sched_scan_req(rdev, reqid); if (!sched_scan_req) return -ENOENT; return cfg80211_stop_sched_scan_req(rdev, sched_scan_req, driver_initiated); } void cfg80211_bss_age(struct cfg80211_registered_device *rdev, unsigned long age_secs) { struct cfg80211_internal_bss *bss; unsigned long age_jiffies = msecs_to_jiffies(age_secs * MSEC_PER_SEC); spin_lock_bh(&rdev->bss_lock); list_for_each_entry(bss, &rdev->bss_list, list) bss->ts -= age_jiffies; spin_unlock_bh(&rdev->bss_lock); } void cfg80211_bss_expire(struct cfg80211_registered_device *rdev) { __cfg80211_bss_expire(rdev, jiffies - IEEE80211_SCAN_RESULT_EXPIRE); } const struct element * cfg80211_find_elem_match(u8 eid, const u8 *ies, unsigned int len, const u8 *match, unsigned int match_len, unsigned int match_offset) { const struct 
element *elem; for_each_element_id(elem, eid, ies, len) { if (elem->datalen >= match_offset + match_len && !memcmp(elem->data + match_offset, match, match_len)) return elem; } return NULL; } EXPORT_SYMBOL(cfg80211_find_elem_match); const struct element *cfg80211_find_vendor_elem(unsigned int oui, int oui_type, const u8 *ies, unsigned int len) { const struct element *elem; u8 match[] = { oui >> 16, oui >> 8, oui, oui_type }; int match_len = (oui_type < 0) ? 3 : sizeof(match); if (WARN_ON(oui_type > 0xff)) return NULL; elem = cfg80211_find_elem_match(WLAN_EID_VENDOR_SPECIFIC, ies, len, match, match_len, 0); if (!elem || elem->datalen < 4) return NULL; return elem; } EXPORT_SYMBOL(cfg80211_find_vendor_elem); /** * enum bss_compare_mode - BSS compare mode * @BSS_CMP_REGULAR: regular compare mode (for insertion and normal find) * @BSS_CMP_HIDE_ZLEN: find hidden SSID with zero-length mode * @BSS_CMP_HIDE_NUL: find hidden SSID with NUL-ed out mode */ enum bss_compare_mode { BSS_CMP_REGULAR, BSS_CMP_HIDE_ZLEN, BSS_CMP_HIDE_NUL, }; static int cmp_bss(struct cfg80211_bss *a, struct cfg80211_bss *b, enum bss_compare_mode mode) { const struct cfg80211_bss_ies *a_ies, *b_ies; const u8 *ie1 = NULL; const u8 *ie2 = NULL; int i, r; if (a->channel != b->channel) return b->channel->center_freq - a->channel->center_freq; a_ies = rcu_access_pointer(a->ies); if (!a_ies) return -1; b_ies = rcu_access_pointer(b->ies); if (!b_ies) return 1; if (WLAN_CAPABILITY_IS_STA_BSS(a->capability)) ie1 = cfg80211_find_ie(WLAN_EID_MESH_ID, a_ies->data, a_ies->len); if (WLAN_CAPABILITY_IS_STA_BSS(b->capability)) ie2 = cfg80211_find_ie(WLAN_EID_MESH_ID, b_ies->data, b_ies->len); if (ie1 && ie2) { int mesh_id_cmp; if (ie1[1] == ie2[1]) mesh_id_cmp = memcmp(ie1 + 2, ie2 + 2, ie1[1]); else mesh_id_cmp = ie2[1] - ie1[1]; ie1 = cfg80211_find_ie(WLAN_EID_MESH_CONFIG, a_ies->data, a_ies->len); ie2 = cfg80211_find_ie(WLAN_EID_MESH_CONFIG, b_ies->data, b_ies->len); if (ie1 && ie2) { if (mesh_id_cmp) return mesh_id_cmp; if (ie1[1] != ie2[1]) return ie2[1] - ie1[1]; return memcmp(ie1 + 2, ie2 + 2, ie1[1]); } } r = memcmp(a->bssid, b->bssid, sizeof(a->bssid)); if (r) return r; ie1 = cfg80211_find_ie(WLAN_EID_SSID, a_ies->data, a_ies->len); ie2 = cfg80211_find_ie(WLAN_EID_SSID, b_ies->data, b_ies->len); if (!ie1 && !ie2) return 0; /* * Note that with "hide_ssid", the function returns a match if * the already-present BSS ("b") is a hidden SSID beacon for * the new BSS ("a"). */ /* sort missing IE before (left of) present IE */ if (!ie1) return -1; if (!ie2) return 1; switch (mode) { case BSS_CMP_HIDE_ZLEN: /* * In ZLEN mode we assume the BSS entry we're * looking for has a zero-length SSID. So if * the one we're looking at right now has that, * return 0. Otherwise, return the difference * in length, but since we're looking for the * 0-length it's really equivalent to returning * the length of the one we're looking at. * * No content comparison is needed as we assume * the content length is zero. 
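 * For example, an entry whose SSID IE is zero-length compares as a
 * match here (returns 0), while one carrying a 3-octet SSID returns
 * 3 and the tree search continues.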
*/ return ie2[1]; case BSS_CMP_REGULAR: default: /* sort by length first, then by contents */ if (ie1[1] != ie2[1]) return ie2[1] - ie1[1]; return memcmp(ie1 + 2, ie2 + 2, ie1[1]); case BSS_CMP_HIDE_NUL: if (ie1[1] != ie2[1]) return ie2[1] - ie1[1]; /* this is equivalent to memcmp(zeroes, ie2 + 2, len) */ for (i = 0; i < ie2[1]; i++) if (ie2[i + 2]) return -1; return 0; } } static bool cfg80211_bss_type_match(u16 capability, enum nl80211_band band, enum ieee80211_bss_type bss_type) { bool ret = true; u16 mask, val; if (bss_type == IEEE80211_BSS_TYPE_ANY) return ret; if (band == NL80211_BAND_60GHZ) { mask = WLAN_CAPABILITY_DMG_TYPE_MASK; switch (bss_type) { case IEEE80211_BSS_TYPE_ESS: val = WLAN_CAPABILITY_DMG_TYPE_AP; break; case IEEE80211_BSS_TYPE_PBSS: val = WLAN_CAPABILITY_DMG_TYPE_PBSS; break; case IEEE80211_BSS_TYPE_IBSS: val = WLAN_CAPABILITY_DMG_TYPE_IBSS; break; default: return false; } } else { mask = WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_IBSS; switch (bss_type) { case IEEE80211_BSS_TYPE_ESS: val = WLAN_CAPABILITY_ESS; break; case IEEE80211_BSS_TYPE_IBSS: val = WLAN_CAPABILITY_IBSS; break; case IEEE80211_BSS_TYPE_MBSS: val = 0; break; default: return false; } } ret = ((capability & mask) == val); return ret; } /* Returned bss is reference counted and must be cleaned up appropriately. */ struct cfg80211_bss *cfg80211_get_bss(struct wiphy *wiphy, struct ieee80211_channel *channel, const u8 *bssid, const u8 *ssid, size_t ssid_len, enum ieee80211_bss_type bss_type, enum ieee80211_privacy privacy) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); struct cfg80211_internal_bss *bss, *res = NULL; unsigned long now = jiffies; int bss_privacy; trace_cfg80211_get_bss(wiphy, channel, bssid, ssid, ssid_len, bss_type, privacy); spin_lock_bh(&rdev->bss_lock); list_for_each_entry(bss, &rdev->bss_list, list) { if (!cfg80211_bss_type_match(bss->pub.capability, bss->pub.channel->band, bss_type)) continue; bss_privacy = (bss->pub.capability & WLAN_CAPABILITY_PRIVACY); if ((privacy == IEEE80211_PRIVACY_ON && !bss_privacy) || (privacy == IEEE80211_PRIVACY_OFF && bss_privacy)) continue; if (channel && bss->pub.channel != channel) continue; if (!is_valid_ether_addr(bss->pub.bssid)) continue; /* Don't get expired BSS structs */ if (time_after(now, bss->ts + IEEE80211_SCAN_RESULT_EXPIRE) && !atomic_read(&bss->hold)) continue; if (is_bss(&bss->pub, bssid, ssid, ssid_len)) { res = bss; bss_ref_get(rdev, res); break; } } spin_unlock_bh(&rdev->bss_lock); if (!res) return NULL; trace_cfg80211_return_bss(&res->pub); return &res->pub; } EXPORT_SYMBOL(cfg80211_get_bss); static void rb_insert_bss(struct cfg80211_registered_device *rdev, struct cfg80211_internal_bss *bss) { struct rb_node **p = &rdev->bss_tree.rb_node; struct rb_node *parent = NULL; struct cfg80211_internal_bss *tbss; int cmp; while (*p) { parent = *p; tbss = rb_entry(parent, struct cfg80211_internal_bss, rbn); cmp = cmp_bss(&bss->pub, &tbss->pub, BSS_CMP_REGULAR); if (WARN_ON(!cmp)) { /* will sort of leak this BSS */ return; } if (cmp < 0) p = &(*p)->rb_left; else p = &(*p)->rb_right; } rb_link_node(&bss->rbn, parent, p); rb_insert_color(&bss->rbn, &rdev->bss_tree); } static struct cfg80211_internal_bss * rb_find_bss(struct cfg80211_registered_device *rdev, struct cfg80211_internal_bss *res, enum bss_compare_mode mode) { struct rb_node *n = rdev->bss_tree.rb_node; struct cfg80211_internal_bss *bss; int r; while (n) { bss = rb_entry(n, struct cfg80211_internal_bss, rbn); r = cmp_bss(&res->pub, &bss->pub, mode); if (r == 0) return 
bss; else if (r < 0) n = n->rb_left; else n = n->rb_right; } return NULL; } static bool cfg80211_combine_bsses(struct cfg80211_registered_device *rdev, struct cfg80211_internal_bss *new) { const struct cfg80211_bss_ies *ies; struct cfg80211_internal_bss *bss; const u8 *ie; int i, ssidlen; u8 fold = 0; u32 n_entries = 0; ies = rcu_access_pointer(new->pub.beacon_ies); if (WARN_ON(!ies)) return false; ie = cfg80211_find_ie(WLAN_EID_SSID, ies->data, ies->len); if (!ie) { /* nothing to do */ return true; } ssidlen = ie[1]; for (i = 0; i < ssidlen; i++) fold |= ie[2 + i]; if (fold) { /* not a hidden SSID */ return true; } /* This is the bad part ... */ list_for_each_entry(bss, &rdev->bss_list, list) { /* * we're iterating all the entries anyway, so take the * opportunity to validate the list length accounting */ n_entries++; if (!ether_addr_equal(bss->pub.bssid, new->pub.bssid)) continue; if (bss->pub.channel != new->pub.channel) continue; if (bss->pub.scan_width != new->pub.scan_width) continue; if (rcu_access_pointer(bss->pub.beacon_ies)) continue; ies = rcu_access_pointer(bss->pub.ies); if (!ies) continue; ie = cfg80211_find_ie(WLAN_EID_SSID, ies->data, ies->len); if (!ie) continue; if (ssidlen && ie[1] != ssidlen) continue; if (WARN_ON_ONCE(bss->pub.hidden_beacon_bss)) continue; if (WARN_ON_ONCE(!list_empty(&bss->hidden_list))) list_del(&bss->hidden_list); /* combine them */ list_add(&bss->hidden_list, &new->hidden_list); bss->pub.hidden_beacon_bss = &new->pub; new->refcount += bss->refcount; rcu_assign_pointer(bss->pub.beacon_ies, new->pub.beacon_ies); } WARN_ONCE(n_entries != rdev->bss_entries, "rdev bss entries[%d]/list[len:%d] corruption\n", rdev->bss_entries, n_entries); return true; } struct cfg80211_non_tx_bss { struct cfg80211_bss *tx_bss; u8 max_bssid_indicator; u8 bssid_index; }; /* Returned bss is reference counted and must be cleaned up appropriately. */ static struct cfg80211_internal_bss * cfg80211_bss_update(struct cfg80211_registered_device *rdev, struct cfg80211_internal_bss *tmp, bool signal_valid) { struct cfg80211_internal_bss *found = NULL; if (WARN_ON(!tmp->pub.channel)) return NULL; tmp->ts = jiffies; spin_lock_bh(&rdev->bss_lock); if (WARN_ON(!rcu_access_pointer(tmp->pub.ies))) { spin_unlock_bh(&rdev->bss_lock); return NULL; } found = rb_find_bss(rdev, tmp, BSS_CMP_REGULAR); if (found) { /* Update IEs */ if (rcu_access_pointer(tmp->pub.proberesp_ies)) { const struct cfg80211_bss_ies *old; old = rcu_access_pointer(found->pub.proberesp_ies); rcu_assign_pointer(found->pub.proberesp_ies, tmp->pub.proberesp_ies); /* Override possible earlier Beacon frame IEs */ rcu_assign_pointer(found->pub.ies, tmp->pub.proberesp_ies); if (old) kfree_rcu((struct cfg80211_bss_ies *)old, rcu_head); } else if (rcu_access_pointer(tmp->pub.beacon_ies)) { const struct cfg80211_bss_ies *old; struct cfg80211_internal_bss *bss; if (found->pub.hidden_beacon_bss && !list_empty(&found->hidden_list)) { const struct cfg80211_bss_ies *f; /* * The found BSS struct is one of the probe * response members of a group, but we're * receiving a beacon (beacon_ies in the tmp * bss is used). This can only mean that the * AP changed its beacon from not having an * SSID to showing it, which is confusing so * drop this information. 
*/ f = rcu_access_pointer(tmp->pub.beacon_ies); kfree_rcu((struct cfg80211_bss_ies *)f, rcu_head); goto drop; } old = rcu_access_pointer(found->pub.beacon_ies); rcu_assign_pointer(found->pub.beacon_ies, tmp->pub.beacon_ies); /* Override IEs if they were from a beacon before */ if (old == rcu_access_pointer(found->pub.ies)) rcu_assign_pointer(found->pub.ies, tmp->pub.beacon_ies); /* Assign beacon IEs to all sub entries */ list_for_each_entry(bss, &found->hidden_list, hidden_list) { const struct cfg80211_bss_ies *ies; ies = rcu_access_pointer(bss->pub.beacon_ies); WARN_ON(ies != old); rcu_assign_pointer(bss->pub.beacon_ies, tmp->pub.beacon_ies); } if (old) kfree_rcu((struct cfg80211_bss_ies *)old, rcu_head); } found->pub.beacon_interval = tmp->pub.beacon_interval; /* * don't update the signal if beacon was heard on * adjacent channel. */ if (signal_valid) found->pub.signal = tmp->pub.signal; found->pub.capability = tmp->pub.capability; found->ts = tmp->ts; found->ts_boottime = tmp->ts_boottime; found->parent_tsf = tmp->parent_tsf; found->pub.chains = tmp->pub.chains; memcpy(found->pub.chain_signal, tmp->pub.chain_signal, IEEE80211_MAX_CHAINS); ether_addr_copy(found->parent_bssid, tmp->parent_bssid); found->pub.max_bssid_indicator = tmp->pub.max_bssid_indicator; found->pub.bssid_index = tmp->pub.bssid_index; } else { struct cfg80211_internal_bss *new; struct cfg80211_internal_bss *hidden; struct cfg80211_bss_ies *ies; /* * create a copy -- the "res" variable that is passed in * is allocated on the stack since it's not needed in the * more common case of an update */ new = kzalloc(sizeof(*new) + rdev->wiphy.bss_priv_size, GFP_ATOMIC); if (!new) { ies = (void *)rcu_dereference(tmp->pub.beacon_ies); if (ies) kfree_rcu(ies, rcu_head); ies = (void *)rcu_dereference(tmp->pub.proberesp_ies); if (ies) kfree_rcu(ies, rcu_head); goto drop; } memcpy(new, tmp, sizeof(*new)); new->refcount = 1; INIT_LIST_HEAD(&new->hidden_list); INIT_LIST_HEAD(&new->pub.nontrans_list); if (rcu_access_pointer(tmp->pub.proberesp_ies)) { hidden = rb_find_bss(rdev, tmp, BSS_CMP_HIDE_ZLEN); if (!hidden) hidden = rb_find_bss(rdev, tmp, BSS_CMP_HIDE_NUL); if (hidden) { new->pub.hidden_beacon_bss = &hidden->pub; list_add(&new->hidden_list, &hidden->hidden_list); hidden->refcount++; rcu_assign_pointer(new->pub.beacon_ies, hidden->pub.beacon_ies); } } else { /* * Ok so we found a beacon, and don't have an entry. If * it's a beacon with hidden SSID, we might be in for an * expensive search for any probe responses that should * be grouped with this beacon for updates ... */ if (!cfg80211_combine_bsses(rdev, new)) { kfree(new); goto drop; } } if (rdev->bss_entries >= bss_entries_limit && !cfg80211_bss_expire_oldest(rdev)) { kfree(new); goto drop; } /* This must be before the call to bss_ref_get */ if (tmp->pub.transmitted_bss) { struct cfg80211_internal_bss *pbss = container_of(tmp->pub.transmitted_bss, struct cfg80211_internal_bss, pub); new->pub.transmitted_bss = tmp->pub.transmitted_bss; bss_ref_get(rdev, pbss); } list_add_tail(&new->list, &rdev->bss_list); rdev->bss_entries++; rb_insert_bss(rdev, new); found = new; } rdev->bss_generation++; bss_ref_get(rdev, found); spin_unlock_bh(&rdev->bss_lock); return found; drop: spin_unlock_bh(&rdev->bss_lock); return NULL; } /* * Update RX channel information based on the available frame payload * information. 
This is mainly for the 2.4 GHz band where frames can be received * from neighboring channels and the Beacon frames use the DSSS Parameter Set * element to indicate the current (transmitting) channel, but this might also * be needed on other bands if RX frequency does not match with the actual * operating channel of a BSS. */ static struct ieee80211_channel * cfg80211_get_bss_channel(struct wiphy *wiphy, const u8 *ie, size_t ielen, struct ieee80211_channel *channel, enum nl80211_bss_scan_width scan_width) { const u8 *tmp; u32 freq; int channel_number = -1; struct ieee80211_channel *alt_channel; tmp = cfg80211_find_ie(WLAN_EID_DS_PARAMS, ie, ielen); if (tmp && tmp[1] == 1) { channel_number = tmp[2]; } else { tmp = cfg80211_find_ie(WLAN_EID_HT_OPERATION, ie, ielen); if (tmp && tmp[1] >= sizeof(struct ieee80211_ht_operation)) { struct ieee80211_ht_operation *htop = (void *)(tmp + 2); channel_number = htop->primary_chan; } } if (channel_number < 0) { /* No channel information in frame payload */ return channel; } freq = ieee80211_channel_to_frequency(channel_number, channel->band); alt_channel = ieee80211_get_channel(wiphy, freq); if (!alt_channel) { if (channel->band == NL80211_BAND_2GHZ) { /* * Better not allow unexpected channels when that could * be going beyond the 1-11 range (e.g., discovering * BSS on channel 12 when radio is configured for * channel 11. */ return NULL; } /* No match for the payload channel number - ignore it */ return channel; } if (scan_width == NL80211_BSS_CHAN_WIDTH_10 || scan_width == NL80211_BSS_CHAN_WIDTH_5) { /* * Ignore channel number in 5 and 10 MHz channels where there * may not be an n:1 or 1:n mapping between frequencies and * channel numbers. */ return channel; } /* * Use the channel determined through the payload channel number * instead of the RX channel reported by the driver. */ if (alt_channel->flags & IEEE80211_CHAN_DISABLED) return NULL; return alt_channel; } /* Returned bss is reference counted and must be cleaned up appropriately. */ static struct cfg80211_bss * cfg80211_inform_single_bss_data(struct wiphy *wiphy, struct cfg80211_inform_bss *data, enum cfg80211_bss_frame_type ftype, const u8 *bssid, u64 tsf, u16 capability, u16 beacon_interval, const u8 *ie, size_t ielen, struct cfg80211_non_tx_bss *non_tx_data, gfp_t gfp) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); struct cfg80211_bss_ies *ies; struct ieee80211_channel *channel; struct cfg80211_internal_bss tmp = {}, *res; int bss_type; bool signal_valid; if (WARN_ON(!wiphy)) return NULL; if (WARN_ON(wiphy->signal_type == CFG80211_SIGNAL_TYPE_UNSPEC && (data->signal < 0 || data->signal > 100))) return NULL; channel = cfg80211_get_bss_channel(wiphy, ie, ielen, data->chan, data->scan_width); if (!channel) return NULL; memcpy(tmp.pub.bssid, bssid, ETH_ALEN); tmp.pub.channel = channel; tmp.pub.scan_width = data->scan_width; tmp.pub.signal = data->signal; tmp.pub.beacon_interval = beacon_interval; tmp.pub.capability = capability; tmp.ts_boottime = data->boottime_ns; if (non_tx_data) { tmp.pub.transmitted_bss = non_tx_data->tx_bss; tmp.pub.bssid_index = non_tx_data->bssid_index; tmp.pub.max_bssid_indicator = non_tx_data->max_bssid_indicator; } /* * If we do not know here whether the IEs are from a Beacon or Probe * Response frame, we need to pick one of the options and only use it * with the driver that does not provide the full Beacon/Probe Response * frame. 
Use Beacon frame pointer to avoid indicating that this should * override the IEs pointer should we have received an earlier * indication of Probe Response data. */ ies = kzalloc(sizeof(*ies) + ielen, gfp); if (!ies) return NULL; ies->len = ielen; ies->tsf = tsf; ies->from_beacon = false; memcpy(ies->data, ie, ielen); switch (ftype) { case CFG80211_BSS_FTYPE_BEACON: ies->from_beacon = true; /* fall through */ case CFG80211_BSS_FTYPE_UNKNOWN: rcu_assign_pointer(tmp.pub.beacon_ies, ies); break; case CFG80211_BSS_FTYPE_PRESP: rcu_assign_pointer(tmp.pub.proberesp_ies, ies); break; } rcu_assign_pointer(tmp.pub.ies, ies); signal_valid = data->chan == channel; res = cfg80211_bss_update(wiphy_to_rdev(wiphy), &tmp, signal_valid); if (!res) return NULL; if (channel->band == NL80211_BAND_60GHZ) { bss_type = res->pub.capability & WLAN_CAPABILITY_DMG_TYPE_MASK; if (bss_type == WLAN_CAPABILITY_DMG_TYPE_AP || bss_type == WLAN_CAPABILITY_DMG_TYPE_PBSS) regulatory_hint_found_beacon(wiphy, channel, gfp); } else { if (res->pub.capability & WLAN_CAPABILITY_ESS) regulatory_hint_found_beacon(wiphy, channel, gfp); } if (non_tx_data && non_tx_data->tx_bss) { /* this is a nontransmitting bss, we need to add it to * transmitting bss' list if it is not there */ if (cfg80211_add_nontrans_list(non_tx_data->tx_bss, &res->pub)) { if (__cfg80211_unlink_bss(rdev, res)) rdev->bss_generation++; } } trace_cfg80211_return_bss(&res->pub); /* cfg80211_bss_update gives us a referenced result */ return &res->pub; } static const struct element *cfg80211_get_profile_continuation(const u8 *ie, size_t ielen, const struct element *mbssid_elem, const struct element *sub_elem) { const u8 *mbssid_end = mbssid_elem->data + mbssid_elem->datalen; const struct element *next_mbssid; const struct element *next_sub; next_mbssid = cfg80211_find_elem(WLAN_EID_MULTIPLE_BSSID, mbssid_end, ielen - (mbssid_end - ie)); /* * If it is not the last subelement in the current MBSSID IE or there * isn't a next MBSSID IE - profile is complete. */ if ((sub_elem->data + sub_elem->datalen < mbssid_end - 1) || !next_mbssid) return NULL; /* For any length error, just return NULL */ if (next_mbssid->datalen < 4) return NULL; next_sub = (void *)&next_mbssid->data[1]; if (next_mbssid->data + next_mbssid->datalen < next_sub->data + next_sub->datalen) return NULL; if (next_sub->id != 0 || next_sub->datalen < 2) return NULL; /* * Check if the first element in the next sub element is a start * of a new profile */ return next_sub->data[0] == WLAN_EID_NON_TX_BSSID_CAP ?
NULL : next_mbssid; } size_t cfg80211_merge_profile(const u8 *ie, size_t ielen, const struct element *mbssid_elem, const struct element *sub_elem, u8 **merged_ie, size_t max_copy_len) { size_t copied_len = sub_elem->datalen; const struct element *next_mbssid; if (sub_elem->datalen > max_copy_len) return 0; memcpy(*merged_ie, sub_elem->data, sub_elem->datalen); while ((next_mbssid = cfg80211_get_profile_continuation(ie, ielen, mbssid_elem, sub_elem))) { const struct element *next_sub = (void *)&next_mbssid->data[1]; if (copied_len + next_sub->datalen > max_copy_len) break; memcpy(*merged_ie + copied_len, next_sub->data, next_sub->datalen); copied_len += next_sub->datalen; } return copied_len; } EXPORT_SYMBOL(cfg80211_merge_profile); static void cfg80211_parse_mbssid_data(struct wiphy *wiphy, struct cfg80211_inform_bss *data, enum cfg80211_bss_frame_type ftype, const u8 *bssid, u64 tsf, u16 beacon_interval, const u8 *ie, size_t ielen, struct cfg80211_non_tx_bss *non_tx_data, gfp_t gfp) { const u8 *mbssid_index_ie; const struct element *elem, *sub; size_t new_ie_len; u8 new_bssid[ETH_ALEN]; u8 *new_ie, *profile; u64 seen_indices = 0; u16 capability; struct cfg80211_bss *bss; if (!non_tx_data) return; if (!cfg80211_find_ie(WLAN_EID_MULTIPLE_BSSID, ie, ielen)) return; if (!wiphy->support_mbssid) return; if (wiphy->support_only_he_mbssid && !cfg80211_find_ext_ie(WLAN_EID_EXT_HE_CAPABILITY, ie, ielen)) return; new_ie = kmalloc(IEEE80211_MAX_DATA_LEN, gfp); if (!new_ie) return; profile = kmalloc(ielen, gfp); if (!profile) goto out; for_each_element_id(elem, WLAN_EID_MULTIPLE_BSSID, ie, ielen) { if (elem->datalen < 4) continue; for_each_element(sub, elem->data + 1, elem->datalen - 1) { u8 profile_len; if (sub->id != 0 || sub->datalen < 4) { /* not a valid BSS profile */ continue; } if (sub->data[0] != WLAN_EID_NON_TX_BSSID_CAP || sub->data[1] != 2) { /* The first element within the Nontransmitted * BSSID Profile is not the Nontransmitted * BSSID Capability element. 
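 * (That element's body is exactly the 2-octet capability field,
 * which is why the check above also requires sub->data[1] == 2.)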
*/ continue; } memset(profile, 0, ielen); profile_len = cfg80211_merge_profile(ie, ielen, elem, sub, &profile, ielen); /* found a Nontransmitted BSSID Profile */ mbssid_index_ie = cfg80211_find_ie (WLAN_EID_MULTI_BSSID_IDX, profile, profile_len); if (!mbssid_index_ie || mbssid_index_ie[1] < 1 || mbssid_index_ie[2] == 0 || mbssid_index_ie[2] > 46) { /* No valid Multiple BSSID-Index element */ continue; } if (seen_indices & BIT_ULL(mbssid_index_ie[2])) /* We don't support legacy split of a profile */ net_dbg_ratelimited("Partial info for BSSID index %d\n", mbssid_index_ie[2]); seen_indices |= BIT_ULL(mbssid_index_ie[2]); non_tx_data->bssid_index = mbssid_index_ie[2]; non_tx_data->max_bssid_indicator = elem->data[0]; cfg80211_gen_new_bssid(bssid, non_tx_data->max_bssid_indicator, non_tx_data->bssid_index, new_bssid); memset(new_ie, 0, IEEE80211_MAX_DATA_LEN); new_ie_len = cfg80211_gen_new_ie(ie, ielen, profile, profile_len, new_ie, gfp); if (!new_ie_len) continue; capability = get_unaligned_le16(profile + 2); bss = cfg80211_inform_single_bss_data(wiphy, data, ftype, new_bssid, tsf, capability, beacon_interval, new_ie, new_ie_len, non_tx_data, gfp); if (!bss) break; cfg80211_put_bss(wiphy, bss); } } out: kfree(new_ie); kfree(profile); } struct cfg80211_bss * cfg80211_inform_bss_data(struct wiphy *wiphy, struct cfg80211_inform_bss *data, enum cfg80211_bss_frame_type ftype, const u8 *bssid, u64 tsf, u16 capability, u16 beacon_interval, const u8 *ie, size_t ielen, gfp_t gfp) { struct cfg80211_bss *res; struct cfg80211_non_tx_bss non_tx_data; res = cfg80211_inform_single_bss_data(wiphy, data, ftype, bssid, tsf, capability, beacon_interval, ie, ielen, NULL, gfp); non_tx_data.tx_bss = res; cfg80211_parse_mbssid_data(wiphy, data, ftype, bssid, tsf, beacon_interval, ie, ielen, &non_tx_data, gfp); return res; } EXPORT_SYMBOL(cfg80211_inform_bss_data); static void cfg80211_parse_mbssid_frame_data(struct wiphy *wiphy, struct cfg80211_inform_bss *data, struct ieee80211_mgmt *mgmt, size_t len, struct cfg80211_non_tx_bss *non_tx_data, gfp_t gfp) { enum cfg80211_bss_frame_type ftype; const u8 *ie = mgmt->u.probe_resp.variable; size_t ielen = len - offsetof(struct ieee80211_mgmt, u.probe_resp.variable); ftype = ieee80211_is_beacon(mgmt->frame_control) ? CFG80211_BSS_FTYPE_BEACON : CFG80211_BSS_FTYPE_PRESP; cfg80211_parse_mbssid_data(wiphy, data, ftype, mgmt->bssid, le64_to_cpu(mgmt->u.probe_resp.timestamp), le16_to_cpu(mgmt->u.probe_resp.beacon_int), ie, ielen, non_tx_data, gfp); } static void cfg80211_update_notlisted_nontrans(struct wiphy *wiphy, struct cfg80211_bss *nontrans_bss, struct ieee80211_mgmt *mgmt, size_t len, gfp_t gfp) { u8 *ie, *new_ie, *pos; const u8 *nontrans_ssid, *trans_ssid, *mbssid; size_t ielen = len - offsetof(struct ieee80211_mgmt, u.probe_resp.variable); size_t new_ie_len; struct cfg80211_bss_ies *new_ies; const struct cfg80211_bss_ies *old; u8 cpy_len; ie = mgmt->u.probe_resp.variable; new_ie_len = ielen; trans_ssid = cfg80211_find_ie(WLAN_EID_SSID, ie, ielen); if (!trans_ssid) return; new_ie_len -= trans_ssid[1]; mbssid = cfg80211_find_ie(WLAN_EID_MULTIPLE_BSSID, ie, ielen); if (!mbssid) return; new_ie_len -= mbssid[1]; rcu_read_lock(); nontrans_ssid = ieee80211_bss_get_ie(nontrans_bss, WLAN_EID_SSID); if (!nontrans_ssid) { rcu_read_unlock(); return; } new_ie_len += nontrans_ssid[1]; rcu_read_unlock(); /* generate new ie for nontrans BSS * 1. replace SSID with nontrans BSS' SSID * 2. 
skip MBSSID IE */ new_ie = kzalloc(new_ie_len, gfp); if (!new_ie) return; new_ies = kzalloc(sizeof(*new_ies) + new_ie_len, gfp); if (!new_ies) goto out_free; pos = new_ie; /* copy the nontransmitted SSID */ cpy_len = nontrans_ssid[1] + 2; memcpy(pos, nontrans_ssid, cpy_len); pos += cpy_len; /* copy the IEs between SSID and MBSSID */ cpy_len = trans_ssid[1] + 2; memcpy(pos, (trans_ssid + cpy_len), (mbssid - (trans_ssid + cpy_len))); pos += (mbssid - (trans_ssid + cpy_len)); /* copy the IEs after MBSSID */ cpy_len = mbssid[1] + 2; memcpy(pos, mbssid + cpy_len, ((ie + ielen) - (mbssid + cpy_len))); /* update ie */ new_ies->len = new_ie_len; new_ies->tsf = le64_to_cpu(mgmt->u.probe_resp.timestamp); new_ies->from_beacon = ieee80211_is_beacon(mgmt->frame_control); memcpy(new_ies->data, new_ie, new_ie_len); if (ieee80211_is_probe_resp(mgmt->frame_control)) { old = rcu_access_pointer(nontrans_bss->proberesp_ies); rcu_assign_pointer(nontrans_bss->proberesp_ies, new_ies); rcu_assign_pointer(nontrans_bss->ies, new_ies); if (old) kfree_rcu((struct cfg80211_bss_ies *)old, rcu_head); } else { old = rcu_access_pointer(nontrans_bss->beacon_ies); rcu_assign_pointer(nontrans_bss->beacon_ies, new_ies); rcu_assign_pointer(nontrans_bss->ies, new_ies); if (old) kfree_rcu((struct cfg80211_bss_ies *)old, rcu_head); } out_free: kfree(new_ie); } /* cfg80211_inform_bss_width_frame helper */ static struct cfg80211_bss * cfg80211_inform_single_bss_frame_data(struct wiphy *wiphy, struct cfg80211_inform_bss *data, struct ieee80211_mgmt *mgmt, size_t len, struct cfg80211_non_tx_bss *non_tx_data, gfp_t gfp) { struct cfg80211_internal_bss tmp = {}, *res; struct cfg80211_bss_ies *ies; struct ieee80211_channel *channel; bool signal_valid; size_t ielen = len - offsetof(struct ieee80211_mgmt, u.probe_resp.variable); int bss_type; BUILD_BUG_ON(offsetof(struct ieee80211_mgmt, u.probe_resp.variable) != offsetof(struct ieee80211_mgmt, u.beacon.variable)); trace_cfg80211_inform_bss_frame(wiphy, data, mgmt, len); if (WARN_ON(!mgmt)) return NULL; if (WARN_ON(!wiphy)) return NULL; if (WARN_ON(wiphy->signal_type == CFG80211_SIGNAL_TYPE_UNSPEC && (data->signal < 0 || data->signal > 100))) return NULL; if (WARN_ON(len < offsetof(struct ieee80211_mgmt, u.probe_resp.variable))) return NULL; channel = cfg80211_get_bss_channel(wiphy, mgmt->u.beacon.variable, ielen, data->chan, data->scan_width); if (!channel) return NULL; ies = kzalloc(sizeof(*ies) + ielen, gfp); if (!ies) return NULL; ies->len = ielen; ies->tsf = le64_to_cpu(mgmt->u.probe_resp.timestamp); ies->from_beacon = ieee80211_is_beacon(mgmt->frame_control); memcpy(ies->data, mgmt->u.probe_resp.variable, ielen); if (ieee80211_is_probe_resp(mgmt->frame_control)) rcu_assign_pointer(tmp.pub.proberesp_ies, ies); else rcu_assign_pointer(tmp.pub.beacon_ies, ies); rcu_assign_pointer(tmp.pub.ies, ies); memcpy(tmp.pub.bssid, mgmt->bssid, ETH_ALEN); tmp.pub.channel = channel; tmp.pub.scan_width = data->scan_width; tmp.pub.signal = data->signal; tmp.pub.beacon_interval = le16_to_cpu(mgmt->u.probe_resp.beacon_int); tmp.pub.capability = le16_to_cpu(mgmt->u.probe_resp.capab_info); tmp.ts_boottime = data->boottime_ns; tmp.parent_tsf = data->parent_tsf; tmp.pub.chains = data->chains; memcpy(tmp.pub.chain_signal, data->chain_signal, IEEE80211_MAX_CHAINS); ether_addr_copy(tmp.parent_bssid, data->parent_bssid); if (non_tx_data) { tmp.pub.transmitted_bss = non_tx_data->tx_bss; tmp.pub.bssid_index = non_tx_data->bssid_index; tmp.pub.max_bssid_indicator = non_tx_data->max_bssid_indicator; } signal_valid = 
data->chan == channel; res = cfg80211_bss_update(wiphy_to_rdev(wiphy), &tmp, signal_valid); if (!res) return NULL; if (channel->band == NL80211_BAND_60GHZ) { bss_type = res->pub.capability & WLAN_CAPABILITY_DMG_TYPE_MASK; if (bss_type == WLAN_CAPABILITY_DMG_TYPE_AP || bss_type == WLAN_CAPABILITY_DMG_TYPE_PBSS) regulatory_hint_found_beacon(wiphy, channel, gfp); } else { if (res->pub.capability & WLAN_CAPABILITY_ESS) regulatory_hint_found_beacon(wiphy, channel, gfp); } trace_cfg80211_return_bss(&res->pub); /* cfg80211_bss_update gives us a referenced result */ return &res->pub; } struct cfg80211_bss * cfg80211_inform_bss_frame_data(struct wiphy *wiphy, struct cfg80211_inform_bss *data, struct ieee80211_mgmt *mgmt, size_t len, gfp_t gfp) { struct cfg80211_bss *res, *tmp_bss; const u8 *ie = mgmt->u.probe_resp.variable; const struct cfg80211_bss_ies *ies1, *ies2; size_t ielen = len - offsetof(struct ieee80211_mgmt, u.probe_resp.variable); struct cfg80211_non_tx_bss non_tx_data; res = cfg80211_inform_single_bss_frame_data(wiphy, data, mgmt, len, NULL, gfp); if (!res || !wiphy->support_mbssid || !cfg80211_find_ie(WLAN_EID_MULTIPLE_BSSID, ie, ielen)) return res; if (wiphy->support_only_he_mbssid && !cfg80211_find_ext_ie(WLAN_EID_EXT_HE_CAPABILITY, ie, ielen)) return res; non_tx_data.tx_bss = res; /* process each non-transmitting bss */ cfg80211_parse_mbssid_frame_data(wiphy, data, mgmt, len, &non_tx_data, gfp); /* check if the res has other nontransmitting bss which is not * in MBSSID IE */ ies1 = rcu_access_pointer(res->ies); /* go through nontrans_list, if the timestamp of the BSS is * earlier than the timestamp of the transmitting BSS then * update it */ list_for_each_entry(tmp_bss, &res->nontrans_list, nontrans_list) { ies2 = rcu_access_pointer(tmp_bss->ies); if (ies2->tsf < ies1->tsf) cfg80211_update_notlisted_nontrans(wiphy, tmp_bss, mgmt, len, gfp); } return res; } EXPORT_SYMBOL(cfg80211_inform_bss_frame_data); void cfg80211_ref_bss(struct wiphy *wiphy, struct cfg80211_bss *pub) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); struct cfg80211_internal_bss *bss; if (!pub) return; bss = container_of(pub, struct cfg80211_internal_bss, pub); spin_lock_bh(&rdev->bss_lock); bss_ref_get(rdev, bss); spin_unlock_bh(&rdev->bss_lock); } EXPORT_SYMBOL(cfg80211_ref_bss); void cfg80211_put_bss(struct wiphy *wiphy, struct cfg80211_bss *pub) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); struct cfg80211_internal_bss *bss; if (!pub) return; bss = container_of(pub, struct cfg80211_internal_bss, pub); spin_lock_bh(&rdev->bss_lock); bss_ref_put(rdev, bss); spin_unlock_bh(&rdev->bss_lock); } EXPORT_SYMBOL(cfg80211_put_bss); void cfg80211_unlink_bss(struct wiphy *wiphy, struct cfg80211_bss *pub) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); struct cfg80211_internal_bss *bss, *tmp1; struct cfg80211_bss *nontrans_bss, *tmp; if (WARN_ON(!pub)) return; bss = container_of(pub, struct cfg80211_internal_bss, pub); spin_lock_bh(&rdev->bss_lock); if (list_empty(&bss->list)) goto out; list_for_each_entry_safe(nontrans_bss, tmp, &pub->nontrans_list, nontrans_list) { tmp1 = container_of(nontrans_bss, struct cfg80211_internal_bss, pub); if (__cfg80211_unlink_bss(rdev, tmp1)) rdev->bss_generation++; } if (__cfg80211_unlink_bss(rdev, bss)) rdev->bss_generation++; out: spin_unlock_bh(&rdev->bss_lock); } EXPORT_SYMBOL(cfg80211_unlink_bss); void cfg80211_bss_iter(struct wiphy *wiphy, struct cfg80211_chan_def *chandef, void (*iter)(struct wiphy *wiphy, struct cfg80211_bss 
*bss, void *data), void *iter_data) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); struct cfg80211_internal_bss *bss; spin_lock_bh(&rdev->bss_lock); list_for_each_entry(bss, &rdev->bss_list, list) { if (!chandef || cfg80211_is_sub_chan(chandef, bss->pub.channel)) iter(wiphy, &bss->pub, iter_data); } spin_unlock_bh(&rdev->bss_lock); } EXPORT_SYMBOL(cfg80211_bss_iter); #ifdef CPTCFG_CFG80211_WEXT static struct cfg80211_registered_device * cfg80211_get_dev_from_ifindex(struct net *net, int ifindex) { struct cfg80211_registered_device *rdev; struct net_device *dev; ASSERT_RTNL(); dev = dev_get_by_index(net, ifindex); if (!dev) return ERR_PTR(-ENODEV); if (dev->ieee80211_ptr) rdev = wiphy_to_rdev(dev->ieee80211_ptr->wiphy); else rdev = ERR_PTR(-ENODEV); dev_put(dev); return rdev; } int cfg80211_wext_siwscan(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct cfg80211_registered_device *rdev; struct wiphy *wiphy; struct iw_scan_req *wreq = NULL; struct cfg80211_scan_request *creq = NULL; int i, err, n_channels = 0; enum nl80211_band band; if (!netif_running(dev)) return -ENETDOWN; if (wrqu->data.length == sizeof(struct iw_scan_req)) wreq = (struct iw_scan_req *)extra; rdev = cfg80211_get_dev_from_ifindex(dev_net(dev), dev->ifindex); if (IS_ERR(rdev)) return PTR_ERR(rdev); if (rdev->scan_req || rdev->scan_msg) { err = -EBUSY; goto out; } wiphy = &rdev->wiphy; /* Determine number of channels, needed to allocate creq */ if (wreq && wreq->num_channels) n_channels = wreq->num_channels; else n_channels = ieee80211_get_num_supported_channels(wiphy); creq = kzalloc(sizeof(*creq) + sizeof(struct cfg80211_ssid) + n_channels * sizeof(void *), GFP_ATOMIC); if (!creq) { err = -ENOMEM; goto out; } creq->wiphy = wiphy; creq->wdev = dev->ieee80211_ptr; /* SSIDs come after channels */ creq->ssids = (void *)&creq->channels[n_channels]; creq->n_channels = n_channels; creq->n_ssids = 1; creq->scan_start = jiffies; /* translate "Scan on frequencies" request */ i = 0; for (band = 0; band < NUM_NL80211_BANDS; band++) { int j; if (!wiphy->bands[band]) continue; for (j = 0; j < wiphy->bands[band]->n_channels; j++) { /* ignore disabled channels */ if (wiphy->bands[band]->channels[j].flags & IEEE80211_CHAN_DISABLED) continue; /* If we have a wireless request structure and the * wireless request specifies frequencies, then search * for the matching hardware channel. */ if (wreq && wreq->num_channels) { int k; int wiphy_freq = wiphy->bands[band]->channels[j].center_freq; for (k = 0; k < wreq->num_channels; k++) { struct iw_freq *freq = &wreq->channel_list[k]; int wext_freq = cfg80211_wext_freq(freq); if (wext_freq == wiphy_freq) goto wext_freq_found; } goto wext_freq_not_found; } wext_freq_found: creq->channels[i] = &wiphy->bands[band]->channels[j]; i++; wext_freq_not_found: ; } } /* No channels found? 
*/ if (!i) { err = -EINVAL; goto out; } /* Set real number of channels specified in creq->channels[] */ creq->n_channels = i; /* translate "Scan for SSID" request */ if (wreq) { if (wrqu->data.flags & IW_SCAN_THIS_ESSID) { if (wreq->essid_len > IEEE80211_MAX_SSID_LEN) { err = -EINVAL; goto out; } memcpy(creq->ssids[0].ssid, wreq->essid, wreq->essid_len); creq->ssids[0].ssid_len = wreq->essid_len; } if (wreq->scan_type == IW_SCAN_TYPE_PASSIVE) creq->n_ssids = 0; } for (i = 0; i < NUM_NL80211_BANDS; i++) if (wiphy->bands[i]) creq->rates[i] = (1 << wiphy->bands[i]->n_bitrates) - 1; eth_broadcast_addr(creq->bssid); rdev->scan_req = creq; err = rdev_scan(rdev, creq); if (err) { rdev->scan_req = NULL; /* creq will be freed below */ } else { nl80211_send_scan_start(rdev, dev->ieee80211_ptr); /* creq now owned by driver */ creq = NULL; dev_hold(dev); } out: kfree(creq); return err; } EXPORT_WEXT_HANDLER(cfg80211_wext_siwscan); static char *ieee80211_scan_add_ies(struct iw_request_info *info, const struct cfg80211_bss_ies *ies, char *current_ev, char *end_buf) { const u8 *pos, *end, *next; struct iw_event iwe; if (!ies) return current_ev; /* * If needed, fragment the IEs buffer (at IE boundaries) into short * enough fragments to fit into IW_GENERIC_IE_MAX octet messages. */ pos = ies->data; end = pos + ies->len; while (end - pos > IW_GENERIC_IE_MAX) { next = pos + 2 + pos[1]; while (next + 2 + next[1] - pos < IW_GENERIC_IE_MAX) next = next + 2 + next[1]; memset(&iwe, 0, sizeof(iwe)); iwe.cmd = IWEVGENIE; iwe.u.data.length = next - pos; current_ev = iwe_stream_add_point_check(info, current_ev, end_buf, &iwe, (void *)pos); if (IS_ERR(current_ev)) return current_ev; pos = next; } if (end > pos) { memset(&iwe, 0, sizeof(iwe)); iwe.cmd = IWEVGENIE; iwe.u.data.length = end - pos; current_ev = iwe_stream_add_point_check(info, current_ev, end_buf, &iwe, (void *)pos); if (IS_ERR(current_ev)) return current_ev; } return current_ev; } static char * ieee80211_bss(struct wiphy *wiphy, struct iw_request_info *info, struct cfg80211_internal_bss *bss, char *current_ev, char *end_buf) { const struct cfg80211_bss_ies *ies; struct iw_event iwe; const u8 *ie; u8 buf[50]; u8 *cfg, *p, *tmp; int rem, i, sig; bool ismesh = false; memset(&iwe, 0, sizeof(iwe)); iwe.cmd = SIOCGIWAP; iwe.u.ap_addr.sa_family = ARPHRD_ETHER; memcpy(iwe.u.ap_addr.sa_data, bss->pub.bssid, ETH_ALEN); current_ev = iwe_stream_add_event_check(info, current_ev, end_buf, &iwe, IW_EV_ADDR_LEN); if (IS_ERR(current_ev)) return current_ev; memset(&iwe, 0, sizeof(iwe)); iwe.cmd = SIOCGIWFREQ; iwe.u.freq.m = ieee80211_frequency_to_channel(bss->pub.channel->center_freq); iwe.u.freq.e = 0; current_ev = iwe_stream_add_event_check(info, current_ev, end_buf, &iwe, IW_EV_FREQ_LEN); if (IS_ERR(current_ev)) return current_ev; memset(&iwe, 0, sizeof(iwe)); iwe.cmd = SIOCGIWFREQ; iwe.u.freq.m = bss->pub.channel->center_freq; iwe.u.freq.e = 6; current_ev = iwe_stream_add_event_check(info, current_ev, end_buf, &iwe, IW_EV_FREQ_LEN); if (IS_ERR(current_ev)) return current_ev; if (wiphy->signal_type != CFG80211_SIGNAL_TYPE_NONE) { memset(&iwe, 0, sizeof(iwe)); iwe.cmd = IWEVQUAL; iwe.u.qual.updated = IW_QUAL_LEVEL_UPDATED | IW_QUAL_NOISE_INVALID | IW_QUAL_QUAL_UPDATED; switch (wiphy->signal_type) { case CFG80211_SIGNAL_TYPE_MBM: sig = bss->pub.signal / 100; iwe.u.qual.level = sig; iwe.u.qual.updated |= IW_QUAL_DBM; if (sig < -110) /* rather bad */ sig = -110; else if (sig > -40) /* perfect */ sig = -40; /* will give a range of 0 .. 
70 */ iwe.u.qual.qual = sig + 110; break; case CFG80211_SIGNAL_TYPE_UNSPEC: iwe.u.qual.level = bss->pub.signal; /* will give range 0 .. 100 */ iwe.u.qual.qual = bss->pub.signal; break; default: /* not reached */ break; } current_ev = iwe_stream_add_event_check(info, current_ev, end_buf, &iwe, IW_EV_QUAL_LEN); if (IS_ERR(current_ev)) return current_ev; } memset(&iwe, 0, sizeof(iwe)); iwe.cmd = SIOCGIWENCODE; if (bss->pub.capability & WLAN_CAPABILITY_PRIVACY) iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY; else iwe.u.data.flags = IW_ENCODE_DISABLED; iwe.u.data.length = 0; current_ev = iwe_stream_add_point_check(info, current_ev, end_buf, &iwe, ""); if (IS_ERR(current_ev)) return current_ev; rcu_read_lock(); ies = rcu_dereference(bss->pub.ies); rem = ies->len; ie = ies->data; while (rem >= 2) { /* invalid data */ if (ie[1] > rem - 2) break; switch (ie[0]) { case WLAN_EID_SSID: memset(&iwe, 0, sizeof(iwe)); iwe.cmd = SIOCGIWESSID; iwe.u.data.length = ie[1]; iwe.u.data.flags = 1; current_ev = iwe_stream_add_point_check(info, current_ev, end_buf, &iwe, (u8 *)ie + 2); if (IS_ERR(current_ev)) goto unlock; break; case WLAN_EID_MESH_ID: memset(&iwe, 0, sizeof(iwe)); iwe.cmd = SIOCGIWESSID; iwe.u.data.length = ie[1]; iwe.u.data.flags = 1; current_ev = iwe_stream_add_point_check(info, current_ev, end_buf, &iwe, (u8 *)ie + 2); if (IS_ERR(current_ev)) goto unlock; break; case WLAN_EID_MESH_CONFIG: ismesh = true; if (ie[1] != sizeof(struct ieee80211_meshconf_ie)) break; cfg = (u8 *)ie + 2; memset(&iwe, 0, sizeof(iwe)); iwe.cmd = IWEVCUSTOM; sprintf(buf, "Mesh Network Path Selection Protocol ID: " "0x%02X", cfg[0]); iwe.u.data.length = strlen(buf); current_ev = iwe_stream_add_point_check(info, current_ev, end_buf, &iwe, buf); if (IS_ERR(current_ev)) goto unlock; sprintf(buf, "Path Selection Metric ID: 0x%02X", cfg[1]); iwe.u.data.length = strlen(buf); current_ev = iwe_stream_add_point_check(info, current_ev, end_buf, &iwe, buf); if (IS_ERR(current_ev)) goto unlock; sprintf(buf, "Congestion Control Mode ID: 0x%02X", cfg[2]); iwe.u.data.length = strlen(buf); current_ev = iwe_stream_add_point_check(info, current_ev, end_buf, &iwe, buf); if (IS_ERR(current_ev)) goto unlock; sprintf(buf, "Synchronization ID: 0x%02X", cfg[3]); iwe.u.data.length = strlen(buf); current_ev = iwe_stream_add_point_check(info, current_ev, end_buf, &iwe, buf); if (IS_ERR(current_ev)) goto unlock; sprintf(buf, "Authentication ID: 0x%02X", cfg[4]); iwe.u.data.length = strlen(buf); current_ev = iwe_stream_add_point_check(info, current_ev, end_buf, &iwe, buf); if (IS_ERR(current_ev)) goto unlock; sprintf(buf, "Formation Info: 0x%02X", cfg[5]); iwe.u.data.length = strlen(buf); current_ev = iwe_stream_add_point_check(info, current_ev, end_buf, &iwe, buf); if (IS_ERR(current_ev)) goto unlock; sprintf(buf, "Capabilities: 0x%02X", cfg[6]); iwe.u.data.length = strlen(buf); current_ev = iwe_stream_add_point_check(info, current_ev, end_buf, &iwe, buf); if (IS_ERR(current_ev)) goto unlock; break; case WLAN_EID_SUPP_RATES: case WLAN_EID_EXT_SUPP_RATES: /* display all supported rates in readable format */ p = current_ev + iwe_stream_lcp_len(info); memset(&iwe, 0, sizeof(iwe)); iwe.cmd = SIOCGIWRATE; /* Those two flags are ignored... 
*/ iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0; for (i = 0; i < ie[1]; i++) { iwe.u.bitrate.value = ((ie[i + 2] & 0x7f) * 500000); tmp = p; p = iwe_stream_add_value(info, current_ev, p, end_buf, &iwe, IW_EV_PARAM_LEN); if (p == tmp) { current_ev = ERR_PTR(-E2BIG); goto unlock; } } current_ev = p; break; } rem -= ie[1] + 2; ie += ie[1] + 2; } if (bss->pub.capability & (WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_IBSS) || ismesh) { memset(&iwe, 0, sizeof(iwe)); iwe.cmd = SIOCGIWMODE; if (ismesh) iwe.u.mode = IW_MODE_MESH; else if (bss->pub.capability & WLAN_CAPABILITY_ESS) iwe.u.mode = IW_MODE_MASTER; else iwe.u.mode = IW_MODE_ADHOC; current_ev = iwe_stream_add_event_check(info, current_ev, end_buf, &iwe, IW_EV_UINT_LEN); if (IS_ERR(current_ev)) goto unlock; } memset(&iwe, 0, sizeof(iwe)); iwe.cmd = IWEVCUSTOM; sprintf(buf, "tsf=%016llx", (unsigned long long)(ies->tsf)); iwe.u.data.length = strlen(buf); current_ev = iwe_stream_add_point_check(info, current_ev, end_buf, &iwe, buf); if (IS_ERR(current_ev)) goto unlock; memset(&iwe, 0, sizeof(iwe)); iwe.cmd = IWEVCUSTOM; sprintf(buf, " Last beacon: %ums ago", elapsed_jiffies_msecs(bss->ts)); iwe.u.data.length = strlen(buf); current_ev = iwe_stream_add_point_check(info, current_ev, end_buf, &iwe, buf); if (IS_ERR(current_ev)) goto unlock; current_ev = ieee80211_scan_add_ies(info, ies, current_ev, end_buf); unlock: rcu_read_unlock(); return current_ev; } static int ieee80211_scan_results(struct cfg80211_registered_device *rdev, struct iw_request_info *info, char *buf, size_t len) { char *current_ev = buf; char *end_buf = buf + len; struct cfg80211_internal_bss *bss; int err = 0; spin_lock_bh(&rdev->bss_lock); cfg80211_bss_expire(rdev); list_for_each_entry(bss, &rdev->bss_list, list) { if (buf + len - current_ev <= IW_EV_ADDR_LEN) { err = -E2BIG; break; } current_ev = ieee80211_bss(&rdev->wiphy, info, bss, current_ev, end_buf); if (IS_ERR(current_ev)) { err = PTR_ERR(current_ev); break; } } spin_unlock_bh(&rdev->bss_lock); if (err) return err; return current_ev - buf; } int cfg80211_wext_giwscan(struct net_device *dev, struct iw_request_info *info, struct iw_point *data, char *extra) { struct cfg80211_registered_device *rdev; int res; if (!netif_running(dev)) return -ENETDOWN; rdev = cfg80211_get_dev_from_ifindex(dev_net(dev), dev->ifindex); if (IS_ERR(rdev)) return PTR_ERR(rdev); if (rdev->scan_req || rdev->scan_msg) return -EAGAIN; res = ieee80211_scan_results(rdev, info, extra, data->length); data->length = 0; if (res >= 0) { data->length = res; res = 0; } return res; } EXPORT_WEXT_HANDLER(cfg80211_wext_giwscan); #endif backport-iwlwifi-dkms-7906/net/wireless/sme.c000066400000000000000000001051111351064634500212530ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0 /* * SME code for cfg80211 * both driver SME event handling and the SME implementation * (for nl80211's connect() and wext) * * Copyright 2009 Johannes Berg * Copyright (C) 2009 Intel Corporation. All rights reserved. * Copyright 2017 Intel Deutschland GmbH */ #include #include #include #include #include #include #include #include #include #include "nl80211.h" #include "reg.h" #include "rdev-ops.h" /* * Software SME in cfg80211, using auth/assoc/deauth calls to the * driver. This is for implementing nl80211's connect/disconnect * and wireless extensions (if configured). 
*/ struct cfg80211_conn { struct cfg80211_connect_params params; /* these are sub-states of the _CONNECTING sme_state */ enum { CFG80211_CONN_SCANNING, CFG80211_CONN_SCAN_AGAIN, CFG80211_CONN_AUTHENTICATE_NEXT, CFG80211_CONN_AUTHENTICATING, CFG80211_CONN_AUTH_FAILED_TIMEOUT, CFG80211_CONN_ASSOCIATE_NEXT, CFG80211_CONN_ASSOCIATING, CFG80211_CONN_ASSOC_FAILED, CFG80211_CONN_ASSOC_FAILED_TIMEOUT, CFG80211_CONN_DEAUTH, CFG80211_CONN_ABANDON, CFG80211_CONN_CONNECTED, } state; u8 bssid[ETH_ALEN], prev_bssid[ETH_ALEN]; const u8 *ie; size_t ie_len; bool auto_auth, prev_bssid_valid; }; static void cfg80211_sme_free(struct wireless_dev *wdev) { if (!wdev->conn) return; kfree(wdev->conn->ie); kfree(wdev->conn); wdev->conn = NULL; } static int cfg80211_conn_scan(struct wireless_dev *wdev) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); struct cfg80211_scan_request *request; int n_channels, err; ASSERT_RTNL(); ASSERT_WDEV_LOCK(wdev); if (rdev->scan_req || rdev->scan_msg) return -EBUSY; if (wdev->conn->params.channel) n_channels = 1; else n_channels = ieee80211_get_num_supported_channels(wdev->wiphy); request = kzalloc(sizeof(*request) + sizeof(request->ssids[0]) + sizeof(request->channels[0]) * n_channels, GFP_KERNEL); if (!request) return -ENOMEM; if (wdev->conn->params.channel) { enum nl80211_band band = wdev->conn->params.channel->band; struct ieee80211_supported_band *sband = wdev->wiphy->bands[band]; if (!sband) { kfree(request); return -EINVAL; } request->channels[0] = wdev->conn->params.channel; request->rates[band] = (1 << sband->n_bitrates) - 1; } else { int i = 0, j; enum nl80211_band band; struct ieee80211_supported_band *bands; struct ieee80211_channel *channel; for (band = 0; band < NUM_NL80211_BANDS; band++) { bands = wdev->wiphy->bands[band]; if (!bands) continue; for (j = 0; j < bands->n_channels; j++) { channel = &bands->channels[j]; if (channel->flags & IEEE80211_CHAN_DISABLED) continue; request->channels[i++] = channel; } request->rates[band] = (1 << bands->n_bitrates) - 1; } n_channels = i; } request->n_channels = n_channels; request->ssids = (void *)&request->channels[n_channels]; request->n_ssids = 1; memcpy(request->ssids[0].ssid, wdev->conn->params.ssid, wdev->conn->params.ssid_len); request->ssids[0].ssid_len = wdev->conn->params.ssid_len; eth_broadcast_addr(request->bssid); request->wdev = wdev; request->wiphy = &rdev->wiphy; request->scan_start = jiffies; rdev->scan_req = request; err = rdev_scan(rdev, request); if (!err) { wdev->conn->state = CFG80211_CONN_SCANNING; nl80211_send_scan_start(rdev, wdev); dev_hold(wdev->netdev); } else { rdev->scan_req = NULL; kfree(request); } return err; } static int cfg80211_conn_do_work(struct wireless_dev *wdev, enum nl80211_timeout_reason *treason) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); struct cfg80211_connect_params *params; struct cfg80211_assoc_request req = {}; int err; ASSERT_WDEV_LOCK(wdev); if (!wdev->conn) return 0; params = &wdev->conn->params; switch (wdev->conn->state) { case CFG80211_CONN_SCANNING: /* didn't find it during scan ... 
*/ return -ENOENT; case CFG80211_CONN_SCAN_AGAIN: return cfg80211_conn_scan(wdev); case CFG80211_CONN_AUTHENTICATE_NEXT: if (WARN_ON(!rdev->ops->auth)) return -EOPNOTSUPP; wdev->conn->state = CFG80211_CONN_AUTHENTICATING; return cfg80211_mlme_auth(rdev, wdev->netdev, params->channel, params->auth_type, params->bssid, params->ssid, params->ssid_len, NULL, 0, params->key, params->key_len, params->key_idx, NULL, 0); case CFG80211_CONN_AUTH_FAILED_TIMEOUT: *treason = NL80211_TIMEOUT_AUTH; return -ENOTCONN; case CFG80211_CONN_ASSOCIATE_NEXT: if (WARN_ON(!rdev->ops->assoc)) return -EOPNOTSUPP; wdev->conn->state = CFG80211_CONN_ASSOCIATING; if (wdev->conn->prev_bssid_valid) req.prev_bssid = wdev->conn->prev_bssid; req.ie = params->ie; req.ie_len = params->ie_len; req.use_mfp = params->mfp != NL80211_MFP_NO; req.crypto = params->crypto; req.flags = params->flags; req.ht_capa = params->ht_capa; req.ht_capa_mask = params->ht_capa_mask; req.vht_capa = params->vht_capa; req.vht_capa_mask = params->vht_capa_mask; err = cfg80211_mlme_assoc(rdev, wdev->netdev, params->channel, params->bssid, params->ssid, params->ssid_len, &req); if (err) cfg80211_mlme_deauth(rdev, wdev->netdev, params->bssid, NULL, 0, WLAN_REASON_DEAUTH_LEAVING, false); return err; case CFG80211_CONN_ASSOC_FAILED_TIMEOUT: *treason = NL80211_TIMEOUT_ASSOC; /* fall through */ case CFG80211_CONN_ASSOC_FAILED: cfg80211_mlme_deauth(rdev, wdev->netdev, params->bssid, NULL, 0, WLAN_REASON_DEAUTH_LEAVING, false); return -ENOTCONN; case CFG80211_CONN_DEAUTH: cfg80211_mlme_deauth(rdev, wdev->netdev, params->bssid, NULL, 0, WLAN_REASON_DEAUTH_LEAVING, false); /* fall through */ case CFG80211_CONN_ABANDON: /* free directly, disconnected event already sent */ cfg80211_sme_free(wdev); return 0; default: return 0; } } void cfg80211_conn_work(struct work_struct *work) { struct cfg80211_registered_device *rdev = container_of(work, struct cfg80211_registered_device, conn_work); struct wireless_dev *wdev; u8 bssid_buf[ETH_ALEN], *bssid = NULL; enum nl80211_timeout_reason treason; rtnl_lock(); list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) { if (!wdev->netdev) continue; wdev_lock(wdev); if (!netif_running(wdev->netdev)) { wdev_unlock(wdev); continue; } if (!wdev->conn || wdev->conn->state == CFG80211_CONN_CONNECTED) { wdev_unlock(wdev); continue; } if (wdev->conn->params.bssid) { memcpy(bssid_buf, wdev->conn->params.bssid, ETH_ALEN); bssid = bssid_buf; } treason = NL80211_TIMEOUT_UNSPECIFIED; if (cfg80211_conn_do_work(wdev, &treason)) { struct cfg80211_connect_resp_params cr; memset(&cr, 0, sizeof(cr)); cr.status = -1; cr.bssid = bssid; cr.timeout_reason = treason; __cfg80211_connect_result(wdev->netdev, &cr, false); } wdev_unlock(wdev); } rtnl_unlock(); } /* Returned bss is reference counted and must be cleaned up appropriately. 
*/ static struct cfg80211_bss *cfg80211_get_conn_bss(struct wireless_dev *wdev) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); struct cfg80211_bss *bss; ASSERT_WDEV_LOCK(wdev); bss = cfg80211_get_bss(wdev->wiphy, wdev->conn->params.channel, wdev->conn->params.bssid, wdev->conn->params.ssid, wdev->conn->params.ssid_len, wdev->conn_bss_type, IEEE80211_PRIVACY(wdev->conn->params.privacy)); if (!bss) return NULL; memcpy(wdev->conn->bssid, bss->bssid, ETH_ALEN); wdev->conn->params.bssid = wdev->conn->bssid; wdev->conn->params.channel = bss->channel; wdev->conn->state = CFG80211_CONN_AUTHENTICATE_NEXT; schedule_work(&rdev->conn_work); return bss; } static void __cfg80211_sme_scan_done(struct net_device *dev) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); struct cfg80211_bss *bss; ASSERT_WDEV_LOCK(wdev); if (!wdev->conn) return; if (wdev->conn->state != CFG80211_CONN_SCANNING && wdev->conn->state != CFG80211_CONN_SCAN_AGAIN) return; bss = cfg80211_get_conn_bss(wdev); if (bss) cfg80211_put_bss(&rdev->wiphy, bss); else schedule_work(&rdev->conn_work); } void cfg80211_sme_scan_done(struct net_device *dev) { struct wireless_dev *wdev = dev->ieee80211_ptr; wdev_lock(wdev); __cfg80211_sme_scan_done(dev); wdev_unlock(wdev); } void cfg80211_sme_rx_auth(struct wireless_dev *wdev, const u8 *buf, size_t len) { struct wiphy *wiphy = wdev->wiphy; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf; u16 status_code = le16_to_cpu(mgmt->u.auth.status_code); ASSERT_WDEV_LOCK(wdev); if (!wdev->conn || wdev->conn->state == CFG80211_CONN_CONNECTED) return; if (status_code == WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG && wdev->conn->auto_auth && wdev->conn->params.auth_type != NL80211_AUTHTYPE_NETWORK_EAP) { /* select automatically between only open, shared, leap */ switch (wdev->conn->params.auth_type) { case NL80211_AUTHTYPE_OPEN_SYSTEM: if (wdev->connect_keys) wdev->conn->params.auth_type = NL80211_AUTHTYPE_SHARED_KEY; else wdev->conn->params.auth_type = NL80211_AUTHTYPE_NETWORK_EAP; break; case NL80211_AUTHTYPE_SHARED_KEY: wdev->conn->params.auth_type = NL80211_AUTHTYPE_NETWORK_EAP; break; default: /* huh? */ wdev->conn->params.auth_type = NL80211_AUTHTYPE_OPEN_SYSTEM; break; } wdev->conn->state = CFG80211_CONN_AUTHENTICATE_NEXT; schedule_work(&rdev->conn_work); } else if (status_code != WLAN_STATUS_SUCCESS) { struct cfg80211_connect_resp_params cr; memset(&cr, 0, sizeof(cr)); cr.status = status_code; cr.bssid = mgmt->bssid; cr.timeout_reason = NL80211_TIMEOUT_UNSPECIFIED; __cfg80211_connect_result(wdev->netdev, &cr, false); } else if (wdev->conn->state == CFG80211_CONN_AUTHENTICATING) { wdev->conn->state = CFG80211_CONN_ASSOCIATE_NEXT; schedule_work(&rdev->conn_work); } } bool cfg80211_sme_rx_assoc_resp(struct wireless_dev *wdev, u16 status) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); if (!wdev->conn) return false; if (status == WLAN_STATUS_SUCCESS) { wdev->conn->state = CFG80211_CONN_CONNECTED; return false; } if (wdev->conn->prev_bssid_valid) { /* * Some stupid APs don't accept reassoc, so we * need to fall back to trying regular assoc; * return true so no event is sent to userspace. 
*/ wdev->conn->prev_bssid_valid = false; wdev->conn->state = CFG80211_CONN_ASSOCIATE_NEXT; schedule_work(&rdev->conn_work); return true; } wdev->conn->state = CFG80211_CONN_ASSOC_FAILED; schedule_work(&rdev->conn_work); return false; } void cfg80211_sme_deauth(struct wireless_dev *wdev) { cfg80211_sme_free(wdev); } void cfg80211_sme_auth_timeout(struct wireless_dev *wdev) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); if (!wdev->conn) return; wdev->conn->state = CFG80211_CONN_AUTH_FAILED_TIMEOUT; schedule_work(&rdev->conn_work); } void cfg80211_sme_disassoc(struct wireless_dev *wdev) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); if (!wdev->conn) return; wdev->conn->state = CFG80211_CONN_DEAUTH; schedule_work(&rdev->conn_work); } void cfg80211_sme_assoc_timeout(struct wireless_dev *wdev) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); if (!wdev->conn) return; wdev->conn->state = CFG80211_CONN_ASSOC_FAILED_TIMEOUT; schedule_work(&rdev->conn_work); } void cfg80211_sme_abandon_assoc(struct wireless_dev *wdev) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); if (!wdev->conn) return; wdev->conn->state = CFG80211_CONN_ABANDON; schedule_work(&rdev->conn_work); } static int cfg80211_sme_get_conn_ies(struct wireless_dev *wdev, const u8 *ies, size_t ies_len, const u8 **out_ies, size_t *out_ies_len) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); u8 *buf; size_t offs; if (!rdev->wiphy.extended_capabilities_len || (ies && cfg80211_find_ie(WLAN_EID_EXT_CAPABILITY, ies, ies_len))) { *out_ies = kmemdup(ies, ies_len, GFP_KERNEL); if (!*out_ies) return -ENOMEM; *out_ies_len = ies_len; return 0; } buf = kmalloc(ies_len + rdev->wiphy.extended_capabilities_len + 2, GFP_KERNEL); if (!buf) return -ENOMEM; if (ies_len) { static const u8 before_extcapa[] = { /* not listing IEs expected to be created by driver */ WLAN_EID_RSN, WLAN_EID_QOS_CAPA, WLAN_EID_RRM_ENABLED_CAPABILITIES, WLAN_EID_MOBILITY_DOMAIN, WLAN_EID_SUPPORTED_REGULATORY_CLASSES, WLAN_EID_BSS_COEX_2040, }; offs = ieee80211_ie_split(ies, ies_len, before_extcapa, ARRAY_SIZE(before_extcapa), 0); memcpy(buf, ies, offs); /* leave a hole for the extended capabilities IE */ memcpy(buf + offs + rdev->wiphy.extended_capabilities_len + 2, ies + offs, ies_len - offs); } else { offs = 0; } /* place extended capabilities IE (with only driver capabilities) */ buf[offs] = WLAN_EID_EXT_CAPABILITY; buf[offs + 1] = rdev->wiphy.extended_capabilities_len; memcpy(buf + offs + 2, rdev->wiphy.extended_capabilities, rdev->wiphy.extended_capabilities_len); *out_ies = buf; *out_ies_len = ies_len + rdev->wiphy.extended_capabilities_len + 2; return 0; } static int cfg80211_sme_connect(struct wireless_dev *wdev, struct cfg80211_connect_params *connect, const u8 *prev_bssid) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); struct cfg80211_bss *bss; int err; if (!rdev->ops->auth || !rdev->ops->assoc) return -EOPNOTSUPP; if (wdev->current_bss) { cfg80211_unhold_bss(wdev->current_bss); cfg80211_put_bss(wdev->wiphy, &wdev->current_bss->pub); wdev->current_bss = NULL; cfg80211_sme_free(wdev); } if (WARN_ON(wdev->conn)) return -EINPROGRESS; wdev->conn = kzalloc(sizeof(*wdev->conn), GFP_KERNEL); if (!wdev->conn) return -ENOMEM; /* * Copy all parameters, and treat IEs, BSSID and SSID explicitly. 
*/ memcpy(&wdev->conn->params, connect, sizeof(*connect)); if (connect->bssid) { wdev->conn->params.bssid = wdev->conn->bssid; memcpy(wdev->conn->bssid, connect->bssid, ETH_ALEN); } if (cfg80211_sme_get_conn_ies(wdev, connect->ie, connect->ie_len, &wdev->conn->ie, &wdev->conn->params.ie_len)) { kfree(wdev->conn); wdev->conn = NULL; return -ENOMEM; } wdev->conn->params.ie = wdev->conn->ie; if (connect->auth_type == NL80211_AUTHTYPE_AUTOMATIC) { wdev->conn->auto_auth = true; /* start with open system ... should mostly work */ wdev->conn->params.auth_type = NL80211_AUTHTYPE_OPEN_SYSTEM; } else { wdev->conn->auto_auth = false; } wdev->conn->params.ssid = wdev->ssid; wdev->conn->params.ssid_len = wdev->ssid_len; /* see if we have the bss already */ bss = cfg80211_get_conn_bss(wdev); if (prev_bssid) { memcpy(wdev->conn->prev_bssid, prev_bssid, ETH_ALEN); wdev->conn->prev_bssid_valid = true; } /* we're good if we have a matching bss struct */ if (bss) { enum nl80211_timeout_reason treason; err = cfg80211_conn_do_work(wdev, &treason); cfg80211_put_bss(wdev->wiphy, bss); } else { /* otherwise we'll need to scan for the AP first */ err = cfg80211_conn_scan(wdev); /* * If we can't scan right now, then we need to scan again * after the current scan finished, since the parameters * changed (unless we find a good AP anyway). */ if (err == -EBUSY) { err = 0; wdev->conn->state = CFG80211_CONN_SCAN_AGAIN; } } if (err) cfg80211_sme_free(wdev); return err; } static int cfg80211_sme_disconnect(struct wireless_dev *wdev, u16 reason) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); int err; if (!wdev->conn) return 0; if (!rdev->ops->deauth) return -EOPNOTSUPP; if (wdev->conn->state == CFG80211_CONN_SCANNING || wdev->conn->state == CFG80211_CONN_SCAN_AGAIN) { err = 0; goto out; } /* wdev->conn->params.bssid must be set if > SCANNING */ err = cfg80211_mlme_deauth(rdev, wdev->netdev, wdev->conn->params.bssid, NULL, 0, reason, false); out: cfg80211_sme_free(wdev); return err; } /* * code shared for in-device and software SME */ static bool cfg80211_is_all_idle(void) { struct cfg80211_registered_device *rdev; struct wireless_dev *wdev; bool is_all_idle = true; /* * All devices must be idle as otherwise if you are actively * scanning some new beacon hints could be learned and would * count as new regulatory hints. * Also if there is any other active beaconing interface we * need not issue a disconnect hint and reset any info such * as chan dfs state, etc. 
*/ list_for_each_entry(rdev, &cfg80211_rdev_list, list) { list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) { wdev_lock(wdev); if (wdev->conn || wdev->current_bss || cfg80211_beaconing_iface_active(wdev)) is_all_idle = false; wdev_unlock(wdev); } } return is_all_idle; } static void disconnect_work(struct work_struct *work) { rtnl_lock(); if (cfg80211_is_all_idle()) regulatory_hint_disconnect(); rtnl_unlock(); } DECLARE_WORK(cfg80211_disconnect_work, disconnect_work); /* * API calls for drivers implementing connect/disconnect and * SME event handling */ /* This method must consume bss one way or another */ void __cfg80211_connect_result(struct net_device *dev, struct cfg80211_connect_resp_params *cr, bool wextev) { struct wireless_dev *wdev = dev->ieee80211_ptr; const u8 *country_ie; #ifdef CPTCFG_CFG80211_WEXT union iwreq_data wrqu; #endif ASSERT_WDEV_LOCK(wdev); if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION && wdev->iftype != NL80211_IFTYPE_P2P_CLIENT)) { cfg80211_put_bss(wdev->wiphy, cr->bss); return; } nl80211_send_connect_result(wiphy_to_rdev(wdev->wiphy), dev, cr, GFP_KERNEL); #ifdef CPTCFG_CFG80211_WEXT if (wextev) { if (cr->req_ie && cr->status == WLAN_STATUS_SUCCESS) { memset(&wrqu, 0, sizeof(wrqu)); wrqu.data.length = cr->req_ie_len; wireless_send_event(dev, IWEVASSOCREQIE, &wrqu, cr->req_ie); } if (cr->resp_ie && cr->status == WLAN_STATUS_SUCCESS) { memset(&wrqu, 0, sizeof(wrqu)); wrqu.data.length = cr->resp_ie_len; wireless_send_event(dev, IWEVASSOCRESPIE, &wrqu, cr->resp_ie); } memset(&wrqu, 0, sizeof(wrqu)); wrqu.ap_addr.sa_family = ARPHRD_ETHER; if (cr->bssid && cr->status == WLAN_STATUS_SUCCESS) { memcpy(wrqu.ap_addr.sa_data, cr->bssid, ETH_ALEN); memcpy(wdev->wext.prev_bssid, cr->bssid, ETH_ALEN); wdev->wext.prev_bssid_valid = true; } wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL); } #endif if (!cr->bss && (cr->status == WLAN_STATUS_SUCCESS)) { WARN_ON_ONCE(!wiphy_to_rdev(wdev->wiphy)->ops->connect); cr->bss = cfg80211_get_bss(wdev->wiphy, NULL, cr->bssid, wdev->ssid, wdev->ssid_len, wdev->conn_bss_type, IEEE80211_PRIVACY_ANY); if (cr->bss) cfg80211_hold_bss(bss_from_pub(cr->bss)); } if (wdev->current_bss) { cfg80211_unhold_bss(wdev->current_bss); cfg80211_put_bss(wdev->wiphy, &wdev->current_bss->pub); wdev->current_bss = NULL; } if (cr->status != WLAN_STATUS_SUCCESS) { kzfree(wdev->connect_keys); wdev->connect_keys = NULL; wdev->ssid_len = 0; wdev->conn_owner_nlportid = 0; if (cr->bss) { cfg80211_unhold_bss(bss_from_pub(cr->bss)); cfg80211_put_bss(wdev->wiphy, cr->bss); } cfg80211_sme_free(wdev); return; } if (WARN_ON(!cr->bss)) return; wdev->current_bss = bss_from_pub(cr->bss); if (!(wdev->wiphy->flags & WIPHY_FLAG_HAS_STATIC_WEP)) cfg80211_upload_connect_keys(wdev); rcu_read_lock(); country_ie = ieee80211_bss_get_ie(cr->bss, WLAN_EID_COUNTRY); if (!country_ie) { rcu_read_unlock(); return; } country_ie = kmemdup(country_ie, 2 + country_ie[1], GFP_ATOMIC); rcu_read_unlock(); if (!country_ie) return; /* * ieee80211_bss_get_ie() ensures we can access: * - country_ie + 2, the start of the country ie data, and * - and country_ie[1] which is the IE length */ regulatory_hint_country_ie(wdev->wiphy, cr->bss->channel->band, country_ie + 2, country_ie[1]); kfree(country_ie); } /* Consumes bss object one way or another */ void cfg80211_connect_done(struct net_device *dev, struct cfg80211_connect_resp_params *params, gfp_t gfp) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); struct cfg80211_event 
*ev; unsigned long flags; u8 *next; if (params->bss) { /* Make sure the bss entry provided by the driver is valid. */ struct cfg80211_internal_bss *ibss = bss_from_pub(params->bss); if (WARN_ON(list_empty(&ibss->list))) { cfg80211_put_bss(wdev->wiphy, params->bss); return; } } ev = kzalloc(sizeof(*ev) + (params->bssid ? ETH_ALEN : 0) + params->req_ie_len + params->resp_ie_len + params->fils.kek_len + params->fils.pmk_len + (params->fils.pmkid ? WLAN_PMKID_LEN : 0), gfp); if (!ev) { cfg80211_put_bss(wdev->wiphy, params->bss); return; } ev->type = EVENT_CONNECT_RESULT; next = ((u8 *)ev) + sizeof(*ev); if (params->bssid) { ev->cr.bssid = next; memcpy((void *)ev->cr.bssid, params->bssid, ETH_ALEN); next += ETH_ALEN; } if (params->req_ie_len) { ev->cr.req_ie = next; ev->cr.req_ie_len = params->req_ie_len; memcpy((void *)ev->cr.req_ie, params->req_ie, params->req_ie_len); next += params->req_ie_len; } if (params->resp_ie_len) { ev->cr.resp_ie = next; ev->cr.resp_ie_len = params->resp_ie_len; memcpy((void *)ev->cr.resp_ie, params->resp_ie, params->resp_ie_len); next += params->resp_ie_len; } if (params->fils.kek_len) { ev->cr.fils.kek = next; ev->cr.fils.kek_len = params->fils.kek_len; memcpy((void *)ev->cr.fils.kek, params->fils.kek, params->fils.kek_len); next += params->fils.kek_len; } if (params->fils.pmk_len) { ev->cr.fils.pmk = next; ev->cr.fils.pmk_len = params->fils.pmk_len; memcpy((void *)ev->cr.fils.pmk, params->fils.pmk, params->fils.pmk_len); next += params->fils.pmk_len; } if (params->fils.pmkid) { ev->cr.fils.pmkid = next; memcpy((void *)ev->cr.fils.pmkid, params->fils.pmkid, WLAN_PMKID_LEN); next += WLAN_PMKID_LEN; } ev->cr.fils.update_erp_next_seq_num = params->fils.update_erp_next_seq_num; if (params->fils.update_erp_next_seq_num) ev->cr.fils.erp_next_seq_num = params->fils.erp_next_seq_num; if (params->bss) cfg80211_hold_bss(bss_from_pub(params->bss)); ev->cr.bss = params->bss; ev->cr.status = params->status; ev->cr.timeout_reason = params->timeout_reason; spin_lock_irqsave(&wdev->event_lock, flags); list_add_tail(&ev->list, &wdev->event_list); spin_unlock_irqrestore(&wdev->event_lock, flags); queue_work(cfg80211_wq, &rdev->event_work); } EXPORT_SYMBOL(cfg80211_connect_done); /* Consumes bss object one way or another */ void __cfg80211_roamed(struct wireless_dev *wdev, struct cfg80211_roam_info *info) { #ifdef CPTCFG_CFG80211_WEXT union iwreq_data wrqu; #endif ASSERT_WDEV_LOCK(wdev); if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION && wdev->iftype != NL80211_IFTYPE_P2P_CLIENT)) goto out; if (WARN_ON(!wdev->current_bss)) goto out; cfg80211_unhold_bss(wdev->current_bss); cfg80211_put_bss(wdev->wiphy, &wdev->current_bss->pub); wdev->current_bss = NULL; if (WARN_ON(!info->bss)) return; cfg80211_hold_bss(bss_from_pub(info->bss)); wdev->current_bss = bss_from_pub(info->bss); nl80211_send_roamed(wiphy_to_rdev(wdev->wiphy), wdev->netdev, info, GFP_KERNEL); #ifdef CPTCFG_CFG80211_WEXT if (info->req_ie) { memset(&wrqu, 0, sizeof(wrqu)); wrqu.data.length = info->req_ie_len; wireless_send_event(wdev->netdev, IWEVASSOCREQIE, &wrqu, info->req_ie); } if (info->resp_ie) { memset(&wrqu, 0, sizeof(wrqu)); wrqu.data.length = info->resp_ie_len; wireless_send_event(wdev->netdev, IWEVASSOCRESPIE, &wrqu, info->resp_ie); } memset(&wrqu, 0, sizeof(wrqu)); wrqu.ap_addr.sa_family = ARPHRD_ETHER; memcpy(wrqu.ap_addr.sa_data, info->bss->bssid, ETH_ALEN); memcpy(wdev->wext.prev_bssid, info->bss->bssid, ETH_ALEN); wdev->wext.prev_bssid_valid = true; wireless_send_event(wdev->netdev, SIOCGIWAP, &wrqu, NULL); 
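	/*
	 * Note: as in __cfg80211_connect_result() above, the
	 * IWEVASSOCREQIE/IWEVASSOCRESPIE events are delivered before
	 * the SIOCGIWAP event, so wext userspace sees the association
	 * IEs before it learns of the new BSSID.
	 */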
#endif return; out: cfg80211_put_bss(wdev->wiphy, info->bss); } /* Consumes info->bss object one way or another */ void cfg80211_roamed(struct net_device *dev, struct cfg80211_roam_info *info, gfp_t gfp) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); struct cfg80211_event *ev; unsigned long flags; u8 *next; if (!info->bss) { info->bss = cfg80211_get_bss(wdev->wiphy, info->channel, info->bssid, wdev->ssid, wdev->ssid_len, wdev->conn_bss_type, IEEE80211_PRIVACY_ANY); } if (WARN_ON(!info->bss)) return; ev = kzalloc(sizeof(*ev) + info->req_ie_len + info->resp_ie_len + info->fils.kek_len + info->fils.pmk_len + (info->fils.pmkid ? WLAN_PMKID_LEN : 0), gfp); if (!ev) { cfg80211_put_bss(wdev->wiphy, info->bss); return; } ev->type = EVENT_ROAMED; next = ((u8 *)ev) + sizeof(*ev); if (info->req_ie_len) { ev->rm.req_ie = next; ev->rm.req_ie_len = info->req_ie_len; memcpy((void *)ev->rm.req_ie, info->req_ie, info->req_ie_len); next += info->req_ie_len; } if (info->resp_ie_len) { ev->rm.resp_ie = next; ev->rm.resp_ie_len = info->resp_ie_len; memcpy((void *)ev->rm.resp_ie, info->resp_ie, info->resp_ie_len); next += info->resp_ie_len; } if (info->fils.kek_len) { ev->rm.fils.kek = next; ev->rm.fils.kek_len = info->fils.kek_len; memcpy((void *)ev->rm.fils.kek, info->fils.kek, info->fils.kek_len); next += info->fils.kek_len; } if (info->fils.pmk_len) { ev->rm.fils.pmk = next; ev->rm.fils.pmk_len = info->fils.pmk_len; memcpy((void *)ev->rm.fils.pmk, info->fils.pmk, info->fils.pmk_len); next += info->fils.pmk_len; } if (info->fils.pmkid) { ev->rm.fils.pmkid = next; memcpy((void *)ev->rm.fils.pmkid, info->fils.pmkid, WLAN_PMKID_LEN); next += WLAN_PMKID_LEN; } ev->rm.fils.update_erp_next_seq_num = info->fils.update_erp_next_seq_num; if (info->fils.update_erp_next_seq_num) ev->rm.fils.erp_next_seq_num = info->fils.erp_next_seq_num; ev->rm.bss = info->bss; spin_lock_irqsave(&wdev->event_lock, flags); list_add_tail(&ev->list, &wdev->event_list); spin_unlock_irqrestore(&wdev->event_lock, flags); queue_work(cfg80211_wq, &rdev->event_work); } EXPORT_SYMBOL(cfg80211_roamed); void __cfg80211_port_authorized(struct wireless_dev *wdev, const u8 *bssid) { ASSERT_WDEV_LOCK(wdev); if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION)) return; if (WARN_ON(!wdev->current_bss) || WARN_ON(!ether_addr_equal(wdev->current_bss->pub.bssid, bssid))) return; nl80211_send_port_authorized(wiphy_to_rdev(wdev->wiphy), wdev->netdev, bssid); } void cfg80211_port_authorized(struct net_device *dev, const u8 *bssid, gfp_t gfp) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); struct cfg80211_event *ev; unsigned long flags; if (WARN_ON(!bssid)) return; ev = kzalloc(sizeof(*ev), gfp); if (!ev) return; ev->type = EVENT_PORT_AUTHORIZED; memcpy(ev->pa.bssid, bssid, ETH_ALEN); /* * Use the wdev event list so that if there are pending * connected/roamed events, they will be reported first. 
*/ spin_lock_irqsave(&wdev->event_lock, flags); list_add_tail(&ev->list, &wdev->event_list); spin_unlock_irqrestore(&wdev->event_lock, flags); queue_work(cfg80211_wq, &rdev->event_work); } EXPORT_SYMBOL(cfg80211_port_authorized); void __cfg80211_disconnected(struct net_device *dev, const u8 *ie, size_t ie_len, u16 reason, bool from_ap) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); int i; #ifdef CPTCFG_CFG80211_WEXT union iwreq_data wrqu; #endif ASSERT_WDEV_LOCK(wdev); if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION && wdev->iftype != NL80211_IFTYPE_P2P_CLIENT)) return; if (wdev->current_bss) { cfg80211_unhold_bss(wdev->current_bss); cfg80211_put_bss(wdev->wiphy, &wdev->current_bss->pub); } wdev->current_bss = NULL; wdev->ssid_len = 0; wdev->conn_owner_nlportid = 0; kzfree(wdev->connect_keys); wdev->connect_keys = NULL; nl80211_send_disconnected(rdev, dev, reason, ie, ie_len, from_ap); /* stop critical protocol if supported */ if (rdev->ops->crit_proto_stop && rdev->crit_proto_nlportid) { rdev->crit_proto_nlportid = 0; rdev_crit_proto_stop(rdev, wdev); } /* * Delete all the keys ... pairwise keys can't really * exist any more anyway, but default keys might. */ if (rdev->ops->del_key) for (i = 0; i < 6; i++) rdev_del_key(rdev, dev, i, false, NULL); rdev_set_qos_map(rdev, dev, NULL); #ifdef CPTCFG_CFG80211_WEXT memset(&wrqu, 0, sizeof(wrqu)); wrqu.ap_addr.sa_family = ARPHRD_ETHER; wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL); wdev->wext.connect.ssid_len = 0; #endif schedule_work(&cfg80211_disconnect_work); } void cfg80211_disconnected(struct net_device *dev, u16 reason, const u8 *ie, size_t ie_len, bool locally_generated, gfp_t gfp) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); struct cfg80211_event *ev; unsigned long flags; ev = kzalloc(sizeof(*ev) + ie_len, gfp); if (!ev) return; ev->type = EVENT_DISCONNECTED; ev->dc.ie = ((u8 *)ev) + sizeof(*ev); ev->dc.ie_len = ie_len; memcpy((void *)ev->dc.ie, ie, ie_len); ev->dc.reason = reason; ev->dc.locally_generated = locally_generated; spin_lock_irqsave(&wdev->event_lock, flags); list_add_tail(&ev->list, &wdev->event_list); spin_unlock_irqrestore(&wdev->event_lock, flags); queue_work(cfg80211_wq, &rdev->event_work); } EXPORT_SYMBOL(cfg80211_disconnected); /* * API calls for nl80211/wext compatibility code */ int cfg80211_connect(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_connect_params *connect, struct cfg80211_cached_keys *connkeys, const u8 *prev_bssid) { struct wireless_dev *wdev = dev->ieee80211_ptr; int err; ASSERT_WDEV_LOCK(wdev); /* * If we have an ssid_len, we're trying to connect or are * already connected, so reject a new SSID unless it's the * same (which is the case for re-association.) */ if (wdev->ssid_len && (wdev->ssid_len != connect->ssid_len || memcmp(wdev->ssid, connect->ssid, wdev->ssid_len))) return -EALREADY; /* * If connected, reject (re-)association unless prev_bssid * matches the current BSSID. */ if (wdev->current_bss) { if (!prev_bssid) return -EALREADY; if (!ether_addr_equal(prev_bssid, wdev->current_bss->pub.bssid)) return -ENOTCONN; } /* * Reject if we're in the process of connecting with WEP, * this case isn't very interesting and trying to handle * it would make the code much more complex. 
*/ if (wdev->connect_keys) return -EINPROGRESS; cfg80211_oper_and_ht_capa(&connect->ht_capa_mask, rdev->wiphy.ht_capa_mod_mask); cfg80211_oper_and_vht_capa(&connect->vht_capa_mask, rdev->wiphy.vht_capa_mod_mask); if (connkeys && connkeys->def >= 0) { int idx; u32 cipher; idx = connkeys->def; cipher = connkeys->params[idx].cipher; /* If given a WEP key we may need it for shared key auth */ if (cipher == WLAN_CIPHER_SUITE_WEP40 || cipher == WLAN_CIPHER_SUITE_WEP104) { connect->key_idx = idx; connect->key = connkeys->params[idx].key; connect->key_len = connkeys->params[idx].key_len; /* * If ciphers are not set (e.g. when going through * iwconfig), we have to set them appropriately here. */ if (connect->crypto.n_ciphers_group == 0) { connect->crypto.n_ciphers_group = 1; connect->crypto.ciphers_group[0] = cipher; } if (connect->crypto.n_ciphers_pairwise == 0) { connect->crypto.n_ciphers_pairwise = 1; connect->crypto.ciphers_pairwise[0] = cipher; } } connect->crypto.wep_keys = connkeys->params; connect->crypto.wep_tx_key = connkeys->def; } else { if (WARN_ON(connkeys)) return -EINVAL; } wdev->connect_keys = connkeys; memcpy(wdev->ssid, connect->ssid, connect->ssid_len); wdev->ssid_len = connect->ssid_len; wdev->conn_bss_type = connect->pbss ? IEEE80211_BSS_TYPE_PBSS : IEEE80211_BSS_TYPE_ESS; if (!rdev->ops->connect) err = cfg80211_sme_connect(wdev, connect, prev_bssid); else err = rdev_connect(rdev, dev, connect); if (err) { wdev->connect_keys = NULL; /* * This could be reassoc getting refused, don't clear * ssid_len in that case. */ if (!wdev->current_bss) wdev->ssid_len = 0; return err; } return 0; } int cfg80211_disconnect(struct cfg80211_registered_device *rdev, struct net_device *dev, u16 reason, bool wextev) { struct wireless_dev *wdev = dev->ieee80211_ptr; int err = 0; ASSERT_WDEV_LOCK(wdev); kzfree(wdev->connect_keys); wdev->connect_keys = NULL; wdev->conn_owner_nlportid = 0; if (wdev->conn) err = cfg80211_sme_disconnect(wdev, reason); else if (!rdev->ops->disconnect) cfg80211_mlme_down(rdev, dev); else if (wdev->ssid_len) err = rdev_disconnect(rdev, dev, reason); /* * Clear ssid_len unless we actually were fully connected, * in which case cfg80211_disconnected() will take care of * this later. */ if (!wdev->current_bss) wdev->ssid_len = 0; return err; } /* * Used to clean up after the connection / connection attempt owner socket * disconnects */ void cfg80211_autodisconnect_wk(struct work_struct *work) { struct wireless_dev *wdev = container_of(work, struct wireless_dev, disconnect_wk); struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); wdev_lock(wdev); if (wdev->conn_owner_nlportid) { switch (wdev->iftype) { case NL80211_IFTYPE_ADHOC: cfg80211_leave_ibss(rdev, wdev->netdev, false); break; case NL80211_IFTYPE_AP: case NL80211_IFTYPE_P2P_GO: cfg80211_stop_ap(rdev, wdev->netdev, false); break; case NL80211_IFTYPE_MESH_POINT: cfg80211_leave_mesh(rdev, wdev->netdev); break; case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_P2P_CLIENT: /* * Use disconnect_bssid if still connecting and * ops->disconnect not implemented. Otherwise we can * use cfg80211_disconnect. 
*/ if (rdev->ops->disconnect || wdev->current_bss) cfg80211_disconnect(rdev, wdev->netdev, WLAN_REASON_DEAUTH_LEAVING, true); else cfg80211_mlme_deauth(rdev, wdev->netdev, wdev->disconnect_bssid, NULL, 0, WLAN_REASON_DEAUTH_LEAVING, false); break; default: break; } } wdev_unlock(wdev); } backport-iwlwifi-dkms-7906/net/wireless/sysfs.c000066400000000000000000000104651351064634500216450ustar00rootroot00000000000000/* * This file provides /sys/class/ieee80211// * and some default attributes. * * Copyright 2005-2006 Jiri Benc * Copyright 2006 Johannes Berg * * This file is GPLv2 as found in COPYING. */ #include #include #include #include #include #include #include "sysfs.h" #include "core.h" #include "rdev-ops.h" static inline struct cfg80211_registered_device *dev_to_rdev( struct device *dev) { return container_of(dev, struct cfg80211_registered_device, wiphy.dev); } #define SHOW_FMT(name, fmt, member) \ static ssize_t name ## _show(struct device *dev, \ struct device_attribute *attr, \ char *buf) \ { \ return sprintf(buf, fmt "\n", dev_to_rdev(dev)->member); \ } \ static DEVICE_ATTR_RO(name) SHOW_FMT(index, "%d", wiphy_idx); SHOW_FMT(macaddress, "%pM", wiphy.perm_addr); SHOW_FMT(address_mask, "%pM", wiphy.addr_mask); static ssize_t name_show(struct device *dev, struct device_attribute *attr, char *buf) { struct wiphy *wiphy = &dev_to_rdev(dev)->wiphy; return sprintf(buf, "%s\n", wiphy_name(wiphy)); } static DEVICE_ATTR_RO(name); static ssize_t addresses_show(struct device *dev, struct device_attribute *attr, char *buf) { struct wiphy *wiphy = &dev_to_rdev(dev)->wiphy; char *start = buf; int i; if (!wiphy->addresses) return sprintf(buf, "%pM\n", wiphy->perm_addr); for (i = 0; i < wiphy->n_addresses; i++) buf += sprintf(buf, "%pM\n", wiphy->addresses[i].addr); return buf - start; } static DEVICE_ATTR_RO(addresses); static struct attribute *ieee80211_attrs[] = { &dev_attr_index.attr, &dev_attr_macaddress.attr, &dev_attr_address_mask.attr, &dev_attr_addresses.attr, &dev_attr_name.attr, NULL, }; #if LINUX_VERSION_IS_GEQ(3,11,0) ATTRIBUTE_GROUPS(ieee80211); #else #define BP_ATTR_GRP_STRUCT device_attribute ATTRIBUTE_GROUPS_BACKPORT(ieee80211); #endif static void wiphy_dev_release(struct device *dev) { struct cfg80211_registered_device *rdev = dev_to_rdev(dev); cfg80211_dev_free(rdev); } static int wiphy_uevent(struct device *dev, struct kobj_uevent_env *env) { /* TODO, we probably need stuff here */ return 0; } #ifdef CONFIG_PM_SLEEP static void cfg80211_leave_all(struct cfg80211_registered_device *rdev) { struct wireless_dev *wdev; list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) cfg80211_leave(rdev, wdev); } static int wiphy_suspend(struct device *dev) { struct cfg80211_registered_device *rdev = dev_to_rdev(dev); int ret = 0; rdev->suspend_at = ktime_get_boottime_seconds(); trace_rdev_suspend_enter(&rdev->wiphy); rtnl_lock(); if (rdev->wiphy.registered) { if (!rdev->wiphy.wowlan_config) { cfg80211_leave_all(rdev); cfg80211_process_rdev_events(rdev); } if (rdev->ops->suspend) ret = rdev_suspend(rdev, rdev->wiphy.wowlan_config); if (ret == 1) { /* Driver refuse to configure wowlan */ cfg80211_leave_all(rdev); cfg80211_process_rdev_events(rdev); ret = rdev_suspend(rdev, NULL); } } trace_rdev_suspend_leave(&rdev->wiphy); rtnl_unlock(); return ret; } static int wiphy_resume(struct device *dev) { struct cfg80211_registered_device *rdev = dev_to_rdev(dev); int ret = 0; /* Age scan results with time spent in suspend */ cfg80211_bss_age(rdev, ktime_get_boottime_seconds() - rdev->suspend_at); 
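	/*
	 * Resume runs under the RTNL; the driver's resume op is only
	 * invoked if the wiphy is still registered and the driver
	 * actually provides one.
	 */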
rtnl_lock(); if (rdev->wiphy.registered && rdev->ops->resume) ret = rdev_resume(rdev); rtnl_unlock(); return ret; } static SIMPLE_DEV_PM_OPS(wiphy_pm_ops, wiphy_suspend, wiphy_resume); #define WIPHY_PM_OPS (&wiphy_pm_ops) #else #define WIPHY_PM_OPS NULL #endif static const void *wiphy_namespace(struct device *d) { struct wiphy *wiphy = container_of(d, struct wiphy, dev); return wiphy_net(wiphy); } struct class ieee80211_class = { .name = "ieee80211", .owner = THIS_MODULE, .dev_release = wiphy_dev_release, #if LINUX_VERSION_IS_GEQ(3,11,0) .dev_groups = ieee80211_groups, #else .dev_attrs = ieee80211_dev_attrs, #endif .dev_uevent = wiphy_uevent, .pm = WIPHY_PM_OPS, .ns_type = &net_ns_type_operations, .namespace = wiphy_namespace, }; int wiphy_sysfs_init(void) { init_ieee80211_attrs(); return class_register(&ieee80211_class); } void wiphy_sysfs_exit(void) { class_unregister(&ieee80211_class); } backport-iwlwifi-dkms-7906/net/wireless/sysfs.h000066400000000000000000000003361351064634500216460ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __WIRELESS_SYSFS_H #define __WIRELESS_SYSFS_H int wiphy_sysfs_init(void); void wiphy_sysfs_exit(void); extern struct class ieee80211_class; #endif /* __WIRELESS_SYSFS_H */ backport-iwlwifi-dkms-7906/net/wireless/trace.c000066400000000000000000000002531351064634500215660ustar00rootroot00000000000000#include #if LINUX_VERSION_IS_LESS(3,1,0) #include #endif #ifndef __CHECKER__ #define CREATE_TRACE_POINTS #include "trace.h" #endif backport-iwlwifi-dkms-7906/net/wireless/trace.h000066400000000000000000002703771351064634500216130ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0 */ /* * Portions of this file * Copyright(c) 2016-2017 Intel Deutschland GmbH * Copyright (C) 2018 Intel Corporation */ #undef TRACE_SYSTEM #define TRACE_SYSTEM cfg80211 #if !defined(__RDEV_OPS_TRACE) || defined(TRACE_HEADER_MULTI_READ) #define __RDEV_OPS_TRACE #include #include #include #include #include "core.h" #define MAC_ENTRY(entry_mac) __array(u8, entry_mac, ETH_ALEN) #define MAC_ASSIGN(entry_mac, given_mac) do { \ if (given_mac) \ memcpy(__entry->entry_mac, given_mac, ETH_ALEN); \ else \ eth_zero_addr(__entry->entry_mac); \ } while (0) #define MAC_PR_FMT "%pM" #define MAC_PR_ARG(entry_mac) (__entry->entry_mac) #define MAXNAME 32 #define WIPHY_ENTRY __array(char, wiphy_name, 32) #define WIPHY_ASSIGN strlcpy(__entry->wiphy_name, wiphy_name(wiphy), MAXNAME) #define WIPHY_PR_FMT "%s" #define WIPHY_PR_ARG __entry->wiphy_name #define WDEV_ENTRY __field(u32, id) #define WDEV_ASSIGN (__entry->id) = (!IS_ERR_OR_NULL(wdev) \ ? 
wdev->identifier : 0) #define WDEV_PR_FMT "wdev(%u)" #define WDEV_PR_ARG (__entry->id) #define NETDEV_ENTRY __array(char, name, IFNAMSIZ) \ __field(int, ifindex) #define NETDEV_ASSIGN \ do { \ memcpy(__entry->name, netdev->name, IFNAMSIZ); \ (__entry->ifindex) = (netdev->ifindex); \ } while (0) #define NETDEV_PR_FMT "netdev:%s(%d)" #define NETDEV_PR_ARG __entry->name, __entry->ifindex #define MESH_CFG_ENTRY __field(u16, dot11MeshRetryTimeout) \ __field(u16, dot11MeshConfirmTimeout) \ __field(u16, dot11MeshHoldingTimeout) \ __field(u16, dot11MeshMaxPeerLinks) \ __field(u8, dot11MeshMaxRetries) \ __field(u8, dot11MeshTTL) \ __field(u8, element_ttl) \ __field(bool, auto_open_plinks) \ __field(u32, dot11MeshNbrOffsetMaxNeighbor) \ __field(u8, dot11MeshHWMPmaxPREQretries) \ __field(u32, path_refresh_time) \ __field(u32, dot11MeshHWMPactivePathTimeout) \ __field(u16, min_discovery_timeout) \ __field(u16, dot11MeshHWMPpreqMinInterval) \ __field(u16, dot11MeshHWMPperrMinInterval) \ __field(u16, dot11MeshHWMPnetDiameterTraversalTime) \ __field(u8, dot11MeshHWMPRootMode) \ __field(u16, dot11MeshHWMPRannInterval) \ __field(bool, dot11MeshGateAnnouncementProtocol) \ __field(bool, dot11MeshForwarding) \ __field(s32, rssi_threshold) \ __field(u16, ht_opmode) \ __field(u32, dot11MeshHWMPactivePathToRootTimeout) \ __field(u16, dot11MeshHWMProotInterval) \ __field(u16, dot11MeshHWMPconfirmationInterval) #define MESH_CFG_ASSIGN \ do { \ __entry->dot11MeshRetryTimeout = conf->dot11MeshRetryTimeout; \ __entry->dot11MeshConfirmTimeout = \ conf->dot11MeshConfirmTimeout; \ __entry->dot11MeshHoldingTimeout = \ conf->dot11MeshHoldingTimeout; \ __entry->dot11MeshMaxPeerLinks = conf->dot11MeshMaxPeerLinks; \ __entry->dot11MeshMaxRetries = conf->dot11MeshMaxRetries; \ __entry->dot11MeshTTL = conf->dot11MeshTTL; \ __entry->element_ttl = conf->element_ttl; \ __entry->auto_open_plinks = conf->auto_open_plinks; \ __entry->dot11MeshNbrOffsetMaxNeighbor = \ conf->dot11MeshNbrOffsetMaxNeighbor; \ __entry->dot11MeshHWMPmaxPREQretries = \ conf->dot11MeshHWMPmaxPREQretries; \ __entry->path_refresh_time = conf->path_refresh_time; \ __entry->dot11MeshHWMPactivePathTimeout = \ conf->dot11MeshHWMPactivePathTimeout; \ __entry->min_discovery_timeout = conf->min_discovery_timeout; \ __entry->dot11MeshHWMPpreqMinInterval = \ conf->dot11MeshHWMPpreqMinInterval; \ __entry->dot11MeshHWMPperrMinInterval = \ conf->dot11MeshHWMPperrMinInterval; \ __entry->dot11MeshHWMPnetDiameterTraversalTime = \ conf->dot11MeshHWMPnetDiameterTraversalTime; \ __entry->dot11MeshHWMPRootMode = conf->dot11MeshHWMPRootMode; \ __entry->dot11MeshHWMPRannInterval = \ conf->dot11MeshHWMPRannInterval; \ __entry->dot11MeshGateAnnouncementProtocol = \ conf->dot11MeshGateAnnouncementProtocol; \ __entry->dot11MeshForwarding = conf->dot11MeshForwarding; \ __entry->rssi_threshold = conf->rssi_threshold; \ __entry->ht_opmode = conf->ht_opmode; \ __entry->dot11MeshHWMPactivePathToRootTimeout = \ conf->dot11MeshHWMPactivePathToRootTimeout; \ __entry->dot11MeshHWMProotInterval = \ conf->dot11MeshHWMProotInterval; \ __entry->dot11MeshHWMPconfirmationInterval = \ conf->dot11MeshHWMPconfirmationInterval; \ } while (0) #define CHAN_ENTRY __field(enum nl80211_band, band) \ __field(u32, center_freq) #define CHAN_ASSIGN(chan) \ do { \ if (chan) { \ __entry->band = chan->band; \ __entry->center_freq = chan->center_freq; \ } else { \ __entry->band = 0; \ __entry->center_freq = 0; \ } \ } while (0) #define CHAN_PR_FMT "band: %d, freq: %u" #define CHAN_PR_ARG __entry->band, 
__entry->center_freq #define CHAN_DEF_ENTRY __field(enum nl80211_band, band) \ __field(u32, control_freq) \ __field(u32, width) \ __field(u32, center_freq1) \ __field(u32, center_freq2) #define CHAN_DEF_ASSIGN(chandef) \ do { \ if ((chandef) && (chandef)->chan) { \ __entry->band = (chandef)->chan->band; \ __entry->control_freq = \ (chandef)->chan->center_freq; \ __entry->width = (chandef)->width; \ __entry->center_freq1 = (chandef)->center_freq1;\ __entry->center_freq2 = (chandef)->center_freq2;\ } else { \ __entry->band = 0; \ __entry->control_freq = 0; \ __entry->width = 0; \ __entry->center_freq1 = 0; \ __entry->center_freq2 = 0; \ } \ } while (0) #define CHAN_DEF_PR_FMT \ "band: %d, control freq: %u, width: %d, cf1: %u, cf2: %u" #define CHAN_DEF_PR_ARG __entry->band, __entry->control_freq, \ __entry->width, __entry->center_freq1, \ __entry->center_freq2 #define SINFO_ENTRY __field(int, generation) \ __field(u32, connected_time) \ __field(u32, inactive_time) \ __field(u32, rx_bytes) \ __field(u32, tx_bytes) \ __field(u32, rx_packets) \ __field(u32, tx_packets) \ __field(u32, tx_retries) \ __field(u32, tx_failed) \ __field(u32, rx_dropped_misc) \ __field(u32, beacon_loss_count) \ __field(u16, llid) \ __field(u16, plid) \ __field(u8, plink_state) #define SINFO_ASSIGN \ do { \ __entry->generation = sinfo->generation; \ __entry->connected_time = sinfo->connected_time; \ __entry->inactive_time = sinfo->inactive_time; \ __entry->rx_bytes = sinfo->rx_bytes; \ __entry->tx_bytes = sinfo->tx_bytes; \ __entry->rx_packets = sinfo->rx_packets; \ __entry->tx_packets = sinfo->tx_packets; \ __entry->tx_retries = sinfo->tx_retries; \ __entry->tx_failed = sinfo->tx_failed; \ __entry->rx_dropped_misc = sinfo->rx_dropped_misc; \ __entry->beacon_loss_count = sinfo->beacon_loss_count; \ __entry->llid = sinfo->llid; \ __entry->plid = sinfo->plid; \ __entry->plink_state = sinfo->plink_state; \ } while (0) #define BOOL_TO_STR(bo) (bo) ? 
"true" : "false" #define QOS_MAP_ENTRY __field(u8, num_des) \ __array(u8, dscp_exception, \ 2 * IEEE80211_QOS_MAP_MAX_EX) \ __array(u8, up, IEEE80211_QOS_MAP_LEN_MIN) #define QOS_MAP_ASSIGN(qos_map) \ do { \ if ((qos_map)) { \ __entry->num_des = (qos_map)->num_des; \ memcpy(__entry->dscp_exception, \ &(qos_map)->dscp_exception, \ 2 * IEEE80211_QOS_MAP_MAX_EX); \ memcpy(__entry->up, &(qos_map)->up, \ IEEE80211_QOS_MAP_LEN_MIN); \ } else { \ __entry->num_des = 0; \ memset(__entry->dscp_exception, 0, \ 2 * IEEE80211_QOS_MAP_MAX_EX); \ memset(__entry->up, 0, \ IEEE80211_QOS_MAP_LEN_MIN); \ } \ } while (0) /************************************************************* * rdev->ops traces * *************************************************************/ TRACE_EVENT(rdev_suspend, TP_PROTO(struct wiphy *wiphy, struct cfg80211_wowlan *wow), TP_ARGS(wiphy, wow), TP_STRUCT__entry( WIPHY_ENTRY __field(bool, any) __field(bool, disconnect) __field(bool, magic_pkt) __field(bool, gtk_rekey_failure) __field(bool, eap_identity_req) __field(bool, four_way_handshake) __field(bool, rfkill_release) __field(bool, valid_wow) ), TP_fast_assign( WIPHY_ASSIGN; if (wow) { __entry->any = wow->any; __entry->disconnect = wow->disconnect; __entry->magic_pkt = wow->magic_pkt; __entry->gtk_rekey_failure = wow->gtk_rekey_failure; __entry->eap_identity_req = wow->eap_identity_req; __entry->four_way_handshake = wow->four_way_handshake; __entry->rfkill_release = wow->rfkill_release; __entry->valid_wow = true; } else { __entry->valid_wow = false; } ), TP_printk(WIPHY_PR_FMT ", wow%s - any: %d, disconnect: %d, " "magic pkt: %d, gtk rekey failure: %d, eap identify req: %d, " "four way handshake: %d, rfkill release: %d.", WIPHY_PR_ARG, __entry->valid_wow ? "" : "(Not configured!)", __entry->any, __entry->disconnect, __entry->magic_pkt, __entry->gtk_rekey_failure, __entry->eap_identity_req, __entry->four_way_handshake, __entry->rfkill_release) ); TRACE_EVENT(rdev_return_int, TP_PROTO(struct wiphy *wiphy, int ret), TP_ARGS(wiphy, ret), TP_STRUCT__entry( WIPHY_ENTRY __field(int, ret) ), TP_fast_assign( WIPHY_ASSIGN; __entry->ret = ret; ), TP_printk(WIPHY_PR_FMT ", returned: %d", WIPHY_PR_ARG, __entry->ret) ); TRACE_EVENT(rdev_scan, TP_PROTO(struct wiphy *wiphy, struct cfg80211_scan_request *request), TP_ARGS(wiphy, request), TP_STRUCT__entry( WIPHY_ENTRY ), TP_fast_assign( WIPHY_ASSIGN; ), TP_printk(WIPHY_PR_FMT, WIPHY_PR_ARG) ); DECLARE_EVENT_CLASS(wiphy_only_evt, TP_PROTO(struct wiphy *wiphy), TP_ARGS(wiphy), TP_STRUCT__entry( WIPHY_ENTRY ), TP_fast_assign( WIPHY_ASSIGN; ), TP_printk(WIPHY_PR_FMT, WIPHY_PR_ARG) ); DEFINE_EVENT(wiphy_only_evt, rdev_suspend_enter, TP_PROTO(struct wiphy *wiphy), TP_ARGS(wiphy) ); DEFINE_EVENT(wiphy_only_evt, rdev_suspend_leave, TP_PROTO(struct wiphy *wiphy), TP_ARGS(wiphy) ); DEFINE_EVENT(wiphy_only_evt, rdev_resume, TP_PROTO(struct wiphy *wiphy), TP_ARGS(wiphy) ); DEFINE_EVENT(wiphy_only_evt, rdev_return_void, TP_PROTO(struct wiphy *wiphy), TP_ARGS(wiphy) ); DEFINE_EVENT(wiphy_only_evt, rdev_get_antenna, TP_PROTO(struct wiphy *wiphy), TP_ARGS(wiphy) ); DEFINE_EVENT(wiphy_only_evt, rdev_rfkill_poll, TP_PROTO(struct wiphy *wiphy), TP_ARGS(wiphy) ); DECLARE_EVENT_CLASS(wiphy_enabled_evt, TP_PROTO(struct wiphy *wiphy, bool enabled), TP_ARGS(wiphy, enabled), TP_STRUCT__entry( WIPHY_ENTRY __field(bool, enabled) ), TP_fast_assign( WIPHY_ASSIGN; __entry->enabled = enabled; ), TP_printk(WIPHY_PR_FMT ", %senabled ", WIPHY_PR_ARG, __entry->enabled ? 
"" : "not ") ); DEFINE_EVENT(wiphy_enabled_evt, rdev_set_wakeup, TP_PROTO(struct wiphy *wiphy, bool enabled), TP_ARGS(wiphy, enabled) ); TRACE_EVENT(rdev_add_virtual_intf, TP_PROTO(struct wiphy *wiphy, char *name, enum nl80211_iftype type), TP_ARGS(wiphy, name, type), TP_STRUCT__entry( WIPHY_ENTRY __string(vir_intf_name, name ? name : "") __field(enum nl80211_iftype, type) ), TP_fast_assign( WIPHY_ASSIGN; __assign_str(vir_intf_name, name ? name : ""); __entry->type = type; ), TP_printk(WIPHY_PR_FMT ", virtual intf name: %s, type: %d", WIPHY_PR_ARG, __get_str(vir_intf_name), __entry->type) ); DECLARE_EVENT_CLASS(wiphy_wdev_evt, TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev), TP_ARGS(wiphy, wdev), TP_STRUCT__entry( WIPHY_ENTRY WDEV_ENTRY ), TP_fast_assign( WIPHY_ASSIGN; WDEV_ASSIGN; ), TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT, WIPHY_PR_ARG, WDEV_PR_ARG) ); DECLARE_EVENT_CLASS(wiphy_wdev_cookie_evt, TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev, u64 cookie), TP_ARGS(wiphy, wdev, cookie), TP_STRUCT__entry( WIPHY_ENTRY WDEV_ENTRY __field(u64, cookie) ), TP_fast_assign( WIPHY_ASSIGN; WDEV_ASSIGN; __entry->cookie = cookie; ), TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT ", cookie: %lld", WIPHY_PR_ARG, WDEV_PR_ARG, (unsigned long long)__entry->cookie) ); DEFINE_EVENT(wiphy_wdev_evt, rdev_return_wdev, TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev), TP_ARGS(wiphy, wdev) ); DEFINE_EVENT(wiphy_wdev_evt, rdev_del_virtual_intf, TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev), TP_ARGS(wiphy, wdev) ); TRACE_EVENT(rdev_change_virtual_intf, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, enum nl80211_iftype type), TP_ARGS(wiphy, netdev, type), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY __field(enum nl80211_iftype, type) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; __entry->type = type; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", type: %d", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->type) ); DECLARE_EVENT_CLASS(key_handle, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u8 key_index, bool pairwise, const u8 *mac_addr), TP_ARGS(wiphy, netdev, key_index, pairwise, mac_addr), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY MAC_ENTRY(mac_addr) __field(u8, key_index) __field(bool, pairwise) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; MAC_ASSIGN(mac_addr, mac_addr); __entry->key_index = key_index; __entry->pairwise = pairwise; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", key_index: %u, pairwise: %s, mac addr: " MAC_PR_FMT, WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->key_index, BOOL_TO_STR(__entry->pairwise), MAC_PR_ARG(mac_addr)) ); DEFINE_EVENT(key_handle, rdev_add_key, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u8 key_index, bool pairwise, const u8 *mac_addr), TP_ARGS(wiphy, netdev, key_index, pairwise, mac_addr) ); DEFINE_EVENT(key_handle, rdev_get_key, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u8 key_index, bool pairwise, const u8 *mac_addr), TP_ARGS(wiphy, netdev, key_index, pairwise, mac_addr) ); DEFINE_EVENT(key_handle, rdev_del_key, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u8 key_index, bool pairwise, const u8 *mac_addr), TP_ARGS(wiphy, netdev, key_index, pairwise, mac_addr) ); TRACE_EVENT(rdev_set_default_key, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u8 key_index, bool unicast, bool multicast), TP_ARGS(wiphy, netdev, key_index, unicast, multicast), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY __field(u8, key_index) __field(bool, unicast) __field(bool, multicast) ), TP_fast_assign( WIPHY_ASSIGN; 
NETDEV_ASSIGN; __entry->key_index = key_index; __entry->unicast = unicast; __entry->multicast = multicast; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", key index: %u, unicast: %s, multicast: %s", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->key_index, BOOL_TO_STR(__entry->unicast), BOOL_TO_STR(__entry->multicast)) ); TRACE_EVENT(rdev_set_default_mgmt_key, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u8 key_index), TP_ARGS(wiphy, netdev, key_index), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY __field(u8, key_index) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; __entry->key_index = key_index; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", key index: %u", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->key_index) ); TRACE_EVENT(rdev_start_ap, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, struct cfg80211_ap_settings *settings), TP_ARGS(wiphy, netdev, settings), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY CHAN_DEF_ENTRY __field(int, beacon_interval) __field(int, dtim_period) __array(char, ssid, IEEE80211_MAX_SSID_LEN + 1) __field(enum nl80211_hidden_ssid, hidden_ssid) __field(u32, wpa_ver) __field(bool, privacy) __field(enum nl80211_auth_type, auth_type) __field(int, inactivity_timeout) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; CHAN_DEF_ASSIGN(&settings->chandef); __entry->beacon_interval = settings->beacon_interval; __entry->dtim_period = settings->dtim_period; __entry->hidden_ssid = settings->hidden_ssid; __entry->wpa_ver = settings->crypto.wpa_versions; __entry->privacy = settings->privacy; __entry->auth_type = settings->auth_type; __entry->inactivity_timeout = settings->inactivity_timeout; memset(__entry->ssid, 0, IEEE80211_MAX_SSID_LEN + 1); memcpy(__entry->ssid, settings->ssid, settings->ssid_len); ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", AP settings - ssid: %s, " CHAN_DEF_PR_FMT ", beacon interval: %d, dtim period: %d, " "hidden ssid: %d, wpa versions: %u, privacy: %s, " "auth type: %d, inactivity timeout: %d", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->ssid, CHAN_DEF_PR_ARG, __entry->beacon_interval, __entry->dtim_period, __entry->hidden_ssid, __entry->wpa_ver, BOOL_TO_STR(__entry->privacy), __entry->auth_type, __entry->inactivity_timeout) ); TRACE_EVENT(rdev_change_beacon, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, struct cfg80211_beacon_data *info), TP_ARGS(wiphy, netdev, info), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY __dynamic_array(u8, head, info ? info->head_len : 0) __dynamic_array(u8, tail, info ? info->tail_len : 0) __dynamic_array(u8, beacon_ies, info ? info->beacon_ies_len : 0) __dynamic_array(u8, proberesp_ies, info ? info->proberesp_ies_len : 0) __dynamic_array(u8, assocresp_ies, info ? info->assocresp_ies_len : 0) __dynamic_array(u8, probe_resp, info ? 
info->probe_resp_len : 0) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; if (info) { if (info->head) memcpy(__get_dynamic_array(head), info->head, info->head_len); if (info->tail) memcpy(__get_dynamic_array(tail), info->tail, info->tail_len); if (info->beacon_ies) memcpy(__get_dynamic_array(beacon_ies), info->beacon_ies, info->beacon_ies_len); if (info->proberesp_ies) memcpy(__get_dynamic_array(proberesp_ies), info->proberesp_ies, info->proberesp_ies_len); if (info->assocresp_ies) memcpy(__get_dynamic_array(assocresp_ies), info->assocresp_ies, info->assocresp_ies_len); if (info->probe_resp) memcpy(__get_dynamic_array(probe_resp), info->probe_resp, info->probe_resp_len); } ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT, WIPHY_PR_ARG, NETDEV_PR_ARG) ); DECLARE_EVENT_CLASS(wiphy_netdev_evt, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev), TP_ARGS(wiphy, netdev), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT, WIPHY_PR_ARG, NETDEV_PR_ARG) ); DEFINE_EVENT(wiphy_netdev_evt, rdev_stop_ap, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev), TP_ARGS(wiphy, netdev) ); DEFINE_EVENT(wiphy_netdev_evt, rdev_set_rekey_data, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev), TP_ARGS(wiphy, netdev) ); DEFINE_EVENT(wiphy_netdev_evt, rdev_get_mesh_config, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev), TP_ARGS(wiphy, netdev) ); DEFINE_EVENT(wiphy_netdev_evt, rdev_leave_mesh, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev), TP_ARGS(wiphy, netdev) ); DEFINE_EVENT(wiphy_netdev_evt, rdev_leave_ibss, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev), TP_ARGS(wiphy, netdev) ); DEFINE_EVENT(wiphy_netdev_evt, rdev_leave_ocb, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev), TP_ARGS(wiphy, netdev) ); DEFINE_EVENT(wiphy_netdev_evt, rdev_flush_pmksa, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev), TP_ARGS(wiphy, netdev) ); DECLARE_EVENT_CLASS(station_add_change, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u8 *mac, struct station_parameters *params), TP_ARGS(wiphy, netdev, mac, params), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY MAC_ENTRY(sta_mac) __field(u32, sta_flags_mask) __field(u32, sta_flags_set) __field(u32, sta_modify_mask) __field(int, listen_interval) __field(u16, capability) __field(u16, aid) __field(u8, plink_action) __field(u8, plink_state) __field(u8, uapsd_queues) __field(u8, max_sp) __field(u8, opmode_notif) __field(bool, opmode_notif_used) __array(u8, ht_capa, (int)sizeof(struct ieee80211_ht_cap)) __array(u8, vht_capa, (int)sizeof(struct ieee80211_vht_cap)) __array(char, vlan, IFNAMSIZ) __dynamic_array(u8, supported_rates, params->supported_rates_len) __dynamic_array(u8, ext_capab, params->ext_capab_len) __dynamic_array(u8, supported_channels, params->supported_channels_len) __dynamic_array(u8, supported_oper_classes, params->supported_oper_classes_len) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; MAC_ASSIGN(sta_mac, mac); __entry->sta_flags_mask = params->sta_flags_mask; __entry->sta_flags_set = params->sta_flags_set; __entry->sta_modify_mask = params->sta_modify_mask; __entry->listen_interval = params->listen_interval; __entry->aid = params->aid; __entry->plink_action = params->plink_action; __entry->plink_state = params->plink_state; __entry->uapsd_queues = params->uapsd_queues; memset(__entry->ht_capa, 0, sizeof(struct ieee80211_ht_cap)); if (params->ht_capa) memcpy(__entry->ht_capa, params->ht_capa, sizeof(struct 
ieee80211_ht_cap)); memset(__entry->vht_capa, 0, sizeof(struct ieee80211_vht_cap)); if (params->vht_capa) memcpy(__entry->vht_capa, params->vht_capa, sizeof(struct ieee80211_vht_cap)); memset(__entry->vlan, 0, sizeof(__entry->vlan)); if (params->vlan) memcpy(__entry->vlan, params->vlan->name, IFNAMSIZ); if (params->supported_rates && params->supported_rates_len) memcpy(__get_dynamic_array(supported_rates), params->supported_rates, params->supported_rates_len); if (params->ext_capab && params->ext_capab_len) memcpy(__get_dynamic_array(ext_capab), params->ext_capab, params->ext_capab_len); if (params->supported_channels && params->supported_channels_len) memcpy(__get_dynamic_array(supported_channels), params->supported_channels, params->supported_channels_len); if (params->supported_oper_classes && params->supported_oper_classes_len) memcpy(__get_dynamic_array(supported_oper_classes), params->supported_oper_classes, params->supported_oper_classes_len); __entry->max_sp = params->max_sp; __entry->capability = params->capability; __entry->opmode_notif = params->opmode_notif; __entry->opmode_notif_used = params->opmode_notif_used; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", station mac: " MAC_PR_FMT ", station flags mask: %u, station flags set: %u, " "station modify mask: %u, listen interval: %d, aid: %u, " "plink action: %u, plink state: %u, uapsd queues: %u, vlan:%s", WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(sta_mac), __entry->sta_flags_mask, __entry->sta_flags_set, __entry->sta_modify_mask, __entry->listen_interval, __entry->aid, __entry->plink_action, __entry->plink_state, __entry->uapsd_queues, __entry->vlan) ); DEFINE_EVENT(station_add_change, rdev_add_station, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u8 *mac, struct station_parameters *params), TP_ARGS(wiphy, netdev, mac, params) ); DEFINE_EVENT(station_add_change, rdev_change_station, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u8 *mac, struct station_parameters *params), TP_ARGS(wiphy, netdev, mac, params) ); DECLARE_EVENT_CLASS(wiphy_netdev_mac_evt, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, const u8 *mac), TP_ARGS(wiphy, netdev, mac), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY MAC_ENTRY(sta_mac) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; MAC_ASSIGN(sta_mac, mac); ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", mac: " MAC_PR_FMT, WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(sta_mac)) ); DECLARE_EVENT_CLASS(station_del, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, struct station_del_parameters *params), TP_ARGS(wiphy, netdev, params), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY MAC_ENTRY(sta_mac) __field(u8, subtype) __field(u16, reason_code) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; MAC_ASSIGN(sta_mac, params->mac); __entry->subtype = params->subtype; __entry->reason_code = params->reason_code; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", station mac: " MAC_PR_FMT ", subtype: %u, reason_code: %u", WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(sta_mac), __entry->subtype, __entry->reason_code) ); DEFINE_EVENT(station_del, rdev_del_station, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, struct station_del_parameters *params), TP_ARGS(wiphy, netdev, params) ); DEFINE_EVENT(wiphy_netdev_mac_evt, rdev_get_station, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, const u8 *mac), TP_ARGS(wiphy, netdev, mac) ); DEFINE_EVENT(wiphy_netdev_mac_evt, rdev_del_mpath, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, const u8 *mac), TP_ARGS(wiphy, netdev, mac) ); 
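/*
 * A minimal illustration (not from the original file) of how the class
 * machinery above is meant to be used: TRACE_EVENT() declares a one-off
 * tracepoint, while DECLARE_EVENT_CLASS()/DEFINE_EVENT() let several events
 * with an identical prototype, such as rdev_get_station and rdev_del_mpath,
 * share one record layout.  The event name "rdev_example_mac_op" and the
 * caller below are hypothetical.
 */
#if 0	/* illustrative sketch only, compiled out */
DEFINE_EVENT(wiphy_netdev_mac_evt, rdev_example_mac_op,
	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, const u8 *mac),
	TP_ARGS(wiphy, netdev, mac)
);

/* Callers fire the generated trace_<event>() stub, typically right before
 * invoking the corresponding driver callback: */
static inline void example_caller(struct wiphy *wiphy, struct net_device *dev,
				  const u8 *mac)
{
	trace_rdev_example_mac_op(wiphy, dev, mac);
}
#endif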
DEFINE_EVENT(wiphy_netdev_mac_evt, rdev_set_wds_peer, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, const u8 *mac), TP_ARGS(wiphy, netdev, mac) ); TRACE_EVENT(rdev_dump_station, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, int _idx, u8 *mac), TP_ARGS(wiphy, netdev, _idx, mac), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY MAC_ENTRY(sta_mac) __field(int, idx) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; MAC_ASSIGN(sta_mac, mac); __entry->idx = _idx; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", station mac: " MAC_PR_FMT ", idx: %d", WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(sta_mac), __entry->idx) ); TRACE_EVENT(rdev_return_int_station_info, TP_PROTO(struct wiphy *wiphy, int ret, struct station_info *sinfo), TP_ARGS(wiphy, ret, sinfo), TP_STRUCT__entry( WIPHY_ENTRY __field(int, ret) SINFO_ENTRY ), TP_fast_assign( WIPHY_ASSIGN; __entry->ret = ret; SINFO_ASSIGN; ), TP_printk(WIPHY_PR_FMT ", returned %d", WIPHY_PR_ARG, __entry->ret) ); DECLARE_EVENT_CLASS(mpath_evt, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u8 *dst, u8 *next_hop), TP_ARGS(wiphy, netdev, dst, next_hop), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY MAC_ENTRY(dst) MAC_ENTRY(next_hop) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; MAC_ASSIGN(dst, dst); MAC_ASSIGN(next_hop, next_hop); ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", destination: " MAC_PR_FMT ", next hop: " MAC_PR_FMT, WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(dst), MAC_PR_ARG(next_hop)) ); DEFINE_EVENT(mpath_evt, rdev_add_mpath, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u8 *dst, u8 *next_hop), TP_ARGS(wiphy, netdev, dst, next_hop) ); DEFINE_EVENT(mpath_evt, rdev_change_mpath, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u8 *dst, u8 *next_hop), TP_ARGS(wiphy, netdev, dst, next_hop) ); DEFINE_EVENT(mpath_evt, rdev_get_mpath, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u8 *dst, u8 *next_hop), TP_ARGS(wiphy, netdev, dst, next_hop) ); TRACE_EVENT(rdev_dump_mpath, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, int _idx, u8 *dst, u8 *next_hop), TP_ARGS(wiphy, netdev, _idx, dst, next_hop), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY MAC_ENTRY(dst) MAC_ENTRY(next_hop) __field(int, idx) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; MAC_ASSIGN(dst, dst); MAC_ASSIGN(next_hop, next_hop); __entry->idx = _idx; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", index: %d, destination: " MAC_PR_FMT ", next hop: " MAC_PR_FMT, WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->idx, MAC_PR_ARG(dst), MAC_PR_ARG(next_hop)) ); TRACE_EVENT(rdev_get_mpp, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u8 *dst, u8 *mpp), TP_ARGS(wiphy, netdev, dst, mpp), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY MAC_ENTRY(dst) MAC_ENTRY(mpp) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; MAC_ASSIGN(dst, dst); MAC_ASSIGN(mpp, mpp); ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", destination: " MAC_PR_FMT ", mpp: " MAC_PR_FMT, WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(dst), MAC_PR_ARG(mpp)) ); TRACE_EVENT(rdev_dump_mpp, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, int _idx, u8 *dst, u8 *mpp), TP_ARGS(wiphy, netdev, _idx, dst, mpp), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY MAC_ENTRY(dst) MAC_ENTRY(mpp) __field(int, idx) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; MAC_ASSIGN(dst, dst); MAC_ASSIGN(mpp, mpp); __entry->idx = _idx; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", index: %d, destination: " MAC_PR_FMT ", mpp: " MAC_PR_FMT, WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->idx, MAC_PR_ARG(dst), MAC_PR_ARG(mpp)) ); 
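/*
 * A brief sketch, assuming standard TRACE_EVENT() semantics: TP_ARGS() is
 * bound to TP_PROTO() positionally, not by name, so the two lists must keep
 * the same order.  A swapped TP_ARGS() still compiles, but TP_fast_assign()
 * then sees each value under the wrong parameter name and the trace silently
 * records swapped data.  The event name below is hypothetical.
 */
#if 0	/* illustrative sketch only, compiled out */
TRACE_EVENT(example_positional_binding,
	TP_PROTO(struct wiphy *wiphy, u8 *dst, u8 *mpp),
	TP_ARGS(wiphy, dst, mpp),	/* must mirror the TP_PROTO() order */
	TP_STRUCT__entry(
		WIPHY_ENTRY
		MAC_ENTRY(dst)
		MAC_ENTRY(mpp)
	),
	TP_fast_assign(
		WIPHY_ASSIGN;
		MAC_ASSIGN(dst, dst);
		MAC_ASSIGN(mpp, mpp);
	),
	TP_printk(WIPHY_PR_FMT ", destination: " MAC_PR_FMT ", mpp: " MAC_PR_FMT,
		  WIPHY_PR_ARG, MAC_PR_ARG(dst), MAC_PR_ARG(mpp))
);
#endif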
TRACE_EVENT(rdev_return_int_mpath_info, TP_PROTO(struct wiphy *wiphy, int ret, struct mpath_info *pinfo), TP_ARGS(wiphy, ret, pinfo), TP_STRUCT__entry( WIPHY_ENTRY __field(int, ret) __field(int, generation) __field(u32, filled) __field(u32, frame_qlen) __field(u32, sn) __field(u32, metric) __field(u32, exptime) __field(u32, discovery_timeout) __field(u8, discovery_retries) __field(u8, flags) ), TP_fast_assign( WIPHY_ASSIGN; __entry->ret = ret; __entry->generation = pinfo->generation; __entry->filled = pinfo->filled; __entry->frame_qlen = pinfo->frame_qlen; __entry->sn = pinfo->sn; __entry->metric = pinfo->metric; __entry->exptime = pinfo->exptime; __entry->discovery_timeout = pinfo->discovery_timeout; __entry->discovery_retries = pinfo->discovery_retries; __entry->flags = pinfo->flags; ), TP_printk(WIPHY_PR_FMT ", returned %d. mpath info - generation: %d, " "filled: %u, frame qlen: %u, sn: %u, metric: %u, exptime: %u," " discovery timeout: %u, discovery retries: %u, flags: %u", WIPHY_PR_ARG, __entry->ret, __entry->generation, __entry->filled, __entry->frame_qlen, __entry->sn, __entry->metric, __entry->exptime, __entry->discovery_timeout, __entry->discovery_retries, __entry->flags) ); TRACE_EVENT(rdev_return_int_mesh_config, TP_PROTO(struct wiphy *wiphy, int ret, struct mesh_config *conf), TP_ARGS(wiphy, ret, conf), TP_STRUCT__entry( WIPHY_ENTRY MESH_CFG_ENTRY __field(int, ret) ), TP_fast_assign( WIPHY_ASSIGN; MESH_CFG_ASSIGN; __entry->ret = ret; ), TP_printk(WIPHY_PR_FMT ", returned: %d", WIPHY_PR_ARG, __entry->ret) ); TRACE_EVENT(rdev_update_mesh_config, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u32 mask, const struct mesh_config *conf), TP_ARGS(wiphy, netdev, mask, conf), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY MESH_CFG_ENTRY __field(u32, mask) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; MESH_CFG_ASSIGN; __entry->mask = mask; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", mask: %u", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->mask) ); TRACE_EVENT(rdev_join_mesh, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, const struct mesh_config *conf, const struct mesh_setup *setup), TP_ARGS(wiphy, netdev, conf, setup), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY MESH_CFG_ENTRY ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; MESH_CFG_ASSIGN; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT, WIPHY_PR_ARG, NETDEV_PR_ARG) ); TRACE_EVENT(rdev_change_bss, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, struct bss_parameters *params), TP_ARGS(wiphy, netdev, params), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY __field(int, use_cts_prot) __field(int, use_short_preamble) __field(int, use_short_slot_time) __field(int, ap_isolate) __field(int, ht_opmode) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; __entry->use_cts_prot = params->use_cts_prot; __entry->use_short_preamble = params->use_short_preamble; __entry->use_short_slot_time = params->use_short_slot_time; __entry->ap_isolate = params->ap_isolate; __entry->ht_opmode = params->ht_opmode; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", use cts prot: %d, " "use short preamble: %d, use short slot time: %d, " "ap isolate: %d, ht opmode: %d", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->use_cts_prot, __entry->use_short_preamble, __entry->use_short_slot_time, __entry->ap_isolate, __entry->ht_opmode) ); TRACE_EVENT(rdev_set_txq_params, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, struct ieee80211_txq_params *params), TP_ARGS(wiphy, netdev, params), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY __field(enum nl80211_ac, ac) 
__field(u16, txop) __field(u16, cwmin) __field(u16, cwmax) __field(u8, aifs) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; __entry->ac = params->ac; __entry->txop = params->txop; __entry->cwmin = params->cwmin; __entry->cwmax = params->cwmax; __entry->aifs = params->aifs; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", ac: %d, txop: %u, cwmin: %u, cwmax: %u, aifs: %u", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->ac, __entry->txop, __entry->cwmin, __entry->cwmax, __entry->aifs) ); TRACE_EVENT(rdev_libertas_set_mesh_channel, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, struct ieee80211_channel *chan), TP_ARGS(wiphy, netdev, chan), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY CHAN_ENTRY ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; CHAN_ASSIGN(chan); ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " CHAN_PR_FMT, WIPHY_PR_ARG, NETDEV_PR_ARG, CHAN_PR_ARG) ); TRACE_EVENT(rdev_set_monitor_channel, TP_PROTO(struct wiphy *wiphy, struct cfg80211_chan_def *chandef), TP_ARGS(wiphy, chandef), TP_STRUCT__entry( WIPHY_ENTRY CHAN_DEF_ENTRY ), TP_fast_assign( WIPHY_ASSIGN; CHAN_DEF_ASSIGN(chandef); ), TP_printk(WIPHY_PR_FMT ", " CHAN_DEF_PR_FMT, WIPHY_PR_ARG, CHAN_DEF_PR_ARG) ); TRACE_EVENT(rdev_auth, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, struct cfg80211_auth_request *req), TP_ARGS(wiphy, netdev, req), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY MAC_ENTRY(bssid) __field(enum nl80211_auth_type, auth_type) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; if (req->bss) MAC_ASSIGN(bssid, req->bss->bssid); else eth_zero_addr(__entry->bssid); __entry->auth_type = req->auth_type; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", auth type: %d, bssid: " MAC_PR_FMT, WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->auth_type, MAC_PR_ARG(bssid)) ); TRACE_EVENT(rdev_assoc, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, struct cfg80211_assoc_request *req), TP_ARGS(wiphy, netdev, req), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY MAC_ENTRY(bssid) MAC_ENTRY(prev_bssid) __field(bool, use_mfp) __field(u32, flags) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; if (req->bss) MAC_ASSIGN(bssid, req->bss->bssid); else eth_zero_addr(__entry->bssid); MAC_ASSIGN(prev_bssid, req->prev_bssid); __entry->use_mfp = req->use_mfp; __entry->flags = req->flags; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", bssid: " MAC_PR_FMT ", previous bssid: " MAC_PR_FMT ", use mfp: %s, flags: %u", WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(bssid), MAC_PR_ARG(prev_bssid), BOOL_TO_STR(__entry->use_mfp), __entry->flags) ); TRACE_EVENT(rdev_deauth, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, struct cfg80211_deauth_request *req), TP_ARGS(wiphy, netdev, req), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY MAC_ENTRY(bssid) __field(u16, reason_code) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; MAC_ASSIGN(bssid, req->bssid); __entry->reason_code = req->reason_code; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", bssid: " MAC_PR_FMT ", reason: %u", WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(bssid), __entry->reason_code) ); TRACE_EVENT(rdev_disassoc, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, struct cfg80211_disassoc_request *req), TP_ARGS(wiphy, netdev, req), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY MAC_ENTRY(bssid) __field(u16, reason_code) __field(bool, local_state_change) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; if (req->bss) MAC_ASSIGN(bssid, req->bss->bssid); else eth_zero_addr(__entry->bssid); __entry->reason_code = req->reason_code; __entry->local_state_change = req->local_state_change; ), 
TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", bssid: " MAC_PR_FMT ", reason: %u, local state change: %s", WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(bssid), __entry->reason_code, BOOL_TO_STR(__entry->local_state_change)) ); TRACE_EVENT(rdev_mgmt_tx_cancel_wait, TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev, u64 cookie), TP_ARGS(wiphy, wdev, cookie), TP_STRUCT__entry( WIPHY_ENTRY WDEV_ENTRY __field(u64, cookie) ), TP_fast_assign( WIPHY_ASSIGN; WDEV_ASSIGN; __entry->cookie = cookie; ), TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT ", cookie: %llu ", WIPHY_PR_ARG, WDEV_PR_ARG, __entry->cookie) ); TRACE_EVENT(rdev_set_power_mgmt, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, bool enabled, int timeout), TP_ARGS(wiphy, netdev, enabled, timeout), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY __field(bool, enabled) __field(int, timeout) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; __entry->enabled = enabled; __entry->timeout = timeout; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", %senabled, timeout: %d ", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->enabled ? "" : "not ", __entry->timeout) ); TRACE_EVENT(rdev_connect, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, struct cfg80211_connect_params *sme), TP_ARGS(wiphy, netdev, sme), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY MAC_ENTRY(bssid) __array(char, ssid, IEEE80211_MAX_SSID_LEN + 1) __field(enum nl80211_auth_type, auth_type) __field(bool, privacy) __field(u32, wpa_versions) __field(u32, flags) MAC_ENTRY(prev_bssid) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; MAC_ASSIGN(bssid, sme->bssid); memset(__entry->ssid, 0, IEEE80211_MAX_SSID_LEN + 1); memcpy(__entry->ssid, sme->ssid, sme->ssid_len); __entry->auth_type = sme->auth_type; __entry->privacy = sme->privacy; __entry->wpa_versions = sme->crypto.wpa_versions; __entry->flags = sme->flags; MAC_ASSIGN(prev_bssid, sme->prev_bssid); ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", bssid: " MAC_PR_FMT ", ssid: %s, auth type: %d, privacy: %s, wpa versions: %u, " "flags: %u, previous bssid: " MAC_PR_FMT, WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(bssid), __entry->ssid, __entry->auth_type, BOOL_TO_STR(__entry->privacy), __entry->wpa_versions, __entry->flags, MAC_PR_ARG(prev_bssid)) ); TRACE_EVENT(rdev_update_connect_params, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, struct cfg80211_connect_params *sme, u32 changed), TP_ARGS(wiphy, netdev, sme, changed), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY __field(u32, changed) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; __entry->changed = changed; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", parameters changed: %u", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->changed) ); TRACE_EVENT(rdev_set_cqm_rssi_config, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, s32 rssi_thold, u32 rssi_hyst), TP_ARGS(wiphy, netdev, rssi_thold, rssi_hyst), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY __field(s32, rssi_thold) __field(u32, rssi_hyst) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; __entry->rssi_thold = rssi_thold; __entry->rssi_hyst = rssi_hyst; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", rssi_thold: %d, rssi_hyst: %u ", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->rssi_thold, __entry->rssi_hyst) ); TRACE_EVENT(rdev_set_cqm_rssi_range_config, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, s32 low, s32 high), TP_ARGS(wiphy, netdev, low, high), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY __field(s32, rssi_low) __field(s32, rssi_high) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; __entry->rssi_low = low; __entry->rssi_high = high; 
), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", range: %d - %d ", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->rssi_low, __entry->rssi_high) ); TRACE_EVENT(rdev_set_cqm_txe_config, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u32 rate, u32 pkts, u32 intvl), TP_ARGS(wiphy, netdev, rate, pkts, intvl), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY __field(u32, rate) __field(u32, pkts) __field(u32, intvl) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; __entry->rate = rate; __entry->pkts = pkts; __entry->intvl = intvl; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", rate: %u, packets: %u, interval: %u", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->rate, __entry->pkts, __entry->intvl) ); TRACE_EVENT(rdev_disconnect, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u16 reason_code), TP_ARGS(wiphy, netdev, reason_code), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY __field(u16, reason_code) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; __entry->reason_code = reason_code; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", reason code: %u", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->reason_code) ); TRACE_EVENT(rdev_join_ibss, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, struct cfg80211_ibss_params *params), TP_ARGS(wiphy, netdev, params), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY MAC_ENTRY(bssid) __array(char, ssid, IEEE80211_MAX_SSID_LEN + 1) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; MAC_ASSIGN(bssid, params->bssid); memset(__entry->ssid, 0, IEEE80211_MAX_SSID_LEN + 1); memcpy(__entry->ssid, params->ssid, params->ssid_len); ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", bssid: " MAC_PR_FMT ", ssid: %s", WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(bssid), __entry->ssid) ); TRACE_EVENT(rdev_join_ocb, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, const struct ocb_setup *setup), TP_ARGS(wiphy, netdev, setup), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT, WIPHY_PR_ARG, NETDEV_PR_ARG) ); TRACE_EVENT(rdev_set_wiphy_params, TP_PROTO(struct wiphy *wiphy, u32 changed), TP_ARGS(wiphy, changed), TP_STRUCT__entry( WIPHY_ENTRY __field(u32, changed) ), TP_fast_assign( WIPHY_ASSIGN; __entry->changed = changed; ), TP_printk(WIPHY_PR_FMT ", changed: %u", WIPHY_PR_ARG, __entry->changed) ); DEFINE_EVENT(wiphy_wdev_evt, rdev_get_tx_power, TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev), TP_ARGS(wiphy, wdev) ); TRACE_EVENT(rdev_set_tx_power, TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev, enum nl80211_tx_power_setting type, int mbm), TP_ARGS(wiphy, wdev, type, mbm), TP_STRUCT__entry( WIPHY_ENTRY WDEV_ENTRY __field(enum nl80211_tx_power_setting, type) __field(int, mbm) ), TP_fast_assign( WIPHY_ASSIGN; WDEV_ASSIGN; __entry->type = type; __entry->mbm = mbm; ), TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT ", type: %u, mbm: %d", WIPHY_PR_ARG, WDEV_PR_ARG,__entry->type, __entry->mbm) ); TRACE_EVENT(rdev_return_int_int, TP_PROTO(struct wiphy *wiphy, int func_ret, int func_fill), TP_ARGS(wiphy, func_ret, func_fill), TP_STRUCT__entry( WIPHY_ENTRY __field(int, func_ret) __field(int, func_fill) ), TP_fast_assign( WIPHY_ASSIGN; __entry->func_ret = func_ret; __entry->func_fill = func_fill; ), TP_printk(WIPHY_PR_FMT ", function returns: %d, function filled: %d", WIPHY_PR_ARG, __entry->func_ret, __entry->func_fill) ); #ifdef CPTCFG_NL80211_TESTMODE TRACE_EVENT(rdev_testmode_cmd, TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev), TP_ARGS(wiphy, wdev), TP_STRUCT__entry( WIPHY_ENTRY WDEV_ENTRY ), 
TP_fast_assign( WIPHY_ASSIGN; WDEV_ASSIGN; ), TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT, WIPHY_PR_ARG, WDEV_PR_ARG) ); TRACE_EVENT(rdev_testmode_dump, TP_PROTO(struct wiphy *wiphy), TP_ARGS(wiphy), TP_STRUCT__entry( WIPHY_ENTRY ), TP_fast_assign( WIPHY_ASSIGN; ), TP_printk(WIPHY_PR_FMT, WIPHY_PR_ARG) ); #endif /* CPTCFG_NL80211_TESTMODE */ TRACE_EVENT(rdev_set_bitrate_mask, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, const u8 *peer, const struct cfg80211_bitrate_mask *mask), TP_ARGS(wiphy, netdev, peer, mask), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY MAC_ENTRY(peer) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; MAC_ASSIGN(peer, peer); ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", peer: " MAC_PR_FMT, WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(peer)) ); TRACE_EVENT(rdev_mgmt_frame_register, TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev, u16 frame_type, bool reg), TP_ARGS(wiphy, wdev, frame_type, reg), TP_STRUCT__entry( WIPHY_ENTRY WDEV_ENTRY __field(u16, frame_type) __field(bool, reg) ), TP_fast_assign( WIPHY_ASSIGN; WDEV_ASSIGN; __entry->frame_type = frame_type; __entry->reg = reg; ), TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT ", frame_type: 0x%.2x, reg: %s ", WIPHY_PR_ARG, WDEV_PR_ARG, __entry->frame_type, __entry->reg ? "true" : "false") ); TRACE_EVENT(rdev_return_int_tx_rx, TP_PROTO(struct wiphy *wiphy, int ret, u32 tx, u32 rx), TP_ARGS(wiphy, ret, tx, rx), TP_STRUCT__entry( WIPHY_ENTRY __field(int, ret) __field(u32, tx) __field(u32, rx) ), TP_fast_assign( WIPHY_ASSIGN; __entry->ret = ret; __entry->tx = tx; __entry->rx = rx; ), TP_printk(WIPHY_PR_FMT ", returned %d, tx: %u, rx: %u", WIPHY_PR_ARG, __entry->ret, __entry->tx, __entry->rx) ); TRACE_EVENT(rdev_return_void_tx_rx, TP_PROTO(struct wiphy *wiphy, u32 tx, u32 tx_max, u32 rx, u32 rx_max), TP_ARGS(wiphy, tx, tx_max, rx, rx_max), TP_STRUCT__entry( WIPHY_ENTRY __field(u32, tx) __field(u32, tx_max) __field(u32, rx) __field(u32, rx_max) ), TP_fast_assign( WIPHY_ASSIGN; __entry->tx = tx; __entry->tx_max = tx_max; __entry->rx = rx; __entry->rx_max = rx_max; ), TP_printk(WIPHY_PR_FMT ", tx: %u, tx_max: %u, rx: %u, rx_max: %u ", WIPHY_PR_ARG, __entry->tx, __entry->tx_max, __entry->rx, __entry->rx_max) ); DECLARE_EVENT_CLASS(tx_rx_evt, TP_PROTO(struct wiphy *wiphy, u32 tx, u32 rx), TP_ARGS(wiphy, tx, rx), TP_STRUCT__entry( WIPHY_ENTRY __field(u32, tx) __field(u32, rx) ), TP_fast_assign( WIPHY_ASSIGN; __entry->tx = tx; __entry->rx = rx; ), TP_printk(WIPHY_PR_FMT ", tx: %u, rx: %u ", WIPHY_PR_ARG, __entry->tx, __entry->rx) ); DEFINE_EVENT(tx_rx_evt, rdev_set_antenna, TP_PROTO(struct wiphy *wiphy, u32 tx, u32 rx), TP_ARGS(wiphy, tx, rx) ); DECLARE_EVENT_CLASS(wiphy_netdev_id_evt, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u64 id), TP_ARGS(wiphy, netdev, id), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY __field(u64, id) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; __entry->id = id; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", id: %llu", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->id) ); DEFINE_EVENT(wiphy_netdev_id_evt, rdev_sched_scan_start, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u64 id), TP_ARGS(wiphy, netdev, id) ); DEFINE_EVENT(wiphy_netdev_id_evt, rdev_sched_scan_stop, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u64 id), TP_ARGS(wiphy, netdev, id) ); TRACE_EVENT(rdev_tdls_mgmt, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u8 *peer, u8 action_code, u8 dialog_token, u16 status_code, u32 peer_capability, bool initiator, const u8 *buf, size_t len), TP_ARGS(wiphy, netdev, 
peer, action_code, dialog_token, status_code, peer_capability, initiator, buf, len), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY MAC_ENTRY(peer) __field(u8, action_code) __field(u8, dialog_token) __field(u16, status_code) __field(u32, peer_capability) __field(bool, initiator) __dynamic_array(u8, buf, len) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; MAC_ASSIGN(peer, peer); __entry->action_code = action_code; __entry->dialog_token = dialog_token; __entry->status_code = status_code; __entry->peer_capability = peer_capability; __entry->initiator = initiator; memcpy(__get_dynamic_array(buf), buf, len); ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " MAC_PR_FMT ", action_code: %u, " "dialog_token: %u, status_code: %u, peer_capability: %u " "initiator: %s buf: %#.2x ", WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(peer), __entry->action_code, __entry->dialog_token, __entry->status_code, __entry->peer_capability, BOOL_TO_STR(__entry->initiator), ((u8 *)__get_dynamic_array(buf))[0]) ); TRACE_EVENT(rdev_dump_survey, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, int _idx), TP_ARGS(wiphy, netdev, _idx), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY __field(int, idx) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; __entry->idx = _idx; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", index: %d", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->idx) ); TRACE_EVENT(rdev_return_int_survey_info, TP_PROTO(struct wiphy *wiphy, int ret, struct survey_info *info), TP_ARGS(wiphy, ret, info), TP_STRUCT__entry( WIPHY_ENTRY CHAN_ENTRY __field(int, ret) __field(u64, time) __field(u64, time_busy) __field(u64, time_ext_busy) __field(u64, time_rx) __field(u64, time_tx) __field(u64, time_scan) __field(u32, filled) __field(s8, noise) ), TP_fast_assign( WIPHY_ASSIGN; CHAN_ASSIGN(info->channel); __entry->ret = ret; __entry->time = info->time; __entry->time_busy = info->time_busy; __entry->time_ext_busy = info->time_ext_busy; __entry->time_rx = info->time_rx; __entry->time_tx = info->time_tx; __entry->time_scan = info->time_scan; __entry->filled = info->filled; __entry->noise = info->noise; ), TP_printk(WIPHY_PR_FMT ", returned: %d, " CHAN_PR_FMT ", channel time: %llu, channel time busy: %llu, " "channel time extension busy: %llu, channel time rx: %llu, " "channel time tx: %llu, scan time: %llu, filled: %u, noise: %d", WIPHY_PR_ARG, __entry->ret, CHAN_PR_ARG, __entry->time, __entry->time_busy, __entry->time_ext_busy, __entry->time_rx, __entry->time_tx, __entry->time_scan, __entry->filled, __entry->noise) ); TRACE_EVENT(rdev_tdls_oper, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u8 *peer, enum nl80211_tdls_operation oper), TP_ARGS(wiphy, netdev, peer, oper), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY MAC_ENTRY(peer) __field(enum nl80211_tdls_operation, oper) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; MAC_ASSIGN(peer, peer); __entry->oper = oper; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " MAC_PR_FMT ", oper: %d", WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(peer), __entry->oper) ); DECLARE_EVENT_CLASS(rdev_pmksa, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, struct cfg80211_pmksa *pmksa), TP_ARGS(wiphy, netdev, pmksa), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY MAC_ENTRY(bssid) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; MAC_ASSIGN(bssid, pmksa->bssid); ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", bssid: " MAC_PR_FMT, WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(bssid)) ); TRACE_EVENT(rdev_probe_client, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, const u8 *peer), TP_ARGS(wiphy, netdev, 
peer), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY MAC_ENTRY(peer) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; MAC_ASSIGN(peer, peer); ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " MAC_PR_FMT, WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(peer)) ); DEFINE_EVENT(rdev_pmksa, rdev_set_pmksa, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, struct cfg80211_pmksa *pmksa), TP_ARGS(wiphy, netdev, pmksa) ); DEFINE_EVENT(rdev_pmksa, rdev_del_pmksa, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, struct cfg80211_pmksa *pmksa), TP_ARGS(wiphy, netdev, pmksa) ); TRACE_EVENT(rdev_remain_on_channel, TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev, struct ieee80211_channel *chan, unsigned int duration), TP_ARGS(wiphy, wdev, chan, duration), TP_STRUCT__entry( WIPHY_ENTRY WDEV_ENTRY CHAN_ENTRY __field(unsigned int, duration) ), TP_fast_assign( WIPHY_ASSIGN; WDEV_ASSIGN; CHAN_ASSIGN(chan); __entry->duration = duration; ), TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT ", " CHAN_PR_FMT ", duration: %u", WIPHY_PR_ARG, WDEV_PR_ARG, CHAN_PR_ARG, __entry->duration) ); TRACE_EVENT(rdev_return_int_cookie, TP_PROTO(struct wiphy *wiphy, int ret, u64 cookie), TP_ARGS(wiphy, ret, cookie), TP_STRUCT__entry( WIPHY_ENTRY __field(int, ret) __field(u64, cookie) ), TP_fast_assign( WIPHY_ASSIGN; __entry->ret = ret; __entry->cookie = cookie; ), TP_printk(WIPHY_PR_FMT ", returned %d, cookie: %llu", WIPHY_PR_ARG, __entry->ret, __entry->cookie) ); TRACE_EVENT(rdev_cancel_remain_on_channel, TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev, u64 cookie), TP_ARGS(wiphy, wdev, cookie), TP_STRUCT__entry( WIPHY_ENTRY WDEV_ENTRY __field(u64, cookie) ), TP_fast_assign( WIPHY_ASSIGN; WDEV_ASSIGN; __entry->cookie = cookie; ), TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT ", cookie: %llu", WIPHY_PR_ARG, WDEV_PR_ARG, __entry->cookie) ); TRACE_EVENT(rdev_mgmt_tx, TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev, struct cfg80211_mgmt_tx_params *params), TP_ARGS(wiphy, wdev, params), TP_STRUCT__entry( WIPHY_ENTRY WDEV_ENTRY CHAN_ENTRY __field(bool, offchan) __field(unsigned int, wait) __field(bool, no_cck) __field(bool, dont_wait_for_ack) ), TP_fast_assign( WIPHY_ASSIGN; WDEV_ASSIGN; CHAN_ASSIGN(params->chan); __entry->offchan = params->offchan; __entry->wait = params->wait; __entry->no_cck = params->no_cck; __entry->dont_wait_for_ack = params->dont_wait_for_ack; ), TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT ", " CHAN_PR_FMT ", offchan: %s," " wait: %u, no cck: %s, dont wait for ack: %s", WIPHY_PR_ARG, WDEV_PR_ARG, CHAN_PR_ARG, BOOL_TO_STR(__entry->offchan), __entry->wait, BOOL_TO_STR(__entry->no_cck), BOOL_TO_STR(__entry->dont_wait_for_ack)) ); TRACE_EVENT(rdev_tx_control_port, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, const u8 *buf, size_t len, const u8 *dest, __be16 proto, bool unencrypted), TP_ARGS(wiphy, netdev, buf, len, dest, proto, unencrypted), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY MAC_ENTRY(dest) __field(__be16, proto) __field(bool, unencrypted) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; MAC_ASSIGN(dest, dest); __entry->proto = proto; __entry->unencrypted = unencrypted; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " MAC_PR_FMT "," " proto: 0x%x, unencrypted: %s", WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(dest), be16_to_cpu(__entry->proto), BOOL_TO_STR(__entry->unencrypted)) ); TRACE_EVENT(rdev_set_noack_map, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u16 noack_map), TP_ARGS(wiphy, netdev, noack_map), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY __field(u16, noack_map) ), 
TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; __entry->noack_map = noack_map; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", noack_map: %u", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->noack_map) ); DEFINE_EVENT(wiphy_wdev_evt, rdev_get_channel, TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev), TP_ARGS(wiphy, wdev) ); TRACE_EVENT(rdev_return_chandef, TP_PROTO(struct wiphy *wiphy, int ret, struct cfg80211_chan_def *chandef), TP_ARGS(wiphy, ret, chandef), TP_STRUCT__entry( WIPHY_ENTRY __field(int, ret) CHAN_DEF_ENTRY ), TP_fast_assign( WIPHY_ASSIGN; if (ret == 0) CHAN_DEF_ASSIGN(chandef); else CHAN_DEF_ASSIGN((struct cfg80211_chan_def *)NULL); __entry->ret = ret; ), TP_printk(WIPHY_PR_FMT ", " CHAN_DEF_PR_FMT ", ret: %d", WIPHY_PR_ARG, CHAN_DEF_PR_ARG, __entry->ret) ); DEFINE_EVENT(wiphy_wdev_evt, rdev_start_p2p_device, TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev), TP_ARGS(wiphy, wdev) ); DEFINE_EVENT(wiphy_wdev_evt, rdev_stop_p2p_device, TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev), TP_ARGS(wiphy, wdev) ); TRACE_EVENT(rdev_start_nan, TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev, struct cfg80211_nan_conf *conf), TP_ARGS(wiphy, wdev, conf), TP_STRUCT__entry( WIPHY_ENTRY WDEV_ENTRY __field(u8, master_pref) __field(u8, bands) __field(u8, cdw_2g) __field(u8, cdw_5g) ), TP_fast_assign( WIPHY_ASSIGN; WDEV_ASSIGN; __entry->master_pref = conf->master_pref; __entry->bands = conf->bands; __entry->cdw_2g = conf->cdw_2g; __entry->cdw_5g = conf->cdw_5g; ), TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT ", master preference: %u, bands: 0x%0x, cdw_2g: %u, cdw_5g: %u", WIPHY_PR_ARG, WDEV_PR_ARG, __entry->master_pref, __entry->bands, __entry->cdw_2g, __entry->cdw_5g) ); TRACE_EVENT(rdev_nan_change_conf, TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev, struct cfg80211_nan_conf *conf, u32 changes), TP_ARGS(wiphy, wdev, conf, changes), TP_STRUCT__entry( WIPHY_ENTRY WDEV_ENTRY __field(u8, master_pref) __field(u8, bands) __field(u32, changes) ), TP_fast_assign( WIPHY_ASSIGN; WDEV_ASSIGN; __entry->master_pref = conf->master_pref; __entry->bands = conf->bands; __entry->changes = changes; ), TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT ", master preference: %u, bands: 0x%0x, changes: %x", WIPHY_PR_ARG, WDEV_PR_ARG, __entry->master_pref, __entry->bands, __entry->changes) ); DEFINE_EVENT(wiphy_wdev_evt, rdev_stop_nan, TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev), TP_ARGS(wiphy, wdev) ); TRACE_EVENT(rdev_add_nan_func, TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev, const struct cfg80211_nan_func *func), TP_ARGS(wiphy, wdev, func), TP_STRUCT__entry( WIPHY_ENTRY WDEV_ENTRY __field(u8, func_type) __field(u64, cookie) ), TP_fast_assign( WIPHY_ASSIGN; WDEV_ASSIGN; __entry->func_type = func->type; __entry->cookie = func->cookie; ), TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT ", type=%u, cookie=%llu", WIPHY_PR_ARG, WDEV_PR_ARG, __entry->func_type, __entry->cookie) ); TRACE_EVENT(rdev_del_nan_func, TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev, u64 cookie), TP_ARGS(wiphy, wdev, cookie), TP_STRUCT__entry( WIPHY_ENTRY WDEV_ENTRY __field(u64, cookie) ), TP_fast_assign( WIPHY_ASSIGN; WDEV_ASSIGN; __entry->cookie = cookie; ), TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT ", cookie=%llu", WIPHY_PR_ARG, WDEV_PR_ARG, __entry->cookie) ); TRACE_EVENT(rdev_set_mac_acl, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, struct cfg80211_acl_data *params), TP_ARGS(wiphy, netdev, params), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY __field(u32, acl_policy) ), TP_fast_assign( 
WIPHY_ASSIGN; NETDEV_ASSIGN; __entry->acl_policy = params->acl_policy; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", acl policy: %d", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->acl_policy) ); TRACE_EVENT(rdev_update_ft_ies, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, struct cfg80211_update_ft_ies_params *ftie), TP_ARGS(wiphy, netdev, ftie), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY __field(u16, md) __dynamic_array(u8, ie, ftie->ie_len) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; __entry->md = ftie->md; memcpy(__get_dynamic_array(ie), ftie->ie, ftie->ie_len); ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", md: 0x%x", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->md) ); TRACE_EVENT(rdev_crit_proto_start, TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev, enum nl80211_crit_proto_id protocol, u16 duration), TP_ARGS(wiphy, wdev, protocol, duration), TP_STRUCT__entry( WIPHY_ENTRY WDEV_ENTRY __field(u16, proto) __field(u16, duration) ), TP_fast_assign( WIPHY_ASSIGN; WDEV_ASSIGN; __entry->proto = protocol; __entry->duration = duration; ), TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT ", proto=%x, duration=%u", WIPHY_PR_ARG, WDEV_PR_ARG, __entry->proto, __entry->duration) ); TRACE_EVENT(rdev_crit_proto_stop, TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev), TP_ARGS(wiphy, wdev), TP_STRUCT__entry( WIPHY_ENTRY WDEV_ENTRY ), TP_fast_assign( WIPHY_ASSIGN; WDEV_ASSIGN; ), TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT, WIPHY_PR_ARG, WDEV_PR_ARG) ); TRACE_EVENT(rdev_channel_switch, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, struct cfg80211_csa_settings *params), TP_ARGS(wiphy, netdev, params), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY CHAN_DEF_ENTRY __field(bool, radar_required) __field(bool, block_tx) __field(u8, count) __dynamic_array(u16, bcn_ofs, params->n_counter_offsets_beacon) __dynamic_array(u16, pres_ofs, params->n_counter_offsets_presp) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; CHAN_DEF_ASSIGN(&params->chandef); __entry->radar_required = params->radar_required; __entry->block_tx = params->block_tx; __entry->count = params->count; memcpy(__get_dynamic_array(bcn_ofs), params->counter_offsets_beacon, params->n_counter_offsets_beacon * sizeof(u16)); /* probe response offsets are optional */ if (params->n_counter_offsets_presp) memcpy(__get_dynamic_array(pres_ofs), params->counter_offsets_presp, params->n_counter_offsets_presp * sizeof(u16)); ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " CHAN_DEF_PR_FMT ", block_tx: %d, count: %u, radar_required: %d", WIPHY_PR_ARG, NETDEV_PR_ARG, CHAN_DEF_PR_ARG, __entry->block_tx, __entry->count, __entry->radar_required) ); TRACE_EVENT(rdev_set_qos_map, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, struct cfg80211_qos_map *qos_map), TP_ARGS(wiphy, netdev, qos_map), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY QOS_MAP_ENTRY ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; QOS_MAP_ASSIGN(qos_map); ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", num_des: %u", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->num_des) ); TRACE_EVENT(rdev_set_ap_chanwidth, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, struct cfg80211_chan_def *chandef), TP_ARGS(wiphy, netdev, chandef), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY CHAN_DEF_ENTRY ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; CHAN_DEF_ASSIGN(chandef); ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " CHAN_DEF_PR_FMT, WIPHY_PR_ARG, NETDEV_PR_ARG, CHAN_DEF_PR_ARG) ); TRACE_EVENT(rdev_add_tx_ts, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u8 tsid, const u8 *peer, u8 user_prio, 
u16 admitted_time), TP_ARGS(wiphy, netdev, tsid, peer, user_prio, admitted_time), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY MAC_ENTRY(peer) __field(u8, tsid) __field(u8, user_prio) __field(u16, admitted_time) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; MAC_ASSIGN(peer, peer); __entry->tsid = tsid; __entry->user_prio = user_prio; __entry->admitted_time = admitted_time; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " MAC_PR_FMT ", TSID %d, UP %d, time %d", WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(peer), __entry->tsid, __entry->user_prio, __entry->admitted_time) ); TRACE_EVENT(rdev_del_tx_ts, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u8 tsid, const u8 *peer), TP_ARGS(wiphy, netdev, tsid, peer), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY MAC_ENTRY(peer) __field(u8, tsid) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; MAC_ASSIGN(peer, peer); __entry->tsid = tsid; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " MAC_PR_FMT ", TSID %d", WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(peer), __entry->tsid) ); TRACE_EVENT(rdev_tdls_channel_switch, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, const u8 *addr, u8 oper_class, struct cfg80211_chan_def *chandef), TP_ARGS(wiphy, netdev, addr, oper_class, chandef), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY MAC_ENTRY(addr) __field(u8, oper_class) CHAN_DEF_ENTRY ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; MAC_ASSIGN(addr, addr); __entry->oper_class = oper_class; CHAN_DEF_ASSIGN(chandef); ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " MAC_PR_FMT " oper class %d, " CHAN_DEF_PR_FMT, WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(addr), __entry->oper_class, CHAN_DEF_PR_ARG) ); TRACE_EVENT(rdev_tdls_cancel_channel_switch, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, const u8 *addr), TP_ARGS(wiphy, netdev, addr), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY MAC_ENTRY(addr) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; MAC_ASSIGN(addr, addr); ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " MAC_PR_FMT, WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(addr)) ); TRACE_EVENT(rdev_set_pmk, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, struct cfg80211_pmk_conf *pmk_conf), TP_ARGS(wiphy, netdev, pmk_conf), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY MAC_ENTRY(aa) __field(u8, pmk_len) __field(u8, pmk_r0_name_len) __dynamic_array(u8, pmk, pmk_conf->pmk_len) __dynamic_array(u8, pmk_r0_name, WLAN_PMK_NAME_LEN) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; MAC_ASSIGN(aa, pmk_conf->aa); __entry->pmk_len = pmk_conf->pmk_len; __entry->pmk_r0_name_len = pmk_conf->pmk_r0_name ? WLAN_PMK_NAME_LEN : 0; memcpy(__get_dynamic_array(pmk), pmk_conf->pmk, pmk_conf->pmk_len); memcpy(__get_dynamic_array(pmk_r0_name), pmk_conf->pmk_r0_name, pmk_conf->pmk_r0_name ? WLAN_PMK_NAME_LEN : 0); ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " MAC_PR_FMT ", pmk_len=%u, pmk: %s pmk_r0_name: %s", WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(aa), __entry->pmk_len, __print_array(__get_dynamic_array(pmk), __get_dynamic_array_len(pmk), 1), __entry->pmk_r0_name_len ? 
__print_array(__get_dynamic_array(pmk_r0_name), __get_dynamic_array_len(pmk_r0_name), 1) : "") ); TRACE_EVENT(rdev_del_pmk, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, const u8 *aa), TP_ARGS(wiphy, netdev, aa), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY MAC_ENTRY(aa) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; MAC_ASSIGN(aa, aa); ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " MAC_PR_FMT, WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(aa)) ); TRACE_EVENT(rdev_external_auth, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, struct cfg80211_external_auth_params *params), TP_ARGS(wiphy, netdev, params), TP_STRUCT__entry(WIPHY_ENTRY NETDEV_ENTRY MAC_ENTRY(bssid) __array(u8, ssid, IEEE80211_MAX_SSID_LEN + 1) __field(u16, status) ), TP_fast_assign(WIPHY_ASSIGN; NETDEV_ASSIGN; MAC_ASSIGN(bssid, params->bssid); memset(__entry->ssid, 0, IEEE80211_MAX_SSID_LEN + 1); memcpy(__entry->ssid, params->ssid.ssid, params->ssid.ssid_len); __entry->status = params->status; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", bssid: " MAC_PR_FMT ", ssid: %s, status: %u", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->bssid, __entry->ssid, __entry->status) ); TRACE_EVENT(rdev_start_radar_detection, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, struct cfg80211_chan_def *chandef, u32 cac_time_ms), TP_ARGS(wiphy, netdev, chandef, cac_time_ms), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY CHAN_DEF_ENTRY __field(u32, cac_time_ms) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; CHAN_DEF_ASSIGN(chandef); __entry->cac_time_ms = cac_time_ms; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " CHAN_DEF_PR_FMT ", cac_time_ms=%u", WIPHY_PR_ARG, NETDEV_PR_ARG, CHAN_DEF_PR_ARG, __entry->cac_time_ms) ); TRACE_EVENT(rdev_set_mcast_rate, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, int *mcast_rate), TP_ARGS(wiphy, netdev, mcast_rate), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY __array(int, mcast_rate, NUM_NL80211_BANDS) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; memcpy(__entry->mcast_rate, mcast_rate, sizeof(int) * NUM_NL80211_BANDS); ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " "mcast_rates [2.4GHz=0x%x, 5.2GHz=0x%x, 60GHz=0x%x]", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->mcast_rate[NL80211_BAND_2GHZ], __entry->mcast_rate[NL80211_BAND_5GHZ], __entry->mcast_rate[NL80211_BAND_60GHZ]) ); TRACE_EVENT(rdev_set_coalesce, TP_PROTO(struct wiphy *wiphy, struct cfg80211_coalesce *coalesce), TP_ARGS(wiphy, coalesce), TP_STRUCT__entry( WIPHY_ENTRY __field(int, n_rules) ), TP_fast_assign( WIPHY_ASSIGN; __entry->n_rules = coalesce ? 
coalesce->n_rules : 0; ), TP_printk(WIPHY_PR_FMT ", n_rules=%d", WIPHY_PR_ARG, __entry->n_rules) ); DEFINE_EVENT(wiphy_wdev_evt, rdev_abort_scan, TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev), TP_ARGS(wiphy, wdev) ); TRACE_EVENT(rdev_set_multicast_to_unicast, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, const bool enabled), TP_ARGS(wiphy, netdev, enabled), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY __field(bool, enabled) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; __entry->enabled = enabled; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", unicast: %s", WIPHY_PR_ARG, NETDEV_PR_ARG, BOOL_TO_STR(__entry->enabled)) ); DEFINE_EVENT(wiphy_wdev_evt, rdev_get_txq_stats, TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev), TP_ARGS(wiphy, wdev) ); TRACE_EVENT(rdev_get_ftm_responder_stats, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, struct cfg80211_ftm_responder_stats *ftm_stats), TP_ARGS(wiphy, netdev, ftm_stats), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY __field(u64, timestamp) __field(u32, success_num) __field(u32, partial_num) __field(u32, failed_num) __field(u32, asap_num) __field(u32, non_asap_num) __field(u64, duration) __field(u32, unknown_triggers) __field(u32, reschedule) __field(u32, out_of_window) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; __entry->success_num = ftm_stats->success_num; __entry->partial_num = ftm_stats->partial_num; __entry->failed_num = ftm_stats->failed_num; __entry->asap_num = ftm_stats->asap_num; __entry->non_asap_num = ftm_stats->non_asap_num; __entry->duration = ftm_stats->total_duration_ms; __entry->unknown_triggers = ftm_stats->unknown_triggers_num; __entry->reschedule = ftm_stats->reschedule_requests_num; __entry->out_of_window = ftm_stats->out_of_window_triggers_num; ), TP_printk(WIPHY_PR_FMT "Ftm responder stats: success %u, partial %u, " "failed %u, asap %u, non asap %u, total duration %llu, unknown " "triggers %u, rescheduled %u, out of window %u", WIPHY_PR_ARG, __entry->success_num, __entry->partial_num, __entry->failed_num, __entry->asap_num, __entry->non_asap_num, __entry->duration, __entry->unknown_triggers, __entry->reschedule, __entry->out_of_window) ); DEFINE_EVENT(wiphy_wdev_cookie_evt, rdev_start_pmsr, TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev, u64 cookie), TP_ARGS(wiphy, wdev, cookie) ); DEFINE_EVENT(wiphy_wdev_cookie_evt, rdev_abort_pmsr, TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev, u64 cookie), TP_ARGS(wiphy, wdev, cookie) ); /************************************************************* * cfg80211 exported functions traces * *************************************************************/ TRACE_EVENT(cfg80211_return_bool, TP_PROTO(bool ret), TP_ARGS(ret), TP_STRUCT__entry( __field(bool, ret) ), TP_fast_assign( __entry->ret = ret; ), TP_printk("returned %s", BOOL_TO_STR(__entry->ret)) ); DECLARE_EVENT_CLASS(cfg80211_netdev_mac_evt, TP_PROTO(struct net_device *netdev, const u8 *macaddr), TP_ARGS(netdev, macaddr), TP_STRUCT__entry( NETDEV_ENTRY MAC_ENTRY(macaddr) ), TP_fast_assign( NETDEV_ASSIGN; MAC_ASSIGN(macaddr, macaddr); ), TP_printk(NETDEV_PR_FMT ", mac: " MAC_PR_FMT, NETDEV_PR_ARG, MAC_PR_ARG(macaddr)) ); DEFINE_EVENT(cfg80211_netdev_mac_evt, cfg80211_notify_new_peer_candidate, TP_PROTO(struct net_device *netdev, const u8 *macaddr), TP_ARGS(netdev, macaddr) ); DECLARE_EVENT_CLASS(netdev_evt_only, TP_PROTO(struct net_device *netdev), TP_ARGS(netdev), TP_STRUCT__entry( NETDEV_ENTRY ), TP_fast_assign( NETDEV_ASSIGN; ), TP_printk(NETDEV_PR_FMT , NETDEV_PR_ARG) ); 
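/*
 * Usage sketch (an assumption about the calling convention, not code from
 * this file): the cfg80211_* events in this block trace calls made into
 * cfg80211 from drivers, and the cfg80211_return_* events record the result
 * on the way out, so a traced helper brackets its work with both.  The
 * helper below is hypothetical.
 */
#if 0	/* illustrative sketch only, compiled out */
static bool cfg80211_example_traced_check(struct net_device *dev)
{
	bool ret;

	trace_cfg80211_send_rx_auth(dev);	/* entry event: netdev only */
	ret = netif_running(dev);		/* stand-in for the real work */
	trace_cfg80211_return_bool(ret);	/* paired return-value event */
	return ret;
}
#endif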
DEFINE_EVENT(netdev_evt_only, cfg80211_send_rx_auth, TP_PROTO(struct net_device *netdev), TP_ARGS(netdev) ); TRACE_EVENT(cfg80211_send_rx_assoc, TP_PROTO(struct net_device *netdev, struct cfg80211_bss *bss), TP_ARGS(netdev, bss), TP_STRUCT__entry( NETDEV_ENTRY MAC_ENTRY(bssid) CHAN_ENTRY ), TP_fast_assign( NETDEV_ASSIGN; MAC_ASSIGN(bssid, bss->bssid); CHAN_ASSIGN(bss->channel); ), TP_printk(NETDEV_PR_FMT ", " MAC_PR_FMT ", " CHAN_PR_FMT, NETDEV_PR_ARG, MAC_PR_ARG(bssid), CHAN_PR_ARG) ); DECLARE_EVENT_CLASS(netdev_frame_event, TP_PROTO(struct net_device *netdev, const u8 *buf, int len), TP_ARGS(netdev, buf, len), TP_STRUCT__entry( NETDEV_ENTRY __dynamic_array(u8, frame, len) ), TP_fast_assign( NETDEV_ASSIGN; memcpy(__get_dynamic_array(frame), buf, len); ), TP_printk(NETDEV_PR_FMT ", ftype:0x%.2x", NETDEV_PR_ARG, le16_to_cpup((__le16 *)__get_dynamic_array(frame))) ); DEFINE_EVENT(netdev_frame_event, cfg80211_rx_unprot_mlme_mgmt, TP_PROTO(struct net_device *netdev, const u8 *buf, int len), TP_ARGS(netdev, buf, len) ); DEFINE_EVENT(netdev_frame_event, cfg80211_rx_mlme_mgmt, TP_PROTO(struct net_device *netdev, const u8 *buf, int len), TP_ARGS(netdev, buf, len) ); TRACE_EVENT(cfg80211_tx_mlme_mgmt, TP_PROTO(struct net_device *netdev, const u8 *buf, int len), TP_ARGS(netdev, buf, len), TP_STRUCT__entry( NETDEV_ENTRY __dynamic_array(u8, frame, len) ), TP_fast_assign( NETDEV_ASSIGN; memcpy(__get_dynamic_array(frame), buf, len); ), TP_printk(NETDEV_PR_FMT ", ftype:0x%.2x", NETDEV_PR_ARG, le16_to_cpup((__le16 *)__get_dynamic_array(frame))) ); DECLARE_EVENT_CLASS(netdev_mac_evt, TP_PROTO(struct net_device *netdev, const u8 *mac), TP_ARGS(netdev, mac), TP_STRUCT__entry( NETDEV_ENTRY MAC_ENTRY(mac) ), TP_fast_assign( NETDEV_ASSIGN; MAC_ASSIGN(mac, mac) ), TP_printk(NETDEV_PR_FMT ", mac: " MAC_PR_FMT, NETDEV_PR_ARG, MAC_PR_ARG(mac)) ); DEFINE_EVENT(netdev_mac_evt, cfg80211_send_auth_timeout, TP_PROTO(struct net_device *netdev, const u8 *mac), TP_ARGS(netdev, mac) ); DEFINE_EVENT(netdev_mac_evt, cfg80211_send_assoc_timeout, TP_PROTO(struct net_device *netdev, const u8 *mac), TP_ARGS(netdev, mac) ); TRACE_EVENT(cfg80211_michael_mic_failure, TP_PROTO(struct net_device *netdev, const u8 *addr, enum nl80211_key_type key_type, int key_id, const u8 *tsc), TP_ARGS(netdev, addr, key_type, key_id, tsc), TP_STRUCT__entry( NETDEV_ENTRY MAC_ENTRY(addr) __field(enum nl80211_key_type, key_type) __field(int, key_id) __array(u8, tsc, 6) ), TP_fast_assign( NETDEV_ASSIGN; MAC_ASSIGN(addr, addr); __entry->key_type = key_type; __entry->key_id = key_id; if (tsc) memcpy(__entry->tsc, tsc, 6); ), TP_printk(NETDEV_PR_FMT ", " MAC_PR_FMT ", key type: %d, key id: %d, tsc: %pm", NETDEV_PR_ARG, MAC_PR_ARG(addr), __entry->key_type, __entry->key_id, __entry->tsc) ); TRACE_EVENT(cfg80211_ready_on_channel, TP_PROTO(struct wireless_dev *wdev, u64 cookie, struct ieee80211_channel *chan, unsigned int duration), TP_ARGS(wdev, cookie, chan, duration), TP_STRUCT__entry( WDEV_ENTRY __field(u64, cookie) CHAN_ENTRY __field(unsigned int, duration) ), TP_fast_assign( WDEV_ASSIGN; __entry->cookie = cookie; CHAN_ASSIGN(chan); __entry->duration = duration; ), TP_printk(WDEV_PR_FMT ", cookie: %llu, " CHAN_PR_FMT ", duration: %u", WDEV_PR_ARG, __entry->cookie, CHAN_PR_ARG, __entry->duration) ); TRACE_EVENT(cfg80211_ready_on_channel_expired, TP_PROTO(struct wireless_dev *wdev, u64 cookie, struct ieee80211_channel *chan), TP_ARGS(wdev, cookie, chan), TP_STRUCT__entry( WDEV_ENTRY __field(u64, cookie) CHAN_ENTRY ), TP_fast_assign( WDEV_ASSIGN; 
__entry->cookie = cookie; CHAN_ASSIGN(chan); ), TP_printk(WDEV_PR_FMT ", cookie: %llu, " CHAN_PR_FMT, WDEV_PR_ARG, __entry->cookie, CHAN_PR_ARG) ); TRACE_EVENT(cfg80211_new_sta, TP_PROTO(struct net_device *netdev, const u8 *mac_addr, struct station_info *sinfo), TP_ARGS(netdev, mac_addr, sinfo), TP_STRUCT__entry( NETDEV_ENTRY MAC_ENTRY(mac_addr) SINFO_ENTRY ), TP_fast_assign( NETDEV_ASSIGN; MAC_ASSIGN(mac_addr, mac_addr); SINFO_ASSIGN; ), TP_printk(NETDEV_PR_FMT ", " MAC_PR_FMT, NETDEV_PR_ARG, MAC_PR_ARG(mac_addr)) ); DEFINE_EVENT(cfg80211_netdev_mac_evt, cfg80211_del_sta, TP_PROTO(struct net_device *netdev, const u8 *macaddr), TP_ARGS(netdev, macaddr) ); TRACE_EVENT(cfg80211_rx_mgmt, TP_PROTO(struct wireless_dev *wdev, int freq, int sig_dbm), TP_ARGS(wdev, freq, sig_dbm), TP_STRUCT__entry( WDEV_ENTRY __field(int, freq) __field(int, sig_dbm) ), TP_fast_assign( WDEV_ASSIGN; __entry->freq = freq; __entry->sig_dbm = sig_dbm; ), TP_printk(WDEV_PR_FMT ", freq: %d, sig dbm: %d", WDEV_PR_ARG, __entry->freq, __entry->sig_dbm) ); TRACE_EVENT(cfg80211_mgmt_tx_status, TP_PROTO(struct wireless_dev *wdev, u64 cookie, bool ack), TP_ARGS(wdev, cookie, ack), TP_STRUCT__entry( WDEV_ENTRY __field(u64, cookie) __field(bool, ack) ), TP_fast_assign( WDEV_ASSIGN; __entry->cookie = cookie; __entry->ack = ack; ), TP_printk(WDEV_PR_FMT", cookie: %llu, ack: %s", WDEV_PR_ARG, __entry->cookie, BOOL_TO_STR(__entry->ack)) ); TRACE_EVENT(cfg80211_rx_control_port, TP_PROTO(struct net_device *netdev, struct sk_buff *skb, bool unencrypted), TP_ARGS(netdev, skb, unencrypted), TP_STRUCT__entry( NETDEV_ENTRY __field(int, len) MAC_ENTRY(from) __field(u16, proto) __field(bool, unencrypted) ), TP_fast_assign( NETDEV_ASSIGN; __entry->len = skb->len; MAC_ASSIGN(from, eth_hdr(skb)->h_source); __entry->proto = be16_to_cpu(skb->protocol); __entry->unencrypted = unencrypted; ), TP_printk(NETDEV_PR_FMT ", len=%d, " MAC_PR_FMT ", proto: 0x%x, unencrypted: %s", NETDEV_PR_ARG, __entry->len, MAC_PR_ARG(from), __entry->proto, BOOL_TO_STR(__entry->unencrypted)) ); TRACE_EVENT(cfg80211_cqm_rssi_notify, TP_PROTO(struct net_device *netdev, enum nl80211_cqm_rssi_threshold_event rssi_event, s32 rssi_level), TP_ARGS(netdev, rssi_event, rssi_level), TP_STRUCT__entry( NETDEV_ENTRY __field(enum nl80211_cqm_rssi_threshold_event, rssi_event) __field(s32, rssi_level) ), TP_fast_assign( NETDEV_ASSIGN; __entry->rssi_event = rssi_event; __entry->rssi_level = rssi_level; ), TP_printk(NETDEV_PR_FMT ", rssi event: %d, level: %d", NETDEV_PR_ARG, __entry->rssi_event, __entry->rssi_level) ); TRACE_EVENT(cfg80211_reg_can_beacon, TP_PROTO(struct wiphy *wiphy, struct cfg80211_chan_def *chandef, enum nl80211_iftype iftype, bool check_no_ir), TP_ARGS(wiphy, chandef, iftype, check_no_ir), TP_STRUCT__entry( WIPHY_ENTRY CHAN_DEF_ENTRY __field(enum nl80211_iftype, iftype) __field(bool, check_no_ir) ), TP_fast_assign( WIPHY_ASSIGN; CHAN_DEF_ASSIGN(chandef); __entry->iftype = iftype; __entry->check_no_ir = check_no_ir; ), TP_printk(WIPHY_PR_FMT ", " CHAN_DEF_PR_FMT ", iftype=%d check_no_ir=%s", WIPHY_PR_ARG, CHAN_DEF_PR_ARG, __entry->iftype, BOOL_TO_STR(__entry->check_no_ir)) ); TRACE_EVENT(cfg80211_chandef_dfs_required, TP_PROTO(struct wiphy *wiphy, struct cfg80211_chan_def *chandef), TP_ARGS(wiphy, chandef), TP_STRUCT__entry( WIPHY_ENTRY CHAN_DEF_ENTRY ), TP_fast_assign( WIPHY_ASSIGN; CHAN_DEF_ASSIGN(chandef); ), TP_printk(WIPHY_PR_FMT ", " CHAN_DEF_PR_FMT, WIPHY_PR_ARG, CHAN_DEF_PR_ARG) ); TRACE_EVENT(cfg80211_ch_switch_notify, TP_PROTO(struct net_device *netdev, struct 
cfg80211_chan_def *chandef), TP_ARGS(netdev, chandef), TP_STRUCT__entry( NETDEV_ENTRY CHAN_DEF_ENTRY ), TP_fast_assign( NETDEV_ASSIGN; CHAN_DEF_ASSIGN(chandef); ), TP_printk(NETDEV_PR_FMT ", " CHAN_DEF_PR_FMT, NETDEV_PR_ARG, CHAN_DEF_PR_ARG) ); TRACE_EVENT(cfg80211_ch_switch_started_notify, TP_PROTO(struct net_device *netdev, struct cfg80211_chan_def *chandef), TP_ARGS(netdev, chandef), TP_STRUCT__entry( NETDEV_ENTRY CHAN_DEF_ENTRY ), TP_fast_assign( NETDEV_ASSIGN; CHAN_DEF_ASSIGN(chandef); ), TP_printk(NETDEV_PR_FMT ", " CHAN_DEF_PR_FMT, NETDEV_PR_ARG, CHAN_DEF_PR_ARG) ); TRACE_EVENT(cfg80211_radar_event, TP_PROTO(struct wiphy *wiphy, struct cfg80211_chan_def *chandef), TP_ARGS(wiphy, chandef), TP_STRUCT__entry( WIPHY_ENTRY CHAN_DEF_ENTRY ), TP_fast_assign( WIPHY_ASSIGN; CHAN_DEF_ASSIGN(chandef); ), TP_printk(WIPHY_PR_FMT ", " CHAN_DEF_PR_FMT, WIPHY_PR_ARG, CHAN_DEF_PR_ARG) ); TRACE_EVENT(cfg80211_cac_event, TP_PROTO(struct net_device *netdev, enum nl80211_radar_event evt), TP_ARGS(netdev, evt), TP_STRUCT__entry( NETDEV_ENTRY __field(enum nl80211_radar_event, evt) ), TP_fast_assign( NETDEV_ASSIGN; __entry->evt = evt; ), TP_printk(NETDEV_PR_FMT ", event: %d", NETDEV_PR_ARG, __entry->evt) ); DECLARE_EVENT_CLASS(cfg80211_rx_evt, TP_PROTO(struct net_device *netdev, const u8 *addr), TP_ARGS(netdev, addr), TP_STRUCT__entry( NETDEV_ENTRY MAC_ENTRY(addr) ), TP_fast_assign( NETDEV_ASSIGN; MAC_ASSIGN(addr, addr); ), TP_printk(NETDEV_PR_FMT ", " MAC_PR_FMT, NETDEV_PR_ARG, MAC_PR_ARG(addr)) ); DEFINE_EVENT(cfg80211_rx_evt, cfg80211_rx_spurious_frame, TP_PROTO(struct net_device *netdev, const u8 *addr), TP_ARGS(netdev, addr) ); DEFINE_EVENT(cfg80211_rx_evt, cfg80211_rx_unexpected_4addr_frame, TP_PROTO(struct net_device *netdev, const u8 *addr), TP_ARGS(netdev, addr) ); TRACE_EVENT(cfg80211_ibss_joined, TP_PROTO(struct net_device *netdev, const u8 *bssid, struct ieee80211_channel *channel), TP_ARGS(netdev, bssid, channel), TP_STRUCT__entry( NETDEV_ENTRY MAC_ENTRY(bssid) CHAN_ENTRY ), TP_fast_assign( NETDEV_ASSIGN; MAC_ASSIGN(bssid, bssid); CHAN_ASSIGN(channel); ), TP_printk(NETDEV_PR_FMT ", bssid: " MAC_PR_FMT ", " CHAN_PR_FMT, NETDEV_PR_ARG, MAC_PR_ARG(bssid), CHAN_PR_ARG) ); TRACE_EVENT(cfg80211_probe_status, TP_PROTO(struct net_device *netdev, const u8 *addr, u64 cookie, bool acked), TP_ARGS(netdev, addr, cookie, acked), TP_STRUCT__entry( NETDEV_ENTRY MAC_ENTRY(addr) __field(u64, cookie) __field(bool, acked) ), TP_fast_assign( NETDEV_ASSIGN; MAC_ASSIGN(addr, addr); __entry->cookie = cookie; __entry->acked = acked; ), TP_printk(NETDEV_PR_FMT " addr:" MAC_PR_FMT ", cookie: %llu, acked: %s", NETDEV_PR_ARG, MAC_PR_ARG(addr), __entry->cookie, BOOL_TO_STR(__entry->acked)) ); TRACE_EVENT(cfg80211_cqm_pktloss_notify, TP_PROTO(struct net_device *netdev, const u8 *peer, u32 num_packets), TP_ARGS(netdev, peer, num_packets), TP_STRUCT__entry( NETDEV_ENTRY MAC_ENTRY(peer) __field(u32, num_packets) ), TP_fast_assign( NETDEV_ASSIGN; MAC_ASSIGN(peer, peer); __entry->num_packets = num_packets; ), TP_printk(NETDEV_PR_FMT ", peer: " MAC_PR_FMT ", num of lost packets: %u", NETDEV_PR_ARG, MAC_PR_ARG(peer), __entry->num_packets) ); DEFINE_EVENT(cfg80211_netdev_mac_evt, cfg80211_gtk_rekey_notify, TP_PROTO(struct net_device *netdev, const u8 *macaddr), TP_ARGS(netdev, macaddr) ); TRACE_EVENT(cfg80211_pmksa_candidate_notify, TP_PROTO(struct net_device *netdev, int index, const u8 *bssid, bool preauth), TP_ARGS(netdev, index, bssid, preauth), TP_STRUCT__entry( NETDEV_ENTRY __field(int, index) MAC_ENTRY(bssid) __field(bool, 
preauth) ), TP_fast_assign( NETDEV_ASSIGN; __entry->index = index; MAC_ASSIGN(bssid, bssid); __entry->preauth = preauth; ), TP_printk(NETDEV_PR_FMT ", index:%d, bssid: " MAC_PR_FMT ", pre auth: %s", NETDEV_PR_ARG, __entry->index, MAC_PR_ARG(bssid), BOOL_TO_STR(__entry->preauth)) ); TRACE_EVENT(cfg80211_report_obss_beacon, TP_PROTO(struct wiphy *wiphy, const u8 *frame, size_t len, int freq, int sig_dbm), TP_ARGS(wiphy, frame, len, freq, sig_dbm), TP_STRUCT__entry( WIPHY_ENTRY __field(int, freq) __field(int, sig_dbm) ), TP_fast_assign( WIPHY_ASSIGN; __entry->freq = freq; __entry->sig_dbm = sig_dbm; ), TP_printk(WIPHY_PR_FMT ", freq: %d, sig_dbm: %d", WIPHY_PR_ARG, __entry->freq, __entry->sig_dbm) ); TRACE_EVENT(cfg80211_tdls_oper_request, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, const u8 *peer, enum nl80211_tdls_operation oper, u16 reason_code), TP_ARGS(wiphy, netdev, peer, oper, reason_code), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY MAC_ENTRY(peer) __field(enum nl80211_tdls_operation, oper) __field(u16, reason_code) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; MAC_ASSIGN(peer, peer); __entry->oper = oper; __entry->reason_code = reason_code; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", peer: " MAC_PR_FMT ", oper: %d, reason_code %u", WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(peer), __entry->oper, __entry->reason_code) ); TRACE_EVENT(cfg80211_scan_done, TP_PROTO(struct cfg80211_scan_request *request, struct cfg80211_scan_info *info), TP_ARGS(request, info), TP_STRUCT__entry( __field(u32, n_channels) __dynamic_array(u8, ie, request ? request->ie_len : 0) __array(u32, rates, NUM_NL80211_BANDS) __field(u32, wdev_id) MAC_ENTRY(wiphy_mac) __field(bool, no_cck) __field(bool, aborted) __field(u64, scan_start_tsf) MAC_ENTRY(tsf_bssid) ), TP_fast_assign( if (request) { memcpy(__get_dynamic_array(ie), request->ie, request->ie_len); memcpy(__entry->rates, request->rates, NUM_NL80211_BANDS); __entry->wdev_id = request->wdev ? 
request->wdev->identifier : 0; if (request->wiphy) MAC_ASSIGN(wiphy_mac, request->wiphy->perm_addr); __entry->no_cck = request->no_cck; } if (info) { __entry->aborted = info->aborted; __entry->scan_start_tsf = info->scan_start_tsf; MAC_ASSIGN(tsf_bssid, info->tsf_bssid); } ), TP_printk("aborted: %s, scan start (TSF): %llu, tsf_bssid: " MAC_PR_FMT, BOOL_TO_STR(__entry->aborted), (unsigned long long)__entry->scan_start_tsf, MAC_PR_ARG(tsf_bssid)) ); DECLARE_EVENT_CLASS(wiphy_id_evt, TP_PROTO(struct wiphy *wiphy, u64 id), TP_ARGS(wiphy, id), TP_STRUCT__entry( WIPHY_ENTRY __field(u64, id) ), TP_fast_assign( WIPHY_ASSIGN; __entry->id = id; ), TP_printk(WIPHY_PR_FMT ", id: %llu", WIPHY_PR_ARG, __entry->id) ); DEFINE_EVENT(wiphy_id_evt, cfg80211_sched_scan_stopped, TP_PROTO(struct wiphy *wiphy, u64 id), TP_ARGS(wiphy, id) ); DEFINE_EVENT(wiphy_id_evt, cfg80211_sched_scan_results, TP_PROTO(struct wiphy *wiphy, u64 id), TP_ARGS(wiphy, id) ); TRACE_EVENT(cfg80211_get_bss, TP_PROTO(struct wiphy *wiphy, struct ieee80211_channel *channel, const u8 *bssid, const u8 *ssid, size_t ssid_len, enum ieee80211_bss_type bss_type, enum ieee80211_privacy privacy), TP_ARGS(wiphy, channel, bssid, ssid, ssid_len, bss_type, privacy), TP_STRUCT__entry( WIPHY_ENTRY CHAN_ENTRY MAC_ENTRY(bssid) __dynamic_array(u8, ssid, ssid_len) __field(enum ieee80211_bss_type, bss_type) __field(enum ieee80211_privacy, privacy) ), TP_fast_assign( WIPHY_ASSIGN; CHAN_ASSIGN(channel); MAC_ASSIGN(bssid, bssid); memcpy(__get_dynamic_array(ssid), ssid, ssid_len); __entry->bss_type = bss_type; __entry->privacy = privacy; ), TP_printk(WIPHY_PR_FMT ", " CHAN_PR_FMT ", " MAC_PR_FMT ", buf: %#.2x, bss_type: %d, privacy: %d", WIPHY_PR_ARG, CHAN_PR_ARG, MAC_PR_ARG(bssid), ((u8 *)__get_dynamic_array(ssid))[0], __entry->bss_type, __entry->privacy) ); TRACE_EVENT(cfg80211_inform_bss_frame, TP_PROTO(struct wiphy *wiphy, struct cfg80211_inform_bss *data, struct ieee80211_mgmt *mgmt, size_t len), TP_ARGS(wiphy, data, mgmt, len), TP_STRUCT__entry( WIPHY_ENTRY CHAN_ENTRY __field(enum nl80211_bss_scan_width, scan_width) __dynamic_array(u8, mgmt, len) __field(s32, signal) __field(u64, ts_boottime) __field(u64, parent_tsf) MAC_ENTRY(parent_bssid) ), TP_fast_assign( WIPHY_ASSIGN; CHAN_ASSIGN(data->chan); __entry->scan_width = data->scan_width; if (mgmt) memcpy(__get_dynamic_array(mgmt), mgmt, len); __entry->signal = data->signal; __entry->ts_boottime = data->boottime_ns; __entry->parent_tsf = data->parent_tsf; MAC_ASSIGN(parent_bssid, data->parent_bssid); ), TP_printk(WIPHY_PR_FMT ", " CHAN_PR_FMT "(scan_width: %d) signal: %d, tsb:%llu, detect_tsf:%llu, tsf_bssid: " MAC_PR_FMT, WIPHY_PR_ARG, CHAN_PR_ARG, __entry->scan_width, __entry->signal, (unsigned long long)__entry->ts_boottime, (unsigned long long)__entry->parent_tsf, MAC_PR_ARG(parent_bssid)) ); DECLARE_EVENT_CLASS(cfg80211_bss_evt, TP_PROTO(struct cfg80211_bss *pub), TP_ARGS(pub), TP_STRUCT__entry( MAC_ENTRY(bssid) CHAN_ENTRY ), TP_fast_assign( MAC_ASSIGN(bssid, pub->bssid); CHAN_ASSIGN(pub->channel); ), TP_printk(MAC_PR_FMT ", " CHAN_PR_FMT, MAC_PR_ARG(bssid), CHAN_PR_ARG) ); DEFINE_EVENT(cfg80211_bss_evt, cfg80211_return_bss, TP_PROTO(struct cfg80211_bss *pub), TP_ARGS(pub) ); TRACE_EVENT(cfg80211_return_uint, TP_PROTO(unsigned int ret), TP_ARGS(ret), TP_STRUCT__entry( __field(unsigned int, ret) ), TP_fast_assign( __entry->ret = ret; ), TP_printk("ret: %d", __entry->ret) ); TRACE_EVENT(cfg80211_return_u32, TP_PROTO(u32 ret), TP_ARGS(ret), TP_STRUCT__entry( __field(u32, ret) ), TP_fast_assign( 
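/* anatomy of these TRACE_EVENT()s: TP_STRUCT__entry() declares the
 * layout of the binary trace record, TP_fast_assign() runs at the
 * tracepoint to fill it in, and TP_printk() formats the record when
 * the trace buffer is read back */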
__entry->ret = ret; ), TP_printk("ret: %u", __entry->ret) ); TRACE_EVENT(cfg80211_report_wowlan_wakeup, TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev, struct cfg80211_wowlan_wakeup *wakeup), TP_ARGS(wiphy, wdev, wakeup), TP_STRUCT__entry( WIPHY_ENTRY WDEV_ENTRY __field(bool, non_wireless) __field(bool, disconnect) __field(bool, magic_pkt) __field(bool, gtk_rekey_failure) __field(bool, eap_identity_req) __field(bool, four_way_handshake) __field(bool, rfkill_release) __field(s32, pattern_idx) __field(u32, packet_len) __dynamic_array(u8, packet, wakeup ? wakeup->packet_present_len : 0) ), TP_fast_assign( WIPHY_ASSIGN; WDEV_ASSIGN; __entry->non_wireless = !wakeup; __entry->disconnect = wakeup ? wakeup->disconnect : false; __entry->magic_pkt = wakeup ? wakeup->magic_pkt : false; __entry->gtk_rekey_failure = wakeup ? wakeup->gtk_rekey_failure : false; __entry->eap_identity_req = wakeup ? wakeup->eap_identity_req : false; __entry->four_way_handshake = wakeup ? wakeup->four_way_handshake : false; __entry->rfkill_release = wakeup ? wakeup->rfkill_release : false; __entry->pattern_idx = wakeup ? wakeup->pattern_idx : false; __entry->packet_len = wakeup ? wakeup->packet_len : false; if (wakeup && wakeup->packet && wakeup->packet_present_len) memcpy(__get_dynamic_array(packet), wakeup->packet, wakeup->packet_present_len); ), TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT, WIPHY_PR_ARG, WDEV_PR_ARG) ); TRACE_EVENT(cfg80211_ft_event, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, struct cfg80211_ft_event_params *ft_event), TP_ARGS(wiphy, netdev, ft_event), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY __dynamic_array(u8, ies, ft_event->ies_len) MAC_ENTRY(target_ap) __dynamic_array(u8, ric_ies, ft_event->ric_ies_len) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; if (ft_event->ies) memcpy(__get_dynamic_array(ies), ft_event->ies, ft_event->ies_len); MAC_ASSIGN(target_ap, ft_event->target_ap); if (ft_event->ric_ies) memcpy(__get_dynamic_array(ric_ies), ft_event->ric_ies, ft_event->ric_ies_len); ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", target_ap: " MAC_PR_FMT, WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(target_ap)) ); TRACE_EVENT(cfg80211_stop_iface, TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev), TP_ARGS(wiphy, wdev), TP_STRUCT__entry( WIPHY_ENTRY WDEV_ENTRY ), TP_fast_assign( WIPHY_ASSIGN; WDEV_ASSIGN; ), TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT, WIPHY_PR_ARG, WDEV_PR_ARG) ); TRACE_EVENT(cfg80211_pmsr_report, TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev, u64 cookie, const u8 *addr), TP_ARGS(wiphy, wdev, cookie, addr), TP_STRUCT__entry( WIPHY_ENTRY WDEV_ENTRY __field(u64, cookie) MAC_ENTRY(addr) ), TP_fast_assign( WIPHY_ASSIGN; WDEV_ASSIGN; __entry->cookie = cookie; MAC_ASSIGN(addr, addr); ), TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT ", cookie:%lld, " MAC_PR_FMT, WIPHY_PR_ARG, WDEV_PR_ARG, (unsigned long long)__entry->cookie, MAC_PR_ARG(addr)) ); TRACE_EVENT(cfg80211_pmsr_complete, TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev, u64 cookie), TP_ARGS(wiphy, wdev, cookie), TP_STRUCT__entry( WIPHY_ENTRY WDEV_ENTRY __field(u64, cookie) ), TP_fast_assign( WIPHY_ASSIGN; WDEV_ASSIGN; __entry->cookie = cookie; ), TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT ", cookie:%lld", WIPHY_PR_ARG, WDEV_PR_ARG, (unsigned long long)__entry->cookie) ); #endif /* !__RDEV_OPS_TRACE || TRACE_HEADER_MULTI_READ */ #undef TRACE_INCLUDE_PATH #define TRACE_INCLUDE_PATH . 
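/*
 * The TRACE_INCLUDE_* definitions around this point drive the usual
 * kernel tracepoint double-include trick: the first pass over this
 * header (guarded by __RDEV_OPS_TRACE, see the #endif above) only
 * declares the events, and the trace machinery then re-reads the file
 * with TRACE_HEADER_MULTI_READ set to generate the implementation.
 */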
#undef TRACE_INCLUDE_FILE #define TRACE_INCLUDE_FILE trace #include backport-iwlwifi-dkms-7906/net/wireless/util.c000066400000000000000000001411151351064634500214500ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0 /* * Wireless utility functions * * Copyright 2007-2009 Johannes Berg * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright 2017 Intel Deutschland GmbH * Copyright (C) 2018-2019 Intel Corporation */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include "core.h" #include "rdev-ops.h" struct ieee80211_rate * ieee80211_get_response_rate(struct ieee80211_supported_band *sband, u32 basic_rates, int bitrate) { struct ieee80211_rate *result = &sband->bitrates[0]; int i; for (i = 0; i < sband->n_bitrates; i++) { if (!(basic_rates & BIT(i))) continue; if (sband->bitrates[i].bitrate > bitrate) continue; result = &sband->bitrates[i]; } return result; } EXPORT_SYMBOL(ieee80211_get_response_rate); u32 ieee80211_mandatory_rates(struct ieee80211_supported_band *sband, enum nl80211_bss_scan_width scan_width) { struct ieee80211_rate *bitrates; u32 mandatory_rates = 0; enum ieee80211_rate_flags mandatory_flag; int i; if (WARN_ON(!sband)) return 1; if (sband->band == NL80211_BAND_2GHZ) { if (scan_width == NL80211_BSS_CHAN_WIDTH_5 || scan_width == NL80211_BSS_CHAN_WIDTH_10) mandatory_flag = IEEE80211_RATE_MANDATORY_G; else mandatory_flag = IEEE80211_RATE_MANDATORY_B; } else { mandatory_flag = IEEE80211_RATE_MANDATORY_A; } bitrates = sband->bitrates; for (i = 0; i < sband->n_bitrates; i++) if (bitrates[i].flags & mandatory_flag) mandatory_rates |= BIT(i); return mandatory_rates; } EXPORT_SYMBOL(ieee80211_mandatory_rates); int ieee80211_channel_to_frequency(int chan, enum nl80211_band band) { /* see 802.11 17.3.8.3.2 and Annex J * there are overlapping channel numbers in 5GHz and 2GHz bands */ if (chan <= 0) return 0; /* not supported */ switch (band) { case NL80211_BAND_2GHZ: if (chan == 14) return 2484; else if (chan < 14) return 2407 + chan * 5; break; case NL80211_BAND_5GHZ: if (chan >= 182 && chan <= 196) return 4000 + chan * 5; else return 5000 + chan * 5; break; case NL80211_BAND_60GHZ: if (chan < 7) return 56160 + chan * 2160; break; default: ; } return 0; /* not supported */ } EXPORT_SYMBOL(ieee80211_channel_to_frequency); int ieee80211_frequency_to_channel(int freq) { /* see 802.11 17.3.8.3.2 and Annex J */ if (freq == 2484) return 14; else if (freq < 2484) return (freq - 2407) / 5; else if (freq >= 4910 && freq <= 4980) return (freq - 4000) / 5; else if (freq <= 45000) /* DMG band lower limit */ return (freq - 5000) / 5; else if (freq >= 58320 && freq <= 70200) return (freq - 56160) / 2160; else return 0; } EXPORT_SYMBOL(ieee80211_frequency_to_channel); struct ieee80211_channel *ieee80211_get_channel(struct wiphy *wiphy, int freq) { enum nl80211_band band; struct ieee80211_supported_band *sband; int i; for (band = 0; band < NUM_NL80211_BANDS; band++) { sband = wiphy->bands[band]; if (!sband) continue; for (i = 0; i < sband->n_channels; i++) { if (sband->channels[i].center_freq == freq) return &sband->channels[i]; } } return NULL; } EXPORT_SYMBOL(ieee80211_get_channel); static void set_mandatory_flags_band(struct ieee80211_supported_band *sband) { int i, want; switch (sband->band) { case NL80211_BAND_5GHZ: want = 3; for (i = 0; i < sband->n_bitrates; i++) { if (sband->bitrates[i].bitrate == 60 || sband->bitrates[i].bitrate == 120 || sband->bitrates[i].bitrate == 240) { 
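/* bitrate is in units of 100 kbit/s, so 60, 120 and 240 are the three
 * mandatory 802.11a OFDM rates 6, 12 and 24 Mbit/s; "want" counts them
 * down so that the WARN_ON(want) after the loop flags a band that is
 * missing any of them */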
sband->bitrates[i].flags |= IEEE80211_RATE_MANDATORY_A; want--; } } WARN_ON(want); break; case NL80211_BAND_2GHZ: want = 7; for (i = 0; i < sband->n_bitrates; i++) { switch (sband->bitrates[i].bitrate) { case 10: case 20: case 55: case 110: sband->bitrates[i].flags |= IEEE80211_RATE_MANDATORY_B | IEEE80211_RATE_MANDATORY_G; want--; break; case 60: case 120: case 240: sband->bitrates[i].flags |= IEEE80211_RATE_MANDATORY_G; want--; /* fall through */ default: sband->bitrates[i].flags |= IEEE80211_RATE_ERP_G; break; } } WARN_ON(want != 0 && want != 3); break; case NL80211_BAND_60GHZ: /* check for mandatory HT MCS 1..4 */ WARN_ON(!sband->ht_cap.ht_supported); WARN_ON((sband->ht_cap.mcs.rx_mask[0] & 0x1e) != 0x1e); break; case NUM_NL80211_BANDS: default: WARN_ON(1); break; } } void ieee80211_set_bitrate_flags(struct wiphy *wiphy) { enum nl80211_band band; for (band = 0; band < NUM_NL80211_BANDS; band++) if (wiphy->bands[band]) set_mandatory_flags_band(wiphy->bands[band]); } bool cfg80211_supported_cipher_suite(struct wiphy *wiphy, u32 cipher) { int i; for (i = 0; i < wiphy->n_cipher_suites; i++) if (cipher == wiphy->cipher_suites[i]) return true; return false; } int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev, struct key_params *params, int key_idx, bool pairwise, const u8 *mac_addr) { if (key_idx < 0 || key_idx > 5) return -EINVAL; if (!pairwise && mac_addr && !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN)) return -EINVAL; if (pairwise && !mac_addr) return -EINVAL; switch (params->cipher) { case WLAN_CIPHER_SUITE_TKIP: case WLAN_CIPHER_SUITE_CCMP: case WLAN_CIPHER_SUITE_CCMP_256: case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: /* Disallow pairwise keys with non-zero index unless it's WEP * or a vendor specific cipher (because current deployments use * pairwise WEP keys with non-zero indices and for vendor * specific ciphers this should be validated in the driver or * hardware level - but 802.11i clearly specifies to use zero) */ if (pairwise && key_idx) return -EINVAL; break; case WLAN_CIPHER_SUITE_AES_CMAC: case WLAN_CIPHER_SUITE_BIP_CMAC_256: case WLAN_CIPHER_SUITE_BIP_GMAC_128: case WLAN_CIPHER_SUITE_BIP_GMAC_256: /* Disallow BIP (group-only) cipher as pairwise cipher */ if (pairwise) return -EINVAL; if (key_idx < 4) return -EINVAL; break; case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: if (key_idx > 3) return -EINVAL; default: break; } switch (params->cipher) { case WLAN_CIPHER_SUITE_WEP40: if (params->key_len != WLAN_KEY_LEN_WEP40) return -EINVAL; break; case WLAN_CIPHER_SUITE_TKIP: if (params->key_len != WLAN_KEY_LEN_TKIP) return -EINVAL; break; case WLAN_CIPHER_SUITE_CCMP: if (params->key_len != WLAN_KEY_LEN_CCMP) return -EINVAL; break; case WLAN_CIPHER_SUITE_CCMP_256: if (params->key_len != WLAN_KEY_LEN_CCMP_256) return -EINVAL; break; case WLAN_CIPHER_SUITE_GCMP: if (params->key_len != WLAN_KEY_LEN_GCMP) return -EINVAL; break; case WLAN_CIPHER_SUITE_GCMP_256: if (params->key_len != WLAN_KEY_LEN_GCMP_256) return -EINVAL; break; case WLAN_CIPHER_SUITE_WEP104: if (params->key_len != WLAN_KEY_LEN_WEP104) return -EINVAL; break; case WLAN_CIPHER_SUITE_AES_CMAC: if (params->key_len != WLAN_KEY_LEN_AES_CMAC) return -EINVAL; break; case WLAN_CIPHER_SUITE_BIP_CMAC_256: if (params->key_len != WLAN_KEY_LEN_BIP_CMAC_256) return -EINVAL; break; case WLAN_CIPHER_SUITE_BIP_GMAC_128: if (params->key_len != WLAN_KEY_LEN_BIP_GMAC_128) return -EINVAL; break; case WLAN_CIPHER_SUITE_BIP_GMAC_256: if (params->key_len != WLAN_KEY_LEN_BIP_GMAC_256) return 
-EINVAL; break; default: /* * We don't know anything about this algorithm, * allow using it -- but the driver must check * all parameters! We still check below whether * or not the driver supports this algorithm, * of course. */ break; } if (params->seq) { switch (params->cipher) { case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: /* These ciphers do not use key sequence */ return -EINVAL; case WLAN_CIPHER_SUITE_TKIP: case WLAN_CIPHER_SUITE_CCMP: case WLAN_CIPHER_SUITE_CCMP_256: case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: case WLAN_CIPHER_SUITE_AES_CMAC: case WLAN_CIPHER_SUITE_BIP_CMAC_256: case WLAN_CIPHER_SUITE_BIP_GMAC_128: case WLAN_CIPHER_SUITE_BIP_GMAC_256: if (params->seq_len != 6) return -EINVAL; break; } } if (!cfg80211_supported_cipher_suite(&rdev->wiphy, params->cipher)) return -EINVAL; return 0; } unsigned int __attribute_const__ ieee80211_hdrlen(__le16 fc) { unsigned int hdrlen = 24; if (ieee80211_is_data(fc)) { if (ieee80211_has_a4(fc)) hdrlen = 30; if (ieee80211_is_data_qos(fc)) { hdrlen += IEEE80211_QOS_CTL_LEN; if (ieee80211_has_order(fc)) hdrlen += IEEE80211_HT_CTL_LEN; } goto out; } if (ieee80211_is_mgmt(fc)) { if (ieee80211_has_order(fc)) hdrlen += IEEE80211_HT_CTL_LEN; goto out; } if (ieee80211_is_ctl(fc)) { /* * ACK and CTS are 10 bytes, all others 16. To see how * to get this condition consider * subtype mask: 0b0000000011110000 (0x00F0) * ACK subtype: 0b0000000011010000 (0x00D0) * CTS subtype: 0b0000000011000000 (0x00C0) * bits that matter: ^^^ (0x00E0) * value of those: 0b0000000011000000 (0x00C0) */ if ((fc & cpu_to_le16(0x00E0)) == cpu_to_le16(0x00C0)) hdrlen = 10; else hdrlen = 16; } out: return hdrlen; } EXPORT_SYMBOL(ieee80211_hdrlen); unsigned int ieee80211_get_hdrlen_from_skb(const struct sk_buff *skb) { const struct ieee80211_hdr *hdr = (const struct ieee80211_hdr *)skb->data; unsigned int hdrlen; if (unlikely(skb->len < 10)) return 0; hdrlen = ieee80211_hdrlen(hdr->frame_control); if (unlikely(hdrlen > skb->len)) return 0; return hdrlen; } EXPORT_SYMBOL(ieee80211_get_hdrlen_from_skb); static unsigned int __ieee80211_get_mesh_hdrlen(u8 flags) { int ae = flags & MESH_FLAGS_AE; /* 802.11-2012, 8.2.4.7.3 */ switch (ae) { default: case 0: return 6; case MESH_FLAGS_AE_A4: return 12; case MESH_FLAGS_AE_A5_A6: return 18; } } unsigned int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr) { return __ieee80211_get_mesh_hdrlen(meshhdr->flags); } EXPORT_SYMBOL(ieee80211_get_mesh_hdrlen); int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr, const u8 *addr, enum nl80211_iftype iftype, u8 data_offset) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; struct { u8 hdr[ETH_ALEN] __aligned(2); __be16 proto; } payload; struct ethhdr tmp; u16 hdrlen; u8 mesh_flags = 0; if (unlikely(!ieee80211_is_data_present(hdr->frame_control))) return -1; hdrlen = ieee80211_hdrlen(hdr->frame_control) + data_offset; if (skb->len < hdrlen + 8) return -1; /* convert IEEE 802.11 header + possible LLC headers into Ethernet * header * IEEE 802.11 address fields: * ToDS FromDS Addr1 Addr2 Addr3 Addr4 * 0 0 DA SA BSSID n/a * 0 1 DA BSSID SA n/a * 1 0 BSSID SA DA n/a * 1 1 RA TA DA SA */ memcpy(tmp.h_dest, ieee80211_get_DA(hdr), ETH_ALEN); memcpy(tmp.h_source, ieee80211_get_SA(hdr), ETH_ALEN); if (iftype == NL80211_IFTYPE_MESH_POINT) skb_copy_bits(skb, hdrlen, &mesh_flags, 1); mesh_flags &= MESH_FLAGS_AE; switch (hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) { case 
cpu_to_le16(IEEE80211_FCTL_TODS): if (unlikely(iftype != NL80211_IFTYPE_AP && iftype != NL80211_IFTYPE_AP_VLAN && iftype != NL80211_IFTYPE_P2P_GO)) return -1; break; case cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS): if (unlikely(iftype != NL80211_IFTYPE_WDS && iftype != NL80211_IFTYPE_MESH_POINT && iftype != NL80211_IFTYPE_AP_VLAN && iftype != NL80211_IFTYPE_STATION)) return -1; if (iftype == NL80211_IFTYPE_MESH_POINT) { if (mesh_flags == MESH_FLAGS_AE_A4) return -1; if (mesh_flags == MESH_FLAGS_AE_A5_A6) { skb_copy_bits(skb, hdrlen + offsetof(struct ieee80211s_hdr, eaddr1), tmp.h_dest, 2 * ETH_ALEN); } hdrlen += __ieee80211_get_mesh_hdrlen(mesh_flags); } break; case cpu_to_le16(IEEE80211_FCTL_FROMDS): if ((iftype != NL80211_IFTYPE_STATION && iftype != NL80211_IFTYPE_P2P_CLIENT && iftype != NL80211_IFTYPE_MESH_POINT) || (is_multicast_ether_addr(tmp.h_dest) && ether_addr_equal(tmp.h_source, addr))) return -1; if (iftype == NL80211_IFTYPE_MESH_POINT) { if (mesh_flags == MESH_FLAGS_AE_A5_A6) return -1; if (mesh_flags == MESH_FLAGS_AE_A4) skb_copy_bits(skb, hdrlen + offsetof(struct ieee80211s_hdr, eaddr1), tmp.h_source, ETH_ALEN); hdrlen += __ieee80211_get_mesh_hdrlen(mesh_flags); } break; case cpu_to_le16(0): if (iftype != NL80211_IFTYPE_ADHOC && iftype != NL80211_IFTYPE_STATION && iftype != NL80211_IFTYPE_OCB && iftype != NL80211_IFTYPE_NAN_DATA) return -1; break; } skb_copy_bits(skb, hdrlen, &payload, sizeof(payload)); tmp.h_proto = payload.proto; if (likely((ether_addr_equal(payload.hdr, rfc1042_header) && tmp.h_proto != htons(ETH_P_AARP) && tmp.h_proto != htons(ETH_P_IPX)) || ether_addr_equal(payload.hdr, bridge_tunnel_header))) /* remove RFC1042 or Bridge-Tunnel encapsulation and * replace EtherType */ hdrlen += ETH_ALEN + 2; else tmp.h_proto = htons(skb->len - hdrlen); pskb_pull(skb, hdrlen); if (!ehdr) ehdr = skb_push(skb, sizeof(struct ethhdr)); memcpy(ehdr, &tmp, sizeof(tmp)); return 0; } EXPORT_SYMBOL(ieee80211_data_to_8023_exthdr); static void __frame_add_frag(struct sk_buff *skb, struct page *page, void *ptr, int len, int size) { struct skb_shared_info *sh = skb_shinfo(skb); int page_offset; page_ref_inc(page); page_offset = ptr - page_address(page); skb_add_rx_frag(skb, sh->nr_frags, page, page_offset, len, size); } static void __ieee80211_amsdu_copy_frag(struct sk_buff *skb, struct sk_buff *frame, int offset, int len) { struct skb_shared_info *sh = skb_shinfo(skb); const skb_frag_t *frag = &sh->frags[0]; struct page *frag_page; void *frag_ptr; int frag_len, frag_size; int head_size = skb->len - skb->data_len; int cur_len; frag_page = virt_to_head_page(skb->head); frag_ptr = skb->data; frag_size = head_size; while (offset >= frag_size) { offset -= frag_size; frag_page = skb_frag_page(frag); frag_ptr = skb_frag_address(frag); frag_size = skb_frag_size(frag); frag++; } frag_ptr += offset; frag_len = frag_size - offset; cur_len = min(len, frag_len); __frame_add_frag(frame, frag_page, frag_ptr, cur_len, frag_size); len -= cur_len; while (len > 0) { frag_len = skb_frag_size(frag); cur_len = min(len, frag_len); __frame_add_frag(frame, skb_frag_page(frag), skb_frag_address(frag), cur_len, frag_len); len -= cur_len; frag++; } } static struct sk_buff * __ieee80211_amsdu_copy(struct sk_buff *skb, unsigned int hlen, int offset, int len, bool reuse_frag) { struct sk_buff *frame; int cur_len = len; if (skb->len - offset < len) return NULL; /* * When reusing fragments, copy some data to the head to simplify * ethernet header handling and speed up protocol header processing * in the
stack later. */ if (reuse_frag) cur_len = min_t(int, len, 32); /* * Allocate and reserve two bytes more for payload * alignment since sizeof(struct ethhdr) is 14. */ frame = dev_alloc_skb(hlen + sizeof(struct ethhdr) + 2 + cur_len); if (!frame) return NULL; skb_reserve(frame, hlen + sizeof(struct ethhdr) + 2); skb_copy_bits(skb, offset, skb_put(frame, cur_len), cur_len); len -= cur_len; if (!len) return frame; offset += cur_len; __ieee80211_amsdu_copy_frag(skb, frame, offset, len); return frame; } void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list, const u8 *addr, enum nl80211_iftype iftype, const unsigned int extra_headroom, const u8 *check_da, const u8 *check_sa) { unsigned int hlen = ALIGN(extra_headroom, 4); struct sk_buff *frame = NULL; u16 ethertype; u8 *payload; int offset = 0, remaining; struct ethhdr eth; #if LINUX_VERSION_IS_LESS(3,5,0) bool reuse_frag = 0; #else bool reuse_frag = skb->head_frag && !skb_has_frag_list(skb); #endif bool reuse_skb = false; bool last = false; while (!last) { unsigned int subframe_len; int len; u8 padding; skb_copy_bits(skb, offset, &eth, sizeof(eth)); len = ntohs(eth.h_proto); subframe_len = sizeof(struct ethhdr) + len; padding = (4 - subframe_len) & 0x3; /* the last MSDU has no padding */ remaining = skb->len - offset; if (subframe_len > remaining) goto purge; offset += sizeof(struct ethhdr); last = remaining <= subframe_len + padding; /* FIXME: should we really accept multicast DA? */ if ((check_da && !is_multicast_ether_addr(eth.h_dest) && !ether_addr_equal(check_da, eth.h_dest)) || (check_sa && !ether_addr_equal(check_sa, eth.h_source))) { offset += len + padding; continue; } /* reuse skb for the last subframe */ if (!skb_is_nonlinear(skb) && !reuse_frag && last) { skb_pull(skb, offset); frame = skb; reuse_skb = true; } else { frame = __ieee80211_amsdu_copy(skb, hlen, offset, len, reuse_frag); if (!frame) goto purge; offset += len + padding; } skb_reset_network_header(frame); frame->dev = skb->dev; frame->priority = skb->priority; payload = frame->data; ethertype = (payload[6] << 8) | payload[7]; if (likely((ether_addr_equal(payload, rfc1042_header) && ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) || ether_addr_equal(payload, bridge_tunnel_header))) { eth.h_proto = htons(ethertype); skb_pull(frame, ETH_ALEN + 2); } memcpy(skb_push(frame, sizeof(eth)), &eth, sizeof(eth)); __skb_queue_tail(list, frame); } if (!reuse_skb) dev_kfree_skb(skb); return; purge: __skb_queue_purge(list); dev_kfree_skb(skb); } EXPORT_SYMBOL(ieee80211_amsdu_to_8023s); /* Given a data frame determine the 802.1p/1d tag to use. */ unsigned int cfg80211_classify8021d(struct sk_buff *skb, struct cfg80211_qos_map *qos_map) { unsigned int dscp; unsigned char vlan_priority; unsigned int ret; /* skb->priority values from 256->263 are magic values to * directly indicate a specific 802.1d priority. This is used * to allow 802.1d priority to be passed directly in from VLAN * tags, etc.
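* Everything else falls through to the default mapping at the end of
* this function, ret = dscp >> 5: for example an IPv4 packet with
* dsfield 0xb8 (DSCP 46) and no matching QoS map entry yields
* 0xb8 >> 5 == 5.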
*/ if (skb->priority >= 256 && skb->priority <= 263) { ret = skb->priority - 256; goto out; } if (skb_vlan_tag_present(skb)) { vlan_priority = (skb_vlan_tag_get(skb) & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; if (vlan_priority > 0) { ret = vlan_priority; goto out; } } switch (skb->protocol) { case htons(ETH_P_IP): dscp = ipv4_get_dsfield(ip_hdr(skb)) & 0xfc; break; case htons(ETH_P_IPV6): dscp = ipv6_get_dsfield(ipv6_hdr(skb)) & 0xfc; break; case htons(ETH_P_MPLS_UC): case htons(ETH_P_MPLS_MC): { struct mpls_label mpls_tmp, *mpls; mpls = skb_header_pointer(skb, sizeof(struct ethhdr), sizeof(*mpls), &mpls_tmp); if (!mpls) return 0; ret = (ntohl(mpls->entry) & MPLS_LS_TC_MASK) >> MPLS_LS_TC_SHIFT; goto out; } case htons(ETH_P_80221): /* 802.21 is always network control traffic */ return 7; default: return 0; } if (qos_map) { unsigned int i, tmp_dscp = dscp >> 2; for (i = 0; i < qos_map->num_des; i++) { if (tmp_dscp == qos_map->dscp_exception[i].dscp) { ret = qos_map->dscp_exception[i].up; goto out; } } for (i = 0; i < 8; i++) { if (tmp_dscp >= qos_map->up[i].low && tmp_dscp <= qos_map->up[i].high) { ret = i; goto out; } } } ret = dscp >> 5; out: return array_index_nospec(ret, IEEE80211_NUM_TIDS); } EXPORT_SYMBOL(cfg80211_classify8021d); const struct element *ieee80211_bss_get_elem(struct cfg80211_bss *bss, u8 id) { const struct cfg80211_bss_ies *ies; ies = rcu_dereference(bss->ies); if (!ies) return NULL; return cfg80211_find_elem(id, ies->data, ies->len); } EXPORT_SYMBOL(ieee80211_bss_get_elem); void cfg80211_upload_connect_keys(struct wireless_dev *wdev) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); struct net_device *dev = wdev->netdev; int i; if (!wdev->connect_keys) return; for (i = 0; i < CFG80211_MAX_WEP_KEYS; i++) { if (!wdev->connect_keys->params[i].cipher) continue; if (rdev_add_key(rdev, dev, i, false, NULL, &wdev->connect_keys->params[i])) { netdev_err(dev, "failed to set key %d\n", i); continue; } if (wdev->connect_keys->def == i && rdev_set_default_key(rdev, dev, i, true, true)) { netdev_err(dev, "failed to set defkey %d\n", i); continue; } } kzfree(wdev->connect_keys); wdev->connect_keys = NULL; } void cfg80211_process_wdev_events(struct wireless_dev *wdev) { struct cfg80211_event *ev; unsigned long flags; spin_lock_irqsave(&wdev->event_lock, flags); while (!list_empty(&wdev->event_list)) { ev = list_first_entry(&wdev->event_list, struct cfg80211_event, list); list_del(&ev->list); spin_unlock_irqrestore(&wdev->event_lock, flags); wdev_lock(wdev); switch (ev->type) { case EVENT_CONNECT_RESULT: __cfg80211_connect_result( wdev->netdev, &ev->cr, ev->cr.status == WLAN_STATUS_SUCCESS); break; case EVENT_ROAMED: __cfg80211_roamed(wdev, &ev->rm); break; case EVENT_DISCONNECTED: __cfg80211_disconnected(wdev->netdev, ev->dc.ie, ev->dc.ie_len, ev->dc.reason, !ev->dc.locally_generated); break; case EVENT_IBSS_JOINED: __cfg80211_ibss_joined(wdev->netdev, ev->ij.bssid, ev->ij.channel); break; case EVENT_STOPPED: __cfg80211_leave(wiphy_to_rdev(wdev->wiphy), wdev); break; case EVENT_PORT_AUTHORIZED: __cfg80211_port_authorized(wdev, ev->pa.bssid); break; } wdev_unlock(wdev); kfree(ev); spin_lock_irqsave(&wdev->event_lock, flags); } spin_unlock_irqrestore(&wdev->event_lock, flags); } void cfg80211_process_rdev_events(struct cfg80211_registered_device *rdev) { struct wireless_dev *wdev; ASSERT_RTNL(); list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) cfg80211_process_wdev_events(wdev); } int cfg80211_change_iface(struct cfg80211_registered_device *rdev, struct 
net_device *dev, enum nl80211_iftype ntype, struct vif_params *params) { int err; enum nl80211_iftype otype = dev->ieee80211_ptr->iftype; ASSERT_RTNL(); /* don't support changing VLANs, you just re-create them */ if (otype == NL80211_IFTYPE_AP_VLAN) return -EOPNOTSUPP; /* cannot change into P2P device or NAN */ if (ntype == NL80211_IFTYPE_P2P_DEVICE || ntype == NL80211_IFTYPE_NAN) return -EOPNOTSUPP; if (!rdev->ops->change_virtual_intf || !(rdev->wiphy.interface_modes & (1 << ntype))) return -EOPNOTSUPP; /* if it's part of a bridge, reject changing type to station/ibss */ if ((dev->priv_flags & IFF_BRIDGE_PORT) && (ntype == NL80211_IFTYPE_ADHOC || ntype == NL80211_IFTYPE_STATION || ntype == NL80211_IFTYPE_P2P_CLIENT)) return -EBUSY; if (ntype != otype) { dev->ieee80211_ptr->use_4addr = false; dev->ieee80211_ptr->mesh_id_up_len = 0; wdev_lock(dev->ieee80211_ptr); rdev_set_qos_map(rdev, dev, NULL); wdev_unlock(dev->ieee80211_ptr); switch (otype) { case NL80211_IFTYPE_AP: cfg80211_stop_ap(rdev, dev, true); break; case NL80211_IFTYPE_ADHOC: cfg80211_leave_ibss(rdev, dev, false); break; case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_P2P_CLIENT: wdev_lock(dev->ieee80211_ptr); cfg80211_disconnect(rdev, dev, WLAN_REASON_DEAUTH_LEAVING, true); wdev_unlock(dev->ieee80211_ptr); break; case NL80211_IFTYPE_MESH_POINT: /* mesh should be handled? */ break; default: break; } cfg80211_process_rdev_events(rdev); } err = rdev_change_virtual_intf(rdev, dev, ntype, params); WARN_ON(!err && dev->ieee80211_ptr->iftype != ntype); if (!err && params && params->use_4addr != -1) dev->ieee80211_ptr->use_4addr = params->use_4addr; if (!err) { dev->priv_flags &= ~IFF_DONT_BRIDGE; switch (ntype) { case NL80211_IFTYPE_STATION: if (dev->ieee80211_ptr->use_4addr) break; /* fall through */ case NL80211_IFTYPE_OCB: case NL80211_IFTYPE_P2P_CLIENT: case NL80211_IFTYPE_ADHOC: case NL80211_IFTYPE_NAN_DATA: dev->priv_flags |= IFF_DONT_BRIDGE; break; case NL80211_IFTYPE_P2P_GO: case NL80211_IFTYPE_AP: case NL80211_IFTYPE_AP_VLAN: case NL80211_IFTYPE_WDS: case NL80211_IFTYPE_MESH_POINT: /* bridging OK */ break; case NL80211_IFTYPE_MONITOR: /* monitor can't bridge anyway */ break; case NL80211_IFTYPE_UNSPECIFIED: case NUM_NL80211_IFTYPES: /* not happening */ break; case NL80211_IFTYPE_P2P_DEVICE: case NL80211_IFTYPE_NAN: WARN_ON(1); break; } } if (!err && ntype != otype && netif_running(dev)) { cfg80211_update_iface_num(rdev, ntype, 1); cfg80211_update_iface_num(rdev, otype, -1); } return err; } static u32 cfg80211_calculate_bitrate_ht(struct rate_info *rate) { int modulation, streams, bitrate; /* the formula below does only work for MCS values smaller than 32 */ if (WARN_ON_ONCE(rate->mcs >= 32)) return 0; modulation = rate->mcs & 7; streams = (rate->mcs >> 3) + 1; bitrate = (rate->bw == RATE_INFO_BW_40) ? 
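/* base rate is MCS 0 with a single stream, in bit/s: 13.5 Mbit/s on
 * 40 MHz, 6.5 Mbit/s on 20 MHz; with the scaling below, e.g. MCS 7,
 * one stream, 40 MHz, short GI works out to 13.5 * 10 = 135 Mbit/s,
 * then 135 * 10/9 = 150 Mbit/s, returned as 1500 in units of
 * 100 kbit/s */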
13500000 : 6500000; if (modulation < 4) bitrate *= (modulation + 1); else if (modulation == 4) bitrate *= (modulation + 2); else bitrate *= (modulation + 3); bitrate *= streams; if (rate->flags & RATE_INFO_FLAGS_SHORT_GI) bitrate = (bitrate / 9) * 10; /* do NOT round down here */ return (bitrate + 50000) / 100000; } static u32 cfg80211_calculate_bitrate_60g(struct rate_info *rate) { static const u32 __mcs2bitrate[] = { /* control PHY */ [0] = 275, /* SC PHY */ [1] = 3850, [2] = 7700, [3] = 9625, [4] = 11550, [5] = 12512, /* 1251.25 mbps */ [6] = 15400, [7] = 19250, [8] = 23100, [9] = 25025, [10] = 30800, [11] = 38500, [12] = 46200, /* OFDM PHY */ [13] = 6930, [14] = 8662, /* 866.25 mbps */ [15] = 13860, [16] = 17325, [17] = 20790, [18] = 27720, [19] = 34650, [20] = 41580, [21] = 45045, [22] = 51975, [23] = 62370, [24] = 67568, /* 6756.75 mbps */ /* LP-SC PHY */ [25] = 6260, [26] = 8340, [27] = 11120, [28] = 12510, [29] = 16680, [30] = 22240, [31] = 25030, }; if (WARN_ON_ONCE(rate->mcs >= ARRAY_SIZE(__mcs2bitrate))) return 0; return __mcs2bitrate[rate->mcs]; } static u32 cfg80211_calculate_bitrate_vht(struct rate_info *rate) { static const u32 base[4][10] = { { 6500000, 13000000, 19500000, 26000000, 39000000, 52000000, 58500000, 65000000, 78000000, /* not in the spec, but some devices use this: */ 86500000, }, { 13500000, 27000000, 40500000, 54000000, 81000000, 108000000, 121500000, 135000000, 162000000, 180000000, }, { 29300000, 58500000, 87800000, 117000000, 175500000, 234000000, 263300000, 292500000, 351000000, 390000000, }, { 58500000, 117000000, 175500000, 234000000, 351000000, 468000000, 526500000, 585000000, 702000000, 780000000, }, }; u32 bitrate; int idx; if (rate->mcs > 9) goto warn; switch (rate->bw) { case RATE_INFO_BW_160: idx = 3; break; case RATE_INFO_BW_80: idx = 2; break; case RATE_INFO_BW_40: idx = 1; break; case RATE_INFO_BW_5: case RATE_INFO_BW_10: default: goto warn; case RATE_INFO_BW_20: idx = 0; } bitrate = base[idx][rate->mcs]; bitrate *= rate->nss; if (rate->flags & RATE_INFO_FLAGS_SHORT_GI) bitrate = (bitrate / 9) * 10; /* do NOT round down here */ return (bitrate + 50000) / 100000; warn: WARN_ONCE(1, "invalid rate bw=%d, mcs=%d, nss=%d\n", rate->bw, rate->mcs, rate->nss); return 0; } static u32 cfg80211_calculate_bitrate_he(struct rate_info *rate) { #define SCALE 2048 u16 mcs_divisors[12] = { 34133, /* 16.666666... */ 17067, /* 8.333333... */ 11378, /* 5.555555... */ 8533, /* 4.166666... */ 5689, /* 2.777777... */ 4267, /* 2.083333... */ 3923, /* 1.851851... */ 3413, /* 1.666666... */ 2844, /* 1.388888... */ 2560, /* 1.250000... */ 2276, /* 1.111111... */ 2048, /* 1.000000... 
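* Each divisor is SCALE times the ratio of the MCS 11 data rate to
* the data rate of the given MCS, so result * SCALE / divisor
* rescales the MCS 11 rates tabulated below. For example 20 MHz
* (242-tone), 0.8 us GI, MCS 0, one spatial stream:
* 114711111 * 2048 / 34133 / 8 / 10000 == 86, i.e. 8.6 Mbit/s in
* units of 100 kbit/s.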
*/ }; u32 rates_160M[3] = { 960777777, 907400000, 816666666 }; u32 rates_969[3] = { 480388888, 453700000, 408333333 }; u32 rates_484[3] = { 229411111, 216666666, 195000000 }; u32 rates_242[3] = { 114711111, 108333333, 97500000 }; u32 rates_106[3] = { 40000000, 37777777, 34000000 }; u32 rates_52[3] = { 18820000, 17777777, 16000000 }; u32 rates_26[3] = { 9411111, 8888888, 8000000 }; u64 tmp; u32 result; if (WARN_ON_ONCE(rate->mcs > 11)) return 0; if (WARN_ON_ONCE(rate->he_gi > NL80211_RATE_INFO_HE_GI_3_2)) return 0; if (WARN_ON_ONCE(rate->he_ru_alloc > NL80211_RATE_INFO_HE_RU_ALLOC_2x996)) return 0; if (WARN_ON_ONCE(rate->nss < 1 || rate->nss > 8)) return 0; if (rate->bw == RATE_INFO_BW_160) result = rates_160M[rate->he_gi]; else if (rate->bw == RATE_INFO_BW_80 || (rate->bw == RATE_INFO_BW_HE_RU && rate->he_ru_alloc == NL80211_RATE_INFO_HE_RU_ALLOC_996)) result = rates_969[rate->he_gi]; else if (rate->bw == RATE_INFO_BW_40 || (rate->bw == RATE_INFO_BW_HE_RU && rate->he_ru_alloc == NL80211_RATE_INFO_HE_RU_ALLOC_484)) result = rates_484[rate->he_gi]; else if (rate->bw == RATE_INFO_BW_20 || (rate->bw == RATE_INFO_BW_HE_RU && rate->he_ru_alloc == NL80211_RATE_INFO_HE_RU_ALLOC_242)) result = rates_242[rate->he_gi]; else if (rate->bw == RATE_INFO_BW_HE_RU && rate->he_ru_alloc == NL80211_RATE_INFO_HE_RU_ALLOC_106) result = rates_106[rate->he_gi]; else if (rate->bw == RATE_INFO_BW_HE_RU && rate->he_ru_alloc == NL80211_RATE_INFO_HE_RU_ALLOC_52) result = rates_52[rate->he_gi]; else if (rate->bw == RATE_INFO_BW_HE_RU && rate->he_ru_alloc == NL80211_RATE_INFO_HE_RU_ALLOC_26) result = rates_26[rate->he_gi]; else if (WARN(1, "invalid HE MCS: bw:%d, ru:%d\n", rate->bw, rate->he_ru_alloc)) return 0; /* now scale to the appropriate MCS */ tmp = result; tmp *= SCALE; do_div(tmp, mcs_divisors[rate->mcs]); result = tmp; /* and take NSS, DCM into account */ result = (result * rate->nss) / 8; if (rate->he_dcm) result /= 2; return result / 10000; } u32 cfg80211_calculate_bitrate(struct rate_info *rate) { if (rate->flags & RATE_INFO_FLAGS_MCS) return cfg80211_calculate_bitrate_ht(rate); if (rate->flags & RATE_INFO_FLAGS_60G) return cfg80211_calculate_bitrate_60g(rate); if (rate->flags & RATE_INFO_FLAGS_VHT_MCS) return cfg80211_calculate_bitrate_vht(rate); if (rate->flags & RATE_INFO_FLAGS_HE_MCS) return cfg80211_calculate_bitrate_he(rate); return rate->legacy; } EXPORT_SYMBOL(cfg80211_calculate_bitrate); int cfg80211_get_p2p_attr(const u8 *ies, unsigned int len, enum ieee80211_p2p_attr_id attr, u8 *buf, unsigned int bufsize) { u8 *out = buf; u16 attr_remaining = 0; bool desired_attr = false; u16 desired_len = 0; while (len > 0) { unsigned int iedatalen; unsigned int copy; const u8 *iedata; if (len < 2) return -EILSEQ; iedatalen = ies[1]; if (iedatalen + 2 > len) return -EILSEQ; if (ies[0] != WLAN_EID_VENDOR_SPECIFIC) goto cont; if (iedatalen < 4) goto cont; iedata = ies + 2; /* check WFA OUI, P2P subtype */ if (iedata[0] != 0x50 || iedata[1] != 0x6f || iedata[2] != 0x9a || iedata[3] != 0x09) goto cont; iedatalen -= 4; iedata += 4; /* check attribute continuation into this IE */ copy = min_t(unsigned int, attr_remaining, iedatalen); if (copy && desired_attr) { desired_len += copy; if (out) { memcpy(out, iedata, min(bufsize, copy)); out += min(bufsize, copy); bufsize -= min(bufsize, copy); } if (copy == attr_remaining) return desired_len; } attr_remaining -= copy; if (attr_remaining) goto cont; iedatalen -= copy; iedata += copy; while (iedatalen > 0) { u16 attr_len; /* P2P attribute ID & size must fit */ if (iedatalen < 3)
return -EILSEQ; desired_attr = iedata[0] == attr; attr_len = get_unaligned_le16(iedata + 1); iedatalen -= 3; iedata += 3; copy = min_t(unsigned int, attr_len, iedatalen); if (desired_attr) { desired_len += copy; if (out) { memcpy(out, iedata, min(bufsize, copy)); out += min(bufsize, copy); bufsize -= min(bufsize, copy); } if (copy == attr_len) return desired_len; } iedata += copy; iedatalen -= copy; attr_remaining = attr_len - copy; } cont: len -= ies[1] + 2; ies += ies[1] + 2; } if (attr_remaining && desired_attr) return -EILSEQ; return -ENOENT; } EXPORT_SYMBOL(cfg80211_get_p2p_attr); static bool ieee80211_id_in_list(const u8 *ids, int n_ids, u8 id, bool id_ext) { int i; /* Make sure array values are legal */ if (WARN_ON(ids[n_ids - 1] == WLAN_EID_EXTENSION)) return false; i = 0; while (i < n_ids) { if (ids[i] == WLAN_EID_EXTENSION) { if (id_ext && (ids[i + 1] == id)) return true; i += 2; continue; } if (ids[i] == id && !id_ext) return true; i++; } return false; } static size_t skip_ie(const u8 *ies, size_t ielen, size_t pos) { /* we assume a validly formed IEs buffer */ u8 len = ies[pos + 1]; pos += 2 + len; /* the IE itself must have 255 bytes for fragments to follow */ if (len < 255) return pos; while (pos < ielen && ies[pos] == WLAN_EID_FRAGMENT) { len = ies[pos + 1]; pos += 2 + len; } return pos; } size_t ieee80211_ie_split_ric(const u8 *ies, size_t ielen, const u8 *ids, int n_ids, const u8 *after_ric, int n_after_ric, size_t offset) { size_t pos = offset; while (pos < ielen) { u8 ext = 0; if (ies[pos] == WLAN_EID_EXTENSION) ext = 2; if ((pos + ext) >= ielen) break; if (!ieee80211_id_in_list(ids, n_ids, ies[pos + ext], ies[pos] == WLAN_EID_EXTENSION)) break; if (ies[pos] == WLAN_EID_RIC_DATA && n_after_ric) { pos = skip_ie(ies, ielen, pos); while (pos < ielen) { if (ies[pos] == WLAN_EID_EXTENSION) ext = 2; else ext = 0; if ((pos + ext) >= ielen) break; if (!ieee80211_id_in_list(after_ric, n_after_ric, ies[pos + ext], ext == 2)) pos = skip_ie(ies, ielen, pos); else break; } } else { pos = skip_ie(ies, ielen, pos); } } return pos; } EXPORT_SYMBOL(ieee80211_ie_split_ric); bool ieee80211_operating_class_to_band(u8 operating_class, enum nl80211_band *band) { switch (operating_class) { case 112: case 115 ... 127: case 128 ... 
130: *band = NL80211_BAND_5GHZ; return true; case 81: case 82: case 83: case 84: *band = NL80211_BAND_2GHZ; return true; case 180: *band = NL80211_BAND_60GHZ; return true; } return false; } EXPORT_SYMBOL(ieee80211_operating_class_to_band); bool ieee80211_chandef_to_operating_class(struct cfg80211_chan_def *chandef, u8 *op_class) { u8 vht_opclass; u32 freq = chandef->center_freq1; if (freq >= 2412 && freq <= 2472) { if (chandef->width > NL80211_CHAN_WIDTH_40) return false; /* 2.407 GHz, channels 1..13 */ if (chandef->width == NL80211_CHAN_WIDTH_40) { if (freq > chandef->chan->center_freq) *op_class = 83; /* HT40+ */ else *op_class = 84; /* HT40- */ } else { *op_class = 81; } return true; } if (freq == 2484) { if (chandef->width > NL80211_CHAN_WIDTH_40) return false; *op_class = 82; /* channel 14 */ return true; } switch (chandef->width) { case NL80211_CHAN_WIDTH_80: vht_opclass = 128; break; case NL80211_CHAN_WIDTH_160: vht_opclass = 129; break; case NL80211_CHAN_WIDTH_80P80: vht_opclass = 130; break; case NL80211_CHAN_WIDTH_10: case NL80211_CHAN_WIDTH_5: return false; /* unsupported for now */ default: vht_opclass = 0; break; } /* 5 GHz, channels 36..48 */ if (freq >= 5180 && freq <= 5240) { if (vht_opclass) { *op_class = vht_opclass; } else if (chandef->width == NL80211_CHAN_WIDTH_40) { if (freq > chandef->chan->center_freq) *op_class = 116; else *op_class = 117; } else { *op_class = 115; } return true; } /* 5 GHz, channels 52..64 */ if (freq >= 5260 && freq <= 5320) { if (vht_opclass) { *op_class = vht_opclass; } else if (chandef->width == NL80211_CHAN_WIDTH_40) { if (freq > chandef->chan->center_freq) *op_class = 119; else *op_class = 120; } else { *op_class = 118; } return true; } /* 5 GHz, channels 100..144 */ if (freq >= 5500 && freq <= 5720) { if (vht_opclass) { *op_class = vht_opclass; } else if (chandef->width == NL80211_CHAN_WIDTH_40) { if (freq > chandef->chan->center_freq) *op_class = 122; else *op_class = 123; } else { *op_class = 121; } return true; } /* 5 GHz, channels 149..169 */ if (freq >= 5745 && freq <= 5845) { if (vht_opclass) { *op_class = vht_opclass; } else if (chandef->width == NL80211_CHAN_WIDTH_40) { if (freq > chandef->chan->center_freq) *op_class = 126; else *op_class = 127; } else if (freq <= 5805) { *op_class = 124; } else { *op_class = 125; } return true; } /* 56.16 GHz, channel 1..4 */ if (freq >= 56160 + 2160 * 1 && freq <= 56160 + 2160 * 6) { if (chandef->width >= NL80211_CHAN_WIDTH_40) return false; *op_class = 180; return true; } /* not supported yet */ return false; } EXPORT_SYMBOL(ieee80211_chandef_to_operating_class); static void cfg80211_calculate_bi_data(struct wiphy *wiphy, u32 new_beacon_int, u32 *beacon_int_gcd, bool *beacon_int_different) { struct wireless_dev *wdev; *beacon_int_gcd = 0; *beacon_int_different = false; list_for_each_entry(wdev, &wiphy->wdev_list, list) { if (!wdev->beacon_interval) continue; if (!*beacon_int_gcd) { *beacon_int_gcd = wdev->beacon_interval; continue; } if (wdev->beacon_interval == *beacon_int_gcd) continue; *beacon_int_different = true; *beacon_int_gcd = gcd(*beacon_int_gcd, wdev->beacon_interval); } if (new_beacon_int && *beacon_int_gcd != new_beacon_int) { if (*beacon_int_gcd) *beacon_int_different = true; *beacon_int_gcd = gcd(*beacon_int_gcd, new_beacon_int); } } int cfg80211_validate_beacon_int(struct cfg80211_registered_device *rdev, enum nl80211_iftype iftype, u32 beacon_int) { /* * This is just a basic pre-condition check; if interface combinations * are possible the driver must already be checking those 
with a call * to cfg80211_check_combinations(), in which case we'll validate more * through the cfg80211_calculate_bi_data() call and code in * cfg80211_iter_combinations(). */ if (beacon_int < 10 || beacon_int > 10000) return -EINVAL; return 0; } int cfg80211_iter_combinations(struct wiphy *wiphy, struct iface_combination_params *params, void (*iter)(const struct ieee80211_iface_combination *c, void *data), void *data) { const struct ieee80211_regdomain *regdom; enum nl80211_dfs_regions region = 0; int i, j, iftype; int num_interfaces = 0; u32 used_iftypes = 0; u32 beacon_int_gcd; bool beacon_int_different; /* * This is a bit strange, since the iteration used to rely only on * the data given by the driver, but here it now relies on context, * in form of the currently operating interfaces. * This is OK for all current users, and saves us from having to * push the GCD calculations into all the drivers. * In the future, this should probably rely more on data that's in * cfg80211 already - the only thing not would appear to be any new * interfaces (while being brought up) and channel/radar data. */ cfg80211_calculate_bi_data(wiphy, params->new_beacon_int, &beacon_int_gcd, &beacon_int_different); if (params->radar_detect) { rcu_read_lock(); regdom = rcu_dereference(cfg80211_regdomain); if (regdom) region = regdom->dfs_region; rcu_read_unlock(); } for (iftype = 0; iftype < NUM_NL80211_IFTYPES; iftype++) { num_interfaces += params->iftype_num[iftype]; if (params->iftype_num[iftype] > 0 && !(wiphy->software_iftypes & BIT(iftype))) used_iftypes |= BIT(iftype); } for (i = 0; i < wiphy->n_iface_combinations; i++) { const struct ieee80211_iface_combination *c; struct ieee80211_iface_limit *limits; u32 all_iftypes = 0; c = &wiphy->iface_combinations[i]; if (num_interfaces > c->max_interfaces) continue; if (params->num_different_channels > c->num_different_channels) continue; limits = kmemdup(c->limits, sizeof(limits[0]) * c->n_limits, GFP_KERNEL); if (!limits) return -ENOMEM; for (iftype = 0; iftype < NUM_NL80211_IFTYPES; iftype++) { if (wiphy->software_iftypes & BIT(iftype)) continue; for (j = 0; j < c->n_limits; j++) { all_iftypes |= limits[j].types; if (!(limits[j].types & BIT(iftype))) continue; if (limits[j].max < params->iftype_num[iftype]) goto cont; limits[j].max -= params->iftype_num[iftype]; } } if (params->radar_detect != (c->radar_detect_widths & params->radar_detect)) goto cont; if (params->radar_detect && c->radar_detect_regions && !(c->radar_detect_regions & BIT(region))) goto cont; /* Finally check that all iftypes that we're currently * using are actually part of this combination. If they * aren't then we can't use this combination and have * to continue to the next. */ if ((all_iftypes & used_iftypes) != used_iftypes) goto cont; if (beacon_int_gcd) { if (c->beacon_int_min_gcd && beacon_int_gcd < c->beacon_int_min_gcd) goto cont; if (!c->beacon_int_min_gcd && beacon_int_different) goto cont; } /* This combination covered all interface types and * supported the requested numbers, so we're good. 
*/ (*iter)(c, data); cont: kfree(limits); } return 0; } EXPORT_SYMBOL(cfg80211_iter_combinations); static void cfg80211_iter_sum_ifcombs(const struct ieee80211_iface_combination *c, void *data) { int *num = data; (*num)++; } int cfg80211_check_combinations(struct wiphy *wiphy, struct iface_combination_params *params) { int err, num = 0; err = cfg80211_iter_combinations(wiphy, params, cfg80211_iter_sum_ifcombs, &num); if (err) return err; if (num == 0) return -EBUSY; return 0; } EXPORT_SYMBOL(cfg80211_check_combinations); int ieee80211_get_ratemask(struct ieee80211_supported_band *sband, const u8 *rates, unsigned int n_rates, u32 *mask) { int i, j; if (!sband) return -EINVAL; if (n_rates == 0 || n_rates > NL80211_MAX_SUPP_RATES) return -EINVAL; *mask = 0; for (i = 0; i < n_rates; i++) { int rate = (rates[i] & 0x7f) * 5; bool found = false; for (j = 0; j < sband->n_bitrates; j++) { if (sband->bitrates[j].bitrate == rate) { found = true; *mask |= BIT(j); break; } } if (!found) return -EINVAL; } /* * mask must have at least one bit set here since we * didn't accept a 0-length rates array nor allowed * entries in the array that didn't exist */ return 0; } unsigned int ieee80211_get_num_supported_channels(struct wiphy *wiphy) { enum nl80211_band band; unsigned int n_channels = 0; for (band = 0; band < NUM_NL80211_BANDS; band++) if (wiphy->bands[band]) n_channels += wiphy->bands[band]->n_channels; return n_channels; } EXPORT_SYMBOL(ieee80211_get_num_supported_channels); int cfg80211_get_station(struct net_device *dev, const u8 *mac_addr, struct station_info *sinfo) { struct cfg80211_registered_device *rdev; struct wireless_dev *wdev; wdev = dev->ieee80211_ptr; if (!wdev) return -EOPNOTSUPP; rdev = wiphy_to_rdev(wdev->wiphy); if (!rdev->ops->get_station) return -EOPNOTSUPP; memset(sinfo, 0, sizeof(*sinfo)); return rdev_get_station(rdev, dev, mac_addr, sinfo); } EXPORT_SYMBOL(cfg80211_get_station); void cfg80211_free_nan_func(struct cfg80211_nan_func *f) { int i; if (!f) return; kfree(f->serv_spec_info); kfree(f->srf_bf); kfree(f->srf_macs); for (i = 0; i < f->num_rx_filters; i++) kfree(f->rx_filters[i].filter); for (i = 0; i < f->num_tx_filters; i++) kfree(f->tx_filters[i].filter); for (i = 0; i < f->sec.n_ctx_ids; i++) kfree(f->sec.ctx_ids[i].data); kfree(f->sec.ctx_ids); kfree(f->rx_filters); kfree(f->tx_filters); kfree(f); } EXPORT_SYMBOL(cfg80211_free_nan_func); bool cfg80211_does_bw_fit_range(const struct ieee80211_freq_range *freq_range, u32 center_freq_khz, u32 bw_khz) { u32 start_freq_khz, end_freq_khz; start_freq_khz = center_freq_khz - (bw_khz / 2); end_freq_khz = center_freq_khz + (bw_khz / 2); if (start_freq_khz >= freq_range->start_freq_khz && end_freq_khz <= freq_range->end_freq_khz) return true; return false; } int cfg80211_sinfo_alloc_tid_stats(struct station_info *sinfo, gfp_t gfp) { sinfo->pertid = kcalloc(IEEE80211_NUM_TIDS + 1, sizeof(*(sinfo->pertid)), gfp); if (!sinfo->pertid) return -ENOMEM; return 0; } EXPORT_SYMBOL(cfg80211_sinfo_alloc_tid_stats); /* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */ /* Ethernet-II snap header (RFC1042 for most EtherTypes) */ const unsigned char rfc1042_header[] __aligned(2) = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 }; EXPORT_SYMBOL(rfc1042_header); /* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */ const unsigned char bridge_tunnel_header[] __aligned(2) = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 }; EXPORT_SYMBOL(bridge_tunnel_header); /* Layer 2 Update frame (802.2 Type 1 LLC XID Update response) */ struct 
iapp_layer2_update { u8 da[ETH_ALEN]; /* broadcast */ u8 sa[ETH_ALEN]; /* STA addr */ __be16 len; /* 6 */ u8 dsap; /* 0 */ u8 ssap; /* 0 */ u8 control; u8 xid_info[3]; } __packed; void cfg80211_send_layer2_update(struct net_device *dev, const u8 *addr) { struct iapp_layer2_update *msg; struct sk_buff *skb; /* Send Level 2 Update Frame to update forwarding tables in layer 2 * bridge devices */ skb = dev_alloc_skb(sizeof(*msg)); if (!skb) return; msg = skb_put(skb, sizeof(*msg)); /* 802.2 Type 1 Logical Link Control (LLC) Exchange Identifier (XID) * Update response frame; IEEE Std 802.2-1998, 5.4.1.2.1 */ eth_broadcast_addr(msg->da); ether_addr_copy(msg->sa, addr); msg->len = htons(6); msg->dsap = 0; msg->ssap = 0x01; /* NULL LSAP, CR Bit: Response */ msg->control = 0xaf; /* XID response lsb.1111F101. * F=0 (no poll command; unsolicited frame) */ msg->xid_info[0] = 0x81; /* XID format identifier */ msg->xid_info[1] = 1; /* LLC types/classes: Type 1 LLC */ msg->xid_info[2] = 0; /* XID sender's receive window size (RW) */ skb->dev = dev; skb->protocol = eth_type_trans(skb, dev); memset(skb->cb, 0, sizeof(skb->cb)); netif_rx_ni(skb); } EXPORT_SYMBOL(cfg80211_send_layer2_update); int ieee80211_get_vht_max_nss(struct ieee80211_vht_cap *cap, enum ieee80211_vht_chanwidth bw, int mcs, bool ext_nss_bw_capable) { u16 map = le16_to_cpu(cap->supp_mcs.rx_mcs_map); int max_vht_nss = 0; int ext_nss_bw; int supp_width; int i, mcs_encoding; if (map == 0xffff) return 0; if (WARN_ON(mcs > 9)) return 0; if (mcs <= 7) mcs_encoding = 0; else if (mcs == 8) mcs_encoding = 1; else mcs_encoding = 2; /* find max_vht_nss for the given MCS */ for (i = 7; i >= 0; i--) { int supp = (map >> (2 * i)) & 3; if (supp == 3) continue; if (supp >= mcs_encoding) { max_vht_nss = i + 1; break; } } if (!(cap->supp_mcs.tx_mcs_map & cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE))) return max_vht_nss; ext_nss_bw = le32_get_bits(cap->vht_cap_info, IEEE80211_VHT_CAP_EXT_NSS_BW_MASK); supp_width = le32_get_bits(cap->vht_cap_info, IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK); /* if not capable, treat ext_nss_bw as 0 */ if (!ext_nss_bw_capable) ext_nss_bw = 0; /* This is invalid */ if (supp_width == 3) return 0; /* This is an invalid combination so pretend nothing is supported */ if (supp_width == 2 && (ext_nss_bw == 1 || ext_nss_bw == 2)) return 0; /* * Cover all the special cases according to IEEE 802.11-2016 * Table 9-250. All other cases are either factor of 1 or not * valid/supported. 
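* For example, a STA that advertises plain 80 MHz support
* (supp_width == 0) with ext_nss_bw == 2 can operate at 160 MHz
* with half of its maximum NSS, and with ext_nss_bw == 3 at three
* quarters of it, which is exactly what the 160 MHz cases below
* return.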
*/ switch (bw) { case IEEE80211_VHT_CHANWIDTH_USE_HT: case IEEE80211_VHT_CHANWIDTH_80MHZ: if ((supp_width == 1 || supp_width == 2) && ext_nss_bw == 3) return 2 * max_vht_nss; break; case IEEE80211_VHT_CHANWIDTH_160MHZ: if (supp_width == 0 && (ext_nss_bw == 1 || ext_nss_bw == 2)) return max_vht_nss / 2; if (supp_width == 0 && ext_nss_bw == 3) return (3 * max_vht_nss) / 4; if (supp_width == 1 && ext_nss_bw == 3) return 2 * max_vht_nss; break; case IEEE80211_VHT_CHANWIDTH_80P80MHZ: if (supp_width == 0 && ext_nss_bw == 1) return 0; /* not possible */ if (supp_width == 0 && ext_nss_bw == 2) return max_vht_nss / 2; if (supp_width == 0 && ext_nss_bw == 3) return (3 * max_vht_nss) / 4; if (supp_width == 1 && ext_nss_bw == 0) return 0; /* not possible */ if (supp_width == 1 && ext_nss_bw == 1) return max_vht_nss / 2; if (supp_width == 1 && ext_nss_bw == 2) return (3 * max_vht_nss) / 4; break; } /* not covered or invalid combination received */ return max_vht_nss; } EXPORT_SYMBOL(ieee80211_get_vht_max_nss); backport-iwlwifi-dkms-7906/net/wireless/wext-compat.c000066400000000000000000001155321351064634500227470ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0 /* * cfg80211 - wext compat code * * This is temporary code until all wireless functionality is migrated * into cfg80211, when that happens all the exports here go away and * we directly assign the wireless handlers of wireless interfaces. * * Copyright 2008-2009 Johannes Berg * Copyright (C) 2019 Intel Corporation */ #include #include #include #include #include #include #include #include #include #include "wext-compat.h" #include "core.h" #include "rdev-ops.h" int cfg80211_wext_giwname(struct net_device *dev, struct iw_request_info *info, char *name, char *extra) { strcpy(name, "IEEE 802.11"); return 0; } EXPORT_WEXT_HANDLER(cfg80211_wext_giwname); int cfg80211_wext_siwmode(struct net_device *dev, struct iw_request_info *info, u32 *mode, char *extra) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev; struct vif_params vifparams; enum nl80211_iftype type; rdev = wiphy_to_rdev(wdev->wiphy); switch (*mode) { case IW_MODE_INFRA: type = NL80211_IFTYPE_STATION; break; case IW_MODE_ADHOC: type = NL80211_IFTYPE_ADHOC; break; case IW_MODE_REPEAT: type = NL80211_IFTYPE_WDS; break; case IW_MODE_MONITOR: type = NL80211_IFTYPE_MONITOR; break; default: return -EINVAL; } if (type == wdev->iftype) return 0; memset(&vifparams, 0, sizeof(vifparams)); return cfg80211_change_iface(rdev, dev, type, &vifparams); } EXPORT_WEXT_HANDLER(cfg80211_wext_siwmode); int cfg80211_wext_giwmode(struct net_device *dev, struct iw_request_info *info, u32 *mode, char *extra) { struct wireless_dev *wdev = dev->ieee80211_ptr; if (!wdev) return -EOPNOTSUPP; switch (wdev->iftype) { case NL80211_IFTYPE_AP: *mode = IW_MODE_MASTER; break; case NL80211_IFTYPE_STATION: *mode = IW_MODE_INFRA; break; case NL80211_IFTYPE_ADHOC: *mode = IW_MODE_ADHOC; break; case NL80211_IFTYPE_MONITOR: *mode = IW_MODE_MONITOR; break; case NL80211_IFTYPE_WDS: *mode = IW_MODE_REPEAT; break; case NL80211_IFTYPE_AP_VLAN: *mode = IW_MODE_SECOND; /* FIXME */ break; default: *mode = IW_MODE_AUTO; break; } return 0; } EXPORT_WEXT_HANDLER(cfg80211_wext_giwmode); int cfg80211_wext_giwrange(struct net_device *dev, struct iw_request_info *info, struct iw_point *data, char *extra) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct iw_range *range = (struct iw_range *) extra; enum nl80211_band band; int i, c = 0; if (!wdev) return -EOPNOTSUPP; data->length = 
int cfg80211_wext_giwrange(struct net_device *dev,
			   struct iw_request_info *info,
			   struct iw_point *data, char *extra)
{
	struct wireless_dev *wdev = dev->ieee80211_ptr;
	struct iw_range *range = (struct iw_range *) extra;
	enum nl80211_band band;
	int i, c = 0;

	if (!wdev)
		return -EOPNOTSUPP;

	data->length = sizeof(struct iw_range);
	memset(range, 0, sizeof(struct iw_range));

	range->we_version_compiled = WIRELESS_EXT;
	range->we_version_source = 21;
	range->retry_capa = IW_RETRY_LIMIT;
	range->retry_flags = IW_RETRY_LIMIT;
	range->min_retry = 0;
	range->max_retry = 255;
	range->min_rts = 0;
	range->max_rts = 2347;
	range->min_frag = 256;
	range->max_frag = 2346;

	range->max_encoding_tokens = 4;

	range->max_qual.updated = IW_QUAL_NOISE_INVALID;

	switch (wdev->wiphy->signal_type) {
	case CFG80211_SIGNAL_TYPE_NONE:
		break;
	case CFG80211_SIGNAL_TYPE_MBM:
		range->max_qual.level = (u8)-110;
		range->max_qual.qual = 70;
		range->avg_qual.qual = 35;
		range->max_qual.updated |= IW_QUAL_DBM;
		range->max_qual.updated |= IW_QUAL_QUAL_UPDATED;
		range->max_qual.updated |= IW_QUAL_LEVEL_UPDATED;
		break;
	case CFG80211_SIGNAL_TYPE_UNSPEC:
		range->max_qual.level = 100;
		range->max_qual.qual = 100;
		range->avg_qual.qual = 50;
		range->max_qual.updated |= IW_QUAL_QUAL_UPDATED;
		range->max_qual.updated |= IW_QUAL_LEVEL_UPDATED;
		break;
	}

	range->avg_qual.level = range->max_qual.level / 2;
	range->avg_qual.noise = range->max_qual.noise / 2;
	range->avg_qual.updated = range->max_qual.updated;

	for (i = 0; i < wdev->wiphy->n_cipher_suites; i++) {
		switch (wdev->wiphy->cipher_suites[i]) {
		case WLAN_CIPHER_SUITE_TKIP:
			range->enc_capa |= (IW_ENC_CAPA_CIPHER_TKIP |
					    IW_ENC_CAPA_WPA);
			break;
		case WLAN_CIPHER_SUITE_CCMP:
			range->enc_capa |= (IW_ENC_CAPA_CIPHER_CCMP |
					    IW_ENC_CAPA_WPA2);
			break;
		case WLAN_CIPHER_SUITE_WEP40:
			range->encoding_size[range->num_encoding_sizes++] =
				WLAN_KEY_LEN_WEP40;
			break;
		case WLAN_CIPHER_SUITE_WEP104:
			range->encoding_size[range->num_encoding_sizes++] =
				WLAN_KEY_LEN_WEP104;
			break;
		}
	}

	for (band = 0; band < NUM_NL80211_BANDS; band++) {
		struct ieee80211_supported_band *sband;

		sband = wdev->wiphy->bands[band];

		if (!sband)
			continue;

		for (i = 0; i < sband->n_channels && c < IW_MAX_FREQUENCIES;
		     i++) {
			struct ieee80211_channel *chan = &sband->channels[i];

			if (!(chan->flags & IEEE80211_CHAN_DISABLED)) {
				range->freq[c].i =
					ieee80211_frequency_to_channel(
						chan->center_freq);
				range->freq[c].m = chan->center_freq;
				range->freq[c].e = 6;
				c++;
			}
		}
	}
	range->num_channels = c;
	range->num_frequency = c;

	IW_EVENT_CAPA_SET_KERNEL(range->event_capa);
	IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWAP);
	IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWSCAN);

	if (wdev->wiphy->max_scan_ssids > 0)
		range->scan_capa |= IW_SCAN_CAPA_ESSID;

	return 0;
}
EXPORT_WEXT_HANDLER(cfg80211_wext_giwrange);


/**
 * cfg80211_wext_freq - get wext frequency for non-"auto"
 * @freq: the wext freq encoding
 *
 * Returns a frequency, or a negative error code, or 0 for auto.
 */
int cfg80211_wext_freq(struct iw_freq *freq)
{
	/*
	 * Parse frequency - return 0 for auto and
	 * -EINVAL for impossible things.
*/ if (freq->e == 0) { enum nl80211_band band = NL80211_BAND_2GHZ; if (freq->m < 0) return 0; if (freq->m > 14) band = NL80211_BAND_5GHZ; return ieee80211_channel_to_frequency(freq->m, band); } else { int i, div = 1000000; for (i = 0; i < freq->e; i++) div /= 10; if (div <= 0) return -EINVAL; return freq->m / div; } } int cfg80211_wext_siwrts(struct net_device *dev, struct iw_request_info *info, struct iw_param *rts, char *extra) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); u32 orts = wdev->wiphy->rts_threshold; int err; if (rts->disabled || !rts->fixed) wdev->wiphy->rts_threshold = (u32) -1; else if (rts->value < 0) return -EINVAL; else wdev->wiphy->rts_threshold = rts->value; err = rdev_set_wiphy_params(rdev, WIPHY_PARAM_RTS_THRESHOLD); if (err) wdev->wiphy->rts_threshold = orts; return err; } EXPORT_WEXT_HANDLER(cfg80211_wext_siwrts); int cfg80211_wext_giwrts(struct net_device *dev, struct iw_request_info *info, struct iw_param *rts, char *extra) { struct wireless_dev *wdev = dev->ieee80211_ptr; rts->value = wdev->wiphy->rts_threshold; rts->disabled = rts->value == (u32) -1; rts->fixed = 1; return 0; } EXPORT_WEXT_HANDLER(cfg80211_wext_giwrts); int cfg80211_wext_siwfrag(struct net_device *dev, struct iw_request_info *info, struct iw_param *frag, char *extra) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); u32 ofrag = wdev->wiphy->frag_threshold; int err; if (frag->disabled || !frag->fixed) wdev->wiphy->frag_threshold = (u32) -1; else if (frag->value < 256) return -EINVAL; else { /* Fragment length must be even, so strip LSB. */ wdev->wiphy->frag_threshold = frag->value & ~0x1; } err = rdev_set_wiphy_params(rdev, WIPHY_PARAM_FRAG_THRESHOLD); if (err) wdev->wiphy->frag_threshold = ofrag; return err; } EXPORT_WEXT_HANDLER(cfg80211_wext_siwfrag); int cfg80211_wext_giwfrag(struct net_device *dev, struct iw_request_info *info, struct iw_param *frag, char *extra) { struct wireless_dev *wdev = dev->ieee80211_ptr; frag->value = wdev->wiphy->frag_threshold; frag->disabled = frag->value == (u32) -1; frag->fixed = 1; return 0; } EXPORT_WEXT_HANDLER(cfg80211_wext_giwfrag); static int cfg80211_wext_siwretry(struct net_device *dev, struct iw_request_info *info, struct iw_param *retry, char *extra) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); u32 changed = 0; u8 olong = wdev->wiphy->retry_long; u8 oshort = wdev->wiphy->retry_short; int err; if (retry->disabled || retry->value < 1 || retry->value > 255 || (retry->flags & IW_RETRY_TYPE) != IW_RETRY_LIMIT) return -EINVAL; if (retry->flags & IW_RETRY_LONG) { wdev->wiphy->retry_long = retry->value; changed |= WIPHY_PARAM_RETRY_LONG; } else if (retry->flags & IW_RETRY_SHORT) { wdev->wiphy->retry_short = retry->value; changed |= WIPHY_PARAM_RETRY_SHORT; } else { wdev->wiphy->retry_short = retry->value; wdev->wiphy->retry_long = retry->value; changed |= WIPHY_PARAM_RETRY_LONG; changed |= WIPHY_PARAM_RETRY_SHORT; } if (!changed) return 0; err = rdev_set_wiphy_params(rdev, changed); if (err) { wdev->wiphy->retry_short = oshort; wdev->wiphy->retry_long = olong; } return err; } int cfg80211_wext_giwretry(struct net_device *dev, struct iw_request_info *info, struct iw_param *retry, char *extra) { struct wireless_dev *wdev = dev->ieee80211_ptr; retry->disabled = 0; if (retry->flags == 0 || (retry->flags & IW_RETRY_SHORT)) { /* * First return short 
value, iwconfig will ask long value * later if needed */ retry->flags |= IW_RETRY_LIMIT | IW_RETRY_SHORT; retry->value = wdev->wiphy->retry_short; if (wdev->wiphy->retry_long == wdev->wiphy->retry_short) retry->flags |= IW_RETRY_LONG; return 0; } if (retry->flags & IW_RETRY_LONG) { retry->flags = IW_RETRY_LIMIT | IW_RETRY_LONG; retry->value = wdev->wiphy->retry_long; } return 0; } EXPORT_WEXT_HANDLER(cfg80211_wext_giwretry); static int __cfg80211_set_encryption(struct cfg80211_registered_device *rdev, struct net_device *dev, bool pairwise, const u8 *addr, bool remove, bool tx_key, int idx, struct key_params *params) { struct wireless_dev *wdev = dev->ieee80211_ptr; int err, i; bool rejoin = false; if (pairwise && !addr) return -EINVAL; /* * In many cases we won't actually need this, but it's better * to do it first in case the allocation fails. Don't use wext. */ if (!wdev->wext.keys) { wdev->wext.keys = kzalloc(sizeof(*wdev->wext.keys), GFP_KERNEL); if (!wdev->wext.keys) return -ENOMEM; for (i = 0; i < CFG80211_MAX_WEP_KEYS; i++) wdev->wext.keys->params[i].key = wdev->wext.keys->data[i]; } if (wdev->iftype != NL80211_IFTYPE_ADHOC && wdev->iftype != NL80211_IFTYPE_STATION) return -EOPNOTSUPP; if (params->cipher == WLAN_CIPHER_SUITE_AES_CMAC) { if (!wdev->current_bss) return -ENOLINK; if (!rdev->ops->set_default_mgmt_key) return -EOPNOTSUPP; if (idx < 4 || idx > 5) return -EINVAL; } else if (idx < 0 || idx > 3) return -EINVAL; if (remove) { err = 0; if (wdev->current_bss) { /* * If removing the current TX key, we will need to * join a new IBSS without the privacy bit clear. */ if (idx == wdev->wext.default_key && wdev->iftype == NL80211_IFTYPE_ADHOC) { __cfg80211_leave_ibss(rdev, wdev->netdev, true); rejoin = true; } if (!pairwise && addr && !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN)) err = -ENOENT; else err = rdev_del_key(rdev, dev, idx, pairwise, addr); } wdev->wext.connect.privacy = false; /* * Applications using wireless extensions expect to be * able to delete keys that don't exist, so allow that. */ if (err == -ENOENT) err = 0; if (!err) { if (!addr && idx < 4) { memset(wdev->wext.keys->data[idx], 0, sizeof(wdev->wext.keys->data[idx])); wdev->wext.keys->params[idx].key_len = 0; wdev->wext.keys->params[idx].cipher = 0; } if (idx == wdev->wext.default_key) wdev->wext.default_key = -1; else if (idx == wdev->wext.default_mgmt_key) wdev->wext.default_mgmt_key = -1; } if (!err && rejoin) err = cfg80211_ibss_wext_join(rdev, wdev); return err; } if (addr) tx_key = false; if (cfg80211_validate_key_settings(rdev, params, idx, pairwise, addr)) return -EINVAL; err = 0; if (wdev->current_bss) err = rdev_add_key(rdev, dev, idx, pairwise, addr, params); else if (params->cipher != WLAN_CIPHER_SUITE_WEP40 && params->cipher != WLAN_CIPHER_SUITE_WEP104) return -EINVAL; if (err) return err; /* * We only need to store WEP keys, since they're the only keys that * can be be set before a connection is established and persist after * disconnecting. 
*/ if (!addr && (params->cipher == WLAN_CIPHER_SUITE_WEP40 || params->cipher == WLAN_CIPHER_SUITE_WEP104)) { wdev->wext.keys->params[idx] = *params; memcpy(wdev->wext.keys->data[idx], params->key, params->key_len); wdev->wext.keys->params[idx].key = wdev->wext.keys->data[idx]; } if ((params->cipher == WLAN_CIPHER_SUITE_WEP40 || params->cipher == WLAN_CIPHER_SUITE_WEP104) && (tx_key || (!addr && wdev->wext.default_key == -1))) { if (wdev->current_bss) { /* * If we are getting a new TX key from not having * had one before we need to join a new IBSS with * the privacy bit set. */ if (wdev->iftype == NL80211_IFTYPE_ADHOC && wdev->wext.default_key == -1) { __cfg80211_leave_ibss(rdev, wdev->netdev, true); rejoin = true; } err = rdev_set_default_key(rdev, dev, idx, true, true); } if (!err) { wdev->wext.default_key = idx; if (rejoin) err = cfg80211_ibss_wext_join(rdev, wdev); } return err; } if (params->cipher == WLAN_CIPHER_SUITE_AES_CMAC && (tx_key || (!addr && wdev->wext.default_mgmt_key == -1))) { if (wdev->current_bss) err = rdev_set_default_mgmt_key(rdev, dev, idx); if (!err) wdev->wext.default_mgmt_key = idx; return err; } return 0; } static int cfg80211_set_encryption(struct cfg80211_registered_device *rdev, struct net_device *dev, bool pairwise, const u8 *addr, bool remove, bool tx_key, int idx, struct key_params *params) { int err; wdev_lock(dev->ieee80211_ptr); err = __cfg80211_set_encryption(rdev, dev, pairwise, addr, remove, tx_key, idx, params); wdev_unlock(dev->ieee80211_ptr); return err; } static int cfg80211_wext_siwencode(struct net_device *dev, struct iw_request_info *info, struct iw_point *erq, char *keybuf) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); int idx, err; bool remove = false; struct key_params params; if (wdev->iftype != NL80211_IFTYPE_STATION && wdev->iftype != NL80211_IFTYPE_ADHOC) return -EOPNOTSUPP; /* no use -- only MFP (set_default_mgmt_key) is optional */ if (!rdev->ops->del_key || !rdev->ops->add_key || !rdev->ops->set_default_key) return -EOPNOTSUPP; idx = erq->flags & IW_ENCODE_INDEX; if (idx == 0) { idx = wdev->wext.default_key; if (idx < 0) idx = 0; } else if (idx < 1 || idx > 4) return -EINVAL; else idx--; if (erq->flags & IW_ENCODE_DISABLED) remove = true; else if (erq->length == 0) { /* No key data - just set the default TX key index */ err = 0; wdev_lock(wdev); if (wdev->current_bss) err = rdev_set_default_key(rdev, dev, idx, true, true); if (!err) wdev->wext.default_key = idx; wdev_unlock(wdev); return err; } memset(¶ms, 0, sizeof(params)); params.key = keybuf; params.key_len = erq->length; if (erq->length == 5) params.cipher = WLAN_CIPHER_SUITE_WEP40; else if (erq->length == 13) params.cipher = WLAN_CIPHER_SUITE_WEP104; else if (!remove) return -EINVAL; return cfg80211_set_encryption(rdev, dev, false, NULL, remove, wdev->wext.default_key == -1, idx, ¶ms); } static int cfg80211_wext_siwencodeext(struct net_device *dev, struct iw_request_info *info, struct iw_point *erq, char *extra) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); struct iw_encode_ext *ext = (struct iw_encode_ext *) extra; const u8 *addr; int idx; bool remove = false; struct key_params params; u32 cipher; if (wdev->iftype != NL80211_IFTYPE_STATION && wdev->iftype != NL80211_IFTYPE_ADHOC) return -EOPNOTSUPP; /* no use -- only MFP (set_default_mgmt_key) is optional */ if (!rdev->ops->del_key || !rdev->ops->add_key || 
!rdev->ops->set_default_key) return -EOPNOTSUPP; switch (ext->alg) { case IW_ENCODE_ALG_NONE: remove = true; cipher = 0; break; case IW_ENCODE_ALG_WEP: if (ext->key_len == 5) cipher = WLAN_CIPHER_SUITE_WEP40; else if (ext->key_len == 13) cipher = WLAN_CIPHER_SUITE_WEP104; else return -EINVAL; break; case IW_ENCODE_ALG_TKIP: cipher = WLAN_CIPHER_SUITE_TKIP; break; case IW_ENCODE_ALG_CCMP: cipher = WLAN_CIPHER_SUITE_CCMP; break; case IW_ENCODE_ALG_AES_CMAC: cipher = WLAN_CIPHER_SUITE_AES_CMAC; break; default: return -EOPNOTSUPP; } if (erq->flags & IW_ENCODE_DISABLED) remove = true; idx = erq->flags & IW_ENCODE_INDEX; if (cipher == WLAN_CIPHER_SUITE_AES_CMAC) { if (idx < 4 || idx > 5) { idx = wdev->wext.default_mgmt_key; if (idx < 0) return -EINVAL; } else idx--; } else { if (idx < 1 || idx > 4) { idx = wdev->wext.default_key; if (idx < 0) return -EINVAL; } else idx--; } addr = ext->addr.sa_data; if (is_broadcast_ether_addr(addr)) addr = NULL; memset(¶ms, 0, sizeof(params)); params.key = ext->key; params.key_len = ext->key_len; params.cipher = cipher; if (ext->ext_flags & IW_ENCODE_EXT_RX_SEQ_VALID) { params.seq = ext->rx_seq; params.seq_len = 6; } return cfg80211_set_encryption( rdev, dev, !(ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY), addr, remove, ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY, idx, ¶ms); } static int cfg80211_wext_giwencode(struct net_device *dev, struct iw_request_info *info, struct iw_point *erq, char *keybuf) { struct wireless_dev *wdev = dev->ieee80211_ptr; int idx; if (wdev->iftype != NL80211_IFTYPE_STATION && wdev->iftype != NL80211_IFTYPE_ADHOC) return -EOPNOTSUPP; idx = erq->flags & IW_ENCODE_INDEX; if (idx == 0) { idx = wdev->wext.default_key; if (idx < 0) idx = 0; } else if (idx < 1 || idx > 4) return -EINVAL; else idx--; erq->flags = idx + 1; if (!wdev->wext.keys || !wdev->wext.keys->params[idx].cipher) { erq->flags |= IW_ENCODE_DISABLED; erq->length = 0; return 0; } erq->length = min_t(size_t, erq->length, wdev->wext.keys->params[idx].key_len); memcpy(keybuf, wdev->wext.keys->params[idx].key, erq->length); erq->flags |= IW_ENCODE_ENABLED; return 0; } static int cfg80211_wext_siwfreq(struct net_device *dev, struct iw_request_info *info, struct iw_freq *wextfreq, char *extra) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); struct cfg80211_chan_def chandef = { .width = NL80211_CHAN_WIDTH_20_NOHT, }; int freq; switch (wdev->iftype) { case NL80211_IFTYPE_STATION: return cfg80211_mgd_wext_siwfreq(dev, info, wextfreq, extra); case NL80211_IFTYPE_ADHOC: return cfg80211_ibss_wext_siwfreq(dev, info, wextfreq, extra); case NL80211_IFTYPE_MONITOR: freq = cfg80211_wext_freq(wextfreq); if (freq < 0) return freq; if (freq == 0) return -EINVAL; chandef.center_freq1 = freq; chandef.chan = ieee80211_get_channel(&rdev->wiphy, freq); if (!chandef.chan) return -EINVAL; return cfg80211_set_monitor_channel(rdev, &chandef); case NL80211_IFTYPE_MESH_POINT: freq = cfg80211_wext_freq(wextfreq); if (freq < 0) return freq; if (freq == 0) return -EINVAL; chandef.center_freq1 = freq; chandef.chan = ieee80211_get_channel(&rdev->wiphy, freq); if (!chandef.chan) return -EINVAL; return cfg80211_set_mesh_channel(rdev, wdev, &chandef); default: return -EOPNOTSUPP; } } static int cfg80211_wext_giwfreq(struct net_device *dev, struct iw_request_info *info, struct iw_freq *freq, char *extra) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); struct cfg80211_chan_def 
chandef; int ret; switch (wdev->iftype) { case NL80211_IFTYPE_STATION: return cfg80211_mgd_wext_giwfreq(dev, info, freq, extra); case NL80211_IFTYPE_ADHOC: return cfg80211_ibss_wext_giwfreq(dev, info, freq, extra); case NL80211_IFTYPE_MONITOR: if (!rdev->ops->get_channel) return -EINVAL; ret = rdev_get_channel(rdev, wdev, &chandef); if (ret) return ret; freq->m = chandef.chan->center_freq; freq->e = 6; return 0; default: return -EINVAL; } } static int cfg80211_wext_siwtxpower(struct net_device *dev, struct iw_request_info *info, union iwreq_data *data, char *extra) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); enum nl80211_tx_power_setting type; int dbm = 0; if ((data->txpower.flags & IW_TXPOW_TYPE) != IW_TXPOW_DBM) return -EINVAL; if (data->txpower.flags & IW_TXPOW_RANGE) return -EINVAL; if (!rdev->ops->set_tx_power) return -EOPNOTSUPP; /* only change when not disabling */ if (!data->txpower.disabled) { rfkill_set_sw_state(rdev->rfkill, false); if (data->txpower.fixed) { /* * wext doesn't support negative values, see * below where it's for automatic */ if (data->txpower.value < 0) return -EINVAL; dbm = data->txpower.value; type = NL80211_TX_POWER_FIXED; /* TODO: do regulatory check! */ } else { /* * Automatic power level setting, max being the value * passed in from userland. */ if (data->txpower.value < 0) { type = NL80211_TX_POWER_AUTOMATIC; } else { dbm = data->txpower.value; type = NL80211_TX_POWER_LIMITED; } } } else { if (rfkill_set_sw_state(rdev->rfkill, true)) schedule_work(&rdev->rfkill_block); return 0; } return rdev_set_tx_power(rdev, wdev, type, DBM_TO_MBM(dbm)); } static int cfg80211_wext_giwtxpower(struct net_device *dev, struct iw_request_info *info, union iwreq_data *data, char *extra) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); int err, val; if ((data->txpower.flags & IW_TXPOW_TYPE) != IW_TXPOW_DBM) return -EINVAL; if (data->txpower.flags & IW_TXPOW_RANGE) return -EINVAL; if (!rdev->ops->get_tx_power) return -EOPNOTSUPP; err = rdev_get_tx_power(rdev, wdev, &val); if (err) return err; /* well... 
oh well */ data->txpower.fixed = 1; data->txpower.disabled = rfkill_blocked(rdev->rfkill); data->txpower.value = val; data->txpower.flags = IW_TXPOW_DBM; return 0; } static int cfg80211_set_auth_alg(struct wireless_dev *wdev, s32 auth_alg) { int nr_alg = 0; if (!auth_alg) return -EINVAL; if (auth_alg & ~(IW_AUTH_ALG_OPEN_SYSTEM | IW_AUTH_ALG_SHARED_KEY | IW_AUTH_ALG_LEAP)) return -EINVAL; if (auth_alg & IW_AUTH_ALG_OPEN_SYSTEM) { nr_alg++; wdev->wext.connect.auth_type = NL80211_AUTHTYPE_OPEN_SYSTEM; } if (auth_alg & IW_AUTH_ALG_SHARED_KEY) { nr_alg++; wdev->wext.connect.auth_type = NL80211_AUTHTYPE_SHARED_KEY; } if (auth_alg & IW_AUTH_ALG_LEAP) { nr_alg++; wdev->wext.connect.auth_type = NL80211_AUTHTYPE_NETWORK_EAP; } if (nr_alg > 1) wdev->wext.connect.auth_type = NL80211_AUTHTYPE_AUTOMATIC; return 0; } static int cfg80211_set_wpa_version(struct wireless_dev *wdev, u32 wpa_versions) { if (wpa_versions & ~(IW_AUTH_WPA_VERSION_WPA | IW_AUTH_WPA_VERSION_WPA2| IW_AUTH_WPA_VERSION_DISABLED)) return -EINVAL; if ((wpa_versions & IW_AUTH_WPA_VERSION_DISABLED) && (wpa_versions & (IW_AUTH_WPA_VERSION_WPA| IW_AUTH_WPA_VERSION_WPA2))) return -EINVAL; if (wpa_versions & IW_AUTH_WPA_VERSION_DISABLED) wdev->wext.connect.crypto.wpa_versions &= ~(NL80211_WPA_VERSION_1|NL80211_WPA_VERSION_2); if (wpa_versions & IW_AUTH_WPA_VERSION_WPA) wdev->wext.connect.crypto.wpa_versions |= NL80211_WPA_VERSION_1; if (wpa_versions & IW_AUTH_WPA_VERSION_WPA2) wdev->wext.connect.crypto.wpa_versions |= NL80211_WPA_VERSION_2; return 0; } static int cfg80211_set_cipher_group(struct wireless_dev *wdev, u32 cipher) { if (cipher & IW_AUTH_CIPHER_WEP40) wdev->wext.connect.crypto.ciphers_group[0] = WLAN_CIPHER_SUITE_WEP40; else if (cipher & IW_AUTH_CIPHER_WEP104) wdev->wext.connect.crypto.ciphers_group[0] = WLAN_CIPHER_SUITE_WEP104; else if (cipher & IW_AUTH_CIPHER_TKIP) wdev->wext.connect.crypto.ciphers_group[0] = WLAN_CIPHER_SUITE_TKIP; else if (cipher & IW_AUTH_CIPHER_CCMP) wdev->wext.connect.crypto.ciphers_group[0] = WLAN_CIPHER_SUITE_CCMP; else if (cipher & IW_AUTH_CIPHER_AES_CMAC) wdev->wext.connect.crypto.ciphers_group[0] = WLAN_CIPHER_SUITE_AES_CMAC; else if (cipher & IW_AUTH_CIPHER_NONE) wdev->wext.connect.crypto.ciphers_group[0] = 0; else return -EINVAL; wdev->wext.connect.crypto.n_ciphers_group = 1; return 0; } static int cfg80211_set_cipher_pairwise(struct wireless_dev *wdev, u32 cipher) { int nr_ciphers = 0; u32 *ciphers_pairwise = wdev->wext.connect.crypto.ciphers_pairwise; if (cipher & IW_AUTH_CIPHER_WEP40) { ciphers_pairwise[nr_ciphers] = WLAN_CIPHER_SUITE_WEP40; nr_ciphers++; } if (cipher & IW_AUTH_CIPHER_WEP104) { ciphers_pairwise[nr_ciphers] = WLAN_CIPHER_SUITE_WEP104; nr_ciphers++; } if (cipher & IW_AUTH_CIPHER_TKIP) { ciphers_pairwise[nr_ciphers] = WLAN_CIPHER_SUITE_TKIP; nr_ciphers++; } if (cipher & IW_AUTH_CIPHER_CCMP) { ciphers_pairwise[nr_ciphers] = WLAN_CIPHER_SUITE_CCMP; nr_ciphers++; } if (cipher & IW_AUTH_CIPHER_AES_CMAC) { ciphers_pairwise[nr_ciphers] = WLAN_CIPHER_SUITE_AES_CMAC; nr_ciphers++; } BUILD_BUG_ON(NL80211_MAX_NR_CIPHER_SUITES < 5); wdev->wext.connect.crypto.n_ciphers_pairwise = nr_ciphers; return 0; } static int cfg80211_set_key_mgt(struct wireless_dev *wdev, u32 key_mgt) { int nr_akm_suites = 0; if (key_mgt & ~(IW_AUTH_KEY_MGMT_802_1X | IW_AUTH_KEY_MGMT_PSK)) return -EINVAL; if (key_mgt & IW_AUTH_KEY_MGMT_802_1X) { wdev->wext.connect.crypto.akm_suites[nr_akm_suites] = WLAN_AKM_SUITE_8021X; nr_akm_suites++; } if (key_mgt & IW_AUTH_KEY_MGMT_PSK) { 
wdev->wext.connect.crypto.akm_suites[nr_akm_suites] = WLAN_AKM_SUITE_PSK; nr_akm_suites++; } wdev->wext.connect.crypto.n_akm_suites = nr_akm_suites; return 0; } static int cfg80211_wext_siwauth(struct net_device *dev, struct iw_request_info *info, struct iw_param *data, char *extra) { struct wireless_dev *wdev = dev->ieee80211_ptr; if (wdev->iftype != NL80211_IFTYPE_STATION) return -EOPNOTSUPP; switch (data->flags & IW_AUTH_INDEX) { case IW_AUTH_PRIVACY_INVOKED: wdev->wext.connect.privacy = data->value; return 0; case IW_AUTH_WPA_VERSION: return cfg80211_set_wpa_version(wdev, data->value); case IW_AUTH_CIPHER_GROUP: return cfg80211_set_cipher_group(wdev, data->value); case IW_AUTH_KEY_MGMT: return cfg80211_set_key_mgt(wdev, data->value); case IW_AUTH_CIPHER_PAIRWISE: return cfg80211_set_cipher_pairwise(wdev, data->value); case IW_AUTH_80211_AUTH_ALG: return cfg80211_set_auth_alg(wdev, data->value); case IW_AUTH_WPA_ENABLED: case IW_AUTH_RX_UNENCRYPTED_EAPOL: case IW_AUTH_DROP_UNENCRYPTED: case IW_AUTH_MFP: return 0; default: return -EOPNOTSUPP; } } static int cfg80211_wext_giwauth(struct net_device *dev, struct iw_request_info *info, struct iw_param *data, char *extra) { /* XXX: what do we need? */ return -EOPNOTSUPP; } static int cfg80211_wext_siwpower(struct net_device *dev, struct iw_request_info *info, struct iw_param *wrq, char *extra) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); bool ps = wdev->ps; int timeout = wdev->ps_timeout; int err; if (wdev->iftype != NL80211_IFTYPE_STATION) return -EINVAL; if (!rdev->ops->set_power_mgmt) return -EOPNOTSUPP; if (wrq->disabled) { ps = false; } else { switch (wrq->flags & IW_POWER_MODE) { case IW_POWER_ON: /* If not specified */ case IW_POWER_MODE: /* If set all mask */ case IW_POWER_ALL_R: /* If explicitely state all */ ps = true; break; default: /* Otherwise we ignore */ return -EINVAL; } if (wrq->flags & ~(IW_POWER_MODE | IW_POWER_TIMEOUT)) return -EINVAL; if (wrq->flags & IW_POWER_TIMEOUT) timeout = wrq->value / 1000; } err = rdev_set_power_mgmt(rdev, dev, ps, timeout); if (err) return err; wdev->ps = ps; wdev->ps_timeout = timeout; return 0; } static int cfg80211_wext_giwpower(struct net_device *dev, struct iw_request_info *info, struct iw_param *wrq, char *extra) { struct wireless_dev *wdev = dev->ieee80211_ptr; wrq->disabled = !wdev->ps; return 0; } static int cfg80211_wds_wext_siwap(struct net_device *dev, struct iw_request_info *info, struct sockaddr *addr, char *extra) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); int err; if (WARN_ON(wdev->iftype != NL80211_IFTYPE_WDS)) return -EINVAL; if (addr->sa_family != ARPHRD_ETHER) return -EINVAL; if (netif_running(dev)) return -EBUSY; if (!rdev->ops->set_wds_peer) return -EOPNOTSUPP; err = rdev_set_wds_peer(rdev, dev, (u8 *)&addr->sa_data); if (err) return err; memcpy(&wdev->wext.bssid, (u8 *) &addr->sa_data, ETH_ALEN); return 0; } static int cfg80211_wds_wext_giwap(struct net_device *dev, struct iw_request_info *info, struct sockaddr *addr, char *extra) { struct wireless_dev *wdev = dev->ieee80211_ptr; if (WARN_ON(wdev->iftype != NL80211_IFTYPE_WDS)) return -EINVAL; addr->sa_family = ARPHRD_ETHER; memcpy(&addr->sa_data, wdev->wext.bssid, ETH_ALEN); return 0; } static int cfg80211_wext_siwrate(struct net_device *dev, struct iw_request_info *info, struct iw_param *rate, char *extra) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct 
cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
	struct cfg80211_bitrate_mask mask;
	u32 fixed, maxrate;
	struct ieee80211_supported_band *sband;
	int band, ridx;
	bool match = false;

	if (!rdev->ops->set_bitrate_mask)
		return -EOPNOTSUPP;

	memset(&mask, 0, sizeof(mask));
	fixed = 0;
	maxrate = (u32)-1;

	if (rate->value < 0) {
		/* nothing */
	} else if (rate->fixed) {
		fixed = rate->value / 100000;
	} else {
		maxrate = rate->value / 100000;
	}

	for (band = 0; band < NUM_NL80211_BANDS; band++) {
		sband = wdev->wiphy->bands[band];
		if (sband == NULL)
			continue;
		for (ridx = 0; ridx < sband->n_bitrates; ridx++) {
			struct ieee80211_rate *srate = &sband->bitrates[ridx];

			if (fixed == srate->bitrate) {
				mask.control[band].legacy = 1 << ridx;
				match = true;
				break;
			}
			if (srate->bitrate <= maxrate) {
				mask.control[band].legacy |= 1 << ridx;
				match = true;
			}
		}
	}

	if (!match)
		return -EINVAL;

	return rdev_set_bitrate_mask(rdev, dev, NULL, &mask);
}

static int cfg80211_wext_giwrate(struct net_device *dev,
				 struct iw_request_info *info,
				 struct iw_param *rate, char *extra)
{
	struct wireless_dev *wdev = dev->ieee80211_ptr;
	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
	struct station_info sinfo = {};
	u8 addr[ETH_ALEN];
	int err;

	if (wdev->iftype != NL80211_IFTYPE_STATION)
		return -EOPNOTSUPP;

	if (!rdev->ops->get_station)
		return -EOPNOTSUPP;

	err = 0;
	wdev_lock(wdev);
	if (wdev->current_bss)
		memcpy(addr, wdev->current_bss->pub.bssid, ETH_ALEN);
	else
		err = -EOPNOTSUPP;
	wdev_unlock(wdev);
	if (err)
		return err;

	err = rdev_get_station(rdev, dev, addr, &sinfo);
	if (err)
		return err;

	if (!(sinfo.filled & BIT_ULL(NL80211_STA_INFO_TX_BITRATE))) {
		err = -EOPNOTSUPP;
		goto free;
	}

	rate->value = 100000 * cfg80211_calculate_bitrate(&sinfo.txrate);

free:
	cfg80211_sinfo_release_content(&sinfo);
	return err;
}
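/*
 * Unit note (illustrative, not part of the original source): wext
 * expresses rates in bit/s while cfg80211 bitrates are in units of
 * 100 kbit/s, hence the factor of 100000 in the two handlers above.
 * A fixed 54 Mbit/s request arrives as rate->value == 54000000 and is
 * matched against srate->bitrate == 540.
 */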
/* Get wireless statistics.  Called by /proc/net/wireless and by SIOCGIWSTATS */
static struct iw_statistics *cfg80211_wireless_stats(struct net_device *dev)
{
	struct wireless_dev *wdev = dev->ieee80211_ptr;
	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
	/* we are under RTNL - globally locked - so can use static structs */
	static struct iw_statistics wstats;
	static struct station_info sinfo = {};
	u8 bssid[ETH_ALEN];

	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION)
		return NULL;

	if (!rdev->ops->get_station)
		return NULL;

	/* Grab BSSID of current BSS, if any */
	wdev_lock(wdev);
	if (!wdev->current_bss) {
		wdev_unlock(wdev);
		return NULL;
	}
	memcpy(bssid, wdev->current_bss->pub.bssid, ETH_ALEN);
	wdev_unlock(wdev);

	memset(&sinfo, 0, sizeof(sinfo));

	if (rdev_get_station(rdev, dev, bssid, &sinfo))
		return NULL;

	memset(&wstats, 0, sizeof(wstats));

	switch (rdev->wiphy.signal_type) {
	case CFG80211_SIGNAL_TYPE_MBM:
		if (sinfo.filled & BIT_ULL(NL80211_STA_INFO_SIGNAL)) {
			int sig = sinfo.signal;

			wstats.qual.updated |= IW_QUAL_LEVEL_UPDATED;
			wstats.qual.updated |= IW_QUAL_QUAL_UPDATED;
			wstats.qual.updated |= IW_QUAL_DBM;
			wstats.qual.level = sig;
			if (sig < -110)
				sig = -110;
			else if (sig > -40)
				sig = -40;
			wstats.qual.qual = sig + 110;
			break;
		}
		/* fall through, signal not filled */
	case CFG80211_SIGNAL_TYPE_UNSPEC:
		if (sinfo.filled & BIT_ULL(NL80211_STA_INFO_SIGNAL)) {
			wstats.qual.updated |= IW_QUAL_LEVEL_UPDATED;
			wstats.qual.updated |= IW_QUAL_QUAL_UPDATED;
			wstats.qual.level = sinfo.signal;
			wstats.qual.qual = sinfo.signal;
			break;
		}
		/* fall through, signal not filled */
	default:
		wstats.qual.updated |= IW_QUAL_LEVEL_INVALID;
		wstats.qual.updated |= IW_QUAL_QUAL_INVALID;
	}

	wstats.qual.updated |= IW_QUAL_NOISE_INVALID;
	if (sinfo.filled & BIT_ULL(NL80211_STA_INFO_RX_DROP_MISC))
		wstats.discard.misc = sinfo.rx_dropped_misc;
	if (sinfo.filled & BIT_ULL(NL80211_STA_INFO_TX_FAILED))
		wstats.discard.retries = sinfo.tx_failed;

	cfg80211_sinfo_release_content(&sinfo);

	return &wstats;
}

static int cfg80211_wext_siwap(struct net_device *dev,
			       struct iw_request_info *info,
			       struct sockaddr *ap_addr, char *extra)
{
	struct wireless_dev *wdev = dev->ieee80211_ptr;

	switch (wdev->iftype) {
	case NL80211_IFTYPE_ADHOC:
		return cfg80211_ibss_wext_siwap(dev, info, ap_addr, extra);
	case NL80211_IFTYPE_STATION:
		return cfg80211_mgd_wext_siwap(dev, info, ap_addr, extra);
	case NL80211_IFTYPE_WDS:
		return cfg80211_wds_wext_siwap(dev, info, ap_addr, extra);
	default:
		return -EOPNOTSUPP;
	}
}

static int cfg80211_wext_giwap(struct net_device *dev,
			       struct iw_request_info *info,
			       struct sockaddr *ap_addr, char *extra)
{
	struct wireless_dev *wdev = dev->ieee80211_ptr;

	switch (wdev->iftype) {
	case NL80211_IFTYPE_ADHOC:
		return cfg80211_ibss_wext_giwap(dev, info, ap_addr, extra);
	case NL80211_IFTYPE_STATION:
		return cfg80211_mgd_wext_giwap(dev, info, ap_addr, extra);
	case NL80211_IFTYPE_WDS:
		return cfg80211_wds_wext_giwap(dev, info, ap_addr, extra);
	default:
		return -EOPNOTSUPP;
	}
}

static int cfg80211_wext_siwessid(struct net_device *dev,
				  struct iw_request_info *info,
				  struct iw_point *data, char *ssid)
{
	struct wireless_dev *wdev = dev->ieee80211_ptr;

	switch (wdev->iftype) {
	case NL80211_IFTYPE_ADHOC:
		return cfg80211_ibss_wext_siwessid(dev, info, data, ssid);
	case NL80211_IFTYPE_STATION:
		return cfg80211_mgd_wext_siwessid(dev, info, data, ssid);
	default:
		return -EOPNOTSUPP;
	}
}
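/*
 * Usage note (illustrative, not part of the original source): a command
 * such as "iwconfig wlan0 essid foo" issues SIOCSIWESSID, which the
 * handler above routes to cfg80211_mgd_wext_siwessid() on a station
 * interface and to cfg80211_ibss_wext_siwessid() on an IBSS interface;
 * every other interface type gets -EOPNOTSUPP.
 */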
static int cfg80211_wext_giwessid(struct net_device *dev,
				  struct iw_request_info *info,
				  struct iw_point *data, char *ssid)
{
	struct wireless_dev *wdev = dev->ieee80211_ptr;

	data->flags = 0;
	data->length = 0;

	switch (wdev->iftype) {
	case NL80211_IFTYPE_ADHOC:
		return cfg80211_ibss_wext_giwessid(dev, info, data, ssid);
	case NL80211_IFTYPE_STATION:
		return cfg80211_mgd_wext_giwessid(dev, info, data, ssid);
	default:
		return -EOPNOTSUPP;
	}
}

static int cfg80211_wext_siwpmksa(struct net_device *dev,
				  struct iw_request_info *info,
				  struct iw_point *data, char *extra)
{
	struct wireless_dev *wdev = dev->ieee80211_ptr;
	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
	struct cfg80211_pmksa cfg_pmksa;
	struct iw_pmksa *pmksa = (struct iw_pmksa *)extra;

	memset(&cfg_pmksa, 0, sizeof(struct cfg80211_pmksa));

	if (wdev->iftype != NL80211_IFTYPE_STATION)
		return -EINVAL;

	cfg_pmksa.bssid = pmksa->bssid.sa_data;
	cfg_pmksa.pmkid = pmksa->pmkid;

	switch (pmksa->cmd) {
	case IW_PMKSA_ADD:
		if (!rdev->ops->set_pmksa)
			return -EOPNOTSUPP;

		return rdev_set_pmksa(rdev, dev, &cfg_pmksa);
	case IW_PMKSA_REMOVE:
		if (!rdev->ops->del_pmksa)
			return -EOPNOTSUPP;

		return rdev_del_pmksa(rdev, dev, &cfg_pmksa);
	case IW_PMKSA_FLUSH:
		if (!rdev->ops->flush_pmksa)
			return -EOPNOTSUPP;

		return rdev_flush_pmksa(rdev, dev);
	default:
		return -EOPNOTSUPP;
	}
}

static const iw_handler cfg80211_handlers[] = {
	[IW_IOCTL_IDX(SIOCGIWNAME)]	= (iw_handler) cfg80211_wext_giwname,
	[IW_IOCTL_IDX(SIOCSIWFREQ)]	= (iw_handler) cfg80211_wext_siwfreq,
	[IW_IOCTL_IDX(SIOCGIWFREQ)]	= (iw_handler) cfg80211_wext_giwfreq,
	[IW_IOCTL_IDX(SIOCSIWMODE)]	= (iw_handler) cfg80211_wext_siwmode,
	[IW_IOCTL_IDX(SIOCGIWMODE)]	= (iw_handler) cfg80211_wext_giwmode,
	[IW_IOCTL_IDX(SIOCGIWRANGE)]	= (iw_handler) cfg80211_wext_giwrange,
	[IW_IOCTL_IDX(SIOCSIWAP)]	= (iw_handler) cfg80211_wext_siwap,
	[IW_IOCTL_IDX(SIOCGIWAP)]	= (iw_handler) cfg80211_wext_giwap,
	[IW_IOCTL_IDX(SIOCSIWMLME)]	= (iw_handler) cfg80211_wext_siwmlme,
	[IW_IOCTL_IDX(SIOCSIWSCAN)]	= (iw_handler) cfg80211_wext_siwscan,
	[IW_IOCTL_IDX(SIOCGIWSCAN)]	= (iw_handler) cfg80211_wext_giwscan,
	[IW_IOCTL_IDX(SIOCSIWESSID)]	= (iw_handler) cfg80211_wext_siwessid,
	[IW_IOCTL_IDX(SIOCGIWESSID)]	= (iw_handler) cfg80211_wext_giwessid,
	[IW_IOCTL_IDX(SIOCSIWRATE)]	= (iw_handler) cfg80211_wext_siwrate,
	[IW_IOCTL_IDX(SIOCGIWRATE)]	= (iw_handler) cfg80211_wext_giwrate,
	[IW_IOCTL_IDX(SIOCSIWRTS)]	= (iw_handler) cfg80211_wext_siwrts,
	[IW_IOCTL_IDX(SIOCGIWRTS)]	= (iw_handler) cfg80211_wext_giwrts,
	[IW_IOCTL_IDX(SIOCSIWFRAG)]	= (iw_handler) cfg80211_wext_siwfrag,
	[IW_IOCTL_IDX(SIOCGIWFRAG)]	= (iw_handler) cfg80211_wext_giwfrag,
	[IW_IOCTL_IDX(SIOCSIWTXPOW)]	= (iw_handler) cfg80211_wext_siwtxpower,
	[IW_IOCTL_IDX(SIOCGIWTXPOW)]	= (iw_handler) cfg80211_wext_giwtxpower,
	[IW_IOCTL_IDX(SIOCSIWRETRY)]	= (iw_handler) cfg80211_wext_siwretry,
	[IW_IOCTL_IDX(SIOCGIWRETRY)]	= (iw_handler) cfg80211_wext_giwretry,
	[IW_IOCTL_IDX(SIOCSIWENCODE)]	= (iw_handler) cfg80211_wext_siwencode,
	[IW_IOCTL_IDX(SIOCGIWENCODE)]	= (iw_handler) cfg80211_wext_giwencode,
	[IW_IOCTL_IDX(SIOCSIWPOWER)]	= (iw_handler) cfg80211_wext_siwpower,
	[IW_IOCTL_IDX(SIOCGIWPOWER)]	= (iw_handler) cfg80211_wext_giwpower,
	[IW_IOCTL_IDX(SIOCSIWGENIE)]	= (iw_handler) cfg80211_wext_siwgenie,
	[IW_IOCTL_IDX(SIOCSIWAUTH)]	= (iw_handler) cfg80211_wext_siwauth,
	[IW_IOCTL_IDX(SIOCGIWAUTH)]	= (iw_handler) cfg80211_wext_giwauth,
	[IW_IOCTL_IDX(SIOCSIWENCODEEXT)] = (iw_handler) cfg80211_wext_siwencodeext,
	[IW_IOCTL_IDX(SIOCSIWPMKSA)]	= (iw_handler) cfg80211_wext_siwpmksa,
};

const struct iw_handler_def cfg80211_wext_handler = {
	.num_standard		= ARRAY_SIZE(cfg80211_handlers),
	.standard		= cfg80211_handlers,
	.get_wireless_stats	= cfg80211_wireless_stats,
};
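/*
 * Dispatch note (illustrative, not part of the original source):
 * IW_IOCTL_IDX() is the ioctl number taken relative to SIOCIWFIRST
 * (0x8B00), so SIOCGIWNAME (0x8B01), for example, resolves to
 * cfg80211_handlers[1] above, i.e. cfg80211_wext_giwname().
 */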
backport-iwlwifi-dkms-7906/net/wireless/wext-compat.h000066400000000000000000000044101351064634500227440ustar00rootroot00000000000000
#ifndef __WEXT_COMPAT
#define __WEXT_COMPAT

#include <net/iw_handler.h>
#include <linux/wireless.h>

#ifdef CPTCFG_CFG80211_WEXT_EXPORT
#define EXPORT_WEXT_HANDLER(h) EXPORT_SYMBOL_GPL(h)
#else
#define EXPORT_WEXT_HANDLER(h)
#endif /* CPTCFG_CFG80211_WEXT_EXPORT */

int cfg80211_ibss_wext_siwfreq(struct net_device *dev,
			       struct iw_request_info *info,
			       struct iw_freq *freq, char *extra);
int cfg80211_ibss_wext_giwfreq(struct net_device *dev,
			       struct iw_request_info *info,
			       struct iw_freq *freq, char *extra);
int cfg80211_ibss_wext_siwap(struct net_device *dev,
			     struct iw_request_info *info,
			     struct sockaddr *ap_addr, char *extra);
int cfg80211_ibss_wext_giwap(struct net_device *dev,
			     struct iw_request_info *info,
			     struct sockaddr *ap_addr, char *extra);
int cfg80211_ibss_wext_siwessid(struct net_device *dev,
				struct iw_request_info *info,
				struct iw_point *data, char *ssid);
int cfg80211_ibss_wext_giwessid(struct net_device *dev,
				struct iw_request_info *info,
				struct iw_point *data, char *ssid);

int cfg80211_mgd_wext_siwfreq(struct net_device *dev,
			      struct iw_request_info *info,
			      struct iw_freq *freq, char *extra);
int cfg80211_mgd_wext_giwfreq(struct net_device *dev,
			      struct iw_request_info *info,
			      struct iw_freq *freq, char *extra);
int cfg80211_mgd_wext_siwap(struct net_device *dev,
			    struct iw_request_info *info,
			    struct sockaddr *ap_addr, char *extra);
int cfg80211_mgd_wext_giwap(struct net_device *dev,
			    struct iw_request_info *info,
			    struct sockaddr *ap_addr, char *extra);
int cfg80211_mgd_wext_siwessid(struct net_device *dev,
			       struct iw_request_info *info,
			       struct iw_point *data, char *ssid);
int cfg80211_mgd_wext_giwessid(struct net_device *dev,
			       struct iw_request_info *info,
			       struct iw_point *data, char *ssid);

int cfg80211_wext_siwmlme(struct net_device *dev,
			  struct iw_request_info *info,
			  struct iw_point *data, char *extra);
int cfg80211_wext_siwgenie(struct net_device *dev,
			   struct iw_request_info *info,
			   struct iw_point *data, char *extra);

int cfg80211_wext_freq(struct iw_freq *freq);

extern const struct iw_handler_def cfg80211_wext_handler;

#endif /* __WEXT_COMPAT */
backport-iwlwifi-dkms-7906/net/wireless/wext-core.c000066400000000000000000000770471351064634500224200ustar00rootroot00000000000000
/*
 * This file implements the Wireless Extensions core API.
 *
 * Authors :	Jean Tourrilhes - HPL
 * Copyright (c) 1997-2007 Jean Tourrilhes, All Rights Reserved.
 * Copyright	2009 Johannes Berg
 *
 * (As with all parts of the Linux kernel, this file is GPL)
 */
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/wireless.h>
#include <linux/uaccess.h>
#include <linux/export.h>
#include <net/cfg80211.h>
#include <net/iw_handler.h>
#include <net/netlink.h>
#include <net/wext.h>
#include <net/net_namespace.h>

typedef int (*wext_ioctl_func)(struct net_device *, struct iwreq *,
			       unsigned int, struct iw_request_info *,
			       iw_handler);

/*
 * Meta-data about all the standard Wireless Extension requests we
 * know about.
*/ static const struct iw_ioctl_description standard_ioctl[] = { [IW_IOCTL_IDX(SIOCSIWCOMMIT)] = { .header_type = IW_HEADER_TYPE_NULL, }, [IW_IOCTL_IDX(SIOCGIWNAME)] = { .header_type = IW_HEADER_TYPE_CHAR, .flags = IW_DESCR_FLAG_DUMP, }, [IW_IOCTL_IDX(SIOCSIWNWID)] = { .header_type = IW_HEADER_TYPE_PARAM, .flags = IW_DESCR_FLAG_EVENT, }, [IW_IOCTL_IDX(SIOCGIWNWID)] = { .header_type = IW_HEADER_TYPE_PARAM, .flags = IW_DESCR_FLAG_DUMP, }, [IW_IOCTL_IDX(SIOCSIWFREQ)] = { .header_type = IW_HEADER_TYPE_FREQ, .flags = IW_DESCR_FLAG_EVENT, }, [IW_IOCTL_IDX(SIOCGIWFREQ)] = { .header_type = IW_HEADER_TYPE_FREQ, .flags = IW_DESCR_FLAG_DUMP, }, [IW_IOCTL_IDX(SIOCSIWMODE)] = { .header_type = IW_HEADER_TYPE_UINT, .flags = IW_DESCR_FLAG_EVENT, }, [IW_IOCTL_IDX(SIOCGIWMODE)] = { .header_type = IW_HEADER_TYPE_UINT, .flags = IW_DESCR_FLAG_DUMP, }, [IW_IOCTL_IDX(SIOCSIWSENS)] = { .header_type = IW_HEADER_TYPE_PARAM, }, [IW_IOCTL_IDX(SIOCGIWSENS)] = { .header_type = IW_HEADER_TYPE_PARAM, }, [IW_IOCTL_IDX(SIOCSIWRANGE)] = { .header_type = IW_HEADER_TYPE_NULL, }, [IW_IOCTL_IDX(SIOCGIWRANGE)] = { .header_type = IW_HEADER_TYPE_POINT, .token_size = 1, .max_tokens = sizeof(struct iw_range), .flags = IW_DESCR_FLAG_DUMP, }, [IW_IOCTL_IDX(SIOCSIWPRIV)] = { .header_type = IW_HEADER_TYPE_NULL, }, [IW_IOCTL_IDX(SIOCGIWPRIV)] = { /* (handled directly by us) */ .header_type = IW_HEADER_TYPE_POINT, .token_size = sizeof(struct iw_priv_args), .max_tokens = 16, .flags = IW_DESCR_FLAG_NOMAX, }, [IW_IOCTL_IDX(SIOCSIWSTATS)] = { .header_type = IW_HEADER_TYPE_NULL, }, [IW_IOCTL_IDX(SIOCGIWSTATS)] = { /* (handled directly by us) */ .header_type = IW_HEADER_TYPE_POINT, .token_size = 1, .max_tokens = sizeof(struct iw_statistics), .flags = IW_DESCR_FLAG_DUMP, }, [IW_IOCTL_IDX(SIOCSIWSPY)] = { .header_type = IW_HEADER_TYPE_POINT, .token_size = sizeof(struct sockaddr), .max_tokens = IW_MAX_SPY, }, [IW_IOCTL_IDX(SIOCGIWSPY)] = { .header_type = IW_HEADER_TYPE_POINT, .token_size = sizeof(struct sockaddr) + sizeof(struct iw_quality), .max_tokens = IW_MAX_SPY, }, [IW_IOCTL_IDX(SIOCSIWTHRSPY)] = { .header_type = IW_HEADER_TYPE_POINT, .token_size = sizeof(struct iw_thrspy), .min_tokens = 1, .max_tokens = 1, }, [IW_IOCTL_IDX(SIOCGIWTHRSPY)] = { .header_type = IW_HEADER_TYPE_POINT, .token_size = sizeof(struct iw_thrspy), .min_tokens = 1, .max_tokens = 1, }, [IW_IOCTL_IDX(SIOCSIWAP)] = { .header_type = IW_HEADER_TYPE_ADDR, }, [IW_IOCTL_IDX(SIOCGIWAP)] = { .header_type = IW_HEADER_TYPE_ADDR, .flags = IW_DESCR_FLAG_DUMP, }, [IW_IOCTL_IDX(SIOCSIWMLME)] = { .header_type = IW_HEADER_TYPE_POINT, .token_size = 1, .min_tokens = sizeof(struct iw_mlme), .max_tokens = sizeof(struct iw_mlme), }, [IW_IOCTL_IDX(SIOCGIWAPLIST)] = { .header_type = IW_HEADER_TYPE_POINT, .token_size = sizeof(struct sockaddr) + sizeof(struct iw_quality), .max_tokens = IW_MAX_AP, .flags = IW_DESCR_FLAG_NOMAX, }, [IW_IOCTL_IDX(SIOCSIWSCAN)] = { .header_type = IW_HEADER_TYPE_POINT, .token_size = 1, .min_tokens = 0, .max_tokens = sizeof(struct iw_scan_req), }, [IW_IOCTL_IDX(SIOCGIWSCAN)] = { .header_type = IW_HEADER_TYPE_POINT, .token_size = 1, .max_tokens = IW_SCAN_MAX_DATA, .flags = IW_DESCR_FLAG_NOMAX, }, [IW_IOCTL_IDX(SIOCSIWESSID)] = { .header_type = IW_HEADER_TYPE_POINT, .token_size = 1, .max_tokens = IW_ESSID_MAX_SIZE, .flags = IW_DESCR_FLAG_EVENT, }, [IW_IOCTL_IDX(SIOCGIWESSID)] = { .header_type = IW_HEADER_TYPE_POINT, .token_size = 1, .max_tokens = IW_ESSID_MAX_SIZE, .flags = IW_DESCR_FLAG_DUMP, }, [IW_IOCTL_IDX(SIOCSIWNICKN)] = { .header_type = IW_HEADER_TYPE_POINT, 
.token_size = 1, .max_tokens = IW_ESSID_MAX_SIZE, }, [IW_IOCTL_IDX(SIOCGIWNICKN)] = { .header_type = IW_HEADER_TYPE_POINT, .token_size = 1, .max_tokens = IW_ESSID_MAX_SIZE, }, [IW_IOCTL_IDX(SIOCSIWRATE)] = { .header_type = IW_HEADER_TYPE_PARAM, }, [IW_IOCTL_IDX(SIOCGIWRATE)] = { .header_type = IW_HEADER_TYPE_PARAM, }, [IW_IOCTL_IDX(SIOCSIWRTS)] = { .header_type = IW_HEADER_TYPE_PARAM, }, [IW_IOCTL_IDX(SIOCGIWRTS)] = { .header_type = IW_HEADER_TYPE_PARAM, }, [IW_IOCTL_IDX(SIOCSIWFRAG)] = { .header_type = IW_HEADER_TYPE_PARAM, }, [IW_IOCTL_IDX(SIOCGIWFRAG)] = { .header_type = IW_HEADER_TYPE_PARAM, }, [IW_IOCTL_IDX(SIOCSIWTXPOW)] = { .header_type = IW_HEADER_TYPE_PARAM, }, [IW_IOCTL_IDX(SIOCGIWTXPOW)] = { .header_type = IW_HEADER_TYPE_PARAM, }, [IW_IOCTL_IDX(SIOCSIWRETRY)] = { .header_type = IW_HEADER_TYPE_PARAM, }, [IW_IOCTL_IDX(SIOCGIWRETRY)] = { .header_type = IW_HEADER_TYPE_PARAM, }, [IW_IOCTL_IDX(SIOCSIWENCODE)] = { .header_type = IW_HEADER_TYPE_POINT, .token_size = 1, .max_tokens = IW_ENCODING_TOKEN_MAX, .flags = IW_DESCR_FLAG_EVENT | IW_DESCR_FLAG_RESTRICT, }, [IW_IOCTL_IDX(SIOCGIWENCODE)] = { .header_type = IW_HEADER_TYPE_POINT, .token_size = 1, .max_tokens = IW_ENCODING_TOKEN_MAX, .flags = IW_DESCR_FLAG_DUMP | IW_DESCR_FLAG_RESTRICT, }, [IW_IOCTL_IDX(SIOCSIWPOWER)] = { .header_type = IW_HEADER_TYPE_PARAM, }, [IW_IOCTL_IDX(SIOCGIWPOWER)] = { .header_type = IW_HEADER_TYPE_PARAM, }, [IW_IOCTL_IDX(SIOCSIWGENIE)] = { .header_type = IW_HEADER_TYPE_POINT, .token_size = 1, .max_tokens = IW_GENERIC_IE_MAX, }, [IW_IOCTL_IDX(SIOCGIWGENIE)] = { .header_type = IW_HEADER_TYPE_POINT, .token_size = 1, .max_tokens = IW_GENERIC_IE_MAX, }, [IW_IOCTL_IDX(SIOCSIWAUTH)] = { .header_type = IW_HEADER_TYPE_PARAM, }, [IW_IOCTL_IDX(SIOCGIWAUTH)] = { .header_type = IW_HEADER_TYPE_PARAM, }, [IW_IOCTL_IDX(SIOCSIWENCODEEXT)] = { .header_type = IW_HEADER_TYPE_POINT, .token_size = 1, .min_tokens = sizeof(struct iw_encode_ext), .max_tokens = sizeof(struct iw_encode_ext) + IW_ENCODING_TOKEN_MAX, }, [IW_IOCTL_IDX(SIOCGIWENCODEEXT)] = { .header_type = IW_HEADER_TYPE_POINT, .token_size = 1, .min_tokens = sizeof(struct iw_encode_ext), .max_tokens = sizeof(struct iw_encode_ext) + IW_ENCODING_TOKEN_MAX, }, [IW_IOCTL_IDX(SIOCSIWPMKSA)] = { .header_type = IW_HEADER_TYPE_POINT, .token_size = 1, .min_tokens = sizeof(struct iw_pmksa), .max_tokens = sizeof(struct iw_pmksa), }, }; static const unsigned int standard_ioctl_num = ARRAY_SIZE(standard_ioctl); /* * Meta-data about all the additional standard Wireless Extension events * we know about. 
*/ static const struct iw_ioctl_description standard_event[] = { [IW_EVENT_IDX(IWEVTXDROP)] = { .header_type = IW_HEADER_TYPE_ADDR, }, [IW_EVENT_IDX(IWEVQUAL)] = { .header_type = IW_HEADER_TYPE_QUAL, }, [IW_EVENT_IDX(IWEVCUSTOM)] = { .header_type = IW_HEADER_TYPE_POINT, .token_size = 1, .max_tokens = IW_CUSTOM_MAX, }, [IW_EVENT_IDX(IWEVREGISTERED)] = { .header_type = IW_HEADER_TYPE_ADDR, }, [IW_EVENT_IDX(IWEVEXPIRED)] = { .header_type = IW_HEADER_TYPE_ADDR, }, [IW_EVENT_IDX(IWEVGENIE)] = { .header_type = IW_HEADER_TYPE_POINT, .token_size = 1, .max_tokens = IW_GENERIC_IE_MAX, }, [IW_EVENT_IDX(IWEVMICHAELMICFAILURE)] = { .header_type = IW_HEADER_TYPE_POINT, .token_size = 1, .max_tokens = sizeof(struct iw_michaelmicfailure), }, [IW_EVENT_IDX(IWEVASSOCREQIE)] = { .header_type = IW_HEADER_TYPE_POINT, .token_size = 1, .max_tokens = IW_GENERIC_IE_MAX, }, [IW_EVENT_IDX(IWEVASSOCRESPIE)] = { .header_type = IW_HEADER_TYPE_POINT, .token_size = 1, .max_tokens = IW_GENERIC_IE_MAX, }, [IW_EVENT_IDX(IWEVPMKIDCAND)] = { .header_type = IW_HEADER_TYPE_POINT, .token_size = 1, .max_tokens = sizeof(struct iw_pmkid_cand), }, }; static const unsigned int standard_event_num = ARRAY_SIZE(standard_event); /* Size (in bytes) of various events */ static const int event_type_size[] = { IW_EV_LCP_LEN, /* IW_HEADER_TYPE_NULL */ 0, IW_EV_CHAR_LEN, /* IW_HEADER_TYPE_CHAR */ 0, IW_EV_UINT_LEN, /* IW_HEADER_TYPE_UINT */ IW_EV_FREQ_LEN, /* IW_HEADER_TYPE_FREQ */ IW_EV_ADDR_LEN, /* IW_HEADER_TYPE_ADDR */ 0, IW_EV_POINT_LEN, /* Without variable payload */ IW_EV_PARAM_LEN, /* IW_HEADER_TYPE_PARAM */ IW_EV_QUAL_LEN, /* IW_HEADER_TYPE_QUAL */ }; #ifdef CONFIG_COMPAT static const int compat_event_type_size[] = { IW_EV_COMPAT_LCP_LEN, /* IW_HEADER_TYPE_NULL */ 0, IW_EV_COMPAT_CHAR_LEN, /* IW_HEADER_TYPE_CHAR */ 0, IW_EV_COMPAT_UINT_LEN, /* IW_HEADER_TYPE_UINT */ IW_EV_COMPAT_FREQ_LEN, /* IW_HEADER_TYPE_FREQ */ IW_EV_COMPAT_ADDR_LEN, /* IW_HEADER_TYPE_ADDR */ 0, IW_EV_COMPAT_POINT_LEN, /* Without variable payload */ IW_EV_COMPAT_PARAM_LEN, /* IW_HEADER_TYPE_PARAM */ IW_EV_COMPAT_QUAL_LEN, /* IW_HEADER_TYPE_QUAL */ }; #endif /* IW event code */ void wireless_nlevent_flush(void) { struct sk_buff *skb; struct net *net; down_read(&net_rwsem); for_each_net(net) { while ((skb = skb_dequeue(&net->wext_nlevents))) rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_KERNEL); } up_read(&net_rwsem); } EXPORT_SYMBOL_GPL(wireless_nlevent_flush); static int wext_netdev_notifier_call(struct notifier_block *nb, unsigned long state, void *ptr) { /* * When a netdev changes state in any way, flush all pending messages * to avoid them going out in a strange order, e.g. RTM_NEWLINK after * RTM_DELLINK, or with IFF_UP after without IFF_UP during dev_close() * or similar - all of which could otherwise happen due to delays from * schedule_work(). 
*/ wireless_nlevent_flush(); return NOTIFY_OK; } static struct notifier_block wext_netdev_notifier = { .notifier_call = wext_netdev_notifier_call, }; static int __net_init wext_pernet_init(struct net *net) { skb_queue_head_init(&net->wext_nlevents); return 0; } static void __net_exit wext_pernet_exit(struct net *net) { skb_queue_purge(&net->wext_nlevents); } static struct pernet_operations wext_pernet_ops = { .init = wext_pernet_init, .exit = wext_pernet_exit, }; static int __init wireless_nlevent_init(void) { int err = register_pernet_subsys(&wext_pernet_ops); if (err) return err; err = register_netdevice_notifier(&wext_netdev_notifier); if (err) unregister_pernet_subsys(&wext_pernet_ops); return err; } subsys_initcall(wireless_nlevent_init); /* Process events generated by the wireless layer or the driver. */ static void wireless_nlevent_process(struct work_struct *work) { wireless_nlevent_flush(); } static DECLARE_WORK(wireless_nlevent_work, wireless_nlevent_process); static struct nlmsghdr *rtnetlink_ifinfo_prep(struct net_device *dev, struct sk_buff *skb) { struct ifinfomsg *r; struct nlmsghdr *nlh; nlh = nlmsg_put(skb, 0, 0, RTM_NEWLINK, sizeof(*r), 0); if (!nlh) return NULL; r = nlmsg_data(nlh); r->ifi_family = AF_UNSPEC; r->__ifi_pad = 0; r->ifi_type = dev->type; r->ifi_index = dev->ifindex; r->ifi_flags = dev_get_flags(dev); r->ifi_change = 0; /* Wireless changes don't affect those flags */ if (nla_put_string(skb, IFLA_IFNAME, dev->name)) goto nla_put_failure; return nlh; nla_put_failure: nlmsg_cancel(skb, nlh); return NULL; } /* * Main event dispatcher. Called from other parts and drivers. * Send the event on the appropriate channels. * May be called from interrupt context. */ void wireless_send_event(struct net_device * dev, unsigned int cmd, union iwreq_data * wrqu, const char * extra) { const struct iw_ioctl_description * descr = NULL; int extra_len = 0; struct iw_event *event; /* Mallocated whole event */ int event_len; /* Its size */ int hdr_len; /* Size of the event header */ int wrqu_off = 0; /* Offset in wrqu */ /* Don't "optimise" the following variable, it will crash */ unsigned int cmd_index; /* *MUST* be unsigned */ struct sk_buff *skb; struct nlmsghdr *nlh; struct nlattr *nla; #ifdef CONFIG_COMPAT struct __compat_iw_event *compat_event; struct compat_iw_point compat_wrqu; struct sk_buff *compskb; #endif /* * Nothing in the kernel sends scan events with data, be safe. * This is necessary because we cannot fix up scan event data * for compat, due to being contained in 'extra', but normally * applications are required to retrieve the scan data anyway * and no data is included in the event, this codifies that * practice. */ if (WARN_ON(cmd == SIOCGIWSCAN && extra)) extra = NULL; /* Get the description of the Event */ if (cmd <= SIOCIWLAST) { cmd_index = IW_IOCTL_IDX(cmd); if (cmd_index < standard_ioctl_num) descr = &(standard_ioctl[cmd_index]); } else { cmd_index = IW_EVENT_IDX(cmd); if (cmd_index < standard_event_num) descr = &(standard_event[cmd_index]); } /* Don't accept unknown events */ if (descr == NULL) { /* Note : we don't return an error to the driver, because * the driver would not know what to do about it. It can't * return an error to the user, because the event is not * initiated by a user request. * The best the driver could do is to log an error message. * We will do it ourselves instead... 
*/ netdev_err(dev, "(WE) : Invalid/Unknown Wireless Event (0x%04X)\n", cmd); return; } /* Check extra parameters and set extra_len */ if (descr->header_type == IW_HEADER_TYPE_POINT) { /* Check if number of token fits within bounds */ if (wrqu->data.length > descr->max_tokens) { netdev_err(dev, "(WE) : Wireless Event (cmd=0x%04X) too big (%d)\n", cmd, wrqu->data.length); return; } if (wrqu->data.length < descr->min_tokens) { netdev_err(dev, "(WE) : Wireless Event (cmd=0x%04X) too small (%d)\n", cmd, wrqu->data.length); return; } /* Calculate extra_len - extra is NULL for restricted events */ if (extra != NULL) extra_len = wrqu->data.length * descr->token_size; /* Always at an offset in wrqu */ wrqu_off = IW_EV_POINT_OFF; } /* Total length of the event */ hdr_len = event_type_size[descr->header_type]; event_len = hdr_len + extra_len; /* * The problem for 64/32 bit. * * On 64-bit, a regular event is laid out as follows: * | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | * | event.len | event.cmd | p a d d i n g | * | wrqu data ... (with the correct size) | * * This padding exists because we manipulate event->u, * and 'event' is not packed. * * An iw_point event is laid out like this instead: * | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | * | event.len | event.cmd | p a d d i n g | * | iwpnt.len | iwpnt.flg | p a d d i n g | * | extra data ... * * The second padding exists because struct iw_point is extended, * but this depends on the platform... * * On 32-bit, all the padding shouldn't be there. */ skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); if (!skb) return; /* Send via the RtNetlink event channel */ nlh = rtnetlink_ifinfo_prep(dev, skb); if (WARN_ON(!nlh)) { kfree_skb(skb); return; } /* Add the wireless events in the netlink packet */ nla = nla_reserve(skb, IFLA_WIRELESS, event_len); if (!nla) { kfree_skb(skb); return; } event = nla_data(nla); /* Fill event - first clear to avoid data leaking */ memset(event, 0, hdr_len); event->len = event_len; event->cmd = cmd; memcpy(&event->u, ((char *) wrqu) + wrqu_off, hdr_len - IW_EV_LCP_LEN); if (extra_len) memcpy(((char *) event) + hdr_len, extra, extra_len); nlmsg_end(skb, nlh); #ifdef CONFIG_COMPAT hdr_len = compat_event_type_size[descr->header_type]; event_len = hdr_len + extra_len; compskb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); if (!compskb) { kfree_skb(skb); return; } /* Send via the RtNetlink event channel */ nlh = rtnetlink_ifinfo_prep(dev, compskb); if (WARN_ON(!nlh)) { kfree_skb(skb); kfree_skb(compskb); return; } /* Add the wireless events in the netlink packet */ nla = nla_reserve(compskb, IFLA_WIRELESS, event_len); if (!nla) { kfree_skb(skb); kfree_skb(compskb); return; } compat_event = nla_data(nla); compat_event->len = event_len; compat_event->cmd = cmd; if (descr->header_type == IW_HEADER_TYPE_POINT) { compat_wrqu.length = wrqu->data.length; compat_wrqu.flags = wrqu->data.flags; memcpy(&compat_event->pointer, ((char *) &compat_wrqu) + IW_EV_COMPAT_POINT_OFF, hdr_len - IW_EV_COMPAT_LCP_LEN); if (extra_len) memcpy(((char *) compat_event) + hdr_len, extra, extra_len); } else { /* extra_len must be zero, so no if (extra) needed */ memcpy(&compat_event->pointer, wrqu, hdr_len - IW_EV_COMPAT_LCP_LEN); } nlmsg_end(compskb, nlh); skb_shinfo(skb)->frag_list = compskb; #endif skb_queue_tail(&dev_net(dev)->wext_nlevents, skb); schedule_work(&wireless_nlevent_work); } EXPORT_SYMBOL(wireless_send_event); /* IW handlers */ struct iw_statistics *get_wireless_stats(struct net_device *dev) { #ifdef CONFIG_WIRELESS_EXT if ((dev->wireless_handlers != NULL) && 
(dev->wireless_handlers->get_wireless_stats != NULL)) return dev->wireless_handlers->get_wireless_stats(dev); #endif #ifdef CPTCFG_CFG80211_WEXT if (dev->ieee80211_ptr && dev->ieee80211_ptr->wiphy && dev->ieee80211_ptr->wiphy->wext && dev->ieee80211_ptr->wiphy->wext->get_wireless_stats) return dev->ieee80211_ptr->wiphy->wext->get_wireless_stats(dev); #endif /* not found */ return NULL; } static int iw_handler_get_iwstats(struct net_device * dev, struct iw_request_info * info, union iwreq_data * wrqu, char * extra) { /* Get stats from the driver */ struct iw_statistics *stats; stats = get_wireless_stats(dev); if (stats) { /* Copy statistics to extra */ memcpy(extra, stats, sizeof(struct iw_statistics)); wrqu->data.length = sizeof(struct iw_statistics); /* Check if we need to clear the updated flag */ if (wrqu->data.flags != 0) stats->qual.updated &= ~IW_QUAL_ALL_UPDATED; return 0; } else return -EOPNOTSUPP; } static iw_handler get_handler(struct net_device *dev, unsigned int cmd) { /* Don't "optimise" the following variable, it will crash */ unsigned int index; /* *MUST* be unsigned */ const struct iw_handler_def *handlers = NULL; #ifdef CPTCFG_CFG80211_WEXT if (dev->ieee80211_ptr && dev->ieee80211_ptr->wiphy) handlers = dev->ieee80211_ptr->wiphy->wext; #endif #ifdef CONFIG_WIRELESS_EXT if (dev->wireless_handlers) handlers = dev->wireless_handlers; #endif if (!handlers) return NULL; /* Try as a standard command */ index = IW_IOCTL_IDX(cmd); if (index < handlers->num_standard) return handlers->standard[index]; #ifdef CONFIG_WEXT_PRIV /* Try as a private command */ index = cmd - SIOCIWFIRSTPRIV; if (index < handlers->num_private) return handlers->private[index]; #endif /* Not found */ return NULL; } static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd, const struct iw_ioctl_description *descr, iw_handler handler, struct net_device *dev, struct iw_request_info *info) { int err, extra_size, user_length = 0, essid_compat = 0; char *extra; /* Calculate space needed by arguments. Always allocate * for max space. */ extra_size = descr->max_tokens * descr->token_size; /* Check need for ESSID compatibility for WE < 21 */ switch (cmd) { case SIOCSIWESSID: case SIOCGIWESSID: case SIOCSIWNICKN: case SIOCGIWNICKN: if (iwp->length == descr->max_tokens + 1) essid_compat = 1; else if (IW_IS_SET(cmd) && (iwp->length != 0)) { char essid[IW_ESSID_MAX_SIZE + 1]; unsigned int len; len = iwp->length * descr->token_size; if (len > IW_ESSID_MAX_SIZE) return -EFAULT; err = copy_from_user(essid, iwp->pointer, len); if (err) return -EFAULT; if (essid[iwp->length - 1] == '\0') essid_compat = 1; } break; default: break; } iwp->length -= essid_compat; /* Check what user space is giving us */ if (IW_IS_SET(cmd)) { /* Check NULL pointer */ if (!iwp->pointer && iwp->length != 0) return -EFAULT; /* Check if number of token fits within bounds */ if (iwp->length > descr->max_tokens) return -E2BIG; if (iwp->length < descr->min_tokens) return -EINVAL; } else { /* Check NULL pointer */ if (!iwp->pointer) return -EFAULT; /* Save user space buffer size for checking */ user_length = iwp->length; /* Don't check if user_length > max to allow forward * compatibility. The test user_length < min is * implied by the test at the end. */ /* Support for very large requests */ if ((descr->flags & IW_DESCR_FLAG_NOMAX) && (user_length > descr->max_tokens)) { /* Allow userspace to GET more than max so * we can support any size GET requests. * There is still a limit : -ENOMEM. 
*/ extra_size = user_length * descr->token_size; /* Note : user_length is originally a __u16, * and token_size is controlled by us, * so extra_size won't get negative and * won't overflow... */ } } /* kzalloc() ensures NULL-termination for essid_compat. */ extra = kzalloc(extra_size, GFP_KERNEL); if (!extra) return -ENOMEM; /* If it is a SET, get all the extra data in here */ if (IW_IS_SET(cmd) && (iwp->length != 0)) { if (copy_from_user(extra, iwp->pointer, iwp->length * descr->token_size)) { err = -EFAULT; goto out; } if (cmd == SIOCSIWENCODEEXT) { struct iw_encode_ext *ee = (void *) extra; if (iwp->length < sizeof(*ee) + ee->key_len) { err = -EFAULT; goto out; } } } if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) { /* * If this is a GET, but not NOMAX, it means that the extra * data is not bounded by userspace, but by max_tokens. Thus * set the length to max_tokens. This matches the extra data * allocation. * The driver should fill it with the number of tokens it * provided, and it may check iwp->length rather than having * knowledge of max_tokens. If the driver doesn't change the * iwp->length, this ioctl just copies back max_token tokens * filled with zeroes. Hopefully the driver isn't claiming * them to be valid data. */ iwp->length = descr->max_tokens; } err = handler(dev, info, (union iwreq_data *) iwp, extra); iwp->length += essid_compat; /* If we have something to return to the user */ if (!err && IW_IS_GET(cmd)) { /* Check if there is enough buffer up there */ if (user_length < iwp->length) { err = -E2BIG; goto out; } if (copy_to_user(iwp->pointer, extra, iwp->length * descr->token_size)) { err = -EFAULT; goto out; } } /* Generate an event to notify listeners of the change */ if ((descr->flags & IW_DESCR_FLAG_EVENT) && ((err == 0) || (err == -EIWCOMMIT))) { union iwreq_data *data = (union iwreq_data *) iwp; if (descr->flags & IW_DESCR_FLAG_RESTRICT) /* If the event is restricted, don't * export the payload. */ wireless_send_event(dev, cmd, data, NULL); else wireless_send_event(dev, cmd, data, extra); } out: kfree(extra); return err; } /* * Call the commit handler in the driver * (if exist and if conditions are right) * * Note : our current commit strategy is currently pretty dumb, * but we will be able to improve on that... * The goal is to try to agreagate as many changes as possible * before doing the commit. Drivers that will define a commit handler * are usually those that need a reset after changing parameters, so * we want to minimise the number of reset. * A cool idea is to use a timer : at each "set" command, we re-set the * timer, when the timer eventually fires, we call the driver. * Hopefully, more on that later. * * Also, I'm waiting to see how many people will complain about the * netif_running(dev) test. I'm open on that one... * Hopefully, the driver will remember to do a commit in "open()" ;-) */ int call_commit_handler(struct net_device *dev) { #ifdef CONFIG_WIRELESS_EXT if ((netif_running(dev)) && (dev->wireless_handlers->standard[0] != NULL)) /* Call the commit handler on the driver */ return dev->wireless_handlers->standard[0](dev, NULL, NULL, NULL); else return 0; /* Command completed successfully */ #else /* cfg80211 has no commit */ return 0; #endif } /* * Main IOCTl dispatcher. * Check the type of IOCTL and call the appropriate wrapper... 
*/ static int wireless_process_ioctl(struct net *net, struct iwreq *iwr, unsigned int cmd, struct iw_request_info *info, wext_ioctl_func standard, wext_ioctl_func private) { struct net_device *dev; iw_handler handler; /* Permissions are already checked in dev_ioctl() before calling us. * The copy_to/from_user() of ifr is also dealt with in there */ /* Make sure the device exist */ if ((dev = __dev_get_by_name(net, iwr->ifr_name)) == NULL) return -ENODEV; /* A bunch of special cases, then the generic case... * Note that 'cmd' is already filtered in dev_ioctl() with * (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) */ if (cmd == SIOCGIWSTATS) return standard(dev, iwr, cmd, info, &iw_handler_get_iwstats); #ifdef CONFIG_WEXT_PRIV if (cmd == SIOCGIWPRIV && dev->wireless_handlers) return standard(dev, iwr, cmd, info, iw_handler_get_private); #endif /* Basic check */ if (!netif_device_present(dev)) return -ENODEV; /* New driver API : try to find the handler */ handler = get_handler(dev, cmd); if (handler) { /* Standard and private are not the same */ if (cmd < SIOCIWFIRSTPRIV) return standard(dev, iwr, cmd, info, handler); else if (private) return private(dev, iwr, cmd, info, handler); } return -EOPNOTSUPP; } /* If command is `set a parameter', or `get the encoding parameters', * check if the user has the right to do it. */ static int wext_permission_check(unsigned int cmd) { if ((IW_IS_SET(cmd) || cmd == SIOCGIWENCODE || cmd == SIOCGIWENCODEEXT) && !capable(CAP_NET_ADMIN)) return -EPERM; return 0; } /* entry point from dev ioctl */ static int wext_ioctl_dispatch(struct net *net, struct iwreq *iwr, unsigned int cmd, struct iw_request_info *info, wext_ioctl_func standard, wext_ioctl_func private) { int ret = wext_permission_check(cmd); if (ret) return ret; dev_load(net, iwr->ifr_name); rtnl_lock(); ret = wireless_process_ioctl(net, iwr, cmd, info, standard, private); rtnl_unlock(); return ret; } /* * Wrapper to call a standard Wireless Extension handler. * We do various checks and also take care of moving data between * user space and kernel space. */ static int ioctl_standard_call(struct net_device * dev, struct iwreq *iwr, unsigned int cmd, struct iw_request_info *info, iw_handler handler) { const struct iw_ioctl_description * descr; int ret = -EINVAL; /* Get the description of the IOCTL */ if (IW_IOCTL_IDX(cmd) >= standard_ioctl_num) return -EOPNOTSUPP; descr = &(standard_ioctl[IW_IOCTL_IDX(cmd)]); /* Check if we have a pointer to user space data or not */ if (descr->header_type != IW_HEADER_TYPE_POINT) { /* No extra arguments. 
Trivial to handle */ ret = handler(dev, info, &(iwr->u), NULL); /* Generate an event to notify listeners of the change */ if ((descr->flags & IW_DESCR_FLAG_EVENT) && ((ret == 0) || (ret == -EIWCOMMIT))) wireless_send_event(dev, cmd, &(iwr->u), NULL); } else { ret = ioctl_standard_iw_point(&iwr->u.data, cmd, descr, handler, dev, info); } /* Call commit handler if needed and defined */ if (ret == -EIWCOMMIT) ret = call_commit_handler(dev); /* Here, we will generate the appropriate event if needed */ return ret; } int wext_handle_ioctl(struct net *net, unsigned int cmd, void __user *arg) { struct iw_request_info info = { .cmd = cmd, .flags = 0 }; struct iwreq iwr; int ret; if (copy_from_user(&iwr, arg, sizeof(iwr))) return -EFAULT; iwr.ifr_name[sizeof(iwr.ifr_name) - 1] = 0; ret = wext_ioctl_dispatch(net, &iwr, cmd, &info, ioctl_standard_call, ioctl_private_call); if (ret >= 0 && IW_IS_GET(cmd) && copy_to_user(arg, &iwr, sizeof(struct iwreq))) return -EFAULT; return ret; } #ifdef CONFIG_COMPAT static int compat_standard_call(struct net_device *dev, struct iwreq *iwr, unsigned int cmd, struct iw_request_info *info, iw_handler handler) { const struct iw_ioctl_description *descr; struct compat_iw_point *iwp_compat; struct iw_point iwp; int err; descr = standard_ioctl + IW_IOCTL_IDX(cmd); if (descr->header_type != IW_HEADER_TYPE_POINT) return ioctl_standard_call(dev, iwr, cmd, info, handler); iwp_compat = (struct compat_iw_point *) &iwr->u.data; iwp.pointer = compat_ptr(iwp_compat->pointer); iwp.length = iwp_compat->length; iwp.flags = iwp_compat->flags; err = ioctl_standard_iw_point(&iwp, cmd, descr, handler, dev, info); iwp_compat->pointer = ptr_to_compat(iwp.pointer); iwp_compat->length = iwp.length; iwp_compat->flags = iwp.flags; return err; } int compat_wext_handle_ioctl(struct net *net, unsigned int cmd, unsigned long arg) { void __user *argp = (void __user *)arg; struct iw_request_info info; struct iwreq iwr; char *colon; int ret; if (copy_from_user(&iwr, argp, sizeof(struct iwreq))) return -EFAULT; iwr.ifr_name[IFNAMSIZ-1] = 0; colon = strchr(iwr.ifr_name, ':'); if (colon) *colon = 0; info.cmd = cmd; info.flags = IW_REQUEST_FLAG_COMPAT; ret = wext_ioctl_dispatch(net, &iwr, cmd, &info, compat_standard_call, compat_private_call); if (ret >= 0 && IW_IS_GET(cmd) && copy_to_user(argp, &iwr, sizeof(struct iwreq))) return -EFAULT; return ret; } #endif char *iwe_stream_add_event(struct iw_request_info *info, char *stream, char *ends, struct iw_event *iwe, int event_len) { int lcp_len = iwe_stream_lcp_len(info); event_len = iwe_stream_event_len_adjust(info, event_len); /* Check if it's possible */ if (likely((stream + event_len) < ends)) { iwe->len = event_len; /* Beware of alignement issues on 64 bits */ memcpy(stream, (char *) iwe, IW_EV_LCP_PK_LEN); memcpy(stream + lcp_len, &iwe->u, event_len - lcp_len); stream += event_len; } return stream; } EXPORT_SYMBOL(iwe_stream_add_event); char *iwe_stream_add_point(struct iw_request_info *info, char *stream, char *ends, struct iw_event *iwe, char *extra) { int event_len = iwe_stream_point_len(info) + iwe->u.data.length; int point_len = iwe_stream_point_len(info); int lcp_len = iwe_stream_lcp_len(info); /* Check if it's possible */ if (likely((stream + event_len) < ends)) { iwe->len = event_len; memcpy(stream, (char *) iwe, IW_EV_LCP_PK_LEN); memcpy(stream + lcp_len, ((char *) &iwe->u) + IW_EV_POINT_OFF, IW_EV_POINT_PK_LEN - IW_EV_LCP_PK_LEN); if (iwe->u.data.length && extra) memcpy(stream + point_len, extra, iwe->u.data.length); stream += event_len; } 
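	/*
	 * Sketch of typical driver-side usage (current_ev/end_buf follow the
	 * usual giwscan handler naming and are not defined in this file),
	 * reporting an SSID while filling SIOCGIWSCAN results:
	 *
	 *	struct iw_event iwe = {};
	 *
	 *	iwe.cmd = SIOCGIWESSID;
	 *	iwe.u.data.length = ssid_len;
	 *	iwe.u.data.flags = 1;
	 *	current_ev = iwe_stream_add_point(info, current_ev, end_buf,
	 *					  &iwe, ssid);
	 *
	 * Callers detect a full buffer by the stream pointer not advancing.
	 */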
return stream; } EXPORT_SYMBOL(iwe_stream_add_point); char *iwe_stream_add_value(struct iw_request_info *info, char *event, char *value, char *ends, struct iw_event *iwe, int event_len) { int lcp_len = iwe_stream_lcp_len(info); /* Don't duplicate LCP */ event_len -= IW_EV_LCP_LEN; /* Check if it's possible */ if (likely((value + event_len) < ends)) { /* Add new value */ memcpy(value, &iwe->u, event_len); value += event_len; /* Patch LCP */ iwe->len = value - event; memcpy(event, (char *) iwe, lcp_len); } return value; } EXPORT_SYMBOL(iwe_stream_add_value); backport-iwlwifi-dkms-7906/net/wireless/wext-priv.c000066400000000000000000000155631351064634500224470ustar00rootroot00000000000000/* * This file implement the Wireless Extensions priv API. * * Authors : Jean Tourrilhes - HPL - * Copyright (c) 1997-2007 Jean Tourrilhes, All Rights Reserved. * Copyright 2009 Johannes Berg * * (As all part of the Linux kernel, this file is GPL) */ #include #include #include #include #include int iw_handler_get_private(struct net_device * dev, struct iw_request_info * info, union iwreq_data * wrqu, char * extra) { /* Check if the driver has something to export */ if ((dev->wireless_handlers->num_private_args == 0) || (dev->wireless_handlers->private_args == NULL)) return -EOPNOTSUPP; /* Check if there is enough buffer up there */ if (wrqu->data.length < dev->wireless_handlers->num_private_args) { /* User space can't know in advance how large the buffer * needs to be. Give it a hint, so that we can support * any size buffer we want somewhat efficiently... */ wrqu->data.length = dev->wireless_handlers->num_private_args; return -E2BIG; } /* Set the number of available ioctls. */ wrqu->data.length = dev->wireless_handlers->num_private_args; /* Copy structure to the user buffer. */ memcpy(extra, dev->wireless_handlers->private_args, sizeof(struct iw_priv_args) * wrqu->data.length); return 0; } /* Size (in bytes) of the various private data types */ static const char iw_priv_type_size[] = { 0, /* IW_PRIV_TYPE_NONE */ 1, /* IW_PRIV_TYPE_BYTE */ 1, /* IW_PRIV_TYPE_CHAR */ 0, /* Not defined */ sizeof(__u32), /* IW_PRIV_TYPE_INT */ sizeof(struct iw_freq), /* IW_PRIV_TYPE_FLOAT */ sizeof(struct sockaddr), /* IW_PRIV_TYPE_ADDR */ 0, /* Not defined */ }; static int get_priv_size(__u16 args) { int num = args & IW_PRIV_SIZE_MASK; int type = (args & IW_PRIV_TYPE_MASK) >> 12; return num * iw_priv_type_size[type]; } static int adjust_priv_size(__u16 args, struct iw_point *iwp) { int num = iwp->length; int max = args & IW_PRIV_SIZE_MASK; int type = (args & IW_PRIV_TYPE_MASK) >> 12; /* Make sure the driver doesn't goof up */ if (max < num) num = max; return num * iw_priv_type_size[type]; } /* * Wrapper to call a private Wireless Extension handler. * We do various checks and also take care of moving data between * user space and kernel space. * It's not as nice and slimline as the standard wrapper. The cause * is struct iw_priv_args, which was not really designed for the * job we are going here. * * IMPORTANT : This function prevent to set and get data on the same * IOCTL and enforce the SET/GET convention. Not doing it would be * far too hairy... * If you need to set and get data at the same time, please don't use * a iw_handler but process it in your ioctl handler (i.e. use the * old driver API). 
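 *
 * For orientation, a hypothetical driver-side table (command name and
 * number invented for illustration):
 *
 *	static const struct iw_priv_args my_priv_args[] = {
 *		{ SIOCIWFIRSTPRIV + 0,
 *		  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
 *		  0, "set_qosmode" },
 *	};
 *
 * A fixed-size SET payload that small fits inside the ifreq itself, so
 * get_priv_descr_and_size() below reports extra_size == 0 and the
 * wrapper skips the bounce buffer entirely.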
*/ static int get_priv_descr_and_size(struct net_device *dev, unsigned int cmd, const struct iw_priv_args **descrp) { const struct iw_priv_args *descr; int i, extra_size; descr = NULL; for (i = 0; i < dev->wireless_handlers->num_private_args; i++) { if (cmd == dev->wireless_handlers->private_args[i].cmd) { descr = &dev->wireless_handlers->private_args[i]; break; } } extra_size = 0; if (descr) { if (IW_IS_SET(cmd)) { int offset = 0; /* For sub-ioctls */ /* Check for sub-ioctl handler */ if (descr->name[0] == '\0') /* Reserve one int for sub-ioctl index */ offset = sizeof(__u32); /* Size of set arguments */ extra_size = get_priv_size(descr->set_args); /* Does it fits in iwr ? */ if ((descr->set_args & IW_PRIV_SIZE_FIXED) && ((extra_size + offset) <= IFNAMSIZ)) extra_size = 0; } else { /* Size of get arguments */ extra_size = get_priv_size(descr->get_args); /* Does it fits in iwr ? */ if ((descr->get_args & IW_PRIV_SIZE_FIXED) && (extra_size <= IFNAMSIZ)) extra_size = 0; } } *descrp = descr; return extra_size; } static int ioctl_private_iw_point(struct iw_point *iwp, unsigned int cmd, const struct iw_priv_args *descr, iw_handler handler, struct net_device *dev, struct iw_request_info *info, int extra_size) { char *extra; int err; /* Check what user space is giving us */ if (IW_IS_SET(cmd)) { if (!iwp->pointer && iwp->length != 0) return -EFAULT; if (iwp->length > (descr->set_args & IW_PRIV_SIZE_MASK)) return -E2BIG; } else if (!iwp->pointer) return -EFAULT; extra = kzalloc(extra_size, GFP_KERNEL); if (!extra) return -ENOMEM; /* If it is a SET, get all the extra data in here */ if (IW_IS_SET(cmd) && (iwp->length != 0)) { if (copy_from_user(extra, iwp->pointer, extra_size)) { err = -EFAULT; goto out; } } /* Call the handler */ err = handler(dev, info, (union iwreq_data *) iwp, extra); /* If we have something to return to the user */ if (!err && IW_IS_GET(cmd)) { /* Adjust for the actual length if it's variable, * avoid leaking kernel bits outside. */ if (!(descr->get_args & IW_PRIV_SIZE_FIXED)) extra_size = adjust_priv_size(descr->get_args, iwp); if (copy_to_user(iwp->pointer, extra, extra_size)) err = -EFAULT; } out: kfree(extra); return err; } int ioctl_private_call(struct net_device *dev, struct iwreq *iwr, unsigned int cmd, struct iw_request_info *info, iw_handler handler) { int extra_size = 0, ret = -EINVAL; const struct iw_priv_args *descr; extra_size = get_priv_descr_and_size(dev, cmd, &descr); /* Check if we have a pointer to user space data or not. */ if (extra_size == 0) { /* No extra arguments. Trivial to handle */ ret = handler(dev, info, &(iwr->u), (char *) &(iwr->u)); } else { ret = ioctl_private_iw_point(&iwr->u.data, cmd, descr, handler, dev, info, extra_size); } /* Call commit handler if needed and defined */ if (ret == -EIWCOMMIT) ret = call_commit_handler(dev); return ret; } #ifdef CONFIG_COMPAT int compat_private_call(struct net_device *dev, struct iwreq *iwr, unsigned int cmd, struct iw_request_info *info, iw_handler handler) { const struct iw_priv_args *descr; int ret, extra_size; extra_size = get_priv_descr_and_size(dev, cmd, &descr); /* Check if we have a pointer to user space data or not. */ if (extra_size == 0) { /* No extra arguments. 
Trivial to handle */ ret = handler(dev, info, &(iwr->u), (char *) &(iwr->u)); } else { struct compat_iw_point *iwp_compat; struct iw_point iwp; iwp_compat = (struct compat_iw_point *) &iwr->u.data; iwp.pointer = compat_ptr(iwp_compat->pointer); iwp.length = iwp_compat->length; iwp.flags = iwp_compat->flags; ret = ioctl_private_iw_point(&iwp, cmd, descr, handler, dev, info, extra_size); iwp_compat->pointer = ptr_to_compat(iwp.pointer); iwp_compat->length = iwp.length; iwp_compat->flags = iwp.flags; } /* Call commit handler if needed and defined */ if (ret == -EIWCOMMIT) ret = call_commit_handler(dev); return ret; } #endif backport-iwlwifi-dkms-7906/net/wireless/wext-proc.c000066400000000000000000000072311351064634500224230ustar00rootroot00000000000000/* * This file implement the Wireless Extensions proc API. * * Authors : Jean Tourrilhes - HPL - * Copyright (c) 1997-2007 Jean Tourrilhes, All Rights Reserved. * * (As all part of the Linux kernel, this file is GPL) */ /* * The /proc/net/wireless file is a human readable user-space interface * exporting various wireless specific statistics from the wireless devices. * This is the most popular part of the Wireless Extensions ;-) * * This interface is a pure clone of /proc/net/dev (in net/core/dev.c). * The content of the file is basically the content of "struct iw_statistics". */ #include #include #include #include #include #include #include #include static void wireless_seq_printf_stats(struct seq_file *seq, struct net_device *dev) { /* Get stats from the driver */ struct iw_statistics *stats = get_wireless_stats(dev); static struct iw_statistics nullstats = {}; /* show device if it's wireless regardless of current stats */ if (!stats) { #ifdef CONFIG_WIRELESS_EXT if (dev->wireless_handlers) stats = &nullstats; #endif #ifdef CPTCFG_CFG80211 if (dev->ieee80211_ptr) stats = &nullstats; #endif } if (stats) { seq_printf(seq, "%6s: %04x %3d%c %3d%c %3d%c %6d %6d %6d " "%6d %6d %6d\n", dev->name, stats->status, stats->qual.qual, stats->qual.updated & IW_QUAL_QUAL_UPDATED ? '.' : ' ', ((__s32) stats->qual.level) - ((stats->qual.updated & IW_QUAL_DBM) ? 0x100 : 0), stats->qual.updated & IW_QUAL_LEVEL_UPDATED ? '.' : ' ', ((__s32) stats->qual.noise) - ((stats->qual.updated & IW_QUAL_DBM) ? 0x100 : 0), stats->qual.updated & IW_QUAL_NOISE_UPDATED ? '.' : ' ', stats->discard.nwid, stats->discard.code, stats->discard.fragment, stats->discard.retries, stats->discard.misc, stats->miss.beacon); if (stats != &nullstats) stats->qual.updated &= ~IW_QUAL_ALL_UPDATED; } } /* ---------------------------------------------------------------- */ /* * Print info for /proc/net/wireless (print all entries) */ static int wireless_dev_seq_show(struct seq_file *seq, void *v) { might_sleep(); if (v == SEQ_START_TOKEN) seq_printf(seq, "Inter-| sta-| Quality | Discarded " "packets | Missed | WE\n" " face | tus | link level noise | nwid " "crypt frag retry misc | beacon | %d\n", WIRELESS_EXT); else wireless_seq_printf_stats(seq, v); return 0; } static void *wireless_dev_seq_start(struct seq_file *seq, loff_t *pos) { struct net *net = seq_file_net(seq); loff_t off; struct net_device *dev; rtnl_lock(); if (!*pos) return SEQ_START_TOKEN; off = 1; for_each_netdev(net, dev) if (off++ == *pos) return dev; return NULL; } static void *wireless_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct net *net = seq_file_net(seq); ++*pos; return v == SEQ_START_TOKEN ? 
first_net_device(net) : next_net_device(v); } static void wireless_dev_seq_stop(struct seq_file *seq, void *v) { rtnl_unlock(); } static const struct seq_operations wireless_seq_ops = { .start = wireless_dev_seq_start, .next = wireless_dev_seq_next, .stop = wireless_dev_seq_stop, .show = wireless_dev_seq_show, }; int __net_init wext_proc_init(struct net *net) { /* Create /proc/net/wireless entry */ if (!proc_create_net("wireless", 0444, net->proc_net, &wireless_seq_ops, sizeof(struct seq_net_private))) return -ENOMEM; return 0; } void __net_exit wext_proc_exit(struct net *net) { remove_proc_entry("wireless", net->proc_net); } backport-iwlwifi-dkms-7906/net/wireless/wext-sme.c000066400000000000000000000211751351064634500222470ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0 /* * cfg80211 wext compat for managed mode. * * Copyright 2009 Johannes Berg * Copyright (C) 2009 Intel Corporation. All rights reserved. */ #include #include #include #include #include #include #include "wext-compat.h" #include "nl80211.h" int cfg80211_mgd_wext_connect(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev) { struct cfg80211_cached_keys *ck = NULL; const u8 *prev_bssid = NULL; int err, i; ASSERT_RTNL(); ASSERT_WDEV_LOCK(wdev); if (!netif_running(wdev->netdev)) return 0; wdev->wext.connect.ie = wdev->wext.ie; wdev->wext.connect.ie_len = wdev->wext.ie_len; /* Use default background scan period */ wdev->wext.connect.bg_scan_period = -1; if (wdev->wext.keys) { wdev->wext.keys->def = wdev->wext.default_key; if (wdev->wext.default_key != -1) wdev->wext.connect.privacy = true; } if (!wdev->wext.connect.ssid_len) return 0; if (wdev->wext.keys && wdev->wext.keys->def != -1) { ck = kmemdup(wdev->wext.keys, sizeof(*ck), GFP_KERNEL); if (!ck) return -ENOMEM; for (i = 0; i < CFG80211_MAX_WEP_KEYS; i++) ck->params[i].key = ck->data[i]; } if (wdev->wext.prev_bssid_valid) prev_bssid = wdev->wext.prev_bssid; err = cfg80211_connect(rdev, wdev->netdev, &wdev->wext.connect, ck, prev_bssid); if (err) kzfree(ck); return err; } int cfg80211_mgd_wext_siwfreq(struct net_device *dev, struct iw_request_info *info, struct iw_freq *wextfreq, char *extra) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); struct ieee80211_channel *chan = NULL; int err, freq; /* call only for station! */ if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION)) return -EINVAL; freq = cfg80211_wext_freq(wextfreq); if (freq < 0) return freq; if (freq) { chan = ieee80211_get_channel(wdev->wiphy, freq); if (!chan) return -EINVAL; if (chan->flags & IEEE80211_CHAN_DISABLED) return -EINVAL; } wdev_lock(wdev); if (wdev->conn) { bool event = true; if (wdev->wext.connect.channel == chan) { err = 0; goto out; } /* if SSID set, we'll try right again, avoid event */ if (wdev->wext.connect.ssid_len) event = false; err = cfg80211_disconnect(rdev, dev, WLAN_REASON_DEAUTH_LEAVING, event); if (err) goto out; } wdev->wext.connect.channel = chan; err = cfg80211_mgd_wext_connect(rdev, wdev); out: wdev_unlock(wdev); return err; } int cfg80211_mgd_wext_giwfreq(struct net_device *dev, struct iw_request_info *info, struct iw_freq *freq, char *extra) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct ieee80211_channel *chan = NULL; /* call only for station! 
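 * (Reply encoding sketch: the iw_freq pair filled in below means
 * m * 10^e Hz, so a channel at 2412 MHz is returned as
 * freq->m = 2412, freq->e = 6.)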
*/ if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION)) return -EINVAL; wdev_lock(wdev); if (wdev->current_bss) chan = wdev->current_bss->pub.channel; else if (wdev->wext.connect.channel) chan = wdev->wext.connect.channel; wdev_unlock(wdev); if (chan) { freq->m = chan->center_freq; freq->e = 6; return 0; } /* no channel if not joining */ return -EINVAL; } int cfg80211_mgd_wext_siwessid(struct net_device *dev, struct iw_request_info *info, struct iw_point *data, char *ssid) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); size_t len = data->length; int err; /* call only for station! */ if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION)) return -EINVAL; if (!data->flags) len = 0; /* iwconfig uses nul termination in SSID.. */ if (len > 0 && ssid[len - 1] == '\0') len--; wdev_lock(wdev); err = 0; if (wdev->conn) { bool event = true; if (wdev->wext.connect.ssid && len && len == wdev->wext.connect.ssid_len && memcmp(wdev->wext.connect.ssid, ssid, len) == 0) goto out; /* if SSID set now, we'll try to connect, avoid event */ if (len) event = false; err = cfg80211_disconnect(rdev, dev, WLAN_REASON_DEAUTH_LEAVING, event); if (err) goto out; } wdev->wext.prev_bssid_valid = false; wdev->wext.connect.ssid = wdev->wext.ssid; memcpy(wdev->wext.ssid, ssid, len); wdev->wext.connect.ssid_len = len; wdev->wext.connect.crypto.control_port = false; wdev->wext.connect.crypto.control_port_ethertype = cpu_to_be16(ETH_P_PAE); err = cfg80211_mgd_wext_connect(rdev, wdev); out: wdev_unlock(wdev); return err; } int cfg80211_mgd_wext_giwessid(struct net_device *dev, struct iw_request_info *info, struct iw_point *data, char *ssid) { struct wireless_dev *wdev = dev->ieee80211_ptr; /* call only for station! */ if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION)) return -EINVAL; data->flags = 0; wdev_lock(wdev); if (wdev->current_bss) { const u8 *ie; rcu_read_lock(); ie = ieee80211_bss_get_ie(&wdev->current_bss->pub, WLAN_EID_SSID); if (ie) { data->flags = 1; data->length = ie[1]; memcpy(ssid, ie + 2, data->length); } rcu_read_unlock(); } else if (wdev->wext.connect.ssid && wdev->wext.connect.ssid_len) { data->flags = 1; data->length = wdev->wext.connect.ssid_len; memcpy(ssid, wdev->wext.connect.ssid, data->length); } wdev_unlock(wdev); return 0; } int cfg80211_mgd_wext_siwap(struct net_device *dev, struct iw_request_info *info, struct sockaddr *ap_addr, char *extra) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); u8 *bssid = ap_addr->sa_data; int err; /* call only for station! 
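 * (Usage note: a zeroed or broadcast sa_data, as tools like iwconfig
 * send for "ap any"/"ap off", selects automatic BSSID handling below.)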
*/ if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION)) return -EINVAL; if (ap_addr->sa_family != ARPHRD_ETHER) return -EINVAL; /* automatic mode */ if (is_zero_ether_addr(bssid) || is_broadcast_ether_addr(bssid)) bssid = NULL; wdev_lock(wdev); if (wdev->conn) { err = 0; /* both automatic */ if (!bssid && !wdev->wext.connect.bssid) goto out; /* fixed already - and no change */ if (wdev->wext.connect.bssid && bssid && ether_addr_equal(bssid, wdev->wext.connect.bssid)) goto out; err = cfg80211_disconnect(rdev, dev, WLAN_REASON_DEAUTH_LEAVING, false); if (err) goto out; } if (bssid) { memcpy(wdev->wext.bssid, bssid, ETH_ALEN); wdev->wext.connect.bssid = wdev->wext.bssid; } else wdev->wext.connect.bssid = NULL; err = cfg80211_mgd_wext_connect(rdev, wdev); out: wdev_unlock(wdev); return err; } int cfg80211_mgd_wext_giwap(struct net_device *dev, struct iw_request_info *info, struct sockaddr *ap_addr, char *extra) { struct wireless_dev *wdev = dev->ieee80211_ptr; /* call only for station! */ if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION)) return -EINVAL; ap_addr->sa_family = ARPHRD_ETHER; wdev_lock(wdev); if (wdev->current_bss) memcpy(ap_addr->sa_data, wdev->current_bss->pub.bssid, ETH_ALEN); else eth_zero_addr(ap_addr->sa_data); wdev_unlock(wdev); return 0; } int cfg80211_wext_siwgenie(struct net_device *dev, struct iw_request_info *info, struct iw_point *data, char *extra) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); u8 *ie = extra; int ie_len = data->length, err; if (wdev->iftype != NL80211_IFTYPE_STATION) return -EOPNOTSUPP; if (!ie_len) ie = NULL; wdev_lock(wdev); /* no change */ err = 0; if (wdev->wext.ie_len == ie_len && memcmp(wdev->wext.ie, ie, ie_len) == 0) goto out; if (ie_len) { ie = kmemdup(extra, ie_len, GFP_KERNEL); if (!ie) { err = -ENOMEM; goto out; } } else ie = NULL; kfree(wdev->wext.ie); wdev->wext.ie = ie; wdev->wext.ie_len = ie_len; if (wdev->conn) { err = cfg80211_disconnect(rdev, dev, WLAN_REASON_DEAUTH_LEAVING, false); if (err) goto out; } /* userspace better not think we'll reconnect */ err = 0; out: wdev_unlock(wdev); return err; } int cfg80211_wext_siwmlme(struct net_device *dev, struct iw_request_info *info, struct iw_point *data, char *extra) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct iw_mlme *mlme = (struct iw_mlme *)extra; struct cfg80211_registered_device *rdev; int err; if (!wdev) return -EOPNOTSUPP; rdev = wiphy_to_rdev(wdev->wiphy); if (wdev->iftype != NL80211_IFTYPE_STATION) return -EINVAL; if (mlme->addr.sa_family != ARPHRD_ETHER) return -EINVAL; wdev_lock(wdev); switch (mlme->cmd) { case IW_MLME_DEAUTH: case IW_MLME_DISASSOC: err = cfg80211_disconnect(rdev, dev, mlme->reason_code, true); break; default: err = -EOPNOTSUPP; break; } wdev_unlock(wdev); return err; } backport-iwlwifi-dkms-7906/net/wireless/wext-spy.c000066400000000000000000000150741351064634500222770ustar00rootroot00000000000000/* * This file implement the Wireless Extensions spy API. * * Authors : Jean Tourrilhes - HPL - * Copyright (c) 1997-2007 Jean Tourrilhes, All Rights Reserved. 
* * (As all part of the Linux kernel, this file is GPL) */ #include #include #include #include #include #include #include static inline struct iw_spy_data *get_spydata(struct net_device *dev) { /* This is the new way */ if (dev->wireless_data) return dev->wireless_data->spy_data; return NULL; } int iw_handler_set_spy(struct net_device * dev, struct iw_request_info * info, union iwreq_data * wrqu, char * extra) { struct iw_spy_data * spydata = get_spydata(dev); struct sockaddr * address = (struct sockaddr *) extra; /* Make sure driver is not buggy or using the old API */ if (!spydata) return -EOPNOTSUPP; /* Disable spy collection while we copy the addresses. * While we copy addresses, any call to wireless_spy_update() * will NOP. This is OK, as anyway the addresses are changing. */ spydata->spy_number = 0; /* We want to operate without locking, because wireless_spy_update() * most likely will happen in the interrupt handler, and therefore * have its own locking constraints and needs performance. * The rtnl_lock() make sure we don't race with the other iw_handlers. * This make sure wireless_spy_update() "see" that the spy list * is temporarily disabled. */ smp_wmb(); /* Are there are addresses to copy? */ if (wrqu->data.length > 0) { int i; /* Copy addresses */ for (i = 0; i < wrqu->data.length; i++) memcpy(spydata->spy_address[i], address[i].sa_data, ETH_ALEN); /* Reset stats */ memset(spydata->spy_stat, 0, sizeof(struct iw_quality) * IW_MAX_SPY); } /* Make sure above is updated before re-enabling */ smp_wmb(); /* Enable addresses */ spydata->spy_number = wrqu->data.length; return 0; } EXPORT_SYMBOL(iw_handler_set_spy); int iw_handler_get_spy(struct net_device * dev, struct iw_request_info * info, union iwreq_data * wrqu, char * extra) { struct iw_spy_data * spydata = get_spydata(dev); struct sockaddr * address = (struct sockaddr *) extra; int i; /* Make sure driver is not buggy or using the old API */ if (!spydata) return -EOPNOTSUPP; wrqu->data.length = spydata->spy_number; /* Copy addresses. */ for (i = 0; i < spydata->spy_number; i++) { memcpy(address[i].sa_data, spydata->spy_address[i], ETH_ALEN); address[i].sa_family = AF_UNIX; } /* Copy stats to the user buffer (just after). */ if (spydata->spy_number > 0) memcpy(extra + (sizeof(struct sockaddr) *spydata->spy_number), spydata->spy_stat, sizeof(struct iw_quality) * spydata->spy_number); /* Reset updated flags. 
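 * (For reference, the reply buffer assembled above is laid out as:
 *
 *	struct sockaddr addr[spy_number];	// addresses first
 *	struct iw_quality qual[spy_number];	// stats packed right after
 *
 * which is what user-space parsers of SIOCGIWSPY expect.)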
*/ for (i = 0; i < spydata->spy_number; i++) spydata->spy_stat[i].updated &= ~IW_QUAL_ALL_UPDATED; return 0; } EXPORT_SYMBOL(iw_handler_get_spy); /*------------------------------------------------------------------*/ /* * Standard Wireless Handler : set spy threshold */ int iw_handler_set_thrspy(struct net_device * dev, struct iw_request_info *info, union iwreq_data * wrqu, char * extra) { struct iw_spy_data * spydata = get_spydata(dev); struct iw_thrspy * threshold = (struct iw_thrspy *) extra; /* Make sure driver is not buggy or using the old API */ if (!spydata) return -EOPNOTSUPP; /* Just do it */ memcpy(&(spydata->spy_thr_low), &(threshold->low), 2 * sizeof(struct iw_quality)); /* Clear flag */ memset(spydata->spy_thr_under, '\0', sizeof(spydata->spy_thr_under)); return 0; } EXPORT_SYMBOL(iw_handler_set_thrspy); /*------------------------------------------------------------------*/ /* * Standard Wireless Handler : get spy threshold */ int iw_handler_get_thrspy(struct net_device * dev, struct iw_request_info *info, union iwreq_data * wrqu, char * extra) { struct iw_spy_data * spydata = get_spydata(dev); struct iw_thrspy * threshold = (struct iw_thrspy *) extra; /* Make sure driver is not buggy or using the old API */ if (!spydata) return -EOPNOTSUPP; /* Just do it */ memcpy(&(threshold->low), &(spydata->spy_thr_low), 2 * sizeof(struct iw_quality)); return 0; } EXPORT_SYMBOL(iw_handler_get_thrspy); /*------------------------------------------------------------------*/ /* * Prepare and send a Spy Threshold event */ static void iw_send_thrspy_event(struct net_device * dev, struct iw_spy_data * spydata, unsigned char * address, struct iw_quality * wstats) { union iwreq_data wrqu; struct iw_thrspy threshold; /* Init */ wrqu.data.length = 1; wrqu.data.flags = 0; /* Copy address */ memcpy(threshold.addr.sa_data, address, ETH_ALEN); threshold.addr.sa_family = ARPHRD_ETHER; /* Copy stats */ memcpy(&(threshold.qual), wstats, sizeof(struct iw_quality)); /* Copy also thresholds */ memcpy(&(threshold.low), &(spydata->spy_thr_low), 2 * sizeof(struct iw_quality)); /* Send event to user space */ wireless_send_event(dev, SIOCGIWTHRSPY, &wrqu, (char *) &threshold); } /* ---------------------------------------------------------------- */ /* * Call for the driver to update the spy data. * For now, the spy data is a simple array. As the size of the array is * small, this is good enough. If we wanted to support larger number of * spy addresses, we should use something more efficient... */ void wireless_spy_update(struct net_device * dev, unsigned char * address, struct iw_quality * wstats) { struct iw_spy_data * spydata = get_spydata(dev); int i; int match = -1; /* Make sure driver is not buggy or using the old API */ if (!spydata) return; /* Update all records that match */ for (i = 0; i < spydata->spy_number; i++) if (ether_addr_equal(address, spydata->spy_address[i])) { memcpy(&(spydata->spy_stat[i]), wstats, sizeof(struct iw_quality)); match = i; } /* Generate an event if we cross the spy threshold. * To avoid event storms, we have a simple hysteresis : we generate * event only when we go under the low threshold or above the * high threshold. 
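 *
 * Worked example (illustrative numbers, reading level as dBm): with
 * spy_thr_low.level = -80 and spy_thr_high.level = -60, a link fading
 * from -55 to -85 fires one event when it first drops below -80; the
 * next event fires only once the level climbs back above -60. Samples
 * wandering between the two thresholds are recorded but stay silent.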
*/ if (match >= 0) { if (spydata->spy_thr_under[match]) { if (wstats->level > spydata->spy_thr_high.level) { spydata->spy_thr_under[match] = 0; iw_send_thrspy_event(dev, spydata, address, wstats); } } else { if (wstats->level < spydata->spy_thr_low.level) { spydata->spy_thr_under[match] = 1; iw_send_thrspy_event(dev, spydata, address, wstats); } } } } EXPORT_SYMBOL(wireless_spy_update); backport-iwlwifi-dkms-7906/scripts/000077500000000000000000000000001351064634500173705ustar00rootroot00000000000000backport-iwlwifi-dkms-7906/scripts/blacklist.sh000077500000000000000000000015251351064634500217020ustar00rootroot00000000000000#!/bin/bash BLACKLIST_CONF="/etc/modprobe.d/backports.conf" BLACKLIST_MAP=".blacklist.map" MODULE_DIR=$1 MODULE_UPDATES=$2 if [[ ! -d $MODULE_DIR ]]; then exit fi if [[ ! -d $MODULE_UPDATES ]]; then exit fi mkdir -p $(dirname $BLACKLIST_CONF) rm -f $BLACKLIST_CONF echo "# To be used when using backported drivers" > $BLACKLIST_CONF for i in $(grep -v ^# $BLACKLIST_MAP | awk '{print $2}'); do MODULE="${i}.ko" MODULE_UPDATE="$(grep -v ^# $BLACKLIST_MAP | grep $i | awk '{print $1}' | head -1).ko" COUNT=$(find $MODULE_DIR -type f -name ${MODULE} -or -name ${MODULE}.gz | wc -l) COUNT_REPLACE=$(find $MODULE_UPDATES -type f -name ${MODULE_UPDATE} -or -name ${MODULE_UPDATE}.gz | wc -l) if [ $COUNT -ne 0 ]; then if [ $COUNT_REPLACE -ne 0 ]; then echo "Blacklisting $MODULE ..." echo blacklist $i >> $BLACKLIST_CONF fi fi done backport-iwlwifi-dkms-7906/scripts/check_depmod.sh000077500000000000000000000043041351064634500223350ustar00rootroot00000000000000#!/bin/bash # Copyright 2009-2013 Luis R. Rodriguez # # Ensures your distribution likes to prefer updates/ over the kernel/ # search updates built-in # Seems Mandriva has an $DEPMOD_DIR but it doesn't have any files, # so lets deal with those distributions. DEPMOD_CONF="/etc/depmod.conf" DEPMOD_CONF_TMP="$DEPMOD_CONF.backports.old" DEPMOD_DIR="/etc/depmod.d/" BACKPORT_DEPMOD_FILE="backports.conf" GREP_REGEX_UPDATES="^[[:space:]]*search.*[[:space:]]updates\([[:space:]]\|$\)" GREP_REGEX_SEARCH="^[[:space:]]*search[[:space:]].\+$" DEPMOD_CMD="depmod" function add_compat_depmod_conf { echo "NOTE: Your distribution lacks an $DEPMOD_DIR directory with " echo "updates/ directory being prioritized for modules, we're adding " echo "one for you." mkdir -p $DEPMOD_DIR FIRST_FILE=$(ls $DEPMOD_DIR|head -1) [ -n "$FIRST_FILE" ] && while [[ $FIRST_FILE < $BACKPORT_DEPMOD_FILE ]]; do BACKPORT_DEPMOD_FILE="0$BACKPORT_DEPMOD_FILE" done echo "search updates" > $DEPMOD_DIR/$BACKPORT_DEPMOD_FILE } function add_global_depmod_conf { echo "NOTE: Your distribution lacks updates/ directory being" echo "prioritized for modules, we're adding it to $DEPMOD_CONF." rm -f $DEPMOD_CONF_TMP [ -f $DEPMOD_CONF ] && cp -f $DEPMOD_CONF $DEPMOD_CONF_TMP echo "search updates" > $DEPMOD_CONF [ -f $DEPMOD_CONF_TMP ] && cat $DEPMOD_CONF_TMP >> $DEPMOD_CONF } function depmod_updates_ok { echo "depmod will prefer updates/ over kernel/ -- OK!" 
} function add_depmod_conf { if [ -f "$DEPMOD_CONF" ]; then add_global_depmod_conf else DEPMOD_VERSION=$($DEPMOD_CMD --version | cut -d" " -f2 | sed "s/\.//") if [[ $DEPMOD_VERSION -gt 36 ]]; then add_compat_depmod_conf else add_global_depmod_conf fi fi } GREP_FILES="" [ -f $DEPMOD_CONF ] && GREP_FILES="$DEPMOD_CONF" if [ -d $DEPMOD_DIR ]; then DEPMOD_FILE_COUNT=$(ls $DEPMOD_DIR | wc -l) [[ $DEPMOD_FILE_COUNT -gt 0 ]] && GREP_FILES="$GREP_FILES $DEPMOD_DIR/*" fi if [ -n "$GREP_FILES" ]; then grep -q "$GREP_REGEX_SEARCH" $GREP_FILES if [[ $? -eq 0 ]]; then grep -q "$GREP_REGEX_UPDATES" $GREP_FILES if [[ $? -eq 0 ]]; then depmod_updates_ok else add_depmod_conf fi else depmod_updates_ok fi else depmod_updates_ok fi exit 0 backport-iwlwifi-dkms-7906/scripts/compress_modules.sh000077500000000000000000000003311351064634500233070ustar00rootroot00000000000000#!/bin/bash set -e source ./scripts/mod_helpers.sh if test "$(mod_filename mac80211)" = "mac80211.ko.gz" ; then for driver in $(find "$1" -type f -name *.ko); do echo COMPRESS $driver gzip -9 $driver done fi backport-iwlwifi-dkms-7906/scripts/kernel-doc000077500000000000000000002607061351064634500213540ustar00rootroot00000000000000#!/usr/bin/perl -w use strict; ## Copyright (c) 1998 Michael Zucchi, All Rights Reserved ## ## Copyright (C) 2000, 1 Tim Waugh ## ## Copyright (C) 2001 Simon Huggins ## ## Copyright (C) 2005-2012 Randy Dunlap ## ## Copyright (C) 2012 Dan Luedtke ## ## ## ## #define enhancements by Armin Kuster ## ## Copyright (c) 2000 MontaVista Software, Inc. ## ## ## ## This software falls under the GNU General Public License. ## ## Please read the COPYING file for more information ## # 18/01/2001 - Cleanups # Functions prototyped as foo(void) same as foo() # Stop eval'ing where we don't need to. # -- huggie@earth.li # 27/06/2001 - Allowed whitespace after initial "/**" and # allowed comments before function declarations. # -- Christian Kreibich # Still to do: # - add perldoc documentation # - Look more closely at some of the scarier bits :) # 26/05/2001 - Support for separate source and object trees. # Return error code. # Keith Owens # 23/09/2001 - Added support for typedefs, structs, enums and unions # Support for Context section; can be terminated using empty line # Small fixes (like spaces vs. \s in regex) # -- Tim Jansen # 25/07/2012 - Added support for HTML5 # -- Dan Luedtke sub usage { my $message = <<"EOF"; Usage: $0 [OPTION ...] FILE ... Read C language source or header FILEs, extract embedded documentation comments, and print formatted documentation to standard output. The documentation comments are identified by "/**" opening comment mark. See Documentation/kernel-doc-nano-HOWTO.txt for the documentation comment syntax. Output format selection (mutually exclusive): -docbook Output DocBook format. -html Output HTML format. -html5 Output HTML5 format. -list Output symbol list format. This is for use by docproc. -man Output troff manual page format. This is the default. -rst Output reStructuredText format. -text Output plain text format. Output selection (mutually exclusive): -export Only output documentation for symbols that have been exported using EXPORT_SYMBOL() or EXPORT_SYMBOL_GPL() in any input FILE or -export-file FILE. -internal Only output documentation for symbols that have NOT been exported using EXPORT_SYMBOL() or EXPORT_SYMBOL_GPL() in any input FILE or -export-file FILE. -function NAME Only output documentation for the given function(s) or DOC: section title(s). 
All other functions and DOC: sections are ignored. May be specified multiple times. -nofunction NAME Do NOT output documentation for the given function(s); only output documentation for the other functions and DOC: sections. May be specified multiple times. Output selection modifiers: -no-doc-sections Do not output DOC: sections. -enable-lineno Enable output of #define LINENO lines. Only works with reStructuredText format. -export-file FILE Specify an additional FILE in which to look for EXPORT_SYMBOL() and EXPORT_SYMBOL_GPL(). To be used with -export or -internal. May be specified multiple times. Other parameters: -v Verbose output, more warnings and other information. -h Print this help. EOF print $message; exit 1; } # # format of comments. # In the following table, (...)? signifies optional structure. # (...)* signifies 0 or more structure elements # /** # * function_name(:)? (- short description)? # (* @parameterx: (description of parameter x)?)* # (* a blank line)? # * (Description:)? (Description of function)? # * (section header: (section description)? )* # (*)?*/ # # So .. the trivial example would be: # # /** # * my_function # */ # # If the Description: header tag is omitted, then there must be a blank line # after the last parameter specification. # e.g. # /** # * my_function - does my stuff # * @my_arg: its mine damnit # * # * Does my stuff explained. # */ # # or, could also use: # /** # * my_function - does my stuff # * @my_arg: its mine damnit # * Description: Does my stuff explained. # */ # etc. # # Besides functions you can also write documentation for structs, unions, # enums and typedefs. Instead of the function name you must write the name # of the declaration; the struct/union/enum/typedef must always precede # the name. Nesting of declarations is not supported. # Use the argument mechanism to document members or constants. # e.g. # /** # * struct my_struct - short description # * @a: first member # * @b: second member # * # * Longer description # */ # struct my_struct { # int a; # int b; # /* private: */ # int c; # }; # # All descriptions can be multiline, except the short function description. # # For really longs structs, you can also describe arguments inside the # body of the struct. # eg. # /** # * struct my_struct - short description # * @a: first member # * @b: second member # * # * Longer description # */ # struct my_struct { # int a; # int b; # /** # * @c: This is longer description of C # * # * You can use paragraphs to describe arguments # * using this method. # */ # int c; # }; # # This should be use only for struct/enum members. # # You can also add additional sections. When documenting kernel functions you # should document the "Context:" of the function, e.g. whether the functions # can be called form interrupts. Unlike other sections you can end it with an # empty line. # A non-void function should have a "Return:" section describing the return # value(s). # Example-sections should contain the string EXAMPLE so that they are marked # appropriately in DocBook. # # Example: # /** # * user_function - function that can only be called in user context # * @a: some argument # * Context: !in_interrupt() # * # * Some description # * Example: # * user_function(22); # */ # ... # # # All descriptive text is further processed, scanning for the following special # patterns, which are highlighted appropriately. 
#
# 'funcname()' - function
# '$ENVVAR' - environmental variable
# '&struct_name' - name of a structure (up to two words including 'struct')
# '@parameter' - name of a parameter
# '%CONST' - name of a constant.

## init lots of data

my $errors = 0;
my $warnings = 0;
my $anon_struct_union = 0;

# match expressions used to find embedded type information
my $type_constant = '\%([-_\w]+)';
my $type_func = '(\w+)\(\)';
my $type_param = '\@(\w+(\.\.\.)?)';
my $type_fp_param = '\@(\w+)\(\)';  # Special RST handling for func ptr params
my $type_struct = '\&((struct\s*)*[_\w]+)';
my $type_struct_xml = '\\&((struct\s*)*[_\w]+)';
my $type_env = '(\$\w+)';
my $type_enum_full = '\&(enum)\s*([_\w]+)';
my $type_struct_full = '\&(struct)\s*([_\w]+)';
my $type_typedef_full = '\&(typedef)\s*([_\w]+)';
my $type_union_full = '\&(union)\s*([_\w]+)';
my $type_member = '\&([_\w]+)((\.|->)[_\w]+)';
my $type_member_func = $type_member . '\(\)';

# Output conversion substitutions.
#  One for each output format

# these work fairly well
my @highlights_html = (
                       [$type_constant, "<i>\$1</i>"],
                       [$type_func, "<b>\$1</b>"],
                       [$type_struct_xml, "<i>\$1</i>"],
                       [$type_env, "<b><i>\$1</i></b>"],
                       [$type_param, "<tt><b>\$1</b></tt>"]
                      );
my $local_lt = "\\\\\\\\lt:";
my $local_gt = "\\\\\\\\gt:";
my $blankline_html = $local_lt . "p" . $local_gt;	# was "<p>"

# html version 5
my @highlights_html5 = (
                        [$type_constant, "<span class=\"const\">\$1</span>"],
                        [$type_func, "<span class=\"func\">\$1</span>"],
                        [$type_struct_xml, "<span class=\"struct\">\$1</span>"],
                        [$type_env, "<span class=\"env\">\$1</span>"],
                        [$type_param, "<span class=\"param\">\$1</span>]"]
                       );
my $blankline_html5 = $local_lt . "br /" . $local_gt;

# XML, docbook format
my @highlights_xml = (
                      ["([^=])\\\"([^\\\"<]+)\\\"", "\$1<quote>\$2</quote>"],
                      [$type_constant, "<constant>\$1</constant>"],
                      [$type_struct_xml, "<structname>\$1</structname>"],
                      [$type_param, "<parameter>\$1</parameter>"],
                      [$type_func, "<function>\$1</function>"],
                      [$type_env, "<envar>\$1</envar>"]
                     );
my $blankline_xml = $local_lt . "/para" . $local_gt . $local_lt . "para" . $local_gt . "\n";

# gnome, docbook format
my @highlights_gnome = (
                        [$type_constant, "<replaceable class=\"option\">\$1</replaceable>"],
                        [$type_func, "<function>\$1</function>"],
                        [$type_struct, "<structname>\$1</structname>"],
                        [$type_env, "<envar>\$1</envar>"],
                        [$type_param, "<parameter>\$1</parameter>" ]
                       );
my $blankline_gnome = "</para><para>\n";

# these are pretty rough
my @highlights_man = (
                      [$type_constant, "\$1"],
                      [$type_func, "\\\\fB\$1\\\\fP"],
                      [$type_struct, "\\\\fI\$1\\\\fP"],
                      [$type_param, "\\\\fI\$1\\\\fP"]
                     );
my $blankline_man = "";

# text-mode
my @highlights_text = (
                       [$type_constant, "\$1"],
                       [$type_func, "\$1"],
                       [$type_struct, "\$1"],
                       [$type_param, "\$1"]
                      );
my $blankline_text = "";

# rst-mode
my @highlights_rst = (
                       [$type_constant, "``\$1``"],
                       # Note: need to escape () to avoid func matching later
                       [$type_member_func, "\\:c\\:type\\:`\$1\$2\\\\(\\\\) <\$1>`"],
                       [$type_member, "\\:c\\:type\\:`\$1\$2 <\$1>`"],
                       [$type_fp_param, "**\$1\\\\(\\\\)**"],
                       [$type_func, "\\:c\\:func\\:`\$1()`"],
                       [$type_struct_full, "\\:c\\:type\\:`\$1 \$2 <\$2>`"],
                       [$type_enum_full, "\\:c\\:type\\:`\$1 \$2 <\$2>`"],
                       [$type_typedef_full, "\\:c\\:type\\:`\$1 \$2 <\$2>`"],
                       [$type_union_full, "\\:c\\:type\\:`\$1 \$2 <\$2>`"],
                       # in rst this can refer to any type
                       [$type_struct, "\\:c\\:type\\:`\$1`"],
                       [$type_param, "**\$1**"]
                      );
my $blankline_rst = "\n";

# list mode
my @highlights_list = (
                       [$type_constant, "\$1"],
                       [$type_func, "\$1"],
                       [$type_struct, "\$1"],
                       [$type_param, "\$1"]
                      );
my $blankline_list = "";

# read arguments
if ($#ARGV == -1) {
    usage();
}

my $kernelversion;
my $dohighlight = "";

my $verbose = 0;
my $output_mode = "man";
my $output_preformatted = 0;
my $no_doc_sections = 0;
my $enable_lineno = 0;
my @highlights = @highlights_man;
my $blankline = $blankline_man;
my $modulename = "Kernel API";

use constant {
    OUTPUT_ALL          => 0, # output all symbols and doc sections
    OUTPUT_INCLUDE      => 1, # output only specified symbols
    OUTPUT_EXCLUDE      => 2, # output everything except specified symbols
    OUTPUT_EXPORTED     => 3, # output exported symbols
    OUTPUT_INTERNAL     => 4, # output non-exported symbols
};
my $output_selection = OUTPUT_ALL;
my $show_not_found = 0;

my @export_file_list;

my @build_time;
if (defined($ENV{'KBUILD_BUILD_TIMESTAMP'}) &&
    (my $seconds = `date -d"${ENV{'KBUILD_BUILD_TIMESTAMP'}}" +%s`) ne '') {
    @build_time = gmtime($seconds);
} else {
    @build_time = localtime;
}

my $man_date = ('January', 'February', 'March', 'April', 'May', 'June',
		'July', 'August', 'September', 'October',
		'November', 'December')[$build_time[4]] .
    " " . ($build_time[5]+1900);

# Essentially these are globals.
# They probably want to be tidied up, made more localised or something.
# CAVEAT EMPTOR! Some of the others I localised may not want to be, which
# could cause "use of undefined value" or other bugs.
my ($function, %function_table, %parametertypes, $declaration_purpose); my $declaration_start_line; my ($type, $declaration_name, $return_type); my ($newsection, $newcontents, $prototype, $brcount, %source_map); if (defined($ENV{'KBUILD_VERBOSE'})) { $verbose = "$ENV{'KBUILD_VERBOSE'}"; } # Generated docbook code is inserted in a template at a point where # docbook v3.1 requires a non-zero sequence of RefEntry's; see: # http://www.oasis-open.org/docbook/documentation/reference/html/refentry.html # We keep track of number of generated entries and generate a dummy # if needs be to ensure the expanded template can be postprocessed # into html. my $section_counter = 0; my $lineprefix=""; # Parser states use constant { STATE_NORMAL => 0, # normal code STATE_NAME => 1, # looking for function name STATE_FIELD => 2, # scanning field start STATE_PROTO => 3, # scanning prototype STATE_DOCBLOCK => 4, # documentation block STATE_INLINE => 5, # gathering documentation outside main block }; my $state; my $in_doc_sect; # Inline documentation state use constant { STATE_INLINE_NA => 0, # not applicable ($state != STATE_INLINE) STATE_INLINE_NAME => 1, # looking for member name (@foo:) STATE_INLINE_TEXT => 2, # looking for member documentation STATE_INLINE_END => 3, # done STATE_INLINE_ERROR => 4, # error - Comment without header was found. # Spit a warning as it's not # proper kernel-doc and ignore the rest. }; my $inline_doc_state; #declaration types: can be # 'function', 'struct', 'union', 'enum', 'typedef' my $decl_type; my $doc_start = '^/\*\*\s*$'; # Allow whitespace at end of comment start. my $doc_end = '\*/'; my $doc_com = '\s*\*\s*'; my $doc_com_body = '\s*\* ?'; my $doc_decl = $doc_com . '(\w+)'; # @params and a strictly limited set of supported section names my $doc_sect = $doc_com . '\s*(\@[.\w]+|\@\.\.\.|description|context|returns?|notes?|examples?)\s*:(.*)'; my $doc_content = $doc_com_body . '(.*)'; my $doc_block = $doc_com . 'DOC:\s*(.*)?'; my $doc_inline_start = '^\s*/\*\*\s*$'; my $doc_inline_sect = '\s*\*\s*(@[\w\s]+):(.*)'; my $doc_inline_end = '^\s*\*/\s*$'; my $doc_inline_oneline = '^\s*/\*\*\s*(@[\w\s]+):\s*(.*)\s*\*/\s*$'; my $export_symbol = '^\s*EXPORT_SYMBOL(_GPL)?\s*\(\s*(\w+)\s*\)\s*;'; my %parameterdescs; my %parameterdesc_start_lines; my @parameterlist; my %sections; my @sectionlist; my %section_start_lines; my $sectcheck; my $struct_actual; my $contents = ""; my $new_start_line = 0; # the canonical section names. see also $doc_sect above. 
my $section_default = "Description"; # default section my $section_intro = "Introduction"; my $section = $section_default; my $section_context = "Context"; my $section_return = "Return"; my $undescribed = "-- undescribed --"; reset_state(); while ($ARGV[0] =~ m/^-(.*)/) { my $cmd = shift @ARGV; if ($cmd eq "-html") { $output_mode = "html"; @highlights = @highlights_html; $blankline = $blankline_html; } elsif ($cmd eq "-html5") { $output_mode = "html5"; @highlights = @highlights_html5; $blankline = $blankline_html5; } elsif ($cmd eq "-man") { $output_mode = "man"; @highlights = @highlights_man; $blankline = $blankline_man; } elsif ($cmd eq "-text") { $output_mode = "text"; @highlights = @highlights_text; $blankline = $blankline_text; } elsif ($cmd eq "-rst") { $output_mode = "rst"; @highlights = @highlights_rst; $blankline = $blankline_rst; } elsif ($cmd eq "-docbook") { $output_mode = "xml"; @highlights = @highlights_xml; $blankline = $blankline_xml; } elsif ($cmd eq "-list") { $output_mode = "list"; @highlights = @highlights_list; $blankline = $blankline_list; } elsif ($cmd eq "-gnome") { $output_mode = "gnome"; @highlights = @highlights_gnome; $blankline = $blankline_gnome; } elsif ($cmd eq "-module") { # not needed for XML, inherits from calling document $modulename = shift @ARGV; } elsif ($cmd eq "-function") { # to only output specific functions $output_selection = OUTPUT_INCLUDE; $function = shift @ARGV; $function_table{$function} = 1; } elsif ($cmd eq "-nofunction") { # output all except specific functions $output_selection = OUTPUT_EXCLUDE; $function = shift @ARGV; $function_table{$function} = 1; } elsif ($cmd eq "-export") { # only exported symbols $output_selection = OUTPUT_EXPORTED; %function_table = (); } elsif ($cmd eq "-internal") { # only non-exported symbols $output_selection = OUTPUT_INTERNAL; %function_table = (); } elsif ($cmd eq "-export-file") { my $file = shift @ARGV; push(@export_file_list, $file); } elsif ($cmd eq "-v") { $verbose = 1; } elsif (($cmd eq "-h") || ($cmd eq "--help")) { usage(); } elsif ($cmd eq '-no-doc-sections') { $no_doc_sections = 1; } elsif ($cmd eq '-enable-lineno') { $enable_lineno = 1; } elsif ($cmd eq '-show-not-found') { $show_not_found = 1; } } # continue execution near EOF; # get kernel version from env sub get_kernel_version() { my $version = 'unknown kernel version'; if (defined($ENV{'KERNELVERSION'})) { $version = $ENV{'KERNELVERSION'}; } return $version; } # sub print_lineno { my $lineno = shift; if ($enable_lineno && defined($lineno)) { print "#define LINENO " . $lineno . "\n"; } } ## # dumps section contents to arrays/hashes intended for that purpose. # sub dump_section { my $file = shift; my $name = shift; my $contents = join "\n", @_; if ($name =~ m/$type_param/) { $name = $1; $parameterdescs{$name} = $contents; $sectcheck = $sectcheck . $name . " "; $parameterdesc_start_lines{$name} = $new_start_line; $new_start_line = 0; } elsif ($name eq "@\.\.\.") { $name = "..."; $parameterdescs{$name} = $contents; $sectcheck = $sectcheck . $name . " "; $parameterdesc_start_lines{$name} = $new_start_line; $new_start_line = 0; } else { if (defined($sections{$name}) && ($sections{$name} ne "")) { # Only warn on user specified duplicate section names. 
	    if ($name ne $section_default) {
		print STDERR "${file}:$.: warning: duplicate section name '$name'\n";
		++$warnings;
	    }
	    $sections{$name} .= $contents;
	} else {
	    $sections{$name} = $contents;
	    push @sectionlist, $name;
	    $section_start_lines{$name} = $new_start_line;
	    $new_start_line = 0;
	}
    }
}

##
# dump DOC: section after checking that it should go out
#
sub dump_doc_section {
    my $file = shift;
    my $name = shift;
    my $contents = join "\n", @_;

    if ($no_doc_sections) {
        return;
    }

    if (($output_selection == OUTPUT_ALL) ||
	($output_selection == OUTPUT_INCLUDE &&
	 defined($function_table{$name})) ||
	($output_selection == OUTPUT_EXCLUDE &&
	 !defined($function_table{$name})))
    {
	dump_section($file, $name, $contents);
	output_blockhead({'sectionlist' => \@sectionlist,
			  'sections' => \%sections,
			  'module' => $modulename,
			  'content-only' => ($output_selection != OUTPUT_ALL), });
    }
}

##
# output function
#
# parameterdescs, a hash.
#  function => "function name"
#  parameterlist => @list of parameters
#  parameterdescs => %parameter descriptions
#  sectionlist => @list of sections
#  sections => %section descriptions
#
sub output_highlight {
    my $contents = join "\n",@_;
    my $line;

#   DEBUG
#   if (!defined $contents) {
#	use Carp;
#	confess "output_highlight got called with no args?\n";
#   }

    if ($output_mode eq "html" || $output_mode eq "html5" ||
	$output_mode eq "xml") {
	$contents = local_unescape($contents);
	# convert data read & converted thru xml_escape() into &xyz; format:
	$contents =~ s/\\\\\\/\&/g;
    }
#   print STDERR "contents b4:$contents\n";
    eval $dohighlight;
    die $@ if $@;
#   print STDERR "contents af:$contents\n";

#   strip whitespaces when generating html5
    if ($output_mode eq "html5") {
	$contents =~ s/^\s+//;
	$contents =~ s/\s+$//;
    }
    foreach $line (split "\n", $contents) {
	if (! $output_preformatted) {
	    $line =~ s/^\s*//;
	}
	if ($line eq ""){
	    if (! $output_preformatted) {
		print $lineprefix, local_unescape($blankline);
	    }
	} else {
	    $line =~ s/\\\\\\/\&/g;
	    if ($output_mode eq "man" && substr($line, 0, 1) eq ".") {
		print "\\&$line";
	    } else {
		print $lineprefix, $line;
	    }
	}
	print "\n";
    }
}

# output sections in html
sub output_section_html(%) {
    my %args = %{$_[0]};
    my $section;

    foreach $section (@{$args{'sectionlist'}}) {
	print "<h3>$section</h3>\n";
	print "<blockquote>\n";
	output_highlight($args{'sections'}{$section});
	print "</blockquote>\n";
    }
}

# output enum in html
sub output_enum_html(%) {
    my %args = %{$_[0]};
    my ($parameter);
    my $count;
    print "<h2>enum " . $args{'enum'} . "</h2>\n";

    print "<b>enum " . $args{'enum'} . "</b> {<br>\n";
    $count = 0;
    foreach $parameter (@{$args{'parameterlist'}}) {
	print " <b>" . $parameter . "</b>";
	if ($count != $#{$args{'parameterlist'}}) {
	    $count++;
	    print ",\n";
	}
	print "<br>";
    }
    print "};<br>\n";

    print "<h3>Constants</h3>\n";
    print "<dl>\n";
    foreach $parameter (@{$args{'parameterlist'}}) {
	print "<dt><b>" . $parameter . "</b>\n";
	print "<dd>";
	output_highlight($args{'parameterdescs'}{$parameter});
    }
    print "</dl>\n";
    output_section_html(@_);
    print "<hr>\n";
}

# output typedef in html
sub output_typedef_html(%) {
    my %args = %{$_[0]};
    my ($parameter);
    my $count;
    print "<h2>typedef " . $args{'typedef'} . "</h2>\n";

    print "<b>typedef " . $args{'typedef'} . "</b>\n";
    output_section_html(@_);
    print "<hr>\n";
}

# output struct in html
sub output_struct_html(%) {
    my %args = %{$_[0]};
    my ($parameter);

    print "<h2>" . $args{'type'} . " " . $args{'struct'} . " - " . $args{'purpose'} . "</h2>\n";
    print "<b>" . $args{'type'} . " " . $args{'struct'} . "</b> {<br>\n";
    foreach $parameter (@{$args{'parameterlist'}}) {
	if ($parameter =~ /^#/) {
		print "$parameter<br>\n";
		next;
	}
	my $parameter_name = $parameter;
	$parameter_name =~ s/\[.*//;

	($args{'parameterdescs'}{$parameter_name} ne $undescribed) || next;
	$type = $args{'parametertypes'}{$parameter};
	if ($type =~ m/([^\(]*\(\*)\s*\)\s*\(([^\)]*)\)/) {
	    # pointer-to-function
	    print "&nbsp; &nbsp; <i>$1</i><b>$parameter</b>) (<i>$2</i>);<br>\n";
	} elsif ($type =~ m/^(.*?)\s*(:.*)/) {
	    # bitfield
	    print "&nbsp; &nbsp; <i>$1</i> <b>$parameter</b>$2;<br>\n";
	} else {
	    print "&nbsp; &nbsp; <i>$type</i> <b>$parameter</b>;<br>\n";
	}
    }
    print "};<br>\n";

    print "<h3>Members</h3>\n";
    print "<dl>\n";
    foreach $parameter (@{$args{'parameterlist'}}) {
	($parameter =~ /^#/) && next;

	my $parameter_name = $parameter;
	$parameter_name =~ s/\[.*//;

	($args{'parameterdescs'}{$parameter_name} ne $undescribed) || next;
	print "<dt><b>" . $parameter . "</b>\n";
	print "<dd>";
	output_highlight($args{'parameterdescs'}{$parameter_name});
    }
    print "</dl>\n";
    output_section_html(@_);
    print "<hr>\n";
}

# output function in html
sub output_function_html(%) {
    my %args = %{$_[0]};
    my ($parameter, $section);
    my $count;

    print "<h2>" . $args{'function'} . " - " . $args{'purpose'} . "</h2>\n";
    print "<i>" . $args{'functiontype'} . "</i>\n";
    print "<b>" . $args{'function'} . "</b>\n";
    print "(";
    $count = 0;
    foreach $parameter (@{$args{'parameterlist'}}) {
	$type = $args{'parametertypes'}{$parameter};
	if ($type =~ m/([^\(]*\(\*)\s*\)\s*\(([^\)]*)\)/) {
	    # pointer-to-function
	    print "<i>$1</i><b>$parameter</b>) (<i>$2</i>)";
	} else {
	    print "<i>" . $type . "</i> <b>" . $parameter . "</b>";
	}
	if ($count != $#{$args{'parameterlist'}}) {
	    $count++;
	    print ",\n";
	}
    }
    print ")\n";

    print "<h3>Arguments</h3>\n";
    print "<dl>\n";
    foreach $parameter (@{$args{'parameterlist'}}) {
	my $parameter_name = $parameter;
	$parameter_name =~ s/\[.*//;

	($args{'parameterdescs'}{$parameter_name} ne $undescribed) || next;
	print "<dt><b>" . $parameter . "</b>\n";
	print "<dd>";
	output_highlight($args{'parameterdescs'}{$parameter_name});
    }
    print "</dl>\n";
    output_section_html(@_);
    print "<hr>\n";
}

# output DOC: block header in html
sub output_blockhead_html(%) {
    my %args = %{$_[0]};
    my ($parameter, $section);
    my $count;

    foreach $section (@{$args{'sectionlist'}}) {
	print "<h3>$section</h3>\n";
	print "<ul>\n";
	output_highlight($args{'sections'}{$section});
	print "</ul>\n";
    }
    print "<hr>\n";
}

# output sections in html5
sub output_section_html5(%) {
    my %args = %{$_[0]};
    my $section;

    foreach $section (@{$args{'sectionlist'}}) {
	print "<section>\n";
	print "<h1>$section</h1>\n";
	print "<p>\n";
	output_highlight($args{'sections'}{$section});
	print "</p>\n";
	print "</section>\n";
    }
}

# output enum in html5
sub output_enum_html5(%) {
    my %args = %{$_[0]};
    my ($parameter);
    my $count;
    my $html5id;

    $html5id = $args{'enum'};
    $html5id =~ s/[^a-zA-Z0-9\-]+/_/g;
    print "<article class=\"enum\" id=\"enum:". $html5id . "\">";
    print "<h1>enum " . $args{'enum'} . "</h1>\n";
    print "<ol class=\"code\">\n";
    print "<li>";
    print "<span class=\"keyword\">enum</span> ";
    print "<span class=\"identifier\">" . $args{'enum'} . "</span> {";
    print "</li>\n";
    $count = 0;
    foreach $parameter (@{$args{'parameterlist'}}) {
	print "<li class=\"indent\">";
	print "<span class=\"param\">" . $parameter . "</span>";
	if ($count != $#{$args{'parameterlist'}}) {
	    $count++;
	    print ",";
	}
	print "</li>\n";
    }
    print "<li>};</li>\n";
    print "</ol>\n";

    print "<section>\n";
    print "<h1>Constants</h1>\n";
    print "<dl>\n";
    foreach $parameter (@{$args{'parameterlist'}}) {
	print "<dt>" . $parameter . "</dt>\n";
	print "<dd>";
	output_highlight($args{'parameterdescs'}{$parameter});
	print "</dd>\n";
    }
    print "</dl>\n";
    print "</section>\n";
    output_section_html5(@_);
    print "</article>\n";
}

# output typedef in html5
sub output_typedef_html5(%) {
    my %args = %{$_[0]};
    my ($parameter);
    my $count;
    my $html5id;

    $html5id = $args{'typedef'};
    $html5id =~ s/[^a-zA-Z0-9\-]+/_/g;
    print "<article class=\"typedef\" id=\"typedef:" . $html5id . "\">\n";
    print "<h1>typedef " . $args{'typedef'} . "</h1>\n";

    print "<ol class=\"code\">\n";
    print "<li>";
    print "<span class=\"keyword\">typedef</span> ";
    print "<span class=\"identifier\">" . $args{'typedef'} . "</span>";
    print "</li>\n";
    print "</ol>\n";
    output_section_html5(@_);
    print "</article>\n";
}

# output struct in html5
sub output_struct_html5(%) {
    my %args = %{$_[0]};
    my ($parameter);
    my $html5id;

    $html5id = $args{'struct'};
    $html5id =~ s/[^a-zA-Z0-9\-]+/_/g;
    print "<article class=\"struct\" id=\"struct:". $html5id . "\">\n";
    print "<hgroup>\n";
    print "<h1>" . $args{'type'} . " " . $args{'struct'} . "</h1>";
    print "<h2>". $args{'purpose'} . "</h2>\n";
    print "</hgroup>\n";
    print "<ol class=\"code\">\n";
    print "<li>";
    print "<span class=\"type\">" . $args{'type'} . "</span> ";
    print "<span class=\"identifier\">" . $args{'struct'} . "</span> {";
    print "</li>\n";
    foreach $parameter (@{$args{'parameterlist'}}) {
	print "<li class=\"indent\">";
	if ($parameter =~ /^#/) {
	    print "<span class=\"param\">" . $parameter ."</span>\n";
	    print "</li>\n";
	    next;
	}
	my $parameter_name = $parameter;
	$parameter_name =~ s/\[.*//;

	($args{'parameterdescs'}{$parameter_name} ne $undescribed) || next;
	$type = $args{'parametertypes'}{$parameter};
	if ($type =~ m/([^\(]*\(\*)\s*\)\s*\(([^\)]*)\)/) {
	    # pointer-to-function
	    print "<span class=\"type\">$1</span> ";
	    print "<span class=\"param\">$parameter</span>";
	    print "<span class=\"type\">)</span> ";
	    print "(<span class=\"args\">$2</span>);";
	} elsif ($type =~ m/^(.*?)\s*(:.*)/) {
	    # bitfield
	    print "<span class=\"type\">$1</span> ";
	    print "<span class=\"param\">$parameter</span>";
	    print "<span class=\"bits\">$2</span>;";
	} else {
	    print "<span class=\"type\">$type</span> ";
	    print "<span class=\"param\">$parameter</span>;";
	}
	print "</li>\n";
    }
    print "<li>};</li>\n";
    print "</ol>\n";

    print "<section>\n";
    print "<h1>Members</h1>\n";
    print "<dl>\n";
    foreach $parameter (@{$args{'parameterlist'}}) {
	($parameter =~ /^#/) && next;

	my $parameter_name = $parameter;
	$parameter_name =~ s/\[.*//;

	($args{'parameterdescs'}{$parameter_name} ne $undescribed) || next;
	print "<dt>" . $parameter . "</dt>\n";
	print "<dd>";
	output_highlight($args{'parameterdescs'}{$parameter_name});
	print "</dd>\n";
    }
    print "</dl>\n";
    print "</section>\n";
    output_section_html5(@_);
    print "</article>\n";
}

# output function in html5
sub output_function_html5(%) {
    my %args = %{$_[0]};
    my ($parameter, $section);
    my $count;
    my $html5id;

    $html5id = $args{'function'};
    $html5id =~ s/[^a-zA-Z0-9\-]+/_/g;
    print "<article class=\"function\" id=\"func:". $html5id . "\">\n";
    print "<hgroup>\n";
    print "<h1>" . $args{'function'} . "</h1>";
    print "<h2>" . $args{'purpose'} . "</h2>\n";
    print "</hgroup>\n";
    print "<ol class=\"code\">\n";
    print "<li>";
    print "<span class=\"type\">" . $args{'functiontype'} . "</span> ";
    print "<span class=\"identifier\">" . $args{'function'} . "</span> (";
    print "</li>";
    $count = 0;
    foreach $parameter (@{$args{'parameterlist'}}) {
	print "<li class=\"indent\">";
	$type = $args{'parametertypes'}{$parameter};
	if ($type =~ m/([^\(]*\(\*)\s*\)\s*\(([^\)]*)\)/) {
	    # pointer-to-function
	    print "<span class=\"type\">$1</span> ";
	    print "<span class=\"param\">$parameter</span>";
	    print "<span class=\"type\">)</span> ";
	    print "(<span class=\"args\">$2</span>)";
	} else {
	    print "<span class=\"type\">$type</span> ";
	    print "<span class=\"param\">$parameter</span>";
	}
	if ($count != $#{$args{'parameterlist'}}) {
	    $count++;
	    print ",";
	}
	print "</li>\n";
    }
    print "<li>)</li>\n";
    print "</ol>\n";

    print "<section>\n";
    print "<h1>Arguments</h1>\n";
    print "<p>\n";
    print "<dl>\n";
    foreach $parameter (@{$args{'parameterlist'}}) {
	my $parameter_name = $parameter;
	$parameter_name =~ s/\[.*//;

	($args{'parameterdescs'}{$parameter_name} ne $undescribed) || next;
	print "<dt>
" . $parameter . "
\n"; print "
"; output_highlight($args{'parameterdescs'}{$parameter_name}); print "
\n"; } print "
\n"; print "
\n"; output_section_html5(@_); print "
\n"; } # output DOC: block header in html5 sub output_blockhead_html5(%) { my %args = %{$_[0]}; my ($parameter, $section); my $count; my $html5id; foreach $section (@{$args{'sectionlist'}}) { $html5id = $section; $html5id =~ s/[^a-zA-Z0-9\-]+/_/g; print "
\n"; print "

$section

\n"; print "

\n"; output_highlight($args{'sections'}{$section}); print "

\n"; } print "
\n"; } sub output_section_xml(%) { my %args = %{$_[0]}; my $section; # print out each section $lineprefix=" "; foreach $section (@{$args{'sectionlist'}}) { print "\n"; print "$section\n"; if ($section =~ m/EXAMPLE/i) { print "\n"; $output_preformatted = 1; } else { print "\n"; } output_highlight($args{'sections'}{$section}); $output_preformatted = 0; if ($section =~ m/EXAMPLE/i) { print "\n"; } else { print "\n"; } print "\n"; } } # output function in XML DocBook sub output_function_xml(%) { my %args = %{$_[0]}; my ($parameter, $section); my $count; my $id; $id = "API-" . $args{'function'}; $id =~ s/[^A-Za-z0-9]/-/g; print "\n"; print "\n"; print " LINUX\n"; print " Kernel Hackers Manual\n"; print " $man_date\n"; print "\n"; print "\n"; print " " . $args{'function'} . "\n"; print " 9\n"; print " " . $kernelversion . "\n"; print "\n"; print "\n"; print " " . $args{'function'} . "\n"; print " \n"; print " "; output_highlight ($args{'purpose'}); print " \n"; print "\n"; print "\n"; print " Synopsis\n"; print " \n"; print " " . $args{'functiontype'} . " "; print "" . $args{'function'} . " \n"; $count = 0; if ($#{$args{'parameterlist'}} >= 0) { foreach $parameter (@{$args{'parameterlist'}}) { $type = $args{'parametertypes'}{$parameter}; if ($type =~ m/([^\(]*\(\*)\s*\)\s*\(([^\)]*)\)/) { # pointer-to-function print " $1$parameter)\n"; print " $2\n"; } else { print " " . $type; print " $parameter\n"; } } } else { print " \n"; } print " \n"; print "\n"; # print parameters print "\n Arguments\n"; if ($#{$args{'parameterlist'}} >= 0) { print " \n"; foreach $parameter (@{$args{'parameterlist'}}) { my $parameter_name = $parameter; $parameter_name =~ s/\[.*//; print " \n $parameter\n"; print " \n \n"; $lineprefix=" "; output_highlight($args{'parameterdescs'}{$parameter_name}); print " \n \n \n"; } print " \n"; } else { print " \n None\n \n"; } print "\n"; output_section_xml(@_); print "\n\n"; } # output struct in XML DocBook sub output_struct_xml(%) { my %args = %{$_[0]}; my ($parameter, $section); my $id; $id = "API-struct-" . $args{'struct'}; $id =~ s/[^A-Za-z0-9]/-/g; print "\n"; print "\n"; print " LINUX\n"; print " Kernel Hackers Manual\n"; print " $man_date\n"; print "\n"; print "\n"; print " " . $args{'type'} . " " . $args{'struct'} . "\n"; print " 9\n"; print " " . $kernelversion . "\n"; print "\n"; print "\n"; print " " . $args{'type'} . " " . $args{'struct'} . "\n"; print " \n"; print " "; output_highlight ($args{'purpose'}); print " \n"; print "\n"; print "\n"; print " Synopsis\n"; print " \n"; print $args{'type'} . " " . $args{'struct'} . " {\n"; foreach $parameter (@{$args{'parameterlist'}}) { if ($parameter =~ /^#/) { my $prm = $parameter; # convert data read & converted thru xml_escape() into &xyz; format: # This allows us to have #define macros interspersed in a struct. $prm =~ s/\\\\\\/\&/g; print "$prm\n"; next; } my $parameter_name = $parameter; $parameter_name =~ s/\[.*//; defined($args{'parameterdescs'}{$parameter_name}) || next; ($args{'parameterdescs'}{$parameter_name} ne $undescribed) || next; $type = $args{'parametertypes'}{$parameter}; if ($type =~ m/([^\(]*\(\*)\s*\)\s*\(([^\)]*)\)/) { # pointer-to-function print " $1 $parameter) ($2);\n"; } elsif ($type =~ m/^(.*?)\s*(:.*)/) { # bitfield print " $1 $parameter$2;\n"; } else { print " " . $type . " " . $parameter . 
";\n"; } } print "};"; print " \n"; print "\n"; print " \n"; print " Members\n"; if ($#{$args{'parameterlist'}} >= 0) { print " \n"; foreach $parameter (@{$args{'parameterlist'}}) { ($parameter =~ /^#/) && next; my $parameter_name = $parameter; $parameter_name =~ s/\[.*//; defined($args{'parameterdescs'}{$parameter_name}) || next; ($args{'parameterdescs'}{$parameter_name} ne $undescribed) || next; print " "; print " $parameter\n"; print " \n"; output_highlight($args{'parameterdescs'}{$parameter_name}); print " \n"; print " \n"; } print " \n"; } else { print " \n None\n \n"; } print " \n"; output_section_xml(@_); print "\n\n"; } # output enum in XML DocBook sub output_enum_xml(%) { my %args = %{$_[0]}; my ($parameter, $section); my $count; my $id; $id = "API-enum-" . $args{'enum'}; $id =~ s/[^A-Za-z0-9]/-/g; print "\n"; print "\n"; print " LINUX\n"; print " Kernel Hackers Manual\n"; print " $man_date\n"; print "\n"; print "\n"; print " enum " . $args{'enum'} . "\n"; print " 9\n"; print " " . $kernelversion . "\n"; print "\n"; print "\n"; print " enum " . $args{'enum'} . "\n"; print " \n"; print " "; output_highlight ($args{'purpose'}); print " \n"; print "\n"; print "\n"; print " Synopsis\n"; print " \n"; print "enum " . $args{'enum'} . " {\n"; $count = 0; foreach $parameter (@{$args{'parameterlist'}}) { print " $parameter"; if ($count != $#{$args{'parameterlist'}}) { $count++; print ","; } print "\n"; } print "};"; print " \n"; print "\n"; print "\n"; print " Constants\n"; print " \n"; foreach $parameter (@{$args{'parameterlist'}}) { my $parameter_name = $parameter; $parameter_name =~ s/\[.*//; print " "; print " $parameter\n"; print " \n"; output_highlight($args{'parameterdescs'}{$parameter_name}); print " \n"; print " \n"; } print " \n"; print "\n"; output_section_xml(@_); print "\n\n"; } # output typedef in XML DocBook sub output_typedef_xml(%) { my %args = %{$_[0]}; my ($parameter, $section); my $id; $id = "API-typedef-" . $args{'typedef'}; $id =~ s/[^A-Za-z0-9]/-/g; print "\n"; print "\n"; print " LINUX\n"; print " Kernel Hackers Manual\n"; print " $man_date\n"; print "\n"; print "\n"; print " typedef " . $args{'typedef'} . "\n"; print " 9\n"; print "\n"; print "\n"; print " typedef " . $args{'typedef'} . "\n"; print " \n"; print " "; output_highlight ($args{'purpose'}); print " \n"; print "\n"; print "\n"; print " Synopsis\n"; print " typedef " . $args{'typedef'} . ";\n"; print "\n"; output_section_xml(@_); print "\n\n"; } # output in XML DocBook sub output_blockhead_xml(%) { my %args = %{$_[0]}; my ($parameter, $section); my $count; my $id = $args{'module'}; $id =~ s/[^A-Za-z0-9]/-/g; # print out each section $lineprefix=" "; foreach $section (@{$args{'sectionlist'}}) { if (!$args{'content-only'}) { print "\n $section\n"; } if ($section =~ m/EXAMPLE/i) { print "\n"; $output_preformatted = 1; } else { print "\n"; } output_highlight($args{'sections'}{$section}); $output_preformatted = 0; if ($section =~ m/EXAMPLE/i) { print "\n"; } else { print ""; } if (!$args{'content-only'}) { print "\n\n"; } } print "\n\n"; } # output in XML DocBook sub output_function_gnome { my %args = %{$_[0]}; my ($parameter, $section); my $count; my $id; $id = $args{'module'} . "-" . $args{'function'}; $id =~ s/[^A-Za-z0-9]/-/g; print "\n"; print " " . $args{'function'} . "\n"; print " \n"; print " " . $args{'functiontype'} . " "; print "" . $args{'function'} . 
" "; print "\n"; $count = 0; if ($#{$args{'parameterlist'}} >= 0) { foreach $parameter (@{$args{'parameterlist'}}) { $type = $args{'parametertypes'}{$parameter}; if ($type =~ m/([^\(]*\(\*)\s*\)\s*\(([^\)]*)\)/) { # pointer-to-function print " $1 $parameter)\n"; print " $2\n"; } else { print " " . $type; print " $parameter\n"; } } } else { print " \n"; } print " \n"; if ($#{$args{'parameterlist'}} >= 0) { print " \n"; print "\n"; print "\n"; print "\n"; print "\n"; foreach $parameter (@{$args{'parameterlist'}}) { my $parameter_name = $parameter; $parameter_name =~ s/\[.*//; print " $parameter\n"; print " \n"; $lineprefix=" "; output_highlight($args{'parameterdescs'}{$parameter_name}); print " \n"; } print " \n"; } else { print " \n None\n \n"; } # print out each section $lineprefix=" "; foreach $section (@{$args{'sectionlist'}}) { print "\n $section\n"; if ($section =~ m/EXAMPLE/i) { print "\n"; $output_preformatted = 1; } else { } print "\n"; output_highlight($args{'sections'}{$section}); $output_preformatted = 0; print "\n"; if ($section =~ m/EXAMPLE/i) { print "\n"; } else { } print " \n"; } print "\n\n"; } ## # output function in man sub output_function_man(%) { my %args = %{$_[0]}; my ($parameter, $section); my $count; print ".TH \"$args{'function'}\" 9 \"$args{'function'}\" \"$man_date\" \"Kernel Hacker's Manual\" LINUX\n"; print ".SH NAME\n"; print $args{'function'} . " \\- " . $args{'purpose'} . "\n"; print ".SH SYNOPSIS\n"; if ($args{'functiontype'} ne "") { print ".B \"" . $args{'functiontype'} . "\" " . $args{'function'} . "\n"; } else { print ".B \"" . $args{'function'} . "\n"; } $count = 0; my $parenth = "("; my $post = ","; foreach my $parameter (@{$args{'parameterlist'}}) { if ($count == $#{$args{'parameterlist'}}) { $post = ");"; } $type = $args{'parametertypes'}{$parameter}; if ($type =~ m/([^\(]*\(\*)\s*\)\s*\(([^\)]*)\)/) { # pointer-to-function print ".BI \"" . $parenth . $1 . "\" " . $parameter . " \") (" . $2 . ")" . $post . "\"\n"; } else { $type =~ s/([^\*])$/$1 /; print ".BI \"" . $parenth . $type . "\" " . $parameter . " \"" . $post . "\"\n"; } $count++; $parenth = ""; } print ".SH ARGUMENTS\n"; foreach $parameter (@{$args{'parameterlist'}}) { my $parameter_name = $parameter; $parameter_name =~ s/\[.*//; print ".IP \"" . $parameter . "\" 12\n"; output_highlight($args{'parameterdescs'}{$parameter_name}); } foreach $section (@{$args{'sectionlist'}}) { print ".SH \"", uc $section, "\"\n"; output_highlight($args{'sections'}{$section}); } } ## # output enum in man sub output_enum_man(%) { my %args = %{$_[0]}; my ($parameter, $section); my $count; print ".TH \"$args{'module'}\" 9 \"enum $args{'enum'}\" \"$man_date\" \"API Manual\" LINUX\n"; print ".SH NAME\n"; print "enum " . $args{'enum'} . " \\- " . $args{'purpose'} . "\n"; print ".SH SYNOPSIS\n"; print "enum " . $args{'enum'} . " {\n"; $count = 0; foreach my $parameter (@{$args{'parameterlist'}}) { print ".br\n.BI \" $parameter\"\n"; if ($count == $#{$args{'parameterlist'}}) { print "\n};\n"; last; } else { print ", \n.br\n"; } $count++; } print ".SH Constants\n"; foreach $parameter (@{$args{'parameterlist'}}) { my $parameter_name = $parameter; $parameter_name =~ s/\[.*//; print ".IP \"" . $parameter . 
"\" 12\n"; output_highlight($args{'parameterdescs'}{$parameter_name}); } foreach $section (@{$args{'sectionlist'}}) { print ".SH \"$section\"\n"; output_highlight($args{'sections'}{$section}); } } ## # output struct in man sub output_struct_man(%) { my %args = %{$_[0]}; my ($parameter, $section); print ".TH \"$args{'module'}\" 9 \"" . $args{'type'} . " " . $args{'struct'} . "\" \"$man_date\" \"API Manual\" LINUX\n"; print ".SH NAME\n"; print $args{'type'} . " " . $args{'struct'} . " \\- " . $args{'purpose'} . "\n"; print ".SH SYNOPSIS\n"; print $args{'type'} . " " . $args{'struct'} . " {\n.br\n"; foreach my $parameter (@{$args{'parameterlist'}}) { if ($parameter =~ /^#/) { print ".BI \"$parameter\"\n.br\n"; next; } my $parameter_name = $parameter; $parameter_name =~ s/\[.*//; ($args{'parameterdescs'}{$parameter_name} ne $undescribed) || next; $type = $args{'parametertypes'}{$parameter}; if ($type =~ m/([^\(]*\(\*)\s*\)\s*\(([^\)]*)\)/) { # pointer-to-function print ".BI \" " . $1 . "\" " . $parameter . " \") (" . $2 . ")" . "\"\n;\n"; } elsif ($type =~ m/^(.*?)\s*(:.*)/) { # bitfield print ".BI \" " . $1 . "\ \" " . $parameter . $2 . " \"" . "\"\n;\n"; } else { $type =~ s/([^\*])$/$1 /; print ".BI \" " . $type . "\" " . $parameter . " \"" . "\"\n;\n"; } print "\n.br\n"; } print "};\n.br\n"; print ".SH Members\n"; foreach $parameter (@{$args{'parameterlist'}}) { ($parameter =~ /^#/) && next; my $parameter_name = $parameter; $parameter_name =~ s/\[.*//; ($args{'parameterdescs'}{$parameter_name} ne $undescribed) || next; print ".IP \"" . $parameter . "\" 12\n"; output_highlight($args{'parameterdescs'}{$parameter_name}); } foreach $section (@{$args{'sectionlist'}}) { print ".SH \"$section\"\n"; output_highlight($args{'sections'}{$section}); } } ## # output typedef in man sub output_typedef_man(%) { my %args = %{$_[0]}; my ($parameter, $section); print ".TH \"$args{'module'}\" 9 \"$args{'typedef'}\" \"$man_date\" \"API Manual\" LINUX\n"; print ".SH NAME\n"; print "typedef " . $args{'typedef'} . " \\- " . $args{'purpose'} . "\n"; foreach $section (@{$args{'sectionlist'}}) { print ".SH \"$section\"\n"; output_highlight($args{'sections'}{$section}); } } sub output_blockhead_man(%) { my %args = %{$_[0]}; my ($parameter, $section); my $count; print ".TH \"$args{'module'}\" 9 \"$args{'module'}\" \"$man_date\" \"API Manual\" LINUX\n"; foreach $section (@{$args{'sectionlist'}}) { print ".SH \"$section\"\n"; output_highlight($args{'sections'}{$section}); } } ## # output in text sub output_function_text(%) { my %args = %{$_[0]}; my ($parameter, $section); my $start; print "Name:\n\n"; print $args{'function'} . " - " . $args{'purpose'} . "\n"; print "\nSynopsis:\n\n"; if ($args{'functiontype'} ne "") { $start = $args{'functiontype'} . " " . $args{'function'} . " ("; } else { $start = $args{'function'} . " ("; } print $start; my $count = 0; foreach my $parameter (@{$args{'parameterlist'}}) { $type = $args{'parametertypes'}{$parameter}; if ($type =~ m/([^\(]*\(\*)\s*\)\s*\(([^\)]*)\)/) { # pointer-to-function print $1 . $parameter . ") (" . $2; } else { print $type . " " . $parameter; } if ($count != $#{$args{'parameterlist'}}) { $count++; print ",\n"; print " " x length($start); } else { print ");\n\n"; } } print "Arguments:\n\n"; foreach $parameter (@{$args{'parameterlist'}}) { my $parameter_name = $parameter; $parameter_name =~ s/\[.*//; print $parameter . "\n\t" . $args{'parameterdescs'}{$parameter_name} . 
"\n"; } output_section_text(@_); } #output sections in text sub output_section_text(%) { my %args = %{$_[0]}; my $section; print "\n"; foreach $section (@{$args{'sectionlist'}}) { print "$section:\n\n"; output_highlight($args{'sections'}{$section}); } print "\n\n"; } # output enum in text sub output_enum_text(%) { my %args = %{$_[0]}; my ($parameter); my $count; print "Enum:\n\n"; print "enum " . $args{'enum'} . " - " . $args{'purpose'} . "\n\n"; print "enum " . $args{'enum'} . " {\n"; $count = 0; foreach $parameter (@{$args{'parameterlist'}}) { print "\t$parameter"; if ($count != $#{$args{'parameterlist'}}) { $count++; print ","; } print "\n"; } print "};\n\n"; print "Constants:\n\n"; foreach $parameter (@{$args{'parameterlist'}}) { print "$parameter\n\t"; print $args{'parameterdescs'}{$parameter} . "\n"; } output_section_text(@_); } # output typedef in text sub output_typedef_text(%) { my %args = %{$_[0]}; my ($parameter); my $count; print "Typedef:\n\n"; print "typedef " . $args{'typedef'} . " - " . $args{'purpose'} . "\n"; output_section_text(@_); } # output struct as text sub output_struct_text(%) { my %args = %{$_[0]}; my ($parameter); print $args{'type'} . " " . $args{'struct'} . " - " . $args{'purpose'} . "\n\n"; print $args{'type'} . " " . $args{'struct'} . " {\n"; foreach $parameter (@{$args{'parameterlist'}}) { if ($parameter =~ /^#/) { print "$parameter\n"; next; } my $parameter_name = $parameter; $parameter_name =~ s/\[.*//; ($args{'parameterdescs'}{$parameter_name} ne $undescribed) || next; $type = $args{'parametertypes'}{$parameter}; if ($type =~ m/([^\(]*\(\*)\s*\)\s*\(([^\)]*)\)/) { # pointer-to-function print "\t$1 $parameter) ($2);\n"; } elsif ($type =~ m/^(.*?)\s*(:.*)/) { # bitfield print "\t$1 $parameter$2;\n"; } else { print "\t" . $type . " " . $parameter . ";\n"; } } print "};\n\n"; print "Members:\n\n"; foreach $parameter (@{$args{'parameterlist'}}) { ($parameter =~ /^#/) && next; my $parameter_name = $parameter; $parameter_name =~ s/\[.*//; ($args{'parameterdescs'}{$parameter_name} ne $undescribed) || next; print "$parameter\n\t"; print $args{'parameterdescs'}{$parameter_name} . "\n"; } print "\n"; output_section_text(@_); } sub output_blockhead_text(%) { my %args = %{$_[0]}; my ($parameter, $section); foreach $section (@{$args{'sectionlist'}}) { print " $section:\n"; print " -> "; output_highlight($args{'sections'}{$section}); } } ## # output in restructured text # # # This could use some work; it's used to output the DOC: sections, and # starts by putting out the name of the doc section itself, but that tends # to duplicate a header already in the template file. # sub output_blockhead_rst(%) { my %args = %{$_[0]}; my ($parameter, $section); foreach $section (@{$args{'sectionlist'}}) { if ($output_selection != OUTPUT_INCLUDE) { print "**$section**\n\n"; } print_lineno($section_start_lines{$section}); output_highlight_rst($args{'sections'}{$section}); print "\n"; } } sub output_highlight_rst { my $contents = join "\n",@_; my $line; # undo the evil effects of xml_escape() earlier $contents = xml_unescape($contents); eval $dohighlight; die $@ if $@; foreach $line (split "\n", $contents) { print $lineprefix . $line . "\n"; } } sub output_function_rst(%) { my %args = %{$_[0]}; my ($parameter, $section); my $oldprefix = $lineprefix; my $start = ""; if ($args{'typedef'}) { print ".. c:type:: ". $args{'function'} . 
"\n\n"; print_lineno($declaration_start_line); print " **Typedef**: "; $lineprefix = ""; output_highlight_rst($args{'purpose'}); $start = "\n\n**Syntax**\n\n ``"; } else { print ".. c:function:: "; } if ($args{'functiontype'} ne "") { $start .= $args{'functiontype'} . " " . $args{'function'} . " ("; } else { $start .= $args{'function'} . " ("; } print $start; my $count = 0; foreach my $parameter (@{$args{'parameterlist'}}) { if ($count ne 0) { print ", "; } $count++; $type = $args{'parametertypes'}{$parameter}; if ($type =~ m/([^\(]*\(\*)\s*\)\s*\(([^\)]*)\)/) { # pointer-to-function print $1 . $parameter . ") (" . $2; } else { print $type . " " . $parameter; } } if ($args{'typedef'}) { print ");``\n\n"; } else { print ")\n\n"; print_lineno($declaration_start_line); $lineprefix = " "; output_highlight_rst($args{'purpose'}); print "\n"; } print "**Parameters**\n\n"; $lineprefix = " "; foreach $parameter (@{$args{'parameterlist'}}) { my $parameter_name = $parameter; #$parameter_name =~ s/\[.*//; $type = $args{'parametertypes'}{$parameter}; if ($type ne "") { print "``$type $parameter``\n"; } else { print "``$parameter``\n"; } print_lineno($parameterdesc_start_lines{$parameter_name}); if (defined($args{'parameterdescs'}{$parameter_name}) && $args{'parameterdescs'}{$parameter_name} ne $undescribed) { output_highlight_rst($args{'parameterdescs'}{$parameter_name}); } else { print " *undescribed*\n"; } print "\n"; } $lineprefix = $oldprefix; output_section_rst(@_); } sub output_section_rst(%) { my %args = %{$_[0]}; my $section; my $oldprefix = $lineprefix; $lineprefix = ""; foreach $section (@{$args{'sectionlist'}}) { print "**$section**\n\n"; print_lineno($section_start_lines{$section}); output_highlight_rst($args{'sections'}{$section}); print "\n"; } print "\n"; $lineprefix = $oldprefix; } sub output_enum_rst(%) { my %args = %{$_[0]}; my ($parameter); my $oldprefix = $lineprefix; my $count; my $name = "enum " . $args{'enum'}; print "\n\n.. c:type:: " . $name . "\n\n"; print_lineno($declaration_start_line); $lineprefix = " "; output_highlight_rst($args{'purpose'}); print "\n"; print "**Constants**\n\n"; $lineprefix = " "; foreach $parameter (@{$args{'parameterlist'}}) { print "``$parameter``\n"; if ($args{'parameterdescs'}{$parameter} ne $undescribed) { output_highlight_rst($args{'parameterdescs'}{$parameter}); } else { print " *undescribed*\n"; } print "\n"; } $lineprefix = $oldprefix; output_section_rst(@_); } sub output_typedef_rst(%) { my %args = %{$_[0]}; my ($parameter); my $oldprefix = $lineprefix; my $name = "typedef " . $args{'typedef'}; print "\n\n.. c:type:: " . $name . "\n\n"; print_lineno($declaration_start_line); $lineprefix = " "; output_highlight_rst($args{'purpose'}); print "\n"; $lineprefix = $oldprefix; output_section_rst(@_); } sub output_struct_rst(%) { my %args = %{$_[0]}; my ($parameter); my $oldprefix = $lineprefix; my $name = $args{'type'} . " " . $args{'struct'}; print "\n\n.. c:type:: " . $name . "\n\n"; print_lineno($declaration_start_line); $lineprefix = " "; output_highlight_rst($args{'purpose'}); print "\n"; print "**Definition**\n\n"; print "::\n\n"; print " " . $args{'type'} . " " . $args{'struct'} . " {\n"; foreach $parameter (@{$args{'parameterlist'}}) { if ($parameter =~ /^#/) { print " " . 
"$parameter\n"; next; } my $parameter_name = $parameter; $parameter_name =~ s/\[.*//; ($args{'parameterdescs'}{$parameter_name} ne $undescribed) || next; $type = $args{'parametertypes'}{$parameter}; if ($type =~ m/([^\(]*\(\*)\s*\)\s*\(([^\)]*)\)/) { # pointer-to-function print " $1 $parameter) ($2);\n"; } elsif ($type =~ m/^(.*?)\s*(:.*)/) { # bitfield print " $1 $parameter$2;\n"; } else { print " " . $type . " " . $parameter . ";\n"; } } print " };\n\n"; print "**Members**\n\n"; $lineprefix = " "; foreach $parameter (@{$args{'parameterlist'}}) { ($parameter =~ /^#/) && next; my $parameter_name = $parameter; $parameter_name =~ s/\[.*//; ($args{'parameterdescs'}{$parameter_name} ne $undescribed) || next; $type = $args{'parametertypes'}{$parameter}; print_lineno($parameterdesc_start_lines{$parameter_name}); print "``" . $parameter . "``\n"; output_highlight_rst($args{'parameterdescs'}{$parameter_name}); print "\n"; } print "\n"; $lineprefix = $oldprefix; output_section_rst(@_); } ## list mode output functions sub output_function_list(%) { my %args = %{$_[0]}; print $args{'function'} . "\n"; } # output enum in list sub output_enum_list(%) { my %args = %{$_[0]}; print $args{'enum'} . "\n"; } # output typedef in list sub output_typedef_list(%) { my %args = %{$_[0]}; print $args{'typedef'} . "\n"; } # output struct as list sub output_struct_list(%) { my %args = %{$_[0]}; print $args{'struct'} . "\n"; } sub output_blockhead_list(%) { my %args = %{$_[0]}; my ($parameter, $section); foreach $section (@{$args{'sectionlist'}}) { print "DOC: $section\n"; } } ## # generic output function for all types (function, struct/union, typedef, enum); # calls the generated, variable output_ function name based on # functype and output_mode sub output_declaration { no strict 'refs'; my $name = shift; my $functype = shift; my $func = "output_${functype}_$output_mode"; if (($output_selection == OUTPUT_ALL) || (($output_selection == OUTPUT_INCLUDE || $output_selection == OUTPUT_EXPORTED) && defined($function_table{$name})) || (($output_selection == OUTPUT_EXCLUDE || $output_selection == OUTPUT_INTERNAL) && !($functype eq "function" && defined($function_table{$name})))) { &$func(@_); $section_counter++; } } ## # generic output function - calls the right one based on current output mode. sub output_blockhead { no strict 'refs'; my $func = "output_blockhead_" . $output_mode; &$func(@_); $section_counter++; } ## # takes a declaration (struct, union, enum, typedef) and # invokes the right handler. NOT called for functions. sub dump_declaration($$) { no strict 'refs'; my ($prototype, $file) = @_; my $func = "dump_" . 
$decl_type; &$func(@_); } sub dump_union($$) { dump_struct(@_); } sub dump_struct($$) { my $x = shift; my $file = shift; my $nested; if ($x =~ /(struct|union)\s+(\w+)\s*{(.*)}/) { my $decl_type = $1; $declaration_name = $2; my $members = $3; # ignore embedded structs or unions $members =~ s/({.*})//g; $nested = $1; # ignore members marked private: $members =~ s/\/\*\s*private:.*?\/\*\s*public:.*?\*\///gosi; $members =~ s/\/\*\s*private:.*//gosi; # strip comments: $members =~ s/\/\*.*?\*\///gos; $nested =~ s/\/\*.*?\*\///gos; # strip kmemcheck_bitfield_{begin,end}.*; $members =~ s/kmemcheck_bitfield_.*?;//gos; # strip attributes $members =~ s/__attribute__\s*\(\([a-z,_\*\s\(\)]*\)\)//i; $members =~ s/__aligned\s*\([^;]*\)//gos; $members =~ s/\s*CRYPTO_MINALIGN_ATTR//gos; # replace DECLARE_BITMAP $members =~ s/DECLARE_BITMAP\s*\(([^,)]+), ([^,)]+)\)/unsigned long $1\[BITS_TO_LONGS($2)\]/gos; create_parameterlist($members, ';', $file); check_sections($file, $declaration_name, $decl_type, $sectcheck, $struct_actual, $nested); output_declaration($declaration_name, 'struct', {'struct' => $declaration_name, 'module' => $modulename, 'parameterlist' => \@parameterlist, 'parameterdescs' => \%parameterdescs, 'parametertypes' => \%parametertypes, 'sectionlist' => \@sectionlist, 'sections' => \%sections, 'purpose' => $declaration_purpose, 'type' => $decl_type }); } else { print STDERR "${file}:$.: error: Cannot parse struct or union!\n"; ++$errors; } } sub dump_enum($$) { my $x = shift; my $file = shift; $x =~ s@/\*.*?\*/@@gos; # strip comments. # strip #define macros inside enums $x =~ s@#\s*((define|ifdef)\s+|endif)[^;]*;@@gos; if ($x =~ /enum\s+(\w+)\s*{(.*)}/) { $declaration_name = $1; my $members = $2; my %_members; foreach my $arg (split ',', $members) { $arg =~ s/^\s*(\w+).*/$1/; push @parameterlist, $arg; if (!$parameterdescs{$arg}) { $parameterdescs{$arg} = $undescribed; print STDERR "${file}:$.: warning: Enum value '$arg' ". "not described in enum '$declaration_name'\n"; } $_members{$arg} = 1; } while (my ($k, $v) = each %parameterdescs) { if (!exists($_members{$k})) { print STDERR "${file}:$.: warning: Excess enum value " . "'$k' description in '$declaration_name'\n"; } } output_declaration($declaration_name, 'enum', {'enum' => $declaration_name, 'module' => $modulename, 'parameterlist' => \@parameterlist, 'parameterdescs' => \%parameterdescs, 'sectionlist' => \@sectionlist, 'sections' => \%sections, 'purpose' => $declaration_purpose }); } else { print STDERR "${file}:$.: error: Cannot parse enum!\n"; ++$errors; } } sub dump_typedef($$) { my $x = shift; my $file = shift; $x =~ s@/\*.*?\*/@@gos; # strip comments. 
# Parse function prototypes if ($x =~ /typedef\s+(\w+)\s*\(\*\s*(\w\S+)\s*\)\s*\((.*)\);/ || $x =~ /typedef\s+(\w+)\s*(\w\S+)\s*\s*\((.*)\);/) { # Function typedefs $return_type = $1; $declaration_name = $2; my $args = $3; create_parameterlist($args, ',', $file); output_declaration($declaration_name, 'function', {'function' => $declaration_name, 'typedef' => 1, 'module' => $modulename, 'functiontype' => $return_type, 'parameterlist' => \@parameterlist, 'parameterdescs' => \%parameterdescs, 'parametertypes' => \%parametertypes, 'sectionlist' => \@sectionlist, 'sections' => \%sections, 'purpose' => $declaration_purpose }); return; } while (($x =~ /\(*.\)\s*;$/) || ($x =~ /\[*.\]\s*;$/)) { $x =~ s/\(*.\)\s*;$/;/; $x =~ s/\[*.\]\s*;$/;/; } if ($x =~ /typedef.*\s+(\w+)\s*;/) { $declaration_name = $1; output_declaration($declaration_name, 'typedef', {'typedef' => $declaration_name, 'module' => $modulename, 'sectionlist' => \@sectionlist, 'sections' => \%sections, 'purpose' => $declaration_purpose }); } else { print STDERR "${file}:$.: error: Cannot parse typedef!\n"; ++$errors; } } sub save_struct_actual($) { my $actual = shift; # strip all spaces from the actual param so that it looks like one string item $actual =~ s/\s*//g; $struct_actual = $struct_actual . $actual . " "; } sub create_parameterlist($$$) { my $args = shift; my $splitter = shift; my $file = shift; my $type; my $param; # temporarily replace commas inside function pointer definition while ($args =~ /(\([^\),]+),/) { $args =~ s/(\([^\),]+),/$1#/g; } foreach my $arg (split($splitter, $args)) { # strip comments $arg =~ s/\/\*.*\*\///; # strip leading/trailing spaces $arg =~ s/^\s*//; $arg =~ s/\s*$//; $arg =~ s/\s+/ /; if ($arg =~ /^#/) { # Treat preprocessor directive as a typeless variable just to fill # corresponding data structures "correctly". Catch it later in # output_* subs. push_parameter($arg, "", $file); } elsif ($arg =~ m/\(.+\)\s*\(/) { # pointer-to-function $arg =~ tr/#/,/; $arg =~ m/[^\(]+\(\*?\s*(\w*)\s*\)/; $param = $1; $type = $arg; $type =~ s/([^\(]+\(\*?)\s*$param/$1/; save_struct_actual($param); push_parameter($param, $type, $file); } elsif ($arg) { $arg =~ s/\s*:\s*/:/g; $arg =~ s/\s*\[/\[/g; my @args = split('\s*,\s*', $arg); if ($args[0] =~ m/\*/) { $args[0] =~ s/(\*+)\s*/ $1/; } my @first_arg; if ($args[0] =~ /^(.*\s+)(.*?\[.*\].*)$/) { shift @args; push(@first_arg, split('\s+', $1)); push(@first_arg, $2); } else { @first_arg = split('\s+', shift @args); } unshift(@args, pop @first_arg); $type = join " ", @first_arg; foreach $param (@args) { if ($param =~ m/^(\*+)\s*(.*)/) { save_struct_actual($2); push_parameter($2, "$type $1", $file); } elsif ($param =~ m/(.*?):(\d+)/) { if ($type ne "") { # skip unnamed bit-fields save_struct_actual($1); push_parameter($1, "$type:$2", $file) } } else { save_struct_actual($param); push_parameter($param, $type, $file); } } } } } sub push_parameter($$$) { my $param = shift; my $type = shift; my $file = shift; if (($anon_struct_union == 1) && ($type eq "") && ($param eq "}")) { return; # ignore the ending }; from anon. 
struct/union } $anon_struct_union = 0; my $param_name = $param; $param_name =~ s/\[.*//; if ($type eq "" && $param =~ /\.\.\.$/) { if (!$param =~ /\w\.\.\.$/) { # handles unnamed variable parameters $param = "..."; } if (!defined $parameterdescs{$param} || $parameterdescs{$param} eq "") { $parameterdescs{$param} = "variable arguments"; } } elsif ($type eq "" && ($param eq "" or $param eq "void")) { $param="void"; $parameterdescs{void} = "no arguments"; } elsif ($type eq "" && ($param eq "struct" or $param eq "union")) # handle unnamed (anonymous) union or struct: { $type = $param; $param = "{unnamed_" . $param . "}"; $parameterdescs{$param} = "anonymous\n"; $anon_struct_union = 1; } # warn if parameter has no description # (but ignore ones starting with # as these are not parameters # but inline preprocessor statements); # also ignore unnamed structs/unions; if (!$anon_struct_union) { if (!defined $parameterdescs{$param_name} && $param_name !~ /^#/) { $parameterdescs{$param_name} = $undescribed; if (($type eq 'function') || ($type eq 'enum')) { print STDERR "${file}:$.: warning: Function parameter ". "or member '$param' not " . "described in '$declaration_name'\n"; } print STDERR "${file}:$.: warning:" . " No description found for parameter '$param'\n"; ++$warnings; } } $param = xml_escape($param); # strip spaces from $param so that it is one continuous string # on @parameterlist; # this fixes a problem where check_sections() cannot find # a parameter like "addr[6 + 2]" because it actually appears # as "addr[6", "+", "2]" on the parameter list; # but it's better to maintain the param string unchanged for output, # so just weaken the string compare in check_sections() to ignore # "[blah" in a parameter string; ###$param =~ s/\s*//g; push @parameterlist, $param; $parametertypes{$param} = $type; } sub check_sections($$$$$$) { my ($file, $decl_name, $decl_type, $sectcheck, $prmscheck, $nested) = @_; my @sects = split ' ', $sectcheck; my @prms = split ' ', $prmscheck; my $err; my ($px, $sx); my $prm_clean; # strip trailing "[array size]" and/or beginning "*" foreach $sx (0 .. $#sects) { $err = 1; foreach $px (0 .. $#prms) { $prm_clean = $prms[$px]; $prm_clean =~ s/\[.*\]//; $prm_clean =~ s/__attribute__\s*\(\([a-z,_\*\s\(\)]*\)\)//i; # ignore array size in a parameter string; # however, the original param string may contain # spaces, e.g.: addr[6 + 2] # and this appears in @prms as "addr[6" since the # parameter list is split at spaces; # hence just ignore "[..." for the sections check; $prm_clean =~ s/\[.*//; ##$prm_clean =~ s/^\**//; if ($prm_clean eq $sects[$sx]) { $err = 0; last; } } if ($err) { if ($decl_type eq "function") { print STDERR "${file}:$.: warning: " . "Excess function parameter " . "'$sects[$sx]' " . "description in '$decl_name'\n"; ++$warnings; } else { if ($nested !~ m/\Q$sects[$sx]\E/) { print STDERR "${file}:$.: warning: " . "Excess $decl_type member " . "'$sects[$sx]' " . "description in '$decl_name'\n"; ++$warnings; } } } } } ## # Checks the section describing the return value of a function. sub check_return_section { my $file = shift; my $declaration_name = shift; my $return_type = shift; # Ignore an empty return type (It's a macro) # Ignore functions with a "void" return type. (But don't ignore "void *") if (($return_type eq "") || ($return_type =~ /void\s*\w*\s*$/)) { return; } if (!defined($sections{$section_return}) || $sections{$section_return} eq "") { print STDERR "${file}:$.: warning: " . "No description found for return value of " . 
"'$declaration_name'\n"; ++$warnings; } } ## # takes a function prototype and the name of the current file being # processed and spits out all the details stored in the global # arrays/hashes. sub dump_function($$) { my $prototype = shift; my $file = shift; my $noret = 0; $prototype =~ s/^static +//; $prototype =~ s/^extern +//; $prototype =~ s/^asmlinkage +//; $prototype =~ s/^inline +//; $prototype =~ s/^__inline__ +//; $prototype =~ s/^__inline +//; $prototype =~ s/^__always_inline +//; $prototype =~ s/^noinline +//; $prototype =~ s/__init +//; $prototype =~ s/__init_or_module +//; $prototype =~ s/__meminit +//; $prototype =~ s/__must_check +//; $prototype =~ s/__weak +//; my $define = $prototype =~ s/^#\s*define\s+//; #ak added $prototype =~ s/__attribute__\s*\(\([a-z,]*\)\)//; # Yes, this truly is vile. We are looking for: # 1. Return type (may be nothing if we're looking at a macro) # 2. Function name # 3. Function parameters. # # All the while we have to watch out for function pointer parameters # (which IIRC is what the two sections are for), C types (these # regexps don't even start to express all the possibilities), and # so on. # # If you mess with these regexps, it's a good idea to check that # the following functions' documentation still comes out right: # - parport_register_device (function pointer parameters) # - atomic_set (macro) # - pci_match_device, __copy_to_user (long return type) if ($define && $prototype =~ m/^()([a-zA-Z0-9_~:]+)\s+/) { # This is an object-like macro, it has no return type and no parameter # list. # Function-like macros are not allowed to have spaces between # declaration_name and opening parenthesis (notice the \s+). $return_type = $1; $declaration_name = $2; $noret = 1; } elsif ($prototype =~ m/^()([a-zA-Z0-9_~:]+)\s*\(([^\(]*)\)/ || $prototype =~ m/^(\w+)\s+([a-zA-Z0-9_~:]+)\s*\(([^\(]*)\)/ || $prototype =~ m/^(\w+\s*\*)\s*([a-zA-Z0-9_~:]+)\s*\(([^\(]*)\)/ || $prototype =~ m/^(\w+\s+\w+)\s+([a-zA-Z0-9_~:]+)\s*\(([^\(]*)\)/ || $prototype =~ m/^(\w+\s+\w+\s*\*+)\s*([a-zA-Z0-9_~:]+)\s*\(([^\(]*)\)/ || $prototype =~ m/^(\w+\s+\w+\s+\w+)\s+([a-zA-Z0-9_~:]+)\s*\(([^\(]*)\)/ || $prototype =~ m/^(\w+\s+\w+\s+\w+\s*\*)\s*([a-zA-Z0-9_~:]+)\s*\(([^\(]*)\)/ || $prototype =~ m/^()([a-zA-Z0-9_~:]+)\s*\(([^\{]*)\)/ || $prototype =~ m/^(\w+)\s+([a-zA-Z0-9_~:]+)\s*\(([^\{]*)\)/ || $prototype =~ m/^(\w+\s*\*)\s*([a-zA-Z0-9_~:]+)\s*\(([^\{]*)\)/ || $prototype =~ m/^(\w+\s+\w+)\s+([a-zA-Z0-9_~:]+)\s*\(([^\{]*)\)/ || $prototype =~ m/^(\w+\s+\w+\s*\*)\s*([a-zA-Z0-9_~:]+)\s*\(([^\{]*)\)/ || $prototype =~ m/^(\w+\s+\w+\s+\w+)\s+([a-zA-Z0-9_~:]+)\s*\(([^\{]*)\)/ || $prototype =~ m/^(\w+\s+\w+\s+\w+\s*\*)\s*([a-zA-Z0-9_~:]+)\s*\(([^\{]*)\)/ || $prototype =~ m/^(\w+\s+\w+\s+\w+\s+\w+)\s+([a-zA-Z0-9_~:]+)\s*\(([^\{]*)\)/ || $prototype =~ m/^(\w+\s+\w+\s+\w+\s+\w+\s*\*)\s*([a-zA-Z0-9_~:]+)\s*\(([^\{]*)\)/ || $prototype =~ m/^(\w+\s+\w+\s*\*\s*\w+\s*\*\s*)\s*([a-zA-Z0-9_~:]+)\s*\(([^\{]*)\)/) { $return_type = $1; $declaration_name = $2; my $args = $3; create_parameterlist($args, ',', $file); } else { print STDERR "${file}:$.: warning: cannot understand function prototype: '$prototype'\n"; return; } my $prms = join " ", @parameterlist; check_sections($file, $declaration_name, "function", $sectcheck, $prms, ""); # This check emits a lot of warnings at the moment, because many # functions don't have a 'Return' doc section. So until the number # of warnings goes sufficiently down, the check is only performed in # verbose mode. # TODO: always perform the check. 
if ($verbose && !$noret) { check_return_section($file, $declaration_name, $return_type); } output_declaration($declaration_name, 'function', {'function' => $declaration_name, 'module' => $modulename, 'functiontype' => $return_type, 'parameterlist' => \@parameterlist, 'parameterdescs' => \%parameterdescs, 'parametertypes' => \%parametertypes, 'sectionlist' => \@sectionlist, 'sections' => \%sections, 'purpose' => $declaration_purpose }); } sub reset_state { $function = ""; %parameterdescs = (); %parametertypes = (); @parameterlist = (); %sections = (); @sectionlist = (); $sectcheck = ""; $struct_actual = ""; $prototype = ""; $state = STATE_NORMAL; $inline_doc_state = STATE_INLINE_NA; } sub tracepoint_munge($) { my $file = shift; my $tracepointname = 0; my $tracepointargs = 0; if ($prototype =~ m/TRACE_EVENT\((.*?),/) { $tracepointname = $1; } if ($prototype =~ m/DEFINE_SINGLE_EVENT\((.*?),/) { $tracepointname = $1; } if ($prototype =~ m/DEFINE_EVENT\((.*?),(.*?),/) { $tracepointname = $2; } $tracepointname =~ s/^\s+//; #strip leading whitespace if ($prototype =~ m/TP_PROTO\((.*?)\)/) { $tracepointargs = $1; } if (($tracepointname eq 0) || ($tracepointargs eq 0)) { print STDERR "${file}:$.: warning: Unrecognized tracepoint format: \n". "$prototype\n"; } else { $prototype = "static inline void trace_$tracepointname($tracepointargs)"; } } sub syscall_munge() { my $void = 0; $prototype =~ s@[\r\n\t]+@ @gos; # strip newlines/CR's/tabs ## if ($prototype =~ m/SYSCALL_DEFINE0\s*\(\s*(a-zA-Z0-9_)*\s*\)/) { if ($prototype =~ m/SYSCALL_DEFINE0/) { $void = 1; ## $prototype = "long sys_$1(void)"; } $prototype =~ s/SYSCALL_DEFINE.*\(/long sys_/; # fix return type & func name if ($prototype =~ m/long (sys_.*?),/) { $prototype =~ s/,/\(/; } elsif ($void) { $prototype =~ s/\)/\(void\)/; } # now delete all of the odd-number commas in $prototype # so that arg types & arg names don't have a comma between them my $count = 0; my $len = length($prototype); if ($void) { $len = 0; # skip the for-loop } for (my $ix = 0; $ix < $len; $ix++) { if (substr($prototype, $ix, 1) eq ',') { $count++; if ($count % 2 == 1) { substr($prototype, $ix, 1) = ' '; } } } } sub process_proto_function($$) { my $x = shift; my $file = shift; $x =~ s@\/\/.*$@@gos; # strip C99-style comments to end of line if ($x =~ m#\s*/\*\s+MACDOC\s*#io || ($x =~ /^#/ && $x !~ /^#\s*define/)) { # do nothing } elsif ($x =~ /([^\{]*)/) { $prototype .= $1; } if (($x =~ /\{/) || ($x =~ /\#\s*define/) || ($x =~ /;/)) { $prototype =~ s@/\*.*?\*/@@gos; # strip comments. $prototype =~ s@[\r\n]+@ @gos; # strip newlines/cr's. $prototype =~ s@^\s+@@gos; # strip leading spaces if ($prototype =~ /SYSCALL_DEFINE/) { syscall_munge(); } if ($prototype =~ /TRACE_EVENT/ || $prototype =~ /DEFINE_EVENT/ || $prototype =~ /DEFINE_SINGLE_EVENT/) { tracepoint_munge($file); } dump_function($prototype, $file); reset_state(); } } sub process_proto_type($$) { my $x = shift; my $file = shift; $x =~ s@[\r\n]+@ @gos; # strip newlines/cr's. $x =~ s@^\s+@@gos; # strip leading spaces $x =~ s@\s+$@@gos; # strip trailing spaces $x =~ s@\/\/.*$@@gos; # strip C99-style comments to end of line if ($x =~ /^#/) { # To distinguish preprocessor directive from regular declaration later. $x .= ";"; } while (1) { if ( $x =~ /([^{};]*)([{};])(.*)/ ) { $prototype .= $1 . 
$2;
	    ($2 eq '{') && $brcount++;
	    ($2 eq '}') && $brcount--;
	    if (($2 eq ';') && ($brcount == 0)) {
		dump_declaration($prototype, $file);
		reset_state();
		last;
	    }
	    $x = $3;
	} else {
	    $prototype .= $x;
	    last;
	}
    }
}

# xml_escape: replace <, >, and & in the text stream;
#
# however, formatting controls that are generated internally/locally in the
# kernel-doc script are not escaped here; instead, they begin life like
# $blankline_html (4 of '\' followed by a mnemonic + ':'), then these strings
# are converted to their mnemonic-expected output, without the 4 * '\' & ':',
# just before actual output; (this is done by local_unescape())
sub xml_escape($) {
	my $text = shift;
	if (($output_mode eq "text") || ($output_mode eq "man")) {
		return $text;
	}
	$text =~ s/\&/\\\\\\amp;/g;
	$text =~ s/\</\\\\\\lt;/g;
	$text =~ s/\>/\\\\\\gt;/g;
	return $text;
}

# xml_unescape: reverse the effects of xml_escape
sub xml_unescape($) {
	my $text = shift;
	if (($output_mode eq "text") || ($output_mode eq "man")) {
		return $text;
	}
	$text =~ s/\\\\\\amp;/\&/g;
	$text =~ s/\\\\\\lt;/</g;
	$text =~ s/\\\\\\gt;/>/g;
	return $text;
}

# convert local escape strings to html
# local escape strings look like:  '\\\\menmonic:' (that's 4 backslashes)
sub local_unescape($) {
	my $text = shift;
	if (($output_mode eq "text") || ($output_mode eq "man")) {
		return $text;
	}
	$text =~ s/\\\\\\\\lt:/</g;
	$text =~ s/\\\\\\\\gt:/>/g;
	return $text;
}

sub map_filename($) {
    my $file;
    my ($orig_file) = @_;

    if (defined($ENV{'SRCTREE'})) {
	$file = "$ENV{'SRCTREE'}" . "/" . $orig_file;
    } else {
	$file = $orig_file;
    }

    if (defined($source_map{$file})) {
	$file = $source_map{$file};
    }

    return $file;
}

sub process_export_file($) {
    my ($orig_file) = @_;
    my $file = map_filename($orig_file);

    if (!open(IN,"<$file")) {
	print STDERR "Error: Cannot open file $file\n";
	++$errors;
	return;
    }

    while (<IN>) {
	if (/$export_symbol/) {
	    $function_table{$2} = 1;
	}
    }

    close(IN);
}

sub process_file($) {
    my $file;
    my $identifier;
    my $func;
    my $descr;
    my $in_purpose = 0;
    my $initial_section_counter = $section_counter;
    my ($orig_file) = @_;
    my $leading_space;

    $file = map_filename($orig_file);

    if (!open(IN,"<$file")) {
	print STDERR "Error: Cannot open file $file\n";
	++$errors;
	return;
    }

    $. = 1;

    $section_counter = 0;
    while (<IN>) {
	while (s/\\\s*$//) {
	    $_ .= <IN>;
	}
	if ($state == STATE_NORMAL) {
	    if (/$doc_start/o) {
		$state = STATE_NAME;	# next line is always the function name
		$in_doc_sect = 0;
		$declaration_start_line = $. + 1;
	    }
	} elsif ($state == STATE_NAME) {# this line is the function name (always)
	    if (/$doc_block/o) {
		$state = STATE_DOCBLOCK;
		$contents = "";
		$new_start_line = $. + 1;

		if ( $1 eq "" ) {
		    $section = $section_intro;
		} else {
		    $section = $1;
		}
	    } elsif (/$doc_decl/o) {
		$identifier = $1;
		if (/\s*([\w\s]+?)\s*-/) {
		    $identifier = $1;
		}

		$state = STATE_FIELD;
		# if there's no @param blocks need to set up default section
		# here
		$contents = "";
		$section = $section_default;
		$new_start_line = $. + 1;

		if (/-(.*)/) {
		    # strip leading/trailing/multiple spaces
		    $descr= $1;
		    $descr =~ s/^\s*//;
		    $descr =~ s/\s*$//;
		    $descr =~ s/\s+/ /g;
		    $declaration_purpose = xml_escape($descr);
		    $in_purpose = 1;
		} else {
		    $declaration_purpose = "";
		}

		if (($declaration_purpose eq "") && $verbose) {
		    print STDERR "${file}:$.: warning: missing initial short description on line:\n";
		    print STDERR $_;
		    ++$warnings;
		}

		if ($identifier =~ m/^struct/) {
		    $decl_type = 'struct';
		} elsif ($identifier =~ m/^union/) {
		    $decl_type = 'union';
		} elsif ($identifier =~ m/^enum/) {
		    $decl_type = 'enum';
		} elsif ($identifier =~ m/^typedef/) {
		    $decl_type = 'typedef';
		} else {
		    $decl_type = 'function';
		}

		if ($verbose) {
		    print STDERR "${file}:$.: info: Scanning doc for $identifier\n";
		}
	    } else {
		print STDERR "${file}:$.: warning: Cannot understand $_ on line $.",
		" - I thought it was a doc line\n";
		++$warnings;
		$state = STATE_NORMAL;
	    }
	} elsif ($state == STATE_FIELD) {	# look for head: lines, and include content
	    if (/$doc_sect/i) { # case insensitive for supported section names
		$newsection = $1;
		$newcontents = $2;

		# map the supported section names to the canonical names
		if ($newsection =~ m/^description$/i) {
		    $newsection = $section_default;
		} elsif ($newsection =~ m/^context$/i) {
		    $newsection = $section_context;
		} elsif ($newsection =~ m/^returns?$/i) {
		    $newsection = $section_return;
		} elsif ($newsection =~ m/^\@return$/) {
		    # special: @return is a section, not a param description
		    $newsection = $section_return;
		}

		if (($contents ne "") && ($contents ne "\n")) {
		    if (!$in_doc_sect && $verbose) {
			print STDERR "${file}:$.: warning: contents before sections\n";
			++$warnings;
		    }
		    dump_section($file, $section, xml_escape($contents));
		    $section = $section_default;
		}

		$in_doc_sect = 1;
		$in_purpose = 0;
		$contents = $newcontents;
		$new_start_line = $.;
		while ((substr($contents, 0, 1) eq " ") ||
		       substr($contents, 0, 1) eq "\t") {
		    $contents = substr($contents, 1);
		}
		if ($contents ne "") {
		    $contents .= "\n";
		}
		$section = $newsection;
		$leading_space = undef;
	    } elsif (/$doc_end/) {
		if (($contents ne "") && ($contents ne "\n")) {
		    dump_section($file, $section, xml_escape($contents));
		    $section = $section_default;
		    $contents = "";
		}
		# look for doc_com + <text> + doc_end:
		if ($_ =~ m'\s*\*\s*[a-zA-Z_0-9:\.]+\*/') {
		    print STDERR "${file}:$.: warning: suspicious ending line: $_";
		    ++$warnings;
		}

		$prototype = "";
		$state = STATE_PROTO;
		$brcount = 0;
#		print STDERR "end of doc comment, looking for prototype\n";
	    } elsif (/$doc_content/) {
		# miguel-style comment kludge, look for blank lines after
		# @parameter line to signify start of description
		if ($1 eq "") {
		    if ($section =~ m/^@/ || $section eq $section_context) {
			dump_section($file, $section, xml_escape($contents));
			$section = $section_default;
			$contents = "";
			$new_start_line = $.;
		    } else {
			$contents .= "\n";
		    }
		    $in_purpose = 0;
		} elsif ($in_purpose == 1) {
		    # Continued declaration purpose
		    chomp($declaration_purpose);
		    $declaration_purpose .= " " . xml_escape($1);
		    $declaration_purpose =~ s/\s+/ /g;
		} else {
		    my $cont = $1;
		    if ($section =~ m/^@/ || $section eq $section_context) {
			if (!defined $leading_space) {
			    if ($cont =~ m/^(\s+)/) {
				$leading_space = $1;
			    } else {
				$leading_space = "";
			    }
			}

			$cont =~ s/^$leading_space//;
		    }
		    $contents .= $cont . "\n";
		}
	    } else {
		# i dont know - bad line?  ignore.
		print STDERR "${file}:$.: warning: bad line: $_";
		++$warnings;
	    }
	} elsif ($state == STATE_INLINE) { # scanning for inline parameters
	    # First line (state 1) needs to be a @parameter
	    if ($inline_doc_state == STATE_INLINE_NAME && /$doc_inline_sect/o) {
		$section = $1;
		$contents = $2;
		$new_start_line = $.;
		if ($contents ne "") {
		    while ((substr($contents, 0, 1) eq " ") ||
			   substr($contents, 0, 1) eq "\t") {
			$contents = substr($contents, 1);
		    }
		    $contents .= "\n";
		}
		$inline_doc_state = STATE_INLINE_TEXT;
		# Documentation block end */
	    } elsif (/$doc_inline_end/) {
		if (($contents ne "") && ($contents ne "\n")) {
		    dump_section($file, $section, xml_escape($contents));
		    $section = $section_default;
		    $contents = "";
		}
		$state = STATE_PROTO;
		$inline_doc_state = STATE_INLINE_NA;
		# Regular text
	    } elsif (/$doc_content/) {
		if ($inline_doc_state == STATE_INLINE_TEXT) {
		    $contents .= $1 . "\n";
		    # nuke leading blank lines
		    if ($contents =~ /^\s*$/) {
			$contents = "";
		    }
		} elsif ($inline_doc_state == STATE_INLINE_NAME) {
		    $inline_doc_state = STATE_INLINE_ERROR;
		    print STDERR "${file}:$.: warning: ";
		    print STDERR "Incorrect use of kernel-doc format: $_";
		    ++$warnings;
		}
	    }
	} elsif ($state == STATE_PROTO) { # scanning for function '{' (end of prototype)
	    if (/$doc_inline_oneline/) {
		$section = $1;
		$contents = $2;
		if ($contents ne "") {
		    $contents .= "\n";
		    dump_section($file, $section, xml_escape($contents));
		    $section = $section_default;
		    $contents = "";
		}
	    } elsif (/$doc_inline_start/) {
		$state = STATE_INLINE;
		$inline_doc_state = STATE_INLINE_NAME;
	    } elsif ($decl_type eq 'function') {
		process_proto_function($_, $file);
	    } else {
		process_proto_type($_, $file);
	    }
	} elsif ($state == STATE_DOCBLOCK) {
	    if (/$doc_end/) {
		dump_doc_section($file, $section, xml_escape($contents));
		$section = $section_default;
		$contents = "";
		$function = "";
		%parameterdescs = ();
		%parametertypes = ();
		@parameterlist = ();
		%sections = ();
		@sectionlist = ();
		$prototype = "";
		$state = STATE_NORMAL;
	    } elsif (/$doc_content/) {
		if ( $1 eq "" ) {
		    $contents .= $blankline;
		} else {
		    $contents .= $1 . "\n";
		}
	    }
	}
    }

    if ($initial_section_counter == $section_counter) {
	print STDERR "${file}:1: warning: no structured comments found\n";
	if (($output_selection == OUTPUT_INCLUDE) &&
	    ($show_not_found == 1)) {
	    print STDERR "    Was looking for '$_'.\n" for keys %function_table;
	}
	if ($output_mode eq "xml") {
	    # The template wants at least one RefEntry here; make one.
	    print "<refentry>\n";
	    print " <refnamediv>\n";
	    print "  <refname>\n";
	    print "   ${orig_file}\n";
	    print "  </refname>\n";
	    print "  <refpurpose>\n";
	    print "   Document generation inconsistency\n";
	    print "  </refpurpose>\n";
	    print " </refnamediv>\n";
	    print " <refsect1>\n";
	    print "  <title>\n";
	    print "   Oops\n";
	    print "  </title>\n";
	    print "  <warning>\n";
	    print "   <para>\n";
	    print "    The template for this document tried to insert\n";
	    print "    the structured comment from the file\n";
	    print "    <filename>${orig_file}</filename> at this point,\n";
	    print "    but none was found.\n";
	    print "    This dummy section is inserted to allow\n";
	    print "    generation to continue.\n";
	    print "   </para>\n";
	    print "  </warning>\n";
	    print " </refsect1>\n";
	    print "</refentry>\n";
	}
    }
}

$kernelversion = get_kernel_version();

# generate a sequence of code that will splice in highlighting information
# using the s// operator.
for (my $k = 0; $k < @highlights; $k++) {
    my $pattern = $highlights[$k][0];
    my $result = $highlights[$k][1];
#   print STDERR "scanning pattern:$pattern, highlight:($result)\n";
    $dohighlight .=  "\$contents =~ s:$pattern:$result:gs;\n";
}
# Read the file that maps relative names to absolute names for
# separate source and object directories and for shadow trees.
if (open(SOURCE_MAP, "<.tmp_filelist.txt")) {
	my ($relname, $absname);
	while (<SOURCE_MAP>) {
		chop();
		($relname, $absname) = (split())[0..1];
		$relname =~ s:^/+::;
		$source_map{$relname} = $absname;
	}
	close(SOURCE_MAP);
}

if ($output_selection == OUTPUT_EXPORTED ||
    $output_selection == OUTPUT_INTERNAL) {

    push(@export_file_list, @ARGV);

    foreach (@export_file_list) {
	chomp;
	process_export_file($_);
    }
}

foreach (@ARGV) {
    chomp;
    process_file($_);
}
if ($verbose && $errors) {
  print STDERR "$errors errors\n";
}
if ($verbose && $warnings) {
  print STDERR "$warnings warnings\n";
}

exit($errors);
backport-iwlwifi-dkms-7906/scripts/mod_helpers.sh000066400000000000000000000003401351064634500222240ustar00rootroot00000000000000
function mod_filename() {
	which modinfo > /dev/null 2>&1
	if [[ $? -eq 0 ]]; then
		MOD_QUERY="modinfo -F filename"
	else
		MOD_QUERY="modprobe -l"
	fi
	mod_path="$($MOD_QUERY $1 | tail -1)"
	echo $(basename "$mod_path")
}
backport-iwlwifi-dkms-7906/scripts/uninstall.sh000077500000000000000000000006061351064634500217420ustar00rootroot00000000000000
#!/bin/bash
set -e

source ./scripts/mod_helpers.sh

if test "$(mod_filename compat)" = "compat.ko.gz" ; then
	compr=".gz"
elif test "$(mod_filename compat)" = "compat.ko.xz" ; then
	compr=".xz"
else
	compr=""
fi

for driver in $(find ${BACKPORT_DIR} -type f -name '*.ko'); do
	mod_name=${driver/${BACKPORT_DIR}/${KLIB}${KMODDIR}}${compr}
	echo "  uninstall" $mod_name
	rm -f $mod_name
done
backport-iwlwifi-dkms-7906/scripts/update-initramfs.sh000077500000000000000000000044021351064634500232030ustar00rootroot00000000000000
#!/bin/bash
# Copyright 2009-2013	Luis R. Rodriguez
#
# Since we provide ssb, ethernet modules and most importantly
# DRM drivers, people may want to update the initramfs image
# of their distribution. This can also help people who may
# want to wireless-boot their systems.

KLIB="$1"
ver=$(echo $KLIB | awk -F "/lib/modules/" '{print $2}' | awk -F"/" '{print $1}')
dir=/boot/

LSB_RED_ID=$(/usr/bin/lsb_release -i -s 2>/dev/null)
if [[ -z $LSB_RED_ID && -f "/etc/os-release" ]]; then
	# Let's try with os-release. Fedora doesn't have
	# lsb_release anymore.
	LSB_RED_ID=$(sed -n '/^NAME/ s/^NAME=\(.*\)$/\1/p' /etc/os-release)
fi

case $LSB_RED_ID in
"Ubuntu")
	echo "Updating ${LSB_RED_ID}'s initramfs for $ver under $dir ..."
	mkinitramfs -o $dir/initrd.img-$ver $ver
	echo "Will now run update-grub to ensure grub will find the new initramfs ..."
	update-grub
	;;
"Debian")
	echo "Updating ${LSB_RED_ID}'s initramfs for $ver under $dir ..."
	mkinitramfs -o $dir/initrd.img-$ver $ver
	echo "Will now run update-grub to ensure grub will find the new initramfs ..."
	update-grub
	;;
"Fedora")
	# This adds a -compat-drivers suffixed initramfs with a new grub2
	# entry to not override distribution's default stuff.
	INITRAMFS=${dir}initramfs-$ver-compat-drivers.img
	KERNEL=${dir}vmlinuz-$ver
	GRUB_TITLE="Fedora ($ver) with compat-drivers"

	echo "Updating ${LSB_RED_ID}'s initramfs for $ver under $dir ..."
	mkinitrd --force $INITRAMFS $ver

	# If a previous compat-drivers entry for the same kernel exists
	# do not add it again.
	grep -q "${GRUB_TITLE}" /etc/grub2.cfg &> /dev/null
	if [[ "$?" == "1" ]]; then
		echo "Will now run grubby to add a new kernel entry ..."
		# Add a new kernel entry
		grubby --grub2 --copy-default --add-kernel="$KERNEL" --initrd="$INITRAMFS" --title="$GRUB_TITLE"
	fi
	;;
*)
	echo "Note:"
	echo "You may or may not need to update your initramfs, you should if"
	echo "any of the modules installed are part of your initramfs. To add"
	echo "support for your distribution to do this automatically send a"
	echo "patch against \"$(basename $0)\". If your distribution does not"
	echo "require this send a patch with the '/usr/bin/lsb_release -i -s'"
	echo "($LSB_RED_ID) tag for your distribution to avoid this warning."
	;;
esac
backport-iwlwifi-dkms-7906/versions000066400000000000000000000003261351064634500174750ustar00rootroot00000000000000
BACKPORTS_VERSION="(see git)"
BACKPORTED_KERNEL_VERSION="(see git)"
BACKPORTED_KERNEL_NAME="iwlwifi"
BACKPORTS_BUILD_TSTAMP=__DATE__ \" \" __TIME__
BACKPORTS_GIT_TRACKED="iwlwifi-stack-public:master:7906:7773a757"